diff --git a/api.html b/api.html
index 58c863d..0539bda 100644
--- a/api.html
+++ b/api.html
@@ -425,7 +425,7 @@ document.write(`
-
-img2latex(model: VisionEncoderDecoderModel | ORTModelForVision2Seq, tokenizer: RobertaTokenizerFast, images: list[str] | list[ndarray], device: device | None = None, out_format: Literal['latex', 'katex'] = 'latex', keep_style: bool = False, max_tokens: int = 1024, num_beams: int = 1, no_repeat_ngram_size: int = 0) → list[str][source]
+img2latex(model: VisionEncoderDecoderModel | ORTModelForVision2Seq, tokenizer: RobertaTokenizerFast, images: list[str] | list[ndarray], device: device | None = None, out_format: Literal['latex', 'katex'] = 'latex', keep_style: bool = False, max_tokens: int = 1024, num_beams: int = 1, no_repeat_ngram_size: int = 0) → list[str][source]
Convert images to LaTeX- or KaTeX-formatted strings.
- Parameters:
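A minimal usage sketch for img2latex, assuming the model and tokenizer have already been loaded elsewhere (a VisionEncoderDecoderModel and RobertaTokenizerFast); the image paths, beam settings, and variable names below are illustrative only, not part of this API:

import torch

# Assumption: `model` and `tokenizer` are preloaded; paths are placeholders.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

results = img2latex(
    model,
    tokenizer,
    images=["eq1.png", "eq2.png"],   # file paths or ndarrays
    device=device,
    out_format="katex",              # "latex" (default) or "katex"
    keep_style=False,
    max_tokens=1024,
    num_beams=3,                     # beam search; 1 = greedy decoding
    no_repeat_ngram_size=3,          # discourage repeated n-grams
)
for latex in results:
    print(latex)

Passing num_beams > 1 trades decoding speed for output quality; the function returns one string per input image, in order.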
@@ -463,7 +463,7 @@ document.write(`
-
-paragraph2md(img_path: str, latexdet_model: InferenceSession, textdet_model: TextDetector, textrec_model: TextRecognizer, latexrec_model: VisionEncoderDecoderModel | ORTModelForVision2Seq, tokenizer: RobertaTokenizerFast, device: device | None = None, num_beams=1) → str[source]
+paragraph2md(img_path: str, latexdet_model: InferenceSession, textdet_model: TextDetector, textrec_model: TextRecognizer, latexrec_model: VisionEncoderDecoderModel | ORTModelForVision2Seq, tokenizer: RobertaTokenizerFast, device: device | None = None, num_beams=1) → str[source]
Convert an image containing both text and mathematical formulas to markdown format.
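A hedged call sketch, assuming the detection and recognition models and the tokenizer are preloaded instances of the types listed in the signature above; "page.png" and the variable names are placeholders:

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Assumption: latexdet_model (InferenceSession), textdet_model (TextDetector),
# textrec_model (TextRecognizer), latexrec_model, and tokenizer are loaded elsewhere.
markdown = paragraph2md(
    img_path="page.png",
    latexdet_model=latexdet_model,
    textdet_model=textdet_model,
    textrec_model=textrec_model,
    latexrec_model=latexrec_model,
    tokenizer=tokenizer,
    device=device,
    num_beams=1,
)
print(markdown)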
This function processes a mixed-content image by:
1. Detecting mathematical formulas using a LaTeX detection model