From 6179cc3226ae1419362f8b7d21fd86a8a8ff5a6d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E4=B8=89=E6=B4=8B=E4=B8=89=E6=B4=8B?= <1258009915@qq.com>
Date: Sat, 6 Apr 2024 07:27:27 +0000
Subject: [PATCH 1/9] =?UTF-8?q?web=20demo=E6=94=AF=E6=8C=81katex,=20?=
=?UTF-8?q?=E4=B8=8D=E5=86=8D=E9=9C=80=E8=A6=81=E6=9C=AC=E5=9C=B0=E5=AE=89?=
=?UTF-8?q?=E8=A3=85xelatex=E6=B8=B2=E6=9F=93=E5=99=A8?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.gitignore | 7 ++-
requirements.txt | 3 +-
src/web.py | 124 +++++++++--------------------------------------
3 files changed, 29 insertions(+), 105 deletions(-)
diff --git a/.gitignore b/.gitignore
index 37c7cfd..43e8e15 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,12 @@
+**/.DS_Store
**/__pycache__
**/.vscode
+
**/train_result
+**/ckpt
+**/*cache
+**/.cache
+**/data
**/logs
-**/.cache
**/tmp*
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index b09c00d..232173c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,5 +9,4 @@ tensorboardX
nltk
python-multipart
-pdf2image
-# augraphy
\ No newline at end of file
+augraphy
\ No newline at end of file
diff --git a/src/web.py b/src/web.py
index 6dc10ff..88a4381 100644
--- a/src/web.py
+++ b/src/web.py
@@ -2,14 +2,11 @@ import os
import io
import base64
import tempfile
-import time
-import subprocess
import shutil
import streamlit as st
+import re
-from PIL import Image, ImageChops
-from pathlib import Path
-from pdf2image import convert_from_path
+from PIL import Image
from models.ocr_model.utils.inference import inference
from models.ocr_model.model.TexTeller import TexTeller
@@ -69,6 +66,19 @@ def get_model():
def get_tokenizer():
return TexTeller.get_tokenizer(os.environ['TOKENIZER_DIR'])
+def to_katex(formula: str) -> str:
+ res = formula
+ res = re.sub(r'\\mbox\{([^}]*)\}', r'\1', res)
+ res = re.sub(r'boldmath\$(.*?)\$', r'bm{\1}', res)
+ res = re.sub(r'\\\[(.*?)\\\]', r'\1\\newline', res)
+
+ pattern = r'(\\(?:left|middle|right|big|Big|bigg|Bigg|bigl|Bigl|biggl|Biggl|bigm|Bigm|biggm|Biggm|bigr|Bigr|biggr|Biggr))\{([^}]*)\}'
+ replacement = r'\1\2'
+ res = re.sub(pattern, replacement, res)
+ if res.endswith(r'\newline'):
+ res = res[:-8]
+ return res
+
def get_image_base64(img_file):
buffered = io.BytesIO()
img_file.seek(0)
@@ -76,55 +86,12 @@ def get_image_base64(img_file):
img.save(buffered, format="PNG")
return base64.b64encode(buffered.getvalue()).decode()
-def rendering(formula: str, out_img_path: Path) -> bool:
- build_dir = out_img_path / 'build'
- build_dir.mkdir(exist_ok=True, parents=True)
- f = build_dir / 'formula.tex'
- f.touch(exist_ok=True)
- f.write_text(tex.format(formula=formula))
-
- p = subprocess.Popen([
- 'xelatex',
- f'-output-directory={build_dir}',
- '-interaction=nonstopmode',
- '-halt-on-error',
- f'{f}'
- ])
- p.communicate()
- return p.returncode == 0
-
-def pdf_to_pngbytes(pdf_path):
- images = convert_from_path(pdf_path, dpi=400,first_page=1, last_page=1)
- trimmed_images = trim(images[0])
- png_image_bytes = io.BytesIO()
- trimmed_images.save(png_image_bytes, format='PNG')
- png_image_bytes.seek(0)
- return png_image_bytes
-
-def trim(im):
- bg = Image.new(im.mode, im.size, im.getpixel((0,0)))
- diff = ImageChops.difference(im, bg)
- diff = ImageChops.add(diff, diff, 2.0, -100)
- bbox = diff.getbbox()
- if bbox:
- return im.crop(bbox)
- return im
-
-
model = get_model()
tokenizer = get_tokenizer()
-# check if xelatex is installed
-xelatex_installed = os.system('which xelatex > /dev/null 2>&1') == 0
if "start" not in st.session_state:
st.session_state["start"] = 1
-
- if xelatex_installed:
- st.toast('Hooray!', icon='🎉')
- time.sleep(0.5)
- st.toast("Xelatex have been detected.", icon='✅')
- else:
- st.error('xelatex is not installed. Please install it before using TexTeller.')
+ st.toast('Hooray!', icon='🎉')
# ============================ pages =============================== #
@@ -133,11 +100,6 @@ st.markdown(html_string, unsafe_allow_html=True)
uploaded_file = st.file_uploader("",type=['jpg', 'png', 'pdf'])
-if xelatex_installed:
- st.caption('🥳 Xelatex have been detected, rendered image will be displayed in the web page.')
-else:
- st.caption('😭 Xelatex is not detected, please check the resulting latex code by yourself, or check ... to have your xelatex setup ready.')
-
if uploaded_file:
img = Image.open(uploaded_file)
@@ -170,60 +132,18 @@ if uploaded_file:
with st.spinner("Predicting..."):
uploaded_file.seek(0)
- TeXTeller_result = inference(
+ TexTeller_result = inference(
model,
tokenizer,
[png_file_path],
True if os.environ['USE_CUDA'] == 'True' else False,
int(os.environ['NUM_BEAM'])
)[0]
- if not xelatex_installed:
- st.markdown(fail_gif_html, unsafe_allow_html=True)
- st.warning('Unable to find xelatex to render image. Please check the prediction results yourself.', icon="🤡")
- txt = st.text_area(
- ":red[Predicted formula]",
- TeXTeller_result,
- height=150,
- )
- else:
- is_successed = rendering(TeXTeller_result, Path(temp_dir))
- if is_successed:
- # st.code(TeXTeller_result, language='latex')
-
- img_base64 = get_image_base64(pdf_to_pngbytes(Path(temp_dir) / 'build' / 'formula.pdf'))
- st.markdown(suc_gif_html, unsafe_allow_html=True)
- st.success('Successfully rendered!', icon="✅")
- txt = st.text_area(
- ":red[Predicted formula]",
- TeXTeller_result,
- height=150,
- )
- # st.latex(TeXTeller_result)
- st.markdown(f"""
-
-
-
-<img src="data:image/png;base64,{img_base64}">
-
- """, unsafe_allow_html=True)
- else:
- st.markdown(fail_gif_html, unsafe_allow_html=True)
- st.error('Rendering failed. You can try using a higher resolution image or splitting the multi line formula into a single line for better results.', icon="❌")
- txt = st.text_area(
- ":red[Predicted formula]",
- TeXTeller_result,
- height=150,
- )
+ st.success('Completed!', icon="✅")
+ st.markdown(suc_gif_html, unsafe_allow_html=True)
+ katex_res = to_katex(TexTeller_result)
+ st.text_area(":red[Predicted formula]", katex_res, height=150)
+ st.latex(katex_res)
shutil.rmtree(temp_dir)
From 09f02166dbd4b13a61bd0fdac0c68db7b60502bb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E4=B8=89=E6=B4=8B=E4=B8=89=E6=B4=8B?= <1258009915@qq.com>
Date: Sat, 6 Apr 2024 07:43:03 +0000
Subject: [PATCH 2/9] update README.md
---
README.md | 17 +----------------
assets/README_zh.md | 17 +----------------
2 files changed, 2 insertions(+), 32 deletions(-)
diff --git a/README.md b/README.md
index 4b48fe0..3884503 100644
--- a/README.md
+++ b/README.md
@@ -36,16 +36,6 @@ python=3.10
> [!WARNING]
> Only CUDA versions >= 12.0 have been fully tested, so it is recommended to use CUDA version >= 12.0
-## 🖼 About Rendering LaTeX as Images
-
-* **Install XeLaTex** and ensure `xelatex` can be called directly from the command line.
-
-* To ensure correct rendering of the predicted formulas, **include the following packages** in your `.tex` file:
-
- ```tex
- \usepackage{multirow,multicol,amsmath,amsfonts,amssymb,mathtools,bm,mathrsfs,wasysym,amsbsy,upgreek,mathalfa,stmaryrd,mathrsfs,dsfont,amsthm,amsmath,multirow}
- ```
-
## 🚀 Getting Started
1. Clone the repository:
@@ -73,9 +63,7 @@ python=3.10
## 🌐 Web Demo
-First, **ensure that [poppler](https://poppler.freedesktop.org/) is correctly installed and added to the `PATH`** (so that the `pdftoppm` command can be directly used in the terminal).
-
-Then, go to the `TexTeller/src` directory and run the following command:
+Go to the `TexTeller/src` directory and run the following command:
```bash
./start_web.sh
@@ -86,9 +74,6 @@ Enter `http://localhost:8501` in a browser to view the web demo.
> [!TIP]
> You can change the default configuration of `start_web.sh`, for example, to use GPU for inference (e.g. `USE_CUDA=True`) or to increase the number of beams (e.g. `NUM_BEAM=3`) to achieve higher accuracy
-> [!IMPORTANT]
-> If you want to directly render the prediction results as images on the web (for example, to check if the prediction is correct), you need to ensure [xelatex is correctly installed](https://github.com/OleehyO/TexTeller/blob/main/README.md#-about-rendering-latex-as-images)
-
## 📡 API Usage
We use [ray serve](https://github.com/ray-project/ray) to provide an API interface for TexTeller, allowing you to integrate TexTeller into your own projects. To start the server, you first need to enter the `TexTeller/src` directory and then run the following command:
diff --git a/assets/README_zh.md b/assets/README_zh.md
index 67ad91a..1b511f2 100644
--- a/assets/README_zh.md
+++ b/assets/README_zh.md
@@ -36,16 +36,6 @@ python=3.10
> [!WARNING]
> 只有CUDA版本>= 12.0被完全测试过,所以最好使用>= 12.0的CUDA版本
-## 🖼 关于把latex渲染成图片
-
-* **安装XeLaTex** 并确保`xelatex`可以直接被命令行调用。
-
-* 为了确保正确渲染预测出的公式, 需要在`.tex`文件中**引入以下宏包**:
-
- ```tex
- \usepackage{multirow,multicol,amsmath,amsfonts,amssymb,mathtools,bm,mathrsfs,wasysym,amsbsy,upgreek,mathalfa,stmaryrd,mathrsfs,dsfont,amsthm,amsmath,multirow}
- ```
-
## 🚀 开搞
1. 克隆本仓库:
@@ -101,9 +91,7 @@ python=3.10
## 🌐 网页演示
-首先**确保[poppler](https://poppler.freedesktop.org/)被正确安装,并添加到`PATH`路径中**(终端可以直接使用`pdftoppm`命令)。
-
-然后进入 `TexTeller/src` 目录,运行以下命令
+进入 `TexTeller/src` 目录,运行以下命令
```bash
./start_web.sh
@@ -114,9 +102,6 @@ python=3.10
> [!TIP]
> 你可以改变`start_web.sh`的默认配置, 例如使用GPU进行推理(e.g. `USE_CUDA=True`) 或者增加beams的数量(e.g. `NUM_BEAM=3`)来获得更高的精确度
-> [!IMPORTANT]
-> 如果你想直接把预测结果在网页上渲染成图片(比如为了检查预测结果是否正确)你需要确保[xelatex被正确安装](https://github.com/OleehyO/TexTeller/blob/main/assets/README_zh.md#-%E5%85%B3%E4%BA%8E%E6%8A%8Alatex%E6%B8%B2%E6%9F%93%E6%88%90%E5%9B%BE%E7%89%87)
-
## 📡 API调用
我们使用[ray serve](https://github.com/ray-project/ray)来对外提供一个TexTeller的API接口,通过使用这个接口,你可以把TexTeller整合到自己的项目里。要想启动server,你需要先进入`TexTeller/src`目录然后运行以下命令:
From 35bc4e71a184d6acc811d1c8af757225fed0c21d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E4=B8=89=E6=B4=8B=E4=B8=89=E6=B4=8B?= <1258009915@qq.com>
Date: Sat, 6 Apr 2024 11:38:59 +0000
Subject: [PATCH 3/9] =?UTF-8?q?inference.py=E6=94=AF=E6=8C=81katex?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/inference.py | 14 ++++++++------
src/utils.py | 15 +++++++++++++++
src/web.py | 15 +--------------
3 files changed, 24 insertions(+), 20 deletions(-)
create mode 100644 src/utils.py
diff --git a/src/inference.py b/src/inference.py
index c6d6f61..2f4127a 100644
--- a/src/inference.py
+++ b/src/inference.py
@@ -1,8 +1,10 @@
import os
import argparse
+import cv2 as cv
from pathlib import Path
-from models.ocr_model.utils.inference import inference
+from utils import to_katex
+from models.ocr_model.utils.inference import inference as latex_inference
from models.ocr_model.model.TexTeller import TexTeller
@@ -21,16 +23,16 @@ if __name__ == '__main__':
action='store_true',
help='use cuda or not'
)
-
args = parser.parse_args()
# You can use your own checkpoint and tokenizer path.
print('Loading model and tokenizer...')
- model = TexTeller.from_pretrained()
+ latex_rec_model = TexTeller.from_pretrained()
tokenizer = TexTeller.get_tokenizer()
print('Model and tokenizer loaded.')
- img_path = [args.img]
+ img = cv.imread(args.img)
print('Inference...')
- res = inference(model, tokenizer, img_path, args.cuda)
- print(res[0])
+ res = latex_inference(latex_rec_model, tokenizer, [img], args.cuda)
+ res = to_katex(res[0])
+ print(res)
diff --git a/src/utils.py b/src/utils.py
new file mode 100644
index 0000000..6131bae
--- /dev/null
+++ b/src/utils.py
@@ -0,0 +1,15 @@
+import re
+
+
+def to_katex(formula: str) -> str:
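+ # e.g. to_katex(r'\mbox{speed} \big{(} v \big{)}') -> r'speed \big( v \big)'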
+ res = formula
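+ # unwrap \mbox{...}, keeping only its contents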
+ res = re.sub(r'\\mbox\{([^}]*)\}', r'\1', res)
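+ # rewrite \boldmath$x$ as \bm{x} (the preceding backslash is reused), then flatten \[...\] to its contents plus \newline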
+ res = re.sub(r'boldmath\$(.*?)\$', r'bm{\1}', res)
+ res = re.sub(r'\\\[(.*?)\\\]', r'\1\\newline', res)
+
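+ # \big{(} -> \big( etc.: delimiter-sizing commands take the delimiter directly, not a braced group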
+ pattern = r'(\\(?:left|middle|right|big|Big|bigg|Bigg|bigl|Bigl|biggl|Biggl|bigm|Bigm|biggm|Biggm|bigr|Bigr|biggr|Biggr))\{([^}]*)\}'
+ replacement = r'\1\2'
+ res = re.sub(pattern, replacement, res)
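+ # drop a trailing \newline left over from the \[...\] rewrite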
+ if res.endswith(r'\newline'):
+ res = res[:-8]
+ return res
diff --git a/src/web.py b/src/web.py
index 88a4381..9b53a59 100644
--- a/src/web.py
+++ b/src/web.py
@@ -4,11 +4,11 @@ import base64
import tempfile
import shutil
import streamlit as st
-import re
from PIL import Image
from models.ocr_model.utils.inference import inference
from models.ocr_model.model.TexTeller import TexTeller
+from utils import to_katex
html_string = '''
@@ -66,19 +66,6 @@ def get_model():
def get_tokenizer():
return TexTeller.get_tokenizer(os.environ['TOKENIZER_DIR'])
-def to_katex(formula: str) -> str:
- res = formula
- res = re.sub(r'\\mbox\{([^}]*)\}', r'\1', res)
- res = re.sub(r'boldmath\$(.*?)\$', r'bm{\1}', res)
- res = re.sub(r'\\\[(.*?)\\\]', r'\1\\newline', res)
-
- pattern = r'(\\(?:left|middle|right|big|Big|bigg|Bigg|bigl|Bigl|biggl|Biggl|bigm|Bigm|biggm|Biggm|bigr|Bigr|biggr|Biggr))\{([^}]*)\}'
- replacement = r'\1\2'
- res = re.sub(pattern, replacement, res)
- if res.endswith(r'\newline'):
- res = res[:-8]
- return res
-
def get_image_base64(img_file):
buffered = io.BytesIO()
img_file.seek(0)
From 8fdaef43f9b82a92a426cd504b0979237178c0d6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E4=B8=89=E6=B4=8B=E4=B8=89=E6=B4=8B?= <1258009915@qq.com>
Date: Sat, 6 Apr 2024 11:57:50 +0000
Subject: [PATCH 4/9] update README.md
---
README.md | 11 ++++++++---
assets/README_zh.md | 9 +++++++--
src/start_web.bat | 11 +++++++++++
3 files changed, 26 insertions(+), 5 deletions(-)
create mode 100644 src/start_web.bat
diff --git a/README.md b/README.md
index 3884503..8a7d029 100644
--- a/README.md
+++ b/README.md
@@ -44,13 +44,15 @@ python=3.10
git clone https://github.com/OleehyO/TexTeller
```
-2. After [installing pytorch](https://pytorch.org/get-started/locally/#start-locally), install the project's dependencies:
+2. [Install PyTorch](https://pytorch.org/get-started/locally/#start-locally)
+
+3. Install the project's dependencies:
```bash
pip install -r requirements.txt
```
-3. Enter the `TexTeller/src` directory and run the following command in the terminal to start inference:
+4. Enter the `TexTeller/src` directory and run the following command in the terminal to start inference:
```bash
python inference.py -img "/path/to/image.{jpg,png}"
@@ -72,7 +74,10 @@ Go to the `TexTeller/src` directory and run the following command:
Enter `http://localhost:8501` in a browser to view the web demo.
> [!TIP]
-> You can change the default configuration of `start_web.sh`, for example, to use GPU for inference (e.g. `USE_CUDA=True`) or to increase the number of beams (e.g. `NUM_BEAM=3`) to achieve higher accuracy
+> You can change the default configuration of `start_web.sh`, for example, to use GPU for inference (e.g. `USE_CUDA=True`) or to increase the number of beams (e.g. `NUM_BEAM=3`) to achieve higher accuracy.
+
+> [!NOTE]
+> If you are a Windows user, please run the `start_web.bat` file instead.
## 📡 API Usage
diff --git a/assets/README_zh.md b/assets/README_zh.md
index 1b511f2..809609b 100644
--- a/assets/README_zh.md
+++ b/assets/README_zh.md
@@ -44,13 +44,15 @@ python=3.10
git clone https://github.com/OleehyO/TexTeller
```
-2. [安装pytorch](https://pytorch.org/get-started/locally/#start-locally)后,再安装本项目的依赖包:
+2. [安装pytorch](https://pytorch.org/get-started/locally/#start-locally)
+
+3. 安装本项目的依赖包:
```bash
pip install -r requirements.txt
```
-3. 进入`TexTeller/src`目录,在终端运行以下命令进行推理:
+4. 进入`TexTeller/src`目录,在终端运行以下命令进行推理:
```bash
python inference.py -img "/path/to/image.{jpg,png}"
@@ -102,6 +104,9 @@ python=3.10
> [!TIP]
> 你可以改变`start_web.sh`的默认配置, 例如使用GPU进行推理(e.g. `USE_CUDA=True`) 或者增加beams的数量(e.g. `NUM_BEAM=3`)来获得更高的精确度
+> [!NOTE]
+> 对于Windows用户, 请运行 `start_web.bat`文件.
+
## 📡 API调用
我们使用[ray serve](https://github.com/ray-project/ray)来对外提供一个TexTeller的API接口,通过使用这个接口,你可以把TexTeller整合到自己的项目里。要想启动server,你需要先进入`TexTeller/src`目录然后运行以下命令:
diff --git a/src/start_web.bat b/src/start_web.bat
new file mode 100644
index 0000000..fd521e4
--- /dev/null
+++ b/src/start_web.bat
@@ -0,0 +1,11 @@
+@echo off
+SETLOCAL ENABLEEXTENSIONS
+
+set CHECKPOINT_DIR=default
+set TOKENIZER_DIR=default
+set USE_CUDA=False& REM True or False (case-sensitive)
+set NUM_BEAM=1
+
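+REM Plain "set" variables are inherited by the streamlit child process below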
+streamlit run web.py
+
+ENDLOCAL
From 936744ea1360635a49ba89da58fc8fb3a66b1308 Mon Sep 17 00:00:00 2001
From: TonyLee1256 <1508059870@qq.com>
Date: Thu, 11 Apr 2024 16:44:19 +0000
Subject: [PATCH 5/9] =?UTF-8?q?=E6=96=B0=E5=A2=9E=E5=85=AC=E5=BC=8F?=
=?UTF-8?q?=E6=A3=80=E6=B5=8B=E6=A8=A1=E5=9D=97?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
README.md | 19 +
assets/README_zh.md | 20 +
requirements.txt | 3 +-
src/infer_det.py | 197 +++++++++
src/models/det_model/model/infer_cfg.yml | 27 ++
src/models/det_model/preprocess.py | 494 +++++++++++++++++++++++
src/rec_infer_from_crop_imgs.py | 59 +++
7 files changed, 818 insertions(+), 1 deletion(-)
create mode 100644 src/infer_det.py
create mode 100644 src/models/det_model/model/infer_cfg.yml
create mode 100644 src/models/det_model/preprocess.py
create mode 100644 src/rec_infer_from_crop_imgs.py
diff --git a/README.md b/README.md
index 8a7d029..ce6c208 100644
--- a/README.md
+++ b/README.md
@@ -27,6 +27,9 @@ TexTeller was trained with ~~550K~~7.5M image-formula pairs (dataset available [
* 📮[2024-03-25] TexTeller 2.0 released! The training data for TexTeller 2.0 has been increased to 7.5M (about **15 times more** than TexTeller 1.0 and also improved in data quality). The trained TexTeller 2.0 demonstrated **superior performance** in the test set, especially in recognizing rare symbols, complex multi-line formulas, and matrices.
> [There](./assets/test.pdf) are more test images here and a horizontal comparison of recognition models from different companies.
+* 📮[2024-04-11] Added whole-image inference; you only need to additionally install the onnxruntime library to get the new feature! We manually annotated formulas in 3,415 Chinese textbook images and used 8,272 formula images from the IBEM English paper detection dataset. We trained a formula object-detection model based on the RT-DETR-R50 architecture and exported the trained model to ONNX format. This allows feeding in an image and recognizing all the formulas it contains in one go.
+
+
## 🔑 Prerequisites
python=3.10
@@ -79,6 +82,22 @@ Enter `http://localhost:8501` in a browser to view the web demo.
> [!NOTE]
> If you are a Windows user, please run the `start_web.bat` file instead.
+## Inference on Whole Images
+### Download Weights
+The ONNX model trained on 8,272 images from the IBEM dataset of English papers (https://zenodo.org/records/4757865):
+https://huggingface.co/TonyLee1256/texteller_det/resolve/main/rtdetr_r50vd_6x_coco_trained_on_IBEM_en_papers.onnx?download=true
+
+The ONNX model trained on 2,560 Chinese textbook images (100+ layouts):
+https://huggingface.co/TonyLee1256/texteller_det/blob/main/rtdetr_r50vd_6x_coco_trained_on_cn_textbook.onnx
+
+### Formula Detection
+Run infer_det.py in the TexTeller/src directory.
+This will detect all formulas in the input image, draw the detection results on the entire image and save it, and crop and save each detected formula as a separate image.
+
+### Batch Formula Recognition
+Run rec_infer_from_crop_imgs.py.
+Based on the formula detection results from the previous step, this script will perform batch recognition on all cropped formula images and save the recognition results as text files.
+
## 📡 API Usage
We use [ray serve](https://github.com/ray-project/ray) to provide an API interface for TexTeller, allowing you to integrate TexTeller into your own projects. To start the server, you first need to enter the `TexTeller/src` directory and then run the following command:
diff --git a/assets/README_zh.md b/assets/README_zh.md
index 809609b..de05ba5 100644
--- a/assets/README_zh.md
+++ b/assets/README_zh.md
@@ -27,6 +27,9 @@ TexTeller用了~~550K~~7.5M的图片-公式对进行训练(数据集可以在[
* 📮[2024-03-25] TexTeller2.0发布!TexTeller2.0的训练数据增大到了7.5M(相较于TexTeller1.0**增加了~15倍**并且数据质量也有所改善)。训练后的TexTeller2.0在测试集中展现出了**更加优越的性能**,尤其在生僻符号、复杂多行、矩阵的识别场景中。
> 在[这里](./test.pdf)有更多的测试图片以及各家识别模型的横向对比。
+* 📮[2024-04-11] 增加了整图推理的功能,只需额外安装onnxruntime库即可获取新功能!我们自行标注了3415张中文教材图片中的公式,并使用了8272张来自于IBEM英文论文公式检测数据集中的公式,基于RT-DETR-R50模型进行了公式目标检测的训练,并将训练好的模型导出为了onnx格式。以方便输入图片,一次性对图片中的所有公式进行识别。
+
+
## 🔑 前置条件
python=3.10
@@ -107,6 +110,23 @@ python=3.10
> [!NOTE]
> 对于Windows用户, 请运行 `start_web.bat`文件.
+## 整图推理
+
+### 下载权重
+在8272张IBEM数据集(https://zenodo.org/records/4757865)上训练,并导出的onnx模型:
+https://huggingface.co/TonyLee1256/texteller_det/resolve/main/rtdetr_r50vd_6x_coco_trained_on_IBEM_en_papers.onnx?download=true
+在2560张中文教材数据(100+版式)上训练,并导出的onnx模型:
+https://huggingface.co/TonyLee1256/texteller_det/blob/main/rtdetr_r50vd_6x_coco_trained_on_cn_textbook.onnx
+
+### 公式检测
+cd TexTeller/src
+python infer_det.py
+运行后,对整张图中的所有公式进行检测,绘制整图检测结果并保存,并将每一个检测出的目标单独裁剪并保存下来。
+
+### 公式批识别
+python rec_infer_from_crop_imgs.py
+基于上一步公式检测的结果,对裁剪出的所有公式进行批量识别,将识别结果保存为txt文件。
+
## 📡 API调用
我们使用[ray serve](https://github.com/ray-project/ray)来对外提供一个TexTeller的API接口,通过使用这个接口,你可以把TexTeller整合到自己的项目里。要想启动server,你需要先进入`TexTeller/src`目录然后运行以下命令:
diff --git a/requirements.txt b/requirements.txt
index 232173c..780156f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,5 +8,6 @@ accelerate
tensorboardX
nltk
python-multipart
+augraphy
-augraphy
\ No newline at end of file
+onnxruntime
\ No newline at end of file
diff --git a/src/infer_det.py b/src/infer_det.py
new file mode 100644
index 0000000..cc5da44
--- /dev/null
+++ b/src/infer_det.py
@@ -0,0 +1,197 @@
+import os
+import yaml
+import argparse
+import numpy as np
+import glob
+from onnxruntime import InferenceSession
+from tqdm import tqdm
+
+from models.det_model.preprocess import Compose
+import cv2
+
+# Note: keep file names standardized; prefer underscores throughout
+
+# Global dictionary
+SUPPORT_MODELS = {
+ 'YOLO', 'PPYOLOE', 'RCNN', 'SSD', 'Face', 'FCOS', 'SOLOv2', 'TTFNet',
+ 'S2ANet', 'JDE', 'FairMOT', 'DeepSORT', 'GFL', 'PicoDet', 'CenterNet',
+ 'TOOD', 'RetinaNet', 'StrongBaseline', 'STGCN', 'YOLOX', 'HRNet',
+ 'DETR'
+}
+
+parser = argparse.ArgumentParser(description=__doc__)
+parser.add_argument("--infer_cfg", type=str, help="infer_cfg.yml",
+ default="./models/det_model/model/infer_cfg.yml"
+ )
+parser.add_argument('--onnx_file', type=str, help="onnx model file path",
+ default="./models/det_model/model/rtdetr_r50vd_6x_coco.onnx"
+ )
+parser.add_argument("--image_dir", type=str)
+parser.add_argument("--image_file", type=str, default='/data/ljm/TexTeller/src/Tr00_0001015-page02.jpg')
+parser.add_argument("--imgsave_dir", type=str,
+ default="."
+ )
+
+def get_test_images(infer_dir, infer_img):
+ """
+ Get image path list in TEST mode
+ """
+ assert infer_img is not None or infer_dir is not None, \
+ "--image_file or --image_dir should be set"
+ assert infer_img is None or os.path.isfile(infer_img), \
+ "{} is not a file".format(infer_img)
+ assert infer_dir is None or os.path.isdir(infer_dir), \
+ "{} is not a directory".format(infer_dir)
+
+ # infer_img has a higher priority
+ if infer_img and os.path.isfile(infer_img):
+ return [infer_img]
+
+ images = set()
+ infer_dir = os.path.abspath(infer_dir)
+ assert os.path.isdir(infer_dir), \
+ "infer_dir {} is not a directory".format(infer_dir)
+ exts = ['jpg', 'jpeg', 'png', 'bmp']
+ exts += [ext.upper() for ext in exts]
+ for ext in exts:
+ images.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))
+ images = list(images)
+
+ assert len(images) > 0, "no image found in {}".format(infer_dir)
+ print("Found {} inference images in total.".format(len(images)))
+
+ return images
+
+
+class PredictConfig(object):
+ """set config of preprocess, postprocess and visualize
+ Args:
+ infer_config (str): path of infer_cfg.yml
+ """
+
+ def __init__(self, infer_config):
+ # parsing Yaml config for Preprocess
+ with open(infer_config) as f:
+ yml_conf = yaml.safe_load(f)
+ self.check_model(yml_conf)
+ self.arch = yml_conf['arch']
+ self.preprocess_infos = yml_conf['Preprocess']
+ self.min_subgraph_size = yml_conf['min_subgraph_size']
+ self.label_list = yml_conf['label_list']
+ self.use_dynamic_shape = yml_conf['use_dynamic_shape']
+ self.draw_threshold = yml_conf.get("draw_threshold", 0.5)
+ self.mask = yml_conf.get("mask", False)
+ self.tracker = yml_conf.get("tracker", None)
+ self.nms = yml_conf.get("NMS", None)
+ self.fpn_stride = yml_conf.get("fpn_stride", None)
+
+ # predefined color pool
+ color_pool = [(0, 255, 0), (255, 0, 0), (0, 0, 255), (255, 255, 0), (0, 255, 255)]
+ # dynamically generate the label-to-color mapping from label_list
+ self.colors = {label: color_pool[i % len(color_pool)] for i, label in enumerate(self.label_list)}
+
+ if self.arch == 'RCNN' and yml_conf.get('export_onnx', False):
+ print(
+ 'The RCNN export model is used for ONNX and it only supports batch_size = 1'
+ )
+ self.print_config()
+
+ def check_model(self, yml_conf):
+ """
+ Raises:
+ ValueError: loaded model not in supported model type
+ """
+ for support_model in SUPPORT_MODELS:
+ if support_model in yml_conf['arch']:
+ return True
+ raise ValueError("Unsupported arch: {}, expect {}".format(yml_conf[
+ 'arch'], SUPPORT_MODELS))
+
+ def print_config(self):
+ print('----------- Model Configuration -----------')
+ print('%s: %s' % ('Model Arch', self.arch))
+ print('%s: ' % ('Transform Order'))
+ for op_info in self.preprocess_infos:
+ print('--%s: %s' % ('transform op', op_info['type']))
+ print('--------------------------------------------')
+
+
+def draw_bbox(image, outputs, infer_config):
+ for output in outputs:
+ cls_id, score, xmin, ymin, xmax, ymax = output
+ if score > infer_config.draw_threshold:
+ # get the class name
+ label = infer_config.label_list[int(cls_id)]
+ # look up the color for this class name
+ color = infer_config.colors[label]
+ cv2.rectangle(image, (int(xmin), int(ymin)), (int(xmax), int(ymax)), color, 2)
+ cv2.putText(image, "{}: {:.2f}".format(label, score),
+ (int(xmin), int(ymin - 5)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
+ return image
+
+
+def predict_image(infer_config, predictor, img_list):
+ # load preprocess transforms
+ transforms = Compose(infer_config.preprocess_infos)
+ errImgList = []
+
+ # Check and create subimg_save_dir if not exist
+ subimg_save_dir = os.path.join(FLAGS.imgsave_dir, 'subimages')
+ os.makedirs(subimg_save_dir, exist_ok=True)
+
+ # predict image
+ for img_path in tqdm(img_list):
+ img = cv2.imread(img_path)
+ if img is None:
+ print(f"Warning: Could not read image {img_path}. Skipping...")
+ errImgList.append(img_path)
+ continue
+
+ inputs = transforms(img_path)
+ inputs_name = [var.name for var in predictor.get_inputs()]
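+ # add a leading batch axis and feed only the tensors the ONNX graph declares as inputs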
+ inputs = {k: inputs[k][None, ] for k in inputs_name}
+
+ outputs = predictor.run(output_names=None, input_feed=inputs)
+
+ print("ONNXRuntime predict: ")
+ if infer_config.arch in ["HRNet"]:
+ print(np.array(outputs[0]))
+ else:
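+ # each detection row is [class_id, score, xmin, ymin, xmax, ymax]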
+ bboxes = np.array(outputs[0])
+ for bbox in bboxes:
+ if bbox[0] > -1 and bbox[1] > infer_config.draw_threshold:
+ print(f"{int(bbox[0])} {bbox[1]} "
+ f"{bbox[2]} {bbox[3]} {bbox[4]} {bbox[5]}")
+
+ # Save the subimages (crop from the original image)
+ subimg_counter = 1
+ for output in np.array(outputs[0]):
+ cls_id, score, xmin, ymin, xmax, ymax = output
+ if score > infer_config.draw_threshold:
+ label = infer_config.label_list[int(cls_id)]
+ subimg = img[int(ymin):int(ymax), int(xmin):int(xmax)]
+ subimg_filename = f"{os.path.splitext(os.path.basename(img_path))[0]}_{label}_{xmin:.2f}_{ymin:.2f}_{xmax:.2f}_{ymax:.2f}.jpg"
+ subimg_path = os.path.join(subimg_save_dir, subimg_filename)
+ cv2.imwrite(subimg_path, subimg)
+ subimg_counter += 1
+
+ # Draw bounding boxes and save the image with bounding boxes
+ img_with_bbox = draw_bbox(img, np.array(outputs[0]), infer_config)
+ output_dir = FLAGS.imgsave_dir
+ os.makedirs(output_dir, exist_ok=True)
+ output_file = os.path.join(output_dir, "output_" + os.path.basename(img_path))
+ cv2.imwrite(output_file, img_with_bbox)
+
+ print("ErrorImgs:")
+ print(errImgList)
+
+if __name__ == '__main__':
+ FLAGS = parser.parse_args()
+ # load image list
+ img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
+ # load predictor
+ predictor = InferenceSession(FLAGS.onnx_file)
+ # load infer config
+ infer_config = PredictConfig(FLAGS.infer_cfg)
+
+ predict_image(infer_config, predictor, img_list)
diff --git a/src/models/det_model/model/infer_cfg.yml b/src/models/det_model/model/infer_cfg.yml
new file mode 100644
index 0000000..0c156fc
--- /dev/null
+++ b/src/models/det_model/model/infer_cfg.yml
@@ -0,0 +1,27 @@
+mode: paddle
+draw_threshold: 0.5
+metric: COCO
+use_dynamic_shape: false
+arch: DETR
+min_subgraph_size: 3
+Preprocess:
+- interp: 2
+ keep_ratio: false
+ target_size:
+ - 640
+ - 640
+ type: Resize
+- mean:
+ - 0.0
+ - 0.0
+ - 0.0
+ norm_type: none
+ std:
+ - 1.0
+ - 1.0
+ - 1.0
+ type: NormalizeImage
+- type: Permute
+label_list:
+- isolated
+- embedding
diff --git a/src/models/det_model/preprocess.py b/src/models/det_model/preprocess.py
new file mode 100644
index 0000000..3554b7f
--- /dev/null
+++ b/src/models/det_model/preprocess.py
@@ -0,0 +1,494 @@
+import numpy as np
+import cv2
+import copy
+
+
+def decode_image(img_path):
+ with open(img_path, 'rb') as f:
+ im_read = f.read()
+ data = np.frombuffer(im_read, dtype='uint8')
+ im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode
+ im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
+ img_info = {
+ "im_shape": np.array(
+ im.shape[:2], dtype=np.float32),
+ "scale_factor": np.array(
+ [1., 1.], dtype=np.float32)
+ }
+ return im, img_info
+
+
+class Resize(object):
+ """resize image by target_size and max_size
+ Args:
+ target_size (int): the target size of image
+ keep_ratio (bool): whether keep_ratio or not, default true
+ interp (int): method of resize
+ """
+
+ def __init__(self, target_size, keep_ratio=True, interp=cv2.INTER_LINEAR):
+ if isinstance(target_size, int):
+ target_size = [target_size, target_size]
+ self.target_size = target_size
+ self.keep_ratio = keep_ratio
+ self.interp = interp
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ assert len(self.target_size) == 2
+ assert self.target_size[0] > 0 and self.target_size[1] > 0
+ im_channel = im.shape[2]
+ im_scale_y, im_scale_x = self.generate_scale(im)
+ im = cv2.resize(
+ im,
+ None,
+ None,
+ fx=im_scale_x,
+ fy=im_scale_y,
+ interpolation=self.interp)
+ im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
+ im_info['scale_factor'] = np.array(
+ [im_scale_y, im_scale_x]).astype('float32')
+ return im, im_info
+
+ def generate_scale(self, im):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ Returns:
+ im_scale_x: the resize ratio of X
+ im_scale_y: the resize ratio of Y
+ """
+ origin_shape = im.shape[:2]
+ im_c = im.shape[2]
+ if self.keep_ratio:
+ im_size_min = np.min(origin_shape)
+ im_size_max = np.max(origin_shape)
+ target_size_min = np.min(self.target_size)
+ target_size_max = np.max(self.target_size)
+ im_scale = float(target_size_min) / float(im_size_min)
+ if np.round(im_scale * im_size_max) > target_size_max:
+ im_scale = float(target_size_max) / float(im_size_max)
+ im_scale_x = im_scale
+ im_scale_y = im_scale
+ else:
+ resize_h, resize_w = self.target_size
+ im_scale_y = resize_h / float(origin_shape[0])
+ im_scale_x = resize_w / float(origin_shape[1])
+ return im_scale_y, im_scale_x
+
+
+class NormalizeImage(object):
+ """normalize image
+ Args:
+ mean (list): im - mean
+ std (list): im / std
+ is_scale (bool): whether need im / 255
+ norm_type (str): type in ['mean_std', 'none']
+ """
+
+ def __init__(self, mean, std, is_scale=True, norm_type='mean_std'):
+ self.mean = mean
+ self.std = std
+ self.is_scale = is_scale
+ self.norm_type = norm_type
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ im = im.astype(np.float32, copy=False)
+ if self.is_scale:
+ scale = 1.0 / 255.0
+ im *= scale
+
+ if self.norm_type == 'mean_std':
+ mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
+ std = np.array(self.std)[np.newaxis, np.newaxis, :]
+ im -= mean
+ im /= std
+ return im, im_info
+
+
+class Permute(object):
+ """permute image
+ Args:
+ to_bgr (bool): whether convert RGB to BGR
+ channel_first (bool): whether convert HWC to CHW
+ """
+
+ def __init__(self, ):
+ super(Permute, self).__init__()
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ im = im.transpose((2, 0, 1)).copy()
+ return im, im_info
+
+
+class PadStride(object):
+ """ padding image for model with FPN, instead PadBatch(pad_to_stride) in original config
+ Args:
+ stride (bool): model with FPN need image shape % stride == 0
+ """
+
+ def __init__(self, stride=0):
+ self.coarsest_stride = stride
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ coarsest_stride = self.coarsest_stride
+ if coarsest_stride <= 0:
+ return im, im_info
+ im_c, im_h, im_w = im.shape
+ pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)
+ pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)
+ padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)
+ padding_im[:, :im_h, :im_w] = im
+ return padding_im, im_info
+
+
+class LetterBoxResize(object):
+ def __init__(self, target_size):
+ """
+ Resize image to target size, convert normalized xywh to pixel xyxy
+ format ([x_center, y_center, width, height] -> [x0, y0, x1, y1]).
+ Args:
+ target_size (int|list): image target size.
+ """
+ super(LetterBoxResize, self).__init__()
+ if isinstance(target_size, int):
+ target_size = [target_size, target_size]
+ self.target_size = target_size
+
+ def letterbox(self, img, height, width, color=(127.5, 127.5, 127.5)):
+ # letterbox: resize a rectangular image to a padded rectangular
+ shape = img.shape[:2] # [height, width]
+ ratio_h = float(height) / shape[0]
+ ratio_w = float(width) / shape[1]
+ ratio = min(ratio_h, ratio_w)
+ new_shape = (round(shape[1] * ratio),
+ round(shape[0] * ratio)) # [width, height]
+ padw = (width - new_shape[0]) / 2
+ padh = (height - new_shape[1]) / 2
+ top, bottom = round(padh - 0.1), round(padh + 0.1)
+ left, right = round(padw - 0.1), round(padw + 0.1)
+
+ img = cv2.resize(
+ img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border
+ img = cv2.copyMakeBorder(
+ img, top, bottom, left, right, cv2.BORDER_CONSTANT,
+ value=color) # padded rectangular
+ return img, ratio, padw, padh
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ assert len(self.target_size) == 2
+ assert self.target_size[0] > 0 and self.target_size[1] > 0
+ height, width = self.target_size
+ h, w = im.shape[:2]
+ im, ratio, padw, padh = self.letterbox(im, height=height, width=width)
+
+ new_shape = [round(h * ratio), round(w * ratio)]
+ im_info['im_shape'] = np.array(new_shape, dtype=np.float32)
+ im_info['scale_factor'] = np.array([ratio, ratio], dtype=np.float32)
+ return im, im_info
+
+
+class Pad(object):
+ def __init__(self, size, fill_value=[114.0, 114.0, 114.0]):
+ """
+ Pad image to a specified size.
+ Args:
+ size (list[int]): image target size
+ fill_value (list[float]): rgb value of pad area, default (114.0, 114.0, 114.0)
+ """
+ super(Pad, self).__init__()
+ if isinstance(size, int):
+ size = [size, size]
+ self.size = size
+ self.fill_value = fill_value
+
+ def __call__(self, im, im_info):
+ im_h, im_w = im.shape[:2]
+ h, w = self.size
+ if h == im_h and w == im_w:
+ im = im.astype(np.float32)
+ return im, im_info
+
+ canvas = np.ones((h, w, 3), dtype=np.float32)
+ canvas *= np.array(self.fill_value, dtype=np.float32)
+ canvas[0:im_h, 0:im_w, :] = im.astype(np.float32)
+ im = canvas
+ return im, im_info
+
+
+def rotate_point(pt, angle_rad):
+ """Rotate a point by an angle.
+
+ Args:
+ pt (list[float]): 2 dimensional point to be rotated
+ angle_rad (float): rotation angle by radian
+
+ Returns:
+ list[float]: Rotated point.
+ """
+ assert len(pt) == 2
+ sn, cs = np.sin(angle_rad), np.cos(angle_rad)
+ new_x = pt[0] * cs - pt[1] * sn
+ new_y = pt[0] * sn + pt[1] * cs
+ rotated_pt = [new_x, new_y]
+
+ return rotated_pt
+
+
+def _get_3rd_point(a, b):
+ """To calculate the affine matrix, three pairs of points are required. This
+ function is used to get the 3rd point, given 2D points a & b.
+
+ The 3rd point is defined by rotating vector `a - b` by 90 degrees
+ anticlockwise, using b as the rotation center.
+
+ Args:
+ a (np.ndarray): point(x,y)
+ b (np.ndarray): point(x,y)
+
+ Returns:
+ np.ndarray: The 3rd point.
+ """
+ assert len(a) == 2
+ assert len(b) == 2
+ direction = a - b
+ third_pt = b + np.array([-direction[1], direction[0]], dtype=np.float32)
+
+ return third_pt
+
+
+def get_affine_transform(center,
+ input_size,
+ rot,
+ output_size,
+ shift=(0., 0.),
+ inv=False):
+ """Get the affine transform matrix, given the center/scale/rot/output_size.
+
+ Args:
+ center (np.ndarray[2, ]): Center of the bounding box (x, y).
+ scale (np.ndarray[2, ]): Scale of the bounding box
+ wrt [width, height].
+ rot (float): Rotation angle (degree).
+ output_size (np.ndarray[2, ]): Size of the destination heatmaps.
+ shift (0-100%): Shift translation ratio wrt the width/height.
+ Default (0., 0.).
+ inv (bool): Option to inverse the affine transform direction.
+ (inv=False: src->dst or inv=True: dst->src)
+
+ Returns:
+ np.ndarray: The transform matrix.
+ """
+ assert len(center) == 2
+ assert len(output_size) == 2
+ assert len(shift) == 2
+ if not isinstance(input_size, (np.ndarray, list)):
+ input_size = np.array([input_size, input_size], dtype=np.float32)
+ scale_tmp = input_size
+
+ shift = np.array(shift)
+ src_w = scale_tmp[0]
+ dst_w = output_size[0]
+ dst_h = output_size[1]
+
+ rot_rad = np.pi * rot / 180
+ src_dir = rotate_point([0., src_w * -0.5], rot_rad)
+ dst_dir = np.array([0., dst_w * -0.5])
+
+ src = np.zeros((3, 2), dtype=np.float32)
+ src[0, :] = center + scale_tmp * shift
+ src[1, :] = center + src_dir + scale_tmp * shift
+ src[2, :] = _get_3rd_point(src[0, :], src[1, :])
+
+ dst = np.zeros((3, 2), dtype=np.float32)
+ dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
+ dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
+ dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])
+
+ if inv:
+ trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
+ else:
+ trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
+
+ return trans
+
+
+class WarpAffine(object):
+ """Warp affine the image
+ """
+
+ def __init__(self,
+ keep_res=False,
+ pad=31,
+ input_h=512,
+ input_w=512,
+ scale=0.4,
+ shift=0.1):
+ self.keep_res = keep_res
+ self.pad = pad
+ self.input_h = input_h
+ self.input_w = input_w
+ self.scale = scale
+ self.shift = shift
+
+ def __call__(self, im, im_info):
+ """
+ Args:
+ im (np.ndarray): image (np.ndarray)
+ im_info (dict): info of image
+ Returns:
+ im (np.ndarray): processed image (np.ndarray)
+ im_info (dict): info of processed image
+ """
+ img = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
+
+ h, w = img.shape[:2]
+
+ if self.keep_res:
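+ # self.pad has the form 2**n - 1, so (x | pad) + 1 rounds x up to a multiple of 2**n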
+ input_h = (h | self.pad) + 1
+ input_w = (w | self.pad) + 1
+ s = np.array([input_w, input_h], dtype=np.float32)
+ c = np.array([w // 2, h // 2], dtype=np.float32)
+
+ else:
+ s = max(h, w) * 1.0
+ input_h, input_w = self.input_h, self.input_w
+ c = np.array([w / 2., h / 2.], dtype=np.float32)
+
+ trans_input = get_affine_transform(c, s, 0, [input_w, input_h])
+ img = cv2.resize(img, (w, h))
+ inp = cv2.warpAffine(
+ img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
+ return inp, im_info
+
+
+# keypoint preprocess
+def get_warp_matrix(theta, size_input, size_dst, size_target):
+ """This code is based on
+ https://github.com/open-mmlab/mmpose/blob/master/mmpose/core/post_processing/post_transforms.py
+
+ Calculate the transformation matrix under the constraint of unbiased.
+ Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased
+ Data Processing for Human Pose Estimation (CVPR 2020).
+
+ Args:
+ theta (float): Rotation angle in degrees.
+ size_input (np.ndarray): Size of input image [w, h].
+ size_dst (np.ndarray): Size of output image [w, h].
+ size_target (np.ndarray): Size of ROI in input plane [w, h].
+
+ Returns:
+ matrix (np.ndarray): A matrix for transformation.
+ """
+ theta = np.deg2rad(theta)
+ matrix = np.zeros((2, 3), dtype=np.float32)
+ scale_x = size_dst[0] / size_target[0]
+ scale_y = size_dst[1] / size_target[1]
+ matrix[0, 0] = np.cos(theta) * scale_x
+ matrix[0, 1] = -np.sin(theta) * scale_x
+ matrix[0, 2] = scale_x * (
+ -0.5 * size_input[0] * np.cos(theta) + 0.5 * size_input[1] *
+ np.sin(theta) + 0.5 * size_target[0])
+ matrix[1, 0] = np.sin(theta) * scale_y
+ matrix[1, 1] = np.cos(theta) * scale_y
+ matrix[1, 2] = scale_y * (
+ -0.5 * size_input[0] * np.sin(theta) - 0.5 * size_input[1] *
+ np.cos(theta) + 0.5 * size_target[1])
+ return matrix
+
+
+class TopDownEvalAffine(object):
+ """apply affine transform to image and coords
+
+ Args:
+ trainsize (list): [w, h], the standard size used to train
+ use_udp (bool): whether to use Unbiased Data Processing.
+ records(dict): the dict contained the image and coords
+
+ Returns:
+ records (dict): contain the image and coords after tranformed
+
+ """
+
+ def __init__(self, trainsize, use_udp=False):
+ self.trainsize = trainsize
+ self.use_udp = use_udp
+
+ def __call__(self, image, im_info):
+ rot = 0
+ imshape = im_info['im_shape'][::-1]
+ center = im_info['center'] if 'center' in im_info else imshape / 2.
+ scale = im_info['scale'] if 'scale' in im_info else imshape
+ if self.use_udp:
+ trans = get_warp_matrix(
+ rot, center * 2.0,
+ [self.trainsize[0] - 1.0, self.trainsize[1] - 1.0], scale)
+ image = cv2.warpAffine(
+ image,
+ trans, (int(self.trainsize[0]), int(self.trainsize[1])),
+ flags=cv2.INTER_LINEAR)
+ else:
+ trans = get_affine_transform(center, scale, rot, self.trainsize)
+ image = cv2.warpAffine(
+ image,
+ trans, (int(self.trainsize[0]), int(self.trainsize[1])),
+ flags=cv2.INTER_LINEAR)
+
+ return image, im_info
+
+
+class Compose:
+ def __init__(self, transforms):
+ self.transforms = []
+ for op_info in transforms:
+ new_op_info = op_info.copy()
+ op_type = new_op_info.pop('type')
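+ # eval() resolves the 'type' name from the YAML config to one of the transform classes defined above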
+ self.transforms.append(eval(op_type)(**new_op_info))
+
+ def __call__(self, img_path):
+ img, im_info = decode_image(img_path)
+ for t in self.transforms:
+ img, im_info = t(img, im_info)
+ inputs = copy.deepcopy(im_info)
+ inputs['image'] = img
+ return inputs
diff --git a/src/rec_infer_from_crop_imgs.py b/src/rec_infer_from_crop_imgs.py
new file mode 100644
index 0000000..b51e92c
--- /dev/null
+++ b/src/rec_infer_from_crop_imgs.py
@@ -0,0 +1,59 @@
+import os
+import argparse
+import cv2 as cv
+from pathlib import Path
+from utils import to_katex
+from models.ocr_model.utils.inference import inference as latex_inference
+from models.ocr_model.model.TexTeller import TexTeller
+
+
+if __name__ == '__main__':
+ os.chdir(Path(__file__).resolve().parent)
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '-img_dir',
+ type=str,
+ default="./subimages",
+ help='path to the directory containing input images'
+ )
+ parser.add_argument(
+ '-output_dir',
+ type=str,
+ default="./results",
+ help='path to the output directory for storing recognition results'
+ )
+ parser.add_argument(
+ '-cuda',
+ default=False,
+ action='store_true',
+ help='use cuda or not'
+ )
+
+ args = parser.parse_args()
+
+ print('Loading model and tokenizer...')
+ latex_rec_model = TexTeller.from_pretrained()
+ tokenizer = TexTeller.get_tokenizer()
+ print('Model and tokenizer loaded.')
+
+ # Create the output directory if it doesn't exist
+ os.makedirs(args.output_dir, exist_ok=True)
+
+ # Loop through all images in the input directory
+ for filename in os.listdir(args.img_dir):
+ img_path = os.path.join(args.img_dir, filename)
+ img = cv.imread(img_path)
+
+ if img is not None:
+ print(f'Inference for {filename}...')
+ res = latex_inference(latex_rec_model, tokenizer, [img], args.cuda)
+ res = to_katex(res[0])
+
+ # Save the recognition result to a text file
+ output_file = os.path.join(args.output_dir, os.path.splitext(filename)[0] + '.txt')
+ with open(output_file, 'w') as f:
+ f.write(res)
+
+ print(f'Result saved to {output_file}')
+ else:
+ print(f"Warning: Could not read image {img_path}. Skipping...")
From d80d7262efc8851031a9adeb6c7fb4d893183e73 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E4=B8=89=E6=B4=8B=E4=B8=89=E6=B4=8B?= <1258009915@qq.com>
Date: Fri, 12 Apr 2024 06:13:58 +0000
Subject: [PATCH 6/9] update README
---
README.md | 40 ++++--
assets/README_zh.md | 149 ++++++++++++-----------
assets/det_rec.png | Bin 0 -> 940945 bytes
assets/image/README_zh/1712901497354.png | Bin 0 -> 495745 bytes
4 files changed, 107 insertions(+), 82 deletions(-)
create mode 100644 assets/det_rec.png
create mode 100644 assets/image/README_zh/1712901497354.png
diff --git a/README.md b/README.md
index ce6c208..cb686d1 100644
--- a/README.md
+++ b/README.md
@@ -27,7 +27,7 @@ TexTeller was trained with ~~550K~~7.5M image-formula pairs (dataset available [
* 📮[2024-03-25] TexTeller 2.0 released! The training data for TexTeller 2.0 has been increased to 7.5M (about **15 times more** than TexTeller 1.0 and also improved in data quality). The trained TexTeller 2.0 demonstrated **superior performance** in the test set, especially in recognizing rare symbols, complex multi-line formulas, and matrices.
> [There](./assets/test.pdf) are more test images here and a horizontal comparison of recognition models from different companies.
-* 📮[2024-04-11] Added whole-image inference; you only need to additionally install the onnxruntime library to get the new feature! We manually annotated formulas in 3,415 Chinese textbook images and used 8,272 formula images from the IBEM English paper detection dataset. We trained a formula object-detection model based on the RT-DETR-R50 architecture and exported the trained model to ONNX format. This allows feeding in an image and recognizing all the formulas it contains in one go.
+* 📮[2024-04-12] Trained a **formula detection model**, thereby enhancing the capability to detect and recognize formulas in entire documents (whole-image inference)!
## 🔑 Prerequisites
@@ -82,21 +82,39 @@ Enter `http://localhost:8501` in a browser to view the web demo.
> [!NOTE]
> If you are a Windows user, please run the `start_web.bat` file instead.
-## Inference on Whole Images
-### Download Weights
-The ONNX model trained on 8,272 images from the IBEM dataset of English papers (https://zenodo.org/records/4757865):
-https://huggingface.co/TonyLee1256/texteller_det/resolve/main/rtdetr_r50vd_6x_coco_trained_on_IBEM_en_papers.onnx?download=true
+## 🧠 Full Image Inference
-The ONNX model trained on 2,560 Chinese textbook images (100+ layouts):
-https://huggingface.co/TonyLee1256/texteller_det/blob/main/rtdetr_r50vd_6x_coco_trained_on_cn_textbook.onnx
+TexTeller also supports **formula detection and recognition** on full images: formulas are first detected throughout the image and then recognized in batch.
+
+### Download Weights
+
+English documentation formula detection [[link](https://huggingface.co/TonyLee1256/texteller_det/resolve/main/rtdetr_r50vd_6x_coco_trained_on_IBEM_en_papers.onnx?download=true)]: Trained on 8272 images from the [IBEM dataset](https://zenodo.org/records/4757865).
+
+Chinese documentation formula detection [[link](https://huggingface.co/TonyLee1256/texteller_det/blob/main/rtdetr_r50vd_6x_coco_trained_on_cn_textbook.onnx)]: Trained on 2560 Chinese textbook images (100+ layouts).
### Formula Detection
-Run infer_det.py in the TexTeller/src directory.
-This will detect all formulas in the input image, draw the detection results on the entire image and save it, and crop and save each detected formula as a separate image.
+
+Run the following command in the `TexTeller/src` directory:
+
+```bash
+python infer_det.py
+```
+
+This detects all formulas in the full image; the cropped results are saved in `TexTeller/src/subimages`.
+
+
+
+<img src="assets/det_rec.png">
+
### Batch Formula Recognition
-Run rec_infer_from_crop_imgs.py.
-Based on the formula detection results from the previous step, this script will perform batch recognition on all cropped formula images and save the recognition results as text files.
+
+After **formula detection**, run the following command in the `TexTeller/src` directory:
+
+```bash
+python rec_infer_from_crop_imgs.py
+```
+
+This will use the results of the previous formula detection to perform batch recognition on all cropped formulas, saving the recognition results as txt files in `TexTeller/src/results`.
## 📡 API Usage
diff --git a/assets/README_zh.md b/assets/README_zh.md
index de05ba5..f211b88 100644
--- a/assets/README_zh.md
+++ b/assets/README_zh.md
@@ -25,10 +25,10 @@ TexTeller用了~~550K~~7.5M的图片-公式对进行训练(数据集可以在[
## 🔄 变更信息
* 📮[2024-03-25] TexTeller2.0发布!TexTeller2.0的训练数据增大到了7.5M(相较于TexTeller1.0**增加了~15倍**并且数据质量也有所改善)。训练后的TexTeller2.0在测试集中展现出了**更加优越的性能**,尤其在生僻符号、复杂多行、矩阵的识别场景中。
- > 在[这里](./test.pdf)有更多的测试图片以及各家识别模型的横向对比。
-
-* 📮[2024-04-11] 增加了整图推理的功能,只需额外安装onnxruntime库即可获取新功能!我们自行标注了3415张中文教材图片中的公式,并使用了8272张来自于IBEM英文论文公式检测数据集中的公式,基于RT-DETR-R50模型进行了公式目标检测的训练,并将训练好的模型导出为了onnx格式。以方便输入图片,一次性对图片中的所有公式进行识别。
+ > 在[这里](./test.pdf)有更多的测试图片以及各家识别模型的横向对比。
+ >
+* 📮[2024-04-12] 训练了**公式检测模型**,从而增加了对整个文档进行公式检测+公式识别(整图推理)的功能!
## 🔑 前置条件
@@ -43,25 +43,22 @@ python=3.10
1. 克隆本仓库:
- ```bash
- git clone https://github.com/OleehyO/TexTeller
- ```
-
+ ```bash
+ git clone https://github.com/OleehyO/TexTeller
+ ```
2. [安装pytorch](https://pytorch.org/get-started/locally/#start-locally)
-
3. 安装本项目的依赖包:
- ```bash
- pip install -r requirements.txt
- ```
+ ```bash
+ pip install -r requirements.txt
+ ```
+4. 进入 `TexTeller/src`目录,在终端运行以下命令进行推理:
-4. 进入`TexTeller/src`目录,在终端运行以下命令进行推理:
-
- ```bash
- python inference.py -img "/path/to/image.{jpg,png}"
- # use -cuda option to enable GPU inference
- #+e.g. python inference.py -img "./img.jpg" -cuda
- ```
+ ```bash
+ python inference.py -img "/path/to/image.{jpg,png}"
+ # use -cuda option to enable GPU inference
+ #+e.g. python inference.py -img "./img.jpg" -cuda
+ ```
> [!NOTE]
> 第一次运行时会在hugging face上下载所需要的checkpoints
@@ -72,27 +69,24 @@ python=3.10
1. 安装huggingface hub包
- ```bash
- pip install -U "huggingface_hub[cli]"
- ```
-
+ ```bash
+ pip install -U "huggingface_hub[cli]"
+ ```
2. 在能连接Hugging Face的机器上下载模型权重:
- ```bash
- huggingface-cli download OleehyO/TexTeller --include "*.json" "*.bin" "*.txt" --repo-type model --local-dir "your/dir/path"
- ```
-
-3. 把包含权重的目录上传远端服务器,然后把`TexTeller/src/models/ocr_model/model/TexTeller.py`中的`REPO_NAME = 'OleehyO/TexTeller'`修改为`REPO_NAME = 'your/dir/path'`
+ ```bash
+ huggingface-cli download OleehyO/TexTeller --include "*.json" "*.bin" "*.txt" --repo-type model --local-dir "your/dir/path"
+ ```
+3. 把包含权重的目录上传远端服务器,然后把 `TexTeller/src/models/ocr_model/model/TexTeller.py`中的 `REPO_NAME = 'OleehyO/TexTeller'`修改为 `REPO_NAME = 'your/dir/path'`
如果你还想在训练模型时开启evaluate,你需要提前下载metric脚本并上传远端服务器:
1. 在能连接Hugging Face的机器上下载metric脚本
- ```bash
- huggingface-cli download evaluate-metric/google_bleu --repo-type space --local-dir "your/dir/path"
- ```
-
-2. 把这个目录上传远端服务器,并在`TexTeller/src/models/ocr_model/utils/metrics.py`中把`evaluate.load('google_bleu')`改为`evaluate.load('your/dir/path/google_bleu.py')`
+ ```bash
+ huggingface-cli download evaluate-metric/google_bleu --repo-type space --local-dir "your/dir/path"
+ ```
+2. 把这个目录上传远端服务器,并在 `TexTeller/src/models/ocr_model/utils/metrics.py`中把 `evaluate.load('google_bleu')`改为 `evaluate.load('your/dir/path/google_bleu.py')`
## 🌐 网页演示
@@ -102,87 +96,105 @@ python=3.10
./start_web.sh
```
-在浏览器里输入`http://localhost:8501`就可以看到web demo
+在浏览器里输入 `http://localhost:8501`就可以看到web demo
> [!TIP]
-> 你可以改变`start_web.sh`的默认配置, 例如使用GPU进行推理(e.g. `USE_CUDA=True`) 或者增加beams的数量(e.g. `NUM_BEAM=3`)来获得更高的精确度
+> 你可以改变 `start_web.sh`的默认配置, 例如使用GPU进行推理(e.g. `USE_CUDA=True`) 或者增加beams的数量(e.g. `NUM_BEAM=3`)来获得更高的精确度
> [!NOTE]
> 对于Windows用户, 请运行 `start_web.bat`文件.
-## 整图推理
+## 🧠 整图推理
+
+TexTeller还支持对整张图片进行**公式检测+公式识别**,从而对整图公式进行检测,然后进行批公式识别。
### 下载权重
-在8272张IBEM数据集(https://zenodo.org/records/4757865)上训练,并导出的onnx模型:
-https://huggingface.co/TonyLee1256/texteller_det/resolve/main/rtdetr_r50vd_6x_coco_trained_on_IBEM_en_papers.onnx?download=true
-在2560张中文教材数据(100+版式)上训练,并导出的onnx模型:
-https://huggingface.co/TonyLee1256/texteller_det/blob/main/rtdetr_r50vd_6x_coco_trained_on_cn_textbook.onnx
+
+英文文档公式检测 [[link](https://huggingface.co/TonyLee1256/texteller_det/resolve/main/rtdetr_r50vd_6x_coco_trained_on_IBEM_en_papers.onnx?download=true)]:在8272张[IBEM数据集](https://zenodo.org/records/4757865)上训练得到
+
+中文文档公式检测 [[link](https://huggingface.co/TonyLee1256/texteller_det/blob/main/rtdetr_r50vd_6x_coco_trained_on_cn_textbook.onnx)]:在2560张中文教材数据(100+版式)上训练得到
### 公式检测
-cd TexTeller/src
-python infer_det.py
-运行后,对整张图中的所有公式进行检测,绘制整图检测结果并保存,并将每一个检测出的目标单独裁剪并保存下来。
+
+ `TexTeller/src`目录下运行以下命令
+
+```bash
+python infer_det.py
+```
+
+对整张图中的所有公式进行检测,结果保存在 `TexTeller/src/subimages`
+
+
+
+<img src="image/README_zh/1712901497354.png">
+
### 公式批识别
+
+在进行**公式检测后**, `TexTeller/src`目录下运行以下命令
+
+```bash
python rec_infer_from_crop_imgs.py
-基于上一步公式检测的结果,对裁剪出的所有公式进行批量识别,将识别结果保存为txt文件。
+```
+
+会基于上一步公式检测的结果,对裁剪出的所有公式进行批量识别,将识别结果在 `TexTeller/src/results`中保存为txt文件。
## 📡 API调用
-我们使用[ray serve](https://github.com/ray-project/ray)来对外提供一个TexTeller的API接口,通过使用这个接口,你可以把TexTeller整合到自己的项目里。要想启动server,你需要先进入`TexTeller/src`目录然后运行以下命令:
+我们使用[ray serve](https://github.com/ray-project/ray)来对外提供一个TexTeller的API接口,通过使用这个接口,你可以把TexTeller整合到自己的项目里。要想启动server,你需要先进入 `TexTeller/src`目录然后运行以下命令:
```bash
python server.py # default settings
```
-你可以给`server.py`传递以下参数来改变server的推理设置(e.g. `python server.py --use_gpu` 来启动GPU推理):
+你可以给 `server.py`传递以下参数来改变server的推理设置(e.g. `python server.py --use_gpu` 来启动GPU推理):
-| 参数 | 描述 |
-| --- | --- |
-| `-ckpt` | 权重文件的路径,*默认为TexTeller的预训练权重*。|
-| `-tknz` | 分词器的路径, *默认为TexTeller的分词器*。|
-| `-port` | 服务器的服务端口, *默认是8000*。 |
-| `--use_gpu` | 是否使用GPU推理,*默认为CPU*。 |
-| `--num_beams` | beam search的beam数量, *默认是1*。 |
-| `--num_replicas` | 在服务器上运行的服务副本数量, *默认1个副本*。你可以使用更多的副本来获取更大的吞吐量。|
-| `--ncpu_per_replica` | 每个服务副本所用的CPU核心数,*默认为1*。 |
+| 参数 | 描述 |
+| ---------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `-ckpt` | 权重文件的路径,*默认为TexTeller的预训练权重*。 |
+| `-tknz` | 分词器的路径,*默认为TexTeller的分词器*。 |
+| `-port` | 服务器的服务端口,*默认是8000*。 |
+| `--use_gpu` | 是否使用GPU推理,*默认为CPU*。 |
+| `--num_beams` | beam search的beam数量,*默认是1*。 |
+| `--num_replicas` | 在服务器上运行的服务副本数量,*默认1个副本*。你可以使用更多的副本来获取更大的吞吐量。 |
+| `--ncpu_per_replica` | 每个服务副本所用的CPU核心数,*默认为1*。 |
| `--ngpu_per_replica` | 每个服务副本所用的GPU数量,*默认为1*。你可以把这个值设置成 0~1之间的数,这样会在一个GPU上运行多个服务副本来共享GPU,从而提高GPU的利用率。(注意,如果 --num_replicas 2, --ngpu_per_replica 0.7, 那么就必须要有2个GPU可用) |
> [!NOTE]
-> 一个客户端demo可以在`TexTeller/client/demo.py`找到,你可以参考`demo.py`来给server发送请求
+> 一个客户端demo可以在 `TexTeller/client/demo.py`找到,你可以参考 `demo.py`来给server发送请求
## 🏋️♂️ 训练
### 数据集
-我们在`TexTeller/src/models/ocr_model/train/dataset`目录中提供了一个数据集的例子,你可以把自己的图片放在`images`目录然后在`formulas.jsonl`中为每张图片标注对应的公式。
+我们在 `TexTeller/src/models/ocr_model/train/dataset`目录中提供了一个数据集的例子,你可以把自己的图片放在 `images`目录然后在 `formulas.jsonl`中为每张图片标注对应的公式。
-准备好数据集后,你需要在`.../dataset/loader.py`中把 **`DIR_URL`变量改成你自己数据集的路径**
+准备好数据集后,你需要在 `.../dataset/loader.py`中把 **`DIR_URL`变量改成你自己数据集的路径**
### 重新训练分词器
如果你使用了不一样的数据集,你可能需要重新训练tokenizer来得到一个不一样的字典。配置好数据集后,可以通过以下命令来训练自己的tokenizer:
-1. 在`TexTeller/src/models/tokenizer/train.py`中,修改`new_tokenizer.save_pretrained('./your_dir_name')`为你自定义的输出目录
- > 注意:如果要用一个不一样大小的字典(默认1W个token),你需要在 `TexTeller/src/models/globals.py`中修改`VOCAB_SIZE`变量
+1. 在 `TexTeller/src/models/tokenizer/train.py`中,修改 `new_tokenizer.save_pretrained('./your_dir_name')`为你自定义的输出目录
+ > 注意:如果要用一个不一样大小的字典(默认1W个token),你需要在 `TexTeller/src/models/globals.py`中修改 `VOCAB_SIZE`变量
+ >
2. **在 `TexTeller/src` 目录下**运行以下命令:
- ```bash
- python -m models.tokenizer.train
- ```
+ ```bash
+ python -m models.tokenizer.train
+ ```
### 训练模型
-要想训练模型, 你需要在`TexTeller/src`目录下运行以下命令:
+要想训练模型, 你需要在 `TexTeller/src`目录下运行以下命令:
```bash
python -m models.ocr_model.train.train
```
-You can set your own tokenizer and checkpoint paths in`TexTeller/src/models/ocr_model/train/train.py`(see`train.py`for details). If you use the same architecture and the same vocabulary as TexTeller, you can also fine-tune TexTeller's default weights with your own dataset.
+You can set your own tokenizer and checkpoint paths in `TexTeller/src/models/ocr_model/train/train.py` (see `train.py` for details). If you use the same architecture and the same vocabulary as TexTeller, you can also fine-tune TexTeller's default weights with your own dataset.
-In`TexTeller/src/globals.py`and`TexTeller/src/models/ocr_model/train/train_args.py`, you can change the model architecture as well as the training hyperparameters.
+In `TexTeller/src/globals.py` and `TexTeller/src/models/ocr_model/train/train_args.py`, you can change the model architecture as well as the training hyperparameters.
> [!NOTE]
> Our training scripts use the [Hugging Face Transformers](https://github.com/huggingface/transformers) library, so you can refer to the [documentation](https://huggingface.co/docs/transformers/v4.32.1/main_classes/trainer#transformers.TrainingArguments) they provide for more details on the training arguments and their configuration.
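+
+To make the hyperparameter knobs concrete, here is an illustrative sketch of the kind of `TrainingArguments` you might tune in `train_args.py`; the values are placeholders, not the project's defaults:
+
+```python
+# Illustrative only: common Hugging Face TrainingArguments to tune.
+# The values below are placeholder assumptions, not TexTeller's defaults.
+from transformers import TrainingArguments
+
+training_args = TrainingArguments(
+    output_dir='./train_result',     # where checkpoints are written
+    per_device_train_batch_size=32,  # adjust to your GPU memory
+    learning_rate=5e-5,
+    num_train_epochs=10,
+    save_strategy='epoch',
+    logging_steps=100,
+)
+```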
@@ -190,19 +202,14 @@ python -m models.ocr_model.train.train
## 🚧 Limitations
* Recognition of scanned images and PDF documents is not supported
-
* Handwritten formulas are not supported
## 📅 Roadmap
-- [x] ~~Train the model with a larger dataset (7.5M samples, release coming soon)~~
-
+- [X] ~~Train the model with a larger dataset (7.5M samples, release coming soon)~~
- [ ] Recognition of scanned images
-
- [ ] PDF document recognition + support for Chinese and English scenarios
-
- [ ] Inference acceleration
-
- [ ] ...
## 💖 Acknowledgements
diff --git a/assets/det_rec.png b/assets/det_rec.png
new file mode 100644
index 0000000000000000000000000000000000000000..dbd1ffc29187f8f4ab7f59f4e87d1029c6407554
GIT binary patch
literal 940945