From dd00e11a987231bb15bba0b7a516b569c984d1ac Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E4=B8=89=E6=B4=8B=E4=B8=89=E6=B4=8B?= <1258009915@qq.com>
Date: Sat, 6 Apr 2024 05:09:50 +0000
Subject: [PATCH] inference_transform bugfix

---
 src/models/ocr_model/utils/transforms.py | 1 -
 src/start_web.sh                         | 8 ++++----
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/src/models/ocr_model/utils/transforms.py b/src/models/ocr_model/utils/transforms.py
index 1982cc3..7c014ae 100644
--- a/src/models/ocr_model/utils/transforms.py
+++ b/src/models/ocr_model/utils/transforms.py
@@ -187,7 +187,6 @@ def train_transform(images: List[Image.Image]) -> List[torch.Tensor]:
 
 def inference_transform(images: List[np.ndarray]) -> List[torch.Tensor]:
     assert IMG_CHANNELS == 1 , "Only support grayscale images for now"
-    images = [np.array(img.convert('RGB')) for img in images]
     # 裁剪掉白边
     images = [trim_white_border(image) for image in images]
     # general transform pipeline
diff --git a/src/start_web.sh b/src/start_web.sh
index 450dff2..7475147 100755
--- a/src/start_web.sh
+++ b/src/start_web.sh
@@ -1,9 +1,9 @@
 #!/usr/bin/env bash
 set -exu
 
-export CHECKPOINT_DIR="default"
-export TOKENIZER_DIR="default"
-export USE_CUDA=False # True or False (case-sensitive)
-export NUM_BEAM=1
+export CHECKPOINT_DIR="/home/lhy/code/TexTeller/src/models/ocr_model/model/ckpt"
+export TOKENIZER_DIR="/home/lhy/code/TexTeller/src/models/tokenizer/roberta-tokenizer-7Mformulas"
+export USE_CUDA=True # True or False (case-sensitive)
+export NUM_BEAM=3
 
 streamlit run web.py
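
Note (not part of the patch): a minimal sketch of why the deleted line failed. inference_transform is annotated to take List[np.ndarray], but .convert() exists only on PIL.Image.Image, so calling it on an ndarray raised AttributeError before trim_white_border was ever reached. The Image.new call below is illustrative only, not code from the repo.

import numpy as np
from PIL import Image

pil_img = Image.new("L", (64, 32), color=255)  # stand-in grayscale formula image
np_img = np.asarray(pil_img)                   # the type inference_transform now receives

print(hasattr(pil_img, "convert"))  # True  - PIL images expose .convert('RGB')
print(hasattr(np_img, "convert"))   # False - ndarrays do not, so the removed line
                                    #         raised AttributeError on ndarray input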