feat: add glm-ocr core
This commit is contained in:
@@ -3,9 +3,8 @@
|
||||
from functools import lru_cache
|
||||
from pathlib import Path
|
||||
|
||||
from pydantic_settings import BaseSettings, SettingsConfigDict
|
||||
import torch
|
||||
from typing import Optional
|
||||
from pydantic_settings import BaseSettings, SettingsConfigDict
|
||||
|
||||
|
||||
class Settings(BaseSettings):
|
||||
@@ -48,21 +47,25 @@ class Settings(BaseSettings):
|
||||
is_padding: bool = True
|
||||
padding_ratio: float = 0.1
|
||||
|
||||
max_tokens: int = 4096
|
||||
|
||||
# Model Paths
|
||||
pp_doclayout_model_dir: Optional[str] = "/home/yoge/.cache/modelscope/hub/models/PaddlePaddle/PP-DocLayoutV3"
|
||||
pp_doclayout_model_dir: str | None = (
|
||||
"/home/yoge/.cache/modelscope/hub/models/PaddlePaddle/PP-DocLayoutV3"
|
||||
)
|
||||
|
||||
# Image Processing
|
||||
max_image_size_mb: int = 10
|
||||
image_padding_ratio: float = 0.1 # 10% on each side = 20% total expansion
|
||||
|
||||
device: torch.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # cuda:0 or cpu
|
||||
device: torch.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
||||
|
||||
# Server Settings
|
||||
host: str = "0.0.0.0"
|
||||
port: int = 8053
|
||||
|
||||
# Logging Settings
|
||||
log_dir: Optional[str] = None # Defaults to /app/logs in container or ./logs locally
|
||||
log_dir: str | None = None # Defaults to /app/logs in container or ./logs locally
|
||||
log_level: str = "INFO" # DEBUG, INFO, WARNING, ERROR, CRITICAL
|
||||
|
||||
@property
|
||||
|
||||
Reference in New Issue
Block a user