# syntax=docker/dockerfile:1
# DocProcesser Dockerfile - Multi-stage optimized build
# Optimized for RTX 5080 GPU deployment

# =============================================================================
# STAGE 1: Builder - Install dependencies
# =============================================================================
FROM nvidia/cuda:12.9.0-devel-ubuntu24.04 AS builder

# Install build dependencies and Python 3.10.
# Ubuntu 24.04 (noble) ships Python 3.12 only, so 3.10 comes from the
# deadsnakes PPA (the PPA's 3.10 still provides a distutils package).
RUN apt-get update && apt-get install -y --no-install-recommends \
        software-properties-common \
    && add-apt-repository -y ppa:deadsnakes/ppa \
    && apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        curl \
        python3.10 \
        python3.10-dev \
        python3.10-distutils \
        python3.10-venv \
    && rm -rf /var/lib/apt/lists/*

# Make pipes fail loudly: without pipefail the `curl | python` below would
# report success even if the download failed.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Point python/python3 at 3.10 and bootstrap pip for it (deadsnakes
# interpreters ship without pip). -f makes curl fail on HTTP errors.
RUN ln -sf /usr/bin/python3.10 /usr/bin/python && \
    ln -sf /usr/bin/python3.10 /usr/bin/python3 && \
    curl -fsSL https://bootstrap.pypa.io/get-pip.py | python3.10

# Install uv (fast pip-compatible installer). Tsinghua mirror is kept for
# CN-network builds; override the index if building elsewhere.
RUN python3.10 -m pip install --no-cache-dir uv -i https://pypi.tuna.tsinghua.edu.cn/simple

WORKDIR /build

# Copy only dependency manifests first so this layer stays cached until the
# dependency set actually changes.
COPY pyproject.toml ./
COPY wheels/ ./wheels/

# Create the virtual environment AT ITS FINAL RUNTIME PATH (/app/.venv).
# BUGFIX: venvs are not relocatable - console-script shebangs (uvicorn, etc.)
# embed the absolute interpreter path. Building at /build/venv and copying to
# /app/.venv (as before) left every entry point pointing at a path that does
# not exist in the runtime image.
RUN uv venv /app/.venv --python python3.10 && \
    . /app/.venv/bin/activate && \
    uv pip install -i https://pypi.tuna.tsinghua.edu.cn/simple -e . && \
    rm -rf ./wheels

# Trim the venv: drop bytecode caches and bundled test suites to shrink the
# copied layer. Braces group each best-effort `find || true` so a failure in
# one step cannot be masked for the rest of the chain.
RUN { find /app/.venv -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true; } && \
    { find /app/.venv -type d -name "*.dist-info/tests" -exec rm -rf {} + 2>/dev/null || true; } && \
    find /app/.venv -type f -name "*.pyc" -delete && \
    find /app/.venv -type f -name "*.pyo" -delete && \
    { find /app/.venv -type d -name "tests" -exec rm -rf {} + 2>/dev/null || true; } && \
    { find /app/.venv -type d -name "test" -exec rm -rf {} + 2>/dev/null || true; }

# =============================================================================
# STAGE 2: Runtime - Minimal final image
# =============================================================================
FROM nvidia/cuda:12.9.0-runtime-ubuntu24.04

# Set environment variables
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    PIP_NO_CACHE_DIR=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1 \
    # Model cache directories - mount these at runtime
    MODELSCOPE_CACHE=/root/.cache/modelscope \
    HF_HOME=/root/.cache/huggingface \
    # Application config (override defaults for container)
    # Use 127.0.0.1 for --network host mode, or override with -e for bridge mode
    PP_DOCLAYOUT_MODEL_DIR=/root/.cache/modelscope/hub/models/PaddlePaddle/PP-DocLayoutV2 \
    PADDLEOCR_VL_URL=http://127.0.0.1:8001/v1 \
    PATH="/app/.venv/bin:$PATH" \
    VIRTUAL_ENV="/app/.venv"

WORKDIR /app

# Install runtime-only system dependencies (NO build tools).
# BUGFIX: noble has no `python3.10` package in the default repos, so the
# deadsnakes PPA must be added here as well or this layer fails to build.
# BUGFIX: libxrender-dev -> libxrender1 (runtime library only; -dev packages
# pull headers that have no place in a production image).
RUN apt-get update && apt-get install -y --no-install-recommends \
        software-properties-common \
    && add-apt-repository -y ppa:deadsnakes/ppa \
    && apt-get update && apt-get install -y --no-install-recommends \
        curl \
        libgl1 \
        libglib2.0-0 \
        libgomp1 \
        libsm6 \
        libxext6 \
        libxrender1 \
        pandoc \
        python3.10 \
    && rm -rf /var/lib/apt/lists/*

# Symlinks so python/python3 resolve to 3.10 - this is also the interpreter
# the venv's bin/python symlink points at.
RUN ln -sf /usr/bin/python3.10 /usr/bin/python && \
    ln -sf /usr/bin/python3.10 /usr/bin/python3

# Copy the pre-built venv; it was created at /app/.venv in the builder, so its
# shebangs and symlinks are already valid at this path.
COPY --from=builder /app/.venv /app/.venv

# Copy application code (excluding model files if they're in the repo)
COPY app/ ./app/

# Create model cache directories (mount from host at runtime) and strip any
# model weights that slipped into the build context to keep the image lean.
RUN mkdir -p /root/.cache/modelscope \
             /root/.cache/huggingface \
             /root/.paddlex && \
    rm -rf /app/app/model/*

# Declare volumes for model cache (mount at runtime to avoid re-downloading).
# Declared AFTER the paths are created - build-time writes to a path made
# after its VOLUME declaration would be discarded.
VOLUME ["/root/.cache/modelscope", "/root/.cache/huggingface", "/root/.paddlex"]

# Expose port (documentation only; publish with -p / --network host)
EXPOSE 8053

# NOTE(review): the container runs as root because the model caches are baked
# in under /root; moving them (e.g. to /home/app) would allow a non-root USER.

# Health check: -f fails on HTTP errors, -sS stays quiet except on error.
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -fsS http://localhost:8053/health || exit 1

# Run the application (exec form: uvicorn is PID 1 and receives SIGTERM)
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8053", "--workers", "1"]

# =============================================================================
# Usage: Mount local model cache to avoid downloading
#
# Option 1: Use host network (simplest, can access localhost services)
#   docker run --gpus all --network host \
#     -v /home/yoge/.paddlex:/root/.paddlex:ro \
#     -v /home/yoge/.cache/modelscope:/root/.cache/modelscope:ro \
#     -v /home/yoge/.cache/huggingface:/root/.cache/huggingface:ro \
#     doc_processer:latest
#
# Option 2: Use bridge network with host.docker.internal (Linux needs --add-host)
#   docker run --gpus all -p 8053:8053 \
#     --add-host=host.docker.internal:host-gateway \
#     -v /home/yoge/.paddlex:/root/.paddlex:ro \
#     -v /home/yoge/.cache/modelscope:/root/.cache/modelscope:ro \
#     -v /home/yoge/.cache/huggingface:/root/.cache/huggingface:ro \
#     doc_processer:latest
# =============================================================================