# Paperless-AI - AI-powered document processing for Paperless-NGX
# Uses Ollama on Atlantis for LLM inference
# Web UI: http://<host>:3033 or via reverse proxy
# Docs: https://github.com/clusterzx/paperless-ai
services:
  paperlessngx-ai:
    image: clusterzx/paperless-ai:latest
    container_name: PaperlessNGX-AI
    hostname: paperless-ai
    ports:
      # Host 3033 -> container 3000 (quoted to avoid YAML sexagesimal parsing)
      - "3033:3000"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/status"]
      interval: 10s
      timeout: 5s
      retries: 3
      # Generous warm-up before health failures count (app indexes on boot)
      start_period: 90s
    volumes:
      # Persistent app data on the NAS volume
      - /volume1/docker/paperlessngxai:/app/data:rw
    environment:
      # --- Paperless-NGX Connection ---
      # Using Calypso's IP + external port (containers on different networks)
      PAPERLESS_URL: "http://192.168.0.250:8777"
      PAPERLESS_NGX_URL: "http://192.168.0.250:8777"
      PAPERLESS_HOST: "192.168.0.250"
      PAPERLESS_API_URL: "http://192.168.0.250:8777/api"
      # NOTE(review): secret in a VCS-tracked file — prefer an env_file or
      # Docker secret so tooling never reformats/retypes or leaks it.
      PAPERLESS_API_TOKEN: "REDACTED_TOKEN"
      # --- LLM Connection (LM Studio on Shinku-Ryuu via Tailscale) ---
      # Temporarily using LM Studio instead of Ollama (OpenAI-compatible API)
      # Original Ollama config:
      #   OLLAMA_API_URL: "http://192.168.0.200:11434"
      #   OLLAMA_MODEL: "llama3.2:latest"
      AI_PROVIDER: "custom"
      CUSTOM_BASE_URL: "http://100.98.93.15:1234/v1"
      CUSTOM_MODEL: "llama-3.2-3b-instruct"
      CUSTOM_API_KEY: "lm-studio"
      # --- Optional Settings ---
      # PROCESS_PREDEFINED_DOCUMENTS: "yes"
      # SCAN_INTERVAL: "*/30 * * * *"
    restart: unless-stopped