---
# AnythingLLM - Local RAG-powered document assistant
# URL: http://192.168.0.200:3101
# Port: 3101
# LLM: Olares qwen3:32b via OpenAI-compatible API
# NOTE(review): header says qwen3:32b but GENERIC_OPEN_AI_MODEL_PREF below is
# qwen3-coder:latest — confirm which model is intended and align the two.
# Docs: docs/services/individual/anythingllm.md
services:
  anythingllm:
    image: mintplexlabs/anythingllm:latest
    container_name: anythingllm
    hostname: anythingllm
    security_opt:
      # Prevent privilege escalation inside the container
      - no-new-privileges:true
    ports:
      # Host 3101 -> container 3001 (quoted — unquoted port maps risk
      # YAML 1.1 sexagesimal parsing)
      - "3101:3001"
    volumes:
      # App state (vector store, workspace config) — read-write
      - /volume2/metadata/docker/anythingllm/storage:/app/server/storage:rw
      # Paperless document snapshots mounted read-only for ingestion
      - /volume1/archive/paperless/backup_2026-03-15/media/documents/archive:/documents/paperless-archive:ro
      - /volume1/archive/paperless/backup_2026-03-15/media/documents/originals:/documents/paperless-originals:ro
    environment:
      STORAGE_DIR: /app/server/storage
      # Quoted: Compose environment values should be strings, not YAML ints
      SERVER_PORT: "3001"
      DISABLE_TELEMETRY: "true"
      TZ: America/Los_Angeles
      # LLM Provider - Olares qwen3:32b via OpenAI-compatible API
      LLM_PROVIDER: generic-openai
      GENERIC_OPEN_AI_BASE_PATH: http://192.168.0.145:31434/v1
      GENERIC_OPEN_AI_MODEL_PREF: qwen3-coder:latest
      GENERIC_OPEN_AI_MAX_TOKENS: "8192"
      # Placeholder — the local endpoint does not require authentication
      GENERIC_OPEN_AI_API_KEY: not-needed  # pragma: allowlist secret
      GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT: "65536"
      # Embedding and Vector DB
      EMBEDDING_ENGINE: native
      VECTOR_DB: lancedb
    healthcheck:
      # Probe the internal API port; container is marked unhealthy after
      # 3 failed attempts (15s apart) once the 30s startup grace expires
      test: ["CMD", "curl", "-f", "http://localhost:3001/api/ping"]
      interval: 15s
      timeout: 5s
      retries: 3
      start_period: 30s
    restart: unless-stopped