# Ollama - Local LLM inference
# URL: https://ollama.vishconcord.synology.me
# Port: 11434
# Run large language models locally
services:
  webui:
    container_name: OLLAMA-WEBUI
    image: ghcr.io/open-webui/open-webui:0.6
    volumes:
      - /root/docker/ollama/webui:/app/backend/data:rw
    environment:
      OLLAMA_BASE_URL: http://ollama:11434 # ollama service on the compose network
      WEBUI_SECRET_KEY: "REDACTED_SECRET_KEY"
    healthcheck:
      # TCP probe against Open WebUI's internal port 8080 using bash's /dev/tcp
      test: timeout 10s bash -c ':> /dev/tcp/127.0.0.1/8080' || exit 1
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 90s
    ports:
      - 8271:8080
    restart: on-failure
    depends_on:
      ollama:
        # Start the web UI only once the ollama healthcheck passes
        condition: service_healthy

  ollama:
    container_name: OLLAMA
    image: ollama/ollama:latest
    # Custom entrypoint script, bind-mounted below
    entrypoint: ["/usr/bin/bash", "/entrypoint.sh"]
    volumes:
      - /root/docker/ollama/data:/root/.ollama:rw
      - /root/docker/ollama/entrypoint/entrypoint.sh:/entrypoint.sh
    environment:
      # Model lists consumed by the custom entrypoint (which variable it reads
      # depends on the script)
      MODELS: codegemma:2b,codellama:7b,mistral:7b,llama3.2:3b
      OLLAMA_INSTALL_MODELS: codegemma:2b,codellama:7b,mistral:7b,llama3.2:3b
    ports:
      - 11434:11434
    healthcheck:
      # "ollama list" exits nonzero until the server answers, unlike
      # "ollama --version", which succeeds even when the server is down and
      # would make the depends_on service_healthy gate above meaningless
      test: ["CMD", "ollama", "list"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 30s
    restart: on-failure:5
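
# The ollama service overrides its entrypoint with a bind-mounted script that
# is not part of this file. A minimal sketch of what /entrypoint.sh could look
# like, assuming it starts the server and pre-pulls the models named in
# OLLAMA_INSTALL_MODELS (illustrative only, not the actual script):
#
#   #!/usr/bin/bash
#   set -euo pipefail
#
#   # Start the Ollama server in the background so models can be pulled.
#   ollama serve &
#   SERVER_PID=$!
#
#   # Wait until the server answers API requests ("ollama list" fails until it does).
#   until ollama list >/dev/null 2>&1; do
#     sleep 1
#   done
#
#   # Pull each comma-separated model from OLLAMA_INSTALL_MODELS.
#   IFS=',' read -ra MODEL_LIST <<< "${OLLAMA_INSTALL_MODELS:-}"
#   for model in "${MODEL_LIST[@]}"; do
#     ollama pull "$model"
#   done
#
#   # Hand the foreground back to the server process.
#   wait "$SERVER_PID"
#
# To bring the stack up and verify the API is answering:
#   docker compose up -d
#   curl http://localhost:11434/api/tags   # lists installed models
# Open WebUI is then reachable on port 8271 of the host.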