# Docker Compose stack: Ollama (LLM runtime) + Open WebUI front-end,
# intended for TrueNAS SCALE with persistent data under /mnt/data/llama.
#
# NOTE: the top-level `version` key is obsolete in Compose v2+ and ignored;
# kept only for compatibility with older docker-compose binaries.
version: "3.9"

services:
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    restart: unless-stopped
    ports:
      - "11434:11434"            # Ollama HTTP API
    environment:
      - OLLAMA_KEEP_ALIVE=10m    # unload idle models after 10 minutes
    volumes:
      - /mnt/data/llama:/root/.ollama   # model store, persisted on host
    # --- Optional AMD iGPU offload (experimental on SCALE) ---
    # WARNING: if enabling this section, MERGE the extra variables into the
    # live `environment:` list above instead of uncommenting the second
    # `environment:` key below — duplicate mapping keys are invalid YAML,
    # and most parsers silently keep only the last occurrence.
    # devices:
    #   - /dev/kfd
    #   - /dev/dri
    # group_add:
    #   - "video"
    #   - "render"
    # environment:
    #   - OLLAMA_KEEP_ALIVE=10m
    #   - HSA_ENABLE_SDMA=0
    #   - HSA_OVERRIDE_GFX_VERSION=11.0.0

  openwebui:
    image: ghcr.io/open-webui/open-webui:latest
    container_name: open-webui
    restart: unless-stopped
    depends_on:
      - ollama
    ports:
      - "3000:8080"  # browse to http://<truenas-ip>:3000
    environment:
      # Either var works on recent builds; keeping both for compatibility
      - OLLAMA_API_BASE_URL=http://ollama:11434
      - OLLAMA_BASE_URL=http://ollama:11434
      # Set to "false" to allow open signup without password
      - WEBUI_AUTH=true
    volumes:
      - /mnt/data/llama/open-webui:/app/backend/data   # WebUI state/config