Sanitized mirror from private repository - 2026-04-20 01:32:01 UTC
Some checks failed
Documentation / Build Docusaurus (push) Failing after 5m3s
Documentation / Deploy to GitHub Pages (push) Has been skipped

This commit is contained in:
Gitea Mirror Bot
2026-04-20 01:32:01 +00:00
commit e7652c8dab
1445 changed files with 364095 additions and 0 deletions

View File

@@ -0,0 +1,45 @@
# Ollama - Local LLM inference
# URL: https://ollama.vishconcord.synology.me
# Port: 11434
# Run large language models locally
services:
  webui:
    container_name: OLLAMA-WEBUI
    image: ghcr.io/open-webui/open-webui:0.6
    volumes:
      - /root/docker/ollama/webui:/app/backend/data:rw
    environment:
      # Quoted: plain scalars with colons are safe here, but quoting keeps
      # env values unambiguously strings for every YAML 1.1/1.2 parser.
      OLLAMA_BASE_URL: "http://ollama:11434"
      WEBUI_SECRET_KEY: "REDACTED_SECRET_KEY"
    healthcheck:
      # Bash /dev/tcp probe against the in-container listener (8080);
      # avoids needing curl/wget inside the image.
      test: timeout 10s bash -c ':> /dev/tcp/127.0.0.1/8080' || exit 1
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 90s
    ports:
      # Always quote host:container port mappings — unquoted digits-and-colons
      # scalars can be re-typed (sexagesimal ints) by YAML 1.1 parsers.
      - "8271:8080"
    restart: on-failure
    depends_on:
      ollama:
        condition: service_healthy

  ollama:
    container_name: OLLAMA
    # NOTE(review): :latest is unpinned while webui is pinned at 0.6 —
    # consider pinning a specific ollama tag for reproducible deploys.
    image: ollama/ollama:latest
    entrypoint: ["/usr/bin/bash", "/entrypoint.sh"]
    volumes:
      - /root/docker/ollama/data:/root/.ollama:rw
      # Mounted read-only: the container only executes this script and
      # should never be able to modify it on the host.
      - /root/docker/ollama/entrypoint/entrypoint.sh:/entrypoint.sh:ro
    environment:
      # Comma-separated model list consumed by entrypoint.sh at startup.
      MODELS: "codegemma:2b,codellama:7b,mistral:7b,llama3.2:3b"
      # NOTE(review): not read by entrypoint.sh (which uses MODELS) nor by the
      # stock ollama image — presumably legacy; confirm before removing.
      OLLAMA_INSTALL_MODELS: "codegemma:2b,codellama:7b,mistral:7b,llama3.2:3b"
    ports:
      - "11434:11434"
    healthcheck:
      test: ["CMD", "ollama", "--version"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 30s
    restart: on-failure:5

View File

@@ -0,0 +1,24 @@
#!/bin/bash
# Entrypoint: start the Ollama server, wait until it accepts TCP connections,
# pull each model listed in $MODELS (comma-separated), then block on the
# server so the container's lifetime tracks the ollama daemon.
set -euo pipefail

# Start the Ollama server in the background and remember its PID.
/bin/ollama serve &
pid=$!

# Wait for readiness using Bash's built-in /dev/tcp networking
# (no curl/netcat required inside the image).
while ! timeout 1 bash -c "echo > /dev/tcp/localhost/11434" 2>/dev/null; do
    echo "Waiting for Ollama to start..."
    sleep 1
done
echo "Ollama started."

# Install/update the models named in the MODELS environment variable
# (comma-separated, e.g. "mistral:7b,llama3.2:3b"). ${MODELS:-} keeps
# `set -u` from aborting when the variable is unset.
if [[ -n "${MODELS:-}" ]]; then
    IFS=',' read -ra model_array <<< "$MODELS"
    for model in "${model_array[@]}"; do
        echo "Installing/Updating model $model..."
        # Quoted to prevent word splitting/globbing; pull fetches the
        # latest revision of the named model tag.
        ollama pull "$model"
    done
    echo "All models installed/updated."
else
    echo "MODELS is empty or unset; no models to install."
fi

# Hand control to the server process so signals/exit status propagate.
wait "$pid"