Sanitized mirror from private repository - 2026-03-18 10:31:50 UTC

Commit 8e49624d78 by Gitea Mirror Bot, 2026-03-18 10:31:50 +00:00 · 1221 changed files with 304405 additions and 0 deletions


@@ -0,0 +1,55 @@
# Ollama - Local LLM inference
# URL: https://ollama.vishconcord.synology.me
# Port: 11434
# Run large language models locally
version: "3.8"
services:
  ollama:
    container_name: ollama
    image: ollama/ollama:rocm
    restart: unless-stopped
    ports:
      - "11434:11434"
    environment:
      OLLAMA_HOST: 0.0.0.0
      OLLAMA_ORIGINS: https://rxv4access.vishconcord.synology.me
      OLLAMA_OPENAI_COMPAT: "1"
      OLLAMA_INSTALL_MODELS: >
        phi3:mini,
        gemma:2b
      OLLAMA_NUM_THREAD: "4"
    volumes:
      - /volume2/metadata/docker/ollama/data:/root/.ollama:rw
      - /volume2/metadata/docker/ollama/custom:/models/custom:ro
    healthcheck:
      test: ["CMD", "ollama", "--version"]
      interval: 15s
      timeout: 5s
      retries: 3
      start_period: 45s
    deploy:
      resources:
        limits:
          memory: 18g
  webui:
    container_name: ollama-webui
    image: ghcr.io/open-webui/open-webui:0.6
    restart: unless-stopped
    depends_on:
      ollama:
        condition: service_healthy
    ports:
      - "8271:8080"
    environment:
      OLLAMA_BASE_URL: http://ollama:11434
      WEBUI_SECRET_KEY: "REDACTED_SECRET_KEY" # pragma: allowlist secret
    volumes:
      - /volume2/metadata/docker/ollama/webui:/app/backend/data:rw
    healthcheck:
      test: timeout 10s bash -c ':> /dev/tcp/127.0.0.1/8080' || exit 1
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 90s
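
Once the stack is up, a quick smoke test of both endpoints from the host is useful. A minimal sketch, assuming the host ports published above and that curl is available on the NAS; the model name phi3:mini mirrors the install list in the Compose file:

# List the models the Ollama container currently has installed.
curl -s http://localhost:11434/api/tags

# Exercise the OpenAI-compatible chat endpoint that Open WebUI also uses.
curl -s http://localhost:11434/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "phi3:mini", "messages": [{"role": "user", "content": "Say hello"}]}'

# Open WebUI itself is published on host port 8271.
curl -sI http://localhost:8271/ | head -n 1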


@@ -0,0 +1,24 @@
#!/bin/bash
set -euo pipefail

# Start the Ollama server in the background.
/bin/ollama serve &
pid=$!

# Wait for Ollama to be ready using Bash's built-in /dev/tcp networking.
while ! timeout 1 bash -c "echo > /dev/tcp/localhost/11434" 2>/dev/null; do
  echo "Waiting for Ollama to start..."
  sleep 1
done
echo "Ollama started."

# Pull/update the models listed in the stack's environment (comma-separated).
# MODELS falls back to OLLAMA_INSTALL_MODELS so the name matches the Compose file in this stack.
MODELS="${MODELS:-${OLLAMA_INSTALL_MODELS:-}}"
IFS=',' read -ra model_array <<< "$MODELS"
for model in "${model_array[@]}"; do
  model="${model//[[:space:]]/}"   # strip whitespace left over from the folded YAML value
  [ -n "$model" ] || continue
  echo "Installing/updating model $model..."
  ollama pull "$model"             # fetches or updates the named model
done
echo "All models installed/updated."

# Hand control back to the Ollama server process.
wait $pid
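
The comma-splitting above is easy to sanity-check outside the container. A minimal sketch, assuming a MODELS value shaped like the folded OLLAMA_INSTALL_MODELS entry in the Compose file; it only prints what would be pulled, so no Ollama server is needed:

MODELS="phi3:mini, gemma:2b"
IFS=',' read -ra model_array <<< "$MODELS"
for m in "${model_array[@]}"; do
  echo "would pull: ${m//[[:space:]]/}"
done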


@@ -0,0 +1,17 @@
Why these models?
Coding:
codegemma:2b → lightweight, good for completions.
codellama:7b → solid for structured code (like Docker Compose).
mistral:7b → generalist, also good with logic in code.
Writing (tech docs & emails):
llama3.2:3b → smaller generalist.
gemma:7b → more natural writing.
neural-chat:7b → conversational, good for email tone.
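
A convenience sketch for pre-pulling this whole shortlist in one pass, assuming these exact tags exist in the Ollama library and that the ollama CLI can reach the server on port 11434:

for m in codegemma:2b codellama:7b mistral:7b llama3.2:3b gemma:7b neural-chat:7b; do
  echo "Pulling $m..."
  ollama pull "$m"
done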