Sanitized mirror from private repository - 2026-04-18 11:19:59 UTC
Some checks failed
Documentation / Build Docusaurus (push) Failing after 5m14s
Documentation / Deploy to GitHub Pages (push) Has been skipped

This commit is contained in:
Gitea Mirror Bot
2026-04-18 11:19:59 +00:00
commit fb00a325d1
1418 changed files with 359990 additions and 0 deletions

View File

View File

@@ -0,0 +1,19 @@
# Mirror upstream APT repositories into the local archive.
# Every mirror uses the same rsync flags, and the destination path mirrors
# the source path under /volume1/archive/repo/mirror/<host>/<module>.
mirror_sync() {
  sudo rsync -avz --delete --ignore-errors --no-perms --no-owner --no-group \
    "rsync://$1" \
    "/volume1/archive/repo/mirror/$1"
}

mirror_sync archive.ubuntu.com/ubuntu              # Ubuntu archive
mirror_sync security.ubuntu.com/ubuntu             # Ubuntu security
mirror_sync deb.debian.org/debian                  # Debian archive
mirror_sync security.debian.org/debian-security    # Debian security

View File

@@ -0,0 +1,24 @@
# AdGuard Home — Atlantis (backup DNS)
# Port: 53 (DNS), 9080 (web UI)
# Purpose: Backup split-horizon DNS resolver
# Primary: Calypso (192.168.0.250)
# Backup: Atlantis (192.168.0.200) ← this instance
#
# Same filters, rewrites, and upstream DNS as Calypso.
# Router DHCP: primary=192.168.0.250, secondary=192.168.0.200
services:
  adguard:
    image: adguard/adguardhome:latest
    container_name: AdGuard
    # Host networking so AdGuard can bind port 53 directly on the NAS.
    network_mode: host
    mem_limit: 2g
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    # Give up after 5 failed restarts instead of crash-looping the resolver.
    restart: on-failure:5
    volumes:
      - /volume1/docker/adguard/config:/opt/adguardhome/conf:rw
      - /volume1/docker/adguard/data:/opt/adguardhome/work:rw
    environment:
      TZ: America/Los_Angeles

View File

@@ -0,0 +1,41 @@
# AnythingLLM - Local RAG-powered document assistant
# URL: http://192.168.0.200:3101
# Port: 3101
# LLM: Olares OpenAI-compatible endpoint (model set by GENERIC_OPEN_AI_MODEL_PREF below: qwen3-coder:latest)
# Docs: docs/services/individual/anythingllm.md
services:
  anythingllm:
    image: mintplexlabs/anythingllm:latest
    container_name: anythingllm
    hostname: anythingllm
    security_opt:
      - no-new-privileges:true
    ports:
      - "3101:3001"  # host 3101 -> container 3001
    volumes:
      - /volume2/metadata/docker2/anythingllm/storage:/app/server/storage:rw
      # Read-only mounts of the Paperless document export for RAG ingestion.
      - /volume1/archive/paperless/backup_2026-03-15/media/documents/archive:/documents/paperless-archive:ro
      - /volume1/archive/paperless/backup_2026-03-15/media/documents/originals:/documents/paperless-originals:ro
    environment:
      STORAGE_DIR: /app/server/storage
      SERVER_PORT: 3001
      DISABLE_TELEMETRY: "true"
      TZ: America/Los_Angeles
      # LLM provider — Olares OpenAI-compatible endpoint.
      # NOTE(review): model pref below is qwen3-coder:latest, not qwen3:32b as
      # the file header claims — confirm which is intended.
      LLM_PROVIDER: generic-openai
      GENERIC_OPEN_AI_BASE_PATH: http://192.168.0.145:31434/v1
      GENERIC_OPEN_AI_MODEL_PREF: qwen3-coder:latest
      GENERIC_OPEN_AI_MAX_TOKENS: 8192
      GENERIC_OPEN_AI_API_KEY: not-needed # pragma: allowlist secret
      GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT: 65536
      # Embedding and Vector DB
      EMBEDDING_ENGINE: native
      VECTOR_DB: lancedb
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3001/api/ping"]
      interval: 15s
      timeout: 5s
      retries: 3
      start_period: 30s
    restart: unless-stopped

View File

@@ -0,0 +1,498 @@
# Arr Suite - Media automation stack
# Services: Sonarr, Radarr, Prowlarr, Bazarr, Lidarr, Tdarr, LazyLibrarian, Audiobookshelf
# Manages TV shows, movies, music, books, audiobooks downloads and organization
# GitOps Test: Stack successfully deployed and auto-updating
#
# Storage Configuration (2026-02-01):
# - Downloads: /volume3/usenet (Synology SNV5420 NVMe RAID1 - 621 MB/s)
# - Media: /volume1/data (SATA RAID6 - 84TB)
# - Configs: /volume2/metadata/docker2 (Crucial P310 NVMe RAID1)
#
# Volume 3 created for fast download performance using 007revad's Synology_M2_volume script
#
# Theming: Self-hosted theme.park (Dracula theme)
# - TP_DOMAIN uses docker gateway IP to reach host's theme-park container
# - Deploy theme-park stack first: Atlantis/theme-park/theme-park.yaml
version: "3.8"  # NOTE: `version` is obsolete/ignored by Compose v2; kept for older tooling
# Shared theme.park settings.
# NOTE(review): this anchor is never aliased below — every service repeats the
# TP_* vars inline. Confirm whether the anchor can be removed or adopted.
x-themepark: &themepark
  TP_SCHEME: "http"
  TP_DOMAIN: "192.168.0.200:8580"
  TP_THEME: "dracula"
# Dedicated bridge with static container IPs in 172.24.0.0/24.
networks:
  media2_net:
    driver: bridge
    name: media2_net
    ipam:
      config:
        - subnet: 172.24.0.0/24
          gateway: 172.24.0.1
services:
  # Wizarr — Plex invite management (Web UI :5690)
  wizarr:
    image: ghcr.io/wizarrrr/wizarr:latest
    container_name: wizarr
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
      - DISABLE_BUILTIN_AUTH=true
    volumes:
      - /volume2/metadata/docker2/wizarr:/data/database
    ports:
      - "5690:5690"
    networks:
      media2_net:
        ipv4_address: 172.24.0.2
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # Tautulli — Plex usage statistics (Web UI :8181)
  tautulli:
    image: lscr.io/linuxserver/tautulli:latest
    container_name: tautulli
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
      - UMASK=022
      - DOCKER_MODS=ghcr.io/themepark-dev/theme.park:tautulli
      - TP_SCHEME=http
      - TP_DOMAIN=192.168.0.200:8580
      - TP_THEME=dracula
    volumes:
      - /volume2/metadata/docker2/tautulli:/config
    ports:
      - "8181:8181"
    networks:
      media2_net:
        ipv4_address: 172.24.0.12
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # Prowlarr — indexer manager for the *arr apps (Web UI :9696)
  prowlarr:
    image: lscr.io/linuxserver/prowlarr:latest
    container_name: prowlarr
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
      - UMASK=022
      - DOCKER_MODS=ghcr.io/themepark-dev/theme.park:prowlarr
      - TP_SCHEME=http
      - TP_DOMAIN=192.168.0.200:8580
      - TP_THEME=dracula
    volumes:
      - /volume2/metadata/docker2/prowlarr:/config
    ports:
      - "9696:9696"
    networks:
      media2_net:
        ipv4_address: 172.24.0.6
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # FlareSolverr — Cloudflare-challenge solver used by indexers (:8191)
  flaresolverr:
    image: flaresolverr/flaresolverr:latest
    container_name: flaresolverr
    environment:
      - TZ=America/Los_Angeles
    ports:
      - "8191:8191"
    networks:
      media2_net:
        ipv4_address: 172.24.0.4
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # SABnzbd — Usenet downloader; host networking (Web UI :8080 by default)
  sabnzbd:
    image: lscr.io/linuxserver/sabnzbd:latest
    container_name: sabnzbd
    network_mode: host
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
      - UMASK=022
      - DOCKER_MODS=ghcr.io/themepark-dev/theme.park:sabnzbd
      - TP_SCHEME=http
      - TP_DOMAIN=192.168.0.200:8580
      - TP_THEME=dracula
    volumes:
      - /volume2/metadata/docker2/sabnzbd:/config
      # NVMe volume3 for fast incomplete/complete download handling.
      - /volume3/usenet/incomplete:/data/incomplete
      - /volume3/usenet/complete:/data/complete
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # Jackett — legacy torrent indexer proxy (Web UI :9117)
  jackett:
    image: lscr.io/linuxserver/jackett:latest
    container_name: jackett
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
      - UMASK=022
      - DOCKER_MODS=ghcr.io/themepark-dev/theme.park:jackett
      - TP_SCHEME=http
      - TP_DOMAIN=192.168.0.200:8580
      - TP_THEME=dracula
    volumes:
      - /volume2/metadata/docker2/jackett:/config
      - /volume1/data:/downloads
    ports:
      - "9117:9117"
    networks:
      media2_net:
        ipv4_address: 172.24.0.11
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # Sonarr — TV series management (Web UI :8989)
  sonarr:
    image: lscr.io/linuxserver/sonarr:latest
    container_name: sonarr
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
      - UMASK=022
      - DOCKER_MODS=ghcr.io/themepark-dev/theme.park:sonarr
      - TP_SCHEME=http
      - TP_DOMAIN=192.168.0.200:8580
      - TP_THEME=dracula
    volumes:
      - /volume2/metadata/docker2/sonarr:/config
      - /volume1/data:/data
      - /volume3/usenet:/sab
      - /volume2/torrents:/downloads # Deluge download dir — required for torrent import
    ports:
      - "8989:8989"
    networks:
      media2_net:
        ipv4_address: 172.24.0.7
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # Lidarr — music management (Web UI :8686)
  lidarr:
    image: lscr.io/linuxserver/lidarr:latest
    container_name: lidarr
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
      - UMASK=022
      - DOCKER_MODS=ghcr.io/themepark-dev/theme.park:lidarr
      - TP_SCHEME=http
      - TP_DOMAIN=192.168.0.200:8580
      - TP_THEME=dracula
    volumes:
      - /volume2/metadata/docker2/lidarr:/config
      - /volume1/data:/data
      - /volume3/usenet:/sab
      # arr-scripts: custom init scripts for Deezer integration via deemix
      # Config: /volume2/metadata/docker2/lidarr/extended.conf (contains ARL token, not in git)
      # Setup: https://github.com/RandomNinjaAtk/arr-scripts
      - /volume2/metadata/docker2/lidarr-scripts/custom-services.d:/custom-services.d
      - /volume2/metadata/docker2/lidarr-scripts/custom-cont-init.d:/custom-cont-init.d
    ports:
      - "8686:8686"
    networks:
      media2_net:
        ipv4_address: 172.24.0.9
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # Radarr — movie management (Web UI :7878)
  radarr:
    image: lscr.io/linuxserver/radarr:latest
    container_name: radarr
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
      - UMASK=022
      - DOCKER_MODS=ghcr.io/themepark-dev/theme.park:radarr
      - TP_SCHEME=http
      - TP_DOMAIN=192.168.0.200:8580
      - TP_THEME=dracula
    volumes:
      - /volume2/metadata/docker2/radarr:/config
      - /volume1/data:/data
      - /volume3/usenet:/sab
      - /volume2/torrents:/downloads # Deluge download dir — required for torrent import
    ports:
      - "7878:7878"
    networks:
      media2_net:
        ipv4_address: 172.24.0.8
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # Readarr retired - replaced with LazyLibrarian + Audiobookshelf
  # LazyLibrarian — ebook/audiobook automation (Web UI :5299)
  lazylibrarian:
    image: lscr.io/linuxserver/lazylibrarian:latest
    container_name: lazylibrarian
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
      - UMASK=022
      # Two mods: theme.park skin + bundled Calibre for format conversion.
      - DOCKER_MODS=ghcr.io/themepark-dev/theme.park:lazylibrarian|ghcr.io/linuxserver/mods:lazylibrarian-calibre
      - TP_SCHEME=http
      - TP_DOMAIN=192.168.0.200:8580
      - TP_THEME=dracula
    volumes:
      - /volume2/metadata/docker2/lazylibrarian:/config
      - /volume1/data:/data
      - /volume3/usenet:/sab
      - /volume2/torrents:/downloads # Deluge download dir — required for torrent import
      - /volume2/metadata/docker2/lazylibrarian-scripts/custom-cont-init.d:/custom-cont-init.d # patch tracker-less torrent handling
    ports:
      - "5299:5299"
    networks:
      media2_net:
        ipv4_address: 172.24.0.5
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # Audiobookshelf — audiobook/podcast/ebook server (Web UI :13378)
  audiobookshelf:
    image: ghcr.io/advplyr/audiobookshelf:latest
    container_name: audiobookshelf
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
    volumes:
      - /volume2/metadata/docker2/audiobookshelf:/config
      - /volume1/data/media/audiobooks:/audiobooks
      - /volume1/data/media/podcasts:/podcasts
      - /volume1/data/media/ebooks:/ebooks
    ports:
      - "13378:80"  # host 13378 -> container 80
    networks:
      media2_net:
        ipv4_address: 172.24.0.16
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # Bazarr - subtitle management for Sonarr and Radarr
  # Web UI: http://192.168.0.200:6767
  # Language profile: English (profile ID 1), no mustContain filter
  # Providers: REDACTED_APP_PASSWORD (vishinator), podnapisi, yifysubtitles, subf2m, subsource, subdl, animetosho
  # NOTE: OpenSubtitles.com may be IP-blocked — submit unblock request at opensubtitles.com/support
  # Notifications: Signal API via homelab-vm:8080 → REDACTED_PHONE_NUMBER
  # API keys stored in: /volume2/metadata/docker2/bazarr/config/config.yaml (not in repo)
  bazarr:
    image: lscr.io/linuxserver/bazarr:latest
    container_name: bazarr
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
      - UMASK=022
      - DOCKER_MODS=ghcr.io/themepark-dev/theme.park:bazarr
      - TP_SCHEME=http
      - TP_DOMAIN=192.168.0.200:8580
      - TP_THEME=dracula
    volumes:
      - /volume2/metadata/docker2/bazarr:/config
      - /volume1/data:/data
      - /volume3/usenet:/sab
    ports:
      - "6767:6767"
    networks:
      media2_net:
        ipv4_address: 172.24.0.10
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # Whisparr — adult media management, nightly build (Web UI :6969)
  whisparr:
    image: ghcr.io/hotio/whisparr:nightly
    container_name: whisparr
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
      - UMASK=022
      - TP_HOTIO=true  # hotio image: theming via TP_* vars, no DOCKER_MODS
      - TP_SCHEME=http
      - TP_DOMAIN=192.168.0.200:8580
      - TP_THEME=dracula
    volumes:
      - /volume2/metadata/docker2/whisparr:/config
      - /volume1/data:/data
      - /volume3/usenet/complete:/sab/complete
      - /volume3/usenet/incomplete:/sab/incomplete
    ports:
      - "6969:6969"
    networks:
      media2_net:
        ipv4_address: 172.24.0.3
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # Plex — media server; host networking for discovery/DLNA (Web UI :32400)
  plex:
    image: lscr.io/linuxserver/plex:latest
    container_name: plex
    network_mode: host
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
      - UMASK=022
      - VERSION=docker
      - DOCKER_MODS=ghcr.io/themepark-dev/theme.park:plex
      - TP_SCHEME=http
      - TP_DOMAIN=192.168.0.200:8580
      - TP_THEME=dracula
    volumes:
      - /volume2/metadata/docker2/plex:/config
      - /volume1/data/media:/data/media
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # Jellyseerr — media request management (Web UI :5055)
  jellyseerr:
    image: fallenbagel/jellyseerr:latest
    container_name: jellyseerr
    user: "1029:100"
    environment:
      - TZ=America/Los_Angeles
      # Note: Jellyseerr theming requires CSS injection via reverse proxy or browser extension
      # theme.park doesn't support DOCKER_MODS for non-linuxserver images
    volumes:
      - /volume2/metadata/docker2/jellyseerr:/app/config
    ports:
      - "5055:5055"
    networks:
      media2_net:
        ipv4_address: 172.24.0.14
    # Explicit public resolvers (bypasses local split-horizon DNS).
    dns:
      - 9.9.9.9
      - 1.1.1.1
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # Gluetun — WireGuard VPN gateway; Deluge shares this container's netns.
  gluetun:
    image: qmcgaw/gluetun:v3.38.0
    container_name: gluetun
    privileged: true
    devices:
      - /dev/net/tun:/dev/net/tun
    labels:
      # Pinned VPN gateway — exclude from Watchtower auto-updates.
      - com.centurylinklabs.watchtower.enable=false
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
      # --- WireGuard ---
      # NOTE(review): the WireGuard private key is committed in plain text here.
      # Rotate it and move it to an .env file / secret store outside the repo.
      - VPN_SERVICE_PROVIDER=custom
      - VPN_TYPE=wireguard
      - WIREGUARD_PRIVATE_KEY=aAavqcZ6sx3IlgiH5Q8m/6w33mBu4M23JBM8N6cBKEU= # pragma: allowlist secret
      - WIREGUARD_ADDRESSES=10.2.0.2/32
      - WIREGUARD_DNS=10.2.0.1
      - WIREGUARD_PUBLIC_KEY=FrVOQ+Dy0StjfwNtbJygJCkwSJt6ynlGbQwZBZWYfhc=
      - WIREGUARD_ALLOWED_IPS=0.0.0.0/0,::/0
      - WIREGUARD_ENDPOINT_IP=79.127.185.193
      - WIREGUARD_ENDPOINT_PORT=51820
    volumes:
      - /volume2/metadata/docker2/gluetun:/gluetun
    # Ports published here are reachable for services using this netns (Deluge).
    ports:
      - "8112:8112" # Deluge WebUI
      - "58946:58946" # Torrent TCP
      - "58946:58946/udp" # Torrent UDP
    networks:
      media2_net:
        ipv4_address: 172.24.0.20
    healthcheck:
      # Probes gluetun's internal health endpoint on 127.0.0.1:9999.
      test: ["CMD-SHELL", "wget -qO /dev/null http://127.0.0.1:9999 2>/dev/null || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 6
      start_period: 30s
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # Deluge — torrent client; all traffic tunneled through gluetun.
  deluge:
    image: lscr.io/linuxserver/deluge:latest
    container_name: deluge
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
      - UMASK=022
      - DOCKER_MODS=ghcr.io/themepark-dev/theme.park:deluge
      - TP_SCHEME=http
      - TP_DOMAIN=192.168.0.200:8580
      - TP_THEME=dracula
    volumes:
      - /volume2/metadata/docker2/deluge:/config
      - /volume2/torrents:/downloads
    # Share gluetun's network namespace: no VPN, no traffic.
    network_mode: "service:gluetun"
    depends_on:
      gluetun:
        condition: service_healthy
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # Tdarr — distributed transcoding server + internal node (Web UI :8265)
  # NOTE(review): the sha256 digest below contains a redaction placeholder and
  # is not a valid digest — restore the real pinned digest before deploying.
  tdarr:
    image: ghcr.io/haveagitgat/tdarr@sha256:048ae8ed4de8e9f0de51ad73REDACTED_GITEA_TOKEN # v2.67.01 - pinned to keep all nodes in sync
    container_name: tdarr
    labels:
      # Digest-pinned — exclude from Watchtower auto-updates.
      - com.centurylinklabs.watchtower.enable=false
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
      - UMASK=022
      - serverIP=0.0.0.0
      - serverPort=8266
      - webUIPort=8265
      - internalNode=true
      - inContainer=true
      - ffmpegVersion=6
      - nodeName=Atlantis
    volumes:
      - /volume2/metadata/docker2/tdarr/server:/app/server
      - /volume2/metadata/docker2/tdarr/configs:/app/configs
      - /volume2/metadata/docker2/tdarr/logs:/app/logs
      - /volume1/data/media:/media
      - /volume3/usenet/tdarr_cache:/temp
      - /volume3/usenet/tdarr_cache:/cache # Fix: internal node uses /cache path
    ports:
      - "8265:8265"
      - "8266:8266"
    networks:
      media2_net:
        ipv4_address: 172.24.0.15
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped

View File

@@ -0,0 +1,154 @@
#!/usr/bin/env bash
# =============================================================================
# Arr-Suite Installer — Atlantis (192.168.0.200)
# =============================================================================
# One-line install:
#   bash <(curl -fsSL https://git.vish.gg/Vish/homelab/raw/branch/main/hosts/synology/atlantis/arr-suite/install.sh)
#
# What this installs:
#   Sonarr, Radarr, Lidarr, Bazarr, Prowlarr, Jackett, FlaresolverR
#   SABnzbd, Deluge (via gluetun VPN), Tdarr, LazyLibrarian
#   Audiobookshelf, Whisparr, Plex, Jellyseerr, Tautulli, Wizarr
#
# Prerequisites:
#   - Synology DSM with Container Manager (Docker)
#   - /volume1/data, /volume2/metadata/docker2, /volume3/usenet, /volume2/torrents
#   - PUID=1029, PGID=100 (DSM user: vish)
#   - WireGuard credentials for gluetun (must be set in compose or env)
# =============================================================================
set -euo pipefail

REPO_URL="https://git.vish.gg/Vish/homelab"
COMPOSE_URL="${REPO_URL}/raw/branch/main/hosts/synology/atlantis/arr-suite/docker-compose.yml"
DOCKER="${DOCKER_BIN:-/usr/local/bin/docker}"
STACK_DIR="/volume2/metadata/docker2/arr-suite"
COMPOSE_FILE="${STACK_DIR}/docker-compose.yml"

# Colours
RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; NC='\033[0m'
info()  { echo -e "${GREEN}[INFO]${NC} $*"; }
warn()  { echo -e "${YELLOW}[WARN]${NC} $*"; }
error() { echo -e "${RED}[ERROR]${NC} $*"; exit 1; }

# ── Preflight ─────────────────────────────────────────────────────────────────
info "Arr-Suite installer starting"
[[ $(id -u) -eq 0 ]] || error "Run as root (sudo bash install.sh)"
command -v "$DOCKER" &>/dev/null || error "Docker not found at $DOCKER — set DOCKER_BIN env var"
for vol in /volume1/data /volume2/metadata/docker2 /volume3/usenet /volume2/torrents; do
  [[ -d "$vol" ]] || warn "Volume $vol does not exist — create it before starting services"
done

# ── Required directories ───────────────────────────────────────────────────────
info "Creating config directories..."
SERVICES=(
  sonarr radarr lidarr bazarr prowlarr jackett sabnzbd
  deluge gluetun tdarr/server tdarr/configs tdarr/logs
  lazylibrarian audiobookshelf whisparr plex jellyseerr
  tautulli wizarr
)
for svc in "${SERVICES[@]}"; do
  mkdir -p "/volume2/metadata/docker2/${svc}"
done

# Download directories
mkdir -p \
  /volume3/usenet/complete \
  /volume3/usenet/incomplete \
  /volume3/usenet/tdarr_cache \
  /volume2/torrents/complete \
  /volume2/torrents/incomplete

# Media library
mkdir -p \
  /volume1/data/media/tv \
  /volume1/data/media/movies \
  /volume1/data/media/music \
  /volume1/data/media/audiobooks \
  /volume1/data/media/podcasts \
  /volume1/data/media/ebooks \
  /volume1/data/media/misc

# Lidarr arr-scripts directories
mkdir -p \
  /volume2/metadata/docker2/lidarr-scripts/custom-cont-init.d \
  /volume2/metadata/docker2/lidarr-scripts/custom-services.d

# ── Lidarr arr-scripts bootstrap ──────────────────────────────────────────────
INIT_SCRIPT="/volume2/metadata/docker2/lidarr-scripts/custom-cont-init.d/scripts_init.bash"
if [[ ! -f "$INIT_SCRIPT" ]]; then
  info "Downloading arr-scripts init script..."
  # Only chmod when the download succeeded: under `set -e`, a chmod on a
  # missing file would abort the whole install despite the best-effort intent.
  if curl -fsSL "https://raw.githubusercontent.com/RandomNinjaAtk/arr-scripts/main/lidarr/scripts_init.bash" \
      -o "$INIT_SCRIPT"; then
    chmod +x "$INIT_SCRIPT"
  else
    warn "Failed to download arr-scripts init — download manually from RandomNinjaAtk/arr-scripts"
  fi
fi

# ── Download compose file ──────────────────────────────────────────────────────
info "Downloading docker-compose.yml..."
mkdir -p "$STACK_DIR"
curl -fsSL "$COMPOSE_URL" -o "$COMPOSE_FILE" || error "Failed to download compose file from $COMPOSE_URL"

# ── Warn about secrets ────────────────────────────────────────────────────────
warn "==================================================================="
warn "ACTION REQUIRED before starting:"
warn ""
warn "1. Set gluetun WireGuard credentials in:"
warn "   $COMPOSE_FILE"
warn "   - WIREGUARD_PRIVATE_KEY"
warn "   - WIREGUARD_PUBLIC_KEY"
warn "   - WIREGUARD_ENDPOINT_IP"
warn ""
warn "2. Set Lidarr Deezer ARL token:"
warn "   /volume2/metadata/docker2/lidarr/extended.conf"
warn "   arlToken=\"<your-arl-token>\""
warn "   Get from: deezer.com -> DevTools -> Cookies -> arl"
warn ""
warn "3. Set Plex claim token (optional, for initial setup):"
warn "   https://www.plex.tv/claim"
warn "   Add to compose: PLEX_CLAIM=<token>"
warn "==================================================================="

# ── Pull images ───────────────────────────────────────────────────────────────
# `|| pull_images=n`: don't let a non-interactive stdin (EOF) kill the script
# under `set -e` — treat it as "no".
read -rp "Pull all images now? (y/N): " pull_images || pull_images="n"
if [[ "${pull_images,,}" == "y" ]]; then
  info "Pulling images (this may take a while)..."
  "$DOCKER" compose -f "$COMPOSE_FILE" pull
fi

# ── Start stack ───────────────────────────────────────────────────────────────
read -rp "Start all services now? (y/N): " start_services || start_services="n"
if [[ "${start_services,,}" == "y" ]]; then
  info "Starting arr-suite..."
  "$DOCKER" compose -f "$COMPOSE_FILE" up -d
  info "Done! Services starting..."
  echo ""
  echo "Service URLs:"
  echo " Sonarr:         http://192.168.0.200:8989"
  echo " Radarr:         http://192.168.0.200:7878"
  echo " Lidarr:         http://192.168.0.200:8686"
  echo " Prowlarr:       http://192.168.0.200:9696"
  echo " SABnzbd:        http://192.168.0.200:8080"
  echo " Deluge:         http://192.168.0.200:8112 (password: see Deluge config)"
  echo " Bazarr:         http://192.168.0.200:6767"
  echo " Tdarr:          http://192.168.0.200:8265"
  echo " Whisparr:       http://192.168.0.200:6969"
  echo " Plex:           http://192.168.0.200:32400/web"
  echo " Jellyseerr:     http://192.168.0.200:5055"
  echo " Audiobookshelf: http://192.168.0.200:13378"
  echo " LazyLibrarian:  http://192.168.0.200:5299"
  echo " Tautulli:       http://192.168.0.200:8181"
  echo " Wizarr:         http://192.168.0.200:5690"
  echo " Jackett:        http://192.168.0.200:9117"
fi

info "Install complete."
info "Docs: https://git.vish.gg/Vish/homelab/src/branch/main/docs/services/individual/"

View File

@@ -0,0 +1,18 @@
# Jellyseerr — media request management (Web UI :5055)
services:
  jellyseerr:
    image: fallenbagel/jellyseerr:latest
    container_name: jellyseerr
    user: 1029:65536 #YOUR_UID_AND_GID
    environment:
      - TZ=America/Los_Angeles #CHANGE_TO_YOUR_TZ
    volumes:
      - /volume1/docker2/jellyseerr:/app/config
    ports:
      - 5055:5055/tcp
    network_mode: synobridge
    dns: #DNS Servers to help with speed issues some have
      - 9.9.9.9
      - 1.1.1.1
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped

View File

@@ -0,0 +1,163 @@
# =============================================================================
# PLEX MEDIA SERVER - DISASTER RECOVERY CONFIGURATION
# =============================================================================
#
# SERVICE OVERVIEW:
# - Primary media streaming server for homelab
# - Serves 4K movies, TV shows, music, and photos
# - Hardware transcoding enabled via Intel Quick Sync
# - Critical service for media consumption
#
# DISASTER RECOVERY NOTES:
# - Configuration stored in /volume1/docker2/plex (CRITICAL BACKUP)
# - Media files in /volume1/data/media (128TB+ library)
# - Database contains watch history, metadata, user preferences
# - Hardware transcoding requires Intel GPU access (/dev/dri)
#
# BACKUP PRIORITY: HIGH
# - Config backup: Daily automated backup required
# - Media backup: Secondary NAS sync (Calypso)
# - Database backup: Included in config volume
#
# RECOVERY TIME OBJECTIVE (RTO): 30 minutes
# RECOVERY POINT OBJECTIVE (RPO): 24 hours
#
# DEPENDENCIES:
# - Volume1 must be accessible (current issue: SSD cache failure)
# - Intel GPU drivers for hardware transcoding
# - Network connectivity for remote access
# - Plex Pass subscription for premium features
#
# PORTS USED:
# - 32400/tcp: Main Plex web interface and API
# - 3005/tcp: Plex Home Theater via Plex Companion
# - 8324/tcp: Plex for Roku via Plex Companion
# - 32469/tcp: Plex DLNA Server
# - 1900/udp: Plex DLNA Server
# - 32410/udp, 32412/udp, 32413/udp, 32414/udp: GDM Network discovery
#
# =============================================================================
services:
  plex:
    # CONTAINER IMAGE:
    # - linuxserver/plex: Community-maintained, regularly updated
    # - Alternative: plexinc/pms-docker (official but less frequent updates)
    # - Version pinning recommended for production: linuxserver/plex:1.32.8
    image: linuxserver/plex:latest
    # CONTAINER NAME:
    # - Fixed name for easy identification and management
    # - Used in monitoring, logs, and backup scripts
    container_name: plex
    # NETWORK CONFIGURATION:
    # - host mode: Required for Plex auto-discovery and DLNA
    # - Allows Plex to bind to all network interfaces
    # - Enables UPnP/DLNA functionality for smart TVs
    # - SECURITY NOTE: Exposes all container ports to host
    network_mode: host
    environment:
      # USER/GROUP PERMISSIONS:
      # - PUID=1029: User ID for file ownership (Synology 'admin' user)
      # - PGID=65536: Group ID for file access (Synology 'administrators' group)
      # - CRITICAL: Must match NAS user/group for file access
      # - Find correct values: id admin (on Synology)
      - PUID=1029 #CHANGE_TO_YOUR_UID
      - PGID=65536 #CHANGE_TO_YOUR_GID
      # TIMEZONE CONFIGURATION:
      # - TZ: Timezone for logs, scheduling, and metadata
      # - Must match system timezone for accurate timestamps
      # - Format: Area/City (e.g., America/Los_Angeles, Europe/London)
      - TZ=America/Los_Angeles #CHANGE_TO_YOUR_TZ
      # FILE PERMISSIONS:
      # - UMASK=022: Default file permissions (755 for dirs, 644 for files)
      # - Ensures proper read/write access for media files
      # - 022 = owner: rwx, group: r-x, other: r-x
      - UMASK=022
      # PLEX VERSION MANAGEMENT:
      # - VERSION=docker: Use version bundled with Docker image
      # - Alternative: VERSION=latest (auto-update, not recommended for production)
      # - Alternative: VERSION=1.32.8.7639-fb6452ebf (pin specific version)
      - VERSION=docker
      # PLEX CLAIM TOKEN:
      # - Used for initial server setup and linking to Plex account
      # - Get token from: https://plex.tv/claim (valid for 4 minutes)
      # - Leave empty after initial setup
      # - SECURITY: Remove token after claiming server
      - PLEX_CLAIM=
    volumes:
      # CONFIGURATION VOLUME:
      # - /volume1/docker2/plex:/config
      # - Contains: Database, metadata, thumbnails, logs, preferences
      # - SIZE: ~50-100GB depending on library size
      # - BACKUP CRITICAL: Contains all user data and settings
      # - RECOVERY: Restore this volume to recover complete Plex setup
      - /volume1/docker2/plex:/config
      # MEDIA VOLUME:
      # - /volume1/data/media:/data/media
      # - Contains: Movies, TV shows, music, photos (128TB+ library)
      # - READ-ONLY recommended for security (add :ro suffix if desired)
      # - STRUCTURE: Organized by type (movies/, tv/, music/, photos/)
      # - BACKUP: Synced to Calypso NAS for redundancy
      - /volume1/data/media:/data/media
    devices:
      # HARDWARE TRANSCODING:
      # - /dev/dri:/dev/dri: Intel Quick Sync Video access
      # - Enables hardware-accelerated transcoding (H.264, H.265, AV1)
      # - CRITICAL: Reduces CPU usage by 80-90% during transcoding
      # - REQUIREMENT: Intel GPU with Quick Sync support
      # - TROUBLESHOOTING: Check 'ls -la /dev/dri' for render devices
      - /dev/dri:/dev/dri
    security_opt:
      # SECURITY HARDENING:
      # - no-new-privileges: Prevents privilege escalation attacks
      # - Container cannot gain additional privileges during runtime
      # - Recommended security practice for all containers
      - no-new-privileges:true
    # RESTART POLICY:
    # - unless-stopped: restarts on failure and system reboot, but stays down
    #   after a manual `docker stop` (operator intent is respected)
    # - Alternative: always (restarts even after a manual stop)
    restart: unless-stopped
# =============================================================================
# DISASTER RECOVERY PROCEDURES:
# =============================================================================
#
# BACKUP VERIFICATION:
# docker exec plex ls -la /config/Library/Application\ Support/Plex\ Media\ Server/
#
# MANUAL BACKUP:
# tar -czf /volume2/backups/plex-config-$(date +%Y%m%d).tar.gz /volume1/docker2/plex/
#
# RESTORE PROCEDURE:
# 1. Stop container: docker-compose down
# 2. Restore config: tar -xzf plex-backup.tar.gz -C /volume1/docker2/
# 3. Fix permissions: chown -R 1029:65536 /volume1/docker2/plex/
# 4. Start container: docker-compose up -d
# 5. Verify: Check http://atlantis.vish.local:32400/web
#
# TROUBLESHOOTING:
# - No hardware transcoding: Check /dev/dri permissions and Intel GPU drivers
# - Database corruption: Restore from backup or rebuild library
# - Permission errors: Verify PUID/PGID match NAS user/group
# - Network issues: Check host networking and firewall rules
#
# MONITORING:
# - Health check: curl -f http://localhost:32400/identity
# - Logs: docker logs plex
# - Transcoding: Plex Dashboard > Settings > Transcoder
# - Performance: Grafana dashboard for CPU/GPU usage
#
# =============================================================================

View File

@@ -0,0 +1,29 @@
# Prowlarr — indexer manager (Web UI :9696) + FlareSolverr helper
services:
  linuxserver-prowlarr:
    image: linuxserver/prowlarr:latest
    container_name: prowlarr
    environment:
      - PUID=1029 #CHANGE_TO_YOUR_UID
      - PGID=65536 #CHANGE_TO_YOUR_GID
      - TZ=America/Los_Angeles #CHANGE_TO_YOUR_TZ
      - UMASK=022
    volumes:
      - /volume1/docker2/prowlarr:/config
    ports:
      - 9696:9696/tcp
    network_mode: synobridge
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
  # FlareSolverr — Cloudflare challenge solver used by Prowlarr indexers (:8191)
  flaresolverr:
    image: flaresolverr/flaresolverr:latest
    container_name: flaresolverr
    environment:
      - TZ=America/Los_Angeles #CHANGE_TO_YOUR_TZ
    ports:
      - 8191:8191
    network_mode: synobridge
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped

View File

@@ -0,0 +1,18 @@
# SABnzbd — Usenet downloader (Web UI :8080)
services:
  sabnzbd:
    image: linuxserver/sabnzbd:latest
    container_name: sabnzbd
    environment:
      - PUID=1029 #CHANGE_TO_YOUR_UID
      - PGID=65536 #CHANGE_TO_YOUR_GID
      - TZ=America/Los_Angeles #CHANGE_TO_YOUR_TZ
      - UMASK=022
    volumes:
      - /volume1/docker2/sabnzbd:/config
      - /volume1/data/usenet:/data/usenet
    ports:
      - 8080:8080/tcp
    network_mode: synobridge
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped

View File

@@ -0,0 +1,17 @@
# Tautulli — Plex usage statistics (Web UI :8181)
services:
  tautulli:
    image: linuxserver/tautulli:latest
    container_name: tautulli
    environment:
      - PUID=1029 #CHANGE_TO_YOUR_UID
      - PGID=65536 #CHANGE_TO_YOUR_GID
      - TZ=America/Los_Angeles #CHANGE_TO_YOUR_TZ
      - UMASK=022
    volumes:
      - /volume1/docker2/tautulli:/config
    ports:
      - 8181:8181/tcp
    network_mode: synobridge
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped

View File

@@ -0,0 +1,18 @@
# Whisparr — adult media management, nightly build (Web UI :6969)
services:
  whisparr:
    image: hotio/whisparr:nightly
    container_name: whisparr
    environment:
      - PUID=1029 #CHANGE_TO_YOUR_UID
      - PGID=65536 #CHANGE_TO_YOUR_GID
      - TZ=America/Los_Angeles #CHANGE_TO_YOUR_TZ
      - UMASK=022
    volumes:
      - /volume1/docker2/whisparr:/config
      - /volume1/data/:/data
    ports:
      - 6969:6969/tcp
    network_mode: synobridge
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped

View File

@@ -0,0 +1,19 @@
version: '3.8'  # NOTE: `version` is obsolete/ignored by Compose v2; kept for older tooling
# Wizarr — Plex invite management (Web UI :5690)
services:
  wizarr:
    image: ghcr.io/wizarrrr/wizarr:latest
    container_name: wizarr
    environment:
      - PUID=1029
      - PGID=65536
      - TZ=America/Los_Angeles
      - DISABLE_BUILTIN_AUTH=false
    volumes:
      - /volume1/docker2/wizarr:/data/database
    ports:
      - 5690:5690/tcp
    network_mode: synobridge
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped

View File

@@ -0,0 +1,18 @@
# Generate an SSH key for passwordless rsync from the NAS.
ssh-keygen -t ed25519 -C "synology@atlantis"

# NOTE(review): `-n` makes this first transfer a dry run — remove the `n`
# from -avhn to actually copy (presumably a deliberate preview; confirm).
rsync -avhn --progress -e "ssh -T -c aes128-gcm@openssh.com -o Compression=no -x" \
  "/volume1/data/media/tv/Lord of Mysteries/" \
  root@100.99.156.20:/root/docker/plex/tvshows/

rsync -avh --progress -e "ssh -T -c aes128-gcm@openssh.com -o Compression=no -x" \
  "/volume1/data/media/movies/Ballerina (2025)" \
  root@100.99.156.20:/root/docker/plex/movies/

# Copy only the two listed recordings: rsync filters are first-match,
# so the includes must precede the catch-all --exclude '*'.
rsync -avh --progress -e "ssh -T -c aes128-gcm@openssh.com -o Compression=no -x" \
  "/volume1/data/media/other/" \
  --include 'VID_20240328_150621.mp4' \
  --include 'VID_20240328_153720.mp4' \
  --exclude '*' \
  homelab@100.67.40.126:/home/homelab/whisper-docker/audio/

View File

@@ -0,0 +1,18 @@
# Baikal - CalDAV/CardDAV server
# Port: 12852 (host) -> 80 (container); published externally as cal.vish.gg
# Self-hosted calendar and contacts sync server
version: "3.7"
services:
  baikal:
    image: ckulka/baikal
    container_name: baikal
    ports:
      - "12852:80"
    environment:
      - PUID=1026
      - PGID=100
    volumes:
      - /volume2/metadata/docker/baikal/config:/var/www/baikal/config
      - /volume2/metadata/docker/baikal/html:/var/www/baikal/Specific
    restart: unless-stopped

View File

@@ -0,0 +1 @@
https://cal.vish.gg/dav.php/calendars/vish/default?export

View File

@@ -0,0 +1,20 @@
# Calibre Web - E-book management
# Port: 8183 (host) -> 8083 (container)
# Web-based e-book library with OPDS support
name: calibre
services:
  calibre-web:
    container_name: calibre-webui
    ports:
      - 8183:8083
    environment:
      - PUID=1026
      - PGID=100
      - TZ=America/Los_Angeles
      # Bundles the full Calibre binaries for conversion/upload features.
      - DOCKER_MODS=linuxserver/mods:universal-calibre
      - OAUTHLIB_RELAX_TOKEN_SCOPE=1
    volumes:
      - /volume2/metadata/docker/calibreweb:/config
      - /volume2/metadata/docker/books:/books
    restart: unless-stopped
    image: ghcr.io/linuxserver/calibre-web

View File

@@ -0,0 +1,43 @@
# Cloudflare Tunnel for Atlantis NAS
# Provides secure external access without port forwarding
#
# SETUP INSTRUCTIONS:
# 1. Go to https://one.dash.cloudflare.com/ → Zero Trust → Networks → Tunnels
# 2. Create a new tunnel named "atlantis-tunnel"
# 3. Copy the tunnel token (starts with eyJ...)
# 4. Provide it as the TUNNEL_TOKEN stack environment variable (.env /
#    Portainer stack variables) — it is consumed below via ${TUNNEL_TOKEN}
# 5. In the tunnel dashboard, add these public hostnames:
#
#    | Public Hostname      | Service                    |
#    |----------------------|----------------------------|
#    | pw.vish.gg           | http://localhost:4080      |
#    | cal.vish.gg          | http://localhost:12852     |
#    | meet.thevish.io      | https://localhost:5443     |
#    | joplin.thevish.io    | http://localhost:22300     |
#    | mastodon.vish.gg     | http://192.168.0.154:3000  |
#    | matrix.thevish.io    | http://192.168.0.154:8081  |
#    | mx.vish.gg           | http://192.168.0.154:8082  |
#    | mm.crista.love       | http://192.168.0.154:8065  |
#
# 6. Deploy this stack in Portainer
# (`version:` removed — obsolete and ignored by Compose v2)
services:
  cloudflared:
    image: cloudflare/cloudflared:latest
    container_name: cloudflare-tunnel
    restart: unless-stopped
    command: tunnel run
    environment:
      - TUNNEL_TOKEN=${TUNNEL_TOKEN}
    network_mode: host # Needed to access localhost services and VMs
    # Alternative if you prefer bridge network:
    # networks:
    #   - tunnel_net
    # extra_hosts:
    #   - "host.docker.internal:host-gateway"
# networks:
#   tunnel_net:
#     driver: bridge

View File

@@ -0,0 +1,83 @@
# Standalone DERP Relay Server — Atlantis (Home NAS)
# =============================================================================
# Tailscale/Headscale DERP relay for home-network fallback connectivity.
# Serves as region 902 "Home - Atlantis" in the headscale derpmap.
#
# Why standalone (not behind nginx):
# The DERP protocol does an HTTP→binary protocol switch inside TLS.
# It is incompatible with HTTP reverse proxies. Must handle TLS directly.
#
# Port layout:
# 8445/tcp — DERP relay (direct TLS, NOT proxied through NPM)
# 3480/udp — STUN (NAT traversal hints)
# Port 3478 taken by coturn/Jitsi, 3479 taken by coturn/Matrix on matrix-ubuntu.
#
# TLS cert:
# Issued by Let's Encrypt via certbot DNS challenge (Cloudflare).
# Cert path: /volume1/docker/derper-atl/certs/
# Cloudflare credentials: /volume1/docker/derper-atl/secrets/cloudflare.ini
# Auto-renewed monthly by the cert-renewer sidecar (ofelia + certbot/dns-cloudflare).
# On first deploy or manual renewal, run:
# docker run -it --rm \
# -v /volume1/docker/derper-atl/certs:/etc/letsencrypt \
# -v /volume1/docker/derper-atl/secrets:/root/.secrets:ro \
# certbot/dns-cloudflare certonly \
# --dns-cloudflare \
# --dns-cloudflare-credentials /root/.secrets/cloudflare.ini \
# -d derp-atl.vish.gg
# Then copy certs to flat layout:
# cp certs/live/derp-atl.vish.gg/fullchain.pem certs/live/derp-atl.vish.gg/derp-atl.vish.gg.crt
# cp certs/live/derp-atl.vish.gg/privkey.pem certs/live/derp-atl.vish.gg/derp-atl.vish.gg.key
#
# Firewall / DSM rules required (one-time):
# Allow inbound 8445/tcp and 3480/udp in DSM → Security → Firewall
#
# Router port forwards required (one-time, on home router):
# 8445/tcp → 192.168.0.200 (Atlantis LAN IP, main interface)
# 3480/udp → 192.168.0.200
#
# DNS: derp-atl.vish.gg → home public IP (managed by dynamicdnsupdater.yaml, unproxied)
# =============================================================================
services:
  # DERP relay itself — terminates TLS directly (cannot sit behind a proxy).
  derper-atl:
    image: fredliang/derper:latest
    container_name: derper-atl
    restart: unless-stopped
    ports:
      - "8445:8445" # DERP TLS — direct, not behind NPM
      - "3480:3480/udp" # STUN (3478 taken by coturn/Jitsi, 3479 taken by coturn/Matrix)
    volumes:
      # Full letsencrypt mount required — live/ contains symlinks into archive/
      # mounting only live/ breaks symlink resolution inside the container
      - /volume1/docker/derper-atl/certs:/etc/letsencrypt:ro
    environment:
      - DERP_DOMAIN=derp-atl.vish.gg
      # manual mode: derper reads the pre-issued cert from DERP_CERT_DIR
      # instead of requesting one itself.
      - DERP_CERT_MODE=manual
      - DERP_CERT_DIR=/etc/letsencrypt/live/derp-atl.vish.gg
      - DERP_ADDR=:8445
      - DERP_STUN=true
      - DERP_STUN_PORT=3480
      - DERP_HTTP_PORT=-1 # disable plain HTTP, TLS only
      - DERP_VERIFY_CLIENTS=false # allow any node (headscale manages auth)
  cert-renewer:
    # Runs certbot monthly via supercronic; after renewal copies certs to the
    # flat layout derper expects, then restarts derper-atl via Docker socket.
    # Schedule: 03:00 on the 1st of every month.
    # NOTE(review): the renewal/copy/restart logic itself lives in renew.sh
    # (mounted below, not visible in this file) — review it alongside this.
    image: certbot/dns-cloudflare:latest
    container_name: derper-atl-cert-renewer
    restart: unless-stopped
    depends_on:
      - derper-atl
    # NOTE(review): `apk` assumes an Alpine-based image — confirm
    # certbot/dns-cloudflare:latest remains Alpine-based.
    entrypoint: >-
      sh -c "
      apk add --no-cache supercronic curl &&
      echo '0 3 1 * * /renew.sh' > /crontab &&
      exec supercronic /crontab
      "
    volumes:
      # NOTE(review): docker.sock grants this container root-equivalent host
      # access; required so renew.sh can restart derper-atl, but keep the
      # image trusted and pinned if possible.
      - /var/run/docker.sock:/var/run/docker.sock
      - /volume1/docker/derper-atl/certs:/etc/letsencrypt
      - /volume1/docker/derper-atl/secrets:/root/.secrets:ro
      - /volume1/docker/derper-atl/renew.sh:/renew.sh:ro

View File

@@ -0,0 +1,28 @@
# Diun — Docker Image Update Notifier
#
# Polls the registries behind every container running on this host and
# pushes an ntfy notification whenever an upstream image digest changes.
# Cadence: weekly — Mondays at 09:00 local time.
#
# ntfy topic: https://ntfy.vish.gg/diun
services:
  diun:
    container_name: diun
    image: crazymax/diun:latest
    restart: unless-stopped
    environment:
      LOG_LEVEL: "info"
      DIUN_WATCH_WORKERS: "20"
      DIUN_WATCH_SCHEDULE: "0 9 * * 1" # cron: Mondays 09:00
      DIUN_WATCH_JITTER: "30s"
      DIUN_PROVIDERS_DOCKER: "true"
      DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT: "true"
      DIUN_NOTIF_NTFY_ENDPOINT: "https://ntfy.vish.gg"
      DIUN_NOTIF_NTFY_TOPIC: "diun"
    volumes:
      # Read container list / image digests from the local daemon.
      - /var/run/docker.sock:/var/run/docker.sock
      # Persist the digest database between restarts.
      - diun-data:/data
volumes:
  diun-data:

View File

@@ -0,0 +1,20 @@
services:
  # Dockpeek — web UI over the local Docker daemon (port list / quick links).
  dockpeek:
    container_name: Dockpeek
    image: ghcr.io/dockpeek/dockpeek:latest
    healthcheck:
      # TCP probe of the app port via bash's /dev/tcp.
      # NOTE(review): requires bash inside the image — confirm the dockpeek
      # image ships bash, otherwise this healthcheck always fails.
      test: timeout 10s bash -c ':> /dev/tcp/127.0.0.1/8000' || exit 1
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 90s
    environment:
      # NOTE(review): credentials are stored in plaintext in this file;
      # consider compose secrets or an env file outside VCS.
      SECRET_KEY: "REDACTED_SECRET_KEY" # pragma: allowlist secret
      USERNAME: vish
      PASSWORD: REDACTED_PASSWORD # pragma: allowlist secret
      DOCKER_HOST: unix:///var/run/docker.sock
    ports:
      - 3812:8000
    volumes:
      # Socket access is how dockpeek enumerates containers; it also grants
      # the container broad control over the host daemon.
      - /var/run/docker.sock:/var/run/docker.sock
    restart: on-failure:5

View File

@@ -0,0 +1,71 @@
services:
  # Postgres backing store for Documenso.
  db:
    image: postgres:17
    container_name: Documenso-DB
    hostname: documenso-db
    security_opt:
      - no-new-privileges:true
    healthcheck:
      # Gate the app container on the DB actually accepting connections.
      test: ["CMD", "pg_isready", "-q", "-d", "documenso", "-U", "documensouser"]
      timeout: 45s
      interval: 10s
      retries: 10
    volumes:
      - /volume1/docker/documenso/db:/var/lib/postgresql/data:rw
    environment:
      POSTGRES_DB: documenso
      POSTGRES_USER: documensouser
      # NOTE(review): must match the password embedded in the app's
      # NEXT_PRIVATE_DATABASE_URL — verify after un-redacting.
      POSTGRES_PASSWORD: "REDACTED_PASSWORD" # pragma: allowlist secret
    restart: on-failure:5
documenso:
image: documenso/documenso:latest
container_name: Documenso
ports:
- 3513:3000
volumes:
- /volume1/docker/documenso/data:/opt/documenso:rw
depends_on:
db:
condition: service_healthy
environment:
- PORT=3000
- NEXTAUTH_SECRET="REDACTED_NEXTAUTH_SECRET" # pragma: allowlist secret
- NEXT_PRIVATE_ENCRYPTION_KEY=y6vZRCEKo2rEsJzXlQfgXg3fLKlhiT7h # pragma: allowlist secret
- NEXT_PRIVATE_ENCRYPTION_SECONDARY_KEY=QA7tXtw7fDExGRjrJ616hDmiJ4EReXlP # pragma: allowlist secret
- NEXTAUTH_URL=https://documenso.thevish.io
- NEXT_PUBLIC_WEBAPP_URL=https://documenso.thevish.io
- NEXT_PRIVATE_INTERNAL_WEBAPP_URL=http://documenso:3000
- NEXT_PUBLIC_MARKETING_URL=https://documenso.thevish.io
- NEXT_PRIVATE_DATABASE_URL=postgres://documensouser:documensopass@documenso-db:5432/documenso
- NEXT_PRIVATE_DIRECT_DATABASE_URL=postgres://documensouser:documensopass@documenso-db:5432/documenso
- NEXT_PUBLIC_UPLOAD_TRANSPORT=database
- NEXT_PRIVATE_SMTP_TRANSPORT=smtp-auth
- NEXT_PRIVATE_SMTP_HOST=smtp.gmail.com
- NEXT_PRIVATE_SMTP_PORT=587
- NEXT_PRIVATE_SMTP_USERNAME=your-email@example.com
- NEXT_PRIVATE_SMTP_PASSWORD="REDACTED_PASSWORD" jkbo lmag sapq # pragma: allowlist secret
- NEXT_PRIVATE_SMTP_SECURE=false
- NEXT_PRIVATE_SMTP_FROM_NAME=Vish
- NEXT_PRIVATE_SMTP_FROM_ADDRESS=your-email@example.com
- NEXT_PRIVATE_SIGNING_LOCAL_FILE_PATH=/opt/documenso/cert.p12
#NEXT_PRIVATE_SMTP_UNSAFE_IGNORE_TLS=true
#NEXT_PRIVATE_SMTP_APIKEY_USER=${NEXT_PRIVATE_SMTP_APIKEY_USER}
#NEXT_PRIVATE_SMTP_APIKEY=${NEXT_PRIVATE_SMTP_APIKEY}
#NEXT_PRIVATE_RESEND_API_KEY=${NEXT_PRIVATE_RESEND_API_KEY}
#NEXT_PRIVATE_MAILCHANNELS_API_KEY=${NEXT_PRIVATE_MAILCHANNELS_API_KEY}
#NEXT_PRIVATE_MAILCHANNELS_ENDPOINT=${NEXT_PRIVATE_MAILCHANNELS_ENDPOINT}
#NEXT_PRIVATE_MAILCHANNELS_DKIM_DOMAIN=${NEXT_PRIVATE_MAILCHANNELS_DKIM_DOMAIN}
#NEXT_PRIVATE_MAILCHANNELS_DKIM_SELECTOR=${NEXT_PRIVATE_MAILCHANNELS_DKIM_SELECTOR}
#NEXT_PRIVATE_MAILCHANNELS_DKIM_PRIVATE_KEY=${NEXT_PRIVATE_MAILCHANNELS_DKIM_PRIVATE_KEY}
#NEXT_PUBLIC_DOCUMENT_SIZE_UPLOAD_LIMIT=${NEXT_PUBLIC_DOCUMENT_SIZE_UPLOAD_LIMIT}
#NEXT_PUBLIC_POSTHOG_KEY=${NEXT_PUBLIC_POSTHOG_KEY}
#NEXT_PUBLIC_DISABLE_SIGNUP=${NEXT_PUBLIC_DISABLE_SIGNUP}
#NEXT_PRIVATE_UPLOAD_ENDPOINT=${NEXT_PRIVATE_UPLOAD_ENDPOINT}
#NEXT_PRIVATE_UPLOAD_FORCE_PATH_STYLE=${NEXT_PRIVATE_UPLOAD_FORCE_PATH_STYLE}
#NEXT_PRIVATE_UPLOAD_REGION=${NEXT_PRIVATE_UPLOAD_REGION}
#NEXT_PRIVATE_UPLOAD_BUCKET=${NEXT_PRIVATE_UPLOAD_BUCKET}
#NEXT_PRIVATE_UPLOAD_ACCESS_KEY_ID=${NEXT_PRIVATE_UPLOAD_ACCESS_KEY_ID}
#NEXT_PRIVATE_UPLOAD_SECRET_ACCESS_KEY=${NEXT_PRIVATE_UPLOAD_SECRET_ACCESS_KEY}
#NEXT_PRIVATE_GOOGLE_CLIENT_ID=${NEXT_PRIVATE_GOOGLE_CLIENT_ID}
#NEXT_PRIVATE_GOOGLE_CLIENT_SECRET=${NEXT_PRIVATE_GOOGLE_CLIENT_SECRET}

View File

@@ -0,0 +1,19 @@
# DokuWiki - Wiki platform
# Ports: 8399 (host HTTP) -> 80, 4443 (host HTTPS) -> 443
# [header previously said 8084, which did not match the mappings below]
# Simple wiki without database, uses plain text files
# NOTE(review): `version:` is obsolete in Compose v2 and ignored with a warning.
version: "3.9"
services:
  dokuwiki:
    image: ghcr.io/linuxserver/dokuwiki
    container_name: dokuwiki
    restart: unless-stopped
    ports:
      - "8399:80"
      - "4443:443"
    environment:
      - TZ=America/Los_Angeles
      - PUID=1026
      - PGID=100
    volumes:
      - /volume2/metadata/docker/dokuwiki:/config

View File

@@ -0,0 +1,21 @@
# Dozzle - Real-time Docker log viewer
# Port: 8892
# Lightweight container log viewer with web UI
# Updated: 2026-03-11
services:
  dozzle:
    container_name: Dozzle
    image: amir20/dozzle:latest
    mem_limit: 3g
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    restart: on-failure:5
    ports:
      - 8892:8080
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      # "simple" auth provider reads its accounts from users.yml inside /data.
      - /volume2/metadata/docker/dozzle:/data:rw
    environment:
      DOZZLE_AUTH_PROVIDER: simple
      # Comma-separated list of remote Dozzle agents (Tailscale 100.x IPs,
      # agent port 7007) aggregated into this single UI.
      DOZZLE_REMOTE_AGENT: "100.72.55.21:7007,100.77.151.40:7007,100.103.48.78:7007,100.75.252.64:7007,100.67.40.126:7007,100.82.197.124:7007,100.125.0.20:7007,100.85.21.51:7007"

View File

@@ -0,0 +1,6 @@
# Dozzle users.yml — account list consumed by DOZZLE_AUTH_PROVIDER=simple.
users:
  vish:
    name: "Vish k"
    # bcrypt hash. Generate with IT-TOOLS https://it-tools.tech/bcrypt
    password: "REDACTED_PASSWORD" # pragma: allowlist secret
    email: your-email@example.com

View File

@@ -0,0 +1,74 @@
# Dynamic DNS Updater
# Updates DNS records when public IP changes
# Deployed on Atlantis - updates all homelab domains
#
# NOTE(review): the four services below differ only in DOMAINS/PROXIED;
# a YAML anchor (or compose `extends`) would remove the duplication.
# NOTE(review): `version:` is obsolete in Compose v2 and ignored.
version: '3.8'
services:
  # vish.gg (proxied domains - all public services)
  ddns-vish-proxied:
    image: favonia/cloudflare-ddns:latest
    network_mode: host
    restart: unless-stopped
    user: "1026:100"
    read_only: true
    cap_drop: [all]
    security_opt: [no-new-privileges:true]
    environment:
      - CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
      # Main domains + Calypso services (sf, dav, actual, docs, ost, retro)
      # NOTE: mx.vish.gg intentionally excluded — MX/mail records must NOT be CF-proxied
      # NOTE: reddit.vish.gg and vp.vish.gg removed — obsolete services
      # NOTE: pt.vish.gg added 2026-04-18 — Portainer (behind Authentik SSO, CF-proxied)
      - DOMAINS=vish.gg,www.vish.gg,cal.vish.gg,dash.vish.gg,gf.vish.gg,git.vish.gg,kuma.vish.gg,mastodon.vish.gg,nb.vish.gg,npm.vish.gg,ntfy.vish.gg,ollama.vish.gg,paperless.vish.gg,pw.vish.gg,pt.vish.gg,rackula.vish.gg,rx.vish.gg,rxdl.vish.gg,rxv4access.vish.gg,rxv4download.vish.gg,scrutiny.vish.gg,sso.vish.gg,sf.vish.gg,dav.vish.gg,actual.vish.gg,docs.vish.gg,ost.vish.gg,retro.vish.gg,wizarr.vish.gg
      - PROXIED=true
  # thevish.io (proxied domains)
  ddns-thevish-proxied:
    image: favonia/cloudflare-ddns:latest
    network_mode: host
    restart: unless-stopped
    user: "1026:100"
    read_only: true
    cap_drop: [all]
    security_opt: [no-new-privileges:true]
    environment:
      - CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
      # Removed: documenso.thevish.io, *.vps.thevish.io (deleted)
      # Added: binterest, hoarder (now proxied)
      # meet.thevish.io moved here: CF proxy enabled Jan 2026 (NPM migration)
      - DOMAINS=www.thevish.io,joplin.thevish.io,matrix.thevish.io,binterest.thevish.io,hoarder.thevish.io,meet.thevish.io
      - PROXIED=true
  # vish.gg (unproxied domains - special protocols requiring direct IP)
  ddns-vish-unproxied:
    image: favonia/cloudflare-ddns:latest
    network_mode: host
    restart: unless-stopped
    user: "1026:100"
    read_only: true
    cap_drop: [all]
    security_opt: [no-new-privileges:true]
    environment:
      - CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
      # mx.vish.gg - Matrix homeserver; CF proxy breaks federation (port 8448)
      # derp.vish.gg - Headscale built-in DERP relay; CF proxy breaks DERP protocol
      # derp-atl.vish.gg - Atlantis DERP relay (region 902); CF proxy breaks DERP protocol
      # headscale.vish.gg - Headscale VPN server; CF proxy breaks Tailscale client connections
      # livekit.mx.vish.gg - Matrix LiveKit SFU; needs direct WebRTC (UDP 50000-50100), CF proxy breaks it
      - DOMAINS=mx.vish.gg,derp.vish.gg,derp-atl.vish.gg,headscale.vish.gg,livekit.mx.vish.gg
      - PROXIED=false
  # thevish.io (unproxied domains - special protocols)
  ddns-thevish-unproxied:
    image: favonia/cloudflare-ddns:latest
    network_mode: host
    restart: unless-stopped
    user: "1026:100"
    read_only: true
    cap_drop: [all]
    security_opt: [no-new-privileges:true]
    environment:
      - CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
      # turn.thevish.io - TURN/STUN protocol needs direct connection
      - DOMAINS=turn.thevish.io
      - PROXIED=false

View File

@@ -0,0 +1,19 @@
# Fenrus - Application dashboard
# Port: 4500 (host) -> 3000 (container)  [header previously said 5000, which
# did not match the mapping below]
# Modern dashboard for self-hosted services
# NOTE(review): `version:` is obsolete in Compose v2 and ignored with a warning.
version: "3"
services:
  fenrus:
    container_name: Fenrus
    image: revenz/fenrus:latest
    restart: unless-stopped
    environment:
      - TZ=America/Los_Angeles
    ports:
      - 4500:3000
    volumes:
      - /volume2/metadata/docker/fenrus:/app/data
    # Resolve Tailnet hostnames for dashboard health checks / links.
    dns:
      - 100.103.48.78 # Calypso's Tailscale IP as resolver
      - 100.72.55.21 # Concord_NUC or your Tailnet DNS node

View File

@@ -0,0 +1,66 @@
# Firefly III - Finance
# Port: 8080
# Personal finance manager
version: '3.7'
networks:
internal:
external: false
services:
  # Firefly III web application; all secrets/config come from stack.env.
  firefly:
    container_name: firefly
    image: fireflyiii/core:latest
    ports:
      - 6182:8080
    volumes:
      - /volume1/docker/fireflyup:/var/www/html/storage/upload
    restart: unless-stopped
    env_file:
      - stack.env
    depends_on:
      - firefly-db
    networks:
      - internal
  # Postgres backing store (internal network only — not exposed to host).
  firefly-db:
    container_name: firefly-db
    # NOTE(review): untagged `postgres` floats across major versions; a major
    # upgrade against an existing data dir will fail to start — pin a major
    # tag (e.g. postgres:16) before the next pull. Confirm current version.
    image: postgres
    volumes:
      - /volume1/docker/fireflydb:/var/lib/postgresql/data
    restart: unless-stopped
    environment:
      POSTGRES_DB: firefly
      POSTGRES_USER: firefly
      POSTGRES_PASSWORD: "REDACTED_PASSWORD" # pragma: allowlist secret
    networks:
      - internal
firefly-db-backup:
container_name: firefly-db-backup
image: postgres
volumes:
- /volume1/docker/fireflydb:/dump
- /etc/localtime:/etc/localtime:ro
environment:
PGHOST: firefly-db
PGDATABASE: firefly
PGUSER: firefly
PGPASSWORD: "REDACTED_PASSWORD" # pragma: allowlist secret
BACKUP_NUM_KEEP: 10
BACKUP_FREQUENCY: 7d
entrypoint: |
bash -c 'bash -s < /dump/dump_\`date +%d-%m-%Y"_"%H_%M_%S\`.psql
(ls -t /dump/dump*.psql|head -n $$BACKUP_NUM_KEEP;ls /dump/dump*.psql)|sort|uniq -u|xargs rm -- {}
sleep $$BACKUP_FREQUENCY
done
EOF'
networks:
- internal
  # Redis cache/queue backend for Firefly III (internal network only).
  firefly-redis:
    container_name: firefly-redis
    # NOTE(review): untagged `redis` floats across major versions — consider
    # pinning a major tag for reproducibility.
    image: redis
    networks:
      - internal

View File

@@ -0,0 +1,11 @@
# Extra fstab entries for Atlantis Synology 1823xs+ (192.168.0.200)
# These are appended to /etc/fstab on the host
#
# Credentials file for pi-5: /root/.pi5_smb_creds (chmod 600)
# username=vish
#   password=REDACTED_PASSWORD
#
# Note: Atlantis volumes are btrfs managed by DSM (volume1/2/3)
# pi-5 SMB share (NVMe storagepool) — mounted at /volume1/pi5_storagepool
//192.168.0.66/storagepool /volume1/pi5_storagepool cifs credentials=/root/.pi5_smb_creds,vers=3.0,nofail,_netdev 0 0

View File

@@ -0,0 +1,22 @@
# GitLab - Git repository
# Ports: 8929 (HTTP), 2224 (SSH)
# Self-hosted Git and CI/CD
# (`version:` removed — obsolete and ignored by Compose v2)
services:
  web:
    image: 'gitlab/gitlab-ce:latest'
    restart: unless-stopped
    hostname: 'gl.vish.gg'
    environment:
      # Omnibus settings applied at container reconfigure; external_url port
      # must match the host port published below.
      GITLAB_OMNIBUS_CONFIG: |
        external_url 'http://gl.vish.gg:8929'
        gitlab_rails['gitlab_shell_ssh_port'] = 2224
    ports:
      - "8929:8929/tcp"
      # Quoted: an unquoted 2224:22 is read as a base-60 integer (133462) by
      # YAML 1.1 parsers — the classic compose sexagesimal port trap.
      - "2224:22"
    volumes:
      - /volume1/docker/gitlab/config:/etc/gitlab
      - /volume1/docker/gitlab/logs:/var/log/gitlab
      - /volume1/docker/gitlab/data:/var/opt/gitlab
    # GitLab needs a larger shared-memory segment than Docker's 64m default.
    shm_size: '256m'

View File

@@ -0,0 +1,143 @@
# Grafana - Dashboards
# Port: 3000
# Metrics visualization and dashboards
version: "3.9"
services:
  # Grafana UI — host port 3340, fronted publicly as https://gf.vish.gg.
  grafana:
    image: grafana/grafana:latest
    container_name: Grafana
    hostname: grafana
    networks:
      - grafana-net
    mem_limit: 512m
    cpu_shares: 512
    security_opt:
      - no-new-privileges:true
    # Synology uid:gid so the bind-mounted data dir is writable.
    user: 1026:100
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:3000/api/health
    ports:
      - 3340:3000
    volumes:
      - /volume1/docker/grafana/data:/var/lib/grafana:rw
    environment:
      TZ: America/Los_Angeles
      GF_INSTALL_PLUGINS: grafana-clock-panel,grafana-simple-json-datasource,natel-discrete-panel,grafana-piechart-panel
      # Authentik SSO Configuration
      GF_SERVER_ROOT_URL: https://gf.vish.gg
      GF_AUTH_GENERIC_OAUTH_ENABLED: "true"
      GF_AUTH_GENERIC_OAUTH_NAME: Authentik
      GF_AUTH_GENERIC_OAUTH_CLIENT_ID: "REDACTED_CLIENT_ID" # pragma: allowlist secret
      GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET: "REDACTED_CLIENT_SECRET" # pragma: allowlist secret
      GF_AUTH_GENERIC_OAUTH_SCOPES: openid profile email
      GF_AUTH_GENERIC_OAUTH_AUTH_URL: https://sso.vish.gg/application/o/authorize/
      GF_AUTH_GENERIC_OAUTH_TOKEN_URL: https://sso.vish.gg/application/o/token/
      GF_AUTH_GENERIC_OAUTH_API_URL: https://sso.vish.gg/application/o/userinfo/
      GF_AUTH_SIGNOUT_REDIRECT_URL: https://sso.vish.gg/application/o/grafana/end-session/
      # JMESPath: maps Authentik group membership to Grafana org roles,
      # defaulting to Viewer.
      GF_AUTH_GENERIC_OAUTH_ROLE_ATTRIBUTE_PATH: "contains(groups[*], 'Grafana Admins') && 'Admin' || contains(groups[*], 'Grafana Editors') && 'Editor' || 'Viewer'"
      # Keep local admin auth working
      GF_AUTH_DISABLE_LOGIN_FORM: "false"
    restart: on-failure:5
prometheus:
image: prom/prometheus
command:
- '--storage.tsdb.retention.time=60d'
- --config.file=/etc/prometheus/prometheus.yml
container_name: Prometheus
hostname: prometheus-server
networks:
- grafana-net
- prometheus-net
mem_limit: 1g
cpu_shares: 768
security_opt:
- no-new-privileges=true
user: 1026:100
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:9090/ || exit 1
volumes:
- /volume1/docker/grafana/prometheus:/prometheus:rw
- /volume1/docker/grafana/prometheus.yml:/etc/prometheus/prometheus.yml:ro
restart: on-failure:5
node-exporter:
image: prom/node-exporter:latest
command:
- --collector.disable-defaults
- --collector.stat
- --collector.time
- --collector.cpu
- --collector.loadavg
- --collector.hwmon
- --collector.meminfo
- --collector.diskstats
container_name: Prometheus-Node
hostname: prometheus-node
networks:
- prometheus-net
mem_limit: 256m
mem_reservation: 64m
cpu_shares: 512
security_opt:
- no-new-privileges=true
read_only: true
user: 1026:100
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:9100/
restart: on-failure:5
snmp-exporter:
image: prom/snmp-exporter:latest
command:
- --config.file=/etc/snmp_exporter/snmp.yml
container_name: Prometheus-SNMP
hostname: prometheus-snmp
networks:
- prometheus-net
mem_limit: 256m
mem_reservation: 64m
cpu_shares: 512
security_opt:
- no-new-privileges:true
read_only: true
user: 1026:100
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:9116/ || exit 1
volumes:
- /volume1/docker/grafana/snmp:/etc/snmp_exporter/:ro
restart: on-failure:5
cadvisor:
image: gcr.io/cadvisor/cadvisor:latest
command:
- '--docker_only=true'
container_name: Prometheus-cAdvisor
hostname: prometheus-cadvisor
networks:
- prometheus-net
mem_limit: 256m
mem_reservation: 64m
cpu_shares: 512
security_opt:
- no-new-privileges=true
read_only: true
volumes:
- /:/rootfs:ro
- /var/run:/var/run:ro
- /sys:/sys:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
restart: on-failure:5
networks:
grafana-net:
name: grafana-net
ipam:
config:
- subnet: 192.168.50.0/24
prometheus-net:
name: prometheus-net
ipam:
config:
- subnet: 192.168.51.0/24

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,29 @@
# Node Exporter - Prometheus metrics
# Port: 9100 (host network)
# Exposes hardware/OS metrics for Prometheus
# NOTE(review): `version:` is obsolete in Compose v2 and ignored with a warning.
version: "3.8"
services:
  node-exporter:
    image: quay.io/prometheus/node-exporter:latest
    container_name: node_exporter
    # host network + host pid so metrics describe the NAS, not the container.
    network_mode: host
    pid: host
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--path.rootfs=/rootfs'
      # NOTE(review): this flag was renamed to
      # --collector.filesystem.mount-points-exclude in newer node_exporter
      # releases — confirm :latest still accepts the old name.
      - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'
    restart: unless-stopped
  snmp-exporter:
    image: quay.io/prometheus/snmp-exporter:latest
    container_name: snmp_exporter
    network_mode: host # important, so exporter can talk to DSM SNMP on localhost
    volumes:
      - /volume2/metadata/docker/snmp/snmp.yml:/etc/snmp_exporter/snmp.yml:ro
    restart: unless-stopped

View File

@@ -0,0 +1,278 @@
# =============================================================================
# HOMELAB MONITORING STACK - CRITICAL INFRASTRUCTURE VISIBILITY
# =============================================================================
#
# SERVICE OVERVIEW:
# - Complete monitoring solution for homelab infrastructure
# - Grafana: Visualization and dashboards
# - Prometheus: Metrics collection and storage
# - Node Exporter: System metrics (CPU, memory, disk, network)
# - SNMP Exporter: Network device monitoring (router, switches)
# - cAdvisor: Container metrics and resource usage
# - Blackbox Exporter: Service availability and response times
# - Speedtest Exporter: Internet connection monitoring
#
# DISASTER RECOVERY PRIORITY: HIGH
# - Essential for infrastructure visibility during outages
# - Contains historical performance data
# - Critical for troubleshooting and capacity planning
#
# RECOVERY TIME OBJECTIVE (RTO): 30 minutes
# RECOVERY POINT OBJECTIVE (RPO): 4 hours (metrics retention)
#
# DEPENDENCIES:
# - Volume2 for data persistence (separate from Volume1)
# - Network access to all monitored systems
# - SNMP access to network devices
# - Docker socket access for container monitoring
#
# =============================================================================
version: '3'
services:
# ==========================================================================
# GRAFANA - Visualization and Dashboard Platform
# ==========================================================================
grafana:
# CONTAINER IMAGE:
# - grafana/grafana:latest: Official Grafana image
# - Consider pinning version for production: grafana/grafana:10.2.0
# - Auto-updates with Watchtower (monitor for breaking changes)
image: grafana/grafana:latest
# CONTAINER IDENTIFICATION:
# - Grafana: Clear identification for monitoring and logs
# - grafana: Internal hostname for service communication
container_name: Grafana
hostname: grafana
# NETWORK CONFIGURATION:
# - grafana-net: Isolated network for Grafana and data sources
# - Allows secure communication with Prometheus
# - Prevents unauthorized access to monitoring data
networks:
- grafana-net
# RESOURCE ALLOCATION:
# - mem_limit: 512MB (sufficient for dashboards and queries)
# - cpu_shares: 512 (medium priority, less than Prometheus)
# - Grafana is lightweight but needs memory for dashboard rendering
mem_limit: 512m
cpu_shares: 512
# SECURITY CONFIGURATION:
# - no-new-privileges: Prevents privilege escalation attacks
# - user: 1026:100 (Synology user/group for file permissions)
# - CRITICAL: Must match NAS permissions for data access
security_opt:
- no-new-privileges:true
user: 1026:100
# HEALTH MONITORING:
# - wget: Tests Grafana API health endpoint
# - /api/health: Built-in Grafana health check
# - Ensures web interface is responsive
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:3000/api/health
# NETWORK PORTS:
# - 7099:3000: External port 7099 maps to internal Grafana port 3000
# - Port 7099: Accessible via reverse proxy or direct access
# - Port 3000: Standard Grafana web interface port
ports:
- 7099:3000
# DATA PERSISTENCE:
# - /volume2/metadata/docker/grafana/data: Grafana configuration and data
# - Contains: Dashboards, data sources, users, alerts, plugins
# - BACKUP CRITICAL: Contains all dashboard configurations
# - Volume2: Separate from Volume1 for redundancy
volumes:
- /volume2/metadata/docker/grafana/data:/var/lib/grafana:rw
environment:
# TIMEZONE CONFIGURATION:
# - TZ: Timezone for logs and dashboard timestamps
# - Must match system timezone for accurate time series data
TZ: America/Los_Angeles
# PLUGIN INSTALLATION:
# - GF_INSTALL_PLUGINS: Comma-separated list of plugins to install
# - grafana-clock-panel: Clock widget for dashboards
# - grafana-simple-json-datasource: JSON data source support
# - natel-discrete-panel: Discrete value visualization
# - grafana-piechart-panel: Pie chart visualizations
# - Plugins installed automatically on container start
GF_INSTALL_PLUGINS: grafana-clock-panel,grafana-simple-json-datasource,natel-discrete-panel,grafana-piechart-panel
# RESTART POLICY:
# - on-failure:5: Restart up to 5 times on failure
# - Critical for maintaining monitoring visibility
# - Prevents infinite restart loops
restart: on-failure:5
# ==========================================================================
# PROMETHEUS - Metrics Collection and Time Series Database
# ==========================================================================
prometheus:
# CONTAINER IMAGE:
# - prom/prometheus: Official Prometheus image
# - Latest stable version with security updates
# - Consider version pinning: prom/prometheus:v2.47.0
image: prom/prometheus
# PROMETHEUS CONFIGURATION:
# - --storage.tsdb.retention.time=60d: Keep metrics for 60 days
# - --config.file: Path to Prometheus configuration file
# - Retention period balances storage usage vs. historical data
command:
- '--storage.tsdb.retention.time=60d'
- '--config.file=/etc/prometheus/prometheus.yml'
# CONTAINER IDENTIFICATION:
# - Prometheus: Clear identification for monitoring
# - prometheus-server: Internal hostname for service communication
container_name: Prometheus
hostname: prometheus-server
# NETWORK CONFIGURATION:
# - grafana-net: Communication with Grafana for data queries
# - prometheus-net: Communication with exporters and targets
# - Dual network setup for security and organization
networks:
- grafana-net
- prometheus-net
# RESOURCE ALLOCATION:
# - mem_limit: 1GB (metrics database requires significant memory)
# - cpu_shares: 768 (high priority for metrics collection)
# - Memory usage scales with number of metrics and retention period
mem_limit: 1g
cpu_shares: 768
# SECURITY CONFIGURATION:
# - no-new-privileges: Prevents privilege escalation
# - user: 1026:100 (Synology permissions for data storage)
security_opt:
- no-new-privileges=true
user: 1026:100
# HEALTH MONITORING:
# - wget: Tests Prometheus web interface availability
# - Port 9090: Standard Prometheus web UI port
# - Ensures metrics collection is operational
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:9090/ || exit 1
# DATA PERSISTENCE:
# - /volume2/metadata/docker/grafana/prometheus: Time series database storage
# - /volume2/metadata/docker/grafana/prometheus.yml: Configuration file
# - BACKUP IMPORTANT: Contains historical metrics data
# - Configuration file defines scrape targets and rules
volumes:
- /volume2/metadata/docker/grafana/prometheus:/prometheus:rw
- /volume2/metadata/docker/grafana/prometheus.yml:/etc/prometheus/prometheus.yml:ro
# RESTART POLICY:
# - on-failure:5: Restart on failure to maintain metrics collection
# - Critical for continuous monitoring and alerting
restart: on-failure:5
node-exporter:
image: prom/node-exporter:latest
command:
- --collector.disable-defaults
- --collector.stat
- --collector.time
- --collector.cpu
- --collector.loadavg
- --collector.hwmon
- --collector.meminfo
- --collector.diskstats
container_name: Prometheus-Node
hostname: prometheus-node
networks:
- prometheus-net
mem_limit: 256m
mem_reservation: 64m
cpu_shares: 512
security_opt:
- no-new-privileges=true
read_only: true
user: 1026:100
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:9100/
restart: on-failure:5
  # SNMP exporter — translates Prometheus scrapes into SNMP walks of network
  # devices; module/auth definitions come from the mounted snmp.yml.
  snmp-exporter:
    image: prom/snmp-exporter:latest
    command:
      - '--config.file=/etc/snmp_exporter/snmp.yml'
    container_name: Prometheus-SNMP
    hostname: prometheus-snmp
    networks:
      - prometheus-net
    mem_limit: 256m
    mem_reservation: 64m
    cpu_shares: 512
    security_opt:
      - no-new-privileges:true
    read_only: true
    user: 1026:100
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:9116/ || exit 1
    volumes:
      - /volume2/metadata/docker/grafana/snmp:/etc/snmp_exporter/:ro
    restart: on-failure:5
cadvisor:
image: gcr.io/cadvisor/cadvisor:latest
command:
- '--docker_only=true'
container_name: Prometheus-cAdvisor
hostname: prometheus-cadvisor
networks:
- prometheus-net
mem_limit: 256m
mem_reservation: 64m
cpu_shares: 512
security_opt:
- no-new-privileges=true
read_only: true
volumes:
- /:/rootfs:ro
- /var/run:/var/run:ro
- /sys:/sys:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
restart: on-failure:5
  # Blackbox exporter — probes external endpoints (HTTP/ICMP) for
  # availability and latency; probe targets are defined in prometheus.yml.
  blackbox-exporter:
    image: prom/blackbox-exporter
    container_name: blackbox-exporter
    networks:
      - prometheus-net
    ports:
      - 9115:9115
    restart: unless-stopped
  # Speedtest exporter — periodic internet up/down/latency measurements;
  # scrape interval is throttled in prometheus.yml (speedtests are expensive).
  speedtest-exporter:
    image: miguelndecarvalho/speedtest-exporter
    container_name: speedtest-exporter
    networks:
      - prometheus-net
    ports:
      - 9798:9798
    restart: unless-stopped
networks:
grafana-net:
name: grafana-net
ipam:
config:
- subnet: 192.168.50.0/24
prometheus-net:
name: prometheus-net
ipam:
config:
- subnet: 192.168.51.0/24

View File

@@ -0,0 +1,100 @@
scrape_configs:
- job_name: prometheus
scrape_interval: 30s
static_configs:
- targets: ['localhost:9090']
labels:
group: 'prometheus'
  # Watchtower exposes its own metrics endpoint behind a static token.
  # NOTE(review): `bearer_token` is deprecated in recent Prometheus releases;
  # prefer `authorization: { credentials: ... }` — confirm server version
  # before migrating.
  - job_name: watchtower-docker
    scrape_interval: 10m
    metrics_path: /v1/metrics
    bearer_token: "REDACTED_TOKEN" # pragma: allowlist secret
    static_configs:
      - targets: ['watchtower:8080']
- job_name: node-docker
scrape_interval: 5s
static_configs:
- targets: ['prometheus-node:9100']
- job_name: cadvisor-docker
scrape_interval: 5s
static_configs:
- targets: ['prometheus-cadvisor:8080']
- job_name: snmp-docker
scrape_interval: 5s
static_configs:
- targets: ['192.168.0.200']
metrics_path: /snmp
params:
module: [synology]
auth: [snmpv3]
relabel_configs:
- source_labels: [__address__]
target_label: __param_target
- source_labels: [__param_target]
target_label: instance
- source_labels: [__param_target]
regex: (.*)
replacement: prometheus-snmp:9116
target_label: __address__
- job_name: homelab
static_configs:
- targets: ['192.168.0.210:9100']
labels:
instance: homelab
- job_name: LA_VM
static_configs:
- labels:
instance: LA_VM
targets:
- YOUR_WAN_IP:9100
- job_name: nuc
static_configs:
- labels:
instance: vish-concord-nuc
targets:
- 100.72.55.21:9100
- job_name: indolent-flower
static_configs:
- labels:
instance: indolent-flower
targets:
- 100.87.181.91:9100
- job_name: 'blackbox'
metrics_path: /probe
params:
module: [http_2xx]
static_configs:
- targets:
- https://google.com
- https://1.1.1.1
- http://192.168.0.1
labels:
group: 'external-probes'
relabel_configs:
- source_labels: [__address__]
target_label: __param_target
- source_labels: [__param_target]
target_label: instance
- target_label: __address__
replacement: blackbox-exporter:9115
- job_name: 'speedtest_atlantis'
scrape_interval: 15m
scrape_timeout: 90s # <-- extended timeout
static_configs:
- targets: ['speedtest-exporter:9798']
- job_name: 'speedtest_calypso'
scrape_interval: 15m
scrape_timeout: 90s # <-- extended timeout
static_configs:
- targets: ['192.168.0.250:9798']

View File

@@ -0,0 +1,38 @@
# Prometheus scrape configuration (NAS at 192.168.1.132 — template copy;
# adjust the SNMP target to your NAS IP).
scrape_configs:
  # Prometheus self-monitoring.
  - job_name: prometheus
    scrape_interval: 30s
    static_configs:
      - targets: ['localhost:9090']
        labels:
          group: 'prometheus'

  # Watchtower container-update metrics (token-protected endpoint).
  - job_name: watchtower-docker
    scrape_interval: 10m
    metrics_path: /v1/metrics
    # `bearer_token` is deprecated; `authorization` is the supported form
    # (Prometheus >= 2.26) and sends the identical Bearer header.
    authorization:
      type: Bearer
      credentials: "REDACTED_TOKEN" # your API_TOKEN # pragma: allowlist secret
    static_configs:
      - targets: ['watchtower:8080']

  - job_name: node-docker
    scrape_interval: 5s
    static_configs:
      - targets: ['prometheus-node:9100']

  - job_name: cadvisor-docker
    scrape_interval: 5s
    static_configs:
      - targets: ['prometheus-cadvisor:8080']

  # Synology NAS via SNMP exporter: the static target is rewritten into
  # the exporter's ?target= query parameter, copied into `instance`, and
  # the actual scrape is redirected to the snmp exporter container.
  - job_name: snmp-docker
    scrape_interval: 5s
    static_configs:
      - targets: ['192.168.1.132'] # Your NAS IP
    metrics_path: /snmp
    params:
      module: [synology]
      auth: [snmpv3]
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - source_labels: [__param_target]
        regex: (.*)
        replacement: prometheus-snmp:9116
        target_label: __address__

View File

@@ -0,0 +1,907 @@
# SNMPv3 credentials used by snmp_exporter when polling the Synology NAS.
# Placeholders (REDACTED_*) must be replaced with the real secrets; these
# must match the SNMPv3 user configured on the NAS.
auths:
  snmpv3:
    version: 3
    # authPriv: both authentication and privacy (encryption) are enabled.
    security_level: authPriv
    # NOTE(review): MD5 authentication and DES privacy are cryptographically
    # weak; prefer SHA (auth) and AES (priv). Changing them requires updating
    # the matching SNMPv3 user settings on the Synology side — confirm there
    # before switching.
    auth_protocol: MD5
    username: snmp-exporter
    password: "REDACTED_PASSWORD" # pragma: allowlist secret
    priv_protocol: DES
    priv_password: "REDACTED_PASSWORD" # pragma: allowlist secret
# Module definition: which OID subtrees are walked per scrape and (below,
# under `metrics:`) how the results map to Prometheus metrics. This section
# is typical snmp_exporter generator output — regenerate rather than
# hand-editing the metric list.
modules:
  synology:
    walk:
      - 1.3.6.1.2.1.2  # IF-MIB ifTable (interface counters/status)
      - 1.3.6.1.2.1.31.1.1  # IF-MIB ifXTable (ifName, HC 64-bit counters)
      - 1.3.6.1.4.1.6574.1  # Synology system (status, temperature, fans, DSM info)
      - 1.3.6.1.4.1.6574.2  # Synology disk table
      - 1.3.6.1.4.1.6574.3  # Synology RAID table
      - 1.3.6.1.4.1.6574.6  # Synology service table
metrics:
- name: ifNumber
oid: 1.3.6.1.2.1.2.1
type: gauge
help: The number of network interfaces (regardless of their current state) present on this system. - 1.3.6.1.2.1.2.1
- name: ifIndex
oid: 1.3.6.1.2.1.2.2.1.1
type: gauge
help: A unique value, greater than zero, for each interface - 1.3.6.1.2.1.2.2.1.1
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifDescr
oid: 1.3.6.1.2.1.2.2.1.2
type: DisplayString
help: A textual string containing information about the interface - 1.3.6.1.2.1.2.2.1.2
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifMtu
oid: 1.3.6.1.2.1.2.2.1.4
type: gauge
help: The size of the largest packet which can be sent/received on the interface, specified in octets - 1.3.6.1.2.1.2.2.1.4
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifSpeed
oid: 1.3.6.1.2.1.2.2.1.5
type: gauge
help: An estimate of the interface's current bandwidth in bits per second - 1.3.6.1.2.1.2.2.1.5
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifPhysAddress
oid: 1.3.6.1.2.1.2.2.1.6
type: PhysAddress48
help: The interface's address at its protocol sub-layer - 1.3.6.1.2.1.2.2.1.6
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifAdminStatus
oid: 1.3.6.1.2.1.2.2.1.7
type: gauge
help: The desired state of the interface - 1.3.6.1.2.1.2.2.1.7
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
enum_values:
1: up
2: down
3: testing
- name: ifOperStatus
oid: 1.3.6.1.2.1.2.2.1.8
type: gauge
help: The current operational state of the interface - 1.3.6.1.2.1.2.2.1.8
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
enum_values:
1: up
2: down
3: testing
4: unknown
5: dormant
6: notPresent
7: lowerLayerDown
- name: ifLastChange
oid: 1.3.6.1.2.1.2.2.1.9
type: gauge
help: The value of sysUpTime at the time the interface entered its current operational state - 1.3.6.1.2.1.2.2.1.9
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifInOctets
oid: 1.3.6.1.2.1.2.2.1.10
type: counter
help: The total number of octets received on the interface, including framing characters - 1.3.6.1.2.1.2.2.1.10
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifInUcastPkts
oid: 1.3.6.1.2.1.2.2.1.11
type: counter
help: The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were not addressed to a multicast
or broadcast address at this sub-layer - 1.3.6.1.2.1.2.2.1.11
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifInNUcastPkts
oid: 1.3.6.1.2.1.2.2.1.12
type: counter
help: The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were addressed to a multicast
or broadcast address at this sub-layer - 1.3.6.1.2.1.2.2.1.12
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifInDiscards
oid: 1.3.6.1.2.1.2.2.1.13
type: counter
help: The number of inbound packets which were chosen to be discarded even though no errors had been detected to prevent
their being deliverable to a higher-layer protocol - 1.3.6.1.2.1.2.2.1.13
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifInErrors
oid: 1.3.6.1.2.1.2.2.1.14
type: counter
help: For packet-oriented interfaces, the number of inbound packets that contained errors preventing them from being
deliverable to a higher-layer protocol - 1.3.6.1.2.1.2.2.1.14
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifInUnknownProtos
oid: 1.3.6.1.2.1.2.2.1.15
type: counter
help: For packet-oriented interfaces, the number of packets received via the interface which were discarded because
of an unknown or unsupported protocol - 1.3.6.1.2.1.2.2.1.15
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifOutOctets
oid: 1.3.6.1.2.1.2.2.1.16
type: counter
help: The total number of octets transmitted out of the interface, including framing characters - 1.3.6.1.2.1.2.2.1.16
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifOutUcastPkts
oid: 1.3.6.1.2.1.2.2.1.17
type: counter
help: The total number of packets that higher-level protocols requested be transmitted, and which were not addressed
to a multicast or broadcast address at this sub-layer, including those that were discarded or not sent - 1.3.6.1.2.1.2.2.1.17
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifOutNUcastPkts
oid: 1.3.6.1.2.1.2.2.1.18
type: counter
help: The total number of packets that higher-level protocols requested be transmitted, and which were addressed to
a multicast or broadcast address at this sub-layer, including those that were discarded or not sent - 1.3.6.1.2.1.2.2.1.18
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifOutDiscards
oid: 1.3.6.1.2.1.2.2.1.19
type: counter
help: The number of outbound packets which were chosen to be discarded even though no errors had been detected to
prevent their being transmitted - 1.3.6.1.2.1.2.2.1.19
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifOutErrors
oid: 1.3.6.1.2.1.2.2.1.20
type: counter
help: For packet-oriented interfaces, the number of outbound packets that could not be transmitted because of errors
- 1.3.6.1.2.1.2.2.1.20
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifOutQLen
oid: 1.3.6.1.2.1.2.2.1.21
type: gauge
help: The length of the output packet queue (in packets). - 1.3.6.1.2.1.2.2.1.21
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifSpecific
oid: 1.3.6.1.2.1.2.2.1.22
type: OctetString
help: A reference to MIB definitions specific to the particular media being used to realize the interface - 1.3.6.1.2.1.2.2.1.22
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
help: The textual name of the interface - 1.3.6.1.2.1.31.1.1.1.1
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifInMulticastPkts
oid: 1.3.6.1.2.1.31.1.1.1.2
type: counter
help: The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were addressed to a multicast
address at this sub-layer - 1.3.6.1.2.1.31.1.1.1.2
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifInBroadcastPkts
oid: 1.3.6.1.2.1.31.1.1.1.3
type: counter
help: The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were addressed to a broadcast
address at this sub-layer - 1.3.6.1.2.1.31.1.1.1.3
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifOutMulticastPkts
oid: 1.3.6.1.2.1.31.1.1.1.4
type: counter
help: The total number of packets that higher-level protocols requested be transmitted, and which were addressed to
a multicast address at this sub-layer, including those that were discarded or not sent - 1.3.6.1.2.1.31.1.1.1.4
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifOutBroadcastPkts
oid: 1.3.6.1.2.1.31.1.1.1.5
type: counter
help: The total number of packets that higher-level protocols requested be transmitted, and which were addressed to
a broadcast address at this sub-layer, including those that were discarded or not sent - 1.3.6.1.2.1.31.1.1.1.5
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifHCInOctets
oid: 1.3.6.1.2.1.31.1.1.1.6
type: counter
help: The total number of octets received on the interface, including framing characters - 1.3.6.1.2.1.31.1.1.1.6
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifHCInUcastPkts
oid: 1.3.6.1.2.1.31.1.1.1.7
type: counter
help: The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were not addressed to a multicast
or broadcast address at this sub-layer - 1.3.6.1.2.1.31.1.1.1.7
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifHCInMulticastPkts
oid: 1.3.6.1.2.1.31.1.1.1.8
type: counter
help: The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were addressed to a multicast
address at this sub-layer - 1.3.6.1.2.1.31.1.1.1.8
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifHCInBroadcastPkts
oid: 1.3.6.1.2.1.31.1.1.1.9
type: counter
help: The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were addressed to a broadcast
address at this sub-layer - 1.3.6.1.2.1.31.1.1.1.9
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifHCOutOctets
oid: 1.3.6.1.2.1.31.1.1.1.10
type: counter
help: The total number of octets transmitted out of the interface, including framing characters - 1.3.6.1.2.1.31.1.1.1.10
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: REDACTED_APP_PASSWORD
oid: 1.3.6.1.2.1.31.1.1.1.11
type: counter
help: The total number of packets that higher-level protocols requested be transmitted, and which were not addressed
to a multicast or broadcast address at this sub-layer, including those that were discarded or not sent - 1.3.6.1.2.1.31.1.1.1.11
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifHCOutMulticastPkts
oid: 1.3.6.1.2.1.31.1.1.1.12
type: counter
help: The total number of packets that higher-level protocols requested be transmitted, and which were addressed to
a multicast address at this sub-layer, including those that were discarded or not sent - 1.3.6.1.2.1.31.1.1.1.12
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifHCOutBroadcastPkts
oid: 1.3.6.1.2.1.31.1.1.1.13
type: counter
help: The total number of packets that higher-level protocols requested be transmitted, and which were addressed to
a broadcast address at this sub-layer, including those that were discarded or not sent - 1.3.6.1.2.1.31.1.1.1.13
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifLinkUpDownTrapEnable
oid: 1.3.6.1.2.1.31.1.1.1.14
type: gauge
help: Indicates whether linkUp/linkDown traps should be generated for this interface - 1.3.6.1.2.1.31.1.1.1.14
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
enum_values:
1: enabled
2: disabled
- name: ifHighSpeed
oid: 1.3.6.1.2.1.31.1.1.1.15
type: gauge
help: An estimate of the interface's current bandwidth in units of 1,000,000 bits per second - 1.3.6.1.2.1.31.1.1.1.15
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifPromiscuousMode
oid: 1.3.6.1.2.1.31.1.1.1.16
type: gauge
help: This object has a value of false(2) if this interface only accepts packets/frames that are addressed to this
station - 1.3.6.1.2.1.31.1.1.1.16
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
enum_values:
1: 'true'
2: 'false'
- name: ifConnectorPresent
oid: 1.3.6.1.2.1.31.1.1.1.17
type: gauge
help: This object has the value 'true(1)' if the interface sublayer has a physical connector and the value 'false(2)'
otherwise. - 1.3.6.1.2.1.31.1.1.1.17
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
enum_values:
1: 'true'
2: 'false'
- name: ifAlias
oid: 1.3.6.1.2.1.31.1.1.1.18
type: DisplayString
help: This object is an 'alias' name for the interface as specified by a network manager, and provides a non-volatile
'handle' for the interface - 1.3.6.1.2.1.31.1.1.1.18
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifCounterDiscontinuityTime
oid: 1.3.6.1.2.1.31.1.1.1.19
type: gauge
help: The value of sysUpTime on the most recent occasion at which any one or more of this interface's counters suffered
a discontinuity - 1.3.6.1.2.1.31.1.1.1.19
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: systemStatus
oid: 1.3.6.1.4.1.6574.1.1
type: gauge
help: Synology system status Each meanings of status represented describe below - 1.3.6.1.4.1.6574.1.1
- name: temperature
oid: 1.3.6.1.4.1.6574.1.2
type: gauge
help: Synology system temperature The temperature of Disk Station uses Celsius degree. - 1.3.6.1.4.1.6574.1.2
- name: powerStatus
oid: 1.3.6.1.4.1.6574.1.3
type: gauge
help: Synology power status Each meanings of status represented describe below - 1.3.6.1.4.1.6574.1.3
- name: systemFanStatus
oid: 1.3.6.1.4.1.6574.1.4.1
type: gauge
help: Synology system fan status Each meanings of status represented describe below - 1.3.6.1.4.1.6574.1.4.1
- name: cpuFanStatus
oid: 1.3.6.1.4.1.6574.1.4.2
type: gauge
help: Synology cpu fan status Each meanings of status represented describe below - 1.3.6.1.4.1.6574.1.4.2
- name: modelName
oid: 1.3.6.1.4.1.6574.1.5.1
type: DisplayString
help: The Model name of this NAS - 1.3.6.1.4.1.6574.1.5.1
- name: serialNumber
oid: 1.3.6.1.4.1.6574.1.5.2
type: DisplayString
help: The serial number of this NAS - 1.3.6.1.4.1.6574.1.5.2
- name: version
oid: 1.3.6.1.4.1.6574.1.5.3
type: DisplayString
help: The version of this DSM - 1.3.6.1.4.1.6574.1.5.3
- name: REDACTED_APP_PASSWORD
oid: 1.3.6.1.4.1.6574.1.5.4
type: gauge
help: This oid is for checking whether there is a latest DSM can be upgraded - 1.3.6.1.4.1.6574.1.5.4
- name: REDACTED_APP_PASSWORD
oid: 1.3.6.1.4.1.6574.1.6
type: gauge
help: Synology system controller number Controller A(0) Controller B(1) - 1.3.6.1.4.1.6574.1.6
- name: diskIndex
oid: 1.3.6.1.4.1.6574.2.1.1.1
type: gauge
help: The index of disk table - 1.3.6.1.4.1.6574.2.1.1.1
indexes:
- labelname: diskIndex
type: gauge
lookups:
- labels:
- diskIndex
labelname: diskID
oid: 1.3.6.1.4.1.6574.2.1.1.2
type: DisplayString
- labels: []
labelname: diskIndex
- name: diskID
oid: 1.3.6.1.4.1.6574.2.1.1.2
type: DisplayString
help: Synology disk ID The ID of disk is assigned by disk Station. - 1.3.6.1.4.1.6574.2.1.1.2
indexes:
- labelname: diskIndex
type: gauge
lookups:
- labels:
- diskIndex
labelname: diskID
oid: 1.3.6.1.4.1.6574.2.1.1.2
type: DisplayString
- labels: []
labelname: diskIndex
- name: diskModel
oid: 1.3.6.1.4.1.6574.2.1.1.3
type: DisplayString
help: Synology disk model name The disk model name will be showed here. - 1.3.6.1.4.1.6574.2.1.1.3
indexes:
- labelname: diskIndex
type: gauge
lookups:
- labels:
- diskIndex
labelname: diskID
oid: 1.3.6.1.4.1.6574.2.1.1.2
type: DisplayString
- labels: []
labelname: diskIndex
- name: diskType
oid: 1.3.6.1.4.1.6574.2.1.1.4
type: DisplayString
help: Synology disk type The type of disk will be showed here, including SATA, SSD and so on. - 1.3.6.1.4.1.6574.2.1.1.4
indexes:
- labelname: diskIndex
type: gauge
lookups:
- labels:
- diskIndex
labelname: diskID
oid: 1.3.6.1.4.1.6574.2.1.1.2
type: DisplayString
- labels: []
labelname: diskIndex
- name: diskStatus
oid: 1.3.6.1.4.1.6574.2.1.1.5
type: gauge
help: Synology disk status. Normal-1 Initialized-2 NotInitialized-3 SystemPartitionFailed-4 Crashed-5 - 1.3.6.1.4.1.6574.2.1.1.5
indexes:
- labelname: diskIndex
type: gauge
lookups:
- labels:
- diskIndex
labelname: diskID
oid: 1.3.6.1.4.1.6574.2.1.1.2
type: DisplayString
- labels: []
labelname: diskIndex
- name: diskTemperature
oid: 1.3.6.1.4.1.6574.2.1.1.6
type: gauge
help: Synology disk temperature The temperature of each disk uses Celsius degree. - 1.3.6.1.4.1.6574.2.1.1.6
indexes:
- labelname: diskIndex
type: gauge
lookups:
- labels:
- diskIndex
labelname: diskID
oid: 1.3.6.1.4.1.6574.2.1.1.2
type: DisplayString
- labels: []
labelname: diskIndex
- name: raidIndex
oid: 1.3.6.1.4.1.6574.3.1.1.1
type: gauge
help: The index of raid table - 1.3.6.1.4.1.6574.3.1.1.1
indexes:
- labelname: raidIndex
type: gauge
lookups:
- labels:
- raidIndex
labelname: raidName
oid: 1.3.6.1.4.1.6574.3.1.1.2
type: DisplayString
- name: raidName
oid: 1.3.6.1.4.1.6574.3.1.1.2
type: DisplayString
help: Synology raid name The name of each raid will be showed here. - 1.3.6.1.4.1.6574.3.1.1.2
indexes:
- labelname: raidIndex
type: gauge
lookups:
- labels:
- raidIndex
labelname: raidName
oid: 1.3.6.1.4.1.6574.3.1.1.2
type: DisplayString
- name: raidStatus
oid: 1.3.6.1.4.1.6574.3.1.1.3
type: gauge
help: Synology Raid status Each meanings of status represented describe below - 1.3.6.1.4.1.6574.3.1.1.3
indexes:
- labelname: raidIndex
type: gauge
lookups:
- labels:
- raidIndex
labelname: raidName
oid: 1.3.6.1.4.1.6574.3.1.1.2
type: DisplayString
- name: raidFreeSize
oid: 1.3.6.1.4.1.6574.3.1.1.4
type: gauge
help: Synology raid freesize Free space in bytes. - 1.3.6.1.4.1.6574.3.1.1.4
indexes:
- labelname: raidIndex
type: gauge
lookups:
- labels:
- raidIndex
labelname: raidName
oid: 1.3.6.1.4.1.6574.3.1.1.2
type: DisplayString
- name: raidTotalSize
oid: 1.3.6.1.4.1.6574.3.1.1.5
type: gauge
help: Synology raid totalsize Total space in bytes. - 1.3.6.1.4.1.6574.3.1.1.5
indexes:
- labelname: raidIndex
type: gauge
lookups:
- labels:
- raidIndex
labelname: raidName
oid: 1.3.6.1.4.1.6574.3.1.1.2
type: DisplayString
- name: REDACTED_APP_PASSWORD
oid: 1.3.6.1.4.1.6574.6.1.1.1
type: gauge
help: Service info index - 1.3.6.1.4.1.6574.6.1.1.1
indexes:
- labelname: REDACTED_APP_PASSWORD
type: gauge
lookups:
- labels:
- REDACTED_APP_PASSWORD
labelname: serviceName
oid: 1.3.6.1.4.1.6574.6.1.1.2
type: DisplayString
- labels: []
labelname: REDACTED_APP_PASSWORD
- name: serviceName
oid: 1.3.6.1.4.1.6574.6.1.1.2
type: DisplayString
help: Service name - 1.3.6.1.4.1.6574.6.1.1.2
indexes:
- labelname: REDACTED_APP_PASSWORD
type: gauge
lookups:
- labels:
- REDACTED_APP_PASSWORD
labelname: serviceName
oid: 1.3.6.1.4.1.6574.6.1.1.2
type: DisplayString
- labels: []
labelname: REDACTED_APP_PASSWORD
- name: serviceUsers
oid: 1.3.6.1.4.1.6574.6.1.1.3
type: gauge
help: Number of users using this service - 1.3.6.1.4.1.6574.6.1.1.3
indexes:
- labelname: REDACTED_APP_PASSWORD
type: gauge
lookups:
- labels:
- REDACTED_APP_PASSWORD
labelname: serviceName
oid: 1.3.6.1.4.1.6574.6.1.1.2
type: DisplayString
- labels: []
labelname: REDACTED_APP_PASSWORD

View File

@@ -0,0 +1,907 @@
auths:
snmpv3:
version: 3
security_level: authPriv
auth_protocol: MD5
username: snmp-exporter
password: "REDACTED_PASSWORD" # pragma: allowlist secret
priv_protocol: DES
priv_password: "REDACTED_PASSWORD" # pragma: allowlist secret
modules:
synology:
walk:
- 1.3.6.1.2.1.2
- 1.3.6.1.2.1.31.1.1
- 1.3.6.1.4.1.6574.1
- 1.3.6.1.4.1.6574.2
- 1.3.6.1.4.1.6574.3
- 1.3.6.1.4.1.6574.6
metrics:
- name: ifNumber
oid: 1.3.6.1.2.1.2.1
type: gauge
help: The number of network interfaces (regardless of their current state) present on this system. - 1.3.6.1.2.1.2.1
- name: ifIndex
oid: 1.3.6.1.2.1.2.2.1.1
type: gauge
help: A unique value, greater than zero, for each interface - 1.3.6.1.2.1.2.2.1.1
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifDescr
oid: 1.3.6.1.2.1.2.2.1.2
type: DisplayString
help: A textual string containing information about the interface - 1.3.6.1.2.1.2.2.1.2
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifMtu
oid: 1.3.6.1.2.1.2.2.1.4
type: gauge
help: The size of the largest packet which can be sent/received on the interface, specified in octets - 1.3.6.1.2.1.2.2.1.4
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifSpeed
oid: 1.3.6.1.2.1.2.2.1.5
type: gauge
help: An estimate of the interface's current bandwidth in bits per second - 1.3.6.1.2.1.2.2.1.5
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifPhysAddress
oid: 1.3.6.1.2.1.2.2.1.6
type: PhysAddress48
help: The interface's address at its protocol sub-layer - 1.3.6.1.2.1.2.2.1.6
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifAdminStatus
oid: 1.3.6.1.2.1.2.2.1.7
type: gauge
help: The desired state of the interface - 1.3.6.1.2.1.2.2.1.7
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
enum_values:
1: up
2: down
3: testing
- name: ifOperStatus
oid: 1.3.6.1.2.1.2.2.1.8
type: gauge
help: The current operational state of the interface - 1.3.6.1.2.1.2.2.1.8
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
enum_values:
1: up
2: down
3: testing
4: unknown
5: dormant
6: notPresent
7: lowerLayerDown
- name: ifLastChange
oid: 1.3.6.1.2.1.2.2.1.9
type: gauge
help: The value of sysUpTime at the time the interface entered its current operational state - 1.3.6.1.2.1.2.2.1.9
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifInOctets
oid: 1.3.6.1.2.1.2.2.1.10
type: counter
help: The total number of octets received on the interface, including framing characters - 1.3.6.1.2.1.2.2.1.10
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifInUcastPkts
oid: 1.3.6.1.2.1.2.2.1.11
type: counter
help: The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were not addressed to a multicast
or broadcast address at this sub-layer - 1.3.6.1.2.1.2.2.1.11
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifInNUcastPkts
oid: 1.3.6.1.2.1.2.2.1.12
type: counter
help: The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were addressed to a multicast
or broadcast address at this sub-layer - 1.3.6.1.2.1.2.2.1.12
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifInDiscards
oid: 1.3.6.1.2.1.2.2.1.13
type: counter
help: The number of inbound packets which were chosen to be discarded even though no errors had been detected to prevent
their being deliverable to a higher-layer protocol - 1.3.6.1.2.1.2.2.1.13
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifInErrors
oid: 1.3.6.1.2.1.2.2.1.14
type: counter
help: For packet-oriented interfaces, the number of inbound packets that contained errors preventing them from being
deliverable to a higher-layer protocol - 1.3.6.1.2.1.2.2.1.14
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifInUnknownProtos
oid: 1.3.6.1.2.1.2.2.1.15
type: counter
help: For packet-oriented interfaces, the number of packets received via the interface which were discarded because
of an unknown or unsupported protocol - 1.3.6.1.2.1.2.2.1.15
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifOutOctets
oid: 1.3.6.1.2.1.2.2.1.16
type: counter
help: The total number of octets transmitted out of the interface, including framing characters - 1.3.6.1.2.1.2.2.1.16
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifOutUcastPkts
oid: 1.3.6.1.2.1.2.2.1.17
type: counter
help: The total number of packets that higher-level protocols requested be transmitted, and which were not addressed
to a multicast or broadcast address at this sub-layer, including those that were discarded or not sent - 1.3.6.1.2.1.2.2.1.17
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifOutNUcastPkts
oid: 1.3.6.1.2.1.2.2.1.18
type: counter
help: The total number of packets that higher-level protocols requested be transmitted, and which were addressed to
a multicast or broadcast address at this sub-layer, including those that were discarded or not sent - 1.3.6.1.2.1.2.2.1.18
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifOutDiscards
oid: 1.3.6.1.2.1.2.2.1.19
type: counter
help: The number of outbound packets which were chosen to be discarded even though no errors had been detected to
prevent their being transmitted - 1.3.6.1.2.1.2.2.1.19
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifOutErrors
oid: 1.3.6.1.2.1.2.2.1.20
type: counter
help: For packet-oriented interfaces, the number of outbound packets that could not be transmitted because of errors
- 1.3.6.1.2.1.2.2.1.20
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifOutQLen
oid: 1.3.6.1.2.1.2.2.1.21
type: gauge
help: The length of the output packet queue (in packets). - 1.3.6.1.2.1.2.2.1.21
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifSpecific
oid: 1.3.6.1.2.1.2.2.1.22
type: OctetString
help: A reference to MIB definitions specific to the particular media being used to realize the interface - 1.3.6.1.2.1.2.2.1.22
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
help: The textual name of the interface - 1.3.6.1.2.1.31.1.1.1.1
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifInMulticastPkts
oid: 1.3.6.1.2.1.31.1.1.1.2
type: counter
help: The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were addressed to a multicast
address at this sub-layer - 1.3.6.1.2.1.31.1.1.1.2
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifInBroadcastPkts
oid: 1.3.6.1.2.1.31.1.1.1.3
type: counter
help: The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were addressed to a broadcast
address at this sub-layer - 1.3.6.1.2.1.31.1.1.1.3
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifOutMulticastPkts
oid: 1.3.6.1.2.1.31.1.1.1.4
type: counter
help: The total number of packets that higher-level protocols requested be transmitted, and which were addressed to
a multicast address at this sub-layer, including those that were discarded or not sent - 1.3.6.1.2.1.31.1.1.1.4
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifOutBroadcastPkts
oid: 1.3.6.1.2.1.31.1.1.1.5
type: counter
help: The total number of packets that higher-level protocols requested be transmitted, and which were addressed to
a broadcast address at this sub-layer, including those that were discarded or not sent - 1.3.6.1.2.1.31.1.1.1.5
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifHCInOctets
oid: 1.3.6.1.2.1.31.1.1.1.6
type: counter
help: The total number of octets received on the interface, including framing characters - 1.3.6.1.2.1.31.1.1.1.6
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifHCInUcastPkts
oid: 1.3.6.1.2.1.31.1.1.1.7
type: counter
help: The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were not addressed to a multicast
or broadcast address at this sub-layer - 1.3.6.1.2.1.31.1.1.1.7
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifHCInMulticastPkts
oid: 1.3.6.1.2.1.31.1.1.1.8
type: counter
help: The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were addressed to a multicast
address at this sub-layer - 1.3.6.1.2.1.31.1.1.1.8
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifHCInBroadcastPkts
oid: 1.3.6.1.2.1.31.1.1.1.9
type: counter
help: The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were addressed to a broadcast
address at this sub-layer - 1.3.6.1.2.1.31.1.1.1.9
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifHCOutOctets
oid: 1.3.6.1.2.1.31.1.1.1.10
type: counter
help: The total number of octets transmitted out of the interface, including framing characters - 1.3.6.1.2.1.31.1.1.1.10
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifHCOutUcastPkts
oid: 1.3.6.1.2.1.31.1.1.1.11
type: counter
help: The total number of packets that higher-level protocols requested be transmitted, and which were not addressed
to a multicast or broadcast address at this sub-layer, including those that were discarded or not sent - 1.3.6.1.2.1.31.1.1.1.11
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifHCOutMulticastPkts
oid: 1.3.6.1.2.1.31.1.1.1.12
type: counter
help: The total number of packets that higher-level protocols requested be transmitted, and which were addressed to
a multicast address at this sub-layer, including those that were discarded or not sent - 1.3.6.1.2.1.31.1.1.1.12
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifHCOutBroadcastPkts
oid: 1.3.6.1.2.1.31.1.1.1.13
type: counter
help: The total number of packets that higher-level protocols requested be transmitted, and which were addressed to
a broadcast address at this sub-layer, including those that were discarded or not sent - 1.3.6.1.2.1.31.1.1.1.13
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifLinkUpDownTrapEnable
oid: 1.3.6.1.2.1.31.1.1.1.14
type: gauge
help: Indicates whether linkUp/linkDown traps should be generated for this interface - 1.3.6.1.2.1.31.1.1.1.14
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
enum_values:
1: enabled
2: disabled
- name: ifHighSpeed
oid: 1.3.6.1.2.1.31.1.1.1.15
type: gauge
help: An estimate of the interface's current bandwidth in units of 1,000,000 bits per second - 1.3.6.1.2.1.31.1.1.1.15
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifPromiscuousMode
oid: 1.3.6.1.2.1.31.1.1.1.16
type: gauge
help: This object has a value of false(2) if this interface only accepts packets/frames that are addressed to this
station - 1.3.6.1.2.1.31.1.1.1.16
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
enum_values:
1: 'true'
2: 'false'
- name: ifConnectorPresent
oid: 1.3.6.1.2.1.31.1.1.1.17
type: gauge
help: This object has the value 'true(1)' if the interface sublayer has a physical connector and the value 'false(2)'
otherwise. - 1.3.6.1.2.1.31.1.1.1.17
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
enum_values:
1: 'true'
2: 'false'
- name: ifAlias
oid: 1.3.6.1.2.1.31.1.1.1.18
type: DisplayString
help: This object is an 'alias' name for the interface as specified by a network manager, and provides a non-volatile
'handle' for the interface - 1.3.6.1.2.1.31.1.1.1.18
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: ifCounterDiscontinuityTime
oid: 1.3.6.1.2.1.31.1.1.1.19
type: gauge
help: The value of sysUpTime on the most recent occasion at which any one or more of this interface's counters suffered
a discontinuity - 1.3.6.1.2.1.31.1.1.1.19
indexes:
- labelname: ifIndex
type: gauge
lookups:
- labels:
- ifIndex
labelname: ifName
oid: 1.3.6.1.2.1.31.1.1.1.1
type: DisplayString
- labels: []
labelname: ifIndex
- name: systemStatus
oid: 1.3.6.1.4.1.6574.1.1
type: gauge
help: Synology system status Each meanings of status represented describe below - 1.3.6.1.4.1.6574.1.1
- name: temperature
oid: 1.3.6.1.4.1.6574.1.2
type: gauge
help: Synology system temperature The temperature of Disk Station uses Celsius degree. - 1.3.6.1.4.1.6574.1.2
- name: powerStatus
oid: 1.3.6.1.4.1.6574.1.3
type: gauge
help: Synology power status Each meanings of status represented describe below - 1.3.6.1.4.1.6574.1.3
- name: systemFanStatus
oid: 1.3.6.1.4.1.6574.1.4.1
type: gauge
help: Synology system fan status Each meanings of status represented describe below - 1.3.6.1.4.1.6574.1.4.1
- name: cpuFanStatus
oid: 1.3.6.1.4.1.6574.1.4.2
type: gauge
help: Synology cpu fan status Each meanings of status represented describe below - 1.3.6.1.4.1.6574.1.4.2
- name: modelName
oid: 1.3.6.1.4.1.6574.1.5.1
type: DisplayString
help: The Model name of this NAS - 1.3.6.1.4.1.6574.1.5.1
- name: serialNumber
oid: 1.3.6.1.4.1.6574.1.5.2
type: DisplayString
help: The serial number of this NAS - 1.3.6.1.4.1.6574.1.5.2
- name: version
oid: 1.3.6.1.4.1.6574.1.5.3
type: DisplayString
help: The version of this DSM - 1.3.6.1.4.1.6574.1.5.3
- name: upgradeAvailable
oid: 1.3.6.1.4.1.6574.1.5.4
type: gauge
help: This oid is for checking whether there is a latest DSM can be upgraded - 1.3.6.1.4.1.6574.1.5.4
- name: controllerNumber
oid: 1.3.6.1.4.1.6574.1.6
type: gauge
help: Synology system controller number Controller A(0) Controller B(1) - 1.3.6.1.4.1.6574.1.6
- name: diskIndex
oid: 1.3.6.1.4.1.6574.2.1.1.1
type: gauge
help: The index of disk table - 1.3.6.1.4.1.6574.2.1.1.1
indexes:
- labelname: diskIndex
type: gauge
lookups:
- labels:
- diskIndex
labelname: diskID
oid: 1.3.6.1.4.1.6574.2.1.1.2
type: DisplayString
- labels: []
labelname: diskIndex
- name: diskID
oid: 1.3.6.1.4.1.6574.2.1.1.2
type: DisplayString
help: Synology disk ID The ID of disk is assigned by disk Station. - 1.3.6.1.4.1.6574.2.1.1.2
indexes:
- labelname: diskIndex
type: gauge
lookups:
- labels:
- diskIndex
labelname: diskID
oid: 1.3.6.1.4.1.6574.2.1.1.2
type: DisplayString
- labels: []
labelname: diskIndex
- name: diskModel
oid: 1.3.6.1.4.1.6574.2.1.1.3
type: DisplayString
help: Synology disk model name The disk model name will be showed here. - 1.3.6.1.4.1.6574.2.1.1.3
indexes:
- labelname: diskIndex
type: gauge
lookups:
- labels:
- diskIndex
labelname: diskID
oid: 1.3.6.1.4.1.6574.2.1.1.2
type: DisplayString
- labels: []
labelname: diskIndex
- name: diskType
oid: 1.3.6.1.4.1.6574.2.1.1.4
type: DisplayString
help: Synology disk type The type of disk will be showed here, including SATA, SSD and so on. - 1.3.6.1.4.1.6574.2.1.1.4
indexes:
- labelname: diskIndex
type: gauge
lookups:
- labels:
- diskIndex
labelname: diskID
oid: 1.3.6.1.4.1.6574.2.1.1.2
type: DisplayString
- labels: []
labelname: diskIndex
- name: diskStatus
oid: 1.3.6.1.4.1.6574.2.1.1.5
type: gauge
help: Synology disk status. Normal-1 Initialized-2 NotInitialized-3 SystemPartitionFailed-4 Crashed-5 - 1.3.6.1.4.1.6574.2.1.1.5
indexes:
- labelname: diskIndex
type: gauge
lookups:
- labels:
- diskIndex
labelname: diskID
oid: 1.3.6.1.4.1.6574.2.1.1.2
type: DisplayString
- labels: []
labelname: diskIndex
- name: diskTemperature
oid: 1.3.6.1.4.1.6574.2.1.1.6
type: gauge
help: Synology disk temperature The temperature of each disk uses Celsius degree. - 1.3.6.1.4.1.6574.2.1.1.6
indexes:
- labelname: diskIndex
type: gauge
lookups:
- labels:
- diskIndex
labelname: diskID
oid: 1.3.6.1.4.1.6574.2.1.1.2
type: DisplayString
- labels: []
labelname: diskIndex
- name: raidIndex
oid: 1.3.6.1.4.1.6574.3.1.1.1
type: gauge
help: The index of raid table - 1.3.6.1.4.1.6574.3.1.1.1
indexes:
- labelname: raidIndex
type: gauge
lookups:
- labels:
- raidIndex
labelname: raidName
oid: 1.3.6.1.4.1.6574.3.1.1.2
type: DisplayString
- name: raidName
oid: 1.3.6.1.4.1.6574.3.1.1.2
type: DisplayString
help: Synology raid name The name of each raid will be showed here. - 1.3.6.1.4.1.6574.3.1.1.2
indexes:
- labelname: raidIndex
type: gauge
lookups:
- labels:
- raidIndex
labelname: raidName
oid: 1.3.6.1.4.1.6574.3.1.1.2
type: DisplayString
- name: raidStatus
oid: 1.3.6.1.4.1.6574.3.1.1.3
type: gauge
help: Synology Raid status Each meanings of status represented describe below - 1.3.6.1.4.1.6574.3.1.1.3
indexes:
- labelname: raidIndex
type: gauge
lookups:
- labels:
- raidIndex
labelname: raidName
oid: 1.3.6.1.4.1.6574.3.1.1.2
type: DisplayString
- name: raidFreeSize
oid: 1.3.6.1.4.1.6574.3.1.1.4
type: gauge
help: Synology raid freesize Free space in bytes. - 1.3.6.1.4.1.6574.3.1.1.4
indexes:
- labelname: raidIndex
type: gauge
lookups:
- labels:
- raidIndex
labelname: raidName
oid: 1.3.6.1.4.1.6574.3.1.1.2
type: DisplayString
- name: raidTotalSize
oid: 1.3.6.1.4.1.6574.3.1.1.5
type: gauge
help: Synology raid totalsize Total space in bytes. - 1.3.6.1.4.1.6574.3.1.1.5
indexes:
- labelname: raidIndex
type: gauge
lookups:
- labels:
- raidIndex
labelname: raidName
oid: 1.3.6.1.4.1.6574.3.1.1.2
type: DisplayString
- name: serviceInfoIndex
oid: 1.3.6.1.4.1.6574.6.1.1.1
type: gauge
help: Service info index - 1.3.6.1.4.1.6574.6.1.1.1
indexes:
- labelname: serviceInfoIndex
type: gauge
lookups:
- labels:
- serviceInfoIndex
labelname: serviceName
oid: 1.3.6.1.4.1.6574.6.1.1.2
type: DisplayString
- labels: []
labelname: serviceInfoIndex
- name: serviceName
oid: 1.3.6.1.4.1.6574.6.1.1.2
type: DisplayString
help: Service name - 1.3.6.1.4.1.6574.6.1.1.2
indexes:
- labelname: serviceInfoIndex
type: gauge
lookups:
- labels:
- serviceInfoIndex
labelname: serviceName
oid: 1.3.6.1.4.1.6574.6.1.1.2
type: DisplayString
- labels: []
labelname: serviceInfoIndex
- name: serviceUsers
oid: 1.3.6.1.4.1.6574.6.1.1.3
type: gauge
help: Number of users using this service - 1.3.6.1.4.1.6574.6.1.1.3
indexes:
- labelname: serviceInfoIndex
type: gauge
lookups:
- labels:
- serviceInfoIndex
labelname: serviceName
oid: 1.3.6.1.4.1.6574.6.1.1.2
type: DisplayString
- labels: []
labelname: serviceInfoIndex

View File

@@ -0,0 +1,35 @@
# Homarr - Modern dashboard for your homelab
# Port: 7575
# Docs: https://homarr.dev/
#
# Data stored in: /volume2/metadata/docker/homarr/appdata
# Database: SQLite at /appdata/db/db.sqlite
services:
homarr:
image: ghcr.io/homarr-labs/homarr:latest
container_name: homarr
environment:
- TZ=America/Los_Angeles
- SECRET_ENCRYPTION_KEY=a393eb842415bbd2f6bcf74bREDACTED_GITEA_TOKEN # pragma: allowlist secret
# Authentik SSO via native OIDC — credentials kept as fallback if Authentik is down
- AUTH_PROVIDER=oidc,credentials
- AUTH_OIDC_ISSUER=https://sso.vish.gg/application/o/homarr/
- AUTH_OIDC_CLIENT_ID="REDACTED_CLIENT_ID"
- AUTH_OIDC_CLIENT_SECRET="REDACTED_CLIENT_SECRET" # pragma: allowlist secret
- AUTH_OIDC_CLIENT_NAME=Authentik
- AUTH_OIDC_AUTO_LOGIN=false
- AUTH_LOGOUT_REDIRECT_URL=https://sso.vish.gg/application/o/homarr/end-session/
- AUTH_OIDC_ADMIN_GROUP=Homarr Admins
- AUTH_OIDC_OWNER_GROUP=Homarr Admins
volumes:
- /volume2/metadata/docker/homarr/appdata:/appdata
- /var/run/docker.sock:/var/run/docker.sock:ro
ports:
- "7575:7575"
dns:
- 192.168.0.200 # Atlantis AdGuard (resolves .tail.vish.gg and .vish.local)
- 192.168.0.250 # Calypso AdGuard (backup)
restart: unless-stopped
security_opt:
- no-new-privileges:true

View File

@@ -0,0 +1,104 @@
# Immich - Photo/video backup solution
# URL: http://192.168.0.200:8212 (LAN only)
# Port: 2283
# Google Photos alternative with ML-powered features
# SSO: Authentik OIDC (sso.vish.gg/application/o/immich-atlantis/)
version: "3.9"
services:
immich-redis:
image: redis
container_name: Immich-REDIS
hostname: immich-redis
security_opt:
- no-new-privileges:true
healthcheck:
test: ["CMD-SHELL", "redis-cli ping || exit 1"]
user: 1026:100
environment:
- TZ=America/Los_Angeles
volumes:
- /volume2/metadata/docker/immich/redis:/data:rw
restart: on-failure:5
immich-db:
image: ghcr.io/immich-app/postgres:16-vectorchord0.4.3-pgvectors0.2.0
container_name: Immich-DB
hostname: immich-db
security_opt:
- no-new-privileges:true
shm_size: 256mb
healthcheck:
test: ["CMD", "pg_isready", "-q", "-d", "immich", "-U", "immichuser"]
interval: 10s
timeout: 5s
retries: 5
volumes:
- /volume2/metadata/docker/immich/db:/var/lib/postgresql/data:rw
environment:
- TZ=America/Los_Angeles
- POSTGRES_DB=immich
- POSTGRES_USER=immichuser
- POSTGRES_PASSWORD="REDACTED_PASSWORD" # pragma: allowlist secret
# Database is on spinning disks instead of SSD
- DB_STORAGE_TYPE=HDD
restart: on-failure:5
immich-server:
image: ghcr.io/immich-app/immich-server:release
container_name: Immich-SERVER
hostname: immich-server
user: 1026:100
security_opt:
- no-new-privileges:true
env_file:
- stack.env
ports:
- 8212:2283
environment:
- IMMICH_CONFIG_FILE=/config/immich-config.json
volumes:
# Main Immich data folder
- /volume2/metadata/docker/immich/upload:/data:rw
# Mount Synology Photos library as external read-only source
- /volume1/homes/vish/Photos:/external/photos:ro
- /etc/localtime:/etc/localtime:ro
# SSO config
- /volume2/metadata/docker/immich/config/immich-config.json:/config/immich-config.json:ro
depends_on:
immich-redis:
condition: service_healthy
immich-db:
condition: service_started
restart: on-failure:5
deploy:
resources:
limits:
memory: 4G
immich-machine-learning:
image: ghcr.io/immich-app/immich-machine-learning:release
container_name: Immich-LEARNING
hostname: immich-machine-learning
user: 1026:100
security_opt:
- no-new-privileges:true
env_file:
- stack.env
volumes:
- /volume2/metadata/docker/immich/upload:/data:rw
- /volume1/homes/vish/Photos:/external/photos:ro
- /volume2/metadata/docker/immich/cache:/cache:rw
- /volume2/metadata/docker/immich/cache:/.cache:rw
- /volume2/metadata/docker/immich/cache:/.config:rw
- /volume2/metadata/docker/immich/matplotlib:/matplotlib:rw
environment:
- MPLCONFIGDIR=/matplotlib
depends_on:
immich-db:
condition: service_started
restart: on-failure:5
deploy:
resources:
limits:
memory: 4G

View File

@@ -0,0 +1,60 @@
# Invidious - YouTube
# Port: 3000
# Privacy-respecting YouTube frontend
version: "3.9"
services:
invidious-db:
image: postgres
container_name: Invidious-DB
hostname: invidious-db
security_opt:
- no-new-privileges:true
healthcheck:
test: ["CMD", "pg_isready", "-q", "-d", "invidious", "-U", "kemal"]
timeout: 45s
interval: 10s
retries: 10
user: 1026:100
volumes:
- /volume1/docker/invidiousdb:/var/lib/postgresql/data
environment:
POSTGRES_DB: invidious
POSTGRES_USER: kemal
POSTGRES_PASSWORD: "REDACTED_PASSWORD" # pragma: allowlist secret
restart: unless-stopped
invidious:
image: quay.io/invidious/invidious:latest
container_name: Invidious
hostname: invidious
user: 1026:100
security_opt:
- no-new-privileges:true
healthcheck:
test: wget -nv --tries=1 --spider http://127.0.0.1:3000/api/v1/comments/jNQXAC9IVRw || exit 1
interval: 30s
timeout: 5s
retries: 2
ports:
- 10.0.0.100:7601:3000
environment:
INVIDIOUS_CONFIG: |
db:
dbname: invidious
user: kemal
password: "REDACTED_PASSWORD" # pragma: allowlist secret
host: invidious-db
port: 5432
check_tables: true
captcha_enabled: false
default_user_preferences:
locale: us
region: US
external_port: 7601
domain: invidious.vishinator.synology.me
https_only: true
restart: unless-stopped
depends_on:
invidious-db:
condition: service_healthy

View File

@@ -0,0 +1,11 @@
# iPerf3 - Network bandwidth testing
# Port: 5201
# TCP/UDP bandwidth measurement tool
version: '3.8'
services:
iperf3:
image: networkstatic/iperf3
container_name: iperf3
restart: unless-stopped
network_mode: "host" # Allows the container to use the NAS's network stack
command: "-s" # Runs iperf3 in server mode

View File

@@ -0,0 +1,24 @@
# IT Tools - Developer utilities collection
# Port: 5545 (host) -> 80 (container)
# Collection of handy online tools for developers
version: '3.8'
services:
it-tools:
container_name: it-tools
image: corentinth/it-tools:latest
restart: unless-stopped
ports:
- "5545:80"
environment:
- TZ=UTC
logging:
driver: json-file
options:
max-size: "10k"
labels:
com.docker.compose.service.description: "IT Tools Dashboard"
networks:
default:
driver: bridge

View File

@@ -0,0 +1,21 @@
# JDownloader2 - Downloads
# Port: 5800
# Multi-host download manager
version: '3.9'
services:
jdownloader-2:
image: jlesage/jdownloader-2
restart: unless-stopped
volumes:
- /volume1/docker/jdownloader2/output:/output
- /volume1/docker/jdownloader2/config:/config
environment:
- TZ=America/Los_Angeles
- PGID=100
- PUID=1026
ports:
- 13016:5900
- 40288:5800
- 20123:3129
container_name: jdownloader2

View File

@@ -0,0 +1,173 @@
# Jitsi Meet - Video conferencing
# Ports: 5080 (HTTP) / 5443 (HTTPS) on host
# Self-hosted video conferencing platform
version: '3.8'
networks:
meet.jitsi:
driver: bridge
turn_net:
driver: bridge
ipam:
config:
- subnet: 172.30.0.0/24
services:
##########################################################
# COTURN
##########################################################
coturn:
image: instrumentisto/coturn:latest
container_name: coturn
restart: unless-stopped
command: ["turnserver", "-c", "/config/turnserver.conf"]
ports:
- "3478:3478/tcp"
- "3478:3478/udp"
- "5349:5349/tcp"
- "5349:5349/udp"
- "49160-49200:49160-49200/udp"
volumes:
- /volume2/metadata/docker/turnserver/turnserver.conf:/config/turnserver.conf:ro
- /volume2/metadata/docker/turnserver/certs:/config/certs:ro
- /volume2/metadata/docker/turnserver/logs:/var/log
- /volume2/metadata/docker/turnserver/db:/var/lib/coturn
environment:
- TZ=America/Los_Angeles
networks:
turn_net:
ipv4_address: 172.30.0.2
ulimits:
nofile:
soft: 65536
hard: 65536
##########################################################
# PROSODY
##########################################################
prosody:
image: jitsi/prosody:stable
container_name: jitsi-prosody
restart: unless-stopped
volumes:
- /volume2/metadata/docker/jitsi/prosody:/config
environment:
- XMPP_DOMAIN=meet.jitsi
- XMPP_AUTH_DOMAIN=auth.meet.jitsi
- XMPP_MUC_DOMAIN=muc.meet.jitsi
- XMPP_INTERNAL_MUC_DOMAIN=internal-muc.meet.jitsi
- XMPP_GUEST_DOMAIN=guest.meet.jitsi
- XMPP_RECORDER_DOMAIN=recorder.meet.jitsi
- JVB_AUTH_USER=jvb
- JVB_AUTH_PASSWORD="REDACTED_PASSWORD" # pragma: allowlist secret
- JICOFO_AUTH_USER=focus
- JICOFO_AUTH_PASSWORD="REDACTED_PASSWORD" # pragma: allowlist secret
- JICOFO_COMPONENT_SECRET=dE6r5r3A3Xpirujycq3E # pragma: allowlist secret
- TZ=America/Los_Angeles
networks:
meet.jitsi:
aliases:
- xmpp.meet.jitsi
- auth.meet.jitsi
- muc.meet.jitsi
- internal-muc.meet.jitsi
- guest.meet.jitsi
- recorder.meet.jitsi
- focus.meet.jitsi
##########################################################
# JICOFO
##########################################################
jicofo:
image: jitsi/jicofo:stable
container_name: jitsi-jicofo
restart: unless-stopped
volumes:
- /volume2/metadata/docker/jitsi/jicofo:/config
environment:
- XMPP_DOMAIN=meet.jitsi
- XMPP_AUTH_DOMAIN=auth.meet.jitsi
- JICOFO_AUTH_USER=focus
- JICOFO_AUTH_PASSWORD="REDACTED_PASSWORD" # pragma: allowlist secret
- JICOFO_COMPONENT_SECRET=dE6r5r3A3Xpirujycq3E # pragma: allowlist secret
- TZ=America/Los_Angeles
depends_on:
- prosody
networks:
- meet.jitsi
##########################################################
# JVB
##########################################################
jvb:
image: jitsi/jvb:stable
container_name: jitsi-jvb
restart: unless-stopped
ports:
- "10000:10000/udp"
volumes:
- /volume2/metadata/docker/jitsi/jvb:/config
environment:
- XMPP_SERVER=prosody
- XMPP_DOMAIN=meet.jitsi
- XMPP_AUTH_DOMAIN=auth.meet.jitsi
- XMPP_INTERNAL_MUC_DOMAIN=internal-muc.meet.jitsi
- JVB_AUTH_USER=jvb
- JVB_AUTH_PASSWORD="REDACTED_PASSWORD" # pragma: allowlist secret
- JVB_BREWERY_MUC=jvbbrewery
- JVB_PORT=10000
- JVB_TCP_HARVESTER_DISABLED=true
- JVB_STUN_SERVERS=stun.l.google.com:19302
- JVB_ENABLE_APIS=rest,colibri
- JVB_ADVERTISE_IPS=184.23.52.219
- TZ=America/Los_Angeles
depends_on:
- prosody
networks:
- meet.jitsi
##########################################################
# WEB UI
##########################################################
web:
image: jitsi/web:stable
container_name: jitsi-web
restart: unless-stopped
ports:
- "5080:80"
- "5443:443"
volumes:
- /volume2/metadata/docker/jitsi/web:/config
- /volume2/metadata/docker/jitsi/letsencrypt:/etc/letsencrypt
environment:
- PUBLIC_URL=https://meet.thevish.io
- ENABLE_P2P=0
- ENABLE_TURN=1
- TURN_HOST=turn.thevish.io
- TURN_PORT=3478
- TURN_TRANSPORT=udp
- TURN_CREDENTIALS=testuser:testpass
- XMPP_STUN_SERVERS=stun.l.google.com:19302
- DISABLE_HTTPS=0
- ENABLE_HTTP_REDIRECT=0
- TZ=America/Los_Angeles
depends_on:
- prosody
- jicofo
- jvb
networks:
- meet.jitsi

View File

@@ -0,0 +1,41 @@
# Joplin Server - Note sync backend
# Port: 22300
# Sync server for Joplin notes app
version: '3'
services:
db:
image: postgres:15
volumes:
- /volume2/metadata/docker/joplin:/var/lib/postgresql/data
ports:
- "5435:5432"
restart: unless-stopped
environment:
- POSTGRES_PASSWORD="REDACTED_PASSWORD" # pragma: allowlist secret
- POSTGRES_USER=joplin
- POSTGRES_DB=joplin
app:
image: joplin/server:latest
depends_on:
- db
ports:
- "22300:22300"
restart: unless-stopped
environment:
- APP_PORT=22300
- APP_BASE_URL=https://joplin.thevish.io
- DB_CLIENT=pg
- POSTGRES_PASSWORD="REDACTED_PASSWORD" # pragma: allowlist secret
- POSTGRES_DATABASE=joplin
- POSTGRES_USER=joplin
- POSTGRES_PORT=5432
- POSTGRES_HOST=db
- MAILER_ENABLED=1
- MAILER_HOST=smtp.gmail.com
- MAILER_PORT=587
- MAILER_SECURITY=starttls
- MAILER_AUTH_USER=your-email@example.com
- MAILER_AUTH_PASSWORD="REDACTED_PASSWORD" # pragma: allowlist secret
- MAILER_NOREPLY_NAME=JoplinServer
- MAILER_NOREPLY_EMAIL=your-email@example.com

View File

@@ -0,0 +1,41 @@
# LlamaGPT - Local ChatGPT
# Port: 3000
# Self-hosted ChatGPT alternative
version: "3.9"
services:
api:
image: ghcr.io/getumbrel/llama-gpt-api:latest
container_name: LlamaGPT-api
hostname: llamagpt-api
mem_limit: 8g
cpu_shares: 768
security_opt:
- no-new-privileges:true
environment:
MODEL: /models/llama-2-7b-chat.bin
MODEL_DOWNLOAD_URL: https://huggingface.co/TheBloke/Nous-Hermes-Llama-2-7B-GGML/resolve/main/nous-hermes-llama-2-7b.ggmlv3.q4_0.bin
USE_MLOCK: 1
cap_add:
- IPC_LOCK
restart: on-failure:5
front:
image: ghcr.io/getumbrel/llama-gpt-ui:latest
container_name: LlamaGPT
hostname: llamagpt
mem_limit: 1g
cpu_shares: 768
security_opt:
- no-new-privileges:true
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:3000
ports:
- 3136:3000
environment:
- 'OPENAI_API_KEY="REDACTED_API_KEY"'
- 'OPENAI_API_HOST=http://llamagpt-api:8000'
- 'DEFAULT_MODEL=/models/llama-2-7b-chat.bin'
- 'WAIT_HOSTS=llamagpt-api:8000'
- 'WAIT_TIMEOUT=600'
restart: on-failure:5

View File

@@ -0,0 +1,79 @@
# Mastodon - Social network
# Port: 8562 (host) -> 443 (container)
# Decentralized social media
version: "3.9"
services:
mastodon-redis:
image: redis
container_name: Mastodon-REDIS
hostname: mastodon-redis
security_opt:
- no-new-privileges:true
healthcheck:
test: ["CMD-SHELL", "redis-cli ping || exit 1"]
user: 1026:100
environment:
- TZ=America/Los_Angeles
volumes:
- /volume1/docker/mastodon/redis:/data
restart: unless-stopped
mastodon-db:
image: postgres
container_name: Mastodon-DB
hostname: mastodon-db
security_opt:
- no-new-privileges:true
healthcheck:
test: ["CMD", "pg_isready", "-q", "-d", "mastodon", "-U", "mastodonuser"]
timeout: 45s
interval: 10s
retries: 10
user: 1026:100
volumes:
- /volume1/docker/mastodon/db:/var/lib/postgresql/data
environment:
POSTGRES_DB: mastodon
POSTGRES_USER: mastodonuser
POSTGRES_PASSWORD: "REDACTED_PASSWORD" # pragma: allowlist secret
restart: unless-stopped
mastodon:
image: lscr.io/linuxserver/mastodon:latest
container_name: Mastodon
hostname: mastodon
security_opt:
- no-new-privileges:true
environment:
- PUID=1026
- PGID=100
- TZ=America/Los_Angeles
- DEFAULT_LOCALE=en
- LOCAL_DOMAIN=mastodon.vish.gg
- WEB_DOMAIN=mastodon.vish.gg
- REDIS_HOST=mastodon-redis
- REDIS_PORT=6379
- DB_HOST=mastodon-db
- DB_USER=mastodonuser
- DB_NAME=mastodon
- DB_PASS="REDACTED_PASSWORD" # pragma: allowlist secret
- DB_PORT=5432
- ES_ENABLED=false
- ES_HOST=es
- ES_PORT=9200
- ES_USER=elastic
- ES_PASS="REDACTED_PASSWORD" # pragma: allowlist secret
- SECRET_KEY_BASE="REDACTED_SECRET_KEY_BASE" # pragma: allowlist secret
- OTP_SECRET="REDACTED_OTP_SECRET" # pragma: allowlist secret
- S3_ENABLED=false
volumes:
- /volume1/docker/mastodon/config:/config
ports:
- 8562:443
restart: unless-stopped
depends_on:
mastodon-redis:
condition: service_healthy
mastodon-db:
condition: service_started

View File

@@ -0,0 +1,45 @@
# Matrix Synapse
# Port: 8008
# Federated chat homeserver
version: "3.9"
services:
synapse-db:
image: postgres
container_name: Synapse-DB
hostname: synapse-db
security_opt:
- no-new-privileges:true
healthcheck:
test: ["CMD", "pg_isready", "-q", "-d", "synapsedb", "-U", "synapseuser"]
timeout: 45s
interval: 10s
retries: 10
user: 1026:100
volumes:
- /volume2/metadata/docker/synapse/db:/var/lib/postgresql/data
environment:
- POSTGRES_DB=synapsedb
- POSTGRES_USER=synapseuser
- POSTGRES_PASSWORD="REDACTED_PASSWORD" # pragma: allowlist secret
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
restart: unless-stopped
synapse:
image: matrixdotorg/synapse:latest
container_name: Synapse
hostname: synapse
security_opt:
- no-new-privileges:true
user: 1026:100
environment:
- TZ=America/Los_Angeles
- SYNAPSE_CONFIG_PATH=/data/homeserver.yaml
volumes:
- /volume2/metadata/docker/synapse/data:/data
ports:
- 8450:8008/tcp
restart: unless-stopped
depends_on:
synapse-db:
condition: service_started

View File

@@ -0,0 +1,54 @@
# Configuration file for Synapse.
#
# This is a YAML file: see [1] for a quick introduction. Note in particular
# that *indentation is important*: all the elements of a list or dictionary
# should have the same indentation.
#
# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html
#
# For more information on how to configure Synapse, including a complete accounting of
# each option, go to docs/usage/configuration/config_documentation.md or
# https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html
server_name: "vish"
enable_registration: true
enable_registration_without_verification: true
enable_group_creation: true
pid_file: /data/homeserver.pid
listeners:
- port: 8008
tls: false
type: http
x_forwarded: true
resources:
- names: [client, federation]
compress: false
database:
name: psycopg2
args:
user: synapseuser
password: "REDACTED_PASSWORD" # pragma: allowlist secret
database: synapsedb
host: synapse-db
cp_min: 5
cp_max: 10
log_config: "/data/vish.log.config"
media_store_path: /data/media_store
registration_shared_secret: "yx9S.cr&BfOC;V4z:~:MWDwfI0Ld=64UZ~Y0jt4hTk;j2RQ*&F" # pragma: allowlist secret
report_stats: true
macaroon_secret_key: "tdXeRQE&Yp:X~yFM1&#^K7ZhikDi;Yte#DGRxLbDRVYGmD1fH_" # pragma: allowlist secret
form_secret: "q,:M6Y+M054Tw=yCWbavcNxrXLgU,M@iblHxo_5T@VOHgdpikF" # pragma: allowlist secret
signing_key_path: "/data/vish.signing.key"
trusted_key_servers:
- server_name: "matrix.org"
turn_uris:
- "turn:turn.thevish.io:3478?transport=udp"
- "turn:turn.thevish.io:3478?transport=tcp"
- "turns:turn.thevish.io:5349?transport=udp"
- "turns:turn.thevish.io:5349?transport=tcp"
turn_shared_secret: "c7y7vrETfYRhOkhrUX/8xszqCQOvh0mWWAA7QBwQlsQ=" # pragma: allowlist secret # use your actual secret
turn_user_lifetime: 86400000
turn_allow_guests: true
# vim:ft=yaml

View File

@@ -0,0 +1,4 @@
openssl rand -base64 32
Output:
c7y7vrETfYRhOkhrUX/8xszqCQOvh0mWWAA7QBwQlsQ=

View File

@@ -0,0 +1,22 @@
-----BEGIN CERTIFICATE-----
MIIDtjCCAzygAwIBAgISBqFIOn7eu28SfvzBamcT56aSMAoGCCqGSM49BAMDMDIx
CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQDEwJF
NjAeFw0yNTA3MDUyMTUyMDVaFw0yNTEwMDMyMTUyMDRaMCIxIDAeBgNVBAMTF3Zp
c2hjb25jb3JkLnN5bm9sb2d5Lm1lMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE
45zvNIgbuOTQro3M9mfdQR7h8oMih+YCifkwstKIdQzvYP9ZtMsqos748RfClDjs
xDUUmcYwi5YCyrxEyaRrlqOCAkAwggI8MA4GA1UdDwEB/wQEAwIHgDAdBgNVHSUE
FjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU
UIIr/44cK1l8+6U53JQmvGYW6iUwHwYDVR0jBBgwFoAUkydGmAOpUWiOmNbEQkjb
I79YlNIwMgYIKwYBBQUHAQEEJjAkMCIGCCsGAQUFBzAChhZodHRwOi8vZTYuaS5s
ZW5jci5vcmcvMD0GA1UdEQQ2MDSCGSoudmlzaGNvbmNvcmQuc3lub2xvZ3kubWWC
F3Zpc2hjb25jb3JkLnN5bm9sb2d5Lm1lMBMGA1UdIAQMMAowCAYGZ4EMAQIBMC0G
A1UdHwQmMCQwIqAgoB6GHGh0dHA6Ly9lNi5jLmxlbmNyLm9yZy8xOS5jcmwwggEE
BgorBgEEAdZ5AgQCBIH1BIHyAPAAdgDd3Mo0ldfhFgXnlTL6x5/4PRxQ39sAOhQS
dgosrLvIKgAAAZfcyMjYAAAEAwBHMEUCIGGsbgHmrfjeIj07954+JAZujHQ2d6Cg
+2ey1bmeNycmAiEAzbPFNFmKa7SG3wYmgGzYsUnnZc7zgXGDoLtuSMa8RnwAdgB9
WR4S4XgqexxhZ3xe/fjQh1wUoE6VnrkDL9kOjC55uAAAAZfcyNCFAAAEAwBHMEUC
IQCPRl51MZHLtsQlGl9pGPxCxARZIkUKMyTpSlsqTrjeVwIgDJtI7rF/BzJ+8DC1
XRMBpnEsF27Vh2SQm+PMGVlXLkUwCgYIKoZIzj0EAwMDaAAwZQIwSXIk4PAYyY5z
PR07dRzR5euvEZdAq1Ez6Wdwnl9JTKGWRxrkJfZT+1HY7mSfuXpyAjEA1LeAuXxd
cHNJPINSlz05YCglqCqmnkksJccdIp0OKmGYBQcRwDZlE7aIIZc+oU4V
-----END CERTIFICATE-----

View File

@@ -0,0 +1,26 @@
-----BEGIN CERTIFICATE-----
MIIEVzCCAj+gAwIBAgIRALBXPpFzlydw27SHyzpFKzgwDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjQwMzEzMDAwMDAw
WhcNMjcwMzEyMjM1OTU5WjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg
RW5jcnlwdDELMAkGA1UEAxMCRTYwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATZ8Z5G
h/ghcWCoJuuj+rnq2h25EqfUJtlRFLFhfHWWvyILOR/VvtEKRqotPEoJhC6+QJVV
6RlAN2Z17TJOdwRJ+HB7wxjnzvdxEP6sdNgA1O1tHHMWMxCcOrLqbGL0vbijgfgw
gfUwDgYDVR0PAQH/BAQDAgGGMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcD
ATASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBSTJ0aYA6lRaI6Y1sRCSNsj
v1iU0jAfBgNVHSMEGDAWgBR5tFnme7bl5AFzgAiIyBpY9umbbjAyBggrBgEFBQcB
AQQmMCQwIgYIKwYBBQUHMAKGFmh0dHA6Ly94MS5pLmxlbmNyLm9yZy8wEwYDVR0g
BAwwCjAIBgZngQwBAgEwJwYDVR0fBCAwHjAcoBqgGIYWaHR0cDovL3gxLmMubGVu
Y3Iub3JnLzANBgkqhkiG9w0BAQsFAAOCAgEAfYt7SiA1sgWGCIpunk46r4AExIRc
MxkKgUhNlrrv1B21hOaXN/5miE+LOTbrcmU/M9yvC6MVY730GNFoL8IhJ8j8vrOL
pMY22OP6baS1k9YMrtDTlwJHoGby04ThTUeBDksS9RiuHvicZqBedQdIF65pZuhp
eDcGBcLiYasQr/EO5gxxtLyTmgsHSOVSBcFOn9lgv7LECPq9i7mfH3mpxgrRKSxH
pOoZ0KXMcB+hHuvlklHntvcI0mMMQ0mhYj6qtMFStkF1RpCG3IPdIwpVCQqu8GV7
s8ubknRzs+3C/Bm19RFOoiPpDkwvyNfvmQ14XkyqqKK5oZ8zhD32kFRQkxa8uZSu
h4aTImFxknu39waBxIRXE4jKxlAmQc4QjFZoq1KmQqQg0J/1JF8RlFvJas1VcjLv
YlvUB2t6npO6oQjB3l+PNf0DpQH7iUx3Wz5AjQCi6L25FjyE06q6BZ/QlmtYdl/8
ZYao4SRqPEs/6cAiF+Qf5zg2UkaWtDphl1LKMuTNLotvsX99HP69V2faNyegodQ0
LyTApr/vT01YPE46vNsDLgK+4cL6TrzC/a4WcmF5SRJ938zrv/duJHLXQIku5v0+
EwOy59Hdm0PT/Er/84dDV0CSjdR/2XuZM3kpysSKLgD1cKiDA+IRguODCxfO9cyY
Ig46v9mFmBvyH04=
-----END CERTIFICATE-----

View File

@@ -0,0 +1,30 @@
-----BEGIN CERTIFICATE-----
MIIFIzCCBAugAwIBAgISBnLj71XYkmklsH9Kbh5sJVIqMA0GCSqGSIb3DQEBCwUA
MDMxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQwwCgYDVQQD
EwNSMTAwHhcNMjUwNzA1MjE1MjA0WhcNMjUxMDAzMjE1MjAzWjAiMSAwHgYDVQQD
Exd2aXNoY29uY29yZC5zeW5vbG9neS5tZTCCASIwDQYJKoZIhvcNAQEBBQADggEP
ADCCAQoCggEBANYoVv6dZACDHffbvs/8jjyrxdUjRwosesqsrpjZBvp7LBYSJB8T
SY2X2GsMrLVJXMmRaADnvFMCH5K7hSXgVQItTrJOEraaj7YlO7cUY8x5LAMqvTGs
CHzpR5mmfY29toMo5y4Nw6ppzS8GehICO5kf117CpITRfJ5GVUvVKFUyPKP4YxwU
wDuOD0cNZ4orOvWRPWUDCu9xaJK/Ml9DUFbTL8C5vNBxeGXyUpG90z0NrwbK/q3Y
SqUaHTtxtHKu8Xg/vSysK+4fHKE0PGEGxvh+M4CWM46SJQu7ajBFrJYG9Fg7b2Gn
Z79us9+BHL+R0hEsNqfKB+yk6fwn7CU8aEECAwEAAaOCAkAwggI8MA4GA1UdDwEB
/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/
BAIwADAdBgNVHQ4EFgQUpBhN82BCyZod/guG5nq+7XhE/GgwHwYDVR0jBBgwFoAU
u7zDR6XkvKnGw6RyDBCNojXhyOgwMwYIKwYBBQUHAQEEJzAlMCMGCCsGAQUFBzAC
hhdodHRwOi8vcjEwLmkubGVuY3Iub3JnLzA9BgNVHREENjA0ghkqLnZpc2hjb25j
b3JkLnN5bm9sb2d5Lm1lghd2aXNoY29uY29yZC5zeW5vbG9neS5tZTATBgNVHSAE
DDAKMAgGBmeBDAECATAuBgNVHR8EJzAlMCOgIaAfhh1odHRwOi8vcjEwLmMubGVu
Y3Iub3JnLzQzLmNybDCCAQIGCisGAQQB1nkCBAIEgfMEgfAA7gB1AMz7D2qFcQll
/pWbU87psnwi6YVcDZeNtql+VMD+TA2wAAABl9zIxGwAAAQDAEYwRAIgAZ5AdSLd
ck20vYRcFZrQiV96oYIePURFVHxYn1kcNfsCIEhIxhXxSvPQdUy40FczC5hCgsC6
xwvYbLaKyRzb0LJjAHUA3dzKNJXX4RYF55Uy+sef+D0cUN/bADoUEnYKLKy7yCoA
AAGX3MjEjAAABAMARjBEAiBIQTlsET9c1BMWtj/YHtXCwSlILtH3+QvfpzYBkhQM
/QIgNPNNPc4MgfmWZNbq8Sc0U6t1z++g3FSprMIusRoKHX0wDQYJKoZIhvcNAQEL
BQADggEBAAU6MJgEv9OKWmbRjwq2FDheBl0n3FoEJOHVOUAPHU9xd3YIxKUj4/iL
ImLRE+xkvz9PigYyetYQDVaDKgOPhr+30T5mJEKKyYDvpRQ301fMqLvMXesqt7ye
+YYTz/OD6kTzkg27p4ks+PXovEVnR9oUumDIZBxIJeh54mTshVcYCqNpol+4xGSI
nMps9La2D23ng2/x7bsOAiKwowTkvkA+EUf6pNQDIOe1KW26GLzuq6YUVm1GDVFH
vD6lT8+o/M1TBrQ6DC3kuhpfx+c8skcITBKAqhOwAwUUs+b7qZXiBDeLtvJKlC2D
O7OcgyoN4yVOSCE/VgioV27nfhZJJYo=
-----END CERTIFICATE-----

View File

@@ -0,0 +1,29 @@
-----BEGIN CERTIFICATE-----
MIIFBTCCAu2gAwIBAgIQS6hSk/eaL6JzBkuoBI110DANBgkqhkiG9w0BAQsFADBP
MQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFy
Y2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMTAeFw0yNDAzMTMwMDAwMDBa
Fw0yNzAzMTIyMzU5NTlaMDMxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBF
bmNyeXB0MQwwCgYDVQQDEwNSMTAwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQDPV+XmxFQS7bRH/sknWHZGUCiMHT6I3wWd1bUYKb3dtVq/+vbOo76vACFL
YlpaPAEvxVgD9on/jhFD68G14BQHlo9vH9fnuoE5CXVlt8KvGFs3Jijno/QHK20a
/6tYvJWuQP/py1fEtVt/eA0YYbwX51TGu0mRzW4Y0YCF7qZlNrx06rxQTOr8IfM4
FpOUurDTazgGzRYSespSdcitdrLCnF2YRVxvYXvGLe48E1KGAdlX5jgc3421H5KR
mudKHMxFqHJV8LDmowfs/acbZp4/SItxhHFYyTr6717yW0QrPHTnj7JHwQdqzZq3
DZb3EoEmUVQK7GH29/Xi8orIlQ2NAgMBAAGjgfgwgfUwDgYDVR0PAQH/BAQDAgGG
MB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATASBgNVHRMBAf8ECDAGAQH/
AgEAMB0GA1UdDgQWBBS7vMNHpeS8qcbDpHIMEI2iNeHI6DAfBgNVHSMEGDAWgBR5
tFnme7bl5AFzgAiIyBpY9umbbjAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAKG
Fmh0dHA6Ly94MS5pLmxlbmNyLm9yZy8wEwYDVR0gBAwwCjAIBgZngQwBAgEwJwYD
VR0fBCAwHjAcoBqgGIYWaHR0cDovL3gxLmMubGVuY3Iub3JnLzANBgkqhkiG9w0B
AQsFAAOCAgEAkrHnQTfreZ2B5s3iJeE6IOmQRJWjgVzPw139vaBw1bGWKCIL0vIo
zwzn1OZDjCQiHcFCktEJr59L9MhwTyAWsVrdAfYf+B9haxQnsHKNY67u4s5Lzzfd
u6PUzeetUK29v+PsPmI2cJkxp+iN3epi4hKu9ZzUPSwMqtCceb7qPVxEbpYxY1p9
1n5PJKBLBX9eb9LU6l8zSxPWV7bK3lG4XaMJgnT9x3ies7msFtpKK5bDtotij/l0
GaKeA97pb5uwD9KgWvaFXMIEt8jVTjLEvwRdvCn294GPDF08U8lAkIv7tghluaQh
1QnlE4SEN4LOECj8dsIGJXpGUk3aU3KkJz9icKy+aUgA+2cP21uh6NcDIS3XyfaZ
QjmDQ993ChII8SXWupQZVBiIpcWO4RqZk3lr7Bz5MUCwzDIA359e57SSq5CCkY0N
4B6Vulk7LktfwrdGNVI5BsC9qqxSwSKgRJeZ9wygIaehbHFHFhcBaMDKpiZlBHyz
rsnnlFXCb5s8HKn5LsUgGvB24L7sGNZP2CX7dhHov+YhD+jozLW2p9W4959Bz2Ei
RmqDtmiXLnzqTpXbI+suyCsohKRg6Un0RC47+cpiVwHiXZAW+cn8eiNIjqbVgXLx
KPpdzvvtTnOPlC7SQZSYmdunr3Bf9b77AiC/ZidstK36dRILKz7OA54=
-----END CERTIFICATE-----

View File

@@ -0,0 +1,30 @@
-----BEGIN CERTIFICATE-----
MIIFIzCCBAugAwIBAgISBnLj71XYkmklsH9Kbh5sJVIqMA0GCSqGSIb3DQEBCwUA
MDMxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQwwCgYDVQQD
EwNSMTAwHhcNMjUwNzA1MjE1MjA0WhcNMjUxMDAzMjE1MjAzWjAiMSAwHgYDVQQD
Exd2aXNoY29uY29yZC5zeW5vbG9neS5tZTCCASIwDQYJKoZIhvcNAQEBBQADggEP
ADCCAQoCggEBANYoVv6dZACDHffbvs/8jjyrxdUjRwosesqsrpjZBvp7LBYSJB8T
SY2X2GsMrLVJXMmRaADnvFMCH5K7hSXgVQItTrJOEraaj7YlO7cUY8x5LAMqvTGs
CHzpR5mmfY29toMo5y4Nw6ppzS8GehICO5kf117CpITRfJ5GVUvVKFUyPKP4YxwU
wDuOD0cNZ4orOvWRPWUDCu9xaJK/Ml9DUFbTL8C5vNBxeGXyUpG90z0NrwbK/q3Y
SqUaHTtxtHKu8Xg/vSysK+4fHKE0PGEGxvh+M4CWM46SJQu7ajBFrJYG9Fg7b2Gn
Z79us9+BHL+R0hEsNqfKB+yk6fwn7CU8aEECAwEAAaOCAkAwggI8MA4GA1UdDwEB
/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/
BAIwADAdBgNVHQ4EFgQUpBhN82BCyZod/guG5nq+7XhE/GgwHwYDVR0jBBgwFoAU
u7zDR6XkvKnGw6RyDBCNojXhyOgwMwYIKwYBBQUHAQEEJzAlMCMGCCsGAQUFBzAC
hhdodHRwOi8vcjEwLmkubGVuY3Iub3JnLzA9BgNVHREENjA0ghkqLnZpc2hjb25j
b3JkLnN5bm9sb2d5Lm1lghd2aXNoY29uY29yZC5zeW5vbG9neS5tZTATBgNVHSAE
DDAKMAgGBmeBDAECATAuBgNVHR8EJzAlMCOgIaAfhh1odHRwOi8vcjEwLmMubGVu
Y3Iub3JnLzQzLmNybDCCAQIGCisGAQQB1nkCBAIEgfMEgfAA7gB1AMz7D2qFcQll
/pWbU87psnwi6YVcDZeNtql+VMD+TA2wAAABl9zIxGwAAAQDAEYwRAIgAZ5AdSLd
ck20vYRcFZrQiV96oYIePURFVHxYn1kcNfsCIEhIxhXxSvPQdUy40FczC5hCgsC6
xwvYbLaKyRzb0LJjAHUA3dzKNJXX4RYF55Uy+sef+D0cUN/bADoUEnYKLKy7yCoA
AAGX3MjEjAAABAMARjBEAiBIQTlsET9c1BMWtj/YHtXCwSlILtH3+QvfpzYBkhQM
/QIgNPNNPc4MgfmWZNbq8Sc0U6t1z++g3FSprMIusRoKHX0wDQYJKoZIhvcNAQEL
BQADggEBAAU6MJgEv9OKWmbRjwq2FDheBl0n3FoEJOHVOUAPHU9xd3YIxKUj4/iL
ImLRE+xkvz9PigYyetYQDVaDKgOPhr+30T5mJEKKyYDvpRQ301fMqLvMXesqt7ye
+YYTz/OD6kTzkg27p4ks+PXovEVnR9oUumDIZBxIJeh54mTshVcYCqNpol+4xGSI
nMps9La2D23ng2/x7bsOAiKwowTkvkA+EUf6pNQDIOe1KW26GLzuq6YUVm1GDVFH
vD6lT8+o/M1TBrQ6DC3kuhpfx+c8skcITBKAqhOwAwUUs+b7qZXiBDeLtvJKlC2D
O7OcgyoN4yVOSCE/VgioV27nfhZJJYo=
-----END CERTIFICATE-----

View File

@@ -0,0 +1,29 @@
-----BEGIN CERTIFICATE-----
MIIFBTCCAu2gAwIBAgIQS6hSk/eaL6JzBkuoBI110DANBgkqhkiG9w0BAQsFADBP
MQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFy
Y2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMTAeFw0yNDAzMTMwMDAwMDBa
Fw0yNzAzMTIyMzU5NTlaMDMxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBF
bmNyeXB0MQwwCgYDVQQDEwNSMTAwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQDPV+XmxFQS7bRH/sknWHZGUCiMHT6I3wWd1bUYKb3dtVq/+vbOo76vACFL
YlpaPAEvxVgD9on/jhFD68G14BQHlo9vH9fnuoE5CXVlt8KvGFs3Jijno/QHK20a
/6tYvJWuQP/py1fEtVt/eA0YYbwX51TGu0mRzW4Y0YCF7qZlNrx06rxQTOr8IfM4
FpOUurDTazgGzRYSespSdcitdrLCnF2YRVxvYXvGLe48E1KGAdlX5jgc3421H5KR
mudKHMxFqHJV8LDmowfs/acbZp4/SItxhHFYyTr6717yW0QrPHTnj7JHwQdqzZq3
DZb3EoEmUVQK7GH29/Xi8orIlQ2NAgMBAAGjgfgwgfUwDgYDVR0PAQH/BAQDAgGG
MB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATASBgNVHRMBAf8ECDAGAQH/
AgEAMB0GA1UdDgQWBBS7vMNHpeS8qcbDpHIMEI2iNeHI6DAfBgNVHSMEGDAWgBR5
tFnme7bl5AFzgAiIyBpY9umbbjAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAKG
Fmh0dHA6Ly94MS5pLmxlbmNyLm9yZy8wEwYDVR0gBAwwCjAIBgZngQwBAgEwJwYD
VR0fBCAwHjAcoBqgGIYWaHR0cDovL3gxLmMubGVuY3Iub3JnLzANBgkqhkiG9w0B
AQsFAAOCAgEAkrHnQTfreZ2B5s3iJeE6IOmQRJWjgVzPw139vaBw1bGWKCIL0vIo
zwzn1OZDjCQiHcFCktEJr59L9MhwTyAWsVrdAfYf+B9haxQnsHKNY67u4s5Lzzfd
u6PUzeetUK29v+PsPmI2cJkxp+iN3epi4hKu9ZzUPSwMqtCceb7qPVxEbpYxY1p9
1n5PJKBLBX9eb9LU6l8zSxPWV7bK3lG4XaMJgnT9x3ies7msFtpKK5bDtotij/l0
GaKeA97pb5uwD9KgWvaFXMIEt8jVTjLEvwRdvCn294GPDF08U8lAkIv7tghluaQh
1QnlE4SEN4LOECj8dsIGJXpGUk3aU3KkJz9icKy+aUgA+2cP21uh6NcDIS3XyfaZ
QjmDQ993ChII8SXWupQZVBiIpcWO4RqZk3lr7Bz5MUCwzDIA359e57SSq5CCkY0N
4B6Vulk7LktfwrdGNVI5BsC9qqxSwSKgRJeZ9wygIaehbHFHFhcBaMDKpiZlBHyz
rsnnlFXCb5s8HKn5LsUgGvB24L7sGNZP2CX7dhHov+YhD+jozLW2p9W4959Bz2Ei
RmqDtmiXLnzqTpXbI+suyCsohKRg6Un0RC47+cpiVwHiXZAW+cn8eiNIjqbVgXLx
KPpdzvvtTnOPlC7SQZSYmdunr3Bf9b77AiC/ZidstK36dRILKz7OA54=
-----END CERTIFICATE-----

View File

@@ -0,0 +1,31 @@
-----BEGIN CERTIFICATE-----
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
-----END CERTIFICATE-----

View File

@@ -0,0 +1,29 @@
-----BEGIN CERTIFICATE-----
MIIFBTCCAu2gAwIBAgIQS6hSk/eaL6JzBkuoBI110DANBgkqhkiG9w0BAQsFADBP
MQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFy
Y2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMTAeFw0yNDAzMTMwMDAwMDBa
Fw0yNzAzMTIyMzU5NTlaMDMxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBF
bmNyeXB0MQwwCgYDVQQDEwNSMTAwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQDPV+XmxFQS7bRH/sknWHZGUCiMHT6I3wWd1bUYKb3dtVq/+vbOo76vACFL
YlpaPAEvxVgD9on/jhFD68G14BQHlo9vH9fnuoE5CXVlt8KvGFs3Jijno/QHK20a
/6tYvJWuQP/py1fEtVt/eA0YYbwX51TGu0mRzW4Y0YCF7qZlNrx06rxQTOr8IfM4
FpOUurDTazgGzRYSespSdcitdrLCnF2YRVxvYXvGLe48E1KGAdlX5jgc3421H5KR
mudKHMxFqHJV8LDmowfs/acbZp4/SItxhHFYyTr6717yW0QrPHTnj7JHwQdqzZq3
DZb3EoEmUVQK7GH29/Xi8orIlQ2NAgMBAAGjgfgwgfUwDgYDVR0PAQH/BAQDAgGG
MB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATASBgNVHRMBAf8ECDAGAQH/
AgEAMB0GA1UdDgQWBBS7vMNHpeS8qcbDpHIMEI2iNeHI6DAfBgNVHSMEGDAWgBR5
tFnme7bl5AFzgAiIyBpY9umbbjAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAKG
Fmh0dHA6Ly94MS5pLmxlbmNyLm9yZy8wEwYDVR0gBAwwCjAIBgZngQwBAgEwJwYD
VR0fBCAwHjAcoBqgGIYWaHR0cDovL3gxLmMubGVuY3Iub3JnLzANBgkqhkiG9w0B
AQsFAAOCAgEAkrHnQTfreZ2B5s3iJeE6IOmQRJWjgVzPw139vaBw1bGWKCIL0vIo
zwzn1OZDjCQiHcFCktEJr59L9MhwTyAWsVrdAfYf+B9haxQnsHKNY67u4s5Lzzfd
u6PUzeetUK29v+PsPmI2cJkxp+iN3epi4hKu9ZzUPSwMqtCceb7qPVxEbpYxY1p9
1n5PJKBLBX9eb9LU6l8zSxPWV7bK3lG4XaMJgnT9x3ies7msFtpKK5bDtotij/l0
GaKeA97pb5uwD9KgWvaFXMIEt8jVTjLEvwRdvCn294GPDF08U8lAkIv7tghluaQh
1QnlE4SEN4LOECj8dsIGJXpGUk3aU3KkJz9icKy+aUgA+2cP21uh6NcDIS3XyfaZ
QjmDQ993ChII8SXWupQZVBiIpcWO4RqZk3lr7Bz5MUCwzDIA359e57SSq5CCkY0N
4B6Vulk7LktfwrdGNVI5BsC9qqxSwSKgRJeZ9wygIaehbHFHFhcBaMDKpiZlBHyz
rsnnlFXCb5s8HKn5LsUgGvB24L7sGNZP2CX7dhHov+YhD+jozLW2p9W4959Bz2Ei
RmqDtmiXLnzqTpXbI+suyCsohKRg6Un0RC47+cpiVwHiXZAW+cn8eiNIjqbVgXLx
KPpdzvvtTnOPlC7SQZSYmdunr3Bf9b77AiC/ZidstK36dRILKz7OA54=
-----END CERTIFICATE-----

View File

@@ -0,0 +1,35 @@
version: '3.8'
networks:
  # Dedicated bridge so coturn gets a stable container IP (172.30.0.2).
  turn_net:
    driver: bridge
    ipam:
      config:
        - subnet: 172.30.0.0/24
services:
  # coturn TURN/STUN relay used by the Matrix Synapse stack for VoIP.
  coturn:
    image: instrumentisto/coturn:latest
    container_name: coturn
    restart: unless-stopped
    # Load the mounted config instead of the image default.
    command: ["turnserver", "-c", "/config/turnserver.conf"]
    ports:
      # 3478 = plain STUN/TURN, 5349 = TLS (turns:), both TCP and UDP.
      - "3478:3478/tcp"
      - "3478:3478/udp"
      - "5349:5349/tcp"
      - "5349:5349/udp"
      # Media relay port range — must match min-port/max-port in
      # turnserver.conf; TODO confirm the config uses 49160-49200.
      - "49160-49200:49160-49200/udp"
    volumes:
      - /volume2/metadata/docker/turnserver/turnserver.conf:/config/turnserver.conf:ro
      - /volume2/metadata/docker/turnserver/certs:/config/certs:ro
      - /volume2/metadata/docker/turnserver/logs:/var/log
      - /volume2/metadata/docker/turnserver/db:/var/lib/coturn
    environment:
      - TZ=America/Los_Angeles
    networks:
      turn_net:
        ipv4_address: 172.30.0.2
    # Relay servers hold many simultaneous sockets; raise the FD limit.
    ulimits:
      nofile:
        soft: 65536
        hard: 65536

View File

@@ -0,0 +1,74 @@
# NetBox - DCIM/IPAM
# Host port: 9458 -> container 8000 (bound to 10.0.0.100 only)
# Network documentation and IP address management
version: "3.9"
services:
  netbox-redis:
    image: redis
    container_name: NETBOX-REDIS
    hostname: netbox-redis
    # NOTE(review): `redis-cli ping` exits 0 even when the server answers
    # NOAUTH (requirepass is set below), so this probe may never fail —
    # confirm, and consider authenticating inside the healthcheck.
    healthcheck:
      test: ["CMD-SHELL", "redis-cli ping || exit 1"]
    command:
      - sh
      - -c
      - redis-server --appendonly yes --requirepass REDACTED_PASSWORD
    # Run as the NAS user/group so files under /volume1 keep sane ownership.
    # Quoted: digit-and-colon plain scalars can hit YAML's sexagesimal trap.
    user: "1026:100"
    volumes:
      - /volume1/docker/netbox/redis:/data
    environment:
      # List-form env entries are taken verbatim after `=`; writing
      # KEY="VALUE" would pass the quote characters as part of the value
      # and break the match with --requirepass above.
      - REDIS_PASSWORD=REDACTED_PASSWORD # pragma: allowlist secret
    restart: unless-stopped
  netbox-db:
    image: postgres
    container_name: NETBOX-POSTGRES-DB
    hostname: netbox-db
    # Gate the app container on DB readiness (see depends_on below).
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-d", "netbox", "-U", "netbox-user"]
      timeout: 45s
      interval: 10s
      retries: 10
    user: "1026:100"
    volumes:
      - /volume1/docker/netbox/db:/var/lib/postgresql/data
    environment:
      # Map-form values: YAML quotes are stripped normally, so these are fine.
      POSTGRES_DB: netbox
      POSTGRES_USER: netbox-user
      POSTGRES_PASSWORD: "REDACTED_PASSWORD" # pragma: allowlist secret
    restart: unless-stopped
  netbox:
    image: linuxserver/netbox:latest
    container_name: NETBOX
    hostname: netbox
    # Probe via the published host address; requires the port binding below.
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://10.0.0.100:9458/ || exit 1
    environment:
      - PUID=1026
      - PGID=100
      - TZ=America/Los_Angeles
      - SUPERUSER_EMAIL=your-email@example.com
      # Unquoted after `=`: list-form env values keep quotes literally.
      - SUPERUSER_PASSWORD=REDACTED_PASSWORD # pragma: allowlist secret
      - ALLOWED_HOST=10.0.0.100
      - DB_HOST=netbox-db
      - DB_PORT=5432
      - DB_NAME=netbox
      - DB_USER=netbox-user
      # Must match POSTGRES_PASSWORD in netbox-db above.
      - DB_PASSWORD=REDACTED_PASSWORD # pragma: allowlist secret
      - REDIS_HOST=netbox-redis
      - REDIS_PORT=6379
      # Must match --requirepass in netbox-redis above.
      - REDIS_PASSWORD=REDACTED_PASSWORD # pragma: allowlist secret
      - REDIS_DB_TASK=0
      - REDIS_DB_CACHE=1
    volumes:
      - /volume1/docker/netbox/config:/config
    ports:
      # Quoted: port mappings should always be strings in Compose YAML.
      - "10.0.0.100:9458:8000"
    restart: unless-stopped
    depends_on:
      netbox-redis:
        condition: service_healthy
      netbox-db:
        condition: service_healthy

View File

@@ -0,0 +1,11 @@
{
"database": {
"engine": "knex-native",
"knex": {
"client": "sqlite3",
"connection": {
"filename": "/data/database.sqlite"
}
}
}
}

View File

@@ -0,0 +1,17 @@
version: "3.8"
services:
  # Nginx Proxy Manager — reverse proxy with web UI and Let's Encrypt.
  nginx_proxy_manager:
    image: jc21/nginx-proxy-manager
    container_name: nginx_proxy_manager
    ports:
      # Host 8341 -> HTTP 80, host 81 -> admin UI, host 8766 -> HTTPS 443.
      - "8341:80"
      - "81:81"
      - "8766:443"
    environment:
      - TZ=America/Los_Angeles
    volumes:
      # production.json selects the sqlite backend (see config.json above).
      - /volume1/docker/nginxproxymanager/config.json:/app/config/production.json
      - /volume1/docker/nginxproxymanager/data:/data
      - /volume1/docker/nginxproxymanager/letsencrypt:/etc/letsencrypt
    restart: unless-stopped

View File

@@ -0,0 +1,13 @@
# ntfy - Push notifications
# Host port: 48978 -> container 80
# Simple pub-sub notification service
version: '3.9'
services:
  ntfy:
    # `serve` starts the ntfy server with its default config.
    command: serve
    image: binwiederhier/ntfy
    tty: true
    stdin_open: true
    # Added for consistency with the other services on this host:
    # without a restart policy the service does not survive a reboot.
    restart: unless-stopped
    ports:
      - '48978:80'

View File

@@ -0,0 +1,55 @@
# Ollama - Local LLM inference
# URL: https://ollama.vishconcord.synology.me
# Port: 11434
# Run large language models locally
version: "3.8"
services:
  ollama:
    container_name: ollama
    # :rocm tag = AMD GPU acceleration build.
    image: ollama/ollama:rocm
    restart: unless-stopped
    ports:
      - "11434:11434"
    environment:
      # Listen on all interfaces inside the container.
      OLLAMA_HOST: 0.0.0.0
      # CORS allow-list for the remote web client.
      OLLAMA_ORIGINS: https://rxv4access.vishconcord.synology.me
      OLLAMA_OPENAI_COMPAT: 1
      # `>-` (strip) instead of `>` (clip) so the env value carries no
      # trailing newline. Folded value expands to "phi3:mini, gemma:2b" —
      # NOTE(review): the entrypoint script reads $MODELS, not
      # OLLAMA_INSTALL_MODELS, and must trim the space after each comma;
      # confirm which variable the consumer actually uses.
      OLLAMA_INSTALL_MODELS: >-
        phi3:mini,
        gemma:2b
      OLLAMA_NUM_THREAD: 4
    volumes:
      - /volume2/metadata/docker/ollama/data:/root/.ollama:rw
      - /volume2/metadata/docker/ollama/custom:/models/custom:ro
    healthcheck:
      test: ["CMD", "ollama", "--version"]
      interval: 15s
      timeout: 5s
      retries: 3
      start_period: 45s
    deploy:
      resources:
        limits:
          memory: 18g
  webui:
    container_name: ollama-webui
    image: ghcr.io/open-webui/open-webui:0.6
    restart: unless-stopped
    # Wait for the ollama healthcheck before starting the UI.
    depends_on:
      ollama:
        condition: service_healthy
    ports:
      - "8271:8080"
    environment:
      OLLAMA_BASE_URL: http://ollama:11434
      WEBUI_SECRET_KEY: "REDACTED_SECRET_KEY" # pragma: allowlist secret
    volumes:
      - /volume2/metadata/docker/ollama/webui:/app/backend/data:rw
    healthcheck:
      # TCP probe via bash's /dev/tcp — no curl/wget needed in the image.
      test: timeout 10s bash -c ':> /dev/tcp/127.0.0.1/8080' || exit 1
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 90s

View File

@@ -0,0 +1,24 @@
#!/bin/bash
# Entrypoint wrapper: start the Ollama server, wait until it accepts TCP
# connections, pull every model listed in $MODELS (comma-separated), then
# stay attached to the server process so the container keeps running.
set -euo pipefail

# Start Ollama server in the background and remember its PID.
/bin/ollama serve &
pid=$!

# Wait for readiness using bash's built-in /dev/tcp (no curl/nc in image).
while ! timeout 1 bash -c "echo > /dev/tcp/localhost/11434" 2>/dev/null; do
    echo "Waiting for Ollama to start..."
    sleep 1
done
echo "Ollama started."

# ${MODELS:-} keeps `set -u` from aborting when MODELS is unset; an empty
# list simply skips the pull loop.
if [ -n "${MODELS:-}" ]; then
    IFS=',' read -ra model_array <<< "$MODELS"
    for model in "${model_array[@]}"; do
        # Trim surrounding whitespace — folded YAML values such as
        # "phi3:mini, gemma:2b" leave a leading space after each comma.
        model="$(echo "$model" | xargs)"
        if [ -n "$model" ]; then
            echo "Installing/Updating model $model..."
            # Quoted so a malformed entry cannot word-split the command.
            ollama pull "$model"  # fetches the latest build of that model tag
        fi
    done
fi
echo "All models installed/updated."

# Hand control back to the server process; its exit ends the container.
wait "$pid"

View File

@@ -0,0 +1,17 @@
Why these models?
Coding:
codegemma:2b → lightweight, good for completions.
codellama:7b → solid for structured code (like Docker Compose).
mistral:7b → generalist, also good with logic in code.
Writing (tech docs & emails):
llama3.2:3b → smaller generalist.
gemma:7b → more natural writing.
neural-chat:7b → conversational, good for email tone.

View File

@@ -0,0 +1,58 @@
version: "3.8"
services:
  broker:
    image: redis:7
    container_name: PaperlessNGX-REDIS
    # Persist with AOF-style snapshots every 60s if >=1 key changed.
    command: ["redis-server", "--save", "60", "1", "--loglevel", "warning"]
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 3s
      retries: 5
    restart: unless-stopped
  db:
    image: postgres:16
    container_name: PaperlessNGX-DB
    environment:
      POSTGRES_DB: paperless
      POSTGRES_USER: paperless
      POSTGRES_PASSWORD: "REDACTED_PASSWORD" # pragma: allowlist secret
    # Readiness probe so paperless only starts once the DB accepts
    # connections (previously it only waited for service_started).
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-d", "paperless", "-U", "paperless"]
      interval: 10s
      timeout: 5s
      retries: 10
    volumes:
      - /volume2/metadata/docker/paperless/postgres:/var/lib/postgresql/data
    restart: unless-stopped
  paperless:
    image: ghcr.io/paperless-ngx/paperless-ngx:latest
    container_name: PaperlessNGX
    depends_on:
      broker:
        condition: service_healthy
      db:
        condition: service_healthy
    environment:
      PUID: 1029
      PGID: 100
      TZ: America/Los_Angeles
      PAPERLESS_REDIS: redis://broker:6379
      PAPERLESS_DBHOST: db
      PAPERLESS_DBPORT: 5432
      PAPERLESS_DBNAME: paperless
      PAPERLESS_DBUSER: paperless
      # Must equal POSTGRES_PASSWORD in the db service above — the old
      # value ("paperless") did not match and would fail authentication.
      PAPERLESS_DBPASS: "REDACTED_PASSWORD" # pragma: allowlist secret
      PAPERLESS_URL: http://paperless.vish.local
      PAPERLESS_OCR_LANGUAGE: eng
    volumes:
      - /volume2/metadata/docker/paperless/data:/usr/src/paperless/data
      - /volume2/metadata/docker/paperless/inbox:/usr/src/paperless/consume
      - /volume2/metadata/docker/paperless/documents:/usr/src/paperless/export
      - /volume2/metadata/docker/paperless/media:/usr/src/paperless/media
    ports:
      - "5890:8000"
    restart: unless-stopped

View File

@@ -0,0 +1,168 @@
# =============================================================================
# PI-HOLE - NETWORK-WIDE AD BLOCKING AND DNS FILTERING
# =============================================================================
#
# SERVICE OVERVIEW:
# - Network-wide ad blocking and DNS filtering
# - Custom DNS server with blacklist/whitelist management
# - DHCP server capability (if needed)
# - Query logging and analytics dashboard
# - Local DNS resolution for homelab services
#
# DISASTER RECOVERY PRIORITY: HIGH
# - Critical for network functionality and security
# - Provides DNS resolution for homelab services
# - Blocks malicious domains and ads network-wide
# - Essential for maintaining network performance
#
# RECOVERY TIME OBJECTIVE (RTO): 15 minutes
# RECOVERY POINT OBJECTIVE (RPO): 24 hours (DNS logs and settings)
#
# DEPENDENCIES:
# - Volume1 for configuration and logs
# - Host network access for DNS (port 53)
# - Router configuration to use Pi-hole as DNS server
# - Internet connectivity for blocklist updates
#
# NETWORK IMPACT:
# - All devices use Pi-hole for DNS resolution
# - Router DNS settings: 192.168.1.100 (primary)
# - Fallback DNS: 1.1.1.1, 8.8.8.8 (if Pi-hole fails)
#
# =============================================================================
version: '3.3'
services:
  pihole:
    # CONTAINER IMAGE:
    #   - pihole/pihole: Official Pi-hole image
    #   - Includes DNS server, web interface, and FTL (Faster Than Light) daemon
    #   - Regular updates with new blocklists and security patches
    image: pihole/pihole
    # CONTAINER IDENTIFICATION:
    #   - pihole: Clear identification for logs and management
    container_name: pihole
    environment:
      # WEB INTERFACE CONFIGURATION:
      #   - WEB_PORT=9000: Custom web interface port (default 80)
      #   - Avoids conflicts with other web services
      #   - Accessible at: http://atlantis.vish.local:9000/admin
      - WEB_PORT=9000
      # ADMIN PASSWORD:
      #   - List-form env values are taken verbatim after `=`; the old
      #     WEBPASSWORD="..." form passed the quote characters as part of
      #     the password, so the value is now written unquoted.
      #   - SECURITY WARNING: Change this password immediately
      #   - TODO: Move to secrets management or environment file
      - WEBPASSWORD=REDACTED_PASSWORD # pragma: allowlist secret # TODO: CHANGE THIS PASSWORD
      # NETWORK CONFIGURATION:
      #   - FTLCONF_LOCAL_IPV4: Pi-hole's IP address for DNS responses
      #   - NOTE(review): header comments say the NAS is 192.168.1.100 but
      #     this is set to 10.0.0.250 — confirm which network is current.
      - FTLCONF_LOCAL_IPV4=10.0.0.250 # TODO: Fix IP address
      # TIMEZONE CONFIGURATION:
      #   - Fixed: was "American/Los_Angeles", which is not a valid IANA
      #     zone name and would fall back to UTC timestamps.
      - TZ=America/Los_Angeles
      # DNS DAEMON CONFIGURATION:
      #   - DNSMASQ_USER=root: User for dnsmasq DNS server
      #   - DNSMASQ_LISTENING=local: Listen only on local interfaces
      #   - Security: Prevents DNS amplification attacks
      - DNSMASQ_USER=root
      - DNSMASQ_LISTENING=local
    volumes:
      # DNSMASQ CONFIGURATION:
      #   - Custom DNS configurations, local DNS entries (*.vish.local)
      #   - BACKUP IMPORTANT: Custom DNS configurations
      - /volume1/docker/pihole/dnsmasq.d:/etc/dnsmasq.d
      # PI-HOLE CONFIGURATION AND DATA:
      #   - Blocklists, whitelists, query logs, settings
      #   - BACKUP CRITICAL: All Pi-hole configuration and history
      #   - Size: ~100MB-1GB depending on log retention
      - /volume1/docker/pihole/pihole:/etc/pihole
    # NETWORK CONFIGURATION:
    #   - host: Required to bind port 53 (DNS) and for DHCP if enabled
    #   - SECURITY NOTE: Exposes all container ports to host
    network_mode: host
    # RESTART POLICY:
    #   - unless-stopped: restarts on failure and on boot, but respects a
    #     manual `docker stop` (previous comment incorrectly said "always")
    #   - CRITICAL: DNS service must be available for the whole network
    restart: unless-stopped
# =============================================================================
# DISASTER RECOVERY PROCEDURES - PI-HOLE
# =============================================================================
#
# BACKUP COMMANDS:
# # Configuration backup:
# tar -czf /volume2/backups/pihole-$(date +%Y%m%d).tar.gz /volume1/docker/pihole/
#
# # Settings export (via web interface):
# # Admin > Settings > Teleporter > Backup
# # Save backup file to secure location
#
# RESTORE PROCEDURE:
# 1. Stop container: docker-compose -f pihole.yml down
# 2. Restore data: tar -xzf pihole-backup.tar.gz -C /volume1/docker/
# 3. Fix permissions: chown -R root:root /volume1/docker/pihole/
# 4. Start container: docker-compose -f pihole.yml up -d
# 5. Verify DNS: nslookup google.com 192.168.1.100
# 6. Check web interface: http://atlantis.vish.local:9000/admin
#
# NETWORK CONFIGURATION (Post-Recovery):
# 1. Router DNS settings:
# Primary DNS: 192.168.1.100 (Pi-hole)
# Secondary DNS: 1.1.1.1 (Cloudflare backup)
#
# 2. Local DNS entries (add to dnsmasq.d/02-local.conf):
# address=/atlantis.vish.local/192.168.1.100
# address=/calypso.vish.local/192.168.1.101
# address=/concord-nuc.vish.local/192.168.1.102
#
# 3. Test local resolution:
# nslookup atlantis.vish.local
# nslookup plex.vish.local
#
# TROUBLESHOOTING:
# - DNS not working: Check port 53 availability, verify host networking
# - Web interface inaccessible: Check WEB_PORT setting and firewall
# - Slow DNS resolution: Check upstream DNS servers and network connectivity
# - Blocklists not updating: Verify internet connectivity and cron jobs
#
# EMERGENCY DNS FALLBACK:
# If Pi-hole fails completely:
# 1. Router > DHCP Settings > DNS Servers
# 2. Change to: 1.1.1.1, 8.8.8.8
# 3. Restart router DHCP or reboot devices
# 4. Restore Pi-hole service as soon as possible
#
# MONITORING AND HEALTH CHECKS:
# - DNS test: nslookup google.com 192.168.1.100
# - Web interface: curl -f http://localhost:9000/admin/
# - Query logs: docker exec pihole tail -f /var/log/pihole.log
# - Blocklist status: Check admin interface > Tools > Update Gravity
#
# SECURITY CONSIDERATIONS:
# - Change default admin password immediately
# - Regularly update blocklists
# - Monitor query logs for suspicious activity
# - Consider enabling DNSSEC validation
#
# =============================================================================

View File

@@ -0,0 +1,140 @@
# Piped - YouTube frontend
# Port: 8080
# Privacy-respecting YouTube frontend
version: "3.9"
services:
  # PostgreSQL backing store for the Piped backend.
  db:
    image: postgres
    container_name: Piped-DB
    hostname: piped-db
    mem_limit: 512m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    user: 1026:100
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-d", "piped", "-U", "pipeduser"]
      timeout: 45s
      interval: 10s
      retries: 10
    volumes:
      - /volume1/docker/piped/db:/var/lib/postgresql/data:rw
    environment:
      POSTGRES_DB: piped
      POSTGRES_USER: pipeduser
      POSTGRES_PASSWORD: "REDACTED_PASSWORD" # pragma: allowlist secret
    restart: on-failure:5
  # Video stream proxy; serves over a unix socket (UDS=1) shared with nginx.
  piped-proxy:
    image: 1337kavin/piped-proxy:latest
    container_name: Piped-PROXY
    hostname: piped-proxy
    mem_limit: 512m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    read_only: true
    volumes:
      # Socket directory, also mounted into the nginx service below.
      - /volume1/docker/piped/piped-proxy:/app/socket:rw
    environment:
      UDS: 1
    restart: on-failure:5
  # Piped API backend; config.properties carries DB and API settings.
  piped-back:
    image: 1337kavin/piped:latest
    container_name: Piped-BACKEND
    hostname: piped-backend
    mem_limit: 2g
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    # NOTE(review): stat /etc/passwd only proves the container runs,
    # not that the API is up — consider an HTTP probe.
    healthcheck:
      test: stat /etc/passwd || exit 1
    volumes:
      - /volume1/docker/piped/config.properties:/app/config.properties:ro
    restart: on-failure:5
    depends_on:
      db:
        condition: service_healthy
  # Frontend; entrypoint rewrites the baked-in API host to this deployment.
  piped-front:
    image: 1337kavin/piped-frontend:latest
    entrypoint: ash -c 'sed -i s/pipedapi.kavin.rocks/pipedapi.vishinator.synology.me/g /usr/share/nginx/html/assets/* && /docker-entrypoint.sh && nginx -g "daemon off;"'
    container_name: Piped-FRONTEND
    hostname: piped-frontend
    mem_limit: 1g
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:80
    restart: on-failure:5
    depends_on:
      piped-back:
        condition: service_healthy
  # Front door: routes api/proxy/frontend vhosts; host port 8045.
  nginx:
    image: nginx:mainline-alpine
    container_name: Piped-NGINX
    hostname: nginx
    mem_limit: 512m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:80
    ports:
      - 8045:80
    volumes:
      - /volume1/docker/piped/nginx.conf:/etc/nginx/nginx.conf:ro
      - /volume1/docker/piped/pipedapi.conf:/etc/nginx/conf.d/pipedapi.conf:ro
      - /volume1/docker/piped/pipedproxy.conf:/etc/nginx/conf.d/pipedproxy.conf:ro
      - /volume1/docker/piped/pipedfrontend.conf:/etc/nginx/conf.d/pipedfrontend.conf:ro
      - /volume1/docker/piped/ytproxy.conf:/etc/nginx/snippets/ytproxy.conf:ro
      # Unix socket dir published by piped-proxy above.
      - /volume1/docker/piped/piped-proxy:/var/run/ytproxy:rw
    restart: on-failure:5
    depends_on:
      piped-back:
        condition: service_healthy
      piped-front:
        condition: service_healthy
      piped-proxy:
        condition: service_started
  # Hyperpipe (music frontend) API; host port 3771.
  hyperpipe-back:
    image: codeberg.org/hyperpipe/hyperpipe-backend:latest
    container_name: Hyperpipe-API
    hostname: hyperpipe-backend
    mem_limit: 512m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    read_only: true
    user: 1026:100
    ports:
      - 3771:3000
    environment:
      HYP_PROXY: hyperpipe-proxy.onrender.com
    restart: on-failure:5
    depends_on:
      nginx:
        condition: service_healthy
  # Hyperpipe frontend; entrypoint rewrites both API hosts; host port 8745.
  hyperpipe-front:
    image: codeberg.org/hyperpipe/hyperpipe:latest
    entrypoint: sh -c 'find /usr/share/nginx/html -type f -exec sed -i s/pipedapi.kavin.rocks/pipedapi.vishinator.synology.me/g {} \; -exec sed -i s/hyperpipeapi.onrender.com/hyperpipeapi.vishinator.synology.me/g {} \; && /docker-entrypoint.sh && nginx -g "daemon off;"'
    container_name: Hyperpipe-FRONTEND
    hostname: hyperpipe-frontend
    mem_limit: 512m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost
    ports:
      - 8745:80
    restart: on-failure:5
    depends_on:
      hyperpipe-back:
        condition: service_started

View File

@@ -0,0 +1,11 @@
# Portainer - Container management
# Host port: 10000 -> container UI on 9000 (8000 = edge agent tunnel)
# Docker container management UI
# Needs the docker socket mounted to manage the local daemon.
docker run -d --name=portainer \
  -p 8000:8000 \
  -p 10000:9000 \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v /volume1/docker/portainer:/data \
  --restart=always \
  portainer/portainer-ee

View File

@@ -0,0 +1,23 @@
# Redlib - Reddit frontend
# Host port: 9000 -> container 8080
# Privacy-respecting Reddit viewer
version: '3.9'
services:
  redlib:
    image: quay.io/redlib/redlib:latest
    container_name: Redlib
    restart: unless-stopped
    ports:
      - "9000:8080"
    environment:
      # REDLIB_DEFAULT_* set the UI defaults new visitors see; users can
      # still override them per-browser in Redlib's own settings page.
      - REDLIB_SFW_ONLY=off
      - REDLIB_BANNER=vish
      # Ask crawlers not to index this instance.
      - REDLIB_ROBOTS_DISABLE_INDEXING=on
      - REDLIB_DEFAULT_THEME=dracula
      - REDLIB_DEFAULT_SHOW_NSFW=on
      - REDLIB_DEFAULT_BLUR_NSFW=on
      - REDLIB_DEFAULT_HIDE_AWARDS=off
      - REDLIB_DEFAULT_LAYOUT=card
      - REDLIB_DEFAULT_AUTOPLAY_VIDEOS=on
      - REDLIB_DEFAULT_HIDE_HLS_NOTIFICATION=off

View File

@@ -0,0 +1,14 @@
# Nginx Repository Mirror
# Host port: 9661 -> container 8080 (unprivileged image listens on 8080)
# Local APT/package repository mirror
version: '3.8'
services:
  nginx:
    # Unprivileged variant: runs as non-root, serves on 8080 instead of 80.
    image: nginxinc/nginx-unprivileged:alpine
    container_name: nginx
    ports:
      - "9661:8080"
    volumes:
      # Read-only: this service only serves the mirrored files.
      - /volume1/website:/usr/share/nginx/html:ro
    restart: unless-stopped
    user: "1026:100"

View File

@@ -0,0 +1,35 @@
# Scrutiny Collector — Atlantis (Synology 1823xs+)
#
# Ships SMART data to the hub on homelab-vm.
# All 8 SATA bays populated + 4 NVMe slots.
# Synology uses /dev/sata* — requires explicit device list in collector.yaml.
# collector.yaml lives at: /volume1/docker/scrutiny-collector/collector.yaml
#
# privileged: true required on DSM (same as gluetun — kernel lacks nf_conntrack_netlink)
#
# Hub: http://100.67.40.126:8090
services:
  scrutiny-collector:
    image: ghcr.io/analogj/scrutiny:master-collector
    container_name: scrutiny-collector
    # Needed for raw SMART access on DSM; see header note.
    privileged: true
    volumes:
      # udev metadata so the collector can identify drives.
      - /run/udev:/run/udev:ro
      - /volume1/docker/scrutiny-collector/collector.yaml:/opt/scrutiny/config/collector.yaml:ro
    # Device list must stay in sync with collector.yaml (see header).
    devices:
      - /dev/sata1
      - /dev/sata2
      - /dev/sata3
      - /dev/sata4
      - /dev/sata5
      - /dev/sata6
      - /dev/sata7
      - /dev/sata8
      - /dev/nvme0n1
      - /dev/nvme1n1
      - /dev/nvme2n1
      - /dev/nvme3n1
    environment:
      # Where SMART results are pushed (hub web UI on homelab-vm).
      COLLECTOR_API_ENDPOINT: "http://100.67.40.126:8090"
    restart: unless-stopped

View File

@@ -0,0 +1,44 @@
# Stirling PDF - PDF tools
# Port: 7890 (host) → 8080 (container)
# PDF manipulation toolkit
services:
  stirling-pdf:
    container_name: Stirling-PDF
    image: docker.stirlingpdf.com/stirlingtools/stirling-pdf
    mem_limit: 4g
    cpu_shares: 1024
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: timeout 10s bash -c ':> /dev/tcp/127.0.0.1/8080' || exit 1
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 90s
    ports:
      # Quoted: unquoted colon-separated digits can hit YAML 1.1 sexagesimal parsing
      - "7890:8080"
    volumes:
      - /volume1/docker/stirling/data:/usr/share/tessdata:rw # Required for extra OCR languages
      - /volume1/docker/stirling/config:/configs:rw
      - /volume1/docker/stirling/logs:/logs:rw
      - /volume1/docker/stirling/customfiles:/customFiles:rw
      - /volume1/docker/stirling/pipeline:/pipeline:rw
    environment:
      PUID: 1026
      PGID: 100
      # Boolean-ish values quoted so YAML hands Compose strings, not booleans
      DISABLE_ADDITIONAL_FEATURES: "false"
      SECURITY_ENABLE_LOGIN: "true" # or "false"
      SECURITY_INITIAL_LOGIN_USERNAME: vish
      SECURITY_INITIAL_LOGIN_PASSWORD: "REDACTED_PASSWORD" # pragma: allowlist secret
      INSTALL_BOOK_AND_ADVANCED_HTML_OPS: "false" # or "true"
      SECURITY_CSRFDISABLED: "true" # or "false"
      SYSTEM_DEFAULTLOCALE: en-US # or fr-FR or de-DE
      UI_APPNAME: vishPDF
      UI_HOMEDESCRIPTION: vishPDF site
      UI_APPNAMENAVBAR: vish PDF
      SYSTEM_MAXFILESIZE: "5000" # Maximum file size in MB
      METRICS_ENABLED: "true"
      DISABLE_PIXEL: "true"
      SYSTEM_GOOGLEVISIBILITY: "false" # or "true"
    restart: on-failure:5

View File

@@ -0,0 +1,44 @@
# Matrix Synapse - Federated chat server
# Port: 8450 (host) → 8008 (container)
# Matrix homeserver for decentralized communication
# NOTE: the obsolete top-level "version" key was removed (ignored by Compose v2)
services:
  synapse-db:
    image: postgres:15
    container_name: Synapse-DB
    hostname: synapse-db
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-d", "synapsedb", "-U", "synapseuser"]
      timeout: 45s
      interval: 10s
      retries: 10
    user: 1026:100
    volumes:
      - /volume2/metadata/docker/synapse/db:/var/lib/postgresql/data
    environment:
      - POSTGRES_DB=synapsedb
      - POSTGRES_USER=synapseuser
      # FIX: list-form env values must NOT be wrapped in quotes — the quote
      # characters become part of the literal password handed to Postgres.
      - POSTGRES_PASSWORD=REDACTED_PASSWORD # pragma: allowlist secret
      - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
    restart: unless-stopped
  synapse:
    image: matrixdotorg/synapse:latest
    container_name: Synapse
    hostname: synapse
    security_opt:
      - no-new-privileges:true
    user: 1026:100
    environment:
      - TZ=America/Los_Angeles
      - SYNAPSE_CONFIG_PATH=/data/homeserver.yaml
    volumes:
      - /volume2/metadata/docker/synapse/data:/data
    ports:
      - "8450:8008/tcp"
    restart: unless-stopped
    depends_on:
      synapse-db:
        # Wait for the db's pg_isready healthcheck, not just container start
        condition: service_healthy

View File

@@ -0,0 +1,39 @@
# Syncthing - File synchronization
# Port: 8384 (web), 22000 (sync)
# Continuous file synchronization between devices
# Themed with self-hosted theme.park (Dracula)
# NOTE: the obsolete top-level "version" key was removed (ignored by Compose v2)
services:
  syncthing:
    image: ghcr.io/linuxserver/syncthing:latest
    container_name: syncthing
    restart: on-failure:5
    security_opt:
      - no-new-privileges:true
    healthcheck:
      # interval/timeout/retries left at Compose defaults
      test: curl -f http://localhost:8384/ || exit 1
    environment:
      - PUID=1026
      - PGID=100
      - TZ=America/Los_Angeles
      # Self-hosted theme.park instance on this host (port 8580)
      - DOCKER_MODS=ghcr.io/themepark-dev/theme.park:syncthing
      - TP_SCHEME=http
      - TP_DOMAIN=192.168.0.200:8580
      - TP_THEME=dracula
    volumes:
      # This contains config.xml, certs, DB, AND all your real data folders
      - /volume2/metadata/docker/syncthing:/config:rw
    ports:
      - "8384:8384" # Web UI
      - "22000:22000/tcp" # Sync protocol
      - "22000:22000/udp" # QUIC
      - "21027:21027/udp" # Local discovery
networks:
  default:
    driver: bridge

View File

@@ -0,0 +1,13 @@
# Expose non-Synology NVMe drives in DSM by patching synoinfo.conf.
# NOTE(review): `sudo -i` opens an interactive root shell and blocks — this
# is a runbook snippet to paste interactively, not a standalone script.
sudo -i
# Patch both files; /etc.defaults is presumably the template DSM restores
# from on update — TODO confirm on this DSM version.
for f in /etc.defaults/synoinfo.conf /etc/synoinfo.conf; do
  # Delete any existing entries first so re-running stays idempotent.
  sed -i '/nvme_force_show=/d' "$f"
  sed -i '/nvme_disks=/d' "$f"
  sed -i '/support_nvme_disk_compatibility=/d' "$f"
  sed -i '/support_disk_compatibility=/d' "$f"
  # Re-append the desired values (all four NVMe slots).
  echo 'nvme_force_show="yes"' >> "$f"
  echo 'nvme_disks="nvme0n1,nvme1n1,nvme2n1,nvme3n1"' >> "$f"
  echo 'support_nvme_disk_compatibility="no"' >> "$f"
  echo 'support_disk_compatibility="no"' >> "$f"
done

View File

@@ -0,0 +1,22 @@
# Termix - Web terminal
# Port: 5674 (host and container)
# Web-based terminal emulator
# NOTE: the obsolete top-level "version" key was removed (ignored by Compose v2)
services:
  termix:
    image: ghcr.io/lukegus/termix:latest
    container_name: Termix
    healthcheck:
      test: ["CMD-SHELL", "bash -c '</dev/tcp/127.0.0.1/5674' || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 90s
    ports:
      - "5674:5674"
    volumes:
      - /volume2/metadata/docker/termix:/app/data:rw
    environment:
      # Must match the container port probed by the healthcheck above
      PORT: "5674"
    restart: on-failure:5

View File

@@ -0,0 +1,28 @@
# Theme.Park - Self-hosted CSS themes for various apps
# https://github.com/themepark-dev/theme.park
#
# Self-hosting eliminates external dependency on GitHub/CDN
# All themed apps should set: TP_DOMAIN=atlantis:8580
#
# Themed apps on Atlantis:
#   - sonarr, radarr, lidarr, bazarr, prowlarr, tautulli
#   - sabnzbd, jackett, whisparr, jellyseerr, deluge
#   - plex, portainer, syncthing
# NOTE: the obsolete top-level "version" key was removed (ignored by Compose v2)
services:
  theme-park:
    image: ghcr.io/themepark-dev/theme.park:latest
    container_name: theme-park
    security_opt:
      - no-new-privileges:true
    environment:
      - PUID=1029
      - PGID=100
      - TZ=America/Los_Angeles
    ports:
      - "8580:80"
      - "8543:443"
    volumes:
      - /volume2/metadata/docker2/theme-park:/config
    restart: unless-stopped

View File

@@ -0,0 +1,139 @@
# =============================================================================
# UPTIME KUMA - SERVICE MONITORING AND STATUS PAGE
# =============================================================================
#
# SERVICE OVERVIEW:
# - Real-time monitoring of all homelab services
# - Beautiful status page for service availability
# - Alerting via email, Discord, Slack, SMS, and more
# - Docker container monitoring via Docker socket
#
# DISASTER RECOVERY PRIORITY: HIGH
# - Essential for monitoring service health during recovery
# - Provides immediate visibility into what's working/broken
# - Critical for validating recovery procedures
#
# RECOVERY TIME OBJECTIVE (RTO): 15 minutes
# RECOVERY POINT OBJECTIVE (RPO): 1 hour (monitoring history)
#
# DEPENDENCIES:
# - Volume1 for configuration storage
# - Docker socket access for container monitoring
# - Network connectivity to all monitored services
# - SMTP access for email notifications
#
# MONITORING TARGETS:
# - All critical homelab services (Plex, Vaultwarden, etc.)
# - Network infrastructure (router, switches)
# - Internet connectivity and speed
# - SSL certificate expiration
# - Disk space and system resources
#
# =============================================================================
# NOTE(review): the top-level "version" key is obsolete in Compose v2 and
# can be removed.
version: '3.3'
services:
  uptime-kuma:
    # CONTAINER IMAGE:
    # - louislam/uptime-kuma: Official Uptime Kuma image
    # - Lightweight Node.js application with SQLite database
    # - Regular updates with new monitoring features
    image: louislam/uptime-kuma
    # CONTAINER IDENTIFICATION:
    # - uptime_kuma: Clear identification for logs and management
    # - Used in monitoring dashboards and backup scripts
    container_name: uptime_kuma
    # NETWORK CONFIGURATION:
    # - 3444:3001: External port 3444 maps to internal port 3001
    # - Port 3444: Accessible via reverse proxy or direct access
    # - Port 3001: Standard Uptime Kuma web interface port
    # - Accessible at: http://atlantis.vish.local:3444
    ports:
      - '3444:3001'
    environment:
      # USER/GROUP PERMISSIONS:
      # - PUID=1026: User ID for file ownership (Synology user)
      # - PGID=100: Group ID for file access (Synology group)
      # - CRITICAL: Must match NAS permissions for data access
      - PUID=1026
      - PGID=100
      # TIMEZONE CONFIGURATION:
      # - TZ: Timezone for monitoring timestamps and scheduling
      # - Must match system timezone for accurate alerting
      # - Used for maintenance windows and notification timing
      - TZ=America/Los_Angeles
    volumes:
      # CONFIGURATION AND DATABASE:
      # - /volume1/docker/uptimekuma:/app/data
      # - Contains: SQLite database, configuration, notification settings
      # - BACKUP CRITICAL: Contains all monitoring history and settings
      # - Size: ~100MB-1GB depending on monitoring history
      - '/volume1/docker/uptimekuma:/app/data'
      # DOCKER SOCKET ACCESS:
      # - /var/run/docker.sock:/var/run/docker.sock
      # - Enables monitoring of Docker containers directly
      # - Allows automatic discovery of running services
      # - SECURITY NOTE: Provides full Docker API access
      - '/var/run/docker.sock:/var/run/docker.sock'
    # RESTART POLICY:
    # - unless-stopped: restarts on failure and at boot, but honors an
    #   explicit manual stop by the operator
    # - Monitoring should be available whenever the host is up
    restart: unless-stopped
# =============================================================================
# DISASTER RECOVERY PROCEDURES - UPTIME KUMA
# =============================================================================
#
# BACKUP COMMANDS:
# # Configuration backup:
# tar -czf /volume2/backups/uptimekuma-$(date +%Y%m%d).tar.gz /volume1/docker/uptimekuma/
#
# # Database backup (SQLite):
# docker exec uptime_kuma sqlite3 /app/data/kuma.db ".backup /app/data/kuma-backup-$(date +%Y%m%d).db"
#
# RESTORE PROCEDURE:
# 1. Stop container: docker-compose -f uptimekuma.yml down
# 2. Restore data: tar -xzf uptimekuma-backup.tar.gz -C /volume1/docker/
# 3. Fix permissions: chown -R 1026:100 /volume1/docker/uptimekuma/
# 4. Start container: docker-compose -f uptimekuma.yml up -d
# 5. Verify: Access http://atlantis.vish.local:3444
#
# MONITORING SETUP (Post-Recovery):
# 1. Add critical services:
# - Vaultwarden: https://pw.vish.gg
# - Plex: http://atlantis.vish.local:32400
# - Grafana: http://atlantis.vish.local:7099
# - Router: http://192.168.1.1
#
# 2. Configure notifications:
# - Email: SMTP settings for alerts
# - Discord/Slack: Webhook URLs
# - SMS: Twilio or similar service
#
# 3. Set up status page:
# - Public status page for family/friends
# - Custom domain if desired
# - Maintenance windows for planned outages
#
# TROUBLESHOOTING:
# - Database corruption: Restore from backup or recreate monitors
# - Permission errors: Check PUID/PGID match NAS user/group
# - Docker socket issues: Verify Docker daemon is running
# - Network connectivity: Check firewall and network configuration
#
# HEALTH CHECKS:
# - Service check: curl -f http://localhost:3444/api/status-page/heartbeat
# - Database check: docker exec uptime_kuma ls -la /app/data/
# - Logs: docker logs uptime_kuma
# - Performance: Monitor CPU/memory usage in Grafana
#
# =============================================================================

View File

@@ -0,0 +1,258 @@
# =============================================================================
# VAULTWARDEN PASSWORD MANAGER - CRITICAL SECURITY SERVICE
# =============================================================================
#
# SERVICE OVERVIEW:
# - Self-hosted Bitwarden-compatible password manager
# - CRITICAL: Contains ALL homelab passwords and secrets
# - Two-container setup: PostgreSQL database + Vaultwarden server
# - Accessible via https://pw.vish.gg (external domain)
#
# DISASTER RECOVERY PRIORITY: MAXIMUM CRITICAL
# - This service contains passwords for ALL other services
# - Loss of this data = loss of access to entire homelab
# - BACKUP FREQUENCY: Multiple times daily
# - BACKUP LOCATIONS: Local + offsite + encrypted cloud
#
# RECOVERY TIME OBJECTIVE (RTO): 15 minutes (CRITICAL)
# RECOVERY POINT OBJECTIVE (RPO): 1 hour (MAXIMUM)
#
# SECURITY CONSIDERATIONS:
# - Admin token required for configuration changes
# - SMTP configured for password reset emails
# - Database encrypted at rest
# - All communications over HTTPS only
#
# DEPENDENCIES:
# - Volume2 for data storage (separate from Volume1 for redundancy)
# - External domain (pw.vish.gg) for remote access
# - SMTP access for email notifications
# - Reverse proxy for HTTPS termination
#
# =============================================================================
version: "3.9"
services:
# ==========================================================================
# POSTGRESQL DATABASE - Password Vault Storage
# ==========================================================================
db:
# DATABASE IMAGE:
# - postgres:16-bookworm: Latest stable PostgreSQL with Debian base
# - Version 16: Latest major version with improved performance
# - bookworm: Debian 12 base for security and stability
image: postgres:16-bookworm
# CONTAINER IDENTIFICATION:
# - Vaultwarden-DB: Clear identification for monitoring/logs
# - vaultwarden-db: Internal hostname for service communication
container_name: Vaultwarden-DB
hostname: vaultwarden-db
# RESOURCE LIMITS:
# - mem_limit: 512MB maximum memory (sufficient for password database)
# - cpu_shares: 768 (medium priority, less than Vaultwarden app)
# - Prevents database from consuming excessive resources
mem_limit: 512m
cpu_shares: 768
# SECURITY CONFIGURATION:
# - no-new-privileges: Prevents privilege escalation attacks
# - user: 1026:100 (Synology user/group for file permissions)
# - CRITICAL: Must match NAS permissions for data access
security_opt:
- no-new-privileges:true
user: 1026:100
# HEALTH MONITORING:
# - pg_isready: PostgreSQL built-in health check command
# - Checks database connectivity and readiness
# - timeout: 45s (generous timeout for startup)
# - interval: 10s (frequent checks for quick failure detection)
# - retries: 10 (allows for slow startup during high load)
healthcheck:
test: ["CMD", "pg_isready", "-q", "-d", "vaultwarden", "-U", "vaultwardenuser"]
timeout: 45s
interval: 10s
retries: 10
# DATA PERSISTENCE:
# - /volume2/metadata/docker/vaultwarden/db: Database storage location
# - CRITICAL: Volume2 used for redundancy (separate from Volume1)
# - Contains ALL password vault data
# - BACKUP CRITICAL: This directory contains encrypted password database
volumes:
- /volume2/metadata/docker/vaultwarden/db:/var/lib/postgresql/data:rw
# DATABASE CONFIGURATION:
# - POSTGRES_DB: Database name for Vaultwarden
# - POSTGRES_USER: Database user (matches DATABASE_URL in Vaultwarden)
# - POSTGRES_PASSWORD: "REDACTED_PASSWORD" password (SECURITY: Change in production)
# - NOTE: These credentials are for database access, not vault access
environment:
POSTGRES_DB: vaultwarden
POSTGRES_USER: vaultwardenuser
POSTGRES_PASSWORD: "REDACTED_PASSWORD" # pragma: allowlist secret
# RESTART POLICY:
# - on-failure:5: Restart up to 5 times on failure
# - Prevents infinite restart loops while ensuring availability
# - Database failures are typically resolved by restart
restart: on-failure:5
# ==========================================================================
# VAULTWARDEN SERVER - Password Manager Application
# ==========================================================================
vaultwarden:
# APPLICATION IMAGE:
# - vaultwarden/server: Official Vaultwarden image
# - Rust-based, lightweight Bitwarden server implementation
    # - testing: pre-release channel tag currently deployed (consider
    #   pinning a release tag for production)
image: vaultwarden/server:testing
# CONTAINER IDENTIFICATION:
# - Vaultwarden: Main application container
# - vaultwarden: Internal hostname for service communication
container_name: Vaultwarden
hostname: vaultwarden
# RESOURCE ALLOCATION:
# - mem_limit: 256MB maximum (Rust is memory-efficient)
# - mem_reservation: 96MB guaranteed memory
# - cpu_shares: 1024 (high priority - critical service)
mem_limit: 256m
mem_reservation: 96m
cpu_shares: 1024
# SECURITY HARDENING:
# - no-new-privileges: Prevents privilege escalation
# - user: 1026:100 (Synology permissions for data access)
security_opt:
- no-new-privileges:true
user: 1026:100
# NETWORK CONFIGURATION:
# - 4080:4020: External port 4080 maps to internal port 4020
# - Port 4080: Accessible via reverse proxy for HTTPS
# - Port 4020: Internal Rocket web server port
ports:
- 4080:4020
# DATA PERSISTENCE:
# - /volume2/metadata/docker/vaultwarden/data: Application data
# - Contains: Vault data, attachments, icons, logs
# - BACKUP CRITICAL: Contains encrypted user vaults
# - Separate from database for additional redundancy
volumes:
- /volume2/metadata/docker/vaultwarden/data:/data:rw
environment:
# WEB SERVER CONFIGURATION:
# - ROCKET_PORT: Internal web server port (matches container port)
# - Must match the internal port in ports mapping
ROCKET_PORT: 4020
# DATABASE CONNECTION:
# - DATABASE_URL: PostgreSQL connection string
# - Format: postgresql://user:REDACTED_PASSWORD@host:port/database
# - Connects to 'db' service via Docker networking
DATABASE_URL: postgresql://vaultwardenuser:REDACTED_PASSWORD@vaultwarden-db:5432/vaultwarden # pragma: allowlist secret
# ADMIN INTERFACE SECURITY:
# - ADMIN_TOKEN: Argon2 hashed admin password
# - Required for admin panel access (/admin)
# - SECURITY: Generated with strong password and Argon2 hashing
# - DISABLE_ADMIN_TOKEN: false (admin panel enabled)
# - CRITICAL: Change this token in production
ADMIN_TOKEN: $$argon2id$$v=19$$m=65540,t=3,p=4$$azFxdU5ubEJvaDN6VkRSTENkbElYOFVWd1dmaDU3K0ZTNnI4ME45WHI3Yz0$$XdCzw6jqk8PY8vGEdd+LNhrpyUHbucTv2AIzZMzN4aQ # pragma: allowlist secret
DISABLE_ADMIN_TOKEN: false
# EXTERNAL ACCESS CONFIGURATION:
# - DOMAIN: External domain for Vaultwarden access
# - Used for: Email links, HTTPS redirects, CORS headers
# - CRITICAL: Must match reverse proxy configuration
DOMAIN: https://pw.vish.gg
# EMAIL CONFIGURATION (Password Reset & Notifications):
# - SMTP_HOST: Gmail SMTP server for email delivery
# - SMTP_FROM: Sender email address for notifications
# - SMTP_PORT: 587 (STARTTLS port for Gmail)
# - SMTP_SECURITY: starttls (encrypted email transmission)
# - SMTP_USERNAME: Gmail account for sending emails
# - SMTP_PASSWORD: "REDACTED_PASSWORD" # pragma: allowlist secret
# - SECURITY: Use app-specific password, not account password
SMTP_HOST: smtp.gmail.com
SMTP_FROM: your-email@example.com
SMTP_PORT: 587
SMTP_SECURITY: starttls
SMTP_USERNAME: your-email@example.com
SMTP_PASSWORD: "REDACTED_PASSWORD" # pragma: allowlist secret
# SSO CONFIGURATION (Authentik OIDC):
SSO_ENABLED: true
SSO_ONLY: false
SSO_AUTHORITY: https://sso.vish.gg/application/o/vaultwarden/
SSO_CLIENT_ID: vaultwarden
SSO_CLIENT_SECRET: "REDACTED_CLIENT_SECRET" # pragma: allowlist secret
SSO_ALLOW_UNKNOWN_EMAIL_VERIFICATION: true
SSO_SIGNUPS_MATCH_EMAIL: true
# RESTART POLICY:
# - on-failure:5: Restart up to 5 times on failure
# - Critical service must be highly available
# - Prevents infinite restart loops
restart: on-failure:5
# SERVICE DEPENDENCIES:
# - depends_on: Ensures database starts before Vaultwarden
# - condition: service_started (waits for container start, not readiness)
# - Database must be available for Vaultwarden to function
depends_on:
db:
condition: service_started
# =============================================================================
# DISASTER RECOVERY PROCEDURES - VAULTWARDEN
# =============================================================================
#
# CRITICAL BACKUP COMMANDS:
# # Database backup (encrypted vault data):
# docker exec Vaultwarden-DB pg_dump -U vaultwardenuser vaultwarden > /volume2/backups/vaultwarden-db-$(date +%Y%m%d-%H%M).sql
#
# # Application data backup:
# tar -czf /volume2/backups/vaultwarden-data-$(date +%Y%m%d-%H%M).tar.gz /volume2/metadata/docker/vaultwarden/data/
#
# # Complete backup (database + data):
# docker-compose exec db pg_dump -U vaultwardenuser vaultwarden | gzip > /volume2/backups/vaultwarden-complete-$(date +%Y%m%d-%H%M).sql.gz
#
# EMERGENCY RESTORE PROCEDURE:
# 1. Stop services: docker-compose down
# 2. Restore database:
# docker-compose up -d db
# docker exec -i Vaultwarden-DB psql -U vaultwardenuser vaultwarden < backup.sql
# 3. Restore data: tar -xzf vaultwarden-data-backup.tar.gz -C /volume2/metadata/docker/vaultwarden/
# 4. Fix permissions: chown -R 1026:100 /volume2/metadata/docker/vaultwarden/
# 5. Start services: docker-compose up -d
# 6. Verify: Access https://pw.vish.gg and test login
#
# OFFLINE PASSWORD ACCESS:
# - Export vault data before disasters
# - Keep encrypted backup of critical passwords
# - Store master password in secure physical location
# - Consider KeePass backup for offline access
#
# MONITORING & HEALTH CHECKS:
# - Health check: curl -f http://localhost:4080/alive
# - Database check: docker exec Vaultwarden-DB pg_isready
# - Admin panel: https://pw.vish.gg/admin (requires admin token)
# - Logs: docker logs Vaultwarden && docker logs Vaultwarden-DB
#
# SECURITY INCIDENT RESPONSE:
# 1. Immediately change admin token
# 2. Force logout all users via admin panel
# 3. Review access logs for suspicious activity
# 4. Update all critical passwords stored in vault
# 5. Enable 2FA for all accounts if not already enabled
#
# =============================================================================

View File

@@ -0,0 +1,148 @@
# =============================================================================
# WATCHTOWER - AUTOMATED DOCKER CONTAINER UPDATES
# =============================================================================
#
# SERVICE OVERVIEW:
# - Automatically updates Docker containers to latest versions
# - Monitors Docker Hub for image updates every 2 hours
# - Gracefully restarts containers with new images
# - Cleans up old images to save disk space
# - Provides metrics for Prometheus monitoring
#
# DISASTER RECOVERY PRIORITY: MEDIUM
# - Helpful for maintaining updated containers
# - Not critical for immediate disaster recovery
# - Can be disabled during recovery operations
#
# RECOVERY TIME OBJECTIVE (RTO): 1 hour
# RECOVERY POINT OBJECTIVE (RPO): N/A (stateless service)
#
# DEPENDENCIES:
# - Docker socket access (read-only)
# - Network connectivity to Docker Hub
# - Prometheus network for metrics
# - Sufficient disk space for image downloads
#
# SECURITY CONSIDERATIONS:
# - Read-only Docker socket access
# - No new privileges security option
# - Read-only container filesystem
# - Automatic cleanup of old images
#
# =============================================================================
services:
watchtower:
# CONTAINER IMAGE:
# - containrrr/watchtower:latest: Official Watchtower image
# - Community-maintained Docker container updater
# - Regular updates with new features and security patches
image: containrrr/watchtower:latest
# CONTAINER IDENTIFICATION:
# - WATCHTOWER: Clear identification for logs and monitoring
# - watchtower: Internal hostname for service communication
container_name: WATCHTOWER
hostname: watchtower
# PORT CONFIGURATION:
# - 8082:8080: HTTP API for metrics (8082 to avoid conflicts)
# - Allows Prometheus to scrape metrics endpoint
ports:
- "8082:8080"
# NETWORK CONFIGURATION:
# - prometheus-net: Connected to monitoring network
# - Allows Prometheus to scrape metrics
# - Isolated from other services for security
networks:
- prometheus-net
# RESOURCE ALLOCATION:
# - mem_limit: 128MB maximum (lightweight service)
# - mem_reservation: 50MB guaranteed memory
# - cpu_shares: 256 (low priority, background task)
mem_limit: 128m
mem_reservation: 50m
cpu_shares: 256
# SECURITY CONFIGURATION:
# - no-new-privileges: Prevents privilege escalation
# - read_only: Container filesystem is read-only
# - Minimal attack surface for automated service
security_opt:
- no-new-privileges=true
read_only: true
# DOCKER SOCKET ACCESS:
# - /var/run/docker.sock: Read-only access to Docker daemon
# - Required for monitoring and updating containers
# - SECURITY: Read-only prevents malicious container creation
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      # TIMEZONE CONFIGURATION:
      # - TZ: Timezone for scheduling and logging
      # - Must match system timezone for accurate scheduling
      TZ: America/Los_Angeles
      # IMAGE CLEANUP CONFIGURATION:
      # - WATCHTOWER_CLEANUP: true - Remove old images after updating
      # - Prevents disk space issues from accumulated old images
      # - CRITICAL: Saves significant disk space over time
      WATCHTOWER_CLEANUP: true # Remove old images after updating
      # VOLUME HANDLING:
      # - WATCHTOWER_REMOVE_VOLUMES: false - Preserve data volumes
      # - CRITICAL: Prevents data loss during container updates
      # - Volumes contain persistent application data
      WATCHTOWER_REMOVE_VOLUMES: false # false = keep attached volumes (preserve data)
      # DOCKER API CONFIGURATION:
      # - DOCKER_API_VERSION: 1.43 - Docker API version compatibility
      # - Must match or be compatible with Docker daemon version
      # - NOTE(review): unquoted 1.43 parses as a YAML float; Compose
      #   stringifies it back, but quoting as "1.43" would be safer
      DOCKER_API_VERSION: 1.43 # Synology DSM max supported API version
      # UPDATE BEHAVIOR:
      # - WATCHTOWER_INCLUDE_RESTARTING: true - Update restarting containers
      # - WATCHTOWER_INCLUDE_STOPPED: false - Skip stopped containers
      # - Ensures only active services are automatically updated
      WATCHTOWER_INCLUDE_RESTARTING: true # Also update containers that are restarting
      WATCHTOWER_INCLUDE_STOPPED: false # false = leave stopped containers alone
      # SCHEDULING CONFIGURATION:
      # - WATCHTOWER_SCHEDULE: "0 0 */2 * * *" - Every 2 hours
      # - Cron format: second minute hour day month weekday
      # - Frequent enough for security updates, not too disruptive
      WATCHTOWER_SCHEDULE: "0 0 */2 * * *" # Update & Scan containers every 2 hours
      # LABEL-BASED FILTERING:
      # - WATCHTOWER_LABEL_ENABLE: false - Update all containers
      # - Alternative: true (only update containers with watchtower labels)
      WATCHTOWER_LABEL_ENABLE: false
      # RESTART BEHAVIOR:
      # - WATCHTOWER_ROLLING_RESTART: false - one-by-one restarts are
      #   disabled here because some containers depend on each other
      # - WATCHTOWER_TIMEOUT: 30s - Wait time for graceful shutdown
      WATCHTOWER_ROLLING_RESTART: false # Disabled due to dependent containers
      WATCHTOWER_TIMEOUT: 30s
      # MONITORING INTEGRATION:
      # - WATCHTOWER_HTTP_API_METRICS: true - Enable Prometheus metrics
      # - WATCHTOWER_HTTP_API_TOKEN: bearer token for the metrics endpoint
      # - Allows monitoring of update frequency and success rates
      # - HTTP_API_UPDATE disabled to allow scheduled runs
      WATCHTOWER_HTTP_API_METRICS: true # Metrics for Prometheus
      WATCHTOWER_HTTP_API_TOKEN: "REDACTED_HTTP_TOKEN" # Token for Prometheus
      # RESTART POLICY:
      # - on-failure:5: Restart up to 5 times on failure
      # - Ensures automatic updates continue even after failures
      # - Prevents infinite restart loops
    restart: on-failure:5
networks:
prometheus-net:
external: true

View File

@@ -0,0 +1,25 @@
# WireGuard - VPN server
# Port: 51820/udp (tunnel), 51821 (web UI)
# Modern, fast VPN tunnel
services:
  wgeasy:
    image: ghcr.io/wg-easy/wg-easy
    network_mode: "bridge"
    container_name: wgeasy
    ports:
      - "51820:51820/udp"
      - "51821:51821"
    cap_add:
      - NET_ADMIN
      - SYS_MODULE
    sysctls:
      - net.ipv4.conf.all.src_valid_mark=1
      - net.ipv4.ip_forward=1
    volumes:
      - /volume2/metadata/docker/wg-easy:/etc/wireguard
    environment:
      - WG_HOST=vishinator.synology.me
      # FIX: list-form env values must NOT be wrapped in quotes — the quote
      # characters become part of the literal hash. bcrypt hashes also
      # contain `$`, which Compose interpolates; escape each as `$$`.
      # NOTE(review): recent wg-easy releases call this PASSWORD_HASH —
      # confirm the variable name against the deployed image version.
      - HASH_PASSWORD=REDACTED_PASSWORD # pragma: allowlist secret
    restart: unless-stopped

View File

@@ -0,0 +1,40 @@
# YoutubeDL-Material - YouTube downloader
# Port: 8084 (host) → 17442 (container)
# Web GUI for youtube-dl/yt-dlp
# NOTE: the obsolete top-level "version" key was removed (ignored by Compose v2)
services:
  youtube_downloader:
    container_name: youtube_downloader
    image: tzahi12345/youtubedl-material:nightly
    mem_limit: 6g
    cpu_shares: 768
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:17442/"]
      interval: 30s
      timeout: 5s
      retries: 3
    security_opt:
      - no-new-privileges:true
    restart: on-failure:5
    environment:
      - PUID=1029
      - PGID=100
      - ytdl_default_downloader=yt-dlp
      - ytdl_use_local_db=true
      # Must match the container port probed by the healthcheck above
      - ytdl_port=17442
      - write_ytdl_config=true
    ports:
      - "8084:17442"
    volumes:
      - /volume2/metadata/docker/youtubedl/appdata:/app/appdata:rw
      - /volume2/metadata/docker/youtubedl/audio:/app/audio:rw
      - /volume2/metadata/docker/youtubedl/subscriptions:/app/subscriptions:rw
      - /volume2/metadata/docker/youtubedl/users:/app/users:rw
      - /volume2/metadata/docker/youtubedl/video:/app/video:rw

View File

@@ -0,0 +1,38 @@
# Zot — OCI pull-through registry cache
# =============================================================================
# Single-instance pull-through cache for Docker Hub, lscr.io, ghcr.io, quay.io
#
# How it works:
#   - Each Docker host points its registry-mirror at http://100.83.230.112:5050
#     (host port 5050 maps to zot's internal listener on 5000)
# - On first pull, Zot fetches from upstream and caches locally
# - Subsequent pulls on any host are served from local cache instantly
# - No credentials required for public images
#
# Storage: /volume2/metadata/docker2/zot/ (NVMe RAID1 — fast, ~10-20GB steady state)
#
# Web UI: http://100.83.230.112:5050 (browse cached images)
# Metrics: http://100.83.230.112:5050/metrics (Prometheus)
#
# Per-host mirror config (one-time, manual):
# Atlantis/Calypso: Container Manager → Registry → Settings → Mirror
#   Other Linux hosts: /etc/docker/daemon.json → "registry-mirrors": ["http://100.83.230.112:5050"]
#
# To add credentials (Docker Hub authenticated pulls, ghcr.io):
# Drop /volume2/metadata/docker2/zot/credentials.json on Atlantis
# See docs/services/individual/zot.md for format
# =============================================================================
services:
  zot:
    # Architecture-pinned image tag (linux-amd64)
    image: ghcr.io/project-zot/zot-linux-amd64:latest
    container_name: zot
    restart: unless-stopped
    ports:
      # Host 5050 → container 5000 (zot's HTTP listener per config.json)
      - "5050:5000"
    volumes:
      # Image cache store (rootDirectory in config.json)
      - /volume2/metadata/docker2/zot/data:/var/lib/registry
      - /volume2/metadata/docker2/zot/config.json:/etc/zot/config.json:ro
      # credentials.json is optional — drop it on Atlantis to enable authenticated pulls
      # - /volume2/metadata/docker2/zot/credentials.json:/etc/zot/credentials.json:ro
    labels:
      # Opt-in flag read by Watchtower for auto-updates
      - com.centurylinklabs.watchtower.enable=true

View File

@@ -0,0 +1,84 @@
{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "/var/lib/registry",
"gc": true,
"gcDelay": "1h",
"gcInterval": "24h",
"dedupe": true
},
"http": {
"address": "0.0.0.0",
"port": "5000"
},
"log": {
"level": "info"
},
"extensions": {
"sync": {
"enable": true,
"registries": [
{
"urls": ["https://registry-1.docker.io"],
"onDemand": true,
"tlsVerify": true,
"maxRetries": 3,
"retryDelay": "5m",
"content": [
{
"prefix": "**"
}
]
},
{
"urls": ["https://lscr.io"],
"onDemand": true,
"tlsVerify": true,
"maxRetries": 3,
"retryDelay": "5m",
"content": [
{
"prefix": "**"
}
]
},
{
"urls": ["https://ghcr.io"],
"onDemand": true,
"tlsVerify": true,
"maxRetries": 3,
"retryDelay": "5m",
"content": [
{
"prefix": "**"
}
]
},
{
"urls": ["https://quay.io"],
"onDemand": true,
"tlsVerify": true,
"maxRetries": 3,
"retryDelay": "5m",
"content": [
{
"prefix": "**"
}
]
}
]
},
"ui": {
"enable": true
},
"metrics": {
"enable": true,
"prometheus": {
"path": "/metrics"
}
},
"search": {
"enable": true
}
}
}