Sanitized mirror from private repository - 2026-03-29 13:33:25 UTC
Some checks failed
Documentation / Build Docusaurus (push) Failing after 5m0s
Documentation / Deploy to GitHub Pages (push) Has been skipped

This commit is contained in:
Gitea Mirror Bot
2026-03-29 13:33:25 +00:00
commit 75d4f4e02b
1280 changed files with 331190 additions and 0 deletions

View File

View File

@@ -0,0 +1,22 @@
# ArchiveBox — self-hosted web archiving.
# Usage:
#   docker-compose run archivebox init --setup
#   docker-compose up
#   echo "https://example.com" | docker-compose run archivebox archivebox add
#   docker-compose run archivebox add --depth=1 https://example.com/some/feed.rss
#   docker-compose run archivebox config --set PUBLIC_INDEX=True
#   docker-compose run archivebox help
# Documentation:
#   https://github.com/ArchiveBox/ArchiveBox/wiki/Docker#docker-compose
version: '2.4'  # ignored by Compose v2; kept for legacy docker-compose
services:
  archivebox:
    image: archivebox/archivebox:master
    command: server --quick-init 0.0.0.0:8000
    ports:
      # Quoted: unquoted host:container pairs can be mis-typed by YAML 1.1
      # parsers (sexagesimal integers) — always quote port mappings.
      - "8000:8000"
    environment:
      - ALLOWED_HOSTS=*
      - MEDIA_MAX_SIZE=750m
    volumes:
      - ./data:/data

View File

@@ -0,0 +1,17 @@
# Chatpad AI — ChatGPT web interface.
# Host port: 5690 → container port 80 (per the mapping below).
version: '3.9'
services:
  deiucanta:
    image: 'ghcr.io/deiucanta/chatpad:latest'
    restart: unless-stopped
    ports:
      - '5690:80'
    container_name: Chatpad-AI
    healthcheck:
      # NOTE(review): assumes the image ships curl and serves /health —
      # confirm; otherwise the container will be reported unhealthy.
      test: ["CMD", "curl", "-f", "http://localhost:80/health"]
      interval: 30s
      timeout: 10s
      retries: 3

View File

@@ -0,0 +1,30 @@
# Conduit — lightweight Matrix homeserver.
# Host port: 8455 → container 6167 (CONDUIT_PORT).
version: "3.9"
services:
  matrix-conduit:
    image: matrixconduit/matrix-conduit:latest
    container_name: Matrix-Conduit
    hostname: matrix-conduit
    security_opt:
      - no-new-privileges:true
    user: "1000:1000"
    ports:
      - "8455:6167"
    volumes:
      - "/volume1/docker/matrix-conduit:/var/lib/matrix-conduit/"
    environment:
      - CONDUIT_SERVER_NAME=vishtestingserver
      - CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit/
      - CONDUIT_DATABASE_BACKEND=rocksdb
      - CONDUIT_PORT=6167
      - CONDUIT_MAX_REQUEST_SIZE=20000000  # bytes (~20 MB uploads)
      - CONDUIT_ALLOW_REGISTRATION=true
      - CONDUIT_ALLOW_FEDERATION=true
      - CONDUIT_TRUSTED_SERVERS=["matrix.org"]
      - CONDUIT_MAX_CONCURRENT_REQUESTS=250
      - CONDUIT_ADDRESS=0.0.0.0
      # FIX: in list-form environment entries, values are passed verbatim —
      # the previous `CONDUIT_CONFIG=''` delivered two literal quote
      # characters instead of an empty string (env-only configuration).
      - CONDUIT_CONFIG=
    restart: unless-stopped

View File

@@ -0,0 +1,9 @@
# draw.io (diagrams.net) — diagram editor.
# Host ports: 8443 → container 8443, and 5022 → container 8080.
version: '3.9'
services:
  drawio:
    image: jgraph/drawio
    restart: unless-stopped
    ports:
      - '8443:8443'
      - '5022:8080'
    container_name: drawio

View File

@@ -0,0 +1,15 @@
# Element Web — Matrix chat web client.
# Host port: 9000 → container port 80 (per the mapping below).
version: '3'
services:
  element-web:
    image: vectorim/element-web:latest
    container_name: element-web
    restart: unless-stopped
    volumes:
      # Instance configuration (homeserver URL etc.) bind-mounted from host.
      - /home/vish/docker/elementweb/config.json:/app/config.json
    ports:
      - 9000:80

View File

@@ -0,0 +1,88 @@
# PhotoPrism — AI-powered photo management.
# Web UI: host port 2342.
version: "3.9"
services:
  db:
    image: mariadb:jammy
    container_name: PhotoPrism-DB
    hostname: photoprism-db
    mem_limit: 1g
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
      - seccomp:unconfined
      - apparmor:unconfined
    user: 1000:1000
    healthcheck:
      # $$ escapes the '$' from Compose interpolation so the container shell
      # expands MYSQL_ROOT_PASSWORD at runtime.
      test: ["CMD-SHELL", "mysqladmin ping -u root -p$$MYSQL_ROOT_PASSWORD | grep 'mysqld is alive' || exit 1"]
    volumes:
      - /home/vish/docker/photoprism/db:/var/lib/mysql:rw
    environment:
      TZ: America/Los_Angeles
      MYSQL_ROOT_PASSWORD: "REDACTED_PASSWORD"
      MYSQL_DATABASE: photoprism
      MYSQL_USER: photoprism-user
      MYSQL_PASSWORD: "REDACTED_PASSWORD"
    restart: on-failure:5
  photoprism:
    image: photoprism/photoprism:latest
    container_name: PhotoPrism
    hostname: photoprism
    mem_limit: 6g
    cpu_shares: 1024
    security_opt:
      - no-new-privileges:true
      - seccomp:unconfined
      - apparmor:unconfined
    user: 1000:1009
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:2342
    ports:
      - 2342:2342
    volumes:
      - /home/vish/docker/photoprism/import:/photoprism/import:rw # *Optional* base folder from which files can be imported to originals
      - /home/vish/docker/photoprism/storage:/photoprism/storage:rw
      - /home/vish/docker/photoprism/originals:/photoprism/originals:rw
      # - /volume1/docker/photoprism/family:/photoprism/originals/family:rw # *Additional* media folders can be mounted like this
    environment:
      PHOTOPRISM_ADMIN_USER: vish
      PHOTOPRISM_ADMIN_PASSWORD: "REDACTED_PASSWORD"
      PHOTOPRISM_UID: 1000
      PHOTOPRISM_GID: 1000
      PHOTOPRISM_AUTH_MODE: password
      PHOTOPRISM_SITE_URL: http://localhost:2342/
      PHOTOPRISM_ORIGINALS_LIMIT: 5120
      PHOTOPRISM_HTTP_COMPRESSION: gzip
      PHOTOPRISM_READONLY: false
      PHOTOPRISM_EXPERIMENTAL: false
      PHOTOPRISM_DISABLE_CHOWN: false
      PHOTOPRISM_DISABLE_WEBDAV: false
      PHOTOPRISM_DISABLE_SETTINGS: false
      PHOTOPRISM_DISABLE_TENSORFLOW: false
      PHOTOPRISM_DISABLE_FACES: false
      PHOTOPRISM_DISABLE_CLASSIFICATION: false
      PHOTOPRISM_DISABLE_RAW: false
      PHOTOPRISM_RAW_PRESETS: false
      PHOTOPRISM_JPEG_QUALITY: 100
      PHOTOPRISM_DETECT_NSFW: false
      PHOTOPRISM_UPLOAD_NSFW: true
      PHOTOPRISM_SPONSOR: true
      PHOTOPRISM_DATABASE_DRIVER: mysql
      PHOTOPRISM_DATABASE_SERVER: photoprism-db:3306
      PHOTOPRISM_DATABASE_NAME: photoprism
      PHOTOPRISM_DATABASE_USER: photoprism-user
      PHOTOPRISM_DATABASE_PASSWORD: "REDACTED_PASSWORD"
      PHOTOPRISM_WORKERS: 2
      PHOTOPRISM_THUMB_FILTER: blackman # best to worst: blackman, lanczos, cubic, linear
      PHOTOPRISM_APP_MODE: standalone # progressive web app MODE - fullscreen, standalone, minimal-ui, browser
      # PHOTOPRISM_SITE_CAPTION: "AI-Powered Photos App"
      # PHOTOPRISM_SITE_DESCRIPTION: ""
      # PHOTOPRISM_SITE_AUTHOR: ""
    working_dir: "/photoprism"
    restart: on-failure:5
    depends_on:
      db:
        # FIX: the db service defines a healthcheck, so gate startup on it
        # actually answering (service_healthy) instead of merely being
        # started — avoids first-boot DB connection failures while MariaDB
        # initialises its data directory.
        condition: service_healthy

View File

@@ -0,0 +1,24 @@
# Pi.Alert — network device monitoring / scanner.
# Web UI port: 17811 via host networking (set by the PORT env below;
# the stock default is 20211 — this deployment overrides it).
version: "3.9"
services:
  pi.alert:
    container_name: Pi.Alert
    healthcheck:
      # Probes the overridden web UI port; must match PORT below.
      test: curl -f http://localhost:17811/ || exit 1
    mem_limit: 2g
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    volumes:
      - /home/vish/docker/pialert/config:/home/pi/pialert/config:rw
      - /home/vish/docker/pialert/db:/home/pi/pialert/db:rw
      - /home/vish/docker/pialert/logs:/home/pi/pialert/front/log:rw
    environment:
      TZ: America/Los_Angeles
      PORT: 17811
    # Host networking is required so ARP scanning can see the real LAN.
    network_mode: host
    restart: on-failure:5
    image: jokobsk/pi.alert:latest

View File

@@ -0,0 +1,65 @@
# ProxiTok — privacy-respecting TikTok viewer.
# Host port: 9770 → container port 80 (per the mapping below).
version: "3.9"
services:
  redis:
    image: redis
    # Persist every 60s if >=1 key changed; keep logs quiet.
    command: redis-server --save 60 1 --loglevel warning
    container_name: ProxiTok-REDIS
    hostname: proxitok-redis
    mem_limit: 256m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    read_only: true
    user: 1000:1000
    healthcheck:
      test: ["CMD-SHELL", "redis-cli ping || exit 1"]
    restart: on-failure:5
  signer:
    # Signs TikTok API requests for the main ProxiTok service.
    image: ghcr.io/pablouser1/signtok:master
    container_name: ProxiTok-SIGNER
    hostname: proxitok-signer
    mem_limit: 512m
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    read_only: true
    user: 1000:1000
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:8080/ || exit 1
    restart: on-failure:5
  proxitok:
    image: ghcr.io/pablouser1/proxitok:master
    container_name: ProxiTok
    hostname: proxitok
    mem_limit: 1g
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    healthcheck:
      # Liveness-only probe: confirms the container filesystem responds.
      test: stat /etc/passwd || exit 1
    ports:
      - 9770:80
    volumes:
      - proxitok-cache:/cache
    environment:
      LATTE_CACHE: /cache
      API_CACHE: redis
      REDIS_HOST: proxitok-redis
      REDIS_PORT: 6379
      API_SIGNER: remote
      API_SIGNER_URL: http://proxitok-signer:8080/signature
    restart: on-failure:5
    depends_on:
      redis:
        condition: service_healthy
      signer:
        condition: service_healthy
volumes:
  proxitok-cache:

View File

@@ -0,0 +1,145 @@
# Concord NUC
**Hostname**: concord-nuc / vish-concord-nuc
**IP Address**: 192.168.68.100 (static, eno1)
**Tailscale IP**: 100.72.55.21
**OS**: Ubuntu (cloud-init based)
**SSH**: `ssh vish-concord-nuc` (via Tailscale — see `~/.ssh/config`)
---
## Network Configuration
### Static IP Setup
`eno1` is configured with a **static IP** (`192.168.68.100/22`) via netplan. This is required because AdGuard Home binds its DNS listener to a specific IP, and DHCP lease changes would cause it to crash.
**Netplan config**: `/etc/netplan/50-cloud-init.yaml`
```yaml
network:
ethernets:
eno1:
dhcp4: false
addresses:
- 192.168.68.100/22
routes:
- to: default
via: 192.168.68.1
nameservers:
addresses:
- 9.9.9.9
- 1.1.1.1
version: 2
wifis:
wlp1s0:
access-points:
This_Wifi_Sucks:
password: "REDACTED_PASSWORD"
dhcp4: true
```
**Cloud-init is disabled** from managing network config:
`/etc/cloud/cloud.cfg.d/99-disable-network-config.cfg` — prevents reboots from reverting to DHCP.
> **Warning**: If you ever re-enable cloud-init networking or wipe this file, eno1 will revert to DHCP and AdGuard will start crash-looping on the next restart. See the Troubleshooting section below.
---
## Services
| Service | Port | URL |
|---------|------|-----|
| AdGuard Home (Web UI) | 9080 | http://192.168.68.100:9080 |
| AdGuard Home (DNS) | 53 | 192.168.68.100:53, 100.72.55.21:53 |
| Home Assistant | - | see homeassistant.yaml |
| Plex | - | see plex.yaml |
| Syncthing | - | see syncthing.yaml |
| Invidious | 3000 | https://in.vish.gg (public), http://192.168.68.100:3000 |
| Materialious | 3001 | http://192.168.68.100:3001 |
| YourSpotify | 4000, 15000 | see yourspotify.yaml |
---
## Deployed Stacks
| Compose File | Service | Notes |
|-------------|---------|-------|
| `adguard.yaml` | AdGuard Home | DNS ad blocker, binds to 192.168.68.100 |
| `homeassistant.yaml` | Home Assistant | Home automation |
| `plex.yaml` | Plex | Media server |
| `syncthing.yaml` | Syncthing | File sync |
| `wireguard.yaml` | WireGuard / wg-easy | VPN |
| `dyndns_updater.yaml` | DynDNS | Dynamic DNS |
| `node-exporter.yaml` | Node Exporter | Prometheus metrics |
| `piped.yaml` | Piped | YouTube alternative frontend |
| `yourspotify.yaml` | YourSpotify | Spotify stats |
| `invidious/invidious.yaml` | Invidious + Companion + DB + Materialious | YouTube frontend — https://in.vish.gg |
---
## Troubleshooting
### AdGuard crash-loops on startup
**Symptom**: `docker ps` shows AdGuard as "Restarting" or "Up Less than a second"
**Cause**: AdGuard binds DNS to a specific IP (`192.168.68.100`). If the host's IP changes (DHCP), or if AdGuard rewrites its config to the current DHCP address, it will fail to bind on next start.
**Diagnose**:
```bash
docker logs AdGuard --tail 20
# Look for: "bind: cannot assign requested address"
# The log will show which IP it tried to use
```
**Fix**:
```bash
# 1. Check what IP AdGuard thinks it should use
sudo grep -A3 'bind_hosts' /home/vish/docker/adguard/config/AdGuardHome.yaml
# 2. Check what IP eno1 actually has
ip addr show eno1 | grep 'inet '
# 3. If they don't match, update the config
sudo sed -i 's/- 192.168.68.XXX/- 192.168.68.100/' /home/vish/docker/adguard/config/AdGuardHome.yaml
# 4. Restart AdGuard
docker restart AdGuard
```
**If the host IP has reverted to DHCP** (e.g. after a reboot wiped the static config):
```bash
# Re-apply static IP
sudo netplan apply
# Verify
ip addr show eno1 | grep 'inet '
# Should show: inet 192.168.68.100/22
```
---
## Incident History
### 2026-02-22 — AdGuard crash-loop / IP mismatch
- **Root cause**: Host had drifted from `192.168.68.100` to DHCP-assigned `192.168.68.87`. AdGuard briefly started, rewrote its config to `.87`, then the static IP was applied and `.87` was gone — causing a bind failure loop.
- **Resolution**:
1. Disabled cloud-init network management
2. Set `eno1` to static `192.168.68.100/22` via netplan
3. Corrected `AdGuardHome.yaml` `bind_hosts` back to `.100`
4. Restarted AdGuard — stable
---
### 2026-02-27 — Invidious 502 / crash-loop
- **Root cause 1**: PostgreSQL 14 defaults `pg_hba.conf` to `scram-sha-256` for host connections. Invidious's Crystal DB driver does not support scram-sha-256, causing a "password authentication failed" crash loop even with correct credentials.
- **Fix**: Changed last line of `/var/lib/postgresql/data/pg_hba.conf` in the `invidious-db` container from `host all all all scram-sha-256` to `host all all 172.21.0.0/16 trust`, then ran `SELECT pg_reload_conf();`.
- **Root cause 2**: Portainer had saved the literal string `REDACTED_SECRET_KEY` as the `SERVER_SECRET_KEY` env var for the companion container (Portainer's secret-redaction placeholder was baked in as the real value). The latest companion image validates the key strictly (exactly 16 alphanumeric chars), causing it to crash.
- **Fix**: Updated the Portainer stack file via API (`PUT /api/stacks/584`), replacing all `REDACTED_*` placeholders with the real values.
---
*Last updated: 2026-02-27*

View File

@@ -0,0 +1,23 @@
# AdGuard Home - DNS ad blocker
# Web UI: http://192.168.68.100:9080
# DNS: 192.168.68.100:53, 100.72.55.21:53
#
# IMPORTANT: This container binds DNS to 192.168.68.100 (configured in AdGuardHome.yaml).
# The host MUST have a static IP of 192.168.68.100 on eno1, otherwise AdGuard will
# crash-loop with "bind: cannot assign requested address".
# See README.md for static IP setup and troubleshooting.
services:
  adguard:
    image: adguard/adguardhome
    container_name: AdGuard
    mem_limit: 2g
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    restart: unless-stopped
    # Host networking so AdGuard can bind port 53 on specific host IPs.
    network_mode: host
    volumes:
      - /home/vish/docker/adguard/config:/opt/adguardhome/conf:rw  # AdGuardHome.yaml lives here
      - /home/vish/docker/adguard/data:/opt/adguardhome/work:rw    # query log / stats database
    environment:
      TZ: America/Los_Angeles

View File

@@ -0,0 +1,28 @@
# Diun — Docker Image Update Notifier
#
# Watches all running containers on this host and sends ntfy
# notifications when upstream images update their digest.
# Schedule: Mondays 09:00 (weekly cadence).
#
# ntfy topic: https://ntfy.vish.gg/diun
services:
  diun:
    image: crazymax/diun:latest
    container_name: diun
    volumes:
      # Docker socket grants read access to the local container list.
      - /var/run/docker.sock:/var/run/docker.sock
      - diun-data:/data  # digest database (persists across restarts)
    environment:
      LOG_LEVEL: info
      DIUN_WATCH_WORKERS: "20"
      DIUN_WATCH_SCHEDULE: "0 9 * * 1"  # cron: 09:00 every Monday
      DIUN_WATCH_JITTER: 30s
      DIUN_PROVIDERS_DOCKER: "true"
      DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT: "true"
      DIUN_NOTIF_NTFY_ENDPOINT: "https://ntfy.vish.gg"
      DIUN_NOTIF_NTFY_TOPIC: "diun"
    restart: unless-stopped
volumes:
  diun-data:

View File

@@ -0,0 +1,28 @@
pds-g^KU_n-Ck6JOm^BQu9pcct0DI/MvsCnViM6kGHGVCigvohyf/HHHfHG8c=
8. Start the Server
Use screen or tmux to keep the server running in the background.
Start Master (Overworld) Server
bash
Copy
Edit
cd ~/dst/bin
screen -S dst-master ./dontstarve_dedicated_server_nullrenderer -cluster MyCluster -shard Master
Start Caves Server
Open a new session:
bash
Copy
Edit
screen -S dst-caves ./dontstarve_dedicated_server_nullrenderer -cluster MyCluster -shard Caves
[Service]
User=dst
ExecStart=/home/dstserver/dst/bin/dontstarve_dedicated_server_nullrenderer -cluster MyCluster -shard Master
Restart=always

View File

@@ -0,0 +1,15 @@
# Dozzle agent — exposes this host's container logs to a central
# Dozzle UI over port 7007.
services:
  dozzle-agent:
    image: amir20/dozzle:latest
    container_name: dozzle-agent
    command: agent  # agent mode: no local UI, serves logs to the hub
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    ports:
      - "7007:7007"
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "/dozzle", "healthcheck"]
      interval: 30s
      timeout: 5s
      retries: 3

View File

@@ -0,0 +1,17 @@
# Dynamic DNS Updater
# Updates Cloudflare DNS records when the public IP changes.
version: '3.8'
services:
  ddns-vish-13340:
    image: favonia/cloudflare-ddns:latest
    # Host networking so the detected public IP matches the host's.
    network_mode: host
    restart: unless-stopped
    user: "1000:1000"
    read_only: true
    # NOTE(review): Docker capability names are conventionally uppercase
    # ("ALL") — confirm lowercase `all` is accepted by the engine in use.
    cap_drop: [all]
    security_opt: [no-new-privileges:true]
    environment:
      # Token is injected from the environment / .env file — not committed.
      - CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
      - DOMAINS=api.vish.gg,api.vp.vish.gg,in.vish.gg,client.spotify.vish.gg,spotify.vish.gg
      - PROXIED=false

View File

@@ -0,0 +1,55 @@
# Home Assistant stack — home automation plus local voice pipeline.
# HA and matter-server use host networking (HA UI on 8123); the Wyoming
# services (piper/whisper/openwakeword) are bridged on homeassistant-stack.
version: '3'
services:
  homeassistant:
    container_name: homeassistant
    image: ghcr.io/home-assistant/home-assistant:stable
    network_mode: host  # required for device discovery; ignores the default network below
    restart: unless-stopped
    environment:
      - TZ=America/Los_Angeles
    volumes:
      - /home/vish/docker/homeassistant:/config
      - /etc/localtime:/etc/localtime:ro
  matter-server:
    container_name: matter-server
    image: ghcr.io/home-assistant-libs/python-matter-server:stable
    network_mode: host
    restart: unless-stopped
    volumes:
      - /home/vish/docker/matter:/data
  piper:
    # Text-to-speech (Wyoming protocol).
    container_name: piper
    image: rhasspy/wyoming-piper:latest
    restart: unless-stopped
    ports:
      - "10200:10200"
    volumes:
      - /home/vish/docker/piper:/data
    command: --voice en_US-lessac-medium
  whisper:
    # Speech-to-text (Wyoming protocol).
    container_name: whisper
    image: rhasspy/wyoming-whisper:latest
    restart: unless-stopped
    ports:
      - "10300:10300"
    volumes:
      - /home/vish/docker/whisper:/data
    command: --model tiny-int8 --language en
  openwakeword:
    # Wake-word detection (Wyoming protocol).
    container_name: openwakeword
    image: rhasspy/wyoming-openwakeword:latest
    restart: unless-stopped
    ports:
      - "10400:10400"
    command: --preload-model ok_nabu
networks:
  default:
    name: homeassistant-stack

View File

@@ -0,0 +1,13 @@
#!/bin/bash
# Invidious DB initialisation script
# Runs once on first container start (docker-entrypoint-initdb.d).
#
# Adds a pg_hba.conf rule allowing connections from any Docker subnet
# using trust auth. Without this, PostgreSQL rejects the invidious
# container when the Docker network is assigned a different subnet after
# a recreate (the default pg_hba.conf only covers localhost).
#
# SECURITY NOTE(review): "0.0.0.0/0 trust" disables password auth for ANY
# address that can reach the server. This is tolerable only if the DB port
# is never published outside the Docker network — confirm, or narrow the
# CIDR to the Docker address pool (e.g. 172.16.0.0/12).
set -e
# Allow connections from any host on the Docker bridge network
echo "host all all 0.0.0.0/0 trust" >> /var/lib/postgresql/data/pg_hba.conf

View File

@@ -0,0 +1,115 @@
# Invidious + Companion + PostgreSQL + Materialious.
# Invidious UI on host 3000; Materialious (alt frontend) on host 3001.
version: "3"
configs:
  # nginx config injected into the materialious container; $$ escapes the
  # '$' of nginx variables from Compose interpolation.
  materialious_nginx:
    content: |
      events { worker_connections 1024; }
      http {
        default_type application/octet-stream;
        include /etc/nginx/mime.types;
        server {
          listen 80;
          # The video player passes dashUrl as a relative path that resolves
          # to this origin — proxy Invidious API/media paths to local service.
          # (in.vish.gg resolves to the external IP which is unreachable via
          # hairpin NAT from inside Docker; invidious:3000 is on same network)
          location ~ ^/(api|companion|vi|ggpht|videoplayback|sb|s_p|ytc|storyboards) {
            proxy_pass http://invidious:3000;
            proxy_set_header Host $$host;
            proxy_set_header X-Real-IP $$remote_addr;
            proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
          }
          location / {
            root /usr/share/nginx/html;
            try_files $$uri /index.html;
          }
        }
      }
services:
  invidious:
    image: quay.io/invidious/invidious:latest
    platform: linux/amd64
    restart: unless-stopped
    ports:
      - "3000:3000"
    environment:
      # Inline Invidious config. invidious_companion_key here must equal
      # SERVER_SECRET_KEY of the companion service below (both values match).
      # SECURITY NOTE(review): keys/passwords are committed in plain text —
      # consider moving to an env file and rotating.
      INVIDIOUS_CONFIG: |
        db:
          dbname: invidious
          user: kemal
          password: "REDACTED_PASSWORD"
          host: invidious-db
          port: 5432
        check_tables: true
        invidious_companion:
          - private_url: "http://companion:8282/companion"
        invidious_companion_key: "pha6nuser7ecei1E"
        hmac_key: "Kai5eexiewohchei"
    healthcheck:
      test: wget -nv --tries=1 --spider http://127.0.0.1:3000/api/v1/trending || exit 1
      interval: 30s
      timeout: 5s
      retries: 2
    logging:
      options:
        max-size: "1G"
        max-file: "4"
    depends_on:
      - invidious-db
      - companion
  companion:
    image: quay.io/invidious/invidious-companion:latest
    platform: linux/amd64
    environment:
      # Must match invidious_companion_key above; the image validates it
      # strictly (see README incident history: 16 alphanumeric chars).
      - SERVER_SECRET_KEY=pha6nuser7ecei1E
    restart: unless-stopped
    cap_drop:
      - ALL
    read_only: true
    volumes:
      - companioncache:/var/tmp/youtubei.js:rw
    security_opt:
      - no-new-privileges:true
    logging:
      options:
        max-size: "1G"
        max-file: "4"
  invidious-db:
    image: postgres:14
    restart: unless-stopped
    environment:
      POSTGRES_DB: invidious
      POSTGRES_USER: kemal
      POSTGRES_PASSWORD: "REDACTED_PASSWORD" # pragma: allowlist secret
    volumes:
      - postgresdata:/var/lib/postgresql/data
      - ./config/sql:/config/sql
      - ./docker/init-invidious-db.sh:/docker-entrypoint-initdb.d/init-invidious-db.sh
    healthcheck:
      # $$ keeps the variables for the container shell to expand.
      test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
  materialious:
    image: wardpearce/materialious:latest
    container_name: materialious
    restart: unless-stopped
    environment:
      VITE_DEFAULT_INVIDIOUS_INSTANCE: "https://in.vish.gg"
    configs:
      - source: materialious_nginx
        target: /etc/nginx/nginx.conf
    ports:
      - "3001:80"
    logging:
      options:
        max-size: "1G"
        max-file: "4"
volumes:
  postgresdata:
  companioncache:

View File

@@ -0,0 +1,4 @@
vish@vish-concord-nuc:~/invidious/invidious$ pwgen 16 1 # for Invidious (HMAC_KEY)
Kai5eexiewohchei
vish@vish-concord-nuc:~/invidious/invidious$ pwgen 16 1 # for Invidious companion (invidious_companion_key)
pha6nuser7ecei1E

View File

@@ -0,0 +1,65 @@
# Invidious (legacy stack using inv-sig-helper instead of companion).
# Invidious UI on host 3000; sig helper is internal-only on 12999.
version: "3.8"
services:
  invidious:
    image: quay.io/invidious/invidious:latest
    restart: unless-stopped
    ports:
      - "3000:3000"
    environment:
      # FIX: the po_token line previously read `po_token: "…"=="` — stray
      # quote characters split the value and made the embedded config
      # invalid; the trailing "==" (base64 padding) belongs inside the
      # quoted token.
      INVIDIOUS_CONFIG: |
        db:
          dbname: invidious
          user: kemal
          password: "REDACTED_PASSWORD"
          host: invidious-db
          port: 5432
        check_tables: true
        signature_server: inv_sig_helper:12999
        visitor_data: ""
        po_token: "REDACTED_TOKEN=="
        hmac_key: "9Uncxo4Ws54s7dr0i3t8"
    healthcheck:
      test: ["CMD", "wget", "-nv", "--tries=1", "--spider", "http://127.0.0.1:3000/api/v1/trending"]
      interval: 30s
      timeout: 5s
      retries: 2
    logging:
      options:
        max-size: "1G"
        max-file: "4"
    depends_on:
      - invidious-db
  inv_sig_helper:
    image: quay.io/invidious/inv-sig-helper:latest
    init: true
    command: ["--tcp", "0.0.0.0:12999"]
    environment:
      - RUST_LOG=info
    restart: unless-stopped
    cap_drop:
      - ALL
    read_only: true
    security_opt:
      - no-new-privileges:true
  invidious-db:
    image: docker.io/library/postgres:14
    restart: unless-stopped
    volumes:
      - postgresdata:/var/lib/postgresql/data
      - ./config/sql:/config/sql
      - ./docker/init-invidious-db.sh:/docker-entrypoint-initdb.d/init-invidious-db.sh
    environment:
      POSTGRES_DB: invidious
      POSTGRES_USER: kemal
      POSTGRES_PASSWORD: "REDACTED_PASSWORD"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
      interval: 30s
      timeout: 5s
      retries: 3
volumes:
  postgresdata:

View File

@@ -0,0 +1,2 @@
Docker all-in-one refresh (destructive: removes volumes and orphan containers, then pulls fresh images and restarts detached):
docker-compose down --volumes --remove-orphans && docker-compose pull && docker-compose up -d

View File

@@ -0,0 +1,28 @@
# client.spotify.vish.gg — YourSpotify frontend (proxies to host port 4000).
# Redirect all HTTP traffic to HTTPS
server {
    listen 80;
    server_name client.spotify.vish.gg;
    return 301 https://$host$request_uri;
}
# HTTPS configuration for the subdomain
server {
    listen 443 ssl;
    server_name client.spotify.vish.gg;
    # SSL Certificates (managed by Certbot)
    ssl_certificate /etc/letsencrypt/live/client.spotify.vish.gg/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/client.spotify.vish.gg/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
    # Proxy to Docker container (YourSpotify web, published on 4000)
    location / {
        proxy_pass http://127.0.0.1:4000; # Maps to your Docker container
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

View File

@@ -0,0 +1,63 @@
# in.vish.gg — public reverse proxy for Invidious (host port 3000).
server {
    # NOTE(review): the Certbot-inserted conditional below is redundant —
    # the unconditional return at the end of this block already redirects
    # everything to HTTPS. Harmless, but one of the two can be removed.
    if ($host = in.vish.gg) {
        return 301 https://$host$request_uri;
    } # managed by Certbot
    listen 80;
    server_name in.vish.gg;
    # Redirect all HTTP traffic to HTTPS
    return 301 https://$host$request_uri;
}
server {
    listen 443 ssl http2;
    server_name in.vish.gg;
    # SSL Certificates (Certbot paths)
    ssl_certificate /etc/letsencrypt/live/in.vish.gg/fullchain.pem; # managed by Certbot
    ssl_certificate_key /etc/letsencrypt/live/in.vish.gg/privkey.pem; # managed by Certbot
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
    # --- Reverse Proxy to Invidious ---
    location / {
        proxy_pass http://127.0.0.1:3000;
        proxy_http_version 1.1;
        # Required headers for reverse proxying
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        # WebSocket and streaming stability
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        # Disable buffering for video streams
        proxy_buffering off;
        proxy_request_buffering off;
        # Avoid premature timeouts during long playback
        proxy_read_timeout 600s;
        proxy_send_timeout 600s;
    }
    # Cache static assets (images, css, js) for better performance
    # NOTE(review): because this location declares its own add_header,
    # the server-level security headers below are NOT inherited here
    # (nginx add_header inheritance is all-or-nothing per level).
    location ~* \.(?:jpg|jpeg|png|gif|ico|css|js|webp)$ {
        expires 30d;
        add_header Cache-Control "public, no-transform";
        proxy_pass http://127.0.0.1:3000;
    }
    # Security headers (optional but sensible)
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    add_header X-Content-Type-Options nosniff;
    add_header X-Frame-Options SAMEORIGIN;
    add_header Referrer-Policy same-origin;
}

View File

@@ -0,0 +1,28 @@
# spotify.vish.gg — YourSpotify backend API (proxies to host port 15000).
# Redirect HTTP to HTTPS
server {
    listen 80;
    server_name spotify.vish.gg;
    return 301 https://$host$request_uri;
}
# HTTPS server block
server {
    listen 443 ssl;
    server_name spotify.vish.gg;
    # SSL Certificates (managed by Certbot)
    ssl_certificate /etc/letsencrypt/live/spotify.vish.gg/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/spotify.vish.gg/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
    # Proxy requests to backend API (YourSpotify server, published on 15000)
    location / {
        proxy_pass http://127.0.0.1:15000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

View File

@@ -0,0 +1,74 @@
# vp.vish.gg / api.vp.vish.gg / proxy.vp.vish.gg — Piped reverse proxies.
# All three vhosts forward to the Piped stack's nginx, published on host
# port 8080 (see piped.yaml).
# Redirect HTTP to HTTPS
server {
    listen 80;
    server_name vp.vish.gg api.vp.vish.gg proxy.vp.vish.gg;
    return 301 https://$host$request_uri;
}
# HTTPS Reverse Proxy for Piped
server {
    listen 443 ssl http2;
    server_name vp.vish.gg;
    # SSL Certificates (managed by Certbot)
    ssl_certificate /etc/letsencrypt/live/vp.vish.gg/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/vp.vish.gg/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
    # Proxy to the Piped frontend. This nginx runs on the HOST, so
    # 127.0.0.1:8080 (the compose-published port) is the correct target —
    # Docker service names are only resolvable from inside the stack.
    location / {
        proxy_pass http://127.0.0.1:8080;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}
# HTTPS Reverse Proxy for Piped API
server {
    listen 443 ssl http2;
    server_name api.vp.vish.gg;
    # SSL Certificates
    ssl_certificate /etc/letsencrypt/live/vp.vish.gg/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/vp.vish.gg/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
    # Proxy requests to Piped API backend (routed by Host header upstream)
    location / {
        proxy_pass http://127.0.0.1:8080;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}
# HTTPS Reverse Proxy for Piped Proxy (for video streaming)
server {
    listen 443 ssl http2;
    server_name proxy.vp.vish.gg;
    # SSL Certificates
    ssl_certificate /etc/letsencrypt/live/vp.vish.gg/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/vp.vish.gg/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
    # Proxy video playback requests through ytproxy (no public caching)
    location ~ (/videoplayback|/api/v4/|/api/manifest/) {
        include snippets/ytproxy.conf;
        add_header Cache-Control private always;
        proxy_hide_header Access-Control-Allow-Origin;
    }
    location / {
        include snippets/ytproxy.conf;
        add_header Cache-Control "public, max-age=604800";
        proxy_hide_header Access-Control-Allow-Origin;
    }
}

View File

@@ -0,0 +1,24 @@
# Node Exporter - Prometheus metrics exporter for hardware/OS metrics
# Exposes metrics on port 9101 (changed from 9100 due to host conflict)
# Used by: Grafana/Prometheus monitoring stack
# Note: Using bridge network with port mapping instead of host network
# to avoid conflict with host-installed node_exporter
version: "3.8"
services:
  node-exporter:
    image: quay.io/prometheus/node-exporter:latest
    container_name: node_exporter
    ports:
      - "9101:9100"
    volumes:
      # Host filesystems mounted read-only so the exporter can report them.
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--path.rootfs=/rootfs'
      # NOTE(review): --collector.filesystem.ignored-mount-points was renamed
      # upstream to --collector.filesystem.mount-points-exclude; with the
      # :latest tag, confirm the old flag is still accepted or the container
      # will exit on startup.
      - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'
    restart: unless-stopped

View File

@@ -0,0 +1,79 @@
# Piped — privacy-respecting YouTube frontend.
# Public entry: the nginx service on host port 8080 fronts the frontend,
# API backend, and ytproxy (over a shared unix socket volume).
services:
  piped-frontend:
    image: 1337kavin/piped-frontend:latest
    restart: unless-stopped
    depends_on:
      - piped
    environment:
      BACKEND_HOSTNAME: api.vp.vish.gg
      HTTP_MODE: https
    container_name: piped-frontend
  piped-proxy:
    image: 1337kavin/piped-proxy:latest
    restart: unless-stopped
    environment:
      - UDS=1  # serve over a unix domain socket shared with nginx below
    volumes:
      - piped-proxy:/app/socket
    container_name: piped-proxy
  piped:
    image: 1337kavin/piped:latest
    restart: unless-stopped
    volumes:
      - ./config/config.properties:/app/config.properties:ro
    depends_on:
      - postgres
    container_name: piped-backend
  bg-helper:
    image: 1337kavin/bg-helper-server:latest
    restart: unless-stopped
    container_name: piped-bg-helper
  nginx:
    image: nginx:mainline-alpine
    restart: unless-stopped
    ports:
      - "8080:80"
    volumes:
      - ./config/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./config/pipedapi.conf:/etc/nginx/conf.d/pipedapi.conf:ro
      - ./config/pipedproxy.conf:/etc/nginx/conf.d/pipedproxy.conf:ro
      - ./config/pipedfrontend.conf:/etc/nginx/conf.d/pipedfrontend.conf:ro
      - ./config/ytproxy.conf:/etc/nginx/snippets/ytproxy.conf:ro
      - piped-proxy:/var/run/ytproxy  # ytproxy unix socket
    container_name: nginx
    depends_on:
      - piped
      - piped-proxy
      - piped-frontend
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.piped.rule=Host(`FRONTEND_HOSTNAME`, `BACKEND_HOSTNAME`, `PROXY_HOSTNAME`)"
      - "traefik.http.routers.piped.entrypoints=websecure"
      - "traefik.http.services.piped.loadbalancer.server.port=8080"
  postgres:
    image: pgautoupgrade/pgautoupgrade:16-alpine
    restart: unless-stopped
    volumes:
      - ./data/db:/var/lib/postgresql/data
    environment:
      - POSTGRES_DB=piped
      - POSTGRES_USER=piped
      # FIX: list-form environment values are passed verbatim (no shell quote
      # stripping) — the previous `"…"` wrapping made the literal quote
      # characters part of the password. Keep the value unquoted.
      - POSTGRES_PASSWORD=REDACTED_PASSWORD
    container_name: postgres
  watchtower:
    image: containrrr/watchtower
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /etc/timezone:/etc/timezone:ro
    environment:
      - WATCHTOWER_CLEANUP=true
      - WATCHTOWER_INCLUDE_RESTARTING=true
    container_name: watchtower
    # NOTE(review): 'varnish' is listed but no varnish service exists in this
    # file — presumably a leftover from an older stack; confirm and drop.
    command: piped-frontend piped-backend piped-proxy piped-bg-helper varnish nginx postgres watchtower
volumes:
  piped-proxy: null

View File

@@ -0,0 +1,28 @@
# Plex Media Server
# Web UI: http://<host-ip>:32400/web
# Uses Intel QuickSync for hardware transcoding (via /dev/dri)
# Media library mounted from NAS at /mnt/nas
services:
  plex:
    image: linuxserver/plex:latest
    container_name: plex
    network_mode: host  # Plex discovery/DLNA work best with host networking
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Los_Angeles
      - UMASK=022
      - VERSION=docker
      # Get claim token from: https://www.plex.tv/claim/
      # (token is only needed on first start; it expires after a few minutes)
      - PLEX_CLAIM=claim-REDACTED_APP_PASSWORD
    volumes:
      - /home/vish/docker/plex/config:/config
      - /mnt/nas/:/data/media
    devices:
      # Intel QuickSync for hardware transcoding
      - /dev/dri:/dev/dri
    security_opt:
      - no-new-privileges:true
    restart: on-failure:10
    # custom-cont-init.d/01-wait-for-nas.sh waits up to 120s for /mnt/nas before starting Plex

View File

@@ -0,0 +1,22 @@
# Portainer Edge Agent - concord-nuc
# Connects to Portainer server on Atlantis (100.83.230.112:8000)
# Deploy: docker compose -f portainer_agent.yaml up -d
services:
  portainer_edge_agent:
    image: portainer/agent:2.33.7
    container_name: portainer_edge_agent
    restart: unless-stopped
    volumes:
      # Full host + Docker access — required for edge-agent management.
      - /var/run/docker.sock:/var/run/docker.sock
      - /var/lib/docker/volumes:/var/lib/docker/volumes
      - /:/host
      - portainer_agent_data:/data
    environment:
      EDGE: "1"
      EDGE_ID: "be02f203-f10c-471a-927c-9ca2adac254c"
      EDGE_KEY: "aHR0cDovLzEwMC44My4yMzAuMTEyOjEwMDAwfGh0dHA6Ly8xMDAuODMuMjMwLjExMjo4MDAwfGtDWjVkTjJyNXNnQTJvMEF6UDN4R3h6enBpclFqa05Wa0FCQkU0R1IxWFU9fDQ0MzM5OA"
      # NOTE(review): polling without TLS verification — acceptable only
      # because traffic rides the Tailscale overlay; confirm.
      EDGE_INSECURE_POLL: "1"
volumes:
  portainer_agent_data:

View File

@@ -0,0 +1,22 @@
# Scrutiny Collector — concord-nuc (Intel NUC)
#
# Ships SMART data to the hub on homelab-vm.
# NUC typically has one internal NVMe + optionally a SATA SSD.
# Adjust device list: run `lsblk` to see actual drives.
#
# Hub: http://100.67.40.126:8090
services:
  scrutiny-collector:
    image: ghcr.io/analogj/scrutiny:master-collector
    container_name: scrutiny-collector
    cap_add:
      - SYS_RAWIO
      - SYS_ADMIN
    volumes:
      - /run/udev:/run/udev:ro
    devices:
      # NOTE(review): only the SATA disk is passed through — the header says
      # an NVMe is typically present (/dev/nvme0); confirm against lsblk and
      # add it if SMART data for the NVMe is wanted.
      - /dev/sda
    environment:
      COLLECTOR_API_ENDPOINT: "http://100.67.40.126:8090"
    restart: unless-stopped

View File

@@ -0,0 +1,19 @@
# Syncthing - File synchronization
# Port: 8384 (web), 22000 (sync), 21027/udp (local discovery)
# Continuous file synchronization between devices
services:
  syncthing:
    container_name: syncthing
    ports:
      - 8384:8384
      - 22000:22000/tcp
      - 22000:22000/udp
      - 21027:21027/udp
    environment:
      # NOTE(review): linuxserver images usually also take PUID/PGID; without
      # them this runs with the image defaults — confirm file ownership on
      # the mounted data dirs is as intended.
      - TZ=America/Los_Angeles
    volumes:
      - /home/vish/docker/syncthing/config:/config
      - /home/vish/docker/syncthing/data1:/data1
      - /home/vish/docker/syncthing/data2:/data2
    restart: unless-stopped
    image: ghcr.io/linuxserver/syncthing

View File

@@ -0,0 +1,25 @@
# WireGuard - VPN server
# Port: 51820/udp
# Modern, fast VPN tunnel
services:
  wg-easy:
    container_name: wg-easy
    image: ghcr.io/wg-easy/wg-easy
    environment:
      # Fixed: the value was written as HASH_PASSWORD="..." inside a
      # list-form environment entry, which makes the double quotes a literal
      # part of the value Compose passes to the container.
      # NOTE(review): recent wg-easy releases call this variable
      # PASSWORD_HASH, and bcrypt hashes need each `$` doubled as `$$` in
      # compose files — confirm both against the image version in use.
      - HASH_PASSWORD=REDACTED_PASSWORD
      - WG_HOST=vishconcord.tplinkdns.com
    volumes:
      - ./config:/etc/wireguard
      # Kernel modules visible so the container can load wireguard.
      - /lib/modules:/lib/modules
    ports:
      - "51820:51820/udp"  # WireGuard tunnel
      - "51821:51821/tcp"  # web UI
    restart: unless-stopped
    cap_add:
      - NET_ADMIN
      - SYS_MODULE  # required to load the wireguard kernel module
    sysctls:
      - net.ipv4.ip_forward=1
      - net.ipv4.conf.all.src_valid_mark=1

View File

@@ -0,0 +1,49 @@
# Your Spotify - Listening statistics
# Ports: 15000 (backend API), 4000 (web frontend)
# Self-hosted Spotify listening history tracker
# NOTE: the top-level `version` key is obsolete under the Compose
# specification and was dropped (the other stacks in this repo omit it too).
services:
  server:
    image: yooooomi/your_spotify_server
    restart: unless-stopped
    ports:
      - "15000:8080"  # backend API published on host port 15000
    depends_on:
      - mongo
    environment:
      - API_ENDPOINT=https://spotify.vish.gg # Public URL for backend
      - CLIENT_ENDPOINT=https://client.spotify.vish.gg # Public URL for frontend
      # NOTE(review): the Spotify app credentials below are committed in
      # plain text — rotate them and load from an .env file kept out of VCS.
      - SPOTIFY_PUBLIC=d6b3bda999f042099ce79a8b6e9f9e68 # Spotify app client ID
      - SPOTIFY_SECRET=72c650e7a25f441baa245b963003a672 # Spotify app client secret
      - SPOTIFY_REDIRECT_URI=https://client.spotify.vish.gg/callback # Redirect URI for OAuth
      - CORS=https://client.spotify.vish.gg # Allow frontend's origin
    networks:
      - spotify_network
  mongo:
    container_name: mongo
    image: mongo:4.4.8  # pinned MongoDB version
    restart: unless-stopped
    volumes:
      - yourspotify_mongo_data:/data/db  # named volume for persistent storage
    networks:
      - spotify_network
  web:
    image: yooooomi/your_spotify_client
    restart: unless-stopped
    ports:
      - "4000:3000"  # frontend published on host port 4000
    environment:
      - API_ENDPOINT=https://spotify.vish.gg  # backend URL used by the client
    networks:
      - spotify_network
volumes:
  yourspotify_mongo_data:
    driver: local
networks:
  spotify_network:
    driver: bridge

View File

@@ -0,0 +1,234 @@
# Guava - TrueNAS Scale Server
**Hostname**: guava
**IP Address**: 192.168.0.100
**Tailscale IP**: 100.75.252.64
**Domain**: guava.crista.home
**OS**: TrueNAS Scale 25.04.2.6 (Debian 12 Bookworm)
**Kernel**: 6.12.15-production+truenas
---
## Hardware Specifications
| Component | Specification |
|-----------|---------------|
| **CPU** | 12 cores |
| **RAM** | 30 GB |
| **Storage** | ZFS pools (1.5TB+ available) |
| **Docker** | 27.5.0 |
| **Compose** | v2.32.3 |
---
## Storage Layout
### Boot Pool
- `/` - Root filesystem (433GB available)
- ZFS dataset: `boot-pool/ROOT/25.04.2.6`
### Data Pool (`/mnt/data/`)
| Dataset | Size Used | Purpose |
|---------|-----------|---------|
| `data/guava_turquoise` | 3.0TB / 4.5TB | Primary storage (67% used) |
| `data/photos` | 159GB | Photo storage |
| `data/jellyfin` | 145GB | Media library |
| `data/llama` | 59GB | LLM models |
| `data/plane-data` | ~100MB | Plane.so application data |
| `data/iso` | 556MB | ISO images |
| `data/cocalc` | 324MB | Computational notebook |
| `data/website` | 59MB | Web content |
| `data/openproject` | 13MB | OpenProject (postgres) |
| `data/fasten` | 5.7MB | Health records |
| `data/fenrus` | 3.5MB | Dashboard config |
| `data/medical` | 14MB | Medical records |
| `data/truenas-exporters` | - | Prometheus exporters |
### TrueNAS Apps (`/mnt/.ix-apps/`)
- Docker storage: 28GB used
- App configs and mounts for TrueNAS-managed apps
---
## Network Configuration
| Service | Port | Protocol | URL |
|---------|------|----------|-----|
| Portainer | 31015 | HTTPS | https://guava.crista.home:31015 |
| **Plane.so** | 3080 | HTTP | **http://guava.crista.home:3080** |
| Plane.so HTTPS | 3443 | HTTPS | https://guava.crista.home:3443 |
| Jellyfin | 30013 | HTTP | http://guava.crista.home:30013 |
| Jellyfin HTTPS | 30014 | HTTPS | https://guava.crista.home:30014 |
| Gitea | 30008-30009 | HTTP | http://guava.crista.home:30008 |
| WireGuard | 51827 | UDP | - |
| wg-easy UI | 30058 | HTTP | http://guava.crista.home:30058 |
| Fenrus | 45678 | HTTP | http://guava.crista.home:45678 |
| Fasten | 9090 | HTTP | http://guava.crista.home:9090 |
| Node Exporter | 9100 | HTTP | http://guava.crista.home:9100/metrics |
| nginx | 28888 | HTTP | http://guava.crista.home:28888 |
| iperf3 | 5201 | TCP | - |
| SSH | 22 | TCP | - |
| SMB | 445 | TCP | - |
| Pi-hole DNS | 53 | TCP/UDP | - |
---
## Portainer Access
| Setting | Value |
|---------|-------|
| **URL** | `https://guava.crista.home:31015` |
| **API Endpoint** | `https://localhost:31015/api` (from guava) |
| **Endpoint ID** | 3 (local) |
| **API Token** | `ptr_REDACTED_PORTAINER_TOKEN` |
### API Examples
```bash
# List stacks
curl -sk -H "X-API-Key: REDACTED_API_KEY" \
  'https://localhost:31015/api/stacks'
# List containers
curl -sk -H "X-API-Key: REDACTED_API_KEY" \
  'https://localhost:31015/api/endpoints/3/docker/containers/json'
# Create stack from compose string
curl -sk -X POST \
  -H "X-API-Key: REDACTED_API_KEY" \
  -H 'Content-Type: application/json' \
  'https://localhost:31015/api/stacks/create/standalone/string?endpointId=3' \
  -d '{"name": "my-stack", "stackFileContent": "..."}'
```
---
## Deployed Stacks (Portainer)
| ID | Name | Status | Description |
|----|------|--------|-------------|
| 2 | nginx | ✅ Active | Reverse proxy (:28888) |
| 3 | ddns | ✅ Active | Dynamic DNS updater (crista.love) |
| 4 | llama | ⏸️ Inactive | LLM server |
| 5 | fenrus | ✅ Active | Dashboard (:45678) |
| 8 | fasten | ✅ Active | Health records (:9090) |
| 17 | node-exporter | ✅ Active | Prometheus metrics (:9100) |
| 18 | iperf3 | ✅ Active | Network speed testing (:5201) |
| 25 | cocalc | ⏸️ Inactive | Computational notebook |
| **26** | **plane-stack** | ✅ Active | **Project management (:3080)** |
### TrueNAS-Managed Apps (ix-apps)
| App | Container | Port | Description |
|-----|-----------|------|-------------|
| Portainer | ix-portainer-portainer-1 | 31015 | Container management |
| Gitea | ix-gitea-gitea-1 | 30008-30009 | Git server |
| Gitea DB | ix-gitea-postgres-1 | - | PostgreSQL for Gitea |
| Jellyfin | ix-jellyfin-jellyfin-1 | 30013, 30014 | Media server |
| WireGuard | ix-wg-easy-wg-easy-1 | 30058, 51827/udp | VPN server |
| Tailscale | ix-tailscale-tailscale-1 | - | Mesh VPN |
| Pi-hole | (configured) | 53 | DNS server |
---
## SSH Access
### Via Cloudflare Tunnel
```bash
# Install cloudflared
curl -L https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64 -o /tmp/cloudflared
chmod +x /tmp/cloudflared
# SSH config
cat >> ~/.ssh/config << 'EOF'
Host guava
HostName ruled-bowl-dos-jews.trycloudflare.com
User vish
IdentityFile ~/.ssh/id_ed25519
ProxyCommand /tmp/cloudflared access ssh --hostname %h
EOF
# Connect
ssh guava
```
### Direct (Local Network)
```bash
ssh vish@192.168.0.100
```
**Note**: Docker commands require `sudo` on guava.
---
## Services Documentation
### Plane.so
See [plane.yaml](plane.yaml) for the full stack configuration.
| Component | Container | Port | Purpose |
|-----------|-----------|------|---------|
| Frontend | plane-web | 3000 | Web UI |
| Admin | plane-admin | 3000 | Admin panel |
| Space | plane-space | 3000 | Public pages |
| API | plane-api | 8000 | Backend API |
| Worker | plane-worker | 8000 | Background jobs |
| Beat | plane-beat | 8000 | Scheduled tasks |
| Live | plane-live | 3000 | Real-time updates |
| Database | plane-db | 5432 | PostgreSQL |
| Cache | plane-redis | 6379 | Valkey/Redis |
| Queue | plane-mq | 5672 | RabbitMQ |
| Storage | plane-minio | 9000 | MinIO S3 |
| Proxy | plane-proxy | 80/443 | Caddy reverse proxy |
**Access URL**: http://guava.crista.home:3080
**Data Location**: `/mnt/data/plane-data/`
---
## Maintenance
### Backup Locations
| Data | Path | Priority |
|------|------|----------|
| Plane DB | `/mnt/data/plane-data/postgres/` | High |
| Plane Files | `/mnt/data/plane-data/minio/` | High |
| Gitea | `/mnt/.ix-apps/app_mounts/gitea/` | High |
| Jellyfin Config | `/mnt/.ix-apps/app_mounts/jellyfin/config/` | Medium |
| Photos | `/mnt/data/photos/` | High |
### Common Commands
```bash
# Check all containers
sudo docker ps -a
# View stack logs
sudo docker compose -f /path/to/stack logs -f
# Restart a stack via Portainer API
curl -sk -X POST \
-H 'X-API-Key: TOKEN' \
'https://localhost:31015/api/stacks/STACK_ID/stop?endpointId=3'
curl -sk -X POST \
-H 'X-API-Key: TOKEN' \
'https://localhost:31015/api/stacks/STACK_ID/start?endpointId=3'
```
---
## Related Documentation
- [Plane.so Service Docs](../../../docs/services/individual/plane.md)
- [TrueNAS Scale Documentation](https://www.truenas.com/docs/scale/)
- [AGENTS.md](../../../AGENTS.md) - Quick reference for all hosts
---
*Last updated: February 4, 2026*
*Verified via SSH - all services confirmed running*

View File

@@ -0,0 +1,23 @@
Guava CIFS/SMB Shares
data /mnt/data/passionfruit
guava_turquoise /mnt/data/guava_turquoise Backup of turquoise
photos /mnt/data/photos
Global Configuration
Nameservers
Nameserver 1:
1.1.1.1
Nameserver 2:
192.168.0.250
Default Route
IPv4:
192.168.0.1
Hostname: guava
Domain: local
HTTP Proxy: ---
Service Announcement: NETBIOS-NS, mDNS, WS-DISCOVERY
Additional Domains: ---
Hostname Database: ---
Outbound Network: Allow All

View File

@@ -0,0 +1,213 @@
# Plane.so - Self-Hosted Project Management
# Deployed via Portainer on TrueNAS Scale (guava)
# Port: 3080 (HTTP), 3443 (HTTPS)
# Shared environment fragments (YAML anchors), merged into the services
# below via `<<:` merge keys.
x-db-env: &db-env
  PGHOST: plane-db
  PGDATABASE: plane
  POSTGRES_USER: plane
  POSTGRES_PASSWORD: "REDACTED_PASSWORD"
  POSTGRES_DB: plane
  POSTGRES_PORT: 5432
  PGDATA: /var/lib/postgresql/data
x-redis-env: &redis-env
  REDIS_HOST: plane-redis
  REDIS_PORT: 6379
  REDIS_URL: redis://plane-redis:6379/
x-minio-env: &minio-env
  MINIO_ROOT_USER: ${AWS_ACCESS_KEY_ID:-planeaccess}
  MINIO_ROOT_PASSWORD: "REDACTED_PASSWORD"
x-aws-s3-env: &aws-s3-env
  AWS_REGION: us-east-1
  AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-planeaccess}
  AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-planesecret123}
  AWS_S3_ENDPOINT_URL: http://plane-minio:9000
  AWS_S3_BUCKET_NAME: uploads
x-proxy-env: &proxy-env
  APP_DOMAIN: ${APP_DOMAIN:-guava.crista.home}
  FILE_SIZE_LIMIT: 52428800
  LISTEN_HTTP_PORT: 80
  LISTEN_HTTPS_PORT: 443
  BUCKET_NAME: uploads
  SITE_ADDRESS: ":80"  # quoted: a bare `:80` relies on YAML edge-case parsing
x-mq-env: &mq-env
  RABBITMQ_HOST: plane-mq
  RABBITMQ_PORT: 5672
  RABBITMQ_DEFAULT_USER: plane
  # Fixed: the value previously contained an unbalanced run of double
  # quotes ("..."..."), which is invalid YAML.
  RABBITMQ_DEFAULT_PASS: "REDACTED_PASSWORD"
  RABBITMQ_DEFAULT_VHOST: plane
  RABBITMQ_VHOST: plane
x-live-env: &live-env
  API_BASE_URL: http://api:8000
  LIVE_SERVER_SECRET_KEY: ${LIVE_SERVER_SECRET_KEY:-60gp0byfz2dvffa45cxl20p1scy9xbpf6d8c5y0geejgkyp1b5}
x-app-env: &app-env
  WEB_URL: ${WEB_URL:-http://guava.crista.home:3080}
  DEBUG: 0
  CORS_ALLOWED_ORIGINS: ${CORS_ALLOWED_ORIGINS:-}
  GUNICORN_WORKERS: 2
  USE_MINIO: 1
  # Fixed: both connection URLs below previously had unbalanced quoting and
  # an unterminated ${VAR:...} substitution — invalid YAML and invalid
  # Compose interpolation. Reconstructed from the service names, ports, and
  # vhost defined in this stack (plane-db:5432, plane-mq:5672, vhost plane).
  # TODO(review): confirm the redacted default passwords match
  # POSTGRES_PASSWORD / RABBITMQ_DEFAULT_PASS above.
  DATABASE_URL: "postgresql://plane:${POSTGRES_PASSWORD:-REDACTED_PASSWORD}@plane-db:5432/plane"
  SECRET_KEY: ${SECRET_KEY:-60gp0byfz2dvffa45cxl20p1scy9xbpf6d8c5y0geejgkyp1b5}
  AMQP_URL: "amqp://plane:${RABBITMQ_PASSWORD:-REDACTED_PASSWORD}@plane-mq:5672/plane"
  API_KEY_RATE_LIMIT: 60/minute
  MINIO_ENDPOINT_SSL: 0
  LIVE_SERVER_SECRET_KEY: ${LIVE_SERVER_SECRET_KEY:-60gp0byfz2dvffa45cxl20p1scy9xbpf6d8c5y0geejgkyp1b5}
services:
  # Web UI (frontend) — see the host doc's component table.
  web:
    image: artifacts.plane.so/makeplane/plane-frontend:stable
    container_name: plane-web
    restart: unless-stopped
    depends_on:
      - api
      - worker
  # Public pages ("space").
  space:
    image: artifacts.plane.so/makeplane/plane-space:stable
    container_name: plane-space
    restart: unless-stopped
    depends_on:
      - api
      - worker
      - web
  # Admin panel.
  admin:
    image: artifacts.plane.so/makeplane/plane-admin:stable
    container_name: plane-admin
    restart: unless-stopped
    depends_on:
      - api
      - web
  # Real-time updates server.
  live:
    image: artifacts.plane.so/makeplane/plane-live:stable
    container_name: plane-live
    restart: unless-stopped
    environment:
      <<: [*live-env, *redis-env]
    depends_on:
      - api
      - web
  # Backend API. Waits for a healthy database before starting.
  # NOTE: with a list of merge aliases, keys from EARLIER anchors win on
  # conflict (so app-env values take precedence over proxy-env here).
  api:
    image: artifacts.plane.so/makeplane/plane-backend:stable
    container_name: plane-api
    command: ./bin/docker-entrypoint-api.sh
    restart: unless-stopped
    environment:
      <<: [*app-env, *db-env, *redis-env, *minio-env, *aws-s3-env, *proxy-env]
    depends_on:
      plane-db:
        condition: service_healthy
      plane-redis:
        condition: service_started
      plane-mq:
        condition: service_started
  # Background job worker (same backend image, worker entrypoint).
  worker:
    image: artifacts.plane.so/makeplane/plane-backend:stable
    container_name: plane-worker
    command: ./bin/docker-entrypoint-worker.sh
    restart: unless-stopped
    environment:
      <<: [*app-env, *db-env, *redis-env, *minio-env, *aws-s3-env, *proxy-env]
    depends_on:
      - api
      - plane-db
      - plane-redis
      - plane-mq
  # Scheduled-task ("beat") process.
  beat-worker:
    image: artifacts.plane.so/makeplane/plane-backend:stable
    container_name: plane-beat
    command: ./bin/docker-entrypoint-beat.sh
    restart: unless-stopped
    environment:
      <<: [*app-env, *db-env, *redis-env, *minio-env, *aws-s3-env, *proxy-env]
    depends_on:
      - api
      - plane-db
      - plane-redis
      - plane-mq
  # Database migration runner; `restart: on-failure` suggests it is meant
  # to run to completion and exit rather than stay up.
  migrator:
    image: artifacts.plane.so/makeplane/plane-backend:stable
    container_name: plane-migrator
    command: ./bin/docker-entrypoint-migrator.sh
    restart: on-failure
    environment:
      <<: [*app-env, *db-env, *redis-env, *minio-env, *aws-s3-env, *proxy-env]
    depends_on:
      plane-db:
        condition: service_healthy
      plane-redis:
        condition: service_started
  # PostgreSQL 15; its healthcheck gates api/migrator startup above.
  plane-db:
    image: postgres:15.7-alpine
    container_name: plane-db
    command: postgres -c 'max_connections=1000'
    restart: unless-stopped
    environment:
      <<: *db-env
    volumes:
      - /mnt/data/plane-data/postgres:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U plane -d plane"]
      interval: 10s
      timeout: 5s
      retries: 5
  # Valkey (Redis-compatible) cache.
  plane-redis:
    image: valkey/valkey:7.2.11-alpine
    container_name: plane-redis
    restart: unless-stopped
    volumes:
      - /mnt/data/plane-data/redis:/data
  # RabbitMQ message queue.
  plane-mq:
    image: rabbitmq:3.13.6-management-alpine
    container_name: plane-mq
    restart: unless-stopped
    environment:
      <<: *mq-env
    volumes:
      - /mnt/data/plane-data/rabbitmq:/var/lib/rabbitmq
  # MinIO S3-compatible object storage (console on :9090 inside the network).
  plane-minio:
    image: minio/minio:latest
    container_name: plane-minio
    command: server /export --console-address ":9090"
    restart: unless-stopped
    environment:
      <<: *minio-env
    volumes:
      - /mnt/data/plane-data/minio:/export
  # Reverse proxy — the only service with published host ports (3080/3443).
  proxy:
    image: artifacts.plane.so/makeplane/plane-proxy:stable
    container_name: plane-proxy
    restart: unless-stopped
    environment:
      <<: *proxy-env
    ports:
      - "3080:80"
      - "3443:443"
    depends_on:
      - web
      - api
      - space
      - admin
      - live
# All services share one named bridge network.
networks:
  default:
    name: plane-network
    driver: bridge

View File

@@ -0,0 +1,25 @@
version: '3.8'
# CoCalc (sagemathinc/cocalc-docker) — standalone single-container deployment.
services:
  cocalc:
    image: sagemathinc/cocalc-docker:latest
    container_name: cocalc
    restart: unless-stopped
    ports:
      - "8080:443" # expose CoCalc HTTPS on port 8080
      # or "443:443" if you want it directly bound to 443
    volumes:
      # Persistent project and home directories
      - /mnt/data/cocalc/projects:/projects
      - /mnt/data/cocalc/home:/home/cocalc
      # Optional: shared local "library of documents"
      - /mnt/data/cocalc/library:/projects/library
    environment:
      - TZ=America/Los_Angeles
      - COCALC_NATS_AUTH=false # disable NATS auth for standalone use
      # - COCALC_ADMIN_PASSWORD="REDACTED_PASSWORD" # optional admin password
      # - COCALC_NO_IDLE_TIMEOUT=true # optional: stop idle shutdowns

View File

@@ -0,0 +1,18 @@
version: '3.8'
# Cloudflare dynamic-DNS updater for crista.love and subdomains.
services:
  ddns-crista-love:
    image: favonia/cloudflare-ddns:latest
    container_name: ddns-crista-love
    # NOTE(review): host networking — presumably so the updater sees the
    # host's real interface addresses directly; confirm it is required.
    network_mode: host
    restart: unless-stopped
    # Hardened: non-root user, read-only rootfs, all capabilities dropped.
    user: "3000:3000"
    read_only: true
    cap_drop:
      # NOTE(review): Docker docs spell this `ALL` (uppercase) — lowercase
      # appears to be accepted but confirm against the engine in use.
      - all
    security_opt:
      - no-new-privileges:true
    environment:
      # Token supplied via interpolation (.env or shell), not committed here.
      - CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
      - DOMAINS=crista.love,cle.crista.love,cocalc.crista.love,mm.crista.love
      - PROXIED=true

View File

@@ -0,0 +1,12 @@
version: "3.9"
# Fasten Health (on-prem) — personal health-record aggregator.
services:
  fasten:
    image: ghcr.io/fastenhealth/fasten-onprem:main
    container_name: fasten-onprem
    ports:
      - "9090:8080"  # web UI published on host port 9090
    volumes:
      # Database and cache directories persisted on the data pool.
      - /mnt/data/fasten/db:/opt/fasten/db
      - /mnt/data/fasten/cache:/opt/fasten/cache
    restart: unless-stopped

View File

@@ -0,0 +1,19 @@
version: "3.9"
services:
  fenrus:
    image: revenz/fenrus:latest
    container_name: fenrus
    # Healthcheck hits the app's own HTTP port inside the container;
    # start_period gives it a 90s grace window before failures count.
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://127.0.0.1:3000/ || exit 1"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 90s
    ports:
      - "45678:3000"  # dashboard published on host port 45678
    volumes:
      - /mnt/data/fenrus:/app/data:rw
    environment:
      TZ: America/Los_Angeles
    restart: unless-stopped

View File

@@ -0,0 +1,41 @@
version: "3.9"
services:
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    restart: unless-stopped
    ports:
      - "11434:11434"
    environment:
      # Unload idle models after 10 minutes to free memory.
      - OLLAMA_KEEP_ALIVE=10m
    volumes:
      # Model store on the data pool (the data/llama dataset).
      - /mnt/data/llama:/root/.ollama
    # --- Optional AMD iGPU offload (experimental on SCALE) ---
    # devices:
    #   - /dev/kfd
    #   - /dev/dri
    # group_add:
    #   - "video"
    #   - "render"
    # environment:
    #   - OLLAMA_KEEP_ALIVE=10m
    #   - HSA_ENABLE_SDMA=0
    #   - HSA_OVERRIDE_GFX_VERSION=11.0.0
  openwebui:
    image: ghcr.io/open-webui/open-webui:latest
    container_name: open-webui
    restart: unless-stopped
    depends_on:
      - ollama
    ports:
      - "3000:8080" # browse to http://<truenas-ip>:3000
    environment:
      # Either var works on recent builds; keeping both for compatibility
      - OLLAMA_API_BASE_URL=http://ollama:11434
      - OLLAMA_BASE_URL=http://ollama:11434
      # NOTE(review): setting this to "false" disables authentication
      # entirely (anyone who can reach port 3000 gets full access) — keep
      # true unless the UI is strictly LAN/VPN-only.
      - WEBUI_AUTH=true
    volumes:
      - /mnt/data/llama/open-webui:/app/backend/data

View File

@@ -0,0 +1,10 @@
My recommended use on your setup:
Model Use case
Llama3.1:8b Main general-purpose assistant
Mistral:7b Fast, concise replies & RAG
Qwen2.5:3b Lightweight, quick lookups
Qwen2.5-Coder:7b Dedicated coding tasks
Llama3:8b Legacy/benchmark (optional)
qwen2.5:7b-instruct Writing up emails
deepseek-r1 (chonky but accurate)
deepseek-r1:8b (lighter version of r1, can run on DS1823xs+)

View File

@@ -0,0 +1,18 @@
version: "3.8"
# Static website server (:28888), content served from the data pool.
services:
  nginx:
    image: nginx:latest
    container_name: nginx
    volumes:
      # Site content and server config, both mounted read-only.
      - /mnt/data/website/html:/usr/share/nginx/html:ro
      - /mnt/data/website/conf.d:/etc/nginx/conf.d:ro
    ports:
      - "28888:80" # expose port 28888 on the host
    networks:
      - web-net
    restart: unless-stopped
networks:
  web-net:
    external: true # network must already exist; Compose will not create it

View File

@@ -0,0 +1,18 @@
version: "3.9"
services:
node-exporter:
image: prom/node-exporter:latest
container_name: node-exporter
restart: unless-stopped
network_mode: "host"
pid: "host"
volumes:
- /proc:/host/proc:ro
- /sys:/host/sys:ro
- /:/rootfs:ro
command:
- '--path.procfs=/host/proc'
- '--path.sysfs=/host/sys'
- '--path.rootfs=/rootfs'
- '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'