Sanitized mirror from private repository - 2026-04-19 07:39:14 UTC
Some checks failed
Documentation / Build Docusaurus (push) Failing after 18m8s
Documentation / Deploy to GitHub Pages (push) Has been skipped

This commit is contained in:
Gitea Mirror Bot
2026-04-19 07:39:14 +00:00
commit d6eb5dcb1e
1437 changed files with 362941 additions and 0 deletions

View File

View File

@@ -0,0 +1,20 @@
# Droppy - File sharing
# Port: 8989
# Self-hosted file sharing
version: '3.8'
services:
droppy:
container_name: droppy
image: silverwind/droppy
ports:
- 8989:8989
volumes:
- /root/docker/droppy/config/:/config
- /root/docker/droppy/files/:/files
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8989"]
interval: 30s
timeout: 10s
retries: 5

View File

@@ -0,0 +1,24 @@
# Fenrus - Dashboard
# Port: 35000 (host) -> 3000 (container)
# NOTE(review): header previously said "Port: 5000", which matches neither side
# of the mapping below — corrected to reflect the actual `ports:` entry.
# Application dashboard
version: '3.8'
services:
  fenrus:
    image: revenz/fenrus
    container_name: fenrus
    environment:
      - TZ=America/Los_Angeles
    volumes:
      - /root/docker/fenrus/data:/app/data
      - /root/docker/fenrus/images:/app/wwwroot/images
    ports:
      - 35000:3000
    restart: unless-stopped
    healthcheck:
      # Probes the container-internal port (3000), not the published one.
      test: ["CMD", "curl", "-f", "http://localhost:3000"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

View File

@@ -0,0 +1,45 @@
# Hemmelig - Secret sharing
# Port: 3000
# Self-destructing secret sharing
services:
  hemmelig:
    image: hemmeligapp/hemmelig:latest # The Docker image to use for the hemmelig service
    hostname: hemmelig # The hostname of the hemmelig service
    init: true # Run an init process as PID 1 (signal forwarding / zombie reaping)
    volumes:
      - /root/docker/hem/files/:/var/tmp/hemmelig/upload/files # Host directory for uploaded files
    environment:
      - SECRET_REDIS_HOST=hemmelig-redis # The hostname of the Redis server
      - SECRET_LOCAL_HOSTNAME=0.0.0.0 # The local hostname for the Fastify instance
      - SECRET_PORT=3000 # The port number for the Fastify instance
      - SECRET_HOST= # Used for i.e. setting CORS to your domain name
      - SECRET_DISABLE_USERS=false # Whether user registration is disabled
      - SECRET_ENABLE_FILE_UPLOAD=true # Whether file upload is enabled or disabled
      - SECRET_FILE_SIZE=4 # The total allowed upload file size in MB
      - SECRET_FORCED_LANGUAGE=en # The default language for the application
      - SECRET_JWT_SECRET=REDACTED_PASSWORD123! # The secret signing JWT tokens for login # pragma: allowlist secret
      - SECRET_MAX_TEXT_SIZE=256 # The max text size for a secret, set in KB (i.e. 256 for 256KB)
    ports:
      - "3000:3000" # Maps the host port to the container port
    depends_on:
      - redis # Ensures that Redis is started before Hemmelig
    restart: unless-stopped # Always restarts the service if it stops unexpectedly
    stop_grace_period: 1m # The amount of time to wait before stopping the service
    healthcheck:
      test: "wget -O /dev/null localhost:3000 || exit 1" # Tests whether the Hemmelig service is responsive
      timeout: 5s
      retries: 1
  redis:
    image: redis # The Docker image to use for the Redis server
    hostname: hemmelig-redis # Must match SECRET_REDIS_HOST above
    init: true
    volumes:
      # Fixed: was "./root/docker/hem/redis/" — the leading "./" made this
      # relative to the compose file, silently creating an unintended
      # ./root/... tree. Now absolute, matching the hemmelig volume above.
      - /root/docker/hem/redis/:/data
    command: redis-server --appendonly yes # AOF persistence so secrets survive restarts
    restart: unless-stopped
    stop_grace_period: 1m
    healthcheck:
      test: "redis-cli ping | grep PONG || exit 1" # Tests whether the Redis server is responsive
      timeout: 5s
      retries: 1

View File

@@ -0,0 +1,60 @@
# Invidious - YouTube frontend
# Port: 7601 (host) -> 3000 (container)
# Privacy-respecting YouTube viewer
version: "3.9"
services:
  invidious-db:
    image: postgres
    container_name: Invidious-DB
    hostname: invidious-db
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-d", "invidious", "-U", "kemal"]
      timeout: 45s
      interval: 10s
      retries: 10
    user: 0:0
    volumes:
      - /volume1/docker/invidiousdb:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: invidious
      POSTGRES_USER: kemal
      # Mapping-style env var: these quotes are YAML quoting and are NOT part
      # of the value (unlike the list "- KEY=..." form used elsewhere).
      POSTGRES_PASSWORD: "REDACTED_PASSWORD"
    restart: unless-stopped
  invidious:
    image: quay.io/invidious/invidious:latest
    container_name: Invidious
    hostname: invidious
    user: 0:0
    security_opt:
      - no-new-privileges:true
    healthcheck:
      # Fetches comments for a known video ID to verify the API end-to-end.
      test: wget -nv --tries=1 --spider http://127.0.0.1:3000/api/v1/comments/jNQXAC9IVRw || exit 1
      interval: 30s
      timeout: 5s
      retries: 2
    ports:
      # Binds only on a specific public interface IP (not 0.0.0.0).
      - 94.72.140.37:7601:3000
    # NOTE(review): inside the INVIDIOUS_CONFIG block below,
    # default_user_preferences.locale is "us" — Invidious locale codes usually
    # look like "en-US"; verify against the upstream config reference.
    # The db credentials here must stay in sync with the invidious-db service.
    environment:
      INVIDIOUS_CONFIG: |
        db:
          dbname: invidious
          user: kemal
          password: "REDACTED_PASSWORD"
          host: invidious-db
          port: 5432
        check_tables: true
        captcha_enabled: false
        default_user_preferences:
          locale: us
          region: US
        external_port: 7601
        domain: invidious.vish.gg
        https_only: true
    restart: unless-stopped
    depends_on:
      invidious-db:
        condition: service_healthy

View File

@@ -0,0 +1,54 @@
# Mattermost - Team collaboration
# Port: 8401 (host) -> 8065 (container)
# Self-hosted Slack alternative
version: "3.9"
services:
  mattermost-db:
    image: postgres
    container_name: Mattermost-DB
    hostname: mattermost-db
    security_opt:
      - no-new-privileges:true
    pids_limit: 100
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-d", "mattermost", "-U", "mattermostuser"]
      interval: 10s
      timeout: 5s
      retries: 5
    user: 0:0
    volumes:
      - /root/docker/mattermost/db:/var/lib/postgresql/data
    environment:
      - POSTGRES_DB=mattermost
      - POSTGRES_USER=mattermostuser
      # Fixed: in list-form "- KEY=value" entries the double quotes are NOT
      # YAML quoting — they become literal characters of the password, so it
      # could never match the password embedded in MM_SQLSETTINGS_DATASOURCE
      # below. Keep the two values in sync.
      - POSTGRES_PASSWORD=REDACTED_PASSWORD
      - TZ=America/Los_Angeles
    restart: unless-stopped
  mattermost:
    image: mattermost/mattermost-team-edition:latest
    container_name: Mattermost
    hostname: mattermost
    security_opt:
      - no-new-privileges:true
    pids_limit: 200
    user: 0:0
    volumes:
      - /root/docker/mattermost/config:/mattermost/config:rw
      - /root/docker/mattermost/data:/mattermost/data:rw
      - /root/docker/mattermost/logs:/mattermost/logs:rw
      - /root/docker/mattermost/plugins:/mattermost/plugins:rw
      - /root/docker/mattermost/client:/mattermost/client/plugins:rw
      - /root/docker/mattermost/indexes:/mattermost/bleve-indexes:rw
    environment:
      - TZ=America/Los_Angeles
      - MM_SQLSETTINGS_DRIVERNAME=postgres
      # NOTE(review): the password here ("mattermostpw") must equal
      # POSTGRES_PASSWORD above — confirm after un-redacting.
      - MM_SQLSETTINGS_DATASOURCE=postgres://mattermostuser:mattermostpw@mattermost-db:5432/mattermost?sslmode=disable&connect_timeout=10
      - MM_BLEVESETTINGS_INDEXDIR=/mattermost/bleve-indexes
      - MM_SERVICESETTINGS_SITEURL=https://mm.vish.gg
    ports:
      - 8401:8065
    restart: unless-stopped
    depends_on:
      mattermost-db:
        condition: service_healthy

View File

@@ -0,0 +1,14 @@
# MeTube - YouTube downloader
# Port: 8081
# Web GUI for yt-dlp
version: "3"
services:
metube:
image: alexta69/metube
container_name: metube
restart: unless-stopped
ports:
- "8871:8081"
volumes:
- /root/docker/yt:/downloads

View File

@@ -0,0 +1,21 @@
# Navidrome - Music server
# Port: 4533
# Personal music streaming server
version: "3"
services:
navidrome:
image: deluan/navidrome:latest
user: 0:0 # should be owner of volumes
ports:
- "4533:4533"
restart: unless-stopped
environment:
# Optional: put your config options customization here. Examples:
ND_SCANSCHEDULE: 1h
ND_LOGLEVEL: info
ND_SESSIONTIMEOUT: 24h
ND_BASEURL: ""
volumes:
- "/root/docker/navidrome:/data"
- "/root/plex/:/music:ro"

View File

@@ -0,0 +1,16 @@
# Nginx Proxy Manager
# Port: 81
# Reverse proxy management
version: '3'
services:
app:
image: 'jc21/nginx-proxy-manager:latest'
restart: unless-stopped
ports:
- '80:80'
- '8181:81'
- '443:443'
volumes:
- ./data:/data
- ./letsencrypt:/etc/letsencrypt

View File

@@ -0,0 +1,15 @@
# RainLoop - Webmail
# Port: 8080 (host) -> 80 (container)
# NOTE(review): header previously said "Port: 8888", which matches neither side
# of the mapping below — corrected to reflect the actual `ports:` entry.
# Simple webmail client
version: '3'
services:
  rainloop:
    image: wernerfred/docker-rainloop:latest
    container_name: docker-rainloop
    restart: unless-stopped
    ports:
      - 8080:80
    volumes:
      - /opt/docker-rainloop/data:/rainloop/data

View File

@@ -0,0 +1,23 @@
# Syncthing - File synchronization
# Port: 8384 (web), 22000 (sync)
# Continuous file synchronization between devices
version: "2.1"
services:
  syncthing:
    image: lscr.io/linuxserver/syncthing:latest
    container_name: syncthing
    hostname: syncthing # optional
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Los_Angeles
    volumes:
      - /root/docker/syncthing/config:/config
      # Fixed: the data entries had no container path, so compose created
      # anonymous volumes at those paths inside the container instead of
      # bind-mounting the host folders — synced data would not land on the
      # host. Mounted to /data1 and /data2 per the linuxserver/syncthing
      # documented layout.
      - /root/docker/syncthing/data1:/data1
      - /root/docker/syncthing/data2:/data2
    ports:
      - 8384:8384
      - 22000:22000/tcp
      - 22000:22000/udp
      - 21027:21027/udp
    restart: unless-stopped

View File

@@ -0,0 +1,19 @@
# Watchtower - Container update notifier for Bulgaria VM (schedule disabled - GitOps managed)
# Auto-update schedule removed; image updates are handled via Renovate PRs.
# Manual update trigger: POST http://localhost:8080/v1/update
# Header: Authorization: Bearer watchtower-metrics-token
version: "3"
services:
  watchtower:
    image: containrrr/watchtower:latest
    volumes:
      # Read-only socket access: Watchtower only needs to observe/update containers.
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      - WATCHTOWER_CLEANUP=true
      - WATCHTOWER_HTTP_API_UPDATE=true
      - WATCHTOWER_HTTP_API_METRICS=true
      # Fixed: in list-form "- KEY=value" entries the double quotes are NOT
      # YAML quoting — they became literal characters of the token, so the
      # bare-token "Authorization: Bearer ..." trigger documented above would
      # always get 401. Quotes removed.
      - WATCHTOWER_HTTP_API_TOKEN=REDACTED_HTTP_TOKEN
      - TZ=America/Los_Angeles
    restart: unless-stopped
    labels:
      # Exclude Watchtower itself from being auto-updated.
      - "com.centurylinklabs.watchtower.enable=false"

View File

@@ -0,0 +1,61 @@
# This specifies the version of Docker Compose to use.
version: "3"
# This defines all of the services that will be run in this Docker Compose setup.
services:
  # This defines a service named "server".
  server:
    # This specifies the Docker image to use for this service.
    image: yooooomi/your_spotify_server
    # Restart automatically unless explicitly stopped.
    restart: unless-stopped
    # This maps port 15000 on the host machine to port 8080 on the container.
    ports:
      - "15000:8080"
    # NOTE(review): `links:` is legacy compose syntax; services on the same
    # default network already resolve each other by name. Left as-is here.
    links:
      - mongo
    # This specifies that the "mongo" service must be started before this one.
    depends_on:
      - mongo
    # This sets environment variables for the container.
    environment:
      - API_ENDPOINT=http://vish.gg:15000 # This MUST be included as a valid URL in the spotify dashboard
      - CLIENT_ENDPOINT=http://vish.gg:4000
      # SECURITY NOTE(review): SPOTIFY_PUBLIC/SPOTIFY_SECRET are committed in
      # plaintext here, unlike other redacted credentials in this repo. The
      # secret should be rotated in the Spotify dashboard and injected via an
      # env file or secret store instead of being stored in VCS.
      - SPOTIFY_PUBLIC=d6b3bda999f042099ce79a8b6e9f9e68
      - SPOTIFY_SECRET=72c650e7a25f441baa245b963003a672
      - CORS=http://vish.gg:4000,http://vish.gg:4001 # all if you want to allow every origin
  # This defines a service named "mongo".
  mongo:
    # This sets the container name for this service.
    container_name: mongo
    # This specifies the Docker image to use for this service.
    image: mongo:4.4.8
    # Bind-mounts "./your_spotify_db" on the host to "/data/db" in the container.
    volumes:
      - ./your_spotify_db:/data/db
  # This defines a service named "web".
  web:
    # This specifies the Docker image to use for this service.
    image: yooooomi/your_spotify_client
    # Restart automatically unless explicitly stopped.
    restart: unless-stopped
    # This maps port 4000 on the host machine to port 3000 on the container.
    ports:
      - "4000:3000"
    # This sets environment variables for the container.
    environment:
      - API_ENDPOINT=http://vish.gg:15000

View File

View File

@@ -0,0 +1,11 @@
# Factorio - Game server
# Port: 34197/udp
# Factorio dedicated game server
sudo docker run -d \
-p 34197:34197/udp \
-p 27015:27015/tcp \
-v /root/factorio:/factorio \
--name factorio \
--restart=always \
factoriotools/factorio

View File

@@ -0,0 +1,22 @@
# GitLab - Git repository
# Port: 8929
# Self-hosted Git and CI/CD platform
version: '3.6'
services:
web:
image: 'gitlab/gitlab-ce:latest'
restart: unless-stopped
hostname: 'gl.thevish.io'
environment:
GITLAB_OMNIBUS_CONFIG: |
external_url 'http://glssh.thevish.io:8929'
gitlab_rails['gitlab_shell_ssh_port'] = 2224
ports:
- '8929:8929'
- '2224:22'
volumes:
- '$GITLAB_HOME/config:/etc/gitlab'
- '$GITLAB_HOME/logs:/var/log/gitlab'
- '$GITLAB_HOME/data:/var/opt/gitlab'
shm_size: '256m'

View File

@@ -0,0 +1,19 @@
# JDownloader2 - Download manager
# Port: 5800
# Multi-host download manager
version: '3.9'
services:
jdownloader-2:
image: jlesage/jdownloader-2
restart: unless-stopped
volumes:
- /root/docker/j2/output:/output
- /root/docker/j2/config:/config
environment:
- TZ=America/Los_Angeles
ports:
- 13016:5900
- 53578:5800
- 20123:3129
container_name: jdownloader2

View File

@@ -0,0 +1,27 @@
# Jellyfin - Media server
# Port: 8096
# Free media streaming server
version: '3.5'
services:
jellyfin:
image: jellyfin/jellyfin
container_name: jellyfin
user: 0:0
volumes:
- /root/jellyfin/config:/config
- /root/jellyfin/cache:/cache
- /root/jellyfin/media:/media
- /root/jellyfin/media2:/media2:ro
restart: 'unless-stopped'
# Optional - alternative address used for autodiscovery
environment:
- JELLYFIN_PublishedServerUrl=http://stuff.thevish.io
# Optional - may be necessary for docker healthcheck to pass if running in host network mode
ports:
- 8096:8096
- 8920:8920 #optional
- 7359:7359/udp #optional
- 1900:1900/udp #optional
extra_hosts:
- "host.docker.internal:host-gateway"

View File

@@ -0,0 +1,44 @@
# Matrix Synapse - Chat server
# Port: 8500 (host) -> 8008 (container)
# Federated Matrix homeserver
version: "3.9"
services:
  synapse-db:
    image: postgres
    container_name: Synapse-DB
    hostname: synapse-db
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-d", "synapsedb", "-U", "synapseuser"]
      timeout: 45s
      interval: 10s
      retries: 10
    volumes:
      # Fixed: host and container paths were fused into one string
      # ("/root/docker/db//var/lib/postgresql/data" — missing ':'), which
      # created an anonymous volume at that bogus path and left the Postgres
      # data directory unpersisted on the host.
      - /root/docker/db:/var/lib/postgresql/data
    environment:
      - POSTGRES_DB=synapsedb
      - POSTGRES_USER=synapseuser
      # Fixed: in list-form "- KEY=value" entries the double quotes are NOT
      # YAML quoting — they would have become literal characters of the
      # password. Quotes removed.
      - POSTGRES_PASSWORD=REDACTED_PASSWORD
      - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
    restart: unless-stopped
  synapse:
    image: matrixdotorg/synapse:latest
    container_name: Synapse
    hostname: synapse
    security_opt:
      - no-new-privileges:true
    environment:
      - TZ=America/Los_Angeles
      - SYNAPSE_CONFIG_PATH=/data/homeserver.yaml
    volumes:
      - /root/docker/data:/data
    ports:
      - 8500:8008/tcp
    restart: unless-stopped
    depends_on:
      synapse-db:
        # The DB defines a healthcheck above, so gate startup on "healthy"
        # rather than merely "started" to avoid Synapse racing Postgres init
        # (matches the Invidious/Mattermost stacks in this repo).
        condition: service_healthy

View File

@@ -0,0 +1,32 @@
# n.eko - Virtual browser
# Port: 8080
# Virtual browser in Docker for screen sharing
version: "3.5"
networks:
default:
attachable: true
name: "neko-rooms-net"
services:
neko-rooms:
image: "m1k1o/neko-rooms:latest"
restart: "unless-stopped"
environment:
- "TZ=America/Los_Angeles"
- "NEKO_ROOMS_MUX=true"
- "NEKO_ROOMS_EPR=59000-59049"
- "NEKO_ROOMS_NAT1TO1=74.91.118.242" # IP address of your server that is reachable from client
- "NEKO_ROOMS_INSTANCE_URL=https://showtime.vish.gg/" # external URL
- "NEKO_ROOMS_STORAGE_ENABLED=true"
- "NEKO_ROOMS_STORAGE_INTERNAL=/data"
- "NEKO_ROOMS_STORAGE_EXTERNAL=/opt/neko-rooms/data"
- "NEKO_ROOMS_INSTANCE_NETWORK=neko-rooms-net"
- "NEKO_ROOMS_TRAEFIK_ENABLED=false"
- "NEKO_ROOMS_PATH_PREFIX=/room/"
ports:
- "8080:8080"
volumes:
- "/var/run/docker.sock:/var/run/docker.sock"
- "/opt/neko-rooms/data:/data"

View File

@@ -0,0 +1,69 @@
# ProxiTok - TikTok frontend
# Port: 8080
# Privacy-respecting TikTok viewer
version: '3'
services:
web:
container_name: proxitok-web
image: ghcr.io/pablouser1/proxitok:master
ports:
- 9770:8080
environment:
- LATTE_CACHE=/cache
- API_CACHE=redis
- REDIS_HOST=proxitok-redis
- REDIS_PORT=6379
- API_SIGNER=remote
- API_SIGNER_URL=http://proxitok-signer:8080/signature
volumes:
- proxitok-cache:/cache
depends_on:
- redis
- signer
networks:
- proxitok
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
redis:
container_name: proxitok-redis
image: redis:7-alpine
command: redis-server --save 60 1 --loglevel warning
restart: unless-stopped
networks:
- proxitok
user: nobody
read_only: true
security_opt:
- no-new-privileges:true
tmpfs:
- /data:size=10M,mode=0770,uid=65534,gid=65534,noexec,nosuid,nodev
cap_drop:
- ALL
signer:
container_name: proxitok-signer
image: ghcr.io/pablouser1/signtok:master
init: true
networks:
- proxitok
user: nobody
read_only: true
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
volumes:
proxitok-cache:
networks:
proxitok:

View File

@@ -0,0 +1,19 @@
# Watchtower - Container update notifier for Chicago VM (schedule disabled - GitOps managed)
# Auto-update schedule removed; image updates are handled via Renovate PRs.
# Manual update trigger: POST http://localhost:8080/v1/update
# Header: Authorization: Bearer watchtower-metrics-token
version: "3"
services:
  watchtower:
    image: containrrr/watchtower:latest
    volumes:
      # Read-only socket access: Watchtower only needs to observe/update containers.
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      - WATCHTOWER_CLEANUP=true
      - WATCHTOWER_HTTP_API_UPDATE=true
      - WATCHTOWER_HTTP_API_METRICS=true
      # Fixed: in list-form "- KEY=value" entries the double quotes are NOT
      # YAML quoting — they became literal characters of the token, so the
      # bare-token "Authorization: Bearer ..." trigger documented above would
      # always get 401. Quotes removed.
      - WATCHTOWER_HTTP_API_TOKEN=REDACTED_HTTP_TOKEN
      - TZ=America/Los_Angeles
    restart: unless-stopped
    labels:
      # Exclude Watchtower itself from being auto-updated.
      - "com.centurylinklabs.watchtower.enable=false"

View File

@@ -0,0 +1,45 @@
# Ollama - Local LLM inference
# URL: https://ollama.vishconcord.synology.me
# Port: 11434
# Run large language models locally
services:
webui:
container_name: OLLAMA-WEBUI
image: ghcr.io/open-webui/open-webui:0.6
volumes:
- /root/docker/ollama/webui:/app/backend/data:rw
environment:
OLLAMA_BASE_URL: http://ollama:11434
WEBUI_SECRET_KEY: "REDACTED_SECRET_KEY"
healthcheck:
test: timeout 10s bash -c ':> /dev/tcp/127.0.0.1/8080' || exit 1
interval: 10s
timeout: 5s
retries: 3
start_period: 90s
ports:
- 8271:8080
restart: on-failure
depends_on:
ollama:
condition: service_healthy
ollama:
container_name: OLLAMA
image: ollama/ollama:latest
entrypoint: ["/usr/bin/bash", "/entrypoint.sh"]
volumes:
- /root/docker/ollama/data:/root/.ollama:rw
- /root/docker/ollama/entrypoint/entrypoint.sh:/entrypoint.sh
environment:
MODELS: codegemma:2b,codellama:7b,mistral:7b,llama3.2:3b
OLLAMA_INSTALL_MODELS: codegemma:2b,codellama:7b,mistral:7b,llama3.2:3b
ports:
- 11434:11434
healthcheck:
test: ["CMD", "ollama", "--version"]
interval: 10s
timeout: 5s
retries: 3
start_period: 30s
restart: on-failure:5

View File

@@ -0,0 +1,24 @@
#!/bin/bash
# Ollama container entrypoint: start the server, wait for it to listen,
# pull the models listed in $MODELS, then block on the server process.
set -euo pipefail

# Start Ollama server in the background; keep its PID so we can wait on it.
/bin/ollama serve &
pid=$!

# Wait for Ollama to be ready using Bash's built-in /dev/tcp networking.
while ! timeout 1 bash -c "echo > /dev/tcp/localhost/11434" 2>/dev/null; do
    echo "Waiting for Ollama to start..."
    sleep 1
done
echo "Ollama started."

# $MODELS is a comma-separated list supplied by the compose environment
# (e.g. "codegemma:2b,mistral:7b"); pull/refresh each model.
IFS=',' read -ra model_array <<< "$MODELS"
for model in "${model_array[@]}"; do
    echo "Installing/Updating model $model..."
    # Fixed: "$model" is now quoted — unquoted expansion was subject to
    # word splitting and glob expansion.
    ollama pull "$model"
done
echo "All models installed/updated."

# Continue as the main process: exit with the server's status.
wait "$pid"

View File

View File

@@ -0,0 +1,284 @@
# Alerting Stack - Alertmanager + Notification Bridges
# =============================================================================
# Dual-channel alerting: ntfy (mobile push) + Signal (encrypted messaging)
# =============================================================================
# Deployed via: Portainer GitOps
# Ports: 9093 (Alertmanager), 5000 (signal-bridge), 5001 (ntfy-bridge)
#
# Alert Routing:
# - Warning alerts → ntfy only
# - Critical alerts → ntfy + Signal
# - Resolved alerts → Both channels (for critical)
#
# Uses docker configs to embed Python bridge apps since Portainer GitOps
# doesn't support docker build
configs:
# Alertmanager Configuration
alertmanager_config:
content: |
global:
resolve_timeout: 5m
route:
group_by: ['alertname', 'severity', 'instance']
group_wait: 30s
group_interval: 5m
repeat_interval: 4h
receiver: 'ntfy-all'
routes:
- match:
severity: critical
receiver: 'critical-alerts'
continue: false
- match:
severity: warning
receiver: 'ntfy-all'
receivers:
- name: 'ntfy-all'
webhook_configs:
- url: 'http://ntfy-bridge:5001/alert'
send_resolved: true
- name: 'critical-alerts'
webhook_configs:
- url: 'http://ntfy-bridge:5001/alert'
send_resolved: true
- url: 'http://signal-bridge:5000/alert'
send_resolved: true
inhibit_rules:
- source_match:
severity: 'critical'
target_match:
severity: 'warning'
equal: ['alertname', 'instance']
# ntfy-bridge Python App
ntfy_bridge_app:
content: |
from flask import Flask, request, jsonify
import requests
import os
app = Flask(__name__)
NTFY_URL = os.environ.get('NTFY_URL', 'http://NTFY:80')
NTFY_TOPIC = os.environ.get('NTFY_TOPIC', 'homelab-alerts')
def get_priority(severity, status):
if status == 'resolved':
return '3'
if severity == 'critical':
return '5'
return '4'
def get_tag(severity, status):
if status == 'resolved':
return 'white_check_mark'
if severity == 'critical':
return 'rotating_light'
return 'warning'
def format_alert(alert):
status = alert.get('status', 'firing')
labels = alert.get('labels', {})
annotations = alert.get('annotations', {})
alertname = labels.get('alertname', 'Unknown')
severity = labels.get('severity', 'warning')
instance = labels.get('instance', 'unknown')
status_text = 'RESOLVED' if status == 'resolved' else 'FIRING'
title = f"{alertname} [{status_text}]"
summary = annotations.get('summary', '')
description = annotations.get('description', '')
body_parts = []
if summary:
body_parts.append(summary)
if description and description != summary:
body_parts.append(description)
if instance != 'unknown':
body_parts.append(f"Host: {instance}")
body = '\n'.join(body_parts) if body_parts else f"Alert {status_text.lower()}"
return title, body, severity, status
@app.route('/alert', methods=['POST'])
def handle_alert():
try:
data = request.json
for alert in data.get('alerts', []):
title, body, severity, status = format_alert(alert)
requests.post(f"{NTFY_URL}/{NTFY_TOPIC}", data=body,
headers={'Title': title, 'Priority': get_priority(severity, status), 'Tags': get_tag(severity, status)})
return jsonify({'status': 'sent', 'count': len(data.get('alerts', []))})
except Exception as e:
return jsonify({'status': 'error', 'message': str(e)}), 500
@app.route('/health', methods=['GET'])
def health():
return jsonify({'status': 'healthy'})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5001)
# signal-bridge Python App
signal_bridge_app:
content: |
import os
import requests
from flask import Flask, request, jsonify
app = Flask(__name__)
SIGNAL_API_URL = os.environ.get('SIGNAL_API_URL', 'http://signal-api:8080')
SIGNAL_SENDER = os.environ.get('SIGNAL_SENDER', '')
SIGNAL_RECIPIENTS = os.environ.get('SIGNAL_RECIPIENTS', '').split(',')
def format_alert_message(alert_data):
messages = []
for alert in alert_data.get('alerts', []):
status = alert.get('status', 'firing')
labels = alert.get('labels', {})
annotations = alert.get('annotations', {})
severity = labels.get('severity', 'warning')
summary = annotations.get('summary', labels.get('alertname', 'Alert'))
description = annotations.get('description', '')
if status == 'resolved':
emoji, text = '✅', 'RESOLVED'
elif severity == 'critical':
emoji, text = '🚨', 'CRITICAL'
else:
emoji, text = '⚠️', 'WARNING'
msg = f"{emoji} [{text}] {summary}"
if description:
msg += f"\n{description}"
messages.append(msg)
return "\n\n".join(messages)
def send_signal_message(message):
if not SIGNAL_SENDER or not SIGNAL_RECIPIENTS:
return False
success = True
for recipient in SIGNAL_RECIPIENTS:
recipient = recipient.strip()
if not recipient:
continue
try:
response = requests.post(f"{SIGNAL_API_URL}/v2/send", json={
"message": message, "number": SIGNAL_SENDER, "recipients": [recipient]
}, timeout=30)
if response.status_code not in [200, 201]:
success = False
except Exception:
success = False
return success
@app.route('/health', methods=['GET'])
def health():
return jsonify({"status": "healthy"})
@app.route('/alert', methods=['POST'])
def receive_alert():
try:
alert_data = request.get_json()
if not alert_data:
return jsonify({"error": "No data"}), 400
message = format_alert_message(alert_data)
if send_signal_message(message):
return jsonify({"status": "sent"})
return jsonify({"status": "partial_failure"}), 207
except Exception as e:
return jsonify({"error": str(e)}), 500
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
services:
  # Routes alerts according to the embedded alertmanager_config above
  # (warning -> ntfy only; critical -> ntfy + Signal).
  alertmanager:
    image: prom/alertmanager:latest
    container_name: alertmanager
    restart: unless-stopped
    ports:
      - "9093:9093"
    configs:
      - source: alertmanager_config
        target: /etc/alertmanager/alertmanager.yml
    volumes:
      - alertmanager-data:/alertmanager
    command:
      - '--config.file=/etc/alertmanager/alertmanager.yml'
      - '--storage.path=/alertmanager'
      - '--web.external-url=http://localhost:9093'
    networks:
      - alerting
      - monitoring-stack_monitoring
  # Webhook receiver that forwards alerts to ntfy (mobile push).
  ntfy-bridge:
    image: python:3.11-slim
    container_name: ntfy-bridge
    restart: unless-stopped
    ports:
      - "5001:5001"
    environment:
      - NTFY_URL=http://NTFY:80
      # Fixed: in list-form "- KEY=value" entries the double quotes are NOT
      # YAML quoting — the bridge app would have POSTed to
      # {NTFY_URL}/"REDACTED_NTFY_TOPIC" (quotes in the topic path).
      - NTFY_TOPIC=REDACTED_NTFY_TOPIC
    configs:
      - source: ntfy_bridge_app
        target: /app/app.py
    # Installs deps at container start since Portainer GitOps can't build images.
    command: >
      sh -c "pip install --quiet flask requests gunicorn &&
      cd /app && gunicorn --bind 0.0.0.0:5001 --workers 2 app:app"
    networks:
      - alerting
      - ntfy-stack_default
    healthcheck:
      test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:5001/health')"]
      interval: 30s
      timeout: 10s
      retries: 3
  # Webhook receiver that forwards critical alerts to Signal via signal-api.
  signal-bridge:
    image: python:3.11-slim
    container_name: signal-bridge
    restart: unless-stopped
    ports:
      - "5000:5000"
    environment:
      - SIGNAL_API_URL=http://signal-api:8080
      - SIGNAL_SENDER=REDACTED_PHONE_NUMBER
      - SIGNAL_RECIPIENTS=REDACTED_PHONE_NUMBER
    configs:
      - source: signal_bridge_app
        target: /app/app.py
    command: >
      sh -c "pip install --quiet flask requests gunicorn &&
      cd /app && gunicorn --bind 0.0.0.0:5000 --workers 2 app:app"
    networks:
      - alerting
      - signal-api-stack_default
    healthcheck:
      test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:5000/health')"]
      interval: 30s
      timeout: 10s
      retries: 3
volumes:
  alertmanager-data:
networks:
  alerting:
    driver: bridge
  # Pre-existing stack networks, joined so the bridges can reach
  # Prometheus / ntfy / signal-api by container name.
  monitoring-stack_monitoring:
    external: true
  ntfy-stack_default:
    external: true
  signal-api-stack_default:
    external: true

View File

@@ -0,0 +1,57 @@
# ArchiveBox - Web archiving
# Port: 7254 (host) -> 8000 (container)
# Self-hosted internet archiving solution
version: '3.8'
services:
  archivebox:
    image: archivebox/archivebox:latest
    container_name: archivebox
    ports:
      - "7254:8000"
    volumes:
      - ./data:/data
    environment:
      - PUID=1000
      - PGID=1000
      - ADMIN_USERNAME=vish
      # Fixed: in list-form "- KEY=value" entries the double quotes are NOT
      # YAML quoting — the admin and sonic passwords would have contained
      # literal quote characters. The SEARCH_BACKEND_PASSWORD value must be
      # identical across all three services in this stack.
      - ADMIN_PASSWORD=REDACTED_PASSWORD
      - ALLOWED_HOSTS=*
      - CSRF_TRUSTED_ORIGINS=http://localhost:7254
      - PUBLIC_INDEX=True
      - PUBLIC_SNAPSHOTS=True
      - PUBLIC_ADD_VIEW=False
      - SEARCH_BACKEND_ENGINE=sonic
      - SEARCH_BACKEND_HOST_NAME=sonic
      - SEARCH_BACKEND_PASSWORD=REDACTED_PASSWORD
    restart: unless-stopped
  # Re-archives all snapshots daily; shares ./data with the web service.
  archivebox_scheduler:
    image: archivebox/archivebox:latest
    container_name: archivebox_scheduler
    command: schedule --foreground --update --every=day
    volumes:
      - ./data:/data
    environment:
      - PUID=1000
      - PGID=1000
      - TIMEOUT=120
      - SEARCH_BACKEND_ENGINE=sonic
      - SEARCH_BACKEND_HOST_NAME=sonic
      # Fixed: quotes removed (see note above); must match the other services.
      - SEARCH_BACKEND_PASSWORD=REDACTED_PASSWORD
    restart: unless-stopped
  # Full-text search backend; only exposed on the internal network.
  sonic:
    image: archivebox/sonic:latest
    container_name: archivebox_sonic
    expose:
      - "1491"
    environment:
      # Fixed: quotes removed (see note above); must match the other services.
      - SEARCH_BACKEND_PASSWORD=REDACTED_PASSWORD
    volumes:
      - ./data/sonic:/var/lib/sonic/store
    restart: unless-stopped
networks:
  default:
    name: archivebox_net

View File

@@ -0,0 +1,23 @@
services:
beeper:
image: ghcr.io/zachatrocity/docker-beeper:latest
container_name: Beeper
healthcheck:
test: ["CMD-SHELL", "nc -z 127.0.0.1 3000 || exit 1"]
interval: 10s
timeout: 5s
retries: 3
start_period: 90s
security_opt:
- seccomp:unconfined
environment:
PUID: 1029
PGID: 100
TZ: America/Los_Angeles
volumes:
- /home/homelab/docker/beeper:/config:rw
ports:
- 3655:3000 # HTTP (redirects to HTTPS — use port 3656)
- 3656:3001 # HTTPS (use this — accept self-signed cert in browser)
shm_size: "2gb"
restart: on-failure:5

View File

@@ -0,0 +1,14 @@
# Binternet - Pinterest frontend
# Port: 8080
# Privacy-respecting Pinterest frontend
services:
binternet:
container_name: binternet
image: ghcr.io/ahwxorg/binternet:latest
cap_drop:
- ALL
security_opt:
- no-new-privileges:true
ports:
- '21544:8080'
restart: unless-stopped

View File

@@ -0,0 +1,30 @@
# Cloudflare Tunnel for Homelab-VM
# Provides secure external access without port forwarding
#
# SETUP INSTRUCTIONS:
# 1. Go to https://one.dash.cloudflare.com/ → Zero Trust → Networks → Tunnels
# 2. Create a new tunnel named "homelab-vm-tunnel"
# 3. Copy the tunnel token (starts with eyJ...)
# 4. Replace TUNNEL_TOKEN_HERE below with your token
# 5. In the tunnel dashboard, add these public hostnames:
#
# | Public Hostname | Service |
# |------------------------|----------------------------|
# | gf.vish.gg | http://localhost:3300 |
# | ntfy.vish.gg | http://localhost:8081 |
# | hoarder.thevish.io | http://localhost:3000 |
# | binterest.thevish.io | http://localhost:21544 |
#
# 6. Deploy this stack
version: '3.8'
services:
cloudflared:
image: cloudflare/cloudflared:latest
container_name: cloudflare-tunnel
restart: unless-stopped
command: tunnel run
environment:
- TUNNEL_TOKEN=${TUNNEL_TOKEN}
network_mode: host # Needed to access localhost services

View File

@@ -0,0 +1,18 @@
# Dashdot - Server dashboard
# Port: 3001
# Modern server dashboard
version: "3.9"
services:
dashdot:
image: mauricenino/dashdot
container_name: dashdot
ports:
- "7512:3001"
volumes:
- "/:/mnt/host:ro"
privileged: true
stdin_open: true # same as -it
tty: true # same as -it
restart: unless-stopped

View File

@@ -0,0 +1,28 @@
# Diun — Docker Image Update Notifier
#
# Watches all running containers on this host and sends ntfy
# notifications when upstream images update their digest.
# Schedule: Mondays 09:00 (weekly cadence).
#
# ntfy topic: https://ntfy.vish.gg/diun
services:
diun:
image: crazymax/diun:latest
container_name: diun
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- diun-data:/data
environment:
LOG_LEVEL: info
DIUN_WATCH_WORKERS: "20"
DIUN_WATCH_SCHEDULE: "0 9 * * 1"
DIUN_WATCH_JITTER: 30s
DIUN_PROVIDERS_DOCKER: "true"
DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT: "true"
DIUN_NOTIF_NTFY_ENDPOINT: "https://ntfy.vish.gg"
DIUN_NOTIF_NTFY_TOPIC: "diun"
restart: unless-stopped
volumes:
diun-data:

View File

@@ -0,0 +1,15 @@
services:
dozzle-agent:
image: amir20/dozzle:latest
container_name: dozzle-agent
command: agent
volumes:
- /var/run/docker.sock:/var/run/docker.sock
ports:
- "7007:7007"
restart: unless-stopped
healthcheck:
test: ["CMD", "/dozzle", "healthcheck"]
interval: 30s
timeout: 5s
retries: 3

View File

@@ -0,0 +1,17 @@
# Draw.io - Diagramming tool
# Port: 8080
# Self-hosted diagram editor
version: "3.9"
services:
drawio:
container_name: Draw.io
image: jgraph/drawio
healthcheck:
test: curl -f http://localhost:8080/ || exit 1
mem_limit: 4g
cpu_shares: 768
security_opt:
- no-new-privileges:true
restart: on-failure:5
ports:
- 5022:8080

View File

@@ -0,0 +1,12 @@
# Excalidraw — Collaborative whiteboard / diagram tool
# Port: 5080
# URL: http://192.168.0.210:5080
# Virtual whiteboard for sketching diagrams, hand-drawn style
services:
excalidraw:
image: excalidraw/excalidraw:latest
container_name: excalidraw
ports:
- "5080:80"
restart: unless-stopped

View File

@@ -0,0 +1,83 @@
# Fluxer Chat Server Deployment
# Domain: st.vish.gg
# Replaces: Stoat Chat
# Status: ✅ DEPLOYED SUCCESSFULLY & CAPTCHA ISSUE RESOLVED
## Deployment Summary
- **Date**: 2026-02-15
- **Domain**: st.vish.gg (Cloudflare DNS grey cloud)
- **Location**: /root/fluxer
- **Replaced**: Stoat Chat (services stopped and removed)
- **Status**: Fully operational with user registration working
## Architecture
Fluxer uses a multi-container architecture with the following services:
- **caddy**: Frontend web server serving the React app (port 8088)
- **gateway**: WebSocket gateway for real-time communication
- **api**: REST API backend (internal port 8080)
- **postgres**: Primary database
- **redis**: Caching and session storage
- **cassandra**: Message storage
- **minio**: File storage (S3-compatible)
- **meilisearch**: Search engine
- **livekit**: Voice/video calling (not configured)
- **worker**: Background job processing
- **media**: Media processing service
- **clamav**: Antivirus scanning
- **metrics**: Monitoring and metrics
## Network Configuration
- **External Access**: nginx reverse proxy → Caddy (port 8088) → API (port 8080)
- **Nginx Config**: /etc/nginx/sites-available/fluxer
- **SSL**: Handled by nginx with existing certificates
## Issues Resolved
### 1. Asset Loading (Fixed)
- **Problem**: Frontend was trying to load assets from external CDN
- **Solution**: Modified build configuration to use local assets
### 2. Captcha Verification (Fixed)
- **Problem**: "verify human" captcha not loading, preventing account creation
- **Root Cause**: Using test Turnstile keys causing 400 errors on registration
- **Solution**: Disabled captcha by setting `CAPTCHA_ENABLED=false` in `/root/fluxer/dev/.env`
- **Result**: User registration now works without captcha requirement
## Configuration Files
- **Main Config**: /root/fluxer/dev/compose.yaml
- **Environment**: /root/fluxer/dev/.env
- **Nginx Config**: /etc/nginx/sites-available/fluxer
## Key Environment Variables
```
CAPTCHA_ENABLED=false
CAPTCHA_PRIMARY_PROVIDER=turnstile
TURNSTILE_SITE_KEY=1x00000000000000000000AA (test key)
TURNSTILE_SECRET_KEY=1x0000000000000000000000000000000AA (test key)
```
## Verification
- **API Health**: https://st.vish.gg/api/instance ✅
- **Frontend**: https://st.vish.gg/ ✅
- **Registration**: Working without captcha ✅
- **Test User Created**: ID 1472533637105737729 ✅
## Management Commands
```bash
# Start services
cd /root/fluxer && docker compose -f dev/compose.yaml up -d
# Stop services
cd /root/fluxer && docker compose -f dev/compose.yaml down
# View logs
cd /root/fluxer && docker compose -f dev/compose.yaml logs [service_name]
# Restart API only
cd /root/fluxer && docker compose -f dev/compose.yaml restart api
```
## Notes
- Captcha can be re-enabled later by setting `CAPTCHA_ENABLED=true` and configuring proper Turnstile keys
- Voice/video calling requires LiveKit configuration (currently disabled)
- All data is persisted in Docker volumes
- Service runs in development mode for easier debugging

View File

@@ -0,0 +1,46 @@
# fstab remote mounts for homelab-vm (192.168.0.210)
# Credentials files (chmod 600, owner root):
# /etc/samba/.atlantis_credentials — vish @ Atlantis + Setillo
# /etc/samba/.calypso_credentials — Vish @ Calypso
# /etc/samba/.setillo_credentials — vish @ Setillo
# /etc/samba/.pi5_credentials — vish @ pi-5
# /etc/samba/.guava_credentials — vish @ Guava (TrueNAS; password has literal \! — not !)
# ── Atlantis (192.168.0.200) - Synology 1823xs+ ──────────────────────────────
# NFS (archive only — only share DSM exports to this host via NFS)
192.168.0.200:/volume1/archive /mnt/repo_atlantis nfs vers=3,_netdev,nofail 0 0
# CIFS
//192.168.0.200/data /mnt/atlantis_data cifs credentials=/etc/samba/.atlantis_credentials,vers=3.0,_netdev,nofail 0 0
//192.168.0.200/docker /mnt/atlantis_docker cifs credentials=/etc/samba/.atlantis_credentials,vers=3.0,_netdev,nofail 0 0
//192.168.0.200/downloads /mnt/atlantis_downloads cifs credentials=/etc/samba/.atlantis_credentials,vers=3.0,_netdev,nofail 0 0
//192.168.0.200/games /mnt/atlantis_games cifs credentials=/etc/samba/.atlantis_credentials,vers=3.0,_netdev,nofail 0 0
//192.168.0.200/torrents /mnt/atlantis_torrents cifs credentials=/etc/samba/.atlantis_credentials,vers=3.0,_netdev,nofail 0 0
//192.168.0.200/usenet /mnt/atlantis_usenet cifs credentials=/etc/samba/.atlantis_credentials,vers=3.0,_netdev,nofail 0 0
//192.168.0.200/website /mnt/atlantis_website cifs credentials=/etc/samba/.atlantis_credentials,vers=3.0,_netdev,nofail 0 0
//192.168.0.200/documents /mnt/atlantis_documents cifs credentials=/etc/samba/.atlantis_credentials,vers=3.0,_netdev,nofail 0 0
# ── Calypso (100.103.48.78) - Synology DS723+ via Tailscale ──────────────────
//100.103.48.78/data /mnt/calypso_data cifs credentials=/etc/samba/.calypso_credentials,vers=3.0,_netdev,nofail 0 0
//100.103.48.78/docker /mnt/calypso_docker cifs credentials=/etc/samba/.calypso_credentials,vers=3.0,_netdev,nofail 0 0
//100.103.48.78/docker2 /mnt/calypso_docker2 cifs credentials=/etc/samba/.calypso_credentials,vers=3.0,_netdev,nofail 0 0
//100.103.48.78/dropboxsync /mnt/calypso_dropboxsync cifs credentials=/etc/samba/.calypso_credentials,vers=3.0,_netdev,nofail 0 0
//100.103.48.78/Files /mnt/calypso_files cifs credentials=/etc/samba/.calypso_credentials,vers=3.0,_netdev,nofail 0 0
//100.103.48.78/netshare /mnt/calypso_netshare cifs credentials=/etc/samba/.calypso_credentials,vers=3.0,_netdev,nofail 0 0
# ── Setillo (100.125.0.20) - Synology DS223j via Tailscale ───────────────────
//100.125.0.20/backups /mnt/setillo_backups cifs credentials=/etc/samba/.setillo_credentials,vers=3.0,_netdev,nofail 0 0
//100.125.0.20/docker /mnt/setillo_docker cifs credentials=/etc/samba/.setillo_credentials,vers=3.0,_netdev,nofail 0 0
//100.125.0.20/PlexMediaServer /mnt/setillo_plex cifs credentials=/etc/samba/.setillo_credentials,vers=3.0,_netdev,nofail 0 0
//100.125.0.20/syncthing /mnt/setillo_syncthing cifs credentials=/etc/samba/.setillo_credentials,vers=3.0,_netdev,nofail 0 0
# ── pi-5 / rpi5-vish (192.168.0.66) - Raspberry Pi 5 ────────────────────────
//192.168.0.66/storagepool /mnt/pi5_storagepool cifs credentials=/etc/samba/.pi5_credentials,vers=3.0,_netdev,nofail 0 0
# ── Guava (100.75.252.64) - TrueNAS SCALE via Tailscale ──────────────────────
//100.75.252.64/photos /mnt/guava_photos cifs credentials=/etc/samba/.guava_credentials,vers=3.0,_netdev,nofail 0 0
//100.75.252.64/data /mnt/guava_data cifs credentials=/etc/samba/.guava_credentials,vers=3.0,_netdev,nofail 0 0
//100.75.252.64/guava_turquoise /mnt/guava_turquoise cifs credentials=/etc/samba/.guava_credentials,vers=3.0,_netdev,nofail 0 0
//100.75.252.64/website /mnt/guava_website cifs credentials=/etc/samba/.guava_credentials,vers=3.0,_netdev,nofail 0 0
//100.75.252.64/jellyfin /mnt/guava_jellyfin cifs credentials=/etc/samba/.guava_credentials,vers=3.0,_netdev,nofail 0 0
//100.75.252.64/truenas-exporters /mnt/guava_exporters cifs credentials=/etc/samba/.guava_credentials,vers=3.0,_netdev,nofail 0 0
//100.75.252.64/iso /mnt/guava_iso cifs credentials=/etc/samba/.guava_credentials,vers=3.0,_netdev,nofail 0 0

View File

@@ -0,0 +1,20 @@
# Gitea to ntfy Webhook Bridge
# Receives Gitea webhooks and forwards formatted messages to ntfy
# Port: 8095 (internal)
#
# Usage: Add webhook in Gitea pointing to http://192.168.0.210:8095/webhook
# Target ntfy topic: homelab-alerts
services:
  gitea-ntfy-bridge:
    image: python:3.12-alpine
    container_name: gitea-ntfy-bridge
    environment:
      - NTFY_URL=https://ntfy.vish.gg
      # FIX: no quotes around the value — in list-form environment entries
      # everything after `=` is passed literally, so quotes would become part
      # of the topic name and break the ntfy publish URL.
      - NTFY_TOPIC=REDACTED_NTFY_TOPIC
    ports:
      - "8095:8095"  # webhook endpoint (Gitea posts to /webhook here)
    volumes:
      # bridge source mounted read-only; no image build needed
      - ./gitea-ntfy-bridge:/app:ro
    command: ["python", "/app/bridge.py"]
    restart: unless-stopped

View File

@@ -0,0 +1,140 @@
#!/usr/bin/env python3
"""Gitea to ntfy Webhook Bridge - Translates Gitea events to ntfy notifications"""
import os
import sys
import json
import urllib.request
from http.server import HTTPServer, BaseHTTPRequestHandler
# Line-buffer stdout/stderr so prints show up immediately in `docker logs`.
# reconfigure() (py3.7+) replaces the old os.fdopen(fileno(), buffering=1)
# trick with the supported API; the runtime here is python:3.12.
sys.stdout.reconfigure(line_buffering=True)
sys.stderr.reconfigure(line_buffering=True)
# ntfy endpoint and topic; overridable via the container environment.
NTFY_URL = os.environ.get("NTFY_URL", "https://ntfy.vish.gg")
NTFY_TOPIC = os.environ.get("NTFY_TOPIC", "homelab-alerts")
class WebhookHandler(BaseHTTPRequestHandler):
    """HTTP handler that turns Gitea webhook POSTs into ntfy notifications.

    GET  -> plain-text health check.
    POST -> parse the Gitea JSON payload, format a notification for the
            event type, and forward it to the configured ntfy topic.
    """
    def do_GET(self):
        """Health check endpoint"""
        self.send_response(200)
        self.send_header("Content-type", "text/plain")
        self.end_headers()
        self.wfile.write(b"Gitea-ntfy bridge OK\n")
        print(f"Health check from {self.client_address[0]}", flush=True)
    def do_POST(self):
        # Read exactly Content-Length bytes of the request body.
        content_length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(content_length)
        try:
            data = json.loads(body) if body else {}
            # Gitea names the event kind (push, pull_request, ...) in this header.
            event_type = self.headers.get("X-Gitea-Event", "unknown")
            print(f"Received {event_type} event from {self.client_address[0]}", flush=True)
            title, message, tags, priority = self.format_message(event_type, data)
            if title and message:
                print(f"Sending notification: {title}", flush=True)
                self.send_ntfy(title, message, tags, priority)
                self.send_response(200)
            else:
                # format_message returned (None, None, ...) -> unhandled event type.
                print(f"Ignoring event type: {event_type}", flush=True)
                self.send_response(204)  # No content to send
        except Exception as e:
            # Catch-all so one malformed payload cannot kill the server loop.
            print(f"Error processing webhook: {e}", flush=True)
            self.send_response(500)
        # Shared tail: flush headers for whichever status was set above.
        self.end_headers()
    def format_message(self, event_type, data):
        """Format Gitea event into ntfy message

        Returns a (title, message, tags, priority) tuple; title/message are
        None for event types this bridge does not handle, which tells
        do_POST to ignore the event.
        """
        repo = data.get("repository", {}).get("full_name", "unknown")
        sender = data.get("sender", {}).get("login", "unknown")
        title = None
        message = None
        tags = "git"  # ntfy emoji tag; replaced per event type below
        priority = "default"
        if event_type == "push":
            commits = data.get("commits", [])
            # e.g. "refs/heads/main" -> "main"
            branch = data.get("ref", "").replace("refs/heads/", "")
            count = len(commits)
            title = f"Push to {repo}"
            message = f"{sender} pushed {count} commit(s) to {branch}"
            if commits:
                # chr(10) == "\n": keep only the first line of the commit message
                message += f"\n\n* {commits[0].get('message', '').split(chr(10))[0]}"
                if count > 1:
                    message += f"\n* ... and {count - 1} more"
            tags = "package"
        elif event_type == "pull_request":
            action = data.get("action", "")
            pr = data.get("pull_request", {})
            pr_title = pr.get("title", "")
            pr_num = pr.get("number", "")
            title = f"PR #{pr_num} {action}"
            message = f"{repo}: {pr_title}\nBy: {sender}"
            tags = "twisted_rightwards_arrows"
            if action == "opened":
                priority = "high"
        elif event_type == "issues":
            action = data.get("action", "")
            issue = data.get("issue", {})
            issue_title = issue.get("title", "")
            issue_num = issue.get("number", "")
            title = f"Issue #{issue_num} {action}"
            message = f"{repo}: {issue_title}\nBy: {sender}"
            tags = "clipboard"
        elif event_type == "release":
            action = data.get("action", "")
            release = data.get("release", {})
            tag = release.get("tag_name", "")
            title = f"Release {tag}"
            message = f"{repo}: New release {action}\n{release.get('name', tag)}"
            tags = "rocket"
            priority = "high"
        elif event_type == "create":
            # New branch or tag created.
            ref_type = data.get("ref_type", "")
            ref = data.get("ref", "")
            title = f"New {ref_type}: {ref}"
            message = f"{repo}\nCreated by: {sender}"
            tags = "sparkles"
        elif event_type == "delete":
            # Branch or tag deleted.
            ref_type = data.get("ref_type", "")
            ref = data.get("ref", "")
            title = f"Deleted {ref_type}: {ref}"
            message = f"{repo}\nDeleted by: {sender}"
            tags = "wastebasket"
        return title, message, tags, priority
    def send_ntfy(self, title, message, tags="git", priority="default"):
        """Send notification to ntfy"""
        url = f"{NTFY_URL}/{NTFY_TOPIC}"
        # ntfy reads title/tags/priority from these request headers;
        # the message itself is the POST body.
        headers = {
            "Title": title,
            "Tags": tags,
            "Priority": priority,
        }
        req = urllib.request.Request(url, data=message.encode('utf-8'), headers=headers, method="POST")
        try:
            with urllib.request.urlopen(req, timeout=10) as resp:
                print(f"Sent: {title} -> {resp.status}")
        except Exception as e:
            # Best-effort delivery: log and move on rather than failing the webhook.
            print(f"Failed to send ntfy: {e}")
    def log_message(self, format, *args):
        # Route BaseHTTPRequestHandler's default stderr access log to stdout.
        print(f"[{self.log_date_time_string()}] {format % args}")
if __name__ == "__main__":
    # Listen on all interfaces; port 8095 matches the compose port mapping.
    bridge = HTTPServer(("0.0.0.0", 8095), WebhookHandler)
    print(f"Gitea-ntfy bridge running on :8095 -> {NTFY_URL}/{NTFY_TOPIC}")
    # Blocks forever, dispatching each request to WebhookHandler.
    bridge.serve_forever()

View File

@@ -0,0 +1,18 @@
# Gotify - Push notifications
# Port: 8081 (container 80)
# Self-hosted push notification server
# NOTE: the obsolete top-level `version` key was removed — Compose v2 ignores
# it and prints a warning.
services:
  gotify:
    image: ghcr.io/gotify/server:latest
    container_name: Gotify
    restart: on-failure:5
    ports:
      - "8081:80"  # host 8081 -> gotify web/API on container port 80
    volumes:
      - /home/homelab/docker/gotify:/app/data:rw
    environment:
      GOTIFY_DEFAULTUSER_NAME: vish
      # NOTE(review): credential committed in compose — prefer an env_file or
      # secret store outside version control.
      GOTIFY_DEFAULTUSER_PASS: "REDACTED_PASSWORD"
      TZ: America/Los_Angeles

View File

@@ -0,0 +1,365 @@
{
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"mappings": [
{
"options": {
"0": {
"color": "red",
"text": "DOWN"
},
"1": {
"color": "green",
"text": "UP"
}
},
"type": "value"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "red",
"value": null
},
{
"color": "green",
"value": 1
}
]
}
}
},
"gridPos": {
"h": 5,
"w": 24,
"x": 0,
"y": 0
},
"id": 1,
"options": {
"colorMode": "background",
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
]
},
"textMode": "value_and_name"
},
"targets": [
{
"expr": "up{job=~\"\"}",
"legendFormat": "{{job}}",
"refId": "A"
}
],
"title": "Device Status",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"max": 100,
"min": 0,
"unit": "percent"
}
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 5
},
"id": 2,
"options": {
"legend": {
"calcs": [
"mean",
"max"
],
"displayMode": "table",
"placement": "right"
}
},
"targets": [
{
"expr": "100 - (avg by(job) (rate(node_cpu_seconds_total{mode=\"idle\", job=~\"\"}[5m])) * 100)",
"legendFormat": "{{job}}",
"refId": "A"
}
],
"title": "CPU Usage",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"max": 100,
"min": 0,
"unit": "percent"
}
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 5
},
"id": 3,
"options": {
"legend": {
"calcs": [
"mean",
"max"
],
"displayMode": "table",
"placement": "right"
}
},
"targets": [
{
"expr": "(1 - (node_memory_MemAvailable_bytes{job=~\"\"} / node_memory_MemTotal_bytes{job=~\"\"})) * 100",
"legendFormat": "{{job}}",
"refId": "A"
}
],
"title": "Memory Usage",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 70
},
{
"color": "red",
"value": 85
}
]
},
"unit": "percent"
}
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 13
},
"id": 4,
"options": {
"displayMode": "gradient",
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
]
}
},
"targets": [
{
"expr": "100 - ((node_filesystem_avail_bytes{job=~\"\", mountpoint=\"/\", fstype!=\"rootfs\"} / node_filesystem_size_bytes{job=~\"\", mountpoint=\"/\", fstype!=\"rootfs\"}) * 100)",
"legendFormat": "{{job}}",
"refId": "A"
}
],
"title": "Root Disk Usage",
"type": "bargauge"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "s"
}
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 13
},
"id": 5,
"options": {
"colorMode": "value",
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
]
}
},
"targets": [
{
"expr": "node_time_seconds{job=~\"\"} - node_boot_time_seconds{job=~\"\"}",
"legendFormat": "{{job}}",
"refId": "A"
}
],
"title": "Uptime",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"unit": "Bps"
}
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 21
},
"id": 6,
"options": {
"legend": {
"calcs": [
"mean",
"max"
],
"displayMode": "table",
"placement": "right"
}
},
"targets": [
{
"expr": "sum by(job) (rate(node_network_receive_bytes_total{job=~\"\", device!~\"lo|docker.*|br-.*|veth.*\"}[5m]))",
"legendFormat": "{{job}}",
"refId": "A"
}
],
"title": "Network Receive",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"unit": "Bps"
}
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 21
},
"id": 7,
"options": {
"legend": {
"calcs": [
"mean",
"max"
],
"displayMode": "table",
"placement": "right"
}
},
"targets": [
{
"expr": "sum by(job) (rate(node_network_transmit_bytes_total{job=~\"\", device!~\"lo|docker.*|br-.*|veth.*\"}[5m]))",
"legendFormat": "{{job}}",
"refId": "A"
}
],
"title": "Network Transmit",
"type": "timeseries"
}
],
"refresh": "30s",
"schemaVersion": 38,
"tags": [
"infrastructure",
"node-exporter",
"tailscale"
],
"templating": {
"list": [
{
"current": {},
"hide": 0,
"includeAll": false,
"label": "Data Source",
"multi": false,
"name": "datasource",
"options": [],
"query": "prometheus",
"refresh": 1,
"type": "datasource"
},
{
"allValue": ".*",
"current": {},
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"definition": "label_values(node_uname_info, job)",
"hide": 0,
"includeAll": true,
"label": "Host",
"multi": true,
"name": "job",
"query": "label_values(node_uname_info, job)",
"refresh": 1,
"regex": "",
"sort": 1,
"type": "query"
}
]
},
"timezone": "browser",
"title": "Infrastructure Overview - All Devices",
"uid": "infrastructure-overview-v2"
}

View File

@@ -0,0 +1,939 @@
{
"panels": [
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 0
},
"id": 1,
"title": "\ud83d\udcca Quick Stats",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "s"
}
},
"gridPos": {
"h": 4,
"w": 4,
"x": 0,
"y": 1
},
"id": 2,
"options": {
"colorMode": "value",
"graphMode": "none",
"reduceOptions": {
"calcs": [
"lastNotNull"
]
}
},
"targets": [
{
"expr": "node_time_seconds{job=\"$job\",instance=\"$instance\"} - node_boot_time_seconds{job=\"$job\",instance=\"$instance\"}",
"legendFormat": "Uptime",
"refId": "A"
}
],
"title": "Uptime",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "blue",
"value": null
}
]
}
}
},
"gridPos": {
"h": 4,
"w": 3,
"x": 4,
"y": 1
},
"id": 3,
"options": {
"colorMode": "value",
"graphMode": "none",
"reduceOptions": {
"calcs": [
"lastNotNull"
]
}
},
"targets": [
{
"expr": "count(node_cpu_seconds_total{job=\"$job\",instance=\"$instance\",mode=\"idle\"})",
"legendFormat": "Cores",
"refId": "A"
}
],
"title": "CPU Cores",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "purple",
"value": null
}
]
},
"unit": "bytes"
}
},
"gridPos": {
"h": 4,
"w": 3,
"x": 7,
"y": 1
},
"id": 4,
"options": {
"colorMode": "value",
"graphMode": "none",
"reduceOptions": {
"calcs": [
"lastNotNull"
]
}
},
"targets": [
{
"expr": "node_memory_MemTotal_bytes{job=\"$job\",instance=\"$instance\"}",
"legendFormat": "RAM",
"refId": "A"
}
],
"title": "Total RAM",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 60
},
{
"color": "red",
"value": 80
}
]
},
"unit": "percent"
}
},
"gridPos": {
"h": 4,
"w": 3,
"x": 10,
"y": 1
},
"id": 5,
"options": {
"reduceOptions": {
"calcs": [
"lastNotNull"
]
}
},
"targets": [
{
"expr": "100 - (avg(rate(node_cpu_seconds_total{job=\"$job\",instance=\"$instance\",mode=\"idle\"}[5m])) * 100)",
"legendFormat": "CPU",
"refId": "A"
}
],
"title": "CPU",
"type": "gauge"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 70
},
{
"color": "red",
"value": 85
}
]
},
"unit": "percent"
}
},
"gridPos": {
"h": 4,
"w": 3,
"x": 13,
"y": 1
},
"id": 6,
"options": {
"reduceOptions": {
"calcs": [
"lastNotNull"
]
}
},
"targets": [
{
"expr": "(1 - (node_memory_MemAvailable_bytes{job=\"$job\",instance=\"$instance\"} / node_memory_MemTotal_bytes{job=\"$job\",instance=\"$instance\"})) * 100",
"legendFormat": "Memory",
"refId": "A"
}
],
"title": "Memory",
"type": "gauge"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 70
},
{
"color": "red",
"value": 85
}
]
},
"unit": "percent"
}
},
"gridPos": {
"h": 4,
"w": 3,
"x": 16,
"y": 1
},
"id": 7,
"options": {
"reduceOptions": {
"calcs": [
"lastNotNull"
]
}
},
"targets": [
{
"expr": "100 - ((node_filesystem_avail_bytes{job=\"$job\",instance=\"$instance\",mountpoint=\"/\",fstype!=\"rootfs\"} / node_filesystem_size_bytes{job=\"$job\",instance=\"$instance\",mountpoint=\"/\",fstype!=\"rootfs\"}) * 100)",
"legendFormat": "Disk",
"refId": "A"
}
],
"title": "Disk /",
"type": "gauge"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"decimals": 2,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 2
},
{
"color": "red",
"value": 4
}
]
}
}
},
"gridPos": {
"h": 4,
"w": 2,
"x": 19,
"y": 1
},
"id": 8,
"options": {
"colorMode": "value",
"graphMode": "area",
"reduceOptions": {
"calcs": [
"lastNotNull"
]
}
},
"targets": [
{
"expr": "node_load1{job=\"$job\",instance=\"$instance\"}",
"legendFormat": "1m",
"refId": "A"
}
],
"title": "Load 1m",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"decimals": 2,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 2
},
{
"color": "red",
"value": 4
}
]
}
}
},
"gridPos": {
"h": 4,
"w": 2,
"x": 21,
"y": 1
},
"id": 9,
"options": {
"colorMode": "value",
"graphMode": "area",
"reduceOptions": {
"calcs": [
"lastNotNull"
]
}
},
"targets": [
{
"expr": "node_load5{job=\"$job\",instance=\"$instance\"}",
"legendFormat": "5m",
"refId": "A"
}
],
"title": "Load 5m",
"type": "stat"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 5
},
"id": 10,
"title": "\ud83d\udda5\ufe0f CPU Details",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"custom": {
"fillOpacity": 50,
"stacking": {
"group": "A",
"mode": "normal"
}
},
"unit": "percent"
}
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 6
},
"id": 11,
"options": {
"legend": {
"calcs": [
"mean",
"max"
],
"displayMode": "table",
"placement": "right"
}
},
"targets": [
{
"expr": "avg(rate(node_cpu_seconds_total{job=\"$job\",instance=\"$instance\",mode=\"user\"}[5m])) * 100",
"legendFormat": "User",
"refId": "A"
},
{
"expr": "avg(rate(node_cpu_seconds_total{job=\"$job\",instance=\"$instance\",mode=\"system\"}[5m])) * 100",
"legendFormat": "System",
"refId": "B"
},
{
"expr": "avg(rate(node_cpu_seconds_total{job=\"$job\",instance=\"$instance\",mode=\"iowait\"}[5m])) * 100",
"legendFormat": "IOWait",
"refId": "C"
},
{
"expr": "avg(rate(node_cpu_seconds_total{job=\"$job\",instance=\"$instance\",mode=\"steal\"}[5m])) * 100",
"legendFormat": "Steal",
"refId": "D"
}
],
"title": "CPU Usage Breakdown",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"max": 100,
"min": 0,
"unit": "percent"
}
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 6
},
"id": 12,
"options": {
"legend": {
"calcs": [
"mean"
],
"displayMode": "table",
"placement": "right"
}
},
"targets": [
{
"expr": "100 - (rate(node_cpu_seconds_total{job=\"$job\",instance=\"$instance\",mode=\"idle\"}[5m]) * 100)",
"legendFormat": "CPU {{cpu}}",
"refId": "A"
}
],
"title": "CPU Per Core",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 14
},
"id": 20,
"title": "\ud83e\udde0 Memory Details",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"custom": {
"fillOpacity": 30,
"stacking": {
"group": "A",
"mode": "normal"
}
},
"unit": "bytes"
}
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 15
},
"id": 21,
"options": {
"legend": {
"calcs": [
"mean"
],
"displayMode": "table",
"placement": "right"
}
},
"targets": [
{
"expr": "node_memory_MemTotal_bytes{job=\"$job\",instance=\"$instance\"} - node_memory_MemAvailable_bytes{job=\"$job\",instance=\"$instance\"}",
"legendFormat": "Used",
"refId": "A"
},
{
"expr": "node_memory_Buffers_bytes{job=\"$job\",instance=\"$instance\"}",
"legendFormat": "Buffers",
"refId": "B"
},
{
"expr": "node_memory_Cached_bytes{job=\"$job\",instance=\"$instance\"}",
"legendFormat": "Cached",
"refId": "C"
},
{
"expr": "node_memory_MemFree_bytes{job=\"$job\",instance=\"$instance\"}",
"legendFormat": "Free",
"refId": "D"
}
],
"title": "Memory Usage",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"unit": "bytes"
}
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 15
},
"id": 22,
"targets": [
{
"expr": "node_memory_SwapTotal_bytes{job=\"$job\",instance=\"$instance\"}",
"legendFormat": "Total",
"refId": "A"
},
{
"expr": "node_memory_SwapTotal_bytes{job=\"$job\",instance=\"$instance\"} - node_memory_SwapFree_bytes{job=\"$job\",instance=\"$instance\"}",
"legendFormat": "Used",
"refId": "B"
}
],
"title": "Swap Usage",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 23
},
"id": 30,
"title": "\ud83d\udcbe Disk Details",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 70
},
{
"color": "red",
"value": 85
}
]
},
"unit": "percent"
}
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 24
},
"id": 31,
"options": {
"displayMode": "gradient",
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
]
}
},
"targets": [
{
"expr": "100 - ((node_filesystem_avail_bytes{job=\"$job\",instance=\"$instance\",fstype!~\"tmpfs|overlay|squashfs\"} / node_filesystem_size_bytes{job=\"$job\",instance=\"$instance\",fstype!~\"tmpfs|overlay|squashfs\"}) * 100)",
"legendFormat": "{{mountpoint}}",
"refId": "A"
}
],
"title": "Disk Space Usage",
"type": "bargauge"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"unit": "Bps"
},
"overrides": [
{
"matcher": {
"id": "byRegexp",
"options": ".*Write.*"
},
"properties": [
{
"id": "custom.transform",
"value": "negative-Y"
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 24
},
"id": 32,
"options": {
"legend": {
"calcs": [
"mean",
"max"
],
"displayMode": "table",
"placement": "right"
}
},
"targets": [
{
"expr": "rate(node_disk_read_bytes_total{job=\"$job\",instance=\"$instance\",device!~\"loop.*|dm-.*\"}[5m])",
"legendFormat": "{{device}} Read",
"refId": "A"
},
{
"expr": "rate(node_disk_written_bytes_total{job=\"$job\",instance=\"$instance\",device!~\"loop.*|dm-.*\"}[5m])",
"legendFormat": "{{device}} Write",
"refId": "B"
}
],
"title": "Disk I/O",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 32
},
"id": 40,
"title": "\ud83c\udf10 Network Details",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"unit": "bps"
},
"overrides": [
{
"matcher": {
"id": "byRegexp",
"options": ".*TX.*"
},
"properties": [
{
"id": "custom.transform",
"value": "negative-Y"
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 33
},
"id": 41,
"options": {
"legend": {
"calcs": [
"mean",
"max"
],
"displayMode": "table",
"placement": "right"
}
},
"targets": [
{
"expr": "rate(node_network_receive_bytes_total{job=\"$job\",instance=\"$instance\",device!~\"lo|docker.*|br-.*|veth.*\"}[5m]) * 8",
"legendFormat": "{{device}} RX",
"refId": "A"
},
{
"expr": "rate(node_network_transmit_bytes_total{job=\"$job\",instance=\"$instance\",device!~\"lo|docker.*|br-.*|veth.*\"}[5m]) * 8",
"legendFormat": "{{device}} TX",
"refId": "B"
}
],
"title": "Network Traffic",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"unit": "pps"
}
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 33
},
"id": 42,
"options": {
"legend": {
"calcs": [
"mean"
],
"displayMode": "table",
"placement": "right"
}
},
"targets": [
{
"expr": "rate(node_network_receive_errs_total{job=\"$job\",instance=\"$instance\",device!~\"lo|docker.*|br-.*|veth.*\"}[5m])",
"legendFormat": "{{device}} RX Errors",
"refId": "A"
},
{
"expr": "rate(node_network_transmit_errs_total{job=\"$job\",instance=\"$instance\",device!~\"lo|docker.*|br-.*|veth.*\"}[5m])",
"legendFormat": "{{device}} TX Errors",
"refId": "B"
}
],
"title": "Network Errors",
"type": "timeseries"
}
],
"refresh": "30s",
"schemaVersion": 38,
"tags": [
"node-exporter",
"detailed",
"infrastructure"
],
"templating": {
"list": [
{
"current": {
"text": "Prometheus",
"value": "cfbskvs8upds0b"
},
"hide": 0,
"includeAll": false,
"label": "Data Source",
"multi": false,
"name": "datasource",
"options": [],
"query": "prometheus",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"type": "datasource"
},
{
"current": {
"text": "node_exporter",
"value": "node_exporter"
},
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"definition": "label_values(node_uname_info, job)",
"hide": 0,
"includeAll": false,
"label": "Host",
"multi": false,
"name": "job",
"options": [],
"query": "label_values(node_uname_info, job)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"type": "query"
},
{
"current": {
"text": "homelab-vm",
"value": "homelab-vm"
},
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"definition": "label_values(node_uname_info{job=\"$job\"}, instance)",
"hide": 0,
"includeAll": false,
"label": "Instance",
"multi": false,
"name": "instance",
"options": [],
"query": "label_values(node_uname_info{job=\"$job\"}, instance)",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"type": "query"
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timezone": "browser",
"title": "Node Details - Full Metrics",
"uid": "node-details-v2"
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,237 @@
{
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"custom": {
"fillOpacity": 10,
"lineWidth": 2,
"stacking": {
"mode": "none"
}
},
"unit": "Bps"
}
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"id": 1,
"targets": [
{
"expr": "rate(node_network_transmit_bytes_total{device=\"tailscale0\"}[5m])",
"legendFormat": "{{instance}}"
}
],
"title": "Tailscale TX Rate by Host",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"custom": {
"fillOpacity": 10,
"lineWidth": 2,
"stacking": {
"mode": "none"
}
},
"unit": "Bps"
}
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 2,
"targets": [
{
"expr": "rate(node_network_receive_bytes_total{device=\"tailscale0\"}[5m])",
"legendFormat": "{{instance}}"
}
],
"title": "Tailscale RX Rate by Host",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"custom": {
"fillOpacity": 20,
"lineWidth": 2,
"stacking": {
"mode": "normal"
}
},
"unit": "Bps"
}
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8
},
"id": 3,
"targets": [
{
"expr": "sum(rate(node_network_transmit_bytes_total{device=\"tailscale0\"}[5m]))",
"legendFormat": "Total TX"
},
{
"expr": "sum(rate(node_network_receive_bytes_total{device=\"tailscale0\"}[5m]))",
"legendFormat": "Total RX"
}
],
"title": "Total Tailnet Bandwidth",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"custom": {
"fillOpacity": 30,
"lineWidth": 1,
"stacking": {
"mode": "normal"
}
},
"unit": "Bps"
}
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 8
},
"id": 4,
"targets": [
{
"expr": "rate(node_network_transmit_bytes_total{device=\"tailscale0\"}[5m]) + rate(node_network_receive_bytes_total{device=\"tailscale0\"}[5m])",
"legendFormat": "{{instance}}"
}
],
"title": "Tailscale TX+RX Rate (Stacked by Host)",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"thresholds": {
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 10485760
},
{
"color": "red",
"value": 52428800
}
]
},
"unit": "Bps"
}
},
"gridPos": {
"h": 4,
"w": 24,
"x": 0,
"y": 16
},
"id": 5,
"options": {
"graphMode": "area",
"textMode": "auto"
},
"targets": [
{
"expr": "rate(node_network_transmit_bytes_total{device=\"tailscale0\"}[5m])",
"legendFormat": "{{instance}}"
}
],
"title": "Current TX Rate",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "cfbskvs8upds0b"
},
"fieldConfig": {
"defaults": {
"thresholds": {
"steps": [
{
"color": "blue",
"value": null
}
]
},
"unit": "bytes"
}
},
"gridPos": {
"h": 4,
"w": 24,
"x": 0,
"y": 20
},
"id": 6,
"options": {
"graphMode": "none",
"textMode": "auto"
},
"targets": [
{
"expr": "node_network_transmit_bytes_total{device=\"tailscale0\"}",
"legendFormat": "{{instance}} TX"
}
],
"title": "Total Data Transferred (since reset)",
"type": "stat"
}
],
"refresh": "30s",
"schemaVersion": 39,
"tags": [
"tailscale",
"network"
],
"time": {
"from": "now-24h",
"to": "now"
},
"timezone": "browser",
"title": "Tailscale Bandwidth",
"uid": "tailscale-bandwidth"
}

View File

@@ -0,0 +1,574 @@
{
"uid": "truenas-guava",
"title": "TrueNAS (Guava) Monitoring",
"description": "TrueNAS SCALE monitoring for Guava (Ryzen 5 8600G, ZFS storage)",
"editable": true,
"graphTooltip": 1,
"refresh": "30s",
"schemaVersion": 39,
"tags": ["truenas", "guava", "node-exporter"],
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {},
"timezone": "browser",
"panels": [
{
"collapsed": false,
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 },
"id": 1,
"title": "Overview",
"type": "row"
},
{
"datasource": { "type": "prometheus", "uid": "cfbskvs8upds0b" },
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [{ "color": "green", "value": null }]
},
"unit": "s"
}
},
"gridPos": { "h": 4, "w": 8, "x": 0, "y": 1 },
"id": 2,
"options": {
"colorMode": "value",
"graphMode": "none",
"reduceOptions": { "calcs": ["lastNotNull"] }
},
"targets": [
{
"expr": "node_time_seconds{job=\"truenas-node\",instance=\"guava\"} - node_boot_time_seconds{job=\"truenas-node\",instance=\"guava\"}",
"legendFormat": "Uptime",
"refId": "A"
}
],
"title": "Uptime",
"type": "stat"
},
{
"datasource": { "type": "prometheus", "uid": "cfbskvs8upds0b" },
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [{ "color": "blue", "value": null }]
}
}
},
"gridPos": { "h": 4, "w": 8, "x": 8, "y": 1 },
"id": 3,
"options": {
"colorMode": "value",
"graphMode": "none",
"reduceOptions": { "calcs": ["lastNotNull"] }
},
"targets": [
{
"expr": "count(count by (cpu) (node_cpu_seconds_total{job=\"truenas-node\",instance=\"guava\"}))",
"legendFormat": "CPU Cores",
"refId": "A"
}
],
"title": "CPU Cores",
"type": "stat"
},
{
"datasource": { "type": "prometheus", "uid": "cfbskvs8upds0b" },
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [{ "color": "purple", "value": null }]
},
"unit": "bytes"
}
},
"gridPos": { "h": 4, "w": 8, "x": 16, "y": 1 },
"id": 4,
"options": {
"colorMode": "value",
"graphMode": "none",
"reduceOptions": { "calcs": ["lastNotNull"] }
},
"targets": [
{
"expr": "node_memory_MemTotal_bytes{job=\"truenas-node\",instance=\"guava\"}",
"legendFormat": "Total RAM",
"refId": "A"
}
],
"title": "Total RAM",
"type": "stat"
},
{
"collapsed": false,
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 5 },
"id": 5,
"title": "CPU & Load",
"type": "row"
},
{
"datasource": { "type": "prometheus", "uid": "cfbskvs8upds0b" },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" },
"custom": {
"axisBorderShow": false,
"drawStyle": "line",
"fillOpacity": 20,
"lineWidth": 1,
"pointSize": 5,
"showPoints": "never",
"spanNulls": false,
"stacking": { "mode": "none" }
},
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{ "color": "green", "value": null },
{ "color": "orange", "value": 70 },
{ "color": "red", "value": 90 }
]
},
"unit": "percent"
}
},
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 6 },
"id": 6,
"options": {
"legend": { "displayMode": "list", "placement": "bottom" },
"tooltip": { "mode": "multi", "sort": "desc" }
},
"targets": [
{
"expr": "100 - (avg by (instance) (rate(node_cpu_seconds_total{job=\"truenas-node\",instance=\"guava\",mode=\"idle\"}[$__rate_interval])) * 100)",
"legendFormat": "CPU Usage %",
"refId": "A"
}
],
"title": "CPU Usage %",
"type": "timeseries"
},
{
"datasource": { "type": "prometheus", "uid": "cfbskvs8upds0b" },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" },
"custom": {
"axisBorderShow": false,
"drawStyle": "line",
"fillOpacity": 10,
"lineWidth": 1,
"pointSize": 5,
"showPoints": "never",
"spanNulls": false,
"stacking": { "mode": "none" }
},
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [{ "color": "green", "value": null }]
}
}
},
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 6 },
"id": 7,
"options": {
"legend": { "displayMode": "list", "placement": "bottom" },
"tooltip": { "mode": "multi", "sort": "desc" }
},
"targets": [
{
"expr": "node_load1{job=\"truenas-node\",instance=\"guava\"}",
"legendFormat": "Load 1m",
"refId": "A"
},
{
"expr": "node_load5{job=\"truenas-node\",instance=\"guava\"}",
"legendFormat": "Load 5m",
"refId": "B"
},
{
"expr": "node_load15{job=\"truenas-node\",instance=\"guava\"}",
"legendFormat": "Load 15m",
"refId": "C"
}
],
"title": "System Load",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 14 },
"id": 8,
"title": "Memory",
"type": "row"
},
{
"datasource": { "type": "prometheus", "uid": "cfbskvs8upds0b" },
"fieldConfig": {
"defaults": {
"color": { "mode": "thresholds" },
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{ "color": "green", "value": null },
{ "color": "orange", "value": 70 },
{ "color": "red", "value": 90 }
]
},
"unit": "percent"
}
},
"gridPos": { "h": 8, "w": 6, "x": 0, "y": 15 },
"id": 9,
"options": {
"minVizHeight": 75,
"minVizWidth": 75,
"orientation": "auto",
"reduceOptions": { "calcs": ["lastNotNull"] },
"showThresholdLabels": false,
"showThresholdMarkers": true
},
"targets": [
{
"expr": "100 * (1 - node_memory_MemAvailable_bytes{job=\"truenas-node\",instance=\"guava\"} / node_memory_MemTotal_bytes{job=\"truenas-node\",instance=\"guava\"})",
"legendFormat": "RAM Used %",
"refId": "A"
}
],
"title": "RAM Usage",
"type": "gauge"
},
{
"datasource": { "type": "prometheus", "uid": "cfbskvs8upds0b" },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" },
"custom": {
"axisBorderShow": false,
"drawStyle": "line",
"fillOpacity": 20,
"lineWidth": 1,
"pointSize": 5,
"showPoints": "never",
"spanNulls": false,
"stacking": { "mode": "none" }
},
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [{ "color": "green", "value": null }]
},
"unit": "bytes"
}
},
"gridPos": { "h": 8, "w": 18, "x": 6, "y": 15 },
"id": 10,
"options": {
"legend": { "displayMode": "list", "placement": "bottom" },
"tooltip": { "mode": "multi", "sort": "desc" }
},
"targets": [
{
"expr": "node_memory_MemTotal_bytes{job=\"truenas-node\",instance=\"guava\"} - node_memory_MemAvailable_bytes{job=\"truenas-node\",instance=\"guava\"}",
"legendFormat": "Used",
"refId": "A"
},
{
"expr": "node_memory_MemTotal_bytes{job=\"truenas-node\",instance=\"guava\"}",
"legendFormat": "Total",
"refId": "B"
}
],
"title": "RAM Usage Over Time",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 23 },
"id": 11,
"title": "Storage",
"type": "row"
},
{
"datasource": { "type": "prometheus", "uid": "cfbskvs8upds0b" },
"fieldConfig": {
"defaults": {
"color": { "mode": "thresholds" },
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{ "color": "green", "value": null },
{ "color": "yellow", "value": 60 },
{ "color": "orange", "value": 80 },
{ "color": "red", "value": 90 }
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 24 },
"id": 12,
"options": {
"displayMode": "gradient",
"minVizHeight": 16,
"minVizWidth": 0,
"namePlacement": "auto",
"orientation": "horizontal",
"reduceOptions": { "calcs": ["lastNotNull"] },
"showUnfilled": true,
"valueMode": "color"
},
"targets": [
{
"expr": "100 * (1 - node_filesystem_avail_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/\"} / node_filesystem_size_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/\"})",
"legendFormat": "/ (boot pool)",
"refId": "A"
},
{
"expr": "100 * (1 - node_filesystem_avail_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/mnt/data\"} / node_filesystem_size_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/mnt/data\"})",
"legendFormat": "/mnt/data (main pool)",
"refId": "B"
},
{
"expr": "100 * (1 - node_filesystem_avail_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/mnt/data/guava_turquoise\"} / node_filesystem_size_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/mnt/data/guava_turquoise\"})",
"legendFormat": "/mnt/data/guava_turquoise (external)",
"refId": "C"
},
{
"expr": "100 * (1 - node_filesystem_avail_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/mnt/atlantis_media\"} / node_filesystem_size_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/mnt/atlantis_media\"})",
"legendFormat": "/mnt/atlantis_media (NFS)",
"refId": "D"
},
{
"expr": "100 * (1 - node_filesystem_avail_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/mnt/.ix-apps\"} / node_filesystem_size_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/mnt/.ix-apps\"})",
"legendFormat": "/mnt/.ix-apps (apps pool)",
"refId": "E"
}
],
"title": "Storage Usage %",
"type": "bargauge"
},
{
"datasource": { "type": "prometheus", "uid": "cfbskvs8upds0b" },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" },
"custom": {
"axisBorderShow": false,
"drawStyle": "line",
"fillOpacity": 10,
"lineWidth": 1,
"pointSize": 5,
"showPoints": "never",
"spanNulls": false,
"stacking": { "mode": "none" }
},
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [{ "color": "green", "value": null }]
},
"unit": "bytes"
}
},
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 24 },
"id": 13,
"options": {
"legend": { "displayMode": "list", "placement": "bottom" },
"tooltip": { "mode": "multi", "sort": "desc" }
},
"targets": [
{
"expr": "node_filesystem_size_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/\"} - node_filesystem_avail_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/\"}",
"legendFormat": "/ used",
"refId": "A"
},
{
"expr": "node_filesystem_size_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/mnt/data\"} - node_filesystem_avail_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/mnt/data\"}",
"legendFormat": "/mnt/data used",
"refId": "B"
},
{
"expr": "node_filesystem_size_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/mnt/data/guava_turquoise\"} - node_filesystem_avail_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/mnt/data/guava_turquoise\"}",
"legendFormat": "guava_turquoise used",
"refId": "C"
},
{
"expr": "node_filesystem_size_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/mnt/atlantis_media\"} - node_filesystem_avail_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/mnt/atlantis_media\"}",
"legendFormat": "atlantis_media used",
"refId": "D"
},
{
"expr": "node_filesystem_size_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/mnt/.ix-apps\"} - node_filesystem_avail_bytes{job=\"truenas-node\",instance=\"guava\",mountpoint=\"/mnt/.ix-apps\"}",
"legendFormat": ".ix-apps used",
"refId": "E"
}
],
"title": "Storage Used Over Time",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 32 },
"id": 14,
"title": "Network",
"type": "row"
},
{
"datasource": { "type": "prometheus", "uid": "cfbskvs8upds0b" },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" },
"custom": {
"axisBorderShow": false,
"drawStyle": "line",
"fillOpacity": 10,
"lineWidth": 1,
"pointSize": 5,
"showPoints": "never",
"spanNulls": false,
"stacking": { "mode": "none" }
},
"thresholds": {
"mode": "absolute",
"steps": [{ "color": "green", "value": null }]
},
"unit": "Bps"
}
},
"gridPos": { "h": 8, "w": 24, "x": 0, "y": 33 },
"id": 15,
"options": {
"legend": { "displayMode": "list", "placement": "bottom" },
"tooltip": { "mode": "multi", "sort": "desc" }
},
"targets": [
{
"expr": "rate(node_network_receive_bytes_total{job=\"truenas-node\",instance=\"guava\",device!~\"lo|veth.*|br-.*|docker.*\"}[$__rate_interval])",
"legendFormat": "{{device}} rx",
"refId": "A"
},
{
"expr": "-rate(node_network_transmit_bytes_total{job=\"truenas-node\",instance=\"guava\",device!~\"lo|veth.*|br-.*|docker.*\"}[$__rate_interval])",
"legendFormat": "{{device}} tx",
"refId": "B"
}
],
"title": "Network Throughput (rx positive / tx negative)",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 41 },
"id": 16,
"title": "Disk I/O",
"type": "row"
},
{
"datasource": { "type": "prometheus", "uid": "cfbskvs8upds0b" },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" },
"custom": {
"axisBorderShow": false,
"drawStyle": "line",
"fillOpacity": 10,
"lineWidth": 1,
"pointSize": 5,
"showPoints": "never",
"spanNulls": false,
"stacking": { "mode": "none" }
},
"thresholds": {
"mode": "absolute",
"steps": [{ "color": "green", "value": null }]
},
"unit": "Bps"
}
},
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 42 },
"id": 17,
"options": {
"legend": { "displayMode": "list", "placement": "bottom" },
"tooltip": { "mode": "multi", "sort": "desc" }
},
"targets": [
{
"expr": "rate(node_disk_read_bytes_total{job=\"truenas-node\",instance=\"guava\",device!~\"loop.*\"}[$__rate_interval])",
"legendFormat": "{{device}} read",
"refId": "A"
},
{
"expr": "-rate(node_disk_written_bytes_total{job=\"truenas-node\",instance=\"guava\",device!~\"loop.*\"}[$__rate_interval])",
"legendFormat": "{{device}} write",
"refId": "B"
}
],
"title": "Disk Read/Write Rates",
"type": "timeseries"
},
{
"datasource": { "type": "prometheus", "uid": "cfbskvs8upds0b" },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" },
"custom": {
"axisBorderShow": false,
"drawStyle": "line",
"fillOpacity": 20,
"lineWidth": 1,
"pointSize": 5,
"showPoints": "never",
"spanNulls": false,
"stacking": { "mode": "none" }
},
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{ "color": "green", "value": null },
{ "color": "orange", "value": 70 },
{ "color": "red", "value": 90 }
]
},
"unit": "percent"
}
},
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 42 },
"id": 18,
"options": {
"legend": { "displayMode": "list", "placement": "bottom" },
"tooltip": { "mode": "multi", "sort": "desc" }
},
"targets": [
{
"expr": "rate(node_disk_io_time_seconds_total{job=\"truenas-node\",instance=\"guava\",device!~\"loop.*\"}[$__rate_interval]) * 100",
"legendFormat": "{{device}} IO util %",
"refId": "A"
}
],
"title": "Disk I/O Utilization %",
"type": "timeseries"
}
],
"templating": { "list": [] },
"annotations": { "list": [] },
"links": [],
"fiscalYearStartMonth": 0,
"liveNow": false,
"weekStart": ""
}

View File

@@ -0,0 +1,12 @@
# Grafana dashboard provisioning: load every dashboard JSON found in
# /var/lib/grafana/dashboards and keep them in sync with the files on disk.
apiVersion: 1
providers:
  - name: 'default'
    orgId: 1
    # '' places provisioned dashboards in the General folder
    folder: ''
    type: file
    # false: provisioned dashboards may still be deleted from the UI
    disableDeletion: false
    # re-scan the dashboards directory every 30 seconds
    updateIntervalSeconds: 30
    # allow edits made in the UI to be saved back
    allowUiUpdates: true
    options:
      path: /var/lib/grafana/dashboards

View File

@@ -0,0 +1,10 @@
# Grafana datasource provisioning: register the in-stack Prometheus server.
apiVersion: 1
datasources:
  - name: Prometheus
    type: prometheus
    # proxy: the Grafana backend performs the queries (browser never
    # talks to Prometheus directly)
    access: proxy
    url: http://prometheus:9090
    # fixed uid so dashboard JSONs can reference this datasource stably
    uid: cfbskvs8upds0b
    isDefault: true
    editable: true

View File

@@ -0,0 +1,53 @@
# Hoarder/Karakeep - Bookmark manager
# Port: 3000
# URL: https://hoarder.thevish.io
# AI-powered bookmark and note manager
# SSO: Authentik OIDC (sso.vish.gg/application/o/hoarder/)
services:
  web:
    image: ghcr.io/hoarder-app/hoarder:${HOARDER_VERSION:-release}
    restart: unless-stopped
    volumes:
      - /home/homelab/docker/hoarder/data:/data
    ports:
      # quoted so the host:container pair cannot hit YAML 1.1
      # sexagesimal integer parsing
      - "3482:3000"
    environment:
      MEILI_ADDR: http://meilisearch:7700
      BROWSER_WEB_URL: http://chrome:9222
      OLLAMA_BASE_URL: http://192.168.0.145:31434
      # quoted so the leading '-' value is unambiguously a string
      OLLAMA_KEEP_ALIVE: "-1m"
      INFERENCE_TEXT_MODEL: qwen3-coder:latest
      DATA_DIR: /data
      NEXTAUTH_SECRET: "REDACTED_NEXTAUTH_SECRET"
      NEXTAUTH_URL: https://hoarder.thevish.io
      MEILI_MASTER_KEY: ${MEILI_MASTER_KEY}
      # Authentik OIDC SSO
      OAUTH_WELLKNOWN_URL: https://sso.vish.gg/application/o/hoarder/.well-known/openid-configuration
      OAUTH_CLIENT_ID: hoarder
      OAUTH_CLIENT_SECRET: "REDACTED_CLIENT_SECRET" # pragma: allowlist secret
      OAUTH_PROVIDER_NAME: Authentik
      OAUTH_ALLOW_DANGEROUS_EMAIL_ACCOUNT_LINKING: "true"
  chrome:
    image: gcr.io/zenika-hub/alpine-chrome:123
    restart: unless-stopped
    command:
      - chromium-browser
      - --no-sandbox
      - --disable-gpu
      - --disable-dev-shm-usage
      - --remote-debugging-address=0.0.0.0
      - --remote-debugging-port=9222
      - --hide-scrollbars
    ports:
      - "9222:9222" # optional, for debugging
  meilisearch:
    image: getmeili/meilisearch:v1.6
    restart: unless-stopped
    environment:
      MEILI_NO_ANALYTICS: "true"
    volumes:
      # NOTE(review): `web` binds under /home/homelab/... while this service
      # binds under /root/... — looks like a leftover from a host move;
      # confirm the intended data root.
      - /root/docker/hoarder/meilisearch:/meili_data
# Removed the unused top-level named volumes (meilisearch, data): every
# service above uses host bind mounts, so the named volumes were never
# referenced and only created orphan docker volumes.

View File

@@ -0,0 +1,18 @@
# Left 4 Dead 2 - Game server
# Port: 27015
# L4D2 dedicated game server
# NOTE(review): the top-level `version` key is obsolete under the Compose
# Spec and is ignored (with a warning) by modern docker compose.
version: '3.4'
services:
  linuxgsm-l4d2:
    image: gameservermanagers/gameserver:l4d2
    # image: ghcr.io/gameservermanagers/gameserver:csgo
    container_name: l4d2server
    volumes:
      # LinuxGSM keeps the server install, configs and saves under /data
      - /home/homelab/docker/l4d2:/data
    ports:
      # standard Source-engine port set — confirm against LinuxGSM l4d2 docs
      - "27015:27015/tcp"
      - "27015:27015/udp"
      - "27020:27020/udp"
      - "27005:27005/udp"
    restart: unless-stopped

View File

@@ -0,0 +1,23 @@
# Redlib - Reddit frontend (maintained fork of Libreddit)
# Port: 9000
# Privacy-respecting Reddit frontend
# NOTE: Reddit actively blocks these frontends. May return 403 errors.
# See: https://github.com/redlib-org/redlib/issues
services:
  redlib:
    image: quay.io/redlib/redlib:latest
    container_name: Redlib
    hostname: redlib
    mem_limit: 2g
    cpu_shares: 768
    security_opt:
      - no-new-privileges:true
    # container filesystem is immutable; app state lives in memory only
    read_only: true
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "--tries=1", "http://localhost:8080/settings"]
      interval: 30s
      timeout: 5s
    ports:
      # quoted so the host:container pair is parsed as a string rather than
      # a YAML 1.1 sexagesimal integer
      - "9000:8080"
    restart: on-failure:5

View File

@@ -0,0 +1,61 @@
# Mattermost - Team collaboration
# Port: 8065
# Self-hosted Slack alternative
# DB: host postgres (172.17.0.1:5432) — not containerized
# Compose file lives on host at: /opt/mattermost/docker-compose.yml
services:
  mattermost:
    image: mattermost/mattermost-team-edition:11.4
    container_name: mattermost
    restart: unless-stopped
    security_opt:
      - no-new-privileges:true
    # cap process count inside the container against fork bombs
    pids_limit: 200
    read_only: false
    tmpfs:
      - /tmp
    ports:
      - "8065:8065"
    environment:
      TZ: UTC
      MM_SQLSETTINGS_DRIVERNAME: postgres
      # 172.17.0.1 is the docker0 gateway, i.e. the non-containerized
      # host postgres noted in the header
      MM_SQLSETTINGS_DATASOURCE: "postgres://mmuser:${MM_DB_PASSWORD}@172.17.0.1:5432/mattermost?sslmode=disable&connect_timeout=10" # pragma: allowlist secret
      MM_SERVICESETTINGS_SITEURL: https://mm.crista.love
      MM_SERVICESETTINGS_LISTENADDRESS: ":8065"
      MM_FILESETTINGS_DRIVERNAME: local
      MM_FILESETTINGS_DIRECTORY: /mattermost/data
      MM_LOGSETTINGS_CONSOLELEVEL: INFO
      MM_LOGSETTINGS_FILELEVEL: INFO
      MM_EMAILSETTINGS_ENABLESMTPAUTH: "true"
      MM_EMAILSETTINGS_SMTPSERVER: smtp.gmail.com
      MM_EMAILSETTINGS_SMTPPORT: "587"
      MM_EMAILSETTINGS_CONNECTIONSECURITY: STARTTLS
      MM_EMAILSETTINGS_SMTPUSERNAME: ${MM_SMTP_USERNAME} # set in .env
      MM_EMAILSETTINGS_FEEDBACKEMAIL: ${MM_FEEDBACK_EMAIL} # set in .env
      MM_EMAILSETTINGS_FEEDBACKNAME: Mattermost
      MM_EMAILSETTINGS_SENDEMAILNOTIFICATIONS: "true"
      MM_TEAMSETTINGS_ENABLEOPENSERVER: "true"
      MM_TEAMSETTINGS_MAXUSERSPERTEAM: "50"
      # Authentik OAuth2 via GitLab-compatible provider (works with Team Edition)
      MM_GITLABSETTINGS_ENABLE: "true"
      MM_GITLABSETTINGS_ID: ${MM_OAUTH_CLIENT_ID} # set in .env
      MM_GITLABSETTINGS_SECRET: ${MM_OAUTH_CLIENT_SECRET} # set in .env # pragma: allowlist secret
      MM_GITLABSETTINGS_SCOPE: "openid profile email"
      MM_GITLABSETTINGS_AUTHENDPOINT: "https://sso.vish.gg/application/o/authorize/"
      MM_GITLABSETTINGS_TOKENENDPOINT: "https://sso.vish.gg/application/o/token/"
      MM_GITLABSETTINGS_USERAPIENDPOINT: "https://sso.vish.gg/application/o/userinfo/"
      MM_GITLABSETTINGS_BUTTONTEXTCOLOR: "#FFFFFF"
      MM_GITLABSETTINGS_BUTTONCOLOR: "#fd4b2d"
    env_file:
      - .env
    volumes:
      - /opt/mattermost/config:/mattermost/config:rw
      - /opt/mattermost/data:/mattermost/data:rw
      - /opt/mattermost/logs:/mattermost/logs:rw
      - /opt/mattermost/plugins:/mattermost/plugins:rw
      - /opt/mattermost/client-plugins:/mattermost/client/plugins:rw
    # No custom healthcheck needed — the image provides one via:
    #   CMD /mattermost/bin/mmctl system status --local
    extra_hosts:
      # makes host.docker.internal resolve to the host gateway so the
      # container can reach the host-level postgres
      - "host.docker.internal:host-gateway"

View File

@@ -0,0 +1,64 @@
# Prometheus + Grafana Monitoring Stack - LIVE DEPLOYMENT
# =============================================================================
# This is the actual running compose at /home/homelab/docker/monitoring/
# Deployed directly with docker compose, NOT via Portainer.
#
# Config files are bind-mounted from the same directory:
#   ./prometheus/prometheus.yml  - scrape config + alerting rules reference
#   ./prometheus/alert-rules.yml - alerting rules
#   ./grafana/provisioning/      - datasources + dashboard provisioning
#
# To redeploy: docker compose -f this file up -d (from /home/homelab/docker/monitoring/)
# To reload Prometheus config without restart: curl -X POST http://localhost:9090/-/reload
#
# See monitoring.yaml for the self-contained Portainer GitOps version (embedded configs).
# =============================================================================
# NOTE(review): the top-level `version` key is obsolete under the Compose Spec.
version: "3.8"
services:
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    volumes:
      - ./prometheus:/etc/prometheus
      - prometheus-data:/prometheus
    command:
      - "--config.file=/etc/prometheus/prometheus.yml"
      - "--storage.tsdb.path=/prometheus"
      # enables the POST /-/reload endpoint referenced in the header
      - "--web.enable-lifecycle"
    ports:
      - "9090:9090"
    restart: unless-stopped
  grafana:
    image: grafana/grafana-oss:latest
    container_name: grafana
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      # FIX: in list-form `environment`, docker compose passes surrounding
      # double quotes through literally as part of the value, so the
      # password must NOT be wrapped in quotes here.
      - GF_SECURITY_ADMIN_PASSWORD=REDACTED_PASSWORD
    volumes:
      - grafana-data:/var/lib/grafana
      - ./grafana/provisioning/datasources:/etc/grafana/provisioning/datasources
      - ./grafana/provisioning/dashboards:/etc/grafana/provisioning/dashboards
      - ./grafana/dashboards:/var/lib/grafana/dashboards
    ports:
      - "3300:3000"
    restart: unless-stopped
  node_exporter:
    image: prom/node-exporter:latest
    container_name: node_exporter
    # host networking + host PID namespace so metrics describe the host
    network_mode: host
    pid: host
    volumes:
      - /:/host:ro,rslave
      - /sys:/host/sys:ro
      - /proc:/host/proc:ro
    command:
      - '--path.rootfs=/host'
    restart: unless-stopped
volumes:
  prometheus-data:
  grafana-data:

View File

@@ -0,0 +1,421 @@
# Prometheus + Grafana Monitoring Stack - Portainer GitOps Version
# =============================================================================
# NOTE: The live deployment is monitoring-compose.yml (plain docker compose,
# bind-mounted configs at /home/homelab/docker/monitoring/).
# This file is the self-contained Portainer GitOps version (embedded configs).
# Stack 476 on endpoint 443399 no longer exists in Portainer.
# =============================================================================
# Ports: 9090 (Prometheus), 3300 (Grafana), 9116 (SNMP Exporter)
#
# Uses docker configs for prometheus.yml and snmp.yml since bind mounts have
# symlink issues with Portainer git deploy
#
# Dashboard Provisioning:
# - Datasources: Auto-configured Prometheus
# - Dashboards: Infrastructure Overview, Synology NAS, Node Exporter Full (from Grafana.com)
#
# Old/deprecated configs have been moved to: archive/deprecated-monitoring-stacks/
configs:
# Grafana Datasource Provisioning
grafana_datasources:
content: |
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
uid: cfbskvs8upds0b
access: proxy
url: http://prometheus:9090
isDefault: true
editable: true
# Grafana Dashboard Provisioning Config
# Dashboards are loaded from bind-mounted /home/homelab/docker/grafana-dashboards/
# To add a new dashboard: drop a JSON file in that directory and restart Grafana
# Dashboard JSONs are backed up in the repo at hosts/vms/homelab-vm/grafana/dashboards/
grafana_dashboards_config:
content: |
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: 'Provisioned'
folderUid: 'provisioned'
type: file
disableDeletion: true
updateIntervalSeconds: 30
allowUiUpdates: true
options:
path: /var/lib/grafana/dashboards
# Dashboard JSON files are now bind-mounted from /home/homelab/docker/grafana-dashboards/
# Backed up in repo at hosts/vms/homelab-vm/grafana/dashboards/
# Dashboards: infrastructure-overview-v2, node-details-v2, node-exporter-full,
# synology-nas-v3, tailscale-bandwidth, truenas-guava
prometheus_config:
content: |
global:
scrape_interval: 15s
evaluation_interval: 15s
alerting:
alertmanagers:
- static_configs:
- targets:
- alertmanager:9093
rule_files:
- /etc/prometheus/alert-rules.yml
scrape_configs:
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
- job_name: 'node_exporter'
static_configs:
- targets: ['host.docker.internal:9100']
relabel_configs:
- target_label: instance
replacement: 'homelab-vm'
- job_name: 'homelab-node'
static_configs:
- targets: ['100.67.40.126:9100']
relabel_configs:
- target_label: instance
replacement: 'homelab-vm'
- job_name: 'raspberry-pis'
static_configs:
- targets: ['100.77.151.40:9100']
# pi-5-kevin (100.123.246.75) removed - offline 127+ days
relabel_configs:
- target_label: instance
replacement: 'pi-5'
- job_name: 'setillo-node'
static_configs:
- targets: ['100.125.0.20:9100']
relabel_configs:
- target_label: instance
replacement: 'setillo'
- job_name: 'setillo-snmp'
metrics_path: /snmp
params:
module: [synology]
auth: [snmpv3]
target: ['127.0.0.1']
static_configs:
- targets: ['100.125.0.20:9116']
relabel_configs:
- source_labels: [__address__]
target_label: __param_target
replacement: '127.0.0.1'
- source_labels: [__param_target]
target_label: instance
replacement: 'setillo'
- target_label: __address__
replacement: '100.125.0.20:9116'
- job_name: 'calypso-node'
static_configs:
- targets: ['100.103.48.78:9100']
relabel_configs:
- target_label: instance
replacement: 'calypso'
- job_name: 'calypso-snmp'
metrics_path: /snmp
params:
module: [synology]
auth: [snmpv3]
target: ['127.0.0.1']
static_configs:
- targets: ['100.103.48.78:9116']
relabel_configs:
- source_labels: [__address__]
target_label: __param_target
replacement: '127.0.0.1'
- source_labels: [__param_target]
target_label: instance
replacement: 'calypso'
- target_label: __address__
replacement: '100.103.48.78:9116'
- job_name: 'atlantis-node'
static_configs:
- targets: ['100.83.230.112:9100']
relabel_configs:
- target_label: instance
replacement: 'atlantis'
- job_name: 'atlantis-snmp'
metrics_path: /snmp
params:
module: [synology]
auth: [snmpv3]
target: ['127.0.0.1']
static_configs:
- targets: ['100.83.230.112:9116']
relabel_configs:
- source_labels: [__address__]
target_label: __param_target
replacement: '127.0.0.1'
- source_labels: [__param_target]
target_label: instance
replacement: 'atlantis'
- target_label: __address__
replacement: '100.83.230.112:9116'
- job_name: 'concord-nuc-node'
static_configs:
- targets: ['100.72.55.21:9100']
relabel_configs:
- target_label: instance
replacement: 'concord-nuc'
- job_name: 'truenas-node'
static_configs:
- targets: ['100.75.252.64:9100']
relabel_configs:
- target_label: instance
replacement: 'guava'
- job_name: 'seattle-node'
static_configs:
- targets: ['100.82.197.124:9100']
relabel_configs:
- target_label: instance
replacement: 'seattle'
- job_name: 'proxmox-node'
static_configs:
- targets: ['100.87.12.28:9100']
relabel_configs:
- target_label: instance
replacement: 'proxmox'
snmp_config:
content: |
auths:
snmpv3:
version: 3
security_level: authPriv
auth_protocol: MD5
username: snmp-exporter
password: "REDACTED_PASSWORD"
priv_protocol: DES
priv_password: "REDACTED_PASSWORD"
modules:
synology:
walk:
- 1.3.6.1.2.1.1
- 1.3.6.1.2.1.2
- 1.3.6.1.2.1.25.2
- 1.3.6.1.2.1.25.3.3
- 1.3.6.1.2.1.31.1.1
- 1.3.6.1.4.1.2021.4
- 1.3.6.1.4.1.2021.10
- 1.3.6.1.4.1.2021.11
- 1.3.6.1.4.1.6574.1
- 1.3.6.1.4.1.6574.2
- 1.3.6.1.4.1.6574.3
- 1.3.6.1.4.1.6574.4
- 1.3.6.1.4.1.6574.5
- 1.3.6.1.4.1.6574.6
- 1.3.6.1.4.1.6574.101
- 1.3.6.1.4.1.6574.102
metrics:
- name: sysDescr
oid: 1.3.6.1.2.1.1.1
type: DisplayString
- name: sysUpTime
oid: 1.3.6.1.2.1.1.3
type: gauge
- name: sysName
oid: 1.3.6.1.2.1.1.5
type: DisplayString
- name: ssCpuRawUser
oid: 1.3.6.1.4.1.2021.11.50
type: counter
- name: ssCpuRawSystem
oid: 1.3.6.1.4.1.2021.11.52
type: counter
- name: ssCpuRawIdle
oid: 1.3.6.1.4.1.2021.11.53
type: counter
- name: memTotalSwap
oid: 1.3.6.1.4.1.2021.4.3
type: gauge
- name: memAvailSwap
oid: 1.3.6.1.4.1.2021.4.4
type: gauge
- name: memTotalReal
oid: 1.3.6.1.4.1.2021.4.5
type: gauge
- name: memAvailReal
oid: 1.3.6.1.4.1.2021.4.6
type: gauge
- name: systemStatus
oid: 1.3.6.1.4.1.6574.1.1
type: gauge
- name: temperature
oid: 1.3.6.1.4.1.6574.1.2
type: gauge
- name: powerStatus
oid: 1.3.6.1.4.1.6574.1.3
type: gauge
- name: modelName
oid: 1.3.6.1.4.1.6574.1.5.1
type: DisplayString
- name: version
oid: 1.3.6.1.4.1.6574.1.5.3
type: DisplayString
- name: diskID
oid: 1.3.6.1.4.1.6574.2.1.1.2
type: DisplayString
indexes:
- labelname: diskIndex
type: gauge
- name: diskStatus
oid: 1.3.6.1.4.1.6574.2.1.1.5
type: gauge
indexes:
- labelname: diskIndex
type: gauge
- name: diskTemperature
oid: 1.3.6.1.4.1.6574.2.1.1.6
type: gauge
indexes:
- labelname: diskIndex
type: gauge
- name: raidName
oid: 1.3.6.1.4.1.6574.3.1.1.2
type: DisplayString
indexes:
- labelname: raidIndex
type: gauge
- name: raidStatus
oid: 1.3.6.1.4.1.6574.3.1.1.3
type: gauge
indexes:
- labelname: raidIndex
type: gauge
- name: raidFreeSize
oid: 1.3.6.1.4.1.6574.3.1.1.4
type: gauge
indexes:
- labelname: raidIndex
type: gauge
- name: raidTotalSize
oid: 1.3.6.1.4.1.6574.3.1.1.5
type: gauge
indexes:
- labelname: raidIndex
type: gauge
services:
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    configs:
      # prometheus.yml is injected via a docker config (defined in the
      # top-level `configs:` section of this file)
      - source: prometheus_config
        target: /etc/prometheus/prometheus.yml
    volumes:
      - prometheus-data:/prometheus
    command:
      - "--config.file=/etc/prometheus/prometheus.yml"
      - "--storage.tsdb.path=/prometheus"
      # enables POST /-/reload for config reloads without a restart
      - "--web.enable-lifecycle"
    ports:
      - "9090:9090"
    restart: unless-stopped
    networks:
      - monitoring
    extra_hosts:
      # lets Prometheus scrape exporters bound on the host
      # (e.g. the host.docker.internal:9100 target in the scrape config)
      - "host.docker.internal:host-gateway"
grafana:
image: grafana/grafana-oss:12.4.0
container_name: grafana
environment:
- GF_SECURITY_ADMIN_USER=admin
- GF_SECURITY_ADMIN_PASSWORD="REDACTED_PASSWORD"
# Disable Grafana 12 unified storage feature to restore home dashboard env var support
- GF_FEATURE_TOGGLES_DISABLE=kubernetesDashboards
# Authentik OAuth2 SSO Configuration
- GF_AUTH_GENERIC_OAUTH_ENABLED=true
- GF_AUTH_GENERIC_OAUTH_NAME=Authentik
- GF_AUTH_GENERIC_OAUTH_CLIENT_ID="REDACTED_CLIENT_ID"
- GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET="REDACTED_CLIENT_SECRET"
- GF_AUTH_GENERIC_OAUTH_SCOPES=openid profile email
- GF_AUTH_GENERIC_OAUTH_AUTH_URL=https://sso.vish.gg/application/o/authorize/
- GF_AUTH_GENERIC_OAUTH_TOKEN_URL=https://sso.vish.gg/application/o/token/
- GF_AUTH_GENERIC_OAUTH_API_URL=https://sso.vish.gg/application/o/userinfo/
- GF_AUTH_SIGNOUT_REDIRECT_URL=https://sso.vish.gg/application/o/grafana/end-session/
- GF_AUTH_GENERIC_OAUTH_ROLE_ATTRIBUTE_PATH=contains(groups[*], 'Grafana Admins') && 'Admin' || contains(groups[*], 'Grafana Editors') && 'Editor' || 'Viewer'
# Required for Authentik - extract email and login from userinfo response
- GF_AUTH_GENERIC_OAUTH_EMAIL_ATTRIBUTE_PATH=email
- GF_AUTH_GENERIC_OAUTH_LOGIN_ATTRIBUTE_PATH=preferred_username
- GF_AUTH_GENERIC_OAUTH_NAME_ATTRIBUTE_PATH=name
- GF_SERVER_ROOT_URL=https://gf.vish.gg
# Home dashboard is set via org preferences in Grafana DB (node-details-v2)
# GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH is not used - home is DB-persisted via API
configs:
# Datasource provisioning
- source: grafana_datasources
target: /etc/grafana/provisioning/datasources/datasources.yaml
# Dashboard provider config
- source: grafana_dashboards_config
target: /etc/grafana/provisioning/dashboards/dashboards.yaml
volumes:
- grafana-data:/var/lib/grafana
# Dashboard JSONs — bind-mounted from host for easy add/update
- /home/homelab/docker/grafana-dashboards:/var/lib/grafana/dashboards:ro
ports:
- "3300:3000"
restart: unless-stopped
depends_on:
- prometheus
networks:
- monitoring
  node_exporter:
    image: prom/node-exporter:latest
    container_name: node_exporter
    # host networking + host PID namespace so metrics describe the host
    network_mode: host
    pid: host
    volumes:
      - /:/host:ro,rslave
      - /sys:/host/sys:ro
      - /proc:/host/proc:ro
    command:
      - '--path.rootfs=/host'
    restart: unless-stopped
  snmp_exporter:
    image: prom/snmp-exporter:latest
    container_name: snmp_exporter
    configs:
      # snmp.yml (Synology module + SNMPv3 auth) injected as a docker
      # config from the top-level `configs:` section
      - source: snmp_config
        target: /etc/snmp_exporter/snmp.yml
    ports:
      - "9116:9116"
    restart: unless-stopped
    networks:
      - monitoring
volumes:
  prometheus-data:
  grafana-data:
networks:
  monitoring:
    driver: bridge

View File

@@ -0,0 +1,65 @@
# NetBox - DCIM/IPAM
# Port: 8443 -> 8000
# URL: https://nb.vish.gg
# Network documentation, device inventory, and IP address management
services:
  netbox:
    image: linuxserver/netbox:latest
    container_name: netbox
    depends_on:
      netbox-db:
        condition: service_healthy
      netbox-redis:
        condition: service_healthy
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Los_Angeles
      - SUPERUSER_EMAIL=${SUPERUSER_EMAIL}
      # NOTE: list-form environment entries pass quotes through literally,
      # so credential values must NOT be wrapped in quotes here.
      - SUPERUSER_PASSWORD=REDACTED_PASSWORD
      - ALLOWED_HOST=*
      - DB_HOST=netbox-db
      - DB_PORT=5432
      - DB_NAME=netbox
      - DB_USER=netbox
      - DB_PASSWORD=REDACTED_PASSWORD
      - REDIS_HOST=netbox-redis
      - REDIS_PORT=6379
      - REDIS_PASSWORD=REDACTED_PASSWORD
      - REDIS_DB_TASK=0
      - REDIS_DB_CACHE=1
    volumes:
      - /home/homelab/docker/netbox/config:/config
    ports:
      - "8443:8000"
    restart: unless-stopped
  netbox-db:
    image: postgres:16-alpine
    container_name: netbox-db
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-d", "netbox", "-U", "netbox"]
      interval: 10s
      timeout: 5s
      retries: 10
    volumes:
      - /home/homelab/docker/netbox/db:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: netbox
      POSTGRES_USER: netbox
      POSTGRES_PASSWORD: "REDACTED_PASSWORD"
    restart: unless-stopped
  netbox-redis:
    image: redis:7-alpine
    container_name: netbox-redis
    healthcheck:
      # Must authenticate: with --requirepass set, an unauthenticated
      # "redis-cli ping" prints a NOAUTH error but still exits 0, so the
      # previous check always reported healthy. Grep for PONG instead.
      test: ["CMD-SHELL", "redis-cli -a REDACTED_PASSWORD ping | grep -q PONG"]
      interval: 10s
      timeout: 5s
      retries: 5
    command: redis-server --appendonly yes --requirepass REDACTED_PASSWORD
    volumes:
      - /home/homelab/docker/netbox/redis:/data
    restart: unless-stopped

View File

@@ -0,0 +1,13 @@
# Node Exporter - Metrics
# Port: 9100
# Prometheus hardware/OS metrics
version: '3.8'
services:
node-exporter:
image: prom/node-exporter:latest
container_name: node-exporter
restart: unless-stopped
ports:
- "9100:9100"

View File

@@ -0,0 +1,43 @@
# ntfy - Push notifications
# Port: 8081 - ntfy server
# Port: 8095 - Gitea webhook bridge
# Simple pub-sub notification service with Gitea integration
version: "3.9"
services:
  ntfy:
    image: binwiederhier/ntfy
    container_name: NTFY
    command:
      - serve
    environment:
      - TZ=America/Los_Angeles
    volumes:
      - /home/homelab/docker/ntfy:/var/cache/ntfy:rw
      - /home/homelab/docker/ntfy/config:/etc/ntfy:rw
    healthcheck:
      test: ["CMD-SHELL", "wget -q --tries=1 http://localhost:80/v1/health -O - | grep -Eo '\"healthy\"\\s*:\\s*true' || exit 1"]
      interval: 60s
      timeout: 10s
      retries: 3
      start_period: 40s
    ports:
      # Quoted to keep the mapping a YAML string (compose convention)
      - "8081:80" # exposing on host port 8081
    restart: on-failure:5
  gitea-ntfy-bridge:
    image: python:3.12-alpine
    container_name: gitea-ntfy-bridge
    environment:
      - NTFY_URL=https://ntfy.vish.gg
      # NOTE: list-form environment entries pass quotes through literally,
      # so the topic must be unquoted or the quotes become part of its name.
      - NTFY_TOPIC=REDACTED_NTFY_TOPIC
      - TZ=America/Los_Angeles
      - PYTHONUNBUFFERED=1
    ports:
      - "8095:8095"
    volumes:
      - /home/homelab/docker/gitea-ntfy-bridge:/app:ro
    command: ["python", "-u", "/app/bridge.py"]
    restart: unless-stopped
    depends_on:
      - ntfy

View File

@@ -0,0 +1,374 @@
# ntfy server config file
#
# Please refer to the documentation at https://ntfy.sh/docs/config/ for details.
# All options also support underscores (_) instead of dashes (-) to comply with the YAML spec.
# Public facing base URL of the service (e.g. https://ntfy.sh or https://ntfy.example.com)
#
# This setting is required for any of the following features:
# - attachments (to return a download URL)
# - e-mail sending (for the topic URL in the email footer)
# - iOS push notifications for self-hosted servers (to calculate the Firebase poll_request topic)
# - Matrix Push Gateway (to validate that the pushkey is correct)
#
#
base-url: "https://ntfy.vish.gg"
# Listen address for the HTTP & HTTPS web server. If "listen-https" is set, you must also
# set "key-file" and "cert-file". Format: [<ip>]:<port>, e.g. "1.2.3.4:8080".
#
# To listen on all interfaces, you may omit the IP address, e.g. ":443".
# To disable HTTP, set "listen-http" to "-".
#
# listen-http: ":80"
# listen-https:
# Listen on a Unix socket, e.g. /var/lib/ntfy/ntfy.sock
# This can be useful to avoid port issues on local systems, and to simplify permissions.
#
# listen-unix: <socket-path>
# listen-unix-mode: <linux permissions, e.g. 0700>
# Path to the private key & cert file for the HTTPS web server. Not used if "listen-https" is not set.
#
# key-file: <filename>
# cert-file: <filename>
# If set, also publish messages to a Firebase Cloud Messaging (FCM) topic for your app.
# This is optional and only required to save battery when using the Android app.
#
# firebase-key-file: <filename>
# If "cache-file" is set, messages are cached in a local SQLite database instead of only in-memory.
# This allows for service restarts without losing messages in support of the since= parameter.
#
# The "cache-duration" parameter defines the duration for which messages will be buffered
# before they are deleted. This is required to support the "since=..." and "poll=1" parameter.
# To disable the cache entirely (on-disk/in-memory), set "cache-duration" to 0.
# The cache file is created automatically, provided that the correct permissions are set.
#
# The "cache-startup-queries" parameter allows you to run commands when the database is initialized,
# e.g. to enable WAL mode (see https://phiresky.github.io/blog/2020/sqlite-performance-tuning/)).
# Example:
# cache-startup-queries: |
# pragma journal_mode = WAL;
# pragma synchronous = normal;
# pragma temp_store = memory;
# pragma busy_timeout = 15000;
# vacuum;
#
# The "cache-batch-size" and "cache-batch-timeout" parameter allow enabling async batch writing
# of messages. If set, messages will be queued and written to the database in batches of the given
# size, or after the given timeout. This is only required for high volume servers.
#
# Debian/RPM package users:
# Use /var/cache/ntfy/cache.db as cache file to avoid permission issues. The package
# creates this folder for you.
#
# Check your permissions:
# If you are running ntfy with systemd, make sure this cache file is owned by the
# ntfy user and group by running: chown ntfy.ntfy <filename>.
#
# cache-file: <filename>
# cache-duration: "12h"
# cache-startup-queries:
# cache-batch-size: 0
# cache-batch-timeout: "0ms"
# If set, access to the ntfy server and API can be controlled on a granular level using
# the 'ntfy user' and 'ntfy access' commands. See the --help pages for details, or check the docs.
#
# - auth-file is the SQLite user/access database; it is created automatically if it doesn't already exist
# - auth-default-access defines the default/fallback access if no access control entry is found; it can be
# set to "read-write" (default), "read-only", "write-only" or "deny-all".
# - auth-startup-queries allows you to run commands when the database is initialized, e.g. to enable
# WAL mode. This is similar to cache-startup-queries. See above for details.
#
# Debian/RPM package users:
# Use /var/lib/ntfy/user.db as user database to avoid permission issues. The package
# creates this folder for you.
#
# Check your permissions:
#   If you are running ntfy with systemd, make sure this user database file is owned by the
# ntfy user and group by running: chown ntfy.ntfy <filename>.
#
# auth-file: <filename>
# auth-default-access: "read-write"
# auth-startup-queries:
# If set, the X-Forwarded-For header is used to determine the visitor IP address
# instead of the remote address of the connection.
#
# WARNING: If you are behind a proxy, you must set this, otherwise all visitors are rate limited
# as if they are one.
#
# behind-proxy: false
# If enabled, clients can attach files to notifications as attachments. Minimum settings to enable attachments
# are "attachment-cache-dir" and "base-url".
#
# - attachment-cache-dir is the cache directory for attached files
# - attachment-total-size-limit is the limit of the on-disk attachment cache directory (total size)
# - attachment-file-size-limit is the per-file attachment size limit (e.g. 300k, 2M, 100M)
# - attachment-expiry-duration is the duration after which uploaded attachments will be deleted (e.g. 3h, 20h)
#
# attachment-cache-dir:
# attachment-total-size-limit: "5G"
# attachment-file-size-limit: "15M"
# attachment-expiry-duration: "3h"
# If enabled, allow outgoing e-mail notifications via the 'X-Email' header. If this header is set,
# messages will additionally be sent out as e-mail using an external SMTP server.
#
# As of today, only SMTP servers with plain text auth (or no auth at all), and STARTTLS are supported.
# Please also refer to the rate limiting settings below (visitor-email-limit-burst & visitor-email-limit-replenish).
#
# - smtp-sender-addr is the hostname:port of the SMTP server
# - smtp-sender-from is the e-mail address of the sender
# - smtp-sender-user/smtp-sender-pass are the username and password of the SMTP user (leave blank for no auth)
#
# smtp-sender-addr:
# smtp-sender-from:
# smtp-sender-user:
# smtp-sender-pass:
# If enabled, ntfy will launch a lightweight SMTP server for incoming messages. Once configured, users can send
# emails to a topic e-mail address to publish messages to a topic.
#
# - smtp-server-listen defines the IP address and port the SMTP server will listen on, e.g. :25 or 1.2.3.4:25
# - smtp-server-domain is the e-mail domain, e.g. ntfy.sh
# - smtp-server-addr-prefix is an optional prefix for the e-mail addresses to prevent spam. If set to "ntfy-",
# for instance, only e-mails to ntfy-$topic@ntfy.sh will be accepted. If this is not set, all emails to
# $topic@ntfy.sh will be accepted (which may obviously be a spam problem).
#
# smtp-server-listen:
# smtp-server-domain:
# smtp-server-addr-prefix:
# Web Push support (background notifications for browsers)
#
# If enabled, allows ntfy to receive push notifications, even when the ntfy web app is closed. When enabled, users
# can enable background notifications in the web app. Once enabled, ntfy will forward published messages to the push
# endpoint, which will then forward it to the browser.
#
# You must configure web-push-public/private key, web-push-file, and web-push-email-address below to enable Web Push.
# Run "ntfy webpush keys" to generate the keys.
#
# - web-push-public-key is the generated VAPID public key, e.g. AA1234BBCCddvveekaabcdfqwertyuiopasdfghjklzxcvbnm1234567890
# - web-push-private-key is the generated VAPID private key, e.g. AA2BB1234567890abcdefzxcvbnm1234567890
# - web-push-file is a database file to keep track of browser subscription endpoints, e.g. `/var/cache/ntfy/webpush.db`
# - web-push-email-address is the admin email address send to the push provider, e.g. `sysadmin@example.com`
# - web-push-startup-queries is an optional list of queries to run on startup
#
# web-push-public-key:
# web-push-private-key:
# web-push-file:
# web-push-email-address:
# web-push-startup-queries:
# If enabled, ntfy can perform voice calls via Twilio via the "X-Call" header.
#
# - twilio-account is the Twilio account SID, e.g. AC12345beefbeef67890beefbeef122586
# - twilio-auth-token is the Twilio auth token, e.g. affebeef258625862586258625862586
# - twilio-phone-number is the outgoing phone number you purchased, e.g. REDACTED_PHONE_NUMBER
# - twilio-verify-service is the Twilio Verify service SID, e.g. VA12345beefbeef67890beefbeef122586
#
# twilio-account:
# twilio-auth-token:
# twilio-phone-number:
# twilio-verify-service:
# Interval in which keepalive messages are sent to the client. This is to prevent
# intermediaries closing the connection for inactivity.
#
# Note that the Android app has a hardcoded timeout at 77s, so it should be less than that.
#
# keepalive-interval: "45s"
# Interval in which the manager prunes old messages, deletes topics
# and prints the stats.
#
# manager-interval: "1m"
# Defines topic names that are not allowed, because they are otherwise used. There are a few default topics
# that cannot be used (e.g. app, account, settings, ...). To extend the default list, define them here.
#
# Example:
# disallowed-topics:
# - about
# - pricing
# - contact
#
# disallowed-topics:
# Defines the root path of the web app, or disables the web app entirely.
#
# Can be any simple path, e.g. "/", "/app", or "/ntfy". For backwards-compatibility reasons,
# the values "app" (maps to "/"), "home" (maps to "/app"), or "disable" (maps to "") to disable
# the web app entirely.
#
# web-root: /
# Various feature flags used to control the web app, and API access, mainly around user and
# account management.
#
# - enable-signup allows users to sign up via the web app, or API
# - enable-login allows users to log in via the web app, or API
# - enable-reservations allows users to reserve topics (if their tier allows it)
#
# enable-signup: false
# enable-login: false
# enable-reservations: false
# Server URL of a Firebase/APNS-connected ntfy server (likely "https://ntfy.sh").
#
# iOS users:
#  If you use the iOS ntfy app, you MUST configure this to receive timely notifications. You'll likely want this:
#
upstream-base-url: "https://ntfy.sh"
#
# If set, all incoming messages will publish a "poll_request" message to the configured upstream server, containing
# the message ID of the original message, instructing the iOS app to poll this server for the actual message contents.
# This is to prevent the upstream server and Firebase/APNS from being able to read the message.
#
# - upstream-base-url is the base URL of the upstream server. Should be "https://ntfy.sh".
# - upstream-access-token is the token used to authenticate with the upstream server. This is only required
#   if you exceed the upstream rate limits, or the upstream server requires authentication.
#
# upstream-base-url:
# upstream-access-token:
# Configures message-specific limits
#
# - message-size-limit defines the max size of a message body. Please note message sizes >4K are NOT RECOMMENDED,
# and largely untested. If FCM and/or APNS is used, the limit should stay 4K, because their limits are around that size.
# If you increase this size limit regardless, FCM and APNS will NOT work for large messages.
# - message-delay-limit defines the max delay of a message when using the "Delay" header.
#
# message-size-limit: "4k"
# message-delay-limit: "3d"
# Rate limiting: Total number of topics before the server rejects new topics.
#
# global-topic-limit: 15000
# Rate limiting: Number of subscriptions per visitor (IP address)
#
# visitor-subscription-limit: 30
# Rate limiting: Allowed GET/PUT/POST requests per second, per visitor:
# - visitor-request-limit-burst is the initial bucket of requests each visitor has
# - visitor-request-limit-replenish is the rate at which the bucket is refilled
# - visitor-request-limit-exempt-hosts is a comma-separated list of hostnames, IPs or CIDRs to be
# exempt from request rate limiting. Hostnames are resolved at the time the server is started.
# Example: "1.2.3.4,ntfy.example.com,8.7.6.0/24"
#
# visitor-request-limit-burst: 60
# visitor-request-limit-replenish: "5s"
# visitor-request-limit-exempt-hosts: ""
# Rate limiting: Hard daily limit of messages per visitor and day. The limit is reset
# every day at midnight UTC. If the limit is not set (or set to zero), the request
# limit (see above) governs the upper limit.
#
# visitor-message-daily-limit: 0
# Rate limiting: Allowed emails per visitor:
# - visitor-email-limit-burst is the initial bucket of emails each visitor has
# - visitor-email-limit-replenish is the rate at which the bucket is refilled
#
# visitor-email-limit-burst: 16
# visitor-email-limit-replenish: "1h"
# Rate limiting: Attachment size and bandwidth limits per visitor:
# - visitor-attachment-total-size-limit is the total storage limit used for attachments per visitor
# - visitor-attachment-daily-bandwidth-limit is the total daily attachment download/upload traffic limit per visitor
#
# visitor-attachment-total-size-limit: "100M"
# visitor-attachment-daily-bandwidth-limit: "500M"
# Rate limiting: Enable subscriber-based rate limiting (mostly used for UnifiedPush)
#
# If subscriber-based rate limiting is enabled, messages published on UnifiedPush topics** (topics starting with "up")
# will be counted towards the "rate visitor" of the topic. A "rate visitor" is the first subscriber to the topic.
#
# Once enabled, a client subscribing to UnifiedPush topics via HTTP stream, or websockets, will be automatically registered as
# a "rate visitor", i.e. the visitor whose rate limits will be used when publishing on this topic. Note that setting the rate visitor
# requires **read-write permission** on the topic.
#
# If this setting is enabled, publishing to UnifiedPush topics will lead to a HTTP 507 response if
# no "rate visitor" has been previously registered. This is to avoid burning the publisher's "visitor-message-daily-limit".
#
# visitor-subscriber-rate-limiting: false
# Payments integration via Stripe
#
# - stripe-secret-key is the key used for the Stripe API communication. Setting this values
# enables payments in the ntfy web app (e.g. Upgrade dialog). See https://dashboard.stripe.com/apikeys.
# - stripe-webhook-key is the key required to validate the authenticity of incoming webhooks from Stripe.
#   Webhooks are essential to keep the local database in sync with the payment provider. See https://dashboard.stripe.com/webhooks.
# - billing-contact is an email address or website displayed in the "Upgrade tier" dialog to let people reach
# out with billing questions. If unset, nothing will be displayed.
#
# stripe-secret-key:
# stripe-webhook-key:
# billing-contact:
# Metrics
#
# ntfy can expose Prometheus-style metrics via a /metrics endpoint, or on a dedicated listen IP/port.
# Metrics may be considered sensitive information, so before you enable them, be sure you know what you are
# doing, and/or secure access to the endpoint in your reverse proxy.
#
# - enable-metrics enables the /metrics endpoint for the default ntfy server (i.e. HTTP, HTTPS and/or Unix socket)
# - metrics-listen-http exposes the metrics endpoint via a dedicated [IP]:port. If set, this option implicitly
# enables metrics as well, e.g. "10.0.1.1:9090" or ":9090"
#
# enable-metrics: false
# metrics-listen-http:
# Profiling
#
# ntfy can expose Go's net/http/pprof endpoints to support profiling of the ntfy server. If enabled, ntfy will listen
# on a dedicated listen IP/port, which can be accessed via the web browser on http://<ip>:<port>/debug/pprof/.
# This can be helpful to expose bottlenecks, and visualize call flows. See https://pkg.go.dev/net/http/pprof for details.
#
# profile-listen-http:
# Logging options
#
# By default, ntfy logs to the console (stderr), with an "info" log level, and in a human-readable text format.
# ntfy supports five different log levels, can also write to a file, log as JSON, and even supports granular
# log level overrides for easier debugging. Some options (log-level and log-level-overrides) can be hot reloaded
# by calling "kill -HUP $pid" or "systemctl reload ntfy".
#
# - log-format defines the output format, can be "text" (default) or "json"
# - log-file is a filename to write logs to. If this is not set, ntfy logs to stderr.
# - log-level defines the default log level, can be one of "trace", "debug", "info" (default), "warn" or "error".
# Be aware that "debug" (and particularly "trace") can be VERY CHATTY. Only turn them on briefly for debugging purposes.
# - log-level-overrides lets you override the log level if certain fields match. This is incredibly powerful
# for debugging certain parts of the system (e.g. only the account management, or only a certain visitor).
# This is an array of strings in the format:
# - "field=value -> level" to match a value exactly, e.g. "tag=manager -> trace"
# - "field -> level" to match any value, e.g. "time_taken_ms -> debug"
# Warning: Using log-level-overrides has a performance penalty. Only use it for temporary debugging.
#
# Check your permissions:
# If you are running ntfy with systemd, make sure this log file is owned by the
# ntfy user and group by running: chown ntfy.ntfy <filename>.
#
# Example (good for production):
# log-level: info
# log-format: json
# log-file: /var/log/ntfy.log
#
# Example level overrides (for debugging, only use temporarily):
# log-level-overrides:
# - "tag=manager -> trace"
# - "visitor_ip=1.2.3.4 -> debug"
# - "time_taken_ms -> debug"
#
# log-level: info
# log-level-overrides:
# log-format: text
# log-file:

View File

@@ -0,0 +1,12 @@
/home/youruser/whisper-docker/
├── docker-compose.yml
├── Dockerfile
├── audio/ <-- this is ./audio on the host
│ ├── sample.mp3
└── models/
mkdir audio
cp ~/Downloads/myfile.mp3 audio/
docker compose run --rm whisper myfile.mp3 --model small --fp16 False
sudo docker compose run --rm whisper tape4.mp4 --model small --fp16 False --language en

View File

@@ -0,0 +1,41 @@
# OpenHands - AI Software Development Agent
# Port: 3001
# Docs: https://docs.openhands.dev
# LLM: Claude Sonnet 4
version: '3.8'
services:
openhands:
image: docker.openhands.dev/openhands/openhands:1.1
container_name: openhands-app
ports:
- "3001:3000"
extra_hosts:
- "host.docker.internal:host-gateway"
environment:
# LLM Configuration
- LLM_API_KEY=${ANTHROPIC_API_KEY}
- LLM_MODEL=anthropic/claude-sonnet-4-20250514
# Sandbox Configuration
- SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.openhands.dev/openhands/runtime:1.1-nikolaik
- LOG_ALL_EVENTS=true
- RUN_AS_OPENHANDS=true
- OPENHANDS_USER_ID=42420
# Use docker bridge gateway IP so runtime containers can reach the main container
- SANDBOX_LOCAL_RUNTIME_URL=http://172.17.0.1
- USE_HOST_NETWORK=false
- WORKSPACE_BASE=/opt/workspace_base
- SANDBOX_USER_ID=0
- FILE_STORE=local
- FILE_STORE_PATH=/.openhands
- INIT_GIT_IN_EMPTY_WORKSPACE=1
# Disable default MCP (runtime can't resolve host.docker.internal)
- DISABLE_DEFAULT_MCP=true
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- openhands-data:/.openhands
restart: unless-stopped
volumes:
openhands-data:

View File

@@ -0,0 +1,41 @@
# OpenProject - Project management
# Port: 8080
# Open source project management
version: "3.8"
services:
  db:
    image: postgres:16
    container_name: openproject-db
    restart: unless-stopped
    environment:
      POSTGRES_USER: openproject
      POSTGRES_PASSWORD: "REDACTED_PASSWORD"
      POSTGRES_DB: openproject
    volumes:
      - /home/homelab/docker/openproject/postgres:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U openproject -d openproject"]
      interval: 30s
      timeout: 5s
      retries: 5
  openproject:
    image: openproject/openproject:16.0.0-slim
    container_name: openproject
    restart: unless-stopped
    depends_on:
      db:
        condition: service_healthy
    ports:
      - "8083:8080"
    environment:
      OPENPROJECT_HOST__NAME: "homelab.vish.local" # replace with the homelab's LAN IP or DNS name
      OPENPROJECT_DISABLE__HOST__NAME__CHECK: "true"
      OPENPROJECT_HTTPS: "false"
      # NOTE: removed a stray trailing `_GITEA_TOKEN"` suffix that left this
      # line with unbalanced quotes (invalid YAML — compose refused to parse).
      OPENPROJECT_SECRET_KEY_BASE: "REDACTED_SECRET_KEY_BASE"
      OPENPROJECT_EE__MANAGER__VISIBLE: "false"
      DATABASE_URL: "postgresql://openproject:REDACTED_PASSWORD@db:5432/openproject"
    volumes:
      - /home/homelab/docker/openproject/assets:/var/openproject/assets

View File

@@ -0,0 +1,15 @@
# Paper Minecraft - Game server
# Port: 25565
# Paper Minecraft server
version: "3.8"
services:
# bind mount example
linuxgsm-pmc-bind:
image: gameservermanagers/gameserver:pmc
# image: ghcr.io/gameservermanagers/gameserver:pmc
container_name: pmcserver
restart: unless-stopped
volumes:
- /home/homelab/docker/pmc:/data
network_mode: host

View File

@@ -0,0 +1,21 @@
# Perplexica - AI-powered search engine
# Port: 4785
# Configure LLM providers via web UI at http://192.168.0.210:4785/settings
#
# Configured to use Olares Ollama instance (qwen3:32b, 30.5B Q4_K_M)
# Endpoint: https://a5be22681.vishinator.olares.com (native Ollama API + OpenAI-compat)
services:
perplexica:
image: itzcrazykns1337/perplexica:latest
container_name: perplexica
ports:
- "4785:3000"
environment:
- OLLAMA_BASE_URL=https://a5be22681.vishinator.olares.com
volumes:
- perplexica-data:/home/perplexica/data
restart: unless-stopped
volumes:
perplexica-data:

View File

@@ -0,0 +1,16 @@
# Podgrab - Podcast manager
# Port: 8080
# Podcast download and management
version: '3.3'
services:
podgrab:
container_name: podgrab
image: akhilrex/podgrab
ports:
- "8389:8080"
volumes:
- /mnt/atlantis_docker/podgrab/podcasts:/assets
- /mnt/atlantis_docker/podgrab/config:/config
restart: unless-stopped

View File

@@ -0,0 +1,22 @@
# Portainer Edge Agent - homelab-vm
# Connects to Portainer server on Atlantis (100.83.230.112:8000)
# Deploy: docker compose -f portainer_agent.yaml up -d
services:
portainer_edge_agent:
image: portainer/agent:2.33.7
container_name: portainer_edge_agent
restart: unless-stopped
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /var/lib/docker/volumes:/var/lib/docker/volumes
- /:/host
- portainer_agent_data:/data
environment:
EDGE: "1"
EDGE_ID: "18271a7b-03ea-4945-946c-4a845e1bb3ff"
EDGE_KEY: "aHR0cDovLzEwMC44My4yMzAuMTEyOjEwMDAwfGh0dHA6Ly8xMDAuODMuMjMwLjExMjo4MDAwfGtDWjVkTjJyNXNnQTJvMEF6UDN4R3h6enBpclFqa05Wa0FCQkU0R1IxWFU9fDQ0MzM5OQ"
EDGE_INSECURE_POLL: "1"
volumes:
portainer_agent_data:

View File

@@ -0,0 +1,53 @@
# ProxiTok - Privacy-respecting TikTok frontend
# Port: 9770
# Alternative TikTok frontend - no ads, no tracking, server-side requests
services:
proxitok:
container_name: proxitok-web
image: ghcr.io/pablouser1/proxitok:master
ports:
- 9770:8080
environment:
- LATTE_CACHE=/cache
- API_CACHE=redis
- REDIS_HOST=proxitok-redis
- REDIS_PORT=6379
- API_CHROMEDRIVER=http://proxitok-chromedriver:4444
volumes:
- proxitok-cache:/cache
depends_on:
- redis
- chromedriver
networks:
- proxitok
restart: unless-stopped
redis:
container_name: proxitok-redis
image: redis:7-alpine
volumes:
- proxitok-redis:/data
networks:
- proxitok
init: true
restart: unless-stopped
chromedriver:
container_name: proxitok-chromedriver
image: robcherry/docker-chromedriver:latest
shm_size: 2g
environment:
- CHROMEDRIVER_WHITELISTED_IPS=
privileged: true
networks:
- proxitok
restart: unless-stopped
volumes:
proxitok-cache:
proxitok-redis:
networks:
proxitok:
driver: bridge

View File

@@ -0,0 +1,21 @@
# Redlib - Reddit frontend (maintained fork of Libreddit)
# Port: 9000
# Privacy-respecting Reddit frontend
services:
redlib:
image: quay.io/redlib/redlib:latest
container_name: Redlib
hostname: redlib
mem_limit: 2g
cpu_shares: 768
security_opt:
- no-new-privileges:true
read_only: true
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "--tries=1", "http://localhost:8080/settings"]
interval: 30s
timeout: 5s
ports:
- 9000:8080
restart: on-failure:5

View File

@@ -0,0 +1,47 @@
# mariushosting example of a RomM configuration file
# Only uncomment the lines you want to use/modify, or add new ones where needed
exclude:
# Exclude platforms to be scanned
platforms: [] # ['my_excluded_platform_1', 'my_excluded_platform_2']
# Exclude roms or parts of roms to be scanned
roms:
# Single file games section.
# Will not apply to files that are in sub-folders (multi-disc roms, games with updates, DLC, patches, etc.)
single_file:
# Exclude all files with certain extensions to be scanned
extensions: [] # ['xml', 'txt']
# Exclude matched file names to be scanned.
# Supports unix filename pattern matching
# Can also exclude files by extension
names: [] # ['info.txt', '._*', '*.nfo']
# Multi files games section
# Will apply to files that are in sub-folders (multi-disc roms, games with updates, DLC, patches, etc.)
multi_file:
# Exclude matched 'folder' names to be scanned (RomM identifies folders as multi file games)
names: [] # ['my_multi_file_game', 'DLC']
# Exclude files within sub-folders.
parts:
# Exclude matched file names to be scanned from multi file roms
# Keep in mind that RomM doesn't scan folders inside multi files games,
# so there is no need to exclude folders from inside of multi files games.
names: [] # ['data.xml', '._*'] # Supports unix filename pattern matching
# Exclude all files with certain extensions to be scanned from multi file roms
extensions: [] # ['xml', 'txt']
system:
#   Associate different platform names to your current file system platform names
# [your custom platform folder name]: [RomM platform name]
# In this example if you have a 'gc' folder, RomM will treat it like the 'ngc' folder and if you have a 'psx' folder, RomM will treat it like the 'ps' folder
platforms: {} # { gc: 'ngc', psx: 'ps' }
#   Associate one platform to its main version
versions: {} # { naomi: 'arcade' }
# The folder name where your roms are located
filesystem: {} # { roms_folder: 'roms' } For example if your folder structure is /home/user/library/roms_folder

View File

@@ -0,0 +1,55 @@
version: "3.9"
services:
db:
image: mariadb:11.4-noble # LTS Long Time Support until May 29, 2029
container_name: RomM-DB
security_opt:
- no-new-privileges:false
environment:
MYSQL_DATABASE: romm
MYSQL_USER: rommuser
MYSQL_PASSWORD: "REDACTED_PASSWORD"
MYSQL_ROOT_PASSWORD: "REDACTED_PASSWORD"
TZ: America/Los_Angeles
volumes:
- /mnt/atlantis_docker/romm/db:/var/lib/mysql:rw
restart: on-failure:5
romm:
image: rommapp/romm:latest
container_name: RomM
depends_on:
- db
ports:
- "7676:8080"
environment:
ROMM_DB_DRIVER: mariadb
DB_HOST: db
DB_NAME: romm
DB_USER: rommuser
DB_PASSWD: "REDACTED_PASSWORD"
DB_PORT: 3306
ROMM_AUTH_SECRET_KEY: e9c36749cf1cb5f8df757bc0REDACTED_GITEA_TOKEN
# Metadata providers (optional):
# SCREENSCRAPER_USER:
# SCREENSCRAPER_PASSWORD:
# IGDB_CLIENT_ID:
# IGDB_CLIENT_SECRET:
# MOBYGAMES_API_KEY:
# STEAMGRIDDB_API_KEY:
# RETROACHIEVEMENTS_API_KEY:
# HASHEOUS_API_ENABLED: true
volumes:
- /mnt/atlantis_docker/romm/resources:/romm/resources:rw
- /mnt/atlantis_docker/romm/redis:/redis-data:rw
- /mnt/atlantis_docker/romm/games/library:/romm/library:rw
- /mnt/atlantis_docker/romm/games/assets:/romm/assets:rw
- /mnt/atlantis_docker/romm/games/config:/romm/config:rw
healthcheck:
test: ["CMD", "curl", "-f", "http://127.0.0.1:8080/"]
interval: 10s
timeout: 5s
retries: 3
start_period: 90s
restart: on-failure:10

View File

@@ -0,0 +1,24 @@
# Roundcube - Webmail
# Port: 8080
# Web-based email client
version: "3.9"
services:
roundcube:
image: roundcube/roundcubemail:latest
container_name: roundcube
environment:
ROUNDCUBEMAIL_DEFAULT_HOST: ssl://imap.gmail.com
ROUNDCUBEMAIL_DEFAULT_PORT: 993
ROUNDCUBEMAIL_SMTP_SERVER: tls://smtp.gmail.com
ROUNDCUBEMAIL_SMTP_PORT: 587
ROUNDCUBEMAIL_UPLOAD_MAX_FILESIZE: 25M
ROUNDCUBEMAIL_SKIN: elastic
volumes:
- /mnt/atlantis_docker/roundcube/data:/var/roundcube
- /mnt/atlantis_docker/roundcube/config:/var/roundcube/config
- /mnt/atlantis_docker/roundcube/logs:/var/roundcube/logs
ports:
- "7512:80" # or 7512:80 if you prefer
restart: unless-stopped

View File

@@ -0,0 +1,37 @@
# Roundcube ProtonMail Bridge
# Port: 8080
# Webmail with ProtonMail support
version: "3.9"
services:
roundcube-protonmail:
image: roundcube/roundcubemail:latest
container_name: roundcube-protonmail
environment:
# ProtonMail Bridge IMAP + SMTP (plain inside the Docker network)
ROUNDCUBEMAIL_DEFAULT_HOST: protonmail-bridge
ROUNDCUBEMAIL_DEFAULT_PORT: 143
ROUNDCUBEMAIL_SMTP_SERVER: protonmail-bridge
ROUNDCUBEMAIL_SMTP_PORT: 25
ROUNDCUBEMAIL_UPLOAD_MAX_FILESIZE: 25M
ROUNDCUBEMAIL_SKIN: elastic
volumes:
- /mnt/atlantis_docker/roundcube_protonmail/data:/var/roundcube
- /mnt/atlantis_docker/roundcube_protonmail/config:/var/roundcube/config
- /mnt/atlantis_docker/roundcube_protonmail/logs:/var/roundcube/logs
ports:
- "7513:80" # exposed via your tailnet (change if needed)
restart: unless-stopped
depends_on:
- protonmail-bridge
protonmail-bridge:
image: shenxn/protonmail-bridge:latest
container_name: protonmail-bridge
environment:
- TZ=America/Los_Angeles
command: ["protonmail-bridge", "--no-keychain", "--cli"]
volumes:
- /mnt/atlantis_docker/roundcube_protonmail/bridge:/root/.config/protonmail/bridge
restart: unless-stopped

View File

@@ -0,0 +1,33 @@
# Satisfactory - Game server
# Port: 7777
# Satisfactory dedicated game server
services:
satisfactory-server:
container_name: 'satisfactory-server'
hostname: 'satisfactory-server'
image: 'wolveix/satisfactory-server:latest'
ports:
- '7777:7777/udp'
- '7777:7777/tcp'
volumes:
- /home/homelab/docker/sf:/data
environment:
- MAXPLAYERS=4
- PGID=1000
- PUID=1000
- ROOTLESS=false
- STEAMBETA=false
restart: unless-stopped
healthcheck:
test: bash /healthcheck.sh
interval: 30s
timeout: 10s
retries: 3
start_period: 120s
deploy:
resources:
limits:
memory: 6G
reservations:
memory: 4G

View File

@@ -0,0 +1,55 @@
# Scrutiny — SMART Disk Health Monitoring Hub
#
# Runs on homelab-vm (Tailscale 100.67.40.126)
# Web UI: http://100.67.40.126:8090 (also: scrutiny.vish.gg via NPM)
# InfluxDB: internal to this stack
#
# Collectors ship metrics from physical hosts to this hub.
# Collector composes at:
#   hosts/synology/atlantis/scrutiny-collector.yaml
#   hosts/synology/calypso/scrutiny-collector.yaml
#   hosts/synology/setillo/scrutiny-collector.yaml
#   hosts/physical/concord-nuc/scrutiny-collector.yaml
#   hosts/edge/rpi5-vish/scrutiny-collector.yaml
#
# Deploy: Portainer GitOps on endpoint 443399 (homelab-vm)
services:
  scrutiny-web:
    image: ghcr.io/analogj/scrutiny:master-web
    container_name: scrutiny-web
    ports:
      - "8090:8080"  # host 8090 -> container web UI 8080
    volumes:
      - scrutiny-config:/opt/scrutiny/config
      # NOTE(review): this named volume is also mounted by scrutiny-influxdb at
      # /var/lib/influxdb2 — sharing InfluxDB's data dir with the web container
      # looks unintentional; confirm against upstream's example compose.
      - scrutiny-influx:/opt/scrutiny/influxdb
    environment:
      GIN_MODE: release
      SCRUTINY_WEB_INFLUXDB_HOST: scrutiny-influxdb  # service name on the stack network
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/api/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
    depends_on:
      scrutiny-influxdb:
        condition: service_healthy  # wait for InfluxDB's healthcheck, not just start
  scrutiny-influxdb:
    image: influxdb:2.2
    container_name: scrutiny-influxdb
    volumes:
      - scrutiny-influx:/var/lib/influxdb2
    restart: unless-stopped
    healthcheck:
      # NOTE(review): assumes curl is present in the influxdb:2.2 image — confirm
      test: ["CMD", "curl", "-f", "http://localhost:8086/ping"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 20s
volumes:
  scrutiny-config:
  scrutiny-influx:

View File

@@ -0,0 +1,22 @@
# SearXNG — Privacy-respecting meta search engine
# Port: 8888
# URL: http://192.168.0.210:8888
# Aggregates results from Google, Bing, DuckDuckGo, etc. without tracking
services:
  searxng:
    image: searxng/searxng:latest
    container_name: searxng
    ports:
      - "8888:8080"  # host 8888 -> container 8080
    volumes:
      # Instance configuration (settings.yml etc.)
      - /home/homelab/docker/searxng:/etc/searxng
    environment:
      # Public base URL used by the app when building links
      - SEARXNG_BASE_URL=http://192.168.0.210:8888/
    # Drop every Linux capability, then re-add only what the entrypoint
    # needs to fix ownership and drop to its unprivileged runtime user.
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
    restart: unless-stopped

View File

@@ -0,0 +1,68 @@
# Shlink - URL shortener
# Host ports: 8335 (Shlink server/API), 8336 (web client)
# Self-hosted URL shortener backed by PostgreSQL
version: "3.9"
services:
  shlink-db:
    image: postgres
    container_name: Shlink-DB
    hostname: shlink-db
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-d", "shlink", "-U", "shlinkuser"]
      interval: 10s
      timeout: 5s
      retries: 5
    # Quoted so YAML cannot misinterpret the digits-and-colon value.
    user: "1000:1000"
    volumes:
      - /home/homelab/docker/shlinkdb:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: shlink
      POSTGRES_USER: shlinkuser
      POSTGRES_PASSWORD: "REDACTED_PASSWORD"
    restart: unless-stopped
  shlink:
    image: shlinkio/shlink:stable
    container_name: Shlink
    hostname: shlink
    security_opt:
      - no-new-privileges:true
    ports:
      - "8335:8080"
    environment:
      # BUG FIX: list-form env entries are passed verbatim, so `KEY="value"`
      # ships literal double quotes to the app — the DB password would not
      # match what Postgres was initialized with above. Values are unquoted.
      - TIMEZONE=America/Los_Angeles
      - INITIAL_API_KEY=REDACTED_API_KEY
      - DB_DRIVER=postgres
      - DB_NAME=shlink
      - DB_USER=shlinkuser
      - DB_PASSWORD=REDACTED_PASSWORD
      - DB_HOST=shlink-db
      - DB_PORT=5432
      - DEFAULT_DOMAIN=url.thevish.io
      - IS_HTTPS_ENABLED=true
      - GEOLITE_LICENSE_KEY=REDACTED_GEOLITE_KEY
    restart: unless-stopped
    depends_on:
      # Wait for Postgres to pass its healthcheck rather than merely start,
      # otherwise Shlink can race the DB on first boot.
      shlink-db:
        condition: service_healthy
  shlink-web:
    image: shlinkio/shlink-web-client:stable
    container_name: Shlink-WEB
    hostname: shlink-web
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:80/ || exit 1
    ports:
      - "8336:80"
    environment:
      - SHLINK_SERVER_NAME=thevish
      - SHLINK_SERVER_URL=https://url.thevish.io
      - SHLINK_SERVER_API_KEY=REDACTED_API_KEY
    restart: unless-stopped
    depends_on:
      - shlink

View File

@@ -0,0 +1,15 @@
# Signal API — REST wrapper around signal-cli for messenger automation.
# Host port 8080 -> container 8080.
version: "3"
services:
  signal-cli-rest-api:
    image: bbernhard/signal-cli-rest-api
    container_name: signal-api
    environment:
      - MODE=native
    volumes:
      # signal-cli account/registration state
      - /home/homelab/docker/signal:/home/.local/share/signal-cli
    ports:
      - "8080:8080"
    restart: unless-stopped

View File

@@ -0,0 +1,23 @@
# Syncthing - File synchronization
# Port: 8384 (web), 22000 (sync)
# Continuous file synchronization between devices
version: "2.1"
services:
  syncthing:
    image: lscr.io/linuxserver/syncthing:latest
    container_name: syncthing
    hostname: syncthing  # optional
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Los_Angeles
    volumes:
      - /root/docker/syncthing/config:/config
      # BUG FIX: the original entries had no container-side target, which made
      # Docker create anonymous volumes at those host-looking paths *inside*
      # the container instead of bind-mounting the host folders. Mapped to
      # /data1 and /data2 per the linuxserver/syncthing template.
      - /root/docker/syncthing/data1:/data1
      - /root/docker/syncthing/data2:/data2
    ports:
      - "8384:8384"        # web UI
      - "22000:22000/tcp"  # sync protocol
      - "22000:22000/udp"  # sync protocol (QUIC)
      - "21027:21027/udp"  # local discovery broadcasts
    restart: unless-stopped

View File

@@ -0,0 +1,18 @@
# WatchYourLAN - Network scanner
# Port: 8840
# Lightweight network IP scanner with web UI
services:
  watchyourlan:
    container_name: WatchYourLAN
    environment:
      - TZ=America/Los_Angeles
      - HOST=192.168.0.210  # address the web UI binds to
      - PORT=8840           # web UI port (host networking, so no `ports:` mapping)
      - IFACES=ens18        # network interface(s) to scan
      - THEME=grass         # UI theme
      - COLOR=dark          # UI color scheme
    volumes:
      # Scan history / app database
      - /home/homelab/docker/wyl:/data/WatchYourLAN
    # Host networking gives the container direct access to the LAN,
    # which the scanner needs to see neighboring devices.
    network_mode: host
    restart: unless-stopped
    image: aceberg/watchyourlan:v2

View File

@@ -0,0 +1,15 @@
# Web Check - Website analysis
# Host port: 6160 -> container 3000 (header previously said 3000)
# All-in-one website OSINT analysis tool
version: "3.9"
services:
  webcheck:
    container_name: Web-Check
    image: lissy93/web-check
    mem_limit: 4g    # hard memory cap
    cpu_shares: 768  # relative CPU weight (default is 1024)
    security_opt:
      - no-new-privileges:true
    # Retry up to 5 times after a crash, then stay stopped
    restart: on-failure:5
    ports:
      - 6160:3000

View File

@@ -0,0 +1,23 @@
# WebCord - Discord client
# Port: 3000
# Web-based Discord client
---
version: "2.1"
services:
  webcord:
    image: lscr.io/linuxserver/webcord:latest
    container_name: webcord
    security_opt:
      # Relaxed seccomp profile — marked optional by the upstream template;
      # presumably needed for the Electron/Chromium sandbox on some hosts.
      - seccomp:unconfined #optional
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Los_Angeles
    volumes:
      # App config / session state
      - /home/homelab/docker/webcord:/config
    ports:
      - 3000:3000  # web UI (http) — per linuxserver convention; confirm
      - 3001:3001  # web UI (https) — per linuxserver convention; confirm
    # Large /dev/shm for the embedded browser engine
    shm_size: "1gb"
    restart: unless-stopped

View File

@@ -0,0 +1,89 @@
# mastodon-rocky
Rocky Linux 10 VM running Mastodon (bare-metal systemd, no Docker). Hosted on Calypso (Synology DS723+).
**Hostname**: mastodon-rocky
**LAN IP**: 192.168.0.126 (DHCP)
**Tailscale IP**: 100.64.0.3
**SSH**: `ssh mastodon-rocky` (via Tailscale — see `~/.ssh/config`)
**SSH user**: root
---
## Hardware (Virtual Machine)
| Property | Value |
|----------|-------|
| **Hypervisor** | Synology Virtual Machine Manager (VMM) on Calypso |
| **Host** | Calypso — Synology DS723+ |
| **OS** | Rocky Linux 10.1 (Red Quartz) |
| **Kernel** | 6.12.0-124.27.1.el10_1.x86_64 |
| **Architecture** | x86_64 |
| **vCPU** | 4 cores (AMD Ryzen Embedded V1780B, host passthrough) |
| **RAM** | 8 GB |
| **Disk** | 100 GB (virtual disk), 61 GB root LVM (`/dev/mapper/rl-root`) |
| **Network** | `ens3`, bridged to Calypso LAN |
---
## Network Configuration
- **LAN IP**: `192.168.0.126/24` (DHCP)
- **Tailscale IP**: `100.64.0.3` (Headscale node 21)
- **Default gateway**: `192.168.0.1`
### Tailscale / Headscale
Joined to Headscale at `headscale.vish.gg:8443`. Accepts all subnet routes (`--accept-routes`).
**Known routing quirk**: Same as other `192.168.0.0/24` nodes — Calypso's subnet route advertisement via Headscale causes Tailscale to install `192.168.0.0/24` in table 52, breaking inbound LAN connectivity. Fixed with a persistent NetworkManager dispatcher hook:
```bash
# /etc/NetworkManager/dispatcher.d/99-lan-routing-fix
[ "$2" = "up" ] && ip rule add to 192.168.0.0/24 priority 5200 lookup main 2>/dev/null || true
```
**DNS gotcha**: When Tailscale is offline or mid-switch, it overwrites `/etc/resolv.conf` with `nameserver 100.100.100.100` (MagicDNS), which is unreachable — breaking DNS entirely. If you ever need to re-join Headscale:
```bash
echo 'nameserver 1.1.1.1' > /etc/resolv.conf
tailscale up --login-server=https://headscale.vish.gg:8443 --authkey=<key> --accept-routes --hostname=mastodon-rocky --force-reauth
```
---
## Services
All services run as bare-metal systemd units (no Docker).
| Service | Description | Port |
|---------|-------------|------|
| `mastodon-web.service` | Mastodon web (Puma) | 3000 |
| `mastodon-streaming.service` | Mastodon streaming API | 4000 |
| `mastodon-sidekiq.service` | Mastodon background jobs | — |
| `nginx.service` | Reverse proxy | 80, 443 |
| `postgresql.service` | PostgreSQL database | 5432 |
| `valkey.service` | Valkey (Redis-compatible) cache | 6379 |
### Service Management
```bash
# Check all Mastodon services
systemctl status mastodon-web mastodon-streaming mastodon-sidekiq
# Restart Mastodon
systemctl restart mastodon-web mastodon-streaming mastodon-sidekiq
# View logs
journalctl -u mastodon-web -f
journalctl -u mastodon-sidekiq -f
```
---
## Web Console
Cockpit is available at `https://mastodon-rocky:9090` or `https://192.168.0.126:9090`.
---
*Last Updated*: 2026-03-10
*Host*: Calypso (Synology DS723+) via Synology VMM

28
hosts/vms/matrix-ubuntu-vm/.gitignore vendored Normal file
View File

@@ -0,0 +1,28 @@
# Environment files with secrets
.env
.env.production
*.env.local
# Database dumps
*.sql
*.dump
# Logs
*.log
logs/
# Media files
public/system/
media_store/
# Docker volumes
redis/
data/
# OS files
.DS_Store
Thumbs.db
# IDE
.vscode/
.idea/

View File

@@ -0,0 +1,341 @@
# Ubuntu VM Homelab
Self-hosted communication platform with Mastodon, Mattermost, and Matrix/Element on a single Ubuntu VM sharing PostgreSQL.
## Current Deployment Status
| Service | Status | Domain | Internal Port | Nginx Port |
|---------|--------|--------|---------------|------------|
| ✅ Mastodon | Running | mastodon.vish.gg | 3000, 4000 | 8082 |
| ✅ Mattermost | Running | mm.crista.love | 8065 | 8081 |
| ✅ Matrix (mx.vish.gg) | Running | mx.vish.gg | 8018 | 8082 |
| ✅ Matrix (vish - legacy) | Running | matrix.thevish.io | 8008 | 8081 |
| ✅ PostgreSQL | Running | - | 5432 | - |
| ✅ Redis | Running | - | 6379 | - |
| ✅ TURN (coturn) | Running | mx.vish.gg:3479 | 3479 | - |
## VM Specifications
- **OS**: Ubuntu 24.04.4 LTS (x86_64)
- **Hostname**: matrix-ubuntu
- **LAN IP**: 192.168.0.154 (static) — `ssh ubuntu-matrix`
- **Tailscale IP**: 100.85.21.51
- **SSH user**: test
- **RAM**: 7.7 GB
- **CPU**: 4 cores
- **Storage**: 96 GB
- **Network**: Static IP set via netplan (`/etc/netplan/99-static.yaml`), cloud-init network management disabled
## Architecture
```
┌─────────────────────────────────────────────────────────────┐
│ Cloudflare Proxy │
└─────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────┐
│ Nginx │
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
│ │ :8080 │ │ :8081 │ │ :8082 │ │
│ │ Matrix │ │ Mattermost │ │ Mastodon │ │
│ └─────────────┘ └─────────────┘ └─────────────┘ │
└─────────────────────────────────────────────────────────────┘
│ │ │
▼ ▼ ▼
┌─────────────┐ ┌─────────────┐ ┌─────────────┐
│ Synapse │ │ Mattermost │ │ Mastodon │
│ :8008 │ │ Docker │ │ Docker │
│ + Element │ │ :8065 │ │ :3000 │
└─────────────┘ └─────────────┘ │ :4000 │
│ │ └─────────────┘
│ │ │
└───────────────────┴──────────────────┘
┌─────────────────┐
│ PostgreSQL │
│ :5432 │
│ │
│ - synapse │
│ - mattermost │
│ - mastodon │
└─────────────────┘
```
## Databases
All services share the same PostgreSQL 16 server:
| Database | User | Purpose |
|----------|------|---------|
| synapse | synapse | Matrix homeserver (vish - legacy) |
| synapse_mx | synapse_mx | Matrix homeserver (mx.vish.gg - federated) |
| mattermost | mmuser | Mattermost |
| mastodon_production | mastodon | Mastodon |
## Docker Containers
```
NAMES IMAGE STATUS
mastodon-streaming-1 ghcr.io/mastodon/mastodon-streaming:v4.5.7 Up
mastodon-web-1 ghcr.io/mastodon/mastodon:v4.5.7 Up
mastodon-sidekiq-1 ghcr.io/mastodon/mastodon:v4.5.7 Up
mastodon-redis-1 redis:7-alpine Up
mattermost mattermost/mattermost-team-edition:11.4 Up (healthy)
```
## Systemd Services (bare-metal)
```
UNIT SERVICE VERSION
synapse.service Synapse (legacy) 1.148.0 — /opt/synapse, port 8008
synapse-mx.service Synapse (primary) 1.148.0 — /opt/synapse-mx, port 8018
```
Both Synapse instances share the venv at `/opt/synapse/venv/`.
## Quick Start
1. Clone this repo to your VM
2. Copy environment templates and edit with your values
3. Run the setup script
```bash
git clone https://git.vish.gg/Vish/Ubuntu-vm-homelab.git
cd Ubuntu-vm-homelab
./scripts/setup.sh
```
## Directory Structure
```
Ubuntu-vm-homelab/
├── mastodon/
│ ├── docker-compose.yml
│ └── .env.production.template
├── mattermost/
│ ├── docker-compose.yml
│ └── config.json.template
├── matrix-element/
│ ├── homeserver.yaml.template
│ └── element-config.json.template
├── nginx/
│ ├── mastodon.conf
│ ├── mattermost.conf
│ └── matrix.conf
├── scripts/
│ ├── setup.sh
│ ├── backup.sh
│ └── update.sh
└── README.md
```
## Credentials
Stored securely on the server:
- `/opt/mastodon/.env.production` - Mastodon secrets
- `/opt/mattermost/config/config.json` - Mattermost config
- `/opt/synapse/homeserver.yaml` - Matrix config
## Cloudflare Setup
Each service requires a DNS record pointing to the VM's public IP with Cloudflare proxy enabled.
Configure origin rules to route to the correct nginx port.
## Maintenance
### Backup
```bash
./scripts/backup.sh
```
### View Logs
```bash
# Mastodon
cd /opt/mastodon && docker compose logs -f
# Mattermost
docker logs -f mattermost
# Matrix (mx.vish.gg)
tail -f /opt/synapse-mx/homeserver.log
# Matrix (legacy vish)
tail -f /opt/synapse/homeserver.log
```
---
## Updating Services
### Update Mastodon
```bash
cd /opt/mastodon
# Pull latest images
docker compose pull
# Stop services
docker compose down
# Run database migrations
docker compose run --rm web bundle exec rails db:migrate
# Precompile assets (if needed)
docker compose run --rm web bundle exec rails assets:precompile
# Start services
docker compose up -d
# Verify
docker compose ps
```
**Check for release notes:** https://github.com/mastodon/mastodon/releases
### Update Mattermost
```bash
cd /opt/mattermost
# Check current version
docker exec mattermost mattermost version
# Pull latest image
docker compose pull
# Stop and restart
docker compose down
docker compose up -d
# Verify
docker logs mattermost | head -20
```
**Check for release notes:** https://docs.mattermost.com/about/mattermost-server-releases.html
### Update Matrix Synapse (both instances share the same venv)
Both instances use `/opt/synapse/venv/` — upgrade once, restart both.
```bash
# Check current version
curl -s http://localhost:8018/_synapse/admin/v1/server_version
# Upgrade (pin to a specific version, e.g. 1.148.0)
sudo /opt/synapse/venv/bin/pip install 'matrix-synapse==1.148.0'
# Restart both services
sudo systemctl restart synapse synapse-mx
# Verify
curl -s http://localhost:8008/_synapse/admin/v1/server_version # legacy
curl -s http://localhost:8018/_synapse/admin/v1/server_version # mx
```
**Check for release notes:** https://github.com/element-hq/synapse/releases
> **Note:** If startup fails with `InsufficientPrivilege: must be owner of table`, see
> the DB ownership fix in `docs/MATRIX.md#db-ownership-fix`.
### Update Element Web
```bash
# Check latest version at https://github.com/element-hq/element-web/releases
ELEMENT_VERSION="v1.12.11" # Change to latest version
# Download and extract
cd /tmp
wget https://github.com/element-hq/element-web/releases/download/${ELEMENT_VERSION}/element-${ELEMENT_VERSION}.tar.gz
tar -xzf element-${ELEMENT_VERSION}.tar.gz
# Backup current config
cp /opt/element/web/config.json /tmp/element-config-backup.json
# Back up configs
cp /opt/element/web/config.json /tmp/element-config-web.json
cp /opt/element/web-thevish/config.json /tmp/element-config-thevish.json
# Replace files (both installs share the same release)
sudo rm -rf /opt/element/web/* /opt/element/web-thevish/*
sudo cp -r element-${ELEMENT_VERSION}/* /opt/element/web/
sudo cp -r element-${ELEMENT_VERSION}/* /opt/element/web-thevish/
# Restore configs
sudo cp /tmp/element-config-web.json /opt/element/web/config.json
sudo cp /tmp/element-config-thevish.json /opt/element/web-thevish/config.json
# Verify (nginx serves static files, no restart needed)
cat /opt/element/web/version
cat /opt/element/web-thevish/version
# Cleanup
rm -rf /tmp/element-${ELEMENT_VERSION}* /tmp/element-config-*.json
```
### Update TURN Server (coturn)
```bash
# Update via apt
sudo apt update
sudo apt upgrade coturn
# Restart
sudo systemctl restart coturn
# Verify
sudo systemctl status coturn
```
### Update All Services (Quick Script)
```bash
#!/bin/bash
# Save as /opt/scripts/update-all.sh
echo "=== Updating Mastodon ==="
cd /opt/mastodon
docker compose pull
docker compose down
docker compose run --rm web bundle exec rails db:migrate
docker compose up -d
echo "=== Updating Mattermost ==="
cd /opt/mattermost
docker compose pull
docker compose down
docker compose up -d
echo "=== Updating Synapse ==="
cd /opt/synapse
source venv/bin/activate
pip install --upgrade matrix-synapse
pkill -f 'synapse.app.homeserver'
sleep 2
sudo -u synapse /opt/synapse/venv/bin/python -m synapse.app.homeserver \
--config-path=/opt/synapse-mx/homeserver.yaml --daemonize
sudo -u synapse /opt/synapse/venv/bin/python -m synapse.app.homeserver \
--config-path=/opt/synapse/homeserver.yaml --daemonize
echo "=== Updating System Packages ==="
sudo apt update && sudo apt upgrade -y
echo "=== Done! ==="
```
---
## Federation Status
| Service | Protocol | Federation |
|---------|----------|------------|
| Matrix (mx.vish.gg) | Matrix | ✅ Enabled |
| Matrix (vish) | Matrix | ❌ Disabled (invalid server_name) |
| Mastodon | ActivityPub | ✅ Enabled |
| Mattermost | Shared Channels | ❌ Enterprise only |
## License
MIT

View File

@@ -0,0 +1,28 @@
# Diun — Docker Image Update Notifier
#
# Watches all running containers on this host and sends ntfy
# notifications when upstream images update their digest.
# Schedule: Mondays 09:00 (weekly cadence).
#
# ntfy topic: https://ntfy.vish.gg/diun
services:
  diun:
    image: crazymax/diun:latest
    container_name: diun
    volumes:
      # Docker socket: lets Diun enumerate running containers on this host
      - /var/run/docker.sock:/var/run/docker.sock
      # Persistent digest database
      - diun-data:/data
    environment:
      LOG_LEVEL: info
      DIUN_WATCH_WORKERS: "20"
      DIUN_WATCH_SCHEDULE: "0 9 * * 1"  # cron: Mondays at 09:00
      DIUN_WATCH_JITTER: 30s            # random start delay per run
      DIUN_PROVIDERS_DOCKER: "true"
      DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT: "true"  # watch all containers unless opted out
      DIUN_NOTIF_NTFY_ENDPOINT: "https://ntfy.vish.gg"
      DIUN_NOTIF_NTFY_TOPIC: "diun"
    restart: unless-stopped
volumes:
  diun-data:

View File

@@ -0,0 +1,171 @@
# Mastodon Federation Guide
## What is Federation?
Federation allows your Mastodon instance to communicate with other Mastodon instances (and other ActivityPub-compatible servers). Users can follow accounts on other servers, and posts are shared across the network.
## Federation Requirements
### 1. HTTPS (Required)
Federation only works over HTTPS. Cloudflare provides this automatically when proxying is enabled.
### 2. Correct Domain Configuration
```env
# .env.production
LOCAL_DOMAIN=mastodon.vish.gg
```
⚠️ **Warning**: Changing LOCAL_DOMAIN after setup will break existing accounts!
### 3. Webfinger Endpoint
Must respond correctly at:
```
https://mastodon.vish.gg/.well-known/webfinger?resource=acct:username@mastodon.vish.gg
```
Expected response:
```json
{
"subject": "acct:vish@mastodon.vish.gg",
"aliases": [
"https://mastodon.vish.gg/@vish",
"https://mastodon.vish.gg/users/vish"
],
"links": [
{
"rel": "http://webfinger.net/rel/profile-page",
"type": "text/html",
"href": "https://mastodon.vish.gg/@vish"
},
{
"rel": "self",
"type": "application/activity+json",
"href": "https://mastodon.vish.gg/users/vish"
}
]
}
```
### 4. ActivityPub Actor Endpoint
Must respond at:
```
https://mastodon.vish.gg/users/vish
```
With `Accept: application/activity+json` header.
## Testing Federation
### Test Webfinger (from external server)
```bash
curl "https://mastodon.vish.gg/.well-known/webfinger?resource=acct:vish@mastodon.vish.gg"
```
### Test Actor Endpoint
```bash
curl -H "Accept: application/activity+json" "https://mastodon.vish.gg/users/vish"
```
### Test Outbound Federation
Search for a remote user in your Mastodon instance:
1. Go to https://mastodon.vish.gg
2. Search for `@Gargron@mastodon.social`
3. If federation works, you'll see the user's profile
### Test from Another Instance
Go to any public Mastodon instance and search for:
```
@vish@mastodon.vish.gg
```
## Cloudflare Configuration
### Required Settings
1. **Proxy Status**: Orange cloud (Proxied) ✅
2. **SSL/TLS Mode**: Full (strict)
3. **Cache Level**: Standard (or Bypass for API endpoints)
### Origin Rules (if using non-standard ports)
Since nginx listens on port 8082, configure an origin rule:
**Rule**:
- If hostname equals `mastodon.vish.gg`
- Then: Override destination port to 8082
### Firewall Rules
Ensure port 8082 is accessible from Cloudflare IPs or use Cloudflare Tunnel.
## Common Federation Issues
### Issue: Remote users can't find your instance
**Cause**: DNS not properly configured or Cloudflare not proxying
**Fix**:
1. Verify DNS A record points to your server
2. Enable Cloudflare proxy (orange cloud)
3. Wait for DNS propagation
### Issue: Webfinger returns 301 redirect
**Normal behavior**: Mastodon redirects HTTP to HTTPS
**Solution**: Ensure requests come via HTTPS
### Issue: Cannot follow remote users
**Cause**: Outbound connections blocked
**Fix**:
1. Check firewall allows outbound HTTPS (443)
2. Verify sidekiq is running: `docker compose ps`
3. Check sidekiq logs: `docker compose logs sidekiq`
### Issue: Federation lag
**Cause**: High queue backlog in sidekiq
**Fix**:
```bash
# Check queue status
docker compose exec web bin/tootctl sidekiq status
# Clear dead jobs if needed
docker compose exec web bin/tootctl sidekiq kill
```
## Federation Debug Commands
```bash
# Check instance connectivity
cd /opt/mastodon
docker compose exec web bin/tootctl domains crawl mastodon.social
# Refresh a remote account
docker compose exec web bin/tootctl accounts refresh @Gargron@mastodon.social
# Clear delivery failures
docker compose exec web bin/tootctl domains purge <domain>
```
## Security Considerations
### Block/Allow Lists
Configure in Admin → Federation:
- Block specific domains
- Silence (limit) specific domains
- Allow specific domains (whitelist mode)
### Rate Limiting
Mastodon has built-in rate limiting for federation requests to prevent abuse.
## Monitoring Federation Health
### Check Sidekiq Queues
```bash
docker compose exec web bin/tootctl sidekiq stats
```
Healthy queues should have:
- Low `push` queue (outbound deliveries)
- Low `pull` queue (fetching remote content)
- Minimal retries
### Check Federation Stats
In Admin → Dashboard:
- Known instances count
- Active users (remote)
- Incoming/outgoing messages

View File

@@ -0,0 +1,321 @@
# Matrix Synapse Setup
This VM runs **two Matrix Synapse instances**:
| Instance | server_name | Domain | Federation | Purpose |
|----------|-------------|--------|------------|---------|
| **Primary** | `mx.vish.gg` | https://mx.vish.gg | ✅ Yes | Main server with federation |
| **Legacy** | `vish` | https://matrix.thevish.io | ❌ No | Historical data archive |
## Architecture
```
Internet
┌────────┴────────┐
│ Cloudflare │
└────────┬────────┘
┌─────────────┴─────────────┐
│ │
▼ ▼
┌─────────────────┐ ┌─────────────────┐
│ mx.vish.gg │ │ matrix.thevish.io│
│ (port 443) │ │ (port 443) │
└────────┬────────┘ └────────┬─────────┘
│ │
▼ ▼
┌─────────────────┐ ┌─────────────────┐
│ Synology Reverse│ │ Synology Reverse│
│ Proxy → :8082 │ │ Proxy → :8081 │
└────────┬────────┘ └────────┬─────────┘
│ │
└───────────┬───────────────┘
┌─────────────────────────────────────┐
│ Ubuntu VM (192.168.0.154) │
│ ┌──────────────┐ ┌──────────────┐ │
│ │ Nginx :8082 │ │ Nginx :8081 │ │
│ │ mx.vish.gg │ │ thevish.io │ │
│ └──────┬───────┘ └──────┬───────┘ │
│ │ │ │
│ ▼ ▼ │
│ ┌──────────────┐ ┌──────────────┐ │
│ │ Synapse:8018 │ │ Synapse:8008 │ │
│ │ mx.vish.gg │ │ vish │ │
│ └──────┬───────┘ └──────┬───────┘ │
│ │ │ │
│ ▼ ▼ │
│ ┌──────────────┐ ┌──────────────┐ │
│ │ synapse_mx │ │ synapse │ │
│ │ PostgreSQL │ │ PostgreSQL │ │
│ └──────────────┘ └──────────────┘ │
└─────────────────────────────────────┘
```
## Primary Server: mx.vish.gg
**This is the main server with federation enabled.**
### Configuration
- **Location**: `/opt/synapse-mx/`
- **Config**: `/opt/synapse-mx/homeserver.yaml`
- **Signing Key**: `/opt/synapse-mx/mx.vish.gg.signing.key`
- **Media Store**: `/opt/synapse-mx/media_store/`
- **Database**: `synapse_mx` (user: `synapse_mx`)
- **Port**: 8018 (Synapse) → 8082 (Nginx)
### User IDs
Users on this server have IDs like: `@username:mx.vish.gg`
### Federation
- ✅ Can communicate with matrix.org and other federated servers
- ✅ Can join public rooms on other servers
- ✅ Other users can find and message your users
### Managing the Service
```bash
sudo systemctl start synapse-mx
sudo systemctl stop synapse-mx
sudo systemctl restart synapse-mx
sudo systemctl status synapse-mx
```
Service file: `/etc/systemd/system/synapse-mx.service`
## Legacy Server: vish (matrix.thevish.io)
**This server contains historical data and cannot federate.**
### Why No Federation?
The `server_name` is `vish` which is not a valid domain. Other Matrix servers cannot discover it because:
- No DNS record for `vish`
- Cannot serve `.well-known` at `https://vish/`
### Configuration
- **Location**: `/opt/synapse/`
- **Config**: `/opt/synapse/homeserver.yaml`
- **Signing Key**: `/opt/synapse/vish.signing.key`
- **Media Store**: `/opt/synapse/media_store/`
- **Database**: `synapse` (user: `synapse`)
- **Port**: 8008 (Synapse) → 8081 (Nginx)
### User IDs
Users on this server have IDs like: `@username:vish`
### Managing the Service
```bash
sudo systemctl start synapse
sudo systemctl stop synapse
sudo systemctl restart synapse
sudo systemctl status synapse
```
Service file: `/etc/systemd/system/synapse.service`
## TURN Server (coturn)
TURN server enables voice/video calls to work through NAT.
### Configuration
- **Config**: `/etc/turnserver.conf`
- **Ports**: 3479 (TURN), 5350 (TURNS), 49201-49250 (Media relay UDP)
- **Realm**: `matrix.thevish.io`
- **Auth Secret**: Shared with Synapse (`turn_shared_secret`)
### Key Settings
```ini
listening-port=3479
tls-listening-port=5350
listening-ip=0.0.0.0
external-ip=YOUR_WAN_IP/192.168.0.154
static-auth-secret=<shared-secret>
realm=matrix.thevish.io
min-port=49201
max-port=49250
```
### Port Forwarding Required
| Port | Protocol | Purpose |
|------|----------|---------|
| 3479 | TCP/UDP | TURN |
| 5350 | TCP/UDP | TURNS (TLS) |
| 49201-49250 | UDP | Media relay |
## Element Web
Element Web is served by Nginx for both instances.
### mx.vish.gg
- **Location**: `/opt/element/web/`
- **Config**: `/opt/element/web/config.json`
- **URL**: https://mx.vish.gg/
### matrix.thevish.io
- **Location**: `/opt/element/web-thevish/`
- **Config**: `/opt/element/web-thevish/config.json`
- **URL**: https://matrix.thevish.io/
## Nginx Configuration
### mx.vish.gg (port 8082)
Location: `/etc/nginx/sites-available/mx-vish-gg`
```nginx
server {
listen 8082;
server_name mx.vish.gg;
root /opt/element/web;
location /health { proxy_pass http://127.0.0.1:8018; }
location ~ ^(/_matrix|/_synapse/client) { proxy_pass http://127.0.0.1:8018; }
location /_matrix/federation { proxy_pass http://127.0.0.1:8018; }
location /.well-known/matrix/server { return 200 '{"m.server": "mx.vish.gg:443"}'; }
location /.well-known/matrix/client { return 200 '{"m.homeserver": {"base_url": "https://mx.vish.gg"}}'; }
location / { try_files $uri $uri/ /index.html; }
}
```
### matrix.thevish.io (port 8081)
Location: `/etc/nginx/sites-available/matrix-thevish`
```nginx
server {
listen 8081;
server_name matrix.thevish.io;
root /opt/element/web-thevish;
location /health { proxy_pass http://127.0.0.1:8008; }
location ~ ^(/_matrix|/_synapse/client) { proxy_pass http://127.0.0.1:8008; }
location /.well-known/matrix/server { return 200 '{"m.server": "matrix.thevish.io:443"}'; }
location /.well-known/matrix/client { return 200 '{"m.homeserver": {"base_url": "https://matrix.thevish.io"}}'; }
location / { try_files $uri $uri/ /index.html; }
}
```
## Synology Reverse Proxy
| Name | Source (HTTPS) | Destination (HTTP) |
|------|----------------|-------------------|
| mx_vish_gg | mx.vish.gg:443 | 192.168.0.154:8082 |
| matrix_thevish | matrix.thevish.io:443 | 192.168.0.154:8081 |
## Cloudflare DNS
| Type | Name | Content | Proxy |
|------|------|---------|-------|
| A | mx.vish.gg | YOUR_WAN_IP | ✅ Proxied |
| A | matrix.thevish.io | YOUR_WAN_IP | ✅ Proxied |
## Database Backup
### Backup mx.vish.gg
```bash
sudo -u postgres pg_dump -Fc synapse_mx > synapse_mx_backup_$(date +%Y%m%d).dump
```
### Backup legacy vish
```bash
sudo -u postgres pg_dump -Fc synapse > synapse_vish_backup_$(date +%Y%m%d).dump
```
### Restore
```bash
sudo -u postgres pg_restore -d <database_name> <backup_file.dump>
```
## Testing Federation
Use the Matrix Federation Tester:
```bash
curl -s "https://federationtester.matrix.org/api/report?server_name=mx.vish.gg" | python3 -c "
import sys, json
d = json.load(sys.stdin)
print(f'Federation OK: {d.get(\"FederationOK\", False)}')
"
```
## Creating Users
### Via registration (if enabled)
Go to https://mx.vish.gg and click "Create account"
### Via command line
```bash
cd /opt/synapse-mx
sudo -u synapse /opt/synapse/venv/bin/register_new_matrix_user \
-c /opt/synapse-mx/homeserver.yaml \
-u <username> -p <password> -a
```
## Troubleshooting
### Check if Synapse is running
```bash
sudo systemctl status synapse synapse-mx
curl -s http://localhost:8008/_synapse/admin/v1/server_version # legacy
curl -s http://localhost:8018/_synapse/admin/v1/server_version # mx
```
### View logs
```bash
sudo journalctl -u synapse -f        # legacy vish
sudo journalctl -u synapse-mx -f     # mx.vish.gg
```
### Test health endpoints
```bash
curl http://localhost:8018/health # mx.vish.gg
curl http://localhost:8008/health # legacy vish
```
### Restart nginx
```bash
sudo nginx -t && sudo systemctl reload nginx
```
### DB ownership fix (apply if migrations fail on upgrade)
If Synapse fails to start after upgrade with `InsufficientPrivilege: must be owner of table`,
the DB tables need their ownership corrected. Run for the affected database:
```bash
# For synapse (legacy) DB:
sudo -u postgres psql synapse -t -c "
SELECT 'ALTER TABLE public.' || tablename || ' OWNER TO synapse;'
FROM pg_tables WHERE schemaname='public' AND tableowner <> 'synapse';
" | sudo -u postgres psql synapse
sudo -u postgres psql synapse -t -c "
SELECT 'ALTER SEQUENCE ' || sequence_name || ' OWNER TO synapse;'
FROM information_schema.sequences WHERE sequence_schema='public';
" | sudo -u postgres psql synapse
# For synapse_mx DB, replace 'synapse' with 'synapse_mx' throughout
```

View File

@@ -0,0 +1,259 @@
# Deployment Documentation
Complete setup guide for the Ubuntu VM Homelab with Mastodon, Mattermost, and Matrix/Element.
## Server Access
```
IP: YOUR_WAN_IP
SSH Port: 65533
Username: test
Password: "REDACTED_PASSWORD"
```
## Service Credentials
### Mastodon Admin
- **Username**: vish
- **Email**: your-email@example.com
- **Password**: `c16a0236e5a5da1e0c80bb296a290fc3`
- **URL**: https://mastodon.vish.gg
### Mattermost
- **URL**: https://mm.crista.love
- **Admin**: (configured during first access)
### Matrix/Element
- **URL**: https://mx.vish.gg
- **Homeserver**: mx.vish.gg
## PostgreSQL Configuration
PostgreSQL 16 is configured to allow Docker container connections:
```
# /etc/postgresql/16/main/pg_hba.conf
host all all 172.17.0.0/16 md5
host all all 0.0.0.0/0 md5
# /etc/postgresql/16/main/postgresql.conf
listen_addresses = '*'
```
### Database Credentials
| Database | User | Password |
|----------|------|----------|
| mastodon_production | mastodon | mastodon_pass_2026 |
| mattermost | mmuser | (check /opt/mattermost/config/config.json) |
| synapse | synapse | (check /opt/synapse/homeserver.yaml) |
## Nginx Configuration
### Ports
- **8080**: Matrix/Element (mx.vish.gg)
- **8081**: Mattermost (mm.crista.love)
- **8082**: Mastodon (mastodon.vish.gg)
### Site Configs
```
/etc/nginx/sites-enabled/
├── mastodon -> /etc/nginx/sites-available/mastodon
├── matrix -> /etc/nginx/sites-available/matrix
└── mattermost -> /etc/nginx/sites-available/mattermost
```
## Mastodon Setup Details
### Directory Structure
```
/opt/mastodon/
├── docker-compose.yml
├── .env.production
├── public/
│ └── system/ # Media uploads
└── redis/ # Redis data
```
### Environment Variables
```env
LOCAL_DOMAIN=mastodon.vish.gg
SINGLE_USER_MODE=false
# Database
DB_HOST=172.17.0.1
DB_PORT=5432
DB_NAME=mastodon_production
DB_USER=mastodon
DB_PASS="REDACTED_PASSWORD"
# Redis
REDIS_HOST=redis
REDIS_PORT=6379
# SMTP (Gmail) - CONFIGURED AND WORKING ✅
SMTP_SERVER=smtp.gmail.com
SMTP_PORT=587
SMTP_LOGIN=your-email@example.com
SMTP_PASSWORD="REDACTED_PASSWORD"
SMTP_AUTH_METHOD=plain
SMTP_ENABLE_STARTTLS=auto
SMTP_FROM_ADDRESS="Mastodon <notifications@mastodon.vish.gg>"
# Search
ES_ENABLED=false
```
### Common Commands
```bash
# View logs
cd /opt/mastodon && docker compose logs -f
# Restart services
cd /opt/mastodon && docker compose restart
# Run admin commands
cd /opt/mastodon && docker compose exec web bin/tootctl <command>
# Create new user
docker compose run --rm web bin/tootctl accounts create USERNAME --email=EMAIL --confirmed --role=Owner
# Database migration
docker compose run --rm web bundle exec rake db:migrate
```
## Mattermost Setup Details
### Directory Structure
```
/opt/mattermost/
├── config/
│ └── config.json
├── data/
├── logs/
├── plugins/
└── client/plugins/
```
### Docker Command
```bash
docker run -d --name mattermost \
-p 8065:8065 \
-v /opt/mattermost/config:/mattermost/config \
-v /opt/mattermost/data:/mattermost/data \
-v /opt/mattermost/logs:/mattermost/logs \
-v /opt/mattermost/plugins:/mattermost/plugins \
--restart=always \
mattermost/mattermost-team-edition:11.3
```
## Matrix/Synapse Setup Details
### Directory Structure
```
/opt/synapse/
├── homeserver.yaml
├── *.signing.key
└── media_store/
/opt/element/web/
└── (Element Web static files)
```
### Synapse Service
```bash
# Status
systemctl status matrix-synapse
# Restart
systemctl restart matrix-synapse
# Logs
journalctl -u matrix-synapse -f
```
## Cloudflare Configuration
For each service, configure Cloudflare:
1. **DNS Records** (A records pointing to VM public IP)
- mastodon.vish.gg
- mm.crista.love
- mx.vish.gg
2. **Origin Rules** (Route to correct nginx port)
- mastodon.vish.gg → Port 8082
- mm.crista.love → Port 8081
- mx.vish.gg → Port 8080
3. **SSL/TLS**: Full (strict)
## Federation (Mastodon)
Federation requires:
1. ✅ Proper LOCAL_DOMAIN in .env.production
2. ✅ HTTPS via Cloudflare
3. ✅ Webfinger endpoint responding at `/.well-known/webfinger`
4. ⏳ DNS properly configured
Test federation:
```bash
# From another server
curl "https://mastodon.vish.gg/.well-known/webfinger?resource=acct:vish@mastodon.vish.gg"
```
## SMTP Configuration (Gmail)
To send emails via Gmail:
1. Enable 2-Factor Authentication on your Google account
2. Generate an App Password:
- Go to https://myaccount.google.com/apppasswords
- Create a new app password for "Mail"
3. Update `/opt/mastodon/.env.production`:
```
SMTP_PASSWORD="REDACTED_PASSWORD"
```
4. Restart Mastodon:
```bash
cd /opt/mastodon && docker compose restart
```
## Backup Locations
```
/backup/
├── YYYYMMDD_HHMMSS/
│ ├── mattermost.sql
│ ├── synapse.sql
│ ├── mastodon.sql
│ ├── mastodon_media.tar.gz
│ ├── mattermost_data.tar.gz
│ └── synapse_data.tar.gz
```
## Troubleshooting
### Mastodon 403 Forbidden
- Normal when accessing with wrong Host header
- Always access via proper domain or use `-H "Host: mastodon.vish.gg"`
### Federation Not Working
- Check Cloudflare proxy is enabled
- Verify DNS resolves correctly
- Test webfinger endpoint externally
### Database Connection Errors
- Verify PostgreSQL is listening on all interfaces
- Check pg_hba.conf allows Docker network
- Restart PostgreSQL: `systemctl restart postgresql`
### Container Won't Start
```bash
# Check logs
docker logs <container_name>
# Check Docker network
docker network ls
docker network inspect mastodon_internal_network
```

View File

@@ -0,0 +1,178 @@
# SMTP Email Configuration
Guide for configuring email delivery for Mastodon and Mattermost.
## Gmail SMTP Setup
### Prerequisites
1. Google account with 2-Factor Authentication enabled
2. App Password generated for "Mail"
### Generate Gmail App Password
1. Go to [Google Account Security](https://myaccount.google.com/security)
2. Enable 2-Step Verification if not already enabled
3. Go to [App Passwords](https://myaccount.google.com/apppasswords)
4. Select "Mail" and your device
5. Click "Generate"
6. Copy the 16-character password
### Mastodon Configuration
Edit `/opt/mastodon/.env.production`:
```env
# SMTP Configuration (Gmail)
SMTP_SERVER=smtp.gmail.com
SMTP_PORT=587
SMTP_LOGIN=your-email@example.com
SMTP_PASSWORD="REDACTED_PASSWORD"
SMTP_AUTH_METHOD=plain
SMTP_OPENSSL_VERIFY_MODE=none
SMTP_ENABLE_STARTTLS=auto
SMTP_FROM_ADDRESS="Mastodon <notifications@mastodon.vish.gg>"
```
Apply changes:
```bash
cd /opt/mastodon && docker compose restart
```
### Test Email Delivery
```bash
# Confirm the account manually (note: this skips the confirmation email;
# to actually test delivery, use the password-reset flow below)
cd /opt/mastodon
docker compose exec web bin/tootctl accounts modify vish --confirm
# Or trigger password reset
# Go to login page and click "Forgot password"
```
## Mattermost Email Configuration
Edit `/opt/mattermost/config/config.json`:
```json
{
"EmailSettings": {
"EnableSignUpWithEmail": true,
"EnableSignInWithEmail": true,
"EnableSignInWithUsername": true,
"SendEmailNotifications": true,
"RequireEmailVerification": false,
"FeedbackName": "Mattermost",
"FeedbackEmail": "notifications@mm.crista.love",
"SMTPUsername": "your-email@example.com",
"SMTPPassword": "your_16_char_app_password",
"SMTPServer": "smtp.gmail.com",
"SMTPPort": "587",
"ConnectionSecurity": "STARTTLS",
"SendPushNotifications": true
}
}
```
Restart Mattermost:
```bash
docker restart mattermost
```
## Alternative: SendGrid
### Setup
1. Create SendGrid account at https://sendgrid.com
2. Generate API key with "Mail Send" permission
### Mastodon Configuration
```env
SMTP_SERVER=smtp.sendgrid.net
SMTP_PORT=587
SMTP_LOGIN=apikey
SMTP_PASSWORD="REDACTED_PASSWORD"
SMTP_AUTH_METHOD=plain
SMTP_OPENSSL_VERIFY_MODE=peer
SMTP_ENABLE_STARTTLS=auto
SMTP_FROM_ADDRESS="Mastodon <notifications@mastodon.vish.gg>"
```
## Alternative: Mailgun
### Setup
1. Create Mailgun account at https://mailgun.com
2. Verify your domain
3. Get SMTP credentials
### Mastodon Configuration
```env
SMTP_SERVER=smtp.mailgun.org
SMTP_PORT=587
SMTP_LOGIN=postmaster@mg.yourdomain.com
SMTP_PASSWORD="REDACTED_PASSWORD"
SMTP_AUTH_METHOD=plain
SMTP_OPENSSL_VERIFY_MODE=peer
SMTP_ENABLE_STARTTLS=auto
SMTP_FROM_ADDRESS="Mastodon <notifications@mastodon.vish.gg>"
```
## Troubleshooting
### Check SMTP Connection
```bash
# Test from container
docker compose exec web bash -c "echo 'test' | openssl s_client -connect smtp.gmail.com:587 -starttls smtp"
```
### Check Sidekiq Mail Queue
```bash
# View failed email jobs
docker compose exec web bin/tootctl sidekiq status
```
### Common Errors
#### "Username and Password not accepted"
- Verify App Password is correct (not your regular password)
- Ensure 2FA is enabled on Google account
- Check no extra spaces in password
#### "Connection refused"
- Firewall blocking outbound port 587
- Try port 465 with SSL instead
#### "Certificate verify failed"
- Set `SMTP_OPENSSL_VERIFY_MODE=none` (less secure)
- Or ensure CA certificates are up to date
### Gmail-Specific Issues
#### "Less secure app access"
- Not needed when using App Passwords
- App Passwords bypass this requirement
#### "Critical security alert"
- Normal for first connection from new IP
- Confirm it was you in Google Security settings
## Email Content Customization
### Mastodon
Email templates are in the Mastodon source code. Custom templates require forking.
### Mattermost
Edit in System Console → Site Configuration → Customization
- Support Email
- Notification Footer
- Custom Branding
## SPF/DKIM/DMARC
For better deliverability, configure DNS records:
### SPF Record
```
TXT @ "v=spf1 include:_spf.google.com ~all"
```
### Note on Gmail Sending
When using Gmail SMTP, emails are sent "via gmail.com" which has good deliverability. Custom domain email requires additional DNS setup.

View File

@@ -0,0 +1,15 @@
# Dozzle agent - remote container-log access
# Port: 7007
# Runs in agent mode (no web UI); a central Dozzle instance connects to
# this port to stream logs from every container on this host.
services:
  dozzle-agent:
    image: amir20/dozzle:latest
    container_name: dozzle-agent
    command: agent  # agent mode: serve logs to a remote Dozzle, don't host the UI
    volumes:
      # Docker socket grants read access to all container logs on the host.
      - /var/run/docker.sock:/var/run/docker.sock
    ports:
      - "7007:7007"
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "/dozzle", "healthcheck"]
      interval: 30s
      timeout: 5s
      retries: 3

View File

@@ -0,0 +1,45 @@
# Mastodon Environment Configuration
# Copy to .env.production and fill in values
LOCAL_DOMAIN=mastodon.vish.gg
SINGLE_USER_MODE=false
# Generate with: openssl rand -hex 64
SECRET_KEY_BASE=<GENERATE_SECRET>
OTP_SECRET=<GENERATE_SECRET>
# Database (using host PostgreSQL)
# 172.17.0.1 is the default Docker bridge gateway, i.e. the host machine.
DB_HOST=172.17.0.1
DB_PORT=5432
DB_NAME=mastodon_production
DB_USER=mastodon
DB_PASS=REDACTED_DB_PASSWORD
# Redis
REDIS_HOST=redis
REDIS_PORT=6379
# Locale
DEFAULT_LOCALE=en
# SMTP Configuration (Gmail)
# See docs/SMTP.md for setup instructions
SMTP_SERVER=smtp.gmail.com
SMTP_PORT=587
SMTP_LOGIN=your-email@example.com
SMTP_PASSWORD=REDACTED_SMTP_PASSWORD
SMTP_AUTH_METHOD=plain
# NOTE(review): "none" disables TLS certificate verification for SMTP —
# less secure; prefer "peer" unless Gmail's cert chain fails on this host.
SMTP_OPENSSL_VERIFY_MODE=none
SMTP_ENABLE_STARTTLS=auto
SMTP_FROM_ADDRESS="Mastodon <notifications@mastodon.vish.gg>"
# File storage
PAPERCLIP_SECRET=<GENERATE_SECRET>
# Search (optional)
ES_ENABLED=false
# Encryption keys - Generate with: docker compose run --rm web bin/rails db:encryption:init
ACTIVE_RECORD_ENCRYPTION_DETERMINISTIC_KEY=<GENERATE>
ACTIVE_RECORD_ENCRYPTION_KEY_DERIVATION_SALT=<GENERATE>
ACTIVE_RECORD_ENCRYPTION_PRIMARY_KEY=<GENERATE>

View File

@@ -0,0 +1,53 @@
# Mastodon stack (web + streaming + sidekiq) behind nginx.
# nginx proxies mastodon.vish.gg to 127.0.0.1:3000 / 127.0.0.1:4000,
# so the app ports are published on loopback only (previously 0.0.0.0,
# which exposed the unauthenticated backend ports to the network).
services:
  redis:
    restart: unless-stopped
    image: redis:7-alpine
    networks:
      - internal_network
    volumes:
      - ./redis:/data

  web:
    # Rails/Puma web frontend.
    image: ghcr.io/mastodon/mastodon:v4.5.7
    restart: unless-stopped
    env_file: .env.production
    command: bundle exec puma -C config/puma.rb
    networks:
      - external_network
      - internal_network
    ports:
      # Loopback-only: nginx is the sole public entry point.
      - '127.0.0.1:3000:3000'
    depends_on:
      - redis
    volumes:
      - ./public/system:/mastodon/public/system

  streaming:
    # WebSocket streaming API (/api/v1/streaming in nginx).
    image: ghcr.io/mastodon/mastodon-streaming:v4.5.7
    restart: unless-stopped
    env_file: .env.production
    networks:
      - external_network
      - internal_network
    ports:
      - '127.0.0.1:4000:4000'
    depends_on:
      - redis

  sidekiq:
    # Background job worker (mail, federation delivery); no published ports.
    image: ghcr.io/mastodon/mastodon:v4.5.7
    restart: unless-stopped
    env_file: .env.production
    command: bundle exec sidekiq
    networks:
      - external_network
      - internal_network
    depends_on:
      - redis
    volumes:
      - ./public/system:/mastodon/public/system

networks:
  external_network:
  internal_network:
    internal: true

View File

@@ -0,0 +1,36 @@
{
"default_server_config": {
"m.homeserver": {
"base_url": "https://mx.vish.gg",
"server_name": "mx.vish.gg"
},
"m.identity_server": {
"base_url": "https://vector.im"
}
},
"disable_custom_urls": false,
"disable_guests": true,
"disable_login_language_selector": false,
"disable_3pid_login": false,
"brand": "Element",
"integrations_ui_url": "https://scalar.vector.im/",
"integrations_rest_url": "https://scalar.vector.im/api",
"integrations_widgets_urls": [
"https://scalar.vector.im/_matrix/integrations/v1",
"https://scalar.vector.im/api",
"https://scalar-staging.vector.im/_matrix/integrations/v1",
"https://scalar-staging.vector.im/api",
"https://scalar-staging.riot.im/scalar/api"
],
"default_country_code": "US",
"show_labs_settings": true,
"features": {},
"default_federate": true,
"default_theme": "dark",
"room_directory": {
"servers": ["mx.vish.gg", "matrix.org"]
},
"enable_presence_by_hs_url": {
"https://mx.vish.gg": true
}
}

View File

@@ -0,0 +1,69 @@
# Matrix Synapse Homeserver Configuration Template
# Copy to /opt/synapse-mx/homeserver.yaml and customize
#
# This is the PRIMARY federated server (mx.vish.gg)
# For legacy server config, see homeserver-legacy.yaml.template
server_name: "mx.vish.gg"
pid_file: /opt/synapse-mx/homeserver.pid
public_baseurl: https://mx.vish.gg/
# Single plain-HTTP listener on 8018; TLS is terminated by nginx/Cloudflare
# in front of it (x_forwarded: trust the proxy's X-Forwarded-For header).
listeners:
  - port: 8018
    tls: false
    type: http
    x_forwarded: true
    resources:
      - names: [client, federation]
        compress: false
database:
  name: psycopg2
  args:
    user: synapse_mx
    password: "REDACTED_PASSWORD"
    database: synapse_mx
    host: localhost
    cp_min: 5
    cp_max: 10
log_config: "/opt/synapse-mx/mx.vish.gg.log.config"
media_store_path: /opt/synapse-mx/media_store
signing_key_path: "/opt/synapse-mx/mx.vish.gg.signing.key"
trusted_key_servers:
  - server_name: "matrix.org"
# Generate secrets with: python3 -c "import secrets; print(secrets.token_urlsafe(32))"
registration_shared_secret: "<GENERATE_SECRET>"
macaroon_secret_key: "<GENERATE_SECRET>"
form_secret: "<GENERATE_SECRET>"
# NOTE(review): open registration without any verification on a federated
# server is a spam magnet — confirm this is intentional, or gate sign-ups
# (registration tokens / email verification).
enable_registration: true
enable_registration_without_verification: true
max_upload_size: 100M
# URL previews: block private/internal ranges so the preview fetcher
# cannot be used to probe the local network (SSRF).
url_preview_enabled: true
url_preview_ip_range_blacklist:
  - '127.0.0.0/8'
  - '10.0.0.0/8'
  - '172.16.0.0/12'
  - '192.168.0.0/16'
  - '100.64.0.0/10'
  - '169.254.0.0/16'
  - '::1/128'
  - 'fe80::/64'
  - 'fc00::/7'
report_stats: false
suppress_key_server_warning: true
# TURN server for voice/video calls — port and shared secret must match
# /etc/turnserver.conf (see turnserver template in this repo).
turn_uris:
  - "turn:mx.vish.gg:3479?transport=udp"
  - "turn:mx.vish.gg:3479?transport=tcp"
turn_shared_secret: "<TURN_SHARED_SECRET>"
turn_user_lifetime: 86400000
turn_allow_guests: true
enable_3pid_changes: true

View File

@@ -0,0 +1,33 @@
# TURN Server Configuration (coturn)
# Copy to /etc/turnserver.conf
# Ports — listening-port must match the port in homeserver.yaml turn_uris (3479)
listening-port=3479
tls-listening-port=5350
listening-ip=0.0.0.0
# External IP for NAT traversal
# Format: external-ip=<public-ip>/<internal-ip>
external-ip=YOUR_WAN_IP/192.168.0.154
# Authentication
fingerprint
use-auth-secret
# Must match turn_shared_secret in homeserver.yaml
static-auth-secret=<TURN_SHARED_SECRET>
# NOTE(review): realm names the legacy domain, but the homeserver template's
# turn_uris point at mx.vish.gg — confirm which homeserver this instance serves.
realm=matrix.thevish.io
# Quotas
total-quota=100
bps-capacity=0
stale-nonce=600
# Security
no-multicast-peers
# Media relay ports (must be forwarded through firewall)
min-port=49201
max-port=49250
# Logging
log-file=/var/log/turnserver.log
verbose

View File

@@ -0,0 +1,27 @@
# Mattermost Team Edition behind nginx (mm.crista.love, nginx port 8081).
services:
  mattermost:
    container_name: mattermost
    image: mattermost/mattermost-team-edition:11.3
    restart: unless-stopped
    ports:
      - "8065:8065"
    volumes:
      - ./config:/mattermost/config
      - ./data:/mattermost/data
      - ./logs:/mattermost/logs
      - ./plugins:/mattermost/plugins
      - ./client/plugins:/mattermost/client/plugins
      - ./bleve-indexes:/mattermost/bleve-indexes
    environment:
      - TZ=UTC
      - MM_SQLSETTINGS_DRIVERNAME=postgres
      # DB password is injected from the host env / .env file (MM_DB_PASSWORD);
      # 172.17.0.1 is the Docker bridge gateway, i.e. host PostgreSQL.
      - MM_SQLSETTINGS_DATASOURCE=postgres://mmuser:${MM_DB_PASSWORD}@172.17.0.1:5432/mattermost?sslmode=disable
      - MM_SERVICESETTINGS_SITEURL=https://mm.crista.love
      # Authentik OpenID Connect SSO - keeps local login working
      - MM_OPENIDSETTINGS_ENABLE=true
      - MM_OPENIDSETTINGS_BUTTONTEXT=Sign in with Authentik
      - MM_OPENIDSETTINGS_BUTTONCOLOR=#fd4b2d
      - MM_OPENIDSETTINGS_DISCOVERYSETTINGS_DISCOVERURL=https://sso.vish.gg/application/o/mattermost/.well-known/openid-configuration
      # NOTE(review): the OIDC client ID/secret are committed in plaintext —
      # move them to the env file like MM_DB_PASSWORD and rotate the secret.
      - MM_OPENIDSETTINGS_ID=OGxIdZLKqYKgf9Sf9zAFAyhKzBdDvonL7HHSBu1w
      - MM_OPENIDSETTINGS_SECRET=Dzi2iOFXMyzXrvbT2ZDSdqYYg6c6bX39mFihX4h20WKEV0lHBnKfF5bb6KWDH2P9HhlTpl1KPB5LbE9GYuJqGoTXO6aXWiNJJhqrCgJX2eaFRtne2J72mz4TfTxxKBCM
      - MM_OPENIDSETTINGS_SCOPE=openid profile email

View File

@@ -0,0 +1,118 @@
# mastodon.vish.gg — Mastodon web UI + streaming API.
# nginx listens on 8082; Cloudflare origin rules route the domain here.

# Map for WebSocket upgrades: pass "upgrade" through, close when absent.
map $http_upgrade $connection_upgrade {
    default upgrade;
    '' close;
}

upstream mastodon_backend {
    server 127.0.0.1:3000 fail_timeout=0;
}

upstream mastodon_streaming {
    server 127.0.0.1:4000 fail_timeout=0;
}

server {
    listen 8082;
    listen [::]:8082;
    server_name mastodon.vish.gg;

    keepalive_timeout 70;
    sendfile on;
    client_max_body_size 80m;

    # Static assets are served straight from the Mastodon public/ directory;
    # anything not found on disk falls through to the Rails backend (@proxy).
    root /opt/mastodon/public;

    gzip on;
    gzip_disable "msie6";
    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;
    gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript image/svg+xml image/x-icon;

    location / {
        try_files $uri @proxy;
    }

    # Service worker: short cache so updates roll out quickly.
    location /sw.js {
        add_header Cache-Control "public, max-age=604800, must-revalidate";
        try_files $uri =404;
    }

    # Fingerprinted static asset paths: long-lived cache (28 days).
    location ~ ^/assets/ {
        add_header Cache-Control "public, max-age=2419200, must-revalidate";
        try_files $uri =404;
    }
    location ~ ^/avatars/ {
        add_header Cache-Control "public, max-age=2419200, must-revalidate";
        try_files $uri =404;
    }
    location ~ ^/emoji/ {
        add_header Cache-Control "public, max-age=2419200, must-revalidate";
        try_files $uri =404;
    }
    location ~ ^/headers/ {
        add_header Cache-Control "public, max-age=2419200, must-revalidate";
        try_files $uri =404;
    }
    location ~ ^/packs/ {
        add_header Cache-Control "public, max-age=2419200, must-revalidate";
        try_files $uri =404;
    }
    location ~ ^/shortcuts/ {
        add_header Cache-Control "public, max-age=2419200, must-revalidate";
        try_files $uri =404;
    }
    location ~ ^/sounds/ {
        add_header Cache-Control "public, max-age=2419200, must-revalidate";
        try_files $uri =404;
    }
    location ~ ^/system/ {
        add_header Cache-Control "public, max-age=2419200, immutable";
        try_files $uri =404;
    }

    # Streaming API: WebSocket proxy to the node streaming server.
    # "^~" makes this prefix win over the regex locations above.
    location ^~ /api/v1/streaming {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        proxy_pass http://mastodon_streaming;
        proxy_buffering off;
        proxy_redirect off;
        proxy_http_version 1.1;
        tcp_nodelay on;
    }

    # Fallback: everything else goes to the Rails backend (Puma).
    location @proxy {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Proxy "";
        proxy_pass_header Server;
        proxy_pass http://mastodon_backend;
        proxy_buffering on;
        proxy_redirect off;
        proxy_http_version 1.1;
        proxy_cache_bypass $http_upgrade;
        tcp_nodelay on;
    }

    # NOTE(review): upstream Mastodon maps 404 to /404.html and only 5xx to
    # /500.html — lumping 404 here shows the wrong error page; confirm intent.
    error_page 404 500 501 502 503 504 /500.html;
}

View File

@@ -0,0 +1,54 @@
# matrix.thevish.io - Legacy Matrix server (no federation, historical data)
server {
    listen 8081;
    listen [::]:8081;
    server_name matrix.thevish.io;

    # Element Web client
    root /opt/element/web-thevish;
    index index.html;

    # Health check
    location /health {
        proxy_pass http://127.0.0.1:8008;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header Host $host;
    }

    # Client-Server API
    location ~ ^(/_matrix|/_synapse/client) {
        proxy_pass http://127.0.0.1:8008;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header Host $host;
        client_max_body_size 100M;
        proxy_http_version 1.1;
    }

    # Federation API (won't work due to server_name being "vish")
    # NOTE(review): this plain prefix location is shadowed by the regex
    # location above (nginx: regex beats non-^~ prefix), so it never
    # matches; harmless here since both proxy to the same backend.
    location /_matrix/federation {
        proxy_pass http://127.0.0.1:8008;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header Host $host;
        client_max_body_size 100M;
    }

    # Well-known (for reference, federation won't work)
    location /.well-known/matrix/server {
        default_type application/json;
        return 200 '{"m.server": "matrix.thevish.io:443"}';
    }
    location /.well-known/matrix/client {
        default_type application/json;
        add_header Access-Control-Allow-Origin *;
        return 200 '{"m.homeserver": {"base_url": "https://matrix.thevish.io"}}';
    }

    # Element static files
    location / {
        try_files $uri $uri/ /index.html;
    }
}

View File

@@ -0,0 +1,54 @@
# mx.vish.gg - Primary Matrix server (federation enabled)
server {
    listen 8082;
    listen [::]:8082;
    server_name mx.vish.gg;

    # Element Web client
    root /opt/element/web;
    index index.html;

    # Health check
    location /health {
        proxy_pass http://127.0.0.1:8018;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header Host $host;
    }

    # Client-Server API
    location ~ ^(/_matrix|/_synapse/client) {
        proxy_pass http://127.0.0.1:8018;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header Host $host;
        client_max_body_size 100M;
        proxy_http_version 1.1;
    }

    # Federation API
    # NOTE(review): this plain prefix location is shadowed by the regex
    # location above (nginx: regex beats non-^~ prefix), so federation
    # traffic is actually handled there; behavior is the same backend,
    # but mark with "^~" if this block is meant to take precedence.
    location /_matrix/federation {
        proxy_pass http://127.0.0.1:8018;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header Host $host;
        client_max_body_size 100M;
    }

    # Well-known for federation
    location /.well-known/matrix/server {
        default_type application/json;
        return 200 '{"m.server": "mx.vish.gg:443"}';
    }
    location /.well-known/matrix/client {
        default_type application/json;
        add_header Access-Control-Allow-Origin *;
        return 200 '{"m.homeserver": {"base_url": "https://mx.vish.gg"}}';
    }

    # Element static files
    location / {
        try_files $uri $uri/ /index.html;
    }
}

View File

@@ -0,0 +1,41 @@
# mm.crista.love — Mattermost reverse proxy (nginx port 8081).
upstream mattermost {
    server 127.0.0.1:8065;
    keepalive 32;
}

server {
    listen 8081;
    listen [::]:8081;
    server_name mm.crista.love;

    # WebSocket endpoint (/api/vN/websocket, with or without the users/ prefix):
    # needs Upgrade/Connection headers and a long read timeout.
    location ~ /api/v[0-9]+/(users/)?websocket$ {
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        client_max_body_size 50M;
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Frame-Options SAMEORIGIN;
        proxy_buffers 256 16k;
        proxy_buffer_size 16k;
        proxy_read_timeout 600s;
        proxy_http_version 1.1;
        proxy_pass http://mattermost;
    }

    # Everything else: plain HTTP proxy with keepalive to the upstream
    # (empty Connection header enables upstream keepalive).
    location / {
        client_max_body_size 100M;
        proxy_set_header Connection "";
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Frame-Options SAMEORIGIN;
        proxy_buffers 256 16k;
        proxy_buffer_size 16k;
        proxy_read_timeout 600s;
        proxy_http_version 1.1;
        proxy_pass http://mattermost;
    }
}

View File

@@ -0,0 +1,30 @@
#!/bin/bash
# Homelab backup: dumps each PostgreSQL database and archives each service's
# data directory into a timestamped folder under /backup/.
# Individual tar steps are best-effort (|| true) so one missing directory
# does not abort the whole run; DB dumps are mandatory and abort on failure.
set -euo pipefail

BACKUP_DIR="/backup/$(date +%Y%m%d_%H%M%S)"
mkdir -p "$BACKUP_DIR"

echo "=== Homelab Backup ==="
echo "Backup directory: $BACKUP_DIR"

# Backup PostgreSQL databases
echo "[1/4] Backing up PostgreSQL databases..."
sudo -u postgres pg_dump mattermost > "$BACKUP_DIR/mattermost.sql"
sudo -u postgres pg_dump synapse > "$BACKUP_DIR/synapse.sql"
sudo -u postgres pg_dump mastodon_production > "$BACKUP_DIR/mastodon.sql"
# The second homeserver's DB (synapse_mx, mx.vish.gg) was previously never
# backed up. It may not exist on every host, so dump it only when present.
if sudo -u postgres psql -lqt | cut -d '|' -f 1 | grep -qw synapse_mx; then
    sudo -u postgres pg_dump synapse_mx > "$BACKUP_DIR/synapse_mx.sql"
fi

# Backup Mastodon media
echo "[2/4] Backing up Mastodon media..."
tar -czf "$BACKUP_DIR/mastodon_media.tar.gz" -C /opt/mastodon public/system 2>/dev/null || true

# Backup Mattermost data
echo "[3/4] Backing up Mattermost data..."
tar -czf "$BACKUP_DIR/mattermost_data.tar.gz" -C /opt/mattermost data config 2>/dev/null || true

# Backup Matrix/Synapse
echo "[4/4] Backing up Matrix data..."
tar -czf "$BACKUP_DIR/synapse_data.tar.gz" -C /opt synapse 2>/dev/null || true

echo ""
echo "Backup complete: $BACKUP_DIR"
ls -lh "$BACKUP_DIR"

Some files were not shown because too many files have changed in this diff Show More