Sanitized mirror from private repository - 2026-03-09 11:59:35 UTC
This commit is contained in:
0
hosts/vms/bulgaria-vm/.gitkeep
Normal file
0
hosts/vms/bulgaria-vm/.gitkeep
Normal file
20
hosts/vms/bulgaria-vm/droppy.yml
Normal file
20
hosts/vms/bulgaria-vm/droppy.yml
Normal file
---
# Droppy - self-hosted file sharing
# Host port 8989 -> container 8989
version: '3.8'

services:
  droppy:
    container_name: droppy
    image: silverwind/droppy
    ports:
      - "8989:8989"
    volumes:
      - /root/docker/droppy/config/:/config
      - /root/docker/droppy/files/:/files
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8989"]
      interval: 30s
      timeout: 10s
      retries: 5
24
hosts/vms/bulgaria-vm/fenrus.yml
Normal file
24
hosts/vms/bulgaria-vm/fenrus.yml
Normal file
---
# Fenrus - application dashboard
# Host port 35000 -> container 3000
version: '3.8'

services:
  fenrus:
    image: revenz/fenrus
    container_name: fenrus
    environment:
      - TZ=America/Los_Angeles
    volumes:
      - /root/docker/fenrus/data:/app/data
      - /root/docker/fenrus/images:/app/wwwroot/images
    ports:
      - "35000:3000"
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
45
hosts/vms/bulgaria-vm/hemmelig.yml
Normal file
45
hosts/vms/bulgaria-vm/hemmelig.yml
Normal file
---
# Hemmelig - self-destructing secret sharing
# Host port 3000 -> container 3000

services:
  hemmelig:
    image: hemmeligapp/hemmelig:latest
    hostname: hemmelig
    init: true
    volumes:
      - /root/docker/hem/files/:/var/tmp/hemmelig/upload/files  # uploaded files
    environment:
      - SECRET_REDIS_HOST=hemmelig-redis  # hostname of the redis service below
      - SECRET_LOCAL_HOSTNAME=0.0.0.0  # bind address for the Fastify instance
      - SECRET_PORT=3000
      - SECRET_HOST=  # intentionally empty; used e.g. for setting CORS to your domain
      - SECRET_DISABLE_USERS=false
      - SECRET_ENABLE_FILE_UPLOAD=true
      - SECRET_FILE_SIZE=4  # max upload size in MB
      - SECRET_FORCED_LANGUAGE=en
      # SECURITY(review): JWT signing secret is hard-coded in a VCS-tracked
      # file; move it to an env file or a secret store and rotate it.
      - SECRET_JWT_SECRET=REDACTED_PASSWORD123!
      - SECRET_MAX_TEXT_SIZE=256  # max secret size in KB
    ports:
      - "3000:3000"
    depends_on:
      - redis  # ensure Redis starts before Hemmelig
    restart: unless-stopped
    stop_grace_period: 1m
    healthcheck:
      test: "wget -O /dev/null localhost:3000 || exit 1"
      timeout: 5s
      retries: 1

  redis:
    image: redis
    hostname: hemmelig-redis
    init: true
    volumes:
      # FIX: was "./root/docker/hem/redis/" (project-relative). Every other
      # mount in this stack uses an absolute /root/docker/... path, so the
      # leading "./" was almost certainly a typo and Redis data landed in a
      # compose-project-relative directory instead of the host data dir.
      - /root/docker/hem/redis/:/data
    command: redis-server --appendonly yes  # persist via append-only file
    restart: unless-stopped
    stop_grace_period: 1m
    healthcheck:
      test: "redis-cli ping | grep PONG || exit 1"
      timeout: 5s
      retries: 1
60
hosts/vms/bulgaria-vm/invidious.yml
Normal file
60
hosts/vms/bulgaria-vm/invidious.yml
Normal file
---
# Invidious - privacy-respecting YouTube frontend
# Host 94.72.140.37:7601 -> container 3000
version: "3.9"
services:
  invidious-db:
    image: postgres
    container_name: Invidious-DB
    hostname: invidious-db
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-d", "invidious", "-U", "kemal"]
      timeout: 45s
      interval: 10s
      retries: 10
    user: 0:0
    volumes:
      # NOTE(review): /volume1/... is a Synology-style path, unlike the
      # /root/docker/... mounts used elsewhere on this VM — confirm it exists.
      - /volume1/docker/invidiousdb:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: invidious
      POSTGRES_USER: kemal
      POSTGRES_PASSWORD: "REDACTED_PASSWORD"
    restart: unless-stopped

  invidious:
    image: quay.io/invidious/invidious:latest
    container_name: Invidious
    hostname: invidious
    user: 0:0
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: wget -nv --tries=1 --spider http://127.0.0.1:3000/api/v1/comments/jNQXAC9IVRw || exit 1
      interval: 30s
      timeout: 5s
      retries: 2
    ports:
      # FIX: quoted — a colon-separated host-IP binding should always be a
      # string so YAML cannot mis-type it (Compose docs recommend quoting
      # port mappings).
      - "94.72.140.37:7601:3000"
    environment:
      # Inline Invidious configuration, passed verbatim as a block scalar.
      # The db password here must match POSTGRES_PASSWORD above.
      INVIDIOUS_CONFIG: |
        db:
          dbname: invidious
          user: kemal
          password: "REDACTED_PASSWORD"
          host: invidious-db
          port: 5432
        check_tables: true
        captcha_enabled: false
        default_user_preferences:
          locale: us
          region: US
        external_port: 7601
        domain: invidious.vish.gg
        https_only: true
    restart: unless-stopped
    depends_on:
      invidious-db:
        condition: service_healthy  # wait for pg_isready, not just start
54
hosts/vms/bulgaria-vm/mattermost.yml
Normal file
54
hosts/vms/bulgaria-vm/mattermost.yml
Normal file
---
# Mattermost - self-hosted team collaboration (Slack alternative)
# Host port 8401 -> container 8065
version: "3.9"
services:
  mattermost-db:
    image: postgres
    container_name: Mattermost-DB
    hostname: mattermost-db
    security_opt:
      - no-new-privileges:true
    pids_limit: 100
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-d", "mattermost", "-U", "mattermostuser"]
      interval: 10s
      timeout: 5s
      retries: 5
    user: 0:0
    volumes:
      - /root/docker/mattermost/db:/var/lib/postgresql/data
    environment:
      - POSTGRES_DB=mattermost
      - POSTGRES_USER=mattermostuser
      # FIX: was POSTGRES_PASSWORD="REDACTED_PASSWORD" — in list-form env
      # entries the double quotes are LITERAL and become part of the
      # password value.
      - POSTGRES_PASSWORD=REDACTED_PASSWORD
      - TZ=America/Los_Angeles
    restart: unless-stopped

  mattermost:
    image: mattermost/mattermost-team-edition:latest
    container_name: Mattermost
    hostname: mattermost
    security_opt:
      - no-new-privileges:true
    pids_limit: 200
    user: 0:0
    volumes:
      - /root/docker/mattermost/config:/mattermost/config:rw
      - /root/docker/mattermost/data:/mattermost/data:rw
      - /root/docker/mattermost/logs:/mattermost/logs:rw
      - /root/docker/mattermost/plugins:/mattermost/plugins:rw
      - /root/docker/mattermost/client:/mattermost/client/plugins:rw
      - /root/docker/mattermost/indexes:/mattermost/bleve-indexes:rw
    environment:
      - TZ=America/Los_Angeles
      - MM_SQLSETTINGS_DRIVERNAME=postgres
      # NOTE(review): the password embedded here ("mattermostpw") must match
      # POSTGRES_PASSWORD above — confirm after rotating the redacted secret.
      - MM_SQLSETTINGS_DATASOURCE=postgres://mattermostuser:mattermostpw@mattermost-db:5432/mattermost?sslmode=disable&connect_timeout=10
      - MM_BLEVESETTINGS_INDEXDIR=/mattermost/bleve-indexes
      - MM_SERVICESETTINGS_SITEURL=https://mm.vish.gg
    ports:
      - "8401:8065"
    restart: unless-stopped
    depends_on:
      mattermost-db:
        condition: service_healthy  # wait for pg_isready, not just start
14
hosts/vms/bulgaria-vm/metube.yml
Normal file
14
hosts/vms/bulgaria-vm/metube.yml
Normal file
---
# MeTube - web GUI for yt-dlp
# Host port 8871 -> container 8081
version: "3"
services:
  metube:
    image: alexta69/metube
    container_name: metube
    restart: unless-stopped
    ports:
      - "8871:8081"
    volumes:
      - /root/docker/yt:/downloads  # downloaded media lands here
21
hosts/vms/bulgaria-vm/navidrome.yml
Normal file
21
hosts/vms/bulgaria-vm/navidrome.yml
Normal file
---
# Navidrome - personal music streaming server
# Host port 4533 -> container 4533
version: "3"
services:
  navidrome:
    image: deluan/navidrome:latest
    user: 0:0  # should be owner of the mounted volumes
    ports:
      - "4533:4533"
    restart: unless-stopped
    environment:
      # Optional: config option overrides. Examples:
      ND_SCANSCHEDULE: 1h
      ND_LOGLEVEL: info
      ND_SESSIONTIMEOUT: 24h
      ND_BASEURL: ""
    volumes:
      - "/root/docker/navidrome:/data"
      - "/root/plex/:/music:ro"  # music library, mounted read-only
16
hosts/vms/bulgaria-vm/nginx_proxy_manager.yml
Normal file
16
hosts/vms/bulgaria-vm/nginx_proxy_manager.yml
Normal file
---
# Nginx Proxy Manager - reverse-proxy management
# Admin UI: host 8181 -> container 81; proxied traffic on 80/443
version: '3'
services:
  app:
    image: 'jc21/nginx-proxy-manager:latest'
    restart: unless-stopped
    ports:
      - '80:80'
      - '8181:81'
      - '443:443'
    volumes:
      - ./data:/data  # project-relative app data
      - ./letsencrypt:/etc/letsencrypt  # certificates
15
hosts/vms/bulgaria-vm/rainloop.yml
Normal file
15
hosts/vms/bulgaria-vm/rainloop.yml
Normal file
---
# RainLoop - simple webmail client
# Host port 8080 -> container 80
# NOTE(review): the original header advertised port 8888, but the mapping
# below publishes 8080 — confirm which is intended.
version: '3'

services:
  rainloop:
    image: wernerfred/docker-rainloop:latest
    container_name: docker-rainloop
    restart: unless-stopped
    ports:
      - "8080:80"
    volumes:
      - /opt/docker-rainloop/data:/rainloop/data
23
hosts/vms/bulgaria-vm/syncthing.yml
Normal file
23
hosts/vms/bulgaria-vm/syncthing.yml
Normal file
---
# Syncthing - continuous file synchronization between devices
# Ports: 8384 (web UI), 22000/tcp+udp (sync), 21027/udp (discovery)
version: "2.1"
services:
  syncthing:
    image: lscr.io/linuxserver/syncthing:latest
    container_name: syncthing
    hostname: syncthing  # optional
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Los_Angeles
    volumes:
      - /root/docker/syncthing/config:/config
      # FIX: the two data entries had no container path ("- /root/..."):
      # that syntax declares an anonymous volume INSIDE the container at
      # that path and mounts nothing from the host. Mapped them to the
      # /data1 and /data2 targets the linuxserver image documents.
      - /root/docker/syncthing/data1:/data1
      - /root/docker/syncthing/data2:/data2
    ports:
      - "8384:8384"
      - "22000:22000/tcp"
      - "22000:22000/udp"
      - "21027:21027/udp"
    restart: unless-stopped
19
hosts/vms/bulgaria-vm/watchtower.yml
Normal file
19
hosts/vms/bulgaria-vm/watchtower.yml
Normal file
---
# Watchtower - container update notifier for Bulgaria VM (schedule disabled - GitOps managed)
# Auto-update schedule removed; image updates are handled via Renovate PRs.
# Manual update trigger: POST http://localhost:8080/v1/update
#   Header: Authorization: Bearer watchtower-metrics-token
version: "3"
services:
  watchtower:
    image: containrrr/watchtower:latest
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro  # read-only Docker API access
    environment:
      - WATCHTOWER_CLEANUP=true
      - WATCHTOWER_HTTP_API_UPDATE=true
      - WATCHTOWER_HTTP_API_METRICS=true
      # FIX: was WATCHTOWER_HTTP_API_TOKEN="REDACTED_HTTP_TOKEN" — in
      # list-form env entries the double quotes are literal, so the API
      # token would contain quote characters and the Bearer-auth trigger
      # documented above would never match.
      - WATCHTOWER_HTTP_API_TOKEN=REDACTED_HTTP_TOKEN
      - TZ=America/Los_Angeles
    restart: unless-stopped
    labels:
      - "com.centurylinklabs.watchtower.enable=false"  # exclude watchtower itself
61
hosts/vms/bulgaria-vm/yourspotify.yml
Normal file
61
hosts/vms/bulgaria-vm/yourspotify.yml
Normal file
---
# your_spotify - self-hosted Spotify listening statistics
# server: host 15000 -> container 8080; web UI: host 4000 -> container 3000
version: "3"

services:
  server:
    image: yooooomi/your_spotify_server
    restart: unless-stopped
    ports:
      - "15000:8080"
    # FIX: dropped the legacy `links:` entry — it was redundant with
    # depends_on plus the default compose network (services already resolve
    # each other by name), and `links` is deprecated.
    depends_on:
      - mongo
    environment:
      - API_ENDPOINT=http://vish.gg:15000  # MUST be a valid redirect URL in the Spotify dashboard
      - CLIENT_ENDPOINT=http://vish.gg:4000
      # SECURITY(review): Spotify app credentials are committed in plain
      # text; rotate them and load from an env file / secret store.
      - SPOTIFY_PUBLIC=d6b3bda999f042099ce79a8b6e9f9e68
      - SPOTIFY_SECRET=72c650e7a25f441baa245b963003a672
      - CORS=http://vish.gg:4000,http://vish.gg:4001  # "all" would allow every origin

  mongo:
    container_name: mongo
    image: mongo:4.4.8
    volumes:
      - ./your_spotify_db:/data/db  # project-relative database directory

  web:
    image: yooooomi/your_spotify_client
    restart: unless-stopped
    ports:
      - "4000:3000"
    environment:
      - API_ENDPOINT=http://vish.gg:15000
0
hosts/vms/chicago-vm/.gitkeep
Normal file
0
hosts/vms/chicago-vm/.gitkeep
Normal file
11
hosts/vms/chicago-vm/factorio.yml
Normal file
11
hosts/vms/chicago-vm/factorio.yml
Normal file
---
# Factorio - dedicated game server
# Ports: 34197/udp (game), 27015/tcp
# FIX: the original file contained a raw `sudo docker run ...` shell
# command, which is not valid YAML despite the .yml extension; converted
# to the equivalent compose definition (same image, ports, volume, and
# restart policy).
version: '3'
services:
  factorio:
    image: factoriotools/factorio
    container_name: factorio
    ports:
      - "34197:34197/udp"
      - "27015:27015/tcp"
    volumes:
      - /root/factorio:/factorio
    restart: always
22
hosts/vms/chicago-vm/gitlab.yml
Normal file
22
hosts/vms/chicago-vm/gitlab.yml
Normal file
---
# GitLab CE - self-hosted Git and CI/CD platform
# Host 8929 -> container 8929 (HTTP); host 2224 -> container 22 (SSH)
version: '3.6'
services:
  web:
    image: 'gitlab/gitlab-ce:latest'
    restart: unless-stopped
    hostname: 'gl.thevish.io'
    environment:
      # Omnibus config, passed verbatim as a block scalar.
      # NOTE(review): external_url host (glssh.thevish.io) differs from the
      # `hostname` above (gl.thevish.io) — confirm which is intended.
      GITLAB_OMNIBUS_CONFIG: |
        external_url 'http://glssh.thevish.io:8929'
        gitlab_rails['gitlab_shell_ssh_port'] = 2224
    ports:
      - '8929:8929'
      - '2224:22'
    volumes:
      # $GITLAB_HOME is expanded from the compose environment at deploy time.
      - '$GITLAB_HOME/config:/etc/gitlab'
      - '$GITLAB_HOME/logs:/var/log/gitlab'
      - '$GITLAB_HOME/data:/var/opt/gitlab'
    shm_size: '256m'
19
hosts/vms/chicago-vm/jdownloader2.yml
Normal file
19
hosts/vms/chicago-vm/jdownloader2.yml
Normal file
---
# JDownloader2 - multi-host download manager
# Host mappings: 53578 -> 5800, 13016 -> 5900, 20123 -> 3129
version: '3.9'
services:
  jdownloader-2:
    container_name: jdownloader2
    image: jlesage/jdownloader-2
    restart: unless-stopped
    environment:
      - TZ=America/Los_Angeles
    volumes:
      - /root/docker/j2/output:/output  # finished downloads
      - /root/docker/j2/config:/config
    ports:
      - "13016:5900"
      - "53578:5800"
      - "20123:3129"
27
hosts/vms/chicago-vm/jellyfin.yml
Normal file
27
hosts/vms/chicago-vm/jellyfin.yml
Normal file
---
# Jellyfin - free media streaming server
# Host port 8096 (primary); 8920, 7359/udp, 1900/udp optional
version: '3.5'
services:
  jellyfin:
    image: jellyfin/jellyfin
    container_name: jellyfin
    user: 0:0
    volumes:
      - /root/jellyfin/config:/config
      - /root/jellyfin/cache:/cache
      - /root/jellyfin/media:/media
      - /root/jellyfin/media2:/media2:ro  # second library, read-only
    restart: 'unless-stopped'
    environment:
      # Optional - alternative address used for autodiscovery
      - JELLYFIN_PublishedServerUrl=http://stuff.thevish.io
    # Optional - may be necessary for docker healthcheck to pass if running
    # in host network mode
    ports:
      - "8096:8096"
      - "8920:8920"      # optional
      - "7359:7359/udp"  # optional
      - "1900:1900/udp"  # optional
    extra_hosts:
      - "host.docker.internal:host-gateway"
44
hosts/vms/chicago-vm/matrix.yml
Normal file
44
hosts/vms/chicago-vm/matrix.yml
Normal file
---
# Matrix Synapse - federated Matrix homeserver
# Host port 8500 -> container 8008
version: "3.9"
services:
  synapse-db:
    image: postgres
    container_name: Synapse-DB
    hostname: synapse-db
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-d", "synapsedb", "-U", "synapseuser"]
      timeout: 45s
      interval: 10s
      retries: 10
    volumes:
      # FIX: was "/root/docker/db//var/lib/postgresql/data" — a single
      # container path with no ':' separator, so Postgres wrote to an
      # anonymous volume and nothing persisted to the host.
      - /root/docker/db:/var/lib/postgresql/data
    environment:
      - POSTGRES_DB=synapsedb
      - POSTGRES_USER=synapseuser
      # FIX: was POSTGRES_PASSWORD="REDACTED_PASSWORD" — in list-form env
      # entries the double quotes are literal and become part of the value.
      - POSTGRES_PASSWORD=REDACTED_PASSWORD
      - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
    restart: unless-stopped

  synapse:
    image: matrixdotorg/synapse:latest
    container_name: Synapse
    hostname: synapse
    security_opt:
      - no-new-privileges:true
    environment:
      - TZ=America/Los_Angeles
      - SYNAPSE_CONFIG_PATH=/data/homeserver.yaml
    volumes:
      - /root/docker/data:/data
    ports:
      - "8500:8008/tcp"
    restart: unless-stopped
    depends_on:
      synapse-db:
        # FIX: was service_started even though the DB defines a healthcheck;
        # service_healthy matches the invidious/mattermost stacks and keeps
        # Synapse from racing a not-yet-ready Postgres.
        condition: service_healthy
32
hosts/vms/chicago-vm/neko.yml
Normal file
32
hosts/vms/chicago-vm/neko.yml
Normal file
---
# n.eko rooms - virtual browsers in Docker for screen sharing
# Host port 8080 -> container 8080; WebRTC port range 59000-59049
version: "3.5"

networks:
  default:
    attachable: true
    name: "neko-rooms-net"

services:
  neko-rooms:
    image: "m1k1o/neko-rooms:latest"
    restart: "unless-stopped"
    environment:
      - "TZ=America/Los_Angeles"
      - "NEKO_ROOMS_MUX=true"
      - "NEKO_ROOMS_EPR=59000-59049"
      - "NEKO_ROOMS_NAT1TO1=74.91.118.242"  # server IP reachable from clients
      - "NEKO_ROOMS_INSTANCE_URL=https://showtime.vish.gg/"  # external URL
      - "NEKO_ROOMS_STORAGE_ENABLED=true"
      - "NEKO_ROOMS_STORAGE_INTERNAL=/data"
      - "NEKO_ROOMS_STORAGE_EXTERNAL=/opt/neko-rooms/data"
      - "NEKO_ROOMS_INSTANCE_NETWORK=neko-rooms-net"
      - "NEKO_ROOMS_TRAEFIK_ENABLED=false"
      - "NEKO_ROOMS_PATH_PREFIX=/room/"
    ports:
      - "8080:8080"
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"  # Docker API access (grants host-level control)
      - "/opt/neko-rooms/data:/data"
69
hosts/vms/chicago-vm/proxitok.yml
Normal file
69
hosts/vms/chicago-vm/proxitok.yml
Normal file
---
# ProxiTok - privacy-respecting TikTok frontend
# Host port 9770 -> container 8080
version: '3'

services:
  web:
    container_name: proxitok-web
    image: ghcr.io/pablouser1/proxitok:master
    # FIX: added a restart policy — redis already had one, but web and
    # signer would stay down after a daemon restart or host reboot.
    restart: unless-stopped
    ports:
      - "9770:8080"
    environment:
      - LATTE_CACHE=/cache
      - API_CACHE=redis
      - REDIS_HOST=proxitok-redis
      - REDIS_PORT=6379
      - API_SIGNER=remote
      - API_SIGNER_URL=http://proxitok-signer:8080/signature
    volumes:
      - proxitok-cache:/cache
    depends_on:
      - redis
      - signer
    networks:
      - proxitok
    security_opt:
      - no-new-privileges:true
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID

  redis:
    container_name: proxitok-redis
    image: redis:7-alpine
    command: redis-server --save 60 1 --loglevel warning
    restart: unless-stopped
    networks:
      - proxitok
    user: nobody
    read_only: true
    security_opt:
      - no-new-privileges:true
    tmpfs:
      - /data:size=10M,mode=0770,uid=65534,gid=65534,noexec,nosuid,nodev
    cap_drop:
      - ALL

  signer:
    container_name: proxitok-signer
    image: ghcr.io/pablouser1/signtok:master
    init: true
    restart: unless-stopped  # FIX: see note on web
    networks:
      - proxitok
    user: nobody
    read_only: true
    security_opt:
      - no-new-privileges:true
    cap_drop:
      - ALL

volumes:
  proxitok-cache:

networks:
  proxitok:
19
hosts/vms/chicago-vm/watchtower.yml
Normal file
19
hosts/vms/chicago-vm/watchtower.yml
Normal file
---
# Watchtower - container update notifier for Chicago VM (schedule disabled - GitOps managed)
# Auto-update schedule removed; image updates are handled via Renovate PRs.
# Manual update trigger: POST http://localhost:8080/v1/update
#   Header: Authorization: Bearer watchtower-metrics-token
version: "3"
services:
  watchtower:
    image: containrrr/watchtower:latest
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro  # read-only Docker API access
    environment:
      - WATCHTOWER_CLEANUP=true
      - WATCHTOWER_HTTP_API_UPDATE=true
      - WATCHTOWER_HTTP_API_METRICS=true
      # FIX: was WATCHTOWER_HTTP_API_TOKEN="REDACTED_HTTP_TOKEN" — in
      # list-form env entries the double quotes are literal, so the API
      # token would contain quote characters and the Bearer-auth trigger
      # documented above would never match.
      - WATCHTOWER_HTTP_API_TOKEN=REDACTED_HTTP_TOKEN
      - TZ=America/Los_Angeles
    restart: unless-stopped
    labels:
      - "com.centurylinklabs.watchtower.enable=false"  # exclude watchtower itself
45
hosts/vms/contabo-vm/ollama/docker-compose.yml
Normal file
45
hosts/vms/contabo-vm/ollama/docker-compose.yml
Normal file
---
# Ollama - local LLM inference with Open WebUI frontend
# URL: https://ollama.vishconcord.synology.me
# WebUI: host 8271 -> container 8080; Ollama API: 11434
services:
  webui:
    container_name: OLLAMA-WEBUI
    image: ghcr.io/open-webui/open-webui:0.6
    volumes:
      - /root/docker/ollama/webui:/app/backend/data:rw
    environment:
      OLLAMA_BASE_URL: http://ollama:11434  # sibling service, resolved by name
      WEBUI_SECRET_KEY: "REDACTED_SECRET_KEY"
    healthcheck:
      # bash /dev/tcp probe against the UI port
      test: timeout 10s bash -c ':> /dev/tcp/127.0.0.1/8080' || exit 1
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 90s
    ports:
      - "8271:8080"
    restart: on-failure
    depends_on:
      ollama:
        condition: service_healthy

  ollama:
    container_name: OLLAMA
    image: ollama/ollama:latest
    entrypoint: ["/usr/bin/bash", "/entrypoint.sh"]  # custom model-pull wrapper
    volumes:
      - /root/docker/ollama/data:/root/.ollama:rw
      - /root/docker/ollama/entrypoint/entrypoint.sh:/entrypoint.sh
    environment:
      # Both keys carry the same model list; entrypoint.sh reads $MODELS.
      MODELS: codegemma:2b,codellama:7b,mistral:7b,llama3.2:3b
      OLLAMA_INSTALL_MODELS: codegemma:2b,codellama:7b,mistral:7b,llama3.2:3b
    ports:
      - "11434:11434"
    healthcheck:
      test: ["CMD", "ollama", "--version"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 30s
    restart: on-failure:5
24
hosts/vms/contabo-vm/ollama/entrypoint/entrypoint.sh
Normal file
24
hosts/vms/contabo-vm/ollama/entrypoint/entrypoint.sh
Normal file
#!/bin/bash
# Entrypoint for the Ollama container: starts the server, waits for it to
# accept TCP connections, pre-pulls the models listed in $MODELS
# (comma-separated), then keeps the server process in the foreground.
set -euo pipefail

# Start the Ollama server in the background.
/bin/ollama serve &
pid=$!

# Wait for Ollama to be ready using Bash's built-in /dev/tcp networking.
while ! timeout 1 bash -c "echo > /dev/tcp/localhost/11434" 2>/dev/null; do
  echo "Waiting for Ollama to start..."
  sleep 1
done
echo "Ollama started."

# Retrieve and install/update each model from $MODELS.
# FIX: ${MODELS:-} — with `set -u`, an unset MODELS previously aborted the
# script before the server was usable.
IFS=',' read -ra model_array <<< "${MODELS:-}"
for model in "${model_array[@]}"; do
  [ -n "$model" ] || continue  # skip empty entries (e.g. trailing comma)
  echo "Installing/Updating model $model..."
  # FIX: quoted "$model" — unquoted expansion is subject to word splitting
  # and pathname expansion.
  ollama pull "$model"
done
echo "All models installed/updated."

# Continue to main process: block on the server so the container stays up.
wait $pid
0
hosts/vms/homelab-vm/.gitkeep
Normal file
0
hosts/vms/homelab-vm/.gitkeep
Normal file
284
hosts/vms/homelab-vm/alerting.yaml
Normal file
284
hosts/vms/homelab-vm/alerting.yaml
Normal file
@@ -0,0 +1,284 @@
|
||||
# Alerting Stack - Alertmanager + Notification Bridges
|
||||
# =============================================================================
|
||||
# Dual-channel alerting: ntfy (mobile push) + Signal (encrypted messaging)
|
||||
# =============================================================================
|
||||
# Deployed via: Portainer GitOps
|
||||
# Ports: 9093 (Alertmanager), 5000 (signal-bridge), 5001 (ntfy-bridge)
|
||||
#
|
||||
# Alert Routing:
|
||||
# - Warning alerts → ntfy only
|
||||
# - Critical alerts → ntfy + Signal
|
||||
# - Resolved alerts → Both channels (for critical)
|
||||
#
|
||||
# Uses docker configs to embed Python bridge apps since Portainer GitOps
|
||||
# doesn't support docker build
|
||||
|
||||
configs:
|
||||
# Alertmanager Configuration
|
||||
alertmanager_config:
|
||||
content: |
|
||||
global:
|
||||
resolve_timeout: 5m
|
||||
|
||||
route:
|
||||
group_by: ['alertname', 'severity', 'instance']
|
||||
group_wait: 30s
|
||||
group_interval: 5m
|
||||
repeat_interval: 4h
|
||||
receiver: 'ntfy-all'
|
||||
|
||||
routes:
|
||||
- match:
|
||||
severity: critical
|
||||
receiver: 'critical-alerts'
|
||||
continue: false
|
||||
- match:
|
||||
severity: warning
|
||||
receiver: 'ntfy-all'
|
||||
|
||||
receivers:
|
||||
- name: 'ntfy-all'
|
||||
webhook_configs:
|
||||
- url: 'http://ntfy-bridge:5001/alert'
|
||||
send_resolved: true
|
||||
|
||||
- name: 'critical-alerts'
|
||||
webhook_configs:
|
||||
- url: 'http://ntfy-bridge:5001/alert'
|
||||
send_resolved: true
|
||||
- url: 'http://signal-bridge:5000/alert'
|
||||
send_resolved: true
|
||||
|
||||
inhibit_rules:
|
||||
- source_match:
|
||||
severity: 'critical'
|
||||
target_match:
|
||||
severity: 'warning'
|
||||
equal: ['alertname', 'instance']
|
||||
|
||||
# ntfy-bridge Python App
|
||||
ntfy_bridge_app:
|
||||
content: |
|
||||
from flask import Flask, request, jsonify
|
||||
import requests
|
||||
import os
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
NTFY_URL = os.environ.get('NTFY_URL', 'http://NTFY:80')
|
||||
NTFY_TOPIC = os.environ.get('NTFY_TOPIC', 'homelab-alerts')
|
||||
|
||||
def get_priority(severity, status):
|
||||
if status == 'resolved':
|
||||
return '3'
|
||||
if severity == 'critical':
|
||||
return '5'
|
||||
return '4'
|
||||
|
||||
def get_tag(severity, status):
|
||||
if status == 'resolved':
|
||||
return 'white_check_mark'
|
||||
if severity == 'critical':
|
||||
return 'rotating_light'
|
||||
return 'warning'
|
||||
|
||||
def format_alert(alert):
|
||||
status = alert.get('status', 'firing')
|
||||
labels = alert.get('labels', {})
|
||||
annotations = alert.get('annotations', {})
|
||||
|
||||
alertname = labels.get('alertname', 'Unknown')
|
||||
severity = labels.get('severity', 'warning')
|
||||
instance = labels.get('instance', 'unknown')
|
||||
|
||||
status_text = 'RESOLVED' if status == 'resolved' else 'FIRING'
|
||||
title = f"{alertname} [{status_text}]"
|
||||
|
||||
summary = annotations.get('summary', '')
|
||||
description = annotations.get('description', '')
|
||||
|
||||
body_parts = []
|
||||
if summary:
|
||||
body_parts.append(summary)
|
||||
if description and description != summary:
|
||||
body_parts.append(description)
|
||||
if instance != 'unknown':
|
||||
body_parts.append(f"Host: {instance}")
|
||||
|
||||
body = '\n'.join(body_parts) if body_parts else f"Alert {status_text.lower()}"
|
||||
return title, body, severity, status
|
||||
|
||||
@app.route('/alert', methods=['POST'])
|
||||
def handle_alert():
|
||||
try:
|
||||
data = request.json
|
||||
for alert in data.get('alerts', []):
|
||||
title, body, severity, status = format_alert(alert)
|
||||
requests.post(f"{NTFY_URL}/{NTFY_TOPIC}", data=body,
|
||||
headers={'Title': title, 'Priority': get_priority(severity, status), 'Tags': get_tag(severity, status)})
|
||||
return jsonify({'status': 'sent', 'count': len(data.get('alerts', []))})
|
||||
except Exception as e:
|
||||
return jsonify({'status': 'error', 'message': str(e)}), 500
|
||||
|
||||
@app.route('/health', methods=['GET'])
|
||||
def health():
|
||||
return jsonify({'status': 'healthy'})
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(host='0.0.0.0', port=5001)
|
||||
|
||||
# signal-bridge Python App
|
||||
signal_bridge_app:
|
||||
content: |
|
||||
import os
|
||||
import requests
|
||||
from flask import Flask, request, jsonify
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
SIGNAL_API_URL = os.environ.get('SIGNAL_API_URL', 'http://signal-api:8080')
|
||||
SIGNAL_SENDER = os.environ.get('SIGNAL_SENDER', '')
|
||||
SIGNAL_RECIPIENTS = os.environ.get('SIGNAL_RECIPIENTS', '').split(',')
|
||||
|
||||
def format_alert_message(alert_data):
|
||||
messages = []
|
||||
for alert in alert_data.get('alerts', []):
|
||||
status = alert.get('status', 'firing')
|
||||
labels = alert.get('labels', {})
|
||||
annotations = alert.get('annotations', {})
|
||||
severity = labels.get('severity', 'warning')
|
||||
summary = annotations.get('summary', labels.get('alertname', 'Alert'))
|
||||
description = annotations.get('description', '')
|
||||
|
||||
if status == 'resolved':
|
||||
emoji, text = '✅', 'RESOLVED'
|
||||
elif severity == 'critical':
|
||||
emoji, text = '🚨', 'CRITICAL'
|
||||
else:
|
||||
emoji, text = '⚠️', 'WARNING'
|
||||
|
||||
msg = f"{emoji} [{text}] {summary}"
|
||||
if description:
|
||||
msg += f"\n{description}"
|
||||
messages.append(msg)
|
||||
return "\n\n".join(messages)
|
||||
|
||||
def send_signal_message(message):
|
||||
if not SIGNAL_SENDER or not SIGNAL_RECIPIENTS:
|
||||
return False
|
||||
success = True
|
||||
for recipient in SIGNAL_RECIPIENTS:
|
||||
recipient = recipient.strip()
|
||||
if not recipient:
|
||||
continue
|
||||
try:
|
||||
response = requests.post(f"{SIGNAL_API_URL}/v2/send", json={
|
||||
"message": message, "number": SIGNAL_SENDER, "recipients": [recipient]
|
||||
}, timeout=30)
|
||||
if response.status_code not in [200, 201]:
|
||||
success = False
|
||||
except Exception:
|
||||
success = False
|
||||
return success
|
||||
|
||||
@app.route('/health', methods=['GET'])
|
||||
def health():
|
||||
return jsonify({"status": "healthy"})
|
||||
|
||||
@app.route('/alert', methods=['POST'])
def receive_alert():
    """Alertmanager webhook endpoint: format the payload and relay to Signal.

    Responses: 200 on full success, 400 on an empty/non-JSON body,
    207 when delivery partially failed (or Signal is unconfigured),
    500 on any unexpected error.
    """
    try:
        payload = request.get_json()
        if not payload:
            return jsonify({"error": "No data"}), 400
        text = format_alert_message(payload)
        if send_signal_message(text):
            return jsonify({"status": "sent"})
        # At least one recipient failed (or the bridge is unconfigured).
        return jsonify({"status": "partial_failure"}), 207
    except Exception as e:
        return jsonify({"error": str(e)}), 500
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(host='0.0.0.0', port=5000)
|
||||
|
||||
services:
  # Alertmanager — receives alerts from Prometheus and routes them to the
  # bridge webhooks below.
  alertmanager:
    image: prom/alertmanager:latest
    container_name: alertmanager
    restart: unless-stopped
    ports:
      - "9093:9093"
    configs:
      - source: alertmanager_config
        target: /etc/alertmanager/alertmanager.yml
    volumes:
      - alertmanager-data:/alertmanager
    command:
      - '--config.file=/etc/alertmanager/alertmanager.yml'
      - '--storage.path=/alertmanager'
      - '--web.external-url=http://localhost:9093'
    networks:
      - alerting
      - monitoring-stack_monitoring

  # Forwards Alertmanager webhooks to ntfy.
  ntfy-bridge:
    image: python:3.11-slim
    container_name: ntfy-bridge
    restart: unless-stopped
    ports:
      - "5001:5001"
    environment:
      # NOTE(review): hostname "NTFY" is uppercase — confirm it matches the
      # actual container/service name on the ntfy-stack_default network.
      - NTFY_URL=http://NTFY:80
      # FIX: list-form env values are passed verbatim; the previous
      # NTFY_TOPIC="…" made the quote characters part of the topic and
      # corrupted the ntfy publish URL built by the bridge app.
      - NTFY_TOPIC=REDACTED_NTFY_TOPIC
    configs:
      - source: ntfy_bridge_app
        target: /app/app.py
    command: >
      sh -c "pip install --quiet flask requests gunicorn &&
      cd /app && gunicorn --bind 0.0.0.0:5001 --workers 2 app:app"
    networks:
      - alerting
      - ntfy-stack_default
    healthcheck:
      test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:5001/health')"]
      interval: 30s
      timeout: 10s
      retries: 3

  # Forwards Alertmanager webhooks to Signal via the signal-cli REST API.
  signal-bridge:
    image: python:3.11-slim
    container_name: signal-bridge
    restart: unless-stopped
    ports:
      - "5000:5000"
    environment:
      - SIGNAL_API_URL=http://signal-api:8080
      - SIGNAL_SENDER=REDACTED_PHONE_NUMBER
      - SIGNAL_RECIPIENTS=REDACTED_PHONE_NUMBER
    configs:
      - source: signal_bridge_app
        target: /app/app.py
    command: >
      sh -c "pip install --quiet flask requests gunicorn &&
      cd /app && gunicorn --bind 0.0.0.0:5000 --workers 2 app:app"
    networks:
      - alerting
      - signal-api-stack_default
    healthcheck:
      test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:5000/health')"]
      interval: 30s
      timeout: 10s
      retries: 3

volumes:
  alertmanager-data:

networks:
  alerting:
    driver: bridge
  monitoring-stack_monitoring:
    external: true
  ntfy-stack_default:
    external: true
  signal-api-stack_default:
    external: true
|
||||
57
hosts/vms/homelab-vm/archivebox.yaml
Normal file
57
hosts/vms/homelab-vm/archivebox.yaml
Normal file
@@ -0,0 +1,57 @@
|
||||
# ArchiveBox - Web archiving
# Port: 7254 (host) → 8000 (container)
# Self-hosted internet archiving solution
version: '3.8'

services:
  archivebox:
    image: archivebox/archivebox:latest
    container_name: archivebox
    ports:
      - "7254:8000"
    volumes:
      - ./data:/data
    environment:
      - PUID=1000
      - PGID=1000
      - ADMIN_USERNAME=vish
      # FIX: list-form env values are passed verbatim — the previous
      # ADMIN_PASSWORD="…" made the quote characters part of the password.
      - ADMIN_PASSWORD=REDACTED_PASSWORD
      - ALLOWED_HOSTS=*
      - CSRF_TRUSTED_ORIGINS=http://localhost:7254
      - PUBLIC_INDEX=True
      - PUBLIC_SNAPSHOTS=True
      - PUBLIC_ADD_VIEW=False
      - SEARCH_BACKEND_ENGINE=sonic
      - SEARCH_BACKEND_HOST_NAME=sonic
      # Must match the sonic service's SEARCH_BACKEND_PASSWORD below
      # (quotes removed from all three occurrences together).
      - SEARCH_BACKEND_PASSWORD=REDACTED_PASSWORD
    restart: unless-stopped

  archivebox_scheduler:
    image: archivebox/archivebox:latest
    container_name: archivebox_scheduler
    command: schedule --foreground --update --every=day
    volumes:
      - ./data:/data
    environment:
      - PUID=1000
      - PGID=1000
      - TIMEOUT=120
      - SEARCH_BACKEND_ENGINE=sonic
      - SEARCH_BACKEND_HOST_NAME=sonic
      - SEARCH_BACKEND_PASSWORD=REDACTED_PASSWORD
    restart: unless-stopped

  sonic:
    image: archivebox/sonic:latest
    container_name: archivebox_sonic
    expose:
      - "1491"
    environment:
      - SEARCH_BACKEND_PASSWORD=REDACTED_PASSWORD
    volumes:
      - ./data/sonic:/var/lib/sonic/store
    restart: unless-stopped

networks:
  default:
    name: archivebox_net
|
||||
23
hosts/vms/homelab-vm/beeper.yaml
Normal file
23
hosts/vms/homelab-vm/beeper.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
# Beeper bridge client in a container; web UI served over HTTPS on 3656.
services:
  beeper:
    image: ghcr.io/zachatrocity/docker-beeper:latest
    container_name: Beeper
    healthcheck:
      test: ["CMD-SHELL", "nc -z 127.0.0.1 3000 || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 90s
    security_opt:
      - seccomp:unconfined
    environment:
      PUID: 1029
      PGID: 100
      TZ: America/Los_Angeles
    volumes:
      - /home/homelab/docker/beeper:/config:rw
    ports:
      - "3655:3000"  # HTTP (redirects to HTTPS — use port 3656)
      - "3656:3001"  # HTTPS (use this — accept self-signed cert in browser)
    shm_size: "2gb"
    restart: on-failure:5
|
||||
14
hosts/vms/homelab-vm/binternet.yaml
Normal file
14
hosts/vms/homelab-vm/binternet.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
# Binternet - Pinterest frontend
|
||||
# Port: 8080
|
||||
# Privacy-respecting Pinterest frontend
|
||||
services:
|
||||
binternet:
|
||||
container_name: binternet
|
||||
image: ghcr.io/ahwxorg/binternet:latest
|
||||
cap_drop:
|
||||
- ALL
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
ports:
|
||||
- '21544:8080'
|
||||
restart: unless-stopped
|
||||
30
hosts/vms/homelab-vm/cloudflare-tunnel.yaml
Normal file
30
hosts/vms/homelab-vm/cloudflare-tunnel.yaml
Normal file
@@ -0,0 +1,30 @@
|
||||
# Cloudflare Tunnel for Homelab-VM
|
||||
# Provides secure external access without port forwarding
|
||||
#
|
||||
# SETUP INSTRUCTIONS:
|
||||
# 1. Go to https://one.dash.cloudflare.com/ → Zero Trust → Networks → Tunnels
|
||||
# 2. Create a new tunnel named "homelab-vm-tunnel"
|
||||
# 3. Copy the tunnel token (starts with eyJ...)
|
||||
# 4. Set TUNNEL_TOKEN in the environment (e.g. a .env file next to this compose) — the file reads ${TUNNEL_TOKEN}
|
||||
# 5. In the tunnel dashboard, add these public hostnames:
|
||||
#
|
||||
# | Public Hostname | Service |
|
||||
# |------------------------|----------------------------|
|
||||
# | gf.vish.gg | http://localhost:3300 |
|
||||
# | ntfy.vish.gg | http://localhost:8081 |
|
||||
# | hoarder.thevish.io | http://localhost:3000 |
|
||||
# | binterest.thevish.io | http://localhost:21544 |
|
||||
#
|
||||
# 6. Deploy this stack
|
||||
|
||||
version: '3.8'
|
||||
|
||||
services:
|
||||
cloudflared:
|
||||
image: cloudflare/cloudflared:latest
|
||||
container_name: cloudflare-tunnel
|
||||
restart: unless-stopped
|
||||
command: tunnel run
|
||||
environment:
|
||||
- TUNNEL_TOKEN=${TUNNEL_TOKEN}
|
||||
network_mode: host # Needed to access localhost services
|
||||
18
hosts/vms/homelab-vm/dashdot.yaml
Normal file
18
hosts/vms/homelab-vm/dashdot.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
# Dashdot - Server dashboard
|
||||
# Port: 3001
|
||||
# Modern server dashboard
|
||||
|
||||
version: "3.9"
|
||||
|
||||
services:
|
||||
dashdot:
|
||||
image: mauricenino/dashdot
|
||||
container_name: dashdot
|
||||
ports:
|
||||
- "7512:3001"
|
||||
volumes:
|
||||
- "/:/mnt/host:ro"
|
||||
privileged: true
|
||||
stdin_open: true # same as -it
|
||||
tty: true # same as -it
|
||||
restart: unless-stopped
|
||||
38
hosts/vms/homelab-vm/ddns.yml
Normal file
38
hosts/vms/homelab-vm/ddns.yml
Normal file
@@ -0,0 +1,38 @@
|
||||
# Dynamic DNS Updater
|
||||
# Updates DNS records when IP changes
|
||||
|
||||
version: "3.7"
|
||||
services:
|
||||
ddns-updater:
|
||||
image: qmcgaw/ddns-updater
|
||||
container_name: ddns-updater
|
||||
network_mode: bridge
|
||||
ports:
|
||||
- 8000:8000/tcp
|
||||
volumes:
|
||||
- /home/homelab/docker/ddns/data:/updater/data
|
||||
environment:
|
||||
- CONFIG=
|
||||
- PERIOD=5m
|
||||
- UPDATE_COOLDOWN_PERIOD=5m
|
||||
- PUBLICIP_FETCHERS=all
|
||||
- PUBLICIP_HTTP_PROVIDERS=all
|
||||
- PUBLICIPV4_HTTP_PROVIDERS=all
|
||||
- PUBLICIPV6_HTTP_PROVIDERS=all
|
||||
- PUBLICIP_DNS_PROVIDERS=all
|
||||
- PUBLICIP_DNS_TIMEOUT=3s
|
||||
- HTTP_TIMEOUT=10s
|
||||
|
||||
# Web UI
|
||||
- LISTENING_PORT=8000
|
||||
- ROOT_URL=/
|
||||
|
||||
# Backup
|
||||
- BACKUP_PERIOD=0 # 0 to disable
|
||||
- BACKUP_DIRECTORY=/updater/data
|
||||
|
||||
# Other
|
||||
- LOG_LEVEL=info
|
||||
- LOG_CALLER=hidden
|
||||
- SHOUTRRR_ADDRESSES=
|
||||
restart: unless-stopped
|
||||
28
hosts/vms/homelab-vm/diun.yaml
Normal file
28
hosts/vms/homelab-vm/diun.yaml
Normal file
@@ -0,0 +1,28 @@
|
||||
# Diun — Docker Image Update Notifier
|
||||
#
|
||||
# Watches all running containers on this host and sends ntfy
|
||||
# notifications when upstream images update their digest.
|
||||
# Schedule: Mondays 09:00 (weekly cadence).
|
||||
#
|
||||
# ntfy topic: https://ntfy.vish.gg/diun
|
||||
|
||||
services:
|
||||
diun:
|
||||
image: crazymax/diun:latest
|
||||
container_name: diun
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- diun-data:/data
|
||||
environment:
|
||||
LOG_LEVEL: info
|
||||
DIUN_WATCH_WORKERS: "20"
|
||||
DIUN_WATCH_SCHEDULE: "0 9 * * 1"
|
||||
DIUN_WATCH_JITTER: 30s
|
||||
DIUN_PROVIDERS_DOCKER: "true"
|
||||
DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT: "true"
|
||||
DIUN_NOTIF_NTFY_ENDPOINT: "https://ntfy.vish.gg"
|
||||
DIUN_NOTIF_NTFY_TOPIC: "diun"
|
||||
restart: unless-stopped
|
||||
|
||||
volumes:
|
||||
diun-data:
|
||||
15
hosts/vms/homelab-vm/dozzle-agent.yaml
Normal file
15
hosts/vms/homelab-vm/dozzle-agent.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
services:
|
||||
dozzle-agent:
|
||||
image: amir20/dozzle:latest
|
||||
container_name: dozzle-agent
|
||||
command: agent
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
ports:
|
||||
- "7007:7007"
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD", "/dozzle", "healthcheck"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
17
hosts/vms/homelab-vm/drawio.yml
Normal file
17
hosts/vms/homelab-vm/drawio.yml
Normal file
@@ -0,0 +1,17 @@
|
||||
# Draw.io - Diagramming tool
|
||||
# Port: 5022 (host) → 8080 (container)
|
||||
# Self-hosted diagram editor
|
||||
version: "3.9"
|
||||
services:
|
||||
drawio:
|
||||
container_name: Draw.io
|
||||
image: jgraph/drawio
|
||||
healthcheck:
|
||||
test: curl -f http://localhost:8080/ || exit 1
|
||||
mem_limit: 4g
|
||||
cpu_shares: 768
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
restart: on-failure:5
|
||||
ports:
|
||||
- 5022:8080
|
||||
83
hosts/vms/homelab-vm/fluxer-notes.md
Normal file
83
hosts/vms/homelab-vm/fluxer-notes.md
Normal file
@@ -0,0 +1,83 @@
|
||||
# Fluxer Chat Server Deployment
|
||||
# Domain: st.vish.gg
|
||||
# Replaces: Stoat Chat
|
||||
# Status: ✅ DEPLOYED SUCCESSFULLY & CAPTCHA ISSUE RESOLVED
|
||||
|
||||
## Deployment Summary
|
||||
- **Date**: 2026-02-15
|
||||
- **Domain**: st.vish.gg (Cloudflare DNS grey cloud)
|
||||
- **Location**: /root/fluxer
|
||||
- **Replaced**: Stoat Chat (services stopped and removed)
|
||||
- **Status**: Fully operational with user registration working
|
||||
|
||||
## Architecture
|
||||
Fluxer uses a multi-container architecture with the following services:
|
||||
- **caddy**: Frontend web server serving the React app (port 8088)
|
||||
- **gateway**: WebSocket gateway for real-time communication
|
||||
- **api**: REST API backend (internal port 8080)
|
||||
- **postgres**: Primary database
|
||||
- **redis**: Caching and session storage
|
||||
- **cassandra**: Message storage
|
||||
- **minio**: File storage (S3-compatible)
|
||||
- **meilisearch**: Search engine
|
||||
- **livekit**: Voice/video calling (not configured)
|
||||
- **worker**: Background job processing
|
||||
- **media**: Media processing service
|
||||
- **clamav**: Antivirus scanning
|
||||
- **metrics**: Monitoring and metrics
|
||||
|
||||
## Network Configuration
|
||||
- **External Access**: nginx reverse proxy → Caddy (port 8088) → API (port 8080)
|
||||
- **Nginx Config**: /etc/nginx/sites-available/fluxer
|
||||
- **SSL**: Handled by nginx with existing certificates
|
||||
|
||||
## Issues Resolved
|
||||
### 1. Asset Loading (Fixed)
|
||||
- **Problem**: Frontend was trying to load assets from external CDN
|
||||
- **Solution**: Modified build configuration to use local assets
|
||||
|
||||
### 2. Captcha Verification (Fixed)
|
||||
- **Problem**: "verify human" captcha not loading, preventing account creation
|
||||
- **Root Cause**: Using test Turnstile keys causing 400 errors on registration
|
||||
- **Solution**: Disabled captcha by setting `CAPTCHA_ENABLED=false` in `/root/fluxer/dev/.env`
|
||||
- **Result**: User registration now works without captcha requirement
|
||||
|
||||
## Configuration Files
|
||||
- **Main Config**: /root/fluxer/dev/compose.yaml
|
||||
- **Environment**: /root/fluxer/dev/.env
|
||||
- **Nginx Config**: /etc/nginx/sites-available/fluxer
|
||||
|
||||
## Key Environment Variables
|
||||
```
|
||||
CAPTCHA_ENABLED=false
|
||||
CAPTCHA_PRIMARY_PROVIDER=turnstile
|
||||
TURNSTILE_SITE_KEY=1x00000000000000000000AA (test key)
|
||||
TURNSTILE_SECRET_KEY=1x0000000000000000000000000000000AA (test key)
|
||||
```
|
||||
|
||||
## Verification
|
||||
- **API Health**: https://st.vish.gg/api/instance ✅
|
||||
- **Frontend**: https://st.vish.gg/ ✅
|
||||
- **Registration**: Working without captcha ✅
|
||||
- **Test User Created**: ID 1472533637105737729 ✅
|
||||
|
||||
## Management Commands
|
||||
```bash
|
||||
# Start services
|
||||
cd /root/fluxer && docker compose -f dev/compose.yaml up -d
|
||||
|
||||
# Stop services
|
||||
cd /root/fluxer && docker compose -f dev/compose.yaml down
|
||||
|
||||
# View logs
|
||||
cd /root/fluxer && docker compose -f dev/compose.yaml logs [service_name]
|
||||
|
||||
# Restart API only
|
||||
cd /root/fluxer && docker compose -f dev/compose.yaml restart api
|
||||
```
|
||||
|
||||
## Notes
|
||||
- Captcha can be re-enabled later by setting `CAPTCHA_ENABLED=true` and configuring proper Turnstile keys
|
||||
- Voice/video calling requires LiveKit configuration (currently disabled)
|
||||
- All data is persisted in Docker volumes
|
||||
- Service runs in development mode for easier debugging
|
||||
46
hosts/vms/homelab-vm/fstab.mounts
Normal file
46
hosts/vms/homelab-vm/fstab.mounts
Normal file
@@ -0,0 +1,46 @@
|
||||
# fstab remote mounts for homelab-vm (192.168.0.210)
|
||||
# Credentials files (chmod 600, owner root):
|
||||
# /etc/samba/.atlantis_credentials — vish @ Atlantis + Setillo
|
||||
# /etc/samba/.calypso_credentials — Vish @ Calypso
|
||||
# /etc/samba/.setillo_credentials — vish @ Setillo
|
||||
# /etc/samba/.pi5_credentials — vish @ pi-5
|
||||
# /etc/samba/.guava_credentials — vish @ Guava (TrueNAS; password has literal \! — not !)
|
||||
|
||||
# ── Atlantis (192.168.0.200) - Synology 1823xs+ ──────────────────────────────
|
||||
# NFS (archive only — only share DSM exports to this host via NFS)
|
||||
192.168.0.200:/volume1/archive /mnt/repo_atlantis nfs vers=3,_netdev,nofail 0 0
|
||||
# CIFS
|
||||
//192.168.0.200/data /mnt/atlantis_data cifs credentials=/etc/samba/.atlantis_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//192.168.0.200/docker /mnt/atlantis_docker cifs credentials=/etc/samba/.atlantis_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//192.168.0.200/downloads /mnt/atlantis_downloads cifs credentials=/etc/samba/.atlantis_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//192.168.0.200/games /mnt/atlantis_games cifs credentials=/etc/samba/.atlantis_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//192.168.0.200/torrents /mnt/atlantis_torrents cifs credentials=/etc/samba/.atlantis_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//192.168.0.200/usenet /mnt/atlantis_usenet cifs credentials=/etc/samba/.atlantis_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//192.168.0.200/website /mnt/atlantis_website cifs credentials=/etc/samba/.atlantis_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//192.168.0.200/documents /mnt/atlantis_documents cifs credentials=/etc/samba/.atlantis_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
|
||||
# ── Calypso (100.103.48.78) - Synology DS723+ via Tailscale ──────────────────
|
||||
//100.103.48.78/data /mnt/calypso_data cifs credentials=/etc/samba/.calypso_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//100.103.48.78/docker /mnt/calypso_docker cifs credentials=/etc/samba/.calypso_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//100.103.48.78/docker2 /mnt/calypso_docker2 cifs credentials=/etc/samba/.calypso_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//100.103.48.78/dropboxsync /mnt/calypso_dropboxsync cifs credentials=/etc/samba/.calypso_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//100.103.48.78/Files /mnt/calypso_files cifs credentials=/etc/samba/.calypso_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//100.103.48.78/netshare /mnt/calypso_netshare cifs credentials=/etc/samba/.calypso_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
|
||||
# ── Setillo (100.125.0.20) - Synology DS223j via Tailscale ───────────────────
|
||||
//100.125.0.20/backups /mnt/setillo_backups cifs credentials=/etc/samba/.setillo_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//100.125.0.20/docker /mnt/setillo_docker cifs credentials=/etc/samba/.setillo_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//100.125.0.20/PlexMediaServer /mnt/setillo_plex cifs credentials=/etc/samba/.setillo_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//100.125.0.20/syncthing /mnt/setillo_syncthing cifs credentials=/etc/samba/.setillo_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
|
||||
# ── pi-5 / rpi5-vish (192.168.0.66) - Raspberry Pi 5 ────────────────────────
|
||||
//192.168.0.66/storagepool /mnt/pi5_storagepool cifs credentials=/etc/samba/.pi5_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
|
||||
# ── Guava (100.75.252.64) - TrueNAS SCALE via Tailscale ──────────────────────
|
||||
//100.75.252.64/photos /mnt/guava_photos cifs credentials=/etc/samba/.guava_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//100.75.252.64/data /mnt/guava_data cifs credentials=/etc/samba/.guava_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//100.75.252.64/guava_turquoise /mnt/guava_turquoise cifs credentials=/etc/samba/.guava_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//100.75.252.64/website /mnt/guava_website cifs credentials=/etc/samba/.guava_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//100.75.252.64/jellyfin /mnt/guava_jellyfin cifs credentials=/etc/samba/.guava_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//100.75.252.64/truenas-exporters /mnt/guava_exporters cifs credentials=/etc/samba/.guava_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
//100.75.252.64/iso /mnt/guava_iso cifs credentials=/etc/samba/.guava_credentials,vers=3.0,_netdev,nofail 0 0
|
||||
20
hosts/vms/homelab-vm/gitea-ntfy-bridge.yaml
Normal file
20
hosts/vms/homelab-vm/gitea-ntfy-bridge.yaml
Normal file
@@ -0,0 +1,20 @@
|
||||
# Gitea to ntfy Webhook Bridge
# Receives Gitea webhooks and forwards formatted messages to ntfy
# Port: 8095 (internal)
#
# Usage: Add webhook in Gitea pointing to http://192.168.0.210:8095/webhook
# Target ntfy topic: homelab-alerts

services:
  gitea-ntfy-bridge:
    image: python:3.12-alpine
    container_name: gitea-ntfy-bridge
    environment:
      - NTFY_URL=https://ntfy.vish.gg
      # FIX: list-form env values are passed verbatim — the previous
      # NTFY_TOPIC="…" made the quote characters part of the topic, and
      # bridge.py's f"{NTFY_URL}/{NTFY_TOPIC}" then built a broken URL.
      - NTFY_TOPIC=REDACTED_NTFY_TOPIC
    ports:
      - "8095:8095"
    volumes:
      - ./gitea-ntfy-bridge:/app:ro
    command: ["python", "/app/bridge.py"]
    restart: unless-stopped
|
||||
140
hosts/vms/homelab-vm/gitea-ntfy-bridge/bridge.py
Normal file
140
hosts/vms/homelab-vm/gitea-ntfy-bridge/bridge.py
Normal file
@@ -0,0 +1,140 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Gitea to ntfy Webhook Bridge - Translates Gitea events to ntfy notifications"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import urllib.request
|
||||
from http.server import HTTPServer, BaseHTTPRequestHandler
|
||||
|
||||
# Force unbuffered output
|
||||
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', buffering=1)
|
||||
sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', buffering=1)
|
||||
|
||||
NTFY_URL = os.environ.get("NTFY_URL", "https://ntfy.vish.gg")
|
||||
NTFY_TOPIC = os.environ.get("NTFY_TOPIC", "homelab-alerts")
|
||||
|
||||
class WebhookHandler(BaseHTTPRequestHandler):
    """Minimal HTTP handler: GET is a health probe, POST is Gitea webhook intake."""

    def do_GET(self):
        """Health check endpoint."""
        self.send_response(200)
        self.send_header("Content-type", "text/plain")
        self.end_headers()
        self.wfile.write(b"Gitea-ntfy bridge OK\n")
        print(f"Health check from {self.client_address[0]}", flush=True)

    def do_POST(self):
        """Parse a Gitea webhook, format it, and relay it to ntfy."""
        length = int(self.headers.get("Content-Length", 0))
        raw = self.rfile.read(length)

        try:
            payload = json.loads(raw) if raw else {}
            event = self.headers.get("X-Gitea-Event", "unknown")

            print(f"Received {event} event from {self.client_address[0]}", flush=True)

            title, message, tags, priority = self.format_message(event, payload)

            if title and message:
                print(f"Sending notification: {title}", flush=True)
                self.send_ntfy(title, message, tags, priority)
                self.send_response(200)
            else:
                # Unhandled event types are acknowledged but not forwarded.
                print(f"Ignoring event type: {event}", flush=True)
                self.send_response(204)  # No content to send
        except Exception as e:
            print(f"Error processing webhook: {e}", flush=True)
            self.send_response(500)

        self.end_headers()

    def format_message(self, event_type, data):
        """Format Gitea event into an ntfy (title, message, tags, priority) tuple.

        Returns (None, None, "git", "default") for event types this bridge
        does not handle.
        """
        repo = data.get("repository", {}).get("full_name", "unknown")
        sender = data.get("sender", {}).get("login", "unknown")

        title, message = None, None
        tags, priority = "git", "default"

        if event_type == "push":
            commits = data.get("commits", [])
            branch = data.get("ref", "").replace("refs/heads/", "")
            count = len(commits)
            title = f"Push to {repo}"
            message = f"{sender} pushed {count} commit(s) to {branch}"
            if commits:
                # First line of the first commit message only.
                first_line = commits[0].get('message', '').split(chr(10))[0]
                message += f"\n\n* {first_line}"
                if count > 1:
                    message += f"\n* ... and {count - 1} more"
            tags = "package"

        elif event_type == "pull_request":
            action = data.get("action", "")
            pr = data.get("pull_request", {})
            title = f"PR #{pr.get('number', '')} {action}"
            message = f"{repo}: {pr.get('title', '')}\nBy: {sender}"
            tags = "twisted_rightwards_arrows"
            if action == "opened":
                priority = "high"

        elif event_type == "issues":
            action = data.get("action", "")
            issue = data.get("issue", {})
            title = f"Issue #{issue.get('number', '')} {action}"
            message = f"{repo}: {issue.get('title', '')}\nBy: {sender}"
            tags = "clipboard"

        elif event_type == "release":
            action = data.get("action", "")
            release = data.get("release", {})
            tag = release.get("tag_name", "")
            title = f"Release {tag}"
            message = f"{repo}: New release {action}\n{release.get('name', tag)}"
            tags = "rocket"
            priority = "high"

        elif event_type == "create":
            title = f"New {data.get('ref_type', '')}: {data.get('ref', '')}"
            message = f"{repo}\nCreated by: {sender}"
            tags = "sparkles"

        elif event_type == "delete":
            title = f"Deleted {data.get('ref_type', '')}: {data.get('ref', '')}"
            message = f"{repo}\nDeleted by: {sender}"
            tags = "wastebasket"

        return title, message, tags, priority

    def send_ntfy(self, title, message, tags="git", priority="default"):
        """POST the message body to ntfy; metadata travels in HTTP headers."""
        target = f"{NTFY_URL}/{NTFY_TOPIC}"
        meta = {
            "Title": title,
            "Tags": tags,
            "Priority": priority,
        }

        req = urllib.request.Request(
            target, data=message.encode('utf-8'), headers=meta, method="POST")
        try:
            with urllib.request.urlopen(req, timeout=10) as resp:
                print(f"Sent: {title} -> {resp.status}")
        except Exception as e:
            print(f"Failed to send ntfy: {e}")

    def log_message(self, format, *args):
        # Route BaseHTTPRequestHandler's access log through stdout.
        print(f"[{self.log_date_time_string()}] {format % args}")
|
||||
|
||||
if __name__ == "__main__":
    # Listen on all interfaces; the compose file publishes port 8095.
    httpd = HTTPServer(("0.0.0.0", 8095), WebhookHandler)
    print(f"Gitea-ntfy bridge running on :8095 -> {NTFY_URL}/{NTFY_TOPIC}")
    httpd.serve_forever()
|
||||
18
hosts/vms/homelab-vm/gotify.yml
Normal file
18
hosts/vms/homelab-vm/gotify.yml
Normal file
@@ -0,0 +1,18 @@
|
||||
# Gotify - Push notifications
|
||||
# Port: 8081 (host) → 80 (container)
|
||||
# Self-hosted push notification server
|
||||
|
||||
version: '3.9'
|
||||
services:
|
||||
gotify:
|
||||
image: ghcr.io/gotify/server:latest
|
||||
container_name: Gotify
|
||||
restart: on-failure:5
|
||||
ports:
|
||||
- 8081:80
|
||||
volumes:
|
||||
- /home/homelab/docker/gotify:/app/data:rw
|
||||
environment:
|
||||
GOTIFY_DEFAULTUSER_NAME: vish
|
||||
GOTIFY_DEFAULTUSER_PASS: "REDACTED_PASSWORD"
|
||||
TZ: America/Los_Angeles
|
||||
42
hosts/vms/homelab-vm/hoarder.yaml
Normal file
42
hosts/vms/homelab-vm/hoarder.yaml
Normal file
@@ -0,0 +1,42 @@
|
||||
# Hoarder/Karakeep - Bookmark manager
# Port: 3482 (host) → 3000 (container)
# AI-powered bookmark and note manager
services:
  web:
    image: ghcr.io/hoarder-app/hoarder:${HOARDER_VERSION:-release}
    restart: unless-stopped
    volumes:
      - /home/homelab/docker/hoarder/data:/data
    ports:
      - "3482:3000"
    environment:
      MEILI_ADDR: http://meilisearch:7700
      BROWSER_WEB_URL: http://chrome:9222
      OPENAI_API_KEY: "REDACTED_API_KEY"
      DATA_DIR: /data
      NEXTAUTH_SECRET: "REDACTED_NEXTAUTH_SECRET"
      MEILI_MASTER_KEY: ${MEILI_MASTER_KEY}

  chrome:
    image: gcr.io/zenika-hub/alpine-chrome:123
    restart: unless-stopped
    command:
      - chromium-browser
      - --no-sandbox
      - --disable-gpu
      - --disable-dev-shm-usage
      - --remote-debugging-address=0.0.0.0
      - --remote-debugging-port=9222
      - --hide-scrollbars
    ports:
      - "9222:9222"  # optional, for debugging

  meilisearch:
    image: getmeili/meilisearch:v1.6
    restart: unless-stopped
    environment:
      MEILI_NO_ANALYTICS: "true"
    volumes:
      # NOTE(review): other services on this host bind under
      # /home/homelab/docker/… but this one uses /root/docker/… —
      # confirm which path actually exists on the host.
      - /root/docker/hoarder/meilisearch:/meili_data

# FIX: the previous top-level `volumes:` block declared named volumes
# (meilisearch, data) that no service referenced — every mount above is a
# host bind — so the dead declarations were removed.
|
||||
18
hosts/vms/homelab-vm/l4d2_docker.yaml
Normal file
18
hosts/vms/homelab-vm/l4d2_docker.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
# Left 4 Dead 2 - Game server
|
||||
# Port: 27015
|
||||
# L4D2 dedicated game server
|
||||
|
||||
version: '3.4'
|
||||
services:
|
||||
linuxgsm-l4d2:
|
||||
image: gameservermanagers/gameserver:l4d2
|
||||
# image: ghcr.io/gameservermanagers/gameserver:csgo
|
||||
container_name: l4d2server
|
||||
volumes:
|
||||
- /home/homelab/docker/l4d2:/data
|
||||
ports:
|
||||
- "27015:27015/tcp"
|
||||
- "27015:27015/udp"
|
||||
- "27020:27020/udp"
|
||||
- "27005:27005/udp"
|
||||
restart: unless-stopped
|
||||
23
hosts/vms/homelab-vm/libreddit.yaml
Normal file
23
hosts/vms/homelab-vm/libreddit.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
# Redlib - Reddit frontend (maintained fork of Libreddit)
|
||||
# Port: 9000
|
||||
# Privacy-respecting Reddit frontend
|
||||
# NOTE: Reddit actively blocks these frontends. May return 403 errors.
|
||||
# See: https://github.com/redlib-org/redlib/issues
|
||||
|
||||
services:
|
||||
redlib:
|
||||
image: quay.io/redlib/redlib:latest
|
||||
container_name: Redlib
|
||||
hostname: redlib
|
||||
mem_limit: 2g
|
||||
cpu_shares: 768
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
read_only: true
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--spider", "-q", "--tries=1", "http://localhost:8080/settings"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
ports:
|
||||
- 9000:8080
|
||||
restart: on-failure:5
|
||||
61
hosts/vms/homelab-vm/mattermost.yml
Normal file
61
hosts/vms/homelab-vm/mattermost.yml
Normal file
@@ -0,0 +1,61 @@
|
||||
# Mattermost - Team collaboration
|
||||
# Port: 8065
|
||||
# Self-hosted Slack alternative
|
||||
# DB: host postgres (172.17.0.1:5432) — not containerized
|
||||
# Compose file lives on host at: /opt/mattermost/docker-compose.yml
|
||||
|
||||
services:
|
||||
mattermost:
|
||||
image: mattermost/mattermost-team-edition:11.4
|
||||
container_name: mattermost
|
||||
restart: unless-stopped
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
pids_limit: 200
|
||||
read_only: false
|
||||
tmpfs:
|
||||
- /tmp
|
||||
ports:
|
||||
- "8065:8065"
|
||||
environment:
|
||||
TZ: UTC
|
||||
MM_SQLSETTINGS_DRIVERNAME: postgres
|
||||
MM_SQLSETTINGS_DATASOURCE: "postgres://mmuser:${MM_DB_PASSWORD}@172.17.0.1:5432/mattermost?sslmode=disable&connect_timeout=10" # pragma: allowlist secret
|
||||
MM_SERVICESETTINGS_SITEURL: https://mm.crista.love
|
||||
MM_SERVICESETTINGS_LISTENADDRESS: ":8065"
|
||||
MM_FILESETTINGS_DRIVERNAME: local
|
||||
MM_FILESETTINGS_DIRECTORY: /mattermost/data
|
||||
MM_LOGSETTINGS_CONSOLELEVEL: INFO
|
||||
MM_LOGSETTINGS_FILELEVEL: INFO
|
||||
MM_EMAILSETTINGS_ENABLESMTPAUTH: "true"
|
||||
MM_EMAILSETTINGS_SMTPSERVER: smtp.gmail.com
|
||||
MM_EMAILSETTINGS_SMTPPORT: "587"
|
||||
MM_EMAILSETTINGS_CONNECTIONSECURITY: STARTTLS
|
||||
MM_EMAILSETTINGS_SMTPUSERNAME: ${MM_SMTP_USERNAME} # set in .env
|
||||
MM_EMAILSETTINGS_FEEDBACKEMAIL: ${MM_FEEDBACK_EMAIL} # set in .env
|
||||
MM_EMAILSETTINGS_FEEDBACKNAME: Mattermost
|
||||
MM_EMAILSETTINGS_SENDEMAILNOTIFICATIONS: "true"
|
||||
MM_TEAMSETTINGS_ENABLEOPENSERVER: "true"
|
||||
MM_TEAMSETTINGS_MAXUSERSPERTEAM: "50"
|
||||
# Authentik OAuth2 via GitLab-compatible provider (works with Team Edition)
|
||||
MM_GITLABSETTINGS_ENABLE: "true"
|
||||
MM_GITLABSETTINGS_ID: ${MM_OAUTH_CLIENT_ID} # set in .env
|
||||
MM_GITLABSETTINGS_SECRET: ${MM_OAUTH_CLIENT_SECRET} # set in .env # pragma: allowlist secret
|
||||
MM_GITLABSETTINGS_SCOPE: "openid profile email"
|
||||
MM_GITLABSETTINGS_AUTHENDPOINT: "https://sso.vish.gg/application/o/authorize/"
|
||||
MM_GITLABSETTINGS_TOKENENDPOINT: "https://sso.vish.gg/application/o/token/"
|
||||
MM_GITLABSETTINGS_USERAPIENDPOINT: "https://sso.vish.gg/application/o/userinfo/"
|
||||
MM_GITLABSETTINGS_BUTTONTEXTCOLOR: "#FFFFFF"
|
||||
MM_GITLABSETTINGS_BUTTONCOLOR: "#fd4b2d"
|
||||
env_file:
|
||||
- .env
|
||||
volumes:
|
||||
- /opt/mattermost/config:/mattermost/config:rw
|
||||
- /opt/mattermost/data:/mattermost/data:rw
|
||||
- /opt/mattermost/logs:/mattermost/logs:rw
|
||||
- /opt/mattermost/plugins:/mattermost/plugins:rw
|
||||
- /opt/mattermost/client-plugins:/mattermost/client/plugins:rw
|
||||
# No custom healthcheck needed — the image provides one via:
|
||||
# CMD /mattermost/bin/mmctl system status --local
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
64
hosts/vms/homelab-vm/monitoring-compose.yml
Normal file
64
hosts/vms/homelab-vm/monitoring-compose.yml
Normal file
@@ -0,0 +1,64 @@
|
||||
# Prometheus + Grafana Monitoring Stack - LIVE DEPLOYMENT
# =============================================================================
# This is the actual running compose at /home/homelab/docker/monitoring/
# Deployed directly with docker compose, NOT via Portainer.
#
# Config files are bind-mounted from the same directory:
#   ./prometheus/prometheus.yml  - scrape config + alerting rules reference
#   ./prometheus/alert-rules.yml - alerting rules
#   ./grafana/provisioning/      - datasources + dashboard provisioning
#
# To redeploy: docker compose -f this file up -d (from /home/homelab/docker/monitoring/)
# To reload Prometheus config without restart: curl -X POST http://localhost:9090/-/reload
#
# See monitoring.yaml for the self-contained Portainer GitOps version (embedded configs).
# =============================================================================

version: "3.8"

services:
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    volumes:
      - ./prometheus:/etc/prometheus
      - prometheus-data:/prometheus
    command:
      - "--config.file=/etc/prometheus/prometheus.yml"
      - "--storage.tsdb.path=/prometheus"
      # Enables the /-/reload endpoint used above.
      - "--web.enable-lifecycle"
    ports:
      - "9090:9090"
    restart: unless-stopped

  grafana:
    image: grafana/grafana-oss:latest
    container_name: grafana
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      # Fix: no quotes around the value — in list-form `environment:` entries
      # Compose passes everything after `=` verbatim, so quotes would become
      # part of the literal password.
      - GF_SECURITY_ADMIN_PASSWORD=REDACTED_PASSWORD
    volumes:
      - grafana-data:/var/lib/grafana
      - ./grafana/provisioning/datasources:/etc/grafana/provisioning/datasources
      - ./grafana/provisioning/dashboards:/etc/grafana/provisioning/dashboards
      - ./grafana/dashboards:/var/lib/grafana/dashboards
    ports:
      - "3300:3000"
    restart: unless-stopped

  node_exporter:
    image: prom/node-exporter:latest
    container_name: node_exporter
    # Host network + host PID namespace so the exporter sees the real host.
    network_mode: host
    pid: host
    volumes:
      - /:/host:ro,rslave
      - /sys:/host/sys:ro
      - /proc:/host/proc:ro
    command:
      - '--path.rootfs=/host'
    restart: unless-stopped

volumes:
  prometheus-data:
  grafana-data:
|
||||
1054
hosts/vms/homelab-vm/monitoring.yaml
Normal file
1054
hosts/vms/homelab-vm/monitoring.yaml
Normal file
File diff suppressed because it is too large
Load Diff
13
hosts/vms/homelab-vm/node-exporter.yml
Normal file
13
hosts/vms/homelab-vm/node-exporter.yml
Normal file
@@ -0,0 +1,13 @@
|
||||
# Node Exporter - Metrics
# Port: 9100
# Prometheus hardware/OS metrics
#
# NOTE(review): no /proc, /sys, or rootfs bind mounts are configured here, so
# the exporter reports the container's own view rather than full host metrics —
# compare the node_exporter stanza in monitoring-compose.yml. Confirm intent.

version: "3.8"

services:
  node-exporter:
    image: prom/node-exporter:latest
    container_name: node-exporter
    restart: unless-stopped
    ports:
      - "9100:9100"
|
||||
43
hosts/vms/homelab-vm/ntfy.yaml
Normal file
43
hosts/vms/homelab-vm/ntfy.yaml
Normal file
@@ -0,0 +1,43 @@
|
||||
# ntfy - Push notifications
# Port: 8081 - ntfy server
# Port: 8095 - Gitea webhook bridge
# Simple pub-sub notification service with Gitea integration

version: "3.9"

services:
  ntfy:
    image: binwiederhier/ntfy
    container_name: NTFY
    command:
      - serve
    environment:
      - TZ=America/Los_Angeles
    volumes:
      - /home/homelab/docker/ntfy:/var/cache/ntfy:rw
      - /home/homelab/docker/ntfy/config:/etc/ntfy:rw
    healthcheck:
      # ntfy's health endpoint returns JSON; grep for "healthy": true.
      test: ["CMD-SHELL", "wget -q --tries=1 http://localhost:80/v1/health -O - | grep -Eo '\"healthy\"\\s*:\\s*true' || exit 1"]
      interval: 60s
      timeout: 10s
      retries: 3
      start_period: 40s
    ports:
      - "8081:80"  # Exposing on port 8081
    restart: on-failure:5

  gitea-ntfy-bridge:
    image: python:3.12-alpine
    container_name: gitea-ntfy-bridge
    environment:
      - NTFY_URL=https://ntfy.vish.gg
      # Fix: removed the literal quotes — in list-form `environment:` entries
      # Compose passes everything after `=` verbatim, so the quotes would have
      # become part of the topic name.
      - NTFY_TOPIC=REDACTED_NTFY_TOPIC
      - TZ=America/Los_Angeles
      - PYTHONUNBUFFERED=1
    ports:
      - "8095:8095"
    volumes:
      - /home/homelab/docker/gitea-ntfy-bridge:/app:ro
    command: ["python", "-u", "/app/bridge.py"]
    restart: unless-stopped
    depends_on:
      - ntfy
|
||||
374
hosts/vms/homelab-vm/ntfy/server.yml
Normal file
374
hosts/vms/homelab-vm/ntfy/server.yml
Normal file
@@ -0,0 +1,374 @@
|
||||
# ntfy server config file
|
||||
#
|
||||
# Please refer to the documentation at https://ntfy.sh/docs/config/ for details.
|
||||
# All options also support underscores (_) instead of dashes (-) to comply with the YAML spec.
|
||||
|
||||
# Public facing base URL of the service (e.g. https://ntfy.sh or https://ntfy.example.com)
|
||||
#
|
||||
# This setting is required for any of the following features:
|
||||
# - attachments (to return a download URL)
|
||||
# - e-mail sending (for the topic URL in the email footer)
|
||||
# - iOS push notifications for self-hosted servers (to calculate the Firebase poll_request topic)
|
||||
# - Matrix Push Gateway (to validate that the pushkey is correct)
|
||||
#
|
||||
#
|
||||
base-url: "https://ntfy.vish.gg"
|
||||
|
||||
# Listen address for the HTTP & HTTPS web server. If "listen-https" is set, you must also
|
||||
# set "key-file" and "cert-file". Format: [<ip>]:<port>, e.g. "1.2.3.4:8080".
|
||||
#
|
||||
# To listen on all interfaces, you may omit the IP address, e.g. ":443".
|
||||
# To disable HTTP, set "listen-http" to "-".
|
||||
#
|
||||
# listen-http: ":80"
|
||||
# listen-https:
|
||||
|
||||
# Listen on a Unix socket, e.g. /var/lib/ntfy/ntfy.sock
|
||||
# This can be useful to avoid port issues on local systems, and to simplify permissions.
|
||||
#
|
||||
# listen-unix: <socket-path>
|
||||
# listen-unix-mode: <linux permissions, e.g. 0700>
|
||||
|
||||
# Path to the private key & cert file for the HTTPS web server. Not used if "listen-https" is not set.
|
||||
#
|
||||
# key-file: <filename>
|
||||
# cert-file: <filename>
|
||||
|
||||
# If set, also publish messages to a Firebase Cloud Messaging (FCM) topic for your app.
|
||||
# This is optional and only required to save battery when using the Android app.
|
||||
#
|
||||
# firebase-key-file: <filename>
|
||||
|
||||
# If "cache-file" is set, messages are cached in a local SQLite database instead of only in-memory.
|
||||
# This allows for service restarts without losing messages in support of the since= parameter.
|
||||
#
|
||||
# The "cache-duration" parameter defines the duration for which messages will be buffered
|
||||
# before they are deleted. This is required to support the "since=..." and "poll=1" parameter.
|
||||
# To disable the cache entirely (on-disk/in-memory), set "cache-duration" to 0.
|
||||
# The cache file is created automatically, provided that the correct permissions are set.
|
||||
#
|
||||
# The "cache-startup-queries" parameter allows you to run commands when the database is initialized,
|
||||
# e.g. to enable WAL mode (see https://phiresky.github.io/blog/2020/sqlite-performance-tuning/)).
|
||||
# Example:
|
||||
# cache-startup-queries: |
|
||||
# pragma journal_mode = WAL;
|
||||
# pragma synchronous = normal;
|
||||
# pragma temp_store = memory;
|
||||
# pragma busy_timeout = 15000;
|
||||
# vacuum;
|
||||
#
|
||||
# The "cache-batch-size" and "cache-batch-timeout" parameter allow enabling async batch writing
|
||||
# of messages. If set, messages will be queued and written to the database in batches of the given
|
||||
# size, or after the given timeout. This is only required for high volume servers.
|
||||
#
|
||||
# Debian/RPM package users:
|
||||
# Use /var/cache/ntfy/cache.db as cache file to avoid permission issues. The package
|
||||
# creates this folder for you.
|
||||
#
|
||||
# Check your permissions:
|
||||
# If you are running ntfy with systemd, make sure this cache file is owned by the
|
||||
# ntfy user and group by running: chown ntfy.ntfy <filename>.
|
||||
#
|
||||
# cache-file: <filename>
|
||||
# cache-duration: "12h"
|
||||
# cache-startup-queries:
|
||||
# cache-batch-size: 0
|
||||
# cache-batch-timeout: "0ms"
|
||||
|
||||
# If set, access to the ntfy server and API can be controlled on a granular level using
|
||||
# the 'ntfy user' and 'ntfy access' commands. See the --help pages for details, or check the docs.
|
||||
#
|
||||
# - auth-file is the SQLite user/access database; it is created automatically if it doesn't already exist
|
||||
# - auth-default-access defines the default/fallback access if no access control entry is found; it can be
|
||||
# set to "read-write" (default), "read-only", "write-only" or "deny-all".
|
||||
# - auth-startup-queries allows you to run commands when the database is initialized, e.g. to enable
|
||||
# WAL mode. This is similar to cache-startup-queries. See above for details.
|
||||
#
|
||||
# Debian/RPM package users:
|
||||
# Use /var/lib/ntfy/user.db as user database to avoid permission issues. The package
|
||||
# creates this folder for you.
|
||||
#
|
||||
# Check your permissions:
|
||||
# If you are running ntfy with systemd, make sure this user database file is owned by the
|
||||
# ntfy user and group by running: chown ntfy.ntfy <filename>.
|
||||
#
|
||||
# auth-file: <filename>
|
||||
# auth-default-access: "read-write"
|
||||
# auth-startup-queries:
|
||||
|
||||
# If set, the X-Forwarded-For header is used to determine the visitor IP address
|
||||
# instead of the remote address of the connection.
|
||||
#
|
||||
# WARNING: If you are behind a proxy, you must set this, otherwise all visitors are rate limited
|
||||
# as if they are one.
|
||||
#
|
||||
# behind-proxy: false
|
||||
|
||||
# If enabled, clients can attach files to notifications as attachments. Minimum settings to enable attachments
|
||||
# are "attachment-cache-dir" and "base-url".
|
||||
#
|
||||
# - attachment-cache-dir is the cache directory for attached files
|
||||
# - attachment-total-size-limit is the limit of the on-disk attachment cache directory (total size)
|
||||
# - attachment-file-size-limit is the per-file attachment size limit (e.g. 300k, 2M, 100M)
|
||||
# - attachment-expiry-duration is the duration after which uploaded attachments will be deleted (e.g. 3h, 20h)
|
||||
#
|
||||
# attachment-cache-dir:
|
||||
# attachment-total-size-limit: "5G"
|
||||
# attachment-file-size-limit: "15M"
|
||||
# attachment-expiry-duration: "3h"
|
||||
|
||||
# If enabled, allow outgoing e-mail notifications via the 'X-Email' header. If this header is set,
|
||||
# messages will additionally be sent out as e-mail using an external SMTP server.
|
||||
#
|
||||
# As of today, only SMTP servers with plain text auth (or no auth at all), and STARTTLS are supported.
|
||||
# Please also refer to the rate limiting settings below (visitor-email-limit-burst & visitor-email-limit-burst).
|
||||
#
|
||||
# - smtp-sender-addr is the hostname:port of the SMTP server
|
||||
# - smtp-sender-from is the e-mail address of the sender
|
||||
# - smtp-sender-user/smtp-sender-pass are the username and password of the SMTP user (leave blank for no auth)
|
||||
#
|
||||
# smtp-sender-addr:
|
||||
# smtp-sender-from:
|
||||
# smtp-sender-user:
|
||||
# smtp-sender-pass:
|
||||
|
||||
# If enabled, ntfy will launch a lightweight SMTP server for incoming messages. Once configured, users can send
|
||||
# emails to a topic e-mail address to publish messages to a topic.
|
||||
#
|
||||
# - smtp-server-listen defines the IP address and port the SMTP server will listen on, e.g. :25 or 1.2.3.4:25
|
||||
# - smtp-server-domain is the e-mail domain, e.g. ntfy.sh
|
||||
# - smtp-server-addr-prefix is an optional prefix for the e-mail addresses to prevent spam. If set to "ntfy-",
|
||||
# for instance, only e-mails to ntfy-$topic@ntfy.sh will be accepted. If this is not set, all emails to
|
||||
# $topic@ntfy.sh will be accepted (which may obviously be a spam problem).
|
||||
#
|
||||
# smtp-server-listen:
|
||||
# smtp-server-domain:
|
||||
# smtp-server-addr-prefix:
|
||||
|
||||
# Web Push support (background notifications for browsers)
|
||||
#
|
||||
# If enabled, allows ntfy to receive push notifications, even when the ntfy web app is closed. When enabled, users
|
||||
# can enable background notifications in the web app. Once enabled, ntfy will forward published messages to the push
|
||||
# endpoint, which will then forward it to the browser.
|
||||
#
|
||||
# You must configure web-push-public/private key, web-push-file, and web-push-email-address below to enable Web Push.
|
||||
# Run "ntfy webpush keys" to generate the keys.
|
||||
#
|
||||
# - web-push-public-key is the generated VAPID public key, e.g. AA1234BBCCddvveekaabcdfqwertyuiopasdfghjklzxcvbnm1234567890
|
||||
# - web-push-private-key is the generated VAPID private key, e.g. AA2BB1234567890abcdefzxcvbnm1234567890
|
||||
# - web-push-file is a database file to keep track of browser subscription endpoints, e.g. `/var/cache/ntfy/webpush.db`
|
||||
# - web-push-email-address is the admin email address send to the push provider, e.g. `sysadmin@example.com`
|
||||
# - web-push-startup-queries is an optional list of queries to run on startup
|
||||
#
|
||||
# web-push-public-key:
|
||||
# web-push-private-key:
|
||||
# web-push-file:
|
||||
# web-push-email-address:
|
||||
# web-push-startup-queries:
|
||||
|
||||
# If enabled, ntfy can perform voice calls via Twilio via the "X-Call" header.
|
||||
#
|
||||
# - twilio-account is the Twilio account SID, e.g. AC12345beefbeef67890beefbeef122586
|
||||
# - twilio-auth-token is the Twilio auth token, e.g. affebeef258625862586258625862586
|
||||
# - twilio-phone-number is the outgoing phone number you purchased, e.g. REDACTED_PHONE_NUMBER
|
||||
# - twilio-verify-service is the Twilio Verify service SID, e.g. VA12345beefbeef67890beefbeef122586
|
||||
#
|
||||
# twilio-account:
|
||||
# twilio-auth-token:
|
||||
# twilio-phone-number:
|
||||
# twilio-verify-service:
|
||||
|
||||
# Interval in which keepalive messages are sent to the client. This is to prevent
|
||||
# intermediaries closing the connection for inactivity.
|
||||
#
|
||||
# Note that the Android app has a hardcoded timeout at 77s, so it should be less than that.
|
||||
#
|
||||
# keepalive-interval: "45s"
|
||||
|
||||
# Interval in which the manager prunes old messages, deletes topics
|
||||
# and prints the stats.
|
||||
#
|
||||
# manager-interval: "1m"
|
||||
|
||||
# Defines topic names that are not allowed, because they are otherwise used. There are a few default topics
|
||||
# that cannot be used (e.g. app, account, settings, ...). To extend the default list, define them here.
|
||||
#
|
||||
# Example:
|
||||
# disallowed-topics:
|
||||
# - about
|
||||
# - pricing
|
||||
# - contact
|
||||
#
|
||||
# disallowed-topics:
|
||||
|
||||
# Defines the root path of the web app, or disables the web app entirely.
|
||||
#
|
||||
# Can be any simple path, e.g. "/", "/app", or "/ntfy". For backwards-compatibility reasons,
|
||||
# the values "app" (maps to "/"), "home" (maps to "/app"), or "disable" (maps to "") to disable
|
||||
# the web app entirely.
|
||||
#
|
||||
# web-root: /
|
||||
|
||||
# Various feature flags used to control the web app, and API access, mainly around user and
|
||||
# account management.
|
||||
#
|
||||
# - enable-signup allows users to sign up via the web app, or API
|
||||
# - enable-login allows users to log in via the web app, or API
|
||||
# - enable-reservations allows users to reserve topics (if their tier allows it)
|
||||
#
|
||||
# enable-signup: false
|
||||
# enable-login: false
|
||||
# enable-reservations: false
|
||||
|
||||
# Server URL of a Firebase/APNS-connected ntfy server (likely "https://ntfy.sh").
|
||||
#
|
||||
# iOS users:
|
||||
# If you use the iOS ntfy app, you MUST configure this to receive timely notifications. You'll likely want this:
|
||||
#
|
||||
upstream-base-url: "https://ntfy.sh"
|
||||
#
|
||||
# If set, all incoming messages will publish a "poll_request" message to the configured upstream server, containing
|
||||
# the message ID of the original message, instructing the iOS app to poll this server for the actual message contents.
|
||||
# This is to prevent the upstream server and Firebase/APNS from being able to read the message.
|
||||
#
|
||||
# - upstream-base-url is the base URL of the upstream server. Should be "https://ntfy.sh".
|
||||
# - upstream-access-token is the token used to authenticate with the upstream server. This is only required
|
||||
#   if you exceed the upstream rate limits, or the upstream server requires authentication.
|
||||
#
|
||||
# upstream-base-url:
|
||||
# upstream-access-token:
|
||||
|
||||
# Configures message-specific limits
|
||||
#
|
||||
# - message-size-limit defines the max size of a message body. Please note message sizes >4K are NOT RECOMMENDED,
|
||||
# and largely untested. If FCM and/or APNS is used, the limit should stay 4K, because their limits are around that size.
|
||||
# If you increase this size limit regardless, FCM and APNS will NOT work for large messages.
|
||||
# - message-delay-limit defines the max delay of a message when using the "Delay" header.
|
||||
#
|
||||
# message-size-limit: "4k"
|
||||
# message-delay-limit: "3d"
|
||||
|
||||
# Rate limiting: Total number of topics before the server rejects new topics.
|
||||
#
|
||||
# global-topic-limit: 15000
|
||||
|
||||
# Rate limiting: Number of subscriptions per visitor (IP address)
|
||||
#
|
||||
# visitor-subscription-limit: 30
|
||||
|
||||
# Rate limiting: Allowed GET/PUT/POST requests per second, per visitor:
|
||||
# - visitor-request-limit-burst is the initial bucket of requests each visitor has
|
||||
# - visitor-request-limit-replenish is the rate at which the bucket is refilled
|
||||
# - visitor-request-limit-exempt-hosts is a comma-separated list of hostnames, IPs or CIDRs to be
|
||||
# exempt from request rate limiting. Hostnames are resolved at the time the server is started.
|
||||
# Example: "1.2.3.4,ntfy.example.com,8.7.6.0/24"
|
||||
#
|
||||
# visitor-request-limit-burst: 60
|
||||
# visitor-request-limit-replenish: "5s"
|
||||
# visitor-request-limit-exempt-hosts: ""
|
||||
|
||||
# Rate limiting: Hard daily limit of messages per visitor and day. The limit is reset
|
||||
# every day at midnight UTC. If the limit is not set (or set to zero), the request
|
||||
# limit (see above) governs the upper limit.
|
||||
#
|
||||
# visitor-message-daily-limit: 0
|
||||
|
||||
# Rate limiting: Allowed emails per visitor:
|
||||
# - visitor-email-limit-burst is the initial bucket of emails each visitor has
|
||||
# - visitor-email-limit-replenish is the rate at which the bucket is refilled
|
||||
#
|
||||
# visitor-email-limit-burst: 16
|
||||
# visitor-email-limit-replenish: "1h"
|
||||
|
||||
# Rate limiting: Attachment size and bandwidth limits per visitor:
|
||||
# - visitor-attachment-total-size-limit is the total storage limit used for attachments per visitor
|
||||
# - visitor-attachment-daily-bandwidth-limit is the total daily attachment download/upload traffic limit per visitor
|
||||
#
|
||||
# visitor-attachment-total-size-limit: "100M"
|
||||
# visitor-attachment-daily-bandwidth-limit: "500M"
|
||||
|
||||
# Rate limiting: Enable subscriber-based rate limiting (mostly used for UnifiedPush)
|
||||
#
|
||||
# If subscriber-based rate limiting is enabled, messages published on UnifiedPush topics** (topics starting with "up")
|
||||
# will be counted towards the "rate visitor" of the topic. A "rate visitor" is the first subscriber to the topic.
|
||||
#
|
||||
# Once enabled, a client subscribing to UnifiedPush topics via HTTP stream, or websockets, will be automatically registered as
|
||||
# a "rate visitor", i.e. the visitor whose rate limits will be used when publishing on this topic. Note that setting the rate visitor
|
||||
# requires **read-write permission** on the topic.
|
||||
#
|
||||
# If this setting is enabled, publishing to UnifiedPush topics will lead to a HTTP 507 response if
|
||||
# no "rate visitor" has been previously registered. This is to avoid burning the publisher's "visitor-message-daily-limit".
|
||||
#
|
||||
# visitor-subscriber-rate-limiting: false
|
||||
|
||||
# Payments integration via Stripe
|
||||
#
|
||||
# - stripe-secret-key is the key used for the Stripe API communication. Setting this values
|
||||
# enables payments in the ntfy web app (e.g. Upgrade dialog). See https://dashboard.stripe.com/apikeys.
|
||||
# - stripe-webhook-key is the key required to validate the authenticity of incoming webhooks from Stripe.
|
||||
# Webhooks are essential up keep the local database in sync with the payment provider. See https://dashboard.stripe.com/webhooks.
|
||||
# - billing-contact is an email address or website displayed in the "Upgrade tier" dialog to let people reach
|
||||
# out with billing questions. If unset, nothing will be displayed.
|
||||
#
|
||||
# stripe-secret-key:
|
||||
# stripe-webhook-key:
|
||||
# billing-contact:
|
||||
|
||||
# Metrics
|
||||
#
|
||||
# ntfy can expose Prometheus-style metrics via a /metrics endpoint, or on a dedicated listen IP/port.
|
||||
# Metrics may be considered sensitive information, so before you enable them, be sure you know what you are
|
||||
# doing, and/or secure access to the endpoint in your reverse proxy.
|
||||
#
|
||||
# - enable-metrics enables the /metrics endpoint for the default ntfy server (i.e. HTTP, HTTPS and/or Unix socket)
|
||||
# - metrics-listen-http exposes the metrics endpoint via a dedicated [IP]:port. If set, this option implicitly
|
||||
# enables metrics as well, e.g. "10.0.1.1:9090" or ":9090"
|
||||
#
|
||||
# enable-metrics: false
|
||||
# metrics-listen-http:
|
||||
|
||||
# Profiling
|
||||
#
|
||||
# ntfy can expose Go's net/http/pprof endpoints to support profiling of the ntfy server. If enabled, ntfy will listen
|
||||
# on a dedicated listen IP/port, which can be accessed via the web browser on http://<ip>:<port>/debug/pprof/.
|
||||
# This can be helpful to expose bottlenecks, and visualize call flows. See https://pkg.go.dev/net/http/pprof for details.
|
||||
#
|
||||
# profile-listen-http:
|
||||
|
||||
# Logging options
|
||||
#
|
||||
# By default, ntfy logs to the console (stderr), with an "info" log level, and in a human-readable text format.
|
||||
# ntfy supports five different log levels, can also write to a file, log as JSON, and even supports granular
|
||||
# log level overrides for easier debugging. Some options (log-level and log-level-overrides) can be hot reloaded
|
||||
# by calling "kill -HUP $pid" or "systemctl reload ntfy".
|
||||
#
|
||||
# - log-format defines the output format, can be "text" (default) or "json"
|
||||
# - log-file is a filename to write logs to. If this is not set, ntfy logs to stderr.
|
||||
# - log-level defines the default log level, can be one of "trace", "debug", "info" (default), "warn" or "error".
|
||||
# Be aware that "debug" (and particularly "trace") can be VERY CHATTY. Only turn them on briefly for debugging purposes.
|
||||
# - log-level-overrides lets you override the log level if certain fields match. This is incredibly powerful
|
||||
# for debugging certain parts of the system (e.g. only the account management, or only a certain visitor).
|
||||
# This is an array of strings in the format:
|
||||
# - "field=value -> level" to match a value exactly, e.g. "tag=manager -> trace"
|
||||
# - "field -> level" to match any value, e.g. "time_taken_ms -> debug"
|
||||
# Warning: Using log-level-overrides has a performance penalty. Only use it for temporary debugging.
|
||||
#
|
||||
# Check your permissions:
|
||||
# If you are running ntfy with systemd, make sure this log file is owned by the
|
||||
# ntfy user and group by running: chown ntfy.ntfy <filename>.
|
||||
#
|
||||
# Example (good for production):
|
||||
# log-level: info
|
||||
# log-format: json
|
||||
# log-file: /var/log/ntfy.log
|
||||
#
|
||||
# Example level overrides (for debugging, only use temporarily):
|
||||
# log-level-overrides:
|
||||
# - "tag=manager -> trace"
|
||||
# - "visitor_ip=1.2.3.4 -> debug"
|
||||
# - "time_taken_ms -> debug"
|
||||
#
|
||||
# log-level: info
|
||||
# log-level-overrides:
|
||||
# log-format: text
|
||||
# log-file:
|
||||
12
hosts/vms/homelab-vm/openai_whisper.txt
Normal file
12
hosts/vms/homelab-vm/openai_whisper.txt
Normal file
@@ -0,0 +1,12 @@
|
||||
/home/youruser/whisper-docker/
|
||||
├── docker-compose.yml
|
||||
├── Dockerfile
|
||||
├── audio/ <-- this is ./audio on the host
|
||||
│ ├── sample.mp3
|
||||
└── models/
|
||||
|
||||
mkdir audio
|
||||
cp ~/Downloads/myfile.mp3 audio/
|
||||
docker compose run --rm whisper myfile.mp3 --model small --fp16 False
|
||||
|
||||
sudo docker compose run --rm whisper tape4.mp4 --model small --fp16 False --language en
|
||||
41
hosts/vms/homelab-vm/openhands.yaml
Normal file
41
hosts/vms/homelab-vm/openhands.yaml
Normal file
@@ -0,0 +1,41 @@
|
||||
# OpenHands - AI Software Development Agent
# Port: 3001
# Docs: https://docs.openhands.dev
# LLM: Claude Sonnet 4

version: '3.8'

services:
  openhands:
    image: docker.openhands.dev/openhands/openhands:1.1
    container_name: openhands-app
    ports:
      - "3001:3000"
    extra_hosts:
      - "host.docker.internal:host-gateway"
    environment:
      # --- LLM configuration ---
      - LLM_API_KEY=${ANTHROPIC_API_KEY}
      - LLM_MODEL=anthropic/claude-sonnet-4-20250514
      # --- Sandbox configuration ---
      - SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.openhands.dev/openhands/runtime:1.1-nikolaik
      - LOG_ALL_EVENTS=true
      - RUN_AS_OPENHANDS=true
      - OPENHANDS_USER_ID=42420
      # Docker bridge gateway IP so runtime containers can reach the main container.
      - SANDBOX_LOCAL_RUNTIME_URL=http://172.17.0.1
      - USE_HOST_NETWORK=false
      - WORKSPACE_BASE=/opt/workspace_base
      - SANDBOX_USER_ID=0
      - FILE_STORE=local
      - FILE_STORE_PATH=/.openhands
      - INIT_GIT_IN_EMPTY_WORKSPACE=1
      # Default MCP disabled: the runtime can't resolve host.docker.internal.
      - DISABLE_DEFAULT_MCP=true
    volumes:
      # Docker socket is required so the agent can spawn runtime containers.
      - /var/run/docker.sock:/var/run/docker.sock
      - openhands-data:/.openhands
    restart: unless-stopped

volumes:
  openhands-data:
|
||||
41
hosts/vms/homelab-vm/openproject.yml
Normal file
41
hosts/vms/homelab-vm/openproject.yml
Normal file
@@ -0,0 +1,41 @@
|
||||
# OpenProject - Project management
# Port: 8083 (host) -> 8080 (container)
# Open source project management

version: "3.8"

services:
  db:
    image: postgres:16
    container_name: openproject-db
    restart: unless-stopped
    environment:
      POSTGRES_USER: openproject
      POSTGRES_PASSWORD: "REDACTED_PASSWORD"
      POSTGRES_DB: openproject
    volumes:
      - /home/homelab/docker/openproject/postgres:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U openproject -d openproject"]
      interval: 30s
      timeout: 5s
      retries: 5

  openproject:
    image: openproject/openproject:16.0.0-slim
    container_name: openproject
    restart: unless-stopped
    # Wait for Postgres to accept connections before starting the app.
    depends_on:
      db:
        condition: service_healthy
    ports:
      - "8083:8080"
    environment:
      # Replace with the homelab host's LAN IP or resolvable name.
      OPENPROJECT_HOST__NAME: "homelab.vish.local"
      OPENPROJECT_DISABLE__HOST__NAME__CHECK: "true"
      OPENPROJECT_HTTPS: "false"
      # Fix: removed the dangling `_GITEA_TOKEN"` fragment left after the
      # closing quote — it made this line invalid YAML.
      OPENPROJECT_SECRET_KEY_BASE: "REDACTED_SECRET_KEY_BASE"
      OPENPROJECT_EE__MANAGER__VISIBLE: "false"
      DATABASE_URL: "postgresql://openproject:REDACTED_PASSWORD@db:5432/openproject"
    volumes:
      - /home/homelab/docker/openproject/assets:/var/openproject/assets
|
||||
15
hosts/vms/homelab-vm/paperminecraft.yaml
Normal file
15
hosts/vms/homelab-vm/paperminecraft.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
# Paper Minecraft - Game server
# Port: 25565
# Paper Minecraft server

version: "3.8"

services:
  # Bind-mount example.
  linuxgsm-pmc-bind:
    image: gameservermanagers/gameserver:pmc
    # Alternative registry:
    # image: ghcr.io/gameservermanagers/gameserver:pmc
    container_name: pmcserver
    restart: unless-stopped
    volumes:
      - /home/homelab/docker/pmc:/data
    network_mode: host
|
||||
21
hosts/vms/homelab-vm/perplexica.yaml
Normal file
21
hosts/vms/homelab-vm/perplexica.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
# Perplexica - AI-powered search engine
# Port: 4785
# Configure LLM providers via web UI at http://192.168.0.210:4785/settings
#
# Configured to use Seattle Ollama instance (100.82.197.124:11434) via Tailscale
# This distributes LLM inference load to the Contabo VPS with CPU-only inference

services:
  perplexica:
    image: itzcrazykns1337/perplexica:latest
    container_name: perplexica
    ports:
      - "4785:3000"
    environment:
      # Remote Ollama endpoint reached over the tailnet.
      - OLLAMA_BASE_URL=http://100.82.197.124:11434
    volumes:
      - perplexica-data:/home/perplexica/data
    restart: unless-stopped

volumes:
  perplexica-data:
|
||||
16
hosts/vms/homelab-vm/podgrab.yml
Normal file
16
hosts/vms/homelab-vm/podgrab.yml
Normal file
@@ -0,0 +1,16 @@
|
||||
# Podgrab - Podcast manager
# Port: 8389 (host) -> 8080 (container)
# Podcast download and management

version: '3.3'

services:
  podgrab:
    container_name: podgrab
    image: akhilrex/podgrab
    ports:
      # Fix: header comment previously claimed port 8080, but the service is
      # actually published on host port 8389.
      - "8389:8080"
    volumes:
      - /mnt/atlantis_docker/podgrab/podcasts:/assets
      - /mnt/atlantis_docker/podgrab/config:/config
    restart: unless-stopped
|
||||
22
hosts/vms/homelab-vm/portainer_agent.yaml
Normal file
22
hosts/vms/homelab-vm/portainer_agent.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
# Portainer Edge Agent - homelab-vm
# Connects to Portainer server on Atlantis (100.83.230.112:8000)
# Deploy: docker compose -f portainer_agent.yaml up -d
#
# NOTE(review): EDGE_KEY below is a live credential committed to VCS — consider
# moving it to an env file or secret store.

services:
  portainer_edge_agent:
    image: portainer/agent:2.33.7
    container_name: portainer_edge_agent
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /var/lib/docker/volumes:/var/lib/docker/volumes
      - /:/host
      - portainer_agent_data:/data
    environment:
      EDGE: "1"
      EDGE_ID: "18271a7b-03ea-4945-946c-4a845e1bb3ff"
      EDGE_KEY: "aHR0cDovLzEwMC44My4yMzAuMTEyOjEwMDAwfGh0dHA6Ly8xMDAuODMuMjMwLjExMjo4MDAwfGtDWjVkTjJyNXNnQTJvMEF6UDN4R3h6enBpclFqa05Wa0FCQkU0R1IxWFU9fDQ0MzM5OQ"
      EDGE_INSECURE_POLL: "1"

volumes:
  portainer_agent_data:
|
||||
53
hosts/vms/homelab-vm/proxitok.yaml
Normal file
53
hosts/vms/homelab-vm/proxitok.yaml
Normal file
@@ -0,0 +1,53 @@
|
||||
# ProxiTok - Privacy-respecting TikTok frontend
# Port: 9770
# Alternative TikTok frontend - no ads, no tracking, server-side requests

services:
  proxitok:
    container_name: proxitok-web
    image: ghcr.io/pablouser1/proxitok:master
    ports:
      - "9770:8080"
    environment:
      - LATTE_CACHE=/cache
      - API_CACHE=redis
      - REDIS_HOST=proxitok-redis
      - REDIS_PORT=6379
      - API_CHROMEDRIVER=http://proxitok-chromedriver:4444
    volumes:
      - proxitok-cache:/cache
    depends_on:
      - redis
      - chromedriver
    networks:
      - proxitok
    restart: unless-stopped

  redis:
    container_name: proxitok-redis
    image: redis:7-alpine
    volumes:
      - proxitok-redis:/data
    networks:
      - proxitok
    init: true
    restart: unless-stopped

  chromedriver:
    container_name: proxitok-chromedriver
    image: robcherry/docker-chromedriver:latest
    # Chrome needs a larger shared-memory segment than the 64M default.
    shm_size: 2g
    environment:
      - CHROMEDRIVER_WHITELISTED_IPS=
    privileged: true
    networks:
      - proxitok
    restart: unless-stopped

volumes:
  proxitok-cache:
  proxitok-redis:

networks:
  proxitok:
    driver: bridge
|
||||
21
hosts/vms/homelab-vm/redlib.yaml
Normal file
21
hosts/vms/homelab-vm/redlib.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
# Redlib - Reddit frontend (maintained fork of Libreddit)
|
||||
# Port: 9000
|
||||
# Privacy-respecting Reddit frontend
|
||||
|
||||
services:
|
||||
redlib:
|
||||
image: quay.io/redlib/redlib:latest
|
||||
container_name: Redlib
|
||||
hostname: redlib
|
||||
mem_limit: 2g
|
||||
cpu_shares: 768
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
read_only: true
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--spider", "-q", "--tries=1", "http://localhost:8080/settings"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
ports:
|
||||
- 9000:8080
|
||||
restart: on-failure:5
|
||||
47
hosts/vms/homelab-vm/romm/config.yml
Normal file
47
hosts/vms/homelab-vm/romm/config.yml
Normal file
@@ -0,0 +1,47 @@
|
||||
# mariushosting example of a RomM configuration file
|
||||
# Only uncomment the lines you want to use/modify, or add new ones where needed
|
||||
|
||||
exclude:
|
||||
# Exclude platforms to be scanned
|
||||
platforms: [] # ['my_excluded_platform_1', 'my_excluded_platform_2']
|
||||
|
||||
# Exclude roms or parts of roms to be scanned
|
||||
roms:
|
||||
# Single file games section.
|
||||
# Will not apply to files that are in sub-folders (multi-disc roms, games with updates, DLC, patches, etc.)
|
||||
single_file:
|
||||
# Exclude all files with certain extensions to be scanned
|
||||
extensions: [] # ['xml', 'txt']
|
||||
|
||||
# Exclude matched file names to be scanned.
|
||||
# Supports unix filename pattern matching
|
||||
# Can also exclude files by extension
|
||||
names: [] # ['info.txt', '._*', '*.nfo']
|
||||
|
||||
# Multi files games section
|
||||
# Will apply to files that are in sub-folders (multi-disc roms, games with updates, DLC, patches, etc.)
|
||||
multi_file:
|
||||
# Exclude matched 'folder' names to be scanned (RomM identifies folders as multi file games)
|
||||
names: [] # ['my_multi_file_game', 'DLC']
|
||||
|
||||
# Exclude files within sub-folders.
|
||||
parts:
|
||||
# Exclude matched file names to be scanned from multi file roms
|
||||
# Keep in mind that RomM doesn't scan folders inside multi files games,
|
||||
# so there is no need to exclude folders from inside of multi files games.
|
||||
names: [] # ['data.xml', '._*'] # Supports unix filename pattern matching
|
||||
|
||||
# Exclude all files with certain extensions to be scanned from multi file roms
|
||||
extensions: [] # ['xml', 'txt']
|
||||
|
||||
system:
|
||||
# Associate different platform names to your current file system platform names
|
||||
# [your custom platform folder name]: [RomM platform name]
|
||||
# In this example if you have a 'gc' folder, RomM will treat it like the 'ngc' folder and if you have a 'psx' folder, RomM will treat it like the 'ps' folder
|
||||
platforms: {} # { gc: 'ngc', psx: 'ps' }
|
||||
|
||||
# Associate one platform with its main version
|
||||
versions: {} # { naomi: 'arcade' }
|
||||
|
||||
# The folder name where your roms are located
|
||||
filesystem: {} # { roms_folder: 'roms' } For example if your folder structure is /home/user/library/roms_folder
|
||||
55
hosts/vms/homelab-vm/romm/romm.yaml
Normal file
55
hosts/vms/homelab-vm/romm/romm.yaml
Normal file
@@ -0,0 +1,55 @@
|
||||
version: "3.9"
|
||||
|
||||
services:
|
||||
db:
|
||||
image: mariadb:11.4-noble # LTS Long Time Support until May 29, 2029
|
||||
container_name: RomM-DB
|
||||
security_opt:
|
||||
- no-new-privileges:true  # was "false" (a no-op); every other service in this repo sets true
|
||||
environment:
|
||||
MYSQL_DATABASE: romm
|
||||
MYSQL_USER: rommuser
|
||||
MYSQL_PASSWORD: "REDACTED_PASSWORD"
|
||||
MYSQL_ROOT_PASSWORD: "REDACTED_PASSWORD"
|
||||
TZ: America/Los_Angeles
|
||||
volumes:
|
||||
- /mnt/atlantis_docker/romm/db:/var/lib/mysql:rw
|
||||
restart: on-failure:5
|
||||
|
||||
romm:
|
||||
image: rommapp/romm:latest
|
||||
container_name: RomM
|
||||
depends_on:
|
||||
- db
|
||||
ports:
|
||||
- "7676:8080"
|
||||
environment:
|
||||
ROMM_DB_DRIVER: mariadb
|
||||
DB_HOST: db
|
||||
DB_NAME: romm
|
||||
DB_USER: rommuser
|
||||
DB_PASSWD: "REDACTED_PASSWORD"
|
||||
DB_PORT: 3306
|
||||
ROMM_AUTH_SECRET_KEY: e9c36749cf1cb5f8df757bc0REDACTED_GITEA_TOKEN
|
||||
# Metadata providers (optional):
|
||||
# SCREENSCRAPER_USER:
|
||||
# SCREENSCRAPER_PASSWORD:
|
||||
# IGDB_CLIENT_ID:
|
||||
# IGDB_CLIENT_SECRET:
|
||||
# MOBYGAMES_API_KEY:
|
||||
# STEAMGRIDDB_API_KEY:
|
||||
# RETROACHIEVEMENTS_API_KEY:
|
||||
# HASHEOUS_API_ENABLED: true
|
||||
volumes:
|
||||
- /mnt/atlantis_docker/romm/resources:/romm/resources:rw
|
||||
- /mnt/atlantis_docker/romm/redis:/redis-data:rw
|
||||
- /mnt/atlantis_docker/romm/games/library:/romm/library:rw
|
||||
- /mnt/atlantis_docker/romm/games/assets:/romm/assets:rw
|
||||
- /mnt/atlantis_docker/romm/games/config:/romm/config:rw
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://127.0.0.1:8080/"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 90s
|
||||
restart: on-failure:10
|
||||
24
hosts/vms/homelab-vm/roundcube.yaml
Normal file
24
hosts/vms/homelab-vm/roundcube.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
# Roundcube - Webmail
|
||||
# Port: 7512
|
||||
# Web-based email client
|
||||
|
||||
version: "3.9"
|
||||
|
||||
services:
|
||||
roundcube:
|
||||
image: roundcube/roundcubemail:latest
|
||||
container_name: roundcube
|
||||
environment:
|
||||
ROUNDCUBEMAIL_DEFAULT_HOST: ssl://imap.gmail.com
|
||||
ROUNDCUBEMAIL_DEFAULT_PORT: 993
|
||||
ROUNDCUBEMAIL_SMTP_SERVER: tls://smtp.gmail.com
|
||||
ROUNDCUBEMAIL_SMTP_PORT: 587
|
||||
ROUNDCUBEMAIL_UPLOAD_MAX_FILESIZE: 25M
|
||||
ROUNDCUBEMAIL_SKIN: elastic
|
||||
volumes:
|
||||
- /mnt/atlantis_docker/roundcube/data:/var/roundcube
|
||||
- /mnt/atlantis_docker/roundcube/config:/var/roundcube/config
|
||||
- /mnt/atlantis_docker/roundcube/logs:/var/roundcube/logs
|
||||
ports:
|
||||
- "7512:80" # or 7512:80 if you prefer
|
||||
restart: unless-stopped
|
||||
37
hosts/vms/homelab-vm/roundcube_protonmail.yaml
Normal file
37
hosts/vms/homelab-vm/roundcube_protonmail.yaml
Normal file
@@ -0,0 +1,37 @@
|
||||
# Roundcube ProtonMail Bridge
|
||||
# Port: 7513
|
||||
# Webmail with ProtonMail support
|
||||
|
||||
version: "3.9"
|
||||
|
||||
services:
|
||||
roundcube-protonmail:
|
||||
image: roundcube/roundcubemail:latest
|
||||
container_name: roundcube-protonmail
|
||||
environment:
|
||||
# ProtonMail Bridge IMAP + SMTP (plain inside the Docker network)
|
||||
ROUNDCUBEMAIL_DEFAULT_HOST: protonmail-bridge
|
||||
ROUNDCUBEMAIL_DEFAULT_PORT: 143
|
||||
ROUNDCUBEMAIL_SMTP_SERVER: protonmail-bridge
|
||||
ROUNDCUBEMAIL_SMTP_PORT: 25
|
||||
ROUNDCUBEMAIL_UPLOAD_MAX_FILESIZE: 25M
|
||||
ROUNDCUBEMAIL_SKIN: elastic
|
||||
volumes:
|
||||
- /mnt/atlantis_docker/roundcube_protonmail/data:/var/roundcube
|
||||
- /mnt/atlantis_docker/roundcube_protonmail/config:/var/roundcube/config
|
||||
- /mnt/atlantis_docker/roundcube_protonmail/logs:/var/roundcube/logs
|
||||
ports:
|
||||
- "7513:80" # exposed via your tailnet (change if needed)
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- protonmail-bridge
|
||||
|
||||
protonmail-bridge:
|
||||
image: shenxn/protonmail-bridge:latest
|
||||
container_name: protonmail-bridge
|
||||
environment:
|
||||
- TZ=America/Los_Angeles
|
||||
command: ["protonmail-bridge", "--no-keychain", "--cli"]
|
||||
volumes:
|
||||
- /mnt/atlantis_docker/roundcube_protonmail/bridge:/root/.config/protonmail/bridge
|
||||
restart: unless-stopped
|
||||
33
hosts/vms/homelab-vm/satisfactory.yaml
Normal file
33
hosts/vms/homelab-vm/satisfactory.yaml
Normal file
@@ -0,0 +1,33 @@
|
||||
# Satisfactory - Game server
|
||||
# Port: 7777
|
||||
# Satisfactory dedicated game server
|
||||
|
||||
services:
|
||||
satisfactory-server:
|
||||
container_name: 'satisfactory-server'
|
||||
hostname: 'satisfactory-server'
|
||||
image: 'wolveix/satisfactory-server:latest'
|
||||
ports:
|
||||
- '7777:7777/udp'
|
||||
- '7777:7777/tcp'
|
||||
volumes:
|
||||
- /home/homelab/docker/sf:/data
|
||||
environment:
|
||||
- MAXPLAYERS=4
|
||||
- PGID=1000
|
||||
- PUID=1000
|
||||
- ROOTLESS=false
|
||||
- STEAMBETA=false
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: bash /healthcheck.sh
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 120s
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 6G
|
||||
reservations:
|
||||
memory: 4G
|
||||
55
hosts/vms/homelab-vm/scrutiny.yaml
Normal file
55
hosts/vms/homelab-vm/scrutiny.yaml
Normal file
@@ -0,0 +1,55 @@
|
||||
# Scrutiny — SMART Disk Health Monitoring Hub
|
||||
#
|
||||
# Runs on homelab-vm (Tailscale 100.67.40.126)
|
||||
# Web UI: http://100.67.40.126:8090 (also: scrutiny.vish.gg via NPM)
|
||||
# InfluxDB: internal to this stack
|
||||
#
|
||||
# Collectors ship metrics from physical hosts to this hub.
|
||||
# Collector composes at:
|
||||
# hosts/synology/atlantis/scrutiny-collector.yaml
|
||||
# hosts/synology/calypso/scrutiny-collector.yaml
|
||||
# hosts/synology/setillo/scrutiny-collector.yaml
|
||||
# hosts/physical/concord-nuc/scrutiny-collector.yaml
|
||||
# hosts/edge/rpi5-vish/scrutiny-collector.yaml
|
||||
#
|
||||
# Deploy: Portainer GitOps on endpoint 443399 (homelab-vm)
|
||||
|
||||
services:
|
||||
scrutiny-web:
|
||||
image: ghcr.io/analogj/scrutiny:master-web
|
||||
container_name: scrutiny-web
|
||||
ports:
|
||||
- "8090:8080"
|
||||
volumes:
|
||||
- scrutiny-config:/opt/scrutiny/config
|
||||
- scrutiny-influx:/opt/scrutiny/influxdb # NOTE(review): same volume is mounted by scrutiny-influxdb at /var/lib/influxdb2 — confirm the web container actually needs this mount
|
||||
environment:
|
||||
GIN_MODE: release
|
||||
SCRUTINY_WEB_INFLUXDB_HOST: scrutiny-influxdb
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8080/api/health"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 60s
|
||||
depends_on:
|
||||
scrutiny-influxdb:
|
||||
condition: service_healthy
|
||||
|
||||
scrutiny-influxdb:
|
||||
image: influxdb:2.2
|
||||
container_name: scrutiny-influxdb
|
||||
volumes:
|
||||
- scrutiny-influx:/var/lib/influxdb2
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8086/ping"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 20s
|
||||
|
||||
volumes:
|
||||
scrutiny-config:
|
||||
scrutiny-influx:
|
||||
68
hosts/vms/homelab-vm/shlink.yml
Normal file
68
hosts/vms/homelab-vm/shlink.yml
Normal file
@@ -0,0 +1,68 @@
|
||||
# Shlink - URL shortener
|
||||
# Port: 8080
|
||||
# Self-hosted URL shortener
|
||||
|
||||
version: "3.9"
|
||||
services:
|
||||
shlink-db:
|
||||
image: postgres
|
||||
container_name: Shlink-DB
|
||||
hostname: shlink-db
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
healthcheck:
|
||||
test: ["CMD", "pg_isready", "-q", "-d", "shlink", "-U", "shlinkuser"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
user: "1000:1000"  # quoted: colon-containing plain scalars are fragile in YAML
|
||||
volumes:
|
||||
- /home/homelab/docker/shlinkdb:/var/lib/postgresql/data
|
||||
environment:
|
||||
POSTGRES_DB: shlink
|
||||
POSTGRES_USER: shlinkuser
|
||||
POSTGRES_PASSWORD: "REDACTED_PASSWORD"
|
||||
restart: unless-stopped
|
||||
|
||||
shlink:
|
||||
image: shlinkio/shlink:stable
|
||||
container_name: Shlink
|
||||
hostname: shlink
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
ports:
|
||||
- 8335:8080
|
||||
environment:
|
||||
- TIMEZONE=America/Los_Angeles
|
||||
- INITIAL_API_KEY="REDACTED_API_KEY"
|
||||
- DB_DRIVER=postgres
|
||||
- DB_NAME=shlink
|
||||
- DB_USER=shlinkuser
|
||||
- DB_PASSWORD="REDACTED_PASSWORD"
|
||||
- DB_HOST=shlink-db
|
||||
- DB_PORT=5432
|
||||
- DEFAULT_DOMAIN=url.thevish.io
|
||||
- IS_HTTPS_ENABLED=true
|
||||
- GEOLITE_LICENSE_KEY="REDACTED_GEOLITE_KEY"
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
shlink-db:
|
||||
condition: service_healthy  # db defines a healthcheck; wait for it to pass before starting shlink
|
||||
|
||||
shlink-web:
|
||||
image: shlinkio/shlink-web-client:stable
|
||||
container_name: Shlink-WEB
|
||||
hostname: shlink-web
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
healthcheck:
|
||||
test: wget --no-verbose --tries=1 --spider http://localhost:80/ || exit 1
|
||||
ports:
|
||||
- 8336:80
|
||||
environment:
|
||||
- SHLINK_SERVER_NAME=thevish
|
||||
- SHLINK_SERVER_URL=https://url.thevish.io
|
||||
- SHLINK_SERVER_API_KEY="REDACTED_API_KEY"
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- shlink
|
||||
15
hosts/vms/homelab-vm/signal_api.yaml
Normal file
15
hosts/vms/homelab-vm/signal_api.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
# Signal API - Signal messenger REST API
|
||||
# Port: 8080
|
||||
# REST API for Signal messenger automation
|
||||
version: "3"
|
||||
services:
|
||||
signal-cli-rest-api:
|
||||
container_name: signal-api
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- 8080:8080
|
||||
volumes:
|
||||
- /home/homelab/docker/signal:/home/.local/share/signal-cli
|
||||
environment:
|
||||
- MODE=native
|
||||
image: bbernhard/signal-cli-rest-api
|
||||
23
hosts/vms/homelab-vm/syncthing.yml
Normal file
23
hosts/vms/homelab-vm/syncthing.yml
Normal file
@@ -0,0 +1,23 @@
|
||||
# Syncthing - File synchronization
|
||||
# Port: 8384 (web), 22000 (sync)
|
||||
# Continuous file synchronization between devices
|
||||
version: "2.1"
|
||||
services:
|
||||
syncthing:
|
||||
image: lscr.io/linuxserver/syncthing:latest
|
||||
container_name: syncthing
|
||||
hostname: syncthing #optional
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=America/Los_Angeles
|
||||
volumes:
|
||||
- /root/docker/syncthing/config:/config
|
||||
- /root/docker/syncthing/data1:/data1  # was missing container path — a lone path creates an anonymous volume, not a bind mount
|
||||
- /root/docker/syncthing/data2:/data2  # was missing container path — a lone path creates an anonymous volume, not a bind mount
|
||||
ports:
|
||||
- 8384:8384
|
||||
- 22000:22000/tcp
|
||||
- 22000:22000/udp
|
||||
- 21027:21027/udp
|
||||
restart: unless-stopped
|
||||
18
hosts/vms/homelab-vm/watchyourlan.yaml
Normal file
18
hosts/vms/homelab-vm/watchyourlan.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
# WatchYourLAN - Network scanner
|
||||
# Port: 8840
|
||||
# Lightweight network IP scanner with web UI
|
||||
services:
|
||||
watchyourlan:
|
||||
container_name: WatchYourLAN
|
||||
environment:
|
||||
- TZ=America/Los_Angeles
|
||||
- HOST=192.168.0.210
|
||||
- PORT=8840
|
||||
- IFACES=ens18
|
||||
- THEME=grass
|
||||
- COLOR=dark
|
||||
volumes:
|
||||
- /home/homelab/docker/wyl:/data/WatchYourLAN
|
||||
network_mode: host
|
||||
restart: unless-stopped
|
||||
image: aceberg/watchyourlan:v2
|
||||
15
hosts/vms/homelab-vm/webcheck.yaml
Normal file
15
hosts/vms/homelab-vm/webcheck.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
# Web Check - Website analysis
|
||||
# Port: 3000
|
||||
# All-in-one website OSINT analysis tool
|
||||
version: "3.9"
|
||||
services:
|
||||
webcheck:
|
||||
container_name: Web-Check
|
||||
image: lissy93/web-check
|
||||
mem_limit: 4g
|
||||
cpu_shares: 768
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
restart: on-failure:5
|
||||
ports:
|
||||
- 6160:3000
|
||||
23
hosts/vms/homelab-vm/webcord.yml
Normal file
23
hosts/vms/homelab-vm/webcord.yml
Normal file
@@ -0,0 +1,23 @@
|
||||
# WebCord - Discord client
|
||||
# Port: 3000
|
||||
# Web-based Discord client
|
||||
|
||||
---
|
||||
version: "2.1"
|
||||
services:
|
||||
webcord:
|
||||
image: lscr.io/linuxserver/webcord:latest
|
||||
container_name: webcord
|
||||
security_opt:
|
||||
- seccomp:unconfined #optional
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=America/Los_Angeles
|
||||
volumes:
|
||||
- /home/homelab/docker/webcord:/config
|
||||
ports:
|
||||
- 3000:3000
|
||||
- 3001:3001
|
||||
shm_size: "1gb"
|
||||
restart: unless-stopped
|
||||
28
hosts/vms/matrix-ubuntu-vm/.gitignore
vendored
Normal file
28
hosts/vms/matrix-ubuntu-vm/.gitignore
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
# Environment files with secrets
|
||||
.env
|
||||
.env.production
|
||||
*.env.local
|
||||
|
||||
# Database dumps
|
||||
*.sql
|
||||
*.dump
|
||||
|
||||
# Logs
|
||||
*.log
|
||||
logs/
|
||||
|
||||
# Media files
|
||||
public/system/
|
||||
media_store/
|
||||
|
||||
# Docker volumes
|
||||
redis/
|
||||
data/
|
||||
|
||||
# OS files
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# IDE
|
||||
.vscode/
|
||||
.idea/
|
||||
341
hosts/vms/matrix-ubuntu-vm/README.md
Normal file
341
hosts/vms/matrix-ubuntu-vm/README.md
Normal file
@@ -0,0 +1,341 @@
|
||||
# Ubuntu VM Homelab
|
||||
|
||||
Self-hosted communication platform with Mastodon, Mattermost, and Matrix/Element on a single Ubuntu VM sharing PostgreSQL.
|
||||
|
||||
## Current Deployment Status
|
||||
|
||||
| Service | Status | Domain | Internal Port | Nginx Port |
|
||||
|---------|--------|--------|---------------|------------|
|
||||
| ✅ Mastodon | Running | mastodon.vish.gg | 3000, 4000 | 8082 |
|
||||
| ✅ Mattermost | Running | mm.crista.love | 8065 | 8081 |
|
||||
| ✅ Matrix (mx.vish.gg) | Running | mx.vish.gg | 8018 | 8082 |
|
||||
| ✅ Matrix (vish - legacy) | Running | matrix.thevish.io | 8008 | 8081 |
|
||||
| ✅ PostgreSQL | Running | - | 5432 | - |
|
||||
| ✅ Redis | Running | - | 6379 | - |
|
||||
| ✅ TURN (coturn) | Running | mx.vish.gg:3479 | 3479 | - |
|
||||
|
||||
## VM Specifications
|
||||
|
||||
- **OS**: Ubuntu 24.04.4 LTS (x86_64)
|
||||
- **Hostname**: matrix-ubuntu
|
||||
- **LAN IP**: 192.168.0.154 (static) — `ssh ubuntu-matrix`
|
||||
- **Tailscale IP**: 100.85.21.51
|
||||
- **SSH user**: test
|
||||
- **RAM**: 7.7 GB
|
||||
- **CPU**: 4 cores
|
||||
- **Storage**: 96 GB
|
||||
- **Network**: Static IP set via netplan (`/etc/netplan/99-static.yaml`), cloud-init network management disabled
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Cloudflare Proxy │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Nginx │
|
||||
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
|
||||
│ │ :8080 │ │ :8081 │ │ :8082 │ │
|
||||
│ │ Matrix │ │ Mattermost │ │ Mastodon │ │
|
||||
│ └─────────────┘ └─────────────┘ └─────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
│ │ │
|
||||
▼ ▼ ▼
|
||||
┌─────────────┐ ┌─────────────┐ ┌─────────────┐
|
||||
│ Synapse │ │ Mattermost │ │ Mastodon │
|
||||
│ :8008 │ │ Docker │ │ Docker │
|
||||
│ + Element │ │ :8065 │ │ :3000 │
|
||||
└─────────────┘ └─────────────┘ │ :4000 │
|
||||
│ │ └─────────────┘
|
||||
│ │ │
|
||||
└───────────────────┴──────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────┐
|
||||
│ PostgreSQL │
|
||||
│ :5432 │
|
||||
│ │
|
||||
│ - synapse │
|
||||
│ - mattermost │
|
||||
│ - mastodon │
|
||||
└─────────────────┘
|
||||
```
|
||||
|
||||
## Databases
|
||||
|
||||
All services share the same PostgreSQL 16 server:
|
||||
|
||||
| Database | User | Purpose |
|
||||
|----------|------|---------|
|
||||
| synapse | synapse | Matrix homeserver (vish - legacy) |
|
||||
| synapse_mx | synapse_mx | Matrix homeserver (mx.vish.gg - federated) |
|
||||
| mattermost | mmuser | Mattermost |
|
||||
| mastodon_production | mastodon | Mastodon |
|
||||
|
||||
## Docker Containers
|
||||
|
||||
```
|
||||
NAMES IMAGE STATUS
|
||||
mastodon-streaming-1 ghcr.io/mastodon/mastodon-streaming:v4.5.7 Up
|
||||
mastodon-web-1 ghcr.io/mastodon/mastodon:v4.5.7 Up
|
||||
mastodon-sidekiq-1 ghcr.io/mastodon/mastodon:v4.5.7 Up
|
||||
mastodon-redis-1 redis:7-alpine Up
|
||||
mattermost mattermost/mattermost-team-edition:11.4 Up (healthy)
|
||||
```
|
||||
|
||||
## Systemd Services (bare-metal)
|
||||
|
||||
```
|
||||
UNIT SERVICE VERSION
|
||||
synapse.service Synapse (legacy) 1.148.0 — /opt/synapse, port 8008
|
||||
synapse-mx.service Synapse (primary) 1.148.0 — /opt/synapse-mx, port 8018
|
||||
```
|
||||
|
||||
Both Synapse instances share the venv at `/opt/synapse/venv/`.
|
||||
|
||||
## Quick Start
|
||||
|
||||
1. Clone this repo to your VM
|
||||
2. Copy environment templates and edit with your values
|
||||
3. Run the setup script
|
||||
|
||||
```bash
|
||||
git clone https://git.vish.gg/Vish/Ubuntu-vm-homelab.git
|
||||
cd Ubuntu-vm-homelab
|
||||
./scripts/setup.sh
|
||||
```
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
Ubuntu-vm-homelab/
|
||||
├── mastodon/
|
||||
│ ├── docker-compose.yml
|
||||
│ └── .env.production.template
|
||||
├── mattermost/
|
||||
│ ├── docker-compose.yml
|
||||
│ └── config.json.template
|
||||
├── matrix-element/
|
||||
│ ├── homeserver.yaml.template
|
||||
│ └── element-config.json.template
|
||||
├── nginx/
|
||||
│ ├── mastodon.conf
|
||||
│ ├── mattermost.conf
|
||||
│ └── matrix.conf
|
||||
├── scripts/
|
||||
│ ├── setup.sh
|
||||
│ ├── backup.sh
|
||||
│ └── update.sh
|
||||
└── README.md
|
||||
```
|
||||
|
||||
## Credentials
|
||||
|
||||
Stored securely on the server:
|
||||
- `/opt/mastodon/.env.production` - Mastodon secrets
|
||||
- `/opt/mattermost/config/config.json` - Mattermost config
|
||||
- `/opt/synapse/homeserver.yaml` - Matrix config
|
||||
|
||||
## Cloudflare Setup
|
||||
|
||||
Each service requires a DNS record pointing to the VM's public IP with Cloudflare proxy enabled.
|
||||
Configure origin rules to route to the correct nginx port.
|
||||
|
||||
## Maintenance
|
||||
|
||||
### Backup
|
||||
```bash
|
||||
./scripts/backup.sh
|
||||
```
|
||||
|
||||
### View Logs
|
||||
```bash
|
||||
# Mastodon
|
||||
cd /opt/mastodon && docker compose logs -f
|
||||
|
||||
# Mattermost
|
||||
docker logs -f mattermost
|
||||
|
||||
# Matrix (mx.vish.gg)
|
||||
tail -f /opt/synapse-mx/homeserver.log
|
||||
|
||||
# Matrix (legacy vish)
|
||||
tail -f /opt/synapse/homeserver.log
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Updating Services
|
||||
|
||||
### Update Mastodon
|
||||
|
||||
```bash
|
||||
cd /opt/mastodon
|
||||
|
||||
# Pull latest images
|
||||
docker compose pull
|
||||
|
||||
# Stop services
|
||||
docker compose down
|
||||
|
||||
# Run database migrations
|
||||
docker compose run --rm web bundle exec rails db:migrate
|
||||
|
||||
# Precompile assets (if needed)
|
||||
docker compose run --rm web bundle exec rails assets:precompile
|
||||
|
||||
# Start services
|
||||
docker compose up -d
|
||||
|
||||
# Verify
|
||||
docker compose ps
|
||||
```
|
||||
|
||||
**Check for release notes:** https://github.com/mastodon/mastodon/releases
|
||||
|
||||
### Update Mattermost
|
||||
|
||||
```bash
|
||||
cd /opt/mattermost
|
||||
|
||||
# Check current version
|
||||
docker exec mattermost mattermost version
|
||||
|
||||
# Pull latest image
|
||||
docker compose pull
|
||||
|
||||
# Stop and restart
|
||||
docker compose down
|
||||
docker compose up -d
|
||||
|
||||
# Verify
|
||||
docker logs mattermost | head -20
|
||||
```
|
||||
|
||||
**Check for release notes:** https://docs.mattermost.com/about/mattermost-server-releases.html
|
||||
|
||||
### Update Matrix Synapse (both instances share the same venv)
|
||||
|
||||
Both instances use `/opt/synapse/venv/` — upgrade once, restart both.
|
||||
|
||||
```bash
|
||||
# Check current version
|
||||
curl -s http://localhost:8018/_synapse/admin/v1/server_version
|
||||
|
||||
# Upgrade (pin to a specific version, e.g. 1.148.0)
|
||||
sudo /opt/synapse/venv/bin/pip install 'matrix-synapse==1.148.0'
|
||||
|
||||
# Restart both services
|
||||
sudo systemctl restart synapse synapse-mx
|
||||
|
||||
# Verify
|
||||
curl -s http://localhost:8008/_synapse/admin/v1/server_version # legacy
|
||||
curl -s http://localhost:8018/_synapse/admin/v1/server_version # mx
|
||||
```
|
||||
|
||||
**Check for release notes:** https://github.com/element-hq/synapse/releases
|
||||
|
||||
> **Note:** If startup fails with `InsufficientPrivilege: must be owner of table`, see
|
||||
> the DB ownership fix in `docs/MATRIX.md#db-ownership-fix`.
|
||||
|
||||
### Update Element Web
|
||||
|
||||
```bash
|
||||
# Check latest version at https://github.com/element-hq/element-web/releases
|
||||
ELEMENT_VERSION="v1.12.11" # Change to latest version
|
||||
|
||||
# Download and extract
|
||||
cd /tmp
|
||||
wget https://github.com/element-hq/element-web/releases/download/${ELEMENT_VERSION}/element-${ELEMENT_VERSION}.tar.gz
|
||||
tar -xzf element-${ELEMENT_VERSION}.tar.gz
|
||||
|
||||
# Backup current config
|
||||
cp /opt/element/web/config.json /tmp/element-config-backup.json
|
||||
|
||||
# Back up configs
|
||||
cp /opt/element/web/config.json /tmp/element-config-web.json
|
||||
cp /opt/element/web-thevish/config.json /tmp/element-config-thevish.json
|
||||
|
||||
# Replace files (both installs share the same release)
|
||||
sudo rm -rf /opt/element/web/* /opt/element/web-thevish/*
|
||||
sudo cp -r element-${ELEMENT_VERSION}/* /opt/element/web/
|
||||
sudo cp -r element-${ELEMENT_VERSION}/* /opt/element/web-thevish/
|
||||
|
||||
# Restore configs
|
||||
sudo cp /tmp/element-config-web.json /opt/element/web/config.json
|
||||
sudo cp /tmp/element-config-thevish.json /opt/element/web-thevish/config.json
|
||||
|
||||
# Verify (nginx serves static files, no restart needed)
|
||||
cat /opt/element/web/version
|
||||
cat /opt/element/web-thevish/version
|
||||
|
||||
# Cleanup
|
||||
rm -rf /tmp/element-${ELEMENT_VERSION}* /tmp/element-config-*.json
|
||||
```
|
||||
|
||||
### Update TURN Server (coturn)
|
||||
|
||||
```bash
|
||||
# Update via apt
|
||||
sudo apt update
|
||||
sudo apt upgrade coturn
|
||||
|
||||
# Restart
|
||||
sudo systemctl restart coturn
|
||||
|
||||
# Verify
|
||||
sudo systemctl status coturn
|
||||
```
|
||||
|
||||
### Update All Services (Quick Script)
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# Save as /opt/scripts/update-all.sh
|
||||
|
||||
echo "=== Updating Mastodon ==="
|
||||
cd /opt/mastodon
|
||||
docker compose pull
|
||||
docker compose down
|
||||
docker compose run --rm web bundle exec rails db:migrate
|
||||
docker compose up -d
|
||||
|
||||
echo "=== Updating Mattermost ==="
|
||||
cd /opt/mattermost
|
||||
docker compose pull
|
||||
docker compose down
|
||||
docker compose up -d
|
||||
|
||||
echo "=== Updating Synapse ==="
|
||||
cd /opt/synapse
|
||||
source venv/bin/activate
|
||||
pip install --upgrade matrix-synapse
|
||||
pkill -f 'synapse.app.homeserver'
|
||||
sleep 2
|
||||
sudo -u synapse /opt/synapse/venv/bin/python -m synapse.app.homeserver \
|
||||
--config-path=/opt/synapse-mx/homeserver.yaml --daemonize
|
||||
sudo -u synapse /opt/synapse/venv/bin/python -m synapse.app.homeserver \
|
||||
--config-path=/opt/synapse/homeserver.yaml --daemonize
|
||||
|
||||
echo "=== Updating System Packages ==="
|
||||
sudo apt update && sudo apt upgrade -y
|
||||
|
||||
echo "=== Done! ==="
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Federation Status
|
||||
|
||||
| Service | Protocol | Federation |
|
||||
|---------|----------|------------|
|
||||
| Matrix (mx.vish.gg) | Matrix | ✅ Enabled |
|
||||
| Matrix (vish) | Matrix | ❌ Disabled (invalid server_name) |
|
||||
| Mastodon | ActivityPub | ✅ Enabled |
|
||||
| Mattermost | Shared Channels | ❌ Enterprise only |
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
28
hosts/vms/matrix-ubuntu-vm/diun.yaml
Normal file
28
hosts/vms/matrix-ubuntu-vm/diun.yaml
Normal file
@@ -0,0 +1,28 @@
|
||||
# Diun — Docker Image Update Notifier
|
||||
#
|
||||
# Watches all running containers on this host and sends ntfy
|
||||
# notifications when upstream images update their digest.
|
||||
# Schedule: Mondays 09:00 (weekly cadence).
|
||||
#
|
||||
# ntfy topic: https://ntfy.vish.gg/diun
|
||||
|
||||
services:
|
||||
diun:
|
||||
image: crazymax/diun:latest
|
||||
container_name: diun
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- diun-data:/data
|
||||
environment:
|
||||
LOG_LEVEL: info
|
||||
DIUN_WATCH_WORKERS: "20"
|
||||
DIUN_WATCH_SCHEDULE: "0 9 * * 1"
|
||||
DIUN_WATCH_JITTER: 30s
|
||||
DIUN_PROVIDERS_DOCKER: "true"
|
||||
DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT: "true"
|
||||
DIUN_NOTIF_NTFY_ENDPOINT: "https://ntfy.vish.gg"
|
||||
DIUN_NOTIF_NTFY_TOPIC: "diun"
|
||||
restart: unless-stopped
|
||||
|
||||
volumes:
|
||||
diun-data:
|
||||
171
hosts/vms/matrix-ubuntu-vm/docs/FEDERATION.md
Normal file
171
hosts/vms/matrix-ubuntu-vm/docs/FEDERATION.md
Normal file
@@ -0,0 +1,171 @@
|
||||
# Mastodon Federation Guide
|
||||
|
||||
## What is Federation?
|
||||
|
||||
Federation allows your Mastodon instance to communicate with other Mastodon instances (and other ActivityPub-compatible servers). Users can follow accounts on other servers, and posts are shared across the network.
|
||||
|
||||
## Federation Requirements
|
||||
|
||||
### 1. HTTPS (Required)
|
||||
Federation only works over HTTPS. Cloudflare provides this automatically when proxying is enabled.
|
||||
|
||||
### 2. Correct Domain Configuration
|
||||
```env
|
||||
# .env.production
|
||||
LOCAL_DOMAIN=mastodon.vish.gg
|
||||
```
|
||||
|
||||
⚠️ **Warning**: Changing LOCAL_DOMAIN after setup will break existing accounts!
|
||||
|
||||
### 3. Webfinger Endpoint
|
||||
Must respond correctly at:
|
||||
```
|
||||
https://mastodon.vish.gg/.well-known/webfinger?resource=acct:username@mastodon.vish.gg
|
||||
```
|
||||
|
||||
Expected response:
|
||||
```json
|
||||
{
|
||||
"subject": "acct:vish@mastodon.vish.gg",
|
||||
"aliases": [
|
||||
"https://mastodon.vish.gg/@vish",
|
||||
"https://mastodon.vish.gg/users/vish"
|
||||
],
|
||||
"links": [
|
||||
{
|
||||
"rel": "http://webfinger.net/rel/profile-page",
|
||||
"type": "text/html",
|
||||
"href": "https://mastodon.vish.gg/@vish"
|
||||
},
|
||||
{
|
||||
"rel": "self",
|
||||
"type": "application/activity+json",
|
||||
"href": "https://mastodon.vish.gg/users/vish"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### 4. ActivityPub Actor Endpoint
|
||||
Must respond at:
|
||||
```
|
||||
https://mastodon.vish.gg/users/vish
|
||||
```
|
||||
With `Accept: application/activity+json` header.
|
||||
|
||||
## Testing Federation
|
||||
|
||||
### Test Webfinger (from external server)
|
||||
```bash
|
||||
curl "https://mastodon.vish.gg/.well-known/webfinger?resource=acct:vish@mastodon.vish.gg"
|
||||
```
|
||||
|
||||
### Test Actor Endpoint
|
||||
```bash
|
||||
curl -H "Accept: application/activity+json" "https://mastodon.vish.gg/users/vish"
|
||||
```
|
||||
|
||||
### Test Outbound Federation
|
||||
Search for a remote user in your Mastodon instance:
|
||||
1. Go to https://mastodon.vish.gg
|
||||
2. Search for `@Gargron@mastodon.social`
|
||||
3. If federation works, you'll see the user's profile
|
||||
|
||||
### Test from Another Instance
|
||||
Go to any public Mastodon instance and search for:
|
||||
```
|
||||
@vish@mastodon.vish.gg
|
||||
```
|
||||
|
||||
## Cloudflare Configuration
|
||||
|
||||
### Required Settings
|
||||
|
||||
1. **Proxy Status**: Orange cloud (Proxied) ✅
|
||||
2. **SSL/TLS Mode**: Full (strict)
|
||||
3. **Cache Level**: Standard (or Bypass for API endpoints)
|
||||
|
||||
### Origin Rules (if using non-standard ports)
|
||||
|
||||
Since nginx listens on port 8082, configure an origin rule:
|
||||
|
||||
**Rule**:
|
||||
- If hostname equals `mastodon.vish.gg`
|
||||
- Then: Override destination port to 8082
|
||||
|
||||
### Firewall Rules
|
||||
Ensure port 8082 is accessible from Cloudflare IPs or use Cloudflare Tunnel.
|
||||
|
||||
## Common Federation Issues
|
||||
|
||||
### Issue: Remote users can't find your instance
|
||||
**Cause**: DNS not properly configured or Cloudflare not proxying
|
||||
**Fix**:
|
||||
1. Verify DNS A record points to your server
|
||||
2. Enable Cloudflare proxy (orange cloud)
|
||||
3. Wait for DNS propagation
|
||||
|
||||
### Issue: Webfinger returns 301 redirect
|
||||
**Normal behavior**: Mastodon redirects HTTP to HTTPS
|
||||
**Solution**: Ensure requests come via HTTPS
|
||||
|
||||
### Issue: Cannot follow remote users
|
||||
**Cause**: Outbound connections blocked
|
||||
**Fix**:
|
||||
1. Check firewall allows outbound HTTPS (443)
|
||||
2. Verify sidekiq is running: `docker compose ps`
|
||||
3. Check sidekiq logs: `docker compose logs sidekiq`
|
||||
|
||||
### Issue: Federation lag
|
||||
**Cause**: High queue backlog in sidekiq
|
||||
**Fix**:
|
||||
```bash
|
||||
# Check queue status
|
||||
docker compose exec web bin/tootctl sidekiq status
|
||||
|
||||
# Clear dead jobs if needed
|
||||
docker compose exec web bin/tootctl sidekiq kill
|
||||
```
|
||||
|
||||
## Federation Debug Commands
|
||||
|
||||
```bash
|
||||
# Check instance connectivity
|
||||
cd /opt/mastodon
|
||||
docker compose exec web bin/tootctl domains crawl mastodon.social
|
||||
|
||||
# Refresh a remote account
|
||||
docker compose exec web bin/tootctl accounts refresh @Gargron@mastodon.social
|
||||
|
||||
# Clear delivery failures
|
||||
docker compose exec web bin/tootctl domains purge <domain>
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Block/Allow Lists
|
||||
Configure in Admin → Federation:
|
||||
- Block specific domains
|
||||
- Silence (limit) specific domains
|
||||
- Allow specific domains (whitelist mode)
|
||||
|
||||
### Rate Limiting
|
||||
Mastodon has built-in rate limiting for federation requests to prevent abuse.
|
||||
|
||||
## Monitoring Federation Health
|
||||
|
||||
### Check Sidekiq Queues
|
||||
```bash
|
||||
docker compose exec web bin/tootctl sidekiq stats
|
||||
```
|
||||
|
||||
Healthy queues should have:
|
||||
- Low `push` queue (outbound deliveries)
|
||||
- Low `pull` queue (fetching remote content)
|
||||
- Minimal retries
|
||||
|
||||
### Check Federation Stats
|
||||
In Admin → Dashboard:
|
||||
- Known instances count
|
||||
- Active users (remote)
|
||||
- Incoming/outgoing messages
|
||||
321
hosts/vms/matrix-ubuntu-vm/docs/MATRIX.md
Normal file
321
hosts/vms/matrix-ubuntu-vm/docs/MATRIX.md
Normal file
@@ -0,0 +1,321 @@
|
||||
# Matrix Synapse Setup
|
||||
|
||||
This VM runs **two Matrix Synapse instances**:
|
||||
|
||||
| Instance | server_name | Domain | Federation | Purpose |
|
||||
|----------|-------------|--------|------------|---------|
|
||||
| **Primary** | `mx.vish.gg` | https://mx.vish.gg | ✅ Yes | Main server with federation |
|
||||
| **Legacy** | `vish` | https://matrix.thevish.io | ❌ No | Historical data archive |
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
Internet
|
||||
│
|
||||
┌────────┴────────┐
|
||||
│ Cloudflare │
|
||||
└────────┬────────┘
|
||||
│
|
||||
┌─────────────┴─────────────┐
|
||||
│ │
|
||||
▼ ▼
|
||||
┌─────────────────┐ ┌─────────────────┐
|
||||
│ mx.vish.gg │ │ matrix.thevish.io│
|
||||
│ (port 443) │ │ (port 443) │
|
||||
└────────┬────────┘ └────────┬─────────┘
|
||||
│ │
|
||||
▼ ▼
|
||||
┌─────────────────┐ ┌─────────────────┐
|
||||
│ Synology Reverse│ │ Synology Reverse│
|
||||
│ Proxy → :8082 │ │ Proxy → :8081 │
|
||||
└────────┬────────┘ └────────┬─────────┘
|
||||
│ │
|
||||
└───────────┬───────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────┐
|
||||
│ Ubuntu VM (192.168.0.154) │
|
||||
│ ┌──────────────┐ ┌──────────────┐ │
|
||||
│ │ Nginx :8082 │ │ Nginx :8081 │ │
|
||||
│ │ mx.vish.gg │ │ thevish.io │ │
|
||||
│ └──────┬───────┘ └──────┬───────┘ │
|
||||
│ │ │ │
|
||||
│ ▼ ▼ │
|
||||
│ ┌──────────────┐ ┌──────────────┐ │
|
||||
│ │ Synapse:8018 │ │ Synapse:8008 │ │
|
||||
│ │ mx.vish.gg │ │ vish │ │
|
||||
│ └──────┬───────┘ └──────┬───────┘ │
|
||||
│ │ │ │
|
||||
│ ▼ ▼ │
|
||||
│ ┌──────────────┐ ┌──────────────┐ │
|
||||
│ │ synapse_mx │ │ synapse │ │
|
||||
│ │ PostgreSQL │ │ PostgreSQL │ │
|
||||
│ └──────────────┘ └──────────────┘ │
|
||||
└─────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Primary Server: mx.vish.gg
|
||||
|
||||
**This is the main server with federation enabled.**
|
||||
|
||||
### Configuration
|
||||
|
||||
- **Location**: `/opt/synapse-mx/`
|
||||
- **Config**: `/opt/synapse-mx/homeserver.yaml`
|
||||
- **Signing Key**: `/opt/synapse-mx/mx.vish.gg.signing.key`
|
||||
- **Media Store**: `/opt/synapse-mx/media_store/`
|
||||
- **Database**: `synapse_mx` (user: `synapse_mx`)
|
||||
- **Port**: 8018 (Synapse) → 8082 (Nginx)
|
||||
|
||||
### User IDs
|
||||
|
||||
Users on this server have IDs like: `@username:mx.vish.gg`
|
||||
|
||||
### Federation
|
||||
|
||||
- ✅ Can communicate with matrix.org and other federated servers
|
||||
- ✅ Can join public rooms on other servers
|
||||
- ✅ Other users can find and message your users
|
||||
|
||||
### Managing the Service
|
||||
|
||||
```bash
|
||||
sudo systemctl start synapse-mx
|
||||
sudo systemctl stop synapse-mx
|
||||
sudo systemctl restart synapse-mx
|
||||
sudo systemctl status synapse-mx
|
||||
```
|
||||
|
||||
Service file: `/etc/systemd/system/synapse-mx.service`
|
||||
|
||||
## Legacy Server: vish (matrix.thevish.io)
|
||||
|
||||
**This server contains historical data and cannot federate.**
|
||||
|
||||
### Why No Federation?
|
||||
|
||||
The `server_name` is `vish` which is not a valid domain. Other Matrix servers cannot discover it because:
|
||||
- No DNS record for `vish`
|
||||
- Cannot serve `.well-known` at `https://vish/`
|
||||
|
||||
### Configuration
|
||||
|
||||
- **Location**: `/opt/synapse/`
|
||||
- **Config**: `/opt/synapse/homeserver.yaml`
|
||||
- **Signing Key**: `/opt/synapse/vish.signing.key`
|
||||
- **Media Store**: `/opt/synapse/media_store/`
|
||||
- **Database**: `synapse` (user: `synapse`)
|
||||
- **Port**: 8008 (Synapse) → 8081 (Nginx)
|
||||
|
||||
### User IDs
|
||||
|
||||
Users on this server have IDs like: `@username:vish`
|
||||
|
||||
### Managing the Service
|
||||
|
||||
```bash
|
||||
sudo systemctl start synapse
|
||||
sudo systemctl stop synapse
|
||||
sudo systemctl restart synapse
|
||||
sudo systemctl status synapse
|
||||
```
|
||||
|
||||
Service file: `/etc/systemd/system/synapse.service`
|
||||
|
||||
## TURN Server (coturn)
|
||||
|
||||
TURN server enables voice/video calls to work through NAT.
|
||||
|
||||
### Configuration
|
||||
|
||||
- **Config**: `/etc/turnserver.conf`
|
||||
- **Ports**: 3479 (TURN), 5350 (TURNS), 49201-49250 (Media relay UDP)
|
||||
- **Realm**: `matrix.thevish.io`
|
||||
- **Auth Secret**: Shared with Synapse (`turn_shared_secret`)
|
||||
|
||||
### Key Settings
|
||||
|
||||
```ini
|
||||
listening-port=3479
|
||||
tls-listening-port=5350
|
||||
listening-ip=0.0.0.0
|
||||
external-ip=YOUR_WAN_IP/192.168.0.154
|
||||
static-auth-secret=<shared-secret>
|
||||
realm=matrix.thevish.io
|
||||
min-port=49201
|
||||
max-port=49250
|
||||
```
|
||||
|
||||
### Port Forwarding Required
|
||||
|
||||
| Port | Protocol | Purpose |
|
||||
|------|----------|---------|
|
||||
| 3479 | TCP/UDP | TURN |
|
||||
| 5350 | TCP/UDP | TURNS (TLS) |
|
||||
| 49201-49250 | UDP | Media relay |
|
||||
|
||||
## Element Web
|
||||
|
||||
Element Web is served by Nginx for both instances.
|
||||
|
||||
### mx.vish.gg
|
||||
|
||||
- **Location**: `/opt/element/web/`
|
||||
- **Config**: `/opt/element/web/config.json`
|
||||
- **URL**: https://mx.vish.gg/
|
||||
|
||||
### matrix.thevish.io
|
||||
|
||||
- **Location**: `/opt/element/web-thevish/`
|
||||
- **Config**: `/opt/element/web-thevish/config.json`
|
||||
- **URL**: https://matrix.thevish.io/
|
||||
|
||||
## Nginx Configuration
|
||||
|
||||
### mx.vish.gg (port 8082)
|
||||
|
||||
Location: `/etc/nginx/sites-available/mx-vish-gg`
|
||||
|
||||
```nginx
|
||||
server {
|
||||
listen 8082;
|
||||
server_name mx.vish.gg;
|
||||
root /opt/element/web;
|
||||
|
||||
location /health { proxy_pass http://127.0.0.1:8018; }
|
||||
location ~ ^(/_matrix|/_synapse/client) { proxy_pass http://127.0.0.1:8018; }
|
||||
location /_matrix/federation { proxy_pass http://127.0.0.1:8018; }
|
||||
location /.well-known/matrix/server { return 200 '{"m.server": "mx.vish.gg:443"}'; }
|
||||
location /.well-known/matrix/client { return 200 '{"m.homeserver": {"base_url": "https://mx.vish.gg"}}'; }
|
||||
location / { try_files $uri $uri/ /index.html; }
|
||||
}
|
||||
```
|
||||
|
||||
### matrix.thevish.io (port 8081)
|
||||
|
||||
Location: `/etc/nginx/sites-available/matrix-thevish`
|
||||
|
||||
```nginx
|
||||
server {
|
||||
listen 8081;
|
||||
server_name matrix.thevish.io;
|
||||
root /opt/element/web-thevish;
|
||||
|
||||
location /health { proxy_pass http://127.0.0.1:8008; }
|
||||
location ~ ^(/_matrix|/_synapse/client) { proxy_pass http://127.0.0.1:8008; }
|
||||
location /.well-known/matrix/server { return 200 '{"m.server": "matrix.thevish.io:443"}'; }
|
||||
location /.well-known/matrix/client { return 200 '{"m.homeserver": {"base_url": "https://matrix.thevish.io"}}'; }
|
||||
location / { try_files $uri $uri/ /index.html; }
|
||||
}
|
||||
```
|
||||
|
||||
## Synology Reverse Proxy
|
||||
|
||||
| Name | Source (HTTPS) | Destination (HTTP) |
|
||||
|------|----------------|-------------------|
|
||||
| mx_vish_gg | mx.vish.gg:443 | 192.168.0.154:8082 |
|
||||
| matrix_thevish | matrix.thevish.io:443 | 192.168.0.154:8081 |
|
||||
|
||||
## Cloudflare DNS
|
||||
|
||||
| Type | Name | Content | Proxy |
|
||||
|------|------|---------|-------|
|
||||
| A | mx.vish.gg | YOUR_WAN_IP | ✅ Proxied |
|
||||
| A | matrix.thevish.io | YOUR_WAN_IP | ✅ Proxied |
|
||||
|
||||
## Database Backup
|
||||
|
||||
### Backup mx.vish.gg
|
||||
|
||||
```bash
|
||||
sudo -u postgres pg_dump -Fc synapse_mx > synapse_mx_backup_$(date +%Y%m%d).dump
|
||||
```
|
||||
|
||||
### Backup legacy vish
|
||||
|
||||
```bash
|
||||
sudo -u postgres pg_dump -Fc synapse > synapse_vish_backup_$(date +%Y%m%d).dump
|
||||
```
|
||||
|
||||
### Restore
|
||||
|
||||
```bash
|
||||
sudo -u postgres pg_restore -d <database_name> <backup_file.dump>
|
||||
```
|
||||
|
||||
## Testing Federation
|
||||
|
||||
Use the Matrix Federation Tester:
|
||||
|
||||
```bash
|
||||
curl -s "https://federationtester.matrix.org/api/report?server_name=mx.vish.gg" | python3 -c "
|
||||
import sys, json
|
||||
d = json.load(sys.stdin)
|
||||
print(f'Federation OK: {d.get(\"FederationOK\", False)}')
|
||||
"
|
||||
```
|
||||
|
||||
## Creating Users
|
||||
|
||||
### Via registration (if enabled)
|
||||
|
||||
Go to https://mx.vish.gg and click "Create account"
|
||||
|
||||
### Via command line
|
||||
|
||||
```bash
|
||||
cd /opt/synapse-mx
|
||||
sudo -u synapse /opt/synapse/venv/bin/register_new_matrix_user \
|
||||
-c /opt/synapse-mx/homeserver.yaml \
|
||||
-u <username> -p <password> -a
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Check if Synapse is running
|
||||
|
||||
```bash
|
||||
sudo systemctl status synapse synapse-mx
|
||||
curl -s http://localhost:8008/_synapse/admin/v1/server_version # legacy
|
||||
curl -s http://localhost:8018/_synapse/admin/v1/server_version # mx
|
||||
```
|
||||
|
||||
### View logs
|
||||
|
||||
```bash
|
||||
sudo journalctl -u synapse-mx -f   # mx.vish.gg
|
||||
sudo journalctl -u synapse -f      # legacy vish
|
||||
```
|
||||
|
||||
### Test health endpoints
|
||||
|
||||
```bash
|
||||
curl http://localhost:8018/health # mx.vish.gg
|
||||
curl http://localhost:8008/health # legacy vish
|
||||
```
|
||||
|
||||
### Restart nginx
|
||||
|
||||
```bash
|
||||
sudo nginx -t && sudo systemctl reload nginx
|
||||
```
|
||||
|
||||
### DB ownership fix (apply if migrations fail on upgrade)
|
||||
|
||||
If Synapse fails to start after upgrade with `InsufficientPrivilege: must be owner of table`,
|
||||
the DB tables need their ownership corrected. Run for the affected database:
|
||||
|
||||
```bash
|
||||
# For synapse (legacy) DB:
|
||||
sudo -u postgres psql synapse -t -c "
|
||||
SELECT 'ALTER TABLE public.' || tablename || ' OWNER TO synapse;'
|
||||
FROM pg_tables WHERE schemaname='public' AND tableowner <> 'synapse';
|
||||
" | sudo -u postgres psql synapse
|
||||
|
||||
sudo -u postgres psql synapse -t -c "
|
||||
SELECT 'ALTER SEQUENCE ' || sequence_name || ' OWNER TO synapse;'
|
||||
FROM information_schema.sequences WHERE sequence_schema='public';
|
||||
" | sudo -u postgres psql synapse
|
||||
|
||||
# For synapse_mx DB, replace 'synapse' with 'synapse_mx' throughout
|
||||
```
|
||||
259
hosts/vms/matrix-ubuntu-vm/docs/SETUP.md
Normal file
259
hosts/vms/matrix-ubuntu-vm/docs/SETUP.md
Normal file
@@ -0,0 +1,259 @@
|
||||
# Deployment Documentation
|
||||
|
||||
Complete setup guide for the Ubuntu VM Homelab with Mastodon, Mattermost, and Matrix/Element.
|
||||
|
||||
## Server Access
|
||||
|
||||
```
|
||||
IP: YOUR_WAN_IP
|
||||
SSH Port: 65533
|
||||
Username: test
|
||||
Password: "REDACTED_PASSWORD"
|
||||
```
|
||||
|
||||
## Service Credentials
|
||||
|
||||
### Mastodon Admin
|
||||
- **Username**: vish
|
||||
- **Email**: your-email@example.com
|
||||
- **Password**: `REDACTED_PASSWORD`
|
||||
- **URL**: https://mastodon.vish.gg
|
||||
|
||||
### Mattermost
|
||||
- **URL**: https://mm.crista.love
|
||||
- **Admin**: (configured during first access)
|
||||
|
||||
### Matrix/Element
|
||||
- **URL**: https://mx.vish.gg
|
||||
- **Homeserver**: mx.vish.gg
|
||||
|
||||
## PostgreSQL Configuration
|
||||
|
||||
PostgreSQL 16 is configured to allow Docker container connections:
|
||||
|
||||
```
|
||||
# /etc/postgresql/16/main/pg_hba.conf
|
||||
host all all 172.17.0.0/16 md5
|
||||
host all all 0.0.0.0/0 md5
|
||||
|
||||
# /etc/postgresql/16/main/postgresql.conf
|
||||
listen_addresses = '*'
|
||||
```
|
||||
|
||||
### Database Credentials
|
||||
|
||||
| Database | User | Password |
|
||||
|----------|------|----------|
|
||||
| mastodon_production | mastodon | REDACTED_PASSWORD |
|
||||
| mattermost | mmuser | (check /opt/mattermost/config/config.json) |
|
||||
| synapse | synapse | (check /opt/synapse/homeserver.yaml) |
|
||||
|
||||
## Nginx Configuration
|
||||
|
||||
### Ports
|
||||
- **8080**: Matrix/Element (mx.vish.gg)
|
||||
- **8081**: Mattermost (mm.crista.love)
|
||||
- **8082**: Mastodon (mastodon.vish.gg)
|
||||
|
||||
### Site Configs
|
||||
```
|
||||
/etc/nginx/sites-enabled/
|
||||
├── mastodon -> /etc/nginx/sites-available/mastodon
|
||||
├── matrix -> /etc/nginx/sites-available/matrix
|
||||
└── mattermost -> /etc/nginx/sites-available/mattermost
|
||||
```
|
||||
|
||||
## Mastodon Setup Details
|
||||
|
||||
### Directory Structure
|
||||
```
|
||||
/opt/mastodon/
|
||||
├── docker-compose.yml
|
||||
├── .env.production
|
||||
├── public/
|
||||
│ └── system/ # Media uploads
|
||||
└── redis/ # Redis data
|
||||
```
|
||||
|
||||
### Environment Variables
|
||||
```env
|
||||
LOCAL_DOMAIN=mastodon.vish.gg
|
||||
SINGLE_USER_MODE=false
|
||||
|
||||
# Database
|
||||
DB_HOST=172.17.0.1
|
||||
DB_PORT=5432
|
||||
DB_NAME=mastodon_production
|
||||
DB_USER=mastodon
|
||||
DB_PASS="REDACTED_PASSWORD"
|
||||
|
||||
# Redis
|
||||
REDIS_HOST=redis
|
||||
REDIS_PORT=6379
|
||||
|
||||
# SMTP (Gmail) - CONFIGURED AND WORKING ✅
|
||||
SMTP_SERVER=smtp.gmail.com
|
||||
SMTP_PORT=587
|
||||
SMTP_LOGIN=your-email@example.com
|
||||
SMTP_PASSWORD="REDACTED_PASSWORD"
|
||||
SMTP_AUTH_METHOD=plain
|
||||
SMTP_ENABLE_STARTTLS=auto
|
||||
SMTP_FROM_ADDRESS="Mastodon <notifications@mastodon.vish.gg>"
|
||||
|
||||
# Search
|
||||
ES_ENABLED=false
|
||||
```
|
||||
|
||||
### Common Commands
|
||||
```bash
|
||||
# View logs
|
||||
cd /opt/mastodon && docker compose logs -f
|
||||
|
||||
# Restart services
|
||||
cd /opt/mastodon && docker compose restart
|
||||
|
||||
# Run admin commands
|
||||
cd /opt/mastodon && docker compose exec web bin/tootctl <command>
|
||||
|
||||
# Create new user
|
||||
docker compose run --rm web bin/tootctl accounts create USERNAME --email=EMAIL --confirmed --role=Owner
|
||||
|
||||
# Database migration
|
||||
docker compose run --rm web bundle exec rake db:migrate
|
||||
```
|
||||
|
||||
## Mattermost Setup Details
|
||||
|
||||
### Directory Structure
|
||||
```
|
||||
/opt/mattermost/
|
||||
├── config/
|
||||
│ └── config.json
|
||||
├── data/
|
||||
├── logs/
|
||||
├── plugins/
|
||||
└── client/plugins/
|
||||
```
|
||||
|
||||
### Docker Command
|
||||
```bash
|
||||
docker run -d --name mattermost \
|
||||
-p 8065:8065 \
|
||||
-v /opt/mattermost/config:/mattermost/config \
|
||||
-v /opt/mattermost/data:/mattermost/data \
|
||||
-v /opt/mattermost/logs:/mattermost/logs \
|
||||
-v /opt/mattermost/plugins:/mattermost/plugins \
|
||||
--restart=always \
|
||||
mattermost/mattermost-team-edition:11.3
|
||||
```
|
||||
|
||||
## Matrix/Synapse Setup Details
|
||||
|
||||
### Directory Structure
|
||||
```
|
||||
/opt/synapse/
|
||||
├── homeserver.yaml
|
||||
├── *.signing.key
|
||||
└── media_store/
|
||||
|
||||
/opt/element/web/
|
||||
└── (Element Web static files)
|
||||
```
|
||||
|
||||
### Synapse Service
|
||||
```bash
|
||||
# Status
|
||||
systemctl status matrix-synapse
|
||||
|
||||
# Restart
|
||||
systemctl restart matrix-synapse
|
||||
|
||||
# Logs
|
||||
journalctl -u matrix-synapse -f
|
||||
```
|
||||
|
||||
## Cloudflare Configuration
|
||||
|
||||
For each service, configure Cloudflare:
|
||||
|
||||
1. **DNS Records** (A records pointing to VM public IP)
|
||||
- mastodon.vish.gg
|
||||
- mm.crista.love
|
||||
- mx.vish.gg
|
||||
|
||||
2. **Origin Rules** (Route to correct nginx port)
|
||||
- mastodon.vish.gg → Port 8082
|
||||
- mm.crista.love → Port 8081
|
||||
- mx.vish.gg → Port 8080
|
||||
|
||||
3. **SSL/TLS**: Full (strict)
|
||||
|
||||
## Federation (Mastodon)
|
||||
|
||||
Federation requires:
|
||||
1. ✅ Proper LOCAL_DOMAIN in .env.production
|
||||
2. ✅ HTTPS via Cloudflare
|
||||
3. ✅ Webfinger endpoint responding at `/.well-known/webfinger`
|
||||
4. ⏳ DNS properly configured
|
||||
|
||||
Test federation:
|
||||
```bash
|
||||
# From another server
|
||||
curl "https://mastodon.vish.gg/.well-known/webfinger?resource=acct:vish@mastodon.vish.gg"
|
||||
```
|
||||
|
||||
## SMTP Configuration (Gmail)
|
||||
|
||||
To send emails via Gmail:
|
||||
|
||||
1. Enable 2-Factor Authentication on your Google account
|
||||
2. Generate an App Password:
|
||||
- Go to https://myaccount.google.com/apppasswords
|
||||
- Create a new app password for "Mail"
|
||||
3. Update `/opt/mastodon/.env.production`:
|
||||
```
|
||||
SMTP_PASSWORD="REDACTED_PASSWORD"
|
||||
```
|
||||
4. Restart Mastodon:
|
||||
```bash
|
||||
cd /opt/mastodon && docker compose restart
|
||||
```
|
||||
|
||||
## Backup Locations
|
||||
|
||||
```
|
||||
/backup/
|
||||
├── YYYYMMDD_HHMMSS/
|
||||
│ ├── mattermost.sql
|
||||
│ ├── synapse.sql
|
||||
│ ├── mastodon.sql
|
||||
│ ├── mastodon_media.tar.gz
|
||||
│ ├── mattermost_data.tar.gz
|
||||
│ └── synapse_data.tar.gz
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Mastodon 403 Forbidden
|
||||
- Normal when accessing with wrong Host header
|
||||
- Always access via proper domain or use `-H "Host: mastodon.vish.gg"`
|
||||
|
||||
### Federation Not Working
|
||||
- Check Cloudflare proxy is enabled
|
||||
- Verify DNS resolves correctly
|
||||
- Test webfinger endpoint externally
|
||||
|
||||
### Database Connection Errors
|
||||
- Verify PostgreSQL is listening on all interfaces
|
||||
- Check pg_hba.conf allows Docker network
|
||||
- Restart PostgreSQL: `systemctl restart postgresql`
|
||||
|
||||
### Container Won't Start
|
||||
```bash
|
||||
# Check logs
|
||||
docker logs <container_name>
|
||||
|
||||
# Check Docker network
|
||||
docker network ls
|
||||
docker network inspect mastodon_internal_network
|
||||
```
|
||||
178
hosts/vms/matrix-ubuntu-vm/docs/SMTP.md
Normal file
178
hosts/vms/matrix-ubuntu-vm/docs/SMTP.md
Normal file
@@ -0,0 +1,178 @@
|
||||
# SMTP Email Configuration
|
||||
|
||||
Guide for configuring email delivery for Mastodon and Mattermost.
|
||||
|
||||
## Gmail SMTP Setup
|
||||
|
||||
### Prerequisites
|
||||
1. Google account with 2-Factor Authentication enabled
|
||||
2. App Password generated for "Mail"
|
||||
|
||||
### Generate Gmail App Password
|
||||
|
||||
1. Go to [Google Account Security](https://myaccount.google.com/security)
|
||||
2. Enable 2-Step Verification if not already enabled
|
||||
3. Go to [App Passwords](https://myaccount.google.com/apppasswords)
|
||||
4. Select "Mail" and your device
|
||||
5. Click "Generate"
|
||||
6. Copy the 16-character password
|
||||
|
||||
### Mastodon Configuration
|
||||
|
||||
Edit `/opt/mastodon/.env.production`:
|
||||
|
||||
```env
|
||||
# SMTP Configuration (Gmail)
|
||||
SMTP_SERVER=smtp.gmail.com
|
||||
SMTP_PORT=587
|
||||
SMTP_LOGIN=your-email@example.com
|
||||
SMTP_PASSWORD="REDACTED_PASSWORD"
|
||||
SMTP_AUTH_METHOD=plain
|
||||
SMTP_OPENSSL_VERIFY_MODE=none
|
||||
SMTP_ENABLE_STARTTLS=auto
|
||||
SMTP_FROM_ADDRESS="Mastodon <notifications@mastodon.vish.gg>"
|
||||
```
|
||||
|
||||
Apply changes:
|
||||
```bash
|
||||
cd /opt/mastodon && docker compose restart
|
||||
```
|
||||
|
||||
### Test Email Delivery
|
||||
|
||||
```bash
|
||||
# Send test email
|
||||
cd /opt/mastodon
|
||||
docker compose exec web bin/tootctl accounts modify vish --confirm
|
||||
|
||||
# Or trigger password reset
|
||||
# Go to login page and click "Forgot password"
|
||||
```
|
||||
|
||||
## Mattermost Email Configuration
|
||||
|
||||
Edit `/opt/mattermost/config/config.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"EmailSettings": {
|
||||
"EnableSignUpWithEmail": true,
|
||||
"EnableSignInWithEmail": true,
|
||||
"EnableSignInWithUsername": true,
|
||||
"SendEmailNotifications": true,
|
||||
"RequireEmailVerification": false,
|
||||
"FeedbackName": "Mattermost",
|
||||
"FeedbackEmail": "notifications@mm.crista.love",
|
||||
"SMTPUsername": "your-email@example.com",
|
||||
"SMTPPassword": "your_16_char_app_password",
|
||||
"SMTPServer": "smtp.gmail.com",
|
||||
"SMTPPort": "587",
|
||||
"ConnectionSecurity": "STARTTLS",
|
||||
"SendPushNotifications": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Restart Mattermost:
|
||||
```bash
|
||||
docker restart mattermost
|
||||
```
|
||||
|
||||
## Alternative: SendGrid
|
||||
|
||||
### Setup
|
||||
1. Create SendGrid account at https://sendgrid.com
|
||||
2. Generate API key with "Mail Send" permission
|
||||
|
||||
### Mastodon Configuration
|
||||
```env
|
||||
SMTP_SERVER=smtp.sendgrid.net
|
||||
SMTP_PORT=587
|
||||
SMTP_LOGIN=apikey
|
||||
SMTP_PASSWORD="REDACTED_PASSWORD"
|
||||
SMTP_AUTH_METHOD=plain
|
||||
SMTP_OPENSSL_VERIFY_MODE=peer
|
||||
SMTP_ENABLE_STARTTLS=auto
|
||||
SMTP_FROM_ADDRESS="Mastodon <notifications@mastodon.vish.gg>"
|
||||
```
|
||||
|
||||
## Alternative: Mailgun
|
||||
|
||||
### Setup
|
||||
1. Create Mailgun account at https://mailgun.com
|
||||
2. Verify your domain
|
||||
3. Get SMTP credentials
|
||||
|
||||
### Mastodon Configuration
|
||||
```env
|
||||
SMTP_SERVER=smtp.mailgun.org
|
||||
SMTP_PORT=587
|
||||
SMTP_LOGIN=postmaster@mg.yourdomain.com
|
||||
SMTP_PASSWORD="REDACTED_PASSWORD"
|
||||
SMTP_AUTH_METHOD=plain
|
||||
SMTP_OPENSSL_VERIFY_MODE=peer
|
||||
SMTP_ENABLE_STARTTLS=auto
|
||||
SMTP_FROM_ADDRESS="Mastodon <notifications@mastodon.vish.gg>"
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Check SMTP Connection
|
||||
```bash
|
||||
# Test from container
|
||||
docker compose exec web bash -c "echo 'test' | openssl s_client -connect smtp.gmail.com:587 -starttls smtp"
|
||||
```
|
||||
|
||||
### Check Sidekiq Mail Queue
|
||||
```bash
|
||||
# View failed email jobs
|
||||
docker compose exec web bin/tootctl sidekiq status
|
||||
```
|
||||
|
||||
### Common Errors
|
||||
|
||||
#### "Username and Password not accepted"
|
||||
- Verify App Password is correct (not your regular password)
|
||||
- Ensure 2FA is enabled on Google account
|
||||
- Check no extra spaces in password
|
||||
|
||||
#### "Connection refused"
|
||||
- Firewall blocking outbound port 587
|
||||
- Try port 465 with SSL instead
|
||||
|
||||
#### "Certificate verify failed"
|
||||
- Set `SMTP_OPENSSL_VERIFY_MODE=none` (less secure)
|
||||
- Or ensure CA certificates are up to date
|
||||
|
||||
### Gmail-Specific Issues
|
||||
|
||||
#### "Less secure app access"
|
||||
- Not needed when using App Passwords
|
||||
- App Passwords bypass this requirement
|
||||
|
||||
#### "Critical security alert"
|
||||
- Normal for first connection from new IP
|
||||
- Confirm it was you in Google Security settings
|
||||
|
||||
## Email Content Customization
|
||||
|
||||
### Mastodon
|
||||
Email templates are in the Mastodon source code. Custom templates require forking.
|
||||
|
||||
### Mattermost
|
||||
Edit in System Console → Site Configuration → Customization
|
||||
- Support Email
|
||||
- Notification Footer
|
||||
- Custom Branding
|
||||
|
||||
## SPF/DKIM/DMARC
|
||||
|
||||
For better deliverability, configure DNS records:
|
||||
|
||||
### SPF Record
|
||||
```
|
||||
TXT @ "v=spf1 include:_spf.google.com ~all"
|
||||
```
|
||||
|
||||
### Note on Gmail Sending
|
||||
When using Gmail SMTP, emails are sent "via gmail.com" which has good deliverability. Custom domain email requires additional DNS setup.
|
||||
15
hosts/vms/matrix-ubuntu-vm/dozzle-agent.yaml
Normal file
15
hosts/vms/matrix-ubuntu-vm/dozzle-agent.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
services:
|
||||
dozzle-agent:
|
||||
image: amir20/dozzle:latest
|
||||
container_name: dozzle-agent
|
||||
command: agent
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
ports:
|
||||
- "7007:7007"
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD", "/dozzle", "healthcheck"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
45
hosts/vms/matrix-ubuntu-vm/mastodon/.env.production.template
Normal file
45
hosts/vms/matrix-ubuntu-vm/mastodon/.env.production.template
Normal file
@@ -0,0 +1,45 @@
|
||||
# Mastodon Environment Configuration
|
||||
# Copy to .env.production and fill in values
|
||||
|
||||
LOCAL_DOMAIN=mastodon.vish.gg
|
||||
SINGLE_USER_MODE=false
|
||||
|
||||
# Generate with: openssl rand -hex 64
|
||||
SECRET_KEY_BASE=<GENERATE_SECRET>
|
||||
OTP_SECRET=<GENERATE_SECRET>
|
||||
|
||||
# Database (using host PostgreSQL)
|
||||
DB_HOST=172.17.0.1
|
||||
DB_PORT=5432
|
||||
DB_NAME=mastodon_production
|
||||
DB_USER=mastodon
|
||||
DB_PASS=REDACTED_DB_PASSWORD
|
||||
|
||||
# Redis
|
||||
REDIS_HOST=redis
|
||||
REDIS_PORT=6379
|
||||
|
||||
# Locale
|
||||
DEFAULT_LOCALE=en
|
||||
|
||||
# SMTP Configuration (Gmail)
|
||||
# See docs/SMTP.md for setup instructions
|
||||
SMTP_SERVER=smtp.gmail.com
|
||||
SMTP_PORT=587
|
||||
SMTP_LOGIN=your-email@example.com
|
||||
SMTP_PASSWORD=REDACTED_SMTP_PASSWORD
|
||||
SMTP_AUTH_METHOD=plain
|
||||
SMTP_OPENSSL_VERIFY_MODE=none
|
||||
SMTP_ENABLE_STARTTLS=auto
|
||||
SMTP_FROM_ADDRESS="Mastodon <notifications@mastodon.vish.gg>"
|
||||
|
||||
# File storage
|
||||
PAPERCLIP_SECRET=<GENERATE_SECRET>
|
||||
|
||||
# Search (optional)
|
||||
ES_ENABLED=false
|
||||
|
||||
# Encryption keys - Generate with: docker compose run --rm web bin/rails db:encryption:init
|
||||
ACTIVE_RECORD_ENCRYPTION_DETERMINISTIC_KEY=<GENERATE>
|
||||
ACTIVE_RECORD_ENCRYPTION_KEY_DERIVATION_SALT=<GENERATE>
|
||||
ACTIVE_RECORD_ENCRYPTION_PRIMARY_KEY=<GENERATE>
|
||||
53
hosts/vms/matrix-ubuntu-vm/mastodon/docker-compose.yml
Normal file
53
hosts/vms/matrix-ubuntu-vm/mastodon/docker-compose.yml
Normal file
@@ -0,0 +1,53 @@
|
||||
services:
|
||||
redis:
|
||||
restart: unless-stopped
|
||||
image: redis:7-alpine
|
||||
networks:
|
||||
- internal_network
|
||||
volumes:
|
||||
- ./redis:/data
|
||||
|
||||
web:
|
||||
image: ghcr.io/mastodon/mastodon:v4.5.7
|
||||
restart: unless-stopped
|
||||
env_file: .env.production
|
||||
command: bundle exec puma -C config/puma.rb
|
||||
networks:
|
||||
- external_network
|
||||
- internal_network
|
||||
ports:
|
||||
- '3000:3000'
|
||||
depends_on:
|
||||
- redis
|
||||
volumes:
|
||||
- ./public/system:/mastodon/public/system
|
||||
|
||||
streaming:
|
||||
image: ghcr.io/mastodon/mastodon-streaming:v4.5.7
|
||||
restart: unless-stopped
|
||||
env_file: .env.production
|
||||
networks:
|
||||
- external_network
|
||||
- internal_network
|
||||
ports:
|
||||
- '4000:4000'
|
||||
depends_on:
|
||||
- redis
|
||||
|
||||
sidekiq:
|
||||
image: ghcr.io/mastodon/mastodon:v4.5.7
|
||||
restart: unless-stopped
|
||||
env_file: .env.production
|
||||
command: bundle exec sidekiq
|
||||
networks:
|
||||
- external_network
|
||||
- internal_network
|
||||
depends_on:
|
||||
- redis
|
||||
volumes:
|
||||
- ./public/system:/mastodon/public/system
|
||||
|
||||
networks:
|
||||
external_network:
|
||||
internal_network:
|
||||
internal: true
|
||||
@@ -0,0 +1,36 @@
|
||||
{
|
||||
"default_server_config": {
|
||||
"m.homeserver": {
|
||||
"base_url": "https://mx.vish.gg",
|
||||
"server_name": "mx.vish.gg"
|
||||
},
|
||||
"m.identity_server": {
|
||||
"base_url": "https://vector.im"
|
||||
}
|
||||
},
|
||||
"disable_custom_urls": false,
|
||||
"disable_guests": true,
|
||||
"disable_login_language_selector": false,
|
||||
"disable_3pid_login": false,
|
||||
"brand": "Element",
|
||||
"integrations_ui_url": "https://scalar.vector.im/",
|
||||
"integrations_rest_url": "https://scalar.vector.im/api",
|
||||
"integrations_widgets_urls": [
|
||||
"https://scalar.vector.im/_matrix/integrations/v1",
|
||||
"https://scalar.vector.im/api",
|
||||
"https://scalar-staging.vector.im/_matrix/integrations/v1",
|
||||
"https://scalar-staging.vector.im/api",
|
||||
"https://scalar-staging.riot.im/scalar/api"
|
||||
],
|
||||
"default_country_code": "US",
|
||||
"show_labs_settings": true,
|
||||
"features": {},
|
||||
"default_federate": true,
|
||||
"default_theme": "dark",
|
||||
"room_directory": {
|
||||
"servers": ["mx.vish.gg", "matrix.org"]
|
||||
},
|
||||
"enable_presence_by_hs_url": {
|
||||
"https://mx.vish.gg": true
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,69 @@
|
||||
# Matrix Synapse Homeserver Configuration Template
|
||||
# Copy to /opt/synapse-mx/homeserver.yaml and customize
|
||||
#
|
||||
# This is the PRIMARY federated server (mx.vish.gg)
|
||||
# For legacy server config, see homeserver-legacy.yaml.template
|
||||
|
||||
server_name: "mx.vish.gg"
|
||||
pid_file: /opt/synapse-mx/homeserver.pid
|
||||
public_baseurl: https://mx.vish.gg/
|
||||
|
||||
listeners:
|
||||
- port: 8018
|
||||
tls: false
|
||||
type: http
|
||||
x_forwarded: true
|
||||
resources:
|
||||
- names: [client, federation]
|
||||
compress: false
|
||||
|
||||
database:
|
||||
name: psycopg2
|
||||
args:
|
||||
user: synapse_mx
|
||||
password: "REDACTED_PASSWORD"
|
||||
database: synapse_mx
|
||||
host: localhost
|
||||
cp_min: 5
|
||||
cp_max: 10
|
||||
|
||||
log_config: "/opt/synapse-mx/mx.vish.gg.log.config"
|
||||
media_store_path: /opt/synapse-mx/media_store
|
||||
signing_key_path: "/opt/synapse-mx/mx.vish.gg.signing.key"
|
||||
|
||||
trusted_key_servers:
|
||||
- server_name: "matrix.org"
|
||||
|
||||
# Generate secrets with: python3 -c "import secrets; print(secrets.token_urlsafe(32))"
|
||||
registration_shared_secret: "<GENERATE_SECRET>"
|
||||
macaroon_secret_key: "<GENERATE_SECRET>"
|
||||
form_secret: "<GENERATE_SECRET>"
|
||||
|
||||
enable_registration: true
|
||||
enable_registration_without_verification: true
|
||||
|
||||
max_upload_size: 100M
|
||||
url_preview_enabled: true
|
||||
url_preview_ip_range_blacklist:
|
||||
- '127.0.0.0/8'
|
||||
- '10.0.0.0/8'
|
||||
- '172.16.0.0/12'
|
||||
- '192.168.0.0/16'
|
||||
- '100.64.0.0/10'
|
||||
- '169.254.0.0/16'
|
||||
- '::1/128'
|
||||
- 'fe80::/64'
|
||||
- 'fc00::/7'
|
||||
|
||||
report_stats: false
|
||||
suppress_key_server_warning: true
|
||||
|
||||
# TURN server for voice/video calls
|
||||
turn_uris:
|
||||
- "turn:mx.vish.gg:3479?transport=udp"
|
||||
- "turn:mx.vish.gg:3479?transport=tcp"
|
||||
turn_shared_secret: "<TURN_SHARED_SECRET>"
|
||||
turn_user_lifetime: 86400000
|
||||
turn_allow_guests: true
|
||||
|
||||
enable_3pid_changes: true
|
||||
@@ -0,0 +1,33 @@
|
||||
# TURN Server Configuration (coturn)
|
||||
# Copy to /etc/turnserver.conf
|
||||
|
||||
# Ports
|
||||
listening-port=3479
|
||||
tls-listening-port=5350
|
||||
listening-ip=0.0.0.0
|
||||
|
||||
# External IP for NAT traversal
|
||||
# Format: external-ip=<public-ip>/<internal-ip>
|
||||
external-ip=YOUR_WAN_IP/192.168.0.154
|
||||
|
||||
# Authentication
|
||||
fingerprint
|
||||
use-auth-secret
|
||||
static-auth-secret=<TURN_SHARED_SECRET>
|
||||
realm=matrix.thevish.io
|
||||
|
||||
# Quotas
|
||||
total-quota=100
|
||||
bps-capacity=0
|
||||
stale-nonce=600
|
||||
|
||||
# Security
|
||||
no-multicast-peers
|
||||
|
||||
# Media relay ports (must be forwarded through firewall)
|
||||
min-port=49201
|
||||
max-port=49250
|
||||
|
||||
# Logging
|
||||
log-file=/var/log/turnserver.log
|
||||
verbose
|
||||
27
hosts/vms/matrix-ubuntu-vm/mattermost/docker-compose.yml
Normal file
27
hosts/vms/matrix-ubuntu-vm/mattermost/docker-compose.yml
Normal file
@@ -0,0 +1,27 @@
|
||||
services:
|
||||
mattermost:
|
||||
container_name: mattermost
|
||||
image: mattermost/mattermost-team-edition:11.3
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "8065:8065"
|
||||
volumes:
|
||||
- ./config:/mattermost/config
|
||||
- ./data:/mattermost/data
|
||||
- ./logs:/mattermost/logs
|
||||
- ./plugins:/mattermost/plugins
|
||||
- ./client/plugins:/mattermost/client/plugins
|
||||
- ./bleve-indexes:/mattermost/bleve-indexes
|
||||
environment:
|
||||
- TZ=UTC
|
||||
- MM_SQLSETTINGS_DRIVERNAME=postgres
|
||||
- MM_SQLSETTINGS_DATASOURCE=postgres://mmuser:${MM_DB_PASSWORD}@172.17.0.1:5432/mattermost?sslmode=disable
|
||||
- MM_SERVICESETTINGS_SITEURL=https://mm.crista.love
|
||||
# Authentik OpenID Connect SSO - keeps local login working
|
||||
- MM_OPENIDSETTINGS_ENABLE=true
|
||||
- MM_OPENIDSETTINGS_BUTTONTEXT=Sign in with Authentik
|
||||
- MM_OPENIDSETTINGS_BUTTONCOLOR=#fd4b2d
|
||||
- MM_OPENIDSETTINGS_DISCOVERYSETTINGS_DISCOVERURL=https://sso.vish.gg/application/o/mattermost/.well-known/openid-configuration
|
||||
- MM_OPENIDSETTINGS_ID=REDACTED_CLIENT_ID
|
||||
- MM_OPENIDSETTINGS_SECRET=REDACTED_CLIENT_SECRET
|
||||
- MM_OPENIDSETTINGS_SCOPE=openid profile email
|
||||
118
hosts/vms/matrix-ubuntu-vm/nginx/mastodon.conf
Normal file
118
hosts/vms/matrix-ubuntu-vm/nginx/mastodon.conf
Normal file
@@ -0,0 +1,118 @@
|
||||
map $http_upgrade $connection_upgrade {
|
||||
default upgrade;
|
||||
'' close;
|
||||
}
|
||||
|
||||
upstream mastodon_backend {
|
||||
server 127.0.0.1:3000 fail_timeout=0;
|
||||
}
|
||||
|
||||
upstream mastodon_streaming {
|
||||
server 127.0.0.1:4000 fail_timeout=0;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 8082;
|
||||
listen [::]:8082;
|
||||
server_name mastodon.vish.gg;
|
||||
|
||||
keepalive_timeout 70;
|
||||
sendfile on;
|
||||
client_max_body_size 80m;
|
||||
|
||||
root /opt/mastodon/public;
|
||||
|
||||
gzip on;
|
||||
gzip_disable "msie6";
|
||||
gzip_vary on;
|
||||
gzip_proxied any;
|
||||
gzip_comp_level 6;
|
||||
gzip_buffers 16 8k;
|
||||
gzip_http_version 1.1;
|
||||
gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript image/svg+xml image/x-icon;
|
||||
|
||||
location / {
|
||||
try_files $uri @proxy;
|
||||
}
|
||||
|
||||
location /sw.js {
|
||||
add_header Cache-Control "public, max-age=604800, must-revalidate";
|
||||
try_files $uri =404;
|
||||
}
|
||||
|
||||
location ~ ^/assets/ {
|
||||
add_header Cache-Control "public, max-age=2419200, must-revalidate";
|
||||
try_files $uri =404;
|
||||
}
|
||||
|
||||
location ~ ^/avatars/ {
|
||||
add_header Cache-Control "public, max-age=2419200, must-revalidate";
|
||||
try_files $uri =404;
|
||||
}
|
||||
|
||||
location ~ ^/emoji/ {
|
||||
add_header Cache-Control "public, max-age=2419200, must-revalidate";
|
||||
try_files $uri =404;
|
||||
}
|
||||
|
||||
location ~ ^/headers/ {
|
||||
add_header Cache-Control "public, max-age=2419200, must-revalidate";
|
||||
try_files $uri =404;
|
||||
}
|
||||
|
||||
location ~ ^/packs/ {
|
||||
add_header Cache-Control "public, max-age=2419200, must-revalidate";
|
||||
try_files $uri =404;
|
||||
}
|
||||
|
||||
location ~ ^/shortcuts/ {
|
||||
add_header Cache-Control "public, max-age=2419200, must-revalidate";
|
||||
try_files $uri =404;
|
||||
}
|
||||
|
||||
location ~ ^/sounds/ {
|
||||
add_header Cache-Control "public, max-age=2419200, must-revalidate";
|
||||
try_files $uri =404;
|
||||
}
|
||||
|
||||
location ~ ^/system/ {
|
||||
add_header Cache-Control "public, max-age=2419200, immutable";
|
||||
try_files $uri =404;
|
||||
}
|
||||
|
||||
location ^~ /api/v1/streaming {
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
|
||||
proxy_pass http://mastodon_streaming;
|
||||
proxy_buffering off;
|
||||
proxy_redirect off;
|
||||
proxy_http_version 1.1;
|
||||
|
||||
tcp_nodelay on;
|
||||
}
|
||||
|
||||
location @proxy {
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header Proxy "";
|
||||
proxy_pass_header Server;
|
||||
|
||||
proxy_pass http://mastodon_backend;
|
||||
proxy_buffering on;
|
||||
proxy_redirect off;
|
||||
proxy_http_version 1.1;
|
||||
|
||||
proxy_cache_bypass $http_upgrade;
|
||||
|
||||
tcp_nodelay on;
|
||||
}
|
||||
|
||||
error_page 404 500 501 502 503 504 /500.html;
|
||||
}
|
||||
54
hosts/vms/matrix-ubuntu-vm/nginx/matrix-legacy.conf
Normal file
54
hosts/vms/matrix-ubuntu-vm/nginx/matrix-legacy.conf
Normal file
@@ -0,0 +1,54 @@
|
||||
# matrix.thevish.io - Legacy Matrix server (no federation, historical data)
|
||||
server {
|
||||
listen 8081;
|
||||
listen [::]:8081;
|
||||
server_name matrix.thevish.io;
|
||||
|
||||
# Element Web client
|
||||
root /opt/element/web-thevish;
|
||||
index index.html;
|
||||
|
||||
# Health check
|
||||
location /health {
|
||||
proxy_pass http://127.0.0.1:8008;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Proto https;
|
||||
proxy_set_header Host $host;
|
||||
}
|
||||
|
||||
# Client-Server API
|
||||
location ~ ^(/_matrix|/_synapse/client) {
|
||||
proxy_pass http://127.0.0.1:8008;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Proto https;
|
||||
proxy_set_header Host $host;
|
||||
client_max_body_size 100M;
|
||||
proxy_http_version 1.1;
|
||||
}
|
||||
|
||||
# Federation API (won't work due to server_name being "vish")
|
||||
location /_matrix/federation {
|
||||
proxy_pass http://127.0.0.1:8008;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Proto https;
|
||||
proxy_set_header Host $host;
|
||||
client_max_body_size 100M;
|
||||
}
|
||||
|
||||
# Well-known (for reference, federation won't work)
|
||||
location /.well-known/matrix/server {
|
||||
default_type application/json;
|
||||
return 200 '{"m.server": "matrix.thevish.io:443"}';
|
||||
}
|
||||
|
||||
location /.well-known/matrix/client {
|
||||
default_type application/json;
|
||||
add_header Access-Control-Allow-Origin *;
|
||||
return 200 '{"m.homeserver": {"base_url": "https://matrix.thevish.io"}}';
|
||||
}
|
||||
|
||||
# Element static files
|
||||
location / {
|
||||
try_files $uri $uri/ /index.html;
|
||||
}
|
||||
}
|
||||
54
hosts/vms/matrix-ubuntu-vm/nginx/matrix.conf
Normal file
54
hosts/vms/matrix-ubuntu-vm/nginx/matrix.conf
Normal file
@@ -0,0 +1,54 @@
|
||||
# mx.vish.gg - Primary Matrix server (federation enabled)
|
||||
server {
|
||||
listen 8082;
|
||||
listen [::]:8082;
|
||||
server_name mx.vish.gg;
|
||||
|
||||
# Element Web client
|
||||
root /opt/element/web;
|
||||
index index.html;
|
||||
|
||||
# Health check
|
||||
location /health {
|
||||
proxy_pass http://127.0.0.1:8018;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Proto https;
|
||||
proxy_set_header Host $host;
|
||||
}
|
||||
|
||||
# Client-Server API
|
||||
location ~ ^(/_matrix|/_synapse/client) {
|
||||
proxy_pass http://127.0.0.1:8018;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Proto https;
|
||||
proxy_set_header Host $host;
|
||||
client_max_body_size 100M;
|
||||
proxy_http_version 1.1;
|
||||
}
|
||||
|
||||
# Federation API
|
||||
location /_matrix/federation {
|
||||
proxy_pass http://127.0.0.1:8018;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Proto https;
|
||||
proxy_set_header Host $host;
|
||||
client_max_body_size 100M;
|
||||
}
|
||||
|
||||
# Well-known for federation
|
||||
location /.well-known/matrix/server {
|
||||
default_type application/json;
|
||||
return 200 '{"m.server": "mx.vish.gg:443"}';
|
||||
}
|
||||
|
||||
location /.well-known/matrix/client {
|
||||
default_type application/json;
|
||||
add_header Access-Control-Allow-Origin *;
|
||||
return 200 '{"m.homeserver": {"base_url": "https://mx.vish.gg"}}';
|
||||
}
|
||||
|
||||
# Element static files
|
||||
location / {
|
||||
try_files $uri $uri/ /index.html;
|
||||
}
|
||||
}
|
||||
41
hosts/vms/matrix-ubuntu-vm/nginx/mattermost.conf
Normal file
41
hosts/vms/matrix-ubuntu-vm/nginx/mattermost.conf
Normal file
@@ -0,0 +1,41 @@
|
||||
upstream mattermost {
|
||||
server 127.0.0.1:8065;
|
||||
keepalive 32;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 8081;
|
||||
listen [::]:8081;
|
||||
server_name mm.crista.love;
|
||||
|
||||
location ~ /api/v[0-9]+/(users/)?websocket$ {
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
client_max_body_size 50M;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Frame-Options SAMEORIGIN;
|
||||
proxy_buffers 256 16k;
|
||||
proxy_buffer_size 16k;
|
||||
proxy_read_timeout 600s;
|
||||
proxy_http_version 1.1;
|
||||
proxy_pass http://mattermost;
|
||||
}
|
||||
|
||||
location / {
|
||||
client_max_body_size 100M;
|
||||
proxy_set_header Connection "";
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Frame-Options SAMEORIGIN;
|
||||
proxy_buffers 256 16k;
|
||||
proxy_buffer_size 16k;
|
||||
proxy_read_timeout 600s;
|
||||
proxy_http_version 1.1;
|
||||
proxy_pass http://mattermost;
|
||||
}
|
||||
}
|
||||
30
hosts/vms/matrix-ubuntu-vm/scripts/backup.sh
Executable file
30
hosts/vms/matrix-ubuntu-vm/scripts/backup.sh
Executable file
@@ -0,0 +1,30 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
BACKUP_DIR="/backup/$(date +%Y%m%d_%H%M%S)"
|
||||
mkdir -p "$BACKUP_DIR"
|
||||
|
||||
echo "=== Homelab Backup ==="
|
||||
echo "Backup directory: $BACKUP_DIR"
|
||||
|
||||
# Backup PostgreSQL databases
|
||||
echo "[1/4] Backing up PostgreSQL databases..."
|
||||
sudo -u postgres pg_dump mattermost > "$BACKUP_DIR/mattermost.sql"
|
||||
sudo -u postgres pg_dump synapse > "$BACKUP_DIR/synapse.sql"
|
||||
sudo -u postgres pg_dump mastodon_production > "$BACKUP_DIR/mastodon.sql"
|
||||
|
||||
# Backup Mastodon media
|
||||
echo "[2/4] Backing up Mastodon media..."
|
||||
tar -czf "$BACKUP_DIR/mastodon_media.tar.gz" -C /opt/mastodon public/system 2>/dev/null || true
|
||||
|
||||
# Backup Mattermost data
|
||||
echo "[3/4] Backing up Mattermost data..."
|
||||
tar -czf "$BACKUP_DIR/mattermost_data.tar.gz" -C /opt/mattermost data config 2>/dev/null || true
|
||||
|
||||
# Backup Matrix/Synapse
|
||||
echo "[4/4] Backing up Matrix data..."
|
||||
tar -czf "$BACKUP_DIR/synapse_data.tar.gz" -C /opt synapse 2>/dev/null || true
|
||||
|
||||
echo ""
|
||||
echo "Backup complete: $BACKUP_DIR"
|
||||
ls -lh "$BACKUP_DIR"
|
||||
69
hosts/vms/matrix-ubuntu-vm/scripts/setup.sh
Executable file
69
hosts/vms/matrix-ubuntu-vm/scripts/setup.sh
Executable file
@@ -0,0 +1,69 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
echo "=== Ubuntu VM Homelab Setup ==="
|
||||
echo "This script sets up Mastodon, Mattermost, and Matrix/Element"
|
||||
|
||||
# Check if running as root
|
||||
if [ "$EUID" -ne 0 ]; then
|
||||
echo "Please run as root (sudo ./setup.sh)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Update system
|
||||
echo "[1/8] Updating system..."
|
||||
apt-get update && apt-get upgrade -y
|
||||
|
||||
# Install dependencies
|
||||
echo "[2/8] Installing dependencies..."
|
||||
apt-get install -y \
|
||||
docker.io docker-compose-v2 \
|
||||
nginx \
|
||||
postgresql postgresql-contrib \
|
||||
curl wget git
|
||||
|
||||
# Start services
|
||||
echo "[3/8] Starting services..."
|
||||
systemctl enable --now docker
|
||||
systemctl enable --now postgresql
|
||||
systemctl enable --now nginx
|
||||
|
||||
# Setup PostgreSQL
|
||||
echo "[4/8] Setting up PostgreSQL..."
|
||||
sudo -u postgres psql -c "CREATE USER mmuser WITH PASSWORD 'REDACTED_PASSWORD';" 2>/dev/null || true
|
||||
sudo -u postgres psql -c "CREATE DATABASE mattermost OWNER mmuser;" 2>/dev/null || true
|
||||
sudo -u postgres psql -c "CREATE USER synapse WITH PASSWORD 'REDACTED_PASSWORD';" 2>/dev/null || true
|
||||
sudo -u postgres psql -c "CREATE DATABASE synapse OWNER synapse ENCODING 'UTF8' LC_COLLATE='C' LC_CTYPE='C' template=template0;" 2>/dev/null || true
|
||||
sudo -u postgres psql -c "CREATE USER mastodon WITH PASSWORD 'REDACTED_PASSWORD' CREATEDB;" 2>/dev/null || true
|
||||
sudo -u postgres psql -c "CREATE DATABASE mastodon_production OWNER mastodon;" 2>/dev/null || true
|
||||
|
||||
# Configure PostgreSQL for Docker access
|
||||
echo "[5/8] Configuring PostgreSQL..."
|
||||
echo "host all all 172.17.0.0/16 md5" >> /etc/postgresql/*/main/pg_hba.conf
|
||||
echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/*/main/pg_hba.conf
|
||||
sed -i "s/#listen_addresses = 'localhost'/listen_addresses = '*'/" /etc/postgresql/*/main/postgresql.conf
|
||||
systemctl restart postgresql
|
||||
|
||||
# Setup directories
|
||||
echo "[6/8] Creating directories..."
|
||||
mkdir -p /opt/mastodon /opt/mattermost /opt/synapse /opt/element/web
|
||||
|
||||
# Copy nginx configs
|
||||
echo "[7/8] Setting up Nginx..."
|
||||
cp nginx/*.conf /etc/nginx/sites-available/
|
||||
ln -sf /etc/nginx/sites-available/mastodon.conf /etc/nginx/sites-enabled/
|
||||
ln -sf /etc/nginx/sites-available/mattermost.conf /etc/nginx/sites-enabled/
|
||||
ln -sf /etc/nginx/sites-available/matrix.conf /etc/nginx/sites-enabled/
|
||||
nginx -t && systemctl reload nginx
|
||||
|
||||
echo "[8/8] Setup complete!"
|
||||
echo ""
|
||||
echo "Next steps:"
|
||||
echo "1. Copy docker-compose files to /opt directories"
|
||||
echo "2. Configure environment files with actual secrets"
|
||||
echo "3. Run migrations and start services"
|
||||
echo ""
|
||||
echo "Ports:"
|
||||
echo " - Mastodon: 8082"
|
||||
echo " - Mattermost: 8081"
|
||||
echo " - Matrix/Element: 8080"
|
||||
96
hosts/vms/matrix-ubuntu-vm/scripts/update.sh
Executable file
96
hosts/vms/matrix-ubuntu-vm/scripts/update.sh
Executable file
@@ -0,0 +1,96 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Homelab Update Script
|
||||
# Updates Mastodon, Mattermost, Matrix Synapse, and system packages
|
||||
|
||||
echo "=== Homelab Update Script ==="
|
||||
echo "Started at: $(date)"
|
||||
echo ""
|
||||
|
||||
# Update Mastodon
|
||||
echo "[1/5] Updating Mastodon..."
|
||||
cd /opt/mastodon
|
||||
docker compose pull
|
||||
docker compose down
|
||||
docker compose run --rm web bundle exec rails db:migrate
|
||||
docker compose up -d
|
||||
echo "✅ Mastodon updated!"
|
||||
echo ""
|
||||
|
||||
# Update Mattermost
|
||||
echo "[2/5] Updating Mattermost..."
|
||||
cd /opt/mattermost
|
||||
docker compose pull
|
||||
docker compose down
|
||||
docker compose up -d
|
||||
echo "✅ Mattermost updated!"
|
||||
echo ""
|
||||
|
||||
# Update Matrix Synapse
|
||||
echo "[3/5] Updating Matrix Synapse..."
|
||||
cd /opt/synapse
|
||||
source venv/bin/activate
|
||||
|
||||
# Get current version
|
||||
CURRENT_VERSION=$(python -m synapse.app.homeserver --version 2>&1 | head -1)
|
||||
echo "Current version: $CURRENT_VERSION"
|
||||
|
||||
# Upgrade
|
||||
pip install --upgrade matrix-synapse
|
||||
|
||||
# Get new version
|
||||
NEW_VERSION=$(python -m synapse.app.homeserver --version 2>&1 | head -1)
|
||||
echo "New version: $NEW_VERSION"
|
||||
|
||||
# Restart both Synapse instances
|
||||
echo "Restarting Synapse instances..."
|
||||
pkill -f 'synapse.app.homeserver' || true
|
||||
sleep 2
|
||||
|
||||
# Start mx.vish.gg (primary)
|
||||
sudo -u synapse /opt/synapse/venv/bin/python -m synapse.app.homeserver \
|
||||
--config-path=/opt/synapse-mx/homeserver.yaml --daemonize
|
||||
echo " - mx.vish.gg started on port 8018"
|
||||
|
||||
# Start legacy vish
|
||||
sudo -u synapse /opt/synapse/venv/bin/python -m synapse.app.homeserver \
|
||||
--config-path=/opt/synapse/homeserver.yaml --daemonize
|
||||
echo " - vish (legacy) started on port 8008"
|
||||
|
||||
deactivate
|
||||
echo "✅ Matrix Synapse updated!"
|
||||
echo ""
|
||||
|
||||
# Update TURN server
|
||||
echo "[4/5] Updating TURN server (coturn)..."
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -y coturn 2>/dev/null && echo "✅ Coturn updated!" || echo "⚠️ Coturn update skipped"
|
||||
sudo systemctl restart coturn 2>/dev/null || true
|
||||
echo ""
|
||||
|
||||
# Update system packages
|
||||
echo "[5/5] Updating system packages..."
|
||||
sudo apt-get update
|
||||
sudo apt-get upgrade -y
|
||||
sudo apt-get autoremove -y
|
||||
echo "✅ System packages updated!"
|
||||
echo ""
|
||||
|
||||
# Verification
|
||||
echo "=== Verification ==="
|
||||
echo ""
|
||||
echo "Mastodon:"
|
||||
docker compose -f /opt/mastodon/docker-compose.yml ps --format "table {{.Name}}\t{{.Status}}" 2>/dev/null | head -5
|
||||
|
||||
echo ""
|
||||
echo "Mattermost:"
|
||||
docker ps --filter "name=mattermost" --format "table {{.Names}}\t{{.Status}}"
|
||||
|
||||
echo ""
|
||||
echo "Matrix Synapse:"
|
||||
curl -s http://localhost:8018/_matrix/federation/v1/version 2>/dev/null && echo " (mx.vish.gg)" || echo "❌ mx.vish.gg not responding"
|
||||
curl -s http://localhost:8008/_matrix/federation/v1/version 2>/dev/null && echo " (vish legacy)" || echo "❌ vish not responding"
|
||||
|
||||
echo ""
|
||||
echo "=== Update Complete ==="
|
||||
echo "Finished at: $(date)"
|
||||
16
hosts/vms/matrix-ubuntu-vm/systemd/synapse-mx.service
Normal file
16
hosts/vms/matrix-ubuntu-vm/systemd/synapse-mx.service
Normal file
@@ -0,0 +1,16 @@
|
||||
[Unit]
|
||||
Description=Synapse Matrix Homeserver (mx.vish.gg)
|
||||
After=network.target postgresql.service
|
||||
|
||||
[Service]
|
||||
Type=notify
|
||||
User=synapse
|
||||
Group=synapse
|
||||
WorkingDirectory=/opt/synapse-mx
|
||||
ExecStart=/opt/synapse/venv/bin/python -m synapse.app.homeserver --config-path=/opt/synapse-mx/homeserver.yaml
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
16
hosts/vms/matrix-ubuntu-vm/systemd/synapse.service
Normal file
16
hosts/vms/matrix-ubuntu-vm/systemd/synapse.service
Normal file
@@ -0,0 +1,16 @@
|
||||
[Unit]
|
||||
Description=Synapse Matrix Homeserver
|
||||
After=network.target postgresql.service
|
||||
|
||||
[Service]
|
||||
Type=notify
|
||||
User=synapse
|
||||
Group=synapse
|
||||
WorkingDirectory=/opt/synapse
|
||||
ExecStart=/opt/synapse/venv/bin/python -m synapse.app.homeserver --config-path=/opt/synapse/homeserver.yaml
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
400
hosts/vms/seattle/README-ollama.md
Normal file
400
hosts/vms/seattle/README-ollama.md
Normal file
@@ -0,0 +1,400 @@
|
||||
# Ollama on Seattle - Local LLM Inference Server
|
||||
|
||||
## Overview
|
||||
|
||||
| Setting | Value |
|
||||
|---------|-------|
|
||||
| **Host** | Seattle VM (Contabo VPS) |
|
||||
| **Port** | 11434 (Ollama API) |
|
||||
| **Image** | `ollama/ollama:latest` |
|
||||
| **API** | http://100.82.197.124:11434 (Tailscale) |
|
||||
| **Stack File** | `hosts/vms/seattle/ollama.yaml` |
|
||||
| **Data Volume** | `ollama-seattle-data` |
|
||||
|
||||
## Why Ollama on Seattle?
|
||||
|
||||
Ollama was deployed on seattle to provide:
|
||||
1. **CPU-Only Inference**: Ollama is optimized for CPU inference, unlike vLLM which requires GPU
|
||||
2. **Additional Capacity**: Supplements the main Ollama instance on Atlantis (192.168.0.200)
|
||||
3. **Geographic Distribution**: Runs on a Contabo VPS, providing inference capability outside the local network
|
||||
4. **Integration with Perplexica**: Can be added as an additional LLM provider for redundancy
|
||||
|
||||
## Specifications
|
||||
|
||||
### Hardware
|
||||
- **CPU**: 16 vCPU AMD EPYC Processor
|
||||
- **RAM**: 64GB
|
||||
- **Storage**: 300GB SSD
|
||||
- **Location**: Contabo Data Center
|
||||
- **Network**: Tailscale VPN (100.82.197.124)
|
||||
|
||||
### Resource Allocation
|
||||
```yaml
|
||||
limits:
|
||||
cpus: '12'
|
||||
memory: 32G
|
||||
reservations:
|
||||
cpus: '4'
|
||||
memory: 8G
|
||||
```
|
||||
|
||||
## Installed Models
|
||||
|
||||
### Qwen 2.5 1.5B Instruct
|
||||
- **Model ID**: `qwen2.5:1.5b`
|
||||
- **Size**: ~986 MB
|
||||
- **Context Window**: 32K tokens
|
||||
- **Use Case**: Fast, lightweight inference for search queries
|
||||
- **Performance**: Excellent on CPU, ~5-10 tokens/second
|
||||
|
||||
## Installation History
|
||||
|
||||
### February 16, 2026 - Initial Setup
|
||||
|
||||
**Problem**: Attempted to use vLLM for CPU inference
|
||||
- vLLM container crashed with device detection errors
|
||||
- vLLM is primarily designed for GPU inference
|
||||
- CPU mode is not well-supported in recent vLLM versions
|
||||
|
||||
**Solution**: Switched to Ollama
|
||||
- Ollama is specifically optimized for CPU inference
|
||||
- Provides better performance and reliability on CPU-only systems
|
||||
- Simpler configuration and management
|
||||
- Native support for multiple model formats
|
||||
|
||||
**Deployment Steps**:
|
||||
1. Removed failing vLLM container
|
||||
2. Created `ollama.yaml` docker-compose configuration
|
||||
3. Deployed Ollama container
|
||||
4. Pulled `qwen2.5:1.5b` model
|
||||
5. Tested API connectivity via Tailscale
|
||||
|
||||
## Configuration
|
||||
|
||||
### Docker Compose
|
||||
|
||||
See `hosts/vms/seattle/ollama.yaml`:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
ollama:
|
||||
image: ollama/ollama:latest
|
||||
container_name: ollama-seattle
|
||||
ports:
|
||||
- "11434:11434"
|
||||
environment:
|
||||
- OLLAMA_HOST=0.0.0.0:11434
|
||||
- OLLAMA_KEEP_ALIVE=24h
|
||||
- OLLAMA_NUM_PARALLEL=2
|
||||
- OLLAMA_MAX_LOADED_MODELS=2
|
||||
volumes:
|
||||
- ollama-data:/root/.ollama
|
||||
restart: unless-stopped
|
||||
```
|
||||
|
||||
### Environment Variables
|
||||
|
||||
- `OLLAMA_HOST`: Bind to all interfaces
|
||||
- `OLLAMA_KEEP_ALIVE`: Keep models loaded for 24 hours
|
||||
- `OLLAMA_NUM_PARALLEL`: Allow 2 parallel requests
|
||||
- `OLLAMA_MAX_LOADED_MODELS`: Cache up to 2 models in memory
|
||||
|
||||
## Usage
|
||||
|
||||
### API Endpoints
|
||||
|
||||
#### List Models
|
||||
```bash
|
||||
curl http://100.82.197.124:11434/api/tags
|
||||
```
|
||||
|
||||
#### Generate Completion
|
||||
```bash
|
||||
curl http://100.82.197.124:11434/api/generate -d '{
|
||||
"model": "qwen2.5:1.5b",
|
||||
"prompt": "Explain quantum computing in simple terms"
|
||||
}'
|
||||
```
|
||||
|
||||
#### Chat Completion
|
||||
```bash
|
||||
curl http://100.82.197.124:11434/api/chat -d '{
|
||||
"model": "qwen2.5:1.5b",
|
||||
"messages": [
|
||||
{"role": "user", "content": "Hello!"}
|
||||
]
|
||||
}'
|
||||
```
|
||||
|
||||
### Model Management
|
||||
|
||||
#### Pull a New Model
|
||||
```bash
|
||||
ssh seattle-tailscale "docker exec ollama-seattle ollama pull <model-name>"
|
||||
|
||||
# Examples:
|
||||
# docker exec ollama-seattle ollama pull qwen2.5:3b
|
||||
# docker exec ollama-seattle ollama pull llama3.2:3b
|
||||
# docker exec ollama-seattle ollama pull mistral:7b
|
||||
```
|
||||
|
||||
#### List Downloaded Models
|
||||
```bash
|
||||
ssh seattle-tailscale "docker exec ollama-seattle ollama list"
|
||||
```
|
||||
|
||||
#### Remove a Model
|
||||
```bash
|
||||
ssh seattle-tailscale "docker exec ollama-seattle ollama rm <model-name>"
|
||||
```
|
||||
|
||||
## Integration with Perplexica
|
||||
|
||||
To add this Ollama instance as an LLM provider in Perplexica:
|
||||
|
||||
1. Navigate to **http://192.168.0.210:4785/settings**
|
||||
2. Click **"Model Providers"**
|
||||
3. Click **"Add Provider"**
|
||||
4. Configure as follows:
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "Ollama Seattle",
|
||||
"type": "ollama",
|
||||
"baseURL": "http://100.82.197.124:11434",
|
||||
"apiKey": ""
|
||||
}
|
||||
```
|
||||
|
||||
5. Click **"Save"**
|
||||
6. Select `qwen2.5:1.5b` from the model dropdown when searching
|
||||
|
||||
### Benefits of Multiple Ollama Instances
|
||||
|
||||
- **Load Distribution**: Distribute inference load across multiple servers
|
||||
- **Redundancy**: If one instance is down, use the other
|
||||
- **Model Variety**: Different instances can host different models
|
||||
- **Network Optimization**: Use closest/fastest instance
|
||||
|
||||
## Performance
|
||||
|
||||
### Expected Performance (CPU-Only)
|
||||
|
||||
| Model | Size | Tokens/Second | Memory Usage |
|
||||
|-------|------|---------------|--------------|
|
||||
| qwen2.5:1.5b | 986 MB | 8-12 | ~2-3 GB |
|
||||
| qwen2.5:3b | ~2 GB | 5-8 | ~4-5 GB |
|
||||
| llama3.2:3b | ~2 GB | 4-7 | ~4-5 GB |
|
||||
| mistral:7b | ~4 GB | 2-4 | ~8-10 GB |
|
||||
|
||||
### Optimization Tips
|
||||
|
||||
1. **Use Smaller Models**: 1.5B and 3B models work best on CPU
|
||||
2. **Limit Parallel Requests**: Set `OLLAMA_NUM_PARALLEL=2` to avoid overload
|
||||
3. **Keep Models Loaded**: Long `OLLAMA_KEEP_ALIVE` prevents reload delays
|
||||
4. **Monitor Memory**: Watch RAM usage with `docker stats ollama-seattle`
|
||||
|
||||
## Monitoring
|
||||
|
||||
### Container Status
|
||||
```bash
|
||||
# Check if running
|
||||
ssh seattle-tailscale "docker ps | grep ollama"
|
||||
|
||||
# View logs
|
||||
ssh seattle-tailscale "docker logs -f ollama-seattle"
|
||||
|
||||
# Check resource usage
|
||||
ssh seattle-tailscale "docker stats ollama-seattle"
|
||||
```
|
||||
|
||||
### API Health Check
|
||||
```bash
|
||||
# Test connectivity
|
||||
curl -m 5 http://100.82.197.124:11434/api/tags
|
||||
|
||||
# Test inference
|
||||
curl http://100.82.197.124:11434/api/generate -d '{
|
||||
"model": "qwen2.5:1.5b",
|
||||
"prompt": "test",
|
||||
"stream": false
|
||||
}'
|
||||
```
|
||||
|
||||
### Performance Metrics
|
||||
```bash
|
||||
# Check response time
|
||||
time curl -s http://100.82.197.124:11434/api/tags > /dev/null
|
||||
|
||||
# Monitor CPU usage
|
||||
ssh seattle-tailscale "top -b -n 1 | grep ollama"
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Container Won't Start
|
||||
|
||||
```bash
|
||||
# Check logs
|
||||
ssh seattle-tailscale "docker logs ollama-seattle"
|
||||
|
||||
# Common issues:
|
||||
# - Port 11434 already in use
|
||||
# - Insufficient memory
|
||||
# - Volume mount permissions
|
||||
```
|
||||
|
||||
### Slow Inference
|
||||
|
||||
**Causes**:
|
||||
- Model too large for available CPU
|
||||
- Too many parallel requests
|
||||
- Insufficient RAM
|
||||
|
||||
**Solutions**:
|
||||
```bash
|
||||
# Use a smaller model
|
||||
docker exec ollama-seattle ollama pull qwen2.5:1.5b
|
||||
|
||||
# Reduce parallel requests
|
||||
# Edit ollama.yaml: OLLAMA_NUM_PARALLEL=1
|
||||
|
||||
# Increase CPU allocation
|
||||
# Edit ollama.yaml: cpus: '16'
|
||||
```
|
||||
|
||||
### Connection Timeout
|
||||
|
||||
**Problem**: Unable to reach Ollama from other machines
|
||||
|
||||
**Solutions**:
|
||||
1. Verify Tailscale connection:
|
||||
```bash
|
||||
ping 100.82.197.124
|
||||
tailscale status | grep seattle
|
||||
```
|
||||
|
||||
2. Check firewall:
|
||||
```bash
|
||||
ssh seattle-tailscale "ss -tlnp | grep 11434"
|
||||
```
|
||||
|
||||
3. Verify container is listening:
|
||||
```bash
|
||||
ssh seattle-tailscale "docker exec ollama-seattle netstat -tlnp"
|
||||
```
|
||||
|
||||
### Model Download Fails
|
||||
|
||||
```bash
|
||||
# Check available disk space
|
||||
ssh seattle-tailscale "df -h"
|
||||
|
||||
# Check internet connectivity
|
||||
ssh seattle-tailscale "curl -I https://ollama.com"
|
||||
|
||||
# Try manual download
|
||||
ssh seattle-tailscale "docker exec -it ollama-seattle ollama pull <model>"
|
||||
```
|
||||
|
||||
## Maintenance
|
||||
|
||||
### Updates
|
||||
|
||||
```bash
|
||||
# Pull latest Ollama image
|
||||
ssh seattle-tailscale "docker pull ollama/ollama:latest"
|
||||
|
||||
# Recreate container
|
||||
ssh seattle-tailscale "cd /opt/ollama && docker compose up -d --force-recreate"
|
||||
```
|
||||
|
||||
### Backup
|
||||
|
||||
```bash
|
||||
# Backup models and configuration
|
||||
ssh seattle-tailscale "docker run --rm -v ollama-seattle-data:/data -v $(pwd):/backup alpine tar czf /backup/ollama-backup.tar.gz /data"
|
||||
|
||||
# Restore
|
||||
ssh seattle-tailscale "docker run --rm -v ollama-seattle-data:/data -v $(pwd):/backup alpine tar xzf /backup/ollama-backup.tar.gz -C /"
|
||||
```
|
||||
|
||||
### Cleanup
|
||||
|
||||
```bash
|
||||
# Remove unused models
|
||||
ssh seattle-tailscale "docker exec ollama-seattle ollama list"
|
||||
ssh seattle-tailscale "docker exec ollama-seattle ollama rm <unused-model>"
|
||||
|
||||
# Clean up Docker
|
||||
ssh seattle-tailscale "docker system prune -f"
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Network Access
|
||||
|
||||
- Ollama is exposed on port 11434
|
||||
- **Only accessible via Tailscale** (100.82.197.124)
|
||||
- Not exposed to public internet
|
||||
- Consider adding authentication if exposing publicly
|
||||
|
||||
### API Security
|
||||
|
||||
Ollama doesn't have built-in authentication. For production use:
|
||||
|
||||
1. **Use a reverse proxy** with authentication (Nginx, Caddy)
|
||||
2. **Restrict access** via firewall rules
|
||||
3. **Use Tailscale ACLs** to limit access
|
||||
4. **Monitor usage** for abuse
|
||||
|
||||
## Cost Analysis
|
||||
|
||||
### Contabo VPS Costs
|
||||
- **Monthly Cost**: ~$25-35 USD
|
||||
- **Inference Cost**: $0 (self-hosted)
|
||||
- **vs Cloud APIs**: OpenAI costs ~$0.15-0.60 per 1M tokens
|
||||
|
||||
### Break-even Analysis
|
||||
- **Light usage** (<1M tokens/month): Cloud APIs cheaper
|
||||
- **Medium usage** (1-10M tokens/month): Self-hosted breaks even
|
||||
- **Heavy usage** (>10M tokens/month): Self-hosted much cheaper
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
### Potential Improvements
|
||||
|
||||
1. **GPU Support**: Migrate to GPU-enabled VPS for faster inference
|
||||
2. **Load Balancer**: Set up Nginx to load balance between Ollama instances
|
||||
3. **Auto-scaling**: Deploy additional instances based on load
|
||||
4. **Model Caching**: Pre-warm multiple models for faster switching
|
||||
5. **Monitoring Dashboard**: Grafana + Prometheus for metrics
|
||||
6. **API Gateway**: Add rate limiting and authentication
|
||||
|
||||
### Model Recommendations
|
||||
|
||||
For different use cases on CPU:
|
||||
|
||||
- **Fast responses**: qwen2.5:1.5b, phi3:3.8b
|
||||
- **Better quality**: qwen2.5:3b, llama3.2:3b
|
||||
- **Code tasks**: qwen2.5-coder:1.5b, codegemma:2b
|
||||
- **Instruction following**: mistral:7b (slower but better)
|
||||
|
||||
## Related Services
|
||||
|
||||
- **Atlantis Ollama** (`192.168.0.200:11434`) - Main Ollama instance
|
||||
- **Perplexica** (`192.168.0.210:4785`) - AI search engine client
|
||||
- **LM Studio** (`100.98.93.15:1234`) - Alternative LLM server
|
||||
|
||||
## References
|
||||
|
||||
- [Ollama Documentation](https://github.com/ollama/ollama)
|
||||
- [Available Models](https://ollama.com/library)
|
||||
- [Ollama API Reference](https://github.com/ollama/ollama/blob/main/docs/api.md)
|
||||
- [Qwen 2.5 Model Card](https://ollama.com/library/qwen2.5)
|
||||
|
||||
---
|
||||
|
||||
**Status:** ✅ Fully operational
|
||||
**Last Updated:** February 16, 2026
|
||||
**Maintained By:** Docker Compose (manual)
|
||||
123
hosts/vms/seattle/README.md
Normal file
123
hosts/vms/seattle/README.md
Normal file
@@ -0,0 +1,123 @@
|
||||
# Seattle VM (Contabo VPS)
|
||||
|
||||
## 🖥️ Machine Specifications
|
||||
|
||||
| Component | Details |
|
||||
|-----------|---------|
|
||||
| **Provider** | Contabo VPS |
|
||||
| **Hostname** | vmi2076105 (seattle-vm) |
|
||||
| **OS** | Ubuntu 24.04.4 LTS |
|
||||
| **Kernel** | Linux 6.8.0-90-generic |
|
||||
| **Architecture** | x86_64 |
|
||||
| **CPU** | 16 vCPU AMD EPYC Processor |
|
||||
| **Memory** | 64GB RAM |
|
||||
| **Storage** | 300GB SSD (24% used) |
|
||||
| **Virtualization** | KVM |
|
||||
|
||||
## 🌐 Network Configuration
|
||||
|
||||
| Interface | IP Address | Purpose |
|
||||
|-----------|------------|---------|
|
||||
| **eth0** | YOUR_WAN_IP/21 | Public Internet |
|
||||
| **tailscale0** | 100.82.197.124/32 | Tailscale VPN |
|
||||
| **docker0** | 172.17.0.1/16 | Docker default bridge |
|
||||
| **Custom bridges** | 172.18-20.0.1/16 | Service-specific networks |
|
||||
|
||||
## 🚀 Running Services
|
||||
|
||||
### Web Services (Docker)
|
||||
- **[Wallabag](./wallabag/)** - Read-later service at `wb.vish.gg`
|
||||
- **[Obsidian](./obsidian/)** - Note-taking web interface at `obs.vish.gg`
|
||||
- **[MinIO](./stoatchat/)** - Object storage for StoatChat at ports 14009-14010
|
||||
|
||||
### AI/ML Services

- **[Ollama](./README-ollama.md)** - Local LLM inference server
  - API Port: 11434
  - Tailscale: `100.82.197.124:11434`
  - Models: `qwen2.5:1.5b`
  - Purpose: CPU-based inference for Perplexica integration
|
||||
### Chat Platform
|
||||
- **[StoatChat (Revolt)](./stoatchat/)** - Self-hosted chat platform
|
||||
- Multiple microservices: Delta, Bonfire, Autumn, January, Gifbox
|
||||
- Ports: 14702-14706
|
||||
|
||||
### Gaming Services
|
||||
- **[PufferPanel](./pufferpanel/)** - Game server management panel
|
||||
- Web UI: Port 8080
|
||||
- SFTP: Port 5657
|
||||
- **[Garry's Mod PropHunt](./gmod-prophunt/)** - Game server
|
||||
- Game Port: 27015
|
||||
- RCON: 39903
|
||||
|
||||
### System Services
|
||||
- **Nginx** - Reverse proxy (ports 80, 443)
|
||||
- **Tailscale** - VPN mesh networking
|
||||
- **SSH** - Remote access (ports 22, 2222)
|
||||
- **MariaDB** - Database server (port 3306)
|
||||
- **Redis** - Cache server (port 6379)
|
||||
- **Postfix** - Mail server (port 25)
|
||||
|
||||
## 📁 Service Directories
|
||||
|
||||
```
|
||||
/opt/
|
||||
├── wallabag/ # Wallabag installation
|
||||
├── obsidian/ # Obsidian web interface
|
||||
├── gmod-prophunt/ # Garry's Mod server files
|
||||
└── pufferpanel/ # Game server management
|
||||
|
||||
/home/gmod/ # Garry's Mod user directory
|
||||
/etc/nginx/sites-enabled/ # Nginx virtual hosts
|
||||
```
|
||||
|
||||
## 🔧 Management
|
||||
|
||||
### Docker Services
|
||||
```bash
|
||||
# View running containers
|
||||
docker ps
|
||||
|
||||
# Restart a service
|
||||
docker-compose -f /opt/wallabag/docker-compose.yml restart
|
||||
|
||||
# View logs
|
||||
docker logs wallabag
|
||||
```
|
||||
|
||||
### System Services
|
||||
```bash
|
||||
# Check service status
|
||||
systemctl status nginx tailscaled
|
||||
|
||||
# Restart nginx
|
||||
sudo systemctl restart nginx
|
||||
|
||||
# View logs
|
||||
journalctl -u nginx -f
|
||||
```
|
||||
|
||||
### Game Server Management
|
||||
- **PufferPanel Web UI**: Access via configured domain
|
||||
- **Direct SRCDS**: Located in `/home/gmod/gmod-prophunt-server/`
|
||||
|
||||
## 🔒 Security Features
|
||||
|
||||
- **Tailscale VPN** for secure remote access
|
||||
- **Nginx reverse proxy** with SSL termination
|
||||
- **Firewall** configured for specific service ports
|
||||
- **SSH** on both standard (22) and alternate (2222) ports
|
||||
- **Local-only binding** for sensitive services (MySQL, Redis)
|
||||
|
||||
## 📊 Monitoring
|
||||
|
||||
- **System resources**: `htop`, `df -h`, `free -h`
|
||||
- **Network**: `ss -tlnp`, `netstat -tulpn`
|
||||
- **Docker**: `docker stats`, `docker logs`
|
||||
- **Services**: `systemctl status`
|
||||
|
||||
## 🔗 Related Documentation
|
||||
|
||||
- [StoatChat Deployment Guide](./stoatchat/DEPLOYMENT_GUIDE.md)
|
||||
- [Service Management Guide](./stoatchat/SERVICE_MANAGEMENT.md)
|
||||
- [Troubleshooting Guide](./stoatchat/TROUBLESHOOTING.md)
|
||||
43
hosts/vms/seattle/bookstack/docker-compose.yml
Normal file
43
hosts/vms/seattle/bookstack/docker-compose.yml
Normal file
@@ -0,0 +1,43 @@
|
||||
---
# BookStack wiki + dedicated MariaDB instance.
# Web UI bound to the Tailscale address (100.82.197.124) on port 6875.
services:
  bookstack:
    image: lscr.io/linuxserver/bookstack:latest
    container_name: bookstack
    restart: unless-stopped
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Los_Angeles
      - APP_URL=http://100.82.197.124:6875
      - DB_HOST=bookstack-db
      - DB_PORT=3306
      - DB_USER=bookstack
      # In list-form `environment`, quotes are part of the literal value —
      # never wrap passwords in quotes here.
      - DB_PASS=REDACTED_PASSWORD
      - DB_DATABASE=bookstack
      # NOTE(review): APP_KEY is a secret committed in plaintext — rotate it
      # and source it from an .env file (e.g. APP_KEY=${BOOKSTACK_APP_KEY}).
      - APP_KEY=base64:OyXRjle+VXdiPS2BBADYCrHSS/rCAo/VE9m2fW97YW8=
    volumes:
      - /opt/bookstack/data:/config
    ports:
      # Bind only to the Tailscale address, not the public interface.
      - "100.82.197.124:6875:80"
    depends_on:
      - bookstack-db
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:80/status"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 30s

  bookstack-db:
    image: lscr.io/linuxserver/mariadb:latest
    container_name: bookstack-db
    restart: unless-stopped
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Los_Angeles
      # Unquoted for the same reason as DB_PASS above.
      - MYSQL_ROOT_PASSWORD=REDACTED_PASSWORD
      - MYSQL_DATABASE=bookstack
      - MYSQL_USER=bookstack
      - MYSQL_PASSWORD=REDACTED_PASSWORD
    volumes:
      - /opt/bookstack/db:/config
|
||||
44
hosts/vms/seattle/ddns-updater.yaml
Normal file
44
hosts/vms/seattle/ddns-updater.yaml
Normal file
@@ -0,0 +1,44 @@
|
||||
---
# Dynamic DNS Updater — Seattle VM (Contabo VPS, YOUR_WAN_IP)
# Keeps Cloudflare A records current with the VPS public IP.
# Three services: proxied, stoatchat unproxied, and DERP unproxied.

# Shared image + hardening settings for all three updater containers
# (merged into each service below via the YAML merge key).
x-ddns-common: &ddns_common
  image: favonia/cloudflare-ddns:latest
  network_mode: host
  restart: unless-stopped
  read_only: true
  cap_drop:
    - all
  security_opt:
    - no-new-privileges:true

services:
  # vish.gg services behind Cloudflare proxy (HTTP/HTTPS via CF edge)
  ddns-seattle-proxied:
    <<: *ddns_common
    environment:
      - CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
      # General Seattle VM services (CF proxy on)
      - DOMAINS=nx.vish.gg,obs.vish.gg,pp.vish.gg,wb.vish.gg
      - PROXIED=true

  # StoatChat WebRTC subdomains — must be unproxied
  # (direct IP for WebSockets / LiveKit UDP)
  ddns-seattle-stoatchat:
    <<: *ddns_common
    environment:
      - CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
      # st.vish.gg + all subdomains need direct IP for real-time connections
      - DOMAINS=st.vish.gg,api.st.vish.gg,events.st.vish.gg,files.st.vish.gg,proxy.st.vish.gg,voice.st.vish.gg,livekit.st.vish.gg
      - PROXIED=false

  # DERP relay — must be unproxied (DERP protocol requires direct TLS,
  # CF proxy breaks it)
  ddns-seattle-derp:
    <<: *ddns_common
    environment:
      - CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
      - DOMAINS=derp-sea.vish.gg
      - PROXIED=false
|
||||
47
hosts/vms/seattle/derper.yaml
Normal file
47
hosts/vms/seattle/derper.yaml
Normal file
@@ -0,0 +1,47 @@
|
||||
---
# Standalone DERP Relay Server — Seattle VPS
# =============================================================================
# Tailscale/Headscale DERP relay for external fallback connectivity.
# Serves as region 901 "Seattle VPS" in the headscale derpmap.
#
# Why standalone (not behind nginx):
#   The DERP protocol does an HTTP→binary protocol switch inside TLS.
#   It is incompatible with HTTP reverse proxies. Must handle TLS directly.
#
# Port layout:
#   8444/tcp — DERP relay (direct TLS, NOT proxied through nginx)
#   3478/udp — STUN (NAT traversal hints)
#
# TLS cert:
#   Issued by Let's Encrypt via certbot DNS challenge (Cloudflare).
#   Cert path: /etc/letsencrypt/live/derp-sea.vish.gg/
#   Renewal hook at /etc/letsencrypt/renewal-hooks/deploy/derp-sea-symlinks.sh
#   auto-restarts this container after renewal.
#
# UFW rules required (one-time, already applied):
#   ufw allow 8444/tcp   # DERP TLS
#   ufw allow 3478/udp   # STUN
#
# DNS: derp-sea.vish.gg → YOUR_WAN_IP (managed by ddns-updater.yaml, unproxied)
# =============================================================================

services:
  derper:
    image: fredliang/derper:latest
    container_name: derper
    restart: unless-stopped
    ports:
      - "8444:8444"       # DERP TLS — direct, not behind nginx
      - "3478:3478/udp"   # STUN
    volumes:
      # Full letsencrypt mount required — live/ contains symlinks into
      # archive/; mounting only live/ breaks symlink resolution in-container.
      - /etc/letsencrypt:/etc/letsencrypt:ro
    environment:
      - DERP_DOMAIN=derp-sea.vish.gg
      - DERP_CERT_MODE=manual
      - DERP_CERT_DIR=/etc/letsencrypt/live/derp-sea.vish.gg
      - DERP_ADDR=:8444
      - DERP_STUN=true
      - DERP_STUN_PORT=3478
      - DERP_HTTP_PORT=-1           # disable plain HTTP, TLS only
      - DERP_VERIFY_CLIENTS=false   # allow any node (headscale manages auth)
||||
28
hosts/vms/seattle/diun.yaml
Normal file
28
hosts/vms/seattle/diun.yaml
Normal file
@@ -0,0 +1,28 @@
|
||||
---
# Diun — Docker Image Update Notifier
#
# Watches all running containers on this host and sends ntfy
# notifications when upstream images update their digest.
# Schedule: Mondays 09:00 (weekly cadence).
#
# ntfy topic: https://ntfy.vish.gg/diun

services:
  diun:
    image: crazymax/diun:latest
    container_name: diun
    restart: unless-stopped
    volumes:
      # Read-only is sufficient — Diun only inspects containers and images.
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - diun-data:/data
    environment:
      LOG_LEVEL: info
      DIUN_WATCH_WORKERS: "20"
      # NOTE(review): cron runs in the container's timezone (UTC unless TZ is
      # set) — confirm 09:00 Monday UTC is intended, or add a TZ variable.
      DIUN_WATCH_SCHEDULE: "0 9 * * 1"
      DIUN_WATCH_JITTER: "30s"
      DIUN_PROVIDERS_DOCKER: "true"
      DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT: "true"
      DIUN_NOTIF_NTFY_ENDPOINT: "https://ntfy.vish.gg"
      DIUN_NOTIF_NTFY_TOPIC: "diun"

volumes:
  diun-data:
|
||||
15
hosts/vms/seattle/dozzle-agent.yaml
Normal file
15
hosts/vms/seattle/dozzle-agent.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
---
# Dozzle agent — exposes this host's container logs on port 7007 for a
# remote Dozzle UI to connect to.
services:
  dozzle-agent:
    image: amir20/dozzle:latest
    container_name: dozzle-agent
    command: agent
    restart: unless-stopped
    volumes:
      # Read-only is sufficient — the agent only reads logs and metadata.
      - /var/run/docker.sock:/var/run/docker.sock:ro
    ports:
      - "7007:7007"   # agent connection port
    healthcheck:
      test: ["CMD", "/dozzle", "healthcheck"]
      interval: 30s
      timeout: 5s
      retries: 3
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user