Sanitized mirror from private repository - 2026-03-27 12:16:32 UTC
Some checks failed
Documentation / Build Docusaurus (push) Failing after 17m30s
Documentation / Deploy to GitHub Pages (push) Has been skipped

This commit is contained in:
Gitea Mirror Bot
2026-03-27 12:16:32 +00:00
commit 6583f29981
1279 changed files with 331321 additions and 0 deletions

View File

@@ -0,0 +1,61 @@
#!/bin/bash
# Add remaining apps to Homarr board sections.
#
# Every app row in Homarr's SQLite DB that has no board item yet is routed
# to a section based on its URL, inserted as an 'app' item plus a 1x1
# item_layout entry, and Homarr is restarted so it reloads the board.
DB_PATH="/volume2/metadata/docker/homarr/appdata/db/db.sqlite"
BOARD_ID="edpkzkkcfz1b8hkwzmevijoc"
LAYOUT_ID="q0a6laa1w1x482hin8cwz597"
ATLANTIS_ID="ku6606knoby4v7wpitg7iup9"
CALYPSO_ID="ulbtudx0jm40cg48yqegce92"
HOMELAB_ID="dy2bps9d4c70n9eqhxcfb6hh"
NUC_ID="dnkfdqkvryetukyag6q6k7ae"
EXTERNAL_ID="9lyej8u8anej7rfstwoa3oc8"
NETWORK_ID="3ma2sicgq2axcwn7uw2gva9v"

echo "=== Adding remaining apps ==="

# Apps not referenced by any item row of kind 'app' are missing from the
# board.  NOTE: the while loop runs in a pipeline subshell, so variables
# set inside it do not survive past 'done' (none are needed afterwards).
# '-r' keeps backslashes in app names literal.
sqlite3 "$DB_PATH" "SELECT a.id, a.name, a.href FROM app a WHERE a.id NOT IN (SELECT json_extract(options, '\$.json.appId') FROM item WHERE kind='app') ORDER BY a.name;" | while IFS='|' read -r app_id name href; do
  [ -z "$app_id" ] && continue
  # Route the app to a section by matching its URL; anything unrecognized
  # lands in the External section.
  if echo "$href" | grep -q "atlantis.vish.local\|vishinator"; then
    SECTION="$ATLANTIS_ID"
    SNAME="Atlantis"
  elif echo "$href" | grep -q "calypso.vish.local\|git.vish.gg"; then
    SECTION="$CALYPSO_ID"
    SNAME="Calypso"
  elif echo "$href" | grep -q "homelab.vish.local"; then
    SECTION="$HOMELAB_ID"
    SNAME="Homelab"
  elif echo "$href" | grep -q "concordnuc.vish.local"; then
    SECTION="$NUC_ID"
    SNAME="NUC"
  elif echo "$href" | grep -q "192.168.8.1\|192.168.29.1\|goodcloud"; then
    SECTION="$NETWORK_ID"
    SNAME="Network"
  else
    SECTION="$EXTERNAL_ID"
    SNAME="External"
  fi
  # Random 24-char [a-z0-9] item id.  Useless 'cat' removed: tr reads
  # /dev/urandom directly and head stops it after 24 bytes.
  ITEM_ID=$(tr -dc 'a-z0-9' </dev/urandom | head -c24)
  # NOTE(review): values are spliced into the SQL text; Homarr ids are
  # alphanumeric so quoting/injection is not a concern for these columns.
  sqlite3 "$DB_PATH" "INSERT INTO item (id, board_id, kind, options, advanced_options) VALUES ('$ITEM_ID', '$BOARD_ID', 'app', '{\"json\":{\"appId\":\"$app_id\"}}', '{\"json\":{\"title\":null,\"REDACTED_APP_PASSWORD\":[],\"borderColor\":\"\"}}');"
  # Position 0,0 for every item — they will stack, which is acceptable.
  sqlite3 "$DB_PATH" "INSERT INTO item_layout (item_id, section_id, layout_id, x_offset, y_offset, width, height) VALUES ('$ITEM_ID', '$SECTION', '$LAYOUT_ID', 0, 0, 1, 1);"
  echo " $name -> $SNAME"
done

echo ""
echo "=== Final counts ==="
sqlite3 "$DB_PATH" "SELECT s.name, COUNT(il.item_id) FROM section s LEFT JOIN item_layout il ON s.id = il.section_id WHERE s.name != '' GROUP BY s.id ORDER BY s.name;"
echo ""
echo "=== Restarting Homarr ==="
/var/packages/REDACTED_APP_PASSWORD/target/usr/bin/docker restart homarr
echo "Done!"

View File

@@ -0,0 +1,213 @@
#!/usr/bin/env python3
"""
Script to add basic disaster recovery comments to Docker Compose files
that don't already have comprehensive documentation.
"""
import os
import re
from pathlib import Path
def has_disaster_recovery_comments(file_path):
    """Return True if *file_path* already contains DR documentation.

    A file counts as documented when it mentions either marker string
    'DISASTER RECOVERY' or 'SERVICE OVERVIEW'.  Unreadable files
    (missing, permission denied, undecodable) are treated as
    undocumented rather than raising.
    """
    try:
        with open(file_path, 'r') as f:
            content = f.read()
        return 'DISASTER RECOVERY' in content or 'SERVICE OVERVIEW' in content
    except (OSError, UnicodeDecodeError):
        # Bare 'except:' replaced — it also swallowed KeyboardInterrupt
        # and SystemExit.  Only I/O and decode failures are expected here.
        return False
def get_service_info(file_path):
    """Extract service information from a Docker Compose file.

    Returns a (service_name, image_name, container_name) tuple, or
    (None, None, None) when the file cannot be read or defines nothing
    beyond top-level compose keys.
    """
    try:
        with open(file_path, 'r') as f:
            content = f.read()
        # Candidate service names: bare '<name>:' lines.
        names = re.findall(r'^\s*([a-zA-Z0-9_-]+):\s*$', content, re.MULTILINE)
        # Top-level compose keys are not services.
        reserved = {'version', 'services', 'networks', 'volumes'}
        candidates = [n for n in names if n not in reserved]
        if names and not candidates:
            # Only structural keys present — not a service definition.
            return None, None, None
        # BUG FIX: the previous code used re.search and therefore only
        # ever saw the *first* bare key — usually 'services:' itself —
        # which made every normal compose file look like it had no
        # service.  Scan all candidates and take the first real one.
        service_name = candidates[0] if candidates else 'unknown'
        image_match = re.search(r'image:\s*([^\s\n]+)', content)
        container_match = re.search(r'container_name:\s*([^\s\n]+)', content)
        image_name = image_match.group(1) if image_match else 'unknown'
        # Fall back to the service name when no explicit container_name.
        container_name = container_match.group(1) if container_match else service_name
        return service_name, image_name, container_name
    except Exception as e:
        print(f"Error parsing {file_path}: {e}")
        return None, None, None
def generate_disaster_recovery_header(service_name, image_name, container_name, file_path):
    """Generate the disaster recovery header block for one service.

    The priority tier (priority / RTO / RPO) comes from the first keyword
    rule that matches either the service name or the image name; rule
    order matters (e.g. 'mariadb' would also match the generic 'db').
    """
    # Search space for the keyword rules, lower-cased once.  A space
    # separates the two names so no keyword can match across the join.
    searchable = f"{service_name.lower()} {image_name.lower()}"
    # (keywords, (priority, rto, rpo)) — checked in order, first hit wins.
    tiers = (
        (('vaultwarden', 'bitwarden', 'password'),
         ('MAXIMUM CRITICAL', '15 minutes', '1 hour')),
        (('plex', 'jellyfin', 'media'),
         ('HIGH', '30 minutes', '24 hours')),
        (('grafana', 'prometheus', 'monitoring', 'uptime'),
         ('HIGH', '30 minutes', '4 hours')),
        (('pihole', 'dns', 'adguard'),
         ('HIGH', '15 minutes', '24 hours')),
        (('nginx', 'proxy', 'traefik'),
         ('HIGH', '20 minutes', '24 hours')),
        (('database', 'postgres', 'mysql', 'mariadb', 'db'),
         ('CRITICAL', '20 minutes', '1 hour')),
    )
    # Default tier when nothing matches.
    priority, rto, rpo = 'MEDIUM', '1 hour', '24 hours'
    for keywords, tier in tiers:
        if any(word in searchable for word in keywords):
            priority, rto, rpo = tier
            break
    # Path shown in the generated docs, relative to the repo root.
    rel_path = str(file_path).replace('/workspace/project/homelab/', '')
    return f"""# =============================================================================
# {service_name.upper().replace('-', ' ').replace('_', ' ')} - DOCKER SERVICE
# =============================================================================
#
# SERVICE OVERVIEW:
# - Container: {container_name}
# - Image: {image_name}
# - Configuration: {rel_path}
#
# DISASTER RECOVERY PRIORITY: {priority}
# - Recovery Time Objective (RTO): {rto}
# - Recovery Point Objective (RPO): {rpo}
#
# BACKUP REQUIREMENTS:
# - Configuration: Docker volumes and bind mounts
# - Data: Persistent volumes (if any)
# - Frequency: Daily for critical services, weekly for others
#
# DEPENDENCIES:
# - Docker daemon running
# - Network connectivity
# - Storage volumes accessible
# - Required environment variables set
#
# RECOVERY PROCEDURE:
# 1. Ensure dependencies are met
# 2. Restore configuration and data from backups
# 3. Deploy using: docker-compose -f {os.path.basename(file_path)} up -d
# 4. Verify service functionality
# 5. Update monitoring and documentation
#
# =============================================================================
"""
def add_comments_to_file(file_path):
    """Prepend a DR header and append a DR footer to one compose file.

    Returns True when the file was rewritten, False when it was skipped
    (already documented, unparsable) or an error occurred.
    """
    if has_disaster_recovery_comments(file_path):
        return False
    try:
        service_name, image_name, container_name = get_service_info(file_path)
        if not service_name:
            return False
        with open(file_path, 'r') as fh:
            body = fh.read()
        # Build the wrapping documentation.
        header = generate_disaster_recovery_header(
            service_name, image_name, container_name, file_path)
        compose_file = os.path.basename(file_path)
        footer = f"""
# =============================================================================
# BASIC DISASTER RECOVERY COMMANDS
# =============================================================================
#
# BACKUP:
# docker-compose -f {compose_file} down
# tar -czf backup-{service_name}-$(date +%Y%m%d).tar.gz [volume-paths]
#
# RESTORE:
# tar -xzf backup-{service_name}-[date].tar.gz
# docker-compose -f {compose_file} up -d
#
# VERIFY:
# docker-compose -f {compose_file} ps
# docker logs {container_name}
#
# =============================================================================
"""
        # Rewrite in place: header + original content + footer.
        with open(file_path, 'w') as fh:
            fh.write(header + body + footer)
        return True
    except Exception as e:
        print(f"Error processing {file_path}: {e}")
        return False
def main():
    """Scan the homelab tree and add DR comments to compose files."""
    homelab_root = Path('/workspace/project/homelab')
    # Collect every YAML file under the root.
    candidates = []
    for pattern in ('**/*.yml', '**/*.yaml'):
        candidates.extend(homelab_root.glob(pattern))
    # Keep only files that look like Docker Compose, capped at the first
    # 50 candidates to bound the batch size.
    compose_files = []
    for candidate in candidates[:50]:
        try:
            with open(candidate, 'r') as fh:
                text = fh.read()
        except:
            # Unreadable file — skip it (matches prior best-effort scan).
            continue
        if any(marker in text for marker in ('version:', 'services:', 'image:')):
            compose_files.append(candidate)
    print(f"Processing {len(compose_files)} Docker Compose files...")
    processed = 0
    skipped = 0
    for compose_file in compose_files:
        if add_comments_to_file(compose_file):
            processed += 1
            print(f"✓ Added DR comments to {compose_file}")
        else:
            skipped += 1
    print(f"\nProcessing complete:")
    print(f"- Processed: {processed} files")
    print(f"- Skipped: {skipped} files")


if __name__ == '__main__':
    main()

121
scripts/backup-access-manager.sh Executable file
View File

@@ -0,0 +1,121 @@
#!/bin/bash
# Backup SSH Access Manager
# Manages emergency SSH access when Tailscale is down
#
# Small CLI (status / add-ip / remove-ip / update-ip / connect-info)
# around ufw rules that gate an emergency SSH port.

# Alternate sshd port used only for emergency access.
BACKUP_PORT=2222
# Public IPv4 of this host; empty when the lookup fails (offline).
CURRENT_IP=$(curl -4 -s ifconfig.me 2>/dev/null)
# Summarize SSH configuration, firewall rules and sshd listeners for the
# emergency access port.  Read-only; prints to stdout.
show_status() {
  echo "=== Backup SSH Access Status ==="
  echo
  echo "🔧 SSH Configuration:"
  echo " - Primary SSH port: 22 (Tailscale + direct IP)"
  echo " - Backup SSH port: $BACKUP_PORT (restricted IP access)"
  echo
  echo "🌐 Current External IP: $CURRENT_IP"
  echo
  echo "🛡️ Firewall Rules for Port $BACKUP_PORT:"
  # Quoted expansion (was a bare $BACKUP_PORT handed to grep unquoted).
  ufw status numbered | grep "$BACKUP_PORT"
  echo
  echo "🔍 SSH Service Status:"
  systemctl is-active ssh && echo " ✅ SSH service is running"
  echo " Listening ports:"
  ss -tlnp | grep sshd | grep -E ":22|:$BACKUP_PORT"
  echo
}
# Authorize one address on the backup SSH port.
# $1 - IP address to allow; exits 1 when missing.
add_ip() {
  local ip=$1
  if [[ -z "$ip" ]]; then
    echo "Usage: $0 add-ip <IP_ADDRESS>"
    exit 1
  fi
  echo "Adding IP $ip to backup SSH access..."
  # Quote every expansion so an odd value cannot word-split the ufw rule.
  ufw allow from "$ip" to any port "$BACKUP_PORT" comment "Emergency SSH backup - $ip"
  echo "✅ Added $ip to backup SSH access"
}
# Remove a previously-added emergency rule for an address.
# $1 - IP address to de-authorize; exits 1 when missing.
remove_ip() {
  local ip=$1
  if [[ -z "$ip" ]]; then
    echo "Usage: $0 remove-ip <IP_ADDRESS>"
    exit 1
  fi
  echo "Removing IP $ip from backup SSH access..."
  # Locate the first numbered ufw rule mentioning this IP and the backup
  # port; the sed/tr chain strips the surrounding "[ N]" brackets.
  rule_num=$(ufw status numbered | grep "$ip.*$BACKUP_PORT" | head -1 | sed 's/\[//g' | sed 's/\].*//g' | tr -d ' ')
  if [[ -n "$rule_num" ]]; then
    # ufw prompts for confirmation when deleting by number; feed it "y".
    echo "y" | ufw delete "$rule_num"
    echo "✅ Removed $ip from backup SSH access"
  else
    echo "❌ IP $ip not found in firewall rules"
  fi
}
# Re-point the emergency firewall rule at this host's current public IP.
update_current_ip() {
  echo "Updating firewall rule for current IP..."
  local old_ip
  # Declaration split from assignment so a failed pipeline isn't masked.
  # BUG FIX: the old pattern "Emergency SSH backup access" never matched
  # the comment add_ip() actually writes ("Emergency SSH backup - <ip>"),
  # so the stale rule was never found and rules accumulated.  Match the
  # common prefix instead.
  old_ip=$(ufw status numbered | grep "Emergency SSH backup" | head -1 | awk '{print $4}')
  if [[ "$old_ip" != "$CURRENT_IP" ]]; then
    echo "Current IP changed from $old_ip to $CURRENT_IP"
    if [[ -n "$old_ip" ]]; then
      remove_ip "$old_ip"
    fi
    add_ip "$CURRENT_IP"
  else
    echo "✅ Current IP $CURRENT_IP is already authorized"
  fi
}
# Print usage instructions for the emergency SSH path.
show_connection_info() {
  local lan_ip
  lan_ip=$(hostname -I | awk '{print $1}')
  # One heredoc instead of a run of echo calls; output is identical.
  cat <<EOF
=== How to Connect via Backup SSH ===

When Tailscale is down, connect using:
 ssh -p $BACKUP_PORT root@YOUR_SERVER_IP
 ssh -p $BACKUP_PORT gmod@YOUR_SERVER_IP

Example:
 ssh -p $BACKUP_PORT root@$lan_ip

⚠️ Requirements:
 - Your IP must be authorized (currently: $CURRENT_IP)
 - SSH key authentication only (no passwords)
 - Port $BACKUP_PORT must be accessible from your location

EOF
}
# CLI dispatch.  With no argument "$1" expands to the empty string and
# falls into the "status" arm.
case "$1" in
  "status"|"")
    show_status
    ;;
  "add-ip")
    add_ip "$2"
    ;;
  "remove-ip")
    remove_ip "$2"
    ;;
  "update-ip")
    update_current_ip
    ;;
  "connect-info")
    show_connection_info
    ;;
  "help")
    echo "Backup SSH Access Manager"
    echo
    echo "Commands:"
    echo " status - Show current backup access status"
    echo " add-ip <ip> - Add IP address to backup SSH access"
    echo " remove-ip <ip> - Remove IP address from backup SSH access"
    echo " update-ip - Update firewall rule for current IP"
    echo " connect-info - Show connection instructions"
    echo " help - Show this help"
    ;;
  *)
    echo "Unknown command: $1"
    echo "Use '$0 help' for available commands"
    exit 1
    ;;
esac

View File

@@ -0,0 +1,104 @@
#!/bin/sh
# Build helper: resolves cross-compilation settings from TARGETARCH (as
# set by Docker buildx) and exposes tools/deps/apps entry points.

# Map TARGETARCH to the matching GNU toolchain and Rust target triple.
# When TARGETARCH is unset or empty this is a native build and the
# variables stay undefined.
if [ -n "$TARGETARCH" ]; then
  case "${TARGETARCH}" in
    "amd64")
      LINKER_NAME="x86_64-linux-gnu-gcc"
      LINKER_PACKAGE="gcc-x86-64-linux-gnu"
      BUILD_TARGET="x86_64-unknown-linux-gnu" ;;
    "arm64")
      LINKER_NAME="aarch64-linux-gnu-gcc"
      LINKER_PACKAGE="gcc-aarch64-linux-gnu"
      BUILD_TARGET="aarch64-unknown-linux-gnu" ;;
  esac
fi
# Install the cross linker package and the Rust std for the target.
# Assumes apt package lists are already current in the build image —
# TODO confirm an 'apt-get update' ran in an earlier layer.
tools() {
  apt-get install -y "${LINKER_PACKAGE}"
  rustup target add "${BUILD_TARGET}"
}
# Pre-build all dependency crates against stub sources so a Docker layer
# can cache the compiled dependencies independently of project code.
deps() {
  # Recreate the workspace layout the root Cargo.toml expects.
  mkdir -p \
    crates/bonfire/src \
    crates/delta/src \
    crates/core/config/src \
    crates/core/database/src \
    crates/core/files/src \
    crates/core/models/src \
    crates/core/parser/src \
    crates/core/permissions/src \
    crates/core/presence/src \
    crates/core/result/src \
    crates/core/coalesced/src \
    crates/core/ratelimits/src \
    crates/services/autumn/src \
    crates/services/january/src \
    crates/services/gifbox/src \
    crates/daemons/crond/src \
    crates/daemons/pushd/src \
    crates/daemons/voice-ingress/src
  # Stub every binary crate with a panicking main; the chained tee copies
  # write the same stub into each file (last tee's stdout is discarded
  # into the final file).
  echo 'fn main() { panic!("stub"); }' |
    tee crates/bonfire/src/main.rs |
    tee crates/delta/src/main.rs |
    tee crates/services/autumn/src/main.rs |
    tee crates/services/january/src/main.rs |
    tee crates/services/gifbox/src/main.rs |
    tee crates/daemons/crond/src/main.rs |
    tee crates/daemons/pushd/src/main.rs |
    tee crates/daemons/voice-ingress/src/main.rs
  # Stub every library crate with an (effectively) empty lib.rs.
  echo '' |
    tee crates/core/config/src/lib.rs |
    tee crates/core/database/src/lib.rs |
    tee crates/core/files/src/lib.rs |
    tee crates/core/models/src/lib.rs |
    tee crates/core/parser/src/lib.rs |
    tee crates/core/permissions/src/lib.rs |
    tee crates/core/presence/src/lib.rs |
    tee crates/core/result/src/lib.rs |
    tee crates/core/coalesced/src/lib.rs |
    tee crates/core/ratelimits/src/lib.rs
  # Build: native when TARGETARCH is unset, cross otherwise.
  if [ -z "$TARGETARCH" ]; then
    cargo build -j 10 --locked --release
  else
    cargo build -j 10 --locked --release --target "${BUILD_TARGET}"
  fi
}
# Build the real applications after deps() has warmed the cargo cache.
apps() {
  # Bump mtimes so cargo rebuilds the workspace crates whose real sources
  # replaced the stubs written by deps().
  touch -am \
    crates/bonfire/src/main.rs \
    crates/delta/src/main.rs \
    crates/daemons/crond/src/main.rs \
    crates/daemons/pushd/src/main.rs \
    crates/daemons/voice-ingress/src/main.rs \
    crates/core/config/src/lib.rs \
    crates/core/database/src/lib.rs \
    crates/core/models/src/lib.rs \
    crates/core/parser/src/lib.rs \
    crates/core/permissions/src/lib.rs \
    crates/core/presence/src/lib.rs \
    crates/core/result/src/lib.rs \
    crates/core/coalesced/src/lib.rs \
    crates/core/ratelimits/src/lib.rs
  if [ -z "$TARGETARCH" ]; then
    cargo build -j 10 --locked --release
  else
    cargo build -j 10 --locked --release --target "${BUILD_TARGET}"
    # Promote target/<triple>/ to target/ so later build stages find the
    # binaries at the same path for native and cross builds.
    mv target _target && mv _target/"${BUILD_TARGET}" target
  fi
}
# When cross-compiling, point cargo at the cross linker and let
# pkg-config resolve libraries for foreign architectures.
if [ -z "$TARGETARCH" ]; then
:
else
export RUSTFLAGS="-C linker=${LINKER_NAME}"
export PKG_CONFIG_ALLOW_CROSS="1"
export PKG_CONFIG_PATH="/usr/lib/pkgconfig:/usr/lib/aarch64-linux-gnu/pkgconfig:/usr/lib/x86_64-linux-gnu/pkgconfig"
fi
# Dispatch: run the function named by the arguments (tools|deps|apps).
"$@"

View File

@@ -0,0 +1,77 @@
#!/bin/bash
# Watchtower Status Checker via Portainer API
# Checks all endpoints for Watchtower containers and their health
#
# Requires curl and jq, plus a Portainer API key with endpoint access.
API_KEY=REDACTED_API_KEY
BASE_URL="http://vishinator.synology.me:10000"
echo "🔍 Checking Watchtower containers across all endpoints..."
echo "=================================================="
# Report on the Watchtower container(s) of one Portainer endpoint.
# $1 - endpoint id, $2 - human-readable endpoint name
check_watchtower() {
  local endpoint_id=$1
  local endpoint_name=$2
  echo ""
  echo "📍 Endpoint: $endpoint_name (ID: $endpoint_id)"
  echo "----------------------------------------"
  # Get containers with "watchtower" in the name
  containers=$(curl -s -H "X-API-Key: $API_KEY" \
    "$BASE_URL/api/endpoints/$endpoint_id/docker/containers/json?all=true" | \
    jq -r '.[] | select(.Names[]? | contains("watchtower")) | "\(.Id[0:12]) \(.Names[0]) \(.State) \(.Status)"')
  if [ -z "$containers" ]; then
    echo "❌ No Watchtower containers found"
  else
    echo "✅ Watchtower containers found:"
    # 'status' soaks up the remaining words because .Status contains
    # spaces.  -r added so backslashes are not interpreted by read.
    echo "$containers" | while read -r id name state status; do
      echo " Container: $name"
      echo " ID: $id"
      echo " State: $state"
      echo " Status: $status"
      # Check if container is running
      if [ "$state" = "running" ]; then
        echo " 🟢 Status: HEALTHY"
        # Get recent logs to check for errors
        echo " 📋 Recent logs (last 10 lines):"
        # NOTE(review): the sed strips a 7-char prefix per line; Docker's
        # multiplexed log-stream header is 8 bytes — confirm the offset.
        curl -s -H "X-API-Key: $API_KEY" \
          "$BASE_URL/api/endpoints/$endpoint_id/docker/containers/$id/logs?stdout=true&stderr=true&tail=10" | \
          sed 's/^.......//g' | tail -5 | sed 's/^/ /'
      else
        echo " 🔴 Status: NOT RUNNING"
        # Get logs to see why it stopped
        echo " 📋 Last logs before stopping:"
        curl -s -H "X-API-Key: $API_KEY" \
          "$BASE_URL/api/endpoints/$endpoint_id/docker/containers/$id/logs?stdout=true&stderr=true&tail=10" | \
          sed 's/^.......//g' | tail -5 | sed 's/^/ /'
      fi
      echo ""
    done
  fi
}
# Get all endpoints and check each one
echo "Getting endpoint list..."
endpoints=$(curl -s -H "X-API-Key: $API_KEY" "$BASE_URL/api/endpoints" | \
  jq -r '.[] | "\(.Id) \(.Name) \(.Status)"')
# NOTE(review): 'name' only captures the first word — an endpoint name
# containing a space would spill into 'status'.  Confirm names are
# single-word.  -r added so backslashes pass through untouched.
echo "$endpoints" | while read -r id name status; do
  # Portainer reports Status 1 for endpoints that are up.
  if [ "$status" = "1" ]; then
    check_watchtower "$id" "$name"
  else
    echo ""
    echo "📍 Endpoint: $name (ID: $id)"
    echo "----------------------------------------"
    echo "⚠️ Endpoint is OFFLINE (Status: $status)"
  fi
done
echo ""
echo "=================================================="
echo "✅ Watchtower status check complete!"
175
scripts/cleanup-gitea-wiki.sh Executable file
View File

@@ -0,0 +1,175 @@
#!/bin/bash
# Gitea Wiki Cleanup Script
# Removes old flat structure pages while preserving new organized structure
set -e
# Configuration
GITEA_TOKEN=REDACTED_TOKEN
GITEA_URL="https://git.vish.gg"
REPO_OWNER="Vish"
REPO_NAME="homelab"
# Base endpoint of the repository's wiki API.
BASE_URL="$GITEA_URL/api/v1/repos/$REPO_OWNER/$REPO_NAME/wiki"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
echo -e "${BLUE}🧹 Cleaning up Gitea Wiki - removing old flat structure...${NC}"
# Pages to KEEP (our new organized structure)
# NOTE: should_keep_page() compares titles against this list with an
# exact, case-sensitive match.
declare -a KEEP_PAGES=(
  "Home"
  "Administration"
  "Infrastructure"
  "Services"
  "Getting-Started"
  "Troubleshooting"
  "Advanced"
  # Key organized pages
  "GitOps-Guide"
  "Deployment-Guide"
  "Operational-Status"
  "Development-Guide"
  "Agent-Memory"
  "Infrastructure-Overview"
  "Infrastructure-Health"
  "SSH-Guide"
  "User-Access-Guide"
  "Security-Guide"
  "Service-Index"
  "Dashboard-Setup"
  "Homarr-Setup"
  "ARR-Suite-Enhancements"
  "Beginner-Quickstart"
  "What-Is-Homelab"
  "Prerequisites"
  "Architecture-Overview"
  "Emergency-Guide"
  "Common-Issues"
  "Container-Diagnosis"
  "Disaster-Recovery"
  "Hardware-Inventory"
)
# Return 0 (keep) when $1 exactly matches an entry of KEEP_PAGES,
# 1 (delete) otherwise.
should_keep_page() {
  local candidate="$1"
  local page
  for page in "${KEEP_PAGES[@]}"; do
    if [[ "$candidate" == "$page" ]]; then
      return 0
    fi
  done
  return 1
}
# Get all wiki pages
echo -e "${BLUE}📋 Fetching all wiki pages...${NC}"
# Single request; limit=500 presumed to cover the whole wiki — TODO
# confirm pagination is unnecessary for larger wikis.
all_pages=$(curl -s -H "Authorization: token $GITEA_TOKEN" "$BASE_URL/pages?limit=500")
if [[ -z "$all_pages" ]] || [[ "$all_pages" == "null" ]]; then
  echo -e "${RED}❌ Failed to fetch wiki pages${NC}"
  exit 1
fi
# Parse page titles
page_titles=$(echo "$all_pages" | jq -r '.[].title')
# NOTE: wc -l reports 1 even for an empty title list (echo's newline).
total_pages=$(echo "$page_titles" | wc -l)
echo -e "${BLUE}📊 Found $total_pages total wiki pages${NC}"
# Count pages to keep vs delete
keep_count=0
delete_count=0
declare -a pages_to_delete=()
# Classify every page: keep it if it is part of the organized structure,
# otherwise queue it for deletion.
while IFS= read -r page_title; do
  if should_keep_page "$page_title"; then
    # BUG FIX: plain arithmetic assignment instead of ((keep_count++)).
    # Under 'set -e', ((var++)) returns status 1 when the pre-increment
    # value is 0, which aborted the script on the very first page.
    keep_count=$((keep_count + 1))
    echo -e "${GREEN}✅ KEEP: $page_title${NC}"
  else
    delete_count=$((delete_count + 1))
    pages_to_delete+=("$page_title")
    echo -e "${YELLOW}🗑️ DELETE: $page_title${NC}"
  fi
done <<< "$page_titles"
echo ""
echo -e "${BLUE}📊 Cleanup Summary:${NC}"
echo -e "${GREEN}✅ Pages to keep: $keep_count${NC}"
echo -e "${YELLOW}🗑️ Pages to delete: $delete_count${NC}"
echo -e "${BLUE}📄 Total pages: $total_pages${NC}"
# Confirm deletion
echo ""
echo -e "${YELLOW}⚠️ This will DELETE $delete_count wiki pages!${NC}"
echo -e "${YELLOW}⚠️ Only the organized structure will remain.${NC}"
# -n 1: accept a single keystroke; -r: keep backslashes literal.
read -p "Continue with cleanup? (y/N): " -n 1 -r
echo
# Anything other than y/Y aborts without touching the wiki.
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
  echo -e "${BLUE}🚫 Cleanup cancelled${NC}"
  exit 0
fi
# Delete old pages
echo ""
echo -e "${BLUE}🗑️ Starting deletion of old pages...${NC}"
deleted_count=0
failed_count=0
for page_title in "${pages_to_delete[@]}"; do
  echo -e "${YELLOW}🗑️ Deleting: $page_title${NC}"
  # NOTE(review): $page_title is interpolated into the URL unencoded;
  # titles containing spaces or '/' would need URL-encoding — confirm
  # none exist in this wiki.
  response=$(curl -s -w "%{http_code}" -o /tmp/delete_response.json \
    -X DELETE \
    -H "Authorization: token $GITEA_TOKEN" \
    "$BASE_URL/$page_title")
  # curl appends the HTTP status as the last 3 chars of the output.
  http_code="${response: -3}"
  if [[ "$http_code" == "204" ]] || [[ "$http_code" == "200" ]]; then
    echo -e "${GREEN}✅ Deleted: $page_title${NC}"
    # BUG FIX: ((deleted_count++)) returns status 1 when the counter is
    # 0, which under 'set -e' killed the script after the first delete.
    deleted_count=$((deleted_count + 1))
  else
    echo -e "${RED}❌ Failed to delete: $page_title (HTTP $http_code)${NC}"
    failed_count=$((failed_count + 1))
  fi
  # Small delay to avoid rate limiting
  sleep 0.1
done
echo ""
echo -e "${BLUE}🎯 Gitea Wiki Cleanup Complete!${NC}"
echo -e "${GREEN}✅ Successfully deleted: $deleted_count pages${NC}"
echo -e "${RED}❌ Failed to delete: $failed_count pages${NC}"
echo -e "${GREEN}📚 Organized pages remaining: $keep_count${NC}"
# Get final page count
# Falls back to "unknown" when the API call or jq fails.
final_page_count=$(curl -s -H "Authorization: token $GITEA_TOKEN" "$BASE_URL/pages?limit=500" | jq '. | length' 2>/dev/null || echo "unknown")
echo ""
echo -e "${GREEN}📊 Final Wiki Statistics:${NC}"
echo -e "${GREEN} Total Pages: $final_page_count${NC}"
echo -e "${GREEN} Structure: ✅ Clean organized hierarchy${NC}"
echo -e "${GREEN} Old Pages Removed: $deleted_count${NC}"
echo ""
echo -e "${GREEN}🌐 Clean Gitea Wiki available at:${NC}"
echo -e " ${BLUE}https://git.vish.gg/$REPO_OWNER/$REPO_NAME/wiki${NC}"
# Exit 0 only when every queued page was deleted.
if [[ $failed_count -eq 0 ]]; then
  echo ""
  echo -e "${GREEN}✅ Gitea Wiki cleanup completed successfully!${NC}"
  echo -e "${GREEN}🎉 Wiki now has clean, organized structure only!${NC}"
  exit 0
else
  echo ""
  echo -e "${YELLOW}⚠️ Wiki cleanup completed with some issues.${NC}"
  echo -e "${YELLOW}📊 $deleted_count pages deleted, $failed_count failed.${NC}"
  exit 1
fi

View File

@@ -0,0 +1,476 @@
#!/bin/bash
# Clean Organized Gitea Wiki Creation Script
# Creates a fresh, properly organized wiki with hierarchical navigation
set -e
# Configuration
GITEA_TOKEN=REDACTED_TOKEN
GITEA_URL="https://git.vish.gg"
REPO_OWNER="Vish"
REPO_NAME="homelab"
# Base endpoint of the repository's wiki API.
BASE_URL="$GITEA_URL/api/v1/repos/$REPO_OWNER/$REPO_NAME/wiki"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
PURPLE='\033[0;35m'
NC='\033[0m' # No Color
echo -e "${BLUE}🚀 Creating CLEAN organized Gitea Wiki with hierarchical structure...${NC}"
# Create (or, on conflict, try to update) a wiki page from a markdown file.
# $1 - page title, $2 - path to the markdown content, $3 - commit message
create_wiki_page() {
  local title="$1"
  local file_path="$2"
  local message="$3"
  if [[ ! -f "$file_path" ]]; then
    echo -e "${RED}❌ File not found: $file_path${NC}"
    return 1
  fi
  echo -e "${YELLOW}📄 Creating: $title${NC}"
  # JSON-encode the raw file content (useless 'cat' removed: jq reads the
  # file itself).  --argjson below decodes it back to a plain string and
  # @base64 then encodes it the way the wiki API expects.
  local content
  content=$(jq -Rs . < "$file_path")
  # Create JSON payload
  local json_payload
  json_payload=$(jq -n \
    --arg title "$title" \
    --argjson content "$content" \
    --arg message "$message" \
    '{
      title: $title,
      content_base64: ($content | @base64),
      message: $message
    }')
  # Try to create new page
  local response
  response=$(curl -s -w "%{http_code}" -o /tmp/wiki_response.json \
    -X POST \
    -H "Authorization: token $GITEA_TOKEN" \
    -H "Content-Type: application/json" \
    -d "$json_payload" \
    "$BASE_URL/new")
  local http_code="${response: -3}"
  if [[ "$http_code" == "201" ]]; then
    echo -e "${GREEN}✅ Created: $title${NC}"
    return 0
  elif [[ "$http_code" == "409" ]] || [[ "$http_code" == "400" ]]; then
    # Page exists, try to update it.
    # NOTE(review): Gitea's documented edit endpoint is
    # PATCH /wiki/page/{pageName}; POSTing to "$BASE_URL/$title" may not
    # update — verify against the running Gitea version.
    response=$(curl -s -w "%{http_code}" -o /tmp/wiki_response.json \
      -X POST \
      -H "Authorization: token $GITEA_TOKEN" \
      -H "Content-Type: application/json" \
      -d "$json_payload" \
      "$BASE_URL/$title")
    http_code="${response: -3}"
    if [[ "$http_code" == "200" ]]; then
      echo -e "${GREEN}✅ Updated: $title${NC}"
      return 0
    else
      echo -e "${RED}❌ Failed to update $title (HTTP $http_code)${NC}"
      return 1
    fi
  else
    echo -e "${RED}❌ Failed to create $title (HTTP $http_code)${NC}"
    return 1
  fi
}
# Success counter
success_count=0
total_count=0
echo -e "${BLUE}📋 Creating main organized navigation hub...${NC}"
# Create REDACTED_APP_PASSWORD with clean organized navigation
# ('EOF' is quoted, so the markdown below is written literally — no shell
# expansion of $vars or backticks occurs.)
cat > /tmp/clean_wiki_home.md << 'EOF'
# 🏠 Homelab Documentation Wiki
*Organized documentation for Vish's homelab infrastructure*
## 🎯 Quick Access
| Category | Description | Key Pages |
|----------|-------------|-----------|
| [🔧 **Administration**](Administration) | System management & operations | [GitOps Guide](GitOps-Guide), [Deployment](Deployment-Guide) |
| [🏗️ **Infrastructure**](Infrastructure) | Core infrastructure & networking | [Overview](Infrastructure-Overview), [Networking](Networking-Guide) |
| [🎯 **Services**](Services) | Application services & setup | [Service Index](Service-Index), [Dashboard Setup](Dashboard-Setup) |
| [🚀 **Getting Started**](Getting-Started) | Beginner guides & quickstart | [Quickstart](Beginner-Quickstart), [What is Homelab](What-Is-Homelab) |
| [🛠️ **Troubleshooting**](Troubleshooting) | Problem solving & diagnostics | [Common Issues](Common-Issues), [Emergency Guide](Emergency-Guide) |
| [🔬 **Advanced**](Advanced) | Advanced topics & optimization | [Maturity Roadmap](Maturity-Roadmap), [Scaling](Scaling-Guide) |
---
## 📊 **System Status**
- **🚀 GitOps Status**: ✅ 18 active stacks, 50+ containers
- **🖥️ Active Servers**: 5 (Atlantis, Calypso, Gaming VPS, Homelab VM, Concord NUC)
- **🎯 Services**: 100+ containerized services
- **📚 Documentation**: 300+ organized pages
---
## 🌐 **Access Points**
- **🔗 Git Repository**: https://git.vish.gg/Vish/homelab
- **📖 Gitea Wiki**: https://git.vish.gg/Vish/homelab/wiki
- **📚 DokuWiki Mirror**: http://atlantis.vish.local:8399/doku.php?id=homelab:start
---
## 📚 **Documentation Categories**
### 🔧 Administration
Essential system management and operational procedures.
- [GitOps Comprehensive Guide](GitOps-Guide) - Complete deployment procedures ⭐
- [Deployment Documentation](Deployment-Guide) - Step-by-step deployment
- [Operational Status](Operational-Status) - Current system status
- [Security Hardening](Security-Guide) - Security procedures
### 🏗️ Infrastructure
Core infrastructure, networking, and host management.
- [Infrastructure Overview](Infrastructure-Overview) - Complete infrastructure guide
- [Networking Guide](Networking-Guide) - Network configuration
- [SSH Access Guide](SSH-Guide) - Access procedures
- [Hardware Inventory](Hardware-Inventory) - Equipment catalog
### 🎯 Services
Application services, dashboards, and service management.
- [Service Index](Service-Index) - All available services
- [Dashboard Setup](Dashboard-Setup) - Dashboard configuration
- [Stoatchat Setup](Stoatchat-Guide) - Chat platform
- [Media Services](Media-Services) - ARR suite and media
### 🚀 Getting Started
Beginner-friendly guides and quick start procedures.
- [Beginner Quickstart](Beginner-Quickstart) - Quick start guide
- [What Is Homelab](What-Is-Homelab) - Introduction to homelabs
- [Prerequisites](Prerequisites) - Requirements and setup
- [Architecture Overview](Architecture-Overview) - System architecture
### 🛠️ Troubleshooting
Problem solving, diagnostics, and emergency procedures.
- [Common Issues](Common-Issues) - Frequently encountered problems
- [Emergency Access Guide](Emergency-Guide) - Emergency procedures
- [Disaster Recovery](Disaster-Recovery) - Recovery procedures
- [Container Diagnosis](Container-Diagnosis) - Container troubleshooting
### 🔬 Advanced Topics
Advanced configuration, optimization, and scaling.
- [Homelab Maturity Roadmap](Maturity-Roadmap) - Growth planning
- [Repository Optimization](Optimization-Guide) - Optimization strategies
- [Terraform Implementation](Terraform-Guide) - Infrastructure as code
- [Scaling Strategies](Scaling-Guide) - Growth and scaling
---
*🏠 **Source Repository**: https://git.vish.gg/Vish/homelab*
*👨‍💻 **Maintainer**: Homelab Administrator*
*📚 **Documentation**: Organized and navigable*
EOF
total_count=$((total_count + 1))
if create_wiki_page "Home" "/tmp/clean_wiki_home.md" "Created clean organized wiki home page"; then
  success_count=$((success_count + 1))
fi
echo ""
echo -e "${BLUE}📚 Creating category pages...${NC}"
# Create Administration category page
# (Each category page below follows the same pattern: write literal
# markdown to /tmp via a quoted heredoc, then push it with
# create_wiki_page and track the success/total counters.)
cat > /tmp/administration.md << 'EOF'
# 🔧 Administration
*System management and operational procedures*
## 🚀 Deployment & GitOps
- [GitOps Comprehensive Guide](GitOps-Guide) - Complete deployment procedures ⭐
- [Deployment Documentation](Deployment-Guide) - Step-by-step deployment
- [Deployment Workflow](Deployment-Workflow) - Workflow procedures
## 🔧 System Administration
- [Development Guide](Development-Guide) - Development procedures
- [Agent Memory](Agent-Memory) - AI agent context
- [Monitoring Setup](Monitoring-Setup) - Monitoring configuration
- [Backup Strategies](Backup-Strategies) - Backup procedures
- [Maintenance Procedures](Maintenance-Guide) - System maintenance
## 📊 Status & Reports
- [Operational Status](Operational-Status) - Current system status
- [Documentation Audit](Documentation-Audit) - Audit results
## 📚 Integration
- [DokuWiki Integration](DokuWiki-Integration) - External wiki setup
- [Gitea Wiki Integration](Gitea-Wiki-Integration) - Native wiki setup
---
[🏠 Back to Home](Home)
EOF
total_count=$((total_count + 1))
if create_wiki_page "Administration" "/tmp/administration.md" "Created administration category page"; then
  success_count=$((success_count + 1))
fi
# Create Infrastructure category page
cat > /tmp/infrastructure.md << 'EOF'
# 🏗️ Infrastructure
*Core infrastructure, networking, and host management*
## 🌐 Core Infrastructure
- [Infrastructure Overview](Infrastructure-Overview) - Complete infrastructure guide
- [Infrastructure Health](Infrastructure-Health) - System health status
- [Networking Guide](Networking-Guide) - Network configuration
- [Storage Guide](Storage-Guide) - Storage configuration
- [Host Management](Host-Management) - Host administration
## 🔐 Access & Security
- [SSH Access Guide](SSH-Guide) - SSH access procedures
- [User Access Guide](User-Access-Guide) - User management
- [Authentik SSO](Authentik-SSO) - Single sign-on setup
## 🌐 Network Services
- [Tailscale Setup](Tailscale-Guide) - VPN configuration
- [Cloudflare Tunnels](Cloudflare-Tunnels) - Tunnel configuration
- [Cloudflare DNS](Cloudflare-DNS) - DNS configuration
- [Network Performance](Network-Performance) - Performance tuning
## 🏠 Hardware & Hosts
- [Hardware Inventory](Hardware-Inventory) - Equipment catalog
- [Atlantis Migration](Atlantis-Migration) - Migration procedures
- [Mobile Setup](Mobile-Setup) - Mobile device configuration
- [Laptop Setup](Laptop-Setup) - Laptop configuration
---
[🏠 Back to Home](Home)
EOF
total_count=$((total_count + 1))
if create_wiki_page "Infrastructure" "/tmp/infrastructure.md" "Created infrastructure category page"; then
  success_count=$((success_count + 1))
fi
# Create Services category page
cat > /tmp/services.md << 'EOF'
# 🎯 Services
*Application services and configuration guides*
## 📊 Service Management
- [Service Index](Service-Index) - All available services
- [Verified Service Inventory](Service-Inventory) - Service catalog
- [Dashboard Setup](Dashboard-Setup) - Dashboard configuration
- [Homarr Setup](Homarr-Setup) - Homarr dashboard
- [Theme Park](Theme-Park) - UI theming
## 🎬 Media Services
- [ARR Suite Enhancements](ARR-Suite-Enhancements) - Media stack improvements
- [ARR Suite Language Config](ARR-Language-Config) - Language configuration
## 💬 Communication Services
- [Stoatchat Setup](Stoatchat-Guide) - Chat platform setup
- [Matrix Setup](Matrix-Guide) - Matrix server configuration
- [Mastodon Setup](Mastodon-Guide) - Social media platform
- [Mattermost Setup](Mattermost-Guide) - Team communication
## 🔧 Development Services
- [OpenHands](OpenHands-Guide) - AI development assistant
- [Paperless](Paperless-Guide) - Document management
- [Reactive Resume](Reactive-Resume-Guide) - Resume builder
---
[🏠 Back to Home](Home)
EOF
total_count=$((total_count + 1))
if create_wiki_page "Services" "/tmp/services.md" "Created services category page"; then
  success_count=$((success_count + 1))
fi
# Create Getting Started category page
cat > /tmp/getting-started.md << 'EOF'
# 🚀 Getting Started
*Beginner guides and quick start procedures*
## 🎯 Quick Start
- [Beginner Quickstart](Beginner-Quickstart) - Quick start guide
- [What Is Homelab](What-Is-Homelab) - Introduction to homelabs
- [Prerequisites](Prerequisites) - Requirements and setup
- [Architecture Overview](Architecture-Overview) - System architecture
## 📚 Comprehensive Guides
- [Beginner Homelab Guide](Beginner-Guide) - Complete beginner guide
- [Shopping Guide](Shopping-Guide) - Hardware recommendations
- [Complete Rebuild Guide](Rebuild-Guide) - Full rebuild procedures
- [Quick Start Guide](Quick-Start) - Quick deployment
---
[🏠 Back to Home](Home)
EOF
total_count=$((total_count + 1))
if create_wiki_page "Getting-Started" "/tmp/getting-started.md" "Created getting started category page"; then
  success_count=$((success_count + 1))
fi
# Create Troubleshooting category page
cat > /tmp/troubleshooting.md << 'EOF'
# 🛠️ Troubleshooting
*Problem solving, diagnostics, and emergency procedures*
## 🚨 Emergency Procedures
- [Emergency Access Guide](Emergency-Guide) - Emergency procedures
- [Disaster Recovery](Disaster-Recovery) - Recovery procedures
- [Recovery Guide](Recovery-Guide) - System recovery
## 🔍 Diagnostics
- [Common Issues](Common-Issues) - Frequently encountered problems
- [Diagnostics Guide](Diagnostics-Guide) - Diagnostic procedures
- [Container Diagnosis](Container-Diagnosis) - Container troubleshooting
- [Performance Issues](Performance-Issues) - Performance troubleshooting
## 🔧 Specific Issues
- [Watchtower Emergency](Watchtower-Emergency) - Watchtower issues
- [Authentik SSO Rebuild](Authentik-Rebuild) - SSO troubleshooting
- [Beginner Troubleshooting](Beginner-Troubleshooting) - Beginner help
---
[🏠 Back to Home](Home)
EOF
total_count=$((total_count + 1))
if create_wiki_page "Troubleshooting" "/tmp/troubleshooting.md" "Created troubleshooting category page"; then
  success_count=$((success_count + 1))
fi
# Create Advanced category page
cat > /tmp/advanced.md << 'EOF'
# 🔬 Advanced Topics
*Advanced configuration, optimization, and scaling*
## 🚀 Growth & Optimization
- [Homelab Maturity Roadmap](Maturity-Roadmap) - Growth planning
- [Repository Optimization](Optimization-Guide) - Optimization strategies
- [Stack Comparison Report](Stack-Comparison) - Technology comparisons
- [Scaling Strategies](Scaling-Guide) - Growth and scaling
## 🏗️ Infrastructure as Code
- [Terraform Implementation](Terraform-Guide) - Infrastructure as code
- [Terraform Alternatives](Terraform-Alternatives) - Alternative approaches
- [Ansible Guide](Ansible-Guide) - Automation with Ansible
- [Customization Guide](Customization-Guide) - Advanced customization
## 🔗 Integration
- [Service Integrations](Service-Integrations) - Service integrations
---
[🏠 Back to Home](Home)
EOF
total_count=$((total_count + 1))
if create_wiki_page "Advanced" "/tmp/advanced.md" "Created advanced topics category page"; then
success_count=$((success_count + 1))
fi
echo ""
echo -e "${BLUE}📚 Creating key documentation pages...${NC}"
# Create key pages that exist in the docs
# Associative array mapping wiki page title -> source markdown path
# (relative to the repository root the script runs from).
declare -A key_pages=(
    # Core pages
    ["GitOps-Guide"]="docs/admin/GITOPS_DEPLOYMENT_GUIDE.md"
    ["Deployment-Guide"]="docs/admin/DEPLOYMENT_DOCUMENTATION.md"
    ["Operational-Status"]="docs/admin/OPERATIONAL_STATUS.md"
    ["Development-Guide"]="docs/admin/DEVELOPMENT.md"
    ["Agent-Memory"]="docs/admin/AGENTS.md"
    # Infrastructure
    ["Infrastructure-Overview"]="docs/infrastructure/INFRASTRUCTURE_OVERVIEW.md"
    ["Infrastructure-Health"]="docs/infrastructure/INFRASTRUCTURE_HEALTH_REPORT.md"
    ["SSH-Guide"]="docs/infrastructure/SSH_ACCESS_GUIDE.md"
    ["User-Access-Guide"]="docs/infrastructure/USER_ACCESS_GUIDE.md"
    # Security
    ["Security-Guide"]="docs/security/SECURITY_HARDENING_SUMMARY.md"
    # Services
    ["Service-Index"]="docs/services/VERIFIED_SERVICE_INVENTORY.md"
    ["Dashboard-Setup"]="docs/services/DASHBOARD_SETUP.md"
    ["Homarr-Setup"]="docs/services/HOMARR_SETUP.md"
    ["ARR-Suite-Enhancements"]="docs/services/ARR_SUITE_ENHANCEMENTS_FEB2025.md"
    # Getting Started
    ["Beginner-Quickstart"]="docs/getting-started/BEGINNER_QUICKSTART.md"
    ["What-Is-Homelab"]="docs/getting-started/what-is-homelab.md"
    ["Prerequisites"]="docs/getting-started/prerequisites.md"
    ["Architecture-Overview"]="docs/getting-started/architecture.md"
    # Troubleshooting
    ["Emergency-Guide"]="docs/troubleshooting/EMERGENCY_ACCESS_GUIDE.md"
    ["Common-Issues"]="docs/troubleshooting/common-issues.md"
    ["Container-Diagnosis"]="docs/troubleshooting/CONTAINER_DIAGNOSIS_REPORT.md"
    ["Disaster-Recovery"]="docs/troubleshooting/disaster-recovery.md"
    # Hardware
    ["Hardware-Inventory"]="docs/hardware/README.md"
)
# Upload each mapped file as a wiki page; missing source files are reported
# but do not abort the run. NOTE: associative-array iteration order is
# unspecified, so pages are created in no particular order.
for title in "${!key_pages[@]}"; do
    file_path="${key_pages[$title]}"
    if [[ -f "$file_path" ]]; then
        total_count=$((total_count + 1))
        if create_wiki_page "$title" "$file_path" "Created organized page: $title"; then
            success_count=$((success_count + 1))
        fi
        # Brief pause between uploads to avoid hammering the Gitea API.
        sleep 0.1
    else
        echo -e "${YELLOW}⚠️ File not found: $file_path${NC}"
    fi
done
echo ""
echo -e "${BLUE}🎯 Clean Organized Wiki Creation Summary:${NC}"
echo -e "${GREEN}✅ Successful: $success_count/$total_count${NC}"
echo -e "${RED}❌ Failed: $((total_count - success_count))/$total_count${NC}"
echo ""
echo -e "${BLUE}🌐 Clean Organized Gitea Wiki available at:${NC}"
echo -e " ${BLUE}https://git.vish.gg/$REPO_OWNER/$REPO_NAME/wiki${NC}"
echo -e " ${BLUE}https://git.vish.gg/$REPO_OWNER/$REPO_NAME/wiki/Home${NC}"
# Get final page count
# Query the wiki API for the page count; falls back to "unknown" if jq fails.
# NOTE(review): if curl fails silently, jq receives empty input and exits 0,
# leaving final_page_count empty rather than "unknown" — confirm intended.
final_page_count=$(curl -s -H "Authorization: token $GITEA_TOKEN" "$BASE_URL/pages?limit=500" | jq '. | length' 2>/dev/null || echo "unknown")
echo ""
echo -e "${GREEN}📊 Clean Organized Wiki Statistics:${NC}"
echo -e "${GREEN} Total Wiki Pages: $final_page_count${NC}"
echo -e "${GREEN} Organized Structure: ✅ Clean hierarchical navigation${NC}"
echo -e "${GREEN} Success Rate: $(( success_count * 100 / total_count ))%${NC}"
# Exit 0 only when every page was created; partial success exits 1 so CI
# callers can detect it.
if [[ $success_count -eq $total_count ]]; then
    echo ""
    echo -e "${GREEN}✅ CLEAN Organized Gitea Wiki created successfully!${NC}"
    echo -e "${GREEN}🎉 Wiki now has clean, navigable structure!${NC}"
    exit 0
else
    echo ""
    echo -e "${YELLOW}⚠️ Clean Wiki creation completed with some issues.${NC}"
    echo -e "${YELLOW}📊 $success_count out of $total_count pages created successfully.${NC}"
    exit 1
fi

View File

@@ -0,0 +1,80 @@
#!/bin/bash
# EMERGENCY FIX: Stop Watchtower crash loop caused by invalid Shoutrrr URL format
# The issue: Used http:// instead of ntfy:// - Shoutrrr doesn't recognize "http" as a service
#
# Strategy: stop and remove the crashed container, then recreate it with the
# ntfy:// notification scheme that Shoutrrr understands, and smoke-test the
# HTTP API afterwards. Requires: docker, curl, root privileges.
set -e
echo "🚨 EMERGENCY: Fixing Watchtower crash loop"
echo "=========================================="
echo "Issue: Invalid notification URL format causing crash loop"
echo "Error: 'unknown service \"http\"' - Shoutrrr needs ntfy:// format"
echo
# Check if running as root/sudo
if [[ $EUID -ne 0 ]]; then
    echo "❌ This script must be run as root or with sudo"
    exit 1
fi
echo "🛑 Stopping crashed Watchtower container..."
# `|| echo` keeps set -e from aborting when the container is already gone.
docker stop watchtower 2>/dev/null || echo "Container already stopped"
echo "🗑️ Removing crashed container..."
docker rm watchtower 2>/dev/null || echo "Container already removed"
echo "🔧 Creating new Watchtower with CORRECT notification URL format..."
echo " Using: ntfy://localhost:8081/updates?insecure=yes"
echo " (This forces HTTP instead of HTTPS for local ntfy server)"
# Recreate: HTTP API on host port 8091, daily update check at 04:00 local
# time, notifications sent via Shoutrrr to the local ntfy server.
docker run -d \
    --name watchtower \
    --restart unless-stopped \
    -p 8091:8080 \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -e WATCHTOWER_CLEANUP=true \
    -e WATCHTOWER_SCHEDULE="0 0 4 * * *" \
    -e WATCHTOWER_INCLUDE_STOPPED=false \
    -e TZ=America/Los_Angeles \
    -e WATCHTOWER_HTTP_API_UPDATE=true \
    -e WATCHTOWER_HTTP_API_TOKEN="REDACTED_HTTP_TOKEN" \
    -e WATCHTOWER_HTTP_API_METRICS=true \
    -e WATCHTOWER_NOTIFICATIONS=shoutrrr \
    -e WATCHTOWER_NOTIFICATION_URL="ntfy://localhost:8081/updates?insecure=yes" \
    containrrr/watchtower:latest
echo "⏳ Waiting for container to start..."
sleep 5
# Verify the container stayed up, then trigger one update cycle through the
# HTTP API and scan the fresh logs for the original Shoutrrr error.
if docker ps --format '{{.Names}}\t{{.Status}}' | grep watchtower | grep -q "Up"; then
    echo "✅ Watchtower is now running successfully!"
    echo "🧪 Testing notification (this will trigger an update check)..."
    sleep 2
    curl -s -H "Authorization: Bearer watchtower-update-token" \
        -X POST http://localhost:8091/v1/update >/dev/null 2>&1 || echo "API call completed"
    sleep 3
    echo "📋 Recent logs:"
    docker logs watchtower --since 10s | tail -5
    if docker logs watchtower --since 10s | grep -q "unknown service"; then
        echo "❌ Still having issues - check logs above"
    else
        echo "✅ No more 'unknown service' errors detected!"
    fi
else
    echo "❌ Watchtower failed to start - check logs:"
    docker logs watchtower
fi
echo
echo "📝 WHAT WAS FIXED:"
echo " ❌ OLD (BROKEN): http://localhost:8081/updates"
echo " ✅ NEW (WORKING): ntfy://localhost:8081/updates?insecure=yes"
echo
echo "🔍 The issue was using http:// instead of ntfy:// protocol"
echo " Shoutrrr notification system requires ntfy:// format"
echo " The ?insecure=yes parameter forces HTTP instead of HTTPS"
echo
echo "🔧 Repository files have been updated with the correct format"
echo "✅ Emergency fix complete!"

48
scripts/fix-atlantis-port.sh Executable file
View File

@@ -0,0 +1,48 @@
#!/bin/bash
# Fix Atlantis Watchtower port conflict by recreating the container through
# the Portainer API, publishing the Watchtower HTTP API on host port 8081
# instead of the conflicting 8080.
#
# Requires: curl, jq, network access to the Portainer endpoint.
echo "🔧 Fixing Atlantis port conflict by using port 8081 instead of 8080..."
API_KEY=REDACTED_API_KEY
BASE_URL="http://vishinator.synology.me:10000"
# Remove the current container (hard-coded ID of the conflicting instance).
curl -s -X DELETE -H "X-API-Key: $API_KEY" \
    "$BASE_URL/api/endpoints/2/docker/containers/7bbb8db75728?force=true"
sleep 2
# Create new container with port 8081.
# BUGFIX: the API-token env entry previously read
#   "WATCHTOWER_HTTP_API_TOKEN="REDACTED_HTTP_TOKEN",
# whose stray inner quotes made the JSON payload invalid, so the create
# call could never succeed. The value now sits inside a single JSON string.
create_response=$(curl -s -X POST -H "X-API-Key: $API_KEY" \
    -H "Content-Type: application/json" \
    "$BASE_URL/api/endpoints/2/docker/containers/create?name=watchtower" \
    -d '{
    "Image": "containrrr/watchtower:latest",
    "Env": [
        "WATCHTOWER_CLEANUP=true",
        "WATCHTOWER_INCLUDE_RESTARTING=true",
        "WATCHTOWER_INCLUDE_STOPPED=true",
        "WATCHTOWER_REVIVE_STOPPED=false",
        "WATCHTOWER_POLL_INTERVAL=3600",
        "WATCHTOWER_TIMEOUT=10s",
        "WATCHTOWER_HTTP_API_UPDATE=true",
        "WATCHTOWER_HTTP_API_TOKEN=REDACTED_HTTP_TOKEN",
        "WATCHTOWER_NOTIFICATIONS=shoutrrr",
        "WATCHTOWER_NOTIFICATION_URL=generic+http://localhost:8082/updates",
        "TZ=America/Los_Angeles"
    ],
    "HostConfig": {
        "Binds": ["/var/run/docker.sock:/var/run/docker.sock"],
        "RestartPolicy": {"Name": "always"},
        "PortBindings": {"8080/tcp": [{"HostPort": "8081"}]}
    }
}')
container_id=$(echo "$create_response" | jq -r '.Id')
# Robustness: jq prints "null" when .Id is absent (creation failed); abort
# with the API's error body instead of trying to start a container "null".
if [[ -z "$container_id" || "$container_id" == "null" ]]; then
    echo "❌ Failed to create container: $create_response"
    exit 1
fi
echo "✅ Created container: ${container_id:0:12}"
# Start the container
curl -s -X POST -H "X-API-Key: $API_KEY" \
    "$BASE_URL/api/endpoints/2/docker/containers/$container_id/start"
echo "🚀 Started Atlantis Watchtower on port 8081"

View File

@@ -0,0 +1,247 @@
#!/bin/bash
# =============================================================================
# WATCHTOWER ATLANTIS FIX SCRIPT
# =============================================================================
#
# Purpose: Fix common Watchtower issues on Atlantis server
# Created: February 9, 2026
# Based on: Incident resolution for Watchtower container not running
#
# Usage: ./fix-watchtower-atlantis.sh
# Requirements: SSH access to Atlantis, sudo privileges
#
# =============================================================================
set -e # Exit on any error
# Colors for output
# ANSI escape sequences consumed by `echo -e` throughout the script.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration
ATLANTIS_HOST="atlantis"      # SSH host (alias/FQDN) of the Atlantis server
CONTAINER_NAME="watchtower"   # container this script diagnoses and repairs
API_PORT="8082"               # host port the Watchtower HTTP API is mapped to
API_TOKEN=REDACTED_TOKEN      # NOTE(review): token is echoed to stdout later; avoid pasting logs publicly
echo -e "${BLUE}🔧 Watchtower Atlantis Fix Script${NC}"
echo -e "${BLUE}===================================${NC}"
echo ""
# Helper: execute a command on the Atlantis host over SSH.
run_on_atlantis() {
    # Print the command for traceability, then run it remotely. The exit
    # status propagated to the caller is that of the remote command.
    local remote_cmd=$1
    echo -e "${YELLOW}Running on Atlantis:${NC} ${remote_cmd}"
    ssh "$ATLANTIS_HOST" "${remote_cmd}"
}
# Verify SSH reachability of Atlantis before doing anything else.
check_connection() {
    # Guard-clause form: bail out with actionable hints when the host is
    # unreachable; fall through to the success message otherwise.
    echo -e "${BLUE}📡 Checking connection to Atlantis...${NC}"
    if ! ssh -o ConnectTimeout=5 "$ATLANTIS_HOST" "echo 'Connection successful'" >/dev/null 2>&1; then
        echo -e "${RED}❌ Cannot connect to Atlantis${NC}"
        echo "Please ensure:"
        echo " - SSH access is configured"
        echo " - Atlantis server is reachable"
        echo " - SSH keys are properly set up"
        exit 1
    fi
    echo -e "${GREEN}✅ Connected to Atlantis successfully${NC}"
    return 0
}
# Function to check Docker permissions
check_docker_permissions() {
    # Determine whether docker is usable directly or only via sudo, and
    # record the result in the global DOCKER_CMD consumed by every later
    # docker invocation in this script. Exits if neither form works.
    echo -e "${BLUE}🔐 Checking Docker permissions...${NC}"
    # Try without sudo first
    if run_on_atlantis "docker ps >/dev/null 2>&1"; then
        echo -e "${GREEN}✅ Docker access available without sudo${NC}"
        DOCKER_CMD="docker"          # global: plain docker works for this user
    else
        echo -e "${YELLOW}⚠️ Docker requires sudo privileges${NC}"
        if run_on_atlantis "sudo docker ps >/dev/null 2>&1"; then
            echo -e "${GREEN}✅ Docker access available with sudo${NC}"
            DOCKER_CMD="sudo docker" # global: fall back to sudo for all calls
        else
            echo -e "${RED}❌ Cannot access Docker even with sudo${NC}"
            exit 1
        fi
    fi
}
# Function to check Watchtower container status
check_watchtower_status() {
    # Report the container's state on the remote host.
    # Returns: 0 = present and running, 1 = present but stopped,
    #          2 = container not found.
    # NOTE: because the script runs under `set -e`, callers must invoke
    # this in a conditional or `|| ...` context — 1 and 2 are expected
    # outcomes, not errors.
    echo -e "${BLUE}🔍 Checking Watchtower container status...${NC}"
    local container_info
    container_info=$(run_on_atlantis "$DOCKER_CMD ps -a --filter name=$CONTAINER_NAME --format 'table {{.Names}}\t{{.Status}}\t{{.State}}'")
    if echo "$container_info" | grep -q "$CONTAINER_NAME"; then
        echo -e "${GREEN}✅ Watchtower container found${NC}"
        echo "$container_info"
        # Check if running ("Up ..." appears in the Status column)
        if echo "$container_info" | grep -q "Up"; then
            echo -e "${GREEN}✅ Watchtower is running${NC}"
            return 0
        else
            echo -e "${YELLOW}⚠️ Watchtower is not running${NC}"
            return 1
        fi
    else
        echo -e "${RED}❌ Watchtower container not found${NC}"
        return 2
    fi
}
# Function to start Watchtower container
start_watchtower() {
    # Start the existing (stopped) container and confirm it stays up.
    # Returns 0 on verified start, 1 on any failure.
    echo -e "${BLUE}🚀 Starting Watchtower container...${NC}"
    if run_on_atlantis "$DOCKER_CMD start $CONTAINER_NAME"; then
        echo -e "${GREEN}✅ Watchtower started successfully${NC}"
        # Wait a moment for startup
        sleep 3
        # Verify it's running (output suppressed; only the status matters here)
        if check_watchtower_status >/dev/null; then
            echo -e "${GREEN}✅ Watchtower is now running and healthy${NC}"
            return 0
        else
            echo -e "${RED}❌ Watchtower failed to start properly${NC}"
            return 1
        fi
    else
        echo -e "${RED}❌ Failed to start Watchtower${NC}"
        return 1
    fi
}
# Show the most recent Watchtower log lines, indented for readability.
check_watchtower_logs() {
    echo -e "${BLUE}📋 Checking Watchtower logs...${NC}"
    local recent
    # Fall back to a sentinel string when the logs command fails entirely.
    recent=$(run_on_atlantis "$DOCKER_CMD logs $CONTAINER_NAME --tail 10 2>/dev/null" || echo "No logs available")
    if [ -n "$recent" ] && [ "$recent" != "No logs available" ]; then
        echo -e "${GREEN}✅ Recent logs:${NC}"
        echo "$recent" | sed 's/^/ /'
    else
        echo -e "${YELLOW}⚠️ No logs available (container may not have started yet)${NC}"
    fi
}
# Function to test Watchtower API
test_watchtower_api() {
    # Probe the HTTP API without credentials. A 401 is the expected healthy
    # response (the endpoint requires a bearer token); 200 also counts as
    # reachable. Returns 0 when the API responds, 1 otherwise.
    # NOTE(review): the API token is printed to stdout below — treat script
    # output as sensitive.
    echo -e "${BLUE}🌐 Testing Watchtower API...${NC}"
    local api_response
    api_response=$(run_on_atlantis "curl -s -w 'HTTP_STATUS:%{http_code}' http://localhost:$API_PORT/v1/update" 2>/dev/null || echo "API_ERROR")
    if echo "$api_response" | grep -q "HTTP_STATUS:401"; then
        echo -e "${GREEN}✅ API is responding (401 = authentication required, which is correct)${NC}"
        echo -e "${BLUE}💡 API URL: http://atlantis:$API_PORT/v1/update${NC}"
        echo -e "${BLUE}💡 API Token: $API_TOKEN${NC}"
        return 0
    elif echo "$api_response" | grep -q "HTTP_STATUS:200"; then
        echo -e "${GREEN}✅ API is responding and accessible${NC}"
        return 0
    else
        echo -e "${YELLOW}⚠️ API test failed or unexpected response${NC}"
        echo "Response: $api_response"
        return 1
    fi
}
# Function to verify container configuration
verify_configuration() {
    # Informational checks only (never fails the script): confirm the
    # restart policy will survive reboots and that the API port is mapped.
    echo -e "${BLUE}⚙️ Verifying container configuration...${NC}"
    local restart_policy
    restart_policy=$(run_on_atlantis "$DOCKER_CMD inspect $CONTAINER_NAME --format '{{.HostConfig.RestartPolicy.Name}}'" 2>/dev/null || echo "unknown")
    if [ "$restart_policy" = "always" ]; then
        echo -e "${GREEN}✅ Restart policy: always (will auto-start on reboot)${NC}"
    else
        echo -e "${YELLOW}⚠️ Restart policy: $restart_policy (may not auto-start on reboot)${NC}"
    fi
    # Check port mapping
    local port_mapping
    port_mapping=$(run_on_atlantis "$DOCKER_CMD port $CONTAINER_NAME 2>/dev/null" || echo "No ports mapped")
    if echo "$port_mapping" | grep -q "$API_PORT"; then
        echo -e "${GREEN}✅ Port mapping: $port_mapping${NC}"
    else
        echo -e "${YELLOW}⚠️ Port mapping: $port_mapping${NC}"
    fi
}
# Main execution
main() {
    # Orchestrate diagnostics and repair in order. Steps that merely report
    # state must not kill the script under `set -e`; their exit codes are
    # captured explicitly (see BUGFIX below).
    echo -e "${BLUE}Starting Watchtower diagnostics and fix...${NC}"
    echo ""
    # Step 1: Check connection
    check_connection
    echo ""
    # Step 2: Check Docker permissions (sets the global DOCKER_CMD)
    check_docker_permissions
    echo ""
    # Step 3: Check Watchtower status.
    # BUGFIX: this was a bare `check_watchtower_status` followed by
    # `watchtower_status=$?`. Under `set -e`, any non-zero return
    # (1 = not running, 2 = not found) aborted the script *before* the
    # remediation in Step 4 could run — the exact situations this script
    # exists to fix. Running the call in an `||` context disables errexit
    # for it, so the status can be inspected.
    local watchtower_status=0
    check_watchtower_status || watchtower_status=$?
    echo ""
    # Step 4: Start Watchtower if needed
    if [ $watchtower_status -eq 1 ]; then
        echo -e "${YELLOW}🔧 Watchtower needs to be started...${NC}"
        start_watchtower
        echo ""
    elif [ $watchtower_status -eq 2 ]; then
        echo -e "${RED}❌ Watchtower container not found. Please check deployment.${NC}"
        exit 1
    fi
    # Step 5: Check logs
    check_watchtower_logs
    echo ""
    # Step 6: Test API
    test_watchtower_api
    echo ""
    # Step 7: Verify configuration
    verify_configuration
    echo ""
    # Final status
    echo -e "${GREEN}🎉 Watchtower fix script completed!${NC}"
    echo ""
    echo -e "${BLUE}📋 Summary:${NC}"
    echo " • Watchtower container: Running"
    echo " • HTTP API: Available on port $API_PORT"
    echo " • Authentication: Required (token: $API_TOKEN)"
    echo " • Auto-restart: Configured"
    echo ""
    echo -e "${BLUE}💡 Next steps:${NC}"
    echo " • Monitor container health"
    echo " • Check automatic updates are working"
    echo " • Review logs periodically"
    echo ""
    echo -e "${GREEN}✅ All checks completed successfully!${NC}"
}
# Run main function
main "$@"

View File

@@ -0,0 +1,172 @@
#!/bin/bash
# Fix Watchtower Notification Issues (CORRECTED VERSION)
# This script ONLY fixes the HTTPS/HTTP notification protocol mismatch
# It does NOT touch Docker socket permissions (which are required for Watchtower to work)
#
# Phase 1 (below): gather state — container status, current notification URL,
# and recent logs — then set NEEDS_FIX when the known error signature is seen.
set -e
echo "🔧 Watchtower Notification Fix Script"
echo "====================================="
echo "⚠️ This script ONLY fixes notification issues"
echo "⚠️ It does NOT change Docker socket permissions (those are required!)"
echo
# Check if running as root/sudo
if [[ $EUID -ne 0 ]]; then
    echo "❌ This script must be run as root or with sudo"
    exit 1
fi
# Check if watchtower container exists (exact-name match via anchors)
if ! docker ps -a --format '{{.Names}}' | grep -q "^watchtower$"; then
    echo "❌ Watchtower container not found"
    exit 1
fi
echo "📋 Current Watchtower Status:"
echo "----------------------------"
echo "Container Status: $(docker ps --format '{{.Status}}' --filter name=watchtower)"
echo "Image: $(docker inspect watchtower | jq -r '.[0].Config.Image')"
echo
echo "🔍 Checking Notification Configuration:"
echo "--------------------------------------"
# Check current notification URL (jq `//` falls back when select matches nothing)
CURRENT_NOTIFICATION=$(docker inspect watchtower | jq -r '.[0].Config.Env[] | select(contains("NOTIFICATION_URL")) // "Not found"')
echo "Current notification URL: $CURRENT_NOTIFICATION"
# Check recent logs for notification errors
echo
echo "📋 Recent Notification Errors:"
echo "------------------------------"
docker logs watchtower --since 24h 2>/dev/null | grep -i "notification\|ntfy" | tail -5 || echo "No recent notification logs found"
echo
echo "🔍 Issues Identified:"
echo "--------------------"
NEEDS_FIX=false
# Check for HTTPS/HTTP mismatch — this exact log line is emitted when the
# notifier speaks HTTPS to a plain-HTTP ntfy server.
if docker logs watchtower --since 24h 2>/dev/null | grep -q "http: server gave HTTP response to HTTPS client"; then
    echo "⚠️ HTTPS/HTTP protocol mismatch detected"
    echo " Current: https://192.168.0.210:8081/updates"
    echo " Should be: http://192.168.0.210:8081/updates"
    NEEDS_FIX=true
fi
# Check if notification URL is configured
if [[ "$CURRENT_NOTIFICATION" == "Not found" ]]; then
    echo " No notification URL environment variable found"
    echo " (URL might be configured via command line arguments)"
fi
echo
# Phase 2: remediation. For Compose-managed containers only print manual
# instructions (recreating outside Compose would drift from the stack);
# for standalone containers offer an interactive automatic recreate.
if [[ "$NEEDS_FIX" == "true" ]]; then
    echo "🚨 NOTIFICATION ISSUE CONFIRMED"
    echo "The notification system is trying to use HTTPS but the server expects HTTP"
    echo
    # Check if we're in a compose stack (Compose networks embed the stack name)
    NETWORK_NAME=$(docker inspect watchtower | jq -r '.[0].NetworkSettings.Networks | keys[0]')
    if [[ "$NETWORK_NAME" == *"stack"* ]]; then
        echo "📝 RECOMMENDED ACTION (Docker Compose Stack):"
        echo "Since Watchtower is part of a Compose stack, you should:"
        echo "1. Find and edit the docker-compose.yml file"
        echo "2. Update the notification URL environment variable:"
        echo " environment:"
        echo " - WATCHTOWER_NOTIFICATION_URL=http://192.168.0.210:8081/updates"
        echo "3. Recreate the stack:"
        echo " docker-compose down && docker-compose up -d"
        echo
        echo "🔍 Looking for compose files..."
        # Try to find the compose file
        find /opt -name "*.yml" -o -name "*.yaml" 2>/dev/null | xargs grep -l "watchtower" 2>/dev/null | head -3 || echo "Compose files not found in /opt"
    else
        echo "🔧 AUTOMATIC FIX AVAILABLE"
        echo "Would you like to fix the notification URL? (y/N)"
        read -r response
        if [[ "$response" =~ ^[Yy]$ ]]; then
            echo "🔄 Stopping Watchtower..."
            docker stop watchtower
            echo "🗑️ Removing old container..."
            docker rm watchtower
            echo "🚀 Creating new Watchtower with corrected notification URL..."
            # Recreate with the same settings but an http:// (not https://)
            # notification URL, HTTP API on host port 8091, 04:00 schedule.
            docker run -d \
                --name watchtower \
                --restart unless-stopped \
                -v /var/run/docker.sock:/var/run/docker.sock \
                -e TZ=America/Los_Angeles \
                -e WATCHTOWER_CLEANUP=true \
                -e WATCHTOWER_HTTP_API_UPDATE=true \
                -e WATCHTOWER_HTTP_API_TOKEN="REDACTED_HTTP_TOKEN" \
                -e WATCHTOWER_HTTP_API_METRICS=true \
                -e WATCHTOWER_SCHEDULE="0 0 4 * * *" \
                -e WATCHTOWER_NOTIFICATION_URL=http://192.168.0.210:8081/updates \
                -e WATCHTOWER_NOTIFICATIONS=shoutrrr \
                -p 8091:8080 \
                containrrr/watchtower:latest
            echo "✅ Watchtower recreated with corrected notification URL"
            echo "🔍 Verifying fix..."
            sleep 3
            if docker ps --format '{{.Names}}' | grep -q watchtower; then
                echo "✅ Watchtower is running"
                # Test notification
                echo "🧪 Testing notification (this may take a moment)..."
                curl -s -H "Authorization: Bearer watchtower-update-token" \
                    -X POST http://localhost:8091/v1/update >/dev/null 2>&1 || echo "API test completed"
                sleep 2
                # Re-scan fresh logs for the original mismatch signature
                if docker logs watchtower --since 30s 2>/dev/null | grep -q "HTTP response to HTTPS client"; then
                    echo "❌ Notification issue still present"
                else
                    echo "✅ Notification issue appears to be resolved"
                fi
            else
                echo "❌ Watchtower failed to start"
            fi
        else
            echo "⏭️ Skipping automatic fix"
        fi
    fi
else
    echo "✅ No notification issues detected"
fi
echo
echo "📊 Final Status Check:"
echo "---------------------"
if docker ps --format '{{.Names}}\t{{.Status}}' | grep watchtower; then
    echo "✅ Watchtower is running"
    echo
    echo "🔧 How to manually trigger updates:"
    echo "curl -H \"Authorization: Bearer watchtower-update-token\" \\"
    echo " -X POST http://localhost:8091/v1/update"
else
    echo "❌ Watchtower is not running"
fi
echo
echo "⚠️ IMPORTANT SECURITY NOTE:"
echo "This script does NOT change Docker socket permissions."
echo "Watchtower REQUIRES read-write access to the Docker socket to:"
echo "- Pull new images"
echo "- Stop and start containers"
echo "- Remove old containers and images"
echo "Making the socket read-only would BREAK Watchtower completely."
echo
echo "🔗 For more information, see:"
echo " docs/WATCHTOWER_SECURITY_ANALYSIS.md"
echo
echo "✅ Notification fix complete"

View File

@@ -0,0 +1,136 @@
#!/bin/bash
# Fix Watchtower Security Issue
# This script addresses the Docker socket read-write access security issue
set -e
echo "🔧 Watchtower Security Fix Script"
echo "================================="
echo
# Check if running as root/sudo
if [[ $EUID -ne 0 ]]; then
echo "❌ This script must be run as root or with sudo"
exit 1
fi
# Check if watchtower container exists
if ! docker ps -a --format '{{.Names}}' | grep -q "^watchtower$"; then
echo "❌ Watchtower container not found"
exit 1
fi
echo "📋 Current Watchtower Configuration:"
echo "-----------------------------------"
docker inspect watchtower | jq -r '.[] | {
Name: .Name,
Image: .Config.Image,
Status: .State.Status,
DockerSocket: (.Mounts[] | select(.Destination=="/var/run/docker.sock") | .Mode),
Schedule: (.Config.Env[] | select(contains("SCHEDULE")) // "Not set"),
ApiToken: (.Config.Env[] | select(contains("API_TOKEN")) // "Not set")
}'
echo
echo "🔍 Issues Identified:"
echo "--------------------"
# Check Docker socket access
SOCKET_MODE=$(docker inspect watchtower | jq -r '.[0].Mounts[] | select(.Destination=="/var/run/docker.sock") | .Mode')
if [[ "$SOCKET_MODE" != "ro" ]]; then
echo "⚠️ Docker socket has read-write access (should be read-only)"
NEEDS_FIX=true
fi
# Check if we're in a compose stack
NETWORK_NAME=$(docker inspect watchtower | jq -r '.[0].NetworkSettings.Networks | keys[0]')
if [[ "$NETWORK_NAME" == *"stack"* ]]; then
echo " Watchtower is part of a Docker Compose stack: $NETWORK_NAME"
COMPOSE_STACK=true
fi
echo
# Phase 2: remediation. Compose-managed containers get manual instructions;
# standalone containers get an interactive recreate with a read-only socket.
# NOTE(review): the sibling notification-fix script argues Watchtower needs
# read-write socket access; confirm a :ro mount actually works before relying
# on this remediation.
if [[ "$NEEDS_FIX" == "true" ]]; then
    echo "🚨 SECURITY ISSUE CONFIRMED"
    echo "Watchtower has read-write access to Docker socket"
    echo "This is a security risk and should be fixed"
    echo
    if [[ "$COMPOSE_STACK" == "true" ]]; then
        echo "📝 RECOMMENDED ACTION:"
        echo "Since Watchtower is part of a Compose stack, you should:"
        echo "1. Update the docker-compose.yml file"
        echo "2. Change the Docker socket mount to read-only:"
        echo " volumes:"
        echo " - /var/run/docker.sock:/var/run/docker.sock:ro"
        echo "3. Recreate the stack:"
        echo " docker-compose down && docker-compose up -d"
        echo
        echo "🔍 Finding the compose file..."
        # Try to find the compose file
        COMPOSE_DIR="/opt/homelab"
        if [[ -d "$COMPOSE_DIR" ]]; then
            find "$COMPOSE_DIR" -name "*.yml" -o -name "*.yaml" | xargs grep -l "watchtower" 2>/dev/null | head -5
        fi
        echo
        echo "⚠️ Manual fix required for Compose stack"
    else
        echo "🔧 AUTOMATIC FIX AVAILABLE"
        echo "Would you like to automatically fix this issue? (y/N)"
        read -r response
        if [[ "$response" =~ ^[Yy]$ ]]; then
            echo "🔄 Stopping Watchtower..."
            docker stop watchtower
            echo "🗑️ Removing old container..."
            docker rm watchtower
            echo "🚀 Creating new Watchtower with read-only Docker socket..."
            # Recreate with the socket mounted :ro; other settings preserved.
            docker run -d \
                --name watchtower \
                --restart unless-stopped \
                -v /var/run/docker.sock:/var/run/docker.sock:ro \
                -e TZ=America/Los_Angeles \
                -e WATCHTOWER_CLEANUP=true \
                -e WATCHTOWER_SCHEDULE="0 0 */2 * * *" \
                -e WATCHTOWER_HTTP_API_TOKEN="REDACTED_HTTP_TOKEN" \
                -e WATCHTOWER_HTTP_API_METRICS=true \
                -p 8091:8080 \
                containrrr/watchtower:latest
            echo "✅ Watchtower recreated with read-only Docker socket access"
            echo "🔍 Verifying fix..."
            sleep 3
            # Re-inspect the mount mode to confirm the change took effect
            NEW_SOCKET_MODE=$(docker inspect watchtower | jq -r '.[0].Mounts[] | select(.Destination=="/var/run/docker.sock") | .Mode')
            if [[ "$NEW_SOCKET_MODE" == "ro" ]]; then
                echo "✅ SECURITY FIX CONFIRMED: Docker socket is now read-only"
            else
                echo "❌ Fix verification failed"
            fi
        else
            echo "⏭️ Skipping automatic fix"
        fi
    fi
else
    echo "✅ No security issues found"
fi
echo
echo "📊 Final Status Check:"
echo "---------------------"
if docker ps --format '{{.Names}}\t{{.Status}}' | grep watchtower; then
    echo "✅ Watchtower is running"
else
    echo "❌ Watchtower is not running"
fi
echo
echo "🔗 For more information, see:"
echo " docs/CONTAINER_DIAGNOSIS_REPORT.md"
echo
echo "✅ Security check complete"

View File

@@ -0,0 +1,93 @@
# a test script that generates a ton of users for debugging use
# note that you'll need to comment out the ratelimiter in delta/src/main.rs
# and keep the number relatively low or requests will time out (the beefier the machine the more you can handle).
# this script assumes mailhog is running, and uses that to automate "emails".
# In the real world, antispam will catch this and nuke you to hell and back.
# But it works fine in a dev env!
# requires httpx
import asyncio
import os
import re
import uuid
import httpx

# Required configuration, all read from the environment:
#   API_URL     - base URL of the API under test
#   MAILHOG_API - base URL of the MailHog HTTP API (used to fetch verify tokens)
#   COUNT       - number of accounts to create concurrently
#   INVITE      - invite code every created account joins with
API_URL: str = os.getenv("API_URL")  # type: ignore
MAILHOG_API: str = os.getenv("MAILHOG_API")  # type: ignore
COUNT = int(os.getenv("COUNT"))  # type: ignore # cbf to deal with type checking
INVITE: str = os.getenv("INVITE")  # type: ignore
# Fail fast if any variable is missing (COUNT=0 would also trip this).
assert API_URL and MAILHOG_API and COUNT and INVITE
# Normalise trailing slashes so path concatenation below is consistent.
# NOTE(review): str.strip("/") also removes a leading slash; harmless for
# absolute http(s) URLs but worth confirming rstrip wasn't intended.
API_URL = API_URL.strip("/")
MAILHOG_API = MAILHOG_API.strip("/")
async def filter_hog(client: httpx.AsyncClient, email: str) -> str:
    """
    returns the token provided by the mail server.
    This script assumes the use of mailhog.

    Searches MailHog for the newest message addressed to `email`, extracts
    the verification token from the /login/verify link in its body, then
    deletes the message so later searches for the same inbox start clean.
    Raises Exception on HTTP errors, missing messages, or a missing token.
    """
    # Search MailHog for messages delivered to this address.
    resp = await client.get(MAILHOG_API + "/api/v2/search", params={"kind": "to", "query": email}, follow_redirects=True, timeout=60)
    if resp.status_code != 200:
        raise Exception(resp.status_code, resp.content)
    data = resp.json()
    if not data["items"]:
        raise Exception("No message found")
    message_id = data["items"][0]["ID"]
    # Normalise CRLF line endings before pattern matching.
    body = data["items"][0]["Content"]["Body"].replace("\r", "")
    # The verify URL can be split by a quoted-printable soft line break
    # ("=" + newline) at several positions around the trailing slash,
    # hence the three alternatives before the named token group.
    token = re.search("/login/verify(=\n/|/\n=|/=\n)(?P<token>[^\n]+)", body, re.MULTILINE)
    if not token:
        raise Exception("No token found")
    ret = token.group("token")
    # Delete the message so the next lookup for this inbox finds nothing stale.
    await client.delete(MAILHOG_API + f"/api/v1/messages/{message_id}", timeout=60)
    return ret
async def task() -> str:  # annotation fixed: the function returns the user ID (was "-> None")
    """Create one account end-to-end: register, verify via MailHog, log in,
    complete onboarding, and join the configured invite.

    Returns the newly created user's ID (presumably a string ULID/ObjectId
    from ticket["_id"] — TODO confirm against the API).
    Raises Exception with a stage label on any non-success HTTP status.
    """
    # Short random ID doubles as the local-part of the email and (x3) the password.
    _id = str(uuid.uuid4())[:4]
    email = f"{_id}@example.com"
    async with httpx.AsyncClient() as client:
        resp = await client.post(API_URL + "/auth/account/create", json={"email": email, "password": _id*3, "invite": INVITE}, timeout=60)
        if resp.status_code != 204:
            raise Exception(resp.status_code, resp.content)
        # Pull the email-verification token out of MailHog.
        token = await filter_hog(client, email)
        resp = await client.post(API_URL + f"/auth/account/verify/{token}", timeout=60)
        if resp.status_code != 200:
            raise Exception("verify", resp.status_code, resp.content)
        ticket = resp.json()["ticket"]
        userid = ticket["_id"]
        resp = await client.post(API_URL + "/auth/session/login", json={"email": email, "password": _id*3, "friendly_name": "Not A Client"}, timeout=60)
        if resp.status_code != 200:
            raise Exception("session", resp.status_code, resp.content)
        session = resp.json()
        # Session token authenticates the remaining calls via x-session-token.
        token = session["token"]
        resp = await client.post(API_URL + "/onboard/complete", json={"username": _id}, headers={"x-session-token": token}, timeout=60) # complete onboarding to allow creating a session
        if resp.status_code != 200:
            raise Exception("onboard", resp.status_code, resp.content)
        resp = await client.post(API_URL + f"/invites/{INVITE}", headers={"x-session-token": token}, timeout=60)
        if resp.status_code != 200:
            raise Exception("invite", resp.status_code, resp.content)
        print(f"Created account and session for {email} with ID: {userid}")
        return userid
async def main():
    """Spawn COUNT account-creation tasks concurrently and print their user IDs."""
    workers = [asyncio.create_task(task()) for _ in range(COUNT)]
    results = await asyncio.gather(*workers)
    print(results)
asyncio.run(main())

View File

@@ -0,0 +1,928 @@
#!/usr/bin/env python3
"""
Generate comprehensive documentation for all homelab services.
This script analyzes Docker Compose files and creates detailed documentation for each service.
"""
import os
import yaml
import re
from pathlib import Path
from typing import Dict, List, Any, Optional
class ServiceDocumentationGenerator:
def __init__(self, repo_path: str):
self.repo_path = Path(repo_path)
self.docs_path = self.repo_path / "docs" / "services" / "individual"
self.docs_path.mkdir(parents=True, exist_ok=True)
# Service categories for better organization
self.categories = {
'media': ['plex', 'jellyfin', 'emby', 'tautulli', 'overseerr', 'ombi', 'radarr', 'sonarr', 'lidarr', 'readarr', 'bazarr', 'prowlarr', 'jackett', 'nzbget', 'sabnzbd', 'transmission', 'qbittorrent', 'deluge', 'immich', 'photoprism', 'navidrome', 'airsonic', 'calibre', 'komga', 'kavita'],
'monitoring': ['grafana', 'prometheus', 'uptime-kuma', 'uptimerobot', 'statping', 'healthchecks', 'netdata', 'zabbix', 'nagios', 'icinga', 'librenms', 'observium', 'cacti', 'ntopng', 'bandwidthd', 'darkstat', 'vnstat', 'smokeping', 'blackbox-exporter', 'node-exporter', 'cadvisor', 'exportarr'],
'productivity': ['nextcloud', 'owncloud', 'seafile', 'syncthing', 'filebrowser', 'paperless-ngx', 'paperless', 'docspell', 'teedy', 'bookstack', 'dokuwiki', 'tiddlywiki', 'outline', 'siyuan', 'logseq', 'obsidian', 'joplin', 'standardnotes', 'trilium', 'zettlr', 'typora', 'marktext', 'ghostwriter', 'remarkable', 'xournalpp', 'rnote', 'firefly-iii', 'actual-budget', 'budget-zen', 'maybe-finance', 'kresus', 'homebank', 'gnucash', 'ledger', 'beancount', 'plaintextaccounting'],
'development': ['gitea', 'gitlab', 'github', 'bitbucket', 'sourcehut', 'forgejo', 'cgit', 'gitweb', 'jenkins', 'drone', 'woodpecker', 'buildkite', 'teamcity', 'bamboo', 'travis', 'circleci', 'github-actions', 'gitlab-ci', 'azure-devops', 'aws-codebuild', 'portainer', 'yacht', 'dockge', 'lazydocker', 'ctop', 'dive', 'docker-compose-ui', 'docker-registry', 'harbor', 'quay', 'nexus', 'artifactory', 'verdaccio', 'npm-registry'],
'communication': ['matrix-synapse', 'element', 'riot', 'nheko', 'fluffychat', 'cinny', 'hydrogen', 'schildichat', 'mattermost', 'rocket-chat', 'zulip', 'slack', 'discord', 'telegram', 'signal', 'whatsapp', 'messenger', 'skype', 'zoom', 'jitsi', 'bigbluebutton', 'jami', 'briar', 'session', 'wickr', 'threema', 'wire', 'keybase', 'mastodon', 'pleroma', 'misskey', 'diaspora', 'friendica', 'hubzilla', 'peertube', 'pixelfed', 'lemmy', 'kbin'],
'security': ['vaultwarden', 'bitwarden', 'keepass', 'passbolt', 'psono', 'teampass', 'pleasant-password', 'authelia', 'authentik', 'keycloak', 'gluu', 'freeipa', 'openldap', 'active-directory', 'okta', 'auth0', 'firebase-auth', 'aws-cognito', 'azure-ad', 'google-identity', 'pihole', 'adguard', 'blocky', 'unbound', 'bind9', 'powerdns', 'coredns', 'technitium', 'wireguard', 'openvpn', 'ipsec', 'tinc', 'zerotier', 'tailscale', 'nebula', 'headscale'],
'networking': ['nginx', 'apache', 'caddy', 'traefik', 'haproxy', 'envoy', 'istio', 'linkerd', 'consul', 'vault', 'nomad', 'pfsense', 'opnsense', 'vyos', 'mikrotik', 'ubiquiti', 'tp-link', 'netgear', 'asus', 'linksys', 'dlink', 'zyxel', 'fortinet', 'sonicwall', 'watchguard', 'palo-alto', 'checkpoint', 'juniper', 'cisco', 'arista', 'cumulus', 'sonic', 'frr', 'quagga', 'bird', 'openbgpd'],
'storage': ['minio', 's3', 'ceph', 'glusterfs', 'moosefs', 'lizardfs', 'orangefs', 'lustre', 'beegfs', 'gpfs', 'hdfs', 'cassandra', 'mongodb', 'postgresql', 'mysql', 'mariadb', 'sqlite', 'redis', 'memcached', 'elasticsearch', 'solr', 'sphinx', 'whoosh', 'xapian', 'lucene', 'influxdb', 'prometheus', 'graphite', 'opentsdb', 'kairosdb', 'druid', 'clickhouse', 'timescaledb'],
'gaming': ['minecraft', 'factorio', 'satisfactory', 'valheim', 'terraria', 'starbound', 'dont-starve', 'project-zomboid', 'rust', 'ark', 'conan-exiles', 'space-engineers', 'astroneer', 'raft', 'green-hell', 'the-forest', 'subnautica', 'no-mans-sky', 'elite-dangerous', 'star-citizen', 'eve-online', 'world-of-warcraft', 'final-fantasy-xiv', 'guild-wars-2', 'elder-scrolls-online', 'destiny-2', 'warframe', 'path-of-exile', 'diablo', 'torchlight', 'grim-dawn', 'last-epoch'],
'ai': ['ollama', 'llamagpt', 'chatgpt', 'gpt4all', 'localai', 'text-generation-webui', 'koboldai', 'novelai', 'stable-diffusion', 'automatic1111', 'invokeai', 'comfyui', 'fooocus', 'easydiffusion', 'diffusionbee', 'draw-things', 'whisper', 'faster-whisper', 'vosk', 'deepspeech', 'wav2vec', 'espnet', 'kaldi', 'julius', 'pocketsphinx', 'festival', 'espeak', 'mary-tts', 'mimic', 'tacotron', 'wavenet', 'neural-voices']
}
def find_compose_files(self) -> List[Path]:
    """Locate every YAML file under self.repo_path that looks like a Docker Compose config.

    Returns:
        Sorted list of paths whose text contains a ``services:`` key plus at
        least one of ``version:``/``image:``/``build:``.
    """
    skip_dirs = {'.git', 'docs', 'node_modules', '.vscode', '__pycache__', 'ansible'}
    candidates = [*self.repo_path.rglob('*.yml'), *self.repo_path.rglob('*.yaml')]
    matches = []
    for candidate in candidates:
        # Skip anything inside tooling / documentation directories.
        if skip_dirs.intersection(candidate.parts):
            continue
        try:
            text = candidate.read_text(encoding='utf-8')
        except Exception as exc:
            # Unreadable files are reported but never abort the scan.
            print(f"Warning: Could not read {candidate}: {exc}")
            continue
        # Cheap textual heuristic for "is this a compose file?".
        if 'services:' in text and any(tok in text for tok in ('version:', 'image:', 'build:')):
            matches.append(candidate)
    return sorted(matches)
def parse_compose_file(self, compose_file: Path) -> Dict[str, Any]:
    """Parse a docker-compose file and extract service information.

    Args:
        compose_file: Path to a compose YAML file located under self.repo_path.

    Returns:
        Mapping of service name -> {'config', 'host', 'compose_file',
        'directory'}; empty dict when the file has no 'services' key or
        fails to parse.
    """
    try:
        with open(compose_file, 'r', encoding='utf-8') as f:
            content = yaml.safe_load(f)
        if not content or 'services' not in content:
            return {}
        # Extract metadata from file path: the first path component under the
        # repo root is treated as the host name (repo layout appears to be
        # <host>/<stack>/compose.yml -- TODO confirm against repo structure).
        relative_path = compose_file.relative_to(self.repo_path)
        host = relative_path.parts[0] if len(relative_path.parts) > 1 else 'unknown'
        services_info = {}
        for service_name, service_config in content['services'].items():
            services_info[service_name] = {
                'config': service_config,  # raw per-service compose mapping
                'host': host,
                'compose_file': str(relative_path),
                'directory': str(compose_file.parent.relative_to(self.repo_path))
            }
        return services_info
    except Exception as e:
        # Best-effort: a malformed file is reported but never aborts the scan.
        print(f"Error parsing {compose_file}: {e}")
        return {}
def categorize_service(self, service_name: str, image: str = '') -> str:
    """Map a service onto one of self.categories by keyword match.

    Separators are stripped from both sides so e.g. 'uptime-kuma' can match
    the keyword 'uptimekuma'. Returns 'other' when nothing matches.
    """
    normalized_name = service_name.lower().replace('-', '').replace('_', '')
    normalized_image = image.lower() if image else ''
    for category, keywords in self.categories.items():
        for raw_keyword in keywords:
            keyword = raw_keyword.replace('-', '').replace('_', '')
            if keyword in normalized_name or keyword in normalized_image:
                return category
    return 'other'
def extract_ports(self, service_config: Dict) -> List[str]:
    """Extract port mappings from a service configuration.

    Handles compose short syntax (strings like "8080:80/udp", and bare
    integers such as YAML `- 8080`, which the previous version silently
    dropped) as well as long syntax dicts with 'target'/'published'.
    A `ports:` key that is present but null is treated as empty.

    Returns:
        List of mapping strings, e.g. ["8080:80", "8443:443"].
    """
    ports: List[str] = []
    for port in service_config.get('ports') or []:
        if isinstance(port, dict):
            # Long syntax: {target: 80, published: 8080, protocol: tcp}
            target = port.get('target', '')
            published = port.get('published', '')
            if target and published:
                ports.append(f"{published}:{target}")
        elif isinstance(port, (str, int)):
            # Short syntax; bare integers are normalised to strings.
            ports.append(str(port))
    return ports
def extract_volumes(self, service_config: Dict) -> List[str]:
    """Extract volume mappings from a service configuration.

    Supports short-syntax strings and long-syntax dicts with
    'source'/'target'; dict entries missing either side are skipped.
    """
    mappings: List[str] = []
    if 'volumes' not in service_config:
        return mappings
    for entry in service_config['volumes']:
        if isinstance(entry, str):
            mappings.append(entry)
        elif isinstance(entry, dict):
            src = entry.get('source', '')
            dst = entry.get('target', '')
            if src and dst:
                mappings.append(f"{src}:{dst}")
    return mappings
def extract_environment(self, service_config: Dict) -> Dict[str, str]:
    """Extract environment variables from a service configuration.

    Fixes over the previous version:
    - non-string list items no longer crash (`'=' in 3` raised TypeError);
    - bare "VAR" list entries (host pass-through) are kept with '' value
      instead of being silently dropped;
    - the dict form is copied (not aliased) and values are coerced to str,
      honouring the declared Dict[str, str] return contract.
    """
    env_vars: Dict[str, str] = {}
    env = service_config.get('environment')
    if isinstance(env, list):
        for item in env:
            text = str(item)
            if '=' in text:
                key, value = text.split('=', 1)
                env_vars[key] = value
            else:
                # Bare "VAR" entries inherit from the host environment.
                env_vars[text] = ''
    elif isinstance(env, dict):
        # YAML may parse values as int/bool/None; normalise to strings.
        env_vars = {str(k): '' if v is None else str(v) for k, v in env.items()}
    return env_vars
def get_difficulty_level(self, service_name: str, service_config: Dict) -> str:
    """Classify a service as 🔴 (advanced), 🟡 (intermediate) or 🟢 (beginner).

    Keyword lists drive the name/image check; structural complexity
    (dependencies, many volumes, multiple networks, a build step) also
    bumps an otherwise-unknown service to intermediate.
    """
    # Services that demand significant operational expertise.
    advanced_keywords = [
        'gitlab', 'jenkins', 'kubernetes', 'consul', 'vault', 'nomad',
        'elasticsearch', 'cassandra', 'mongodb-cluster', 'postgresql-cluster',
        'matrix-synapse', 'mastodon', 'peertube', 'nextcloud', 'keycloak',
        'authentik', 'authelia', 'traefik', 'istio', 'linkerd'
    ]
    # Services that need basic Docker/Linux knowledge.
    intermediate_keywords = [
        'grafana', 'prometheus', 'nginx', 'caddy', 'haproxy', 'wireguard',
        'openvpn', 'pihole', 'adguard', 'vaultwarden', 'bitwarden',
        'paperless', 'bookstack', 'dokuwiki', 'mattermost', 'rocket-chat',
        'portainer', 'yacht', 'immich', 'photoprism', 'jellyfin', 'emby'
    ]
    name = service_name.lower()
    image = service_config.get('image', '').lower()

    def mentions(keywords):
        # True when any keyword occurs in the service name or image.
        return any(kw in name or kw in image for kw in keywords)

    complex_config = (
        'depends_on' in service_config
        or len(service_config.get('volumes', [])) > 3
        or ('networks' in service_config and len(service_config['networks']) > 1)
        or 'build' in service_config
    )
    if mentions(advanced_keywords):
        return '🔴'
    if mentions(intermediate_keywords) or complex_config:
        return '🟡'
    return '🟢'
def generate_service_documentation(self, service_name: str, service_info: Dict) -> str:
    """Generate comprehensive documentation for a single service.

    Args:
        service_name: Compose service key.
        service_info: Dict with 'config', 'host', 'compose_file' and
            'directory', as produced by parse_compose_file().

    Returns:
        Complete markdown document for the service; every section is
        delegated to a dedicated formatting/generation helper.
    """
    config = service_info['config']
    host = service_info['host']
    compose_file = service_info['compose_file']
    directory = service_info['directory']
    # Extract key information from the raw compose mapping.
    image = config.get('image', 'Unknown')
    ports = self.extract_ports(config)
    volumes = self.extract_volumes(config)
    env_vars = self.extract_environment(config)
    category = self.categorize_service(service_name, image)
    difficulty = self.get_difficulty_level(service_name, config)
    # Generate documentation content. The template is a single f-string;
    # do not re-indent its body, the text is emitted verbatim as markdown.
    doc_content = f"""# {service_name.title().replace('-', ' ').replace('_', ' ')}
**{difficulty} {category.title()} Service**
## 📋 Service Overview
| Property | Value |
|----------|-------|
| **Service Name** | {service_name} |
| **Host** | {host} |
| **Category** | {category.title()} |
| **Difficulty** | {difficulty} |
| **Docker Image** | `{image}` |
| **Compose File** | `{compose_file}` |
| **Directory** | `{directory}` |
## 🎯 Purpose
{self.get_service_description(service_name, image, category)}
## 🚀 Quick Start
### Prerequisites
- Docker and Docker Compose installed
- Basic understanding of REDACTED_APP_PASSWORD
- Access to the host system ({host})
### Deployment
```bash
# Navigate to service directory
cd {directory}
# Start the service
docker-compose up -d
# Check service status
docker-compose ps
# View logs
docker-compose logs -f {service_name}
```
## 🔧 Configuration
### Docker Compose Configuration
```yaml
{self.format_compose_config(config)}
```
### Environment Variables
{self.format_environment_variables(env_vars)}
### Port Mappings
{self.format_ports(ports)}
### Volume Mappings
{self.format_volumes(volumes)}
## 🌐 Access Information
{self.generate_access_info(service_name, ports, host)}
## 🔒 Security Considerations
{self.generate_security_info(service_name, config)}
## 📊 Resource Requirements
{self.generate_resource_info(config)}
## 🔍 Health Monitoring
{self.generate_health_info(config)}
## 🚨 Troubleshooting
### Common Issues
{self.generate_troubleshooting_info(service_name, category)}
### Useful Commands
```bash
# Check service status
docker-compose ps
# View real-time logs
docker-compose logs -f {service_name}
# Restart service
docker-compose restart {service_name}
# Update service
docker-compose pull {service_name}
docker-compose up -d {service_name}
# Access service shell
docker-compose exec {service_name} /bin/bash
# or
docker-compose exec {service_name} /bin/sh
```
## 📚 Additional Resources
{self.generate_additional_resources(service_name, image)}
## 🔗 Related Services
{self.generate_related_services(service_name, category, host)}
---
*This documentation is auto-generated from the Docker Compose configuration. For the most up-to-date information, refer to the official documentation and the actual compose file.*
**Last Updated**: {self.get_current_date()}
**Configuration Source**: `{compose_file}`
"""
    return doc_content
def get_service_description(self, service_name: str, image: str, category: str) -> str:
    """Return a human-readable description for a service.

    Known services get a curated blurb; anything else falls back to a
    generic per-category description.

    Fix: the lookup key strips '-'/'_' but the curated dict keys did not,
    so hyphenated entries ('uptime-kuma', 'paperless-ngx', 'matrix-synapse',
    'stable-diffusion') were unreachable. Keys are now normalised the same
    way before matching.
    """
    descriptions = {
        'plex': 'Plex Media Server organizes video, music and photos from personal media libraries and streams them to smart TVs, streaming boxes and mobile devices.',
        'jellyfin': 'Jellyfin is a Free Software Media System that puts you in control of managing and streaming your media.',
        'grafana': 'Grafana is the open source analytics & monitoring solution for every database.',
        'prometheus': 'Prometheus is an open-source systems monitoring and alerting toolkit.',
        'uptime-kuma': 'Uptime Kuma is a fancy self-hosted monitoring tool.',
        'nginx': 'NGINX is a web server that can also be used as a reverse proxy, load balancer, mail proxy and HTTP cache.',
        'traefik': 'Traefik is a modern HTTP reverse proxy and load balancer that makes deploying microservices easy.',
        'portainer': 'Portainer is a lightweight management UI which allows you to easily manage your different Docker environments.',
        'vaultwarden': 'Vaultwarden is an alternative implementation of the Bitwarden server API written in Rust and compatible with upstream Bitwarden clients.',
        'pihole': 'Pi-hole is a DNS sinkhole that protects your devices from unwanted content, without installing any client-side software.',
        'adguard': 'AdGuard Home is a network-wide software for blocking ads & tracking.',
        'wireguard': 'WireGuard is an extremely simple yet fast and modern VPN that utilizes state-of-the-art cryptography.',
        'nextcloud': 'Nextcloud is a suite of client-server software for creating and using file hosting services.',
        'immich': 'High performance self-hosted photo and video backup solution.',
        'paperless-ngx': 'Paperless-ngx is a document management system that transforms your physical documents into a searchable online archive.',
        'gitea': 'Gitea is a community managed lightweight code hosting solution written in Go.',
        'gitlab': 'GitLab is a web-based DevOps lifecycle tool that provides a Git-repository manager.',
        'mattermost': 'Mattermost is an open-source, self-hostable online chat service with file sharing, search, and integrations.',
        'matrix-synapse': 'Matrix Synapse is a reference homeserver implementation of the Matrix protocol.',
        'mastodon': 'Mastodon is a free and open-source self-hosted social networking service.',
        'minecraft': 'Minecraft server for multiplayer gaming.',
        'factorio': 'Factorio dedicated server for multiplayer factory building.',
        'satisfactory': 'Satisfactory dedicated server for multiplayer factory building in 3D.',
        'ollama': 'Ollama is a tool for running large language models locally.',
        'whisper': 'OpenAI Whisper is an automatic speech recognition system.',
        'stable-diffusion': 'Stable Diffusion is a deep learning, text-to-image model.',
    }
    service_key = service_name.lower().replace('-', '').replace('_', '')
    # Normalise dict keys the same way as the lookup key so hyphenated
    # entries can actually match.
    normalized = {k.replace('-', '').replace('_', ''): v for k, v in descriptions.items()}
    # Try exact match first
    if service_key in normalized:
        return normalized[service_key]
    # Try partial matches
    for key, desc in normalized.items():
        if key in service_key or service_key in key:
            return desc
    # Generate generic description based on category
    category_descriptions = {
        'media': f'{service_name} is a media management and streaming service that helps organize and serve your digital media content.',
        'monitoring': f'{service_name} is a monitoring and observability tool that helps track system performance and health.',
        'productivity': f'{service_name} is a productivity application that helps manage tasks, documents, or workflows.',
        'development': f'{service_name} is a development tool that assists with code management, CI/CD, or software development workflows.',
        'communication': f'{service_name} is a communication platform that enables messaging, collaboration, or social interaction.',
        'security': f'{service_name} is a security tool that helps protect systems, manage authentication, or secure communications.',
        'networking': f'{service_name} is a networking service that manages network traffic, routing, or connectivity.',
        'storage': f'{service_name} is a storage solution that manages data persistence, backup, or file sharing.',
        'gaming': f'{service_name} is a gaming server that hosts multiplayer games or gaming-related services.',
        'ai': f'{service_name} is an AI/ML service that provides artificial intelligence or machine learning capabilities.',
        'other': f'{service_name} is a specialized service that provides specific functionality for the homelab infrastructure.'
    }
    return category_descriptions.get(category, category_descriptions['other'])
def format_compose_config(self, config: Dict) -> str:
    """Render a service's compose mapping as YAML text.

    Falls back to repr-style output when yaml is unavailable or the dump
    fails. The handler was narrowed from a bare ``except:`` so that
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    try:
        import yaml  # local import: helper stays usable even without module-level yaml
        return yaml.dump(config, default_flow_style=False, indent=2)
    except Exception:
        return str(config)
def format_environment_variables(self, env_vars: Dict[str, str]) -> str:
    """Render environment variables as a markdown table.

    Values whose variable name looks secret-bearing are masked.
    """
    if not env_vars:
        return "No environment variables configured."
    sensitive_markers = ('password', 'secret', 'key', 'token')
    rows = [
        "| Variable | Value | Description |",
        "|----------|-------|-------------|",
    ]
    for name, value in env_vars.items():
        # Never print credential-like values in generated docs.
        shown = '***MASKED***' if any(m in name.lower() for m in sensitive_markers) else value
        rows.append(f"| `{name}` | `{shown}` | {self.get_env_var_description(name)} |")
    return '\n'.join(rows) + '\n'
def get_env_var_description(self, var_name: str) -> str:
    """Best-effort description for well-known environment variable names.

    Matching is a case-insensitive substring test against the variable
    name; the first hit wins, so order is significant.
    """
    known = (
        ('TZ', 'Timezone setting'),
        ('PUID', 'User ID for file permissions'),
        ('PGID', 'Group ID for file permissions'),
        ('MYSQL_ROOT_PASSWORD', 'MySQL root password'),
        ('POSTGRES_PASSWORD', 'PostgreSQL password'),
        ('REDIS_PASSWORD', 'Redis authentication password'),
        ('ADMIN_PASSWORD', 'Administrator password'),
        ('SECRET_KEY', 'Application secret key'),
        ('JWT_SECRET', 'JWT signing secret'),
        ('DATABASE_URL', 'Database connection string'),
        ('DOMAIN', 'Service domain name'),
        ('BASE_URL', 'Base URL for the service'),
        ('DEBUG', 'Enable debug mode'),
        ('LOG_LEVEL', 'Logging verbosity level'),
    )
    lowered = var_name.lower()
    for known_name, description in known:
        if known_name.lower() in lowered:
            return description
    return 'Configuration variable'
def format_ports(self, ports: List[str]) -> str:
    """Render port mappings as a markdown table.

    Accepts "HOST:CONTAINER[/proto]" strings; a bare port is shown
    identically on both sides.
    """
    if not ports:
        return "No ports exposed."
    lines = [
        "| Host Port | Container Port | Protocol | Purpose |",
        "|-----------|----------------|----------|----------|",
    ]
    for mapping in ports:
        if ':' not in mapping:
            lines.append(f"| {mapping} | {mapping} | TCP | Service port |")
            continue
        host_side, container_side = mapping.split(':', 1)
        proto = 'TCP'
        if '/' in container_side:
            container_side, proto = container_side.split('/')
        purpose = self.get_port_purpose(container_side)
        lines.append(f"| {host_side} | {container_side} | {proto.upper()} | {purpose} |")
    return '\n'.join(lines) + '\n'
def get_port_purpose(self, port: str) -> str:
    """Describe what a well-known container port is conventionally used for."""
    well_known = {
        # Web / management
        '80': 'HTTP web interface',
        '443': 'HTTPS web interface',
        '8080': 'Alternative HTTP port',
        '8443': 'Alternative HTTPS port',
        '3000': 'Web interface',
        '9000': 'Management interface',
        # Databases / caches
        '5432': 'PostgreSQL database',
        '3306': 'MySQL/MariaDB database',
        '6379': 'Redis cache',
        '27017': 'MongoDB database',
        '8086': 'InfluxDB',
        # Monitoring
        '9090': 'Prometheus metrics',
        '3001': 'Monitoring interface',
        # Games
        '25565': 'Minecraft server',
        '7777': 'Game server',
        # Infrastructure protocols
        '22': 'SSH access',
        '21': 'FTP',
        '53': 'DNS',
        '67': 'DHCP',
        '123': 'NTP',
        '161': 'SNMP',
        '514': 'Syslog',
        '1883': 'MQTT',
        '8883': 'MQTT over SSL',
    }
    return well_known.get(port, 'Service port')
def format_volumes(self, volumes: List[str]) -> str:
    """Render volume mappings as a markdown table.

    Host paths beginning with '/' or './' are labelled bind mounts;
    anything else is treated as a named volume.
    """
    if not volumes:
        return "No volumes mounted."
    rows = [
        "| Host Path | Container Path | Type | Purpose |",
        "|-----------|----------------|------|----------|",
    ]
    for mapping in volumes:
        if ':' in mapping:
            pieces = mapping.split(':')
            src, dst = pieces[0], pieces[1]
            kind = 'bind' if src.startswith(('/', './')) else 'volume'
            rows.append(f"| `{src}` | `{dst}` | {kind} | {self.get_volume_purpose(dst)} |")
        else:
            rows.append(f"| `{mapping}` | `{mapping}` | volume | Data storage |")
    return '\n'.join(rows) + '\n'
def get_volume_purpose(self, path: str) -> str:
    """Describe a container mount point by keyword.

    Order matters: the first matching substring wins, so earlier entries
    take precedence for overlapping paths (e.g. '/data' before '/app/data').
    """
    known_mounts = (
        ('/config', 'Configuration files'),
        ('/data', 'Application data'),
        ('/app/data', 'Application data'),
        ('/var/lib', 'Service data'),
        ('/etc', 'Configuration files'),
        ('/logs', 'Log files'),
        ('/var/log', 'System logs'),
        ('/media', 'Media files'),
        ('/downloads', 'Downloaded files'),
        ('/uploads', 'Uploaded files'),
        ('/backup', 'Backup files'),
        ('/tmp', 'Temporary files'),
        ('/cache', 'Cache data'),
        ('/db', 'Database files'),
        ('/ssl', 'SSL certificates'),
        ('/keys', 'Encryption keys'),
    )
    lowered = path.lower()
    for needle, purpose in known_mounts:
        if needle in lowered:
            return purpose
    return 'Data storage'
def generate_access_info(self, service_name: str, ports: List[str], host: str) -> str:
    """Generate access information (URLs + default credentials) for the service.

    Fix: compose short syntax can be HOST:CONTAINER or IP:HOST:CONTAINER.
    The previous version always took split(':')[0]/[1], so an IP-prefixed
    mapping produced the bind IP as the "host port" and the host port as
    the "container port". The container port is now the LAST segment
    (minus any '/protocol' suffix) and the host port the one before it.
    """
    if not ports:
        return "This service does not expose any web interfaces."
    web_ui_container_ports = ['80', '443', '8080', '8443', '3000', '9000', '8000', '5000']
    web_ports = []
    for mapping in ports:
        if ':' not in mapping:
            continue
        segments = mapping.split(':')
        container_port = segments[-1].split('/')[0]
        host_port = segments[-2]
        if container_port in web_ui_container_ports:
            web_ports.append(host_port)
    if not web_ports:
        return f"Service ports: {', '.join(ports)}"
    result = "### Web Interface\n"
    for port in web_ports:
        protocol = 'https' if port in ['443', '8443'] else 'http'
        result += f"- **{protocol.upper()}**: `{protocol}://{host}:{port}`\n"
    result += "\n### Default Credentials\n"
    result += self.get_default_credentials(service_name)
    return result
def get_default_credentials(self, service_name: str) -> str:
    """Get default credentials for common services.

    Fix: the lookup key strips '-'/'_' but dict keys did not, so the
    hyphenated entries ('matrix-synapse', 'uptime-kuma', 'paperless-ngx')
    could never match. Keys are now normalised before the substring test.
    """
    credentials = {
        'grafana': 'Username: `admin`, Password: "REDACTED_PASSWORD" (change on first login)',
        'portainer': 'Set admin password on first access',
        'jenkins': 'Check logs for initial admin password',
        'gitlab': 'Username: `root`, Password: "REDACTED_PASSWORD" `/etc/gitlab/initial_root_password`',
        'nextcloud': 'Set admin credentials during initial setup',
        'mattermost': 'Create admin account during setup',
        'mastodon': 'Create admin account via command line',
        'matrix-synapse': 'Create users via command line or admin API',
        'uptime-kuma': 'Set admin credentials on first access',
        'vaultwarden': 'Create account on first access',
        'paperless-ngx': 'Create superuser via management command'
    }
    service_key = service_name.lower().replace('-', '').replace('_', '')
    for key, creds in credentials.items():
        # Normalise the dict key the same way as the lookup key.
        normalized_key = key.replace('-', '').replace('_', '')
        if normalized_key in service_key or service_key in normalized_key:
            return creds
    return 'Refer to service documentation for default credentials'
def generate_security_info(self, service_name: str, config: Dict) -> str:
    """Summarise the container-hardening posture as a markdown bullet list."""
    findings = []
    # Hardening flags present in the compose config get a ✅; missing ones a ⚠️.
    findings.append(
        "✅ Security options configured" if 'security_opt' in config
        else "⚠️ Consider adding security options (no-new-privileges)"
    )
    findings.append(
        "✅ Non-root user configured" if 'user' in config
        else "⚠️ Consider running as non-root user"
    )
    if config.get('read_only', False):
        findings.append("✅ Read-only root filesystem")
    if 'cap_drop' in config:
        findings.append("✅ Capabilities dropped")
    # Per-service hardening advice (empty for unknown services).
    findings += self.get_service_security_recommendations(service_name)
    return '\n'.join(f"- {item}" for item in findings)
def get_service_security_recommendations(self, service_name: str) -> List[str]:
    """Get security recommendations for specific services.

    Fix: the lookup key strips '-'/'_' but the dict key 'matrix-synapse'
    kept its hyphen, so that entry was unreachable. Keys are now
    normalised before matching.
    """
    recommendations = {
        'vaultwarden': [
            '🔒 Enable HTTPS with reverse proxy',
            '🔒 Disable user registration after setup',
            '🔒 Enable 2FA for all accounts',
            '🔒 Regular database backups'
        ],
        'nextcloud': [
            '🔒 Enable HTTPS',
            '🔒 Configure trusted domains',
            '🔒 Enable 2FA',
            '🔒 Regular security updates'
        ],
        'gitlab': [
            '🔒 Enable HTTPS',
            '🔒 Configure SSH keys',
            '🔒 Enable 2FA',
            '🔒 Regular backups'
        ],
        'matrix-synapse': [
            '🔒 Enable HTTPS',
            '🔒 Configure federation carefully',
            '🔒 Regular database backups',
            '🔒 Monitor resource usage'
        ]
    }
    service_key = service_name.lower().replace('-', '').replace('_', '')
    for key, recs in recommendations.items():
        # Normalise the dict key the same way as the lookup key.
        normalized_key = key.replace('-', '').replace('_', '')
        if normalized_key in service_key or service_key in normalized_key:
            return recs
    return []
def generate_resource_info(self, config: Dict) -> str:
    """Report configured deploy resource limits plus generic sizing guidance."""
    limits = config.get('deploy', {}).get('resources', {}).get('limits', {})
    info = []
    if limits:
        if 'memory' in limits:
            info.append(f"**Memory Limit**: {limits['memory']}")
        if 'cpus' in limits:
            info.append(f"**CPU Limit**: {limits['cpus']}")
    else:
        info.append("No resource limits configured")
    # Generic guidance appended for every service.
    info += [
        "",
        "### Recommended Resources",
        "- **Minimum RAM**: 512MB",
        "- **Recommended RAM**: 1GB+",
        "- **CPU**: 1 core minimum",
        "- **Storage**: Varies by usage",
        "",
        "### Resource Monitoring",
        "Monitor resource usage with:",
        "```bash",
        "docker stats",
        "```"
    ]
    return '\n'.join(info)
def generate_health_info(self, config: Dict) -> str:
    """Generate health monitoring information.

    Returns a markdown fragment describing the service's healthcheck (if
    configured) plus manual docker commands for checking container health.
    """
    health_info = []
    # Check for health check configuration
    if 'healthcheck' in config:
        health_config = config['healthcheck']
        health_info.append("✅ Health check configured")
        if 'test' in health_config:
            test_cmd = health_config['test']
            # Compose allows the test as either a string or an exec-form list.
            if isinstance(test_cmd, list):
                test_cmd = ' '.join(test_cmd)
            health_info.append(f"**Test Command**: `{test_cmd}`")
        if 'interval' in health_config:
            health_info.append(f"**Check Interval**: {health_config['interval']}")
        if 'timeout' in health_config:
            health_info.append(f"**Timeout**: {health_config['timeout']}")
        if 'retries' in health_config:
            health_info.append(f"**Retries**: {health_config['retries']}")
    else:
        # No healthcheck: emit a warning plus a ready-to-paste YAML example.
        health_info.append("⚠️ No health check configured")
        health_info.append("Consider adding a health check:")
        health_info.append("```yaml")
        health_info.append("healthcheck:")
        health_info.append(" test: [\"CMD\", \"curl\", \"-f\", \"http://localhost:PORT/health\"]")
        health_info.append(" interval: 30s")
        health_info.append(" timeout: 10s")
        health_info.append(" retries: 3")
        health_info.append("```")
    # Add monitoring commands (always appended, regardless of healthcheck).
    health_info.extend([
        "",
        "### Manual Health Checks",
        "```bash",
        "# Check container health",
        "docker inspect --format='{{.State.Health.Status}}' CONTAINER_NAME",
        "",
        "# View health check logs",
        "docker inspect --format='{{range .State.Health.Log}}{{.Output}}{{end}}' CONTAINER_NAME",
        "```"
    ])
    return '\n'.join(health_info)
def generate_troubleshooting_info(self, service_name: str, category: str) -> str:
"""Generate troubleshooting information."""
common_issues = [
"**Service won't start**",
"- Check Docker logs: `docker-compose logs service-name`",
"- Verify port availability: `netstat -tulpn | grep PORT`",
"- Check file permissions on mounted volumes",
"",
"**Can't access web interface**",
"- Verify service is running: `docker-compose ps`",
"- Check firewall settings",
"- Confirm correct port mapping",
"",
"**Performance issues**",
"- Monitor resource usage: `docker stats`",
"- Check available disk space: `df -h`",
"- Review service logs for errors"
]
# Add category-specific troubleshooting
category_issues = {
'media': [
"",
"**Media not showing**",
"- Check media file permissions",
"- Verify volume mounts are correct",
"- Scan media library manually"
],
'monitoring': [
"",
"**Metrics not collecting**",
"- Check target endpoints are accessible",
"- Verify configuration syntax",
"- Check network connectivity"
],
'security': [
"",
"**Authentication issues**",
"- Verify credentials are correct",
"- Check LDAP/SSO configuration",
"- Review authentication logs"
]
}
issues = common_issues.copy()
if category in category_issues:
issues.extend(category_issues[category])
return '\n'.join(issues)
def generate_additional_resources(self, service_name: str, image: str) -> str:
    """Build a markdown list of documentation/community links for the service."""
    # Namespaced images ('org/name') link to /r/, official images to /_/.
    if '/' in image:
        hub_link = f"- **Docker Hub**: [{image}](https://hub.docker.com/r/{image})"
    else:
        hub_link = f"- **Docker Hub**: [Official {service_name}](https://hub.docker.com/_/{image})"
    links = [
        f"- **Official Documentation**: Check the official docs for {service_name}",
        hub_link,
        "- **Community Forums**: Search for community discussions and solutions",
        "- **GitHub Issues**: Check the project's GitHub for known issues"
    ]
    per_service = {
        'plex': [
            "- **Plex Support**: https://support.plex.tv/",
            "- **Plex Forums**: https://forums.plex.tv/"
        ],
        'jellyfin': [
            "- **Jellyfin Documentation**: https://jellyfin.org/docs/",
            "- **Jellyfin Forum**: https://forum.jellyfin.org/"
        ],
        'grafana': [
            "- **Grafana Documentation**: https://grafana.com/docs/",
            "- **Grafana Community**: https://community.grafana.com/"
        ],
        'nextcloud': [
            "- **Nextcloud Documentation**: https://docs.nextcloud.com/",
            "- **Nextcloud Community**: https://help.nextcloud.com/"
        ]
    }
    key = service_name.lower().replace('-', '').replace('_', '')
    for candidate, extra_links in per_service.items():
        if candidate in key or key in candidate:
            links.extend(extra_links)
            break
    return '\n'.join(links)
def generate_related_services(self, service_name: str, category: str, host: str) -> str:
    """Suggest up to five sibling services from the same category.

    Relationships are static category-based suggestions, not derived from
    the actual deployment.
    """
    suggestions_by_category = {
        'media': ['Plex', 'Jellyfin', 'Radarr', 'Sonarr', 'Bazarr', 'Tautulli'],
        'monitoring': ['Grafana', 'Prometheus', 'Uptime Kuma', 'Node Exporter'],
        'productivity': ['Nextcloud', 'Paperless-NGX', 'BookStack', 'Syncthing'],
        'security': ['Vaultwarden', 'Authelia', 'Pi-hole', 'WireGuard'],
        'development': ['GitLab', 'Gitea', 'Jenkins', 'Portainer']
    }
    suggestions = suggestions_by_category.get(category, [])
    if not suggestions:
        return f"Other services in the {category} category on {host}"
    bullet_list = '\n'.join(f"- {name}" for name in suggestions[:5])
    return f"Services REDACTED_APP_PASSWORD {service_name}:\n" + bullet_list
def get_current_date(self) -> str:
    """Today's date as YYYY-MM-DD, used in generated documentation footers."""
    from datetime import datetime
    return format(datetime.now(), "%Y-%m-%d")
def generate_all_documentation(self):
    """Scan the repo and write one markdown file per service into self.docs_path.

    Returns:
        int: total number of services documented.
    """
    print("🔍 Finding Docker Compose files...")
    compose_files = self.find_compose_files()
    print(f"Found {len(compose_files)} compose files")
    all_services = {}
    print("📋 Parsing service configurations...")
    for compose_file in compose_files:
        services = self.parse_compose_file(compose_file)
        # NOTE(review): update() keys by service name only, so a service
        # defined in two compose files silently keeps the last one parsed.
        all_services.update(services)
    print(f"Found {len(all_services)} total services")
    print("📝 Generating individual service documentation...")
    for service_name, service_info in all_services.items():
        print(f"  Documenting {service_name}...")
        # Generate documentation content
        doc_content = self.generate_service_documentation(service_name, service_info)
        # Write to file: one markdown file per service, hyphenated filename
        # (must stay in sync with the link names in generate_service_index).
        doc_filename = f"{service_name.lower().replace('_', '-')}.md"
        doc_path = self.docs_path / doc_filename
        with open(doc_path, 'w', encoding='utf-8') as f:
            f.write(doc_content)
    print(f"✅ Generated documentation for {len(all_services)} services")
    print(f"📁 Documentation saved to: {self.docs_path}")
    # Generate index file
    self.generate_service_index(all_services)
    return len(all_services)
def generate_service_index(self, all_services: Dict):
    """Write a README.md index grouping every documented service by category.

    Args:
        all_services: service name -> info dict (as built by
            generate_all_documentation); each info carries 'config' and 'host'.

    Fix: the per-service bullet computed `filename` but never used it — the
    markdown link target was a literal placeholder. The link now points at
    the generated per-service file.
    """
    index_content = f"""# 📚 Individual Service Documentation Index
This directory contains detailed documentation for all {len(all_services)} services in the homelab.
## 📋 Services by Category
"""
    # Group services by category
    services_by_category = {}
    for service_name, service_info in all_services.items():
        config = service_info['config']
        image = config.get('image', '')
        category = self.categorize_service(service_name, image)
        if category not in services_by_category:
            services_by_category[category] = []
        services_by_category[category].append({
            'name': service_name,
            'host': service_info['host'],
            'difficulty': self.get_difficulty_level(service_name, config)
        })
    # Generate category sections (alphabetical categories and services).
    for category in sorted(services_by_category.keys()):
        services = sorted(services_by_category[category], key=lambda x: x['name'])
        index_content += f"### {category.title()} ({len(services)} services)\n\n"
        for service in services:
            # Must match the filename scheme used when writing the docs.
            filename = f"{service['name'].lower().replace('_', '-')}.md"
            index_content += f"- {service['difficulty']} **[{service['name']}]({filename})** - {service['host']}\n"
        index_content += "\n"
    index_content += f"""
## 📊 Statistics
- **Total Services**: {len(all_services)}
- **Categories**: {len(services_by_category)}
- **Hosts**: {len(set(s['host'] for s in all_services.values()))}
## 🔍 Quick Search
Use your browser's search function (Ctrl+F / Cmd+F) to quickly find specific services.
---
*This index is auto-generated. Last updated: {self.get_current_date()}*
"""
    # Write index file
    index_path = self.docs_path / "README.md"
    with open(index_path, 'w', encoding='utf-8') as f:
        f.write(index_content)
    print(f"📋 Generated service index: {index_path}")
if __name__ == "__main__":
    # Entry point: scan the homelab repo and emit per-service markdown docs.
    # The repo path is hard-coded to the workspace checkout location.
    generator = ServiceDocumentationGenerator("/workspace/homelab")
    total_services = generator.generate_all_documentation()
    print(f"\n🎉 Documentation generation complete!")
    print(f"📊 Total services documented: {total_services}")

View File

@@ -0,0 +1,335 @@
#!/usr/bin/env python3
"""
Portainer Stack vs Git Repository Comparison Tool
Generates documentation comparing running stacks with repo configurations
"""
import json
import os
from datetime import datetime
from pathlib import Path
# Endpoint ID to Server Name mapping
# Keys are Portainer endpoint IDs (as returned by the Portainer API);
# values are the human-readable server names used throughout this report.
ENDPOINT_MAP = {
    2: "Atlantis",
    443395: "Concord NUC",
    443397: "Calypso (vish-nuc)",
    443398: "vish-nuc-edge",
    443399: "Homelab VM"
}
# Server folder mapping in repo
# Server name -> list of repo folders holding its compose files.
# An empty list means the server has no folder in the repo (e.g. vish-nuc-edge).
REPO_FOLDER_MAP = {
    "Atlantis": ["Atlantis"],
    "Concord NUC": ["concord_nuc"],
    "Calypso (vish-nuc)": ["Calypso"],
    "vish-nuc-edge": [],
    "Homelab VM": ["homelab_vm"]
}
# Running stacks data (collected from Portainer API)
# Static snapshot, not live data. Per-server schema:
#   "stacks":     list of dicts with keys:
#                   name (str), containers (list of container names),
#                   git_linked (bool), git_path (str, only when git_linked),
#                   issues (list[str], optional), status (str, optional)
#   "standalone": containers running outside any stack
#   "issues":     server-level problems (optional)
RUNNING_STACKS = {
    "Atlantis": {
        "stacks": [
            {"name": "arr-stack", "containers": ["deluge", "sonarr", "radarr", "lidarr", "gluetun", "jackett", "tautulli", "sabnzbd", "plex", "whisparr", "flaresolverr", "wizarr", "bazarr", "prowlarr", "jellyseerr"], "git_linked": True, "git_path": "Atlantis/arr-suite/"},
            {"name": "nginx_repo-stack", "containers": ["nginx"], "git_linked": True, "git_path": "Atlantis/repo_nginx.yaml"},
            {"name": "dyndns-updater-stack", "containers": ["ddns-vish-unproxied", "ddns-vish-proxied", "ddns-thevish-unproxied", "ddns-thevish-proxied"], "git_linked": True, "git_path": "Atlantis/dynamicdnsupdater.yaml"},
            {"name": "baikal-stack", "containers": ["baikal"], "git_linked": True, "git_path": "Atlantis/baikal/"},
            {"name": "jitsi", "containers": ["jitsi-web", "jitsi-jvb", "jitsi-jicofo", "coturn", "jitsi-prosody"], "git_linked": True, "git_path": "Atlantis/jitsi/"},
            {"name": "youtubedl", "containers": ["youtube_downloader"], "git_linked": True, "git_path": "Atlantis/youtubedl.yaml"},
            {"name": "matrix_synapse-stack", "containers": ["Synapse", "Synapse-DB"], "git_linked": True, "git_path": "Atlantis/synapse.yml", "issues": ["Synapse container exited"]},
            {"name": "joplin-stack", "containers": ["joplin-app", "joplin-db"], "git_linked": True, "git_path": "Atlantis/joplin.yml"},
            {"name": "immich-stack", "containers": ["Immich-SERVER", "Immich-LEARNING", "Immich-DB", "Immich-REDIS"], "git_linked": True, "git_path": "Atlantis/immich/"},
            {"name": "vaultwarden-stack", "containers": ["Vaultwarden", "Vaultwarden-DB"], "git_linked": True, "git_path": "Atlantis/vaultwarden.yaml"},
            {"name": "node-exporter-stack", "containers": ["snmp_exporter", "node_exporter"], "git_linked": False},
            {"name": "fenrus-stack", "containers": ["Fenrus"], "git_linked": True, "git_path": "Atlantis/fenrus.yaml"},
            {"name": "syncthing-stack", "containers": [], "git_linked": True, "git_path": "Atlantis/syncthing.yml", "status": "stopped"},
        ],
        "standalone": ["portainer"]
    },
    "Concord NUC": {
        "stacks": [
            {"name": "invidious", "containers": ["invidious-companion", "invidious-db", "invidious"], "git_linked": True, "git_path": "concord_nuc/invidious/", "issues": ["invidious unhealthy"]},
            {"name": "syncthing-stack", "containers": ["syncthing"], "git_linked": True, "git_path": "concord_nuc/syncthing.yaml"},
            {"name": "homeassistant-stack", "containers": ["homeassistant", "matter-server"], "git_linked": True, "git_path": "concord_nuc/homeassistant.yaml"},
            {"name": "adguard-stack", "containers": ["AdGuard"], "git_linked": True, "git_path": "concord_nuc/adguard.yaml"},
            {"name": "yourspotify-stack", "containers": ["yourspotify-server", "mongo", "yourspotify-web"], "git_linked": True, "git_path": "concord_nuc/yourspotify.yaml"},
            {"name": "dyndns-updater", "containers": ["ddns-vish-13340"], "git_linked": True, "git_path": "concord_nuc/dyndns_updater.yaml"},
            {"name": "wireguard-stack", "containers": ["wg-easy"], "git_linked": True, "git_path": "concord_nuc/wireguard.yaml"},
            {"name": "node-exporter", "containers": ["node_exporter"], "git_linked": False, "issues": ["restarting"]},
        ],
        "standalone": ["portainer_edge_agent", "watchtower"],
        "issues": ["watchtower restarting", "node_exporter restarting"]
    },
    "Calypso (vish-nuc)": {
        "stacks": [
            {"name": "arr-stack", "containers": ["jellyseerr", "bazarr", "sonarr", "lidarr", "prowlarr", "plex", "readarr", "radarr", "flaresolverr", "sabnzbd", "tautulli", "whisparr"], "git_linked": True, "git_path": "Calypso/arr_suite_with_dracula.yml"},
            {"name": "rxv4-stack", "containers": ["Resume-ACCESS", "Resume-DB", "Resume-CHROME", "Resume-MINIO"], "git_linked": True, "git_path": "Calypso/reactive_resume_v4/"},
            {"name": "seafile", "containers": ["Seafile-DB", "Seafile-CACHE", "Seafile-REDIS", "Seafile"], "git_linked": True, "git_path": "Calypso/seafile-server.yaml"},
            {"name": "gitea", "containers": ["Gitea-DB", "Gitea"], "git_linked": True, "git_path": "Calypso/gitea-server.yaml"},
            {"name": "paperless-testing", "containers": ["PaperlessNGX", "PaperlessNGX-REDIS", "PaperlessNGX-DB", "PaperlessNGX-GOTENBERG", "PaperlessNGX-TIKA"], "git_linked": False},
            {"name": "paperless-ai", "containers": ["PaperlessNGX-AI"], "git_linked": False},
            {"name": "rustdesk", "containers": ["Rustdesk-HBBS", "Rustdesk-HBBR"], "git_linked": False},
            {"name": "immich-stack", "containers": ["Immich-SERVER", "Immich-LEARNING", "Immich-DB", "Immich-REDIS"], "git_linked": True, "git_path": "Calypso/immich/"},
            {"name": "rackula-stack", "containers": ["Rackula"], "git_linked": True, "git_path": "Calypso/rackula.yml"},
            {"name": "adguard-stack", "containers": ["AdGuard"], "git_linked": True, "git_path": "Calypso/adguard.yaml"},
            {"name": "syncthing-stack", "containers": ["syncthing"], "git_linked": True, "git_path": "Calypso/syncthing.yaml"},
            {"name": "node-exporter", "containers": ["snmp_exporter", "node_exporter"], "git_linked": False},
            {"name": "actual-budget-stack", "containers": ["Actual"], "git_linked": True, "git_path": "Calypso/actualbudget.yml"},
            {"name": "apt-cacher-ng", "containers": ["apt-cacher-ng"], "git_linked": True, "git_path": "Calypso/apt-cacher-ng/"},
            {"name": "iperf3-stack", "containers": ["iperf3"], "git_linked": True, "git_path": "Calypso/iperf3.yml"},
            {"name": "wireguard", "containers": ["wgeasy"], "git_linked": True, "git_path": "Calypso/wireguard-server.yaml"},
        ],
        "standalone": ["portainer_edge_agent", "openspeedtest"]
    },
    "Homelab VM": {
        "stacks": [
            {"name": "openhands", "containers": ["openhands-app"], "git_linked": False},
            {"name": "monitoring", "containers": ["prometheus", "grafana", "node_exporter"], "git_linked": True, "git_path": "homelab_vm/prometheus_grafana_hub/"},
            {"name": "perplexica", "containers": ["perplexica"], "git_linked": False},
            {"name": "syncthing-stack", "containers": ["syncthing"], "git_linked": True, "git_path": "homelab_vm/syncthing.yml"},
            {"name": "hoarder-karakeep-stack", "containers": ["meilisearch", "web", "chrome"], "git_linked": True, "git_path": "homelab_vm/hoarder.yaml"},
            {"name": "drawio-stack", "containers": ["Draw.io"], "git_linked": True, "git_path": "homelab_vm/drawio.yml"},
            {"name": "redlib-stack", "containers": ["Libreddit"], "git_linked": True, "git_path": "homelab_vm/libreddit.yaml"},
            {"name": "signal-api-stack", "containers": ["signal-api"], "git_linked": True, "git_path": "homelab_vm/signal_api.yaml"},
            {"name": "binternet-stack", "containers": ["binternet"], "git_linked": True, "git_path": "homelab_vm/binternet.yaml"},
            {"name": "archivebox-stack", "containers": ["archivebox_scheduler", "archivebox", "archivebox_sonic"], "git_linked": True, "git_path": "homelab_vm/archivebox.yaml"},
            {"name": "watchyourlan-stack", "containers": ["WatchYourLAN"], "git_linked": True, "git_path": "homelab_vm/watchyourlan.yaml"},
            {"name": "webcheck-stack", "containers": ["Web-Check"], "git_linked": True, "git_path": "homelab_vm/webcheck.yaml"},
        ],
        "standalone": ["portainer_edge_agent", "openhands-runtime"]
    },
    "vish-nuc-edge": {
        "stacks": [
            {"name": "kuma", "containers": ["uptime-kuma"], "git_linked": False},
            {"name": "glances", "containers": ["glances"], "git_linked": False},
        ],
        "standalone": ["portainer_edge_agent"]
    }
}
# Repo configs not running
def get_repo_configs(base_path="/workspace/homelab"):
    """List all compose files in the repo organized by server.

    Args:
        base_path: Repository root to scan (str or Path). Parameterized with
            the original hard-coded default, so existing callers are
            unaffected and the function is unit-testable.

    Returns:
        dict: server folder name -> list of *.yml / *.yaml paths relative to
        base_path. Servers whose folder does not exist are omitted; a server
        with an existing but empty folder maps to [].
    """
    base_path = Path(base_path)
    repo_configs = {}
    server_folders = {
        "Atlantis": base_path / "Atlantis",
        "Calypso": base_path / "Calypso",
        "concord_nuc": base_path / "concord_nuc",
        "homelab_vm": base_path / "homelab_vm",
        "Bulgaria_vm": base_path / "Bulgaria_vm",
        "Chicago_vm": base_path / "Chicago_vm",
        "anubis": base_path / "anubis",
        "guava": base_path / "guava",
        "setillo": base_path / "setillo",
    }
    for server, folder in server_folders.items():
        if folder.exists():
            configs = []
            # Recursive glob for both compose-file extensions.
            for ext in ("*.yml", "*.yaml"):
                configs.extend(folder.rglob(ext))
            repo_configs[server] = [str(c.relative_to(base_path)) for c in configs]
    return repo_configs
def generate_markdown_report():
    """Build the stack-vs-repo comparison report as a markdown string.

    Reads the module-level RUNNING_STACKS snapshot and the compose files on
    disk (via get_repo_configs); otherwise pure string assembly.

    Returns:
        str: the complete markdown document.
    """
    report = []
    report.append("# Portainer Stack vs Repository Configuration Comparison")
    # NOTE(review): the label says UTC but datetime.now() is local time —
    # confirm the host runs in UTC or switch to datetime.now(timezone.utc).
    report.append(f"\n*Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S UTC')}*")
    report.append("\n---\n")
    # Summary Section
    report.append("## Executive Summary\n")
    total_stacks = sum(len(data["stacks"]) for data in RUNNING_STACKS.values())
    git_linked = sum(
        sum(1 for s in data["stacks"] if s.get("git_linked", False))
        for data in RUNNING_STACKS.values()
    )
    not_git_linked = total_stacks - git_linked
    # Guard against an empty snapshot (avoids ZeroDivisionError).
    linked_pct = (git_linked / total_stacks * 100) if total_stacks else 0.0
    report.append(f"- **Total Running Stacks:** {total_stacks}")
    report.append(f"- **Git-Linked Stacks:** {git_linked} ({linked_pct:.1f}%)")
    report.append(f"- **Not Git-Linked:** {not_git_linked}")
    report.append(f"- **Servers Monitored:** {len(RUNNING_STACKS)}")
    report.append("")
    # Issues Summary: collect both per-stack and server-level issues.
    all_issues = []
    for server, data in RUNNING_STACKS.items():
        for stack in data["stacks"]:
            if "issues" in stack:
                for issue in stack["issues"]:
                    all_issues.append(f"{server}/{stack['name']}: {issue}")
        if "issues" in data:
            for issue in data["issues"]:
                all_issues.append(f"{server}: {issue}")
    if all_issues:
        report.append("### ⚠️ Current Issues\n")
        for issue in all_issues:
            report.append(f"- {issue}")
        report.append("")
    # Per-Server Details
    report.append("---\n")
    report.append("## Server Details\n")
    for server, data in RUNNING_STACKS.items():
        report.append(f"### 🖥️ {server}\n")
        # Running Stacks Table
        report.append("#### Running Stacks\n")
        report.append("| Stack Name | Containers | Git-Linked | Config Path | Status |")
        report.append("|------------|------------|------------|-------------|--------|")
        for stack in data["stacks"]:
            name = stack["name"]
            containers = len(stack["containers"])
            # Renamed from `git_linked` so it no longer shadows the summary
            # count computed above. NOTE(review): both branches are empty
            # strings — the check/cross glyphs appear to have been lost in the
            # repo mirror; restore them if so.
            linked_mark = "" if stack.get("git_linked") else ""
            config_path = stack.get("git_path", "-")
            status = "🟢 Running"
            if stack.get("status") == "stopped":
                status = "🔴 Stopped"
            elif "issues" in stack:
                status = f"⚠️ {stack['issues'][0]}"
            report.append(f"| {name} | {containers} | {linked_mark} | `{config_path}` | {status} |")
        report.append("")
        # Standalone containers
        if data.get("standalone"):
            report.append("#### Standalone Containers (not in stacks)\n")
            report.append(", ".join([f"`{c}`" for c in data["standalone"]]))
            report.append("")
        report.append("")
    # Configs in Repo but Not Running
    report.append("---\n")
    report.append("## Repository Configs Not Currently Running\n")
    report.append("These configurations exist in the repo but are not deployed:\n")
    repo_configs = get_repo_configs()
    # Known running config paths
    running_paths = set()
    for server, data in RUNNING_STACKS.items():
        for stack in data["stacks"]:
            if "git_path" in stack:
                running_paths.add(stack["git_path"].rstrip("/"))
    for server, configs in repo_configs.items():
        not_running = []
        for config in configs:
            # Directory portion of the path (the path itself when it has no '/');
            # previously computed but unused — now reused in the match below.
            config_base = config.rsplit("/", 1)[0] if "/" in config else config
            is_running = any(
                config.startswith(p.rstrip("/")) or p.startswith(config_base)
                for p in running_paths
            )
            if not is_running:
                not_running.append(config)
        if not_running:
            report.append(f"\n### {server}\n")
            for config in not_running[:15]:  # Limit to first 15
                report.append(f"- `{config}`")
            if len(not_running) > 15:
                report.append(f"- ... and {len(not_running) - 15} more")
    # Recommendations
    report.append("\n---\n")
    report.append("## Recommendations\n")
    report.append("""
1. **Link Remaining Stacks to Git**: The following stacks should be linked to Git for version control:
- `paperless-testing` and `paperless-ai` on Calypso
- `rustdesk` on Calypso
- `node-exporter` stacks on multiple servers
- `openhands` and `perplexica` on Homelab VM
- `kuma` and `glances` on vish-nuc-edge
2. **Address Current Issues**:
- Fix `Synapse` container on Atlantis (currently exited)
- Investigate `invidious` unhealthy status on Concord NUC
- Fix `watchtower` and `node_exporter` restart loops on Concord NUC
3. **Cleanup Unused Configs**: Review configs in repo not currently deployed and either:
- Deploy if needed
- Archive if deprecated
- Document why they exist but aren't deployed
4. **Standardize Naming**: Some stacks use `-stack` suffix, others don't. Consider standardizing.
""")
    return "\n".join(report)
def generate_infrastructure_overview():
    """Render the homelab infrastructure overview as a markdown document.

    Pure string assembly: a server-inventory table followed by a
    category -> services listing. Only the timestamp varies between calls.
    """
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S UTC')
    lines = [
        "# Homelab Infrastructure Overview",
        f"\n*Last Updated: {stamp}*",
        "\n---\n",
        "## Server Inventory\n",
        "| Server | Type | Endpoint ID | Status | Total Containers |",
        "|--------|------|-------------|--------|------------------|",
    ]
    # (name, connection type, Portainer endpoint id, status, container count)
    inventory = [
        ("Atlantis", "Local Docker", 2, "🟢 Online", "41"),
        ("Concord NUC", "Edge Agent", 443395, "🟢 Online", "15"),
        ("Calypso (vish-nuc)", "Edge Agent", 443397, "🟢 Online", "45"),
        ("vish-nuc-edge", "Edge Agent", 443398, "🟢 Online", "3"),
        ("Homelab VM", "Edge Agent", 443399, "🟢 Online", "20"),
    ]
    lines.extend(
        f"| {name} | {conn} | {eid} | {state} | {count} |"
        for name, conn, eid, state, count in inventory
    )
    lines.append("\n## Service Categories\n")
    catalog = {
        "Media Management": ["arr-stack (Atlantis)", "arr-stack (Calypso)", "plex", "jellyseerr", "tautulli"],
        "Photo Management": ["Immich (Atlantis)", "Immich (Calypso)"],
        "Document Management": ["PaperlessNGX", "Joplin"],
        "Network & DNS": ["AdGuard (Concord NUC)", "AdGuard (Calypso)", "WireGuard", "DynDNS"],
        "Home Automation": ["Home Assistant", "Matter Server"],
        "Development & DevOps": ["Gitea", "Portainer", "OpenHands"],
        "Communication": ["Matrix/Synapse", "Jitsi", "Signal API"],
        "Monitoring": ["Prometheus", "Grafana", "Uptime Kuma", "Glances", "WatchYourLAN"],
        "Security": ["Vaultwarden/Bitwarden"],
        "File Sync": ["Syncthing", "Seafile"],
        "Privacy Tools": ["Invidious", "Libreddit/Redlib", "Binternet"],
        "Productivity": ["Draw.io", "Reactive Resume", "ArchiveBox", "Hoarder/Karakeep"],
    }
    for title, members in catalog.items():
        lines.append(f"### {title}\n")
        lines.extend(f"- {svc}" for svc in members)
        lines.append("")
    return "\n".join(lines)
# Script entry point: write both reports under the repo's docs/ folder.
# Paths are hard-coded to the /workspace/homelab checkout used by the tooling.
if __name__ == "__main__":
    # Generate comparison report
    comparison_report = generate_markdown_report()
    with open("/workspace/homelab/docs/STACK_COMPARISON_REPORT.md", "w") as f:
        f.write(comparison_report)
    print("Generated: docs/STACK_COMPARISON_REPORT.md")
    # Generate infrastructure overview
    infra_report = generate_infrastructure_overview()
    with open("/workspace/homelab/docs/INFRASTRUCTURE_OVERVIEW.md", "w") as f:
        f.write(infra_report)
    print("Generated: docs/INFRASTRUCTURE_OVERVIEW.md")

43
scripts/gmail-backup-daily.sh Executable file
View File

@@ -0,0 +1,43 @@
#!/bin/bash
# Daily email backup — downloads new emails via IMAP to atlantis NFS mount
#
# Writes directly to /mnt/atlantis_archive/old_emails/ (NFS mount to atlantis:/volume1/archive)
# Also keeps a local copy at /tmp/gmail_backup for quick access
# Incremental — skips already-downloaded .eml files
# Never deletes — emails removed from source stay in backup
#
# Proton Bridge must be running for admin@thevish.io backup.
# If bridge is down, Gmail accounts still back up fine (script continues on error).
#
# Cron: 0 3 * * * /home/homelab/organized/repos/homelab/scripts/gmail-backup-daily.sh

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
ATLANTIS_BACKUP="/mnt/atlantis_archive/old_emails"
LOCAL_BACKUP="/tmp/gmail_backup"
LOG="/tmp/gmail-backup-daily.log"

# Timestamped logger — replaces the repeated `$(date ...)` boilerplate.
log() {
  echo "$(date '+%Y-%m-%d %H:%M:%S') — $*" >> "$LOG"
}

log "Starting email backup"

# Check NFS mount; fall back to local-only backup if it cannot be mounted.
if ! mountpoint -q /mnt/atlantis_archive; then
  log "WARNING: /mnt/atlantis_archive not mounted, trying to mount..."
  # NOTE(review): relies on passwordless sudo when invoked from cron — confirm.
  sudo mount /mnt/atlantis_archive >> "$LOG" 2>&1
  if ! mountpoint -q /mnt/atlantis_archive; then
    log "ERROR: Cannot mount atlantis_archive, falling back to local only"
    ATLANTIS_BACKUP=""
  fi
fi

# Download to atlantis (primary destination). `|| true` keeps the run going
# even when one account fails (e.g. Proton Bridge down).
if [ -n "$ATLANTIS_BACKUP" ]; then
  python3 "$SCRIPT_DIR/gmail-backup.py" "$ATLANTIS_BACKUP" >> "$LOG" 2>&1 || true
  TOTAL=$(find "$ATLANTIS_BACKUP" -name "*.eml" 2>/dev/null | wc -l)
  log "Atlantis backup: $TOTAL total emails"
fi

# Also keep a local copy (fast access, survives NFS outage)
python3 "$SCRIPT_DIR/gmail-backup.py" "$LOCAL_BACKUP" >> "$LOG" 2>&1 || true
LOCAL_TOTAL=$(find "$LOCAL_BACKUP" -name "*.eml" 2>/dev/null | wc -l)
log "Local backup: $LOCAL_TOTAL total emails"

log "Done"

185
scripts/gmail-backup.py Normal file
View File

@@ -0,0 +1,185 @@
#!/usr/bin/env python3
"""Download all Gmail emails as .eml files organized by label/folder."""
import email
import email.header
import imaplib
import os
import re
import sys
import time
from pathlib import Path
def decode_header(raw):
    """Decode a possibly MIME-encoded header value into a unicode string.

    Empty/None input yields ""; undecodable charsets fall back to UTF-8
    with replacement characters rather than raising.
    """
    if not raw:
        return ""

    def _to_text(fragment, charset):
        # str fragments pass through; bytes carry a (possibly bogus) charset.
        if not isinstance(fragment, bytes):
            return fragment
        try:
            return fragment.decode(charset or "utf-8", errors="replace")
        except (LookupError, UnicodeDecodeError):
            return fragment.decode("utf-8", errors="replace")

    fragments = email.header.decode_header(raw)
    return " ".join(_to_text(data, charset) for data, charset in fragments)
def sanitize_filename(name, max_len=100):
    """Make a string safe to use as a filename.

    Replaces filesystem-reserved and control characters with underscores,
    trims leading/trailing dots and spaces, and caps the length. An input
    that sanitizes to nothing becomes "no_subject".
    """
    cleaned = re.sub(r'[<>:"/\\|?*\x00-\x1f]', '_', name).strip('. ')
    if not cleaned:
        return "no_subject"
    return cleaned[:max_len]
def backup_account(email_addr, app_password, output_dir, host="imap.gmail.com", port=993, starttls=False):
    """Download every message of one IMAP account as .eml files under output_dir.

    Each IMAP folder becomes a subdirectory; files already on disk are skipped,
    so repeat runs are incremental. Returns the number of newly downloaded
    messages. Automatically reconnects once per dropped connection.
    """
    print(f"\n{'='*60}")
    print(f"Backing up: {email_addr}")
    print(f"Output: {output_dir}")
    print(f"{'='*60}")
    if starttls:
        # STARTTLS path is used for the local Proton Bridge (self-signed cert).
        # NOTE(review): CERT_NONE disables certificate verification — unsafe
        # for any non-loopback host; confirm this is only used for 127.0.0.1.
        import ssl
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        imap = imaplib.IMAP4(host, port)
        imap.starttls(ssl_context=ctx)
    else:
        imap = imaplib.IMAP4_SSL(host, port)
    imap.login(email_addr, app_password)
    # List all folders
    folder_names = []
    for f in folders:
        # Pull the folder name out of each LIST response line; names may be
        # quoted or bare after the '"/"' hierarchy delimiter.
        match = re.search(r'"/" "(.*)"$|"/" (.*)$', f.decode())
        if match:
            name = match.group(1) or match.group(2)
            folder_names.append(name.strip('"'))
    print(f"Found {len(folder_names)} folders")
    total_downloaded = 0
    total_skipped = 0
    for folder in folder_names:
        try:
            status, data = imap.select(f'"{folder}"', readonly=True)
            if status != "OK":
                continue
            msg_count = int(data[0])
            if msg_count == 0:
                continue
        except Exception as e:
            print(f" Skipping {folder}: {e}")
            continue
        # Create folder directory
        safe_folder = folder.replace("/", "_").replace("[Gmail]_", "gmail_")
        folder_dir = Path(output_dir) / safe_folder
        folder_dir.mkdir(parents=True, exist_ok=True)
        print(f"\n {folder}: {msg_count} messages")
        # Fetch all message UIDs
        # NOTE(review): imap.search() returns message *sequence numbers*, not
        # UIDs — the naming below is misleading, and sequence numbers can shift
        # after a mid-folder reconnect; confirm whether UID SEARCH was intended.
        status, data = imap.search(None, "ALL")
        if status != "OK":
            continue
        uids = data[0].split()
        for i, uid in enumerate(uids, 1):
            try:
                # Fetch full message
                status, msg_data = imap.fetch(uid, "(RFC822)")
                if status != "OK" or not msg_data[0]:
                    continue
                raw_email = msg_data[0][1]
                msg = email.message_from_bytes(raw_email)
                # Build filename from date + subject
                date_str = msg.get("Date", "")  # NOTE(review): computed but unused
                subject = sanitize_filename(decode_header(msg.get("Subject", "no_subject")))
                msg_id = msg.get("Message-ID", f"uid_{uid.decode()}")
                safe_id = sanitize_filename(re.sub(r'[<>@.]', '_', msg_id), 40)
                filename = f"{safe_id}_{subject}.eml"
                filepath = folder_dir / filename
                if filepath.exists():
                    # Incremental mode: this Message-ID is already on disk.
                    total_skipped += 1
                    continue
                filepath.write_bytes(raw_email)
                total_downloaded += 1
                if i % 50 == 0 or i == len(uids):
                    print(f" {i}/{len(uids)} processed")
            except (imaplib.IMAP4.abort, imaplib.IMAP4.error, ConnectionError, OSError) as e:
                print(f" Connection lost at {i}/{len(uids)}: {e}")
                # Reconnect and re-select folder
                try:
                    imap.logout()
                except Exception:
                    pass
                time.sleep(2)
                if starttls:
                    # Same connection recipe as the initial login above.
                    import ssl
                    ctx = ssl.create_default_context()
                    ctx.check_hostname = False
                    ctx.verify_mode = ssl.CERT_NONE
                    imap = imaplib.IMAP4(host, port)
                    imap.starttls(ssl_context=ctx)
                else:
                    imap = imaplib.IMAP4_SSL(host, port)
                imap.login(email_addr, app_password)
                imap.select(f'"{folder}"', readonly=True)
                # NOTE(review): the failed message is not retried — the loop
                # resumes at the next sequence number after reconnecting.
                print(f" Reconnected, continuing...")
    imap.logout()
    print(f"\n Done: {total_downloaded} downloaded, {total_skipped} skipped (already exist)")
    return total_downloaded
# Script entry point: back up each configured account in turn.
# NOTE(review): app passwords are stored inline (one Proton Bridge password is
# still in plaintext below) — move these into environment variables or a
# non-committed config file.
if __name__ == "__main__":
    accounts = [
        {
            "email": "your-email@example.com",
            "password": "REDACTED_APP_PASSWORD", # pragma: allowlist secret
            "dir": "dvish92",
        },
        {
            "email": "lzbellina92@gmail.com",
            "password": "REDACTED_APP_PASSWORD", # pragma: allowlist secret
            "dir": "lzbellina92",
        },
        {
            # Proton account goes through the local Proton Bridge
            # (IMAP with STARTTLS on loopback, see backup_account()).
            "email": "admin@thevish.io",
            "password": "MsuiUGPLNlWhOewqmaK3gA", # pragma: allowlist secret
            "dir": "proton_admin",
            "host": "127.0.0.1",
            "port": 1143,
            "starttls": True,
        },
    ]
    # Destination root: first CLI argument, else a /tmp default.
    base_dir = sys.argv[1] if len(sys.argv) > 1 else "/tmp/gmail_backup"
    print(f"Email Backup — downloading all emails to {base_dir}")
    total = 0
    for acct in accounts:
        output = os.path.join(base_dir, acct["dir"])
        os.makedirs(output, exist_ok=True)
        total += backup_account(
            acct["email"], acct["password"], output,
            host=acct.get("host", "imap.gmail.com"),
            port=acct.get("port", 993),
            starttls=acct.get("starttls", False),
        )
    print(f"\n{'='*60}")
    print(f"BACKUP COMPLETE: {total} emails downloaded to {base_dir}")
    print(f"{'='*60}")

46
scripts/gmail-organizer-ctl.sh Executable file
View File

@@ -0,0 +1,46 @@
#!/bin/bash
# Email Organizer Control — enable/disable all email organizer cron jobs
#
# Pausing works by prefixing the matching crontab lines with '#PAUSED# '
# (commenting them out); resuming strips that prefix again.
#
# Usage:
#   gmail-organizer-ctl.sh stop   — disable all cron jobs (frees LLM)
#   gmail-organizer-ctl.sh start  — re-enable all cron jobs
#   gmail-organizer-ctl.sh status — show current state

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"

# Substrings that identify the three organizer entries in the crontab.
CRON_MARKER_1="gmail-organizer/gmail_organizer.py"
CRON_MARKER_2="gmail-organizer-dvish/gmail_organizer.py"
CRON_MARKER_3="proton-organizer/proton_organizer.py"

case "${1:-status}" in
  stop|disable|pause)
    # ${VAR//\//\\/} escapes the slashes so each marker can be used inside a
    # sed /address/. NOTE(review): if `crontab -l` fails (no crontab yet),
    # the pipeline installs an *empty* crontab — confirm that is acceptable.
    crontab -l 2>/dev/null | sed \
      -e "/${CRON_MARKER_1//\//\\/}/s/^/#PAUSED# /" \
      -e "/${CRON_MARKER_2//\//\\/}/s/^/#PAUSED# /" \
      -e "/${CRON_MARKER_3//\//\\/}/s/^/#PAUSED# /" \
      | crontab -
    echo "Email organizers PAUSED (all 3 accounts)"
    echo "LLM is free. Run '$0 start' to resume."
    ;;
  start|enable|resume)
    # Reverse of 'stop': strip the '#PAUSED# ' prefix from marker lines only.
    crontab -l 2>/dev/null | sed \
      -e "s/^#PAUSED# \(.*${CRON_MARKER_1//\//\\/}\)/\1/" \
      -e "s/^#PAUSED# \(.*${CRON_MARKER_2//\//\\/}\)/\1/" \
      -e "s/^#PAUSED# \(.*${CRON_MARKER_3//\//\\/}\)/\1/" \
      | crontab -
    echo "Email organizers RESUMED (all 3 accounts)"
    ;;
  status)
    echo "Email Organizer Status:"
    # The while-loop runs in a pipeline subshell, which is fine here since it
    # only echoes and sets no variables used afterwards.
    crontab -l 2>/dev/null | grep -E "gmail-organizer|proton-organizer" | while read -r line; do
      if echo "$line" | grep -q "^#PAUSED#"; then
        echo " PAUSED: $(echo "$line" | sed 's/#PAUSED# //')"
      else
        echo " ACTIVE: $line"
      fi
    done
    ;;
  *)
    echo "Usage: $0 {stop|start|status}"
    exit 1
    ;;
esac

View File

@@ -0,0 +1,2 @@
config.local.yaml
processed.db

View File

@@ -0,0 +1,47 @@
# Gmail Organizer Configuration
# Copy this to config.local.yaml and fill in your credentials
gmail:
email: "your.email@gmail.com"
app_password: "xxxx xxxx xxxx xxxx" # 16-char app password from Google # pragma: allowlist secret
ollama:
url: "https://a5be22681.vishinator.olares.com"
model: "qwen3-coder:latest"
# Categories and their Gmail labels
# The LLM will classify each email into one of these
categories:
receipts:
label: "AutoOrg/Receipts"
description: "Purchase confirmations, invoices, payment receipts, order updates"
archive: false # keep in inbox — you may need to act on these
newsletters:
label: "AutoOrg/Newsletters"
description: "Mailing lists, digests, blog updates, promotional content from subscriptions"
archive: true # auto-archive out of inbox
work:
label: "AutoOrg/Work"
description: "Professional correspondence, meeting invites, project updates, work tools"
archive: false
accounts:
label: "AutoOrg/Accounts"
description: "Security alerts, password resets, 2FA notifications, account verification, login alerts from services"
archive: true # auto-archive — check label if needed
spam:
label: "AutoOrg/Spam"
description: "Unsolicited marketing, phishing attempts, junk mail that bypassed filters"
archive: true # auto-archive junk
personal:
label: "AutoOrg/Personal"
description: "Friends, family, personal accounts, non-work non-commercial emails"
archive: false
# Processing settings
processing:
batch_size: 50 # Emails per run
max_body_chars: 2000 # Truncate body to save tokens
skip_already_labeled: true
dry_run: false # Set true to preview without applying labels
process_read: true # Also process already-read emails
mailbox: "INBOX" # IMAP mailbox to process

View File

@@ -0,0 +1,332 @@
#!/usr/bin/env python3
"""Gmail Organizer — classifies emails using a local LLM and applies Gmail labels."""
import argparse
import email
import email.header
import html
import imaplib
import json
import logging
import re
import sqlite3
import sys
import time
import urllib.request
import urllib.error
from datetime import datetime, timedelta
from pathlib import Path
import yaml
# Log line layout used when logging is configured by main().
LOG_FMT = "%(asctime)s %(levelname)-8s %(message)s"
log = logging.getLogger("gmail-organizer")
# State lives next to the script: SQLite DB of processed Message-IDs and the
# default config path (config.local.yaml, which is gitignored).
DB_PATH = Path(__file__).parent / "processed.db"
DEFAULT_CONFIG = Path(__file__).parent / "config.local.yaml"
# ── helpers ──────────────────────────────────────────────────────────────────
def load_config(path: Path) -> dict:
    """Load and parse the YAML configuration file (see config.example.yaml)."""
    with open(path) as f:
        return yaml.safe_load(f)
def init_db(db_path: Path) -> sqlite3.Connection:
    """Open (or create) the processed-messages database and ensure its schema.

    One row per classified message, keyed by Message-ID.
    """
    schema = (
        "CREATE TABLE IF NOT EXISTS processed ("
        " message_id TEXT PRIMARY KEY,"
        " category TEXT NOT NULL,"
        " processed_at TEXT NOT NULL"
        ")"
    )
    conn = sqlite3.connect(db_path)
    with conn:  # commits the DDL on success
        conn.execute(schema)
    return conn
def is_processed(conn: sqlite3.Connection, message_id: str) -> bool:
    """Return True when this Message-ID already has a row in `processed`."""
    cursor = conn.execute(
        "SELECT 1 FROM processed WHERE message_id = ?", (message_id,)
    )
    return cursor.fetchone() is not None
def mark_processed(conn: sqlite3.Connection, message_id: str, category: str):
    """Record (or overwrite) the classification decision for a message.

    Uses INSERT OR REPLACE so reprocessing a message updates its row instead
    of failing on the PRIMARY KEY. Timestamps are stored as UTC ISO-8601.
    """
    # stdlib timezone.utc replaces the original inline __import__('zoneinfo')
    # hack; the resulting isoformat() output (+00:00 suffix) is identical.
    from datetime import timezone
    conn.execute(
        "INSERT OR REPLACE INTO processed (message_id, category, processed_at) VALUES (?, ?, ?)",
        (message_id, category, datetime.now(tz=timezone.utc).isoformat()),
    )
    conn.commit()
def decode_header(raw: str | None) -> str:
if not raw:
return ""
parts = email.header.decode_header(raw)
decoded = []
for data, charset in parts:
if isinstance(data, bytes):
decoded.append(data.decode(charset or "utf-8", errors="replace"))
else:
decoded.append(data)
return " ".join(decoded)
def extract_text(msg: email.message.Message, max_chars: int) -> str:
"""Extract plain-text body from an email, falling back to stripped HTML."""
body = ""
if msg.is_multipart():
for part in msg.walk():
ct = part.get_content_type()
if ct == "text/plain":
payload = part.get_payload(decode=True)
if payload:
charset = part.get_content_charset() or "utf-8"
body = payload.decode(charset, errors="replace")
break
elif ct == "text/html" and not body:
payload = part.get_payload(decode=True)
if payload:
charset = part.get_content_charset() or "utf-8"
raw_html = payload.decode(charset, errors="replace")
body = html.unescape(re.sub(r"<[^>]+>", " ", raw_html))
else:
payload = msg.get_payload(decode=True)
if payload:
charset = msg.get_content_charset() or "utf-8"
body = payload.decode(charset, errors="replace")
if msg.get_content_type() == "text/html":
body = html.unescape(re.sub(r"<[^>]+>", " ", body))
# Collapse whitespace and truncate
body = re.sub(r"\s+", " ", body).strip()
return body[:max_chars]
# ── Gmail IMAP ───────────────────────────────────────────────────────────────
class GmailClient:
    """Minimal Gmail IMAP client: fetch messages, apply labels, archive."""

    def __init__(self, email_addr: str, app_password: str):
        # NOTE(review): this signature was mangled by secret redaction in the
        # repo mirror; restored as a plain string parameter, which matches the
        # caller in run() passing gmail_cfg["app_password"] positionally.
        self.email = email_addr
        self.conn = imaplib.IMAP4_SSL("imap.gmail.com")
        self.conn.login(email_addr, app_password)

    def fetch_uids(self, mailbox: str = "INBOX", search: str = "ALL",
                   batch_size: int = 50) -> list[bytes]:
        """Return up to batch_size message ids from mailbox, most recent first.

        NOTE(review): IMAP SEARCH returns message *sequence numbers*, not
        UIDs; the name is kept for compatibility but is misleading.
        """
        self.conn.select(mailbox)
        _, data = self.conn.search(None, search)
        uids = data[0].split()
        # Most recent first
        return list(reversed(uids[-batch_size:]))

    def fetch_message(self, uid: bytes) -> email.message.Message:
        """Download and parse the full RFC822 message for one id."""
        _, data = self.conn.fetch(uid, "(RFC822)")
        return email.message_from_bytes(data[0][1])

    def get_labels(self, uid: bytes) -> list[str]:
        """Get existing Gmail labels (X-GM-LABELS) for a message."""
        _, data = self.conn.fetch(uid, "(X-GM-LABELS)")
        raw = data[0].decode() if isinstance(data[0], bytes) else str(data[0])
        match = re.search(r'X-GM-LABELS \(([^)]*)\)', raw)
        if match:
            return match.group(1).split()
        return []

    def apply_label(self, uid: bytes, label: str):
        """Apply a Gmail label to a message. Creates the label if needed."""
        # Gmail IMAP uses X-GM-LABELS for label manipulation
        result = self.conn.store(uid, "+X-GM-LABELS", f'("{label}")')
        if result[0] != "OK":
            # Fallback: copy to label (which creates it as a folder)
            try:
                self.conn.create(label)
            except imaplib.IMAP4.error:
                pass  # Label already exists
            self.conn.copy(uid, label)

    def archive(self, uid: bytes):
        """Archive a message (remove from INBOX by removing \\Inbox label)."""
        self.conn.store(uid, "-X-GM-LABELS", '("\\\\Inbox")')

    def close(self):
        """Best-effort CLOSE + LOGOUT; errors on dead sessions are ignored."""
        try:
            self.conn.close()
            self.conn.logout()
        except Exception:
            pass
# ── Ollama LLM ───────────────────────────────────────────────────────────────
def classify_email(
    ollama_url: str,
    model: str,
    categories: dict,
    subject: str,
    sender: str,
    body_snippet: str,
) -> str:
    """Ask the LLM to classify an email into one of the categories.

    Returns a key of `categories`; falls back to "personal" when the model's
    reply matches no known category name. Raises URLError when the Ollama
    endpoint is unreachable.
    """
    cat_descriptions = "\n".join(
        f"- **{name}**: {info['description']}" for name, info in categories.items()
    )
    category_names = ", ".join(categories.keys())
    prompt = f"""Classify this email into exactly ONE category. Reply with ONLY the category name, nothing else.
Categories:
{cat_descriptions}
Email:
From: {sender}
Subject: {subject}
Body: {body_snippet[:1000]}
Reply with one of: {category_names}"""
    # Low temperature + tiny num_predict: we only want a single category word.
    payload = json.dumps({
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {
            "temperature": 0.1,
            "num_predict": 20,
        },
    }).encode()
    req = urllib.request.Request(
        f"{ollama_url.rstrip('/')}/api/generate",
        data=payload,
        headers={"Content-Type": "application/json"},
    )
    try:
        with urllib.request.urlopen(req, timeout=60) as resp:
            result = json.loads(resp.read())
    except urllib.error.URLError as e:
        log.error("Ollama request failed: %s", e)
        raise
    raw_response = result.get("response", "").strip().lower()
    # Strip any thinking tags (qwen3 sometimes wraps reasoning in <think>...</think>)
    raw_response = re.sub(r"<think>.*?</think>", "", raw_response, flags=re.DOTALL).strip()
    # Extract just the category name.
    # NOTE(review): substring matching can false-positive (e.g. "work" inside
    # "network"); first category in dict order wins — confirm acceptable.
    for name in categories:
        if name in raw_response:
            return name
    log.warning("LLM returned unexpected category %r, defaulting to 'personal'", raw_response)
    return "personal"
# ── main ─────────────────────────────────────────────────────────────────────
def run(config_path: Path, dry_run: bool = False, reprocess: bool = False,
        limit: int | None = None):
    """Classify one batch of inbox mail: fetch, label, optionally archive, record.

    Args:
        config_path: YAML config with gmail/ollama/categories/processing keys.
        dry_run: Log intended actions without touching Gmail or the ledger.
        reprocess: Re-classify messages already recorded in the ledger.
        limit: Overrides processing.batch_size when given.
            NOTE(review): limit=0 is falsy and silently falls back to the
            config default — confirm that is acceptable for the CLI.
    """
    cfg = load_config(config_path)
    gmail_cfg = cfg["gmail"]
    ollama_cfg = cfg["ollama"]
    categories = cfg["categories"]
    proc_cfg = cfg.get("processing", {})
    batch_size = limit or proc_cfg.get("batch_size", 50)
    max_body = proc_cfg.get("max_body_chars", 2000)
    # Either the CLI flag or the config switch can force a dry run.
    dry_run = dry_run or proc_cfg.get("dry_run", False)
    mailbox = proc_cfg.get("mailbox", "INBOX")
    log.info("Connecting to Gmail as %s", gmail_cfg["email"])
    client = GmailClient(gmail_cfg["email"], gmail_cfg["app_password"])
    db = init_db(DB_PATH)
    try:
        uids = client.fetch_uids(mailbox=mailbox, batch_size=batch_size)
        log.info("Fetched %d message UIDs", len(uids))
        stats = {cat: 0 for cat in categories}
        stats["skipped"] = 0
        stats["errors"] = 0
        for i, uid in enumerate(uids, 1):
            try:
                msg = client.fetch_message(uid)
                # Synthetic fallback id keeps the ledger deduping messages
                # that lack a Message-ID header.
                message_id = msg.get("Message-ID", f"uid-{uid.decode()}")
                subject = decode_header(msg.get("Subject"))
                sender = decode_header(msg.get("From"))
                if not reprocess and is_processed(db, message_id):
                    stats["skipped"] += 1
                    continue
                body = extract_text(msg, max_body)
                log.info("[%d/%d] Classifying: %s (from: %s)",
                         i, len(uids), subject[:60], sender[:40])
                category = classify_email(
                    ollama_cfg["url"],
                    ollama_cfg["model"],
                    categories,
                    subject,
                    sender,
                    body,
                )
                label = categories[category]["label"]
                log.info("%s (%s)", category, label)
                should_archive = categories[category].get("archive", False)
                if not dry_run:
                    client.apply_label(uid, label)
                    if should_archive:
                        client.archive(uid)
                        log.info(" 📥 Archived")
                    # The ledger is only updated on a real (non-dry) run.
                    mark_processed(db, message_id, category)
                else:
                    log.info(" [DRY RUN] Would apply label: %s%s", label,
                             " + archive" if should_archive else "")
                stats[category] = stats.get(category, 0) + 1
            except Exception as e:
                # Keep going: one bad message must not abort the whole batch.
                log.error("Error processing UID %s: %s", uid, e)
                stats["errors"] += 1
                continue
        log.info("Done! Stats: %s", json.dumps(stats, indent=2))
    finally:
        client.close()
        db.close()
def main():
    """CLI entry point: parse arguments, configure logging, launch a run."""
    parser = argparse.ArgumentParser(description="Gmail Organizer — LLM-powered email classification")
    # Declarative option table keeps the flag definitions in one place.
    arg_specs = [
        (("-c", "--config"),
         dict(type=Path, default=DEFAULT_CONFIG,
              help="Path to config YAML (default: config.local.yaml)")),
        (("-n", "--dry-run"),
         dict(action="store_true", help="Classify but don't apply labels")),
        (("--reprocess",),
         dict(action="store_true", help="Re-classify already-processed emails")),
        (("--limit",),
         dict(type=int, default=None, help="Override batch size")),
        (("-v", "--verbose"),
         dict(action="store_true", help="Debug logging")),
    ]
    for flags, opts in arg_specs:
        parser.add_argument(*flags, **opts)
    args = parser.parse_args()
    log_level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=log_level, format=LOG_FMT)
    if not args.config.exists():
        log.error("Config not found: %s", args.config)
        log.error("Copy config.yaml to config.local.yaml and fill in your credentials.")
        sys.exit(1)
    run(args.config, dry_run=args.dry_run, reprocess=args.reprocess, limit=args.limit)
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1 @@
pyyaml>=6.0

4
scripts/gmail-organizer/.gitignore vendored Normal file
View File

@@ -0,0 +1,4 @@
config.local.yaml
processed.db
__pycache__/
*.pyc

View File

@@ -0,0 +1,47 @@
# Gmail Organizer Configuration
# Copy this to config.local.yaml and fill in your credentials
gmail:
email: "your.email@gmail.com"
  app_password: "xxxx xxxx xxxx xxxx"  # 16-char app password from Google # pragma: allowlist secret
ollama:
url: "https://a5be22681.vishinator.olares.com"
model: "qwen3-coder:latest"
# Categories and their Gmail labels
# The LLM will classify each email into one of these
categories:
receipts:
label: "AutoOrg/Receipts"
description: "Purchase confirmations, invoices, payment receipts, order updates"
archive: false # keep in inbox — you may need to act on these
newsletters:
label: "AutoOrg/Newsletters"
description: "Mailing lists, digests, blog updates, promotional content from subscriptions"
archive: true # auto-archive out of inbox
work:
label: "AutoOrg/Work"
description: "Professional correspondence, meeting invites, project updates, work tools"
archive: false
accounts:
label: "AutoOrg/Accounts"
description: "Security alerts, password resets, 2FA notifications, account verification, login alerts from services"
archive: true # auto-archive — check label if needed
spam:
label: "AutoOrg/Spam"
description: "Unsolicited marketing, phishing attempts, junk mail that bypassed filters"
archive: true # auto-archive junk
personal:
label: "AutoOrg/Personal"
description: "Friends, family, personal accounts, non-work non-commercial emails"
archive: false
# Processing settings
processing:
batch_size: 50 # Emails per run
max_body_chars: 2000 # Truncate body to save tokens
skip_already_labeled: true
dry_run: false # Set true to preview without applying labels
process_read: true # Also process already-read emails
mailbox: "INBOX" # IMAP mailbox to process

View File

@@ -0,0 +1,332 @@
#!/usr/bin/env python3
"""Gmail Organizer — classifies emails using a local LLM and applies Gmail labels."""
import argparse
import email
import email.header
import html
import imaplib
import json
import logging
import re
import sqlite3
import sys
import time
import urllib.request
import urllib.error
from datetime import datetime, timedelta
from pathlib import Path
import yaml
# Log line format shared by the logging.basicConfig call in main().
LOG_FMT = "%(asctime)s %(levelname)-8s %(message)s"
log = logging.getLogger("gmail-organizer")
# Ledger of already-classified messages and local config live next to the script.
DB_PATH = Path(__file__).parent / "processed.db"
DEFAULT_CONFIG = Path(__file__).parent / "config.local.yaml"
# ── helpers ──────────────────────────────────────────────────────────────────
def load_config(path: Path) -> dict:
    """Parse the YAML configuration file at *path* into a dict."""
    with path.open() as fh:
        return yaml.safe_load(fh)
def init_db(db_path: Path) -> sqlite3.Connection:
    """Open the processed-message ledger, creating its table if absent."""
    db = sqlite3.connect(db_path)
    db.execute("""
        CREATE TABLE IF NOT EXISTS processed (
            message_id TEXT PRIMARY KEY,
            category TEXT NOT NULL,
            processed_at TEXT NOT NULL
        )
    """)
    db.commit()
    return db
def is_processed(conn: sqlite3.Connection, message_id: str) -> bool:
    """Return True when *message_id* is already recorded in the ledger."""
    return conn.execute(
        "SELECT 1 FROM processed WHERE message_id = ?", (message_id,)
    ).fetchone() is not None
def mark_processed(conn: sqlite3.Connection, message_id: str, category: str):
    """Record (or overwrite) *message_id* as processed with a UTC timestamp."""
    # Local import: the module-level import only pulls in datetime/timedelta.
    # datetime.now(timezone.utc) replaces the previous per-call
    # __import__('zoneinfo') hack; it yields the same tz-aware ISO timestamp
    # (suffix "+00:00") without a dynamic import on every insert.
    from datetime import timezone
    conn.execute(
        "INSERT OR REPLACE INTO processed (message_id, category, processed_at) VALUES (?, ?, ?)",
        (message_id, category, datetime.now(timezone.utc).isoformat()),
    )
    conn.commit()
def decode_header(raw: str | None) -> str:
if not raw:
return ""
parts = email.header.decode_header(raw)
decoded = []
for data, charset in parts:
if isinstance(data, bytes):
decoded.append(data.decode(charset or "utf-8", errors="replace"))
else:
decoded.append(data)
return " ".join(decoded)
def extract_text(msg: email.message.Message, max_chars: int) -> str:
    """Extract plain-text body from an email, falling back to stripped HTML."""
    body = ""
    if msg.is_multipart():
        # Walk all MIME parts: the first text/plain part wins (break on hit);
        # the first text/html part is kept only as a fallback while searching.
        for part in msg.walk():
            ct = part.get_content_type()
            if ct == "text/plain":
                payload = part.get_payload(decode=True)
                if payload:
                    # NOTE(review): an invalid charset label raises LookupError
                    # here — consider hardening like decode_header does.
                    charset = part.get_content_charset() or "utf-8"
                    body = payload.decode(charset, errors="replace")
                    break
            elif ct == "text/html" and not body:
                payload = part.get_payload(decode=True)
                if payload:
                    charset = part.get_content_charset() or "utf-8"
                    raw_html = payload.decode(charset, errors="replace")
                    # Crude tag strip + entity unescape: good enough for an
                    # LLM classification snippet, not for display.
                    body = html.unescape(re.sub(r"<[^>]+>", " ", raw_html))
    else:
        payload = msg.get_payload(decode=True)
        if payload:
            charset = msg.get_content_charset() or "utf-8"
            body = payload.decode(charset, errors="replace")
            if msg.get_content_type() == "text/html":
                body = html.unescape(re.sub(r"<[^>]+>", " ", body))
    # Collapse whitespace and truncate
    body = re.sub(r"\s+", " ", body).strip()
    return body[:max_chars]
# ── Gmail IMAP ───────────────────────────────────────────────────────────────
class GmailClient:
    """Thin IMAP wrapper around Gmail, using the X-GM-LABELS extension."""
    # NOTE(review): the signature below was mangled by the mirror's secret
    # sanitization ("REDACTED_PASSWORD"); presumably it was
    # `def __init__(self, email_addr: str, app_password: str):` — confirm
    # against the upstream repository before running.
    def __init__(self, email_addr: str, app_password: "REDACTED_PASSWORD"
        self.email = email_addr
        self.conn = imaplib.IMAP4_SSL("imap.gmail.com")
        self.conn.login(email_addr, app_password)
    def fetch_uids(self, mailbox: str = "INBOX", search: str = "ALL",
                   batch_size: int = 50) -> list[bytes]:
        """Return up to batch_size message UIDs from *mailbox*, newest first."""
        self.conn.select(mailbox)
        _, data = self.conn.search(None, search)
        uids = data[0].split()
        # Most recent first
        return list(reversed(uids[-batch_size:]))
    def fetch_message(self, uid: bytes) -> email.message.Message:
        """Download the full RFC 822 message for *uid* and parse it."""
        _, data = self.conn.fetch(uid, "(RFC822)")
        return email.message_from_bytes(data[0][1])
    def get_labels(self, uid: bytes) -> list[str]:
        """Get existing Gmail labels (X-GM-LABELS) for a message."""
        _, data = self.conn.fetch(uid, "(X-GM-LABELS)")
        # The response may arrive as bytes or a tuple; normalize to str first.
        raw = data[0].decode() if isinstance(data[0], bytes) else str(data[0])
        match = re.search(r'X-GM-LABELS \(([^)]*)\)', raw)
        if match:
            return match.group(1).split()
        return []
    def apply_label(self, uid: bytes, label: str):
        """Apply a Gmail label to a message. Creates the label if needed."""
        # Gmail IMAP uses X-GM-LABELS for label manipulation
        result = self.conn.store(uid, "+X-GM-LABELS", f'("{label}")')
        if result[0] != "OK":
            # Fallback: copy to label (which creates it as a folder)
            try:
                self.conn.create(label)
            except imaplib.IMAP4.error:
                pass  # Label already exists
            self.conn.copy(uid, label)
    def archive(self, uid: bytes):
        """Archive a message (remove from INBOX by removing \\Inbox label)."""
        self.conn.store(uid, "-X-GM-LABELS", '("\\\\Inbox")')
    def close(self):
        # Best-effort teardown; errors are swallowed because the session may
        # already be half-closed by the server.
        try:
            self.conn.close()
            self.conn.logout()
        except Exception:
            pass
# ── Ollama LLM ───────────────────────────────────────────────────────────────
def classify_email(
    ollama_url: str,
    model: str,
    categories: dict,
    subject: str,
    sender: str,
    body_snippet: str,
) -> str:
    """Ask the LLM to classify an email into one of ``categories``.

    Sends a single non-streaming request to Ollama's /api/generate and maps
    the raw completion back onto a category name.

    Args:
        ollama_url: Base URL of the Ollama server (trailing slash tolerated).
        model: Ollama model name.
        categories: Mapping of category name -> {"description": ...}.
        subject, sender, body_snippet: Email fields fed into the prompt.

    Returns:
        A key of ``categories``, or "personal" if the reply is unrecognized.

    Raises:
        urllib.error.URLError: If the Ollama request fails (logged first).
    """
    cat_descriptions = "\n".join(
        f"- **{name}**: {info['description']}" for name, info in categories.items()
    )
    category_names = ", ".join(categories.keys())
    prompt = f"""Classify this email into exactly ONE category. Reply with ONLY the category name, nothing else.
Categories:
{cat_descriptions}
Email:
From: {sender}
Subject: {subject}
Body: {body_snippet[:1000]}
Reply with one of: {category_names}"""
    payload = json.dumps({
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {
            "temperature": 0.1,  # near-deterministic classification
            "num_predict": 20,   # a category name only; cap generation cost
        },
    }).encode()
    req = urllib.request.Request(
        f"{ollama_url.rstrip('/')}/api/generate",
        data=payload,
        headers={"Content-Type": "application/json"},
    )
    try:
        with urllib.request.urlopen(req, timeout=60) as resp:
            result = json.loads(resp.read())
    except urllib.error.URLError as e:
        log.error("Ollama request failed: %s", e)
        raise
    raw_response = result.get("response", "").strip().lower()
    # Strip any thinking tags (qwen3 sometimes wraps reasoning in <think>...</think>)
    raw_response = re.sub(r"<think>.*?</think>", "", raw_response, flags=re.DOTALL).strip()
    # Prefer an exact match; a plain substring scan could misfire when one
    # category name is contained inside another word of the reply
    # (e.g. "work" inside "network").
    if raw_response in categories:
        return raw_response
    for name in categories:
        if re.search(rf"\b{re.escape(name)}\b", raw_response):
            return name
    log.warning("LLM returned unexpected category %r, defaulting to 'personal'", raw_response)
    return "personal"
# ── main ─────────────────────────────────────────────────────────────────────
def run(config_path: Path, dry_run: bool = False, reprocess: bool = False,
        limit: int | None = None):
    """Classify one batch of inbox mail: fetch, label, optionally archive, record.

    Args:
        config_path: YAML config with gmail/ollama/categories/processing keys.
        dry_run: Log intended actions without touching Gmail or the ledger.
        reprocess: Re-classify messages already recorded in the ledger.
        limit: Overrides processing.batch_size when given.
            NOTE(review): limit=0 is falsy and silently falls back to the
            config default — confirm that is acceptable for the CLI.
    """
    cfg = load_config(config_path)
    gmail_cfg = cfg["gmail"]
    ollama_cfg = cfg["ollama"]
    categories = cfg["categories"]
    proc_cfg = cfg.get("processing", {})
    batch_size = limit or proc_cfg.get("batch_size", 50)
    max_body = proc_cfg.get("max_body_chars", 2000)
    # Either the CLI flag or the config switch can force a dry run.
    dry_run = dry_run or proc_cfg.get("dry_run", False)
    mailbox = proc_cfg.get("mailbox", "INBOX")
    log.info("Connecting to Gmail as %s", gmail_cfg["email"])
    client = GmailClient(gmail_cfg["email"], gmail_cfg["app_password"])
    db = init_db(DB_PATH)
    try:
        uids = client.fetch_uids(mailbox=mailbox, batch_size=batch_size)
        log.info("Fetched %d message UIDs", len(uids))
        stats = {cat: 0 for cat in categories}
        stats["skipped"] = 0
        stats["errors"] = 0
        for i, uid in enumerate(uids, 1):
            try:
                msg = client.fetch_message(uid)
                # Synthetic fallback id keeps the ledger deduping messages
                # that lack a Message-ID header.
                message_id = msg.get("Message-ID", f"uid-{uid.decode()}")
                subject = decode_header(msg.get("Subject"))
                sender = decode_header(msg.get("From"))
                if not reprocess and is_processed(db, message_id):
                    stats["skipped"] += 1
                    continue
                body = extract_text(msg, max_body)
                log.info("[%d/%d] Classifying: %s (from: %s)",
                         i, len(uids), subject[:60], sender[:40])
                category = classify_email(
                    ollama_cfg["url"],
                    ollama_cfg["model"],
                    categories,
                    subject,
                    sender,
                    body,
                )
                label = categories[category]["label"]
                log.info("%s (%s)", category, label)
                should_archive = categories[category].get("archive", False)
                if not dry_run:
                    client.apply_label(uid, label)
                    if should_archive:
                        client.archive(uid)
                        log.info(" 📥 Archived")
                    # The ledger is only updated on a real (non-dry) run.
                    mark_processed(db, message_id, category)
                else:
                    log.info(" [DRY RUN] Would apply label: %s%s", label,
                             " + archive" if should_archive else "")
                stats[category] = stats.get(category, 0) + 1
            except Exception as e:
                # Keep going: one bad message must not abort the whole batch.
                log.error("Error processing UID %s: %s", uid, e)
                stats["errors"] += 1
                continue
        log.info("Done! Stats: %s", json.dumps(stats, indent=2))
    finally:
        client.close()
        db.close()
def main():
    """CLI entry point: parse arguments, configure logging, launch a run."""
    parser = argparse.ArgumentParser(description="Gmail Organizer — LLM-powered email classification")
    # Declarative option table keeps the flag definitions in one place.
    arg_specs = [
        (("-c", "--config"),
         dict(type=Path, default=DEFAULT_CONFIG,
              help="Path to config YAML (default: config.local.yaml)")),
        (("-n", "--dry-run"),
         dict(action="store_true", help="Classify but don't apply labels")),
        (("--reprocess",),
         dict(action="store_true", help="Re-classify already-processed emails")),
        (("--limit",),
         dict(type=int, default=None, help="Override batch size")),
        (("-v", "--verbose"),
         dict(action="store_true", help="Debug logging")),
    ]
    for flags, opts in arg_specs:
        parser.add_argument(*flags, **opts)
    args = parser.parse_args()
    log_level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=log_level, format=LOG_FMT)
    if not args.config.exists():
        log.error("Config not found: %s", args.config)
        log.error("Copy config.yaml to config.local.yaml and fill in your credentials.")
        sys.exit(1)
    run(args.config, dry_run=args.dry_run, reprocess=args.reprocess, limit=args.limit)
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1 @@
pyyaml>=6.0

File diff suppressed because it is too large Load Diff

204
scripts/md-to-dokuwiki.py Executable file
View File

@@ -0,0 +1,204 @@
#!/usr/bin/env python3
"""
Markdown to DokuWiki Converter
Converts Markdown documentation to DokuWiki syntax for homelab documentation mirror
"""
import re
import os
import sys
import requests
from pathlib import Path
from urllib.parse import quote
class MarkdownToDokuWiki:
    """Converts Markdown text to DokuWiki syntax and pushes pages via HTTP."""
    def __init__(self, dokuwiki_base_url="http://atlantis.vish.local:8399"):
        # Base URL of the DokuWiki instance; pages are saved via /doku.php.
        self.dokuwiki_base_url = dokuwiki_base_url
    def convert_markdown_to_dokuwiki(self, markdown_content):
        """Convert Markdown content to DokuWiki syntax"""
        content = markdown_content
        # Convert headers
        content = re.sub(r'^# (.*?)$', r'====== \1 ======', content, flags=re.MULTILINE)
        content = re.sub(r'^## (.*?)$', r'===== \1 =====', content, flags=re.MULTILINE)
        content = re.sub(r'^### (.*?)$', r'==== \1 ====', content, flags=re.MULTILINE)
        content = re.sub(r'^#### (.*?)$', r'=== \1 ===', content, flags=re.MULTILINE)
        content = re.sub(r'^##### (.*?)$', r'== \1 ==', content, flags=re.MULTILINE)
        # Convert bold and italic
        # NOTE(review): the italic regex can also match inside ** pairs that
        # the bold pass rewrote — verify output on bold-heavy documents.
        content = re.sub(r'\*\*(.*?)\*\*', r'**\1**', content)  # Bold (already correct)
        content = re.sub(r'\*(.*?)\*', r'//\1//', content)  # Italic
        # Convert code blocks
        # An unlanguaged fence leaves group 1 unmatched; re.sub substitutes ""
        # for it, producing "<code >".
        content = re.sub(r'^```(\w+)?\n(.*?)^```', r'<code \1>\n\2</code>', content, flags=re.MULTILINE | re.DOTALL)
        content = re.sub(r'`([^`]+)`', r'%%\1%%', content)  # Inline code
        # Convert lists
        content = re.sub(r'^- (.*?)$', r' * \1', content, flags=re.MULTILINE)
        content = re.sub(r'^\d+\. (.*?)$', r' - \1', content, flags=re.MULTILINE)
        # Convert links
        content = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', r'[[\2|\1]]', content)
        # Convert tables (basic conversion)
        lines = content.split('\n')
        converted_lines = []
        in_table = False
        for line in lines:
            if '|' in line and line.strip().startswith('|'):
                if not in_table:
                    in_table = True
                # Convert table row
                cells = [cell.strip() for cell in line.split('|')[1:-1]]
                converted_line = '^ ' + ' ^ '.join(cells) + ' ^' if '---' not in line else None
                if converted_line and '---' not in line:
                    # Check if this is a header row (next line might be separator)
                    # NOTE(review): every surviving row is emitted with the
                    # header ('^') delimiter — data rows should likely use '|';
                    # confirm intended output before relying on tables.
                    converted_lines.append(converted_line)
            elif in_table and line.strip() == '':
                in_table = False
                converted_lines.append(line)
            else:
                in_table = False
                converted_lines.append(line)
        content = '\n'.join(converted_lines)
        # Convert checkboxes
        content = re.sub(r'- \[x\] (.*?)$', r' * ✅ \1', content, flags=re.MULTILINE)
        content = re.sub(r'- \[ \] (.*?)$', r' * ☐ \1', content, flags=re.MULTILINE)
        # Convert blockquotes
        content = re.sub(r'^> (.*?)$', r'> \1', content, flags=re.MULTILINE)
        return content
    def create_dokuwiki_page(self, page_id, content, summary="Updated from repository"):
        """Create or update a DokuWiki page via HTTP POST"""
        # NOTE(review): no authentication is sent — presumably the wiki allows
        # anonymous edits on this LAN; confirm.
        try:
            # DokuWiki edit URL
            edit_url = f"{self.dokuwiki_base_url}/doku.php"
            # Prepare form data for page creation/update
            form_data = {
                'id': page_id,
                'do': 'save',
                'wikitext': content,
                'summary': summary,
                'minor': '1'
            }
            # Make the request
            response = requests.post(edit_url, data=form_data, timeout=30)
            if response.status_code == 200:
                print(f"✅ Successfully created/updated page: {page_id}")
                return True
            else:
                print(f"❌ Failed to create page {page_id}: HTTP {response.status_code}")
                return False
        except Exception as e:
            print(f"❌ Error creating page {page_id}: {str(e)}")
            return False
    def convert_file(self, markdown_file_path, dokuwiki_page_id):
        """Convert a single Markdown file and upload to DokuWiki"""
        try:
            with open(markdown_file_path, 'r', encoding='utf-8') as f:
                markdown_content = f.read()
            # Convert to DokuWiki syntax
            dokuwiki_content = self.convert_markdown_to_dokuwiki(markdown_content)
            # Add header with source information
            # NOTE(review): getmtime renders as a raw epoch float in the page
            # header — consider formatting before display.
            header = f"""====== {os.path.basename(markdown_file_path)} ======
//This page is automatically mirrored from the homelab Git repository//
//Last updated: {os.path.getmtime(markdown_file_path)}//
//Source: {markdown_file_path}//
"""
            dokuwiki_content = header + dokuwiki_content
            # Create the page in DokuWiki
            success = self.create_dokuwiki_page(dokuwiki_page_id, dokuwiki_content)
            return success
        except Exception as e:
            print(f"❌ Error converting file {markdown_file_path}: {str(e)}")
            return False
def main():
    """Convert a fixed list of repo Markdown docs into DokuWiki pages."""
    converter = MarkdownToDokuWiki()
    # Define key documentation files to convert
    # (absolute paths on the homelab host; missing files are skipped below).
    docs_to_convert = [
        {
            'file': '/home/homelab/organized/repos/homelab/README.md',
            'page_id': 'homelab:readme'
        },
        {
            'file': '/home/homelab/organized/repos/homelab/docs/INDEX.md',
            'page_id': 'homelab:docs:index'
        },
        {
            'file': '/home/homelab/organized/repos/homelab/docs/admin/GITOPS_COMPREHENSIVE_GUIDE.md',
            'page_id': 'homelab:docs:admin:gitops_comprehensive_guide'
        },
        {
            'file': '/home/homelab/organized/repos/homelab/DOCUMENTATION_AUDIT_REPORT.md',
            'page_id': 'homelab:documentation_audit_report'
        },
        {
            'file': '/home/homelab/organized/repos/homelab/docs/infrastructure/INFRASTRUCTURE_HEALTH_REPORT.md',
            'page_id': 'homelab:docs:infrastructure:health_report'
        },
        {
            'file': '/home/homelab/organized/repos/homelab/docs/runbooks/add-new-service.md',
            'page_id': 'homelab:docs:runbooks:add_new_service'
        },
        {
            'file': '/home/homelab/organized/repos/homelab/GITOPS_DEPLOYMENT_GUIDE.md',
            'page_id': 'homelab:gitops_deployment_guide'
        },
        {
            'file': '/home/homelab/organized/repos/homelab/OPERATIONAL_STATUS.md',
            'page_id': 'homelab:operational_status'
        },
        {
            'file': '/home/homelab/organized/repos/homelab/MONITORING_ARCHITECTURE.md',
            'page_id': 'homelab:monitoring_architecture'
        }
    ]
    print("🚀 Starting Markdown to DokuWiki conversion...")
    successful_conversions = 0
    total_conversions = len(docs_to_convert)
    for doc in docs_to_convert:
        file_path = doc['file']
        page_id = doc['page_id']
        if os.path.exists(file_path):
            print(f"\n📄 Converting: {file_path} -> {page_id}")
            if converter.convert_file(file_path, page_id):
                successful_conversions += 1
        else:
            print(f"⚠️  File not found: {file_path}")
    # Missing files count as failures in the summary below.
    print(f"\n🎯 Conversion Summary:")
    print(f"✅ Successful: {successful_conversions}/{total_conversions}")
    print(f"❌ Failed: {total_conversions - successful_conversions}/{total_conversions}")
    if successful_conversions > 0:
        print(f"\n🌐 DokuWiki pages available at:")
        print(f"   {converter.dokuwiki_base_url}/doku.php?id=homelab:readme")
        print(f"   {converter.dokuwiki_base_url}/doku.php?id=homelab:docs:index")
if __name__ == "__main__":
    main()

13
scripts/openhands-cli.sh Executable file
View File

@@ -0,0 +1,13 @@
#!/bin/bash
# Launch OpenHands against the local LM Studio endpoint.
set -euo pipefail

export PATH="$HOME/.local/bin:$PATH"
export LLM_MODEL=openai/qwen/qwen2.5-coder-14b
export LLM_API_KEY="lm-studio"
export LLM_BASE_URL="http://100.98.93.15:1234/v1"
export LLM_CONTEXT_WINDOW=32768

# No arguments → interactive TUI with auto-approval; otherwise pass through.
# exec replaces this wrapper shell so signals and exit status come from
# openhands itself instead of an intermediate bash process.
if [ $# -eq 0 ]; then
  exec openhands --override-with-envs --always-approve
else
  exec openhands --override-with-envs "$@"
fi

11
scripts/openhands-local.sh Executable file
View File

@@ -0,0 +1,11 @@
#!/bin/bash
# Launch OpenHands against Anthropic Claude (API key redacted at mirror time).
set -euo pipefail

export PATH="$HOME/.local/bin:$PATH"
export LLM_MODEL=anthropic/claude-sonnet-4-6
export LLM_API_KEY="REDACTED_API_KEY"

# No arguments → interactive TUI with auto-approval; otherwise pass through.
# exec replaces this wrapper shell so signals and exit status come from
# openhands itself instead of an intermediate bash process.
if [ $# -eq 0 ]; then
  exec openhands --override-with-envs --always-approve
else
  exec openhands --override-with-envs "$@"
fi

13
scripts/openhands-olares.sh Executable file
View File

@@ -0,0 +1,13 @@
#!/bin/bash
# Launch OpenHands against the Olares-hosted Ollama endpoint.
set -euo pipefail

export PATH="$HOME/.local/bin:$PATH"
export LLM_MODEL=openai/qwen3-coder:latest
export LLM_API_KEY="ollama"
export LLM_BASE_URL="https://a5be22681.vishinator.olares.com/v1"
export LLM_CONTEXT_WINDOW=65536

# No arguments → interactive TUI with auto-approval; otherwise pass through.
# exec replaces this wrapper shell so signals and exit status come from
# openhands itself instead of an intermediate bash process.
if [ $# -eq 0 ]; then
  exec openhands --override-with-envs --always-approve
else
  exec openhands --override-with-envs "$@"
fi

View File

@@ -0,0 +1,130 @@
#!/bin/bash
# Emergency Watchtower Fix via Portainer API
# Stops crash looping containers and recreates them with correct config
# NOTE(review): API_KEY placeholder was left by mirror sanitization — the
# script cannot run as-is; supply a real Portainer API key via env instead
# of hardcoding it here.
API_KEY=REDACTED_API_KEY
BASE_URL="http://vishinator.synology.me:10000"
echo "🚨 EMERGENCY FIX: Stopping Watchtower crash loops via Portainer API"
echo "=================================================================="
# Function to fix Watchtower on an endpoint
#######################################
# Stop, remove, and recreate the Watchtower container on one Portainer
# endpoint with a corrected notification URL.
# Globals:   API_KEY, BASE_URL (read)
# Arguments: $1 - Portainer endpoint id; $2 - human-readable endpoint name
# Returns:   1 when no watchtower container is found; otherwise prints status
#######################################
fix_watchtower() {
    local endpoint_id=$1
    local endpoint_name=$2
    echo ""
    echo "🔧 Fixing Watchtower on: $endpoint_name (ID: $endpoint_id)"
    echo "--------------------------------------------------------"
    # Get Watchtower container ID
    container_info=$(curl -s -H "X-API-Key: $API_KEY" \
        "$BASE_URL/api/endpoints/$endpoint_id/docker/containers/json?all=true" | \
        jq -r '.[] | select(.Names[]? | contains("watchtower")) | "\(.Id) \(.Names[0])"')
    if [ -z "$container_info" ]; then
        echo "❌ No Watchtower container found"
        return 1
    fi
    # Split "id name" pair emitted by jq above.
    read container_id container_name <<< "$container_info"
    echo "📍 Found container: $container_name ($container_id)"
    # Stop the container
    echo "🛑 Stopping crash looping container..."
    curl -s -X POST -H "X-API-Key: $API_KEY" \
        "$BASE_URL/api/endpoints/$endpoint_id/docker/containers/$container_id/stop"
    sleep 2
    # Remove the container
    echo "🗑️  Removing broken container..."
    curl -s -X DELETE -H "X-API-Key: $API_KEY" \
        "$BASE_URL/api/endpoints/$endpoint_id/docker/containers/$container_id?force=true"
    sleep 2
    # Create new container with correct notification URL
    echo "🔄 Creating new Watchtower with fixed notification URL..."
    # Determine the correct notification URL based on endpoint
    if [ "$endpoint_name" = "Atlantis" ]; then
        NOTIFICATION_URL="ntfy://localhost:8081/updates?insecure=yes"
    elif [ "$endpoint_name" = "Calypso" ]; then
        NOTIFICATION_URL="ntfy://localhost:8081/updates?insecure=yes"
    else
        NOTIFICATION_URL="ntfy://ntfy.vish.gg/REDACTED_NTFY_TOPIC"
    fi
    # Create container via API
    # NOTE(review): the HTTP_API_TOKEN line below was broken by secret
    # sanitization (nested quotes) — restore a valid token before running.
    create_response=$(curl -s -X POST -H "X-API-Key: $API_KEY" \
        -H "Content-Type: application/json" \
        "$BASE_URL/api/endpoints/$endpoint_id/docker/containers/create?name=watchtower" \
        -d "{
        \"Image\": \"containrrr/watchtower:latest\",
        \"Env\": [
            \"WATCHTOWER_CLEANUP=true\",
            \"WATCHTOWER_INCLUDE_RESTARTING=true\",
            \"WATCHTOWER_INCLUDE_STOPPED=true\",
            \"WATCHTOWER_REVIVE_STOPPED=false\",
            \"WATCHTOWER_POLL_INTERVAL=3600\",
            \"WATCHTOWER_TIMEOUT=10s\",
            \"WATCHTOWER_HTTP_API_UPDATE=true\",
            \"WATCHTOWER_HTTP_API_TOKEN="REDACTED_HTTP_TOKEN"\",
            \"WATCHTOWER_NOTIFICATIONS=shoutrrr\",
            \"WATCHTOWER_NOTIFICATION_URL=$NOTIFICATION_URL\",
            \"TZ=America/Los_Angeles\"
        ],
        \"HostConfig\": {
            \"Binds\": [\"/var/run/docker.sock:/var/run/docker.sock\"],
            \"RestartPolicy\": {\"Name\": \"always\"},
            \"PortBindings\": {\"8080/tcp\": [{\"HostPort\": \"8080\"}]}
        }
    }")
    new_container_id=$(echo "$create_response" | jq -r '.Id')
    if [ "$new_container_id" != "null" ] && [ -n "$new_container_id" ]; then
        echo "✅ Created new container: $new_container_id"
        # Start the container
        echo "▶️  Starting new Watchtower container..."
        curl -s -X POST -H "X-API-Key: $API_KEY" \
            "$BASE_URL/api/endpoints/$endpoint_id/docker/containers/$new_container_id/start"
        sleep 3
        # Check if it's running
        status=$(curl -s -H "X-API-Key: $API_KEY" \
            "$BASE_URL/api/endpoints/$endpoint_id/docker/containers/$new_container_id/json" | \
            jq -r '.State.Status')
        if [ "$status" = "running" ]; then
            echo "🟢 SUCCESS: Watchtower is now running!"
            # Get recent logs to verify no errors
            echo "📋 Checking logs for errors..."
            # NOTE(review): the sed strips 7 leading chars per line to drop the
            # Docker log stream framing; the multiplexed frame header is 8
            # bytes per frame, not per line — verify the output isn't skewed.
            curl -s -H "X-API-Key: $API_KEY" \
                "$BASE_URL/api/endpoints/$endpoint_id/docker/containers/$new_container_id/logs?stdout=true&stderr=true&tail=5" | \
                sed 's/^.......//g' | sed 's/^/   /'
        else
            echo "🔴 FAILED: Container status is $status"
        fi
    else
        echo "🔴 FAILED: Could not create new container"
        echo "Response: $create_response"
    fi
}
# Fix Atlantis (ID: 2)
# NOTE(review): without `set -e`, a failure on one endpoint does not stop the
# script from continuing to the next — presumably intentional for a fix-all run.
fix_watchtower 2 "Atlantis"
# Fix Calypso (ID: 443397)
fix_watchtower 443397 "Calypso"
echo ""
echo "=================================================================="
echo "🎯 Emergency fix complete! Run status check to verify:"
echo "   ./scripts/check-watchtower-status.sh"
echo "=================================================================="

135
scripts/portainer-fix-v2.sh Executable file
View File

@@ -0,0 +1,135 @@
#!/bin/bash
# Emergency Watchtower Fix v2 - Correct ntfy URL format
# Based on Shoutrrr documentation for ntfy service
API_KEY=REDACTED_API_KEY
BASE_URL="http://vishinator.synology.me:10000"
echo "🚨 EMERGENCY FIX v2: Correcting ntfy notification URL format"
echo "============================================================="
# Function to fix Watchtower with correct ntfy URL
fix_watchtower_v2() {
local endpoint_id=$1
local endpoint_name=$2
echo ""
echo "🔧 Fixing Watchtower on: $endpoint_name (ID: $endpoint_id)"
echo "--------------------------------------------------------"
# Get current Watchtower container
container_info=$(curl -s -H "X-API-Key: $API_KEY" \
"$BASE_URL/api/endpoints/$endpoint_id/docker/containers/json?all=true" | \
jq -r '.[] | select(.Names[]? | contains("watchtower")) | "\(.Id) \(.Names[0]) \(.State)"')
if [ -z "$container_info" ]; then
echo "❌ No Watchtower container found"
return 1
fi
read container_id container_name state <<< "$container_info"
echo "📍 Found container: $container_name ($container_id) - State: $state"
# Stop and remove current container
echo "🛑 Stopping and removing current container..."
curl -s -X POST -H "X-API-Key: $API_KEY" \
"$BASE_URL/api/endpoints/$endpoint_id/docker/containers/$container_id/stop" > /dev/null
sleep 2
curl -s -X DELETE -H "X-API-Key: $API_KEY" \
"$BASE_URL/api/endpoints/$endpoint_id/docker/containers/$container_id?force=true" > /dev/null
sleep 2
# Use correct ntfy URL format - no insecure parameter, just HTTP scheme
# For local HTTP ntfy servers, use http:// in the host part
if [ "$endpoint_name" = "Atlantis" ] || [ "$endpoint_name" = "Calypso" ]; then
# For local ntfy servers, we need to use the generic HTTP format
# Since ntfy:// defaults to HTTPS, we'll use generic:// for HTTP
NOTIFICATION_URL="generic+http://localhost:8081/updates"
else
NOTIFICATION_URL="ntfy://ntfy.vish.gg/REDACTED_NTFY_TOPIC"
fi
echo "🔗 Using notification URL: $NOTIFICATION_URL"
# Create new container with corrected URL
create_response=$(curl -s -X POST -H "X-API-Key: $API_KEY" \
-H "Content-Type: application/json" \
"$BASE_URL/api/endpoints/$endpoint_id/docker/containers/create?name=watchtower" \
-d "{
\"Image\": \"containrrr/watchtower:latest\",
\"Env\": [
\"WATCHTOWER_CLEANUP=true\",
\"WATCHTOWER_INCLUDE_RESTARTING=true\",
\"WATCHTOWER_INCLUDE_STOPPED=true\",
\"WATCHTOWER_REVIVE_STOPPED=false\",
\"WATCHTOWER_POLL_INTERVAL=3600\",
\"WATCHTOWER_TIMEOUT=10s\",
\"WATCHTOWER_HTTP_API_UPDATE=true\",
\"WATCHTOWER_HTTP_API_TOKEN="REDACTED_HTTP_TOKEN"\",
\"WATCHTOWER_NOTIFICATIONS=shoutrrr\",
\"WATCHTOWER_NOTIFICATION_URL=$NOTIFICATION_URL\",
\"TZ=America/Los_Angeles\"
],
\"HostConfig\": {
\"Binds\": [\"/var/run/docker.sock:/var/run/docker.sock\"],
\"RestartPolicy\": {\"Name\": \"always\"},
\"PortBindings\": {\"8080/tcp\": [{\"HostPort\": \"8080\"}]}
}
}")
new_container_id=$(echo "$create_response" | jq -r '.Id')
if [ "$new_container_id" != "null" ] && [ -n "$new_container_id" ]; then
echo "✅ Created new container: ${new_container_id:0:12}"
# Start the container
echo "▶️ Starting new Watchtower container..."
start_response=$(curl -s -X POST -H "X-API-Key: $API_KEY" \
"$BASE_URL/api/endpoints/$endpoint_id/docker/containers/$new_container_id/start")
sleep 5
# Check status
container_status=$(curl -s -H "X-API-Key: $API_KEY" \
"$BASE_URL/api/endpoints/$endpoint_id/docker/containers/$new_container_id/json" | \
jq -r '.State.Status')
echo "📊 Container status: $container_status"
if [ "$container_status" = "running" ]; then
echo "🟢 SUCCESS: Watchtower is running!"
# Check logs for any errors
echo "📋 Recent logs:"
curl -s -H "X-API-Key: $API_KEY" \
"$BASE_URL/api/endpoints/$endpoint_id/docker/containers/$new_container_id/logs?stdout=true&stderr=true&tail=3" | \
sed 's/^.......//g' | sed 's/^/ /'
else
echo "🔴 Issue: Container status is $container_status"
echo "📋 Logs for debugging:"
curl -s -H "X-API-Key: $API_KEY" \
"$BASE_URL/api/endpoints/$endpoint_id/docker/containers/$new_container_id/logs?stdout=true&stderr=true&tail=5" | \
sed 's/^.......//g' | sed 's/^/ /'
fi
else
echo "🔴 FAILED: Could not create container"
echo "API Response: $create_response"
fi
}
# Fix both endpoints
fix_watchtower_v2 2 "Atlantis"
fix_watchtower_v2 443397 "Calypso"
echo ""
echo "============================================================="
echo "🎯 Fix v2 complete! Checking final status..."
echo "============================================================="
# Quick status check
# NOTE(review): assumes the script is run from the repo root and that
# scripts/check-watchtower-status.sh exists and is executable — confirm.
sleep 3
./scripts/check-watchtower-status.sh

2
scripts/proton-organizer/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
config.local.yaml
processed.db

View File

@@ -0,0 +1,330 @@
#!/usr/bin/env python3
"""Proton Mail Organizer — classifies emails using a local LLM and applies labels via Proton Bridge."""
import argparse
import email
import email.header
import html
import imaplib
import json
import logging
import re
import sqlite3
import ssl
import sys
import urllib.request
import urllib.error
from datetime import datetime
from pathlib import Path
import yaml
# Log line format shared by the logging.basicConfig call at startup.
LOG_FMT = "%(asctime)s %(levelname)-8s %(message)s"
log = logging.getLogger("proton-organizer")
# Ledger of already-classified messages and local config live next to the script.
DB_PATH = Path(__file__).parent / "processed.db"
DEFAULT_CONFIG = Path(__file__).parent / "config.local.yaml"
# ── helpers ──────────────────────────────────────────────────────────────────
def load_config(path: Path) -> dict:
    """Parse the YAML configuration file at *path* into a dict."""
    with path.open() as fh:
        return yaml.safe_load(fh)
def init_db(db_path: Path) -> sqlite3.Connection:
    """Open the processed-message ledger and ensure its table exists."""
    ledger = sqlite3.connect(db_path)
    ledger.execute("""
        CREATE TABLE IF NOT EXISTS processed (
            message_id TEXT PRIMARY KEY,
            category TEXT NOT NULL,
            processed_at TEXT NOT NULL
        )
    """)
    ledger.commit()
    return ledger
def is_processed(conn: sqlite3.Connection, message_id: str) -> bool:
    """Return True when *message_id* already exists in the processed table."""
    hit = conn.execute(
        "SELECT 1 FROM processed WHERE message_id = ?", (message_id,)
    ).fetchone()
    return hit is not None
def mark_processed(conn: sqlite3.Connection, message_id: str, category: str):
    """Record (or overwrite) *message_id* as processed with a UTC timestamp."""
    # Local import: the module-level import only pulls in datetime.
    # datetime.now(timezone.utc) replaces the previous per-call
    # __import__('zoneinfo') hack; it yields the same tz-aware ISO timestamp
    # (suffix "+00:00") without a dynamic import on every insert.
    from datetime import timezone
    conn.execute(
        "INSERT OR REPLACE INTO processed (message_id, category, processed_at) VALUES (?, ?, ?)",
        (message_id, category, datetime.now(timezone.utc).isoformat()),
    )
    conn.commit()
def decode_header(raw: str | None) -> str:
if not raw:
return ""
parts = email.header.decode_header(raw)
decoded = []
for data, charset in parts:
if isinstance(data, bytes):
try:
decoded.append(data.decode(charset or "utf-8", errors="replace"))
except (LookupError, UnicodeDecodeError):
decoded.append(data.decode("utf-8", errors="replace"))
else:
decoded.append(data)
return " ".join(decoded)
def extract_text(msg: email.message.Message, max_chars: int) -> str:
body = ""
if msg.is_multipart():
for part in msg.walk():
ct = part.get_content_type()
if ct == "text/plain":
payload = part.get_payload(decode=True)
if payload:
charset = part.get_content_charset() or "utf-8"
body = payload.decode(charset, errors="replace")
break
elif ct == "text/html" and not body:
payload = part.get_payload(decode=True)
if payload:
charset = part.get_content_charset() or "utf-8"
body = html.unescape(re.sub(r"<[^>]+>", " ",
payload.decode(charset, errors="replace")))
else:
payload = msg.get_payload(decode=True)
if payload:
charset = msg.get_content_charset() or "utf-8"
body = payload.decode(charset, errors="replace")
if msg.get_content_type() == "text/html":
body = html.unescape(re.sub(r"<[^>]+>", " ", body))
return re.sub(r"\s+", " ", body).strip()[:max_chars]
# ── Proton Bridge IMAP ──────────────────────────────────────────────────────
class ProtonClient:
    """Minimal IMAP client talking to a local Proton Mail Bridge.

    Bridge listens on loopback with a self-signed certificate, so TLS
    verification is intentionally disabled for the STARTTLS upgrade.
    """

    # NOTE: the sanitized mirror mangled this signature
    # (`bridge_password: "REDACTED_PASSWORD" host...`); restored to `str`.
    def __init__(self, email_addr: str, bridge_password: str,
                 host: str = "127.0.0.1", port: int = 1143):
        self.email = email_addr
        # Bridge's certificate is self-signed; skip hostname/cert checks.
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        self.conn = imaplib.IMAP4(host, port)
        self.conn.starttls(ssl_context=ctx)
        self.conn.login(email_addr, bridge_password)

    def fetch_uids(self, mailbox: str = "INBOX", search: str = "ALL",
                   batch_size: int = 50) -> list[bytes]:
        """Return up to *batch_size* message UIDs from *mailbox*, newest first."""
        self.conn.select(mailbox)
        _, data = self.conn.search(None, search)
        uids = data[0].split()
        return list(reversed(uids[-batch_size:]))

    def fetch_message(self, uid: bytes) -> email.message.Message:
        """Download and parse the full RFC 822 message for *uid*."""
        _, data = self.conn.fetch(uid, "(RFC822)")
        return email.message_from_bytes(data[0][1])

    def apply_label(self, uid: bytes, label: str):
        """Apply a label by copying the message to the label folder."""
        try:
            # CREATE on an existing folder raises; that is fine to ignore.
            self.conn.create(label)
        except imaplib.IMAP4.error:
            pass
        result = self.conn.copy(uid, label)
        if result[0] != "OK":
            log.warning("Failed to copy to label %s: %s", label, result)

    def archive(self, uid: bytes):
        """Archive: move from INBOX to Archive folder."""
        self.conn.copy(uid, "Archive")
        self.conn.store(uid, "+FLAGS", "\\Deleted")
        self.conn.expunge()

    def close(self):
        """Best-effort logout; ignore errors from an already-dead session."""
        try:
            self.conn.close()
            self.conn.logout()
        except Exception:
            pass
# ── Ollama LLM ───────────────────────────────────────────────────────────────
def classify_email(ollama_url, model, categories, subject, sender, body_snippet):
    """Ask a local Ollama model to put one email into exactly one category.

    Args:
        ollama_url: Base URL of the Ollama server.
        model: Ollama model name to query.
        categories: Mapping of category name -> config dict with a
            "description" key used in the prompt.
        subject, sender, body_snippet: Email fields for the prompt.

    Returns:
        A category name from *categories*, or "personal" when the model's
        reply contains no known category.

    Raises:
        urllib.error.URLError: if the HTTP request to Ollama fails.
    """
    cat_descriptions = "\n".join(
        f"- **{name}**: {info['description']}" for name, info in categories.items()
    )
    category_names = ", ".join(categories.keys())
    prompt = f"""Classify this email into exactly ONE category. Reply with ONLY the category name, nothing else.
Categories:
{cat_descriptions}
Email:
From: {sender}
Subject: {subject}
Body: {body_snippet[:1000]}
Reply with one of: {category_names}"""
    # Low temperature + tiny completion budget: we only want a category token.
    payload = json.dumps({
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {"temperature": 0.1, "num_predict": 20},
    }).encode()
    req = urllib.request.Request(
        f"{ollama_url.rstrip('/')}/api/generate",
        data=payload,
        headers={"Content-Type": "application/json"},
    )
    try:
        with urllib.request.urlopen(req, timeout=60) as resp:
            result = json.loads(resp.read())
    except urllib.error.URLError as e:
        log.error("Ollama request failed: %s", e)
        raise
    raw_response = result.get("response", "").strip().lower()
    # Strip <think>...</think> blocks emitted by reasoning models before matching.
    raw_response = re.sub(r"<think>.*?</think>", "", raw_response, flags=re.DOTALL).strip()
    # Substring match so replies like "category: invoices" still resolve.
    for name in categories:
        if name in raw_response:
            return name
    log.warning("LLM returned unexpected category %r, defaulting to 'personal'", raw_response)
    return "personal"
# ── main ─────────────────────────────────────────────────────────────────────
def run(config_path, dry_run=False, reprocess=False, limit=None):
    """Fetch recent messages, classify each one, and apply labels.

    Args:
        config_path: Path to the YAML config file.
        dry_run: When True, log intended actions but change nothing.
        reprocess: When True, re-classify messages already in the DB.
        limit: Optional cap on fetched messages; overrides
            processing.batch_size from the config.
    """
    cfg = load_config(config_path)
    proton_cfg = cfg["proton"]
    ollama_cfg = cfg["ollama"]
    categories = cfg["categories"]
    proc_cfg = cfg.get("processing", {})
    batch_size = limit or proc_cfg.get("batch_size", 50)
    max_body = proc_cfg.get("max_body_chars", 2000)
    # Either the CLI flag or the config can force a dry run.
    dry_run = dry_run or proc_cfg.get("dry_run", False)
    mailbox = proc_cfg.get("mailbox", "INBOX")
    rules = cfg.get("rules", [])
    log.info("Connecting to Proton Bridge as %s", proton_cfg["email"])
    client = ProtonClient(
        proton_cfg["email"],
        proton_cfg["bridge_password"],
        host=proton_cfg.get("host", "127.0.0.1"),
        port=proton_cfg.get("port", 1143),
    )
    db = init_db(DB_PATH)
    try:
        uids = client.fetch_uids(mailbox=mailbox, batch_size=batch_size)
        log.info("Fetched %d message UIDs", len(uids))
        # Per-category counters plus bookkeeping buckets.
        stats = {cat: 0 for cat in categories}
        stats["rules"] = 0
        stats["skipped"] = 0
        stats["errors"] = 0
        for i, uid in enumerate(uids, 1):
            try:
                msg = client.fetch_message(uid)
                # Synthetic fallback ID so headerless messages still dedupe.
                message_id = msg.get("Message-ID", f"uid-{uid.decode()}")
                subject = decode_header(msg.get("Subject"))
                sender = decode_header(msg.get("From"))
                if not reprocess and is_processed(db, message_id):
                    stats["skipped"] += 1
                    continue
                # Check sender-based rules before LLM
                rule_matched = False
                for rule in rules:
                    for pattern in rule["senders"]:
                        if pattern.lower() in sender.lower():
                            folder = rule["folder"]
                            category = rule.get("category", "personal")
                            log.info("[%d/%d] Rule match: %s (from: %s) → %s",
                                     i, len(uids), subject[:60], sender[:40], folder)
                            if not dry_run:
                                client.apply_label(uid, folder)
                                mark_processed(db, message_id, category)
                            else:
                                log.info(" [DRY RUN] Would move to: %s", folder)
                            stats["rules"] += 1
                            rule_matched = True
                            break
                    if rule_matched:
                        break
                if rule_matched:
                    continue
                # No rule hit: fall through to LLM classification.
                body = extract_text(msg, max_body)
                log.info("[%d/%d] Classifying: %s (from: %s)",
                         i, len(uids), subject[:60], sender[:40])
                category = classify_email(
                    ollama_cfg["url"], ollama_cfg["model"],
                    categories, subject, sender, body,
                )
                label = categories[category]["label"]
                log.info("%s (%s)", category, label)
                should_archive = categories[category].get("archive", False)
                if not dry_run:
                    client.apply_label(uid, label)
                    if should_archive:
                        client.archive(uid)
                        log.info(" 📥 Archived")
                    mark_processed(db, message_id, category)
                else:
                    log.info(" [DRY RUN] Would apply label: %s%s", label,
                             " + archive" if should_archive else "")
                stats[category] = stats.get(category, 0) + 1
            except Exception as e:
                # One bad message must not abort the whole batch.
                log.error("Error processing UID %s: %s", uid, e)
                stats["errors"] += 1
                continue
        log.info("Done! Stats: %s", json.dumps(stats, indent=2))
    finally:
        client.close()
        db.close()
def main():
    """CLI entry point: parse arguments, configure logging, run one pass."""
    ap = argparse.ArgumentParser(
        description="Proton Mail Organizer — LLM-powered email classification"
    )
    ap.add_argument("-c", "--config", type=Path, default=DEFAULT_CONFIG)
    ap.add_argument("-n", "--dry-run", action="store_true")
    ap.add_argument("--reprocess", action="store_true")
    ap.add_argument("--limit", type=int, default=None)
    ap.add_argument("-v", "--verbose", action="store_true")
    opts = ap.parse_args()

    logging.basicConfig(
        format=LOG_FMT,
        level=logging.DEBUG if opts.verbose else logging.INFO,
    )

    if not opts.config.exists():
        log.error("Config not found: %s", opts.config)
        sys.exit(1)

    run(opts.config, dry_run=opts.dry_run, reprocess=opts.reprocess, limit=opts.limit)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1 @@
pyyaml>=6.0

44
scripts/publish-debug-image.sh Executable file
View File

@@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Build and publish stoatchat container images to ghcr.io.
#
# Usage: scripts/publish-debug-image.sh <tag-prefix> <debug: true|false>
# Example: scripts/publish-debug-image.sh 20230826-1 true
#
# When the second argument is "true", release builds temporarily get debug
# symbols by appending [profile.release] debug = true to Cargo.toml.

# fail asap
set -e

# Check if an argument was provided
if [ $# -eq 0 ]; then
  echo "No arguments provided"
  echo "Usage: scripts/publish-debug-image.sh 20230826-1 true"
  echo ""
  echo "Last argument specifies whether we should have a debug build as opposed to release build."
  exit 1
fi

DEBUG=${2:-}

if [ "$DEBUG" = "true" ]; then
  echo "[profile.release]" >> Cargo.toml
  echo "debug = true" >> Cargo.toml
  # Restore Cargo.toml even if a build below fails: under `set -e` a failing
  # docker build would otherwise exit before the cleanup ran, leaving the
  # working tree dirty.
  trap 'git restore Cargo.toml' EXIT
fi

TAG="$1-debug"
echo "Building images, will tag for ghcr.io with $TAG!"

# image-name:dockerfile-path pairs; each is built from the Dockerfile on
# stdin (no build context), matching the original invocations.
services=(
  "server:crates/delta/Dockerfile"
  "bonfire:crates/bonfire/Dockerfile"
  "autumn:crates/services/autumn/Dockerfile"
  "january:crates/services/january/Dockerfile"
  "gifbox:crates/services/gifbox/Dockerfile"
  "crond:crates/daemons/crond/Dockerfile"
  "pushd:crates/daemons/pushd/Dockerfile"
  "voice-ingress:crates/daemons/voice-ingress/Dockerfile"
)

docker build -t ghcr.io/stoatchat/base:latest -f Dockerfile.useCurrentArch .
for entry in "${services[@]}"; do
  name=${entry%%:*}
  dockerfile=${entry#*:}
  docker build -t "ghcr.io/stoatchat/$name:$TAG" - < "$dockerfile"
done

if [ "$DEBUG" = "true" ]; then
  git restore Cargo.toml
  trap - EXIT  # already restored; avoid a redundant restore on exit
fi

for entry in "${services[@]}"; do
  name=${entry%%:*}
  docker push "ghcr.io/stoatchat/$name:$TAG"
done

142
scripts/setup-dev-environment.sh Executable file
View File

@@ -0,0 +1,142 @@
#!/bin/bash
# Development Environment Setup Script
# Sets up linting, validation, and pre-commit hooks for the homelab repository

set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Shared helper: emit a colored "[LEVEL] message" line on stdout.
_log() {
  echo -e "${1}[${2}]${NC} ${3}"
}

log_info() {
  _log "$GREEN" "INFO" "$1"
}

log_warn() {
  _log "$YELLOW" "WARN" "$1"
}

log_error() {
  _log "$RED" "ERROR" "$1"
}

log_step() {
  _log "$BLUE" "STEP" "$1"
}
# Check if we're in the right directory
# README.md + hosts/ together identify the homelab repository root.
if [[ ! -f "README.md" ]] || [[ ! -d "hosts" ]]; then
  log_error "This script must be run from the homelab repository root directory"
  exit 1
fi

log_info "Setting up development environment for Homelab repository..."

# Step 1: Check Python installation
log_step "1. Checking Python installation..."
if ! command -v python3 &> /dev/null; then
  log_error "Python 3 is required but not installed"
  exit 1
fi
log_info "Python 3 found: $(python3 --version)"

# Step 2: Install Python dependencies
log_step "2. Installing Python dependencies..."
if [[ -f "requirements.txt" ]]; then
  python3 -m pip install --user -r requirements.txt
  log_info "Python dependencies installed"
else
  log_warn "requirements.txt not found, skipping Python dependencies"
fi

# Step 3: Install pre-commit hooks
log_step "3. Setting up pre-commit hooks..."
if command -v pre-commit &> /dev/null; then
  pre-commit install
  log_info "Pre-commit hooks installed"
  # Run pre-commit on all files to check setup
  log_info "Running initial pre-commit check (this may take a while)..."
  # A failing first run is expected on an unclean tree; the `if` keeps
  # `set -e` from aborting the script here.
  if pre-commit run --all-files; then
    log_info "All pre-commit checks passed!"
  else
    log_warn "Some pre-commit checks failed. This is normal for the first run."
    log_info "The hooks will now catch issues before commits."
  fi
else
  log_warn "pre-commit not found, skipping hook installation"
fi

# Step 4: Check Docker availability
log_step "4. Checking Docker availability..."
if command -v docker &> /dev/null; then
  log_info "Docker found: $(docker --version)"
  # Check if docker-compose is available
  # Try the legacy standalone binary first, then the v2 CLI plugin.
  if command -v docker-compose &> /dev/null; then
    log_info "Docker Compose found: $(docker-compose --version)"
  elif docker compose version &> /dev/null; then
    log_info "Docker Compose (plugin) found: $(docker compose version)"
  else
    log_warn "Docker Compose not found. Some validation features may not work."
  fi
else
  log_warn "Docker not found. Docker Compose validation will be skipped."
fi

# Step 5: Create .env file if it doesn't exist
log_step "5. Setting up environment configuration..."
if [[ ! -f ".env" ]]; then
  if [[ -f ".env.example" ]]; then
    cp .env.example .env
    log_info "Created .env file from template"
    log_warn "Please edit .env file with your actual configuration values"
  else
    log_warn ".env.example not found, skipping .env creation"
  fi
else
  log_info ".env file already exists"
fi

# Step 6: Test validation script
log_step "6. Testing validation script..."
if [[ -x "scripts/validate-compose.sh" ]]; then
  log_info "Testing Docker Compose validation on a sample file..."
  # Find a sample compose file to test
  sample_file=$(find hosts/ -name "*.yml" -o -name "*.yaml" | head -1)
  if [[ -n "$sample_file" ]]; then
    if ./scripts/validate-compose.sh "$sample_file"; then
      log_info "Validation script working correctly"
    else
      log_warn "Validation script test failed, but this may be expected"
    fi
  else
    log_warn "No sample compose files found for testing"
  fi
else
  log_warn "Validation script not found or not executable"
fi

# Step 7: Summary and next steps
log_step "7. Setup complete!"
echo
log_info "Development environment setup completed successfully!"
echo
echo -e "${BLUE}Next steps:${NC}"
echo "1. Edit .env file with your actual configuration values"
echo "2. Run 'yamllint hosts/' to check YAML files"
echo "3. Run './scripts/validate-compose.sh' to validate Docker Compose files"
echo "4. Make a test commit to see pre-commit hooks in action"
echo
echo -e "${BLUE}Available commands:${NC}"
echo "• yamllint hosts/ - Lint YAML files"
echo "• ./scripts/validate-compose.sh - Validate Docker Compose files"
echo "• pre-commit run --all-files - Run all pre-commit checks"
echo "• pre-commit run --files <file> - Run checks on specific files"
echo
log_info "Happy coding! 🚀"

View File

@@ -0,0 +1,333 @@
#!/bin/bash
# Fluxer Cloudflare SSL Certificate Setup Script
# This script helps set up SSL certificates for Fluxer using Cloudflare Origin Certificates

set -e

# Configuration
DOMAIN="st.vish.gg"
SUBDOMAINS=("api" "events" "files" "voice" "proxy")
NGINX_SSL_DIR="/etc/nginx/ssl"
NGINX_SITES_DIR="/etc/nginx/sites-available"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Shared helper: emit a colored "[LEVEL] message" line.
_say() {
  echo -e "${1}[${2}]${NC} ${3}"
}

log_info()  { _say "$GREEN"  "INFO"  "$1"; }
log_warn()  { _say "$YELLOW" "WARN"  "$1"; }
log_error() { _say "$RED"    "ERROR" "$1"; }
log_note()  { _say "$BLUE"   "NOTE"  "$1"; }
# Check if running as root
# Certificate installation writes under /etc/nginx and reloads nginx,
# so require root up front.
if [[ $EUID -ne 0 ]]; then
  log_error "This script must be run as root"
  exit 1
fi
# Function to check current certificate status
# Inspects $NGINX_SSL_DIR/$DOMAIN.crt and reports whether it covers the apex
# domain only, specific subdomains, or everything via a wildcard SAN.
# Returns: 0 for wildcard coverage, 1 for apex-only or missing certificate.
check_current_certificate() {
  log_info "Checking current SSL certificate for $DOMAIN..."

  if [[ -f "$NGINX_SSL_DIR/$DOMAIN.crt" ]]; then
    log_info "Current certificate found: $NGINX_SSL_DIR/$DOMAIN.crt"

    # Check certificate details
    echo "Certificate details:"
    openssl x509 -in "$NGINX_SSL_DIR/$DOMAIN.crt" -text -noout | grep -E "(Subject:|Not After|DNS:)"

    # Check if it's a wildcard or includes subdomains
    if openssl x509 -in "$NGINX_SSL_DIR/$DOMAIN.crt" -text -noout | grep -q "DNS:\*\.$DOMAIN"; then
      log_info "✅ Wildcard certificate detected - should cover all subdomains"
      return 0
    elif openssl x509 -in "$NGINX_SSL_DIR/$DOMAIN.crt" -text -noout | grep -q "DNS:api\.$DOMAIN"; then
      log_info "✅ Multi-domain certificate detected - checking coverage..."
      # NOTE(review): this branch logs per-subdomain coverage but falls
      # through without an explicit return — the caller gets the status of
      # the last loop iteration. Confirm whether missing subdomains should
      # produce `return 1` here.
      for subdomain in "${SUBDOMAINS[@]}"; do
        if openssl x509 -in "$NGINX_SSL_DIR/$DOMAIN.crt" -text -noout | grep -q "DNS:$subdomain\.$DOMAIN"; then
          log_info "$subdomain.$DOMAIN covered"
        else
          log_warn "$subdomain.$DOMAIN NOT covered"
        fi
      done
    else
      log_warn "⚠️ Certificate only covers $DOMAIN - subdomains need separate certificate"
      return 1
    fi
  else
    log_error "No SSL certificate found for $DOMAIN"
    return 1
  fi
}
# Function to show Cloudflare Origin Certificate instructions
# Prints step-by-step dashboard guidance for creating an Origin Certificate
# covering $DOMAIN and every entry in $SUBDOMAINS.
show_cloudflare_instructions() {
  log_info "Cloudflare Origin Certificate Setup Instructions"
  echo
  echo "To create a new Cloudflare Origin Certificate that covers all Fluxer subdomains:"
  echo
  echo "1. Go to Cloudflare Dashboard → SSL/TLS → Origin Server"
  echo "2. Click 'Create Certificate'"
  echo "3. Choose 'Let Cloudflare generate a private key and a CSR'"
  echo "4. Set hostnames to:"
  echo " - $DOMAIN"
  echo " - *.$DOMAIN"
  echo " OR specify each subdomain individually:"
  for subdomain in "${SUBDOMAINS[@]}"; do
    echo " - $subdomain.$DOMAIN"
  done
  echo "5. Choose certificate validity (15 years recommended)"
  echo "6. Click 'Create'"
  echo "7. Copy the certificate and private key"
  echo
  log_note "The wildcard option (*.st.vish.gg) is recommended as it covers all current and future subdomains"
}
# Function to install new certificate
# Installs a cert/key pair as $NGINX_SSL_DIR/$DOMAIN.{crt,key}, backing up
# any existing pair first and validating the result with openssl.
# Arguments: $1 = certificate file, $2 = private key file
# Returns: 0 on success, 1 on missing input or failed validation.
install_certificate() {
  local cert_file="$1"
  local key_file="$2"

  if [[ ! -f "$cert_file" ]] || [[ ! -f "$key_file" ]]; then
    log_error "Certificate or key file not found"
    return 1
  fi

  log_info "Installing new certificate..."

  # Backup existing certificate
  if [[ -f "$NGINX_SSL_DIR/$DOMAIN.crt" ]]; then
    # Single timestamp so the .crt/.key backups always pair up — two
    # separate $(date) calls could straddle a second boundary.
    local backup_suffix
    backup_suffix="backup.$(date +%Y%m%d_%H%M%S)"
    cp "$NGINX_SSL_DIR/$DOMAIN.crt" "$NGINX_SSL_DIR/$DOMAIN.crt.$backup_suffix"
    # The key can be absent even when the cert exists; don't fail on it.
    if [[ -f "$NGINX_SSL_DIR/$DOMAIN.key" ]]; then
      cp "$NGINX_SSL_DIR/$DOMAIN.key" "$NGINX_SSL_DIR/$DOMAIN.key.$backup_suffix"
    fi
    log_info "Existing certificate backed up"
  fi

  # Install new certificate
  cp "$cert_file" "$NGINX_SSL_DIR/$DOMAIN.crt"
  cp "$key_file" "$NGINX_SSL_DIR/$DOMAIN.key"

  # Set proper permissions (private key must not be world-readable)
  chmod 644 "$NGINX_SSL_DIR/$DOMAIN.crt"
  chmod 600 "$NGINX_SSL_DIR/$DOMAIN.key"

  log_info "✅ New certificate installed"

  # Verify the installed file parses as X.509.
  if openssl x509 -in "$NGINX_SSL_DIR/$DOMAIN.crt" -text -noout > /dev/null 2>&1; then
    log_info "✅ Certificate validation successful"
  else
    log_error "❌ Certificate validation failed"
    return 1
  fi
}
# Function to update nginx configuration for subdomains
# Verifies the Fluxer site config exists, then validates and reloads nginx.
# Returns: 0 on success, 1 when the site config is missing.
update_nginx_subdomain_config() {
  log_info "Updating nginx configuration for Fluxer subdomains..."

  # Check if Fluxer nginx config exists
  if [[ ! -f "$NGINX_SITES_DIR/fluxer" ]]; then
    log_error "Fluxer nginx configuration not found at $NGINX_SITES_DIR/fluxer"
    return 1
  fi

  log_info "✅ Fluxer nginx configuration found"

  # Test nginx configuration
  # NOTE(review): with `set -e`, a failing bare `nginx -t` exits the script
  # before the `$?` check below runs — the error branch may be dead code.
  nginx -t
  if [[ $? -eq 0 ]]; then
    log_info "✅ Nginx configuration is valid"
    systemctl reload nginx
    log_info "✅ Nginx reloaded successfully"
  else
    log_error "❌ Nginx configuration test failed"
    return 1
  fi
}
# Function to test SSL connectivity
# Sends a HEAD request to the apex domain and every subdomain, logging
# whether each answered with a plausible HTTP status.
test_ssl_connectivity() {
  log_info "Testing SSL connectivity for all domains..."

  # Test main domain
  log_info "Testing $DOMAIN..."
  # Any matched status means the TLS handshake + HTTP round-trip worked;
  # the backend may still legitimately 404.
  if curl -s -I --max-time 10 "https://$DOMAIN" | grep -q -E "(200|404)"; then
    log_info "$DOMAIN SSL working"
  else
    log_warn "⚠️ $DOMAIN SSL may have issues"
  fi

  # Test subdomains
  for subdomain in "${SUBDOMAINS[@]}"; do
    log_info "Testing $subdomain.$DOMAIN..."
    if curl -s -I --max-time 10 "https://$subdomain.$DOMAIN" | grep -q -E "(200|404|401|502)"; then
      log_info "$subdomain.$DOMAIN SSL working"
    else
      log_warn "⚠️ $subdomain.$DOMAIN SSL may have issues"
    fi
  done
}
# Function to show DNS requirements
# Prints the DNS records ("Grey Cloud" / DNS-only) Cloudflare origin
# certificates require for each Fluxer hostname.
show_dns_requirements() {
  log_info "DNS Requirements for Fluxer Subdomains"
  echo
  echo "Ensure the following DNS records exist in Cloudflare:"
  echo
  echo "Type | Name | Target | Proxy Status"
  echo "------|---------------------|---------------|-------------"
  echo "A | $DOMAIN | YOUR_SERVER_IP| Grey Cloud"
  echo "CNAME | api.$DOMAIN | $DOMAIN | Grey Cloud"
  echo "CNAME | events.$DOMAIN | $DOMAIN | Grey Cloud"
  echo "CNAME | files.$DOMAIN | $DOMAIN | Grey Cloud"
  echo "CNAME | voice.$DOMAIN | $DOMAIN | Grey Cloud"
  echo "CNAME | proxy.$DOMAIN | $DOMAIN | Grey Cloud"
  echo
  log_note "Grey Cloud (DNS-only) is required for origin certificates to work properly"
}
# Function to show certificate generation guide
# Walks through creating a Cloudflare Origin Certificate in the dashboard
# and installing it with this script's `install` subcommand.
show_certificate_guide() {
  echo
  echo "=== Cloudflare Origin Certificate Generation Guide ==="
  echo
  echo "Step 1: Access Cloudflare Dashboard"
  echo " - Go to https://dash.cloudflare.com"
  echo " - Select your domain: $DOMAIN"
  echo
  echo "Step 2: Navigate to SSL/TLS Settings"
  echo " - Click on 'SSL/TLS' in the left sidebar"
  echo " - Click on 'Origin Server' tab"
  echo
  echo "Step 3: Create Origin Certificate"
  echo " - Click 'Create Certificate' button"
  echo " - Select 'Let Cloudflare generate a private key and a CSR'"
  echo
  echo "Step 4: Configure Certificate"
  echo " - Hostnames: Enter the following (one per line):"
  echo " $DOMAIN"
  echo " *.$DOMAIN"
  echo " - Certificate Validity: 15 years (recommended)"
  echo " - Click 'Create'"
  echo
  echo "Step 5: Save Certificate Files"
  echo " - Copy the 'Origin Certificate' content to a file (e.g., /tmp/st.vish.gg.crt)"
  echo " - Copy the 'Private Key' content to a file (e.g., /tmp/st.vish.gg.key)"
  echo
  echo "Step 6: Install Certificate"
  echo " - Run: $0 install /tmp/st.vish.gg.crt /tmp/st.vish.gg.key"
  echo
  log_note "The wildcard certificate (*.st.vish.gg) will cover all current and future subdomains"
}
# Main menu
# Print the interactive menu (leading/trailing blank lines included).
show_menu() {
  cat <<'MENU'

=== Fluxer Cloudflare SSL Certificate Setup ===
1. Check current certificate status
2. Show certificate generation guide
3. Install new certificate (provide cert and key files)
4. Update nginx configuration
5. Test SSL connectivity
6. Show DNS requirements
7. Show Cloudflare instructions
8. Exit

MENU
}
# Main script logic
# Interactive menu when invoked without arguments; otherwise dispatch one
# subcommand: check | install <cert> <key> | test | dns | guide.
main() {
  log_info "Fluxer Cloudflare SSL Certificate Setup"
  log_info "Domain: $DOMAIN"
  log_info "Subdomains: ${SUBDOMAINS[*]}"

  if [[ $# -eq 0 ]]; then
    # Interactive mode
    while true; do
      show_menu
      read -p "Select an option (1-8): " choice
      case $choice in
        1)
          check_current_certificate
          ;;
        2)
          show_certificate_guide
          ;;
        3)
          read -p "Enter path to certificate file: " cert_file
          read -p "Enter path to private key file: " key_file
          install_certificate "$cert_file" "$key_file"
          ;;
        4)
          update_nginx_subdomain_config
          ;;
        5)
          test_ssl_connectivity
          ;;
        6)
          show_dns_requirements
          ;;
        7)
          show_cloudflare_instructions
          ;;
        8)
          log_info "Exiting..."
          exit 0
          ;;
        *)
          log_error "Invalid option. Please try again."
          ;;
      esac
      echo
      read -p "Press Enter to continue..."
    done
  else
    # Command line mode
    case "$1" in
      "check")
        check_current_certificate
        ;;
      "install")
        if [[ -z "$2" ]] || [[ -z "$3" ]]; then
          log_error "Usage: $0 install <cert_file> <key_file>"
          exit 1
        fi
        # Install then immediately validate/reload nginx.
        install_certificate "$2" "$3"
        update_nginx_subdomain_config
        ;;
      "test")
        test_ssl_connectivity
        ;;
      "dns")
        show_dns_requirements
        ;;
      "guide")
        show_certificate_guide
        ;;
      *)
        echo "Usage: $0 [check|install <cert> <key>|test|dns|guide]"
        echo "Run without arguments for interactive mode"
        exit 1
        ;;
    esac
  fi
}

# Run main function
main "$@"

304
scripts/setup-fluxer-ssl.sh Executable file
View File

@@ -0,0 +1,304 @@
#!/bin/bash
# Fluxer SSL Certificate Setup Script
# This script sets up SSL certificates for all Fluxer subdomains
# Supports both Let's Encrypt and Cloudflare DNS challenge

set -e

# Configuration
DOMAIN="st.vish.gg"
SUBDOMAINS=("api" "events" "files" "voice" "proxy")
NGINX_SSL_DIR="/etc/nginx/ssl"
NGINX_SITES_DIR="/etc/nginx/sites-available"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Colored "[LEVEL] message" helpers.
_emit() {
  echo -e "${1}[${2}]${NC} ${3}"
}

log_info()  { _emit "$GREEN"  "INFO"  "$1"; }
log_warn()  { _emit "$YELLOW" "WARN"  "$1"; }
log_error() { _emit "$RED"    "ERROR" "$1"; }
# Check if running as root
# Installing packages and writing under /etc/letsencrypt requires root.
if [[ $EUID -ne 0 ]]; then
  log_error "This script must be run as root"
  exit 1
fi
# Function to install certbot
# Installs certbot plus its nginx integration from the distro repositories.
install_certbot() {
  log_info "Installing certbot..."
  apt update
  apt install -y certbot python3-certbot-nginx
}
# Function to install cloudflare plugin
# Adds the certbot DNS-01 plugin needed for wildcard certificates.
install_cloudflare_plugin() {
  log_info "Installing Cloudflare DNS plugin..."
  apt install -y python3-certbot-dns-cloudflare
}
# Function to setup Let's Encrypt with HTTP challenge
# Requests one certificate covering the apex domain plus every subdomain in
# $SUBDOMAINS via certbot's nginx integration (HTTP-01 challenge).
setup_letsencrypt_http() {
  log_info "Setting up Let's Encrypt certificates with HTTP challenge..."

  # Build domain list
  DOMAIN_LIST="-d $DOMAIN"
  for subdomain in "${SUBDOMAINS[@]}"; do
    DOMAIN_LIST="$DOMAIN_LIST -d $subdomain.$DOMAIN"
  done

  log_info "Requesting certificates for: $DOMAIN_LIST"

  # Run certbot inside the `if` so a failure actually reaches the error
  # branch: under `set -e` a bare failing command would abort the script
  # before the old `[[ $? -eq 0 ]]` check ever ran.
  # $DOMAIN_LIST is intentionally unquoted — it holds multiple words
  # ("-d example.com -d api.example.com ...").
  if certbot --nginx $DOMAIN_LIST --non-interactive --agree-tos --email admin@$DOMAIN; then
    log_info "✅ SSL certificates successfully generated!"
    setup_auto_renewal
  else
    log_error "❌ Failed to generate SSL certificates"
    exit 1
  fi
}
# Function to setup Let's Encrypt with Cloudflare DNS challenge
# Requests a wildcard certificate (apex + *.$DOMAIN) via the DNS-01
# challenge. Arguments: $1 = Cloudflare API token with DNS edit rights.
setup_letsencrypt_cloudflare() {
  local api_token="$1"
  if [[ -z "$api_token" ]]; then
    log_error "Cloudflare API token is required"
    exit 1
  fi

  log_info "Setting up Let's Encrypt certificates with Cloudflare DNS challenge..."

  # Create credentials file (mode 600 — it contains a live API token).
  mkdir -p /etc/letsencrypt
  cat > /etc/letsencrypt/cloudflare.ini << EOF
dns_cloudflare_api_token = $api_token
EOF
  chmod 600 /etc/letsencrypt/cloudflare.ini

  # Request wildcard certificate. Run certbot inside the `if` so a failure
  # reaches the error branch: under `set -e` a bare failing command would
  # abort the script before the old `[[ $? -eq 0 ]]` check ran.
  if certbot certonly \
    --dns-cloudflare \
    --dns-cloudflare-credentials /etc/letsencrypt/cloudflare.ini \
    --non-interactive \
    --agree-tos \
    --email admin@$DOMAIN \
    -d $DOMAIN \
    -d "*.$DOMAIN"; then
    log_info "✅ Wildcard SSL certificate successfully generated!"
    update_nginx_config
    setup_auto_renewal
  else
    log_error "❌ Failed to generate SSL certificate"
    exit 1
  fi
}
# Function to update nginx configuration with new certificates
# Copies the freshly issued Let's Encrypt files into nginx's SSL directory
# under the names the site configs expect ($DOMAIN.crt / $DOMAIN.key).
update_nginx_config() {
  log_info "Updating nginx configuration..."

  # Copy certificates to nginx SSL directory
  mkdir -p "$NGINX_SSL_DIR"

  if [[ -f "/etc/letsencrypt/live/$DOMAIN/fullchain.pem" ]]; then
    cp "/etc/letsencrypt/live/$DOMAIN/fullchain.pem" "$NGINX_SSL_DIR/$DOMAIN.crt"
    cp "/etc/letsencrypt/live/$DOMAIN/privkey.pem" "$NGINX_SSL_DIR/$DOMAIN.key"

    # Set proper permissions (private key readable by root only)
    chmod 644 "$NGINX_SSL_DIR/$DOMAIN.crt"
    chmod 600 "$NGINX_SSL_DIR/$DOMAIN.key"

    log_info "✅ SSL certificates copied to nginx directory"
  else
    log_warn "Certificate files not found in expected location"
  fi
}
# Function to setup auto-renewal
# Installs a daily certbot renewal cron job. Idempotent: the entry is only
# appended when no "certbot renew" line already exists, so re-running this
# setup script no longer stacks duplicate cron entries.
setup_auto_renewal() {
  log_info "Setting up automatic certificate renewal..."

  local renew_cmd="0 12 * * * /usr/bin/certbot renew --quiet --post-hook 'systemctl reload nginx'"
  # Guard against duplicates on repeated runs; `crontab -l` may fail when
  # no crontab exists yet, hence the 2>/dev/null.
  if ! crontab -l 2>/dev/null | grep -F -q "certbot renew"; then
    (crontab -l 2>/dev/null; echo "$renew_cmd") | crontab -
  fi

  log_info "✅ Auto-renewal configured (daily check at 12:00)"
}
# Function to test nginx configuration
# Runs `nginx -t` and reloads nginx when the configuration is valid.
test_nginx_config() {
  log_info "Testing nginx configuration..."

  # NOTE(review): with `set -e`, a failing bare `nginx -t` aborts the
  # script before the `$?` check below runs — the error branch may be
  # dead code.
  nginx -t
  if [[ $? -eq 0 ]]; then
    log_info "✅ Nginx configuration is valid"
    systemctl reload nginx
    log_info "✅ Nginx reloaded successfully"
  else
    log_error "❌ Nginx configuration test failed"
    exit 1
  fi
}
# Function to verify SSL certificates
# Probes each hostname over HTTPS and logs whether it answered.
verify_ssl() {
  log_info "Verifying SSL certificates..."

  # Test main domain
  # NOTE(review): HTTP/2 status lines read "HTTP/2 200" without "OK", so
  # this grep can report a false warning on h2 responses — confirm.
  if curl -s -I "https://$DOMAIN" | grep -q "200 OK"; then
    log_info "$DOMAIN SSL certificate working"
  else
    log_warn "⚠️ $DOMAIN SSL certificate may have issues"
  fi

  # Test subdomains
  for subdomain in "${SUBDOMAINS[@]}"; do
    if curl -s -I "https://$subdomain.$DOMAIN" | grep -q -E "(200|404|401)"; then
      log_info "$subdomain.$DOMAIN SSL certificate working"
    else
      log_warn "⚠️ $subdomain.$DOMAIN SSL certificate may have issues"
    fi
  done
}
# Function to show current certificate status
# Lists certbot-managed certificates and the certificate nginx serves.
show_certificate_status() {
  log_info "Current certificate status:"

  if command -v certbot &> /dev/null; then
    certbot certificates
  else
    log_warn "Certbot not installed"
  fi

  # Check nginx SSL files
  if [[ -f "$NGINX_SSL_DIR/$DOMAIN.crt" ]]; then
    log_info "Nginx SSL certificate found: $NGINX_SSL_DIR/$DOMAIN.crt"
    openssl x509 -in "$NGINX_SSL_DIR/$DOMAIN.crt" -text -noout | grep -E "(Subject:|Not After)"
  else
    log_warn "No nginx SSL certificate found"
  fi
}
# Main menu
# Print the interactive menu (leading/trailing blank lines included).
show_menu() {
  cat <<'MENU'

=== Fluxer SSL Certificate Setup ===
1. Install certbot
2. Setup Let's Encrypt (HTTP challenge)
3. Setup Let's Encrypt (Cloudflare DNS)
4. Show certificate status
5. Test nginx configuration
6. Verify SSL certificates
7. Exit

MENU
}
# Main script logic
# Interactive menu when invoked without arguments; otherwise dispatch one
# subcommand: install | http | cloudflare <token> | status | test | verify.
main() {
  log_info "Fluxer SSL Certificate Setup Script"
  log_info "Domain: $DOMAIN"
  log_info "Subdomains: ${SUBDOMAINS[*]}"

  if [[ $# -eq 0 ]]; then
    # Interactive mode
    while true; do
      show_menu
      read -p "Select an option (1-7): " choice
      case $choice in
        1)
          install_certbot
          install_cloudflare_plugin
          ;;
        2)
          setup_letsencrypt_http
          test_nginx_config
          verify_ssl
          ;;
        3)
          # -s keeps the API token off the terminal echo.
          read -p "Enter Cloudflare API token: " -s cf_token
          echo
          setup_letsencrypt_cloudflare "$cf_token"
          test_nginx_config
          verify_ssl
          ;;
        4)
          show_certificate_status
          ;;
        5)
          test_nginx_config
          ;;
        6)
          verify_ssl
          ;;
        7)
          log_info "Exiting..."
          exit 0
          ;;
        *)
          log_error "Invalid option. Please try again."
          ;;
      esac
      echo
      read -p "Press Enter to continue..."
    done
  else
    # Command line mode
    case "$1" in
      "install")
        install_certbot
        install_cloudflare_plugin
        ;;
      "http")
        setup_letsencrypt_http
        test_nginx_config
        verify_ssl
        ;;
      "cloudflare")
        if [[ -z "$2" ]]; then
          log_error "Cloudflare API token required: $0 cloudflare <api_token>"
          exit 1
        fi
        setup_letsencrypt_cloudflare "$2"
        test_nginx_config
        verify_ssl
        ;;
      "status")
        show_certificate_status
        ;;
      "test")
        test_nginx_config
        ;;
      "verify")
        verify_ssl
        ;;
      *)
        echo "Usage: $0 [install|http|cloudflare <token>|status|test|verify]"
        echo "Run without arguments for interactive mode"
        exit 1
        ;;
    esac
  fi
}

# Run main function
main "$@"

479
scripts/setup-stoatchat.sh Executable file
View File

@@ -0,0 +1,479 @@
#!/bin/bash
# Stoatchat Setup Script
# Automated deployment of Revolt chat backend for st.vish.gg

# Strict mode: abort on errors, unset variables, and pipeline failures.
set -euo pipefail

# Configuration
STOATCHAT_DIR="/opt/stoatchat"  # install prefix for the repository checkout
DOMAIN="st.vish.gg"             # public domain for all services
REPO_URL="https://github.com/stoatchat/stoatchat.git"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Logging functions
# Colored "[LEVEL] message" helpers, all writing to stdout.
_stamp() {
  echo -e "${1}[${2}]${NC} ${3}"
}

log_info()    { _stamp "$BLUE"   "INFO"    "$1"; }
log_success() { _stamp "$GREEN"  "SUCCESS" "$1"; }
log_warning() { _stamp "$YELLOW" "WARNING" "$1"; }
log_error()   { _stamp "$RED"    "ERROR"   "$1"; }
# Check if running as root
# Refuse to run as root: Docker access is granted via group membership and
# the checkout should be owned by a regular user.
check_root() {
  if (( EUID == 0 )); then
    log_error "This script should not be run as root"
    exit 1
  fi
}
# Check system requirements
# Validates OS family, RAM, and free disk space before installing anything.
# Exits non-zero only when the host definitely cannot run Stoatchat.
check_requirements() {
  log_info "Checking system requirements..."

  # Check OS
  if [[ ! -f /etc/os-release ]]; then
    log_error "Cannot determine OS version"
    exit 1
  fi

  # Pulls $ID (distro identifier) into scope for the check below.
  source /etc/os-release
  if [[ "$ID" != "ubuntu" && "$ID" != "debian" ]]; then
    log_warning "This script is designed for Ubuntu/Debian. Proceeding anyway..."
  fi

  # Check available memory (whole gibibytes, as reported by free -g)
  local mem_gb=$(free -g | awk '/^Mem:/{print $2}')
  if [[ $mem_gb -lt 4 ]]; then
    log_warning "Less than 4GB RAM detected. Stoatchat may not perform well."
  fi

  # Check available disk space on the root filesystem
  local disk_gb=$(df -BG / | awk 'NR==2{print $4}' | sed 's/G//')
  if [[ $disk_gb -lt 20 ]]; then
    log_error "Less than 20GB free disk space. Cannot proceed."
    exit 1
  fi

  log_success "System requirements check passed"
}
# Install system dependencies
# Installs build tooling, then Docker, Docker Compose, and mise — each only
# when not already present on the host.
install_dependencies() {
  log_info "Installing system dependencies..."

  sudo apt update
  sudo apt install -y \
    curl \
    wget \
    git \
    build-essential \
    pkg-config \
    libssl-dev \
    ca-certificates \
    gnupg \
    lsb-release \
    jq

  # Install Docker if not present
  if ! command -v docker &> /dev/null; then
    log_info "Installing Docker..."
    curl -fsSL https://get.docker.com -o get-docker.sh
    sudo sh get-docker.sh
    # Let the invoking user talk to the Docker daemon without sudo.
    sudo usermod -aG docker $USER
    rm get-docker.sh
    log_success "Docker installed"
  else
    log_info "Docker already installed"
  fi

  # Install Docker Compose if not present
  if ! command -v docker-compose &> /dev/null; then
    log_info "Installing Docker Compose..."
    sudo curl -L "https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    log_success "Docker Compose installed"
  else
    log_info "Docker Compose already installed"
  fi

  # Install mise (Rust toolchain manager)
  if ! command -v mise &> /dev/null; then
    log_info "Installing mise..."
    curl https://mise.run | sh
    # Activate mise for future shells and make it available right now.
    echo 'eval "$(~/.local/bin/mise activate bash)"' >> ~/.bashrc
    export PATH="$HOME/.local/bin:$PATH"
    log_success "mise installed"
  else
    log_info "mise already installed"
  fi

  log_success "Dependencies installed"
}
# Clone and setup stoatchat
# Clones (or updates) the repository, prepares the LiveKit config and
# writes the production Revolt.overrides.toml.
# Globals read: STOATCHAT_DIR, REPO_URL, DOMAIN, USER
setup_stoatchat() {
    log_info "Setting up Stoatchat..."

    # Create the install directory, owned by the invoking user.
    sudo mkdir -p "$STOATCHAT_DIR"
    sudo chown "$USER:$USER" "$STOATCHAT_DIR"

    # Clone or update the repository. cd is checked so git never runs in
    # the wrong directory.
    if [[ ! -d "$STOATCHAT_DIR/.git" ]]; then
        log_info "Cloning Stoatchat repository..."
        git clone "$REPO_URL" "$STOATCHAT_DIR"
    else
        log_info "Updating Stoatchat repository..."
        cd "$STOATCHAT_DIR" || exit 1
        git pull origin main
    fi
    cd "$STOATCHAT_DIR" || exit 1

    # Setup LiveKit configuration (only on first run; never overwritten).
    if [[ ! -f "livekit.yml" ]]; then
        log_info "Creating LiveKit configuration..."
        cp livekit.example.yml livekit.yml
        # Point LiveKit at the public voice hostname.
        sed -i "s/localhost:7880/voice.$DOMAIN/g" livekit.yml
        # NOTE(review): this substitution is a no-op (pattern == replacement);
        # presumably a different redis host was intended — confirm.
        sed -i "s/redis_host: localhost/redis_host: localhost/g" livekit.yml
        sed -i "s/redis_port: 6379/redis_port: 6380/g" livekit.yml
    fi

    # Create production configuration. Unquoted EOF so $DOMAIN expands.
    # WARNING: ships default minio credentials and an SMTP password
    # placeholder — replace these before exposing the instance.
    log_info "Creating production configuration..."
    cat > Revolt.overrides.toml << EOF
[api]
url = "https://api.$DOMAIN"
[events]
url = "wss://events.$DOMAIN"
[autumn]
url = "https://files.$DOMAIN"
[january]
url = "https://proxy.$DOMAIN"
[livekit]
url = "wss://voice.$DOMAIN"
[database]
mongodb = "mongodb://localhost:27017/revolt"
[redis]
url = "redis://localhost:6380"
[s3]
endpoint = "http://localhost:14009"
access_key_id = "minioadmin"
secret_access_key = "minioadmin"
bucket = "revolt-files"
region = "us-east-1"
[rabbitmq]
url = "amqp://guest:guest@localhost:5672"
[email]
smtp_host = "smtp.gmail.com"
smtp_port = 587
smtp_username = "your-email@example.com"
smtp_password = "REDACTED_PASSWORD"
from_address = "your-email@example.com"
smtp_tls = true
[features]
registration = true
email_verification = false
invite_only = false
EOF
    log_success "Stoatchat setup completed"
}
# Start supporting services
# Brings up the docker-compose infrastructure and verifies each container
# reports "Up"; exits 1 (after dumping logs) if any service failed.
start_infrastructure() {
    log_info "Starting supporting services..."
    cd "$STOATCHAT_DIR" || exit 1

    # Start Docker services
    docker-compose up -d

    # Fixed grace period; the per-service check below is the real gate.
    log_info "Waiting for services to be ready..."
    sleep 30

    # Check service health
    local services=("database" "redis" "minio" "rabbitmq")
    local service
    for service in "${services[@]}"; do
        if docker-compose ps | grep -q "stoatchat-$service.*Up"; then
            log_success "$service is running"
        else
            log_error "$service failed to start"
            docker-compose logs "stoatchat-$service"
            exit 1
        fi
    done
    log_success "Infrastructure services started"
}
# Build stoatchat
# Runs the mise-managed build; exits 1 on failure.
build_stoatchat() {
    log_info "Building Stoatchat..."
    cd "$STOATCHAT_DIR" || exit 1

    # Activate mise so the pinned toolchain is on PATH for the build.
    export PATH="$HOME/.local/bin:$PATH"
    eval "$(mise activate bash)"

    # Build the project
    if mise run build; then
        log_success "Stoatchat built successfully"
    else
        log_error "Failed to build Stoatchat"
        exit 1
    fi
}
# Start stoatchat services
# Launches every backend service in the background, writing one log file
# and one PID file per service under $STOATCHAT_DIR/logs/, then probes
# the known listen ports as a best-effort readiness check.
start_stoatchat_services() {
    log_info "Starting Stoatchat services..."
    cd "$STOATCHAT_DIR" || exit 1

    # Create logs directory
    mkdir -p logs

    # Activate mise environment for the service commands.
    export PATH="$HOME/.local/bin:$PATH"
    eval "$(mise activate bash)"

    # The seven duplicated start stanzas are collapsed into one
    # data-driven loop: "mise service:<name>" per service.
    local svc
    for svc in api events files proxy gifbox pushd crond; do
        mise "service:$svc" > "logs/$svc.log" 2>&1 &
        echo $! > "logs/$svc.pid"
    done

    # Give the services a moment to bind their ports.
    sleep 10

    # Best-effort port probe; failure is only a warning because services
    # may legitimately still be starting up.
    local ports=(14702 14703 14704 14705 14706)
    local port
    for port in "${ports[@]}"; do
        if ss -tlnp | grep -q ":$port "; then
            log_success "Service on port $port is running"
        else
            log_warning "Service on port $port may not be ready yet"
        fi
    done
    log_success "Stoatchat services started"
}
# Test the installation
# Smoke-tests the API and file-service endpoints by checking the JSON
# identification field each one returns.
# Returns: 0 when both endpoints respond, 1 otherwise.
test_installation() {
    log_info "Testing installation..."

    # API must report a ".revolt" field on its root route.
    if ! curl -s http://localhost:14702/0.8/ | jq -e '.revolt' > /dev/null; then
        log_error "API is not responding"
        return 1
    fi
    log_success "API is responding correctly"

    # File service (autumn) must report an ".autumn" field.
    if ! curl -s http://localhost:14704/ | jq -e '.autumn' > /dev/null; then
        log_error "File service is not responding"
        return 1
    fi
    log_success "File service is responding correctly"

    log_success "Installation test passed"
}
# Create systemd services
# Writes the stoatchat systemd unit plus start/stop helper scripts and
# enables the unit. The unit heredoc is unquoted so $USER, $STOATCHAT_DIR
# and $HOME expand at install time; the helper-script heredocs are quoted
# and written verbatim.
create_systemd_services() {
    log_info "Creating systemd services..."

    # Create stoatchat unit
    sudo tee /etc/systemd/system/stoatchat.service > /dev/null << EOF
[Unit]
Description=Stoatchat (Revolt Chat Backend)
After=network.target docker.service
Requires=docker.service
[Service]
Type=forking
User=$USER
WorkingDirectory=$STOATCHAT_DIR
Environment=PATH=$HOME/.local/bin:/usr/local/bin:/usr/bin:/bin
ExecStart=$STOATCHAT_DIR/scripts/start-services.sh
ExecStop=$STOATCHAT_DIR/scripts/stop-services.sh
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF

    # BUG FIX: ensure the scripts directory exists before redirecting into
    # it — previously the cat redirections failed on a checkout without
    # a scripts/ directory.
    mkdir -p "$STOATCHAT_DIR/scripts"

    # Create start script.
    # NOTE(review): the helper scripts hardcode /opt/stoatchat while the
    # unit uses $STOATCHAT_DIR — these diverge if STOATCHAT_DIR changes.
    cat > "$STOATCHAT_DIR/scripts/start-services.sh" << 'EOF'
#!/bin/bash
cd /opt/stoatchat
export PATH="$HOME/.local/bin:$PATH"
eval "$(mise activate bash)"
# Start infrastructure
docker-compose up -d
# Wait for infrastructure
sleep 30
# Start stoatchat services
mkdir -p logs
mise service:api > logs/api.log 2>&1 &
echo $! > logs/api.pid
mise service:events > logs/events.log 2>&1 &
echo $! > logs/events.pid
mise service:files > logs/files.log 2>&1 &
echo $! > logs/files.pid
mise service:proxy > logs/proxy.log 2>&1 &
echo $! > logs/proxy.pid
mise service:gifbox > logs/gifbox.log 2>&1 &
echo $! > logs/gifbox.pid
mise service:pushd > logs/pushd.log 2>&1 &
echo $! > logs/pushd.pid
mise service:crond > logs/crond.log 2>&1 &
echo $! > logs/crond.pid
EOF

    # Create stop script
    cat > "$STOATCHAT_DIR/scripts/stop-services.sh" << 'EOF'
#!/bin/bash
cd /opt/stoatchat
# Stop stoatchat services
if [[ -f logs/api.pid ]]; then kill $(cat logs/api.pid) 2>/dev/null || true; fi
if [[ -f logs/events.pid ]]; then kill $(cat logs/events.pid) 2>/dev/null || true; fi
if [[ -f logs/files.pid ]]; then kill $(cat logs/files.pid) 2>/dev/null || true; fi
if [[ -f logs/proxy.pid ]]; then kill $(cat logs/proxy.pid) 2>/dev/null || true; fi
if [[ -f logs/gifbox.pid ]]; then kill $(cat logs/gifbox.pid) 2>/dev/null || true; fi
if [[ -f logs/pushd.pid ]]; then kill $(cat logs/pushd.pid) 2>/dev/null || true; fi
if [[ -f logs/crond.pid ]]; then kill $(cat logs/crond.pid) 2>/dev/null || true; fi
# Stop infrastructure
docker-compose down
EOF

    # Make scripts executable
    chmod +x "$STOATCHAT_DIR"/scripts/*.sh

    # Enable service
    sudo systemctl daemon-reload
    sudo systemctl enable stoatchat.service
    log_success "Systemd services created"
}
# Print final instructions
# Emits the post-install summary and next-step checklist for the operator.
print_final_instructions() {
    log_success "Stoatchat installation completed!"
    # Single here-doc instead of a ladder of echo calls; the delimiter is
    # unquoted so $STOATCHAT_DIR and $DOMAIN expand exactly as before.
    cat << EOF

🎉 Installation Summary:
 • Stoatchat installed in: $STOATCHAT_DIR
 • Domain configured for: $DOMAIN
 • Services running on ports: 14702-14706

🔧 Next Steps:
 1. Set up Gmail App Password:
 - Go to Google Account settings
 - Enable 2-Factor Authentication
 - Generate App Password for 'Mail'
 - Update GMAIL_APP_PASSWORD_REQUIRED in Revolt.overrides.toml

 2. Configure Cloudflare Tunnel for external access:
 - api.$DOMAIN → localhost:14702
 - events.$DOMAIN → localhost:14703
 - files.$DOMAIN → localhost:14704
 - proxy.$DOMAIN → localhost:14705

 3. Set up the web client at $DOMAIN

 4. Configure LiveKit for voice chat (optional)

📊 Service Management:
 • Start: sudo systemctl start stoatchat
 • Stop: sudo systemctl stop stoatchat
 • Status: sudo systemctl status stoatchat
 • Logs: journalctl -u stoatchat -f

🔍 Manual Service Management:
 • View logs: tail -f $STOATCHAT_DIR/logs/*.log
 • Test API: curl http://localhost:14702/0.8/
 • Check ports: ss -tlnp | grep revolt

📚 Documentation: $STOATCHAT_DIR/README.md

EOF
}
# Main execution
# Runs the full install pipeline; step order matters (dependencies before
# build, infrastructure before services). Systemd units are only written
# once the smoke test has passed.
main() {
    log_info "Starting Stoatchat installation for $DOMAIN"
    check_root
    check_requirements
    install_dependencies
    setup_stoatchat
    start_infrastructure
    build_stoatchat
    start_stoatchat_services
    # Guard clause instead of if/else: bail out before persisting units.
    if ! test_installation; then
        log_error "Installation test failed. Please check the logs."
        exit 1
    fi
    create_systemd_services
    print_final_instructions
}
# Run main function
main "$@"

155
scripts/sync-dokuwiki-simple.sh Executable file
View File

@@ -0,0 +1,155 @@
#!/bin/bash
# Simple DokuWiki Synchronization Script
# Converts the repository's docs/ markdown into DokuWiki page files under
# /tmp/dokuwiki_sync for later transfer to the Atlantis DokuWiki host.
echo "📚 Creating DokuWiki structure..."
# Create local DokuWiki structure
# Start from a clean staging tree so stale pages are never carried over.
rm -rf /tmp/dokuwiki_sync
mkdir -p /tmp/dokuwiki_sync/homelab
# Function to convert markdown to DokuWiki format
# Arguments: $1 - input markdown file, $2 - output DokuWiki page file
# Line-based conversion: headings, emphasis, inline code and lists.
# Fixes over the previous version:
#  - heading wrap rules are anchored in one step; the old unanchored
#    two-step rules appended garbage ("====== T ====== ===== ==== ...").
#  - italic conversion no longer mangles **bold** (identical in DokuWiki).
#  - fence lines (```) branch past all rules instead of being corrupted
#    by the inline-code substitution.
# NOTE(review): content *inside* fenced blocks is still transformed; a
# stateful converter (awk/pandoc) would be needed to protect it fully.
convert_md_to_dokuwiki() {
    local input_file="$1"
    local output_file="$2"
    sed -e '/^```/b' \
        -e 's/^###### \(.*\)$/= \1 =/' \
        -e 's/^##### \(.*\)$/== \1 ==/' \
        -e 's/^#### \(.*\)$/=== \1 ===/' \
        -e 's/^### \(.*\)$/==== \1 ====/' \
        -e 's/^## \(.*\)$/===== \1 =====/' \
        -e 's/^# \(.*\)$/====== \1 ======/' \
        -e 's/\(^\|[^*]\)\*\([^*][^*]*\)\*\([^*]\)/\1\/\/\2\/\/\3/g' \
        -e 's/\(^\|[^*]\)\*\([^*][^*]*\)\*$/\1\/\/\2\/\//' \
        -e 's/`\([^`]*\)`/%%\1%%/g' \
        -e 's/^\* / \* /' \
        -e 's/^- / \* /' \
        -e 's/^\([0-9][0-9]*\)\. / - /' \
        "$input_file" > "$output_file"
}
# Create main start page
# Quoted 'EOF' delimiter: the page body below is written verbatim
# (no variable or command expansion).
cat > /tmp/dokuwiki_sync/homelab/start.txt << 'EOF'
====== Homelab Documentation ======
===== Organized Documentation Structure =====
==== 🔧 Administration ====
* [[homelab:admin:start|Administration Overview]]
* [[homelab:admin:gitops-comprehensive-guide|GitOps Comprehensive Guide]]
* [[homelab:admin:deployment-documentation|Deployment Documentation]]
* [[homelab:admin:operational-status|Operational Status]]
* [[homelab:admin:development|Development Guide]]
==== 🏗️ Infrastructure ====
* [[homelab:infrastructure:start|Infrastructure Overview]]
* [[homelab:infrastructure:ssh-guide|SSH Access Guide]]
* [[homelab:infrastructure:networking|Networking Guide]]
* [[homelab:infrastructure:monitoring|Monitoring Setup]]
==== 🎯 Services ====
* [[homelab:services:start|Services Overview]]
* [[homelab:services:service-index|Service Index]]
* [[homelab:services:dashboard-setup|Dashboard Setup]]
==== 🚀 Getting Started ====
* [[homelab:getting-started:start|Getting Started Overview]]
* [[homelab:getting-started:beginner-quickstart|Beginner Quickstart]]
* [[homelab:getting-started:what-is-homelab|What Is Homelab]]
==== 🛠️ Troubleshooting ====
* [[homelab:troubleshooting:start|Troubleshooting Overview]]
* [[homelab:troubleshooting:common-issues|Common Issues]]
* [[homelab:troubleshooting:emergency-guide|Emergency Guide]]
===== System Information =====
**Repository**: https://git.vish.gg/Vish/homelab
**Wiki**: https://git.vish.gg/Vish/homelab/wiki
**DokuWiki**: http://atlantis.vish.local:8399/doku.php?id=homelab:start
Last updated: February 2026
EOF
processed_count=0
# Process admin docs
# admin/ gets a hand-written landing page plus one converted page per
# markdown file.
if [[ -d "docs/admin" ]]; then
    mkdir -p /tmp/dokuwiki_sync/homelab/admin
    # Create admin start page (quoted 'EOF': written verbatim)
    cat > /tmp/dokuwiki_sync/homelab/admin/start.txt << 'EOF'
====== Administration ======
===== System Management & Operations =====
==== Core Administration ====
* [[homelab:admin:gitops-comprehensive-guide|GitOps Comprehensive Guide]] - Complete deployment procedures
* [[homelab:admin:deployment-documentation|Deployment Documentation]] - Step-by-step deployment
* [[homelab:admin:operational-status|Operational Status]] - Current system status
* [[homelab:admin:development|Development Guide]] - Development procedures
==== Documentation & Integration ====
* [[homelab:admin:agents|Agent Memory]] - AI agent context
* [[homelab:admin:dokuwiki-integration|DokuWiki Integration]] - External wiki setup
* [[homelab:admin:gitea-wiki-integration|Gitea Wiki Integration]] - Native wiki setup
[[homelab:start|← Back to Home]]
EOF
    # Convert admin markdown files
    for file in docs/admin/*.md; do
        if [[ -f "$file" ]]; then
            filename=$(basename "$file" .md)
            dokuwiki_name=$(echo "$filename" | tr '[:upper:]' '[:lower:]' | sed 's/_/-/g')
            convert_md_to_dokuwiki "$file" "/tmp/dokuwiki_sync/homelab/admin/${dokuwiki_name}.txt"
            # Avoid ((i++)): it returns exit status 1 when i is 0, which
            # would abort the script if 'set -e' is ever enabled.
            processed_count=$((processed_count + 1))
            echo "✅ Converted: admin/$filename"
        fi
    done
fi
# Process other directories
# Each remaining docs/ subtree gets a minimal generated landing page and
# one converted page per markdown file.
for dir in infrastructure services getting-started troubleshooting security hardware advanced runbooks; do
    if [[ -d "docs/$dir" ]]; then
        mkdir -p "/tmp/dokuwiki_sync/homelab/$dir"
        # Create start page for each directory (unquoted EOF: $dir expands)
        cat > "/tmp/dokuwiki_sync/homelab/$dir/start.txt" << EOF
====== $(echo "$dir" | tr '[:lower:]' '[:upper:]' | tr '-' ' ') ======
===== Documentation for $dir =====
[[homelab:start|← Back to Home]]
EOF
        for file in "docs/$dir"/*.md; do
            if [[ -f "$file" ]]; then
                filename=$(basename "$file" .md)
                dokuwiki_name=$(echo "$filename" | tr '[:upper:]' '[:lower:]' | sed 's/_/-/g')
                convert_md_to_dokuwiki "$file" "/tmp/dokuwiki_sync/homelab/$dir/${dokuwiki_name}.txt"
                # Avoid ((i++)): returns status 1 when i is 0 (set -e hazard).
                processed_count=$((processed_count + 1))
                echo "✅ Converted: $dir/$filename"
            fi
        done
    fi
done
echo ""
# Final summary for the operator; the staged tree must then be copied to
# the Atlantis DokuWiki pages directory by hand (or rsync).
echo "📊 DokuWiki Sync Summary:"
echo "✅ Files processed: $processed_count"
echo "📁 Structure created in: /tmp/dokuwiki_sync/homelab/"
echo ""
echo "📋 Ready to transfer to Atlantis server"
echo "🌐 DokuWiki will be available at: http://atlantis.vish.local:8399/doku.php?id=homelab:start"
echo ""
echo "✅ DokuWiki sync preparation completed!"

237
scripts/sync-dokuwiki.sh Executable file
View File

@@ -0,0 +1,237 @@
#!/bin/bash
# DokuWiki Synchronization Script
# Syncs organized repository documentation to DokuWiki format
set -e
# Configuration
# Target DokuWiki host and its pages directory for the homelab namespace;
# DOCS_DIR is the repository-relative source of the markdown docs.
DOKUWIKI_HOST="atlantis.vish.local"
DOKUWIKI_PATH="/opt/dokuwiki/data/pages/homelab"
DOCS_DIR="docs"
# Colors for output (ANSI escape sequences used with echo -e)
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
echo -e "${BLUE}📚 Syncing organized documentation to DokuWiki...${NC}"
# Function to convert markdown to DokuWiki format
# Arguments: $1 - input markdown file, $2 - output DokuWiki page file
# Line-based conversion: headings, emphasis, inline code and lists.
# Fixes over the previous version:
#  - heading wrap rules are anchored in one step; the old unanchored
#    two-step rules appended garbage ("====== T ====== ===== ==== ...").
#  - italic conversion no longer mangles **bold** (identical in DokuWiki).
#  - fence lines (```) branch past all rules instead of being corrupted
#    by the inline-code substitution.
# NOTE(review): content *inside* fenced blocks is still transformed; a
# stateful converter (awk/pandoc) would be needed to protect it fully.
convert_md_to_dokuwiki() {
    local input_file="$1"
    local output_file="$2"
    sed -e '/^```/b' \
        -e 's/^###### \(.*\)$/= \1 =/' \
        -e 's/^##### \(.*\)$/== \1 ==/' \
        -e 's/^#### \(.*\)$/=== \1 ===/' \
        -e 's/^### \(.*\)$/==== \1 ====/' \
        -e 's/^## \(.*\)$/===== \1 =====/' \
        -e 's/^# \(.*\)$/====== \1 ======/' \
        -e 's/\(^\|[^*]\)\*\([^*][^*]*\)\*\([^*]\)/\1\/\/\2\/\/\3/g' \
        -e 's/\(^\|[^*]\)\*\([^*][^*]*\)\*$/\1\/\/\2\/\//' \
        -e 's/`\([^`]*\)`/%%\1%%/g' \
        -e 's/^\* / \* /' \
        -e 's/^- / \* /' \
        -e 's/^\([0-9][0-9]*\)\. / - /' \
        "$input_file" > "$output_file"
}
# Create local DokuWiki structure
echo -e "${BLUE}📁 Creating local DokuWiki structure...${NC}"
mkdir -p /tmp/dokuwiki_sync/homelab
# Create main start page
# Quoted 'EOF' delimiter: the page body is written verbatim (no expansion);
# the literal "[[date]]" below is DokuWiki markup, not a shell construct.
cat > /tmp/dokuwiki_sync/homelab/start.txt << 'EOF'
====== Homelab Documentation ======
===== Organized Documentation Structure =====
==== 🔧 Administration ====
* [[homelab:admin:start|Administration Overview]]
* [[homelab:admin:gitops_guide|GitOps Deployment Guide]]
* [[homelab:admin:deployment_guide|Deployment Documentation]]
* [[homelab:admin:operational_status|Operational Status]]
* [[homelab:admin:development|Development Guide]]
==== 🏗️ Infrastructure ====
* [[homelab:infrastructure:start|Infrastructure Overview]]
* [[homelab:infrastructure:ssh_guide|SSH Access Guide]]
* [[homelab:infrastructure:networking|Networking Guide]]
* [[homelab:infrastructure:monitoring|Monitoring Setup]]
==== 🎯 Services ====
* [[homelab:services:start|Services Overview]]
* [[homelab:services:service_index|Service Index]]
* [[homelab:services:dashboard_setup|Dashboard Setup]]
* [[homelab:services:arr_suite|ARR Suite]]
==== 🚀 Getting Started ====
* [[homelab:getting-started:start|Getting Started Overview]]
* [[homelab:getting-started:quickstart|Beginner Quickstart]]
* [[homelab:getting-started:what_is_homelab|What Is Homelab]]
* [[homelab:getting-started:prerequisites|Prerequisites]]
==== 🛠️ Troubleshooting ====
* [[homelab:troubleshooting:start|Troubleshooting Overview]]
* [[homelab:troubleshooting:common_issues|Common Issues]]
* [[homelab:troubleshooting:emergency_guide|Emergency Guide]]
* [[homelab:troubleshooting:disaster_recovery|Disaster Recovery]]
==== 🔬 Advanced ====
* [[homelab:advanced:start|Advanced Topics]]
* [[homelab:advanced:optimization|Optimization Guide]]
* [[homelab:advanced:scaling|Scaling Strategies]]
===== System Information =====
**Repository**: https://git.vish.gg/Vish/homelab
**Wiki**: https://git.vish.gg/Vish/homelab/wiki
**DokuWiki**: http://atlantis.vish.local:8399/doku.php?id=homelab:start
Last updated: [[date]]
EOF
# Process each docs subdirectory
echo -e "${BLUE}📄 Processing documentation files...${NC}"
processed_count=0
# Process admin docs: hand-written landing page plus one converted page
# per markdown file.
if [[ -d "$DOCS_DIR/admin" ]]; then
    mkdir -p /tmp/dokuwiki_sync/homelab/admin
    # Create admin start page (quoted 'EOF': written verbatim)
    cat > /tmp/dokuwiki_sync/homelab/admin/start.txt << 'EOF'
====== Administration ======
===== System Management & Operations =====
==== Core Administration ====
* [[homelab:admin:gitops_guide|GitOps Deployment Guide]] - Complete deployment procedures
* [[homelab:admin:deployment_guide|Deployment Documentation]] - Step-by-step deployment
* [[homelab:admin:operational_status|Operational Status]] - Current system status
* [[homelab:admin:development|Development Guide]] - Development procedures
==== Documentation & Integration ====
* [[homelab:admin:agents|Agent Memory]] - AI agent context
* [[homelab:admin:dokuwiki_integration|DokuWiki Integration]] - External wiki setup
* [[homelab:admin:gitea_wiki_integration|Gitea Wiki Integration]] - Native wiki setup
[[homelab:start|← Back to Home]]
EOF
    # Convert admin markdown files
    for file in "$DOCS_DIR/admin"/*.md; do
        if [[ -f "$file" ]]; then
            filename=$(basename "$file" .md)
            dokuwiki_name=$(echo "$filename" | tr '[:upper:]' '[:lower:]' | sed 's/_/-/g')
            convert_md_to_dokuwiki "$file" "/tmp/dokuwiki_sync/homelab/admin/${dokuwiki_name}.txt"
            # BUG FIX: ((processed_count++)) returns exit status 1 on the
            # first increment (old value 0), aborting the whole script under
            # 'set -e' after converting a single file.
            processed_count=$((processed_count + 1))
            echo -e "${GREEN}✅ Converted: admin/$filename${NC}"
        fi
    done
fi
# Process infrastructure docs: landing page plus converted pages.
if [[ -d "$DOCS_DIR/infrastructure" ]]; then
    mkdir -p /tmp/dokuwiki_sync/homelab/infrastructure
    cat > /tmp/dokuwiki_sync/homelab/infrastructure/start.txt << 'EOF'
====== Infrastructure ======
===== Core Infrastructure & Networking =====
==== Infrastructure Management ====
* [[homelab:infrastructure:overview|Infrastructure Overview]] - Complete infrastructure guide
* [[homelab:infrastructure:ssh_guide|SSH Access Guide]] - SSH access procedures
* [[homelab:infrastructure:networking|Networking Guide]] - Network configuration
* [[homelab:infrastructure:monitoring|Monitoring Setup]] - Monitoring configuration
[[homelab:start|← Back to Home]]
EOF
    for file in "$DOCS_DIR/infrastructure"/*.md; do
        if [[ -f "$file" ]]; then
            filename=$(basename "$file" .md)
            dokuwiki_name=$(echo "$filename" | tr '[:upper:]' '[:lower:]' | sed 's/_/-/g')
            convert_md_to_dokuwiki "$file" "/tmp/dokuwiki_sync/homelab/infrastructure/${dokuwiki_name}.txt"
            # BUG FIX: ((processed_count++)) exits non-zero on the first
            # increment and kills the script under 'set -e'.
            processed_count=$((processed_count + 1))
            echo -e "${GREEN}✅ Converted: infrastructure/$filename${NC}"
        fi
    done
fi
# Process services docs: landing page plus converted pages.
if [[ -d "$DOCS_DIR/services" ]]; then
    mkdir -p /tmp/dokuwiki_sync/homelab/services
    cat > /tmp/dokuwiki_sync/homelab/services/start.txt << 'EOF'
====== Services ======
===== Application Services & Setup =====
==== Service Management ====
* [[homelab:services:service_index|Service Index]] - All available services
* [[homelab:services:dashboard_setup|Dashboard Setup]] - Dashboard configuration
* [[homelab:services:arr_suite|ARR Suite]] - Media services
[[homelab:start|← Back to Home]]
EOF
    for file in "$DOCS_DIR/services"/*.md; do
        if [[ -f "$file" ]]; then
            filename=$(basename "$file" .md)
            dokuwiki_name=$(echo "$filename" | tr '[:upper:]' '[:lower:]' | sed 's/_/-/g')
            convert_md_to_dokuwiki "$file" "/tmp/dokuwiki_sync/homelab/services/${dokuwiki_name}.txt"
            # BUG FIX: ((processed_count++)) exits non-zero on the first
            # increment and kills the script under 'set -e'.
            processed_count=$((processed_count + 1))
            echo -e "${GREEN}✅ Converted: services/$filename${NC}"
        fi
    done
fi
# Process other directories similarly (no generated landing page here).
for dir in getting-started troubleshooting security hardware advanced runbooks; do
    if [[ -d "$DOCS_DIR/$dir" ]]; then
        mkdir -p "/tmp/dokuwiki_sync/homelab/$dir"
        for file in "$DOCS_DIR/$dir"/*.md; do
            if [[ -f "$file" ]]; then
                filename=$(basename "$file" .md)
                dokuwiki_name=$(echo "$filename" | tr '[:upper:]' '[:lower:]' | sed 's/_/-/g')
                convert_md_to_dokuwiki "$file" "/tmp/dokuwiki_sync/homelab/$dir/${dokuwiki_name}.txt"
                # BUG FIX: ((processed_count++)) exits non-zero on the first
                # increment and kills the script under 'set -e'.
                processed_count=$((processed_count + 1))
                echo -e "${GREEN}✅ Converted: $dir/$filename${NC}"
            fi
        done
    fi
done
echo ""
# Final summary plus the manual commands the operator must run on the
# Atlantis server to publish the staged tree.
echo -e "${BLUE}📊 DokuWiki Sync Summary:${NC}"
echo -e "${GREEN}✅ Files processed: $processed_count${NC}"
echo -e "${GREEN}📁 Structure created in: /tmp/dokuwiki_sync/homelab/${NC}"
echo ""
echo -e "${BLUE}📋 To complete DokuWiki sync, run on Atlantis server:${NC}"
echo -e "${YELLOW}# Copy the structure to DokuWiki${NC}"
echo -e "${YELLOW}sudo rsync -av /tmp/dokuwiki_sync/homelab/ $DOKUWIKI_PATH/${NC}"
echo -e "${YELLOW}sudo chown -R www-data:www-data $DOKUWIKI_PATH${NC}"
echo -e "${YELLOW}sudo chmod -R 755 $DOKUWIKI_PATH${NC}"
echo ""
echo -e "${GREEN}🌐 DokuWiki will be available at:${NC}"
echo -e " ${BLUE}http://atlantis.vish.local:8399/doku.php?id=homelab:start${NC}"
echo ""
echo -e "${GREEN}✅ DokuWiki sync preparation completed!${NC}"

View File

@@ -0,0 +1,70 @@
#!/bin/bash
# Test ntfy notification endpoints
# Tests both local and external ntfy servers
set -e

echo "🧪 ntfy Notification Test Script"
echo "================================"
echo

# Test local ntfy server (IP)
echo "📡 Testing Local ntfy Server (192.168.0.210:8081)..."
echo "------------------------------------------------------"
# BUG FIX: '|| true' keeps 'set -e' from aborting on connection errors,
# so the FAILED branches below are actually reachable.
RESPONSE1=$(curl -s -d "🏠 Local ntfy test from $(hostname) at $(date)" http://192.168.0.210:8081/updates || true)
if echo "$RESPONSE1" | grep -q '"id"'; then
    echo "✅ Local ntfy server (IP) - SUCCESS"
    echo " Response: $(echo "$RESPONSE1" | jq -r '.id')"
else
    echo "❌ Local ntfy server (IP) - FAILED"
    echo " Response: $RESPONSE1"
fi
echo

# Test local ntfy server (localhost)
echo "📡 Testing Local ntfy Server (localhost:8081)..."
echo "-------------------------------------------------"
RESPONSE2=$(curl -s -d "🏠 Localhost ntfy test from $(hostname) at $(date)" http://localhost:8081/updates || true)
if echo "$RESPONSE2" | grep -q '"id"'; then
    echo "✅ Local ntfy server (localhost) - SUCCESS"
    echo " Response: $(echo "$RESPONSE2" | jq -r '.id')"
else
    echo "❌ Local ntfy server (localhost) - FAILED"
    echo " Response: $RESPONSE2"
fi
echo

# Test external ntfy server
echo "🌐 Testing External ntfy Server (ntfy.vish.gg)..."
echo "-------------------------------------------------"
RESPONSE3=$(curl -s -d "🌍 External ntfy test from $(hostname) at $(date)" https://ntfy.vish.gg/REDACTED_NTFY_TOPIC || true)
if echo "$RESPONSE3" | grep -q '"id"'; then
    echo "✅ External ntfy server - SUCCESS"
    echo " Response: $(echo "$RESPONSE3" | jq -r '.id')"
else
    echo "❌ External ntfy server - FAILED"
    echo " Response: $RESPONSE3"
fi
echo

# Summary and configuration hints for Watchtower.
echo "📋 Summary:"
echo "----------"
echo "Local ntfy (IP): http://192.168.0.210:8081/updates"
echo "Local ntfy (localhost): http://localhost:8081/updates"
echo "External ntfy: https://ntfy.vish.gg/REDACTED_NTFY_TOPIC"
echo
echo "🔧 Watchtower Configuration Options:"
echo "------------------------------------"
echo "Option 1 (Local IP): WATCHTOWER_NOTIFICATION_URL=http://192.168.0.210:8081/updates"
echo "Option 2 (Localhost): WATCHTOWER_NOTIFICATION_URL=http://localhost:8081/updates"
echo "Option 3 (External): WATCHTOWER_NOTIFICATION_URL=https://ntfy.vish.gg/REDACTED_NTFY_TOPIC"
echo
echo "💡 Recommendation:"
echo " - Use localhost for better reliability (no network dependency)"
echo " - Use external for notifications REDACTED_APP_PASSWORD network"
echo " - Consider using both (comma-separated) for redundancy"
echo
echo "✅ ntfy notification test complete!"
View File

@@ -0,0 +1,129 @@
#!/bin/bash
# Test Tailscale Host Monitoring and Notifications
# Verifies that Tailscale hosts are monitored and alerts work
set -e

echo "🔍 Tailscale Host Monitoring Test"
echo "================================="
echo

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Monitoring endpoints, reached over the tailnet.
PROMETHEUS_URL="http://100.67.40.126:9090"
ALERTMANAGER_URL="http://100.67.40.126:9093"

echo "📊 Checking Prometheus Targets..."
echo "--------------------------------"
# Get all Tailscale targets (100.x.x.x addresses) as "job|instance|health".
TARGETS=$(curl -s "$PROMETHEUS_URL/api/v1/targets" | jq -r '.data.activeTargets[] | select(.labels.instance | startswith("100.")) | "\(.labels.job)|\(.labels.instance)|\(.health)"')
echo "Tailscale Monitored Hosts:"
UP_COUNT=0
DOWN_COUNT=0
while IFS='|' read -r job instance health; do
    # BUG FIX: an empty $TARGETS still yields one blank line from <<<,
    # which previously counted as a bogus DOWN host.
    [ -n "$job" ] || continue
    if [ "$health" = "up" ]; then
        echo -e " ${GREEN}✅ UP${NC} $job ($instance)"
        UP_COUNT=$((UP_COUNT + 1))
    else
        echo -e " ${RED}❌ DOWN${NC} $job ($instance)"
        DOWN_COUNT=$((DOWN_COUNT + 1))
    fi
done <<< "$TARGETS"
echo
echo "Summary: $UP_COUNT up, $DOWN_COUNT down"
echo

echo "🚨 Checking Active HostDown Alerts..."
echo "------------------------------------"
# Check for active HostDown alerts in the Prometheus rule evaluation.
ACTIVE_ALERTS=$(curl -s "$PROMETHEUS_URL/api/v1/rules" | jq -r '.data.groups[] | select(.name == "host-availability") | .rules[] | select(.name == "HostDown") | .alerts[]? | "\(.labels.instance)|\(.labels.job)|\(.state)"')
if [ -z "$ACTIVE_ALERTS" ]; then
    echo -e "${GREEN}✅ No HostDown alerts currently firing${NC}"
else
    echo "Currently firing HostDown alerts:"
    while IFS='|' read -r instance job state; do
        [ -n "$instance" ] || continue
        echo -e " ${RED}🚨 ALERT${NC} $job ($instance) - $state"
    done <<< "$ACTIVE_ALERTS"
fi
echo

echo "📬 Checking Alertmanager Status..."
echo "--------------------------------"
# Check alerts that have reached Alertmanager (i.e. are being routed).
AM_ALERTS=$(curl -s "$ALERTMANAGER_URL/api/v2/alerts" | jq -r '.[] | select(.labels.alertname == "HostDown") | "\(.labels.instance)|\(.labels.job)|\(.status.state)"')
if [ -z "$AM_ALERTS" ]; then
    echo -e "${GREEN}✅ No HostDown alerts in Alertmanager${NC}"
else
    echo "Active alerts in Alertmanager:"
    while IFS='|' read -r instance job state; do
        [ -n "$instance" ] || continue
        echo -e " ${YELLOW}📬 NOTIFYING${NC} $job ($instance) - $state"
    done <<< "$AM_ALERTS"
fi
echo

echo "🧪 Testing Notification Endpoints..."
echo "-----------------------------------"
# Test ntfy notification.
# BUG FIX: '|| true' so 'set -e' cannot abort on a connection error
# before the failure branch reports the problem.
echo "Testing ntfy notification..."
NTFY_RESPONSE=$(curl -s -d "🧪 Tailscale monitoring test from $(hostname) at $(date)" \
    -H "Title: Tailscale Monitoring Test" \
    -H "Priority: 3" \
    -H "Tags: test_tube" \
    http://192.168.0.210:8081/homelab-alerts || true)
if echo "$NTFY_RESPONSE" | grep -q '"id"'; then
    echo -e " ${GREEN}✅ ntfy notification sent successfully${NC}"
    echo " Message ID: $(echo "$NTFY_RESPONSE" | jq -r '.id')"
else
    echo -e " ${RED}❌ ntfy notification failed${NC}"
    echo " Response: $NTFY_RESPONSE"
fi
echo

echo "📋 Tailscale Host Inventory..."
echo "-----------------------------"
# List all monitored Tailscale hosts with their job names
echo "Currently monitored Tailscale hosts:"
curl -s "$PROMETHEUS_URL/api/v1/targets" | jq -r '.data.activeTargets[] | select(.labels.instance | startswith("100.")) | " \(.labels.job): \(.labels.instance) (\(.health))"' | sort
echo

echo "⚙️ Alert Configuration Summary..."
echo "---------------------------------"
echo "• HostDown Alert: Triggers after 2 minutes of downtime"
echo "• Severity: Critical (triggers both ntfy + Signal notifications)"
echo "• Monitored via: node_exporter on port 9100"
echo "• Alert Rule: up{job=~\".*-node\"} == 0"
echo

echo "🔧 Notification Channels:"
echo "• ntfy: http://192.168.0.210:8081/homelab-alerts"
echo "• Signal: Via signal-bridge (critical alerts only)"
echo "• Alertmanager: http://100.67.40.126:9093"
echo

echo "✅ Tailscale monitoring test complete!"
echo
echo "💡 To manually test a HostDown alert:"
echo " 1. Stop node_exporter on any Tailscale host"
echo " 2. Wait 2+ minutes"
echo " 3. Check your ntfy app and Signal for notifications"
echo
View File

@@ -0,0 +1,346 @@
#!/bin/bash
# Comprehensive Gitea Wiki Upload Script
# Uploads ALL homelab documentation to Gitea wiki via API
set -e
# Configuration
# NOTE(review): the API token is hardcoded here (redacted in this mirror);
# prefer reading it from the environment (e.g. ${GITEA_TOKEN:?}) so it never
# lands in Git history.
GITEA_TOKEN=REDACTED_TOKEN
GITEA_URL="https://git.vish.gg"
REPO_OWNER="Vish"
REPO_NAME="homelab"
# Base endpoint for all wiki API calls on the target repository.
BASE_URL="$GITEA_URL/api/v1/repos/$REPO_OWNER/$REPO_NAME/wiki"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
PURPLE='\033[0;35m'
NC='\033[0m' # No Color
echo -e "${BLUE}🚀 Starting COMPREHENSIVE Gitea Wiki documentation upload...${NC}"
echo -e "${PURPLE}📊 Scanning for all documentation files...${NC}"
# Find all markdown files
# Used only for progress display; the upload loop re-runs find itself.
total_files=$(find docs/ -name "*.md" -type f | wc -l)
echo -e "${BLUE}📚 Found $total_files markdown files to upload${NC}"
echo ""
# Create a wiki page via the Gitea API, falling back to an update if the
# page already exists.
# Arguments:
#   $1 - wiki page title
#   $2 - path to the markdown file to upload
#   $3 - commit message for the wiki revision
# Returns: 0 on success, 1 on failure.
create_wiki_page() {
    local title="$1"
    local file_path="$2"
    local message="$3"
    if [[ ! -f "$file_path" ]]; then
        echo -e "${RED}❌ File not found: $file_path${NC}"
        return 1
    fi
    # (Fixed: path and title were previously concatenated with no separator.)
    echo -e "${YELLOW}📄 Processing: $file_path ($title)${NC}"
    # JSON-encode the raw file content; jq -Rs reads the whole file as a
    # single JSON string.
    local content
    content=$(jq -Rs . < "$file_path")
    # Build the request payload; the API expects base64-encoded content.
    local json_payload
    json_payload=$(jq -n \
        --arg title "$title" \
        --argjson content "$content" \
        --arg message "$message" \
        '{
            title: $title,
            content_base64: ($content | @base64),
            message: $message
        }')
    # Try to create a new page first: POST .../wiki/new (201 on success).
    local response
    response=$(curl -s -w "%{http_code}" -o /tmp/wiki_response.json \
        -X POST \
        -H "Authorization: token $GITEA_TOKEN" \
        -H "Content-Type: application/json" \
        -d "$json_payload" \
        "$BASE_URL/new")
    local http_code="${response: -3}"
    if [[ "$http_code" == "201" ]]; then
        echo -e "${GREEN}✅ Created: $title${NC}"
        return 0
    elif [[ "$http_code" == "409" ]] || [[ "$http_code" == "400" ]]; then
        # Page already exists - edit it. The Gitea wiki edit endpoint is
        # PATCH /wiki/page/{pageName}; the previous POST to "$BASE_URL/$title"
        # is not a valid API route and could never succeed.
        # NOTE(review): some Gitea versions signal duplicate pages with codes
        # other than 409/400 - confirm against the target server version.
        echo -e "${YELLOW}📝 Page exists, updating: $title${NC}"
        response=$(curl -s -w "%{http_code}" -o /tmp/wiki_response.json \
            -X PATCH \
            -H "Authorization: token $GITEA_TOKEN" \
            -H "Content-Type: application/json" \
            -d "$json_payload" \
            "$BASE_URL/page/$title")
        http_code="${response: -3}"
        if [[ "$http_code" == "200" ]]; then
            echo -e "${GREEN}✅ Updated: $title${NC}"
            return 0
        else
            echo -e "${RED}❌ Failed to update $title (HTTP $http_code)${NC}"
            return 1
        fi
    else
        echo -e "${RED}❌ Failed to create $title (HTTP $http_code)${NC}"
        return 1
    fi
}
# Convert a docs/ file path into a Gitea wiki page title,
# e.g. docs/admin/monitoring-setup.md -> Admin-Monitoring-Setup.
# Pure-bash implementation: the previous sed pipeline relied on GNU-only
# extensions (\U, \b, \w, \|) and silently misbehaved on BSD/macOS sed.
# Arguments: $1 - file path (typically under docs/)
# Outputs:   the wiki title on stdout (empty string for empty input)
path_to_wiki_title() {
    local file_path="$1"
    # Remove docs/ prefix and .md suffix.
    local title="${file_path#docs/}"
    title="${title%.md}"
    # Directory separators become dashes.
    title="${title//\//-}"
    # Replace every character outside [a-zA-Z0-9_-] with an underscore.
    local sanitized="" ch i
    for ((i = 0; i < ${#title}; i++)); do
        ch="${title:i:1}"
        [[ "$ch" == [a-zA-Z0-9_-] ]] || ch="_"
        sanitized+="$ch"
    done
    # Collapse runs of underscores, then trim one from each end.
    while [[ "$sanitized" == *__* ]]; do
        sanitized="${sanitized//__/_}"
    done
    sanitized="${sanitized#_}"
    sanitized="${sanitized%_}"
    # Capitalize the first character of each dash-separated segment
    # (underscores do NOT start a new segment, matching the old behavior).
    local out="" seg rest="$sanitized-"
    while [[ -n "$rest" ]]; do
        seg="${rest%%-*}"
        rest="${rest#*-}"
        out+="${seg^}-"
    done
    echo "${out%-}"
}
# Success and failure counters
success_count=0
total_count=0
failed_files=()
echo -e "${BLUE}📋 Creating comprehensive homelab wiki index...${NC}"
# Create main wiki index page with complete navigation.
# FIX: the here-doc delimiter is now UNQUOTED so that $(date) below is
# expanded at run time; with the previous quoted 'EOF' the published page
# literally showed "$(date)". The content contains no other $, backtick or
# backslash sequences, so nothing else is affected by the change.
cat > /tmp/comprehensive_wiki_index.md << EOF
# Homelab Documentation Wiki - Complete Index
*This wiki contains ALL documentation from the homelab Git repository*
*Last Updated: $(date)*
## 🎯 Quick Navigation
### 📖 Core Documentation
- [Repository README](README) - Complete repository overview
- [Documentation Index](INDEX) - Master navigation guide
- [Operational Status](Operational-Status) - Current system status
### 🔧 Administration & Operations
- [GitOps Comprehensive Guide](Admin-GITOPS-COMPREHENSIVE-GUIDE) - Complete deployment procedures ⭐
- [DokuWiki Integration](Admin-DOKUWIKI-INTEGRATION) - Documentation mirroring setup
- [Gitea Wiki Integration](Admin-GITEA-WIKI-INTEGRATION) - Native wiki integration
- [Deployment Workflow](Admin-DEPLOYMENT-WORKFLOW) - Deployment procedures
- [Operational Notes](Admin-OPERATIONAL-NOTES) - Administrative notes
- [Monitoring Setup](Admin-Monitoring-Setup) - Monitoring configuration
- [Backup Strategies](Admin-Backup-Strategies) - Backup procedures
- [Security](Admin-Security) - Security configuration
- [Maintenance](Admin-Maintenance) - Maintenance procedures
### 🏗️ Infrastructure
- [Infrastructure Health Report](Infrastructure-INFRASTRUCTURE-HEALTH-REPORT) - System health status
- [Infrastructure Overview](Infrastructure-INFRASTRUCTURE-OVERVIEW) - Complete infrastructure guide
- [Networking](Infrastructure-Networking) - Network configuration
- [Storage](Infrastructure-Storage) - Storage configuration
- [SSH Access Guide](Infrastructure-SSH-ACCESS-GUIDE) - SSH access procedures
- [User Access Guide](Infrastructure-USER-ACCESS-GUIDE) - User access management
- [Tailscale Setup](Infrastructure-Tailscale-Setup-Guide) - VPN configuration
- [Cloudflare Tunnels](Infrastructure-Cloudflare-Tunnels) - Tunnel configuration
### 🚀 Getting Started
- [Beginner Quickstart](Getting-Started-BEGINNER-QUICKSTART) - Quick start guide
- [What Is Homelab](Getting-Started-What-Is-Homelab) - Introduction to homelabs
- [Prerequisites](Getting-Started-Prerequisites) - Requirements and setup
- [Architecture](Getting-Started-Architecture) - System architecture overview
- [Shopping Guide](Getting-Started-Shopping-Guide) - Hardware recommendations
### 🔧 Services
- [Service Index](Services-Index) - All available services
- [Dashboard Setup](Services-DASHBOARD-SETUP) - Dashboard configuration
- [Homarr Setup](Services-HOMARR-SETUP) - Homarr dashboard setup
- [Verified Service Inventory](Services-VERIFIED-SERVICE-INVENTORY) - Service catalog
- [ARR Suite Enhancements](Services-ARR-SUITE-ENHANCEMENTS-FEB2025) - Media stack improvements
- [Authentik SSO](Services-Authentik-Sso) - Single sign-on setup
### 📚 Runbooks & Procedures
- [Add New Service](Runbooks-Add-New-Service) - Service deployment runbook
- [Add New User](Runbooks-Add-New-User) - User management procedures
- [Certificate Renewal](Runbooks-Certificate-Renewal) - SSL certificate management
- [Service Migration](Runbooks-Service-Migration) - Service migration procedures
- [Disk Full Procedure](Runbooks-Disk-Full-Procedure) - Storage management
### 🛠️ Troubleshooting
- [Common Issues](Troubleshooting-Common-Issues) - Frequently encountered problems
- [Emergency Access Guide](Troubleshooting-EMERGENCY-ACCESS-GUIDE) - Emergency procedures
- [Disaster Recovery](Troubleshooting-Disaster-Recovery) - Recovery procedures
- [Recovery Guide](Troubleshooting-RECOVERY-GUIDE) - System recovery
- [Container Diagnosis](Troubleshooting-CONTAINER-DIAGNOSIS-REPORT) - Container troubleshooting
- [Watchtower Emergency Procedures](Troubleshooting-WATCHTOWER-EMERGENCY-PROCEDURES) - Watchtower issues
### 🔒 Security
- [Server Hardening](Security-SERVER-HARDENING) - Security hardening guide
### 🏗️ Advanced Topics
- [Homelab Maturity Roadmap](Advanced-HOMELAB-MATURITY-ROADMAP) - Growth planning
- [Repository Optimization](Advanced-REPOSITORY-OPTIMIZATION-GUIDE) - Optimization guide
- [Terraform Implementation](Advanced-TERRAFORM-IMPLEMENTATION-GUIDE) - Infrastructure as code
- [Stack Comparison Report](Advanced-STACK-COMPARISON-REPORT) - Technology comparisons
### 📊 Diagrams & Architecture
- [Network Topology](Diagrams-Network-Topology) - Network diagrams
- [Service Architecture](Diagrams-Service-Architecture) - Service architecture
- [Storage Topology](Diagrams-Storage-Topology) - Storage layout
- [10GbE Backbone](Diagrams-10gbe-Backbone) - High-speed networking
### 🖥️ Hardware
- [Hardware README](Hardware-README) - Hardware documentation
- [Network Equipment](Hardware-Network-Equipment) - Network hardware
- [Atlantis Storage](Hardware-Atlantis-Storage) - Storage hardware
## 🌐 Access Points
- **Git Repository**: https://git.vish.gg/Vish/homelab
- **Gitea Wiki**: https://git.vish.gg/Vish/homelab/wiki
- **DokuWiki Mirror**: http://atlantis.vish.local:8399/doku.php?id=homelab:start
## 📊 Repository Status
- **GitOps Status**: ✅ 18 active stacks, 50+ containers
- **Servers**: 5 active (Atlantis, Calypso, Gaming VPS, Homelab VM, Concord NUC)
- **Services**: 100+ containerized services
- **Documentation Files**: 291+ markdown files
- **Wiki Pages**: Complete documentation mirror
---
**Source Repository**: https://git.vish.gg/Vish/homelab
**Maintainer**: Homelab Administrator
**Documentation Coverage**: Complete (all docs/ files mirrored)
EOF
total_count=$((total_count + 1))
if create_wiki_page "Home" "/tmp/comprehensive_wiki_index.md" "Updated comprehensive homelab wiki index with complete navigation"; then
success_count=$((success_count + 1))
fi
echo ""
echo -e "${BLUE}📚 Uploading ALL documentation files...${NC}"
echo -e "${PURPLE}This may take a while - processing $total_files files...${NC}"
echo ""
# Process all markdown files in docs/
# find -print0 with read -d '' keeps paths containing spaces/newlines
# intact; sort -z gives a stable upload order.
while IFS= read -r -d '' file; do
# Skip hidden files and directories
if [[ "$file" == *"/."* ]]; then
continue
fi
# Convert file path to wiki title
wiki_title=$(path_to_wiki_title "$file")
# Skip if title is empty
if [[ -z "$wiki_title" ]]; then
echo -e "${RED}⚠️ Skipping file with empty title: $file${NC}"
continue
fi
echo ""
echo -e "${PURPLE}📄 [$((total_count + 1))/$((total_files + 1))] Processing: $file${NC}"
echo -e "${YELLOW} → Wiki Title: $wiki_title${NC}"
total_count=$((total_count + 1))
# Track failures so they can be listed in the summary at the end.
if create_wiki_page "$wiki_title" "$file" "Updated $wiki_title from repository ($file)"; then
success_count=$((success_count + 1))
else
failed_files+=("$file")
fi
# Add small delay to avoid overwhelming the API
sleep 0.1
done < <(find docs/ -name "*.md" -type f -print0 | sort -z)
# Also upload root-level documentation files
echo ""
echo -e "${BLUE}📚 Uploading root-level documentation files...${NC}"
# Explicit whitelist of repository-root markdown files to mirror.
root_docs=(
"README.md"
"OPERATIONAL_STATUS.md"
"MONITORING_ARCHITECTURE.md"
"GITOPS_DEPLOYMENT_GUIDE.md"
"DOCUMENTATION_AUDIT_REPORT.md"
"CHANGELOG.md"
"DEVELOPMENT.md"
"DEPLOYMENT_DOCUMENTATION.md"
"SECURITY_HARDENING_SUMMARY.md"
)
for file in "${root_docs[@]}"; do
if [[ -f "$file" ]]; then
# Derive a wiki title from the basename: sanitize to [a-zA-Z0-9_-],
# then capitalize word starts and join with dashes.
# NOTE(review): \U& and \b\w are GNU sed extensions - this breaks on
# BSD/macOS sed; confirm the script only runs on GNU systems.
wiki_title=$(basename "$file" .md | sed 's/[^a-zA-Z0-9_-]/_/g' | sed 's/__*/_/g' | sed 's/^_\|_$//g')
wiki_title=$(echo "$wiki_title" | sed 's/_/ /g' | sed 's/\b\w/\U&/g' | sed 's/ /-/g')
echo ""
echo -e "${PURPLE}📄 [$((total_count + 1))/$((total_files + ${#root_docs[@]} + 1))] Processing root file: $file${NC}"
echo -e "${YELLOW} → Wiki Title: $wiki_title${NC}"
total_count=$((total_count + 1))
if create_wiki_page "$wiki_title" "$file" "Updated $wiki_title from repository root"; then
success_count=$((success_count + 1))
else
failed_files+=("$file")
fi
# Throttle API calls slightly.
sleep 0.1
fi
done
echo ""
echo -e "${BLUE}🎯 COMPREHENSIVE Upload Summary:${NC}"
echo -e "${GREEN}✅ Successful: $success_count/$total_count${NC}"
echo -e "${RED}❌ Failed: $((total_count - success_count))/$total_count${NC}"
# List every file that failed to upload, if any.
if [[ ${#failed_files[@]} -gt 0 ]]; then
echo ""
echo -e "${RED}❌ Failed files:${NC}"
for file in "${failed_files[@]}"; do
echo -e "${RED} - $file${NC}"
done
fi
echo ""
echo -e "${BLUE}🌐 Complete Gitea Wiki available at:${NC}"
echo -e " ${BLUE}https://git.vish.gg/$REPO_OWNER/$REPO_NAME/wiki${NC}"
echo -e " ${BLUE}https://git.vish.gg/$REPO_OWNER/$REPO_NAME/wiki/Home${NC}"
# Get final page count
# Best-effort: network/jq errors fall back to "unknown" instead of aborting.
final_page_count=$(curl -s -H "Authorization: token $GITEA_TOKEN" "$BASE_URL/pages" | jq '. | length' 2>/dev/null || echo "unknown")
echo ""
echo -e "${GREEN}📊 Final Wiki Statistics:${NC}"
echo -e "${GREEN} Total Wiki Pages: $final_page_count${NC}"
echo -e "${GREEN} Documentation Files Processed: $total_files${NC}"
# total_count is always >= 1 here (the index page was counted above), so
# this division cannot be by zero.
echo -e "${GREEN} Success Rate: $(( success_count * 100 / total_count ))%${NC}"
# Exit non-zero if any upload failed so callers/CI can detect it.
if [[ $success_count -eq $total_count ]]; then
echo ""
echo -e "${GREEN}✅ COMPREHENSIVE Gitea Wiki upload completed successfully!${NC}"
echo -e "${GREEN}🎉 ALL homelab documentation is now available in the wiki!${NC}"
exit 0
else
echo ""
echo -e "${YELLOW}⚠️ Gitea Wiki upload completed with some failures.${NC}"
echo -e "${YELLOW}📊 $success_count out of $total_count files uploaded successfully.${NC}"
exit 1
fi

557
scripts/upload-organized-wiki.sh Executable file
View File

@@ -0,0 +1,557 @@
#!/bin/bash
# Organized Hierarchical Gitea Wiki Upload Script
# Creates a properly structured wiki with categories and navigation
set -e
# Configuration
# NOTE(review): hardcoded API token (redacted in this mirror); prefer
# reading it from the environment so it never lands in Git history.
GITEA_TOKEN=REDACTED_TOKEN
GITEA_URL="https://git.vish.gg"
REPO_OWNER="Vish"
REPO_NAME="homelab"
# Base endpoint for all wiki API calls on the target repository.
BASE_URL="$GITEA_URL/api/v1/repos/$REPO_OWNER/$REPO_NAME/wiki"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
PURPLE='\033[0;35m'
NC='\033[0m' # No Color
echo -e "${BLUE}🚀 Starting ORGANIZED Gitea Wiki upload with hierarchical structure...${NC}"
# Create a wiki page via the Gitea API, falling back to an update if the
# page already exists.
# Arguments:
#   $1 - wiki page title
#   $2 - path to the file whose content should be uploaded
#   $3 - commit message for the wiki revision
# Returns: 0 on success, 1 on failure.
create_wiki_page() {
    local title="$1"
    local file_path="$2"
    local message="$3"
    if [[ ! -f "$file_path" ]]; then
        echo -e "${RED}❌ File not found: $file_path${NC}"
        return 1
    fi
    echo -e "${YELLOW}📄 Creating: $title${NC}"
    # JSON-encode the raw file content; jq -Rs reads the whole file as a
    # single JSON string.
    local content
    content=$(jq -Rs . < "$file_path")
    # Build the request payload; the API expects base64-encoded content.
    local json_payload
    json_payload=$(jq -n \
        --arg title "$title" \
        --argjson content "$content" \
        --arg message "$message" \
        '{
            title: $title,
            content_base64: ($content | @base64),
            message: $message
        }')
    # Try to create a new page first: POST .../wiki/new (201 on success).
    local response
    response=$(curl -s -w "%{http_code}" -o /tmp/wiki_response.json \
        -X POST \
        -H "Authorization: token $GITEA_TOKEN" \
        -H "Content-Type: application/json" \
        -d "$json_payload" \
        "$BASE_URL/new")
    local http_code="${response: -3}"
    if [[ "$http_code" == "201" ]]; then
        echo -e "${GREEN}✅ Created: $title${NC}"
        return 0
    elif [[ "$http_code" == "409" ]] || [[ "$http_code" == "400" ]]; then
        # Page already exists - edit it. The Gitea wiki edit endpoint is
        # PATCH /wiki/page/{pageName}; the previous POST to "$BASE_URL/$title"
        # is not a valid API route and could never succeed.
        response=$(curl -s -w "%{http_code}" -o /tmp/wiki_response.json \
            -X PATCH \
            -H "Authorization: token $GITEA_TOKEN" \
            -H "Content-Type: application/json" \
            -d "$json_payload" \
            "$BASE_URL/page/$title")
        http_code="${response: -3}"
        if [[ "$http_code" == "200" ]]; then
            echo -e "${GREEN}✅ Updated: $title${NC}"
            return 0
        else
            echo -e "${RED}❌ Failed to update $title (HTTP $http_code)${NC}"
            return 1
        fi
    else
        echo -e "${RED}❌ Failed to create $title (HTTP $http_code)${NC}"
        return 1
    fi
}
# Success counter
success_count=0
total_count=0
echo -e "${BLUE}📋 Creating main navigation hub...${NC}"
# Create the Home page with organized navigation.
# FIX: the here-doc delimiter is now UNQUOTED so that $(date) below is
# expanded at run time; with the previous quoted 'EOF' the published page
# literally showed "$(date)". The content contains no other $, backtick or
# backslash sequences, so nothing else is affected by the change.
cat > /tmp/organized_wiki_home.md << EOF
# 🏠 Homelab Documentation Wiki
*Complete organized documentation for Vish's homelab infrastructure*
*Last Updated: $(date)*
## 🎯 Quick Navigation
### 📖 **Core Documentation**
- [📋 Repository README](README) - Complete repository overview
- [📚 Documentation Index](Documentation-Index) - Master navigation guide
- [📊 Operational Status](Admin-Operational-Status) - Current system status
- [📝 Changelog](Changelog) - Version history and updates
---
## 🔧 **Administration & Operations**
### 🚀 Deployment & GitOps
- [🎯 GitOps Comprehensive Guide](Admin-Gitops-Comprehensive-Guide) - Complete deployment procedures ⭐
- [📋 Deployment Documentation](Admin-Deployment-Documentation) - Deployment procedures
- [🔄 Deployment Workflow](Admin-Deployment-Workflow) - Step-by-step workflows
- [📊 Documentation Audit Report](Admin-Documentation-Audit-Report) - Audit results
### 🔧 System Administration
- [🛠️ Development Guide](Admin-Development) - Development procedures
- [🤖 Agent Memory](Admin-Agents) - AI agent context and memory
- [🔐 Security Hardening](Security-Server-Hardening) - Security procedures
- [📈 Monitoring Setup](Admin-Monitoring-Setup) - Monitoring configuration
- [💾 Backup Strategies](Admin-Backup-Strategies) - Backup procedures
- [🔧 Maintenance](Admin-Maintenance) - Maintenance procedures
### 📚 Integration Documentation
- [📖 DokuWiki Integration](Admin-Dokuwiki-Integration) - External wiki setup
- [📖 Gitea Wiki Integration](Admin-Gitea-Wiki-Integration) - Native wiki setup
---
## 🏗️ **Infrastructure**
### 🌐 Core Infrastructure
- [🏗️ Infrastructure Overview](Infrastructure-Infrastructure-Overview) - Complete infrastructure guide
- [📊 Infrastructure Health](Infrastructure-Infrastructure-Health-Report) - System health status
- [🌐 Networking](Infrastructure-Networking) - Network configuration
- [💾 Storage](Infrastructure-Storage) - Storage configuration
- [🖥️ Hosts](Infrastructure-Hosts) - Host management
### 🔐 Access & Security
- [🔑 SSH Access Guide](Infrastructure-Ssh-Access-Guide) - SSH access procedures
- [👥 User Access Guide](Infrastructure-User-Access-Guide) - User access management
- [🔐 Authentik SSO](Infrastructure-Authentik-Sso) - Single sign-on setup
### 🌐 Network Services
- [🚇 Tailscale Setup](Infrastructure-Tailscale-Setup-Guide) - VPN configuration
- [☁️ Cloudflare Tunnels](Infrastructure-Cloudflare-Tunnels) - Tunnel configuration
- [☁️ Cloudflare DNS](Infrastructure-Cloudflare-Dns) - DNS configuration
- [🌐 Network Performance](Infrastructure-Network-Performance-Tuning) - Performance optimization
### 🏠 Host Management
- [📊 Hardware Inventory](Infrastructure-Hardware-Inventory) - Hardware catalog
- [🔄 Atlantis Migration](Infrastructure-Atlantis-Migration) - Migration procedures
- [📱 Mobile Setup](Infrastructure-Mobile-Device-Setup) - Mobile device configuration
- [💻 Laptop Setup](Infrastructure-Laptop-Travel-Setup) - Laptop configuration
---
## 🎯 **Services**
### 📊 Service Management
- [📋 Service Index](Services-Index) - All available services
- [✅ Verified Service Inventory](Services-Verified-Service-Inventory) - Service catalog
- [📊 Dashboard Setup](Services-Dashboard-Setup) - Dashboard configuration
- [🎨 Homarr Setup](Services-Homarr-Setup) - Homarr dashboard setup
- [🎨 Theme Park](Services-Theme-Park) - UI theming
### 🎬 Media Services
- [🎬 ARR Suite Enhancements](Services-Arr-Suite-Enhancements-Feb2025) - Media stack improvements
- [🎬 ARR Suite Language Config](Arr-Suite-Language-Configuration) - Language configuration
### 💬 Communication Services
- [💬 Stoatchat Setup](Services-Stoatchat-Setup) - Chat platform setup
- [💬 Stoatchat Next Steps](Services-Stoatchat-Next-Steps) - Future improvements
- [🗨️ Matrix Setup](Services-Matrix-Setup) - Matrix server configuration
- [💬 Mastodon Setup](Services-Mastodon-Setup) - Social media platform
- [💬 Mattermost Setup](Services-Mattermost-Setup) - Team communication
### 🔧 Development Services
- [🤖 OpenHands](Services-Openhands) - AI development assistant
- [📄 Paperless](Services-Paperless) - Document management
- [📝 Reactive Resume](Services-Reactive-Resume) - Resume builder
### 📋 Individual Services
- [📋 Individual Service Docs](Services-Individual-Index) - Complete service documentation
---
## 🚀 **Getting Started**
### 🎯 Quick Start
- [⚡ Beginner Quickstart](Getting-Started-Beginner-Quickstart) - Quick start guide
- [❓ What Is Homelab](Getting-Started-What-Is-Homelab) - Introduction to homelabs
- [📋 Prerequisites](Getting-Started-Prerequisites) - Requirements and setup
- [🏗️ Architecture](Getting-Started-Architecture) - System architecture overview
### 📚 Comprehensive Guides
- [📖 Beginner Homelab Guide](Getting-Started-Beginner-Homelab-Guide) - Complete beginner guide
- [🛒 Shopping Guide](Getting-Started-Shopping-Guide) - Hardware recommendations
- [🔄 Complete Rebuild Guide](Getting-Started-Complete-Rebuild-Guide) - Full rebuild procedures
- [⚡ Quick Start](Getting-Started-Quick-Start) - Quick deployment guide
---
## 🛠️ **Troubleshooting**
### 🚨 Emergency Procedures
- [🚨 Emergency Access Guide](Troubleshooting-Emergency-Access-Guide) - Emergency procedures
- [🔄 Disaster Recovery](Troubleshooting-Disaster-Recovery) - Recovery procedures
- [📋 Recovery Guide](Troubleshooting-Recovery-Guide) - System recovery
- [🔧 Emergency](Troubleshooting-Emergency) - Emergency troubleshooting
### 🔍 Diagnostics
- [❓ Common Issues](Troubleshooting-Common-Issues) - Frequently encountered problems
- [🔍 Diagnostics](Troubleshooting-Diagnostics) - Diagnostic procedures
- [📊 Container Diagnosis](Troubleshooting-Container-Diagnosis-Report) - Container troubleshooting
- [⚡ Performance](Troubleshooting-Performance) - Performance troubleshooting
### 🔧 Specific Issues
- [🔄 Watchtower Emergency](Troubleshooting-Watchtower-Emergency-Procedures) - Watchtower issues
- [🔐 Authentik SSO Rebuild](Troubleshooting-Authentik-Sso-Rebuild) - SSO troubleshooting
- [🆘 Beginner Troubleshooting](Troubleshooting-Beginner-Troubleshooting) - Beginner help
---
## 🔬 **Advanced Topics**
### 🚀 Growth & Optimization
- [📈 Homelab Maturity Roadmap](Advanced-Homelab-Maturity-Roadmap) - Growth planning
- [⚡ Repository Optimization](Advanced-Repository-Optimization-Guide) - Optimization guide
- [📊 Stack Comparison Report](Advanced-Stack-Comparison-Report) - Technology comparisons
- [📈 Scaling](Advanced-Scaling) - Scaling strategies
### 🏗️ Infrastructure as Code
- [🏗️ Terraform Implementation](Advanced-Terraform-Implementation-Guide) - Infrastructure as code
- [🔄 Terraform Alternatives](Advanced-Terraform-And-Gitops-Alternatives) - Alternative approaches
- [🤖 Ansible](Advanced-Ansible) - Automation with Ansible
- [🔧 Customization](Advanced-Customization) - Advanced customization
### 🔗 Integration
- [🔗 Integrations](Advanced-Integrations) - Service integrations
---
## 📊 **Diagrams & Architecture**
### 🌐 Network Architecture
- [🌐 Network Topology](Diagrams-Network-Topology) - Network diagrams
- [⚡ 10GbE Backbone](Diagrams-10gbe-Backbone) - High-speed networking
- [🚇 Tailscale Mesh](Diagrams-Tailscale-Mesh) - VPN mesh network
### 🏗️ System Architecture
- [🏗️ Service Architecture](Diagrams-Service-Architecture) - Service architecture
- [💾 Storage Topology](Diagrams-Storage-Topology) - Storage layout
- [📍 Location Overview](Diagrams-Location-Overview) - Physical locations
---
## 🖥️ **Hardware**
### 🖥️ Equipment Documentation
- [🖥️ Hardware Overview](Hardware-Readme) - Hardware documentation
- [🌐 Network Equipment](Hardware-Network-Equipment) - Network hardware
- [💾 Atlantis Storage](Hardware-Atlantis-Storage) - Storage hardware
- [🖥️ Guava Server](Hardware-Guava) - Physical server
- [📺 NVIDIA Shield](Hardware-Nvidia-Shield) - Edge device
---
## 📋 **Runbooks & Procedures**
### 🔧 Service Management
- [ Add New Service](Runbooks-Add-New-Service) - Service deployment runbook
- [👥 Add New User](Runbooks-Add-New-User) - User management procedures
- [🔄 Service Migration](Runbooks-Service-Migration) - Service migration procedures
### 🔐 Security & Maintenance
- [🔐 Certificate Renewal](Runbooks-Certificate-Renewal) - SSL certificate management
- [💾 Disk Full Procedure](Runbooks-Disk-Full-Procedure) - Storage management
---
## 🌐 **Access Points**
- **🔗 Git Repository**: https://git.vish.gg/Vish/homelab
- **📖 Gitea Wiki**: https://git.vish.gg/Vish/homelab/wiki
- **📚 DokuWiki Mirror**: http://atlantis.vish.local:8399/doku.php?id=homelab:start
---
## 📊 **Repository Status**
- **🚀 GitOps Status**: ✅ 18 active stacks, 50+ containers
- **🖥️ Servers**: 5 active (Atlantis, Calypso, Gaming VPS, Homelab VM, Concord NUC)
- **🎯 Services**: 100+ containerized services
- **📚 Documentation**: 300+ organized pages
- **📖 Wiki Coverage**: Complete hierarchical organization
---
*🏠 **Source Repository**: https://git.vish.gg/Vish/homelab*
*👨‍💻 **Maintainer**: Homelab Administrator*
*📚 **Documentation**: Fully organized and navigable*
EOF
total_count=$((total_count + 1))
if create_wiki_page "Home" "/tmp/organized_wiki_home.md" "Created organized hierarchical REDACTED_APP_PASSWORD comprehensive navigation"; then
success_count=$((success_count + 1))
fi
echo ""
echo -e "${BLUE}📚 Creating category index pages...${NC}"
# Create Administration category index
# (Quoted 'EOF' delimiter: content is taken literally; it contains no
# shell expansions.)
cat > /tmp/admin_index.md << 'EOF'
# 🔧 Administration & Operations
*Complete administrative documentation for homelab management*
## 🚀 Deployment & GitOps
- [🎯 GitOps Comprehensive Guide](Admin-Gitops-Comprehensive-Guide) - Complete deployment procedures ⭐
- [📋 Deployment Documentation](Admin-Deployment-Documentation) - Deployment procedures
- [🔄 Deployment Workflow](Admin-Deployment-Workflow) - Step-by-step workflows
## 🔧 System Administration
- [🛠️ Development Guide](Admin-Development) - Development procedures
- [🤖 Agent Memory](Admin-Agents) - AI agent context and memory
- [📈 Monitoring Setup](Admin-Monitoring-Setup) - Monitoring configuration
- [💾 Backup Strategies](Admin-Backup-Strategies) - Backup procedures
- [🔧 Maintenance](Admin-Maintenance) - Maintenance procedures
## 📊 Reports & Audits
- [📊 Documentation Audit Report](Admin-Documentation-Audit-Report) - Audit results
- [📊 Operational Status](Admin-Operational-Status) - Current system status
## 📚 Integration Documentation
- [📖 DokuWiki Integration](Admin-Dokuwiki-Integration) - External wiki setup
- [📖 Gitea Wiki Integration](Admin-Gitea-Wiki-Integration) - Native wiki setup
---
[🏠 Back to Home](Home)
EOF
total_count=$((total_count + 1))
if create_wiki_page "Administration-Index" "/tmp/admin_index.md" "Created administration category index"; then
success_count=$((success_count + 1))
fi
# Create Infrastructure category index
cat > /tmp/infrastructure_index.md << 'EOF'
# 🏗️ Infrastructure
*Complete infrastructure documentation and configuration guides*
## 🌐 Core Infrastructure
- [🏗️ Infrastructure Overview](Infrastructure-Infrastructure-Overview) - Complete infrastructure guide
- [📊 Infrastructure Health](Infrastructure-Infrastructure-Health-Report) - System health status
- [🌐 Networking](Infrastructure-Networking) - Network configuration
- [💾 Storage](Infrastructure-Storage) - Storage configuration
- [🖥️ Hosts](Infrastructure-Hosts) - Host management
## 🔐 Access & Security
- [🔑 SSH Access Guide](Infrastructure-Ssh-Access-Guide) - SSH access procedures
- [👥 User Access Guide](Infrastructure-User-Access-Guide) - User access management
- [🔐 Authentik SSO](Infrastructure-Authentik-Sso) - Single sign-on setup
## 🌐 Network Services
- [🚇 Tailscale Setup](Infrastructure-Tailscale-Setup-Guide) - VPN configuration
- [☁️ Cloudflare Tunnels](Infrastructure-Cloudflare-Tunnels) - Tunnel configuration
- [☁️ Cloudflare DNS](Infrastructure-Cloudflare-Dns) - DNS configuration
- [🌐 Network Performance](Infrastructure-Network-Performance-Tuning) - Performance optimization
## 🏠 Host Management
- [📊 Hardware Inventory](Infrastructure-Hardware-Inventory) - Hardware catalog
- [🔄 Atlantis Migration](Infrastructure-Atlantis-Migration) - Migration procedures
- [📱 Mobile Setup](Infrastructure-Mobile-Device-Setup) - Mobile device configuration
- [💻 Laptop Setup](Infrastructure-Laptop-Travel-Setup) - Laptop configuration
---
[🏠 Back to Home](Home)
EOF
total_count=$((total_count + 1))
if create_wiki_page "Infrastructure-Index" "/tmp/infrastructure_index.md" "Created infrastructure category index"; then
success_count=$((success_count + 1))
fi
# Create Services category index
cat > /tmp/services_index.md << 'EOF'
# 🎯 Services
*Complete service documentation and configuration guides*
## 📊 Service Management
- [📋 Service Index](Services-Index) - All available services
- [✅ Verified Service Inventory](Services-Verified-Service-Inventory) - Service catalog
- [📊 Dashboard Setup](Services-Dashboard-Setup) - Dashboard configuration
- [🎨 Homarr Setup](Services-Homarr-Setup) - Homarr dashboard setup
- [🎨 Theme Park](Services-Theme-Park) - UI theming
## 🎬 Media Services
- [🎬 ARR Suite Enhancements](Services-Arr-Suite-Enhancements-Feb2025) - Media stack improvements
- [🎬 ARR Suite Language Config](Arr-Suite-Language-Configuration) - Language configuration
## 💬 Communication Services
- [💬 Stoatchat Setup](Services-Stoatchat-Setup) - Chat platform setup
- [💬 Stoatchat Next Steps](Services-Stoatchat-Next-Steps) - Future improvements
- [🗨️ Matrix Setup](Services-Matrix-Setup) - Matrix server configuration
- [💬 Mastodon Setup](Services-Mastodon-Setup) - Social media platform
- [💬 Mattermost Setup](Services-Mattermost-Setup) - Team communication
## 🔧 Development Services
- [🤖 OpenHands](Services-Openhands) - AI development assistant
- [📄 Paperless](Services-Paperless) - Document management
- [📝 Reactive Resume](Services-Reactive-Resume) - Resume builder
## 📋 Individual Services
- [📋 Individual Service Docs](Services-Individual-Index) - Complete service documentation
---
[🏠 Back to Home](Home)
EOF
total_count=$((total_count + 1))
if create_wiki_page "Services-Index" "/tmp/services_index.md" "Created services category index"; then
success_count=$((success_count + 1))
fi
echo ""
echo -e "${BLUE}📚 Uploading organized documentation files...${NC}"
# Upload key documentation files with organized structure
# Map of wiki page title -> repository file path. Note: iteration order of
# a bash associative array is unspecified, so upload order varies per run.
declare -A doc_files=(
# Core documentation
["README"]="README.md"
["Documentation-Index"]="docs/INDEX.md"
["Changelog"]="docs/CHANGELOG.md"
# Administration
["Admin-Agents"]="docs/admin/AGENTS.md"
["Admin-Deployment-Documentation"]="docs/admin/DEPLOYMENT_DOCUMENTATION.md"
["Admin-Development"]="docs/admin/DEVELOPMENT.md"
["Admin-Documentation-Audit-Report"]="docs/admin/DOCUMENTATION_AUDIT_REPORT.md"
["Admin-Gitops-Comprehensive-Guide"]="docs/admin/GITOPS_COMPREHENSIVE_GUIDE.md"
["Admin-Operational-Status"]="docs/admin/OPERATIONAL_STATUS.md"
["Admin-Deployment-Workflow"]="docs/admin/DEPLOYMENT_WORKFLOW.md"
["Admin-Monitoring-Setup"]="docs/admin/monitoring-setup.md"
["Admin-Backup-Strategies"]="docs/admin/backup-strategies.md"
["Admin-Maintenance"]="docs/admin/maintenance.md"
["Admin-Dokuwiki-Integration"]="docs/admin/DOKUWIKI_INTEGRATION.md"
["Admin-Gitea-Wiki-Integration"]="docs/admin/GITEA_WIKI_INTEGRATION.md"
# Infrastructure
["Infrastructure-Infrastructure-Overview"]="docs/infrastructure/INFRASTRUCTURE_OVERVIEW.md"
["Infrastructure-Infrastructure-Health-Report"]="docs/infrastructure/INFRASTRUCTURE_HEALTH_REPORT.md"
["Infrastructure-Monitoring-Architecture"]="docs/infrastructure/MONITORING_ARCHITECTURE.md"
["Infrastructure-Networking"]="docs/infrastructure/networking.md"
["Infrastructure-Storage"]="docs/infrastructure/storage.md"
["Infrastructure-Hosts"]="docs/infrastructure/hosts.md"
["Infrastructure-Ssh-Access-Guide"]="docs/infrastructure/SSH_ACCESS_GUIDE.md"
["Infrastructure-User-Access-Guide"]="docs/infrastructure/USER_ACCESS_GUIDE.md"
["Infrastructure-Authentik-Sso"]="docs/infrastructure/authentik-sso.md"
["Infrastructure-Tailscale-Setup-Guide"]="docs/infrastructure/tailscale-setup-guide.md"
["Infrastructure-Cloudflare-Tunnels"]="docs/infrastructure/cloudflare-tunnels.md"
["Infrastructure-Cloudflare-Dns"]="docs/infrastructure/cloudflare-dns.md"
# Security
["Security-Server-Hardening"]="docs/security/SERVER_HARDENING.md"
# Services
["Services-Verified-Service-Inventory"]="docs/services/VERIFIED_SERVICE_INVENTORY.md"
["Services-Dashboard-Setup"]="docs/services/DASHBOARD_SETUP.md"
["Services-Homarr-Setup"]="docs/services/HOMARR_SETUP.md"
["Services-Theme-Park"]="docs/services/theme-park.md"
["Services-Arr-Suite-Enhancements-Feb2025"]="docs/services/ARR_SUITE_ENHANCEMENTS_FEB2025.md"
["Arr-Suite-Language-Configuration"]="docs/arr-suite-language-configuration.md"
["Services-Stoatchat-Setup"]="docs/services/stoatchat-setup.md"
["Services-Stoatchat-Next-Steps"]="docs/services/stoatchat-next-steps.md"
["Services-Openhands"]="docs/services/openhands.md"
# Getting Started
["Getting-Started-Beginner-Quickstart"]="docs/getting-started/BEGINNER_QUICKSTART.md"
["Getting-Started-What-Is-Homelab"]="docs/getting-started/what-is-homelab.md"
["Getting-Started-Prerequisites"]="docs/getting-started/prerequisites.md"
["Getting-Started-Architecture"]="docs/getting-started/architecture.md"
["Getting-Started-Shopping-Guide"]="docs/getting-started/shopping-guide.md"
# Troubleshooting
["Troubleshooting-Emergency-Access-Guide"]="docs/troubleshooting/EMERGENCY_ACCESS_GUIDE.md"
["Troubleshooting-Disaster-Recovery"]="docs/troubleshooting/disaster-recovery.md"
["Troubleshooting-Common-Issues"]="docs/troubleshooting/common-issues.md"
["Troubleshooting-Container-Diagnosis-Report"]="docs/troubleshooting/CONTAINER_DIAGNOSIS_REPORT.md"
# Hardware
["Hardware-Readme"]="docs/hardware/README.md"
["Hardware-Network-Equipment"]="docs/hardware/network-equipment.md"
["Hardware-Atlantis-Storage"]="docs/hardware/atlantis-storage.md"
# Runbooks
["Runbooks-Add-New-Service"]="docs/runbooks/add-new-service.md"
["Runbooks-Add-New-User"]="docs/runbooks/add-new-user.md"
["Runbooks-Certificate-Renewal"]="docs/runbooks/certificate-renewal.md"
# Diagrams
["Diagrams-Network-Topology"]="docs/diagrams/network-topology.md"
["Diagrams-Service-Architecture"]="docs/diagrams/service-architecture.md"
["Diagrams-Storage-Topology"]="docs/diagrams/storage-topology.md"
)
# Upload each mapped file; missing files are reported but not fatal.
for title in "${!doc_files[@]}"; do
file_path="${doc_files[$title]}"
if [[ -f "$file_path" ]]; then
total_count=$((total_count + 1))
if create_wiki_page "$title" "$file_path" "Updated $title with organized structure"; then
success_count=$((success_count + 1))
fi
# Throttle API calls slightly.
sleep 0.1
else
echo -e "${YELLOW}⚠️ File not found: $file_path${NC}"
fi
done
echo ""
echo -e "${BLUE}🎯 Organized Wiki Upload Summary:${NC}"
echo -e "${GREEN}✅ Successful: $success_count/$total_count${NC}"
echo -e "${RED}❌ Failed: $((total_count - success_count))/$total_count${NC}"
echo ""
echo -e "${BLUE}🌐 Organized Gitea Wiki available at:${NC}"
echo -e " ${BLUE}https://git.vish.gg/$REPO_OWNER/$REPO_NAME/wiki${NC}"
echo -e " ${BLUE}https://git.vish.gg/$REPO_OWNER/$REPO_NAME/wiki/Home${NC}"
# Get final page count
# Best-effort: network/jq errors fall back to "unknown" instead of aborting.
final_page_count=$(curl -s -H "Authorization: token $GITEA_TOKEN" "$BASE_URL/pages?limit=500" | jq '. | length' 2>/dev/null || echo "unknown")
echo ""
echo -e "${GREEN}📊 Organized Wiki Statistics:${NC}"
echo -e "${GREEN} Total Wiki Pages: $final_page_count${NC}"
echo -e "${GREEN} Organized Structure: ✅ Hierarchical navigation${NC}"
# total_count is always >= 1 here (the Home page was counted above), so
# this division cannot be by zero.
echo -e "${GREEN} Success Rate: $(( success_count * 100 / total_count ))%${NC}"
# Exit non-zero if any upload failed so callers/CI can detect it.
if [[ $success_count -eq $total_count ]]; then
echo ""
echo -e "${GREEN}✅ ORGANIZED Gitea Wiki upload completed successfully!${NC}"
echo -e "${GREEN}🎉 Wiki now has proper hierarchical navigation!${NC}"
exit 0
else
echo ""
echo -e "${YELLOW}⚠️ Organized Wiki upload completed with some issues.${NC}"
echo -e "${YELLOW}📊 $success_count out of $total_count pages uploaded successfully.${NC}"
exit 1
fi

165
scripts/upload-to-dokuwiki.sh Executable file
View File

@@ -0,0 +1,165 @@
#!/bin/bash
# Upload documentation to DokuWiki
# Usage: ./upload-to-dokuwiki.sh
#
# Converts selected Markdown files from the homelab repository into DokuWiki
# syntax and pushes them to the wiki via plain HTTP form posts to doku.php.

# Base URL of the target DokuWiki instance (no trailing slash).
DOKUWIKI_BASE="http://atlantis.vish.local:8399"
# Absolute path of the local repository checkout the docs are read from.
REPO_ROOT="/home/homelab/organized/repos/homelab"

echo "🚀 Starting DokuWiki documentation upload..."
convert_md_to_dokuwiki() {
local input_file="$1"
local temp_file=$(mktemp)
# Basic conversions
sed -e 's/^# \(.*\)/====== \1 ======/g' \
-e 's/^## \(.*\)/===== \1 =====/g' \
-e 's/^### \(.*\)/==== \1 ====/g' \
-e 's/^#### \(.*\)/=== \1 ===/g' \
-e 's/^##### \(.*\)/== \1 ==/g' \
-e 's/\*\*\([^*]*\)\*\*/\*\*\1\*\*/g' \
-e 's/\*\([^*]*\)\*/\/\/\1\/\//g' \
-e 's/`\([^`]*\)`/%%\1%%/g' \
-e 's/^- \(.*\)/ \* \1/g' \
-e 's/^\([0-9]\+\)\. \(.*\)/ - \2/g' \
-e 's/- \[x\] \(.*\)/ \* ✅ \1/g' \
-e 's/- \[ \] \(.*\)/ \* ☐ \1/g' \
"$input_file" > "$temp_file"
echo "$temp_file"
}
# Push one wiki page to DokuWiki via a form POST to doku.php.
#   $1 - page id (namespace:page)
#   $2 - file containing the DokuWiki-formatted text
#   $3 - edit summary shown in the page history
# Returns 0 when the curl request succeeds, 1 otherwise.
create_dokuwiki_page() {
    local target_id="$1"
    local body_file="$2"
    local edit_summary="$3"
    echo "📄 Creating page: $target_id"
    if curl -s -X POST "$DOKUWIKI_BASE/doku.php" \
        -d "id=$target_id" \
        -d "do=save" \
        -d "summary=$edit_summary" \
        -d "minor=1" \
        --data-urlencode "wikitext@$body_file" \
        > /dev/null; then
        echo "✅ Successfully created: $target_id"
        echo "🌐 View at: $DOKUWIKI_BASE/doku.php?id=$target_id"
        return 0
    fi
    echo "❌ Failed to create: $target_id"
    return 1
}
# ---------------------------------------------------------------------------
# Main upload flow
# ---------------------------------------------------------------------------

# Create main index page
echo ""
echo "📋 Creating main homelab index page..."
# BUG FIX: the here-doc delimiter must be UNQUOTED so $(date) in the footer
# is expanded at generation time (a quoted 'EOF' emitted the literal text
# "$(date)"). The body contains no other shell-special characters.
cat > /tmp/homelab_index.txt << EOF
====== Homelab Documentation ======
//This documentation is automatically mirrored from the homelab Git repository//
===== Quick Navigation =====
  * [[homelab:readme|Main README]] - Repository overview and quick start
  * [[homelab:docs:index|Documentation Index]] - Complete navigation guide
  * [[homelab:docs:admin:gitops_comprehensive_guide|GitOps Deployment Guide]] - Complete deployment procedures
  * [[homelab:documentation_audit_report|Documentation Audit Report]] - Recent improvements
  * [[homelab:operational_status|Operational Status]] - Current system status
  * [[homelab:monitoring_architecture|Monitoring Architecture]] - Monitoring setup
===== Infrastructure =====
  * [[homelab:docs:infrastructure:health_report|Infrastructure Health Report]] - System health status
  * [[homelab:gitops_deployment_guide|GitOps Deployment Guide]] - Deployment procedures
===== Operations =====
  * [[homelab:docs:runbooks:add_new_service|Add New Service]] - Service deployment runbook
===== About =====
This DokuWiki instance mirrors the documentation from the homelab Git repository at https://git.vish.gg/Vish/homelab
**Last Updated:** $(date)
**Source Repository:** https://git.vish.gg/Vish/homelab
**GitOps Status:** ✅ 18 active stacks, 50+ containers
EOF

create_dokuwiki_page "homelab:start" "/tmp/homelab_index.txt" "Created homelab documentation index"

# Map of local Markdown file -> DokuWiki page id.
declare -A docs_map=(
    ["$REPO_ROOT/README.md"]="homelab:readme"
    ["$REPO_ROOT/docs/INDEX.md"]="homelab:docs:index"
    ["$REPO_ROOT/docs/admin/GITOPS_COMPREHENSIVE_GUIDE.md"]="homelab:docs:admin:gitops_comprehensive_guide"
    ["$REPO_ROOT/DOCUMENTATION_AUDIT_REPORT.md"]="homelab:documentation_audit_report"
    ["$REPO_ROOT/docs/infrastructure/INFRASTRUCTURE_HEALTH_REPORT.md"]="homelab:docs:infrastructure:health_report"
    ["$REPO_ROOT/docs/runbooks/add-new-service.md"]="homelab:docs:runbooks:add_new_service"
    ["$REPO_ROOT/GITOPS_DEPLOYMENT_GUIDE.md"]="homelab:gitops_deployment_guide"
    ["$REPO_ROOT/OPERATIONAL_STATUS.md"]="homelab:operational_status"
    ["$REPO_ROOT/MONITORING_ARCHITECTURE.md"]="homelab:monitoring_architecture"
)

successful=0
total=0
for file_path in "${!docs_map[@]}"; do
    page_id="${docs_map[$file_path]}"
    total=$((total + 1))
    if [ -f "$file_path" ]; then
        echo ""
        echo "📄 Converting: $(basename "$file_path")"
        # Convert Markdown to DokuWiki syntax.
        converted_file=$(convert_md_to_dokuwiki "$file_path")
        # Prepend a provenance header (this here-doc is meant to expand).
        temp_with_header=$(mktemp)
        cat > "$temp_with_header" << EOF
====== $(basename "$file_path") ======
//This page is automatically mirrored from the homelab Git repository//
//Last updated: $(date)//
//Source: $file_path//
EOF
        cat "$converted_file" >> "$temp_with_header"
        # Upload to DokuWiki.
        if create_dokuwiki_page "$page_id" "$temp_with_header" "Updated from repository"; then
            successful=$((successful + 1))
        fi
        # Cleanup per-file temp files.
        rm -f "$converted_file" "$temp_with_header"
    else
        echo "⚠️  File not found: $file_path"
    fi
done

echo ""
echo "🎯 Upload Summary:"
echo "✅ Successful: $successful/$total"
echo "❌ Failed: $((total - successful))/$total"

if [ $successful -gt 0 ]; then
    echo ""
    echo "🌐 DokuWiki documentation available at:"
    echo "   $DOKUWIKI_BASE/doku.php?id=homelab:start"
    echo "   $DOKUWIKI_BASE/doku.php?id=homelab:readme"
    echo "   $DOKUWIKI_BASE/doku.php?id=homelab:docs:index"
fi

# Cleanup
rm -f /tmp/homelab_index.txt

echo ""
echo "✅ DokuWiki upload completed!"

212
scripts/upload-to-gitea-wiki.sh Executable file
View File

@@ -0,0 +1,212 @@
#!/bin/bash
# Gitea Wiki Upload Script
# Uploads homelab documentation to Gitea wiki via API
set -e

# Configuration
# SECURITY FIX: read the API token from the environment instead of hardcoding
# a secret in the repository. Export GITEA_TOKEN before running this script.
GITEA_TOKEN="${GITEA_TOKEN:-}"
if [[ -z "$GITEA_TOKEN" ]]; then
    echo "⚠️  GITEA_TOKEN is not set - API requests will fail with 401" >&2
fi
GITEA_URL="https://git.vish.gg"
REPO_OWNER="Vish"
REPO_NAME="homelab"
BASE_URL="$GITEA_URL/api/v1/repos/$REPO_OWNER/$REPO_NAME/wiki"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo -e "${BLUE}🚀 Starting Gitea Wiki documentation upload...${NC}"
echo ""
# Create or update a wiki page via the Gitea API.
#   $1 - page title
#   $2 - path to a Markdown file with the page content
#   $3 - commit message for the wiki revision
# Returns 0 on success, 1 on failure.
create_wiki_page() {
    local title="$1"
    local file_path="$2"
    local message="$3"

    if [[ ! -f "$file_path" ]]; then
        echo -e "${RED}❌ File not found: $file_path${NC}"
        return 1
    fi

    echo -e "${YELLOW}📄 Creating/updating wiki page: $title${NC}"

    # JSON-encode the raw file content (jq -Rs emits a single JSON string).
    local content
    content=$(cat "$file_path" | jq -Rs .)

    # Build the request payload; the API expects base64-encoded content.
    local json_payload
    json_payload=$(jq -n \
        --arg title "$title" \
        --argjson content "$content" \
        --arg message "$message" \
        '{
            title: $title,
            content_base64: ($content | @base64),
            message: $message
        }')

    # Try to create the page first.
    local response
    response=$(curl -s -w "%{http_code}" -o /tmp/wiki_response.json \
        -X POST \
        -H "Authorization: token $GITEA_TOKEN" \
        -H "Content-Type: application/json" \
        -d "$json_payload" \
        "$BASE_URL/new")

    local http_code="${response: -3}"

    if [[ "$http_code" == "201" ]]; then
        echo -e "${GREEN}✅ Successfully created: $title${NC}"
        echo -e "${BLUE}🌐 View at: $GITEA_URL/$REPO_OWNER/$REPO_NAME/wiki/$title${NC}"
        return 0
    elif [[ "$http_code" == "409" ]]; then
        # Page already exists - edit it. BUG FIX: the Gitea wiki edit endpoint
        # is PATCH /wiki/page/{pageName}; the previous POST to "$BASE_URL/$title"
        # is not a valid API route. NOTE(review): confirm against the deployed
        # Gitea version's API.
        echo -e "${YELLOW}📝 Page exists, updating: $title${NC}"
        response=$(curl -s -w "%{http_code}" -o /tmp/wiki_response.json \
            -X PATCH \
            -H "Authorization: token $GITEA_TOKEN" \
            -H "Content-Type: application/json" \
            -d "$json_payload" \
            "$BASE_URL/page/$title")
        http_code="${response: -3}"
        if [[ "$http_code" == "200" ]]; then
            echo -e "${GREEN}✅ Successfully updated: $title${NC}"
            echo -e "${BLUE}🌐 View at: $GITEA_URL/$REPO_OWNER/$REPO_NAME/wiki/$title${NC}"
            return 0
        else
            echo -e "${RED}❌ Failed to update $title (HTTP $http_code)${NC}"
            cat /tmp/wiki_response.json 2>/dev/null || echo "No response body"
            return 1
        fi
    else
        echo -e "${RED}❌ Failed to create $title (HTTP $http_code)${NC}"
        cat /tmp/wiki_response.json 2>/dev/null || echo "No response body"
        return 1
    fi
}
# Normalize an arbitrary string into a safe wiki page name:
# characters outside [a-zA-Z0-9_-] become "_", runs of "_" collapse to one,
# and a leading/trailing "_" is stripped.
# PORTABILITY FIX: the original used "s/^_\|_$//g" - "\|" alternation in a
# basic regex is a GNU sed extension; two separate expressions work on
# BSD/macOS sed as well.
sanitize_title() {
    echo "$1" | sed -e 's/[^a-zA-Z0-9_-]/_/g' -e 's/__*/_/g' -e 's/^_//' -e 's/_$//'
}
# Success and failure counters
success_count=0
total_count=0

echo -e "${BLUE}📋 Creating main homelab wiki index...${NC}"

# Create main wiki index page.
# BUG FIX: the here-doc delimiter must be UNQUOTED so $(date) in the footer
# is expanded (a quoted 'EOF' emitted the literal text "$(date)"). The body
# contains no other shell-special characters.
cat > /tmp/wiki_index.md << EOF
# Homelab Documentation Wiki
*This wiki is automatically synchronized from the homelab Git repository*
## 🎯 Quick Navigation
### 📖 Main Documentation
- [Repository README](README) - Complete repository overview
- [Documentation Index](Documentation-Index) - Master navigation guide
- [Operational Status](Operational-Status) - Current system status
### 🔧 Administration & Operations
- [GitOps Comprehensive Guide](GitOps-Comprehensive-Guide) - Complete deployment procedures ⭐
- [DokuWiki Integration](DokuWiki-Integration) - Documentation mirroring setup
- [Documentation Audit Report](Documentation-Audit-Report) - Recent improvements
### 🏗️ Infrastructure
- [Infrastructure Health Report](Infrastructure-Health-Report) - System health status
- [Monitoring Architecture](Monitoring-Architecture) - Monitoring setup
- [GitOps Deployment Guide](GitOps-Deployment-Guide) - Deployment procedures
### 📚 Runbooks & Procedures
- [Add New Service](Add-New-Service) - Service deployment runbook
## 🌐 Access Points
- **Git Repository**: https://git.vish.gg/Vish/homelab
- **DokuWiki Mirror**: http://atlantis.vish.local:8399/doku.php?id=homelab:start
- **Gitea Wiki**: https://git.vish.gg/Vish/homelab/wiki
## 📊 Repository Status
- **GitOps Status**: ✅ 18 active stacks, 50+ containers
- **Servers**: 5 active (Atlantis, Calypso, Gaming VPS, Homelab VM, Concord NUC)
- **Services**: 100+ containerized services
- **Documentation**: Comprehensive guides and runbooks
---
**Last Updated**: $(date)
**Source Repository**: https://git.vish.gg/Vish/homelab
**Maintainer**: Homelab Administrator
EOF

total_count=$((total_count + 1))
if create_wiki_page "Home" "/tmp/wiki_index.md" "Updated homelab wiki index with navigation"; then
    success_count=$((success_count + 1))
fi

# Map of wiki page title -> source file in the repository.
declare -A wiki_files=(
    ["README"]="README.md"
    ["Documentation-Index"]="docs/INDEX.md"
    ["GitOps-Comprehensive-Guide"]="docs/admin/GITOPS_COMPREHENSIVE_GUIDE.md"
    ["DokuWiki-Integration"]="docs/admin/DOKUWIKI_INTEGRATION.md"
    ["Documentation-Audit-Report"]="DOCUMENTATION_AUDIT_REPORT.md"
    ["Operational-Status"]="OPERATIONAL_STATUS.md"
    ["Infrastructure-Health-Report"]="docs/infrastructure/INFRASTRUCTURE_HEALTH_REPORT.md"
    ["Monitoring-Architecture"]="MONITORING_ARCHITECTURE.md"
    ["GitOps-Deployment-Guide"]="GITOPS_DEPLOYMENT_GUIDE.md"
    ["Add-New-Service"]="docs/runbooks/add-new-service.md"
)
echo ""
echo -e "${BLUE}📚 Uploading documentation files...${NC}"

# Upload every mapped file; missing files still count toward the total so
# the summary reports them as failures.
for wiki_title in "${!wiki_files[@]}"; do
    file_path="${wiki_files[$wiki_title]}"
    if [[ -f "$file_path" ]]; then
        echo ""
        # BUG FIX: the two variables were concatenated with no separator
        # ("$file_path$wiki_title"), producing a garbled log line.
        echo -e "${YELLOW}📄 Processing: $file_path -> $wiki_title${NC}"
        total_count=$((total_count + 1))
        if create_wiki_page "$wiki_title" "$file_path" "Updated $wiki_title from repository"; then
            success_count=$((success_count + 1))
        fi
    else
        echo -e "${RED}⚠️  File not found: $file_path${NC}"
        total_count=$((total_count + 1))
    fi
done
# Final report: counts, links, and an exit status for automation
# (0 = every page uploaded, 1 = at least one failure).
failed_count=$((total_count - success_count))
echo ""
echo -e "${BLUE}🎯 Upload Summary:${NC}"
echo -e "${GREEN}✅ Successful: $success_count/$total_count${NC}"
echo -e "${RED}❌ Failed: $failed_count/$total_count${NC}"
echo ""
echo -e "${BLUE}🌐 Gitea Wiki available at:${NC}"
echo -e "   ${BLUE}https://git.vish.gg/$REPO_OWNER/$REPO_NAME/wiki${NC}"
echo -e "   ${BLUE}https://git.vish.gg/$REPO_OWNER/$REPO_NAME/wiki/Home${NC}"
echo ""
if [[ $success_count -ne $total_count ]]; then
    echo -e "${YELLOW}⚠️  Gitea Wiki upload completed with some failures.${NC}"
    exit 1
fi
echo -e "${GREEN}✅ Gitea Wiki upload completed successfully!${NC}"
exit 0

177
scripts/validate-compose.sh Executable file
View File

@@ -0,0 +1,177 @@
#!/bin/bash
# Docker Compose Validation Script
# Validates Docker Compose files before commit to prevent broken deployments

# ANSI colors used by the log helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Leveled log helpers; each writes one colorized line to stdout.
log_info()  { echo -e "${GREEN}[INFO]${NC} $1"; }
log_warn()  { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Bail out gracefully (exit 0) when Docker tooling is unavailable, so this
# pre-commit hook never blocks commits on machines without Docker.
if ! command -v docker &> /dev/null; then
    log_warn "Docker not found. Skipping Docker Compose validation."
    exit 0
fi

if ! command -v docker-compose &> /dev/null && ! docker compose version &> /dev/null; then
    log_warn "Docker Compose not found. Skipping validation."
    exit 0
fi

# Prefer the standalone docker-compose binary; otherwise fall back to the
# "docker compose" plugin (one of the two exists, per the check above).
if command -v docker-compose &> /dev/null; then
    COMPOSE_CMD="docker-compose"
else
    COMPOSE_CMD="docker compose"
fi
# Validate a single YAML file as a Docker Compose file.
#
# Cheap skip conditions run first, deliberately in this order:
#   1. known non-compose YAML files by name,
#   2. files without a top-level "services:" block,
#   3. compose files whose env_file entries are missing locally
#      (common in checkouts that lack deployment secrets),
#   4. unreadable files (hard error), non-YAML extensions, excluded paths.
# Files that survive all of the above are run through "$COMPOSE_CMD config"
# for real validation, followed by advisory checks that only emit warnings.
#
# Arguments: $1 - path to the file to validate
# Returns:   0 on pass or skip, 1 on validation failure
validate_compose_file() {
    local file="$1"
    local filename=$(basename "$file")
    # Skip non-Docker Compose files
    if [[ "$file" == *".pre-commit-config.yaml" ]] || \
       [[ "$file" == *".yamllint" ]] || \
       [[ "$file" == *".gitea/workflows/"* ]] || \
       [[ "$file" == *"secret_key.yaml" ]] || \
       [[ "$file" == *"config.yml" ]] || \
       [[ "$file" == *"snmp.yml" ]] || \
       [[ "$file" == *"homeserver.yaml" ]]; then
        log_info "Skipping non-Docker Compose file: $file"
        return 0
    fi
    # Skip files that don't have a 'services:' block (not Docker Compose files)
    if ! grep -q "^services:" "$file" 2>/dev/null; then
        log_info "Skipping non-Docker Compose file: $file"
        return 0
    fi
    # Skip compose files with env_file references to files that don't exist locally
    # NOTE(review): "grep -A1" only captures the first list entry after each
    # "env_file:" key, so multi-entry lists are only partially checked, and
    # "\s" in the sed pattern is a GNU extension - confirm this is intended.
    if grep -q "env_file:" "$file" 2>/dev/null; then
        local compose_dir
        compose_dir=$(dirname "$file")
        local missing_env=0
        while IFS= read -r env_line; do
            local env_file
            env_file=$(echo "$env_line" | sed 's/.*-\s*//' | tr -d ' "')
            if [[ -n "$env_file" ]] && [[ "$env_file" != "~" ]] && \
               [[ ! -f "$compose_dir/$env_file" ]]; then
                missing_env=1
                break
            fi
        done < <(grep -A1 "env_file:" "$file" | grep "^.*-")
        if [[ $missing_env -eq 1 ]]; then
            log_warn "$file: Skipping validation - missing env_file dependencies"
            return 0
        fi
    fi
    log_info "Validating $file"
    # Check if file exists and is readable
    if [[ ! -r "$file" ]]; then
        log_error "Cannot read file: $file"
        return 1
    fi
    # Skip if not a compose file
    if [[ ! "$filename" =~ \.(yml|yaml)$ ]]; then
        log_info "Skipping non-YAML file: $file"
        return 0
    fi
    # Skip certain directories and files
    # NOTE(review): paths produced by "find ." carry a leading "./", which this
    # anchored pattern will not match - it only fires for bare relative paths
    # such as those passed in by pre-commit. Confirm that is the intent.
    if [[ "$file" =~ ^(archive/|ansible/|docs/|\.git/) ]]; then
        log_info "Skipping excluded path: $file"
        return 0
    fi
    # Validate Docker Compose syntax
    if ! $COMPOSE_CMD -f "$file" config > /dev/null 2>&1; then
        log_error "Docker Compose validation failed for: $file"
        log_error "Run '$COMPOSE_CMD -f $file config' to see detailed errors"
        return 1
    fi
    # Check for common issues - these are advisory only and never fail the file.
    local warnings=0
    # Check for missing version (Docker Compose v2 doesn't require it, but good practice)
    if ! grep -q "^version:" "$file" 2>/dev/null; then
        log_warn "$file: Consider adding 'version' field for clarity"
        ((warnings++))
    fi
    # Check for hardcoded localhost references (should use service names)
    if grep -q "localhost\|127\.0\.0\.1" "$file" 2>/dev/null; then
        log_warn "$file: Found localhost references - consider using service names"
        ((warnings++))
    fi
    # Check for missing restart policies on long-running services
    if grep -q "image:" "$file" && ! grep -q "restart:" "$file" 2>/dev/null; then
        log_warn "$file: Consider adding restart policy for production services"
        ((warnings++))
    fi
    if [[ $warnings -eq 0 ]]; then
        log_info "$file passed validation"
    else
        log_info "$file passed validation with $warnings warnings"
    fi
    return 0
}
# Entry point.
#   With no arguments: validate every *.yml / *.yaml under the current tree
#   (excluding archive/, ansible/, docs/ and .git/).
#   With arguments: validate exactly the files given (pre-commit mode).
# Exits non-zero if any file fails validation.
main() {
    local exit_code=0
    local files_processed=0

    if [[ $# -eq 0 ]]; then
        log_info "No files specified, validating all Docker Compose files..."
        # BUG FIX: the find expression needs grouping - previously
        # `-name "*.yml" -o -name "*.yaml" -print0` bound -print0 only to
        # *.yaml, so *.yml files matched but were never printed. Also, find
        # emits paths with a leading "./", so the exclusion regex must
        # account for that prefix or it never matches anything.
        while IFS= read -r -d '' file; do
            files_processed=$((files_processed + 1))
            if ! validate_compose_file "$file"; then
                exit_code=1
            fi
        done < <(find . \( -name "*.yml" -o -name "*.yaml" \) -print0 \
            | grep -zv -E '^\./(archive|ansible|docs|\.git)/')
    else
        # Validate the files named on the command line.
        for file in "$@"; do
            files_processed=$((files_processed + 1))
            if ! validate_compose_file "$file"; then
                exit_code=1
            fi
        done
    fi

    if [[ $exit_code -eq 0 ]]; then
        log_info "All $files_processed files passed validation!"
    else
        log_error "Some files failed validation. Please fix the errors before committing."
    fi

    exit $exit_code
}

# Run main function with all arguments
main "$@"

View File

@@ -0,0 +1,278 @@
#!/bin/bash
# 🔍 Infrastructure Status Verification Script
# Comprehensive health check for homelab infrastructure

# Strict mode: abort on errors, unset variables, and pipeline failures.
set -euo pipefail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration
ATLANTIS_IP="192.168.0.200"
ATLANTIS_SSH_PORT="60000"  # non-default SSH port on the Atlantis host
PORTAINER_URL="https://${ATLANTIS_IP}:9443"
DOKUWIKI_URL="http://${ATLANTIS_IP}:8399"
GITEA_URL="https://git.vish.gg"

echo -e "${BLUE}🏠 Homelab Infrastructure Status Verification${NC}"
echo -e "${BLUE}================================================${NC}"
echo ""
# Generic health probe: run a shell command string and grep its output.
#   $1 - human-readable service name for the log line
#   $2 - command string (executed via eval; only pass trusted, hardcoded
#        commands from this script, never external input)
#   $3 - pattern that must appear in the command's output
# Returns 0 when the pattern is found, 1 otherwise.
check_service() {
    local label="$1"
    local probe_cmd="$2"
    local want="$3"
    echo -n "Checking $label... "
    if eval "$probe_cmd" | grep -q "$want"; then
        echo -e "${GREEN}✅ OK${NC}"
        return 0
    fi
    echo -e "${RED}❌ FAILED${NC}"
    return 1
}
# HTTP health probe: fetch a URL and compare the response status code.
#   $1 - service name for the log line
#   $2 - URL to request
#   $3 - expected HTTP status code (optional, default 200)
# Returns 0 on a matching status code, 1 otherwise.
check_http_service() {
    local label="$1"
    local url="$2"
    local want_code="${3:-200}"
    echo -n "Checking $label... "
    local got_code
    # "000" stands in for "no HTTP response at all" (connection failure).
    got_code=$(curl -s -o /dev/null -w "%{http_code}" "$url" 2>/dev/null || echo "000")
    if [[ "$got_code" == "$want_code" ]]; then
        echo -e "${GREEN}✅ OK (HTTP $got_code)${NC}"
        return 0
    fi
    echo -e "${RED}❌ FAILED (HTTP $got_code)${NC}"
    return 1
}
# SSH reachability probe: run a trivial remote echo non-interactively.
#   $1 - user@host
#   $2 - SSH port
#   $3 - display name for the log line
# Returns 0 when the remote echo round-trips, 1 otherwise.
check_ssh() {
    local target="$1"
    local ssh_port="$2"
    local label="$3"
    echo -n "Checking $label SSH... "
    # BatchMode=yes prevents password prompts; the 5s connect timeout keeps
    # the overall run fast when the host is down.
    if ssh -p "$ssh_port" -o ConnectTimeout=5 -o BatchMode=yes "$target" "echo 'SSH OK'" 2>/dev/null | grep -q "SSH OK"; then
        echo -e "${GREEN}✅ OK${NC}"
        return 0
    fi
    echo -e "${RED}❌ FAILED${NC}"
    return 1
}
# Initialize counters for the final summary.
# BUG FIX: increments below use the assignment form rather than ((var++)).
# A post-increment of 0 makes (( )) evaluate to 0, which it reports as a
# failing status - under `set -e` that aborted the script on the very first
# check.
total_checks=0
passed_checks=0

echo -e "${YELLOW}🌐 Network Connectivity${NC}"
echo "------------------------"

# Basic ICMP reachability of the Atlantis host.
total_checks=$((total_checks + 1))
if ping -c 1 -W 2 "$ATLANTIS_IP" >/dev/null 2>&1; then
    echo -e "Atlantis IP connectivity... ${GREEN}✅ OK${NC}"
    passed_checks=$((passed_checks + 1))
else
    echo -e "Atlantis IP connectivity... ${RED}❌ FAILED${NC}"
fi

# Non-interactive SSH check.
total_checks=$((total_checks + 1))
if check_ssh "vish@$ATLANTIS_IP" "$ATLANTIS_SSH_PORT" "Atlantis"; then
    passed_checks=$((passed_checks + 1))
fi

echo ""
echo -e "${YELLOW}🐳 Container Management${NC}"
echo "-------------------------"

# Portainer API status endpoint; report the version when reachable.
total_checks=$((total_checks + 1))
if check_http_service "Portainer API" "$PORTAINER_URL/api/status"; then
    passed_checks=$((passed_checks + 1))
    portainer_version=$(curl -k -s "$PORTAINER_URL/api/status" 2>/dev/null | jq -r '.Version' 2>/dev/null || echo "Unknown")
    echo "   └─ Version: $portainer_version"
fi

# Running-container count via SSH ("0" also covers access failures).
total_checks=$((total_checks + 1))
echo -n "Checking container count... "
container_count=$(ssh -p "$ATLANTIS_SSH_PORT" "vish@$ATLANTIS_IP" "docker ps -q 2>/dev/null | wc -l" 2>/dev/null || echo "0")
if [[ "$container_count" -gt 0 ]]; then
    echo -e "${GREEN}✅ OK ($container_count containers)${NC}"
    passed_checks=$((passed_checks + 1))
else
    echo -e "${RED}❌ FAILED (No containers or access denied)${NC}"
fi
echo ""
echo -e "${YELLOW}📚 Documentation Systems${NC}"
echo "----------------------------"

# DokuWiki front-end plus a content probe for the homelab namespace.
# Increments use the assignment form: ((var++)) returns a failing status
# when the pre-increment value is 0 and would abort under `set -e`.
total_checks=$((total_checks + 1))
if check_http_service "DokuWiki" "$DOKUWIKI_URL/doku.php?id=homelab:start"; then
    passed_checks=$((passed_checks + 1))
    # Check if homelab documentation is accessible.
    if curl -s "$DOKUWIKI_URL/doku.php?id=homelab:start" 2>/dev/null | grep -q "homelab:start"; then
        echo "   └─ Homelab documentation: ✅ Available"
    else
        echo "   └─ Homelab documentation: ⚠️  May not be synced"
    fi
fi

# Gitea web UI plus repository reachability.
total_checks=$((total_checks + 1))
if check_http_service "Gitea" "$GITEA_URL"; then
    passed_checks=$((passed_checks + 1))
    if curl -s "$GITEA_URL/Vish/homelab" 2>/dev/null | grep -q "homelab"; then
        echo "   └─ Repository access: ✅ Available"
    else
        echo "   └─ Repository access: ⚠️  May require authentication"
    fi
fi

echo ""
echo -e "${YELLOW}🔧 GitOps Deployment${NC}"
echo "----------------------"

total_checks=$((total_checks + 1))
echo -n "Checking GitOps stacks... "
if command -v jq >/dev/null 2>&1; then
    # Unauthenticated probe: a 401 still proves the API endpoint is alive.
    if curl -k -s -o /dev/null -w "%{http_code}" "$PORTAINER_URL/api/stacks" 2>/dev/null | grep -q "401\|200"; then
        echo -e "${GREEN}✅ OK (API accessible)${NC}"
        passed_checks=$((passed_checks + 1))
        echo "   └─ Note: Authentication required for detailed stack info"
    else
        echo -e "${RED}❌ FAILED${NC}"
    fi
else
    echo -e "${YELLOW}⚠️  SKIPPED (jq not available)${NC}"
    passed_checks=$((passed_checks + 1)) # Don't count as failure
fi
echo ""
echo -e "${YELLOW}📊 System Resources${NC}"
echo "---------------------"

# Root filesystem usage on Atlantis (percentage). The "100" fallback makes an
# unreachable host surface as CRITICAL instead of passing silently.
# Increments use the assignment form: ((var++)) returns a failing status when
# the pre-increment value is 0 and would abort under `set -e`.
total_checks=$((total_checks + 1))
echo -n "Checking Atlantis disk space... "
disk_usage=$(ssh -p "$ATLANTIS_SSH_PORT" "vish@$ATLANTIS_IP" "df -h / | tail -1 | awk '{print \$5}' | sed 's/%//'" 2>/dev/null || echo "100")
if [[ "$disk_usage" -lt 90 ]]; then
    echo -e "${GREEN}✅ OK (${disk_usage}% used)${NC}"
    passed_checks=$((passed_checks + 1))
elif [[ "$disk_usage" -lt 95 ]]; then
    echo -e "${YELLOW}⚠️  WARNING (${disk_usage}% used)${NC}"
    passed_checks=$((passed_checks + 1))
else
    echo -e "${RED}❌ CRITICAL (${disk_usage}% used)${NC}"
fi

# Memory usage percentage, same fallback convention as above.
total_checks=$((total_checks + 1))
echo -n "Checking Atlantis memory... "
memory_usage=$(ssh -p "$ATLANTIS_SSH_PORT" "vish@$ATLANTIS_IP" "free | grep Mem | awk '{printf \"%.0f\", \$3/\$2 * 100}'" 2>/dev/null || echo "100")
if [[ "$memory_usage" -lt 85 ]]; then
    echo -e "${GREEN}✅ OK (${memory_usage}% used)${NC}"
    passed_checks=$((passed_checks + 1))
elif [[ "$memory_usage" -lt 95 ]]; then
    echo -e "${YELLOW}⚠️  WARNING (${memory_usage}% used)${NC}"
    passed_checks=$((passed_checks + 1))
else
    echo -e "${RED}❌ CRITICAL (${memory_usage}% used)${NC}"
fi

echo ""
echo -e "${YELLOW}🔍 Service Discovery${NC}"
echo "---------------------"

# "port:name" pairs for well-known services expected on Atlantis.
common_services=(
    "8080:Portainer Agent"
    "9443:Portainer Server"
    "8399:DokuWiki"
    "3000:Grafana"
    "9090:Prometheus"
    "8096:Jellyfin"
    "32400:Plex"
)

for service in "${common_services[@]}"; do
    port=$(echo "$service" | cut -d: -f1)
    name=$(echo "$service" | cut -d: -f2)
    total_checks=$((total_checks + 1))
    echo -n "Checking $name (port $port)... "
    # Listening check runs remotely; a missing listener is only a warning.
    if ssh -p "$ATLANTIS_SSH_PORT" "vish@$ATLANTIS_IP" "netstat -tlnp 2>/dev/null | grep -q :$port" 2>/dev/null; then
        echo -e "${GREEN}✅ LISTENING${NC}"
        passed_checks=$((passed_checks + 1))
    else
        echo -e "${YELLOW}⚠️  NOT LISTENING${NC}"
    fi
done
# Final report: aggregate counts, rate the run, print quick-access links,
# and exit 0 when at least 75% of checks passed, 1 otherwise.
echo ""
echo -e "${BLUE}📊 Summary${NC}"
echo "============"

rate=$((passed_checks * 100 / total_checks))
failed=$((total_checks - passed_checks))

echo "Total checks: $total_checks"
echo "Passed: $passed_checks"
echo "Failed: $failed"
echo -n "Success rate: "
if [[ $rate -ge 90 ]]; then
    echo -e "${GREEN}$rate% ✅ EXCELLENT${NC}"
elif [[ $rate -ge 75 ]]; then
    echo -e "${YELLOW}$rate% ⚠️  GOOD${NC}"
elif [[ $rate -ge 50 ]]; then
    echo -e "${YELLOW}$rate% ⚠️  NEEDS ATTENTION${NC}"
else
    echo -e "${RED}$rate% ❌ CRITICAL${NC}"
fi

echo ""
echo -e "${BLUE}🔗 Quick Access Links${NC}"
echo "======================"
echo "• Portainer: https://$ATLANTIS_IP:9443"
echo "• DokuWiki: http://$ATLANTIS_IP:8399/doku.php?id=homelab:start"
echo "• Gitea: $GITEA_URL/Vish/homelab"
echo "• SSH: ssh -p $ATLANTIS_SSH_PORT vish@$ATLANTIS_IP"
echo ""
echo -e "${BLUE}📅 Report Generated: $(date)${NC}"

# Exit with appropriate code for automation.
if [[ $rate -ge 75 ]]; then
    exit 0
else
    exit 1
fi

View File

@@ -0,0 +1,69 @@
#!/bin/bash
# Portainer watchdog — recovers from chisel panic crashes that leave
# orphaned docker-proxy processes blocking port re-allocation.
#
# Deploy to atlantis: /usr/local/bin/watchdog-portainer.sh
# Cron (every 5 min): */5 * * * * /usr/local/bin/watchdog-portainer.sh

# Absolute docker path (presumably because cron's PATH is minimal — confirm).
DOCKER=/usr/local/bin/docker
# Name of the container this watchdog manages.
CONTAINER=portainer
# Host ports Portainer publishes; orphaned docker-proxy processes holding
# these ports are what block a restart.
PORTS=(8000 9443 10000)
# Local ntfy topic endpoint used for push notifications.
NTFY_URL="http://localhost:48978/watchdog"
# Syslog tag used by log() below.
LOG_TAG="watchdog-portainer"

# log MESSAGE... — write a single line to syslog under $LOG_TAG.
log() { logger -t "$LOG_TAG" "$*"; }
# Send a push notification via ntfy. Failures are swallowed (|| true) so the
# watchdog never dies just because notifications are down.
#   $1 - notification title
#   $2 - message body
#   $3 - ntfy priority (optional, defaults to "default")
notify() {
    local title="$1"
    local msg="$2"
    local priority="${3:-default}"
    curl -s -o /dev/null \
        -H "Title: $title" \
        -H "Priority: $priority" \
        -d "$msg" \
        "$NTFY_URL" || true
}
# --- Recovery logic --------------------------------------------------------

# Is portainer already running? Nothing to do then.
if sudo $DOCKER ps --filter "name=^/${CONTAINER}$" --format '{{.Names}}' | grep -q "^${CONTAINER}$"; then
    exit 0
fi

# Container exists but isn't running — try to start it. The assignment's
# exit status is the command's, so it can be tested directly.
log "Portainer not running — attempting start"
if start_output=$(sudo $DOCKER start "$CONTAINER" 2>&1); then
    log "Portainer started successfully"
    notify "Portainer recovered" "Started successfully on atlantis" "default"
    exit 0
fi

# Start failed — check if it's a port conflict from orphaned docker-proxy
# processes left behind by a chisel crash.
if echo "$start_output" | grep -q "port is already allocated"; then
    log "Port conflict detected — cleaning up orphaned docker-proxy processes"
    killed_any=false
    for port in "${PORTS[@]}"; do
        # Find docker-proxy PIDs holding these specific TCP ports.
        pids=$(sudo netstat -tulpn 2>/dev/null \
            | awk -v p="$port" '$4 ~ ":"p"$" && $7 ~ /docker-proxy/ {split($7,a,"/"); print a[1]}')
        for pid in $pids; do
            log "Killing orphaned docker-proxy PID $pid (port $port)"
            sudo kill "$pid" && killed_any=true
        done
    done
    if $killed_any; then
        sleep 2
        if start_output=$(sudo $DOCKER start "$CONTAINER" 2>&1); then
            log "Portainer started after port cleanup"
            notify "Portainer recovered" "Cleared orphaned docker-proxy processes and started successfully on atlantis" "default"
            exit 0
        fi
    fi
fi

# Still failed — escalate.
log "ERROR: Could not recover Portainer: $start_output"
# BUG FIX: "\n" inside double quotes is sent as a literal backslash-n; use
# $'\n\n' so the notification body contains real blank-line separation.
notify "Portainer recovery FAILED" "Could not start on atlantis — manual intervention needed."$'\n\n'"$start_output" "urgent"
exit 1