Sanitized mirror from private repository - 2026-04-18 11:19:59 UTC
Some checks failed
Documentation / Build Docusaurus (push) Failing after 5m14s
Documentation / Deploy to GitHub Pages (push) Has been skipped

This commit is contained in:
Gitea Mirror Bot
2026-04-18 11:19:59 +00:00
commit fb00a325d1
1418 changed files with 359990 additions and 0 deletions

View File

@@ -0,0 +1,104 @@
# Nginx Proxy Manager - GitOps Deployment
This directory contains the GitOps deployment configuration for Nginx Proxy Manager on the Calypso server.
## 🚀 Quick Start
```bash
# Deploy NPM
./deploy.sh
# Check status
./deploy.sh status
# View logs
./deploy.sh logs
```
## 🌐 Access URLs
- **Admin UI**: http://192.168.0.250:81
- **HTTP Proxy**: http://192.168.0.250:8880 (router forwards external port 80 here)
- **HTTPS Proxy**: https://192.168.0.250:8443 (router forwards external port 443 here)
## 🔧 Configuration
### Port Mapping
- `8880:80` - HTTP proxy (router forwards 80→8880)
- `8443:443` - HTTPS proxy (router forwards 443→8443)
- `81:81` - Admin interface
### Data Storage
- **Config**: `/volume1/docker/nginx-proxy-manager/data`
- **SSL Certs**: `/volume1/docker/nginx-proxy-manager/letsencrypt`
## 🛠️ Deployment Commands
```bash
# Full deployment
./deploy.sh deploy
# Management
./deploy.sh restart # Restart service
./deploy.sh stop # Stop service
./deploy.sh update # Update images and redeploy
./deploy.sh status # Check service status
./deploy.sh logs # View service logs
./deploy.sh cleanup # Clean up existing containers
```
## 🔐 Initial Setup
1. **First Login**:
- URL: http://192.168.0.250:81
- Email: `admin@example.com`
   - Password: `changeme` (NPM's stock default — the deploy script prints the same)
2. **Change Default Credentials**:
- Update email and password immediately
- Enable 2FA if desired
3. **Configure Proxy Hosts**:
- Add your domains (*.vish.gg, *.thevish.io)
- Configure SSL certificates
- Set up forwarding rules
## 🌍 Router Configuration
Ensure your router forwards these ports:
- **Port 80** → **8880** (HTTP)
- **Port 443** → **8443** (HTTPS)
## 🔄 Migration Notes
This deployment uses alternate ports (8880/8443) to avoid conflicts with Synology's built-in nginx service. Once migration is complete and Synology nginx is disabled, you can change the ports to standard 80/443.
## 🚨 Troubleshooting
### Service Won't Start
```bash
# Clean up and redeploy
./deploy.sh cleanup
./deploy.sh deploy
```
### Can't Access Admin UI
```bash
# Check service status
./deploy.sh status
# Check logs
./deploy.sh logs
```
### SSL Certificate Issues
1. Ensure domains point to your external IP (YOUR_WAN_IP)
2. Check router port forwarding
3. Verify Cloudflare DNS settings
## 📊 Status
**Status**: ✅ **ACTIVE DEPLOYMENT** (GitOps)
- **Version**: Latest (jc21/nginx-proxy-manager)
- **Deployed**: 2026-02-16
- **External Access**: ✅ Configured via router forwarding

View File

@@ -0,0 +1,181 @@
#!/bin/bash
# Nginx Proxy Manager - GitOps Deployment Script
# Deploys NPM to Calypso server with proper port configuration
#
# Usage: ./deploy.sh [deploy|restart|stop|logs|status|update|cleanup]
set -euo pipefail

# Configuration — marked readonly: nothing below ever reassigns these.
readonly SERVICE_NAME="nginx-proxy-manager"
readonly REMOTE_HOST="Vish@192.168.0.250"                  # SSH target (Calypso NAS)
readonly SSH_PORT="62000"                                  # non-standard SSH port on the NAS
readonly REMOTE_PATH="/volume1/docker/nginx-proxy-manager" # deployment root on the NAS
readonly COMPOSE_FILE="docker-compose.yml"                 # expected in the current directory

# Colors for output
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color
# Logging functions
# log: emit a blue, timestamped informational line on stdout.
log() {
  printf "${BLUE}[%s] %b${NC}\n" "$(date '+%Y-%m-%d %H:%M:%S')" "$1"
}
# success: emit a green confirmation line on stdout.
success() {
  printf '%b\n' "${GREEN}$1${NC}"
}
# warning: emit a yellow, emoji-prefixed warning line on stdout.
warning() {
  printf '%b\n' "${YELLOW}⚠️ $1${NC}"
}
# error: emit a red message on stdout, then abort the script with status 1.
error() {
  printf '%b\n' "${RED}$1${NC}"
  exit 1
}
# Verify everything a remote deployment needs before touching the NAS:
# the compose file must exist locally, and the SSH target must be reachable.
# Calls error() (which exits) on the first failed check.
check_prerequisites() {
if [[ ! -f "$COMPOSE_FILE" ]]; then
error "docker-compose.yml not found in current directory"
fi
# -q: suppress banner noise; 'exit' makes this a pure connectivity probe.
if ! ssh -q -p "$SSH_PORT" "$REMOTE_HOST" exit; then
error "Cannot connect to $REMOTE_HOST"
fi
}
# Remove any pre-existing NPM container on the remote host so a fresh
# deploy cannot collide on the container name or published ports.
# Every remote command is best-effort ('|| true'): it is normal for
# nothing to exist yet on a first deploy.
cleanup_existing() {
log "Cleaning up existing NPM containers..."
# Stop and remove any existing NPM containers
ssh -p "$SSH_PORT" "$REMOTE_HOST" "sudo /usr/local/bin/docker stop nginx-proxy-manager 2>/dev/null || true"
ssh -p "$SSH_PORT" "$REMOTE_HOST" "sudo /usr/local/bin/docker rm nginx-proxy-manager 2>/dev/null || true"
# Clean up any orphaned containers
ssh -p "$SSH_PORT" "$REMOTE_HOST" "sudo /usr/local/bin/docker container prune -f 2>/dev/null || true"
success "Cleanup complete"
}
# Full remote deployment: create the data directories, push the compose
# file, bring the stack up, then do a best-effort "is it running" check.
# Assumes check_prerequisites() already validated SSH connectivity.
deploy() {
log "Deploying $SERVICE_NAME to $REMOTE_HOST..."
# Create required directories
log "Creating required directories..."
# Brace expansion runs in the *remote* shell: creates data/ and letsencrypt/.
ssh -p "$SSH_PORT" "$REMOTE_HOST" "mkdir -p $REMOTE_PATH/{data,letsencrypt}"
# Copy compose file
log "Copying docker-compose.yml to $REMOTE_HOST:$REMOTE_PATH/"
# Streams the local file over SSH stdin into 'cat' — avoids needing scp/rsync.
ssh -p "$SSH_PORT" "$REMOTE_HOST" "cat > $REMOTE_PATH/docker-compose.yml" < "$COMPOSE_FILE"
# Deploy services
log "Starting NPM services..."
ssh -p "$SSH_PORT" "$REMOTE_HOST" "cd $REMOTE_PATH && sudo /usr/local/bin/docker-compose up -d"
# Wait for services to be healthy
log "Waiting for services to be healthy..."
# Fixed grace period: NPM needs a moment before 'docker ps' reports Up.
sleep 15
# Check status
if ssh -p "$SSH_PORT" "$REMOTE_HOST" "sudo /usr/local/bin/docker ps | grep -q 'nginx-proxy-manager.*Up'"; then
success "$SERVICE_NAME deployed successfully!"
log "Admin UI: http://192.168.0.250:81"
log "HTTP Proxy: http://192.168.0.250:8880"
log "HTTPS Proxy: https://192.168.0.250:8443"
warning "Default login: admin@example.com / changeme"
warning "Make sure your router forwards:"
warning " Port 80 → 8880 (HTTP)"
warning " Port 443 → 8443 (HTTPS)"
else
# Not fatal: the container may simply still be starting up.
warning "Service started but may not be fully healthy yet. Check logs with: ./deploy.sh logs"
fi
}
# Restart the running compose stack in place (no image pull, no recreate).
restart() {
log "Restarting $SERVICE_NAME..."
ssh -p "$SSH_PORT" "$REMOTE_HOST" "cd $REMOTE_PATH && sudo /usr/local/bin/docker-compose restart"
success "Service restarted"
}
# Stop and remove the stack ('down' removes containers/network, keeps volumes).
stop() {
log "Stopping $SERVICE_NAME..."
ssh -p "$SSH_PORT" "$REMOTE_HOST" "cd $REMOTE_PATH && sudo /usr/local/bin/docker-compose down"
success "Service stopped"
}
# Stream container logs; -f follows, so this blocks until Ctrl-C.
logs() {
log "Showing logs for $SERVICE_NAME..."
ssh -p "$SSH_PORT" "$REMOTE_HOST" "sudo /usr/local/bin/docker logs -f nginx-proxy-manager"
}
# Show the remote container's docker status and probe the admin UI over HTTP.
status() {
log "Checking status of $SERVICE_NAME services..."
echo
# grep -E keeps the table header (NAMES) plus the NPM row only.
ssh -p "$SSH_PORT" "$REMOTE_HOST" "sudo /usr/local/bin/docker ps --format 'table {{.Names}}\t{{.Image}}\t{{.Status}}\t{{.Ports}}' | grep -E '(NAMES|nginx-proxy-manager)'"
echo
# Test connectivity
# 200/302/401 all mean the UI is alive (401 = login wall, 302 = redirect).
# NOTE: with 'set -o pipefail' a failed curl makes the whole condition false,
# so an unreachable host falls through to the warning — intentional.
if curl -s -o /dev/null -w "%{http_code}" "http://192.168.0.250:81" | grep -q "200\|302\|401"; then
success "NPM Admin UI is responding at http://192.168.0.250:81"
else
warning "NPM Admin UI is not responding"
fi
}
# Pull the latest images, then 'up -d' so compose recreates only
# containers whose image actually changed.
update() {
log "Updating $SERVICE_NAME..."
ssh -p "$SSH_PORT" "$REMOTE_HOST" "cd $REMOTE_PATH && sudo /usr/local/bin/docker-compose pull"
ssh -p "$SSH_PORT" "$REMOTE_HOST" "cd $REMOTE_PATH && sudo /usr/local/bin/docker-compose up -d"
success "Service updated"
}
# Main execution — dispatch the requested sub-command; defaults to "deploy".
COMMAND=${1:-deploy}

case "$COMMAND" in
  deploy)
    check_prerequisites
    cleanup_existing
    deploy
    ;;
  restart)
    check_prerequisites
    restart
    ;;
  stop)
    check_prerequisites
    stop
    ;;
  logs)
    check_prerequisites
    logs
    ;;
  status)
    check_prerequisites
    status
    ;;
  update)
    check_prerequisites
    update
    ;;
  cleanup)
    check_prerequisites
    cleanup_existing
    ;;
  *)
    # Unknown command: print usage and fail. Here-doc keeps the help text
    # in one readable chunk; $0 expands inside the unquoted delimiter.
    cat <<USAGE
Usage: $0 [deploy|restart|stop|logs|status|update|cleanup]

Commands:
 deploy - Deploy/update the service (default)
 restart - Restart the service
 stop - Stop the service
 logs - Show service logs
 status - Show service status
 update - Pull latest images and redeploy
 cleanup - Clean up existing containers
USAGE
    exit 1
    ;;
esac

View File

@@ -0,0 +1,46 @@
# Nginx Proxy Manager - Reverse Proxy with GUI
# Docs: https://nginxproxymanager.com/
# Deployed to: Calypso (DS723+)
# Domains: *.vish.gg, *.thevish.io
#
# REPLACES: Synology DSM Reverse Proxy
# INTEGRATES: Authentik SSO via Forward Auth
#
# PORTS (host -> container):
# - 8880 -> 80:  HTTP (redirect to HTTPS; router forwards WAN 80 here)
# - 8443 -> 443: HTTPS (main proxy; router forwards WAN 443 here)
# - 81   -> 81:  Admin UI
#
# DISASTER RECOVERY:
# - Config: /volume1/docker/nginx-proxy-manager/data
# - SSL Certs: /volume1/docker/nginx-proxy-manager/letsencrypt
# - Database: SQLite in data directory
services:
  nginx-proxy-manager:
    image: jc21/nginx-proxy-manager:latest
    container_name: nginx-proxy-manager
    restart: unless-stopped
    ports:
      # Using alternate ports during migration (Synology nginx on 80/443)
      # Change to 80:80 and 443:443 after migration complete
      - "8880:80" # HTTP (temp port)
      - "8443:443" # HTTPS (temp port)
      - "81:81" # Admin UI
    environment:
      # Disable IPv6 if not needed
      DISABLE_IPV6: "true"
    volumes:
      - /volume1/docker/nginx-proxy-manager/data:/data
      - /volume1/docker/nginx-proxy-manager/letsencrypt:/etc/letsencrypt
    networks:
      - npm-network
    healthcheck:
      # NPM's documented health probe lives at /usr/bin/check-health;
      # the previous /bin/check-health path is undocumented and can leave
      # the container permanently reported "unhealthy".
      test: ["CMD", "/usr/bin/check-health"]
      interval: 30s
      timeout: 10s
      retries: 3
networks:
  npm-network:
    driver: bridge