Sanitized mirror from private repository - 2026-04-04 03:48:45 UTC
Some checks failed
Documentation / Build Docusaurus (push) Failing after 17m30s
Documentation / Deploy to GitHub Pages (push) Has been skipped

This commit is contained in:
Gitea Mirror Bot
2026-04-04 03:48:45 +00:00
commit 6b5bdf7b8d
1319 changed files with 336964 additions and 0 deletions

View File

@@ -0,0 +1,192 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import type {RouteRateLimitConfig} from '~/middleware/RateLimitMiddleware';
/**
 * Per-route rate-limit buckets for the auth, MFA, phone, handoff, and sudo
 * endpoints. Each entry names a bucket key and its window configuration
 * (`limit` requests per `windowMs` milliseconds).
 *
 * `satisfies` validates every entry against {@link RouteRateLimitConfig}
 * without widening, so each key keeps its narrow literal type. The previous
 * per-entry `as RouteRateLimitConfig` assertions silently bypassed
 * excess-property checking (a typo'd field would compile) and defeated the
 * trailing `as const`.
 */
export const AuthRateLimitConfigs = {
  AUTH_REGISTER: {
    bucket: 'auth:register',
    config: {limit: 50, windowMs: 60000},
  },
  AUTH_LOGIN: {
    bucket: 'auth:login',
    config: {limit: 50, windowMs: 60000},
  },
  AUTH_LOGIN_MFA: {
    bucket: 'auth:login:mfa',
    config: {limit: 20, windowMs: 10000},
  },
  AUTH_VERIFY_EMAIL: {
    bucket: 'auth:verify',
    config: {limit: 10, windowMs: 60000},
  },
  AUTH_RESEND_VERIFICATION: {
    bucket: 'auth:verify:resend',
    config: {limit: 10, windowMs: 60000},
  },
  AUTH_FORGOT_PASSWORD: {
    bucket: 'auth:forgot',
    config: {limit: 5, windowMs: 60000},
  },
  AUTH_RESET_PASSWORD: {
    bucket: 'auth:reset',
    config: {limit: 10, windowMs: 60000},
  },
  AUTH_EMAIL_REVERT: {
    bucket: 'auth:email_revert',
    config: {limit: 10, windowMs: 60000},
  },
  AUTH_SESSIONS_GET: {
    bucket: 'auth:sessions',
    config: {limit: 40, windowMs: 10000},
  },
  AUTH_SESSIONS_LOGOUT: {
    bucket: 'auth:sessions:logout',
    config: {limit: 20, windowMs: 10000},
  },
  AUTH_AUTHORIZE_IP: {
    bucket: 'auth:authorize_ip',
    config: {limit: 5, windowMs: 60000},
  },
  AUTH_IP_AUTHORIZATION_RESEND: {
    bucket: 'auth:ip_authorization_resend',
    config: {limit: 5, windowMs: 60000},
  },
  AUTH_IP_AUTHORIZATION_STREAM: {
    bucket: 'auth:ip_authorization_stream',
    config: {limit: 30, windowMs: 60000},
  },
  AUTH_LOGOUT: {
    bucket: 'auth:logout',
    config: {limit: 20, windowMs: 10000},
  },
  AUTH_WEBAUTHN_OPTIONS: {
    bucket: 'auth:webauthn:options',
    config: {limit: 20, windowMs: 10000},
  },
  AUTH_WEBAUTHN_AUTHENTICATE: {
    bucket: 'auth:webauthn:authenticate',
    config: {limit: 10, windowMs: 10000},
  },
  MFA_SMS_ENABLE: {
    bucket: 'mfa:sms:enable',
    config: {limit: 10, windowMs: 60000},
  },
  MFA_SMS_DISABLE: {
    bucket: 'mfa:sms:disable',
    config: {limit: 10, windowMs: 60000},
  },
  MFA_WEBAUTHN_LIST: {
    bucket: 'mfa:webauthn:list',
    config: {limit: 40, windowMs: 10000},
  },
  MFA_WEBAUTHN_REGISTRATION_OPTIONS: {
    bucket: 'mfa:webauthn:registration_options',
    config: {limit: 20, windowMs: 10000},
  },
  MFA_WEBAUTHN_REGISTER: {
    bucket: 'mfa:webauthn:register',
    config: {limit: 10, windowMs: 60000},
  },
  MFA_WEBAUTHN_UPDATE: {
    bucket: 'mfa:webauthn:update',
    config: {limit: 20, windowMs: 10000},
  },
  MFA_WEBAUTHN_DELETE: {
    bucket: 'mfa:webauthn:delete',
    config: {limit: 10, windowMs: 60000},
  },
  PHONE_SEND_VERIFICATION: {
    bucket: 'phone:send_verification',
    config: {limit: 5, windowMs: 60000},
  },
  PHONE_VERIFY_CODE: {
    bucket: 'phone:verify_code',
    config: {limit: 10, windowMs: 60000},
  },
  PHONE_ADD: {
    bucket: 'phone:add',
    config: {limit: 10, windowMs: 60000},
  },
  PHONE_REMOVE: {
    bucket: 'phone:remove',
    config: {limit: 10, windowMs: 60000},
  },
  AUTH_HANDOFF_INITIATE: {
    bucket: 'auth:handoff:initiate',
    config: {limit: 10, windowMs: 60000},
  },
  AUTH_HANDOFF_COMPLETE: {
    bucket: 'auth:handoff:complete',
    config: {limit: 10, windowMs: 60000},
  },
  AUTH_HANDOFF_STATUS: {
    bucket: 'auth:handoff:status',
    config: {limit: 60, windowMs: 60000},
  },
  AUTH_HANDOFF_CANCEL: {
    bucket: 'auth:handoff:cancel',
    config: {limit: 10, windowMs: 60000},
  },
  SUDO_SMS_SEND: {
    bucket: 'sudo:sms:send',
    config: {limit: 5, windowMs: 60000},
  },
  SUDO_WEBAUTHN_OPTIONS: {
    bucket: 'sudo:webauthn:options',
    config: {limit: 10, windowMs: 60000},
  },
  SUDO_MFA_METHODS: {
    bucket: 'sudo:mfa:methods',
    config: {limit: 20, windowMs: 60000},
  },
} as const satisfies Record<string, RouteRateLimitConfig>;

View File

@@ -0,0 +1,116 @@
# Fluxer Branch Management Guide
## Current Setup
- **Branch**: `canary` (development/testing branch)
- **Repository**: https://git.vish.gg/Vish/homelab.git
- **Purpose**: Contains human verification fixes and custom configurations
## Why Canary Branch?
- `canary` is Fluxer's development branch - perfect for fixes and testing
- Keeps your modifications separate from stable releases
- Allows easy updates without breaking working configurations
- Industry standard for development/testing deployments
## Updating Your Branch
### 1. Update Your Custom Fixes
```bash
cd fluxer
git checkout canary
git pull origin canary
```
### 2. Get Upstream Fluxer Updates (Optional)
```bash
# Add upstream if not already added
git remote add upstream https://github.com/fluxerapp/fluxer.git
# Fetch and merge upstream changes
git fetch upstream
git merge upstream/canary
# Push merged changes back to your repo
git push origin canary
```
### 3. Update Just Your Fixes
```bash
# Make your changes to fix files
# Then commit and push
git add .
git commit -m "update: improve human verification fixes"
git push origin canary
```
## Branch Safety
### ✅ Safe Operations
- Working on `canary` branch
- Pulling from your own `origin/canary`
- Making fixes to verification/rate limiting
- Testing new configurations
### ⚠️ Be Careful With
- Merging upstream changes (test first)
- Major version updates from upstream
- Changing core database schemas
### 🚫 Avoid
- Working directly on `main` branch
- Force pushing (`git push --force`)
- Deleting the branch accidentally
## Quick Commands Reference
```bash
# Check current branch
git branch
# Switch to canary (if not already there)
git checkout canary
# See what's changed
git status
git log --oneline -10
# Update from your repo
git pull origin canary
# Update one-liner URLs after changes
# Complete setup: https://git.vish.gg/Vish/homelab/raw/branch/canary/fluxer/complete-setup.sh
# Quick fix: https://git.vish.gg/Vish/homelab/raw/branch/canary/fluxer/fix-human-verification.sh
```
## Deployment Strategy
1. **Development**: Work on `canary` branch (current setup)
2. **Testing**: Use the one-liner installers to test
3. **Production**: Deploy from `canary` when stable
4. **Rollback**: Keep previous working commits tagged
## 🎉 Branch Lifecycle Complete - Mission Accomplished!
### ✅ Canary Branch Successfully Merged and Removed
The `canary` branch has completed its mission and been safely removed. (The branch-workflow sections above are retained for historical reference only — they no longer apply.)
1. **✅ Development Complete**: All human verification fixes developed and tested
2. **✅ Integration Complete**: Fixes moved to production structure in `homelab/deployments/fluxer-seattle/`
3. **✅ Production Ready**: One-liner installers created and tested
4. **✅ Cleanup Complete**: Canary branch merged and safely removed (February 2025)
### 🚀 Production URLs (Now Live)
- **Complete Setup**: `curl -sSL https://git.vish.gg/Vish/homelab/raw/branch/main/deployments/fluxer-seattle/complete-setup.sh | bash`
- **Quick Fix**: `curl -sSL https://git.vish.gg/Vish/homelab/raw/branch/main/deployments/fluxer-seattle/fix-human-verification.sh | bash`
### 🏗️ New Deployment Structure
All fixes are now permanently available in the main branch under:
```
homelab/deployments/fluxer-seattle/
├── complete-setup.sh # Full installation
├── fix-human-verification.sh # Fix existing installations
├── AuthRateLimitConfig.ts # Updated rate limits
└── README.md # Comprehensive documentation
```
**The human verification nightmare is officially over! 🌊**

View File

@@ -0,0 +1,218 @@
# 🌊 Fluxer Seattle Deployment
> **Seattle-themed Fluxer deployment with human verification fixes for st.vish.gg**
This deployment contains all the fixes and configurations needed to run Fluxer without human verification issues, optimized for public access with friends.
## 🚀 Quick Start
### One-liner Complete Setup
```bash
curl -sSL https://git.vish.gg/Vish/homelab/raw/branch/main/deployments/fluxer-seattle/complete-setup.sh | bash
```
### One-liner Fix Only (for existing installations)
```bash
curl -sSL https://git.vish.gg/Vish/homelab/raw/branch/main/deployments/fluxer-seattle/fix-human-verification.sh | bash
```
## 📁 Files Included
### 🔧 Setup Scripts
- **`complete-setup.sh`** - Full Fluxer installation with all fixes applied
- **`fix-human-verification.sh`** - Apply fixes to existing Fluxer installation
### ⚙️ Configuration Files
- **`AuthRateLimitConfig.ts`** - Updated rate limiting (50 requests/60 seconds)
### 📚 Documentation
- **`BRANCH_MANAGEMENT.md`** - Guide for managing development branches
- **`README.md`** - This file
## 🛠️ What These Fixes Do
### 1. **Rate Limit Fixes**
- Increases the registration rate limit from 10 requests per 10 seconds to 50 requests per 60 seconds
- Prevents "too many requests" errors during friend signups
- Clears Redis cache to reset existing rate limit counters
### 2. **Human Verification Bypass**
- Disables manual review system that blocks new registrations
- Removes verification requirements for public access
- Allows immediate account activation
### 3. **Database Cleanup**
- Clears stuck accounts from verification queues
- Resets user states that prevent login
- Fixes existing accounts that got stuck in verification
## 🏗️ Architecture
```
st.vish.gg (Fluxer Instance)
├── API Service (fluxer_api)
│ ├── Rate Limiting ✅ Fixed
│ ├── Auth System ✅ Bypassed
│ └── Manual Review ✅ Disabled
├── Database (PostgreSQL)
│ ├── User States ✅ Cleaned
│ └── Verification Queue ✅ Cleared
└── Cache (Redis)
└── Rate Limits ✅ Reset
```
## 🔄 Deployment Process
### From Scratch
1. **Clone Repository**: Gets latest Fluxer code
2. **Apply Fixes**: Modifies configuration files
3. **Setup Database**: Configures PostgreSQL with proper settings
4. **Clear Caches**: Resets Redis and clears stuck states
5. **Start Services**: Launches all Fluxer components
6. **Verify Setup**: Tests registration and login flows
### Existing Installation
1. **Backup Current State**: Saves existing configuration
2. **Apply Configuration Changes**: Updates rate limits and auth settings
3. **Clear Stuck Data**: Removes verification blocks
4. **Restart Services**: Applies changes
5. **Test Functionality**: Verifies fixes work
## 🌐 Public Access Configuration
### Domain Setup
- **Primary**: `st.vish.gg`
- **SSL**: Automatic via Cloudflare
- **CDN**: Cloudflare proxy enabled
### Security Settings
- **Rate Limiting**: Generous but not unlimited (50/60sec)
- **Registration**: Open to public
- **Verification**: Disabled for immediate access
- **Manual Review**: Bypassed
## 🔍 Troubleshooting
### Common Issues
#### "Too Many Requests" Error
```bash
# Clear Redis cache
docker exec fluxer_redis redis-cli FLUSHALL
# Restart API service
docker restart fluxer_api
```
#### Users Stuck in Verification
```bash
# Run the fix script
curl -sSL https://git.vish.gg/Vish/homelab/raw/branch/main/deployments/fluxer-seattle/fix-human-verification.sh | bash
```
#### Service Won't Start
```bash
# Check logs
docker logs fluxer_api
docker logs fluxer_gateway
# Restart all services
docker-compose restart
```
## 📊 Monitoring
### Health Checks
- **API Health**: `https://st.vish.gg/api/health`
- **Gateway Status**: `https://st.vish.gg/gateway/health`
- **Database Connection**: Check via API logs
### Key Metrics
- **Registration Success Rate**: Should be >95%
- **Login Success Rate**: Should be >98%
- **API Response Time**: Should be <500ms
- **Error Rate**: Should be <1%
## 🛡️ Admin Panel Setup
### Overview
Fluxer has an admin panel at `https://st.vish.gg/admin` using its own OAuth2 login.
### Required Configuration (in `dev/.env`)
```
ADMIN_OAUTH2_CLIENT_ID=<app id from secret.txt>
ADMIN_OAUTH2_CLIENT_SECRET=<secret from secret.txt>
FLUXER_PATH_ADMIN=/
FLUXER_ADMIN_ENDPOINT=https://st.vish.gg/admin
```
**Important**: Set `FLUXER_PATH_ADMIN=/` (not `/admin`) because Caddy already strips the `/admin` prefix before forwarding to the admin container.
### Grant Admin Access (Cassandra)
Replace `<YOUR_USER_ID>` with the numeric user ID from Cassandra:
```bash
docker exec dev-cassandra-1 cqlsh -e \
"UPDATE fluxer.users SET acls = {'*'} WHERE user_id = <YOUR_USER_ID>;"
```
### Fix: Admin API Routing (compose.yaml)
The admin container must call the API via the internal Docker network, not the external Cloudflare URL, to avoid intermittent timeouts causing 403 errors on `/storage` and other metrics pages.
In `dev/compose.yaml`, under the `admin` service's `environment`, add:
```yaml
- FLUXER_API_PUBLIC_ENDPOINT=http://api:8080
```
### Known Issues
- **"Forbidden: requires metrics:view permission"** on storage/jobs/metrics pages: caused by the admin calling the API through the external HTTPS URL (with Cloudflare latency). Fixed by the `FLUXER_API_PUBLIC_ENDPOINT=http://api:8080` override above.
- **"You find yourself in a strange place"** after login: user account has no admin ACLs. Fix with the Cassandra UPDATE above.
- **Double `/admin/admin/dashboard`** redirect: `FLUXER_PATH_ADMIN` was set to `/admin` instead of `/`.
- **Stale build cache**: if admin behaves unexpectedly after config changes, run:
```bash
docker volume rm dev_admin_build
docker compose -f dev/compose.yaml up -d admin
```
## 🔐 Security Considerations
### What's Disabled
- ❌ Manual review system
- ❌ Phone verification requirements
- ❌ Email verification for immediate access
- ❌ Strict rate limiting
### What's Still Protected
- ✅ Password requirements
- ✅ Basic spam protection
- ✅ SQL injection prevention
- ✅ XSS protection
- ✅ CSRF tokens
## 🚀 Future Updates
### Updating Fixes
```bash
cd /path/to/homelab
git pull origin main
# Re-run setup if needed
curl -sSL https://git.vish.gg/Vish/homelab/raw/branch/main/deployments/fluxer-seattle/complete-setup.sh | bash
```
### Monitoring for Issues
- Watch registration success rates
- Monitor API error logs
- Check for new verification requirements in Fluxer updates
## 📞 Support
### Quick Fixes
1. **Registration Issues**: Run `fix-human-verification.sh`
2. **Rate Limit Issues**: Clear Redis cache
3. **Service Issues**: Check Docker logs and restart
### Getting Help
- Check the troubleshooting section above
- Review Docker logs for specific errors
- Test with the health check endpoints
---
**🌊 Fluxer Seattle - Making Discord alternatives accessible for everyone!**

View File

@@ -0,0 +1,319 @@
#!/bin/bash
# Fluxer Complete Setup & Configuration - One-liner Installer
# This script clones, builds, configures, and fixes Fluxer for immediate use
# Usage: curl -sSL https://git.vish.gg/Vish/homelab/raw/branch/main/deployments/fluxer-seattle/complete-setup.sh | bash
set -e
# ANSI color escape sequences used by the print_* helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
NC='\033[0m' # No Color (resets terminal attributes)
# Colored logging helpers: each prefixes its message with a severity tag.
# %b interprets backslash escapes (matching the original `echo -e` behavior)
# and the trailing \n supplies the newline.
print_status() {
    printf '%b\n' "${BLUE}[INFO]${NC} $1"
}
print_success() {
    printf '%b\n' "${GREEN}[SUCCESS]${NC} $1"
}
print_warning() {
    printf '%b\n' "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
    printf '%b\n' "${RED}[ERROR]${NC} $1"
}
print_header() {
    printf '%b\n' "${PURPLE}$1${NC}"
}
# Main setup function
# Orchestrates the full install: prerequisite checks, clone/update of the
# Fluxer repo, config fixes, Docker build + startup, database cleanup, and a
# registration smoke test. Runs relative to the caller's CWD (cd's into the
# checkout) and leaves a SETUP_COMPLETE.md status file behind.
main() {
    print_header "🚀 Fluxer Complete Setup & Configuration"
    print_header "========================================"

    # --- Prerequisites: docker, the compose v2 plugin, and git ---
    print_status "Checking prerequisites..."

    if ! command -v docker &> /dev/null; then
        print_error "Docker is not installed. Please install Docker first."
        print_status "Install Docker with: curl -fsSL https://get.docker.com | sh"
        exit 1
    fi

    # Requires the "docker compose" plugin (not the legacy docker-compose binary).
    if ! docker compose version &> /dev/null; then
        print_error "Docker Compose is not available. Please install Docker Compose."
        exit 1
    fi

    if ! command -v git &> /dev/null; then
        print_error "Git is not installed. Please install git first."
        exit 1
    fi
    print_success "Prerequisites check passed"

    # Step 1: Clone the repo, or update an existing checkout to latest canary.
    REPO_DIR="fluxer"
    if [ -d "$REPO_DIR" ]; then
        print_status "Fluxer directory exists, updating..."
        cd "$REPO_DIR"
        git fetch origin
        git checkout canary
        git pull origin canary
    else
        print_status "Cloning Fluxer repository..."
        git clone https://github.com/fluxerapp/fluxer.git "$REPO_DIR"
        cd "$REPO_DIR"
        git checkout canary
    fi
    print_success "Repository ready"

    # Step 2: Download the fix assets from the homelab repo.
    print_status "Downloading human verification fixes..."
    # NOTE(review): temp_fix.sh is downloaded and made executable but never
    # executed or removed anywhere below — confirm whether it should be run.
    curl -sSL https://git.vish.gg/Vish/homelab/raw/branch/main/deployments/fluxer-seattle/fix-human-verification.sh -o temp_fix.sh
    chmod +x temp_fix.sh
    # Overwrite the repo's rate-limit config with the patched copy.
    curl -sSL https://git.vish.gg/Vish/homelab/raw/branch/main/deployments/fluxer-seattle/AuthRateLimitConfig.ts -o fluxer_api/src/rate_limit_configs/AuthRateLimitConfig.ts
    print_success "Fixes downloaded and applied"

    # Step 3: Create dev/.env if missing (prefer the repo's example file).
    print_status "Setting up development environment..."
    if [ ! -f "dev/.env" ]; then
        if [ -f "dev/.env.example" ]; then
            cp dev/.env.example dev/.env
            print_success "Created dev/.env from example"
        else
            print_warning "No .env.example found, creating basic .env"
            # Quoted 'EOF' delimiter: heredoc body is written verbatim, no expansion.
            cat > dev/.env << 'EOF'
# Fluxer Development Environment
FLUXER_API_URL=http://localhost:8088
FLUXER_APP_URL=http://localhost:3000
FLUXER_GATEWAY_URL=ws://localhost:8080
# Database
CASSANDRA_KEYSPACE=fluxer
CASSANDRA_HOSTS=localhost:9042
# Redis
REDIS_URL=redis://localhost:6379
# Instance Configuration
INSTANCE_NAME=Fluxer
INSTANCE_DESCRIPTION=A modern chat platform
MANUAL_REVIEW_ENABLED=false
# Rate Limiting
RATE_LIMIT_REGISTRATION_MAX=50
RATE_LIMIT_REGISTRATION_WINDOW=60000
RATE_LIMIT_LOGIN_MAX=50
RATE_LIMIT_LOGIN_WINDOW=60000
EOF
        fi
    else
        print_success "Environment file already exists"
    fi

    # Step 4: Patch the source-level configuration.
    print_status "Applying human verification fixes..."

    # Disable the manual-review gate in InstanceConfig.ts (backup first).
    if [ -f "fluxer_api/src/config/InstanceConfig.ts" ]; then
        cp "fluxer_api/src/config/InstanceConfig.ts" "fluxer_api/src/config/InstanceConfig.ts.backup.$(date +%Y%m%d_%H%M%S)"
        sed -i 's/manual_review_enabled: true/manual_review_enabled: false/g' "fluxer_api/src/config/InstanceConfig.ts"
        print_success "Manual review system disabled"
    fi

    # Rewrite the auth rate-limit config (backup first).
    # NOTE(review): this overwrites the file fetched in Step 2 with a module
    # of a different shape (AuthRateLimitConfig vs the repo's
    # AuthRateLimitConfigs) — confirm which version the API actually expects.
    if [ -f "fluxer_api/src/rate_limit_configs/AuthRateLimitConfig.ts" ]; then
        cp "fluxer_api/src/rate_limit_configs/AuthRateLimitConfig.ts" "fluxer_api/src/rate_limit_configs/AuthRateLimitConfig.ts.backup.$(date +%Y%m%d_%H%M%S)"
        cat > "fluxer_api/src/rate_limit_configs/AuthRateLimitConfig.ts" << 'EOF'
export const AuthRateLimitConfig = {
registration: {
windowMs: 60 * 1000, // 60 seconds
max: 50, // 50 attempts per window
message: "Too many registration attempts from this IP. Please try again later.",
standardHeaders: true,
legacyHeaders: false,
},
login: {
windowMs: 60 * 1000, // 60 seconds
max: 50, // 50 attempts per window
message: "Too many login attempts from this IP. Please try again later.",
standardHeaders: true,
legacyHeaders: false,
},
};
EOF
        print_success "Rate limit configuration updated"
    fi

    # Step 5: Build images and start the dev compose stack.
    print_status "Building and starting Fluxer services..."
    # Stop any previous stack; ignore errors if nothing is running.
    docker compose -f dev/compose.yaml down > /dev/null 2>&1 || true
    print_status "Building Docker images (this may take a few minutes)..."
    docker compose -f dev/compose.yaml build --no-cache
    print_status "Starting services..."
    docker compose -f dev/compose.yaml up -d

    # Give the containers a head start before probing them.
    print_status "Waiting for services to be ready..."
    sleep 30

    print_status "Checking service health..."
    # Poll Cassandra (up to ~2 minutes) until cqlsh answers.
    print_status "Waiting for Cassandra to initialize..."
    for i in {1..60}; do
        if docker compose -f dev/compose.yaml exec -T cassandra cqlsh -e "DESCRIBE KEYSPACES;" > /dev/null 2>&1; then
            break
        fi
        sleep 2
        # Only reached on the final attempt when cqlsh never answered.
        if [ $i -eq 60 ]; then
            print_warning "Cassandra took longer than expected to start"
        fi
    done

    # Schema creation is expected to be done by the API on startup; just wait.
    print_status "Initializing database schema..."
    sleep 10

    # Step 6: Reset caches and drop any queued manual verifications.
    print_status "Cleaning up any stuck user accounts..."
    # The "redis" service answers to valkey-cli here (see compose stack).
    docker compose -f dev/compose.yaml exec -T redis valkey-cli FLUSHALL > /dev/null 2>&1 || true
    # Best-effort truncation; the tables may not exist on a fresh install.
    docker compose -f dev/compose.yaml exec -T cassandra cqlsh -e "USE fluxer; TRUNCATE pending_verifications;" > /dev/null 2>&1 || true
    docker compose -f dev/compose.yaml exec -T cassandra cqlsh -e "USE fluxer; TRUNCATE pending_verifications_by_time;" > /dev/null 2>&1 || true
    print_success "Database cleanup completed"

    # Step 7: Smoke-test registration with a unique throwaway account.
    print_status "Testing registration functionality..."
    sleep 10
    TEST_EMAIL="test-$(date +%s)@example.com"
    TEST_USERNAME="testuser$(date +%s)"
    RESPONSE=$(curl -s -X POST http://localhost:8088/api/v1/auth/register \
        -H "Content-Type: application/json" \
        -d "{
\"username\": \"$TEST_USERNAME\",
\"email\": \"$TEST_EMAIL\",
\"password\": \"MySecurePassword123!\",
\"global_name\": \"Test User\",
\"date_of_birth\": \"1990-01-01\",
\"consent\": true
}" 2>/dev/null || echo "")
    if echo "$RESPONSE" | grep -q "user_id"; then
        print_success "Registration test passed - setup complete!"
    elif echo "$RESPONSE" | grep -q "RATE_LIMITED"; then
        print_success "Setup complete - rate limiting is working correctly"
    else
        print_warning "Registration test inconclusive, but services are running"
        print_status "Response: $RESPONSE"
    fi

    # Step 8: Print the operator summary and write a status file.
    print_header ""
    print_header "🎉 Fluxer Setup Complete!"
    print_header "========================"
    print_success "Fluxer is now running and configured!"
    print_success "Human verification has been disabled"
    print_success "Rate limits have been set to reasonable levels"
    print_success "All services are running and healthy"
    echo ""
    print_status "Access your Fluxer instance:"
    print_status "• Web App: http://localhost:3000"
    print_status "• API: http://localhost:8088"
    print_status "• Gateway: ws://localhost:8080"
    echo ""
    print_status "Service management commands:"
    print_status "• View logs: docker compose -f dev/compose.yaml logs -f"
    print_status "• Stop services: docker compose -f dev/compose.yaml down"
    print_status "• Restart services: docker compose -f dev/compose.yaml restart"
    print_status "• View status: docker compose -f dev/compose.yaml ps"
    echo ""
    print_status "Your friends can now register at your Fluxer instance!"
    print_status "No human verification required - they'll get immediate access."

    # Unquoted EOF delimiter: $(date) below expands when the file is written.
    cat > "SETUP_COMPLETE.md" << EOF
# Fluxer Setup Complete
This Fluxer instance has been successfully set up and configured.
## Setup Date
$(date)
## Configuration Applied
- ✅ Manual review system disabled
- ✅ Rate limits set to 50 attempts per 60 seconds
- ✅ Database initialized and cleaned
- ✅ All services built and started
- ✅ Registration tested and working
## Services Running
- Fluxer API (Port 8088)
- Fluxer App (Port 3000)
- Fluxer Gateway (Port 8080)
- Cassandra Database (Port 9042)
- Redis Cache (Port 6379)
## Access URLs
- Web Application: http://localhost:3000
- API Endpoint: http://localhost:8088
- WebSocket Gateway: ws://localhost:8080
## Status
Ready for public use! Friends can register without human verification.
EOF
    print_success "Setup documentation created: SETUP_COMPLETE.md"
    print_header ""
    print_header "Setup completed successfully! 🚀"
}
# Entry point: forward any CLI arguments (currently unused) to main.
main "$@"

View File

@@ -0,0 +1,228 @@
#!/bin/bash
# Fluxer Human Verification Fix - one-liner installer.
# Patches an existing Fluxer checkout so registration works without the
# manual-review queue, then restarts and smoke-tests the API.
# Usage: curl -sSL https://git.vish.gg/Vish/homelab/raw/branch/main/deployments/fluxer-seattle/fix-human-verification.sh | bash
set -e
echo "🚀 Fluxer Human Verification Fix Installer"
echo "=========================================="
# ANSI color escape sequences used by the print_* helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color (resets terminal attributes)
# Colored logging helpers: each prefixes its message with a severity tag.
# %b interprets backslash escapes (matching the original `echo -e` behavior)
# and the trailing \n supplies the newline.
print_status() {
    printf '%b\n' "${BLUE}[INFO]${NC} $1"
}
print_success() {
    printf '%b\n' "${GREEN}[SUCCESS]${NC} $1"
}
print_warning() {
    printf '%b\n' "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
    printf '%b\n' "${RED}[ERROR]${NC} $1"
}
# Refuse to run outside a Fluxer checkout (go.mod and fluxer_api/ are the
# landmarks of the project root).
if [ ! -f "go.mod" ] || [ ! -d "fluxer_api" ]; then
    print_error "This script must be run from the fluxer project root directory"
    exit 1
fi
print_status "Starting human verification fix..."

# Step 1: Back up the config files we are about to modify into a
# timestamped directory so the changes can be reverted by hand.
print_status "Creating configuration backups..."
BACKUP_DIR="backups/$(date +%Y%m%d_%H%M%S)"
mkdir -p "$BACKUP_DIR"
if [ -f "fluxer_api/src/config/InstanceConfig.ts" ]; then
    cp "fluxer_api/src/config/InstanceConfig.ts" "$BACKUP_DIR/"
    print_success "Backed up InstanceConfig.ts"
fi
if [ -f "fluxer_api/src/rate_limit_configs/AuthRateLimitConfig.ts" ]; then
    cp "fluxer_api/src/rate_limit_configs/AuthRateLimitConfig.ts" "$BACKUP_DIR/"
    print_success "Backed up AuthRateLimitConfig.ts"
fi

# Step 2: Flip manual_review_enabled to false in InstanceConfig.ts.
print_status "Disabling manual review system..."
if [ -f "fluxer_api/src/config/InstanceConfig.ts" ]; then
    sed -i 's/manual_review_enabled: true/manual_review_enabled: false/g' "fluxer_api/src/config/InstanceConfig.ts"
    # Verify the substitution landed (also true if it was already false).
    if grep -q "manual_review_enabled: false" "fluxer_api/src/config/InstanceConfig.ts"; then
        print_success "Manual review system disabled"
    else
        print_warning "Manual review setting may need manual verification"
    fi
else
    print_error "InstanceConfig.ts not found"
    exit 1
fi
# Step 3: Replace the auth rate-limit config with looser limits
# (50 attempts / 60 s for both registration and login).
print_status "Updating rate limit configuration..."
if [ -f "fluxer_api/src/rate_limit_configs/AuthRateLimitConfig.ts" ]; then
    # Quoted 'EOF' delimiter: heredoc body is written verbatim, no expansion.
    cat > "fluxer_api/src/rate_limit_configs/AuthRateLimitConfig.ts" << 'EOF'
export const AuthRateLimitConfig = {
registration: {
windowMs: 60 * 1000, // 60 seconds
max: 50, // 50 attempts per window
message: "Too many registration attempts from this IP. Please try again later.",
standardHeaders: true,
legacyHeaders: false,
},
login: {
windowMs: 60 * 1000, // 60 seconds
max: 50, // 50 attempts per window
message: "Too many login attempts from this IP. Please try again later.",
standardHeaders: true,
legacyHeaders: false,
},
};
EOF
    print_success "Rate limit configuration updated (50 attempts per 60 seconds)"
else
    print_error "AuthRateLimitConfig.ts not found"
    exit 1
fi
# Step 4: If the dev compose stack is running, apply the runtime fixes
# (cache flush, stuck-account cleanup, API restart + smoke test). If it is
# not running, the file edits above still take effect on the next start.
print_status "Checking Docker Compose services..."
if docker compose -f dev/compose.yaml ps | grep -q "Up"; then
    print_success "Docker services are running"

    # Step 5: Flush rate-limit counters so throttled IPs can retry now.
    # (The "redis" service answers to valkey-cli in this stack.)
    print_status "Clearing Redis rate limit cache..."
    if docker compose -f dev/compose.yaml exec -T redis valkey-cli FLUSHALL > /dev/null 2>&1; then
        print_success "Redis cache cleared"
    else
        print_warning "Could not clear Redis cache - may need manual clearing"
    fi

    # Step 6: Clear the pending-manual-verification flag from stuck users.
    print_status "Cleaning up stuck user accounts..."
    # 1n << 50n — the PENDING_MANUAL_VERIFICATION user flag bit.
    PENDING_FLAG=1125899906842624
    # cqlsh prints "user_id | username | flags"; split on '|' and strip
    # whitespace to emit "user_id,flags" pairs. The 19-digit grep keeps only
    # data rows (snowflake IDs). The previous whitespace-split awk picked up
    # the username column instead of flags, so the cleanup never matched.
    # NOTE(review): column order assumed from cqlsh's default table output —
    # confirm against a live instance.
    STUCK_USERS=$(docker compose -f dev/compose.yaml exec -T cassandra cqlsh -e "USE fluxer; SELECT user_id, username, flags FROM users;" 2>/dev/null | grep -E "[0-9]{19}" | awk -F'|' '{gsub(/[[:space:]]/, "", $1); gsub(/[[:space:]]/, "", $3); print $1 "," $3}' || echo "")
    if [ -n "$STUCK_USERS" ]; then
        echo "$STUCK_USERS" | while IFS=',' read -r user_id flags; do
            if [ -n "$user_id" ] && [ -n "$flags" ]; then
                # Test the flag bit itself rather than comparing magnitudes:
                # the old "flags > 2^50" check also matched users whose
                # *higher* bits were set, and the blind subtraction that
                # followed would then corrupt their flags.
                if [ $(( flags & PENDING_FLAG )) -ne 0 ]; then
                    print_status "Cleaning up user $user_id with flags $flags"
                    # Clear only the pending-verification bit; keep the rest.
                    new_flags=$(( flags & ~PENDING_FLAG ))
                    docker compose -f dev/compose.yaml exec -T cassandra cqlsh -e "USE fluxer; UPDATE users SET flags = $new_flags WHERE user_id = $user_id;" > /dev/null 2>&1
                    # Remove the matching manual-review queue entry.
                    docker compose -f dev/compose.yaml exec -T cassandra cqlsh -e "USE fluxer; DELETE FROM pending_verifications WHERE user_id = $user_id;" > /dev/null 2>&1
                    print_success "Cleaned up user $user_id"
                fi
            fi
        done
    else
        print_success "No stuck user accounts found"
    fi

    # Step 7: Restart the API so the edited TS configs are picked up.
    print_status "Restarting API service to apply changes..."
    if docker compose -f dev/compose.yaml restart api > /dev/null 2>&1; then
        print_success "API service restarted"
        print_status "Waiting for API service to be ready..."
        sleep 10

        # Step 8: Smoke-test registration with a unique throwaway account.
        print_status "Testing registration functionality..."
        TEST_EMAIL="test-$(date +%s)@example.com"
        TEST_USERNAME="testuser$(date +%s)"
        RESPONSE=$(curl -s -X POST http://localhost:8088/api/v1/auth/register \
            -H "Content-Type: application/json" \
            -d "{
                \"username\": \"$TEST_USERNAME\",
                \"email\": \"$TEST_EMAIL\",
                \"password\": \"MySecurePassword123!\",
                \"global_name\": \"Test User\",
                \"date_of_birth\": \"1990-01-01\",
                \"consent\": true
            }" 2>/dev/null || echo "")
        if echo "$RESPONSE" | grep -q "user_id"; then
            print_success "Registration test passed - human verification disabled!"
        elif echo "$RESPONSE" | grep -q "RATE_LIMITED"; then
            print_warning "Registration test hit rate limit - this is expected behavior"
        else
            print_warning "Registration test inconclusive - manual verification may be needed"
            echo "Response: $RESPONSE"
        fi
    else
        print_error "Failed to restart API service"
        exit 1
    fi
else
    print_warning "Docker services not running - manual restart required after starting services"
fi
# Step 9: Drop a marker file documenting what was changed.
print_status "Creating fix documentation..."
# Quoted 'EOF' delimiter: body is written verbatim, no expansion.
cat > "HUMAN_VERIFICATION_FIXED.md" << 'EOF'
# Human Verification Fix Applied
This file indicates that the human verification fix has been successfully applied to this Fluxer instance.
## Changes Applied:
- ✅ Manual review system disabled
- ✅ Rate limits increased (50 attempts per 60 seconds)
- ✅ Stuck user accounts cleaned up
- ✅ Redis cache cleared
- ✅ API service restarted
## Status:
- Registration works without human verification
- Friends can now register and access the platform
- Rate limiting is reasonable but still prevents abuse
## Applied On:
EOF
# Timestamp appended outside the heredoc because the quoted body is literal.
echo "$(date)" >> "HUMAN_VERIFICATION_FIXED.md"
print_success "Fix documentation created"

# Final summary for the operator.
echo ""
echo "🎉 Human Verification Fix Complete!"
echo "=================================="
print_success "Manual review system has been disabled"
print_success "Rate limits have been increased to reasonable levels"
print_success "Stuck user accounts have been cleaned up"
print_success "Your friends can now register at st.vish.gg without human verification!"
echo ""
print_status "Backup files saved to: $BACKUP_DIR"
print_status "Documentation created: HUMAN_VERIFICATION_FIXED.md"
echo ""
print_warning "If you encounter any issues, check the logs with:"
echo " docker compose -f dev/compose.yaml logs api"
echo ""
print_status "Fix completed successfully! 🚀"

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2026 Vish
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,160 @@
# Mastodon Production Scripts
Production-ready Mastodon deployment scripts for self-hosting.
## Installation Options
### Option 1: Docker (Multi-Platform)
```bash
curl -fsSL https://git.vish.gg/Vish/mastodon-production/raw/branch/main/install.sh | sudo bash -s -- --domain mastodon.example.com --email admin@example.com
```
Supports: Ubuntu, Debian, Fedora, Rocky/Alma/RHEL 8+, Arch, openSUSE
### Option 2: Bare-Metal (Rocky Linux 10)
```bash
# Set your configuration
export DOMAIN="mastodon.example.com"
export ADMIN_USER="admin"
export ADMIN_EMAIL="admin@example.com"
export SMTP_SERVER="smtp.gmail.com"
export SMTP_PORT="587"
export SMTP_USER="your@gmail.com"
export SMTP_PASS="your-smtp-app-password"   # replace with your real SMTP password
export SMTP_FROM="notifications@example.com"
# Run installer
curl -sSL https://git.vish.gg/Vish/mastodon-production/raw/branch/main/install-baremetal.sh | bash
```
## Scripts
| Script | Description |
|--------|-------------|
| `install.sh` | Docker-based installer (multi-platform) |
| `install-baremetal.sh` | Bare-metal installer for Rocky Linux 10 |
| `verify-mastodon.sh` | Health check / verification script |
| `fix-mastodon.sh` | Diagnose and auto-fix common issues |
| `backup-mastodon.sh` | Backup script for migration |
| `update-mastodon.sh` | Update to latest Mastodon version |
### Verify Installation
```bash
./verify-mastodon.sh
```
Checks:
- All services (postgresql, valkey, nginx, mastodon-*)
- API endpoints (instance, streaming)
- Database connectivity and stats
- Federation endpoints (webfinger, nodeinfo)
- Configuration files
### Fix Common Issues
```bash
./fix-mastodon.sh
```
Automatically fixes:
- Stopped services
- File permissions
- SELinux contexts
- Service startup issues
## Bare-Metal Architecture (Rocky Linux 10)
```
Internet → Cloudflare → Reverse Proxy (443) → Rocky VM (3000)
nginx
┌─────────────────┼─────────────────┐
↓ ↓ ↓
Puma (3001) Streaming (4000) Sidekiq
↓ ↓ ↓
└─────────────────┼─────────────────┘
PostgreSQL + Valkey
```
### Services (Bare-Metal)
| Service | Port | Description |
|---------|------|-------------|
| nginx | 3000 | External reverse proxy |
| mastodon-web | 3001 | Puma web server |
| mastodon-streaming | 4000 | WebSocket streaming |
| mastodon-sidekiq | - | Background jobs |
| postgresql | 5432 | Database |
| valkey | 6379 | Redis cache |
## Backup & Restore
### Create Backup
```bash
/home/mastodon/scripts/backup-mastodon.sh
```
Creates a complete backup including:
- PostgreSQL database dump
- `.env.production` (secrets)
- User uploads (avatars, headers, media)
- Restore instructions
### Restore
See `RESTORE.md` included in backup archive.
## Update Mastodon
```bash
# Update to latest version
/home/mastodon/scripts/update-mastodon.sh
# Update to specific version
/home/mastodon/scripts/update-mastodon.sh v4.6.0
```
## Maintenance Commands
```bash
# Service status
systemctl status mastodon-web mastodon-sidekiq mastodon-streaming
# Restart all services
systemctl restart mastodon-web mastodon-sidekiq mastodon-streaming
# View logs
journalctl -u mastodon-web -f
journalctl -u mastodon-sidekiq -f
# Access tootctl
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl --help'
# Create new user
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl accounts create USERNAME --email=EMAIL --confirmed'
# Make user admin/owner
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl accounts modify USERNAME --role Owner'
# Clear media cache
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl media remove --days=7'
```
## Requirements
### Bare-Metal
- Rocky Linux 10 (fresh install)
- 4GB+ RAM recommended
- 20GB+ disk space
- Domain with DNS configured
- SMTP credentials for email
### Docker
- Any supported Linux distribution
- Docker and Docker Compose
- Domain with DNS configured
## License
MIT

View File

@@ -0,0 +1,140 @@
# User Management Guide
## Creating New Users
### Method 1: Command Line (Recommended for Admins)
```bash
# Create a new user (confirmed = skip email verification)
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl accounts create USERNAME --email=user@example.com --confirmed'
# Approve the user (if approval mode is enabled)
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl accounts approve USERNAME'
# Optional: Give them a role
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl accounts modify USERNAME --role Moderator'
# Roles: Owner, Admin, Moderator (or leave blank for regular user)
```
### Method 2: Web Registration
1. Go to https://your-domain.com
2. Click "Create account"
3. Fill in username, email, password
4. Admin approves in Settings → Administration → Pending accounts (if approval required)
### Method 3: Invite Links
1. Login as admin
2. Go to Settings → Invites
3. Click "Generate invite link"
4. Share the link with your partner/friends
## Example: Adding Your Partner
```bash
# Create account for partner
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl accounts create partner --email=partner@example.com --confirmed'
# Save the generated password! It will be displayed like:
# New password: <randomly-generated-password>
# Approve the account
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl accounts approve partner'
# Optional: Make them an admin too
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl accounts modify partner --role Admin'
```
## User Limits
**There is NO hard limit on users.**
Your only constraints are server resources:
- **RAM**: Each active user session uses some memory
- **Storage**: Media uploads (avatars, images, videos) take disk space
- **CPU**: More users = more background jobs
For a small personal instance (2-10 users), a VM with 4GB RAM and 20GB storage is more than enough.
## Managing Existing Users
### List all users
```bash
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl accounts list'
```
### Reset a user's password
```bash
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl accounts modify USERNAME --reset-password'
```
### Disable/Enable a user
```bash
# Disable
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl accounts modify USERNAME --disable'
# Enable
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl accounts modify USERNAME --enable'
```
### Delete a user
```bash
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl accounts delete USERNAME'
```
### Change user role
```bash
# Make admin
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl accounts modify USERNAME --role Admin'
# Make moderator
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl accounts modify USERNAME --role Moderator'
# Remove all roles (regular user)
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/tootctl accounts modify USERNAME --role ""'
```
## Registration Settings
Control how new users can join via the admin panel:
1. Login as admin
2. Go to **Settings → Administration → Server Settings → Registrations**
3. Choose:
- **Open**: Anyone can sign up
- **Approval required**: Admin must approve new accounts
- **Closed**: No new registrations (invite-only)
## User Roles
| Role | Permissions |
|------|-------------|
| **Owner** | Full access, can't be demoted |
| **Admin** | Full admin panel access, manage users, server settings |
| **Moderator** | Handle reports, suspend users, manage content |
| **User** | Regular user, no admin access |
## Quick Reference
```bash
# Create user
bin/tootctl accounts create USERNAME --email=EMAIL --confirmed
# Approve user
bin/tootctl accounts approve USERNAME
# Make admin
bin/tootctl accounts modify USERNAME --role Admin
# Reset password
bin/tootctl accounts modify USERNAME --reset-password
# Delete user
bin/tootctl accounts delete USERNAME
```
All commands require the prefix:
```bash
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production ...'
```

View File

@@ -0,0 +1,131 @@
#!/bin/bash
# Mastodon Backup Script
# Creates a complete backup for migration to another server
# Run as root
#
# Output: ${BACKUP_DIR}/mastodon_backup_<timestamp>.tar.gz containing the
# database dump, .env.production, user uploads, custom files and RESTORE.md.
set -e
BACKUP_DIR="${BACKUP_DIR:-/home/mastodon/backups}"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_NAME="mastodon_backup_${TIMESTAMP}"
BACKUP_PATH="${BACKUP_DIR}/${BACKUP_NAME}"
echo "=========================================="
echo "Mastodon Backup Script"
echo "Backup location: ${BACKUP_PATH}"
echo "=========================================="
# Create backup directory
mkdir -p "${BACKUP_PATH}"
# 1. Backup PostgreSQL database (custom format -Fc, restorable with pg_restore)
echo "[1/5] Backing up PostgreSQL database..."
sudo -u postgres pg_dump -Fc mastodon_production > "${BACKUP_PATH}/database.dump"
# Paths inside $(...) are quoted so size reporting works even when
# BACKUP_DIR contains spaces (previously unquoted).
echo " Database backup: $(du -h "${BACKUP_PATH}/database.dump" | cut -f1)"
# 2. Backup .env.production (contains secrets)
echo "[2/5] Backing up configuration..."
cp /home/mastodon/live/.env.production "${BACKUP_PATH}/.env.production"
# 3. Backup user uploads (avatars, headers, media)
echo "[3/5] Backing up user uploads (this may take a while)..."
if [ -d /home/mastodon/live/public/system ]; then
    tar -czf "${BACKUP_PATH}/system.tar.gz" -C /home/mastodon/live/public system
    echo " System files: $(du -h "${BACKUP_PATH}/system.tar.gz" | cut -f1)"
else
    echo " No system directory found (fresh install)"
fi
# 4. Backup custom files (if any)
echo "[4/5] Backing up custom files..."
mkdir -p "${BACKUP_PATH}/custom"
# Custom CSS/branding
if [ -f /home/mastodon/live/app/javascript/styles/custom.scss ]; then
    cp /home/mastodon/live/app/javascript/styles/custom.scss "${BACKUP_PATH}/custom/"
fi
# Site uploads (favicon, thumbnail, etc)
if [ -d /home/mastodon/live/public/site_uploads ]; then
    cp -r /home/mastodon/live/public/site_uploads "${BACKUP_PATH}/custom/"
fi
# 5. Export user data
echo "[5/5] Exporting instance data..."
# NOTE(review): stdout/stderr are discarded and failures ignored, so this step
# produces no backup artifact — confirm whether an actual export was intended.
sudo -u mastodon bash -c "cd ~/live && export PATH=\"\$HOME/.rbenv/bin:\$PATH\" && eval \"\$(rbenv init -)\" && RAILS_ENV=production bin/tootctl accounts export > /dev/null 2>&1" || true
# Create restore instructions (quoted heredoc: written verbatim, no expansion)
cat > "${BACKUP_PATH}/RESTORE.md" << 'RESTORE'
# Mastodon Restore Instructions
## On the new server:
1. Run the install script first (without creating admin user)
2. Stop all Mastodon services:
```
systemctl stop mastodon-web mastodon-sidekiq mastodon-streaming
```
3. Restore the database:
```
sudo -u postgres dropdb mastodon_production
sudo -u postgres createdb -O mastodon mastodon_production
sudo -u postgres pg_restore -d mastodon_production database.dump
```
4. Restore .env.production:
```
cp .env.production /home/mastodon/live/.env.production
chown mastodon:mastodon /home/mastodon/live/.env.production
chmod 600 /home/mastodon/live/.env.production
```
5. Restore user uploads:
```
cd /home/mastodon/live/public
tar -xzf /path/to/backup/system.tar.gz
chown -R mastodon:mastodon system
```
6. Update LOCAL_DOMAIN in .env.production if domain changed
7. Run migrations (in case of version upgrade):
```
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bundle exec rails db:migrate'
```
8. Recompile assets:
```
sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bundle exec rails assets:precompile'
```
9. Fix SELinux contexts:
```
chcon -R -t httpd_sys_content_t /home/mastodon/live/public
```
10. Start services:
```
systemctl start mastodon-web mastodon-sidekiq mastodon-streaming
```
RESTORE
# Create final archive, then drop the uncompressed staging directory
echo ""
echo "Creating final archive..."
cd "${BACKUP_DIR}"
tar -czf "${BACKUP_NAME}.tar.gz" "${BACKUP_NAME}"
rm -rf "${BACKUP_NAME}"
FINAL_SIZE=$(du -h "${BACKUP_DIR}/${BACKUP_NAME}.tar.gz" | cut -f1)
echo ""
echo "=========================================="
echo "✅ Backup Complete!"
echo "=========================================="
echo ""
echo "Backup file: ${BACKUP_DIR}/${BACKUP_NAME}.tar.gz"
echo "Size: ${FINAL_SIZE}"
echo ""
echo "To download: scp root@server:${BACKUP_DIR}/${BACKUP_NAME}.tar.gz ."
echo ""

View File

@@ -0,0 +1,222 @@
#!/bin/bash
# =============================================================================
# Mastodon Fix/Repair Script
# Diagnoses and fixes common issues
# =============================================================================
# Run as root
echo "=========================================="
echo "Mastodon Fix/Repair Tool"
echo "=========================================="
# Check root
if [[ $EUID -ne 0 ]]; then
    echo "This script must be run as root"
    exit 1
fi
# Counters shown in the summary; ERRORS doubles as the script's exit status.
FIXED=0
ERRORS=0
# 1. Check and fix service status
echo ""
echo "[1/7] Checking services..."
services=("postgresql" "valkey" "nginx" "mastodon-web" "mastodon-sidekiq" "mastodon-streaming")
for svc in "${services[@]}"; do
    if systemctl is-active --quiet "$svc" 2>/dev/null; then
        echo "$svc is running"
    elif systemctl list-unit-files | grep -q "^${svc}.service"; then
        # Unit exists but is stopped: try a one-shot start.
        echo "$svc is not running, attempting to start..."
        systemctl start "$svc" 2>/dev/null
        sleep 2
        if systemctl is-active --quiet "$svc"; then
            echo "$svc started successfully"
            FIXED=$((FIXED + 1))
        else
            echo " ✗ Failed to start $svc"
            echo " Check logs: journalctl -u $svc -n 50"
            ERRORS=$((ERRORS + 1))
        fi
    fi
done
# 2. Check file permissions
echo ""
echo "[2/7] Checking file permissions..."
# Check .env.production (must be mastodon-owned, mode 600 — it holds secrets)
if [ -f /home/mastodon/live/.env.production ]; then
    OWNER=$(stat -c '%U' /home/mastodon/live/.env.production)
    PERMS=$(stat -c '%a' /home/mastodon/live/.env.production)
    ENV_OK=1
    if [ "$OWNER" != "mastodon" ]; then
        echo " ✗ Fixing .env.production ownership..."
        chown mastodon:mastodon /home/mastodon/live/.env.production
        FIXED=$((FIXED + 1))
        ENV_OK=0
    fi
    if [ "$PERMS" != "600" ]; then
        echo " ✗ Fixing .env.production permissions..."
        chmod 600 /home/mastodon/live/.env.production
        FIXED=$((FIXED + 1))
        ENV_OK=0
    fi
    # Only claim OK when nothing needed fixing (was printed unconditionally,
    # contradicting the ✗ messages above).
    if [ "$ENV_OK" -eq 1 ]; then
        echo " ✓ .env.production permissions OK"
    fi
fi
# Check live directory ownership
if [ -d /home/mastodon/live ]; then
    LIVE_OWNER=$(stat -c '%U' /home/mastodon/live)
    if [ "$LIVE_OWNER" != "mastodon" ]; then
        echo " ✗ Fixing /home/mastodon/live ownership..."
        chown -R mastodon:mastodon /home/mastodon/live
        FIXED=$((FIXED + 1))
    else
        echo " ✓ /home/mastodon/live ownership OK"
    fi
fi
# 3. Check database connection
echo ""
echo "[3/7] Checking database..."
if sudo -u postgres psql -c "SELECT 1" mastodon_production > /dev/null 2>&1; then
    echo " ✓ Database connection successful"
else
    echo " ✗ Cannot connect to database"
    # Try to fix common issues
    if ! systemctl is-active --quiet postgresql; then
        echo " Attempting to start PostgreSQL..."
        systemctl start postgresql
        sleep 2
    fi
    # Check if database exists
    if ! sudo -u postgres psql -lqt | cut -d \| -f 1 | grep -qw mastodon_production; then
        echo " Database does not exist!"
        ERRORS=$((ERRORS + 1))
    fi
fi
# 4. Check Redis/Valkey connection
echo ""
echo "[4/7] Checking cache server..."
if valkey-cli ping > /dev/null 2>&1; then
    echo " ✓ Valkey connection successful"
elif redis-cli ping > /dev/null 2>&1; then
    echo " ✓ Redis connection successful"
else
    echo " ✗ Cannot connect to cache server"
    if systemctl is-active --quiet valkey; then
        echo " Valkey is running but not responding"
    elif systemctl is-active --quiet redis; then
        echo " Redis is running but not responding"
    else
        echo " Attempting to start Valkey..."
        systemctl start valkey 2>/dev/null || systemctl start redis 2>/dev/null
        sleep 2
        # Verify the restart actually worked before counting it as fixed
        # (previously FIXED was incremented without re-checking).
        if valkey-cli ping > /dev/null 2>&1 || redis-cli ping > /dev/null 2>&1; then
            FIXED=$((FIXED + 1))
        else
            ERRORS=$((ERRORS + 1))
        fi
    fi
fi
# 5. Check nginx configuration
echo ""
echo "[5/7] Checking nginx configuration..."
if nginx -t 2>/dev/null; then
    echo " ✓ Nginx configuration is valid"
else
    echo " ✗ Nginx configuration has errors"
    # Re-run without suppression so the operator sees the actual error.
    nginx -t
    ERRORS=$((ERRORS + 1))
fi
# 6. Check SELinux contexts (Rocky/RHEL)
echo ""
echo "[6/7] Checking SELinux..."
if command -v getenforce &> /dev/null; then
    SELINUX_MODE=$(getenforce)
    echo " SELinux mode: $SELINUX_MODE"
    if [ "$SELINUX_MODE" = "Enforcing" ]; then
        # Fix common SELinux issues: nginx needs httpd_sys_content_t to serve
        # static files out of the mastodon home directory.
        if [ -d /home/mastodon/live/public ]; then
            echo " Ensuring correct SELinux contexts..."
            chcon -R -t httpd_sys_content_t /home/mastodon/live/public 2>/dev/null || true
        fi
    fi
else
    echo " SELinux not present"
fi
# 7. Check API endpoints
echo ""
echo "[7/7] Checking API endpoints..."
sleep 1
# Test instance API (nginx front door on port 3000)
if curl -sf http://127.0.0.1:3000/api/v1/instance > /dev/null 2>&1; then
    echo " ✓ Instance API responding"
else
    echo " ✗ Instance API not responding"
    # Check if it's a startup timing issue
    echo " Waiting for services to fully start..."
    sleep 5
    if curl -sf http://127.0.0.1:3000/api/v1/instance > /dev/null 2>&1; then
        echo " ✓ Instance API now responding"
    else
        echo " ✗ Instance API still not responding"
        echo " Check logs: journalctl -u mastodon-web -n 50"
        ERRORS=$((ERRORS + 1))
    fi
fi
# Test streaming API (node streaming server on port 4000)
if curl -sf http://127.0.0.1:4000/api/v1/streaming/health > /dev/null 2>&1; then
    echo " ✓ Streaming API healthy"
else
    echo " ✗ Streaming API not responding"
    echo " Attempting to restart streaming service..."
    systemctl restart mastodon-streaming
    sleep 3
    if curl -sf http://127.0.0.1:4000/api/v1/streaming/health > /dev/null 2>&1; then
        echo " ✓ Streaming API now healthy"
        FIXED=$((FIXED + 1))
    else
        echo " ✗ Streaming API still not responding"
        ERRORS=$((ERRORS + 1))
    fi
fi
# Summary
echo ""
echo "=========================================="
if [ $ERRORS -eq 0 ]; then
    if [ $FIXED -eq 0 ]; then
        echo "✅ All checks passed! No issues found."
    else
        echo "✅ Fixed $FIXED issue(s). All checks now pass."
        echo ""
        echo "You may want to restart services:"
        echo " systemctl restart mastodon-web mastodon-sidekiq mastodon-streaming"
    fi
else
    echo "⚠️ Found $ERRORS error(s) that need manual attention."
    echo ""
    echo "Common fixes:"
    echo " - Check logs: journalctl -u mastodon-web -f"
    echo " - Restart all: systemctl restart mastodon-{web,sidekiq,streaming}"
    echo " - Check .env: cat /home/mastodon/live/.env.production"
    echo " - Run migrations: sudo -u mastodon bash -lc 'cd ~/live && RAILS_ENV=production bin/rails db:migrate'"
fi
echo "=========================================="
exit $ERRORS

View File

@@ -0,0 +1,340 @@
#!/bin/bash
# Mastodon v4.5.4 Bare-Metal Install Script for Rocky Linux 10
# Usage: curl -sSL https://git.vish.gg/Vish/pihole-baremetal/raw/branch/main/mastodon/install-mastodon.sh | bash
# Run as root on a fresh Rocky Linux 10 VM
set -e
# Configuration - Edit these before running
DOMAIN="${DOMAIN:-mastodon.example.com}"
ADMIN_USER="${ADMIN_USER:-admin}"
ADMIN_EMAIL="${ADMIN_EMAIL:-admin@example.com}"
SMTP_SERVER="${SMTP_SERVER:-smtp.gmail.com}"
SMTP_PORT="${SMTP_PORT:-587}"
SMTP_USER="${SMTP_USER:-}"
# NOTE(review): restored from a sanitization artifact ("REDACTED_PASSWORD");
# follows the ${VAR:-} env-override pattern of the surrounding settings.
SMTP_PASS="${SMTP_PASS:-}"
SMTP_FROM="${SMTP_FROM:-notifications@example.com}"
echo "=========================================="
echo "Mastodon v4.5.4 Installation Script"
echo "Target Domain: $DOMAIN"
echo "=========================================="
# Check if running as root
if [[ $EUID -ne 0 ]]; then
    echo "This script must be run as root"
    exit 1
fi
# Install system dependencies
echo "[1/12] Installing system dependencies..."
dnf install -y epel-release
dnf install -y git curl wget gcc make autoconf bison openssl-devel \
    libyaml-devel libffi-devel readline-devel zlib-devel gdbm-devel ncurses-devel \
    libxml2-devel libxslt-devel libicu-devel libidn-devel jemalloc-devel \
    ImageMagick ImageMagick-devel nginx postgresql-server postgresql-contrib \
    valkey certbot python3-certbot-nginx meson ninja-build \
    libpng-devel libjpeg-turbo-devel libwebp-devel libtiff-devel \
    expat-devel gobject-introspection-devel glib2-devel
# Install Node.js 20
echo "[2/12] Installing Node.js 20..."
curl -fsSL https://rpm.nodesource.com/setup_20.x | bash -
dnf install -y nodejs
# Enable corepack for Yarn
corepack enable
# Build libvips from source (not in Rocky 10 repos)
echo "[3/12] Building libvips from source..."
cd /tmp
wget https://github.com/libvips/libvips/releases/download/v8.16.1/vips-8.16.1.tar.xz
tar xf vips-8.16.1.tar.xz
cd vips-8.16.1
meson setup build --prefix=/usr --buildtype=release
cd build && ninja && ninja install
ldconfig
cd /tmp && rm -rf vips-8.16.1*
# Initialize PostgreSQL
echo "[4/12] Setting up PostgreSQL..."
postgresql-setup --initdb
systemctl enable --now postgresql
# Create mastodon database user and database (peer auth over the local
# socket — see DB_HOST=/var/run/postgresql below — so no password is set)
sudo -u postgres psql -c "CREATE USER mastodon CREATEDB;"
sudo -u postgres psql -c "CREATE DATABASE mastodon_production OWNER mastodon;"
# Start Valkey (Redis)
echo "[5/12] Starting Valkey..."
systemctl enable --now valkey
# Create mastodon user
echo "[6/12] Creating mastodon user..."
useradd -m -s /bin/bash mastodon || true
# Install Ruby via rbenv (quoted heredoc: runs verbatim as the mastodon user)
echo "[7/12] Installing Ruby 3.4.7..."
sudo -u mastodon bash << 'RUBY_INSTALL'
cd ~
git clone https://github.com/rbenv/rbenv.git ~/.rbenv
echo 'export PATH="$HOME/.rbenv/bin:$PATH"' >> ~/.bashrc
echo 'eval "$(rbenv init -)"' >> ~/.bashrc
export PATH="$HOME/.rbenv/bin:$PATH"
eval "$(rbenv init -)"
git clone https://github.com/rbenv/ruby-build.git ~/.rbenv/plugins/ruby-build
RUBY_CONFIGURE_OPTS="--with-jemalloc" rbenv install 3.4.7
rbenv global 3.4.7
gem install bundler
RUBY_INSTALL
# Clone Mastodon
echo "[8/12] Cloning Mastodon v4.5.4..."
sudo -u mastodon bash << 'CLONE'
cd ~
git clone https://github.com/mastodon/mastodon.git live
cd live
git checkout v4.5.4
CLONE
# Install dependencies
echo "[9/12] Installing Ruby and Node dependencies..."
sudo -u mastodon bash << 'DEPS'
export PATH="$HOME/.rbenv/bin:$PATH"
eval "$(rbenv init -)"
cd ~/live
bundle config deployment 'true'
bundle config without 'development test'
bundle install -j$(nproc)
yarn install --immutable
DEPS
# Generate secrets and create .env.production
echo "[10/12] Generating secrets and configuration..."
SECRET_KEY=$(openssl rand -hex 64)
OTP_SECRET=$(openssl rand -hex 64)
VAPID_KEYS=$(sudo -u mastodon bash -c 'cd ~/live && export PATH="$HOME/.rbenv/bin:$PATH" && eval "$(rbenv init -)" && RAILS_ENV=production bundle exec rake mastodon:webpush:generate_vapid_key 2>/dev/null')
VAPID_PRIVATE=$(echo "$VAPID_KEYS" | grep VAPID_PRIVATE_KEY | cut -d= -f2)
VAPID_PUBLIC=$(echo "$VAPID_KEYS" | grep VAPID_PUBLIC_KEY | cut -d= -f2)
AR_KEY=$(openssl rand -hex 32)
AR_DETERMINISTIC=$(openssl rand -hex 32)
AR_SALT=$(openssl rand -hex 32)
# Unquoted heredoc: $VARS expand at install time. DB_PASS stays empty
# (socket peer auth). NOTE(review): DB_PASS/SMTP_PASSWORD lines were mangled
# by secret sanitization; restored to the empty value and $SMTP_PASS reference.
cat > /home/mastodon/live/.env.production << ENVFILE
LOCAL_DOMAIN=$DOMAIN
SINGLE_USER_MODE=false
SECRET_KEY_BASE=$SECRET_KEY
OTP_SECRET=$OTP_SECRET
VAPID_PRIVATE_KEY=$VAPID_PRIVATE
VAPID_PUBLIC_KEY=$VAPID_PUBLIC
DB_HOST=/var/run/postgresql
DB_USER=mastodon
DB_NAME=mastodon_production
DB_PASS=
REDIS_HOST=127.0.0.1
REDIS_PORT=6379
SMTP_SERVER=$SMTP_SERVER
SMTP_PORT=$SMTP_PORT
SMTP_LOGIN=$SMTP_USER
SMTP_PASSWORD=$SMTP_PASS
SMTP_FROM_ADDRESS=$SMTP_FROM
SMTP_AUTH_METHOD=plain
SMTP_OPENSSL_VERIFY_MODE=none
SMTP_ENABLE_STARTTLS=auto
ACTIVE_RECORD_ENCRYPTION_PRIMARY_KEY=$AR_KEY
ACTIVE_RECORD_ENCRYPTION_DETERMINISTIC_KEY=$AR_DETERMINISTIC
ACTIVE_RECORD_ENCRYPTION_KEY_DERIVATION_SALT=$AR_SALT
TRUSTED_PROXY_IP=127.0.0.1,::1,192.168.0.0/16
ENVFILE
chown mastodon:mastodon /home/mastodon/live/.env.production
chmod 600 /home/mastodon/live/.env.production
# Run migrations and seed
echo "[11/12] Running database migrations..."
sudo -u mastodon bash << 'MIGRATE'
export PATH="$HOME/.rbenv/bin:$PATH"
eval "$(rbenv init -)"
cd ~/live
RAILS_ENV=production bundle exec rails db:migrate
RAILS_ENV=production bundle exec rails db:seed
RAILS_ENV=production bundle exec rails assets:precompile
MIGRATE
# Create systemd services
echo "[12/12] Creating systemd services..."
cat > /etc/systemd/system/mastodon-web.service << 'SERVICE'
[Unit]
Description=mastodon-web
After=network.target
[Service]
Type=simple
User=mastodon
WorkingDirectory=/home/mastodon/live
Environment="RAILS_ENV=production"
Environment="PORT=3001"
ExecStart=/bin/bash -lc 'cd /home/mastodon/live && exec bundle exec puma -C config/puma.rb'
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
SERVICE
cat > /etc/systemd/system/mastodon-sidekiq.service << 'SERVICE'
[Unit]
Description=mastodon-sidekiq
After=network.target
[Service]
Type=simple
User=mastodon
WorkingDirectory=/home/mastodon/live
Environment="RAILS_ENV=production"
Environment="MALLOC_ARENA_MAX=2"
ExecStart=/bin/bash -lc 'cd /home/mastodon/live && exec bundle exec sidekiq -c 25'
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
SERVICE
cat > /etc/systemd/system/mastodon-streaming.service << 'SERVICE'
[Unit]
Description=mastodon-streaming
After=network.target
[Service]
Type=simple
User=mastodon
WorkingDirectory=/home/mastodon/live
Environment="NODE_ENV=production"
Environment="PORT=4000"
Environment="STREAMING_CLUSTER_NUM=1"
ExecStart=/usr/bin/node ./streaming
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
SERVICE
# Nginx config (quoted heredoc so nginx $variables are not expanded by bash)
cat > /etc/nginx/conf.d/mastodon.conf << 'NGINX'
map $http_upgrade $connection_upgrade {
    default upgrade;
    '' close;
}
upstream backend {
    server 127.0.0.1:3001 fail_timeout=0;
}
upstream streaming {
    server 127.0.0.1:4000 fail_timeout=0;
}
server {
    listen 3000;
    listen [::]:3000;
    server_name _;
    keepalive_timeout 70;
    sendfile on;
    client_max_body_size 99m;
    root /home/mastodon/live/public;
    gzip on;
    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_types text/plain text/css application/json application/javascript text/xml application/xml image/svg+xml;
    location / {
        try_files $uri @proxy;
    }
    location ~ ^/(assets|avatars|emoji|headers|packs|shortcuts|sounds|system)/ {
        add_header Cache-Control "public, max-age=2419200, must-revalidate";
        try_files $uri =404;
    }
    location ^~ /api/v1/streaming {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto https;
        proxy_pass http://streaming;
        proxy_buffering off;
        proxy_redirect off;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        tcp_nodelay on;
    }
    location @proxy {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto https;
        proxy_pass http://backend;
        proxy_buffering on;
        proxy_redirect off;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        tcp_nodelay on;
    }
    error_page 404 500 501 502 503 504 /500.html;
}
NGINX
# SELinux and firewall
setsebool -P httpd_can_network_connect 1
setsebool -P httpd_read_user_content 1
chcon -R -t httpd_sys_content_t /home/mastodon/live/public
chmod 755 /home/mastodon /home/mastodon/live /home/mastodon/live/public
firewall-cmd --permanent --add-port=3000/tcp
firewall-cmd --reload
# Add localhost to Rails hosts
echo 'Rails.application.config.hosts << "localhost"' >> /home/mastodon/live/config/environments/production.rb
echo 'Rails.application.config.hosts << "127.0.0.1"' >> /home/mastodon/live/config/environments/production.rb
chown mastodon:mastodon /home/mastodon/live/config/environments/production.rb
# Enable and start services
systemctl daemon-reload
systemctl enable --now mastodon-web mastodon-sidekiq mastodon-streaming nginx
# Create admin user
echo ""
echo "Creating admin user..."
# NOTE(review): this command substitution was mangled by secret sanitization
# ("REDACTED_PASSWORD" replaced the "$(sudo" opener, leaving an unmatched ")");
# reconstructed from the surviving trailing ")" and the grep/awk pipeline,
# which extracts the generated password from tootctl's output.
ADMIN_PASS=$(sudo -u mastodon bash -c "cd ~/live && export PATH=\"\$HOME/.rbenv/bin:\$PATH\" && eval \"\$(rbenv init -)\" && RAILS_ENV=production bin/tootctl accounts create $ADMIN_USER --email=$ADMIN_EMAIL --confirmed 2>&1 | grep 'New password' | awk '{print \$3}'")
sudo -u mastodon bash -c "cd ~/live && export PATH=\"\$HOME/.rbenv/bin:\$PATH\" && eval \"\$(rbenv init -)\" && RAILS_ENV=production bin/tootctl accounts modify $ADMIN_USER --role Owner"
sudo -u mastodon bash -c "cd ~/live && export PATH=\"\$HOME/.rbenv/bin:\$PATH\" && eval \"\$(rbenv init -)\" && RAILS_ENV=production bin/tootctl accounts approve $ADMIN_USER"
echo ""
echo "=========================================="
echo "✅ Mastodon Installation Complete!"
echo "=========================================="
echo ""
echo "Domain: $DOMAIN"
echo "Admin User: $ADMIN_USER"
echo "Admin Email: $ADMIN_EMAIL"
# NOTE(review): restored from a sanitization artifact to print $ADMIN_PASS.
echo "Admin Password: $ADMIN_PASS"
echo ""
echo "Listening on port 3000 (HTTP)"
echo ""
echo "Next steps:"
echo "1. Configure your reverse proxy to forward HTTPS to port 3000"
echo "2. Login and change your password"
echo "3. Configure instance settings in Administration panel"
echo ""

View File

@@ -0,0 +1,723 @@
#!/bin/bash
# =============================================================================
# Mastodon Production Installer
# =============================================================================
# Self-hosted Mastodon instance - production ready with Docker
#
# Supported: Ubuntu, Debian, Fedora, Rocky/Alma/RHEL 8+, Arch, openSUSE
# Deploys via Docker Compose
#
# Usage:
#   curl -fsSL <url>/install.sh | sudo bash
#
# Options:
#   --domain <domain>     Your domain (required)
#   --email <email>       Admin email / Let's Encrypt
#   --no-ssl              Skip SSL (local testing only)
#   --single-user         Single user mode
#   --s3                  Enable S3 storage configuration
# =============================================================================
# NOTE(review): only pipefail is set (no `set -e`), so failing commands other
# than explicit error() calls do not abort the install — confirm intended.
set -o pipefail
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers; error() writes to stderr and terminates the script.
log() { echo -e "${BLUE}[INFO]${NC} $1"; }
success() { echo -e "${GREEN}[OK]${NC} $1"; }
warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
error() { echo -e "${RED}[ERROR]${NC} $1" >&2; exit 1; }
# Configuration defaults; DOMAIN/ADMIN_EMAIL etc. are overridden by the
# command-line flags parsed below.
INSTALL_DIR="/opt/mastodon"
DATA_DIR="/opt/mastodon-data"
DOMAIN=""
ADMIN_EMAIL=""
ENABLE_SSL=true
SINGLE_USER_MODE=false
ENABLE_S3=false
# Parse arguments
while [ $# -gt 0 ]; do
    case $1 in
        --domain) DOMAIN="$2"; shift 2 ;;
        --email) ADMIN_EMAIL="$2"; shift 2 ;;
        --no-ssl) ENABLE_SSL=false; shift ;;
        --single-user) SINGLE_USER_MODE=true; shift ;;
        --s3) ENABLE_S3=true; shift ;;
        --help|-h)
            echo "Mastodon Production Installer"
            echo ""
            echo "Usage: install.sh [options]"
            echo ""
            echo "Options:"
            echo " --domain <domain> Your domain (e.g., mastodon.example.com)"
            echo " --email <email> Admin email for Let's Encrypt"
            echo " --no-ssl Skip SSL (testing only)"
            echo " --single-user Single user mode"
            echo " --s3 Configure S3 storage"
            exit 0
            ;;
        *) shift ;;  # NOTE(review): unrecognized options are silently ignored — confirm intended
    esac
done
# Check root
[ "$(id -u)" -ne 0 ] && error "Run as root: sudo bash install.sh"
# Detect OS
# Populates the globals OS and OS_VERSION from /etc/os-release and logs the
# result. Aborts via error() (which exits) when the file is missing.
detect_os() {
    [ -f /etc/os-release ] || error "Cannot detect OS"
    . /etc/os-release
    OS=$ID
    OS_VERSION=${VERSION_ID:-}
    log "Detected: $OS $OS_VERSION"
}
# Wait for package manager locks
# On Debian-family hosts, poll until no process holds the dpkg frontend lock;
# on every other distro this is an immediate no-op.
wait_for_lock() {
    case "$OS" in
        ubuntu|debian|linuxmint|pop) ;;
        *) return 0 ;;
    esac
    # Another apt/dpkg process holds the lock; poll until it is released.
    while fuser /var/lib/dpkg/lock-frontend >/dev/null 2>&1; do
        sleep 2
    done
}
# Install Docker
# Installs Docker Engine plus the compose/buildx plugins via the
# distro-appropriate package channel, then enables and starts the daemon.
# Reads the globals OS / VERSION_CODENAME set by detect_os().
install_docker() {
    # Already installed: just make sure the daemon is enabled and running.
    if command -v docker >/dev/null 2>&1; then
        success "Docker already installed"
        systemctl enable --now docker 2>/dev/null || true
        return
    fi
    log "Installing Docker..."
    case $OS in
        ubuntu|debian|linuxmint|pop)
            export DEBIAN_FRONTEND=noninteractive
            wait_for_lock
            apt-get update -qq
            apt-get install -y -qq ca-certificates curl gnupg
            install -m 0755 -d /etc/apt/keyrings
            # Mint/Pop!_OS have no Docker repo of their own; use Ubuntu's.
            DOCKER_OS=$OS
            case "$OS" in linuxmint|pop) DOCKER_OS="ubuntu" ;; esac
            curl -fsSL https://download.docker.com/linux/$DOCKER_OS/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg 2>/dev/null
            chmod a+r /etc/apt/keyrings/docker.gpg
            # Pick a codename Docker actually publishes: fall back to jammy
            # for Mint/Pop, and to bookworm for Debian trixie/sid
            # (presumably because the Docker repo lags — confirm).
            CODENAME=${VERSION_CODENAME:-jammy}
            case "$OS" in linuxmint|pop) CODENAME="jammy" ;; esac
            [ "$OS" = "debian" ] && case "$CODENAME" in trixie|sid) CODENAME="bookworm" ;; esac
            echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/$DOCKER_OS $CODENAME stable" > /etc/apt/sources.list.d/docker.list
            wait_for_lock
            apt-get update -qq
            apt-get install -y -qq docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
            ;;
        fedora)
            dnf install -y -q dnf-plugins-core
            dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
            dnf install -y -q docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
            ;;
        rocky|almalinux|rhel|centos)
            # dnf on EL8+; fall back to yum on older tooling.
            dnf install -y -q dnf-plugins-core || yum install -y yum-utils
            dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo 2>/dev/null || \
                yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
            dnf install -y -q docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin || \
                yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
            ;;
        arch|manjaro|endeavouros)
            # Arch family ships Docker in the main repos.
            pacman -Sy --noconfirm docker docker-compose
            ;;
        opensuse*|sles)
            zypper install -y docker docker-compose
            ;;
        *)
            error "Unsupported OS: $OS"
            ;;
    esac
    systemctl enable --now docker
    success "Docker installed"
}
# Generate secrets
# Generate every secret consumed by .env.production and docker-compose.yml.
# Fix: the mirror's secret sanitization destroyed the last two command
# substitutions (`POSTGRES_PASSWORD="REDACTED_PASSWORD" rand -hex 32)` was not
# valid shell); restored from the pattern used by the other secrets.
generate_secrets() {
    SECRET_KEY_BASE=$(openssl rand -hex 64)
    OTP_SECRET=$(openssl rand -hex 64)
    # VAPID keys for web push. main() calls this before install_docker, so the
    # `docker run` may fail on a fresh host and we fall back to random hex.
    # NOTE(review): random hex is NOT a valid VAPID keypair — web push will not
    # work with the fallback; consider regenerating post-install. Confirm.
    VAPID_KEYS=$(docker run --rm tootsuite/mastodon:latest bundle exec rake mastodon:webpush:generate_vapid_key 2>/dev/null || echo "")
    if [ -n "$VAPID_KEYS" ]; then
        VAPID_PRIVATE_KEY=$(echo "$VAPID_KEYS" | grep VAPID_PRIVATE_KEY | cut -d= -f2)
        VAPID_PUBLIC_KEY=$(echo "$VAPID_KEYS" | grep VAPID_PUBLIC_KEY | cut -d= -f2)
    else
        VAPID_PRIVATE_KEY=$(openssl rand -hex 32)
        VAPID_PUBLIC_KEY=$(openssl rand -hex 32)
    fi
    POSTGRES_PASSWORD=$(openssl rand -hex 32)
    REDIS_PASSWORD=$(openssl rand -hex 32)
}
# Get domain interactively
# Interactively collect DOMAIN / ADMIN_EMAIL when they were not passed as
# flags. A domain is mandatory (error() aborts without one); a missing email
# falls back to admin@<domain> with a warning about SSL issuance.
get_domain() {
    if [ -z "$DOMAIN" ]; then
        echo ""
        echo "========================================"
        echo " Domain Configuration"
        echo "========================================"
        echo ""
        echo "Enter your domain for Mastodon (e.g., mastodon.example.com)"
        echo "A domain is REQUIRED for Mastodon to work properly."
        echo ""
        read -p "Domain: " DOMAIN
        [ -n "$DOMAIN" ] || error "Domain is required for Mastodon"
    fi
    if [ -z "$ADMIN_EMAIL" ]; then
        read -p "Admin email: " ADMIN_EMAIL
        if [ -z "$ADMIN_EMAIL" ]; then
            warn "No email provided - SSL may not work"
            ADMIN_EMAIL="admin@$DOMAIN"
        fi
    fi
}
# Create directories
# Lay out the persistent data tree referenced by the compose volume mounts.
create_directories() {
    log "Creating directories..."
    mkdir -p "$INSTALL_DIR" \
        "$DATA_DIR"/{postgres,redis,mastodon/{public/system,live}} \
        "$DATA_DIR"/caddy/{data,config}
    chmod -R 755 "$DATA_DIR"
    success "Directories created"
}
# Create .env file
# Render .env.production, the environment file mounted into the Mastodon
# containers. The heredoc delimiter is unquoted, so shell variables (secrets
# from generate_secrets, DOMAIN, etc.) expand now, at render time.
# Fix: mirror sanitization had replaced the password interpolations with the
# literal string "REDACTED_PASSWORD", which would have been written verbatim
# into the env file and broken DB/Redis auth; restored the variable references.
create_env() {
    log "Creating environment configuration..."
    # NOTE(review): protocol is computed but unused in the visible template;
    # possibly intended for LOCAL_HTTPS or similar — confirm.
    local protocol="https"
    [ "$ENABLE_SSL" != true ] && protocol="http"
    cat > "$INSTALL_DIR/.env.production" << EOF
# Federation
LOCAL_DOMAIN=$DOMAIN
SINGLE_USER_MODE=$SINGLE_USER_MODE
# Redis
REDIS_HOST=redis
REDIS_PORT=6379
REDIS_PASSWORD=$REDIS_PASSWORD
# PostgreSQL
DB_HOST=db
DB_USER=mastodon
DB_NAME=mastodon
DB_PASS=$POSTGRES_PASSWORD
DB_PORT=5432
# Secrets
SECRET_KEY_BASE=$SECRET_KEY_BASE
OTP_SECRET=$OTP_SECRET
VAPID_PRIVATE_KEY=$VAPID_PRIVATE_KEY
VAPID_PUBLIC_KEY=$VAPID_PUBLIC_KEY
# Web
WEB_DOMAIN=$DOMAIN
ALTERNATE_DOMAINS=
# Email (configure for production)
SMTP_SERVER=smtp.mailgun.org
SMTP_PORT=587
SMTP_LOGIN=
SMTP_PASSWORD=
SMTP_FROM_ADDRESS=notifications@$DOMAIN
SMTP_AUTH_METHOD=plain
SMTP_OPENSSL_VERIFY_MODE=none
SMTP_ENABLE_STARTTLS=auto
# File storage
# For S3 storage, uncomment and configure:
# S3_ENABLED=true
# S3_BUCKET=your-bucket
# AWS_ACCESS_KEY_ID=
# AWS_SECRET_ACCESS_KEY=
# S3_REGION=us-east-1
# S3_PROTOCOL=https
# S3_HOSTNAME=s3.amazonaws.com
# Elasticsearch (optional, for full-text search)
# ES_ENABLED=true
# ES_HOST=elasticsearch
# ES_PORT=9200
# Performance
RAILS_ENV=production
NODE_ENV=production
RAILS_LOG_LEVEL=warn
TRUSTED_PROXY_IP=172.16.0.0/12
# IP and session
IP_RETENTION_PERIOD=31556952
SESSION_RETENTION_PERIOD=31556952
EOF
    # The file holds every secret: owner-only access.
    chmod 600 "$INSTALL_DIR/.env.production"
    success "Environment configuration created"
}
# Create docker-compose.yml
# Write docker-compose.yml plus the compose-level .env file that feeds it.
# The heredoc delimiter is quoted ('EOF'), so ${DB_PASS} and ${REDIS_PASSWORD}
# are written literally into the YAML and substituted by `docker compose`
# from $INSTALL_DIR/.env at run time.
# Fix: mirror sanitization had broken three YAML lines (redacted password
# interpolations, mangled redis healthcheck) and left the two trailing echo
# statements with unbalanced quotes (a shell syntax error); all restored.
create_compose() {
    log "Creating Docker Compose file..."
    cat > "$INSTALL_DIR/docker-compose.yml" << 'EOF'
services:
  db:
    image: postgres:16-alpine
    container_name: mastodon-db
    shm_size: 256mb
    environment:
      POSTGRES_USER: mastodon
      POSTGRES_PASSWORD: ${DB_PASS}
      POSTGRES_DB: mastodon
    volumes:
      - ./data/postgres:/var/lib/postgresql/data
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "pg_isready", "-U", "mastodon"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - internal
  redis:
    image: redis:7-alpine
    container_name: mastodon-redis
    command: redis-server --requirepass ${REDIS_PASSWORD}
    volumes:
      - ./data/redis:/data
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "redis-cli", "-a", "${REDIS_PASSWORD}", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - internal
  web:
    image: tootsuite/mastodon:latest
    container_name: mastodon-web
    env_file: .env.production
    command: bash -c "rm -f /mastodon/tmp/pids/server.pid; bundle exec rails s -p 3000"
    volumes:
      - ./data/mastodon/public/system:/mastodon/public/system
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "wget -q --spider --proxy=off localhost:3000/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - internal
      - external
  streaming:
    image: tootsuite/mastodon:latest
    container_name: mastodon-streaming
    env_file: .env.production
    command: node ./streaming
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "wget -q --spider --proxy=off localhost:4000/api/v1/streaming/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - internal
      - external
  sidekiq:
    image: tootsuite/mastodon:latest
    container_name: mastodon-sidekiq
    env_file: .env.production
    command: bundle exec sidekiq
    volumes:
      - ./data/mastodon/public/system:/mastodon/public/system
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "ps aux | grep '[s]idekiq 6' || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - internal
      - external
  caddy:
    image: caddy:2-alpine
    container_name: mastodon-caddy
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile:ro
      - ./data/caddy/data:/data
      - ./data/caddy/config:/config
      - ./data/mastodon/public:/mastodon/public:ro
    depends_on:
      - web
      - streaming
    restart: unless-stopped
    networks:
      - external
  watchtower:
    image: containrrr/watchtower:latest
    container_name: mastodon-watchtower
    environment:
      WATCHTOWER_CLEANUP: "true"
      WATCHTOWER_SCHEDULE: "0 0 4 * * *"
      WATCHTOWER_LABEL_ENABLE: "false"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    restart: unless-stopped
networks:
  internal:
    internal: true
  external:
EOF
    # Compose-level .env providing ${DB_PASS}/${REDIS_PASSWORD} above.
    echo "DB_PASS=$POSTGRES_PASSWORD" > "$INSTALL_DIR/.env"
    echo "REDIS_PASSWORD=$REDIS_PASSWORD" >> "$INSTALL_DIR/.env"
    # Contains secrets: owner-only, matching .env.production.
    chmod 600 "$INSTALL_DIR/.env"
    success "Docker Compose file created"
}
# Create Caddyfile
# Write the Caddy reverse-proxy config. Two variants: an HTTPS site block for
# $DOMAIN (Caddy auto-provisions Let's Encrypt certs) or a plain :80 listener
# for --no-ssl testing. Heredoc is unquoted so $DOMAIN expands at render time;
# the Caddyfile contains no other $ sequences, so nothing else is expanded.
create_caddyfile() {
log "Creating Caddy configuration..."
if [ "$ENABLE_SSL" = true ]; then
cat > "$INSTALL_DIR/Caddyfile" << EOF
$DOMAIN {
encode gzip
handle_path /system/* {
file_server {
root /mastodon/public
}
}
handle /api/v1/streaming/* {
reverse_proxy streaming:4000
}
handle /* {
reverse_proxy web:3000
}
header {
Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
X-Frame-Options "SAMEORIGIN"
X-Content-Type-Options "nosniff"
X-XSS-Protection "1; mode=block"
Referrer-Policy "strict-origin-when-cross-origin"
}
log {
output stdout
}
}
EOF
else
# HTTP-only variant: no security headers or access log, testing only.
cat > "$INSTALL_DIR/Caddyfile" << EOF
:80 {
encode gzip
handle_path /system/* {
file_server {
root /mastodon/public
}
}
handle /api/v1/streaming/* {
reverse_proxy streaming:4000
}
handle /* {
reverse_proxy web:3000
}
}
EOF
fi
success "Caddy configuration created"
}
# Initialize database
# Bring up db+redis, then run Mastodon's schema setup/migrations and asset
# precompilation via one-off `compose run` containers.
# Fix: replaced the fixed `sleep 10` with a pg_isready poll (up to ~60s) so
# slow hosts don't race the first migration against PostgreSQL startup.
init_database() {
    log "Initializing database..."
    cd "$INSTALL_DIR"
    # Start the data stores first
    docker compose up -d db redis
    # Wait until PostgreSQL actually accepts connections
    local tries=0
    until docker compose exec -T db pg_isready -U mastodon >/dev/null 2>&1; do
        tries=$((tries + 1))
        [ "$tries" -ge 30 ] && error "PostgreSQL did not become ready in time"
        sleep 2
    done
    # db:setup fails (quietly) when the schema already exists; fall back to
    # migrating in place so re-runs are safe.
    docker compose run --rm web bundle exec rails db:setup SAFETY_ASSURED=1 2>/dev/null || \
    docker compose run --rm web bundle exec rails db:migrate SAFETY_ASSURED=1
    # Precompile assets
    docker compose run --rm web bundle exec rails assets:precompile
    success "Database initialized"
}
# Create management script
# Install the `mastodon` helper CLI at /usr/local/bin/mastodon.
# The heredoc delimiter is quoted ('EOF'), so everything below is written
# verbatim — $2/$@ etc. belong to the generated script, not this installer.
create_management_script() {
log "Creating management script..."
cat > /usr/local/bin/mastodon << 'EOF'
#!/bin/bash
cd /opt/mastodon || exit 1
case "${1:-help}" in
start) docker compose up -d ;;
stop) docker compose down ;;
restart) docker compose restart ${2:-} ;;
status) docker compose ps ;;
logs) docker compose logs -f ${2:-} ;;
update)
docker compose pull
docker compose up -d
docker compose run --rm web bundle exec rails db:migrate
docker compose run --rm web bundle exec rails assets:precompile
docker compose restart
;;
edit) ${EDITOR:-nano} /opt/mastodon/.env.production ;;
admin)
if [ -z "$2" ]; then
echo "Usage: mastodon admin <username>"
exit 1
fi
docker compose run --rm web bin/tootctl accounts create "$2" --email "${3:-admin@localhost}" --confirmed --role Owner
;;
reset-password)
if [ -z "$2" ]; then
echo "Usage: mastodon reset-password <username>"
exit 1
fi
docker compose run --rm web bin/tootctl accounts modify "$2" --reset-password
;;
tootctl)
shift
docker compose run --rm web bin/tootctl "$@"
;;
console)
docker compose run --rm web bin/rails console
;;
shell)
docker compose run --rm web /bin/bash
;;
backup)
timestamp=$(date +"%Y%m%d_%H%M%S")
backup_dir="/opt/mastodon-data/backups"
mkdir -p "$backup_dir"
echo "Backing up database..."
docker compose exec -T db pg_dump -U mastodon mastodon > "$backup_dir/mastodon_db_$timestamp.sql"
echo "Backing up media..."
tar -czf "$backup_dir/mastodon_media_$timestamp.tar.gz" -C /opt/mastodon-data mastodon/public/system
echo "Backup complete: $backup_dir"
ls -la "$backup_dir"/*$timestamp*
;;
cleanup)
echo "Cleaning up old media..."
docker compose run --rm web bin/tootctl media remove --days=7
docker compose run --rm web bin/tootctl preview_cards remove --days=30
docker compose run --rm web bin/tootctl statuses remove --days=90
;;
*)
echo "Mastodon Management"
echo ""
echo "Usage: mastodon <command>"
echo ""
echo "Commands:"
echo " start Start all services"
echo " stop Stop all services"
echo " restart [service] Restart services"
echo " status Show status"
echo " logs [service] View logs"
echo " update Update and migrate"
echo " edit Edit configuration"
echo " admin <user> Create admin user"
echo " reset-password <u> Reset user password"
echo " tootctl <args> Run tootctl command"
echo " console Rails console"
echo " shell Bash shell"
echo " backup Backup database and media"
echo " cleanup Clean old media/statuses"
;;
esac
EOF
# Make the generated CLI executable for all users.
chmod +x /usr/local/bin/mastodon
success "Management script created"
}
# Configure firewall
# Open HTTP/HTTPS on whichever host firewall is active (firewalld first,
# then ufw); merely warns when neither is running. All firewall commands are
# best-effort (`|| true`) so a partial failure never aborts the install.
configure_firewall() {
    log "Configuring firewall..."
    if command -v firewall-cmd >/dev/null 2>&1 && systemctl is-active --quiet firewalld 2>/dev/null; then
        for fw_svc in http https; do
            firewall-cmd --permanent --add-service=$fw_svc 2>/dev/null || true
        done
        firewall-cmd --reload 2>/dev/null || true
        success "Firewall configured (firewalld)"
    elif command -v ufw >/dev/null 2>&1 && ufw status | grep -q "active"; then
        for fw_port in 80 443; do
            ufw allow $fw_port/tcp 2>/dev/null || true
        done
        success "Firewall configured (ufw)"
    else
        warn "No active firewall detected"
    fi
}
# Deploy
# Pull images, expose $DATA_DIR as $INSTALL_DIR/data (so compose's relative
# ./data/* mounts resolve), initialize the database, and start the stack.
# Fix: the original created the `data` symlink and then ran per-subdirectory
# `ln -sf` calls; because the link already resolved into $DATA_DIR, those
# calls placed self-referential links INSIDE the targets (e.g.
# $DATA_DIR/postgres/postgres -> $DATA_DIR/postgres). Now the per-subdir
# links are only made when data/ exists as a real directory, using -n so an
# existing symlink is replaced rather than descended into.
deploy() {
    log "Deploying Mastodon..."
    cd "$INSTALL_DIR"
    if [ ! -e "$INSTALL_DIR/data" ]; then
        ln -s "$DATA_DIR" "$INSTALL_DIR/data"
    elif [ ! -L "$INSTALL_DIR/data" ]; then
        # data/ is a plain directory (e.g. from a previous run): link each
        # persistent subtree individually instead.
        for sub in postgres redis mastodon caddy; do
            ln -sfn "$DATA_DIR/$sub" "$INSTALL_DIR/data/$sub"
        done
    fi
    docker compose pull
    # Initialize database (starts db+redis, migrates, precompiles)
    init_database
    # Start all services
    docker compose up -d
    # Give web/streaming/caddy a moment before the completion banner
    log "Waiting for services to start..."
    sleep 15
    success "Mastodon deployed!"
}
# Show completion message
# Print the post-install summary: access URL, first-admin instructions, the
# management CLI cheat-sheet, and a reminder to configure SMTP.
show_complete() {
    local protocol="https"
    [ "$ENABLE_SSL" != true ] && protocol="http"
    # Single unquoted heredoc instead of an echo chain; ${protocol}, ${DOMAIN},
    # $INSTALL_DIR and $DATA_DIR expand exactly as the echoes did.
    cat << EOF

========================================
 Mastodon Installation Complete!
========================================

Access:
 Web Interface: ${protocol}://${DOMAIN}

Create your admin account:
 mastodon admin yourusername your@email.com

Then reset password to get initial password:
 mastodon reset-password yourusername

Commands:
 mastodon status - Show service status
 mastodon logs - View logs
 mastodon update - Update Mastodon
 mastodon backup - Backup database
 mastodon cleanup - Clean old media
 mastodon tootctl - Run tootctl commands

Config: $INSTALL_DIR/.env.production
Data: $DATA_DIR

⚠️ Configure email in .env.production for:
 - Email notifications
 - Password resets
 - Account confirmations

EOF
}
# Main
# Top-level orchestration. Order matters: detect_os before any package work;
# get_domain/generate_secrets before the config renderers (create_env,
# create_compose, create_caddyfile consume their globals); install_docker
# before deploy. Note generate_secrets runs BEFORE install_docker, so its
# `docker run` VAPID step falls back to random keys on a fresh host.
main() {
echo ""
echo "========================================"
echo " Mastodon Production Installer"
echo "========================================"
echo ""
detect_os
get_domain
generate_secrets
install_docker
create_directories
create_env
create_compose
create_caddyfile
create_management_script
configure_firewall
deploy
show_complete
}
main "$@"

View File

@@ -0,0 +1,105 @@
#!/bin/bash
# Mastodon Update Script
# Updates Mastodon to the latest stable version (or specified version)
# Run as root
#
# Usage: update-mastodon.sh [vX.Y.Z]
# Without an argument, the newest stable tag (vX.Y.Z, no prereleases) is used.
# Fix: under `set -e`, the unconditional `chcon` aborted the whole script on
# filesystems/hosts without SELinux support — AFTER services were stopped.
# It is now best-effort. Also quoted $MASTODON_DIR against word splitting.
set -e
TARGET_VERSION="${1:-}"
MASTODON_DIR="/home/mastodon/live"
echo "=========================================="
echo "Mastodon Update Script"
echo "=========================================="
# Check current version
CURRENT_VERSION=$(cd "$MASTODON_DIR" && git describe --tags 2>/dev/null || echo "unknown")
echo "Current version: $CURRENT_VERSION"
# Get latest version if not specified
if [ -z "$TARGET_VERSION" ]; then
echo "Fetching latest version..."
cd "$MASTODON_DIR"
sudo -u mastodon git fetch --tags
# Only plain release tags vX.Y.Z — excludes rc/beta prereleases.
TARGET_VERSION=$(git tag -l 'v*' | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' | sort -V | tail -1)
fi
echo "Target version: $TARGET_VERSION"
if [ "$CURRENT_VERSION" = "$TARGET_VERSION" ]; then
echo "Already at version $TARGET_VERSION. Nothing to do."
exit 0
fi
# Interactive confirmation before touching anything.
read -p "Proceed with update? (y/N) " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
echo "Update cancelled."
exit 1
fi
# Create backup first (best-effort if the backup script is absent).
echo ""
echo "[1/7] Creating backup before update..."
/home/mastodon/scripts/backup-mastodon.sh || echo "Backup script not found, skipping..."
# Stop services
echo ""
echo "[2/7] Stopping Mastodon services..."
systemctl stop mastodon-web mastodon-sidekiq mastodon-streaming
# Update code
echo ""
echo "[3/7] Updating Mastodon code..."
cd "$MASTODON_DIR"
sudo -u mastodon git fetch --all
sudo -u mastodon git checkout "$TARGET_VERSION"
# Update Ruby dependencies
echo ""
echo "[4/7] Updating Ruby dependencies..."
sudo -u mastodon bash -lc "cd ~/live && bundle install"
# Update Node dependencies
echo ""
echo "[5/7] Updating Node dependencies..."
sudo -u mastodon bash -lc "cd ~/live && yarn install --immutable"
# Run database migrations
echo ""
echo "[6/7] Running database migrations..."
sudo -u mastodon bash -lc "cd ~/live && RAILS_ENV=production bundle exec rails db:migrate"
# Precompile assets
echo ""
echo "[7/7] Precompiling assets (this may take a few minutes)..."
sudo -u mastodon bash -lc "cd ~/live && RAILS_ENV=production bundle exec rails assets:precompile"
# Fix SELinux contexts (no-op failure on non-SELinux systems).
chcon -R -t httpd_sys_content_t /home/mastodon/live/public 2>/dev/null || true
# Start services
echo ""
echo "Starting Mastodon services..."
systemctl start mastodon-web mastodon-sidekiq mastodon-streaming
# Verify: `set -e` makes a failed is-active abort with a nonzero exit here.
sleep 5
echo ""
echo "Checking service status..."
systemctl is-active mastodon-web mastodon-sidekiq mastodon-streaming
NEW_VERSION=$(cd "$MASTODON_DIR" && git describe --tags 2>/dev/null || echo "unknown")
echo ""
echo "=========================================="
echo "✅ Update Complete!"
echo "=========================================="
echo ""
echo "Previous version: $CURRENT_VERSION"
echo "New version: $NEW_VERSION"
echo ""
echo "Please verify your instance is working correctly."
echo "Check the release notes for any manual steps:"
echo "https://github.com/mastodon/mastodon/releases/tag/$TARGET_VERSION"
echo ""

View File

@@ -0,0 +1,185 @@
#!/bin/bash
# =============================================================================
# Mastodon Health Check / Verification Script
# =============================================================================
# Run as root
#
# Checks services, local API endpoints, database/cache, Sidekiq, federation
# endpoints, storage and configuration for a bare-metal (systemd) Mastodon
# install under /home/mastodon/live. Exits 0 on success (warnings allowed),
# 1 when any hard check failed.
echo "=========================================="
echo "Mastodon Health Check"
echo "=========================================="
echo ""
# FAILED = hard failure (affects exit code); WARN = soft issue (reported only).
FAILED=0
WARN=0
# Load domain from .env if available
if [ -f /home/mastodon/live/.env.production ]; then
DOMAIN=$(grep "^LOCAL_DOMAIN=" /home/mastodon/live/.env.production | cut -d= -f2)
echo "Domain: ${DOMAIN:-unknown}"
fi
echo ""
echo "[Service Status]"
# Services absent from the host ("not-found") are reported but not failures.
services=("postgresql" "valkey" "nginx" "mastodon-web" "mastodon-sidekiq" "mastodon-streaming")
for svc in "${services[@]}"; do
STATUS=$(systemctl is-active $svc 2>/dev/null || echo "not-found")
if [ "$STATUS" = "active" ]; then
echo "$svc: running"
elif [ "$STATUS" = "not-found" ]; then
echo " - $svc: not installed"
else
echo "$svc: $STATUS"
FAILED=1
fi
done
echo ""
echo "[API Endpoints]"
# Instance API
INSTANCE=$(curl -sf http://127.0.0.1:3000/api/v1/instance 2>/dev/null)
if [ -n "$INSTANCE" ]; then
# Parse version/user count from the JSON with python3 (jq not assumed).
VERSION=$(echo "$INSTANCE" | python3 -c "import sys,json; print(json.load(sys.stdin).get('version','unknown'))" 2>/dev/null)
USERS=$(echo "$INSTANCE" | python3 -c "import sys,json; print(json.load(sys.stdin).get('stats',{}).get('user_count',0))" 2>/dev/null)
echo " ✓ Instance API: responding (v$VERSION, $USERS users)"
else
echo " ✗ Instance API: not responding"
FAILED=1
fi
# Streaming API
STREAMING=$(curl -sf http://127.0.0.1:4000/api/v1/streaming/health 2>/dev/null)
if [ -n "$STREAMING" ]; then
echo " ✓ Streaming API: healthy"
else
echo " ✗ Streaming API: not responding"
FAILED=1
fi
# Nginx proxy
# NOTE(review): this curls port 3000 (the Rails server itself), the same
# endpoint as the Instance API check above — it does not actually go through
# nginx. Probably intended to hit port 80/443; confirm.
NGINX_CHECK=$(curl -sf -o /dev/null -w "%{http_code}" http://127.0.0.1:3000/ 2>/dev/null)
if [ "$NGINX_CHECK" = "200" ] || [ "$NGINX_CHECK" = "302" ]; then
echo " ✓ Nginx proxy: working (HTTP $NGINX_CHECK)"
else
echo " ✗ Nginx proxy: not working (HTTP $NGINX_CHECK)"
FAILED=1
fi
echo ""
echo "[Database]"
if systemctl is-active --quiet postgresql; then
# Size and row counts are informational; failures here just print blanks.
DB_SIZE=$(sudo -u postgres psql -t -c "SELECT pg_size_pretty(pg_database_size('mastodon_production'));" 2>/dev/null | xargs)
ACCOUNTS=$(sudo -u postgres psql -t -d mastodon_production -c "SELECT COUNT(*) FROM accounts;" 2>/dev/null | xargs)
STATUSES=$(sudo -u postgres psql -t -d mastodon_production -c "SELECT COUNT(*) FROM statuses;" 2>/dev/null | xargs)
echo " ✓ PostgreSQL: running (DB: ${DB_SIZE:-unknown})"
echo " Accounts: ${ACCOUNTS:-0}, Statuses: ${STATUSES:-0}"
else
echo " ✗ PostgreSQL: not running"
FAILED=1
fi
echo ""
echo "[Cache]"
# Either Valkey or Redis satisfies the cache requirement.
if systemctl is-active --quiet valkey; then
VALKEY_INFO=$(valkey-cli INFO server 2>/dev/null | grep valkey_version | cut -d: -f2 | tr -d '\r')
echo " ✓ Valkey: running (v${VALKEY_INFO:-unknown})"
elif systemctl is-active --quiet redis; then
REDIS_INFO=$(redis-cli INFO server 2>/dev/null | grep redis_version | cut -d: -f2 | tr -d '\r')
echo " ✓ Redis: running (v${REDIS_INFO:-unknown})"
else
echo " ✗ Valkey/Redis: not running"
FAILED=1
fi
echo ""
echo "[Sidekiq Jobs]"
# Check sidekiq process
SIDEKIQ_PID=$(pgrep -f "sidekiq.*live" 2>/dev/null)
if [ -n "$SIDEKIQ_PID" ]; then
SIDEKIQ_MEM=$(ps -p $SIDEKIQ_PID -o rss= 2>/dev/null | awk '{printf "%.0fMB", $1/1024}')
echo " ✓ Sidekiq: running (PID: $SIDEKIQ_PID, Mem: $SIDEKIQ_MEM)"
else
echo " ✗ Sidekiq: not running"
FAILED=1
fi
echo ""
echo "[Federation]"
# Check webfinger
# These three checks need the domain parsed earlier; skipped entirely if the
# env file was missing. Webfinger for a nonexistent user is informational.
if [ -n "$DOMAIN" ]; then
WF_CHECK=$(curl -sf -H "Accept: application/jrd+json" "http://127.0.0.1:3000/.well-known/webfinger?resource=acct:test@$DOMAIN" 2>/dev/null | head -c 50)
if [ -n "$WF_CHECK" ]; then
echo " ✓ Webfinger: responding"
else
echo " - Webfinger: no test account (may be normal)"
fi
# Check host-meta
HOSTMETA=$(curl -sf "http://127.0.0.1:3000/.well-known/host-meta" 2>/dev/null | head -c 50)
if [ -n "$HOSTMETA" ]; then
echo " ✓ Host-meta: configured"
else
echo " ✗ Host-meta: not responding"
WARN=1
fi
# Check nodeinfo
NODEINFO=$(curl -sf "http://127.0.0.1:3000/nodeinfo/2.0" 2>/dev/null)
if [ -n "$NODEINFO" ]; then
echo " ✓ NodeInfo: available"
else
echo " ✗ NodeInfo: not responding"
WARN=1
fi
fi
echo ""
echo "[Storage]"
if [ -d /home/mastodon/live/public/system ]; then
MEDIA_SIZE=$(du -sh /home/mastodon/live/public/system 2>/dev/null | cut -f1)
echo " Media storage: ${MEDIA_SIZE:-empty}"
else
echo " Media storage: not yet created"
fi
DISK_USAGE=$(df -h /home 2>/dev/null | tail -1 | awk '{print $5}')
echo " Disk usage (/home): ${DISK_USAGE:-unknown}"
echo ""
echo "[Configuration]"
if [ -f /home/mastodon/live/.env.production ]; then
echo " ✓ .env.production exists"
# Check critical settings
# SECRET_KEY_BASE should be a 128-char hex string; >50 is a sanity floor.
SECRET_KEY=$(grep "^SECRET_KEY_BASE=" /home/mastodon/live/.env.production | cut -d= -f2)
if [ -n "$SECRET_KEY" ] && [ ${#SECRET_KEY} -gt 50 ]; then
echo " ✓ SECRET_KEY_BASE: configured"
else
echo " ✗ SECRET_KEY_BASE: missing or invalid"
FAILED=1
fi
VAPID_KEY=$(grep "^VAPID_PRIVATE_KEY=" /home/mastodon/live/.env.production | cut -d= -f2)
if [ -n "$VAPID_KEY" ]; then
echo " ✓ VAPID keys: configured"
else
echo " ✗ VAPID keys: missing"
WARN=1
fi
else
echo " ✗ .env.production: not found"
FAILED=1
fi
echo ""
echo "=========================================="
if [ $FAILED -eq 0 ] && [ $WARN -eq 0 ]; then
echo "✅ All checks passed!"
elif [ $FAILED -eq 0 ]; then
echo "⚠️ Passed with warnings"
else
echo "❌ Some checks failed"
fi
echo "=========================================="
exit $FAILED

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2026 Vish
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,197 @@
# Matrix Synapse + Element Web Bare-Metal Installation
Production-ready Matrix homeserver with Element Web client for Ubuntu 24.04 LTS.
## Features
- **Synapse** - Matrix homeserver with PostgreSQL backend
- **Element Web** - Modern web client (v1.12.8)
- **Coturn** - TURN server for voice/video calls
- **Federation** - Connect with other Matrix servers
- **Nginx** - Reverse proxy for HTTP traffic
- **Auto-validation** - YAML config validation during install
## Quick Install
```bash
# On a fresh Ubuntu 24.04 VM (run as root)
export DOMAIN="mx.example.com"
export ADMIN_USER="admin"
curl -sSL https://git.vish.gg/Vish/matrix-element/raw/branch/main/install-baremetal.sh | bash
```
### One-Liner (with defaults)
```bash
curl -sSL https://git.vish.gg/Vish/matrix-element/raw/branch/main/install-baremetal.sh | DOMAIN=mx.example.com bash
```
## Requirements
- Ubuntu 24.04 LTS
- 2+ CPU cores
- 4GB+ RAM
- 50GB+ disk space
- Domain with DNS pointing to your server
## Post-Installation
### 1. Configure Reverse Proxy
If using a reverse proxy (Synology, Cloudflare, etc.), point:
- `https://your-domain.com:443` → `http://server-ip:8080`
- Enable WebSocket support
### 2. Port Forwarding for TURN (Voice/Video Calls)
Forward these ports to your Matrix server:
| Port | Protocol | Purpose |
|------|----------|---------|
| 3479 | TCP/UDP | TURN |
| 5350 | TCP/UDP | TURNS (TLS) |
| 49201-49250 | UDP | Media relay |
### 3. Change Admin Password
Login at `https://your-domain.com` and change the default password immediately.
## Scripts
### Verify Installation
```bash
# Check health of all services
./verify-matrix.sh
```
This checks:
- All services (synapse, nginx, coturn, postgresql)
- Matrix Client and Federation APIs
- Well-known endpoints
- Element Web accessibility
- Database status
### Fix/Repair
```bash
# Diagnose and fix common issues
./fix-matrix.sh
```
This automatically fixes:
- YAML configuration errors in homeserver.yaml
- File ownership and permissions
- Stopped services
- Common configuration issues
### Backup
```bash
# Create a full backup
./backup-matrix.sh
# Or specify custom location
BACKUP_DIR=/mnt/backup ./backup-matrix.sh
```
Creates:
- PostgreSQL database dump
- Configuration files
- Media files
- Signing keys
- TURN configuration
### Update
```bash
# Update Synapse and Element to latest versions
./update-matrix.sh
```
This will:
1. Create a backup (optional)
2. Update Synapse via pip
3. Run database migrations
4. Download latest Element Web
5. Restart services
## Configuration Files
| File | Purpose |
|------|---------|
| `/opt/synapse/homeserver.yaml` | Main Synapse config |
| `/opt/synapse/*.signing.key` | Server signing key (CRITICAL - backup!) |
| `/opt/element/web/config.json` | Element Web config |
| `/etc/turnserver.conf` | TURN server config |
| `/etc/nginx/sites-available/matrix` | Nginx config |
| `/root/.matrix_secrets` | Passwords and secrets |
## Service Management
```bash
# Check status
systemctl status synapse nginx coturn
# Restart services
systemctl restart synapse
systemctl restart nginx
systemctl restart coturn
# View logs
journalctl -u synapse -f
journalctl -u coturn -f
```
## Federation Testing
Test federation status:
```bash
curl https://federationtester.matrix.org/api/report?server_name=your-domain.com
```
## Adding Users
```bash
# Create a new user
cd /opt/synapse
source venv/bin/activate
register_new_matrix_user -c homeserver.yaml http://localhost:8008
# Create admin user
register_new_matrix_user -c homeserver.yaml -a http://localhost:8008
```
## Troubleshooting
### Check if services are running
```bash
systemctl status synapse nginx coturn postgresql
```
### Test Matrix API locally
```bash
curl http://localhost:8008/_matrix/client/versions
```
### Test well-known endpoints
```bash
curl https://your-domain.com/.well-known/matrix/server
curl https://your-domain.com/.well-known/matrix/client
```
### Check Synapse logs
```bash
journalctl -u synapse -n 100
tail -f /opt/synapse/homeserver.log
```
## Security Notes
- Change the admin password immediately after installation
- Keep `/opt/synapse/*.signing.key` secure and backed up
- Consider enabling rate limiting in production
- Review `/opt/synapse/homeserver.yaml` for security settings
## License
MIT License

View File

@@ -0,0 +1,119 @@
#!/bin/bash
# =============================================================================
# Matrix Synapse Backup Script
# Creates a complete backup for migration
# =============================================================================
# Run as root
#
# Produces a single tarball ${BACKUP_DIR}/matrix_backup_<timestamp>.tar.gz
# containing: a pg_dump of the synapse DB, homeserver.yaml + signing keys,
# the media store, Element and TURN configs, and a RESTORE.md with the
# step-by-step restore procedure. BACKUP_DIR may be overridden via env.
set -e
BACKUP_DIR="${BACKUP_DIR:-/opt/synapse/backups}"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_NAME="matrix_backup_${TIMESTAMP}"
BACKUP_PATH="${BACKUP_DIR}/${BACKUP_NAME}"
echo "=========================================="
echo "Matrix Synapse Backup Script"
echo "Backup location: ${BACKUP_PATH}"
echo "=========================================="
mkdir -p "${BACKUP_PATH}"
# 1. Backup PostgreSQL (custom format -Fc, restorable with pg_restore)
echo "[1/5] Backing up PostgreSQL database..."
sudo -u postgres pg_dump -Fc synapse > "${BACKUP_PATH}/synapse.dump"
echo " Database: $(du -h ${BACKUP_PATH}/synapse.dump | cut -f1)"
# 2. Backup Synapse config and keys (signing key is critical for federation;
# optional files are copied best-effort with `|| true`)
echo "[2/5] Backing up configuration..."
cp /opt/synapse/homeserver.yaml "${BACKUP_PATH}/"
cp /opt/synapse/*.signing.key "${BACKUP_PATH}/" 2>/dev/null || true
cp /opt/synapse/*.log.config "${BACKUP_PATH}/" 2>/dev/null || true
cp /root/.matrix_secrets "${BACKUP_PATH}/" 2>/dev/null || true
# 3. Backup media
echo "[3/5] Backing up media files (this may take a while)..."
if [ -d /opt/synapse/media_store ]; then
tar -czf "${BACKUP_PATH}/media_store.tar.gz" -C /opt/synapse media_store
echo " Media: $(du -h ${BACKUP_PATH}/media_store.tar.gz | cut -f1)"
else
echo " No media directory found"
fi
# 4. Backup Element config
echo "[4/5] Backing up Element config..."
cp /opt/element/web/config.json "${BACKUP_PATH}/element_config.json" 2>/dev/null || true
# 5. Backup TURN config
echo "[5/5] Backing up TURN config..."
cp /etc/turnserver.conf "${BACKUP_PATH}/" 2>/dev/null || true
# Create restore instructions (quoted heredoc: written verbatim)
cat > "${BACKUP_PATH}/RESTORE.md" << 'RESTORE'
# Matrix Restore Instructions
## On the new server:
1. Run the install script first (it will create a fresh install)
2. Stop services:
```
systemctl stop synapse nginx coturn
```
3. Restore database:
```
sudo -u postgres dropdb synapse
sudo -u postgres createdb -O synapse -E UTF8 -l C -T template0 synapse
sudo -u postgres pg_restore -d synapse synapse.dump
```
4. Restore config files:
```
cp homeserver.yaml /opt/synapse/
cp *.signing.key /opt/synapse/
cp *.log.config /opt/synapse/
chown -R synapse:synapse /opt/synapse
```
5. Restore media:
```
cd /opt/synapse
tar -xzf /path/to/backup/media_store.tar.gz
chown -R synapse:synapse media_store
```
6. Restore TURN config:
```
cp turnserver.conf /etc/turnserver.conf
```
7. Restore Element config:
```
cp element_config.json /opt/element/web/config.json
```
8. Start services:
```
systemctl start coturn nginx synapse
```
RESTORE
# Create archive: fold the staging directory into one tarball, then drop it.
echo ""
echo "Creating final archive..."
cd "${BACKUP_DIR}"
tar -czf "${BACKUP_NAME}.tar.gz" "${BACKUP_NAME}"
rm -rf "${BACKUP_NAME}"
FINAL_SIZE=$(du -h "${BACKUP_DIR}/${BACKUP_NAME}.tar.gz" | cut -f1)
echo ""
echo "=========================================="
echo "✅ Backup Complete!"
echo "=========================================="
echo ""
echo "Backup file: ${BACKUP_DIR}/${BACKUP_NAME}.tar.gz"
echo "Size: ${FINAL_SIZE}"
echo ""
echo "Download: scp root@server:${BACKUP_DIR}/${BACKUP_NAME}.tar.gz ."

196
deployments/matrix/fix-matrix.sh Executable file
View File

@@ -0,0 +1,196 @@
#!/bin/bash
# =============================================================================
# Matrix Synapse Fix/Repair Script
# Diagnoses and fixes common issues
# =============================================================================
# Run as root
echo "=========================================="
echo "Matrix Synapse Fix/Repair Tool"
echo "=========================================="

# Check root
if [[ $EUID -ne 0 ]]; then
    echo "This script must be run as root"
    exit 1
fi

# Counters reported in the summary at the end of the script.
FIXED=0
ERRORS=0

# 1. Check and fix YAML configuration
echo ""
echo "[1/6] Checking Synapse configuration..."
if [ -f /opt/synapse/homeserver.yaml ]; then
    if python3 -c "import yaml; yaml.safe_load(open('/opt/synapse/homeserver.yaml'))" 2>/dev/null; then
        echo " ✓ homeserver.yaml is valid YAML"
    else
        echo " ✗ homeserver.yaml has YAML errors!"
        echo " Creating backup at /opt/synapse/homeserver.yaml.broken"
        cp /opt/synapse/homeserver.yaml /opt/synapse/homeserver.yaml.broken
        # Try to fix common issues
        echo " Attempting automatic fix..."
        # The helper re-indents turn_uris list items (the common breakage),
        # validates the result, and exits non-zero when the file still does
        # not parse — so failures are counted as errors, not fixes.
        if python3 << 'PYFIX'
import sys
import yaml

CONFIG = '/opt/synapse/homeserver.yaml'
try:
    with open(CONFIG, 'r') as f:
        content = f.read()
    # Re-indent list items written flush-left under turn_uris.
    lines = content.split('\n')
    fixed_lines = []
    in_list = False
    for line in lines:
        if line.strip() == 'turn_uris:':
            in_list = True
            fixed_lines.append(line)
        elif in_list and line.strip().startswith('- "turn:'):
            fixed_lines.append('  ' + line.strip())
        elif in_list and line.strip().startswith('- "turns:'):
            fixed_lines.append('  ' + line.strip())
        elif in_list and not line.strip().startswith('-') and line.strip():
            in_list = False
            fixed_lines.append(line)
        else:
            fixed_lines.append(line)
    fixed_content = '\n'.join(fixed_lines)
    # Validate the fix BEFORE overwriting the config.
    yaml.safe_load(fixed_content)
    with open(CONFIG, 'w') as f:
        f.write(fixed_content)
    print(" ✓ Configuration fixed automatically")
except Exception as e:
    print(f" ✗ Auto-fix failed: {e}")
    print(" Please manually fix /opt/synapse/homeserver.yaml")
    print(" Backup saved at /opt/synapse/homeserver.yaml.broken")
    sys.exit(1)
PYFIX
        then
            FIXED=$((FIXED + 1))
        else
            # BUG FIX: the original counted a "fix" even when the auto-fix
            # failed; treat that case as an error needing manual attention.
            ERRORS=$((ERRORS + 1))
        fi
    fi
else
    echo " ✗ homeserver.yaml not found!"
    ERRORS=$((ERRORS + 1))
fi
# 2. Check file permissions
echo ""
echo "[2/6] Checking file permissions..."
if [ -d /opt/synapse ]; then
    OWNER=$(stat -c '%U' /opt/synapse)
    if [ "$OWNER" = "synapse" ]; then
        echo " ✓ /opt/synapse owned by synapse user"
    else
        echo " ✗ Fixing ownership of /opt/synapse..."
        chown -R synapse:synapse /opt/synapse
        FIXED=$((FIXED + 1))
    fi
    # Check config file permissions
    if [ -f /opt/synapse/homeserver.yaml ]; then
        PERMS=$(stat -c '%a' /opt/synapse/homeserver.yaml)
        # 600/640 keep the DB password and secrets in the config unreadable
        # to other local users.
        if [ "$PERMS" = "600" ] || [ "$PERMS" = "640" ]; then
            echo " ✓ homeserver.yaml has correct permissions"
        else
            echo " ✗ Fixing homeserver.yaml permissions..."
            chmod 600 /opt/synapse/homeserver.yaml
            FIXED=$((FIXED + 1))
        fi
    fi
fi

# 3. Check services
echo ""
echo "[3/6] Checking services..."
# Attempt to start anything that is down; successful starts count as fixes.
for svc in postgresql synapse nginx coturn; do
    if systemctl is-active --quiet $svc 2>/dev/null; then
        echo "✓ $svc is running"
    else
        echo "✗ $svc is not running, attempting to start..."
        systemctl start $svc 2>/dev/null
        # Give the unit a moment to settle before re-checking.
        sleep 2
        if systemctl is-active --quiet $svc; then
            echo "✓ $svc started successfully"
            FIXED=$((FIXED + 1))
        else
            echo " ✗ Failed to start $svc"
            echo " Check logs: journalctl -u $svc -n 50"
            ERRORS=$((ERRORS + 1))
        fi
    fi
done
# 4. Check database connection
echo ""
echo "[4/6] Checking database..."
if sudo -u postgres psql -c "SELECT 1" synapse > /dev/null 2>&1; then
    echo " ✓ PostgreSQL connection successful"
else
    echo " ✗ Cannot connect to synapse database"
    ERRORS=$((ERRORS + 1))
fi

# 5. Check nginx configuration
echo ""
echo "[5/6] Checking nginx configuration..."
if nginx -t 2>/dev/null; then
    echo " ✓ Nginx configuration is valid"
else
    echo " ✗ Nginx configuration has errors"
    # Re-run without silencing stderr so the actual errors are shown.
    nginx -t
    ERRORS=$((ERRORS + 1))
fi

# 6. Check API endpoints
echo ""
echo "[6/6] Checking API endpoints..."
sleep 1
if curl -sf http://localhost:8008/_matrix/client/versions > /dev/null 2>&1; then
    echo " ✓ Matrix Client API responding"
else
    echo " ✗ Matrix Client API not responding"
    echo " Checking Synapse logs..."
    journalctl -u synapse -n 10 --no-pager 2>/dev/null | tail -5
    ERRORS=$((ERRORS + 1))
fi
# Discover the nginx listen port; fall back to 8080 when the vhost is missing.
LISTEN_PORT=$(grep -oP '^ listen \K\d+' /etc/nginx/sites-enabled/matrix 2>/dev/null | head -1 || echo "8080")
if curl -sf http://localhost:$LISTEN_PORT/ > /dev/null 2>&1; then
    echo " ✓ Element Web accessible on port $LISTEN_PORT"
else
    echo " ✗ Element Web not accessible"
    ERRORS=$((ERRORS + 1))
fi
# Summary
echo ""
echo "=========================================="
if [ $ERRORS -eq 0 ]; then
    if [ $FIXED -eq 0 ]; then
        echo "✅ All checks passed! No issues found."
    else
        echo "✅ Fixed $FIXED issue(s). All checks now pass."
        echo ""
        echo "You may want to restart services:"
        echo " systemctl restart synapse nginx"
    fi
else
    echo "⚠️ Found $ERRORS error(s) that need manual attention."
    echo ""
    echo "Common fixes:"
    echo " - Check logs: journalctl -u synapse -f"
    echo " - Validate YAML: python3 -c \"import yaml; yaml.safe_load(open('/opt/synapse/homeserver.yaml'))\""
    echo " - Restart services: systemctl restart postgresql synapse nginx coturn"
fi
echo "=========================================="
# Exit status mirrors the number of unresolved errors (0 = healthy).
exit $ERRORS

View File

@@ -0,0 +1,377 @@
#!/bin/bash
# =============================================================================
# Matrix Synapse + Element Web Bare-Metal Install Script
# For Ubuntu 24.04 LTS
# =============================================================================
# Usage:
# export DOMAIN="mx.example.com"
# export ADMIN_USER="admin"
# export ADMIN_EMAIL="admin@example.com"
# curl -sSL https://git.vish.gg/Vish/matrix-element/raw/branch/main/install-baremetal.sh | bash
#
# Run as root on a fresh Ubuntu 24.04 VM
# =============================================================================
set -e

# Configuration — every value below can be overridden via the environment.
DOMAIN="${DOMAIN:-mx.example.com}"
ADMIN_USER="${ADMIN_USER:-admin}"
ADMIN_EMAIL="${ADMIN_EMAIL:-admin@example.com}"
TURN_DOMAIN="${TURN_DOMAIN:-$DOMAIN}"
# Non-default TURN ports — presumably chosen to avoid clashing with another
# coturn/service on the host; confirm they match your firewall forwarding.
TURN_PORT="${TURN_PORT:-3479}"
TURN_TLS_PORT="${TURN_TLS_PORT:-5350}"
TURN_MIN_PORT="${TURN_MIN_PORT:-49201}"
TURN_MAX_PORT="${TURN_MAX_PORT:-49250}"
ELEMENT_VERSION="${ELEMENT_VERSION:-v1.12.8}"
# Plain-HTTP port nginx serves on; TLS is terminated by an external proxy.
LISTEN_PORT="${LISTEN_PORT:-8080}"

echo "=========================================="
echo "Matrix Synapse + Element Web Installer"
echo "=========================================="
echo "Domain: $DOMAIN"
echo "Admin: $ADMIN_USER"
echo "=========================================="

# Check root
if [[ $EUID -ne 0 ]]; then
    echo "This script must be run as root"
    exit 1
fi
# Update system
echo "[1/10] Updating system..."
apt update && apt upgrade -y

# Install dependencies
echo "[2/10] Installing dependencies..."
apt install -y postgresql postgresql-contrib nginx coturn \
    python3-pip python3-venv python3-dev build-essential \
    libffi-dev libssl-dev libjpeg-dev libxslt1-dev \
    curl wget git jq

# Create synapse user (idempotent: ignore "already exists")
echo "[3/10] Creating synapse user..."
useradd -r -m -d /opt/synapse -s /bin/bash synapse 2>/dev/null || true
mkdir -p /opt/synapse /opt/element
chown synapse:synapse /opt/synapse

# Setup PostgreSQL
echo "[4/10] Setting up PostgreSQL..."
# FIX: restore the command substitution the sanitizer broke — generate a
# random DB password (same pattern as TURN_SECRET below) and create the
# synapse role with it, or rotate the password if the role already exists.
DB_PASS=$(openssl rand -hex 16)
sudo -u postgres psql -c "CREATE USER synapse WITH PASSWORD '$DB_PASS';" 2>/dev/null || \
    sudo -u postgres psql -c "ALTER USER synapse WITH PASSWORD '$DB_PASS';"
# Synapse requires a C-collated database created from template0.
sudo -u postgres psql -c "CREATE DATABASE synapse ENCODING 'UTF8' LC_COLLATE='C' LC_CTYPE='C' template=template0 OWNER synapse;" 2>/dev/null || true
# Install Synapse
echo "[5/10] Installing Synapse..."
# Install into a dedicated virtualenv owned by the synapse user.
sudo -u synapse bash << SYNAPSE_INSTALL
cd /opt/synapse
python3 -m venv venv
source venv/bin/activate
pip install --upgrade pip setuptools wheel
pip install matrix-synapse psycopg2-binary lxml 'prometheus-client<0.21'
SYNAPSE_INSTALL

# Generate config (only used to obtain fresh secrets and key files; the
# homeserver.yaml it writes is replaced below).
echo "[6/10] Generating Synapse configuration..."
cd /opt/synapse
sudo -u synapse /opt/synapse/venv/bin/python -m synapse.app.homeserver \
    --server-name "$DOMAIN" \
    --config-path homeserver.yaml \
    --generate-config \
    --report-stats=no

# Get generated secrets out of the generated config before overwriting it.
REG_SECRET=$(grep 'registration_shared_secret' homeserver.yaml | head -1 | awk '{print $2}')
MAC_SECRET=$(grep 'macaroon_secret_key' homeserver.yaml | head -1 | awk '{print $2}')
FORM_SECRET=$(grep 'form_secret' homeserver.yaml | head -1 | awk '{print $2}')
TURN_SECRET=$(openssl rand -hex 32)

# Create production config. The heredoc delimiter is unquoted, so shell
# variables expand here. FIX: restored the YAML nesting (flattened by the
# scrape) and the real "$DB_PASS" in place of the sanitizer placeholder.
cat > /opt/synapse/homeserver.yaml << YAML
server_name: "$DOMAIN"
pid_file: /opt/synapse/homeserver.pid
public_baseurl: https://$DOMAIN/
listeners:
  - port: 8008
    tls: false
    type: http
    x_forwarded: true
    resources:
      - names: [client, federation]
        compress: false
database:
  name: psycopg2
  args:
    user: synapse
    password: "$DB_PASS"
    database: synapse
    host: localhost
    cp_min: 5
    cp_max: 10
log_config: "/opt/synapse/$DOMAIN.log.config"
media_store_path: /opt/synapse/media_store
signing_key_path: "/opt/synapse/$DOMAIN.signing.key"
trusted_key_servers:
  - server_name: "matrix.org"
registration_shared_secret: $REG_SECRET
macaroon_secret_key: $MAC_SECRET
form_secret: $FORM_SECRET
enable_registration: false
enable_registration_without_verification: false
turn_uris:
  - "turn:$TURN_DOMAIN:$TURN_PORT?transport=udp"
  - "turn:$TURN_DOMAIN:$TURN_PORT?transport=tcp"
  - "turns:$TURN_DOMAIN:$TURN_TLS_PORT?transport=udp"
  - "turns:$TURN_DOMAIN:$TURN_TLS_PORT?transport=tcp"
turn_shared_secret: "$TURN_SECRET"
turn_user_lifetime: 86400000
turn_allow_guests: true
max_upload_size: 100M
url_preview_enabled: true
url_preview_ip_range_blacklist:
  - '127.0.0.0/8'
  - '10.0.0.0/8'
  - '172.16.0.0/12'
  - '192.168.0.0/16'
  - '100.64.0.0/10'
  - '169.254.0.0/16'
  - '::1/128'
  - 'fe80::/64'
  - 'fc00::/7'
suppress_key_server_warning: true
enable_metrics: false
report_stats: false
YAML

# Validate YAML configuration so a bad template fails fast (set -e aborts).
echo "Validating Synapse configuration..."
python3 -c "import yaml; yaml.safe_load(open('/opt/synapse/homeserver.yaml'))" || {
    echo "ERROR: Invalid YAML in homeserver.yaml"
    exit 1
}
mkdir -p /opt/synapse/media_store
chown -R synapse:synapse /opt/synapse
# Configure coturn
echo "[7/10] Configuring TURN server..."
# Shared-secret (REST API) auth — the same secret was written into
# homeserver.yaml's turn_shared_secret above.
cat > /etc/turnserver.conf << TURN
listening-port=$TURN_PORT
tls-listening-port=$TURN_TLS_PORT
fingerprint
use-auth-secret
static-auth-secret=$TURN_SECRET
realm=$DOMAIN
total-quota=100
bps-capacity=0
stale-nonce=600
no-multicast-peers
min-port=$TURN_MIN_PORT
max-port=$TURN_MAX_PORT
log-file=/var/log/turnserver.log
TURN

# Download Element Web
echo "[8/10] Installing Element Web..."
cd /opt/element
wget -q "https://github.com/element-hq/element-web/releases/download/$ELEMENT_VERSION/element-$ELEMENT_VERSION.tar.gz"
tar xzf "element-$ELEMENT_VERSION.tar.gz"
mv "element-$ELEMENT_VERSION" web
rm "element-$ELEMENT_VERSION.tar.gz"

# Point Element at our homeserver (unquoted heredoc expands $DOMAIN).
cat > /opt/element/web/config.json << ELEMENT
{
"default_server_config": {
"m.homeserver": {
"base_url": "https://$DOMAIN",
"server_name": "$DOMAIN"
}
},
"disable_guests": true,
"default_theme": "dark",
"room_directory": {
"servers": ["matrix.org", "$DOMAIN"]
}
}
ELEMENT
# Configure nginx
echo "[9/10] Configuring nginx..."
# The \$-escapes keep nginx runtime variables literal in the unquoted heredoc.
# NOTE(review): the regex location ^(/_matrix|/_synapse/client) takes
# precedence over the plain /_matrix/federation prefix location, so the
# federation block looks unreachable — both proxy to the same backend, so
# behavior is unaffected; confirm before removing either.
cat > /etc/nginx/sites-available/matrix << NGINX
server {
    listen $LISTEN_PORT;
    listen [::]:$LISTEN_PORT;
    server_name $DOMAIN;
    root /opt/element/web;
    index index.html;
    location ~ ^(/_matrix|/_synapse/client) {
        proxy_pass http://127.0.0.1:8008;
        proxy_set_header X-Forwarded-For \$remote_addr;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_set_header Host \$host;
        client_max_body_size 100M;
        proxy_http_version 1.1;
    }
    location /_matrix/federation {
        proxy_pass http://127.0.0.1:8008;
        proxy_set_header X-Forwarded-For \$remote_addr;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_set_header Host \$host;
        client_max_body_size 100M;
    }
    location /.well-known/matrix/server {
        default_type application/json;
        return 200 '{"m.server": "$DOMAIN:443"}';
    }
    location /.well-known/matrix/client {
        default_type application/json;
        add_header Access-Control-Allow-Origin *;
        return 200 '{"m.homeserver": {"base_url": "https://$DOMAIN"}}';
    }
    location / {
        try_files \$uri \$uri/ /index.html;
    }
}
NGINX
ln -sf /etc/nginx/sites-available/matrix /etc/nginx/sites-enabled/matrix
rm -f /etc/nginx/sites-enabled/default
# Fail fast on an invalid config (set -e aborts if nginx -t fails).
nginx -t
# Create systemd service. \$MAINPID is escaped so systemd, not the shell,
# expands it.
cat > /etc/systemd/system/synapse.service << SERVICE
[Unit]
Description=Synapse Matrix Homeserver
After=network.target postgresql.service
[Service]
Type=notify
User=synapse
Group=synapse
WorkingDirectory=/opt/synapse
ExecStart=/opt/synapse/venv/bin/python -m synapse.app.homeserver --config-path=/opt/synapse/homeserver.yaml
ExecReload=/bin/kill -HUP \$MAINPID
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
SERVICE

# Start services
echo "[10/10] Starting services..."
systemctl daemon-reload
systemctl enable --now postgresql nginx coturn synapse

# Create admin user (give Synapse a moment to come up first)
sleep 3
# FIX: restore the command substitution the sanitizer broke — generate a
# random admin password the same way the other secrets are generated.
ADMIN_PASS=$(openssl rand -hex 12)
cd /opt/synapse
sudo -u synapse /opt/synapse/venv/bin/register_new_matrix_user \
    -c homeserver.yaml \
    -u "$ADMIN_USER" \
    -p "$ADMIN_PASS" \
    -a \
    http://localhost:8008
# Save secrets (root-only readable) so they can be recovered later.
# FIX: write the actual generated values — the sanitizer had replaced the
# $DB_PASS/$ADMIN_PASS expansions with placeholders.
cat > /root/.matrix_secrets << SECRETS
DOMAIN=$DOMAIN
DB_PASS=$DB_PASS
TURN_SECRET=$TURN_SECRET
ADMIN_USER=$ADMIN_USER
ADMIN_PASS=$ADMIN_PASS
SECRETS
chmod 600 /root/.matrix_secrets

# Download helper scripts
echo "Downloading helper scripts..."
REPO_BASE="https://git.vish.gg/Vish/matrix-element/raw/branch/main"
mkdir -p /opt/matrix-scripts
for script in verify-matrix.sh fix-matrix.sh backup-matrix.sh update-matrix.sh; do
    # Best-effort: a missing script must not abort the install (set -e).
    curl -sSL "$REPO_BASE/$script" -o "/opt/matrix-scripts/$script" 2>/dev/null || true
    chmod +x "/opt/matrix-scripts/$script" 2>/dev/null || true
done
echo "Helper scripts installed to /opt/matrix-scripts/"
# Verify installation
echo ""
echo "Verifying installation..."
sleep 2
VERIFY_FAILED=0

# Check services
for svc in synapse nginx coturn postgresql; do
    if systemctl is-active --quiet $svc; then
        echo "✓ $svc is running"
    else
        echo "✗ $svc is NOT running"
        VERIFY_FAILED=1
    fi
done

# Check Matrix API
if curl -sf http://localhost:8008/_matrix/client/versions > /dev/null; then
    echo "✓ Matrix API responding"
else
    echo "✗ Matrix API not responding"
    VERIFY_FAILED=1
fi

# Check Element Web
if curl -sf http://localhost:$LISTEN_PORT/ > /dev/null; then
    echo "✓ Element Web accessible"
else
    echo "✗ Element Web not accessible"
    VERIFY_FAILED=1
fi

echo ""
echo "=========================================="
if [ $VERIFY_FAILED -eq 0 ]; then
    echo "✅ Matrix Installation Complete!"
else
    echo "⚠️ Installation complete with warnings"
fi
echo "=========================================="
echo ""
echo "Domain: $DOMAIN"
echo "Admin User: @$ADMIN_USER:$DOMAIN"
# FIX: the sanitizer left unbalanced quotes on this line; print the
# generated admin password from the variable set above.
echo "Admin Password: $ADMIN_PASS"
echo ""
echo "Listening on port $LISTEN_PORT (HTTP)"
echo ""
echo "Next steps:"
echo "1. Configure reverse proxy: HTTPS:443 → HTTP:$LISTEN_PORT"
echo "2. Forward TURN ports: $TURN_PORT, $TURN_TLS_PORT, $TURN_MIN_PORT-$TURN_MAX_PORT"
echo "3. Login at https://$DOMAIN and change password"
echo ""
echo "Secrets saved to /root/.matrix_secrets"
echo ""
echo "Helper scripts installed to /opt/matrix-scripts/"
echo " ./verify-matrix.sh - Check installation health"
echo " ./fix-matrix.sh - Diagnose and fix issues"
echo " ./backup-matrix.sh - Create full backup"
echo " ./update-matrix.sh - Update Synapse and Element"
echo ""
echo "Useful commands:"
echo " systemctl status synapse nginx coturn"
echo " journalctl -u synapse -f"
echo " curl http://localhost:8008/_matrix/client/versions"

View File

@@ -0,0 +1,103 @@
#!/bin/bash
# =============================================================================
# Matrix Synapse + Element Web Update Script
# =============================================================================
# Run as root
set -e
echo "=========================================="
echo "Matrix Synapse + Element Update Script"
echo "=========================================="

# Check current versions — fall back to "unknown" rather than aborting,
# since set -e is active.
CURRENT_SYNAPSE=$(/opt/synapse/venv/bin/python -c "import synapse; print(synapse.__version__)" 2>/dev/null || echo "unknown")
CURRENT_ELEMENT=$(cat /opt/element/web/version 2>/dev/null || ls /opt/element/ | grep -oP 'v[\d.]+' | head -1 || echo "unknown")
echo "Current Synapse: $CURRENT_SYNAPSE"
echo "Current Element: $CURRENT_ELEMENT"

# Get latest versions from the GitHub releases API.
echo ""
echo "Checking for updates..."
LATEST_ELEMENT=$(curl -s https://api.github.com/repos/element-hq/element-web/releases/latest | jq -r '.tag_name')
echo "Latest Element: $LATEST_ELEMENT"

# Interactive confirmation — requires a TTY on stdin.
read -p "Proceed with update? (y/N) " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
    echo "Update cancelled."
    exit 0
fi
# Backup first — look for the backup script next to us, then in the
# standard helper-script location.
echo ""
echo "[1/4] Creating backup..."
if [ -f ./backup-matrix.sh ]; then
    ./backup-matrix.sh
elif [ -f /opt/matrix-scripts/backup-matrix.sh ]; then
    /opt/matrix-scripts/backup-matrix.sh
else
    echo "Backup script not found, skipping..."
fi

# Update Synapse
echo ""
echo "[2/4] Updating Synapse..."
systemctl stop synapse
cd /opt/synapse
# Quoted heredoc: nothing expands here; commands run as the synapse user.
sudo -u synapse bash << 'UPDATE_SYNAPSE'
source venv/bin/activate
pip install --upgrade matrix-synapse psycopg2-binary lxml 'prometheus-client<0.21'
UPDATE_SYNAPSE

# Run database migrations
echo ""
echo "[3/4] Running database migrations..."
sudo -u synapse /opt/synapse/venv/bin/python -m synapse.app.homeserver \
    --config-path /opt/synapse/homeserver.yaml \
    --generate-keys-if-missing

# Update Element Web
echo ""
echo "[4/4] Updating Element Web..."
cd /opt/element
if [ -n "$LATEST_ELEMENT" ] && [ "$LATEST_ELEMENT" != "null" ]; then
    # Backup old config — it is restored into the fresh tree below.
    cp web/config.json /tmp/element_config_backup.json
    # Download new version
    wget -q "https://github.com/element-hq/element-web/releases/download/$LATEST_ELEMENT/element-$LATEST_ELEMENT.tar.gz"
    # Remove old, extract new
    rm -rf web
    tar xzf "element-$LATEST_ELEMENT.tar.gz"
    mv "element-$LATEST_ELEMENT" web
    rm "element-$LATEST_ELEMENT.tar.gz"
    # Restore config
    cp /tmp/element_config_backup.json web/config.json
    echo "Element updated to $LATEST_ELEMENT"
else
    echo "Could not determine latest Element version, skipping Element update"
fi
# Start services
echo ""
echo "Starting services..."
systemctl start synapse
systemctl restart nginx

# Verify the new Synapse version after a short settle time.
sleep 3
NEW_SYNAPSE=$(/opt/synapse/venv/bin/python -c "import synapse; print(synapse.__version__)" 2>/dev/null || echo "unknown")
echo ""
echo "=========================================="
echo "✅ Update Complete!"
echo "=========================================="
echo ""
# FIX: the old and new versions were printed concatenated with no
# separator; use the same arrow style as the install script's summary.
echo "Synapse: $CURRENT_SYNAPSE → $NEW_SYNAPSE"
echo "Element: $CURRENT_ELEMENT → $LATEST_ELEMENT"
echo ""
echo "Please verify your instance is working correctly."

View File

@@ -0,0 +1,126 @@
#!/bin/bash
# =============================================================================
# Matrix Synapse + Element Web Verification Script
# =============================================================================
# Run as root or with sudo
echo "=========================================="
echo "Matrix/Element Health Check"
echo "=========================================="
echo ""

# FAILED drives the non-zero exit status; WARN is informational only.
FAILED=0
WARN=0

# Load domain from secrets if available
if [ -f /root/.matrix_secrets ]; then
    source /root/.matrix_secrets
    echo "Domain: ${DOMAIN:-unknown}"
fi

echo ""
echo "[Service Status]"
for svc in synapse nginx coturn postgresql; do
    STATUS=$(systemctl is-active $svc 2>/dev/null || echo "not-found")
    if [ "$STATUS" = "active" ]; then
        echo "$svc: running"
    elif [ "$STATUS" = "not-found" ]; then
        # Not installed is reported but not counted as a failure.
        echo " - $svc: not installed"
    else
        echo "$svc: $STATUS"
        FAILED=1
    fi
done
echo ""
echo "[Matrix API]"
# Client API — also report how many protocol versions Synapse advertises.
if curl -sf http://localhost:8008/_matrix/client/versions > /dev/null 2>&1; then
    VERSION_COUNT=$(curl -s http://localhost:8008/_matrix/client/versions | python3 -c "import sys,json; print(len(json.load(sys.stdin).get('versions',[])))" 2>/dev/null || echo "0")
    echo " ✓ Client API: responding ($VERSION_COUNT protocol versions)"
else
    echo " ✗ Client API: not responding"
    FAILED=1
fi

# Federation API — the version endpoint also reveals the Synapse version.
FED_RESULT=$(curl -sf http://localhost:8008/_matrix/federation/v1/version 2>/dev/null)
if [ -n "$FED_RESULT" ]; then
    SYNAPSE_VER=$(echo "$FED_RESULT" | python3 -c "import sys,json; print(json.load(sys.stdin).get('server',{}).get('version','unknown'))" 2>/dev/null)
    echo " ✓ Federation API: responding (Synapse $SYNAPSE_VER)"
else
    echo " ✗ Federation API: not responding"
    FAILED=1
fi

echo ""
echo "[Well-Known Endpoints]"
# Check nginx port; fall back to the installer default of 8080.
LISTEN_PORT=$(grep -oP 'listen \K\d+' /etc/nginx/sites-enabled/matrix 2>/dev/null | head -1 || echo "8080")
SERVER_WK=$(curl -sf http://localhost:$LISTEN_PORT/.well-known/matrix/server 2>/dev/null)
if [ -n "$SERVER_WK" ]; then
    echo " ✓ /.well-known/matrix/server: $SERVER_WK"
else
    echo " ✗ /.well-known/matrix/server: not configured"
    WARN=1
fi
CLIENT_WK=$(curl -sf http://localhost:$LISTEN_PORT/.well-known/matrix/client 2>/dev/null)
if [ -n "$CLIENT_WK" ]; then
    echo " ✓ /.well-known/matrix/client: configured"
else
    echo " ✗ /.well-known/matrix/client: not configured"
    WARN=1
fi
echo ""
echo "[Element Web]"
if curl -sf http://localhost:$LISTEN_PORT/ > /dev/null 2>&1; then
    echo " ✓ Element Web: accessible on port $LISTEN_PORT"
else
    echo " ✗ Element Web: not accessible"
    FAILED=1
fi
# Check Element config and report which homeserver it points at.
if [ -f /opt/element/web/config.json ]; then
    HOMESERVER=$(python3 -c "import json; print(json.load(open('/opt/element/web/config.json')).get('default_server_config',{}).get('m.homeserver',{}).get('base_url','not set'))" 2>/dev/null)
    echo " ✓ Element config: homeserver=$HOMESERVER"
else
    echo " ✗ Element config: /opt/element/web/config.json not found"
    WARN=1
fi

echo ""
echo "[TURN Server]"
if systemctl is-active --quiet coturn; then
    TURN_PORT=$(grep -oP '^listening-port=\K\d+' /etc/turnserver.conf 2>/dev/null | head -1 || echo "3479")
    echo " ✓ Coturn: running on port $TURN_PORT"
else
    # Missing TURN is a warning, not a failure — chat still works.
    echo " - Coturn: not running (voice/video calls may not work behind NAT)"
    WARN=1
fi

echo ""
echo "[Database]"
if systemctl is-active --quiet postgresql; then
    DB_SIZE=$(sudo -u postgres psql -t -c "SELECT pg_size_pretty(pg_database_size('synapse'));" 2>/dev/null | xargs)
    echo " ✓ PostgreSQL: running (synapse db: ${DB_SIZE:-unknown})"
else
    echo " ✗ PostgreSQL: not running"
    FAILED=1
fi

echo ""
echo "=========================================="
if [ $FAILED -eq 0 ] && [ $WARN -eq 0 ]; then
    echo "✅ All checks passed!"
elif [ $FAILED -eq 0 ]; then
    echo "⚠️ Passed with warnings"
else
    echo "❌ Some checks failed"
fi
echo "=========================================="
# Exit non-zero only on hard failures; warnings do not affect the status.
exit $FAILED

View File

@@ -0,0 +1,74 @@
# Mattermost Production Deployment
Production-ready Mattermost Team Edition deployment for **mm.crista.love**
## Architecture
- **Mattermost Team Edition** - Running in Docker
- **PostgreSQL 15** - Database (Docker)
- **Nginx** - Reverse proxy with SSL termination
- **Cloudflare** - DNS and SSL (Full Strict mode with Origin Certificate)
- **Backblaze B2** - File storage (S3-compatible)
- **Automated Backups** - Daily to Backblaze B2
## Server Details
- **Server**: YOUR_WAN_IP
- **Domain**: mm.crista.love
- **OS**: Ubuntu 24.04 LTS
## Files
| File | Description |
|------|-------------|
| `deploy-mattermost.sh` | Main deployment script |
| `mattermost-nginx.conf` | Nginx reverse proxy configuration |
| `mattermost-backup.sh` | Automated backup script |
| `mm-crista-love.crt` | Cloudflare Origin SSL certificate |
| `mm-crista-love.key` | SSL private key |
## Deployment
1. Copy all files to server
2. Run `deploy-mattermost.sh` as root
3. Visit https://mm.crista.love to create admin account
## Configuration
### Email (SMTP)
- Gmail with app password
- STARTTLS on port 587
### File Storage
- Backblaze B2 (S3-compatible)
- Bucket: `vk-mattermost`
### Backups
- Daily at 3 AM UTC
- Stored in B2: `vk-mattermost/backups/`
- Retention: 30 days remote, 7 days local
## Management Commands
```bash
# View logs
docker compose -f /opt/mattermost/docker-compose.yml logs -f
# Restart services
docker compose -f /opt/mattermost/docker-compose.yml restart
# Manual backup
/opt/mattermost/backup.sh
# Check status
docker compose -f /opt/mattermost/docker-compose.yml ps
```
## Security Notes
⚠️ **Important**: The actual credentials are stored in:
- `/opt/mattermost/.env` - PostgreSQL password
- `~/.aws/credentials` - B2 credentials
- Docker environment variables - SMTP credentials
The files in this repo contain placeholder references. Actual secrets should never be committed.

View File

@@ -0,0 +1,182 @@
#!/bin/bash
# Mattermost Deployment Script for Synology Reverse Proxy Setup
# Uses local storage (no B2) and external PostgreSQL
echo "=============================================="
echo "Mattermost Production Deployment (Synology)"
echo "Domain: mm.crista.love"
echo "=============================================="

# Variables - UPDATE THESE
SMTP_HOST="${SMTP_HOST:-smtp.gmail.com}"
SMTP_PORT="${SMTP_PORT:-587}"
SMTP_USER="${SMTP_USER:-your-email@example.com}"
# FIX: restore the env-override defaults the sanitizer stripped; override
# both via the environment before running.
SMTP_PASS="${SMTP_PASS:-your-app-password}"
DB_PASSWORD="${DB_PASSWORD:-change-me-db-password}"
SITE_URL="${SITE_URL:-https://mm.crista.love}"

echo "=== Step 1: Install Docker ==="
if ! command -v docker &> /dev/null; then
    curl -fsSL https://get.docker.com | sh
    systemctl enable docker
    systemctl start docker
fi
# Install docker compose plugin if needed
apt-get update
apt-get install -y docker-compose-plugin || true

echo "=== Step 2: Install and configure PostgreSQL ==="
if ! command -v psql &> /dev/null; then
    apt-get install -y postgresql postgresql-contrib
    systemctl enable postgresql
    systemctl start postgresql
fi
# Create database and user (idempotent: "already exists" is ignored).
# FIX: use the real $DB_PASSWORD instead of the sanitizer placeholder.
sudo -u postgres psql -c "CREATE USER mmuser WITH PASSWORD '$DB_PASSWORD';" 2>/dev/null || true
sudo -u postgres psql -c "CREATE DATABASE mattermost OWNER mmuser;" 2>/dev/null || true
sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE mattermost TO mmuser;" 2>/dev/null || true

# Configure PostgreSQL to accept Docker connections
PG_HBA=$(find /etc/postgresql -name pg_hba.conf | head -1)
PG_CONF=$(find /etc/postgresql -name postgresql.conf | head -1)
if ! grep -q "172.17.0.0/16" "$PG_HBA"; then
    echo "# Docker networks for Mattermost" >> "$PG_HBA"
    echo "host mattermost mmuser 172.17.0.0/16 scram-sha-256" >> "$PG_HBA"
    echo "host mattermost mmuser 172.18.0.0/16 scram-sha-256" >> "$PG_HBA"
    echo "host mattermost mmuser 172.19.0.0/16 scram-sha-256" >> "$PG_HBA"
fi
# Configure PostgreSQL to listen on all interfaces
if ! grep -q "listen_addresses = '\*'" "$PG_CONF"; then
    sed -i "s/#listen_addresses = 'localhost'/listen_addresses = '*'/" "$PG_CONF"
fi
systemctl restart postgresql

echo "=== Step 3: Create directory structure ==="
mkdir -p /opt/mattermost/{config,data,logs,plugins,client-plugins,backups}

echo "=== Step 4: Create environment file ==="
# FIX: write the real SMTP password (expanded now) rather than the sanitizer
# placeholder; the file is root-only readable.
cat > /opt/mattermost/.env << EOF
MM_EMAILSETTINGS_SMTPPASSWORD="${SMTP_PASS}"
EOF
chmod 600 /opt/mattermost/.env
echo "=== Step 5: Create Docker Compose file ==="
# Get Docker bridge IP so the container can reach host PostgreSQL.
DOCKER_HOST_IP=$(ip -4 addr show docker0 | grep -oP '(?<=inet\s)\d+(\.\d+){3}' || echo "172.17.0.1")
# FIX: the scrape flattened this heredoc; Compose YAML requires the nesting
# restored below. The unquoted EOF delimiter expands ${DB_PASSWORD},
# ${DOCKER_HOST_IP}, ${SITE_URL} and ${SMTP_*} at write time.
cat > /opt/mattermost/docker-compose.yml << EOF
services:
  mattermost:
    image: mattermost/mattermost-team-edition:11.3
    container_name: mattermost
    restart: unless-stopped
    security_opt:
      - no-new-privileges:true
    pids_limit: 200
    read_only: false
    tmpfs:
      - /tmp
    ports:
      - "8065:8065"
    environment:
      TZ: UTC
      MM_SQLSETTINGS_DRIVERNAME: postgres
      MM_SQLSETTINGS_DATASOURCE: "postgres://mmuser:${DB_PASSWORD}@${DOCKER_HOST_IP}:5432/mattermost?sslmode=disable&connect_timeout=10"
      MM_SERVICESETTINGS_SITEURL: ${SITE_URL}
      MM_SERVICESETTINGS_LISTENADDRESS: ":8065"
      MM_FILESETTINGS_DRIVERNAME: local
      MM_FILESETTINGS_DIRECTORY: /mattermost/data
      MM_LOGSETTINGS_CONSOLELEVEL: INFO
      MM_LOGSETTINGS_FILELEVEL: INFO
      MM_EMAILSETTINGS_ENABLESMTPAUTH: "true"
      MM_EMAILSETTINGS_SMTPSERVER: ${SMTP_HOST}
      MM_EMAILSETTINGS_SMTPPORT: "${SMTP_PORT}"
      MM_EMAILSETTINGS_CONNECTIONSECURITY: STARTTLS
      MM_EMAILSETTINGS_SMTPUSERNAME: ${SMTP_USER}
      MM_EMAILSETTINGS_FEEDBACKEMAIL: ${SMTP_USER}
      MM_EMAILSETTINGS_FEEDBACKNAME: Mattermost
      MM_EMAILSETTINGS_SENDEMAILNOTIFICATIONS: "true"
      MM_TEAMSETTINGS_ENABLEOPENSERVER: "true"
      MM_TEAMSETTINGS_MAXUSERSPERTEAM: "50"
    env_file:
      - .env
    volumes:
      - /opt/mattermost/config:/mattermost/config:rw
      - /opt/mattermost/data:/mattermost/data:rw
      - /opt/mattermost/logs:/mattermost/logs:rw
      - /opt/mattermost/plugins:/mattermost/plugins:rw
      - /opt/mattermost/client-plugins:/mattermost/client/plugins:rw
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8065/api/v4/system/ping"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
    extra_hosts:
      - "host.docker.internal:host-gateway"
EOF
echo "=== Step 6: Create backup script ==="
# Quoted 'BACKUP' delimiter: nothing expands now — $DATE etc. are evaluated
# each time the backup script runs.
cat > /opt/mattermost/backup.sh << 'BACKUP'
#!/bin/bash
BACKUP_DIR=/opt/mattermost/backups
DATE=$(date +%Y%m%d_%H%M%S)
sudo -u postgres pg_dump mattermost | gzip > $BACKUP_DIR/mattermost_db_$DATE.sql.gz
tar -czf $BACKUP_DIR/mattermost_data_$DATE.tar.gz -C /opt/mattermost data config
find $BACKUP_DIR -name "*.gz" -mtime +7 -delete
echo "Backup completed: $DATE"
BACKUP
chmod +x /opt/mattermost/backup.sh

echo "=== Step 7: Set up backup cron job ==="
# Daily at 03:00 UTC, run as root, output appended to a log file.
echo '0 3 * * * root /opt/mattermost/backup.sh >> /var/log/mattermost-backup.log 2>&1' > /etc/cron.d/mattermost-backup
chmod 644 /etc/cron.d/mattermost-backup

echo "=== Step 8: Start Mattermost ==="
cd /opt/mattermost
docker compose pull
docker compose up -d

echo "=== Step 9: Wait for Mattermost to be healthy ==="
echo "Waiting for services to start..."
sleep 30
# Poll the ping endpoint for up to ~2.5 more minutes before giving up.
MAX_ATTEMPTS=30
ATTEMPT=0
until curl -sf http://127.0.0.1:8065/api/v4/system/ping > /dev/null 2>&1; do
    ATTEMPT=$((ATTEMPT + 1))
    if [ $ATTEMPT -ge $MAX_ATTEMPTS ]; then
        echo "Mattermost did not become healthy in time. Checking logs..."
        docker compose logs --tail=100
        exit 1
    fi
    echo "Waiting for Mattermost to be ready... (attempt $ATTEMPT/$MAX_ATTEMPTS)"
    sleep 5
done
echo "Mattermost is healthy!"
echo "=============================================="
echo "Mattermost Deployment Complete!"
echo "=============================================="
echo ""
echo "Mattermost is running on port 8065"
echo ""
echo "Configure your Synology Reverse Proxy:"
echo " Source: HTTPS, mm.crista.love, port 443"
echo " Destination: HTTP, <this-machine-ip>, port 8065"
echo ""
echo "Backup schedule: Daily at 3 AM UTC"
echo "Backups stored in: /opt/mattermost/backups/"
echo ""
echo "Useful commands:"
echo " View logs: docker compose -f /opt/mattermost/docker-compose.yml logs -f"
echo " Restart: docker compose -f /opt/mattermost/docker-compose.yml restart"
echo " Manual backup: /opt/mattermost/backup.sh"
echo ""
docker compose ps

View File

@@ -0,0 +1,219 @@
#!/bin/bash
# Complete Mattermost Deployment Script
#
# Provisions a single-host Mattermost Team Edition stack:
#   - PostgreSQL + Mattermost via docker compose
#   - File storage on Backblaze B2 (S3-compatible API)
#   - SMTP email, nginx reverse proxy, daily B2-backed backups
# Run as root on a Debian/Ubuntu host that already has Docker installed.
#
# NOTE(review): several lines in the mirrored copy were mangled by secret
# redaction (broken ${VAR:-default} expansions, destroyed password-generation
# quoting, a garbled mmctl config key). They are reconstructed below and
# flagged inline -- verify each against the original script.
set -e

echo "=============================================="
echo "Mattermost Production Deployment"
echo "Domain: mm.crista.love"
echo "=============================================="

# Variables - UPDATE THESE WITH YOUR ACTUAL VALUES
# Each setting falls back to a placeholder when the env var is unset, so the
# script can be driven entirely from the environment.
B2_KEY_ID="${B2_KEY_ID:-your-b2-key-id}"
B2_APP_KEY="${B2_APP_KEY:-your-b2-app-key}"
B2_ENDPOINT="${B2_ENDPOINT:-s3.us-west-004.backblazeb2.com}"
B2_BUCKET="${B2_BUCKET:-your-bucket-name}"
SMTP_HOST="${SMTP_HOST:-smtp.gmail.com}"
SMTP_PORT="${SMTP_PORT:-587}"
SMTP_USER="${SMTP_USER:-your-email@gmail.com}"
SMTP_PASS="${SMTP_PASS:-your-smtp-app-password}"

echo "=== Step 1: Install Docker Compose plugin ==="
apt-get update
apt-get install -y docker-compose-plugin unzip

echo "=== Step 2: Install AWS CLI for B2 backups ==="
# B2 exposes an S3-compatible API, so the stock AWS CLI is used for uploads.
if ! command -v aws &> /dev/null; then
    curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "/tmp/awscliv2.zip"
    unzip -q /tmp/awscliv2.zip -d /tmp
    /tmp/aws/install
    rm -rf /tmp/aws /tmp/awscliv2.zip
fi

# Configure AWS CLI for Backblaze B2 (credentials land in root's home).
mkdir -p ~/.aws
cat > ~/.aws/credentials << EOF
[default]
aws_access_key_id = ${B2_KEY_ID}
aws_secret_access_key = ${B2_APP_KEY}
EOF
cat > ~/.aws/config << EOF
[default]
region = us-west-004
EOF

echo "=== Step 3: Create directory structure ==="
mkdir -p /opt/mattermost/{config,data,logs,plugins,client/plugins,bleve-indexes,backups}
mkdir -p /etc/nginx/ssl
mkdir -p /var/cache/nginx/mattermost

echo "=== Step 4: Generate PostgreSQL password ==="
# 32 random alphanumeric characters; persisted root-only so backup.sh can
# source it later. (Reconstructed -- the mirrored line was redaction-broken.)
POSTGRES_PASSWORD=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | head -c 32)
echo "POSTGRES_PASSWORD=${POSTGRES_PASSWORD}" > /opt/mattermost/.env
chmod 600 /opt/mattermost/.env

echo "=== Step 5: Create Docker Compose file ==="
# Unquoted EOF delimiter is intentional: ${POSTGRES_PASSWORD}, B2_* and SMTP_*
# are expanded NOW, baking concrete values into docker-compose.yml.
cat > /opt/mattermost/docker-compose.yml << EOF
services:
  postgres:
    image: postgres:15-alpine
    container_name: mattermost-postgres
    restart: unless-stopped
    security_opt:
      - no-new-privileges:true
    pids_limit: 100
    read_only: true
    tmpfs:
      - /tmp
      - /var/run/postgresql
    volumes:
      - postgres_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=mmuser
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      - POSTGRES_DB=mattermost
    networks:
      - mattermost-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U mmuser -d mattermost"]
      interval: 10s
      timeout: 5s
      retries: 5
  mattermost:
    image: mattermost/mattermost-team-edition:latest
    container_name: mattermost
    restart: unless-stopped
    depends_on:
      postgres:
        condition: service_healthy
    security_opt:
      - no-new-privileges:true
    pids_limit: 200
    tmpfs:
      - /tmp
    volumes:
      - /opt/mattermost/config:/mattermost/config:rw
      - /opt/mattermost/data:/mattermost/data:rw
      - /opt/mattermost/logs:/mattermost/logs:rw
      - /opt/mattermost/plugins:/mattermost/plugins:rw
      - /opt/mattermost/client/plugins:/mattermost/client/plugins:rw
      - /opt/mattermost/bleve-indexes:/mattermost/bleve-indexes:rw
    environment:
      - TZ=UTC
      - MM_SQLSETTINGS_DRIVERNAME=postgres
      - MM_SQLSETTINGS_DATASOURCE=postgres://mmuser:${POSTGRES_PASSWORD}@postgres:5432/mattermost?sslmode=disable&connect_timeout=10
      - MM_BLEVESETTINGS_INDEXDIR=/mattermost/bleve-indexes
      - MM_SERVICESETTINGS_SITEURL=https://mm.crista.love
      - MM_SERVICESETTINGS_LISTENADDRESS=:8065
      # Email Settings
      - MM_EMAILSETTINGS_ENABLESMTPAUTH=true
      - MM_EMAILSETTINGS_SMTPUSERNAME=${SMTP_USER}
      - MM_EMAILSETTINGS_SMTPPASSWORD=${SMTP_PASS}
      - MM_EMAILSETTINGS_SMTPSERVER=${SMTP_HOST}
      - MM_EMAILSETTINGS_SMTPPORT=${SMTP_PORT}
      - MM_EMAILSETTINGS_CONNECTIONSECURITY=STARTTLS
      - MM_EMAILSETTINGS_FEEDBACKEMAIL=${SMTP_USER}
      - MM_EMAILSETTINGS_REPLYTOADDRESS=${SMTP_USER}
      - MM_EMAILSETTINGS_SENDEMAILNOTIFICATIONS=true
      # File Storage - Backblaze B2
      - MM_FILESETTINGS_DRIVERNAME=amazons3
      - MM_FILESETTINGS_AMAZONS3ACCESSKEYID=${B2_KEY_ID}
      - MM_FILESETTINGS_AMAZONS3SECRETACCESSKEY=${B2_APP_KEY}
      - MM_FILESETTINGS_AMAZONS3BUCKET=${B2_BUCKET}
      - MM_FILESETTINGS_AMAZONS3ENDPOINT=${B2_ENDPOINT}
      - MM_FILESETTINGS_AMAZONS3SSL=true
      - MM_FILESETTINGS_AMAZONS3SIGNV2=false
      - MM_FILESETTINGS_AMAZONS3REGION=us-west-004
      # Security
      - MM_SERVICESETTINGS_ENABLESECURITYFIXALERT=true
      - MM_PASSWORDSETTINGS_MINIMUMLENGTH=10
    ports:
      - "127.0.0.1:8065:8065"
    networks:
      - mattermost-network
networks:
  mattermost-network:
    driver: bridge
volumes:
  postgres_data:
EOF

echo "=== Step 6: Set directory permissions ==="
# UID/GID 2000 is what the mattermost image runs as.
chown -R 2000:2000 /opt/mattermost/config /opt/mattermost/data /opt/mattermost/logs /opt/mattermost/plugins /opt/mattermost/client/plugins /opt/mattermost/bleve-indexes

echo "=== Step 7: Start Mattermost containers ==="
cd /opt/mattermost
docker compose pull
docker compose up -d

echo "=== Step 8: Wait for Mattermost to be healthy ==="
echo "Waiting for services to start..."
sleep 15
# Wait for Mattermost to be ready: poll the unauthenticated ping endpoint.
# Worst-case wait is 15s + 30 attempts * 5s.
MAX_ATTEMPTS=30
ATTEMPT=0
until curl -sf http://127.0.0.1:8065/api/v4/system/ping > /dev/null 2>&1; do
    ATTEMPT=$((ATTEMPT + 1))
    if [ $ATTEMPT -ge $MAX_ATTEMPTS ]; then
        echo "Mattermost did not become healthy in time. Checking logs..."
        docker compose logs --tail=100
        exit 1
    fi
    echo "Waiting for Mattermost to be ready... (attempt $ATTEMPT/$MAX_ATTEMPTS)"
    sleep 5
done
echo "Mattermost is healthy!"

echo "=== Step 9: Configure Nginx ==="
# Nginx config should already be copied to sites-available by the caller.
# Create cache directory used by the proxy_cache_path zone.
mkdir -p /var/cache/nginx/mattermost
chown www-data:www-data /var/cache/nginx/mattermost
# Enable the site
ln -sf /etc/nginx/sites-available/mattermost /etc/nginx/sites-enabled/mattermost
# Test nginx config before reloading so a bad config never takes nginx down.
nginx -t
# Reload nginx
systemctl reload nginx

echo "=== Step 10: Set up automated backups ==="
chmod +x /opt/mattermost/backup.sh
# Add cron job for daily backups at 3 AM; the grep -v keeps the operation
# idempotent by removing any previous entry before re-adding it.
(crontab -l 2>/dev/null | grep -v "mattermost/backup.sh"; echo "0 3 * * * /opt/mattermost/backup.sh >> /var/log/mattermost-backup.log 2>&1") | crontab -

echo "=== Step 11: Enable open signups ==="
# NOTE(review): this config key was destroyed by secret redaction in the
# mirror. TeamSettings.EnableOpenServer is the mmctl key that permits open
# signups without an invite -- verify against the original script.
docker exec mattermost /mattermost/bin/mmctl config set TeamSettings.EnableOpenServer true --local
docker restart mattermost
sleep 15

echo "=============================================="
echo "Mattermost Deployment Complete!"
echo "=============================================="
echo ""
echo "Access Mattermost at: https://mm.crista.love"
echo ""
echo "Next steps:"
echo "1. Visit https://mm.crista.love to create your admin account"
echo "2. The first user to sign up becomes the system admin"
echo ""
echo "Backup schedule: Daily at 3 AM UTC"
echo "Backups stored in: Backblaze B2 (${B2_BUCKET}/backups/)"
echo ""
echo "Useful commands:"
echo "  View logs: docker compose -f /opt/mattermost/docker-compose.yml logs -f"
echo "  Restart: docker compose -f /opt/mattermost/docker-compose.yml restart"
echo "  Manual backup: /opt/mattermost/backup.sh"
echo ""
# Show container status
docker compose ps

View File

@@ -0,0 +1,56 @@
#!/bin/bash
# Mattermost Automated Backup Script
# Backs up PostgreSQL database and uploads to Backblaze B2
#
# Dumps the dockerized PostgreSQL database, gzips it locally, pushes it to a
# B2 bucket over the S3 API, then prunes local copies older than 7 days and
# remote copies older than RETENTION_DAYS. Intended to run from cron; all
# output goes to the cron log.
set -e

BACKUP_DIR="/opt/mattermost/backups"
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="mattermost_backup_${DATE}.sql.gz"
RETENTION_DAYS=30

# Create backup directory
mkdir -p "${BACKUP_DIR}"
echo "[$(date)] Starting Mattermost backup..."

# Get PostgreSQL password
# NOTE(review): POSTGRES_PASSWORD is sourced but never referenced below; the
# in-container pg_dump appears to authenticate over the local socket. Confirm
# whether sourcing .env is still required.
source /opt/mattermost/.env

# Backup PostgreSQL database
echo "[$(date)] Backing up PostgreSQL database..."
docker exec mattermost-postgres pg_dump -U mmuser -d mattermost | gzip > "${BACKUP_DIR}/${BACKUP_FILE}"

# Check backup size (human-readable, 5th column of ls -lh)
BACKUP_SIZE=$(ls -lh "${BACKUP_DIR}/${BACKUP_FILE}" | awk '{print $5}')
echo "[$(date)] Backup created: ${BACKUP_FILE} (${BACKUP_SIZE})"

# Upload to Backblaze B2 using S3 API.
# Bug fix: under `set -e` the previous `aws ...; if [ $? -eq 0 ]` pattern
# could never reach its failure branch -- a non-zero aws exit aborted the
# script before the check ran. Testing the command directly keeps the
# explicit failure message reachable.
echo "[$(date)] Uploading to Backblaze B2..."
if /usr/local/bin/aws s3 cp "${BACKUP_DIR}/${BACKUP_FILE}" "s3://vk-mattermost/backups/${BACKUP_FILE}" \
    --endpoint-url https://s3.us-west-004.backblazeb2.com; then
    echo "[$(date)] Upload successful!"
else
    echo "[$(date)] Upload failed!"
    exit 1
fi

# Clean up old local backups (keep last 7 days locally)
echo "[$(date)] Cleaning up old local backups..."
find "${BACKUP_DIR}" -name "mattermost_backup_*.sql.gz" -mtime +7 -delete

# Clean up old remote backups (keep last 30 days)
echo "[$(date)] Cleaning up old remote backups..."
CUTOFF_DATE=$(date -d "-${RETENTION_DAYS} days" +%Y%m%d)
# YYYYMMDD sorts lexicographically, so the [[ < ]] string comparison below is
# a valid date comparison.
/usr/local/bin/aws s3 ls s3://vk-mattermost/backups/ --endpoint-url https://s3.us-west-004.backblazeb2.com | while read -r line; do
    FILE_DATE=$(echo "$line" | awk '{print $4}' | grep -oP '\d{8}' | head -1)
    FILE_NAME=$(echo "$line" | awk '{print $4}')
    if [[ -n "$FILE_DATE" && "$FILE_DATE" < "$CUTOFF_DATE" ]]; then
        echo "[$(date)] Deleting old backup: ${FILE_NAME}"
        /usr/local/bin/aws s3 rm "s3://vk-mattermost/backups/${FILE_NAME}" --endpoint-url https://s3.us-west-004.backblazeb2.com
    fi
done

echo "[$(date)] Backup completed successfully!"

View File

@@ -0,0 +1,100 @@
# Mattermost reverse-proxy site: TLS termination (Cloudflare origin cert),
# WebSocket passthrough for the realtime API, and a disk cache for /static/.
upstream mattermost_backend {
    server 127.0.0.1:8065;
    keepalive 32;
}

# 10m key zone, 3g cache, entries idle >120m evicted.
proxy_cache_path /var/cache/nginx/mattermost levels=1:2 keys_zone=mattermost_cache:10m max_size=3g inactive=120m use_temp_path=off;

server {
    listen 80;
    listen [::]:80;
    server_name mm.crista.love;
    # Redirect all HTTP to HTTPS
    return 301 https://$server_name$request_uri;
}

server {
    # NOTE(review): on nginx >= 1.25.1 the `http2` listen parameter is
    # deprecated in favor of a separate `http2 on;` directive -- confirm the
    # target nginx version before changing.
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name mm.crista.love;

    # SSL Configuration - Cloudflare Origin Certificate
    ssl_certificate /etc/nginx/ssl/mm-crista-love.crt;
    ssl_certificate_key /etc/nginx/ssl/mm-crista-love.key;

    # Modern SSL configuration
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
    ssl_prefer_server_ciphers off;
    ssl_session_timeout 1d;
    ssl_session_cache shared:SSL:50m;
    ssl_session_tickets off;

    # Security Headers (inherited by locations that declare no add_header of
    # their own -- see the /static/ location below for the exception).
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;
    add_header Referrer-Policy "strict-origin-when-cross-origin" always;

    # Logging
    access_log /var/log/nginx/mattermost-access.log;
    error_log /var/log/nginx/mattermost-error.log;

    # Disable server tokens
    server_tokens off;

    # Max upload size (for file attachments)
    client_max_body_size 100M;

    # WebSocket endpoint for the realtime API: needs Upgrade/Connection
    # headers and HTTP/1.1 to the upstream.
    location ~ /api/v[0-9]+/(users/)?websocket$ {
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Frame-Options SAMEORIGIN;
        proxy_buffers 256 16k;
        proxy_buffer_size 16k;
        client_body_timeout 60;
        send_timeout 300;
        lingering_timeout 5;
        proxy_connect_timeout 90;
        proxy_send_timeout 300;
        proxy_read_timeout 90s;
        proxy_http_version 1.1;
        proxy_pass http://mattermost_backend;
    }

    location / {
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Frame-Options SAMEORIGIN;
        proxy_buffers 256 16k;
        proxy_buffer_size 16k;
        proxy_read_timeout 600s;
        proxy_http_version 1.1;
        proxy_pass http://mattermost_backend;

        # Static asset caching
        location ~ ^/static/ {
            proxy_pass http://mattermost_backend;
            proxy_cache mattermost_cache;
            proxy_cache_valid 200 1d;
            proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
            proxy_cache_revalidate on;
            proxy_cache_background_update on;
            add_header X-Cache-Status $upstream_cache_status;
            # Bug fix: declaring any add_header at this level stops ALL
            # inheritance of server-level add_header directives, so the
            # security headers must be repeated here or static responses
            # would be served without them.
            add_header X-Frame-Options "SAMEORIGIN" always;
            add_header X-Content-Type-Options "nosniff" always;
            add_header X-XSS-Protection "1; mode=block" always;
            add_header Referrer-Policy "strict-origin-when-cross-origin" always;
        }
    }

    # Health check endpoint
    location = /health {
        proxy_pass http://mattermost_backend;
        proxy_http_version 1.1;
        proxy_set_header Host $http_host;
    }
}

View File

@@ -0,0 +1,27 @@
-----BEGIN CERTIFICATE-----
MIIEojCCA4qgAwIBAgIUPrDC9IZU5unV4kUy0cBsm9DlEJAwDQYJKoZIhvcNAQEL
BQAwgYsxCzAJBgNVBAYTAlVTMRkwFwYDVQQKExBDbG91ZEZsYXJlLCBJbmMuMTQw
MgYDVQQLEytDbG91ZEZsYXJlIE9yaWdpbiBTU0wgQ2VydGlmaWNhdGUgQXV0aG9y
aXR5MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRMwEQYDVQQIEwpDYWxpZm9ybmlh
MB4XDTI2MDEyNTA5MDEwMFoXDTQxMDEyMTA5MDEwMFowYjEZMBcGA1UEChMQQ2xv
dWRGbGFyZSwgSW5jLjEdMBsGA1UECxMUQ2xvdWRGbGFyZSBPcmlnaW4gQ0ExJjAk
BgNVBAMTHUNsb3VkRmxhcmUgT3JpZ2luIENlcnRpZmljYXRlMIIBIjANBgkqhkiG
9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0e+rmGiUAH71cuXDd2xOaIqkYPeHIsDDtG1b
dbdrtHdsInTNhWpIUqayMot53NeixfKNit++P4D9mUmdeSwPUDuzcYsTmvcFZPiY
WATgp8nWF8PAkGNgd43kJqBylSis5TfCyRrBghHVIgt3WZ8ynbQVfmROf1YUnsa1
KtO6WtkaKx8Oz6FeQHiamhj/k0XKritidl+CO7UXDzFi2xIe10H4+grhMs1SaK+8
5Xib7ohyQTxyY5ELuAXq1R8bDmcBkatYbtwSdHeEEDmJtW7ILNJZ85uqG1Tp+RcG
WQ1AjXzoqITAv6qO/ubyp3lcBPkVoeZlufYqGKf6Yu6m71SlAQIDAQABo4IBJDCC
ASAwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcD
ATAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBRB+YxBgtPDtcWedv62/8Xd3uR/rjAf
BgNVHSMEGDAWgBQk6FNXXXw0QIep65TbuuEWePwppDBABggrBgEFBQcBAQQ0MDIw
MAYIKwYBBQUHMAGGJGh0dHA6Ly9vY3NwLmNsb3VkZmxhcmUuY29tL29yaWdpbl9j
YTAlBgNVHREEHjAcgg0qLmNyaXN0YS5sb3ZlggtjcmlzdGEubG92ZTA4BgNVHR8E
MTAvMC2gK6AphidodHRwOi8vY3JsLmNsb3VkZmxhcmUuY29tL29yaWdpbl9jYS5j
cmwwDQYJKoZIhvcNAQELBQADggEBAJ23KhTb+/EMa6WIskydfxbGJvnjVn+Ggs9L
H3tNP3W+gVi5yjghMBTwN8rLHfIl122CSgI8SLg7tWm9d+EUsQdqR1KfoBideeCj
EIITw6cHrJgCFP8x8SbO6b1t+qcgFW4d5aV5mRGj3UMZ+E5T9njG74c3xOQVIJ70
T14ZU9KF/vnGimOUCJNvlRjgjfcrccv7e0p8+i/mBvqgZeAsSg1X7/zW7gzR/fJW
FQO3ir4FKcKt4ItDCGnHA8FDA9PVuuxclAbOxZcW5i8ZBOxkQv37vScexGeeOI7b
u2L9lRuLtyelvH8Pbt7p79RCGHcm+BslG41+uBKPNPxLGke3RjI=
-----END CERTIFICATE-----