#!/bin/bash
# Log aggregation script for Arrs Media Stack
# Generated by Ansible

LOG_DIR="{{ docker_root }}/logs"
SYSTEM_LOG_DIR="$LOG_DIR/system"
ARRS_LOG_DIR="$LOG_DIR/arrs"
AGGREGATED_LOG="$LOG_DIR/aggregated-$(date '+%Y%m%d').log"
TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
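
# NOTE: TIMESTAMP is captured once at startup, so every line written during
# this run carries the run's timestamp, not each event's original time.
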
# Ensure log directories exist
mkdir -p "$SYSTEM_LOG_DIR" "$ARRS_LOG_DIR"

# Function to aggregate logs with source prefix
aggregate_logs() {
    local source="$1"
    local log_file="$2"

    if [[ -f "$log_file" ]]; then
        while IFS= read -r line; do
            echo "[$TIMESTAMP] [$source] $line" >> "$AGGREGATED_LOG"
        done < "$log_file"
    fi
}
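
# aggregate_logs silently skips source logs that do not exist yet (e.g. on
# the first run of a day, before the monitors have written anything).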

# Start aggregation
echo "[$TIMESTAMP] [AGGREGATOR] Starting log aggregation" >> "$AGGREGATED_LOG"

# Aggregate system monitoring logs
aggregate_logs "SYSTEM" "$SYSTEM_LOG_DIR/system-monitor-$(date '+%Y%m%d').log"

# Aggregate Docker monitoring logs
aggregate_logs "DOCKER" "$ARRS_LOG_DIR/docker-monitor-$(date '+%Y%m%d').log"

# Aggregate Docker Compose logs (last 100 lines per service)
cd "{{ docker_compose_dir }}" || exit 1
SERVICES=("sonarr" "radarr" "lidarr" "bazarr" "prowlarr" "watchtower")
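
# This template targets the Compose v1 CLI (docker-compose); on hosts that
# ship only Compose v2, the calls below would need to read "docker compose".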
for service in "${SERVICES[@]}"; do
    echo "[$TIMESTAMP] [AGGREGATOR] Collecting logs for $service" >> "$AGGREGATED_LOG"
    docker-compose logs --tail=100 "$service" 2>/dev/null | while IFS= read -r line; do
        echo "[$TIMESTAMP] [${service^^}] $line" >> "$AGGREGATED_LOG"
    done
done
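
# (Each "| while read" loop here and below runs in a subshell; that is fine
# because lines are appended directly to $AGGREGATED_LOG, not kept in variables.)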

# Aggregate system logs (errors and warnings)
echo "[$TIMESTAMP] [AGGREGATOR] Collecting system errors" >> "$AGGREGATED_LOG"
journalctl --since="1 hour ago" --priority=err --no-pager -q | while IFS= read -r line; do
    echo "[$TIMESTAMP] [SYSLOG_ERROR] $line" >> "$AGGREGATED_LOG"
done

# The warning..warning range limits this pass to warnings only; a bare
# --priority=warning would also re-capture the errors collected above.
journalctl --since="1 hour ago" --priority=warning..warning --no-pager -q | while IFS= read -r line; do
    echo "[$TIMESTAMP] [SYSLOG_WARNING] $line" >> "$AGGREGATED_LOG"
done

# Aggregate Docker daemon logs
echo "[$TIMESTAMP] [AGGREGATOR] Collecting Docker daemon logs" >> "$AGGREGATED_LOG"
journalctl -u docker --since="1 hour ago" --no-pager -q | while IFS= read -r line; do
    echo "[$TIMESTAMP] [DOCKER_DAEMON] $line" >> "$AGGREGATED_LOG"
done

# Generate summary
echo "[$TIMESTAMP] [AGGREGATOR] Generating summary" >> "$AGGREGATED_LOG"

# Count errors and warnings
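# (these greps also match tags this script writes itself, e.g. [SYSLOG_ERROR],
# so the totals are rough indicators rather than exact counts)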
# grep -c prints 0 on no match (exit status 1); the fallback only covers a
# missing or unreadable file (exit status 2)
ERROR_COUNT=$(grep -cE "ERROR|FAILED" "$AGGREGATED_LOG" 2>/dev/null) || ERROR_COUNT=0
WARNING_COUNT=$(grep -cE "WARNING|WARN" "$AGGREGATED_LOG" 2>/dev/null) || WARNING_COUNT=0

echo "[$TIMESTAMP] [SUMMARY] Errors: $ERROR_COUNT, Warnings: $WARNING_COUNT" >> "$AGGREGATED_LOG"

# Check for critical issues
CRITICAL_ISSUES=$(grep -E "(FAILED|ERROR|CRITICAL|FATAL)" "$AGGREGATED_LOG" | tail -5)
if [[ -n "$CRITICAL_ISSUES" ]]; then
    echo "[$TIMESTAMP] [SUMMARY] Recent critical issues:" >> "$AGGREGATED_LOG"
    echo "$CRITICAL_ISSUES" >> "$AGGREGATED_LOG"
fi

echo "[$TIMESTAMP] [AGGREGATOR] Log aggregation completed" >> "$AGGREGATED_LOG"

# Cleanup old aggregated logs (keep 7 days)
find "$LOG_DIR" -name "aggregated-*.log" -mtime +7 -delete 2>/dev/null

# Compress logs older than 1 day (today's aggregated log stays uncompressed)
find "$LOG_DIR" -name "*.log" -mtime +1 ! -name "aggregated-$(date '+%Y%m%d').log" -exec gzip {} \; 2>/dev/null

exit 0