Initial template repository

🎬 ARR Suite Template Bootstrap - Complete Media Automation Stack

Features:
- 16 production services (Prowlarr, Sonarr, Radarr, Plex, etc.)
- One-command Ansible deployment
- VPN-protected downloads via Gluetun
- Tailscale secure access
- Production-ready security (UFW, Fail2Ban)
- Automated backups and monitoring
- Comprehensive documentation

Ready for customization and deployment to any VPS.

Co-authored-by: openhands <openhands@all-hands.dev>
This commit is contained in:
openhands
2025-11-28 04:26:12 +00:00
commit 24f2cd64e9
71 changed files with 9983 additions and 0 deletions

77
templates/.env.j2 Normal file
View File

@@ -0,0 +1,77 @@
# Environment Configuration for *arr Stack
# Generated by Ansible - Do not edit manually
# Contains VPN credentials and API keys: keep file permissions restrictive
# and never commit a rendered copy to version control.

# System Configuration
PUID=1000
PGID=1000
TZ=UTC
UMASK=022

# Network Configuration
TAILSCALE_IP={{ tailscale_ip }}
BIND_TO_TAILSCALE={{ bind_to_tailscale_only | default(true) }}

# VPN Configuration
VPN_PROVIDER={{ vpn_provider }}
VPN_USERNAME={{ vpn_username }}
VPN_PASSWORD={{ vpn_password }}
# Overridable per deployment; defaults to the previously hardcoded value
VPN_SERVER_REGIONS={{ vpn_server_regions | default('United States') }}

# Service Ports
PROWLARR_PORT={{ services.prowlarr }}
SONARR_PORT={{ services.sonarr }}
RADARR_PORT={{ services.radarr }}
LIDARR_PORT={{ services.lidarr }}
WHISPARR_PORT={{ services.whisparr }}
BAZARR_PORT={{ services.bazarr }}
JELLYSEERR_PORT={{ services.jellyseerr }}
SABNZBD_PORT={{ services.sabnzbd }}
DELUGE_PORT={{ services.deluge }}
PLEX_PORT={{ services.plex }}
TAUTULLI_PORT={{ services.tautulli }}

# API Keys (Generated during deployment)
PROWLARR_API_KEY={{ api_keys.prowlarr }}
SONARR_API_KEY={{ api_keys.sonarr }}
RADARR_API_KEY={{ api_keys.radarr }}
LIDARR_API_KEY={{ api_keys.lidarr }}
WHISPARR_API_KEY={{ api_keys.whisparr }}
BAZARR_API_KEY={{ api_keys.bazarr }}
JELLYSEERR_API_KEY={{ api_keys.jellyseerr }}
SABNZBD_API_KEY={{ api_keys.sabnzbd }}

# Directory Paths
DOCKER_ROOT={{ base_path }}
MEDIA_ROOT={{ base_path }}/media
DOWNLOADS_ROOT={{ base_path }}/downloads

# Security Settings
ENABLE_FAIL2BAN={{ enable_fail2ban | default(true) }}
ENABLE_FIREWALL={{ enable_firewall | default(true) }}
ENABLE_AUTO_UPDATES={{ enable_auto_updates | default(true) }}

# Backup Configuration
BACKUP_ENABLED={{ backup_enabled | default(true) }}
BACKUP_RETENTION_DAYS={{ backup_retention_days | default(30) }}
BACKUP_SCHEDULE={{ backup_schedule | default('0 2 * * *') }}

# Monitoring
ENABLE_MONITORING={{ enable_monitoring | default(true) }}
HEALTH_CHECK_INTERVAL={{ health_check_interval | default(300) }}

# Plex Configuration
PLEX_CLAIM={{ plex_claim | default('') }}
PLEX_ADVERTISE_IP={{ ansible_default_ipv4.address }}

# Resource Limits (docker-compose memory caps per container)
MEMORY_LIMIT_SONARR={{ memory_limits.sonarr | default('1g') }}
MEMORY_LIMIT_RADARR={{ memory_limits.radarr | default('1g') }}
MEMORY_LIMIT_LIDARR={{ memory_limits.lidarr | default('512m') }}
MEMORY_LIMIT_PROWLARR={{ memory_limits.prowlarr | default('512m') }}
MEMORY_LIMIT_BAZARR={{ memory_limits.bazarr | default('256m') }}
MEMORY_LIMIT_JELLYSEERR={{ memory_limits.jellyseerr | default('512m') }}
MEMORY_LIMIT_SABNZBD={{ memory_limits.sabnzbd | default('1g') }}
MEMORY_LIMIT_DELUGE={{ memory_limits.deluge | default('512m') }}
MEMORY_LIMIT_PLEX={{ memory_limits.plex | default('4g') }}
MEMORY_LIMIT_TAUTULLI={{ memory_limits.tautulli | default('256m') }}
MEMORY_LIMIT_GLUETUN={{ memory_limits.gluetun | default('256m') }}

View File

@@ -0,0 +1,4 @@
// APT periodic job settings enabling daily unattended upgrades.
// Values are intervals in days ("1" = daily, "0" = disabled).
// Refresh package lists daily
APT::Periodic::Update-Package-Lists "1";
// Run unattended-upgrade daily
APT::Periodic::Unattended-Upgrade "1";
// Clean obsolete packages from the apt cache every 7 days
APT::Periodic::AutocleanInterval "7";
// Pre-download upgradeable packages daily so the upgrade run itself is fast
APT::Periodic::Download-Upgradeable-Packages "1";

View File

@@ -0,0 +1,135 @@
// Automatically upgrade packages from these (origin:archive) pairs
//
// Note that in Ubuntu security updates may pull in new dependencies
// from non-security sources (e.g. chromium). By allowing the release
// pocket these get automatically pulled in.
Unattended-Upgrade::Allowed-Origins {
"${distro_id}:${distro_codename}";
"${distro_id}:${distro_codename}-security";
// Extended Security Maintenance; doesn't necessarily exist for
// every release and this system may not have it installed, but if
// available, the policy for updates is such that unattended-upgrades
// should also install from here by default.
"${distro_id}ESMApps:${distro_codename}-apps-security";
"${distro_id}ESM:${distro_codename}-infra-security";
"${distro_id}:${distro_codename}-updates";
// "${distro_id}:${distro_codename}-proposed";
// "${distro_id}:${distro_codename}-backports";
};
// Python regular expressions, matching packages to exclude from upgrading
Unattended-Upgrade::Package-Blacklist {
// The following matches all packages starting with linux-
// "linux-";
// Use $ to explicitly define the end of a package name. Without
// the $, "libc6" would match all of them.
// "libc6$";
// "libc6-dev$";
// "libc6-i686$";
// Special characters need escaping
// "libstdc\+\+6$";
// The following matches packages like xen-system-amd64, xen-utils-4.1,
// xenstore-utils and libxenstore3.0
// "(lib)?xen(store)?";
// For more information about Python regular expressions, see
// https://docs.python.org/3/howto/regex.html
};
// This option allows you to control if on an unclean dpkg exit
// unattended-upgrades will automatically run
// dpkg --force-confold --configure -a
// The default is true, to ensure updates keep getting installed
//Unattended-Upgrade::AutoFixInterruptedDpkg "true";
// Split the upgrade into the smallest possible chunks so that
// they can be interrupted with SIGTERM. This makes the upgrade
// a bit slower but it has the benefit that shutdown while an upgrade
// is running is possible (with a small delay)
//Unattended-Upgrade::MinimalSteps "true";
// Install all updates when the machine is shutting down
// instead of doing it in the background while the machine is running.
// This will (obviously) make shutdown slower.
// Unattended-upgrades increases logind's InhibitDelayMaxSec to 30s.
// This allows more time for unattended-upgrades to shut down gracefully
// or even install a few packages in InstallOnShutdown mode, but is still a
// big step back from the 30 minutes allowed for InstallOnShutdown previously.
// Users enabling InstallOnShutdown mode are advised to increase
// InhibitDelayMaxSec even further, possibly to 30 minutes.
//Unattended-Upgrade::InstallOnShutdown "false";
// Send email to this address for problems or packages upgrades
// If empty or unset then no email is sent, make sure that you
// have a working mail setup on your system. A package that provides
// 'mailx' must be installed. E.g. "user@example.com"
//Unattended-Upgrade::Mail "";
// Set this value to one of:
// "always", "only-on-error" or "on-change"
// If this is not set, then any legacy MailOnlyOnError (boolean) value
// is used to choose between "only-on-error" and "on-change"
//Unattended-Upgrade::MailReport "on-change";
// Remove unused automatically installed kernel-related packages
// (kernel images, kernel headers and kernel version locked tools).
Unattended-Upgrade::Remove-Unused-Kernel-Packages "true";
// Do automatic removal of newly unused dependencies after the upgrade
Unattended-Upgrade::Remove-New-Unused-Dependencies "true";
// Do automatic removal of unused packages after the upgrade
// (equivalent to apt autoremove)
Unattended-Upgrade::Remove-Unused-Dependencies "true";
// Automatically reboot *WITHOUT CONFIRMATION* if
// the file /var/run/reboot-required is found after the upgrade
Unattended-Upgrade::Automatic-Reboot "false";
// Automatically reboot even if there are users currently logged in
// when Unattended-Upgrade::Automatic-Reboot is set to true
//Unattended-Upgrade::Automatic-Reboot-WithUsers "true";
// If automatic reboot is enabled and needed, reboot at the specific
// time instead of immediately
// Default: "now"
//Unattended-Upgrade::Automatic-Reboot-Time "02:00";
// Use apt bandwidth limit feature, this example limits the download
// speed to 70kb/sec
//Acquire::http::Dl-Limit "70";
// Enable logging to syslog. Default is False
Unattended-Upgrade::SyslogEnable "true";
// Specify syslog facility. Default is daemon
// Unattended-Upgrade::SyslogFacility "daemon";
// Download and install upgrades only on AC power
// (i.e. skip or gracefully stop updates on battery)
// Unattended-Upgrade::OnlyOnACPower "true";
// Download and install upgrades only on non-metered connection
// (i.e. skip or gracefully stop updates on a metered connection)
// Unattended-Upgrade::Skip-Updates-On-Metered-Connections "true";
// Verbose logging
// Unattended-Upgrade::Verbose "false";
// Print debugging information both in unattended-upgrades and
// in unattended-upgrade-shutdown
// Unattended-Upgrade::Debug "false";
// Allow package downgrade if Pin-Priority exceeds 1000
// Unattended-Upgrade::Allow-downgrade "false";
// When APT fails to mark a package to be upgraded or installed try adjusting
// candidates of related packages to help APT's resolver in finding a solution
// where the package can be upgraded or installed.
// This is a workaround until APT's resolver is fixed to always find a
// solution if it exists. (See LP: #1831002)
// The default is true, so keep problems fixed in the cloud.
// Unattended-Upgrade::Allow-APT-Mark-Fallback "true";

View File

@@ -0,0 +1,79 @@
# Logrotate configuration for Arrs applications
# Generated by Ansible
{# One shared stanza rendered per log location (DRY: the seven hand-copied
   stanzas were identical apart from the glob). The *arr apps log to *.txt,
   bazarr and the shared log directories use *.log. #}
{% set log_globs = [
    docker_root ~ '/sonarr/logs/*.txt',
    docker_root ~ '/radarr/logs/*.txt',
    docker_root ~ '/lidarr/logs/*.txt',
    docker_root ~ '/bazarr/logs/*.log',
    docker_root ~ '/prowlarr/logs/*.txt',
    docker_root ~ '/logs/*.log',
    docker_root ~ '/logs/*/*.log',
] %}
{% for glob in log_globs %}
{{ glob }} {
daily
rotate {{ log_max_files }}
size {{ log_max_size }}
compress
delaycompress
missingok
notifempty
create 0644 {{ docker_user }} {{ docker_group }}
}
{% endfor %}

23
templates/arrs-logs.sh.j2 Normal file
View File

@@ -0,0 +1,23 @@
#!/bin/bash
# View logs for Arrs services.
# Usage: arrs-logs            -> follow logs for all services
#        arrs-logs list       -> print known service names
#        arrs-logs <service>  -> follow logs for one service
# Fail loudly if the compose directory is missing instead of running
# docker-compose against whatever the current directory happens to be.
cd "{{ docker_compose_dir }}" || { echo "ERROR: cannot cd to {{ docker_compose_dir }}" >&2; exit 1; }
if [ $# -eq 0 ]; then
    echo "Showing logs for all services..."
    docker-compose logs -f
elif [ "$1" = "list" ]; then
    echo "Available services:"
    echo " sonarr"
    echo " radarr"
    echo " lidarr"
    echo " bazarr"
    echo " prowlarr"
    echo " watchtower"
    echo ""
    echo "Usage: $0 [service_name]"
    echo " $0 list"
else
    echo "Showing logs for $1..."
    docker-compose logs -f "$1"
fi

View File

@@ -0,0 +1,9 @@
#!/bin/bash
# Restart all Arrs services.
# set -e: stop before printing "restarted successfully" if cd or the
# restart itself fails (the original claimed success unconditionally).
set -e
echo "Restarting Arrs Media Stack..."
cd "{{ docker_compose_dir }}"
docker-compose restart
echo "Services restarted successfully!"
docker-compose ps

View File

@@ -0,0 +1,21 @@
# Systemd unit for the Arrs media stack: wraps docker-compose so the
# stack starts at boot and is torn down cleanly at shutdown.
[Unit]
Description=Arrs Media Stack
# Hard dependency: the stack cannot run without the Docker daemon.
Requires=docker.service
After=docker.service
# Prefer waiting for the network, but do not fail the unit if it is slow.
Wants=network-online.target
After=network-online.target
[Service]
# oneshot + RemainAfterExit: `docker-compose up -d` returns immediately,
# and systemd keeps the unit "active" until ExecStop runs.
Type=oneshot
RemainAfterExit=yes
User={{ docker_user }}
Group={{ docker_group }}
WorkingDirectory={{ docker_compose_dir }}
# NOTE(review): docker-compose path is hardcoded to /usr/local/bin —
# confirm this matches how docker-compose is installed on target hosts.
ExecStart=/usr/local/bin/docker-compose up -d
ExecStop=/usr/local/bin/docker-compose down
ExecReload=/usr/local/bin/docker-compose restart
# Generous timeouts: pulling/starting many containers can be slow.
TimeoutStartSec=300
TimeoutStopSec=120
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,21 @@
#!/bin/bash
# Start Arrs Media Stack
# Generated by Ansible
# set -e: abort instead of printing URLs when cd or docker-compose fails.
set -e
cd "{{ docker_compose_dir }}"
echo "Starting Arrs Media Stack..."
docker-compose up -d
echo "Waiting for services to start..."
sleep 10
echo "Service Status:"
docker-compose ps
echo ""
# Resolve the primary IP once instead of forking hostname|awk per URL line.
SERVER_IP=$(hostname -I | awk '{print $1}')
echo "Access URLs:"
echo "- Sonarr: http://${SERVER_IP}:{{ ports.sonarr }}"
echo "- Radarr: http://${SERVER_IP}:{{ ports.radarr }}"
echo "- Lidarr: http://${SERVER_IP}:{{ ports.lidarr }}"
echo "- Bazarr: http://${SERVER_IP}:{{ ports.bazarr }}"
echo "- Prowlarr: http://${SERVER_IP}:{{ ports.prowlarr }}"

View File

@@ -0,0 +1,31 @@
#!/bin/bash
# Check Arrs Media Stack Status
# Generated by Ansible
# Fail loudly if the compose directory is missing rather than reporting
# status for whatever the current directory happens to be.
cd "{{ docker_compose_dir }}" || { echo "ERROR: cannot cd to {{ docker_compose_dir }}" >&2; exit 1; }
echo "=== Docker Compose Status ==="
docker-compose ps
echo ""
echo "=== Container Health ==="
{% for service, port in ports.items() %}
# {{ service }}: probe the Servarr /ping endpoint and expect HTTP 200.
if curl -s -o /dev/null -w "%{http_code}" "http://localhost:{{ port }}/ping" | grep -q "200"; then
    echo "✅ {{ service|title }}: Healthy (Port {{ port }})"
else
    echo "❌ {{ service|title }}: Unhealthy (Port {{ port }})"
fi
{% endfor %}
echo ""
echo "=== System Resources ==="
echo "Memory Usage:"
free -h
echo ""
echo "Disk Usage:"
df -h "{{ media_root }}" "{{ docker_root }}"
echo ""
echo "=== Recent Logs ==="
docker-compose logs --tail=5

View File

@@ -0,0 +1,9 @@
#!/bin/bash
# Stop Arrs Media Stack
# Generated by Ansible
# set -e: do not print "All services stopped." if cd or the teardown fails.
set -e
cd "{{ docker_compose_dir }}"
echo "Stopping Arrs Media Stack..."
docker-compose down
echo "All services stopped."

View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Update and restart all Arrs services.
# set -e: stop the pipeline on the first failure so we never prune images
# or claim success after a failed pull/up.
set -e
echo "Updating Arrs Media Stack..."
cd "{{ docker_compose_dir }}"
echo "Pulling latest images..."
docker-compose pull
echo "Restarting services with new images..."
docker-compose up -d
echo "Cleaning up old images..."
docker image prune -f
echo "Update completed successfully!"
docker-compose ps

View File

@@ -0,0 +1,75 @@
#!/bin/bash
# Arrs Configuration Backup Script
# Generated by Ansible - Do not edit manually
# Stops the stack, rsyncs config dirs to a timestamped folder, archives it,
# and prunes old archives. Services are restarted even if the backup aborts.
set -euo pipefail

# Configuration
BACKUP_DIR="{{ backup_dir }}"
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
BACKUP_NAME="arrs_backup_${TIMESTAMP}"
BACKUP_PATH="${BACKUP_DIR}/${BACKUP_NAME}"
LOG_FILE="{{ docker_root }}/logs/backup.log"

# Logging function (stamps each line and tees to the backup log)
log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE"
}

# Safety net: under set -e any failure between `docker-compose stop` and the
# normal restart would previously leave the whole stack down. The EXIT trap
# brings the services back up if we bail out while they are stopped.
services_stopped=0
on_exit() {
    if (( services_stopped )); then
        log "Backup aborted - restarting Arrs services..."
        cd "{{ docker_compose_dir }}" && docker-compose up -d
    fi
}
trap on_exit EXIT

# Create backup directory
mkdir -p "$BACKUP_PATH"
log "Starting Arrs backup: $BACKUP_NAME"

# Stop services for consistent backup
log "Stopping Arrs services..."
cd "{{ docker_compose_dir }}"
docker-compose stop
services_stopped=1

# Backup configurations
log "Backing up configurations..."
{% for path in backup_paths %}
if [ -d "{{ path }}" ]; then
    rsync -av "{{ path }}/" "$BACKUP_PATH/$(basename "{{ path }}")/"
    log "Backed up {{ path }}"
fi
{% endfor %}

# Backup Docker Compose files
log "Backing up Docker Compose configuration..."
cp -r "{{ docker_compose_dir }}" "$BACKUP_PATH/compose"

# Create backup metadata
cat > "$BACKUP_PATH/backup_info.txt" << EOF
Backup Date: $(date)
Hostname: $(hostname)
Docker User: {{ docker_user }}
Media Root: {{ media_root }}
Docker Root: {{ docker_root }}
Backup Paths:
{% for path in backup_paths %}
- {{ path }}
{% endfor %}
EOF

# Restart services
log "Restarting Arrs services..."
docker-compose up -d
services_stopped=0

# Create compressed archive
log "Creating compressed archive..."
cd "$BACKUP_DIR"
tar -czf "${BACKUP_NAME}.tar.gz" "$BACKUP_NAME"
rm -rf "$BACKUP_NAME"

# Set permissions
chown "{{ docker_user }}:{{ docker_group }}" "${BACKUP_NAME}.tar.gz"
log "Backup completed: ${BACKUP_NAME}.tar.gz"
log "Backup size: $(du -h "${BACKUP_NAME}.tar.gz" | cut -f1)"

# Cleanup old backups
log "Cleaning up old backups..."
find "$BACKUP_DIR" -name "arrs_backup_*.tar.gz" -mtime +{{ backup_retention_days }} -delete
log "Backup process finished successfully"

46
templates/bash_aliases.j2 Normal file
View File

@@ -0,0 +1,46 @@
# Docker aliases
alias dps='docker ps'
alias dlog='docker logs'
alias dlogf='docker logs -f'
alias dexec='docker exec -it'
alias dstop='docker stop'
alias dstart='docker start'
alias drestart='docker restart'
# Docker Compose aliases
alias dcup='docker-compose up -d'
alias dcdown='docker-compose down'
alias dcrestart='docker-compose restart'
alias dcpull='docker-compose pull'
alias dclogs='docker-compose logs'
alias dclogsf='docker-compose logs -f'
# Arrs stack specific aliases (each cd's itself so they work from any cwd)
alias arrs-up='cd {{ docker_compose_dir }} && docker-compose up -d'
alias arrs-down='cd {{ docker_compose_dir }} && docker-compose down'
alias arrs-restart='cd {{ docker_compose_dir }} && docker-compose restart'
alias arrs-logs='cd {{ docker_compose_dir }} && docker-compose logs -f'
alias arrs-pull='cd {{ docker_compose_dir }} && docker-compose pull && docker-compose up -d'
alias arrs-status='cd {{ docker_compose_dir }} && docker-compose ps'
# Navigation aliases
alias media='cd {{ media_root }}'
alias docker-config='cd {{ docker_root }}'
alias compose='cd {{ docker_compose_dir }}'
# System monitoring aliases
# (the former `alias htop='htop'` was a no-op and has been removed)
# NOTE(review): shadowing df/du/free/ps with option presets changes the
# behavior of plain invocations for interactive users — confirm intended.
alias df='df -h'
alias du='du -h'
alias free='free -h'
alias ps='ps aux'
# Log viewing aliases
alias syslog='tail -f /var/log/syslog'
alias dockerlog='tail -f /var/log/docker.log'
alias arrs-log-sonarr='docker logs -f sonarr'
alias arrs-log-radarr='docker logs -f radarr'
alias arrs-log-lidarr='docker logs -f lidarr'
alias arrs-log-bazarr='docker logs -f bazarr'
alias arrs-log-prowlarr='docker logs -f prowlarr'
alias arrs-log-watchtower='docker logs -f watchtower'

120
templates/bashrc.j2 Normal file
View File

@@ -0,0 +1,120 @@
# ~/.bashrc: executed by bash(1) for non-login shells.
# Stock Debian prompt/history/color setup - kept unchanged below.
# If not running interactively, don't do anything
case $- in
*i*) ;;
*) return;;
esac
# don't put duplicate lines or lines starting with space in the history.
HISTCONTROL=ignoreboth
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color|*-256color) color_prompt=yes;;
esac
# NOTE(review): force_color_prompt is never assigned in this template, so
# this branch is inert unless the variable is exported in the environment.
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
color_prompt=yes
else
color_prompt=
fi
fi
if [ "$color_prompt" = yes ]; then
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
;;
*)
;;
esac
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# colored GCC warnings and errors
export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
# some more ls aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
# Docker aliases for Arrs stack management
alias dps='docker ps'
alias dlog='docker logs'
alias dlogf='docker logs -f'
alias dexec='docker exec -it'
alias dstop='docker stop'
alias dstart='docker start'
alias drestart='docker restart'
# Docker Compose aliases
alias dcup='docker-compose up -d'
alias dcdown='docker-compose down'
alias dcrestart='docker-compose restart'
alias dcpull='docker-compose pull'
alias dclogs='docker-compose logs'
alias dclogsf='docker-compose logs -f'
# Arrs stack specific aliases.
# Each alias changes into the compose directory itself (matching
# bash_aliases.j2). The previous bare `cd {{ docker_compose_dir }}` here
# dumped every new interactive shell into the compose directory and the
# aliases silently broke as soon as the user cd'd anywhere else.
alias arrs-up='cd {{ docker_compose_dir }} && docker-compose up -d'
alias arrs-down='cd {{ docker_compose_dir }} && docker-compose down'
alias arrs-restart='cd {{ docker_compose_dir }} && docker-compose restart'
alias arrs-logs='cd {{ docker_compose_dir }} && docker-compose logs -f'
alias arrs-pull='cd {{ docker_compose_dir }} && docker-compose pull && docker-compose up -d'
alias arrs-status='cd {{ docker_compose_dir }} && docker-compose ps'
# Navigation aliases
alias media='cd {{ media_root }}'
alias docker-config='cd {{ docker_root }}'
alias compose='cd {{ docker_compose_dir }}'
# Login banner listing the helper aliases defined above
echo "Welcome to the Arrs Media Stack server!"
echo "Available commands:"
echo " arrs-up - Start all services"
echo " arrs-down - Stop all services"
echo " arrs-restart - Restart all services"
echo " arrs-logs - View logs"
echo " arrs-pull - Update and restart services"
echo " arrs-status - Show service status"
echo ""
echo "Navigation:"
echo " media - Go to media directory"
echo " compose - Go to docker-compose directory"
echo ""

View File

@@ -0,0 +1,77 @@
#!/bin/bash
# Service health check script for Arrs Media Stack
# Generated by Ansible
set -e
COMPOSE_DIR="{{ docker_compose_dir }}"
# Parallel arrays: SERVICES[i] is probed on PORTS[i].
# watchtower is last and exposes no web port; the loop below skips its port
# check, which is why PORTS may be one element shorter than SERVICES. Keep
# the two lists in the same order when editing.
SERVICES=("sonarr" "radarr" "lidarr" "bazarr" "prowlarr" "watchtower")
PORTS=({{ ports.sonarr }} {{ ports.radarr }} {{ ports.lidarr }} {{ ports.bazarr }} {{ ports.prowlarr }})
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
echo "=== Arrs Media Stack Health Check ==="
echo "Timestamp: $(date)"
echo
# Check if compose directory exists
if [[ ! -d "$COMPOSE_DIR" ]]; then
echo -e "${RED}ERROR: Compose directory not found: $COMPOSE_DIR${NC}"
exit 1
fi
cd "$COMPOSE_DIR"
# Check Docker Compose services
echo "Docker Compose Services:"
docker-compose ps
echo
echo "Service Health Status:"
# Check each service
for i in "${!SERVICES[@]}"; do
service="${SERVICES[$i]}"
# Skip watchtower port check (container-running check only)
if [[ "$service" == "watchtower" ]]; then
if docker-compose ps "$service" | grep -q "Up"; then
echo -e " ${service}: ${GREEN}Running${NC}"
else
echo -e " ${service}: ${RED}Not Running${NC}"
fi
continue
fi
port="${PORTS[$i]}"
# Check if container is running
if docker-compose ps "$service" | grep -q "Up"; then
# Check if port is responding: try /ping, then the root URL, then a raw
# TCP connect, so services without a /ping endpoint still count as up.
if curl -s -f "http://localhost:$port/ping" >/dev/null 2>&1 || \
curl -s -f "http://localhost:$port" >/dev/null 2>&1 || \
nc -z localhost "$port" 2>/dev/null; then
echo -e " ${service} (port $port): ${GREEN}Healthy${NC}"
else
echo -e " ${service} (port $port): ${YELLOW}Running but not responding${NC}"
fi
else
echo -e " ${service} (port $port): ${RED}Not Running${NC}"
fi
done
echo
echo "System Resources:"
echo " Memory: $(free -h | grep Mem | awk '{print $3 "/" $2}')"
echo " Disk: $(df -h {{ docker_root }} | tail -1 | awk '{print $3 "/" $2 " (" $5 ")"}')"
echo
echo "Recent Container Events:"
docker events --since="1h" --until="now" 2>/dev/null | tail -5 || echo " No recent events"
echo
echo "=== End Health Check ==="

View File

@@ -0,0 +1,104 @@
#!/bin/bash
# Configure Bazarr Connections
# Writes Sonarr/Radarr connection settings directly into Bazarr's SQLite DB
# via small generated Python helpers (best-effort: failures fall back to a
# "configure manually" hint rather than aborting).
echo "🔧 Configuring Bazarr connections..."

# Create Bazarr configuration directory if it doesn't exist
mkdir -p /config/config

# Generate the helper scripts under an unpredictable mktemp directory and
# remove them on exit (fixed names like /tmp/sonarr_config.py are a
# clobber/symlink hazard on shared hosts).
tmpdir=$(mktemp -d)
trap 'rm -rf "$tmpdir"' EXIT

# Configure Sonarr connection in Bazarr
# (heredoc delimiter is quoted: shell does not expand anything; the
# {{ ... }} placeholders are filled in by Jinja2 at template render time)
cat > "$tmpdir/sonarr_config.py" << 'EOF'
import sqlite3
import json

# Connect to Bazarr database
conn = sqlite3.connect('/config/db/bazarr.db')
cursor = conn.cursor()

# Update Sonarr settings
sonarr_settings = {
    'ip': 'sonarr',
    'port': 8989,
    'base_url': '',
    'ssl': False,
    'apikey': '{{ api_keys.sonarr }}',
    'full_update': 'Daily',
    'only_monitored': False,
    'series_sync': 60,
    'episodes_sync': 60
}

# Insert or update Sonarr settings
for key, value in sonarr_settings.items():
    cursor.execute(
        "INSERT OR REPLACE INTO table_settings_sonarr (key, value) VALUES (?, ?)",
        (key, json.dumps(value) if isinstance(value, (dict, list)) else str(value))
    )
conn.commit()
conn.close()
print("✅ Sonarr configuration updated in Bazarr")
EOF

# Configure Radarr connection in Bazarr
cat > "$tmpdir/radarr_config.py" << 'EOF'
import sqlite3
import json

# Connect to Bazarr database
conn = sqlite3.connect('/config/db/bazarr.db')
cursor = conn.cursor()

# Update Radarr settings
radarr_settings = {
    'ip': 'radarr',
    'port': 7878,
    'base_url': '',
    'ssl': False,
    'apikey': '{{ api_keys.radarr }}',
    'full_update': 'Daily',
    'only_monitored': False,
    'movies_sync': 60
}

# Insert or update Radarr settings
for key, value in radarr_settings.items():
    cursor.execute(
        "INSERT OR REPLACE INTO table_settings_radarr (key, value) VALUES (?, ?)",
        (key, json.dumps(value) if isinstance(value, (dict, list)) else str(value))
    )
conn.commit()
conn.close()
print("✅ Radarr configuration updated in Bazarr")
EOF

# Run the configuration scripts if Python is available
if command -v python3 >/dev/null 2>&1; then
    python3 "$tmpdir/sonarr_config.py" 2>/dev/null || echo "⚠️ Sonarr config update failed - configure manually"
    python3 "$tmpdir/radarr_config.py" 2>/dev/null || echo "⚠️ Radarr config update failed - configure manually"
else
    echo "⚠️ Python not available - configure Bazarr manually via web interface"
fi

# Enable Sonarr and Radarr in Bazarr settings
cat > "$tmpdir/enable_services.py" << 'EOF'
import sqlite3

conn = sqlite3.connect('/config/db/bazarr.db')
cursor = conn.cursor()

# Enable Sonarr and Radarr
cursor.execute("INSERT OR REPLACE INTO table_settings_general (key, value) VALUES ('use_sonarr', 'True')")
cursor.execute("INSERT OR REPLACE INTO table_settings_general (key, value) VALUES ('use_radarr', 'True')")
conn.commit()
conn.close()
print("✅ Sonarr and Radarr enabled in Bazarr")
EOF

if command -v python3 >/dev/null 2>&1; then
    python3 "$tmpdir/enable_services.py" 2>/dev/null || echo "⚠️ Service enabling failed"
fi
echo "✅ Bazarr configuration complete!"

View File

@@ -0,0 +1,51 @@
#!/bin/bash
# Configure Download Clients for *arr Services
echo "🔧 Configuring Download Clients..."

# Get service name from hostname
SERVICE=$(hostname)

# Resolve this container's web port and API key ONCE instead of re-parsing
# /proc/1/environ and config.xml for every request (also fixes the useless
# `cat | tr` / `cat | grep` pipelines).
# NOTE(review): takes the first env var containing "PORT" from PID 1 —
# assumes exactly one such variable exists in the container; verify.
ARR_PORT=$(tr '\0' '\n' < /proc/1/environ | grep -m1 'PORT' | cut -d'=' -f2)
# Extract the <ApiKey> value from the *arr config.xml
ARR_API_KEY=$(grep -o '<ApiKey>[^<]*</ApiKey>' /config/config.xml | sed 's/<[^>]*>//g')

# Configure SABnzbd (reached through the gluetun VPN container)
curl -X POST "http://localhost:${ARR_PORT}/api/v3/downloadclient" \
    -H "X-Api-Key: ${ARR_API_KEY}" \
    -H 'Content-Type: application/json' \
    -d '{
    "enable": true,
    "name": "SABnzbd",
    "implementation": "Sabnzbd",
    "configContract": "SabnzbdSettings",
    "fields": [
        {"name": "host", "value": "gluetun"},
        {"name": "port", "value": 8081},
        {"name": "apiKey", "value": "{{ api_keys.sabnzbd }}"},
        {"name": "username", "value": ""},
        {"name": "password", "value": ""},
        {"name": "tvCategory", "value": "tv"},
        {"name": "recentTvPriority", "value": 0},
        {"name": "olderTvPriority", "value": 0},
        {"name": "useSsl", "value": false}
    ]
}' 2>/dev/null || echo "SABnzbd configuration failed or already exists"

# Configure Deluge (reached through the gluetun VPN container)
curl -X POST "http://localhost:${ARR_PORT}/api/v3/downloadclient" \
    -H "X-Api-Key: ${ARR_API_KEY}" \
    -H 'Content-Type: application/json' \
    -d '{
    "enable": true,
    "name": "Deluge",
    "implementation": "Deluge",
    "configContract": "DelugeSettings",
    "fields": [
        {"name": "host", "value": "gluetun"},
        {"name": "port", "value": 8112},
        {"name": "password", "value": "deluge"},
        {"name": "tvCategory", "value": "tv"},
        {"name": "recentTvPriority", "value": 0},
        {"name": "olderTvPriority", "value": 0},
        {"name": "useSsl", "value": false}
    ]
}' 2>/dev/null || echo "Deluge configuration failed or already exists"

echo "✅ Download clients configuration complete for $SERVICE!"

View File

@@ -0,0 +1,51 @@
#!/bin/bash
# Configure Jellyseerr Services
echo "🔧 Configuring Jellyseerr services..."
# Wait for Jellyseerr to be ready
sleep 10
# Configure Sonarr in Jellyseerr
curl -X POST 'http://localhost:5055/api/v1/service/sonarr' \
-H 'X-Api-Key: {{ api_keys.jellyseerr }}' \
-H 'Content-Type: application/json' \
-d '{
"name": "Sonarr",
"hostname": "sonarr",
"port": 8989,
"apiKey": "{{ api_keys.sonarr }}",
"useSsl": false,
"baseUrl": "",
"activeProfileId": 1,
"activeLanguageProfileId": 1,
"activeDirectory": "/data/media/tv",
"is4k": false,
"enableSeasonFolders": true,
"externalUrl": "",
"syncEnabled": true,
"preventSearch": false
}' 2>/dev/null || echo "⚠️ Sonarr configuration failed - may already exist"
# Configure Radarr in Jellyseerr
curl -X POST 'http://localhost:5055/api/v1/service/radarr' \
-H 'X-Api-Key: {{ api_keys.jellyseerr }}' \
-H 'Content-Type: application/json' \
-d '{
"name": "Radarr",
"hostname": "radarr",
"port": 7878,
"apiKey": "{{ api_keys.radarr }}",
"useSsl": false,
"baseUrl": "",
"activeProfileId": 1,
"activeDirectory": "/data/media/movies",
"is4k": false,
"externalUrl": "",
"syncEnabled": true,
"preventSearch": false,
"minimumAvailability": "released"
}' 2>/dev/null || echo "⚠️ Radarr configuration failed - may already exist"
echo "✅ Jellyseerr services configuration complete!"
echo "🌐 Access Jellyseerr at: http://your-server:5055"

View File

@@ -0,0 +1,70 @@
#!/bin/bash
# Configure Prowlarr Applications
echo "🔧 Configuring Prowlarr Applications..."
# Add Sonarr
curl -X POST 'http://localhost:9696/api/v1/applications' \
-H 'X-Api-Key: {{ api_keys.prowlarr }}' \
-H 'Content-Type: application/json' \
-d '{
"name": "Sonarr",
"syncLevel": "fullSync",
"implementation": "Sonarr",
"configContract": "SonarrSettings",
"fields": [
{"name": "baseUrl", "value": "http://sonarr:8989"},
{"name": "apiKey", "value": "{{ api_keys.sonarr }}"},
{"name": "syncCategories", "value": [5000, 5030, 5040]}
]
}' || echo "Sonarr already configured or error occurred"
# Add Radarr
curl -X POST 'http://localhost:9696/api/v1/applications' \
-H 'X-Api-Key: {{ api_keys.prowlarr }}' \
-H 'Content-Type: application/json' \
-d '{
"name": "Radarr",
"syncLevel": "fullSync",
"implementation": "Radarr",
"configContract": "RadarrSettings",
"fields": [
{"name": "baseUrl", "value": "http://radarr:7878"},
{"name": "apiKey", "value": "{{ api_keys.radarr }}"},
{"name": "syncCategories", "value": [2000, 2010, 2020, 2030, 2040, 2045, 2050, 2060]}
]
}' || echo "Radarr already configured or error occurred"
# Add Lidarr
curl -X POST 'http://localhost:9696/api/v1/applications' \
-H 'X-Api-Key: {{ api_keys.prowlarr }}' \
-H 'Content-Type: application/json' \
-d '{
"name": "Lidarr",
"syncLevel": "fullSync",
"implementation": "Lidarr",
"configContract": "LidarrSettings",
"fields": [
{"name": "baseUrl", "value": "http://lidarr:8686"},
{"name": "apiKey", "value": "{{ api_keys.lidarr }}"},
{"name": "syncCategories", "value": [3000, 3010, 3020, 3030, 3040]}
]
}' || echo "Lidarr already configured or error occurred"
# Add Whisparr
curl -X POST 'http://localhost:9696/api/v1/applications' \
-H 'X-Api-Key: {{ api_keys.prowlarr }}' \
-H 'Content-Type: application/json' \
-d '{
"name": "Whisparr",
"syncLevel": "fullSync",
"implementation": "Whisparr",
"configContract": "WhisparrSettings",
"fields": [
{"name": "baseUrl", "value": "http://whisparr:6969"},
{"name": "apiKey", "value": "{{ api_keys.whisparr }}"},
{"name": "syncCategories", "value": [6000, 6010, 6020, 6030, 6040, 6050, 6060, 6070]}
]
}' || echo "Whisparr already configured or error occurred"
echo "✅ Prowlarr applications configuration complete!"

18
templates/daemon.json.j2 Normal file
View File

@@ -0,0 +1,18 @@
{
"log-driver": "json-file",
"log-opts": {
"max-size": "{{ log_max_size | default('10m') }}",
"max-file": "{{ log_max_files | default('3') }}"
},
"storage-driver": "overlay2",
"userland-proxy": false,
"experimental": false,
"live-restore": true,
"default-ulimits": {
"nofile": {
"Name": "nofile",
"Hard": 65536,
"Soft": 65536
}
}
}

View File

@@ -0,0 +1,210 @@
#!/bin/bash
# Disk usage monitoring script for Arrs Media Stack
# Generated by Ansible
#
# Appends filesystem, Docker and media-directory usage to a dated log under
# {{ docker_root }}/logs/system, emits WARNING/CRITICAL entries when the
# thresholds below are crossed, and records cleanup recommendations.

LOG_DIR="{{ docker_root }}/logs/system"
DISK_LOG="$LOG_DIR/disk-usage-$(date '+%Y%m%d').log"

# Ensure log directory exists
mkdir -p "$LOG_DIR"

# Append a message to the disk log, timestamped at write time (not script start,
# so long runs still log accurate times).
log_disk() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" >> "$DISK_LOG"
}

# Disk usage alert thresholds (percent used)
WARNING_THRESHOLD=80
CRITICAL_THRESHOLD=90

log_disk "=== DISK USAGE MONITORING ==="

# Monitor main directories
DIRECTORIES=(
    "{{ docker_root }}"
    "{{ media_root }}"
    "/var/lib/docker"
    "/tmp"
    "/var/log"
)

for dir in "${DIRECTORIES[@]}"; do
    if [[ -d "$dir" ]]; then
        # df -P keeps each mount on a single line even for long device names,
        # so the positional awk parsing below cannot be thrown off.
        USAGE=$(df -P "$dir" | tail -1)
        FILESYSTEM=$(echo "$USAGE" | awk '{print $1}')
        TOTAL=$(echo "$USAGE" | awk '{print $2}')
        USED=$(echo "$USAGE" | awk '{print $3}')
        AVAILABLE=$(echo "$USAGE" | awk '{print $4}')
        PERCENT=$(echo "$USAGE" | awk '{print $5}' | cut -d'%' -f1)
        # Convert 1K blocks to whole GB
        TOTAL_GB=$((TOTAL / 1024 / 1024))
        USED_GB=$((USED / 1024 / 1024))
        AVAILABLE_GB=$((AVAILABLE / 1024 / 1024))
        log_disk "DISK_USAGE $dir - Filesystem: $FILESYSTEM, Total: ${TOTAL_GB}GB, Used: ${USED_GB}GB (${PERCENT}%), Available: ${AVAILABLE_GB}GB"
        # Check thresholds
        if [[ $PERCENT -ge $CRITICAL_THRESHOLD ]]; then
            log_disk "CRITICAL_ALERT $dir disk usage is ${PERCENT}% (>=${CRITICAL_THRESHOLD}%)"
        elif [[ $PERCENT -ge $WARNING_THRESHOLD ]]; then
            log_disk "WARNING_ALERT $dir disk usage is ${PERCENT}% (>=${WARNING_THRESHOLD}%)"
        fi
    else
        log_disk "DIRECTORY_NOT_FOUND $dir does not exist"
    fi
done

# Monitor specific subdirectories in Docker root
log_disk "=== DOCKER SUBDIRECTORY USAGE ==="
DOCKER_SUBDIRS=(
    "{{ docker_root }}/sonarr"
    "{{ docker_root }}/radarr"
    "{{ docker_root }}/lidarr"
    "{{ docker_root }}/bazarr"
    "{{ docker_root }}/prowlarr"
    "{{ docker_root }}/compose"
    "{{ docker_root }}/logs"
)

for subdir in "${DOCKER_SUBDIRS[@]}"; do
    if [[ -d "$subdir" ]]; then
        SIZE=$(du -sh "$subdir" 2>/dev/null | cut -f1)
        log_disk "SUBDIR_SIZE $subdir: $SIZE"
    fi
done

# Monitor media subdirectories
log_disk "=== MEDIA DIRECTORY USAGE ==="
MEDIA_SUBDIRS=(
    "{{ media_root }}/movies"
    "{{ media_root }}/tv"
    "{{ media_root }}/music"
    "{{ media_root }}/downloads"
)

for subdir in "${MEDIA_SUBDIRS[@]}"; do
    if [[ -d "$subdir" ]]; then
        SIZE=$(du -sh "$subdir" 2>/dev/null | cut -f1)
        FILE_COUNT=$(find "$subdir" -type f 2>/dev/null | wc -l)
        log_disk "MEDIA_SIZE $subdir: $SIZE ($FILE_COUNT files)"
    else
        log_disk "MEDIA_DIR_NOT_FOUND $subdir does not exist"
    fi
done

# Docker system disk usage
if command -v docker >/dev/null 2>&1; then
    log_disk "=== DOCKER SYSTEM USAGE ==="
    # Docker system df ('{{ '{{' }}' is escaped so Jinja emits Go template syntax)
    DOCKER_DF=$(docker system df --format "{{ '{{.Type}}' }}\t{{ '{{.TotalCount}}' }}\t{{ '{{.Active}}' }}\t{{ '{{.Size}}' }}\t{{ '{{.Reclaimable}}' }}" 2>/dev/null)
    if [[ -n "$DOCKER_DF" ]]; then
        echo "$DOCKER_DF" | while IFS=$'\t' read -r type total active size reclaimable; do
            log_disk "DOCKER_USAGE $type - Total: $total, Active: $active, Size: $size, Reclaimable: $reclaimable"
        done
    fi
    # Container sizes — docker-compose must be run from the compose directory,
    # so skip this section (instead of inspecting the wrong project) if cd fails.
    if cd "{{ docker_compose_dir }}" 2>/dev/null; then
        SERVICES=("sonarr" "radarr" "lidarr" "bazarr" "prowlarr" "watchtower")
        for service in "${SERVICES[@]}"; do
            CONTAINER_ID=$(docker-compose ps -q "$service" 2>/dev/null)
            if [[ -n "$CONTAINER_ID" ]]; then
                CONTAINER_SIZE=$(docker inspect "$CONTAINER_ID" --format='{{ "{{.SizeRw}}" }}' 2>/dev/null)
                if [[ -n "$CONTAINER_SIZE" && "$CONTAINER_SIZE" != "null" ]]; then
                    CONTAINER_SIZE_MB=$((CONTAINER_SIZE / 1024 / 1024))
                    log_disk "CONTAINER_SIZE $service: ${CONTAINER_SIZE_MB}MB"
                fi
            fi
        done
    else
        log_disk "COMPOSE_DIR_NOT_FOUND {{ docker_compose_dir }} does not exist"
    fi
fi

# Large files detection (top 10 files over 100MB)
log_disk "=== LARGE FILES DETECTION ==="
LARGE_FILES=$(find "{{ docker_root }}" -type f -size +100M 2>/dev/null | head -10)
if [[ -n "$LARGE_FILES" ]]; then
    echo "$LARGE_FILES" | while IFS= read -r file; do
        SIZE=$(du -sh "$file" 2>/dev/null | cut -f1)
        log_disk "LARGE_FILE $file: $SIZE"
    done
else
    log_disk "LARGE_FILES No files larger than 100MB found in {{ docker_root }}"
fi

# Log files size monitoring
log_disk "=== LOG FILES MONITORING ==="
LOG_DIRS=(
    "{{ docker_root }}/logs"
    "{{ docker_root }}/sonarr/logs"
    "{{ docker_root }}/radarr/logs"
    "{{ docker_root }}/lidarr/logs"
    "{{ docker_root }}/bazarr/logs"
    "{{ docker_root }}/prowlarr/logs"
    "/var/log"
)

for log_dir in "${LOG_DIRS[@]}"; do
    if [[ -d "$log_dir" ]]; then
        LOG_SIZE=$(du -sh "$log_dir" 2>/dev/null | cut -f1)
        LOG_COUNT=$(find "$log_dir" \( -name "*.log" -o -name "*.txt" \) 2>/dev/null | wc -l)
        log_disk "LOG_DIR_SIZE $log_dir: $LOG_SIZE ($LOG_COUNT log files)"
        # Find large log files. The name tests MUST be parenthesised: without
        # them "-o -name '*.txt' -size +10M" binds -size only to *.txt and
        # every .log file is reported regardless of size.
        LARGE_LOGS=$(find "$log_dir" \( -name "*.log" -o -name "*.txt" \) -size +10M 2>/dev/null)
        if [[ -n "$LARGE_LOGS" ]]; then
            echo "$LARGE_LOGS" | while IFS= read -r logfile; do
                SIZE=$(du -sh "$logfile" 2>/dev/null | cut -f1)
                log_disk "LARGE_LOG $logfile: $SIZE"
            done
        fi
    fi
done

# Disk I/O statistics (only if sysstat is installed)
if command -v iostat >/dev/null 2>&1; then
    log_disk "=== DISK I/O STATISTICS ==="
    IOSTAT_OUTPUT=$(iostat -d 1 1 | tail -n +4)
    echo "$IOSTAT_OUTPUT" | while IFS= read -r line; do
        if [[ -n "$line" && "$line" != *"Device"* ]]; then
            log_disk "DISK_IO $line"
        fi
    done
fi

# Cleanup recommendations
log_disk "=== CLEANUP RECOMMENDATIONS ==="
# Check for old Docker images
if command -v docker >/dev/null 2>&1; then
    DANGLING_IMAGES=$(docker images -f "dangling=true" -q | wc -l)
    if [[ $DANGLING_IMAGES -gt 0 ]]; then
        log_disk "CLEANUP_RECOMMENDATION $DANGLING_IMAGES dangling Docker images can be removed with 'docker image prune'"
    fi
    UNUSED_VOLUMES=$(docker volume ls -f "dangling=true" -q | wc -l)
    if [[ $UNUSED_VOLUMES -gt 0 ]]; then
        log_disk "CLEANUP_RECOMMENDATION $UNUSED_VOLUMES unused Docker volumes can be removed with 'docker volume prune'"
    fi
fi

# Check for old log files
OLD_LOGS=$(find "{{ docker_root }}/logs" -name "*.log" -mtime +30 2>/dev/null | wc -l)
if [[ $OLD_LOGS -gt 0 ]]; then
    log_disk "CLEANUP_RECOMMENDATION $OLD_LOGS log files older than 30 days can be cleaned up"
fi

# Check for compressed logs
COMPRESSED_LOGS=$(find "{{ docker_root }}/logs" -name "*.gz" -mtime +90 2>/dev/null | wc -l)
if [[ $COMPRESSED_LOGS -gt 0 ]]; then
    log_disk "CLEANUP_RECOMMENDATION $COMPRESSED_LOGS compressed log files older than 90 days can be removed"
fi

log_disk "=== END DISK USAGE MONITORING ==="

# Cleanup old disk usage logs (keep 7 days)
find "$LOG_DIR" -name "disk-usage-*.log" -mtime +7 -delete 2>/dev/null
exit 0

View File

@@ -0,0 +1,32 @@
#include <tunables/global>

# AppArmor profile applied to the arrs stack containers.
# Modelled on Docker's default container profile: broad allows for network,
# capabilities and file access, with explicit denials for sensitive /proc and
# /sys paths and for mounting from inside the container.
profile docker-arrs flags=(attach_disconnected,mediate_deleted) {
  #include <abstractions/base>

  # Broad allows — confinement comes from the deny rules below.
  network,
  capability,
  file,
  umount,

  deny @{PROC}/* w,   # deny write for all files directly in /proc (not in a subdir)
  # deny write to /proc/<pid>/** for non-PID entries (keeps /proc/1..9* writable per task needs)
  deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w,
  deny @{PROC}/sys/[^k]** w,  # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel)
  deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w,  # deny everything except shm* in /proc/sys/kernel/
  deny @{PROC}/sysrq-trigger rwklx,
  deny @{PROC}/mem rwklx,
  deny @{PROC}/kmem rwklx,
  deny @{PROC}/kcore rwklx,

  deny mount,

  # Block /sys writes except under /sys/fs/cgroup (pattern excludes it stepwise).
  deny /sys/[^f]*/** wklx,
  deny /sys/f[^s]*/** wklx,
  deny /sys/fs/[^c]*/** wklx,
  deny /sys/fs/c[^g]*/** wklx,
  deny /sys/fs/cg[^r]*/** wklx,
  deny /sys/firmware/** rwklx,
  deny /sys/kernel/security/** rwklx,

  # suppress ptrace denials when using 'docker ps' or using 'ps' inside a container
  ptrace (trace,read) peer=docker-arrs,
}

View File

@@ -0,0 +1,575 @@
---
# Docker Compose for Arrs Media Stack
# Adapted from Dr. Frankenstein's guide for VPS deployment
# Generated by Ansible - Do not edit manually
version: '3.8'
services:
sonarr:
image: linuxserver/sonarr:latest
container_name: sonarr
environment:
- PUID={{ docker_uid }}
- PGID={{ docker_gid }}
- TZ={{ timezone }}
- UMASK=022
volumes:
- {{ docker_root }}/sonarr:/config
- {{ media_root }}:/data
ports:
{% if bind_to_tailscale_only %}
- "{{ tailscale_bind_ip }}:{{ ports.sonarr }}:8989/tcp" # Tailscale only
{% else %}
- "{{ ports.sonarr }}:8989/tcp" # All interfaces
{% endif %}
networks:
- arrs_network
security_opt:
- no-new-privileges:true
restart: always
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8989/ping"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
labels:
- "com.centurylinklabs.watchtower.enable=true"
radarr:
image: linuxserver/radarr:latest
container_name: radarr
environment:
- PUID={{ docker_uid }}
- PGID={{ docker_gid }}
- TZ={{ timezone }}
- UMASK=022
volumes:
- {{ docker_root }}/radarr:/config
- {{ media_root }}:/data
ports:
{% if bind_to_tailscale_only %}
- "{{ tailscale_bind_ip }}:{{ ports.radarr }}:7878/tcp" # Tailscale only
{% else %}
- "{{ ports.radarr }}:7878/tcp" # All interfaces
{% endif %}
networks:
- arrs_network
security_opt:
- no-new-privileges:true
restart: always
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:7878/ping"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
labels:
- "com.centurylinklabs.watchtower.enable=true"
lidarr:
image: linuxserver/lidarr:latest
container_name: lidarr
environment:
- PUID={{ docker_uid }}
- PGID={{ docker_gid }}
- TZ={{ timezone }}
- UMASK=022
volumes:
- {{ docker_root }}/lidarr:/config
- {{ media_root }}:/data
ports:
{% if bind_to_tailscale_only %}
- "{{ tailscale_bind_ip }}:{{ ports.lidarr }}:8686/tcp" # Tailscale only
{% else %}
- "{{ ports.lidarr }}:8686/tcp" # All interfaces
{% endif %}
networks:
- arrs_network
security_opt:
- no-new-privileges:true
restart: always
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8686/ping"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
labels:
- "com.centurylinklabs.watchtower.enable=true"
bazarr:
image: linuxserver/bazarr:latest
container_name: bazarr
environment:
- PUID={{ docker_uid }}
- PGID={{ docker_gid }}
- TZ={{ timezone }}
- UMASK=022
volumes:
- {{ docker_root }}/bazarr:/config
- {{ media_root }}:/data
ports:
{% if bind_to_tailscale_only %}
- "{{ tailscale_bind_ip }}:{{ ports.bazarr }}:6767/tcp" # Tailscale only
{% else %}
- "{{ ports.bazarr }}:6767/tcp" # All interfaces
{% endif %}
networks:
- arrs_network
security_opt:
- no-new-privileges:true
restart: always
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:6767/ping"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
labels:
- "com.centurylinklabs.watchtower.enable=true"
prowlarr:
image: linuxserver/prowlarr:latest
container_name: prowlarr
environment:
- PUID={{ docker_uid }}
- PGID={{ docker_gid }}
- TZ={{ timezone }}
- UMASK=022
volumes:
- {{ docker_root }}/prowlarr:/config
ports:
{% if bind_to_tailscale_only %}
- "{{ tailscale_bind_ip }}:{{ ports.prowlarr }}:9696/tcp" # Tailscale only
{% else %}
- "{{ ports.prowlarr }}:9696/tcp" # All interfaces
{% endif %}
networks:
- arrs_network
security_opt:
- no-new-privileges:true
restart: always
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9696/ping"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
labels:
- "com.centurylinklabs.watchtower.enable=true"
whisparr:
image: ghcr.io/hotio/whisparr
container_name: whisparr
environment:
- PUID={{ docker_uid }}
- PGID={{ docker_gid }}
- TZ={{ timezone }}
- UMASK=022
volumes:
- {{ docker_root }}/whisparr:/config
- {{ media_root }}:/data
- {{ media_root }}/xxx:/data/xxx # Adult content directory
ports:
{% if bind_to_tailscale_only %}
- "{{ tailscale_bind_ip }}:{{ ports.whisparr }}:6969/tcp" # Tailscale only
{% else %}
- "{{ ports.whisparr }}:6969/tcp" # All interfaces
{% endif %}
networks:
- arrs_network
security_opt:
- no-new-privileges:true
restart: always
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:6969/ping"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
labels:
- "com.centurylinklabs.watchtower.enable=true"
sabnzbd:
image: linuxserver/sabnzbd:latest
container_name: sabnzbd
environment:
- PUID={{ docker_uid }}
- PGID={{ docker_gid }}
- TZ={{ timezone }}
- UMASK=022
{% if vpn_enabled and sabnzbd_vpn_enabled %}
- WEBUI_PORT=8081 # Use different port when through VPN to avoid qBittorrent conflict
{% endif %}
volumes:
- {{ docker_root }}/sabnzbd:/config
- {{ media_root }}/downloads:/downloads
- {{ media_root }}/downloads/incomplete:/incomplete-downloads
{% if vpn_enabled and sabnzbd_vpn_enabled %}
network_mode: "service:gluetun" # Route through VPN
depends_on:
- gluetun
{% else %}
ports:
{% if bind_to_tailscale_only %}
- "{{ tailscale_bind_ip }}:{{ ports.sabnzbd }}:8080/tcp" # Tailscale only
{% else %}
- "{{ ports.sabnzbd }}:8080/tcp" # All interfaces
{% endif %}
networks:
- arrs_network
{% endif %}
security_opt:
- no-new-privileges:true
restart: always
healthcheck:
{% if vpn_enabled and sabnzbd_vpn_enabled %}
test: ["CMD", "curl", "-f", "http://localhost:8081/api?mode=version"]
{% else %}
test: ["CMD", "curl", "-f", "http://localhost:8080/api?mode=version"]
{% endif %}
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
labels:
- "com.centurylinklabs.watchtower.enable=true"
plex:
image: linuxserver/plex:latest
container_name: plex
environment:
- PUID={{ docker_uid }}
- PGID={{ docker_gid }}
- TZ={{ timezone }}
- VERSION=docker
- PLEX_CLAIM={{ plex_claim_token | default('') }}
volumes:
- {{ docker_root }}/plex:/config
- {{ media_root }}/movies:/movies:ro
- {{ media_root }}/tv:/tv:ro
- {{ media_root }}/music:/music:ro
ports:
{% if plex_public_access %}
- "{{ ports.plex }}:32400/tcp" # Public access for direct streaming
{% elif bind_to_tailscale_only %}
- "{{ tailscale_bind_ip }}:{{ ports.plex }}:32400/tcp" # Tailscale only
{% else %}
- "{{ ports.plex }}:32400/tcp" # All interfaces
{% endif %}
networks:
- arrs_network
security_opt:
- no-new-privileges:true
restart: always
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:32400/web"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
labels:
- "com.centurylinklabs.watchtower.enable=true"
tautulli:
image: linuxserver/tautulli:latest
container_name: tautulli
environment:
- PUID={{ docker_uid }}
- PGID={{ docker_gid }}
- TZ={{ timezone }}
volumes:
- {{ docker_root }}/tautulli:/config
ports:
{% if bind_to_tailscale_only %}
- "{{ tailscale_bind_ip }}:{{ ports.tautulli }}:8181/tcp" # Tailscale only
{% else %}
- "{{ ports.tautulli }}:8181/tcp" # All interfaces
{% endif %}
networks:
- arrs_network
security_opt:
- no-new-privileges:true
restart: always
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8181/status"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
labels:
- "com.centurylinklabs.watchtower.enable=true"
jellyseerr:
image: fallenbagel/jellyseerr:latest
container_name: jellyseerr
environment:
- LOG_LEVEL=debug
- TZ={{ timezone }}
volumes:
- {{ docker_root }}/jellyseerr:/app/config
ports:
{% if bind_to_tailscale_only %}
- "{{ tailscale_bind_ip }}:{{ ports.jellyseerr }}:5055/tcp" # Tailscale only
{% else %}
- "{{ ports.jellyseerr }}:5055/tcp" # All interfaces
{% endif %}
networks:
- arrs_network
security_opt:
- no-new-privileges:true
restart: always
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5055/api/v1/status"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
labels:
- "com.centurylinklabs.watchtower.enable=true"
{% if vpn_enabled %}
  # Gluetun — VPN gateway container. Download clients attach to its network
  # namespace via network_mode: "service:gluetun", so their WebUI ports must
  # be published here rather than on the client containers.
  gluetun:
    image: qmcgaw/gluetun:latest
    container_name: gluetun
    cap_add:
      - NET_ADMIN
    devices:
      - /dev/net/tun:/dev/net/tun
    environment:
      - VPN_SERVICE_PROVIDER={{ vpn_provider | default('') }}
      - VPN_TYPE={{ vpn_type | default('openvpn') }}
{% if vpn_type == 'wireguard' %}
      - WIREGUARD_PRIVATE_KEY={{ wireguard_private_key | default('') }}
      - WIREGUARD_ADDRESSES={{ wireguard_addresses | default('') }}
      - WIREGUARD_PUBLIC_KEY={{ wireguard_public_key | default('') }}
      - VPN_ENDPOINT_IP={{ wireguard_endpoint.split(':')[0] | default('') }}
      - VPN_ENDPOINT_PORT={{ wireguard_endpoint.split(':')[1] | default('51820') }}
{% else %}
      - OPENVPN_USER={{ openvpn_user | default('') }}
      - OPENVPN_PASSWORD={{ openvpn_password | default('') }}
{% if vpn_provider == 'custom' %}
      - OPENVPN_CUSTOM_CONFIG=/gluetun/custom.conf
{% endif %}
{% endif %}
{% if vpn_provider != 'custom' and vpn_type != 'wireguard' %}
      - SERVER_COUNTRIES={{ vpn_countries | default('') }}
{% endif %}
      - FIREWALL_OUTBOUND_SUBNETS={{ docker_network_subnet }}
      - FIREWALL_VPN_INPUT_PORTS=8080{% if sabnzbd_vpn_enabled %},8081{% endif %} # Allow WebUI access
      - FIREWALL=on # Enable firewall kill switch
      - DOT=off # Disable DNS over TLS to prevent leaks
      - BLOCK_MALICIOUS=on # Block malicious domains
      - BLOCK_ADS=off # Keep ads blocking off to avoid issues
      - UNBLOCK= # No unblocking needed
      - TZ={{ timezone }}
    volumes:
      - {{ docker_root }}/gluetun:/gluetun
    ports:
      # Publish SABnzbd's WebUI here only when SABnzbd is actually routed
      # through the VPN; otherwise the standalone sabnzbd service publishes
      # the same host port itself and the two bindings would collide.
{% if bind_to_tailscale_only %}
{% if sabnzbd_vpn_enabled %}
      - "{{ tailscale_bind_ip }}:{{ ports.sabnzbd }}:8081/tcp" # SABnzbd WebUI through VPN (Tailscale only)
{% endif %}
      - "{{ tailscale_bind_ip }}:{{ ports.deluge }}:8112/tcp" # Deluge WebUI through VPN (Tailscale only)
{% else %}
{% if sabnzbd_vpn_enabled %}
      - "{{ ports.sabnzbd }}:8081/tcp" # SABnzbd WebUI through VPN (all interfaces)
{% endif %}
      - "{{ ports.deluge }}:8112/tcp" # Deluge WebUI through VPN (all interfaces)
{% endif %}
    networks:
      - arrs_network
    security_opt:
      - no-new-privileges:true
    restart: always
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://www.google.com/"]
      interval: 60s
      timeout: 30s
      retries: 3
      start_period: 120s
    labels:
      - "com.centurylinklabs.watchtower.enable=true"
{% endif %}
deluge:
image: linuxserver/deluge:latest
container_name: deluge
environment:
- PUID={{ docker_uid }}
- PGID={{ docker_gid }}
- TZ={{ timezone }}
- UMASK=022
- DELUGE_LOGLEVEL=error
volumes:
- {{ docker_root }}/deluge:/config
- {{ media_root }}/downloads:/downloads
{% if vpn_enabled %}
network_mode: "service:gluetun" # Route through VPN
depends_on:
- gluetun
{% else %}
ports:
{% if bind_to_tailscale_only %}
- "{{ tailscale_bind_ip }}:{{ ports.deluge }}:8112/tcp" # Tailscale only
{% else %}
- "{{ ports.deluge }}:8112/tcp" # All interfaces
{% endif %}
networks:
- arrs_network
{% endif %}
security_opt:
- no-new-privileges:true
restart: always
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8112/"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
labels:
- "com.centurylinklabs.watchtower.enable=true"
# TubeArchivist stack - YouTube archiving
tubearchivist-es:
image: docker.elastic.co/elasticsearch/elasticsearch:8.11.0
container_name: tubearchivist-es
environment:
- "ELASTIC_PASSWORD=verysecret"
- "ES_JAVA_OPTS=-Xms1g -Xmx1g"
- "xpack.security.enabled=true"
- "discovery.type=single-node"
- "path.repo=/usr/share/elasticsearch/data/snapshot"
volumes:
- {{ docker_root }}/tubearchivist/es:/usr/share/elasticsearch/data
networks:
- arrs_network
security_opt:
- no-new-privileges:true
restart: always
healthcheck:
test: ["CMD", "curl", "-u", "elastic:verysecret", "-f", "http://localhost:9200/_cluster/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
labels:
- "com.centurylinklabs.watchtower.enable=true"
tubearchivist-redis:
image: redis/redis-stack-server:latest
container_name: tubearchivist-redis
volumes:
- {{ docker_root }}/tubearchivist/redis:/data
networks:
- arrs_network
security_opt:
- no-new-privileges:true
restart: always
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
labels:
- "com.centurylinklabs.watchtower.enable=true"
  # TubeArchivist web app — YouTube archiving UI; requires the ES and Redis
  # sidecars declared above.
  tubearchivist:
    image: bbilly1/tubearchivist:latest
    container_name: tubearchivist
    environment:
      - ES_URL=http://tubearchivist-es:9200
      - REDIS_CON=redis://tubearchivist-redis:6379
      - HOST_UID={{ docker_uid }}
      - HOST_GID={{ docker_gid }}
      - TA_HOST=http://{{ tailscale_bind_ip }}:{{ ports.tubearchivist }}
      # WARNING(review): hardcoded credentials below ("verysecret") — unlike
      # the other services these are not templated; promote to Ansible
      # variables/vault before production use. ELASTIC_PASSWORD must match the
      # value configured on the tubearchivist-es service.
      - TA_USERNAME=tubearchivist
      - TA_PASSWORD=verysecret
      - ELASTIC_PASSWORD=verysecret
      - TZ={{ timezone }}
    volumes:
      - {{ media_root }}/youtube:/youtube
      - {{ docker_root }}/tubearchivist/cache:/cache
    ports:
{% if bind_to_tailscale_only %}
      - "{{ tailscale_bind_ip }}:{{ ports.tubearchivist }}:8000/tcp" # Tailscale only
{% else %}
      - "{{ ports.tubearchivist }}:8000/tcp" # All interfaces
{% endif %}
    networks:
      - arrs_network
    depends_on:
      - tubearchivist-es
      - tubearchivist-redis
    security_opt:
      - no-new-privileges:true
    restart: always
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health/"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
    labels:
      - "com.centurylinklabs.watchtower.enable=true"
{% if watchtower_enabled %}
watchtower:
image: containrrr/watchtower:1.7.1
container_name: watchtower
environment:
- TZ={{ timezone }}
- WATCHTOWER_SCHEDULE={{ watchtower_schedule }}
- WATCHTOWER_CLEANUP={{ watchtower_cleanup | lower }}
- WATCHTOWER_LABEL_ENABLE=true
- WATCHTOWER_INCLUDE_RESTARTING=true
- DOCKER_API_VERSION=1.44
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- {{ docker_root }}/watchtower:/config
networks:
- arrs_network
security_opt:
- no-new-privileges:true
restart: always
labels:
- "com.centurylinklabs.watchtower.enable=false"
{% endif %}
{% if log_rotation_enabled %}
logrotate:
image: blacklabelops/logrotate:latest
container_name: logrotate
environment:
- LOGS_DIRECTORIES=/var/lib/docker/containers /logs
- LOGROTATE_INTERVAL=daily
- LOGROTATE_COPIES=7
- LOGROTATE_SIZE=100M
volumes:
- /var/lib/docker/containers:/var/lib/docker/containers:ro
- {{ docker_root }}/logs:/logs
networks:
- arrs_network
restart: always
labels:
- "com.centurylinklabs.watchtower.enable=true"
{% endif %}
networks:
arrs_network:
driver: bridge
ipam:
config:
- subnet: {{ docker_network_subnet }}
gateway: {{ docker_network_gateway }}
volumes:
  # NOTE(review): these named volumes are declared but never referenced by any
  # service in this file — every service mounts bind paths under
  # {{ docker_root }} instead. Confirm nothing external uses them, then remove.
  sonarr_config:
    driver: local
  radarr_config:
    driver: local
  lidarr_config:
    driver: local
  bazarr_config:
    driver: local
  prowlarr_config:
    driver: local

View File

@@ -0,0 +1,27 @@
# Docker container log rotation configuration
# Generated by Ansible
#
# NOTE: dockerd keeps each container's json-file log open and has no
# "reopen logs" signal, so rotation must use copytruncate. The previous
# create + "kill -USR1 dockerd" postrotate did not make the daemon reopen its
# logs (SIGUSR1 makes dockerd dump goroutine stack traces) and left it
# writing to the already-rotated file.
/var/lib/docker/containers/*/*.log {
    daily
    rotate {{ log_max_files }}
    size {{ log_max_size }}
    compress
    delaycompress
    missingok
    notifempty
    copytruncate
}

# Stack/script logs under the Docker root. These files are opened in append
# mode on every write, so ordinary create-based rotation is safe here.
{{ docker_root }}/logs/*.log {
    daily
    rotate {{ log_max_files }}
    size {{ log_max_size }}
    compress
    delaycompress
    missingok
    notifempty
    create 0644 {{ docker_user }} {{ docker_group }}
}

View File

@@ -0,0 +1,9 @@
# Rotation rule for Docker's json-file container logs (copytruncate is
# required because dockerd keeps the log file open).
# NOTE(review): this overlaps with the docker-logrotate template that also
# rotates /var/lib/docker/containers/*/*.log — confirm only one of the two
# is deployed on a given host.
/var/lib/docker/containers/*/*.log {
    rotate 7
    daily
    compress
    size=1M
    missingok
    delaycompress
    copytruncate
}

View File

@@ -0,0 +1,104 @@
#!/bin/bash
# Docker monitoring script for Arrs Media Stack
# Generated by Ansible
#
# Checks daemon liveness, per-container resource usage, service health and
# restart counts, recent error logs, and runs an emergency prune when the
# Docker disk fills up. All findings are appended to a daily log file.

LOG_DIR="{{ docker_root }}/logs/arrs"
LOG_FILE="$LOG_DIR/docker-monitor-$(date '+%Y%m%d').log"

# Ensure log directory exists
mkdir -p "$LOG_DIR"

# Append a message to the log, timestamped at write time.
log_with_timestamp() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" >> "$LOG_FILE"
}

# Change to compose directory — every docker-compose call below depends on it,
# so bail out (instead of probing the wrong project) if it is missing.
if ! cd "{{ docker_compose_dir }}"; then
    log_with_timestamp "COMPOSE_DIR FAILED - Cannot cd to {{ docker_compose_dir }}"
    exit 1
fi

# Check Docker daemon
if ! docker info >/dev/null 2>&1; then
    log_with_timestamp "DOCKER_DAEMON FAILED - Docker daemon not responding"
    exit 1
fi
log_with_timestamp "DOCKER_DAEMON OK"

# Get container stats (single snapshot; '{{ '{{' }}' escapes Go template braces from Jinja)
CONTAINER_STATS=$(docker stats --no-stream --format "table {{ '{{.Container}}' }}\t{{ '{{.CPUPerc}}' }}\t{{ '{{.MemUsage}}' }}\t{{ '{{.MemPerc}}' }}\t{{ '{{.NetIO}}' }}\t{{ '{{.BlockIO}}' }}")

# Log container resource usage (skip the header row)
while IFS=$'\t' read -r container cpu mem_usage mem_perc net_io block_io; do
    if [[ "$container" != "CONTAINER" ]]; then
        log_with_timestamp "CONTAINER_STATS $container CPU:$cpu MEM:$mem_usage($mem_perc) NET:$net_io DISK:$block_io"
    fi
done <<< "$CONTAINER_STATS"

# Check individual service health
SERVICES=("sonarr" "radarr" "lidarr" "bazarr" "prowlarr" "watchtower")

for service in "${SERVICES[@]}"; do
    CID=$(docker-compose ps -q "$service" 2>/dev/null)
    # Empty CID yields an empty STATUS, which is handled as FAILED below.
    STATUS=$(docker inspect --format='{{ "{{.State.Status}}" }}' "$CID" 2>/dev/null)
    if [[ "$STATUS" == "running" ]]; then
        # Containers without a healthcheck produce an empty HEALTH — treat as OK.
        HEALTH=$(docker inspect --format='{{ "{{.State.Health.Status}}" }}' "$CID" 2>/dev/null)
        if [[ "$HEALTH" == "healthy" || -z "$HEALTH" ]]; then
            log_with_timestamp "SERVICE_$service OK"
        else
            log_with_timestamp "SERVICE_$service UNHEALTHY - Health status: $HEALTH"
        fi
        # Flag crash-looping containers (guard -n: empty value breaks -gt)
        RESTART_COUNT=$(docker inspect --format='{{ "{{.RestartCount}}" }}' "$CID" 2>/dev/null)
        if [[ -n "$RESTART_COUNT" && "$RESTART_COUNT" -gt 5 ]]; then
            log_with_timestamp "SERVICE_$service WARNING - High restart count: $RESTART_COUNT"
        fi
    else
        log_with_timestamp "SERVICE_$service FAILED - Status: $STATUS"
        # Try to restart the service
        log_with_timestamp "SERVICE_$service RESTART_ATTEMPT"
        docker-compose restart "$service" 2>/dev/null
    fi
done

# Check Docker system resources
DOCKER_SYSTEM_DF=$(docker system df --format "table {{ '{{.Type}}' }}\t{{ '{{.Total}}' }}\t{{ '{{.Active}}' }}\t{{ '{{.Size}}' }}\t{{ '{{.Reclaimable}}' }}")
log_with_timestamp "DOCKER_SYSTEM_DF $DOCKER_SYSTEM_DF"

# Check for stopped containers belonging to the stack
STOPPED_CONTAINERS=$(docker ps -a --filter "status=exited" --format "{{ '{{.Names}}' }}" | grep -E "(sonarr|radarr|lidarr|bazarr|prowlarr|watchtower)" || true)
if [[ -n "$STOPPED_CONTAINERS" ]]; then
    log_with_timestamp "STOPPED_CONTAINERS $STOPPED_CONTAINERS"
fi

# Check Docker logs for errors (last 5 minutes; GNU date)
FIVE_MIN_AGO=$(date -d '5 minutes ago' '+%Y-%m-%dT%H:%M:%S')
for service in "${SERVICES[@]}"; do
    # grep -c counts matching lines directly (replaces grep | wc -l)
    ERROR_COUNT=$(docker-compose logs --since="$FIVE_MIN_AGO" "$service" 2>/dev/null | grep -ic error)
    if [[ -n "$ERROR_COUNT" && "$ERROR_COUNT" -gt 0 ]]; then
        log_with_timestamp "SERVICE_$service ERRORS - $ERROR_COUNT errors in last 5 minutes"
    fi
done

# Cleanup old log files (keep 7 days)
find "$LOG_DIR" -name "docker-monitor-*.log" -mtime +7 -delete 2>/dev/null

# Cleanup Docker system if disk usage is high (-P: stable df field positions)
DISK_USAGE=$(df -P "{{ docker_root }}" | tail -1 | awk '{print $5}' | cut -d'%' -f1)
if [[ -n "$DISK_USAGE" && $DISK_USAGE -gt 85 ]]; then
    log_with_timestamp "CLEANUP_ATTEMPT Disk usage ${DISK_USAGE}% - Running Docker cleanup"
    docker system prune -f >/dev/null 2>&1
    docker image prune -f >/dev/null 2>&1
    log_with_timestamp "CLEANUP_COMPLETED Docker cleanup finished"
fi

exit 0

View File

@@ -0,0 +1,9 @@
# systemd drop-in for docker.service — raises resource limits and hardens
# restart behavior for a container-heavy host. Generated by Ansible.
[Service]
# Generous fd/process limits; containers inherit the daemon's limits.
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
TasksMax=infinity
# Let dockerd manage its own cgroup subtree.
Delegate=yes
# Stop only the main daemon process so containers survive daemon restarts
# (complements "live-restore": true in daemon.json).
KillMode=process
# Always restart the daemon if it exits, after a short delay.
Restart=always
RestartSec=5

View File

@@ -0,0 +1,18 @@
{
"log-driver": "json-file",
"log-opts": {
"max-size": "10m",
"max-file": "3"
},
"storage-driver": "overlay2",
"userland-proxy": false,
"no-new-privileges": true,
"seccomp-profile": "/etc/docker/seccomp.json",
"default-ulimits": {
"nofile": {
"Name": "nofile",
"Hard": 65536,
"Soft": 65536
}
}
}

25
templates/docker.env.j2 Normal file
View File

@@ -0,0 +1,25 @@
# Docker Environment Variables for Arrs Media Stack
# Generated by Ansible on {{ ansible_date_time.iso8601 }}
# User and Group IDs
PUID=1000
PGID=1000
# Timezone
TZ={{ timezone }}
# Paths
MEDIA_ROOT={{ media_root }}
DOCKER_ROOT={{ docker_root }}
COMPOSE_DIR={{ docker_compose_dir }}
# Network
DOCKER_NETWORK=arrs-network
# Restart Policy
RESTART_POLICY=unless-stopped
# Logging
LOG_DRIVER=json-file
LOG_MAX_SIZE=10m
LOG_MAX_FILE=3

View File

@@ -0,0 +1,314 @@
#!/bin/bash
# Health check dashboard script for Arrs Media Stack
# Generated by Ansible
# Prints a colored status report to the terminal and mirrors every check
# into a per-day log file under the stack's system log directory.
LOG_DIR="{{ docker_root }}/logs/system"
# One timestamp for the whole run so all log entries of this run match.
TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
# Log file rotates daily by embedding the date in its name.
DASHBOARD_LOG="$LOG_DIR/health-dashboard-$(date '+%Y%m%d').log"
# Ensure log directory exists
mkdir -p "$LOG_DIR"
# Append one timestamped entry to the daily dashboard log.
# Globals: TIMESTAMP (read), DASHBOARD_LOG (appended to).
log_health() {
  local entry="$1"
  printf '[%s] %s\n' "$TIMESTAMP" "$entry" >> "$DASHBOARD_LOG"
}
# Colors for terminal output
# (raw ANSI escape strings; interpreted at print time by 'echo -e')
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Print one colored status line to the terminal.
# $1 - service/check name, $2 - status keyword, $3 - extra detail text.
# Globals: RED/GREEN/YELLOW/BLUE/NC color escapes (read).
display_status() {
  local svc="$1" state="$2" extra="$3"
  local mark color
  case "$state" in
    "OK"|"RUNNING")           mark='✓'; color="$GREEN"  ;;
    "WARNING"|"DEGRADED")     mark='⚠'; color="$YELLOW" ;;
    "CRITICAL"|"FAILED"|"DOWN") mark='✗'; color="$RED"  ;;
    *)                        mark='';  color="$BLUE"   ;;   # informational
  esac
  echo -e "${color}${mark}${NC} $svc: ${color}$state${NC} $extra"
}
log_health "=== HEALTH DASHBOARD STARTED ==="
echo "=================================================================="
echo " ARRS MEDIA STACK HEALTH DASHBOARD"
echo "=================================================================="
echo "Generated: $TIMESTAMP"
echo "=================================================================="
# System Health
echo -e "\n${BLUE}SYSTEM HEALTH${NC}"
echo "------------------------------------------------------------------"
# CPU Usage
# NOTE(review): assumes 'top' prints "%Cpu(s):  N.N us, ..." so field 2 is the
# user-CPU percentage; the layout varies between top versions — confirm on the
# target. The threshold comparisons below also require 'bc' to be installed.
CPU_USAGE=$(top -bn1 | grep "Cpu(s)" | awk '{print $2}' | cut -d'%' -f1)
if (( $(echo "$CPU_USAGE > 80" | bc -l) )); then
display_status "CPU Usage" "CRITICAL" "(${CPU_USAGE}%)"
log_health "SYSTEM_HEALTH CPU_USAGE CRITICAL ${CPU_USAGE}%"
elif (( $(echo "$CPU_USAGE > 60" | bc -l) )); then
display_status "CPU Usage" "WARNING" "(${CPU_USAGE}%)"
log_health "SYSTEM_HEALTH CPU_USAGE WARNING ${CPU_USAGE}%"
else
display_status "CPU Usage" "OK" "(${CPU_USAGE}%)"
log_health "SYSTEM_HEALTH CPU_USAGE OK ${CPU_USAGE}%"
fi
# Memory Usage (used/total from 'free', as a percentage)
MEMORY_PERCENT=$(free | grep Mem | awk '{printf "%.1f", $3/$2 * 100.0}')
if (( $(echo "$MEMORY_PERCENT > 90" | bc -l) )); then
display_status "Memory Usage" "CRITICAL" "(${MEMORY_PERCENT}%)"
log_health "SYSTEM_HEALTH MEMORY_USAGE CRITICAL ${MEMORY_PERCENT}%"
elif (( $(echo "$MEMORY_PERCENT > 75" | bc -l) )); then
display_status "Memory Usage" "WARNING" "(${MEMORY_PERCENT}%)"
log_health "SYSTEM_HEALTH MEMORY_USAGE WARNING ${MEMORY_PERCENT}%"
else
display_status "Memory Usage" "OK" "(${MEMORY_PERCENT}%)"
log_health "SYSTEM_HEALTH MEMORY_USAGE OK ${MEMORY_PERCENT}%"
fi
# Disk Usage (df "Use%" column for the Docker root filesystem)
DISK_USAGE=$(df -h {{ docker_root }} | tail -1 | awk '{print $5}' | cut -d'%' -f1)
if [[ $DISK_USAGE -gt 90 ]]; then
display_status "Disk Usage" "CRITICAL" "(${DISK_USAGE}%)"
log_health "SYSTEM_HEALTH DISK_USAGE CRITICAL ${DISK_USAGE}%"
elif [[ $DISK_USAGE -gt 80 ]]; then
display_status "Disk Usage" "WARNING" "(${DISK_USAGE}%)"
log_health "SYSTEM_HEALTH DISK_USAGE WARNING ${DISK_USAGE}%"
else
display_status "Disk Usage" "OK" "(${DISK_USAGE}%)"
log_health "SYSTEM_HEALTH DISK_USAGE OK ${DISK_USAGE}%"
fi
# Load Average (1-minute value from 'uptime')
LOAD_1MIN=$(uptime | awk -F'load average:' '{print $2}' | awk '{print $1}' | cut -d',' -f1 | xargs)
if (( $(echo "$LOAD_1MIN > 2.0" | bc -l) )); then
display_status "Load Average" "WARNING" "(${LOAD_1MIN})"
log_health "SYSTEM_HEALTH LOAD_AVERAGE WARNING ${LOAD_1MIN}"
else
display_status "Load Average" "OK" "(${LOAD_1MIN})"
log_health "SYSTEM_HEALTH LOAD_AVERAGE OK ${LOAD_1MIN}"
fi
# Docker Services
echo -e "\n${BLUE}DOCKER SERVICES${NC}"
echo "------------------------------------------------------------------"
if command -v docker >/dev/null 2>&1; then
cd {{ docker_compose_dir }}
SERVICES=("sonarr" "radarr" "lidarr" "bazarr" "prowlarr" "whisparr" "deluge" "sabnzbd" "plex" "tautulli" "jellyseerr" "tubearchivist" "gluetun" "watchtower" "logrotate")
for service in "${SERVICES[@]}"; do
CONTAINER_ID=$(docker-compose ps -q "$service" 2>/dev/null)
if [[ -n "$CONTAINER_ID" ]]; then
# The doubled braces make Jinja2 render literal Go-template
# placeholders for the docker CLI.
CONTAINER_STATUS=$(docker inspect "$CONTAINER_ID" --format='{{ "{{.State.Status}}" }}' 2>/dev/null)
CONTAINER_HEALTH=$(docker inspect "$CONTAINER_ID" --format='{{ "{{.State.Health.Status}}" }}' 2>/dev/null)
if [[ "$CONTAINER_STATUS" == "running" ]]; then
# Treat "no healthcheck defined" (empty or "<no value>") as healthy.
if [[ "$CONTAINER_HEALTH" == "healthy" ]] || [[ -z "$CONTAINER_HEALTH" ]] || [[ "$CONTAINER_HEALTH" == "<no value>" ]]; then
display_status "$service" "RUNNING" ""
log_health "DOCKER_SERVICE $service RUNNING"
else
display_status "$service" "DEGRADED" "(health: $CONTAINER_HEALTH)"
log_health "DOCKER_SERVICE $service DEGRADED $CONTAINER_HEALTH"
fi
else
display_status "$service" "DOWN" "(status: $CONTAINER_STATUS)"
log_health "DOCKER_SERVICE $service DOWN $CONTAINER_STATUS"
fi
else
display_status "$service" "NOT_FOUND" ""
log_health "DOCKER_SERVICE $service NOT_FOUND"
fi
done
else
display_status "Docker" "NOT_INSTALLED" ""
log_health "DOCKER_SERVICE docker NOT_INSTALLED"
fi
# Network Connectivity
echo -e "\n${BLUE}NETWORK CONNECTIVITY${NC}"
echo "------------------------------------------------------------------"
# Internet connectivity
if ping -c 1 8.8.8.8 >/dev/null 2>&1; then
display_status "Internet" "OK" ""
log_health "NETWORK_CONNECTIVITY internet OK"
else
display_status "Internet" "FAILED" ""
log_health "NETWORK_CONNECTIVITY internet FAILED"
fi
# DNS resolution
if nslookup google.com >/dev/null 2>&1; then
display_status "DNS Resolution" "OK" ""
log_health "NETWORK_CONNECTIVITY dns OK"
else
display_status "DNS Resolution" "FAILED" ""
log_health "NETWORK_CONNECTIVITY dns FAILED"
fi
# Service ports - Check on Tailscale network interface
TAILSCALE_IP="{{ tailscale_bind_ip }}"
SERVICES_PORTS=(
"sonarr:{{ ports.sonarr }}"
"radarr:{{ ports.radarr }}"
"lidarr:{{ ports.lidarr }}"
"bazarr:{{ ports.bazarr }}"
"prowlarr:{{ ports.prowlarr }}"
"deluge:{{ ports.deluge }}"
"sabnzbd:{{ ports.sabnzbd }}"
"plex:{{ ports.plex }}"
"tautulli:{{ ports.tautulli }}"
"jellyseerr:{{ ports.jellyseerr }}"
"tubearchivist:{{ ports.tubearchivist }}"
"whisparr:{{ ports.whisparr }}"
)
# Probe each "name:port" pair with netcat (requires 'nc' on the host).
for service_port in "${SERVICES_PORTS[@]}"; do
SERVICE=$(echo "$service_port" | cut -d: -f1)
PORT=$(echo "$service_port" | cut -d: -f2)
# Check on Tailscale IP first, fallback to localhost for services that might bind to both
if nc -z "$TAILSCALE_IP" "$PORT" 2>/dev/null; then
display_status "$SERVICE Port" "OK" "(port $PORT on $TAILSCALE_IP)"
log_health "NETWORK_CONNECTIVITY ${SERVICE}_port OK $PORT $TAILSCALE_IP"
elif nc -z localhost "$PORT" 2>/dev/null; then
display_status "$SERVICE Port" "OK" "(port $PORT on localhost)"
log_health "NETWORK_CONNECTIVITY ${SERVICE}_port OK $PORT localhost"
else
display_status "$SERVICE Port" "FAILED" "(port $PORT)"
log_health "NETWORK_CONNECTIVITY ${SERVICE}_port FAILED $PORT"
fi
done
# Security Status
echo -e "\n${BLUE}SECURITY STATUS${NC}"
echo "------------------------------------------------------------------"
# UFW Status
if command -v ufw >/dev/null 2>&1; then
UFW_STATUS=$(ufw status | head -1 | awk '{print $2}')
if [[ "$UFW_STATUS" == "active" ]]; then
display_status "UFW Firewall" "OK" "(active)"
log_health "SECURITY_STATUS ufw OK active"
else
display_status "UFW Firewall" "WARNING" "(inactive)"
log_health "SECURITY_STATUS ufw WARNING inactive"
fi
fi
# Fail2ban Status
if command -v fail2ban-client >/dev/null 2>&1; then
if systemctl is-active fail2ban >/dev/null 2>&1; then
display_status "Fail2ban" "OK" "(active)"
log_health "SECURITY_STATUS fail2ban OK active"
else
display_status "Fail2ban" "WARNING" "(inactive)"
log_health "SECURITY_STATUS fail2ban WARNING inactive"
fi
fi
# Recent failed login attempts
# NOTE(review): traditional syslog timestamps space-pad single-digit days
# ("Nov  3") while '%b %d' yields "Nov 03", so days 1-9 may never match —
# confirm the auth.log timestamp format on the target ('%b %e' would track it).
FAILED_LOGINS=$(grep "Failed password" /var/log/auth.log 2>/dev/null | grep "$(date '+%b %d')" | wc -l)
if [[ $FAILED_LOGINS -gt 10 ]]; then
display_status "Failed Logins" "WARNING" "($FAILED_LOGINS today)"
log_health "SECURITY_STATUS failed_logins WARNING $FAILED_LOGINS"
elif [[ $FAILED_LOGINS -gt 0 ]]; then
display_status "Failed Logins" "OK" "($FAILED_LOGINS today)"
log_health "SECURITY_STATUS failed_logins OK $FAILED_LOGINS"
else
display_status "Failed Logins" "OK" "(none today)"
log_health "SECURITY_STATUS failed_logins OK 0"
fi
# Storage Status
echo -e "\n${BLUE}STORAGE STATUS${NC}"
echo "------------------------------------------------------------------"
# Media directories
MEDIA_DIRS=(
"{{ media_root }}/movies"
"{{ media_root }}/tv"
"{{ media_root }}/music"
"{{ media_root }}/downloads"
)
# Size + file count per media directory (du/find can be slow on large trees).
for media_dir in "${MEDIA_DIRS[@]}"; do
DIR_NAME=$(basename "$media_dir")
if [[ -d "$media_dir" ]]; then
SIZE=$(du -sh "$media_dir" 2>/dev/null | cut -f1)
FILE_COUNT=$(find "$media_dir" -type f 2>/dev/null | wc -l)
display_status "$DIR_NAME Directory" "OK" "($SIZE, $FILE_COUNT files)"
log_health "STORAGE_STATUS ${DIR_NAME}_directory OK $SIZE $FILE_COUNT"
else
display_status "$DIR_NAME Directory" "NOT_FOUND" ""
log_health "STORAGE_STATUS ${DIR_NAME}_directory NOT_FOUND"
fi
done
# Recent Activity Summary
echo -e "\n${BLUE}RECENT ACTIVITY${NC}"
echo "------------------------------------------------------------------"
# Check for recent downloads (files modified within the last 24 hours)
RECENT_DOWNLOADS=0
for media_dir in "${MEDIA_DIRS[@]}"; do
if [[ -d "$media_dir" ]]; then
COUNT=$(find "$media_dir" -type f -mtime -1 2>/dev/null | wc -l)
RECENT_DOWNLOADS=$((RECENT_DOWNLOADS + COUNT))
fi
done
display_status "Recent Downloads" "INFO" "($RECENT_DOWNLOADS files in last 24h)"
log_health "ACTIVITY_SUMMARY recent_downloads INFO $RECENT_DOWNLOADS"
# System uptime
UPTIME=$(uptime -p)
display_status "System Uptime" "INFO" "($UPTIME)"
log_health "ACTIVITY_SUMMARY system_uptime INFO $UPTIME"
# Overall Health Summary
echo -e "\n${BLUE}OVERALL HEALTH SUMMARY${NC}"
echo "=================================================================="
# Count issues recorded in the dashboard log.
# Idiom fix: 'grep -c' replaces 'grep | wc -l' (same count, one process;
# prints 0 when nothing matches, so the assignments stay safe).
# NOTE(review): DASHBOARD_LOG is a per-day file, so these totals include
# earlier runs from the same day — confirm per-run counting wasn't intended.
CRITICAL_ISSUES=$(grep -c "CRITICAL" "$DASHBOARD_LOG")
WARNING_ISSUES=$(grep -c "WARNING" "$DASHBOARD_LOG")
if [[ $CRITICAL_ISSUES -gt 0 ]]; then
echo -e "${RED}SYSTEM STATUS: CRITICAL${NC} ($CRITICAL_ISSUES critical issues)"
log_health "OVERALL_HEALTH CRITICAL $CRITICAL_ISSUES"
elif [[ $WARNING_ISSUES -gt 0 ]]; then
echo -e "${YELLOW}SYSTEM STATUS: WARNING${NC} ($WARNING_ISSUES warnings)"
log_health "OVERALL_HEALTH WARNING $WARNING_ISSUES"
else
echo -e "${GREEN}SYSTEM STATUS: HEALTHY${NC}"
log_health "OVERALL_HEALTH HEALTHY 0"
fi
echo "=================================================================="
echo "Dashboard log: $DASHBOARD_LOG"
echo "=================================================================="
log_health "=== HEALTH DASHBOARD COMPLETED ==="
# Cleanup old dashboard logs (keep 7 days)
find "$LOG_DIR" -name "health-dashboard-*.log" -mtime +7 -delete 2>/dev/null
exit 0

26
templates/jail.local.j2 Normal file
View File

@@ -0,0 +1,26 @@
# Fail2Ban jail overrides for the Arrs stack (rendered by Ansible).
[DEFAULT]
# Ban hosts for one hour:
bantime = 3600
# Override /etc/fail2ban/jail.d/00-firewalld.conf:
banaction = iptables-multiport
# SSH brute-force protection: 3 failures within 10 minutes -> 1 hour ban.
[sshd]
enabled = true
port = ssh
filter = sshd
logpath = /var/log/auth.log
maxretry = 3
bantime = 3600
findtime = 600
{% if plex_public_access | default(false) %}
# Plex jail is only rendered when Plex is exposed publicly.
# Uses the custom 'plex' filter; 5 failures within 10 minutes -> 2 hour ban.
[plex]
enabled = true
port = 32400
filter = plex
logpath = /home/docker/logs/plex/*.log
maxretry = 5
bantime = 7200
findtime = 600
{% endif %}

View File

@@ -0,0 +1,94 @@
#!/bin/bash
# Log aggregation script for Arrs Media Stack
# Generated by Ansible
# Collects the day's monitoring logs, compose logs and journal entries into
# one per-day aggregate file with a [TIMESTAMP] [SOURCE] prefix per line.
LOG_DIR="{{ docker_root }}/logs"
SYSTEM_LOG_DIR="$LOG_DIR/system"
ARRS_LOG_DIR="$LOG_DIR/arrs"
# Aggregate rotates daily by embedding the date in its name.
AGGREGATED_LOG="$LOG_DIR/aggregated-$(date '+%Y%m%d').log"
TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
# Ensure log directories exist
mkdir -p "$SYSTEM_LOG_DIR" "$ARRS_LOG_DIR"
# Copy each line of a log file into the aggregated log, prefixed with the
# run timestamp and a source tag.
# Globals: TIMESTAMP (read), AGGREGATED_LOG (appended to).
# $1 - source tag (e.g. SYSTEM, DOCKER); $2 - log file path (skipped if absent).
aggregate_logs() {
local source="$1"
local log_file="$2"
local line
if [[ -f "$log_file" ]]; then
# '|| [[ -n $line ]]' keeps the final line even when the file lacks a
# trailing newline (the previous loop silently dropped it).
while IFS= read -r line || [[ -n "$line" ]]; do
echo "[$TIMESTAMP] [$source] $line" >> "$AGGREGATED_LOG"
done < "$log_file"
fi
}
# Start aggregation
echo "[$TIMESTAMP] [AGGREGATOR] Starting log aggregation" >> "$AGGREGATED_LOG"
# Aggregate system monitoring logs (today's file only)
for log_file in "$SYSTEM_LOG_DIR"/system-monitor-$(date '+%Y%m%d').log; do
if [[ -f "$log_file" ]]; then
aggregate_logs "SYSTEM" "$log_file"
fi
done
# Aggregate Docker monitoring logs (today's file only)
for log_file in "$ARRS_LOG_DIR"/docker-monitor-$(date '+%Y%m%d').log; do
if [[ -f "$log_file" ]]; then
aggregate_logs "DOCKER" "$log_file"
fi
done
# Aggregate Docker Compose logs (last 100 lines)
cd {{ docker_compose_dir }}
SERVICES=("sonarr" "radarr" "lidarr" "bazarr" "prowlarr" "watchtower")
for service in "${SERVICES[@]}"; do
echo "[$TIMESTAMP] [AGGREGATOR] Collecting logs for $service" >> "$AGGREGATED_LOG"
# The while body runs in a pipeline subshell, which is fine here because
# it only appends to the file and sets no variables needed afterwards.
docker-compose logs --tail=100 "$service" 2>/dev/null | while IFS= read -r line; do
echo "[$TIMESTAMP] [${service^^}] $line" >> "$AGGREGATED_LOG"
done
done
# Aggregate system logs (errors and warnings from the last hour)
echo "[$TIMESTAMP] [AGGREGATOR] Collecting system errors" >> "$AGGREGATED_LOG"
journalctl --since="1 hour ago" --priority=err --no-pager -q | while IFS= read -r line; do
echo "[$TIMESTAMP] [SYSLOG_ERROR] $line" >> "$AGGREGATED_LOG"
done
journalctl --since="1 hour ago" --priority=warning --no-pager -q | while IFS= read -r line; do
echo "[$TIMESTAMP] [SYSLOG_WARNING] $line" >> "$AGGREGATED_LOG"
done
# Aggregate Docker daemon logs
echo "[$TIMESTAMP] [AGGREGATOR] Collecting Docker daemon logs" >> "$AGGREGATED_LOG"
journalctl -u docker --since="1 hour ago" --no-pager -q | while IFS= read -r line; do
echo "[$TIMESTAMP] [DOCKER_DAEMON] $line" >> "$AGGREGATED_LOG"
done
# Generate summary
echo "[$TIMESTAMP] [AGGREGATOR] Generating summary" >> "$AGGREGATED_LOG"
# Count errors and warnings.
# BUG FIX: 'grep -c' already prints 0 when there is no match (while exiting
# non-zero), so the old '|| echo "0"' appended a second line and produced a
# two-line value like "0\n0". Use ${var:-0} only to cover a missing file.
ERROR_COUNT=$(grep -c "ERROR\|FAILED" "$AGGREGATED_LOG" 2>/dev/null)
ERROR_COUNT=${ERROR_COUNT:-0}
WARNING_COUNT=$(grep -c "WARNING\|WARN" "$AGGREGATED_LOG" 2>/dev/null)
WARNING_COUNT=${WARNING_COUNT:-0}
echo "[$TIMESTAMP] [SUMMARY] Errors: $ERROR_COUNT, Warnings: $WARNING_COUNT" >> "$AGGREGATED_LOG"
# Record the five most recent critical entries, if any.
CRITICAL_ISSUES=$(grep -E "(FAILED|ERROR|CRITICAL|FATAL)" "$AGGREGATED_LOG" | tail -5)
if [[ -n "$CRITICAL_ISSUES" ]]; then
echo "[$TIMESTAMP] [SUMMARY] Recent critical issues:" >> "$AGGREGATED_LOG"
echo "$CRITICAL_ISSUES" >> "$AGGREGATED_LOG"
fi
echo "[$TIMESTAMP] [AGGREGATOR] Log aggregation completed" >> "$AGGREGATED_LOG"
# Cleanup old aggregated logs (keep 7 days)
find "$LOG_DIR" -name "aggregated-*.log" -mtime +7 -delete 2>/dev/null
# Compress yesterday's and older logs, keeping today's aggregate readable.
find "$LOG_DIR" -name "*.log" -mtime +1 ! -name "aggregated-$(date '+%Y%m%d').log" -exec gzip {} \; 2>/dev/null
exit 0

218
templates/manage-arrs.sh.j2 Normal file
View File

@@ -0,0 +1,218 @@
#!/bin/bash
# Arrs Media Stack Management Script
# Generated by Ansible - Customized for your VPS
# Usage: manage-arrs.sh {start|stop|restart|status|logs|update|backup|restore}
set -e
COMPOSE_DIR="{{ docker_compose_dir }}"
COMPOSE_FILE="$COMPOSE_DIR/docker-compose.yml"
# Colors for output (ANSI escapes, interpreted by 'echo -e' below)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Colored console output helpers. Each prefixes its message with a level tag;
# the color variables are defined at the top of this script.
# printf '%b' interprets the embedded escape sequences exactly like 'echo -e'.
print_status() {
  printf '%b\n' "${GREEN}[INFO]${NC} $1"
}
print_warning() {
  printf '%b\n' "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
  printf '%b\n' "${RED}[ERROR]${NC} $1"
}
print_header() {
  printf '%b\n' "${BLUE}=== $1 ===${NC}"
}
# Check if running as docker user
# Compares the effective UID against the Ansible-configured service account;
# every management command requires that account so file ownership stays sane.
check_user() {
if [[ $EUID -ne $(id -u {{ docker_user }}) ]]; then
print_error "This script must be run as the {{ docker_user }} user"
exit 1
fi
}
# Enter the docker-compose directory, aborting the script if it is missing.
# Globals: COMPOSE_DIR (read).
cd_compose_dir() {
  [[ -d "$COMPOSE_DIR" ]] || {
    print_error "Compose directory not found: $COMPOSE_DIR"
    exit 1
  }
  cd "$COMPOSE_DIR"
}
# Print the command-line help text to stdout.
usage() {
  cat <<EOF
Usage: $0 {start|stop|restart|status|logs|update|backup|restore}

Commands:
 start - Start all Arrs services
 stop - Stop all Arrs services
 restart - Restart all Arrs services
 status - Show status of all services
 logs - Show logs for all services
 update - Update all Docker images and restart services
 backup - Create backup of configurations
 restore - Restore configurations from backup

EOF
}
# Start services (detached) and print the resulting status.
start_services() {
print_header "Starting Arrs Media Stack"
cd_compose_dir
docker-compose up -d
print_status "All services started"
show_status
}
# Stop services and remove their containers.
stop_services() {
print_header "Stopping Arrs Media Stack"
cd_compose_dir
docker-compose down
print_status "All services stopped"
}
# Restart services in place (containers are not recreated).
restart_services() {
print_header "Restarting Arrs Media Stack"
cd_compose_dir
docker-compose restart
print_status "All services restarted"
show_status
}
# Show status of the compose project plus the service URLs.
show_status() {
print_header "Arrs Media Stack Status"
cd_compose_dir
docker-compose ps
echo ""
print_status "Service URLs (via Tailscale):"
# NOTE(review): 'hostname -I' returns the first configured address, which is
# not necessarily the Tailscale IP on a multi-homed host — confirm.
echo " Sonarr: http://$(hostname -I | awk '{print $1}'):{{ ports.sonarr }}"
echo " Radarr: http://$(hostname -I | awk '{print $1}'):{{ ports.radarr }}"
echo " Lidarr: http://$(hostname -I | awk '{print $1}'):{{ ports.lidarr }}"
echo " Bazarr: http://$(hostname -I | awk '{print $1}'):{{ ports.bazarr }}"
echo " Prowlarr: http://$(hostname -I | awk '{print $1}'):{{ ports.prowlarr }}"
}
# Show logs
# Invoked as 'show_logs "$@"', so inside this function $1 is the literal
# "logs" subcommand and $2 (if given) is the single service to follow.
show_logs() {
print_header "Arrs Media Stack Logs"
cd_compose_dir
if [[ -n "$2" ]]; then
docker-compose logs -f "$2"
else
docker-compose logs -f --tail=50
fi
}
# Update services: pull newest images, recreate containers, prune old images.
update_services() {
print_header "Updating Arrs Media Stack"
cd_compose_dir
print_status "Pulling latest images..."
docker-compose pull
print_status "Recreating containers..."
docker-compose up -d --force-recreate
print_status "Cleaning up old images..."
docker image prune -f
print_status "Update complete"
show_status
}
# Backup configurations
# Archives the per-service config dirs plus the compose dir into a
# timestamped tarball under the configured backup directory.
backup_configs() {
print_header "Backing up Arrs Configurations"
BACKUP_DIR="{{ backup_dir }}"
BACKUP_FILE="$BACKUP_DIR/arrs-backup-$(date +%Y%m%d-%H%M%S).tar.gz"
mkdir -p "$BACKUP_DIR"
print_status "Creating backup: $BACKUP_FILE"
tar -czf "$BACKUP_FILE" \
-C "{{ docker_root }}" \
sonarr radarr lidarr bazarr prowlarr compose
print_status "Backup created successfully"
ls -lh "$BACKUP_FILE"
}
# Restore configurations
# Invoked as 'restore_configs "$@"', so $2 is the backup tarball path.
# Prompts for confirmation, stops the stack, unpacks over the current
# configs, then starts the stack again.
restore_configs() {
print_header "Restoring Arrs Configurations"
if [[ -z "$2" ]]; then
print_error "Please specify backup file to restore"
echo "Usage: $0 restore <backup-file>"
exit 1
fi
BACKUP_FILE="$2"
if [[ ! -f "$BACKUP_FILE" ]]; then
print_error "Backup file not found: $BACKUP_FILE"
exit 1
fi
print_warning "This will overwrite current configurations!"
# Single-keystroke confirmation; anything but y/Y aborts.
read -p "Are you sure? (y/N): " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
print_status "Restore cancelled"
exit 0
fi
print_status "Stopping services..."
stop_services
print_status "Restoring from: $BACKUP_FILE"
tar -xzf "$BACKUP_FILE" -C "{{ docker_root }}"
print_status "Starting services..."
start_services
print_status "Restore complete"
}
# Main script logic
# All commands require running as the configured docker user.
check_user
case "$1" in
start)
start_services
;;
stop)
stop_services
;;
restart)
restart_services
;;
status)
show_status
;;
logs)
# Pass all args through so an optional service name reaches show_logs as $2.
show_logs "$@"
;;
update)
update_services
;;
backup)
backup_configs
;;
restore)
# Pass all args through so the backup path reaches restore_configs as $2.
restore_configs "$@"
;;
*)
usage
exit 1
;;
esac
exit 0

View File

@@ -0,0 +1,237 @@
#!/bin/bash
# Network monitoring script for Arrs Media Stack
# Generated by Ansible
# Records interface stats, connectivity tests, service port probes and
# firewall status into a per-day log file.
LOG_DIR="{{ docker_root }}/logs/system"
TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
# Log file rotates daily by embedding the date in its name.
NET_LOG="$LOG_DIR/network-monitor-$(date '+%Y%m%d').log"
# Ensure log directory exists
mkdir -p "$LOG_DIR"
# Append one timestamped entry to today's network-monitor log.
# Globals: TIMESTAMP (read), NET_LOG (appended to).
log_net() {
  local msg="$1"
  printf '[%s] %s\n' "$TIMESTAMP" "$msg" >> "$NET_LOG"
}
log_net "=== NETWORK MONITORING ==="
# Network interface information
log_net "=== NETWORK INTERFACES ==="
# Capture interface header lines and their inet addresses only.
ip addr show | grep -E "^[0-9]+:|inet " | while IFS= read -r line; do
log_net "INTERFACE $line"
done
# Default route
DEFAULT_ROUTE=$(ip route | grep default)
log_net "DEFAULT_ROUTE $DEFAULT_ROUTE"
# Network statistics
log_net "=== NETWORK STATISTICS ==="
# Interface carrying the default route (field 5 of 'ip route' output).
MAIN_INTERFACE=$(ip route | grep default | awk '{print $5}' | head -1)
if [[ -n "$MAIN_INTERFACE" ]]; then
# Interface statistics (sysfs counters; default to 0 if unreadable)
RX_BYTES=$(cat /sys/class/net/$MAIN_INTERFACE/statistics/rx_bytes 2>/dev/null || echo "0")
TX_BYTES=$(cat /sys/class/net/$MAIN_INTERFACE/statistics/tx_bytes 2>/dev/null || echo "0")
RX_PACKETS=$(cat /sys/class/net/$MAIN_INTERFACE/statistics/rx_packets 2>/dev/null || echo "0")
TX_PACKETS=$(cat /sys/class/net/$MAIN_INTERFACE/statistics/tx_packets 2>/dev/null || echo "0")
RX_ERRORS=$(cat /sys/class/net/$MAIN_INTERFACE/statistics/rx_errors 2>/dev/null || echo "0")
TX_ERRORS=$(cat /sys/class/net/$MAIN_INTERFACE/statistics/tx_errors 2>/dev/null || echo "0")
RX_DROPPED=$(cat /sys/class/net/$MAIN_INTERFACE/statistics/rx_dropped 2>/dev/null || echo "0")
TX_DROPPED=$(cat /sys/class/net/$MAIN_INTERFACE/statistics/tx_dropped 2>/dev/null || echo "0")
# Convert bytes to human readable
RX_MB=$((RX_BYTES / 1024 / 1024))
TX_MB=$((TX_BYTES / 1024 / 1024))
log_net "INTERFACE_STATS $MAIN_INTERFACE - RX: ${RX_MB}MB (${RX_PACKETS} packets, ${RX_ERRORS} errors, ${RX_DROPPED} dropped)"
log_net "INTERFACE_STATS $MAIN_INTERFACE - TX: ${TX_MB}MB (${TX_PACKETS} packets, ${TX_ERRORS} errors, ${TX_DROPPED} dropped)"
# Check for high error rates
# NOTE(review): these counters are cumulative since boot, so the alerts
# trigger on lifetime totals rather than a recent error rate.
if [[ $RX_ERRORS -gt 100 ]]; then
log_net "ALERT_NETWORK High RX errors on $MAIN_INTERFACE: $RX_ERRORS"
fi
if [[ $TX_ERRORS -gt 100 ]]; then
log_net "ALERT_NETWORK High TX errors on $MAIN_INTERFACE: $TX_ERRORS"
fi
fi
# Network connectivity tests
log_net "=== CONNECTIVITY TESTS ==="
# Test DNS resolution
if nslookup google.com >/dev/null 2>&1; then
log_net "DNS_TEST OK - DNS resolution working"
else
log_net "DNS_TEST FAILED - DNS resolution not working"
fi
# Test internet connectivity
if ping -c 1 8.8.8.8 >/dev/null 2>&1; then
log_net "INTERNET_TEST OK - Internet connectivity working"
else
log_net "INTERNET_TEST FAILED - No internet connectivity"
fi
# Test Tailscale connectivity (if configured)
if command -v tailscale >/dev/null 2>&1; then
# BackendState is "Running" when the tailnet link is up (requires 'jq').
TAILSCALE_STATUS=$(tailscale status --json 2>/dev/null | jq -r '.BackendState' 2>/dev/null || echo "unknown")
log_net "TAILSCALE_STATUS $TAILSCALE_STATUS"
if [[ "$TAILSCALE_STATUS" == "Running" ]]; then
TAILSCALE_IP=$(tailscale ip -4 2>/dev/null || echo "unknown")
log_net "TAILSCALE_IP $TAILSCALE_IP"
fi
fi
# Port connectivity tests for Arrs services
log_net "=== SERVICE PORT TESTS ==="
SERVICES_PORTS=(
"sonarr:{{ ports.sonarr }}"
"radarr:{{ ports.radarr }}"
"lidarr:{{ ports.lidarr }}"
"bazarr:{{ ports.bazarr }}"
"prowlarr:{{ ports.prowlarr }}"
)
# Probe each "name:port" entry on localhost (requires 'nc').
for service_port in "${SERVICES_PORTS[@]}"; do
SERVICE=$(echo "$service_port" | cut -d: -f1)
PORT=$(echo "$service_port" | cut -d: -f2)
if nc -z localhost "$PORT" 2>/dev/null; then
log_net "PORT_TEST $SERVICE (port $PORT) - OK"
else
log_net "PORT_TEST $SERVICE (port $PORT) - FAILED"
fi
done
# Active network connections
log_net "=== ACTIVE CONNECTIONS ==="
ACTIVE_CONNECTIONS=$(netstat -tuln 2>/dev/null | grep LISTEN | wc -l)
log_net "LISTENING_PORTS Total listening ports: $ACTIVE_CONNECTIONS"
# Show listening ports for our services
netstat -tuln 2>/dev/null | grep -E ":{{ ports.sonarr }}|:{{ ports.radarr }}|:{{ ports.lidarr }}|:{{ ports.bazarr }}|:{{ ports.prowlarr }}" | while IFS= read -r line; do
log_net "SERVICE_PORT $line"
done
# Network load monitoring
log_net "=== NETWORK LOAD ==="
if command -v ss >/dev/null 2>&1; then
# NOTE(review): 'ss' emits a header line, so these wc -l counts appear to
# be off by one — confirm and consider 'ss -H'.
ESTABLISHED_CONNECTIONS=$(ss -t state established | wc -l)
TIME_WAIT_CONNECTIONS=$(ss -t state time-wait | wc -l)
log_net "CONNECTION_STATS Established: $ESTABLISHED_CONNECTIONS, Time-wait: $TIME_WAIT_CONNECTIONS"
# Check for high connection counts
if [[ $ESTABLISHED_CONNECTIONS -gt 1000 ]]; then
log_net "ALERT_NETWORK High number of established connections: $ESTABLISHED_CONNECTIONS"
fi
if [[ $TIME_WAIT_CONNECTIONS -gt 5000 ]]; then
log_net "ALERT_NETWORK High number of time-wait connections: $TIME_WAIT_CONNECTIONS"
fi
fi
# Docker network information
if command -v docker >/dev/null 2>&1; then
log_net "=== DOCKER NETWORK ==="
# Docker networks
# (single-brace Jinja quoting renders literal Go-template placeholders
# for the docker CLI)
DOCKER_NETWORKS=$(docker network ls --format "{{ '{{.Name}}' }}\t{{ '{{.Driver}}' }}\t{{ '{{.Scope}}' }}" 2>/dev/null)
if [[ -n "$DOCKER_NETWORKS" ]]; then
echo "$DOCKER_NETWORKS" | while IFS=$'\t' read -r name driver scope; do
log_net "DOCKER_NETWORK $name - Driver: $driver, Scope: $scope"
done
fi
# Container network stats
cd {{ docker_compose_dir }}
SERVICES=("sonarr" "radarr" "lidarr" "bazarr" "prowlarr" "watchtower")
for service in "${SERVICES[@]}"; do
CONTAINER_ID=$(docker-compose ps -q "$service" 2>/dev/null)
if [[ -n "$CONTAINER_ID" ]]; then
# Get container network stats
NET_STATS=$(docker stats --no-stream --format "{{ '{{.NetIO}}' }}" "$CONTAINER_ID" 2>/dev/null)
if [[ -n "$NET_STATS" ]]; then
log_net "CONTAINER_NETWORK $service - Network I/O: $NET_STATS"
fi
fi
done
fi
# Firewall status
log_net "=== FIREWALL STATUS ==="
if command -v ufw >/dev/null 2>&1; then
UFW_STATUS=$(ufw status 2>/dev/null | head -1)
log_net "UFW_STATUS $UFW_STATUS"
# Show rules for our ports
ufw status numbered 2>/dev/null | grep -E "{{ ports.sonarr }}|{{ ports.radarr }}|{{ ports.lidarr }}|{{ ports.bazarr }}|{{ ports.prowlarr }}" | while IFS= read -r line; do
log_net "UFW_RULE $line"
done
fi
# Network security checks
log_net "=== SECURITY CHECKS ==="
# Check for open ports that shouldn't be
# (anything listening besides SSH, the service ports, and loopback binds)
UNEXPECTED_PORTS=$(netstat -tuln 2>/dev/null | grep LISTEN | grep -v -E ":22|:{{ ports.sonarr }}|:{{ ports.radarr }}|:{{ ports.lidarr }}|:{{ ports.bazarr }}|:{{ ports.prowlarr }}|:127.0.0.1" | wc -l)
if [[ $UNEXPECTED_PORTS -gt 0 ]]; then
log_net "SECURITY_ALERT $UNEXPECTED_PORTS unexpected open ports detected"
netstat -tuln 2>/dev/null | grep LISTEN | grep -v -E ":22|:{{ ports.sonarr }}|:{{ ports.radarr }}|:{{ ports.lidarr }}|:{{ ports.bazarr }}|:{{ ports.prowlarr }}|:127.0.0.1" | while IFS= read -r line; do
log_net "UNEXPECTED_PORT $line"
done
fi
# Check for failed connection attempts (from auth.log)
# NOTE(review): '%b %d' zero-pads the day while classic syslog space-pads it
# ("Nov  3"), so days 1-9 may not match — confirm the auth.log format.
FAILED_CONNECTIONS=$(grep "Failed" /var/log/auth.log 2>/dev/null | grep "$(date '+%b %d')" | wc -l)
if [[ $FAILED_CONNECTIONS -gt 10 ]]; then
log_net "SECURITY_ALERT $FAILED_CONNECTIONS failed connection attempts today"
fi
# Bandwidth usage estimation
log_net "=== BANDWIDTH ESTIMATION ==="
if [[ -n "$MAIN_INTERFACE" ]]; then
# Read current stats
CURRENT_RX=$(cat /sys/class/net/$MAIN_INTERFACE/statistics/rx_bytes 2>/dev/null || echo "0")
CURRENT_TX=$(cat /sys/class/net/$MAIN_INTERFACE/statistics/tx_bytes 2>/dev/null || echo "0")
# Read previous stats if available
# (state file: line 1 = epoch seconds, line 2 = rx_bytes, line 3 = tx_bytes)
STATS_FILE="/tmp/network_stats_$MAIN_INTERFACE"
if [[ -f "$STATS_FILE" ]]; then
PREV_TIMESTAMP=$(head -1 "$STATS_FILE")
PREV_RX=$(sed -n '2p' "$STATS_FILE")
PREV_TX=$(sed -n '3p' "$STATS_FILE")
# Calculate time difference (in seconds)
TIME_DIFF=$(($(date +%s) - PREV_TIMESTAMP))
if [[ $TIME_DIFF -gt 0 ]]; then
# Calculate bandwidth (bytes per second)
RX_RATE=$(((CURRENT_RX - PREV_RX) / TIME_DIFF))
TX_RATE=$(((CURRENT_TX - PREV_TX) / TIME_DIFF))
# Convert to human readable (Mbps); requires 'bc'
RX_MBPS=$(echo "scale=2; $RX_RATE * 8 / 1024 / 1024" | bc -l 2>/dev/null || echo "0")
TX_MBPS=$(echo "scale=2; $TX_RATE * 8 / 1024 / 1024" | bc -l 2>/dev/null || echo "0")
log_net "BANDWIDTH_USAGE RX: ${RX_MBPS} Mbps, TX: ${TX_MBPS} Mbps (over ${TIME_DIFF}s)"
fi
fi
# Save current stats for next run
echo "$(date +%s)" > "$STATS_FILE"
echo "$CURRENT_RX" >> "$STATS_FILE"
echo "$CURRENT_TX" >> "$STATS_FILE"
fi
log_net "=== END NETWORK MONITORING ==="
# Cleanup old network logs (keep 7 days)
find "$LOG_DIR" -name "network-monitor-*.log" -mtime +7 -delete 2>/dev/null
exit 0
View File

@@ -0,0 +1,171 @@
#!/bin/bash
# Performance monitoring script for Arrs Media Stack
# Generated by Ansible
# Snapshots CPU/memory/disk/network metrics plus per-container stats into a
# per-day log file.
LOG_DIR="{{ docker_root }}/logs/system"
TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
# Log file rotates daily by embedding the date in its name.
PERF_LOG="$LOG_DIR/performance-$(date '+%Y%m%d').log"
# Ensure log directory exists
mkdir -p "$LOG_DIR"
# Append one timestamped entry to today's performance log.
# Globals: TIMESTAMP (read), PERF_LOG (appended to).
log_perf() {
  local msg="$1"
  printf '[%s] %s\n' "$TIMESTAMP" "$msg" >> "$PERF_LOG"
}
# System performance metrics
log_perf "=== PERFORMANCE METRICS ==="
# CPU Information
CPU_MODEL=$(grep "model name" /proc/cpuinfo | head -1 | cut -d: -f2 | xargs)
CPU_CORES=$(nproc)
# NOTE(review): assumes 'top' field 2 of the Cpu(s) line is the user-CPU
# percentage; output layout differs between top versions — confirm on target.
CPU_USAGE=$(top -bn1 | grep "Cpu(s)" | awk '{print $2}' | cut -d'%' -f1)
LOAD_1MIN=$(uptime | awk -F'load average:' '{print $2}' | awk '{print $1}' | cut -d',' -f1 | xargs)
LOAD_5MIN=$(uptime | awk -F'load average:' '{print $2}' | awk '{print $2}' | cut -d',' -f1 | xargs)
LOAD_15MIN=$(uptime | awk -F'load average:' '{print $2}' | awk '{print $3}' | xargs)
log_perf "CPU_INFO Model: $CPU_MODEL, Cores: $CPU_CORES"
log_perf "CPU_USAGE ${CPU_USAGE}%"
log_perf "LOAD_AVERAGE 1min: $LOAD_1MIN, 5min: $LOAD_5MIN, 15min: $LOAD_15MIN"
# Memory Information (human-readable columns from 'free -h')
MEMORY_TOTAL=$(free -h | grep Mem | awk '{print $2}')
MEMORY_USED=$(free -h | grep Mem | awk '{print $3}')
MEMORY_FREE=$(free -h | grep Mem | awk '{print $4}')
MEMORY_PERCENT=$(free | grep Mem | awk '{printf "%.1f", $3/$2 * 100.0}')
SWAP_USED=$(free -h | grep Swap | awk '{print $3}')
SWAP_TOTAL=$(free -h | grep Swap | awk '{print $2}')
log_perf "MEMORY_USAGE Total: $MEMORY_TOTAL, Used: $MEMORY_USED (${MEMORY_PERCENT}%), Free: $MEMORY_FREE"
log_perf "SWAP_USAGE Used: $SWAP_USED, Total: $SWAP_TOTAL"
# Disk Information (single df row for the Docker root filesystem)
DISK_USAGE=$(df -h {{ docker_root }} | tail -1)
DISK_TOTAL=$(echo "$DISK_USAGE" | awk '{print $2}')
DISK_USED=$(echo "$DISK_USAGE" | awk '{print $3}')
DISK_AVAILABLE=$(echo "$DISK_USAGE" | awk '{print $4}')
DISK_PERCENT=$(echo "$DISK_USAGE" | awk '{print $5}')
log_perf "DISK_USAGE {{ docker_root }} - Total: $DISK_TOTAL, Used: $DISK_USED ($DISK_PERCENT), Available: $DISK_AVAILABLE"
# Media directory disk usage if different
MEDIA_DISK_USAGE=$(df -h {{ media_root }} | tail -1)
MEDIA_DISK_TOTAL=$(echo "$MEDIA_DISK_USAGE" | awk '{print $2}')
MEDIA_DISK_USED=$(echo "$MEDIA_DISK_USAGE" | awk '{print $3}')
MEDIA_DISK_AVAILABLE=$(echo "$MEDIA_DISK_USAGE" | awk '{print $4}')
MEDIA_DISK_PERCENT=$(echo "$MEDIA_DISK_USAGE" | awk '{print $5}')
log_perf "MEDIA_DISK_USAGE {{ media_root }} - Total: $MEDIA_DISK_TOTAL, Used: $MEDIA_DISK_USED ($MEDIA_DISK_PERCENT), Available: $MEDIA_DISK_AVAILABLE"
# Network Statistics (cumulative sysfs counters for the default-route iface)
NETWORK_INTERFACE=$(ip route | grep default | awk '{print $5}' | head -1)
if [[ -n "$NETWORK_INTERFACE" ]]; then
RX_BYTES=$(cat /sys/class/net/$NETWORK_INTERFACE/statistics/rx_bytes)
TX_BYTES=$(cat /sys/class/net/$NETWORK_INTERFACE/statistics/tx_bytes)
RX_PACKETS=$(cat /sys/class/net/$NETWORK_INTERFACE/statistics/rx_packets)
TX_PACKETS=$(cat /sys/class/net/$NETWORK_INTERFACE/statistics/tx_packets)
# Convert bytes to human readable
RX_MB=$((RX_BYTES / 1024 / 1024))
TX_MB=$((TX_BYTES / 1024 / 1024))
log_perf "NETWORK_STATS Interface: $NETWORK_INTERFACE, RX: ${RX_MB}MB (${RX_PACKETS} packets), TX: ${TX_MB}MB (${TX_PACKETS} packets)"
fi
# Docker Performance
if command -v docker >/dev/null 2>&1; then
cd {{ docker_compose_dir }}
log_perf "=== DOCKER PERFORMANCE ==="
# Docker system info
DOCKER_CONTAINERS_RUNNING=$(docker ps -q | wc -l)
DOCKER_CONTAINERS_TOTAL=$(docker ps -aq | wc -l)
DOCKER_IMAGES=$(docker images -q | wc -l)
log_perf "DOCKER_STATS Running containers: $DOCKER_CONTAINERS_RUNNING, Total containers: $DOCKER_CONTAINERS_TOTAL, Images: $DOCKER_IMAGES"
# Container resource usage
SERVICES=("sonarr" "radarr" "lidarr" "bazarr" "prowlarr" "watchtower")
for service in "${SERVICES[@]}"; do
CONTAINER_ID=$(docker-compose ps -q "$service" 2>/dev/null)
if [[ -n "$CONTAINER_ID" ]]; then
# Get container stats (single snapshot)
# (Jinja quoting renders literal Go-template placeholders for docker)
STATS=$(docker stats --no-stream --format "{{ '{{.CPUPerc}}' }}\t{{ '{{.MemUsage}}' }}\t{{ '{{.MemPerc}}' }}\t{{ '{{.NetIO}}' }}\t{{ '{{.BlockIO}}' }}" "$CONTAINER_ID" 2>/dev/null)
if [[ -n "$STATS" ]]; then
CPU_PERC=$(echo "$STATS" | cut -f1)
MEM_USAGE=$(echo "$STATS" | cut -f2)
MEM_PERC=$(echo "$STATS" | cut -f3)
NET_IO=$(echo "$STATS" | cut -f4)
BLOCK_IO=$(echo "$STATS" | cut -f5)
log_perf "CONTAINER_PERF $service - CPU: $CPU_PERC, Memory: $MEM_USAGE ($MEM_PERC), Network: $NET_IO, Disk: $BLOCK_IO"
fi
fi
done
# Docker system disk usage
DOCKER_SYSTEM_DF=$(docker system df --format "{{ '{{.Type}}' }}\t{{ '{{.TotalCount}}' }}\t{{ '{{.Active}}' }}\t{{ '{{.Size}}' }}\t{{ '{{.Reclaimable}}' }}" 2>/dev/null)
if [[ -n "$DOCKER_SYSTEM_DF" ]]; then
log_perf "DOCKER_DISK_USAGE:"
echo "$DOCKER_SYSTEM_DF" | while IFS=$'\t' read -r type total active size reclaimable; do
log_perf " $type - Total: $total, Active: $active, Size: $size, Reclaimable: $reclaimable"
done
fi
fi
# Process Information
log_perf "=== TOP PROCESSES ==="
# Top 5 processes by CPU (head/tail skip the ps header row).
TOP_PROCESSES=$(ps aux --sort=-%cpu | head -6 | tail -5)
echo "$TOP_PROCESSES" | while IFS= read -r line; do
log_perf "TOP_CPU $line"
done
# Top 5 processes by memory.
TOP_MEMORY=$(ps aux --sort=-%mem | head -6 | tail -5)
echo "$TOP_MEMORY" | while IFS= read -r line; do
log_perf "TOP_MEM $line"
done
# I/O Statistics (only when sysstat's iostat is installed)
if command -v iostat >/dev/null 2>&1; then
log_perf "=== I/O STATISTICS ==="
IOSTAT_OUTPUT=$(iostat -x 1 1 | tail -n +4)
echo "$IOSTAT_OUTPUT" | while IFS= read -r line; do
if [[ -n "$line" && "$line" != *"Device"* ]]; then
log_perf "IOSTAT $line"
fi
done
fi
# Performance Alerts (threshold checks on the metrics gathered above;
# the floating-point comparisons require 'bc')
log_perf "=== PERFORMANCE ALERTS ==="
# CPU Alert (>80%)
if (( $(echo "$CPU_USAGE > 80" | bc -l) )); then
log_perf "ALERT_CPU High CPU usage: ${CPU_USAGE}%"
fi
# Memory Alert (>90%)
if (( $(echo "$MEMORY_PERCENT > 90" | bc -l) )); then
log_perf "ALERT_MEMORY High memory usage: ${MEMORY_PERCENT}%"
fi
# Disk Alert (>85%)
DISK_PERCENT_NUM=$(echo "$DISK_PERCENT" | cut -d'%' -f1)
if [[ $DISK_PERCENT_NUM -gt 85 ]]; then
log_perf "ALERT_DISK High disk usage: $DISK_PERCENT"
fi
# Load Average Alert (>2.0)
if (( $(echo "$LOAD_1MIN > 2.0" | bc -l) )); then
log_perf "ALERT_LOAD High load average: $LOAD_1MIN"
fi
log_perf "=== END PERFORMANCE MONITORING ==="
# Cleanup old performance logs (keep 7 days)
find "$LOG_DIR" -name "performance-*.log" -mtime +7 -delete 2>/dev/null
exit 0

View File

@@ -0,0 +1,13 @@
# Fail2ban filter for Plex Media Server
# Protects against brute force authentication attempts
#
# NOTE: fail2ban reads this file with a ConfigParser-style parser, so every
# additional pattern of a multi-line option MUST be indented under the
# option name as a continuation line — unindented patterns make the filter
# fail to load.

[Definition]

# Match failed authentication attempts in Plex logs.
# <HOST> is fail2ban's placeholder for the offending IP address.
failregex = ^.*Authentication failed for user.*from <HOST>.*$
            ^.*Invalid credentials.*from <HOST>.*$
            ^.*Failed login attempt.*from <HOST>.*$
            ^.*Unauthorized access attempt.*from <HOST>.*$

# Ignore successful authentications
ignoreregex = ^.*Authentication successful.*$
              ^.*Login successful.*$

View File

@@ -0,0 +1,9 @@
# Docker logging configuration
# Log Docker daemon messages to separate file
# (legacy property-based filter: route messages whose program name is
# "dockerd" to a dedicated file, then stop processing them further)
:programname, isequal, "dockerd" /var/log/docker.log
& stop
# Log container messages to separate files
# Dynamic-file template: one log file per program name under /var/log/docker/
$template DockerLogFormat,"/var/log/docker/%programname%.log"
# The '?' prefix selects the dynamic-file template above as the action target
:syslogtag, startswith, "docker/" ?DockerLogFormat
& stop

View File

@@ -0,0 +1,28 @@
#!/bin/bash
# SABnzbd Configuration Fix for Docker Service Communication
#
# SABnzbd rejects requests whose Host header is not in its host_whitelist,
# which prevents the *arr containers from reaching it by container hostname.
# This script waits for SABnzbd to write its config file, then appends the
# service hostnames to host_whitelist (keeping a backup of the original).
set -u

readonly SABNZBD_CONFIG="/config/sabnzbd.ini"
readonly EXTRA_HOSTS="sonarr, radarr, lidarr, bazarr, prowlarr, whisparr, gluetun, localhost, 127.0.0.1"

# Wait for SABnzbd to create its config file (first start can take a while).
while [ ! -f "$SABNZBD_CONFIG" ]; do
  echo "Waiting for SABnzbd config file to be created..."
  sleep 5
done

# Only patch once: skip if the service hostnames are already whitelisted.
if ! grep -q "sonarr, radarr, lidarr" "$SABNZBD_CONFIG"; then
  echo "Updating SABnzbd host_whitelist to allow *arr service connections..."

  # Keep a pristine copy so the change can be reverted by hand.
  cp "$SABNZBD_CONFIG" "${SABNZBD_CONFIG}.backup"

  # Append the extra hostnames to the existing host_whitelist line.
  # BUG FIX: the previous expression only matched when a comma followed the
  # first entry, so a single-entry whitelist was silently left untouched on
  # every run; matching the whole line handles both shapes.
  sed -i "s/^host_whitelist = .*/&, ${EXTRA_HOSTS}/" "$SABNZBD_CONFIG"

  echo "SABnzbd host_whitelist updated successfully!"
  echo "Services can now connect to SABnzbd using container hostnames."
else
  echo "SABnzbd host_whitelist already configured for service connections."
fi

View File

@@ -0,0 +1,60 @@
#!/bin/bash
# Security audit script for Arrs Media Stack
#
# Prints a human-readable report of the host's security posture: SSH
# configuration, firewall, fail2ban, Docker, key file permissions, system
# resources, listening *arr service ports and recent auth events.
# {{ docker_root }} / {{ media_root }} are substituted by Ansible at deploy.
echo "=== Security Audit Report - $(date) ==="
echo
echo "1. System Information:"
hostname
uname -a
uptime
echo
echo "2. User and Group Information:"
whoami
id docker 2>/dev/null || echo "Docker user not found"
getent group docker
echo
echo "3. SSH Configuration:"
systemctl is-active ssh
grep "^PermitRootLogin" /etc/ssh/sshd_config || echo "PermitRootLogin not configured"
grep "^PasswordAuthentication" /etc/ssh/sshd_config || echo "PasswordAuthentication not configured"
echo
echo "4. Firewall Status:"
ufw status
echo
echo "5. Fail2ban Status:"
systemctl is-active fail2ban
fail2ban-client status sshd 2>/dev/null || echo "Fail2ban sshd jail not active"
echo
echo "6. Docker Security:"
systemctl is-active docker
docker --version 2>/dev/null || echo "Docker not available"
docker ps 2>/dev/null || echo "Cannot access Docker"
echo
echo "7. File Permissions:"
ls -l /etc/ssh/sshd_config
ls -l /etc/fail2ban/jail.local 2>/dev/null || echo "jail.local not found"
# Quote the templated paths so values containing spaces don't word-split.
ls -ld "{{ docker_root }}"
ls -ld "{{ media_root }}"
echo
echo "8. System Resources:"
free -h
df -h /
echo
echo "9. Network Connections:"
# Prefer ss; net-tools (netstat) is deprecated and often not installed.
{ ss -tlnp 2>/dev/null || netstat -tlnp 2>/dev/null; } | grep -E ":(8989|7878|8686|6767|9696)" || echo "No Arrs ports found"
echo
echo "10. Recent Security Events:"
tail -10 /var/log/auth.log 2>/dev/null | grep sshd || echo "No SSH logs found"
echo
echo "=== End of Security Audit ==="

View File

@@ -0,0 +1,66 @@
#!/bin/bash
# System monitoring script for Arrs Media Stack
# Generated by Ansible
#
# Collects basic host metrics, verifies Docker and the core *arr containers,
# attempts to restart failed containers, and appends everything to a daily
# log file that is pruned after seven days.

LOG_DIR="{{ docker_root }}/logs/system"
TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
LOG_FILE="$LOG_DIR/system-monitor-$(date '+%Y%m%d').log"

# Ensure log directory exists
mkdir -p "$LOG_DIR"

# Append one line to today's log, tagged with this run's start time.
log_with_timestamp() {
  echo "[$TIMESTAMP] $1" >> "$LOG_FILE"
}

# System metrics
# NOTE(review): the top/free/uptime parsing assumes default procps output
# formats — confirm on the target distro.
CPU_USAGE=$(top -bn1 | grep "Cpu(s)" | awk '{print $2}' | cut -d'%' -f1)
MEMORY_USAGE=$(free | grep Mem | awk '{printf "%.1f", $3/$2 * 100.0}')
DISK_USAGE=$(df "{{ docker_root }}" | tail -1 | awk '{print $5}' | cut -d'%' -f1)
LOAD_AVG=$(uptime | awk -F'load average:' '{print $2}' | awk '{print $1}' | cut -d',' -f1)

log_with_timestamp "SYSTEM_METRICS CPU:${CPU_USAGE}% MEM:${MEMORY_USAGE}% DISK:${DISK_USAGE}% LOAD:${LOAD_AVG}"

# Check Docker service
if systemctl is-active --quiet docker; then
  log_with_timestamp "DOCKER_SERVICE OK"
else
  log_with_timestamp "DOCKER_SERVICE FAILED"
fi

# Check Arrs services.
# BUG FIX: the original 'cd' was unchecked — if the compose directory were
# missing, every docker-compose call below would silently run against the
# wrong working directory.
if cd "{{ docker_compose_dir }}"; then
  SERVICES=("sonarr" "radarr" "lidarr" "bazarr" "prowlarr" "watchtower")
  for service in "${SERVICES[@]}"; do
    if docker-compose ps "$service" | grep -q "Up"; then
      log_with_timestamp "SERVICE_${service^^} OK"
    else
      log_with_timestamp "SERVICE_${service^^} FAILED"
      # Try to restart failed service
      docker-compose restart "$service" 2>/dev/null
      log_with_timestamp "SERVICE_${service^^} RESTART_ATTEMPTED"
    fi
  done
else
  log_with_timestamp "COMPOSE_DIR_MISSING {{ docker_compose_dir }}"
fi

# float_gt VALUE THRESHOLD -> succeeds when VALUE > THRESHOLD.
# awk replaces bc: bc is often absent on minimal hosts, and an empty metric
# made the "echo | bc" pipeline error out instead of skipping the warning.
float_gt() {
  awk -v v="${1:-0}" -v t="$2" 'BEGIN { exit !(v > t) }'
}

# Check disk space warning (>80%)
if [[ ${DISK_USAGE:-0} -gt 80 ]]; then
  log_with_timestamp "DISK_WARNING Disk usage is ${DISK_USAGE}%"
fi
# Check memory warning (>90%)
if float_gt "$MEMORY_USAGE" 90; then
  log_with_timestamp "MEMORY_WARNING Memory usage is ${MEMORY_USAGE}%"
fi
# Check load average warning (>2.0)
if float_gt "$LOAD_AVG" 2.0; then
  log_with_timestamp "LOAD_WARNING Load average is $LOAD_AVG"
fi

# Cleanup old log files (keep 7 days)
find "$LOG_DIR" -name "system-monitor-*.log" -mtime +7 -delete 2>/dev/null
exit 0