Initial template repository

🎬 ARR Suite Template Bootstrap - Complete Media Automation Stack

Features:
- 16 production services (Prowlarr, Sonarr, Radarr, Plex, etc.)
- One-command Ansible deployment (example invocation below)
- VPN-protected downloads via Gluetun
- Tailscale secure access
- Production-ready security (UFW, Fail2Ban)
- Automated backups and monitoring
- Comprehensive documentation

Ready for customization and deployment to any VPS.
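
For the one-command deployment, a typical invocation looks like the following; the inventory and playbook file names are illustrative placeholders, not taken from this commit:

# Hypothetical entry point; substitute the playbook and inventory names used in this repo
ansible-playbook -i inventory/hosts.yml site.yml --ask-become-pass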

Co-authored-by: openhands <openhands@all-hands.dev>
Committed by openhands on 2025-11-28 04:26:12 +00:00 (commit 24f2cd64e9)
71 changed files with 9983 additions and 0 deletions

tasks/backup_setup.yml (new file, 209 lines)
---
# Backup automation setup tasks
- name: Create backup directories
file:
path: "{{ item }}"
state: directory
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
loop:
- "{{ backup_dir }}"
- "{{ backup_dir }}/configs"
- "{{ backup_dir }}/compose"
- "{{ backup_dir }}/scripts"
- "{{ backup_dir }}/logs"
tags: ['backup_dirs']
- name: Install backup utilities
apt:
name:
- rsync
- tar
- gzip
- pigz
- pv
state: present
tags: ['backup_tools']
- name: Create main backup script
template:
src: backup-arrs.sh.j2
dest: "{{ docker_root }}/scripts/backup-arrs.sh"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
tags: ['backup_scripts']
- name: Create incremental backup script
template:
src: backup-incremental.sh.j2
dest: "{{ docker_root }}/scripts/backup-incremental.sh"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
tags: ['backup_scripts']
- name: Create restore script
template:
src: restore-arrs.sh.j2
dest: "{{ docker_root }}/scripts/restore-arrs.sh"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
tags: ['backup_scripts']
- name: Create backup verification script
template:
src: verify-backup.sh.j2
dest: "{{ docker_root }}/scripts/verify-backup.sh"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
tags: ['backup_scripts']
- name: Create backup cleanup script
template:
src: cleanup-backups.sh.j2
dest: "{{ docker_root }}/scripts/cleanup-backups.sh"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
tags: ['backup_scripts']
- name: Create backup configuration file
template:
src: backup.conf.j2
dest: "{{ docker_root }}/backup.conf"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0644'
tags: ['backup_config']
- name: Set up scheduled backup cron job
cron:
name: "Arrs configuration backup"
minute: "0"
hour: "2"
weekday: "0"
job: "{{ docker_root }}/scripts/backup-arrs.sh >> {{ docker_root }}/logs/backup.log 2>&1"
user: "{{ docker_user }}"
when: backup_enabled
tags: ['backup_cron']
- name: Set up daily incremental backup cron job
cron:
name: "Arrs incremental backup"
minute: "30"
hour: "3"
job: "{{ docker_root }}/scripts/backup-incremental.sh >> {{ docker_root }}/logs/backup-incremental.log 2>&1"
user: "{{ docker_user }}"
when: backup_enabled
tags: ['backup_cron']
- name: Set up backup cleanup cron job
cron:
name: "Backup cleanup"
minute: "0"
hour: "1"
job: "{{ docker_root }}/scripts/cleanup-backups.sh >> {{ docker_root }}/logs/backup-cleanup.log 2>&1"
user: "{{ docker_user }}"
when: backup_enabled
tags: ['backup_cron']
- name: Set up backup verification cron job
cron:
name: "Backup verification"
minute: "0"
hour: "4"
weekday: "1"
job: "{{ docker_root }}/scripts/verify-backup.sh >> {{ docker_root }}/logs/backup-verify.log 2>&1"
user: "{{ docker_user }}"
when: backup_enabled
tags: ['backup_cron']
- name: Create database backup script (for future use)
template:
src: backup-databases.sh.j2
dest: "{{ docker_root }}/scripts/backup-databases.sh"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
tags: ['backup_scripts']
- name: Create media backup script (for large files)
template:
src: backup-media.sh.j2
dest: "{{ docker_root }}/scripts/backup-media.sh"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
tags: ['backup_scripts']
- name: Create backup status script
template:
src: backup-status.sh.j2
dest: "{{ docker_root }}/scripts/backup-status.sh"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
tags: ['backup_scripts']
- name: Create backup notification script
template:
src: backup-notify.sh.j2
dest: "{{ docker_root }}/scripts/backup-notify.sh"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
tags: ['backup_scripts']
- name: Create emergency backup script
template:
src: emergency-backup.sh.j2
dest: "{{ docker_root }}/scripts/emergency-backup.sh"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
tags: ['backup_scripts']
- name: Create backup README
template:
src: backup-README.md.j2
dest: "{{ backup_dir }}/README.md"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0644'
tags: ['backup_docs']
- name: Set up log rotation for backup logs
template:
src: backup-logrotate.j2
dest: /etc/logrotate.d/arrs-backup
mode: '0644'
tags: ['backup_logging']
- name: Create initial backup
command: "{{ docker_root }}/scripts/backup-arrs.sh"
become_user: "{{ docker_user }}"
when: backup_enabled
tags: ['initial_backup']
- name: Display backup information
debug:
msg: |
Backup system configured successfully!
Backup location: {{ backup_dir }}
Backup schedule: {{ backup_schedule }}
Retention: {{ backup_retention_days }} days
Manual backup commands:
- Full backup: {{ docker_root }}/scripts/backup-arrs.sh
- Incremental: {{ docker_root }}/scripts/backup-incremental.sh
- Restore: {{ docker_root }}/scripts/restore-arrs.sh
- Status: {{ docker_root }}/scripts/backup-status.sh
Backup logs: {{ docker_root }}/logs/backup.log
tags: ['backup_info']
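
None of the shell templates rendered above ship in this excerpt. As a rough sketch, backup-arrs.sh.j2 could look like the following; the sourced variable names and the use of pigz are assumptions, guided only by the packages and paths referenced in these tasks:

#!/usr/bin/env bash
# Sketch of backup-arrs.sh.j2 (template source); BACKUP_DIR, CONFIG_DIR and
# RETENTION_DAYS are assumed to be defined by backup.conf.
set -euo pipefail
source "{{ docker_root }}/backup.conf"

timestamp="$(date +%Y%m%d-%H%M%S)"
archive="${BACKUP_DIR}/configs/arrs-config-${timestamp}.tar.gz"

# Archive service configuration directories, using pigz for parallel compression
tar -I pigz -cf "${archive}" -C "${CONFIG_DIR}" .

# Prune archives older than the retention window
find "${BACKUP_DIR}/configs" -name 'arrs-config-*.tar.gz' -mtime "+${RETENTION_DAYS}" -delete
echo "Backup written to ${archive}"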

tasks/docker_setup.yml (new file, 125 lines)
---
# Docker installation and configuration tasks
- name: Remove old Docker packages
apt:
name:
- docker
- docker-engine
- docker.io
- containerd
- runc
state: absent
tags: ['docker_install']
- name: Add Docker GPG key
apt_key:
url: https://download.docker.com/linux/ubuntu/gpg
state: present
tags: ['docker_install']
- name: Add Docker repository
apt_repository:
repo: "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable"
state: present
update_cache: yes
tags: ['docker_install']
- name: Install Docker CE
apt:
name:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-buildx-plugin
- docker-compose-plugin
state: present
update_cache: yes
notify: restart docker
tags: ['docker_install']
- name: Install Docker Compose standalone
get_url:
url: "https://github.com/docker/compose/releases/download/v{{ docker_compose_version }}/docker-compose-linux-x86_64"
dest: /usr/local/bin/docker-compose
mode: '0755'
owner: root
group: root
tags: ['docker_compose']
- name: Remove existing docker-compose if present
file:
path: /usr/bin/docker-compose
state: absent
tags: ['docker_compose']
- name: Create docker-compose symlink
file:
src: /usr/local/bin/docker-compose
dest: /usr/bin/docker-compose
state: link
tags: ['docker_compose']
- name: Start and enable Docker service
systemd:
name: docker
state: started
enabled: yes
daemon_reload: yes
tags: ['docker_service']
- name: Configure Docker daemon
template:
src: daemon.json.j2
dest: /etc/docker/daemon.json
backup: yes
notify: restart docker
tags: ['docker_config']
- name: Create Docker log rotation configuration
template:
src: docker-logrotate.j2
dest: /etc/logrotate.d/docker
mode: '0644'
tags: ['docker_logging']
- name: Verify Docker installation
command: docker --version
register: docker_version
changed_when: false
tags: ['docker_verify']
- name: Verify Docker Compose installation
command: docker-compose --version
register: docker_compose_version_check
changed_when: false
tags: ['docker_verify']
- name: Display Docker versions
debug:
msg: |
Docker version: {{ docker_version.stdout }}
Docker Compose version: {{ docker_compose_version_check.stdout }}
tags: ['docker_verify']
- name: Test Docker functionality
docker_container:
name: hello-world-test
image: hello-world
state: started
auto_remove: yes
detach: no
register: docker_test
tags: ['docker_test']
- name: Remove test container
docker_container:
name: hello-world-test
state: absent
tags: ['docker_test']
- name: Clean up Docker test image
docker_image:
name: hello-world
state: absent
tags: ['docker_test']
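
The daemon configuration template (daemon.json.j2) is not shown here; a plausible rendering, assuming its purpose is bounded container logs in line with the log-rotation tasks above, could be:

{
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  },
  "storage-driver": "overlay2"
}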

tasks/monitoring_setup.yml (new file, 260 lines)
---
# Monitoring and logging setup tasks
- name: Create monitoring directories
file:
path: "{{ item }}"
state: directory
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
loop:
- "{{ docker_root }}/monitoring"
- "{{ docker_root }}/logs"
- "{{ docker_root }}/logs/arrs"
- "{{ docker_root }}/logs/system"
tags: ['monitoring_dirs']
- name: Install monitoring tools
apt:
name:
- htop
- iotop
- nethogs
- ncdu
- tree
- lsof
- strace
- tcpdump
- nmap
state: present
tags: ['monitoring_tools']
- name: Create monitoring scripts directory
file:
path: /usr/local/bin
state: directory
mode: '0755'
tags: ['monitoring_scripts']
- name: Create monitoring log directories
file:
path: "{{ item }}"
state: directory
owner: root
group: root
mode: '0755'
loop:
- /var/log/arrs
- /opt/monitoring
- /opt/monitoring/scripts
tags: ['monitoring_dirs']
- name: Deploy health dashboard script
template:
src: health-dashboard.sh.j2
dest: /usr/local/bin/health-dashboard.sh
owner: root
group: root
mode: '0755'
tags: ['monitoring_scripts']
- name: Deploy system monitoring script
template:
src: system-monitor.sh.j2
dest: /usr/local/bin/system-monitor.sh
owner: root
group: root
mode: '0755'
tags: ['monitoring_scripts']
- name: Deploy Docker monitoring script
template:
src: docker-monitor.sh.j2
dest: /usr/local/bin/docker-monitor.sh
owner: root
group: root
mode: '0755'
tags: ['monitoring_scripts']
- name: Deploy network monitoring script
template:
src: network-monitor.sh.j2
dest: /usr/local/bin/network-monitor.sh
owner: root
group: root
mode: '0755'
tags: ['monitoring_scripts']
- name: Deploy performance monitoring script
template:
src: performance-monitor.sh.j2
dest: /usr/local/bin/performance-monitor.sh
owner: root
group: root
mode: '0755'
tags: ['monitoring_scripts']
- name: Deploy security audit script
template:
src: security-audit.sh.j2
dest: /usr/local/bin/security-audit.sh
owner: root
group: root
mode: '0755'
tags: ['monitoring_scripts']
- name: Deploy disk usage monitoring script
template:
src: disk-usage-monitor.sh.j2
dest: /usr/local/bin/disk-usage-monitor.sh
owner: root
group: root
mode: '0755'
tags: ['monitoring_scripts']
- name: Deploy service health check script
template:
src: check-services.sh.j2
dest: /usr/local/bin/check-services.sh
owner: root
group: root
mode: '0755'
tags: ['monitoring_scripts']
- name: Deploy log aggregator script
template:
src: log-aggregator.sh.j2
dest: /usr/local/bin/log-aggregator.sh
owner: root
group: root
mode: '0755'
tags: ['monitoring_scripts']
- name: Set up log rotation for Arrs applications
template:
src: arrs-logrotate.j2
dest: /etc/logrotate.d/arrs
mode: '0644'
tags: ['log_rotation']
- name: Add health dashboard alias to root bashrc
lineinfile:
path: /root/.bashrc
line: "alias health='/usr/local/bin/health-dashboard.sh'"
create: yes
tags: ['monitoring_scripts']
- name: Set up cron job for system monitoring
cron:
name: "System monitoring"
minute: "*/10"
job: "/usr/local/bin/system-monitor.sh >> /var/log/arrs/system-monitor.log 2>&1"
user: root
tags: ['monitoring_cron']
- name: Set up cron job for Docker monitoring
cron:
name: "Docker monitoring"
minute: "*/5"
job: "/usr/local/bin/docker-monitor.sh >> /var/log/arrs/docker-monitor.log 2>&1"
user: root
tags: ['monitoring_cron']
- name: Set up cron job for network monitoring
cron:
name: "Network monitoring"
minute: "*/15"
job: "/usr/local/bin/network-monitor.sh >> /var/log/arrs/network-monitor.log 2>&1"
user: root
tags: ['monitoring_cron']
- name: Set up cron job for performance monitoring
cron:
name: "Performance monitoring"
minute: "*/20"
job: "/usr/local/bin/performance-monitor.sh >> /var/log/arrs/performance-monitor.log 2>&1"
user: root
tags: ['monitoring_cron']
- name: Set up cron job for security audit
cron:
name: "Security audit"
minute: "0"
hour: "2"
job: "/usr/local/bin/security-audit.sh >> /var/log/arrs/security-audit.log 2>&1"
user: root
tags: ['monitoring_cron']
- name: Set up cron job for disk usage monitoring
cron:
name: "Disk usage monitoring"
minute: "0"
hour: "*/6"
job: "/usr/local/bin/disk-usage-monitor.sh >> /var/log/arrs/disk-usage.log 2>&1"
user: root
tags: ['monitoring_cron']
- name: Set up cron job for service health checks
cron:
name: "Service health checks"
minute: "*/5"
job: "/usr/local/bin/check-services.sh >> /var/log/arrs/service-checks.log 2>&1"
user: root
tags: ['monitoring_cron']
- name: Set up cron job for log aggregation
cron:
name: "Log aggregation"
minute: "0"
hour: "1"
job: "/usr/local/bin/log-aggregator.sh >> /var/log/arrs/log-aggregator.log 2>&1"
user: root
tags: ['monitoring_cron']
- name: Create alerting script
template:
src: alert-manager.sh.j2
dest: "{{ docker_root }}/scripts/alert-manager.sh"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
tags: ['alerting']
- name: Configure rsyslog for centralized logging
template:
src: rsyslog-arrs.conf.j2
dest: /etc/rsyslog.d/40-arrs.conf
mode: '0644'
notify: restart rsyslog
tags: ['centralized_logging']
- name: Create log analysis script
template:
src: log-analyzer.sh.j2
dest: "{{ docker_root }}/scripts/log-analyzer.sh"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
tags: ['log_analysis']
- name: Set up weekly log analysis cron job
cron:
name: "Weekly log analysis"
minute: "0"
hour: "2"
weekday: "0"
job: "{{ docker_root }}/scripts/log-analyzer.sh >> {{ docker_root }}/logs/system/log-analysis.log 2>&1"
user: "{{ docker_user }}"
tags: ['log_analysis']
- name: Create monitoring configuration file
template:
src: monitoring.conf.j2
dest: "{{ docker_root }}/monitoring/monitoring.conf"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0644'
tags: ['monitoring_config']
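
check-services.sh.j2 itself is not part of this excerpt; a minimal sketch, with container names assumed from the services named in the commit message, might be:

#!/usr/bin/env bash
# Sketch of check-services.sh.j2; the service list is an assumption.
set -u
failed=0
for svc in prowlarr sonarr radarr bazarr sabnzbd plex; do
    # A service counts as healthy when Docker reports its container as running
    if [ -z "$(docker ps --filter "name=^${svc}$" --filter "status=running" -q)" ]; then
        echo "$(date -Is) WARNING: ${svc} is not running"
        failed=1
    fi
done
exit "${failed}"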

tasks/security_setup.yml (new file, 185 lines)
---
# Security and firewall configuration tasks
- name: Configure SSH security
lineinfile:
path: /etc/ssh/sshd_config
regexp: "{{ item.regexp }}"
line: "{{ item.line }}"
backup: yes
loop:
- { regexp: '^#?PermitRootLogin', line: 'PermitRootLogin yes' }
- { regexp: '^#?PasswordAuthentication', line: 'PasswordAuthentication {{ "yes" if not ssh_key_based_auth else "no" }}' }
- { regexp: '^#?PubkeyAuthentication', line: 'PubkeyAuthentication yes' }
- { regexp: '^#?Port', line: 'Port {{ ssh_port }}' }
- { regexp: '^#?MaxAuthTries', line: 'MaxAuthTries 3' }
- { regexp: '^#?ClientAliveInterval', line: 'ClientAliveInterval 300' }
- { regexp: '^#?ClientAliveCountMax', line: 'ClientAliveCountMax 2' }
notify: restart sshd
tags: ['ssh_security']
- name: Configure fail2ban for SSH
template:
src: jail.local.j2
dest: /etc/fail2ban/jail.local
backup: yes
notify: restart fail2ban
tags: ['fail2ban']
- name: Configure fail2ban filter for Plex
template:
src: plex-fail2ban-filter.j2
dest: /etc/fail2ban/filter.d/plex.conf
backup: yes
when: plex_public_access | default(false)
notify: restart fail2ban
tags: ['fail2ban', 'plex']
- name: Start and enable fail2ban
systemd:
name: fail2ban
state: started
enabled: yes
tags: ['fail2ban']
- name: Reset UFW to defaults
ufw:
state: reset
when: ufw_enabled
tags: ['firewall']
- name: Configure UFW default policies
ufw:
direction: "{{ item.direction }}"
policy: "{{ item.policy }}"
loop:
- { direction: 'incoming', policy: "{{ ufw_default_policy_incoming }}" }
- { direction: 'outgoing', policy: "{{ ufw_default_policy_outgoing }}" }
when: ufw_enabled
tags: ['firewall']
- name: Allow SSH through UFW
ufw:
rule: allow
port: "{{ ssh_port }}"
proto: tcp
when: ufw_enabled
tags: ['firewall']
- name: Check if Tailscale is installed
command: which tailscale
register: tailscale_check
failed_when: false
changed_when: false
when: tailscale_enabled
tags: ['tailscale']
- name: Install Tailscale
shell: |
curl -fsSL https://tailscale.com/install.sh | sh
when: tailscale_enabled and tailscale_check.rc != 0
tags: ['tailscale']
- name: Get Tailscale interface information
shell: ip addr show {{ tailscale_interface }} | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1
register: tailscale_ip
failed_when: false
changed_when: false
when: tailscale_enabled
tags: ['tailscale']
- name: Allow Tailscale interface through UFW
ufw:
rule: allow
interface: "{{ tailscale_interface }}"
direction: in
when: ufw_enabled and tailscale_enabled
tags: ['firewall', 'tailscale']
- name: Allow Arrs services from Tailscale network
ufw:
rule: allow
port: "{{ item.value }}"
proto: tcp
src: "{{ tailscale_ip.stdout | regex_replace('\\.[0-9]+$', '.0/24') }}"
loop: "{{ ports | dict2items }}"
when: ufw_enabled and tailscale_enabled and tailscale_ip.stdout != ""
tags: ['firewall', 'tailscale']
- name: Allow Docker bridge network communication
ufw:
rule: allow
from_ip: "{{ docker_network_subnet }}"
to_ip: "{{ docker_network_subnet }}"
when: ufw_enabled
tags: ['firewall', 'docker']
- name: Allow Plex Media Server through UFW (public access)
ufw:
rule: allow
port: "{{ item.port }}"
proto: "{{ item.proto }}"
comment: "{{ item.comment }}"
loop:
- { port: "32400", proto: "tcp", comment: "Plex Media Server" }
- { port: "3005", proto: "tcp", comment: "Plex Home Theater via Plex Companion" }
- { port: "8324", proto: "tcp", comment: "Plex for Roku via Plex Companion" }
- { port: "32469", proto: "tcp", comment: "Plex DLNA Server" }
- { port: "1900", proto: "udp", comment: "Plex DLNA Server" }
- { port: "32410", proto: "udp", comment: "Plex GDM network discovery" }
- { port: "32412", proto: "udp", comment: "Plex GDM network discovery" }
- { port: "32413", proto: "udp", comment: "Plex GDM network discovery" }
- { port: "32414", proto: "udp", comment: "Plex GDM network discovery" }
when: ufw_enabled and plex_public_access | default(false)
tags: ['firewall', 'plex']
- name: Enable UFW
ufw:
state: enabled
when: ufw_enabled
tags: ['firewall']
- name: Configure Docker security options
template:
src: docker-security.json.j2
dest: /etc/docker/seccomp-profile.json
mode: '0644'
notify: restart docker
tags: ['docker_security']
- name: Create AppArmor profile for Docker containers
template:
src: docker-apparmor.j2
dest: /etc/apparmor.d/docker-arrs
mode: '0644'
notify: reload apparmor
tags: ['apparmor']
- name: Set secure file permissions
file:
path: "{{ item.path }}"
mode: "{{ item.mode }}"
owner: "{{ item.owner | default('root') }}"
group: "{{ item.group | default('root') }}"
loop:
- { path: '/etc/ssh/sshd_config', mode: '0600' }
- { path: '/etc/fail2ban/jail.local', mode: '0644' }
- { path: '/etc/docker', mode: '0755' }
tags: ['file_permissions']
- name: Configure log monitoring
template:
src: rsyslog-docker.conf.j2
dest: /etc/rsyslog.d/30-docker.conf
mode: '0644'
notify: restart rsyslog
tags: ['logging']
- name: Create security audit script
template:
src: security-audit.sh.j2
dest: "{{ docker_root }}/scripts/security-audit.sh"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
tags: ['security_audit']
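
jail.local.j2 is not included in this excerpt; a minimal sketch, where only ssh_port comes from a variable the tasks above actually use and the ban values are placeholders, could be:

# Sketch of jail.local.j2; ban/find times are placeholder values
[DEFAULT]
bantime  = 1h
findtime = 10m
maxretry = 3
backend  = systemd

[sshd]
enabled = true
port    = {{ ssh_port }}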

Services deployment tasks (new file, 192 lines)
---
# Services deployment tasks
- name: Generate Docker Compose file
template:
src: docker-compose.yml.j2
dest: "{{ docker_compose_dir }}/docker-compose.yml"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0644'
backup: yes
tags: ['compose']
- name: Create environment file for Docker Compose
template:
src: docker.env.j2
dest: "{{ docker_compose_dir }}/.env"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0600'
tags: ['compose']
- name: Create Gluetun VPN directory
file:
path: "{{ docker_root }}/gluetun"
state: directory
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
when: vpn_enabled
tags: ['vpn']
- name: Copy custom OpenVPN configuration
copy:
src: custom.conf
dest: "{{ docker_root }}/gluetun/custom.conf"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0600'
when: vpn_enabled and vpn_provider == 'custom' and vpn_type == 'openvpn'
tags: ['vpn']
- name: Copy WireGuard configuration
copy:
src: wireguard/protonvpn.conf
dest: "{{ docker_root }}/gluetun/protonvpn.conf"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0600'
when: vpn_enabled and vpn_type == 'wireguard'
tags: ['vpn']
- name: Pull Docker images
shell: docker-compose pull
args:
chdir: "{{ docker_compose_dir }}"
become_user: "{{ docker_user }}"
tags: ['images']
- name: Start Arrs services
shell: docker-compose up -d
args:
chdir: "{{ docker_compose_dir }}"
become_user: "{{ docker_user }}"
tags: ['services']
- name: Wait for services to be ready
wait_for:
port: "{{ item.value }}"
host: "127.0.0.1"
delay: 10
timeout: 300
loop: "{{ ports | dict2items }}"
tags: ['health_check']
- name: Verify service health
uri:
url: "http://127.0.0.1:{{ item.value }}/ping"
method: GET
status_code: 200
loop: "{{ ports | dict2items }}"
register: health_checks
retries: 5
delay: 10
until: health_checks is succeeded
tags: ['health_check']
- name: Create systemd service for Arrs stack
template:
src: arrs-stack.service.j2
dest: /etc/systemd/system/arrs-stack.service
mode: '0644'
notify: reload systemd
tags: ['systemd']
- name: Enable Arrs stack systemd service
systemd:
name: arrs-stack
enabled: yes
daemon_reload: yes
tags: ['systemd']
- name: Create service management script
template:
src: manage-arrs.sh.j2
dest: /usr/local/bin/manage-arrs
mode: '0755'
tags: ['management']
- name: Create Docker network if it doesn't exist
docker_network:
name: "{{ docker_network_name }}"
driver: bridge
ipam_config:
- subnet: "{{ docker_network_subnet }}"
gateway: "{{ docker_network_gateway }}"
ignore_errors: yes
tags: ['network']
- name: Set up log rotation for Docker containers
template:
src: docker-container-logrotate.j2
dest: /etc/logrotate.d/docker-containers
mode: '0644'
tags: ['logging']
- name: Create service status check script
template:
src: check-services.sh.j2
dest: "{{ docker_root }}/scripts/check-services.sh"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
tags: ['monitoring']
- name: Set up cron job for service monitoring
cron:
name: "Check Arrs services"
minute: "*/5"
job: "{{ docker_root }}/scripts/check-services.sh >> {{ docker_root }}/logs/service-check.log 2>&1"
user: "{{ docker_user }}"
tags: ['monitoring']
- name: Display service information
debug:
msg: |
Services deployed successfully!
Access URLs:
- Sonarr: http://{{ ansible_default_ipv4.address }}:{{ ports.sonarr }}
- Radarr: http://{{ ansible_default_ipv4.address }}:{{ ports.radarr }}
- Lidarr: http://{{ ansible_default_ipv4.address }}:{{ ports.lidarr }}
- Bazarr: http://{{ ansible_default_ipv4.address }}:{{ ports.bazarr }}
- Prowlarr: http://{{ ansible_default_ipv4.address }}:{{ ports.prowlarr }}
Management commands:
- Start: sudo systemctl start arrs-stack
- Stop: sudo systemctl stop arrs-stack
- Status: sudo systemctl status arrs-stack
- Logs: docker-compose -f {{ docker_compose_dir }}/docker-compose.yml logs -f
tags: ['info']
- name: Deploy SABnzbd configuration fix script
template:
src: sabnzbd-config-fix.sh.j2
dest: "{{ docker_root }}/scripts/sabnzbd-config-fix.sh"
mode: '0755'
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
tags: ['services', 'sabnzbd']
- name: Apply SABnzbd hostname whitelist fix
shell: |
cd {{ docker_compose_dir }}
docker-compose exec -T sabnzbd /bin/bash -c "
if ! grep -q 'sonarr, radarr, lidarr' /config/sabnzbd.ini 2>/dev/null; then
echo 'Updating SABnzbd host_whitelist...'
sed -i 's/host_whitelist = \([^,]*\),/host_whitelist = \1, sonarr, radarr, lidarr, bazarr, prowlarr, whisparr, gluetun, localhost, 127.0.0.1,/' /config/sabnzbd.ini
echo 'SABnzbd host_whitelist updated for service connections'
else
echo 'SABnzbd host_whitelist already configured'
fi"
register: sabnzbd_config_result
changed_when: "'updated for service connections' in sabnzbd_config_result.stdout"
tags: ['services', 'sabnzbd']
- name: Restart SABnzbd if configuration was updated
shell: |
cd {{ docker_compose_dir }}
docker-compose restart sabnzbd
when: sabnzbd_config_result.changed
tags: ['services', 'sabnzbd']
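
The unit rendered by the "Create systemd service for Arrs stack" task is not shown; a common docker-compose wrapper unit, offered here as a sketch rather than the template's actual contents, would be:

# Sketch of arrs-stack.service.j2
[Unit]
Description=Arrs media automation stack
Requires=docker.service
After=docker.service network-online.target

[Service]
Type=oneshot
RemainAfterExit=yes
User={{ docker_user }}
WorkingDirectory={{ docker_compose_dir }}
ExecStart=/usr/local/bin/docker-compose up -d
ExecStop=/usr/local/bin/docker-compose down

[Install]
WantedBy=multi-user.target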

tasks/system_setup.yml (new file, 93 lines)
---
# System setup tasks for Arrs Media Stack deployment
- name: Set timezone
timezone:
name: "{{ timezone }}"
notify: reload systemd
tags: ['timezone']
- name: Update system packages
apt:
upgrade: dist
update_cache: yes
cache_valid_time: 3600
tags: ['system_update']
- name: Install additional system utilities
apt:
name:
- vim
- git
- rsync
- cron
- logrotate
- fail2ban
- ncdu
- iotop
- nethogs
- jq
state: present
tags: ['system_packages']
- name: Configure automatic security updates
apt:
name: unattended-upgrades
state: present
tags: ['security_updates']
- name: Configure unattended-upgrades
template:
src: 50unattended-upgrades.j2
dest: /etc/apt/apt.conf.d/50unattended-upgrades
backup: yes
tags: ['security_updates']
- name: Enable automatic security updates
template:
src: 20auto-upgrades.j2
dest: /etc/apt/apt.conf.d/20auto-upgrades
backup: yes
tags: ['security_updates']
- name: Configure system limits for Docker
pam_limits:
domain: "{{ docker_user }}"
limit_type: "{{ item.type }}"
limit_item: "{{ item.item }}"
value: "{{ item.value }}"
loop:
- { type: 'soft', item: 'nofile', value: '65536' }
- { type: 'hard', item: 'nofile', value: '65536' }
- { type: 'soft', item: 'nproc', value: '32768' }
- { type: 'hard', item: 'nproc', value: '32768' }
tags: ['system_limits']
- name: Configure kernel parameters for Docker
sysctl:
name: "{{ item.name }}"
value: "{{ item.value }}"
state: present
reload: yes
loop:
- { name: 'vm.max_map_count', value: '262144' }
- { name: 'fs.file-max', value: '2097152' }
- { name: 'net.core.somaxconn', value: '65535' }
tags: ['kernel_params']
- name: Create systemd override directory for Docker
file:
path: /etc/systemd/system/docker.service.d
state: directory
mode: '0755'
tags: ['docker_systemd']
- name: Configure Docker systemd service
template:
src: docker-override.conf.j2
dest: /etc/systemd/system/docker.service.d/override.conf
backup: yes
notify:
- reload systemd
- restart docker
tags: ['docker_systemd']
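
docker-override.conf.j2 is not shown; a sketch that simply mirrors the nofile/nproc limits configured above (the real template may add daemon flags) would be:

# Sketch of docker-override.conf.j2
[Service]
LimitNOFILE=65536
LimitNPROC=32768
TasksMax=infinity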

tasks/user_setup.yml (new file, 128 lines)
---
# User and directory setup tasks
- name: Create docker group
group:
name: "{{ docker_group }}"
state: present
tags: ['users']
- name: Create docker user
user:
name: "{{ docker_user }}"
group: "{{ docker_group }}"
groups: docker
shell: /bin/bash
home: "{{ docker_root }}"
create_home: yes
system: no
state: present
tags: ['users']
- name: Add docker user to docker group
user:
name: "{{ docker_user }}"
groups: docker
append: yes
tags: ['users']
- name: Get docker user UID and GID
getent:
database: passwd
key: "{{ docker_user }}"
tags: ['users']
- name: Get docker group GID
getent:
database: group
key: "{{ docker_group }}"
tags: ['users']
- name: Display docker user information
debug:
msg: |
Docker user: {{ docker_user }}
Docker UID: {{ ansible_facts['getent_passwd'][docker_user][1] }}
Docker GID: {{ ansible_facts['getent_group'][docker_group][1] }}
tags: ['users']
- name: Create media directories
file:
path: "{{ item }}"
state: directory
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
loop: "{{ media_dirs }}"
tags: ['directories']
- name: Create docker config directories
file:
path: "{{ item }}"
state: directory
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
loop: "{{ docker_dirs }}"
tags: ['directories']
- name: Set ownership of media root
file:
path: "{{ media_root }}"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
recurse: yes
state: directory
tags: ['permissions']
- name: Set ownership of docker root
file:
path: "{{ docker_root }}"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
recurse: yes
state: directory
tags: ['permissions']
- name: Create docker user .bashrc
template:
src: bashrc.j2
dest: "{{ docker_root }}/.bashrc"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0644'
tags: ['user_config']
- name: Create useful aliases for docker user
template:
src: bash_aliases.j2
dest: "{{ docker_root }}/.bash_aliases"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0644'
tags: ['user_config']
- name: Create scripts directory
file:
path: "{{ docker_root }}/scripts"
state: directory
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
tags: ['directories']
- name: Create management scripts
template:
src: "{{ item }}.j2"
dest: "{{ docker_root }}/scripts/{{ item }}"
owner: "{{ docker_user }}"
group: "{{ docker_group }}"
mode: '0755'
loop:
- arrs-start.sh
- arrs-stop.sh
- arrs-restart.sh
- arrs-logs.sh
- arrs-update.sh
- arrs-status.sh
tags: ['scripts']
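
bash_aliases.j2 is not part of this excerpt; a sketch with illustrative alias names, wrapping the same docker-compose commands the management scripts use, could be:

# Sketch of bash_aliases.j2; alias names are illustrative
alias arrs-ps='docker-compose -f {{ docker_compose_dir }}/docker-compose.yml ps'
alias arrs-logs='docker-compose -f {{ docker_compose_dir }}/docker-compose.yml logs -f --tail=100'
alias arrs-pull='docker-compose -f {{ docker_compose_dir }}/docker-compose.yml pull'
alias dtail='docker logs -f --tail=100'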