Files
arr-suite-template-bootstrap/tasks/services_deployment.yml
openhands 24f2cd64e9 Initial template repository
🎬 ARR Suite Template Bootstrap - Complete Media Automation Stack

Features:
- 16 production services (Prowlarr, Sonarr, Radarr, Plex, etc.)
- One-command Ansible deployment
- VPN-protected downloads via Gluetun
- Tailscale secure access
- Production-ready security (UFW, Fail2Ban)
- Automated backups and monitoring
- Comprehensive documentation

Ready for customization and deployment to any VPS.

Co-authored-by: openhands <openhands@all-hands.dev>
2025-11-28 04:26:12 +00:00

192 lines
5.4 KiB
YAML

---
# Services deployment tasks
# Render the main docker-compose.yml from the Jinja2 template.
- name: Generate Docker Compose file
  template:
    src: docker-compose.yml.j2
    dest: "{{ docker_compose_dir }}/docker-compose.yml"
    owner: "{{ docker_user }}"
    group: "{{ docker_group }}"
    mode: '0644'
    # Keep a timestamped copy of the previous compose file whenever it changes.
    backup: true
  tags: ['compose']
# Render the .env consumed by docker-compose; mode 0600 because it
# typically carries credentials (VPN keys, API tokens, etc.).
- name: Create environment file for Docker Compose
  template:
    src: docker.env.j2
    dest: "{{ docker_compose_dir }}/.env"
    mode: '0600'
    owner: "{{ docker_user }}"
    group: "{{ docker_group }}"
  tags: ['compose']
# Config directory mounted into the Gluetun VPN container.
- name: Create Gluetun VPN directory
  file:
    path: "{{ docker_root }}/gluetun"
    state: directory
    mode: '0755'
    owner: "{{ docker_user }}"
    group: "{{ docker_group }}"
  when: vpn_enabled
  tags: ['vpn']
# Only needed when the operator supplies their own OpenVPN provider config.
- name: Copy custom OpenVPN configuration
  copy:
    src: custom.conf
    dest: "{{ docker_root }}/gluetun/custom.conf"
    mode: '0600'
    owner: "{{ docker_user }}"
    group: "{{ docker_group }}"
  when: vpn_enabled and vpn_provider == 'custom' and vpn_type == 'openvpn'
  tags: ['vpn']
# WireGuard credentials for Gluetun; 0600 because the file holds a private key.
- name: Copy WireGuard configuration
  copy:
    src: wireguard/protonvpn.conf
    dest: "{{ docker_root }}/gluetun/protonvpn.conf"
    mode: '0600'
    owner: "{{ docker_user }}"
    group: "{{ docker_group }}"
  when: vpn_enabled and vpn_type == 'wireguard'
  tags: ['vpn']
# Pre-fetch all images so the subsequent "up -d" is fast and deterministic.
- name: Pull Docker images
  # No shell features are used, so prefer the command module over shell.
  command: docker-compose pull
  args:
    chdir: "{{ docker_compose_dir }}"
  # become_user has no effect unless privilege escalation is enabled;
  # make it explicit rather than relying on a play-level `become`.
  become: true
  become_user: "{{ docker_user }}"
  tags: ['images']
# Bring the whole stack up detached; compose itself handles ordering.
- name: Start Arrs services
  # No shell features are used, so prefer the command module over shell.
  command: docker-compose up -d
  args:
    chdir: "{{ docker_compose_dir }}"
  # become_user has no effect unless privilege escalation is enabled;
  # make it explicit rather than relying on a play-level `become`.
  become: true
  become_user: "{{ docker_user }}"
  tags: ['services']
# Block until every published service port accepts TCP connections.
# `ports` is assumed to be a service-name -> port mapping — TODO confirm
# against group_vars.
- name: Wait for services to be ready
  wait_for:
    host: "127.0.0.1"
    port: "{{ item.value }}"
    delay: 10
    timeout: 300
  loop: "{{ ports | dict2items }}"
  tags: ['health_check']
# HTTP-level health probe: each service must answer 200 on /ping.
# With a loop, retries/until apply per item, so every service gets
# up to 5 attempts 10 seconds apart.
- name: Verify service health
  uri:
    url: "http://127.0.0.1:{{ item.value }}/ping"
    method: GET
    status_code: 200
  register: health_checks
  until: health_checks is succeeded
  retries: 5
  delay: 10
  loop: "{{ ports | dict2items }}"
  tags: ['health_check']
# Install a systemd unit so the stack survives host reboots.
- name: Create systemd service for Arrs stack
  template:
    src: arrs-stack.service.j2
    dest: /etc/systemd/system/arrs-stack.service
    mode: '0644'
  notify: reload systemd
  tags: ['systemd']
# Enable the unit at boot; daemon_reload picks up the freshly installed file.
- name: Enable Arrs stack systemd service
  systemd:
    name: arrs-stack
    enabled: true
    daemon_reload: true
  tags: ['systemd']
# Convenience wrapper installed on PATH for day-to-day stack operations.
- name: Create service management script
  template:
    src: manage-arrs.sh.j2
    dest: /usr/local/bin/manage-arrs
    mode: '0755'
  tags: ['management']
# Dedicated bridge network with a fixed subnet so containers get
# predictable addresses.
- name: Create Docker network if it doesn't exist
  docker_network:
    name: "{{ docker_network_name }}"
    driver: bridge
    ipam_config:
      - subnet: "{{ docker_network_subnet }}"
        gateway: "{{ docker_network_gateway }}"
  # NOTE(review): ignore_errors hides every failure, not just
  # "already exists" — docker_network is idempotent, so consider removing it.
  ignore_errors: true
  tags: ['network']
# Rotate container logs so long-running services don't fill the disk.
- name: Set up log rotation for Docker containers
  template:
    src: docker-container-logrotate.j2
    dest: /etc/logrotate.d/docker-containers
    mode: '0644'
  tags: ['logging']
# Health-check script that the monitoring cron job (below) executes.
- name: Create service status check script
  template:
    src: check-services.sh.j2
    dest: "{{ docker_root }}/scripts/check-services.sh"
    mode: '0755'
    owner: "{{ docker_user }}"
    group: "{{ docker_group }}"
  tags: ['monitoring']
# Run the status check every five minutes and append output to a log file.
- name: Set up cron job for service monitoring
  cron:
    name: "Check Arrs services"
    minute: "*/5"
    user: "{{ docker_user }}"
    job: "{{ docker_root }}/scripts/check-services.sh >> {{ docker_root }}/logs/service-check.log 2>&1"
  tags: ['monitoring']
# Final summary printed at the end of the deployment run.
- name: Display service information
  debug:
    msg: |
      Services deployed successfully!
      Access URLs:
      - Sonarr: http://{{ ansible_default_ipv4.address }}:{{ ports.sonarr }}
      - Radarr: http://{{ ansible_default_ipv4.address }}:{{ ports.radarr }}
      - Lidarr: http://{{ ansible_default_ipv4.address }}:{{ ports.lidarr }}
      - Bazarr: http://{{ ansible_default_ipv4.address }}:{{ ports.bazarr }}
      - Prowlarr: http://{{ ansible_default_ipv4.address }}:{{ ports.prowlarr }}
      Management commands:
      - Start: sudo systemctl start arrs-stack
      - Stop: sudo systemctl stop arrs-stack
      - Status: sudo systemctl status arrs-stack
      - Logs: docker-compose -f {{ docker_compose_dir }}/docker-compose.yml logs -f
  tags: ['info']
# Ship the standalone SABnzbd config-fix script alongside the other
# operational scripts.
- name: Deploy SABnzbd configuration fix script
  template:
    src: sabnzbd-config-fix.sh.j2
    dest: "{{ docker_root }}/scripts/sabnzbd-config-fix.sh"
    owner: "{{ docker_user }}"
    group: "{{ docker_group }}"
    mode: '0755'
  tags: ['services', 'sabnzbd']
# Idempotently append the sibling service hostnames to SABnzbd's
# host_whitelist so Sonarr/Radarr/etc. can reach it by container name.
- name: Apply SABnzbd hostname whitelist fix
  shell: |
    cd {{ docker_compose_dir }}
    docker-compose exec -T sabnzbd /bin/bash -c "
    if ! grep -q 'sonarr, radarr, lidarr' /config/sabnzbd.ini 2>/dev/null; then
      echo 'Updating SABnzbd host_whitelist...'
      sed -i 's/host_whitelist = \([^,]*\),/host_whitelist = \1, sonarr, radarr, lidarr, bazarr, prowlarr, whisparr, gluetun, localhost, 127.0.0.1,/' /config/sabnzbd.ini
      echo 'SABnzbd host_whitelist updated for service connections'
    else
      echo 'SABnzbd host_whitelist already configured'
    fi"
  # Run docker-compose as the stack owner, consistent with the pull/up tasks.
  become: true
  become_user: "{{ docker_user }}"
  register: sabnzbd_config_result
  # Only report "changed" when the script actually rewrote the config.
  changed_when: "'updated for service connections' in sabnzbd_config_result.stdout"
  tags: ['services', 'sabnzbd']
# Restart SABnzbd so the updated host_whitelist takes effect, but only
# when the previous task actually modified the config.
- name: Restart SABnzbd if configuration was updated
  # Plain command + chdir instead of shell with `cd` — no shell features needed.
  command: docker-compose restart sabnzbd
  args:
    chdir: "{{ docker_compose_dir }}"
  become: true
  become_user: "{{ docker_user }}"
  when: sabnzbd_config_result is changed
  tags: ['services', 'sabnzbd']