Sanitized mirror from private repository - 2026-03-27 11:53:55 UTC
This commit is contained in:
39
docs/advanced/ansible/playbooks/add_ssh_keys.yml
Normal file
39
docs/advanced/ansible/playbooks/add_ssh_keys.yml
Normal file
@@ -0,0 +1,39 @@
---
# Push the homelab control node's SSH public key to every reachable host.
# Hosts whose SSH port does not answer within the timeout are skipped and
# reported instead of failing the play.
- name: Ensure homelab's SSH key is present on all reachable hosts
  hosts: all
  gather_facts: false
  become: true

  vars:
    # Control node's public key, read locally at template time.
    ssh_pub_key: "{{ lookup('file', '/home/homelab/.ssh/id_ed25519.pub') }}"
    ssh_user: "{{ ansible_user | default('vish') }}"
    ssh_port: "{{ ansible_port | default(22) }}"

  tasks:
    - name: Check if SSH is reachable
      # Probe from the control node so unreachable hosts never block the play.
      ansible.builtin.wait_for:
        host: "{{ inventory_hostname }}"
        port: "{{ ssh_port }}"
        timeout: 8
        state: started
      delegate_to: localhost
      become: false  # no privilege needed for a local port probe
      ignore_errors: true
      register: ssh_port_check

    - name: Add SSH key for user
      authorized_key:
        user: "{{ ssh_user }}"
        key: "{{ ssh_pub_key }}"
        state: present
      when: ssh_port_check is not failed
      ignore_unreachable: true

    - name: Report hosts where SSH key was added
      ansible.builtin.debug:
        msg: "SSH key added successfully to {{ inventory_hostname }}"
      when: ssh_port_check is not failed

    - name: Report hosts where SSH was unreachable
      ansible.builtin.debug:
        msg: "Skipped {{ inventory_hostname }} (SSH not reachable)"
      when: ssh_port_check is failed
127
docs/advanced/ansible/playbooks/ansible_status_check.yml
Normal file
127
docs/advanced/ansible/playbooks/ansible_status_check.yml
Normal file
@@ -0,0 +1,127 @@
---
# Check Ansible status across all reachable hosts
# Simple status check and upgrade where possible
# Created: February 8, 2026

- name: Check Ansible status on all reachable hosts
  hosts: homelab,pi-5,vish-concord-nuc,pve
  gather_facts: true
  become: true
  ignore_errors: true

  tasks:
    - name: Display host information
      debug:
        msg: |
          === {{ inventory_hostname | upper }} ===
          IP: {{ ansible_host }}
          OS: {{ ansible_distribution }} {{ ansible_distribution_version }}
          Architecture: {{ ansible_architecture }}

    - name: Check if Ansible is installed
      command: ansible --version
      register: ansible_check
      changed_when: false  # read-only probe
      failed_when: false   # absence of ansible is a valid result, not an error

    - name: Display Ansible status
      debug:
        msg: |
          Ansible on {{ inventory_hostname }}:
          {% if ansible_check.rc == 0 %}
          ✅ INSTALLED: {{ ansible_check.stdout_lines[0] }}
          {% else %}
          ❌ NOT INSTALLED
          {% endif %}

    - name: Check if apt is available (Debian/Ubuntu only)
      stat:
        path: /usr/bin/apt
      register: has_apt

    - name: Try to install/upgrade Ansible (Debian/Ubuntu only)
      block:
        - name: Update package cache (ignore GPG errors)
          apt:
            update_cache: true
            cache_valid_time: 0
          register: apt_update
          # ignore_errors (not failed_when: false) so the registered result
          # still carries failed=true and the "apt_update is failed" checks
          # below can actually trigger.
          ignore_errors: true

        - name: Install/upgrade Ansible
          apt:
            name: ansible
            state: latest  # intentional: this play exists to upgrade Ansible
          register: ansible_install
          when: apt_update is not failed

        - name: Display installation result
          # The failed-update branch is checked first: when the install task
          # was skipped, its result would otherwise satisfy "is succeeded"
          # and misreport "Already at latest version".
          debug:
            msg: |
              Ansible installation on {{ inventory_hostname }}:
              {% if apt_update is failed %}
              ⚠️ APT update failed - using cached packages
              {% elif ansible_install is succeeded %}
              {% if ansible_install.changed %}
              ✅ {{ 'INSTALLED' if ansible_check.rc != 0 else 'UPGRADED' }} successfully
              {% else %}
              ℹ️ Already at latest version
              {% endif %}
              {% else %}
              ❌ Installation failed
              {% endif %}
      when: has_apt.stat.exists
      rescue:
        - name: Installation failed
          debug:
            msg: "❌ Failed to install/upgrade Ansible on {{ inventory_hostname }}"

    - name: Final Ansible version check
      command: ansible --version
      register: final_ansible_check
      changed_when: false
      failed_when: false

    - name: Final status summary
      debug:
        msg: |
          === FINAL STATUS: {{ inventory_hostname | upper }} ===
          {% if final_ansible_check.rc == 0 %}
          ✅ Ansible: {{ final_ansible_check.stdout_lines[0] }}
          {% else %}
          ❌ Ansible: Not available
          {% endif %}
          OS: {{ ansible_distribution }} {{ ansible_distribution_version }}
          APT Available: {{ '✅ Yes' if has_apt.stat.exists else '❌ No' }}
- name: Summary Report
  hosts: localhost
  gather_facts: false
  run_once: true

  tasks:
    - name: Display overall summary
      # gather_facts is off, so ansible_date_time is undefined here;
      # strftime works without facts and avoids a template error.
      debug:
        msg: |

          ========================================
          ANSIBLE UPDATE SUMMARY - {{ '%Y-%m-%d' | strftime }}
          ========================================

          Processed hosts:
          - homelab (100.67.40.126)
          - pi-5 (100.77.151.40)
          - vish-concord-nuc (100.72.55.21)
          - pve (100.87.12.28)

          Excluded hosts:
          - Synology devices (atlantis, calypso, setillo) - Use DSM package manager
          - homeassistant - Uses Home Assistant OS package management
          - truenas-scale - Uses TrueNAS package management
          - pi-5-kevin - Currently unreachable

          ✅ homelab: Already has Ansible 2.16.3 (latest)
          📋 Check individual host results above for details

          ========================================
193
docs/advanced/ansible/playbooks/check_apt_proxy.yml
Normal file
193
docs/advanced/ansible/playbooks/check_apt_proxy.yml
Normal file
@@ -0,0 +1,193 @@
---
- name: Check APT Proxy Configuration on Debian/Ubuntu hosts
  hosts: debian_clients
  become: false
  gather_facts: true

  vars:
    expected_proxy_host: "100.103.48.78"  # calypso
    expected_proxy_port: 3142
    apt_proxy_file: /etc/apt/apt.conf.d/01proxy
    expected_proxy_url: "http://{{ expected_proxy_host }}:{{ expected_proxy_port }}/"

  tasks:
    # ---------- System Detection ----------
    - name: Detect OS family
      ansible.builtin.debug:
        msg: "Host {{ inventory_hostname }} is running {{ ansible_os_family }} {{ ansible_distribution }} {{ ansible_distribution_version }}"

    - name: Skip non-Debian systems
      ansible.builtin.meta: end_host
      when: ansible_os_family != "Debian"

    # ---------- APT Proxy Configuration Check ----------
    - name: Check if APT proxy config file exists
      ansible.builtin.stat:
        path: "{{ apt_proxy_file }}"
      register: proxy_file_stat

    - name: Read APT proxy configuration (if exists)
      ansible.builtin.slurp:
        src: "{{ apt_proxy_file }}"
      register: proxy_config_content
      when: proxy_file_stat.stat.exists
      failed_when: false

    - name: Parse proxy configuration
      ansible.builtin.set_fact:
        proxy_config_decoded: "{{ proxy_config_content.content | b64decode }}"
      # Guard on .content (not just the register, which is always defined):
      # a masked slurp failure above leaves the register without content.
      when: proxy_file_stat.stat.exists and proxy_config_content.content is defined

    # ---------- Network Connectivity Test ----------
    - name: Test connectivity to expected proxy server
      ansible.builtin.uri:
        url: "http://{{ expected_proxy_host }}:{{ expected_proxy_port }}/"
        method: HEAD
        timeout: 10
      register: proxy_connectivity
      failed_when: false
      changed_when: false

    # ---------- APT Configuration Analysis ----------
    - name: Check current APT proxy settings via apt-config
      ansible.builtin.command: apt-config dump Acquire::http::Proxy
      register: apt_config_proxy
      changed_when: false
      failed_when: false
      become: true

    - name: Test APT update with current configuration (dry-run)
      ansible.builtin.command: apt-get update --print-uris --dry-run
      register: apt_update_test
      changed_when: false
      failed_when: false
      become: true

    # ---------- Analysis and Reporting ----------
    - name: Analyze proxy configuration status
      ansible.builtin.set_fact:
        proxy_status:
          file_exists: "{{ proxy_file_stat.stat.exists }}"
          file_content: "{{ proxy_config_decoded | default('N/A') }}"
          expected_config: "Acquire::http::Proxy \"{{ expected_proxy_url }}\";"
          # NOTE(review): 200/406 are treated as "proxy alive" - presumably
          # apt-cacher-ng's responses on its root URL; confirm against server.
          proxy_reachable: "{{ proxy_connectivity.status is defined and (proxy_connectivity.status == 200 or proxy_connectivity.status == 406) }}"
          apt_config_output: "{{ apt_config_proxy.stdout | default('N/A') }}"
          using_expected_proxy: "{{ (proxy_config_decoded | default('')) is search(expected_proxy_host) }}"

    # ---------- Health Assertions ----------
    - name: Assert APT proxy is properly configured
      ansible.builtin.assert:
        that:
          - proxy_status.file_exists
          - proxy_status.using_expected_proxy
          - proxy_status.proxy_reachable
        success_msg: "✅ {{ inventory_hostname }} is correctly using APT proxy {{ expected_proxy_host }}:{{ expected_proxy_port }}"
        fail_msg: "❌ {{ inventory_hostname }} APT proxy configuration issues detected"
      # ignore_errors keeps the play going but preserves failed=true in the
      # register; the previous failed_when: false forced failed=false, so
      # "proxy_assertion.failed" below could never be true and the
      # recommendations task was unreachable.
      ignore_errors: true
      register: proxy_assertion

    # ---------- Detailed Summary ----------
    - name: Display comprehensive proxy status
      ansible.builtin.debug:
        msg: |

          🔍 APT Proxy Status for {{ inventory_hostname }}:
          ================================================
          OS: {{ ansible_distribution }} {{ ansible_distribution_version }}

          📁 Configuration File:
          Path: {{ apt_proxy_file }}
          Exists: {{ proxy_status.file_exists }}
          Content: {{ proxy_status.file_content | regex_replace('\n', ' ') }}

          🎯 Expected Configuration:
          {{ proxy_status.expected_config }}

          🌐 Network Connectivity:
          Proxy Server: {{ expected_proxy_host }}:{{ expected_proxy_port }}
          Reachable: {{ proxy_status.proxy_reachable }}
          Response: {{ proxy_connectivity.status | default('N/A') }}

          ⚙️ Current APT Config:
          {{ proxy_status.apt_config_output }}

          ✅ Status: {{ 'CONFIGURED' if proxy_status.using_expected_proxy else 'NOT CONFIGURED' }}
          🔗 Connectivity: {{ 'OK' if proxy_status.proxy_reachable else 'FAILED' }}

          {% if not proxy_assertion.failed %}
          🎉 Result: APT proxy is working correctly!
          {% else %}
          ⚠️ Result: APT proxy needs attention
          {% endif %}

    # ---------- Recommendations ----------
    - name: Provide configuration recommendations
      ansible.builtin.debug:
        msg: |

          💡 Recommendations for {{ inventory_hostname }}:
          {% if not proxy_status.file_exists %}
          - Create APT proxy config: echo 'Acquire::http::Proxy "{{ expected_proxy_url }}";' | sudo tee {{ apt_proxy_file }}
          {% endif %}
          {% if not proxy_status.proxy_reachable %}
          - Check network connectivity to {{ expected_proxy_host }}:{{ expected_proxy_port }}
          - Verify calypso apt-cacher-ng service is running
          {% endif %}
          {% if proxy_status.file_exists and not proxy_status.using_expected_proxy %}
          - Update proxy configuration to use {{ expected_proxy_url }}
          {% endif %}
      when: proxy_assertion.failed

    # ---------- Summary Statistics ----------
    - name: Record results for summary
      ansible.builtin.set_fact:
        host_proxy_result:
          hostname: "{{ inventory_hostname }}"
          configured: "{{ proxy_status.using_expected_proxy }}"
          reachable: "{{ proxy_status.proxy_reachable }}"
          status: "{{ 'OK' if (proxy_status.using_expected_proxy and proxy_status.proxy_reachable) else 'NEEDS_ATTENTION' }}"
# ---------- Final Summary Report ----------
- name: APT Proxy Summary Report
  hosts: localhost
  gather_facts: false
  run_once: true

  vars:
    expected_proxy_host: "100.103.48.78"  # calypso
    expected_proxy_port: 3142

  tasks:
    - name: Collect all host results
      ansible.builtin.set_fact:
        all_results: "{{ groups['debian_clients'] | map('extract', hostvars) | selectattr('host_proxy_result', 'defined') | map(attribute='host_proxy_result') | list }}"
      when: groups['debian_clients'] is defined

    - name: Generate summary statistics
      ansible.builtin.set_fact:
        summary_stats:
          total_hosts: "{{ all_results | length }}"
          # Cast to bool before counting so both genuine booleans and
          # stringified "True"/"False" fact values are tallied correctly.
          configured_hosts: "{{ all_results | map(attribute='configured') | map('bool') | select | list | length }}"
          reachable_hosts: "{{ all_results | map(attribute='reachable') | map('bool') | select | list | length }}"
          healthy_hosts: "{{ all_results | selectattr('status', 'equalto', 'OK') | list | length }}"
      when: all_results is defined

    - name: Display final summary
      ansible.builtin.debug:
        msg: |

          📊 APT PROXY HEALTH SUMMARY
          ===========================
          Total Debian Clients: {{ summary_stats.total_hosts | default(0) }}
          Properly Configured: {{ summary_stats.configured_hosts | default(0) }}
          Proxy Reachable: {{ summary_stats.reachable_hosts | default(0) }}
          Fully Healthy: {{ summary_stats.healthy_hosts | default(0) }}

          🎯 Target Proxy: calypso ({{ expected_proxy_host }}:{{ expected_proxy_port }})

          {% if summary_stats.healthy_hosts | default(0) | int == summary_stats.total_hosts | default(0) | int %}
          🎉 ALL SYSTEMS OPTIMAL - APT proxy working perfectly across all clients!
          {% else %}
          ⚠️ Some systems need attention - check individual host reports above
          {% endif %}
      when: summary_stats is defined
26
docs/advanced/ansible/playbooks/cleanup.yml
Normal file
26
docs/advanced/ansible/playbooks/cleanup.yml
Normal file
@@ -0,0 +1,26 @@
---
# Remove unused packages and clear out /tmp on all hosts.
- name: Clean up unused packages and temporary files
  hosts: all
  become: true
  tasks:
    - name: Autoremove unused packages
      apt:
        autoremove: true
      when: ansible_os_family == "Debian"

    - name: Clean apt cache
      apt:
        autoclean: true
      when: ansible_os_family == "Debian"

    - name: Find entries under /tmp
      # Enumerate /tmp's contents instead of deleting /tmp itself:
      # removing the directory breaks running services that hold sockets
      # there and loses its sticky bit, mount point, and security context.
      find:
        paths: /tmp
        file_type: any
        hidden: true
        recurse: false
      register: tmp_entries

    - name: Clear temporary files
      file:
        path: "{{ item.path }}"
        state: absent
      loop: "{{ tmp_entries.files }}"
      loop_control:
        label: "{{ item.path }}"
      ignore_errors: true  # in-use entries are expected; best effort
48
docs/advanced/ansible/playbooks/common/backup_configs.yml
Normal file
48
docs/advanced/ansible/playbooks/common/backup_configs.yml
Normal file
@@ -0,0 +1,48 @@
---
# Backup all docker-compose configs and data
- name: Backup Docker configurations
  hosts: "{{ target_host | default('all') }}"
  gather_facts: true

  vars:
    # Destination on the CONTROL node where archives are collected.
    backup_dest: "{{ backup_path | default('/backup') }}"
    backup_timestamp: "{{ ansible_date_time.date }}_{{ ansible_date_time.hour }}{{ ansible_date_time.minute }}"

  tasks:
    - name: Create backup directory
      ansible.builtin.file:
        path: "{{ backup_dest }}/{{ inventory_hostname }}"
        state: directory
        mode: '0755'
      # NOTE(review): become here applies the *target host's* ansible_become
      # setting to a localhost task - confirm sudo on the control node is
      # really intended.
      become: "{{ ansible_become | default(false) }}"
      delegate_to: localhost

    - name: Archive docker configs
      # Archives the whole service tree; bulky runtime directories are
      # excluded so only configuration lands in the tarball. (A previous
      # unused "find compose files" task was removed - its result was
      # never consumed.)
      ansible.builtin.archive:
        path: "{{ docker_data_path }}"
        dest: "/tmp/{{ inventory_hostname }}_configs_{{ backup_timestamp }}.tar.gz"
        format: gz
        exclude_path:
          - "*/data/*"
          - "*/logs/*"
          - "*/cache/*"
      become: "{{ ansible_become | default(false) }}"

    - name: Fetch backup to control node
      ansible.builtin.fetch:
        src: "/tmp/{{ inventory_hostname }}_configs_{{ backup_timestamp }}.tar.gz"
        dest: "{{ backup_dest }}/{{ inventory_hostname }}/"
        flat: true

    - name: Clean up remote archive
      ansible.builtin.file:
        path: "/tmp/{{ inventory_hostname }}_configs_{{ backup_timestamp }}.tar.gz"
        state: absent
      become: "{{ ansible_become | default(false) }}"
55
docs/advanced/ansible/playbooks/common/install_docker.yml
Normal file
55
docs/advanced/ansible/playbooks/common/install_docker.yml
Normal file
@@ -0,0 +1,55 @@
---
# Install Docker on a host (for non-Synology systems)
- name: Install Docker
  hosts: "{{ target_host | default('all:!synology') }}"
  become: true
  gather_facts: true

  tasks:
    - name: Install prerequisites
      ansible.builtin.apt:
        name:
          - apt-transport-https
          - ca-certificates
          - curl
          - gnupg
          - lsb-release
          - python3-pip
        state: present
        update_cache: true
      when: ansible_os_family == "Debian"

    - name: Ensure apt keyring directory exists
      ansible.builtin.file:
        path: /etc/apt/keyrings
        state: directory
        mode: '0755'
      when: ansible_os_family == "Debian"

    - name: Add Docker GPG key
      # apt_key is deprecated (apt-key is gone on Debian 12 / Ubuntu 22.04+);
      # install the key as a keyring file and reference it via signed-by.
      ansible.builtin.get_url:
        url: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg
        dest: /etc/apt/keyrings/docker.asc
        mode: '0644'
      when: ansible_os_family == "Debian"

    - name: Add Docker repository
      ansible.builtin.apt_repository:
        repo: >-
          deb [arch={{ 'amd64' if ansible_architecture == 'x86_64'
          else 'arm64' if ansible_architecture == 'aarch64'
          else ansible_architecture }}
          signed-by=/etc/apt/keyrings/docker.asc]
          https://download.docker.com/linux/{{ ansible_distribution | lower }}
          {{ ansible_distribution_release }} stable
        state: present
      when: ansible_os_family == "Debian"

    - name: Install Docker
      ansible.builtin.apt:
        name:
          - docker-ce
          - docker-ce-cli
          - containerd.io
          - docker-compose-plugin
        state: present
        update_cache: true
      when: ansible_os_family == "Debian"

    - name: Ensure Docker service is running
      ansible.builtin.service:
        name: docker
        state: started
        enabled: true

    - name: Add user to docker group
      # Membership takes effect on the user's next login session.
      ansible.builtin.user:
        name: "{{ ansible_user }}"
        groups: docker
        append: true
27
docs/advanced/ansible/playbooks/common/logs.yml
Normal file
27
docs/advanced/ansible/playbooks/common/logs.yml
Normal file
@@ -0,0 +1,27 @@
---
# View logs for a specific service
# Usage: ansible-playbook playbooks/common/logs.yml -e "service_name=plex" -e "target_host=atlantis"
- name: View service logs
  hosts: "{{ target_host }}"
  gather_facts: false

  vars:
    log_lines: 100
    follow_logs: false

  tasks:
    - name: Validate service_name is provided
      ansible.builtin.fail:
        msg: "service_name variable is required. Use -e 'service_name=<name>'"
      when: service_name is not defined

    - name: Refuse to follow logs non-interactively
      # `docker compose logs --follow` never terminates under the command
      # module, which would hang the play forever.
      ansible.builtin.fail:
        msg: "follow_logs is not supported via Ansible; run 'docker compose logs -f' on the host instead."
      when: follow_logs | bool

    - name: Get service logs
      ansible.builtin.command:
        cmd: "docker compose logs --tail={{ log_lines }}"
        chdir: "{{ docker_data_path }}/{{ service_name }}"
      register: logs_result
      changed_when: false  # read-only
      become: "{{ ansible_become | default(false) }}"

    - name: Display logs
      ansible.builtin.debug:
        msg: "{{ logs_result.stdout }}"
23
docs/advanced/ansible/playbooks/common/restart_service.yml
Normal file
23
docs/advanced/ansible/playbooks/common/restart_service.yml
Normal file
@@ -0,0 +1,23 @@
---
# Restart one docker-compose stack on a chosen host.
# Usage: ansible-playbook playbooks/common/restart_service.yml -e "service_name=plex" -e "target_host=atlantis"
- name: Restart Docker service
  hosts: "{{ target_host }}"
  gather_facts: false

  tasks:
    - name: Validate service_name is provided
      # Abort early with a usage hint when the required extra-var is missing.
      ansible.builtin.fail:
        msg: "service_name variable is required. Use -e 'service_name=<name>'"
      when: service_name is not defined

    - name: Restart service
      ansible.builtin.command:
        chdir: "{{ docker_data_path }}/{{ service_name }}"
        cmd: docker compose restart
      become: "{{ ansible_become | default(false) }}"
      register: restart_result

    - name: Display result
      ansible.builtin.debug:
        msg: "Service {{ service_name }} restarted on {{ inventory_hostname }}"
34
docs/advanced/ansible/playbooks/common/setup_directories.yml
Normal file
34
docs/advanced/ansible/playbooks/common/setup_directories.yml
Normal file
@@ -0,0 +1,34 @@
---
# Setup base directories for Docker services
- name: Setup Docker directories
  hosts: "{{ target_host | default('all') }}"
  gather_facts: true

  tasks:
    - name: Create base docker directory
      ansible.builtin.file:
        path: "{{ docker_data_path }}"
        state: directory
        mode: '0755'
      become: "{{ ansible_become | default(false) }}"

    - name: Create common directories
      ansible.builtin.file:
        path: "{{ docker_data_path }}/{{ item }}"
        state: directory
        mode: '0755'
      loop:
        - configs
        - data
        - logs
        - backups
      become: "{{ ansible_become | default(false) }}"

    - name: Create service directories from host_services
      ansible.builtin.file:
        path: "{{ docker_data_path }}/{{ item.stack_dir }}"
        state: directory
        mode: '0755'
      # default([]) already yields an empty loop when host_services is unset,
      # so the previous redundant "is defined" guard was dropped.
      loop: "{{ host_services | default([]) }}"
      become: "{{ ansible_become | default(false) }}"
49
docs/advanced/ansible/playbooks/common/status.yml
Normal file
49
docs/advanced/ansible/playbooks/common/status.yml
Normal file
@@ -0,0 +1,49 @@
---
# Report running containers, stopped containers, and Docker disk usage
# for every targeted host.
- name: Check container status
  hosts: "{{ target_host | default('all') }}"
  gather_facts: true

  tasks:
    - name: Get list of running containers
      ansible.builtin.command:
        cmd: docker ps --format "table {{ '{{' }}.Names{{ '}}' }}\t{{ '{{' }}.Status{{ '}}' }}\t{{ '{{' }}.Image{{ '}}' }}"
      become: "{{ ansible_become | default(false) }}"
      changed_when: false
      register: docker_ps

    - name: Display running containers
      ansible.builtin.debug:
        msg: |

          === {{ inventory_hostname }} ===
          {{ docker_ps.stdout }}

    - name: Get stopped/exited containers
      ansible.builtin.command:
        cmd: docker ps -a --filter "status=exited" --format "table {{ '{{' }}.Names{{ '}}' }}\t{{ '{{' }}.Status{{ '}}' }}"
      become: "{{ ansible_become | default(false) }}"
      changed_when: false
      register: docker_exited

    - name: Display stopped containers
      ansible.builtin.debug:
        msg: |

          === Stopped containers on {{ inventory_hostname }} ===
          {{ docker_exited.stdout }}
      # A single line is just the table header - nothing is stopped.
      when: docker_exited.stdout_lines | length > 1

    - name: Get disk usage
      ansible.builtin.command:
        cmd: docker system df
      become: "{{ ansible_become | default(false) }}"
      changed_when: false
      register: docker_df

    - name: Display disk usage
      ansible.builtin.debug:
        msg: |

          === Docker disk usage on {{ inventory_hostname }} ===
          {{ docker_df.stdout }}
46
docs/advanced/ansible/playbooks/common/update_containers.yml
Normal file
46
docs/advanced/ansible/playbooks/common/update_containers.yml
Normal file
@@ -0,0 +1,46 @@
---
# Update all Docker containers (pull new images and recreate)
- name: Update Docker containers
  hosts: "{{ target_host | default('all') }}"
  gather_facts: true

  vars:
    services: "{{ host_services | default([]) }}"

  tasks:
    - name: Display update info
      ansible.builtin.debug:
        msg: "Updating {{ services | length }} services on {{ inventory_hostname }}"

    - name: Pull latest images for each service
      ansible.builtin.command:
        cmd: docker compose pull
        chdir: "{{ docker_data_path }}/{{ item.stack_dir }}"
      loop: "{{ services }}"
      loop_control:
        label: "{{ item.name }}"
      when: item.enabled | default(true)
      register: pull_result
      changed_when: "'Downloaded' in pull_result.stdout"
      # Best effort: one broken stack must not abort the whole run.
      failed_when: false
      become: "{{ ansible_become | default(false) }}"

    - name: Recreate containers with new images
      ansible.builtin.command:
        cmd: docker compose up -d --remove-orphans
        chdir: "{{ docker_data_path }}/{{ item.stack_dir }}"
      loop: "{{ services }}"
      loop_control:
        label: "{{ item.name }}"
      when: item.enabled | default(true)
      register: up_result
      changed_when: "'Started' in up_result.stdout or 'Recreated' in up_result.stdout"
      failed_when: false
      become: "{{ ansible_become | default(false) }}"

    - name: Clean up unused images
      ansible.builtin.command:
        cmd: docker image prune -af
      when: prune_images | default(true)
      register: prune_result
      # Report changed only when something was actually reclaimed; the
      # previous hard-coded changed_when: false hid real deletions.
      changed_when: "'Total reclaimed space: 0B' not in prune_result.stdout"
      become: "{{ ansible_become | default(false) }}"
62
docs/advanced/ansible/playbooks/configure_apt_proxy.yml
Normal file
62
docs/advanced/ansible/playbooks/configure_apt_proxy.yml
Normal file
@@ -0,0 +1,62 @@
---
- name: Configure APT Proxy on Debian/Ubuntu hosts
  hosts: debian_clients
  become: true
  gather_facts: true

  vars:
    apt_proxy_host: "100.103.48.78"
    apt_proxy_port: 3142
    apt_proxy_file: /etc/apt/apt.conf.d/01proxy

  tasks:
    - name: Verify OS compatibility
      ansible.builtin.assert:
        that:
          - ansible_os_family == "Debian"
        fail_msg: "Host {{ inventory_hostname }} is not Debian-based. Skipping."
        success_msg: "Host {{ inventory_hostname }} is Debian-based."
      tags: verify

    - name: Create APT proxy configuration
      # HTTPS bypasses the cache (apt-cacher-ng cannot cache TLS traffic).
      ansible.builtin.copy:
        dest: "{{ apt_proxy_file }}"
        owner: root
        group: root
        mode: '0644'
        content: |
          Acquire::http::Proxy "http://{{ apt_proxy_host }}:{{ apt_proxy_port }}/";
          Acquire::https::Proxy "false";
      register: proxy_conf
      tags: config

    - name: Ensure APT cache directories exist
      ansible.builtin.file:
        path: /var/cache/apt/archives
        state: directory
        owner: root
        group: root
        mode: '0755'
      tags: config

    - name: Test APT proxy connection (dry-run)
      ansible.builtin.command: >
        apt-get update --print-uris -o Acquire::http::Proxy="http://{{ apt_proxy_host }}:{{ apt_proxy_port }}/"
      register: apt_proxy_test
      changed_when: false
      # Do not fail here: previously a bad proxy aborted the play, which made
      # the failure-report task below unreachable.
      failed_when: false
      tags: verify

    - name: Display proxy test result
      ansible.builtin.debug:
        msg: |
          ✅ {{ inventory_hostname }} is using APT proxy {{ apt_proxy_host }}:{{ apt_proxy_port }}
          {{ apt_proxy_test.stdout | default('') }}
      when: apt_proxy_test.rc == 0
      tags: verify

    - name: Display failure if APT proxy test failed
      ansible.builtin.debug:
        msg: "⚠️ {{ inventory_hostname }} failed to reach APT proxy at {{ apt_proxy_host }}:{{ apt_proxy_port }}"
      when: apt_proxy_test.rc != 0
      tags: verify
35
docs/advanced/ansible/playbooks/deploy_anubis.yml
Normal file
35
docs/advanced/ansible/playbooks/deploy_anubis.yml
Normal file
@@ -0,0 +1,35 @@
---
# Generated deployment playbook for anubis (category: physical, 8 services).
#
# Examples:
#   ansible-playbook playbooks/deploy_anubis.yml
#   ansible-playbook playbooks/deploy_anubis.yml -e "stack_deploy=false"
#   ansible-playbook playbooks/deploy_anubis.yml --check

- name: Deploy services to anubis
  hosts: anubis
  gather_facts: true

  vars:
    services: "{{ host_services | default([]) }}"

  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: "Deploying {{ services | length }} services to {{ inventory_hostname }}"

    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: "{{ docker_data_path }}"
        state: directory
        mode: "0755"

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: "{{ item.stack_dir }}"
        stack_compose_file: "{{ item.compose_file }}"
        stack_env_file: "{{ item.env_file | default(omit) }}"
      loop: "{{ services }}"
      loop_control:
        label: "{{ item.name }}"
      when: item.enabled | default(true)
35
docs/advanced/ansible/playbooks/deploy_atlantis.yml
Normal file
35
docs/advanced/ansible/playbooks/deploy_atlantis.yml
Normal file
@@ -0,0 +1,35 @@
---
# Generated deployment playbook for atlantis (category: synology, 53 services).
#
# Examples:
#   ansible-playbook playbooks/deploy_atlantis.yml
#   ansible-playbook playbooks/deploy_atlantis.yml -e "stack_deploy=false"
#   ansible-playbook playbooks/deploy_atlantis.yml --check

- name: Deploy services to atlantis
  hosts: atlantis
  gather_facts: true

  vars:
    services: "{{ host_services | default([]) }}"

  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: "Deploying {{ services | length }} services to {{ inventory_hostname }}"

    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: "{{ docker_data_path }}"
        state: directory
        mode: "0755"

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: "{{ item.stack_dir }}"
        stack_compose_file: "{{ item.compose_file }}"
        stack_env_file: "{{ item.env_file | default(omit) }}"
      loop: "{{ services }}"
      loop_control:
        label: "{{ item.name }}"
      when: item.enabled | default(true)
35
docs/advanced/ansible/playbooks/deploy_bulgaria_vm.yml
Normal file
35
docs/advanced/ansible/playbooks/deploy_bulgaria_vm.yml
Normal file
@@ -0,0 +1,35 @@
---
# Generated deployment playbook for bulgaria-vm (category: vms, 10 services).
#
# Examples:
#   ansible-playbook playbooks/deploy_bulgaria_vm.yml
#   ansible-playbook playbooks/deploy_bulgaria_vm.yml -e "stack_deploy=false"
#   ansible-playbook playbooks/deploy_bulgaria_vm.yml --check

- name: Deploy services to bulgaria-vm
  hosts: bulgaria_vm
  gather_facts: true

  vars:
    services: "{{ host_services | default([]) }}"

  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: "Deploying {{ services | length }} services to {{ inventory_hostname }}"

    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: "{{ docker_data_path }}"
        state: directory
        mode: "0755"

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: "{{ item.stack_dir }}"
        stack_compose_file: "{{ item.compose_file }}"
        stack_env_file: "{{ item.env_file | default(omit) }}"
      loop: "{{ services }}"
      loop_control:
        label: "{{ item.name }}"
      when: item.enabled | default(true)
35
docs/advanced/ansible/playbooks/deploy_calypso.yml
Normal file
35
docs/advanced/ansible/playbooks/deploy_calypso.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
---
|
||||
# Deployment playbook for calypso
|
||||
# Category: synology
|
||||
# Services: 24
|
||||
#
|
||||
# Usage:
|
||||
# ansible-playbook playbooks/deploy_calypso.yml
|
||||
# ansible-playbook playbooks/deploy_calypso.yml -e "stack_deploy=false"
|
||||
# ansible-playbook playbooks/deploy_calypso.yml --check
|
||||
|
||||
- name: Deploy services to calypso
|
||||
hosts: calypso
|
||||
gather_facts: true
|
||||
vars:
|
||||
services: '{{ host_services | default([]) }}'
|
||||
tasks:
|
||||
- name: Display deployment info
|
||||
ansible.builtin.debug:
|
||||
msg: Deploying {{ services | length }} services to {{ inventory_hostname }}
|
||||
- name: Ensure docker data directory exists
|
||||
ansible.builtin.file:
|
||||
path: '{{ docker_data_path }}'
|
||||
state: directory
|
||||
mode: '0755'
|
||||
- name: Deploy each enabled service
|
||||
ansible.builtin.include_role:
|
||||
name: docker_stack
|
||||
vars:
|
||||
stack_name: '{{ item.stack_dir }}'
|
||||
stack_compose_file: '{{ item.compose_file }}'
|
||||
stack_env_file: '{{ item.env_file | default(omit) }}'
|
||||
loop: '{{ services }}'
|
||||
loop_control:
|
||||
label: '{{ item.name }}'
|
||||
when: item.enabled | default(true)
|
||||
35
docs/advanced/ansible/playbooks/deploy_chicago_vm.yml
Normal file
35
docs/advanced/ansible/playbooks/deploy_chicago_vm.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
---
|
||||
# Deployment playbook for chicago-vm
|
||||
# Category: vms
|
||||
# Services: 7
|
||||
#
|
||||
# Usage:
|
||||
# ansible-playbook playbooks/deploy_chicago_vm.yml
|
||||
# ansible-playbook playbooks/deploy_chicago_vm.yml -e "stack_deploy=false"
|
||||
# ansible-playbook playbooks/deploy_chicago_vm.yml --check
|
||||
|
||||
- name: Deploy services to chicago-vm
|
||||
hosts: chicago_vm
|
||||
gather_facts: true
|
||||
vars:
|
||||
services: '{{ host_services | default([]) }}'
|
||||
tasks:
|
||||
- name: Display deployment info
|
||||
ansible.builtin.debug:
|
||||
msg: Deploying {{ services | length }} services to {{ inventory_hostname }}
|
||||
- name: Ensure docker data directory exists
|
||||
ansible.builtin.file:
|
||||
path: '{{ docker_data_path }}'
|
||||
state: directory
|
||||
mode: '0755'
|
||||
- name: Deploy each enabled service
|
||||
ansible.builtin.include_role:
|
||||
name: docker_stack
|
||||
vars:
|
||||
stack_name: '{{ item.stack_dir }}'
|
||||
stack_compose_file: '{{ item.compose_file }}'
|
||||
stack_env_file: '{{ item.env_file | default(omit) }}'
|
||||
loop: '{{ services }}'
|
||||
loop_control:
|
||||
label: '{{ item.name }}'
|
||||
when: item.enabled | default(true)
|
||||
35
docs/advanced/ansible/playbooks/deploy_concord_nuc.yml
Normal file
35
docs/advanced/ansible/playbooks/deploy_concord_nuc.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
---
|
||||
# Deployment playbook for concord-nuc
|
||||
# Category: physical
|
||||
# Services: 11
|
||||
#
|
||||
# Usage:
|
||||
# ansible-playbook playbooks/deploy_concord_nuc.yml
|
||||
# ansible-playbook playbooks/deploy_concord_nuc.yml -e "stack_deploy=false"
|
||||
# ansible-playbook playbooks/deploy_concord_nuc.yml --check
|
||||
|
||||
- name: Deploy services to concord-nuc
|
||||
hosts: concord_nuc
|
||||
gather_facts: true
|
||||
vars:
|
||||
services: '{{ host_services | default([]) }}'
|
||||
tasks:
|
||||
- name: Display deployment info
|
||||
ansible.builtin.debug:
|
||||
msg: Deploying {{ services | length }} services to {{ inventory_hostname }}
|
||||
- name: Ensure docker data directory exists
|
||||
ansible.builtin.file:
|
||||
path: '{{ docker_data_path }}'
|
||||
state: directory
|
||||
mode: '0755'
|
||||
- name: Deploy each enabled service
|
||||
ansible.builtin.include_role:
|
||||
name: docker_stack
|
||||
vars:
|
||||
stack_name: '{{ item.stack_dir }}'
|
||||
stack_compose_file: '{{ item.compose_file }}'
|
||||
stack_env_file: '{{ item.env_file | default(omit) }}'
|
||||
loop: '{{ services }}'
|
||||
loop_control:
|
||||
label: '{{ item.name }}'
|
||||
when: item.enabled | default(true)
|
||||
35
docs/advanced/ansible/playbooks/deploy_contabo_vm.yml
Normal file
35
docs/advanced/ansible/playbooks/deploy_contabo_vm.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
---
|
||||
# Deployment playbook for contabo-vm
|
||||
# Category: vms
|
||||
# Services: 1
|
||||
#
|
||||
# Usage:
|
||||
# ansible-playbook playbooks/deploy_contabo_vm.yml
|
||||
# ansible-playbook playbooks/deploy_contabo_vm.yml -e "stack_deploy=false"
|
||||
# ansible-playbook playbooks/deploy_contabo_vm.yml --check
|
||||
|
||||
- name: Deploy services to contabo-vm
|
||||
hosts: contabo_vm
|
||||
gather_facts: true
|
||||
vars:
|
||||
services: '{{ host_services | default([]) }}'
|
||||
tasks:
|
||||
- name: Display deployment info
|
||||
ansible.builtin.debug:
|
||||
msg: Deploying {{ services | length }} services to {{ inventory_hostname }}
|
||||
- name: Ensure docker data directory exists
|
||||
ansible.builtin.file:
|
||||
path: '{{ docker_data_path }}'
|
||||
state: directory
|
||||
mode: '0755'
|
||||
- name: Deploy each enabled service
|
||||
ansible.builtin.include_role:
|
||||
name: docker_stack
|
||||
vars:
|
||||
stack_name: '{{ item.stack_dir }}'
|
||||
stack_compose_file: '{{ item.compose_file }}'
|
||||
stack_env_file: '{{ item.env_file | default(omit) }}'
|
||||
loop: '{{ services }}'
|
||||
loop_control:
|
||||
label: '{{ item.name }}'
|
||||
when: item.enabled | default(true)
|
||||
35
docs/advanced/ansible/playbooks/deploy_guava.yml
Normal file
35
docs/advanced/ansible/playbooks/deploy_guava.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
---
|
||||
# Deployment playbook for guava
|
||||
# Category: truenas
|
||||
# Services: 1
|
||||
#
|
||||
# Usage:
|
||||
# ansible-playbook playbooks/deploy_guava.yml
|
||||
# ansible-playbook playbooks/deploy_guava.yml -e "stack_deploy=false"
|
||||
# ansible-playbook playbooks/deploy_guava.yml --check
|
||||
|
||||
- name: Deploy services to guava
|
||||
hosts: guava
|
||||
gather_facts: true
|
||||
vars:
|
||||
services: '{{ host_services | default([]) }}'
|
||||
tasks:
|
||||
- name: Display deployment info
|
||||
ansible.builtin.debug:
|
||||
msg: Deploying {{ services | length }} services to {{ inventory_hostname }}
|
||||
- name: Ensure docker data directory exists
|
||||
ansible.builtin.file:
|
||||
path: '{{ docker_data_path }}'
|
||||
state: directory
|
||||
mode: '0755'
|
||||
- name: Deploy each enabled service
|
||||
ansible.builtin.include_role:
|
||||
name: docker_stack
|
||||
vars:
|
||||
stack_name: '{{ item.stack_dir }}'
|
||||
stack_compose_file: '{{ item.compose_file }}'
|
||||
stack_env_file: '{{ item.env_file | default(omit) }}'
|
||||
loop: '{{ services }}'
|
||||
loop_control:
|
||||
label: '{{ item.name }}'
|
||||
when: item.enabled | default(true)
|
||||
35
docs/advanced/ansible/playbooks/deploy_homelab_vm.yml
Normal file
35
docs/advanced/ansible/playbooks/deploy_homelab_vm.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
---
|
||||
# Deployment playbook for homelab-vm
|
||||
# Category: vms
|
||||
# Services: 33
|
||||
#
|
||||
# Usage:
|
||||
# ansible-playbook playbooks/deploy_homelab_vm.yml
|
||||
# ansible-playbook playbooks/deploy_homelab_vm.yml -e "stack_deploy=false"
|
||||
# ansible-playbook playbooks/deploy_homelab_vm.yml --check
|
||||
|
||||
- name: Deploy services to homelab-vm
|
||||
hosts: homelab_vm
|
||||
gather_facts: true
|
||||
vars:
|
||||
services: '{{ host_services | default([]) }}'
|
||||
tasks:
|
||||
- name: Display deployment info
|
||||
ansible.builtin.debug:
|
||||
msg: Deploying {{ services | length }} services to {{ inventory_hostname }}
|
||||
- name: Ensure docker data directory exists
|
||||
ansible.builtin.file:
|
||||
path: '{{ docker_data_path }}'
|
||||
state: directory
|
||||
mode: '0755'
|
||||
- name: Deploy each enabled service
|
||||
ansible.builtin.include_role:
|
||||
name: docker_stack
|
||||
vars:
|
||||
stack_name: '{{ item.stack_dir }}'
|
||||
stack_compose_file: '{{ item.compose_file }}'
|
||||
stack_env_file: '{{ item.env_file | default(omit) }}'
|
||||
loop: '{{ services }}'
|
||||
loop_control:
|
||||
label: '{{ item.name }}'
|
||||
when: item.enabled | default(true)
|
||||
35
docs/advanced/ansible/playbooks/deploy_lxc.yml
Normal file
35
docs/advanced/ansible/playbooks/deploy_lxc.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
---
|
||||
# Deployment playbook for lxc
|
||||
# Category: proxmox
|
||||
# Services: 1
|
||||
#
|
||||
# Usage:
|
||||
# ansible-playbook playbooks/deploy_lxc.yml
|
||||
# ansible-playbook playbooks/deploy_lxc.yml -e "stack_deploy=false"
|
||||
# ansible-playbook playbooks/deploy_lxc.yml --check
|
||||
|
||||
- name: Deploy services to lxc
|
||||
hosts: lxc
|
||||
gather_facts: true
|
||||
vars:
|
||||
services: '{{ host_services | default([]) }}'
|
||||
tasks:
|
||||
- name: Display deployment info
|
||||
ansible.builtin.debug:
|
||||
msg: Deploying {{ services | length }} services to {{ inventory_hostname }}
|
||||
- name: Ensure docker data directory exists
|
||||
ansible.builtin.file:
|
||||
path: '{{ docker_data_path }}'
|
||||
state: directory
|
||||
mode: '0755'
|
||||
- name: Deploy each enabled service
|
||||
ansible.builtin.include_role:
|
||||
name: docker_stack
|
||||
vars:
|
||||
stack_name: '{{ item.stack_dir }}'
|
||||
stack_compose_file: '{{ item.compose_file }}'
|
||||
stack_env_file: '{{ item.env_file | default(omit) }}'
|
||||
loop: '{{ services }}'
|
||||
loop_control:
|
||||
label: '{{ item.name }}'
|
||||
when: item.enabled | default(true)
|
||||
35
docs/advanced/ansible/playbooks/deploy_matrix_ubuntu_vm.yml
Normal file
35
docs/advanced/ansible/playbooks/deploy_matrix_ubuntu_vm.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
---
|
||||
# Deployment playbook for matrix-ubuntu-vm
|
||||
# Category: vms
|
||||
# Services: 2
|
||||
#
|
||||
# Usage:
|
||||
# ansible-playbook playbooks/deploy_matrix_ubuntu_vm.yml
|
||||
# ansible-playbook playbooks/deploy_matrix_ubuntu_vm.yml -e "stack_deploy=false"
|
||||
# ansible-playbook playbooks/deploy_matrix_ubuntu_vm.yml --check
|
||||
|
||||
- name: Deploy services to matrix-ubuntu-vm
|
||||
hosts: matrix_ubuntu_vm
|
||||
gather_facts: true
|
||||
vars:
|
||||
services: '{{ host_services | default([]) }}'
|
||||
tasks:
|
||||
- name: Display deployment info
|
||||
ansible.builtin.debug:
|
||||
msg: Deploying {{ services | length }} services to {{ inventory_hostname }}
|
||||
- name: Ensure docker data directory exists
|
||||
ansible.builtin.file:
|
||||
path: '{{ docker_data_path }}'
|
||||
state: directory
|
||||
mode: '0755'
|
||||
- name: Deploy each enabled service
|
||||
ansible.builtin.include_role:
|
||||
name: docker_stack
|
||||
vars:
|
||||
stack_name: '{{ item.stack_dir }}'
|
||||
stack_compose_file: '{{ item.compose_file }}'
|
||||
stack_env_file: '{{ item.env_file | default(omit) }}'
|
||||
loop: '{{ services }}'
|
||||
loop_control:
|
||||
label: '{{ item.name }}'
|
||||
when: item.enabled | default(true)
|
||||
35
docs/advanced/ansible/playbooks/deploy_rpi5_vish.yml
Normal file
35
docs/advanced/ansible/playbooks/deploy_rpi5_vish.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
---
|
||||
# Deployment playbook for rpi5-vish
|
||||
# Category: edge
|
||||
# Services: 3
|
||||
#
|
||||
# Usage:
|
||||
# ansible-playbook playbooks/deploy_rpi5_vish.yml
|
||||
# ansible-playbook playbooks/deploy_rpi5_vish.yml -e "stack_deploy=false"
|
||||
# ansible-playbook playbooks/deploy_rpi5_vish.yml --check
|
||||
|
||||
- name: Deploy services to rpi5-vish
|
||||
hosts: rpi5_vish
|
||||
gather_facts: true
|
||||
vars:
|
||||
services: '{{ host_services | default([]) }}'
|
||||
tasks:
|
||||
- name: Display deployment info
|
||||
ansible.builtin.debug:
|
||||
msg: Deploying {{ services | length }} services to {{ inventory_hostname }}
|
||||
- name: Ensure docker data directory exists
|
||||
ansible.builtin.file:
|
||||
path: '{{ docker_data_path }}'
|
||||
state: directory
|
||||
mode: '0755'
|
||||
- name: Deploy each enabled service
|
||||
ansible.builtin.include_role:
|
||||
name: docker_stack
|
||||
vars:
|
||||
stack_name: '{{ item.stack_dir }}'
|
||||
stack_compose_file: '{{ item.compose_file }}'
|
||||
stack_env_file: '{{ item.env_file | default(omit) }}'
|
||||
loop: '{{ services }}'
|
||||
loop_control:
|
||||
label: '{{ item.name }}'
|
||||
when: item.enabled | default(true)
|
||||
35
docs/advanced/ansible/playbooks/deploy_setillo.yml
Normal file
35
docs/advanced/ansible/playbooks/deploy_setillo.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
---
|
||||
# Deployment playbook for setillo
|
||||
# Category: synology
|
||||
# Services: 2
|
||||
#
|
||||
# Usage:
|
||||
# ansible-playbook playbooks/deploy_setillo.yml
|
||||
# ansible-playbook playbooks/deploy_setillo.yml -e "stack_deploy=false"
|
||||
# ansible-playbook playbooks/deploy_setillo.yml --check
|
||||
|
||||
- name: Deploy services to setillo
|
||||
hosts: setillo
|
||||
gather_facts: true
|
||||
vars:
|
||||
services: '{{ host_services | default([]) }}'
|
||||
tasks:
|
||||
- name: Display deployment info
|
||||
ansible.builtin.debug:
|
||||
msg: Deploying {{ services | length }} services to {{ inventory_hostname }}
|
||||
- name: Ensure docker data directory exists
|
||||
ansible.builtin.file:
|
||||
path: '{{ docker_data_path }}'
|
||||
state: directory
|
||||
mode: '0755'
|
||||
- name: Deploy each enabled service
|
||||
ansible.builtin.include_role:
|
||||
name: docker_stack
|
||||
vars:
|
||||
stack_name: '{{ item.stack_dir }}'
|
||||
stack_compose_file: '{{ item.compose_file }}'
|
||||
stack_env_file: '{{ item.env_file | default(omit) }}'
|
||||
loop: '{{ services }}'
|
||||
loop_control:
|
||||
label: '{{ item.name }}'
|
||||
when: item.enabled | default(true)
|
||||
17
docs/advanced/ansible/playbooks/install_tools.yml
Normal file
17
docs/advanced/ansible/playbooks/install_tools.yml
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
- name: Install common diagnostic tools
|
||||
hosts: all
|
||||
become: true
|
||||
tasks:
|
||||
- name: Install essential packages
|
||||
package:
|
||||
name:
|
||||
- htop
|
||||
- curl
|
||||
- wget
|
||||
- net-tools
|
||||
- iperf3
|
||||
- ncdu
|
||||
- vim
|
||||
- git
|
||||
state: present
|
||||
137
docs/advanced/ansible/playbooks/synology_health.yml
Normal file
137
docs/advanced/ansible/playbooks/synology_health.yml
Normal file
@@ -0,0 +1,137 @@
|
||||
---
|
||||
- name: Synology Healthcheck
|
||||
hosts: synology
|
||||
gather_facts: yes
|
||||
become: false
|
||||
|
||||
vars:
|
||||
ts_candidates:
|
||||
- /var/packages/Tailscale/target/bin/tailscale
|
||||
- /usr/bin/tailscale
|
||||
|
||||
tasks:
|
||||
# ---------- System info ----------
|
||||
- name: DSM version
|
||||
ansible.builtin.shell: |
|
||||
set -e
|
||||
if [ -f /etc.defaults/VERSION ]; then
|
||||
. /etc.defaults/VERSION
|
||||
echo "${productversion:-unknown} (build ${buildnumber:-unknown})"
|
||||
else
|
||||
echo "unknown"
|
||||
fi
|
||||
register: dsm_version
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: Uptime (pretty)
|
||||
ansible.builtin.command: uptime -p
|
||||
register: uptime_pretty
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: Load averages
|
||||
ansible.builtin.command: cat /proc/loadavg
|
||||
register: loadavg
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: Memory summary (MB)
|
||||
ansible.builtin.command: free -m
|
||||
register: mem
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
# ---------- Storage ----------
|
||||
- name: Disk usage of root (/)
|
||||
ansible.builtin.shell: df -P / | awk 'NR==2 {print $5}' | tr -d '%'
|
||||
register: root_usage
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: Disk usage of /volume1 (if present)
|
||||
ansible.builtin.shell: |
|
||||
if mountpoint -q /volume1; then
|
||||
df -P /volume1 | awk 'NR==2 {print $5}' | tr -d '%'
|
||||
fi
|
||||
register: vol1_usage
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: RAID status (/proc/mdstat)
|
||||
ansible.builtin.command: cat /proc/mdstat
|
||||
register: mdstat
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
# ---------- Tailscale (optional) ----------
|
||||
- name: Detect Tailscale binary path (first that exists)
|
||||
ansible.builtin.shell: |
|
||||
for p in {{ ts_candidates | join(' ') }}; do
|
||||
[ -x "$p" ] && echo "$p" && exit 0
|
||||
done
|
||||
echo ""
|
||||
register: ts_bin
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: Get Tailscale IPv4 (if tailscale present)
|
||||
ansible.builtin.command: "{{ ts_bin.stdout }} ip -4"
|
||||
register: ts_ip
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
when: ts_bin.stdout | length > 0
|
||||
|
||||
- name: Get Tailscale self status (brief)
|
||||
ansible.builtin.command: "{{ ts_bin.stdout }} status --self"
|
||||
register: ts_status
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
when: ts_bin.stdout | length > 0
|
||||
|
||||
# ---------- Assertions (lightweight, no sudo) ----------
|
||||
- name: Check RAID not degraded/resyncing
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- mdstat.stdout is not search('degraded', ignorecase=True)
|
||||
- mdstat.stdout is not search('resync', ignorecase=True)
|
||||
success_msg: "RAID OK"
|
||||
fail_msg: "RAID issue detected (degraded or resync) — check Storage Manager"
|
||||
changed_when: false
|
||||
|
||||
- name: Check root FS usage < 90%
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- (root_usage.stdout | default('0')) | int < 90
|
||||
success_msg: "Root filesystem usage OK ({{ root_usage.stdout | default('n/a') }}%)"
|
||||
fail_msg: "Root filesystem high ({{ root_usage.stdout | default('n/a') }}%)"
|
||||
changed_when: false
|
||||
|
||||
- name: Check /volume1 usage < 90% (if present)
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- (vol1_usage.stdout | default('0')) | int < 90
|
||||
success_msg: "/volume1 usage OK ({{ vol1_usage.stdout | default('n/a') }}%)"
|
||||
fail_msg: "/volume1 usage high ({{ vol1_usage.stdout | default('n/a') }}%)"
|
||||
when: vol1_usage.stdout is defined and vol1_usage.stdout != ""
|
||||
changed_when: false
|
||||
|
||||
# ---------- Summary (shows the results) ----------
|
||||
- name: Summary
|
||||
ansible.builtin.debug:
|
||||
msg: |
|
||||
Host: {{ inventory_hostname }}
|
||||
DSM: {{ dsm_version.stdout | default('unknown') }}
|
||||
Uptime: {{ uptime_pretty.stdout | default('n/a') }}
|
||||
Load: {{ loadavg.stdout | default('n/a') }}
|
||||
Memory (MB):
|
||||
{{ (mem.stdout | default('n/a')) | indent(2) }}
|
||||
Root usage: {{ root_usage.stdout | default('n/a') }}%
|
||||
Volume1 usage: {{ (vol1_usage.stdout | default('n/a')) if (vol1_usage.stdout is defined and vol1_usage.stdout != "") else 'n/a' }}%
|
||||
RAID (/proc/mdstat):
|
||||
{{ (mdstat.stdout | default('n/a')) | indent(2) }}
|
||||
Tailscale:
|
||||
binary: {{ (ts_bin.stdout | default('not found')) if ts_bin.stdout|length > 0 else 'not found' }}
|
||||
ip: {{ ts_ip.stdout | default('n/a') }}
|
||||
self:
|
||||
{{ (ts_status.stdout | default('n/a')) | indent(2) }}
|
||||
12
docs/advanced/ansible/playbooks/system_info.yml
Normal file
12
docs/advanced/ansible/playbooks/system_info.yml
Normal file
@@ -0,0 +1,12 @@
|
||||
---
|
||||
- name: Display system information
|
||||
hosts: all
|
||||
gather_facts: yes
|
||||
tasks:
|
||||
- name: Print system details
|
||||
debug:
|
||||
msg:
|
||||
- "Hostname: {{ ansible_hostname }}"
|
||||
- "OS: {{ ansible_distribution }} {{ ansible_distribution_version }}"
|
||||
- "Kernel: {{ ansible_kernel }}"
|
||||
- "Uptime (hours): {{ ansible_uptime_seconds | int / 3600 | round(1) }}"
|
||||
75
docs/advanced/ansible/playbooks/tailscale_health.yml
Normal file
75
docs/advanced/ansible/playbooks/tailscale_health.yml
Normal file
@@ -0,0 +1,75 @@
|
||||
---
|
||||
- name: Tailscale Health Check (Homelab)
|
||||
hosts: active # or "all" if you want to check everything
|
||||
gather_facts: yes
|
||||
become: false
|
||||
|
||||
vars:
|
||||
tailscale_bin: "/usr/bin/tailscale"
|
||||
tailscale_service: "tailscaled"
|
||||
|
||||
tasks:
|
||||
|
||||
- name: Verify Tailscale binary exists
|
||||
stat:
|
||||
path: "{{ tailscale_bin }}"
|
||||
register: ts_bin
|
||||
ignore_errors: true
|
||||
|
||||
- name: Skip host if Tailscale not installed
|
||||
meta: end_host
|
||||
when: not ts_bin.stat.exists
|
||||
|
||||
- name: Get Tailscale CLI version
|
||||
command: "{{ tailscale_bin }} version"
|
||||
register: ts_version
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: Get Tailscale status (JSON)
|
||||
command: "{{ tailscale_bin }} status --json"
|
||||
register: ts_status
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: Parse Tailscale JSON
|
||||
set_fact:
|
||||
ts_parsed: "{{ ts_status.stdout | from_json }}"
|
||||
when: ts_status.rc == 0 and (ts_status.stdout | length) > 0 and ts_status.stdout is search('{')
|
||||
|
||||
- name: Extract important fields
|
||||
set_fact:
|
||||
ts_backend_state: "{{ ts_parsed.BackendState | default('unknown') }}"
|
||||
ts_ips: "{{ ts_parsed.Self.TailscaleIPs | default([]) }}"
|
||||
ts_hostname: "{{ ts_parsed.Self.HostName | default(inventory_hostname) }}"
|
||||
when: ts_parsed is defined
|
||||
|
||||
- name: Report healthy nodes
|
||||
debug:
|
||||
msg: >-
|
||||
HEALTHY: {{ ts_hostname }}
|
||||
version={{ ts_version.stdout | default('n/a') }},
|
||||
backend={{ ts_backend_state }},
|
||||
ips={{ ts_ips }}
|
||||
when:
|
||||
- ts_parsed is defined
|
||||
- ts_backend_state == "Running"
|
||||
- ts_ips | length > 0
|
||||
|
||||
- name: Report unhealthy or unreachable nodes
|
||||
debug:
|
||||
msg: >-
|
||||
UNHEALTHY: {{ inventory_hostname }}
|
||||
rc={{ ts_status.rc }},
|
||||
backend={{ ts_backend_state | default('n/a') }},
|
||||
ips={{ ts_ips | default([]) }},
|
||||
version={{ ts_version.stdout | default('n/a') }}
|
||||
when: ts_parsed is not defined or ts_backend_state != "Running"
|
||||
|
||||
- name: Always print concise summary
|
||||
debug:
|
||||
msg: >-
|
||||
Host={{ inventory_hostname }},
|
||||
Version={{ ts_version.stdout | default('n/a') }},
|
||||
Backend={{ ts_backend_state | default('unknown') }},
|
||||
IPs={{ ts_ips | default([]) }}
|
||||
96
docs/advanced/ansible/playbooks/update_ansible.yml
Normal file
96
docs/advanced/ansible/playbooks/update_ansible.yml
Normal file
@@ -0,0 +1,96 @@
|
||||
---
|
||||
# Update and upgrade Ansible on Linux hosts
|
||||
# Excludes Synology devices and handles Home Assistant carefully
|
||||
# Created: February 8, 2026
|
||||
|
||||
- name: Update package cache and upgrade Ansible on Linux hosts
|
||||
hosts: debian_clients:!synology
|
||||
gather_facts: yes
|
||||
become: yes
|
||||
vars:
|
||||
ansible_become_pass: "{{ ansible_ssh_pass | default(omit) }}"
|
||||
|
||||
tasks:
|
||||
- name: Display target host information
|
||||
debug:
|
||||
msg: "Updating Ansible on {{ inventory_hostname }} ({{ ansible_host }})"
|
||||
|
||||
- name: Check if host is Home Assistant
|
||||
set_fact:
|
||||
is_homeassistant: "{{ inventory_hostname == 'homeassistant' }}"
|
||||
|
||||
- name: Skip Home Assistant with warning
|
||||
debug:
|
||||
msg: "Skipping {{ inventory_hostname }} - Home Assistant uses its own package management"
|
||||
when: is_homeassistant
|
||||
|
||||
- name: Update apt package cache
|
||||
apt:
|
||||
update_cache: yes
|
||||
cache_valid_time: 3600
|
||||
when: not is_homeassistant
|
||||
register: apt_update_result
|
||||
|
||||
- name: Display apt update results
|
||||
debug:
|
||||
msg: "APT cache updated on {{ inventory_hostname }}"
|
||||
when: not is_homeassistant and apt_update_result is succeeded
|
||||
|
||||
- name: Check current Ansible version
|
||||
command: ansible --version
|
||||
register: current_ansible_version
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
when: not is_homeassistant
|
||||
|
||||
- name: Display current Ansible version
|
||||
debug:
|
||||
msg: "Current Ansible version on {{ inventory_hostname }}: {{ current_ansible_version.stdout_lines[0] if current_ansible_version.stdout_lines else 'Not installed' }}"
|
||||
when: not is_homeassistant and current_ansible_version is defined
|
||||
|
||||
- name: Upgrade Ansible package
|
||||
apt:
|
||||
name: ansible
|
||||
state: latest
|
||||
only_upgrade: yes
|
||||
when: not is_homeassistant
|
||||
register: ansible_upgrade_result
|
||||
|
||||
- name: Display Ansible upgrade results
|
||||
debug:
|
||||
msg: |
|
||||
Ansible upgrade on {{ inventory_hostname }}:
|
||||
{% if ansible_upgrade_result.changed %}
|
||||
✅ Ansible was upgraded successfully
|
||||
{% else %}
|
||||
ℹ️ Ansible was already at the latest version
|
||||
{% endif %}
|
||||
when: not is_homeassistant
|
||||
|
||||
- name: Check new Ansible version
|
||||
command: ansible --version
|
||||
register: new_ansible_version
|
||||
changed_when: false
|
||||
when: not is_homeassistant and ansible_upgrade_result is succeeded
|
||||
|
||||
- name: Display new Ansible version
|
||||
debug:
|
||||
msg: "New Ansible version on {{ inventory_hostname }}: {{ new_ansible_version.stdout_lines[0] }}"
|
||||
when: not is_homeassistant and new_ansible_version is defined
|
||||
|
||||
- name: Summary of changes
|
||||
debug:
|
||||
msg: |
|
||||
Summary for {{ inventory_hostname }}:
|
||||
{% if is_homeassistant %}
|
||||
- Skipped (Home Assistant uses its own package management)
|
||||
{% else %}
|
||||
- APT cache: {{ 'Updated' if apt_update_result.changed else 'Already current' }}
|
||||
- Ansible: {{ 'Upgraded' if ansible_upgrade_result.changed else 'Already latest version' }}
|
||||
{% endif %}
|
||||
|
||||
handlers:
|
||||
- name: Clean apt cache
|
||||
apt:
|
||||
autoclean: yes
|
||||
when: not is_homeassistant
|
||||
122
docs/advanced/ansible/playbooks/update_ansible_targeted.yml
Normal file
122
docs/advanced/ansible/playbooks/update_ansible_targeted.yml
Normal file
@@ -0,0 +1,122 @@
|
||||
---
|
||||
# Targeted Ansible update for confirmed Debian/Ubuntu hosts
|
||||
# Excludes Synology, TrueNAS, Home Assistant, and unreachable hosts
|
||||
# Created: February 8, 2026
|
||||
|
||||
- name: Update and upgrade Ansible on confirmed Linux hosts
|
||||
hosts: homelab,pi-5,vish-concord-nuc,pve
|
||||
gather_facts: yes
|
||||
become: yes
|
||||
serial: 1 # Process one host at a time for better control
|
||||
|
||||
tasks:
|
||||
- name: Display target host information
|
||||
debug:
|
||||
msg: |
|
||||
Processing: {{ inventory_hostname }} ({{ ansible_host }})
|
||||
OS: {{ ansible_distribution }} {{ ansible_distribution_version }}
|
||||
Python: {{ ansible_python_version }}
|
||||
|
||||
- name: Check if apt is available
|
||||
stat:
|
||||
path: /usr/bin/apt
|
||||
register: apt_available
|
||||
|
||||
- name: Skip non-Debian hosts
|
||||
debug:
|
||||
msg: "Skipping {{ inventory_hostname }} - apt not available"
|
||||
when: not apt_available.stat.exists
|
||||
|
||||
- name: Update apt package cache (with retry)
|
||||
apt:
|
||||
update_cache: yes
|
||||
cache_valid_time: 0 # Force update
|
||||
register: apt_update_result
|
||||
retries: 3
|
||||
delay: 10
|
||||
when: apt_available.stat.exists
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Display apt update status
|
||||
debug:
|
||||
msg: |
|
||||
APT update on {{ inventory_hostname }}:
|
||||
{% if apt_update_result is succeeded %}
|
||||
✅ Success - Cache updated
|
||||
{% elif apt_update_result is failed %}
|
||||
❌ Failed - {{ apt_update_result.msg | default('Unknown error') }}
|
||||
{% else %}
|
||||
⏭️ Skipped - apt not available
|
||||
{% endif %}
|
||||
|
||||
- name: Check if Ansible is installed
|
||||
command: which ansible
|
||||
register: ansible_installed
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
when: apt_available.stat.exists and apt_update_result is succeeded
|
||||
|
||||
- name: Get current Ansible version if installed
|
||||
command: ansible --version
|
||||
register: current_ansible_version
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
when: ansible_installed is succeeded and ansible_installed.rc == 0
|
||||
|
||||
- name: Display current Ansible status
|
||||
debug:
|
||||
msg: |
|
||||
Ansible status on {{ inventory_hostname }}:
|
||||
{% if ansible_installed is defined and ansible_installed.rc == 0 %}
|
||||
📦 Installed: {{ current_ansible_version.stdout_lines[0] if current_ansible_version.stdout_lines else 'Version check failed' }}
|
||||
{% else %}
|
||||
📦 Not installed
|
||||
{% endif %}
|
||||
|
||||
- name: Install or upgrade Ansible
|
||||
apt:
|
||||
name: ansible
|
||||
state: latest
|
||||
update_cache: no # We already updated above
|
||||
register: ansible_upgrade_result
|
||||
when: apt_available.stat.exists and apt_update_result is succeeded
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Display Ansible installation/upgrade results
|
||||
debug:
|
||||
msg: |
|
||||
Ansible operation on {{ inventory_hostname }}:
|
||||
{% if ansible_upgrade_result is succeeded %}
|
||||
{% if ansible_upgrade_result.changed %}
|
||||
✅ {{ 'Installed' if ansible_installed.rc != 0 else 'Upgraded' }} successfully
|
||||
{% else %}
|
||||
ℹ️ Already at latest version
|
||||
{% endif %}
|
||||
{% elif ansible_upgrade_result is failed %}
|
||||
❌ Failed: {{ ansible_upgrade_result.msg | default('Unknown error') }}
|
||||
{% else %}
|
||||
⏭️ Skipped due to previous errors
|
||||
{% endif %}
|
||||
|
||||
- name: Verify final Ansible version
|
||||
command: ansible --version
|
||||
register: final_ansible_version
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
when: ansible_upgrade_result is succeeded
|
||||
|
||||
- name: Final status summary
|
||||
debug:
|
||||
msg: |
|
||||
=== SUMMARY FOR {{ inventory_hostname | upper }} ===
|
||||
Host: {{ ansible_host }}
|
||||
OS: {{ ansible_distribution }} {{ ansible_distribution_version }}
|
||||
APT Update: {{ '✅ Success' if apt_update_result is succeeded else '❌ Failed' if apt_update_result is defined else '⏭️ Skipped' }}
|
||||
Ansible: {% if final_ansible_version is succeeded %}{{ final_ansible_version.stdout_lines[0] }}{% elif ansible_upgrade_result is succeeded %}{{ 'Installed/Updated' if ansible_upgrade_result.changed else 'Already current' }}{% else %}{{ '❌ Failed or skipped' }}{% endif %}
|
||||
|
||||
post_tasks:
|
||||
- name: Clean up apt cache
|
||||
apt:
|
||||
autoclean: yes
|
||||
when: apt_available.stat.exists and apt_update_result is succeeded
|
||||
ignore_errors: yes
|
||||
8
docs/advanced/ansible/playbooks/update_system.yml
Normal file
8
docs/advanced/ansible/playbooks/update_system.yml
Normal file
@@ -0,0 +1,8 @@
|
||||
- hosts: all
|
||||
become: true
|
||||
tasks:
|
||||
- name: Update apt cache and upgrade packages
|
||||
apt:
|
||||
update_cache: yes
|
||||
upgrade: dist
|
||||
when: ansible_os_family == "Debian"
|
||||
Reference in New Issue
Block a user