Sanitized mirror from private repository - 2026-04-20 01:32:01 UTC
Some checks failed
Documentation / Build Docusaurus (push) Failing after 5m3s
Documentation / Deploy to GitHub Pages (push) Has been skipped

This commit is contained in:
Gitea Mirror Bot
2026-04-20 01:32:01 +00:00
commit e7652c8dab
1445 changed files with 364095 additions and 0 deletions

View File

@@ -0,0 +1,48 @@
---
# Backup all docker-compose configs and data
#
# Creates a per-host tar.gz of docker_data_path (configs only — bulky
# data/logs/cache trees are excluded) and fetches it to the control node.
- name: Backup Docker configurations
  hosts: "{{ target_host | default('all') }}"
  # Facts are required: backup_timestamp is built from ansible_date_time.
  gather_facts: true
  vars:
    backup_dest: "{{ backup_path | default('/backup') }}"
    backup_timestamp: "{{ ansible_date_time.date }}_{{ ansible_date_time.hour }}{{ ansible_date_time.minute }}"
  tasks:
    # The destination directory lives on the control node (delegate_to).
    - name: Create backup directory
      ansible.builtin.file:
        path: "{{ backup_dest }}/{{ inventory_hostname }}"
        state: directory
        mode: '0755'
      become: "{{ ansible_become | default(false) }}"
      delegate_to: localhost

    # NOTE(review): the result is registered but not consumed by any later
    # task — kept for ad-hoc inspection/reporting.
    - name: Find all docker-compose files
      ansible.builtin.find:
        paths: "{{ docker_data_path }}"
        patterns: "docker-compose.yml,docker-compose.yaml,.env"
        # .env is a dotfile; without hidden: true the find module silently
        # skips hidden files and that pattern can never match.
        hidden: true
        recurse: true
      register: compose_files

    - name: Archive docker configs
      ansible.builtin.archive:
        path: "{{ docker_data_path }}"
        dest: "/tmp/{{ inventory_hostname }}_configs_{{ backup_timestamp }}.tar.gz"
        format: gz
        exclude_path:
          - "*/data/*"
          - "*/logs/*"
          - "*/cache/*"
      become: "{{ ansible_become | default(false) }}"

    - name: Fetch backup to control node
      ansible.builtin.fetch:
        src: "/tmp/{{ inventory_hostname }}_configs_{{ backup_timestamp }}.tar.gz"
        dest: "{{ backup_dest }}/{{ inventory_hostname }}/"
        flat: true

    - name: Clean up remote archive
      ansible.builtin.file:
        path: "/tmp/{{ inventory_hostname }}_configs_{{ backup_timestamp }}.tar.gz"
        state: absent
      become: "{{ ansible_become | default(false) }}"

View File

@@ -0,0 +1,55 @@
---
# Install Docker on a host (for non-Synology systems)
#
# Package installation is Debian-family only; the final service/user tasks
# run unconditionally and assume Docker is present by some other means on
# other OS families.
- name: Install Docker
  hosts: "{{ target_host | default('all:!synology') }}"
  become: true
  gather_facts: true
  tasks:
    - name: Install prerequisites
      ansible.builtin.apt:
        name:
          - apt-transport-https
          - ca-certificates
          - curl
          - gnupg
          - lsb-release
          - python3-pip
        state: present
        update_cache: true
      when: ansible_os_family == "Debian"

    # apt_key is deprecated (apt-key itself has been removed from modern
    # Debian/Ubuntu); install the key under /etc/apt/keyrings and reference
    # it from the repo line via signed-by instead.
    - name: Ensure apt keyring directory exists
      ansible.builtin.file:
        path: /etc/apt/keyrings
        state: directory
        mode: '0755'
      when: ansible_os_family == "Debian"

    - name: Add Docker GPG key
      ansible.builtin.get_url:
        url: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg
        dest: /etc/apt/keyrings/docker.asc
        mode: '0644'
      when: ansible_os_family == "Debian"

    - name: Add Docker repository
      ansible.builtin.apt_repository:
        repo: "deb [signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable"
        state: present
      when: ansible_os_family == "Debian"

    - name: Install Docker
      ansible.builtin.apt:
        name:
          - docker-ce
          - docker-ce-cli
          - containerd.io
          - docker-compose-plugin
        state: present
        update_cache: true
      when: ansible_os_family == "Debian"

    - name: Ensure Docker service is running
      ansible.builtin.service:
        name: docker
        state: started
        enabled: true

    # NOTE(review): assumes ansible_user is defined for every targeted host —
    # confirm against inventory defaults.
    - name: Add user to docker group
      ansible.builtin.user:
        name: "{{ ansible_user }}"
        groups: docker
        append: true

View File

@@ -0,0 +1,27 @@
---
# View logs for a specific service
# Usage: ansible-playbook playbooks/common/logs.yml -e "service_name=plex" -e "target_host=atlantis"
- name: View service logs
  hosts: "{{ target_host }}"
  gather_facts: false
  vars:
    log_lines: 100
    # WARNING: with follow_logs=true, `docker compose logs --follow` streams
    # forever and the command module waits for exit — the play will hang.
    # Leave false for one-shot runs.
    follow_logs: false
  tasks:
    - name: Validate service_name is provided
      ansible.builtin.fail:
        msg: "service_name variable is required. Use -e 'service_name=<name>'"
      when: service_name is not defined

    - name: Get service logs
      ansible.builtin.command:
        cmd: "docker compose logs --tail={{ log_lines }} {{ '--follow' if follow_logs else '' }}"
        chdir: "{{ docker_data_path }}/{{ service_name }}"
      register: logs_result
      # Reading logs never mutates state; without this the command module
      # reports "changed" on every run.
      changed_when: false
      become: "{{ ansible_become | default(false) }}"

    - name: Display logs
      ansible.builtin.debug:
        msg: "{{ logs_result.stdout }}"

View File

@@ -0,0 +1,23 @@
---
# Restart a specific service
# Usage: ansible-playbook playbooks/common/restart_service.yml -e "service_name=plex" -e "target_host=atlantis"
- name: Restart Docker service
  hosts: "{{ target_host }}"
  gather_facts: false
  tasks:
    # Guard clause: refuse to run without an explicit service name.
    - name: Validate service_name is provided
      ansible.builtin.fail:
        msg: "service_name variable is required. Use -e 'service_name=<name>'"
      when: service_name is undefined

    # Restart is performed from the stack directory so compose picks up the
    # right project.
    - name: Restart service
      ansible.builtin.command:
        chdir: "{{ docker_data_path }}/{{ service_name }}"
        cmd: "docker compose restart"
      become: "{{ ansible_become | default(false) }}"
      register: restart_result

    # Human-readable confirmation once the restart command has returned.
    - name: Display result
      ansible.builtin.debug:
        msg: "Service {{ service_name }} restarted on {{ inventory_hostname }}"

View File

@@ -0,0 +1,34 @@
---
# Setup base directories for Docker services
- name: Setup Docker directories
  hosts: "{{ target_host | default('all') }}"
  gather_facts: true
  tasks:
    - name: Create base docker directory
      ansible.builtin.file:
        path: "{{ docker_data_path }}"
        state: directory
        mode: '0755'
      become: "{{ ansible_become | default(false) }}"

    - name: Create common directories
      ansible.builtin.file:
        path: "{{ docker_data_path }}/{{ item }}"
        state: directory
        mode: '0755'
      loop:
        - configs
        - data
        - logs
        - backups
      become: "{{ ansible_become | default(false) }}"

    # One directory per service entry. The loop default already yields an
    # empty list when host_services is undefined, so the previous
    # `when: host_services is defined` guard was redundant and is dropped.
    - name: Create service directories from host_services
      ansible.builtin.file:
        path: "{{ docker_data_path }}/{{ item.stack_dir }}"
        state: directory
        mode: '0755'
      loop: "{{ host_services | default([]) }}"
      become: "{{ ansible_become | default(false) }}"

View File

@@ -0,0 +1,49 @@
---
# Check status of all Docker containers
- name: Check container status
  hosts: "{{ target_host | default('all') }}"
  # No facts are referenced below (inventory_hostname is a magic variable,
  # not a fact), so fact gathering is skipped to speed up the run.
  gather_facts: false
  tasks:
    # The '{{ '{{' }}' dance emits literal Go-template braces for docker's
    # --format, keeping Jinja from interpreting them.
    - name: Get list of running containers
      ansible.builtin.command:
        cmd: docker ps --format "table {{ '{{' }}.Names{{ '}}' }}\t{{ '{{' }}.Status{{ '}}' }}\t{{ '{{' }}.Image{{ '}}' }}"
      register: docker_ps
      changed_when: false
      become: "{{ ansible_become | default(false) }}"

    - name: Display running containers
      ansible.builtin.debug:
        msg: |
          === {{ inventory_hostname }} ===
          {{ docker_ps.stdout }}

    - name: Get stopped/exited containers
      ansible.builtin.command:
        cmd: docker ps -a --filter "status=exited" --format "table {{ '{{' }}.Names{{ '}}' }}\t{{ '{{' }}.Status{{ '}}' }}"
      register: docker_exited
      changed_when: false
      become: "{{ ansible_become | default(false) }}"

    # The table header is always line 1, so length > 1 means at least one
    # exited container was listed.
    - name: Display stopped containers
      ansible.builtin.debug:
        msg: |
          === Stopped containers on {{ inventory_hostname }} ===
          {{ docker_exited.stdout }}
      when: docker_exited.stdout_lines | length > 1

    - name: Get disk usage
      ansible.builtin.command:
        cmd: docker system df
      register: docker_df
      changed_when: false
      become: "{{ ansible_become | default(false) }}"

    - name: Display disk usage
      ansible.builtin.debug:
        msg: |
          === Docker disk usage on {{ inventory_hostname }} ===
          {{ docker_df.stdout }}

View File

@@ -0,0 +1,46 @@
---
# Update all Docker containers (pull new images and recreate)
- name: Update Docker containers
  hosts: "{{ target_host | default('all') }}"
  gather_facts: true
  vars:
    services: "{{ host_services | default([]) }}"
  tasks:
    - name: Display update info
      ansible.builtin.debug:
        msg: "Updating {{ services | length }} services on {{ inventory_hostname }}"

    # failed_when: false is deliberate best-effort: one broken stack should
    # not abort updates for the rest. Inspect registered results if needed.
    - name: Pull latest images for each service
      ansible.builtin.command:
        cmd: docker compose pull
        chdir: "{{ docker_data_path }}/{{ item.stack_dir }}"
      loop: "{{ services }}"
      loop_control:
        label: "{{ item.name }}"
      when: item.enabled | default(true)
      register: pull_result
      # docker compose v2 writes pull progress to stderr, so stdout-only
      # matching under-reports changes — check both streams.
      changed_when: "'Downloaded' in pull_result.stdout or 'Downloaded' in pull_result.stderr or 'Pulled' in pull_result.stderr"
      failed_when: false
      become: "{{ ansible_become | default(false) }}"

    - name: Recreate containers with new images
      ansible.builtin.command:
        cmd: docker compose up -d --remove-orphans
        chdir: "{{ docker_data_path }}/{{ item.stack_dir }}"
      loop: "{{ services }}"
      loop_control:
        label: "{{ item.name }}"
      when: item.enabled | default(true)
      register: up_result
      # Same stderr caveat as the pull task above.
      changed_when: "'Started' in up_result.stdout or 'Recreated' in up_result.stdout or 'Started' in up_result.stderr or 'Recreated' in up_result.stderr"
      failed_when: false
      become: "{{ ansible_become | default(false) }}"

    # Pruning does remove images; report "changed" accurately instead of the
    # previous hard-coded changed_when: false. docker prints
    # "Total reclaimed space: 0B" when nothing was deleted.
    - name: Clean up unused images
      ansible.builtin.command:
        cmd: docker image prune -af
      when: prune_images | default(true)
      register: prune_result
      changed_when: "'Total reclaimed space: 0B' not in prune_result.stdout"
      become: "{{ ansible_become | default(false) }}"

View File

@@ -0,0 +1,35 @@
---
# Deployment playbook for anubis
# Category: physical
# Services: 8
#
# Usage:
# ansible-playbook playbooks/deploy_anubis.yml
# ansible-playbook playbooks/deploy_anubis.yml -e "stack_deploy=false"
# ansible-playbook playbooks/deploy_anubis.yml --check
- name: Deploy services to anubis
  hosts: anubis
  gather_facts: true
  vars:
    services: '{{ host_services | default([]) }}'
  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: Deploying {{ services | length }} services to {{ inventory_hostname }}

    # become added for parity with the common playbooks; defaults to false,
    # so behavior only changes where inventory sets ansible_become.
    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: '{{ docker_data_path }}'
        state: directory
        mode: '0755'
      become: '{{ ansible_become | default(false) }}'

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: '{{ item.stack_dir }}'
        stack_compose_file: '{{ item.compose_file }}'
        stack_env_file: '{{ item.env_file | default(omit) }}'
      loop: '{{ services }}'
      loop_control:
        label: '{{ item.name }}'
      when: item.enabled | default(true)

View File

@@ -0,0 +1,35 @@
---
# Deployment playbook for atlantis
# Category: synology
# Services: 57
#
# Usage:
# ansible-playbook playbooks/deploy_atlantis.yml
# ansible-playbook playbooks/deploy_atlantis.yml -e "stack_deploy=false"
# ansible-playbook playbooks/deploy_atlantis.yml --check
- name: Deploy services to atlantis
  hosts: atlantis
  gather_facts: true
  vars:
    services: '{{ host_services | default([]) }}'
  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: Deploying {{ services | length }} services to {{ inventory_hostname }}

    # become added for parity with the common playbooks; defaults to false,
    # so behavior only changes where inventory sets ansible_become.
    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: '{{ docker_data_path }}'
        state: directory
        mode: '0755'
      become: '{{ ansible_become | default(false) }}'

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: '{{ item.stack_dir }}'
        stack_compose_file: '{{ item.compose_file }}'
        stack_env_file: '{{ item.env_file | default(omit) }}'
      loop: '{{ services }}'
      loop_control:
        label: '{{ item.name }}'
      when: item.enabled | default(true)

View File

@@ -0,0 +1,35 @@
---
# Deployment playbook for bulgaria-vm
# Category: vms
# Services: 12
#
# Usage:
# ansible-playbook playbooks/deploy_bulgaria_vm.yml
# ansible-playbook playbooks/deploy_bulgaria_vm.yml -e "stack_deploy=false"
# ansible-playbook playbooks/deploy_bulgaria_vm.yml --check
- name: Deploy services to bulgaria-vm
  hosts: bulgaria_vm
  gather_facts: true
  vars:
    services: '{{ host_services | default([]) }}'
  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: Deploying {{ services | length }} services to {{ inventory_hostname }}

    # become added for parity with the common playbooks; defaults to false,
    # so behavior only changes where inventory sets ansible_become.
    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: '{{ docker_data_path }}'
        state: directory
        mode: '0755'
      become: '{{ ansible_become | default(false) }}'

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: '{{ item.stack_dir }}'
        stack_compose_file: '{{ item.compose_file }}'
        stack_env_file: '{{ item.env_file | default(omit) }}'
      loop: '{{ services }}'
      loop_control:
        label: '{{ item.name }}'
      when: item.enabled | default(true)

View File

@@ -0,0 +1,35 @@
---
# Deployment playbook for calypso
# Category: synology
# Services: 34
#
# Usage:
# ansible-playbook playbooks/deploy_calypso.yml
# ansible-playbook playbooks/deploy_calypso.yml -e "stack_deploy=false"
# ansible-playbook playbooks/deploy_calypso.yml --check
- name: Deploy services to calypso
  hosts: calypso
  gather_facts: true
  vars:
    services: '{{ host_services | default([]) }}'
  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: Deploying {{ services | length }} services to {{ inventory_hostname }}

    # become added for parity with the common playbooks; defaults to false,
    # so behavior only changes where inventory sets ansible_become.
    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: '{{ docker_data_path }}'
        state: directory
        mode: '0755'
      become: '{{ ansible_become | default(false) }}'

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: '{{ item.stack_dir }}'
        stack_compose_file: '{{ item.compose_file }}'
        stack_env_file: '{{ item.env_file | default(omit) }}'
      loop: '{{ services }}'
      loop_control:
        label: '{{ item.name }}'
      when: item.enabled | default(true)

View File

@@ -0,0 +1,35 @@
---
# Deployment playbook for chicago-vm
# Category: vms
# Services: 7
#
# Usage:
# ansible-playbook playbooks/deploy_chicago_vm.yml
# ansible-playbook playbooks/deploy_chicago_vm.yml -e "stack_deploy=false"
# ansible-playbook playbooks/deploy_chicago_vm.yml --check
- name: Deploy services to chicago-vm
  hosts: chicago_vm
  gather_facts: true
  vars:
    services: '{{ host_services | default([]) }}'
  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: Deploying {{ services | length }} services to {{ inventory_hostname }}

    # become added for parity with the common playbooks; defaults to false,
    # so behavior only changes where inventory sets ansible_become.
    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: '{{ docker_data_path }}'
        state: directory
        mode: '0755'
      become: '{{ ansible_become | default(false) }}'

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: '{{ item.stack_dir }}'
        stack_compose_file: '{{ item.compose_file }}'
        stack_env_file: '{{ item.env_file | default(omit) }}'
      loop: '{{ services }}'
      loop_control:
        label: '{{ item.name }}'
      when: item.enabled | default(true)

View File

@@ -0,0 +1,35 @@
---
# Deployment playbook for concord-nuc
# Category: physical
# Services: 15
#
# Usage:
# ansible-playbook playbooks/deploy_concord_nuc.yml
# ansible-playbook playbooks/deploy_concord_nuc.yml -e "stack_deploy=false"
# ansible-playbook playbooks/deploy_concord_nuc.yml --check
- name: Deploy services to concord-nuc
  hosts: concord_nuc
  gather_facts: true
  vars:
    services: '{{ host_services | default([]) }}'
  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: Deploying {{ services | length }} services to {{ inventory_hostname }}

    # become added for parity with the common playbooks; defaults to false,
    # so behavior only changes where inventory sets ansible_become.
    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: '{{ docker_data_path }}'
        state: directory
        mode: '0755'
      become: '{{ ansible_become | default(false) }}'

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: '{{ item.stack_dir }}'
        stack_compose_file: '{{ item.compose_file }}'
        stack_env_file: '{{ item.env_file | default(omit) }}'
      loop: '{{ services }}'
      loop_control:
        label: '{{ item.name }}'
      when: item.enabled | default(true)

View File

@@ -0,0 +1,35 @@
---
# Deployment playbook for contabo-vm
# Category: vms
# Services: 1
#
# Usage:
# ansible-playbook playbooks/deploy_contabo_vm.yml
# ansible-playbook playbooks/deploy_contabo_vm.yml -e "stack_deploy=false"
# ansible-playbook playbooks/deploy_contabo_vm.yml --check
- name: Deploy services to contabo-vm
  hosts: contabo_vm
  gather_facts: true
  vars:
    services: '{{ host_services | default([]) }}'
  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: Deploying {{ services | length }} services to {{ inventory_hostname }}

    # become added for parity with the common playbooks; defaults to false,
    # so behavior only changes where inventory sets ansible_become.
    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: '{{ docker_data_path }}'
        state: directory
        mode: '0755'
      become: '{{ ansible_become | default(false) }}'

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: '{{ item.stack_dir }}'
        stack_compose_file: '{{ item.compose_file }}'
        stack_env_file: '{{ item.env_file | default(omit) }}'
      loop: '{{ services }}'
      loop_control:
        label: '{{ item.name }}'
      when: item.enabled | default(true)

View File

@@ -0,0 +1,35 @@
---
# Deployment playbook for guava
# Category: truenas
# Services: 2
#
# Usage:
# ansible-playbook playbooks/deploy_guava.yml
# ansible-playbook playbooks/deploy_guava.yml -e "stack_deploy=false"
# ansible-playbook playbooks/deploy_guava.yml --check
- name: Deploy services to guava
  hosts: guava
  gather_facts: true
  vars:
    services: '{{ host_services | default([]) }}'
  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: Deploying {{ services | length }} services to {{ inventory_hostname }}

    # become added for parity with the common playbooks; defaults to false,
    # so behavior only changes where inventory sets ansible_become.
    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: '{{ docker_data_path }}'
        state: directory
        mode: '0755'
      become: '{{ ansible_become | default(false) }}'

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: '{{ item.stack_dir }}'
        stack_compose_file: '{{ item.compose_file }}'
        stack_env_file: '{{ item.env_file | default(omit) }}'
      loop: '{{ services }}'
      loop_control:
        label: '{{ item.name }}'
      when: item.enabled | default(true)

View File

@@ -0,0 +1,35 @@
---
# Deployment playbook for homelab-vm
# Category: vms
# Services: 39
#
# Usage:
# ansible-playbook playbooks/deploy_homelab_vm.yml
# ansible-playbook playbooks/deploy_homelab_vm.yml -e "stack_deploy=false"
# ansible-playbook playbooks/deploy_homelab_vm.yml --check
- name: Deploy services to homelab-vm
  hosts: homelab_vm
  gather_facts: true
  vars:
    services: '{{ host_services | default([]) }}'
  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: Deploying {{ services | length }} services to {{ inventory_hostname }}

    # become added for parity with the common playbooks; defaults to false,
    # so behavior only changes where inventory sets ansible_become.
    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: '{{ docker_data_path }}'
        state: directory
        mode: '0755'
      become: '{{ ansible_become | default(false) }}'

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: '{{ item.stack_dir }}'
        stack_compose_file: '{{ item.compose_file }}'
        stack_env_file: '{{ item.env_file | default(omit) }}'
      loop: '{{ services }}'
      loop_control:
        label: '{{ item.name }}'
      when: item.enabled | default(true)

View File

@@ -0,0 +1,35 @@
---
# Deployment playbook for lxc
# Category: proxmox
# Services: 1
#
# Usage:
# ansible-playbook playbooks/deploy_lxc.yml
# ansible-playbook playbooks/deploy_lxc.yml -e "stack_deploy=false"
# ansible-playbook playbooks/deploy_lxc.yml --check
- name: Deploy services to lxc
  hosts: lxc
  gather_facts: true
  vars:
    services: '{{ host_services | default([]) }}'
  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: Deploying {{ services | length }} services to {{ inventory_hostname }}

    # become added for parity with the common playbooks; defaults to false,
    # so behavior only changes where inventory sets ansible_become.
    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: '{{ docker_data_path }}'
        state: directory
        mode: '0755'
      become: '{{ ansible_become | default(false) }}'

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: '{{ item.stack_dir }}'
        stack_compose_file: '{{ item.compose_file }}'
        stack_env_file: '{{ item.env_file | default(omit) }}'
      loop: '{{ services }}'
      loop_control:
        label: '{{ item.name }}'
      when: item.enabled | default(true)

View File

@@ -0,0 +1,35 @@
---
# Deployment playbook for matrix-ubuntu-vm
# Category: vms
# Services: 4
#
# Usage:
# ansible-playbook playbooks/deploy_matrix_ubuntu_vm.yml
# ansible-playbook playbooks/deploy_matrix_ubuntu_vm.yml -e "stack_deploy=false"
# ansible-playbook playbooks/deploy_matrix_ubuntu_vm.yml --check
- name: Deploy services to matrix-ubuntu-vm
  hosts: matrix_ubuntu_vm
  gather_facts: true
  vars:
    services: '{{ host_services | default([]) }}'
  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: Deploying {{ services | length }} services to {{ inventory_hostname }}

    # become added for parity with the common playbooks; defaults to false,
    # so behavior only changes where inventory sets ansible_become.
    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: '{{ docker_data_path }}'
        state: directory
        mode: '0755'
      become: '{{ ansible_become | default(false) }}'

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: '{{ item.stack_dir }}'
        stack_compose_file: '{{ item.compose_file }}'
        stack_env_file: '{{ item.env_file | default(omit) }}'
      loop: '{{ services }}'
      loop_control:
        label: '{{ item.name }}'
      when: item.enabled | default(true)

View File

@@ -0,0 +1,35 @@
---
# Deployment playbook for rpi5-vish
# Category: edge
# Services: 6
#
# Usage:
# ansible-playbook playbooks/deploy_rpi5_vish.yml
# ansible-playbook playbooks/deploy_rpi5_vish.yml -e "stack_deploy=false"
# ansible-playbook playbooks/deploy_rpi5_vish.yml --check
- name: Deploy services to rpi5-vish
  hosts: rpi5_vish
  gather_facts: true
  vars:
    services: '{{ host_services | default([]) }}'
  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: Deploying {{ services | length }} services to {{ inventory_hostname }}

    # become added for parity with the common playbooks; defaults to false,
    # so behavior only changes where inventory sets ansible_become.
    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: '{{ docker_data_path }}'
        state: directory
        mode: '0755'
      become: '{{ ansible_become | default(false) }}'

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: '{{ item.stack_dir }}'
        stack_compose_file: '{{ item.compose_file }}'
        stack_env_file: '{{ item.env_file | default(omit) }}'
      loop: '{{ services }}'
      loop_control:
        label: '{{ item.name }}'
      when: item.enabled | default(true)

View File

@@ -0,0 +1,35 @@
---
# Deployment playbook for seattle
# Category: vms
# Services: 13
#
# Usage:
# ansible-playbook playbooks/deploy_seattle.yml
# ansible-playbook playbooks/deploy_seattle.yml -e "stack_deploy=false"
# ansible-playbook playbooks/deploy_seattle.yml --check
- name: Deploy services to seattle
  hosts: seattle
  gather_facts: true
  vars:
    services: '{{ host_services | default([]) }}'
  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: Deploying {{ services | length }} services to {{ inventory_hostname }}

    # become added for parity with the common playbooks; defaults to false,
    # so behavior only changes where inventory sets ansible_become.
    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: '{{ docker_data_path }}'
        state: directory
        mode: '0755'
      become: '{{ ansible_become | default(false) }}'

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: '{{ item.stack_dir }}'
        stack_compose_file: '{{ item.compose_file }}'
        stack_env_file: '{{ item.env_file | default(omit) }}'
      loop: '{{ services }}'
      loop_control:
        label: '{{ item.name }}'
      when: item.enabled | default(true)

View File

@@ -0,0 +1,35 @@
---
# Deployment playbook for setillo
# Category: synology
# Services: 5
#
# Usage:
# ansible-playbook playbooks/deploy_setillo.yml
# ansible-playbook playbooks/deploy_setillo.yml -e "stack_deploy=false"
# ansible-playbook playbooks/deploy_setillo.yml --check
- name: Deploy services to setillo
  hosts: setillo
  gather_facts: true
  vars:
    services: '{{ host_services | default([]) }}'
  tasks:
    - name: Display deployment info
      ansible.builtin.debug:
        msg: Deploying {{ services | length }} services to {{ inventory_hostname }}

    # become added for parity with the common playbooks; defaults to false,
    # so behavior only changes where inventory sets ansible_become.
    - name: Ensure docker data directory exists
      ansible.builtin.file:
        path: '{{ docker_data_path }}'
        state: directory
        mode: '0755'
      become: '{{ ansible_become | default(false) }}'

    - name: Deploy each enabled service
      ansible.builtin.include_role:
        name: docker_stack
      vars:
        stack_name: '{{ item.stack_dir }}'
        stack_compose_file: '{{ item.compose_file }}'
        stack_env_file: '{{ item.env_file | default(omit) }}'
      loop: '{{ services }}'
      loop_control:
        label: '{{ item.name }}'
      when: item.enabled | default(true)

View File

@@ -0,0 +1,173 @@
---
# Portainer Stack Management via API
# Manages GitOps stacks across all Portainer endpoints
# Run with: ansible-playbook -i hosts.ini playbooks/portainer_stack_management.yml
- name: Portainer Stack Management
  hosts: localhost
  # Facts are required: the report tasks below use ansible_date_time
  # (iso8601, epoch); with gather_facts disabled those were undefined
  # variables and the report tasks failed.
  gather_facts: true
  vars:
    portainer_url: "https://192.168.0.200:9443"
    portainer_username: "admin"
    # portainer_password: "{{ vault_portainer_password }}" # Use ansible-vault
    git_repo_url: "https://git.vish.gg/Vish/homelab.git"
    # Portainer endpoints mapping
    endpoints:
      atlantis:
        id: 1
        name: "Atlantis"
        stacks_path: "Atlantis"
      calypso:
        id: 2
        name: "Calypso"
        stacks_path: "Calypso"
      concord_nuc:
        id: 3
        name: "Concord NUC"
        stacks_path: "concord_nuc"
      homelab_vm:
        id: 4
        name: "Homelab VM"
        stacks_path: "homelab_vm"
      rpi5:
        id: 5
        name: "RPi 5"
        stacks_path: "raspberry-pi-5-vish"
  tasks:
    # SECURITY(review): the default('admin') fallback password should be
    # removed once vault_portainer_password is wired up everywhere.
    - name: Authenticate with Portainer
      ansible.builtin.uri:
        url: "{{ portainer_url }}/api/auth"
        method: POST
        body_format: json
        body:
          Username: "{{ portainer_username }}"
          Password: "{{ portainer_password | default('admin') }}"
        validate_certs: false
      register: auth_response
      no_log: true

    - name: Set authentication token
      ansible.builtin.set_fact:
        portainer_token: "{{ auth_response.json.jwt }}"

    - name: Get all endpoints
      ansible.builtin.uri:
        url: "{{ portainer_url }}/api/endpoints"
        method: GET
        headers:
          Authorization: "Bearer {{ portainer_token }}"
        validate_certs: false
      register: endpoints_response

    - name: Display available endpoints
      ansible.builtin.debug:
        msg: |
          Available Portainer Endpoints:
          {% for endpoint in endpoints_response.json %}
          - ID: {{ endpoint.Id }}, Name: {{ endpoint.Name }}, Status: {{ endpoint.Status }}
          {% endfor %}

    - name: Get stacks for each endpoint
      ansible.builtin.uri:
        url: "{{ portainer_url }}/api/stacks"
        method: GET
        headers:
          Authorization: "Bearer {{ portainer_token }}"
        validate_certs: false
      register: stacks_response

    # A stack is considered GitOps-managed when it carries a GitConfig.
    - name: Analyze GitOps stacks
      ansible.builtin.set_fact:
        gitops_stacks: "{{ stacks_response.json | selectattr('GitConfig', 'defined') | list }}"
        non_gitops_stacks: "{{ stacks_response.json | rejectattr('GitConfig', 'defined') | list }}"

    - name: Display GitOps status
      ansible.builtin.debug:
        msg: |
          GitOps Stack Analysis:
          - Total Stacks: {{ stacks_response.json | length }}
          - GitOps Managed: {{ gitops_stacks | length }}
          - Non-GitOps: {{ non_gitops_stacks | length }}
          GitOps Stacks:
          {% for stack in gitops_stacks %}
          - {{ stack.Name }} (Endpoint: {{ stack.EndpointId }})
          {% endfor %}
          Non-GitOps Stacks:
          {% for stack in non_gitops_stacks %}
          - {{ stack.Name }} (Endpoint: {{ stack.EndpointId }})
          {% endfor %}

    # NOTE(review): stack_files is registered but not consumed by any later
    # task — kept for ad-hoc inspection.
    - name: Check stack health
      ansible.builtin.uri:
        url: "{{ portainer_url }}/api/stacks/{{ item.Id }}/file"
        method: GET
        headers:
          Authorization: "Bearer {{ portainer_token }}"
        validate_certs: false
      register: stack_files
      loop: "{{ gitops_stacks }}"
      failed_when: false

    # Opt-in via -e sync_stacks=true; best-effort per stack.
    - name: Trigger GitOps sync for all stacks
      ansible.builtin.uri:
        url: "{{ portainer_url }}/api/stacks/{{ item.Id }}/git/redeploy"
        method: PUT
        headers:
          Authorization: "Bearer {{ portainer_token }}"
        body_format: json
        body:
          RepositoryReferenceName: "refs/heads/main"
          PullImage: true
        validate_certs: false
      register: sync_results
      loop: "{{ gitops_stacks }}"
      when: sync_stacks | default(false) | bool
      failed_when: false

    - name: Display sync results
      ansible.builtin.debug:
        msg: |
          GitOps Sync Results:
          {% for result in sync_results.results %}
          {% if result.skipped is not defined %}
          - Stack: {{ gitops_stacks[loop.index0].Name }} - Status: {{ result.status | default('Failed') }}
          {% endif %}
          {% endfor %}
      when: sync_stacks | default(false) | bool

    - name: Generate stack health report
      ansible.builtin.copy:
        content: |
          # Portainer Stack Health Report
          Generated: {{ ansible_date_time.iso8601 }}
          ## Summary
          - Total Stacks: {{ stacks_response.json | length }}
          - GitOps Managed: {{ gitops_stacks | length }}
          - Non-GitOps: {{ non_gitops_stacks | length }}
          ## GitOps Stacks
          {% for stack in gitops_stacks %}
          ### {{ stack.Name }}
          - Endpoint: {{ stack.EndpointId }}
          - Status: {{ stack.Status }}
          - Git Repository: {{ stack.GitConfig.URL if stack.GitConfig is defined else 'N/A' }}
          - Git Reference: {{ stack.GitConfig.ReferenceName if stack.GitConfig is defined else 'N/A' }}
          - Last Update: {{ stack.UpdatedAt }}
          {% endfor %}
          ## Non-GitOps Stacks (Manual Management Required)
          {% for stack in non_gitops_stacks %}
          - {{ stack.Name }} (Endpoint: {{ stack.EndpointId }})
          {% endfor %}
        dest: "/tmp/portainer_stack_report_{{ ansible_date_time.epoch }}.md"
      delegate_to: localhost

    - name: Display report location
      ansible.builtin.debug:
        msg: "Stack health report saved to: /tmp/portainer_stack_report_{{ ansible_date_time.epoch }}.md"

View File

@@ -0,0 +1,187 @@
---
# SSH Mesh Key Distribution & Verification
#
# Distributes SSH public keys across all managed hosts so every host can SSH
# to every other host. Also verifies connectivity.
#
# Usage:
# ansible-playbook -i inventory.yml playbooks/ssh_mesh.yml
# ansible-playbook -i inventory.yml playbooks/ssh_mesh.yml --tags verify
# ansible-playbook -i inventory.yml playbooks/ssh_mesh.yml --tags distribute
# ansible-playbook -i inventory.yml playbooks/ssh_mesh.yml -e "generate_missing=true"
# Play 1: collect (or optionally generate) each host's public key and expose
# it as a host fact (ssh_pubkey) for the distribution play.
- name: SSH Mesh — Collect Keys
  hosts: ssh_mesh
  gather_facts: false
  # Tagged for `distribute` too: the distribution play reads the ssh_pubkey
  # facts set here, so collection must run under that tag as well.
  tags: [collect, distribute]
  tasks:
    - name: Check if ed25519 key exists
      stat:
        path: "~/.ssh/id_ed25519.pub"
      register: ed25519_key

    # RSA is only probed as a fallback when no ed25519 key is present; when
    # skipped, rsa_key.stat is absent — hence the default(false) guards below.
    - name: Check if RSA key exists (fallback)
      stat:
        path: "~/.ssh/id_rsa.pub"
      register: rsa_key
      when: not ed25519_key.stat.exists

    # Opt-in only (-e generate_missing=true); `creates:` keeps it idempotent.
    - name: Generate ed25519 key if missing
      command: ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519 -N "" -C "{{ ansible_user }}@{{ inventory_hostname }}"
      args:
        creates: ~/.ssh/id_ed25519
      when:
        - not ed25519_key.stat.exists
        - not (rsa_key.stat.exists | default(false))
        - generate_missing | default(false) | bool

    # Re-stat so the slurp below can pick up a freshly generated key.
    - name: Re-check for ed25519 key after generation
      stat:
        path: "~/.ssh/id_ed25519.pub"
      register: ed25519_key_recheck
      when:
        - not ed25519_key.stat.exists
        - generate_missing | default(false) | bool

    - name: Read ed25519 public key
      slurp:
        src: "~/.ssh/id_ed25519.pub"
      register: pubkey_ed25519
      when: ed25519_key.stat.exists or (ed25519_key_recheck.stat.exists | default(false))

    - name: Read RSA public key (fallback)
      slurp:
        src: "~/.ssh/id_rsa.pub"
      register: pubkey_rsa
      when:
        - not ed25519_key.stat.exists
        - not (ed25519_key_recheck.stat.exists | default(false))
        - rsa_key.stat.exists | default(false)

    # slurp returns base64 content; decode and trim to the one-line pubkey.
    # The ed25519 content wins when both reads ran; RSA is the fallback.
    - name: Set public key fact
      set_fact:
        ssh_pubkey: >-
          {{ (pubkey_ed25519.content | default(pubkey_rsa.content) | b64decode | trim) }}
        ssh_key_comment: "{{ inventory_hostname }}"
      when: pubkey_ed25519 is not skipped or pubkey_rsa is not skipped

    # Non-fatal: hosts without a key are simply left out of the mesh list.
    - name: Warn if no key found
      debug:
        msg: "WARNING: No SSH key on {{ inventory_hostname }}. Run with -e generate_missing=true to create one."
      when: ssh_pubkey is not defined
# Play 2: append every collected mesh key (plus an optional admin_key) to
# each host's authorized_keys.
- name: SSH Mesh — Distribute Keys
  hosts: ssh_mesh
  gather_facts: false
  tags: [distribute]
  tasks:
    # Pull ssh_pubkey from every ssh_mesh host's hostvars; hosts where the
    # collect play found no key are filtered out by selectattr.
    - name: Build list of all mesh public keys
      set_fact:
        all_mesh_keys: >-
          {{ groups['ssh_mesh']
             | map('extract', hostvars)
             | selectattr('ssh_pubkey', 'defined')
             | map(attribute='ssh_pubkey')
             | list }}

    # Optional extra key (e.g. an operator workstation) supplied via
    # -e/inventory as admin_key.
    - name: Include admin key
      set_fact:
        all_mesh_keys: >-
          {{ all_mesh_keys + [admin_key] }}
      when: admin_key is defined

    - name: Ensure .ssh directory exists
      file:
        path: "~/.ssh"
        state: directory
        mode: "0700"

    # state: touch would otherwise report "changed" on every run.
    - name: Ensure authorized_keys exists
      file:
        path: "~/.ssh/authorized_keys"
        state: touch
        mode: "0600"
      changed_when: false

    # lineinfile is idempotent per key; the label shows only the key's
    # trailing comment field to keep loop output compact.
    - name: Add missing keys to authorized_keys
      lineinfile:
        path: "~/.ssh/authorized_keys"
        line: "{{ item }}"
        state: present
      loop: "{{ all_mesh_keys }}"
      loop_control:
        label: "{{ item.split()[-1] | default('unknown') }}"
# Play 3: verify connectivity — localhost to every mesh host, plus optional
# host-to-host spot checks driven by cross_test_pairs.
- name: SSH Mesh — Verify Connectivity
  hosts: localhost
  gather_facts: false
  connection: local
  tags: [verify]
  tasks:
    - name: Build mesh host list
      set_fact:
        mesh_hosts: >-
          {{ groups['ssh_mesh']
             | map('extract', hostvars)
             | list }}

    # BatchMode forbids password prompts so a missing key fails fast instead
    # of hanging; failed_when: false keeps unreachable hosts from aborting
    # the matrix.
    - name: Test SSH from localhost to each mesh host
      shell: |
        ssh -o BatchMode=yes \
            -o ConnectTimeout=5 \
            -o StrictHostKeyChecking=accept-new \
            -i ~/.ssh/id_ed25519 \
            -p {{ item.ansible_port | default(22) }} \
            {{ item.ansible_user }}@{{ item.ansible_host }} \
            "echo ok" 2>&1
      register: ssh_tests
      loop: "{{ mesh_hosts }}"
      loop_control:
        label: "localhost -> {{ item.inventory_hostname | default(item.ansible_host) }}"
      failed_when: false
      changed_when: false

    # Success requires both rc == 0 and the literal "ok" in stdout (a banner
    # alone with rc 0 would be a false positive otherwise).
    - name: Display connectivity matrix
      debug:
        msg: |
          SSH Mesh Verification (from localhost):
          {% for result in ssh_tests.results %}
          {{ '✓' if result.rc == 0 and 'ok' in (result.stdout | default('')) else '✗' }} -> {{ result.item.inventory_hostname | default(result.item.ansible_host) }}{% if result.rc != 0 or 'ok' not in (result.stdout | default('')) %} ({{ result.stdout_lines[-1] | default('unknown error') }}){% endif %}
          {% endfor %}
          {{ ssh_tests.results | selectattr('rc', 'equalto', 0) | list | length }}/{{ ssh_tests.results | length }} hosts reachable

    # Jinja expands cross_test_pairs into a shell script that SSHes to each
    # source host and, from there, to the destination (nested ssh). Results
    # are accumulated as ✓/✗ lines and echoed at the end.
    - name: Test cross-host SSH (sample pairs)
      shell: |
        results=""
        {% for pair in cross_test_pairs | default([]) %}
        src_user="{{ pair.src_user }}"
        src_host="{{ pair.src_host }}"
        src_port="{{ pair.src_port | default(22) }}"
        dst_user="{{ pair.dst_user }}"
        dst_host="{{ pair.dst_host }}"
        dst_port="{{ pair.dst_port | default(22) }}"
        out=$(ssh -o BatchMode=yes -o ConnectTimeout=5 -o StrictHostKeyChecking=no \
            -p ${src_port} ${src_user}@${src_host} \
            "ssh -o BatchMode=yes -o ConnectTimeout=5 -o StrictHostKeyChecking=accept-new \
            -i ~/.ssh/id_ed25519 -p ${dst_port} ${dst_user}@${dst_host} 'echo ok'" 2>&1)
        if echo "$out" | grep -q "ok"; then
          results="${results}✓ {{ pair.label }}\n"
        else
          results="${results}✗ {{ pair.label }} ($(echo "$out" | tail -1))\n"
        fi
        {% endfor %}
        echo -e "$results"
      register: cross_tests
      when: cross_test_pairs is defined
      changed_when: false

    - name: Display cross-host results
      debug:
        msg: |
          Cross-Host SSH Tests:
          {{ cross_tests.stdout }}
      when: cross_tests is not skipped and cross_tests.stdout is defined

View File

@@ -0,0 +1,137 @@
---
# Read-only health snapshot of Synology NAS hosts: DSM version, uptime,
# load/memory, storage usage, RAID state, and (optionally) Tailscale status.
# No sudo is used; every probe carries failed_when: false so a single bad
# probe never aborts the run — only the explicit asserts near the end can
# fail the play.
- name: Synology Healthcheck
  hosts: synology
  gather_facts: yes
  become: false
  vars:
    # Known install locations for the tailscale CLI; the first executable
    # path found wins (see "Detect Tailscale binary path" below).
    ts_candidates:
      - /var/packages/Tailscale/target/bin/tailscale
      - /usr/bin/tailscale
  tasks:
    # ---------- System info ----------
    # DSM publishes its version via the shell-sourceable file
    # /etc.defaults/VERSION (productversion / buildnumber variables).
    - name: DSM version
      ansible.builtin.shell: |
        set -e
        if [ -f /etc.defaults/VERSION ]; then
          . /etc.defaults/VERSION
          echo "${productversion:-unknown} (build ${buildnumber:-unknown})"
        else
          echo "unknown"
        fi
      register: dsm_version
      changed_when: false
      failed_when: false
    - name: Uptime (pretty)
      ansible.builtin.command: uptime -p
      register: uptime_pretty
      changed_when: false
      failed_when: false
    - name: Load averages
      ansible.builtin.command: cat /proc/loadavg
      register: loadavg
      changed_when: false
      failed_when: false
    - name: Memory summary (MB)
      ansible.builtin.command: free -m
      register: mem
      changed_when: false
      failed_when: false
    # ---------- Storage ----------
    # df -P gives POSIX (single-line) output; awk grabs the Use% column and
    # tr strips the '%' so the asserts below can compare it as an integer.
    - name: Disk usage of root (/)
      ansible.builtin.shell: df -P / | awk 'NR==2 {print $5}' | tr -d '%'
      register: root_usage
      changed_when: false
      failed_when: false
    # Empty stdout here means /volume1 is not a mountpoint; the assert and
    # summary below treat that as "not present".
    - name: Disk usage of /volume1 (if present)
      ansible.builtin.shell: |
        if mountpoint -q /volume1; then
          df -P /volume1 | awk 'NR==2 {print $5}' | tr -d '%'
        fi
      register: vol1_usage
      changed_when: false
      failed_when: false
    - name: RAID status (/proc/mdstat)
      ansible.builtin.command: cat /proc/mdstat
      register: mdstat
      changed_when: false
      failed_when: false
    # ---------- Tailscale (optional) ----------
    # Prints the first executable candidate path, or an empty line when none
    # exists; the two follow-up tasks key off ts_bin.stdout being non-empty.
    - name: Detect Tailscale binary path (first that exists)
      ansible.builtin.shell: |
        for p in {{ ts_candidates | join(' ') }}; do
          [ -x "$p" ] && echo "$p" && exit 0
        done
        echo ""
      register: ts_bin
      changed_when: false
      failed_when: false
    - name: Get Tailscale IPv4 (if tailscale present)
      ansible.builtin.command: "{{ ts_bin.stdout }} ip -4"
      register: ts_ip
      changed_when: false
      failed_when: false
      when: ts_bin.stdout | length > 0
    - name: Get Tailscale self status (brief)
      ansible.builtin.command: "{{ ts_bin.stdout }} status --self"
      register: ts_status
      changed_when: false
      failed_when: false
      when: ts_bin.stdout | length > 0
    # ---------- Assertions (lightweight, no sudo) ----------
    # These are the only tasks allowed to fail the play: they flag a
    # degraded/resyncing RAID or a filesystem at >= 90% usage.
    - name: Check RAID not degraded/resyncing
      ansible.builtin.assert:
        that:
          - mdstat.stdout is not search('degraded', ignorecase=True)
          - mdstat.stdout is not search('resync', ignorecase=True)
        success_msg: "RAID OK"
        fail_msg: "RAID issue detected (degraded or resync) — check Storage Manager"
      changed_when: false
    - name: Check root FS usage < 90%
      ansible.builtin.assert:
        that:
          - (root_usage.stdout | default('0')) | int < 90
        success_msg: "Root filesystem usage OK ({{ root_usage.stdout | default('n/a') }}%)"
        fail_msg: "Root filesystem high ({{ root_usage.stdout | default('n/a') }}%)"
      changed_when: false
    - name: Check /volume1 usage < 90% (if present)
      ansible.builtin.assert:
        that:
          - (vol1_usage.stdout | default('0')) | int < 90
        success_msg: "/volume1 usage OK ({{ vol1_usage.stdout | default('n/a') }}%)"
        fail_msg: "/volume1 usage high ({{ vol1_usage.stdout | default('n/a') }}%)"
      when: vol1_usage.stdout is defined and vol1_usage.stdout != ""
      changed_when: false
    # ---------- Summary (shows the results) ----------
    - name: Summary
      ansible.builtin.debug:
        msg: |
          Host: {{ inventory_hostname }}
          DSM: {{ dsm_version.stdout | default('unknown') }}
          Uptime: {{ uptime_pretty.stdout | default('n/a') }}
          Load: {{ loadavg.stdout | default('n/a') }}
          Memory (MB):
          {{ (mem.stdout | default('n/a')) | indent(2) }}
          Root usage: {{ root_usage.stdout | default('n/a') }}%
          Volume1 usage: {{ (vol1_usage.stdout | default('n/a')) if (vol1_usage.stdout is defined and vol1_usage.stdout != "") else 'n/a' }}%
          RAID (/proc/mdstat):
          {{ (mdstat.stdout | default('n/a')) | indent(2) }}
          Tailscale:
            binary: {{ (ts_bin.stdout | default('not found')) if ts_bin.stdout|length > 0 else 'not found' }}
            ip: {{ ts_ip.stdout | default('n/a') }}
            self:
          {{ (ts_status.stdout | default('n/a')) | indent(2) }}

View File

@@ -0,0 +1,372 @@
---
# Tailscale audit/reporting playbook: collects status, network, health,
# configuration and diagnostics from every host, writes a per-host JSON
# report on the control node, and (optionally, via -e tailscale_action=...)
# performs management operations. See the summary task at the end for the
# supported extra-vars.
- name: Tailscale Network Management
  hosts: all
  gather_facts: yes
  vars:
    tailscale_timestamp: "{{ ansible_date_time.iso8601 }}"
    # All per-host JSON reports and the aggregated topology map are written
    # here on the control node (delegate_to: localhost below).
    tailscale_report_dir: "/tmp/tailscale_reports"
  tasks:
    - name: Create Tailscale reports directory
      file:
        path: "{{ tailscale_report_dir }}"
        state: directory
        mode: '0755'
      delegate_to: localhost
      run_once: true
- name: Check if Tailscale is installed
shell: command -v tailscale >/dev/null 2>&1
register: tailscale_available
changed_when: false
ignore_errors: yes
- name: Skip Tailscale tasks if not available
set_fact:
skip_tailscale: "{{ tailscale_available.rc != 0 }}"
    # Raw status capture: prefers JSON, falls back to plain text, and always
    # produces some stdout so the report template never sees an empty value.
    - name: Get Tailscale status
      shell: |
        if ! command -v tailscale >/dev/null 2>&1; then
          echo "Tailscale not installed"
          exit 0
        fi
        echo "=== TAILSCALE STATUS ==="
        tailscale status --json 2>/dev/null || tailscale status 2>/dev/null || echo "Tailscale not accessible"
      register: tailscale_status
      changed_when: false
      when: not skip_tailscale
    # IPs, peers, advertised routes, and ICMP reachability of key peers.
    # NOTE(review): the key_peers IPs are hard-coded Tailscale addresses
    # (commented as atlantis/calypso/setillo) — verify they still match the
    # tailnet before trusting the connectivity section.
    - name: Get Tailscale network information
      shell: |
        if ! command -v tailscale >/dev/null 2>&1; then
          echo "Tailscale not installed"
          exit 0
        fi
        echo "=== TAILSCALE NETWORK INFO ==="
        # Get IP addresses
        echo "Tailscale IPs:"
        tailscale ip -4 2>/dev/null || echo "IPv4 not available"
        tailscale ip -6 2>/dev/null || echo "IPv6 not available"
        echo ""
        # Get peer information
        echo "Peer Status:"
        tailscale status --peers 2>/dev/null || echo "Peer status not available"
        echo ""
        # Get routes
        echo "Routes:"
        tailscale status --self=false 2>/dev/null | grep -E "^[0-9]" | head -10 || echo "Route information not available"
        echo ""
        # Check connectivity to key peers
        echo "Connectivity Tests:"
        key_peers="100.83.230.112 100.103.48.78 100.125.0.20" # atlantis, calypso, setillo
        for peer in $key_peers; do
          if ping -c 1 -W 2 "$peer" >/dev/null 2>&1; then
            echo "✅ $peer - reachable"
          else
            echo "❌ $peer - unreachable"
          fi
        done
      register: tailscale_network
      changed_when: false
      when: not skip_tailscale
      ignore_errors: yes
    # Daemon/service state, auth state, exit-node usage, and MagicDNS —
    # all derived by grepping the JSON status output rather than parsing it.
    - name: Check Tailscale service health
      shell: |
        if ! command -v tailscale >/dev/null 2>&1; then
          echo "Tailscale not installed"
          exit 0
        fi
        echo "=== TAILSCALE SERVICE HEALTH ==="
        # Check daemon status
        if command -v systemctl >/dev/null 2>&1; then
          echo "Service Status:"
          systemctl is-active tailscaled 2>/dev/null || echo "tailscaled service status unknown"
          systemctl is-enabled tailscaled 2>/dev/null || echo "tailscaled service enablement unknown"
          echo ""
        fi
        # Check authentication status
        echo "Authentication:"
        if tailscale status --json 2>/dev/null | grep -q '"BackendState":"Running"'; then
          echo "✅ Authenticated and running"
        elif tailscale status 2>/dev/null | grep -q "Logged out"; then
          echo "❌ Not logged in"
        else
          echo "⚠️ Status unclear"
        fi
        echo ""
        # Check for exit node status
        echo "Exit Node Status:"
        if tailscale status --json 2>/dev/null | grep -q '"ExitNodeID"'; then
          echo "Using exit node"
        else
          echo "Not using exit node"
        fi
        echo ""
        # Check MagicDNS
        echo "MagicDNS:"
        if tailscale status --json 2>/dev/null | grep -q '"MagicDNSSuffix"'; then
          suffix=$(tailscale status --json 2>/dev/null | grep -o '"MagicDNSSuffix":"[^"]*"' | cut -d'"' -f4)
          echo "✅ Enabled (suffix: $suffix)"
        else
          echo "❌ Disabled or not available"
        fi
      register: tailscale_health
      changed_when: false
      when: not skip_tailscale
    # Preferences, subnet routes, ACL hints, and SSH feature detection.
    # Uses `tailscale debug` subcommands, which may be unavailable on some
    # builds — hence ignore_errors.
    - name: Analyze Tailscale configuration
      shell: |
        if ! command -v tailscale >/dev/null 2>&1; then
          echo "Tailscale not installed"
          exit 0
        fi
        echo "=== TAILSCALE CONFIGURATION ==="
        # Get preferences
        echo "Preferences:"
        tailscale debug prefs 2>/dev/null | head -20 || echo "Preferences not accessible"
        echo ""
        # Check for subnet routes
        echo "Subnet Routes:"
        tailscale status --json 2>/dev/null | grep -o '"AdvertiseRoutes":\[[^\]]*\]' || echo "No advertised routes"
        echo ""
        # Check ACL status (if accessible)
        echo "ACL Information:"
        tailscale debug netmap 2>/dev/null | grep -i acl | head -5 || echo "ACL information not accessible"
        echo ""
        # Check for Tailscale SSH
        echo "Tailscale SSH:"
        if tailscale status --json 2>/dev/null | grep -q '"SSH"'; then
          echo "SSH feature available"
        else
          echo "SSH feature not detected"
        fi
      register: tailscale_config
      changed_when: false
      when: not skip_tailscale
      ignore_errors: yes
    # DERP relay reachability, direct-connection addresses, and the local
    # tailscale0 interface/route state.
    - name: Tailscale network diagnostics
      shell: |
        if ! command -v tailscale >/dev/null 2>&1; then
          echo "Tailscale not installed"
          exit 0
        fi
        echo "=== NETWORK DIAGNOSTICS ==="
        # Check DERP (relay) connectivity
        echo "DERP Connectivity:"
        tailscale netcheck 2>/dev/null | head -10 || echo "Network check not available"
        echo ""
        # Check for direct connections
        echo "Direct Connections:"
        tailscale status --json 2>/dev/null | grep -o '"CurAddr":"[^"]*"' | head -5 || echo "Connection info not available"
        echo ""
        # Interface information
        echo "Network Interfaces:"
        ip addr show tailscale0 2>/dev/null || echo "Tailscale interface not found"
        echo ""
        # Routing table
        echo "Tailscale Routes:"
        ip route show | grep tailscale0 2>/dev/null || echo "No Tailscale routes found"
      register: tailscale_diagnostics
      changed_when: false
      when: not skip_tailscale
      ignore_errors: yes
    # Bundle every captured stdout into one fact so the display and JSON
    # tasks below have a single, always-defined source of truth.
    - name: Create Tailscale report
      set_fact:
        tailscale_report:
          timestamp: "{{ tailscale_timestamp }}"
          hostname: "{{ inventory_hostname }}"
          tailscale_available: "{{ not skip_tailscale }}"
          status: "{{ tailscale_status.stdout if not skip_tailscale else 'Not available' }}"
          network: "{{ tailscale_network.stdout if not skip_tailscale else 'Not available' }}"
          health: "{{ tailscale_health.stdout if not skip_tailscale else 'Not available' }}"
          configuration: "{{ tailscale_config.stdout if not skip_tailscale else 'Not available' }}"
          diagnostics: "{{ tailscale_diagnostics.stdout if not skip_tailscale else 'Not available' }}"
    - name: Display Tailscale report
      debug:
        msg: |
          ==========================================
          🌐 TAILSCALE REPORT - {{ inventory_hostname }}
          ==========================================
          📊 AVAILABILITY: {{ 'Available' if tailscale_report.tailscale_available else 'Not Available' }}
          📡 STATUS:
          {{ tailscale_report.status }}
          🔗 NETWORK INFO:
          {{ tailscale_report.network }}
          🏥 HEALTH CHECK:
          {{ tailscale_report.health }}
          ⚙️ CONFIGURATION:
          {{ tailscale_report.configuration }}
          🔍 DIAGNOSTICS:
          {{ tailscale_report.diagnostics }}
          ==========================================
    # Hand-built JSON: the multi-line shell outputs go through | to_json so
    # embedded quotes/newlines stay valid; the recommendations array is
    # assembled conditionally (each conditional entry carries its own
    # trailing comma; the final fixed entry keeps the array well-formed).
    - name: Generate JSON Tailscale report
      copy:
        content: |
          {
            "timestamp": "{{ tailscale_report.timestamp }}",
            "hostname": "{{ tailscale_report.hostname }}",
            "tailscale_available": {{ tailscale_report.tailscale_available | lower }},
            "status": {{ tailscale_report.status | to_json }},
            "network": {{ tailscale_report.network | to_json }},
            "health": {{ tailscale_report.health | to_json }},
            "configuration": {{ tailscale_report.configuration | to_json }},
            "diagnostics": {{ tailscale_report.diagnostics | to_json }},
            "recommendations": [
              {% if not tailscale_report.tailscale_available %}
              "Install Tailscale for network connectivity",
              {% endif %}
              {% if 'Not logged in' in tailscale_report.health %}
              "Authenticate Tailscale client",
              {% endif %}
              {% if 'unreachable' in tailscale_report.network %}
              "Investigate network connectivity issues",
              {% endif %}
              "Regular Tailscale health monitoring recommended"
            ]
          }
        dest: "{{ tailscale_report_dir }}/{{ inventory_hostname }}_tailscale_{{ ansible_date_time.epoch }}.json"
      delegate_to: localhost
- name: Tailscale management operations (when action is specified)
block:
- name: Validate action parameter
fail:
msg: "Invalid action. Supported actions: status, login, logout, up, down, ping"
when: tailscale_action not in ['status', 'login', 'logout', 'up', 'down', 'ping']
- name: Execute Tailscale action
shell: |
case "{{ tailscale_action }}" in
"status")
tailscale status --peers
;;
"login")
echo "Login requires interactive authentication"
tailscale login --timeout=30s
;;
"logout")
tailscale logout
;;
"up")
tailscale up {{ tailscale_args | default('') }}
;;
"down")
tailscale down
;;
"ping")
if [ -n "{{ tailscale_target | default('') }}" ]; then
tailscale ping "{{ tailscale_target }}"
else
echo "Error: tailscale_target required for ping action"
exit 1
fi
;;
esac
register: tailscale_action_result
when: not skip_tailscale
- name: Display action result
debug:
msg: |
🔧 Tailscale action '{{ tailscale_action }}' completed on {{ inventory_hostname }}
Result:
{{ tailscale_action_result.stdout }}
{% if tailscale_action_result.stderr %}
Errors:
{{ tailscale_action_result.stderr }}
{% endif %}
when: tailscale_action is defined and not skip_tailscale
    # Aggregate all per-host JSON reports (written earlier in this play) into
    # a single markdown topology map on the control node. Runs once, on
    # localhost, after all hosts have reported. Uses jq when available and
    # degrades to report-links-only when it is not.
    - name: Generate network topology map (run once)
      shell: |
        cd "{{ tailscale_report_dir }}"
        echo "# Tailscale Network Topology" > network_topology.md
        echo "" >> network_topology.md
        echo "**Generated:** {{ tailscale_timestamp }}" >> network_topology.md
        echo "" >> network_topology.md
        # Process all Tailscale JSON reports
        for json_file in *_tailscale_*.json; do
          if [ -f "$json_file" ]; then
            hostname=$(basename "$json_file" | cut -d'_' -f1)
            echo "## 🖥️ $hostname" >> network_topology.md
            echo "" >> network_topology.md
            # Extract key information
            if command -v jq >/dev/null 2>&1; then
              available=$(jq -r '.tailscale_available' "$json_file" 2>/dev/null || echo "unknown")
              echo "- **Tailscale:** $available" >> network_topology.md
              # Try to extract IP if available
              if [ "$available" = "true" ]; then
                echo "- **Status:** Connected" >> network_topology.md
              else
                echo "- **Status:** Not available" >> network_topology.md
              fi
            fi
            echo "- **Report:** [$json_file](./$json_file)" >> network_topology.md
            echo "" >> network_topology.md
          fi
        done
        echo "---" >> network_topology.md
        echo "*Auto-generated by Ansible tailscale_management.yml playbook*" >> network_topology.md
      delegate_to: localhost
      run_once: true
    # Operator-facing recap: where the artifacts were written and which
    # extra-vars drive the optional management operations.
    - name: Summary message
      debug:
        msg: |
          🌐 Tailscale management complete for {{ inventory_hostname }}
          📄 Report saved to: {{ tailscale_report_dir }}/{{ inventory_hostname }}_tailscale_{{ ansible_date_time.epoch }}.json
          🗺️ Network topology: {{ tailscale_report_dir }}/network_topology.md
          {% if tailscale_action is defined %}
          🔧 Action performed: {{ tailscale_action }}
          {% endif %}
          💡 Use -e tailscale_action=<action> for management operations
          💡 Supported actions: status, login, logout, up, down, ping
          💡 Use -e tailscale_target=<ip> with ping action

View File

@@ -0,0 +1,255 @@
---
# Tailscale Mesh Management
# Validates mesh connectivity, manages keys, and monitors VPN performance
# Run with: ansible-playbook -i hosts.ini playbooks/tailscale_mesh_management.yml
- name: Tailscale Mesh Management
  hosts: all
  gather_facts: yes
  vars:
    # Hostnames that should be present in the tailnet; anything missing from
    # the live peer list is reported under "Missing Expected Nodes".
    tailscale_expected_nodes:
      - "homelab"
      - "atlantis"
      - "calypso"
      - "setillo"
      - "pi-5"
      - "pi-5-kevin"
      - "vish-concord-nuc"
      - "pve"
      - "truenas-scale"
      - "homeassistant"
    performance_test_targets:
      - "100.64.0.1"  # Tailscale coordinator
      - "atlantis"
      - "calypso"
  tasks:
    - name: Check if Tailscale is installed
      command: which tailscale
      register: tailscale_installed
      failed_when: false
      changed_when: false
    # NOTE(review): this task has no failed_when override, so a stopped or
    # unauthenticated tailscaled (non-zero exit) hard-fails the play for
    # that host — confirm that is the intended behavior.
    - name: Get Tailscale status
      command: tailscale status --json
      register: tailscale_status_raw
      when: tailscale_installed.rc == 0
      become: yes
    - name: Parse Tailscale status
      set_fact:
        tailscale_status: "{{ tailscale_status_raw.stdout | from_json }}"
      when: tailscale_installed.rc == 0 and tailscale_status_raw.stdout != ""
    - name: Get Tailscale IP
      command: tailscale ip -4
      register: tailscale_ip
      when: tailscale_installed.rc == 0
      become: yes
    - name: Display Tailscale node info
      debug:
        msg: |
          Tailscale Status for {{ inventory_hostname }}:
          - Installed: {{ 'Yes' if tailscale_installed.rc == 0 else 'No' }}
          {% if tailscale_installed.rc == 0 %}
          - IP Address: {{ tailscale_ip.stdout }}
          - Backend State: {{ tailscale_status.BackendState }}
          - Version: {{ tailscale_status.Version }}
          - Online: {{ tailscale_status.Self.Online }}
          - Exit Node: {{ tailscale_status.Self.ExitNode | default('None') }}
          {% endif %}
    # Flatten the status JSON's Peer mapping (keyed by node key) into a
    # plain list of peer objects for the analysis below.
    - name: Get peer information
      set_fact:
        tailscale_peers: "{{ tailscale_status.Peer | dict2items | map(attribute='value') | list }}"
      when: tailscale_installed.rc == 0 and tailscale_status.Peer is defined
    # Split peers into online/offline and diff the live hostname set
    # (peers + self) against tailscale_expected_nodes.
    - name: Analyze mesh connectivity
      set_fact:
        online_peers: "{{ tailscale_peers | selectattr('Online', 'equalto', true) | list }}"
        offline_peers: "{{ tailscale_peers | selectattr('Online', 'equalto', false) | list }}"
        expected_missing: "{{ tailscale_expected_nodes | difference(tailscale_peers | map(attribute='HostName') | list + [tailscale_status.Self.HostName]) }}"
      when: tailscale_installed.rc == 0 and tailscale_peers is defined
    - name: Display mesh analysis
      debug:
        msg: |
          Tailscale Mesh Analysis:
          - Total Peers: {{ tailscale_peers | length if tailscale_peers is defined else 0 }}
          - Online Peers: {{ online_peers | length if online_peers is defined else 0 }}
          - Offline Peers: {{ offline_peers | length if offline_peers is defined else 0 }}
          - Expected Nodes: {{ tailscale_expected_nodes | length }}
          - Missing Nodes: {{ expected_missing | length if expected_missing is defined else 0 }}
          {% if offline_peers is defined and offline_peers | length > 0 %}
          Offline Peers:
          {% for peer in offline_peers %}
          - {{ peer.HostName }} ({{ peer.TailscaleIPs[0] }})
          {% endfor %}
          {% endif %}
          {% if expected_missing is defined and expected_missing | length > 0 %}
          Missing Expected Nodes:
          {% for node in expected_missing %}
          - {{ node }}
          {% endfor %}
          {% endif %}
      when: tailscale_installed.rc == 0
    # ICMP latency probe per target; the Jinja loop unrolls the targets into
    # one shell script. The awk field picks the "avg" figure from ping's
    # min/avg/max/mdev summary line.
    - name: Test connectivity to key nodes
      shell: |
        echo "=== Connectivity Tests ==="
        {% for target in performance_test_targets %}
        echo "Testing {{ target }}..."
        if ping -c 3 -W 2 {{ target }} >/dev/null 2>&1; then
          latency=$(ping -c 3 {{ target }} | tail -1 | awk -F '/' '{print $5}')
          echo "✓ {{ target }}: ${latency}ms avg"
        else
          echo "✗ {{ target }}: Unreachable"
        fi
        {% endfor %}
      register: connectivity_tests
      when: tailscale_installed.rc == 0
    # systemd module with only `name:` performs no change; it returns the
    # unit's status in the registered result (used in the report below).
    - name: Check Tailscale service status
      systemd:
        name: tailscaled
      register: tailscale_service
      when: tailscale_installed.rc == 0
      become: yes
    - name: Get Tailscale logs
      shell: journalctl -u tailscaled --since "1 hour ago" --no-pager | tail -20
      register: tailscale_logs
      when: tailscale_installed.rc == 0
      become: yes
    - name: Check for Tailscale updates
      shell: |
        current_version=$(tailscale version | head -1 | awk '{print $1}')
        echo "Current version: $current_version"
        # Check if update is available (this is a simplified check)
        if command -v apt >/dev/null 2>&1; then
          apt list --upgradable 2>/dev/null | grep tailscale || echo "No updates available via apt"
        elif command -v yum >/dev/null 2>&1; then
          yum check-update tailscale 2>/dev/null || echo "No updates available via yum"
        else
          echo "Package manager not supported for update check"
        fi
      register: update_check
      when: tailscale_installed.rc == 0
      become: yes
    - name: Generate network performance report
      shell: |
        echo "=== Network Performance Report ==="
        echo "Timestamp: $(date)"
        echo "Host: {{ inventory_hostname }}"
        echo ""
        {% if tailscale_installed.rc == 0 %}
        echo "=== Tailscale Interface ==="
        ip addr show tailscale0 2>/dev/null || echo "Tailscale interface not found"
        echo ""
        echo "=== Route Table ==="
        ip route | grep -E "(tailscale|100\.)" || echo "No Tailscale routes found"
        echo ""
        echo "=== DNS Configuration ==="
        tailscale status --peers=false --self=false 2>/dev/null | grep -E "(DNS|MagicDNS)" || echo "DNS info not available"
        {% else %}
        echo "Tailscale not installed on this host"
        {% endif %}
      register: performance_report
      when: tailscale_installed.rc == 0
    # Requires jq on the target; failed_when: false tolerates its absence.
    - name: Check exit node configuration
      shell: tailscale status --json | jq -r '.ExitNodeStatus // "No exit node configured"'
      register: exit_node_status
      when: tailscale_installed.rc == 0
      become: yes
      failed_when: false
    # Optional control-plane check against the Tailscale admin API; only
    # runs when an API key is supplied and check_acls=true.
    - name: Validate Tailscale ACLs (if admin)
      uri:
        url: "https://api.tailscale.com/api/v2/tailnet/{{ tailscale_tailnet | default('example.com') }}/acl"
        method: GET
        headers:
          Authorization: "Bearer {{ tailscale_api_key }}"
      register: acl_check
      when:
        - tailscale_api_key is defined
        - check_acls | default(false) | bool
      delegate_to: localhost
      run_once: true
      failed_when: false
    # Per-host markdown report assembled from everything registered above,
    # written on the control node.
    - name: Generate Tailscale mesh report
      copy:
        content: |
          # Tailscale Mesh Report - {{ inventory_hostname }}
          Generated: {{ ansible_date_time.iso8601 }}
          ## Node Status
          - Tailscale Installed: {{ 'Yes' if tailscale_installed.rc == 0 else 'No' }}
          {% if tailscale_installed.rc == 0 %}
          - IP Address: {{ tailscale_ip.stdout }}
          - Backend State: {{ tailscale_status.BackendState }}
          - Version: {{ tailscale_status.Version }}
          - Online: {{ tailscale_status.Self.Online }}
          - Service Status: {{ tailscale_service.status.ActiveState }}
          {% endif %}
          {% if tailscale_peers is defined %}
          ## Mesh Connectivity
          - Total Peers: {{ tailscale_peers | length }}
          - Online Peers: {{ online_peers | length }}
          - Offline Peers: {{ offline_peers | length }}
          ### Online Peers
          {% for peer in online_peers %}
          - {{ peer.HostName }} ({{ peer.TailscaleIPs[0] }}) - Last Seen: {{ peer.LastSeen }}
          {% endfor %}
          {% if offline_peers | length > 0 %}
          ### Offline Peers
          {% for peer in offline_peers %}
          - {{ peer.HostName }} ({{ peer.TailscaleIPs[0] }}) - Last Seen: {{ peer.LastSeen }}
          {% endfor %}
          {% endif %}
          {% endif %}
          ## Connectivity Tests
          ```
          {{ connectivity_tests.stdout if connectivity_tests is defined else 'Not performed' }}
          ```
          ## Performance Report
          ```
          {{ performance_report.stdout if performance_report is defined else 'Not available' }}
          ```
          ## Recent Logs
          ```
          {{ tailscale_logs.stdout if tailscale_logs is defined else 'Not available' }}
          ```
          ## Update Status
          ```
          {{ update_check.stdout if update_check is defined else 'Not checked' }}
          ```
        dest: "/tmp/tailscale_mesh_{{ inventory_hostname }}_{{ ansible_date_time.epoch }}.md"
      delegate_to: localhost
    - name: Display mesh summary
      debug:
        msg: |
          Tailscale Mesh Summary for {{ inventory_hostname }}:
          - Status: {{ 'Connected' if tailscale_installed.rc == 0 and tailscale_status.BackendState == 'Running' else 'Disconnected' }}
          - IP: {{ tailscale_ip.stdout if tailscale_installed.rc == 0 else 'N/A' }}
          - Peers: {{ tailscale_peers | length if tailscale_peers is defined else 0 }}
          - Report: /tmp/tailscale_mesh_{{ inventory_hostname }}_{{ ansible_date_time.epoch }}.md
View File

@@ -0,0 +1,111 @@
---
# Tailscale Update Playbook
#
# Updates Tailscale across all managed hosts using the appropriate method
# for each host type.
#
# Usage:
#   ansible-playbook -i inventory.yml playbooks/tailscale_update.yml
#   ansible-playbook -i inventory.yml playbooks/tailscale_update.yml --tags check
#   ansible-playbook -i inventory.yml playbooks/tailscale_update.yml --tags update
#   ansible-playbook -i inventory.yml playbooks/tailscale_update.yml --limit "pi-5,homelab"
#
# Host types and update methods:
#   apt_tailscale: apt update && apt install tailscale (Debian/Ubuntu)
#   synology: Manual via DSM Package Center (report only)
#   truenas-scale: Manual via TrueNAS Apps UI (Docker container, report only)
#   routers: Manual via vendor UI (report only)
# Play 1: version inventory. Each host type gets its own probe; the results
# are unified into the tailscale_current fact, which the later plays reuse
# (host facts persist across plays within a run).
- name: Tailscale Update — Check Versions
  hosts: tailscale_hosts
  gather_facts: false
  tags: [check, update]
  tasks:
    - name: Get current Tailscale version (apt hosts)
      shell: tailscale version 2>/dev/null | head -1 || echo "NOT_INSTALLED"
      register: ts_version
      changed_when: false
      when: "'apt_tailscale' in group_names"
    - name: Get current Tailscale version (Synology)
      shell: |
        for p in /var/packages/Tailscale/target/bin/tailscale /usr/local/bin/tailscale /var/packages/WireGuard/target/bin/tailscale; do
          [ -x "$p" ] && $p version 2>/dev/null | head -1 && exit 0
        done
        synopkg version Tailscale 2>/dev/null || echo "UNKNOWN"
      register: ts_version_synology
      changed_when: false
      when: "'synology' in group_names"
    # The {{ '{{' }} / {{ '}}' }} escapes hand the literal Go template
    # "{{.Image}}" through Jinja to docker's --format option.
    - name: Get current Tailscale version (TrueNAS Docker)
      shell: docker ps --filter "name=tailscale" --format "{{ '{{' }}.Image{{ '}}' }}" 2>/dev/null | head -1 || echo "UNKNOWN"
      register: ts_version_truenas
      changed_when: false
      become: true
      when: inventory_hostname == 'truenas-scale'
    - name: Get current Tailscale version (OpenWrt)
      shell: tailscale version 2>/dev/null | head -1 || opkg info tailscale 2>/dev/null | grep Version | awk '{print $2}' || echo "UNKNOWN"
      register: ts_version_router
      changed_when: false
      when: "'routers' in group_names"
    # Skipped probes register without stdout, so the nested default() chain
    # falls through to whichever probe actually ran on this host.
    - name: Set unified version fact
      set_fact:
        tailscale_current: >-
          {{ ts_version.stdout | default(
             ts_version_synology.stdout | default(
             ts_version_truenas.stdout | default(
             ts_version_router.stdout | default('UNKNOWN')))) | trim }}
    - name: Display current versions
      debug:
        msg: "{{ inventory_hostname }}: {{ tailscale_current }}"
# Play 2: automatic update for apt-managed hosts only.
- name: Tailscale Update — APT Hosts
  hosts: apt_tailscale
  gather_facts: false
  become: true
  tags: [update]
  tasks:
    - name: Check for available update
      shell: apt list --upgradable 2>/dev/null | grep tailscale || echo "UP_TO_DATE"
      register: apt_check
      changed_when: false
    - name: Update Tailscale via apt
      apt:
        name: tailscale
        state: latest
        update_cache: true
        cache_valid_time: 300
      register: apt_update
      when: "'UP_TO_DATE' not in apt_check.stdout"
    - name: Get new version after update
      shell: tailscale version | head -1
      register: ts_new_version
      changed_when: false
      when: apt_update is changed
    - name: Report update result
      debug:
        msg: >-
          {{ inventory_hostname }}:
          {{ 'Updated to ' + ts_new_version.stdout if apt_update is changed
             else 'Already up to date' }}
# Play 3: hosts that cannot be updated automatically get a per-host reminder
# with their vendor-specific instructions (from inventory vars).
- name: Tailscale Update — Manual Hosts Report
  hosts: tailscale_manual
  gather_facts: false
  tags: [update]
  tasks:
    - name: Report manual update required
      debug:
        msg: >-
          {{ inventory_hostname }} ({{ tailscale_update_method | default('unknown') }}):
          Current version {{ tailscale_current | default('unknown') }}.
          Update manually via {{ tailscale_update_instructions | default('vendor UI') }}.

View File

@@ -0,0 +1,202 @@
---
# TrueNAS SCALE health check: ZFS pools/scrubs/datasets, SMART disk health,
# and app runtime status, summarized to the console and archived as a JSON
# report on the control node. Probes are failed_when: false so one missing
# tool never aborts the run; only the ZFS-pool assert (ignore_errors) can
# mark a failure, and even that does not stop the play.
- name: TrueNAS SCALE Health Check
  hosts: truenas-scale
  gather_facts: yes
  become: true
  vars:
    report_dir: "/tmp/health_reports"
  tasks:
    # ---------- Report directory ----------
    - name: Ensure health report directory exists
      ansible.builtin.file:
        path: "{{ report_dir }}"
        state: directory
        mode: '0755'
      delegate_to: localhost
      run_once: true
    # ---------- System overview ----------
    # /etc/version is the primary source; the midclt API call is the
    # fallback (its stdout is captured because it runs as the elif test).
    - name: TrueNAS version
      ansible.builtin.shell: |
        if [ -f /etc/version ]; then
          cat /etc/version
        elif midclt call system.version 2>/dev/null; then
          true
        else
          echo "version unavailable"
        fi
      register: truenas_version
      changed_when: false
      failed_when: false
    - name: System uptime
      ansible.builtin.command: uptime -p
      register: uptime_pretty
      changed_when: false
      failed_when: false
    # ---------- ZFS pool health ----------
    - name: ZFS pool status (verbose)
      ansible.builtin.command: zpool status -v
      register: zpool_status
      changed_when: false
      failed_when: false
    - name: ZFS pool list with usage
      ansible.builtin.command: zpool list -H
      register: zpool_list
      changed_when: false
      failed_when: false
    # Counts "state:" lines reporting an unhealthy pool state; the assert
    # below requires this count to be zero.
    - name: Count degraded or faulted pools
      ansible.builtin.shell: >
        zpool status 2>/dev/null
        | grep -E "state:\s*(DEGRADED|FAULTED|OFFLINE|REMOVED)"
        | wc -l
      register: pool_errors
      changed_when: false
      failed_when: false
    # ignore_errors keeps the healthcheck running (and the report written)
    # even when a pool is unhealthy — the failure still shows in the recap.
    - name: Assert all ZFS pools are ONLINE
      ansible.builtin.assert:
        that:
          - pool_errors.stdout | trim | int == 0
        success_msg: "All ZFS pools ONLINE"
        fail_msg: "DEGRADED or FAULTED pool detected"
      ignore_errors: yes
    # ---------- ZFS scrub status ----------
    - name: ZFS scrub/scan status per pool
      ansible.builtin.shell: |
        for pool in $(zpool list -H -o name 2>/dev/null); do
          echo "Pool: $pool"
          zpool status "$pool" 2>/dev/null | grep -E "scrub|scan" | head -3
          echo "---"
        done
      register: zpool_scrub
      changed_when: false
      failed_when: false
    # ---------- Dataset usage ----------
    - name: ZFS dataset usage (top-level, up to 20)
      ansible.builtin.shell: >
        zfs list -H -o name,used,avail,refer,mountpoint -d 1 2>/dev/null | head -20
      register: zfs_datasets
      changed_when: false
      failed_when: false
    # ---------- SMART disk status ----------
    # Note: empty output here means lsblk returned no physical disks or is unavailable,
    # not that no disks exist. The SMART loop below re-runs lsblk independently.
    - name: List physical disks
      ansible.builtin.shell: >
        lsblk -d -o NAME,SIZE,MODEL,SERIAL 2>/dev/null
        | grep -v "loop\|sr"
      register: disk_list
      changed_when: false
      failed_when: false
    # Per-disk smartctl -H; disks without SMART support are reported as
    # "SMART unavailable" rather than counted as failures.
    - name: Check SMART health for each disk
      ansible.builtin.shell: |
        failed=0
        results=""
        for disk in $(lsblk -d -n -o NAME 2>/dev/null | grep -v "loop\|sr"); do
          out=$(smartctl -H /dev/$disk 2>/dev/null | grep -E "SMART overall-health|result:")
          if echo "$out" | grep -qi "FAILED"; then
            failed=$((failed + 1))
            results="$results\n$disk: FAILED ($out)"
          else
            results="$results\n$disk: ${out:-SMART unavailable}"
          fi
        done
        echo -e "SMART failures: $failed$results"
      register: smart_status
      changed_when: false
      failed_when: false
    # ---------- TrueNAS apps (k3s / midclt) ----------
    # Tries k3s pod-status counts first (older SCALE releases), then the
    # midclt chart.release.query API, and finally reports "not detected".
    - name: TrueNAS app status
      ansible.builtin.shell: |
        out=$(k3s kubectl get pods -A --no-headers 2>/dev/null \
          | awk '{print $4}' | sort | uniq -c | sort -rn 2>/dev/null)
        if [ -n "$out" ]; then
          echo "$out"
          exit 0
        fi
        out=$(midclt call chart.release.query 2>/dev/null \
          | python3 -c "
        import json,sys
        try:
            data = json.load(sys.stdin)
            [print(f'{a.get(\"id\",\"?\"):30} {a.get(\"status\",\"?\")}') for a in data]
        except Exception:
            pass
        " 2>/dev/null)
        if [ -n "$out" ]; then
          echo "$out"
          exit 0
        fi
        echo "App runtime not detected"
      register: app_status
      changed_when: false
      failed_when: false
    # ---------- Summary ----------
    - name: TrueNAS health summary
      ansible.builtin.debug:
        msg: |
          ============================================================
          TrueNAS SCALE Health — {{ inventory_hostname }}
          ============================================================
          Version : {{ truenas_version.stdout | default('unknown') | trim }}
          Uptime  : {{ uptime_pretty.stdout | default('n/a') | trim }}
          --- ZFS Pool Status ---
          {{ zpool_status.stdout | default('unavailable') }}
          --- ZFS Pool List ---
          {{ zpool_list.stdout | default('unavailable') }}
          --- Pool Error Count ---
          {{ pool_errors.stdout | default('0') | trim }} degraded/faulted/offline/removed pool(s)
          --- ZFS Scrub / Scan Status ---
          {{ zpool_scrub.stdout | default('unavailable') }}
          --- Dataset Usage (top-level) ---
          {{ zfs_datasets.stdout | default('unavailable') }}
          --- Physical Disks ---
          {{ disk_list.stdout | default('unavailable') }}
          --- SMART Health ---
          {{ smart_status.stdout | default('unavailable') }}
          --- App Status ---
          {{ app_status.stdout | default('unavailable') }}
          ============================================================
    # ---------- JSON report ----------
    # Written to the control node; one file per day (date-stamped filename).
    - name: Write TrueNAS health JSON report
      ansible.builtin.copy:
        content: "{{ report_data | to_nice_json }}"
        dest: "{{ report_dir }}/truenas_{{ ansible_date_time.date }}.json"
      vars:
        report_data:
          timestamp: "{{ ansible_date_time.iso8601 }}"
          host: "{{ inventory_hostname }}"
          truenas_version: "{{ truenas_version.stdout | default('unknown') | trim }}"
          uptime: "{{ uptime_pretty.stdout | default('n/a') | trim }}"
          zpool_status: "{{ zpool_status.stdout | default('') }}"
          zpool_list: "{{ zpool_list.stdout | default('') }}"
          pool_errors: "{{ pool_errors.stdout | default('0') | trim }}"
          zpool_scrub: "{{ zpool_scrub.stdout | default('') }}"
          zfs_datasets: "{{ zfs_datasets.stdout | default('') }}"
          disk_list: "{{ disk_list.stdout | default('') }}"
          smart_status: "{{ smart_status.stdout | default('') }}"
          app_status: "{{ app_status.stdout | default('') }}"
      delegate_to: localhost
      changed_when: false
View File

@@ -0,0 +1,28 @@
---
- name: Update Debian-based systems
hosts: debian_clients
become: yes
vars:
ansible_become_method: sudo
tasks:
- name: Update package cache
apt:
update_cache: yes
cache_valid_time: 3600
- name: Upgrade all packages
apt:
upgrade: full
autoclean: yes
autoremove: yes
- name: Check for available updates
command: apt list --upgradable
register: apt_updates
changed_when: false
check_mode: no
- name: Show available updates
debug:
var: apt_updates.stdout_lines