Sanitized mirror from private repository - 2026-04-16 07:19:56 UTC
This commit is contained in:
158
scripts/ssh-planner.py
Normal file
158
scripts/ssh-planner.py
Normal file
@@ -0,0 +1,158 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Interactive tool: plain English -> SSH commands via Ollama LLM.
|
||||
|
||||
Usage:
|
||||
python3 scripts/ssh-planner.py "restart the media stack on atlantis"
|
||||
python3 scripts/ssh-planner.py --execute "check disk usage on all NAS boxes"
|
||||
python3 scripts/ssh-planner.py --dry-run "update packages on all debian hosts"
|
||||
"""
|
||||
|
||||
import argparse
import logging
import re
import subprocess
import sys
from pathlib import Path
|
||||
|
||||
# Make the sibling "lib" package importable when this file is run as a script
# (scripts/ is not a package, so relative imports would not work).
sys.path.insert(0, str(Path(__file__).parent))

from lib.ollama import ollama_generate, ollama_available, OllamaUnavailableError

# Module-level logger; logging is configured in main().
log = logging.getLogger(__name__)
|
||||
|
||||
# Inventory of SSH-reachable homelab hosts mapped to a one-line role summary.
# Injected verbatim into the LLM prompt (see build_context / generate_plan) so
# generated commands target real host names.
HOST_ROLES: dict[str, str] = {
    "atlantis": "Primary NAS (Synology DS1823xs+), media stack, arr suite, Docker host",
    "calypso": "Secondary NAS (Synology DS920+), AdGuard DNS, Headscale, Authentik SSO",
    "nuc": "Intel NUC, lightweight services, concord",
    "homelab-vm": "Main VM (192.168.0.210), Prometheus, Grafana, monitoring stack",
    "rpi5": "Raspberry Pi 5, Uptime Kuma monitoring",
    "olares": "Olares k3s host (192.168.0.145), RTX 5090, Jellyfin, Ollama",
    "guava": "TrueNAS SCALE, additional storage and compute",
    "seattle": "Remote VM, lightweight services",
    "setillo": "GL.iNet router, network edge",
    "matrix-ubuntu": "Matrix/Synapse, NPM, CrowdSec (192.168.0.154)",
}
|
||||
|
||||
|
||||
def build_context() -> str:
|
||||
lines = []
|
||||
for host, role in HOST_ROLES.items():
|
||||
lines.append(f" - {host}: {role}")
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def generate_plan(request: str) -> str:
    """Ask the Ollama LLM for a numbered SSH command plan fulfilling *request*."""
    host_summary = build_context()
    # Assemble the prompt from its logical pieces; the joined result is the
    # exact prompt text the original concatenation produced.
    prompt_parts = [
        f"Given these homelab hosts and their roles:\n{host_summary}\n\n",
        f"Generate SSH commands to accomplish: {request}\n\n",
        "Reply as a numbered list. Each line: HOST: COMMAND — DESCRIPTION\n",
        "For multi-step tasks, order the steps correctly.\n",
        "Use the host names exactly as listed above for SSH targets.\n",
        "Do not wrap in code blocks.",
    ]
    return ollama_generate("".join(prompt_parts), num_predict=1500)
|
||||
|
||||
|
||||
def parse_plan_lines(plan_text: str) -> list[dict]:
    """Best-effort parse of 'N. HOST: COMMAND — DESCRIPTION' lines.

    Args:
        plan_text: Raw LLM plan output, one numbered step per line.

    Returns:
        A list of {"host", "command", "description"} dicts, one per line that
        matched the expected shape; unmatched lines are silently skipped.
        "description" is "" when the line had no dash-separated tail.
    """
    # Matches "N. HOST: command" with an optional " — description" tail.
    # The separator accepts em dash, en dash, or hyphen, each surrounded by
    # whitespace so in-command flags like "df -h" are not split.
    # (Fix: 'import re' was previously inside this function; hoisted to the
    # module imports, and the pattern is compiled once outside the loop.)
    step_re = re.compile(r"^\d+\.\s*(\S+):\s*(.+?)(?:\s[—–-]\s(.+))?$")

    steps: list[dict] = []
    for line in plan_text.splitlines():
        line = line.strip()
        if not line:
            continue
        m = step_re.match(line)
        if not m:
            continue
        steps.append({
            "host": m.group(1).strip(),
            "command": m.group(2).strip(),
            "description": (m.group(3) or "").strip(),
        })
    return steps
|
||||
|
||||
|
||||
def display_plan(plan_text: str, steps: list[dict]) -> None:
    """Print the execution plan: structured steps if parsed, raw text otherwise."""
    banner = "=" * 60
    print("\n" + banner)
    print("SSH EXECUTION PLAN")
    print(banner)

    if not steps:
        # Parsing produced nothing structured — fall back to the raw LLM output.
        print(plan_text)
    else:
        for idx, step in enumerate(steps, 1):
            target = step["host"]
            print(f"\n {idx}. [{target}]")
            print(f" $ ssh {target} {step['command']!r}")
            if step["description"]:
                print(f" -> {step['description']}")

    print("\n" + banner)
|
||||
|
||||
|
||||
def execute_steps(steps: list[dict]) -> None:
    """Run each step's command on its host over SSH, prompting after failures.

    Args:
        steps: Parsed plan steps ({"host", "command", "description"} dicts,
            as produced by parse_plan_lines).

    Side effects:
        Spawns one ``ssh HOST COMMAND`` subprocess per step, prints its
        output, and — after a non-zero exit or a timeout — asks the user
        whether to continue.  Answering "n" aborts the remaining steps.
    """
    for i, step in enumerate(steps, 1):
        host = step["host"]
        cmd = step["command"]
        print(f"\n[{i}/{len(steps)}] Executing on {host}: {cmd}")
        try:
            result = subprocess.run(
                ["ssh", host, cmd],
                capture_output=True,
                text=True,
                timeout=60,  # keep one hung host from stalling the whole plan
            )
        except subprocess.TimeoutExpired:
            # Fix: the timeout previously raised uncaught, aborting the whole
            # run with a traceback instead of offering to continue.
            print(f" WARNING: timed out after 60s on {host}")
            answer = input(" Continue? [Y/n] ").strip().lower()
            if answer == "n":
                print("Aborted.")
                return
            continue
        if result.stdout.strip():
            print(result.stdout.strip())
        if result.stderr.strip():
            print(f" STDERR: {result.stderr.strip()}")
        if result.returncode != 0:
            print(f" WARNING: exit code {result.returncode}")
            answer = input(" Continue? [Y/n] ").strip().lower()
            if answer == "n":
                print("Aborted.")
                return
    print("\nAll steps completed.")
|
||||
|
||||
|
||||
def main() -> None:
    """CLI entry point: parse args, generate an SSH plan, optionally execute it."""
    arg_parser = argparse.ArgumentParser(description="Plain English -> SSH commands via LLM")
    arg_parser.add_argument("request", help="Plain English description of what to do")
    arg_parser.add_argument("--execute", action="store_true", help="Prompt to execute the generated commands")
    arg_parser.add_argument("--dry-run", action="store_true", help="Show plan only (default behavior without --execute)")
    arg_parser.add_argument("--verbose", "-v", action="store_true", help="Enable debug logging")
    opts = arg_parser.parse_args()

    log_level = logging.DEBUG if opts.verbose else logging.INFO
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    )

    # Fail fast if the LLM backend is down — nothing else can proceed.
    if not ollama_available():
        log.error("Ollama is not reachable — aborting")
        sys.exit(1)

    log.info("Generating SSH plan for: %s", opts.request)
    plan = generate_plan(opts.request)
    parsed_steps = parse_plan_lines(plan)
    display_plan(plan, parsed_steps)

    # Plan-only mode: --dry-run, or simply the absence of --execute.
    if opts.dry_run or not opts.execute:
        return

    if not parsed_steps:
        print("Could not parse structured steps from LLM output. Cannot execute.")
        return

    reply = input("\nExecute these commands? [y/N] ").strip().lower()
    if reply != "y":
        print("Aborted.")
        return

    execute_steps(parsed_steps)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Run the CLI only when invoked as a script, not on import.
    main()
|
||||
Reference in New Issue
Block a user