Sanitized mirror from private repository - 2026-04-05 09:32:56 UTC
This commit is contained in:
241
dashboard/api/routers/overview.py
Normal file
241
dashboard/api/routers/overview.py
Normal file
@@ -0,0 +1,241 @@
|
||||
"""Overview stats and SSE activity stream."""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import subprocess
|
||||
import sqlite3
|
||||
from datetime import date
|
||||
from fastapi import APIRouter
|
||||
from sse_starlette.sse import EventSourceResponse
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from lib_bridge import (
|
||||
portainer_list_containers, ENDPOINTS, ollama_available,
|
||||
GMAIL_DB, DVISH_DB, PROTON_DB, RESTART_DB, LOG_DIR, OLLAMA_URL,
|
||||
)
|
||||
from log_parser import get_recent_events, tail_logs, get_new_lines
|
||||
|
||||
router = APIRouter(tags=["overview"])
|
||||
|
||||
|
||||
def _count_today_emails(db_path: Path) -> int:
|
||||
"""Count emails processed today from a processed.db file."""
|
||||
if not db_path.exists():
|
||||
return 0
|
||||
try:
|
||||
today = date.today().isoformat()
|
||||
conn = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True)
|
||||
cur = conn.execute(
|
||||
"SELECT COUNT(*) FROM processed WHERE processed_at LIKE ?",
|
||||
(f"{today}%",),
|
||||
)
|
||||
count = cur.fetchone()[0]
|
||||
conn.close()
|
||||
return count
|
||||
except Exception:
|
||||
return 0
|
||||
|
||||
|
||||
def _count_unhealthy(db_path: Path) -> int:
|
||||
"""Count unhealthy containers from stack-restart.db."""
|
||||
if not db_path.exists():
|
||||
return 0
|
||||
try:
|
||||
conn = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True)
|
||||
cur = conn.execute("SELECT COUNT(*) FROM unhealthy_tracking")
|
||||
count = cur.fetchone()[0]
|
||||
conn.close()
|
||||
return count
|
||||
except Exception:
|
||||
return 0
|
||||
|
||||
|
||||
def _gpu_info() -> dict:
    """Fetch GPU telemetry from the olares host over SSH.

    Runs nvidia-smi remotely and parses its single-line CSV output.

    Returns:
        A dict with ``available: True`` plus temperature, power, memory
        and utilization fields on success; ``{"available": False}`` when
        SSH fails, times out, exits non-zero, or output cannot be parsed.
    """
    remote_cmd = (
        "nvidia-smi --query-gpu=temperature.gpu,power.draw,power.limit,"
        "memory.used,memory.total,utilization.gpu --format=csv,noheader,nounits"
    )

    def _to_float(raw):
        # nvidia-smi can emit non-numeric placeholders; map those to None
        # instead of failing the whole call.
        try:
            return float(raw)
        except (ValueError, TypeError):
            return None

    try:
        proc = subprocess.run(
            ["ssh", "-o", "ConnectTimeout=3", "olares", remote_cmd],
            capture_output=True, text=True, timeout=10,
        )
        if proc.returncode == 0:
            fields = [field.strip() for field in proc.stdout.strip().split(",")]
            if len(fields) >= 6:
                names = (
                    "temp_c", "power_draw_w", "power_limit_w",
                    "memory_used_mb", "memory_total_mb", "utilization_pct",
                )
                return {
                    "available": True,
                    **{name: _to_float(val) for name, val in zip(names, fields)},
                }
    except Exception:
        # Best-effort: any SSH/timeout/parse problem means "no GPU data".
        pass
    return {"available": False}
|
||||
|
||||
|
||||
@router.get("/stats/overview")
def stats_overview():
    """Aggregate overview stats: containers, GPU, emails, health, Ollama."""
    # Per-endpoint container tallies from Portainer; endpoints that fail
    # to answer are flagged with an "error" marker rather than aborting.
    by_endpoint = {}
    total = 0
    for ep_name in ENDPOINTS:
        try:
            listing = portainer_list_containers(ep_name)
        except Exception:
            by_endpoint[ep_name] = {"total": 0, "running": 0, "error": True}
            continue
        running = len([c for c in listing if c.get("State") == "running"])
        by_endpoint[ep_name] = {"total": len(listing), "running": running}
        total += len(listing)

    # GPU telemetry (best-effort SSH to olares).
    gpu = _gpu_info()

    # Emails processed today per organizer, plus a grand total.
    email_today = {
        "gmail": _count_today_emails(GMAIL_DB),
        "dvish": _count_today_emails(DVISH_DB),
        "proton": _count_today_emails(PROTON_DB),
    }
    email_today["total"] = sum(email_today.values())

    return {
        "containers": {"total": total, "by_endpoint": by_endpoint},
        "gpu": gpu,
        "email_today": email_today,
        "unhealthy_count": _count_unhealthy(RESTART_DB),
        "ollama_available": ollama_available(OLLAMA_URL),
    }
|
||||
|
||||
|
||||
@router.get("/activity")
async def activity_stream():
    """SSE stream of today's automation events.

    Emits one ``init`` event with the current backlog, then polls the
    log directory every 5 seconds and emits ``update`` events for any
    new lines.
    """

    async def _events():
        # First message: everything already logged today.
        yield {"event": "init", "data": json.dumps(get_recent_events(LOG_DIR))}

        # Remember per-file offsets, then stream only fresh lines.
        offsets = tail_logs(LOG_DIR)
        while True:
            await asyncio.sleep(5)
            fresh, offsets = get_new_lines(LOG_DIR, offsets)
            if fresh:
                yield {"event": "update", "data": json.dumps(fresh)}

    return EventSourceResponse(_events())
|
||||
|
||||
|
||||
@router.post("/actions/pause-organizers")
def pause_organizers():
    """Pause all email organizer cron jobs via the control script.

    Returns:
        dict with ``success`` (bool) and ``output`` (script stdout, or
        the error text when the script could not be run at all).
    """
    try:
        result = subprocess.run(
            ["/home/homelab/organized/repos/homelab/scripts/gmail-organizer-ctl.sh", "stop"],
            capture_output=True, text=True, timeout=10,
        )
    except (OSError, subprocess.SubprocessError) as e:
        # Fix: a missing script or a timeout previously raised out of the
        # endpoint (HTTP 500); report a structured failure instead.
        return {"success": False, "output": str(e)}
    return {"success": result.returncode == 0, "output": result.stdout.strip()}
|
||||
|
||||
|
||||
@router.post("/actions/resume-organizers")
def resume_organizers():
    """Resume all email organizer cron jobs via the control script.

    Returns:
        dict with ``success`` (bool) and ``output`` (script stdout, or
        the error text when the script could not be run at all).
    """
    try:
        result = subprocess.run(
            ["/home/homelab/organized/repos/homelab/scripts/gmail-organizer-ctl.sh", "start"],
            capture_output=True, text=True, timeout=10,
        )
    except (OSError, subprocess.SubprocessError) as e:
        # Fix: a missing script or a timeout previously raised out of the
        # endpoint (HTTP 500); report a structured failure instead.
        return {"success": False, "output": str(e)}
    return {"success": result.returncode == 0, "output": result.stdout.strip()}
|
||||
|
||||
|
||||
@router.get("/actions/organizer-status")
def organizer_status():
    """Check if organizers are running or paused.

    Returns:
        dict with ``output`` (script stdout); when the script cannot be
        run at all, ``output`` is empty and an ``error`` key is added
        (backward-compatible — callers keyed on ``output`` still work).
    """
    try:
        result = subprocess.run(
            ["/home/homelab/organized/repos/homelab/scripts/gmail-organizer-ctl.sh", "status"],
            capture_output=True, text=True, timeout=10,
        )
    except (OSError, subprocess.SubprocessError) as e:
        # Fix: a missing script or a timeout previously raised out of the
        # endpoint (HTTP 500); report a structured failure instead.
        return {"output": "", "error": str(e)}
    return {"output": result.stdout.strip()}
|
||||
|
||||
|
||||
@router.post("/chat")
def chat_with_ollama(body: dict):
    """Chat with Ollama using live homelab context.

    Args:
        body: Request JSON; must contain a non-empty ``message`` string.

    Returns:
        ``{"response": str}`` on success, or ``{"error": str}`` when the
        message is missing or generation fails.
    """
    message = body.get("message", "")
    if not message:
        return {"error": "No message provided"}

    # Gather live context (best-effort: chat still works without it).
    context_parts = []
    try:
        # Fix: the original called the undefined name get_overview(),
        # which always raised NameError here and silently degraded to
        # "(could not fetch live stats)". The aggregator defined in this
        # module is stats_overview().
        overview = stats_overview()
        containers = overview.get("containers", {})
        gpu = overview.get("gpu", {})
        context_parts.append(
            f"Containers: {containers.get('total', '?')} total across endpoints: "
            + ", ".join(f"{k} ({v.get('total','?')} containers, {v.get('running','?')} running)"
                        for k, v in containers.get("by_endpoint", {}).items())
        )
        if gpu.get("available"):
            context_parts.append(
                f"GPU: {gpu.get('name','RTX 5090')}, {gpu.get('temp_c','?')}°C, "
                f"{gpu.get('memory_used_mb','?')}/{gpu.get('memory_total_mb','?')} MB VRAM, "
                f"{gpu.get('utilization_pct','?')}% util"
            )
        email_data = overview.get("email_today", {})
        if isinstance(email_data, dict):
            context_parts.append(f"Emails today: {email_data.get('total', 0)} (dvish: {email_data.get('dvish', 0)}, proton: {email_data.get('proton', 0)})")
        context_parts.append(f"Ollama: {'online' if overview.get('ollama_available') else 'offline'}")
        context_parts.append(f"Unhealthy containers: {overview.get('unhealthy_count', 0)}")
    except Exception:
        context_parts.append("(could not fetch live stats)")

    system_context = (
        "You are a homelab assistant. You have direct access to the following live infrastructure data:\n\n"
        + "\n".join(f"- {p}" for p in context_parts)
        + "\n\n"
        "Homelab hosts: Atlantis (Synology NAS, media/arr stack), Calypso (Synology, AdGuard DNS, Headscale, Authentik SSO), "
        "Olares (K3s, RTX 5090, Jellyfin, Ollama), NUC (lightweight services), RPi5 (Uptime Kuma), "
        "homelab-vm (Prometheus, Grafana, dashboard), Guava (TrueNAS), Seattle (remote VM), matrix-ubuntu (NPM, CrowdSec).\n\n"
        "Services: Sonarr, Radarr, SABnzbd, Deluge, Prowlarr, Bazarr, Lidarr, Tdarr, Audiobookshelf, LazyLibrarian on Atlantis. "
        "Jellyfin + Ollama on Olares with GPU transcoding. 3 email auto-organizers (Gmail x2 + Proton). "
        "11 Ollama-powered automation scripts. Gitea CI with AI PR reviewer.\n\n"
        "IMPORTANT: Answer using the LIVE DATA above, not general knowledge. The container counts are REAL numbers from Portainer right now. "
        "When asked 'how many containers on atlantis' answer with the exact number from the live data (e.g. 59). Be concise."
    )

    prompt = f"{system_context}\n\nUser: {message}\nAssistant:"

    try:
        # Fix: use the module-level ollama_available with its configured
        # URL, consistent with stats_overview (the original re-imported it
        # and called it with no argument).
        if not ollama_available(OLLAMA_URL):
            return {"response": "Ollama is currently offline. Try again later."}
        # The generation helper lives under scripts/lib; make it importable
        # both in-container (/app/scripts) and from a source checkout.
        scripts_dir = str(Path("/app/scripts") if Path("/app/scripts").exists() else Path(__file__).parent.parent.parent / "scripts")
        if scripts_dir not in sys.path:
            sys.path.insert(0, scripts_dir)
        from lib.ollama import ollama_generate
        response = ollama_generate(prompt, num_predict=800, timeout=90)
        return {"response": response}
    except Exception as e:
        return {"error": str(e)}
|
||||
Reference in New Issue
Block a user