---
# Perplexica - AI-powered search engine
# Port: 4785
# Configure LLM providers via web UI at http://192.168.0.210:4785/settings
#
# Configured to use Seattle Ollama instance (100.82.197.124:11434) via Tailscale
# This distributes LLM inference load to the Contabo VPS with CPU-only inference

services:
  perplexica:
    # NOTE(review): `latest` tag means upgrades happen on every pull — pin a
    # version/digest if reproducible deployments matter.
    image: itzcrazykns1337/perplexica:latest
    container_name: perplexica
    ports:
      # Host 4785 -> container 3000 (Perplexica's internal web port).
      # Quoted to avoid YAML 1.1 sexagesimal parsing of colon-separated digits.
      - "4785:3000"
    environment:
      # Remote Ollama endpoint reached over the Tailscale network.
      - OLLAMA_BASE_URL=http://100.82.197.124:11434
    volumes:
      # Persist Perplexica state across container recreation.
      - perplexica-data:/home/perplexica/data
    restart: unless-stopped

volumes:
  perplexica-data: