#!/usr/bin/env python3
"""
NeuralGate Hoster Client v1.0
─────────────────────────────
Helps you set up and register your GPU server as a NeuralGate hoster.

Usage:
    python3 neuralgate-hoster.py

What it does:
    1. Checks your local server is running
    2. Generates a secure bearer token (if needed)
    3. Tests /health and /v1/chat/completions endpoints
    4. Collects your model info and pricing
    5. Registers you on NeuralGate and runs verification
"""

import sys
import json
import secrets
import urllib.request
import urllib.error
import urllib.parse

# Base URL of the NeuralGate gateway API this script talks to.
GATEWAY = "https://api.computeshare.servequake.com"

# ANSI escape sequences for colored terminal output, one entry per style.
COLORS = {
    "green": "\033[92m",
    "red": "\033[91m",
    "yellow": "\033[93m",
    "blue": "\033[94m",
    "purple": "\033[95m",
    "bold": "\033[1m",
    "reset": "\033[0m",
}


def c(color, text):
    """Wrap *text* in the ANSI escape for *color*; unknown colors add no prefix."""
    prefix = COLORS.get(color, "")
    reset = COLORS["reset"]
    return f"{prefix}{text}{reset}"


def ok(msg):
    """Print a green check-marked success line."""
    print(c("green", f"  ✅ {msg}"))


def fail(msg):
    """Print a red cross-marked failure line."""
    print(c("red", f"  ❌ {msg}"))


def info(msg):
    """Print a blue informational line."""
    print(c("blue", f"  ℹ  {msg}"))


def warn(msg):
    """Print a yellow warning line."""
    print(c("yellow", f"  ⚠  {msg}"))


def header(msg):
    """Print a bold purple section heading followed by a horizontal rule."""
    rule = "─" * 50
    print(f"\n{c('purple', c('bold', msg))}\n{rule}")

def request(method, url, body=None, headers=None):
    """Send an HTTP request with an optional JSON body; decode the JSON reply.

    Args:
        method:  HTTP verb, e.g. "GET" or "POST".
        url:     Full URL to call.
        body:    Optional dict serialized as the JSON request body.
        headers: Optional header dict (defaults to a JSON content type).

    Returns:
        (status, payload) tuple. ``status`` is the HTTP status code, or 0
        when the request never reached a server (DNS failure, timeout,
        unknown scheme, ...). ``payload`` is the decoded JSON response, or
        a ``{"detail": ...}`` dict describing the error.
    """
    headers = headers or {"Content-Type": "application/json"}
    # `is not None` so an explicit empty dict still posts "{}" instead of no body.
    data = json.dumps(body).encode() if body is not None else None
    req = urllib.request.Request(url, data=data, headers=headers, method=method)
    try:
        with urllib.request.urlopen(req, timeout=15) as resp:
            return resp.status, json.loads(resp.read())
    except urllib.error.HTTPError as e:
        # Server answered with an error status; surface its JSON detail if any.
        try:
            detail = json.loads(e.read())
        except Exception:  # was a bare except: — keep breadth but stop at Exception
            detail = {"detail": str(e)}
        return e.code, detail
    except Exception as e:
        # Connection-level failure: there is no HTTP status to report.
        return 0, {"detail": str(e)}

def _check_health(base, headers):
    """Return True iff GET {base}/health answers with HTTP 200."""
    try:
        req = urllib.request.Request(f"{base}/health", headers=headers)
        with urllib.request.urlopen(req, timeout=10) as resp:
            return resp.status == 200
    except Exception as e:
        warn(f"Health check failed: {e}")
        return False


def _detect_model(base, headers):
    """Return the first model id advertised by {base}/v1/models, or "default".

    Best-effort: any failure (endpoint missing, bad JSON, empty list, entry
    without an "id" key) falls back to "default" rather than aborting.
    """
    try:
        req = urllib.request.Request(f"{base}/v1/models", headers=headers)
        with urllib.request.urlopen(req, timeout=10) as resp:
            models_data = json.loads(resp.read())
        # Servers expose the list under "data" (OpenAI style) or "models".
        models = models_data.get("data", models_data.get("models", []))
        if not models:
            return "default"
        # `or "default"` also covers an entry whose "id" is missing/None.
        return models[0].get("id") or "default"
    except Exception:
        return "default"


def _check_inference(base, headers, model):
    """POST a tiny chat completion; return True iff a non-empty reply comes back."""
    payload = {
        "model": model,
        "messages": [{"role": "user", "content": "Reply with only the text: NEURALGATE_OK"}],
        "max_tokens": 20,
        "temperature": 0.0
    }
    try:
        req = urllib.request.Request(
            f"{base}/v1/chat/completions",
            data=json.dumps(payload).encode(),
            headers=headers, method="POST"
        )
        with urllib.request.urlopen(req, timeout=30) as resp:
            data = json.loads(resp.read())
        reply = data.get("choices", [{}])[0].get("message", {}).get("content", "")
        if reply:
            ok(f"Inference works (model: {model}, reply: '{reply[:50]}')")
            return True
        fail("Got empty response from inference endpoint")
        return False
    except Exception as e:
        fail(f"Inference check failed: {e}")
        return False


def check_endpoint(endpoint, bearer_token=None):
    """Run the two checks NeuralGate requires.

    Args:
        endpoint:     Base URL of the candidate server (trailing "/" stripped).
        bearer_token: Optional token sent as an ``Authorization: Bearer`` header.

    Returns:
        (health_ok, inference_ok, model_used) — two booleans plus the model id
        used for the test completion ("default" when it could not be detected).
    """
    base = endpoint.rstrip("/")
    headers = {"Content-Type": "application/json"}
    if bearer_token:
        headers["Authorization"] = f"Bearer {bearer_token}"

    health_ok = _check_health(base, headers)
    if health_ok:
        ok("Health endpoint (/health) responds")
    else:
        fail("Health endpoint failed — make sure your server is running")

    model_used = _detect_model(base, headers)
    inference_ok = _check_inference(base, headers, model_used)

    return health_ok, inference_ok, model_used


def _ask_int(prompt, default):
    """Prompt for an integer; fall back to *default* on blank or invalid input."""
    raw = input(prompt).strip()
    if not raw:
        return default
    try:
        return int(raw)
    except ValueError:
        warn(f"Invalid number, using {default}")
        return default


def get_models_interactive(detected_model=None):
    """Collect model info from the user.

    Args:
        detected_model: Model id auto-detected from the server; offered as the
                        default for the Model ID prompt (may be None).

    Returns:
        List of model dicts shaped for the /hosters/register API, with at
        least one entry (the loop re-prompts until a model id is given).
    """
    models = []
    print(f"\n  {c('bold', 'Add your models')} (at least one required)")
    if detected_model:
        info(f"Detected model: {detected_model}")

    while True:
        print()
        model_id = input(f"  Model ID [{detected_model or 'e.g. llama-3-8b'}]: ").strip() or detected_model
        if not model_id:
            warn("Model ID is required")
            continue

        alias = input("  Display name [e.g. Llama 3 8B]: ").strip() or model_id

        print(f"  {c('yellow', 'Pricing')} (microdollars per 1M tokens. 100 = $0.10/1M)")
        # Each value falls back independently, so one typo no longer discards
        # the numbers that were already entered correctly.
        price_in = _ask_int("  Input price [100]: ", 100)
        price_out = _ask_int("  Output price [300]: ", 300)
        ctx = _ask_int("  Context window [4096]: ", 4096)
        max_tok = _ask_int("  Max output tokens [2048]: ", 2048)

        models.append({
            "model_id": model_id,
            "model_alias": alias,
            "price_per_input_token": price_in,
            "price_per_output_token": price_out,
            "context_window": ctx,
            "max_tokens": max_tok,
        })
        ok(f"Added: {alias}")

        another = input("\n  Add another model? [y/N]: ").strip().lower()
        if another != "y":
            break

    return models


def main():
    """Interactive CLI flow: verify the server, collect details, register it."""
    print(c("purple", c("bold", """
  ⚡ NeuralGate Hoster Setup
  ─────────────────────────
  Earn from your idle GPUs.
""")))

    # ── Step 1: Endpoint URL ──────────────────────────────────────────────────
    header("Step 1: Your Server Endpoint")
    print("  Enter the public URL of your LLM server.")
    info("If local, use a tunnel first: cloudflared tunnel --url http://localhost:8080")
    endpoint = input("\n  Endpoint URL [http://localhost:8080]: ").strip() or "http://localhost:8080"
    endpoint = endpoint.rstrip("/")

    # ── Step 2: Bearer Token ──────────────────────────────────────────────────
    header("Step 2: Bearer Token")
    print("  A bearer token secures your endpoint so only NeuralGate can call it.")
    existing = input("\n  Do you have an existing bearer token? [y/N]: ").strip().lower()
    if existing == "y":
        bearer_token = input("  Enter your token: ").strip()
    else:
        # secrets (not random) — tokens are security-sensitive.
        bearer_token = secrets.token_urlsafe(32)
        print(f"\n  {c('green', 'Generated token:')} {c('bold', bearer_token)}")
        warn("Configure your server to require this token, then press Enter.")
        print(f"\n  For llama.cpp, restart with: --api-key {bearer_token}")
        input("\n  Press Enter when ready... ")

    # ── Step 3: Verify ────────────────────────────────────────────────────────
    header("Step 3: Verifying Your Server")
    print(f"  Testing endpoint: {endpoint}\n")
    health_ok, inference_ok, detected_model = check_endpoint(endpoint, bearer_token)

    if not health_ok or not inference_ok:
        warn("Some checks failed. You can still register, but verification may fail.")
        proceed = input("\n  Continue anyway? [y/N]: ").strip().lower()
        if proceed != "y":
            print(c("yellow", "\n  Fix the issues above and run this script again.\n"))
            sys.exit(1)

    # ── Step 4: Model Info ────────────────────────────────────────────────────
    header("Step 4: Your Models & Pricing")
    models = get_models_interactive(detected_model)

    # ── Step 5: Your Info ─────────────────────────────────────────────────────
    header("Step 5: Your Details")
    print(f"  {c('yellow', 'By registering, you agree to the Terms of Service:')}")
    print(f"  {GATEWAY}/terms")
    print(f"  {c('yellow', 'You confirm you OWN the compute and are NOT proxying commercial APIs.')}")
    # FIX: the prompt previously contained a raw newline inside the string
    # literal (a SyntaxError); use an explicit "\n" escape instead.
    agreed = input("\n  I agree to the Terms of Service [y/N]: ").strip().lower()
    if agreed != "y":
        print(c("yellow", "\n  Registration cancelled.\n"))
        sys.exit(0)

    name = input("  Your name or organization: ").strip()
    email = input("  Email address: ").strip()
    payout = input("  Payout address (PayPal, bank, crypto) [optional]: ").strip() or None

    # ── Step 6: Register ──────────────────────────────────────────────────────
    header("Step 6: Registering on NeuralGate")
    payload = {
        "name": name,
        "email": email,
        "endpoint_url": endpoint,
        "api_key": bearer_token,
        "payout_address": payout,
        "models": models,
    }

    print(f"\n  Registering at {GATEWAY}...\n")
    status, data = request("POST", f"{GATEWAY}/hosters/register", body=payload)

    if status != 200:
        fail(f"Registration failed: {data.get('detail', data)}")
        sys.exit(1)

    # Guard against a 200 response that lacks the expected field, so a
    # malformed reply doesn't crash with a KeyError after a "successful" call.
    hoster_id = data.get("hoster_id")
    if not hoster_id:
        fail(f"Registration failed: response had no hoster_id: {data}")
        sys.exit(1)
    ok(f"Registered! Hoster ID: {hoster_id}")

    # ── Step 7: NeuralGate Verification ──────────────────────────────────────
    header("Step 7: NeuralGate Verification")
    print("  Running NeuralGate's official verification checks...\n")
    status, result = request("POST", f"{GATEWAY}/hosters/{hoster_id}/verify")

    # All lookups use .get() so a failed verify call (status 0, detail-only
    # payload) degrades to "both checks failed" instead of crashing.
    checks = result.get("checks", {})
    if checks.get("health", {}).get("passed"):
        ok("NeuralGate health check passed")
    else:
        fail(f"NeuralGate health check failed: {checks.get('health', {}).get('detail', '')}")

    if checks.get("inference", {}).get("passed"):
        ok("NeuralGate inference check passed")
    else:
        fail(f"NeuralGate inference check failed: {checks.get('inference', {}).get('detail', '')}")

    verified = result.get("verified", False)

    # ── Summary ───────────────────────────────────────────────────────────────
    print(f"\n{'═'*50}")
    if verified:
        print(c("green", c("bold", "\n  🎉 You're live! Your server is now receiving traffic.\n")))
        print(f"  Hoster ID:    {hoster_id}")
        print(f"  Endpoint:     {endpoint}")
        print(f"  Models:       {', '.join(m['model_id'] for m in models)}")
        print(f"\n  Dashboard:    {GATEWAY}/dashboard")
        print(f"  Docs:         {GATEWAY}/hosters/docs\n")
    else:
        print(c("yellow", c("bold", "\n  ⚠  Registered but verification failed.\n")))
        print(f"  Hoster ID: {hoster_id}")
        print("  Fix the issues above and retry verification:")
        print(f"  curl -X POST {GATEWAY}/hosters/{hoster_id}/verify\n")
        print(f"  Or visit: {GATEWAY}/hosters/docs for troubleshooting.\n")

# Run the interactive flow only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
