#!/usr/bin/env python3
"""EmpirioLabs integration setup helper.

This script writes local config snippets for OpenAI-compatible coding tools.
It does not install packages or run agent CLIs.
"""

from __future__ import annotations

import argparse
import getpass
import json
import os
import platform
import shutil
import sys
import textwrap
import time
import urllib.error
import urllib.request
from pathlib import Path

# API endpoints. OpenAI-compatible tools talk to {API_BASE}/v1; tools
# speaking the Anthropic wire protocol are pointed at the bare host.
API_BASE = "https://api.empiriolabs.ai"
OPENAI_BASE = f"{API_BASE}/v1"
ANTHROPIC_BASE = API_BASE
# Default value for --model.
DEFAULT_MODEL = "qwen3-max"
# Sentinel text wrapped in comment markers around every block this script
# owns inside shared files (.env, .gitignore, config.toml), so reruns can
# find and replace only their own content.
MARKER_START = "BEGIN EMPIRIOLABS"
MARKER_END = "END EMPIRIOLABS"
# Project-local file holding the raw API key, referenced via {file:...}
# from opencode.json.
OPENCODE_SECRET_FILE = ".empiriolabs-api-key"
OPENHANDS_CONFIG_FILE = "openhands.empiriolabs.toml"

# Categories that are valid chat targets for coding agents. These become
# the populated set in tool configs that support multi-model dropdowns
# (OpenCode, Continue, Qwen Code, goose). Image / video / audio /
# embedding / rerank / transcription / search / research models are not
# useful as the chat model in a coding agent and would clutter the
# dropdown.
CHAT_MODEL_CATEGORIES = {"text", "multimodal", "code", "reasoning"}
# Tools writing project-scoped vs user(home)-scoped files; qwen-code
# appears in both sets because it supports either location.
PROJECT_TOOLS = {"opencode", "aider", "qwen-code", "openhands"}
USER_TOOLS = {"continue", "claude-code", "codex", "qwen-code", "hermes", "goose", "openclaw"}
ALL_TOOLS = PROJECT_TOOLS | USER_TOOLS
# Human-readable scope labels used by --list-tools output.
TOOL_SCOPES = {
    "opencode": "project",
    "aider": "project",
    "openhands": "project",
    "qwen-code": "project or user",
    "continue": "user",
    "claude-code": "user",
    "codex": "user",
    "hermes": "user",
    "goose": "user",
    "openclaw": "user",
}
# Catalog feature flags and request parameters that signal a model can
# emit reasoning/thinking output (consumed by supports_reasoning below).
REASONING_FEATURES = {"reasoning", "thinking", "deep_thinking"}
REASONING_PARAMETERS = {
    "enable_thinking",
    "reasoning",
    "reasoning_effort",
    "thinking",
    "thinking_budget",
    "thinking_tokens",
}


def info(message: str) -> None:
    """Print a progress line with the helper's log prefix."""
    print("[empiriolabs] " + message)

def backup(path: Path) -> None:
    """Copy *path* to a timestamped ``.bak`` sibling before it is rewritten.

    A numeric suffix is appended when several backups land in the same
    second. Missing files are silently ignored.
    """
    if not path.exists():
        return
    stamp = time.strftime("%Y%m%d-%H%M%S")
    candidate = path.with_name(f"{path.name}.bak.{stamp}")
    attempt = 2
    while candidate.exists():
        candidate = path.with_name(f"{path.name}.bak.{stamp}.{attempt}")
        attempt += 1
    shutil.copy2(path, candidate)
    info(f"backed up {path} -> {candidate}")


def upsert_marked_block(path: Path, block: str, comment: str = "#") -> None:
    """Insert or refresh the marker-delimited block this script owns in *path*.

    The block is wrapped in "<comment> BEGIN EMPIRIOLABS" / "<comment> END
    EMPIRIOLABS" sentinel lines. If both sentinels already exist, only the
    text between them is replaced; otherwise the block is appended (after
    backing up the file) or the file is created from scratch.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    start = f"{comment} {MARKER_START}"
    end = f"{comment} {MARKER_END}"
    # strip() (not rstrip()) because callers pass triple-quoted blocks that
    # begin with a newline; rstrip() alone left a stray blank line right
    # after the start marker.
    new_block = f"{start}\n{block.strip()}\n{end}\n"

    if path.exists():
        text = path.read_text(encoding="utf-8")
        if start in text and end in text:
            # Replace only the span between the first start marker and the
            # first end marker after it; surrounding content is preserved.
            before, rest = text.split(start, 1)
            _, after = rest.split(end, 1)
            path.write_text(before.rstrip() + "\n\n" + new_block + after.lstrip(), encoding="utf-8")
            info(f"updated marked block in {path}")
            return
        backup(path)
        sep = "\n" if text.endswith("\n") else "\n\n"
        path.write_text(text + sep + new_block, encoding="utf-8")
        info(f"appended marked block to {path}")
        return

    path.write_text(new_block, encoding="utf-8")
    info(f"created {path}")


def write_text(path: Path, text: str, overwrite: bool = True) -> None:
    """Write *text* (newline-terminated) to *path*, backing up any prior file.

    With overwrite=False an existing file is left untouched.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    already_there = path.exists()
    if already_there and not overwrite:
        info(f"left existing {path} unchanged")
        return
    if already_there:
        backup(path)
    path.write_text(text.rstrip() + "\n", encoding="utf-8")
    info(f"wrote {path}")


def write_secret_file(path: Path, value: str) -> None:
    """Write the API key to *path* and restrict it to owner read/write."""
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        backup(path)
    path.write_text(value.strip(), encoding="utf-8")
    try:
        path.chmod(0o600)
    except OSError:
        # chmod can fail on filesystems without POSIX permissions; the file
        # is still written, just without the tightened mode.
        pass
    info(f"wrote {path}")


def load_json(path: Path) -> dict | None:
    """Load *path* as a JSON object for in-place merging.

    Returns {} when the file does not exist (caller starts fresh), the
    parsed dict when the file holds a JSON object, and None when the
    content is not strict JSON *or* is valid JSON with a non-dict top
    level — in both cases the caller writes a sidecar file instead of
    merging (merging into an array/scalar would crash downstream).
    """
    if not path.exists():
        return {}
    try:
        data = json.loads(path.read_text(encoding="utf-8"))
    except json.JSONDecodeError:
        info(f"{path} is not strict JSON; writing sidecar instead")
        return None
    if not isinstance(data, dict):
        info(f"{path} is not a JSON object; writing sidecar instead")
        return None
    return data


def toml_string(value: str) -> str:
    """Quote *value* as a TOML string.

    JSON escaping is used; for the model names, URLs, and keys this script
    writes it yields valid TOML basic strings.
    """
    return json.dumps(value)


def string_set(value) -> set[str]:
    """Normalize a string / dict / list "capabilities"-style field into a
    lowercase set of names. Unknown shapes yield an empty set."""
    if isinstance(value, str):
        return {value.lower()}
    if isinstance(value, dict):
        # Dict form maps name -> enabled flag; keep only truthy entries.
        return {str(key).lower() for key, enabled in value.items() if enabled}
    if isinstance(value, list):
        names: set[str] = set()
        for entry in value:
            if isinstance(entry, str):
                names.add(entry.lower())
            elif isinstance(entry, dict):
                label = entry.get("name") or entry.get("id") or entry.get("key")
                if label:
                    names.add(str(label).lower())
        return names
    return set()


def parameter_names(model: dict) -> set[str]:
    """Collect every advertised parameter name for *model*, lowercased,
    across the catalog's various field spellings."""
    collected: set[str] = set()
    for field in ("parameters", "supported_parameters", "accepted_parameters"):
        raw = model.get(field)
        if isinstance(raw, dict):
            # Dict form: parameter name -> schema/flag; names are the keys.
            collected.update(str(key).lower() for key in raw)
        else:
            collected.update(string_set(raw))
    return collected


def supports_reasoning(model: dict) -> bool:
    """Heuristically decide whether *model* exposes reasoning/thinking.

    Checks, in order: an explicit "reasoning" category, feature/capability
    flags, advertised reasoning parameters, and finally "thinking"/
    "reasoning" substrings in the model's identifier fields.
    """
    if (model.get("category") or model.get("type") or "").lower() == "reasoning":
        return True
    flags = string_set(model.get("features")) | string_set(model.get("capabilities"))
    if not flags.isdisjoint(REASONING_FEATURES):
        return True
    if not parameter_names(model).isdisjoint(REASONING_PARAMETERS):
        return True
    haystack = " ".join(
        str(model.get(field) or "") for field in ("id", "slug", "display_name", "name")
    ).lower()
    return any(token in haystack for token in ("thinking", "reasoning"))


def open_code_model_entry(model: dict) -> dict:
    """Shape one normalized catalog entry into OpenCode's per-model config."""
    entry = {
        "name": f"EmpirioLabs {model['display_name']}",
        "context": model["context_window"],
    }
    # The key is only emitted when truthy, matching OpenCode's optional flag.
    if model.get("reasoning"):
        entry["reasoning"] = True
    return entry


def write_opencode(project: Path, model: str, chat: list[dict] | None = None) -> None:
    """Write the EmpirioLabs provider into the project's opencode.json.

    When the live catalog is available, every chat-capable model is
    registered so OpenCode's model picker (`/models`) is fully populated;
    otherwise only the user-selected model is registered. If an existing
    opencode.json is not mergeable JSON, a sidecar file is written instead.
    """
    if chat:
        models_map = {entry["id"]: open_code_model_entry(entry) for entry in chat}
    else:
        # No catalog: synthesize a single entry for the selected model.
        models_map = {
            model: open_code_model_entry({
                "id": model,
                "display_name": model,
                "context_window": 256000,
                "reasoning": supports_reasoning({"id": model}),
            })
        }

    provider = {
        "npm": "@ai-sdk/openai-compatible",
        "name": "EmpirioLabs",
        "options": {
            "baseURL": OPENAI_BASE,
            # OpenCode resolves {file:...} to the secret file's contents.
            "apiKey": f"{{file:{OPENCODE_SECRET_FILE}}}",
        },
        "models": models_map,
    }

    path = project / "opencode.json"
    data = load_json(path)
    if data is None:
        sidecar = {
            "$schema": "https://opencode.ai/config.json",
            "provider": {"empiriolabs": provider},
        }
        write_text(project / "opencode.empiriolabs.json", json.dumps(sidecar, indent=2), overwrite=True)
        return

    data.setdefault("$schema", "https://opencode.ai/config.json")
    data.setdefault("provider", {})
    data["provider"]["empiriolabs"] = provider
    if path.exists():
        backup(path)
    path.write_text(json.dumps(data, indent=2) + "\n", encoding="utf-8")
    info(f"configured OpenCode provider in {path} ({len(models_map)} model(s))")


def write_qwen_settings(path: Path, model: str, api_key: str, chat: list[dict] | None = None) -> None:
    """Register EmpirioLabs providers in a Qwen Code settings.json.

    Qwen Code's settings.json supports a list of OpenAI-compatible
    providers; we register one entry per chat model so the model picker
    shows every option. Existing non-EmpirioLabs entries are preserved;
    existing EmpirioLabs entries — recognized by their EMPIRIOLABS_API_KEY
    envKey or by an id we are about to rewrite — are replaced wholesale,
    so models dropped from the catalog do not linger. If the file is not
    mergeable JSON, a settings.empiriolabs.json sidecar is written instead.
    """
    def provider_entry(model_id: str, display: str) -> dict:
        # One picker entry per model; all share the same env key and base URL.
        return {
            "id": model_id,
            "name": f"EmpirioLabs {display}",
            "envKey": "EMPIRIOLABS_API_KEY",
            "baseUrl": OPENAI_BASE,
            "generationConfig": {"timeout": 120000, "maxRetries": 3},
        }

    if chat:
        new_providers = [provider_entry(m["id"], m["display_name"]) for m in chat]
    else:
        new_providers = [provider_entry(model, model)]

    empirio_ids = {p["id"] for p in new_providers}

    def owned(entry) -> bool:
        # An entry is ours if it uses our env key or collides with an id we
        # are about to write; anything else is user-managed and kept.
        return isinstance(entry, dict) and (
            entry.get("envKey") == "EMPIRIOLABS_API_KEY" or entry.get("id") in empirio_ids
        )

    data = load_json(path)
    if data is None:
        write_text(
            path.with_name("settings.empiriolabs.json"),
            json.dumps(
                {
                    "model": {"name": model},
                    "security": {"auth": {"selectedType": "openai"}},
                    "env": {"EMPIRIOLABS_API_KEY": api_key},
                    "modelProviders": {"openai": new_providers},
                },
                indent=2,
            ),
            overwrite=True,
        )
        return

    # Replace non-dict intermediate nodes defensively before merging.
    if not isinstance(data.get("model"), dict):
        data["model"] = {}
    data["model"]["name"] = model
    if not isinstance(data.get("security"), dict):
        data["security"] = {}
    if not isinstance(data["security"].get("auth"), dict):
        data["security"]["auth"] = {}
    data["security"]["auth"]["selectedType"] = "openai"
    if not isinstance(data.get("env"), dict):
        data["env"] = {}
    # Persisting the key in settings lets Qwen Code reopen without a shell env.
    data["env"]["EMPIRIOLABS_API_KEY"] = api_key
    if not isinstance(data.get("modelProviders"), dict):
        data["modelProviders"] = {}
    existing = data["modelProviders"].get("openai")
    if not isinstance(existing, list):
        data["modelProviders"]["openai"] = list(new_providers)
    else:
        data["modelProviders"]["openai"] = [p for p in existing if not owned(p)] + new_providers

    if path.exists():
        backup(path)
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(json.dumps(data, indent=2) + "\n", encoding="utf-8")
    info(f"configured Qwen Code provider and persisted env in {path} ({len(new_providers)} model(s))")


def write_qwen_project(project: Path, api_key: str, model: str, chat: list[dict] | None = None) -> None:
    """Write Qwen Code settings under the project's .qwen directory."""
    target = project / ".qwen" / "settings.json"
    write_qwen_settings(target, model, api_key, chat)


def write_aider(project: Path, model: str) -> None:
    """Write a sidecar Aider config pointing at the EmpirioLabs endpoint.

    The key itself comes from .env (env-file), so no secret is embedded.
    """
    config = (
        "\n"
        f"model: openai/{model}\n"
        f"openai-api-base: {OPENAI_BASE}\n"
        "env-file: .env\n"
    )
    write_text(project / ".aider.empiriolabs.yml", config, overwrite=True)


def write_openhands_project(project: Path, api_key: str, model: str) -> None:
    """Write a standalone OpenHands TOML config for this project."""
    lines = [
        "",
        "[llm]",
        f"model = {toml_string(f'openai/{model}')}",
        f"api_key = {toml_string(api_key)}",
        f"base_url = {toml_string(OPENAI_BASE)}",
        "",
    ]
    write_text(project / OPENHANDS_CONFIG_FILE, "\n".join(lines), overwrite=True)


def write_env_files(project: Path, api_key: str, model: str) -> None:
    """Write the project-level environment plumbing.

    Produces four artifacts: a marked block in .env (dotenv consumers such
    as Aider), the OpenCode secret file, empirio-env.sh for POSIX shells,
    and empirio-env.ps1 for PowerShell. The OPENAI_*, ANTHROPIC_*, and
    LLM_* variables all mirror the same key and endpoint so OpenAI-style,
    Anthropic-style, and LiteLLM-style tools each find their expected names.
    """
    env_block = f"""
EMPIRIOLABS_API_KEY={api_key}
OPENAI_API_KEY={api_key}
OPENAI_API_BASE={OPENAI_BASE}
OPENAI_BASE_URL={OPENAI_BASE}
OPENAI_MODEL={model}
ANTHROPIC_AUTH_TOKEN={api_key}
ANTHROPIC_BASE_URL={ANTHROPIC_BASE}
ANTHROPIC_MODEL={model}
ANTHROPIC_CUSTOM_MODEL_OPTION={model}
ANTHROPIC_CUSTOM_MODEL_OPTION_NAME=EmpirioLabs {model}
LLM_API_KEY={api_key}
LLM_BASE_URL={OPENAI_BASE}
LLM_MODEL=openai/{model}
"""
    upsert_marked_block(project / ".env", env_block)
    write_secret_file(project / OPENCODE_SECRET_FILE, api_key)

    # POSIX shell loader; later exports reference EMPIRIOLABS_API_KEY so the
    # literal key appears only once in the file.
    sh = f"""
export EMPIRIOLABS_API_KEY="{api_key}"
export OPENAI_API_KEY="$EMPIRIOLABS_API_KEY"
export OPENAI_API_BASE="{OPENAI_BASE}"
export OPENAI_BASE_URL="{OPENAI_BASE}"
export OPENAI_MODEL="{model}"
export ANTHROPIC_AUTH_TOKEN="$EMPIRIOLABS_API_KEY"
export ANTHROPIC_BASE_URL="{ANTHROPIC_BASE}"
export ANTHROPIC_MODEL="{model}"
export ANTHROPIC_CUSTOM_MODEL_OPTION="{model}"
export ANTHROPIC_CUSTOM_MODEL_OPTION_NAME="EmpirioLabs {model}"
export LLM_API_KEY="$EMPIRIOLABS_API_KEY"
export LLM_BASE_URL="{OPENAI_BASE}"
export LLM_MODEL="openai/{model}"
"""
    write_text(project / "empirio-env.sh", sh, overwrite=True)

    # PowerShell equivalent of the loader above.
    ps1 = f"""
$env:EMPIRIOLABS_API_KEY = "{api_key}"
$env:OPENAI_API_KEY = $env:EMPIRIOLABS_API_KEY
$env:OPENAI_API_BASE = "{OPENAI_BASE}"
$env:OPENAI_BASE_URL = "{OPENAI_BASE}"
$env:OPENAI_MODEL = "{model}"
$env:ANTHROPIC_AUTH_TOKEN = $env:EMPIRIOLABS_API_KEY
$env:ANTHROPIC_BASE_URL = "{ANTHROPIC_BASE}"
$env:ANTHROPIC_MODEL = "{model}"
$env:ANTHROPIC_CUSTOM_MODEL_OPTION = "{model}"
$env:ANTHROPIC_CUSTOM_MODEL_OPTION_NAME = "EmpirioLabs {model}"
$env:LLM_API_KEY = $env:EMPIRIOLABS_API_KEY
$env:LLM_BASE_URL = "{OPENAI_BASE}"
$env:LLM_MODEL = "openai/{model}"
"""
    write_text(project / "empirio-env.ps1", ps1, overwrite=True)


def update_gitignore(project: Path) -> None:
    """Gitignore every generated file that embeds the API key.

    The trailing !.env.example re-allows a checked-in example env file
    that the .env.* pattern would otherwise hide.
    """
    block = """
.env
.env.*
.empiriolabs-api-key
.qwen/settings.json
.qwen/settings.empiriolabs.json
openhands.empiriolabs.toml
empirio-env.sh
empirio-env.ps1
!.env.example
"""
    upsert_marked_block(project / ".gitignore", block)


def write_continue_home(home: Path, api_key: str, model: str, chat: list[dict] | None = None) -> None:
    """Write Continue's config.yaml, or a sidecar when one already exists.

    Continue's config.yaml takes a `models:` array; one entry is emitted
    per chat-capable model so the agent picker is fully populated.
    """
    cont = home / ".continue"
    upsert_marked_block(cont / ".env", f"EMPIRIOLABS_API_KEY={api_key}")
    if chat:
        models_to_write = chat
    else:
        models_to_write = [{"id": model, "display_name": model, "context_window": 256000}]
    model_blocks = [
        f"  - name: EmpirioLabs {m['display_name']}\n"
        f"    provider: openai\n"
        f"    model: {m['id']}\n"
        f"    apiBase: {OPENAI_BASE}\n"
        f"    apiKey: ${{{{ secrets.EMPIRIOLABS_API_KEY }}}}\n"
        f"    capabilities:\n"
        f"      - tool_use\n"
        for m in models_to_write
    ]
    config = "name: EmpirioLabs\nversion: 0.0.1\nschema: v1\n\nmodels:\n" + "".join(model_blocks)
    target = cont / "config.yaml"
    if target.exists():
        # Never clobber a hand-tuned Continue config; leave a sidecar to merge.
        write_text(cont / "empiriolabs.config.yaml", config, overwrite=True)
        info(f"Continue already has config.yaml; wrote ~/.continue/empiriolabs.config.yaml ({len(models_to_write)} models), review and merge")
    else:
        write_text(target, config, overwrite=False)
        info(f"wrote Continue config with {len(models_to_write)} model(s)")


def write_claude_home(home: Path, api_key: str, model: str) -> None:
    """Point Claude Code's user settings at the EmpirioLabs Anthropic shim.

    Merges the env block into ~/.claude/settings.json. If the existing file
    is not a JSON object (invalid JSON, or valid JSON with a non-dict top
    level), a settings.empiriolabs.json sidecar is written instead of
    clobbering it.
    """
    settings = home / ".claude" / "settings.json"
    env = {
        "ANTHROPIC_AUTH_TOKEN": api_key,
        "ANTHROPIC_BASE_URL": ANTHROPIC_BASE,
        "ANTHROPIC_CUSTOM_MODEL_OPTION": model,
        "ANTHROPIC_CUSTOM_MODEL_OPTION_NAME": f"EmpirioLabs {model}",
        "ANTHROPIC_MODEL": model,
    }
    data: dict = {}
    if settings.exists():
        try:
            loaded = json.loads(settings.read_text(encoding="utf-8"))
        except json.JSONDecodeError:
            loaded = None
        if not isinstance(loaded, dict):
            # Don't merge into (or destroy) a file we can't understand.
            write_text(settings.parent / "settings.empiriolabs.json", json.dumps({"env": env}, indent=2), overwrite=True)
            info("Claude settings.json was not JSON; wrote settings.empiriolabs.json instead")
            return
        data = loaded
        backup(settings)
    # Replace a non-dict "env" value rather than crash on .update().
    if not isinstance(data.get("env"), dict):
        data["env"] = {}
    data["env"].update(env)
    settings.parent.mkdir(parents=True, exist_ok=True)
    settings.write_text(json.dumps(data, indent=2) + "\n", encoding="utf-8")
    info(f"configured Claude Code env in {settings}")


def write_codex_home(home: Path, model: str) -> None:
    """Add an EmpirioLabs model_provider block to ~/.codex/config.toml.

    Codex reads the key from the EMPIRIOLABS_API_KEY environment variable
    (env_key), so no secret is written into the TOML file itself.
    """
    block = f"""
model = "{model}"
model_provider = "empiriolabs"

[model_providers.empiriolabs]
name = "EmpirioLabs"
base_url = "{OPENAI_BASE}"
env_key = "EMPIRIOLABS_API_KEY"
wire_api = "responses"
"""
    upsert_marked_block(home / ".codex" / "config.toml", block)


def write_qwen_home(home: Path, api_key: str, model: str, chat: list[dict] | None = None) -> None:
    """Write Qwen Code settings under the user's home .qwen directory."""
    write_qwen_settings(home / ".qwen" / "settings.json", model=model, api_key=api_key, chat=chat)


def write_hermes_home(home: Path, api_key: str, model: str) -> None:
    """Write Hermes user-level env and a sidecar provider config.

    Hermes' config.yaml is user-managed, so the provider YAML is written
    as a sidecar for the user to merge by hand; only the .env marked
    block is updated in place.
    """
    hermes = home / ".hermes"
    upsert_marked_block(hermes / ".env", f"EMPIRIOLABS_API_KEY={api_key}")
    config = f"""
custom_providers:
  - name: empiriolabs
    base_url: {OPENAI_BASE}
    key_env: EMPIRIOLABS_API_KEY
    api_mode: chat_completions

model:
  provider: custom:empiriolabs
  default: {model}
"""
    write_text(hermes / "empiriolabs.config.yaml", config, overwrite=True)
    info("Hermes sidecar written; merge ~/.hermes/empiriolabs.config.yaml into ~/.hermes/config.yaml")


def goose_config_dir(home: Path) -> Path:
    """Return goose's custom-provider directory for this platform.

    On Windows goose reads %APPDATA%/Block/goose; everywhere else (and
    when APPDATA is unset) the XDG-style ~/.config path is used.
    """
    on_windows = platform.system().lower().startswith("win")
    appdata = os.environ.get("APPDATA") if on_windows else None
    if appdata:
        return Path(appdata) / "Block" / "goose" / "config" / "custom_providers"
    return home / ".config" / "goose" / "custom_providers"


def write_goose_home(home: Path, model: str, chat: list[dict] | None = None) -> None:
    """Register an EmpirioLabs custom provider for goose.

    goose's custom-provider JSON takes a `models` array; every
    chat-capable model is listed so the goose picker shows them all.
    """
    if chat:
        models_list = [
            {"name": entry["id"], "context_limit": entry["context_window"]}
            for entry in chat
        ]
    else:
        models_list = [{"name": model, "context_limit": 256000}]
    provider = {
        "name": "empiriolabs",
        "engine": "openai",
        "display_name": "EmpirioLabs",
        "description": "EmpirioLabs OpenAI-compatible API",
        "api_key_env": "EMPIRIOLABS_API_KEY",
        # goose expects the full completions URL, not just the API base.
        "base_url": f"{OPENAI_BASE}/chat/completions",
        "models": models_list,
        "supports_streaming": True,
        "requires_auth": True,
    }
    target = goose_config_dir(home) / "empiriolabs.json"
    write_text(target, json.dumps(provider, indent=2), overwrite=True)
    info(f"configured goose provider with {len(models_list)} model(s)")


def write_openclaw_home(home: Path, model: str) -> None:
    """Write an OpenClaw example config (JSON5) for manual merge.

    The file is a sidecar because ~/.openclaw/openclaw.json is
    user-managed; doubled braces in the f-string emit literal braces.
    """
    config = f"""
// Merge this into ~/.openclaw/openclaw.json or apply it with openclaw config set.
{{
  secrets: {{
    providers: {{
      default: {{ source: "env" }}
    }},
    defaults: {{
      env: "default"
    }}
  }},
  models: {{
    mode: "merge",
    providers: {{
      empiriolabs: {{
        baseUrl: "{OPENAI_BASE}",
        apiKey: {{ source: "env", provider: "default", id: "EMPIRIOLABS_API_KEY" }},
        authHeader: true,
        api: "openai-completions",
        models: [
          {{
            id: "{model}",
            name: "EmpirioLabs {model}",
            input: ["text"],
            contextWindow: 256000
          }}
        ]
      }}
    }}
  }},
  agents: {{
    defaults: {{
      model: {{
        primary: "empiriolabs/{model}"
      }}
    }}
  }}
}}
"""
    write_text(home / ".openclaw" / "empiriolabs.example.json5", config, overwrite=True)


def write_project_readme(project: Path, model: str) -> None:
    """Write EMPIRIOLABS_INTEGRATIONS.md: a summary of the generated
    files, a curl smoke test, tool launch commands, and values for
    UI-based OpenAI-compatible tools."""
    text = f"""
# EmpirioLabs integration snippets

Run the environment loader first:

```bash
source ./empirio-env.sh
```

```powershell
. .\\empirio-env.ps1
```

OpenCode reads `.empiriolabs-api-key` directly from `opencode.json`, Qwen Code stores its fallback env in `.qwen/settings.json`, Aider reads `.env`, and OpenHands can use `openhands.empiriolabs.toml`. These files are gitignored so the tools can reopen without re-exporting environment variables.

## Smoke test

```bash
curl "{OPENAI_BASE}/chat/completions" \\
  -H "Authorization: Bearer $EMPIRIOLABS_API_KEY" \\
  -H "Content-Type: application/json" \\
  -d '{{"model":"{model}","messages":[{{"role":"user","content":"Reply with one sentence."}}]}}'
```

## Tool launch commands

```bash
opencode
aider --config .aider.empiriolabs.yml
cline auth -p openai -k "$EMPIRIOLABS_API_KEY" -b "{OPENAI_BASE}" -m "{model}"
qwen --auth-type openai --openaiApiKey "$EMPIRIOLABS_API_KEY" --openaiBaseUrl "{OPENAI_BASE}" --model "{model}"
codex --model "{model}" --config model_provider=empiriolabs
openhands --config-file {OPENHANDS_CONFIG_FILE}
```

## UI tools

Use these values in Cline, Roo Code, Kilo Code, Zed, Cursor-compatible fields, OpenHands, and similar OpenAI-compatible UIs:

| Field | Value |
| --- | --- |
| Provider | OpenAI Compatible |
| Base URL | {OPENAI_BASE} |
| API key | your EmpirioLabs key |
| Model | {model} |
"""
    write_text(project / "EMPIRIOLABS_INTEGRATIONS.md", text, overwrite=True)


def fetch_models_catalog(api_key: str) -> list[dict] | None:
    """Fetch the live model catalog with one bearer-auth GET.

    Returns None only when the server rejects the key (HTTP 401/403);
    returns the parsed list of model dicts on success; and returns an
    empty list on any other failure (network error, timeout, other HTTP
    status, malformed JSON) so callers can treat the key as plausibly
    valid and fall back to single-model writes. Used both to validate the
    key and to power multi-model auto-populate for tool configs that
    support multiple models in one file (OpenCode, Continue, Qwen Code,
    goose).
    """
    req = urllib.request.Request(
        f"{OPENAI_BASE}/models?available=true",
        headers={"Authorization": f"Bearer {api_key}"},
        method="GET",
    )
    try:
        with urllib.request.urlopen(req, timeout=15) as resp:
            # Cap the read at 2 MiB to bound memory on a misbehaving server.
            raw = resp.read(2 * 1024 * 1024)
            data = json.loads(raw.decode("utf-8"))
            models = data.get("data") or data.get("models") or []
            return [m for m in models if isinstance(m, dict)]
    except urllib.error.HTTPError as exc:
        if exc.code in {401, 403}:
            return None
        info(f"could not fetch catalog: HTTP {exc.code}")
    except (urllib.error.URLError, TimeoutError, json.JSONDecodeError) as exc:
        info(f"could not fetch catalog: {exc}")
    return []


def chat_models(catalog: list[dict] | None) -> list[dict]:
    """Filter the raw catalog down to chat-capable entries.

    Each surviving entry is normalized to a stable
    {id, display_name, context_window, reasoning} shape. Media/embedding/
    rerank/transcription/search/research categories are skipped, as is
    anything whose supported_endpoints list lacks a chat-style endpoint.
    """
    def _is_chat_endpoint(endpoint) -> bool:
        if not isinstance(endpoint, str):
            return False
        lowered = endpoint.lower()
        return (
            "chat/completions" in lowered
            or "/v1/messages" in lowered
            or "/v1/responses" in lowered
        )

    selected: list[dict] = []
    for raw in catalog or []:
        category = (raw.get("category") or "").lower()
        if category and category not in CHAT_MODEL_CATEGORIES:
            continue
        endpoints = raw.get("supported_endpoints") or []
        # An absent/empty endpoint list is treated as "chat-capable".
        if endpoints and not any(_is_chat_endpoint(e) for e in endpoints):
            continue
        slug = raw.get("id") or raw.get("slug")
        if not slug:
            continue
        selected.append({
            "id": slug,
            "display_name": raw.get("display_name") or raw.get("name") or slug,
            "context_window": raw.get("context_window") or 256000,
            "reasoning": supports_reasoning(raw),
        })
    return selected


def validate_key(api_key: str, model: str) -> tuple[bool, list[dict]]:
    """Validate the key via a catalog fetch; return (is_valid, chat_models).

    The returned list is empty when the catalog could not be fetched even
    though the key looked valid (network blip); callers should fall back
    to single-model writes in that case.
    """
    catalog = fetch_models_catalog(api_key)
    if catalog is None:
        print("API key validation failed (HTTP 401/403). Check EMPIRIOLABS_API_KEY.", file=sys.stderr)
        return False, []
    chat = chat_models(catalog)
    info(f"validated API key; catalog returned {len(catalog)} models, {len(chat)} chat-capable")
    if catalog:
        # Warn (but don't fail) when the selected model isn't in the catalog.
        known = {entry.get("id") or entry.get("slug") for entry in catalog if isinstance(entry, dict)}
        if model not in known:
            info(f"warning: {model!r} was not found in the first catalog response")
    return True, chat


def smoke_test(api_key: str, model: str) -> bool:
    """POST one tiny chat completion to prove key, model, and endpoint work."""
    payload = {
        "model": model,
        "messages": [{"role": "user", "content": "Reply with exactly: EmpirioLabs ready"}],
        "max_tokens": 16,
    }
    req = urllib.request.Request(
        f"{OPENAI_BASE}/chat/completions",
        data=json.dumps(payload).encode("utf-8"),
        headers={
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        },
        method="POST",
    )
    try:
        with urllib.request.urlopen(req, timeout=45) as resp:
            data = json.loads(resp.read().decode("utf-8"))
    except urllib.error.HTTPError as exc:
        detail = exc.read(2048).decode("utf-8", errors="replace")
        print(f"Smoke test failed with HTTP {exc.code}: {detail}", file=sys.stderr)
        return False
    except (urllib.error.URLError, TimeoutError, json.JSONDecodeError) as exc:
        print(f"Smoke test failed: {exc}", file=sys.stderr)
        return False
    # Best-effort extraction of the first choice's text for the log line.
    content = ""
    choices = data.get("choices") if isinstance(data, dict) else None
    if choices and isinstance(choices[0], dict):
        message = choices[0].get("message") or {}
        if isinstance(message, dict):
            content = str(message.get("content") or "").strip()
    info(f"smoke test succeeded: {content or 'chat completion returned successfully'}")
    return True


def print_next_steps(project: Path, tools: set[str], scope: str, model: str) -> None:
    """Print a human-readable summary of what was written and how to use it.

    Output adapts to the scope actually run: the project section mentions
    the env loaders and project tool commands; the user section lists the
    user-level sidecars that may need manual merging.
    """
    info("next steps")
    if scope in {"project", "all"}:
        # Only list launch commands for the project tools actually selected.
        commands: list[tuple[str, str]] = []
        if "opencode" in tools:
            commands.append(("OpenCode", "opencode"))
        if "aider" in tools:
            commands.append(("Aider", "aider --config .aider.empiriolabs.yml"))
        if "qwen-code" in tools:
            commands.append(("Qwen Code", f"qwen --auth-type openai --model {model}"))
        if "openhands" in tools:
            commands.append(("OpenHands", f"openhands --config-file {OPENHANDS_CONFIG_FILE}"))
        command_lines = "\n".join(f"  {name}: {cmd}" for name, cmd in commands)
        print(textwrap.dedent(f"""
          Project files were written under:
            {project}

          Load the generated environment:
            macOS/Linux/WSL: source ./empirio-env.sh
            PowerShell:      . .\\empirio-env.ps1
        """).rstrip())
        if command_lines:
            print("\nTry a configured project tool:")
            print(command_lines)
    if scope in {"user", "all"}:
        user_tools = ", ".join(sorted(t for t in tools if t not in {"opencode", "aider", "openhands"})) or "user-level tools"
        print(textwrap.dedent(f"""

          User-level snippets were prepared for: {user_tools}
          Review the generated sidecar files before replacing an existing hand-tuned config.
        """).rstrip())
    print(textwrap.dedent(f"""

      To run an end-to-end API call now, rerun the helper with:
        python {Path(__file__).name} --model {model} --tools opencode --no-validate --smoke-test
    """).rstrip())
    if scope in {"project", "all"}:
        print(textwrap.dedent("""

          To see exactly what changed:
            git diff -- .env .empiriolabs-api-key opencode.json .aider.empiriolabs.yml .qwen/settings.json openhands.empiriolabs.toml EMPIRIOLABS_INTEGRATIONS.md
        """).rstrip())


def parse_tools(raw: str) -> set[str]:
    """Parse a comma-separated --tools value into a validated set.

    "all" and "*" select every supported tool. Raises ValueError naming
    any unrecognized tool.
    """
    if raw.lower() in {"all", "*"}:
        return set(ALL_TOOLS)
    requested = {piece.strip().lower() for piece in raw.split(",") if piece.strip()}
    unknown = sorted(requested - ALL_TOOLS)
    if unknown:
        allowed = ", ".join(sorted(ALL_TOOLS))
        raise ValueError(f"unknown --tools value(s): {', '.join(unknown)}. Allowed values: {allowed}, all")
    return requested


def print_supported_tools() -> None:
    """Print each supported --tools value next to the scope it writes to."""
    print("Supported --tools values:")
    for name in sorted(ALL_TOOLS):
        print(f"  {name:<12} {TOOL_SCOPES[name]}")
    print("\nUse comma-separated values such as --tools opencode,aider,qwen-code,openhands.")
    print("Use --tools all to select every helper-supported tool for the chosen scope.")


def warn_scope_mismatches(tools: set[str], scope: str) -> None:
    """Note any selected tools that the chosen --scope will silently skip."""
    writable = {"project": PROJECT_TOOLS, "user": USER_TOOLS}.get(scope)
    if writable is None:
        # --scope all writes everything, so nothing can be skipped.
        return
    skipped = sorted(tools - writable)
    if skipped:
        info(
            f"note: --scope {scope} will not write {', '.join(skipped)}. "
            "Use --scope all, or choose tools whose scope matches this run."
        )


def main() -> int:
    """CLI entry point; returns the process exit code.

    0 = success, 1 = key validation or smoke test failed, 2 = usage error
    (bad --tools value or no API key available).
    """
    parser = argparse.ArgumentParser(
        description="Create EmpirioLabs setup files for coding tools.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent(
            """
            Examples:
              python empirio-integrations-setup.py --tools opencode,aider
              python empirio-integrations-setup.py --scope all --tools all --model qwen3-max
              python empirio-integrations-setup.py --scope user --tools qwen-code,codex,hermes
              python empirio-integrations-setup.py --tools opencode --smoke-test
              python empirio-integrations-setup.py --list-tools
            """
        ),
    )
    parser.add_argument("--model", default=DEFAULT_MODEL)
    parser.add_argument("--tools", default="opencode,aider")
    parser.add_argument("--scope", choices=["project", "user", "all"], default="project")
    parser.add_argument("--project-dir", default=".")
    # Key resolution order: --api-key flag, then the two env var spellings,
    # then an interactive prompt below.
    parser.add_argument("--api-key", default=os.environ.get("EMPIRIOLABS_API_KEY") or os.environ.get("OPENAI_API_KEY"))
    parser.add_argument("--list-tools", action="store_true", help="Print supported --tools values and exit.")
    parser.add_argument("--no-validate", action="store_true")
    parser.add_argument(
        "--no-populate-models",
        action="store_true",
        help="Skip auto-populating tool configs with the full chat-model catalog. When set, only --model is registered (legacy single-model behavior).",
    )
    parser.add_argument(
        "--smoke-test",
        action="store_true",
        help="After writing files, run one tiny chat completion to prove the key, model, and endpoint work.",
    )
    args = parser.parse_args()

    # Informational mode: print and exit before touching any files.
    if args.list_tools:
        print_supported_tools()
        return 0

    try:
        tools = parse_tools(args.tools)
    except ValueError as exc:
        print(str(exc), file=sys.stderr)
        return 2
    warn_scope_mismatches(tools, args.scope)

    api_key = args.api_key
    if not api_key:
        # Last resort: prompt without echoing the key to the terminal.
        api_key = getpass.getpass("EmpirioLabs API key (hidden): ").strip()
    if not api_key:
        print("No API key provided. Set EMPIRIOLABS_API_KEY or pass --api-key.", file=sys.stderr)
        return 2

    project = Path(args.project_dir).expanduser().resolve()
    home = Path.home()

    # Validation doubles as the catalog fetch powering multi-model
    # auto-populate; chat stays empty on a network blip so writers fall
    # back to registering only --model.
    chat: list[dict] = []
    if not args.no_validate:
        ok, chat = validate_key(api_key, args.model)
        if not ok:
            return 1
    if args.no_populate_models:
        chat = []

    if args.scope in {"project", "all"}:
        project.mkdir(parents=True, exist_ok=True)
        write_env_files(project, api_key, args.model)
        update_gitignore(project)
        if "opencode" in tools:
            write_opencode(project, args.model, chat)
        if "qwen-code" in tools:
            write_qwen_project(project, api_key, args.model, chat)
        if "aider" in tools:
            write_aider(project, args.model)
        if "openhands" in tools:
            write_openhands_project(project, api_key, args.model)
        write_project_readme(project, args.model)

    if args.scope in {"user", "all"}:
        if "continue" in tools:
            write_continue_home(home, api_key, args.model, chat)
        if "claude-code" in tools:
            write_claude_home(home, api_key, args.model)
        if "codex" in tools:
            write_codex_home(home, args.model)
        if "qwen-code" in tools:
            write_qwen_home(home, api_key, args.model, chat)
        if "hermes" in tools:
            write_hermes_home(home, api_key, args.model)
        if "goose" in tools:
            write_goose_home(home, args.model, chat)
        if "openclaw" in tools:
            write_openclaw_home(home, args.model)

    if args.smoke_test and not smoke_test(api_key, args.model):
        return 1

    print_next_steps(project, tools, args.scope, args.model)
    info("done")
    return 0


# Script entry point: exit with main()'s status code.
if __name__ == "__main__":
    raise SystemExit(main())
