"""
Read-only check: PH MLBB product URL — compare API package rows vs in-browser SELECT_PACKAGE_PH_JS.

Usage (from repo root):
  python tools/diagnose_ph_mlbb.py

Requires: playwright, smile_auth.json (optional; without it only prints API packages).
"""

from __future__ import annotations

import asyncio
import json
import os
import sys
from pathlib import Path

# Repo root (one level above tools/); make it importable so the project
# modules used below (scraper_playwright, order_automation_ph, ...) resolve.
ROOT = Path(__file__).resolve().parents[1]
sys.path.insert(0, str(ROOT))

# Product page to diagnose; override via DIAGNOSE_MLBB_URL to point elsewhere.
URL = os.environ.get("DIAGNOSE_MLBB_URL", "https://www.smile.one/ph/merchant/mobilelegends")


def main() -> None:
    """Scrape the PH MLBB product page and print its package catalog.

    Prints up to 12 package rows (ASCII-sanitized for safe console output),
    then — only when smile_auth.json exists — runs the in-browser DOM
    comparison via _dom_check.
    """
    os.chdir(ROOT)
    # Best-effort .env loading; python-dotenv may not be installed.
    try:
        from dotenv import load_dotenv
    except ImportError:
        pass
    else:
        load_dotenv(ROOT / ".env")

    from scraper_playwright import run_game_packages_cached

    print(f"Scrape (refresh): {URL}")
    data = run_game_packages_cached(URL, bypass_cache=True)
    pkgs = data.get("packages") or []
    print(f"Packages from scraper: {len(pkgs)}")
    for row_no, pkg in enumerate(pkgs[:12]):
        # Truncate and force ASCII so exotic characters can't break the console.
        safe_name = ((pkg.get("name") or "")[:40]).encode("ascii", "replace").decode("ascii")
        safe_price = ((pkg.get("price_text") or "")[:30]).encode("ascii", "replace").decode("ascii")
        print(f"  [{row_no}] li={pkg.get('smile_li_id')!r}  name={safe_name!r}  price={safe_price!r}")
    hidden = len(pkgs) - 12
    if hidden > 0:
        print(f"  ... +{hidden} more")

    auth = ROOT / "smile_auth.json"
    if auth.exists():
        asyncio.run(_dom_check(pkgs, auth))
    else:
        print("\nNo smile_auth.json — skip Playwright DOM check.")


async def _dom_check(pkgs: list, auth_path: Path) -> None:
    """Open URL in a logged-in headless Chromium and evaluate
    SELECT_PACKAGE_PH_JS for the first scraped package that has a
    smile_li_id, printing the in-page result for comparison.

    Args:
        pkgs: Package dicts from the scraper (expects keys like
            ``smile_li_id``, ``name``, ``price_text`` — see main()).
        auth_path: Path to smile_auth.json Playwright storage state.
    """
    from playwright.async_api import async_playwright

    from order_automation_ph import SELECT_PACKAGE_PH_JS

    async with async_playwright() as p:
        try:
            from config import playwright_effective_channel
        except Exception:
            # Fallback when the project config module is unavailable:
            # read the browser channel straight from the environment.
            def playwright_effective_channel():
                return (os.getenv("PLAYWRIGHT_CHANNEL") or "").strip() or None

        ch = playwright_effective_channel()
        kw = {"headless": True}
        if ch:
            kw["channel"] = ch
        browser = await p.chromium.launch(**kw)
        try:
            ctx = await browser.new_context(
                storage_state=str(auth_path),
                user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) Chrome/120.0.0.0 Safari/537.36",
            )
            page = await ctx.new_page()
            await page.goto(URL, wait_until="load", timeout=90_000)
            await asyncio.sleep(2)  # give late-running page JS time to populate

            # First catalog row with a non-empty li id.
            sample = next((x for x in pkgs if str((x or {}).get("smile_li_id") or "").strip()), None)
            if not sample:
                print("No smile_li_id in scrape — cannot compare.")
                return

            li_id = str(sample["smile_li_id"]).strip()
            from currency import parse_php_amount_from_text
            from decimal import Decimal, ROUND_HALF_UP

            pt = (sample.get("price_text") or "").strip()
            php_amt = parse_php_amount_from_text(pt)
            # PHP pesos -> integer centavos, rounded half-up; None when unparseable.
            php_cents = (
                int((php_amt * Decimal(100)).quantize(Decimal("1"), rounding=ROUND_HALF_UP))
                if php_amt is not None
                else None
            )
            name = (sample.get("name") or "").strip()
            idx = pkgs.index(sample)

            args = [None, idx, name, li_id, php_cents]
            r = await page.evaluate(SELECT_PACKAGE_PH_JS, args)
            print(f"\nSELECT_PACKAGE_PH_JS sample li_id={li_id!r} php_cents={php_cents} scrape_idx={idx}:")
            print(json.dumps(r, indent=2)[:800])
        finally:
            # Bug fix: the original only closed the browser on the happy path
            # and the early-return path, leaking Chromium if navigation or
            # evaluate raised. Always release it.
            await browser.close()


# Script entry point: run the diagnosis when executed directly.
if __name__ == "__main__":
    main()
