# app/blueprints/bulk_api.py
import os
import time
import uuid
import random
from flask import Blueprint, request, jsonify, current_app

from db.bulk import bulk_db
from services.auth import require_bulk_api_key, get_access_token_for_alias
from services.creator import parse_publish_at_iso_to_ts, creator_can_post_now
from services.media import safe_filename, video_duration_seconds_ffprobe, build_public_media_url
from services.bulk_worker import (
    process_one_scheduled_job,
    resolve_access_token_for_job,
    refresh_submitted_job_status,
)

from tiktok_client import query_creator_info, upload_video_direct_post, fetch_post_status

bp = Blueprint("bulk_api", __name__)


def _table_columns(cur, table_name: str):
    """
    Devuelve lista de columnas reales del schema para inserts robustos.
    Funciona con row_factory sqlite3.Row o tuplas.
    """
    cur.execute(f"PRAGMA table_info({table_name})")
    rows = cur.fetchall()
    cols = []
    for r in rows:
        # sqlite3.Row: r["name"], tuple: r[1]
        try:
            cols.append(r["name"])
        except Exception:
            cols.append(r[1])
    return cols


def _insert_bulk_job(conn, job: dict):
    """
    Inserta en bulk_jobs filtrando por columnas reales (evita 'N values for M columns').
    """
    cur = conn.cursor()
    cols = _table_columns(cur, "bulk_jobs")
    colset = set(cols)

    filtered = {k: v for k, v in job.items() if k in colset}
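    # dicts preserve insertion order (Python 3.7+), so column names, placeholders, and values stay aligned.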

    colnames = ", ".join(filtered.keys())
    placeholders = ", ".join(["?"] * len(filtered))
    sql = f"INSERT INTO bulk_jobs ({colnames}) VALUES ({placeholders})"
    cur.execute(sql, tuple(filtered.values()))
    return cur


def _is_transient_error(err: str) -> bool:
    """
    Decide si el fallo merece reintento (requeue) en lugar de marcar el job como failed.

    Filosofía:
      - Reintentar todo lo que sea probablemente transitorio: 5xx, 429, internal_error,
        timeouts, errores de red, "cannot_post_now", etc.
      - Marcar failed solo para errores claramente permanentes (fichero inexistente, privacy
        inválida, duración excedida, etc.)
    """
    if not err:
        return True

    e = str(err).lower()

    # Clear signals of a transient failure (match against the normalized string)
    if e.startswith("cannot_post_now"):
        return True
    if e.startswith("missing_publish_id"):
        return True

    transient_markers = [
        # rate limit / throttling
        "http=429", "too many requests", "rate_limit", "throttle",
        # 5xx
        "http=500", "http=502", "http=503", "http=504",
        "internal_error", "service unavailable", "bad gateway", "gateway timeout",
        # network / timeout
        "timeout", "timed out", "readtimeout", "connecttimeout",
        "connection", "connection reset", "temporarily", "try again",
    ]
    if any(m in e for m in transient_markers):
        return True

    # These error families are usually network/API related; retry by default
    if e.startswith("direct_post_failed") or e.startswith("creator_info_failed"):
        return True

    # Known permanent errors (do not retry)
    permanent_prefixes = [
        "file_not_found_on_server",
        "duration_exceeds_max",
        "invalid_privacy_level",
        "branded_cannot_be_self_only",
        "missing_privacy_level",
        "missing_video",
        "missing_filename",
    ]
    if any(e.startswith(p) for p in permanent_prefixes):
        return False

    # Anything not matched above is treated as permanent by default (no retry).
    return False


def _backoff_seconds(err: str, attempts_next: int) -> int:
    """
    Exponential backoff with jitter, capped, so we don't hammer the API.

    - cannot_post_now: starts higher.
    - cap: 6h on the exponential part (jitter can add up to 20% on top); the system
      keeps retrying, just without an aggressive loop.
    """
    cap = 6 * 3600
    base = 60 if (err or "").startswith("cannot_post_now") else 30
    exp = min(cap, base * (2 ** max(0, attempts_next - 1)))
    jitter = int(exp * random.uniform(0.0, 0.2))
    return int(exp + jitter)


def _job_account_key(row) -> str:
    """
    Clave para agrupar 'cooldown' por cuenta.
    Preferimos account_alias; si no existe (legacy), degradamos a un hash parcial del access_token.
    """
    try:
        alias = (row["account_alias"] or "").strip()
    except Exception:
        alias = ""
    if alias:
        return f"alias:{alias}"

    try:
        tok = (row["access_token"] or "").strip()
    except Exception:
        tok = ""
    return f"token:{tok[:12]}" if tok else "unknown"


@bp.route("/api/bulk/publish", methods=["POST"], endpoint="api_bulk_publish")
def api_bulk_publish():
    """
    Publish inmediato (server-to-server). Aquí SÍ es correcto generar video_url firmado,
    porque se publica en el momento (la ventana de TTL es suficiente).
    Soporta: account_alias (recomendado) o access_token.
    """
    require_bulk_api_key()

    account_alias = (request.form.get("account_alias") or "").strip()
    access_token = (request.form.get("access_token") or "").strip()

    if account_alias:
        try:
            access_token = get_access_token_for_alias(account_alias)
        except Exception as e:
            return jsonify({"ok": False, "error": "alias_token_failed", "details": str(e)}), 400

    if not access_token:
        return jsonify({"ok": False, "error": "missing_access_token_or_account_alias"}), 400

    if "video" not in request.files:
        return jsonify({"ok": False, "error": "missing_video"}), 400

    file = request.files["video"]
    if not file.filename:
        return jsonify({"ok": False, "error": "missing_filename"}), 400

    title = (request.form.get("title") or "").strip()
    privacy_level = (request.form.get("privacy_level") or "").strip()
    if not privacy_level:
        return jsonify({"ok": False, "error": "missing_privacy_level"}), 400

    allow_comment = request.form.get("allow_comment", "1") == "1"
    allow_duet = request.form.get("allow_duet", "1") == "1"
    allow_stitch = request.form.get("allow_stitch", "1") == "1"

    commercial_toggle = request.form.get("commercial_toggle", "0") == "1"
    brand_organic_toggle = request.form.get("brand_organic_toggle", "0") == "1"
    brand_content_toggle = request.form.get("brand_content_toggle", "0") == "1"
    is_aigc = request.form.get("is_aigc", "1") == "1"
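    # Note: commercial_toggle is read here but is not forwarded to the direct-post call below.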

    if brand_content_toggle and privacy_level == "SELF_ONLY":
        return jsonify({"ok": False, "error": "branded_cannot_be_self_only"}), 400

    # creator_info (latest)
    try:
        creator_info = query_creator_info(access_token)
    except Exception as e:
        return jsonify({"ok": False, "error": "creator_info_failed", "details": str(e)}), 400

    can_post_now, reason = creator_can_post_now(creator_info)
    if not can_post_now:
        return jsonify({"ok": False, "error": "cannot_post_now", "reason": reason}), 400

    # Validate privacy option is allowed
    options = creator_info.get("privacy_level_options") or []
    if privacy_level not in options:
        return jsonify({"ok": False, "error": "invalid_privacy_level", "options": options}), 400

    # Respect creator settings
    if creator_info.get("comment_disabled") is True:
        allow_comment = False
    if creator_info.get("duet_disabled") is True:
        allow_duet = False
    if creator_info.get("stitch_disabled") is True:
        allow_stitch = False

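    # The direct-post request takes disable_* flags, so invert the allow_* values.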
    disable_comment = not allow_comment
    disable_duet = not allow_duet
    disable_stitch = not allow_stitch

    upload_dir = current_app.config["UPLOAD_DIR"]

    original_fn = safe_filename(file.filename)
    stored_filename = f"{int(time.time())}_{original_fn}"
    save_path = os.path.join(upload_dir, stored_filename)
    file.save(save_path)

    # Duration enforcement (best-effort)
    max_dur = creator_info.get("max_video_post_duration_sec")
    dur = video_duration_seconds_ffprobe(save_path)
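    # If no duration could be determined (dur <= 0), the check below is skipped (best-effort).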
    if isinstance(max_dur, int) and max_dur > 0 and dur > 0 and dur > max_dur:
        try:
            os.remove(save_path)
        except Exception:
            pass
        return jsonify({"ok": False, "error": "duration_exceeds_max", "dur": dur, "max": max_dur}), 400

    # Signed URL is fine here (immediate publish)
    video_url = build_public_media_url(
        stored_filename=stored_filename,
        public_base_url=current_app.config["PUBLIC_BASE_URL"],
        signing_secret=current_app.config["MEDIA_SIGNING_SECRET"],
        ttl_seconds=current_app.config["MEDIA_TOKEN_TTL_SECONDS"],
    )

    try:
        init_resp = upload_video_direct_post(
            access_token=access_token,
            caption=title,
            privacy_level=privacy_level,
            disable_comment=disable_comment,
            disable_duet=disable_duet,
            disable_stitch=disable_stitch,
            brand_content_toggle=brand_content_toggle,
            brand_organic_toggle=brand_organic_toggle,
            is_aigc=is_aigc,
            mode="PULL_FROM_URL",
            video_url=video_url,
        )
        publish_id = (init_resp.get("data") or {}).get("publish_id")
        return jsonify(
            {
                "ok": True,
                "publish_id": publish_id,
                "video_url": video_url,
                "init_resp": init_resp,
                "stored_filename": stored_filename,
                "account_alias": account_alias or None,
            }
        ), 200
    except Exception as e:
        return jsonify({"ok": False, "error": "direct_post_failed", "details": str(e)}), 400


@bp.route("/api/bulk/status", methods=["POST"], endpoint="api_bulk_status")
def api_bulk_status():
    """
    Status (server-to-server). Soporta account_alias o access_token.
    """
    require_bulk_api_key()

    account_alias = (request.form.get("account_alias") or "").strip()
    access_token = (request.form.get("access_token") or "").strip()

    if account_alias:
        try:
            access_token = get_access_token_for_alias(account_alias)
        except Exception as e:
            return jsonify({"ok": False, "error": "alias_token_failed", "details": str(e)}), 400

    publish_id = (request.form.get("publish_id") or "").strip()
    if not access_token or not publish_id:
        return jsonify({"ok": False, "error": "missing_access_token_or_publish_id"}), 400

    try:
        data = fetch_post_status(access_token, publish_id)
        return jsonify({"ok": True, "data": data}), 200
    except Exception as e:
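        # Lookup errors are reported as ok=False with HTTP 200; callers should check the "ok" flag.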
        return jsonify({"ok": False, "error": str(e)}), 200


@bp.route("/api/bulk/schedule", methods=["POST"], endpoint="api_bulk_schedule")
def api_bulk_schedule():
    """
    Schedule (server-to-server). Para jobs programados, account_alias es requerido
    para que el cron/worker pueda refrescar tokens con refresh_token.

    Importante:
      - NO generamos video_url firmado aquí, porque caduca (TTL) antes de publish.
      - El worker debe generar video_url justo en el momento de publicar.
    """
    require_bulk_api_key()

    account_alias = (request.form.get("account_alias") or "").strip()
    if not account_alias:
        return jsonify({"ok": False, "error": "account_alias_required_for_scheduled_jobs"}), 400

    # Resolve access_token now ONLY for compatibility (in case the DB requires it).
    # The worker will re-resolve/refresh at publish time using account_alias.
    try:
        access_token = get_access_token_for_alias(account_alias)
    except Exception as e:
        return jsonify({"ok": False, "error": "alias_token_failed", "details": str(e)}), 400

    publish_at = (request.form.get("publish_at") or "").strip()
    if not publish_at:
        return jsonify({"ok": False, "error": "missing_publish_at"}), 400

    try:
        publish_at_ts = parse_publish_at_iso_to_ts(publish_at)
    except Exception as e:
        return jsonify({"ok": False, "error": "invalid_publish_at", "details": str(e)}), 400

    if "video" not in request.files:
        return jsonify({"ok": False, "error": "missing_video"}), 400

    file = request.files["video"]
    if not file.filename:
        return jsonify({"ok": False, "error": "missing_filename"}), 400

    title = (request.form.get("title") or "").strip()
    privacy_level = (request.form.get("privacy_level") or "").strip()
    if not privacy_level:
        return jsonify({"ok": False, "error": "missing_privacy_level"}), 400

    allow_comment = request.form.get("allow_comment", "1") == "1"
    allow_duet = request.form.get("allow_duet", "1") == "1"
    allow_stitch = request.form.get("allow_stitch", "1") == "1"

    commercial_toggle = request.form.get("commercial_toggle", "0") == "1"
    brand_organic_toggle = request.form.get("brand_organic_toggle", "0") == "1"
    brand_content_toggle = request.form.get("brand_content_toggle", "0") == "1"
    is_aigc = request.form.get("is_aigc", "1") == "1"

    if brand_content_toggle and privacy_level == "SELF_ONLY":
        return jsonify({"ok": False, "error": "branded_cannot_be_self_only"}), 400

    upload_dir = current_app.config["UPLOAD_DIR"]
    original_fn = safe_filename(file.filename)

    job_id = uuid.uuid4().hex
    stored_filename = f"{job_id}_{original_fn}"
    save_path = os.path.join(upload_dir, stored_filename)
    file.save(save_path)

    now_ts = int(time.time())

    # IMPORTANT:
    # - video_url: do NOT sign it here. Store "" (falsy) to force JIT signing in the worker.
    job = {
        "id": job_id,
        "created_at_ts": now_ts,
        "publish_at_ts": publish_at_ts,
        "next_attempt_ts": publish_at_ts,
        "status": "scheduled",
        "attempts": 0,
        "last_error": None,
        "access_token": access_token,      # compat (si DB NOT NULL)
        "account_alias": account_alias,    # requerido para refresh en worker
        "title": title,
        "privacy_level": privacy_level,
        "allow_comment": 1 if allow_comment else 0,
        "allow_duet": 1 if allow_duet else 0,
        "allow_stitch": 1 if allow_stitch else 0,
        "commercial_toggle": 1 if commercial_toggle else 0,
        "brand_organic_toggle": 1 if brand_organic_toggle else 0,
        "brand_content_toggle": 1 if brand_content_toggle else 0,
        "is_aigc": 1 if is_aigc else 0,
        "stored_filename": stored_filename,
        "original_filename": original_fn,
        "video_url": "",                  # falsy => worker genera URL
        "publish_id": None,
        "last_status": None,
        "last_fail_reason": None,
        "updated_at_ts": now_ts,
    }

    conn = bulk_db(current_app.config["BULK_DB_PATH"])
    _insert_bulk_job(conn, job)
    conn.commit()
    conn.close()

    return jsonify(
        {
            "ok": True,
            "job_id": job_id,
            "publish_at_ts": publish_at_ts,
            "stored_filename": stored_filename,
            "account_alias": account_alias,
        }
    ), 200


@bp.route("/api/bulk/process_due", methods=["POST"], endpoint="api_bulk_process_due")
def api_bulk_process_due():
    """
    Cron endpoint:
      - Auth: X-Api-Key
      - Procesa jobs due (scheduled) y refresca jobs submitted.

    Mejoras:
      - Reintento con backoff para errores transitorios (incl. 5xx/internal_error).
      - Cooldown por cuenta para no disparar 2 publicaciones seguidas en segundos.
      - Protección anti-atasco: si el worker lanza excepción, requeue en lugar de dejar "submitting".
    """
    require_bulk_api_key()

    max_jobs = int(request.form.get("max_jobs", "10") or "10")
    now_ts = int(time.time())

    # Minimum gap between posts per account (seconds)
    cooldown_s = int(os.environ.get("BULK_MIN_SECONDS_BETWEEN_POSTS", "180") or "180")

    conn = bulk_db(current_app.config["BULK_DB_PATH"])
    cur = conn.cursor()

    processed = {
        "scheduled_submitted": 0,
        "scheduled_failed": 0,
        "scheduled_requeued": 0,
        "submitted_updated": 0,
        "scheduled_deferred_by_cooldown": 0,
    }

    # 1) scheduled due -> pick 1 per account_key, defer the rest by cooldown, then lock picked as submitting
    cur.execute(
        """
        SELECT * FROM bulk_jobs
        WHERE status='scheduled' AND next_attempt_ts <= ?
        ORDER BY publish_at_ts ASC
        LIMIT ?
        """,
        (now_ts, max_jobs),
    )
    due = cur.fetchall()
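    # 'due' is ordered by publish_at_ts ASC, so the earliest due job per account wins this pass.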

    picked = []
    deferred = []
    seen = set()

    for row in due:
        key = _job_account_key(row)
        if key in seen:
            deferred.append(row)
            continue
        seen.add(key)
        picked.append(row)

    # Defer duplicates for same account (so they don't fire back-to-back)
    if deferred and cooldown_s > 0:
        bump_ts = now_ts + cooldown_s
        for r in deferred:
            cur.execute(
                """
                UPDATE bulk_jobs
                SET next_attempt_ts=?, updated_at_ts=?
                WHERE id=? AND status='scheduled' AND next_attempt_ts <= ?
                """,
                (bump_ts, now_ts, r["id"], now_ts),
            )
        processed["scheduled_deferred_by_cooldown"] += len(deferred)
        conn.commit()

    for row in picked:
        job_id = row["id"]

        # lock (anti-concurrency)
        cur.execute(
            """
            UPDATE bulk_jobs
            SET status='submitting', updated_at_ts=?
            WHERE id=? AND status='scheduled'
            """,
            (now_ts, job_id),
        )
        if cur.rowcount != 1:
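            # Another run already locked or moved this job; skip it.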
            continue
        conn.commit()

        try:
            ok, msg, _init_resp = process_one_scheduled_job(
                row,
                upload_dir=current_app.config["UPLOAD_DIR"],
                public_base_url=current_app.config["PUBLIC_BASE_URL"],
                signing_secret=current_app.config["MEDIA_SIGNING_SECRET"],
                ttl_seconds=current_app.config["MEDIA_TOKEN_TTL_SECONDS"],
            )
        except Exception as e:
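            # A worker exception must not leave the job stuck in 'submitting';
            # treat it like any other failure so it gets requeued or failed below.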
            ok = False
            msg = f"worker_exception: {type(e).__name__}: {str(e)}"

        if ok:
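            # On success the worker returns the publish_id as its message.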
            publish_id = msg
            used_access_token = resolve_access_token_for_job(row)

            cur.execute(
                """
                UPDATE bulk_jobs
                SET status='submitted',
                    publish_id=?,
                    attempts=attempts+1,
                    last_error=NULL,
                    last_status=NULL,
                    last_fail_reason=NULL,
                    access_token=?,
                    next_attempt_ts=?,
                    updated_at_ts=?
                WHERE id=?
                """,
                (publish_id, used_access_token, now_ts, now_ts, job_id),
            )
            processed["scheduled_submitted"] += 1
            conn.commit()
            continue

        # failed or requeue
        err = msg or "unknown_error"
        attempts_next = int(row["attempts"]) + 1

        if _is_transient_error(err):
            next_ts = now_ts + _backoff_seconds(err, attempts_next)
            cur.execute(
                """
                UPDATE bulk_jobs
                SET status='scheduled',
                    attempts=?,
                    last_error=?,
                    next_attempt_ts=?,
                    updated_at_ts=?
                WHERE id=?
                """,
                (attempts_next, err, next_ts, now_ts, job_id),
            )
            processed["scheduled_requeued"] += 1
        else:
            cur.execute(
                """
                UPDATE bulk_jobs
                SET status='failed',
                    attempts=?,
                    last_error=?,
                    updated_at_ts=?
                WHERE id=?
                """,
                (attempts_next, err, now_ts, job_id),
            )
            processed["scheduled_failed"] += 1

        conn.commit()

    # 2) refresh submitted jobs
    cur.execute(
        """
        SELECT * FROM bulk_jobs
        WHERE status='submitted'
        ORDER BY updated_at_ts ASC
        LIMIT ?
        """,
        (max_jobs,),
    )
    subs = cur.fetchall()

    for row in subs:
        job_id = row["id"]
        status, fail_reason = refresh_submitted_job_status(row)
        if not status:
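            # No status could be fetched this round; leave the job as 'submitted' and retry on the next run.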
            continue

        # terminal
        if status in ("PUBLISH_COMPLETE", "FAILED", "SEND_TO_USER_INBOX"):
            new_status = "complete" if status == "PUBLISH_COMPLETE" else "failed"
            cur.execute(
                """
                UPDATE bulk_jobs
                SET status=?,
                    last_status=?,
                    last_fail_reason=?,
                    updated_at_ts=?
                WHERE id=?
                """,
                (new_status, status, fail_reason, now_ts, job_id),
            )
        else:
            cur.execute(
                """
                UPDATE bulk_jobs
                SET last_status=?,
                    last_fail_reason=?,
                    updated_at_ts=?
                WHERE id=?
                """,
                (status, fail_reason, now_ts, job_id),
            )

        processed["submitted_updated"] += 1
        conn.commit()

    conn.close()
    return jsonify({"ok": True, "processed": processed, "now_ts": now_ts}), 200
