# ppt_pro.py
import os
import io
import uuid
import json
import base64
import logging
import re
from typing import List, Dict, Optional, Tuple
import subprocess
import shutil
import requests
from flask import Blueprint, request, jsonify, current_app, url_for
from PIL import Image
from io import BytesIO

from pptx import Presentation
from pptx.util import Inches, Pt
from pptx.enum.text import PP_ALIGN
from pptx.dml.color import RGBColor
from pptx.enum.shapes import MSO_AUTO_SHAPE_TYPE
from dotenv import load_dotenv
load_dotenv()

# Gemini (google-genai client)
from google import genai

ppt_pro_bp = Blueprint("ppt_pro_bp", __name__)
log = logging.getLogger(__name__)

IMAGE_MODEL = "gemini-2.5-flash-image-preview"

# -----------------------------
# Utilities / paths
# -----------------------------
def _ensure_dir(path: str):
    os.makedirs(path, exist_ok=True)

def _static_dir() -> str:
    return os.path.join(current_app.root_path, "static")

def _exports_dir() -> str:
    p = os.path.join(_static_dir(), "exports")
    _ensure_dir(p)
    return p

def _images_dir() -> str:
    p = os.path.join(_exports_dir(), "images")
    _ensure_dir(p)
    return p

def _public_url(local_path: str) -> str:
    """Turn a file under static/ into an absolute URL, proxy/HTTPS-safe."""
    static_root = os.path.abspath(current_app.static_folder)
    local_abs = os.path.abspath(local_path)
    rel = os.path.relpath(local_abs, static_root).replace("\\", "/")
    return url_for("static", filename=rel, _external=True, _scheme=request.scheme)

# -----------------------------
# Image generation (providers)
# -----------------------------
def generate_image_gemini(prompt: str, out_dir: str, diagram: bool = False) -> Tuple[Optional[str], Optional[str]]:
    """
    Generate an image via google-genai Client. Enforces robust parsing for inline bytes/base64.
    Returns (path, None) on success; otherwise (None, reason).
    """
    api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
    if not api_key:
        return None, "GEMINI_API_KEY/GOOGLE_API_KEY missing"

    try:
        client = genai.Client(api_key=api_key)
        final_prompt = (
            f"Generate a clean, professional flowchart or diagram. Topic: {prompt}. "
            f"Style: minimal, vector, modern."
        ) if diagram else prompt

        resp = client.models.generate_content(model=IMAGE_MODEL, contents=[final_prompt])
        candidates = getattr(resp, "candidates", None) or []
        if not candidates:
            return None, "no candidates"

        for cand in candidates:
            content = getattr(cand, "content", None)
            if not content:
                continue
            parts = getattr(content, "parts", None) or []
            for part in parts:
                inline = getattr(part, "inline_data", None)
                if inline is None:
                    # log text parts if present
                    if getattr(part, "text", None):
                        log.debug("Gemini text part: %s", part.text)
                    continue

                mime = getattr(inline, "mime_type", "") or ""
                if not mime.startswith("image/"):
                    continue

                raw = getattr(inline, "data", None)
                if not raw:
                    continue

                # Accept bytes or base64 string
                if isinstance(raw, str):
                    try:
                        raw = base64.b64decode(raw)
                    except Exception as e:
                        log.warning("Gemini inline_data base64 decode failed: %s", e)
                        continue

                try:
                    with Image.open(BytesIO(raw)) as im:
                        img = im.convert("RGB")
                except Exception as e:
                    log.warning("PIL open failed: %s", e)
                    continue

                safe_prefix = re.sub(r"[^a-zA-Z0-9]+", "_", (final_prompt[:40] or "img")).strip("_")
                fname = f"{'diagram' if diagram else 'img'}_{safe_prefix}_{uuid.uuid4().hex[:8]}.jpg"
                _ensure_dir(out_dir)
                fpath = os.path.join(out_dir, fname)
                img.save(fpath, "JPEG", quality=92)
                return fpath, None

        return None, "no inline image data"

    except Exception as e:
        msg = str(e)
        if "Quota exceeded" in msg or "429" in msg or "rate limit" in msg.lower():
            return None, "gemini_quota_exceeded"
        return None, f"gemini: {msg}"

def generate_image_openai(prompt: str, out_dir: str) -> Tuple[Optional[str], Optional[str]]:
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        return None, "OPENAI_API_KEY missing"
    size = "1792x1024"
    try:
        r = requests.post(
            "https://api.openai.com/v1/images/generations",
            headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"},
            json={
                "model": "dall-e-3",
                "prompt": prompt,
                "size": size,
                "response_format": "b64_json"
            },
            timeout=90
        )
        if r.status_code >= 400:
            err = r.json().get("error", {})
            if err.get("code") == "billing_hard_limit_reached":
                return None, "openai_billing_cap"
            return None, f"{r.status_code} {r.reason}: {r.text}"
        b64 = r.json()["data"][0]["b64_json"]
        raw = io.BytesIO(base64.b64decode(b64))
        img = Image.open(raw).convert("RGB")
        fname = f"{uuid.uuid4().hex}.jpg"
        fpath = os.path.join(out_dir, fname)
        _ensure_dir(out_dir)
        img.save(fpath, "JPEG", quality=92)
        return fpath, None
    except Exception as e:
        return None, str(e)

# -----------------------------
# Speaker notes (OpenAI with retry)
# -----------------------------
ENRICH_PROMPT_TMPL = (
    "You are writing engaging, concise speaker notes for a slide.\n"
    "Heading:\n{heading}\n\n"
    "Bullets:\n{bullets_list}\n\n"
    "Write 4–7 punchy sentences (no markdown, no bullet symbols)."
)

def enrich_speaker_notes(heading: str, bullets: List[str]) -> Tuple[str, Optional[str]]:
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        return (" • " + "\n • ".join(bullets or []), "OPENAI_API_KEY missing")
    prompt = ENRICH_PROMPT_TMPL.format(
        heading=heading or "",
        bullets_list="\n".join(f"- {b}" for b in (bullets or []))
    )
    import time
    tries, delay = 4, 1.5
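    # exponential backoff: sleeps of ~1.5 s, 3 s, then 6 s between the four attempts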
    last_err = None
    for i in range(tries):
        try:
            r = requests.post(
                "https://api.openai.com/v1/chat/completions",
                headers={"Authorization": f"Bearer {api_key}"},
                json={
                    "model": "gpt-4o-mini",
                    "messages": [
                        {"role": "system", "content": "You write concise, vivid speaker notes."},
                        {"role": "user", "content": prompt}
                    ],
                    "temperature": 0.7,
                    "max_tokens": 260
                },
                timeout=60
            )
            # fail fast on non-retryable client errors (bad request / auth)
            if r.status_code in (400, 401, 403):
                last_err = f"{r.status_code} {r.reason}"
                break
            if r.status_code == 429:
                last_err = "429 Too Many Requests"
                if i < tries - 1:
                    time.sleep(delay); delay *= 2
                    continue
                break
            r.raise_for_status()
            content = r.json()["choices"][0]["message"]["content"].strip()
            return (content, None)
        except Exception as e:
            last_err = str(e)
            if i < tries - 1:
                time.sleep(delay); delay *= 2
                continue
            break
    return (" • " + "\n • ".join(bullets or []), last_err)

# -----------------------------
# Layout helpers (WHITE slides)
# -----------------------------
def _set_white_background(slide, prs: Presentation):
    """Cover slide with a white rectangle, sent to back."""
    rect = slide.shapes.add_shape(
        MSO_AUTO_SHAPE_TYPE.RECTANGLE, 0, 0, prs.slide_width, prs.slide_height
    )
    rect.fill.solid()
    rect.fill.fore_color.rgb = RGBColor(255, 255, 255)
    rect.line.fill.background()
    # send to back: move the shape to index 2 so spTree's required
    # nvGrpSpPr/grpSpPr children stay first
    rect.element.getparent().insert(2, rect.element)

def _fit_picture_in_box(slide, img_path: str, box_left, box_top, box_w, box_h):
    """Insert image scaled to fit inside the given EMU box, centered."""
    with Image.open(img_path) as pil_img:
        iw, ih = pil_img.size  # pixels
    EMU_PER_INCH = 914400
    def px_to_emu(px): return int((px / 96.0) * EMU_PER_INCH)  # assume 96 DPI
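    # e.g. a 1920x1080 px image at the assumed 96 DPI is 20.0 x 11.25 inches,
    # i.e. 18,288,000 x 10,287,000 EMU before scaling into the box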
    iw_emu, ih_emu = px_to_emu(iw), px_to_emu(ih)
    scale = min(box_w / iw_emu, box_h / ih_emu)
    w, h = int(iw_emu * scale), int(ih_emu * scale)
    left = int(box_left + (box_w - w) / 2)
    top = int(box_top + (box_h - h) / 2)
    slide.shapes.add_picture(img_path, left, top, width=w, height=h)

def _add_title_slide(prs: Presentation, title: str):
    slide = prs.slides.add_slide(prs.slide_layouts[6])  # Blank
    _set_white_background(slide, prs)

    left = Inches(0.9)
    top = Inches(2.0)
    width = prs.slide_width - Inches(1.8)
    height = Inches(2.0)

    box = slide.shapes.add_textbox(left, top, width, height)
    tf = box.text_frame
    tf.clear()
    p = tf.paragraphs[0]
    p.text = title
    p.font.size = Pt(48)
    p.font.bold = True
    p.font.color.rgb = RGBColor(0, 0, 0)
    p.alignment = PP_ALIGN.CENTER

def _add_clean_content_slide(
    prs: Presentation,
    heading: str,
    bullets: List[str],
    image_path: Optional[str],
    speaker_notes: Optional[str],
):
    """White slide, left text, right image (no reserved avatar space)."""
    slide = prs.slides.add_slide(prs.slide_layouts[6])  # Blank
    _set_white_background(slide, prs)

    # --- layout measurements ---
    L_MARGIN = Inches(0.9)
    R_MARGIN = Inches(0.9)
    TOP = Inches(0.7)
    GUTTER = Inches(0.4)

    total_w = prs.slide_width
    left_w = int(total_w * 0.52)              # ~52% for text
    right_w = total_w - int(L_MARGIN) - left_w - int(GUTTER) - int(R_MARGIN)  # stop at the right margin

    # Text area (left): title + bullets
    title_h = Inches(1.1)
    body_h = prs.slide_height - TOP - title_h - Inches(0.9)

    title_box = slide.shapes.add_textbox(L_MARGIN, TOP, left_w, title_h)
    tf_t = title_box.text_frame
    tf_t.clear()
    p = tf_t.paragraphs[0]
    p.text = heading or ""
    p.font.size = Pt(36)
    p.font.bold = True
    p.font.color.rgb = RGBColor(0, 0, 0)

    body_box = slide.shapes.add_textbox(L_MARGIN, TOP + title_h, left_w, body_h)
    tf_b = body_box.text_frame
    tf_b.clear()
    if bullets:
        for i, b in enumerate(bullets):
            par = tf_b.paragraphs[0] if i == 0 else tf_b.add_paragraph()
            par.text = (b or "").strip()
            par.level = 0
            par.font.size = Pt(22)
            par.font.color.rgb = RGBColor(20, 20, 20)

    # Image area (right) – use full available height
    right_left = L_MARGIN + left_w + GUTTER
    right_top = TOP
    right_h_for_image = prs.slide_height - TOP - Inches(0.9)  # no avatar reservation

    if image_path and os.path.exists(image_path):
        _fit_picture_in_box(slide, image_path, right_left, right_top, right_w, right_h_for_image)

    # Speaker notes (notes pane)
    if speaker_notes:
        ntf = slide.notes_slide.notes_text_frame
        ntf.clear()
        ntf.text = speaker_notes

# -----------------------------
# Image pipeline (RIGHT column only)
# -----------------------------
def pick_right_image(bullets: List[str], image_suggestion: Optional[str], out_dir: str):
    """
    Generate a clean illustration for the RIGHT column only.
    No text/labels/captions inside the image.
    Returns (img_path, source_label, error_text_or_None)
    """
    key_bits = ", ".join([b for b in (bullets or []) if isinstance(b, str)][:4])
    base = image_suggestion or f"Professional illustration related to: {key_bits}"

    prompt = (
        f"{base}. Style: modern, clean, flat vector or minimal illustration. "
        f"No text, no typography, no captions, no labels, no watermark. "
        f"Focus on concept visuals only. Plain or subtle background."
    )

    # Gemini first
    img, err = generate_image_gemini(prompt, out_dir, diagram=False)
    if img:
        return img, "gemini", None

    # Optional OpenAI fallback if enabled
    if os.getenv("USE_OPENAI_IMG") == "1":
        img2, err2 = generate_image_openai(prompt, out_dir)
        if img2:
            return img2, "openai_img", None
        err = (err or "") + f" | openai_img: {err2}"

    return None, "none", err or "image generation failed"

# -----------------------------
# Celery task / endpoint wrapper
# -----------------------------
from celery_app import celery

def create_ppt_pro(data: dict) -> dict:
    """Synchronous wrapper: build the deck and return a JSON-serialisable status dict."""
    try:
        result = create_ppt_from_data(data)
    except Exception as e:
        return {"status": False, "message": str(e)}

    return {
        "status": True,
        "message": "PPT created successfully",
        "file": _public_url(result["pdf_path"]),
        "diagnostics": result.get("diagnostics"),
    }
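
# Minimal route sketch (illustrative): wires the blueprint declared above to the
# synchronous wrapper. The "/create-pro" path is taken from the docstring of
# create_ppt_from_data; a production setup would more likely enqueue the Celery
# task (create_ppt_from_data.delay(...)) instead of building the deck in-request.
@ppt_pro_bp.route("/create-pro", methods=["POST"])
def create_pro():
    payload = request.get_json(silent=True) or {}
    result = create_ppt_pro(payload)
    return jsonify(result), (200 if result.get("status") else 400)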

@celery.task(name="ppt_pro.create_ppt_from_data")
def create_ppt_from_data(data: dict, base_dir: str = "./public/ppt/") -> dict:
    """
    data: dict with an "id" key plus a "json" key holding the presentation
          content ("title" and "slides", same shape as the /create-pro JSON body)
    base_dir: base directory for generated files (defaults to ./public/ppt/;
              falls back to the Flask static exports dir if empty)

    Returns a dict with:
        - "pdf_path": full local path to the generated PDF
        - "id": the session id
        - "diagnostics": per-slide diagnostics list
    """
    session_id = data.get("id")
    if not session_id:
        raise ValueError("data must include 'id' key for session identification")
    exports_dir = base_dir or _exports_dir()
    images_dir = os.path.join(exports_dir, f"{session_id}_images" if session_id else "images")
    _ensure_dir(images_dir)
    data= data.get('json')
    if isinstance(data, dict):
        pass
    else:
        raise ValueError("data must include 'json' key with presentation content")
    title = (data.get("title") or "Untitled").strip()
    slides_in = data.get("slides") or []
    diagnostics = []

    prs = Presentation()
    prs.slide_width = Inches(13.333)
    prs.slide_height = Inches(7.5)

    # Title slide
    _add_title_slide(prs, title)

    # Content slides
    for idx, s in enumerate(slides_in, start=1):
        heading = (s.get("heading") or "").strip()[:180]
        raw_bullets = s.get("bullets") or []
        bullets = [str(b).strip()[:240] for b in raw_bullets if isinstance(b, (str, int, float))]
        img_suggest = s.get("image_suggestion")

        # Speaker notes: enrich unless provided
        if s.get("speaker_notes"):
            notes = s["speaker_notes"]
            notes_err = None
            notes_src = "input"
        else:
            notes, notes_err = enrich_speaker_notes(heading, bullets)
            notes_src = "openai" if not notes_err else "fallback"

        # Right-side image generation
        img_path, img_src, img_err = pick_right_image(bullets, img_suggest, images_dir)

        diagnostics.append({
            "slide": idx,
            "heading": heading,
            "right_image_path": img_path,
            "image_source": img_src,
            "image_error": img_err,
            "notes_source": notes_src,
            "notes_error": notes_err
        })

        _add_clean_content_slide(prs, heading, bullets, img_path, notes)

    out_name = f"{session_id}.pptx"
    pdf_name = f"{session_id}.pdf"
    pdf_path = os.path.join(exports_dir, pdf_name)
    pptx_path = os.path.join(exports_dir, out_name)
    prs.save(pptx_path)
    # Convert to PDF with the LibreOffice CLI (LIBREOFFICE may point at the
    # soffice binary; fall back to `soffice` on PATH)
    soffice = os.getenv("LIBREOFFICE") or "soffice"
    subprocess.run([
        soffice, "--headless", "--convert-to", "pdf", pptx_path, "--outdir", exports_dir
    ], check=True)

    # Delete the intermediate PPTX
    if os.path.exists(pptx_path):
        os.remove(pptx_path)
    if os.path.exists(images_dir):
        try:
            shutil.rmtree(images_dir)
        except Exception as e:
            log.warning("Failed to delete images dir: %s", e)
    

    return {"pdf_path": pdf_path, "id": session_id}
