import json
from pathlib import Path
from celery import chain, chord
from celery_app import celery

# -------------------------------------------------------------------
# 🧩 GLOBAL CELERY TASK
# -------------------------------------------------------------------

@celery.task(name="mcq_main.merge_mcq")
def merge_mcq(results):
    """Combine the PPT and audio chord results into one payload for video sync.

    Args:
        results: Two-element sequence ``[ppt_result, audio_result]`` produced
            by the chord header tasks; each element is a dict.

    Returns:
        dict: ``{"pdf_path": ..., "audio_mapping_file": ..., "id": ...}``
        ready to be fed into the video-sync task.

    Raises:
        ValueError: If the session id or either required path is missing.
    """
    ppt_out, audio_out = results

    # Either upstream task may carry the session id; prefer the PPT side.
    session_id = ppt_out.get("id") or audio_out.get("id")
    if not session_id:
        raise ValueError("Both ppt_result and audio_result must include 'id' key for session identification")

    pdf_path = ppt_out.get("pdf_path")
    audio_mapping_file = audio_out.get("audio_mapping_file")
    if not pdf_path or not audio_mapping_file:
        raise ValueError("ppt_result must include 'pdf_path' and audio_result must include 'audio_mapping_file'")

    print(f"📁 PDF Path: {pdf_path}")
    print(f"🎵 Audio Mapping File: {audio_mapping_file}")
    return {"pdf_path": pdf_path, "audio_mapping_file": audio_mapping_file, "id": session_id}


# -------------------------------------------------------------------
# 🚀 MAIN MCQ PIPELINE
# -------------------------------------------------------------------

def run_mcq_pipeline(pdf_path, avatar_path, session_id, voice_id=None):
    """Queue the complete MCQ-to-video Celery workflow and return its result handle.

    Stages (all routed to the same serial queue):
      1️⃣ Extract MCQs and create slides.json
      2️⃣ Create PPT from slides.json
      3️⃣ Generate audio (uses internal VOICE_ID from audio_generation.py)
      4️⃣ Merge results
      5️⃣ Sync PPT + Audio + Avatar into video

    Args:
        pdf_path: Source PDF used to extract MCQ content.
        avatar_path: Avatar asset forwarded to the video-sync task.
        session_id: Identifier threaded through every pipeline stage.
        voice_id: Optional voice override for audio generation.

    Returns:
        The ``AsyncResult`` returned by ``apply_async`` on the assembled chain.
    """
    # NOTE(review): imported inside the function, presumably to avoid circular
    # imports at worker start-up — confirm before hoisting to module level.
    from Video_generation.MCQ_creator import generate_mcq_content
    from Video_generation.mcq_ppt_generator import create_mcq_ppt
    from Video_generation.audio_conversion import generate_audio_from_presentation
    from Video_generation.Video_sync import pdf_audio_map_to_video

    print("DEBUG TASK SIGNATURES:")
    print(" - MCQ:", generate_mcq_content)
    print(" - PPT:", create_mcq_ppt)
    print(" - AUDIO:", generate_audio_from_presentation)
    print(" - VIDEO:", pdf_audio_map_to_video)

    serial_queue = "video_serial"

    print(f"\n🚀 Launching MCQ Video Pipeline for session: {session_id}")
    print("------------------------------------------------------------")

    # Every signature is pinned to the same queue so all stages are processed
    # by the dedicated video worker.
    extract_sig = generate_mcq_content.s(pdf_path, session_id).set(queue=serial_queue)
    ppt_sig = create_mcq_ppt.s().set(queue=serial_queue)
    audio_sig = generate_audio_from_presentation.s(voice_id=voice_id).set(queue=serial_queue)
    merge_sig = merge_mcq.s().set(queue=serial_queue)
    video_sig = pdf_audio_map_to_video.s(avatar_path=avatar_path).set(queue=serial_queue)

    # extract → chord(ppt + audio → merge) → video
    async_result = chain(
        extract_sig,
        chord([ppt_sig, audio_sig], merge_sig),
        video_sig,
    ).apply_async()

    print(f"✅ [Pipeline Started] Celery job queued for: {session_id}")
    print("Check Celery worker logs for progress.\n------------------------------------------------------------")

    return async_result
