############################################################################
# import the necessary libraries
############################################################################


from Ai_Agents.models.language_model import llm # type: ignore
from langchain.prompts import ChatPromptTemplate # type: ignore
from langchain_core.output_parsers import JsonOutputParser # type: ignore
from pydantic import BaseModel, Field # type: ignore
from langchain_core.runnables import RunnableParallel # type: ignore
from Ai_Agents.services.templates.character_template import CHARACTER_TEMPLATE


############################################################################
# Define the Pydantic model for the assistant response
############################################################################


class AssistantResponse(BaseModel):
    """Structured output schema for the assistant: a natural-language reply,
    the chosen action ID, and an optional destination."""

    response: str = Field(description="The natural language response to the user query.")
    action_id: str = Field(description="The related action ID from the input action ID array.")
    # default=None makes the field genuinely optional: without it, Pydantic v2
    # treats `str | None` as *required*, so an LLM response that omits the
    # "destination" key (rather than emitting an explicit null) would fail
    # validation even though the description promises "otherwise, null".
    destination: str | None = Field(
        default=None,
        description="The destination if specified by the user; otherwise, null.",
    )

# Parses the raw LLM text into a dict shaped like AssistantResponse
# (note: JsonOutputParser yields a plain dict, not a model instance).
parser = JsonOutputParser(pydantic_object=AssistantResponse)

# Runtime prompt sent to the LLM. The 2 km walk/car threshold and the
# few-shot examples below are part of the model's instructions — do not
# edit this string without re-validating model behavior.
prompt_template = """
You are an intelligent assistant. Based on the user input, the list of possible actions, 
and optionally the distances to various destinations, determine the most relevant action ID and generate an appropriate response.

If distances are provided and the user specifies a movement or want to see a destination but doesn't mention the mode (car or walk), decide based on the distance:
- Use "Move by walk" if the distance is <= 2 km.
- Use "Move by car" if the distance is > 2 km.

If distances are not provided, make a logical decision based only on the user input.

Input: "{user_input}"
Actions: {actions}
Distances: {distances}

Output format:
{format_instructions}

Example with distances:
Input: "Take me to Park Area"
Actions: ["Move by car", "Move by walk", "Stop", "Start Tour", "End Tour"]
Distances: {{"Park Area": 24, "Premium Villa": 45, "Playground": 1.5}}
Response: "Okay, I will take you to the Park Area."
Action ID: "Move by car"
Destination: "Park Area"

Example without distances:
Input: "End the tour for today."
Actions: ["Move by car", "Move by walk", "Stop", "Start Tour", "End Tour"]
Distances: null
Response: "Ending the tour for today."
Action ID: "End Tour"
Destination: null
"""

# Pre-fill {format_instructions} with the parser's JSON-schema guidance so
# callers only supply user_input / actions / distances at invoke time.
prompt = ChatPromptTemplate.from_template(
    template=prompt_template,
    partial_variables={"format_instructions": parser.get_format_instructions()},
)

# LCEL pipeline: format prompt -> call LLM -> parse JSON output to a dict.
chain = prompt | llm | parser


############################################################################
# function to get the response from the LLM
############################################################################


def get_action_response(user_input: str, actions: list, distances: dict | None = None):
    """Run the prompt/LLM/parser chain for one user request.

    Args:
        user_input: The user's natural-language request.
        actions: Candidate action IDs the model may choose from.
        distances: Optional mapping of destination name -> distance (km).

    Returns:
        The parsed JSON response from the chain (response, action_id, destination).
    """
    # The prompt template expects the literal string "null" when no
    # distance data is supplied, mirroring the few-shot example.
    payload = {
        "user_input": user_input,
        "actions": actions,
        "distances": "null" if distances is None else distances,
    }
    return chain.invoke(payload)


############################################################################
# Example usage
############################################################################


# user_input = "Take me to Premium Villa"
# actions = ["Move by car", "Move by walk"]
# distances = {"Park Area": 24, "Premium Villa": 45, "Playground": 1.5}

# response = get_action_response(user_input, actions, distances)
# print(response)


# with the character context

# # Combine character context with the original prompt
# full_prompt = RunnableParallel({
#     "character_context": ChatPromptTemplate.from_template(CHARACTER_TEMPLATE),
#     "task_instructions": ChatPromptTemplate.from_template(prompt_template)
# })

# # Final assembly template
# final_template = """
# {character_context}

# {task_instructions}

# Additional Guidelines:
# - Use your character's unique voice and style in the response.
# - Ensure the response aligns with your backstory.
# - Keep the response natural and conversational.
# """

# # Update the chain to include character context
# chain = (
#     full_prompt |
#     ChatPromptTemplate.from_template(final_template) |
#     llm |
#     parser
# )

# # Function remains unchanged, but now supports character context
# def get_action_response(
#     user_input: str,
#     actions: list,
#     distances: dict | None = None,
#     name: str = "Assistant",  # Default name if not provided
#     gender: str = "neutral",  # Default gender if not provided
#     backstory: str = "A helpful virtual assistant."  # Default backstory if not provided
# ):
#     """Get the response from the LLM based on user input, action IDs, and optional distances."""
#     distances = distances if distances is not None else "null"
#     result = chain.invoke({
#         "name": name,
#         "gender": gender,
#         "backstory": backstory,
#         "user_input": user_input,
#         "actions": actions,
#         "distances": distances,
#     })
#     return result