
"""Chain for applying constitutional principles to the outputs of another chain."""

from typing import Any, Optional

from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate

from langchain.chains.base import Chain
from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
from langchain.chains.constitutional_ai.principles import PRINCIPLES
from langchain.chains.constitutional_ai.prompts import CRITIQUE_PROMPT, REVISION_PROMPT
from langchain.chains.llm import LLMChain


@deprecated(
    since="0.2.13",
    message=(
        "This class is deprecated and will be removed in langchain 1.0. "
        "See API reference for replacement: "
        "https://api.python.langchain.com/en/latest/chains/langchain.chains.constitutional_ai.base.ConstitutionalChain.html"
    ),
    removal="1.0",
)
class ConstitutionalChain(Chain):
    """Chain for applying constitutional principles.

    Note: this class is deprecated. See below for a replacement implementation
        using LangGraph. The benefits of this implementation are:

        - Uses LLM tool calling features instead of parsing string responses;
        - Support for both token-by-token and step-by-step streaming;
        - Support for checkpointing and memory of chat history;
        - Easier to modify or extend (e.g., with additional tools, structured responses, etc.)

        Install LangGraph with:

        .. code-block:: bash

            pip install -U langgraph
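
        The sketch below reimplements the critique-and-revision loop as a
        two-node LangGraph graph: one node generates the initial response, the
        other applies each principle in turn.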

        .. code-block:: python

            from typing import List, Optional, Tuple

            from langchain.chains.constitutional_ai.prompts import (
                CRITIQUE_PROMPT,
                REVISION_PROMPT,
            )
            from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
            from langchain_core.output_parsers import StrOutputParser
            from langchain_core.prompts import ChatPromptTemplate
            from langchain_openai import ChatOpenAI
            from langgraph.graph import END, START, StateGraph
            from typing_extensions import Annotated, TypedDict

            llm = ChatOpenAI(model="gpt-4o-mini")

            class Critique(TypedDict):
                """Generate a critique, if needed."""
                critique_needed: Annotated[bool, ..., "Whether or not a critique is needed."]
                critique: Annotated[str, ..., "If needed, the critique."]

            critique_prompt = ChatPromptTemplate.from_template(
                "Critique this response according to the critique request. "
                "If no critique is needed, specify that.\n\n"
                "Query: {query}\n\n"
                "Response: {response}\n\n"
                "Critique request: {critique_request}"
            )

            revision_prompt = ChatPromptTemplate.from_template(
                "Revise this response according to the critique and reivsion request.\n\n"
                "Query: {query}\n\n"
                "Response: {response}\n\n"
                "Critique request: {critique_request}\n\n"
                "Critique: {critique}\n\n"
                "If the critique does not identify anything worth changing, ignore the "
                "revision request and return 'No revisions needed'. If the critique "
                "does identify something worth changing, revise the response based on "
                "the revision request.\n\n"
                "Revision Request: {revision_request}"
            )

            chain = llm | StrOutputParser()
            critique_chain = critique_prompt | llm.with_structured_output(Critique)
            revision_chain = revision_prompt | llm | StrOutputParser()


            class State(TypedDict):
                query: str
                constitutional_principles: List[ConstitutionalPrinciple]
                initial_response: str
                critiques_and_revisions: List[Tuple[str, str]]
                response: str


            async def generate_response(state: State):
                """Generate initial response."""
                response = await chain.ainvoke(state["query"])
                return {"response": response, "initial_response": response}

            async def critique_and_revise(state: State):
                """Critique and revise response according to principles."""
                critiques_and_revisions = []
                response = state["initial_response"]
                for principle in state["constitutional_principles"]:
                    critique = await critique_chain.ainvoke(
                        {
                            "query": state["query"],
                            "response": response,
                            "critique_request": principle.critique_request,
                        }
                    )
                    if critique["critique_needed"]:
                        revision = await revision_chain.ainvoke(
                            {
                                "query": state["query"],
                                "response": response,
                                "critique_request": principle.critique_request,
                                "critique": critique["critique"],
                                "revision_request": principle.revision_request,
                            }
                        )
                        response = revision
                        critiques_and_revisions.append((critique["critique"], revision))
                    else:
                        critiques_and_revisions.append((critique["critique"], ""))
                return {
                    "critiques_and_revisions": critiques_and_revisions,
                    "response": response,
                }

            graph = StateGraph(State)
            graph.add_node("generate_response", generate_response)
            graph.add_node("critique_and_revise", critique_and_revise)

            graph.add_edge(START, "generate_response")
            graph.add_edge("generate_response", "critique_and_revise")
            graph.add_edge("critique_and_revise", END)
            app = graph.compile()
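
        The compiled graph can then be invoked with a query and a list of
        constitutional principles; streaming with ``stream_mode="values"`` emits
        the full state after each step: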

        .. code-block:: python

            constitutional_principles = [
                ConstitutionalPrinciple(
                    critique_request="Tell if this answer is good.",
                    revision_request="Give a better answer.",
                )
            ]

            query = "What is the meaning of life? Answer in 10 words or fewer."

            async for step in app.astream(
                {"query": query, "constitutional_principles": constitutional_principles},
                stream_mode="values",
            ):
                subset = ["initial_response", "critiques_and_revisions", "response"]
                print({k: v for k, v in step.items() if k in subset})
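
        The final streamed step contains the initial response, the list of
        (critique, revision) pairs, and the revised response.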

    Example:
        .. code-block:: python

            from langchain_community.llms import OpenAI
            from langchain_core.prompts import PromptTemplate
            from langchain.chains import LLMChain, ConstitutionalChain
            from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple

            llm = OpenAI()

            qa_prompt = PromptTemplate(
                template="Q: {question} A:",
                input_variables=["question"],
            )
            qa_chain = LLMChain(llm=llm, prompt=qa_prompt)

            constitutional_chain = ConstitutionalChain.from_llm(
                llm=llm,
                chain=qa_chain,
                constitutional_principles=[
                    ConstitutionalPrinciple(
                        critique_request="Tell if this answer is good.",
                        revision_request="Give a better answer.",
                    )
                ],
            )

            constitutional_chain.run(question="What is the meaning of life?")
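
        To also return intermediate steps, pass ``return_intermediate_steps=True``;
        the output then includes ``initial_output`` and ``critiques_and_revisions``
        (a minimal sketch reusing the chain and principle above):

        .. code-block:: python

            constitutional_chain = ConstitutionalChain.from_llm(
                llm=llm,
                chain=qa_chain,
                constitutional_principles=[
                    ConstitutionalPrinciple(
                        critique_request="Tell if this answer is good.",
                        revision_request="Give a better answer.",
                    )
                ],
                return_intermediate_steps=True,
            )
            result = constitutional_chain.invoke(
                {"question": "What is the meaning of life?"}
            )
            # result includes "output", "initial_output", and "critiques_and_revisions"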
    chainconstitutional_principlescritique_chainrevision_chainFreturn_intermediate_stepsNnamesreturnc                 x    |t        t        j                               S |D cg c]  }t        |    c}S c c}w N)listr   values)clsr   names      e/var/www/html/eduruby.in/venv/lib/python3.12/site-packages/langchain/chains/constitutional_ai/base.pyget_principlesz"ConstitutionalChain.get_principles   s6    
 =
))+,,-23T
4 333s   7llmcritique_promptrevision_promptkwargsc                 N    t        ||      }t        ||      } | d|||d|S )zCreate a chain from an LLM.)r$   prompt)r   r   r    r   )r    r$   r   r%   r&   r'   r   r   s           r"   from_llmzConstitutionalChain.from_llm   sA     "c/B!c/B 
))
 	
 	
    c                 .    | j                   j                  S )zInput keys.)r   
input_keysselfs    r"   r.   zConstitutionalChain.input_keys   s     zz$$$r,   c                 (    | j                   rg dS dgS )zOutput keys.)outputcritiques_and_revisionsinitial_outputr2   )r   r/   s    r"   output_keyszConstitutionalChain.output_keys   s     ))JJzr,   inputsrun_managerc                 B   |xs t        j                         } | j                  j                  di |d|j	                  d      i}|} | j                  j
                  j                  di |}|j                  d|z   dz   | j                  d       g }| j                  D ]_  }| j                  j                  |||j                  |j	                  d            }	| j                  |		      j                         }
d
|
j                         v r|j                  |
df       | j                   j                  |||j                  |
|j"                  |j	                  d            j                         }|}|j                  |
|f       |j                  d|j$                   ddz   | j                  d       |j                  d|
z   dz   | j                  d       |j                  d|z   dz   | j                  d       b d|i}| j&                  r
||d<   ||d<   |S )N	callbacksoriginalzInitial response: 

yellow)textverbosecolorcritique)input_promptoutput_from_modelcritique_requestr9   output_stringzno critique needed revision)rA   rB   rC   r@   revision_requestr9   z	Applying z...greenz
Critique: bluezUpdated response: r2   r4   r3   r*   )r   get_noop_managerr   run	get_childr)   formaton_textr>   r   r   rC   _parse_critiquestriplowerappendr   rH   r!   r   )r0   r6   r7   _run_managerresponseinitial_responserA   r3   constitutional_principleraw_critiquer@   rG   final_outputs                r"   _callzConstitutionalChain._call   sg   
 #S&@&Q&Q&S!4::>> 

",,Z8
 $/tzz((//9&9%069LL 	 	

 #%(,(F(F 1	$  ..22)"*!9!J!J&00<	 3 L ++* , eg  $x~~'77'.."~> **..)"*!9!J!J!!9!J!J&00< /  eg   H#**Hh+?@   !9!>!> ?sCfL !    !H,v5 !    )H4v= ! [1	f )1(';))-=L)*6ML23r,   rE   c                 j    d| vr| S | j                  d      d   } d| v r| j                  d      d   } | S )NzRevision request:r   r;   )splitrD   s    r"   rP   z#ConstitutionalChain._parse_critique:  sI    m3  %++,?@C]")//7:Mr,   r   )__name__
__module____qualname____doc__r   __annotations__r   r
   r   boolclassmethodr   strr#   r   r   r   r   r   r+   propertyr.   r5   dictr   rZ   staticmethodrP   r*   r,   r"   r   r      sc   cJ O#$;<<&+t+ &*4S	"4 
%	&4 4 
 />.=

 
 ,	

 ,
 
 

 
$ %DI % % T#Y   =AJS#XJ 89J 
c3h	JX s s  r,   r   N)r`   typingr   r   langchain_core._apir   langchain_core.callbacksr   langchain_core.language_modelsr   langchain_core.promptsr   langchain.chains.baser	   )langchain.chains.constitutional_ai.modelsr
   -langchain.chains.constitutional_ai.principlesr   *langchain.chains.constitutional_ai.promptsr   r   langchain.chains.llmr   r   r*   r,   r"   <module>rr      sW    S   * ? < 5 ' M D W ) 
	} g% ggr,   