
"""Loading datasets and evaluators."""

from collections.abc import Sequence
from typing import Any, Optional, Union

from langchain_core.language_models import BaseLanguageModel

from langchain.chains.base import Chain
from langchain.evaluation.agents.trajectory_eval_chain import TrajectoryEvalChain
from langchain.evaluation.comparison import PairwiseStringEvalChain
from langchain.evaluation.comparison.eval_chain import LabeledPairwiseStringEvalChain
from langchain.evaluation.criteria.eval_chain import (
    CriteriaEvalChain,
    LabeledCriteriaEvalChain,
)
from langchain.evaluation.embedding_distance.base import (
    EmbeddingDistanceEvalChain,
    PairwiseEmbeddingDistanceEvalChain,
)
from langchain.evaluation.exact_match.base import ExactMatchStringEvaluator
from langchain.evaluation.parsing.base import (
    JsonEqualityEvaluator,
    JsonValidityEvaluator,
)
from langchain.evaluation.parsing.json_distance import JsonEditDistanceEvaluator
from langchain.evaluation.parsing.json_schema import JsonSchemaEvaluator
from langchain.evaluation.qa import ContextQAEvalChain, CotQAEvalChain, QAEvalChain
from langchain.evaluation.regex_match.base import RegexMatchStringEvaluator
from langchain.evaluation.schema import EvaluatorType, LLMEvalChain, StringEvaluator
from langchain.evaluation.scoring.eval_chain import (
    LabeledScoreStringEvalChain,
    ScoreStringEvalChain,
)
from langchain.evaluation.string_distance.base import (
    PairwiseStringDistanceEvalChain,
    StringDistanceEvalChain,
)


def load_dataset(uri: str) -> list[dict]:
    """Load a dataset from the `LangChainDatasets on HuggingFace <https://huggingface.co/LangChainDatasets>`_.

    Args:
        uri: The uri of the dataset to load.

    Returns:
        A list of dictionaries, each representing a row in the dataset.

    **Prerequisites**

    .. code-block:: shell

        pip install datasets

    Examples
    --------
    .. code-block:: python

        from langchain.evaluation import load_dataset
        ds = load_dataset("llm-math")
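
        # Each row is a plain dict; inspect ds[0] to see the fields
        # provided by the chosen dataset.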
    """
    try:
        from datasets import load_dataset
    except ImportError as e:
        msg = (
            "load_dataset requires the `datasets` package."
            " Please install with `pip install datasets`"
        )
        raise ImportError(msg) from e

    dataset = load_dataset(f"LangChainDatasets/{uri}")
    return list(dataset["train"])


_EVALUATOR_MAP: dict[
    EvaluatorType,
    Union[type[LLMEvalChain], type[Chain], type[StringEvaluator]],
] = {
    EvaluatorType.QA: QAEvalChain,
    EvaluatorType.COT_QA: CotQAEvalChain,
    EvaluatorType.CONTEXT_QA: ContextQAEvalChain,
    EvaluatorType.PAIRWISE_STRING: PairwiseStringEvalChain,
    EvaluatorType.SCORE_STRING: ScoreStringEvalChain,
    EvaluatorType.LABELED_PAIRWISE_STRING: LabeledPairwiseStringEvalChain,
    EvaluatorType.LABELED_SCORE_STRING: LabeledScoreStringEvalChain,
    EvaluatorType.AGENT_TRAJECTORY: TrajectoryEvalChain,
    EvaluatorType.CRITERIA: CriteriaEvalChain,
    EvaluatorType.LABELED_CRITERIA: LabeledCriteriaEvalChain,
    EvaluatorType.STRING_DISTANCE: StringDistanceEvalChain,
    EvaluatorType.PAIRWISE_STRING_DISTANCE: PairwiseStringDistanceEvalChain,
    EvaluatorType.EMBEDDING_DISTANCE: EmbeddingDistanceEvalChain,
    EvaluatorType.PAIRWISE_EMBEDDING_DISTANCE: PairwiseEmbeddingDistanceEvalChain,
    EvaluatorType.JSON_VALIDITY: JsonValidityEvaluator,
    EvaluatorType.JSON_EQUALITY: JsonEqualityEvaluator,
    EvaluatorType.JSON_EDIT_DISTANCE: JsonEditDistanceEvaluator,
    EvaluatorType.JSON_SCHEMA_VALIDATION: JsonSchemaEvaluator,
    EvaluatorType.REGEX_MATCH: RegexMatchStringEvaluator,
    EvaluatorType.EXACT_MATCH: ExactMatchStringEvaluator,
}


def load_evaluator(
    evaluator: EvaluatorType,
    *,
    llm: Optional[BaseLanguageModel] = None,
    **kwargs: Any,
) -> Union[Chain, StringEvaluator]:
    """Load the requested evaluation chain specified by a string.

    Parameters
    ----------
    evaluator : EvaluatorType
        The type of evaluator to load.
    llm : BaseLanguageModel, optional
        The language model to use for evaluation, by default None
    **kwargs : Any
        Additional keyword arguments to pass to the evaluator.

    Returns
    -------
    Chain
        The loaded evaluation chain.

    Examples
    --------
    >>> from langchain.evaluation import load_evaluator, EvaluatorType
    >>> evaluator = load_evaluator(EvaluatorType.QA)
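
    A model and evaluator-specific keyword arguments may also be supplied;
    here ``my_llm`` stands in for any ``BaseLanguageModel`` instance:

    >>> evaluator = load_evaluator(
    ...     EvaluatorType.CRITERIA, llm=my_llm, criteria="conciseness"
    ... )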
    zUnknown evaluator type: z
Valid types are: r   )
ChatOpenAIzCould not import langchain_openai or fallback onto langchain_community. Please install langchain_openai or specify a language model explicitly. It's recommended to install langchain_openai AND specify a language model explicitly.Nzgpt-4*   )modelseedtemperaturezEvaluation with the z requires a language model to function. Failed to create the default 'gpt-4' model. Please manually provide an evaluation LLM or check your openai credentials.r-    )r,   r'   keys
ValueError
issubclassr   langchain_openair1   r&   &langchain_community.chat_models.openai	Exceptionfrom_llm)r.   r-   r/   r)   evaluator_clsr1   r(   s          r+   load_evaluatorr?   f   s!   6 &&yk!$~':':'<"=!>@ 	 o"9-M-.	)27  J'JC &}%%8#888"6""5  22 # 2?  &c*122   	)&}o 65 5  S/q(	)sZ   B B9 	B6BB6	B0B++B00B63B9 5B66B9 9	CCC)r-   config
evaluatorsr@   c          	          g }| D ]9  }|r|j                  |i       ni }|j                  t        |fd|ii ||       ; |S )ae  Load evaluators specified by a list of evaluator types.

    Parameters
    ----------
    evaluators : Sequence[EvaluatorType]
        The list of evaluator types to load.
    llm : BaseLanguageModel, optional
        The language model to use for evaluation, if none is provided, a default
        ChatOpenAI gpt-4 model will be used.
    config : dict, optional
        A dictionary mapping evaluator types to additional keyword arguments,
        by default None
    **kwargs : Any
        Additional keyword arguments to pass to all evaluators.

    Returns
    -------
    List[Chain]
        The loaded evaluators.

    Examples
    --------
    >>> from langchain.evaluation import load_evaluators, EvaluatorType
    >>> evaluators = [EvaluatorType.QA, EvaluatorType.CRITERIA]
    >>> loaded_evaluators = load_evaluators(evaluators, criteria="helpfulness")
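
    Per-evaluator keyword arguments can be routed through ``config``, keyed by
    evaluator type (a sketch):

    >>> loaded = load_evaluators(
    ...     evaluators,
    ...     config={EvaluatorType.CRITERIA: {"criteria": "conciseness"}},
    ... )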
    """
    loaded = []
    for evaluator in evaluators:
        _kwargs = config.get(evaluator, {}) if config else {}
        loaded.append(load_evaluator(evaluator, llm=llm, **{**kwargs, **_kwargs}))
    return loaded