"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict
from .classifiertrainingparametersin import (
    ClassifierTrainingParametersIn,
    ClassifierTrainingParametersInTypedDict,
)
from .completiontrainingparametersin import (
    CompletionTrainingParametersIn,
    CompletionTrainingParametersInTypedDict,
)
from .finetuneablemodeltype import FineTuneableModelType
from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict
from .trainingfile import TrainingFile, TrainingFileTypedDict
from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict
from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
from pydantic import model_serializer
from typing import List, Optional, Union
from typing_extensions import NotRequired, TypeAliasType, TypedDict


JobInIntegrationsTypedDict = WandbIntegrationTypedDict


JobInIntegrations = WandbIntegration


HyperparametersTypedDict = TypeAliasType(
    "HyperparametersTypedDict",
    Union[
        ClassifierTrainingParametersInTypedDict, CompletionTrainingParametersInTypedDict
    ],
)


Hyperparameters = TypeAliasType(
    "Hyperparameters",
    Union[ClassifierTrainingParametersIn, CompletionTrainingParametersIn],
)
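
# A minimal sketch of how this union is meant to be used (constructor
# arguments are elided -- see the respective parameter models for the
# actual fields):
#
#     hyperparameters=CompletionTrainingParametersIn(...)  # completion jobs
#     hyperparameters=ClassifierTrainingParametersIn(...)  # classifier jobs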


JobInRepositoriesTypedDict = GithubRepositoryInTypedDict


JobInRepositories = GithubRepositoryIn


class JobInTypedDict(TypedDict):
    model: str
    r"""The name of the model to fine-tune."""
    hyperparameters: HyperparametersTypedDict
    training_files: NotRequired[List[TrainingFileTypedDict]]
    validation_files: NotRequired[Nullable[List[str]]]
    r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files."""
    suffix: NotRequired[Nullable[str]]
    r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`"""
    integrations: NotRequired[Nullable[List[JobInIntegrationsTypedDict]]]
    r"""A list of integrations to enable for your fine-tuning job."""
    auto_start: NotRequired[bool]
    r"""This field will be required in a future release."""
    invalid_sample_skip_percentage: NotRequired[float]
    job_type: NotRequired[Nullable[FineTuneableModelType]]
    repositories: NotRequired[Nullable[List[JobInRepositoriesTypedDict]]]
    classifier_targets: NotRequired[Nullable[List[ClassifierTargetInTypedDict]]]

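# Illustrative payload sketch (values are examples, not defaults; the
# hyperparameters entry must match one of the union branches above):
#
#     job: JobInTypedDict = {
#         "model": "open-mistral-7b",
#         "hyperparameters": {...},  # Classifier... or Completion...TypedDict
#         "auto_start": True,
#     }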

class JobIn(BaseModel):
    model: str
    r"""The name of the model to fine-tune."""

    hyperparameters: Hyperparameters

    training_files: Optional[List[TrainingFile]] = None

    validation_files: OptionalNullable[List[str]] = UNSET
    r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files."""

    suffix: OptionalNullable[str] = UNSET
    r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`"""

    integrations: OptionalNullable[List[JobInIntegrations]] = UNSET
    r"""A list of integrations to enable for your fine-tuning job."""

    auto_start: Optional[bool] = None
    r"""This field will be required in a future release."""

    invalid_sample_skip_percentage: Optional[float] = 0.0

    job_type: OptionalNullable[FineTuneableModelType] = UNSET

    repositories: OptionalNullable[List[JobInRepositories]] = UNSET

    classifier_targets: OptionalNullable[List[ClassifierTargetIn]] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        optional_fields = [
            "training_files",
            "validation_files",
            "suffix",
            "integrations",
            "auto_start",
            "invalid_sample_skip_percentage",
            "job_type",
            "repositories",
            "classifier_targets",
        ]
        nullable_fields = [
            "validation_files",
            "suffix",
            "integrations",
            "job_type",
            "repositories",
            "classifier_targets",
        ]
        null_default_fields = []

        serialized = handler(self)

        m = {}

        # Walk the declared fields so unset optional fields are dropped from
        # the payload while explicit nulls on nullable fields are preserved.
        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k)
            serialized.pop(k, None)

            optional_nullable = n in optional_fields and n in nullable_fields
            is_set = (
                n in self.__pydantic_fields_set__  # pylint: disable=no-member
                or n in null_default_fields
            )

            if val is not None and val != UNSET_SENTINEL:
                # Concrete value: always emit it.
                m[k] = val
            elif val != UNSET_SENTINEL and (
                n not in optional_fields or (optional_nullable and is_set)
            ):
                # Emit None only for required fields or for nullable fields
                # the caller set to null explicitly; UNSET fields are omitted.
                m[k] = val

        return m

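
# Usage sketch (illustrative only; constructor arguments for the
# hyperparameter models are assumptions -- consult those models for their
# real fields):
#
#     job = JobIn(
#         model="open-mistral-7b",
#         hyperparameters=CompletionTrainingParametersIn(...),
#         suffix=None,  # explicit null on a nullable field: kept as null
#     )
#     payload = job.model_dump()
#     # ``validation_files`` was never assigned, so it stays UNSET and
#     # serialize_model() drops it from ``payload``; ``suffix`` was set to
#     # None explicitly, so it serializes as null.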