"""DBRX model configuration"""

from typing import Any, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class DbrxAttentionConfig(PretrainedConfig):
    """Configuration class for Dbrx Attention.

    Used by the [`DbrxAttention`] class to instantiate attention layers according to the
    specified arguments, defining the layer architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        attn_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout probability for the attention layers.
        clip_qkv (`float`, *optional*):
            If set, clip the queries, keys, and values in the attention layer to this value.
        kv_n_heads (`int`, *optional*, defaults to 1): For grouped-query attention only; allows the user to
            specify the number of key/value heads.
        rope_theta (`float`, *optional*, defaults to 10000.0): The base frequency for RoPE (rotary position embeddings).
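
    Example (a minimal usage sketch; the values shown are illustrative, not the defaults):

    ```python
    >>> from transformers.models.dbrx.configuration_dbrx import DbrxAttentionConfig

    >>> # Grouped-query attention with 8 key/value heads and QKV clipping
    >>> attn_config = DbrxAttentionConfig(attn_pdrop=0.0, clip_qkv=8.0, kv_n_heads=8, rope_theta=500000.0)
    ```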
    """

    base_config_key = "attn_config"

    def __init__(
        self,
        attn_pdrop: float = 0.0,
        clip_qkv: Optional[float] = None,
        kv_n_heads: int = 1,
        rope_theta: float = 10000.0,
        **kwargs: Any,
    ):
        super().__init__(**kwargs)
        self.attn_pdrop = attn_pdrop
        self.clip_qkv = clip_qkv
        self.kv_n_heads = kv_n_heads
        self.rope_theta = rope_theta

        # Drop bookkeeping kwargs injected by the `PretrainedConfig` machinery, then
        # reject anything unrecognized.
        for k in ["model_type", "attn_implementation", "transformers_version", "_commit_hash", "torch_dtype", "dtype"]:
            if k in kwargs:
                kwargs.pop(k)
        if len(kwargs) != 0:
            raise ValueError(f"Found unknown {kwargs=}")


class DbrxFFNConfig(PretrainedConfig):
    """Configuration class for Dbrx FFN.

    Used by the [`DbrxFFN`] class to instantiate feedforward layers according to the
    specified arguments, defining the layer architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        ffn_act_fn (`dict`, *optional*, defaults to `None`): A dict specifying the activation function for the FFN.
            The dict should have a key 'name' with the value being the name of the activation function along with
            any additional keyword arguments. If `None`, then set to `{"name": "silu"}`.
        ffn_hidden_size (`int`, *optional*, defaults to 3584): The hidden size of the feedforward network.
        moe_num_experts (`int`, *optional*, defaults to 4): The number of experts in the mixture of experts layer.
        moe_top_k (`int`, *optional*, defaults to 1): The number of experts to use in the mixture of experts layer.
        moe_jitter_eps (`float`, *optional*, defaults to `None`): If not `None`, the jitter epsilon for the mixture of experts layer.
        moe_loss_weight (`float`, *optional*, defaults to 0.01): The loss weight for the mixture of experts layer.
        moe_normalize_expert_weights (`float`, *optional*, defaults to 1.0): The normalization factor for the expert weights.
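
    Example (a minimal usage sketch; the values shown are illustrative, not the defaults):

    ```python
    >>> from transformers.models.dbrx.configuration_dbrx import DbrxFFNConfig

    >>> # A 16-expert, top-4 mixture-of-experts FFN with SiLU activation
    >>> ffn_config = DbrxFFNConfig(ffn_act_fn={"name": "silu"}, ffn_hidden_size=10752, moe_num_experts=16, moe_top_k=4)
    ```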
    
    """

    base_config_key = "ffn_config"

    def __init__(
        self,
        ffn_act_fn: Optional[dict] = None,
        ffn_hidden_size: int = 3584,
        moe_num_experts: int = 4,
        moe_top_k: int = 1,
        moe_jitter_eps: Optional[float] = None,
        moe_loss_weight: float = 0.01,
        moe_normalize_expert_weights: Optional[float] = 1.0,
        **kwargs: Any,
    ):
        super().__init__()
        if ffn_act_fn is None:
            ffn_act_fn = {"name": "silu"}
        self.ffn_act_fn = ffn_act_fn
        self.ffn_hidden_size = ffn_hidden_size
        self.moe_num_experts = moe_num_experts
        self.moe_top_k = moe_top_k
        self.moe_jitter_eps = moe_jitter_eps
        self.moe_loss_weight = moe_loss_weight
        self.moe_normalize_expert_weights = moe_normalize_expert_weights

        # Drop bookkeeping kwargs injected by the `PretrainedConfig` machinery, then
        # reject anything unrecognized.
        for k in ["model_type", "attn_implementation", "transformers_version", "_commit_hash", "torch_dtype", "dtype"]:
            if k in kwargs:
                kwargs.pop(k)
        if len(kwargs) != 0:
            raise ValueError(f"Found unknown {kwargs=}")


class DbrxConfig(PretrainedConfig):
    r"""

    This is the configuration class to store the configuration of a [`DbrxModel`]. It is used to instantiate a Dbrx model according to the
    specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a different configuration from that of the [databricks/dbrx-instruct](https://huggingface.co/databricks/dbrx-instruct) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        d_model (`int`, *optional*, defaults to 2048):
            Dimensionality of the embeddings and hidden states.
        n_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        n_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer encoder.
        max_seq_len (`int`, *optional*, defaults to 2048):
            The maximum sequence length of the model.
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the Dbrx model. Defines the maximum number of different tokens that can be represented by
            the `input_ids` passed when calling [`DbrxModel`].
        resid_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout probability applied to the attention output before combining with residual.
        emb_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout probability for the embedding layer.
        attn_config (`dict`, *optional*):
            A dictionary used to configure the model's attention module.
        ffn_config (`dict`, *optional*):
            A dictionary used to configure the model's FFN module.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            Whether or not the router logits should be returned by the model. Enabling this will also
            allow the model to output the auxiliary loss.


    Example:
    ```python
    >>> from transformers import DbrxConfig, DbrxModel

    >>> # Initializing a Dbrx configuration
    >>> configuration = DbrxConfig(n_layers=2, d_model=256, n_heads=8, vocab_size=128)

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = DbrxModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
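
    The attention and FFN sub-modules can also be configured with plain dicts, which are converted to
    [`DbrxAttentionConfig`] and [`DbrxFFNConfig`] internally (a sketch; the values are illustrative):

    ```python
    >>> # Configuring the sub-modules with dicts
    >>> configuration = DbrxConfig(
    ...     n_layers=2,
    ...     d_model=256,
    ...     n_heads=8,
    ...     vocab_size=128,
    ...     attn_config={"kv_n_heads": 2, "rope_theta": 10000.0},
    ...     ffn_config={"ffn_hidden_size": 512, "moe_num_experts": 4, "moe_top_k": 1},
    ... )

    >>> # `attribute_map` exposes the standard names as aliases
    >>> assert configuration.hidden_size == configuration.d_model
    ```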
    """

    model_type = "dbrx"
    sub_configs = {"attn_config": DbrxAttentionConfig, "ffn_config": DbrxFFNConfig}
    attribute_map = {
        "num_attention_heads": "n_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "n_layers",
        "max_position_embeddings": "max_seq_len",
    }

    def __init__(
        self,
        d_model: int = 2048,
        n_heads: int = 16,
        n_layers: int = 24,
        max_seq_len: int = 2048,
        vocab_size: int = 32000,
        resid_pdrop: float = 0.0,
        emb_pdrop: float = 0.0,
        attn_config: Optional[DbrxAttentionConfig] = None,
        ffn_config: Optional[DbrxFFNConfig] = None,
        use_cache: bool = True,
        initializer_range: float = 0.02,
        output_router_logits: bool = False,
        **kwargs: Any,
    ):
        # Accept ready-made sub-configs or plain dicts for the attention and FFN modules.
        if attn_config is None:
            self.attn_config = DbrxAttentionConfig()
        elif isinstance(attn_config, dict):
            self.attn_config = DbrxAttentionConfig(**attn_config)
        else:
            self.attn_config = attn_config

        if ffn_config is None:
            self.ffn_config = DbrxFFNConfig()
        elif isinstance(ffn_config, dict):
            self.ffn_config = DbrxFFNConfig(**ffn_config)
        else:
            self.ffn_config = ffn_config

        self.d_model = d_model
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.max_seq_len = max_seq_len
        self.vocab_size = vocab_size
        self.resid_pdrop = resid_pdrop
        self.emb_pdrop = emb_pdrop
        self.use_cache = use_cache
        self.initializer_range = initializer_range
        self.output_router_logits = output_router_logits
        self.num_key_value_heads = self.attn_config.kv_n_heads

        tie_word_embeddings = kwargs.pop("tie_word_embeddings", False)
        if tie_word_embeddings:
            raise ValueError("tie_word_embeddings is not supported for DBRX models.")

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


__all__ = ["DbrxConfig"]