
from typing import Callable, Optional, TypedDict, Union

import torch
import torch.nn.functional as F
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from ...utils.deprecation import deprecate_kwarg
from .configuration_granitemoeshared import GraniteMoeSharedConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


class GraniteFlashAttentionKwargs(TypedDict, total=False):
    """
    Keyword arguments for advanced Flash Attention, causal-conv1d, and mamba_ssm kernel usage.
    Use cases include padding-free training and fewer `torch.compile` graph breaks.

    Attributes:
        cu_seq_lens_q (`torch.LongTensor`):
            Cumulative sequence lengths for the query state.
        cu_seq_lens_k (`torch.LongTensor`):
            Cumulative sequence lengths for the key state.
        max_length_q (`int`):
            Maximum sequence length for query state.
        max_length_k (`int`):
            Maximum sequence length for key state.
        seq_idx (`torch.IntTensor`):
            Index of each packed sequence.
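
    Example:

        A minimal sketch of padding-free packing. The values below are illustrative (they would normally be
        produced by the data collator, not by this module):

        ```python
        >>> import torch

        >>> # two sequences of lengths 3 and 2 packed into a single row of 5 tokens
        >>> cu_seq_lens = torch.tensor([0, 3, 5])
        >>> seq_idx = torch.tensor([[0, 0, 0, 1, 1]], dtype=torch.int32)
        >>> kwargs = GraniteFlashAttentionKwargs(
        ...     cu_seq_lens_q=cu_seq_lens, cu_seq_lens_k=cu_seq_lens, max_length_q=3, max_length_k=3, seq_idx=seq_idx
        ... )
        ```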
    """

    cu_seq_lens_q: torch.LongTensor
    cu_seq_lens_k: torch.LongTensor
    max_length_q: int
    max_length_k: int
    seq_idx: torch.IntTensor


class GraniteMoeSharedMLP(nn.Module):
    """
    MLP layer for shared experts

    Args:
        config:
            Configuration object with model hyperparameters.
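
    Example:

        A shape-level sketch (the config values below are illustrative, not those of a released checkpoint):

        ```python
        >>> import torch
        >>> from transformers import GraniteMoeSharedConfig

        >>> config = GraniteMoeSharedConfig(hidden_size=64, shared_intermediate_size=128)
        >>> mlp = GraniteMoeSharedMLP(config)
        >>> mlp(torch.randn(1, 4, config.hidden_size)).shape  # projects to 2 * 128, gates, projects back to 64
        torch.Size([1, 4, 64])
        ```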
    """

    def __init__(self, config: GraniteMoeSharedConfig):
        super().__init__()

        self.input_size = config.hidden_size
        self.hidden_size = config.shared_intermediate_size
        self.activation = ACT2FN[config.hidden_act]
        self.input_linear = nn.Linear(self.input_size, self.hidden_size * 2, bias=False)
        self.output_linear = nn.Linear(self.hidden_size, self.input_size, bias=False)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.input_linear(hidden_states)
        # gated MLP: one half of the projection gates the other half
        chunked_hidden_states = hidden_states.chunk(2, dim=-1)
        hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]
        hidden_states = self.output_linear(hidden_states)
        return hidden_states


class GraniteMoeSharedRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        GraniteMoeSharedRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


class GraniteMoeSharedParallelExperts(nn.Module):
    def __init__(self, num_experts: int, input_size: int, output_size: int) -> None:
        """
        Initialize the GraniteMoeSharedParallelExperts module.
        The expert weights are stored in [num_experts, output_size, input_size] format, so that it's compatible with
        many MoE libraries, such as [Megablock](https://github.com/databricks/megablocks) and
        [ScatterMoE](https://github.com/shawntan/scattermoe), as well as the
        [MoE kernel](https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/fused_moe/fused_moe.py)
        used in vllm.

        Args:
            num_experts (int):
                Number of experts.
            input_size (int):
                Size of the input.
            output_size (int):
                Size of the output.
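
        Example:

            A shape-level sketch of the batched expert projection (sizes are illustrative; the weight tensor is
            uninitialized until `_init_weights` runs, so only shapes are meaningful here):

            ```python
            >>> import torch

            >>> experts = GraniteMoeSharedParallelExperts(num_experts=4, input_size=8, output_size=16)
            >>> inputs = torch.randn(10, 8)  # token states already sorted by expert assignment
            >>> expert_size = [3, 2, 4, 1]  # tokens per expert; must sum to inputs.shape[0]
            >>> experts(inputs, expert_size).shape
            torch.Size([10, 16])
            ```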
        """
        super().__init__()
        self.weight = nn.Parameter(torch.empty(num_experts, output_size, input_size))
        self.num_experts = num_experts
        self.input_size = input_size
        self.output_size = output_size

    def forward(self, inputs, expert_size):
        """
        Forward pass of the GraniteMoeSharedParallelExperts module.

        Args:
            inputs (Tensor):
                Input tensor.
            expert_size:
                Expert size information.

        Returns:
            Tensor: Output tensor.
        """
        input_list = inputs.split(expert_size, dim=0)
        output_list = []
        for i in range(self.num_experts):
            output_list.append(F.linear(input_list[i], self.weight[i]))
        results = torch.cat(output_list, dim=0)
        return results


class GraniteMoeSharedTopKGating(nn.Module):
    def __init__(self, input_size: int, num_experts: int, top_k: int):
        """
        Initialize the top-k gating mechanism.
        Args:
            input_size (`int`):
                Size of the input.
            num_experts (`int`):
                Number of experts.
            top_k (`int`):
                Number of top experts to select.
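
        Example:

            A minimal sketch of the routing outputs (sizes are illustrative):

            ```python
            >>> import torch

            >>> gate = GraniteMoeSharedTopKGating(input_size=8, num_experts=4, top_k=2)
            >>> hidden = torch.randn(5, 8)  # 5 tokens
            >>> index_sorted_experts, batch_index, batch_gates, expert_size, logits = gate(hidden)
            >>> sum(expert_size)  # every token is dispatched to `top_k` experts
            10
            >>> logits.shape
            torch.Size([5, 4])
            ```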
        """
        super().__init__()

        self.num_experts = num_experts
        self.input_size = input_size
        self.top_k = top_k

        self.layer = nn.Linear(input_size, num_experts, bias=False)

    def forward(self, hidden_states):
        # compute the top_k routing decision
        logits = self.layer(hidden_states).float()  # [num_tokens, num_experts]
        top_k_logits, top_k_indices = logits.topk(self.top_k, dim=1)  # [num_tokens, top_k]
        top_k_gates = torch.softmax(top_k_logits, dim=1).type_as(hidden_states)  # [num_tokens, top_k]

        # compute the number of inputs given to each expert
        zeros = torch.zeros(
            [top_k_gates.size(0), self.num_experts], dtype=top_k_gates.dtype, device=top_k_gates.device
        )  # [num_tokens, num_experts]
        gates = zeros.scatter(1, top_k_indices, 1)  # [num_tokens, num_experts]
        expert_size = gates.long().sum(0)  # [num_experts,]
        expert_size = expert_size.tolist()

        # sort and group input tokens according to expert assignment
        top_k_experts = top_k_indices.flatten()  # [num_tokens * top_k]
        _, index_sorted_experts = top_k_experts.sort(0)  # [num_tokens * top_k]
        batch_index = index_sorted_experts.div(self.top_k, rounding_mode="trunc")  # [num_tokens * top_k]

        # gather the gate values for grouped input tokens
        top_k_gates = top_k_gates.flatten()  # [num_tokens * top_k]
        batch_gates = top_k_gates[index_sorted_experts]  # [num_tokens * top_k]

        return index_sorted_experts, batch_index, batch_gates, expert_size, logits


class GraniteMoeSharedMoE(nn.Module):
    """
    A sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.

    Args:
        config:
            Configuration object with model hyperparameters.
    """

    def __init__(self, config: GraniteMoeSharedConfig):
        super().__init__()

        self.input_size = config.hidden_size
        self.hidden_size = config.intermediate_size
        self.activation = ACT2FN[config.hidden_act]
        self.input_linear = GraniteMoeSharedParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2)
        self.output_linear = GraniteMoeSharedParallelExperts(config.num_local_experts, self.hidden_size, self.input_size)

        self.router = GraniteMoeSharedTopKGating(
            input_size=self.input_size,
            num_experts=config.num_local_experts,
            top_k=config.num_experts_per_tok,
        )

    def forward(self, layer_input):
        """
        Forward pass of the mixture of experts layer.

        Args:
            layer_input (Tensor):
                Input tensor.

        Returns:
            Tensor:
                Output tensor.
            Tensor:
                Router logits.
        """
        bsz, length, emb_size = layer_input.size()
        layer_input = layer_input.reshape(-1, emb_size)
        _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input)

        expert_inputs = layer_input[batch_index]
        hidden_states = self.input_linear(expert_inputs, expert_size)
        chunked_hidden_states = hidden_states.chunk(2, dim=-1)
        hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]
        expert_outputs = self.output_linear(hidden_states, expert_size)

        expert_outputs = expert_outputs * batch_gates[:, None]

        zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device)
        layer_output = zeros.index_add(0, batch_index, expert_outputs)
        layer_output = layer_output.view(bsz, length, self.input_size)
        return layer_output, router_logits


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class GraniteMoeSharedAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GraniteMoeSharedConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.attention_dropout = config.attention_dropout
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.is_causal = True
        self.scaling = config.attention_multiplier

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = position_embeddings if position_embeddings is not None else (None, None)
        if position_embeddings is not None:
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.view(bsz, q_len, -1)
        attn_output = self.o_proj(attn_output)

        return attn_output, attn_weights


class GraniteMoeSharedDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: GraniteMoeSharedConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = GraniteMoeSharedAttention(config=config, layer_idx=layer_idx)
        if config.num_local_experts > 0:
            self.block_sparse_moe = GraniteMoeSharedMoE(config)
        self.input_layernorm = GraniteMoeSharedRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = GraniteMoeSharedRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.residual_multiplier = config.residual_multiplier
        self.shared_mlp = None if config.shared_intermediate_size == 0 else GraniteMoeSharedMLP(config)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        output_router_logits: Optional[bool] = False,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[GraniteFlashAttentionKwargs],
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            output_router_logits (`bool`, *optional*):
                Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
                should not be returned during inference.
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs. Can be used to provide `GraniteFlashAttentionKwargs` for
                padding-free training and/or improve torch.compile performance.
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states * self.residual_multiplier

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        moe_hidden_states, router_logits = self.block_sparse_moe(hidden_states)

        if self.shared_mlp is None:
            hidden_states = moe_hidden_states
        else:
            hidden_states = moe_hidden_states + self.shared_mlp(hidden_states)
        del moe_hidden_states

        hidden_states = residual + hidden_states * self.residual_multiplier

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if output_router_logits:
            outputs += (router_logits,)

        return outputs


@auto_docstring
class GraniteMoeSharedPreTrainedModel(PreTrainedModel):
    config: GraniteMoeSharedConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GraniteMoeSharedDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _can_compile_fullgraph = False

    def _init_weights(self, module):
        super()._init_weights(module)
        if isinstance(module, GraniteMoeSharedParallelExperts):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)


class GraniteMoeSharedRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor

    def __init__(self, config: GraniteMoeSharedConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


@auto_docstring
class GraniteMoeSharedModel(GraniteMoeSharedPreTrainedModel):
    def __init__(self, config: GraniteMoeSharedConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GraniteMoeSharedDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = GraniteMoeSharedRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.gradient_checkpointing = False

        self.embedding_multiplier = config.embedding_multiplier
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta

        # rope
        self.position_embedding_type = config.position_embedding_type
        self.rotary_emb = GraniteMoeSharedRotaryEmbedding(config) if self.position_embedding_type == "rope" else None

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_router_logits: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[GraniteFlashAttentionKwargs],
    ) -> Union[tuple, MoeModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        inputs_embeds = inputs_embeds * self.embedding_multiplier

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = None
        if self.rotary_emb is not None:
            position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_router_logits = () if output_router_logits else None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                output_router_logits=output_router_logits,
                position_embeddings=position_embeddings,
                **kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

            if output_router_logits:
                all_router_logits += (layer_outputs[-1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_router_logits]
                if v is not None
            )
        return MoeModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            router_logits=all_router_logits,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


def load_balancing_loss_func(
    gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
    num_experts: Optional[int] = None,
    top_k=2,
    attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
    r"""
    Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.

    See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
    function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
    experts is too unbalanced.

    Args:
        gate_logits:
            Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
            shape [batch_size X sequence_length, num_experts].
        num_experts:
            Number of experts.
        top_k:
            The number of experts to route per token; can also be interpreted as the `top-k` routing
            parameter.
        attention_mask (`torch.Tensor`, *optional*):
            The attention_mask used in forward function
            shape [batch_size X sequence_length] if not None.

    Returns:
        The auxiliary loss.
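
    Example:

        A self-contained sketch on random router logits (two layers with four experts; sizes are illustrative):

        ```python
        >>> import torch

        >>> torch.manual_seed(0)
        >>> gate_logits = tuple(torch.randn(6, 4) for _ in range(2))  # 6 tokens per layer, 4 experts
        >>> loss = load_balancing_loss_func(gate_logits, num_experts=4, top_k=2)
        >>> loss.shape  # scalar auxiliary loss
        torch.Size([])
        ```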
    """
    if gate_logits is None or not isinstance(gate_logits, tuple):
        return 0

    if isinstance(gate_logits, tuple):
        compute_device = gate_logits[0].device
        concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)

    routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)

    _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)

    expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)

    if attention_mask is None:
        # Compute the percentage of tokens routed to each expert
        tokens_per_expert = torch.mean(expert_mask.float(), dim=0)

        # Compute the average probability of routing to these experts
        router_prob_per_expert = torch.mean(routing_weights, dim=0)
    else:
        batch_size, sequence_length = attention_mask.shape
        num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)

        # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
        expert_attention_mask = (
            attention_mask[None, :, :, None, None]
            .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
            .reshape(-1, top_k, num_experts)
            .to(compute_device)
        )

        # Compute the percentage of tokens routed to each expert
        tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
            expert_attention_mask, dim=0
        )

        # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
        router_per_expert_attention_mask = (
            attention_mask[None, :, :, None]
            .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
            .reshape(-1, num_experts)
            .to(compute_device)
        )

        # Compute the average probability of routing to these experts
        router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
            router_per_expert_attention_mask, dim=0
        )

    # If the experts are sharded across devices (expert parallelism), `router_prob_per_expert` only covers the
    # local shard, so select the matching slice of `tokens_per_expert` before reducing. On a single device
    # (`device.index` is `None`) the slice spans all experts and this is a no-op.
    device_index = router_prob_per_expert.device.index if router_prob_per_expert.device.index is not None else 0
    rank = device_index * router_prob_per_expert.shape[0]
    overall_loss = torch.sum(
        tokens_per_expert[:, rank : rank + router_prob_per_expert.shape[0]] * router_prob_per_expert.unsqueeze(0)
    )
    return overall_loss * num_experts


class GraniteMoeSharedForCausalLM(GraniteMoeSharedPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config: GraniteMoeSharedConfig):
        super().__init__(config)
        self.model = GraniteMoeSharedModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.router_aux_loss_coef = config.router_aux_loss_coef
        self.num_experts = config.num_local_experts
        self.num_experts_per_tok = config.num_experts_per_tok

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_router_logits: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs,
    ) -> Union[tuple, MoeCausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GraniteMoeSharedForCausalLM

        >>> model = GraniteMoeSharedForCausalLM.from_pretrained("ibm/PowerMoE-3b")
        >>> tokenizer = AutoTokenizer.from_pretrained("ibm/PowerMoE-3b")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.output_router_logits
        )
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            output_router_logits=output_router_logits,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits / self.config.logits_scaling

        loss = None
        if labels is not None:
            # Upcast to float to avoid potential precision issues when computing the loss
            logits = logits.float()
            loss = self.loss_function(
                logits,
                labels,
                vocab_size=self.config.vocab_size,
                **kwargs,
            )

        aux_loss = None
        if output_router_logits:
            aux_loss = load_balancing_loss_func(
                outputs.router_logits if return_dict else outputs[-1],
                self.num_experts,
                self.num_experts_per_tok,
                attention_mask,
            )
            if labels is not None:
                loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure to reside in the same device

        if not return_dict:
            output = (logits,) + outputs[1:]
            if output_router_logits:
                output = (aux_loss,) + output
            return (loss,) + output if loss is not None else output

        return MoeCausalLMOutputWithPast(
            loss=loss,
            aux_loss=aux_loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            router_logits=outputs.router_logits,
        )


__all__ = ["GraniteMoeSharedForCausalLM", "GraniteMoeSharedModel", "GraniteMoeSharedPreTrainedModel"]
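
# Minimal end-to-end smoke test (hedged sketch). The tiny config values below are illustrative only and do not
# correspond to any released checkpoint:
#
#     import torch
#     from transformers import GraniteMoeSharedConfig
#
#     config = GraniteMoeSharedConfig(
#         vocab_size=128,
#         hidden_size=64,
#         intermediate_size=128,
#         shared_intermediate_size=128,
#         num_hidden_layers=2,
#         num_attention_heads=4,
#         num_key_value_heads=2,
#         num_local_experts=4,
#         num_experts_per_tok=2,
#     )
#     model = GraniteMoeSharedForCausalLM(config)
#     output = model(input_ids=torch.randint(0, config.vocab_size, (1, 8)))
#     print(output.logits.shape)  # torch.Size([1, 8, 128])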