"""PyTorch Cohere model."""

from typing import Callable, Optional, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...cache_utils import Cache
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, logging
from ...utils.deprecation import deprecate_kwarg
from ..llama.modeling_llama import (
    LlamaAttention,
    LlamaForCausalLM,
    LlamaMLP,
    LlamaModel,
    LlamaRotaryEmbedding,
    eager_attention_forward,
)
from .configuration_cohere import CohereConfig


logger = logging.get_logger(__name__)


class CohereLayerNorm(nn.Module):
    def __init__(self, hidden_size=None, eps=1e-5, bias=False):
        """The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim"""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        mean = hidden_states.mean(-1, keepdim=True)
        variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
        hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon)
        hidden_states = self.weight.to(torch.float32) * hidden_states
        return hidden_states.to(input_dtype)


class CohereRotaryEmbedding(LlamaRotaryEmbedding):
    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.repeat_interleave(freqs, 2, dim=-1)  # diff from Llama: interleave the angles instead of cat
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    # Split and rotate. Note that this function is different from e.g. the Llama one:
    # it rotates interleaved (even, odd) channel pairs rather than the two head_dim halves.
    x1 = x[..., ::2]
    x2 = x[..., 1::2]
    rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2)
    return rot_x


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
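
    Example:
        A minimal, shape-only sketch (the sizes below are illustrative, not tied to any config). Because
        `rotate_half` above rotates interleaved channel pairs, `cos`/`sin` are expected in the interleaved
        layout produced by `CohereRotaryEmbedding`:

        >>> q = torch.randn(1, 8, 4, 64)  # [batch_size, heads, seq_len, head_dim]
        >>> k = torch.randn(1, 8, 4, 64)
        >>> cos = torch.randn(1, 4, 64)   # [batch_size, seq_len, head_dim]
        >>> sin = torch.randn(1, 4, 64)
        >>> q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)  # unsqueeze_dim=1 broadcasts over heads
        >>> q_embed.shape
        torch.Size([1, 8, 4, 64])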
    """
    dtype = q.dtype
    q = q.float()
    k = k.float()
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype)


class CohereMLP(LlamaMLP):
    def __init__(self, config):
        super().__init__(config)
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
ee	j                  e	j                  f   dee	j                     dee   dee	j                     dee   dee	j                  ee	j                     f   fd       Z xZS )CohereAttentionz=Multi-headed attention from 'Attention Is All You Need' paperrx   	layer_idxc                 *   t         |   ||       |j                  | _        | j                  ret        |j                  | j
                  f|j                        | _        t        |j                  | j
                  f|j                        | _	        y y )Nr'   r(   )
r   r    use_qk_normr   num_attention_headshead_dimlayer_norm_epsq_normnum_key_value_headsk_normr&   rx   r{   r*   s      r+   r    zCohereAttention.__init__   s|    +!--)#77GVMbMbDK *#77GVMbMbDK r,   past_key_valuepast_key_values4.58new_nameversionr6   position_embeddingsattention_maskcache_positionkwargsreturnc                    |j                   d d }g |d| j                  }| j                  |      j                  |      }	| j	                  |      j                  |      }
| j                  |      j                  |      }| j                  r"| j                  |	      }	| j                  |
      }
|	j                  dd      }	|
j                  dd      }
|j                  dd      }|\  }}t        |	|
||      \  }	}
|'|||d}|j                  |
|| j                  |      \  }
}t        }| j                  j                  dk7  rt         | j                  j                     } || |	|
||f| j"                  sdn| j$                  | j&                  d|\  }} |j(                  g |d j+                         }| j-                  |      }||fS )Nr.   r   r   )rV   rT   r   eagerg        )dropoutscaling)rL   r   q_projviewk_projv_projr~   r   r   rR   rn   updater{   r   rx   _attn_implementationr   trainingattention_dropoutr   reshape
contiguouso_proj)r&   r6   r   r   r   r   r   input_shapehidden_shapequery_states
key_statesvalue_statesrT   rV   cache_kwargsattention_interfaceattn_outputattn_weightss                     r+   r9   zCohereAttention.forward   s    $))#2.88b8$--8{{=166|D[[/44\B
{{=166|D;;|4LZ0J#--a3))!Q/
#--a3&S#7jRUWZ#[ j&#&snUL'6'='=j,X\XfXfht'u$J(?;;++w6"9$++:Z:Z"[$7	%
  $}}C$2H2HLL	%
 	%
!\ *k));;;;FFHkk+.L((r,   N)NN)r:   r;   r<   __doc__r   r   intr    r   r"   Tensortupler   
LongTensorr   r	   r9   r=   r>   s   @r+   rz   rz      s    G
| 
 
 %0A6R ,0591)||1) #5<<#=>1) !.	1)
 "%1) !!1!121) -.1) 
u||Xell33	41) S1)r,   rz   c                       e Zd Zdedef fdZ eddd      	 	 	 	 	 	 ddej                  d	e	ej                     d
e	ej                     de	e   de	e   de	ej                     de	eej                  ej                  f      dee   deej                   e	eej                   ej                   f      f   fd       Z xZS )CohereDecoderLayerrx   r{   c                     t         |           |j                  | _        t        ||      | _        t        |      | _        t        |j                  |j                        | _	        y )N)rx   r{   r}   )
r   r    r'   rz   	self_attnrp   mlpr   r   input_layernormr   s      r+   r    zCohereDecoderLayer.__init__   sR    !--()LV$.F<N<NU[UjUjkr,   r   r   r   r   r6   r   rX   	use_cacher   r   r   r   c                     |}	| j                  |      } | j                  d|||||||d|\  }
}| j                  |      }|	|
z   |z   }|S )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
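
        Note:
            Schematically, this block is parallel rather than sequential (a sketch of the code below, shapes
            and dtypes omitted): a single `input_layernorm` output feeds both the attention branch and the
            MLP branch, and the residual sums them, i.e. ``out = x + self_attn(norm(x)) + mlp(norm(x))``,
            unlike Llama's two-layernorm sequential block.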
        )r6   r   rX   r   r   r   r   r^   )r   r   r   )r&   r6   r   rX   r   r   r   r   r   residualhidden_states_attention_hidden_states_mlps                r+   r9   zCohereDecoderLayer.forward   s{    > !,,];%3T^^ 	&
')%+) 3	&
 	&
" !HH]3 #::=NNr,   )NNNFNN)r:   r;   r<   r   r   r    r   r"   r   r   r   r   boolr   r   r	   FloatTensorr9   r=   r>   s   @r+   r   r      s   l| l l %0A6R 2637+/$)59KO.||. !.. u//0	.
 "%. D>. !!1!12. &eELL%,,,F&GH. -.. 
u  (51B1BEDUDU1U+V"WW	X. S.r,   r   c                   $     e Zd Zdef fdZ xZS )CohereModelrx   c           	      &   t         |   |       t        j                  t	        |j
                        D cg c]  }t        ||       c}      | _        t        |      | _	        t        |j                  |j                        | _        y c c}w )N)rx   r}   )r   r    r   
ModuleListrangenum_hidden_layersr   layersr@   
rotary_embr   r'   r   normr   s      r+   r    zCohereModel.__init__  so     mmDI&JbJbDcdy	2d
 0v>#1C1C&J_J_`	 es   B)r:   r;   r<   r   r    r=   r>   s   @r+   r   r     s    a| a ar,   r   c                   n    e Zd Z fdZ	 	 	 	 	 	 	 	 	 	 	 ddeej                     deej                     deej                     deee	e
ej                     f      deej                     deej                     dee   d	ee   d
ee   deej                     deeej                  f   dee   defdZ xZS )CohereForCausalLMc                     t         |   |       t        |      | _        |j                  | _        |j
                  | _        y r   )r   r    r   modellogit_scaletie_word_embeddingsrw   s     r+   r    zCohereForCausalLM.__init__  s8      (
!--#)#=#= r,   	input_idsr   rX   r   inputs_embedslabelsr   output_attentionsoutput_hidden_statesr   logits_to_keepr   r   c                    ||n| j                   j                  }|	|	n| j                   j                  }	 | j                  d||||||||	|
d	|}|j                  }t        |t              rt        | d      n|}| j                  |dd|ddf         }|| j                  z  }d}|* | j                  d||| j                   j                  d|}t        |||j                  |j                  |j                        S )az  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, CohereForCausalLM

        >>> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
        >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits * self.logit_scale  # main diff from Llama

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["CohereForCausalLM", "CohereModel", "CoherePreTrainedModel"]