
"""PyTorch Persimmon model."""

from typing import Callable, Optional, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import (
    GenericForSequenceClassification,
    GenericForTokenClassification,
    GradientCheckpointingLayer,
)
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from ...utils.deprecation import deprecate_kwarg
from .configuration_persimmon import PersimmonConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


class PersimmonRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(self, config: PersimmonConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class PersimmonMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size)
        self.act = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        hidden_states = self.dense_h_to_4h(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.dense_4h_to_h(hidden_states)
        return hidden_states


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class PersimmonAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: PersimmonConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a "
                "`layer_idx` when creating this class."
            )

        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.rope_theta = config.rope_theta
        self.rotary_ndims = int(self.head_dim * config.partial_rotary_factor)
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and"
                f" `num_heads`: {self.num_heads})."
            )
        self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True)
        self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=True)
        self.qk_layernorm = config.qk_layernorm
        self.scaling = self.head_dim**-0.5

        if self.qk_layernorm:
            self.q_layernorm = nn.LayerNorm(
                config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
            )
            self.k_layernorm = nn.LayerNorm(
                config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
            )
        self.attention_dropout = nn.Dropout(config.attention_dropout)
        self.rotary_emb = PersimmonRotaryEmbedding(config=self.config)

    def _split_heads(self, fused_qkv: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory
        storage as `fused_qkv`

        Args:
            fused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim]

        Returns:
            query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]
            value: [batch_size, seq_length, num_heads, head_dim]
        """
        batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
        fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)
        return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        # [batch_size, seq_length, 3 x hidden_size]
        fused_qkv = self.query_key_value(hidden_states)

        # 3 x [batch_size, seq_length, num_heads, head_dim]
        (query_states, key_states, value_states) = self._split_heads(fused_qkv)

        if self.qk_layernorm:
            query_states = self.q_layernorm(query_states)
            key_states = self.k_layernorm(key_states)

        # [batch_size, seq_length, num_heads, head_dim] -> [batch_size, num_heads, seq_length, head_dim]
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        cos, sin = position_embeddings

        # Partial rotary embedding: only the first `rotary_ndims` channels are rotated
        query_rot, query_pass = query_states[..., : self.rotary_ndims], query_states[..., self.rotary_ndims :]
        key_rot, key_pass = key_states[..., : self.rotary_ndims], key_states[..., self.rotary_ndims :]
        query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin)

        # [batch_size, num_heads, seq_length, head_dim]
        query_states = torch.cat((query_rot, query_pass), dim=-1)
        key_states = torch.cat((key_rot, key_pass), dim=-1)

        if past_key_values is not None:
            # Specific to RoPE models with partial rotation
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "partial_rotation_size": self.rotary_ndims,
                "cache_position": cache_position,
            }
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.config.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(bsz, q_len, -1)
        attn_output = self.dense(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights


class PersimmonDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: PersimmonConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = PersimmonAttention(config=config, layer_idx=layer_idx)
        self.mlp = PersimmonMLP(config)
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
                Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
                `[0, config.n_positions - 1]`.
                [What are position IDs?](../glossary#position-ids)
            past_key_values (`Tuple(torch.FloatTensor)`, *optional*):
                cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)

        hidden_states = self.dropout(hidden_states)
        hidden_states = hidden_states + residual

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


@auto_docstring
class PersimmonPreTrainedModel(PreTrainedModel):
    config: PersimmonConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["PersimmonDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _can_compile_fullgraph = True
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()


@auto_docstring
class PersimmonModel(PersimmonPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`PersimmonDecoderLayer`]

    Args:
        config: PersimmonConfig
    """

    def __init__(self, config: PersimmonConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [PersimmonDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.rotary_emb = PersimmonRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.final_layernorm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        # For SDPA, when possible, rely on its `is_causal` argument instead of the `attn_mask` argument, in order to
        # dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail to infer
        # the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        # When output_attentions is True, the sdpa implementation falls back to the eager implementation's forward
        if (
            self.config._attn_implementation == "sdpa"
            and not using_compilable_cache
            and not output_attentions
            and AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            )
        ):
            return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention_mask` is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows of the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention's memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


class PersimmonForCausalLM(PersimmonPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = PersimmonModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs,
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, PersimmonForCausalLM

        >>> model = PersimmonForCausalLM.from_pretrained("adept/persimmon-8b-base")
        >>> tokenizer = AutoTokenizer.from_pretrained("adept/persimmon-8b-base")

        >>> prompt = "human: Hey, what should I eat for dinner?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        'human: Hey, what should I eat for dinner?\n\ncat: 🐱\n\nhuman: 😐\n\n'
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class PersimmonForSequenceClassification(GenericForSequenceClassification, PersimmonPreTrainedModel):
    pass


class PersimmonForTokenClassification(GenericForTokenClassification, PersimmonPreTrainedModel):
    pass


__all__ = [
    "PersimmonForCausalLM",
    "PersimmonModel",
    "PersimmonPreTrainedModel",
    "PersimmonForSequenceClassification",
    "PersimmonForTokenClassification",
]
~ j)IKc i d&CE] cr<   