
import inspect
import os
from functools import partial
from typing import Optional, TypedDict

import torch
import torch.nn.functional as F

from .utils import (
    is_flash_attn_2_available,
    is_flash_attn_3_available,
    is_flash_attn_greater_or_equal_2_10,
    is_torch_npu_available,
    logging,
)


logger = logging.get_logger(__name__)


def flash_attn_supports_top_left_mask():
    # FA3 uses bottom-right aligned causal masks, so no special handling is needed.
    if is_flash_attn_3_available():
        return False
    # FA2 switched to bottom-right aligned masks in 2.1; older versions use top-left alignment.
    if is_flash_attn_2_available():
        return not is_flash_attn_greater_or_equal_2_10()

    from .integrations.npu_flash_attention import is_npu_fa2_top_left_aligned_causal_mask

    return is_npu_fa2_top_left_aligned_causal_mask()


def is_flash_attn_available():
    return is_flash_attn_3_available() or is_flash_attn_2_available() or is_torch_npu_available()


# Cached flash attention entry points, filled in lazily by `lazy_import_flash_attention`.
_flash_fn = None
_flash_varlen_fn = None
_pad_fn = None
_unpad_fn = None
_process_flash_kwargs_fn = None

# Kwargs that are named differently in the HF API and in the flash attention API.
_hf_api_to_flash_mapping = {
    "dropout": "dropout_p",
    "sliding_window": "window_size",
}


def _lazy_imports(implementation: Optional[str]):
    """
    Lazy loads the respective flash attention implementations.

    Return:
        flash_attn_func: The base flash attention function.
        flash_attn_varlen_func: The flash attention function supporting variable sequence lengths,
                                e.g. for padding-free training.
        pad_input: The function to pad inputs into one sequence and returning the respective kwargs.
        unpad_input: The function to unpad outputs based on the kwargs (from pad_input).
    """
    is_fa2 = is_flash_attn_2_available()
    is_fa3 = is_flash_attn_3_available()

    pad_input, unpad_input = _pad_input, _unpad_input
    if (implementation == "flash_attention_2" and is_fa2) or (implementation is None and is_fa2 and not is_fa3):
        from flash_attn import flash_attn_func, flash_attn_varlen_func
        from flash_attn.bert_padding import pad_input, unpad_input
    elif is_torch_npu_available():
        from .integrations.npu_flash_attention import npu_flash_attn_func as flash_attn_func
        from .integrations.npu_flash_attention import npu_flash_attn_varlen_func as flash_attn_varlen_func
    else:
        if implementation == "flash_attention_3" or (implementation is None and is_fa3):
            from flash_attn_interface import flash_attn_func, flash_attn_varlen_func
        # Otherwise, `implementation` is expected to be a kernel module loaded from the hub.
        else:
            flash_attn_func = getattr(implementation, "flash_attn_func", None)
            flash_attn_varlen_func = getattr(implementation, "flash_attn_varlen_func", None)
            if flash_attn_func is None or flash_attn_varlen_func is None:
                raise ValueError(
                    f"Could not find the currently requested flash attention implementation at `{implementation}`. "
                    "Make sure that you request a valid kernel from the hub, e.g. `kernels-community/flash-attn`."
                )

    return flash_attn_func, flash_attn_varlen_func, pad_input, unpad_input


def _lazy_define_process_function(flash_function):
    """
    Depending on the version and kernel some features are not supported. Due to limitations in
    `torch.compile`, we opt to statically type which (optional) kwarg parameters are supported
    within `_process_flash_attention_kwargs`.

    NOTE: While all supported kwargs are marked as `True`, everything else is marked as `False`.
          This might be confusing for kwargs that we use in any case, e.g. `is_causal`.
    """
    flash_parameters = inspect.signature(flash_function).parameters
    process_parameters = inspect.signature(_process_flash_attention_kwargs).parameters

    supports_mapping = {}
    for param in process_parameters:
        # Translate the HF-side name to the flash attention name, e.g. `dropout` -> `dropout_p`.
        fa_param = _hf_api_to_flash_mapping.get(param, param)
        supports_mapping[fa_param] = fa_param in flash_parameters

    return partial(_process_flash_attention_kwargs, supports_mapping=supports_mapping)


def lazy_import_flash_attention(implementation: Optional[str]):
    """
    Lazily import flash attention and return the respective functions + flags.

    NOTE: For fullgraph, this needs to be called before compile, while no fullgraph can
    work without preloading. See `load_and_register_kernel` in `integrations.hub_kernels`.
    """
    global _flash_fn, _flash_varlen_fn, _pad_fn, _unpad_fn, _process_flash_kwargs_fn
    if any(k is None for k in (_flash_fn, _flash_varlen_fn, _pad_fn, _unpad_fn)):
        _flash_fn, _flash_varlen_fn, _pad_fn, _unpad_fn = _lazy_imports(implementation)
        _process_flash_kwargs_fn = _lazy_define_process_function(_flash_varlen_fn)

    return (_flash_fn, _flash_varlen_fn, _pad_fn, _unpad_fn), _process_flash_kwargs_fn


def _index_first_axis(tensor, indices):
    """
    A local implementation of the PyTorch indexing operation `tensor[indices]` on the first axis,
    after flattening the first two dimensions of the tensor. This is functionally equivalent to
    FA2's `index_first_axis` and replaces the need to import it.
    """
    # Flatten (batch, seqlen, ...) -> (batch * seqlen, ...) and gather the requested rows.
    reshaped_tensor = tensor.reshape(-1, *tensor.shape[2:])
    return reshaped_tensor[indices]


def _unpad_input(hidden_states, attention_mask, unused_mask=None):
    """
    unpad_input function for flash attention variants that do not have them within their pkg themselves, e.g. fa3.

    Arguments:
        hidden_states: (batch, seqlen, ...)
        attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
        unused_mask: (batch, seqlen), bool / int, 1 means the element is allocated but unused.

    Return:
        hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask + unused_mask.
        indices: (total_nnz), the indices of masked tokens from the flattened input sequence.
        cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
        max_seqlen_in_batch: int
        seqused: (batch), returns the number of tokens selected in attention_mask + unused_mask.
    """
    all_masks = (attention_mask + unused_mask) if unused_mask is not None else attention_mask
    seqlens_in_batch = all_masks.sum(dim=-1, dtype=torch.int32)
    used_seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(all_masks.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))

    return (
        _index_first_axis(hidden_states, indices),
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
        used_seqlens_in_batch,
    )


def _pad_input(hidden_states, indices, batch, seqlen):
    """
    pad_input function for flash attention variants that do not have them within their pkg themselves, e.g. fa3.

    Arguments:
        hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
        indices: (total_nnz), the indices that represent the non-masked tokens of the original padded input sequence.
        batch: int, batch size for the padded sequence.
        seqlen: int, maximum sequence length for the padded sequence.

    Return:
        hidden_states: (batch, seqlen, ...)
    """
    dim = hidden_states.shape[1:]
    output = torch.zeros((batch * seqlen), *dim, device=hidden_states.device, dtype=hidden_states.dtype)
    # Scatter the unpadded tokens back into their original (flattened) positions.
    output[indices] = hidden_states
    return output.view(batch, seqlen, *dim)


def _get_unpad_data(attention_mask: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, int]:
    """
    Retrieves indexing data required to repad unpadded (ragged) tensors.

    Arguments:
        attention_mask (`torch.Tensor`):
            Boolean or int tensor of shape (batch_size, sequence_length), 1 means valid and 0 means not valid.

    Return:
        indices (`torch.Tensor`):
            The indices of non-masked tokens from the flattened input sequence.
        cu_seqlens (`torch.Tensor`):
            The cumulative sequence lengths, used to index into ragged (unpadded) tensors. `cu_seqlens` shape is (batch_size + 1,).
        max_seqlen_in_batch (`int`):
            Maximum sequence length in batch.
    """
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    # Prepend a 0 so that `cu_seqlens[i]:cu_seqlens[i + 1]` selects the tokens of sequence i.
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return indices, cu_seqlens, max_seqlen_in_batch


def _upad_input(
    query_layer: torch.Tensor,
    key_layer: torch.Tensor,
    value_layer: torch.Tensor,
    attention_mask: torch.Tensor,
    query_length: int,
    unpad_input_func,
):
    """
    Unpads query, key, and values tensors, using a single dimension for all tokens even though they belong to different batches.
    This function is used instead of `flash_attn.bert_padding.unpad_input` in order to avoid the recomputation of the same intermediary
    tensors for query, key, value tensors.

    Arguments:
        query_layer (`torch.Tensor`):
            Query state with padding. Shape: (batch_size, query_length, num_heads, head_dim).
        key_layer (`torch.Tensor`):
            Key state with padding. Shape: (batch_size, kv_seq_len, num_key_value_heads, head_dim).
        value_layer (`torch.Tensor`):
            Value state with padding. Shape: (batch_size, kv_seq_len, num_key_value_heads, head_dim).
        attention_mask (`torch.Tensor`):
            Boolean or int tensor of shape (batch_size, sequence_length), 1 means valid and 0 means not valid.
        query_length (`int`):
            Target length.
        unpad_input_func:
            The function to use for unpadding the input tensors.

    Return:
        query_layer (`torch.Tensor`):
            Query state without padding. Shape: (total_target_length, num_heads, head_dim).
        key_layer (`torch.Tensor`):
            Key state with padding. Shape: (total_source_length, num_key_value_heads, head_dim).
        value_layer (`torch.Tensor`):
            Value state with padding. Shape: (total_source_length, num_key_value_heads, head_dim).
        indices_q (`torch.Tensor`):
            The indices of non-masked tokens from the flattened input target sequence.
        (cu_seqlens_q, cu_seqlens_k) (`tuple[int]`):
            The cumulative sequence lengths for the target (query) and source (key, value), used to index into ragged (unpadded) tensors. `cu_seqlens` shape is (batch_size + 1,).
        (max_seqlen_in_batch_q, max_seqlen_in_batch_k) (`tuple[int]`):
            Maximum sequence length in batch (`max_seqlen_in_batch_q` for the target sequence i.e. query, `max_seqlen_in_batch_k` for the source sequence i.e. key/value).
    """
    indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)

    # With static caches the key/value states can be longer than the mask -> slice them to the mask length.
    if key_layer.shape[1] > (seq_len := attention_mask.shape[-1]):
        key_layer, value_layer = key_layer[:, :seq_len, :, :], value_layer[:, :seq_len, :, :]

    batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

    key_layer = _index_first_axis(key_layer, indices_k)
    value_layer = _index_first_axis(value_layer, indices_k)
    if query_length == kv_seq_len:
        query_layer = _index_first_axis(query_layer, indices_k)
        cu_seqlens_q = cu_seqlens_k
        max_seqlen_in_batch_q = max_seqlen_in_batch_k
        indices_q = indices_k
    elif query_length == 1:
        max_seqlen_in_batch_q = 1
        cu_seqlens_q = torch.arange(batch_size + 1, dtype=torch.int32, device=query_layer.device)
        indices_q = cu_seqlens_q[:-1]
        query_layer = query_layer.squeeze(1)
    else:
        # The -query_length: slice assumes left padding.
        attention_mask = attention_mask[:, -query_length:]
        query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q, *_ = unpad_input_func(query_layer, attention_mask)

    return (
        query_layer,
        key_layer,
        value_layer,
        indices_q,
        (cu_seqlens_q, cu_seqlens_k),
        (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
    )


def prepare_fa_kwargs_from_position_ids(position_ids):
    """
    This function returns all the necessary kwargs to call `flash_attn_varlen_func` extracted from position_ids.

    Arguments:
        position_ids (`torch.Tensor`):
            Tensor of shape (batch_size, sequence_length) holding the position of each token in its (possibly
            packed) sequence, restarting from 0 at the beginning of every new sequence.

    Return:
        (cu_seqlens_q, cu_seqlens_k) (`tuple[int]`):
            The cumulative sequence lengths for the target (query) and source (key, value), used to index into
            ragged (unpadded) tensors. `cu_seqlens` shape is (batch_size + 1,).
        (max_seqlen_in_batch_q, max_seqlen_in_batch_k) (`tuple[int]`):
            Maximum sequence length in batch (`max_seqlen_in_batch_q` for the target sequence i.e. query,
            `max_seqlen_in_batch_k` for the source sequence i.e. key/value).
    """
    tensor_kwargs = {"dtype": torch.int32, "device": position_ids.device}

    position_ids = position_ids.flatten()
    # Every position id equal to 0 marks the start of a new packed sequence.
    indices_q = (position_ids == 0).nonzero().view(-1)

    cu_seq_lens_q = torch.cat(
        (
            indices_q.to(**tensor_kwargs),
            torch.tensor(position_ids.size(), **tensor_kwargs),
        )
    )
    cu_seq_lens_k = cu_seq_lens_q

    # Use the cumulative lengths (and not position_ids directly) to get the max length, since position_ids
    # is not guaranteed to be monotonically increasing for some models (e.g. qwen2-vl).
    # NOTE: `.item()` is required because `flash_attn_varlen_func` expects Python ints for the max lengths,
    # which causes a graph break under `torch.compile` unless scalar outputs are captured.
    max_length_q = cu_seq_lens_q.diff().max().item()
    max_length_k = max_length_q

    return (cu_seq_lens_q, cu_seq_lens_k), (max_length_q, max_length_k)
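

# Worked example (not part of the original module): two packed sequences of lengths 3 and 2
# flattened into a single row have position ids that restart at zero for each sequence:
#
#     position_ids = torch.tensor([[0, 1, 2, 0, 1]])
#
# The zeros sit at flat indices 0 and 3, so the cumulative lengths computed above become
# [0, 3, 5] for both queries and keys, and the maximum sequence length in the batch is 3.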


def _prepare_from_posids(query, key, value, position_ids):
    """
    This function returns necessary arguments to call `flash_attn_varlen_func`.
    All three query, key, value states will be flattened.
    Cumulative lengths of each example in the batch will be extracted from position_ids.
    NOTE: ideally, cumulative lengths should be prepared at the data collator stage.

    Arguments:
        query (`torch.Tensor`):
            Query state with padding. Shape: (batch_size, query_length, num_heads, head_dim).
        key (`torch.Tensor`):
            Key state with padding. Shape: (batch_size, kv_seq_len, num_key_value_heads, head_dim).
        value (`torch.Tensor`):
            Value state with padding. Shape: (batch_size, kv_seq_len, num_key_value_heads, head_dim).
        position_ids (`torch.Tensor`):
            Tensor of shape (batch_size, sequence_length) holding the position of each token in its (packed) sequence.

    Return:
        query (`torch.Tensor`):
            Query state without padding. Shape: (total_target_length, num_heads, head_dim).
        key (`torch.Tensor`):
            Key state with padding. Shape: (total_source_length, num_key_value_heads, head_dim).
        value (`torch.Tensor`):
            Value state with padding. Shape: (total_source_length, num_key_value_heads, head_dim).
        (cu_seqlens_q, cu_seqlens_k) (`tuple[int]`):
            The cumulative sequence lengths for the target (query) and source (key, value), used to index into ragged (unpadded) tensors. `cu_seqlens` shape is (batch_size + 1,).
        (max_seqlen_in_batch_q, max_seqlen_in_batch_k) (`tuple[int]`):
            Maximum sequence length in batch (`max_seqlen_in_batch_q` for the target sequence i.e. query, `max_seqlen_in_batch_k` for the source sequence i.e. key/value).
    """
    query = query.contiguous().view(-1, query.size(-2), query.size(-1))
    key = key.contiguous().view(-1, key.size(-2), key.size(-1))
    value = value.contiguous().view(-1, value.size(-2), value.size(-1))

    (cu_seq_lens_q, cu_seq_lens_k), (max_length_q, max_length_k) = prepare_fa_kwargs_from_position_ids(position_ids)

    return (query, key, value, (cu_seq_lens_q, cu_seq_lens_k), (max_length_q, max_length_k))


def _is_packed_sequence(position_ids, batch_size):
    """
    Check whether the position ids indicate packed sequences or not:
        1. Position ids exist
        2. Flattened sequences only are supported
        3. Compile-friendly `not (torch.diff(position_ids, dim=-1) >= 0).all()`, i.e. we have multiple increasing sequences
    """
    if position_ids is None:
        return False

    # A single non-packed sequence would be a plain arange (up to a constant offset); any deviation
    # from that pattern means the position ids restart somewhere, i.e. sequences are packed.
    increasing_position_sequences = (
        torch.arange(position_ids.shape[-1], device=position_ids.device) + position_ids.min()
    )
    return batch_size == 1 and (increasing_position_sequences - position_ids).abs().sum().bool()


def fa_peft_integration_check(q, k, v, target_dtype: Optional[torch.dtype] = None):
    """
    PEFT usually casts the layer norms in float32 for training stability reasons,
    therefore the input hidden states get silently cast to float32. Hence, we need to
    cast them back to float16 / bfloat16 just to be sure everything works as expected.
    This might slow down training & inference so it is recommended to not cast the LayerNorms!
    """
    if target_dtype and q.dtype == torch.float32:
        logger.warning_once(f"Casting fp32 inputs back to {target_dtype} for flash-attn compatibility.")
        q, k, v = q.to(target_dtype), k.to(target_dtype), v.to(target_dtype)
    return q, k, v


class FlashAttentionKwargs(TypedDict, total=False):
    """
    Keyword arguments for Flash Attention with Compile.

    Attributes:
        cu_seq_lens_q (`torch.LongTensor`, *optional*):
            Gets cumulative sequence length for query state.
        cu_seq_lens_k (`torch.LongTensor`, *optional*):
            Gets cumulative sequence length for key state.
        max_length_q (`int`, *optional*):
            Maximum sequence length for query state.
        max_length_k (`int`, *optional*):
            Maximum sequence length for key state.
    """

    cu_seq_lens_q: Optional[torch.LongTensor]
    cu_seq_lens_k: Optional[torch.LongTensor]
    max_length_q: Optional[int]
    max_length_k: Optional[int]
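

# Illustrative sketch (not part of the original module): in padding-free training a data collator
# can precompute these kwargs once and forward them through the model, e.g.
#
#     flash_kwargs: FlashAttentionKwargs = {
#         "cu_seq_lens_q": torch.tensor([0, 3, 5], dtype=torch.int32),
#         "cu_seq_lens_k": torch.tensor([0, 3, 5], dtype=torch.int32),
#         "max_length_q": 3,
#         "max_length_k": 3,
#     }
#
# which lets `_flash_attention_forward` below skip recomputing them from `position_ids`.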


def _process_flash_attention_kwargs(
    query_length: int,
    key_length: int,
    is_causal: bool,
    dropout: float = 0.0,
    softmax_scale: Optional[float] = None,
    sliding_window: Optional[int] = None,
    use_top_left_mask: bool = False,
    softcap: Optional[float] = None,
    deterministic: Optional[bool] = None,
    s_aux: Optional[torch.Tensor] = None,
    supports_mapping: Optional[dict[str, bool]] = None,
    **kwargs,
):
    """
    Returns a set of kwargs that are passed down to the according flash attention function based on
    requested features and whether it is supported - depends on the version and kernel implementation
    which is dynamically configured at `lazy_import_flash_attention`. The (un)supported features can be
    inspected in `supports_mapping`, see `_lazy_define_process_function` for more details.

    Args:
        query_length (`int`):
            Length of the query states
        key_length (`int`):
            Length of the key states
        is_causal (`bool`):
            Whether we perform causal (decoder) attention or full attention.
        dropout (`float`):
            Attention dropout.
        softmax_scale (`float`, *optional*):
            The scaling of QK^T before applying softmax. Default to `1 / sqrt(head_dim)`.
        sliding_window (`int`, *optional*):
            The size of the sliding window, i.e. we look at a max of `sliding_window` tokens back.
        use_top_left_mask (`bool`):
            Deprecated behavior of older versions of flash attention requiring different masking.
        softcap (`float`, *optional*):
            Softcap for the attention logits, used e.g. in gemma2.
        deterministic (`bool`, *optional*):
            Determines if the deterministic option introduced in flash_attn>=2.4.1 is enabled.
        s_aux (`torch.Tensor`, *optional*):
            Attention sink auxiliary that adds a `bias` to the attention calculation via an additional head.
    Return:
        flash_kwargs (`dict`):
            A dict of kwargs that are requested and supported.
    """
    # Older flash attention versions rely on top-left aligned causal masks and need a special case for decoding
    # (query_length == 1); newer versions can always use `is_causal` directly.
    causal = is_causal and not (use_top_left_mask and query_length == 1)

    flash_kwargs = {"causal": causal}
    if supports_mapping["softmax_scale"]:
        flash_kwargs["softmax_scale"] = softmax_scale

    if supports_mapping["dropout_p"]:
        flash_kwargs["dropout_p"] = dropout

    # Sliding window attention is only relevant if the window is smaller than the key length.
    if supports_mapping["window_size"] and sliding_window is not None and key_length > sliding_window:
        flash_kwargs["window_size"] = (sliding_window - 1, sliding_window - 1)

    if supports_mapping["deterministic"]:
        flash_kwargs["deterministic"] = (
            deterministic if deterministic is not None else os.getenv("FLASH_ATTENTION_DETERMINISTIC", "0") == "1"
        )

    if supports_mapping["softcap"] and softcap is not None:
        flash_kwargs["softcap"] = softcap

    # Attention sinks support.
    if supports_mapping["s_aux"] and s_aux is not None:
        flash_kwargs["s_aux"] = s_aux

    return flash_kwargs


def _flash_attention_forward(
    query_states: torch.Tensor,
    key_states: torch.Tensor,
    value_states: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    query_length: int,
    is_causal: bool,
    dropout: float = 0.0,
    position_ids: Optional[torch.Tensor] = None,
    softmax_scale: Optional[float] = None,
    sliding_window: Optional[int] = None,
    use_top_left_mask: bool = False,
    softcap: Optional[float] = None,
    deterministic: Optional[bool] = None,
    cu_seq_lens_q: Optional[torch.LongTensor] = None,
    cu_seq_lens_k: Optional[torch.LongTensor] = None,
    max_length_q: Optional[int] = None,
    max_length_k: Optional[int] = None,
    target_dtype: Optional[torch.dtype] = None,
    implementation: Optional[str] = None,
    **kwargs,
):
    """
    Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
    first unpad the input, then computes the attention scores and pad the final attention scores.

    (Optional) kwargs are described further in `_process_flash_attention_kwargs` and `FlashAttentionKwargs`.

    Args:
        query_states (`torch.Tensor`):
            Input query states to be passed to Flash Attention API
        key_states (`torch.Tensor`):
            Input key states to be passed to Flash Attention API
        value_states (`torch.Tensor`):
            Input value states to be passed to Flash Attention API
        attention_mask (`torch.Tensor`, *optional*):
            The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
            position of padding tokens and 1 for the position of non-padding tokens.
        implementation (`str`, *optional*):
            The attention implementation to use. If None, will default to the one based on the environment.
    """
    (flash_fn, flash_varlen_fn, pad_fn, unpad_fn), process_flash_kwargs_fn = lazy_import_flash_attention(
        implementation
    )

    # PEFT possibly silently casts tensors to fp32; this reconverts them to the correct dtype or is a no-op.
    query_states, key_states, value_states = fa_peft_integration_check(
        query_states, key_states, value_states, target_dtype
    )

    flash_kwargs = process_flash_kwargs_fn(
        query_length=query_length,
        key_length=key_states.size(1),
        is_causal=is_causal,
        dropout=dropout,
        softmax_scale=softmax_scale,
        sliding_window=sliding_window,
        use_top_left_mask=use_top_left_mask,
        softcap=softcap,
        deterministic=deterministic,
        **kwargs,
    )

    # The varlen path is taken if position ids indicate packed sequences, or if the cumulative
    # sequence lengths were precomputed (e.g. by the data collator).
    is_fa_with_position_ids = _is_packed_sequence(position_ids, batch_size=query_states.size(0))
    is_fa_with_varlen_kwargs = all(
        kwarg is not None for kwarg in (cu_seq_lens_q, cu_seq_lens_k, max_length_q, max_length_k)
    )

    # Contains at least one padding token in the sequence.
    if attention_mask is not None:
        q, k, v, indices_q, (cu_seq_lens_q, cu_seq_lens_k), (max_length_q, max_length_k) = _upad_input(
            query_states, key_states, value_states, attention_mask, query_length, unpad_fn
        )

        # Workaround for mps-based kernels which require a contiguous, freshly allocated tensor here.
        if "mps" in str(q.device):
            cu_seq_lens_k = cu_seq_lens_k.clone()

        out_unpad = flash_varlen_fn(
            q,
            k,
            v,
            cu_seqlens_q=cu_seq_lens_q,
            cu_seqlens_k=cu_seq_lens_k,
            max_seqlen_q=max_length_q,
            max_seqlen_k=max_length_k,
            **flash_kwargs,
        )
        if isinstance(out_unpad, tuple):
            out_unpad = out_unpad[0]

        out = pad_fn(out_unpad, indices_q, query_states.size(0), query_length)

    elif is_fa_with_varlen_kwargs or is_fa_with_position_ids:
        # If the cumulative lengths were not precomputed, derive them from position_ids.
        if cu_seq_lens_q is None or cu_seq_lens_k is None:
            q, k, v, (cu_seq_lens_q, cu_seq_lens_k), (max_length_q, max_length_k) = _prepare_from_posids(
                query_states, key_states, value_states, position_ids
            )
        else:
            q = query_states.reshape(-1, query_states.size(-2), query_states.size(-1))
            k = key_states.reshape(-1, key_states.size(-2), key_states.size(-1))
            v = value_states.reshape(-1, value_states.size(-2), value_states.size(-1))

        # Same mps-specific workaround as above.
        if "mps" in str(cu_seq_lens_k.device):
            cu_seq_lens_k = cu_seq_lens_k.clone()

        out = flash_varlen_fn(
            q,
            k,
            v,
            cu_seqlens_q=cu_seq_lens_q,
            cu_seqlens_k=cu_seq_lens_k,
            max_seqlen_q=max_length_q,
            max_seqlen_k=max_length_k,
            **flash_kwargs,
        )
        if isinstance(out, tuple):
            out = out[0]

        out = out.view(query_states.size(0), -1, out.size(-2), out.size(-1))

    else:
        out = flash_fn(query_states, key_states, value_states, **flash_kwargs)
        if isinstance(out, tuple):
            out = out[0]

    return out
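

if __name__ == "__main__":
    # Illustrative smoke test, not part of the original module: exercises the padded code path of
    # `_flash_attention_forward` with tiny random tensors. It assumes a CUDA device and an installed
    # flash attention build, hence the guards.
    if is_flash_attn_available() and torch.cuda.is_available():
        batch, seq_len, num_heads, head_dim = 2, 8, 4, 64
        q = torch.randn(batch, seq_len, num_heads, head_dim, device="cuda", dtype=torch.float16)
        k = torch.randn(batch, seq_len, num_heads, head_dim, device="cuda", dtype=torch.float16)
        v = torch.randn(batch, seq_len, num_heads, head_dim, device="cuda", dtype=torch.float16)
        # Mark the last two positions of the second sequence as padding.
        attention_mask = torch.ones(batch, seq_len, dtype=torch.long, device="cuda")
        attention_mask[1, -2:] = 0
        out = _flash_attention_forward(
            q, k, v, attention_mask=attention_mask, query_length=seq_len, is_causal=True
        )
        print(out.shape)  # expected: torch.Size([2, 8, 4, 64])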