
"""PyTorch BioGPT model."""

import math
from typing import Optional, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logger
from ...utils.deprecation import deprecate_kwarg
from ..bart.modeling_bart import BartAttention, BartDecoderLayer, BartScaledWordEmbedding
from ..opt.modeling_opt import OPTLearnedPositionalEmbedding
from .configuration_biogpt import BioGptConfig


if is_torch_flex_attn_available():
    from ...integrations.flex_attention import BlockMask, make_flex_block_causal_mask


class BioGptLearnedPositionalEmbedding(OPTLearnedPositionalEmbedding):
    def forward(
        self,
        attention_mask: torch.LongTensor,
        past_key_values_length: int = 0,
        position_ids: Optional[torch.LongTensor] = None,
    ):
        """`input_ids_shape` is expected to be [bsz x seqlen]."""
        return super().forward(attention_mask, past_key_values_length, position_ids)


class BioGptScaledWordEmbedding(BartScaledWordEmbedding):
    pass


class BioGptAttention(BartAttention):
    pass


class BioGptDecoderLayer(BartDecoderLayer):
    def __init__(self, config: BioGptConfig, layer_idx: Optional[int] = None):
        super().__init__(config, layer_idx)
        self.embed_dim = config.hidden_size

        self.self_attn = BioGptAttention(
            embed_dim=self.embed_dim,
            num_heads=config.num_attention_heads,
            dropout=config.attention_probs_dropout_prob,
            is_decoder=True,
            is_causal=True,
            config=config,
            layer_idx=layer_idx,
        )
        self.dropout = config.hidden_dropout_prob
        self.activation_fn = ACT2FN[config.hidden_act]

        self.fc1 = nn.Linear(self.embed_dim, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, self.embed_dim)

        # BioGPT is decoder-only, so the cross-attention modules inherited from Bart are removed.
        del self.encoder_attn
        del self.encoder_attn_layer_norm

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
        position_ids: Optional[torch.LongTensor] = None,
        cache_position: Optional[torch.Tensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            past_key_values (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        """
        residual = hidden_states

        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Self attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        # Fully connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


@auto_docstring
class BioGptPreTrainedModel(PreTrainedModel):
    config: BioGptConfig
    base_model_prefix = "biogpt"
    supports_gradient_checkpointing = True
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True

    def _update_causal_mask(
        self,
        attention_mask: Optional[Union[torch.Tensor, "BlockMask"]],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
    ):
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            elif attention_mask is None:
                attention_mask = make_flex_block_causal_mask(
                    torch.ones(
                        size=(input_tensor.shape[0], input_tensor.shape[1]),
                        device=input_tensor.device,
                    )
                )
            return attention_mask

        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None

        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        # When possible, rely on SDPA's `is_causal` argument instead of materializing a 4D mask.
        if (
            self.config._attn_implementation == "sdpa"
            and not using_compilable_cache
            and AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            )
        ):
            return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # If the provided attention mask is 2D, build the full 4D causal mask here.
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
        ):
            # Attend to all tokens in fully masked rows, as required by the memory-efficient SDPA path.
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # The mask already comes in an inverted 4D form and needs no further processing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


@auto_docstring
class BioGptModel(BioGptPreTrainedModel):
    def __init__(self, config: BioGptConfig):
        super().__init__(config)
        self.config = config
        self.layerdrop = config.layerdrop
        self.dropout = config.hidden_dropout_prob
        self.embed_dim = config.hidden_size
        self.padding_idx = config.pad_token_id
        embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0

        self.embed_tokens = BioGptScaledWordEmbedding(
            config.vocab_size, self.embed_dim, self.padding_idx, embed_scale=embed_scale
        )
        self.embed_positions = BioGptLearnedPositionalEmbedding(config.max_position_embeddings, self.embed_dim)

        self.layers = nn.ModuleList([BioGptDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
        self.layer_norm = nn.LayerNorm(self.embed_dim)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, tuple[tuple[torch.FloatTensor]]]] = None,
        use_cache: Optional[bool] = None,
        position_ids: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            input = input_ids
            input_shape = input.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            input = inputs_embeds[:, :, -1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`..."
            )
            use_cache = False

        # Initialize the cache on the first forward pass.
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if use_cache and isinstance(past_key_values, tuple):
            logger.warning_once(
                "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. "
                "You should pass an instance of `DynamicCache` instead, e.g. "
                "`past_key_values=DynamicCache.from_legacy_cache(past_key_values)`."
            )
            past_key_values = DynamicCache.from_legacy_cache(past_key_values)

        batch_size, seq_length = inputs_embeds.size()[:-1]
        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0

        if cache_position is None:
            cache_position = torch.arange(
                past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
            )

        if attention_mask is None:
            mask_seq_length = past_key_values_length + seq_length
            attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)

        causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values)

        if position_ids is None:
            # Derive position ids from the 2D attention mask so that padding tokens do not advance positions.
            position_ids = torch.cumsum(attention_mask, dim=1)
            position_ids = (position_ids * attention_mask - 1).long()
            # cut positions if `past_key_values_length` is > 0
            position_ids = position_ids[:, past_key_values_length:]

        positions = self.embed_positions(attention_mask, past_key_values_length, position_ids=position_ids)

        hidden_states = inputs_embeds + positions
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
            )
            use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            # LayerDrop: randomly skip whole layers during training.
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                position_ids=position_ids,
                cache_position=cache_position,
                **kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        # Add hidden states from the last decoder layer.
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        hidden_states = self.layer_norm(hidden_states)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )


@auto_docstring(
    custom_intro="""
    BioGPT Model with a `language modeling` head on top for CLM fine-tuning.
    """
)
class BioGptForCausalLM(BioGptPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["output_projection.weight"]

    def __init__(self, config):
        super().__init__(config)

        self.biogpt = BioGptModel(config)
        self.output_projection = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.output_projection

    def set_output_embeddings(self, new_embeddings):
        self.output_projection = new_embeddings

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, tuple[tuple[torch.FloatTensor]]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        position_ids: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.biogpt(
            input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        sequence_output = outputs[0]
        prediction_scores = self.output_projection(sequence_output)

        lm_loss = None
        if labels is not None:
            lm_loss = self.loss_function(
                prediction_scores,
                labels,
                vocab_size=self.config.vocab_size,
                **kwargs,
            )

        if not return_dict:
            output = (prediction_scores,) + outputs[1:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )


@auto_docstring
class BioGptForTokenClassification(BioGptPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.biogpt = BioGptModel(config)
        if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        else:
            classifier_dropout = config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, tuple[tuple[torch.FloatTensor]]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        position_ids: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.biogpt(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        hidden_states = transformer_outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.classifier(hidden_states)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep the active parts of the loss.
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + transformer_outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The BioGpt Model transformer with a sequence classification head on top (linear layer).

    [`BioGptForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it is required to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class BioGptForSequenceClassification(BioGptPreTrainedModel):
    def __init__(self, config: BioGptConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.biogpt = BioGptModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Union[Cache, tuple[tuple[torch.FloatTensor]]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        position_ids: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.biogpt(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size, sequence_length = input_ids.shape[:2]
        else:
            batch_size, sequence_length = inputs_embeds.shape[:2]

        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
            else:
                sequence_lengths = -1
                logger.warning_once(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
                )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)

        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def get_input_embeddings(self):
        return self.biogpt.embed_tokens

    def set_input_embeddings(self, value):
        self.biogpt.embed_tokens = value


__all__ = [
    "BioGptForCausalLM",
    "BioGptForTokenClassification",
    "BioGptForSequenceClassification",
    "BioGptModel",
    "BioGptPreTrainedModel",
]