
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...modeling_rope_utils import rope_config_validation


class EfficientLoFTRConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`EfficientLoFTRForKeypointMatching`].
    It is used to instantiate an EfficientLoFTR model according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
    EfficientLoFTR [zju-community/efficientloftr](https://huggingface.co/zju-community/efficientloftr) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        stage_num_blocks (`List`, *optional*, defaults to [1, 2, 4, 14]):
            The number of blocks in each stage
        out_features (`List`, *optional*, defaults to [64, 64, 128, 256]):
            The number of channels in each stage
        stage_stride (`List`, *optional*, defaults to [2, 1, 2, 2]):
            The stride used in each stage
        hidden_size (`int`, *optional*, defaults to 256):
            The dimension of the descriptors.
        activation_function (`str`, *optional*, defaults to `"relu"`):
            The activation function used in the backbone
        q_aggregation_kernel_size (`int`, *optional*, defaults to 4):
            The kernel size of the aggregation of query states in the fusion network
        kv_aggregation_kernel_size (`int`, *optional*, defaults to 4):
            The kernel size of the aggregation of key and value states in the fusion network
        q_aggregation_stride (`int`, *optional*, defaults to 4):
            The stride of the aggregation of query states in the fusion network
        kv_aggregation_stride (`int`, *optional*, defaults to 4):
            The stride of the aggregation of key and value states in the fusion network
        num_attention_layers (`int`, *optional*, defaults to 4):
            Number of attention layers in the LocalFeatureTransformer
        num_attention_heads (`int`, *optional*, defaults to 8):
            The number of heads in the GNN layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during attention.
        mlp_activation_function (`str`, *optional*, defaults to `"leaky_relu"`):
            Activation function used in the attention mlp layer.
        coarse_matching_skip_softmax (`bool`, *optional*, defaults to `False`):
            Whether to skip softmax or not at the coarse matching step.
        coarse_matching_threshold (`float`, *optional*, defaults to 0.2):
            The threshold for the minimum score required for a match.
        coarse_matching_temperature (`float`, *optional*, defaults to 0.1):
            The temperature to apply to the coarse similarity matrix
        coarse_matching_border_removal (`int`, *optional*, defaults to 2):
            The size of the border to remove during coarse matching
        fine_kernel_size (`int`, *optional*, defaults to 8):
            Kernel size used for the fine feature matching
        batch_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the batch normalization layers.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        partial_rotary_factor (`float`, *optional*, defaults to 4.0):
            Dimension factor for the RoPE embeddings. In EfficientLoFTR, frequencies should be generated for
            the whole `hidden_size`, so this factor is used to compensate.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
            and expect the model to work on longer `max_position_embeddings`, we recommend updating this value
            accordingly.
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'llama3', '2d'], with 'default' being the original RoPE implementation.
                `dim` (`int`): The dimension of the RoPE embeddings.
        fine_matching_slice_dim (`int`, *optional*, defaults to 8):
            The size of the slice used to divide the fine features for the first and second fine matching stages.
        fine_matching_regress_temperature (`float`, *optional*, defaults to 10.0):
            The temperature to apply to the fine similarity matrix
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Examples:
        ```python
        >>> from transformers import EfficientLoFTRConfig, EfficientLoFTRForKeypointMatching

        >>> # Initializing a EfficientLoFTR configuration
        >>> configuration = EfficientLoFTRConfig()

        >>> # Initializing a model from the EfficientLoFTR configuration
        >>> model = EfficientLoFTRForKeypointMatching(configuration)

        >>> # Accessing the model configuration
        >>> configuration = model.config
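
        >>> # Illustrative addition (not part of the stock example): the `rope_scaling` dict described
        >>> # in the Args section can be passed explicitly; "default" keeps the standard RoPE behaviour
        >>> custom_configuration = EfficientLoFTRConfig(rope_scaling={"rope_type": "default"})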
        ```
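
        The descriptor dimension is tied to the backbone widths: `hidden_size` must equal the last entry of
        `out_features`, otherwise the configuration raises a `ValueError`. A minimal sketch spelling these
        default values out explicitly:

        ```python
        >>> # hidden_size has to match out_features[-1]; these are the stock defaults written out
        >>> configuration = EfficientLoFTRConfig(out_features=[64, 64, 128, 256], hidden_size=256)
        ```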
    """

    model_type = "efficientloftr"

    def __init__(
        self,
        stage_num_blocks: Optional[list[int]] = None,
        out_features: Optional[list[int]] = None,
        stage_stride: Optional[list[int]] = None,
        hidden_size: int = 256,
        activation_function: str = "relu",
        q_aggregation_kernel_size: int = 4,
        kv_aggregation_kernel_size: int = 4,
        q_aggregation_stride: int = 4,
        kv_aggregation_stride: int = 4,
        num_attention_layers: int = 4,
        num_attention_heads: int = 8,
        attention_dropout: float = 0.0,
        attention_bias: bool = False,
        mlp_activation_function: str = "leaky_relu",
        coarse_matching_skip_softmax: bool = False,
        coarse_matching_threshold: float = 0.2,
        coarse_matching_temperature: float = 0.1,
        coarse_matching_border_removal: int = 2,
        fine_kernel_size: int = 8,
        batch_norm_eps: float = 1e-5,
        rope_theta: float = 10000.0,
        partial_rotary_factor: float = 4.0,
        rope_scaling: Optional[dict] = None,
        fine_matching_slice_dim: int = 8,
        fine_matching_regress_temperature: float = 10.0,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        self.stage_num_blocks = stage_num_blocks if stage_num_blocks is not None else [1, 2, 4, 14]
        self.out_features = out_features if out_features is not None else [64, 64, 128, 256]
        self.stage_stride = stage_stride if stage_stride is not None else [2, 1, 2, 2]
        self.stage_in_channels = [1] + self.out_features[:-1]
        self.stage_block_stride = [
            [stride] + [1] * (num_blocks - 1) for stride, num_blocks in zip(self.stage_stride, self.stage_num_blocks)
        ]
        self.stage_block_out_channels = [
            [self.out_features[stage_idx]] * num_blocks for stage_idx, num_blocks in enumerate(self.stage_num_blocks)
        ]
        self.stage_block_in_channels = [
            [self.stage_in_channels[stage_idx]] + self.stage_block_out_channels[stage_idx][:-1]
            for stage_idx in range(len(self.stage_num_blocks))
        ]
        self.fine_fusion_dims = list(reversed(self.out_features))[:-1]

        self.hidden_size = hidden_size
        if self.hidden_size != self.out_features[-1]:
            raise ValueError(
                f"hidden_size should be equal to the last value in out_features. hidden_size = {self.hidden_size}, "
                f"out_features = {self.out_features[-1]}"
            )

        self.activation_function = activation_function
        self.q_aggregation_kernel_size = q_aggregation_kernel_size
        self.kv_aggregation_kernel_size = kv_aggregation_kernel_size
        self.q_aggregation_stride = q_aggregation_stride
        self.kv_aggregation_stride = kv_aggregation_stride
        self.num_attention_layers = num_attention_layers
        self.num_attention_heads = num_attention_heads
        self.attention_dropout = attention_dropout
        self.attention_bias = attention_bias
        self.intermediate_size = self.hidden_size * 2
        self.mlp_activation_function = mlp_activation_function
        self.coarse_matching_skip_softmax = coarse_matching_skip_softmax
        self.coarse_matching_threshold = coarse_matching_threshold
        self.coarse_matching_temperature = coarse_matching_temperature
        self.coarse_matching_border_removal = coarse_matching_border_removal
        self.fine_kernel_size = fine_kernel_size
        self.batch_norm_eps = batch_norm_eps
        self.rope_theta = rope_theta
        self.fine_matching_slice_dim = fine_matching_slice_dim
        self.fine_matching_regress_temperature = fine_matching_regress_temperature
        self.num_key_value_heads = num_attention_heads
        self.partial_rotary_factor = partial_rotary_factor
        self.rope_scaling = rope_scaling if rope_scaling is not None else {"rope_type": "default"}
        rope_config_validation(self)

        self.initializer_range = initializer_range

        super().__init__(**kwargs)


__all__ = ["EfficientLoFTRConfig"]