
"""PyTorch SAM 2 model."""

# NOTE: the import paths below, and every method body written as `...`, are best-effort
# reconstructions of parts that could not be recovered verbatim; the imported names themselves
# come from the original module.
from dataclasses import dataclass
from typing import Callable, Optional, Union

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn

from ...activations import ACT2FN
from ...image_processing_utils import BatchFeature, get_size_dict
from ...image_processing_utils_fast import BaseImageProcessorFast, DefaultFastImageProcessorKwargs
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    SizeDict,
    pil_torch_interpolation_mapping,
)
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import ModelOutput, TensorType, auto_docstring, is_torch_available, logging
from ...utils.generic import TransformersKwargs, check_model_inputs
from ..auto import AutoModel
from ..maskformer.modeling_maskformer import MaskFormerSinePositionEmbedding
from ..sam.image_processing_sam_fast import SamImageProcessorFast
from ..sam.modeling_sam import (
    SamLayerNorm,
    SamMaskDecoder,
    SamMaskEmbedding,
    SamModel,
    SamPromptEncoder,
    SamTwoWayAttentionBlock,
    SamTwoWayTransformer,
    eager_attention_forward,
)
from ..vitdet.modeling_vitdet import window_partition, window_unpartition
from .configuration_sam2 import (
    Sam2Config,
    Sam2HieraDetConfig,
    Sam2MaskDecoderConfig,
    Sam2PromptEncoderConfig,
    Sam2VisionConfig,
)


logger = logging.get_logger(__name__)
class Sam2FastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
    r"""
    mask_size (`dict[str, int]`, *optional*):
        The size `{"height": int, "width": int}` to resize the segmentation maps to.
    """

    mask_size: Optional[dict[str, int]]


@auto_docstring
class Sam2ImageProcessorFast(SamImageProcessorFast):
    resample = PILImageResampling.BILINEAR
    image_mean = IMAGENET_DEFAULT_MEAN
    image_std = IMAGENET_DEFAULT_STD
    size = {"height": 1024, "width": 1024}
    mask_size = {"height": 256, "width": 256}
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    valid_kwargs = Sam2FastImageProcessorKwargs
    do_pad = None
    pad_size = None
    mask_pad_size = None

    def __init__(self, **kwargs: Unpack[Sam2FastImageProcessorKwargs]):
        BaseImageProcessorFast.__init__(self, **kwargs)

    def pad_image(self, *args, **kwargs):
        raise NotImplementedError("No pad_image for SAM 2.")

    def _get_preprocess_shape(self, *args, **kwargs):
        raise NotImplementedError("No _get_preprocess_shape for SAM 2.")

    def resize(self, *args, **kwargs):
        raise NotImplementedError("No need to override resize for SAM 2.")

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        return_tensors: Optional[Union[str, TensorType]],
        **kwargs,
    ) -> "torch.Tensor":
        return BaseImageProcessorFast._preprocess(self, images, return_tensors=return_tensors, **kwargs).pixel_values

    def _preprocess_image_like_inputs(
        self,
        images: ImageInput,
        segmentation_maps: Optional[ImageInput],
        do_convert_rgb: bool,
        input_data_format: ChannelDimension,
        device: Optional[Union[str, "torch.device"]] = None,
        **kwargs: Unpack[Sam2FastImageProcessorKwargs],
    ) -> BatchFeature:
        """
        Preprocess image-like inputs.
        """
        images = self._prepare_image_like_inputs(
            images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
        )
        original_sizes = [image.shape[-2:] for image in images]
        images_kwargs = kwargs.copy()
        pixel_values = self._preprocess(images, **images_kwargs)
        reshaped_input_sizes = [image.shape[-2:] for image in images]
        data = {
            "pixel_values": pixel_values,
            "original_sizes": original_sizes,
            "reshaped_input_sizes": reshaped_input_sizes,
        }
        if segmentation_maps is not None:
            processed_segmentation_maps = self._prepare_image_like_inputs(
                images=segmentation_maps,
                expected_ndims=2,
                do_convert_rgb=False,
                input_data_format=ChannelDimension.FIRST,
            )
            segmentation_maps_kwargs = kwargs.copy()
            segmentation_maps_kwargs.update(
                {
                    "do_normalize": False,
                    "do_rescale": False,
                    "interpolation": pil_torch_interpolation_mapping[PILImageResampling.NEAREST],
                    "size": segmentation_maps_kwargs.pop("mask_size"),
                }
            )
            processed_segmentation_maps = self._preprocess(
                images=processed_segmentation_maps, **segmentation_maps_kwargs
            )
            data["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64)
        return BatchFeature(data=data, tensor_type=kwargs["return_tensors"])

    def _further_process_kwargs(
        self,
        size: Optional[SizeDict] = None,
        mask_size: Optional[SizeDict] = None,
        default_to_square: Optional[bool] = None,
        image_mean: Optional[Union[float, list[float]]] = None,
        image_std: Optional[Union[float, list[float]]] = None,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> dict:
        """
        Update kwargs that need further processing before being validated
        Can be overridden by subclasses to customize the processing of kwargs.
        """
        if kwargs is None:
            kwargs = {}
        if size is not None:
            size = SizeDict(**get_size_dict(size=size, default_to_square=default_to_square))
        if mask_size is not None:
            mask_size = SizeDict(**get_size_dict(size=mask_size, param_name="mask_size"))
        if isinstance(image_mean, list):
            image_mean = tuple(image_mean)
        if isinstance(image_std, list):
            image_std = tuple(image_std)
        if data_format is None:
            data_format = ChannelDimension.FIRST
        kwargs["size"] = size
        kwargs["mask_size"] = mask_size
        kwargs["image_mean"] = image_mean
        kwargs["image_std"] = image_std
        kwargs["data_format"] = data_format
        # `resample` may be a `PILImageResampling` enum (or its int value) or an already-resolved interpolation mode
        resample = kwargs.pop("resample")
        kwargs["interpolation"] = (
            pil_torch_interpolation_mapping[resample] if isinstance(resample, (PILImageResampling, int)) else resample
        )
        return kwargs

    def _apply_non_overlapping_constraints(self, pred_masks: torch.Tensor) -> torch.Tensor:
        """
        Apply non-overlapping constraints to the object scores in pred_masks. Here we
        keep only the highest scoring object at each spatial location in pred_masks.
        """
        batch_size = pred_masks.size(0)
        if batch_size == 1:
            return pred_masks

        device = pred_masks.device
        max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
        batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
        keep = max_obj_inds == batch_obj_inds
        # suppressed locations are clamped to a very negative score so they never binarize to foreground
        pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
        return pred_masks
    def post_process_masks(
        self,
        masks,
        original_sizes,
        mask_threshold=0.0,
        binarize=True,
        max_hole_area=0.0,
        max_sprinkle_area=0.0,
        apply_non_overlapping_constraints=False,
        **kwargs,
    ):
        """
        Remove padding and upscale masks to the original image size.

        Args:
            masks (`Union[torch.Tensor, List[torch.Tensor], np.ndarray, List[np.ndarray]]`):
                Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
            original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
                The original sizes of each image before it was resized to the model's expected input shape, in (height,
                width) format.
            mask_threshold (`float`, *optional*, defaults to 0.0):
                Threshold for binarization and post-processing operations.
            binarize (`bool`, *optional*, defaults to `True`):
                Whether to binarize the masks.
            max_hole_area (`float`, *optional*, defaults to 0.0):
                The maximum area of a hole to fill.
            max_sprinkle_area (`float`, *optional*, defaults to 0.0):
                The maximum area of a sprinkle to fill.
            apply_non_overlapping_constraints (`bool`, *optional*, defaults to `False`):
                Whether to apply non-overlapping constraints to the masks.

        Returns:
            (`torch.Tensor`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width)
            is given by original_size.
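
        Example (illustrative sketch; not part of the original docstring, shapes assumed):

        ```python
        >>> import torch
        >>> from transformers import Sam2ImageProcessorFast

        >>> processor = Sam2ImageProcessorFast()
        >>> low_res_masks = torch.randn(1, 3, 256, 256)  # (batch_size, num_masks, height, width)
        >>> masks = processor.post_process_masks([low_res_masks], original_sizes=[(1080, 1920)])
        >>> masks[0].shape
        torch.Size([1, 3, 1080, 1920])
        ```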
        """
        if isinstance(original_sizes, (torch.Tensor, np.ndarray)):
            original_sizes = original_sizes.tolist()
        output_masks = []
        for i, original_size in enumerate(original_sizes):
            if isinstance(masks[i], np.ndarray):
                masks[i] = torch.from_numpy(masks[i])
            elif not isinstance(masks[i], torch.Tensor):
                raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
            interpolated_mask = F.interpolate(masks[i], original_size, mode="bilinear", align_corners=False)
            if apply_non_overlapping_constraints:
                interpolated_mask = self._apply_non_overlapping_constraints(interpolated_mask)
            if binarize:
                interpolated_mask = interpolated_mask > mask_threshold
            output_masks.append(interpolated_mask)

        return output_masks
@dataclass
@auto_docstring(custom_intro="Base class for the vision encoder's outputs.")
class Sam2VisionEncoderOutput(ModelOutput):
    r"""
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, height, width, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the model.
    fpn_hidden_states (`tuple(torch.FloatTensor)`):
        Tuple of `torch.FloatTensor` (one for each feature level, from high to low resolution) of shape
        `(batch_size, hidden_size, height, width)`. Feature maps from the Feature Pyramid Network neck.
    fpn_position_encoding (`tuple(torch.FloatTensor)`):
        Tuple of `torch.FloatTensor` (one for each feature level, from high to low resolution) of shape
        `(batch_size, hidden_size, height, width)`. Positional encodings corresponding to the `fpn_hidden_states`.
    hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
        one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`. Hidden-states of the
        model at the output of each stage.
    attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
        the self-attention heads.
    """

    last_hidden_state: torch.FloatTensor = None
    fpn_hidden_states: Optional[torch.FloatTensor] = None
    fpn_position_encoding: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(custom_intro="Base class for the Sam2 model's output.")
class Sam2ImageSegmentationOutput(ModelOutput):
    r"""
    iou_scores (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks)`):
        The Intersection over Union (IoU) scores of the predicted masks.
    pred_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, height, width)`):
        The predicted low-resolution masks. This is an alias for `low_res_masks`. These masks need to be post-processed
        by the processor to be brought to the original image size.
    object_score_logits (`torch.FloatTensor` of shape `(batch_size, point_batch_size, 1)`):
        Logits for the object score, indicating if an object is present.
    image_embeddings (`tuple(torch.FloatTensor)`):
        The features from the FPN, which are used by the mask decoder. This is a tuple of `torch.FloatTensor` where each
        tensor has shape `(batch_size, channels, height, width)`.
    vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`.
        Hidden-states of the vision model at the output of each stage.
    vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
        Attentions weights of the vision model.
    mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
        Attentions weights of the mask decoder.
    """

    iou_scores: torch.FloatTensor = None
    pred_masks: torch.FloatTensor = None
    object_score_logits: torch.FloatTensor = None
    image_embeddings: Optional[tuple[torch.FloatTensor, ...]] = None
    vision_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    vision_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
    mask_decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
class Sam2PatchEmbeddings(nn.Module):
    r"""
    Turns pixel values into patch embeddings for transformer consumption.

    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`Sam2ImageProcessorFast.__call__`] for details.

    Returns:
        embeddings (`torch.FloatTensor`):
            Patch embeddings depend on image_size, patch_kernel_size, patch_stride and patch_padding
    """

    def __init__(self, config: Sam2HieraDetConfig):
        super().__init__()
        num_channels = config.num_channels
        hidden_size = config.hidden_size
        self.projection = nn.Conv2d(
            num_channels,
            hidden_size,
            kernel_size=config.patch_kernel_size,
            stride=config.patch_stride,
            padding=config.patch_padding,
        )

    def forward(self, pixel_values):
        _, num_channels, height, width = pixel_values.shape
        embeddings = self.projection(pixel_values).permute(0, 2, 3, 1)
        return embeddings


class Sam2SinePositionEmbedding(MaskFormerSinePositionEmbedding):
    pass


class Sam2VisionNeck(nn.Module):
    def __init__(self, config: Sam2VisionConfig):
        super().__init__()
        self.config = config
        self.position_encoding = Sam2SinePositionEmbedding(num_pos_feats=config.fpn_hidden_size // 2, normalize=True)
        self.convs = nn.ModuleList()
        for in_channels in config.backbone_channel_list:
            self.convs.append(
                nn.Conv2d(
                    in_channels=in_channels,
                    out_channels=config.fpn_hidden_size,
                    kernel_size=config.fpn_kernel_size,
                    stride=config.fpn_stride,
                    padding=config.fpn_padding,
                )
            )
        self.fpn_top_down_levels = config.fpn_top_down_levels

    def forward(self, hidden_states: torch.Tensor) -> tuple[tuple[torch.Tensor, ...], tuple[torch.Tensor, ...]]:
        fpn_hidden_states = ()
        fpn_position_encoding = ()

        # build the FPN top-down path, from the lowest-resolution backbone level to the highest
        prev_features = None
        n = len(self.convs) - 1
        for i in range(n, -1, -1):
            lateral_features = hidden_states[i].permute(0, 3, 1, 2)
            lateral_features = self.convs[n - i](lateral_features)
            if i not in self.fpn_top_down_levels or i == n:
                prev_features = lateral_features
            else:
                top_down_features = F.interpolate(
                    prev_features.to(dtype=torch.float32),
                    scale_factor=2.0,
                    mode="nearest",
                    align_corners=None,
                    antialias=False,
                ).to(lateral_features.dtype)
                prev_features = lateral_features + top_down_features
            prev_position_encoding = self.position_encoding(prev_features).to(prev_features.dtype)

            fpn_hidden_states += (prev_features,)
            fpn_position_encoding += (prev_position_encoding,)

        return fpn_hidden_states, fpn_position_encoding


def do_pool(x: torch.Tensor, query_stride: Optional[tuple[int, int]] = None) -> torch.Tensor:
    if query_stride is None:
        return x
    # (batch_size, height, width, channels) -> (batch_size, channels, height, width)
    x = x.permute(0, 3, 1, 2)
    # query pooling is implemented here as a max-pool with the query stride
    x = F.max_pool2d(x, kernel_size=query_stride, stride=query_stride, ceil_mode=False)
    # back to (batch_size, height, width, channels)
    x = x.permute(0, 2, 3, 1)
    return x


class Sam2MultiScaleAttention(nn.Module):
    def __init__(
        self,
        config: Sam2HieraDetConfig,
        dim: int,
        dim_out: int,
        num_attention_heads: int,
        query_stride: Optional[tuple[int, int]] = None,
    ):
        super().__init__()
        self.config = config
        self.dim = dim
        self.dim_out = dim_out
        self.query_stride = query_stride
        self.num_attention_heads = num_attention_heads
        head_dim = dim_out // num_attention_heads
        self.scale = head_dim**-0.5
        self.qkv = nn.Linear(dim, dim_out * 3)
        self.proj = nn.Linear(dim_out, dim_out)
        self.is_causal = False

    def forward(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor:
        batch_size, height, width, _ = hidden_states.shape
        # qkv with shape (batch_size, height * width, 3, num_attention_heads, channels_per_head)
        qkv = self.qkv(hidden_states).reshape(batch_size, height * width, 3, self.num_attention_heads, -1)
        query, key, value = torch.unbind(qkv, dim=2)

        # downsample the queries when this block also pools its output
        if self.query_stride:
            query = do_pool(query.reshape(batch_size, height, width, -1), self.query_stride)
            height, width = query.shape[1:3]
            query = query.reshape(batch_size, height * width, self.num_attention_heads, -1)

        query = query.transpose(1, 2)
        key = key.transpose(1, 2)
        value = value.transpose(1, 2)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, _ = attention_interface(
            self,
            query,
            key,
            value,
            attention_mask=None,
            is_causal=self.is_causal,
            scaling=self.scale,
            **kwargs,
        )

        attn_output = attn_output.reshape(batch_size, height, width, -1)
        attn_output = self.proj(attn_output)
        return attn_output


class Sam2FeedForward(nn.Module):
    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        output_dim: int,
        num_layers: int,
        activation: str = "relu",
        sigmoid_output: bool = False,
    ):
        super().__init__()
        self.num_layers = num_layers
        self.activation = ACT2FN[activation]
        self.proj_in = nn.Linear(input_dim, hidden_dim)
        self.proj_out = nn.Linear(hidden_dim, output_dim)
        self.layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers - 2)])
        self.sigmoid_output = sigmoid_output

    def forward(self, hidden_states):
        hidden_states = self.proj_in(hidden_states)
        hidden_states = self.activation(hidden_states)
        for layer in self.layers:
            hidden_states = self.activation(layer(hidden_states))

        hidden_states = self.proj_out(hidden_states)
        if self.sigmoid_output:
            hidden_states = F.sigmoid(hidden_states)
        return hidden_states


class Sam2MultiScaleBlock(GradientCheckpointingLayer):
    def __init__(self, config: Sam2HieraDetConfig, stage_idx: int, block_idx: int, total_block_idx: int):
        super().__init__()
        # the first block of a stage still operates on the previous stage's embedding dimension
        self.dim = (
            config.embed_dim_per_stage[stage_idx - 1]
            if stage_idx > 0 and block_idx == 0
            else config.embed_dim_per_stage[stage_idx]
        )
        self.dim_out = config.embed_dim_per_stage[stage_idx]
        self.layer_norm1 = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)

        self.window_size = (
            config.window_size_per_stage[stage_idx - 1]
            if stage_idx > 0 and block_idx == 0
            else config.window_size_per_stage[stage_idx]
        )
        # global-attention blocks attend over the full feature map instead of local windows
        self.window_size = 0 if total_block_idx in config.global_attention_blocks else self.window_size
        self.query_stride = (
            config.query_stride if 0 < stage_idx <= config.num_query_pool_stages and block_idx == 0 else None
        )

        self.attn = Sam2MultiScaleAttention(
            config,
            self.dim,
            self.dim_out,
            num_attention_heads=config.num_attention_heads_per_stage[stage_idx],
            query_stride=self.query_stride,
        )
        self.layer_norm2 = nn.LayerNorm(self.dim_out, eps=config.layer_norm_eps)
        self.mlp = Sam2FeedForward(
            self.dim_out,
            int(self.dim_out * config.mlp_ratio),
            self.dim_out,
            num_layers=2,
            activation=config.hidden_act,
        )

        if self.dim != self.dim_out:
            self.proj = nn.Linear(self.dim, self.dim_out)

    def forward(self, hidden_states: torch.Tensor, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
        residual = hidden_states  # (batch_size, height, width, channels)

        hidden_states = self.layer_norm1(hidden_states)

        # skip connection: project (and pool) the residual when the channel dimension changes
        if self.dim != self.dim_out:
            residual = do_pool(self.proj(hidden_states), self.query_stride)

        # window partition
        window_size = self.window_size
        if self.window_size > 0:
            height, width = hidden_states.shape[1], hidden_states.shape[2]
            hidden_states, pad_hw = window_partition(hidden_states, window_size)

        hidden_states = self.attn(hidden_states, **kwargs)
        if self.query_stride:
            # shapes have changed due to query pooling
            window_size = self.window_size // self.query_stride[0]
            height, width = residual.shape[1:3]

            pad_h = (window_size - height % window_size) % window_size
            pad_w = (window_size - width % window_size) % window_size
            pad_hw = (height + pad_h, width + pad_w)

        # reverse window partition
        if self.window_size > 0:
            hidden_states = window_unpartition(hidden_states, window_size, pad_hw, (height, width))

        hidden_states = residual + hidden_states
        layernorm_output = self.layer_norm2(hidden_states)
        hidden_states = hidden_states + self.mlp(layernorm_output)
        return hidden_states
    c                   l    e Zd ZU dZdZeej                     ed<   dZ	ee
ej                  df      ed<   y)Sam2HieraDetModelOutputat  
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, height, width, hidden_size)`):
        hidden-states at the output of the last layer of the model.
    intermediate_hidden_states (`tuple[torch.FloatTensor]` of shape `(batch_size, height, width, hidden_size)`):
        Sequence of hidden-states at the output of the intermediate layers of the model.
    Nr   .intermediate_hidden_states)r7   r8   r9   r:   r   r   rs   r   r>   rP  r   r?   r@   rA   rO  rO    s?     6:x 1 129JNu/@/@#/E)F GNr@   rO  c                   *    e Zd ZeZdZdZdZdZdZ	d Z
y)Sam2PreTrainedModelsam2rY   Tc                    | j                   j                  }t        |t        j                  t        j
                  t        j                  f      rY|j                  j                  j                  d|       |j                  |j                  j                  j                          nt        |t        j                        re|j                  j                  j                  d|       |j                  |j                  j                  |j                     j                          nit        |t        j                  t        f      rI|j                  j                  j!                  d       |j                  j                  j                          t        |t"              r`|j$                  $|j$                  j                  j                          |j&                  $|j&                  j                  j                          t        |t(              r2|j*                  %|j*                  j                  j                          y y y )Nr   )meanstd      ?)r   initializer_ranger   r   r  r   ConvTranspose2dweightrh   normal_biaszero_	Embeddingpadding_idxr9  Sam2LayerNormfill_Sam2HieraDetModel	pos_embedpos_embed_window	Sam2Modelno_memory_embedding)rK   modulerV  s      rA   _init_weightsz!Sam2PreTrainedModel._init_weights  s   kk++fryy"))R5G5GHIMM&&CS&9{{&  &&(-MM&&CS&9!!-""6#5#56<<>} =>MM$$S)KK""$f/0+  %%++-&&2'',,224fi())5**//557 6 )r@   N)r7   r8   r9   r.   config_classbase_model_prefixmain_input_name_supports_sdpa_supports_flash_attn_2_supports_attention_backendrh  r?   r@   rA   rR  rR    s(    L$ON!"&8r@   rR  c            
            e Zd ZeZdZeedZdef fdZ	d Z
deeef   dej                  fdZe	 ddeej$                     d	ee   deeef   fd
       Z xZS )rb  rY   r   r   r   c           	         t         |   |       t        |      | _        t	        j
                  t        j                  d|j                  g|j                         | _
        t	        j
                  t        j                  d|j                  |j                  d   |j                  d               | _        t        j                  |j                        dz
  j!                         | _        t	        j$                         | _        d}t)        |j                        D ]D  \  }}t+        |      D ]1  }t-        ||||      }| j&                  j/                  |       |dz  }3 F y )Nr-   r   )r   r3  r4  r5  )r   rJ   r   patch_embedr   	Parameterrs   zerosr   +window_positional_embedding_background_sizerc  r<  rd  r   cumsumblocks_per_stager   
stage_endsr   blocksr   r   r2  r   )rK   r   r5  r3  rw  r4  blockr   s          rA   rJ   zSam2HieraDetModel.__init__  s.    .v6KK6--c0b0bc
 !#KK6--v/K/KA/NPVPlPlmnPop!
 99V%<%<=AIIKmmo+4V5L5L+M 	%'I'"#34 %	+!Y)]l ""5)1$%	%r@   c                     | j                   S rI   )rr  rK   s    rA   get_input_embeddingsz&Sam2HieraDetModel.get_input_embeddings  s    r@   hwrV   c           	      4   |\  }}| j                   }t        j                  | j                  ||fd      }||j	                  t        |j                  |j                        D cg c]
  \  }}||z   c}}      z   }|j                  dddd      }|S c c}}w )Nbicubic)rf   r   r   r   r   r-   )rd  r   r   rc  tileziprk   r   )rK   r~  hwwindow_embedrc  r   ys           rA   _get_pos_embedz Sam2HieraDetModel._get_pos_embed  s    1,,MM$..1vIN	 1 1c)//[g[m[mFn2oda162o pp	%%aAq1	 3ps   %BrG   c                    |t        d      | j                  |      }|| j                  |j                  dd       z   }d}t	        | j
                        D ]#  \  }} ||fi |}|| j                  v s||fz   }% t        ||      S )N You have to specify pixel_valuesr-   r   r?   )r   rP  )r   rr  r  rk   r   ry  rx  rO  )rK   rY   rG   r   rP  r   block_modules          rA   r   zSam2HieraDetModel.forward  s     ?@@((6%(;(;M<O<OPQRS<T(UU%'"(5 	[OA|(A&AMDOO#-G=JZ-Z*		[ '+'A
 	
r@   rI   )r7   r8   r9   r/   ri  rk  r2  r  _can_record_outputsrJ   r}  r   r=   rs   r   r  r   r   r   r   r   r   rO  r   r   r   s   @rA   rb  rb    s    %L$O,-
%1 %, sCx U\\   59
u001
 +,
 
u--	.	
 
r@   rb  zJ
    The vision model from Sam without any head or projection on top.
    c            
            e Zd ZeZdZeedZdef fdZ	d Z
e	 d	deej                     dee   deeef   fd       Z xZS )
Sam2VisionModelrY   rp  r   c                     t         |   |       || _        t        j                  |j
                        | _        t        |      | _        |j                  | _	        | j                          y rI   )r   rJ   r   r    from_configbackbone_configbackboner   necknum_feature_levels	post_initrK   r   r   s     rA   rJ   zSam2VisionModel.__init__  sS     !--f.D.DE"6*	"(";";r@   c                 6    | j                   j                         S rI   )r  r}  r|  s    rA   r}  z$Sam2VisionModel.get_input_embeddings  s    }}1133r@   rG   rV   c                    |t        d       | j                  |fi |}|j                  }|j                  }| j	                  |      \  }}|| j
                   d  d d d   }|| j
                   d  d d d   }t        |||      S )Nr  r   )r   r   r   )r   r  r   rP  r  r  r   )rK   rY   rG   backbone_outputr   rP  r   r   s           rA   r   zSam2VisionModel.forward  s     ?@@ ($--??'99%4%O%O"3799=W3X00-t/F/F.F.HI$B$O 5t7N7N6N6P QRVTVRV W&+/"7
 	
r@   rI   )r7   r8   r9   r2   ri  rk  r2  r  r  rJ   r}  r   r   rs   r   r   r   r   r   r   r   r   r   s   @rA   r  r    s     $L$O,-
	/ 	4  59
u001
 +,
 
u--	.	
 
r@   r  c                   ,     e Zd Zdef fdZddZ xZS )Sam2PositionalEmbeddingr   c                     t         |           |j                  | _        | j                  t        j                  d|j
                  dz  f      z  }| j                  d|       y )Nr   positional_embedding)r   rJ   r  rs   randnr   register_buffer)rK   r   r  r   s      rA   rJ   z Sam2PositionalEmbedding.__init__/  sT    \\
#zzEKKF<N<NRS<S8T,UU35IJr@   c                    |j                         }|D|dddddddf   |d   z  |dddddddf<   |dddddddf   |d   z  |dddddddf<   |j                  t        j                         d|z  dz
  }|j                  | j                  j
                        }|| j                  z  }dt        j                  z  |z  }t        j                  t        j                  |      t        j                  |      gd      S )z8Positionally encode points that are normalized to [0,1].Nr   r-   r   r   r   )clonerr   rs   r   r  r   r   picatsincos)rK   input_coordsinput_shapecoordinatess       rA   r   zSam2PositionalEmbedding.forward5  s    "((*"&1!Q1*&=A&NK1a
#&1!Q1*&=A&NK1a
#u}}% +o)!nnT%>%>%D%DE!D$=$=="%%i+-yy%))K0%))K2HIrRRr@   rI   )r7   r8   r9   r1   rJ   r   r   r   s   @rA   r  r  .  s    K6 KSr@   r  c                       e Zd Zy)Sam2MaskEmbeddingNr   r?   r@   rA   r  r  G  r   r@   r  c                       e Zd ZdefdZdej                  dej                  dedej                  fdZdej                  dej                  fd	Z	y
)Sam2PromptEncoderr   c                    t         j                  j                  |        t        |      | _        t        |      | _        t        j                  d|j                        | _	        |j                  |j                  z  |j                  |j                  z  f| _        d|j                  z  |j                  z  d|j                  z  |j                  z  f| _        |j                  | _        t        j                  |j                  |j                        | _        |j                  | _        t        j                  d|j                        | _        y )Nr-      )r   ModulerJ   r  shared_embeddingr  
mask_embedr^  r   no_mask_embed
image_size
patch_sizeimage_embedding_sizemask_input_sizeinput_image_sizenum_point_embeddingspoint_embednot_a_point_embedrK   r   s     rA   rJ   zSam2PromptEncoder.__init__L  s   
		4  7 ?+F3\\!V-?-?@%+%6%6&:K:K%KVM^M^bhbsbsMs$t! !F$5$5 59J9J JAPVPaPaLaekevevLvw & 1 1<<(C(CVEWEWX!--!#a1C1C!Dr@   pointsrg   padrV   c                 P   |dz   }|rZt         j                  j                  j                  |ddd      }t         j                  j                  j                  |ddd      }| j                  | j                  f}| j                  ||      }t        j                  |d   dk(  | j                  j                  |      }t        j                  |d   d	k7  |t        j                  |            }|| j                  |j                  d
            |dk\  j                  d      z  z   }|S )zEmbeds point prompts.      ?)r   r   r   r-   constantr   )r   r  )r   r-   r   ).Ni)min)rs   r   r3   r  r  r  r   r  rZ  
zeros_liker  r   	unsqueeze)rK   r  rg   r  r  point_embeddings         rA   _embed_pointszSam2PromptEncoder._embed_pointsZ  s   #XX((,,V\
Z[,\FXX((,,VV*TV,WF,,d.C.CD//D  ++fY&72&=t?U?U?\?\^mn  ++9$_-
 *D,<,<V\\a\=P,QU[_`U`TkTklnTo,oor@   boxesc                 h   |dz   }|j                   dd \  }}|j                  ||dd      }| j                  | j                  f}| j                  ||      }|dddddddfxx   | j                  j
                  d   z  cc<   |dddddddfxx   | j                  j
                  d   z  cc<   |S )zEmbeds box prompts.r  Nr   r   r-   r   )rk   r  r  r  r  rZ  )rK   r  r   nb_boxescoordsr  corner_embeddings          rA   _embed_boxeszSam2PromptEncoder._embed_boxess  s    ${{2A
Hz8Q:,,d.C.CD00EAq!$(8(8(?(?(BB$Aq!$(8(8(?(?(BB$r@   N)
r7   r8   r9   r1   rJ   rs   r   r   r  r  r?   r@   rA   r  r  K  s\    E6 EELL %,, T V[VbVb 2	 %,, 	 5<< 	 r@   r  c                        e Zd ZdZd
 fd	Z	 d
dej                  dej                  dej                  deej                     dee	   de
ej                  ej                  f   fd	Z xZS )Sam2Attentionz
    SAM2's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and
    values.
    c                    t         |           ||j                  n|}|| _        |j                  | _        |j                  |z  | _        |j                  | _        | j
                  |j                  z  | _        | j                  dz  | _        d| _	        t        j                  | j                  | j
                        | _        t        j                  | j                  | j
                        | _        t        j                  | j                  | j
                        | _        t        j                  | j
                  | j                        | _        y )Nr  F)r   rJ   attention_downsample_rater   r   internal_dimr	  r  r  r  r   r  q_projk_projv_projo_proj)rK   r   downsample_rater   s      rA   rJ   zSam2Attention.__init__  s    >M>U&::[j!--"../A#)#=#= ))V-G-GG}}d*ii 0 0$2C2CDii 0 0$2C2CDii 0 0$2C2CDii 1 143C3CDr@   r  r  r  attention_similarityrG   rV   c                    |j                   d d \  }}||z  d| j                  | j                  f} | j                  |      j                  | j                  dd      } | j                  |      j                  | j                  dd      } | j                  |      j                  | j                  dd      }t        }	| j                  j                  dk7  rt        | j                  j                     }	 |	| |||f|d| j                  | j                  d|\  }
}|
j                  ||d| j                  | j                  z        j                         }
| j!                  |
      }
|
|fS )Nr   r   r-   r  r   )r  dropoutr  r  )rk   r	  r  r  viewr  r  r  r*   r   r  r   r  r  r  
contiguousr  )rK   r  r  r  r  rG   r   point_batch_size	new_shaper  r   r  s               rA   r   zSam2Attention.forward  so    (-{{2A$
$"22B8P8PRVR_R_`	'E"''3==aC#dkk###Y/99!Q?'E"''3==aC(?;;++w6"9$++:Z:Z"[$7	
%

 0LLnn
%
 
%
!\ "))("d.F.F.V

*, 	 kk+.L((r@   rI   )r7   r8   r9   r:   rJ   rs   r   r   r   r   r   r   r   r   s   @rA   r  r    s    
E* 8<%)||%) \\%) ||	%)
 'u||4%) +,%) 
u||U\\)	*%)r@   r  c                       e Zd ZddedefdZy)Sam2TwoWayAttentionBlockr   skip_first_layer_pec                 L   t         j                  j                  |        t        |d      | _        t        j
                  |j                        | _        t        |      | _        t        j
                  |j                        | _	        t        |j                  |j                  |j                  |j                        | _        t        j
                  |j                        | _        t        j
                  |j                        | _        t        |      | _        || _        y )Nr-   )r  )r&  )r   r  rJ   r  	self_attnr9  r   r;  cross_attn_token_to_imagerB  r"  mlp_dimnum_hidden_layersrE  layer_norm3layer_norm4cross_attn_image_to_tokenr  )rK   r   r  s      rA   rJ   z!Sam2TwoWayAttentionBlock.__init__  s    
		4 &vqA<<(:(:;)6v)>&<<(:(:;"0B0BvOgOg
 <<(:(:;<<(:(:;)6v)>&#6 r@   N)F)r7   r8   r9   r0   r   rJ   r?   r@   rA   r  r    s    74 74 7r@   r  c                       e Zd Zy)Sam2TwoWayTransformerNr   r?   r@   rA   r  r    r   r@   r  c                       e Zd Zy)r`  Nr   r?   r@   rA   r`  r`    r   r@   r`  c                   h    e Zd Zdef fdZd Zd Z	 	 ddej                  dej                  dej                  dej                  d	e	d
e
ej                     deej                     deej                     dee   deej                  ej                  ej                  ej                  f   fdZ xZS )Sam2MaskDecoderr   c                 t   t         |   |       | `t        | j                  |j
                  | j                  |j                  d      | _        t        j                  |j                  |j                  dz  dd      | _
        t        j                  |j                  |j                  dz  dd      | _        t        j                  d| j                        | _        t        | j                  | j                  dd      | _        |j                  | _        |j                   | _        |j"                  | _        y )NT)r(     r-   )r   r   r  r   )r   rJ   iou_prediction_headr"  r   iou_head_hidden_dimnum_mask_tokensiou_head_depthr   r   conv_s0conv_s1r^  obj_score_tokenpred_obj_score_headdynamic_multimask_via_stability!dynamic_multimask_stability_delta"dynamic_multimask_stability_threshr  s     rA   rJ   zSam2MaskDecoder.__init__  s    $#2&&  !!$
  yy!3!3V5G5G15LZ[defyy!3!3V5G5G15LZ[def!||At/?/?@#243C3CTEUEUWXZ[#\ /5/U/U,171Y1Y.282[2[/r@   c                    |j                  d      }| j                  }t        j                  ||kD  d      j	                         }t        j                  || kD  d      j	                         }t        j
                  |dkD  ||z  d      }|S )zz
    def _get_stability_scores(self, mask_logits):
        """
        Compute stability scores of the mask logits based on the IoU between upper and
        lower thresholds.
        """
        mask_logits = mask_logits.flatten(-2)
        stability_delta = self.dynamic_multimask_stability_delta
        # intersection: area above +delta; union: area above -delta
        area_i = torch.sum(mask_logits > stability_delta, dim=-1).float()
        area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float()
        stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0)
        return stability_scores

    def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
        """
        When outputting a single mask, if the stability score from the current single-mask
        output (based on output token 0) falls below a threshold, we instead select from
        multi-mask outputs (based on output token 1~3) the mask with the highest predicted
        IoU score. This is intended to ensure a valid mask for both clicking and tracking.
        """
        ...
    def forward(
        self,
        image_embeddings: torch.Tensor,
        image_positional_embeddings: torch.Tensor,
        sparse_prompt_embeddings: torch.Tensor,
        dense_prompt_embeddings: torch.Tensor,
        multimask_output: bool,
        high_resolution_features: Optional[list[torch.Tensor]] = None,
        attention_similarity: Optional[torch.Tensor] = None,
        target_embedding: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Predict masks given image and prompt embeddings.

        Args:
            image_embeddings (`torch.Tensor`):
                The embeddings from the image encoder.
            image_positional_embeddings (`torch.Tensor`):
                Positional encoding with the shape of image_embeddings.
            sparse_prompt_embeddings (`torch.Tensor`):
                The embeddings of the points and boxes.
            dense_prompt_embeddings (`torch.Tensor`):
                The embeddings of the mask inputs.
            multimask_output (`bool`):
                Whether to return multiple masks or a single mask.
            high_resolution_features (`list[torch.Tensor]`, *optional*):
                The high-resolution features from the vision encoder.
            attention_similarity (`torch.Tensor`, *optional*):
                The attention similarity tensor.
            target_embedding (`torch.Tensor`, *optional*):
                The target embedding.
        """
        ...
    input points and labels, boxes, or masks.
    c                      e Zd Zg dZdefdZdej                  fdZ ej                         dej                  dee   deej                     fd       Zdej                  dee   deeej                     eej                     eeej                  d	f      eeej                  d	f      f   fd
Zee	 	 	 	 	 	 	 	 	 ddeej                     deej                     deej(                     deej                     deej(                     deej                     dedeej                     deej                     dee   defd              Zy)re  )z
^memory_.*z^mask_downsample.*z^object_pointer_proj.*z0^temporal_positional_encoding_projection_layer.*no_memory_positional_encodingno_object_pointer%occlusion_spatial_embedding_parameterr   c                    t        j                  | |       t        |j                        | _        t        j                  |j                        | _        t        |j                        | _
        |j                  |j                  _        t        |j                        | _        |j                  j                  | _        |j                  j                   | _        |j                  j"                  | _        t&        j(                  j+                  t'        j,                  dd| j$                              | _        | j1                          y )Nr-   )r   rJ   r  prompt_encoder_configshared_image_embeddingr    r  vision_configvision_encoderr  prompt_encoderr  mask_decoder_configr  mask_decoderr  backbone_feature_sizesr   r$  rs   r   rs  rt  rf  r  r  s     rA   rJ   zSam2Model.__init__  s      v.&=f>Z>Z&[#'33F4H4HI/0L0LM:@:U:U""7+F,F,FG"("6"6"I"I&,&:&:&Q&Q# ..>>#(88#5#5ekk!Q6X#Y r@   rV   c                    | j                   j                  }| j                  j                  j                  }| j                  j                  j
                  }t        j                  |||      }|j                  d      dz
  }|j                  d      dz
  }||d   z  }||d   z  }| j                  t        j                  ||gd            }|j                  ddd      j                  d      S )N)r]   r   r   r  r  r-   r   r   )r?  r  r<  r  r]   r   rs   onesrv  r&  r   r  )rK   rf   target_devicetarget_dtypegridy_embedx_embedr  s           rA   $get_image_wide_positional_embeddingsz.Sam2Model.get_image_wide_positional_embeddings  s    ""7733HHOO22GGMMzz$}LI++!+$s*++!+$s*DG#DG##::5;;QXGY_a;bc#++Aq!4>>qAAr@   rY   rG   c           
          |j                   d   } | j                  |fi |\  }}}}|d   | j                  z   |d<   t        || j                        D cg c]*  \  }} |j                  ddd      j                  |dg| , }}}|S c c}}w )z
        Returns the image embeddings by passing the pixel values through the vision encoder.

        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
                Input pixel values
        r   r   r-   r   )rk   get_image_featuresrf  r  rB  r   r  )	rK   rY   rG   r   feature_mapsr   feat	feat_sizer   s	            rA   get_image_embeddingszSam2Model.get_image_embeddings  s     "''*
 7 7 7 O OaA (+d.F.FFR
 $'|T5P5P#Q
i 'DLLAq!&&z2B	B
 

  
s   /B
.c                     | j                   |fi |}|j                  }|j                  }t        |      }| j                  j                  |d         |d<   | j                  j                  |d         |d<   |D cg c]$  }|j                  d      j                  ddd      & }}|D cg c]$  }|j                  d      j                  ddd      & }}|||j                  |j                  fS c c}w c c}w )a  
        Extract and preprocess image features using the vision encoder.

        Args:
            pixel_values (`torch.FloatTensor`):
                Input pixel values of shape `(batch_size, num_channels, height, width)`.

        Returns:
            `tuple`: A tuple containing:
                - feature_maps (`list[torch.Tensor]`): List of feature maps from different levels.
                - feature_maps_position_embeddings (`list[torch.Tensor]`): List of positional embeddings for each feature level.
                - vision_hidden_states (`tuple[torch.FloatTensor]`, *optional*): Hidden states from the vision encoder.
                - vision_attentions (`tuple[torch.FloatTensor]`, *optional*): Attention weights from the vision encoder.
        r   r-   r   )r>  r   r   r   rA  r  r  r  r   r   r   )rK   rY   rG   vision_outputsrM   feature_maps_position_embeddingsfeature_mapfeature_map_position_embeddings           rA   rL  zSam2Model.get_image_features  s   0 3F$2E2E3
3

 &77+9+O+O( L)++33LODQ++33LODQ T``K++A.66q!Q?`` 3S,
. +2215==aAF,
( ,

 =~?[?[]k]v]vvv a,
s   =)C1,)C6Ninput_pointsinput_labelsinput_boxesinput_masksr   r  r  r  c
                 h   |du |du z  st        d      |V|T|j                  d   |j                  d   k7  r5t        dj                  |j                  d   |j                  d               | j                         }||j                  d   n|d   j                  d   }|j	                  |ddd      }d}d}|x | j
                  |fi |
\  }}}}|d   | j                  z   |d<   t        || j                        D cg c]*  \  }} |j                  ddd      j                  |dg| , }}}|?|=t        j                  |dddddddf   t        j                  |j                        }|m|kt        j                  |ddd|d   j                   |d   j                        }t        j"                  |ddt        j$                  |d   j                         }|{|j                  d	d | j&                  j(                  k7  rUt+        j,                  |j/                         | j&                  j(                  d
dd      j1                  |j                         }| j'                  ||||      \  }} | j2                  d|d   |||||dd ||	d|
\  }}}}t5        ||||||      S c c}}w )a  
        input_points (`torch.FloatTensor` of shape `(batch_size, num_points, 2)`):
            Input 2D spatial points, this is used by the prompt encoder to encode the prompt. Generally yields much
            better results. The points can be obtained by passing a list of list of list to the processor that will
            create corresponding `torch` tensors of dimension 4. The first dimension is the image batch size, the
            second dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict
            per input point), the third dimension is the number of points per segmentation mask (it is possible to pass
            multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal)
            coordinates of the point. If a different number of points is passed either for each image, or for each
            mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the
            computation of the embedding will be skipped for these points using the labels.
        input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points)`):
            Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the
            official implementation, there are 3 types of labels

            - `1`: the point is a point that contains the object of interest
            - `0`: the point is a point that does not contain the object of interest
            - `-1`: the point corresponds to the background

            We added the label:

            - `-10`: the point is a padding point, thus should be ignored by the prompt encoder

            The padding labels should be automatically done by the processor.
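
            For example, two foreground points prompting a single mask on one image could be passed to the processor
            (using the `processor` and `raw_image` from the example below; coordinates are illustrative):

            ```python
            >>> input_points = [[[450, 600], [500, 620]]]  # one image, two (x, y) points for the same object
            >>> input_labels = [[1, 1]]  # both points mark the object of interest
            >>> inputs = processor(images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt")
            ```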
        input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
            Input boxes for the points, this is used by the prompt encoder to encode the prompt. Generally yields
            much better generated masks. The boxes can be obtained by passing a list of list of list to the processor,
            that will generate a `torch` tensor, with each dimension corresponding respectively to the image batch
            size, the number of boxes per image and the coordinates of the top left and bottom right point of the box.
            In the order (`x1`, `y1`, `x2`, `y2`):

            - `x1`: the x coordinate of the top left point of the input box
            - `y1`: the y coordinate of the top left point of the input box
            - `x2`: the x coordinate of the bottom right point of the input box
            - `y2`: the y coordinate of the bottom right point of the input box
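
            For example, a single box prompt on one image could be passed to the processor (using the `processor` and
            `raw_image` from the example below; coordinates are illustrative):

            ```python
            >>> input_boxes = [[[75, 275, 1725, 850]]]  # one image, one box as (x1, y1, x2, y2)
            >>> inputs = processor(images=raw_image, input_boxes=input_boxes, return_tensors="pt")
            ```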
        input_masks (`torch.FloatTensor` of shape `(batch_size, image_size, image_size)`):
            SAM model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder to
            generate a corresponding embedding, which will later be fed to the mask decoder. These masks need to be
            manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`).
        image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_channels, window_size, window_size)`):
            Image embeddings, this is used by the mask decoder to generate masks and iou scores. For more memory
            efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings`
            method, and then feed them to the `forward` method instead of feeding the `pixel_values`.
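
            For instance, with the `model` and `inputs` from the example below, the embeddings can be computed once
            and then reused for further prompts on the same image (sketch):

            ```python
            >>> image_embeddings = model.get_image_embeddings(inputs["pixel_values"])
            >>> outputs = model(input_points=inputs["input_points"], image_embeddings=image_embeddings)
            ```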
        multimask_output (`bool`, *optional*):
            In the original implementation and paper, the model always outputs 3 masks per image (or per point / per
            bounding box if relevant). However, it is possible to just output a single mask, that corresponds to the
            "best" mask, by specifying `multimask_output=False`.
        attention_similarity (`torch.FloatTensor`, *optional*):
            Attention similarity tensor, to be provided to the mask decoder for target-guided attention in case the
            model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
        target_embedding (`torch.FloatTensor`, *optional*):
            Embedding of the target concept, to be provided to the mask decoder for target-semantic prompting in case
            the model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).

        Example:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoModel, AutoProcessor

        >>> model = AutoModel.from_pretrained("danelcsb/sam2.1_hiera_tiny")
        >>> processor = AutoProcessor.from_pretrained("danelcsb/sam2.1_hiera_tiny")

        >>> img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png"
        >>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        >>> input_points = [[[400, 650]]]  # 2D location of a window on the car
        >>> inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt")

        >>> # Get segmentation mask
        >>> outputs = model(**inputs)

        >>> # Postprocess masks
        >>> masks = processor.post_process_masks(
        ...     outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
        ... )
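        >>> # Predicted IoU scores for each candidate mask are also returned
        >>> scores = outputs.iou_scores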
        ```
        """
        if (pixel_values is None) == (image_embeddings is None):
            raise ValueError("Exactly one of pixel_values or image_embeddings must be provided.")
        if input_points is not None and input_boxes is not None and input_points.shape[1] != input_boxes.shape[1]:
            raise ValueError(
                "You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
                    input_points.shape[1], input_boxes.shape[1]
                )
            )

        image_positional_embeddings = self.get_image_wide_positional_embeddings()
        # repeat with batch size
        batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings[-1].shape[0]
        image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1)

        vision_hidden_states = None
        vision_attentions = None

        if pixel_values is not None:
            feature_maps, _, vision_hidden_states, vision_attentions = self.get_image_features(pixel_values, **kwargs)
            # Single images carry no memory from previous frames, so the learned "no memory" embedding is added to
            # the lowest-resolution feature map.
            feature_maps[-1] = feature_maps[-1] + self.no_memory_embedding
            # Reshape the flattened feature maps back to (batch_size, channels, height, width).
            image_embeddings = [
                feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
                for feat, feat_size in zip(feature_maps, self.backbone_feature_sizes)
            ]

        if input_points is not None and input_labels is None:
            # Points without labels are treated as foreground points.
            input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int32, device=input_points.device)

        if input_points is None and input_boxes is None:
            # Prompt-less inference: use a single padding point so that the prompt encoder still produces embeddings.
            input_points = torch.zeros(
                batch_size, 1, 1, 2, dtype=image_embeddings[-1].dtype, device=image_embeddings[-1].device
            )
            input_labels = -torch.ones(batch_size, 1, 1, dtype=torch.int32, device=image_embeddings[-1].device)

        if input_masks is not None and input_masks.shape[-2:] != self.prompt_encoder.mask_input_size:
            # Resize mask prompts to the resolution expected by the prompt encoder.
            input_masks = F.interpolate(
                input_masks.float(),
                size=self.prompt_encoder.mask_input_size,
                mode="bilinear",
                align_corners=False,
                antialias=True,
            ).to(input_masks.dtype)

        sparse_embeddings, dense_embeddings = self.prompt_encoder(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            input_masks=input_masks,
        )

        low_res_multimasks, iou_scores, _, object_score_logits = self.mask_decoder(
            image_embeddings=image_embeddings[-1],
            image_positional_embeddings=image_positional_embeddings,
            sparse_prompt_embeddings=sparse_embeddings,
            dense_prompt_embeddings=dense_embeddings,
            multimask_output=multimask_output,
            high_resolution_features=image_embeddings[:-1],
            attention_similarity=attention_similarity,
            target_embedding=target_embedding,
            **kwargs,
        )

        return Sam2ImageSegmentationOutput(
            iou_scores=iou_scores,
            pred_masks=low_res_multimasks,
            object_score_logits=object_score_logits,
            image_embeddings=image_embeddings,
            vision_hidden_states=vision_hidden_states,
            vision_attentions=vision_attentions,
        )
   image_processing_utils_fastr   r   image_utilsr   r   r   r   r   r   r   modeling_layersr   modeling_utilsr   r   processing_utilsr   utilsr   r   r   r   r   utils.genericr   r   autor    maskformer.modeling_maskformerr!   sam.image_processing_sam_fastr"   sam.modeling_samr#   r$   r%   r&   r'   r(   r)   r*   vitdet.modeling_vitdetr+   r,   configuration_sam2r.   r/   r0   r1   r2   
get_loggerr7   loggerr5   rC   r   r   r  r   r   r   r   r=   r  r  r"  r2  rO  rR  rb  r  r  r  r  r  r  r  r`  r  re  __all__r?   r@   rA   <module>r|     s    ! , ,       ! A b   : F &  D  L A	 	 	 J  ( 
		H	%(#B ( J2 J JZ KL?k ? M ?6 FGL+ L H L@")) B	 ? 	/8RYY /8du|| 8C= ELL ?bii ?Dbii <Z4 Zz 
	Ok 	O 	O 8/ 8 8>?
+ ?
D 
-
) -

-
`Sbii S2	( 	1 ( 1 h;)BII ;)|768R 7(	0 		L 	tDn tDn `
 `
`
F	r@   