
from typing import Optional, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationConfig
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...processing_utils import Unpack
from ...utils import auto_docstring, can_return_tuple, logging
from ..idefics3.configuration_idefics3 import Idefics3Config, Idefics3VisionConfig
from ..idefics3.image_processing_idefics3 import Idefics3ImageProcessor
from ..idefics3.image_processing_idefics3_fast import Idefics3ImageProcessorFast
from ..idefics3.modeling_idefics3 import (
    Idefics3BaseModelOutputWithPast,
    Idefics3ForConditionalGeneration,
    Idefics3Model,
    Idefics3PreTrainedModel,
    Idefics3VisionTransformer,
)


logger = logging.get_logger(__name__)


class SmolVLMVisionConfig(Idefics3VisionConfig):
    r"""
    This is the configuration class to store the configuration of a [`SmolVLMVisionModel`]. It is used to instantiate a
    SmolVLM vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
    [google/siglip-so400m-patch14-384](https://huggingface.co/google/siglip-so400m-patch14-384) used in SmolVLM
    [HuggingFaceTB/SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1152):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers.models.smolvlm.modeling_smolvlm import SmolVLMVisionTransformer
    >>> from transformers.models.smolvlm.configuration_smolvlm import SmolVLMVisionConfig

    >>> # Initializing a SmolVLMVisionConfig with google/siglip-so400m-patch14-384 style configuration
    >>> configuration = SmolVLMVisionConfig()

    >>> # Initializing a SmolVLMVisionTransformer (with random weights) from the google/siglip-so400m-patch14-384 style configuration
    >>> model = SmolVLMVisionTransformer(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "smolvlm_vision"


class SmolVLMPreTrainedModel(Idefics3PreTrainedModel):
    pass


class SmolVLMVisionTransformer(Idefics3VisionTransformer):
    pass


class SmolVLMConfig(Idefics3Config):
    r"""
    This is the configuration class to store the configuration of a [`SmolVLMModel`]. It is used to instantiate a
    SmolVLM model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the model of the SmolVLM
    [HuggingFaceTB/SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should cache the key/value pairs of the attention mechanism. Only
            relevant if `config.is_decoder=True`.
        image_token_id (`int`, *optional*, defaults to 128257):
            The id of the "image" token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to tie the word embeddings with the token embeddings.
        vision_config (`SmolVLMVisionConfig` or `dict`, *optional*, defaults to `SmolVLMVisionConfig`):
            Custom vision config or dict for the vision tower.
        text_config (`PretrainedConfig` or `dict`, *optional*, defaults to `LlamaConfig`):
            Custom text config or dict for the text model.
        scale_factor (`int`, *optional*, defaults to 2):
            The scale factor for the image encoder.
        pad_token_id (`int`, *optional*, defaults to 128002):
            The id of the padding token.

    Example:
    ```python
    >>> from transformers import SmolVLMModel, SmolVLMConfig
    >>> # Initializing configuration
    >>> configuration = SmolVLMConfig()
    >>> # Initializing a model from the configuration
    >>> model = SmolVLMModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "smolvlm"


class SmolVLMImageProcessor(Idefics3ImageProcessor):
    pass


class SmolVLMImageProcessorFast(Idefics3ImageProcessorFast):
    pass


class SmolVLMBaseModelOutputWithPast(Idefics3BaseModelOutputWithPast):
    pass


class SmolVLMModel(Idefics3Model):
    """
    A subclass of Idefics3Model. We do *not* remove or block the call to inputs_merger
    in forward. Instead, we override inputs_merger here with custom logic.
    """

    def inputs_merger(
        self, input_ids: torch.LongTensor, inputs_embeds: torch.Tensor, image_hidden_states: torch.Tensor
    ):
        _, patch_size, _ = image_hidden_states.shape

        if input_ids is None:
            image_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            image_mask = image_mask[..., 0]
        else:
            image_mask = input_ids == self.config.image_token_id

        num_image_tokens = image_mask.sum(dim=1)
        if not torch.all(num_image_tokens % patch_size == 0):
            raise ValueError("At least one sample has <image> tokens not divisible by patch_size.")

        blocks_per_sample = num_image_tokens // patch_size

        offsets = torch.nn.functional.pad(blocks_per_sample.cumsum(dim=0), (1, 0), value=0)
        block_offset = offsets[:-1]
        row_cum = image_mask.cumsum(dim=-1)
        chunk_idx = (row_cum - 1) // patch_size
        local_idx = (row_cum - 1) % patch_size
        block_idx = block_offset.unsqueeze(1) + chunk_idx

        image_embeds = torch.zeros_like(inputs_embeds)
        image_embeds[image_mask] = image_hidden_states[block_idx[image_mask], local_idx[image_mask], :]

        merged_embeds = torch.where(image_mask.unsqueeze(-1), image_embeds, inputs_embeds)
        return merged_embeds

    def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: torch.LongTensor = None):
        """
        Encodes images into continuous embeddings that can be forwarded to the language model.

        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
                The tensors corresponding to the input images.
            pixel_attention_mask (`torch.LongTensor`, *optional*):
                The attention mask indicating padded regions in the image.
        """
        batch_size, num_images, num_channels, height, width = pixel_values.shape
        pixel_values = pixel_values.to(dtype=self.dtype)  # fp16 compatibility
        pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])

        # Remove padding images - padding images are full 0.
        nb_values_per_image = pixel_values.shape[1:].numel()
        real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image
        if not any(real_images_inds):
            # no images, leave one empty image.
            real_images_inds[0] = True

        pixel_values = pixel_values[real_images_inds].contiguous()

        # Handle the vision attention mask
        if pixel_attention_mask is None:
            pixel_attention_mask = torch.ones(
                size=[pixel_values.shape[i] for i in (0, 2, 3)],
                dtype=torch.bool,
                device=pixel_values.device,
            )
        else:
            # Remove padding images from the mask
            pixel_attention_mask = pixel_attention_mask.view(batch_size * num_images, *pixel_attention_mask.shape[2:])
            pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()

        patch_size = self.config.vision_config.patch_size
        patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)
        patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
        patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()

        # Get sequence from the vision encoder
        image_hidden_states = self.vision_model(
            pixel_values=pixel_values, patch_attention_mask=patch_attention_mask
        ).last_hidden_state

        # Modality projection & resampling
        image_hidden_states = self.connector(image_hidden_states)
        return image_hidden_states

    @can_return_tuple
    @auto_docstring(
        custom_intro="""
        Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to
        the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where
        max_num_images is the maximum number of images among the batch_size samples in the batch.
        Padding images are not needed beyond padding the pixel_values at the entrance of the model.
        For efficiency, we only pass through the vision_model's forward the real images by
        discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where
        image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
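        As a concrete shape example derived from the case above: with num_images_per_sample=[1, 3, 1, 2], the padded
        input is pixel_values of shape (4, 3, 3, max_heights, max_widths), and only the 7 real images are run
        through the vision encoder.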
        """
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_attention_mask: Optional[torch.BoolTensor] = None,
        image_hidden_states: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[tuple, SmolVLMBaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.training and self.text_model.gradient_checkpointing and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
            )
            use_cache = False

        if input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if inputs_embeds is None:
            inputs_embeds = self.text_model.get_input_embeddings()(input_ids).to(input_ids.device)

        if pixel_values is not None and image_hidden_states is not None:
            raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time")
        elif pixel_values is not None:
            image_hidden_states = self.get_image_features(pixel_values, pixel_attention_mask).to(inputs_embeds.device)
        elif image_hidden_states is not None:
            image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device)

        if inputs_embeds is not None and image_hidden_states is not None:
            inputs_embeds = self.inputs_merger(
                input_ids=input_ids,
                inputs_embeds=inputs_embeds,
                image_hidden_states=image_hidden_states,
            )

        outputs = self.text_model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **kwargs,
        )

        return SmolVLMBaseModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_hidden_states,
        )


class SmolVLMForConditionalGeneration(Idefics3ForConditionalGeneration):
    def __init__(self, config):
        super().__init__(config)
        self.model = SmolVLMModel(config)
        self.model.generation_config = GenerationConfig.from_model_config(config)
        self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
        self.post_init()

    def forward(self, **super_kwargs):
        r"""
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The hidden states of the image encoder after modality projection.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or `model.image_token_id`. Tokens with indices set to `model.image_token_id` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> import requests
        >>> import torch
        >>> from PIL import Image
        >>> from io import BytesIO

        >>> from transformers import AutoProcessor, AutoModelForImageTextToText
        >>> from transformers.image_utils import load_image

        >>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
        >>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
        >>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
        >>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")

        >>> processor = AutoProcessor.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B-Instruct")
        >>> model = AutoModelForImageTextToText.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B-Instruct", dtype=torch.bfloat16, device_map="auto")

        >>> # Create inputs
        >>> messages = [
        ...     {
        ...         "role": "user",
        ...         "content": [
        ...             {"type": "video", "path": path/to/video},
        ...             {"type": "text", "text": "What is happening in this video?"},
        ...         ]
        ...     }
        ... ]

        >>> inputs = processor.apply_chat_template([messages], add_generation_prompt=True)

        >>> # Generate
        >>> generated_ids = model.generate(**inputs, max_new_tokens=256)
        >>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)

        >>> print(generated_texts)
        ```"""
        super().forward(**super_kwargs)


__all__ = [
    "SmolVLMVisionConfig",
    "SmolVLMConfig",
    "SmolVLMImageProcessor",
    "SmolVLMImageProcessorFast",
    "SmolVLMForConditionalGeneration",
    "SmolVLMPreTrainedModel",
    "SmolVLMModel",
    "SmolVLMVisionTransformer",
]