import math
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Optional, Tuple, Union

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch.nn.init import _calculate_fan_in_and_fan_out

from transformers.activations import ACT2FN
from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
from transformers.modeling_layers import GradientCheckpointingLayer
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from transformers.utils import ModelOutput, auto_docstring, can_return_tuple, logging
from transformers.cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache

from .configuration_siglip2 import Siglip2VisionConfig


logger = logging.get_logger(__name__)

def _trunc_normal_(tensor, mean, std, a, b):
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn(
            "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
            "The distribution of values may be incorrect.",
            stacklevel=2,
        )

    # Values are generated by using a truncated uniform distribution and
    # then using the inverse CDF for the normal distribution.
    # Get upper and lower cdf values
    l = norm_cdf((a - mean) / std)
    u = norm_cdf((b - mean) / std)

    # Uniformly fill tensor with values from [l, u], then translate to
    # [2l - 1, 2u - 1].
    tensor.uniform_(2 * l - 1, 2 * u - 1)

    # Use inverse cdf transform for normal distribution to get truncated
    # standard normal
    tensor.erfinv_()

    # Transform to proper mean, std
    tensor.mul_(std * math.sqrt(2.0))
    tensor.add_(mean)

    # Clamp to ensure it's in the proper range
    tensor.clamp_(min=a, max=b)

def trunc_normal_tf_(
    tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0
) -> torch.Tensor:
    """Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \\leq \\text{mean} \\leq b`.

    NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
    bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
    and the result is subsequently scaled and shifted by the mean and std args.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value
    """
    with torch.no_grad():
        _trunc_normal_(tensor, 0, 1.0, a, b)
        tensor.mul_(std).add_(mean)

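# Usage sketch (illustrative only, not part of the model): `trunc_normal_tf_` samples in [a, b]
# around a standard normal first and only then rescales, so with the default bounds the final
# values land in [mean - 2 * std, mean + 2 * std]:
#
#   w = torch.empty(768, 3 * 16 * 16)
#   trunc_normal_tf_(w, std=0.02)  # every entry of w now lies in [-0.04, 0.04]
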
def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == "fan_in":
        denom = fan_in
    elif mode == "fan_out":
        denom = fan_out
    elif mode == "fan_avg":
        denom = (fan_in + fan_out) / 2

    variance = scale / denom

    if distribution == "truncated_normal":
        # constant is stddev of standard normal truncated to (-2, 2)
        trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
    elif distribution == "normal":
        with torch.no_grad():
            tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        with torch.no_grad():
            tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")

def lecun_normal_(tensor):
    variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")

def default_flax_embed_init(tensor):
    variance_scaling_(tensor, mode="fan_in", distribution="normal")

def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies rotary position embeddings to the query and key tensors."""
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed

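# Shape note (illustrative): with q and k of shape (batch, num_heads, seq_len, head_dim) and
# cos/sin of shape (batch, seq_len, head_dim), the default unsqueeze_dim=1 inserts a singleton
# head axis so cos/sin broadcast over all heads; the outputs keep the shapes of q and k.
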
class Siglip2VisionEmbeddings(nn.Module):
    def __init__(self, config: Siglip2VisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.patch_size = config.patch_size

        self.patch_embedding = nn.Linear(
            in_features=config.num_channels * self.patch_size * self.patch_size,
            out_features=self.embed_dim,
        )

        self.num_patches = config.num_patches
        self.position_embedding_size = int(self.num_patches**0.5)
        self.position_embedding = nn.Embedding(self.num_patches, self.embed_dim)

    @staticmethod
    def resize_positional_embeddings(
        positional_embeddings: torch.Tensor,
        spatial_shapes: torch.LongTensor,
        max_length: int,
    ) -> torch.Tensor:
        """
        Resize positional embeddings to image-specific size and pad to a fixed size.

        Args:
            positional_embeddings (`torch.Tensor`):
                Position embeddings of shape (height, width, embed_dim)
            spatial_shapes (`torch.LongTensor`):
                Spatial shapes of shape (batch_size, 2) to resize the positional embeddings to
            max_length (`int`):
                Maximum length of the positional embeddings to pad resized positional embeddings to

        Returns:
            `torch.Tensor`: Embeddings of shape (batch_size, max_length, embed_dim)
        """
        batch_size = spatial_shapes.shape[0]
        embed_dim = positional_embeddings.shape[-1]
        source_dtype = positional_embeddings.dtype

        resulted_positional_embeddings = torch.empty(
            (batch_size, max_length, embed_dim),
            device=positional_embeddings.device,
            dtype=source_dtype,
        )

        # (height, width, embed_dim) -> (1, embed_dim, height, width) for interpolation
        positional_embeddings = positional_embeddings.permute(2, 0, 1).unsqueeze(0)

        # Upcast to float32 on CPU because antialiased bilinear interpolation is not supported
        # for half precision on CPU
        if positional_embeddings.device.type == "cpu":
            positional_embeddings = positional_embeddings.to(torch.float32)

        for i in range(batch_size):
            # (1, embed_dim, height, width) -> (1, embed_dim, target_height, target_width)
            height, width = spatial_shapes[i]
            resized_embeddings = F.interpolate(
                positional_embeddings,
                size=(height, width),
                mode="bilinear",
                align_corners=False,
                antialias=True,
            )

            # (1, embed_dim, target_height, target_width) -> (target_height * target_width, embed_dim)
            resized_embeddings = resized_embeddings.reshape(embed_dim, height * width).transpose(0, 1)

            # Cast back to the original dtype
            resized_embeddings = resized_embeddings.to(source_dtype)

            resulted_positional_embeddings[i, : height * width] = resized_embeddings
            resulted_positional_embeddings[i, height * width :] = resized_embeddings[0]

        return resulted_positional_embeddings

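    # Shape walkthrough (illustrative): with a 16x16 learned grid (config.num_patches == 256) and an
    # image whose patch grid is 8x12, the grid is bilinearly resized to 8*12 = 96 position vectors;
    # rows 96..max_length of that image's output are filled with the first resized vector as padding.
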
    def forward(self, pixel_values: torch.FloatTensor, spatial_shapes: torch.LongTensor) -> torch.Tensor:
        """
        Args:
            pixel_values (`torch.FloatTensor`):
                Pixel values of shape (batch_size, max_num_patches, num_channels * patch_size * patch_size)
            spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`):
                Spatial shapes (height, width in patches) used to resize the positional embeddings
        """
        # Apply patch embeddings to the already patchified pixel values
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))

        # Get positional embeddings resized to each image's patch grid and padded to the max length
        positional_embeddings = self.position_embedding.weight.reshape(
            self.position_embedding_size, self.position_embedding_size, -1
        )
        resized_positional_embeddings = self.resize_positional_embeddings(
            positional_embeddings, spatial_shapes, max_length=pixel_values.shape[1]
        )

        # Add positional embeddings to patch embeddings
        embeddings = patch_embeds + resized_positional_embeddings
        return embeddings

def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights

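# Shape note (illustrative): query/key/value arrive as (batch, num_heads, seq_len, head_dim), the
# additive attention_mask broadcasts against the (batch, num_heads, seq_len, seq_len) score matrix,
# and the output is returned transposed to (batch, seq_len, num_heads, head_dim) for merging heads.
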
class Siglip2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Siglip2VisionConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        self.is_causal = False

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        past_key_value: Optional[Cache] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        batch_size, seq_length, embed_dim = hidden_states.shape

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)

        if position_embeddings is not None:
            cos, sin = position_embeddings
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and output_attentions:
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            is_causal=self.is_causal,
            scaling=self.scale,
            dropout=0.0 if not self.training else self.dropout,
        )

        attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights


class Siglip2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states

class VisionRotaryEmbedding(nn.Module):
    def __init__(self, dim: int, theta: float = 10000.0) -> None:
        super().__init__()
        inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

    def forward(self, x: torch.Tensor, position_ids: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos, sin = emb.cos(), emb.sin()

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)

    def _apply(self, fn, recurse=True):
        # Keep `inv_freq` in its original dtype when the module is cast (e.g. `.half()`), only
        # following device moves, to preserve the numerical precision of the rotary frequencies.
        for key, buf in self._buffers.items():
            if buf is not None:
                value = self._buffers[key]
                value_ = fn(buf)
                self._buffers[key] = value.to(value_.device)

        return self

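# Usage note (illustrative): `VisionRotaryEmbedding(head_dim)` maps a (batch_size, seq_len) tensor of
# integer patch positions to cos/sin tables of shape (batch_size, seq_len, head_dim); these are the
# `position_embeddings` consumed by `apply_rotary_pos_emb` inside `Siglip2Attention`.
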
class Siglip2EncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Siglip2VisionConfig, layer_idx: int):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.self_attn = Siglip2Attention(config, layer_idx)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = Siglip2MLP(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        past_key_value: Optional[Cache] = None,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                Input to the layer of shape `(batch, seq_len, embed_dim)`.
            attention_mask (`torch.FloatTensor`):
                Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            position_embeddings=position_embeddings,
            past_key_value=past_key_value,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class Siglip2Encoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`Siglip2EncoderLayer`].

    Args:
        config: Siglip2VisionConfig
    """

    def __init__(self, config: Siglip2VisionConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList(
            [Siglip2EncoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.gradient_checkpointing = False

    @can_return_tuple
    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        past_key_value: Optional[Cache] = None,
    ) -> BaseModelOutput:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)

            layer_outputs = encoder_layer(
                hidden_states,
                attention_mask,
                output_attentions=output_attentions,
                position_embeddings=position_embeddings,
                past_key_value=past_key_value,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=encoder_states,
            attentions=all_attentions,
        )

class Siglip2VisionTransformer(nn.Module):
    def __init__(self, config: Siglip2VisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        head_dim = config.hidden_size // config.num_attention_heads
        self.rotary_pos_emb = VisionRotaryEmbedding(head_dim)

        self.embeddings = Siglip2VisionEmbeddings(config)
        self.encoder = Siglip2Encoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.use_head = True if not hasattr(config, "vision_use_head") else config.vision_use_head
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        attention_mask: torch.Tensor,
        spatial_shapes: torch.LongTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        past_key_value: Optional[Cache] = None,
    ) -> BaseModelOutputWithPooling:
        r"""
        spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`):
            Tensor containing the spatial dimensions (height, width) of the input images.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        hidden_states = self.embeddings(pixel_values, spatial_shapes)

        # Rotary position embeddings over the flattened patch sequence (assumption: sequential 1D patch
        # positions shared across the batch, matching `VisionRotaryEmbedding`'s (batch_size, seq_len)
        # `position_ids` interface).
        position_ids = torch.arange(hidden_states.shape[1], device=hidden_states.device).unsqueeze(0)
        position_ids = position_ids.expand(hidden_states.shape[0], -1)
        position_embeddings = self.rotary_pos_emb(hidden_states, position_ids)

        if attention_mask is not None and not self._use_flash_attention_2:
            # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
        else:
            encoder_attention_mask = attention_mask

        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            position_embeddings=position_embeddings,
            past_key_value=past_key_value,
        )

        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.post_layernorm(last_hidden_state)

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=None,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The vision model from Siglip2 without any head or projection on top.
    """
)
class Siglip2VisionModel(PreTrainedModel):
    config_class = Siglip2VisionConfig
    main_input_name = "pixel_values"
    base_model_prefix = "siglip2"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Siglip2EncoderLayer", "Siglip2VisionEmbeddings"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, Siglip2VisionEmbeddings):
            width = self.config.hidden_size
            nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
        elif isinstance(module, nn.Embedding):
            default_flax_embed_init(module.weight)
        elif isinstance(module, Siglip2Attention):
            nn.init.xavier_uniform_(module.q_proj.weight)
            nn.init.xavier_uniform_(module.k_proj.weight)
            nn.init.xavier_uniform_(module.v_proj.weight)
            nn.init.xavier_uniform_(module.out_proj.weight)
            nn.init.zeros_(module.q_proj.bias)
            nn.init.zeros_(module.k_proj.bias)
            nn.init.zeros_(module.v_proj.bias)
            nn.init.zeros_(module.out_proj.bias)
        elif isinstance(module, Siglip2MLP):
            nn.init.xavier_uniform_(module.fc1.weight)
            nn.init.xavier_uniform_(module.fc2.weight)
            nn.init.normal_(module.fc1.bias, std=1e-6)
            nn.init.normal_(module.fc2.bias, std=1e-6)
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            lecun_normal_(module.weight)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def __init__(self, config: Siglip2VisionConfig):
        super().__init__(config)

        self.vision_model = Siglip2VisionTransformer(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        pixel_attention_mask: torch.Tensor,
        spatial_shapes: torch.LongTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        past_key_value: Optional[Cache] = None,
    ) -> BaseModelOutputWithPooling:
        r"""
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, max_num_patches)`, *optional*):
            Mask to avoid performing attention on padding patch indices.
        spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`):
            Tensor containing the spatial dimensions (height, width) of the input images.

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Siglip2VisionModel

        >>> model = Siglip2VisionModel.from_pretrained("google/siglip2-base-patch16-224")
        >>> processor = AutoProcessor.from_pretrained("google/siglip2-base-patch16-224")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state  # patch features after the final layer norm
        ```"""
        return self.vision_model(
            pixel_values=pixel_values,
            attention_mask=pixel_attention_mask,
            spatial_shapes=spatial_shapes,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            past_key_value=past_key_value,
        )