diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index c3036b8a3973..ba043d3dc738 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -1018,6 +1018,8 @@ title: EdgeTamVideo - local: model_doc/emu3 title: Emu3 + - local: model_doc/ernie4_5_vl + title: Ernie4_5_VL - local: model_doc/evolla title: Evolla - local: model_doc/flava diff --git a/docs/source/en/model_doc/ernie4_5_vl.md b/docs/source/en/model_doc/ernie4_5_vl.md new file mode 100644 index 000000000000..052163f61a94 --- /dev/null +++ b/docs/source/en/model_doc/ernie4_5_vl.md @@ -0,0 +1,97 @@ +
+<div style="float: right;">
+    <div class="flex flex-wrap space-x-1">
+        <!-- badge images: PyTorch, FlashAttention, SDPA -->
+    </div>
+</div>
+ +# ernie4_5_vl + +## Overview + +The ernie4_5_vl model was proposed in []() by . + + +The abstract from the paper is the following: + +In this report, we propose PaddleOCR-VL, a SOTA and resource-efficient model tailored for document parsing. Its core component is PaddleOCR-VL-0.9B, a compact yet powerful vision-language model (VLM) that integrates a NaViT-style dynamic resolution visual encoder with the ERNIE-4.5-0.3B language model to enable accurate element recognition. This innovative model efficiently supports 109 languages and excels in recognizing complex elements (e.g., text, tables, formulas, and charts), while maintaining minimal resource consumption. Through comprehensive evaluations on widely used public benchmarks and in-house benchmarks, PaddleOCR-VL achieves SOTA performance in both page-level document parsing and element-level recognition. It significantly outperforms existing solutions, exhibits strong competitiveness against top-tier VLMs, and delivers fast inference speeds. These strengths make it highly suitable for practical deployment in real-world scenarios. + +Tips: + + + +This model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/). +The original code can be found [here](). + + +## Ernie4_5_VLConfig + +[[autodoc]] Ernie4_5_VLConfig + +## Ernie4_5_VLTextConfig + +[[autodoc]] Ernie4_5_VLTextConfig + +## Ernie4_5_VLVisionConfig + +[[autodoc]] Ernie4_5_VLVisionConfig + +## Ernie4_5_VLImageProcessor + +[[autodoc]] Ernie4_5_VLImageProcessor + - preprocess + +## Ernie4_5_VLImageProcessorFast + +[[autodoc]] Ernie4_5_VLImageProcessorFast + - preprocess + +## Ernie4_5_VLVideoProcessor + +[[autodoc]] Ernie4_5_VLVideoProcessor + - preprocess + +## Ernie4_5_VLProcessor + +[[autodoc]] Ernie4_5_VLProcessor + +## Ernie4_5_VLTextModel + +[[autodoc]] Ernie4_5_VLTextModel + - forward + +## Ernie4_5_VLVisionTransformerPretrainedModel + +[[autodoc]] Ernie4_5_VLVisionTransformerPretrainedModel + - forward + +## Ernie4_5_VLVariableResolutionResamplerModel + +[[autodoc]] Ernie4_5_VLVariableResolutionResamplerModel + - forward + +## Ernie4_5_VLModel + +[[autodoc]] Ernie4_5_VLModel + - forward + +## Ernie4_5_VLForConditionalGeneration + +[[autodoc]] Ernie4_5_VLForConditionalGeneration + - forward diff --git a/src/transformers/conversion_mapping.py b/src/transformers/conversion_mapping.py index b6aad7e94650..eb9ebf911b25 100644 --- a/src/transformers/conversion_mapping.py +++ b/src/transformers/conversion_mapping.py @@ -15,7 +15,14 @@ from copy import deepcopy -from .core_model_loading import Concatenate, MergeModulelist, WeightConverter, WeightRenaming +from .core_model_loading import ( + Chunk, + Concatenate, + MergeModulelist, + ModulelistSplitAndFuse, + WeightConverter, + WeightRenaming, +) from .utils import is_torch_available @@ -67,6 +74,50 @@ def _build_checkpoint_conversion_mapping(): operations=[MergeModulelist(dim=0)], ), ], + "ernie4_5_vl": [ + # vision + WeightRenaming("^vision_model", "model.vision_tower"), + # resampler + WeightRenaming("resampler_model.spatial_linear.0", "resampler_model.spatial_linear.fc1"), + WeightRenaming("resampler_model.spatial_linear.2", "resampler_model.spatial_linear.fc2"), + WeightRenaming("resampler_model.spatial_linear.3", "resampler_model.spatial_linear.ln"), + WeightRenaming("resampler_model.temporal_linear.0", "resampler_model.temporal_linear.fc1"), + WeightRenaming("resampler_model.temporal_linear.2", "resampler_model.temporal_linear.fc2"), + WeightRenaming("resampler_model.temporal_linear.3", 
"resampler_model.temporal_linear.ln"), + # language model + WeightRenaming("^model.norm", "model.language_model.norm"), + WeightRenaming("^model.embed_tokens", "model.language_model.embed_tokens"), + WeightRenaming("^model.layers", "model.language_model.layers"), + WeightRenaming("mlp.gate.weight", "mlp.text_moe.gate.weight"), + WeightRenaming("mlp.gate.weight_1", "mlp.vision_moe.gate.weight"), + WeightConverter( + source_keys=["mlp.moe_statics.e_score_correction_bias"], + target_keys=[ + "mlp.text_moe.gate.moe_statics.e_score_correction_bias", + "mlp.vision_moe.gate.moe_statics.e_score_correction_bias", + ], + operations=[Chunk(dim=0, chunks=2)], + ), + WeightConverter( + source_keys=["experts.*.down_proj.weight"], + target_keys=[ + "text_moe.experts.down_proj", + "vision_moe.experts.down_proj", + ], + operations=[ModulelistSplitAndFuse(stack_dim=0, concat_dim=1)], + ), + WeightConverter( + source_keys=[ + "experts.*.gate_proj.weight", + "experts.*.up_proj.weight", + ], + target_keys=[ + "text_moe.experts.gate_up_proj", + "vision_moe.experts.gate_up_proj", + ], + operations=[ModulelistSplitAndFuse(stack_dim=0, concat_dim=1)], + ), + ], "legacy": [ WeightRenaming( source_keys="LayerNorm.gamma", diff --git a/src/transformers/core_model_loading.py b/src/transformers/core_model_loading.py index fecaf6f39fd7..8869780a611e 100644 --- a/src/transformers/core_model_loading.py +++ b/src/transformers/core_model_loading.py @@ -16,14 +16,17 @@ from __future__ import annotations +import math import os import re from abc import abstractmethod +from collections import defaultdict from collections.abc import MutableMapping, MutableSet, Sequence from concurrent.futures import Future, ThreadPoolExecutor from contextlib import contextmanager from copy import deepcopy from dataclasses import dataclass, field +from itertools import chain from typing import TYPE_CHECKING, Any, Optional, Union import torch @@ -268,6 +271,118 @@ def convert( return output +class ModulelistSplitAndFuse(ConversionOps): + """ + Special operation that splits a module list over all keys and fuses over the number of original modules. + + Example with 2 original modules "Gate" and "Up" with 2 target keys "Text" and "Vision": + + ModuleList 1 ModuleList 2 + [ Gate ] [ Up ] + | | | | + [Gate_Text] [Gate_Vision] [Up_Text] [Up_Vision] + \ \ / / + \ \ / / + \ / \ / + \ / \ / + [GateUp_Text] [GateUp_Vision] + + The splits are equal and are defined by the amount of target keys. + The final fusions are defined by the amount of original module lists. 
+ """ # noqa: W605 + + def __init__(self, stack_dim: int = 0, concat_dim: int = 1): + self.stack_dim = stack_dim + self.concat_dim = concat_dim + self.reverse_op = ModulelistSplitAndDecouple + + def split_list_into_chunks(self, tensor_list: list[torch.Tensor], chunks: int = 2): + split_size = math.ceil(len(tensor_list) / chunks) # best effort split size + return [tensor_list[i * split_size : (i + 1) * split_size] for i in range(chunks)] + + @torch.no_grad() + def convert( + self, + value: dict[str, list[torch.Tensor]], + source_keys: list[str], + target_keys: list[str], + full_layer_name: str, + config, + ) -> dict[str, list[torch.Tensor]]: + layer_name_prefix = full_layer_name.removesuffix( + target_keys[0] + ) # full layer name is based on first best match + split_layer_names = [layer_name_prefix + key for key in target_keys] + + split_and_fused = defaultdict(list) + for key in value.keys(): + tensors = value.get(key, []) + + split_tensor_lists = self.split_list_into_chunks(tensors, chunks=len(split_layer_names)) + stacked_tensors = (torch.stack(tensor_group, dim=self.stack_dim) for tensor_group in split_tensor_lists) + for idx, tensor_group in enumerate(stacked_tensors): + split_and_fused[split_layer_names[idx]].append(tensor_group) + + for k, v in split_and_fused.items(): + split_and_fused[k] = torch.cat(v, dim=self.concat_dim) + + return split_and_fused + + +class ModulelistSplitAndDecouple(ConversionOps): + """ + Special operation that splits a fused module list over all original modules and + then decouples them into a mixed module list each over all keys. + + Example with 2 original modules "Gate" and "Up" with 2 target keys "Text" and "Vision": + + [GateUp_Text] [GateUp_Vision] + / \ / \ + / \ / \ + / / \ \ + / / \ \ + [Gate_Text] [Gate_Vision] [Up_Text] [Up_Vision] + | | | | + [ Gate ] [ Up ] + ModuleList 1 ModuleList 2 + + The splits are equal and are defined by the amount of original module lists. + The final decoupled module lists are defined by the amount of keys. + """ # noqa: W605 + + def __init__(self, stack_dim: int = 0, concat_dim: int = 1): + self.stack_dim = stack_dim + self.concat_dim = concat_dim + self.reverse_op = ModulelistSplitAndFuse + + @torch.no_grad() + def convert( + self, + value: dict[str, list[torch.Tensor]], + source_keys: list[str], + target_keys: list[str], + full_layer_name: str, + config, + ) -> dict[str, list[torch.Tensor]]: + # TODO: check how reverse ops interacts here + layer_name_prefix = full_layer_name.removesuffix( + target_keys[0] + ) # full layer name is based on first best match + decoupled_layer_names = [layer_name_prefix + key for key in target_keys] + + fused_modules = len(target_keys) + split_tensors = [value[key].chunk(fused_modules, dim=self.concat_dim) for key in value.keys()] + + decoupled = {} + for idx, key in enumerate(decoupled_layer_names): + tensor_groups = [ + list(torch.unbind(tensor_group[idx], dim=self.stack_dim)) for tensor_group in split_tensors + ] + decoupled[key] = list(chain.from_iterable(tensor_groups)) + + return decoupled + + @dataclass(slots=True) class WeightTransform: source_keys: Union[str, list[str]] = field(init=True) @@ -325,9 +440,7 @@ class WeightConverter(WeightTransform): def __post_init__(self): WeightTransform.__post_init__(self) if bool(len(self.source_keys) - 1) + bool(len(self.target_keys) - 1) >= 2: - raise ValueError( - f"source keys={self.source_keys}, target_keys={self.target_keys} but you can only have one to many, one to one or many to one." 
- ) + logger.warning_once("Many-to-many conversions are risky; use at your own risk.") if not self.operations: raise ValueError("WeightConverter requires at least one operation.") diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index c55980e471c7..8a394e47b12b 100644 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -141,6 +141,7 @@ ("ernie", "ErnieConfig"), ("ernie4_5", "Ernie4_5Config"), ("ernie4_5_moe", "Ernie4_5_MoeConfig"), + ("ernie4_5_vl", "Ernie4_5_VLConfig"), ("esm", "EsmConfig"), ("evolla", "EvollaConfig"), ("exaone4", "Exaone4Config"), @@ -573,6 +574,7 @@ ("ernie", "ERNIE"), ("ernie4_5", "Ernie4_5"), ("ernie4_5_moe", "Ernie4_5_MoE"), + ("ernie4_5_vl", "Ernie4_5_VL"), ("esm", "ESM"), ("evolla", "Evolla"), ("exaone4", "EXAONE-4.0"), diff --git a/src/transformers/models/auto/image_processing_auto.py b/src/transformers/models/auto/image_processing_auto.py index 2644741006dd..e03aa00509a2 100644 --- a/src/transformers/models/auto/image_processing_auto.py +++ b/src/transformers/models/auto/image_processing_auto.py @@ -100,6 +100,7 @@ ("efficientnet", ("EfficientNetImageProcessor", "EfficientNetImageProcessorFast")), ("emu3", ("Emu3ImageProcessor", None)), ("eomt", ("EomtImageProcessor", "EomtImageProcessorFast")), + ("ernie4_5_vl", ("Ernie4_5_VLImageProcessor", "Ernie4_5_VLImageProcessorFast")), ("flava", ("FlavaImageProcessor", "FlavaImageProcessorFast")), ("florence2", ("CLIPImageProcessor", "CLIPImageProcessorFast")), ("focalnet", ("BitImageProcessor", "BitImageProcessorFast")), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 22985f413341..14ea9ce8d627 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -144,6 +144,7 @@ class _BaseModelWithGenerate(PreTrainedModel, GenerationMixin): ("ernie", "ErnieModel"), ("ernie4_5", "Ernie4_5Model"), ("ernie4_5_moe", "Ernie4_5_MoeModel"), + ("ernie4_5_vl", "Ernie4_5_VLModel"), ("esm", "EsmModel"), ("evolla", "EvollaModel"), ("exaone4", "Exaone4Model"), @@ -985,6 +986,7 @@ class _BaseModelWithGenerate(PreTrainedModel, GenerationMixin): ("deepseek_vl", "DeepseekVLForConditionalGeneration"), ("deepseek_vl_hybrid", "DeepseekVLHybridForConditionalGeneration"), ("emu3", "Emu3ForConditionalGeneration"), + ("ernie4_5_vl", "Ernie4_5_VLForConditionalGeneration"), ("evolla", "EvollaForProteinText2Text"), ("florence2", "Florence2ForConditionalGeneration"), ("fuyu", "FuyuForCausalLM"), diff --git a/src/transformers/models/auto/processing_auto.py b/src/transformers/models/auto/processing_auto.py index 9e6f4e66ff4d..a790acb0b52b 100644 --- a/src/transformers/models/auto/processing_auto.py +++ b/src/transformers/models/auto/processing_auto.py @@ -68,6 +68,7 @@ ("dia", "DiaProcessor"), ("edgetam", "Sam2Processor"), ("emu3", "Emu3Processor"), + ("ernie4_5_vl", "Ernie4_5_VLProcessor"), ("evolla", "EvollaProcessor"), ("flava", "FlavaProcessor"), ("florence2", "Florence2Processor"), diff --git a/src/transformers/models/auto/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py index 5edda2f5be8c..8bf8c68e5544 100644 --- a/src/transformers/models/auto/tokenization_auto.py +++ b/src/transformers/models/auto/tokenization_auto.py @@ -239,6 +239,7 @@ ("ernie", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), ("ernie4_5", (None, "LlamaTokenizerFast" if 
is_tokenizers_available() else None)), ("ernie4_5_moe", (None, "LlamaTokenizerFast" if is_tokenizers_available() else None)), + ("ernie4_5_vl", (None, "LlamaTokenizerFast" if is_tokenizers_available() else None)), ("esm", ("EsmTokenizer", None)), ("evolla", (None, "LlamaTokenizerFast" if is_tokenizers_available() else None)), ( diff --git a/src/transformers/models/auto/video_processing_auto.py b/src/transformers/models/auto/video_processing_auto.py index d130230345d1..2192c4009861 100644 --- a/src/transformers/models/auto/video_processing_auto.py +++ b/src/transformers/models/auto/video_processing_auto.py @@ -53,6 +53,7 @@ else: VIDEO_PROCESSOR_MAPPING_NAMES = OrderedDict( [ + ("ernie4_5_vl", "Ernie4_5_VLVideoProcessor"), ("glm46v", "Glm46VVideoProcessor"), ("glm4v", "Glm4vVideoProcessor"), ("instructblip", "InstructBlipVideoVideoProcessor"), @@ -322,7 +323,12 @@ def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs): trust_remote_code = kwargs.pop("trust_remote_code", None) kwargs["_from_auto"] = True - config_dict, _ = BaseVideoProcessor.get_video_processor_dict(pretrained_model_name_or_path, **kwargs) + config_dict, processed_kwargs = BaseVideoProcessor.get_video_processor_dict( + pretrained_model_name_or_path, **kwargs + ) + # Specific models need the original path for modification in `from_dict`, e.g. see `Ernie 4.5 VL` with fonts + kwargs["resolved_file_path"] = processed_kwargs.get("resolved_file_path") + video_processor_class = config_dict.get("video_processor_type", None) video_processor_auto_map = None if "AutoVideoProcessor" in config_dict.get("auto_map", {}): diff --git a/src/transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py b/src/transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py index ccd05fe26347..3bd1cd77c2e9 100644 --- a/src/transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py +++ b/src/transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py @@ -388,8 +388,8 @@ def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tens routing_weights = routing_weights / torch.clamp( routing_weights.sum(dim=-1, keepdim=True), min=self.norm_min ) - routing_weights = routing_weights.to(router_logits.dtype) - return routing_weights, selected_experts + routing_weights = routing_weights.to(hidden_states.dtype) + return routing_weights, selected_experts, router_logits class Ernie4_5_MoeSparseMoeBlock(nn.Module): @@ -412,7 +412,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if self.shared_experts is not None: shared_output = self.shared_experts(hidden_states) - routing_weights, selected_experts = self.gate(hidden_states) + routing_weights, selected_experts, _ = self.gate(hidden_states) final_hidden_states = self.experts(hidden_states, selected_experts, routing_weights) if self.shared_experts is not None: diff --git a/src/transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py b/src/transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py index 48c43140ba48..da00115c4c7e 100644 --- a/src/transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py +++ b/src/transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py @@ -170,8 +170,8 @@ def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tens routing_weights = routing_weights / torch.clamp( routing_weights.sum(dim=-1, keepdim=True), min=self.norm_min ) - routing_weights = routing_weights.to(router_logits.dtype) - return routing_weights, selected_experts + routing_weights = routing_weights.to(hidden_states.dtype) + return routing_weights, 
selected_experts, router_logits class Ernie4_5_MoeSparseMoeBlock(nn.Module): @@ -194,7 +194,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if self.shared_experts is not None: shared_output = self.shared_experts(hidden_states) - routing_weights, selected_experts = self.gate(hidden_states) + routing_weights, selected_experts, _ = self.gate(hidden_states) final_hidden_states = self.experts(hidden_states, selected_experts, routing_weights) if self.shared_experts is not None: diff --git a/src/transformers/models/ernie4_5_vl/__init__.py b/src/transformers/models/ernie4_5_vl/__init__.py new file mode 100644 index 000000000000..b77a97a4b65f --- /dev/null +++ b/src/transformers/models/ernie4_5_vl/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure + + +if TYPE_CHECKING: + from .configuration_ernie4_5_vl import * + from .modeling_ernie4_5_vl import * + from .processing_ernie4_5_vl import * +else: + import sys + + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/ernie4_5_vl/configuration_ernie4_5_vl.py b/src/transformers/models/ernie4_5_vl/configuration_ernie4_5_vl.py new file mode 100644 index 000000000000..b4d53b3a478c --- /dev/null +++ b/src/transformers/models/ernie4_5_vl/configuration_ernie4_5_vl.py @@ -0,0 +1,305 @@ +# coding=utf-8 +# Copyright 2025 Baidu and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Ernie4.5-VL model configuration""" + +from ...configuration_utils import PreTrainedConfig +from ...modeling_rope_utils import rope_config_validation, standardize_rope_params + + +class Ernie4_5_VLVisionConfig(PreTrainedConfig): + r""" + This is the configuration class to store the configuration of the [`Ernie4_5_VLVisionTransformerPretrainedModel`] and the + [`Ernie4_5_VLVariableResolutionResamplerModel`]. It is used to instantiate the vision models portion of the complete + Ernie4.5-VL model according to the specified arguments, defining the model architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. 
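+
+    Example (a minimal sketch using the default values documented below; assumes the config is
+    exported from the top-level `transformers` namespace as listed in this module's `__all__`):
+
+    ```python
+    >>> from transformers import Ernie4_5_VLVisionConfig
+
+    >>> # Initializing a vision configuration with the default values
+    >>> configuration = Ernie4_5_VLVisionConfig()
+
+    >>> # Individual fields can be overridden at construction time, e.g. the patch size
+    >>> configuration = Ernie4_5_VLVisionConfig(patch_size=16)
+    ```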
+ + Args: + depth (`int`, *optional*, defaults to 32): + Number of layers (depth) in the model. + hidden_size (`int`, *optional*, defaults to 1280): + Dimensionality of the encoder layers and the pooler layer. + hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. + intermediate_size (`int`, *optional*, defaults to 5120): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + num_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer encoder. + in_channels (`int`, *optional*, defaults to 3): + The number of input channels. + patch_size (`int`, *optional*, defaults to 14): + The size (resolution) of each patch. + spatial_merge_size (`int`, *optional*, defaults to 2): + The size used for merging spatial dimensions. + temporal_merge_size (`int`, *optional*, defaults to 2): + The size used for merge along the temporal dimension. + rms_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the rms normalization layers. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + """ + + model_type = "ernie4_5_vl_vision" + base_config_key = "vision_config" + + def __init__( + self, + depth=32, + hidden_size=1280, + hidden_act="quick_gelu", + intermediate_size=4 * 1280, + num_heads=16, + in_channels=3, + patch_size=14, + spatial_merge_size=2, + temporal_merge_size=2, + rms_norm_eps=1e-6, + initializer_range=0.02, + **kwargs, + ): + super().__init__(**kwargs) + + self.depth = depth + self.hidden_size = hidden_size + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.num_heads = num_heads + self.in_channels = in_channels + self.patch_size = patch_size + self.spatial_merge_size = spatial_merge_size + self.temporal_merge_size = temporal_merge_size + self.rms_norm_eps = rms_norm_eps + self.initializer_range = initializer_range + + +class Ernie4_5_VLTextConfig(PreTrainedConfig): + r""" + This is the configuration class to store the configuration of a [`Ernie4_5_VLTextModel`]. It is used to instantiate a + the text model portion of the complete Ernie4.5-VL model according to the specified arguments, defining the model architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + vocab_size (`int`, *optional*, defaults to 103424): + Vocabulary size of the Ernie 4.5 VL model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`Ernie4_5_VLTextModel`] + hidden_size (`int`, *optional*, defaults to 2560): + Dimension of the hidden representations. + intermediate_size (`int`, *optional*, defaults to 12288): + Dimension of the MLP representations. + num_hidden_layers (`int`, *optional*, defaults to 28): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 20): + Number of attention heads for each attention layer in the Transformer encoder. + num_key_value_heads (`int`, *optional*, defaults to 4): + This is the number of key_value heads that should be used to implement Grouped Query Attention. 
If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1` the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details, check out [this
+            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `4`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to 131072):
+            The maximum sequence length that this model might ever be used with.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        use_bias (`bool`, *optional*, defaults to `False`):
+            Whether to use a bias in any of the projections, for example in the MLP and attention layers.
+        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
+            Whether the model's input and output word embeddings should be tied.
+        rope_parameters (`RopeParameters`, *optional*):
+            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
+            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
+            with longer `max_position_embeddings`.
+        moe_intermediate_size (`list[int]`, *optional*, defaults to `[1536, 512]`):
+            Intermediate size of the routed experts; differs between text (first) and image (second) experts.
+        moe_k (`int`, *optional*, defaults to 6):
+            Number of selected experts.
+        moe_num_experts (`int`, *optional*, defaults to 64):
+            Number of routed experts.
+        moe_num_shared_experts (`int`, *optional*, defaults to 2):
+            The number of shared experts that are used in every MoE forward pass.
+        moe_layer_start_index (`int`, *optional*, defaults to 1):
+            The first index at which MoE layers start to appear.
+        moe_layer_end_index (`int`, *optional*, defaults to 29):
+            The last possible index for a MoE layer.
+        moe_layer_interval (`int`, *optional*, defaults to 1):
+            The interval at which MoE layers appear.
+        moe_norm_min (`float`, *optional*, defaults to 1e-12):
+            Minimum value of the denominator when normalizing routing weights.
+        output_router_logits (`bool`, *optional*, defaults to `False`):
+            Whether or not the router logits should be returned by the model. Enabling this will also
+            allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
+        router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
+            The aux loss factor for the total loss.
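+
+    Example (a minimal usage sketch, mirroring the composite `Ernie4_5_VLConfig` example below;
+    assumes `Ernie4_5_VLTextModel` is exported as documented in the model doc page):
+
+    ```python
+    >>> from transformers import Ernie4_5_VLTextModel, Ernie4_5_VLTextConfig
+
+    >>> # Initializing an Ernie 4.5 VL text configuration with default values
+    >>> configuration = Ernie4_5_VLTextConfig()
+
+    >>> # Initializing the text model (with random weights) from that configuration
+    >>> model = Ernie4_5_VLTextModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```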
+ """ + + model_type = "ernie4_5_vl_text" + base_config_key = "text_config" + attribute_map = {"num_experts": "moe_num_experts", "num_experts_per_tok": "moe_k"} + + def __init__( + self, + vocab_size=103424, + hidden_size=2560, + intermediate_size=12288, + num_hidden_layers=28, + num_attention_heads=20, + num_key_value_heads=4, + hidden_act="silu", + max_position_embeddings=131072, + initializer_range=0.02, + rms_norm_eps=1e-5, + use_cache=True, + use_bias=False, + tie_word_embeddings=True, + rope_parameters=None, + moe_intermediate_size=None, + moe_k=6, + moe_num_experts=64, + moe_num_shared_experts=2, + moe_layer_start_index=1, + moe_layer_end_index=29, + moe_layer_interval=1, + moe_norm_min=1e-12, + output_router_logits=False, + router_aux_loss_coef=0.001, + **kwargs, + ): + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.num_key_value_heads = num_key_value_heads + self.hidden_act = hidden_act + self.max_position_embeddings = max_position_embeddings + self.initializer_range = initializer_range + self.rms_norm_eps = rms_norm_eps + self.use_cache = use_cache + self.use_bias = use_bias + self.rope_parameters = rope_parameters + standardize_rope_params(self) + rope_config_validation(self, ignore_keys={"mrope_section"}) + self.moe_intermediate_size = moe_intermediate_size + if self.moe_intermediate_size is None: + self.moe_intermediate_size = [1536, 512] + self.moe_k = moe_k + self.moe_num_experts = moe_num_experts + self.moe_num_shared_experts = moe_num_shared_experts + self.moe_layer_start_index = moe_layer_start_index + self.moe_layer_end_index = moe_layer_end_index + self.moe_layer_interval = moe_layer_interval + self.moe_norm_min = moe_norm_min + self.output_router_logits = output_router_logits + self.router_aux_loss_coef = router_aux_loss_coef + + super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) + + +class Ernie4_5_VLConfig(PreTrainedConfig): + r""" + This is the configuration class to store the configuration of a [`Ernie4_5_VLModel`]. It is used to instantiate a + Ernie4.5-VL model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of + Ernie 4.5 VL 28B A3B [baidu/ERNIE-4.5-VL-28B-A3B-PT](https://huggingface.co/baidu/ERNIE-4.5-VL-28B-A3B-PT). + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Ernie4_5_VLTextConfig`): + The config object or dictionary of the text backbone. + vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Ernie4_5_VLVisionConfig`): + The config object or dictionary of the vision backbone. + image_start_token_id (`int`, *optional*, defaults to 101304): + The image token index to encode the start of image. + image_end_token_id (`int`, *optional*, defaults to 101305): + The image token index to encode the end of image. + image_token_id (`int`, *optional*, defaults to 100295): + The image token index to encode the image prompt. + video_start_token_id (`int`, *optional*, defaults to 101306): + The video token index to encode the start of video. + video_end_token_id (`int`, *optional*, defaults to 101307): + The video token index to encode the end of video. 
+        video_token_id (`int`, *optional*, defaults to 100296):
+            The video token index to encode the video prompt.
+
+    ```python
+    >>> from transformers import Ernie4_5_VLForConditionalGeneration, Ernie4_5_VLConfig
+
+    >>> # Initializing a Ernie4_5_VL style configuration
+    >>> configuration = Ernie4_5_VLConfig()
+
+    >>> # Initializing a model from the Ernie 4.5 VL 28B A3B configuration
+    >>> model = Ernie4_5_VLForConditionalGeneration(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "ernie4_5_vl"
+    sub_configs = {"vision_config": Ernie4_5_VLVisionConfig, "text_config": Ernie4_5_VLTextConfig}
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        text_config=None,
+        vision_config=None,
+        image_start_token_id=101304,
+        image_end_token_id=101305,
+        image_token_id=100295,
+        video_start_token_id=101306,
+        video_end_token_id=101307,
+        video_token_id=100296,
+        **kwargs,
+    ):
+        if isinstance(vision_config, dict):
+            self.vision_config = self.sub_configs["vision_config"](**vision_config)
+        elif isinstance(vision_config, Ernie4_5_VLVisionConfig):
+            self.vision_config = vision_config
+        elif vision_config is None:
+            self.vision_config = self.sub_configs["vision_config"]()
+
+        if isinstance(text_config, dict):
+            self.text_config = self.sub_configs["text_config"](**text_config)
+        elif isinstance(text_config, Ernie4_5_VLTextConfig):
+            self.text_config = text_config
+        elif text_config is None:
+            self.text_config = self.sub_configs["text_config"](**kwargs)
+
+        self.image_start_token_id = image_start_token_id
+        self.image_end_token_id = image_end_token_id
+        self.image_token_id = image_token_id
+        self.video_start_token_id = video_start_token_id
+        self.video_end_token_id = video_end_token_id
+        self.video_token_id = video_token_id
+
+        super().__init__(**kwargs)
+
+
+__all__ = [
+    "Ernie4_5_VLConfig",
+    "Ernie4_5_VLTextConfig",
+    "Ernie4_5_VLVisionConfig",
+]
diff --git a/src/transformers/models/ernie4_5_vl/convert_ernie4_5_vl_to_hf.py b/src/transformers/models/ernie4_5_vl/convert_ernie4_5_vl_to_hf.py
new file mode 100644
index 000000000000..dd680e029ee9
--- /dev/null
+++ b/src/transformers/models/ernie4_5_vl/convert_ernie4_5_vl_to_hf.py
@@ -0,0 +1,478 @@
+# coding=utf-8
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
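+
+# Example invocation (a sketch; the checkpoint repo below is the argparse default defined further
+# down, and the output directory is purely illustrative):
+#
+#   python src/transformers/models/ernie4_5_vl/convert_ernie4_5_vl_to_hf.py \
+#       --checkpoint_path baidu/ERNIE-4.5-VL-28B-A3B-PT \
+#       --pytorch_dump_folder_path /path/to/converted/ernie4_5_vl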
+"""Converts an Ernie 4.5 VL models to Hugging Face format.""" + +import argparse +import json +import os +import re +from pathlib import Path +from shutil import copyfile + +from huggingface_hub import hf_hub_download, snapshot_download +from safetensors.torch import load_file, save_file +from tqdm import tqdm + +from transformers import ( + AutoTokenizer, + Ernie4_5_VLConfig, + Ernie4_5_VLImageProcessor, + Ernie4_5_VLProcessor, + Ernie4_5_VLVideoProcessor, + LlamaTokenizer, +) + + +TIED_MAPPING = { + "baidu/ERNIE-4.5-VL-28B-A3B-PT": True, + "baidu/ERNIE-4.5-VL-28B-A3B-Base-PT": True, + "baidu/ERNIE-4.5-VL-424B-A47B-PT": False, + "baidu/ERNIE-4.5-VL-424B-A47B-Base-PT": False, +} +SAFETENSOR_INDEX_NAME = "model.safetensors.index.json" + +CONFIG_NAME = "config.json" +VALID_VISION_CONFIG_KEYS = [ + "depth", + "hidden_size", + "hidden_act", + "num_heads", + "in_channels", + "patch_size", + "spatial_merge_size", +] +VALID_TEXT_CONFIG_KEYS = [ + "hidden_size", + "intermediate_size", + "max_position_embeddings", + "moe_intermediate_size", + "moe_k", + "moe_layer_interval", + "moe_num_shared_experts", + "num_attention_heads", + "num_hidden_layers", + "num_key_value_heads", + "rms_norm_eps", + "rope_theta", + "vocab_size", + "tie_word_embeddings", + "use_cache", + "use_bias", +] +TEXT_TO_VISION_CONFIG_KEYS = [ + "spatial_conv_size", + "temporal_conv_size", +] +ALL_VISION_CONFIG_KEYS = VALID_VISION_CONFIG_KEYS + TEXT_TO_VISION_CONFIG_KEYS + ["intermediate_size"] +ALL_TEXT_CONFIG_KEYS = VALID_TEXT_CONFIG_KEYS + [ + "hidden_act", + "moe_layer_end_index", + "moe_layer_start_index", + "moe_num_experts", + "rope_parameters", +] + +TMP_TOKENIZER_DIR = "/tmp/ernie_vl_tokenizer" +ADDED_TOKENS_FILE = "added_tokens.json" +SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json" +TOKENIZER_CONFIG_FILE = "tokenizer_config.json" +DEFAULT_CHAT_TEMPLATE = """ +{%- set image_count = namespace(value=0) -%} +{%- set video_count = namespace(value=0) -%} +{{- '<|begin_of_sentence|>' }} +{%- for message in messages -%} + {%- if message.role in ['system', 'user'] -%} + {%- if message.role == 'user' -%} + {{- 'User: ' -}} + {%- endif -%} + {%- if message.content is string -%} + {{- message.content -}} + {%- else -%} + {%- for content_item in message.content -%} + {%- if content_item.type == 'text' -%} + {{- content_item.text -}} + {%- elif content_item.type in ['image_url', 'image'] -%} + {%- set image_count.value = image_count.value + 1 -%} + Picture {{ image_count.value }}:<|IMAGE_START|><|IMAGE_PLACEHOLDER|><|IMAGE_END|> + {%- elif content_item.type in ['video_url', 'video'] -%} + {%- set video_count.value = video_count.value + 1 -%} + Video {{ video_count.value }}:<|VIDEO_START|><|VIDEO_PLACEHOLDER|><|VIDEO_END|> + {%- endif -%} + {%- endfor -%} + {%- endif -%} + {%- if message.role == 'system' -%} + {{- ' + ' -}} + {%- endif -%} + {%- elif message.role == 'assistant' -%} + {%- macro extract_text_content(content_field) -%} + {%- if content_field is string -%} + {{- content_field -}} + {%- elif content_field is iterable and content_field is not string -%} + {%- set ns = namespace(text_parts=[]) -%} + {%- set text_parts = [] -%} + {%- for item in content_field -%} + {%- if item.type == 'text' -%} + {%- set ns.text_parts = ns.text_parts + [item.text] -%} + {%- endif -%} + {%- endfor -%} + {{- ns.text_parts | join("") -}} + {%- else -%} + {{- '' -}} + {%- endif -%} + {%- endmacro -%} + {%- set reasoning_content = extract_text_content(message.reasoning_content) -%} + {%- set content = extract_text_content(message.content) -%} 
+ {%- if '' in content %} + {%- set reasoning_content = content.split('')[0].rstrip(' + ').split('')[-1].lstrip(' + ') %} + {%- set content = content.split('')[-1].lstrip(' + ') %} + {%- endif %} + {%- if reasoning_content %} + {{- ' + ' + 'Assistant: ' + ' + ' + reasoning_content.strip(' + ') + ' + + ' + content.lstrip(' + ') }} + {%- else %} + {{- ' + ' + 'Assistant: ' + content }} + {%- endif %} + {{- '<|end_of_sentence |>' }} + {%- endif -%} +{%- endfor -%} +{%- if add_generation_prompt is not defined or add_generation_prompt is true %} + {{- '\nAssistant: ' -}} + {%- if (enable_thinking is defined and enable_thinking is false) or enable_thinking is not defined %} + {{- '\n\n\n\n' }} + {%- endif %} + {%- if enable_thinking is defined and enable_thinking is true %}{{- '' }}{%- endif %} +{%- endif %}""" +DEFAULT_TEXT_ADD_TOKENS = [ + "", + "", + "", + "", +] +FONT_REPO = "AntonV/ernie4_5_fonts" +FONT_NAME = "Roboto-Regular.ttf" + + +def load_json(save_dir, filename): + with open(os.path.join(save_dir, filename), "r") as f: + return json.load(f) + + +def write_json(json_object, save_dir, filename): + with open(os.path.join(save_dir, filename), "w") as f: + json.dump(json_object, f) + + +def convert_state_dict_to_hf(state_dict, is_tied=True): + converted_state_dict = {} + for key, tensor in state_dict.items(): + key = re.sub("^vision_model", "vision_tower", key) + key = re.sub("^model", "language_model", key) + key = re.sub("^language_model.resampler_model", "resampler_model", key) + key = "model." + key + + if "lm_head" in key and is_tied: + if is_tied: # skip tied weights + pass + else: + # avoid any prefix introduced before + converted_state_dict["lm_head"] = tensor.contiguous() + # Moe is split into their modalities (text, vision) + elif "mlp" in key: + if "moe_statics" in key: + suffix = "moe_statics.e_score_correction_bias" + converted_key = key.removesuffix(suffix) + # splitting param (2, ...) to 2 * (1, ...) + converted_state_dict[converted_key + "text_moe." + suffix] = tensor[0][None, :].contiguous() + converted_state_dict[converted_key + "vision_moe." + suffix] = tensor[1][None, :].contiguous() + elif "gate.weight" in key: + moe_type = "text_moe" + if "weight_1" in key: + moe_type = "vision_moe" + suffix = "gate.weight" + converted_key = key.removesuffix("_1") # vision + converted_key = converted_key.removesuffix("gate.weight") + # previously a `nn.Parameter` which is why we need a transpose for `nn.Linear` + converted_state_dict[converted_key + f"{moe_type}." 
+ suffix] = tensor.T.contiguous() + elif ".experts" in key: + moe_type = "text_moe" + expert_number = int(re.findall(r"\d+", key)[-1]) + # 128 experts split into 64 each (text, vision) + if expert_number >= 64: + moe_type = "vision_moe" + expert_number -= 64 + # avoid subbing the layer idx + experts twice + prefix = re.findall(r"model.language_model.layers.\d+.mlp.experts.", key)[0] + converted_key = re.sub(r"\d+", f"{moe_type}.experts.{expert_number}", key.removeprefix(prefix)) + converted_state_dict[re.sub(".experts", "", prefix) + converted_key] = tensor.contiguous() + else: + converted_state_dict[key] = tensor.contiguous() + # Convert sequential to its own module + elif "spatial_linear" in key or "temporal_linear" in key: + sequential_number = int(re.findall(r"\d+", key)[-1]) + + if sequential_number == 0: + converted_key = re.sub(r"(?<=\.)\d+(?=\.)", "fc1", key) + elif sequential_number == 2: + converted_key = re.sub(r"(?<=\.)\d+(?=\.)", "fc2", key) + elif sequential_number == 3: + converted_key = re.sub(r"(?<=\.)\d+(?=\.)", "ln", key) + else: + converted_key = key + + converted_state_dict[converted_key] = tensor.contiguous() + else: + converted_state_dict[key] = tensor.contiguous() + return converted_state_dict + + +def convert_weights(model_path, save_dir): + print("Starting to convert model weights") + + if not os.path.isdir(save_dir): + os.makedirs(save_dir, exist_ok=True) + + # indexing base dict + index_dict = {"weight_map": {}, "metadata": {"total_size": 0}} + + is_tied = TIED_MAPPING[model_path] + checkpoint_path = snapshot_download(repo_id=model_path, allow_patterns=["*.safetensors*"]) + for filename in tqdm(sorted(os.listdir(checkpoint_path))): + # metadata doesn't change + if filename == SAFETENSOR_INDEX_NAME: + original_index = load_json(checkpoint_path, filename) + index_dict["metadata"] = original_index["metadata"] + # sharded files are converted 1 by 1 + if filename.endswith(".safetensors"): + input_file = os.path.join(checkpoint_path, filename) + output_file = os.path.join(save_dir, filename) + + state_dict = load_file(input_file) + converted_state_dict = convert_state_dict_to_hf(state_dict, is_tied=is_tied) + save_file(converted_state_dict, output_file) + + # remap namings in index + for k in converted_state_dict.keys(): + index_dict["weight_map"][k] = filename + + # save index + write_json(index_dict, save_dir, SAFETENSOR_INDEX_NAME) + + print("Converted all model weights\n") + + +def convert_vision_config_to_hf(vision_config, original_config, original_vision_config): + # convert vision related stuff + for key in VALID_VISION_CONFIG_KEYS: + vision_config[key] = original_vision_config[key] + vision_config["intermediate_size"] = original_vision_config["hidden_size"] * original_vision_config["mlp_ratio"] + + # convert originally text attributes to vision + for key in TEXT_TO_VISION_CONFIG_KEYS: + vision_config[key.replace("conv", "merge")] = original_config[key] + vision_config["rms_norm_eps"] = 1e-6 + + # delete everything else + for key in list(vision_config.keys()): + if key not in ALL_VISION_CONFIG_KEYS: + del vision_config[key] + + return vision_config + + +def convert_text_config_to_hf(text_config, original_config): + # carry directly over + for key in VALID_TEXT_CONFIG_KEYS: + text_config[key] = original_config[key] + + # special cases + text_config["hidden_act"] = "silu" # default value which is not explicit in their json + text_config["moe_layer_end_index"] = max(original_config["moe_layer_end_index"]) + text_config["moe_layer_start_index"] = 
min(original_config["moe_layer_start_index"]) + text_config["moe_num_experts"] = original_config["moe_num_experts"][0] # the same for both modalities + text_config["rope_parameters"] = { + "rope_type": "default", + "rope_theta": 500_000.0, + "mrope_section": [22, 22, 20], + } + + # delete everything else + for key in list(text_config.keys()): + if key not in ALL_TEXT_CONFIG_KEYS: + del text_config[key] + + return text_config + + +def convert_config(model_path, save_dir): + checkpoint_path = snapshot_download(repo_id=model_path, allow_patterns=["*config*"]) + for filename in sorted(os.listdir(checkpoint_path)): + if filename == CONFIG_NAME: + hf_config = Ernie4_5_VLConfig() + original_config = load_json(checkpoint_path, filename) + + # general config + image_token_id = original_config["im_patch_id"] + + # vision config + vision_config = hf_config.vision_config.to_dict() + original_vision_config = original_config["vision_config"] + vision_config = convert_vision_config_to_hf(vision_config, original_config, original_vision_config) + + # text config + text_config = hf_config.text_config.to_dict() + text_config = convert_text_config_to_hf(text_config, original_config) + + # total config + final_config = Ernie4_5_VLConfig( + text_config=text_config, + vision_config=vision_config, + image_token_id=image_token_id, + ) + + final_config.save_pretrained(save_dir) + break + print("Converted model config\n") + + +def convert_tokenizer(original_tokenizer_path, save_dir): + # same conversion as the moe and base ernie tokenizers + hf_tok = LlamaTokenizer.from_pretrained( + original_tokenizer_path, + pad_token="", + cls_token="<|begin_of_sentence|>", + sep_token="<|end_of_sentence|>", + mask_token="", + add_bos_token=False, + add_prefix_space=False, + chat_template=DEFAULT_CHAT_TEMPLATE, + legacy=True, + ) + hf_tok.model_max_length = 131072 + hf_tok.init_kwargs.pop("auto_map", None) + # special tokens which we need to map as additional special tokens instead + hf_tok.init_kwargs.pop("header_start_token", None) + hf_tok.init_kwargs.pop("header_end_token", None) + hf_tok.init_kwargs.pop("sys_start_token", None) + hf_tok.init_kwargs.pop("sys_end_token", None) + for token in DEFAULT_TEXT_ADD_TOKENS: + hf_tok.add_tokens([token], special_tokens=True) + # save slow model + hf_tok.save_pretrained(TMP_TOKENIZER_DIR) + + # we will exchange the special audio token as we need a dedicated video token + original_str = "<|AUDIO_PLACEHOLDER|>" + new_str = "<|VIDEO_PLACEHOLDER|>" + + # overwrite every occurrence of the special tokens with the new string + added_tokens = load_json(TMP_TOKENIZER_DIR, ADDED_TOKENS_FILE) + original_id = added_tokens.get(original_str, -1) + if original_id < 0: + raise ValueError(f"The requested string '{original_str}' is not a special token.") + + added_tokens.pop(original_str) + added_tokens[new_str] = original_id + write_json(added_tokens, TMP_TOKENIZER_DIR, ADDED_TOKENS_FILE) + + special_tokens_map = load_json(TMP_TOKENIZER_DIR, SPECIAL_TOKENS_MAP_FILE) + for i, token in enumerate(special_tokens_map["additional_special_tokens"]): + if token == original_str: + special_tokens_map["additional_special_tokens"][i] = new_str + break + write_json(special_tokens_map, TMP_TOKENIZER_DIR, SPECIAL_TOKENS_MAP_FILE) + + tokenizer_config = load_json(TMP_TOKENIZER_DIR, TOKENIZER_CONFIG_FILE) + for i, token in enumerate(tokenizer_config["additional_special_tokens"]): + if token == original_str: + tokenizer_config["additional_special_tokens"][i] = new_str + break + 
tokenizer_config["added_tokens_decoder"][f"{original_id}"]["content"] = new_str + write_json(tokenizer_config, TMP_TOKENIZER_DIR, TOKENIZER_CONFIG_FILE) + + # reload and save to get correct formatting + tokenizer = AutoTokenizer.from_pretrained( + TMP_TOKENIZER_DIR, + extra_special_tokens={ + "image_token": "<|IMAGE_PLACEHOLDER|>", + "image_end_token": "<|IMAGE_END|>", + "image_start_token": "<|IMAGE_START|>", + "video_token": "<|VIDEO_PLACEHOLDER|>", + "video_end_token": "<|VIDEO_END|>", + "video_start_token": "<|VIDEO_START|>", + }, + from_slow=True, + ) + tokenizer.save_pretrained(save_dir) + + +def convert_processor(model_path, save_dir): + print("Starting to convert processor") + + convert_tokenizer(model_path, save_dir) + tokenizer = AutoTokenizer.from_pretrained(save_dir) + + # font used within the video processor + copyfile(hf_hub_download(FONT_REPO, FONT_NAME), Path(save_dir, FONT_NAME)) + + processor = Ernie4_5_VLProcessor( + # Intentionally use the slow image processor as the fast processor + # creates too much fluctuation affecting the model output + # image_processor=Ernie4_5_VLImageProcessorFast(), + image_processor=Ernie4_5_VLImageProcessor(), + tokenizer=tokenizer, + video_processor=Ernie4_5_VLVideoProcessor(), + chat_template=tokenizer.chat_template, + ) + processor.save_pretrained(save_dir) + + print("Finished converting the processor\n") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # # Required parameters + parser.add_argument( + "--checkpoint_path", + type=str, + default="baidu/ERNIE-4.5-VL-28B-A3B-PT", + help="Path to the downloaded checkpoints", + ) + parser.add_argument( + "--pytorch_dump_folder_path", default="AntonV/ErnieVL", type=str, help="Path to the output PyTorch model." + ) + parser.add_argument( + "--convert_preprocessor", + type=bool, + default=True, + help="Whether or not the preprocessor (tokenizer + image/video processors) should be converted along with the model.", + ) + args = parser.parse_args() + + # convert_weights(args.checkpoint_path, args.pytorch_dump_folder_path) + convert_config(args.checkpoint_path, args.pytorch_dump_folder_path) + + # if args.convert_preprocessor: + # convert_processor(args.checkpoint_path, args.pytorch_dump_folder_path) + + print(f"Saved converted checkpoint to {args.pytorch_dump_folder_path}") diff --git a/src/transformers/models/ernie4_5_vl/image_processing_ernie4_5_vl.py b/src/transformers/models/ernie4_5_vl/image_processing_ernie4_5_vl.py new file mode 100644 index 000000000000..ee5fbe8b72bb --- /dev/null +++ b/src/transformers/models/ernie4_5_vl/image_processing_ernie4_5_vl.py @@ -0,0 +1,436 @@ +# coding=utf-8 +# Copyright 2025 Baidu and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import math +from typing import Optional, Union + +import numpy as np + +from ...image_processing_utils import BaseImageProcessor, BatchFeature +from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format +from ...image_utils import ( + OPENAI_CLIP_MEAN, + OPENAI_CLIP_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + get_image_size, + infer_channel_dimension_format, + is_scaled_image, + make_flat_list_of_images, + make_list_of_images, + to_numpy_array, + valid_images, + validate_preprocess_arguments, +) +from ...processing_utils import ImagesKwargs +from ...utils import TensorType, logging + + +logger = logging.get_logger(__name__) + + +class Ernie4_5_VLImageProcessorKwargs(ImagesKwargs, total=False): + r""" + patch_size (`int`, *optional*, defaults to 14): + The spatial patch size of the vision encoder. + merge_size (`int`, *optional*, defaults to 2): + The merge size of the vision encoder to llm encoder. + """ + + patch_size: int + merge_size: int + + +def smart_resize( + height: int, width: int, factor: int = 28, min_pixels: int = 56 * 56, max_pixels: int = 6177 * 28 * 28 +): + """Rescales the image so that the following conditions are met: + + 1. Both dimensions (height and width) are divisible by 'factor'. + + 2. The total number of pixels is within the range ['min_pixels', 'max_pixels']. + + 3. The aspect ratio of the image is maintained as closely as possible. + + """ + if max(height, width) / min(height, width) > 200: + raise ValueError( + f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}" + ) + h_bar = round(height / factor) * factor + w_bar = round(width / factor) * factor + if h_bar * w_bar > max_pixels: + beta = math.sqrt((height * width) / max_pixels) + h_bar = max(factor, math.floor(height / beta / factor) * factor) + w_bar = max(factor, math.floor(width / beta / factor) * factor) + elif h_bar * w_bar < min_pixels: + beta = math.sqrt(min_pixels / (height * width)) + h_bar = math.ceil(height * beta / factor) * factor + w_bar = math.ceil(width * beta / factor) * factor + return h_bar, w_bar + + +class Ernie4_5_VLImageProcessor(BaseImageProcessor): + r""" + Constructs a Ernie 4.5 VL image processor that dynamically resizes images based on the original images. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image's (height, width) dimensions. + size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 56 * 56, "longest_edge": 28 * 28 * 6177}`): + Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present. + resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): + Resampling filter to use when resizing the image. + do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the image by the specified scale `rescale_factor`. + rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Scale factor to use if rescaling the image. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. + image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): + Mean to use if normalizing the image. This is a float or list of floats for each channel in the image. + image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): + Standard deviation to use if normalizing the image. This is a float or list of floats for each channel + in the image. 
+ do_convert_rgb (`bool`, *optional*, defaults to `True`): + Whether to convert the image to RGB. + patch_size (`int`, *optional*, defaults to 14): + The spatial patch size of the vision encoder. + merge_size (`int`, *optional*, defaults to 2): + The merge size of the vision encoder to llm encoder. + """ + + model_input_names = ["pixel_values", "image_grid_thw"] + valid_kwargs = Ernie4_5_VLImageProcessorKwargs + + def __init__( + self, + do_resize: bool = True, + size: Optional[dict[str, int]] = None, + resample: PILImageResampling = PILImageResampling.BICUBIC, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_normalize: bool = True, + image_mean: Optional[Union[float, list[float]]] = None, + image_std: Optional[Union[float, list[float]]] = None, + do_convert_rgb: bool = True, + patch_size: int = 14, + merge_size: int = 2, + **kwargs, + ) -> None: + super().__init__(**kwargs) + if size is not None: + if "shortest_edge" not in size or "longest_edge" not in size: + raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.") + size = {"shortest_edge": size["shortest_edge"], "longest_edge": size["longest_edge"]} + else: + size = {"shortest_edge": 56 * 56, "longest_edge": 6177 * 28 * 28} + self.size = size + + self.do_resize = do_resize + self.resample = resample + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN + self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD + self.patch_size = patch_size + self.merge_size = merge_size + self.do_convert_rgb = do_convert_rgb + + def _preprocess( + self, + images: ImageInput, + do_resize: Optional[bool] = None, + size: Optional[dict[str, int]] = None, + resample: PILImageResampling = None, + do_rescale: Optional[bool] = None, + rescale_factor: Optional[float] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[Union[float, list[float]]] = None, + image_std: Optional[Union[float, list[float]]] = None, + patch_size: Optional[int] = None, + merge_size: Optional[int] = None, + do_convert_rgb: Optional[bool] = None, + data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ): + """ + Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`. + + Args: + images (`ImageInput`): + Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`. + vision_info (`list[Dict]`, *optional*): + Optional list of dictionaries containing additional information about vision inputs. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`dict[str, int]`, *optional*, defaults to `self.size`): + Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present. + resample (`PILImageResampling`, *optional*, defaults to `self.resample`): + Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Scale factor to use if rescaling the image. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. 
+            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
+                Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
+            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
+                Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
+            patch_size (`int`, *optional*, defaults to `self.patch_size`):
+                The spatial patch size of the vision encoder.
+            merge_size (`int`, *optional*, defaults to `self.merge_size`):
+                The merge size of the vision encoder to llm encoder.
+            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
+                Whether to convert the image to RGB.
+            data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
+                The channel dimension format for the output image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - Unset: Use the channel dimension format of the input image.
+            input_data_format (`ChannelDimension` or `str`, *optional*):
+                The channel dimension format for the input image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+        """
+        images = self.fetch_images(images)
+        images = make_list_of_images(images)
+
+        if do_convert_rgb:
+            images = [convert_to_rgb(image) for image in images]
+
+        # All transformations expect numpy arrays.
+        images = [to_numpy_array(image) for image in images]
+
+        if do_rescale and is_scaled_image(images[0]):
+            logger.warning_once(
+                "It looks like you are trying to rescale already rescaled images. If the input"
+                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+            )
+        if input_data_format is None:
+            # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0]) + + height, width = get_image_size(images[0], channel_dim=input_data_format) + resized_height, resized_width = height, width + processed_images = [] + for image in images: + if do_resize: + resized_height, resized_width = smart_resize( + height, + width, + factor=patch_size * merge_size, + min_pixels=size["shortest_edge"], + max_pixels=size["longest_edge"], + ) + image = resize( + image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format + ) + + if do_rescale: + image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) + + if do_normalize: + image = self.normalize( + image=image, mean=image_mean, std=image_std, input_data_format=input_data_format + ) + + image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) + processed_images.append(image) + + patches = np.array(processed_images) + if data_format == ChannelDimension.LAST: + patches = patches.transpose([0, 3, 1, 2]) + + # Main difference to Qwen2 VL - no temporal patches + channel = patches.shape[1] + grid_t = patches.shape[0] + grid_h, grid_w = resized_height // patch_size, resized_width // patch_size + patches = patches.reshape( + [ + grid_t, + channel, + grid_h // merge_size, + merge_size, + patch_size, + grid_w // merge_size, + merge_size, + patch_size, + ] + ) + # [grid_t, grid_h/merge, grid_w/merge, merge, merge, channel, patch, patch] + patches = patches.transpose([0, 2, 5, 3, 6, 1, 4, 7]) + flatten_patches = patches.reshape(grid_t * grid_h * grid_w, channel * patch_size * patch_size) + + return flatten_patches, (grid_t, grid_h, grid_w) + + def preprocess( + self, + images: ImageInput, + do_resize: Optional[bool] = None, + size: Optional[dict[str, int]] = None, + resample: Optional[PILImageResampling] = None, + do_rescale: Optional[bool] = None, + rescale_factor: Optional[float] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[Union[float, list[float]]] = None, + image_std: Optional[Union[float, list[float]]] = None, + patch_size: Optional[int] = None, + merge_size: Optional[int] = None, + do_convert_rgb: Optional[bool] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ): + """ + Args: + images (`ImageInput`): + Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`dict[str, int]`, *optional*, defaults to `self.size`): + Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with + the longest edge resized to keep the input aspect ratio. + resample (`int`, *optional*, defaults to `self.resample`): + Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only + has an effect if `do_resize` is set to `True`. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. 
+ image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): + Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. + image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to + `True`. + patch_size (`int`, *optional*, defaults to `self.patch_size`): + The spatial patch size of the vision encoder. + merge_size (`int`, *optional*, defaults to `self.merge_size`): + The merge size of the vision encoder to llm encoder. + do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): + Whether to convert the image to RGB. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + + """ + if size is not None: + if "shortest_edge" not in size or "longest_edge" not in size: + raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.") + else: + size = {**self.size} + + resample = resample if resample is not None else self.resample + do_resize = do_resize if do_resize is not None else self.do_resize + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + patch_size = patch_size if patch_size is not None else self.patch_size + merge_size = merge_size if merge_size is not None else self.merge_size + do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb + + if images is not None: + images = self.fetch_images(images) + images = make_flat_list_of_images(images) + + if images is not None and not valid_images(images): + raise ValueError("Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor") + + validate_preprocess_arguments( + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + do_resize=do_resize, + size=size, + resample=resample, + ) + + data = {} + pixel_values, vision_grid_thws = [], [] + for image in images: + patches, image_grid_thw = self._preprocess( + image, + do_resize=do_resize, + size=size, + resample=resample, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + patch_size=patch_size, + merge_size=merge_size, + data_format=data_format, + do_convert_rgb=do_convert_rgb, + input_data_format=input_data_format, + ) + pixel_values.extend(patches) + vision_grid_thws.append(image_grid_thw) + pixel_values = np.array(pixel_values) + vision_grid_thws = np.array(vision_grid_thws) + data.update({"pixel_values": pixel_values, "image_grid_thw": vision_grid_thws}) + + return BatchFeature(data=data, tensor_type=return_tensors) + + def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None): + """ + A utility that returns number of image patches for a given image size. + + Args: + height (`int`): + Height of the input image. + width (`int`): + Width of the input image. + images_kwargs (`dict`, *optional*) + Any kwargs to override defaults of the image processor. + Returns: + `int`: Number of image patches per image. + """ + min_pixels = self.size["shortest_edge"] + max_pixels = self.size["longest_edge"] + patch_size = images_kwargs.get("patch_size", self.patch_size) + merge_size = images_kwargs.get("merge_size", self.merge_size) + + factor = patch_size * merge_size + resized_height, resized_width = smart_resize( + height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels + ) + grid_h, grid_w = resized_height // patch_size, resized_width // patch_size + return grid_h * grid_w + + +__all__ = ["Ernie4_5_VLImageProcessor"] diff --git a/src/transformers/models/ernie4_5_vl/image_processing_ernie4_5_vl_fast.py b/src/transformers/models/ernie4_5_vl/image_processing_ernie4_5_vl_fast.py new file mode 100644 index 000000000000..0f415bbf0b7c --- /dev/null +++ b/src/transformers/models/ernie4_5_vl/image_processing_ernie4_5_vl_fast.py @@ -0,0 +1,254 @@ +# coding=utf-8 +# Copyright 2025 Baidu and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
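+
+# A hedged worked example for `smart_resize` below, assuming the defaults `patch_size=14` and
+# `merge_size=2` (so `factor = 28`): both sides are rounded to multiples of `factor` and the total
+# pixel count is kept within `[min_pixels, max_pixels]`. For illustration only:
+#
+#     smart_resize(1000, 750)        # -> (1008, 756), both multiples of 28
+#     (1008 // 14) * (756 // 14)     # -> 72 * 54 = 3888 patches before the 2x2 spatial merge
+#     3888 // (2 * 2)                # -> 972 visual tokens after merging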
+import math +from typing import Optional, Union + +import torch +import torch.nn.functional as F + +from ...image_processing_utils import BatchFeature +from ...image_processing_utils_fast import BaseImageProcessorFast, group_images_by_shape, reorder_images +from ...image_utils import ( + OPENAI_CLIP_MEAN, + OPENAI_CLIP_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + SizeDict, +) +from ...processing_utils import Unpack +from ...utils import TensorType, auto_docstring +from .image_processing_ernie4_5_vl import Ernie4_5_VLImageProcessorKwargs + + +def smart_resize( + height: int, width: int, factor: int = 28, min_pixels: int = 56 * 56, max_pixels: int = 6177 * 28 * 28 +): + """Rescales the image so that the following conditions are met: + + 1. Both dimensions (height and width) are divisible by 'factor'. + + 2. The total number of pixels is within the range ['min_pixels', 'max_pixels']. + + 3. The aspect ratio of the image is maintained as closely as possible. + + """ + if max(height, width) / min(height, width) > 200: + raise ValueError( + f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}" + ) + h_bar = round(height / factor) * factor + w_bar = round(width / factor) * factor + if h_bar * w_bar > max_pixels: + beta = math.sqrt((height * width) / max_pixels) + h_bar = max(factor, math.floor(height / beta / factor) * factor) + w_bar = max(factor, math.floor(width / beta / factor) * factor) + elif h_bar * w_bar < min_pixels: + beta = math.sqrt(min_pixels / (height * width)) + h_bar = math.ceil(height * beta / factor) * factor + w_bar = math.ceil(width * beta / factor) * factor + return h_bar, w_bar + + +@auto_docstring +class Ernie4_5_VLImageProcessorFast(BaseImageProcessorFast): + do_resize = True + resample = PILImageResampling.BICUBIC + size = {"shortest_edge": 56 * 56, "longest_edge": 28 * 28 * 6177} + do_rescale = True + do_normalize = True + image_mean = OPENAI_CLIP_MEAN + image_std = OPENAI_CLIP_STD + do_convert_rgb = True + patch_size = 14 + merge_size = 2 + valid_kwargs = Ernie4_5_VLImageProcessorKwargs + model_input_names = ["pixel_values", "image_grid_thw"] + + def __init__(self, **kwargs: Unpack[Ernie4_5_VLImageProcessorKwargs]): + size = kwargs.pop("size", None) + size = self.size if size is None else size + if "shortest_edge" not in size or "longest_edge" not in size: + raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.") + + super().__init__(size=size, **kwargs) + + def _further_process_kwargs( + self, + size: Optional[SizeDict] = None, + **kwargs, + ) -> dict: + """ + Update kwargs that need further processing before being validated + Can be overridden by subclasses to customize the processing of kwargs. + """ + if size is not None: + if "shortest_edge" not in size or "longest_edge" not in size: + raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.") + else: + size = {**self.size} + + return super()._further_process_kwargs(size=size, **kwargs) + + @auto_docstring + def preprocess( + self, + images: ImageInput, + **kwargs: Unpack[Ernie4_5_VLImageProcessorKwargs], + ) -> BatchFeature: + return super().preprocess(images, **kwargs) + + def _preprocess_image_like_inputs( + self, + images: ImageInput, + do_convert_rgb: bool, + input_data_format: ChannelDimension, + device: Optional[Union[str, "torch.device"]] = None, + **kwargs: Unpack[Ernie4_5_VLImageProcessorKwargs], + ) -> BatchFeature: + """ + Preprocess image-like inputs. 
+ To be overridden by subclasses when image-like inputs other than images should be processed. + It can be used for segmentation maps, depth maps, etc. + """ + # Prepare input images + batch_feature = BatchFeature() + images = self._prepare_image_like_inputs( + images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device + ) + batch_feature = self._preprocess(images, **kwargs) + return batch_feature + + def _preprocess( + self, + images: list["torch.Tensor"], + do_resize: bool, + size: SizeDict, + interpolation: Optional["F.InterpolationMode"], + do_rescale: bool, + rescale_factor: float, + do_normalize: bool, + image_mean: Optional[Union[float, list[float]]], + image_std: Optional[Union[float, list[float]]], + patch_size: int, + merge_size: int, + disable_grouping: Optional[bool], + return_tensors: Optional[Union[str, TensorType]], + **kwargs, + ): + # Group images by size for batched resizing + grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) + resized_images_grouped = {} + for shape, stacked_images in grouped_images.items(): + height, width = stacked_images.shape[-2:] + if do_resize: + resized_height, resized_width = smart_resize( + height, + width, + factor=patch_size * merge_size, + min_pixels=size["shortest_edge"], + max_pixels=size["longest_edge"], + ) + stacked_images = self.resize( + image=stacked_images, + size=SizeDict(height=resized_height, width=resized_width), + interpolation=interpolation, + ) + resized_images_grouped[shape] = stacked_images + resized_images = reorder_images(resized_images_grouped, grouped_images_index) + + # Group images by size for further processing + # Needed in case do_resize is False, or resize returns images with different sizes + grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping) + processed_images_grouped = {} + processed_grids = {} + for shape, stacked_images in grouped_images.items(): + resized_height, resized_width = stacked_images.shape[-2:] + # Fused rescale and normalize + patches = self.rescale_and_normalize( + stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std + ) + if patches.ndim == 4: + # add a temporal dimension if we have images + patches = patches.unsqueeze(1) + + # Main difference to Qwen2 VL - no temporal patches + batch_size, grid_t, channel = patches.shape[:3] + grid_h, grid_w = resized_height // patch_size, resized_width // patch_size + + patches = patches.view( + batch_size, + grid_t, + channel, + grid_h // merge_size, + merge_size, + patch_size, + grid_w // merge_size, + merge_size, + patch_size, + ) + # Reorder dimensions to group grid and patch information for subsequent flattening. 
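+            # Shape walk-through for illustration (values assumed): a 224x224 image with patch_size=14
+            # and merge_size=2 gives grid_h = grid_w = 16, so the view above is
+            # [batch, 1, channel, 8, 2, 14, 8, 2, 14] and flattens below into 256 patches of
+            # length channel * 14 * 14 per image.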
+ # [batch, grid_t, grid_h/merge, grid_w/merge, merge, merge, channel, patch, patch] + patches = patches.permute(0, 1, 3, 6, 4, 7, 2, 5, 8) + + flatten_patches = patches.reshape( + batch_size, + grid_t * grid_h * grid_w, + channel * patch_size * patch_size, + ) + + processed_images_grouped[shape] = flatten_patches + processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size + + processed_images = reorder_images(processed_images_grouped, grouped_images_index) + processed_grids = reorder_images(processed_grids, grouped_images_index) + pixel_values = torch.cat(processed_images, dim=0) + image_grid_thw = torch.tensor(processed_grids) + + return BatchFeature( + data={"pixel_values": pixel_values, "image_grid_thw": image_grid_thw}, tensor_type=return_tensors + ) + + def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None): + """ + A utility that returns number of image patches for a given image size. + + Note: Do not remove this method! It is used by vLLM to infer the number of patches and placeholders + without an image input. + + Args: + height (`int`): + Height of the input image. + width (`int`): + Width of the input image. + images_kwargs (`dict`, *optional*) + Any kwargs to override defaults of the image processor. + Returns: + `int`: Number of image patches per image. + """ + min_pixels = self.size["shortest_edge"] + max_pixels = self.size["longest_edge"] + patch_size = images_kwargs.get("patch_size", self.patch_size) + merge_size = images_kwargs.get("merge_size", self.merge_size) + + factor = patch_size * merge_size + resized_height, resized_width = smart_resize( + height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels + ) + grid_h, grid_w = resized_height // patch_size, resized_width // patch_size + return grid_h * grid_w + + +__all__ = ["Ernie4_5_VLImageProcessorFast"] diff --git a/src/transformers/models/ernie4_5_vl/modeling_ernie4_5_vl.py b/src/transformers/models/ernie4_5_vl/modeling_ernie4_5_vl.py new file mode 100644 index 000000000000..01fb1f454f59 --- /dev/null +++ b/src/transformers/models/ernie4_5_vl/modeling_ernie4_5_vl.py @@ -0,0 +1,1901 @@ +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# This file was automatically generated from src/transformers/models/ernie4_5_vl/modular_ernie4_5_vl.py. +# Do NOT edit this file manually as any edits will be overwritten by the generation of +# the file from the modular. If any change should be done, please apply the change to the +# modular_ernie4_5_vl.py file directly. One of our CI enforces this. +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# coding=utf-8 +# Copyright 2025 Baidu and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import itertools +from collections.abc import Callable +from typing import Any, Optional, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ... 
import initialization as init +from ...activations import ACT2FN +from ...cache_utils import Cache, DynamicCache +from ...generation import GenerationMixin +from ...integrations import use_kernel_forward_from_hub +from ...masking_utils import create_causal_mask +from ...modeling_flash_attention_utils import FlashAttentionKwargs +from ...modeling_layers import GradientCheckpointingLayer +from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast +from ...modeling_rope_utils import dynamic_rope_update +from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel +from ...processing_utils import Unpack +from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torchdynamo_compiling +from ...utils.generic import OutputRecorder, check_model_inputs +from .configuration_ernie4_5_vl import Ernie4_5_VLConfig, Ernie4_5_VLTextConfig, Ernie4_5_VLVisionConfig + + +class Ernie4_5_VLTextRotaryEmbedding(nn.Module): + inv_freq: torch.Tensor # fix linting for `register_buffer` + + def __init__(self, config, device=None): + super().__init__() + self.max_seq_len_cached = config.max_position_embeddings + self.original_max_seq_len = config.max_position_embeddings + + self.config = config + + self.rope_type = self.config.rope_parameters["rope_type"] + rope_init_fn: Callable = self.compute_default_rope_parameters + if self.rope_type != "default": + raise ValueError(f"Ernie 4.5 VL requires the `default` rope type, but found {self.rope_type} instead.") + inv_freq, self.attention_scaling = rope_init_fn(self.config, device) + + self.register_buffer("inv_freq", inv_freq, persistent=False) + self.original_inv_freq = inv_freq + + self.mrope_section = config.rope_parameters.get("mrope_section", [22, 22, 20]) + + @staticmethod + def compute_default_rope_parameters( + config: Optional[Ernie4_5_VLTextConfig] = None, + device: Optional["torch.device"] = None, + seq_len: Optional[int] = None, + ) -> tuple["torch.Tensor", float]: + """ + Computes the inverse frequencies according to the original RoPE implementation + Args: + config ([`~transformers.PreTrainedConfig`]): + The model configuration. + device (`torch.device`): + The device to use for initialization of the inverse frequencies. + seq_len (`int`, *optional*): + The current sequence length. Unused for this type of RoPE. + Returns: + Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the + post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE). + """ + base = config.rope_parameters["rope_theta"] + dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads + + attention_factor = 1.0 # Unused in this type of RoPE + + # Compute the inverse frequencies + inv_freq = 1.0 / ( + base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim) + ) + + # Special to ernie, we prerotate on the hw dim + mrope_section = config.rope_parameters.get("mrope_section", [22, 22, 20]) + hw_dim = mrope_section[0] + mrope_section[1] + t_dim = mrope_section[2] + + inv_freq_3d = torch.empty_like(inv_freq) + # (Pre-)Rotate to avoid another rotation during the forward + inv_freq_3d[:hw_dim] = torch.cat([inv_freq[:-t_dim][0::2], inv_freq[:-t_dim][1::2]]) + inv_freq_3d[-t_dim:] = inv_freq[-t_dim:] + + return inv_freq_3d, attention_factor + + @torch.no_grad() + @dynamic_rope_update # power user: used with advanced RoPE types (e.g. 
dynamic rope) + def forward(self, x, position_ids): + inv_freq_expanded = ( + self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1).to(x.device) + ) + position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions) + + device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" + with torch.autocast(device_type=device_type, enabled=False): # Force float32 + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3) + cos = freqs.cos() * self.attention_scaling + sin = freqs.sin() * self.attention_scaling + + sin = self.recomposition_to_3d(sin) + cos = self.recomposition_to_3d(cos) + + return cos, sin + + def recomposition_to_3d(self, freq): + freq_h, freq_w, freq_t = (m[(i + 1) % 3] for i, m in enumerate(freq.split([*self.mrope_section], dim=-1))) + freq_hw = torch.stack([freq_h, freq_w], dim=-1).flatten(-2) + freq_hwt = torch.cat([freq_hw, freq_t], dim=-1) + return freq_hwt.repeat_interleave(2, dim=-1) + + +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +def eager_attention_forward( + module: nn.Module, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attention_mask: Optional[torch.Tensor], + scaling: float, + dropout: float = 0.0, + **kwargs: Unpack[TransformersKwargs], +): + key_states = repeat_kv(key, module.num_key_value_groups) + value_states = repeat_kv(value, module.num_key_value_groups) + + attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling + if attention_mask is not None: + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] + attn_weights = attn_weights + causal_mask + + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) + attn_output = torch.matmul(attn_weights, value_states) + attn_output = attn_output.transpose(1, 2).contiguous() + + return attn_output, attn_weights + + +def rotate_half_text(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., 0::2] + x2 = x[..., 1::2] + return torch.stack((-x2, x1), dim=-1).flatten(-2) + + +def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`, *optional*): + Deprecated and unused. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. 
Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. + """ + original_dtype = q.dtype + + cos = cos.unsqueeze(unsqueeze_dim) + sin = sin.unsqueeze(unsqueeze_dim) + + q_embed = (q.float() * cos) + (rotate_half_text(q).float() * sin) + k_embed = (k.float() * cos) + (rotate_half_text(k).float() * sin) + + return q_embed.to(original_dtype), k_embed.to(original_dtype) + + +class Ernie4_5_VLTextAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: Ernie4_5_VLConfig, layer_idx: int): + super().__init__() + self.config = config + self.layer_idx = layer_idx + self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) + self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads + self.scaling = self.head_dim**-0.5 + + self.attention_dropout = 0.0 + self.is_causal = True + + self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias) + self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias) + self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias) + self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias) + + def forward( + self, + hidden_states: torch.Tensor, + position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Cache] = None, + cache_position: Optional[torch.LongTensor] = None, + **kwargs: Unpack[TransformersKwargs], + ) -> tuple[torch.Tensor, torch.Tensor]: + input_shape = hidden_states.shape[:-1] + hidden_shape = (*input_shape, -1, self.head_dim) + + query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) + key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) + value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) + + cos, sin = position_embeddings + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_values is not None: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} + key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) + + attention_interface: Callable = eager_attention_forward + if self.config._attn_implementation != "eager": + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + + attn_output, attn_weights = attention_interface( + self, + query_states, + key_states, + value_states, + attention_mask, + dropout=0.0 if not self.training else self.attention_dropout, + scaling=self.scaling, + **kwargs, + ) + + attn_output = attn_output.reshape(*input_shape, -1).contiguous() + attn_output = self.o_proj(attn_output) + return attn_output, attn_weights + + +@use_kernel_forward_from_hub("RMSNorm") +class Ernie4_5_VLRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + Ernie4_5_VLRMSNorm is 
equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + def extra_repr(self): + return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" + + +class Ernie4_5_VLMLP(nn.Module): + def __init__(self, config, intermediate_size=None): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size + + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, x): + down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + return down_proj + + +class Ernie4_5_VLMoeStatics(nn.Module): + """ + Stores MoE (Mixture of Experts) statistics + - Bias for the gating + - Additionally, usage per expert in the original codebase + """ + + def __init__(self, config): + super().__init__() + + num_experts_groups = 1 + num_experts = config.moe_num_experts + + self.e_score_correction_bias = nn.Parameter( + torch.zeros(num_experts_groups, num_experts, dtype=torch.float32), + requires_grad=False, + ) + + def forward(self, hidden_states): + # NOTE: This is a workaround to enable TP with a module that only has parameters + # + # Otherwise, it stays as `DTensor` when called in the "super" forward + # 1. All other tensors are local (`torch.Tensor`) + # 2. 
Isolate does not work on `nn.Module` which only has parameters + return hidden_states + self.e_score_correction_bias.squeeze() + + +class Ernie4_5_VLMoeTopKRouter(nn.Module): + def __init__(self, config): + super().__init__() + self.weight = nn.Parameter(torch.zeros(config.moe_num_experts, config.hidden_size, dtype=torch.float32)) + self.moe_statics = Ernie4_5_VLMoeStatics(config) + self.top_k = config.moe_k + self.norm_min = config.moe_norm_min + + def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: + device_type = ( + hidden_states.device.type + if isinstance(hidden_states.device.type, str) and hidden_states.device.type != "mps" + else "cpu" + ) + + with torch.autocast(device_type=device_type, enabled=False): # Force float32 + router_logits = F.linear(hidden_states.float(), self.weight) + routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float) + _, selected_experts = torch.topk(self.moe_statics(routing_weights), self.top_k, dim=-1) + routing_weights = torch.gather(routing_weights, dim=-1, index=selected_experts) + routing_weights = routing_weights / torch.clamp( + routing_weights.sum(dim=-1, keepdim=True), min=self.norm_min + ) + routing_weights = routing_weights.to(hidden_states.dtype) + return routing_weights, selected_experts, router_logits + + +class Ernie4_5_VLMoeExperts(nn.Module): + def __init__(self, config, intermediate_size=None): + super().__init__() + self.num_experts = config.moe_num_experts + self.hidden_dim = config.hidden_size + self.intermediate_dim = config.moe_intermediate_size if intermediate_size is None else intermediate_size + self.use_bias = config.use_bias + self.act_fn = ACT2FN[config.hidden_act] + + self.gate_up_proj = nn.Parameter(torch.zeros(self.num_experts, 2 * self.intermediate_dim, self.hidden_dim)) + self.down_proj = nn.Parameter(torch.zeros(self.num_experts, self.hidden_dim, self.intermediate_dim)) + if self.use_bias: + self.gate_up_proj_bias = nn.Parameter(torch.zeros(self.num_experts, 2 * self.intermediate_dim)) + self.down_proj_bias = nn.Parameter(torch.zeros(self.num_experts, self.hidden_dim)) + else: + self.gate_up_proj_bias = None + self.down_proj_bias = None + + def forward( + self, hidden_states: torch.Tensor, selected_experts: torch.Tensor, routing_weights: torch.Tensor + ) -> torch.Tensor: + final_hidden_states = torch.zeros_like(hidden_states) + if selected_experts.numel() == 0: + return final_hidden_states + + expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0) + + expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero() + for expert_idx in expert_hit: + expert_idx = int(expert_idx.item()) + idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0)) + current_state = hidden_states[None, top_x].reshape(-1, hidden_states.shape[-1]) + gate_inputs = F.linear( + current_state, + self.gate_up_proj[expert_idx], + None if self.gate_up_proj_bias is None else self.gate_up_proj_bias[expert_idx], + ) + gate, up = gate_inputs.chunk(2, dim=-1) + current_hidden_states = self.act_fn(gate) * up + current_hidden_states = F.linear( + current_hidden_states, + self.down_proj[expert_idx], + None if self.down_proj_bias is None else self.down_proj_bias[expert_idx], + ) + current_hidden_states = current_hidden_states * routing_weights[top_x, idx, None] + final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype)) + return final_hidden_states + + +class Ernie4_5_VLSparseMoeBlock(nn.Module): + def __init__(self, config, 
intermediate_size): + super().__init__() + self.hidden_dim = config.hidden_size + self.num_experts = config.moe_num_experts + self.top_k = config.moe_k + self.gate = Ernie4_5_VLMoeTopKRouter(config) + self.experts = Ernie4_5_VLMoeExperts(config, intermediate_size) + + def forward( + self, + hidden_states: torch.Tensor, + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + hidden_states = hidden_states.view(-1, self.hidden_dim) + + routing_weights, selected_experts, router_logits = self.gate(hidden_states) + final_hidden_states = self.experts(hidden_states, selected_experts, routing_weights) + + # moe results are changed to a flattened shape to ease the modality isolated assigning of results + return final_hidden_states.flatten(), router_logits.flatten() + + +class Ernie4_5_VLMoeBlock(nn.Module): + """ + Similar to `Ernie4_5_Moe` where we have modality isolated experts: + - A set of text experts that are only run on text tokens + - A set of vision experts that are only run on vision (image/video) tokens + + This modality isolation is unique to the Ernie 4.5 VL models. + """ + + def __init__(self, config): + super().__init__() + self.num_experts = config.moe_num_experts + + self.text_moe = Ernie4_5_VLSparseMoeBlock(config, intermediate_size=config.moe_intermediate_size[0]) + self.vision_moe = Ernie4_5_VLSparseMoeBlock(config, intermediate_size=config.moe_intermediate_size[1]) + + self.shared_experts = None + if config.moe_num_shared_experts > 0: + self.shared_experts = Ernie4_5_VLMLP( + config, config.moe_intermediate_size[0] * config.moe_num_shared_experts + ) + + def forward( + self, + hidden_states: torch.Tensor, + mm_token_type_ids: Optional[torch.IntTensor] = None, + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + batch_size, sequence_length, hidden_dim = hidden_states.shape + + # (Optional) shared experts + if self.shared_experts is not None: + shared_output = self.shared_experts(hidden_states) + + if mm_token_type_ids is not None and mm_token_type_ids.any(): + final_hidden_states = torch.zeros_like(hidden_states) + router_logits = torch.zeros( + size=(batch_size * sequence_length, self.num_experts), + device=final_hidden_states.device, + dtype=torch.float, + ) + + # True (1 or 2) == vision, False (0) == text tokens + mm_token_type_ids = mm_token_type_ids.bool() + token_type_ids_router = mm_token_type_ids.reshape(-1)[:, None].expand(-1, self.num_experts) + token_type_ids_states = mm_token_type_ids[..., None].expand(-1, -1, hidden_dim) + + # Run moe on each modality and assign their results to the original token positions + final_hidden_states[~token_type_ids_states], router_logits[~token_type_ids_router] = self.text_moe( + hidden_states[~token_type_ids_states] + ) + final_hidden_states[token_type_ids_states], router_logits[token_type_ids_router] = self.vision_moe( + hidden_states[token_type_ids_states] + ) + else: + final_hidden_states, router_logits = self.text_moe(hidden_states) + final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim) + router_logits = router_logits.reshape(-1, self.num_experts) + + # Add (optional) shared experts to the result + if self.shared_experts is not None: + final_hidden_states = final_hidden_states + shared_output + + return final_hidden_states, router_logits + + +class Ernie4_5_VLDecoderLayer(GradientCheckpointingLayer): + def __init__(self, config, layer_idx): + super().__init__() + self.hidden_size = config.hidden_size + + self.self_attn = Ernie4_5_VLTextAttention(config, 
layer_idx) + + if ( + ((layer_idx + 1) % config.moe_layer_interval == 0) + and layer_idx >= config.moe_layer_start_index + and layer_idx <= config.moe_layer_end_index + ): + self.mlp = Ernie4_5_VLMoeBlock(config) + else: + self.mlp = Ernie4_5_VLMLP(config) + + self.input_layernorm = Ernie4_5_VLRMSNorm(config.hidden_size, config.rms_norm_eps) + self.post_attention_layernorm = Ernie4_5_VLRMSNorm(config.hidden_size, config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + position_embeddings: tuple[torch.Tensor, torch.Tensor], + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + mm_token_type_ids: Optional[torch.IntTensor] = None, + past_key_values: Optional[Cache] = None, + cache_position: Optional[torch.LongTensor] = None, + **kwargs: Unpack[FlashAttentionKwargs], + ) -> tuple[torch.Tensor, Optional[tuple[torch.Tensor, torch.Tensor]]]: + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, _ = self.self_attn( + hidden_states=hidden_states, + position_embeddings=position_embeddings, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + cache_position=cache_position, + **kwargs, + ) + hidden_states = hidden_states + residual + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + if isinstance(self.mlp, Ernie4_5_VLMoeBlock): + hidden_states, _ = self.mlp(hidden_states, mm_token_type_ids) + else: + hidden_states = self.mlp(hidden_states) + hidden_states = hidden_states + residual + + return hidden_states + + +@auto_docstring +class Ernie4_5_VLPreTrainedModel(PreTrainedModel): + config: Ernie4_5_VLConfig + base_model_prefix = "model" + input_modalities = ["image", "video", "text"] + supports_gradient_checkpointing = True + _no_split_modules = ["Ernie4_5_VLDecoderLayer", "Ernie4_5_VLVisionBlock"] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn = True + _supports_sdpa = True + _can_compile_fullgraph = False + _supports_attention_backend = True + + _can_record_outputs = { + "router_logits": OutputRecorder(Ernie4_5_VLMoeBlock, index=1), + "hidden_states": Ernie4_5_VLDecoderLayer, + "attentions": Ernie4_5_VLTextAttention, + } + _keep_in_fp32_modules_strict = ["gate.weight", "moe_statics"] + + def _init_weights(self, module): + super()._init_weights(module) + if isinstance(module, Ernie4_5_VLMoeTopKRouter): + init.zeros_(module.moe_statics.e_score_correction_bias) + init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) + elif isinstance(module, Ernie4_5_VLMoeExperts): + init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range) + init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range) + if module.gate_up_proj_bias is not None: + init.zeros_(module.gate_up_proj_bias) + init.zeros_(module.down_proj_bias) + + +@auto_docstring +class Ernie4_5_VLTextModel(Ernie4_5_VLPreTrainedModel): + config: Ernie4_5_VLTextConfig + + def __init__(self, config: Ernie4_5_VLTextConfig): + super().__init__(config) + self.vocab_size = config.vocab_size + self.embed_tokens = nn.Embedding( + self.vocab_size, + config.hidden_size, + ) + self.layers = nn.ModuleList( + [Ernie4_5_VLDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] + ) + self.norm = Ernie4_5_VLRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.rotary_emb = Ernie4_5_VLTextRotaryEmbedding(config=config) + 
        self.gradient_checkpointing = False
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @check_model_inputs()
+    @auto_docstring
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        mm_token_type_ids: Optional[torch.IntTensor] = None,
+        past_key_values: Optional[Cache] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+        **kwargs: Unpack[FlashAttentionKwargs],
+    ) -> MoeModelOutputWithPast:
+        r"""
+        mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Token type ids marking image/video tokens in the input sequence as `True` and text tokens as `False`.
+        """
+        if (input_ids is None) ^ (inputs_embeds is not None):
+            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+        # torch.jit.trace() doesn't support cache objects in the output
+        if use_cache and past_key_values is None and not torch.jit.is_tracing():
+            past_key_values = DynamicCache(config=self.config)
+
+        if inputs_embeds is None:
+            inputs_embeds = self.embed_tokens(input_ids)
+
+        if cache_position is None:
+            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+            cache_position = torch.arange(
+                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+            )
+
+        # the hard-coded `3` is for temporal, height and width.
+        if position_ids is None:
+            position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
+        elif position_ids.ndim == 2:
+            position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
+
+        # NOTE: we need to pass text position ids for packing. Ernie 4.5 VL uses 3D positions
+        # where each dim indicates visual spatial positions for temporal/height/width grids.
+        # There is only one scenario when FA2-like packed masking might be activated:
+        # 1. The user specifically passed packed `position_ids` and no attention mask.
+        # In this case we expect the user to create correct position ids for all 3 grids
+        # and prepend text-only position ids to it.
The final tensor will be [4, bs, seq-len] + if position_ids.ndim == 3 and position_ids.shape[0] == 4: + text_position_ids = position_ids[0] + position_ids = position_ids[1:] + else: + # If inputs are not packed (usual 3D positions), do not prepare mask from position_ids + text_position_ids = None + + attention_mask = create_causal_mask( + config=self.config, + input_embeds=inputs_embeds, + attention_mask=attention_mask, + cache_position=cache_position, + past_key_values=past_key_values, + position_ids=text_position_ids, + ) + + hidden_states = inputs_embeds + + # create position embeddings to be shared across the decoder layers + position_embeddings = self.rotary_emb(hidden_states, position_ids) + + for decoder_layer in self.layers[: self.config.num_hidden_layers]: + hidden_states = decoder_layer( + hidden_states, + position_embeddings=position_embeddings, + attention_mask=attention_mask, + position_ids=position_ids, + mm_token_type_ids=mm_token_type_ids, + past_key_values=past_key_values, + cache_position=cache_position, + **kwargs, + ) + + hidden_states = self.norm(hidden_states) + + return MoeModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=past_key_values, + ) + + +class Ernie4_5VLVisionMLP(nn.Module): + def __init__(self, dim: int, hidden_dim: int, hidden_act: str) -> None: + super().__init__() + self.fc1 = nn.Linear(dim, hidden_dim) + self.act = ACT2FN[hidden_act] + self.fc2 = nn.Linear(hidden_dim, dim) + + def forward(self, x) -> torch.Tensor: + return self.fc2(self.act(self.fc1(x))) + + +class Ernie4_5_VLPatchEmbed(nn.Module): + def __init__( + self, + patch_size: int = 14, + in_channels: int = 3, + embed_dim: int = 1152, + ) -> None: + super().__init__() + self.patch_size = patch_size + self.in_channels = in_channels + self.embed_dim = embed_dim + self.proj = nn.Linear(in_channels * patch_size * patch_size, embed_dim, bias=False) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + target_dtype = self.proj.weight.dtype + return self.proj(hidden_states.to(target_dtype)) + + +class Ernie4_5_VLVisionRotaryEmbedding(nn.Module): + inv_freq: torch.Tensor # fix linting for `register_buffer` + + def __init__(self, dim: int, theta: float = 10000.0) -> None: + super().__init__() + inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + def forward(self, seqlen: int) -> torch.Tensor: + seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype) + freqs = torch.outer(seq, self.inv_freq) + return freqs + + +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb_vision( + q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor +) -> tuple[torch.Tensor, torch.Tensor]: + orig_q_dtype = q.dtype + orig_k_dtype = k.dtype + q, k = q.float(), k.float() + cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float() + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + q_embed = q_embed.to(orig_q_dtype) + k_embed = k_embed.to(orig_k_dtype) + return q_embed, k_embed + + +class Ernie4_5_VLVisionAttention(nn.Module): + def __init__(self, config: Ernie4_5_VLVisionConfig) -> None: + super().__init__() + self.dim = config.hidden_size + self.num_heads = config.num_heads + self.head_dim = self.dim // self.num_heads + self.num_key_value_groups = 1 # 
needed for eager attention + self.qkv = nn.Linear(self.dim, self.dim * 3, bias=True) + self.proj = nn.Linear(self.dim, self.dim) + self.scaling = self.head_dim**-0.5 + self.config = config + self.attention_dropout = 0.0 + self.is_causal = False + + def forward( + self, + hidden_states: torch.Tensor, + cu_seqlens: torch.Tensor, + rotary_pos_emb: Optional[torch.Tensor] = None, + position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, + **kwargs, + ) -> torch.Tensor: + seq_length = hidden_states.shape[0] + query_states, key_states, value_states = ( + self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0) + ) + cos, sin = position_embeddings + query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin) + + query_states = query_states.transpose(0, 1).unsqueeze(0) + key_states = key_states.transpose(0, 1).unsqueeze(0) + value_states = value_states.transpose(0, 1).unsqueeze(0) + + attention_interface: Callable = eager_attention_forward + if self.config._attn_implementation != "eager": + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] + + if self.config._attn_implementation == "flash_attention_2": + # Flash Attention 2: Use cu_seqlens for variable length attention + max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max() + attn_output, _ = attention_interface( + self, + query_states, + key_states, + value_states, + attention_mask=None, + scaling=self.scaling, + dropout=0.0 if not self.training else self.attention_dropout, + cu_seq_lens_q=cu_seqlens, + cu_seq_lens_k=cu_seqlens, + max_length_q=max_seqlen, + max_length_k=max_seqlen, + is_causal=False, + **kwargs, + ) + else: + # Other implementations: Process each chunk separately + lengths = cu_seqlens[1:] - cu_seqlens[:-1] + splits = [ + torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states) + ] + + attn_outputs = [ + attention_interface( + self, + q, + k, + v, + attention_mask=None, + scaling=self.scaling, + dropout=0.0 if not self.training else self.attention_dropout, + is_causal=False, + **kwargs, + )[0] + for q, k, v in zip(*splits) + ] + attn_output = torch.cat(attn_outputs, dim=1) + + attn_output = attn_output.reshape(seq_length, -1).contiguous() + attn_output = self.proj(attn_output) + return attn_output + + +class Ernie4_5_VLVisionBlock(GradientCheckpointingLayer): + def __init__(self, config, attn_implementation: str = "sdpa") -> None: + super().__init__() + + self.norm1 = nn.LayerNorm(config.hidden_size, config.rms_norm_eps) + self.norm2 = nn.LayerNorm(config.hidden_size, config.rms_norm_eps) + self.attn = Ernie4_5_VLVisionAttention(config=config) + self.mlp = Ernie4_5VLVisionMLP( + dim=config.hidden_size, + hidden_dim=config.intermediate_size, + hidden_act=config.hidden_act, + ) + + def forward( + self, + hidden_states: torch.Tensor, + cu_seqlens: torch.Tensor, + rotary_pos_emb: Optional[torch.Tensor] = None, + position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, + **kwargs, + ) -> torch.Tensor: + hidden_states = hidden_states + self.attn( + self.norm1(hidden_states), + cu_seqlens=cu_seqlens, + rotary_pos_emb=rotary_pos_emb, + position_embeddings=position_embeddings, + **kwargs, + ) + hidden_states = hidden_states + self.mlp(self.norm2(hidden_states)) + return hidden_states + + +class Ernie4_5_VLVisionTransformerPretrainedModel(Ernie4_5_VLPreTrainedModel): + config: Ernie4_5_VLVisionConfig + _no_split_modules = ["Ernie4_5_VLVisionBlock"] + + def 
__init__(self, config, *inputs, **kwargs) -> None: + super().__init__(config, *inputs, **kwargs) + self.spatial_merge_size = config.spatial_merge_size + self.patch_size = config.patch_size + self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size + + self.patch_embed = Ernie4_5_VLPatchEmbed( + patch_size=config.patch_size, + in_channels=config.in_channels, + embed_dim=config.hidden_size, + ) + + head_dim = config.hidden_size // config.num_heads + self.rotary_pos_emb = Ernie4_5_VLVisionRotaryEmbedding(head_dim // 2) + + self.blocks = nn.ModuleList([Ernie4_5_VLVisionBlock(config) for _ in range(config.depth)]) + self.gradient_checkpointing = False + + self.ln = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps) + + def rot_pos_emb(self, grid_thw): + pos_ids = [] + for t, h, w in grid_thw: + hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w) + hpos_ids = hpos_ids.reshape( + h // self.spatial_merge_size, + self.spatial_merge_size, + w // self.spatial_merge_size, + self.spatial_merge_size, + ) + hpos_ids = hpos_ids.permute(0, 2, 1, 3) + hpos_ids = hpos_ids.flatten() + + wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1) + wpos_ids = wpos_ids.reshape( + h // self.spatial_merge_size, + self.spatial_merge_size, + w // self.spatial_merge_size, + self.spatial_merge_size, + ) + wpos_ids = wpos_ids.permute(0, 2, 1, 3) + wpos_ids = wpos_ids.flatten() + pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1)) + pos_ids = torch.cat(pos_ids, dim=0) + max_grid_size = grid_thw[:, 1:].max() + rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size) + rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1) + return rotary_pos_emb + + def forward( + self, + hidden_states: torch.Tensor, + grid_thw: torch.Tensor, + **kwargs, + ) -> torch.Tensor: + """ + Args: + hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`): + The final hidden states of the model. + grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`): + The temporal, height and width of feature shape of each image in LLM. + + Returns: + `torch.Tensor`: hidden_states. 
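+
+        Example (shapes only, values assumed for illustration): with `grid_thw = torch.tensor([[1, 32, 32]])`,
+        `seq_len` is `1 * 32 * 32 = 1024` and the returned tensor has shape `(1024, hidden_size)`.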
+ """ + hidden_states = self.patch_embed(hidden_states) + + seq_len, _ = hidden_states.size() + rotary_pos_emb = self.rot_pos_emb(grid_thw) + rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1) + emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1) + position_embeddings = (emb.cos(), emb.sin()) + + cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum( + dim=0, + # Select dtype based on the following factors: + # - FA2 requires that cu_seqlens_q must have dtype int32 + # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw + # See https://github.com/huggingface/transformers/pull/34852 for more information + dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32, + ) + cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0) + + for block in self.blocks: + hidden_states = block( + hidden_states, + cu_seqlens=cu_seqlens, + position_embeddings=position_embeddings, + **kwargs, + ) + hidden_states = self.ln(hidden_states) + return hidden_states + + +class Ernie4_5_VLVisionMLP(nn.Module): + def __init__(self, config, in_dim, out_dim): + super().__init__() + + self.fc1 = nn.Linear(in_dim, out_dim) + self.act_fn = nn.GELU() + self.fc2 = nn.Linear(out_dim, out_dim) + self.ln = nn.LayerNorm(out_dim, eps=config.vision_config.rms_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.fc1(hidden_states) + hidden_states = self.act_fn(hidden_states) + hidden_states = self.fc2(hidden_states) + hidden_states = self.ln(hidden_states) + return hidden_states + + +class Ernie4_5_VLVariableResolutionResamplerModel(nn.Module): + def __init__(self, config: Ernie4_5_VLConfig): + super().__init__() + self.config = config + + self.in_dim = config.vision_config.hidden_size + self.out_dim = config.text_config.hidden_size + self.spatial_merge_size = config.vision_config.spatial_merge_size + self.temporal_merge_size = config.vision_config.temporal_merge_size + + # compress 2d conv(picture) to 1d + self.spatial_dim = self.in_dim * self.spatial_merge_size**2 + # compress 3d conv(video) to 1d + self.temporal_dim = self.in_dim * self.spatial_merge_size**2 * self.temporal_merge_size + + self.spatial_linear = Ernie4_5_VLVisionMLP(config, self.spatial_dim, self.spatial_dim) + self.temporal_linear = Ernie4_5_VLVisionMLP(config, self.temporal_dim, self.spatial_dim) + + self.mlp = nn.Linear(self.spatial_dim, self.out_dim) + self.after_norm = Ernie4_5_VLRMSNorm(self.out_dim, config.text_config.rms_norm_eps) + + def _temporal_slicing(self, hidden_states, grid_thw): + """ + Slices along the temporal dimension in even/odd patterns (usually if we have a video input) + or duplicates along temporal dimension (usually if we have an image input). + + Example: + Video input with temporal pattern of [1, -1, 2, -2, 3, -3] + > Even input [1, 2, 3], odd input [-1, -2, -3] + > Reorderd via slices to [1, 2, 3, -1, -2, -3] + Image input with temporal pattern [1] + > Duplicate input [1], [1] + > Reordered to [1, 1] + + NOTE: This is hard-coded for `temporal_merge_size == 2` and won't work otherwise. 
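+
+        In effect (with `temporal_merge_size == 2`, for illustration): video inputs end up with half as
+        many rows and twice the feature dimension, while image inputs keep their rows and have their
+        features duplicated.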
+ """ + # Calculating offsets on spatial dim (based on flattened tensors) + grid_t, grid_hw = grid_thw[:, 0], grid_thw[:, 1:] + grid_hw_after_conv = grid_hw.prod(-1) // (self.spatial_merge_size**2) + + # Calculating offsets on batch dim (based on flattened tensors) + tokens_per_img_or_vid = (grid_thw.prod(-1) // (self.spatial_merge_size**2)).flatten() + batch_offsets = torch.empty(tokens_per_img_or_vid.size(), dtype=tokens_per_img_or_vid.dtype) + batch_offsets[0] = 0 + batch_offsets[1:] = tokens_per_img_or_vid.cumsum(dim=0)[:-1] + + first_slice_offsets = [] + second_slice_offsets = [] + for temporal_size, spatial_size, batch_offset in zip(grid_t, grid_hw_after_conv, batch_offsets): + # Depending on temporal, we may interleave: + # - Images have temporal == 1 --> same offsets (duplicate "frame" image) + # - Videos have temporal > 1 --> different offsets (even, odd) + first_offset_range = range(0, temporal_size, 2) + second_offset_range = range(1 if temporal_size > 1 else 0, temporal_size, 2) + + for temporal_offset_even, temporal_offset_odd in zip(first_offset_range, second_offset_range): + first_slice_offsets.append( + torch.arange( + batch_offset + (temporal_offset_even) * spatial_size, + batch_offset + (temporal_offset_even + 1) * spatial_size, + ) + ) + second_slice_offsets.append( + torch.arange( + batch_offset + (temporal_offset_odd) * spatial_size, + batch_offset + (temporal_offset_odd + 1) * spatial_size, + ) + ) + + # Input: [1, -1, 2, -2, 3, -3] or [1] + # Indices: [0, 2, 4] (even) or [0] (duplicate) + first_slice_offsets = torch.cat(first_slice_offsets, dim=-1).to(hidden_states.device) + # Indices: [1, 3, 5] (odd) or [0] (duplicate) + second_slice_offsets = torch.cat(second_slice_offsets, dim=-1).to(hidden_states.device) + + # Output: [1, 2, 3, -1, -2, -3] or [1, 1] + return torch.concat( + [ + torch.index_select(hidden_states, dim=0, index=first_slice_offsets), + torch.index_select(hidden_states, dim=0, index=second_slice_offsets), + ], + dim=-1, + ) + + def forward(self, hidden_states, grid_thw): + # image spatial + # reshape imitates convolution via linear projection + hidden_states = hidden_states.reshape([-1, hidden_states.shape[-1] * (self.spatial_merge_size**2)]) + hidden_states = self.spatial_linear(hidden_states) + + # video temporal + hidden_states = self._temporal_slicing(hidden_states, grid_thw) + hidden_states = self.temporal_linear(hidden_states) + + # final mlp + hidden_states = self.mlp(hidden_states) + hidden_states = self.after_norm(hidden_states) + + return hidden_states + + +@auto_docstring +class Ernie4_5_VLModel(Ernie4_5_VLPreTrainedModel): + base_model_prefix = "model" + _checkpoint_conversion_mapping = {} + # Reference: fix gemma3 grad acc #37208 + accepts_loss_kwargs = False + config: Ernie4_5_VLConfig + _no_split_modules = ["Ernie4_5_VLDecoderLayer", "Ernie4_5_VLVisionBlock"] + + def __init__(self, config: Ernie4_5_VLConfig): + super().__init__(config) + self.language_model = Ernie4_5_VLTextModel._from_config(config.text_config) + self.rope_deltas = None # cache rope_deltas here + self.vision_tower = Ernie4_5_VLVisionTransformerPretrainedModel._from_config(config.vision_config) + self.resampler_model = Ernie4_5_VLVariableResolutionResamplerModel(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.language_model.get_input_embeddings() + + def set_input_embeddings(self, value): + self.language_model.set_input_embeddings(value) + + def get_rope_index( + self, + input_ids: 
Optional[torch.LongTensor] = None, + image_grid_thw: Optional[torch.LongTensor] = None, + video_grid_thw: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + ) -> tuple[torch.Tensor, torch.Tensor]: + """ + Calculate the 3D rope index based on image and video's temporal, height and width in LLM. + + Explanation: + Each embedding sequence contains vision embedding and text embedding or just contains text embedding. + + For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs. + Examples: + input_ids: [T T T T T], here T is for text. + temporal position_ids: [0, 1, 2, 3, 4] + height position_ids: [0, 1, 2, 3, 4] + width position_ids: [0, 1, 2, 3, 4] + + For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part + and 1D rotary position embedding for text part. + Examples: + Temporal (Time): 3 patches, representing different segments of the video in time. + Height: 2 patches, dividing each frame vertically. + Width: 2 patches, dividing each frame horizontally. + We also have some important parameters: + fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second. + tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per second. So each second of the video will be represented with 25 separate time points. It essentially defines the temporal granularity. + temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames. + interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will have a difference of 50 in the temporal position IDs. + input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision. + vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100] + vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1] + vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1] + text temporal position_ids: [101, 102, 103, 104, 105] + text height position_ids: [101, 102, 103, 104, 105] + text width position_ids: [101, 102, 103, 104, 105] + Here we calculate the text start position_ids as the max vision position_ids plus 1. + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): + The temporal, height and width of feature shape of each image in LLM. + video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): + The temporal, height and width of feature shape of each video in LLM. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**.
+ + Returns: + position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`) + mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`) + """ + + image_token_id = self.config.image_token_id + video_token_id = self.config.video_token_id + video_start_token_id = self.config.video_start_token_id + video_end_token_id = self.config.video_end_token_id + temporal_merge_size = self.config.vision_config.temporal_merge_size + spatial_merge_size = self.config.vision_config.spatial_merge_size + + mrope_position_deltas = [] + if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None): + total_input_ids = input_ids + if attention_mask is None: + attention_mask = torch.ones_like(total_input_ids) + position_ids = torch.ones( + 3, + input_ids.shape[0], + input_ids.shape[1], + dtype=input_ids.dtype, + device=input_ids.device, + ) + image_index, video_index = 0, 0 + attention_mask = attention_mask.to(total_input_ids.device) + for i, input_ids in enumerate(total_input_ids): + input_ids = input_ids[attention_mask[i] == 1] + input_tokens = input_ids.tolist() + + input_token_type = [] + video_check_flg = False + for token in input_tokens: + if token == video_start_token_id: + video_check_flg = True + elif token == video_end_token_id: + video_check_flg = False + + if token == image_token_id and not video_check_flg: + input_token_type.append("image") + elif token == video_token_id and video_check_flg: + input_token_type.append("video") + else: + input_token_type.append("text") + + input_type_group = [] + for key, group in itertools.groupby(enumerate(input_token_type), lambda x: x[1]): + group = list(group) + start_index = group[0][0] + end_index = group[-1][0] + 1 + input_type_group.append((key, start_index, end_index)) + + llm_pos_ids_list = [] + for modality_type, start_idx, end_idx in input_type_group: + st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 + + if modality_type == "text": + text_len = end_idx - start_idx + llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) + + else: + grid_thw = image_grid_thw if modality_type == "image" else video_grid_thw + mm_index = image_index if modality_type == "image" else video_index + t_merge_size = 1 if modality_type == "image" else temporal_merge_size + + t, h, w = ( + grid_thw[mm_index][0], + grid_thw[mm_index][1], + grid_thw[mm_index][2], + ) + llm_grid_t, llm_grid_h, llm_grid_w = ( + t.item() // t_merge_size, + h.item() // spatial_merge_size, + w.item() // spatial_merge_size, + ) + + for t_idx in range(llm_grid_t): + t_index = torch.tensor(t_idx).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten() + h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(1, -1, llm_grid_w).flatten() + w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(1, llm_grid_h, -1).flatten() + llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + st_idx) + + if modality_type == "image": + image_index += 1 + else: + video_index += 1 + + llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) + position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device) + mrope_position_deltas.append(llm_positions.max() + 1 - len(total_input_ids[i])) + mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1) + return position_ids, mrope_position_deltas + else: + if attention_mask is not None: + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + 
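# Illustration: attention_mask = [[0, 0, 1, 1, 1]] gives cumsum - 1 = [[-1, -1, 0, 1, 2]], which the
+ # line above rewrites to [[1, 1, 0, 1, 2]] before it is broadcast to the 3 (t, h, w) planes below. +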
position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device) + max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0] + mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1] + else: + position_ids = ( + torch.arange(input_ids.shape[1], device=input_ids.device) + .view(1, 1, -1) + .expand(3, input_ids.shape[0], -1) + ) + mrope_position_deltas = torch.zeros( + [input_ids.shape[0], 1], + device=input_ids.device, + dtype=input_ids.dtype, + ) + + return position_ids, mrope_position_deltas + + def get_video_features( + self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None + ): + """ + Encodes videos into continuous embeddings that can be forwarded to the language model. + + Args: + pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): + The tensors corresponding to the input videos. + video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): + The temporal, height and width of feature shape of each video in LLM. + """ + video_embeds = self.vision_tower(pixel_values_videos, video_grid_thw) + video_embeds = self.resampler_model(video_embeds, video_grid_thw) + split_sizes = ( + video_grid_thw.prod(-1) + // self.vision_tower.spatial_merge_size**2 + // self.resampler_model.temporal_merge_size + ).tolist() + video_embeds = torch.split(video_embeds, split_sizes) + return video_embeds + + def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None): + """ + Encodes images into continuous embeddings that can be forwarded to the language model. + + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): + The tensors corresponding to the input images. + image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): + The temporal, height and width of feature shape of each image in LLM. + """ + image_embeds = self.vision_tower(pixel_values, image_grid_thw) + image_embeds = self.resampler_model(image_embeds, image_grid_thw) + split_sizes = (image_grid_thw.prod(-1) // self.vision_tower.spatial_merge_size**2).tolist() + image_embeds = torch.split(image_embeds, split_sizes) + return image_embeds + + def get_placeholder_mask( + self, + input_ids: torch.LongTensor, + inputs_embeds: torch.FloatTensor, + image_features: Optional[torch.FloatTensor] = None, + video_features: Optional[torch.FloatTensor] = None, + ): + """ + Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is + equal to the length of multimodal features. If the lengths are different, an error is raised. 
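+
+ Example (illustrative, with a hypothetical `image_token_id` of 10): `input_ids = [[1, 10, 10, 2]]` yields `special_image_mask = [[False, True, True, False]]`, which is expanded over the hidden dimension before use.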
+ """ + if input_ids is None: + special_image_mask = inputs_embeds == self.get_input_embeddings()( + torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device) + ) + special_image_mask = special_image_mask.all(-1) + special_video_mask = inputs_embeds == self.get_input_embeddings()( + torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device) + ) + special_video_mask = special_video_mask.all(-1) + else: + special_image_mask = input_ids == self.config.image_token_id + special_video_mask = input_ids == self.config.video_token_id + + n_image_tokens = special_image_mask.sum() + special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) + if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel(): + raise ValueError( + f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}" + ) + + n_video_tokens = special_video_mask.sum() + special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) + if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel(): + raise ValueError( + f"Videos features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}" + ) + + return special_image_mask, special_video_mask + + @auto_docstring + @can_return_tuple + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + mm_token_type_ids: Optional[torch.IntTensor] = None, + past_key_values: Optional[Cache] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + pixel_values: Optional[torch.Tensor] = None, + pixel_values_videos: Optional[torch.FloatTensor] = None, + image_grid_thw: Optional[torch.LongTensor] = None, + video_grid_thw: Optional[torch.LongTensor] = None, + rope_deltas: Optional[torch.LongTensor] = None, + cache_position: Optional[torch.LongTensor] = None, + **kwargs: Unpack[TransformersKwargs], + ) -> Union[tuple, MoeModelOutputWithPast]: + r""" + mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*): + Token type ids matching image/video tokens in the inputs sequence to `True` and otherwise `False`. + image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): + The temporal, height and width of feature shape of each image in LLM. + video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): + The temporal, height and width of feature shape of each video in LLM. + rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): + The rope index difference between sequence length and multimodal rope. 
+ """ + if inputs_embeds is None: + inputs_embeds = self.get_input_embeddings()(input_ids) + + if pixel_values is not None: + image_embeds = self.get_image_features(pixel_values, image_grid_thw) + image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype) + image_mask, _ = self.get_placeholder_mask( + input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds + ) + inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) + + if pixel_values_videos is not None: + video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw) + video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype) + _, video_mask = self.get_placeholder_mask( + input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds + ) + inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds) + + if position_ids is None: + position_ids = self.get_position_ids( + input_ids=input_ids, + attention_mask=attention_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + image_grid_thw=image_grid_thw, + video_grid_thw=video_grid_thw, + cache_position=cache_position, + ) + + outputs = self.language_model( + input_ids=None, + position_ids=position_ids, + mm_token_type_ids=mm_token_type_ids, + attention_mask=attention_mask, + use_cache=use_cache, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + return_dict=True, + cache_position=cache_position, + **kwargs, + ) + + return MoeModelOutputWithPast( + last_hidden_state=outputs.last_hidden_state, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + router_logits=outputs.router_logits, + ) + + # TODO: Should be moved to generation loop instead in the future + # Relevant PR(s): https://github.com/huggingface/transformers/pull/42088 + def get_position_ids( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Cache] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + image_grid_thw: Optional[torch.LongTensor] = None, + video_grid_thw: Optional[torch.LongTensor] = None, + cache_position: Optional[torch.LongTensor] = None, + ): + """ + Calculating the 3D position ids with a custom mechanism / caching + - First forward calculates the initial positions and the respective + deltas (offset) for subsequent positions. See `get_rope_index` for + more details. + - Second and on (generation), uses the cache position combined with the + cached deltas to determine the current position. + + NOTE: We assume that the position ids are `None` and recalculate them here in any case. + """ + # Calculate RoPE index once per generation in the pre-fill stage only. 
+ # When compiling, we can't check tensor values thus we check only input length + # It is safe to assume that `length!=1` means we're in pre-fill because compiled + # models currently cannot do assisted decoding + prefill_compiled_stage = is_torchdynamo_compiling() and ( + (input_ids is not None and input_ids.shape[1] != 1) + or (inputs_embeds is not None and inputs_embeds.shape[1] != 1) + ) + prefill_noncompiled_stage = not is_torchdynamo_compiling() and ( + (cache_position is not None and cache_position[0] == 0) + or (past_key_values is None or past_key_values.get_seq_length() == 0) + ) + if (prefill_compiled_stage or prefill_noncompiled_stage) or self.rope_deltas is None: + position_ids, rope_deltas = self.get_rope_index( + input_ids, + image_grid_thw, + video_grid_thw, + attention_mask=attention_mask, + ) + self.rope_deltas = rope_deltas + # then use the prev pre-calculated rope-deltas to get the correct position ids + else: + batch_size, seq_length, device = input_ids.shape[0], 1, input_ids.device + delta = (cache_position[0] + self.rope_deltas).to(device) if cache_position is not None else 0 + position_ids = torch.arange(seq_length, device=device) + position_ids = position_ids.view(1, -1).expand(batch_size, -1) + if cache_position is not None: # otherwise `deltas` is an int `0` + delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0) + position_ids = position_ids.add(delta) + position_ids = position_ids.unsqueeze(0).expand(3, -1, -1) + + return position_ids + + +def load_balancing_loss_func( + gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None], + num_experts: Optional[int] = None, + top_k=2, + attention_mask: Optional[torch.Tensor] = None, +) -> Union[torch.Tensor, int]: + r""" + Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch. + + See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss + function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between + experts is too unbalanced. + + Args: + gate_logits: + Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of + shape [batch_size X sequence_length, num_experts]. + num_experts: + Number of experts + top_k: + The number of experts to route per-token, can be also interpreted as the `top-k` routing + parameter. + attention_mask (`torch.Tensor`, *optional*): + The attention_mask used in forward function + shape [batch_size X sequence_length] if not None. + + Returns: + The auxiliary loss.
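+
+ Example (for illustration): with `num_experts=2`, `top_k=1` and perfectly balanced routing, both `tokens_per_expert` and `router_prob_per_expert` equal `[0.5, 0.5]`, so the returned loss is 2 * (0.5 * 0.5 + 0.5 * 0.5) = 1.0.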
+ """ + if gate_logits is None or not isinstance(gate_logits, tuple): + return 0 + + if isinstance(gate_logits, tuple): + compute_device = gate_logits[0].device + concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0) + + routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1) + + _, selected_experts = torch.topk(routing_weights, top_k, dim=-1) + + expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts) + + if attention_mask is None: + # Compute the percentage of tokens routed to each experts + tokens_per_expert = torch.mean(expert_mask.float(), dim=0) + + # Compute the average probability of routing to these experts + router_prob_per_expert = torch.mean(routing_weights, dim=0) + else: + batch_size, sequence_length = attention_mask.shape + num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length) + + # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask + expert_attention_mask = ( + attention_mask[None, :, :, None, None] + .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts)) + .reshape(-1, top_k, num_experts) + .to(compute_device) + ) + + # Compute the percentage of tokens routed to each experts + tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum( + expert_attention_mask, dim=0 + ) + + # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert + router_per_expert_attention_mask = ( + attention_mask[None, :, :, None] + .expand((num_hidden_layers, batch_size, sequence_length, num_experts)) + .reshape(-1, num_experts) + .to(compute_device) + ) + + # Compute the average probability of routing to these experts + router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum( + router_per_expert_attention_mask, dim=0 + ) + + overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0)) + return overall_loss * num_experts + + +class Ernie4_5_VLForConditionalGeneration(Ernie4_5_VLPreTrainedModel, GenerationMixin): + _checkpoint_conversion_mapping = {} + _tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"} + # Reference: fix gemma3 grad acc #37208 + accepts_loss_kwargs = False + + def __init__(self, config): + super().__init__(config) + self.model = Ernie4_5_VLModel(config) + self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) + + self.router_aux_loss_coef = config.text_config.router_aux_loss_coef + self.num_experts = config.text_config.moe_num_experts + self.num_experts_per_tok = config.text_config.moe_k + + self.post_init() + + def get_input_embeddings(self): + return self.model.get_input_embeddings() + + def set_input_embeddings(self, value): + self.model.set_input_embeddings(value) + + def get_video_features( + self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None + ): + return self.model.get_video_features(pixel_values_videos, video_grid_thw) + + def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None): + return self.model.get_image_features(pixel_values, image_grid_thw) + + @auto_docstring + @can_return_tuple + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + mm_token_type_ids: 
Optional[torch.IntTensor] = None, + past_key_values: Optional[Cache] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_router_logits: Optional[bool] = None, + pixel_values: Optional[torch.Tensor] = None, + pixel_values_videos: Optional[torch.FloatTensor] = None, + image_grid_thw: Optional[torch.LongTensor] = None, + video_grid_thw: Optional[torch.LongTensor] = None, + rope_deltas: Optional[torch.LongTensor] = None, + cache_position: Optional[torch.LongTensor] = None, + logits_to_keep: Union[int, torch.Tensor] = 0, + **kwargs: Unpack[TransformersKwargs], + ) -> Union[tuple, MoeCausalLMOutputWithPast]: + r""" + mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*): + Token type ids matching image/video tokens in the inputs sequence to `True` and otherwise `False`. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): + The temporal, height and width of feature shape of each image in LLM. + video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): + The temporal, height and width of feature shape of each video in LLM. + rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): + The rope index difference between sequence length and multimodal rope. + """ + output_router_logits = ( + output_router_logits if output_router_logits is not None else self.config.text_config.output_router_logits + ) + + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + mm_token_type_ids=mm_token_type_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_router_logits=output_router_logits, + return_dict=True, + pixel_values=pixel_values, + pixel_values_videos=pixel_values_videos, + image_grid_thw=image_grid_thw, + video_grid_thw=video_grid_thw, + rope_deltas=rope_deltas, + cache_position=cache_position, + **kwargs, + ) + + hidden_states = outputs.last_hidden_state + # Only compute necessary logits, and do not upcast them to float if we are not computing the loss + slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep + logits = self.lm_head(hidden_states[:, slice_indices, :]) + + loss = None + if labels is not None: + loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size) + + aux_loss = None + if output_router_logits: + aux_loss = load_balancing_loss_func( + outputs.router_logits, + self.num_experts, + self.num_experts_per_tok, + attention_mask, + ) + if labels is not None: + loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device + + return MoeCausalLMOutputWithPast( + loss=loss, + aux_loss=aux_loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + router_logits=outputs.router_logits, + ) + + def prepare_inputs_for_generation( + self, + input_ids, + inputs_embeds=None, + attention_mask=None, + cache_position=None, + 
past_key_values=None, + image_grid_thw=None, + video_grid_thw=None, + # Intentionally ignore position ids to force custom cache logic + position_ids=None, + **kwargs, + ): + model_inputs = super().prepare_inputs_for_generation( + input_ids, + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + cache_position=cache_position, + past_key_values=past_key_values, + image_grid_thw=image_grid_thw, + video_grid_thw=video_grid_thw, + **kwargs, + ) + + # Using our own caching with rope delta + model_inputs["position_ids"] = self.model.get_position_ids( + input_ids=model_inputs.get("input_ids"), + attention_mask=model_inputs.get("attention_mask"), + past_key_values=model_inputs.get("past_key_values"), + inputs_embeds=model_inputs.get("inputs_embeds"), + image_grid_thw=model_inputs.get("image_grid_thw"), + video_grid_thw=model_inputs.get("video_grid_thw"), + cache_position=model_inputs.get("cache_position"), + ) + + if model_inputs["cache_position"][0] != 0: + model_inputs["pixel_values"] = None + model_inputs["pixel_values_videos"] = None + model_inputs["mm_token_type_ids"] = None + + return model_inputs + + def _get_image_nums_and_video_nums( + self, + input_ids: Optional[torch.LongTensor], + inputs_embeds: Optional[torch.Tensor] = None, + ) -> tuple[torch.Tensor, torch.Tensor]: + """ + Get the number of images and videos for each sample to calculate the separation length of the sample tensor. + These parameters are not passed through the processor to avoid unpredictable impacts from interface modifications. + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Returns: + image_nums (`torch.LongTensor` of shape `(batch_size, num_images_sample)`) + video_nums (`torch.LongTensor` of shape `(batch_size, num_videos_sample)`) + """ + + if inputs_embeds is not None: + is_image = ( + inputs_embeds + == self.get_input_embeddings()( + torch.tensor(self.config.image_start_token_id, dtype=torch.long, device=inputs_embeds.device) + ) + )[..., 0] + is_video_start = ( + inputs_embeds + == self.get_input_embeddings()( + torch.tensor(self.config.video_start_token_id, dtype=torch.long, device=inputs_embeds.device) + ) + )[..., 0] + is_video_end = ( + inputs_embeds + == self.get_input_embeddings()( + torch.tensor(self.config.video_end_token_id, dtype=torch.long, device=inputs_embeds.device) + ) + )[..., 0] + else: + is_image = input_ids == self.config.image_start_token_id + is_video_start = input_ids == self.config.video_start_token_id + is_video_end = input_ids == self.config.video_end_token_id + + # Cumulative sum to track if we're inside a video span + # We'll assume well-formed video tags (i.e. 
matching starts and ends) + video_level = torch.cumsum(is_video_start.int() - is_video_end.int(), dim=1) + inside_video = video_level > 0 # shape (batch_size, seq_length) + + # Mask out image tokens that are inside video spans + standalone_images = is_image & (~inside_video) + + # Count per batch + image_counts = standalone_images.sum(dim=1) + video_counts = is_video_start.sum(dim=1) + + return image_counts, video_counts + + def _expand_inputs_for_generation( + self, + expand_size: int = 1, + is_encoder_decoder: bool = False, + input_ids: Optional[torch.LongTensor] = None, + **model_kwargs, + ) -> tuple[torch.LongTensor, dict[str, Any]]: + # Overwritten -- Support for expanding tensors without a batch size dimension + # e.g., pixel_values, image_grid_thw, pixel_values_videos, video_grid_thw, second_per_grid_t + # pixel_values.shape[0] is sum(seqlen_images for samples) + # image_grid_thw.shape[0] is sum(num_images for samples) + + if expand_size == 1: + return input_ids, model_kwargs + + visual_keys = ["pixel_values", "image_grid_thw", "pixel_values_videos", "video_grid_thw", "second_per_grid_ts"] + + def _expand_dict_for_generation_visual(dict_to_expand): + image_grid_thw = model_kwargs.get("image_grid_thw", None) + video_grid_thw = model_kwargs.get("video_grid_thw", None) + image_nums, video_nums = self._get_image_nums_and_video_nums( + input_ids, inputs_embeds=model_kwargs.get("inputs_embeds", None) + ) + + def _repeat_interleave_samples(x, lengths, repeat_times): + samples = torch.split(x, lengths) + repeat_args = [repeat_times] + [1] * (x.dim() - 1) + result = torch.cat([sample.repeat(*repeat_args) for sample in samples], dim=0) + return result + + for key in dict_to_expand: + if key == "pixel_values": + # split images into samples + samples = torch.split(image_grid_thw, list(image_nums)) + # compute the sequence length of images for each sample + lengths = [torch.prod(sample, dim=1).sum() for sample in samples] + dict_to_expand[key] = _repeat_interleave_samples( + dict_to_expand[key], lengths=lengths, repeat_times=expand_size + ) + elif key == "image_grid_thw": + # get the num of images for each sample + lengths = list(image_nums) + dict_to_expand[key] = _repeat_interleave_samples( + dict_to_expand[key], lengths=lengths, repeat_times=expand_size + ) + elif key == "pixel_values_videos": + samples = torch.split(video_grid_thw, list(video_nums)) + lengths = [torch.prod(sample, dim=1).sum() for sample in samples] + dict_to_expand[key] = _repeat_interleave_samples( + dict_to_expand[key], lengths=lengths, repeat_times=expand_size + ) + elif key == "video_grid_thw": + lengths = list(video_nums) + dict_to_expand[key] = _repeat_interleave_samples( + dict_to_expand[key], lengths=lengths, repeat_times=expand_size + ) + elif key == "second_per_grid_ts": + dict_to_expand[key] = _repeat_interleave_samples( + dict_to_expand[key], lengths=list(video_nums), repeat_times=expand_size + ) + return dict_to_expand + + def _expand_dict_for_generation(dict_to_expand): + for key in dict_to_expand: + if ( + key != "cache_position" + and dict_to_expand[key] is not None + and isinstance(dict_to_expand[key], torch.Tensor) + and key not in visual_keys + ): + dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0) + return dict_to_expand + + model_kwargs = _expand_dict_for_generation_visual(model_kwargs) + + if input_ids is not None: + input_ids = input_ids.repeat_interleave(expand_size, dim=0) + + model_kwargs = _expand_dict_for_generation(model_kwargs) + + if is_encoder_decoder: + if 
model_kwargs.get("encoder_outputs") is None: + raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.") + model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"]) + + return input_ids, model_kwargs + + @property + def visual(self): + return self.model.vision_tower + + +__all__ = [ + "Ernie4_5_VLPreTrainedModel", + "Ernie4_5_VLForConditionalGeneration", + "Ernie4_5_VLModel", + "Ernie4_5_VLTextModel", + "Ernie4_5_VLVisionTransformerPretrainedModel", + "Ernie4_5_VLVariableResolutionResamplerModel", +] diff --git a/src/transformers/models/ernie4_5_vl/modular_ernie4_5_vl.py b/src/transformers/models/ernie4_5_vl/modular_ernie4_5_vl.py new file mode 100644 index 000000000000..22bf0de504a8 --- /dev/null +++ b/src/transformers/models/ernie4_5_vl/modular_ernie4_5_vl.py @@ -0,0 +1,1209 @@ +# coding=utf-8 +# Copyright 2025 Baidu and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch Ernie4.5-VL model.""" + +import itertools +from collections.abc import Callable +from typing import Optional, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ... import initialization as init +from ...cache_utils import Cache, DynamicCache +from ...generation import GenerationMixin +from ...masking_utils import create_causal_mask +from ...modeling_flash_attention_utils import FlashAttentionKwargs +from ...modeling_layers import GradientCheckpointingLayer +from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast +from ...modeling_rope_utils import dynamic_rope_update +from ...modeling_utils import PreTrainedModel +from ...processing_utils import Unpack +from ...utils import ( + TransformersKwargs, + auto_docstring, + can_return_tuple, + is_torchdynamo_compiling, + logging, +) +from ...utils.generic import OutputRecorder, check_model_inputs +from ..ernie4_5_moe.modeling_ernie4_5_moe import ( + Ernie4_5_MoeAttention, + Ernie4_5_MoeExperts, + Ernie4_5_MoeMLP, + Ernie4_5_MoeModel, + Ernie4_5_MoeRMSNorm, + Ernie4_5_MoeStatics, + Ernie4_5_MoeTopKRouter, +) +from ..glm4v.modeling_glm4v import Glm4vForConditionalGeneration +from ..mixtral.modeling_mixtral import load_balancing_loss_func +from ..qwen2_5_vl.modeling_qwen2_5_vl import ( + Qwen2_5_VisionPatchEmbed, + Qwen2_5_VisionRotaryEmbedding, + Qwen2_5_VisionTransformerPretrainedModel, + Qwen2_5_VLModel, + Qwen2_5_VLPreTrainedModel, + Qwen2_5_VLVisionBlock, +) +from ..qwen2_vl.modeling_qwen2_vl import VisionMlp +from .configuration_ernie4_5_vl import Ernie4_5_VLConfig, Ernie4_5_VLTextConfig + + +logger = logging.get_logger(__name__) + + +class Ernie4_5_VLTextRotaryEmbedding(nn.Module): + inv_freq: torch.Tensor # fix linting for `register_buffer` + + def __init__(self, config, device=None): + super().__init__() + self.max_seq_len_cached = config.max_position_embeddings + self.original_max_seq_len = config.max_position_embeddings + + self.config = config + + self.rope_type = 
self.config.rope_parameters["rope_type"] + rope_init_fn: Callable = self.compute_default_rope_parameters + if self.rope_type != "default": + raise ValueError(f"Ernie 4.5 VL requires the `default` rope type, but found {self.rope_type} instead.") + inv_freq, self.attention_scaling = rope_init_fn(self.config, device) + + self.register_buffer("inv_freq", inv_freq, persistent=False) + self.original_inv_freq = inv_freq + + self.mrope_section = config.rope_parameters.get("mrope_section", [22, 22, 20]) + + @staticmethod + def compute_default_rope_parameters( + config: Optional[Ernie4_5_VLTextConfig] = None, + device: Optional["torch.device"] = None, + seq_len: Optional[int] = None, + ) -> tuple["torch.Tensor", float]: + """ + Computes the inverse frequencies according to the original RoPE implementation + Args: + config ([`~transformers.PreTrainedConfig`]): + The model configuration. + device (`torch.device`): + The device to use for initialization of the inverse frequencies. + seq_len (`int`, *optional*): + The current sequence length. Unused for this type of RoPE. + Returns: + Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the + post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE). + """ + base = config.rope_parameters["rope_theta"] + dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads + + attention_factor = 1.0 # Unused in this type of RoPE + + # Compute the inverse frequencies + inv_freq = 1.0 / ( + base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim) + ) + + # Special to ernie, we prerotate on the hw dim + mrope_section = config.rope_parameters.get("mrope_section", [22, 22, 20]) + hw_dim = mrope_section[0] + mrope_section[1] + t_dim = mrope_section[2] + + inv_freq_3d = torch.empty_like(inv_freq) + # (Pre-)Rotate to avoid another rotation during the forward + inv_freq_3d[:hw_dim] = torch.cat([inv_freq[:-t_dim][0::2], inv_freq[:-t_dim][1::2]]) + inv_freq_3d[-t_dim:] = inv_freq[-t_dim:] + + return inv_freq_3d, attention_factor + + @torch.no_grad() + @dynamic_rope_update # power user: used with advanced RoPE types (e.g. 
dynamic rope) + def forward(self, x, position_ids): + inv_freq_expanded = ( + self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1).to(x.device) + ) + position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions) + + device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" + with torch.autocast(device_type=device_type, enabled=False): # Force float32 + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3) + cos = freqs.cos() * self.attention_scaling + sin = freqs.sin() * self.attention_scaling + + sin = self.recomposition_to_3d(sin) + cos = self.recomposition_to_3d(cos) + + return cos, sin + + def recomposition_to_3d(self, freq): + freq_h, freq_w, freq_t = (m[(i + 1) % 3] for i, m in enumerate(freq.split([*self.mrope_section], dim=-1))) + freq_hw = torch.stack([freq_h, freq_w], dim=-1).flatten(-2) + freq_hwt = torch.cat([freq_hw, freq_t], dim=-1) + return freq_hwt.repeat_interleave(2, dim=-1) + + +def rotate_half_text(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., 0::2] + x2 = x[..., 1::2] + return torch.stack((-x2, x1), dim=-1).flatten(-2) + + +def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`, *optional*): + Deprecated and unused. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
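+
+ Example (illustrative): for a head dimension of 4, `rotate_half_text` maps `[x0, x1, x2, x3]` to `[-x1, x0, -x3, x2]`, i.e. the rotation acts on interleaved (even, odd) pairs rather than on the first and second halves of the head dimension.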
+ """ + original_dtype = q.dtype + + cos = cos.unsqueeze(unsqueeze_dim) + sin = sin.unsqueeze(unsqueeze_dim) + + q_embed = (q.float() * cos) + (rotate_half_text(q).float() * sin) + k_embed = (k.float() * cos) + (rotate_half_text(k).float() * sin) + + return q_embed.to(original_dtype), k_embed.to(original_dtype) + + +class Ernie4_5_VLTextAttention(Ernie4_5_MoeAttention): + pass + + +class Ernie4_5_VLRMSNorm(Ernie4_5_MoeRMSNorm): + pass + + +class Ernie4_5_VLMLP(Ernie4_5_MoeMLP): + pass + + +class Ernie4_5_VLMoeStatics(Ernie4_5_MoeStatics): + pass + + +class Ernie4_5_VLMoeTopKRouter(Ernie4_5_MoeTopKRouter): + def __init__(self, config): + super().__init__(config) + self.moe_statics = Ernie4_5_VLMoeStatics(config) + + +class Ernie4_5_VLMoeExperts(Ernie4_5_MoeExperts): + def __init__(self, config, intermediate_size=None): + super().__init__(config) + self.intermediate_dim = config.moe_intermediate_size if intermediate_size is None else intermediate_size + + +class Ernie4_5_VLSparseMoeBlock(nn.Module): + def __init__(self, config, intermediate_size): + super().__init__() + self.hidden_dim = config.hidden_size + self.num_experts = config.moe_num_experts + self.top_k = config.moe_k + self.gate = Ernie4_5_VLMoeTopKRouter(config) + self.experts = Ernie4_5_VLMoeExperts(config, intermediate_size) + + def forward( + self, + hidden_states: torch.Tensor, + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + hidden_states = hidden_states.view(-1, self.hidden_dim) + + routing_weights, selected_experts, router_logits = self.gate(hidden_states) + final_hidden_states = self.experts(hidden_states, selected_experts, routing_weights) + + # moe results are changed to a flattened shape to ease the modality isolated assigning of results + return final_hidden_states.flatten(), router_logits.flatten() + + +class Ernie4_5_VLMoeBlock(nn.Module): + """ + Similar to `Ernie4_5_Moe` where we have modality isolated experts: + - A set of text experts that are only run on text tokens + - A set of vision experts that are only run on vision (image/video) tokens + + This modality isolation is unique to the Ernie 4.5 VL models. 
+ """ + + def __init__(self, config): + super().__init__() + self.num_experts = config.moe_num_experts + + self.text_moe = Ernie4_5_VLSparseMoeBlock(config, intermediate_size=config.moe_intermediate_size[0]) + self.vision_moe = Ernie4_5_VLSparseMoeBlock(config, intermediate_size=config.moe_intermediate_size[1]) + + self.shared_experts = None + if config.moe_num_shared_experts > 0: + self.shared_experts = Ernie4_5_VLMLP( + config, config.moe_intermediate_size[0] * config.moe_num_shared_experts + ) + + def forward( + self, + hidden_states: torch.Tensor, + mm_token_type_ids: Optional[torch.IntTensor] = None, + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + batch_size, sequence_length, hidden_dim = hidden_states.shape + + # (Optional) shared experts + if self.shared_experts is not None: + shared_output = self.shared_experts(hidden_states) + + if mm_token_type_ids is not None and mm_token_type_ids.any(): + final_hidden_states = torch.zeros_like(hidden_states) + router_logits = torch.zeros( + size=(batch_size * sequence_length, self.num_experts), + device=final_hidden_states.device, + dtype=torch.float, + ) + + # True (1 or 2) == vision, False (0) == text tokens + mm_token_type_ids = mm_token_type_ids.bool() + token_type_ids_router = mm_token_type_ids.reshape(-1)[:, None].expand(-1, self.num_experts) + token_type_ids_states = mm_token_type_ids[..., None].expand(-1, -1, hidden_dim) + + # Run moe on each modality and assign their results to the original token positions + final_hidden_states[~token_type_ids_states], router_logits[~token_type_ids_router] = self.text_moe( + hidden_states[~token_type_ids_states] + ) + final_hidden_states[token_type_ids_states], router_logits[token_type_ids_router] = self.vision_moe( + hidden_states[token_type_ids_states] + ) + else: + final_hidden_states, router_logits = self.text_moe(hidden_states) + final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim) + router_logits = router_logits.reshape(-1, self.num_experts) + + # Add (optional) shared experts to the result + if self.shared_experts is not None: + final_hidden_states = final_hidden_states + shared_output + + return final_hidden_states, router_logits + + +class Ernie4_5_VLDecoderLayer(GradientCheckpointingLayer): + def __init__(self, config, layer_idx): + super().__init__() + self.hidden_size = config.hidden_size + + self.self_attn = Ernie4_5_VLTextAttention(config, layer_idx) + + if ( + ((layer_idx + 1) % config.moe_layer_interval == 0) + and layer_idx >= config.moe_layer_start_index + and layer_idx <= config.moe_layer_end_index + ): + self.mlp = Ernie4_5_VLMoeBlock(config) + else: + self.mlp = Ernie4_5_VLMLP(config) + + self.input_layernorm = Ernie4_5_VLRMSNorm(config.hidden_size, config.rms_norm_eps) + self.post_attention_layernorm = Ernie4_5_VLRMSNorm(config.hidden_size, config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + position_embeddings: tuple[torch.Tensor, torch.Tensor], + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + mm_token_type_ids: Optional[torch.IntTensor] = None, + past_key_values: Optional[Cache] = None, + cache_position: Optional[torch.LongTensor] = None, + **kwargs: Unpack[FlashAttentionKwargs], + ) -> tuple[torch.Tensor, Optional[tuple[torch.Tensor, torch.Tensor]]]: + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, _ = self.self_attn( + hidden_states=hidden_states, + 
position_embeddings=position_embeddings, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + cache_position=cache_position, + **kwargs, + ) + hidden_states = hidden_states + residual + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + if isinstance(self.mlp, Ernie4_5_VLMoeBlock): + hidden_states, _ = self.mlp(hidden_states, mm_token_type_ids) + else: + hidden_states = self.mlp(hidden_states) + hidden_states = hidden_states + residual + + return hidden_states + + +class Ernie4_5_VLPreTrainedModel(Qwen2_5_VLPreTrainedModel): + _can_compile_fullgraph = False + + _can_record_outputs = { + "router_logits": OutputRecorder(Ernie4_5_VLMoeBlock, index=1), + "hidden_states": Ernie4_5_VLDecoderLayer, + "attentions": Ernie4_5_VLTextAttention, + } + _keep_in_fp32_modules_strict = ["gate.weight", "moe_statics"] + + def _init_weights(self, module): + PreTrainedModel._init_weights(self, module) + if isinstance(module, Ernie4_5_VLMoeTopKRouter): + init.zeros_(module.moe_statics.e_score_correction_bias) + init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) + elif isinstance(module, Ernie4_5_VLMoeExperts): + init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range) + init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range) + if module.gate_up_proj_bias is not None: + init.zeros_(module.gate_up_proj_bias) + init.zeros_(module.down_proj_bias) + + +class Ernie4_5_VLTextModel(Ernie4_5_MoeModel): + config: Ernie4_5_VLTextConfig + + def __init__(self, config: Ernie4_5_VLTextConfig): + super().__init__(config) + + del self.padding_idx + self.embed_tokens = nn.Embedding( + self.vocab_size, + config.hidden_size, + ) + self.rotary_emb = Ernie4_5_VLTextRotaryEmbedding(config=config) + + @check_model_inputs() + @auto_docstring + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + mm_token_type_ids: Optional[torch.IntTensor] = None, + past_key_values: Optional[Cache] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + **kwargs: Unpack[FlashAttentionKwargs], + ) -> MoeModelOutputWithPast: + r""" + mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*): + Token type ids matching image/video tokens in the inputs sequence to `True` and otherwise `False`. + """ + if (input_ids is None) ^ (inputs_embeds is not None): + raise ValueError("You must specify exactly one of input_ids or inputs_embeds") + + # torch.jit.trace() doesn't support cache objects in the output + if use_cache and past_key_values is None and not torch.jit.is_tracing(): + past_key_values = DynamicCache(config=self.config) + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if cache_position is None: + past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + cache_position = torch.arange( + past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device + ) + + # the hard coded `3` is for temporal, height and width. 
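+ # e.g. a text-only prefill of length 5 at cache positions [0..4] yields position_ids of shape
+ # (3, batch_size, 5) with identical [0, 1, 2, 3, 4] rows for the temporal, height and width planes.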
+ if position_ids is None: + position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1) + elif position_ids.ndim == 2: + position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1) + + # NOTE: we need to pass text position ids for packing. Ernie 4.5 VL uses 3D positions + # where each dim indicates visual spatial positions for temporal/height/width grids. + # There is only one scenario when FA2-like packed masking might be activated. + # 1. User specifically passed packed `position_ids` and no attention mask. + # In this case we expect the user to create correct position ids for all 3 grids + # and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len] + if position_ids.ndim == 3 and position_ids.shape[0] == 4: + text_position_ids = position_ids[0] + position_ids = position_ids[1:] + else: + # If inputs are not packed (usual 3D positions), do not prepare mask from position_ids + text_position_ids = None + + attention_mask = create_causal_mask( + config=self.config, + input_embeds=inputs_embeds, + attention_mask=attention_mask, + cache_position=cache_position, + past_key_values=past_key_values, + position_ids=text_position_ids, + ) + + hidden_states = inputs_embeds + + # create position embeddings to be shared across the decoder layers + position_embeddings = self.rotary_emb(hidden_states, position_ids) + + for decoder_layer in self.layers[: self.config.num_hidden_layers]: + hidden_states = decoder_layer( + hidden_states, + position_embeddings=position_embeddings, + attention_mask=attention_mask, + position_ids=position_ids, + mm_token_type_ids=mm_token_type_ids, + past_key_values=past_key_values, + cache_position=cache_position, + **kwargs, + ) + + hidden_states = self.norm(hidden_states) + + return MoeModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=past_key_values, + ) + + +class Ernie4_5VLVisionMLP(VisionMlp): + pass + + +class Ernie4_5_VLPatchEmbed(Qwen2_5_VisionPatchEmbed): + def __init__( + self, + patch_size: int = 14, + in_channels: int = 3, + embed_dim: int = 1152, + ) -> None: + super().__init__(patch_size, in_channels, embed_dim) + + del self.temporal_patch_size + del kernel_size # noqa: F821 + self.proj = nn.Linear(in_channels * patch_size * patch_size, embed_dim, bias=False) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + target_dtype = self.proj.weight.dtype + return self.proj(hidden_states.to(target_dtype)) + + +class Ernie4_5_VLVisionRotaryEmbedding(Qwen2_5_VisionRotaryEmbedding): + pass + + +class Ernie4_5_VLVisionBlock(Qwen2_5_VLVisionBlock): + def __init__(self, config, attn_implementation: str = "sdpa") -> None: + super().__init__(config, attn_implementation) + + self.norm1 = nn.LayerNorm(config.hidden_size, config.rms_norm_eps) + self.norm2 = nn.LayerNorm(config.hidden_size, config.rms_norm_eps) + self.mlp = Ernie4_5VLVisionMLP( + dim=config.hidden_size, + hidden_dim=config.intermediate_size, + hidden_act=config.hidden_act, + ) + + +class Ernie4_5_VLVisionTransformerPretrainedModel(Qwen2_5_VisionTransformerPretrainedModel): + _no_split_modules = ["Ernie4_5_VLVisionBlock"] + + def __init__(self, config, *inputs, **kwargs) -> None: + super().__init__(config, *inputs, **kwargs) + + del self.fullatt_block_indexes + del self.window_size + del self.merger + + self.patch_embed = Ernie4_5_VLPatchEmbed( + patch_size=config.patch_size, + in_channels=config.in_channels, + embed_dim=config.hidden_size, + ) + + head_dim = config.hidden_size // config.num_heads + 
self.rotary_pos_emb = Ernie4_5_VLVisionRotaryEmbedding(head_dim // 2) + + self.ln = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps) + + def get_window_index(self, grid_thw): + raise AttributeError("Ernie 4.5 VL does not use windowed attention!") + + def forward( + self, + hidden_states: torch.Tensor, + grid_thw: torch.Tensor, + **kwargs, + ) -> torch.Tensor: + hidden_states = self.patch_embed(hidden_states) + + seq_len, _ = hidden_states.size() + rotary_pos_emb = self.rot_pos_emb(grid_thw) + rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1) + emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1) + position_embeddings = (emb.cos(), emb.sin()) + + cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum( + dim=0, + # Select dtype based on the following factors: + # - FA2 requires that cu_seqlens_q must have dtype int32 + # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw + # See https://github.com/huggingface/transformers/pull/34852 for more information + dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32, + ) + cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0) + + for block in self.blocks: + hidden_states = block( + hidden_states, + cu_seqlens=cu_seqlens, + position_embeddings=position_embeddings, + **kwargs, + ) + hidden_states = self.ln(hidden_states) + return hidden_states + + +class Ernie4_5_VLVisionMLP(nn.Module): + def __init__(self, config, in_dim, out_dim): + super().__init__() + + self.fc1 = nn.Linear(in_dim, out_dim) + self.act_fn = nn.GELU() + self.fc2 = nn.Linear(out_dim, out_dim) + self.ln = nn.LayerNorm(out_dim, eps=config.vision_config.rms_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.fc1(hidden_states) + hidden_states = self.act_fn(hidden_states) + hidden_states = self.fc2(hidden_states) + hidden_states = self.ln(hidden_states) + return hidden_states + + +class Ernie4_5_VLVariableResolutionResamplerModel(nn.Module): + def __init__(self, config: Ernie4_5_VLConfig): + super().__init__() + self.config = config + + self.in_dim = config.vision_config.hidden_size + self.out_dim = config.text_config.hidden_size + self.spatial_merge_size = config.vision_config.spatial_merge_size + self.temporal_merge_size = config.vision_config.temporal_merge_size + + # compress 2d conv(picture) to 1d + self.spatial_dim = self.in_dim * self.spatial_merge_size**2 + # compress 3d conv(video) to 1d + self.temporal_dim = self.in_dim * self.spatial_merge_size**2 * self.temporal_merge_size + + self.spatial_linear = Ernie4_5_VLVisionMLP(config, self.spatial_dim, self.spatial_dim) + self.temporal_linear = Ernie4_5_VLVisionMLP(config, self.temporal_dim, self.spatial_dim) + + self.mlp = nn.Linear(self.spatial_dim, self.out_dim) + self.after_norm = Ernie4_5_VLRMSNorm(self.out_dim, config.text_config.rms_norm_eps) + + def _temporal_slicing(self, hidden_states, grid_thw): + """ + Slices along the temporal dimension in even/odd patterns (usually if we have a video input) + or duplicates along the temporal dimension (usually if we have an image input). + + Example: + Video input with temporal pattern of [1, -1, 2, -2, 3, -3] + > Even input [1, 2, 3], odd input [-1, -2, -3] + > Reordered via slices to [1, 2, 3, -1, -2, -3] + Image input with temporal pattern [1] + > Duplicate input [1], [1] + > Reordered to [1, 1] + + NOTE: This is hard-coded for `temporal_merge_size == 2` and won't work otherwise.
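+
+ Offset illustration (assuming `spatial_merge_size == 2`): for `grid_thw = [[2, 4, 4]]` there are 4 merged spatial tokens per frame, so the first slice gathers rows [0, 1, 2, 3] (even frames) and the second slice gathers rows [4, 5, 6, 7] (odd frames) before both are concatenated on the feature dim.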
+ """ + # Calculating offsets on spatial dim (based on flattened tensors) + grid_t, grid_hw = grid_thw[:, 0], grid_thw[:, 1:] + grid_hw_after_conv = grid_hw.prod(-1) // (self.spatial_merge_size**2) + + # Calculating offsets on batch dim (based on flattened tensors) + tokens_per_img_or_vid = (grid_thw.prod(-1) // (self.spatial_merge_size**2)).flatten() + batch_offsets = torch.empty(tokens_per_img_or_vid.size(), dtype=tokens_per_img_or_vid.dtype) + batch_offsets[0] = 0 + batch_offsets[1:] = tokens_per_img_or_vid.cumsum(dim=0)[:-1] + + first_slice_offsets = [] + second_slice_offsets = [] + for temporal_size, spatial_size, batch_offset in zip(grid_t, grid_hw_after_conv, batch_offsets): + # Depending on temporal, we may interleave: + # - Images have temporal == 1 --> same offsets (duplicate "frame" image) + # - Videos have temporal > 1 --> different offsets (even, odd) + first_offset_range = range(0, temporal_size, 2) + second_offset_range = range(1 if temporal_size > 1 else 0, temporal_size, 2) + + for temporal_offset_even, temporal_offset_odd in zip(first_offset_range, second_offset_range): + first_slice_offsets.append( + torch.arange( + batch_offset + (temporal_offset_even) * spatial_size, + batch_offset + (temporal_offset_even + 1) * spatial_size, + ) + ) + second_slice_offsets.append( + torch.arange( + batch_offset + (temporal_offset_odd) * spatial_size, + batch_offset + (temporal_offset_odd + 1) * spatial_size, + ) + ) + + # Input: [1, -1, 2, -2, 3, -3] or [1] + # Indices: [0, 2, 4] (even) or [0] (duplicate) + first_slice_offsets = torch.cat(first_slice_offsets, dim=-1).to(hidden_states.device) + # Indices: [1, 3, 5] (odd) or [0] (duplicate) + second_slice_offsets = torch.cat(second_slice_offsets, dim=-1).to(hidden_states.device) + + # Output: [1, 2, 3, -1, -2, -3] or [1, 1] + return torch.concat( + [ + torch.index_select(hidden_states, dim=0, index=first_slice_offsets), + torch.index_select(hidden_states, dim=0, index=second_slice_offsets), + ], + dim=-1, + ) + + def forward(self, hidden_states, grid_thw): + # image spatial + # reshape imitates convolution via linear projection + hidden_states = hidden_states.reshape([-1, hidden_states.shape[-1] * (self.spatial_merge_size**2)]) + hidden_states = self.spatial_linear(hidden_states) + + # video temporal + hidden_states = self._temporal_slicing(hidden_states, grid_thw) + hidden_states = self.temporal_linear(hidden_states) + + # final mlp + hidden_states = self.mlp(hidden_states) + hidden_states = self.after_norm(hidden_states) + + return hidden_states + + +class Ernie4_5_VLModel(Qwen2_5_VLModel): + _checkpoint_conversion_mapping = {} + + def __init__(self, config: Ernie4_5_VLConfig): + super().__init__(config) + + del self.visual + self.vision_tower = Ernie4_5_VLVisionTransformerPretrainedModel._from_config(config.vision_config) + self.resampler_model = Ernie4_5_VLVariableResolutionResamplerModel(config) + + # TODO: Should be moved to generation loop instead in the future + # Relevant PR(s): https://github.com/huggingface/transformers/pull/42088 + def get_position_ids( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Cache] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + image_grid_thw: Optional[torch.LongTensor] = None, + video_grid_thw: Optional[torch.LongTensor] = None, + cache_position: Optional[torch.LongTensor] = None, + ): + """ + Calculating the 3D position ids with a custom mechanism / caching + - First forward calculates the initial positions 
and the respective + deltas (offset) for subsequent positions. See `get_rope_index` for + more details. + - Second and on (generation), uses the cache position combined with the + cached deltas to determine the current position. + + NOTE: We assume that the position ids are `None` and recalculate them here in any case. + """ + # Calculate RoPE index once per generation in the pre-fill stage only. + # When compiling, we can't check tensor values thus we check only input length + # It is safe to assume that `length!=1` means we're in pre-fill because compiled + # models currently cannot do asssisted decoding + prefill_compiled_stage = is_torchdynamo_compiling() and ( + (input_ids is not None and input_ids.shape[1] != 1) + or (inputs_embeds is not None and inputs_embeds.shape[1] != 1) + ) + prefill_noncompiled_stage = not is_torchdynamo_compiling() and ( + (cache_position is not None and cache_position[0] == 0) + or (past_key_values is None or past_key_values.get_seq_length() == 0) + ) + if (prefill_compiled_stage or prefill_noncompiled_stage) or self.rope_deltas is None: + position_ids, rope_deltas = self.get_rope_index( + input_ids, + image_grid_thw, + video_grid_thw, + attention_mask=attention_mask, + ) + self.rope_deltas = rope_deltas + # then use the prev pre-calculated rope-deltas to get the correct position ids + else: + batch_size, seq_length, device = input_ids.shape[0], 1, input_ids.device + delta = (cache_position[0] + self.rope_deltas).to(device) if cache_position is not None else 0 + position_ids = torch.arange(seq_length, device=device) + position_ids = position_ids.view(1, -1).expand(batch_size, -1) + if cache_position is not None: # otherwise `deltas` is an int `0` + delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0) + position_ids = position_ids.add(delta) + position_ids = position_ids.unsqueeze(0).expand(3, -1, -1) + + return position_ids + + def get_rope_index( + self, + input_ids: Optional[torch.LongTensor] = None, + image_grid_thw: Optional[torch.LongTensor] = None, + video_grid_thw: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + ) -> tuple[torch.Tensor, torch.Tensor]: + """ + Calculate the 3D rope index based on image and video's temporal, height and width in LLM. + + Explanation: + Each embedding sequence contains vision embedding and text embedding or just contains text embedding. + + For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs. + Examples: + input_ids: [T T T T T], here T is for text. + temporal position_ids: [0, 1, 2, 3, 4] + height position_ids: [0, 1, 2, 3, 4] + width position_ids: [0, 1, 2, 3, 4] + + For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part + and 1D rotary position embedding for text part. + Examples: + Temporal (Time): 3 patches, representing different segments of the video in time. + Height: 2 patches, dividing each frame vertically. + Width: 2 patches, dividing each frame horizontally. + We also have some important parameters: + fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second. + tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per second. So each second of the video will be represented with 25 separate time points. It essentially defines the temporal granularity. 
+ temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames. + interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will be have a difference of 50 in the temporal position IDs. + input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision. + vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100] + vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1] + vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1] + text temporal position_ids: [101, 102, 103, 104, 105] + text height position_ids: [101, 102, 103, 104, 105] + text width position_ids: [101, 102, 103, 104, 105] + Here we calculate the text start position_ids as the max vision position_ids plus 1. + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): + The temporal, height and width of feature shape of each image in LLM. + video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): + The temporal, height and width of feature shape of each video in LLM. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + Returns: + position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`) + mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`) + """ + + image_token_id = self.config.image_token_id + video_token_id = self.config.video_token_id + video_start_token_id = self.config.video_start_token_id + video_end_token_id = self.config.video_end_token_id + temporal_merge_size = self.config.vision_config.temporal_merge_size + spatial_merge_size = self.config.vision_config.spatial_merge_size + + mrope_position_deltas = [] + if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None): + total_input_ids = input_ids + if attention_mask is None: + attention_mask = torch.ones_like(total_input_ids) + position_ids = torch.ones( + 3, + input_ids.shape[0], + input_ids.shape[1], + dtype=input_ids.dtype, + device=input_ids.device, + ) + image_index, video_index = 0, 0 + attention_mask = attention_mask.to(total_input_ids.device) + for i, input_ids in enumerate(total_input_ids): + input_ids = input_ids[attention_mask[i] == 1] + input_tokens = input_ids.tolist() + + input_token_type = [] + video_check_flg = False + for token in input_tokens: + if token == video_start_token_id: + video_check_flg = True + elif token == video_end_token_id: + video_check_flg = False + + if token == image_token_id and not video_check_flg: + input_token_type.append("image") + elif token == video_token_id and video_check_flg: + input_token_type.append("video") + else: + input_token_type.append("text") + + input_type_group = [] + for key, group in itertools.groupby(enumerate(input_token_type), lambda x: x[1]): + group = list(group) + start_index = group[0][0] + end_index = group[-1][0] + 1 + input_type_group.append((key, start_index, end_index)) + + llm_pos_ids_list = [] + for modality_type, start_idx, end_idx in input_type_group: + st_idx = llm_pos_ids_list[-1].max() + 1 if 
len(llm_pos_ids_list) > 0 else 0 + + if modality_type == "text": + text_len = end_idx - start_idx + llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) + + else: + grid_thw = image_grid_thw if modality_type == "image" else video_grid_thw + mm_index = image_index if modality_type == "image" else video_index + t_merge_size = 1 if modality_type == "image" else temporal_merge_size + + t, h, w = ( + grid_thw[mm_index][0], + grid_thw[mm_index][1], + grid_thw[mm_index][2], + ) + llm_grid_t, llm_grid_h, llm_grid_w = ( + t.item() // t_merge_size, + h.item() // spatial_merge_size, + w.item() // spatial_merge_size, + ) + + for t_idx in range(llm_grid_t): + t_index = torch.tensor(t_idx).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten() + h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(1, -1, llm_grid_w).flatten() + w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(1, llm_grid_h, -1).flatten() + llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + st_idx) + + if modality_type == "image": + image_index += 1 + else: + video_index += 1 + + llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) + position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device) + mrope_position_deltas.append(llm_positions.max() + 1 - len(total_input_ids[i])) + mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1) + return position_ids, mrope_position_deltas + else: + if attention_mask is not None: + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device) + max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0] + mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1] + else: + position_ids = ( + torch.arange(input_ids.shape[1], device=input_ids.device) + .view(1, 1, -1) + .expand(3, input_ids.shape[0], -1) + ) + mrope_position_deltas = torch.zeros( + [input_ids.shape[0], 1], + device=input_ids.device, + dtype=input_ids.dtype, + ) + + return position_ids, mrope_position_deltas + + def get_video_features( + self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None + ): + """ + Encodes videos into continuous embeddings that can be forwarded to the language model. + + Args: + pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): + The tensors corresponding to the input videos. + video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): + The temporal, height and width of feature shape of each video in LLM. + """ + video_embeds = self.vision_tower(pixel_values_videos, video_grid_thw) + video_embeds = self.resampler_model(video_embeds, video_grid_thw) + split_sizes = ( + video_grid_thw.prod(-1) + // self.vision_tower.spatial_merge_size**2 + // self.resampler_model.temporal_merge_size + ).tolist() + video_embeds = torch.split(video_embeds, split_sizes) + return video_embeds + + def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None): + """ + Encodes images into continuous embeddings that can be forwarded to the language model. + + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): + The tensors corresponding to the input images. 
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): + The temporal, height and width of feature shape of each image in LLM. + """ + image_embeds = self.vision_tower(pixel_values, image_grid_thw) + image_embeds = self.resampler_model(image_embeds, image_grid_thw) + split_sizes = (image_grid_thw.prod(-1) // self.vision_tower.spatial_merge_size**2).tolist() + image_embeds = torch.split(image_embeds, split_sizes) + return image_embeds + + @auto_docstring + @can_return_tuple + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + mm_token_type_ids: Optional[torch.IntTensor] = None, + past_key_values: Optional[Cache] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + pixel_values: Optional[torch.Tensor] = None, + pixel_values_videos: Optional[torch.FloatTensor] = None, + image_grid_thw: Optional[torch.LongTensor] = None, + video_grid_thw: Optional[torch.LongTensor] = None, + rope_deltas: Optional[torch.LongTensor] = None, + cache_position: Optional[torch.LongTensor] = None, + **kwargs: Unpack[TransformersKwargs], + ) -> Union[tuple, MoeModelOutputWithPast]: + r""" + mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*): + Token type ids matching image/video tokens in the inputs sequence to `True` and otherwise `False`. + image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): + The temporal, height and width of feature shape of each image in LLM. + video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): + The temporal, height and width of feature shape of each video in LLM. + rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): + The rope index difference between sequence length and multimodal rope. 
+ """ + if inputs_embeds is None: + inputs_embeds = self.get_input_embeddings()(input_ids) + + if pixel_values is not None: + image_embeds = self.get_image_features(pixel_values, image_grid_thw) + image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype) + image_mask, _ = self.get_placeholder_mask( + input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds + ) + inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) + + if pixel_values_videos is not None: + video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw) + video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype) + _, video_mask = self.get_placeholder_mask( + input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds + ) + inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds) + + if position_ids is None: + position_ids = self.get_position_ids( + input_ids=input_ids, + attention_mask=attention_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + image_grid_thw=image_grid_thw, + video_grid_thw=video_grid_thw, + cache_position=cache_position, + ) + + outputs = self.language_model( + input_ids=None, + position_ids=position_ids, + mm_token_type_ids=mm_token_type_ids, + attention_mask=attention_mask, + use_cache=use_cache, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + return_dict=True, + cache_position=cache_position, + **kwargs, + ) + + return MoeModelOutputWithPast( + last_hidden_state=outputs.last_hidden_state, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + router_logits=outputs.router_logits, + ) + + +class Ernie4_5_VLForConditionalGeneration(Glm4vForConditionalGeneration, GenerationMixin): + def __init__(self, config): + super().__init__(config) + + self.router_aux_loss_coef = config.text_config.router_aux_loss_coef + self.num_experts = config.text_config.moe_num_experts + self.num_experts_per_tok = config.text_config.moe_k + + @property + def visual(self): + return self.model.vision_tower + + def prepare_inputs_for_generation( + self, + input_ids, + inputs_embeds=None, + attention_mask=None, + cache_position=None, + past_key_values=None, + image_grid_thw=None, + video_grid_thw=None, + # Intentionally ignore position ids to force custom cache logic + position_ids=None, + **kwargs, + ): + model_inputs = super().prepare_inputs_for_generation( + input_ids, + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + cache_position=cache_position, + past_key_values=past_key_values, + image_grid_thw=image_grid_thw, + video_grid_thw=video_grid_thw, + **kwargs, + ) + + # Using our own caching with rope delta + model_inputs["position_ids"] = self.model.get_position_ids( + input_ids=model_inputs.get("input_ids"), + attention_mask=model_inputs.get("attention_mask"), + past_key_values=model_inputs.get("past_key_values"), + inputs_embeds=model_inputs.get("inputs_embeds"), + image_grid_thw=model_inputs.get("image_grid_thw"), + video_grid_thw=model_inputs.get("video_grid_thw"), + cache_position=model_inputs.get("cache_position"), + ) + + if model_inputs["cache_position"][0] != 0: + model_inputs["pixel_values"] = None + model_inputs["pixel_values_videos"] = None + model_inputs["mm_token_type_ids"] = None + + return model_inputs + + @auto_docstring + @can_return_tuple + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: 
Optional[torch.LongTensor] = None, + mm_token_type_ids: Optional[torch.IntTensor] = None, + past_key_values: Optional[Cache] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_router_logits: Optional[bool] = None, + pixel_values: Optional[torch.Tensor] = None, + pixel_values_videos: Optional[torch.FloatTensor] = None, + image_grid_thw: Optional[torch.LongTensor] = None, + video_grid_thw: Optional[torch.LongTensor] = None, + rope_deltas: Optional[torch.LongTensor] = None, + cache_position: Optional[torch.LongTensor] = None, + logits_to_keep: Union[int, torch.Tensor] = 0, + **kwargs: Unpack[TransformersKwargs], + ) -> Union[tuple, MoeCausalLMOutputWithPast]: + r""" + mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*): + Token type ids matching image/video tokens in the inputs sequence to `True` and otherwise `False`. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): + The temporal, height and width of feature shape of each image in LLM. + video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): + The temporal, height and width of feature shape of each video in LLM. + rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): + The rope index difference between sequence length and multimodal rope. 
+ """ + output_router_logits = ( + output_router_logits if output_router_logits is not None else self.config.text_config.output_router_logits + ) + + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + mm_token_type_ids=mm_token_type_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_router_logits=output_router_logits, + return_dict=True, + pixel_values=pixel_values, + pixel_values_videos=pixel_values_videos, + image_grid_thw=image_grid_thw, + video_grid_thw=video_grid_thw, + rope_deltas=rope_deltas, + cache_position=cache_position, + **kwargs, + ) + + hidden_states = outputs.last_hidden_state + # Only compute necessary logits, and do not upcast them to float if we are not computing the loss + slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep + logits = self.lm_head(hidden_states[:, slice_indices, :]) + + loss = None + if labels is not None: + loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size) + + aux_loss = None + if output_router_logits: + aux_loss = load_balancing_loss_func( + outputs.router_logits, + self.num_experts, + self.num_experts_per_tok, + attention_mask, + ) + if labels is not None: + loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device + + return MoeCausalLMOutputWithPast( + loss=loss, + aux_loss=aux_loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + router_logits=outputs.router_logits, + ) + + +__all__ = [ + "Ernie4_5_VLPreTrainedModel", + "Ernie4_5_VLForConditionalGeneration", + "Ernie4_5_VLModel", + "Ernie4_5_VLTextModel", + "Ernie4_5_VLVisionTransformerPretrainedModel", + "Ernie4_5_VLVariableResolutionResamplerModel", +] diff --git a/src/transformers/models/ernie4_5_vl/processing_ernie4_5_vl.py b/src/transformers/models/ernie4_5_vl/processing_ernie4_5_vl.py new file mode 100644 index 000000000000..49bdf08da65d --- /dev/null +++ b/src/transformers/models/ernie4_5_vl/processing_ernie4_5_vl.py @@ -0,0 +1,255 @@ +# coding=utf-8 +# Copyright 2025 Baidu and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
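Before the processor code, here is a minimal end-to-end sketch of how the modeling classes above are intended to be used. It is a hypothetical example: the checkpoint id is a placeholder (no converted Hub repo is named in this PR), and the `dtype` argument of `from_pretrained` may be spelled `torch_dtype` on older transformers versions.

```python
# Minimal usage sketch (placeholder repo id, not an official checkpoint).
import torch
from transformers import AutoProcessor, Ernie4_5_VLForConditionalGeneration

repo_id = "your-org/Ernie4_5_VL-converted"  # placeholder
processor = AutoProcessor.from_pretrained(repo_id)
model = Ernie4_5_VLForConditionalGeneration.from_pretrained(
    repo_id, dtype=torch.bfloat16, device_map="auto"
)

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"},
            {"type": "text", "text": "Describe this image."},
        ],
    }
]
inputs = processor.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
).to(model.device)

# No position ids are passed: `prepare_inputs_for_generation` rebuilds the 3D rope
# positions (and caches the rope deltas) through `model.model.get_position_ids`.
generated = model.generate(**inputs, max_new_tokens=64)
new_tokens = generated[:, inputs["input_ids"].shape[1]:]
print(processor.post_process_image_text_to_text(new_tokens))
```

The same flow applies to video inputs, where the processor additionally returns `pixel_values_videos`, `video_grid_thw`, and `mm_token_type_ids` to separate image, video, and text tokens for the modality-isolated MoE routing.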
+from typing import Optional, Union + +import numpy as np + +from ...image_processing_utils import BatchFeature +from ...image_utils import ImageInput +from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack +from ...tokenization_utils_base import PreTokenizedInput, TextInput +from ...video_utils import VideoInput + + +class Ernie4_5_VLProcessorKwargs(ProcessingKwargs, total=False): + _defaults = { + "text_kwargs": { + "padding": False, + "return_token_type_ids": False, + "return_mm_token_type_ids": True, + }, + } + + +class Ernie4_5_VLProcessor(ProcessorMixin): + r""" + Constructs a Ernie 4.5 VL processor which wraps a Ernie 4.5 VL image processor and a Llama tokenizer into a single processor. + [`Ernie4_5_VLProcessor`] offers all the functionalities of [`Ernie4_5_VLImageProcessor`] and [`LlamaTokenizerFast`]. See the + [`~Ernie4_5_VLProcessor.__call__`] and [`~Ernie4_5_VLProcessor.decode`] for more information. + Args: + image_processor ([`Ernie4_5_VLImageProcessor`], *optional*): + The image processor is a required input. + tokenizer ([`LlamaTokenizerFast`], *optional*): + The tokenizer is a required input. + video_processor ([`Ernie4_5_VLVideoProcessor`], *optional*): + The video processor is a required input. + chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages + in a chat into a tokenizable string. + """ + + def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs): + self.image_token = tokenizer.image_token + self.image_end_token = tokenizer.image_end_token + self.image_start_token = tokenizer.image_start_token + self.video_token = tokenizer.video_token + self.video_end_token = tokenizer.video_end_token + self.video_start_token = tokenizer.video_start_token + + self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token) + self.image_end_token_id = tokenizer.convert_tokens_to_ids(self.image_end_token) + self.image_start_token_id = tokenizer.convert_tokens_to_ids(self.image_start_token) + self.video_token_id = tokenizer.convert_tokens_to_ids(self.video_token) + self.video_end_token_id = tokenizer.convert_tokens_to_ids(self.video_end_token) + self.video_start_token_id = tokenizer.convert_tokens_to_ids(self.video_start_token) + + super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template) + + def __call__( + self, + images: Optional[ImageInput] = None, + text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, + videos: Optional[VideoInput] = None, + **kwargs: Unpack[Ernie4_5_VLProcessorKwargs], + ) -> BatchFeature: + """ + Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` + and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode + the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwargs` arguments to + Ernie4_5_VLImageProcessor's [`~Ernie4_5_VLImageProcessor.__call__`] if `vision_infos` is not `None`. + + Args: + images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): + The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch + tensor. Both channels-first and channels-last formats are supported. + text (`str`, `list[str]`, `list[list[str]]`): + The sequence or batch of sequences to be encoded. 
Each sequence can be a string or a list of strings + (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set + `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). + videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`): + The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch + tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported. + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Acceptable values are: + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. + + Returns: + [`BatchFeature`]: A [`BatchFeature`] with the following fields: + + - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. + - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when + `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not + `None`). + - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. + - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`. + - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`. + - **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`. + - **mm_token_type_ids** -- List of token type ids differentiating between image, video and text input. + Returned when `text` is not `None`. + """ + output_kwargs = self._merge_kwargs( + Ernie4_5_VLProcessorKwargs, + tokenizer_init_kwargs=self.tokenizer.init_kwargs, + **kwargs, + ) + + image_inputs = videos_inputs = {} + if images is not None: + image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"]) + image_grid_thw = image_inputs["image_grid_thw"] + + if videos is not None: + videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"]) + video_grid_thw = videos_inputs["video_grid_thw"] + + if not isinstance(text, list): + text = [text] + + text = text.copy() # below lines change text in-place + + if images is not None: + merge_length = self.image_processor.merge_size**2 + index = 0 + for i in range(len(text)): + while self.image_token in text[i]: + num_image_tokens = image_grid_thw[index].prod() // merge_length + text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1) + index += 1 + text[i] = text[i].replace("<|placeholder|>", self.image_token) + + if videos is not None: + merge_length = self.video_processor.merge_size**2 * self.video_processor.temporal_patch_size + index = 0 + for i in range(len(text)): + while self.video_token in text[i]: + num_video_tokens = video_grid_thw[index].prod() // merge_length + text[i] = text[i].replace(self.video_token, "<|placeholder|>" * num_video_tokens, 1) + index += 1 + text[i] = text[i].replace("<|placeholder|>", self.video_token) + + return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None) + return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False) + text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"], return_tensors=None) + self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"]) + + if return_mm_token_type_ids: + array_ids = 
np.array(text_inputs["input_ids"]) + mm_token_type_ids = np.zeros_like(text_inputs["input_ids"]) + for token_id in [ + self.image_token_id, + self.image_start_token_id, + self.image_end_token_id, + ]: + mm_token_type_ids[array_ids == token_id] = 1 + for token_id in [ + self.video_token_id, + self.video_start_token_id, + self.video_end_token_id, + ]: + mm_token_type_ids[array_ids == token_id] = 2 + text_inputs["mm_token_type_ids"] = mm_token_type_ids.astype(int).tolist() + + return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors) + + @property + def model_input_names(self): + """Additional `mm_token_type_ids` used for modality isolated MoE""" + model_input_names = super().model_input_names + model_input_names.append("mm_token_type_ids") + return model_input_names + + def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs): + """ + Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. + Args: + image_sizes (`list[list[int]]`, *optional*): + The input sizes formatted as (height, width) per each image. + video_sizes (`list[list[int]]`, *optional*): + The input sizes formatted as (num_frames, height, width) per each video. + Returns: + `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided + input modalities, along with other useful data. + """ + + vision_data = {} + if image_sizes is not None: + images_kwargs = Ernie4_5_VLProcessorKwargs._defaults.get("images_kwargs", {}) + images_kwargs.update(kwargs) + merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size + + num_image_patches = [ + self.image_processor.get_number_of_image_patches(*image_size, images_kwargs) + for image_size in image_sizes + ] + num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches] + vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches}) + + if video_sizes is not None: + videos_kwargs = Ernie4_5_VLProcessorKwargs._defaults.get("videos_kwargs", {}) + videos_kwargs.update(kwargs) + temporal_merge_size = ( + videos_kwargs.get("temporal_patch_size", None) or self.video_processor.temporal_patch_size + ) + + num_video_patches = [ + self.video_processor.get_number_of_video_patches(*video_size, videos_kwargs) + for video_size in video_sizes + ] + num_video_tokens = [ + (num_patches // merge_size**2 // temporal_merge_size) for num_patches in num_video_patches + ] + vision_data["num_video_tokens"] = num_video_tokens + + return MultiModalData(**vision_data) + + def post_process_image_text_to_text( + self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs + ): + """ + Post-process the output of the model to decode the text. + + Args: + generated_outputs (`torch.Tensor` or `np.ndarray`): + The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)` + or `(sequence_length,)`. + skip_special_tokens (`bool`, *optional*, defaults to `True`): + Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method. + clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): + Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method. + **kwargs: + Additional arguments to be passed to the tokenizer's `batch_decode method`. + + Returns: + `list[str]`: The decoded text. 
+ """ + return self.tokenizer.batch_decode( + generated_outputs, + skip_special_tokens=skip_special_tokens, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + **kwargs, + ) + + +__all__ = ["Ernie4_5_VLProcessor"] diff --git a/src/transformers/models/ernie4_5_vl/video_processing_ernie4_5_vl.py b/src/transformers/models/ernie4_5_vl/video_processing_ernie4_5_vl.py new file mode 100644 index 000000000000..00a6e38d5797 --- /dev/null +++ b/src/transformers/models/ernie4_5_vl/video_processing_ernie4_5_vl.py @@ -0,0 +1,446 @@ +# coding=utf-8 +# Copyright 2025 Baidu and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from functools import partial +from pathlib import Path +from typing import Any, Optional, Union + +import numpy as np +import torch +from huggingface_hub.dataclasses import validate_typed_dict +from PIL import ImageDraw, ImageFont +from torchvision.transforms.functional import pil_to_tensor, to_pil_image + +from ...image_processing_utils import BatchFeature +from ...image_utils import ( + OPENAI_CLIP_MEAN, + OPENAI_CLIP_STD, + ChannelDimension, + PILImageResampling, + SizeDict, + get_image_size, + validate_kwargs, +) +from ...processing_utils import Unpack, VideosKwargs +from ...utils import ( + TensorType, + add_start_docstrings, + logging, +) +from ...utils.import_utils import is_tracing, requires +from ...video_processing_utils import BASE_VIDEO_PROCESSOR_DOCSTRING, BaseVideoProcessor +from ...video_utils import ( + VideoInput, + VideoMetadata, + group_videos_by_shape, + infer_channel_dimension_format, + reorder_videos, +) +from .image_processing_ernie4_5_vl import smart_resize + + +logger = logging.get_logger(__name__) + + +class Ernie4_5_VLVideoProcessorInitKwargs(VideosKwargs, total=False): + patch_size: int + temporal_patch_size: int + merge_size: int + min_frames: int + max_frames: int + draw_on_frames: bool + font: str + + +@add_start_docstrings( + "Constructs a fast Ernie 4.5 VL image processor that dynamically resizes videos based on the original videos.", + BASE_VIDEO_PROCESSOR_DOCSTRING, + """ + patch_size (`int`, *optional*, defaults to 14): + The spacial patch size of the vision encoder. + temporal_patch_size (`int`, *optional*, defaults to 2): + The temporal patch size of the vision encoder. + merge_size (`int`, *optional*, defaults to 2): + The merge size of the vision encoder to llm encoder. + min_frames (`int`, *optional*, defaults to 16): + The minimum number of frames that can be sampled. + max_frames (`int`, *optional*, defaults to 180): + The maximum number of frames that can be sampled. + draw_on_frames (`bool`, *optional*, defaults to `True`): + Whether to draw timestamps on each frame or not. + This does not work with `torch.compile` but resembles + the performance of the original model. + font (`str`, *optional*, defaults to "Roboto-Regular.ttf"): + The associated font name for drawing on frames. + Defaults to "Roboto-Regular.ttf" and is expected to be + saved along the processor as separate file. 
+    """,
+)
+@requires(backends=("torchvision",))
+class Ernie4_5_VLVideoProcessor(BaseVideoProcessor):
+    resample = PILImageResampling.BICUBIC
+    size = {"shortest_edge": 299 * 28 * 28, "longest_edge": 1196 * 28 * 28}
+    image_mean = OPENAI_CLIP_MEAN
+    image_std = OPENAI_CLIP_STD
+    do_resize = True
+    do_rescale = True
+    do_normalize = True
+    do_convert_rgb = True
+    patch_size = 14
+    temporal_patch_size = 2
+    merge_size = 2
+    min_frames = 16
+    max_frames = 180
+    do_sample_frames = True
+    draw_on_frames = True
+    font = "Roboto-Regular.ttf"
+    valid_kwargs = Ernie4_5_VLVideoProcessorInitKwargs
+    model_input_names = ["pixel_values_videos", "video_grid_thw"]
+
+    def __init__(self, **kwargs: Unpack[Ernie4_5_VLVideoProcessorInitKwargs]):
+        temporal_patch_size = kwargs.get("temporal_patch_size", 2)
+        if temporal_patch_size is None or temporal_patch_size != 2:
+            raise ValueError("`Ernie 4.5 VL` only supports a temporal patch size of 2")
+
+        size = kwargs.pop("size", None)
+        size = self.size if size is None else size
+        if "shortest_edge" not in size or "longest_edge" not in size:
+            raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
+
+        super().__init__(size=size, **kwargs)
+
+    @classmethod
+    def from_dict(cls, video_processor_dict: dict[str, Any], **kwargs):
+        """
+        Logic specific to this Ernie model: we expect an associated font name to be saved within the dict
+        and the corresponding font file to exist alongside the JSON as a separate file.
+
+        Note that this is only relevant when `draw_on_frames` is used, as that feature requires a font.
+        """
+        resolved_file_path = kwargs.pop("resolved_file_path", None)
+        draws_on_frames = video_processor_dict.get("draw_on_frames")
+        if (font_name := video_processor_dict.get("font")) is None and draws_on_frames:
+            raise AttributeError(
+                "Expected a `font` to be saved when using `draw_on_frames` in Ernie 4.5 VL; found nothing."
+            )
+        if font_name is not None and draws_on_frames:
+            try:
+                base_directory = Path(resolved_file_path).parent
+                video_processor_dict["font"] = str(Path(base_directory, font_name))
+                ImageFont.truetype(video_processor_dict["font"])
+            except (TypeError, OSError):
+                raise OSError(
+                    f"Could not find an associated font file for {video_processor_dict['font']}. "
+                    "Make sure to save a font file alongside the processor for Ernie 4.5 VL."
+                )
+
+        return super().from_dict(video_processor_dict, **kwargs)
+
+    def _further_process_kwargs(
+        self,
+        size: Optional[SizeDict] = None,
+        **kwargs,
+    ) -> dict:
+        """
+        Update kwargs that need further processing before being validated.
+        Can be overridden by subclasses to customize the processing of kwargs.
+ """ + if size is not None and ("shortest_edge" not in size or "longest_edge" not in size): + raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.") + + return super()._further_process_kwargs(size=size, **kwargs) + + def sample_frames( + self, + metadata: VideoMetadata, + min_frames: Optional[int] = None, + max_frames: Optional[int] = None, + num_frames: Optional[int] = None, + fps: Optional[Union[int, float]] = None, + **kwargs, + ): + if fps is not None and num_frames is not None: + raise ValueError("`num_frames` and `fps` are mutually exclusive arguments, please use only one!") + + num_frames = num_frames if num_frames is not None else self.num_frames + min_frames = min_frames if min_frames is not None else self.min_frames + max_frames = max_frames if max_frames is not None else self.max_frames + total_num_frames = metadata.total_num_frames + + if num_frames is not None: + if num_frames < min_frames or num_frames > max_frames: + raise ValueError(f"`num_frames` must be {min_frames} <= x <= {max_frames}. Got {num_frames} instead.") + else: + if fps is not None and (metadata is None or metadata.fps is None): + raise ValueError( + "Asked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. " + "Please pass in `VideoMetadata` object or use a fixed `num_frames` per input video" + ) + num_frames = total_num_frames / metadata.fps * fps if fps is not None else total_num_frames + num_frames = min(max(num_frames, min_frames), max_frames, total_num_frames) + + if num_frames > total_num_frames: + raise ValueError( + f"Video can't be sampled. The inferred `num_frames={num_frames}` exceeds `total_num_frames={total_num_frames}`. " + "Decrease `num_frames` or `fps` for sampling." 
+ ) + + indices = torch.arange(0, total_num_frames, total_num_frames / num_frames).int() + + return indices + + def _convert_timestamp(self, time_stamp_in_seconds): + """Convert to `time: hr:min:sec` format""" + hours = time_stamp_in_seconds // 3600 + time_stamp_in_seconds = time_stamp_in_seconds % 3600 + mins = time_stamp_in_seconds // 60 + time_stamp_in_seconds = time_stamp_in_seconds % 60 + return f"time: {int(hours):02d}:{int(mins):02d}:{time_stamp_in_seconds:05.02f}" + + def _render_image_with_timestamp(self, image: torch.Tensor, timestamp: str, size_factor: float = 0.1): + """Draws a black timestamp with a white border on the corner of the frame""" + if self.font is None: + raise AttributeError("To draw on frames with Ernie 4.5 VL, you need an associated font; found nothing") + + # FIXME: conversion `torch->PIL->torch` is inefficient ~6ms per frame + # Left for optimization if anyone want to pick it up + # + # This can take up to ~1s in preprocessing (if default sampling is used): + # 180 (frames) x 6ms = 1080ms = ~1,1s + image = to_pil_image(image) + + font_size = int(min(*image.size) * size_factor) + outline_size = int(font_size * size_factor) + font = ImageFont.truetype(self.font, font_size) + + # Draw a black text with a white border + draw = ImageDraw.Draw(image) + draw.text( + (0, 0), + timestamp, + font=font, + fill=(0, 0, 0), + stroke_width=outline_size, + stroke_fill=(255, 255, 255), + ) + return pil_to_tensor(image) + + def _prepare_input_videos( + self, + videos: VideoInput, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + device: Optional[str] = None, + video_metadata: Optional[list[VideoMetadata]] = None, + draw_on_frames: bool = True, + ) -> list["torch.Tensor"]: + """ + Prepare the input videos for processing. + """ + processed_videos = [] + for video, metadata in zip(videos, video_metadata): + # Check for attributes that are necessary to draw timestamps on frames + if draw_on_frames: + if metadata is None: + raise ValueError("Need video metadata to process videos in Ernie 4.5 VL using `draw_on_frames`") + elif metadata.fps is None: + metadata.fps = 24 + logger.warning_once( + "Could not infer the fps of a video due to the metadata not being available, " + "defaulting to `24`. Please provide `video_metadata` for more accurate results." + ) + + # `make_batched_videos` always returns a 4D array per video + if isinstance(video, np.ndarray): + # not using F.to_tensor as it doesn't handle (C, H, W) numpy arrays + video = torch.from_numpy(video).contiguous() + + # Infer the channel dimension format if not provided + if input_data_format is None: + input_data_format = infer_channel_dimension_format(video) + + if input_data_format == ChannelDimension.LAST: + video = video.permute(0, 3, 1, 2).contiguous() + + # specific to ernie, draws timestamps on each frame (if enabled) + if draw_on_frames: + if is_tracing(video): + raise RuntimeError( + "Using `torch.compile` is not compatible with drawing on frames. " + "Either don't use `torch.compile` or don't draw on frames via the kwarg `draw_on_frames=False`." 
+ ) + + for idx, frame in enumerate(video): + video[idx] = self._render_image_with_timestamp( + frame, self._convert_timestamp(metadata.timestamps[idx]) + ) + + # last frame is copied if uneven (mitigating issues for temporal patch size) + if video.shape[0] % 2 != 0: + video = torch.cat((video, video[-1].detach().clone()[None, ...]), dim=0) + + if device is not None: + video = video.to(device) + + processed_videos.append(video) + return processed_videos + + def _preprocess( + self, + videos: list[torch.Tensor], + do_convert_rgb: bool = True, + do_resize: bool = True, + size: Optional[SizeDict] = None, + interpolation: PILImageResampling = PILImageResampling.BICUBIC, + do_rescale: bool = True, + rescale_factor: float = 1 / 255.0, + do_normalize: bool = True, + image_mean: Optional[Union[float, list[float]]] = None, + image_std: Optional[Union[float, list[float]]] = None, + patch_size: Optional[int] = None, + merge_size: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + **kwargs, + ): + # Group videos by size for batched resizing + grouped_videos, grouped_videos_index = group_videos_by_shape(videos) + resized_videos_grouped = {} + for shape, stacked_videos in grouped_videos.items(): + if do_convert_rgb: + stacked_videos = self.convert_to_rgb(stacked_videos) + + height, width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST) + resized_height, resized_width = height, width + if do_resize: + resized_height, resized_width = smart_resize( + height, + width, + factor=patch_size * merge_size, + min_pixels=size["shortest_edge"], + max_pixels=size["longest_edge"], + ) + stacked_videos = self.resize( + image=stacked_videos, + size=SizeDict(height=resized_height, width=resized_width), + interpolation=interpolation, + ) + resized_videos_grouped[shape] = stacked_videos + resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index) + + # Group videos by size for further processing + # Needed in case do_resize is False, or resize returns videos with different sizes + grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos) + processed_videos_grouped = {} + processed_grids = {} + for shape, stacked_videos in grouped_videos.items(): + resized_height, resized_width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST) + + # Fused rescale and normalize + stacked_videos = self.rescale_and_normalize( + stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std + ) + patches = stacked_videos + + batch_size, grid_t, channel = patches.shape[:3] + grid_h, grid_w = resized_height // patch_size, resized_width // patch_size + + patches = patches.view( + batch_size, + grid_t, + channel, + grid_h // merge_size, + merge_size, + patch_size, + grid_w // merge_size, + merge_size, + patch_size, + ) + # Reorder dimensions to group grid and patch information for subsequent flattening. 
+ # [batch, grid_t, grid_h/merge, grid_w/merge, merge, merge, channel, patch, patch] + patches = patches.permute(0, 1, 3, 6, 4, 7, 2, 5, 8) + + flatten_patches = patches.reshape( + batch_size, + grid_t * grid_h * grid_w, + channel * patch_size * patch_size, + ) + + processed_videos_grouped[shape] = flatten_patches + processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size + + processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index) + processed_grids = reorder_videos(processed_grids, grouped_videos_index) + pixel_values_videos = torch.cat(processed_videos, dim=0) + video_grid_thw = torch.tensor(processed_grids) + + return BatchFeature( + data={"pixel_values_videos": pixel_values_videos, "video_grid_thw": video_grid_thw}, + tensor_type=return_tensors, + ) + + @add_start_docstrings( + BASE_VIDEO_PROCESSOR_DOCSTRING, + ) + def preprocess( + self, + videos: VideoInput, + **kwargs: Unpack[VideosKwargs], + ) -> BatchFeature: + validate_kwargs( + captured_kwargs=kwargs.keys(), + valid_processor_keys=list(self.valid_kwargs.__annotations__.keys()) + ["return_tensors"], + ) + + # Perform type validation on received kwargs + validate_typed_dict(self.valid_kwargs, kwargs) + + # Set default kwargs from self. This ensures that if a kwarg is not provided + # by the user, it gets its default value from the instance, or is set to None. + for kwarg_name in self.valid_kwargs.__annotations__: + kwargs.setdefault(kwarg_name, getattr(self, kwarg_name, None)) + + input_data_format = kwargs.pop("input_data_format") + do_sample_frames = kwargs.pop("do_sample_frames") + device = kwargs.pop("device") + video_metadata = kwargs.pop("video_metadata") + draw_on_frames = kwargs.pop("draw_on_frames") + + sample_indices_fn = partial(self.sample_frames, **kwargs) if do_sample_frames else None + videos, video_metadata = self._decode_and_sample_videos( + videos, + video_metadata=video_metadata, + do_sample_frames=do_sample_frames, + sample_indices_fn=sample_indices_fn, + ) + videos = self._prepare_input_videos( + videos=videos, + input_data_format=input_data_format, + device=device, + video_metadata=video_metadata, + draw_on_frames=draw_on_frames, + ) + + kwargs = self._further_process_kwargs(**kwargs) + self._validate_preprocess_kwargs(**kwargs) + + # Pop kwargs that are not needed in _preprocess + kwargs.pop("data_format") + return_metadata = kwargs.pop("return_metadata") + + preprocessed_videos = self._preprocess(videos=videos, **kwargs) + if return_metadata: + preprocessed_videos["video_metadata"] = video_metadata + return preprocessed_videos + + +__all__ = ["Ernie4_5_VLVideoProcessor"] diff --git a/src/transformers/video_processing_utils.py b/src/transformers/video_processing_utils.py index eeb04eeb3adb..5643d425e6d6 100644 --- a/src/transformers/video_processing_utils.py +++ b/src/transformers/video_processing_utils.py @@ -700,12 +700,15 @@ def get_video_processor_dict( # We are downloading both configs because almost all models have a `processor_config.json` but # not all of these are nested. 
We need to check if it was saved recebtly as nested or if it is legacy style video_processor_dict = None + final_resolved_file_path = None if resolved_processor_file is not None: + final_resolved_file_path = resolved_processor_file processor_dict = safe_load_json_file(resolved_processor_file) if "video_processor" in processor_dict: video_processor_dict = processor_dict["video_processor"] if resolved_video_processor_file is not None and video_processor_dict is None: + final_resolved_file_path = resolved_video_processor_file video_processor_dict = safe_load_json_file(resolved_video_processor_file) if video_processor_dict is None: @@ -722,6 +725,10 @@ def get_video_processor_dict( logger.info( f"loading configuration file {video_processor_file} from cache at {resolved_video_processor_file}" ) + + # Specific models need the original path for modification in `from_dict`, e.g. see `Ernie 4.5 VL` with fonts + kwargs["resolved_file_path"] = final_resolved_file_path + return video_processor_dict, kwargs @classmethod diff --git a/tests/models/ernie4_5_vl/__init__.py b/tests/models/ernie4_5_vl/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/models/ernie4_5_vl/test_image_processing_ernie4_5_vl.py b/tests/models/ernie4_5_vl/test_image_processing_ernie4_5_vl.py new file mode 100644 index 000000000000..f738df6b5407 --- /dev/null +++ b/tests/models/ernie4_5_vl/test_image_processing_ernie4_5_vl.py @@ -0,0 +1,346 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
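Before the image-processing tests, here is a small sketch of the on-disk layout that the font handling above relies on when `draw_on_frames` is enabled. The directory and config filename below are illustrative assumptions; only the relative-path resolution mirrors the `resolved_file_path` / `from_dict` plumbing added in this PR.

```python
# Illustrative sketch of the expected processor layout for `draw_on_frames` (paths are assumptions).
from pathlib import Path
from PIL import ImageFont

processor_dir = Path("./ernie4_5_vl_processor")  # hypothetical local save directory
resolved_config = processor_dir / "video_preprocessor_config.json"  # handed down as `resolved_file_path`
font_name = "Roboto-Regular.ttf"  # value stored under "font" in the saved config

# `Ernie4_5_VLVideoProcessor.from_dict` rewrites the stored font name into a path next to the
# resolved config file and verifies that PIL can load it, raising OSError otherwise.
font_path = resolved_config.parent / font_name
try:
    ImageFont.truetype(str(font_path))
except OSError:
    print(f"Font not found next to the config: {font_path}")
```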
+ +import itertools +import tempfile +import unittest + +import numpy as np + +from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, load_image +from transformers.models.ernie4_5_vl.image_processing_ernie4_5_vl import smart_resize +from transformers.testing_utils import require_torch, require_vision +from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available + +from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs +from ...test_processing_common import url_to_local_path + + +if is_torch_available(): + import torch + +if is_vision_available(): + from PIL import Image + + from transformers import Ernie4_5_VLImageProcessor + + if is_torchvision_available(): + from transformers import Ernie4_5_VLImageProcessorFast + + +class Ernie4_5_VLImageProcessorTester: + def __init__( + self, + parent, + batch_size=7, + num_channels=3, + min_resolution=56, + max_resolution=1024, + size=None, + do_resize=True, + do_normalize=True, + do_convert_rgb=True, + image_mean=OPENAI_CLIP_MEAN, + image_std=OPENAI_CLIP_STD, + patch_size=14, + merge_size=2, + ): + self.parent = parent + self.batch_size = batch_size + self.num_channels = num_channels + self.min_resolution = min_resolution + self.max_resolution = max_resolution + if size is None: + size = {"shortest_edge": 56 * 56, "longest_edge": 6177 * 28 * 28} + self.size = size + self.do_resize = do_resize + self.do_normalize = do_normalize + self.do_convert_rgb = do_convert_rgb + self.image_mean = image_mean + self.image_std = image_std + self.patch_size = patch_size + self.merge_size = merge_size + + def prepare_image_processor_dict(self): + return { + "do_resize": self.do_resize, + "image_mean": self.image_mean, + "image_std": self.image_std, + "size": self.size, + "patch_size": self.patch_size, + "merge_size": self.merge_size, + } + + def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): + images = prepare_image_inputs( + batch_size=self.batch_size, + num_channels=self.num_channels, + min_resolution=self.min_resolution, + max_resolution=self.max_resolution, + equal_resolution=equal_resolution, + numpify=numpify, + torchify=torchify, + ) + return [[image] for image in images] + + +@require_torch +@require_vision +class Ernie4_5_VLImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): + image_processing_class = Ernie4_5_VLImageProcessor if is_vision_available() else None + fast_image_processing_class = Ernie4_5_VLImageProcessorFast if is_torchvision_available() else None + + def setUp(self): + super().setUp() + self.image_processor_tester = Ernie4_5_VLImageProcessorTester(self) + + @property + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + for image_processing_class in self.image_processor_list: + image_processing = image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "do_convert_rgb")) + self.assertTrue(hasattr(image_processing, "patch_size")) + self.assertTrue(hasattr(image_processing, "merge_size")) + + def test_image_processor_from_dict_with_kwargs(self): + for image_processing_class in 
self.image_processor_list: + image_processor = image_processing_class.from_dict(self.image_processor_dict) + self.assertEqual(image_processor.size["shortest_edge"], 56 * 56) + self.assertEqual(image_processor.size["longest_edge"], 6177 * 28 * 28) + + image_processor = image_processing_class.from_dict( + self.image_processor_dict, + size={"shortest_edge": 256 * 256, "longest_edge": 640 * 640}, + ) + self.assertEqual(image_processor.size["shortest_edge"], 256 * 256) + self.assertEqual(image_processor.size["longest_edge"], 640 * 640) + + def test_select_best_resolution(self): + # Test with a final resize resolution + best_resolution = smart_resize(561, 278, factor=28) + self.assertEqual(best_resolution, (560, 280)) + + def test_call_pil(self): + for image_processing_class in self.image_processor_list: + # Initialize image_processing + image_processing = image_processing_class(**self.image_processor_dict) + # create random PIL images + image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) + for image in image_inputs: + self.assertIsInstance(image[0], Image.Image) + + # Test not batched input + process_out = image_processing(image_inputs[0], return_tensors="pt") + encoded_images = process_out.pixel_values + image_grid_thws = process_out.image_grid_thw + expected_output_image_shape = (5476, 588) + expected_image_grid_thws = torch.Tensor([[1, 74, 74]]) + self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) + self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) + + # Test batched + process_out = image_processing(image_inputs, return_tensors="pt") + encoded_images = process_out.pixel_values + image_grid_thws = process_out.image_grid_thw + expected_output_image_shape = (38332, 588) + expected_image_grid_thws = torch.Tensor([[1, 74, 74]] * 7) + self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) + self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) + + def test_call_numpy(self): + for image_processing_class in self.image_processor_list: + # Initialize image_processing + image_processing = image_processing_class(**self.image_processor_dict) + # create random numpy tensors + image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True) + for image in image_inputs: + self.assertIsInstance(image[0], np.ndarray) + + # Test not batched input + process_out = image_processing(image_inputs[0], return_tensors="pt") + encoded_images = process_out.pixel_values + image_grid_thws = process_out.image_grid_thw + expected_output_image_shape = (5476, 588) + expected_image_grid_thws = torch.Tensor([[1, 74, 74]]) + self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) + self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) + + # Test batched + process_out = image_processing(image_inputs, return_tensors="pt") + encoded_images = process_out.pixel_values + image_grid_thws = process_out.image_grid_thw + expected_output_image_shape = (38332, 588) + expected_image_grid_thws = torch.Tensor([[1, 74, 74]] * 7) + self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) + self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) + + def test_call_pytorch(self): + for image_processing_class in self.image_processor_list: + # Initialize image_processing + image_processing = image_processing_class(**self.image_processor_dict) + # create random PyTorch tensors + image_inputs = 
self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True) + + for image in image_inputs: + self.assertIsInstance(image[0], torch.Tensor) + + # Test not batched input + process_out = image_processing(image_inputs[0], return_tensors="pt") + encoded_images = process_out.pixel_values + image_grid_thws = process_out.image_grid_thw + expected_output_image_shape = (5476, 588) + expected_image_grid_thws = torch.Tensor([[1, 74, 74]]) + self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) + self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) + + # Test batched + process_out = image_processing(image_inputs, return_tensors="pt") + encoded_images = process_out.pixel_values + image_grid_thws = process_out.image_grid_thw + expected_output_image_shape = (38332, 588) + expected_image_grid_thws = torch.Tensor([[1, 74, 74]] * 7) + self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) + self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) + + @unittest.skip(reason="Ernie4_5_VLImageProcessor doesn't treat 4 channel PIL and numpy consistently yet") + def test_call_numpy_4_channels(self): + pass + + def test_nested_input(self): + for image_processing_class in self.image_processor_list: + image_processing = image_processing_class(**self.image_processor_dict) + image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) + + # Test batched as a list of images + process_out = image_processing(image_inputs, return_tensors="pt") + encoded_images = process_out.pixel_values + image_grid_thws = process_out.image_grid_thw + expected_output_image_shape = (38332, 588) + expected_image_grid_thws = torch.Tensor([[1, 74, 74]] * 7) + self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) + self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) + + # Test batched as a nested list of images, where each sublist is one batch + image_inputs_nested = image_inputs[:3] + image_inputs[3:] + process_out = image_processing(image_inputs_nested, return_tensors="pt") + encoded_images_nested = process_out.pixel_values + image_grid_thws_nested = process_out.image_grid_thw + expected_output_image_shape = (38332, 588) + expected_image_grid_thws = torch.Tensor([[1, 74, 74]] * 7) + self.assertEqual(tuple(encoded_images_nested.shape), expected_output_image_shape) + self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) + + # Image processor should return same pixel values, independently of input format + self.assertTrue((encoded_images_nested == encoded_images).all()) + self.assertTrue((image_grid_thws_nested == expected_image_grid_thws).all()) + + def test_custom_image_size(self): + for image_processing_class in self.image_processor_list: + image_processing = image_processing_class(**self.image_processor_dict) + with tempfile.TemporaryDirectory() as tmpdirname: + image_processing.save_pretrained(tmpdirname) + image_processor_loaded = image_processing_class.from_pretrained( + tmpdirname, size={"shortest_edge": 28 * 28, "longest_edge": 56 * 56} + ) + + image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) + process_out = image_processor_loaded(image_inputs, return_tensors="pt") + expected_output_image_shape = [112, 588] + self.assertListEqual(list(process_out.pixel_values.shape), expected_output_image_shape) + + def test_custom_pixels(self): + pixel_choices = frozenset(itertools.product((100, 150, 200, 20000), (100, 150, 200, 20000))) + for
image_processing_class in self.image_processor_list: + image_processor_dict = self.image_processor_dict.copy() + for a_pixels, b_pixels in pixel_choices: + image_processor_dict["size"] = { + "shortest_edge": min(a_pixels, b_pixels), + "longest_edge": max(a_pixels, b_pixels), + } + image_processor = image_processing_class(**image_processor_dict) + image_inputs = self.image_processor_tester.prepare_image_inputs() + # Just checking that it doesn't raise an error + image_processor(image_inputs, return_tensors="pt") + + @require_vision + @require_torch + def test_slow_fast_equivalence(self): + dummy_image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg")) + + if not self.test_slow_image_processor or not self.test_fast_image_processor: + self.skipTest(reason="Skipping slow/fast equivalence test") + + if self.image_processing_class is None or self.fast_image_processing_class is None: + self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") + + image_processor_slow = self.image_processing_class(**self.image_processor_dict) + image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) + + encoding_slow = image_processor_slow(dummy_image, return_tensors="pt") + encoding_fast = image_processor_fast(dummy_image, return_tensors="pt") + + self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values) + self.assertEqual(encoding_slow.image_grid_thw.dtype, encoding_fast.image_grid_thw.dtype) + self._assert_slow_fast_tensors_equivalence( + encoding_slow.image_grid_thw.float(), encoding_fast.image_grid_thw.float() + ) + + @require_vision + @require_torch + def test_slow_fast_equivalence_batched(self): + if not self.test_slow_image_processor or not self.test_fast_image_processor: + self.skipTest(reason="Skipping slow/fast equivalence test") + + if self.image_processing_class is None or self.fast_image_processing_class is None: + self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") + + if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop: + self.skipTest( + reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors" + ) + + dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) + image_processor_slow = self.image_processing_class(**self.image_processor_dict) + image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) + + encoding_slow = image_processor_slow(dummy_images, return_tensors="pt") + encoding_fast = image_processor_fast(dummy_images, return_tensors="pt") + + self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values) + self.assertEqual(encoding_slow.image_grid_thw.dtype, encoding_fast.image_grid_thw.dtype) + self._assert_slow_fast_tensors_equivalence( + encoding_slow.image_grid_thw.float(), encoding_fast.image_grid_thw.float() + ) + + def test_get_num_patches_without_images(self): + for image_processing_class in self.image_processor_list: + image_processing = image_processing_class(**self.image_processor_dict) + num_patches = image_processing.get_number_of_image_patches(height=100, width=100, images_kwargs={}) + self.assertEqual(num_patches, 64) + + num_patches = image_processing.get_number_of_image_patches(height=200, width=50, images_kwargs={}) + self.assertEqual(num_patches, 
56) + + num_patches = image_processing.get_number_of_image_patches( + height=100, width=100, images_kwargs={"patch_size": 28} + ) + self.assertEqual(num_patches, 16) diff --git a/tests/models/ernie4_5_vl/test_modeling_ernie4_5_vl.py b/tests/models/ernie4_5_vl/test_modeling_ernie4_5_vl.py new file mode 100644 index 000000000000..fa445213342a --- /dev/null +++ b/tests/models/ernie4_5_vl/test_modeling_ernie4_5_vl.py @@ -0,0 +1,556 @@ +# Copyright 2025 Baidu and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Testing suite for the PyTorch Ernie 4.5 VL model.""" + +import unittest + +from transformers import ( + AutoProcessor, + Ernie4_5_VLConfig, + Ernie4_5_VLForConditionalGeneration, + Ernie4_5_VLModel, + is_torch_available, + is_vision_available, +) +from transformers.testing_utils import ( + cleanup, + require_flash_attn, + require_torch, + require_torch_gpu, + require_torch_large_accelerator, + require_torch_multi_accelerator, + slow, + torch_device, +) +from transformers.utils import is_cv2_available + +from ...generation.test_utils import GenerationTesterMixin +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ( + ModelTesterMixin, + floats_tensor, + ids_tensor, +) + + +if is_cv2_available(): + pass + +if is_torch_available(): + import torch + +if is_vision_available(): + pass + + +class Ernie4_5_VLVisionText2TextModelTester: + def __init__( + self, + parent, + batch_size=3, + seq_length=7, + num_channels=3, + ignore_index=-100, + image_size=112, + video_start_token_id=3, + video_end_token_id=4, + image_start_token_id=5, + image_end_token_id=6, + image_token_id=7, + video_token_id=8, + is_training=True, + text_config=None, + vision_config=None, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.num_channels = num_channels + self.ignore_index = ignore_index + self.image_size = image_size + self.bos_token_id = 0 + self.eos_token_id = 0 + self.pad_token_id = 0 + self.video_start_token_id = video_start_token_id + self.video_end_token_id = video_end_token_id + self.image_start_token_id = image_start_token_id + self.image_end_token_id = image_end_token_id + self.image_token_id = image_token_id + self.video_token_id = video_token_id + self.is_training = is_training + + self.text_config = text_config + if text_config is None: + self.text_config = { + "vocab_size": 99, + "hidden_size": 16, + "intermediate_size": 22, + "num_hidden_layers": 2, + "num_attention_heads": 2, + "num_key_value_heads": 1, + "hidden_act": "silu", + "max_position_embeddings": 512, + "tie_word_embeddings": True, + "rope_parameters": {"type": "default", "rope_theta": 500_000.0, "mrope_section": [1, 1, 2]}, + "moe_intermediate_size": [22, 22], + "moe_k": 2, + "moe_num_experts": 8, + "moe_num_shared_experts": 2, + "moe_layer_start_index": 0, + "moe_layer_interval": 1, + "moe_norm_min": 1e-12, + } + + self.vision_config = vision_config + if vision_config is None: + self.vision_config = { + "depth": 
2, + "hidden_size": 32, + "hidden_act": "silu", + "intermediate_size": 22, + "num_heads": 2, + "spatial_merge_size": 1, + } + + self.hidden_size = self.text_config["hidden_size"] + self.num_hidden_layers = self.text_config["num_hidden_layers"] + self.num_attention_heads = self.text_config["num_attention_heads"] + self.vocab_size = self.text_config["vocab_size"] + + self.num_image_tokens = 64 + self.seq_length = seq_length + self.num_image_tokens + + def get_config(self): + return Ernie4_5_VLConfig( + text_config=self.text_config, + vision_config=self.vision_config, + image_token_id=self.image_token_id, + video_token_id=self.video_token_id, + video_start_token_id=self.video_start_token_id, + video_end_token_id=self.video_end_token_id, + image_start_token_id=self.image_start_token_id, + image_end_token_id=self.image_end_token_id, + ) + + def prepare_config_and_inputs(self): + config = self.get_config() + patch_size = config.vision_config.patch_size + pixel_values = floats_tensor( + [self.batch_size * (self.image_size**2) // (patch_size**2), self.num_channels * (patch_size**2)] + ) + + return config, pixel_values + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, pixel_values = config_and_inputs + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) + + input_ids[input_ids == self.video_token_id] = self.pad_token_id + input_ids[input_ids == self.image_token_id] = self.pad_token_id + input_ids[input_ids == self.video_start_token_id] = self.pad_token_id + input_ids[input_ids == self.image_start_token_id] = self.pad_token_id + input_ids[input_ids == self.video_end_token_id] = self.pad_token_id + input_ids[input_ids == self.image_end_token_id] = self.pad_token_id + + input_ids[:, 0] = self.image_start_token_id + input_ids[:, 1 : 1 + self.num_image_tokens] = self.image_token_id + input_ids[:, 1 + self.num_image_tokens] = self.image_end_token_id + + patch_size = config.vision_config.patch_size + patches_per_side = self.image_size // patch_size + + inputs_dict = { + "pixel_values": pixel_values, + "image_grid_thw": torch.tensor( + [[1, patches_per_side, patches_per_side]] * self.batch_size, device=torch_device + ), + "input_ids": input_ids, + "attention_mask": attention_mask, + } + return config, inputs_dict + + +@require_torch +class Ernie4_5_VLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): + all_model_classes = ( + ( + Ernie4_5_VLModel, + Ernie4_5_VLForConditionalGeneration, + ) + if is_torch_available() + else () + ) + model_split_percents = [0.7, 0.9] # model too big to split at 0.5 + test_all_params_have_gradient = False # e score correction bias + moe + _is_composite = True + + def setUp(self): + self.model_tester = Ernie4_5_VLVisionText2TextModelTester(self) + self.config_tester = ConfigTester(self, config_class=Ernie4_5_VLConfig, has_text_modality=False) + + def test_config(self): + self.config_tester.run_common_tests() + + def prepare_config_and_inputs_for_generate(self, batch_size=2): + """ + Same as in GLM4V, see `tests/models/glm4v/test_modeling_glm4v.py` for reference + """ + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + # We don't want a few model inputs in our model input dictionary for generation tests + input_keys_to_ignore = [ + # we don't want encoder-decoder models to start from filled decoder ids + "decoder_input_ids", + "decoder_attention_mask", 
+ # we'll set cache use in each test differently + "use_cache", + # ignore labels if it is in the input dict + "labels", + ] + + # The difference from the general `prepare_config_and_inputs_for_generate` lies here + patch_size = config.vision_config.patch_size + filtered_image_length = batch_size * (self.model_tester.image_size**2) // (patch_size**2) + filtered_inputs_dict = { + k: v[:batch_size, ...] if isinstance(v, torch.Tensor) else v + for k, v in inputs_dict.items() + if k not in input_keys_to_ignore + } + filtered_inputs_dict["pixel_values"] = inputs_dict["pixel_values"][:filtered_image_length] + + # It is important to set `eos_token_id` to `None` to avoid early stopping (would break for length-based checks) + text_gen_config = config.get_text_config(decoder=True) + if text_gen_config.eos_token_id is not None and text_gen_config.pad_token_id is None: + text_gen_config.pad_token_id = ( + text_gen_config.eos_token_id + if isinstance(text_gen_config.eos_token_id, int) + else text_gen_config.eos_token_id[0] + ) + text_gen_config.eos_token_id = None + text_gen_config.forced_eos_token_id = None + + return config, filtered_inputs_dict + + def test_inputs_embeds_matches_input_ids(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + model.to(torch_device) + model.eval() + + inputs = self._prepare_for_class(inputs_dict, model_class) + input_ids = inputs["input_ids"] + del inputs["input_ids"] + del inputs["pixel_values"] + del inputs["image_grid_thw"] + + inputs_embeds = model.get_input_embeddings()(input_ids) + + with torch.no_grad(): + out_ids = model(input_ids=input_ids, **inputs)[0] + out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0] + torch.testing.assert_close(out_embeds, out_ids) + + +@slow +@require_torch_multi_accelerator +@require_torch_large_accelerator +@require_torch +class Ernie4_5_VLIntegrationTest(unittest.TestCase): + # TODO: update path + model_id = "/raid/anton/code/forks/transformers/AntonV/ErnieVL" + + def setUp(self): + cleanup(torch_device, gc_collect=True) + + self.processor = AutoProcessor.from_pretrained(self.model_id) + self.message = [ + { + "role": "user", + "content": [ + {"type": "text", "text": "What kind of dog is this?"}, + { + "type": "image", + "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg", + }, + ], + } + ] + self.message2 = [ + { + "role": "user", + "content": [ + {"type": "text", "text": "What kind of dog is this?"}, + { + "type": "image", + "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png", + }, + ], + } + ] + + def tearDown(self): + cleanup(torch_device, gc_collect=True) + + def test_small_model_integration_test(self): + model = Ernie4_5_VLForConditionalGeneration.from_pretrained(self.model_id, dtype="auto", device_map="auto") + + inputs = self.processor.apply_chat_template( + self.message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt" + ) + expected_input_ids = [100273, 2969, 93963, 1912, 3836, 315, 9159, 357, 501, 94009, 39082, 93919, 4, 93963, 101304, 100295, 100295] # fmt: skip + assert expected_input_ids == inputs.input_ids[0].tolist()[:17] + + expected_pixel_slice = torch.tensor( + [ + [-0.0988, -0.0842, -0.0842], + [-0.5660, -0.5514, -0.4200], + [-0.0259, -0.0259, -0.0259], + [-0.1280, -0.0988, -0.2010], + [-0.4638, -0.5806, -0.6974], + [-1.2083, -1.2229, -1.2083], + ], +
dtype=torch.float32, + device="cpu", + ) + assert torch.allclose(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=3e-3) + + # verify generation + inputs = inputs.to(torch_device) + + # This model on the hub has `do_sample=True`. + torch.manual_seed(42) + + output = model.generate(**inputs, max_new_tokens=30) + EXPECTED_DECODED_TEXT = "User: What kind of dog is this?Picture 1:\nAssistant: \n\n\n\nThe animal in the image is a lynx. It's a medium-sized wild cat characterized by its distinctive facial ruff, short tail" + self.assertEqual( + self.processor.decode(output[0], skip_special_tokens=True), + EXPECTED_DECODED_TEXT, + ) + + def test_small_model_integration_test_batch(self): + model = Ernie4_5_VLForConditionalGeneration.from_pretrained(self.model_id, dtype="auto", device_map="auto") + batch_messages = [self.message] * 2 + inputs = self.processor.apply_chat_template( + batch_messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt" + ).to(torch_device) + + # This model on the hub has `do_sample=True`. + torch.manual_seed(42) + + # it should not matter whether two images are the same size or not + output = model.generate(**inputs, max_new_tokens=30) + + EXPECTED_DECODED_TEXT = [ + "User: What kind of dog is this?Picture 1:\nAssistant: \n\n\n\nThe animal in the image is a lynx. It's a medium-sized wild cat characterized by its distinctive facial ruff, short tail", + "User: What kind of dog is this?Picture 1:\nAssistant: \n\n\n\nThe animal in the image is a lynx. It's a medium-sized wild cat characterized by its distinctive facial ruff, short tail" + ] # fmt: skip + + self.assertEqual( + self.processor.batch_decode(output, skip_special_tokens=True), + EXPECTED_DECODED_TEXT, + ) + + def test_small_model_integration_test_with_video(self): + processor = AutoProcessor.from_pretrained(self.model_id, max_image_size={"longest_edge": 50176}) + model = Ernie4_5_VLForConditionalGeneration.from_pretrained( + self.model_id, dtype=torch.float16, device_map="auto" + ) + questions = ["Only use English during your responses and describe the following video."] + video_urls = ["https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/tiny_video.mp4"] + messages = [ + [ + { + "role": "user", + "content": [ + {"type": "text", "text": question}, + { + "type": "video", + "video": video_url, + }, + ], + } + ] + for question, video_url in zip(questions, video_urls) + ] + inputs = processor.apply_chat_template( + messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt", padding=True + ).to(torch_device) + + # This model on the hub has `do_sample=True`. + torch.manual_seed(42) + + output = model.generate(**inputs, max_new_tokens=30) + EXPECTED_DECODED_TEXT = ['User: Only use English during your responses and describe the following video.Video 1:\nAssistant: \n\n\n\nA black-and-white image shows a person lying on their back on a mat in a dojo. They are dressed in a white judo gi'] # fmt: skip + + self.assertEqual( + processor.batch_decode(output, skip_special_tokens=True), + EXPECTED_DECODED_TEXT, + ) + + def test_small_model_integration_test_expand(self): + model = Ernie4_5_VLForConditionalGeneration.from_pretrained(self.model_id, dtype="auto", device_map="auto") + inputs = self.processor.apply_chat_template( + self.message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt" + ).to(torch_device) + + # This model on the hub has `do_sample=True`. 
+ torch.manual_seed(42) + + output = model.generate(**inputs, max_new_tokens=30, do_sample=False, num_beams=2, num_return_sequences=2) + + EXPECTED_DECODED_TEXT = [ + "User: What kind of dog is this?Picture 1:\nAssistant: \n\n\n\nThe animal in the image is a lynx. It's a medium-sized wild cat characterized by its distinctive facial ruff, short tail", + 'User: What kind of dog is this?Picture 1:\nAssistant: \n\n\n\nThe animal in the image is a lynx, not a dog. It has the distinctive features of a lynx, such as tuft' + ] # fmt: skip + + self.assertEqual( + self.processor.batch_decode(output, skip_special_tokens=True), + EXPECTED_DECODED_TEXT, + ) + + def test_small_model_integration_test_batch_wo_image(self): + model = Ernie4_5_VLForConditionalGeneration.from_pretrained(self.model_id, dtype="auto", device_map="auto") + message_wo_image = [ + {"role": "user", "content": [{"type": "text", "text": "Who are you?"}]}, + ] + batched_messages = [self.message, message_wo_image] + inputs = self.processor.apply_chat_template( + batched_messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt", + padding=True, + ).to(torch_device) + + # This model on the hub has `do_sample=True`. + torch.manual_seed(42) + + # it should not matter whether two images are the same size or not + output = model.generate(**inputs, max_new_tokens=30) + + EXPECTED_DECODED_TEXT = [ + "User: What kind of dog is this?Picture 1:\nAssistant: \n\n\n\nThe animal in the image is a lynx. It's a medium-sized wild cat characterized by its distinctive facial ruff, short tail", + "User: Who are you?\nAssistant: \n\n\n\nI'm an AI assistant created to help answer questions and provide information on a wide range of topics! I don't have personal experiences or a" + ] # fmt: skip + + self.assertEqual( + self.processor.batch_decode(output, skip_special_tokens=True), + EXPECTED_DECODED_TEXT, + ) + + def test_small_model_integration_test_batch_different_resolutions(self): + model = Ernie4_5_VLForConditionalGeneration.from_pretrained(self.model_id, dtype="auto", device_map="auto") + batched_messages = [self.message, self.message2] + inputs = self.processor.apply_chat_template( + batched_messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt", + padding=True, + ).to(torch_device) + + # This model on the hub has `do_sample=True`. + torch.manual_seed(42) + + # it should not matter whether two images are the same size or not + output = model.generate(**inputs, max_new_tokens=30) + + EXPECTED_DECODED_TEXT = [ + "User: What kind of dog is this?Picture 1:\nAssistant: \n\n\n\nThe animal in the image is a lynx. It's a medium-sized wild cat characterized by its distinctive facial ruff, short tail", + 'User: What kind of dog is this?Picture 1:\nAssistant: \n\n\n\nthere are no dogs here, there are 2 cats\nI am an AI assistant. 
I will benefit from learning by comparing what', + ] # fmt: skip + + self.assertEqual( + self.processor.batch_decode(output, skip_special_tokens=True), + EXPECTED_DECODED_TEXT, + ) + + @require_flash_attn + @require_torch_gpu + def test_small_model_integration_test_batch_flashatt2(self): + model = Ernie4_5_VLForConditionalGeneration.from_pretrained( + self.model_id, + dtype=torch.bfloat16, + attn_implementation="flash_attention_2", + device_map="auto", + ) + batched_messages = [self.message, self.message2] + inputs = self.processor.apply_chat_template( + batched_messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt", + padding=True, + ).to(torch_device) + + # This model on the hub has `do_sample=True`. + torch.manual_seed(42) + + # it should not matter whether two images are the same size or not + output = model.generate(**inputs, max_new_tokens=30) + + EXPECTED_DECODED_TEXT = [ + "User: What kind of dog is this?Picture 1:\nAssistant: \n\n\n\nThe animal in the image is a lynx. It's a medium-sized wild cat characterized by its distinctive facial ruff, short tail", + 'User: What kind of dog is this?Picture 1:\nAssistant: \n\n\n\nthere are no dogs here, there are 2 cats\nI am an AI assistant. I will benefit from learning by comparing what', + ] # fmt: skip + + self.assertEqual( + self.processor.batch_decode(output, skip_special_tokens=True), + EXPECTED_DECODED_TEXT, + ) + + @require_flash_attn + @require_torch_gpu + def test_small_model_integration_test_batch_wo_image_flashatt2(self): + model = Ernie4_5_VLForConditionalGeneration.from_pretrained( + self.model_id, + dtype=torch.bfloat16, + attn_implementation="flash_attention_2", + device_map="auto", + ) + message_wo_image = [ + {"role": "user", "content": [{"type": "text", "text": "Who are you?"}]}, + ] + batched_messages = [self.message, message_wo_image] + inputs = self.processor.apply_chat_template( + batched_messages, + tokenize=True, + add_generation_prompt=True, + return_dict=True, + return_tensors="pt", + padding=True, + ).to(torch_device) + + # This model on the hub has `do_sample=True`. + torch.manual_seed(42) + + # it should not matter whether two images are the same size or not + output = model.generate(**inputs, max_new_tokens=30) + + EXPECTED_DECODED_TEXT = [ + "User: What kind of dog is this?Picture 1:\nAssistant: \n\n\n\nThe animal in the image is a lynx. It's a medium-sized wild cat characterized by its distinctive facial ruff, short tail", + "User: Who are you?\nAssistant: \n\n\n\nI'm an AI assistant created to help answer questions and provide information on a wide range of topics! I don't have personal experiences or a" + ] # fmt: skip + + self.assertEqual( + self.processor.batch_decode(output, skip_special_tokens=True), + EXPECTED_DECODED_TEXT, + ) diff --git a/tests/models/ernie4_5_vl/test_processing_ernie4_5_vl.py b/tests/models/ernie4_5_vl/test_processing_ernie4_5_vl.py new file mode 100644 index 000000000000..a19318c7056a --- /dev/null +++ b/tests/models/ernie4_5_vl/test_processing_ernie4_5_vl.py @@ -0,0 +1,359 @@ +# Copyright 2025 HuggingFace Inc team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import shutil +import tempfile +import unittest + +import numpy as np +import pytest + +from transformers import AutoProcessor, LlamaTokenizerFast +from transformers.testing_utils import require_av, require_torch, require_torchvision, require_vision +from transformers.utils import is_torch_available, is_vision_available + +from ...test_processing_common import ProcessorTesterMixin + + +if is_vision_available(): + from transformers import Ernie4_5_VLImageProcessor, Ernie4_5_VLProcessor + +if is_torch_available(): + import torch + + +@require_vision +@require_torch +@require_torchvision +class Ernie4_5_VLProcessorTest(ProcessorTesterMixin, unittest.TestCase): + processor_class = Ernie4_5_VLProcessor + + @classmethod + def setUpClass(cls): + cls.tmpdirname = tempfile.mkdtemp() + # TODO: update path + processor = Ernie4_5_VLProcessor.from_pretrained( + "/raid/anton/code/forks/transformers/AntonV/ErnieVL", + patch_size=4, + size={"shortest_edge": 28 * 28, "longest_edge": 56 * 56}, + ) + processor.save_pretrained(cls.tmpdirname) + cls.image_token = processor.image_token + + def get_tokenizer(self, **kwargs): + return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer + + def get_image_processor(self, **kwargs): + return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor + + def get_video_processor(self, **kwargs): + return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).video_processor + + def get_processor(self, **kwargs): + return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs) + + @classmethod + def tearDownClass(cls): + shutil.rmtree(cls.tmpdirname, ignore_errors=True) + + # Copied from tests.models.llava.test_processing_llava.LlavaProcessorTest.test_get_num_vision_tokens + def test_get_num_vision_tokens(self): + "Tests general functionality of the helper used internally in vLLM" + + processor = self.get_processor() + + output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)]) + self.assertTrue("num_image_tokens" in output) + self.assertEqual(len(output["num_image_tokens"]), 3) + + self.assertTrue("num_image_patches" in output) + self.assertEqual(len(output["num_image_patches"]), 3) + + def test_save_load_pretrained_default(self): + tokenizer = self.get_tokenizer() + image_processor = self.get_image_processor() + video_processor = self.get_video_processor() + + processor = Ernie4_5_VLProcessor( + tokenizer=tokenizer, image_processor=image_processor, video_processor=video_processor + ) + processor.save_pretrained(self.tmpdirname) + processor = Ernie4_5_VLProcessor.from_pretrained(self.tmpdirname) + + self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) + self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string()) + self.assertIsInstance(processor.tokenizer, LlamaTokenizerFast) + self.assertIsInstance(processor.image_processor, Ernie4_5_VLImageProcessor) + + def test_image_processor(self): + image_processor = self.get_image_processor() + tokenizer = self.get_tokenizer() + video_processor = self.get_video_processor() + + 
processor = Ernie4_5_VLProcessor( + tokenizer=tokenizer, image_processor=image_processor, video_processor=video_processor + ) + + image_input = self.prepare_image_inputs() + + input_image_proc = image_processor(image_input, return_tensors="pt") + input_processor = processor(images=image_input, text="dummy", return_tensors="pt") + + for key in input_image_proc: + self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2) + + def test_processor(self): + image_processor = self.get_image_processor() + tokenizer = self.get_tokenizer() + video_processor = self.get_video_processor() + + processor = Ernie4_5_VLProcessor( + tokenizer=tokenizer, image_processor=image_processor, video_processor=video_processor + ) + + input_str = "lower newer" + image_input = self.prepare_image_inputs() + inputs = processor(text=input_str, images=image_input) + + self.assertListEqual( + list(inputs.keys()), + ["input_ids", "attention_mask", "mm_token_type_ids", "pixel_values", "image_grid_thw"], + ) + + # test if it raises when no input is passed + with pytest.raises(ValueError): + processor() + + # test if it raises when no text is passed + with pytest.raises(TypeError): + processor(images=image_input) + + @require_torch + @require_av + def _test_apply_chat_template( + self, + modality: str, + batch_size: int, + return_tensors: str, + input_name: str, + processor_name: str, + input_data: list[str], + ): + processor = self.get_processor() + if processor.chat_template is None: + self.skipTest("Processor has no chat template") + + if processor_name not in self.processor_class.get_attributes(): + self.skipTest(f"{processor_name} attribute not present in {self.processor_class}") + + batch_messages = [ + [ + { + "role": "user", + "content": [{"type": "text", "text": "Describe this."}], + }, + ] + ] * batch_size + + # Test that jinja can be applied + formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False) + self.assertEqual(len(formatted_prompt), batch_size) + + # Test that tokenizing with template and directly with `self.tokenizer` gives same output + formatted_prompt_tokenized = processor.apply_chat_template( + batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors + ) + add_special_tokens = True + if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token): + add_special_tokens = False + tok_output = processor.tokenizer( + formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens + ) + expected_output = tok_output.input_ids + self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist()) + + # Test that kwargs passed to processor's `__call__` are actually used + tokenized_prompt_100 = processor.apply_chat_template( + batch_messages, + add_generation_prompt=True, + tokenize=True, + padding="max_length", + truncation=True, + return_tensors=return_tensors, + max_length=100, + ) + self.assertEqual(len(tokenized_prompt_100[0]), 100) + + # Test that `return_dict=True` returns text related inputs in the dict + out_dict_text = processor.apply_chat_template( + batch_messages, + add_generation_prompt=True, + tokenize=True, + return_dict=True, + return_tensors=return_tensors, + ) + self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask", "mm_token_type_ids"])) + self.assertEqual(len(out_dict_text["input_ids"]), batch_size) + self.assertEqual(len(out_dict_text["attention_mask"]), 
batch_size) + self.assertEqual(len(out_dict_text["mm_token_type_ids"]), batch_size) + + # Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict + for idx, url in enumerate(input_data[:batch_size]): + batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}] + + out_dict = processor.apply_chat_template( + batch_messages, + add_generation_prompt=True, + tokenize=True, + return_dict=True, + return_tensors=return_tensors, + max_frames=2, # by default no more than 2 frames, otherwise too slow + ) + input_name = getattr(self, input_name) + self.assertTrue(input_name in out_dict) + self.assertEqual(len(out_dict["input_ids"]), batch_size) + self.assertEqual(len(out_dict["attention_mask"]), batch_size) + self.assertEqual(len(out_dict["mm_token_type_ids"]), batch_size) + + if modality == "video": + # qwen pixels don't scale with bs same way as other models, calculate expected video token count based on video_grid_thw + expected_video_token_count = 0 + for thw in out_dict["video_grid_thw"]: + expected_video_token_count += thw[0] * thw[1] * thw[2] + mm_len = expected_video_token_count + else: + mm_len = batch_size * 192 + self.assertEqual(len(out_dict[input_name]), mm_len) + + return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list} + for k in out_dict: + self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors]) + + @require_av + def test_apply_chat_template_video_frame_sampling(self): + processor = self.get_processor() + if processor.chat_template is None: + self.skipTest("Processor has no chat template") + + signature = inspect.signature(processor.__call__) + if "videos" not in {*signature.parameters.keys()} or ( + signature.parameters.get("videos") is not None + and signature.parameters["videos"].annotation == inspect._empty + ): + self.skipTest("Processor doesn't accept videos at input") + + messages = [ + [ + { + "role": "user", + "content": [ + {"type": "video"}, + {"type": "text", "text": "What is shown in this video?"}, + ], + }, + ] + ] + + formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) + self.assertEqual(len(formatted_prompt), 1) + + formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True) + expected_output = processor.tokenizer(formatted_prompt, return_tensors=None).input_ids + self.assertListEqual(expected_output, formatted_prompt_tokenized) + + out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True) + self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask", "mm_token_type_ids"]) + + # Add video URL for return dict and load with `num_frames` arg + messages[0][0]["content"][0] = { + "type": "video", + "url": "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/tiny_video.mp4", + } + num_frames = 3 + out_dict_with_video = processor.apply_chat_template( + messages, + add_generation_prompt=True, + tokenize=True, + return_dict=True, + num_frames=num_frames, + min_frames=3, # default is 16 + ) + self.assertTrue(self.videos_input_name in out_dict_with_video) + self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 720) + + # Load with `fps` arg + fps = 1 + out_dict_with_video = processor.apply_chat_template( + messages, + add_generation_prompt=True, + tokenize=True, + return_dict=True, + fps=fps, + ) + self.assertTrue(self.videos_input_name in out_dict_with_video) 
+ self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 2160) + + # Load with `fps` and `num_frames` args, should raise an error + with self.assertRaises(ValueError): + out_dict_with_video = processor.apply_chat_template( + messages, + add_generation_prompt=True, + tokenize=True, + return_dict=True, + fps=fps, + num_frames=num_frames, + ) + + # Load without any arg should load the whole video + out_dict_with_video = processor.apply_chat_template( + messages, + add_generation_prompt=True, + tokenize=True, + return_dict=True, + ) + self.assertTrue(self.videos_input_name in out_dict_with_video) + self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 2160) + + # Load video as a list of frames (i.e. images). NOTE: each frame should have same size + # because we assume they come from one video + messages[0][0]["content"][0] = { + "type": "video", + "url": [ + "https://www.ilankelman.org/stopsigns/australia.jpg", + "https://www.ilankelman.org/stopsigns/australia.jpg", + ], + } + out_dict_with_video = processor.apply_chat_template( + messages, + add_generation_prompt=True, + tokenize=True, + return_dict=True, + do_sample_frames=False, + ) + self.assertTrue(self.videos_input_name in out_dict_with_video) + self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 320) + + def test_kwargs_overrides_custom_image_processor_kwargs(self): + processor = self.get_processor() + self.skip_processor_without_typed_kwargs(processor) + + input_str = self.prepare_text_inputs() + image_input = self.prepare_image_inputs() + + size = {"shortest_edge": processor.image_processor.size["shortest_edge"], "longest_edge": 56 * 56 * 4} + inputs = processor(text=input_str, images=image_input, size=size, return_tensors="pt") + self.assertEqual(inputs[self.images_input_name].shape[0], 612) + inputs = processor(text=input_str, images=image_input, return_tensors="pt") + self.assertEqual(inputs[self.images_input_name].shape[0], 100) diff --git a/tests/models/ernie4_5_vl/test_video_processing_ernie4_5_vl.py b/tests/models/ernie4_5_vl/test_video_processing_ernie4_5_vl.py new file mode 100644 index 000000000000..47d9f39cd6c1 --- /dev/null +++ b/tests/models/ernie4_5_vl/test_video_processing_ernie4_5_vl.py @@ -0,0 +1,336 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
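Before the video-processing tests, it helps to connect them to the `get_video_processor_dict` change earlier in this diff: a recently saved checkpoint nests the video processor configuration under a "video_processor" key of the processor config, a legacy checkpoint ships a standalone video processor config file, and the path that was actually used is passed along so that `from_dict` can locate assets stored next to it (the font files mentioned in that hunk). The sketch below illustrates that resolution order only; the file names and helper are assumptions for illustration, not the transformers implementation.

# Illustrative sketch of the nested-vs-legacy resolution order described above.
# The file names used here are assumptions for the sketch.
import json
import os
from typing import Optional


def resolve_video_processor_dict(model_dir: str) -> tuple[Optional[dict], Optional[str]]:
    processor_file = os.path.join(model_dir, "processor_config.json")            # assumed name
    legacy_video_file = os.path.join(model_dir, "video_preprocessor_config.json")  # assumed name

    video_processor_dict, resolved_path = None, None

    # Recent checkpoints: the video processor dict is nested in the processor config.
    if os.path.isfile(processor_file):
        with open(processor_file) as f:
            processor_dict = json.load(f)
        if "video_processor" in processor_dict:
            video_processor_dict = processor_dict["video_processor"]
            resolved_path = processor_file

    # Legacy checkpoints: fall back to a standalone video processor config file.
    if video_processor_dict is None and os.path.isfile(legacy_video_file):
        with open(legacy_video_file) as f:
            video_processor_dict = json.load(f)
        resolved_path = legacy_video_file

    # The resolved path is returned as well so that `from_dict` can resolve
    # assets shipped alongside the config, e.g. font files.
    return video_processor_dict, resolved_path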
+ +import unittest + +import numpy as np + +from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD +from transformers.testing_utils import require_torch, require_vision +from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available + +from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs + + +if is_torch_available(): + from PIL import Image + +if is_vision_available(): + if is_torchvision_available(): + from transformers import Ernie4_5_VLVideoProcessor + from transformers.models.ernie4_5_vl.video_processing_ernie4_5_vl import smart_resize + + +class Ernie4_5_VLVideoProcessingTester: + def __init__( + self, + parent, + batch_size=5, + num_frames=8, + num_channels=3, + min_resolution=30, + max_resolution=80, + temporal_patch_size=2, + patch_size=14, + merge_size=2, + do_resize=True, + size=None, + do_normalize=True, + image_mean=IMAGENET_STANDARD_MEAN, + image_std=IMAGENET_STANDARD_STD, + do_convert_rgb=True, + draw_on_frames=False, + ): + size = size if size is not None else {"longest_edge": 20, "shortest_edge": 10} + self.parent = parent + self.batch_size = batch_size + self.num_frames = num_frames + self.num_channels = num_channels + self.min_resolution = min_resolution + self.max_resolution = max_resolution + self.do_resize = do_resize + self.size = size + self.do_normalize = do_normalize + self.image_mean = image_mean + self.image_std = image_std + self.do_convert_rgb = do_convert_rgb + self.temporal_patch_size = temporal_patch_size + self.patch_size = patch_size + self.merge_size = merge_size + self.draw_on_frames = draw_on_frames + + def prepare_video_processor_dict(self): + return { + "do_resize": self.do_resize, + "size": self.size, + "do_normalize": self.do_normalize, + "image_mean": self.image_mean, + "image_std": self.image_std, + "do_convert_rgb": self.do_convert_rgb, + "do_sample_frames": True, + "draw_on_frames": self.draw_on_frames, + } + + def prepare_video_metadata(self, videos): + video_metadata = [] + for video in videos: + if isinstance(video, list): + num_frames = len(video) + elif hasattr(video, "shape"): + if len(video.shape) == 4: # (T, H, W, C) + num_frames = video.shape[0] + else: + num_frames = 1 + else: + num_frames = self.num_frames + + metadata = { + "fps": 2, + "duration": num_frames / 2, + "total_num_frames": num_frames, + } + video_metadata.append(metadata) + return video_metadata + + def expected_output_video_shape(self, videos): + grid_t = self.num_frames + hidden_dim = self.num_channels * self.patch_size * self.patch_size + seq_len = 0 + for video in videos: + if isinstance(video, list) and isinstance(video[0], Image.Image): + video = np.stack([np.array(frame) for frame in video]) + elif hasattr(video, "shape"): + pass + else: + video = np.array(video) + + if hasattr(video, "shape") and len(video.shape) >= 3: + if len(video.shape) == 4: + _, height, width = video.shape[:3] + elif len(video.shape) == 3: + height, width = video.shape[:2] + else: + height, width = self.num_frames, self.min_resolution, self.min_resolution + else: + height, width = self.min_resolution, self.min_resolution + + resized_height, resized_width = smart_resize( + height, + width, + factor=self.patch_size * self.merge_size, + min_pixels=self.size["shortest_edge"], + max_pixels=self.size["longest_edge"], + ) + grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size + seq_len += grid_t * grid_h * grid_w + return [seq_len, hidden_dim] + + def 
prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"): + videos = prepare_video_inputs( + batch_size=self.batch_size, + num_frames=self.num_frames, + num_channels=self.num_channels, + min_resolution=self.min_resolution, + max_resolution=self.max_resolution, + equal_resolution=equal_resolution, + return_tensors=return_tensors, + ) + return videos + + +@require_torch +@require_vision +class Ernie4_5_VlVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase): + fast_video_processing_class = Ernie4_5_VLVideoProcessor if is_torchvision_available() else None + input_name = "pixel_values_videos" + + def setUp(self): + super().setUp() + self.video_processor_tester = Ernie4_5_VLVideoProcessingTester(self) + + @property + def video_processor_dict(self): + return self.video_processor_tester.prepare_video_processor_dict() + + def test_video_processor_from_dict_with_kwargs(self): + video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict) + self.assertEqual(video_processor.size, {"longest_edge": 20, "shortest_edge": 10}) + + video_processor = self.fast_video_processing_class.from_dict( + self.video_processor_dict, size={"longest_edge": 42, "shortest_edge": 42} + ) + self.assertEqual(video_processor.size, {"longest_edge": 42, "shortest_edge": 42}) + + def test_call_pil(self): + for video_processing_class in self.video_processor_list: + video_processing = video_processing_class(**self.video_processor_dict) + video_inputs = self.video_processor_tester.prepare_video_inputs( + equal_resolution=False, return_tensors="pil" + ) + + for video in video_inputs: + self.assertIsInstance(video[0], Image.Image) + + video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs) + encoded_videos = video_processing( + video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt" + )[self.input_name] + expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) + self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) + encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[ + self.input_name + ] + expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) + self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) + + def test_call_numpy(self): + for video_processing_class in self.video_processor_list: + video_processing = video_processing_class(**self.video_processor_dict) + video_inputs = self.video_processor_tester.prepare_video_inputs( + equal_resolution=False, return_tensors="np" + ) + + video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs) + encoded_videos = video_processing( + video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt" + )[self.input_name] + expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) + self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) + + encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[ + self.input_name + ] + expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) + self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) + + def test_call_pytorch(self): + for video_processing_class in self.video_processor_list: + video_processing = video_processing_class(**self.video_processor_dict) + video_inputs = 
self.video_processor_tester.prepare_video_inputs( + equal_resolution=False, return_tensors="pt" + ) + video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs) + encoded_videos = video_processing( + video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt" + )[self.input_name] + expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) + self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) + encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[ + self.input_name + ] + expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) + self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) + + @unittest.skip("Skip for now, the test needs adjustment for Ernie 4.5 VL") + def test_call_numpy_4_channels(self): + for video_processing_class in self.video_processor_list: + # Test that can process videos which have an arbitrary number of channels + # Initialize video_processing + video_processor = video_processing_class(**self.video_processor_dict) + + # create random numpy tensors + self.video_processor_tester.num_channels = 4 + video_inputs = self.video_processor_tester.prepare_video_inputs( + equal_resolution=False, return_tensors="np" + ) + + # Test not batched input + encoded_videos = video_processor( + video_inputs[0], + return_tensors="pt", + input_data_format="channels_last", + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), + )[self.input_name] + expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) + self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) + + # Test batched + encoded_videos = video_processor( + video_inputs, + return_tensors="pt", + input_data_format="channels_last", + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), + )[self.input_name] + expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) + self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) + + def test_nested_input(self): + """Tests that the processor can work with nested list where each video is a list of arrays""" + for video_processing_class in self.video_processor_list: + video_processing = video_processing_class(**self.video_processor_dict) + video_inputs = self.video_processor_tester.prepare_video_inputs( + equal_resolution=False, return_tensors="np" + ) + + video_inputs_nested = [list(video) for video in video_inputs] + video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs) + + # Test not batched input + encoded_videos = video_processing( + video_inputs_nested[0], video_metadata=[video_metadata[0]], return_tensors="pt" + )[self.input_name] + expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) + self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) + + # Test batched + encoded_videos = video_processing(video_inputs_nested, video_metadata=video_metadata, return_tensors="pt")[ + self.input_name + ] + expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) + self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) + + def test_call_sample_frames(self): + for video_processing_class in self.video_processor_list: + video_processor_dict = self.video_processor_dict.copy() + 
video_processing = video_processing_class(**video_processor_dict) + + prev_num_frames = self.video_processor_tester.num_frames + self.video_processor_tester.num_frames = 8 + prev_min_resolution = getattr(self.video_processor_tester, "min_resolution", None) + prev_max_resolution = getattr(self.video_processor_tester, "max_resolution", None) + self.video_processor_tester.min_resolution = 56 + self.video_processor_tester.max_resolution = 112 + + video_inputs = self.video_processor_tester.prepare_video_inputs( + equal_resolution=False, + return_tensors="torch", + ) + + metadata = [[{"total_num_frames": 8, "fps": 4}]] + batched_metadata = metadata * len(video_inputs) + + encoded_videos = video_processing(video_inputs[0], return_tensors="pt", video_metadata=metadata)[ + self.input_name + ] + encoded_videos_batched = video_processing( + video_inputs, return_tensors="pt", video_metadata=batched_metadata + )[self.input_name] + + self.assertIsNotNone(encoded_videos) + self.assertIsNotNone(encoded_videos_batched) + self.assertEqual(len(encoded_videos.shape), 2) + self.assertEqual(len(encoded_videos_batched.shape), 2) + + # error out when sampled frames would go over total number of frames + with self.assertRaises(ValueError): + video_processing(video_inputs[0], num_frames=10, return_tensors="pt")[self.input_name] + + self.video_processor_tester.num_frames = prev_num_frames + if prev_min_resolution is not None: + self.video_processor_tester.min_resolution = prev_min_resolution + if prev_max_resolution is not None: + self.video_processor_tester.max_resolution = prev_max_resolution diff --git a/utils/check_config_attributes.py b/utils/check_config_attributes.py index 1864e928b752..9ddaeb8d79c6 100644 --- a/utils/check_config_attributes.py +++ b/utils/check_config_attributes.py @@ -35,6 +35,7 @@ "xLSTMConfig": ["add_out_norm", "chunkwise_kernel", "sequence_kernel", "step_kernel"], "Ernie4_5Config": ["tie_word_embeddings"], "Ernie4_5_MoeConfig": ["tie_word_embeddings"], + "Ernie4_5_VLTextConfig": ["tie_word_embeddings"], "Lfm2Config": ["full_attn_idxs", "tie_word_embeddings"], "Lfm2MoeConfig": ["tie_word_embeddings"], # used internally during generation to provide the custom logit processors with their necessary information diff --git a/utils/check_repo.py b/utils/check_repo.py index 58ff56484f27..bf1730b283fe 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -196,6 +196,7 @@ "BltLocalDecoder", # Building part of bigger (tested) model. Tested implicitly through BLTForCausalLM. "BltGlobalTransformer", # Building part of bigger (tested) model. Tested implicitly through BLTForCausalLM. "Florence2VisionBackbone", # Building part of bigger (tested) model. Tested implicitly through Florence2ForConditionalGeneration. + "Ernie4_5_VLTextModel", # Building part of bigger (tested) model ] ) @@ -402,6 +403,7 @@ "Qwen3OmniMoeTalkerModel", # Building part of a bigger model "Qwen3OmniMoeThinkerForConditionalGeneration", # Building part of a bigger model "Qwen3OmniMoeThinkerTextModel", # Building part of a bigger model + "Ernie4_5_VLTextModel", # Building part of a bigger model ]
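Finally, as a quick orientation for readers of the integration tests above, here is a minimal inference sketch following the same chat-template flow. The checkpoint id is a placeholder: the tests still point at a local "TODO: update path" directory, so the final hub repository name cannot be inferred from this diff.

# Minimal inference sketch mirroring the integration tests above.
from transformers import AutoProcessor, Ernie4_5_VLForConditionalGeneration

model_id = "path/to/ernie4_5_vl-checkpoint"  # placeholder, not a real hub id
processor = AutoProcessor.from_pretrained(model_id)
model = Ernie4_5_VLForConditionalGeneration.from_pretrained(model_id, dtype="auto", device_map="auto")

messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "What kind of dog is this?"},
            {
                "type": "image",
                "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
            },
        ],
    }
]

inputs = processor.apply_chat_template(
    messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
).to(model.device)

# The test checkpoint enables sampling by default, hence the fixed seeds in the tests;
# max_new_tokens keeps generation short, matching the assertions above.
output = model.generate(**inputs, max_new_tokens=30)
print(processor.decode(output[0], skip_special_tokens=True))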