# Copyright 2024 Rhymes AI. All rights reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Mapping

from transformers import PretrainedConfig
from transformers.models.idefics2.configuration_idefics2 import (
    Idefics2VisionConfig)
from transformers.models.llama.configuration_llama import LlamaConfig

from vllm.logger import init_logger

logger = init_logger(__name__)


class AriaVisionConfig(Idefics2VisionConfig):
    model_type = "aria_vision_model"

@@ -45,3 +70,96 @@ def __init__(
        self.moe_num_experts = moe_num_experts
        self.moe_topk = moe_topk
        self.moe_num_shared_experts = moe_num_shared_experts


class AriaConfig(PretrainedConfig):
    """
    Configuration class for the Aria model.

    This class handles the configuration for both the vision and text
    components of the Aria model, as well as additional parameters for
    image token handling and projector mapping.

    Args:
        vision_config (AriaVisionConfig or dict): Configuration for the
            vision component.
        text_config (AriaMoELMConfig or dict): Configuration for the text
            component.
        projector_patch_to_query_dict (dict): Mapping of patch sizes to
            query dimensions.
        ignore_index (int): Index to ignore in loss calculation.
        image_token_index (int): Index used to represent image tokens.
        **kwargs: Additional keyword arguments passed to the parent class.

    Attributes:
        model_type (str): Type of the model, set to "aria".
        is_composition (bool): Whether the model is a composition of
            multiple components.
        ignore_index (int): Index to ignore in loss calculation.
        image_token_index (int): Index used to represent image tokens.
        projector_patch_to_query_dict (dict): Mapping of patch sizes to
            query dimensions.
        vision_config (AriaVisionConfig): Configuration for the vision
            component.
        text_config (AriaMoELMConfig): Configuration for the text
            component.
    """

    model_type = "aria"
    is_composition = False

    def __init__(
        self,
        vision_config: AriaVisionConfig = AriaVisionConfig(),  # noqa: B008
        text_config: AriaMoELMConfig = AriaMoELMConfig(),  # noqa: B008
        projector_patch_to_query_dict: Mapping[int, int] = {
            1225: 128,
            4900: 256,
        },
        ignore_index=-100,
        image_token_index=32000,
        tie_word_embeddings=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.ignore_index = ignore_index
        self.image_token_index = image_token_index
        self.tie_word_embeddings = tie_word_embeddings
        attn_implementation = kwargs.pop("attn_implementation", None)

        # Default to flash_attention_2 if no attention implementation is
        # specified.
        self._attn_implementation = ("flash_attention_2"
                                     if attn_implementation is None else
                                     attn_implementation)

        # Convert the keys and values of projector_patch_to_query_dict to
        # integers, so the mapping stays consistent even when they were
        # provided as strings (e.g. after a round trip through JSON, whose
        # object keys are always strings).
        self.projector_patch_to_query_dict = {
            int(k): int(v)
            for k, v in projector_patch_to_query_dict.items()
        }

        if isinstance(vision_config, dict) and "model_type" in vision_config:
            vision_config = AriaVisionConfig(**vision_config)
            if attn_implementation is None:
                vision_attn_implementation = "flash_attention_2"
            elif attn_implementation == "sdpa":
                logger.warning("SDPA is not supported for ViT; using "
                               "flash_attention_2 instead")
                vision_attn_implementation = "flash_attention_2"
            else:
                vision_attn_implementation = attn_implementation
            vision_config._attn_implementation = vision_attn_implementation

        self.vision_config = vision_config

        if isinstance(text_config, dict) and "model_type" in text_config:
            # The text backbone defaults to SDPA rather than
            # flash_attention_2.
            text_attn_implementation = ("sdpa" if attn_implementation is None
                                        else attn_implementation)
            text_config = AriaMoELMConfig(**text_config)
            text_config._attn_implementation = text_attn_implementation

        self.text_config = text_config

        # Needed for the static KV cache.
        self.num_hidden_layers = self.text_config.num_hidden_layers
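

# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's illustration, not part of the change above).
# It assumes AriaMoELMConfig.model_type == "aria_moe_lm"; that class is
# defined in the elided hunk, so the string is an assumption here.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Passing plain dicts, as when deserializing config.json, exercises the
    # per-component attention-implementation branches in AriaConfig.__init__.
    config = AriaConfig(
        vision_config={"model_type": "aria_vision_model"},
        text_config={"model_type": "aria_moe_lm"},  # model_type assumed
        projector_patch_to_query_dict={"1225": "128", "4900": "256"},
        attn_implementation="sdpa",
    )

    # JSON object keys arrive as strings and are normalized to integers.
    assert config.projector_patch_to_query_dict == {1225: 128, 4900: 256}

    # SDPA falls back to flash_attention_2 for the vision tower (a warning
    # is logged), while the text backbone keeps the requested SDPA.
    assert config.vision_config._attn_implementation == "flash_attention_2"
    assert config.text_config._attn_implementation == "sdpa"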