Skip to content

Commit f402b6f

Browse files
jeejeelee authored and mzusman committed
[Bugfix] Fix TeleChat2ForCausalLM weights mapper (vllm-project#11546)
Signed-off-by: Jee Jee Li <[email protected]>
1 parent b46fda6 commit f402b6f

File tree

1 file changed

+13
-13
lines changed

1 file changed

+13
-13
lines changed

vllm/model_executor/models/telechat2.py

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -31,19 +31,6 @@
3131

3232
class TeleChat2Model(LlamaModel):
3333

34-
hf_to_vllm_mapper = WeightsMapper(
35-
orig_to_new_prefix={
36-
"transformer.": "model.",
37-
},
38-
orig_to_new_substr={
39-
".h.": ".layers.",
40-
".self_attention.": ".self_attn.",
41-
".word_embeddings.": ".embed_tokens.",
42-
".dense.": ".o_proj.",
43-
".ln_f.": ".norm.",
44-
},
45-
)
46-
4734
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
4835
# 1. Initialize the LlamaModel with bias
4936
vllm_config.model_config.hf_config.bias = True
@@ -118,6 +105,19 @@ def load_weights(self, weights: Iterable[Tuple[str,
118105

119106
class TeleChat2ForCausalLM(LlamaForCausalLM):
120107

108+
hf_to_vllm_mapper = WeightsMapper(
109+
orig_to_new_prefix={
110+
"transformer.": "model.",
111+
},
112+
orig_to_new_substr={
113+
".h.": ".layers.",
114+
".self_attention.": ".self_attn.",
115+
".word_embeddings.": ".embed_tokens.",
116+
".dense.": ".o_proj.",
117+
".ln_f.": ".norm.",
118+
},
119+
)
120+
121121
def _init_model(self, vllm_config: VllmConfig, prefix: str = ""):
122122
return TeleChat2Model(vllm_config=vllm_config, prefix=prefix)
123123

0 commit comments

Comments (0)