
Commit 76744da

tdoublep and hmellor committed
Apply suggestions from code review
Co-authored-by: Harry Mellor <[email protected]>
Signed-off-by: Thomas Parnell <[email protected]>
1 parent 03b6480 commit 76744da

3 files changed: 5 additions, 5 deletions


vllm/config/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -2440,7 +2440,7 @@ class LoRAConfig:
     bias_enabled: bool = False
     """[DEPRECATED] Enable bias for LoRA adapters. This option will be
     removed in v0.12.0."""
-    activated_lora_enabled: bool = False
+    enable_activated_lora: bool = False
     """Enable Activated LoRA."""
 
     def compute_hash(self) -> str:
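
For orientation, a minimal sketch of the renamed field in use. The keyword arguments mirror the create_engine_config hunk further down; the import path and the concrete values are illustrative assumptions rather than part of this commit.

# Illustrative sketch only: values are made up, field names come from this commit.
from vllm.config import LoRAConfig

lora_config = LoRAConfig(
    bias_enabled=False,
    enable_activated_lora=True,  # previously named activated_lora_enabled
    max_lora_rank=16,
    max_loras=4,
)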

vllm/engine/arg_utils.py

Lines changed: 3 additions & 3 deletions
@@ -373,7 +373,7 @@ class EngineArgs:
     # LoRA fields
     enable_lora: bool = False
     enable_lora_bias: bool = LoRAConfig.bias_enabled
-    enable_activated_lora: bool = LoRAConfig.activated_lora_enabled
+    enable_activated_lora: bool = LoRAConfig.enable_activated_lora
     max_loras: int = LoRAConfig.max_loras
     max_lora_rank: int = LoRAConfig.max_lora_rank
     default_mm_loras: Optional[Dict[str, str]] = \
@@ -796,7 +796,7 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
         lora_group.add_argument("--enable-lora-bias",
                                 **lora_kwargs["bias_enabled"])
         lora_group.add_argument("--enable-activated-lora",
-                                **lora_kwargs["activated_lora_enabled"])
+                                **lora_kwargs["enable_activated_lora"])
         lora_group.add_argument("--max-loras", **lora_kwargs["max_loras"])
         lora_group.add_argument("--max-lora-rank",
                                 **lora_kwargs["max_lora_rank"])
@@ -1372,7 +1372,7 @@ def create_engine_config(
 
         lora_config = LoRAConfig(
             bias_enabled=self.enable_lora_bias,
-            activated_lora_enabled=self.enable_activated_lora,
+            enable_activated_lora=self.enable_activated_lora,
             max_lora_rank=self.max_lora_rank,
             max_loras=self.max_loras,
             default_mm_loras=self.default_mm_loras,
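
As a usage note (not part of the diff), the renamed kwargs key backs the --enable-activated-lora flag added above, and the same field can be set programmatically through EngineArgs. A hedged sketch, with the model name purely hypothetical:

# Hedged sketch: the model name is hypothetical; the field names follow this commit.
from vllm.engine.arg_utils import EngineArgs

args = EngineArgs(
    model="ibm-granite/granite-3.2-8b-instruct",  # hypothetical choice
    enable_lora=True,
    enable_activated_lora=True,  # surfaced on the CLI as --enable-activated-lora
)
# create_engine_config() (third hunk above) forwards this value into LoRAConfig.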

vllm/lora/peft_helper.py

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@ class PEFTHelper:
     # True to use Weight-Decomposed Low-Rank Adaptation (DoRA, see: https://arxiv.org/abs/2402.09353)
     use_dora: bool = field(default=False)
     # Invocation tokens for Activated LoRA (aLoRA, see: https://arxiv.org/abs/2504.12397)
-    alora_invocation_tokens: Optional[list[int]] = field(default=None)
+    alora_invocation_tokens: Optional[list[int]] = None
     # Extra vllm field, start with 'vllm_' to avoid conflict
     vllm_lora_scaling_factor: float = field(default=1.0)
     vllm_max_position_embeddings: Optional[int] = field(default=False)
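
The change in this file is purely stylistic: for an immutable default such as None, a plain assignment and field(default=None) give identical dataclass behaviour. A standalone sketch (not vLLM code) illustrating the equivalence:

# Standalone illustration, not vLLM code: both spellings default the field to None.
from dataclasses import dataclass, field
from typing import Optional

@dataclass
class WithPlainDefault:
    tokens: Optional[list[int]] = None

@dataclass
class WithFieldDefault:
    tokens: Optional[list[int]] = field(default=None)

assert WithPlainDefault().tokens is None
assert WithFieldDefault().tokens is None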
