
Commit 6b83cc4

Fix a few naming issues
Signed-off-by: Thomas Parnell <[email protected]>
1 parent 76744da commit 6b83cc4

4 files changed: 6 additions, 6 deletions


vllm/lora/utils.py

Lines changed: 1 addition & 1 deletion
@@ -71,7 +71,7 @@ def from_layer(layer: nn.Module,
                                       packed_modules_list=packed_modules_list,
                                       model_config=model_config):
             # inject a-LoRA behaviour
-            if lora_config.activated_lora_enabled:
+            if lora_config.enable_activated_lora:
                 lora_cls = LinearLayerWithActivatedLoRAMixin.maybe_mixin(
                     lora_cls)
             instance_layer = lora_cls(layer)

vllm/model_executor/layers/linear.py

Lines changed: 1 addition & 1 deletion
@@ -243,7 +243,7 @@ def __init__(
 
         vllm_config = get_current_vllm_config()
         if (vllm_config.lora_config
-                and vllm_config.lora_config.activated_lora_enabled):
+                and vllm_config.lora_config.enable_activated_lora):
             # lets torch.compile know that forward_context needs to be
             # considered as an input to the layer (copied from attention)
             compilation_config = vllm_config.compilation_config

vllm/v1/engine/processor.py

Lines changed: 1 addition & 1 deletion
@@ -425,7 +425,7 @@ def process_inputs(
                         mm_position=decoder_mm_positions[modality][idx]))
 
         # Handle aLoRA invocation sequence if applicable.
-        if (self.lora_config and self.lora_config.activated_lora_enabled
+        if (self.lora_config and self.lora_config.enable_activated_lora
                 and lora_request is not None):
 
             text_config = self.model_config.hf_config.get_text_config()

vllm/v1/worker/gpu_model_runner.py

Lines changed: 3 additions & 3 deletions
@@ -329,7 +329,7 @@ def __init__(
         self.num_accepted_tokens = self._make_buffer(self.max_num_reqs,
                                                      dtype=torch.int64)
 
-        if self.lora_config and self.lora_config.activated_lora_enabled:
+        if self.lora_config and self.lora_config.enable_activated_lora:
             self.mask1d = torch.zeros(self.max_num_tokens,
                                       dtype=torch.int64,
                                       device=self.device)

@@ -1101,7 +1101,7 @@ def _prepare_inputs(
 
         # Compute aLoRA metadata
         alora_metadata = None
-        if self.lora_config and self.lora_config.activated_lora_enabled:
+        if self.lora_config and self.lora_config.enable_activated_lora:
             alora_metadata = self.build_alora_metadata(
                 num_reqs, positions_np, req_indices,
                 total_num_scheduled_tokens, self.input_batch, self.requests,

@@ -2816,7 +2816,7 @@ def _dummy_run(
             f"Expected {_cg_mode}, but got {cudagraph_runtime_mode}.")
 
         alora_metadata = None
-        if self.lora_config and self.lora_config.activated_lora_enabled:
+        if self.lora_config and self.lora_config.enable_activated_lora:
             alora_metadata = self.build_dummy_alora_metadata(
                 num_tokens, self.mask1d)
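
All six changes are the same rename: the aLoRA feature flag on LoRAConfig is now spelled enable_activated_lora instead of activated_lora_enabled, and every call site guards on it the same way. A minimal sketch of that guard pattern, using a simplified stand-in for the real vllm.config.LoRAConfig (the dataclass and helper below are illustrative assumptions, not vLLM code):

# Illustrative sketch only: a simplified stand-in for vllm.config.LoRAConfig,
# showing the renamed flag and the guard pattern used at every call site above.
from dataclasses import dataclass


@dataclass
class LoRAConfig:  # hypothetical stand-in, not the real vLLM class
    enable_activated_lora: bool = False  # was: activated_lora_enabled


def maybe_build_alora_metadata(lora_config):
    # Same shape as the guards in this commit: check that the config exists,
    # then check the renamed flag before taking the aLoRA path.
    if lora_config and lora_config.enable_activated_lora:
        return "alora-metadata"  # placeholder for the real metadata object
    return None


assert maybe_build_alora_metadata(None) is None
assert maybe_build_alora_metadata(LoRAConfig()) is None
assert maybe_build_alora_metadata(
    LoRAConfig(enable_activated_lora=True)) == "alora-metadata"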
