Closed

Changes from all commits · 26 commits
86c79ba
some prelim cleanups
PeaBrane May 30, 2025
6bee243
router can route to dp ranks
PeaBrane May 30, 2025
dab052c
make the bunny hoppy
PeaBrane May 30, 2025
be6900e
Merge remote-tracking branch 'origin/main' into rupei/router-general
PeaBrane May 30, 2025
25e1291
Merge remote-tracking branch 'origin/main' into rupei/router-general
PeaBrane May 30, 2025
34e5c5b
new struct combining worker_id with dp_rank, dirty commit, breaks bin…
PeaBrane May 30, 2025
2cef74c
binding works
PeaBrane May 30, 2025
10d3326
dummy c binding note
PeaBrane May 30, 2025
4483c68
add_class WorkerWithDpRank
PeaBrane May 30, 2025
263c12d
renames + comments + fmt
PeaBrane May 31, 2025
65ea6b5
allow suffix for dp_rank identification
PeaBrane Jun 3, 2025
a2ef896
WIP: fix fn dp_rank, add TODO's
alec-flowers Jun 3, 2025
e80d66c
refactor: fix bugs, kv publishing working
alec-flowers Jun 3, 2025
7a733bd
fix panicing metric thread issue
alec-flowers Jun 4, 2025
1bddc8e
remove verbose log
alec-flowers Jun 4, 2025
ee283cc
update v1 worker
alec-flowers Jun 4, 2025
183a8fe
put dp_rank in PreprocessedRequest
PeaBrane Jun 4, 2025
be7f951
new agg config
PeaBrane Jun 4, 2025
e1011d8
updated comments
PeaBrane Jun 4, 2025
5bf4fae
update v1 example
alec-flowers Jun 4, 2025
d6ded6c
final touches for it working with dp
alec-flowers Jun 4, 2025
61b94ac
Merge branch 'main' into rupei/router-general
alec-flowers Jun 4, 2025
9335efe
fix cost function trace
PeaBrane Jun 4, 2025
931b837
fmt
PeaBrane Jun 4, 2025
2a72271
Merge branch 'main' into rupei/router-general
PeaBrane Jun 4, 2025
eb7bb10
WIP document current work steps
alec-flowers Jun 5, 2025
4 changes: 2 additions & 2 deletions components/metrics/src/bin/mock_worker.rs
@@ -14,7 +14,7 @@
// limitations under the License.

use dynamo_llm::kv_router::{
    protocols::ForwardPassMetrics, scheduler::KVHitRateEvent, KV_HIT_RATE_SUBJECT,
    protocols::ForwardPassMetrics, protocols::KVHitRateEvent, KV_HIT_RATE_SUBJECT,
};
use dynamo_runtime::{
    component::{service::EndpointStats, Namespace},
@@ -89,7 +89,7 @@ async fn mock_event_publisher(namespace: Namespace) {
        let overlap_blocks = rand::rng().random_range(0..=isl_blocks);

        let event = KVHitRateEvent {
            worker_id,
            worker: worker_id,
            isl_blocks,
            overlap_blocks,
        };
5 changes: 3 additions & 2 deletions components/metrics/src/lib.rs
@@ -84,8 +84,7 @@ use std::net::SocketAddr;
use std::time::Duration as StdDuration;

use dynamo_llm::kv_router::protocols::ForwardPassMetrics;
use dynamo_llm::kv_router::scheduler::Endpoint;
use dynamo_llm::kv_router::scoring::ProcessedEndpoints;
use dynamo_llm::kv_router::scoring::{Endpoint, ProcessedEndpoints};

use dynamo_runtime::{
    distributed::Component, error, service::EndpointInfo, utils::Duration, Result,
@@ -451,6 +450,8 @@ impl PrometheusMetrics {
        let worker_id = worker_id.to_string();
        let metrics = endpoint.data.clone();

        // NOTE: using metrics[0] just to get the first dp_rank for now
        // to not change the existing behavior
        self.set_worker_gauge(
            &self.kv_blocks_active,
            config,
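The NOTE in the hunk above takes `metrics[0]` to preserve the old single-rank behavior. With per-DP-rank metrics now available, a gauge could instead aggregate across ranks; a minimal sketch of that alternative (the `kv_active_blocks` field name is an assumption about `ForwardPassMetrics`, not taken from this diff):

```rust
use dynamo_llm::kv_router::protocols::ForwardPassMetrics;

// Hypothetical helper: sum active KV blocks across all DP ranks of a
// worker instead of reporting only the first rank's value.
// `endpoint.data` holds one ForwardPassMetrics per DP rank in this sketch.
fn total_active_blocks(per_rank_metrics: &[ForwardPassMetrics]) -> u64 {
    per_rank_metrics.iter().map(|m| m.kv_active_blocks).sum()
}
```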
12 changes: 7 additions & 5 deletions components/metrics/src/main.rs
@@ -27,7 +27,7 @@
//! - ISL Blocks: Cumulative count of total blocks in all KV hit rate events
//! - Overlap Blocks: Cumulative count of blocks that were already in the KV cache
use clap::Parser;
use dynamo_llm::kv_router::scheduler::KVHitRateEvent;
use dynamo_llm::kv_router::protocols::{KVHitRateEvent, WorkerDp};
use dynamo_llm::kv_router::KV_HIT_RATE_SUBJECT;
use dynamo_runtime::{
    error, logging,
@@ -180,14 +180,15 @@ async fn app(runtime: Runtime) -> Result<()> {
    tracing::debug!("Successfully subscribed to KV hit rate events");

    while let Some(msg) = subscriber.next().await {
        match serde_json::from_slice::<KVHitRateEvent>(&msg.payload) {
        match serde_json::from_slice::<KVHitRateEvent<WorkerDp>>(&msg.payload) {
            Ok(event) => {
                // TODO: Lower to debug
                let cache_hit_pct =
                    (event.overlap_blocks as f64 / event.isl_blocks as f64) * 100.0;
                tracing::debug!(
                    "Received KV hit rate event: worker_id={}, isl_blocks={}, overlap_blocks={}, cache_hit_pct={:.2}%",
                    event.worker_id,
                    "Received KV hit rate event: worker_id={}, dp_rank={}, isl_blocks={}, overlap_blocks={}, cache_hit_pct={:.2}%",
                    event.worker.worker_id,
                    event.worker.dp_rank.unwrap_or(0),
                    event.isl_blocks,
                    event.overlap_blocks,
                    cache_hit_pct
@@ -197,7 +198,8 @@
                let mut metrics = metrics_collector_clone.lock().await;
                metrics.update_kv_hit_rate(
                    &config_clone,
                    event.worker_id,
                    // TODO: this will not take care of dp ranks
                    event.worker.worker_id,
                    event.isl_blocks,
                    event.overlap_blocks,
                );
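For orientation, a minimal sketch of the event shape these hunks imply: `KVHitRateEvent` is now generic over a worker type, and `WorkerDp` pairs a worker id with an optional DP rank. Field names are inferred from their usage above (including `worker: worker_id` in the mock_worker hunk, which suggests a plain id can also fill the generic slot); the actual definitions live in `dynamo_llm::kv_router::protocols` and may differ.

```rust
use serde::{Deserialize, Serialize};

// Worker identity extended with an optional data-parallel rank (sketch).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WorkerDp {
    pub worker_id: i64,
    pub dp_rank: Option<u32>,
}

// KV hit-rate event, now generic over the worker identity type (sketch).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct KVHitRateEvent<W> {
    pub worker: W,
    pub isl_blocks: usize,
    pub overlap_blocks: usize,
}
```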
4 changes: 2 additions & 2 deletions components/router/src/main.rs
@@ -25,7 +25,7 @@ use std::sync::Arc;
use clap::Parser;

use dynamo_llm::kv_router::{
    protocols::WorkerSelectionResult,
    protocols::{WorkerDp, WorkerSelectionResult},
    scheduler::{DefaultWorkerSelector, KvSchedulerError, SchedulingRequest},
    scoring::ProcessedEndpoints,
    KvRouter, WorkerSelector,
@@ -89,7 +89,7 @@ impl WorkerSelector for CustomWorkerSelector {
        workers: &ProcessedEndpoints,
        request: &SchedulingRequest,
        block_size: usize,
    ) -> Result<WorkerSelectionResult, KvSchedulerError> {
    ) -> Result<WorkerSelectionResult<WorkerDp>, KvSchedulerError> {
        // customize logic here
        // F12 into [DefaultWorkerSelector] to see the original logic
        self.0.select_worker(workers, request, block_size)
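Since the selector trait now returns a result parameterized over the worker identity, a custom selector can pick a specific (worker_id, dp_rank) pair rather than just a worker. A sketch of what the generic result might look like (field layout assumed for illustration, not verbatim from the repo):

```rust
// Sketch: selection result parameterized over the worker identity type,
// so a KV-aware selector can target an individual DP rank.
pub struct WorkerSelectionResult<W> {
    pub worker: W,            // e.g. WorkerDp { worker_id, dp_rank }
    pub required_blocks: u64, // assumed: blocks the request needs
    pub overlap_blocks: u64,  // assumed: blocks already cached for this worker
}
```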
91 changes: 29 additions & 62 deletions examples/vllm_v1/README.md
@@ -17,16 +17,15 @@ limitations under the License.

# vLLM Deployment Examples

This directory contains examples for deploying vLLM models in both aggregated and disaggregated configurations.
This directory contains examples for deploying vLLM models in an aggregated configuration with data parallelism (DP).

## Prerequisites

1. Install vLLM:
```bash
# Note: Currently requires installation from main branch
# From vLLM 0.8.6 onwards, you can install directly from wheel
git clone https://github.com/vllm-project/vllm.git
VLLM_USE_PRECOMPILED=1 uv pip install --editable ./vllm/
cd vllm && git checkout d459fae0a2c464e28680bc6d564c1de1b295029e
VLLM_USE_PRECOMPILED=1 uv pip install --editable .
```

2. Start required services:
@@ -36,78 +35,46 @@ docker compose -f deploy/metrics/docker-compose.yml up -d

## Running the Server

### Aggregated Deployment
### Aggregated Deployment with Multiple Disconnected DP Engines

This serves the leader AsyncLLM engine plus the number of DP ranks you specify:
```bash
cd examples/vllm_v1
dynamo serve graphs.agg:Frontend -f configs/agg.yaml
```

### Disaggregated Deployment
```bash
cd examples/vllm_v1
dynamo serve graphs.disagg:Frontend -f configs/disagg.yaml
```

To run additional DP ranks headless on the same node or on other nodes, run:

```
VLLM_LOGGING_LEVEL=DEBUG CUDA_VISIBLE_DEVICES=1 VLLM_USE_V1=1 vllm serve Qwen/Qwen3-0.6B -dp 1 -dpr 1 --data-parallel-address 127.0.0.1 --data-parallel-rpc-port 62300 --data-parallel-size-local 1 --enforce-eager --headless --kv-events-config '{"enable_kv_cache_events": true, "publisher": "zmq"}' --enable-prefix-caching
```

> **Reviewer comment:** V1 and prefix caching are enabled by default.

## Testing the API
To test, run the curl request below. With KV routing, identical requests will keep landing on the same worker, so vary the prompt to see requests routed to different DP workers.

```bash
curl localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "Qwen/Qwen3-0.6B",
    "messages": [
      {
        "role": "user",
        "content": "In the heart of Eldoria, an ancient land of boundless magic and mysterious creatures, lies the long-forgotten city of Aeloria. Once a beacon of knowledge and power, Aeloria was buried beneath the shifting sands of time, lost to the world for centuries. You are an intrepid explorer, known for your unparalleled curiosity and courage, who has stumbled upon an ancient map hinting at ests that Aeloria holds a secret so profound that it has the potential to reshape the very fabric of reality. Your journey will take you through treacherous deserts, enchanted forests, and across perilous mountain ranges. Your Task: Character Background: Develop a detailed background for your character. Describe their motivations for seeking out Aeloria, their skills and weaknesses, and any personal connections to the ancient city or its legends. Are they driven by a quest for knowledge, a search for lost familt clue is hidden."
      }
    ],
    "stream": false,
    "max_tokens": 30
  }'
```

For more detailed explanations, refer to the main [LLM examples README](../llm/README.md).



## Deepseek R1

To run DSR1 model please first follow the Ray setup from the [multinode documentation](../../docs/examples/multinode.md).

### Aggregated Deployment

```bash
cd examples/vllm_v1
dynamo serve graphs.agg:Frontend -f configs/deepseek_r1/agg.yaml
```


### Disaggregated Deployment

To create frontend with a single decode worker:
```bash
cd examples/vllm_v1
dynamo serve graphs.agg:Frontend -f configs/deepseek_r1/disagg.yaml
```

To create a single decode worker:
```bash
cd examples/vllm_v1
dynamo serve components.worker:VllmDecodeWorker -f configs/deepseek_r1/disagg.yaml
```

TODO:
- Currently, if you run more than one instance or worker on the same node, it will fail because the ZmqKvPublishers' ports will overlap; some port offsetting is needed to manage that (see the sketch below).

To create a single prefill worker:
```bash
cd examples/vllm_v1
dynamo serve components.worker:VllmPrefillWorker -f configs/deepseek_r1/disagg.yaml
ServiceArgs:
workers: 1 # 2 workers not supported
```
- It would be best to distill the vLLM serve into a VllmHeadlessWorker using run_headless(self.engine_args). This is relatively simple; the main difficulty is that if you want to add the ZmqKvEventPublisher to these nodes (which would be easier for multi-node, since you would then only need to set up NATS and not worry about ports), they will have a different lease_id than the leader worker. This is a problem because we don't actually route requests to these dp_ranks directly, yet the KV Router and KV Indexer will see their KVEvents as coming from a separate "worker". We still need to route the KVEvents through the leader AsyncLLM engine, and that engine will take care of routing to the dp ranks.
- To address this, we could create a concept of worker groups, i.e. components whose lease_ids are tied to a single leader worker.
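
One possible shape for the port-offsetting fix mentioned in the TODO above (a sketch only; the endpoint format and base port are assumptions, not repo API):

```rust
// Derive a per-rank ZMQ endpoint so engines co-located on one node
// don't collide on the KV-event publisher port.
fn zmq_endpoint_for_rank(base_port: u16, dp_rank: u16) -> String {
    format!("tcp://127.0.0.1:{}", base_port + dp_rank)
}
```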

## Testing

Send a test request using curl:
```bash
curl localhost:8000/v1/completions \
-H "Content-Type: application/json" \
-d '{
"model": "deepseek-ai/DeepSeek-R1",
"prompt": "In the heart of Eldoria...",
"stream": false,
"max_tokens": 30
}'
```
For more detailed explanations, refer to the main [LLM examples README](../llm/README.md).
13 changes: 6 additions & 7 deletions examples/vllm_v1/components/frontend.py
@@ -17,7 +17,7 @@
import subprocess
from pathlib import Path

from components.simple_load_balancer import SimpleLoadBalancer
from components.worker import VllmDecodeWorker
from fastapi import FastAPI
from pydantic import BaseModel

@@ -42,9 +42,8 @@ def get_dynamo_run_binary():
class FrontendConfig(BaseModel):
    """Configuration for the Frontend service including model and HTTP server settings."""

    served_model_name: str
    endpoint: str
    port: int = 8080
    router_mode: str = "round-robin"


# TODO: move these to common for all LLMs once we adopt dynamo-run
@@ -58,7 +57,7 @@ class FrontendConfig(BaseModel):
    app=FastAPI(title="LLM Example"),
)
class Frontend:
    worker = depends(SimpleLoadBalancer)
    worker = depends(VllmDecodeWorker)

    def __init__(self):
        """Initialize Frontend service with HTTP server and model configuration."""
@@ -74,20 +73,20 @@ def start_ingress_and_processor(self):
            f"Starting HTTP server and processor on port {self.frontend_config.port}"
        )
        dynamo_run_binary = get_dynamo_run_binary()
        endpoint = f"dyn://{self.frontend_config.endpoint}"

        logger.info(
            f"Starting HTTP server and processor on port {self.frontend_config.port}"
        )
        logger.info(f"Endpoint: {endpoint}")

        self.process = subprocess.Popen(
            [
                dynamo_run_binary,
                "in=http",
                f"out={endpoint}",
                "out=dyn",
                "--http-port",
                str(self.frontend_config.port),
                "--router-mode",
                self.frontend_config.router_mode,
            ],
            stdout=None,
            stderr=None,
100 changes: 100 additions & 0 deletions examples/vllm_v1/components/headless_worker.py
@@ -0,0 +1,100 @@
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# Work In Progress. This is not usable currently

import asyncio
import logging
import os
import signal
import socket
from typing import Optional

from utils.args import parse_vllm_args
from vllm import run_headless
from vllm.distributed.kv_events import KVEventsConfig

from dynamo.sdk import service

logger = logging.getLogger(__name__)

BLOCK_SIZE = 16


@service(
    dynamo={
        "enabled": True,
        "namespace": "dynamo",
    },
    resources={"gpu": 1, "cpu": "10", "memory": "20Gi"},
    workers=1,
)
class VllmHeadlessWorker:
    def __init__(self):
        class_name = self.__class__.__name__
        self.engine_args = parse_vllm_args(class_name, "")
        self.engine_args.kv_events_config = KVEventsConfig(
            enable_kv_cache_events=True, publisher="zmq"
        )
        if not self.engine_args.block_size:
            logger.info(f"block_size not set, default to {BLOCK_SIZE}")
            self.engine_args.block_size = BLOCK_SIZE

        os.environ["VLLM_NO_USAGE_STATS"] = "1"  # Avoid internal HTTP requests

        model_config = self.engine_args.create_model_config()
        self.default_sampling_params = model_config.get_diff_sampling_param()

        self.kv_publishers = []

        signal.signal(signal.SIGTERM, self.shutdown_vllm_engine)
        signal.signal(signal.SIGINT, self.shutdown_vllm_engine)

        self.set_side_channel_host_and_port()

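    # NOTE: run_headless is expected to block until engine shutdown, so
    # calling it directly here will likely stall the event loop (WIP).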
    async def async_init(self):
        run_headless(self.engine_args)

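    # NOTE: self.engine_client is never initialized in this WIP class; the
    # shutdown path below assumes it is attached elsewhere before signals fire.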
    def shutdown_vllm_engine(self, signum, frame):
        """Shutdown the background loop"""
        logger.info(f"Received signal {signum}, shutting down")
        loop = asyncio.get_event_loop()
        try:
            self.engine_client.shutdown()
            for publisher in self.kv_publishers:
                publisher.shutdown()
            logger.info("VllmWorker shutdown complete")
        except Exception as e:
            logger.error(f"Error during shutdown: {e}")
        finally:
            loop.stop()

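    # NOTE: the free port probed below is released before vLLM re-binds it,
    # so this is best-effort and leaves a small race window.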
    def set_side_channel_host_and_port(
        self, hostname: Optional[str] = None, port: Optional[int] = None
    ):
        """vLLM V1 NixlConnector creates a side channel to exchange metadata with other NIXL connectors.
        This sets the port number for the side channel.
        """
        if hostname is None:
            hostname = socket.gethostname()
        if port is None:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.bind(("", 0))  # Bind to a free port provided by the host.
                port = s.getsockname()[1]  # Get the port number assigned.
        logger.debug("Setting VLLM_NIXL_SIDE_CHANNEL_HOST to %s", hostname)
        os.environ["VLLM_NIXL_SIDE_CHANNEL_HOST"] = hostname
        logger.debug("Setting VLLM_NIXL_SIDE_CHANNEL_PORT to %s", port)
        os.environ["VLLM_NIXL_SIDE_CHANNEL_PORT"] = str(port)