Commit a5c7264

Rename class
Signed-off-by: sfeng33 <[email protected]>
1 parent: 05f1a8b

File tree

3 files changed (+15, -13 lines)

tests/entrypoints/test_renderer.py (+8, -7 lines)
@@ -7,7 +7,7 @@
 
 import pytest
 
-from vllm.entrypoints.renderer import Renderer
+from vllm.entrypoints.renderer import CompletionRenderer
 
 
 @dataclass
@@ -41,9 +41,9 @@ def mock_async_tokenizer():
 
 @pytest.fixture
 def renderer(mock_model_config, mock_tokenizer):
-    return Renderer(model_config=mock_model_config,
-                    tokenizer=mock_tokenizer,
-                    async_tokenizer_pool={})
+    return CompletionRenderer(model_config=mock_model_config,
+                              tokenizer=mock_tokenizer,
+                              async_tokenizer_pool={})
 
 
 class TestRenderPrompt:
@@ -161,9 +161,10 @@ async def test_max_length_exceeded(self, renderer):
 
     @pytest.mark.asyncio
     async def test_no_tokenizer_for_text(self, mock_model_config):
-        renderer_no_tokenizer = Renderer(model_config=mock_model_config,
-                                         tokenizer=None,
-                                         async_tokenizer_pool={})
+        renderer_no_tokenizer = CompletionRenderer(
+            model_config=mock_model_config,
+            tokenizer=None,
+            async_tokenizer_pool={})
 
         with pytest.raises(ValueError, match="No tokenizer available"):
             await renderer_no_tokenizer.render_prompt(
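For context, a minimal sketch of how the renamed fixture is typically consumed elsewhere in this test module; the diff truncates the actual render_prompt call, so the parameter names below (prompt, max_length) are illustrative assumptions, not taken from this commit:

import pytest

@pytest.mark.asyncio
async def test_render_with_tokenizer(renderer):
    # Hypothetical test: the real render_prompt signature is not shown in
    # this diff, so prompt/max_length here are assumed parameter names.
    out = await renderer.render_prompt(prompt="hello", max_length=100)
    assert out is not None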

vllm/entrypoints/openai/serving_engine.py (+6, -5 lines)
@@ -60,7 +60,7 @@
                              TranslationRequest)
 from vllm.entrypoints.openai.serving_models import OpenAIServingModels
 from vllm.entrypoints.openai.tool_parsers import ToolParser
-from vllm.entrypoints.renderer import Renderer
+from vllm.entrypoints.renderer import BaseRenderer, CompletionRenderer
 # yapf: enable
 from vllm.inputs.data import EmbedsPrompt as EngineEmbedsPrompt
 from vllm.inputs.data import TokensPrompt as EngineTokensPrompt
@@ -226,14 +226,15 @@ def __init__(
             AsyncMicrobatchTokenizer] = {}
         self.log_error_stack = log_error_stack
 
-    def _get_renderer(self, tokenizer: Optional[AnyTokenizer]) -> Renderer:
+    def _get_renderer(self, tokenizer: Optional[AnyTokenizer]) -> BaseRenderer:
         """
         Get a Renderer instance with the provided tokenizer.
         Uses shared async tokenizer pool for efficiency.
         """
-        return Renderer(model_config=self.model_config,
-                        tokenizer=tokenizer,
-                        async_tokenizer_pool=self._async_tokenizer_pool)
+        return CompletionRenderer(
+            model_config=self.model_config,
+            tokenizer=tokenizer,
+            async_tokenizer_pool=self._async_tokenizer_pool)
 
     def _get_async_tokenizer(self, tokenizer) -> AsyncMicrobatchTokenizer:
         """

vllm/entrypoints/renderer.py (+1, -1 lines)
@@ -80,7 +80,7 @@ async def render_prompt(
         raise NotImplementedError
 
 
-class Renderer(BaseRenderer):
+class CompletionRenderer(BaseRenderer):
 
     def __init__(
         self,
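Putting the three files together, the renamed hierarchy has roughly the following shape. This is a condensed sketch assuming the constructor parameters and the "No tokenizer available" behavior seen in the other hunks, not the full vllm/entrypoints/renderer.py:

from typing import Any, Optional


class BaseRenderer:
    async def render_prompt(self, *args: Any, **kwargs: Any) -> Any:
        # Abstract entry point; concrete renderers must override this.
        raise NotImplementedError


class CompletionRenderer(BaseRenderer):
    def __init__(self,
                 model_config: Any,
                 tokenizer: Optional[Any] = None,
                 async_tokenizer_pool: Optional[dict] = None) -> None:
        self.model_config = model_config
        self.tokenizer = tokenizer
        # Shared pool lets many renderers reuse one async tokenizer wrapper.
        self.async_tokenizer_pool = async_tokenizer_pool or {}

    async def render_prompt(self, *args: Any, **kwargs: Any) -> Any:
        if self.tokenizer is None:
            # Matches the error path exercised by test_no_tokenizer_for_text.
            raise ValueError("No tokenizer available")
        ...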
