diff --git a/tests/ut/core/test_schedule_config.py b/tests/ut/core/test_schedule_config.py new file mode 100644 index 00000000000..cd31a7b2f9f --- /dev/null +++ b/tests/ut/core/test_schedule_config.py @@ -0,0 +1,135 @@ +# +# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from vllm.config import SchedulerConfig + +from tests.ut.base import TestBase +from vllm_ascend.core.schedule_config import AscendSchedulerConfig + + +class TestAscendSchedulerConfig(TestBase): + + def setUp(self): + self.basic_scheduler_config = SchedulerConfig( + max_num_batched_tokens=8192, + max_model_len=8192, + is_multimodal_model=False, + send_delta_data=False, + is_encoder_decoder=False, + ) + + def test_initialize_from_config_with_default(self): + # No additional config given, check the default value here. + ascend_config = AscendSchedulerConfig.initialize_from_config( + self.basic_scheduler_config, {}) + self.assertEqual(ascend_config.enable_chunked_prefill, False) + self.assertEqual(ascend_config.policy, "fcfs") + self.assertEqual(ascend_config.scheduler_cls, + "vllm_ascend.core.scheduler.AscendScheduler") + self.assertEqual(ascend_config.max_num_encoder_input_tokens, 8192) + self.assertEqual(ascend_config.encoder_cache_size, 8192) + + def test_initialize_from_config_with_override(self): + # test override + ascend_config = AscendSchedulerConfig.initialize_from_config( + self.basic_scheduler_config, + AscendSchedulerConfig( + enable_chunked_prefill=False, + policy="fcfs", + scheduler_cls="vllm_ascend.core.scheduler.AscendScheduler", + max_num_batched_tokens=8192, + max_model_len=2048, + max_long_partial_prefills=1, + long_prefill_token_threshold=512, + ), + ) + self.assertEqual(ascend_config.enable_chunked_prefill, False) + self.assertEqual(ascend_config.policy, "fcfs") + self.assertEqual(ascend_config.scheduler_cls, + "vllm_ascend.core.scheduler.AscendScheduler") + self.assertEqual(ascend_config.max_num_batched_tokens, 8192) + self.assertEqual(ascend_config.encoder_cache_size, 8192) + self.assertEqual(ascend_config.max_long_partial_prefills, 1) + self.assertEqual(ascend_config.long_prefill_token_threshold, 512) + + def test_not_implemented_policy(self): + with self.assertRaises(NotImplementedError) as context: + AscendSchedulerConfig.initialize_from_config( + self.basic_scheduler_config, + AscendSchedulerConfig( + policy="custom_policy", + max_num_batched_tokens=8192, + max_model_len=2048, + ), + ) + self.assertIn( + "currently AscendScheduler only supports fcfs policy", + str(context.exception), + ) + + def test_no_override(self): + ascend_config = AscendSchedulerConfig.initialize_from_config( + self.basic_scheduler_config, {}) + self.assertEqual(ascend_config.max_num_encoder_input_tokens, 8192) + self.assertEqual(ascend_config.encoder_cache_size, 8192) + + def test_valid_config_with_multimodal(self): + config = AscendSchedulerConfig.initialize_from_config( + SchedulerConfig(is_multimodal_model=True, + max_num_batched_tokens=8192), {}) + 
self.assertTrue(config.is_multimodal_model) + + def test_valid_config_with_chunked_prefill(self): + ascend_config = AscendSchedulerConfig.initialize_from_config( + self.basic_scheduler_config, + AscendSchedulerConfig( + enable_chunked_prefill=True, + max_num_batched_tokens=8192, + max_model_len=8192, + ), + ) + self.assertEqual(ascend_config.max_num_batched_tokens, 8192) + self.assertEqual(ascend_config.max_model_len, 8192) + self.assertTrue(ascend_config.enable_chunked_prefill) + + def test_invalid_config_without_chunked_prefill(self): + with self.assertRaises(ValueError) as context: + AscendSchedulerConfig.initialize_from_config( + self.basic_scheduler_config, + AscendSchedulerConfig( + enable_chunked_prefill=False, + max_num_batched_tokens=2048, + max_model_len=8192, + ), + ) + self.assertIn( + "Ascend scheduler is enabled without chunked prefill feature", + str(context.exception), + ) + self.assertIn("max_num_batched_tokens (2048)", str(context.exception)) + self.assertIn("max_model_len (8192)", str(context.exception)) + + def test_initialize_from_config_with_pd_transfer(self): + ascend_config = AscendSchedulerConfig.initialize_from_config( + self.basic_scheduler_config, + AscendSchedulerConfig( + enable_pd_transfer=True, + decode_max_num_seqs=48, + max_num_batched_tokens=8192, + max_model_len=4096, + ), + ) + self.assertEqual(ascend_config.enable_pd_transfer, True) + self.assertEqual(ascend_config.decode_max_num_seqs, 48) \ No newline at end of file diff --git a/tests/ut/core/test_scheduler.py b/tests/ut/core/test_scheduler.py new file mode 100644 index 00000000000..a6468d8714f --- /dev/null +++ b/tests/ut/core/test_scheduler.py @@ -0,0 +1,1461 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +from typing import Any, Dict, List, Optional, Tuple +from unittest.mock import MagicMock, patch + +import pytest +import torch +from vllm.config import (CacheConfig, KVTransferConfig, ModelConfig, + SchedulerConfig, SpeculativeConfig, VllmConfig) +from vllm.multimodal.inputs import (MultiModalFeatureSpec, + MultiModalKwargsItem, PlaceholderRange) +from vllm.sampling_params import SamplingParams +from vllm.utils.hashing import sha256 +from vllm.v1.core.kv_cache_utils import (get_request_block_hasher, + init_none_hash) +from vllm.v1.core.sched.output import SchedulerOutput +from vllm.v1.kv_cache_interface import (FullAttentionSpec, KVCacheConfig, + KVCacheGroupSpec) +from vllm.v1.outputs import DraftTokenIds, ModelRunnerOutput +from vllm.v1.request import Request, RequestStatus +from vllm.v1.structured_output import StructuredOutputManager + +from tests.ut.base import TestBase +from vllm_ascend.core.scheduler import AscendScheduler +from vllm_ascend.core.scheduler_dynamic_batch import SchedulerDynamicBatch + +EOS_TOKEN_ID = 50256 +MODEL = "Qwen3-0.6B" +ENABLE_PREFIX_CACHING = None +PROMPT_LOGPROBS = None +ENABLE_CHUNKED_PREFILL = False +MAX_NUM_BATCHED_TOKENS = 10000 +LONG_PREFILL_TOKEN_THRESHOLD = 0 +NUM_SPECULATIVE_TOKENS = None +MAX_NUM_SEQS = 16 + + +def create_requests( + num_requests: int, + num_tokens: int = 10, + mm_positions: Optional[list[PlaceholderRange]] = None, + max_tokens: int = 16, + stop_token_ids: Optional[list[int]] = None, + block_size: int = 3, + hash_fn=sha256, +): + init_none_hash(hash_fn) + prompt_logprobs = PROMPT_LOGPROBS + sampling_params = SamplingParams(ignore_eos=False, + max_tokens=max_tokens, + stop_token_ids=stop_token_ids, + prompt_logprobs=prompt_logprobs) + requests = [] + for i in 
range(num_requests): + mm_features = [] + if mm_positions is not None: + mm_position = mm_positions[i] + for j, position in enumerate(mm_position): + identifier = f"hash{i}_{j}" + mm_feature = MultiModalFeatureSpec( + data=MultiModalKwargsItem.dummy("dummy_m"), + mm_position=position, + identifier=identifier, + modality="image") + mm_features.append(mm_feature) + request = Request(request_id=f"{i}", + prompt_token_ids=[i] * num_tokens, + sampling_params=sampling_params, + eos_token_id=EOS_TOKEN_ID, + pooling_params=None, + mm_features=mm_features if mm_features else None, + block_hasher=get_request_block_hasher( + block_size, hash_fn)) + requests.append(request) + return requests + + +def make_output(scheduler): + req_ids = [req.request_id for req in scheduler.running] + req_id_to_index = { + req.request_id: i + for i, req in enumerate(scheduler.running) + } + sampled_token_ids = [[1000]] * len(scheduler.running) + + logprobs = None + + modelrunner_output = ModelRunnerOutput( + req_ids=req_ids, + req_id_to_index=req_id_to_index, + sampled_token_ids=sampled_token_ids, + logprobs=logprobs, + prompt_logprobs_dict={}, + pooler_output=[], + ) + return modelrunner_output + + +@pytest.mark.skip("Ascend Scheduler has been deprecated") +class TestAscendScheduler(TestBase): + + @patch("vllm.config.ModelConfig.__post_init__", MagicMock()) + @patch("vllm.config.VllmConfig.__post_init__", MagicMock()) + @patch('vllm.v1.core.sched.scheduler.compute_encoder_budget') + def create_scheduler(self, mock_compute_encoder_budget): + mock_compute_encoder_budget.return_value = [100, 100] + use_kv_connector = False + block_size = 16 + + scheduler_config = SchedulerConfig( + max_num_seqs=16, + max_model_len=MAX_NUM_BATCHED_TOKENS, + long_prefill_token_threshold=LONG_PREFILL_TOKEN_THRESHOLD, + disable_chunked_mm_input=False, + enable_chunked_prefill=ENABLE_CHUNKED_PREFILL, + max_num_batched_tokens=MAX_NUM_BATCHED_TOKENS, + ) + + scheduler_config.max_num_encoder_input_tokens = 10000 + scheduler_config.encoder_cache_size = 10000 + scheduler_config.chunked_prefill_enabled = False + + model_config = ModelConfig( + model=MODEL, + task="auto", + tokenizer=MODEL, + tokenizer_mode="auto", + trust_remote_code=True, + dtype="float16", + seed=42, + max_model_len=MAX_NUM_BATCHED_TOKENS, + ) + model_config.pooler_config = MagicMock() + model_config.multimodal_config = MagicMock() + model_config.hf_config = MagicMock() + model_config.hf_config.is_encoder_decoder = False + # Cache config, optionally force APC + kwargs_cache: Dict[str, + Any] = ({} if ENABLE_PREFIX_CACHING is None else { + 'enable_prefix_caching': + ENABLE_PREFIX_CACHING + }) + cache_config = CacheConfig( + block_size=block_size, + gpu_memory_utilization=0.9, + swap_space=0, + cache_dtype="auto", + **kwargs_cache, + ) + + kv_transfer_config = KVTransferConfig( + kv_connector="SharedStorageConnector", + kv_role="kv_both", + kv_connector_extra_config={"shared_storage_path": "local_storage"}, + ) if use_kv_connector else None + + speculative_config: Optional[SpeculativeConfig] = None + if NUM_SPECULATIVE_TOKENS is not None: + speculative_config = SpeculativeConfig( + model="ngram", num_speculative_tokens=NUM_SPECULATIVE_TOKENS) + + vllm_config = VllmConfig( + scheduler_config=scheduler_config, + model_config=model_config, + cache_config=cache_config, + kv_transfer_config=kv_transfer_config, + speculative_config=speculative_config, + ) + + kv_cache_config = KVCacheConfig( + num_blocks=10000, # A large number of blocks to hold all requests + kv_cache_tensors=[], + 
kv_cache_groups=[ + KVCacheGroupSpec(['layer'], + FullAttentionSpec(block_size, 1, 1, + torch.float32, False, + False)) + ], + ) + cache_config.num_gpu_blocks = 10000 + + scheduler = AscendScheduler( + vllm_config=vllm_config, + kv_cache_config=kv_cache_config, + log_stats=True, + block_size=block_size, + structured_output_manager=MagicMock(spec=StructuredOutputManager), + ) + + should_advance = MagicMock() + should_advance.return_value = False + scheduler.structured_output_manager.should_advance = should_advance + + return scheduler + + def test_add_requests(self): + scheduler = self.create_scheduler() + requests = create_requests(num_requests=10) + + for i, request in enumerate(requests): + scheduler.add_request(request) + self.assertIn(request.request_id, scheduler.requests) + self.assertEqual(len(scheduler.waiting), i + 1) + + def test_finish_request(self): + scheduler = self.create_scheduler() + requests = create_requests(num_requests=10) + for request in requests: + scheduler.add_request(request) + + for i, request in enumerate(requests): + scheduler.finish_requests(request.request_id, + RequestStatus.FINISHED_ABORTED) + self.assertNotIn(request.request_id, scheduler.requests) + self.assertEqual(len(scheduler.waiting), 9 - i) + + def test_get_num_unfinished_requests(self): + scheduler = self.create_scheduler() + requests = create_requests(num_requests=10) + for request in requests: + scheduler.add_request(request) + + for i, request in enumerate(requests): + scheduler.finish_requests(request.request_id, + RequestStatus.FINISHED_STOPPED) + self.assertEqual(scheduler.get_num_unfinished_requests(), + len(requests) - i - 1) + + def test_schedule(self): + '''Test scheduling. + Two cases: default APC/no prompt logprobs; APC=True + prompt logprobs + ''' + scheduler = self.create_scheduler() + scheduler.scheduler_config.chunked_prefill_enabled = False + requests = create_requests(num_requests=10) + for request in requests: + scheduler.add_request(request) + + # Test initial scheduling + output = scheduler.schedule() + self.assertEqual(len(output.scheduled_new_reqs), len(requests)) + self.assertEqual(output.scheduled_cached_reqs.num_reqs, 0) + self.assertEqual(len(output.finished_req_ids), 0) + # Verify all requests are scheduled. + for req_id, num_tokens in output.num_scheduled_tokens.items(): + self.assertEqual(num_tokens, + len(requests[int(req_id)].prompt_token_ids)) + + # Verify requests moved from waiting to running + self.assertEqual(len(scheduler.waiting), 0) + self.assertEqual(len(scheduler.running), len(requests)) + for i, request in enumerate(requests): + self.assertEqual(scheduler.running[i], request) + + def test_schedule_multimodal_requests(self): + scheduler = self.create_scheduler() + scheduler.scheduler_config.chunked_prefill_enabled = False + mm_positions = [[PlaceholderRange(offset=i, length=10)] + for i in range(10)] + requests = create_requests( + num_requests=10, + mm_positions=mm_positions, + ) + for request in requests: + scheduler.add_request(request) + + output = scheduler.schedule() + self.assertEqual(len(output.scheduled_new_reqs), len(requests)) + self.assertEqual(output.scheduled_cached_reqs.num_reqs, 0) + self.assertEqual(len(output.finished_req_ids), 0) + for req_id, num_tokens in output.num_scheduled_tokens.items(): + assert num_tokens == len(requests[int(req_id)].prompt_token_ids) + + # Verify all requests are scheduled. 
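+        # Chunked prefill is disabled here, so each full prompt is expected to be scheduled in a single step.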
+ for req_id, num_tokens in output.num_scheduled_tokens.items(): + self.assertEqual(num_tokens, + len(requests[int(req_id)].prompt_token_ids)) + self.assertEqual(len(output.scheduled_encoder_inputs), len(requests)) + for req_id, encoder_input in output.scheduled_encoder_inputs.items(): + assert len(encoder_input) == 1 + + # Verify requests moved from waiting to running + self.assertEqual(len(scheduler.waiting), 0) + self.assertEqual(len(scheduler.running), len(requests)) + for i, request in enumerate(requests): + self.assertEqual(scheduler.running[i], request) + + def test_concurrent_partial_prefills_schedule(self): + '''Test concurrent partial prefills scheduling. + total requests = 10, every request has 10 token. + while set long_prefill_token_threshold = 1, scheduler can + only schedule max_long_partial_prefills long request. + ''' + scheduler = self.create_scheduler() + scheduler.scheduler_config.chunked_prefill_enabled = False + scheduler.scheduler_config.max_long_partial_prefills = 2 + scheduler.scheduler_config.long_prefill_token_threshold = 1 + requests = create_requests(num_requests=10, num_tokens=20) + for request in requests: + scheduler.add_request(request) + + # Test initial scheduling + output = scheduler.schedule() + self.assertEqual(len(output.scheduled_new_reqs), + scheduler.scheduler_config.max_long_partial_prefills) + self.assertEqual(output.scheduled_cached_reqs.num_reqs, 0) + self.assertEqual(len(output.finished_req_ids), 0) + + def test_schedule_enable_prefix_caching(self): + '''Test scheduling. + Two cases: default APC/no prompt logprobs; APC=True + prompt logprobs + ''' + global ENABLE_PREFIX_CACHING + ENABLE_PREFIX_CACHING = True + global PROMPT_LOGPROBS + PROMPT_LOGPROBS = 5 + scheduler = self.create_scheduler() + scheduler.scheduler_config.chunked_prefill_enabled = False + requests = create_requests(num_requests=10) + for request in requests: + scheduler.add_request(request) + + # Test initial scheduling + output = scheduler.schedule() + self.assertEqual(len(output.scheduled_new_reqs), len(requests)) + self.assertEqual(output.scheduled_cached_reqs.num_reqs, 0) + self.assertEqual(len(output.finished_req_ids), 0) + # Verify all requests are scheduled. 
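+        # Prompts are distinct across requests, so prefix caching should not reduce the scheduled token counts.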
+ for req_id, num_tokens in output.num_scheduled_tokens.items(): + self.assertEqual(num_tokens, + len(requests[int(req_id)].prompt_token_ids)) + + # Verify requests moved from waiting to running + self.assertEqual(len(scheduler.waiting), 0) + self.assertEqual(len(scheduler.running), len(requests)) + for i, request in enumerate(requests): + self.assertEqual(scheduler.running[i], request) + + def test_stop_via_update_from_output(self): + """Test stopping behavior through update_from_output""" + global NUM_SPECULATIVE_TOKENS + NUM_SPECULATIVE_TOKENS = 1 + scheduler = self.create_scheduler() + + # Test case 1: Stop on EOS token + requests = create_requests(num_requests=2, max_tokens=10) + for req in requests: + req.num_computed_tokens = req.num_tokens + scheduler.requests[req.request_id] = req + scheduler.running.append(req) + req.status = RequestStatus.RUNNING + + scheduler_output = SchedulerOutput(scheduled_new_reqs=[], + scheduled_cached_reqs=[], + num_scheduled_tokens={ + requests[0].request_id: 1, + requests[1].request_id: 2 + }, + total_num_scheduled_tokens=3, + scheduled_encoder_inputs={}, + scheduled_spec_decode_tokens={ + requests[0].request_id: [], + requests[1].request_id: [10] + }, + num_common_prefix_blocks=0, + finished_req_ids=set(), + free_encoder_mm_hashes=[]) + model_output = ModelRunnerOutput( + req_ids=[req.request_id for req in requests], + req_id_to_index={ + req.request_id: i + for i, req in enumerate(requests) + }, + sampled_token_ids=[[EOS_TOKEN_ID], [10, 11] + ], # First request hits EOS, second continues + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[]) + + scheduler.update_from_output(scheduler_output, model_output) + + # Verify first request stopped, second continues + self.assertEqual(len(scheduler.running), 1) + self.assertEqual(scheduler.running[0].request_id, + requests[1].request_id) + self.assertEqual(requests[0].status, RequestStatus.FINISHED_STOPPED) + self.assertIn(requests[0].request_id, scheduler.finished_req_ids) + self.assertEqual(list(requests[0].output_token_ids), [EOS_TOKEN_ID]) + self.assertEqual(list(requests[1].output_token_ids), [10, 11]) + + # Test case 2: Stop on custom stop token + NUM_SPECULATIVE_TOKENS = 2 + scheduler = self.create_scheduler() + requests = create_requests(num_requests=2, + max_tokens=10, + stop_token_ids=[42, 43]) + for req in requests: + req.num_computed_tokens = req.num_tokens + scheduler.requests[req.request_id] = req + scheduler.running.append(req) + req.status = RequestStatus.RUNNING + + scheduler_output = SchedulerOutput(scheduled_new_reqs=[], + scheduled_cached_reqs=[], + num_scheduled_tokens={ + requests[0].request_id: 3, + requests[1].request_id: 2 + }, + total_num_scheduled_tokens=5, + scheduled_encoder_inputs={}, + scheduled_spec_decode_tokens={ + requests[0].request_id: + [10, 42], + requests[1].request_id: [13] + }, + num_common_prefix_blocks=0, + finished_req_ids=set(), + free_encoder_mm_hashes=[]) + model_output = ModelRunnerOutput( + req_ids=[req.request_id for req in requests], + req_id_to_index={ + req.request_id: i + for i, req in enumerate(requests) + }, + sampled_token_ids=[[10, 42, 12], + [13, 14]], # First request hits stop token + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[]) + + scheduler.update_from_output(scheduler_output, model_output) + + # Verify first request stopped on custom token + self.assertEqual(len(scheduler.running), 1) + self.assertEqual(scheduler.running[0].request_id, + requests[1].request_id) + self.assertEqual(requests[0].status, 
RequestStatus.FINISHED_STOPPED) + self.assertEqual(requests[0].stop_reason, 42) + self.assertIn(requests[0].request_id, scheduler.finished_req_ids) + self.assertEqual(list(requests[0].output_token_ids), [10, 42]) + self.assertEqual(list(requests[1].output_token_ids), [13, 14]) + + # Test case 3: Stop on max tokens + NUM_SPECULATIVE_TOKENS = 2 + scheduler = self.create_scheduler() + requests = create_requests(num_requests=2, max_tokens=2) + for req in requests: + req.num_computed_tokens = req.num_tokens + scheduler.requests[req.request_id] = req + scheduler.running.append(req) + req.status = RequestStatus.RUNNING + + scheduler_output = SchedulerOutput(scheduled_new_reqs=[], + scheduled_cached_reqs=[], + num_scheduled_tokens={ + requests[0].request_id: 3, + requests[1].request_id: 1 + }, + total_num_scheduled_tokens=4, + scheduled_encoder_inputs={}, + scheduled_spec_decode_tokens={ + requests[0].request_id: + [10, 11], + requests[1].request_id: [] + }, + num_common_prefix_blocks=0, + finished_req_ids=set(), + free_encoder_mm_hashes=[]) + model_output = ModelRunnerOutput( + req_ids=[req.request_id for req in requests], + req_id_to_index={ + req.request_id: i + for i, req in enumerate(requests) + }, + sampled_token_ids=[[10, 11, 12], + [13]], # First request exceeds max_tokens + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[]) + scheduler.update_from_output(scheduler_output, model_output) + + # Verify first request stopped due to length + self.assertEqual(len(scheduler.running), 1) + self.assertEqual(scheduler.running[0].request_id, + requests[1].request_id) + self.assertEqual(requests[0].status, + RequestStatus.FINISHED_LENGTH_CAPPED) + self.assertIn(requests[0].request_id, scheduler.finished_req_ids) + self.assertEqual(list(requests[0].output_token_ids), [10, 11]) + self.assertEqual(list(requests[1].output_token_ids), [13]) + + # Test case 4: Ignore EOS flag + scheduler = self.create_scheduler() + requests = create_requests(num_requests=1, max_tokens=10) + requests[0].sampling_params.ignore_eos = True + requests[0].num_computed_tokens = requests[0].num_tokens + scheduler.requests[requests[0].request_id] = requests[0] + scheduler.running.append(requests[0]) + + scheduler_output = SchedulerOutput( + scheduled_new_reqs=[], + scheduled_cached_reqs=[], + num_scheduled_tokens={requests[0].request_id: 3}, + total_num_scheduled_tokens=3, + scheduled_encoder_inputs={}, + scheduled_spec_decode_tokens={ + requests[0].request_id: [EOS_TOKEN_ID, 10] + }, + num_common_prefix_blocks=0, + finished_req_ids=set(), + free_encoder_mm_hashes=[]) + model_output = ModelRunnerOutput( + req_ids=[requests[0].request_id], + req_id_to_index={requests[0].request_id: 0}, + sampled_token_ids=[[EOS_TOKEN_ID, 10, 11]], + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[]) + + scheduler.update_from_output(scheduler_output, model_output) + + # Verify request continues past EOS + self.assertEqual(len(scheduler.running), 1) + self.assertFalse(requests[0].is_finished()) + self.assertEqual(list(requests[0].output_token_ids), + [EOS_TOKEN_ID, 10, 11]) + + def test_schedule_concurrent_batches(self): + global MAX_NUM_BATCHED_TOKENS + global ENABLE_PREFIX_CACHING + global ENABLE_CHUNKED_PREFILL + global MAX_NUM_SEQS + global PROMPT_LOGPROBS + ENABLE_PREFIX_CACHING = None + MAX_NUM_BATCHED_TOKENS = 1024 + MAX_NUM_SEQS = 2 + ENABLE_CHUNKED_PREFILL = True + PROMPT_LOGPROBS = None + + enable_prefix_caching_list = [None, True] + prompt_logprobs_list = [None, 5] + + for i in range(len(enable_prefix_caching_list)): + 
ENABLE_PREFIX_CACHING = enable_prefix_caching_list[i] + PROMPT_LOGPROBS = prompt_logprobs_list[i] + scheduler = self.create_scheduler() + requests = create_requests( + num_requests=2, + num_tokens=512, + ) + + # Schedule the first request. + scheduler.add_request(requests[0]) + scheduler_output0 = scheduler.schedule() + self.assertEqual(len(scheduler_output0.scheduled_new_reqs), 1) + self.assertEqual( + scheduler_output0.num_scheduled_tokens[requests[0].request_id], + 512) + + # The first request is still running, so only schedule the second request. + scheduler.add_request(requests[1]) + scheduler_output1 = scheduler.schedule() + self.assertEqual(len(scheduler_output1.scheduled_new_reqs), 1) + self.assertEqual( + scheduler_output1.num_scheduled_tokens[requests[1].request_id], + 512) + + # Model output of the first request. + model_runner_output = ModelRunnerOutput( + req_ids=[requests[0].request_id], + req_id_to_index={requests[0].request_id: 0}, + sampled_token_ids=[[0]], + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[]) + + scheduler.update_from_output(scheduler_output0, + model_runner_output) + + # Schedule the next step. + # The first request can be scheduled again while the second + # request is still running. + scheduler.schedule() + # Model output of the second request. + model_runner_output = ModelRunnerOutput( + req_ids=[requests[1].request_id], + req_id_to_index={requests[1].request_id: 0}, + sampled_token_ids=[[0]], + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[]) + + scheduler.update_from_output(scheduler_output1, + model_runner_output) + + def test_schedule_spec_decoding_stats(self): + """Test scheduling behavior with speculative decoding. + + This test verifies that: + 1. Speculated tokens get scheduled correctly + 2. 
Spec decoding stats properly count number of draft and accepted tokens + """ + spec_tokens_list: List[List[List[int]]] = [[[1, 2, 3]], [[1, 2, 3]], + [[1, 2], [3]], [[1]], [[]], + [[1, 2, 3], [4, 5, 6]]] + output_tokens_list: List[List[List[int]]] = [[[1, 2, 3, 4]], [[1, 5]], + [[1, 2, 5], [3, 4]], + [[1, 2]], [[5]], + [[1, 2, 7], [4, 8]]] + expected_list: List[Tuple[int, int, + int, List[int]]] = [(1, 3, 3, [1, 1, 1]), + (1, 3, 1, [1, 0, 0]), + (2, 3, 3, [2, 1]), + (1, 1, 1, [1]), + (0, 0, 0, [0]), + (2, 6, 3, [2, 1, 0])] + + global NUM_SPECULATIVE_TOKENS + for idx in range(len(spec_tokens_list)): + spec_tokens = spec_tokens_list[idx] + output_tokens = output_tokens_list[idx] + expected = expected_list[idx] + num_spec_tokens = max(1, max(len(t) for t in spec_tokens)) + NUM_SPECULATIVE_TOKENS = num_spec_tokens + scheduler = self.create_scheduler() + requests = create_requests(num_requests=len(spec_tokens), + num_tokens=1) + req_ids = [] + req_to_index = {} + for i, request in enumerate(requests): + scheduler.add_request(request) + req_ids.append(request.request_id) + req_to_index[request.request_id] = i + + # Schedule a decode, which will also draft speculative tokens + output = scheduler.schedule() + self.assertEqual(len(output.scheduled_new_reqs), len(requests)) + self.assertEqual(output.total_num_scheduled_tokens, len(requests)) + for i in range(len(requests)): + req_id = requests[i].request_id + self.assertEqual(output.num_scheduled_tokens[req_id], 1) + self.assertNotIn(req_id, output.scheduled_spec_decode_tokens) + + model_runner_output = ModelRunnerOutput( + req_ids=req_ids, + req_id_to_index=req_to_index, + sampled_token_ids=[[0] for _ in range(len(requests))], + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[]) + draft_token_ids = DraftTokenIds(req_ids, spec_tokens) + + engine_core_outputs = scheduler.update_from_output( + output, model_runner_output) + scheduler.update_draft_token_ids(draft_token_ids) + + for i in range(len(requests)): + running_req = scheduler.running[i] + # The prompt token + self.assertEqual(running_req.num_computed_tokens, 1) + # The prompt token and the sampled token + self.assertEqual(running_req.num_tokens, 2) + # The prompt token, the sampled token, and the speculated tokens + self.assertEqual(running_req.num_tokens_with_spec, + 2 + len(spec_tokens[i])) + + # No draft or accepted tokens counted yet + self.assertTrue( + not engine_core_outputs + or (engine_core_outputs[0].scheduler_stats.spec_decoding_stats + is None)) + + # Schedule the speculated tokens for validation + output = scheduler.schedule() + self.assertEqual(len(output.scheduled_new_reqs), 0) + # The sampled token and speculated tokens + self.assertEqual( + output.total_num_scheduled_tokens, + len(requests) + sum(len(ids) for ids in spec_tokens)) + for i in range(len(requests)): + req_id = requests[i].request_id + self.assertEqual(output.num_scheduled_tokens[req_id], + 1 + len(spec_tokens[i])) + if spec_tokens[i]: + self.assertEqual( + len(output.scheduled_spec_decode_tokens[req_id]), + len(spec_tokens[i])) + else: + self.assertNotIn(req_id, + output.scheduled_spec_decode_tokens) + + model_runner_output = ModelRunnerOutput( + req_ids=req_ids, + req_id_to_index=req_to_index, + sampled_token_ids=output_tokens, + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[]) + + engine_core_outputs = scheduler.update_from_output( + output, model_runner_output) + + scheduler_stats = engine_core_outputs[0].scheduler_stats \ + if engine_core_outputs else None + if expected[0] == 0: + 
self.assertIsNone(scheduler_stats.spec_decoding_stats) + else: + self.assertIsNotNone(scheduler_stats.spec_decoding_stats) + stats = scheduler_stats.spec_decoding_stats + self.assertEqual(stats.num_drafts, expected[0]) + self.assertEqual(stats.num_draft_tokens, expected[1]) + self.assertEqual(stats.num_accepted_tokens, expected[2]) + self.assertEqual(stats.num_accepted_tokens_per_pos, + expected[3]) + + def assert_scheduler_empty(self, scheduler): + """Confirm the scheduler is "empty" - i.e. no leaks.""" + # Scheduler Metadata. + scheduler = self.create_scheduler() + self.assertEqual(len(scheduler.requests), 0) + self.assertEqual(len(scheduler.waiting), 0) + self.assertEqual(len(scheduler.running), 0) + self.assertEqual(len(scheduler.finished_req_ids), 0) + + # EncoderCacheManager. + self.assertEqual(len(scheduler.encoder_cache_manager.freed), 0) + self.assertEqual(len(scheduler.encoder_cache_manager.cached), 0) + + # KVCache Manager. + self.assertEqual( + len(scheduler.kv_cache_manager.coordinator.single_type_managers[0]. + req_to_blocks), 0) + self.assertEqual( + len(scheduler.kv_cache_manager.coordinator.single_type_managers[0]. + num_cached_block), 0) + num_free_blocks = (scheduler.kv_cache_manager.block_pool. + free_block_queue.num_free_blocks) + self.assertEqual( + num_free_blocks, + scheduler.kv_cache_manager.block_pool.num_gpu_blocks - 1) + + # NOTE(rob): just the ref count on blocks will be 0. The hash + # value, etc will remain since we lazily evict for prefix cache. + for block in scheduler.kv_cache_manager.block_pool.blocks: + self.assertEqual(block.ref_cnt, 0) + + def test_memory_leak(self): + """Test that we do not have a memory leak.""" + scheduler = self.create_scheduler() + NUM_REQUESTS = 5 + NUM_TOKENS = 10 + MAX_TOKENS = 10 + requests = create_requests(num_requests=NUM_REQUESTS, + num_tokens=NUM_TOKENS, + max_tokens=MAX_TOKENS) + + # Add each request. + for request in requests: + scheduler.add_request(request) + scheduler_output = scheduler.schedule() + model_runner_output = make_output(scheduler) + scheduler.update_from_output(scheduler_output, model_runner_output) + + # Iterate until done. + while True: + scheduler_output = scheduler.schedule() + if len(scheduler.running) == 0: + break + model_runner_output = make_output(scheduler) + scheduler.update_from_output(scheduler_output, model_runner_output) + + # Confirm no memory leak. 
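+        # All requests have finished, so no request state or KV-cache blocks should remain allocated.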
+ self.assert_scheduler_empty(scheduler) + + def test_scheduler_with_pd_transfer(self): + scheduler = self.create_scheduler() + scheduler.phase = "prefill" + requests = create_requests(num_requests=32) + for request in requests: + scheduler.add_request(request) + + # 1st iteration, move 16 requests from waiting to running for prefill + scheduler_output = scheduler.schedule() + model_runner_output = make_output(scheduler) + scheduler.update_from_output(scheduler_output, model_runner_output) + first_iter_prefilled_req_num = len(scheduler.running) + self.assertEqual(len(scheduler_output.scheduled_new_reqs), + scheduler.max_num_running_reqs) + self.assertEqual(scheduler_output.scheduled_cached_reqs.num_reqs, 0) + self.assertEqual(len(scheduler_output.finished_req_ids), 0) + + # 2nd iteration, move 16 prefilled requests to finished_prefill_reqs + # and move 16 requests from waiting to running for prefill + scheduler_output = scheduler.schedule() + model_runner_output = make_output(scheduler) + scheduler.update_from_output(scheduler_output, model_runner_output) + self.assertEqual(len(scheduler.finished_prefill_reqs), + first_iter_prefilled_req_num) + + # 3rd iteration, all requests prefilled, change scheduler phase to decode + scheduler_output = scheduler.schedule() + model_runner_output = make_output(scheduler) + scheduler.update_from_output(scheduler_output, model_runner_output) + self.assertEqual(scheduler.phase, "decode") + + +class TestSchedulerDynamicBatch(TestBase): + + @patch("vllm.config.ModelConfig.__post_init__", MagicMock()) + @patch("vllm.config.VllmConfig.__post_init__", MagicMock()) + @patch('vllm.v1.core.sched.scheduler.compute_encoder_budget') + def create_scheduler(self, mock_compute_encoder_budget): + mock_compute_encoder_budget.return_value = [100, 100] + use_kv_connector = False + block_size = 16 + + scheduler_config = SchedulerConfig( + max_num_seqs=16, + max_model_len=MAX_NUM_BATCHED_TOKENS, + long_prefill_token_threshold=LONG_PREFILL_TOKEN_THRESHOLD, + disable_chunked_mm_input=False, + enable_chunked_prefill=True, + max_num_batched_tokens=MAX_NUM_BATCHED_TOKENS, + is_encoder_decoder=False, + ) + + scheduler_config.max_num_encoder_input_tokens = 10000 + scheduler_config.encoder_cache_size = 10000 + scheduler_config.chunked_prefill_enabled = True + scheduler_config.SLO_limits_for_dynamic_batch = 0 + + model_config = ModelConfig( + model=MODEL, + task="auto", + tokenizer=MODEL, + tokenizer_mode="auto", + trust_remote_code=True, + dtype="float16", + seed=42, + max_model_len=MAX_NUM_BATCHED_TOKENS, + ) + model_config.pooler_config = MagicMock() + model_config.multimodal_config = MagicMock() + model_config.hf_config = MagicMock() + model_config.hf_config.is_encoder_decoder = False + # Cache config, optionally force APC + kwargs_cache: Dict[str, + Any] = ({} if ENABLE_PREFIX_CACHING is None else { + 'enable_prefix_caching': + ENABLE_PREFIX_CACHING + }) + cache_config = CacheConfig( + block_size=block_size, + gpu_memory_utilization=0.9, + swap_space=0, + cache_dtype="auto", + **kwargs_cache, + ) + + kv_transfer_config = KVTransferConfig( + kv_connector="SharedStorageConnector", + kv_role="kv_both", + kv_connector_extra_config={"shared_storage_path": "local_storage"}, + ) if use_kv_connector else None + + speculative_config: Optional[SpeculativeConfig] = None + if NUM_SPECULATIVE_TOKENS is not None: + speculative_config = SpeculativeConfig( + model="ngram", num_speculative_tokens=NUM_SPECULATIVE_TOKENS) + + vllm_config = VllmConfig( + scheduler_config=scheduler_config, + 
model_config=model_config, + cache_config=cache_config, + kv_transfer_config=kv_transfer_config, + speculative_config=speculative_config, + ) + + kv_cache_config = KVCacheConfig( + num_blocks=10000, # A large number of blocks to hold all requests + kv_cache_tensors=[], + kv_cache_groups=[ + KVCacheGroupSpec(['layer'], + FullAttentionSpec(block_size, 1, 1, + torch.float32, False)) + ], + ) + kv_cache_config.hash_block_size = block_size + cache_config.num_gpu_blocks = 10000 + + scheduler = SchedulerDynamicBatch( + vllm_config=vllm_config, + kv_cache_config=kv_cache_config, + block_size=block_size, + log_stats=True, + structured_output_manager=MagicMock(spec=StructuredOutputManager), + ) + + should_advance = MagicMock() + should_advance.return_value = False + scheduler.structured_output_manager.should_advance = should_advance + + return scheduler + + def test_add_requests(self): + scheduler = self.create_scheduler() + requests = create_requests(num_requests=10) + + for i, request in enumerate(requests): + scheduler.add_request(request) + self.assertIn(request.request_id, scheduler.requests) + self.assertEqual(len(scheduler.waiting), i + 1) + + def test_finish_request(self): + scheduler = self.create_scheduler() + requests = create_requests(num_requests=10) + for request in requests: + scheduler.add_request(request) + + for i, request in enumerate(requests): + scheduler.finish_requests(request.request_id, + RequestStatus.FINISHED_ABORTED) + self.assertNotIn(request.request_id, scheduler.requests) + self.assertEqual(len(scheduler.waiting), 9 - i) + + def test_get_num_unfinished_requests(self): + scheduler = self.create_scheduler() + requests = create_requests(num_requests=10) + for request in requests: + scheduler.add_request(request) + + for i, request in enumerate(requests): + scheduler.finish_requests(request.request_id, + RequestStatus.FINISHED_STOPPED) + self.assertEqual(scheduler.get_num_unfinished_requests(), + len(requests) - i - 1) + + def test_schedule(self): + '''Test scheduling. + Two cases: default APC/no prompt logprobs; APC=True + prompt logprobs + ''' + scheduler = self.create_scheduler() + scheduler.scheduler_config.chunked_prefill_enabled = True + requests = create_requests(num_requests=10) + for request in requests: + scheduler.add_request(request) + + # Test initial scheduling + output = scheduler.schedule() + self.assertEqual(len(output.scheduled_new_reqs), len(requests)) + self.assertEqual(output.scheduled_cached_reqs.num_reqs, 0) + self.assertEqual(len(output.finished_req_ids), 0) + # Verify all requests are scheduled. 
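+        # Total prompt tokens stay well under the batch token budget, so each request is expected to be fully scheduled in one step.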
+ for req_id, num_tokens in output.num_scheduled_tokens.items(): + self.assertEqual(num_tokens, + len(requests[int(req_id)].prompt_token_ids)) + + # Verify requests moved from waiting to running + self.assertEqual(len(scheduler.waiting), 0) + self.assertEqual(len(scheduler.running), len(requests)) + for i, request in enumerate(requests): + self.assertEqual(scheduler.running[i], request) + + def test_schedule_multimodal_requests(self): + scheduler = self.create_scheduler() + scheduler.scheduler_config.chunked_prefill_enabled = True + mm_positions = [[PlaceholderRange(offset=i, length=10)] + for i in range(10)] + requests = create_requests( + num_requests=10, + mm_positions=mm_positions, + ) + for request in requests: + scheduler.add_request(request) + + output = scheduler.schedule() + self.assertEqual(len(output.scheduled_new_reqs), len(requests)) + self.assertEqual(output.scheduled_cached_reqs.num_reqs, 0) + self.assertEqual(len(output.finished_req_ids), 0) + for req_id, num_tokens in output.num_scheduled_tokens.items(): + assert num_tokens == len(requests[int(req_id)].prompt_token_ids) + + # Verify all requests are scheduled. + for req_id, num_tokens in output.num_scheduled_tokens.items(): + self.assertEqual(num_tokens, + len(requests[int(req_id)].prompt_token_ids)) + self.assertEqual(len(output.scheduled_encoder_inputs), len(requests)) + for req_id, encoder_input in output.scheduled_encoder_inputs.items(): + assert len(encoder_input) == 1 + + # Verify requests moved from waiting to running + self.assertEqual(len(scheduler.waiting), 0) + self.assertEqual(len(scheduler.running), len(requests)) + for i, request in enumerate(requests): + self.assertEqual(scheduler.running[i], request) + + def test_schedule_enable_prefix_caching(self): + '''Test scheduling. + Two cases: default APC/no prompt logprobs; APC=True + prompt logprobs + ''' + global ENABLE_PREFIX_CACHING + ENABLE_PREFIX_CACHING = True + global PROMPT_LOGPROBS + PROMPT_LOGPROBS = 5 + scheduler = self.create_scheduler() + scheduler.scheduler_config.chunked_prefill_enabled = False + requests = create_requests(num_requests=10) + for request in requests: + scheduler.add_request(request) + + # Test initial scheduling + output = scheduler.schedule() + self.assertEqual(len(output.scheduled_new_reqs), len(requests)) + self.assertEqual(output.scheduled_cached_reqs.num_reqs, 0) + self.assertEqual(len(output.finished_req_ids), 0) + # Verify all requests are scheduled. 
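+        # Distinct prompts mean prefix caching should not shrink the scheduled token counts here either.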
+ for req_id, num_tokens in output.num_scheduled_tokens.items(): + self.assertEqual(num_tokens, + len(requests[int(req_id)].prompt_token_ids)) + + # Verify requests moved from waiting to running + self.assertEqual(len(scheduler.waiting), 0) + self.assertEqual(len(scheduler.running), len(requests)) + for i, request in enumerate(requests): + self.assertEqual(scheduler.running[i], request) + + def test_stop_via_update_from_output(self): + """Test stopping behavior through update_from_output""" + global NUM_SPECULATIVE_TOKENS + NUM_SPECULATIVE_TOKENS = 1 + scheduler = self.create_scheduler() + + # Test case 1: Stop on EOS token + requests = create_requests(num_requests=2, max_tokens=10) + for req in requests: + req.num_computed_tokens = req.num_tokens + scheduler.requests[req.request_id] = req + scheduler.running.append(req) + req.status = RequestStatus.RUNNING + + scheduler_output = SchedulerOutput(scheduled_new_reqs=[], + scheduled_cached_reqs=[], + num_scheduled_tokens={ + requests[0].request_id: 1, + requests[1].request_id: 2 + }, + total_num_scheduled_tokens=3, + scheduled_encoder_inputs={}, + scheduled_spec_decode_tokens={ + requests[0].request_id: [], + requests[1].request_id: [10] + }, + num_common_prefix_blocks=0, + finished_req_ids=set(), + free_encoder_mm_hashes=[]) + model_output = ModelRunnerOutput( + req_ids=[req.request_id for req in requests], + req_id_to_index={ + req.request_id: i + for i, req in enumerate(requests) + }, + sampled_token_ids=[[EOS_TOKEN_ID], [10, 11] + ], # First request hits EOS, second continues + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[]) + + scheduler.update_from_output(scheduler_output, model_output) + + # Verify first request stopped, second continues + self.assertEqual(len(scheduler.running), 1) + self.assertEqual(scheduler.running[0].request_id, + requests[1].request_id) + self.assertEqual(requests[0].status, RequestStatus.FINISHED_STOPPED) + self.assertIn(requests[0].request_id, scheduler.finished_req_ids) + self.assertEqual(list(requests[0].output_token_ids), [EOS_TOKEN_ID]) + self.assertEqual(list(requests[1].output_token_ids), [10, 11]) + + # Test case 2: Stop on custom stop token + NUM_SPECULATIVE_TOKENS = 2 + scheduler = self.create_scheduler() + requests = create_requests(num_requests=2, + max_tokens=10, + stop_token_ids=[42, 43]) + for req in requests: + req.num_computed_tokens = req.num_tokens + scheduler.requests[req.request_id] = req + scheduler.running.append(req) + req.status = RequestStatus.RUNNING + + scheduler_output = SchedulerOutput(scheduled_new_reqs=[], + scheduled_cached_reqs=[], + num_scheduled_tokens={ + requests[0].request_id: 3, + requests[1].request_id: 2 + }, + total_num_scheduled_tokens=5, + scheduled_encoder_inputs={}, + scheduled_spec_decode_tokens={ + requests[0].request_id: + [10, 42], + requests[1].request_id: [13] + }, + num_common_prefix_blocks=0, + finished_req_ids=set(), + free_encoder_mm_hashes=[]) + model_output = ModelRunnerOutput( + req_ids=[req.request_id for req in requests], + req_id_to_index={ + req.request_id: i + for i, req in enumerate(requests) + }, + sampled_token_ids=[[10, 42, 12], + [13, 14]], # First request hits stop token + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[]) + + scheduler.update_from_output(scheduler_output, model_output) + + # Verify first request stopped on custom token + self.assertEqual(len(scheduler.running), 1) + self.assertEqual(scheduler.running[0].request_id, + requests[1].request_id) + self.assertEqual(requests[0].status, 
RequestStatus.FINISHED_STOPPED) + self.assertEqual(requests[0].stop_reason, 42) + self.assertIn(requests[0].request_id, scheduler.finished_req_ids) + self.assertEqual(list(requests[0].output_token_ids), [10, 42]) + self.assertEqual(list(requests[1].output_token_ids), [13, 14]) + + # Test case 3: Stop on max tokens + NUM_SPECULATIVE_TOKENS = 2 + scheduler = self.create_scheduler() + requests = create_requests(num_requests=2, max_tokens=2) + for req in requests: + req.num_computed_tokens = req.num_tokens + scheduler.requests[req.request_id] = req + scheduler.running.append(req) + req.status = RequestStatus.RUNNING + + scheduler_output = SchedulerOutput(scheduled_new_reqs=[], + scheduled_cached_reqs=[], + num_scheduled_tokens={ + requests[0].request_id: 3, + requests[1].request_id: 1 + }, + total_num_scheduled_tokens=4, + scheduled_encoder_inputs={}, + scheduled_spec_decode_tokens={ + requests[0].request_id: + [10, 11], + requests[1].request_id: [] + }, + num_common_prefix_blocks=0, + finished_req_ids=set(), + free_encoder_mm_hashes=[]) + model_output = ModelRunnerOutput( + req_ids=[req.request_id for req in requests], + req_id_to_index={ + req.request_id: i + for i, req in enumerate(requests) + }, + sampled_token_ids=[[10, 11, 12], + [13]], # First request exceeds max_tokens + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[]) + scheduler.update_from_output(scheduler_output, model_output) + + # Verify first request stopped due to length + self.assertEqual(len(scheduler.running), 1) + self.assertEqual(scheduler.running[0].request_id, + requests[1].request_id) + self.assertEqual(requests[0].status, + RequestStatus.FINISHED_LENGTH_CAPPED) + self.assertIn(requests[0].request_id, scheduler.finished_req_ids) + self.assertEqual(list(requests[0].output_token_ids), [10, 11]) + self.assertEqual(list(requests[1].output_token_ids), [13]) + + # Test case 4: Ignore EOS flag + scheduler = self.create_scheduler() + requests = create_requests(num_requests=1, max_tokens=10) + requests[0].sampling_params.ignore_eos = True + requests[0].num_computed_tokens = requests[0].num_tokens + scheduler.requests[requests[0].request_id] = requests[0] + scheduler.running.append(requests[0]) + + scheduler_output = SchedulerOutput( + scheduled_new_reqs=[], + scheduled_cached_reqs=[], + num_scheduled_tokens={requests[0].request_id: 3}, + total_num_scheduled_tokens=3, + scheduled_encoder_inputs={}, + scheduled_spec_decode_tokens={ + requests[0].request_id: [EOS_TOKEN_ID, 10] + }, + num_common_prefix_blocks=0, + finished_req_ids=set(), + free_encoder_mm_hashes=[]) + model_output = ModelRunnerOutput( + req_ids=[requests[0].request_id], + req_id_to_index={requests[0].request_id: 0}, + sampled_token_ids=[[EOS_TOKEN_ID, 10, 11]], + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[]) + + scheduler.update_from_output(scheduler_output, model_output) + + # Verify request continues past EOS + self.assertEqual(len(scheduler.running), 1) + self.assertFalse(requests[0].is_finished()) + self.assertEqual(list(requests[0].output_token_ids), + [EOS_TOKEN_ID, 10, 11]) + + def test_schedule_concurrent_batches(self): + global MAX_NUM_BATCHED_TOKENS + global ENABLE_PREFIX_CACHING + global ENABLE_CHUNKED_PREFILL + global MAX_NUM_SEQS + global PROMPT_LOGPROBS + ENABLE_PREFIX_CACHING = None + MAX_NUM_BATCHED_TOKENS = 1024 + MAX_NUM_SEQS = 2 + ENABLE_CHUNKED_PREFILL = True + PROMPT_LOGPROBS = None + + enable_prefix_caching_list = [None, True] + prompt_logprobs_list = [None, 5] + + for i in range(len(enable_prefix_caching_list)): + 
ENABLE_PREFIX_CACHING = enable_prefix_caching_list[i] + PROMPT_LOGPROBS = prompt_logprobs_list[i] + scheduler = self.create_scheduler() + requests = create_requests( + num_requests=2, + num_tokens=512, + ) + + # Schedule the first request. + scheduler.add_request(requests[0]) + scheduler_output0 = scheduler.schedule() + self.assertEqual(len(scheduler_output0.scheduled_new_reqs), 1) + self.assertEqual( + scheduler_output0.num_scheduled_tokens[requests[0].request_id], + 512) + + # The first request is still running, so only schedule the second request. + scheduler.add_request(requests[1]) + scheduler_output1 = scheduler.schedule() + self.assertEqual(len(scheduler_output1.scheduled_new_reqs), 1) + self.assertEqual( + scheduler_output1.num_scheduled_tokens[requests[1].request_id], + 512) + + # Model output of the first request. + model_runner_output = ModelRunnerOutput( + req_ids=[requests[0].request_id], + req_id_to_index={requests[0].request_id: 0}, + sampled_token_ids=[[0]], + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[]) + + scheduler.update_from_output(scheduler_output0, + model_runner_output) + + # Schedule the next step. + # The first request can be scheduled again while the second + # request is still running. + scheduler.schedule() + # Model output of the second request. + model_runner_output = ModelRunnerOutput( + req_ids=[requests[1].request_id], + req_id_to_index={requests[1].request_id: 0}, + sampled_token_ids=[[0]], + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[]) + + scheduler.update_from_output(scheduler_output1, + model_runner_output) + + def test_schedule_spec_decoding_stats(self): + """Test scheduling behavior with speculative decoding. + + This test verifies that: + 1. Speculated tokens get scheduled correctly + 2. 
Spec decoding stats properly count number of draft and accepted tokens + """ + spec_tokens_list: List[List[List[int]]] = [[[1, 2, 3]], [[1, 2, 3]], + [[1, 2], [3]], [[1]], [[]], + [[1, 2, 3], [4, 5, 6]]] + output_tokens_list: List[List[List[int]]] = [[[1, 2, 3, 4]], [[1, 5]], + [[1, 2, 5], [3, 4]], + [[1, 2]], [[5]], + [[1, 2, 7], [4, 8]]] + expected_list: List[Tuple[int, int, + int, List[int]]] = [(1, 3, 3, [1, 1, 1]), + (1, 3, 1, [1, 0, 0]), + (2, 3, 3, [2, 1]), + (1, 1, 1, [1]), + (0, 0, 0, [0]), + (2, 6, 3, [2, 1, 0])] + + global NUM_SPECULATIVE_TOKENS + for idx in range(len(spec_tokens_list)): + spec_tokens = spec_tokens_list[idx] + output_tokens = output_tokens_list[idx] + expected = expected_list[idx] + num_spec_tokens = max(1, max(len(t) for t in spec_tokens)) + NUM_SPECULATIVE_TOKENS = num_spec_tokens + scheduler = self.create_scheduler() + requests = create_requests(num_requests=len(spec_tokens), + num_tokens=1) + req_ids = [] + req_to_index = {} + for i, request in enumerate(requests): + scheduler.add_request(request) + req_ids.append(request.request_id) + req_to_index[request.request_id] = i + + # Schedule a decode, which will also draft speculative tokens + output = scheduler.schedule() + self.assertEqual(len(output.scheduled_new_reqs), len(requests)) + self.assertEqual(output.total_num_scheduled_tokens, len(requests)) + for i in range(len(requests)): + req_id = requests[i].request_id + self.assertEqual(output.num_scheduled_tokens[req_id], 1) + self.assertNotIn(req_id, output.scheduled_spec_decode_tokens) + + model_runner_output = ModelRunnerOutput( + req_ids=req_ids, + req_id_to_index=req_to_index, + sampled_token_ids=[[0] for _ in range(len(requests))], + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[]) + draft_token_ids = DraftTokenIds(req_ids, spec_tokens) + + engine_core_outputs = scheduler.update_from_output( + output, model_runner_output) + scheduler.update_draft_token_ids(draft_token_ids) + + for i in range(len(requests)): + running_req = scheduler.running[i] + # The prompt token + self.assertEqual(running_req.num_computed_tokens, 1) + # The prompt token and the sampled token + self.assertEqual(running_req.num_tokens, 2) + # The prompt token, the sampled token, and the speculated tokens + self.assertEqual(running_req.num_tokens_with_spec, + 2 + len(spec_tokens[i])) + + # No draft or accepted tokens counted yet + self.assertTrue( + not engine_core_outputs + or (engine_core_outputs[0].scheduler_stats.spec_decoding_stats + is None)) + + # Schedule the speculated tokens for validation + output = scheduler.schedule() + self.assertEqual(len(output.scheduled_new_reqs), 0) + # The sampled token and speculated tokens + self.assertEqual( + output.total_num_scheduled_tokens, + len(requests) + sum(len(ids) for ids in spec_tokens)) + for i in range(len(requests)): + req_id = requests[i].request_id + self.assertEqual(output.num_scheduled_tokens[req_id], + 1 + len(spec_tokens[i])) + if spec_tokens[i]: + self.assertEqual( + len(output.scheduled_spec_decode_tokens[req_id]), + len(spec_tokens[i])) + else: + self.assertNotIn(req_id, + output.scheduled_spec_decode_tokens) + + model_runner_output = ModelRunnerOutput( + req_ids=req_ids, + req_id_to_index=req_to_index, + sampled_token_ids=output_tokens, + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[]) + + engine_core_outputs = scheduler.update_from_output( + output, model_runner_output) + + scheduler_stats = engine_core_outputs[0].scheduler_stats \ + if engine_core_outputs else None + if expected[0] == 0: + 
self.assertIsNone(scheduler_stats.spec_decoding_stats) + else: + self.assertIsNotNone(scheduler_stats.spec_decoding_stats) + stats = scheduler_stats.spec_decoding_stats + self.assertEqual(stats.num_drafts, expected[0]) + self.assertEqual(stats.num_draft_tokens, expected[1]) + self.assertEqual(stats.num_accepted_tokens, expected[2]) + self.assertEqual(stats.num_accepted_tokens_per_pos, + expected[3]) + + def assert_scheduler_empty(self, scheduler): + """Confirm the scheduler is "empty" - i.e. no leaks.""" + # Scheduler Metadata. + scheduler = self.create_scheduler() + self.assertEqual(len(scheduler.requests), 0) + self.assertEqual(len(scheduler.waiting), 0) + self.assertEqual(len(scheduler.running), 0) + self.assertEqual(len(scheduler.finished_req_ids), 0) + + # EncoderCacheManager. + self.assertEqual(len(scheduler.encoder_cache_manager.freed), 0) + self.assertEqual(len(scheduler.encoder_cache_manager.cached), 0) + + # KVCache Manager. + self.assertEqual( + len(scheduler.kv_cache_manager.coordinator.single_type_managers[0]. + req_to_blocks), 0) + self.assertEqual( + len(scheduler.kv_cache_manager.coordinator.single_type_managers[0]. + num_cached_block), 0) + num_free_blocks = (scheduler.kv_cache_manager.block_pool. + free_block_queue.num_free_blocks) + self.assertEqual( + num_free_blocks, + scheduler.kv_cache_manager.block_pool.num_gpu_blocks - 1) + + # NOTE(rob): just the ref count on blocks will be 0. The hash + # value, etc will remain since we lazily evict for prefix cache. + for block in scheduler.kv_cache_manager.block_pool.blocks: + self.assertEqual(block.ref_cnt, 0) + + def test_memory_leak(self): + """Test that we do not have a memory leak.""" + scheduler = self.create_scheduler() + NUM_REQUESTS = 5 + NUM_TOKENS = 10 + MAX_TOKENS = 10 + requests = create_requests(num_requests=NUM_REQUESTS, + num_tokens=NUM_TOKENS, + max_tokens=MAX_TOKENS) + + # Add each request. + for request in requests: + scheduler.add_request(request) + scheduler_output = scheduler.schedule() + model_runner_output = make_output(scheduler) + scheduler.update_from_output(scheduler_output, model_runner_output) + + # Iterate until done. + while True: + scheduler_output = scheduler.schedule() + if len(scheduler.running) == 0: + break + model_runner_output = make_output(scheduler) + scheduler.update_from_output(scheduler_output, model_runner_output) + + # Confirm no memory leak. 
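+        # With the running queue drained, the scheduler should hold no leftover requests or blocks.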
+ self.assert_scheduler_empty(scheduler) \ No newline at end of file diff --git a/tests/ut/eplb/core/test_eplb_device_transfer_loader.py b/tests/ut/eplb/core/test_eplb_device_transfer_loader.py index 6a204dc0024..963a34927af 100644 --- a/tests/ut/eplb/core/test_eplb_device_transfer_loader.py +++ b/tests/ut/eplb/core/test_eplb_device_transfer_loader.py @@ -47,7 +47,8 @@ def test_generate_task_and_state_flow(mock_adaptor): loader_obj.state = loader.ExpertWeightUpdateState.WAITING loader_obj.generate_expert_d2d_transfer_task([], [], {}, 0) - assert loader_obj.comm_op_list is None + # assert loader_obj.comm_op_list is None + assert loader_obj.comm_op_list == [] assert loader_obj.state == loader.ExpertWeightUpdateState.WAITING @@ -113,4 +114,4 @@ def test_load_impl_not_implemented(mock_adaptor): loader_obj = loader.D2DExpertWeightLoader() loader_obj.set_adator(mock_adaptor) with pytest.raises(NotImplementedError): - loader_obj.load_impl({}, {}) + loader_obj.load_impl({}, {}) \ No newline at end of file diff --git a/tests/ut/kv_connector/utils.py b/tests/ut/kv_connector/utils.py index ab4af6a732c..39aea4ca087 100644 --- a/tests/ut/kv_connector/utils.py +++ b/tests/ut/kv_connector/utils.py @@ -62,6 +62,7 @@ def create_vllm_config( max_num_seqs=max_num_seqs, max_num_batched_tokens=max_num_batched_tokens, max_model_len=max_num_batched_tokens, + is_encoder_decoder=False, ) fake_weight_path = os.path.join(os.path.dirname(__file__), "..", "fake_weight") @@ -208,4 +209,4 @@ def create_model_runner_output( **extra_args, ) - return model_runner_output + return model_runner_output \ No newline at end of file diff --git a/tests/ut/model_loader/netloader/test_netloader_elastic.py b/tests/ut/model_loader/netloader/test_netloader_elastic.py index 127f1dd6c54..b7e6deee41a 100644 --- a/tests/ut/model_loader/netloader/test_netloader_elastic.py +++ b/tests/ut/model_loader/netloader/test_netloader_elastic.py @@ -196,7 +196,11 @@ def test_server_initialization(server_config, mock_model): log_capture_string = io.StringIO() ch = logging.StreamHandler(log_capture_string) ch.setLevel(logging.DEBUG) + root_logger = logging.getLogger() + root_logger.addHandler(ch) + root_logger.setLevel(logging.DEBUG) vllm.logger.logger.addHandler(ch) + vllm.logger.logger.setLevel(logging.DEBUG) server = ElasticServer(**server_config) @@ -218,14 +222,16 @@ def test_server_initialization(server_config, mock_model): assert server.model_path == server_config['model_path'] assert server.tp == server_config['tp'] assert server.pp == server_config['pp'] - + + log_capture_string.flush() # Get captured logs log_output = log_capture_string.getvalue() + root_logger.removeHandler(ch) vllm.logger.logger.removeHandler(ch) log_capture_string.close() # Check output - assert "Server 127.0.0.1:8080 starts" in log_output + assert "Server" in log_output and "127.0.0.1:8080" in log_output and "starts" in log_output # Test the int8 cache option @@ -241,16 +247,26 @@ def test_int8_cache_handling(server_config, mock_model, cache_option, log_capture_string = io.StringIO() ch = logging.StreamHandler(log_capture_string) ch.setLevel(logging.DEBUG) + + root_logger = logging.getLogger() + root_logger.addHandler(ch) + root_logger.setLevel(logging.DEBUG) vllm.logger.logger.addHandler(ch) + vllm.logger.logger.setLevel(logging.DEBUG) server = ElasticServer(**server_config) + log_capture_string.flush() log_output = log_capture_string.getvalue() + root_logger.removeHandler(ch) vllm.logger.logger.removeHandler(ch) log_capture_string.close() if cache_option == "invalid": - 
assert "int8_cache should be selected in [HBM, DRAM]" in log_output + # assert "int8_cache should be selected in [HBM, DRAM]" in log_output + assert "int8_cache should be selected in [HBM, DRAM]" in log_output.lower() or \ + "int8_cache should be selected in [hbm, dram]" in log_output + if expected_device is None: assert len(server.original_int8) == 0 @@ -361,7 +377,14 @@ def test_client_handler_invalid_requests(server_config, invalid_data, log_capture_string = io.StringIO() ch = logging.StreamHandler(log_capture_string) ch.setLevel(logging.DEBUG) + + root_logger = logging.getLogger() + root_logger.addHandler(ch) + root_logger.setLevel(logging.DEBUG) vllm.logger.logger.addHandler(ch) + vllm.logger.logger.setLevel(logging.DEBUG) + + with patch("socket.socket"): server = ElasticServer(**server_config) @@ -389,11 +412,14 @@ def test_client_handler_invalid_requests(server_config, invalid_data, else: mock_conn.send.assert_not_called() + log_capture_string.flush() log_output = log_capture_string.getvalue() + root_logger.removeHandler(ch) vllm.logger.logger.removeHandler(ch) log_capture_string.close() # Any warning in the log is acceptable + log_lower = log_output.lower() assert "Failed to load" in log_output or "does not contain" in log_output mock_conn.close.assert_called_once() @@ -429,4 +455,4 @@ def test_server_cleanup(server_config): if __name__ == "__main__": - pytest.main() + pytest.main() \ No newline at end of file diff --git a/tests/ut/ops/test_linear.py b/tests/ut/ops/test_linear.py index c31033e67dd..073f9f3882f 100644 --- a/tests/ut/ops/test_linear.py +++ b/tests/ut/ops/test_linear.py @@ -96,10 +96,20 @@ def test_mlp_optimize(self): def test_oproj_tp(self): config._current_vllm_config = MagicMock() + + from vllm.config import SchedulerConfig + mock_scheduler_config = SchedulerConfig( + max_num_batched_tokens=2048, + max_num_seqs=128, + max_model_len=2048, + is_encoder_decoder=False, + ) + config._current_vllm_config.scheduler_config = mock_scheduler_config + ascend_config._ASCEND_CONFIG = MagicMock() ascend_config._ASCEND_CONFIG.oproj_tensor_parallel_size = 2 - ascend_config._ASCEND_CONFIG.recompute_scheduler_enable = False + ascend_config._ASCEND_CONFIG.ascend_scheduler_config.enabled = False linear = AscendRowParallelLinear( input_size=16, @@ -145,4 +155,4 @@ def test_init_without_disable_tp(self): if __name__ == '__main__': - unittest.main() + unittest.main() \ No newline at end of file diff --git a/vllm_ascend/core/schedule_config.py b/vllm_ascend/core/schedule_config.py new file mode 100644 index 00000000000..6d4c01f64ee --- /dev/null +++ b/vllm_ascend/core/schedule_config.py @@ -0,0 +1,105 @@ +# +# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# This file is a part of the vllm-ascend project. 
+# + +from dataclasses import dataclass, fields +from typing import Type, Union + +from vllm.config import SchedulerConfig + +MAX_INT = 2147483647 + + +@dataclass +class AscendSchedulerConfig(SchedulerConfig): + enable_chunked_prefill: bool = False + max_long_partial_prefills: int = 1 + long_prefill_token_threshold: int = MAX_INT + policy: str = "fcfs" + scheduler_cls: Union[str, Type[object]] = ( + "vllm_ascend.core.scheduler.AscendScheduler") + enable_pd_transfer: bool = False + decode_max_num_seqs: int = 0 + + @classmethod + def initialize_from_config( + cls, + vllm_scheduler_config: SchedulerConfig, + ascend_scheduler_config, + ): + scheduler_config = { + field.name: getattr(vllm_scheduler_config, field.name) + for field in fields(vllm_scheduler_config) if field.init + } + # Override default values into original SchedulerConfig + scheduler_config["enable_chunked_prefill"] = False + scheduler_config["max_long_partial_prefills"] = None + scheduler_config["long_prefill_token_threshold"] = None + scheduler_config["policy"] = "fcfs" + scheduler_config["scheduler_cls"] = ( + "vllm_ascend.core.scheduler.AscendScheduler") + scheduler_config["enable_pd_transfer"] = False + scheduler_config["decode_max_num_seqs"] = 0 + # Override params in original SchedulerConfig with params in ascend_scheduler_config + for k, _ in scheduler_config.items(): + if hasattr(ascend_scheduler_config, k): + scheduler_config[k] = getattr(ascend_scheduler_config, k) + return cls(**scheduler_config) + + def __post_init__(self, *args) -> None: + self.max_num_encoder_input_tokens = self.max_num_batched_tokens + self.encoder_cache_size = self.max_num_batched_tokens + self.chunked_prefill_enabled = self.enable_chunked_prefill + if (self.max_num_batched_tokens < self.max_model_len + and not self.chunked_prefill_enabled): + raise ValueError( + "Ascend scheduler is enabled without chunked prefill feature. " + f"Argument max_num_batched_tokens ({self.max_num_batched_tokens}) is " + f"smaller than max_model_len ({self.max_model_len}). " + "This effectively limits the maximum sequence length to " + "max_num_batched_tokens and makes vLLM reject longer " + "sequences. Please increase max_num_batched_tokens or " + "decrease max_model_len.") + # concurrent partial prefills. Default is 1 meaning not enabled. + if self.max_long_partial_prefills is None: + self.max_long_partial_prefills = 1 + self.long_prefill_token_threshold = MAX_INT + + if self.long_prefill_token_threshold is None or \ + self.long_prefill_token_threshold <= 0: + if self.max_model_len is None: + self.long_prefill_token_threshold = MAX_INT + else: + self.long_prefill_token_threshold = \ + max(1, int(self.max_model_len * 0.04)) + + if self.max_long_partial_prefills < 0: + raise ValueError( + f"max_long_partial_prefills must be non-negative, but got " + f"{self.max_long_partial_prefills}") + if self.long_prefill_token_threshold < 0: + raise ValueError( + f"long_prefill_token_threshold must be non-negative, but got " + f"{self.long_prefill_token_threshold}") + + if self.policy != "fcfs": + raise NotImplementedError( + f"currently AscendScheduler only supports fcfs policy, got {self.policy}" + ) + if getattr(self, "scheduler_delay_factor", 0) > 0: + raise NotImplementedError( + "currently AscendScheduler doesn't support scheduler_delay_factor." 
+ ) \ No newline at end of file diff --git a/vllm_ascend/core/scheduler.py b/vllm_ascend/core/scheduler.py new file mode 100644 index 00000000000..5a7fa168ac8 --- /dev/null +++ b/vllm_ascend/core/scheduler.py @@ -0,0 +1,592 @@ +# +# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# This file is a part of the vllm-ascend project. +# +import time +from collections import deque +from typing import Iterable, Optional, Union + +from vllm.config import VllmConfig +from vllm.distributed.kv_events import KVEventBatch +from vllm.logger import logger +from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry +from vllm.utils.math_utils import cdiv +from vllm.v1.core.kv_cache_manager import KVCacheBlocks +from vllm.v1.core.sched.output import NewRequestData, SchedulerOutput +from vllm.v1.core.sched.scheduler import Scheduler +from vllm.v1.engine import EngineCoreEventType, EngineCoreOutputs +from vllm.v1.kv_cache_interface import KVCacheConfig +from vllm.v1.outputs import ModelRunnerOutput +from vllm.v1.request import Request, RequestStatus +from vllm.v1.structured_output import StructuredOutputManager + + +class AscendScheduler(Scheduler): + """This Scheduler extends vllm's original v1 scheduler + with prefill-first scheduling strategy.""" + + def _initialize_common(self) -> None: + """Initialize common attributes shared across all versions.""" + self.scheduled_req_ids: set[str] = set() + self.running: list[Request] = [] + self.finished_prefill_reqs: deque[Request] = deque() + + enable_pd_transfer = getattr(self.scheduler_config, + 'enable_pd_transfer', False) + decode_max_num_seqs = getattr(self.scheduler_config, + 'decode_max_num_seqs', 0) + self.phase = "" if not enable_pd_transfer else "prefill" + self.decode_max_num_running_reqs = max(self.max_num_running_reqs, + decode_max_num_seqs) + + def __init__( + self, + vllm_config: VllmConfig, + kv_cache_config: KVCacheConfig, + structured_output_manager: StructuredOutputManager, + block_size: Optional[int] = None, + mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY, + include_finished_set: bool = False, + log_stats: bool = False, + ) -> None: + # Call the parent class's __init__ method + super().__init__(vllm_config, kv_cache_config, + structured_output_manager, block_size, mm_registry, + include_finished_set, log_stats) + + # Initialize common attributes + self._initialize_common() + + def schedule(self) -> SchedulerOutput: + if self.scheduler_config.enable_chunked_prefill: + return super().schedule() + scheduled_new_reqs: list[Request] = [] + scheduled_resumed_reqs: list[Request] = [] + scheduled_running_reqs: list[Request] = [] + preempted_reqs: list[Request] = [] + + req_to_new_blocks: dict[str, KVCacheBlocks] = {} + num_scheduled_tokens: dict[str, int] = {} + token_budget = self.max_num_scheduled_tokens + + # Encoder-related. + scheduled_encoder_inputs: dict[str, list[int]] = {} + encoder_budget = self.max_num_encoder_input_tokens + + # Spec decode-related. 
+ scheduled_spec_decode_tokens: dict[str, list[int]] = {} + + # For logging. + scheduled_timestamp = time.monotonic() + + # Record scheduled LoRA requests. + scheduled_loras: set[int] = set() + + # Use a temporary deque to collect requests that need to be skipped + # and put back at the head of the waiting queue later + skipped_waiting_requests: deque[Request] = deque() + + if self.phase == "prefill": + remaining_running_reqs = [] + for request in self.running: + # move request has finished prefill to finished_prefill_reqs + if request.num_tokens > request.num_prompt_tokens: + self.finished_prefill_reqs.append(request) + else: + remaining_running_reqs.append(request) + self.running = remaining_running_reqs + # all request prefilled, change phase to decode + if not self.waiting and not self.running: + self.phase = "decode" + # Skip long prompt requests in prefill stage. + # long_prefill_budget is float('inf') if not use. + if self.vllm_config.scheduler_config.long_prefill_token_threshold == 0: + long_prefill_budget = float('inf') + long_prefill_token_threshold = float('inf') + else: + long_prefill_budget = self.vllm_config.scheduler_config.max_long_partial_prefills + long_prefill_token_threshold = self.vllm_config.scheduler_config.long_prefill_token_threshold + + # Schedule prefill requests first. + while self.waiting and token_budget > 0: + if len(self.running) == (self.decode_max_num_running_reqs + if self.phase == "decode" else + self.max_num_running_reqs): + + break + + request = self.waiting[0] + + def skip_cur_request(): + self.waiting.popleft() + skipped_waiting_requests.appendleft(request) + + # P/D: skip request if still waiting for remote kvs. + if request.status == RequestStatus.WAITING_FOR_REMOTE_KVS: + is_ready = self._update_waiting_for_remote_kv(request) + if is_ready: + request.status = RequestStatus.WAITING + else: + skip_cur_request() + continue + + # Check that adding the request still respects the max_loras + # constraint. + if (self.lora_config and request.lora_request and + (len(scheduled_loras) == self.lora_config.max_loras + and request.lora_request.lora_int_id not in scheduled_loras)): + # Scheduling would exceed max_loras, skip. + skip_cur_request() + continue + + num_external_computed_tokens = 0 + load_kv_async = False + + # Get already-cached tokens. + if request.num_computed_tokens == 0: + new_computed_blocks, num_new_local_computed_tokens = \ + self.kv_cache_manager.get_computed_blocks( + request) + + # Get externally-cached tokens if using a KVConnector. + if self.connector is not None: + num_external_computed_tokens, load_kv_async = ( + self.connector.get_num_new_matched_tokens( + request, num_new_local_computed_tokens)) + + # Total computed tokens (local + external). + num_computed_tokens = (num_new_local_computed_tokens + + num_external_computed_tokens) + else: + # P/D: skip checking prefix cache if loaded from remote kvs. + new_computed_blocks = ( + self.kv_cache_manager.create_empty_block_list()) + num_new_local_computed_tokens = 0 + num_computed_tokens = request.num_computed_tokens + + encoder_inputs_to_schedule = None + new_encoder_budget = encoder_budget + + # P/D: loading remote KV, do not allocate for new work. + if load_kv_async: + assert num_external_computed_tokens > 0 + num_new_tokens = 0 + blocks = None + # Number of tokens to be scheduled. + else: + prompt_limit = self._get_prompt_limit(request) + # We use `request.num_tokens` instead of + # `request.num_prompt_tokens` to consider the resumed + # requests, which have output tokens. 
+ num_new_tokens = request.num_tokens - num_computed_tokens + max_tokens_in_kvcache = (self.kv_cache_config.num_blocks * + self.block_size) + prompt_limit = min(prompt_limit, max_tokens_in_kvcache) + + # Finish request that exceeds prompt_limit or kv cache size. + if num_new_tokens > prompt_limit: + logger.warning( + "Input prompt (%d tokens) is too long" + " and exceeds limit of %d", + num_new_tokens, + prompt_limit, + ) + request.status = RequestStatus.FINISHED_IGNORED + self.finished_req_ids.add( # type: ignore + request.request_id) # type: ignore + self.waiting.popleft() + continue + + if num_new_tokens > token_budget: + # Scheduling would exceed token_budget, skip. + skip_cur_request() + continue + assert num_new_tokens > 0 + blocks = new_computed_blocks.blocks[0] + + # Schedule encoder inputs. + if request.has_encoder_inputs: + (encoder_inputs_to_schedule, num_new_tokens, + new_encoder_budget, + _) = self._try_schedule_encoder_inputs( + request, num_computed_tokens, num_new_tokens, + encoder_budget) + if num_new_tokens == 0 or len( + encoder_inputs_to_schedule) == 0: + # The request cannot be scheduled. + break + + watermark = getattr(self.scheduler_config, "watermark", 0.01) + if not self._check_watermark_for_prefill(request, num_new_tokens, + blocks, watermark): + # Scheduling would exceed watermark, skip. + skip_cur_request() + continue + + if num_new_tokens > long_prefill_token_threshold \ + and long_prefill_budget <= 0: + skip_cur_request() + continue + + new_blocks = self.kv_cache_manager.allocate_slots( + request, + num_new_tokens + num_external_computed_tokens, + num_new_local_computed_tokens, + new_computed_blocks=new_computed_blocks, + num_lookahead_tokens=self.num_lookahead_tokens, + delay_cache_blocks=load_kv_async) + if new_blocks is None: + # The request cannot be scheduled. + break + + # KVConnector: update internal state after allocation. + # This information is used to determine if a load is + # needed for this request. + if self.connector is not None: + self.connector.update_state_after_alloc( + request, + new_computed_blocks + new_blocks, + num_external_computed_tokens, + ) + + self.waiting.popleft() + if load_kv_async: + # If loading async, allocate memory and put request + # into the WAITING_FOR_REMOTE_KV state. + skipped_waiting_requests.appendleft(request) + request.status = RequestStatus.WAITING_FOR_REMOTE_KVS + continue + + self.running.append(request) + if self.log_stats: + request.record_event(EngineCoreEventType.SCHEDULED, + scheduled_timestamp) + self.scheduled_req_ids.add(request.request_id) + # Check request status. + if request.status == RequestStatus.WAITING: + scheduled_new_reqs.append(request) + elif request.status == RequestStatus.PREEMPTED: + scheduled_resumed_reqs.append(request) + else: + raise RuntimeError(f"Invalid request status: {request.status}") + + if self.lora_config and request.lora_request: + scheduled_loras.add(request.lora_request.lora_int_id) + + req_to_new_blocks[ + request.request_id] = self.kv_cache_manager.get_blocks( + request.request_id) + # Update request info. + num_scheduled_tokens[request.request_id] = num_new_tokens + token_budget -= num_new_tokens + if num_new_tokens > long_prefill_token_threshold: + long_prefill_budget -= 1 + request.status = RequestStatus.RUNNING + request.num_computed_tokens = num_computed_tokens + # Count the number of prefix cached tokens. + if request.num_cached_tokens < 0: + request.num_cached_tokens = num_computed_tokens + + # Encoder-related. 
+ if encoder_inputs_to_schedule: + scheduled_encoder_inputs[request.request_id] = ( + encoder_inputs_to_schedule) + # Allocate the encoder cache. + for i in encoder_inputs_to_schedule: + self.encoder_cache_manager.allocate(request, i) + encoder_budget = new_encoder_budget + + # Put back any skipped requests at the head of the waiting queue + if skipped_waiting_requests: + self.waiting.extendleft(skipped_waiting_requests) + + if self.phase == "decode": + while len( + self.running + ) < self.decode_max_num_running_reqs and self.finished_prefill_reqs: + request = self.finished_prefill_reqs.popleft() + self.running.append(request) + + # If no prefill requests are scheduled, + # Schedule decode requests next. + if len(self.scheduled_req_ids) == 0: + req_index = 0 + while req_index < len(self.running) and token_budget > 0: + request = self.running[req_index] + if request.request_id in self.scheduled_req_ids: + # This request has already been scheduled. + req_index += 1 + continue + + num_new_tokens = (request.num_tokens_with_spec - + request.num_computed_tokens) + assert (request.num_tokens - request.num_computed_tokens) == 1 + num_new_tokens = min(num_new_tokens, token_budget) + # Make sure the input position does not exceed the max model len. + # This is necessary when using spec decoding. + num_new_tokens = min( + num_new_tokens, + self.max_model_len - request.num_computed_tokens) + + # Schedule encoder inputs. + encoder_inputs_to_schedule = None + new_encoder_budget = encoder_budget + if request.has_encoder_inputs: + (encoder_inputs_to_schedule, num_new_tokens, + new_encoder_budget) = self._try_schedule_encoder_inputs( + request, request.num_computed_tokens, num_new_tokens, + encoder_budget) + + # Check that adding the request still respects the max_loras + # constraint. + if self.lora_config and request.lora_request and ( + len(scheduled_loras) == self.lora_config.max_loras + and request.lora_request.lora_int_id + not in scheduled_loras): + # Scheduling would exceed max_loras, skip. + num_new_tokens = 0 + + if num_new_tokens == 0: + # The request cannot be scheduled because one of the following + # reason: + # 1. No new tokens to schedule. This may happen when PP>1 and + # we have already scheduled all prompt tokens but they are + # not finished yet. + # 2. Adding the request exceeds the max_loras constraint. + # NOTE(woosuk): Here, by doing `continue` instead of `break`, + # we do not strictly follow the FCFS scheduling policy and + # allow the lower-priority requests to be scheduled. + req_index += 1 + continue + + while True: + new_blocks = self.kv_cache_manager.allocate_slots( + request, + num_new_tokens, + num_lookahead_tokens=self.num_lookahead_tokens) + if new_blocks is None: + # The request cannot be scheduled. + # Preempt the lowest-priority request. + preempted_req = self.running.pop() + self.kv_cache_manager.free(preempted_req) + preempted_req.status = RequestStatus.PREEMPTED + preempted_req.num_computed_tokens = 0 + if self.log_stats: + preempted_req.record_event( + EngineCoreEventType.PREEMPTED, + scheduled_timestamp) + self.waiting.appendleft(preempted_req) + preempted_reqs.append(preempted_req) + if preempted_req == request: + # No more request to preempt. + can_schedule = False + break + else: + # The request can be scheduled. + can_schedule = True + break + if not can_schedule: + break + assert new_blocks is not None + + # Schedule the request. 
+ scheduled_running_reqs.append(request) + self.scheduled_req_ids.add(request.request_id) + req_to_new_blocks[request.request_id] = new_blocks + num_scheduled_tokens[request.request_id] = num_new_tokens + token_budget -= num_new_tokens + req_index += 1 + + # Speculative decode related. + if request.spec_token_ids: + num_scheduled_spec_tokens = (num_new_tokens + + request.num_computed_tokens - + request.num_tokens) + if num_scheduled_spec_tokens > 0: + # Trim spec_token_ids list to num_scheduled_spec_tokens. + del request.spec_token_ids[num_scheduled_spec_tokens:] + scheduled_spec_decode_tokens[request.request_id] = ( + request.spec_token_ids) + + # Encoder-related. + if encoder_inputs_to_schedule: + scheduled_encoder_inputs[request.request_id] = ( + encoder_inputs_to_schedule) + # Allocate the encoder cache. + for i in encoder_inputs_to_schedule: + self.encoder_cache_manager.allocate(request, i) + encoder_budget = new_encoder_budget + + # Record scheduled LoRA requests. + if self.lora_config and request.lora_request: + scheduled_loras.add(request.lora_request.lora_int_id) + + # Check if the scheduling constraints are satisfied. + total_num_scheduled_tokens = sum(num_scheduled_tokens.values()) + assert total_num_scheduled_tokens <= self.max_num_scheduled_tokens + assert token_budget >= 0 + assert len( + self.running + ) <= self.decode_max_num_running_reqs if self.phase == "decode" else self.max_num_running_reqs + assert len(scheduled_new_reqs) + len(scheduled_resumed_reqs) + len( + scheduled_running_reqs) <= len(self.running) + + # Get the longest common prefix among all requests in the running queue. + # This can be potentially used for cascade attention. + num_common_prefix_blocks = [0] * len( + self.kv_cache_config.kv_cache_groups) + if self.running: + any_request = self.running[0] + num_common_prefix_blocks = ( + self.kv_cache_manager.get_num_common_prefix_blocks( + any_request.request_id)) + + # Construct the scheduler output. + new_reqs_data = [ + NewRequestData.from_request( + req, req_to_new_blocks[req.request_id].get_block_ids()) + for req in scheduled_new_reqs + ] + + cached_reqs_data = self._make_cached_request_data( + scheduled_running_reqs, scheduled_resumed_reqs, + num_scheduled_tokens, scheduled_spec_decode_tokens, + req_to_new_blocks) + scheduled_cached_reqs = cached_reqs_data + scheduler_output = SchedulerOutput( + scheduled_new_reqs=new_reqs_data, + scheduled_cached_reqs=scheduled_cached_reqs, + num_scheduled_tokens=num_scheduled_tokens, + total_num_scheduled_tokens=total_num_scheduled_tokens, + scheduled_spec_decode_tokens=scheduled_spec_decode_tokens, + scheduled_encoder_inputs=scheduled_encoder_inputs, + num_common_prefix_blocks=num_common_prefix_blocks, + # finished_req_ids is an existing state in the scheduler, + # instead of being newly scheduled in this step. + # It contains the request IDs that are finished in between + # the previous and the current steps. + finished_req_ids=self.finished_req_ids, # type: ignore + free_encoder_mm_hashes=self.encoder_cache_manager. + get_freed_mm_hashes(), + ) + # NOTE(Kuntai): this function is designed for multiple purposes: + # 1. Plan the KV cache store + # 2. Wrap up all the KV cache load / save ops into an opaque object + # 3. 
Clear the internal states of the connector + if self.connector is not None: + meta = self.connector.build_connector_meta(scheduler_output) + scheduler_output.kv_connector_metadata = meta + + events = self.kv_cache_manager.take_events() + if events: + batch = KVEventBatch(ts=time.time(), events=events) + self.kv_event_publisher.publish(batch) + + # Advance the number of computed tokens for the request AFTER + # the request is scheduled. + # 1. The scheduler_output of the current step has to include the + # original number of scheduled tokens to determine input IDs. + # 2. Advance the number of computed tokens here allowing us to + # schedule the prefill request again immediately in the next + # scheduling step. + # 3. If some tokens (e.g. spec tokens) are rejected later, the number of + # computed tokens will be adjusted in update_from_output. + for req_id, num_scheduled_token in num_scheduled_tokens.items(): + self.requests[req_id].num_computed_tokens += num_scheduled_token + + self.finished_req_ids = set() # type: ignore + return scheduler_output + + def _check_watermark_for_prefill(self, + request, + num_new_tokens, + computed_blocks, + watermark=0.01): + computed_blocks = computed_blocks or [] + watermark_blocks = self.kv_cache_config.num_blocks * watermark + num_computed_tokens = (request.num_computed_tokens + + len(computed_blocks) * self.block_size) + num_required_blocks = cdiv(num_new_tokens + num_computed_tokens, + self.block_size) + req_blocks = self.kv_cache_manager.coordinator.get_blocks( + request.request_id) + num_new_blocks = (num_required_blocks - len(req_blocks[0]) - + len(computed_blocks)) + num_evictable_computed_blocks = sum(1 for blk in computed_blocks + if blk.ref_cnt == 0) + # If number of free blocks is less than water mark after allocating, don't allocate. + if (self.kv_cache_manager.block_pool.get_num_free_blocks() - + num_evictable_computed_blocks - + num_new_blocks) < watermark_blocks: + return False + return True + + def _get_prompt_limit(self, request: Request) -> int: + if (self.scheduler_config.enable_chunked_prefill + and not self.scheduler_config.is_multi_step): + prompt_limit = self.vllm_config.model_config.max_model_len + else: + prompt_limit = min( + self.vllm_config.model_config.max_model_len, + self.scheduler_config.max_num_batched_tokens, + ) + + # Model is fine tuned with long context. Return the fine tuned max_len. + if request.lora_request and request.lora_request.long_lora_max_len: + assert prompt_limit <= request.lora_request.long_lora_max_len + return request.lora_request.long_lora_max_len + else: + return prompt_limit + + def finish_requests( + self, + request_ids: Union[str, Iterable[str]], + finished_status: RequestStatus, + ) -> None: + """Handles the finish signal from outside the scheduler. + + For example, the API server can abort a request when the client + disconnects. + """ + for req_id in request_ids: + request = self.requests.get(req_id) + if request is None: + # Invalid request ID. + continue + if request.status == RequestStatus.RUNNING: + self.scheduled_req_ids.discard(request.request_id) + super().finish_requests(request_ids, finished_status) + + def update_from_output( + self, + scheduler_output: SchedulerOutput, + model_runner_output: ModelRunnerOutput, + ) -> EngineCoreOutputs: + num_scheduled_tokens = scheduler_output.num_scheduled_tokens + + # NOTE(woosuk): As len(self.running) can be up to 1K or more, the below + # loop can be a performance bottleneck. We should do our best to avoid + # expensive operations inside the loop. 
+ for request in self.running: + req_id = request.request_id + num_tokens_scheduled = num_scheduled_tokens.get(req_id, 0) + if num_tokens_scheduled == 0: + # The request was not scheduled in this step. + continue + if req_id in self.scheduled_req_ids: + self.scheduled_req_ids.remove(req_id) + + return super().update_from_output(scheduler_output, + model_runner_output) \ No newline at end of file diff --git a/vllm_ascend/ops/fused_moe/moe_mlp.py b/vllm_ascend/ops/fused_moe/moe_mlp.py index 13e1efc0acd..c3d9978391f 100644 --- a/vllm_ascend/ops/fused_moe/moe_mlp.py +++ b/vllm_ascend/ops/fused_moe/moe_mlp.py @@ -127,6 +127,9 @@ def quant_apply_mlp(hidden_states: torch.Tensor, if quantized_hidden_states is not None: dispose_tensor(quantized_hidden_states) # act_fn: swiglu + group_diff = torch.diff(group_list, dim=0) + new_group = torch.cat([group_list[0].unsqueeze(0), group_diff], + dim=0) hidden_states, swiglu_out_scale = torch_npu.npu_dequant_swiglu_quant( x=hidden_states, weight_scale=w1_scale, @@ -134,7 +137,7 @@ def quant_apply_mlp(hidden_states: torch.Tensor, bias=None, quant_scale=None, quant_offset=None, - group_index=group_list, + group_index=new_group, activate_left=True, quant_mode=1, ) @@ -295,4 +298,4 @@ def unified_apply_mlp(hidden_states: torch.Tensor, group_list=group_list, group_list_type=group_list_type, topk_scales=topk_scales, - need_trans=need_trans) + need_trans=need_trans) \ No newline at end of file
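
Note on `AscendSchedulerConfig.initialize_from_config` in `vllm_ascend/core/schedule_config.py` above: it follows a copy-then-override pattern, where every `init` field of the base `SchedulerConfig` is copied into a dict, Ascend-specific defaults are stamped on top, and finally any attribute present on the caller-supplied override object wins. Below is the same pattern in miniature, using throwaway dataclasses rather than vLLM's real config classes; all names here are illustrative only.

```python
from dataclasses import dataclass, fields


@dataclass
class BaseCfg:
    max_model_len: int = 8192
    policy: str = "priority"


@dataclass
class DerivedCfg(BaseCfg):
    enable_pd_transfer: bool = False

    @classmethod
    def from_base(cls, base: BaseCfg, override) -> "DerivedCfg":
        # 1) copy every init field of the base config
        kwargs = {f.name: getattr(base, f.name) for f in fields(base) if f.init}
        # 2) stamp subclass defaults on top
        kwargs["policy"] = "fcfs"
        kwargs["enable_pd_transfer"] = False
        # 3) let the caller-supplied override win for any field it defines
        for k in list(kwargs):
            if hasattr(override, k):
                kwargs[k] = getattr(override, k)
        return cls(**kwargs)


class Override:
    enable_pd_transfer = True


print(DerivedCfg.from_base(BaseCfg(max_model_len=4096), Override()))
# DerivedCfg(max_model_len=4096, policy='fcfs', enable_pd_transfer=True)
```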
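The defaulting logic in `AscendSchedulerConfig.__post_init__` is worth spelling out: when `long_prefill_token_threshold` is unset (None or non-positive), it falls back to roughly 4% of `max_model_len` (but at least 1 token), and to the `MAX_INT` sentinel when `max_model_len` itself is unavailable. A minimal standalone sketch of that derivation, with a hypothetical helper name not present in the patch:

```python
MAX_INT = 2147483647  # same sentinel used in schedule_config.py


def derive_long_prefill_threshold(threshold, max_model_len):
    """Mirror of the fallback logic in AscendSchedulerConfig.__post_init__ (sketch)."""
    if threshold is None or threshold <= 0:
        if max_model_len is None:
            return MAX_INT
        # roughly 4% of the model context, but never less than one token
        return max(1, int(max_model_len * 0.04))
    return threshold


assert derive_long_prefill_threshold(None, 8192) == 327   # int(8192 * 0.04)
assert derive_long_prefill_threshold(None, None) == MAX_INT
assert derive_long_prefill_threshold(512, 8192) == 512    # explicit value wins
```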
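`AscendScheduler.schedule()` above implements a prefill-first policy: waiting requests are admitted under the token budget first, and running (decode) requests are only scheduled in a step where no prefill was scheduled. The sketch below is a deliberately simplified, self-contained illustration of that control flow with toy types; it omits KV-cache allocation, preemption, LoRA, and encoder handling, and is not the actual scheduler.

```python
from collections import deque
from dataclasses import dataclass


@dataclass
class ToyRequest:
    request_id: str
    prompt_tokens: int
    computed_tokens: int = 0


def toy_schedule(waiting: deque, running: list, token_budget: int,
                 max_running: int) -> dict:
    """Prefill-first step: admit waiting requests under the budget;
    only if nothing was admitted, give each running request one decode token."""
    scheduled: dict = {}

    # 1) Prefill: drain the waiting queue while budget and slots remain.
    while waiting and token_budget > 0 and len(running) < max_running:
        req = waiting[0]
        new_tokens = req.prompt_tokens - req.computed_tokens
        if new_tokens > token_budget:
            break  # the real scheduler skips and revisits; a break keeps the toy simple
        waiting.popleft()
        running.append(req)
        scheduled[req.request_id] = new_tokens
        token_budget -= new_tokens

    # 2) Decode: only runs when no prefill was scheduled in this step.
    if not scheduled:
        for req in running:
            if token_budget <= 0:
                break
            scheduled[req.request_id] = 1
            token_budget -= 1
    return scheduled


waiting = deque([ToyRequest("a", 6), ToyRequest("b", 5)])
running: list = []
print(toy_schedule(waiting, running, token_budget=8, max_running=4))  # {'a': 6}
```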
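`_check_watermark_for_prefill` above refuses a prefill when the allocation would push the block pool below a small reserve (1% of all KV-cache blocks by default). The helper below isolates just that arithmetic for illustration; the block objects, evictable-block accounting, and manager API of the real method are simplified away, so treat it as a sketch rather than the actual check.

```python
import math


def passes_watermark(num_new_tokens: int,
                     num_computed_tokens: int,
                     num_existing_req_blocks: int,
                     num_free_blocks: int,
                     total_blocks: int,
                     block_size: int,
                     watermark: float = 0.01) -> bool:
    """Allocate only if the pool keeps at least `watermark * total_blocks`
    free blocks after the new blocks are taken."""
    watermark_blocks = total_blocks * watermark
    num_required_blocks = math.ceil(
        (num_new_tokens + num_computed_tokens) / block_size)
    num_new_blocks = num_required_blocks - num_existing_req_blocks
    return (num_free_blocks - num_new_blocks) >= watermark_blocks


# A 1000-block pool keeps ~10 blocks in reserve; a 512-token prefill with
# block_size=128 needs 4 new blocks, so at least 14 free blocks are required.
assert passes_watermark(512, 0, 0, num_free_blocks=14, total_blocks=1000, block_size=128)
assert not passes_watermark(512, 0, 0, num_free_blocks=13, total_blocks=1000, block_size=128)
```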
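The `moe_mlp.py` hunk above rebuilds `group_index` from `group_list` before calling `npu_dequant_swiglu_quant`: taking the first element plus `torch.diff` of a running sum recovers the individual group sizes, which suggests `group_list` holds cumulative token offsets per expert group at this point. That reading of the patch, and the sample values below, are assumptions for illustration only; the transform itself is a small CPU-only operation.

```python
import torch

# Cumulative token offsets per expert group: expert 0 owns 3 tokens,
# expert 1 owns 4 (3..7), expert 2 owns 5 (7..12).
group_list = torch.tensor([3, 7, 12], dtype=torch.int64)

# Same construction as in quant_apply_mlp: first element + successive differences.
group_diff = torch.diff(group_list, dim=0)
new_group = torch.cat([group_list[0].unsqueeze(0), group_diff], dim=0)

print(new_group)  # tensor([3, 4, 5]) -> per-group counts instead of running offsets
```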