In the previous installment, we saw how multiple inputs get wrapped up and stuffed into llm_engine. Next, through _run_engine, we actually process those inputs.
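For context, the driving script behind this walkthrough is essentially the standard vLLM quickstart. The model is OPT-125m (inferred from the hidden size 768 and vocabulary size 50272 that show up later), the four prompts reappear verbatim in the pdb dump at the end of this post, and the SamplingParams values here are placeholders:

from vllm import LLM, SamplingParams

# These four prompts match the RequestOutput dump at the end of this post.
prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)  # placeholder values

llm = LLM(model="facebook/opt-125m")  # hidden size 768, vocab size 50272
outputs = llm.generate(prompts, sampling_params)  # eventually calls _run_engine

With that context in place, here is _run_engine itself: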
def _run_engine(
        self, *, use_tqdm: bool
) -> List[Union[RequestOutput, EmbeddingRequestOutput]]:
    # Initialize tqdm.
    if use_tqdm:
        num_requests = self.llm_engine.get_num_unfinished_requests()
        pbar = tqdm(
            total=num_requests,
            desc="Processed prompts",
            dynamic_ncols=True,
            postfix=f"Generation Speed: {0:.2f} toks/s",
        )
    # Run the engine.
    outputs: List[Union[RequestOutput, EmbeddingRequestOutput]] = []
    total_toks = 0
    while self.llm_engine.has_unfinished_requests():
        step_outputs = self.llm_engine.step()
        for output in step_outputs:
            if output.finished:
                outputs.append(output)
                if use_tqdm:
                    if isinstance(output, RequestOutput):
                        # Calculate tokens only for RequestOutput
                        total_toks += sum(
                            len(stp.token_ids) for stp in output.outputs)
                        spd = total_toks / pbar.format_dict["elapsed"]
                        pbar.postfix = f"Generation Speed: {spd:.2f} toks/s"
                    pbar.update(1)
    if use_tqdm:
        pbar.close()
    # Sort the outputs by request ID.
    # This is necessary because some requests may be finished earlier than
    # its previous requests.
    return sorted(outputs, key=lambda x: int(x.request_id))
Inside llm_engine.step(), two calls do the real work. The first is the scheduler:

seq_group_metadata_list, scheduler_outputs = self.scheduler.schedule()

The other is model_executor.execute_model:
output = self.model_executor.execute_model(
    execute_model_req=execute_model_req)
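To see how these two calls fit together, here is a condensed sketch of LLMEngine.step() (paraphrased from the vLLM source of this vintage, with imports, error handling, and a few fields trimmed; treat it as a map rather than the exact code):

def step(self):
    # 1. Ask the scheduler which sequence groups run this iteration, and
    #    which KV-cache blocks must be swapped in/out or copied.
    seq_group_metadata_list, scheduler_outputs = self.scheduler.schedule()

    if not scheduler_outputs.is_empty():
        # 2. Bundle the scheduling decision into a single request object.
        execute_model_req = ExecuteModelRequest(
            seq_group_metadata_list=seq_group_metadata_list,
            blocks_to_swap_in=scheduler_outputs.blocks_to_swap_in,
            blocks_to_swap_out=scheduler_outputs.blocks_to_swap_out,
            blocks_to_copy=scheduler_outputs.blocks_to_copy,
        )
        # 3. Run one forward pass plus sampling on the workers.
        output = self.model_executor.execute_model(
            execute_model_req=execute_model_req)
    else:
        output = []

    # 4. Fold the sampled tokens back into the sequences and build the
    #    RequestOutput objects that step() returns to _run_engine.
    request_outputs = self._process_model_outputs(
        output, scheduler_outputs.scheduled_seq_groups,
        scheduler_outputs.ignored_seq_groups, seq_group_metadata_list)
    # Log stats.
    self.do_log_stats(scheduler_outputs, output)
    return request_outputs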
This call chain goes very deep (executor -> worker -> model runner -> the model itself) and involves the low-level KV-cache management, i.e. the block swapping and copying we just saw in the scheduler outputs.
Reading this far, I have to admit I'm losing patience. How did a handful of files of code end up this convoluted?
So let's cut to the most important thing inside model_executor.execute_model: the model inference itself. The corresponding code:
logits = self.model.compute_logits(hidden_states, sampling_metadata)

# Only perform sampling in the driver worker.
if not self.is_driver_worker:
    return None

# Sample the next token.
output = self.model.sample(
    logits=logits,
    sampling_metadata=sampling_metadata,
)
Here, hidden_states has shape [26, 768], logits has shape [4, 50272], and output has type vllm.sequence.SamplerOutput. The 4 is the number of input prompts; the 26 is the total number of input tokens (6 + 8 + 6 + 6, as you can count from the prompt_token_ids in the dump below); 768 and 50272 are OPT-125m's hidden size and vocabulary size. The shapes differ because during prefill we only need logits at each sequence's last position to sample its first new token, so compute_logits keeps one row per sequence (selected via sampling_metadata.selected_token_indices).
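A minimal sketch of that row selection, using the shapes from this debug session (the tensors and lm_head below are stand-ins, not vLLM's actual objects; only the shape arithmetic is the point):

import torch

# 4 prompts totalling 26 tokens; the lengths come from the prompt_token_ids
# in the pdb dump below: 6 + 8 + 6 + 6 = 26.
prompt_lens = torch.tensor([6, 8, 6, 6])
hidden_states = torch.randn(26, 768)               # one row per input token
lm_head = torch.nn.Linear(768, 50272, bias=False)  # stand-in vocab projection

# Prefill only needs logits at each sequence's last token position:
last_token_indices = torch.cumsum(prompt_lens, dim=0) - 1  # tensor([ 5, 13, 19, 25])
logits = lm_head(hidden_states[last_token_indices])
print(logits.shape)  # torch.Size([4, 50272])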
At this point we have completed one full iteration and obtained an output for each request, as shown in the code below. However, nowhere in the flow we just walked through did we find a module that distinguishes the decode pass from the prefill pass, so we must have glossed over some important details.
request_outputs = self._process_model_outputs(
    output, scheduler_outputs.scheduled_seq_groups,
    scheduler_outputs.ignored_seq_groups, seq_group_metadata_list)

# Log stats.
self.do_log_stats(scheduler_outputs, output)
(Pdb) p request_outputs
[RequestOutput(request_id=0, prompt='Hello, my name is', prompt_token_ids=[2, 31414, 6, 127, 766, 16], prompt_logprobs=None, outputs=[CompletionOutput(index=0, text=' Joel', token_ids=[8966], cumulative_logprob=-7.448906421661377, logprobs=None, finish_reason=None, stop_reason=None)], finished=False, metrics=RequestMetrics(arrival_time=1721867331.3341556, last_token_time=1721867331.3341556, first_scheduled_time=1721867333.2603674, first_token_time=1721868233.8055446, time_in_queue=1.9262118339538574, finished_time=None), lora_request=None), RequestOutput(request_id=1, prompt='The president of the United States is', prompt_token_ids=[2, 133, 394, 9, 5, 315, 532, 16], prompt_logprobs=None, outputs=[CompletionOutput(index=0, text=' speaking', token_ids=[2686], cumulative_logprob=-5.128592491149902, logprobs=None, finish_reason=None, stop_reason=None)], finished=False, metrics=RequestMetrics(arrival_time=1721867331.3390863, last_token_time=1721867331.3390863, first_scheduled_time=1721867333.2603674, first_token_time=1721868233.8055446, time_in_queue=1.921281099319458, finished_time=None), lora_request=None), RequestOutput(request_id=2, prompt='The capital of France is', prompt_token_ids=[2, 133, 812, 9, 1470, 16], prompt_logprobs=None, outputs=[CompletionOutput(index=0, text=' a', token_ids=[10], cumulative_logprob=-1.876983642578125, logprobs=None, finish_reason=None, stop_reason=None)], finished=False, metrics=RequestMetrics(arrival_time=1721867331.3396564, last_token_time=1721867331.3396564, first_scheduled_time=1721867333.2603674, first_token_time=1721868233.8055446, time_in_queue=1.9207110404968262, finished_time=None), lora_request=None), RequestOutput(request_id=3, prompt='The future of AI is', prompt_token_ids=[2, 133, 499, 9, 4687, 16], prompt_logprobs=None, outputs=[CompletionOutput(index=0, text=' facing', token_ids=[2114], cumulative_logprob=-7.601373195648193, logprobs=None, finish_reason=None, stop_reason=None)], finished=False, metrics=RequestMetrics(arrival_time=1721867331.340093, last_token_time=1721867331.340093, first_scheduled_time=1721867333.2603674, first_token_time=1721868233.8055446, time_in_queue=1.9202744960784912, finished_time=None), lora_request=None)]
(Pdb)
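The dump is dense, so note two things: every request is still finished=False, and each one carries exactly one sampled token (' Joel', ' speaking', ' a', ' facing'). That is consistent with this being the very first call to step(), i.e. the prefill pass, which also samples the first new token. A quick way to eyeball that state, using only fields that are visible in the dump above:

for req in request_outputs:
    generated = "".join(c.text for c in req.outputs)
    print(f"[{req.request_id}] {req.prompt!r} -> {generated!r} "
          f"(finished={req.finished})")
# [0] 'Hello, my name is' -> ' Joel' (finished=False)
# ... and so on for requests 1-3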
Well, I have to head off to work now...