【大模型推理】vLLM 源码学习
强烈推荐
https://zhuanlan.zhihu.com/p/680153425
SequenceGroup 存储了相同的 prompt 对应的不同的 Sequence,所以用字典存储这些 Sequence。
同一个Sequence可能占据多个逻辑Block, 所以在Sequence 中用列表存储
同一个 Block 要维护 token_id 列表,需要支持追加(append)操作;还需要判断 Block 是否还有空位可以放置新的 token。
# https://github.com/vllm-project/vllm/tree/v0.2.7/vllm/core/scheduler.py
class Scheduler:
    # NOTE(review): the original excerpt elides most of _schedule with
    # "..." placeholders; only the waiting-queue admission path is shown.
    def _schedule(self) -> SchedulerOutputs:
        ...
        # Only admit new (waiting) requests when no sequence groups are
        # currently swapped out to CPU memory.
        if not self.swapped:
            ...
            while self.waiting:
                ...
                # FCFS: take the oldest waiting sequence group.
                seq_group = self.waiting.pop(0)
                # Reserve physical KV-cache blocks for its prompt.
                self._allocate(seq_group)
                self.running.append(seq_group)
                num_curr_seqs += num_new_seqs
                scheduled.append(seq_group)
                ...
            ...
        return scheduler_outputs

    def _allocate(self, seq_group: SequenceGroup) -> None:
        # Delegate to the block manager: allocate physical token blocks
        # for the whole group (all sequences share the same prompt).
        self.block_manager.allocate(seq_group)
        # Mark every waiting sequence as RUNNING — it is about to be
        # scheduled for execution.
        for seq in seq_group.get_seqs(status=SequenceStatus.WAITING):
            seq.status = SequenceStatus.RUNNING
# https://github.com/vllm-project/vllm/tree/v0.2.7/vllm/core/block_manager.py
class BlockSpaceManager:
    def allocate(self, seq_group: SequenceGroup) -> None:
        """Reserve one physical GPU block per logical prompt block.

        All sequences in the group are assumed to share the same prompt,
        so the block table is built once from the first waiting sequence
        and then a copy of it is assigned to every sequence in the group.
        """
        first_seq = seq_group.get_seqs(status=SequenceStatus.WAITING)[0]

        block_table: BlockTable = []
        window = self.block_sliding_window
        for logical_idx in range(len(first_seq.logical_token_blocks)):
            if window is not None and logical_idx >= window:
                # Sliding-window attention: wrap around and reuse an
                # earlier physical block instead of allocating a new one.
                physical_block = block_table[logical_idx % window]
            else:
                physical_block = self.gpu_allocator.allocate()
            # The reference count drives the copy-on-write mechanism:
            # every sequence in the group shares this block initially.
            physical_block.ref_count = seq_group.num_seqs()
            block_table.append(physical_block)

        # Each sequence gets its own (shallow) copy of the block table.
        for seq in seq_group.get_seqs(status=SequenceStatus.WAITING):
            self.block_tables[seq.seq_id] = block_table.copy()
结合上述代码,seq_group 中所有的 seq 被分配了相同的 block_table(逻辑块到物理块的映射),这说明 seq_group 中所有的 seq 拥有相同的 prompt 内容,正如源码注释所言:# NOTE: Here we assume that all sequences in the group have the same prompt.
上面的 allocate 函数只是为 prompt 分配好了逻辑块与物理 block 的映射;下面的 append_slot 函数在将新生成的 token 放入 block 的一个 slot 时,可能触发 copy-on-write。
scheduler 的_append_slot方法:
# https://github.com/vllm-project/vllm/tree/v0.2.7/vllm/core/scheduler.py
class Scheduler:
    def _append_slot(
        self,
        seq_group: SequenceGroup,
        blocks_to_copy: Dict[int, List[int]],
    ) -> None:
        """Grow each running sequence by one slot.

        Any copy-on-write (src, dst) block pair reported by the block
        manager is accumulated into `blocks_to_copy` so the cache engine
        can perform the actual copies later.
        """
        for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
            cow = self.block_manager.append_slot(seq)
            if cow is None:
                continue
            # Copy-on-write happened: record src -> dst block numbers.
            src_block, dst_block = cow
            blocks_to_copy.setdefault(src_block, []).append(dst_block)
# https://github.com/vllm-project/vllm/tree/v0.2.7/vllm/core/block_manager.py
class BlockSpaceManager:
    def append_slot(self, seq: Sequence) -> Optional[Tuple[int, int]]:
        """Allocate a physical slot for a new token of `seq`.

        Returns None when the token can be appended in place, or a
        (src_block_number, dst_block_number) pair when copy-on-write
        was triggered on the last physical block.
        """
        logical_blocks = seq.logical_token_blocks
        block_table = self.block_tables[seq.seq_id]

        if len(block_table) < len(logical_blocks):
            # The sequence just grew into a fresh logical block.
            if (self.block_sliding_window
                    and len(block_table) >= self.block_sliding_window):
                # Sliding window: wrap around and reuse an existing
                # physical block. Fall through to the CoW check below,
                # since the reused block may still be shared.
                reuse_idx = len(block_table) % self.block_sliding_window
                block_table.append(block_table[reuse_idx])
            else:
                # Back the new logical block with a brand-new physical
                # block; it is trivially exclusive, so no copy is needed.
                block_table.append(self.gpu_allocator.allocate())
                return None

        # The new token goes into the last physical block.
        last_block = block_table[-1]
        assert last_block.device == Device.GPU
        if last_block.ref_count == 1:
            # Exclusively owned: appendable in place.
            return None

        # Shared with other sequences -> copy on write: allocate a
        # private replacement, drop one reference on the shared block,
        # and tell the caller to copy the old contents across.
        new_block = self.gpu_allocator.allocate()
        block_table[-1] = new_block
        self.gpu_allocator.free(last_block)
        return last_block.block_number, new_block.block_number
原文地址:https://blog.csdn.net/qq_38662930/article/details/143950775
免责声明:本站文章内容转载自网络资源,如本站内容侵犯了原著者的合法权益,可联系本站删除。更多内容请关注自学内容网(zxcms.com)!