From 1658c70a467634791346e84090c6a3f0781009b6 Mon Sep 17 00:00:00 2001
From: Noam Gat
Date: Wed, 17 Jul 2024 11:30:31 +0300
Subject: [PATCH 1/3] Update flashinfer.py with PagedAttention forwards

---
 vllm/attention/backends/flashinfer.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/vllm/attention/backends/flashinfer.py b/vllm/attention/backends/flashinfer.py
index a4b01c6d3b508..7eb8d022dea77 100644
--- a/vllm/attention/backends/flashinfer.py
+++ b/vllm/attention/backends/flashinfer.py
@@ -15,6 +15,7 @@
 from vllm import _custom_ops as ops
 from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
                                               AttentionMetadata, AttentionType)
+from vllm.attention.ops.paged_attn import PagedAttention
 
 
 class FlashInferBackend(AttentionBackend):
@@ -46,14 +47,14 @@ def swap_blocks(
         dst_kv_cache: torch.Tensor,
         src_to_dst: torch.Tensor,
     ) -> None:
-        raise NotImplementedError
+        PagedAttention.swap_blocks(src_kv_cache, dst_kv_cache, src_to_dst)
 
     @staticmethod
     def copy_blocks(
         kv_caches: List[torch.Tensor],
         src_to_dists: torch.Tensor,
     ) -> None:
-        raise NotImplementedError
+        PagedAttention.copy_blocks(kv_caches, src_to_dists)
 
     @staticmethod
     def get_supported_head_sizes() -> List[int]:

From 1d7959c316a3ffa71c0551971b81e310127b69e7 Mon Sep 17 00:00:00 2001
From: Noam Gat
Date: Thu, 18 Jul 2024 08:49:49 +0300
Subject: [PATCH 2/3] Update flashinfer.py

---
 vllm/attention/backends/flashinfer.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/vllm/attention/backends/flashinfer.py b/vllm/attention/backends/flashinfer.py
index 4c9d173678beb..cd4483ed27811 100644
--- a/vllm/attention/backends/flashinfer.py
+++ b/vllm/attention/backends/flashinfer.py
@@ -13,8 +13,8 @@
 import torch
 
 from vllm import _custom_ops as ops
-from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
-                                              AttentionMetadata, AttentionType)
+from vllm.attention.backends.abstract import (AttentionBackend,
+                                              AttentionImpl,
                                               AttentionMetadata,
                                               AttentionMetadataBuilder,
                                               AttentionType)

From 8ca76b2af10fcfa7e035ae770db1d9c7a11ae685 Mon Sep 17 00:00:00 2001
From: Noam Gat
Date: Thu, 18 Jul 2024 08:51:57 +0300
Subject: [PATCH 3/3] Update flashinfer.py

---
 vllm/attention/backends/flashinfer.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/vllm/attention/backends/flashinfer.py b/vllm/attention/backends/flashinfer.py
index cd4483ed27811..9c25b2cc2ba97 100644
--- a/vllm/attention/backends/flashinfer.py
+++ b/vllm/attention/backends/flashinfer.py
@@ -13,15 +13,14 @@
 import torch
 
 from vllm import _custom_ops as ops
-from vllm.attention.backends.abstract import (AttentionBackend,
-                                              AttentionImpl,
+from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
                                               AttentionMetadata,
                                               AttentionMetadataBuilder,
                                               AttentionType)
-from vllm.attention.ops.paged_attn import PagedAttention
 from vllm.attention.backends.utils import (PAD_SLOT_ID, compute_slot_mapping,
                                            compute_slot_mapping_start_idx,
                                            is_block_tables_empty)
+from vllm.attention.ops.paged_attn import PagedAttention
 from vllm.sequence import SequenceGroupMetadata
 from vllm.utils import get_kv_cache_torch_dtype, make_tensor_with_pad
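
For reference, a minimal sketch of how the two block-management hooks in
vllm/attention/backends/flashinfer.py read once all three patches are applied.
Only the relevant imports and the two static methods are shown; the rest of
FlashInferBackend and the module-level code are elided, so this is an
illustration of the net effect rather than the full file.

from typing import List

import torch

from vllm.attention.backends.abstract import AttentionBackend
from vllm.attention.ops.paged_attn import PagedAttention


class FlashInferBackend(AttentionBackend):

    @staticmethod
    def swap_blocks(
        src_kv_cache: torch.Tensor,
        dst_kv_cache: torch.Tensor,
        src_to_dst: torch.Tensor,
    ) -> None:
        # Forward block swaps to the shared PagedAttention helpers
        # instead of raising NotImplementedError.
        PagedAttention.swap_blocks(src_kv_cache, dst_kv_cache, src_to_dst)

    @staticmethod
    def copy_blocks(
        kv_caches: List[torch.Tensor],
        src_to_dists: torch.Tensor,
    ) -> None:
        # Forward block copies to PagedAttention as well.
        PagedAttention.copy_blocks(kv_caches, src_to_dists)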