drbh (HF Staff) committed
Commit d7ef8e6 · verified · parent d96c6a8

Upload folder using huggingface_hub

Files changed (29):
  1. .gitattributes +8 -0
  2. build/_flash_mla_cuda_fgsajugfcsrz2/_flash_mla_cuda_fgsajugfcsrz2.abi3.so +3 -0
  3. build/_flash_mla_cuda_okxvrfdiud2xk/_flash_mla_cuda_okxvrfdiud2xk.abi3.so +3 -0
  4. build/_flash_mla_cuda_wdjyadhrx7ypy/_flash_mla_cuda_wdjyadhrx7ypy.abi3.so +3 -0
  5. build/_flash_mla_cuda_ybk54nsmnrfag/_flash_mla_cuda_ybk54nsmnrfag.abi3.so +3 -0
  6. build/torch210-cxx11-cu128-x86_64-linux/__init__.py +19 -0
  7. build/torch210-cxx11-cu128-x86_64-linux/_flash_mla_cuda_okxvrfdiud2xk.abi3.so +3 -0
  8. build/torch210-cxx11-cu128-x86_64-linux/_ops.py +9 -0
  9. build/torch210-cxx11-cu128-x86_64-linux/flash_mla/__init__.py +26 -0
  10. build/torch210-cxx11-cu128-x86_64-linux/flash_mla_interface.py +435 -0
  11. build/torch210-cxx11-cu128-x86_64-linux/metadata.json +4 -0
  12. build/torch210-cxx11-cu130-x86_64-linux/__init__.py +19 -0
  13. build/torch210-cxx11-cu130-x86_64-linux/_flash_mla_cuda_fgsajugfcsrz2.abi3.so +3 -0
  14. build/torch210-cxx11-cu130-x86_64-linux/_ops.py +9 -0
  15. build/torch210-cxx11-cu130-x86_64-linux/flash_mla/__init__.py +26 -0
  16. build/torch210-cxx11-cu130-x86_64-linux/flash_mla_interface.py +435 -0
  17. build/torch210-cxx11-cu130-x86_64-linux/metadata.json +4 -0
  18. build/torch29-cxx11-cu128-x86_64-linux/__init__.py +19 -0
  19. build/torch29-cxx11-cu128-x86_64-linux/_flash_mla_cuda_wdjyadhrx7ypy.abi3.so +3 -0
  20. build/torch29-cxx11-cu128-x86_64-linux/_ops.py +9 -0
  21. build/torch29-cxx11-cu128-x86_64-linux/flash_mla/__init__.py +26 -0
  22. build/torch29-cxx11-cu128-x86_64-linux/flash_mla_interface.py +435 -0
  23. build/torch29-cxx11-cu128-x86_64-linux/metadata.json +4 -0
  24. build/torch29-cxx11-cu130-x86_64-linux/__init__.py +19 -0
  25. build/torch29-cxx11-cu130-x86_64-linux/_flash_mla_cuda_ybk54nsmnrfag.abi3.so +3 -0
  26. build/torch29-cxx11-cu130-x86_64-linux/_ops.py +9 -0
  27. build/torch29-cxx11-cu130-x86_64-linux/flash_mla/__init__.py +26 -0
  28. build/torch29-cxx11-cu130-x86_64-linux/flash_mla_interface.py +435 -0
  29. build/torch29-cxx11-cu130-x86_64-linux/metadata.json +4 -0
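Each build directory name above encodes the Torch minor version, the C++11 ABI, the CUDA version, and the platform (for example, torch210-cxx11-cu128-x86_64-linux is Torch 2.10 with CUDA 12.8 on x86_64 Linux). As a rough, hypothetical sketch only (not part of this commit), a loader could derive the expected directory name from the running environment:

import platform
import torch

def guess_build_variant() -> str:
    # Hypothetical helper: build a directory name of the form
    # torch{MAJOR}{MINOR}-cxx11-cu{CUDA_MAJOR}{CUDA_MINOR}-{arch}-{os}.
    torch_major, torch_minor = torch.__version__.split(".")[:2]
    cuda_major, cuda_minor = (torch.version.cuda or "0.0").split(".")[:2]
    return (
        f"torch{torch_major}{torch_minor}-cxx11-"
        f"cu{cuda_major}{cuda_minor}-"
        f"{platform.machine()}-{platform.system().lower()}"
    )

# Example: on PyTorch 2.10 with CUDA 12.8 on x86_64 Linux this yields
# "torch210-cxx11-cu128-x86_64-linux", matching one of the directories listed above.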
.gitattributes CHANGED
@@ -33,3 +33,11 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+ build/_flash_mla_cuda_fgsajugfcsrz2/_flash_mla_cuda_fgsajugfcsrz2.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/_flash_mla_cuda_okxvrfdiud2xk/_flash_mla_cuda_okxvrfdiud2xk.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/_flash_mla_cuda_wdjyadhrx7ypy/_flash_mla_cuda_wdjyadhrx7ypy.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/_flash_mla_cuda_ybk54nsmnrfag/_flash_mla_cuda_ybk54nsmnrfag.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch210-cxx11-cu128-x86_64-linux/_flash_mla_cuda_okxvrfdiud2xk.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch210-cxx11-cu130-x86_64-linux/_flash_mla_cuda_fgsajugfcsrz2.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch29-cxx11-cu128-x86_64-linux/_flash_mla_cuda_wdjyadhrx7ypy.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch29-cxx11-cu130-x86_64-linux/_flash_mla_cuda_ybk54nsmnrfag.abi3.so filter=lfs diff=lfs merge=lfs -text
build/_flash_mla_cuda_fgsajugfcsrz2/_flash_mla_cuda_fgsajugfcsrz2.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f5aad5a4a128ce15d13073fb8e6d2274c703200eb18bb8eff7fae606344381c
+ size 3614248
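The .so entries above are stored as Git LFS pointer stubs: three text lines giving the spec version, the SHA-256 of the real binary, and its size in bytes; the binary itself is fetched from LFS storage at checkout. Below is a small illustrative parser for this pointer format; the read_lfs_pointer helper is hypothetical and not part of the repository.

from pathlib import Path

def read_lfs_pointer(path: Path) -> dict:
    # Illustrative only: parse a Git LFS pointer stub ("version", "oid", "size" lines).
    fields = {}
    for line in path.read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    fields["size"] = int(fields["size"])                      # size of the real .so in bytes
    fields["oid"] = fields["oid"].removeprefix("sha256:")     # hex digest of the binary
    return fields

# e.g. read_lfs_pointer(Path("build/.../_flash_mla_cuda_okxvrfdiud2xk.abi3.so"))
# -> {"version": "https://git-lfs.github.com/spec/v1", "oid": "0603b6...", "size": 3534104}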
build/_flash_mla_cuda_okxvrfdiud2xk/_flash_mla_cuda_okxvrfdiud2xk.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0603b6c2473020878099485b88b466d1014eabda5469d892cccd03cfa8024de2
+ size 3534104
build/_flash_mla_cuda_wdjyadhrx7ypy/_flash_mla_cuda_wdjyadhrx7ypy.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b96472e26c3caa492a0e0d75014ab91dd47368c2f4f49931517e01c209342e0
+ size 3523120
build/_flash_mla_cuda_ybk54nsmnrfag/_flash_mla_cuda_ybk54nsmnrfag.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46f67b1de9489460562e162661cc1f19472c26f2fe2fb063529bbde879426125
+ size 3599176
build/torch210-cxx11-cu128-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,19 @@
+ __version__ = "1.0.0"
+
+ from .flash_mla_interface import (
+     get_mla_metadata,
+     flash_mla_with_kvcache,
+     flash_attn_varlen_func,
+     flash_attn_varlen_qkvpacked_func,
+     flash_attn_varlen_kvpacked_func,
+     flash_mla_sparse_fwd
+ )
+
+ __all__ = [
+     "get_mla_metadata",
+     "flash_mla_with_kvcache",
+     "flash_attn_varlen_func",
+     "flash_attn_varlen_qkvpacked_func",
+     "flash_attn_varlen_kvpacked_func",
+     "flash_mla_sparse_fwd"
+ ]
build/torch210-cxx11-cu128-x86_64-linux/_flash_mla_cuda_okxvrfdiud2xk.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0603b6c2473020878099485b88b466d1014eabda5469d892cccd03cfa8024de2
+ size 3534104
build/torch210-cxx11-cu128-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _flash_mla_cuda_okxvrfdiud2xk
+ ops = torch.ops._flash_mla_cuda_okxvrfdiud2xk
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_flash_mla_cuda_okxvrfdiud2xk::{op_name}"
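Importing the .abi3.so as a submodule registers its custom operators under the torch.ops namespace bound to ops above, and add_op_namespace_prefix simply builds the fully qualified operator name. The sketch below mirrors that helper to show the naming convention; op names such as dense_prefill_fwd come from flash_mla_interface.py further down, and the suggested torch.library use is an assumption, not something shown in this commit.

# Hedged sketch of the naming convention used by _ops.py above.
NAMESPACE = "_flash_mla_cuda_okxvrfdiud2xk"  # stand-in for this build's module name

def add_op_namespace_prefix(op_name: str) -> str:
    # Same behaviour as the helper above: qualify an op name with the build's namespace.
    return f"{NAMESPACE}::{op_name}"

print(add_op_namespace_prefix("dense_prefill_fwd"))
# -> "_flash_mla_cuda_okxvrfdiud2xk::dense_prefill_fwd"
# One plausible (assumed) use is passing this qualified name to torch.library helpers,
# e.g. torch.library.register_fake("_flash_mla_cuda_okxvrfdiud2xk::dense_prefill_fwd").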
build/torch210-cxx11-cu128-x86_64-linux/flash_mla/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import sys
+
+ import importlib.util
+ from pathlib import Path
+ from types import ModuleType
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is: after adding it to `sys.modules`,
+     # it would also be used for other imports. So we make a module name that
+     # depends on the path, keeping it unique via the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
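The shim above re-exports everything from the variant's top-level __init__.py under a path-derived module name, so loading the "same" file from two different build directories cannot collide in sys.modules. A standalone sketch of the same trick is shown below; the file names and module contents are made up for illustration.

import ctypes
import importlib.util
import sys
import tempfile
from pathlib import Path

def import_from_path(file_path: Path):
    # Same idea as _import_from_path above: key sys.modules by a hash of the
    # absolute path, so two copies of an identically named module can coexist.
    name = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
    spec = importlib.util.spec_from_file_location(name, file_path)
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    spec.loader.exec_module(module)
    return module

# Illustration: two files with the same basename load as two distinct modules.
with tempfile.TemporaryDirectory() as a, tempfile.TemporaryDirectory() as b:
    (Path(a) / "mod.py").write_text("WHO = 'a'\n")
    (Path(b) / "mod.py").write_text("WHO = 'b'\n")
    m1 = import_from_path(Path(a) / "mod.py")
    m2 = import_from_path(Path(b) / "mod.py")
    assert (m1.WHO, m2.WHO) == ("a", "b")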
build/torch210-cxx11-cu128-x86_64-linux/flash_mla_interface.py ADDED
@@ -0,0 +1,435 @@
+ from typing import Optional, Tuple
+ import dataclasses
+
+ import torch
+
+ from ._ops import ops as flash_mla_cuda
+
+ @dataclasses.dataclass
+ class FlashMLASchedMeta:
+     """
+     A class that stores the tile scheduler metadata of FlashMLA
+     """
+
+     @dataclasses.dataclass
+     class Config:
+         b: int
+         s_q: int
+         h_q: int
+         page_block_size: int
+         h_k: int
+
+         causal: bool
+         is_fp8_kvcache: bool
+         topk: Optional[int]
+
+         extra_page_block_size: Optional[int]
+         extra_topk: Optional[int]
+
+     have_initialized: bool = False
+
+     config: Optional[Config] = None
+
+     tile_scheduler_metadata: Optional[torch.Tensor] = None  # (num_sm_parts, TileSchedulerMetaDataSize), dtype torch.int32.
+     num_splits: Optional[torch.Tensor] = None  # (1), dtype torch.int32.
+
+
+ def get_mla_metadata(
+     *args,
+     **kwargs
+ ) -> Tuple[FlashMLASchedMeta, None]:
+     """
+     Returns an empty instance of FlashMLASchedMeta. The actual scheduling metadata will be generated during the first invocation of flash_mla_with_kvcache.
+
+     Arguments:
+         This function does not need any arguments, but we keep *args and **kwargs to be compatible with the old interface.
+
+     Return:
+         A tuple. Due to historical reasons, we return a tuple of (FlashMLASchedMeta, None) for now. Only the first element is useful.
+     """
+     return FlashMLASchedMeta(), None
+
+
+ def flash_mla_with_kvcache(
+     q: torch.Tensor,
+     k_cache: torch.Tensor,
+     block_table: Optional[torch.Tensor],
+     cache_seqlens: Optional[torch.Tensor],
+     head_dim_v: int,
+     tile_scheduler_metadata: FlashMLASchedMeta,
+     num_splits: None = None,
+     softmax_scale: Optional[float] = None,
+     causal: bool = False,
+     is_fp8_kvcache: bool = False,
+     indices: Optional[torch.Tensor] = None,
+     attn_sink: Optional[torch.Tensor] = None,
+     extra_k_cache: Optional[torch.Tensor] = None,
+     extra_indices_in_kvcache: Optional[torch.Tensor] = None,
+     topk_length: Optional[torch.Tensor] = None,
+     extra_topk_length: Optional[torch.Tensor] = None
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+     """
+     Arguments:
+         q: (batch_size, seq_len_q, num_heads_q, head_dim).
+         k_cache: (num_blocks, page_block_size, num_heads_k, head_dim).
+             Different modes (fp8/bf16, dense/sparse) have different KV cache layouts. See the comments below for details.
+             The KV cache must be contiguously valid for sparse attention on sm100. Here "contiguously valid" means that every byte, from the very beginning of the KV cache to its last byte, is a valid memory address to visit (i.e. will not IMA). In other words, the KV cache may be a slice of a larger array, but cannot be a list of disjoint memory blocks.
+         block_table: (batch_size, max_num_blocks_per_seq), torch.int32. Can be None when sparse attention is used.
+         cache_seqlens: (batch_size), torch.int32. Can be None when sparse attention is used.
+         head_dim_v: Head dim of v. Must be 512.
+         tile_scheduler_metadata (sched_meta): FlashMLASchedMeta, returned by get_mla_metadata. You may reuse the same sched_meta across different invocations, but only when the tensor shapes and the values of cache_seqlens, topk_length, and extra_topk_length remain the same.
+         num_splits: must be None (kept for compatibility with the old interface).
+         softmax_scale: float. The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim_k).
+         causal: bool. Whether to apply a causal attention mask. Only valid for dense attention.
+         is_fp8_kvcache: bool.
+         indices: (batch_size, seq_len_q, topk). KV indices when sparse attention is enabled.
+             Note that indices[i][j][k] = (the index of the page block where token t resides) * block_size + (the offset of token t within the page block),
+             where t is the k-th token of the j-th q-sequence in the i-th batch.
+         attn_sink: Optional[torch.Tensor], (num_heads_q, ), torch.float32. If present, the final output will be scaled by exp(lse) / (exp(lse) + exp(attn_sink)). Has no effect on the returned softmax_lse. +inf will cause the result to become 0.
+         extra_k_cache and extra_indices_in_kvcache: If provided, attention also covers these extra tokens in addition to those in k_cache and indices. Their format requirements are the same as those of k_cache and indices, respectively.
+         topk_length / extra_topk_length: (batch_size, ), torch.int32. If provided, only the leftmost topk_length indices will be processed. Useful when the actual topk differs across queries, saving some computation compared to masking.
+
+     For DeepSeek V3, DeepSeek V3.1, and DeepSeek V3.2:
+         head_dim should be 576 while head_dim_v should be 512.
+         In FP8+sparse mode, each token's KV cache is 656 bytes, structured as:
+         - The shape of the tensor `k_cache` is (num_blocks, page_block_size, num_heads_k, head_dim), and num_heads_k must be 1.
+         - First 512 bytes: the "quantized NoPE" part, containing 512 float8_e4m3 values.
+         - Next 16 bytes: scale factors, containing 4 float32 values. The first float32 is the scale for the first 128 float8_e4m3 values, the second for the next 128, and so on.
+         - Last 128 bytes: the "RoPE" part, containing 64 bfloat16 values. This part is not quantized, for accuracy.
+
+     Return:
+         out: (batch_size, seq_len_q, num_heads_q, head_dim_v).
+         softmax_lse: (batch_size, num_heads_q, seq_len_q), torch.float32.
+     """
+     sched_meta = tile_scheduler_metadata
+     indices_in_kvcache = indices
+     assert isinstance(sched_meta, FlashMLASchedMeta), "tile_scheduler_metadata must be of type FlashMLASchedMeta"
+     assert num_splits is None, "num_splits must be None"
+
+     topk = indices_in_kvcache.shape[-1] if indices_in_kvcache is not None else None
+     extra_k_page_block_size = extra_k_cache.shape[1] if extra_k_cache is not None else None
+     extra_topk = extra_indices_in_kvcache.shape[-1] if extra_indices_in_kvcache is not None else None
+     if softmax_scale is None:
+         softmax_scale = q.shape[-1] ** (-0.5)
+
+     if not sched_meta.have_initialized:
+         # Sanity check. We only perform the sanity check during the first invocation to save CPU time.
+         if indices_in_kvcache is not None:
+             assert not causal, "causal must be False when indices_in_kvcache is not None (i.e. sparse attention is enabled)"
+
+         # Initialize the tile scheduler metadata during the first invocation.
+         sched_meta.have_initialized = True
+         sched_meta.config = FlashMLASchedMeta.Config(
+             q.shape[0],
+             q.shape[1],
+             q.shape[2],
+             k_cache.shape[1],
+             k_cache.shape[2],
+
+             causal,
+             is_fp8_kvcache,
+             topk,
+
+             extra_k_page_block_size,
+             extra_topk,
+         )
+     else:
+         # Check whether the input arguments are consistent with sched_meta
+         helper_msg = " Your input arguments are inconsistent with sched_meta. Please make sure the input arguments are consistent across different invocations of flash_mla_with_kvcache on the same sched_meta."
+         assert sched_meta.config is not None
+         assert sched_meta.config.b == q.shape[0], "sched_meta.config.b must be equal to batch_size." + helper_msg
+         assert sched_meta.config.s_q == q.shape[1], "sched_meta.config.s_q must be equal to seq_len_q." + helper_msg
+         assert sched_meta.config.h_q == q.shape[2], "sched_meta.config.h_q must be equal to num_heads_q." + helper_msg
+         assert sched_meta.config.page_block_size == k_cache.shape[1], "sched_meta.config.page_block_size must be equal to page_block_size." + helper_msg
+         assert sched_meta.config.h_k == k_cache.shape[2], "sched_meta.config.h_k must be equal to num_heads_k." + helper_msg
+         assert sched_meta.config.causal == causal, "sched_meta.config.causal must be equal to causal." + helper_msg
+         assert sched_meta.config.is_fp8_kvcache == is_fp8_kvcache, "sched_meta.config.is_fp8_kvcache must be equal to is_fp8_kvcache." + helper_msg
+         assert sched_meta.config.topk == topk, "sched_meta.config.topk must be equal to the last dim of indices_in_kvcache." + helper_msg
+         assert sched_meta.config.extra_page_block_size == extra_k_page_block_size, "sched_meta.config.extra_page_block_size must be equal to the page_block_size of extra_k_cache." + helper_msg
+         assert sched_meta.config.extra_topk == extra_topk, "sched_meta.config.extra_topk must be equal to the last dim of extra_indices_in_kvcache." + helper_msg
+
+     if topk is not None:
+         # Sparse attention
+         assert not causal, "causal must be False when sparse attention is enabled"
+         assert is_fp8_kvcache, "is_fp8_kvcache must be True when sparse attention is enabled"
+         out, lse, new_tile_scheduler_metadata, new_num_splits = flash_mla_cuda.sparse_decode_fwd(
+             q, k_cache, indices_in_kvcache, topk_length, attn_sink,
+             sched_meta.tile_scheduler_metadata, sched_meta.num_splits,
+             extra_k_cache, extra_indices_in_kvcache, extra_topk_length,
+             head_dim_v, softmax_scale
+         )
+     else:
+         # Dense attention
+         assert indices_in_kvcache is None and attn_sink is None and extra_k_cache is None and extra_indices_in_kvcache is None and topk_length is None and extra_topk_length is None, "indices_in_kvcache, attn_sink, extra_k_cache, extra_indices_in_kvcache, topk_length and extra_topk_length must be None when dense attention is used."
+         assert block_table is not None and cache_seqlens is not None, "block_table and cache_seqlens must be provided when dense attention is used."
+         out, lse, new_tile_scheduler_metadata, new_num_splits = flash_mla_cuda.dense_decode_fwd(
+             q, k_cache, head_dim_v,
+             cache_seqlens, block_table,
+             softmax_scale, causal,
+             sched_meta.tile_scheduler_metadata, sched_meta.num_splits
+         )
+     sched_meta.tile_scheduler_metadata = new_tile_scheduler_metadata
+     sched_meta.num_splits = new_num_splits
+     return (out, lse)
+
+
+ def flash_mla_sparse_fwd(
+     q: torch.Tensor,
+     kv: torch.Tensor,
+     indices: torch.Tensor,
+     sm_scale: float,
+     d_v: int = 512,
+     attn_sink: Optional[torch.Tensor] = None,
+     topk_length: Optional[torch.Tensor] = None,
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+     """
+     Sparse attention prefill kernel
+
+     Args:
+         q: [s_q, h_q, d_qk], bfloat16
+         kv: [s_kv, h_kv, d_qk], bfloat16
+         indices: [s_q, h_kv, topk], int32. Invalid indices should be set to -1 or numbers >= s_kv
+         sm_scale: float
+         d_v: The dimension of value vectors. Can only be 512
+         attn_sink: optional, [h_q], float32.
+             If attn_sink is provided, the output will additionally be multiplied by exp(lse) / (exp(lse) + exp(attn_sink)).
+             +-inf in attn_sink is handled normally (i.e., -inf has no effect, +inf makes the corresponding output all zeros).
+             This argument has no effect on lse and max_logits.
+         topk_length: optional, [s_q], int32. If provided, the i-th q token will only attend to the k tokens specified by indices[i, :, :topk_length[i]], ignoring later k/v tokens (even if provided in indices).
+             In extremely rare cases (topk_length provided, a valid topk index between topk_length[i] and s_kv, and that topk index pointing to a k token containing NaN), the operator output will contain NaN, so please avoid this situation.
+
+     Returns:
+         (output, max_logits, lse)
+         Please refer to tests/ref.py for the precise definitions of these values.
+         - output: [s_q, h_q, d_v], bfloat16
+         - max_logits: [s_q, h_q], float
+         - lse: [s_q, h_q], float, log-sum-exp of attention scores
+     """
+     results = flash_mla_cuda.sparse_prefill_fwd(
+         q, kv, indices, sm_scale, d_v, attn_sink, topk_length
+     )
+     return results
+
+
+ def _flash_attn_varlen_forward(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     cu_seqlens_qo: torch.Tensor,
+     cu_seqlens_kv: torch.Tensor,
+     max_seqlen_qo: int,
+     max_seqlen_kv: int,
+     out: Optional[torch.Tensor] = None,
+     lse: Optional[torch.Tensor] = None,
+     causal: bool = False,
+     softmax_scale: Optional[float] = None,
+     is_varlen: bool = True,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+     qo_total_len, num_qo_heads, head_dim_qk = q.shape
+     kv_total_len, num_kv_heads, head_dim_vo = v.shape
+
+     mask_mode_code = 1 if causal else 0
+     if softmax_scale is None:
+         softmax_scale = head_dim_qk ** (-0.5)
+
+     if out is None:
+         out = torch.empty(qo_total_len, num_qo_heads, head_dim_vo, device=q.device, dtype=q.dtype)
+     if lse is None:
+         # Make lse contiguous on the seqlen dim
+         lse = torch.empty(num_qo_heads, qo_total_len, device=q.device, dtype=torch.float32).T
+
+     workspace_buffer = torch.empty(32 * 1024 * 1024, dtype=torch.uint8, device=q.device)
+     flash_mla_cuda.dense_prefill_fwd(
+         workspace_buffer,
+         q,
+         k,
+         v,
+         cu_seqlens_qo,
+         cu_seqlens_kv,
+         out,
+         lse,
+         mask_mode_code,
+         softmax_scale,
+         max_seqlen_qo,
+         max_seqlen_kv,
+         is_varlen,
+     )
+
+     return out, lse
+
+
+ def _flash_attn_varlen_backward(
+     do: torch.Tensor,
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     out: torch.Tensor,
+     lse: torch.Tensor,
+     cu_seqlens_qo: torch.Tensor,
+     cu_seqlens_kv: torch.Tensor,
+     max_seqlen_qo: int,
+     max_seqlen_kv: int,
+     dq: Optional[torch.Tensor] = None,
+     dk: Optional[torch.Tensor] = None,
+     dv: Optional[torch.Tensor] = None,
+     causal: bool = False,
+     softmax_scale: Optional[float] = None,
+     is_varlen: bool = True,
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+     qo_total_len, num_qo_heads, head_dim_qk = q.shape
+     kv_total_len, num_kv_heads, head_dim_vo = v.shape
+
+     # TODO: fix bwd GQA
+     if num_qo_heads != num_kv_heads:
+         raise ValueError(f"SM100 bwd doesn't support GQA now. num_qo_heads: {num_qo_heads}, num_kv_heads: {num_kv_heads}.")
+
+     mask_mode_code = 1 if causal else 0
+     if softmax_scale is None:
+         softmax_scale = head_dim_qk ** (-0.5)
+
+     if dq is None:
+         dq = torch.empty(qo_total_len, num_qo_heads, head_dim_qk, device=q.device, dtype=q.dtype)
+     if dk is None:
+         dk = torch.empty(kv_total_len, num_kv_heads, head_dim_qk, device=q.device, dtype=q.dtype)
+     if dv is None:
+         dv = torch.empty(kv_total_len, num_kv_heads, head_dim_vo, device=q.device, dtype=q.dtype)
+
+     max_seqlen_qo_aligned = (max_seqlen_qo + 7) // 8 * 8
+     bs = cu_seqlens_qo.shape[0] - 1
+     workspace_bytes = 0
+     workspace_bytes += 4 * bs * max_seqlen_qo_aligned * num_qo_heads * head_dim_qk  # dQ_acc
+     workspace_bytes += 4 * max_seqlen_qo_aligned * bs * num_qo_heads * 2  # sum_OdO and scaled_lse
+     if num_qo_heads != num_kv_heads:
+         workspace_bytes += 2 * kv_total_len * num_qo_heads * (head_dim_qk + head_dim_vo)  # dKV_acc
+     workspace_buffer = torch.empty(workspace_bytes, dtype=torch.uint8, device=q.device)
+     flash_mla_cuda.dense_prefill_bwd(
+         workspace_buffer,
+         do,
+         q,
+         k,
+         v,
+         out,
+         lse,
+         cu_seqlens_qo,
+         cu_seqlens_kv,
+         dq,
+         dk,
+         dv,
+         mask_mode_code,
+         softmax_scale,
+         max_seqlen_qo,
+         max_seqlen_kv,
+         is_varlen,
+     )
+
+     return dq, dk, dv
+
+
+ class FlashAttnVarlenFunc(torch.autograd.Function):
+     def forward(
+         ctx,
+         q: torch.Tensor,
+         k: torch.Tensor,
+         v: torch.Tensor,
+         cu_seqlens_qo: torch.Tensor,
+         cu_seqlens_kv: torch.Tensor,
+         max_seqlen_qo: int,
+         max_seqlen_kv: int,
+         causal: bool = False,
+         softmax_scale: Optional[float] = None,
+         is_varlen: bool = True,
+     ) -> Tuple[torch.Tensor, torch.Tensor]:
+         out, lse = _flash_attn_varlen_forward(
+             q, k, v,
+             cu_seqlens_qo, cu_seqlens_kv, max_seqlen_qo, max_seqlen_kv,
+             causal=causal, softmax_scale=softmax_scale,
+             is_varlen=is_varlen,
+         )
+         ctx.save_for_backward(q, k, v, out, lse, cu_seqlens_qo, cu_seqlens_kv)
+         ctx.max_seqlen_qo = max_seqlen_qo
+         ctx.max_seqlen_kv = max_seqlen_kv
+         ctx.causal = causal
+         ctx.softmax_scale = softmax_scale
+         ctx.is_varlen = is_varlen
+         return out, lse
+
+     def backward(
+         ctx,
+         do: torch.Tensor,
+         dlse: torch.Tensor,
+     ):
+         del dlse  # LSE doesn't support backward currently
+         q, k, v, out, lse, cu_seqlens_qo, cu_seqlens_kv = ctx.saved_tensors
+         dq, dk, dv = _flash_attn_varlen_backward(
+             do, q, k, v, out, lse,
+             cu_seqlens_qo, cu_seqlens_kv, ctx.max_seqlen_qo, ctx.max_seqlen_kv,
+             causal=ctx.causal, softmax_scale=ctx.softmax_scale,
+             is_varlen=ctx.is_varlen,
+         )
+         return dq, dk, dv, None, None, None, None, None, None, None
+
+
+ def flash_attn_varlen_func(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     cu_seqlens_qo: torch.Tensor,
+     cu_seqlens_kv: torch.Tensor,
+     max_seqlen_qo: int,
+     max_seqlen_kv: int,
+     dropout_p: float = 0.0,
+     softmax_scale: Optional[float] = None,
+     causal: bool = False,
+     deterministic: bool = False,
+     is_varlen: bool = True,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+     assert dropout_p == 0.0
+     assert not deterministic
+     return FlashAttnVarlenFunc.apply(
+         q, k, v,
+         cu_seqlens_qo, cu_seqlens_kv, max_seqlen_qo, max_seqlen_kv,
+         causal, softmax_scale, is_varlen,
+     )
+
+
+ def flash_attn_varlen_qkvpacked_func(
+     qkv: torch.Tensor,
+     cu_seqlens: torch.Tensor,
+     max_seqlen: int,
+     head_dim_qk: int,
+     dropout_p: float = 0.0,
+     softmax_scale: Optional[float] = None,
+     causal: bool = False,
+     deterministic: bool = False,
+     is_varlen: bool = True,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+     assert dropout_p == 0.0
+     assert not deterministic
+     return FlashAttnVarlenFunc.apply(
+         qkv[:, :, :head_dim_qk], qkv[:, :, head_dim_qk:head_dim_qk * 2], qkv[:, :, head_dim_qk * 2:],
+         cu_seqlens, cu_seqlens, max_seqlen, max_seqlen,
+         causal, softmax_scale, is_varlen,
+     )
+
+
+ def flash_attn_varlen_kvpacked_func(
+     q: torch.Tensor,
+     kv: torch.Tensor,
+     cu_seqlens_qo: torch.Tensor,
+     cu_seqlens_kv: torch.Tensor,
+     max_seqlen_qo: int,
+     max_seqlen_kv: int,
+     head_dim_qk: int,
+     dropout_p: float = 0.0,
+     softmax_scale: Optional[float] = None,
+     causal: bool = False,
+     deterministic: bool = False,
+     is_varlen: bool = True,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+     assert dropout_p == 0.0
+     assert not deterministic
+     return FlashAttnVarlenFunc.apply(
+         q, kv[:, :, :head_dim_qk], kv[:, :, head_dim_qk:],
+         cu_seqlens_qo, cu_seqlens_kv, max_seqlen_qo, max_seqlen_kv,
+         causal, softmax_scale, is_varlen,
+     )
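Going by the docstrings above, decoding runs in two steps: create a scheduler-metadata holder once with get_mla_metadata, then call flash_mla_with_kvcache per step, reusing the holder while the shapes and cache_seqlens values stay fixed. The sketch below is a hedged illustration of the dense bf16 decode path only; the shapes are example values, the import path is assumed, and a CUDA device with a compatible build of this kernel is required.

# Hedged usage sketch (dense bf16 decode path), based on the docstrings above.
import torch

from flash_mla import get_mla_metadata, flash_mla_with_kvcache  # assumed import path

batch, s_q, h_q, d_qk, d_v = 4, 1, 128, 576, 512
page_block_size, blocks_per_seq = 64, 16
num_blocks = batch * blocks_per_seq

q = torch.randn(batch, s_q, h_q, d_qk, device="cuda", dtype=torch.bfloat16)
k_cache = torch.randn(num_blocks, page_block_size, 1, d_qk, device="cuda", dtype=torch.bfloat16)
block_table = torch.arange(num_blocks, device="cuda", dtype=torch.int32).view(batch, blocks_per_seq)
cache_seqlens = torch.full((batch,), 1000, device="cuda", dtype=torch.int32)

# One metadata holder per (shape, seqlens) configuration; it is filled lazily on first use.
sched_meta, _ = get_mla_metadata()

for _ in range(8):  # steps with unchanged shapes/seqlens may reuse the same sched_meta
    out, softmax_lse = flash_mla_with_kvcache(
        q, k_cache, block_table, cache_seqlens, d_v, sched_meta, causal=True,
    )
# out: (batch, s_q, h_q, d_v); softmax_lse: (batch, h_q, s_q)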
build/torch210-cxx11-cu128-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "version": 1,
+   "python-depends": []
+ }
build/torch210-cxx11-cu130-x86_64-linux/__init__.py ADDED
Contents identical to build/torch210-cxx11-cu128-x86_64-linux/__init__.py above (19 added lines).
build/torch210-cxx11-cu130-x86_64-linux/_flash_mla_cuda_fgsajugfcsrz2.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f5aad5a4a128ce15d13073fb8e6d2274c703200eb18bb8eff7fae606344381c
+ size 3614248
build/torch210-cxx11-cu130-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _flash_mla_cuda_fgsajugfcsrz2
+ ops = torch.ops._flash_mla_cuda_fgsajugfcsrz2
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_flash_mla_cuda_fgsajugfcsrz2::{op_name}"
build/torch210-cxx11-cu130-x86_64-linux/flash_mla/__init__.py ADDED
Contents identical to build/torch210-cxx11-cu128-x86_64-linux/flash_mla/__init__.py above (26 added lines).
build/torch210-cxx11-cu130-x86_64-linux/flash_mla_interface.py ADDED
Contents identical to build/torch210-cxx11-cu128-x86_64-linux/flash_mla_interface.py above (435 added lines).
build/torch210-cxx11-cu130-x86_64-linux/metadata.json ADDED
Contents identical to build/torch210-cxx11-cu128-x86_64-linux/metadata.json above (4 added lines).
build/torch29-cxx11-cu128-x86_64-linux/__init__.py ADDED
Contents identical to build/torch210-cxx11-cu128-x86_64-linux/__init__.py above (19 added lines).
build/torch29-cxx11-cu128-x86_64-linux/_flash_mla_cuda_wdjyadhrx7ypy.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b96472e26c3caa492a0e0d75014ab91dd47368c2f4f49931517e01c209342e0
+ size 3523120
build/torch29-cxx11-cu128-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _flash_mla_cuda_wdjyadhrx7ypy
+ ops = torch.ops._flash_mla_cuda_wdjyadhrx7ypy
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_flash_mla_cuda_wdjyadhrx7ypy::{op_name}"
build/torch29-cxx11-cu128-x86_64-linux/flash_mla/__init__.py ADDED
Contents identical to build/torch210-cxx11-cu128-x86_64-linux/flash_mla/__init__.py above (26 added lines).
build/torch29-cxx11-cu128-x86_64-linux/flash_mla_interface.py ADDED
@@ -0,0 +1,435 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, Tuple
2
+ import dataclasses
3
+
4
+ import torch
5
+
6
+ from ._ops import ops as flash_mla_cuda
7
+
8
+ @dataclasses.dataclass
9
+ class FlashMLASchedMeta:
10
+ """
11
+ A class that stores the tile scheduler metadata of FlashMLA
12
+ """
13
+
14
+ @dataclasses.dataclass
15
+ class Config:
16
+ b: int
17
+ s_q: int
18
+ h_q: int
19
+ page_block_size: int
20
+ h_k: int
21
+
22
+ causal: bool
23
+ is_fp8_kvcache: bool
24
+ topk: Optional[int]
25
+
26
+ extra_page_block_size: Optional[int]
27
+ extra_topk: Optional[int]
28
+
29
+ have_initialized: bool = False
30
+
31
+ config: Optional[Config] = None
32
+
33
+ tile_scheduler_metadata: Optional[torch.Tensor] = None # (num_sm_parts, TileSchedulerMetaDataSize), dtype torch.int32.
34
+ num_splits: Optional[torch.Tensor] = None # (1), dtype torch.int32.
35
+
36
+
37
+ def get_mla_metadata(
38
+ *args,
39
+ **kwargs
40
+ ) -> Tuple[FlashMLASchedMeta, None]:
41
+ """
42
+ Returns an empty instance of FlashMLASchedMeta. The actual scheduling metadata will be generated during the first invocation of flash_mla_with_kvcache.
43
+
44
+ Arguments:
45
+ This function does not need any arguments, but we keep *args and **kwargs to be compatible with the old interface.
46
+
47
+ Return:
48
+ A tuple. Due to historical reasons, we return a tuple of (FlashMLASchedMeta, None) now. Only the first element is useful.
49
+ """
50
+ return FlashMLASchedMeta(), None
51
+
52
+
53
+ def flash_mla_with_kvcache(
54
+ q: torch.Tensor,
55
+ k_cache: torch.Tensor,
56
+ block_table: Optional[torch.Tensor],
57
+ cache_seqlens: Optional[torch.Tensor],
58
+ head_dim_v: int,
59
+ tile_scheduler_metadata: FlashMLASchedMeta,
60
+ num_splits: None = None,
61
+ softmax_scale: Optional[float] = None,
62
+ causal: bool = False,
63
+ is_fp8_kvcache: bool = False,
64
+ indices: Optional[torch.Tensor] = None,
65
+ attn_sink: Optional[torch.Tensor] = None,
66
+ extra_k_cache: Optional[torch.Tensor] = None,
67
+ extra_indices_in_kvcache: Optional[torch.Tensor] = None,
68
+ topk_length: Optional[torch.Tensor] = None,
69
+ extra_topk_length: Optional[torch.Tensor] = None
70
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
71
+ """
72
+ Arguments:
73
+ q: (batch_size, seq_len_q, num_heads_q, head_dim).
74
+ k_cache: (num_blocks, page_block_size, num_heads_k, head_dim).
75
+ Different modes (including fp8/bf16, and sparsity) has different KV cache layouts. See comments below for details.
76
+ The KV cache must be contiguously valid for sparse attention on sm100. Here "contiguously valid" means that every byte, from the very beginning of the KV cache, till the last byte in the KV cache, is valid memory address to visit (i.e. won't IMA). In other words, the KV cache could be a slice of a larger array, but cannot be a list of disjoint memory blocks.
77
+ block_table: (batch_size, max_num_blocks_per_seq), torch.int32. Can be None when sparse attention is used.
78
+ cache_seqlens: (batch_size), torch.int32. Can be None when sparse attention is used.
79
+ head_dim_v: Head_dim of v. Must be 512
80
+ sched_meta: FlashMLASchedMeta, return by get_mla_metadata. You may reuse the same sched_meta across different invocations, but only when the tensor shapes and the values of cache_seqlens, topk_length, and extra_topk_length remain the same.
81
+ num_splits_placeholder: must be "None" (to be compatible with the old interface).
82
+ softmax_scale: float. The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim_k).
83
+ causal: bool. Whether to apply causal attention mask. Only valid for dense attention
84
+ is_fp8_kvcache: bool.
85
+ indices: (batch_size, seq_len_q, topk). KV indices when sparse attention is enabled.
86
+ Note that indices[i][j][k] = (the index of the page block where token t resides) * page_block_size + (the offset of token t within that page block),
87
+ where t is the k-th token of the j-th q-sequence in the i-th batch.
88
+ attn_sink: Optional[torch.Tensor], (num_heads_q, ), torch.float32. If present, the final output will be scaled by exp(lse) / (exp(lse) + exp(attn_sink)). This has no effect on the returned softmax_lse. +inf will cause the corresponding result to become 0.
89
+ extra_k_cache and extra_indices_in_kvcache: If provided, the kernel will also attend to these extra tokens in addition to those in k_cache and indices. Their format requirements are the same as those of k_cache and indices respectively.
90
+ topk_length/extra_topk_length: (batch_size, ), torch.int32. If provided, only the leftmost topk_length indices will be processed. Useful when the actual topk differs across queries, since this saves some computation compared to masking.
91
+
92
+ For DeepSeek V3, DeepSeek V3.1, and DeepSeek V3.2:
93
+ head_dim should be 576 while head_dim_v should be 512.
94
+ In FP8+sparse mode, each token's KV cache is 656 Bytes, structured as:
95
+ - The shape of the tensor `k_cache` is (num_blocks, page_block_size, num_heads_k, head_dim), and num_heads_k must be 1.
96
+ - First 512 bytes: The "quantized NoPE" part, containing 512 float8_e4m3 values.
97
+ - Next 16 bytes: Scale factors, containing 4 float32 values. The first float32 is the scale for the first 128 float8_e4m3 values, the second for the next 128, and so on.
98
+ - Last 128 bytes: The "RoPE" part, containing 64 bfloat16 values. This part is not quantized for accuracy.
99
+
100
+ Return:
101
+ out: (batch_size, seq_len_q, num_heads_q, head_dim_v).
102
+ softmax_lse: (batch_size, num_heads_q, seq_len_q), torch.float32.
103
+ """
104
+ sched_meta = tile_scheduler_metadata
105
+ indices_in_kvcache = indices
106
+ assert isinstance(sched_meta, FlashMLASchedMeta), "tile_scheduler_metadata must be of type FlashMLASchedMeta"
107
+ assert num_splits is None, "num_splits must be None"
108
+
109
+ topk = indices_in_kvcache.shape[-1] if indices_in_kvcache is not None else None
110
+ extra_k_page_block_size = extra_k_cache.shape[1] if extra_k_cache is not None else None
111
+ extra_topk = extra_indices_in_kvcache.shape[-1] if extra_indices_in_kvcache is not None else None
112
+ if softmax_scale is None:
113
+ softmax_scale = q.shape[-1] ** (-0.5)
114
+
115
+ if not sched_meta.have_initialized:
116
+ # Sanity check. We only perform the sanity check during the first invocation to save CPU time.
117
+ if indices_in_kvcache is not None:
118
+ assert not causal, "causal must be False when indices_in_kvcache is not None (i.e. sparse attention is enabled)"
119
+
120
+ # Initialize the tile scheduler metadata during the first invocation.
121
+ sched_meta.have_initialized = True
122
+ sched_meta.config = FlashMLASchedMeta.Config(
123
+ q.shape[0],
124
+ q.shape[1],
125
+ q.shape[2],
126
+ k_cache.shape[1],
127
+ k_cache.shape[2],
128
+
129
+ causal,
130
+ is_fp8_kvcache,
131
+ topk,
132
+
133
+ extra_k_page_block_size,
134
+ extra_topk,
135
+ )
136
+ else:
137
+ # Check whether the input arguments are consistent with sched_meta
138
+ helper_msg = " Your input arguments are inconsistent with sched_meta. Please make sure the input arguments are consistent across different invocations of flash_mla_with_kvcache on the same sched_meta."
139
+ assert sched_meta.config is not None
140
+ assert sched_meta.config.b == q.shape[0], "sched_meta.config.b must be equal to batch_size." + helper_msg
141
+ assert sched_meta.config.s_q == q.shape[1], "sched_meta.config.s_q must be equal to seq_len_q." + helper_msg
142
+ assert sched_meta.config.h_q == q.shape[2], "sched_meta.config.h_q must be equal to num_heads_q." + helper_msg
143
+ assert sched_meta.config.page_block_size == k_cache.shape[1], "sched_meta.config.page_block_size must be equal to page_block_size." + helper_msg
144
+ assert sched_meta.config.h_k == k_cache.shape[2], "sched_meta.config.h_k must be equal to num_heads_k." + helper_msg
145
+ assert sched_meta.config.causal == causal, "sched_meta.config.causal must be equal to causal." + helper_msg
146
+ assert sched_meta.config.is_fp8_kvcache == is_fp8_kvcache, "sched_meta.config.is_fp8_kvcache must be equal to is_fp8_kvcache." + helper_msg
147
+ assert sched_meta.config.topk == topk, "sched_meta.config.topk must be equal to the last dim of indices_in_kvcache." + helper_msg
148
+ assert sched_meta.config.extra_page_block_size == extra_k_page_block_size, "sched_meta.config.extra_page_block_size must be equal to the page_block_size of extra_k_cache." + helper_msg
149
+ assert sched_meta.config.extra_topk == extra_topk, "sched_meta.config.extra_topk must be equal to the last dim of extra_indices_in_kvcache." + helper_msg
150
+
151
+ if topk is not None:
152
+ # Sparse attention
153
+ assert not causal, "causal must be False when sparse attention is enabled"
154
+ assert is_fp8_kvcache, "is_fp8_kvcache must be True when sparse attention is enabled"
155
+ out, lse, new_tile_scheduler_metadata, new_num_splits = flash_mla_cuda.sparse_decode_fwd(
156
+ q, k_cache, indices_in_kvcache, topk_length, attn_sink,
157
+ sched_meta.tile_scheduler_metadata, sched_meta.num_splits,
158
+ extra_k_cache, extra_indices_in_kvcache, extra_topk_length,
159
+ head_dim_v, softmax_scale
160
+ )
161
+ else:
162
+ # Dense attention
163
+ assert indices_in_kvcache is None and attn_sink is None and extra_k_cache is None and extra_indices_in_kvcache is None and topk_length is None and extra_topk_length is None, "indices_in_kvcache, attn_sink, extra_k_cache, extra_indices_in_kvcache, topk_length and extra_topk_length must be None when dense attention is used."
164
+ assert block_table is not None and cache_seqlens is not None, "block_table and cache_seqlens must be provided when dense attention is used."
165
+ out, lse, new_tile_scheduler_metadata, new_num_splits = flash_mla_cuda.dense_decode_fwd(
166
+ q, k_cache, head_dim_v,
167
+ cache_seqlens, block_table,
168
+ softmax_scale, causal,
169
+ sched_meta.tile_scheduler_metadata, sched_meta.num_splits
170
+ )
171
+ sched_meta.tile_scheduler_metadata = new_tile_scheduler_metadata
172
+ sched_meta.num_splits = new_num_splits
173
+ return (out, lse)
174
+
175
+
176
+ def flash_mla_sparse_fwd(
177
+ q: torch.Tensor,
178
+ kv: torch.Tensor,
179
+ indices: torch.Tensor,
180
+ sm_scale: float,
181
+ d_v: int = 512,
182
+ attn_sink: Optional[torch.Tensor] = None,
183
+ topk_length: Optional[torch.Tensor] = None,
184
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
185
+ """
186
+ Sparse attention prefill kernel
187
+
188
+ Args:
189
+ q: [s_q, h_q, d_qk], bfloat16
190
+ kv: [s_kv, h_kv, d_qk], bfloat16
191
+ indices: [s_q, h_kv, topk], int32. Invalid indices should be set to -1 or numbers >= s_kv
192
+ sm_scale: float
193
+ d_v: The dimension of value vectors. Can only be 512
194
+ attn_sink: optional, [h_q], float32.
195
+ If attn_sink is provided, the output will additionally be multiplied by exp(lse) / (exp(lse) + exp(attn_sink)) when it is computed.
196
+ +-inf in attn_sink will be handled normally (i.e., -inf has no effect, +inf will make the corresponding output all zeros).
197
+ This argument has no effect on lse and max_logits.
198
+ topk_length: optional, [s_q], int32. If provided, the i-th q token will only attend to k tokens specified by indices[i, :, :topk_length[i]], ignoring later k/v tokens (even if provided in indices).
199
+ In extremely rare cases (topk_length is provided, there is a valid topk index between topk_length[i] and s_kv, and that index points to a k token containing NaN), the operator output will contain NaN, so please avoid this situation.
200
+
201
+ Returns:
202
+ (output, max_logits, lse)
203
+ Please refer to tests/ref.py for the precise definitions of these return values.
204
+ - output: [s_q, h_q, d_v], bfloat16
205
+ - max_logits: [s_q, h_q], float
206
+ - lse: [s_q, h_q], float, log-sum-exp of attention scores
207
+ """
208
+ results = flash_mla_cuda.sparse_prefill_fwd(
209
+ q, kv, indices, sm_scale, d_v, attn_sink, topk_length
210
+ )
211
+ return results
212
+
213
+
214
+ def _flash_attn_varlen_forward(
215
+ q: torch.Tensor,
216
+ k: torch.Tensor,
217
+ v: torch.Tensor,
218
+ cu_seqlens_qo: torch.Tensor,
219
+ cu_seqlens_kv: torch.Tensor,
220
+ max_seqlen_qo: int,
221
+ max_seqlen_kv: int,
222
+ out: Optional[torch.Tensor] = None,
223
+ lse: Optional[torch.Tensor] = None,
224
+ causal: bool = False,
225
+ softmax_scale: Optional[float] = None,
226
+ is_varlen: bool = True,
227
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
228
+ qo_total_len, num_qo_heads, head_dim_qk = q.shape
229
+ kv_total_len, num_kv_heads, head_dim_vo = v.shape
230
+
231
+ mask_mode_code = 1 if causal else 0
232
+ if softmax_scale is None:
233
+ softmax_scale = head_dim_qk ** (-0.5)
234
+
235
+ if out is None:
236
+ out = torch.empty(qo_total_len, num_qo_heads, head_dim_vo, device=q.device, dtype=q.dtype)
237
+ if lse is None:
238
+ # Make lse contiguous on seqlen dim
239
+ lse = torch.empty(num_qo_heads, qo_total_len, device=q.device, dtype=torch.float32).T
240
+
241
+ workspace_buffer = torch.empty(32 * 1024 * 1024, dtype=torch.uint8, device=q.device)
242
+ flash_mla_cuda.dense_prefill_fwd(
243
+ workspace_buffer,
244
+ q,
245
+ k,
246
+ v,
247
+ cu_seqlens_qo,
248
+ cu_seqlens_kv,
249
+ out,
250
+ lse,
251
+ mask_mode_code,
252
+ softmax_scale,
253
+ max_seqlen_qo,
254
+ max_seqlen_kv,
255
+ is_varlen,
256
+ )
257
+
258
+ return out, lse
259
+
260
+
261
+ def _flash_attn_varlen_backward(
262
+ do: torch.Tensor,
263
+ q: torch.Tensor,
264
+ k: torch.Tensor,
265
+ v: torch.Tensor,
266
+ out: torch.Tensor,
267
+ lse: torch.Tensor,
268
+ cu_seqlens_qo: torch.Tensor,
269
+ cu_seqlens_kv: torch.Tensor,
270
+ max_seqlen_qo: int,
271
+ max_seqlen_kv: int,
272
+ dq: Optional[torch.Tensor] = None,
273
+ dk: Optional[torch.Tensor] = None,
274
+ dv: Optional[torch.Tensor] = None,
275
+ causal: bool = False,
276
+ softmax_scale: Optional[float] = None,
277
+ is_varlen: bool = True,
278
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
279
+ qo_total_len, num_qo_heads, head_dim_qk = q.shape
280
+ kv_total_len, num_kv_heads, head_dim_vo = v.shape
281
+
282
+ # TODO: fix bwd GQA
283
+ if num_qo_heads != num_kv_heads:
284
+ raise ValueError(f"SM100 bwd doesn't support GQA now. num_qo_heads: {num_qo_heads}, num_kv_heads: {num_kv_heads}.")
285
+
286
+ mask_mode_code = 1 if causal else 0
287
+ if softmax_scale is None:
288
+ softmax_scale = head_dim_qk ** (-0.5)
289
+
290
+ if dq is None:
291
+ dq = torch.empty(qo_total_len, num_qo_heads, head_dim_qk, device=q.device, dtype=q.dtype)
292
+ if dk is None:
293
+ dk = torch.empty(kv_total_len, num_kv_heads, head_dim_qk, device=q.device, dtype=q.dtype)
294
+ if dv is None:
295
+ dv = torch.empty(kv_total_len, num_kv_heads, head_dim_vo, device=q.device, dtype=q.dtype)
296
+
297
+ max_seqlen_qo_aligned = (max_seqlen_qo + 7) // 8 * 8
298
+ bs = cu_seqlens_qo.shape[0] - 1
299
+ workspace_bytes = 0
300
+ workspace_bytes += 4 * bs * max_seqlen_qo_aligned * num_qo_heads * head_dim_qk # dQ_acc
301
+ workspace_bytes += 4 * max_seqlen_qo_aligned * bs * num_qo_heads * 2 # sum_OdO and scaled_lse
302
+ if num_qo_heads != num_kv_heads:
303
+ workspace_bytes += 2 * kv_total_len * num_qo_heads * (head_dim_qk + head_dim_vo) # dKV_acc
304
+ workspace_buffer = torch.empty(workspace_bytes, dtype=torch.uint8, device=q.device)
305
+ flash_mla_cuda.dense_prefill_bwd(
306
+ workspace_buffer,
307
+ do,
308
+ q,
309
+ k,
310
+ v,
311
+ out,
312
+ lse,
313
+ cu_seqlens_qo,
314
+ cu_seqlens_kv,
315
+ dq,
316
+ dk,
317
+ dv,
318
+ mask_mode_code,
319
+ softmax_scale,
320
+ max_seqlen_qo,
321
+ max_seqlen_kv,
322
+ is_varlen,
323
+ )
324
+
325
+ return dq, dk, dv
326
+
327
+
328
+ class FlashAttnVarlenFunc(torch.autograd.Function):
329
+ def forward(
330
+ ctx,
331
+ q: torch.Tensor,
332
+ k: torch.Tensor,
333
+ v: torch.Tensor,
334
+ cu_seqlens_qo: torch.Tensor,
335
+ cu_seqlens_kv: torch.Tensor,
336
+ max_seqlen_qo: int,
337
+ max_seqlen_kv: int,
338
+ causal: bool = False,
339
+ softmax_scale: Optional[float] = None,
340
+ is_varlen: bool = True,
341
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
342
+ out, lse = _flash_attn_varlen_forward(
343
+ q, k, v,
344
+ cu_seqlens_qo, cu_seqlens_kv, max_seqlen_qo, max_seqlen_kv,
345
+ causal=causal, softmax_scale=softmax_scale,
346
+ is_varlen=is_varlen,
347
+ )
348
+ ctx.save_for_backward(q, k, v, out, lse, cu_seqlens_qo, cu_seqlens_kv)
349
+ ctx.max_seqlen_qo = max_seqlen_qo
350
+ ctx.max_seqlen_kv = max_seqlen_kv
351
+ ctx.causal = causal
352
+ ctx.softmax_scale = softmax_scale
353
+ ctx.is_varlen = is_varlen
354
+ return out, lse
355
+
356
+ def backward(
357
+ ctx,
358
+ do: torch.Tensor,
359
+ dlse: torch.Tensor,
360
+ ):
361
+ del dlse # LSE doesn't support backward currently
362
+ q, k, v, out, lse, cu_seqlens_qo, cu_seqlens_kv = ctx.saved_tensors
363
+ dq, dk, dv = _flash_attn_varlen_backward(
364
+ do, q, k, v, out, lse,
365
+ cu_seqlens_qo, cu_seqlens_kv, ctx.max_seqlen_qo, ctx.max_seqlen_kv,
366
+ causal=ctx.causal, softmax_scale=ctx.softmax_scale,
367
+ is_varlen=ctx.is_varlen,
368
+ )
369
+ return dq, dk, dv, None, None, None, None, None, None, None
370
+
371
+
372
+ def flash_attn_varlen_func(
373
+ q: torch.Tensor,
374
+ k: torch.Tensor,
375
+ v: torch.Tensor,
376
+ cu_seqlens_qo: torch.Tensor,
377
+ cu_seqlens_kv: torch.Tensor,
378
+ max_seqlen_qo: int,
379
+ max_seqlen_kv: int,
380
+ dropout_p: float = 0.0,
381
+ softmax_scale: Optional[float] = None,
382
+ causal: bool = False,
383
+ deterministic: bool = False,
384
+ is_varlen: bool = True,
385
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
386
+ assert dropout_p == 0.0
387
+ assert not deterministic
388
+ return FlashAttnVarlenFunc.apply(
389
+ q, k, v,
390
+ cu_seqlens_qo, cu_seqlens_kv, max_seqlen_qo, max_seqlen_kv,
391
+ causal, softmax_scale, is_varlen,
392
+ )
393
+
394
+
395
+ def flash_attn_varlen_qkvpacked_func(
396
+ qkv: torch.Tensor,
397
+ cu_seqlens: torch.Tensor,
398
+ max_seqlen: int,
399
+ head_dim_qk: int,
400
+ dropout_p: float = 0.0,
401
+ softmax_scale: Optional[float] = None,
402
+ causal: bool = False,
403
+ deterministic: bool = False,
404
+ is_varlen: bool = True,
405
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
406
+ assert dropout_p == 0.0
407
+ assert not deterministic
408
+ return FlashAttnVarlenFunc.apply(
409
+ qkv[:, :, :head_dim_qk], qkv[:, :, head_dim_qk:head_dim_qk * 2], qkv[:, :, head_dim_qk * 2:],
410
+ cu_seqlens, cu_seqlens, max_seqlen, max_seqlen,
411
+ causal, softmax_scale, is_varlen,
412
+ )
413
+
414
+
415
+ def flash_attn_varlen_kvpacked_func(
416
+ q: torch.Tensor,
417
+ kv: torch.Tensor,
418
+ cu_seqlens_qo: torch.Tensor,
419
+ cu_seqlens_kv: torch.Tensor,
420
+ max_seqlen_qo: int,
421
+ max_seqlen_kv: int,
422
+ head_dim_qk: int,
423
+ dropout_p: float = 0.0,
424
+ softmax_scale: Optional[float] = None,
425
+ causal: bool = False,
426
+ deterministic: bool = False,
427
+ is_varlen: bool = True,
428
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
429
+ assert dropout_p == 0.0
430
+ assert not deterministic
431
+ return FlashAttnVarlenFunc.apply(
432
+ q, kv[:, :, :head_dim_qk], kv[:, :, head_dim_qk:],
433
+ cu_seqlens_qo, cu_seqlens_kv, max_seqlen_qo, max_seqlen_kv,
434
+ causal, softmax_scale, is_varlen,
435
+ )
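Editor's note: the following is a minimal, illustrative usage sketch of the dense decoding path documented in the flash_mla_interface.py above. It is not part of the uploaded sources; the import name `flash_mla`, the CUDA device, the DeepSeek-style shapes (head_dim 576, head_dim_v 512), and the dummy block table are assumptions.

import torch
from flash_mla import get_mla_metadata, flash_mla_with_kvcache  # hypothetical import name

batch_size, seq_len_q, num_heads_q = 4, 1, 128
head_dim, head_dim_v = 576, 512                 # DeepSeek V3-style MLA head dims
page_block_size, blocks_per_seq = 64, 16
num_blocks = batch_size * blocks_per_seq

q = torch.randn(batch_size, seq_len_q, num_heads_q, head_dim,
                device="cuda", dtype=torch.bfloat16)
k_cache = torch.randn(num_blocks, page_block_size, 1, head_dim,
                      device="cuda", dtype=torch.bfloat16)
block_table = torch.arange(num_blocks, device="cuda", dtype=torch.int32).view(
    batch_size, blocks_per_seq)
cache_seqlens = torch.full((batch_size,), 1000, device="cuda", dtype=torch.int32)

# sched_meta starts empty and is populated lazily on the first call; it can be
# reused across decoding steps as long as shapes and cache_seqlens are unchanged.
sched_meta, _ = get_mla_metadata()
out, lse = flash_mla_with_kvcache(
    q, k_cache, block_table, cache_seqlens, head_dim_v, sched_meta, causal=True,
)
# out: (4, 1, 128, 512), lse: (4, 128, 1)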
build/torch29-cxx11-cu128-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,4 @@
1
+ {
2
+ "version": 1,
3
+ "python-depends": []
4
+ }
build/torch29-cxx11-cu130-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,19 @@
1
+ __version__ = "1.0.0"
2
+
3
+ from .flash_mla_interface import (
4
+ get_mla_metadata,
5
+ flash_mla_with_kvcache,
6
+ flash_attn_varlen_func,
7
+ flash_attn_varlen_qkvpacked_func,
8
+ flash_attn_varlen_kvpacked_func,
9
+ flash_mla_sparse_fwd
10
+ )
11
+
12
+ __all__ = [
13
+ "get_mla_metadata",
14
+ "flash_mla_with_kvcache",
15
+ "flash_attn_varlen_func",
16
+ "flash_attn_varlen_qkvpacked_func",
17
+ "flash_attn_varlen_kvpacked_func",
18
+ "flash_mla_sparse_fwd"
19
+ ]
build/torch29-cxx11-cu130-x86_64-linux/_flash_mla_cuda_ybk54nsmnrfag.abi3.so ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46f67b1de9489460562e162661cc1f19472c26f2fe2fb063529bbde879426125
3
+ size 3599176
build/torch29-cxx11-cu130-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
1
+ import torch
2
+ from . import _flash_mla_cuda_ybk54nsmnrfag
3
+ ops = torch.ops._flash_mla_cuda_ybk54nsmnrfag
4
+
5
+ def add_op_namespace_prefix(op_name: str):
6
+ """
7
+ Prefix op by namespace.
8
+ """
9
+ return f"_flash_mla_cuda_ybk54nsmnrfag::{op_name}"
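Editor's note: a short sketch of how this per-build ops handle is used; it assumes execution inside the built package so the relative import resolves, and the op name comes from the interface file shown in this commit.

import torch
from ._ops import ops, add_op_namespace_prefix

# Fully qualified name, e.g. for torch.library utilities that expect "namespace::op".
qualified_name = add_op_namespace_prefix("sparse_prefill_fwd")
# -> "_flash_mla_cuda_ybk54nsmnrfag::sparse_prefill_fwd"

# `ops` is simply this build's torch.ops namespace, so both of the following
# resolve to the same registered custom op:
op_via_handle = ops.sparse_prefill_fwd
op_via_torch = torch.ops._flash_mla_cuda_ybk54nsmnrfag.sparse_prefill_fwd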
build/torch29-cxx11-cu130-x86_64-linux/flash_mla/__init__.py ADDED
@@ -0,0 +1,26 @@
1
+ import ctypes
2
+ import sys
3
+
4
+ import importlib
5
+ from pathlib import Path
6
+ from types import ModuleType
7
+
8
+ def _import_from_path(file_path: Path) -> ModuleType:
9
+ # We cannot use the module name as-is; after adding it to `sys.modules`,
10
+ # it would also be used for other imports. So, we make a module name that
11
+ # depends on the path for it to be unique using the hex-encoded hash of
12
+ # the path.
13
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
14
+ module_name = path_hash
15
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
16
+ if spec is None:
17
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
18
+ module = importlib.util.module_from_spec(spec)
19
+ if module is None:
20
+ raise ImportError(f"Cannot load module {module_name} from spec")
21
+ sys.modules[module_name] = module
22
+ spec.loader.exec_module(module) # type: ignore
23
+ return module
24
+
25
+
26
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
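Editor's note: the shim above re-executes the parent `__init__.py` under a path-derived module name. The standalone snippet below only demonstrates that naming scheme; the file path is an arbitrary example and need not exist.

import ctypes
from pathlib import Path

file_path = Path("/opt/kernels/flash_mla/__init__.py")  # arbitrary example path
path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
print(path_hash)  # hex string, stable for this path within one Python process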
build/torch29-cxx11-cu130-x86_64-linux/flash_mla_interface.py ADDED
@@ -0,0 +1,435 @@
1
+ from typing import Optional, Tuple
2
+ import dataclasses
3
+
4
+ import torch
5
+
6
+ from ._ops import ops as flash_mla_cuda
7
+
8
+ @dataclasses.dataclass
9
+ class FlashMLASchedMeta:
10
+ """
11
+ A class that stores the tile scheduler metadata of FlashMLA
12
+ """
13
+
14
+ @dataclasses.dataclass
15
+ class Config:
16
+ b: int
17
+ s_q: int
18
+ h_q: int
19
+ page_block_size: int
20
+ h_k: int
21
+
22
+ causal: bool
23
+ is_fp8_kvcache: bool
24
+ topk: Optional[int]
25
+
26
+ extra_page_block_size: Optional[int]
27
+ extra_topk: Optional[int]
28
+
29
+ have_initialized: bool = False
30
+
31
+ config: Optional[Config] = None
32
+
33
+ tile_scheduler_metadata: Optional[torch.Tensor] = None # (num_sm_parts, TileSchedulerMetaDataSize), dtype torch.int32.
34
+ num_splits: Optional[torch.Tensor] = None # (1), dtype torch.int32.
35
+
36
+
37
+ def get_mla_metadata(
38
+ *args,
39
+ **kwargs
40
+ ) -> Tuple[FlashMLASchedMeta, None]:
41
+ """
42
+ Returns an empty instance of FlashMLASchedMeta. The actual scheduling metadata will be generated during the first invocation of flash_mla_with_kvcache.
43
+
44
+ Arguments:
45
+ This function does not need any arguments, but we keep *args and **kwargs to be compatible with the old interface.
46
+
47
+ Return:
48
+ A tuple. For historical reasons, we currently return a tuple of (FlashMLASchedMeta, None); only the first element is useful.
49
+ """
50
+ return FlashMLASchedMeta(), None
51
+
52
+
53
+ def flash_mla_with_kvcache(
54
+ q: torch.Tensor,
55
+ k_cache: torch.Tensor,
56
+ block_table: Optional[torch.Tensor],
57
+ cache_seqlens: Optional[torch.Tensor],
58
+ head_dim_v: int,
59
+ tile_scheduler_metadata: FlashMLASchedMeta,
60
+ num_splits: None = None,
61
+ softmax_scale: Optional[float] = None,
62
+ causal: bool = False,
63
+ is_fp8_kvcache: bool = False,
64
+ indices: Optional[torch.Tensor] = None,
65
+ attn_sink: Optional[torch.Tensor] = None,
66
+ extra_k_cache: Optional[torch.Tensor] = None,
67
+ extra_indices_in_kvcache: Optional[torch.Tensor] = None,
68
+ topk_length: Optional[torch.Tensor] = None,
69
+ extra_topk_length: Optional[torch.Tensor] = None
70
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
71
+ """
72
+ Arguments:
73
+ q: (batch_size, seq_len_q, num_heads_q, head_dim).
74
+ k_cache: (num_blocks, page_block_size, num_heads_k, head_dim).
75
+ Different modes (including fp8/bf16 and sparsity) have different KV cache layouts. See comments below for details.
76
+ The KV cache must be contiguously valid for sparse attention on sm100. Here "contiguously valid" means that every byte, from the very beginning of the KV cache to its last byte, is a valid memory address to access (i.e. it won't trigger an illegal memory access). In other words, the KV cache can be a slice of a larger array, but cannot be a list of disjoint memory blocks.
77
+ block_table: (batch_size, max_num_blocks_per_seq), torch.int32. Can be None when sparse attention is used.
78
+ cache_seqlens: (batch_size), torch.int32. Can be None when sparse attention is used.
79
+ head_dim_v: Head dim of v. Must be 512.
80
+ tile_scheduler_metadata (aliased to sched_meta below): FlashMLASchedMeta, returned by get_mla_metadata. You may reuse the same sched_meta across different invocations, but only when the tensor shapes and the values of cache_seqlens, topk_length, and extra_topk_length remain the same.
81
+ num_splits: must be None (kept only for compatibility with the old interface).
82
+ softmax_scale: float. The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim_k).
83
+ causal: bool. Whether to apply a causal attention mask. Only valid for dense attention.
84
+ is_fp8_kvcache: bool.
85
+ indices: (batch_size, seq_len_q, topk). KV indices when sparse attention is enabled.
86
+ Note that indices[i][j][k] = (the index of the page block where token t resides) * page_block_size + (the offset of token t within that page block),
87
+ where t is the k-th token of the j-th q-sequence in the i-th batch.
88
+ attn_sink: Optional[torch.Tensor], (num_heads_q, ), torch.float32. If present, the final output will be scaled by exp(lse) / (exp(lse) + exp(attn_sink)). This has no effect on the returned softmax_lse. +inf will cause the corresponding result to become 0.
89
+ extra_k_cache and extra_indices_in_kvcache: If provided, the kernel will also attend to these extra tokens in addition to those in k_cache and indices. Their format requirements are the same as those of k_cache and indices respectively.
90
+ topk_length/extra_topk_length: (batch_size, ), torch.int32. If provided, only the leftmost topk_length indices will be processed. Useful when the actual topk differs across queries, since this saves some computation compared to masking.
91
+
92
+ For DeepSeek V3, DeepSeek V3.1, and DeepSeek V3.2:
93
+ head_dim should be 576 while head_dim_v should be 512.
94
+ In FP8+sparse mode, each token's KV cache is 656 Bytes, structured as:
95
+ - The shape of the tensor `k_cache` is (num_blocks, page_block_size, num_heads_k, head_dim), and num_heads_k must be 1.
96
+ - First 512 bytes: The "quantized NoPE" part, containing 512 float8_e4m3 values.
97
+ - Next 16 bytes: Scale factors, containing 4 float32 values. The first float32 is the scale for the first 128 float8_e4m3 values, the second for the next 128, and so on.
98
+ - Last 128 bytes: The "RoPE" part, containing 64 bfloat16 values. This part is not quantized for accuracy.
99
+
100
+ Return:
101
+ out: (batch_size, seq_len_q, num_heads_q, head_dim_v).
102
+ softmax_lse: (batch_size, num_heads_q, seq_len_q), torch.float32.
103
+ """
104
+ sched_meta = tile_scheduler_metadata
105
+ indices_in_kvcache = indices
106
+ assert isinstance(sched_meta, FlashMLASchedMeta), "tile_scheduler_metadata must be of type FlashMLASchedMeta"
107
+ assert num_splits is None, "num_splits must be None"
108
+
109
+ topk = indices_in_kvcache.shape[-1] if indices_in_kvcache is not None else None
110
+ extra_k_page_block_size = extra_k_cache.shape[1] if extra_k_cache is not None else None
111
+ extra_topk = extra_indices_in_kvcache.shape[-1] if extra_indices_in_kvcache is not None else None
112
+ if softmax_scale is None:
113
+ softmax_scale = q.shape[-1] ** (-0.5)
114
+
115
+ if not sched_meta.have_initialized:
116
+ # Sanity check. We only perform the sanity check during the first invocation to save CPU time.
117
+ if indices_in_kvcache is not None:
118
+ assert not causal, "causal must be False when indices_in_kvcache is not None (i.e. sparse attention is enabled)"
119
+
120
+ # Initialize the tile scheduler metadata during the first invocation.
121
+ sched_meta.have_initialized = True
122
+ sched_meta.config = FlashMLASchedMeta.Config(
123
+ q.shape[0],
124
+ q.shape[1],
125
+ q.shape[2],
126
+ k_cache.shape[1],
127
+ k_cache.shape[2],
128
+
129
+ causal,
130
+ is_fp8_kvcache,
131
+ topk,
132
+
133
+ extra_k_page_block_size,
134
+ extra_topk,
135
+ )
136
+ else:
137
+ # Check whether the input arguments are consistent with sched_meta
138
+ helper_msg = " Your input arguments are inconsistent with sched_meta. Please make sure the input arguments are consistent across different invocations of flash_mla_with_kvcache on the same sched_meta."
139
+ assert sched_meta.config is not None
140
+ assert sched_meta.config.b == q.shape[0], "sched_meta.config.b must be equal to batch_size." + helper_msg
141
+ assert sched_meta.config.s_q == q.shape[1], "sched_meta.config.s_q must be equal to seq_len_q." + helper_msg
142
+ assert sched_meta.config.h_q == q.shape[2], "sched_meta.config.h_q must be equal to num_heads_q." + helper_msg
143
+ assert sched_meta.config.page_block_size == k_cache.shape[1], "sched_meta.config.page_block_size must be equal to page_block_size." + helper_msg
144
+ assert sched_meta.config.h_k == k_cache.shape[2], "sched_meta.config.h_k must be equal to num_heads_k." + helper_msg
145
+ assert sched_meta.config.causal == causal, "sched_meta.config.causal must be equal to causal." + helper_msg
146
+ assert sched_meta.config.is_fp8_kvcache == is_fp8_kvcache, "sched_meta.config.is_fp8_kvcache must be equal to is_fp8_kvcache." + helper_msg
147
+ assert sched_meta.config.topk == topk, "sched_meta.config.topk must be equal to the last dim of indices_in_kvcache." + helper_msg
148
+ assert sched_meta.config.extra_page_block_size == extra_k_page_block_size, "sched_meta.config.extra_page_block_size must be equal to the page_block_size of extra_k_cache." + helper_msg
149
+ assert sched_meta.config.extra_topk == extra_topk, "sched_meta.config.extra_topk must be equal to the last dim of extra_indices_in_kvcache." + helper_msg
150
+
151
+ if topk is not None:
152
+ # Sparse attention
153
+ assert not causal, "causal must be False when sparse attention is enabled"
154
+ assert is_fp8_kvcache, "is_fp8_kvcache must be True when sparse attention is enabled"
155
+ out, lse, new_tile_scheduler_metadata, new_num_splits = flash_mla_cuda.sparse_decode_fwd(
156
+ q, k_cache, indices_in_kvcache, topk_length, attn_sink,
157
+ sched_meta.tile_scheduler_metadata, sched_meta.num_splits,
158
+ extra_k_cache, extra_indices_in_kvcache, extra_topk_length,
159
+ head_dim_v, softmax_scale
160
+ )
161
+ else:
162
+ # Dense attention
163
+ assert indices_in_kvcache is None and attn_sink is None and extra_k_cache is None and extra_indices_in_kvcache is None and topk_length is None and extra_topk_length is None, "indices_in_kvcache, attn_sink, extra_k_cache, extra_indices_in_kvcache, topk_length and extra_topk_length must be None when dense attention is used."
164
+ assert block_table is not None and cache_seqlens is not None, "block_table and cache_seqlens must be provided when dense attention is used."
165
+ out, lse, new_tile_scheduler_metadata, new_num_splits = flash_mla_cuda.dense_decode_fwd(
166
+ q, k_cache, head_dim_v,
167
+ cache_seqlens, block_table,
168
+ softmax_scale, causal,
169
+ sched_meta.tile_scheduler_metadata, sched_meta.num_splits
170
+ )
171
+ sched_meta.tile_scheduler_metadata = new_tile_scheduler_metadata
172
+ sched_meta.num_splits = new_num_splits
173
+ return (out, lse)
174
+
175
+
176
+ def flash_mla_sparse_fwd(
177
+ q: torch.Tensor,
178
+ kv: torch.Tensor,
179
+ indices: torch.Tensor,
180
+ sm_scale: float,
181
+ d_v: int = 512,
182
+ attn_sink: Optional[torch.Tensor] = None,
183
+ topk_length: Optional[torch.Tensor] = None,
184
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
185
+ """
186
+ Sparse attention prefill kernel
187
+
188
+ Args:
189
+ q: [s_q, h_q, d_qk], bfloat16
190
+ kv: [s_kv, h_kv, d_qk], bfloat16
191
+ indices: [s_q, h_kv, topk], int32. Invalid indices should be set to -1 or numbers >= s_kv
192
+ sm_scale: float
193
+ d_v: The dimension of value vectors. Can only be 512
194
+ attn_sink: optional, [h_q], float32.
195
+ If attn_sink is provided, the output will additionally be multiplied by exp(lse) / (exp(lse) + exp(attn_sink)) when it is computed.
196
+ +-inf in attn_sink will be handled normally (i.e., -inf has no effect, +inf will make the corresponding output all zeros).
197
+ This argument has no effect on lse and max_logits.
198
+ topk_length: optional, [s_q], int32. If provided, the i-th q token will only attend to k tokens specified by indices[i, :, :topk_length[i]], ignoring later k/v tokens (even if provided in indices).
199
+ In extremely rare cases (topk_length is provided, there is a valid topk index between topk_length[i] and s_kv, and that index points to a k token containing NaN), the operator output will contain NaN, so please avoid this situation.
200
+
201
+ Returns:
202
+ (output, max_logits, lse)
203
+ Please refer to tests/ref.py for the precise definitions of these return values.
204
+ - output: [s_q, h_q, d_v], bfloat16
205
+ - max_logits: [s_q, h_q], float
206
+ - lse: [s_q, h_q], float, log-sum-exp of attention scores
207
+ """
208
+ results = flash_mla_cuda.sparse_prefill_fwd(
209
+ q, kv, indices, sm_scale, d_v, attn_sink, topk_length
210
+ )
211
+ return results
212
+
213
+
214
+ def _flash_attn_varlen_forward(
215
+ q: torch.Tensor,
216
+ k: torch.Tensor,
217
+ v: torch.Tensor,
218
+ cu_seqlens_qo: torch.Tensor,
219
+ cu_seqlens_kv: torch.Tensor,
220
+ max_seqlen_qo: int,
221
+ max_seqlen_kv: int,
222
+ out: Optional[torch.Tensor] = None,
223
+ lse: Optional[torch.Tensor] = None,
224
+ causal: bool = False,
225
+ softmax_scale: Optional[float] = None,
226
+ is_varlen: bool = True,
227
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
228
+ qo_total_len, num_qo_heads, head_dim_qk = q.shape
229
+ kv_total_len, num_kv_heads, head_dim_vo = v.shape
230
+
231
+ mask_mode_code = 1 if causal else 0
232
+ if softmax_scale is None:
233
+ softmax_scale = head_dim_qk ** (-0.5)
234
+
235
+ if out is None:
236
+ out = torch.empty(qo_total_len, num_qo_heads, head_dim_vo, device=q.device, dtype=q.dtype)
237
+ if lse is None:
238
+ # Make lse contiguous on seqlen dim
239
+ lse = torch.empty(num_qo_heads, qo_total_len, device=q.device, dtype=torch.float32).T
240
+
241
+ workspace_buffer = torch.empty(32 * 1024 * 1024, dtype=torch.uint8, device=q.device)
242
+ flash_mla_cuda.dense_prefill_fwd(
243
+ workspace_buffer,
244
+ q,
245
+ k,
246
+ v,
247
+ cu_seqlens_qo,
248
+ cu_seqlens_kv,
249
+ out,
250
+ lse,
251
+ mask_mode_code,
252
+ softmax_scale,
253
+ max_seqlen_qo,
254
+ max_seqlen_kv,
255
+ is_varlen,
256
+ )
257
+
258
+ return out, lse
259
+
260
+
261
+ def _flash_attn_varlen_backward(
262
+ do: torch.Tensor,
263
+ q: torch.Tensor,
264
+ k: torch.Tensor,
265
+ v: torch.Tensor,
266
+ out: torch.Tensor,
267
+ lse: torch.Tensor,
268
+ cu_seqlens_qo: torch.Tensor,
269
+ cu_seqlens_kv: torch.Tensor,
270
+ max_seqlen_qo: int,
271
+ max_seqlen_kv: int,
272
+ dq: Optional[torch.Tensor] = None,
273
+ dk: Optional[torch.Tensor] = None,
274
+ dv: Optional[torch.Tensor] = None,
275
+ causal: bool = False,
276
+ softmax_scale: Optional[float] = None,
277
+ is_varlen: bool = True,
278
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
279
+ qo_total_len, num_qo_heads, head_dim_qk = q.shape
280
+ kv_total_len, num_kv_heads, head_dim_vo = v.shape
281
+
282
+ # TODO: fix bwd GQA
283
+ if num_qo_heads != num_kv_heads:
284
+ raise ValueError(f"SM100 bwd doesn't support GQA now. num_qo_heads: {num_qo_heads}, num_kv_heads: {num_kv_heads}.")
285
+
286
+ mask_mode_code = 1 if causal else 0
287
+ if softmax_scale is None:
288
+ softmax_scale = head_dim_qk ** (-0.5)
289
+
290
+ if dq is None:
291
+ dq = torch.empty(qo_total_len, num_qo_heads, head_dim_qk, device=q.device, dtype=q.dtype)
292
+ if dk is None:
293
+ dk = torch.empty(kv_total_len, num_kv_heads, head_dim_qk, device=q.device, dtype=q.dtype)
294
+ if dv is None:
295
+ dv = torch.empty(kv_total_len, num_kv_heads, head_dim_vo, device=q.device, dtype=q.dtype)
296
+
297
+ max_seqlen_qo_aligned = (max_seqlen_qo + 7) // 8 * 8
298
+ bs = cu_seqlens_qo.shape[0] - 1
299
+ workspace_bytes = 0
300
+ workspace_bytes += 4 * bs * max_seqlen_qo_aligned * num_qo_heads * head_dim_qk # dQ_acc
301
+ workspace_bytes += 4 * max_seqlen_qo_aligned * bs * num_qo_heads * 2 # sum_OdO and scaled_lse
302
+ if num_qo_heads != num_kv_heads:
303
+ workspace_bytes += 2 * kv_total_len * num_qo_heads * (head_dim_qk + head_dim_vo) # dKV_acc
304
+ workspace_buffer = torch.empty(workspace_bytes, dtype=torch.uint8, device=q.device)
305
+ flash_mla_cuda.dense_prefill_bwd(
306
+ workspace_buffer,
307
+ do,
308
+ q,
309
+ k,
310
+ v,
311
+ out,
312
+ lse,
313
+ cu_seqlens_qo,
314
+ cu_seqlens_kv,
315
+ dq,
316
+ dk,
317
+ dv,
318
+ mask_mode_code,
319
+ softmax_scale,
320
+ max_seqlen_qo,
321
+ max_seqlen_kv,
322
+ is_varlen,
323
+ )
324
+
325
+ return dq, dk, dv
326
+
327
+
328
+ class FlashAttnVarlenFunc(torch.autograd.Function):
329
+ def forward(
330
+ ctx,
331
+ q: torch.Tensor,
332
+ k: torch.Tensor,
333
+ v: torch.Tensor,
334
+ cu_seqlens_qo: torch.Tensor,
335
+ cu_seqlens_kv: torch.Tensor,
336
+ max_seqlen_qo: int,
337
+ max_seqlen_kv: int,
338
+ causal: bool = False,
339
+ softmax_scale: Optional[float] = None,
340
+ is_varlen: bool = True,
341
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
342
+ out, lse = _flash_attn_varlen_forward(
343
+ q, k, v,
344
+ cu_seqlens_qo, cu_seqlens_kv, max_seqlen_qo, max_seqlen_kv,
345
+ causal=causal, softmax_scale=softmax_scale,
346
+ is_varlen=is_varlen,
347
+ )
348
+ ctx.save_for_backward(q, k, v, out, lse, cu_seqlens_qo, cu_seqlens_kv)
349
+ ctx.max_seqlen_qo = max_seqlen_qo
350
+ ctx.max_seqlen_kv = max_seqlen_kv
351
+ ctx.causal = causal
352
+ ctx.softmax_scale = softmax_scale
353
+ ctx.is_varlen = is_varlen
354
+ return out, lse
355
+
356
+ def backward(
357
+ ctx,
358
+ do: torch.Tensor,
359
+ dlse: torch.Tensor,
360
+ ):
361
+ del dlse # LSE doesn't support backward currently
362
+ q, k, v, out, lse, cu_seqlens_qo, cu_seqlens_kv = ctx.saved_tensors
363
+ dq, dk, dv = _flash_attn_varlen_backward(
364
+ do, q, k, v, out, lse,
365
+ cu_seqlens_qo, cu_seqlens_kv, ctx.max_seqlen_qo, ctx.max_seqlen_kv,
366
+ causal=ctx.causal, softmax_scale=ctx.softmax_scale,
367
+ is_varlen=ctx.is_varlen,
368
+ )
369
+ return dq, dk, dv, None, None, None, None, None, None, None
370
+
371
+
372
+ def flash_attn_varlen_func(
373
+ q: torch.Tensor,
374
+ k: torch.Tensor,
375
+ v: torch.Tensor,
376
+ cu_seqlens_qo: torch.Tensor,
377
+ cu_seqlens_kv: torch.Tensor,
378
+ max_seqlen_qo: int,
379
+ max_seqlen_kv: int,
380
+ dropout_p: float = 0.0,
381
+ softmax_scale: Optional[float] = None,
382
+ causal: bool = False,
383
+ deterministic: bool = False,
384
+ is_varlen: bool = True,
385
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
386
+ assert dropout_p == 0.0
387
+ assert not deterministic
388
+ return FlashAttnVarlenFunc.apply(
389
+ q, k, v,
390
+ cu_seqlens_qo, cu_seqlens_kv, max_seqlen_qo, max_seqlen_kv,
391
+ causal, softmax_scale, is_varlen,
392
+ )
393
+
394
+
395
+ def flash_attn_varlen_qkvpacked_func(
396
+ qkv: torch.Tensor,
397
+ cu_seqlens: torch.Tensor,
398
+ max_seqlen: int,
399
+ head_dim_qk: int,
400
+ dropout_p: float = 0.0,
401
+ softmax_scale: Optional[float] = None,
402
+ causal: bool = False,
403
+ deterministic: bool = False,
404
+ is_varlen: bool = True,
405
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
406
+ assert dropout_p == 0.0
407
+ assert not deterministic
408
+ return FlashAttnVarlenFunc.apply(
409
+ qkv[:, :, :head_dim_qk], qkv[:, :, head_dim_qk:head_dim_qk * 2], qkv[:, :, head_dim_qk * 2:],
410
+ cu_seqlens, cu_seqlens, max_seqlen, max_seqlen,
411
+ causal, softmax_scale, is_varlen,
412
+ )
413
+
414
+
415
+ def flash_attn_varlen_kvpacked_func(
416
+ q: torch.Tensor,
417
+ kv: torch.Tensor,
418
+ cu_seqlens_qo: torch.Tensor,
419
+ cu_seqlens_kv: torch.Tensor,
420
+ max_seqlen_qo: int,
421
+ max_seqlen_kv: int,
422
+ head_dim_qk: int,
423
+ dropout_p: float = 0.0,
424
+ softmax_scale: Optional[float] = None,
425
+ causal: bool = False,
426
+ deterministic: bool = False,
427
+ is_varlen: bool = True,
428
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
429
+ assert dropout_p == 0.0
430
+ assert not deterministic
431
+ return FlashAttnVarlenFunc.apply(
432
+ q, kv[:, :, :head_dim_qk], kv[:, :, head_dim_qk:],
433
+ cu_seqlens_qo, cu_seqlens_kv, max_seqlen_qo, max_seqlen_kv,
434
+ causal, softmax_scale, is_varlen,
435
+ )
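Editor's note: the decode example earlier does not touch the variable-length prefill entry points, so here is a hedged sketch of flash_attn_varlen_func with autograd. The `flash_mla` import name, the 192/128 head dims, and the dtypes are illustrative assumptions; the backward path requires equal q/kv head counts, as enforced in _flash_attn_varlen_backward above.

import torch
from flash_mla import flash_attn_varlen_func  # hypothetical import name

# Three packed sequences of lengths 128, 256 and 64 (448 tokens total).
cu_seqlens = torch.tensor([0, 128, 384, 448], device="cuda", dtype=torch.int32)
max_seqlen = 256
num_heads, head_dim_qk, head_dim_v = 16, 192, 128

q = torch.randn(448, num_heads, head_dim_qk, device="cuda",
                dtype=torch.bfloat16, requires_grad=True)
k = torch.randn(448, num_heads, head_dim_qk, device="cuda",
                dtype=torch.bfloat16, requires_grad=True)
v = torch.randn(448, num_heads, head_dim_v, device="cuda",
                dtype=torch.bfloat16, requires_grad=True)

out, lse = flash_attn_varlen_func(
    q, k, v, cu_seqlens, cu_seqlens, max_seqlen, max_seqlen, causal=True,
)
# Gradients for q/k/v are produced by dense_prefill_bwd via FlashAttnVarlenFunc.
out.float().sum().backward()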
build/torch29-cxx11-cu130-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,4 @@
1
+ {
2
+ "version": 1,
3
+ "python-depends": []
4
+ }