Diffstat (limited to 'candle-flash-attn/kernels/flash_fwd_hdim224_fp16_causal_sm80.cu')
-rw-r--r--  candle-flash-attn/kernels/flash_fwd_hdim224_fp16_causal_sm80.cu  10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/candle-flash-attn/kernels/flash_fwd_hdim224_fp16_causal_sm80.cu b/candle-flash-attn/kernels/flash_fwd_hdim224_fp16_causal_sm80.cu
new file mode 100644
index 00000000..f01dad09
--- /dev/null
+++ b/candle-flash-attn/kernels/flash_fwd_hdim224_fp16_causal_sm80.cu
@@ -0,0 +1,10 @@
+// Copyright (c) 2023, Tri Dao.
+// Splitting the different head dimensions to different files to speed up compilation.
+// This file is auto-generated. See "generate_kernels.py"
+
+#include "flash_fwd_launch_template.h"
+
+template<>
+void run_mha_fwd_<cutlass::half_t, 224, true>(Flash_fwd_params &params, cudaStream_t stream) {
+ run_mha_fwd_hdim224<cutlass::half_t, true>(params, stream);
+}