Diffstat (limited to 'candle-flash-attn/kernels/flash_fwd_hdim32_fp16_sm80.cu')
 candle-flash-attn/kernels/flash_fwd_hdim32_fp16_sm80.cu | 17 ++---------------
 1 file changed, 2 insertions(+), 15 deletions(-)
diff --git a/candle-flash-attn/kernels/flash_fwd_hdim32_fp16_sm80.cu b/candle-flash-attn/kernels/flash_fwd_hdim32_fp16_sm80.cu
index 91e6331e..8dbf8b94 100644
--- a/candle-flash-attn/kernels/flash_fwd_hdim32_fp16_sm80.cu
+++ b/candle-flash-attn/kernels/flash_fwd_hdim32_fp16_sm80.cu
@@ -1,23 +1,10 @@
// Copyright (c) 2023, Tri Dao.
-
// Splitting the different head dimensions to different files to speed up compilation.
+// This file is auto-generated. See "generate_kernels.py"
#include "flash_fwd_launch_template.h"
-// template<>
-// void run_mha_fwd_<cutlass::half_t, 32>(Flash_fwd_params &params, cudaStream_t stream) {
-// using elem_type = cutlass::half_t;
-// BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
-// run_flash_fwd<Flash_fwd_kernel_traits<32, 128, 128, 4, false, false, elem_type>, Is_dropout>(params, stream);
-// // For dropout there might be a lot of register spilling?
-// // These two are very slow due to register spilling
-// // run_flash_fwd<Flash_fwd_kernel_traits<32, 256, 128, 4, false, elem_type>>(params, stream);
-// // run_flash_fwd<Flash_fwd_kernel_traits<32, 128, 256, 4, false, elem_type>>(params, stream);
-// // This one is slightly slower
-// // run_flash_fwd<Flash_fwd_kernel_traits<32, 256, 64, 4, false, elem_type>>(params, stream);
-// });
-// }
template<>
void run_mha_fwd_<cutlass::half_t, 32>(Flash_fwd_params &params, cudaStream_t stream) {
run_mha_fwd_hdim32<cutlass::half_t>(params, stream);
-}
\ No newline at end of file
+}
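
For context: the generated specialization above delegates to run_mha_fwd_hdim32 from flash_fwd_launch_template.h, replacing the hand-written dispatch that this diff deletes. A minimal sketch of what that launcher presumably does, reconstructed from the commented-out code removed above (the tile sizes 128x128 with 4 warps come from the deleted comment; the actual definition in flash_fwd_launch_template.h may differ):

// Sketch only: reconstructed from the specialization deleted in this diff,
// not a verbatim copy of flash_fwd_launch_template.h.
template<typename T>
void run_mha_fwd_hdim32(Flash_fwd_params &params, cudaStream_t stream) {
    constexpr static int Headdim = 32;
    // Dispatch at compile time on whether dropout is active, as the deleted
    // code did with BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, ...).
    BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
        // 128x128 tiles, 4 warps; the deleted comments note that larger
        // tiles (256x128, 128x256) were very slow due to register spilling.
        run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 4, false, false, T>,
                      Is_dropout>(params, stream);
    });
}

Keeping only the thin delegating specialization in this file lets generate_kernels.py stamp out one small translation unit per (head dimension, dtype) pair, which is the compilation-speed motivation stated in the file's own comment.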