Diffstat (limited to 'candle-flash-attn/kernels/flash_fwd_hdim64_bf16_sm80.cu')
-rw-r--r--  candle-flash-attn/kernels/flash_fwd_hdim64_bf16_sm80.cu | 13 ++-----------
1 file changed, 2 insertions(+), 11 deletions(-)
diff --git a/candle-flash-attn/kernels/flash_fwd_hdim64_bf16_sm80.cu b/candle-flash-attn/kernels/flash_fwd_hdim64_bf16_sm80.cu
index fffcbebb..22eac878 100644
--- a/candle-flash-attn/kernels/flash_fwd_hdim64_bf16_sm80.cu
+++ b/candle-flash-attn/kernels/flash_fwd_hdim64_bf16_sm80.cu
@@ -1,19 +1,10 @@
 // Copyright (c) 2023, Tri Dao.
-
 // Splitting the different head dimensions to different files to speed up compilation.
+// This file is auto-generated. See "generate_kernels.py"
 #include "flash_fwd_launch_template.h"
 
-// template<>
-// void run_mha_fwd_<cutlass::bfloat16_t, 64>(Flash_fwd_params &params, cudaStream_t stream) {
-//     using elem_type = cutlass::bfloat16_t;
-//     if (params.p_dropout == 1.f) {
-//         run_flash_fwd<Flash_fwd_kernel_traits<64, 128, 64, 4, true, false, elem_type>, false>(params, stream);
-//     } else {
-//         run_flash_fwd<Flash_fwd_kernel_traits<64, 128, 64, 4, false, false, elem_type>, true>(params, stream);
-//     }
-// }
 
 template<>
 void run_mha_fwd_<cutlass::bfloat16_t, 64>(Flash_fwd_params &params, cudaStream_t stream) {
     run_mha_fwd_hdim64<cutlass::bfloat16_t>(params, stream);
-}
\ No newline at end of file
+}
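
The pattern this file relies on: "flash_fwd_launch_template.h" declares a generic run_mha_fwd_<elem_type, Headdim> entry point plus templated per-head-dim launchers, and each auto-generated .cu file defines exactly one explicit specialization, so nvcc can compile the (dtype, head dim) instantiations as independent translation units in parallel. Below is a minimal sketch of that pattern, not the real implementation: Flash_fwd_params is reduced to an empty stand-in, float stands in for cutlass::bfloat16_t so the sketch builds without CUTLASS, and a printf replaces the actual kernel launch.

#include <cstdio>
#include <cuda_runtime.h>

// Stand-in for the real parameter struct, which carries tensor
// pointers, sequence lengths, dropout probability, etc.
struct Flash_fwd_params {};

// Templated launcher, analogous to run_mha_fwd_hdim64 in
// flash_fwd_launch_template.h. The real one selects
// Flash_fwd_kernel_traits and launches the CUDA kernel.
template <typename elem_type>
void run_mha_fwd_hdim64(Flash_fwd_params &params, cudaStream_t stream) {
    (void)params; (void)stream;
    std::printf("hdim64 launcher instantiated\n");
}

// Primary template: declared once, defined only through explicit
// specializations spread across the generated .cu files.
template <typename elem_type, int Headdim>
void run_mha_fwd_(Flash_fwd_params &params, cudaStream_t stream);

// The single specialization this translation unit contributes; every
// other (dtype, head dim) pair lives in its own generated file.
template <>
void run_mha_fwd_<float, 64>(Flash_fwd_params &params, cudaStream_t stream) {
    run_mha_fwd_hdim64<float>(params, stream);
}

int main() {
    Flash_fwd_params params;
    run_mha_fwd_<float, 64>(params, /*stream=*/0);
    return 0;
}

Because each specialization is a separate translation unit, a build system can fan the kernel files out across cores, which is the compilation speedup the header comment refers to.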