path: root/candle-flash-attn/kernels/flash_fwd_hdim160_bf16_sm80.cu
Diffstat (limited to 'candle-flash-attn/kernels/flash_fwd_hdim160_bf16_sm80.cu')
-rw-r--r--  candle-flash-attn/kernels/flash_fwd_hdim160_bf16_sm80.cu  11
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/candle-flash-attn/kernels/flash_fwd_hdim160_bf16_sm80.cu b/candle-flash-attn/kernels/flash_fwd_hdim160_bf16_sm80.cu
index 6a9d60c3..f674f481 100644
--- a/candle-flash-attn/kernels/flash_fwd_hdim160_bf16_sm80.cu
+++ b/candle-flash-attn/kernels/flash_fwd_hdim160_bf16_sm80.cu
@@ -1,17 +1,10 @@
// Copyright (c) 2023, Tri Dao.
-
// Splitting the different head dimensions to different files to speed up compilation.
+// This file is auto-generated. See "generate_kernels.py"
#include "flash_fwd_launch_template.h"
-// template<>
-// void run_mha_fwd_<cutlass::bfloat16_t, 160>(Flash_fwd_params &params, cudaStream_t stream) {
-// using elem_type = cutlass::bfloat16_t;
-// BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
-// run_flash_fwd<Flash_fwd_kernel_traits<160, 128, 32, 4, false, false, elem_type>, Is_dropout>(params, stream);
-// });
-// }
template<>
void run_mha_fwd_<cutlass::bfloat16_t, 160>(Flash_fwd_params &params, cudaStream_t stream) {
run_mha_fwd_hdim160<cutlass::bfloat16_t>(params, stream);
-}
\ No newline at end of file
+}
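The change itself is housekeeping: each head dimension lives in its own auto-generated .cu file (see generate_kernels.py) so the heavy template instantiations can compile in parallel, and the per-file specialization now just forwards to the shared launcher in flash_fwd_launch_template.h. For context, the commented-out block removed above shows roughly what that launcher expands to. The sketch below is only an illustration inferred from that removed comment; the tile sizes (128 x 32), warp count (4), and trait flags are assumptions copied from the old comment, not necessarily the tuning used upstream, and the _sketch name is hypothetical.

// Illustrative sketch, not part of the diff: an approximate launcher matching
// the shape of the code removed above. The kernel trait parameters here
// (block M = 128, block N = 32, 4 warps, no extra flags) are assumptions
// taken from the old comment rather than the actual tuned configuration.
#include "flash_fwd_launch_template.h"

template<typename elem_type>
void run_mha_fwd_hdim160_sketch(Flash_fwd_params &params, cudaStream_t stream) {
    constexpr static int Headdim = 160;
    // Dispatch on whether dropout is active, as the removed comment did.
    BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
        run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, elem_type>,
                      Is_dropout>(params, stream);
    });
}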