author    Laurent Mazare <laurent.mazare@gmail.com>    2023-07-26 14:16:37 +0100
committer GitHub <noreply@github.com>                  2023-07-26 14:16:37 +0100
commit    2ce5f12513d0dafb04c7e345da9d4fba566cfa16 (patch)
tree      d8370aa035f667905e6f033e99e08fd93e677041 /candle-flash-attn/kernels/flash_fwd_hdim128_bf16_sm80.cu
parent    fa2b64d678ca83e2fbc3dabdecffbc778d5b067d (diff)
Again set a few extra params in flash-attn. (#245)
* Again set a few extra params.
* Use the appropriate kernel sizes.
* Add all the kernel sizes.
* Parallel compiling.
* Reduce the amount of parallelism.
* Add the missing kernel.
* Fix a typo.
* Remove bf16 support for now.
Diffstat (limited to 'candle-flash-attn/kernels/flash_fwd_hdim128_bf16_sm80.cu')
-rw-r--r--   candle-flash-attn/kernels/flash_fwd_hdim128_bf16_sm80.cu   19
1 file changed, 19 insertions, 0 deletions
diff --git a/candle-flash-attn/kernels/flash_fwd_hdim128_bf16_sm80.cu b/candle-flash-attn/kernels/flash_fwd_hdim128_bf16_sm80.cu
new file mode 100644
index 00000000..654400a7
--- /dev/null
+++ b/candle-flash-attn/kernels/flash_fwd_hdim128_bf16_sm80.cu
@@ -0,0 +1,19 @@
+// Copyright (c) 2023, Tri Dao.
+
+// Splitting the different head dimensions to different files to speed up compilation.
+
+#include "flash_fwd_launch_template.h"
+
+// template<>
+// void run_mha_fwd_<cutlass::bfloat16_t, 128>(Flash_fwd_params &params, cudaStream_t stream) {
+// using elem_type = cutlass::bfloat16_t;
+// if (params.p_dropout == 1.f) {
+// run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, false, false, elem_type>, false>(params, stream);
+// } else {
+// run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, false, false, elem_type>, true>(params, stream);
+// }
+// }
+template<>
+void run_mha_fwd_<cutlass::bfloat16_t, 128>(Flash_fwd_params &params, cudaStream_t stream) {
+ run_mha_fwd_hdim128<cutlass::bfloat16_t>(params, stream);
+}
\ No newline at end of file
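
The active specialization simply forwards to run_mha_fwd_hdim128 from flash_fwd_launch_template.h. As a rough sketch of what that dispatcher plausibly looks like, inferred from the commented-out variant above (the 128x64 / 128x32 tile choices and the p_dropout check are taken from that block; the real upstream header may differ):

// Sketch only: inferred from the commented-out code above, not the
// actual contents of flash_fwd_launch_template.h.
template<typename elem_type>
void run_mha_fwd_hdim128(Flash_fwd_params &params, cudaStream_t stream) {
    constexpr int Headdim = 128;
    if (params.p_dropout == 1.f) {
        // Dropout disabled (keep probability 1): larger 128x64 tiles,
        // with the trailing Is_dropout template argument set to false.
        run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, elem_type>, false>(params, stream);
    } else {
        // Dropout enabled: smaller 128x32 tiles, Is_dropout = true.
        run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, elem_type>, true>(params, stream);
    }
}

Keeping one head dimension per translation unit, as the file comment notes, lets the heavily templated kernels compile in parallel rather than in one monolithic unit.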