author | Laurent Mazare <laurent.mazare@gmail.com> | 2023-07-26 14:16:37 +0100
committer | GitHub <noreply@github.com> | 2023-07-26 14:16:37 +0100
commit | 2ce5f12513d0dafb04c7e345da9d4fba566cfa16 (patch)
tree | d8370aa035f667905e6f033e99e08fd93e677041 /candle-flash-attn/kernels/flash_fwd_hdim128_fp16_sm80.cu
parent | fa2b64d678ca83e2fbc3dabdecffbc778d5b067d (diff)
Again set a few extra params in flash-attn. (#245)
* Again set a few extra params.
* Use the appropriate kernel sizes.
* Add all the kernel sizes.
* Parallel compiling.
* Reduce the amount of parallelism.
* Add the missing kernel.
* Fix a typo.
* Remove bf16 support for now.
Diffstat (limited to 'candle-flash-attn/kernels/flash_fwd_hdim128_fp16_sm80.cu')
-rw-r--r-- | candle-flash-attn/kernels/flash_fwd_hdim128_fp16_sm80.cu | 32
1 file changed, 32 insertions, 0 deletions
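The new file is one of several per-head-dimension translation units: as its header comment notes, splitting head dimensions across files speeds up compilation, because each explicit specialization of `run_mha_fwd_` can be built independently (the "parallel compiling" bullets above). A minimal sketch of that pattern follows, with a hypothetical `run_mha_fwd_sketch` dispatcher and `__half` standing in for `cutlass::half_t`; in flash-attn the real dispatch keys on the head dimension recorded in `Flash_fwd_params`, and nothing below is candle's actual API:

```cpp
#include <cuda_fp16.h>     // __half stands in for cutlass::half_t in this sketch
#include <cuda_runtime.h>

struct Flash_fwd_params;   // real definition lives in the flash-attn headers

// Each flash_fwd_hdim*_*.cu file supplies one explicit specialization of this
// template, so nvcc can compile the head dimensions as independent units.
template <typename elem_type, int Headdim>
void run_mha_fwd_(Flash_fwd_params &params, cudaStream_t stream);

// Hypothetical runtime dispatch from head dimension to the statically-sized
// specialization; illustrative only. Each case resolves at link time to a
// definition that lives in its own .cu file.
inline void run_mha_fwd_sketch(Flash_fwd_params &params, int head_dim,
                               cudaStream_t stream) {
    switch (head_dim) {
        case 64:  run_mha_fwd_<__half, 64>(params, stream);  break;
        case 128: run_mha_fwd_<__half, 128>(params, stream); break;  // this file
        default:  break;  // unsupported head dimension
    }
}
```

Because the dispatcher only needs declarations, each specialization can live in its own translation unit, which is what makes both the per-file split and parallel compilation of the kernels possible.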
```diff
diff --git a/candle-flash-attn/kernels/flash_fwd_hdim128_fp16_sm80.cu b/candle-flash-attn/kernels/flash_fwd_hdim128_fp16_sm80.cu
new file mode 100644
index 00000000..5b7254a9
--- /dev/null
+++ b/candle-flash-attn/kernels/flash_fwd_hdim128_fp16_sm80.cu
@@ -0,0 +1,32 @@
+// Copyright (c) 2023, Tri Dao.
+
+// Splitting the different head dimensions to different files to speed up compilation.
+
+#include "flash_fwd_launch_template.h"
+
+// template<>
+// void run_mha_fwd_<cutlass::half_t, 128>(Flash_fwd_params &params, cudaStream_t stream) {
+//     using elem_type = cutlass::half_t;
+//     if (params.p_dropout == 1.f) {
+//         // Using 8 warps (128 x 128 and 256 x 64) is 28% slower for seqlen=2k
+//         run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, false, false, elem_type>, false>(params, stream);
+//         // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, true, false, elem_type>, false>(params, stream);
+//         // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, false, true, elem_type>, false>(params, stream);
+//         // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, true, true, elem_type>, false>(params, stream);
+//         run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, false, false, elem_type>, false>(params, stream);
+//         run_flash_fwd<Flash_fwd_kernel_traits<128, 64, 64, 4, false, false, elem_type>, false>(params, stream);
+//         run_flash_fwd<Flash_fwd_kernel_traits<128, 64, 128, 4, false, false, elem_type>, false>(params, stream);
+//         // 1st ones are good for H100, A100
+//         // 2nd one is good for A6000 bc we get slightly better occupancy
+//     } else {
+//         run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, false, false, elem_type>, true>(params, stream);
+//         run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, true, false, elem_type>, true>(params, stream);
+//         run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, true, true, elem_type>, true>(params, stream);
+//         // 1st one is good for H100, A100, A6000
+//     }
+// }
+
+template<>
+void run_mha_fwd_<cutlass::half_t, 128>(Flash_fwd_params &params, cudaStream_t stream) {
+    run_mha_fwd_hdim128<cutlass::half_t>(params, stream);
+}
\ No newline at end of file
```
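For readers of the commented-out tuning block: the `Flash_fwd_kernel_traits` parameters follow the flash-attn convention `<kHeadDim, kBlockM, kBlockN, kNWarps, Is_Q_in_regs, Share_Q_K_smem, elem_type>`, and the trailing boolean on `run_flash_fwd` selects the dropout variant (`p_dropout` is a keep-probability, so `1.f` means dropout is disabled). Below is a hedged sketch of how such traits plausibly translate into a launch shape; the real definitions live in `flash_fwd_launch_template.h` and `kernel_traits.h` and are considerably more involved. All names ending in `_sketch` are hypothetical:

```cpp
#include <cuda_runtime.h>

// Illustrative stand-in for the real Flash_fwd_params (only the fields this
// sketch touches). p_dropout is a keep-probability: 1.f means "no dropout".
struct Flash_fwd_params_sketch {
    int b;          // batch size
    int h;          // number of attention heads
    int seqlen_q;   // query sequence length
    float p_dropout;
};

// Hypothetical reading of the compile-time tile parameters used above.
template <int kHeadDim_, int kBlockM_, int kBlockN_, int kNWarps_>
struct Kernel_traits_sketch {
    static constexpr int kHeadDim = kHeadDim_;  // head dimension (128 in this file)
    static constexpr int kBlockM  = kBlockM_;   // query rows processed per thread block
    static constexpr int kBlockN  = kBlockN_;   // key/value columns per inner-loop tile
    static constexpr int kNWarps  = kNWarps_;   // warps per block; 4 warps = 128 threads
};

// Sketch of the launch shape a run_flash_fwd-style launcher plausibly derives
// from the traits: one block per kBlockM-row tile of queries, per head, per batch.
template <typename Kernel_traits>
void launch_shape_sketch(const Flash_fwd_params_sketch &params,
                         dim3 &grid, dim3 &block) {
    const int num_m_blocks =
        (params.seqlen_q + Kernel_traits::kBlockM - 1) / Kernel_traits::kBlockM;
    grid  = dim3(num_m_blocks, params.b, params.h);
    block = dim3(Kernel_traits::kNWarps * 32);
}
```

Under this reading, the first traits line in the commented block (`kBlockM = 128`, `kNWarps = 4`) would tile a 2k query sequence into 16 blocks of 128 threads per (batch, head) pair, which is why the comments weigh tile shapes against occupancy on H100, A100, and A6000.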