diff options
author | Laurent Mazare <laurent.mazare@gmail.com> | 2024-07-15 20:37:36 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-07-15 20:37:36 +0200 |
commit | 30cdd769f9404035235830e602ae01d50f782fb5 (patch) | |
tree | dd9d8adcfce61fe40678f2967bbb25cecb1f679a /candle-flash-attn/kernels/flash_fwd_hdim64_bf16_causal_sm80.cu | |
parent | d74fbed3341f875fa81112e2f59565c464cd59d8 (diff) | |
download | candle-30cdd769f9404035235830e602ae01d50f782fb5.tar.gz candle-30cdd769f9404035235830e602ae01d50f782fb5.tar.bz2 candle-30cdd769f9404035235830e602ae01d50f782fb5.zip |
Update the flash attn kernels. (#2333)
Diffstat (limited to 'candle-flash-attn/kernels/flash_fwd_hdim64_bf16_causal_sm80.cu')
-rw-r--r-- | candle-flash-attn/kernels/flash_fwd_hdim64_bf16_causal_sm80.cu | 10 |
1 file changed, 10 insertions, 0 deletions
diff --git a/candle-flash-attn/kernels/flash_fwd_hdim64_bf16_causal_sm80.cu b/candle-flash-attn/kernels/flash_fwd_hdim64_bf16_causal_sm80.cu new file mode 100644 index 00000000..99cd728b --- /dev/null +++ b/candle-flash-attn/kernels/flash_fwd_hdim64_bf16_causal_sm80.cu @@ -0,0 +1,10 @@ +// Copyright (c) 2023, Tri Dao. +// Splitting the different head dimensions to different files to speed up compilation. +// This file is auto-generated. See "generate_kernels.py" + +#include "flash_fwd_launch_template.h" + +template<> +void run_mha_fwd_<cutlass::bfloat16_t, 64, true>(Flash_fwd_params &params, cudaStream_t stream) { + run_mha_fwd_hdim64<cutlass::bfloat16_t, true>(params, stream); +} |