path: root/candle-flash-attn/kernels/kernels.h
#ifndef _GPU_OPS_KERNELS_H_
#define _GPU_OPS_KERNELS_H_

#include <cuda_runtime_api.h>

#include <cstddef>
#include <cstdint>

#include <stdlib.h>
#include <stdint.h>

namespace gpu_ops {

struct MHAParams {
  // Element strides between consecutive batch entries of Q, K, V and the
  // output O.
  uint32_t q_batch_stride;
  uint32_t k_batch_stride;
  uint32_t v_batch_stride;
  uint32_t o_batch_stride;

  // Element strides between consecutive rows (tokens).
  uint32_t q_row_stride;
  uint32_t k_row_stride;
  uint32_t v_row_stride;
  uint32_t o_row_stride;

  // Element strides between consecutive attention heads.
  uint32_t q_head_stride;
  uint32_t k_head_stride;
  uint32_t v_head_stride;
  uint32_t o_head_stride;

  uint32_t b;          // batch size
  uint32_t h;          // number of query heads
  uint32_t h_k;        // number of key/value heads (equal to h for MHA, smaller for MQA/GQA)
  uint32_t d;          // head dimension
  uint32_t d_rounded;  // head dimension rounded up for the kernel
  float softmax_scale; // scale applied to Q*K^T before the softmax, typically 1/sqrt(d)
  float softcap;       // attention logit soft-cap; 0.0 disables it

  uint32_t seqlen_q;         // query sequence length
  uint32_t seqlen_k;         // key/value sequence length
  uint32_t seqlen_q_rounded; // sequence lengths rounded up for the kernel
  uint32_t seqlen_k_rounded;

  // Sliding-window (local attention) bounds; a negative value leaves that
  // side unbounded.
  int window_size_left;
  int window_size_right;

  int is_causal; // non-zero to apply a causal mask
  int is_bf16;   // non-zero for bfloat16 inputs, zero for float16
};
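
// Illustrative example (an assumption, not mandated by this header): for a
// query tensor stored contiguously as [b, seqlen_q, h, d], the stride fields
// would be
//
//   q_head_stride  = d;
//   q_row_stride   = h * d;
//   q_batch_stride = seqlen_q * h * d;
//
// with the K/V/O strides derived the same way from their own shapes, and
// softmax_scale commonly set to 1.0f / sqrtf(static_cast<float>(d)).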

// Forward and backward flash-attention entry points. `buffers` holds the
// device pointers for the inputs and outputs; `opaque` is a caller-provided
// parameter blob of `opaque_len` bytes.
void run_mha_fwd_j(cudaStream_t stream, void **buffers,
                   const char *opaque,
                   std::size_t opaque_len);
void run_mha_bwd_j(cudaStream_t stream, void **buffers,
                   const char *opaque,
                   std::size_t opaque_len);
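
// Hypothetical usage sketch, assuming the XLA custom-call convention where
// `buffers` lists the device pointers and `opaque` is the byte image of an
// MHAParams; the buffer order and serialization below are illustrative
// assumptions, not confirmed by this header:
//
//   MHAParams params{}; // fill strides, shapes, and flags as above
//   void *buffers[] = {q_dev, k_dev, v_dev, o_dev, softmax_lse_dev};
//   run_mha_fwd_j(stream,
//                 buffers,
//                 reinterpret_cast<const char *>(&params),
//                 sizeof(params));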
} // namespace gpu_ops

#endif // _GPU_OPS_KERNELS_H_