author    Nicolas Patry <patry.nicolas@protonmail.com>  2023-08-25 12:01:12 +0000
committer Nicolas Patry <patry.nicolas@protonmail.com>  2023-08-25 12:01:58 +0000
commit  d4e75d582506520ba6a76330bba4c14dcdcc19d8 (patch)
tree    28dfc1296d76110d2d1cc94de0a8cf8f749f89b8 /candle-kernels/src
parent  be371e827c141e9452b0dd8790209e0b3642648c (diff)
Let's keep the dirty code on its own.
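The generic CAST_OP macro now emits a direct assignment (out[i] = inp[i];), while the new CAST_BF_OP variant keeps the "dirty" round-trip through float for the __nv_bfloat16 <-> __half casts, presumably because CUDA's headers provide no direct conversion between the two half-precision types. As a rough illustration (not part of the commit), CAST_BF_OP(__nv_bfloat16, __half, cast_bf16_f16) should expand to something like the kernel below; the strided branch lies outside the hunk shown here and is assumed to mirror the contiguous one:

extern "C" __global__ void cast_bf16_f16(
    const size_t numel,
    const size_t num_dims,
    const size_t *info,
    const __nv_bfloat16 *inp,
    __half *out
) {
    // info packs the dims followed by the strides.
    const size_t *dims = info;
    const size_t *strides = info + num_dims;
    if (is_contiguous(num_dims, dims, strides)) {
        for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
            out[i] = (__half) (float) inp[i];  // bf16 -> float -> f16
        }
    } else {
        for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
            unsigned strided_i = get_strided_index(i, num_dims, dims, strides);
            out[i] = (__half) (float) inp[strided_i];
        }
    }
}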
Diffstat (limited to 'candle-kernels/src')
-rw-r--r--  candle-kernels/src/cast.cu | 27 +++++++++++++++++++++++++--
1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/candle-kernels/src/cast.cu b/candle-kernels/src/cast.cu
index 03ca1ec7..ab2045a3 100644
--- a/candle-kernels/src/cast.cu
+++ b/candle-kernels/src/cast.cu
@@ -13,6 +13,29 @@ extern "C" __global__ void FN_NAME( \
const size_t *strides = info + num_dims; \
if (is_contiguous(num_dims, dims, strides)) { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
+ out[i] = inp[i]; \
+ } \
+ } \
+ else { \
+ for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
+ unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \
+ out[i] = inp[strided_i]; \
+ } \
+ } \
+} \
+
+#define CAST_BF_OP(SRC_TYPENAME, DST_TYPENAME, FN_NAME) \
+extern "C" __global__ void FN_NAME( \
+ const size_t numel, \
+ const size_t num_dims, \
+ const size_t *info, \
+ const SRC_TYPENAME *inp, \
+ DST_TYPENAME *out \
+) { \
+ const size_t *dims = info; \
+ const size_t *strides = info + num_dims; \
+ if (is_contiguous(num_dims, dims, strides)) { \
+ for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
out[i] = (DST_TYPENAME) (float) inp[i]; \
} \
} \
@@ -29,14 +52,14 @@ CAST_OP(__nv_bfloat16, __nv_bfloat16, cast_bf16_bf16)
// CAST_OP(__nv_bfloat16, uint8_t, cast_bf16_u8)
CAST_OP(__nv_bfloat16, uint32_t, cast_bf16_u32)
-CAST_OP(__nv_bfloat16, __half, cast_bf16_f16)
CAST_OP(__nv_bfloat16, float, cast_bf16_f32)
CAST_OP(__nv_bfloat16, double, cast_bf16_f64)
CAST_OP(uint8_t, __nv_bfloat16, cast_u8_bf16)
CAST_OP(uint32_t, __nv_bfloat16, cast_u32_bf16)
-CAST_OP(__half, __nv_bfloat16, cast_f16_bf16)
CAST_OP(float, __nv_bfloat16, cast_f32_bf16)
CAST_OP(double, __nv_bfloat16, cast_f64_bf16)
+CAST_BF_OP(__nv_bfloat16, __half, cast_bf16_f16)
+CAST_BF_OP(__half, __nv_bfloat16, cast_f16_bf16)
#endif
#if __CUDA_ARCH__ >= 530
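For reference, a minimal host-side sketch of launching one of the generated kernels. The dims-then-strides layout of info follows the kernel body above, and any launch shape works because of the grid-stride loop; the buffer sizes here are hypothetical, the input is left uninitialized for brevity, and this file would have to be compiled together with cast.cu (e.g. by nvcc) for cast_bf16_f16 to resolve:

#include <cuda_runtime.h>
#include <cuda_bf16.h>
#include <cuda_fp16.h>

// Matches the signature that CAST_BF_OP(__nv_bfloat16, __half, cast_bf16_f16) generates.
extern "C" __global__ void cast_bf16_f16(const size_t numel, const size_t num_dims,
                                         const size_t *info, const __nv_bfloat16 *inp,
                                         __half *out);

int main() {
    const size_t numel = 32 * 32;
    const size_t num_dims = 2;
    const size_t h_info[] = {32, 32, 32, 1};  // dims {32, 32}, then contiguous strides {32, 1}

    __nv_bfloat16 *d_inp;
    __half *d_out;
    size_t *d_info;
    cudaMalloc(&d_inp, numel * sizeof(__nv_bfloat16));
    cudaMalloc(&d_out, numel * sizeof(__half));
    cudaMalloc(&d_info, sizeof(h_info));
    cudaMemcpy(d_info, h_info, sizeof(h_info), cudaMemcpyHostToDevice);

    // The kernel iterates with a grid-stride loop, so this shape is arbitrary.
    cast_bf16_f16<<<64, 256>>>(numel, num_dims, d_info, d_inp, d_out);
    cudaDeviceSynchronize();

    cudaFree(d_inp);
    cudaFree(d_out);
    cudaFree(d_info);
    return 0;
}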