Diffstat (limited to 'src')
 src/cuda_backend.rs       | 41
 src/dummy_cuda_backend.rs |  4
 src/storage.rs            | 48
 3 files changed, 92 insertions(+), 1 deletion(-)
diff --git a/src/cuda_backend.rs b/src/cuda_backend.rs
index aa9a55f1..de70c835 100644
--- a/src/cuda_backend.rs
+++ b/src/cuda_backend.rs
@@ -16,6 +16,9 @@ pub enum CudaError {
#[error("missing kernel '{module_name}'")]
MissingKernel { module_name: &'static str },
+
+ #[error("internal error '{0}'")]
+ InternalError(&'static str),
}
type Result<T> = std::result::Result<T, CudaError>;
@@ -163,6 +166,44 @@ impl CudaStorage {
        }
    }
+    pub(crate) fn add_impl(
+        &self,
+        rhs: &Self,
+        shape: &Shape,
+        lhs_stride: &[usize],
+        rhs_stride: &[usize],
+    ) -> Result<Self> {
+        let elem_count = shape.elem_count();
+        let dims = shape.dims();
+        let cfg = LaunchConfig::for_num_elems(elem_count as u32);
+        let dev = self.device();
+        let dims_and_strides = [dims, lhs_stride, rhs_stride].concat();
+        match (self, rhs) {
+            (Self::F32(lhs), Self::F32(rhs)) => {
+                let func = dev.get_or_load_func("badd_f32", kernels::BINARY_ADD)?;
+                // SAFETY: Set later by running the add kernel.
+                let out = unsafe { dev.0.alloc::<f32>(elem_count) }?;
+                let dims_and_strides = dev.0.htod_copy(dims_and_strides)?;
+                let params = (elem_count, dims.len(), &dims_and_strides, lhs, rhs, &out);
+                // SAFETY: ffi
+                unsafe { func.launch(cfg, params) }?;
+                Ok(Self::F32(out))
+            }
+            (Self::F64(lhs), Self::F64(rhs)) => {
+                let func = dev.get_or_load_func("badd_f64", kernels::BINARY_ADD)?;
+                // SAFETY: Set later by running the add kernel.
+                let out = unsafe { dev.0.alloc::<f64>(elem_count) }?;
+                let dims_and_strides = dev.0.htod_copy(dims_and_strides)?;
+                let params = (elem_count, dims.len(), &dims_and_strides, lhs, rhs, &out);
+                // SAFETY: ffi
+                unsafe { func.launch(cfg, params) }?;
+                Ok(Self::F64(out))
+            }
+            // The dtypes should have been checked at this point so this is an internal error.
+            _ => Err(CudaError::InternalError("dtype mismatch in add")),
+        }
+    }
+
    pub(crate) fn to_cpu_storage(&self) -> Result<CpuStorage> {
        match self {
            Self::F32(slice) => {
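Note on the kernel side: add_impl launches badd_f32 / badd_f64 from kernels::BINARY_ADD with the parameter tuple (elem_count, dims.len(), &dims_and_strides, lhs, rhs, &out), where dims_and_strides packs the shape followed by the lhs and rhs strides. The kernels themselves are not part of this diff, so the following is only a hypothetical sketch of what a matching badd_f32 could look like under those assumptions: a grid-stride loop that maps each output index through the per-input strides so non-contiguous layouts still work. Everything beyond the parameter order is guesswork, not the actual kernels::BINARY_ADD source.

// Sketch only: the real implementation ships in kernels::BINARY_ADD and may differ.
extern "C" __global__ void badd_f32(
    const size_t numel,
    const size_t num_dims,
    const size_t *dims_and_strides, // [dims | lhs_stride | rhs_stride], 3 * num_dims entries
    const float *lhs,
    const float *rhs,
    float *out
) {
    const size_t *dims = dims_and_strides;
    const size_t *lhs_strides = dims_and_strides + num_dims;
    const size_t *rhs_strides = dims_and_strides + 2 * num_dims;
    for (size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x; i < numel;
         i += (size_t)blockDim.x * gridDim.x) {
        // Walk back from the linear output index to per-dimension coordinates,
        // then through each input's strides.
        size_t lhs_i = 0, rhs_i = 0, rem = i;
        for (size_t d = num_dims; d-- > 0;) {
            size_t coord = rem % dims[d];
            rem /= dims[d];
            lhs_i += coord * lhs_strides[d];
            rhs_i += coord * rhs_strides[d];
        }
        out[i] = lhs[lhs_i] + rhs[rhs_i];
    }
}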
diff --git a/src/dummy_cuda_backend.rs b/src/dummy_cuda_backend.rs
index 2eb393c1..16e78fbe 100644
--- a/src/dummy_cuda_backend.rs
+++ b/src/dummy_cuda_backend.rs
@@ -53,4 +53,8 @@ impl CudaStorage {
    pub(crate) fn affine_impl(&self, _: &Shape, _: &[usize], _: f64, _: f64) -> Result<Self> {
        Err(Error::NotCompiledWithCudaSupport)
    }
+
+    pub(crate) fn add_impl(&self, _: &Self, _: &Shape, _: &[usize], _: &[usize]) -> Result<Self> {
+        Err(Error::NotCompiledWithCudaSupport)
+    }
}
diff --git a/src/storage.rs b/src/storage.rs
index f1a2d5a0..22f9a26c 100644
--- a/src/storage.rs
+++ b/src/storage.rs
@@ -16,6 +16,13 @@ pub(crate) trait BinaryOp {
    const NAME: &'static str;
    fn f32(v1: f32, v2: f32) -> f32;
    fn f64(v1: f64, v2: f64) -> f64;
+    fn cuda_impl(
+        lhs: &CudaStorage,
+        rhs: &CudaStorage,
+        shape: &Shape,
+        lhs_stride: &[usize],
+        rhs_stride: &[usize],
+    ) -> Result<CudaStorage>;
}
struct Add;
@@ -34,6 +41,15 @@ impl BinaryOp for Add {
    fn f64(v1: f64, v2: f64) -> f64 {
        v1 + v2
    }
+    fn cuda_impl(
+        lhs: &CudaStorage,
+        rhs: &CudaStorage,
+        shape: &Shape,
+        lhs_stride: &[usize],
+        rhs_stride: &[usize],
+    ) -> Result<CudaStorage> {
+        Ok(lhs.add_impl(rhs, shape, lhs_stride, rhs_stride)?)
+    }
}
impl BinaryOp for Sub {
@@ -44,6 +60,15 @@ impl BinaryOp for Sub {
    fn f64(v1: f64, v2: f64) -> f64 {
        v1 - v2
    }
+    fn cuda_impl(
+        _: &CudaStorage,
+        _: &CudaStorage,
+        _: &Shape,
+        _: &[usize],
+        _: &[usize],
+    ) -> Result<CudaStorage> {
+        todo!()
+    }
}
impl BinaryOp for Mul {
@@ -54,6 +79,15 @@ impl BinaryOp for Mul {
    fn f64(v1: f64, v2: f64) -> f64 {
        v1 * v2
    }
+    fn cuda_impl(
+        _: &CudaStorage,
+        _: &CudaStorage,
+        _: &Shape,
+        _: &[usize],
+        _: &[usize],
+    ) -> Result<CudaStorage> {
+        todo!()
+    }
}
impl BinaryOp for Div {
@@ -64,6 +98,15 @@ impl BinaryOp for Div {
    fn f64(v1: f64, v2: f64) -> f64 {
        v1 / v2
    }
+    fn cuda_impl(
+        _: &CudaStorage,
+        _: &CudaStorage,
+        _: &Shape,
+        _: &[usize],
+        _: &[usize],
+    ) -> Result<CudaStorage> {
+        todo!()
+    }
}
impl UnaryOp for Neg {
@@ -177,7 +220,10 @@ impl Storage {
                let storage = lhs.binary_impl::<B>(rhs, shape, lhs_stride, rhs_stride)?;
                Ok(Self::Cpu(storage))
            }
-            (Self::Cuda { .. }, Self::Cuda { .. }) => todo!(),
+            (Self::Cuda(lhs), Self::Cuda(rhs)) => {
+                let storage = B::cuda_impl(lhs, rhs, shape, lhs_stride, rhs_stride)?;
+                Ok(Self::Cuda(storage))
+            }
            (lhs, rhs) => {
                // Should not happen because of the same device check above but we're defensive
                // anyway.