summaryrefslogtreecommitdiff
path: root/candle-nn
diff options
context:
space:
mode:
authorLaurent Mazare <laurent.mazare@gmail.com>2024-11-04 10:42:18 +0100
committerGitHub <noreply@github.com>2024-11-04 10:42:18 +0100
commit6454597943599dd6df787a0d5f2446c5724d850a (patch)
tree8ad592626dae66ce06e8ff248dff5432483ee2ae /candle-nn
parent3fba2b5fc44f5c4b1963b0088018a25dd74ab2e9 (diff)
downloadcandle-6454597943599dd6df787a0d5f2446c5724d850a.tar.gz
candle-6454597943599dd6df787a0d5f2446c5724d850a.tar.bz2
candle-6454597943599dd6df787a0d5f2446c5724d850a.zip
Improved launch config for layer-norm/rms-norm. (#2591)
* Improved launch config for layer-norm/rms-norm.
* Add more testing for the fused layer/rms norm kernels.
Diffstat (limited to 'candle-nn')
-rw-r--r--candle-nn/src/ops.rs25
-rw-r--r--candle-nn/tests/ops.rs45
2 files changed, 66 insertions, 4 deletions
diff --git a/candle-nn/src/ops.rs b/candle-nn/src/ops.rs
index 9a360c47..8a3c19fe 100644
--- a/candle-nn/src/ops.rs
+++ b/candle-nn/src/ops.rs
@@ -543,15 +543,23 @@ impl candle::CustomOp2 for RmsNorm {
let dim_m1 = dims[dims.len() - 1];
let (n_rows, n_cols) = (el / dim_m1, dim_m1);
+ let block_size = if n_cols < 1024 { 32 } else { 1024 };
let cfg = LaunchConfig {
grid_dim: (n_rows as u32, 1, 1),
- block_dim: (1024, 1, 1),
+ block_dim: (block_size, 1, 1),
shared_mem_bytes: 0,
};
let func = dev.get_or_load_func(&kernel_name::<T>("rmsnorm"), kernels::REDUCE)?;
// SAFETY: Set later by running the kernel.
let dst = unsafe { dev.alloc::<T>(el) }.w()?;
- let params = (&src, &dst, &alpha, n_cols as i32, self.eps);
+ let params = (
+ &src,
+ &dst,
+ &alpha,
+ n_cols as i32,
+ block_size as i32,
+ self.eps,
+ );
// SAFETY: ffi.
unsafe { func.launch(cfg, params) }.w()?;
Ok(dst)
@@ -776,15 +784,24 @@ impl candle::CustomOp3 for LayerNorm {
let dim_m1 = dims[dims.len() - 1];
let (n_rows, n_cols) = (el / dim_m1, dim_m1);
+ let block_size = if n_cols < 1024 { 32 } else { 1024 };
let cfg = LaunchConfig {
grid_dim: (n_rows as u32, 1, 1),
- block_dim: (1024, 1, 1),
+ block_dim: (block_size, 1, 1),
shared_mem_bytes: 0,
};
let func = dev.get_or_load_func(&kernel_name::<T>("layernorm"), kernels::REDUCE)?;
// SAFETY: Set later by running the kernel.
let dst = unsafe { dev.alloc::<T>(el) }.w()?;
- let params = (&src, &dst, &alpha, &beta, n_cols as i32, self.eps);
+ let params = (
+ &src,
+ &dst,
+ &alpha,
+ &beta,
+ n_cols as i32,
+ block_size as i32,
+ self.eps,
+ );
// SAFETY: ffi.
unsafe { func.launch(cfg, params) }.w()?;
Ok(dst)
diff --git a/candle-nn/tests/ops.rs b/candle-nn/tests/ops.rs
index 65a8fbf2..3a8a0bb9 100644
--- a/candle-nn/tests/ops.rs
+++ b/candle-nn/tests/ops.rs
@@ -77,6 +77,27 @@ fn rms_norm(device: &Device) -> Result<()> {
Ok(())
}
+fn rms_norml(device: &Device) -> Result<()> {
+ use rand::{rngs::StdRng, Rng, SeedableRng};
+
+ let (b_size, seq_len, head_dim) = (24, 70, 64);
+ let el_count = b_size * seq_len * head_dim;
+ let mut rng = StdRng::seed_from_u64(299792458);
+ let src: Vec<f32> = (0..el_count).map(|_| rng.gen::<f32>()).collect();
+ let tensor = Tensor::new(src, device)?.reshape((b_size, seq_len, head_dim))?;
+ let alpha = Tensor::ones(head_dim, candle::DType::F32, device)?;
+ let t = candle_nn::ops::rms_norm(&tensor, &alpha, 1e-5)?;
+ let t2 = candle_nn::ops::rms_norm_slow(&tensor, &alpha, 1e-5)?;
+ let diff = (t - t2)?
+ .abs()?
+ .flatten_all()?
+ .max(0)?
+ .reshape(())?
+ .to_vec0::<f32>()?;
+ assert!(diff < 1e-5);
+ Ok(())
+}
+
fn layer_norm(device: &Device) -> Result<()> {
let data = &[[[3f32, 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]];
let tensor = Tensor::new(data, device)?;
@@ -103,6 +124,28 @@ fn layer_norm(device: &Device) -> Result<()> {
Ok(())
}
+fn layer_norml(device: &Device) -> Result<()> {
+ use rand::{rngs::StdRng, Rng, SeedableRng};
+
+ let (b_size, seq_len, head_dim) = (24, 70, 64);
+ let el_count = b_size * seq_len * head_dim;
+ let mut rng = StdRng::seed_from_u64(299792458);
+ let src: Vec<f32> = (0..el_count).map(|_| rng.gen::<f32>()).collect();
+ let tensor = Tensor::new(src, device)?.reshape((b_size, seq_len, head_dim))?;
+ let alpha = Tensor::ones(head_dim, candle::DType::F32, device)?;
+ let beta = Tensor::zeros(head_dim, candle::DType::F32, device)?;
+ let t = candle_nn::ops::layer_norm(&tensor, &alpha, &beta, 1e-5)?;
+ let t2 = candle_nn::ops::layer_norm_slow(&tensor, &alpha, &beta, 1e-5)?;
+ let diff = (t - t2)?
+ .abs()?
+ .flatten_all()?
+ .max(0)?
+ .reshape(())?
+ .to_vec0::<f32>()?;
+ assert!(diff < 1e-5);
+ Ok(())
+}
+
#[test]
fn softmax_numerical_stability() -> Result<()> {
let dev = &Device::Cpu;
@@ -211,5 +254,7 @@ test_device!(rope, rope_cpu, rope_gpu, rope_metal);
test_device!(rope_thd, rope_thd_cpu, rope_thd_gpu, rope_thd_metal);
test_device!(softmax, softmax_cpu, softmax_gpu, softmax_metal);
test_device!(rms_norm, rms_norm_cpu, rms_norm_gpu, rms_norm_metal);
+test_device!(rms_norml, rms_norml_cpu, rms_norml_gpu, rms_norml_metal);
test_device!(layer_norm, ln_cpu, ln_gpu, ln_metal);
+test_device!(layer_norml, lnl_cpu, lnl_gpu, lnl_metal);
test_device!(sigmoid, sigmoid_cpu, sigmoid_gpu, sigmoid_metal);