diff options
Diffstat (limited to 'candle-nn/tests')
-rw-r--r--  candle-nn/tests/batch_norm.rs |  4 +---
-rw-r--r--  candle-nn/tests/group_norm.rs |  7 +++----
-rw-r--r--  candle-nn/tests/layer_norm.rs |  8 +++-----
-rw-r--r--  candle-nn/tests/loss.rs       |  3 +--
-rw-r--r--  candle-nn/tests/ops.rs        | 11 ++++-------
-rw-r--r--  candle-nn/tests/optim.rs      |  3 +--
-rw-r--r--  candle-nn/tests/test_utils.rs | 39 ---------------------------------------
7 files changed, 13 insertions(+), 62 deletions(-)
diff --git a/candle-nn/tests/batch_norm.rs b/candle-nn/tests/batch_norm.rs index 7a3cfc18..209fc10a 100644 --- a/candle-nn/tests/batch_norm.rs +++ b/candle-nn/tests/batch_norm.rs @@ -4,10 +4,8 @@ extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; -mod test_utils; - use anyhow::Result; -use candle::{DType, Device, Tensor}; +use candle::{test_utils, DType, Device, Tensor}; use candle_nn::BatchNorm; /* The test below has been generated using the following PyTorch code: diff --git a/candle-nn/tests/group_norm.rs b/candle-nn/tests/group_norm.rs index eff66d17..8145a220 100644 --- a/candle-nn/tests/group_norm.rs +++ b/candle-nn/tests/group_norm.rs @@ -25,10 +25,9 @@ extern crate intel_mkl_src; extern crate accelerate_src; use anyhow::Result; +use candle::test_utils::to_vec3_round; use candle::{Device, Tensor}; use candle_nn::{GroupNorm, Module}; -mod test_utils; -use test_utils::to_vec3_round; #[test] fn group_norm() -> Result<()> { @@ -60,7 +59,7 @@ fn group_norm() -> Result<()> { device, )?; assert_eq!( - to_vec3_round(gn2.forward(&input)?, 4)?, + to_vec3_round(&gn2.forward(&input)?, 4)?, &[ [ [-0.1653, 0.3748, -0.7866], @@ -81,7 +80,7 @@ fn group_norm() -> Result<()> { ] ); assert_eq!( - to_vec3_round(gn3.forward(&input)?, 4)?, + to_vec3_round(&gn3.forward(&input)?, 4)?, &[ [ [0.4560, 1.4014, -0.6313], diff --git a/candle-nn/tests/layer_norm.rs b/candle-nn/tests/layer_norm.rs index 0f43d804..f81c29bd 100644 --- a/candle-nn/tests/layer_norm.rs +++ b/candle-nn/tests/layer_norm.rs @@ -5,11 +5,9 @@ extern crate intel_mkl_src; extern crate accelerate_src; use anyhow::Result; -use candle::{Device, Tensor}; +use candle::{test_utils, Device, Tensor}; use candle_nn::{LayerNorm, Module}; -mod test_utils; - #[test] fn layer_norm() -> Result<()> { let device = &Device::Cpu; @@ -28,7 +26,7 @@ fn layer_norm() -> Result<()> { let inp = Tensor::new(&[[[1f32, 2., 3.], [4., 5., 6.], [9., 8., 7.]]], device)?; let res = ln.forward(&inp)?; assert_eq!( 
- test_utils::to_vec3_round(res.clone(), 4)?, + test_utils::to_vec3_round(&res, 4)?, [[ [-3.1742, 0.5, 4.1742], [-3.1742, 0.5, 4.1742], @@ -41,7 +39,7 @@ fn layer_norm() -> Result<()> { let std = (res.broadcast_sub(&mean)?.sqr()?.sum_keepdim(2)?.sqrt()? / 3.0)?; // The standard deviation should be sqrt(`w`). assert_eq!( - test_utils::to_vec3_round(std, 4)?, + test_utils::to_vec3_round(&std, 4)?, [[[1.7321], [1.7321], [1.7321]]] ); Ok(()) diff --git a/candle-nn/tests/loss.rs b/candle-nn/tests/loss.rs index c075c7fb..d772f176 100644 --- a/candle-nn/tests/loss.rs +++ b/candle-nn/tests/loss.rs @@ -4,9 +4,8 @@ extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; +use candle::test_utils::to_vec0_round; use candle::{Device, Result, Tensor}; -mod test_utils; -use test_utils::to_vec0_round; /* Equivalent python code: import torch diff --git a/candle-nn/tests/ops.rs b/candle-nn/tests/ops.rs index fcf39fd8..4ba8cfcc 100644 --- a/candle-nn/tests/ops.rs +++ b/candle-nn/tests/ops.rs @@ -4,10 +4,7 @@ extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; -mod test_utils; -use test_utils::to_vec3_round; - -use candle::{Device, Result, Tensor}; +use candle::{test_utils::to_vec3_round, Device, Result, Tensor}; #[test] fn softmax() -> Result<()> { @@ -18,7 +15,7 @@ fn softmax() -> Result<()> { let t1 = candle_nn::ops::softmax(&tensor.log()?, 1)?; let t2 = candle_nn::ops::softmax(&tensor.log()?, 2)?; assert_eq!( - to_vec3_round(t0, 4)?, + to_vec3_round(&t0, 4)?, &[ // 3/5, 1/2, 4/11 [[0.6, 0.5, 0.3636], [0.1111, 0.7143, 0.5294]], @@ -27,7 +24,7 @@ fn softmax() -> Result<()> { ] ); assert_eq!( - to_vec3_round(t1, 4)?, + to_vec3_round(&t1, 4)?, &[ // 3/4, 1/6, 4/13 [[0.75, 0.1667, 0.3077], [0.25, 0.8333, 0.6923]], @@ -36,7 +33,7 @@ fn softmax() -> Result<()> { ] ); assert_eq!( - to_vec3_round(t2, 4)?, + to_vec3_round(&t2, 4)?, &[ // (3, 1, 4) / 8, (1, 5, 9) / 15 [[0.375, 0.125, 0.5], [0.0667, 0.3333, 0.6]], diff --git 
a/candle-nn/tests/optim.rs b/candle-nn/tests/optim.rs index f1d3b3f5..673d0455 100644 --- a/candle-nn/tests/optim.rs +++ b/candle-nn/tests/optim.rs @@ -4,8 +4,7 @@ extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; -mod test_utils; -use test_utils::{to_vec0_round, to_vec2_round}; +use candle::test_utils::{to_vec0_round, to_vec2_round}; use anyhow::Result; use candle::{Device, Tensor, Var}; diff --git a/candle-nn/tests/test_utils.rs b/candle-nn/tests/test_utils.rs deleted file mode 100644 index bb422cd9..00000000 --- a/candle-nn/tests/test_utils.rs +++ /dev/null @@ -1,39 +0,0 @@ -#![allow(dead_code)] -use candle::{Result, Tensor}; - -pub fn to_vec0_round(t: &Tensor, digits: i32) -> Result<f32> { - let b = 10f32.powi(digits); - let t = t.to_vec0::<f32>()?; - Ok(f32::round(t * b) / b) -} - -pub fn to_vec1_round(t: &Tensor, digits: i32) -> Result<Vec<f32>> { - let b = 10f32.powi(digits); - let t = t.to_vec1::<f32>()?; - let t = t.iter().map(|t| f32::round(t * b) / b).collect(); - Ok(t) -} - -pub fn to_vec2_round(t: &Tensor, digits: i32) -> Result<Vec<Vec<f32>>> { - let b = 10f32.powi(digits); - let t = t.to_vec2::<f32>()?; - let t = t - .iter() - .map(|t| t.iter().map(|t| f32::round(t * b) / b).collect()) - .collect(); - Ok(t) -} - -pub fn to_vec3_round(t: Tensor, digits: i32) -> Result<Vec<Vec<Vec<f32>>>> { - let b = 10f32.powi(digits); - let t = t.to_vec3::<f32>()?; - let t = t - .iter() - .map(|t| { - t.iter() - .map(|t| t.iter().map(|t| f32::round(t * b) / b).collect()) - .collect() - }) - .collect(); - Ok(t) -} |