diff options
author | Laurent Mazare <laurent.mazare@gmail.com> | 2023-10-11 19:24:32 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-10-11 18:24:32 +0100 |
commit | 89b525b5e758218179dd32293e7167e3aae1b28f (patch) | |
tree | f343433a30febbf04d414522bb8bbe5f99f8818b /candle-nn | |
parent | 37dbbff261f1641db6dc868fc4dded5f8cb25a1f (diff) | |
download | candle-89b525b5e758218179dd32293e7167e3aae1b28f.tar.gz candle-89b525b5e758218179dd32293e7167e3aae1b28f.tar.bz2 candle-89b525b5e758218179dd32293e7167e3aae1b28f.zip |
Convmixer (#1073)
* Only optimize float tensors.
* Use full tensors for zeros and ones.
* Add a benchmark for the matmul slowness.
* Add the convmixer model.
* Proper adaptive pooling.
Diffstat (limited to 'candle-nn')
-rw-r--r-- | candle-nn/examples/cpu_benchmarks.rs | 4 |
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/candle-nn/examples/cpu_benchmarks.rs b/candle-nn/examples/cpu_benchmarks.rs
index e58ea727..6007ff6c 100644
--- a/candle-nn/examples/cpu_benchmarks.rs
+++ b/candle-nn/examples/cpu_benchmarks.rs
@@ -185,8 +185,8 @@ impl Benchmark for Matmul {
     type PreProcessData = (Tensor, Tensor);
     type RunResult = Tensor;
     fn preprocess() -> Result<Self::PreProcessData> {
-        let lhs = Tensor::randn(0f32, 1., (1024, 1024), &Device::Cpu)?;
-        let rhs = Tensor::randn(0f32, 1., (1024, 1024), &Device::Cpu)?;
+        let lhs = Tensor::randn(0f32, 1., (1024 * 4, 1024 * 4), &Device::Cpu)?;
+        let rhs = Tensor::randn(0f32, 1., (1024 * 4, 1), &Device::Cpu)?;
         Ok((lhs, rhs))
     }