author     Laurent Mazare <laurent.mazare@gmail.com>  2023-08-15 17:19:18 +0100
committer  GitHub <noreply@github.com>                2023-08-15 17:19:18 +0100
commit     8ad4a21ffcc03b745f33170130298bab79a09795
tree       cdf7b715d84a31951d6b3d29a9177cb12e57115c  /candle-nn/examples/basic_optimizer.rs
parent     5e49922be2d01f81e4b5d352ae4feb9f71851709
Add a basic optimizer example. (#454)
Diffstat (limited to 'candle-nn/examples/basic_optimizer.rs')
 candle-nn/examples/basic_optimizer.rs | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+), 0 deletions(-)
diff --git a/candle-nn/examples/basic_optimizer.rs b/candle-nn/examples/basic_optimizer.rs
new file mode 100644
index 00000000..3c5665e8
--- /dev/null
+++ b/candle-nn/examples/basic_optimizer.rs
@@ -0,0 +1,33 @@
+use candle::{DType, Device, Result, Tensor};
+use candle_nn::{linear, AdamW, Linear, ParamsAdamW, VarBuilder, VarMap};
+
+fn gen_data() -> Result<(Tensor, Tensor)> {
+    // Generate some sample linear data.
+    let w_gen = Tensor::new(&[[3f32, 1.]], &Device::Cpu)?;
+    let b_gen = Tensor::new(-2f32, &Device::Cpu)?;
+    let gen = Linear::new(w_gen, Some(b_gen));
+    let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], &Device::Cpu)?;
+    let sample_ys = gen.forward(&sample_xs)?;
+    Ok((sample_xs, sample_ys))
+}
+
+fn main() -> Result<()> {
+    let (sample_xs, sample_ys) = gen_data()?;
+
+    // Use backprop to run a linear regression between samples and get the coefficients back.
+    let varmap = VarMap::new();
+    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &Device::Cpu);
+    let model = linear(2, 1, vb.pp("linear"))?;
+    let params = ParamsAdamW {
+        lr: 0.1,
+        ..Default::default()
+    };
+    let mut opt = AdamW::new(varmap.all_vars(), params)?;
+    for step in 0..10000 {
+        let ys = model.forward(&sample_xs)?;
+        let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?;
+        opt.backward_step(&loss)?;
+        println!("{step} {}", loss.to_vec0::<f32>()?);
+    }
+    Ok(())
+}
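The training loop fits a 2-input, 1-output linear layer to data produced by the known generator w = [[3, 1]], b = -2, so after the 10,000 AdamW steps the learned parameters should closely approach those values. A minimal sketch of how one might verify this at the end of main, assuming the Linear returned by linear() exposes the weight()/bias() accessors found in current candle-nn (not part of this commit; check the version you build against):

    // Hypothetical check: read back the fitted parameters and compare
    // them with the generating coefficients used in gen_data().
    let w = model.weight().to_vec2::<f32>()?; // expect roughly [[3.0, 1.0]]
    let b = model.bias().unwrap().to_vec1::<f32>()?; // expect roughly [-2.0]
    println!("learned w: {w:?}, learned b: {b:?}");

Assuming the standard cargo layout of the repository, the example can be run with something like cargo run -p candle-nn --example basic_optimizer.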