diff options
author | Nicolas Patry <patry.nicolas@protonmail.com> | 2023-08-01 14:02:21 +0200 |
---|---|---|
committer | Nicolas Patry <patry.nicolas@protonmail.com> | 2023-08-01 14:26:02 +0200 |
commit | 5cead227efaad60ce5e1ea2c337785010c4a5f2a (patch) | |
tree | 87c84d066d1ea973d1036254b63c56fc9607a438 /candle-book | |
parent | ebd0315623a42c1798e39f4d6321d096fd328b06 (diff) | |
download | candle-5cead227efaad60ce5e1ea2c337785010c4a5f2a.tar.gz candle-5cead227efaad60ce5e1ea2c337785010c4a5f2a.tar.bz2 candle-5cead227efaad60ce5e1ea2c337785010c4a5f2a.zip |
Addressed comments.
Diffstat (limited to 'candle-book')
-rw-r--r-- | candle-book/src/guide/hello_world.md | 11 | ||||
-rw-r--r-- | candle-book/src/guide/installation.md | 2 |
2 files changed, 8 insertions, 5 deletions
diff --git a/candle-book/src/guide/hello_world.md b/candle-book/src/guide/hello_world.md index ee630979..d7cb76c9 100644 --- a/candle-book/src/guide/hello_world.md +++ b/candle-book/src/guide/hello_world.md @@ -2,7 +2,7 @@ We will now create the hello world of the ML world, building a model capable of solving MNIST dataset. -Open `src/main.rs` and fill in with these contents: +Open `src/main.rs` and fill in with this content: ```rust # extern crate candle; @@ -45,7 +45,7 @@ cargo run --release ## Using a `Linear` layer. -Now that we have this, we might want to complexity a little, for instance by adding `bias` and creating +Now that we have this, we might want to complexify things a bit, for instance by adding `bias` and creating the classical `Linear` layer. We can do as such ```rust @@ -76,7 +76,7 @@ impl Model { } ``` -This will change the loading code into a new function +This will change the model running code into a new function ```rust # extern crate candle; @@ -106,8 +106,10 @@ This will change the loading code into a new function # } fn main() -> Result<()> { // Use Device::new_cuda(0)?; to use the GPU. - let device = Device::Cpu; + // Use Device::Cpu; to use the CPU. 
+ let device = Device::cuda_if_available(0)?; + // Creating a dummy model let weight = Tensor::zeros((784, 100), DType::F32, &device)?; let bias = Tensor::zeros((100, ), DType::F32, &device)?; let first = Linear{weight, bias}; @@ -118,6 +120,7 @@ fn main() -> Result<()> { let dummy_image = Tensor::zeros((1, 784), DType::F32, &device)?; + // Inference on the model let digit = model.forward(&dummy_image)?; println!("Digit {digit:?} digit"); Ok(()) diff --git a/candle-book/src/guide/installation.md b/candle-book/src/guide/installation.md index 6ed9f6c3..8e549d01 100644 --- a/candle-book/src/guide/installation.md +++ b/candle-book/src/guide/installation.md @@ -9,7 +9,7 @@ cargo add --git https://github.com/LaurentMazare/candle.git candle ``` At this point, candle will be built **without** CUDA support. -To get CUDA support use the feature `cuda` +To get CUDA support use the `cuda` feature ```bash cargo add --git https://github.com/LaurentMazare/candle.git candle --features cuda ``` |