Diffstat (limited to 'candle-book/src/guide/hello_world.md')
-rw-r--r--  candle-book/src/guide/hello_world.md  40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/candle-book/src/guide/hello_world.md b/candle-book/src/guide/hello_world.md
index fc4af0e1..b5b8d7b4 100644
--- a/candle-book/src/guide/hello_world.md
+++ b/candle-book/src/guide/hello_world.md
@@ -6,7 +6,7 @@ Open `src/main.rs` and fill in this content:
```rust
# extern crate candle_core;
-use candle_core::{DType, Device, Result, Tensor};
+use candle_core::{Device, Result, Tensor};
struct Model {
first: Tensor,
@@ -25,11 +25,11 @@ fn main() -> Result<()> {
// Use Device::new_cuda(0)?; to use the GPU.
let device = Device::Cpu;
- let first = Tensor::zeros((784, 100), DType::F32, &device)?;
- let second = Tensor::zeros((100, 10), DType::F32, &device)?;
+ let first = Tensor::randn(0f32, 1.0, (784, 100), &device)?;
+ let second = Tensor::randn(0f32, 1.0, (100, 10), &device)?;
let model = Model { first, second };
- let dummy_image = Tensor::zeros((1, 784), DType::F32, &device)?;
+ let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;
let digit = model.forward(&dummy_image)?;
println!("Digit {digit:?} digit");
@@ -50,7 +50,7 @@ the classical `Linear` layer. We can do as such
```rust
# extern crate candle_core;
-# use candle_core::{DType, Device, Result, Tensor};
+# use candle_core::{Device, Result, Tensor};
struct Linear{
weight: Tensor,
bias: Tensor,
@@ -80,7 +80,7 @@ This will change the model running code into a new function
```rust
# extern crate candle_core;
-# use candle_core::{DType, Device, Result, Tensor};
+# use candle_core::{Device, Result, Tensor};
# struct Linear{
# weight: Tensor,
# bias: Tensor,
@@ -110,15 +110,15 @@ fn main() -> Result<()> {
let device = Device::cuda_if_available(0)?;
// Creating a dummy model
- let weight = Tensor::zeros((784, 100), DType::F32, &device)?;
- let bias = Tensor::zeros((100, ), DType::F32, &device)?;
+ let weight = Tensor::randn(0f32, 1.0, (784, 100), &device)?;
+ let bias = Tensor::randn(0f32, 1.0, (100, ), &device)?;
let first = Linear{weight, bias};
- let weight = Tensor::zeros((100, 10), DType::F32, &device)?;
- let bias = Tensor::zeros((10, ), DType::F32, &device)?;
+ let weight = Tensor::randn(0f32, 1.0, (100, 10), &device)?;
+ let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?;
let second = Linear{weight, bias};
let model = Model { first, second };
- let dummy_image = Tensor::zeros((1, 784), DType::F32, &device)?;
+ let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;
// Inference on the model
let digit = model.forward(&dummy_image)?;
@@ -146,7 +146,7 @@ And rewrite our examples using it
```rust
# extern crate candle_core;
# extern crate candle_nn;
-use candle_core::{DType, Device, Result, Tensor};
+use candle_core::{Device, Result, Tensor};
use candle_nn::{Linear, Module};
struct Model {
@@ -167,15 +167,15 @@ fn main() -> Result<()> {
let device = Device::Cpu;
// This has changed (784, 100) -> (100, 784) !
- let weight = Tensor::zeros((100, 784), DType::F32, &device)?;
- let bias = Tensor::zeros((100, ), DType::F32, &device)?;
+ let weight = Tensor::randn(0f32, 1.0, (100, 784), &device)?;
+ let bias = Tensor::randn(0f32, 1.0, (100, ), &device)?;
let first = Linear::new(weight, Some(bias));
- let weight = Tensor::zeros((10, 100), DType::F32, &device)?;
- let bias = Tensor::zeros((10, ), DType::F32, &device)?;
+ let weight = Tensor::randn(0f32, 1.0, (10, 100), &device)?;
+ let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?;
let second = Linear::new(weight, Some(bias));
let model = Model { first, second };
- let dummy_image = Tensor::zeros((1, 784), DType::F32, &device)?;
+ let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;
let digit = model.forward(&dummy_image)?;
println!("Digit {digit:?} digit");
@@ -188,8 +188,8 @@ Feel free to modify this example to use `Conv2d` to create a classical convnet i
Now that we have the running dummy code we can get to more advanced topics:
-- [For PyTorch users](./guide/cheatsheet.md)
-- [Running existing models](./inference/README.md)
-- [Training models](./training/README.md)
+- [For PyTorch users](../guide/cheatsheet.md)
+- [Running existing models](../inference/inference.md)
+- [Training models](../training/training.md)