diff options
author | Ssslakter <67190162+Ssslakter@users.noreply.github.com> | 2023-09-10 18:02:52 +0700 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-09-10 12:02:52 +0100 |
commit | 6c58fc59fd828492021cfd0f4518ae5ae3b03f56 (patch) | |
tree | 5a05024dffcb02ddc92deea29b7f9da63dd19ad6 | |
parent | 35f72514f59b3fa4bd321e3e88a75f5b43cf060f (diff) | |
download | candle-6c58fc59fd828492021cfd0f4518ae5ae3b03f56.tar.gz candle-6c58fc59fd828492021cfd0f4518ae5ae3b03f56.tar.bz2 candle-6c58fc59fd828492021cfd0f4518ae5ae3b03f56.zip |
Little docs changes (#791)
* Little doc fixes
* change imports in lib
* rename candle_core to candle
* revert "rename candle_core to candle"
-rw-r--r-- | candle-book/src/guide/hello_world.md | 28 |
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/candle-book/src/guide/hello_world.md b/candle-book/src/guide/hello_world.md
index fc4af0e1..74a147e7 100644
--- a/candle-book/src/guide/hello_world.md
+++ b/candle-book/src/guide/hello_world.md
@@ -25,8 +25,8 @@ fn main() -> Result<()> {
     // Use Device::new_cuda(0)?; to use the GPU.
     let device = Device::Cpu;
-    let first = Tensor::zeros((784, 100), DType::F32, &device)?;
-    let second = Tensor::zeros((100, 10), DType::F32, &device)?;
+    let first = Tensor::randn(0f32, 1.0, (784, 100), &device)?;
+    let second = Tensor::randn(0f32, 1.0, (100, 10), &device)?;
     let model = Model { first, second };
     let dummy_image = Tensor::zeros((1, 784), DType::F32, &device)?;
@@ -110,15 +110,15 @@ fn main() -> Result<()> {
     let device = Device::cuda_if_available(0)?;
     // Creating a dummy model
-    let weight = Tensor::zeros((784, 100), DType::F32, &device)?;
+    let weight = Tensor::randn(0f32, 1.0, (784, 100), &device)?;
     let bias = Tensor::zeros((100, ), DType::F32, &device)?;
     let first = Linear{weight, bias};
-    let weight = Tensor::zeros((100, 10), DType::F32, &device)?;
-    let bias = Tensor::zeros((10, ), DType::F32, &device)?;
+    let weight = Tensor::randn(0f32, 1.0, (100, 10), &device)?;
+    let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?;
     let second = Linear{weight, bias};
     let model = Model { first, second };
-    let dummy_image = Tensor::zeros((1, 784), DType::F32, &device)?;
+    let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;
     // Inference on the model
     let digit = model.forward(&dummy_image)?;
@@ -167,15 +167,15 @@ fn main() -> Result<()> {
     let device = Device::Cpu;
     // This has changed (784, 100) -> (100, 784) !
-    let weight = Tensor::zeros((100, 784), DType::F32, &device)?;
-    let bias = Tensor::zeros((100, ), DType::F32, &device)?;
+    let weight = Tensor::randn(0f32, 1.0, (100, 784), &device)?;
+    let bias = Tensor::randn(0f32, 1.0, (100, ), &device)?;
     let first = Linear::new(weight, Some(bias));
-    let weight = Tensor::zeros((10, 100), DType::F32, &device)?;
-    let bias = Tensor::zeros((10, ), DType::F32, &device)?;
+    let weight = Tensor::randn(0f32, 1.0, (10, 100), &device)?;
+    let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?;
     let second = Linear::new(weight, Some(bias));
     let model = Model { first, second };
-    let dummy_image = Tensor::zeros((1, 784), DType::F32, &device)?;
+    let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;
     let digit = model.forward(&dummy_image)?;
     println!("Digit {digit:?} digit");
@@ -188,8 +188,8 @@ Feel free to modify this example to use `Conv2d` to create a classical convnet instead.
 Now that we have the running dummy code we can get to more advanced topics:
-- [For PyTorch users](./guide/cheatsheet.md)
-- [Running existing models](./inference/README.md)
-- [Training models](./training/README.md)
+- [For PyTorch users](../guide/cheatsheet.md)
+- [Running existing models](../inference/README.md)
+- [Training models](../training/README.md)