summary | refs | log | tree | commit | diff
path: root/candle-book
diff options
context:
space:
mode:
author	Nicolas Patry <patry.nicolas@protonmail.com>	2023-08-02 18:35:31 +0200
committer	Nicolas Patry <patry.nicolas@protonmail.com>	2023-08-02 18:40:24 +0200
commit166f4d1101437eb36c938781ed0b9270d9a1c282 (patch)
treef3a11d44897d36d473a0c9ed7fd8c3127e3c234c /candle-book
parentae68635af9dfcae359f621dd3e1df3b3c3d97042 (diff)
downloadcandle-166f4d1101437eb36c938781ed0b9270d9a1c282.tar.gz
candle-166f4d1101437eb36c938781ed0b9270d9a1c282.tar.bz2
candle-166f4d1101437eb36c938781ed0b9270d9a1c282.zip
`s/candle/candle_core/g`
Diffstat (limited to 'candle-book')
-rw-r--r--	candle-book/src/inference/hub.md | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/candle-book/src/inference/hub.md b/candle-book/src/inference/hub.md
index 01492df1..a974a1fa 100644
--- a/candle-book/src/inference/hub.md
+++ b/candle-book/src/inference/hub.md
@@ -10,17 +10,17 @@ Then let's start by downloading the [model file](https://huggingface.co/bert-bas
```rust
-# extern crate candle;
+# extern crate candle_core;
# extern crate hf_hub;
use hf_hub::api::sync::Api;
-use candle::Device;
+use candle_core::Device;
let api = Api::new().unwrap();
let repo = api.model("bert-base-uncased".to_string());
let weights = repo.get("model.safetensors").unwrap();
-let weights = candle::safetensors::load(weights, &Device::Cpu);
+let weights = candle_core::safetensors::load(weights, &Device::Cpu);
```
We now have access to all the [tensors](https://huggingface.co/bert-base-uncased?show_tensors=true) within the file.
@@ -48,7 +48,7 @@ cargo add hf-hub --features tokio
Now that we have our weights, we can use them in our bert architecture:
```rust
-# extern crate candle;
+# extern crate candle_core;
# extern crate candle_nn;
# extern crate hf_hub;
# use hf_hub::api::sync::Api;
@@ -57,10 +57,10 @@ Now that we have our weights, we can use them in our bert architecture:
# let repo = api.model("bert-base-uncased".to_string());
#
# let weights = repo.get("model.safetensors").unwrap();
-use candle::{Device, Tensor, DType};
+use candle_core::{Device, Tensor, DType};
use candle_nn::Linear;
-let weights = candle::safetensors::load(weights, &Device::Cpu).unwrap();
+let weights = candle_core::safetensors::load(weights, &Device::Cpu).unwrap();
let weight = weights.get("bert.encoder.layer.0.attention.self.query.weight").unwrap();
let bias = weights.get("bert.encoder.layer.0.attention.self.query.bias").unwrap();