Diffstat (limited to 'candle-book/src/inference/hub.md')
-rw-r--r--  candle-book/src/inference/hub.md | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/candle-book/src/inference/hub.md b/candle-book/src/inference/hub.md
index 01492df1..a974a1fa 100644
--- a/candle-book/src/inference/hub.md
+++ b/candle-book/src/inference/hub.md
@@ -10,17 +10,17 @@ Then let's start by downloading the [model file](https://huggingface.co/bert-bas
```rust
-# extern crate candle;
+# extern crate candle_core;
# extern crate hf_hub;
use hf_hub::api::sync::Api;
-use candle::Device;
+use candle_core::Device;
let api = Api::new().unwrap();
let repo = api.model("bert-base-uncased".to_string());
let weights = repo.get("model.safetensors").unwrap();
-let weights = candle::safetensors::load(weights, &Device::Cpu);
+let weights = candle_core::safetensors::load(weights, &Device::Cpu);
```
We now have access to all the [tensors](https://huggingface.co/bert-base-uncased?show_tensors=true) within the file.
@@ -48,7 +48,7 @@ cargo add hf-hub --features tokio
Now that we have our weights, we can use them in our bert architecture:
```rust
-# extern crate candle;
+# extern crate candle_core;
# extern crate candle_nn;
# extern crate hf_hub;
# use hf_hub::api::sync::Api;
@@ -57,10 +57,10 @@ Now that we have our weights, we can use them in our bert architecture:
# let repo = api.model("bert-base-uncased".to_string());
#
# let weights = repo.get("model.safetensors").unwrap();
-use candle::{Device, Tensor, DType};
+use candle_core::{Device, Tensor, DType};
use candle_nn::Linear;
-let weights = candle::safetensors::load(weights, &Device::Cpu).unwrap();
+let weights = candle_core::safetensors::load(weights, &Device::Cpu).unwrap();
let weight = weights.get("bert.encoder.layer.0.attention.self.query.weight").unwrap();
let bias = weights.get("bert.encoder.layer.0.attention.self.query.bias").unwrap();
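
For reference, here is roughly what the patched snippets assemble into as a standalone program once the crate is imported as `candle_core`. This is a sketch, not part of the diff: the `Linear::new` construction, the `Module::forward` call on a dummy batch, and the plain `unwrap()` error handling are illustrative assumptions layered on top of the tensors fetched above.

```rust
use candle_core::{DType, Device, Tensor};
use candle_nn::{Linear, Module};
use hf_hub::api::sync::Api;

fn main() {
    // Download (and locally cache) the BERT checkpoint from the Hugging Face Hub.
    let api = Api::new().unwrap();
    let repo = api.model("bert-base-uncased".to_string());
    let weights_path = repo.get("model.safetensors").unwrap();

    // Load every tensor in the safetensors file onto the CPU.
    let weights = candle_core::safetensors::load(weights_path, &Device::Cpu).unwrap();

    // Pull out one attention projection and wrap it in a Linear layer.
    let weight = weights
        .get("bert.encoder.layer.0.attention.self.query.weight")
        .unwrap();
    let bias = weights
        .get("bert.encoder.layer.0.attention.self.query.bias")
        .unwrap();
    let linear = Linear::new(weight.clone(), Some(bias.clone()));

    // Sanity-check with a dummy batch (hidden size 768 for bert-base-uncased).
    let input = Tensor::zeros((3, 768), DType::F32, &Device::Cpu).unwrap();
    let output = linear.forward(&input).unwrap();
    println!("output shape: {:?}", output.shape());
}
```

The rename touched only the import paths: the crate is published as `candle-core`, so `extern crate candle_core;` and `use candle_core::...` replace the older `candle` references, while the hf-hub and candle-nn usage is unchanged.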