summaryrefslogtreecommitdiff
path: root/candle-examples/examples/llama
diff options
context:
space:
mode:
authorLaurent Mazare <laurent.mazare@gmail.com>2023-08-14 13:12:17 +0100
committerGitHub <noreply@github.com>2023-08-14 13:12:17 +0100
commitc84883ecf2c240792392353175b634f6ec92a011 (patch)
tree10b14324310421802a68669485c75cc3dcc16c48 /candle-examples/examples/llama
parenta094dc503d69a6ca3db71098ebc26d0d2f2a33a6 (diff)
downloadcandle-c84883ecf2c240792392353175b634f6ec92a011.tar.gz
candle-c84883ecf2c240792392353175b634f6ec92a011.tar.bz2
candle-c84883ecf2c240792392353175b634f6ec92a011.zip
Add a cuda kernel for upsampling. (#441)
* Add a cuda kernel for upsampling. * Update for the latest tokenizers version.
Diffstat (limited to 'candle-examples/examples/llama')
-rw-r--r--candle-examples/examples/llama/main.rs4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/candle-examples/examples/llama/main.rs b/candle-examples/examples/llama/main.rs
index 9a62eba5..b1e112fd 100644
--- a/candle-examples/examples/llama/main.rs
+++ b/candle-examples/examples/llama/main.rs
@@ -223,7 +223,7 @@ fn main() -> Result<()> {
"{} token: {} '{}'",
index + 1,
next_token,
- tokenizer.decode(vec![next_token], true).map_err(E::msg)?
+ tokenizer.decode(&[next_token], true).map_err(E::msg)?
);
}
let dt = start_gen.elapsed();
@@ -231,7 +231,7 @@ fn main() -> Result<()> {
"{} tokens generated ({} token/s)\n----\n{}\n----",
args.sample_len,
args.sample_len as f64 / dt.as_secs_f64(),
- tokenizer.decode(new_tokens, true).map_err(E::msg)?
+ tokenizer.decode(&new_tokens, true).map_err(E::msg)?
);
Ok(())
}