author | Laurent Mazare <laurent.mazare@gmail.com> | 2023-08-14 13:12:17 +0100
---|---|---
committer | GitHub <noreply@github.com> | 2023-08-14 13:12:17 +0100
commit | c84883ecf2c240792392353175b634f6ec92a011 (patch) |
tree | 10b14324310421802a68669485c75cc3dcc16c48 /candle-examples/examples/llama |
parent | a094dc503d69a6ca3db71098ebc26d0d2f2a33a6 (diff) |
Add a cuda kernel for upsampling. (#441)
* Add a cuda kernel for upsampling.
* Update for the latest tokenizers version.
Diffstat (limited to 'candle-examples/examples/llama')
-rw-r--r-- | candle-examples/examples/llama/main.rs | 4 |
1 file changed, 2 insertions, 2 deletions
diff --git a/candle-examples/examples/llama/main.rs b/candle-examples/examples/llama/main.rs
index 9a62eba5..b1e112fd 100644
--- a/candle-examples/examples/llama/main.rs
+++ b/candle-examples/examples/llama/main.rs
@@ -223,7 +223,7 @@ fn main() -> Result<()> {
             "{} token: {} '{}'",
             index + 1,
             next_token,
-            tokenizer.decode(vec![next_token], true).map_err(E::msg)?
+            tokenizer.decode(&[next_token], true).map_err(E::msg)?
         );
     }
     let dt = start_gen.elapsed();
@@ -231,7 +231,7 @@ fn main() -> Result<()> {
        "{} tokens generated ({} token/s)\n----\n{}\n----",
        args.sample_len,
        args.sample_len as f64 / dt.as_secs_f64(),
-       tokenizer.decode(new_tokens, true).map_err(E::msg)?
+       tokenizer.decode(&new_tokens, true).map_err(E::msg)?
    );
    Ok(())
}
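
The llama-example part of this commit tracks a signature change in the `tokenizers` crate: in recent releases, `Tokenizer::decode` borrows a `&[u32]` slice of token ids instead of consuming an owned `Vec<u32>`. Below is a minimal sketch of the updated call pattern under that assumption; the `tokenizer.json` path and the token ids are placeholders, not taken from this commit (the real example obtains its tokenizer and ids from the model run).

```rust
use anyhow::{Error as E, Result};
use tokenizers::Tokenizer;

fn main() -> Result<()> {
    // Placeholder file; the llama example loads its tokenizer elsewhere.
    let tokenizer = Tokenizer::from_file("tokenizer.json").map_err(E::msg)?;

    // With newer tokenizers versions, `decode` takes `&[u32]`, so callers pass a
    // slice literal for a single token or `&vec` for an accumulated Vec<u32>.
    let new_tokens: Vec<u32> = vec![1, 2, 3];
    let single = tokenizer.decode(&[new_tokens[0]], true).map_err(E::msg)?;
    let all = tokenizer.decode(&new_tokens, true).map_err(E::msg)?;

    println!("first: {single}\nall: {all}");
    Ok(())
}
```

Passing `&new_tokens` works because `&Vec<u32>` coerces to `&[u32]` at the call site, which is why the second hunk in the diff only needed to add a borrow.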