| field | value | date |
|---|---|---|
| author | Laurent Mazare <laurent.mazare@gmail.com> | 2023-08-14 13:12:17 +0100 |
| committer | GitHub <noreply@github.com> | 2023-08-14 13:12:17 +0100 |
| commit | c84883ecf2c240792392353175b634f6ec92a011 (patch) | |
| tree | 10b14324310421802a68669485c75cc3dcc16c48 /candle-examples/examples/falcon | |
| parent | a094dc503d69a6ca3db71098ebc26d0d2f2a33a6 (diff) | |
| download | candle-c84883ecf2c240792392353175b634f6ec92a011.tar.gz candle-c84883ecf2c240792392353175b634f6ec92a011.tar.bz2 candle-c84883ecf2c240792392353175b634f6ec92a011.zip | |
Add a cuda kernel for upsampling. (#441)
* Add a cuda kernel for upsampling.
* Update for the latest tokenizers version.
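The upsampling kernel itself lands in the CUDA backend, not in this example, so it does not appear in the diff below. As a rough, hedged illustration of the op it accelerates, here is a sketch using candle's nearest-neighbor upsampling; the `candle_core` import, device index, and tensor contents are assumptions made for illustration, not code from this commit.

```rust
// Hedged sketch of the op the new CUDA kernel backs (assumptions: the
// candle-core crate imported as candle_core, and an available CUDA device 0).
use candle_core::{Device, Result, Tensor};

fn main() -> Result<()> {
    let device = Device::new_cuda(0)?;
    // A tiny NCHW tensor: batch=1, channel=1, 2x2 spatial (placeholder values).
    let t = Tensor::arange(0f32, 4f32, &device)?.reshape((1, 1, 2, 2))?;
    // Nearest-neighbor upsampling to 4x4; with this commit the op runs on the GPU.
    let up = t.upsample_nearest2d(4, 4)?;
    println!("{up}");
    Ok(())
}
```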
Diffstat (limited to 'candle-examples/examples/falcon')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | candle-examples/examples/falcon/main.rs | 6 |

1 file changed, 2 insertions(+), 4 deletions(-)
```diff
diff --git a/candle-examples/examples/falcon/main.rs b/candle-examples/examples/falcon/main.rs
index c37d9a96..0df3a001 100644
--- a/candle-examples/examples/falcon/main.rs
+++ b/candle-examples/examples/falcon/main.rs
@@ -72,16 +72,14 @@ impl TextGeneration {
                 "{} token: {} '{}'",
                 index + 1,
                 next_token,
-                self.tokenizer
-                    .decode(vec![next_token], true)
-                    .map_err(E::msg)?
+                self.tokenizer.decode(&[next_token], true).map_err(E::msg)?
             );
         }
         let dt = start_gen.elapsed();
         println!(
             "{sample_len} tokens generated ({} token/s)\n----\n{}\n----",
             sample_len as f64 / dt.as_secs_f64(),
-            self.tokenizer.decode(new_tokens, true).map_err(E::msg)?
+            self.tokenizer.decode(&new_tokens, true).map_err(E::msg)?
         );
         Ok(())
     }
```
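The two hunks above track a signature change in the `tokenizers` crate: `Tokenizer::decode` now borrows the token ids as a slice (`&[u32]`) instead of taking an owned `Vec<u32>`. A minimal sketch of the updated call, assuming a local `tokenizer.json` and placeholder token ids (neither is part of this commit):

```rust
// Minimal sketch of the post-update decode call (assumptions: a local
// tokenizer.json file and arbitrary token ids used purely for illustration).
use anyhow::{Error as E, Result};
use tokenizers::Tokenizer;

fn main() -> Result<()> {
    let tokenizer = Tokenizer::from_file("tokenizer.json").map_err(E::msg)?;

    // Ids accumulated during generation; the values here are placeholders.
    let new_tokens: Vec<u32> = vec![1, 2, 3];

    // Older tokenizers releases took the Vec by value:
    //     tokenizer.decode(new_tokens, true)
    // Newer releases borrow a slice, so the Vec stays usable afterwards:
    let text = tokenizer.decode(&new_tokens, true).map_err(E::msg)?;
    println!("{text}");
    Ok(())
}
```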