author     Laurent Mazare <laurent.mazare@gmail.com>   2023-08-14 13:12:17 +0100
committer  GitHub <noreply@github.com>                 2023-08-14 13:12:17 +0100
commit     c84883ecf2c240792392353175b634f6ec92a011 (patch)
tree       10b14324310421802a68669485c75cc3dcc16c48 /candle-wasm-examples
parent     a094dc503d69a6ca3db71098ebc26d0d2f2a33a6 (diff)
download   candle-c84883ecf2c240792392353175b634f6ec92a011.tar.gz
           candle-c84883ecf2c240792392353175b634f6ec92a011.tar.bz2
           candle-c84883ecf2c240792392353175b634f6ec92a011.zip
Add a cuda kernel for upsampling. (#441)
* Add a cuda kernel for upsampling.
* Update for the latest tokenizers version.
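The headline change adds a CUDA kernel for the upsampling op; the kernel itself lives outside the 'candle-wasm-examples' diffstat shown below. As a rough sketch of how that op is typically exercised on a GPU device (crate path, device index, and tensor shapes are assumptions, not taken from this commit):

```rust
use candle_core::{Device, Tensor};

fn main() -> candle_core::Result<()> {
    // Assumed usage (not shown in this diff): nearest-neighbour upsampling
    // on a CUDA device, the op the new kernel is meant to accelerate.
    let device = Device::new_cuda(0)?;
    // A (batch, channels, height, width) = (1, 1, 2, 2) input tensor.
    let input = Tensor::arange(0f32, 4., &device)?.reshape((1, 1, 2, 2))?;
    // Upsample to 4x4: each source pixel expands into a 2x2 block.
    let upsampled = input.upsample_nearest2d(4, 4)?;
    println!("{upsampled}");
    Ok(())
}
```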
Diffstat (limited to 'candle-wasm-examples')
-rw-r--r--  candle-wasm-examples/whisper/src/worker.rs  5
1 file changed, 1 insertion, 4 deletions
diff --git a/candle-wasm-examples/whisper/src/worker.rs b/candle-wasm-examples/whisper/src/worker.rs
index 139755cb..d77d3e32 100644
--- a/candle-wasm-examples/whisper/src/worker.rs
+++ b/candle-wasm-examples/whisper/src/worker.rs
@@ -159,10 +159,7 @@ impl Decoder {
             }
             sum_logprob += prob.ln();
         }
-        let text = self
-            .tokenizer
-            .decode(tokens.clone(), true)
-            .map_err(E::msg)?;
+        let text = self.tokenizer.decode(&tokens, true).map_err(E::msg)?;
         let avg_logprob = sum_logprob / tokens.len() as f64;
 
         Ok(DecodingResult {
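The hunk above adapts the Whisper wasm example to the newer tokenizers API, where `Tokenizer::decode` takes a `&[u32]` slice instead of an owned `Vec<u32>`, so the token buffer no longer needs to be cloned. A minimal illustrative sketch of the updated call (the helper function is hypothetical, not part of the diff):

```rust
use anyhow::{Error as E, Result};
use tokenizers::Tokenizer;

// Illustrative helper: with recent tokenizers releases, `decode` borrows the
// token ids as a slice, so callers can pass `&tokens` directly.
fn decode_tokens(tokenizer: &Tokenizer, tokens: &[u32]) -> Result<String> {
    tokenizer.decode(tokens, true).map_err(E::msg)
}
```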