summaryrefslogtreecommitdiff
path: root/candle-wasm-examples
diff options
context:
space:
mode:
authorJuarez Bochi <juarez.bochi@grammarly.com>2023-10-04 15:57:33 -0400
committerGitHub <noreply@github.com>2023-10-04 20:57:33 +0100
commitb86ac0c5076534e2a7c067e87d1125d4da21cd22 (patch)
tree64d85729cad805643c08365ca9155e441d3c7619 /candle-wasm-examples
parent27e70a50939b647a7c2e80428647f5668e592607 (diff)
downloadcandle-b86ac0c5076534e2a7c067e87d1125d4da21cd22.tar.gz
candle-b86ac0c5076534e2a7c067e87d1125d4da21cd22.tar.bz2
candle-b86ac0c5076534e2a7c067e87d1125d4da21cd22.zip
Quant t5: Add coedit model to wasm demo and readme (#1031)
Diffstat (limited to 'candle-wasm-examples')
-rw-r--r--candle-wasm-examples/t5/index.html12
-rw-r--r--candle-wasm-examples/t5/utils.js36
2 files changed, 44 insertions, 4 deletions
diff --git a/candle-wasm-examples/t5/index.html b/candle-wasm-examples/t5/index.html
index 227b723a..2c9a6f35 100644
--- a/candle-wasm-examples/t5/index.html
+++ b/candle-wasm-examples/t5/index.html
@@ -166,13 +166,19 @@
target="_blank"
class="link"
>flan-t5-small</a
- >
- and several t5
+ >,
+ several
<a
href="https://huggingface.co/lmz/candle-quantized-t5/tree/main"
target="_blank"
class="link">
- t5 quantized gguf</a
+ t5 quantized gguf models</a
+ >, and also a quantized
+ <a
+ href="https://huggingface.co/jbochi/candle-coedit-quantized/tree/main"
+ target="_blank"
+ class="link">
+ CoEdIT model for text rewrite</a
>.
</p>
</div>
diff --git a/candle-wasm-examples/t5/utils.js b/candle-wasm-examples/t5/utils.js
index 851d1b76..20b0a792 100644
--- a/candle-wasm-examples/t5/utils.js
+++ b/candle-wasm-examples/t5/utils.js
@@ -65,6 +65,7 @@ export async function generateText(
worker.addEventListener("message", messageHandler);
});
}
+
export const MODELS = {
t5_small_quantized: {
size: "64.4 MB",
@@ -133,7 +134,6 @@ export const MODELS = {
summarization: { prefix: "summarize: ", max_length: 200 },
},
},
-
flan_t5_base_quantized: {
size: "263 MB",
base_url: "https://huggingface.co/lmz/candle-quantized-t5/resolve/main/",
@@ -156,7 +156,41 @@ export const MODELS = {
summarization: { prefix: "summarize: ", max_length: 200 },
},
},
+ coedit_large_quantized: {
+ size: "643 MB",
+ base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/",
+ model: "model.gguf",
+ tokenizer: "tokenizer.json",
+ config: "config.json",
+ tasks: {
+ fluency: {
+ prefix: "Fix the grammar: ",
+ max_length: 300,
+ },
+ coherence: {
+ prefix: "Rewrite to make this easier to understand: ",
+ max_length: 300,
+ },
+ simplification: {
+ prefix: "translate English to Romanian: ",
+ max_length: 300,
+ },
+    paraphrase: {
+ prefix: "Paraphrase this: ",
+ max_length: 300,
+ },
+ formalization: {
+ prefix: "Write this more formally: ",
+ max_length: 300,
+ },
+ neutralize: {
+ prefix: "Write in a more neutral way: ",
+ max_length: 300,
+ },
+ },
+ },
};
+
export function getModelInfo(id, taskID) {
const model = MODELS[id];
return {