Diffstat (limited to 'candle-transformers/src/models/quantized_mistral.rs')
-rw-r--r--  candle-transformers/src/models/quantized_mistral.rs | 14 ++++++++++++++
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/candle-transformers/src/models/quantized_mistral.rs b/candle-transformers/src/models/quantized_mistral.rs
index 9e306c67..f2cb3b27 100644
--- a/candle-transformers/src/models/quantized_mistral.rs
+++ b/candle-transformers/src/models/quantized_mistral.rs
@@ -198,6 +198,10 @@ impl Attention {
             .reshape((b_sz, q_len, self.hidden_size))?
             .apply(&self.o_proj)
     }
+
+    fn clear_kv_cache(&mut self) {
+        self.kv_cache = None
+    }
 }
 
 #[derive(Debug, Clone)]
@@ -241,6 +245,10 @@ impl DecoderLayer {
         let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
         residual + xs
     }
+
+    fn clear_kv_cache(&mut self) {
+        self.self_attn.clear_kv_cache()
+    }
 }
 
 #[derive(Debug, Clone)]
@@ -322,4 +330,10 @@ impl Model {
             .apply(&self.norm)?
             .apply(&self.lm_head)
     }
+
+    pub fn clear_kv_cache(&mut self) {
+        for layer in self.layers.iter_mut() {
+            layer.clear_kv_cache()
+        }
+    }
 }
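
The three hunks thread one operation through the model: Model::clear_kv_cache walks every DecoderLayer, each layer forwards to its Attention, and the attention block drops its cached key/value tensors by resetting kv_cache to None. A caller needs this when one Model instance serves several unrelated prompts; otherwise the second prompt would attend over keys and values cached while generating the first. Below is a minimal usage sketch, not part of this commit: it assumes the forward(input_ids, seqlen_offset) signature of the surrounding file and the candle_core crate name, and the prompt loop and helper names are illustrative only.

use candle_core::{Device, Result, Tensor};
use candle_transformers::models::quantized_mistral::Model;

// Hypothetical driver: runs several independent prompts through one model.
fn run_prompts(model: &mut Model, prompts: &[Vec<u32>], device: &Device) -> Result<()> {
    for tokens in prompts {
        // Reset the per-layer key/value caches so this prompt does not
        // attend over state left behind by the previous one.
        model.clear_kv_cache();
        let input = Tensor::new(tokens.as_slice(), device)?.unsqueeze(0)?;
        // Offset 0: the whole prompt primes the freshly cleared cache; later
        // single-token decode steps would pass the running offset instead.
        let logits = model.forward(&input, 0)?;
        let _ = logits; // sampling/decoding elided
    }
    Ok(())
}

Exposing the reset only at the Model level (the Attention and DecoderLayer methods stay private) keeps the cache lifecycle behind one public call, mirroring the equivalent method on the non-quantized mistral model.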