Diffstat (limited to 'candle-examples/examples/llama2-c/qmodel.rs')
-rw-r--r-- | candle-examples/examples/llama2-c/qmodel.rs | 227
1 file changed, 227 insertions, 0 deletions
diff --git a/candle-examples/examples/llama2-c/qmodel.rs b/candle-examples/examples/llama2-c/qmodel.rs
new file mode 100644
index 00000000..07db146e
--- /dev/null
+++ b/candle-examples/examples/llama2-c/qmodel.rs
@@ -0,0 +1,227 @@
+use super::model::{Cache, Config};
+use candle::{DType, IndexOp, Module, Result, Tensor, D};
+use candle_transformers::quantized_nn::{linear_no_bias as linear, Embedding, Linear, RmsNorm};
+pub use candle_transformers::quantized_var_builder::VarBuilder;
+
+fn silu(xs: &Tensor) -> Result<Tensor> {
+    xs / (xs.neg()?.exp()? + 1.0)?
+}
+
+struct CausalSelfAttention {
+    q_proj: Linear,
+    k_proj: Linear,
+    v_proj: Linear,
+    o_proj: Linear,
+    n_head: usize,
+    n_key_value_head: usize,
+    head_dim: usize,
+    cache: Cache,
+}
+
+impl CausalSelfAttention {
+    fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> {
+        let (b_sz, seq_len, h, n_embd) = x.dims4()?;
+        let cos = self.cache.cos.i(index_pos..index_pos + seq_len)?;
+        let sin = self.cache.sin.i(index_pos..index_pos + seq_len)?;
+        let cos = cos.unsqueeze(1)?;
+        let sin = sin.unsqueeze(1)?;
+        let cos = cos.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?;
+        let sin = sin.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?;
+        let x = x.reshape((b_sz, seq_len, h, n_embd / 2, 2))?;
+        let x0 = x.narrow(D::Minus1, 0, 1)?;
+        let x1 = x.narrow(D::Minus1, 1, 1)?;
+        let dst0 = (x0.broadcast_mul(&cos)? - x1.broadcast_mul(&sin)?)?;
+        let dst1 = (x0.broadcast_mul(&sin)? + x1.broadcast_mul(&cos)?)?;
+        let rope = Tensor::cat(&[&dst0, &dst1], D::Minus1)?.reshape((b_sz, seq_len, h, n_embd))?;
+        Ok(rope)
+    }
+
+    fn forward(&self, x: &Tensor, index_pos: usize, block_idx: usize) -> Result<Tensor> {
+        let (b_sz, seq_len, n_embd) = x.dims3()?;
+        let q = self.q_proj.forward(x)?;
+        let k = self.k_proj.forward(x)?;
+        let v = self.v_proj.forward(x)?;
+
+        let q = q.reshape((b_sz, seq_len, self.n_head, self.head_dim))?;
+        let k = k.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?;
+        let mut v = v.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?;
+
+        let q = self.apply_rotary_emb(&q, index_pos)?;
+        let mut k = self.apply_rotary_emb(&k, index_pos)?;
+
+        if self.cache.use_kv_cache {
+            let mut cache = self.cache.kvs.lock().unwrap();
+            if let Some((cache_k, cache_v)) = &cache[block_idx] {
+                k = Tensor::cat(&[cache_k, &k], 1)?.contiguous()?;
+                v = Tensor::cat(&[cache_v, &v], 1)?.contiguous()?;
+            }
+            cache[block_idx] = Some((k.clone(), v.clone()))
+        }
+
+        let k = self.repeat_kv(k)?;
+        let v = self.repeat_kv(v)?;
+
+        let q = q.transpose(1, 2)?.contiguous()?;
+        let k = k.transpose(1, 2)?.contiguous()?;
+        let v = v.transpose(1, 2)?.contiguous()?;
+
+        let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
+        let mask = self.cache.mask(seq_len)?.broadcast_as(att.shape())?;
+        let att = masked_fill(&att, &mask, f32::NEG_INFINITY)?;
+        let att = candle_nn::ops::softmax(&att, D::Minus1)?;
+        // Convert to contiguous as matmul doesn't support strided vs for now.
+        let y = att.matmul(&v.contiguous()?)?;
+        let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?;
+        let y = self.o_proj.forward(&y)?;
+        Ok(y)
+    }
+
+    fn repeat_kv(&self, x: Tensor) -> Result<Tensor> {
+        let n_rep = self.n_head / self.n_key_value_head;
+        if n_rep == 1 {
+            Ok(x)
+        } else {
+            let (b_sz, seq_len, n_kv_head, head_dim) = x.dims4()?;
+            let x = x
+                .unsqueeze(3)?
+                .expand((b_sz, seq_len, n_kv_head, n_rep, head_dim))?
+                .reshape((b_sz, seq_len, n_kv_head * n_rep, head_dim))?;
+            Ok(x)
+        }
+    }
+
+    fn load(vb: VarBuilder, cache: &Cache, cfg: &Config) -> Result<Self> {
+        let size_in = cfg.dim;
+        let size_q = (cfg.dim / cfg.n_heads) * cfg.n_heads;
+        let size_kv = (cfg.dim / cfg.n_heads) * cfg.n_kv_heads;
+        let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?;
+        let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?;
+        let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?;
+        let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?;
+        Ok(Self {
+            q_proj,
+            k_proj,
+            v_proj,
+            o_proj,
+            n_head: cfg.n_heads,
+            n_key_value_head: cfg.n_kv_heads,
+            head_dim: cfg.dim / cfg.n_heads,
+            cache: cache.clone(),
+        })
+    }
+}
+
+fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
+    let shape = mask.shape();
+    let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
+    let m = mask.where_cond(&on_true, on_false)?;
+    Ok(m)
+}
+
+struct Mlp {
+    c_fc1: Linear,
+    c_fc2: Linear,
+    c_proj: Linear,
+}
+
+impl Mlp {
+    fn new(c_fc1: Linear, c_fc2: Linear, c_proj: Linear) -> Self {
+        Self {
+            c_fc1,
+            c_fc2,
+            c_proj,
+        }
+    }
+
+    fn forward(&self, x: &Tensor) -> Result<Tensor> {
+        let x = (silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?;
+        self.c_proj.forward(&x)
+    }
+
+    fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
+        let h_size = cfg.dim;
+        let i_size = cfg.hidden_dim;
+        let c_fc1 = linear(h_size, i_size, vb.pp("gate_proj"))?;
+        let c_fc2 = linear(h_size, i_size, vb.pp("up_proj"))?;
+        let c_proj = linear(i_size, h_size, vb.pp("down_proj"))?;
+        Ok(Self::new(c_fc1, c_fc2, c_proj))
+    }
+}
+
+struct Block {
+    rms_1: RmsNorm,
+    attn: CausalSelfAttention,
+    rms_2: RmsNorm,
+    mlp: Mlp,
+}
+
+impl Block {
+    fn new(rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp) -> Self {
+        Self {
+            rms_1,
+            attn,
+            rms_2,
+            mlp,
+        }
+    }
+
+    fn forward(&self, x: &Tensor, index_pos: usize, block_idx: usize) -> Result<Tensor> {
+        let residual = x;
+        let x = self.rms_1.forward(x)?;
+        let x = (self.attn.forward(&x, index_pos, block_idx)? + residual)?;
+        let residual = &x;
+        let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?;
+        Ok(x)
+    }
+
+    fn load(vb: VarBuilder, cache: &Cache, cfg: &Config) -> Result<Self> {
+        let attn = CausalSelfAttention::load(vb.pp("self_attn"), cache, cfg)?;
+        let mlp = Mlp::load(vb.pp("mlp"), cfg)?;
+        let input_layernorm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("input_layernorm"))?;
+        let post_attention_layernorm =
+            RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("post_attention_layernorm"))?;
+        Ok(Self::new(
+            input_layernorm,
+            attn,
+            post_attention_layernorm,
+            mlp,
+        ))
+    }
+}
+
+pub struct QLlama {
+    wte: Embedding,
+    blocks: Vec<Block>,
+    ln_f: RmsNorm,
+    lm_head: Linear,
+    pub config: Config,
+}
+
+impl QLlama {
+    pub fn forward(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> {
+        let (_b_sz, _seq_len) = x.dims2()?;
+        let mut x = self.wte.forward(x)?;
+        for (block_idx, block) in self.blocks.iter().enumerate() {
+            x = block.forward(&x, index_pos, block_idx)?;
+        }
+        let x = self.ln_f.forward(&x)?;
+        let logits = self.lm_head.forward(&x)?;
+        logits.to_dtype(DType::F32)
+    }
+
+    pub fn load(vb: VarBuilder, cache: &Cache, cfg: Config) -> Result<Self> {
+        let wte = Embedding::new(cfg.vocab_size, cfg.dim, vb.pp("model.embed_tokens"))?;
+        let lm_head = linear(cfg.dim, cfg.vocab_size, vb.pp("lm_head"))?;
+        let ln_f = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("model.norm"))?;
+        let blocks: Vec<_> = (0..cfg.n_layers)
+            .map(|i| Block::load(vb.pp(format!("model.layers.{i}")), cache, &cfg).unwrap())
+            .collect();
+        Ok(Self {
+            wte,
+            blocks,
+            ln_f,
+            lm_head,
+            config: cfg,
+        })
+    }
+}
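
Usage sketch (not part of the commit): the wiring below shows how this module would be driven from the example's main.rs. The gguf path, the Config/Cache construction, and the exact VarBuilder::from_gguf signature (later candle versions also take a &Device) are assumptions here, not something this diff defines.

    // Hypothetical wiring, mirroring what the f32 llama2-c example does.
    // `config` (model::Config), `cache` (model::Cache) and the checkpoint
    // path come from the surrounding example, not from qmodel.rs itself.
    use qmodel::{QLlama, VarBuilder};

    let vb = VarBuilder::from_gguf("llama2-c.gguf")?; // quantized weights (assumed path)
    let model = QLlama::load(vb, &cache, config)?;
    // `tokens` is a (batch, seq_len) u32 tensor of prompt token ids;
    // `index_pos` is the offset of those tokens in the sequence so far.
    let logits = model.forward(&tokens, 0)?; // (batch, seq_len, vocab_size), f32

Structurally this mirrors the f32 model.rs in the same example: the only differences are that the layers come from candle_transformers::quantized_nn and the weights from a gguf-backed VarBuilder, so the Cache and Config types from super::model are reused unchanged.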