Diffstat (limited to 'candle-examples/examples/stable-diffusion/attention.rs')
-rw-r--r-- | candle-examples/examples/stable-diffusion/attention.rs | 8 |
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/candle-examples/examples/stable-diffusion/attention.rs b/candle-examples/examples/stable-diffusion/attention.rs
index 58f5e87e..1ae1bfc3 100644
--- a/candle-examples/examples/stable-diffusion/attention.rs
+++ b/candle-examples/examples/stable-diffusion/attention.rs
@@ -208,9 +208,9 @@ impl CrossAttention {
     fn forward(&self, xs: &Tensor, context: Option<&Tensor>) -> Result<Tensor> {
         let _enter = self.span.enter();
         let query = self.to_q.forward(xs)?;
-        let context = context.unwrap_or(xs);
-        let key = self.to_k.forward(context)?;
-        let value = self.to_v.forward(context)?;
+        let context = context.unwrap_or(xs).contiguous()?;
+        let key = self.to_k.forward(&context)?;
+        let value = self.to_v.forward(&context)?;
         let query = self.reshape_heads_to_batch_dim(&query)?;
         let key = self.reshape_heads_to_batch_dim(&key)?;
         let value = self.reshape_heads_to_batch_dim(&value)?;
@@ -473,7 +473,7 @@ impl AttentionBlock {
         let num_heads = channels / num_head_channels;
         let group_norm =
             nn::group_norm(config.num_groups, channels, config.eps, vs.pp("group_norm"))?;
-        let (q_path, k_path, v_path, out_path) = if vs.dtype() == DType::F16 {
+        let (q_path, k_path, v_path, out_path) = if vs.contains_tensor("to_q.weight") {
             ("to_q", "to_k", "to_v", "to_out.0")
         } else {
             ("query", "key", "value", "proj_attn")
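
Note on the first hunk: it forces the cross-attention context into a contiguous layout before it reaches the key/value projections, since a strided view can trip up the matmul behind to_k/to_v on some candle backends. Below is a minimal sketch of the layout issue using candle's public tensor API; the transposed tensor is only an illustrative stand-in for a strided context, not the example's actual data.

use candle_core::{Device, Result, Tensor};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    // A transposed view shares storage with its parent, so its layout is
    // strided rather than contiguous.
    let xs = Tensor::randn(0f32, 1f32, (4, 8), &dev)?.t()?;
    assert!(!xs.is_contiguous());
    // Copying into a contiguous buffer up front sidesteps layout errors in
    // matmul-backed layers on backends that require contiguous inputs.
    let xs = xs.contiguous()?;
    assert!(xs.is_contiguous());
    println!("{:?}", xs.dims());
    Ok(())
}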
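
Note on the second hunk: it stops inferring the AttentionBlock parameter names from the tensor dtype and instead checks which keys the checkpoint actually contains. A sketch of the same decision as a standalone helper, assuming a candle-nn VarBuilder rooted at the attention block; the helper name is illustrative, not part of the example.

use candle_nn::VarBuilder;

// Illustrative helper: choose between the newer diffusers-style names and
// the older ones based on which tensors are present in the checkpoint.
fn attn_weight_paths(
    vs: &VarBuilder<'_>,
) -> (&'static str, &'static str, &'static str, &'static str) {
    if vs.contains_tensor("to_q.weight") {
        ("to_q", "to_k", "to_v", "to_out.0")
    } else {
        ("query", "key", "value", "proj_attn")
    }
}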