summaryrefslogtreecommitdiff
path: root/candle-transformers/src/models/mobileclip.rs
blob: 45a5dbad9f966ff9e8e51140c42661913495bb08 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
use super::fastvit;
use super::openclip::text_model;
use candle::{Result, Tensor, D};
use candle_nn::{Func, VarBuilder};

/// MobileCLIP: a CLIP-style dual-encoder pairing a FastViT image tower
/// with an OpenCLIP text transformer (see `new` for the weight layout).
#[derive(Clone, Debug)]
pub struct MobileClipModel {
    /// OpenCLIP text transformer; encodes token ids into text embeddings.
    text_model: text_model::OpenClipTextTransformer,
    /// FastViT vision tower, wrapped as a `Func` (built in `new` from `visual.trunk`).
    vision_model: Func<'static>,
    /// Projection matrix of shape (embed_dim, projection_dim) mapping text
    /// embeddings into the shared image/text space.
    text_projection: Tensor,
    /// Learned scalar; `exp()` of it scales the similarity logits in `forward`.
    logit_scale: Tensor,
}

/// Hyper-parameters for a MobileCLIP variant: text tower config, FastViT
/// vision tower config, and the expected input image resolution.
#[derive(Clone, Debug)]
pub struct MobileClipConfig {
    /// Configuration of the OpenCLIP text transformer.
    pub text_config: text_model::Config,
    /// Configuration of the FastViT vision backbone.
    pub vision_config: fastvit::Config,
    /// Input image resolution in pixels (256 for the s1/s2 presets).
    pub image_size: usize,
}

impl MobileClipConfig {
    /// Configuration for the MobileCLIP-S1 variant (FastViT MCi1 vision tower).
    pub fn s1() -> Self {
        Self::with_vision_config(fastvit::Config::mci1())
    }

    /// Configuration for the MobileCLIP-S2 variant (FastViT MCi2 vision tower).
    pub fn s2() -> Self {
        Self::with_vision_config(fastvit::Config::mci2())
    }

    /// Shared construction for all presets: every variant pairs a
    /// ViT-B/32-style text tower with a 256px input resolution and only
    /// differs in the FastViT vision configuration.
    fn with_vision_config(vision_config: fastvit::Config) -> Self {
        Self {
            text_config: text_model::Config::vit_base_patch32(),
            vision_config,
            image_size: 256,
        }
    }
}

impl MobileClipModel {
    /// Builds the full model from `vs`, loading the text tower under
    /// `text`, the vision tower under `visual.trunk`, the text projection
    /// matrix, and the scalar logit scale.
    pub fn new(vs: VarBuilder, c: &MobileClipConfig) -> Result<Self> {
        let text_model = text_model::OpenClipTextTransformer::new(vs.pp("text"), &c.text_config)?;
        let vision_model = fastvit::fastvit(&c.vision_config, 512, vs.pp("visual.trunk"))?;
        let projection_shape = (c.text_config.embed_dim, c.text_config.projection_dim);
        let text_projection = vs.get(projection_shape, "text.text_projection")?;
        let logit_scale = vs.get(&[], "logit_scale")?;
        Ok(Self {
            text_model,
            vision_model,
            text_projection,
            logit_scale,
        })
    }

    /// Encodes token ids with the text tower and projects the result into
    /// the shared embedding space.
    pub fn get_text_features(&self, input_ids: &Tensor) -> Result<Tensor> {
        let encoded = input_ids.apply(&self.text_model)?;
        encoded.matmul(&self.text_projection)
    }

    /// Encodes pixel values with the FastViT vision tower.
    pub fn get_image_features(&self, pixel_values: &Tensor) -> Result<Tensor> {
        pixel_values.apply(&self.vision_model)
    }

    /// Runs both towers, L2-normalizes the embeddings, and returns the
    /// scaled similarity logits as `(logits_per_text, logits_per_image)`.
    pub fn forward(&self, pixel_values: &Tensor, input_ids: &Tensor) -> Result<(Tensor, Tensor)> {
        let image_embeds = div_l2_norm(&self.get_image_features(pixel_values)?)?;
        let text_embeds = div_l2_norm(&self.get_text_features(input_ids)?)?;
        // Cosine similarities scaled by the (exponentiated) learned temperature.
        let scale = self.logit_scale.exp()?;
        let logits_per_text = text_embeds.matmul(&image_embeds.t()?)?.broadcast_mul(&scale)?;
        let logits_per_image = logits_per_text.t()?;
        Ok((logits_per_text, logits_per_image))
    }
}

/// Divides `v` by its L2 norm taken over the last dimension (broadcast so
/// each row ends up unit-length).
pub fn div_l2_norm(v: &Tensor) -> Result<Tensor> {
    let norm = v.sqr()?.sum_keepdim(D::Minus1)?.sqrt()?;
    v.broadcast_div(&norm)
}