Diffstat (limited to 'candle-transformers/src/models/chinese_clip/vision_model.rs')
-rw-r--r-- | candle-transformers/src/models/chinese_clip/vision_model.rs | 6
1 file changed, 3 insertions, 3 deletions
diff --git a/candle-transformers/src/models/chinese_clip/vision_model.rs b/candle-transformers/src/models/chinese_clip/vision_model.rs
index 2d345e0f..a20535c4 100644
--- a/candle-transformers/src/models/chinese_clip/vision_model.rs
+++ b/candle-transformers/src/models/chinese_clip/vision_model.rs
@@ -3,8 +3,8 @@
 //! Chinese contrastive Language-Image Pre-Training (CLIP) is an architecture trained on
 //! pairs of images with related texts.
 //!
-//! https://github.com/OFA-Sys/Chinese-CLIP
-//! https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/chinese_clip/modeling_chinese_clip.py
+//! - 💻 [Chinese-CLIP](https://github.com/OFA-Sys/Chinese-CLIP)
+//! - 💻 [GH](https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/chinese_clip/modeling_chinese_clip.py)
 
 use candle::{DType, IndexOp, Module, Result, Shape, Tensor, D};
 use candle_nn as nn;
@@ -49,7 +49,7 @@ impl Default for ChineseClipVisionConfig {
 }
 
 impl ChineseClipVisionConfig {
-    /// referer: https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16/blob/main/config.json
+    /// [referer](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16/blob/main/config.json)
     pub fn clip_vit_base_patch16() -> Self {
         Self {
             hidden_size: 768,
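
For context, the second hunk touches the doc comment of the ChineseClipVisionConfig::clip_vit_base_patch16 preset constructor. A minimal usage sketch follows; it assumes the module path mirrors the file path (candle_transformers::models::chinese_clip::vision_model) and that config fields such as hidden_size are public, neither of which is confirmed by this diff alone.

    use candle_transformers::models::chinese_clip::vision_model::ChineseClipVisionConfig;

    fn main() {
        // Preset matching OFA-Sys/chinese-clip-vit-base-patch16, i.e. the
        // config.json linked from the doc comment fixed in this diff.
        let cfg = ChineseClipVisionConfig::clip_vit_base_patch16();

        // `hidden_size: 768` is visible in the hunk above; this assumes the
        // field is public, as is common for config structs in candle-transformers.
        assert_eq!(cfg.hidden_size, 768);
    }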