summary | refs | log | tree | commit | diff
path: root/candle-transformers/src/models/clip/mod.rs
diff options
context:
space:
mode:
Diffstat (limited to 'candle-transformers/src/models/clip/mod.rs')
-rw-r--r--  candle-transformers/src/models/clip/mod.rs | 6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/candle-transformers/src/models/clip/mod.rs b/candle-transformers/src/models/clip/mod.rs
index e83f27e3..2b002673 100644
--- a/candle-transformers/src/models/clip/mod.rs
+++ b/candle-transformers/src/models/clip/mod.rs
@@ -3,8 +3,10 @@
//! Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on
//! pairs of images with related texts.
//!
-//! - [GH Link](https://github.com/openai/CLIP)
-//! - Transformers Python [reference implementation](https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip)
+//! - 💻 [GH Link](https://github.com/openai/CLIP)
+//! - 💻 Transformers Python [reference implementation](https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip)
+//! - 🤗 [HF Model](https://huggingface.co/openai/clip-vit-large-patch14-336)
+//!
use self::{
text_model::{Activation, ClipTextTransformer},