summaryrefslogtreecommitdiff
path: root/candle-examples/examples/stable-diffusion-3
diff options
context:
space:
mode:
Diffstat (limited to 'candle-examples/examples/stable-diffusion-3')
-rw-r--r--candle-examples/examples/stable-diffusion-3/README.md20
-rw-r--r--candle-examples/examples/stable-diffusion-3/main.rs44
2 files changed, 52 insertions, 12 deletions
diff --git a/candle-examples/examples/stable-diffusion-3/README.md b/candle-examples/examples/stable-diffusion-3/README.md
index 52ebfa55..adae1b56 100644
--- a/candle-examples/examples/stable-diffusion-3/README.md
+++ b/candle-examples/examples/stable-diffusion-3/README.md
@@ -1,8 +1,8 @@
-# candle-stable-diffusion-3: Candle Implementation of Stable Diffusion 3 Medium
+# candle-stable-diffusion-3: Candle Implementation of Stable Diffusion 3/3.5
![](assets/stable-diffusion-3.jpg)
-*A cute rusty robot holding a candle torch in its hand, with glowing neon text \"LETS GO RUSTY\" displayed on its chest, bright background, high quality, 4k*
+*A cute rusty robot holding a candle torch in its hand, with glowing neon text \"LETS GO RUSTY\" displayed on its chest, bright background, high quality, 4k*, generated by Stable Diffusion 3 Medium
Stable Diffusion 3 Medium is a text-to-image model based on Multimodal Diffusion Transformer (MMDiT) architecture.
@@ -10,9 +10,17 @@ Stable Diffusion 3 Medium is a text-to-image model based on Multimodal Diffusion
- [research paper](https://arxiv.org/pdf/2403.03206)
- [announcement blog post](https://stability.ai/news/stable-diffusion-3-medium)
+Stable Diffusion 3.5 is a family of text-to-image models with the latest improvements:
+- [announcement blog post](https://stability.ai/news/introducing-stable-diffusion-3-5)
+
+It has three variants:
+- [Stable Diffusion 3.5 Large](https://huggingface.co/stabilityai/stable-diffusion-3.5-large) @ 8.1b params, with scaled and slightly modified MMDiT architecture.
+- [Stable Diffusion 3.5 Large Turbo](https://huggingface.co/stabilityai/stable-diffusion-3.5-large-turbo) distilled version that enables 4-step inference.
+- [Stable Diffusion 3.5 Medium](https://huggingface.co/stabilityai/stable-diffusion-3.5-medium) @ 2.5b params, with improved MMDiT-X architecture.
+
## Getting access to the weights
-The weights of Stable Diffusion 3 Medium is released by Stability AI under the Stability Community License. You will need to accept the conditions and acquire a license by visiting [the repo on HuggingFace Hub](https://huggingface.co/stabilityai/stable-diffusion-3-medium) to gain access to the weights for your HuggingFace account.
+The weights of Stable Diffusion 3/3.5 are released by Stability AI under the Stability Community License. You will need to accept the conditions and acquire a license by visiting the repos on HuggingFace Hub to gain access to the weights for your HuggingFace account.
To allow your computer to gain access to the public-gated repos on HuggingFace, you might need to create a [HuggingFace User Access Tokens](https://huggingface.co/docs/hub/en/security-tokens) (recommended) and log in on your computer if you haven't done that before. A convenient way to do the login is to use [huggingface-cli](https://huggingface.co/docs/huggingface_hub/en/guides/cli):
@@ -27,10 +35,12 @@ On the first run, the weights will be automatically downloaded from the Huggingf
```shell
cargo run --example stable-diffusion-3 --release --features=cuda -- \
- --height 1024 --width 1024 \
+ --which 3-medium --height 1024 --width 1024 \
--prompt 'A cute rusty robot holding a candle torch in its hand, with glowing neon text \"LETS GO RUSTY\" displayed on its chest, bright background, high quality, 4k'
```
+To use a different model, change the value of the `--which` option. (Possible values: `3-medium`, `3.5-large`, `3.5-large-turbo` and `3.5-medium`).
+
To display other options available,
```shell
@@ -45,7 +55,7 @@ cargo run --example stable-diffusion-3 --release --features=cuda,flash-attn -- -
## Performance Benchmark
-Below benchmark is done by generating 1024-by-1024 image from 28 steps of Euler sampling and measure the average speed (iteration per seconds).
+The benchmark below is done with Stable Diffusion 3 Medium by generating a 1024-by-1024 image from 28 steps of Euler sampling and measures the average speed (iterations per second).
[candle](https://github.com/huggingface/candle) and [candle-flash-attn](https://github.com/huggingface/candle/tree/main/candle-flash-attn) is based on the commit of [0d96ec3](https://github.com/huggingface/candle/commit/0d96ec31e8be03f844ed0aed636d6217dee9c7bc).
diff --git a/candle-examples/examples/stable-diffusion-3/main.rs b/candle-examples/examples/stable-diffusion-3/main.rs
index d0bf4bb8..31d3fc42 100644
--- a/candle-examples/examples/stable-diffusion-3/main.rs
+++ b/candle-examples/examples/stable-diffusion-3/main.rs
@@ -19,13 +19,15 @@ enum Which {
V3_5Large,
#[value(name = "3.5-large-turbo")]
V3_5LargeTurbo,
+ #[value(name = "3.5-medium")]
+ V3_5Medium,
}
impl Which {
fn is_3_5(&self) -> bool {
match self {
Self::V3Medium => false,
- Self::V3_5Large | Self::V3_5LargeTurbo => true,
+ Self::V3_5Large | Self::V3_5LargeTurbo | Self::V3_5Medium => true,
}
}
}
@@ -117,36 +119,59 @@ fn main() -> Result<()> {
let default_inference_steps = match which {
Which::V3_5Large => 28,
Which::V3_5LargeTurbo => 4,
+ Which::V3_5Medium => 28,
Which::V3Medium => 28,
};
let num_inference_steps = num_inference_steps.unwrap_or(default_inference_steps);
let default_cfg_scale = match which {
Which::V3_5Large => 4.0,
Which::V3_5LargeTurbo => 1.0,
+ Which::V3_5Medium => 4.0,
Which::V3Medium => 4.0,
};
let cfg_scale = cfg_scale.unwrap_or(default_cfg_scale);
let api = hf_hub::api::sync::Api::new()?;
let (mmdit_config, mut triple, vb) = if which.is_3_5() {
- let sai_repo = {
+ let sai_repo_for_text_encoders = {
+ let name = match which {
+ Which::V3_5Large => "stabilityai/stable-diffusion-3.5-large",
+ Which::V3_5LargeTurbo => "stabilityai/stable-diffusion-3.5-large-turbo",
+
+ // Unfortunately, stabilityai/stable-diffusion-3.5-medium doesn't have the monolithic text encoders that are usually
+ // placed under the text_encoders directory, like the case in stabilityai/stable-diffusion-3.5-large and -large-turbo.
+ // To make things worse, it currently only has partitioned model.fp16-00001-of-00002.safetensors and model.fp16-00002-of-00002.safetensors
+ // under the text_encoder_3 directory, for the t5xxl_fp16.safetensors model. This means that we need to merge the two partitions
+ // to get the monolithic text encoders. This is not a trivial task.
+ // Since the situation can change, we do not want to spend effort to handle the uniqueness of stabilityai/stable-diffusion-3.5-medium,
+ // which involves different paths and merging the two partition files for t5xxl_fp16.safetensors.
+ // So for now, we'll use the text encoder models from the stabilityai/stable-diffusion-3.5-large repository.
+ // TODO: Change to "stabilityai/stable-diffusion-3.5-medium" once the maintainers of the repository add back the monolithic text encoders.
+ Which::V3_5Medium => "stabilityai/stable-diffusion-3.5-large",
+ Which::V3Medium => unreachable!(),
+ };
+ api.repo(hf_hub::Repo::model(name.to_string()))
+ };
+ let sai_repo_for_mmdit = {
let name = match which {
Which::V3_5Large => "stabilityai/stable-diffusion-3.5-large",
Which::V3_5LargeTurbo => "stabilityai/stable-diffusion-3.5-large-turbo",
+ Which::V3_5Medium => "stabilityai/stable-diffusion-3.5-medium",
Which::V3Medium => unreachable!(),
};
api.repo(hf_hub::Repo::model(name.to_string()))
};
- let clip_g_file = sai_repo.get("text_encoders/clip_g.safetensors")?;
- let clip_l_file = sai_repo.get("text_encoders/clip_l.safetensors")?;
- let t5xxl_file = sai_repo.get("text_encoders/t5xxl_fp16.safetensors")?;
+ let clip_g_file = sai_repo_for_text_encoders.get("text_encoders/clip_g.safetensors")?;
+ let clip_l_file = sai_repo_for_text_encoders.get("text_encoders/clip_l.safetensors")?;
+ let t5xxl_file = sai_repo_for_text_encoders.get("text_encoders/t5xxl_fp16.safetensors")?;
let model_file = {
let model_file = match which {
Which::V3_5Large => "sd3.5_large.safetensors",
Which::V3_5LargeTurbo => "sd3.5_large_turbo.safetensors",
+ Which::V3_5Medium => "sd3.5_medium.safetensors",
Which::V3Medium => unreachable!(),
};
- sai_repo.get(model_file)?
+ sai_repo_for_mmdit.get(model_file)?
};
let triple = StableDiffusion3TripleClipWithTokenizer::new_split(
&clip_g_file,
@@ -157,7 +182,12 @@ fn main() -> Result<()> {
let vb = unsafe {
candle_nn::VarBuilder::from_mmaped_safetensors(&[model_file], DType::F16, &device)?
};
- (MMDiTConfig::sd3_5_large(), triple, vb)
+ match which {
+ Which::V3_5Large => (MMDiTConfig::sd3_5_large(), triple, vb),
+ Which::V3_5LargeTurbo => (MMDiTConfig::sd3_5_large(), triple, vb),
+ Which::V3_5Medium => (MMDiTConfig::sd3_5_medium(), triple, vb),
+ Which::V3Medium => unreachable!(),
+ }
} else {
let sai_repo = {
let name = "stabilityai/stable-diffusion-3-medium";