diff options
author | Laurent Mazare <laurent.mazare@gmail.com> | 2024-09-05 22:46:55 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-09-05 23:46:55 +0200 |
commit | e3261216b157a7305c18ccdd766b6e2a41afe483 (patch) | |
tree | edec85bf6faae0c6662e833f74d79ccc2579ef05 /candle-transformers | |
parent | c02b7c327297f25fc9cc60b8f39c9aa32e40ff21 (diff) | |
download | candle-e3261216b157a7305c18ccdd766b6e2a41afe483.tar.gz candle-e3261216b157a7305c18ccdd766b6e2a41afe483.tar.bz2 candle-e3261216b157a7305c18ccdd766b6e2a41afe483.zip |
Clippy fixes for 1.81.0. (#2461)
* Clippy fixes for 1.81.0.
* Another fix.
Diffstat (limited to 'candle-transformers')
-rw-r--r-- | candle-transformers/src/models/bert.rs | 6
-rw-r--r-- | candle-transformers/src/models/bigcode.rs | 2
-rw-r--r-- | candle-transformers/src/models/distilbert.rs | 6
-rw-r--r-- | candle-transformers/src/models/falcon.rs | 2
-rw-r--r-- | candle-transformers/src/models/jina_bert.rs | 2
-rw-r--r-- | candle-transformers/src/models/llama.rs | 2
-rw-r--r-- | candle-transformers/src/models/llama2_c.rs | 2
-rw-r--r-- | candle-transformers/src/models/moondream.rs | 2
-rw-r--r-- | candle-transformers/src/models/segformer.rs | 8
-rw-r--r-- | candle-transformers/src/models/t5.rs | 2
-rw-r--r-- | candle-transformers/src/models/whisper/model.rs | 4
11 files changed, 19 insertions, 19 deletions
diff --git a/candle-transformers/src/models/bert.rs b/candle-transformers/src/models/bert.rs index 2262aa1a..354048de 100644 --- a/candle-transformers/src/models/bert.rs +++ b/candle-transformers/src/models/bert.rs @@ -419,7 +419,7 @@ struct BertEncoder { impl BertEncoder { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let layers = (0..config.num_hidden_layers) - .map(|index| BertLayer::load(vb.pp(&format!("layer.{index}")), config)) + .map(|index| BertLayer::load(vb.pp(format!("layer.{index}")), config)) .collect::<Result<Vec<_>>>()?; let span = tracing::span!(tracing::Level::TRACE, "encoder"); Ok(BertEncoder { layers, span }) @@ -454,8 +454,8 @@ impl BertModel { (Err(err), _) | (_, Err(err)) => { if let Some(model_type) = &config.model_type { if let (Ok(embeddings), Ok(encoder)) = ( - BertEmbeddings::load(vb.pp(&format!("{model_type}.embeddings")), config), - BertEncoder::load(vb.pp(&format!("{model_type}.encoder")), config), + BertEmbeddings::load(vb.pp(format!("{model_type}.embeddings")), config), + BertEncoder::load(vb.pp(format!("{model_type}.encoder")), config), ) { (embeddings, encoder) } else { diff --git a/candle-transformers/src/models/bigcode.rs b/candle-transformers/src/models/bigcode.rs index 2e1bbd37..f6b4a4ef 100644 --- a/candle-transformers/src/models/bigcode.rs +++ b/candle-transformers/src/models/bigcode.rs @@ -298,7 +298,7 @@ impl GPTBigCode { let wte = embedding(cfg.vocab_size, hidden_size, vb_t.pp("wte"))?; let wpe = embedding(cfg.max_position_embeddings, hidden_size, vb_t.pp("wpe"))?; let blocks = (0..cfg.num_hidden_layers) - .map(|i| Block::load(vb_t.pp(&format!("h.{i}")), &cfg)) + .map(|i| Block::load(vb_t.pp(format!("h.{i}")), &cfg)) .collect::<Result<Vec<_>>>()?; let ln_f = layer_norm(hidden_size, cfg.layer_norm_epsilon, vb_t.pp("ln_f"))?; let lm_head = linear(hidden_size, cfg.vocab_size, false, vb_t.pp("wte"))?; diff --git a/candle-transformers/src/models/distilbert.rs b/candle-transformers/src/models/distilbert.rs index 
ea074c97..f899d772 100644 --- a/candle-transformers/src/models/distilbert.rs +++ b/candle-transformers/src/models/distilbert.rs @@ -275,7 +275,7 @@ struct Transformer { impl Transformer { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let layers = (0..config.n_layers) - .map(|index| TransformerBlock::load(vb.pp(&format!("layer.{index}")), config)) + .map(|index| TransformerBlock::load(vb.pp(format!("layer.{index}")), config)) .collect::<Result<Vec<_>>>()?; let span = tracing::span!(tracing::Level::TRACE, "encoder"); Ok(Transformer { layers, span }) @@ -311,8 +311,8 @@ impl DistilBertModel { (Err(err), _) | (_, Err(err)) => { if let Some(model_type) = &config.model_type { if let (Ok(embeddings), Ok(encoder)) = ( - Embeddings::load(vb.pp(&format!("{model_type}.embeddings")), config), - Transformer::load(vb.pp(&format!("{model_type}.transformer")), config), + Embeddings::load(vb.pp(format!("{model_type}.embeddings")), config), + Transformer::load(vb.pp(format!("{model_type}.transformer")), config), ) { (embeddings, encoder) } else { diff --git a/candle-transformers/src/models/falcon.rs b/candle-transformers/src/models/falcon.rs index 3a3575aa..50ec66f3 100644 --- a/candle-transformers/src/models/falcon.rs +++ b/candle-transformers/src/models/falcon.rs @@ -448,7 +448,7 @@ impl Falcon { vb.pp("transformer.word_embeddings"), )?; let blocks = (0..cfg.num_hidden_layers) - .map(|i| FalconDecoderLayer::load(vb.pp(&format!("transformer.h.{i}")), &cfg)) + .map(|i| FalconDecoderLayer::load(vb.pp(format!("transformer.h.{i}")), &cfg)) .collect::<Result<Vec<_>>>()?; let ln_f = layer_norm( cfg.hidden_size, diff --git a/candle-transformers/src/models/jina_bert.rs b/candle-transformers/src/models/jina_bert.rs index a9ae37e9..1f0fae1e 100644 --- a/candle-transformers/src/models/jina_bert.rs +++ b/candle-transformers/src/models/jina_bert.rs @@ -344,7 +344,7 @@ impl BertEncoder { candle::bail!("only alibi is supported as a position-embedding-type") } let layers = 
(0..cfg.num_hidden_layers) - .map(|index| BertLayer::new(vb.pp(&format!("layer.{index}")), cfg)) + .map(|index| BertLayer::new(vb.pp(format!("layer.{index}")), cfg)) .collect::<Result<Vec<_>>>()?; let span = tracing::span!(tracing::Level::TRACE, "encoder"); let alibi = build_alibi_bias(cfg)?.to_device(vb.device())?; diff --git a/candle-transformers/src/models/llama.rs b/candle-transformers/src/models/llama.rs index 3681472b..e96bb855 100644 --- a/candle-transformers/src/models/llama.rs +++ b/candle-transformers/src/models/llama.rs @@ -507,7 +507,7 @@ impl Llama { let lm_head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; let ln_f = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("model.norm"))?; let blocks: Vec<_> = (0..cfg.num_hidden_layers) - .map(|i| Block::load(vb.pp(&format!("model.layers.{i}")), cfg).unwrap()) + .map(|i| Block::load(vb.pp(format!("model.layers.{i}")), cfg).unwrap()) .collect(); Ok(Self { diff --git a/candle-transformers/src/models/llama2_c.rs b/candle-transformers/src/models/llama2_c.rs index bba8b666..923a2706 100644 --- a/candle-transformers/src/models/llama2_c.rs +++ b/candle-transformers/src/models/llama2_c.rs @@ -354,7 +354,7 @@ impl Llama { let lm_head = linear(cfg.dim, cfg.vocab_size, vb.pp("lm_head"))?; let ln_f = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("model.norm"))?; let blocks: Vec<_> = (0..cfg.n_layers) - .map(|i| Block::load(vb.pp(&format!("model.layers.{i}")), &cfg).unwrap()) + .map(|i| Block::load(vb.pp(format!("model.layers.{i}")), &cfg).unwrap()) .collect(); Ok(Self { wte, diff --git a/candle-transformers/src/models/moondream.rs b/candle-transformers/src/models/moondream.rs index e63fcf6e..cde59d43 100644 --- a/candle-transformers/src/models/moondream.rs +++ b/candle-transformers/src/models/moondream.rs @@ -167,7 +167,7 @@ impl VisionTransformer { let blocks = (0..cfg.num_blocks) .map(|i| { VitBlock::new( - vb.pp(&format!("blocks.{}", i)), + vb.pp(format!("blocks.{}", i)), cfg.embed_dim, cfg.num_heads, 
cfg, diff --git a/candle-transformers/src/models/segformer.rs b/candle-transformers/src/models/segformer.rs index 3727e004..260ceb3a 100644 --- a/candle-transformers/src/models/segformer.rs +++ b/candle-transformers/src/models/segformer.rs @@ -404,7 +404,7 @@ impl SegformerEncoder { stride, num_channels, hidden_size, - vb.pp(&format!("patch_embeddings.{}", i)), + vb.pp(format!("patch_embeddings.{}", i)), )?); let mut layers = Vec::with_capacity(config.depths[i]); for j in 0..config.depths[i] { @@ -417,14 +417,14 @@ impl SegformerEncoder { num_attention_heads, sequence_reduction_ratio, mlp_ratio, - vb.pp(&format!("block.{}.{}", i, j)), + vb.pp(format!("block.{}.{}", i, j)), )?); } blocks.push(layers); layer_norms.push(layer_norm( hidden_size, config.layer_norm_eps, - vb.pp(&format!("layer_norm.{}", i)), + vb.pp(format!("layer_norm.{}", i)), )?); } Ok(Self { @@ -507,7 +507,7 @@ impl SegformerDecodeHead { linear_c.push(SegformerMLP::new( config, hidden_size, - vb.pp(&format!("linear_c.{}", i)), + vb.pp(format!("linear_c.{}", i)), )?); } let linear_fuse = conv2d_no_bias( diff --git a/candle-transformers/src/models/t5.rs b/candle-transformers/src/models/t5.rs index 21517d64..84e072a2 100644 --- a/candle-transformers/src/models/t5.rs +++ b/candle-transformers/src/models/t5.rs @@ -659,7 +659,7 @@ struct T5Stack { impl T5Stack { fn load(decoder: bool, vb: VarBuilder, shared: &Arc<Embedding>, cfg: &Config) -> Result<Self> { let block = (0..cfg.num_layers) - .map(|i| T5Block::load(i == 0, decoder, vb.pp(&format!("block.{i}")), cfg)) + .map(|i| T5Block::load(i == 0, decoder, vb.pp(format!("block.{i}")), cfg)) .collect::<Result<Vec<_>>>()?; let final_layer_norm = T5LayerNorm::load( cfg.d_model, diff --git a/candle-transformers/src/models/whisper/model.rs b/candle-transformers/src/models/whisper/model.rs index 593ed373..dc50e0db 100644 --- a/candle-transformers/src/models/whisper/model.rs +++ b/candle-transformers/src/models/whisper/model.rs @@ -260,7 +260,7 @@ impl 
AudioEncoder { let positional_embedding = sinusoids(n_ctx, n_state, vb.device())?; let blocks = (0..cfg.encoder_layers) .map(|i| { - ResidualAttentionBlock::load(n_state, n_head, false, vb.pp(&format!("layers.{i}"))) + ResidualAttentionBlock::load(n_state, n_head, false, vb.pp(format!("layers.{i}"))) }) .collect::<Result<Vec<_>>>()?; let ln_post = layer_norm(n_state, vb.pp("layer_norm"))?; @@ -321,7 +321,7 @@ impl TextDecoder { let positional_embedding = vb.get((n_ctx, n_state), "embed_positions.weight")?; let blocks = (0..cfg.decoder_layers) .map(|i| { - ResidualAttentionBlock::load(n_state, n_head, true, vb.pp(&format!("layers.{i}"))) + ResidualAttentionBlock::load(n_state, n_head, true, vb.pp(format!("layers.{i}"))) }) .collect::<Result<Vec<_>>>()?; let ln = layer_norm(n_state, vb.pp("layer_norm"))?; |