[package]
name = "candle-examples"
version = "0.1.0"
edition = "2021"
description = "Examples for the candle ML framework."
repository = "https://github.com/LaurentMazare/candle"
keywords = ["blas", "tensor", "machine-learning"]
categories = ["science"]
license = "MIT/Apache-2.0"
readme = "README.md"

[dependencies]
candle = { path = "../candle-core" }
candle-nn = { path = "../candle-nn" }
candle-transformers = { path = "../candle-transformers" }
candle-flash-attn = { path = "../candle-flash-attn", optional = true }
safetensors = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
num-traits = { workspace = true }
intel-mkl-src = { workspace = true, optional = true }
cudarc = { workspace = true, optional = true }
half = { workspace = true, optional = true }

[dev-dependencies]
anyhow = { workspace = true }
byteorder = { workspace = true }
clap = { workspace = true }
hf-hub = { workspace = true }
memmap2 = { workspace = true }
rand = { workspace = true }
tokenizers = { workspace = true, features = ["onig"] }
tracing = { workspace = true }
tracing-chrome = { workspace = true }
tracing-subscriber = { workspace = true }
wav = { workspace = true }

[build-dependencies]
anyhow = { workspace = true }

[features]
default = []
cuda = ["candle/cuda", "candle-nn/cuda", "candle-transformers/cuda"]
flash-attn = ["cuda", "dep:candle-flash-attn"]
mkl = ["dep:intel-mkl-src", "candle/mkl", "candle-nn/mkl", "candle-transformers/mkl"]
nccl = ["cuda", "cudarc/nccl", "dep:half"]

[[example]]
name = "llama_multiprocess"
required-features = ["cuda", "nccl", "flash-attn"]
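# A minimal sketch of how to run the gated example above, assuming a
# CUDA-capable machine with NCCL available; the required features must be
# enabled explicitly since they are not in the default set:
#   cargo run --release --example llama_multiprocess --features "cuda nccl flash-attn"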