summary refs log tree commit diff
path: root/candle-examples
diff options
context:
space:
mode:
Diffstat (limited to 'candle-examples')
-rw-r--r--  candle-examples/Cargo.toml                                      | 2
-rw-r--r--  candle-examples/examples/reinforcement-learning/ddpg.rs         | 8
-rw-r--r--  candle-examples/examples/reinforcement-learning/gym_env.rs      | 1
-rw-r--r--  candle-examples/examples/reinforcement-learning/main.rs         | 2
-rw-r--r--  candle-examples/examples/reinforcement-learning/policy_gradient.rs | 2
-rw-r--r--  candle-examples/examples/reinforcement-learning/vec_gym_env.rs  | 5
6 files changed, 10 insertions, 10 deletions
diff --git a/candle-examples/Cargo.toml b/candle-examples/Cargo.toml
index 0c1219d7..df85302d 100644
--- a/candle-examples/Cargo.toml
+++ b/candle-examples/Cargo.toml
@@ -27,7 +27,7 @@ intel-mkl-src = { workspace = true, optional = true }
num-traits = { workspace = true }
palette = { version = "0.7.6", optional = true }
enterpolation = { version = "0.2.1", optional = true}
-pyo3 = { version = "0.22.0", features = ["auto-initialize"], optional = true }
+pyo3 = { version = "0.22.0", features = ["auto-initialize", "abi3-py311"], optional = true }
rayon = { workspace = true }
rubato = { version = "0.15.0", optional = true }
safetensors = { workspace = true }
diff --git a/candle-examples/examples/reinforcement-learning/ddpg.rs b/candle-examples/examples/reinforcement-learning/ddpg.rs
index 5309eaf6..389caac1 100644
--- a/candle-examples/examples/reinforcement-learning/ddpg.rs
+++ b/candle-examples/examples/reinforcement-learning/ddpg.rs
@@ -1,5 +1,4 @@
use std::collections::VecDeque;
-use std::fmt::Display;
use candle::{DType, Device, Error, Module, Result, Tensor, Var};
use candle_nn::{
@@ -167,6 +166,7 @@ fn track(
Ok(())
}
+#[allow(unused)]
struct Actor<'a> {
varmap: VarMap,
vb: VarBuilder<'a>,
@@ -211,7 +211,7 @@ impl Actor<'_> {
let target_network = make_network("target-actor")?;
// this sets the two networks to be equal to each other using tau = 1.0
- track(&mut varmap, &vb, "target-actor", "actor", &dims, 1.0);
+ track(&mut varmap, &vb, "target-actor", "actor", &dims, 1.0)?;
Ok(Self {
varmap,
@@ -244,6 +244,7 @@ impl Actor<'_> {
}
}
+#[allow(unused)]
struct Critic<'a> {
varmap: VarMap,
vb: VarBuilder<'a>,
@@ -287,7 +288,7 @@ impl Critic<'_> {
let target_network = make_network("target-critic")?;
// this sets the two networks to be equal to each other using tau = 1.0
- track(&mut varmap, &vb, "target-critic", "critic", &dims, 1.0);
+ track(&mut varmap, &vb, "target-critic", "critic", &dims, 1.0)?;
Ok(Self {
varmap,
@@ -322,6 +323,7 @@ impl Critic<'_> {
}
}
+#[allow(unused)]
#[allow(clippy::upper_case_acronyms)]
pub struct DDPG<'a> {
actor: Actor<'a>,
diff --git a/candle-examples/examples/reinforcement-learning/gym_env.rs b/candle-examples/examples/reinforcement-learning/gym_env.rs
index a2b6652f..05518b1b 100644
--- a/candle-examples/examples/reinforcement-learning/gym_env.rs
+++ b/candle-examples/examples/reinforcement-learning/gym_env.rs
@@ -1,4 +1,3 @@
-#![allow(unused)]
//! Wrappers around the Python API of Gymnasium (the new version of OpenAI gym)
use candle::{Device, Result, Tensor};
use pyo3::prelude::*;
diff --git a/candle-examples/examples/reinforcement-learning/main.rs b/candle-examples/examples/reinforcement-learning/main.rs
index 1a25cd93..34115b22 100644
--- a/candle-examples/examples/reinforcement-learning/main.rs
+++ b/candle-examples/examples/reinforcement-learning/main.rs
@@ -1,5 +1,3 @@
-#![allow(unused)]
-
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
diff --git a/candle-examples/examples/reinforcement-learning/policy_gradient.rs b/candle-examples/examples/reinforcement-learning/policy_gradient.rs
index 6c355fe6..3ae2617d 100644
--- a/candle-examples/examples/reinforcement-learning/policy_gradient.rs
+++ b/candle-examples/examples/reinforcement-learning/policy_gradient.rs
@@ -14,7 +14,7 @@ fn new_model(
) -> Result<(impl Module, VarMap)> {
let input_size = input_shape.iter().product();
- let mut varmap = VarMap::new();
+ let varmap = VarMap::new();
let var_builder = VarBuilder::from_varmap(&varmap, dtype, device);
let model = seq()
diff --git a/candle-examples/examples/reinforcement-learning/vec_gym_env.rs b/candle-examples/examples/reinforcement-learning/vec_gym_env.rs
index e382ad76..a985d9e9 100644
--- a/candle-examples/examples/reinforcement-learning/vec_gym_env.rs
+++ b/candle-examples/examples/reinforcement-learning/vec_gym_env.rs
@@ -1,9 +1,8 @@
-#![allow(unused)]
//! Vectorized version of the gym environment.
use candle::{DType, Device, Result, Tensor};
use pyo3::prelude::*;
-use pyo3::types::PyDict;
+#[allow(unused)]
#[derive(Debug)]
pub struct Step {
pub obs: Tensor,
@@ -11,6 +10,7 @@ pub struct Step {
pub is_done: Tensor,
}
+#[allow(unused)]
pub struct VecGymEnv {
env: PyObject,
action_space: usize,
@@ -21,6 +21,7 @@ fn w(res: PyErr) -> candle::Error {
candle::Error::wrap(res)
}
+#[allow(unused)]
impl VecGymEnv {
pub fn new(name: &str, img_dir: Option<&str>, nprocesses: usize) -> Result<VecGymEnv> {
Python::with_gil(|py| {