Skip to content

Commit

Permalink
Merge branch 'release/0.11.0'
Browse files Browse the repository at this point in the history
  • Loading branch information
lucidfrontier45 committed Nov 3, 2023
2 parents 0679a6a + 7589fcb commit 441536d
Show file tree
Hide file tree
Showing 22 changed files with 328 additions and 221 deletions.
17 changes: 9 additions & 8 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -7,21 +7,22 @@ categories = ["algorithms"]
repository = "https://github.com/lucidfrontier45/localsearch"
license-file = "LICENSE"
readme = "README.md"
version = "0.10.0"
version = "0.11.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]

rand = "0.8"
ordered-float = "4"
rayon = "1"
trait-set = "0.3"
rand = "0.8.5"
ordered-float = "4.1.1"
rayon = "1.8.0"
trait-set = "0.3.0"
auto_impl = "1.1.0"

[dev-dependencies]
approx = "0.5"
indicatif = "0.17"
approx = "0.5.1"
indicatif = "0.17.7"

[lib]
name = "localsearch"
doctest = false
7 changes: 5 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,10 @@ You need to implement your own model that implements `OptModel` trait. Actual op
use std::error::Error;

use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
use localsearch::{optim::HillClimbingOptimizer, OptModel, OptProgress};
use localsearch::{
optim::{HillClimbingOptimizer, LocalSearchOptimizer},
OptModel, OptProgress,
};
use ordered_float::NotNan;
use rand::{self, distributions::Uniform, prelude::Distribution};

Expand Down Expand Up @@ -104,7 +107,7 @@ fn main() {
pb.set_position(op.iter as u64);
};

let res = opt.optimize(&model, None, n_iter, Some(&callback));
let res = opt.optimize(&model, None, n_iter, Some(&callback), ());
pb.finish();
dbg!(res);
}
Expand Down
7 changes: 5 additions & 2 deletions examples/quadratic_model.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,10 @@
use std::error::Error;

use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
use localsearch::{optim::HillClimbingOptimizer, OptModel, OptProgress};
use localsearch::{
optim::{HillClimbingOptimizer, LocalSearchOptimizer},
OptModel, OptProgress,
};
use ordered_float::NotNan;
use rand::{self, distributions::Uniform, prelude::Distribution};

Expand Down Expand Up @@ -85,7 +88,7 @@ fn main() {
pb.set_position(op.iter as u64);
};

let res = opt.optimize(&model, None, n_iter, Some(&callback));
let res = opt.optimize(&model, None, n_iter, Some(&callback), ());
pb.finish();
dbg!(res);
}
33 changes: 21 additions & 12 deletions examples/tsp_model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@ use std::{
use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
use localsearch::{
optim::{
EpsilonGreedyOptimizer, HillClimbingOptimizer, RelativeAnnealingOptimizer,
SimulatedAnnealingOptimizer, TabuList, TabuSearchOptimizer,
EpsilonGreedyOptimizer, HillClimbingOptimizer, LocalSearchOptimizer,
RelativeAnnealingOptimizer, SimulatedAnnealingOptimizer, TabuList, TabuSearchOptimizer,
},
utils::RingBuffer,
OptModel, OptProgress,
Expand Down Expand Up @@ -262,8 +262,13 @@ fn main() {

println!("run hill climbing");
let optimizer = HillClimbingOptimizer::new(1000, 200);
let (final_state, final_score) =
optimizer.optimize(&tsp_model, initial_state.clone(), n_iter, Some(&callback));
let (final_state, final_score, _) = optimizer.optimize(
&tsp_model,
initial_state.clone(),
n_iter,
Some(&callback),
(),
);
println!(
"final score = {}, num of cities {}",
final_score,
Expand All @@ -279,8 +284,8 @@ fn main() {
&tsp_model,
initial_state.clone(),
n_iter,
tabu_list,
Some(&callback),
tabu_list,
);
println!(
"final score = {}, num of cities {}",
Expand All @@ -292,13 +297,12 @@ fn main() {

println!("run annealing");
let optimizer = SimulatedAnnealingOptimizer::new(patience, 200);
let (final_state, final_score) = optimizer.optimize(
let (final_state, final_score, _) = optimizer.optimize(
&tsp_model,
initial_state.clone(),
n_iter,
200.0,
50.0,
Some(&callback),
(200.0, 50.0),
);
println!(
"final score = {}, num of cities {}",
Expand All @@ -310,8 +314,13 @@ fn main() {

println!("run epsilon greedy");
let optimizer = EpsilonGreedyOptimizer::new(patience, 200, 10, 0.3);
let (final_state, final_score) =
optimizer.optimize(&tsp_model, initial_state.clone(), n_iter, Some(&callback));
let (final_state, final_score, _) = optimizer.optimize(
&tsp_model,
initial_state.clone(),
n_iter,
Some(&callback),
(),
);
println!(
"final score = {}, num of cities {}",
final_score,
Expand All @@ -322,8 +331,8 @@ fn main() {

println!("run relative annealing");
let optimizer = RelativeAnnealingOptimizer::new(patience, 200, 10, 1e1);
let (final_state, final_score) =
optimizer.optimize(&tsp_model, initial_state, n_iter, Some(&callback));
let (final_state, final_score, _) =
optimizer.optimize(&tsp_model, initial_state, n_iter, Some(&callback), ());
println!(
"final score = {}, num of cities {}",
final_score,
Expand Down
8 changes: 5 additions & 3 deletions src/callback.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
use std::{cell::RefCell, rc::Rc};

use trait_set::trait_set;

/// OptProgress represents the optimization progress that is passed to an [`OptCallbackFn`]
#[derive(Debug, Clone)]
pub struct OptProgress<S, SC> {
Expand All @@ -27,6 +29,7 @@ impl<S, SC: Ord> OptProgress<S, SC> {
}
}

trait_set! {
/// OptCallbackFn is a trait of a callback function for optimization
/// Typical usage is to show progress bar and save current result to the file
///
Expand Down Expand Up @@ -55,6 +58,5 @@ impl<S, SC: Ord> OptProgress<S, SC> {
/// pb.set_position(op.iter as u64);
/// };
/// ```
pub trait OptCallbackFn<S, SC: PartialOrd>: Fn(OptProgress<S, SC>) {}

impl<T: Fn(OptProgress<S, SC>), S, SC: PartialOrd> OptCallbackFn<S, SC> for T {}
pub trait OptCallbackFn<S, SC: PartialOrd> = Fn(OptProgress<S, SC>);
}
32 changes: 3 additions & 29 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,40 +6,14 @@
pub mod optim;
pub mod utils;

use std::error::Error;

mod callback;
pub use callback::{OptCallbackFn, OptProgress};

mod model;
pub use model::OptModel;

/// Crate version string
pub const VERSION: &str = env!("CARGO_PKG_VERSION");

/// OptModel is a trait that defines requirements to be used with optimization algorithm
pub trait OptModel {
/// Type of the Score
type ScoreType: Ord + Copy + Sync + Send;
/// Type of the State
type StateType: Clone + Sync + Send;
/// Type of the Transition
type TransitionType: Clone + Sync + Send;

/// Randomly generate a state
fn generate_random_state<R: rand::Rng>(
&self,
rng: &mut R,
) -> Result<Self::StateType, Box<dyn Error>>;

/// Generate a new trial state from current state
fn generate_trial_state<R: rand::Rng>(
&self,
current_state: &Self::StateType,
rng: &mut R,
current_score: Option<Self::ScoreType>,
) -> (Self::StateType, Self::TransitionType, Self::ScoreType);

/// Evaluate the given state
fn evaluate_state(&self, state: &Self::StateType) -> Self::ScoreType;
}

#[cfg(test)]
mod tests;
31 changes: 31 additions & 0 deletions src/model.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
use std::error::Error;

use auto_impl::auto_impl;

/// OptModel is a trait that defines requirements to be used with optimization algorithm
///
/// Implementors describe a search problem: how to generate an initial state,
/// how to propose a trial (neighboring) state, and how to score a state.
/// The `auto_impl` attribute also provides the trait for `&T`, `Box<T>`,
/// `Rc<T>` and `Arc<T>` wrappers of any implementor.
#[auto_impl(&, Box, Rc, Arc)]
pub trait OptModel: Sync + Send {
    /// Type of the Score
    /// (`Ord` is required so the optimizer can totally order candidate states)
    type ScoreType: Ord + Copy + Sync + Send;
    /// Type of the State
    type StateType: Clone + Sync + Send;
    /// Type of the Transition
    type TransitionType: Clone + Sync + Send;

    /// Randomly generate a state
    ///
    /// # Errors
    ///
    /// Returns an error when a valid random state cannot be produced.
    fn generate_random_state<R: rand::Rng>(
        &self,
        rng: &mut R,
    ) -> Result<Self::StateType, Box<dyn Error>>;

    /// Generate a new trial state from current state
    ///
    /// Returns the proposed state, the transition that produced it, and the
    /// score of the proposed state.
    /// `current_score` lets an implementor compute the new score
    /// incrementally; NOTE(review): behavior when it is `None` is
    /// implementor-defined — confirm against concrete models.
    fn generate_trial_state<R: rand::Rng>(
        &self,
        current_state: &Self::StateType,
        rng: &mut R,
        current_score: Option<Self::ScoreType>,
    ) -> (Self::StateType, Self::TransitionType, Self::ScoreType);

    /// Evaluate the given state
    ///
    /// Computes the score of `state`.
    fn evaluate_state(&self, state: &Self::StateType) -> Self::ScoreType;
}
3 changes: 3 additions & 0 deletions src/optim.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,16 @@
mod base;
mod epsilon_greedy;
mod generic;
mod hill_climbing;
mod logistic_annealing;
mod relative_annealing;
mod simulated_annealing;
mod tabu_search;

pub use base::{LocalSearchOptimizer, TransitionProbabilityFn};
pub use epsilon_greedy::EpsilonGreedyOptimizer;
pub use generic::GenericLocalSearchOptimizer;
pub use hill_climbing::HillClimbingOptimizer;
pub use logistic_annealing::LogisticAnnealingOptimizer;
pub use relative_annealing::RelativeAnnealingOptimizer;
Expand Down
Loading

0 comments on commit 441536d

Please sign in to comment.