Merge branch 'develop'
lucidfrontier45 committed Feb 13, 2023
2 parents 26822bf + 86b520c commit a518b96
Showing 10 changed files with 172 additions and 96 deletions.
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -7,7 +7,7 @@ categories = ["algorithms"]
repository = "https://github.com/lucidfrontier45/localsearch"
license-file = "LICENSE"
readme = "README.md"
version = "0.7.0"
version = "0.8.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
100 changes: 98 additions & 2 deletions README.md
@@ -9,8 +9,104 @@ All of the algorithms are parallelized with Rayon.
2. Tabu Search.
3. Simulated Annealing
4. Epsilon Greedy Search, a variant of Hill Climbing which accepts the trial state with a constant probability even if the score of the trial state is worse than the previous one.
5. Logistic Annealing, a variabt of Simulated Annealing which uses relative score diff to calculate transition probability.
5. Relative Annealing, a variant of Simulated Annealing which uses the relative score difference to calculate the transition probability.

# How to use

You need to implement your own model that implements `OptModel` trait. See examples for detail.
You need to implement your own model that implements the `OptModel` trait. The actual optimization is handled by each algorithm's optimizer. Here is a simple example that optimizes a quadratic function with the Hill Climbing algorithm.

```rust
use std::error::Error;

use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
use localsearch::{optim::HillClimbingOptimizer, OptModel, OptProgress};
use ordered_float::NotNan;
use rand::{self, distributions::Uniform, prelude::Distribution};

#[derive(Clone)]
struct QuadraticModel {
    k: usize,
    centers: Vec<f64>,
    dist: Uniform<f64>,
}

impl QuadraticModel {
    fn new(k: usize, centers: Vec<f64>, value_range: (f64, f64)) -> Self {
        let (low, high) = value_range;
        let dist = Uniform::new(low, high);
        Self { k, centers, dist }
    }
}

type StateType = Vec<f64>;
type ScoreType = NotNan<f64>;

impl OptModel for QuadraticModel {
    type StateType = StateType;
    type TransitionType = ();
    type ScoreType = ScoreType;

    fn generate_random_state<R: rand::Rng>(
        &self,
        rng: &mut R,
    ) -> Result<Self::StateType, Box<dyn Error>> {
        let state = self.dist.sample_iter(rng).take(self.k).collect::<Vec<_>>();
        Ok(state)
    }

    fn generate_trial_state<R: rand::Rng>(
        &self,
        current_state: &Self::StateType,
        rng: &mut R,
        _current_score: Option<NotNan<f64>>,
    ) -> (Self::StateType, Self::TransitionType, NotNan<f64>) {
        // Perturb one randomly chosen coordinate and re-evaluate the state.
        let k = rng.gen_range(0..self.k);
        let v = self.dist.sample(rng);
        let mut new_state = current_state.clone();
        new_state[k] = v;
        let score = self.evaluate_state(&new_state);
        (new_state, (), score)
    }

    fn evaluate_state(&self, state: &Self::StateType) -> NotNan<f64> {
        // Sum of squared distances to the target centers.
        let score = (0..self.k)
            .map(|i| (state[i] - self.centers[i]).powf(2.0))
            .sum();
        NotNan::new(score).unwrap()
    }
}

fn create_pbar(n_iter: u64) -> ProgressBar {
    let pb = ProgressBar::new(n_iter);
    pb.set_style(
        ProgressStyle::default_bar()
            .template(
                "{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {pos}/{len} (eta={eta}) {msg} ",
            )
            .unwrap()
            .progress_chars("#>-"),
    );
    pb.set_draw_target(ProgressDrawTarget::stderr_with_hz(10));
    pb
}

fn main() {
    let model = QuadraticModel::new(3, vec![2.0, 0.0, -3.5], (-10.0, 10.0));

    println!("running Hill Climbing optimizer");
    let n_iter = 10000;
    let patience = 1000;
    let n_trials = 50;
    let opt = HillClimbingOptimizer::new(patience, n_trials);
    let pb = create_pbar(n_iter as u64);
    let callback = |op: OptProgress<StateType, ScoreType>| {
        pb.set_message(format!("best score {:e}", op.score.into_inner()));
        pb.set_position(op.iter as u64);
    };

    let res = opt.optimize(&model, None, n_iter, Some(&callback));
    pb.finish();
    dbg!(res);
}
```

Further details can be found in the API documentation, the examples, and the test code.
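
Since this release replaces `LogisticAnnealingOptimizer` with the more general `RelativeAnnealingOptimizer`, here is a minimal sketch of how the `main` function above could be adapted to it. It reuses `QuadraticModel`, `create_pbar`, `StateType`, and `ScoreType` from the example above; the constructor arguments and the `relative_transition_score::exp` weight mirror `examples/tsp_model.rs` in this commit, while the concrete values are only illustrative and the `optimize` call is assumed to have the same signature as in the Hill Climbing example.

```rust
use localsearch::optim::{relative_transition_score, RelativeAnnealingOptimizer};

fn main() {
    // Assumes QuadraticModel, create_pbar, StateType and ScoreType from the example above.
    let model = QuadraticModel::new(3, vec![2.0, 0.0, -3.5], (-10.0, 10.0));

    println!("running Relative Annealing optimizer");
    let n_iter = 10000;
    let patience = 1000;
    let n_trials = 50;
    // Worse trial states are accepted with probability exp(-w * d),
    // where d is the relative score difference.
    let opt = RelativeAnnealingOptimizer::new(patience, n_trials, |d: f64| {
        relative_transition_score::exp(d, 1e1)
    });
    let pb = create_pbar(n_iter as u64);
    let callback = |op: OptProgress<StateType, ScoreType>| {
        pb.set_message(format!("best score {:e}", op.score.into_inner()));
        pb.set_position(op.iter as u64);
    };

    let (final_state, final_score) = opt.optimize(&model, None, n_iter, Some(&callback));
    pb.finish();
    dbg!((final_state, final_score));
}
```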
56 changes: 7 additions & 49 deletions examples/quadratic_model.rs
@@ -1,11 +1,7 @@
use std::error::Error;

use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
use localsearch::{
optim::{HillClimbingOptimizer, TabuList, TabuSearchOptimizer},
utils::RingBuffer,
OptModel, OptProgress,
};
use localsearch::{optim::HillClimbingOptimizer, OptModel, OptProgress};
use ordered_float::NotNan;
use rand::{self, distributions::Uniform, prelude::Distribution};

@@ -25,36 +21,35 @@ impl QuadraticModel {
}

type StateType = Vec<f64>;
type TransitionType = (usize, f64, f64);
type ScoreType = NotNan<f64>;

impl OptModel for QuadraticModel {
type StateType = StateType;
type TransitionType = TransitionType;
type TransitionType = ();
type ScoreType = ScoreType;
fn generate_random_state<R: rand::Rng>(
&self,
rng: &mut R,
) -> Result<StateType, Box<dyn Error>> {
) -> Result<Self::StateType, Box<dyn Error>> {
let state = self.dist.sample_iter(rng).take(self.k).collect::<Vec<_>>();
Ok(state)
}

fn generate_trial_state<R: rand::Rng>(
&self,
current_state: &StateType,
current_state: &Self::StateType,
rng: &mut R,
_current_score: Option<NotNan<f64>>,
) -> (StateType, TransitionType, NotNan<f64>) {
) -> (Self::StateType, Self::TransitionType, NotNan<f64>) {
let k = rng.gen_range(0..self.k);
let v = self.dist.sample(rng);
let mut new_state = current_state.clone();
new_state[k] = v;
let score = self.evaluate_state(&new_state);
(new_state, (k, current_state[k], v), score)
(new_state, (), score)
}

fn evaluate_state(&self, state: &StateType) -> NotNan<f64> {
fn evaluate_state(&self, state: &Self::StateType) -> NotNan<f64> {
let score = (0..self.k)
.into_iter()
.map(|i| (state[i] - self.centers[i]).powf(2.0))
Expand All @@ -63,33 +58,6 @@ impl OptModel for QuadraticModel {
}
}

#[derive(Debug)]
struct DequeTabuList {
buff: RingBuffer<TransitionType>,
}

impl DequeTabuList {
fn new(size: usize) -> Self {
let buff = RingBuffer::new(size);
Self { buff }
}
}

impl TabuList for DequeTabuList {
type Item = (StateType, TransitionType);

fn contains(&self, item: &Self::Item) -> bool {
let (k1, _, x) = item.1;
self.buff
.iter()
.any(|&(k2, y, _)| (k1 == k2) && (x - y).abs() < 0.0001)
}

fn append(&mut self, item: Self::Item) {
self.buff.append(item.1);
}
}

fn create_pbar(n_iter: u64) -> ProgressBar {
let pb = ProgressBar::new(n_iter);
pb.set_style(
@@ -120,14 +88,4 @@ fn main() {
let res = opt.optimize(&model, None, n_iter, Some(&callback));
pb.finish();
dbg!(res);

pb.finish_and_clear();
pb.reset();
println!("running Tabu Search optimizer");
let opt = TabuSearchOptimizer::new(patiance, n_trials, 20);
let tabu_list = DequeTabuList::new(2);

let res = opt.optimize(&model, None, n_iter, tabu_list, Some(&callback));
pb.finish();
dbg!((res.0, res.1));
}
10 changes: 6 additions & 4 deletions examples/tsp_model.rs
@@ -9,8 +9,8 @@ use std::{
use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
use localsearch::{
optim::{
EpsilonGreedyOptimizer, HillClimbingOptimizer, LogisticAnnealingOptimizer,
SimulatedAnnealingOptimizer, TabuList, TabuSearchOptimizer,
relative_transition_score, EpsilonGreedyOptimizer, HillClimbingOptimizer,
RelativeAnnealingOptimizer, SimulatedAnnealingOptimizer, TabuList, TabuSearchOptimizer,
},
utils::RingBuffer,
OptModel, OptProgress,
@@ -320,8 +320,10 @@ fn main() {
pb.finish_and_clear();
pb.reset();

println!("run logistic annealing");
let optimizer = LogisticAnnealingOptimizer::new(patience, 200, 1e3);
println!("run relative annealing");
let optimizer = RelativeAnnealingOptimizer::new(patience, 200, |ds: f64| {
relative_transition_score::exp(ds, 1e1)
});
let (final_state, final_score) =
optimizer.optimize(&tsp_model, initial_state, n_iter, Some(&callback));
println!(
2 changes: 1 addition & 1 deletion src/callback.rs
@@ -2,7 +2,7 @@
use std::{cell::RefCell, rc::Rc};

/// OptProgress expresses Optimization Progress that is passed to a OptCallbackFn
/// OptProgress expresses Optimization Progress that is passed to a [`OptCallbackFn`]
#[derive(Debug, Clone)]
pub struct OptProgress<S, SC> {
/// current iteration step
4 changes: 2 additions & 2 deletions src/optim.rs
@@ -2,12 +2,12 @@
mod epsilon_greedy;
mod hill_climbing;
mod logistic_annealing;
mod relative_annealing;
mod simulated_annealing;
mod tabu_search;

pub use epsilon_greedy::EpsilonGreedyOptimizer;
pub use hill_climbing::HillClimbingOptimizer;
pub use logistic_annealing::LogisticAnnealingOptimizer;
pub use relative_annealing::{relative_transition_score, RelativeAnnealingOptimizer};
pub use simulated_annealing::SimulatedAnnealingOptimizer;
pub use tabu_search::{TabuList, TabuSearchOptimizer};
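
With `LogisticAnnealingOptimizer` gone from the re-exports above, downstream code would pull in the renamed items roughly like this (a sketch only; the constructor call follows `examples/tsp_model.rs` from this commit, and the numeric arguments are placeholders):

```rust
use localsearch::optim::{relative_transition_score, RelativeAnnealingOptimizer};

fn main() {
    // The third argument converts the relative score difference d into an acceptance probability.
    let optimizer = RelativeAnnealingOptimizer::new(1000, 200, |d: f64| {
        relative_transition_score::logistic(d, 1e1)
    });
    // `optimizer.optimize(...)` is then called like the other optimizers in the examples.
    let _ = optimizer;
}
```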
82 changes: 51 additions & 31 deletions src/optim/logistic_annealing.rs → src/optim/relative_annealing.rs
@@ -7,45 +7,49 @@ use rayon::prelude::*;
use crate::callback::{OptCallbackFn, OptProgress};
use crate::OptModel;

fn calc_transition_score(trial_score: f64, current_score: f64, w: f64) -> f64 {
let ds = (trial_score - current_score) / current_score;

if ds < 0.0 {
1.0
} else {
let z = ds * w;
2.0 / (1.0 + z.exp())
/// Pre-defined functions to convert the relative score difference to a probability
pub mod relative_transition_score {
/// exp(-w * d)
pub fn exp(d: f64, w: f64) -> f64 {
(-w * d).exp()
}

/// 2.0 / (1.0 + exp(w * d))
pub fn logistic(d: f64, w: f64) -> f64 {
2.0 / (1.0 + (w * d).exp())
}
}

fn relative_difference(trial: f64, current: f64) -> f64 {
(trial - current) / current
}

/// Optimizer that implements logistic annealing algorithm
/// In this model, wether accept the trial state or not is decided by the following criterion
/// Optimizer that implements the relative annealing algorithm.
/// In this model, unlike simulated annealing, whether to accept the trial state is decided from the relative score difference.
/// Given a function f that converts a float number to a probability, the actual procedure is as follows:
///
/// d <- (trial_score / current_score) / current_score
/// if d < 0:
/// accept
/// else:
/// p <- sigmoid(-w * d) * 2.0
/// accept if p > rand(0, 1)
/// 1. d <- (trial_score - current_score) / current_score
/// 2. p <- f(d)
/// 3. accept if p > rand(0, 1)
#[derive(Clone, Copy)]
pub struct LogisticAnnealingOptimizer {
pub struct RelativeAnnealingOptimizer<FS: Fn(f64) -> f64> {
patience: usize,
n_trials: usize,
w: f64,
score_func: FS,
}

impl LogisticAnnealingOptimizer {
/// Constructor of LogisticAnnealingOptimizer
impl<FS: Fn(f64) -> f64> RelativeAnnealingOptimizer<FS> {
/// Constructor of RelativeAnnealingOptimizer
///
/// - `patience` : the optimizer will give up
/// if there is no improvement of the score after this number of iterations
/// - `n_trials` : number of trial states to generate and evaluate at each iteration
/// - `w` : positive weight parameter to be multiplied to relative difference.
pub fn new(patience: usize, n_trials: usize, w: f64) -> Self {
/// - `score_func` : score function to calculate transition probability from relative difference.
pub fn new(patience: usize, n_trials: usize, score_func: FS) -> Self {
Self {
patience,
n_trials,
w,
score_func,
}
}

@@ -90,8 +94,8 @@ impl LogisticAnnealingOptimizer {
.min_by_key(|(_, score)| *score)
.unwrap();

let p =
calc_transition_score(trial_score.into_inner(), current_score.into_inner(), self.w);
let ds = relative_difference(trial_score.into(), current_score.into());
let p = (self.score_func)(ds);
let r: f64 = rng.gen();

if p > r {
@@ -125,17 +129,33 @@

#[cfg(test)]
mod test {
use approx::assert_abs_diff_eq;
use crate::optim::relative_annealing::relative_difference;

use super::relative_transition_score;

#[test]
fn test_exp_transition_score() {
let w = 1e1;
let f = |ds| relative_transition_score::exp(ds, w);

let p = f(relative_difference(0.9, 1.0));
assert!(p >= 1.0);

use super::calc_transition_score;
let p1 = f(relative_difference(1.1, 1.0));
let p2 = f(relative_difference(1.2, 1.0));
assert!(p1 > p2);
}

#[test]
fn test_calc_transition_score() {
fn test_logistic_transition_score() {
let w = 1e1;
assert_abs_diff_eq!(calc_transition_score(0.9, 1.0, w), 1.0, epsilon = 0.01);
let f = |ds| relative_transition_score::logistic(ds, w);

let p = f(relative_difference(0.9, 1.0));
assert!(p >= 1.0);

let p1 = calc_transition_score(1.1, 1.0, w);
let p2 = calc_transition_score(1.2, 1.0, w);
let p1 = f(relative_difference(1.1, 1.0));
let p2 = f(relative_difference(1.2, 1.0));
assert!(p1 > p2);
}
}
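
For readers skimming the diff, here is a tiny standalone sketch of the acceptance rule documented above (d <- (trial_score - current_score) / current_score, p <- f(d), accept if p > rand(0, 1)). The two helpers are local copies modeled on `relative_difference` and `relative_transition_score::exp` from this file, so the snippet compiles on its own.

```rust
use rand::Rng;

// Local stand-ins for relative_difference and relative_transition_score::exp above.
fn relative_difference(trial: f64, current: f64) -> f64 {
    (trial - current) / current
}

fn exp_transition_score(d: f64, w: f64) -> f64 {
    (-w * d).exp()
}

fn main() {
    let mut rng = rand::thread_rng();
    let w = 1e1;
    let current_score = 1.0;

    for trial_score in [0.9, 1.05, 1.2] {
        // 1. relative difference, 2. transition probability, 3. accept if p > rand(0, 1)
        let d = relative_difference(trial_score, current_score);
        let p = exp_transition_score(d, w);
        let accepted = p > rng.gen::<f64>();
        println!("trial={trial_score} d={d:.3} p={p:.3} accepted={accepted}");
    }
}
```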