diff --git a/Cargo.toml b/Cargo.toml
index 6797df7..b48f78e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -7,7 +7,7 @@ categories = ["algorithms"]
 repository = "https://github.com/lucidfrontier45/localsearch"
 license-file = "LICENSE"
 readme = "README.md"
-version = "0.11.0"
+version = "0.12.0"
 edition = "2021"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
diff --git a/README.md b/README.md
index 753ea23..856acc9 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ All of the algorithms are parallelized with Rayon.
 1. Hill Climbing.
 2. Tabu Search.
 3. Simulated Annealing
-4. Epsilon Greedy Search, a variant of Hill Climbing which accepts the trial state with a constant probabilith even if the score of the trial state is worse than the previous one.
+4. Epsilon Greedy Search, a variant of Hill Climbing which accepts the trial solution with a constant probability even if the score of the trial solution is worse than the previous one.
 5. Relative Annealing, a variant of Simulated Annealing which uses relative score diff to calculate transition probability.
 6. Logistic Annealing, a variant of Relative Annealing which uses logistic function instead of simple exponential.
@@ -17,7 +17,7 @@ All of the algorithms are parallelized with Rayon.
 You need to implement your own model that implements `OptModel` trait. Actual optimization is handled by each algorithm functions. Here is a simple example to optimize a quadratic function with Hill Climbing algorithm.

 ```rust
-use std::error::Error;
+use std::{error::Error, time::Duration};

 use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
 use localsearch::{
@@ -42,39 +42,39 @@ impl QuadraticModel {
     }
 }

-type StateType = Vec<f64>;
+type SolutionType = Vec<f64>;
 type ScoreType = NotNan<f64>;

 impl OptModel for QuadraticModel {
-    type StateType = StateType;
+    type SolutionType = SolutionType;
     type TransitionType = ();
     type ScoreType = ScoreType;

-    fn generate_random_state<R: rand::Rng>(
+    fn generate_random_solution<R: rand::Rng>(
         &self,
         rng: &mut R,
-    ) -> Result<Self::StateType, Box<dyn Error>> {
-        let state = self.dist.sample_iter(rng).take(self.k).collect::<Vec<_>>();
-        Ok(state)
+    ) -> Result<Self::SolutionType, Box<dyn Error>> {
+        let solution = self.dist.sample_iter(rng).take(self.k).collect::<Vec<_>>();
+        Ok(solution)
     }

-    fn generate_trial_state<R: rand::Rng>(
+    fn generate_trial_solution<R: rand::Rng>(
         &self,
-        current_state: &Self::StateType,
+        current_solution: &Self::SolutionType,
         rng: &mut R,
         _current_score: Option<NotNan<f64>>,
-    ) -> (Self::StateType, Self::TransitionType, NotNan<f64>) {
+    ) -> (Self::SolutionType, Self::TransitionType, NotNan<f64>) {
         let k = rng.gen_range(0..self.k);
         let v = self.dist.sample(rng);
-        let mut new_state = current_state.clone();
-        new_state[k] = v;
-        let score = self.evaluate_state(&new_state);
-        (new_state, (), score)
+        let mut new_solution = current_solution.clone();
+        new_solution[k] = v;
+        let score = self.evaluate_solution(&new_solution);
+        (new_solution, (), score)
     }

-    fn evaluate_state(&self, state: &Self::StateType) -> NotNan<f64> {
+    fn evaluate_solution(&self, solution: &Self::SolutionType) -> NotNan<f64> {
         let score = (0..self.k)
             .into_iter()
-            .map(|i| (state[i] - self.centers[i]).powf(2.0))
+            .map(|i| (solution[i] - self.centers[i]).powf(2.0))
             .sum();
         NotNan::new(score).unwrap()
     }
@@ -98,19 +98,21 @@ fn main() {
     println!("running Hill Climbing optimizer");

     let n_iter = 10000;
+    let time_limit = Duration::from_secs(60);
     let patiance = 1000;
     let n_trials = 50;
     let opt = HillClimbingOptimizer::new(patiance, n_trials);
     let pb = create_pbar(n_iter as u64);
-    let callback = |op: OptProgress<StateType, ScoreType>| {
+    let callback = |op: OptProgress<SolutionType, ScoreType>| {
         pb.set_message(format!("best score {:e}", op.score.into_inner()));
         pb.set_position(op.iter as u64);
     };

-    let res = opt.optimize(&model, None, n_iter, Some(&callback), ());
+    let res = opt.optimize(&model, None, n_iter, time_limit, Some(&callback), ());
     pb.finish();
     dbg!(res);
 }
+
 ```

 Further details can be found at API document, example and test codes.
\ No newline at end of file
diff --git a/examples/quadratic_model.rs b/examples/quadratic_model.rs
index b483c23..86dd574 100644
--- a/examples/quadratic_model.rs
+++ b/examples/quadratic_model.rs
@@ -1,4 +1,4 @@
-use std::error::Error;
+use std::{error::Error, time::Duration};

 use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
 use localsearch::{
@@ -23,39 +23,39 @@ impl QuadraticModel {
     }
 }

-type StateType = Vec<f64>;
+type SolutionType = Vec<f64>;
 type ScoreType = NotNan<f64>;

 impl OptModel for QuadraticModel {
-    type StateType = StateType;
+    type SolutionType = SolutionType;
     type TransitionType = ();
     type ScoreType = ScoreType;

-    fn generate_random_state<R: rand::Rng>(
+    fn generate_random_solution<R: rand::Rng>(
         &self,
         rng: &mut R,
-    ) -> Result<Self::StateType, Box<dyn Error>> {
-        let state = self.dist.sample_iter(rng).take(self.k).collect::<Vec<_>>();
-        Ok(state)
+    ) -> Result<Self::SolutionType, Box<dyn Error>> {
+        let solution = self.dist.sample_iter(rng).take(self.k).collect::<Vec<_>>();
+        Ok(solution)
     }

-    fn generate_trial_state<R: rand::Rng>(
+    fn generate_trial_solution<R: rand::Rng>(
         &self,
-        current_state: &Self::StateType,
+        current_solution: &Self::SolutionType,
         rng: &mut R,
         _current_score: Option<NotNan<f64>>,
-    ) -> (Self::StateType, Self::TransitionType, NotNan<f64>) {
+    ) -> (Self::SolutionType, Self::TransitionType, NotNan<f64>) {
         let k = rng.gen_range(0..self.k);
         let v = self.dist.sample(rng);
-        let mut new_state = current_state.clone();
-        new_state[k] = v;
-        let score = self.evaluate_state(&new_state);
-        (new_state, (), score)
+        let mut new_solution = current_solution.clone();
+        new_solution[k] = v;
+        let score = self.evaluate_solution(&new_solution);
+        (new_solution, (), score)
     }

-    fn evaluate_state(&self, state: &Self::StateType) -> NotNan<f64> {
+    fn evaluate_solution(&self, solution: &Self::SolutionType) -> NotNan<f64> {
         let score = (0..self.k)
             .into_iter()
-            .map(|i| (state[i] - self.centers[i]).powf(2.0))
+            .map(|i| (solution[i] - self.centers[i]).powf(2.0))
             .sum();
         NotNan::new(score).unwrap()
     }
@@ -79,16 +79,17 @@ fn main() {
     println!("running Hill Climbing optimizer");

     let n_iter = 10000;
+    let time_limit = Duration::from_secs_f32(1.0);
     let patiance = 1000;
     let n_trials = 50;
     let opt = HillClimbingOptimizer::new(patiance, n_trials);
     let pb = create_pbar(n_iter as u64);
-    let callback = |op: OptProgress<StateType, ScoreType>| {
+    let callback = |op: OptProgress<SolutionType, ScoreType>| {
         pb.set_message(format!("best score {:e}", op.score.into_inner()));
         pb.set_position(op.iter as u64);
     };

-    let res = opt.optimize(&model, None, n_iter, Some(&callback), ());
+    let res = opt.optimize(&model, None, n_iter, time_limit, Some(&callback), ());
     pb.finish();
     dbg!(res);
 }
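Since the new `time_limit` parameter is positional, every existing `optimize` call site has to change. A minimal before/after sketch for downstream users (names taken from the quadratic example above; the `Duration::MAX` idea is an inference from the `duration > time_limit` check introduced by this patch, not something it documents):

```rust
use std::time::Duration;

// localsearch 0.11:
// let res = opt.optimize(&model, None, n_iter, Some(&callback), ());

// localsearch 0.12: a wall-clock budget is now the fourth argument.
let res = opt.optimize(&model, None, n_iter, Duration::from_secs(60), Some(&callback), ());

// To keep the old iteration-only behavior, an effectively unreachable
// budget such as Duration::MAX should work, since the optimizers only
// break out of the loop once the elapsed time exceeds `time_limit`.
let res = opt.optimize(&model, None, n_iter, Duration::MAX, Some(&callback), ());
```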
diff --git a/examples/tsp_model.rs b/examples/tsp_model.rs
index 5f71257..740226a 100644
--- a/examples/tsp_model.rs
+++ b/examples/tsp_model.rs
@@ -1,3 +1,4 @@
+use core::time::Duration;
 use std::{
     collections::{HashMap, HashSet},
     error::Error,
@@ -84,19 +85,19 @@ fn select_two_indides<R: Rng>(lb: usize, ub: usize, rng: &mut R) -> (usize, usize) {
     min_sorted(n1, n2)
 }

-type StateType = Vec<usize>;
+type SolutionType = Vec<usize>;

 // remvoed edges and inserted edges
 type TransitionType = ([Edge; 2], [Edge; 2]);
 type ScoreType = NotNan<f64>;

 impl OptModel for TSPModel {
-    type StateType = StateType;
+    type SolutionType = SolutionType;
     type TransitionType = TransitionType;
     type ScoreType = ScoreType;

-    fn generate_random_state<R: Rng>(
+    fn generate_random_solution<R: Rng>(
         &self,
         rng: &mut R,
-    ) -> Result<Self::StateType, Box<dyn Error>> {
+    ) -> Result<Self::SolutionType, Box<dyn Error>> {
         let mut cities = self
             .distance_matrix
             .keys()
@@ -118,27 +119,27 @@ impl OptModel for TSPModel {
         Ok(cities)
     }

-    fn generate_trial_state<R: Rng>(
+    fn generate_trial_solution<R: Rng>(
         &self,
-        current_state: &StateType,
+        current_solution: &SolutionType,
         rng: &mut R,
         current_score: Option<NotNan<f64>>,
-    ) -> (StateType, TransitionType, NotNan<f64>) {
-        let (ind1, ind2) = select_two_indides(1, current_state.len() - 1, rng);
+    ) -> (SolutionType, TransitionType, NotNan<f64>) {
+        let (ind1, ind2) = select_two_indides(1, current_solution.len() - 1, rng);

-        let mut new_state = current_state.clone();
+        let mut new_solution = current_solution.clone();
         for (i, ind) in (ind1..=ind2).enumerate() {
-            new_state[ind] = current_state[ind2 - i];
+            new_solution[ind] = current_solution[ind2 - i];
         }

         let removed_edges = [
-            min_sorted(current_state[ind1 - 1], current_state[ind1]),
-            min_sorted(current_state[ind2 + 1], current_state[ind2]),
+            min_sorted(current_solution[ind1 - 1], current_solution[ind1]),
+            min_sorted(current_solution[ind2 + 1], current_solution[ind2]),
         ];

         let inserted_edges = [
-            min_sorted(new_state[ind1 - 1], new_state[ind1]),
-            min_sorted(new_state[ind2 + 1], new_state[ind2]),
+            min_sorted(new_solution[ind1 - 1], new_solution[ind1]),
+            min_sorted(new_solution[ind2 + 1], new_solution[ind2]),
         ];

         // calculate new score
@@ -149,19 +150,19 @@ impl OptModel for TSPModel {
                     + self.get_distance(&inserted_edges[0], true)
                     + self.get_distance(&inserted_edges[1], true)
             }
-            None => self.evaluate_state(&new_state),
+            None => self.evaluate_solution(&new_solution),
         };

         // create transition
         let trans = (removed_edges, inserted_edges);

-        (new_state, trans, new_score)
+        (new_solution, trans, new_score)
     }

-    fn evaluate_state(&self, state: &StateType) -> NotNan<f64> {
-        let score = (0..state.len() - 1)
+    fn evaluate_solution(&self, solution: &SolutionType) -> NotNan<f64> {
+        let score = (0..solution.len() - 1)
             .map(|i| {
-                let key = min_sorted(state[i], state[i + 1]);
+                let key = min_sorted(solution[i], solution[i + 1]);
                 self.get_distance(&key, true)
             })
             .sum();
@@ -182,7 +183,7 @@ impl DequeTabuList {
 }

 impl TabuList for DequeTabuList {
-    type Item = (StateType, TransitionType);
+    type Item = (SolutionType, TransitionType);

     fn contains(&self, item: &Self::Item) -> bool {
         let (_, (_, inserted_edges)) = item;
@@ -243,13 +244,14 @@ fn main() {
     let tsp_model = TSPModel::from_coords(&coords);

     let n_iter: usize = 20000;
+    let time_limit = Duration::from_secs(60);
     let patience = n_iter / 2;

     let mut rng = rand::thread_rng();
-    let initial_state = tsp_model.generate_random_state(&mut rng).ok();
+    let initial_solution = tsp_model.generate_random_solution(&mut rng).ok();

     let pb = create_pbar(n_iter as u64);
-    let callback = |op: OptProgress<StateType, ScoreType>| {
+    let callback = |op: OptProgress<SolutionType, ScoreType>| {
         let ratio = op.accepted_count as f64 / op.iter as f64;
         pb.set_message(format!(
             "best score {:.4e}, count = {}, acceptance ratio {:.2e}",
@@ -262,17 +264,18 @@ fn main() {
     println!("run hill climbing");
     let optimizer = HillClimbingOptimizer::new(1000, 200);
-    let (final_state, final_score, _) = optimizer.optimize(
+    let (final_solution, final_score, _) = optimizer.optimize(
         &tsp_model,
-        initial_state.clone(),
+        initial_solution.clone(),
         n_iter,
+        time_limit,
         Some(&callback),
         (),
     );
     println!(
         "final score = {}, num of cities {}",
         final_score,
-        final_state.len()
+        final_solution.len()
     );
     pb.finish_and_clear();
     pb.reset();
@@ -280,67 +283,76 @@ fn main() {
     println!("run tabu search");
     let tabu_list = DequeTabuList::new(20);
     let optimizer = TabuSearchOptimizer::new(patience, 200, 10);
-    let (final_state, final_score, _) = optimizer.optimize(
+    let (final_solution, final_score, _) = optimizer.optimize(
         &tsp_model,
-        initial_state.clone(),
+        initial_solution.clone(),
         n_iter,
+        time_limit,
         Some(&callback),
         tabu_list,
     );
     println!(
         "final score = {}, num of cities {}",
         final_score,
-        final_state.len()
+        final_solution.len()
     );
     pb.finish_and_clear();
     pb.reset();

     println!("run annealing");
     let optimizer = SimulatedAnnealingOptimizer::new(patience, 200);
-    let (final_state, final_score, _) = optimizer.optimize(
+    let (final_solution, final_score, _) = optimizer.optimize(
         &tsp_model,
-        initial_state.clone(),
+        initial_solution.clone(),
         n_iter,
+        time_limit,
         Some(&callback),
         (200.0, 50.0),
     );
     println!(
         "final score = {}, num of cities {}",
         final_score,
-        final_state.len()
+        final_solution.len()
     );
     pb.finish_and_clear();
     pb.reset();

     println!("run epsilon greedy");
     let optimizer = EpsilonGreedyOptimizer::new(patience, 200, 10, 0.3);
-    let (final_state, final_score, _) = optimizer.optimize(
+    let (final_solution, final_score, _) = optimizer.optimize(
         &tsp_model,
-        initial_state.clone(),
+        initial_solution.clone(),
         n_iter,
+        time_limit,
         Some(&callback),
         (),
     );
     println!(
         "final score = {}, num of cities {}",
         final_score,
-        final_state.len()
+        final_solution.len()
     );
     pb.finish_and_clear();
     pb.reset();

     println!("run relative annealing");
     let optimizer = RelativeAnnealingOptimizer::new(patience, 200, 10, 1e1);
-    let (final_state, final_score, _) =
-        optimizer.optimize(&tsp_model, initial_state, n_iter, Some(&callback), ());
+    let (final_solution, final_score, _) = optimizer.optimize(
+        &tsp_model,
+        initial_solution,
+        n_iter,
+        time_limit,
+        Some(&callback),
+        (),
+    );
     println!(
         "final score = {}, num of cities {}",
         final_score,
-        final_state.len()
+        final_solution.len()
     );

     let opt_route_file = args.get(2).unwrap();
-    let opt_state = read_lines(opt_route_file)
+    let opt_solution = read_lines(opt_route_file)
         .unwrap()
         .into_iter()
         .map(|line| {
@@ -349,10 +361,10 @@ fn main() {
         })
         .collect::<Vec<_>>();

-    let opt_score = tsp_model.evaluate_state(&opt_state);
+    let opt_score = tsp_model.evaluate_solution(&opt_solution);
     println!(
         "optimal score = {}, num of cities {}",
         opt_score,
-        opt_state.len()
+        opt_solution.len()
     );
 }
diff --git a/src/callback.rs b/src/callback.rs
index 7cfd5b6..540e507 100644
--- a/src/callback.rs
+++ b/src/callback.rs
@@ -11,19 +11,19 @@ pub struct OptProgress<S, SC> {
     pub iter: usize,
     /// number of accepted transitions
     pub accepted_count: usize,
-    /// current best state
-    pub state: Rc<RefCell<S>>,
+    /// current best solution
+    pub solution: Rc<RefCell<S>>,
     /// current best score
     pub score: SC,
 }

 impl<S, SC> OptProgress<S, SC> {
     /// constuctor of OptProgress
-    pub fn new(iter: usize, accepted_count: usize, state: Rc<RefCell<S>>, score: SC) -> Self {
+    pub fn new(iter: usize, accepted_count: usize, solution: Rc<RefCell<S>>, score: SC) -> Self {
         Self {
             iter,
             accepted_count,
-            state,
+            solution,
             score,
         }
     }
@@ -47,7 +47,7 @@ trait_set! {
 ///     pb.set_draw_target(ProgressDrawTarget::stderr_with_hz(10));
 ///     pb
 /// };
-/// let callback = |op: OptProgress<StateType, ScoreType>| {
+/// let callback = |op: OptProgress<SolutionType, ScoreType>| {
 ///     let ratio = op.accepted_count as f64 / op.iter as f64;
 ///     pb.set_message(format!(
 ///         "best score {:.4e}, count = {}, acceptance ratio {:.2e}",
diff --git a/src/model.rs b/src/model.rs
index 3aa2710..c73199e 100644
--- a/src/model.rs
+++ b/src/model.rs
@@ -7,25 +7,25 @@ use auto_impl::auto_impl;
 pub trait OptModel: Sync + Send {
     /// Type of the Score
     type ScoreType: Ord + Copy + Sync + Send;
-    /// Type of the State
-    type StateType: Clone + Sync + Send;
+    /// Type of the Solution
+    type SolutionType: Clone + Sync + Send;
     /// Type of the Transition
     type TransitionType: Clone + Sync + Send;

-    /// Randomly generate a state
-    fn generate_random_state<R: rand::Rng>(
+    /// Randomly generate a solution
+    fn generate_random_solution<R: rand::Rng>(
         &self,
         rng: &mut R,
-    ) -> Result<Self::StateType, Box<dyn Error>>;
+    ) -> Result<Self::SolutionType, Box<dyn Error>>;

-    /// Generate a new trial state from current state
-    fn generate_trial_state<R: rand::Rng>(
+    /// Generate a new trial solution from current solution
+    fn generate_trial_solution<R: rand::Rng>(
         &self,
-        current_state: &Self::StateType,
+        current_solution: &Self::SolutionType,
         rng: &mut R,
         current_score: Option<Self::ScoreType>,
-    ) -> (Self::StateType, Self::TransitionType, Self::ScoreType);
+    ) -> (Self::SolutionType, Self::TransitionType, Self::ScoreType);

-    /// Evaluate the given state
-    fn evaluate_state(&self, state: &Self::StateType) -> Self::ScoreType;
+    /// Evaluate the given solution
+    fn evaluate_solution(&self, solution: &Self::SolutionType) -> Self::ScoreType;
 }
diff --git a/src/optim/base.rs b/src/optim/base.rs
index 65c1871..4ebc048 100644
--- a/src/optim/base.rs
+++ b/src/optim/base.rs
@@ -1,3 +1,5 @@
+use std::time::Duration;
+
 use auto_impl::auto_impl;
 use trait_set::trait_set;

@@ -15,14 +17,15 @@ pub trait LocalSearchOptimizer<M: OptModel> {
     fn optimize<F>(
         &self,
         model: &M,
-        initial_state: Option<M::StateType>,
+        initial_solution: Option<M::SolutionType>,
         n_iter: usize,
+        time_limit: Duration,
         callback: Option<&F>,
         extra_in: Self::ExtraIn,
-    ) -> (M::StateType, M::ScoreType, Self::ExtraOut)
+    ) -> (M::SolutionType, M::ScoreType, Self::ExtraOut)
     where
         M: OptModel,
-        F: OptCallbackFn<M::StateType, M::ScoreType>;
+        F: OptCallbackFn<M::SolutionType, M::ScoreType>;
 }

 trait_set! {
diff --git a/src/optim/epsilon_greedy.rs b/src/optim/epsilon_greedy.rs
index 354bef3..e3f98eb 100644
--- a/src/optim/epsilon_greedy.rs
+++ b/src/optim/epsilon_greedy.rs
@@ -1,3 +1,5 @@
+use std::time::Duration;
+
 use crate::{callback::OptCallbackFn, OptModel};

 use super::{base::LocalSearchOptimizer, GenericLocalSearchOptimizer};
@@ -25,8 +27,8 @@ impl EpsilonGreedyOptimizer {
     ///
     /// - `patience` : the optimizer will give up
     /// if there is no improvement of the score after this number of iterations
-    /// - `n_trials` : number of trial states to generate and evaluate at each iteration
-    /// - `return_iter` : returns to the current best state if there is no improvement after this number of iterations.
+    /// - `n_trials` : number of trial solutions to generate and evaluate at each iteration
+    /// - `return_iter` : returns to the current best solution if there is no improvement after this number of iterations.
     /// - `epsilon` : probability to accept a transition that worsens the score. Must be in [0, 1].
     pub fn new(patience: usize, n_trials: usize, return_iter: usize, epsilon: f64) -> Self {
         Self {
@@ -44,21 +46,22 @@ impl<M: OptModel> LocalSearchOptimizer<M> for EpsilonGreedyOptimizer {
     /// Start optimization
     ///
     /// - `model` : the model to optimize
-    /// - `initial_state` : the initial state to start optimization. If None, a random state will be generated.
+    /// - `initial_solution` : the initial solution to start optimization. If None, a random solution will be generated.
     /// - `n_iter`: maximum iterations
     /// - `callback` : callback function that will be invoked at the end of each iteration
     /// - `_extra_in` : not used
     fn optimize<F>(
         &self,
         model: &M,
-        initial_state: Option<M::StateType>,
+        initial_solution: Option<M::SolutionType>,
         n_iter: usize,
+        time_limit: Duration,
         callback: Option<&F>,
         _extra_in: Self::ExtraIn,
-    ) -> (M::StateType, M::ScoreType, Self::ExtraOut)
+    ) -> (M::SolutionType, M::ScoreType, Self::ExtraOut)
     where
         M: OptModel + Sync + Send,
-        F: OptCallbackFn<M::StateType, M::ScoreType>,
+        F: OptCallbackFn<M::SolutionType, M::ScoreType>,
     {
         let optimizer = GenericLocalSearchOptimizer::new(
             self.patience,
@@ -66,6 +69,13 @@ impl<M: OptModel> LocalSearchOptimizer<M> for EpsilonGreedyOptimizer {
             self.return_iter,
             |current, trial| transition_prob(current, trial, self.epsilon),
         );
-        optimizer.optimize(model, initial_state, n_iter, callback, _extra_in)
+        optimizer.optimize(
+            model,
+            initial_solution,
+            n_iter,
+            time_limit,
+            callback,
+            _extra_in,
+        )
     }
 }
diff --git a/src/optim/generic.rs b/src/optim/generic.rs
index bd7751b..54d944f 100644
--- a/src/optim/generic.rs
+++ b/src/optim/generic.rs
@@ -1,4 +1,9 @@
-use std::{cell::RefCell, marker::PhantomData, rc::Rc};
+use std::{
+    cell::RefCell,
+    marker::PhantomData,
+    rc::Rc,
+    time::{Duration, Instant},
+};

 use rand::Rng;
 use rayon::prelude::*;
@@ -12,7 +17,7 @@ use super::{LocalSearchOptimizer, TransitionProbabilityFn};

 /// Optimizer that implements local search algorithm
 /// Given a functin f that converts a float number to probability,
-/// the trial state is accepted by the following procedure
+/// the trial solution is accepted by the following procedure
 ///
 /// 1. p <- f(current_score, trial_score)
 /// 2. accept if p > rand(0, 1)
@@ -35,8 +40,8 @@ impl<ST: Ord + Sync + Send + Copy, FT: TransitionProbabilityFn<ST>>
     ///
     /// - `patience` : the optimizer will give up
     /// if there is no improvement of the score after this number of iterations
-    /// - `n_trials` : number of trial states to generate and evaluate at each iteration
-    /// - `return_iter` : returns to the current best state if there is no improvement after this number of iterations.
+    /// - `n_trials` : number of trial solutions to generate and evaluate at each iteration
+    /// - `return_iter` : returns to the current best solution if there is no improvement after this number of iterations.
     /// - `score_func` : score function to calculate transition probability.
     pub fn new(patience: usize, n_trials: usize, return_iter: usize, score_func: FT) -> Self {
         Self {
@@ -60,41 +65,50 @@ where
     /// Start optimization
     ///
     /// - `model` : the model to optimize
-    /// - `initial_state` : the initial state to start optimization. If None, a random state will be generated.
+    /// - `initial_solution` : the initial solution to start optimization. If None, a random solution will be generated.
     /// - `n_iter`: maximum iterations
     /// - `callback` : callback function that will be invoked at the end of each iteration
     /// - `_extra_in` : not used
     fn optimize<F>(
         &self,
         model: &M,
-        initial_state: Option<M::StateType>,
+        initial_solution: Option<M::SolutionType>,
         n_iter: usize,
+        time_limit: Duration,
         callback: Option<&F>,
         _extra_in: Self::ExtraIn,
-    ) -> (M::StateType, M::ScoreType, Self::ExtraOut)
+    ) -> (M::SolutionType, M::ScoreType, Self::ExtraOut)
     where
-        F: OptCallbackFn<M::StateType, M::ScoreType>,
+        F: OptCallbackFn<M::SolutionType, M::ScoreType>,
     {
+        let start_time = Instant::now();
         let mut rng = rand::thread_rng();
-        let mut current_state = if let Some(s) = initial_state {
+        let mut current_solution = if let Some(s) = initial_solution {
             s
         } else {
-            model.generate_random_state(&mut rng).unwrap()
+            model.generate_random_solution(&mut rng).unwrap()
         };
-        let mut current_score = model.evaluate_state(&current_state);
-        let best_state = Rc::new(RefCell::new(current_state.clone()));
+        let mut current_score = model.evaluate_solution(&current_solution);
+        let best_solution = Rc::new(RefCell::new(current_solution.clone()));
         let mut best_score = current_score;
         let mut accepted_counter = 0;
         let mut counter = 0;

         for it in 0..n_iter {
+            let duration = Instant::now().duration_since(start_time);
+            if duration > time_limit {
+                break;
+            }
-            let (trial_state, trial_score) = (0..self.n_trials)
+            let (trial_solution, trial_score) = (0..self.n_trials)
                 .into_par_iter()
                 .map(|_| {
                     let mut rng = rand::thread_rng();
-                    let (state, _, score) =
-                        model.generate_trial_state(&current_state, &mut rng, Some(current_score));
-                    (state, score)
+                    let (solution, _, score) = model.generate_trial_solution(
+                        &current_solution,
+                        &mut rng,
+                        Some(current_score),
+                    );
+                    (solution, score)
                 })
                 .min_by_key(|(_, score)| *score)
                 .unwrap();
@@ -103,13 +117,13 @@ where
             let r: f64 = rng.gen();

             if p > r {
-                current_state = trial_state;
+                current_solution = trial_solution;
                 current_score = trial_score;
                 accepted_counter += 1;
             }

             if current_score < best_score {
-                best_state.replace(current_state.clone());
+                best_solution.replace(current_solution.clone());
                 best_score = current_score;
                 counter = 0;
             }
@@ -117,7 +131,7 @@ where
             counter += 1;

             if counter == self.return_iter {
-                current_state = best_state.borrow().clone();
+                current_solution = best_solution.borrow().clone();
                 current_score = best_score;
             }
@@ -127,12 +141,12 @@ where

             if let Some(f) = callback {
                 let progress =
-                    OptProgress::new(it, accepted_counter, best_state.clone(), best_score);
+                    OptProgress::new(it, accepted_counter, best_solution.clone(), best_score);
                 f(progress);
             }
         }

-        let best_state = (*best_state.borrow()).clone();
-        (best_state, best_score, ())
+        let best_solution = (*best_solution.borrow()).clone();
+        (best_solution, best_score, ())
     }
 }
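Since the check above runs at the top of each iteration, a run can overshoot `time_limit` by up to one full round of `n_trials` parallel evaluations, and a zero budget stops the search before any trial is generated. A hypothetical illustration (reusing the `QuadraticModel` and `HillClimbingOptimizer` from the examples; not part of this patch):

```rust
use std::time::Duration;

let model = QuadraticModel::new(3, vec![2.0, 0.0, -3.5], (-10.0, 10.0));
let opt = HillClimbingOptimizer::new(1000, 10);

// With Duration::ZERO the first top-of-loop check already fires, so the
// optimizer returns the initial (here: random) solution after a single
// evaluation; no trial solutions are ever generated.
let (solution, score, _) =
    opt.optimize(&model, None, 10000, Duration::ZERO, None::<&fn(_)>, ());
```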
diff --git a/src/optim/hill_climbing.rs b/src/optim/hill_climbing.rs
index 9ae82bd..4279777 100644
--- a/src/optim/hill_climbing.rs
+++ b/src/optim/hill_climbing.rs
@@ -1,3 +1,5 @@
+use std::time::Duration;
+
 use crate::{callback::OptCallbackFn, OptModel};

 use super::{EpsilonGreedyOptimizer, LocalSearchOptimizer};
@@ -12,7 +14,7 @@ pub struct HillClimbingOptimizer {
 impl HillClimbingOptimizer {
     /// - `patience` : the optimizer will give up
     /// if there is no improvement of the score after this number of iterations
-    /// - `n_trials` : number of trial states to generate and evaluate at each iteration
+    /// - `n_trials` : number of trial solutions to generate and evaluate at each iteration
     pub fn new(patience: usize, n_trials: usize) -> Self {
         Self { patience, n_trials }
     }
@@ -25,22 +27,30 @@ impl<M: OptModel> LocalSearchOptimizer<M> for HillClimbingOptimizer {
     /// Start optimization
     ///
     /// - `model` : the model to optimize
-    /// - `initial_state` : the initial state to start optimization. If None, a random state will be generated.
+    /// - `initial_solution` : the initial solution to start optimization. If None, a random solution will be generated.
     /// - `n_iter`: maximum iterations
     /// - `callback` : callback function that will be invoked at the end of each iteration
     /// - `_extra_in` : not used
     fn optimize<F>(
         &self,
         model: &M,
-        initial_state: Option<M::StateType>,
+        initial_solution: Option<M::SolutionType>,
         n_iter: usize,
+        time_limit: Duration,
         callback: Option<&F>,
         _extra_in: Self::ExtraIn,
-    ) -> (M::StateType, M::ScoreType, Self::ExtraOut)
+    ) -> (M::SolutionType, M::ScoreType, Self::ExtraOut)
     where
-        F: OptCallbackFn<M::StateType, M::ScoreType>,
+        F: OptCallbackFn<M::SolutionType, M::ScoreType>,
     {
         let optimizer = EpsilonGreedyOptimizer::new(self.patience, self.n_trials, usize::MAX, 0.0);
-        optimizer.optimize(model, initial_state, n_iter, callback, _extra_in)
+        optimizer.optimize(
+            model,
+            initial_solution,
+            n_iter,
+            time_limit,
+            callback,
+            _extra_in,
+        )
     }
 }
diff --git a/src/optim/logistic_annealing.rs b/src/optim/logistic_annealing.rs
index f01a67d..59472d6 100644
--- a/src/optim/logistic_annealing.rs
+++ b/src/optim/logistic_annealing.rs
@@ -1,3 +1,5 @@
+use std::time::Duration;
+
 use ordered_float::NotNan;

 use crate::{callback::OptCallbackFn, OptModel};
@@ -12,7 +14,7 @@ fn transition_prob<T: Into<NotNan<f64>>>(current: T, trial: T, w: f64) -> f64 {
 }

 /// Optimizer that implements logistic annealing algorithm
-/// In this model, unlike simulated annealing, wether accept the trial state or not is calculated based on relative score difference
+/// In this model, unlike simulated annealing, whether to accept the trial solution or not is calculated based on relative score difference
 ///
 /// 1. d <- (trial_score - current_score) / current_score
 /// 2. p <- 2.0 / (1.0 + exp(w * d))
@@ -30,8 +32,8 @@ impl LogisticAnnealingOptimizer {
     ///
     /// - `patience` : the optimizer will give up
     /// if there is no improvement of the score after this number of iterations
-    /// - `n_trials` : number of trial states to generate and evaluate at each iteration
-    /// - `return_iter` : returns to the current best state if there is no improvement after this number of iterations.
+    /// - `n_trials` : number of trial solutions to generate and evaluate at each iteration
+    /// - `return_iter` : returns to the current best solution if there is no improvement after this number of iterations.
     /// - `w` : weight to be multiplied with the relative score difference.
     pub fn new(patience: usize, n_trials: usize, return_iter: usize, w: f64) -> Self {
         Self {
@@ -49,20 +51,21 @@ impl<M: OptModel<ScoreType = NotNan<f64>>> LocalSearchOptimizer<M> for LogisticAnnealingOptimizer {
     /// Start optimization
     ///
     /// - `model` : the model to optimize
-    /// - `initial_state` : the initial state to start optimization. If None, a random state will be generated.
+    /// - `initial_solution` : the initial solution to start optimization. If None, a random solution will be generated.
     /// - `n_iter`: maximum iterations
     /// - `callback` : callback function that will be invoked at the end of each iteration
     /// - `_extra_in` : not used
     fn optimize<F>(
         &self,
         model: &M,
-        initial_state: Option<M::StateType>,
+        initial_solution: Option<M::SolutionType>,
         n_iter: usize,
+        time_limit: Duration,
         callback: Option<&F>,
         _extra_in: Self::ExtraIn,
-    ) -> (M::StateType, M::ScoreType, Self::ExtraOut)
+    ) -> (M::SolutionType, M::ScoreType, Self::ExtraOut)
     where
-        F: OptCallbackFn<M::StateType, M::ScoreType>,
+        F: OptCallbackFn<M::SolutionType, M::ScoreType>,
     {
         let optimizer = GenericLocalSearchOptimizer::new(
             self.patience,
@@ -71,7 +74,14 @@ impl<M: OptModel<ScoreType = NotNan<f64>>> LocalSearchOptimizer<M> for LogisticAnnealingOptimizer {
             |current, trial| transition_prob(current, trial, self.w),
         );

-        optimizer.optimize(model, initial_state, n_iter, callback, _extra_in)
+        optimizer.optimize(
+            model,
+            initial_solution,
+            n_iter,
+            time_limit,
+            callback,
+            _extra_in,
+        )
     }
 }
diff --git a/src/optim/relative_annealing.rs b/src/optim/relative_annealing.rs
index 3fa28ef..003644b 100644
--- a/src/optim/relative_annealing.rs
+++ b/src/optim/relative_annealing.rs
@@ -1,3 +1,5 @@
+use std::time::Duration;
+
 use ordered_float::NotNan;

 use crate::{callback::OptCallbackFn, OptModel};
@@ -12,7 +14,7 @@ fn transition_prob<T: Into<NotNan<f64>>>(current: T, trial: T, w: f64) -> f64 {
 }

 /// Optimizer that implements relative annealing algorithm
-/// In this model, unlike simulated annealing, wether accept the trial state or not is calculated based on relative score difference
+/// In this model, unlike simulated annealing, whether to accept the trial solution or not is calculated based on relative score difference
 ///
 /// 1. d <- (trial_score - current_score) / current_score
 /// 2. p <- exp(-w * d)
@@ -30,8 +32,8 @@ impl RelativeAnnealingOptimizer {
     ///
     /// - `patience` : the optimizer will give up
     /// if there is no improvement of the score after this number of iterations
-    /// - `n_trials` : number of trial states to generate and evaluate at each iteration
-    /// - `return_iter` : returns to the current best state if there is no improvement after this number of iterations.
+    /// - `n_trials` : number of trial solutions to generate and evaluate at each iteration
+    /// - `return_iter` : returns to the current best solution if there is no improvement after this number of iterations.
     /// - `w` : weight to be multiplied with the relative score difference.
     pub fn new(patience: usize, n_trials: usize, return_iter: usize, w: f64) -> Self {
         Self {
@@ -50,20 +52,21 @@ impl<M: OptModel<ScoreType = NotNan<f64>>> LocalSearchOptimizer<M> for RelativeAnnealingOptimizer {
     /// Start optimization
     ///
     /// - `model` : the model to optimize
-    /// - `initial_state` : the initial state to start optimization. If None, a random state will be generated.
+    /// - `initial_solution` : the initial solution to start optimization. If None, a random solution will be generated.
     /// - `n_iter`: maximum iterations
     /// - `callback` : callback function that will be invoked at the end of each iteration
     /// - `_extra_in` : not used
     fn optimize<F>(
         &self,
         model: &M,
-        initial_state: Option<M::StateType>,
+        initial_solution: Option<M::SolutionType>,
         n_iter: usize,
+        time_limit: Duration,
         callback: Option<&F>,
         _extra_in: Self::ExtraIn,
-    ) -> (M::StateType, M::ScoreType, Self::ExtraOut)
+    ) -> (M::SolutionType, M::ScoreType, Self::ExtraOut)
     where
-        F: OptCallbackFn<M::StateType, M::ScoreType>,
+        F: OptCallbackFn<M::SolutionType, M::ScoreType>,
     {
         let optimizer = GenericLocalSearchOptimizer::new(
             self.patience,
@@ -72,7 +75,14 @@ impl<M: OptModel<ScoreType = NotNan<f64>>> LocalSearchOptimizer<M> for RelativeAnnealingOptimizer {
             |current, trial| transition_prob(current, trial, self.w),
         );

-        optimizer.optimize(model, initial_state, n_iter, callback, _extra_in)
+        optimizer.optimize(
+            model,
+            initial_solution,
+            n_iter,
+            time_limit,
+            callback,
+            _extra_in,
+        )
     }
 }
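Both annealing variants above map the relative score difference d = (trial_score - current_score) / current_score to an acceptance probability; they differ only in the curve. A self-contained sketch of the two formulas from the doc comments (plain free functions for illustration; the crate's own `transition_prob` helpers are private):

```rust
// Relative annealing: p = exp(-w * d)
fn relative_prob(d: f64, w: f64) -> f64 {
    (-w * d).exp()
}

// Logistic annealing: p = 2 / (1 + exp(w * d))
fn logistic_prob(d: f64, w: f64) -> f64 {
    2.0 / (1.0 + (w * d).exp())
}

fn main() {
    // A trial 1% worse than the current solution (d = 0.01) with w = 10:
    println!("{:.3}", relative_prob(0.01, 10.0)); // ~0.905
    println!("{:.3}", logistic_prob(0.01, 10.0)); // ~0.950
    // Improving moves (d <= 0) give p >= 1, which is always accepted,
    // since acceptance tests p > rand(0, 1).
}
```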
diff --git a/src/optim/simulated_annealing.rs b/src/optim/simulated_annealing.rs
index 5bb1e2e..d043a28 100644
--- a/src/optim/simulated_annealing.rs
+++ b/src/optim/simulated_annealing.rs
@@ -1,4 +1,8 @@
-use std::{cell::RefCell, rc::Rc};
+use std::{
+    cell::RefCell,
+    rc::Rc,
+    time::{Duration, Instant},
+};

 use ordered_float::NotNan;
 use rand::Rng;
@@ -23,7 +27,7 @@ impl SimulatedAnnealingOptimizer {
     ///
     /// - `patience` : the optimizer will give up
     /// if there is no improvement of the score after this number of iterations
-    /// - `n_trials` : number of trial states to generate and evaluate at each iteration
+    /// - `n_trials` : number of trial solutions to generate and evaluate at each iteration
     pub fn new(patience: usize, n_trials: usize) -> Self {
         Self { patience, n_trials }
     }
@@ -37,30 +41,32 @@ impl<M: OptModel<ScoreType = NotNan<f64>>> LocalSearchOptimizer<M> for SimulatedAnnealingOptimizer {
     /// Start optimization
     ///
     /// - `model` : the model to optimize
-    /// - `initial_state` : the initial state to start optimization. If None, a random state will be generated.
+    /// - `initial_solution` : the initial solution to start optimization. If None, a random solution will be generated.
     /// - `n_iter`: maximum iterations
     /// - `callback` : callback function that will be invoked at the end of each iteration
     /// - `max_min_temperatures` : (max_temperature, min_temperature)
     fn optimize<F>(
         &self,
         model: &M,
-        initial_state: Option<M::StateType>,
+        initial_solution: Option<M::SolutionType>,
         n_iter: usize,
+        time_limit: Duration,
         callback: Option<&F>,
         max_min_temperatures: Self::ExtraIn,
-    ) -> (M::StateType, M::ScoreType, Self::ExtraOut)
+    ) -> (M::SolutionType, M::ScoreType, Self::ExtraOut)
     where
-        F: OptCallbackFn<M::StateType, M::ScoreType>,
+        F: OptCallbackFn<M::SolutionType, M::ScoreType>,
     {
+        let start_time = Instant::now();
         let (max_temperature, min_temperature) = max_min_temperatures;
         let mut rng = rand::thread_rng();
-        let mut current_state = if let Some(s) = initial_state {
+        let mut current_solution = if let Some(s) = initial_solution {
             s
         } else {
-            model.generate_random_state(&mut rng).unwrap()
+            model.generate_random_solution(&mut rng).unwrap()
         };
-        let mut current_score = model.evaluate_state(&current_state);
-        let best_state = Rc::new(RefCell::new(current_state.clone()));
+        let mut current_score = model.evaluate_solution(&current_solution);
+        let best_solution = Rc::new(RefCell::new(current_solution.clone()));
         let mut best_score = current_score;
         let mut accepted_counter = 0;
         let mut temperature = max_temperature;
@@ -68,13 +74,20 @@ impl<M: OptModel<ScoreType = NotNan<f64>>> LocalSearchOptimizer<M> for SimulatedAnnealingOptimizer {
         let mut counter = 0;

         for it in 0..n_iter {
+            let duration = Instant::now().duration_since(start_time);
+            if duration > time_limit {
+                break;
+            }
-            let (trial_state, trial_score) = (0..self.n_trials)
+            let (trial_solution, trial_score) = (0..self.n_trials)
                 .into_par_iter()
                 .map(|_| {
                     let mut rng = rand::thread_rng();
-                    let (state, _, score) =
-                        model.generate_trial_state(&current_state, &mut rng, Some(current_score));
-                    (state, score)
+                    let (solution, _, score) = model.generate_trial_solution(
+                        &current_solution,
+                        &mut rng,
+                        Some(current_score),
+                    );
+                    (solution, score)
                 })
                 .min_by_key(|(_, score)| *score)
                 .unwrap();
@@ -84,13 +97,13 @@ impl<M: OptModel<ScoreType = NotNan<f64>>> LocalSearchOptimizer<M> for SimulatedAnnealingOptimizer {
             let r: f64 = rng.gen();

             if p > r {
-                current_state = trial_state;
+                current_solution = trial_solution;
                 current_score = trial_score;
                 accepted_counter += 1;
             }

             if current_score < best_score {
-                best_state.replace(current_state.clone());
+                best_solution.replace(current_solution.clone());
                 best_score = current_score;
                 counter = 0;
             }
@@ -104,12 +117,12 @@ impl<M: OptModel<ScoreType = NotNan<f64>>> LocalSearchOptimizer<M> for SimulatedAnnealingOptimizer {

             if let Some(f) = callback {
                 let progress =
-                    OptProgress::new(it, accepted_counter, best_state.clone(), best_score);
+                    OptProgress::new(it, accepted_counter, best_solution.clone(), best_score);
                 f(progress);
             }
         }

-        let best_state = (*best_state.borrow()).clone();
-        (best_state, best_score, ())
+        let best_solution = (*best_solution.borrow()).clone();
+        (best_solution, best_score, ())
     }
 }
diff --git a/src/optim/tabu_search.rs b/src/optim/tabu_search.rs
index e782d62..441bc82 100644
--- a/src/optim/tabu_search.rs
+++ b/src/optim/tabu_search.rs
@@ -1,4 +1,9 @@
-use std::{cell::RefCell, marker::PhantomData, rc::Rc};
+use std::{
+    cell::RefCell,
+    marker::PhantomData,
+    rc::Rc,
+    time::{Duration, Instant},
+};

 use auto_impl::auto_impl;
 use rayon::prelude::*;
@@ -40,14 +45,14 @@ where
     L: TabuList,
     O: Ord,
 {
-    for (state, transition, score) in samples.into_iter() {
+    for (solution, transition, score) in samples.into_iter() {
         // Aspiration Criterion
         if score < best_score {
-            return Some((state, transition, score));
+            return Some((solution, transition, score));
         }

         // Not Tabu
-        let item = (state, transition);
+        let item = (solution, transition);
         if !tabu_list.contains(&item) {
             return Some((item.0, item.1, score));
         }
@@ -61,8 +66,8 @@ impl<T> TabuSearchOptimizer<T> {
     ///
     /// - `patience` : the optimizer will give up
     /// if there is no improvement of the score after this number of iterations
-    /// - `n_trials` : number of trial states to generate and evaluate at each iteration
-    /// - `return_iter` : returns to the current best state if there is no improvement after this number of iterations.
+    /// - `n_trials` : number of trial solutions to generate and evaluate at each iteration
+    /// - `return_iter` : returns to the current best solution if there is no improvement after this number of iterations.
     pub fn new(patience: usize, n_trials: usize, return_iter: usize) -> Self {
         Self {
             patience,
@@ -73,7 +78,7 @@ impl<T> TabuSearchOptimizer<T> {
     }
 }

-impl<M: OptModel, T: TabuList<Item = (M::StateType, M::TransitionType)>> LocalSearchOptimizer<M>
+impl<M: OptModel, T: TabuList<Item = (M::SolutionType, M::TransitionType)>> LocalSearchOptimizer<M>
     for TabuSearchOptimizer<T>
 {
     type ExtraIn = T;
@@ -82,42 +87,51 @@ impl<M: OptModel, T: TabuList<Item = (M::SolutionType, M::TransitionType)>> LocalSearchOptimizer<M>
     /// Start optimization
     ///
     /// - `model` : the model to optimize
-    /// - `initial_state` : the initial state to start optimization. If None, a random state will be generated.
+    /// - `initial_solution` : the initial solution to start optimization. If None, a random solution will be generated.
     /// - `n_iter`: maximum iterations
     /// - `callback` : callback function that will be invoked at the end of each iteration
     /// - `tabu_list` : initial tabu list
     fn optimize<F>(
         &self,
         model: &M,
-        initial_state: Option<M::StateType>,
+        initial_solution: Option<M::SolutionType>,
         n_iter: usize,
+        time_limit: Duration,
         callback: Option<&F>,
         mut tabu_list: Self::ExtraIn,
-    ) -> (M::StateType, M::ScoreType, Self::ExtraOut)
+    ) -> (M::SolutionType, M::ScoreType, Self::ExtraOut)
     where
-        F: OptCallbackFn<M::StateType, M::ScoreType>,
+        F: OptCallbackFn<M::SolutionType, M::ScoreType>,
     {
+        let start_time = Instant::now();
         let mut rng = rand::thread_rng();
-        let mut current_state = if let Some(s) = initial_state {
+        let mut current_solution = if let Some(s) = initial_solution {
             s
         } else {
-            model.generate_random_state(&mut rng).unwrap()
+            model.generate_random_solution(&mut rng).unwrap()
         };
-        let mut current_score = model.evaluate_state(&current_state);
-        let best_state = Rc::new(RefCell::new(current_state.clone()));
+        let mut current_score = model.evaluate_solution(&current_solution);
+        let best_solution = Rc::new(RefCell::new(current_solution.clone()));
         let mut best_score = current_score;
         let mut counter = 0;
         let mut accepted_counter = 0;

         for it in 0..n_iter {
+            let duration = Instant::now().duration_since(start_time);
+            if duration > time_limit {
+                break;
+            }
             let mut samples = vec![];
             (0..self.n_trials)
                 .into_par_iter()
                 .map(|_| {
                     let mut rng = rand::thread_rng();
-                    let (state, transitions, score) =
-                        model.generate_trial_state(&current_state, &mut rng, Some(current_score));
-                    (state, transitions, score)
+                    let (solution, transitions, score) = model.generate_trial_solution(
+                        &current_solution,
+                        &mut rng,
+                        Some(current_score),
+                    );
+                    (solution, transitions, score)
                 })
                 .collect_into_vec(&mut samples);
@@ -125,22 +139,22 @@ impl<M: OptModel, T: TabuList<Item = (M::SolutionType, M::TransitionType)>> LocalSearchOptimizer<M>
             let res = find_accepted_solution(samples, &tabu_list, best_score);

-            if let Some((state, trans, score)) = res {
+            if let Some((solution, trans, score)) = res {
                 if score < best_score {
                     best_score = score;
-                    best_state.replace(state.clone());
+                    best_solution.replace(solution.clone());
                     counter = 0;
                 }
                 current_score = score;
-                current_state = state.clone();
-                tabu_list.append((state, trans));
+                current_solution = solution.clone();
+                tabu_list.append((solution, trans));
                 accepted_counter += 1;
             }

             counter += 1;
             if counter == self.return_iter {
-                current_state = best_state.borrow().clone();
+                current_solution = best_solution.borrow().clone();
                 current_score = best_score;
             }
@@ -150,13 +164,13 @@ impl<M: OptModel, T: TabuList<Item = (M::SolutionType, M::TransitionType)>> LocalSearchOptimizer<M>

             if let Some(f) = callback {
                 let progress =
-                    OptProgress::new(it, accepted_counter, best_state.clone(), best_score);
+                    OptProgress::new(it, accepted_counter, best_solution.clone(), best_score);
                 f(progress);
             }
         }

-        let best_state = (*best_state.borrow()).clone();
+        let best_solution = (*best_solution.borrow()).clone();

-        (best_state, best_score, tabu_list)
+        (best_solution, best_score, tabu_list)
     }
 }
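Unlike the other optimizers, tabu search uses `ExtraIn`/`ExtraOut` to thread the tabu list through the call: the caller passes a list in and receives the updated list back as the third element of the result tuple. A usage sketch with the names from the tsp example above (reusing the returned list to warm-start a second run is an assumption; the example itself discards it with `_`):

```rust
use std::time::Duration;

let tabu_list = DequeTabuList::new(20);
let optimizer = TabuSearchOptimizer::new(patience, 200, 10);

let (best_solution, best_score, updated_tabu_list) = optimizer.optimize(
    &tsp_model,
    initial_solution.clone(),
    n_iter,
    Duration::from_secs(60),
    Some(&callback),
    tabu_list,
);

// `updated_tabu_list` could seed a follow-up run from the best solution:
let (best_solution2, best_score2, _) = optimizer.optimize(
    &tsp_model,
    Some(best_solution),
    n_iter,
    Duration::from_secs(60),
    Some(&callback),
    updated_tabu_list,
);
```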
diff --git a/src/tests.rs b/src/tests.rs
index 3e2e381..b9406c7 100644
--- a/src/tests.rs
+++ b/src/tests.rs
@@ -20,39 +20,39 @@ impl QuadraticModel {
     }
 }

-type StateType = Vec<f64>;
+type SolutionType = Vec<f64>;
 type TransitionType = (usize, f64, f64);

 impl OptModel for QuadraticModel {
-    type StateType = StateType;
+    type SolutionType = SolutionType;
     type TransitionType = TransitionType;
     type ScoreType = NotNan<f64>;

-    fn generate_random_state<R: rand::Rng>(
+    fn generate_random_solution<R: rand::Rng>(
         &self,
         rng: &mut R,
-    ) -> Result<Self::StateType, Box<dyn Error>> {
-        let state = self.dist.sample_iter(rng).take(self.k).collect::<Vec<_>>();
-        Ok(state)
+    ) -> Result<Self::SolutionType, Box<dyn Error>> {
+        let solution = self.dist.sample_iter(rng).take(self.k).collect::<Vec<_>>();
+        Ok(solution)
     }

-    fn generate_trial_state<R: rand::Rng>(
+    fn generate_trial_solution<R: rand::Rng>(
         &self,
-        current_state: &Self::StateType,
+        current_solution: &Self::SolutionType,
         rng: &mut R,
         _current_score: Option<NotNan<f64>>,
-    ) -> (Self::StateType, Self::TransitionType, NotNan<f64>) {
+    ) -> (Self::SolutionType, Self::TransitionType, NotNan<f64>) {
         let k = rng.gen_range(0..self.k);
         let v = self.dist.sample(rng);
-        let mut new_state = current_state.clone();
-        new_state[k] = v;
-        let score = self.evaluate_state(&new_state);
-        (new_state, (k, current_state[k], v), score)
+        let mut new_solution = current_solution.clone();
+        new_solution[k] = v;
+        let score = self.evaluate_solution(&new_solution);
+        (new_solution, (k, current_solution[k], v), score)
     }

-    fn evaluate_state(&self, state: &Self::StateType) -> NotNan<f64> {
+    fn evaluate_solution(&self, solution: &Self::SolutionType) -> NotNan<f64> {
         let score = (0..self.k)
             .into_iter()
-            .map(|i| (state[i] - self.centers[i]).powf(2.0))
+            .map(|i| (solution[i] - self.centers[i]).powf(2.0))
             .sum();
         NotNan::new(score).unwrap()
     }
diff --git a/src/tests/test_epsilon_greedy.rs b/src/tests/test_epsilon_greedy.rs
index ff65e35..db7b397 100644
--- a/src/tests/test_epsilon_greedy.rs
+++ b/src/tests/test_epsilon_greedy.rs
@@ -1,3 +1,5 @@
+use std::time::Duration;
+
 use approx::assert_abs_diff_eq;

 use crate::optim::{EpsilonGreedyOptimizer, LocalSearchOptimizer};
@@ -9,9 +11,16 @@ fn test() {
     let model = QuadraticModel::new(3, vec![2.0, 0.0, -3.5], (-10.0, 10.0));
     let opt = EpsilonGreedyOptimizer::new(1000, 10, 200, 0.1);
     let null_closure = None::<&fn(_)>;
-    let (final_state, final_score, _) = opt.optimize(&model, None, 10000, null_closure, ());
-    assert_abs_diff_eq!(2.0, final_state[0], epsilon = 0.05);
-    assert_abs_diff_eq!(0.0, final_state[1], epsilon = 0.05);
-    assert_abs_diff_eq!(-3.5, final_state[2], epsilon = 0.05);
+    let (final_solution, final_score, _) = opt.optimize(
+        &model,
+        None,
+        10000,
+        Duration::from_secs(10),
+        null_closure,
+        (),
+    );
+    assert_abs_diff_eq!(2.0, final_solution[0], epsilon = 0.05);
+    assert_abs_diff_eq!(0.0, final_solution[1], epsilon = 0.05);
+    assert_abs_diff_eq!(-3.5, final_solution[2], epsilon = 0.05);
     assert_abs_diff_eq!(0.0, final_score.into_inner(), epsilon = 0.05);
 }
diff --git a/src/tests/test_hill_climbing.rs b/src/tests/test_hill_climbing.rs
index 320ce67..37202bf 100644
--- a/src/tests/test_hill_climbing.rs
+++ b/src/tests/test_hill_climbing.rs
@@ -1,3 +1,5 @@
+use std::time::Duration;
+
 use approx::assert_abs_diff_eq;

 use crate::optim::{HillClimbingOptimizer, LocalSearchOptimizer};
@@ -9,9 +11,16 @@ fn test() {
     let model = QuadraticModel::new(3, vec![2.0, 0.0, -3.5], (-10.0, 10.0));
     let opt = HillClimbingOptimizer::new(1000, 10);
     let null_closure = None::<&fn(_)>;
-    let (final_state, final_score, _) = opt.optimize(&model, None, 10000, null_closure, ());
-    assert_abs_diff_eq!(2.0, final_state[0], epsilon = 0.05);
-    assert_abs_diff_eq!(0.0, final_state[1], epsilon = 0.05);
-    assert_abs_diff_eq!(-3.5, final_state[2], epsilon = 0.05);
+    let (final_solution, final_score, _) = opt.optimize(
+        &model,
+        None,
+        10000,
+        Duration::from_secs(10),
+        null_closure,
+        (),
+    );
+    assert_abs_diff_eq!(2.0, final_solution[0], epsilon = 0.05);
+    assert_abs_diff_eq!(0.0, final_solution[1], epsilon = 0.05);
+    assert_abs_diff_eq!(-3.5, final_solution[2], epsilon = 0.05);
     assert_abs_diff_eq!(0.0, final_score.into_inner(), epsilon = 0.05);
 }
diff --git a/src/tests/test_logistic_annealing.rs b/src/tests/test_logistic_annealing.rs
index 33a6f47..3810165 100644
--- a/src/tests/test_logistic_annealing.rs
+++ b/src/tests/test_logistic_annealing.rs
@@ -1,3 +1,5 @@
+use std::time::Duration;
+
 use approx::assert_abs_diff_eq;

 use crate::optim::{LocalSearchOptimizer, LogisticAnnealingOptimizer};
@@ -9,9 +11,16 @@ fn test() {
     let model = QuadraticModel::new(3, vec![2.0, 0.0, -3.5], (-10.0, 10.0));
     let opt = LogisticAnnealingOptimizer::new(5000, 10, 200, 1e1);
     let null_closure = None::<&fn(_)>;
-    let (final_state, final_score, _) = opt.optimize(&model, None, 10000, null_closure, ());
-    assert_abs_diff_eq!(2.0, final_state[0], epsilon = 0.05);
-    assert_abs_diff_eq!(0.0, final_state[1], epsilon = 0.05);
-    assert_abs_diff_eq!(-3.5, final_state[2], epsilon = 0.05);
+    let (final_solution, final_score, _) = opt.optimize(
+        &model,
+        None,
+        10000,
+        Duration::from_secs(10),
+        null_closure,
+        (),
+    );
+    assert_abs_diff_eq!(2.0, final_solution[0], epsilon = 0.05);
+    assert_abs_diff_eq!(0.0, final_solution[1], epsilon = 0.05);
+    assert_abs_diff_eq!(-3.5, final_solution[2], epsilon = 0.05);
     assert_abs_diff_eq!(0.0, final_score.into_inner(), epsilon = 0.05);
 }
diff --git a/src/tests/test_relative_annealing.rs b/src/tests/test_relative_annealing.rs
index 09711ee..5245aae 100644
--- a/src/tests/test_relative_annealing.rs
+++ b/src/tests/test_relative_annealing.rs
@@ -1,3 +1,5 @@
+use std::time::Duration;
+
 use approx::assert_abs_diff_eq;

 use crate::optim::{LocalSearchOptimizer, RelativeAnnealingOptimizer};
@@ -9,9 +11,16 @@ fn test() {
     let model = QuadraticModel::new(3, vec![2.0, 0.0, -3.5], (-10.0, 10.0));
     let opt = RelativeAnnealingOptimizer::new(5000, 10, 200, 1e1);
     let null_closure = None::<&fn(_)>;
-    let (final_state, final_score, _) = opt.optimize(&model, None, 10000, null_closure, ());
-    assert_abs_diff_eq!(2.0, final_state[0], epsilon = 0.05);
-    assert_abs_diff_eq!(0.0, final_state[1], epsilon = 0.05);
-    assert_abs_diff_eq!(-3.5, final_state[2], epsilon = 0.05);
+    let (final_solution, final_score, _) = opt.optimize(
+        &model,
+        None,
+        10000,
+        Duration::from_secs(10),
+        null_closure,
+        (),
+    );
+    assert_abs_diff_eq!(2.0, final_solution[0], epsilon = 0.05);
+    assert_abs_diff_eq!(0.0, final_solution[1], epsilon = 0.05);
+    assert_abs_diff_eq!(-3.5, final_solution[2], epsilon = 0.05);
     assert_abs_diff_eq!(0.0, final_score.into_inner(), epsilon = 0.05);
 }
diff --git a/src/tests/test_simulated_annealing.rs b/src/tests/test_simulated_annealing.rs
index a4bc71d..5526dee 100644
--- a/src/tests/test_simulated_annealing.rs
+++ b/src/tests/test_simulated_annealing.rs
@@ -1,3 +1,5 @@
+use std::time::Duration;
+
 use approx::assert_abs_diff_eq;

 use crate::optim::{LocalSearchOptimizer, SimulatedAnnealingOptimizer};
@@ -9,9 +11,16 @@ fn test() {
     let model = QuadraticModel::new(3, vec![2.0, 0.0, -3.5], (-10.0, 10.0));
     let opt = SimulatedAnnealingOptimizer::new(10000, 10);
     let null_closure = None::<&fn(_)>;
-    let (final_state, final_score, _) = opt.optimize(&model, None, 5000, null_closure, (1.0, 0.1));
-    assert_abs_diff_eq!(2.0, final_state[0], epsilon = 0.05);
-    assert_abs_diff_eq!(0.0, final_state[1], epsilon = 0.05);
-    assert_abs_diff_eq!(-3.5, final_state[2], epsilon = 0.05);
+    let (final_solution, final_score, _) = opt.optimize(
+        &model,
+        None,
+        5000,
+        Duration::from_secs(10),
+        null_closure,
+        (1.0, 0.1),
+    );
+    assert_abs_diff_eq!(2.0, final_solution[0], epsilon = 0.05);
+    assert_abs_diff_eq!(0.0, final_solution[1], epsilon = 0.05);
+    assert_abs_diff_eq!(-3.5, final_solution[2], epsilon = 0.05);
     assert_abs_diff_eq!(0.0, final_score.into_inner(), epsilon = 0.05);
 }
diff --git a/src/tests/test_tabu_search.rs b/src/tests/test_tabu_search.rs
index 0a14e1e..4c5c801 100644
--- a/src/tests/test_tabu_search.rs
+++ b/src/tests/test_tabu_search.rs
@@ -1,3 +1,5 @@
+use std::time::Duration;
+
 use approx::assert_abs_diff_eq;

 use crate::{
@@ -5,7 +7,7 @@ use crate::{
     utils::RingBuffer,
 };

-use super::{QuadraticModel, StateType, TransitionType};
+use super::{QuadraticModel, SolutionType, TransitionType};

 #[derive(Debug)]
 struct MyTabuList {
@@ -20,7 +22,7 @@ impl MyTabuList {
 }

 impl TabuList for MyTabuList {
-    type Item = (StateType, TransitionType);
+    type Item = (SolutionType, TransitionType);

     fn contains(&self, item: &Self::Item) -> bool {
         let (k1, _, x) = item.1;
@@ -40,9 +42,16 @@ fn test() {
     let opt = TabuSearchOptimizer::new(1000, 25, 5);
     let tabu_list = MyTabuList::new(10);
     let null_closure = None::<&fn(_)>;
-    let (final_state, final_score, _) = opt.optimize(&model, None, 10000, null_closure, tabu_list);
-    assert_abs_diff_eq!(2.0, final_state[0], epsilon = 0.1);
-    assert_abs_diff_eq!(0.0, final_state[1], epsilon = 0.1);
-    assert_abs_diff_eq!(-3.5, final_state[2], epsilon = 0.1);
+    let (final_solution, final_score, _) = opt.optimize(
+        &model,
+        None,
+        10000,
+        Duration::from_secs(10),
+        null_closure,
+        tabu_list,
+    );
+    assert_abs_diff_eq!(2.0, final_solution[0], epsilon = 0.1);
+    assert_abs_diff_eq!(0.0, final_solution[1], epsilon = 0.1);
+    assert_abs_diff_eq!(-3.5, final_solution[2], epsilon = 0.1);
     assert_abs_diff_eq!(0.0, final_score.into_inner(), epsilon = 0.05);
 }