From c63e985f8d536b50e91c714476edbabf2eb5d31d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Petr=20Nevyho=C5=A1t=C4=9Bn=C3=BD?= Date: Sun, 12 Nov 2023 21:20:25 +0100 Subject: [PATCH 1/6] feat!: change the interoperability between System and Function traits --- src/core/function.rs | 66 +++---------- src/core/system.rs | 14 +++ src/solver/nelder_mead.rs | 40 ++------ src/testing.rs | 198 +++++++++++++++++++++++++++++--------- 4 files changed, 188 insertions(+), 130 deletions(-) diff --git a/src/core/function.rs b/src/core/function.rs index 015a019..4a5af56 100644 --- a/src/core/function.rs +++ b/src/core/function.rs @@ -1,8 +1,4 @@ -use nalgebra::{ - allocator::Allocator, storage::Storage, storage::StorageMut, DefaultAllocator, Dynamic, - IsContiguous, Vector, -}; -use num_traits::Zero; +use nalgebra::{storage::Storage, Dynamic, IsContiguous, Vector}; use super::{ base::{Problem, ProblemError}, @@ -61,54 +57,6 @@ pub trait Function: Problem { ) -> Result where Sx: Storage + IsContiguous; - - /// Calculate the norm of residuals of the system given values of the - /// variable for cases when the function is actually a system of equations. - /// - /// The optimizers should prefer calling this function because the - /// implementation for systems reuse `fx` for calculating the residuals and - /// do not make an unnecessary allocation for it. - fn apply_eval( - &self, - x: &Vector, - fx: &mut Vector, - ) -> Result - where - Sx: Storage + IsContiguous, - Sfx: StorageMut, - { - let norm = self.apply(x)?; - fx.fill(Self::Scalar::zero()); - fx[0] = norm; - Ok(norm) - } -} - -impl Function for F -where - DefaultAllocator: Allocator, -{ - fn apply(&self, x: &Vector) -> Result - where - Sx: Storage + IsContiguous, - { - let mut fx = x.clone_owned(); - self.apply_eval(x, &mut fx) - } - - fn apply_eval( - &self, - x: &Vector, - fx: &mut Vector, - ) -> Result - where - Sx: Storage + IsContiguous, - Sfx: StorageMut, - { - self.eval(x, fx)?; - let norm = fx.norm(); - Ok(norm) - } } /// Extension trait for `Result`. @@ -127,3 +75,15 @@ impl FunctionResultExt for Result { } } } + +impl Function for F +where + F: System, +{ + fn apply(&self, x: &Vector) -> Result + where + Sx: Storage + IsContiguous, + { + self.norm(x) + } +} diff --git a/src/core/system.rs b/src/core/system.rs index c415ff3..844706a 100644 --- a/src/core/system.rs +++ b/src/core/system.rs @@ -67,6 +67,20 @@ pub trait System: Problem { where Sx: Storage + IsContiguous, Sfx: StorageMut; + + /// Calculate the residuals vector norm. + /// + /// The default implementation allocates a temporary vector for the + /// residuals on every call. If you plan to solve the system by an + /// optimizer, consider overriding the default implementation. + fn norm(&self, x: &Vector) -> Result + where + Sx: Storage + IsContiguous, + { + let mut fx = x.clone_owned(); + self.eval(x, &mut fx)?; + Ok(fx.norm()) + } } /// A wrapper type for systems that implements a standard mechanism for diff --git a/src/solver/nelder_mead.rs b/src/solver/nelder_mead.rs index 55f85a6..a8a5f09 100644 --- a/src/solver/nelder_mead.rs +++ b/src/solver/nelder_mead.rs @@ -132,8 +132,6 @@ impl NelderMeadOptions { /// Nelder-Mead solver. See [module](self) documentation for more details. 
pub struct NelderMead { options: NelderMeadOptions, - fx: OVector, - fx_best: OVector, scale: OVector, centroid: OVector, reflection: OVector, @@ -163,8 +161,6 @@ impl NelderMead { Self { options, - fx: OVector::zeros_generic(dim, U1::name()), - fx_best: OVector::zeros_generic(dim, U1::name()), scale, centroid: OVector::zeros_generic(dim, U1::name()), reflection: OVector::zeros_generic(dim, U1::name()), @@ -216,8 +212,6 @@ impl NelderMead { } = self.options; let Self { - fx, - fx_best, scale, simplex, errors, @@ -237,8 +231,7 @@ impl NelderMead { // It is important to return early on error before the point is // added to the simplex. - let mut error_best = f.apply_eval(x, fx)?; - fx_best.copy_from(fx); + let mut error_best = f.apply(x)?; errors.push(error_best); simplex.push(x.clone_owned()); @@ -247,7 +240,7 @@ impl NelderMead { xi[j] += scale[j]; dom.project_in(&mut xi, j); - let error = match f.apply_eval(&xi, fx) { + let error = match f.apply(&xi) { Ok(error) => error, // Do not fail when invalid value is encountered during // building the simplex. Instead, treat it as infinity so @@ -267,7 +260,6 @@ impl NelderMead { }; if error < error_best { - fx_best.copy_from(fx); error_best = error; } @@ -332,7 +324,7 @@ impl NelderMead { // Perform one of possible simplex transformations. reflection.on_line2_mut(centroid, &simplex[sort_perm[n]], reflection_coeff); let reflection_not_feasible = dom.project(reflection); - let reflection_error = f.apply_eval(reflection, fx).ignore_invalid_value(inf)?; + let reflection_error = f.apply(reflection).ignore_invalid_value(inf)?; #[allow(clippy::suspicious_else_formatting)] let (transformation, not_feasible) = if errors[sort_perm[0]] <= reflection_error @@ -344,17 +336,13 @@ impl NelderMead { errors[sort_perm[n]] = reflection_error; (Transformation::Reflection, reflection_not_feasible) } else if reflection_error < errors[sort_perm[0]] { - fx_best.copy_from(fx); - // Reflected point is better than the current best. Try to go // farther along this direction. expansion.on_line2_mut(centroid, &simplex[sort_perm[n]], expansion_coeff); let expansion_not_feasible = dom.project(expansion); - let expansion_error = f.apply_eval(expansion, fx).ignore_invalid_value(inf)?; + let expansion_error = f.apply(expansion).ignore_invalid_value(inf)?; if expansion_error < reflection_error { - fx_best.copy_from(fx); - // Expansion indeed helped, replace the worst point. simplex[sort_perm[n]].copy_from(expansion); errors[sort_perm[n]] = expansion_error; @@ -377,13 +365,9 @@ impl NelderMead { // Try to perform outer contraction. contraction.on_line2_mut(centroid, &simplex[sort_perm[n]], outer_contraction_coeff); let contraction_not_feasible = dom.project(contraction); - let contraction_error = f.apply_eval(contraction, fx).ignore_invalid_value(inf)?; + let contraction_error = f.apply(contraction).ignore_invalid_value(inf)?; if contraction_error <= reflection_error { - if contraction_error < errors[sort_perm[0]] { - fx_best.copy_from(fx); - } - // Use the contracted point instead of the reflected point // because it's better. simplex[sort_perm[n]].copy_from(contraction); @@ -399,13 +383,9 @@ impl NelderMead { // Try to perform inner contraction. 
contraction.on_line2_mut(centroid, &simplex[sort_perm[n]], inner_contraction_coeff); let contraction_not_feasible = dom.project(contraction); - let contraction_error = f.apply_eval(contraction, fx).ignore_invalid_value(inf)?; + let contraction_error = f.apply(contraction).ignore_invalid_value(inf)?; if contraction_error <= errors[sort_perm[n]] { - if contraction_error < errors[sort_perm[0]] { - fx_best.copy_from(fx); - } - // The contracted point is better than the worst point. simplex[sort_perm[n]].copy_from(contraction); errors[sort_perm[n]] = contraction_error; @@ -430,11 +410,10 @@ impl NelderMead { for i in 1..=n { let xi = &mut simplex[sort_perm[i]]; xi.on_line_mut(contraction, shrink_coeff); - let error = f.apply_eval(xi, fx).ignore_invalid_value(inf)?; + let error = f.apply(xi).ignore_invalid_value(inf)?; errors[sort_perm[i]] = error; if error < error_best { - fx_best.copy_from(fx); error_best = error; } } @@ -525,8 +504,9 @@ impl Solver for NelderMead { Sx: StorageMut + IsContiguous, Sfx: StorageMut, { - self.next_inner(f, dom, x) - .map(|_| fx.copy_from(&self.fx_best)) + self.next_inner(f, dom, x)?; + f.eval(x, fx)?; + Ok(()) } } diff --git a/src/testing.rs b/src/testing.rs index 39997ee..0123f5c 100644 --- a/src/testing.rs +++ b/src/testing.rs @@ -23,11 +23,9 @@ use std::error::Error as StdError; use nalgebra::{ - allocator::Allocator, dvector, storage::{Storage, StorageMut}, - vector, DVector, DefaultAllocator, Dim, DimName, Dynamic, IsContiguous, OVector, Vector, U1, - U2, + DVector, Dim, DimName, Dynamic, IsContiguous, OVector, Vector, U1, }; use thiserror::Error; @@ -127,6 +125,25 @@ impl ExtendedRosenbrock { assert!(alpha > 0.0, "alpha must be greater than zero"); Self { n, alpha } } + + fn residuals<'a, Sx>(&self, x: &'a Vector) -> impl Iterator + 'a + where + Sx: Storage + IsContiguous, + { + let alpha = self.alpha; + (0..(self.n / 2)).flat_map(move |i| { + let i1 = 2 * i; + let i2 = 2 * i + 1; + + let x1 = x[i1] * alpha; + let x2 = x[i2] / alpha; + + let fx1 = 10.0 * (x2 - x1 * x1); + let fx2 = 1.0 - x1; + + [fx1, fx2].into_iter() + }) + } } impl Default for ExtendedRosenbrock { @@ -161,18 +178,14 @@ impl System for ExtendedRosenbrock { Sx: Storage + IsContiguous, Sfx: StorageMut, { - for i in 0..(self.n / 2) { - let i1 = 2 * i; - let i2 = 2 * i + 1; - - let x1 = x[i1] * self.alpha; - let x2 = x[i2] / self.alpha; - - fx[i1] = 10.0 * (x2 - x1 * x1); - fx[i2] = 1.0 - x1; - } + eval(self.residuals(x), fx) + } - Ok(()) + fn norm(&self, x: &Vector) -> Result + where + Sx: Storage + IsContiguous, + { + norm(self.residuals(x)) } } @@ -231,6 +244,25 @@ impl ExtendedPowell { assert!(n % 4 == 0, "n must be a multiple of 4"); Self { n } } + + fn residuals<'a, Sx>(&self, x: &'a Vector) -> impl Iterator + 'a + where + Sx: Storage + IsContiguous, + { + (0..(self.n / 4)).flat_map(move |i| { + let i1 = 4 * i; + let i2 = 4 * i + 1; + let i3 = 4 * i + 2; + let i4 = 4 * i + 3; + + let fx1 = x[i1] + 10.0 * x[i2]; + let fx2 = 5f64.sqrt() * (x[i3] - x[i4]); + let fx3 = (x[i2] - 2.0 * x[i3]).powi(2); + let fx4 = 10f64.sqrt() * (x[i1] - x[i4]).powi(2); + + [fx1, fx2, fx3, fx4].into_iter() + }) + } } impl Default for ExtendedPowell { @@ -257,19 +289,14 @@ impl System for ExtendedPowell { Sx: Storage + IsContiguous, Sfx: StorageMut, { - for i in 0..(self.n / 4) { - let i1 = 4 * i; - let i2 = 4 * i + 1; - let i3 = 4 * i + 2; - let i4 = 4 * i + 3; - - fx[i1] = x[i1] + 10.0 * x[i2]; - fx[i2] = 5f64.sqrt() * (x[i3] - x[i4]); - fx[i3] = (x[i2] - 2.0 * x[i3]).powi(2); - fx[i4] = 10f64.sqrt() * (x[i1] - 
x[i4]).powi(2); - } + eval(self.residuals(x), fx) + } - Ok(()) + fn norm(&self, x: &Vector) -> Result + where + Sx: Storage + IsContiguous, + { + norm(self.residuals(x)) } } @@ -313,6 +340,17 @@ impl BullardBiegler { pub fn new() -> Self { Self(()) } + + fn residuals<'a, Sx>(&self, x: &'a Vector) -> impl Iterator + 'a + where + Sx: Storage + IsContiguous, + { + [ + 1e4 * x[0] * x[1] - 1.0, + (-x[0]).exp() + (-x[1]).exp() - 1.001, + ] + .into_iter() + } } impl Default for BullardBiegler { @@ -339,10 +377,14 @@ impl System for BullardBiegler { Sx: Storage + IsContiguous, Sfx: StorageMut, { - fx[0] = 1e4 * x[0] * x[1] - 1.0; - fx[1] = (-x[0]).exp() + (-x[1]).exp() - 1.001; + eval(self.residuals(x), fx) + } - Ok(()) + fn norm(&self, x: &Vector) -> Result + where + Sx: Storage + IsContiguous, + { + norm(self.residuals(x)) } } @@ -382,6 +424,13 @@ impl Sphere { assert!(n > 0, "n must be greater than zero"); Self { n } } + + fn residuals<'a, Sx>(&self, x: &'a Vector) -> impl Iterator + 'a + where + Sx: Storage + IsContiguous, + { + x.iter().map(|xi| xi.powi(2)) + } } impl Default for Sphere { @@ -408,11 +457,14 @@ impl System for Sphere { Sx: Storage + IsContiguous, Sfx: StorageMut, { - for i in 0..self.n { - fx[i] = x[i].powi(2); - } + eval(self.residuals(x), fx) + } - Ok(()) + fn norm(&self, x: &Vector) -> Result + where + Sx: Storage + IsContiguous, + { + norm(self.residuals(x)) } } @@ -467,6 +519,23 @@ impl Brown { assert!(n > 1, "n must be greater than one"); Self { n } } + + fn residuals<'a, Sx>(&self, x: &'a Vector) -> impl Iterator + 'a + where + Sx: Storage + IsContiguous, + { + let n = self.n as f64; + let x_sum = x.sum(); + + let fx0 = x.iter().product::() - 1.0; + let fxs = x + .iter() + .skip(1) + .copied() + .map(move |xi| xi + x_sum - n + 1.0); + + std::iter::once(fx0).chain(fxs) + } } impl Default for Brown { @@ -493,13 +562,14 @@ impl System for Brown { Sx: Storage + IsContiguous, Sfx: StorageMut, { - fx[0] = x.iter().product::() - 1.0; - - for i in 1..self.n { - fx[i] = x[i] + x.sum() - (self.n as f64 + 1.0); - } + eval(self.residuals(x), fx) + } - Ok(()) + fn norm(&self, x: &Vector) -> Result + where + Sx: Storage + IsContiguous, + { + norm(self.residuals(x)) } } @@ -533,6 +603,18 @@ impl Exponential { assert!(n > 0, "n must be greater than zero"); Self { n } } + + fn residuals<'a, Sx>(&self, x: &'a Vector) -> impl Iterator + 'a + where + Sx: Storage + IsContiguous, + { + let x_sum = x.sum(); + + x.iter() + .copied() + .enumerate() + .map(move |(i, xi)| xi - (((i + 1) as f64) * x_sum).cos().exp()) + } } impl Default for Exponential { @@ -559,11 +641,14 @@ impl System for Exponential { Sx: Storage + IsContiguous, Sfx: StorageMut, { - for i in 0..self.n { - fx[i] = x[i] - (((i + 1) as f64) * x.sum()).cos().exp(); - } + eval(self.residuals(x), fx) + } - Ok(()) + fn norm(&self, x: &Vector) -> Result + where + Sx: Storage + IsContiguous, + { + norm(self.residuals(x)) } } @@ -614,12 +699,16 @@ impl System for InfiniteSolutions { Sx: Storage + IsContiguous, Sfx: StorageMut, { - for i in 0..self.n { - fx[i] = 0.0; - } - + fx.fill(0.0); Ok(()) } + + fn norm(&self, _: &Vector) -> Result + where + Sx: Storage + IsContiguous, + { + Ok(0.0) + } } impl TestProblem for InfiniteSolutions { @@ -727,3 +816,18 @@ where Ok(()) } + +fn eval( + residuals: impl Iterator, + fx: &mut Vector, +) -> Result<(), ProblemError> +where + Sfx: StorageMut, +{ + fx.iter_mut().zip(residuals).for_each(|(fxi, v)| *fxi = v); + Ok(()) +} + +fn norm(residuals: impl Iterator) -> Result { + Ok(residuals.map(|v| 
v.powi(2)).sum::().sqrt()) +} From 8125711f5134d94ca34959756b61809eec35a810 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Petr=20Nevyho=C5=A1t=C4=9Bn=C3=BD?= Date: Sun, 12 Nov 2023 21:50:01 +0100 Subject: [PATCH 2/6] feat!: make systems and functions infallible --- examples/rosenbrock.rs | 5 +- gomez-bench/benches/main.rs | 6 +-- src/analysis/initial.rs | 30 ++++-------- src/core/base.rs | 17 ------- src/core/function.rs | 36 ++------------ src/core/optimizer.rs | 9 ++-- src/core/solver.rs | 8 ++-- src/core/system.rs | 22 +++------ src/derivatives.rs | 94 +++++++++++-------------------------- src/lib.rs | 12 ++--- src/solver/lipo.rs | 13 ++--- src/solver/nelder_mead.rs | 67 +++++++++++++------------- src/solver/steffensen.rs | 24 +++++----- src/solver/trust_region.rs | 37 +++++---------- src/testing.rs | 59 +++++++++-------------- 15 files changed, 141 insertions(+), 298 deletions(-) diff --git a/examples/rosenbrock.rs b/examples/rosenbrock.rs index f42deb1..0ee8088 100644 --- a/examples/rosenbrock.rs +++ b/examples/rosenbrock.rs @@ -22,15 +22,12 @@ impl System for Rosenbrock { &self, x: &na::Vector, fx: &mut na::Vector, - ) -> Result<(), ProblemError> - where + ) where Sx: na::storage::Storage + IsContiguous, Sfx: na::storage::StorageMut, { fx[0] = (self.a - x[0]).powi(2); fx[1] = self.b * (x[1] - x[0].powi(2)).powi(2); - - Ok(()) } } diff --git a/gomez-bench/benches/main.rs b/gomez-bench/benches/main.rs index 9509256..344fc20 100644 --- a/gomez-bench/benches/main.rs +++ b/gomez-bench/benches/main.rs @@ -264,10 +264,8 @@ impl> GslFunction for GslFunctionWrapper { na::U1::name(), ); - match self.f.eval(&x, &mut fx) { - Ok(_) => GslStatus::ok(), - Err(_) => GslStatus::err(GslError::BadFunc), - } + self.f.eval(&x, &mut fx); + GslStatus::ok() } fn init(&self) -> GslVec { diff --git a/src/analysis/initial.rs b/src/analysis/initial.rs index 18197ca..d2cbbd2 100644 --- a/src/analysis/initial.rs +++ b/src/analysis/initial.rs @@ -12,24 +12,12 @@ use std::marker::PhantomData; use nalgebra::{ convert, storage::StorageMut, ComplexField, DimName, Dynamic, IsContiguous, OVector, Vector, U1, }; -use thiserror::Error; use crate::{ - core::{Domain, Problem, ProblemError, System}, - derivatives::{Jacobian, JacobianError, EPSILON_SQRT}, + core::{Domain, Problem, System}, + derivatives::{Jacobian, EPSILON_SQRT}, }; -/// Error returned from [`InitialGuessAnalysis`] solver. -#[derive(Debug, Error)] -pub enum InitialGuessAnalysisError { - /// Error that occurred when evaluating the system. - #[error("{0}")] - System(#[from] ProblemError), - /// Error that occurred when computing the Jacobian matrix. - #[error("{0}")] - Jacobian(#[from] JacobianError), -} - /// Initial guesses analyzer. See [module](self) documentation for more details. pub struct InitialGuessAnalysis { nonlinear: Vec, @@ -43,7 +31,7 @@ impl InitialGuessAnalysis { dom: &Domain, x: &mut Vector, fx: &mut Vector, - ) -> Result + ) -> Self where Sx: StorageMut + IsContiguous, Sfx: StorageMut, @@ -55,8 +43,8 @@ impl InitialGuessAnalysis { .unwrap_or_else(|| OVector::from_element_generic(dim, U1::name(), convert(1.0))); // Compute F'(x) in the initial point. - f.eval(x, fx)?; - let jac1 = Jacobian::new(f, x, &scale, fx)?; + f.eval(x, fx); + let jac1 = Jacobian::new(f, x, &scale, fx); // Compute Newton step. let mut p = fx.clone_owned(); @@ -70,8 +58,8 @@ impl InitialGuessAnalysis { *x += p; // Compute F'(x) after one Newton step. 
- f.eval(x, fx)?; - let jac2 = Jacobian::new(f, x, &scale, fx)?; + f.eval(x, fx); + let jac2 = Jacobian::new(f, x, &scale, fx); // Linear variables have no effect on the Jacobian matrix. They can be // recognized by observing no change in corresponding columns (i.e., @@ -92,10 +80,10 @@ impl InitialGuessAnalysis { .map(|(col, _)| col) .collect(); - Ok(Self { + Self { nonlinear, ty: PhantomData, - }) + } } /// Returns indices of variables that have influence on the Jacobian matrix diff --git a/src/core/base.rs b/src/core/base.rs index d4bc8a8..03e89ed 100644 --- a/src/core/base.rs +++ b/src/core/base.rs @@ -1,5 +1,4 @@ use nalgebra::RealField; -use thiserror::Error; use super::domain::Domain; @@ -13,19 +12,3 @@ pub trait Problem { /// system is unconstrained. fn domain(&self) -> Domain; } - -/// Error encountered while applying variables to the problem. -#[derive(Debug, Error)] -pub enum ProblemError { - /// The number of variables does not match the dimensionality of the problem - /// domain. - #[error("invalid dimensionality")] - InvalidDimensionality, - /// An invalid value (NaN, positive or negative infinity) of a residual or - /// the function value occurred. - #[error("invalid value encountered")] - InvalidValue, - /// A custom error specific to the system or function. - #[error("{0}")] - Custom(Box), -} diff --git a/src/core/function.rs b/src/core/function.rs index 4a5af56..f744940 100644 --- a/src/core/function.rs +++ b/src/core/function.rs @@ -1,9 +1,6 @@ use nalgebra::{storage::Storage, Dynamic, IsContiguous, Vector}; -use super::{ - base::{Problem, ProblemError}, - system::System, -}; +use super::{base::Problem, system::System}; /// The trait for defining functions. /// @@ -37,50 +34,27 @@ use super::{ /// /// impl Function for Rosenbrock { /// // Apply trial values of variables to the function. -/// fn apply( -/// &self, -/// x: &na::Vector, -/// ) -> Result +/// fn apply(&self, x: &na::Vector) -> Self::Scalar /// where /// Sx: na::storage::Storage + IsContiguous, /// { /// // Compute the function value. -/// Ok((self.a - x[0]).powi(2) + self.b * (x[1] - x[0].powi(2)).powi(2)) +/// (self.a - x[0]).powi(2) + self.b * (x[1] - x[0].powi(2)).powi(2) /// } /// } /// ``` pub trait Function: Problem { /// Calculate the function value given values of the variables. - fn apply( - &self, - x: &Vector, - ) -> Result + fn apply(&self, x: &Vector) -> Self::Scalar where Sx: Storage + IsContiguous; } -/// Extension trait for `Result`. -pub trait FunctionResultExt { - /// If the result is [`ProblemError::InvalidValue`], `Ok(default)` is - /// returned instead. The original result is returned otherwise. 
- fn ignore_invalid_value(self, replace_with: T) -> Self; -} - -impl FunctionResultExt for Result { - fn ignore_invalid_value(self, replace_with: T) -> Self { - match self { - Ok(value) => Ok(value), - Err(ProblemError::InvalidValue) => Ok(replace_with), - Err(error) => Err(error), - } - } -} - impl Function for F where F: System, { - fn apply(&self, x: &Vector) -> Result + fn apply(&self, x: &Vector) -> Self::Scalar where Sx: Storage + IsContiguous, { diff --git a/src/core/optimizer.rs b/src/core/optimizer.rs index c0065a7..116ac22 100644 --- a/src/core/optimizer.rs +++ b/src/core/optimizer.rs @@ -42,7 +42,7 @@ use super::{domain::Domain, function::Function}; /// Standard: Distribution, /// { /// const NAME: &'static str = "Random"; -/// type Error = ProblemError; +/// type Error = std::convert::Infallible; /// /// fn next( /// &mut self, @@ -57,8 +57,7 @@ use super::{domain::Domain, function::Function}; /// dom.sample(x, &mut self.rng); /// /// // We must compute the value. -/// let value = f.apply(x)?; -/// Ok(value) +/// Ok(f.apply(x)) /// } /// } /// ``` @@ -67,9 +66,7 @@ pub trait Optimizer { const NAME: &'static str; /// Error type of the iteration. Represents an invalid operation during - /// computing the next step. It is usual that one of the error kinds is - /// propagation of the [`ProblemError`](super::ProblemError) from the - /// function. + /// computing the next step. type Error; /// Computes the next step in the optimization process. diff --git a/src/core/solver.rs b/src/core/solver.rs index 305fed7..2f795e6 100644 --- a/src/core/solver.rs +++ b/src/core/solver.rs @@ -42,7 +42,7 @@ use super::{domain::Domain, system::System}; /// Standard: Distribution, /// { /// const NAME: &'static str = "Random"; -/// type Error = ProblemError; +/// type Error = std::convert::Infallible; /// /// fn next( /// &mut self, @@ -59,7 +59,7 @@ use super::{domain::Domain, system::System}; /// dom.sample(x, &mut self.rng); /// /// // We must compute the residuals. -/// f.eval(x, fx)?; +/// f.eval(x, fx); /// /// Ok(()) /// } @@ -70,9 +70,7 @@ pub trait Solver { const NAME: &'static str; /// Error type of the iteration. Represents an invalid operation during - /// computing the next step. It is usual that one of the error kinds is - /// propagation of the [`ProblemError`](super::ProblemError) from the - /// system. + /// computing the next step. type Error; /// Computes the next step in the solving process. diff --git a/src/core/system.rs b/src/core/system.rs index 844706a..d2063cc 100644 --- a/src/core/system.rs +++ b/src/core/system.rs @@ -4,10 +4,7 @@ use nalgebra::{ DefaultAllocator, Dynamic, IsContiguous, OVector, Vector, }; -use super::{ - base::{Problem, ProblemError}, - domain::Domain, -}; +use super::{base::Problem, domain::Domain}; /// The trait for defining equations systems. /// @@ -44,16 +41,13 @@ use super::{ /// &self, /// x: &na::Vector, /// fx: &mut na::Vector, -/// ) -> Result<(), ProblemError> -/// where +/// ) where /// Sx: na::storage::Storage + IsContiguous, /// Sfx: na::storage::StorageMut, /// { /// // Compute the residuals of all equations. 
/// fx[0] = (self.a - x[0]).powi(2); /// fx[1] = self.b * (x[1] - x[0].powi(2)).powi(2); -/// -/// Ok(()) /// } /// } /// ``` @@ -63,8 +57,7 @@ pub trait System: Problem { &self, x: &Vector, fx: &mut Vector, - ) -> Result<(), ProblemError> - where + ) where Sx: Storage + IsContiguous, Sfx: StorageMut; @@ -73,13 +66,13 @@ pub trait System: Problem { /// The default implementation allocates a temporary vector for the /// residuals on every call. If you plan to solve the system by an /// optimizer, consider overriding the default implementation. - fn norm(&self, x: &Vector) -> Result + fn norm(&self, x: &Vector) -> Self::Scalar where Sx: Storage + IsContiguous, { let mut fx = x.clone_owned(); - self.eval(x, &mut fx)?; - Ok(fx.norm()) + self.eval(x, &mut fx); + fx.norm() } } @@ -146,8 +139,7 @@ where &self, x: &Vector, fx: &mut Vector, - ) -> Result<(), ProblemError> - where + ) where Sx: Storage + IsContiguous, Sfx: StorageMut, { diff --git a/src/derivatives.rs b/src/derivatives.rs index 95ddca7..366d470 100644 --- a/src/derivatives.rs +++ b/src/derivatives.rs @@ -8,9 +8,8 @@ use nalgebra::{ ComplexField, DimName, Dynamic, IsContiguous, OMatrix, OVector, RealField, Vector, U1, }; use num_traits::{One, Zero}; -use thiserror::Error; -use crate::core::{Function, Problem, ProblemError, System}; +use crate::core::{Function, Problem, System}; /// Square root of double precision machine epsilon. This value is a standard /// constant for epsilons in approximating first-order derivate-based concepts. @@ -20,14 +19,6 @@ pub const EPSILON_SQRT: f64 = 0.000000014901161193847656; /// constant for epsilons in approximating second-order derivate-based concepts. pub const EPSILON_CBRT: f64 = 0.0000060554544523933395; -/// Error when computing the Jacobian matrix. -#[derive(Debug, Error)] -pub enum JacobianError { - /// Error that occurred when evaluating the system. - #[error("{0}")] - Problem(#[from] ProblemError), -} - /// Jacobian matrix of a system. #[derive(Debug)] pub struct Jacobian { @@ -53,15 +44,15 @@ impl Jacobian { x: &mut Vector, scale: &Vector, fx: &Vector, - ) -> Result + ) -> Self where Sx: StorageMut + IsContiguous, Sscale: Storage, Sfx: Storage, { let mut jac = Self::zeros(f); - jac.compute(f, x, scale, fx)?; - Ok(jac) + jac.compute(f, x, scale, fx); + jac } /// Compute the Jacobian matrix of the system in given point with given @@ -79,7 +70,7 @@ impl Jacobian { x: &mut Vector, scale: &Vector, fx: &Vector, - ) -> Result<&mut Self, JacobianError> + ) -> &mut Self where Sx: StorageMut + IsContiguous, Sscale: Storage, @@ -105,7 +96,7 @@ impl Jacobian { // Update the point. x[j] = xj + step; - f.eval(x, &mut col)?; + f.eval(x, &mut col); // Compute the derivative approximation: J[i, j] = (F(x + e_j * step_j) - F(x)) / step_j. col -= fx; @@ -115,7 +106,7 @@ impl Jacobian { x[j] = xj; } - Ok(self) + self } } @@ -127,14 +118,6 @@ impl Deref for Jacobian { } } -/// Error when computing the gradient matrix. -#[derive(Debug, Error)] -pub enum GradientError { - /// Error that occurred when evaluating the function. - #[error("{0}")] - Problem(#[from] ProblemError), -} - /// Gradient vector of a function. 
#[derive(Debug)] pub struct Gradient { @@ -160,14 +143,14 @@ impl Gradient { x: &mut Vector, scale: &Vector, fx: F::Scalar, - ) -> Result + ) -> Self where Sx: StorageMut + IsContiguous, Sscale: Storage, { let mut grad = Self::zeros(f); - grad.compute(f, x, scale, fx)?; - Ok(grad) + grad.compute(f, x, scale, fx); + grad } /// Compute the gradient vector of the function in given point with given @@ -185,7 +168,7 @@ impl Gradient { x: &mut Vector, scale: &Vector, fx: F::Scalar, - ) -> Result<&mut Self, GradientError> + ) -> &mut Self where Sx: StorageMut + IsContiguous, Sscale: Storage, @@ -202,7 +185,7 @@ impl Gradient { // Update the point. x[i] = xi + step; - let fxi = f.apply(x)?; + let fxi = f.apply(x); // Compute the derivative approximation: grad[i] = (F(x + e_i * step_i) - F(x)) / step_i. self.grad[i] = (fxi - fx) / step; @@ -211,7 +194,7 @@ impl Gradient { x[i] = xi; } - Ok(self) + self } } @@ -223,14 +206,6 @@ impl Deref for Gradient { } } -/// Error when computing the Hessian matrix. -#[derive(Debug, Error)] -pub enum HessianError { - /// Error that occurred when evaluating the system. - #[error("{0}")] - Problem(#[from] ProblemError), -} - /// Hessian matrix of a system. #[derive(Debug)] pub struct Hessian { @@ -260,14 +235,14 @@ impl Hessian { x: &mut Vector, scale: &Vector, fx: F::Scalar, - ) -> Result + ) -> Self where Sx: StorageMut + IsContiguous, Sscale: Storage, { let mut hes = Self::zeros(f); - hes.compute(f, x, scale, fx)?; - Ok(hes) + hes.compute(f, x, scale, fx); + hes } /// Compute the Hessian matrix of the function in given point with given @@ -285,7 +260,7 @@ impl Hessian { x: &mut Vector, scale: &Vector, fx: F::Scalar, - ) -> Result<&mut Self, HessianError> + ) -> &mut Self where Sx: StorageMut + IsContiguous, Sscale: Storage, @@ -305,7 +280,7 @@ impl Hessian { // Update the point and store the function output. x[i] = xi + step; - let fxi = f.apply(x)?; + let fxi = f.apply(x); self.neighbors[i] = fxi; // Restore the original value. @@ -319,7 +294,7 @@ impl Hessian { // Prepare x_i + 2 * e_i. 
x[i] = xi + stepi + stepi; - let fxi = f.apply(x)?; + let fxi = f.apply(x); let fni = self.neighbors[i]; x[i] = xi + stepi; @@ -332,7 +307,7 @@ impl Hessian { x[j] = xj + stepj; - let fxj = f.apply(x)?; + let fxj = f.apply(x); let fnj = self.neighbors[j]; let hij = ((fx - fni) + (fxj - fnj)) / (stepi * stepj); @@ -345,7 +320,7 @@ impl Hessian { x[i] = xi; } - Ok(self) + self } } @@ -379,10 +354,7 @@ mod tests { } impl Function for MixedVars { - fn apply( - &self, - x: &Vector, - ) -> Result + fn apply(&self, x: &Vector) -> Self::Scalar where Sx: Storage + IsContiguous, { @@ -391,7 +363,7 @@ mod tests { let x1 = x[0]; let x2 = x[1]; - Ok(x1.powi(2) + x1 * x2 + x2.powi(3)) + x1.powi(2) + x1 * x2 + x2.powi(3) } } @@ -402,12 +374,9 @@ mod tests { let mut fx = dvector![0.0, 0.0]; let func = ExtendedRosenbrock::new(2); - func.eval(&x, &mut fx).unwrap(); + func.eval(&x, &mut fx); let jac = Jacobian::new(&func, &mut x, &scale, &fx); - assert!(jac.is_ok()); - let jac = jac.unwrap(); - let expected = dmatrix![-40.0, 10.0; -1.0, 0.0]; assert_abs_diff_eq!(&*jac, &expected, epsilon = 10e-6); } @@ -419,12 +388,9 @@ mod tests { let mut fx = dvector![0.0, 0.0, 0.0, 0.0]; let func = ExtendedPowell::new(4); - func.eval(&x, &mut fx).unwrap(); + func.eval(&x, &mut fx); let jac = Jacobian::new(&func, &mut x, &scale, &fx); - assert!(jac.is_ok()); - let jac = jac.unwrap(); - let expected = dmatrix![ 1.0, 10.0, 0.0, 0.0; 0.0, 0.0, 5f64.sqrt(), -(5f64.sqrt()); @@ -440,12 +406,9 @@ mod tests { let scale = dvector![1.0, 1.0]; let func = MixedVars; - let fx = func.apply(&x).unwrap(); + let fx = func.apply(&x); let grad = Gradient::new(&func, &mut x, &scale, fx); - assert!(grad.is_ok()); - let grad = grad.unwrap(); - let expected = dvector![3.0, 30.0]; assert_abs_diff_eq!(&*grad, &expected, epsilon = 10e-6); } @@ -456,12 +419,9 @@ mod tests { let scale = dvector![1.0, 1.0]; let func = MixedVars; - let fx = func.apply(&x).unwrap(); + let fx = func.apply(&x); let hes = Hessian::new(&func, &mut x, &scale, fx); - assert!(hes.is_ok()); - let hes = hes.unwrap(); - let expected = dmatrix![2.0, 1.0; 1.0, -18.0]; assert_abs_diff_eq!(&*hes, &expected, epsilon = 10e-3); } diff --git a/src/lib.rs b/src/lib.rs index 34945b4..52e0a08 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -95,16 +95,13 @@ //! &self, //! x: &na::Vector, //! fx: &mut na::Vector, -//! ) -> Result<(), ProblemError> -//! where +//! ) where //! Sx: na::storage::Storage + IsContiguous, //! Sfx: na::storage::StorageMut, //! { //! // Compute the residuals of all equations. //! fx[0] = (self.a - x[0]).powi(2); //! fx[1] = self.b * (x[1] - x[0].powi(2)).powi(2); -//! -//! Ok(()) //! } //! } //! ``` @@ -172,15 +169,12 @@ //! # &self, //! # x: &na::Vector, //! # fx: &mut na::Vector, -//! # ) -> Result<(), ProblemError> -//! # where +//! # ) where //! # Sx: na::storage::Storage + IsContiguous, //! # Sfx: na::storage::StorageMut, //! # { //! # fx[0] = (self.a - x[0]).powi(2); //! # fx[1] = self.b * (x[1] - x[0].powi(2)).powi(2); -//! # -//! # Ok(()) //! # } //! # } //! @@ -254,5 +248,5 @@ pub use nalgebra; /// Gomez prelude. 
pub mod prelude { - pub use crate::core::{Domain, Problem, ProblemError, Solver, System}; + pub use crate::core::{Domain, Problem, Solver, System}; } diff --git a/src/solver/lipo.rs b/src/solver/lipo.rs index 06e2f5c..159f0ca 100644 --- a/src/solver/lipo.rs +++ b/src/solver/lipo.rs @@ -20,7 +20,7 @@ use rand::Rng; use rand_distr::{uniform::SampleUniform, Bernoulli, Distribution, Standard}; use thiserror::Error; -use crate::core::{Domain, Function, Optimizer, Problem, ProblemError, Solver, System}; +use crate::core::{Domain, Function, Optimizer, Problem, Solver, System}; use super::NelderMead; @@ -201,9 +201,6 @@ impl Lipo { /// Error returned from [`Lipo`] solver. #[derive(Debug, Error)] pub enum LipoError { - /// Error that occurred when evaluating the system. - #[error("{0}")] - Problem(#[from] ProblemError), /// Error when no potential minimizer is found after number sampling trials. #[error("potential minimizer was not found after specified number of trials")] PotentialMinimizerNotFound, @@ -250,7 +247,7 @@ where if xs.is_empty() { debug!("first iteration, just evaluating"); // First iteration. We just evaluate the initial point and store it. - let error = f.apply(x)?; + let error = f.apply(x); xs.push(x.clone_owned()); ys.push(error); @@ -355,10 +352,10 @@ where // optimization. match local_optimizer.next(f, dom, x) { Ok(error) => error, - Err(_) => f.apply(x)?, + Err(_) => f.apply(x), } } else { - f.apply(x)? + f.apply(x) }; // New point is better then the current best, so we update it. @@ -424,7 +421,7 @@ where Sfx: StorageMut<::Scalar, Dynamic>, { self.next_inner(f, dom, x)?; - f.eval(x, fx)?; + f.eval(x, fx); Ok(()) } } diff --git a/src/solver/nelder_mead.rs b/src/solver/nelder_mead.rs index a8a5f09..51fbb03 100644 --- a/src/solver/nelder_mead.rs +++ b/src/solver/nelder_mead.rs @@ -26,13 +26,13 @@ use log::debug; use nalgebra::{ convert, storage::{Storage, StorageMut}, - Dim, DimName, Dynamic, IsContiguous, OVector, RealField, Vector, U1, + ComplexField, Dim, DimName, Dynamic, IsContiguous, OVector, RealField, Vector, U1, }; use num_traits::{One, Zero}; use thiserror::Error; use crate::{ - core::{Domain, Function, FunctionResultExt, Optimizer, Problem, ProblemError, Solver, System}, + core::{Domain, Function, Optimizer, Problem, Solver, System}, derivatives::EPSILON_SQRT, }; @@ -184,12 +184,12 @@ impl NelderMead { /// Error returned from [`NelderMead`] solver. #[derive(Debug, Error)] pub enum NelderMeadError { - /// Error that occurred when evaluating the system. - #[error("{0}")] - Problem(#[from] ProblemError), /// Simplex collapsed so it is impossible to make any progress. #[error("simplex collapsed")] SimplexCollapsed, + /// Simplex contains too many invalid values (NaN, infinity). + #[error("simplex contains too many invalid values")] + SimplexInvalid, } impl NelderMead { @@ -224,14 +224,13 @@ impl NelderMead { } = self; let n = dom.dim(); - let inf = convert(f64::INFINITY); if simplex.is_empty() { // Simplex initialization. // It is important to return early on error before the point is // added to the simplex. - let mut error_best = f.apply(x)?; + let mut error_best = f.apply(x); errors.push(error_best); simplex.push(x.clone_owned()); @@ -240,24 +239,7 @@ impl NelderMead { xi[j] += scale[j]; dom.project_in(&mut xi, j); - let error = match f.apply(&xi) { - Ok(error) => error, - // Do not fail when invalid value is encountered during - // building the simplex. 
Instead, treat it as infinity so - // that it is worse than (or "equal" to) any other error and - // hope that it gets replaced. The exception is the very - // point provided by the caller as it is expected to be - // valid and it is erroneous situation if it is not. - Err(ProblemError::InvalidValue) => inf, - Err(error) => { - // Clear the simplex so the solver is not in invalid - // state. - debug!("encountered unexpected error during simplex initialization"); - simplex.clear(); - errors.clear(); - return Err(error.into()); - } - }; + let error = f.apply(&xi); if error < error_best { error_best = error; @@ -267,18 +249,18 @@ impl NelderMead { simplex.push(xi); } - let inf_error_count = errors.iter().filter(|e| **e == inf).count(); + let error_count = errors.iter().filter(|e| !e.is_finite()).count(); - if inf_error_count >= simplex.len() / 2 { + if error_count >= simplex.len() / 2 { // The simplex is too degenerate. debug!( "{} out of {} points in simplex have invalid value, returning error", - inf_error_count, + error_count, simplex.len() ); simplex.clear(); errors.clear(); - return Err(NelderMeadError::Problem(ProblemError::InvalidValue)); + return Err(NelderMeadError::SimplexInvalid); } sort_perm.extend(0..=n); @@ -324,7 +306,7 @@ impl NelderMead { // Perform one of possible simplex transformations. reflection.on_line2_mut(centroid, &simplex[sort_perm[n]], reflection_coeff); let reflection_not_feasible = dom.project(reflection); - let reflection_error = f.apply(reflection).ignore_invalid_value(inf)?; + let reflection_error = f.apply(reflection).nan_to_inf(); #[allow(clippy::suspicious_else_formatting)] let (transformation, not_feasible) = if errors[sort_perm[0]] <= reflection_error @@ -340,7 +322,7 @@ impl NelderMead { // farther along this direction. expansion.on_line2_mut(centroid, &simplex[sort_perm[n]], expansion_coeff); let expansion_not_feasible = dom.project(expansion); - let expansion_error = f.apply(expansion).ignore_invalid_value(inf)?; + let expansion_error = f.apply(expansion).nan_to_inf(); if expansion_error < reflection_error { // Expansion indeed helped, replace the worst point. @@ -365,7 +347,7 @@ impl NelderMead { // Try to perform outer contraction. contraction.on_line2_mut(centroid, &simplex[sort_perm[n]], outer_contraction_coeff); let contraction_not_feasible = dom.project(contraction); - let contraction_error = f.apply(contraction).ignore_invalid_value(inf)?; + let contraction_error = f.apply(contraction).nan_to_inf(); if contraction_error <= reflection_error { // Use the contracted point instead of the reflected point @@ -383,7 +365,7 @@ impl NelderMead { // Try to perform inner contraction. contraction.on_line2_mut(centroid, &simplex[sort_perm[n]], inner_contraction_coeff); let contraction_not_feasible = dom.project(contraction); - let contraction_error = f.apply(contraction).ignore_invalid_value(inf)?; + let contraction_error = f.apply(contraction).nan_to_inf(); if contraction_error <= errors[sort_perm[n]] { // The contracted point is better than the worst point. 
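The `.nan_to_inf()` calls introduced above replace the old `ignore_invalid_value(inf)?` pattern: instead of propagating `ProblemError::InvalidValue`, any non-finite objective value is clamped to positive infinity so that the ordering of simplex points stays well defined. The actual implementation is the `RealFieldNelderMeadExt` extension trait added near the bottom of this file; the free function below is only an illustrative sketch of its semantics.

fn nan_to_inf(value: f64) -> f64 {
    // Every comparison with NaN is false, and -inf would wrongly rank as
    // the best point, so both are treated as the worst possible error.
    if value.is_finite() { value } else { f64::INFINITY }
}

fn main() {
    assert_eq!(nan_to_inf(f64::NAN), f64::INFINITY);
    assert_eq!(nan_to_inf(f64::NEG_INFINITY), f64::INFINITY);
    assert!(nan_to_inf(1.0) < f64::INFINITY);
}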
@@ -410,7 +392,7 @@ impl NelderMead { for i in 1..=n { let xi = &mut simplex[sort_perm[i]]; xi.on_line_mut(contraction, shrink_coeff); - let error = f.apply(xi).ignore_invalid_value(inf)?; + let error = f.apply(xi).nan_to_inf(); errors[sort_perm[i]] = error; if error < error_best { @@ -505,7 +487,7 @@ impl Solver for NelderMead { Sfx: StorageMut, { self.next_inner(f, dom, x)?; - f.eval(x, fx)?; + f.eval(x, fx); Ok(()) } } @@ -548,6 +530,21 @@ where } } +trait RealFieldNelderMeadExt { + fn nan_to_inf(self) -> Self; +} + +impl RealFieldNelderMeadExt for T { + fn nan_to_inf(self) -> Self { + if self.is_finite() { + self + } else { + // Not finite also covers NaN and negative infinity. + T::from_subset(&f64::INFINITY) + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/solver/steffensen.rs b/src/solver/steffensen.rs index 1e92c99..0658259 100644 --- a/src/solver/steffensen.rs +++ b/src/solver/steffensen.rs @@ -18,7 +18,7 @@ use getset::{CopyGetters, Setters}; use nalgebra::{storage::StorageMut, Dynamic, IsContiguous, Vector}; use thiserror::Error; -use crate::core::{Domain, Problem, ProblemError, Solver, System}; +use crate::core::{Domain, Problem, Solver, System}; /// Variant of the Steffenen's method. #[derive(Debug, Clone, Copy)] @@ -74,9 +74,9 @@ impl Steffensen { /// Error returned from [`Steffensen`] solver. #[derive(Debug, Error)] pub enum SteffensenError { - /// Error that occurred when evaluating the system. - #[error("{0}")] - Problem(#[from] ProblemError), + /// System is not one-dimensional. + #[error("system is not one-dimensional")] + InvalidDimensionality, } impl Solver for Steffensen { @@ -96,9 +96,7 @@ impl Solver for Steffensen { Sfx: StorageMut, { if dom.dim() != 1 { - return Err(SteffensenError::Problem( - ProblemError::InvalidDimensionality, - )); + return Err(SteffensenError::InvalidDimensionality); } let SteffensenOptions { variant, .. } = self.options; @@ -106,27 +104,27 @@ impl Solver for Steffensen { let x0 = x[0]; // Compute f(x). - f.eval(x, fx)?; + f.eval(x, fx); let fx0 = fx[0]; match variant { SteffensenVariant::Standard => { // Compute z = f(x + f(x)) and f(z). x[0] += fx0; - f.eval(x, fx)?; + f.eval(x, fx); let fz0 = fx[0]; // Compute the next point. x[0] = x0 - (fx0 * fx0) / (fz0 - fx0); // Compute f(x). - f.eval(x, fx)?; + f.eval(x, fx); } SteffensenVariant::Liu => { // Compute z = f(x + f(x)) and f(z). x[0] += fx0; let z0 = x[0]; - f.eval(x, fx)?; + f.eval(x, fx); let fz0 = fx[0]; // Compute f[x, z]. @@ -135,7 +133,7 @@ impl Solver for Steffensen { // Compute y = x - f(x) / f[x, z] and f(y). x[0] = x0 - fx0 / f_xz; let y0 = x[0]; - f.eval(x, fx)?; + f.eval(x, fx); let fy0 = fx[0]; // Compute f[x, y] and f[y, z]. @@ -146,7 +144,7 @@ impl Solver for Steffensen { x[0] = y0 - (f_xy - f_yz + f_xz) / (f_xy * f_xy) * fy0; // Compute f(x). - f.eval(x, fx)?; + f.eval(x, fx); } } diff --git a/src/solver/trust_region.rs b/src/solver/trust_region.rs index 0b02a87..57f1359 100644 --- a/src/solver/trust_region.rs +++ b/src/solver/trust_region.rs @@ -37,10 +37,8 @@ use num_traits::{One, Zero}; use thiserror::Error; use crate::{ - core::{Domain, Function, Optimizer, Problem, ProblemError, Solver, System}, - derivatives::{ - Gradient, GradientError, Hessian, HessianError, Jacobian, JacobianError, EPSILON_SQRT, - }, + core::{Domain, Function, Optimizer, Problem, Solver, System}, + derivatives::{Gradient, Hessian, Jacobian, EPSILON_SQRT}, }; /// Specification for initial value of trust region size. 
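With evaluation now infallible, the hunk below drops the evaluation and derivative variants from `TrustRegionError`, leaving the genuine algorithmic failure. A sketch of what error handling looks like for a caller after this patch; the `Rosenbrock` system is the one from examples/rosenbrock.rs, the constructor signature and import paths are assumptions based on the surrounding code, and at this point in the series the iteration method is still called `next` (patch 3 renames it to `solve_next`).

use gomez::core::{Problem, Solver};
use gomez::nalgebra as na;
use gomez::solver::trust_region::{TrustRegion, TrustRegionError};

fn main() {
    let f = Rosenbrock { a: 1.0, b: 1.0 };
    let dom = f.domain();
    let mut x = na::dvector![-10.0, -5.0];
    let mut fx = na::dvector![0.0, 0.0];
    let mut solver = TrustRegion::new(&f, &dom);

    match solver.next(&f, &dom, &mut x, &mut fx) {
        // A step was taken; convergence is checked separately via fx.norm().
        Ok(()) => {}
        // Neither a Newton nor a steepest descent step could be taken.
        Err(TrustRegionError::NoValidStep) => {}
    }
}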
@@ -192,18 +190,6 @@ impl TrustRegion { /// Error returned from [`TrustRegion`] solver. #[derive(Debug, Error)] pub enum TrustRegionError { - /// Error that occurred when evaluating the system. - #[error("{0}")] - Problem(#[from] ProblemError), - /// Error that occurred when computing the Jacobian matrix. - #[error("{0}")] - Jacobian(#[from] JacobianError), - /// Error that occurred when computing the gradient vector. - #[error("{0}")] - Gradient(#[from] GradientError), - /// Error that occurred when computing the Hessian matrix. - #[error("{0}")] - Hessian(#[from] HessianError), /// Could not take any valid step. #[error("neither newton nor steepest descent step can be taken from the point")] NoValidStep, @@ -273,8 +259,8 @@ impl Solver for TrustRegion { } // Compute F(x) and F'(x). - f.eval(x, fx)?; - jac.compute(f, x, scale, fx)?; + f.eval(x, fx); + jac.compute(f, x, scale, fx); let fx_norm = fx.norm(); @@ -580,7 +566,8 @@ impl Solver for TrustRegion { } // Compute F(x'). - let is_trial_valid = f.eval(x_trial, fx_trial).is_ok(); + f.eval(x_trial, fx_trial); + let is_trial_valid = fx_trial.iter().all(|fxi| fxi.is_finite()); let fx_trial_norm = fx_trial.norm(); let gain_ratio = if is_trial_valid { @@ -747,9 +734,9 @@ impl Optimizer for TrustRegion { } // Compute f(x), grad f(x) and H(x). - let mut fx = f.apply(x)?; - grad.compute(f, x, scale, fx)?; - hes.compute(f, x, scale, fx)?; + let mut fx = f.apply(x); + grad.compute(f, x, scale, fx); + hes.compute(f, x, scale, fx); let estimate_delta = *delta == F::Scalar::zero(); if estimate_delta { @@ -950,10 +937,8 @@ impl Optimizer for TrustRegion { } // Compute f(x'). - let (fx_trial, is_trial_valid) = match f.apply(x_trial) { - Ok(fx_trial) => (fx_trial, true), - Err(_) => (F::Scalar::zero(), false), - }; + let fx_trial = f.apply(x_trial); + let is_trial_valid = fx_trial.is_finite(); let gain_ratio = if is_trial_valid { // Compute the gain ratio. diff --git a/src/testing.rs b/src/testing.rs index 0123f5c..5daaac9 100644 --- a/src/testing.rs +++ b/src/testing.rs @@ -29,7 +29,7 @@ use nalgebra::{ }; use thiserror::Error; -use crate::core::{Domain, Function, Optimizer, Problem, ProblemError, Solver, System}; +use crate::core::{Domain, Function, Optimizer, Problem, Solver, System}; /// Extension of the [`System`] trait that provides additional information that /// is useful for testing solvers. 
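The testing systems below keep routing both `eval` and `norm` through the shared `residuals` iterator introduced in patch 1, so the `norm` override never has to materialize a residual vector. A self-contained sketch of that pattern with plain slices standing in for the nalgebra vector types; the residual formula here is made up purely for illustration.

fn residuals<'a>(x: &'a [f64]) -> impl Iterator<Item = f64> + 'a {
    x.windows(2).map(|w| w[1] - w[0] * w[0])
}

// Mirrors the `eval` helper at the bottom of testing.rs: write the
// residuals into a preallocated buffer.
fn eval(x: &[f64], fx: &mut [f64]) {
    fx.iter_mut().zip(residuals(x)).for_each(|(fxi, v)| *fxi = v);
}

// Mirrors the `norm` helper: fold the same iterator into the Euclidean
// norm without any allocation.
fn norm(x: &[f64]) -> f64 {
    residuals(x).map(|v| v * v).sum::<f64>().sqrt()
}

fn main() {
    let x = [1.0, 2.0, 4.0];
    let mut fx = [0.0; 2];
    eval(&x, &mut fx);
    assert_eq!(norm(&x), fx.iter().map(|v| v * v).sum::<f64>().sqrt());
}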
@@ -59,11 +59,8 @@ where Sx: Storage + IsContiguous, { let mut fx = x.clone_owned(); - if self.eval(x, &mut fx).is_ok() { - fx.norm() <= eps - } else { - false - } + self.eval(x, &mut fx); + fx.norm() <= eps } } @@ -173,15 +170,14 @@ impl System for ExtendedRosenbrock { &self, x: &Vector, fx: &mut Vector, - ) -> Result<(), ProblemError> - where + ) where Sx: Storage + IsContiguous, Sfx: StorageMut, { eval(self.residuals(x), fx) } - fn norm(&self, x: &Vector) -> Result + fn norm(&self, x: &Vector) -> Self::Scalar where Sx: Storage + IsContiguous, { @@ -284,15 +280,14 @@ impl System for ExtendedPowell { &self, x: &Vector, fx: &mut Vector, - ) -> Result<(), ProblemError> - where + ) where Sx: Storage + IsContiguous, Sfx: StorageMut, { eval(self.residuals(x), fx) } - fn norm(&self, x: &Vector) -> Result + fn norm(&self, x: &Vector) -> Self::Scalar where Sx: Storage + IsContiguous, { @@ -372,15 +367,14 @@ impl System for BullardBiegler { &self, x: &Vector, fx: &mut Vector, - ) -> Result<(), ProblemError> - where + ) where Sx: Storage + IsContiguous, Sfx: StorageMut, { eval(self.residuals(x), fx) } - fn norm(&self, x: &Vector) -> Result + fn norm(&self, x: &Vector) -> Self::Scalar where Sx: Storage + IsContiguous, { @@ -452,15 +446,14 @@ impl System for Sphere { &self, x: &Vector, fx: &mut Vector, - ) -> Result<(), ProblemError> - where + ) where Sx: Storage + IsContiguous, Sfx: StorageMut, { eval(self.residuals(x), fx) } - fn norm(&self, x: &Vector) -> Result + fn norm(&self, x: &Vector) -> Self::Scalar where Sx: Storage + IsContiguous, { @@ -494,7 +487,7 @@ impl TestFunction for Sphere { where Sx: Storage + IsContiguous, { - self.apply(x).map(|fx| fx.abs() <= eps).unwrap_or(false) + self.apply(x).abs() <= eps } } @@ -557,15 +550,14 @@ impl System for Brown { &self, x: &Vector, fx: &mut Vector, - ) -> Result<(), ProblemError> - where + ) where Sx: Storage + IsContiguous, Sfx: StorageMut, { eval(self.residuals(x), fx) } - fn norm(&self, x: &Vector) -> Result + fn norm(&self, x: &Vector) -> Self::Scalar where Sx: Storage + IsContiguous, { @@ -636,15 +628,14 @@ impl System for Exponential { &self, x: &Vector, fx: &mut Vector, - ) -> Result<(), ProblemError> - where + ) where Sx: Storage + IsContiguous, Sfx: StorageMut, { eval(self.residuals(x), fx) } - fn norm(&self, x: &Vector) -> Result + fn norm(&self, x: &Vector) -> Self::Scalar where Sx: Storage + IsContiguous, { @@ -694,20 +685,18 @@ impl System for InfiniteSolutions { &self, _x: &Vector, fx: &mut Vector, - ) -> Result<(), ProblemError> - where + ) where Sx: Storage + IsContiguous, Sfx: StorageMut, { fx.fill(0.0); - Ok(()) } - fn norm(&self, _: &Vector) -> Result + fn norm(&self, _: &Vector) -> Self::Scalar where Sx: Storage + IsContiguous, { - Ok(0.0) + 0.0 } } @@ -817,17 +806,13 @@ where Ok(()) } -fn eval( - residuals: impl Iterator, - fx: &mut Vector, -) -> Result<(), ProblemError> +fn eval(residuals: impl Iterator, fx: &mut Vector) where Sfx: StorageMut, { fx.iter_mut().zip(residuals).for_each(|(fxi, v)| *fxi = v); - Ok(()) } -fn norm(residuals: impl Iterator) -> Result { - Ok(residuals.map(|v| v.powi(2)).sum::().sqrt()) +fn norm(residuals: impl Iterator) -> f64 { + residuals.map(|v| v.powi(2)).sum::().sqrt() } From 7c3692a9a3b58438ef85454f780598f5735d4eef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Petr=20Nevyho=C5=A1t=C4=9Bn=C3=BD?= Date: Sun, 12 Nov 2023 21:53:21 +0100 Subject: [PATCH 3/6] feat!: rename next methods on Solver and Optimizer so they do not clash --- examples/rosenbrock.rs | 2 +- gomez-bench/benches/main.rs | 4 ++-- 
src/core/optimizer.rs | 6 +++--- src/core/solver.rs | 12 ++++++------ src/lib.rs | 2 +- src/solver/lipo.rs | 6 +++--- src/solver/nelder_mead.rs | 4 ++-- src/solver/steffensen.rs | 2 +- src/solver/trust_region.rs | 4 ++-- src/testing.rs | 6 +++--- 10 files changed, 24 insertions(+), 24 deletions(-) diff --git a/examples/rosenbrock.rs b/examples/rosenbrock.rs index 0ee8088..2246583 100644 --- a/examples/rosenbrock.rs +++ b/examples/rosenbrock.rs @@ -43,7 +43,7 @@ fn main() -> Result<(), String> { for i in 1..=100 { solver - .next(&f, &dom, &mut x, &mut fx) + .solve_next(&f, &dom, &mut x, &mut fx) .map_err(|err| format!("{}", err))?; println!( diff --git a/gomez-bench/benches/main.rs b/gomez-bench/benches/main.rs index 344fc20..ad90d21 100644 --- a/gomez-bench/benches/main.rs +++ b/gomez-bench/benches/main.rs @@ -184,7 +184,7 @@ where let mut fx = x.clone_owned(); let mut iter = 0; loop { - if solver.next(&f, &dom, &mut x, &mut fx).is_err() { + if solver.solve_next(&f, &dom, &mut x, &mut fx).is_err() { panic!("solver error"); } @@ -290,7 +290,7 @@ impl> Solver for GslSolverWrapper( + fn solve_next( &mut self, _f: &F, _dom: &Domain, diff --git a/src/core/optimizer.rs b/src/core/optimizer.rs index 116ac22..aaf8f25 100644 --- a/src/core/optimizer.rs +++ b/src/core/optimizer.rs @@ -5,7 +5,7 @@ use super::{domain::Domain, function::Function}; /// Common interface for all optimizers. /// /// All optimizers implement a common interface defined by the [`Optimizer`] -/// trait. The essential method is [`next`](Optimizer::next) which takes +/// trait. The essential method is [`opt_next`](Optimizer::opt_next) which takes /// variables *x* and computes the next step. Thus it represents one iteration /// in the process. Repeated call to this method should move *x* towards the /// minimum in successful cases. @@ -44,7 +44,7 @@ use super::{domain::Domain, function::Function}; /// const NAME: &'static str = "Random"; /// type Error = std::convert::Infallible; /// -/// fn next( +/// fn opt_next( /// &mut self, /// f: &F, /// dom: &Domain, @@ -82,7 +82,7 @@ pub trait Optimizer { /// The implementations *can* assume that subsequent calls to `next` pass /// the value of `x` as was outputted in the previous iteration by the same /// method. - fn next( + fn opt_next( &mut self, f: &F, dom: &Domain, diff --git a/src/core/solver.rs b/src/core/solver.rs index 2f795e6..a0bbc3f 100644 --- a/src/core/solver.rs +++ b/src/core/solver.rs @@ -5,10 +5,10 @@ use super::{domain::Domain, system::System}; /// Common interface for all solvers. /// /// All solvers implement a common interface defined by the [`Solver`] trait. -/// The essential method is [`next`](Solver::next) which takes variables *x* and -/// computes the next step. Thus it represents one iteration in the process. -/// Repeated call to this method should converge *x* to the solution in -/// successful cases. +/// The essential method is [`solve_next`](Solver::solve_next) which takes +/// variables *x* and computes the next step. Thus it represents one iteration +/// in the process. Repeated call to this method should converge *x* to the +/// solution in successful cases. /// /// If you implement a solver, please consider make it a contribution to this /// library. 
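The split names pay off because `NelderMead`, `TrustRegion`, and `Lipo` each implement both `Solver` and `Optimizer`; with a single method called `next` on both traits, callers with both in scope had to disambiguate with fully qualified syntax. A usage sketch after this patch, with the setup (`f`, `dom`, `x`, `fx`) as in the lib.rs example and the constructor signature and import paths assumed from the surrounding code:

use gomez::core::{Optimizer, Solver};
use gomez::solver::NelderMead;

let mut nm = NelderMead::new(&f, &dom);
// One root-finding iteration: writes the residuals into fx.
nm.solve_next(&f, &dom, &mut x, &mut fx).expect("solver error");
// One optimization iteration: returns the current objective value.
let value = nm.opt_next(&f, &dom, &mut x).expect("optimizer error");
println!("f(x) = {}", value);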
@@ -44,7 +44,7 @@ use super::{domain::Domain, system::System}; /// const NAME: &'static str = "Random"; /// type Error = std::convert::Infallible; /// -/// fn next( +/// fn solve_next( /// &mut self, /// f: &F, /// dom: &Domain, @@ -86,7 +86,7 @@ pub trait Solver { /// The implementations *can* assume that subsequent calls to `next` pass /// the value of `x` as was outputted in the previous iteration by the same /// method. - fn next( + fn solve_next( &mut self, f: &F, dom: &Domain, diff --git a/src/lib.rs b/src/lib.rs index 52e0a08..631f74a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -192,7 +192,7 @@ //! for i in 1.. { //! // Do one iteration in the solving process. //! solver -//! .next(&f, &dom, &mut x, &mut fx) +//! .solve_next(&f, &dom, &mut x, &mut fx) //! .expect("solver encountered an error"); //! //! println!( diff --git a/src/solver/lipo.rs b/src/solver/lipo.rs index 159f0ca..86184a5 100644 --- a/src/solver/lipo.rs +++ b/src/solver/lipo.rs @@ -350,7 +350,7 @@ where // Do not fail the optimization on an error from the local // optimization. - match local_optimizer.next(f, dom, x) { + match local_optimizer.opt_next(f, dom, x) { Ok(error) => error, Err(_) => f.apply(x), } @@ -387,7 +387,7 @@ where type Error = LipoError; - fn next( + fn opt_next( &mut self, f: &F, dom: &Domain<::Scalar>, @@ -409,7 +409,7 @@ where type Error = LipoError; - fn next( + fn solve_next( &mut self, f: &F, dom: &Domain<::Scalar>, diff --git a/src/solver/nelder_mead.rs b/src/solver/nelder_mead.rs index 51fbb03..6b5886a 100644 --- a/src/solver/nelder_mead.rs +++ b/src/solver/nelder_mead.rs @@ -457,7 +457,7 @@ impl Optimizer for NelderMead { type Error = NelderMeadError; - fn next( + fn opt_next( &mut self, f: &F, dom: &Domain, @@ -475,7 +475,7 @@ impl Solver for NelderMead { type Error = NelderMeadError; - fn next( + fn solve_next( &mut self, f: &F, dom: &Domain, diff --git a/src/solver/steffensen.rs b/src/solver/steffensen.rs index 0658259..8ab0627 100644 --- a/src/solver/steffensen.rs +++ b/src/solver/steffensen.rs @@ -84,7 +84,7 @@ impl Solver for Steffensen { type Error = SteffensenError; - fn next( + fn solve_next( &mut self, f: &F, dom: &Domain, diff --git a/src/solver/trust_region.rs b/src/solver/trust_region.rs index 57f1359..6116676 100644 --- a/src/solver/trust_region.rs +++ b/src/solver/trust_region.rs @@ -203,7 +203,7 @@ impl Solver for TrustRegion { type Error = TrustRegionError; - fn next( + fn solve_next( &mut self, f: &F, dom: &Domain, @@ -685,7 +685,7 @@ impl Optimizer for TrustRegion { type Error = TrustRegionError; - fn next( + fn opt_next( &mut self, f: &F, dom: &Domain<::Scalar>, diff --git a/src/testing.rs b/src/testing.rs index 5daaac9..2f85785 100644 --- a/src/testing.rs +++ b/src/testing.rs @@ -737,7 +737,7 @@ where let mut iter = 0; loop { - solver.next(f, dom, &mut x, &mut fx)?; + solver.solve_next(f, dom, &mut x, &mut fx)?; if fx.norm() <= tolerance { return Ok(x); @@ -767,7 +767,7 @@ where let mut iter = 0; loop { - let fx = optimizer.next(f, dom, &mut x)?; + let fx = optimizer.opt_next(f, dom, &mut x)?; if fx <= min + tolerance { // Converged. 
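Both testing helpers share the same control flow: repeatedly call `solve_next` (or `opt_next`), stop as soon as the convergence criterion holds, and give up once the iteration budget is exhausted. A condensed, self-contained sketch of that shape, with a closure standing in for one solver iteration; everything here is illustrative only.

fn drive(
    mut step: impl FnMut(&mut f64),
    x0: f64,
    tolerance: f64,
    max_iters: usize,
) -> Result<f64, String> {
    let mut x = x0;
    for _ in 0..max_iters {
        step(&mut x);
        if x.abs() <= tolerance {
            // Converged within the iteration budget.
            return Ok(x);
        }
    }
    Err("did not converge".to_string())
}

fn main() {
    // A trivial contraction step converging to the root at zero.
    let root = drive(|x| *x /= 2.0, 1.0, 1e-6, 100).unwrap();
    assert!(root.abs() <= 1e-6);
}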
@@ -799,7 +799,7 @@ where let mut fx = x.clone_owned(); for iter in 0..iters { - solver.next(f, dom, &mut x, &mut fx)?; + solver.solve_next(f, dom, &mut x, &mut fx)?; inspect(&solver, &x, fx.norm(), iter); } From fb3e6b7a5e45871dd4240b4f7bfdf04bd33f8328 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Petr=20Nevyho=C5=A1t=C4=9Bn=C3=BD?= Date: Sun, 12 Nov 2023 21:57:57 +0100 Subject: [PATCH 4/6] feat!: rename scalar associated type to field --- examples/rosenbrock.rs | 12 +-- gomez-bench/benches/main.rs | 30 +++--- src/analysis/initial.rs | 12 +-- src/core/base.rs | 6 +- src/core/function.rs | 18 ++-- src/core/optimizer.rs | 20 ++-- src/core/solver.rs | 24 ++--- src/core/system.rs | 46 ++++----- src/derivatives.rs | 112 +++++++++++----------- src/lib.rs | 28 +++--- src/solver/lipo.rs | 76 +++++++-------- src/solver/nelder_mead.rs | 68 +++++++------- src/solver/steffensen.rs | 16 ++-- src/solver/trust_region.rs | 128 ++++++++++++------------- src/testing.rs | 182 ++++++++++++++++++------------------ 15 files changed, 389 insertions(+), 389 deletions(-) diff --git a/examples/rosenbrock.rs b/examples/rosenbrock.rs index 2246583..f3d6ff9 100644 --- a/examples/rosenbrock.rs +++ b/examples/rosenbrock.rs @@ -10,9 +10,9 @@ struct Rosenbrock { } impl Problem for Rosenbrock { - type Scalar = f64; + type Field = f64; - fn domain(&self) -> Domain { + fn domain(&self) -> Domain { Domain::unconstrained(2) } } @@ -20,11 +20,11 @@ impl Problem for Rosenbrock { impl System for Rosenbrock { fn eval( &self, - x: &na::Vector, - fx: &mut na::Vector, + x: &na::Vector, + fx: &mut na::Vector, ) where - Sx: na::storage::Storage + IsContiguous, - Sfx: na::storage::StorageMut, + Sx: na::storage::Storage + IsContiguous, + Sfx: na::storage::StorageMut, { fx[0] = (self.a - x[0]).powi(2); fx[1] = self.b * (x[1] - x[0].powi(2)).powi(2); diff --git a/gomez-bench/benches/main.rs b/gomez-bench/benches/main.rs index ad90d21..0b06f2e 100644 --- a/gomez-bench/benches/main.rs +++ b/gomez-bench/benches/main.rs @@ -168,7 +168,7 @@ mod bullard_biegler { fn bench_solve(bencher: divan::Bencher, with_system: GF, with_solver: GS) where GF: Fn() -> (F, usize), - GS: Fn(&F, &Domain, &na::OVector) -> S, + GS: Fn(&F, &Domain, &na::OVector) -> S, F: TestSystem, S: Solver, { @@ -203,8 +203,8 @@ where fn with_trust_region( f: &F, - dom: &Domain, - _: &na::OVector, + dom: &Domain, + _: &na::OVector, ) -> TrustRegion where F: Problem, @@ -214,8 +214,8 @@ where fn with_nelder_mead( f: &F, - dom: &Domain, - _: &na::OVector, + dom: &Domain, + _: &na::OVector, ) -> NelderMead where F: Problem, @@ -225,11 +225,11 @@ where fn with_gsl_hybrids( f: &F, - _: &Domain, - x: &na::OVector, + _: &Domain, + x: &na::OVector, ) -> GslSolverWrapper> where - F: TestSystem + Clone, + F: TestSystem + Clone, { GslSolverWrapper::new(GslFunctionWrapper::new( f.clone(), @@ -248,7 +248,7 @@ impl GslFunctionWrapper { } } -impl> GslFunction for GslFunctionWrapper { +impl> GslFunction for GslFunctionWrapper { fn eval(&self, x: &GslVec, f: &mut GslVec) -> GslStatus { use na::DimName; let dim = Dynamic::new(x.len()); @@ -285,7 +285,7 @@ impl GslSolverWrapper { } } -impl> Solver for GslSolverWrapper> { +impl> Solver for GslSolverWrapper> { const NAME: &'static str = "GSL hybrids"; type Error = String; @@ -293,13 +293,13 @@ impl> Solver for GslSolverWrapper( &mut self, _f: &F, - _dom: &Domain, - x: &mut na::Vector, - fx: &mut na::Vector, + _dom: &Domain, + x: &mut na::Vector, + fx: &mut na::Vector, ) -> Result<(), Self::Error> where - Sx: na::storage::StorageMut + IsContiguous, - Sfx: 
na::storage::StorageMut, + Sx: na::storage::StorageMut + IsContiguous, + Sfx: na::storage::StorageMut, { let result = self.solver.step().to_result(); x.copy_from_slice(self.solver.root()); diff --git a/src/analysis/initial.rs b/src/analysis/initial.rs index d2cbbd2..af27cc0 100644 --- a/src/analysis/initial.rs +++ b/src/analysis/initial.rs @@ -28,13 +28,13 @@ impl InitialGuessAnalysis { /// Analyze the system in given point. pub fn analyze( f: &F, - dom: &Domain, - x: &mut Vector, - fx: &mut Vector, + dom: &Domain, + x: &mut Vector, + fx: &mut Vector, ) -> Self where - Sx: StorageMut + IsContiguous, - Sfx: StorageMut, + Sx: StorageMut + IsContiguous, + Sfx: StorageMut, { let dim = Dynamic::new(dom.dim()); let scale = dom @@ -54,7 +54,7 @@ impl InitialGuessAnalysis { qr.solve_mut(&mut p); // Do Newton step. - p *= convert::<_, F::Scalar>(0.001); + p *= convert::<_, F::Field>(0.001); *x += p; // Compute F'(x) after one Newton step. diff --git a/src/core/base.rs b/src/core/base.rs index 03e89ed..8cdbb29 100644 --- a/src/core/base.rs +++ b/src/core/base.rs @@ -5,10 +5,10 @@ use super::domain::Domain; /// The base trait for [`System`](super::system::System) and /// [`Function`](super::function::Function). pub trait Problem { - /// Type of the scalar, usually f32 or f64. - type Scalar: RealField + Copy; + /// Type of the field, usually f32 or f64. + type Field: RealField + Copy; /// Get the domain (bound constraints) of the system. If not overridden, the /// system is unconstrained. - fn domain(&self) -> Domain; + fn domain(&self) -> Domain; } diff --git a/src/core/function.rs b/src/core/function.rs index f744940..3414072 100644 --- a/src/core/function.rs +++ b/src/core/function.rs @@ -7,7 +7,7 @@ use super::{base::Problem, system::System}; /// ## Defining a function /// /// A function is any type that implements [`Function`] and [`Problem`] traits. -/// There is one required associated type (scalar) and one required method +/// There is one required associated type (field) and one required method /// ([`apply`](Function::apply)). /// /// ```rust @@ -24,19 +24,19 @@ use super::{base::Problem, system::System}; /// /// impl Problem for Rosenbrock { /// // The numeric type. Usually f64 or f32. -/// type Scalar = f64; +/// type Field = f64; /// /// // The domain of the problem (could be bound-constrained). -/// fn domain(&self) -> Domain { +/// fn domain(&self) -> Domain { /// Domain::unconstrained(2) /// } /// } /// /// impl Function for Rosenbrock { /// // Apply trial values of variables to the function. -/// fn apply(&self, x: &na::Vector) -> Self::Scalar +/// fn apply(&self, x: &na::Vector) -> Self::Field /// where -/// Sx: na::storage::Storage + IsContiguous, +/// Sx: na::storage::Storage + IsContiguous, /// { /// // Compute the function value. /// (self.a - x[0]).powi(2) + self.b * (x[1] - x[0].powi(2)).powi(2) @@ -45,18 +45,18 @@ use super::{base::Problem, system::System}; /// ``` pub trait Function: Problem { /// Calculate the function value given values of the variables. 
- fn apply(&self, x: &Vector) -> Self::Scalar + fn apply(&self, x: &Vector) -> Self::Field where - Sx: Storage + IsContiguous; + Sx: Storage + IsContiguous; } impl Function for F where F: System, { - fn apply(&self, x: &Vector) -> Self::Scalar + fn apply(&self, x: &Vector) -> Self::Field where - Sx: Storage + IsContiguous, + Sx: Storage + IsContiguous, { self.norm(x) } diff --git a/src/core/optimizer.rs b/src/core/optimizer.rs index aaf8f25..46366e3 100644 --- a/src/core/optimizer.rs +++ b/src/core/optimizer.rs @@ -38,8 +38,8 @@ use super::{domain::Domain, function::Function}; /// /// impl Optimizer for Random /// where -/// F::Scalar: SampleUniform, -/// Standard: Distribution, +/// F::Field: SampleUniform, +/// Standard: Distribution, /// { /// const NAME: &'static str = "Random"; /// type Error = std::convert::Infallible; @@ -47,11 +47,11 @@ use super::{domain::Domain, function::Function}; /// fn opt_next( /// &mut self, /// f: &F, -/// dom: &Domain, -/// x: &mut Vector, -/// ) -> Result +/// dom: &Domain, +/// x: &mut Vector, +/// ) -> Result /// where -/// Sx: StorageMut + IsContiguous, +/// Sx: StorageMut + IsContiguous, /// { /// // Randomly sample in the domain. /// dom.sample(x, &mut self.rng); @@ -85,9 +85,9 @@ pub trait Optimizer { fn opt_next( &mut self, f: &F, - dom: &Domain, - x: &mut Vector, - ) -> Result + dom: &Domain, + x: &mut Vector, + ) -> Result where - Sx: StorageMut + IsContiguous; + Sx: StorageMut + IsContiguous; } diff --git a/src/core/solver.rs b/src/core/solver.rs index a0bbc3f..e19dd76 100644 --- a/src/core/solver.rs +++ b/src/core/solver.rs @@ -38,8 +38,8 @@ use super::{domain::Domain, system::System}; /// /// impl Solver for Random /// where -/// F::Scalar: SampleUniform, -/// Standard: Distribution, +/// F::Field: SampleUniform, +/// Standard: Distribution, /// { /// const NAME: &'static str = "Random"; /// type Error = std::convert::Infallible; @@ -47,13 +47,13 @@ use super::{domain::Domain, system::System}; /// fn solve_next( /// &mut self, /// f: &F, -/// dom: &Domain, -/// x: &mut Vector, -/// fx: &mut Vector, +/// dom: &Domain, +/// x: &mut Vector, +/// fx: &mut Vector, /// ) -> Result<(), Self::Error> /// where -/// Sx: StorageMut + IsContiguous, -/// Sfx: StorageMut, +/// Sx: StorageMut + IsContiguous, +/// Sfx: StorageMut, /// { /// // Randomly sample in the domain. /// dom.sample(x, &mut self.rng); @@ -89,11 +89,11 @@ pub trait Solver { fn solve_next( &mut self, f: &F, - dom: &Domain, - x: &mut Vector, - fx: &mut Vector, + dom: &Domain, + x: &mut Vector, + fx: &mut Vector, ) -> Result<(), Self::Error> where - Sx: StorageMut + IsContiguous, - Sfx: StorageMut; + Sx: StorageMut + IsContiguous, + Sfx: StorageMut; } diff --git a/src/core/system.rs b/src/core/system.rs index d2063cc..f7746b6 100644 --- a/src/core/system.rs +++ b/src/core/system.rs @@ -11,7 +11,7 @@ use super::{base::Problem, domain::Domain}; /// ## Defining a system /// /// A system is any type that implements [`System`] and [`Problem`] traits. -/// There is one required associated type (scalar type) and one required method +/// There is one required associated type (field type) and one required method /// ([`eval`](System::eval)). /// /// ```rust @@ -27,10 +27,10 @@ use super::{base::Problem, domain::Domain}; /// /// impl Problem for Rosenbrock { /// // The numeric type. Usually f64 or f32. -/// type Scalar = f64; +/// type Field = f64; /// /// // The domain of the problem (could be bound-constrained). 
-/// fn domain(&self) -> Domain { +/// fn domain(&self) -> Domain { /// Domain::unconstrained(2) /// } /// } @@ -39,11 +39,11 @@ use super::{base::Problem, domain::Domain}; /// // Evaluate trial values of variables to the system. /// fn eval( /// &self, -/// x: &na::Vector, -/// fx: &mut na::Vector, +/// x: &na::Vector, +/// fx: &mut na::Vector, /// ) where -/// Sx: na::storage::Storage + IsContiguous, -/// Sfx: na::storage::StorageMut, +/// Sx: na::storage::Storage + IsContiguous, +/// Sfx: na::storage::StorageMut, /// { /// // Compute the residuals of all equations. /// fx[0] = (self.a - x[0]).powi(2); @@ -55,20 +55,20 @@ pub trait System: Problem { /// Calculate the residuals of the system given values of the variables. fn eval( &self, - x: &Vector, - fx: &mut Vector, + x: &Vector, + fx: &mut Vector, ) where - Sx: Storage + IsContiguous, - Sfx: StorageMut; + Sx: Storage + IsContiguous, + Sfx: StorageMut; /// Calculate the residuals vector norm. /// /// The default implementation allocates a temporary vector for the /// residuals on every call. If you plan to solve the system by an /// optimizer, consider overriding the default implementation. - fn norm(&self, x: &Vector) -> Self::Scalar + fn norm(&self, x: &Vector) -> Self::Field where - Sx: Storage + IsContiguous, + Sx: Storage + IsContiguous, { let mut fx = x.clone_owned(); self.eval(x, &mut fx); @@ -90,7 +90,7 @@ pub trait System: Problem { /// for example. pub struct RepulsiveSystem<'f, F: System> { f: &'f F, - archive: Vec>, + archive: Vec>, } impl<'f, F: System> RepulsiveSystem<'f, F> { @@ -103,7 +103,7 @@ impl<'f, F: System> RepulsiveSystem<'f, F> { } /// Add a found solution to the archive. - pub fn push(&mut self, root: OVector) { + pub fn push(&mut self, root: OVector) { self.archive.push(root); } @@ -118,30 +118,30 @@ impl<'f, F: System> RepulsiveSystem<'f, F> { } /// Unpack the archive which contains all solutions found. - pub fn unpack(self) -> Vec> { + pub fn unpack(self) -> Vec> { self.archive } } impl<'f, F: System> Problem for RepulsiveSystem<'f, F> { - type Scalar = F::Scalar; + type Field = F::Field; - fn domain(&self) -> Domain { + fn domain(&self) -> Domain { self.f.domain() } } impl<'f, F: System> System for RepulsiveSystem<'f, F> where - DefaultAllocator: Allocator, + DefaultAllocator: Allocator, { fn eval( &self, - x: &Vector, - fx: &mut Vector, + x: &Vector, + fx: &mut Vector, ) where - Sx: Storage + IsContiguous, - Sfx: StorageMut, + Sx: Storage + IsContiguous, + Sfx: StorageMut, { // TODO: RepulsiveSystem should adjust the residuals of the inner system // such that solvers tend to go away from the roots stored in the diff --git a/src/derivatives.rs b/src/derivatives.rs index 366d470..e99a381 100644 --- a/src/derivatives.rs +++ b/src/derivatives.rs @@ -22,7 +22,7 @@ pub const EPSILON_CBRT: f64 = 0.0000060554544523933395; /// Jacobian matrix of a system. #[derive(Debug)] pub struct Jacobian { - jac: OMatrix, + jac: OMatrix, } impl Jacobian { @@ -41,14 +41,14 @@ impl Jacobian { /// details. 
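The default `norm` implementation shown above allocates a temporary residual vector on every call. Below is a minimal sketch, not part of the patch, of an allocation-free override for a hypothetical two-equation system (`Circle` is an invented name; the generic bounds follow the `Field`-based signatures introduced in this patch, with import paths as of the end of the series):

```rust
use gomez::nalgebra as na;
use gomez::{Domain, Problem, System};
use na::{Dynamic, IsContiguous};

struct Circle;

impl Problem for Circle {
    type Field = f64;

    fn domain(&self) -> Domain<Self::Field> {
        Domain::unconstrained(2)
    }
}

impl System for Circle {
    fn eval<Sx, Sfx>(
        &self,
        x: &na::Vector<Self::Field, Dynamic, Sx>,
        fx: &mut na::Vector<Self::Field, Dynamic, Sfx>,
    ) where
        Sx: na::storage::Storage<Self::Field, Dynamic> + IsContiguous,
        Sfx: na::storage::StorageMut<Self::Field, Dynamic>,
    {
        fx[0] = x[0] * x[0] + x[1] * x[1] - 1.0;
        fx[1] = x[0] - x[1];
    }

    // Override the default: accumulate the norm directly, with no
    // temporary vector.
    fn norm<Sx>(&self, x: &na::Vector<Self::Field, Dynamic, Sx>) -> Self::Field
    where
        Sx: na::storage::Storage<Self::Field, Dynamic> + IsContiguous,
    {
        let f0 = x[0] * x[0] + x[1] * x[1] - 1.0;
        let f1 = x[0] - x[1];
        (f0 * f0 + f1 * f1).sqrt()
    }
}
```

Because of the blanket `Function` implementation, optimizers that call `apply` on such a system hit this override as well.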
pub fn new( f: &F, - x: &mut Vector, - scale: &Vector, - fx: &Vector, + x: &mut Vector, + scale: &Vector, + fx: &Vector, ) -> Self where - Sx: StorageMut + IsContiguous, - Sscale: Storage, - Sfx: Storage, + Sx: StorageMut + IsContiguous, + Sscale: Storage, + Sfx: Storage, { let mut jac = Self::zeros(f); jac.compute(f, x, scale, fx); @@ -67,16 +67,16 @@ impl Jacobian { pub fn compute( &mut self, f: &F, - x: &mut Vector, - scale: &Vector, - fx: &Vector, + x: &mut Vector, + scale: &Vector, + fx: &Vector, ) -> &mut Self where - Sx: StorageMut + IsContiguous, - Sscale: Storage, - Sfx: Storage, + Sx: StorageMut + IsContiguous, + Sscale: Storage, + Sfx: Storage, { - let eps: F::Scalar = convert(EPSILON_SQRT); + let eps: F::Field = convert(EPSILON_SQRT); for (j, mut col) in self.jac.column_iter_mut().enumerate() { let xj = x[j]; @@ -90,9 +90,9 @@ impl Jacobian { // A reasonable way to balance these competing needs is to scale // each component by x_j itself. To avoid problems when x_j is close // to zero, it is modified to take the typical magnitude instead. - let magnitude = F::Scalar::one() / scale[j]; - let step = eps * xj.abs().max(magnitude) * xj.copysign(F::Scalar::one()); - let step = if step == F::Scalar::zero() { eps } else { step }; + let magnitude = F::Field::one() / scale[j]; + let step = eps * xj.abs().max(magnitude) * xj.copysign(F::Field::one()); + let step = if step == F::Field::zero() { eps } else { step }; // Update the point. x[j] = xj + step; @@ -111,7 +111,7 @@ impl Jacobian { } impl Deref for Jacobian { - type Target = OMatrix; + type Target = OMatrix; fn deref(&self) -> &Self::Target { &self.jac @@ -121,7 +121,7 @@ impl Deref for Jacobian { /// Gradient vector of a function. #[derive(Debug)] pub struct Gradient { - grad: OVector, + grad: OVector, } impl Gradient { @@ -140,13 +140,13 @@ impl Gradient { /// details. pub fn new( f: &F, - x: &mut Vector, - scale: &Vector, - fx: F::Scalar, + x: &mut Vector, + scale: &Vector, + fx: F::Field, ) -> Self where - Sx: StorageMut + IsContiguous, - Sscale: Storage, + Sx: StorageMut + IsContiguous, + Sscale: Storage, { let mut grad = Self::zeros(f); grad.compute(f, x, scale, fx); @@ -165,23 +165,23 @@ impl Gradient { pub fn compute( &mut self, f: &F, - x: &mut Vector, - scale: &Vector, - fx: F::Scalar, + x: &mut Vector, + scale: &Vector, + fx: F::Field, ) -> &mut Self where - Sx: StorageMut + IsContiguous, - Sscale: Storage, + Sx: StorageMut + IsContiguous, + Sscale: Storage, { - let eps: F::Scalar = convert(EPSILON_SQRT); + let eps: F::Field = convert(EPSILON_SQRT); for i in 0..x.nrows() { let xi = x[i]; // See the implementation of Jacobian for details on computing step size. - let magnitude = F::Scalar::one() / scale[i]; - let step = eps * xi.abs().max(magnitude) * F::Scalar::one().copysign(xi); - let step = if step == F::Scalar::zero() { eps } else { step }; + let magnitude = F::Field::one() / scale[i]; + let step = eps * xi.abs().max(magnitude) * F::Field::one().copysign(xi); + let step = if step == F::Field::zero() { eps } else { step }; // Update the point. x[i] = xi + step; @@ -199,7 +199,7 @@ impl Gradient { } impl Deref for Gradient { - type Target = OVector; + type Target = OVector; fn deref(&self) -> &Self::Target { &self.grad @@ -209,9 +209,9 @@ impl Deref for Gradient { /// Hessian matrix of a system. #[derive(Debug)] pub struct Hessian { - hes: OMatrix, - steps: OVector, - neighbors: OVector, + hes: OMatrix, + steps: OVector, + neighbors: OVector, } impl Hessian { @@ -232,13 +232,13 @@ impl Hessian { /// details. 
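The step-size comment inside `Jacobian::compute` above is easier to follow in isolation. Here is a standalone sketch of the rule for `f64`, mirroring the sign-preserving variant used by `Gradient` and `Hessian` (`fd_step` is an illustrative name, not a crate item):

```rust
fn fd_step(xj: f64, scale_j: f64) -> f64 {
    let eps = f64::EPSILON.sqrt();
    // Scale the step by the larger of |x_j| and the typical magnitude
    // 1 / scale_j, keep the sign of x_j, and never let it collapse to zero.
    let magnitude = 1.0 / scale_j;
    let step = eps * xj.abs().max(magnitude) * 1.0f64.copysign(xj);
    if step == 0.0 {
        eps
    } else {
        step
    }
}

fn main() {
    assert!(fd_step(1000.0, 1.0) > fd_step(1.0, 1.0)); // step grows with |x_j|
    assert!(fd_step(0.0, 1.0) > 0.0); // degenerate input still gets a step
}
```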
pub fn new( f: &F, - x: &mut Vector, - scale: &Vector, - fx: F::Scalar, + x: &mut Vector, + scale: &Vector, + fx: F::Field, ) -> Self where - Sx: StorageMut + IsContiguous, - Sscale: Storage, + Sx: StorageMut + IsContiguous, + Sscale: Storage, { let mut hes = Self::zeros(f); hes.compute(f, x, scale, fx); @@ -257,23 +257,23 @@ impl Hessian { pub fn compute( &mut self, f: &F, - x: &mut Vector, - scale: &Vector, - fx: F::Scalar, + x: &mut Vector, + scale: &Vector, + fx: F::Field, ) -> &mut Self where - Sx: StorageMut + IsContiguous, - Sscale: Storage, + Sx: StorageMut + IsContiguous, + Sscale: Storage, { - let eps: F::Scalar = convert(EPSILON_CBRT); + let eps: F::Field = convert(EPSILON_CBRT); for i in 0..x.nrows() { let xi = x[i]; // See the implementation of Jacobian for details on computing step size. - let magnitude = F::Scalar::one() / scale[i]; - let step = eps * xi.abs().max(magnitude) * F::Scalar::one().copysign(xi); - let step = if step == F::Scalar::zero() { eps } else { step }; + let magnitude = F::Field::one() / scale[i]; + let step = eps * xi.abs().max(magnitude) * F::Field::one().copysign(xi); + let step = if step == F::Field::zero() { eps } else { step }; // Store the step for Hessian calculation. self.steps[i] = step; @@ -325,7 +325,7 @@ impl Hessian { } impl Deref for Hessian { - type Target = OMatrix; + type Target = OMatrix; fn deref(&self) -> &Self::Target { &self.hes @@ -346,17 +346,17 @@ mod tests { struct MixedVars; impl Problem for MixedVars { - type Scalar = f64; + type Field = f64; - fn domain(&self) -> Domain { + fn domain(&self) -> Domain { Domain::unconstrained(2) } } impl Function for MixedVars { - fn apply(&self, x: &Vector) -> Self::Scalar + fn apply(&self, x: &Vector) -> Self::Field where - Sx: Storage + IsContiguous, + Sx: Storage + IsContiguous, { // A simple, arbitrary function that produces Hessian matrix with // non-zero corners. diff --git a/src/lib.rs b/src/lib.rs index 631f74a..501b573 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -80,11 +80,11 @@ //! //! impl Problem for Rosenbrock { //! // The numeric type. Usually f64 or f32. -//! type Scalar = f64; +//! type Field = f64; //! //! // Specification for the domain. At the very least, the dimension //! // must be known. -//! fn domain(&self) -> Domain { +//! fn domain(&self) -> Domain { //! Domain::unconstrained(2) //! } //! } @@ -93,11 +93,11 @@ //! // Evaluate trial values of variables to the system. //! fn eval( //! &self, -//! x: &na::Vector, -//! fx: &mut na::Vector, +//! x: &na::Vector, +//! fx: &mut na::Vector, //! ) where -//! Sx: na::storage::Storage + IsContiguous, -//! Sfx: na::storage::StorageMut, +//! Sx: na::storage::Storage + IsContiguous, +//! Sfx: na::storage::StorageMut, //! { //! // Compute the residuals of all equations. //! fx[0] = (self.a - x[0]).powi(2); @@ -125,9 +125,9 @@ //! # } //! # //! impl Problem for Rosenbrock { -//! # type Scalar = f64; +//! # type Field = f64; //! -//! fn domain(&self) -> Domain { +//! fn domain(&self) -> Domain { //! [(-10.0, 10.0), (-10.0, 10.0)].into_iter().collect() //! } //! } @@ -157,9 +157,9 @@ //! # } //! # //! # impl Problem for Rosenbrock { -//! # type Scalar = f64; +//! # type Field = f64; //! # -//! # fn domain(&self) -> Domain { +//! # fn domain(&self) -> Domain { //! # Domain::unconstrained(2) //! # } //! # } @@ -167,11 +167,11 @@ //! # impl System for Rosenbrock { //! # fn eval( //! # &self, -//! # x: &na::Vector, -//! # fx: &mut na::Vector, +//! # x: &na::Vector, +//! # fx: &mut na::Vector, //! # ) where -//! 
# Sx: na::storage::Storage + IsContiguous, -//! # Sfx: na::storage::StorageMut, +//! # Sx: na::storage::Storage + IsContiguous, +//! # Sfx: na::storage::StorageMut, //! # { //! # fx[0] = (self.a - x[0]).powi(2); //! # fx[1] = self.b * (x[1] - x[0].powi(2)).powi(2); diff --git a/src/solver/lipo.rs b/src/solver/lipo.rs index 86184a5..cf03b1d 100644 --- a/src/solver/lipo.rs +++ b/src/solver/lipo.rs @@ -52,7 +52,7 @@ pub struct LipoOptions { p_explore: f64, /// Alpha parameter for (1 + alpha)^i meshgrid for Lipschitz constant /// estimation. - alpha: AlphaInit, + alpha: AlphaInit, /// Number of sampling trials. If no potential minimizer is found after this /// number of trials, the solver returns error. sampling_trials: usize, @@ -77,28 +77,28 @@ impl Default for LipoOptions { /// LIPO solver. See [module](self) documentation for more details. pub struct Lipo { options: LipoOptions, - alpha: F::Scalar, - xs: Vec>, - ys: Vec, + alpha: F::Field, + xs: Vec>, + ys: Vec, best: usize, - k: F::Scalar, - k_inf: F::Scalar, + k: F::Field, + k_inf: F::Field, rng: R, bernoulli: Bernoulli, - tmp: OVector, - x_tmp: OVector, + tmp: OVector, + x_tmp: OVector, local_optimizer: NelderMead, iter: usize, } impl Lipo { /// Initializes LIPO solver with default options. - pub fn new(f: &F, dom: &Domain, rng: R) -> Self { + pub fn new(f: &F, dom: &Domain, rng: R) -> Self { Self::with_options(f, dom, LipoOptions::default(), rng) } /// Initializes LIPO solver with given options. - pub fn with_options(f: &F, dom: &Domain, options: LipoOptions, rng: R) -> Self { + pub fn with_options(f: &F, dom: &Domain, options: LipoOptions, rng: R) -> Self { let dim = Dynamic::new(dom.dim()); let p_explore = options.p_explore.clamp(0.0, 1.0); @@ -114,8 +114,8 @@ impl Lipo { xs: Vec::new(), ys: Vec::new(), best: 0, - k: F::Scalar::zero(), - k_inf: F::Scalar::zero(), + k: F::Field::zero(), + k_inf: F::Field::zero(), rng, bernoulli: Bernoulli::new(p_explore).unwrap(), tmp: OVector::zeros_generic(dim, U1::name()), @@ -130,8 +130,8 @@ impl Lipo { self.xs.clear(); self.ys.clear(); self.best = 0; - self.k = F::Scalar::zero(); - self.k_inf = F::Scalar::zero(); + self.k = F::Field::zero(); + self.k_inf = F::Field::zero(); self.iter = 0; } @@ -142,8 +142,8 @@ impl Lipo { /// the LIPO solver gives extra information for free. pub fn add_evaluation( &mut self, - x: OVector, - y: F::Scalar, + x: OVector, + y: F::Field, ) -> Result<(), LipoError> { let alpha = self.alpha; @@ -179,9 +179,9 @@ impl Lipo { debug!("|| x - xi || = {}", dist); } - let it = try_convert(((*k_inf).ln() / (F::Scalar::one() + alpha).ln()).ceil()) + let it = try_convert(((*k_inf).ln() / (F::Field::one() + alpha).ln()).ceil()) .unwrap_or_default() as i32; - *k = (F::Scalar::one() + alpha).powi(it); + *k = (F::Field::one() + alpha).powi(it); if !k.is_finite() { return Err(LipoError::InfiniteLipschitzConstant); @@ -211,17 +211,17 @@ pub enum LipoError { impl Lipo where - F::Scalar: SampleUniform, - Standard: Distribution, + F::Field: SampleUniform, + Standard: Distribution, { fn next_inner( &mut self, f: &F, - dom: &Domain, - x: &mut Vector, - ) -> Result + dom: &Domain, + x: &mut Vector, + ) -> Result where - Sx: StorageMut + IsContiguous, + Sx: StorageMut + IsContiguous, { let LipoOptions { sampling_trials, @@ -268,7 +268,7 @@ where // Exploitation mode is allowed only when there is enough points // evaluated and the Lipschitz constant is estimated. Then there is // randomness involved in choosing whether we explore or exploit. 
- if !initialization && *k != F::Scalar::zero() && !bernoulli.sample(rng) { + if !initialization && *k != F::Field::zero() && !bernoulli.sample(rng) { debug!("exploitation mode"); let mut tmp_best = ys[*best]; @@ -380,8 +380,8 @@ where impl Optimizer for Lipo where - F::Scalar: SampleUniform, - Standard: Distribution, + F::Field: SampleUniform, + Standard: Distribution, { const NAME: &'static str = "LIPO"; @@ -390,11 +390,11 @@ where fn opt_next( &mut self, f: &F, - dom: &Domain<::Scalar>, - x: &mut Vector<::Scalar, Dynamic, Sx>, - ) -> Result<::Scalar, Self::Error> + dom: &Domain<::Field>, + x: &mut Vector<::Field, Dynamic, Sx>, + ) -> Result<::Field, Self::Error> where - Sx: StorageMut<::Scalar, Dynamic> + IsContiguous, + Sx: StorageMut<::Field, Dynamic> + IsContiguous, { self.next_inner(f, dom, x) } @@ -402,8 +402,8 @@ where impl Solver for Lipo where - F::Scalar: SampleUniform, - Standard: Distribution, + F::Field: SampleUniform, + Standard: Distribution, { const NAME: &'static str = "LIPO"; @@ -412,13 +412,13 @@ where fn solve_next( &mut self, f: &F, - dom: &Domain<::Scalar>, - x: &mut Vector<::Scalar, Dynamic, Sx>, - fx: &mut Vector<::Scalar, Dynamic, Sfx>, + dom: &Domain<::Field>, + x: &mut Vector<::Field, Dynamic, Sx>, + fx: &mut Vector<::Field, Dynamic, Sfx>, ) -> Result<(), Self::Error> where - Sx: StorageMut<::Scalar, Dynamic> + IsContiguous, - Sfx: StorageMut<::Scalar, Dynamic>, + Sx: StorageMut<::Field, Dynamic> + IsContiguous, + Sfx: StorageMut<::Field, Dynamic>, { self.next_inner(f, dom, x)?; f.eval(x, fx); diff --git a/src/solver/nelder_mead.rs b/src/solver/nelder_mead.rs index 6b5886a..7f3d5ed 100644 --- a/src/solver/nelder_mead.rs +++ b/src/solver/nelder_mead.rs @@ -61,15 +61,15 @@ pub struct NelderMeadOptions { /// balanced (see [`CoefficientsFamily`]). family: CoefficientsFamily, /// Coefficient for reflection operation. Default: `-1`. - reflection_coeff: F::Scalar, + reflection_coeff: F::Field, /// Coefficient for expansion operation. Default: `-2`. - expansion_coeff: F::Scalar, + expansion_coeff: F::Field, /// Coefficient for outer contraction operation. Default: `-0.5`. - outer_contraction_coeff: F::Scalar, + outer_contraction_coeff: F::Field, /// Coefficient for inner contraction operation. Default: `0.5`. - inner_contraction_coeff: F::Scalar, + inner_contraction_coeff: F::Field, /// Coefficient for shrinking operation. Default: `0.5`. - shrink_coeff: F::Scalar, + shrink_coeff: F::Field, } impl Default for NelderMeadOptions { @@ -86,7 +86,7 @@ impl Default for NelderMeadOptions { } impl NelderMeadOptions { - fn overwrite_coeffs(&mut self, dom: &Domain) { + fn overwrite_coeffs(&mut self, dom: &Domain) { let Self { family, reflection_coeff, @@ -105,14 +105,14 @@ impl NelderMeadOptions { *shrink_coeff = convert(0.5); } CoefficientsFamily::Balanced => { - let n: F::Scalar = convert(dom.dim() as f64); - let n_inv = F::Scalar::one() / n; + let n: F::Field = convert(dom.dim() as f64); + let n_inv = F::Field::one() / n; *reflection_coeff = convert(-1.0); *expansion_coeff = -(n_inv * convert(2.0) + convert(1.0)); - *outer_contraction_coeff = -(F::Scalar::one() - n_inv); + *outer_contraction_coeff = -(F::Field::one() - n_inv); *inner_contraction_coeff = -*outer_contraction_coeff; - *shrink_coeff = F::Scalar::one() - n_inv; + *shrink_coeff = F::Field::one() - n_inv; } CoefficientsFamily::GoldenSection => { let alpha = 1.0 / (0.5 * (5f64.sqrt() + 1.0)); @@ -132,24 +132,24 @@ impl NelderMeadOptions { /// Nelder-Mead solver. See [module](self) documentation for more details. 
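The `Balanced` family in `overwrite_coeffs` above scales the coefficients with the problem dimension. A quick illustrative check in plain `f64` (not part of the patch) that `n = 2` recovers the standard coefficients:

```rust
fn balanced(n: f64) -> [f64; 5] {
    let n_inv = 1.0 / n;
    [
        -1.0,                 // reflection
        -(n_inv * 2.0 + 1.0), // expansion
        -(1.0 - n_inv),       // outer contraction
        1.0 - n_inv,          // inner contraction (negated outer contraction)
        1.0 - n_inv,          // shrink
    ]
}

fn main() {
    // For n = 2, the family reduces to the documented defaults.
    assert_eq!(balanced(2.0), [-1.0, -2.0, -0.5, 0.5, 0.5]);
}
```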
pub struct NelderMead { options: NelderMeadOptions, - scale: OVector, - centroid: OVector, - reflection: OVector, - expansion: OVector, - contraction: OVector, - simplex: Vec>, - errors: Vec, + scale: OVector, + centroid: OVector, + reflection: OVector, + expansion: OVector, + contraction: OVector, + simplex: Vec>, + errors: Vec, sort_perm: Vec, } impl NelderMead { /// Initializes Nelder-Mead solver with default options. - pub fn new(f: &F, dom: &Domain) -> Self { + pub fn new(f: &F, dom: &Domain) -> Self { Self::with_options(f, dom, NelderMeadOptions::default()) } /// Initializes Nelder-Mead solver with given options. - pub fn with_options(_: &F, dom: &Domain, mut options: NelderMeadOptions) -> Self { + pub fn with_options(_: &F, dom: &Domain, mut options: NelderMeadOptions) -> Self { let dim = Dynamic::new(dom.dim()); options.overwrite_coeffs(dom); @@ -196,11 +196,11 @@ impl NelderMead { fn next_inner( &mut self, f: &F, - dom: &Domain, - x: &mut Vector, - ) -> Result + dom: &Domain, + x: &mut Vector, + ) -> Result where - Sx: StorageMut + IsContiguous, + Sx: StorageMut + IsContiguous, { let NelderMeadOptions { reflection_coeff, @@ -274,7 +274,7 @@ impl NelderMead { } // Calculate the centroid. - centroid.fill(F::Scalar::zero()); + centroid.fill(F::Field::zero()); (0..n) .map(|i| &simplex[sort_perm[i]]) .for_each(|xi| *centroid += xi); @@ -435,7 +435,7 @@ impl NelderMead { // otherwise an error reduction was achieved. This criterion is // taken from "Less is more: Simplified Nelder-Mead method for large // unconstrained optimization". - let eps: F::Scalar = convert(EPSILON_SQRT); + let eps: F::Field = convert(EPSILON_SQRT); let worst = errors[sort_perm[n]]; let best = errors[sort_perm[0]]; @@ -460,11 +460,11 @@ impl Optimizer for NelderMead { fn opt_next( &mut self, f: &F, - dom: &Domain, - x: &mut Vector, - ) -> Result + dom: &Domain, + x: &mut Vector, + ) -> Result where - Sx: StorageMut + IsContiguous, + Sx: StorageMut + IsContiguous, { self.next_inner(f, dom, x) } @@ -478,13 +478,13 @@ impl Solver for NelderMead { fn solve_next( &mut self, f: &F, - dom: &Domain, - x: &mut Vector, - fx: &mut Vector, + dom: &Domain, + x: &mut Vector, + fx: &mut Vector, ) -> Result<(), Self::Error> where - Sx: StorageMut + IsContiguous, - Sfx: StorageMut, + Sx: StorageMut + IsContiguous, + Sfx: StorageMut, { self.next_inner(f, dom, x)?; f.eval(x, fx); diff --git a/src/solver/steffensen.rs b/src/solver/steffensen.rs index 8ab0627..9c97a56 100644 --- a/src/solver/steffensen.rs +++ b/src/solver/steffensen.rs @@ -39,7 +39,7 @@ pub struct SteffensenOptions { /// [`SteffensenVariant`]). variant: SteffensenVariant, #[getset(skip)] - _phantom: PhantomData, + _phantom: PhantomData, } impl Default for SteffensenOptions { @@ -58,12 +58,12 @@ pub struct Steffensen { impl Steffensen { /// Initializes Steffensen solver with default options. - pub fn new(f: &F, dom: &Domain) -> Self { + pub fn new(f: &F, dom: &Domain) -> Self { Self::with_options(f, dom, SteffensenOptions::default()) } /// Initializes Steffensen solver with given options. 
- pub fn with_options(_f: &F, _dom: &Domain, options: SteffensenOptions) -> Self { + pub fn with_options(_f: &F, _dom: &Domain, options: SteffensenOptions) -> Self { Self { options } } @@ -87,13 +87,13 @@ impl Solver for Steffensen { fn solve_next( &mut self, f: &F, - dom: &Domain, - x: &mut Vector, - fx: &mut Vector, + dom: &Domain, + x: &mut Vector, + fx: &mut Vector, ) -> Result<(), Self::Error> where - Sx: StorageMut + IsContiguous, - Sfx: StorageMut, + Sx: StorageMut + IsContiguous, + Sfx: StorageMut, { if dom.dim() != 1 { return Err(SteffensenError::InvalidDimensionality); diff --git a/src/solver/trust_region.rs b/src/solver/trust_region.rs index 6116676..0c7f2e5 100644 --- a/src/solver/trust_region.rs +++ b/src/solver/trust_region.rs @@ -55,23 +55,23 @@ pub enum DeltaInit { #[getset(get_copy = "pub", set = "pub")] pub struct TrustRegionOptions { /// Minimum allowed trust region size. Default: `f64::EPSILON.sqrt()`. - delta_min: F::Scalar, + delta_min: F::Field, /// Maximum allowed trust region size. Default: `1e9`. - delta_max: F::Scalar, + delta_max: F::Field, /// Initial trust region size. Default: estimated (see [`DeltaInit`]). - delta_init: DeltaInit, + delta_init: DeltaInit, /// Minimum scaling factor for lambda in Levenberg-Marquardt step. Default: /// `1e-10`. - mu_min: F::Scalar, + mu_min: F::Field, /// Threshold for gain ratio to shrink trust region size if lower. Default: /// `0.25`. - shrink_thresh: F::Scalar, + shrink_thresh: F::Field, /// Threshold for gain ratio to expand trust region size if higher. Default: /// `0.75`. - expand_thresh: F::Scalar, + expand_thresh: F::Field, /// Threshold for gain ratio that needs to be exceeded to accept the /// calculated step. Default: `0.0001`. - accept_thresh: F::Scalar, + accept_thresh: F::Field, /// Number of step rejections that are allowed to happen before returning /// [`TrustRegionError::NoProgress`] error. Default: `10`. rejections_thresh: usize, @@ -119,36 +119,36 @@ impl Default for TrustRegionOptions { /// Trust region solver. See [module](self) documentation for more details. pub struct TrustRegion { options: TrustRegionOptions, - delta: F::Scalar, - mu: F::Scalar, - scale: OVector, + delta: F::Field, + mu: F::Field, + scale: OVector, jac: Jacobian, grad: Gradient, hes: Hessian, - q_tr_fx_neg: OVector, - newton: OVector, - grad_neg: OVector, - cauchy: OVector, - jac_tr_jac: OMatrix, - p: OVector, - temp: OVector, + q_tr_fx_neg: OVector, + newton: OVector, + grad_neg: OVector, + cauchy: OVector, + jac_tr_jac: OMatrix, + p: OVector, + temp: OVector, iter: usize, rejections_cnt: usize, } impl TrustRegion { /// Initializes trust region solver with default options. - pub fn new(f: &F, dom: &Domain) -> Self { + pub fn new(f: &F, dom: &Domain) -> Self { Self::with_options(f, dom, TrustRegionOptions::default()) } /// Initializes trust region solver with given options. - pub fn with_options(f: &F, dom: &Domain, options: TrustRegionOptions) -> Self { + pub fn with_options(f: &F, dom: &Domain, options: TrustRegionOptions) -> Self { let dim = Dynamic::new(dom.dim()); let delta_init = match options.delta_init { DeltaInit::Fixed(fixed) => fixed, // Zero is recognized in the function `next`. 
- DeltaInit::Estimated => F::Scalar::zero(), + DeltaInit::Estimated => F::Field::zero(), }; let scale = dom .scale() @@ -179,7 +179,7 @@ impl TrustRegion { pub fn reset(&mut self) { self.delta = match self.options.delta_init { DeltaInit::Fixed(fixed) => fixed, - DeltaInit::Estimated => F::Scalar::zero(), + DeltaInit::Estimated => F::Field::zero(), }; self.mu = convert(0.5); self.iter = 1; @@ -206,13 +206,13 @@ impl Solver for TrustRegion { fn solve_next( &mut self, f: &F, - dom: &Domain, - x: &mut Vector, - fx: &mut Vector, + dom: &Domain, + x: &mut Vector, + fx: &mut Vector, ) -> Result<(), Self::Error> where - Sx: StorageMut + IsContiguous, - Sfx: StorageMut, + Sx: StorageMut + IsContiguous, + Sfx: StorageMut, { let TrustRegionOptions { delta_min, @@ -245,9 +245,9 @@ impl Solver for TrustRegion { } = self; #[allow(clippy::needless_late_init)] - let scaled_newton: &mut OVector; - let scale_inv2: &mut OVector; - let cauchy_scaled: &mut OVector; + let scaled_newton: &mut OVector; + let scale_inv2: &mut OVector; + let cauchy_scaled: &mut OVector; #[derive(Debug, Clone, Copy, PartialEq)] enum StepType { @@ -264,7 +264,7 @@ impl Solver for TrustRegion { let fx_norm = fx.norm(); - let estimate_delta = *delta == F::Scalar::zero(); + let estimate_delta = *delta == F::Field::zero(); if estimate_delta { // Zero delta signifies that the initial delta is to be set // automatically and it has not been done yet. @@ -278,8 +278,8 @@ impl Solver for TrustRegion { // where K = 100. The approach is taken from GSL. for (j, col) in jac.column_iter().enumerate() { temp[j] = col.norm(); - if temp[j] == F::Scalar::zero() { - temp[j] = F::Scalar::one(); + if temp[j] == F::Field::zero() { + temp[j] = F::Field::one(); } } temp.component_mul_assign(x); @@ -287,7 +287,7 @@ impl Solver for TrustRegion { let factor = convert(100.0); *delta = temp.norm() * factor; - if *delta == F::Scalar::zero() { + if *delta == F::Field::zero() { *delta = factor; } } @@ -309,7 +309,7 @@ impl Solver for TrustRegion { "Newton step is invalid for ill-defined Jacobian (zero columns: {:?})", jac.column_iter() .enumerate() - .filter(|(_, col)| col.norm() == F::Scalar::zero()) + .filter(|(_, col)| col.norm() == F::Field::zero()) .map(|(i, _)| i) .collect::>() ); @@ -335,7 +335,7 @@ impl Solver for TrustRegion { let grad_norm = grad_neg.norm(); - if grad_norm == F::Scalar::zero() { + if grad_norm == F::Field::zero() { // Gradient is zero, it is useless to compute the dogleg step. // Instead, we take the Newton direction to the trust region // boundary. @@ -354,7 +354,7 @@ impl Solver for TrustRegion { // Compute D^(-2) (use p for storage). scale_inv2 = p; scale_inv2.copy_from(scale); - scale_inv2.apply(|s| *s = F::Scalar::one() / (*s * *s)); + scale_inv2.apply(|s| *s = F::Field::one() / (*s * *s)); // Compute g = -D^(-2) grad F, the steepest descent direction in // scaled space (use cauchy for storage). @@ -452,7 +452,7 @@ impl Solver for TrustRegion { #[allow(clippy::suspicious_operation_groupings)] let d = (b * b + a * c_neg).sqrt(); - let alpha = if b <= F::Scalar::zero() { + let alpha = if b <= F::Field::zero() { (-b + d) / a } else { c_neg / (b + d) @@ -494,10 +494,10 @@ impl Solver for TrustRegion { // from the solution (i.e., for large || F(x) ||). // Determine lambda. 
- let d = if fx_norm >= F::Scalar::one() { - F::Scalar::one() / fx_norm + let d = if fx_norm >= F::Field::one() { + F::Field::one() / fx_norm } else { - F::Scalar::one() + F::Scalar::one() / convert(*iter as f64) + F::Field::one() + F::Field::one() / convert(*iter as f64) }; let lambda = *mu * fx_norm.powf(d); @@ -579,13 +579,13 @@ impl Solver for TrustRegion { let deny = if allow_ascent { // If ascent is allowed, then check only for zero, which would // make the gain ratio calculation ill-defined. - predicted == F::Scalar::zero() + predicted == F::Field::zero() } else { // If ascent is not allowed, test positivity of the predicted // gain. Note that even if the actual reduction was positive, // the step would be rejected anyway because the gain ratio // would be negative. - predicted <= F::Scalar::zero() + predicted <= F::Field::zero() }; if deny { @@ -594,7 +594,7 @@ impl Solver for TrustRegion { } else { debug!("predicted gain <= 0"); } - F::Scalar::zero() + F::Field::zero() } else { let actual = fx_norm - fx_trial_norm; let gain_ratio = actual / predicted; @@ -604,7 +604,7 @@ impl Solver for TrustRegion { } } else { debug!("trial step is invalid, gain ratio = 0"); - F::Scalar::zero() + F::Field::zero() }; // Decide if the step is accepted or not. @@ -688,11 +688,11 @@ impl Optimizer for TrustRegion { fn opt_next( &mut self, f: &F, - dom: &Domain<::Scalar>, - x: &mut Vector<::Scalar, Dynamic, Sx>, - ) -> Result<::Scalar, Self::Error> + dom: &Domain<::Field>, + x: &mut Vector<::Field, Dynamic, Sx>, + ) -> Result<::Field, Self::Error> where - Sx: StorageMut<::Scalar, Dynamic> + IsContiguous, + Sx: StorageMut<::Field, Dynamic> + IsContiguous, { let TrustRegionOptions { delta_min, @@ -721,9 +721,9 @@ impl Optimizer for TrustRegion { } = self; #[allow(clippy::needless_late_init)] - let scaled_newton: &mut OVector; - let scale_inv2_grad: &mut OVector; - let cauchy_scaled: &mut OVector; + let scaled_newton: &mut OVector; + let scale_inv2_grad: &mut OVector; + let cauchy_scaled: &mut OVector; #[derive(Debug, Clone, Copy, PartialEq)] enum StepType { @@ -738,7 +738,7 @@ impl Optimizer for TrustRegion { grad.compute(f, x, scale, fx); hes.compute(f, x, scale, fx); - let estimate_delta = *delta == F::Scalar::zero(); + let estimate_delta = *delta == F::Field::zero(); if estimate_delta { *delta = grad.norm() * convert(0.1); } @@ -761,7 +761,7 @@ impl Optimizer for TrustRegion { "Newton step is invalid for ill-defined Hessian (zero columns: {:?})", hes.column_iter() .enumerate() - .filter(|(_, col)| col.norm() == F::Scalar::zero()) + .filter(|(_, col)| col.norm() == F::Field::zero()) .map(|(i, _)| i) .collect::>() ); @@ -784,7 +784,7 @@ impl Optimizer for TrustRegion { let grad_norm = grad_neg.norm(); - if grad_norm == F::Scalar::zero() { + if grad_norm == F::Field::zero() { // Gradient is zero, it is useless to compute the dogleg step. // Instead, we take the Newton direction to the trust region // boundary. @@ -803,13 +803,13 @@ impl Optimizer for TrustRegion { // Compute || D^-1 grad || (use p for storage). scale_inv2_grad = p; scale_inv2_grad.copy_from(scale); - scale_inv2_grad.apply(|s| *s = F::Scalar::one() / *s); + scale_inv2_grad.apply(|s| *s = F::Field::one() / *s); scale_inv2_grad.component_mul_assign(grad_neg); let scale_inv_grad_norm = scale_inv2_grad.norm(); // Compute D^(-2) grad. 
scale_inv2_grad.copy_from(scale); - scale_inv2_grad.apply(|s| *s = F::Scalar::one() / (*s * *s)); + scale_inv2_grad.apply(|s| *s = F::Field::one() / (*s * *s)); scale_inv2_grad.component_mul_assign(grad_neg); scale_inv2_grad.neg_mut(); @@ -823,11 +823,11 @@ impl Optimizer for TrustRegion { hes.mul_to(scale_inv2_grad, temp); let quadratic_form = scale_inv2_grad.dot(temp); - let tau = if quadratic_form <= F::Scalar::zero() { - F::Scalar::one() + let tau = if quadratic_form <= F::Field::zero() { + F::Field::one() } else { // tau = min(|| D^-1 grad|| / delta * grad^T D^-2 H D^-2 grad, 1). - (scale_inv_grad_norm.powi(3) / (*delta * quadratic_form)).min(F::Scalar::one()) + (scale_inv_grad_norm.powi(3) / (*delta * quadratic_form)).min(F::Field::one()) }; let p = scale_inv2_grad; @@ -900,7 +900,7 @@ impl Optimizer for TrustRegion { #[allow(clippy::suspicious_operation_groupings)] let d = (b * b + a * c_neg).sqrt(); - let alpha = if b <= F::Scalar::zero() { + let alpha = if b <= F::Field::zero() { (-b + d) / a } else { c_neg / (b + d) @@ -950,13 +950,13 @@ impl Optimizer for TrustRegion { let deny = if allow_ascent { // If ascent is allowed, then check only for zero, which would // make the gain ratio calculation ill-defined. - predicted == F::Scalar::zero() + predicted == F::Field::zero() } else { // If ascent is not allowed, test positivity of the predicted // gain. Note that even if the actual reduction was positive, // the step would be rejected anyway because the gain ratio // would be negative. - predicted <= F::Scalar::zero() + predicted <= F::Field::zero() }; if deny { @@ -965,7 +965,7 @@ impl Optimizer for TrustRegion { } else { debug!("predicted gain <= 0"); } - F::Scalar::zero() + F::Field::zero() } else { let actual = fx - fx_trial; let gain_ratio = actual / predicted; @@ -975,7 +975,7 @@ impl Optimizer for TrustRegion { } } else { debug!("trial step is invalid, gain ratio = 0"); - F::Scalar::zero() + F::Field::zero() }; // Decide if the step is accepted or not. diff --git a/src/testing.rs b/src/testing.rs index 2f85785..cd5d2f9 100644 --- a/src/testing.rs +++ b/src/testing.rs @@ -36,27 +36,27 @@ use crate::core::{Domain, Function, Optimizer, Problem, Solver, System}; pub trait TestProblem: Problem { /// Standard initial values for the problem. Using the same initial values is /// essential for fair comparison of methods. - fn initials(&self) -> Vec>; + fn initials(&self) -> Vec>; } /// Extension of the [`System`] trait that provides additional information that /// is useful for testing solvers. pub trait TestSystem: System + TestProblem where - Self::Scalar: approx::RelativeEq, + Self::Field: approx::RelativeEq, { /// A set of roots (if known and finite). This is mostly just for /// information, for example to know how close a solver got even if it /// failed. For testing if a given point is root, [`TestSystem::is_root`] /// should be used. - fn roots(&self) -> Vec> { + fn roots(&self) -> Vec> { Vec::new() } /// Test if given point is a root of the system, given the tolerance `eps`. - fn is_root(&self, x: &Vector, eps: Self::Scalar) -> bool + fn is_root(&self, x: &Vector, eps: Self::Field) -> bool where - Sx: Storage + IsContiguous, + Sx: Storage + IsContiguous, { let mut fx = x.clone_owned(); self.eval(x, &mut fx); @@ -68,20 +68,20 @@ where /// that is useful for testing optimizers. pub trait TestFunction: Function + TestProblem where - Self::Scalar: approx::RelativeEq, + Self::Field: approx::RelativeEq, { /// A set of global optima (if known and finite). 
This is mostly just for /// information, for example to know how close an optimizer got even if it /// failed. For testing if a given point is global optimum, [`TestFunction::is_optimum`] /// should be used. - fn optima(&self) -> Vec> { + fn optima(&self) -> Vec> { Vec::new() } /// Test if given point is a root of the system, given the tolerance `eps`. - fn is_optimum(&self, x: &Vector, eps: Self::Scalar) -> bool + fn is_optimum(&self, x: &Vector, eps: Self::Field) -> bool where - Sx: Storage + IsContiguous; + Sx: Storage + IsContiguous; } /// [Extended Rosenbrock @@ -150,9 +150,9 @@ impl Default for ExtendedRosenbrock { } impl Problem for ExtendedRosenbrock { - type Scalar = f64; + type Field = f64; - fn domain(&self) -> Domain { + fn domain(&self) -> Domain { (0..self.n) .map(|i| { if i % 2 == 0 { @@ -168,25 +168,25 @@ impl Problem for ExtendedRosenbrock { impl System for ExtendedRosenbrock { fn eval( &self, - x: &Vector, - fx: &mut Vector, + x: &Vector, + fx: &mut Vector, ) where - Sx: Storage + IsContiguous, - Sfx: StorageMut, + Sx: Storage + IsContiguous, + Sfx: StorageMut, { eval(self.residuals(x), fx) } - fn norm(&self, x: &Vector) -> Self::Scalar + fn norm(&self, x: &Vector) -> Self::Field where - Sx: Storage + IsContiguous, + Sx: Storage + IsContiguous, { norm(self.residuals(x)) } } impl TestProblem for ExtendedRosenbrock { - fn initials(&self) -> Vec> { + fn initials(&self) -> Vec> { let init1 = DVector::from_iterator( self.n, (0..self.n).map(|i| if i % 2 == 0 { -1.2 } else { 1.0 }), @@ -202,7 +202,7 @@ impl TestProblem for ExtendedRosenbrock { } impl TestSystem for ExtendedRosenbrock { - fn roots(&self) -> Vec> { + fn roots(&self) -> Vec> { let root = (0..self.n).map(|i| { if i % 2 == 0 { 1.0 / self.alpha @@ -268,9 +268,9 @@ impl Default for ExtendedPowell { } impl Problem for ExtendedPowell { - type Scalar = f64; + type Field = f64; - fn domain(&self) -> Domain { + fn domain(&self) -> Domain { Domain::unconstrained(self.n) } } @@ -278,25 +278,25 @@ impl Problem for ExtendedPowell { impl System for ExtendedPowell { fn eval( &self, - x: &Vector, - fx: &mut Vector, + x: &Vector, + fx: &mut Vector, ) where - Sx: Storage + IsContiguous, - Sfx: StorageMut, + Sx: Storage + IsContiguous, + Sfx: StorageMut, { eval(self.residuals(x), fx) } - fn norm(&self, x: &Vector) -> Self::Scalar + fn norm(&self, x: &Vector) -> Self::Field where - Sx: Storage + IsContiguous, + Sx: Storage + IsContiguous, { norm(self.residuals(x)) } } impl TestProblem for ExtendedPowell { - fn initials(&self) -> Vec> { + fn initials(&self) -> Vec> { let init = DVector::from_iterator( self.n, (0..self.n).map(|i| match i % 4 { @@ -313,7 +313,7 @@ impl TestProblem for ExtendedPowell { } impl TestSystem for ExtendedPowell { - fn roots(&self) -> Vec> { + fn roots(&self) -> Vec> { vec![DVector::from_element(self.n, 0.0)] } } @@ -355,9 +355,9 @@ impl Default for BullardBiegler { } impl Problem for BullardBiegler { - type Scalar = f64; + type Field = f64; - fn domain(&self) -> Domain { + fn domain(&self) -> Domain { [(5.45e-6, 4.553), (2.196e-3, 18.21)].into_iter().collect() } } @@ -365,25 +365,25 @@ impl Problem for BullardBiegler { impl System for BullardBiegler { fn eval( &self, - x: &Vector, - fx: &mut Vector, + x: &Vector, + fx: &mut Vector, ) where - Sx: Storage + IsContiguous, - Sfx: StorageMut, + Sx: Storage + IsContiguous, + Sfx: StorageMut, { eval(self.residuals(x), fx) } - fn norm(&self, x: &Vector) -> Self::Scalar + fn norm(&self, x: &Vector) -> Self::Field where - Sx: Storage + IsContiguous, + Sx: Storage + 
IsContiguous, { norm(self.residuals(x)) } } impl TestProblem for BullardBiegler { - fn initials(&self) -> Vec> { + fn initials(&self) -> Vec> { let init1 = dvector![0.1, 1.0]; let init2 = dvector![1.0, 1.0]; vec![init1, init2] @@ -391,7 +391,7 @@ impl TestProblem for BullardBiegler { } impl TestSystem for BullardBiegler { - fn roots(&self) -> Vec> { + fn roots(&self) -> Vec> { vec![dvector![1.45e-5, 6.8933353]] } } @@ -434,9 +434,9 @@ impl Default for Sphere { } impl Problem for Sphere { - type Scalar = f64; + type Field = f64; - fn domain(&self) -> Domain { + fn domain(&self) -> Domain { Domain::unconstrained(self.n) } } @@ -444,25 +444,25 @@ impl Problem for Sphere { impl System for Sphere { fn eval( &self, - x: &Vector, - fx: &mut Vector, + x: &Vector, + fx: &mut Vector, ) where - Sx: Storage + IsContiguous, - Sfx: StorageMut, + Sx: Storage + IsContiguous, + Sfx: StorageMut, { eval(self.residuals(x), fx) } - fn norm(&self, x: &Vector) -> Self::Scalar + fn norm(&self, x: &Vector) -> Self::Field where - Sx: Storage + IsContiguous, + Sx: Storage + IsContiguous, { norm(self.residuals(x)) } } impl TestProblem for Sphere { - fn initials(&self) -> Vec> { + fn initials(&self) -> Vec> { let init = DVector::from_iterator( self.n, (0..self.n).map(|i| if i % 2 == 0 { 10.0 } else { -10.0 }), @@ -473,19 +473,19 @@ impl TestProblem for Sphere { } impl TestSystem for Sphere { - fn roots(&self) -> Vec> { + fn roots(&self) -> Vec> { vec![DVector::from_element(self.n, 0.0)] } } impl TestFunction for Sphere { - fn optima(&self) -> Vec> { + fn optima(&self) -> Vec> { ::roots(self) } - fn is_optimum(&self, x: &Vector, eps: Self::Scalar) -> bool + fn is_optimum(&self, x: &Vector, eps: Self::Field) -> bool where - Sx: Storage + IsContiguous, + Sx: Storage + IsContiguous, { self.apply(x).abs() <= eps } @@ -538,9 +538,9 @@ impl Default for Brown { } impl Problem for Brown { - type Scalar = f64; + type Field = f64; - fn domain(&self) -> Domain { + fn domain(&self) -> Domain { Domain::unconstrained(self.n) } } @@ -548,25 +548,25 @@ impl Problem for Brown { impl System for Brown { fn eval( &self, - x: &Vector, - fx: &mut Vector, + x: &Vector, + fx: &mut Vector, ) where - Sx: Storage + IsContiguous, - Sfx: StorageMut, + Sx: Storage + IsContiguous, + Sfx: StorageMut, { eval(self.residuals(x), fx) } - fn norm(&self, x: &Vector) -> Self::Scalar + fn norm(&self, x: &Vector) -> Self::Field where - Sx: Storage + IsContiguous, + Sx: Storage + IsContiguous, { norm(self.residuals(x)) } } impl TestProblem for Brown { - fn initials(&self) -> Vec> { + fn initials(&self) -> Vec> { let init = DVector::zeros_generic(Dynamic::from_usize(self.n), U1::name()); vec![init] } @@ -616,9 +616,9 @@ impl Default for Exponential { } impl Problem for Exponential { - type Scalar = f64; + type Field = f64; - fn domain(&self) -> Domain { + fn domain(&self) -> Domain { Domain::unconstrained(2) } } @@ -626,25 +626,25 @@ impl Problem for Exponential { impl System for Exponential { fn eval( &self, - x: &Vector, - fx: &mut Vector, + x: &Vector, + fx: &mut Vector, ) where - Sx: Storage + IsContiguous, - Sfx: StorageMut, + Sx: Storage + IsContiguous, + Sfx: StorageMut, { eval(self.residuals(x), fx) } - fn norm(&self, x: &Vector) -> Self::Scalar + fn norm(&self, x: &Vector) -> Self::Field where - Sx: Storage + IsContiguous, + Sx: Storage + IsContiguous, { norm(self.residuals(x)) } } impl TestProblem for Exponential { - fn initials(&self) -> Vec> { + fn initials(&self) -> Vec> { let init = DVector::zeros_generic(Dynamic::from_usize(self.n), U1::name()); 
vec![init] } @@ -673,9 +673,9 @@ impl Default for InfiniteSolutions { } impl Problem for InfiniteSolutions { - type Scalar = f64; + type Field = f64; - fn domain(&self) -> Domain { + fn domain(&self) -> Domain { Domain::unconstrained(self.n) } } @@ -683,25 +683,25 @@ impl Problem for InfiniteSolutions { impl System for InfiniteSolutions { fn eval( &self, - _x: &Vector, - fx: &mut Vector, + _x: &Vector, + fx: &mut Vector, ) where - Sx: Storage + IsContiguous, - Sfx: StorageMut, + Sx: Storage + IsContiguous, + Sfx: StorageMut, { fx.fill(0.0); } - fn norm(&self, _: &Vector) -> Self::Scalar + fn norm(&self, _: &Vector) -> Self::Field where - Sx: Storage + IsContiguous, + Sx: Storage + IsContiguous, { 0.0 } } impl TestProblem for InfiniteSolutions { - fn initials(&self) -> Vec> { + fn initials(&self) -> Vec> { let init = DVector::zeros_generic(Dynamic::from_usize(self.n), U1::name()); vec![init] } @@ -724,12 +724,12 @@ pub enum TestingError { /// A simple solver driver that can be used in tests. pub fn solve>( f: &F, - dom: &Domain, + dom: &Domain, mut solver: S, - mut x: OVector, + mut x: OVector, max_iters: usize, - tolerance: F::Scalar, -) -> Result, TestingError> + tolerance: F::Field, +) -> Result, TestingError> where S::Error: StdError, { @@ -754,13 +754,13 @@ where /// A simple solver driver that can be used in tests. pub fn optimize>( f: &F, - dom: &Domain, + dom: &Domain, mut optimizer: O, - mut x: OVector, - min: F::Scalar, + mut x: OVector, + min: F::Field, max_iters: usize, - tolerance: F::Scalar, -) -> Result, TestingError> + tolerance: F::Field, +) -> Result, TestingError> where O::Error: StdError, { @@ -786,15 +786,15 @@ where /// testing evolutionary/nature-inspired algorithms. pub fn iter, G>( f: &F, - dom: &Domain, + dom: &Domain, mut solver: S, - mut x: OVector, + mut x: OVector, iters: usize, mut inspect: G, ) -> Result<(), S::Error> where S::Error: StdError, - G: FnMut(&S, &OVector, F::Scalar, usize), + G: FnMut(&S, &OVector, F::Field, usize), { let mut fx = x.clone_owned(); From 622cd9ebd5d5e574fb8289ea699197394d4f036b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Petr=20Nevyho=C5=A1t=C4=9Bn=C3=BD?= Date: Sun, 12 Nov 2023 22:06:00 +0100 Subject: [PATCH 5/6] feat!: introduce own RealField trait --- src/analysis/initial.rs | 6 +++--- src/core/base.rs | 25 +++++++++++++++++++++++-- src/derivatives.rs | 17 ++++------------- src/solver/nelder_mead.rs | 7 ++----- src/solver/trust_region.rs | 6 +++--- 5 files changed, 35 insertions(+), 26 deletions(-) diff --git a/src/analysis/initial.rs b/src/analysis/initial.rs index af27cc0..1861551 100644 --- a/src/analysis/initial.rs +++ b/src/analysis/initial.rs @@ -14,8 +14,8 @@ use nalgebra::{ }; use crate::{ - core::{Domain, Problem, System}, - derivatives::{Jacobian, EPSILON_SQRT}, + core::{Domain, Problem, RealField, System}, + derivatives::Jacobian, }; /// Initial guesses analyzer. See [module](self) documentation for more details. @@ -75,7 +75,7 @@ impl InitialGuessAnalysis { .filter(|(_, (c1, c2))| { c1.iter() .zip(c2.iter()) - .any(|(a, b)| (*a - *b).abs() > convert(EPSILON_SQRT)) + .any(|(a, b)| (*a - *b).abs() > F::Field::EPSILON_SQRT) }) .map(|(col, _)| col) .collect(); diff --git a/src/core/base.rs b/src/core/base.rs index 8cdbb29..7214630 100644 --- a/src/core/base.rs +++ b/src/core/base.rs @@ -1,7 +1,18 @@ -use nalgebra::RealField; - use super::domain::Domain; +/// Trait implemented by real numbers. +pub trait RealField: nalgebra::RealField { + /// Square root of double precision machine epsilon. 
This value is a
+    /// standard constant for epsilons in approximating first-order
+    /// derivative-based concepts.
+    const EPSILON_SQRT: Self;
+
+    /// Cube root of double precision machine epsilon. This value is a standard
+    /// constant for epsilons in approximating second-order derivative-based
+    /// concepts.
+    const EPSILON_CBRT: Self;
+}
+
 /// The base trait for [`System`](super::system::System) and
 /// [`Function`](super::function::Function).
 pub trait Problem {
@@ -12,3 +23,13 @@ pub trait Problem {
     /// system is unconstrained.
     fn domain(&self) -> Domain;
 }
+
+impl RealField for f32 {
+    const EPSILON_SQRT: Self = 0.00034526698;
+    const EPSILON_CBRT: Self = 0.0049215667;
+}
+
+impl RealField for f64 {
+    const EPSILON_SQRT: Self = 0.000000014901161193847656;
+    const EPSILON_CBRT: Self = 0.0000060554544523933395;
+}
diff --git a/src/derivatives.rs b/src/derivatives.rs
index e99a381..94d0174 100644
--- a/src/derivatives.rs
+++ b/src/derivatives.rs
@@ -3,21 +3,12 @@
 use std::ops::Deref;
 
 use nalgebra::{
-    convert,
     storage::{Storage, StorageMut},
     ComplexField, DimName, Dynamic, IsContiguous, OMatrix, OVector, RealField, Vector, U1,
 };
 use num_traits::{One, Zero};
 
-use crate::core::{Function, Problem, System};
-
-/// Square root of double precision machine epsilon. This value is a standard
-/// constant for epsilons in approximating first-order derivate-based concepts.
-pub const EPSILON_SQRT: f64 = 0.000000014901161193847656;
-
-/// Cubic root of double precision machine epsilon. This value is a standard
-/// constant for epsilons in approximating second-order derivate-based concepts.
-pub const EPSILON_CBRT: f64 = 0.0000060554544523933395;
+use crate::core::{Function, Problem, RealField as _, System};
 
 /// Jacobian matrix of a system.
 #[derive(Debug)]
@@ -76,7 +67,7 @@ impl Jacobian {
         Sscale: Storage,
         Sfx: Storage,
     {
-        let eps: F::Field = convert(EPSILON_SQRT);
+        let eps = F::Field::EPSILON_SQRT;
 
         for (j, mut col) in self.jac.column_iter_mut().enumerate() {
             let xj = x[j];
@@ -173,7 +164,7 @@ impl Gradient {
         Sx: StorageMut + IsContiguous,
         Sscale: Storage,
     {
-        let eps: F::Field = convert(EPSILON_SQRT);
+        let eps = F::Field::EPSILON_SQRT;
 
         for i in 0..x.nrows() {
             let xi = x[i];
@@ -265,7 +256,7 @@ impl Hessian {
         Sx: StorageMut + IsContiguous,
         Sscale: Storage,
    {
-        let eps: F::Field = convert(EPSILON_CBRT);
+        let eps = F::Field::EPSILON_CBRT;
 
         for i in 0..x.nrows() {
             let xi = x[i];
diff --git a/src/solver/nelder_mead.rs b/src/solver/nelder_mead.rs
index 7f3d5ed..a5d09d4 100644
--- a/src/solver/nelder_mead.rs
+++ b/src/solver/nelder_mead.rs
@@ -31,10 +31,7 @@ use nalgebra::{
 use num_traits::{One, Zero};
 use thiserror::Error;
 
-use crate::{
-    core::{Domain, Function, Optimizer, Problem, Solver, System},
-    derivatives::EPSILON_SQRT,
-};
+use crate::core::{Domain, Function, Optimizer, Problem, RealField as _, Solver, System};
 
 /// Family of coefficients for reflection, expansion and contractions.
 #[derive(Debug, Clone, Copy)]
@@ -435,7 +432,7 @@ impl NelderMead {
         // otherwise an error reduction was achieved. This criterion is
         // taken from "Less is more: Simplified Nelder-Mead method for large
         // unconstrained optimization".
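The `EPSILON_SQRT` and `EPSILON_CBRT` values introduced above (and used just below) are simply the square and cube roots of machine epsilon for each type. An illustrative check, not part of the patch:

```rust
fn main() {
    // sqrt(2^-52) = 2^-26 is exactly representable, so equality holds.
    assert_eq!(f64::EPSILON.sqrt(), 0.000000014901161193847656);
    assert_eq!(f32::EPSILON.sqrt(), 0.00034526698);

    // The cube root is not a power of two; allow a tiny slack since `cbrt`
    // is not guaranteed to be correctly rounded on every platform.
    let cbrt = f64::EPSILON.cbrt();
    assert!((cbrt - 0.0000060554544523933395).abs() <= cbrt * f64::EPSILON);
}
```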
- let eps: F::Field = convert(EPSILON_SQRT); + let eps = F::Field::EPSILON_SQRT; let worst = errors[sort_perm[n]]; let best = errors[sort_perm[0]]; diff --git a/src/solver/trust_region.rs b/src/solver/trust_region.rs index 0c7f2e5..e8dbe1b 100644 --- a/src/solver/trust_region.rs +++ b/src/solver/trust_region.rs @@ -37,8 +37,8 @@ use num_traits::{One, Zero}; use thiserror::Error; use crate::{ - core::{Domain, Function, Optimizer, Problem, Solver, System}, - derivatives::{Gradient, Hessian, Jacobian, EPSILON_SQRT}, + core::{Domain, Function, Optimizer, Problem, RealField as _, Solver, System}, + derivatives::{Gradient, Hessian, Jacobian}, }; /// Specification for initial value of trust region size. @@ -102,7 +102,7 @@ impl TrustRegionOptions { impl Default for TrustRegionOptions { fn default() -> Self { Self { - delta_min: convert(EPSILON_SQRT), + delta_min: F::Field::EPSILON_SQRT, delta_max: convert(1e9), delta_init: DeltaInit::Estimated, mu_min: convert(1e-10), From 789b72369eeab20987d22b293fc8d54e27e27250 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Petr=20Nevyho=C5=A1t=C4=9Bn=C3=BD?= Date: Sun, 12 Nov 2023 22:21:31 +0100 Subject: [PATCH 6/6] feat!: remove prelude and move important types to lib root --- examples/rosenbrock.rs | 2 +- gomez-bench/benches/main.rs | 2 +- src/core/function.rs | 3 +-- src/core/optimizer.rs | 2 +- src/core/solver.rs | 2 +- src/core/system.rs | 2 +- src/lib.rs | 18 ++++++++---------- 7 files changed, 14 insertions(+), 17 deletions(-) diff --git a/examples/rosenbrock.rs b/examples/rosenbrock.rs index f3d6ff9..cd15bd9 100644 --- a/examples/rosenbrock.rs +++ b/examples/rosenbrock.rs @@ -1,6 +1,6 @@ use gomez::nalgebra as na; -use gomez::prelude::*; use gomez::solver::TrustRegion; +use gomez::{Domain, Problem, Solver, System}; use na::{Dynamic, IsContiguous}; // https://en.wikipedia.org/wiki/Rosenbrock_function diff --git a/gomez-bench/benches/main.rs b/gomez-bench/benches/main.rs index 0b06f2e..9ebfdbc 100644 --- a/gomez-bench/benches/main.rs +++ b/gomez-bench/benches/main.rs @@ -5,9 +5,9 @@ fn main() { use gomez::{ nalgebra as na, nalgebra::Dynamic, - prelude::*, solver::{NelderMead, TrustRegion}, testing::*, + Domain, Problem, Solver, }; use gsl_wrapper::{ function::Function as GslFunction, diff --git a/src/core/function.rs b/src/core/function.rs index 3414072..a12c478 100644 --- a/src/core/function.rs +++ b/src/core/function.rs @@ -11,9 +11,8 @@ use super::{base::Problem, system::System}; /// ([`apply`](Function::apply)). /// /// ```rust -/// use gomez::core::Function; /// use gomez::nalgebra as na; -/// use gomez::prelude::*; +/// use gomez::{Domain, Function, Problem}; /// use na::{Dynamic, IsContiguous}; /// /// // A problem is represented by a type. 
diff --git a/src/core/optimizer.rs b/src/core/optimizer.rs index 46366e3..1d8b7b4 100644 --- a/src/core/optimizer.rs +++ b/src/core/optimizer.rs @@ -21,7 +21,7 @@ use super::{domain::Domain, function::Function}; /// /// ```rust /// use gomez::nalgebra as na; -/// use gomez::core::*; +/// use gomez::*; /// use na::{storage::StorageMut, Dynamic, IsContiguous, Vector}; /// use rand::Rng; /// use rand_distr::{uniform::SampleUniform, Distribution, Standard}; diff --git a/src/core/solver.rs b/src/core/solver.rs index e19dd76..44443b6 100644 --- a/src/core/solver.rs +++ b/src/core/solver.rs @@ -21,7 +21,7 @@ use super::{domain::Domain, system::System}; /// /// ```rust /// use gomez::nalgebra as na; -/// use gomez::core::*; +/// use gomez::*; /// use na::{storage::StorageMut, Dynamic, IsContiguous, Vector}; /// use rand::Rng; /// use rand_distr::{uniform::SampleUniform, Distribution, Standard}; diff --git a/src/core/system.rs b/src/core/system.rs index f7746b6..3cea18f 100644 --- a/src/core/system.rs +++ b/src/core/system.rs @@ -16,7 +16,7 @@ use super::{base::Problem, domain::Domain}; /// /// ```rust /// use gomez::nalgebra as na; -/// use gomez::prelude::*; +/// use gomez::{Domain, Problem, System}; /// use na::{Dynamic, IsContiguous}; /// /// // A problem is represented by a type. diff --git a/src/lib.rs b/src/lib.rs index 501b573..a43fd0c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -64,12 +64,12 @@ //! incorporating them and the implementation of appropriate solvers. //! //! When it comes to code, the problem is any type that implements the -//! [`System`](core::System) and [`Problem`](core::Problem) traits. +//! [`System`] and [`Problem`] traits. //! //! ```rust //! // Gomez is based on `nalgebra` crate. //! use gomez::nalgebra as na; -//! use gomez::prelude::*; +//! use gomez::{Domain, Problem, System}; //! use na::{Dynamic, IsContiguous}; //! //! // A problem is represented by a type. @@ -117,7 +117,7 @@ //! //! ```rust //! # use gomez::nalgebra as na; -//! # use gomez::prelude::*; +//! # use gomez::*; //! # //! # struct Rosenbrock { //! # a: f64, @@ -146,9 +146,10 @@ //! //! ```rust //! use gomez::nalgebra as na; -//! use gomez::prelude::*; +//! use gomez::Solver; //! // Pick your solver. //! use gomez::solver::TrustRegion; +//! # use gomez::{Domain, Problem, System}; //! # use na::{Dynamic, IsContiguous}; //! # //! # struct Rosenbrock { @@ -234,10 +235,12 @@ //! Licensed under MIT. pub mod analysis; -pub mod core; +mod core; pub mod derivatives; pub mod solver; +pub use core::*; + #[cfg(feature = "testing")] pub mod testing; @@ -245,8 +248,3 @@ pub mod testing; pub(crate) mod testing; pub use nalgebra; - -/// Gomez prelude. -pub mod prelude { - pub use crate::core::{Domain, Problem, Solver, System}; -}
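For downstream users, the removal of the prelude is a mechanical change. A migration sketch (illustrative, based on the import lists shown in this patch):

```rust
// Before this patch series:
//
//     use gomez::prelude::*;
//     use gomez::core::Function;
//
// After it, the important items live at the crate root, while concrete
// solver implementations keep their module path:
use gomez::{Domain, Function, Problem, Solver, System};
use gomez::solver::TrustRegion;
```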