diff --git a/examples/rosenbrock.rs b/examples/rosenbrock.rs index f42deb1..cd15bd9 100644 --- a/examples/rosenbrock.rs +++ b/examples/rosenbrock.rs @@ -1,6 +1,6 @@ use gomez::nalgebra as na; -use gomez::prelude::*; use gomez::solver::TrustRegion; +use gomez::{Domain, Problem, Solver, System}; use na::{Dynamic, IsContiguous}; // https://en.wikipedia.org/wiki/Rosenbrock_function @@ -10,9 +10,9 @@ struct Rosenbrock { } impl Problem for Rosenbrock { - type Scalar = f64; + type Field = f64; - fn domain(&self) -> Domain { + fn domain(&self) -> Domain { Domain::unconstrained(2) } } @@ -20,17 +20,14 @@ impl Problem for Rosenbrock { impl System for Rosenbrock { fn eval( &self, - x: &na::Vector, - fx: &mut na::Vector, - ) -> Result<(), ProblemError> - where - Sx: na::storage::Storage + IsContiguous, - Sfx: na::storage::StorageMut, + x: &na::Vector, + fx: &mut na::Vector, + ) where + Sx: na::storage::Storage + IsContiguous, + Sfx: na::storage::StorageMut, { fx[0] = (self.a - x[0]).powi(2); fx[1] = self.b * (x[1] - x[0].powi(2)).powi(2); - - Ok(()) } } @@ -46,7 +43,7 @@ fn main() -> Result<(), String> { for i in 1..=100 { solver - .next(&f, &dom, &mut x, &mut fx) + .solve_next(&f, &dom, &mut x, &mut fx) .map_err(|err| format!("{}", err))?; println!( diff --git a/gomez-bench/benches/main.rs b/gomez-bench/benches/main.rs index 9509256..9ebfdbc 100644 --- a/gomez-bench/benches/main.rs +++ b/gomez-bench/benches/main.rs @@ -5,9 +5,9 @@ fn main() { use gomez::{ nalgebra as na, nalgebra::Dynamic, - prelude::*, solver::{NelderMead, TrustRegion}, testing::*, + Domain, Problem, Solver, }; use gsl_wrapper::{ function::Function as GslFunction, @@ -168,7 +168,7 @@ mod bullard_biegler { fn bench_solve(bencher: divan::Bencher, with_system: GF, with_solver: GS) where GF: Fn() -> (F, usize), - GS: Fn(&F, &Domain, &na::OVector) -> S, + GS: Fn(&F, &Domain, &na::OVector) -> S, F: TestSystem, S: Solver, { @@ -184,7 +184,7 @@ where let mut fx = x.clone_owned(); let mut iter = 0; loop { - if solver.next(&f, &dom, &mut x, &mut fx).is_err() { + if solver.solve_next(&f, &dom, &mut x, &mut fx).is_err() { panic!("solver error"); } @@ -203,8 +203,8 @@ where fn with_trust_region( f: &F, - dom: &Domain, - _: &na::OVector, + dom: &Domain, + _: &na::OVector, ) -> TrustRegion where F: Problem, @@ -214,8 +214,8 @@ where fn with_nelder_mead( f: &F, - dom: &Domain, - _: &na::OVector, + dom: &Domain, + _: &na::OVector, ) -> NelderMead where F: Problem, @@ -225,11 +225,11 @@ where fn with_gsl_hybrids( f: &F, - _: &Domain, - x: &na::OVector, + _: &Domain, + x: &na::OVector, ) -> GslSolverWrapper> where - F: TestSystem + Clone, + F: TestSystem + Clone, { GslSolverWrapper::new(GslFunctionWrapper::new( f.clone(), @@ -248,7 +248,7 @@ impl GslFunctionWrapper { } } -impl> GslFunction for GslFunctionWrapper { +impl> GslFunction for GslFunctionWrapper { fn eval(&self, x: &GslVec, f: &mut GslVec) -> GslStatus { use na::DimName; let dim = Dynamic::new(x.len()); @@ -264,10 +264,8 @@ impl> GslFunction for GslFunctionWrapper { na::U1::name(), ); - match self.f.eval(&x, &mut fx) { - Ok(_) => GslStatus::ok(), - Err(_) => GslStatus::err(GslError::BadFunc), - } + self.f.eval(&x, &mut fx); + GslStatus::ok() } fn init(&self) -> GslVec { @@ -287,21 +285,21 @@ impl GslSolverWrapper { } } -impl> Solver for GslSolverWrapper> { +impl> Solver for GslSolverWrapper> { const NAME: &'static str = "GSL hybrids"; type Error = String; - fn next( + fn solve_next( &mut self, _f: &F, - _dom: &Domain, - x: &mut na::Vector, - fx: &mut na::Vector, + _dom: &Domain, + x: &mut 
na::Vector, + fx: &mut na::Vector, ) -> Result<(), Self::Error> where - Sx: na::storage::StorageMut + IsContiguous, - Sfx: na::storage::StorageMut, + Sx: na::storage::StorageMut + IsContiguous, + Sfx: na::storage::StorageMut, { let result = self.solver.step().to_result(); x.copy_from_slice(self.solver.root()); diff --git a/src/analysis/initial.rs b/src/analysis/initial.rs index 18197ca..1861551 100644 --- a/src/analysis/initial.rs +++ b/src/analysis/initial.rs @@ -12,24 +12,12 @@ use std::marker::PhantomData; use nalgebra::{ convert, storage::StorageMut, ComplexField, DimName, Dynamic, IsContiguous, OVector, Vector, U1, }; -use thiserror::Error; use crate::{ - core::{Domain, Problem, ProblemError, System}, - derivatives::{Jacobian, JacobianError, EPSILON_SQRT}, + core::{Domain, Problem, RealField, System}, + derivatives::Jacobian, }; -/// Error returned from [`InitialGuessAnalysis`] solver. -#[derive(Debug, Error)] -pub enum InitialGuessAnalysisError { - /// Error that occurred when evaluating the system. - #[error("{0}")] - System(#[from] ProblemError), - /// Error that occurred when computing the Jacobian matrix. - #[error("{0}")] - Jacobian(#[from] JacobianError), -} - /// Initial guesses analyzer. See [module](self) documentation for more details. pub struct InitialGuessAnalysis { nonlinear: Vec, @@ -40,13 +28,13 @@ impl InitialGuessAnalysis { /// Analyze the system in given point. pub fn analyze( f: &F, - dom: &Domain, - x: &mut Vector, - fx: &mut Vector, - ) -> Result + dom: &Domain, + x: &mut Vector, + fx: &mut Vector, + ) -> Self where - Sx: StorageMut + IsContiguous, - Sfx: StorageMut, + Sx: StorageMut + IsContiguous, + Sfx: StorageMut, { let dim = Dynamic::new(dom.dim()); let scale = dom @@ -55,8 +43,8 @@ impl InitialGuessAnalysis { .unwrap_or_else(|| OVector::from_element_generic(dim, U1::name(), convert(1.0))); // Compute F'(x) in the initial point. - f.eval(x, fx)?; - let jac1 = Jacobian::new(f, x, &scale, fx)?; + f.eval(x, fx); + let jac1 = Jacobian::new(f, x, &scale, fx); // Compute Newton step. let mut p = fx.clone_owned(); @@ -66,12 +54,12 @@ impl InitialGuessAnalysis { qr.solve_mut(&mut p); // Do Newton step. - p *= convert::<_, F::Scalar>(0.001); + p *= convert::<_, F::Field>(0.001); *x += p; // Compute F'(x) after one Newton step. - f.eval(x, fx)?; - let jac2 = Jacobian::new(f, x, &scale, fx)?; + f.eval(x, fx); + let jac2 = Jacobian::new(f, x, &scale, fx); // Linear variables have no effect on the Jacobian matrix. They can be // recognized by observing no change in corresponding columns (i.e., @@ -87,15 +75,15 @@ impl InitialGuessAnalysis { .filter(|(_, (c1, c2))| { c1.iter() .zip(c2.iter()) - .any(|(a, b)| (*a - *b).abs() > convert(EPSILON_SQRT)) + .any(|(a, b)| (*a - *b).abs() > F::Field::EPSILON_SQRT) }) .map(|(col, _)| col) .collect(); - Ok(Self { + Self { nonlinear, ty: PhantomData, - }) + } } /// Returns indices of variables that have influence on the Jacobian matrix diff --git a/src/core/base.rs b/src/core/base.rs index d4bc8a8..7214630 100644 --- a/src/core/base.rs +++ b/src/core/base.rs @@ -1,31 +1,35 @@ -use nalgebra::RealField; -use thiserror::Error; - use super::domain::Domain; +/// Trait implemented by real numbers. +pub trait RealField: nalgebra::RealField { + /// Square root of double precision machine epsilon. This value is a + /// standard constant for epsilons in approximating first-order + /// derivate-based concepts. + const EPSILON_SQRT: Self; + + /// Cubic root of double precision machine epsilon. 
This value is a standard + /// constant for epsilons in approximating second-order derivate-based + /// concepts. + const EPSILON_CBRT: Self; +} + /// The base trait for [`System`](super::system::System) and /// [`Function`](super::function::Function). pub trait Problem { - /// Type of the scalar, usually f32 or f64. - type Scalar: RealField + Copy; + /// Type of the field, usually f32 or f64. + type Field: RealField + Copy; /// Get the domain (bound constraints) of the system. If not overridden, the /// system is unconstrained. - fn domain(&self) -> Domain; + fn domain(&self) -> Domain; +} + +impl RealField for f32 { + const EPSILON_SQRT: Self = 0.00034526698; + const EPSILON_CBRT: Self = 0.0049215667; } -/// Error encountered while applying variables to the problem. -#[derive(Debug, Error)] -pub enum ProblemError { - /// The number of variables does not match the dimensionality of the problem - /// domain. - #[error("invalid dimensionality")] - InvalidDimensionality, - /// An invalid value (NaN, positive or negative infinity) of a residual or - /// the function value occurred. - #[error("invalid value encountered")] - InvalidValue, - /// A custom error specific to the system or function. - #[error("{0}")] - Custom(Box), +impl RealField for f64 { + const EPSILON_SQRT: Self = 0.000000014901161193847656; + const EPSILON_CBRT: Self = 0.0000060554544523933395; } diff --git a/src/core/function.rs b/src/core/function.rs index 015a019..a12c478 100644 --- a/src/core/function.rs +++ b/src/core/function.rs @@ -1,26 +1,18 @@ -use nalgebra::{ - allocator::Allocator, storage::Storage, storage::StorageMut, DefaultAllocator, Dynamic, - IsContiguous, Vector, -}; -use num_traits::Zero; +use nalgebra::{storage::Storage, Dynamic, IsContiguous, Vector}; -use super::{ - base::{Problem, ProblemError}, - system::System, -}; +use super::{base::Problem, system::System}; /// The trait for defining functions. /// /// ## Defining a function /// /// A function is any type that implements [`Function`] and [`Problem`] traits. -/// There is one required associated type (scalar) and one required method +/// There is one required associated type (field) and one required method /// ([`apply`](Function::apply)). /// /// ```rust -/// use gomez::core::Function; /// use gomez::nalgebra as na; -/// use gomez::prelude::*; +/// use gomez::{Domain, Function, Problem}; /// use na::{Dynamic, IsContiguous}; /// /// // A problem is represented by a type. @@ -31,99 +23,40 @@ use super::{ /// /// impl Problem for Rosenbrock { /// // The numeric type. Usually f64 or f32. -/// type Scalar = f64; +/// type Field = f64; /// /// // The domain of the problem (could be bound-constrained). -/// fn domain(&self) -> Domain { +/// fn domain(&self) -> Domain { /// Domain::unconstrained(2) /// } /// } /// /// impl Function for Rosenbrock { /// // Apply trial values of variables to the function. -/// fn apply( -/// &self, -/// x: &na::Vector, -/// ) -> Result +/// fn apply(&self, x: &na::Vector) -> Self::Field /// where -/// Sx: na::storage::Storage + IsContiguous, +/// Sx: na::storage::Storage + IsContiguous, /// { /// // Compute the function value. -/// Ok((self.a - x[0]).powi(2) + self.b * (x[1] - x[0].powi(2)).powi(2)) +/// (self.a - x[0]).powi(2) + self.b * (x[1] - x[0].powi(2)).powi(2) /// } /// } /// ``` pub trait Function: Problem { /// Calculate the function value given values of the variables. 
- fn apply( - &self, - x: &Vector, - ) -> Result + fn apply(&self, x: &Vector) -> Self::Field where - Sx: Storage + IsContiguous; - - /// Calculate the norm of residuals of the system given values of the - /// variable for cases when the function is actually a system of equations. - /// - /// The optimizers should prefer calling this function because the - /// implementation for systems reuse `fx` for calculating the residuals and - /// do not make an unnecessary allocation for it. - fn apply_eval( - &self, - x: &Vector, - fx: &mut Vector, - ) -> Result - where - Sx: Storage + IsContiguous, - Sfx: StorageMut, - { - let norm = self.apply(x)?; - fx.fill(Self::Scalar::zero()); - fx[0] = norm; - Ok(norm) - } + Sx: Storage + IsContiguous; } -impl Function for F +impl Function for F where - DefaultAllocator: Allocator, + F: System, { - fn apply(&self, x: &Vector) -> Result + fn apply(&self, x: &Vector) -> Self::Field where - Sx: Storage + IsContiguous, + Sx: Storage + IsContiguous, { - let mut fx = x.clone_owned(); - self.apply_eval(x, &mut fx) - } - - fn apply_eval( - &self, - x: &Vector, - fx: &mut Vector, - ) -> Result - where - Sx: Storage + IsContiguous, - Sfx: StorageMut, - { - self.eval(x, fx)?; - let norm = fx.norm(); - Ok(norm) - } -} - -/// Extension trait for `Result`. -pub trait FunctionResultExt { - /// If the result is [`ProblemError::InvalidValue`], `Ok(default)` is - /// returned instead. The original result is returned otherwise. - fn ignore_invalid_value(self, replace_with: T) -> Self; -} - -impl FunctionResultExt for Result { - fn ignore_invalid_value(self, replace_with: T) -> Self { - match self { - Ok(value) => Ok(value), - Err(ProblemError::InvalidValue) => Ok(replace_with), - Err(error) => Err(error), - } + self.norm(x) } } diff --git a/src/core/optimizer.rs b/src/core/optimizer.rs index c0065a7..1d8b7b4 100644 --- a/src/core/optimizer.rs +++ b/src/core/optimizer.rs @@ -5,7 +5,7 @@ use super::{domain::Domain, function::Function}; /// Common interface for all optimizers. /// /// All optimizers implement a common interface defined by the [`Optimizer`] -/// trait. The essential method is [`next`](Optimizer::next) which takes +/// trait. The essential method is [`opt_next`](Optimizer::opt_next) which takes /// variables *x* and computes the next step. Thus it represents one iteration /// in the process. Repeated call to this method should move *x* towards the /// minimum in successful cases. @@ -21,7 +21,7 @@ use super::{domain::Domain, function::Function}; /// /// ```rust /// use gomez::nalgebra as na; -/// use gomez::core::*; +/// use gomez::*; /// use na::{storage::StorageMut, Dynamic, IsContiguous, Vector}; /// use rand::Rng; /// use rand_distr::{uniform::SampleUniform, Distribution, Standard}; @@ -38,27 +38,26 @@ use super::{domain::Domain, function::Function}; /// /// impl Optimizer for Random /// where -/// F::Scalar: SampleUniform, -/// Standard: Distribution, +/// F::Field: SampleUniform, +/// Standard: Distribution, /// { /// const NAME: &'static str = "Random"; -/// type Error = ProblemError; +/// type Error = std::convert::Infallible; /// -/// fn next( +/// fn opt_next( /// &mut self, /// f: &F, -/// dom: &Domain, -/// x: &mut Vector, -/// ) -> Result +/// dom: &Domain, +/// x: &mut Vector, +/// ) -> Result /// where -/// Sx: StorageMut + IsContiguous, +/// Sx: StorageMut + IsContiguous, /// { /// // Randomly sample in the domain. /// dom.sample(x, &mut self.rng); /// /// // We must compute the value. 
-/// let value = f.apply(x)?; -/// Ok(value) +/// Ok(f.apply(x)) /// } /// } /// ``` @@ -67,9 +66,7 @@ pub trait Optimizer { const NAME: &'static str; /// Error type of the iteration. Represents an invalid operation during - /// computing the next step. It is usual that one of the error kinds is - /// propagation of the [`ProblemError`](super::ProblemError) from the - /// function. + /// computing the next step. type Error; /// Computes the next step in the optimization process. @@ -85,12 +82,12 @@ pub trait Optimizer { /// The implementations *can* assume that subsequent calls to `next` pass /// the value of `x` as was outputted in the previous iteration by the same /// method. - fn next( + fn opt_next( &mut self, f: &F, - dom: &Domain, - x: &mut Vector, - ) -> Result + dom: &Domain, + x: &mut Vector, + ) -> Result where - Sx: StorageMut + IsContiguous; + Sx: StorageMut + IsContiguous; } diff --git a/src/core/solver.rs b/src/core/solver.rs index 305fed7..44443b6 100644 --- a/src/core/solver.rs +++ b/src/core/solver.rs @@ -5,10 +5,10 @@ use super::{domain::Domain, system::System}; /// Common interface for all solvers. /// /// All solvers implement a common interface defined by the [`Solver`] trait. -/// The essential method is [`next`](Solver::next) which takes variables *x* and -/// computes the next step. Thus it represents one iteration in the process. -/// Repeated call to this method should converge *x* to the solution in -/// successful cases. +/// The essential method is [`solve_next`](Solver::solve_next) which takes +/// variables *x* and computes the next step. Thus it represents one iteration +/// in the process. Repeated call to this method should converge *x* to the +/// solution in successful cases. /// /// If you implement a solver, please consider make it a contribution to this /// library. @@ -21,7 +21,7 @@ use super::{domain::Domain, system::System}; /// /// ```rust /// use gomez::nalgebra as na; -/// use gomez::core::*; +/// use gomez::*; /// use na::{storage::StorageMut, Dynamic, IsContiguous, Vector}; /// use rand::Rng; /// use rand_distr::{uniform::SampleUniform, Distribution, Standard}; @@ -38,28 +38,28 @@ use super::{domain::Domain, system::System}; /// /// impl Solver for Random /// where -/// F::Scalar: SampleUniform, -/// Standard: Distribution, +/// F::Field: SampleUniform, +/// Standard: Distribution, /// { /// const NAME: &'static str = "Random"; -/// type Error = ProblemError; +/// type Error = std::convert::Infallible; /// -/// fn next( +/// fn solve_next( /// &mut self, /// f: &F, -/// dom: &Domain, -/// x: &mut Vector, -/// fx: &mut Vector, +/// dom: &Domain, +/// x: &mut Vector, +/// fx: &mut Vector, /// ) -> Result<(), Self::Error> /// where -/// Sx: StorageMut + IsContiguous, -/// Sfx: StorageMut, +/// Sx: StorageMut + IsContiguous, +/// Sfx: StorageMut, /// { /// // Randomly sample in the domain. /// dom.sample(x, &mut self.rng); /// /// // We must compute the residuals. -/// f.eval(x, fx)?; +/// f.eval(x, fx); /// /// Ok(()) /// } @@ -70,9 +70,7 @@ pub trait Solver { const NAME: &'static str; /// Error type of the iteration. Represents an invalid operation during - /// computing the next step. It is usual that one of the error kinds is - /// propagation of the [`ProblemError`](super::ProblemError) from the - /// system. + /// computing the next step. type Error; /// Computes the next step in the solving process. 
@@ -88,14 +86,14 @@ pub trait Solver { /// The implementations *can* assume that subsequent calls to `next` pass /// the value of `x` as was outputted in the previous iteration by the same /// method. - fn next( + fn solve_next( &mut self, f: &F, - dom: &Domain, - x: &mut Vector, - fx: &mut Vector, + dom: &Domain, + x: &mut Vector, + fx: &mut Vector, ) -> Result<(), Self::Error> where - Sx: StorageMut + IsContiguous, - Sfx: StorageMut; + Sx: StorageMut + IsContiguous, + Sfx: StorageMut; } diff --git a/src/core/system.rs b/src/core/system.rs index c415ff3..3cea18f 100644 --- a/src/core/system.rs +++ b/src/core/system.rs @@ -4,22 +4,19 @@ use nalgebra::{ DefaultAllocator, Dynamic, IsContiguous, OVector, Vector, }; -use super::{ - base::{Problem, ProblemError}, - domain::Domain, -}; +use super::{base::Problem, domain::Domain}; /// The trait for defining equations systems. /// /// ## Defining a system /// /// A system is any type that implements [`System`] and [`Problem`] traits. -/// There is one required associated type (scalar type) and one required method +/// There is one required associated type (field type) and one required method /// ([`eval`](System::eval)). /// /// ```rust /// use gomez::nalgebra as na; -/// use gomez::prelude::*; +/// use gomez::{Domain, Problem, System}; /// use na::{Dynamic, IsContiguous}; /// /// // A problem is represented by a type. @@ -30,10 +27,10 @@ use super::{ /// /// impl Problem for Rosenbrock { /// // The numeric type. Usually f64 or f32. -/// type Scalar = f64; +/// type Field = f64; /// /// // The domain of the problem (could be bound-constrained). -/// fn domain(&self) -> Domain { +/// fn domain(&self) -> Domain { /// Domain::unconstrained(2) /// } /// } @@ -42,18 +39,15 @@ use super::{ /// // Evaluate trial values of variables to the system. /// fn eval( /// &self, -/// x: &na::Vector, -/// fx: &mut na::Vector, -/// ) -> Result<(), ProblemError> -/// where -/// Sx: na::storage::Storage + IsContiguous, -/// Sfx: na::storage::StorageMut, +/// x: &na::Vector, +/// fx: &mut na::Vector, +/// ) where +/// Sx: na::storage::Storage + IsContiguous, +/// Sfx: na::storage::StorageMut, /// { /// // Compute the residuals of all equations. /// fx[0] = (self.a - x[0]).powi(2); /// fx[1] = self.b * (x[1] - x[0].powi(2)).powi(2); -/// -/// Ok(()) /// } /// } /// ``` @@ -61,12 +55,25 @@ pub trait System: Problem { /// Calculate the residuals of the system given values of the variables. fn eval( &self, - x: &Vector, - fx: &mut Vector, - ) -> Result<(), ProblemError> + x: &Vector, + fx: &mut Vector, + ) where + Sx: Storage + IsContiguous, + Sfx: StorageMut; + + /// Calculate the residuals vector norm. + /// + /// The default implementation allocates a temporary vector for the + /// residuals on every call. If you plan to solve the system by an + /// optimizer, consider overriding the default implementation. + fn norm(&self, x: &Vector) -> Self::Field where - Sx: Storage + IsContiguous, - Sfx: StorageMut; + Sx: Storage + IsContiguous, + { + let mut fx = x.clone_owned(); + self.eval(x, &mut fx); + fx.norm() + } } /// A wrapper type for systems that implements a standard mechanism for @@ -83,7 +90,7 @@ pub trait System: Problem { /// for example. pub struct RepulsiveSystem<'f, F: System> { f: &'f F, - archive: Vec>, + archive: Vec>, } impl<'f, F: System> RepulsiveSystem<'f, F> { @@ -96,7 +103,7 @@ impl<'f, F: System> RepulsiveSystem<'f, F> { } /// Add a found solution to the archive. 
- pub fn push(&mut self, root: OVector) { + pub fn push(&mut self, root: OVector) { self.archive.push(root); } @@ -111,31 +118,30 @@ impl<'f, F: System> RepulsiveSystem<'f, F> { } /// Unpack the archive which contains all solutions found. - pub fn unpack(self) -> Vec> { + pub fn unpack(self) -> Vec> { self.archive } } impl<'f, F: System> Problem for RepulsiveSystem<'f, F> { - type Scalar = F::Scalar; + type Field = F::Field; - fn domain(&self) -> Domain { + fn domain(&self) -> Domain { self.f.domain() } } impl<'f, F: System> System for RepulsiveSystem<'f, F> where - DefaultAllocator: Allocator, + DefaultAllocator: Allocator, { fn eval( &self, - x: &Vector, - fx: &mut Vector, - ) -> Result<(), ProblemError> - where - Sx: Storage + IsContiguous, - Sfx: StorageMut, + x: &Vector, + fx: &mut Vector, + ) where + Sx: Storage + IsContiguous, + Sfx: StorageMut, { // TODO: RepulsiveSystem should adjust the residuals of the inner system // such that solvers tend to go away from the roots stored in the diff --git a/src/derivatives.rs b/src/derivatives.rs index 95ddca7..94d0174 100644 --- a/src/derivatives.rs +++ b/src/derivatives.rs @@ -3,35 +3,17 @@ use std::ops::Deref; use nalgebra::{ - convert, storage::{Storage, StorageMut}, ComplexField, DimName, Dynamic, IsContiguous, OMatrix, OVector, RealField, Vector, U1, }; use num_traits::{One, Zero}; -use thiserror::Error; -use crate::core::{Function, Problem, ProblemError, System}; - -/// Square root of double precision machine epsilon. This value is a standard -/// constant for epsilons in approximating first-order derivate-based concepts. -pub const EPSILON_SQRT: f64 = 0.000000014901161193847656; - -/// Cubic root of double precision machine epsilon. This value is a standard -/// constant for epsilons in approximating second-order derivate-based concepts. -pub const EPSILON_CBRT: f64 = 0.0000060554544523933395; - -/// Error when computing the Jacobian matrix. -#[derive(Debug, Error)] -pub enum JacobianError { - /// Error that occurred when evaluating the system. - #[error("{0}")] - Problem(#[from] ProblemError), -} +use crate::core::{Function, Problem, RealField as _, System}; /// Jacobian matrix of a system. #[derive(Debug)] pub struct Jacobian { - jac: OMatrix, + jac: OMatrix, } impl Jacobian { @@ -50,18 +32,18 @@ impl Jacobian { /// details. pub fn new( f: &F, - x: &mut Vector, - scale: &Vector, - fx: &Vector, - ) -> Result + x: &mut Vector, + scale: &Vector, + fx: &Vector, + ) -> Self where - Sx: StorageMut + IsContiguous, - Sscale: Storage, - Sfx: Storage, + Sx: StorageMut + IsContiguous, + Sscale: Storage, + Sfx: Storage, { let mut jac = Self::zeros(f); - jac.compute(f, x, scale, fx)?; - Ok(jac) + jac.compute(f, x, scale, fx); + jac } /// Compute the Jacobian matrix of the system in given point with given @@ -76,16 +58,16 @@ impl Jacobian { pub fn compute( &mut self, f: &F, - x: &mut Vector, - scale: &Vector, - fx: &Vector, - ) -> Result<&mut Self, JacobianError> + x: &mut Vector, + scale: &Vector, + fx: &Vector, + ) -> &mut Self where - Sx: StorageMut + IsContiguous, - Sscale: Storage, - Sfx: Storage, + Sx: StorageMut + IsContiguous, + Sscale: Storage, + Sfx: Storage, { - let eps: F::Scalar = convert(EPSILON_SQRT); + let eps = F::Field::EPSILON_SQRT; for (j, mut col) in self.jac.column_iter_mut().enumerate() { let xj = x[j]; @@ -99,13 +81,13 @@ impl Jacobian { // A reasonable way to balance these competing needs is to scale // each component by x_j itself. 
To avoid problems when x_j is close // to zero, it is modified to take the typical magnitude instead. - let magnitude = F::Scalar::one() / scale[j]; - let step = eps * xj.abs().max(magnitude) * xj.copysign(F::Scalar::one()); - let step = if step == F::Scalar::zero() { eps } else { step }; + let magnitude = F::Field::one() / scale[j]; + let step = eps * xj.abs().max(magnitude) * xj.copysign(F::Field::one()); + let step = if step == F::Field::zero() { eps } else { step }; // Update the point. x[j] = xj + step; - f.eval(x, &mut col)?; + f.eval(x, &mut col); // Compute the derivative approximation: J[i, j] = (F(x + e_j * step_j) - F(x)) / step_j. col -= fx; @@ -115,30 +97,22 @@ impl Jacobian { x[j] = xj; } - Ok(self) + self } } impl Deref for Jacobian { - type Target = OMatrix; + type Target = OMatrix; fn deref(&self) -> &Self::Target { &self.jac } } -/// Error when computing the gradient matrix. -#[derive(Debug, Error)] -pub enum GradientError { - /// Error that occurred when evaluating the function. - #[error("{0}")] - Problem(#[from] ProblemError), -} - /// Gradient vector of a function. #[derive(Debug)] pub struct Gradient { - grad: OVector, + grad: OVector, } impl Gradient { @@ -157,17 +131,17 @@ impl Gradient { /// details. pub fn new( f: &F, - x: &mut Vector, - scale: &Vector, - fx: F::Scalar, - ) -> Result + x: &mut Vector, + scale: &Vector, + fx: F::Field, + ) -> Self where - Sx: StorageMut + IsContiguous, - Sscale: Storage, + Sx: StorageMut + IsContiguous, + Sscale: Storage, { let mut grad = Self::zeros(f); - grad.compute(f, x, scale, fx)?; - Ok(grad) + grad.compute(f, x, scale, fx); + grad } /// Compute the gradient vector of the function in given point with given @@ -182,27 +156,27 @@ impl Gradient { pub fn compute( &mut self, f: &F, - x: &mut Vector, - scale: &Vector, - fx: F::Scalar, - ) -> Result<&mut Self, GradientError> + x: &mut Vector, + scale: &Vector, + fx: F::Field, + ) -> &mut Self where - Sx: StorageMut + IsContiguous, - Sscale: Storage, + Sx: StorageMut + IsContiguous, + Sscale: Storage, { - let eps: F::Scalar = convert(EPSILON_SQRT); + let eps = F::Field::EPSILON_SQRT; for i in 0..x.nrows() { let xi = x[i]; // See the implementation of Jacobian for details on computing step size. - let magnitude = F::Scalar::one() / scale[i]; - let step = eps * xi.abs().max(magnitude) * F::Scalar::one().copysign(xi); - let step = if step == F::Scalar::zero() { eps } else { step }; + let magnitude = F::Field::one() / scale[i]; + let step = eps * xi.abs().max(magnitude) * F::Field::one().copysign(xi); + let step = if step == F::Field::zero() { eps } else { step }; // Update the point. x[i] = xi + step; - let fxi = f.apply(x)?; + let fxi = f.apply(x); // Compute the derivative approximation: grad[i] = (F(x + e_i * step_i) - F(x)) / step_i. self.grad[i] = (fxi - fx) / step; @@ -211,32 +185,24 @@ impl Gradient { x[i] = xi; } - Ok(self) + self } } impl Deref for Gradient { - type Target = OVector; + type Target = OVector; fn deref(&self) -> &Self::Target { &self.grad } } -/// Error when computing the Hessian matrix. -#[derive(Debug, Error)] -pub enum HessianError { - /// Error that occurred when evaluating the system. - #[error("{0}")] - Problem(#[from] ProblemError), -} - /// Hessian matrix of a system. #[derive(Debug)] pub struct Hessian { - hes: OMatrix, - steps: OVector, - neighbors: OVector, + hes: OMatrix, + steps: OVector, + neighbors: OVector, } impl Hessian { @@ -257,17 +223,17 @@ impl Hessian { /// details. 
pub fn new( f: &F, - x: &mut Vector, - scale: &Vector, - fx: F::Scalar, - ) -> Result + x: &mut Vector, + scale: &Vector, + fx: F::Field, + ) -> Self where - Sx: StorageMut + IsContiguous, - Sscale: Storage, + Sx: StorageMut + IsContiguous, + Sscale: Storage, { let mut hes = Self::zeros(f); - hes.compute(f, x, scale, fx)?; - Ok(hes) + hes.compute(f, x, scale, fx); + hes } /// Compute the Hessian matrix of the function in given point with given @@ -282,30 +248,30 @@ impl Hessian { pub fn compute( &mut self, f: &F, - x: &mut Vector, - scale: &Vector, - fx: F::Scalar, - ) -> Result<&mut Self, HessianError> + x: &mut Vector, + scale: &Vector, + fx: F::Field, + ) -> &mut Self where - Sx: StorageMut + IsContiguous, - Sscale: Storage, + Sx: StorageMut + IsContiguous, + Sscale: Storage, { - let eps: F::Scalar = convert(EPSILON_CBRT); + let eps = F::Field::EPSILON_CBRT; for i in 0..x.nrows() { let xi = x[i]; // See the implementation of Jacobian for details on computing step size. - let magnitude = F::Scalar::one() / scale[i]; - let step = eps * xi.abs().max(magnitude) * F::Scalar::one().copysign(xi); - let step = if step == F::Scalar::zero() { eps } else { step }; + let magnitude = F::Field::one() / scale[i]; + let step = eps * xi.abs().max(magnitude) * F::Field::one().copysign(xi); + let step = if step == F::Field::zero() { eps } else { step }; // Store the step for Hessian calculation. self.steps[i] = step; // Update the point and store the function output. x[i] = xi + step; - let fxi = f.apply(x)?; + let fxi = f.apply(x); self.neighbors[i] = fxi; // Restore the original value. @@ -319,7 +285,7 @@ impl Hessian { // Prepare x_i + 2 * e_i. x[i] = xi + stepi + stepi; - let fxi = f.apply(x)?; + let fxi = f.apply(x); let fni = self.neighbors[i]; x[i] = xi + stepi; @@ -332,7 +298,7 @@ impl Hessian { x[j] = xj + stepj; - let fxj = f.apply(x)?; + let fxj = f.apply(x); let fnj = self.neighbors[j]; let hij = ((fx - fni) + (fxj - fnj)) / (stepi * stepj); @@ -345,12 +311,12 @@ impl Hessian { x[i] = xi; } - Ok(self) + self } } impl Deref for Hessian { - type Target = OMatrix; + type Target = OMatrix; fn deref(&self) -> &Self::Target { &self.hes @@ -371,27 +337,24 @@ mod tests { struct MixedVars; impl Problem for MixedVars { - type Scalar = f64; + type Field = f64; - fn domain(&self) -> Domain { + fn domain(&self) -> Domain { Domain::unconstrained(2) } } impl Function for MixedVars { - fn apply( - &self, - x: &Vector, - ) -> Result + fn apply(&self, x: &Vector) -> Self::Field where - Sx: Storage + IsContiguous, + Sx: Storage + IsContiguous, { // A simple, arbitrary function that produces Hessian matrix with // non-zero corners. 
let x1 = x[0]; let x2 = x[1]; - Ok(x1.powi(2) + x1 * x2 + x2.powi(3)) + x1.powi(2) + x1 * x2 + x2.powi(3) } } @@ -402,12 +365,9 @@ mod tests { let mut fx = dvector![0.0, 0.0]; let func = ExtendedRosenbrock::new(2); - func.eval(&x, &mut fx).unwrap(); + func.eval(&x, &mut fx); let jac = Jacobian::new(&func, &mut x, &scale, &fx); - assert!(jac.is_ok()); - let jac = jac.unwrap(); - let expected = dmatrix![-40.0, 10.0; -1.0, 0.0]; assert_abs_diff_eq!(&*jac, &expected, epsilon = 10e-6); } @@ -419,12 +379,9 @@ mod tests { let mut fx = dvector![0.0, 0.0, 0.0, 0.0]; let func = ExtendedPowell::new(4); - func.eval(&x, &mut fx).unwrap(); + func.eval(&x, &mut fx); let jac = Jacobian::new(&func, &mut x, &scale, &fx); - assert!(jac.is_ok()); - let jac = jac.unwrap(); - let expected = dmatrix![ 1.0, 10.0, 0.0, 0.0; 0.0, 0.0, 5f64.sqrt(), -(5f64.sqrt()); @@ -440,12 +397,9 @@ mod tests { let scale = dvector![1.0, 1.0]; let func = MixedVars; - let fx = func.apply(&x).unwrap(); + let fx = func.apply(&x); let grad = Gradient::new(&func, &mut x, &scale, fx); - assert!(grad.is_ok()); - let grad = grad.unwrap(); - let expected = dvector![3.0, 30.0]; assert_abs_diff_eq!(&*grad, &expected, epsilon = 10e-6); } @@ -456,12 +410,9 @@ mod tests { let scale = dvector![1.0, 1.0]; let func = MixedVars; - let fx = func.apply(&x).unwrap(); + let fx = func.apply(&x); let hes = Hessian::new(&func, &mut x, &scale, fx); - assert!(hes.is_ok()); - let hes = hes.unwrap(); - let expected = dmatrix![2.0, 1.0; 1.0, -18.0]; assert_abs_diff_eq!(&*hes, &expected, epsilon = 10e-3); } diff --git a/src/lib.rs b/src/lib.rs index 34945b4..a43fd0c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -64,12 +64,12 @@ //! incorporating them and the implementation of appropriate solvers. //! //! When it comes to code, the problem is any type that implements the -//! [`System`](core::System) and [`Problem`](core::Problem) traits. +//! [`System`] and [`Problem`] traits. //! //! ```rust //! // Gomez is based on `nalgebra` crate. //! use gomez::nalgebra as na; -//! use gomez::prelude::*; +//! use gomez::{Domain, Problem, System}; //! use na::{Dynamic, IsContiguous}; //! //! // A problem is represented by a type. @@ -80,11 +80,11 @@ //! //! impl Problem for Rosenbrock { //! // The numeric type. Usually f64 or f32. -//! type Scalar = f64; +//! type Field = f64; //! //! // Specification for the domain. At the very least, the dimension //! // must be known. -//! fn domain(&self) -> Domain { +//! fn domain(&self) -> Domain { //! Domain::unconstrained(2) //! } //! } @@ -93,18 +93,15 @@ //! // Evaluate trial values of variables to the system. //! fn eval( //! &self, -//! x: &na::Vector, -//! fx: &mut na::Vector, -//! ) -> Result<(), ProblemError> -//! where -//! Sx: na::storage::Storage + IsContiguous, -//! Sfx: na::storage::StorageMut, +//! x: &na::Vector, +//! fx: &mut na::Vector, +//! ) where +//! Sx: na::storage::Storage + IsContiguous, +//! Sfx: na::storage::StorageMut, //! { //! // Compute the residuals of all equations. //! fx[0] = (self.a - x[0]).powi(2); //! fx[1] = self.b * (x[1] - x[0].powi(2)).powi(2); -//! -//! Ok(()) //! } //! } //! ``` @@ -120,7 +117,7 @@ //! //! ```rust //! # use gomez::nalgebra as na; -//! # use gomez::prelude::*; +//! # use gomez::*; //! # //! # struct Rosenbrock { //! # a: f64, @@ -128,9 +125,9 @@ //! # } //! # //! impl Problem for Rosenbrock { -//! # type Scalar = f64; +//! # type Field = f64; //! -//! fn domain(&self) -> Domain { +//! fn domain(&self) -> Domain { //! [(-10.0, 10.0), (-10.0, 10.0)].into_iter().collect() //! } //! 
} @@ -149,9 +146,10 @@ //! //! ```rust //! use gomez::nalgebra as na; -//! use gomez::prelude::*; +//! use gomez::Solver; //! // Pick your solver. //! use gomez::solver::TrustRegion; +//! # use gomez::{Domain, Problem, System}; //! # use na::{Dynamic, IsContiguous}; //! # //! # struct Rosenbrock { @@ -160,9 +158,9 @@ //! # } //! # //! # impl Problem for Rosenbrock { -//! # type Scalar = f64; +//! # type Field = f64; //! # -//! # fn domain(&self) -> Domain { +//! # fn domain(&self) -> Domain { //! # Domain::unconstrained(2) //! # } //! # } @@ -170,17 +168,14 @@ //! # impl System for Rosenbrock { //! # fn eval( //! # &self, -//! # x: &na::Vector, -//! # fx: &mut na::Vector, -//! # ) -> Result<(), ProblemError> -//! # where -//! # Sx: na::storage::Storage + IsContiguous, -//! # Sfx: na::storage::StorageMut, +//! # x: &na::Vector, +//! # fx: &mut na::Vector, +//! # ) where +//! # Sx: na::storage::Storage + IsContiguous, +//! # Sfx: na::storage::StorageMut, //! # { //! # fx[0] = (self.a - x[0]).powi(2); //! # fx[1] = self.b * (x[1] - x[0].powi(2)).powi(2); -//! # -//! # Ok(()) //! # } //! # } //! @@ -198,7 +193,7 @@ //! for i in 1.. { //! // Do one iteration in the solving process. //! solver -//! .next(&f, &dom, &mut x, &mut fx) +//! .solve_next(&f, &dom, &mut x, &mut fx) //! .expect("solver encountered an error"); //! //! println!( @@ -240,10 +235,12 @@ //! Licensed under MIT. pub mod analysis; -pub mod core; +mod core; pub mod derivatives; pub mod solver; +pub use core::*; + #[cfg(feature = "testing")] pub mod testing; @@ -251,8 +248,3 @@ pub mod testing; pub(crate) mod testing; pub use nalgebra; - -/// Gomez prelude. -pub mod prelude { - pub use crate::core::{Domain, Problem, ProblemError, Solver, System}; -} diff --git a/src/solver/lipo.rs b/src/solver/lipo.rs index 06e2f5c..cf03b1d 100644 --- a/src/solver/lipo.rs +++ b/src/solver/lipo.rs @@ -20,7 +20,7 @@ use rand::Rng; use rand_distr::{uniform::SampleUniform, Bernoulli, Distribution, Standard}; use thiserror::Error; -use crate::core::{Domain, Function, Optimizer, Problem, ProblemError, Solver, System}; +use crate::core::{Domain, Function, Optimizer, Problem, Solver, System}; use super::NelderMead; @@ -52,7 +52,7 @@ pub struct LipoOptions { p_explore: f64, /// Alpha parameter for (1 + alpha)^i meshgrid for Lipschitz constant /// estimation. - alpha: AlphaInit, + alpha: AlphaInit, /// Number of sampling trials. If no potential minimizer is found after this /// number of trials, the solver returns error. sampling_trials: usize, @@ -77,28 +77,28 @@ impl Default for LipoOptions { /// LIPO solver. See [module](self) documentation for more details. pub struct Lipo { options: LipoOptions, - alpha: F::Scalar, - xs: Vec>, - ys: Vec, + alpha: F::Field, + xs: Vec>, + ys: Vec, best: usize, - k: F::Scalar, - k_inf: F::Scalar, + k: F::Field, + k_inf: F::Field, rng: R, bernoulli: Bernoulli, - tmp: OVector, - x_tmp: OVector, + tmp: OVector, + x_tmp: OVector, local_optimizer: NelderMead, iter: usize, } impl Lipo { /// Initializes LIPO solver with default options. - pub fn new(f: &F, dom: &Domain, rng: R) -> Self { + pub fn new(f: &F, dom: &Domain, rng: R) -> Self { Self::with_options(f, dom, LipoOptions::default(), rng) } /// Initializes LIPO solver with given options. 
- pub fn with_options(f: &F, dom: &Domain, options: LipoOptions, rng: R) -> Self { + pub fn with_options(f: &F, dom: &Domain, options: LipoOptions, rng: R) -> Self { let dim = Dynamic::new(dom.dim()); let p_explore = options.p_explore.clamp(0.0, 1.0); @@ -114,8 +114,8 @@ impl Lipo { xs: Vec::new(), ys: Vec::new(), best: 0, - k: F::Scalar::zero(), - k_inf: F::Scalar::zero(), + k: F::Field::zero(), + k_inf: F::Field::zero(), rng, bernoulli: Bernoulli::new(p_explore).unwrap(), tmp: OVector::zeros_generic(dim, U1::name()), @@ -130,8 +130,8 @@ impl Lipo { self.xs.clear(); self.ys.clear(); self.best = 0; - self.k = F::Scalar::zero(); - self.k_inf = F::Scalar::zero(); + self.k = F::Field::zero(); + self.k_inf = F::Field::zero(); self.iter = 0; } @@ -142,8 +142,8 @@ impl Lipo { /// the LIPO solver gives extra information for free. pub fn add_evaluation( &mut self, - x: OVector, - y: F::Scalar, + x: OVector, + y: F::Field, ) -> Result<(), LipoError> { let alpha = self.alpha; @@ -179,9 +179,9 @@ impl Lipo { debug!("|| x - xi || = {}", dist); } - let it = try_convert(((*k_inf).ln() / (F::Scalar::one() + alpha).ln()).ceil()) + let it = try_convert(((*k_inf).ln() / (F::Field::one() + alpha).ln()).ceil()) .unwrap_or_default() as i32; - *k = (F::Scalar::one() + alpha).powi(it); + *k = (F::Field::one() + alpha).powi(it); if !k.is_finite() { return Err(LipoError::InfiniteLipschitzConstant); @@ -201,9 +201,6 @@ impl Lipo { /// Error returned from [`Lipo`] solver. #[derive(Debug, Error)] pub enum LipoError { - /// Error that occurred when evaluating the system. - #[error("{0}")] - Problem(#[from] ProblemError), /// Error when no potential minimizer is found after number sampling trials. #[error("potential minimizer was not found after specified number of trials")] PotentialMinimizerNotFound, @@ -214,17 +211,17 @@ pub enum LipoError { impl Lipo where - F::Scalar: SampleUniform, - Standard: Distribution, + F::Field: SampleUniform, + Standard: Distribution, { fn next_inner( &mut self, f: &F, - dom: &Domain, - x: &mut Vector, - ) -> Result + dom: &Domain, + x: &mut Vector, + ) -> Result where - Sx: StorageMut + IsContiguous, + Sx: StorageMut + IsContiguous, { let LipoOptions { sampling_trials, @@ -250,7 +247,7 @@ where if xs.is_empty() { debug!("first iteration, just evaluating"); // First iteration. We just evaluate the initial point and store it. - let error = f.apply(x)?; + let error = f.apply(x); xs.push(x.clone_owned()); ys.push(error); @@ -271,7 +268,7 @@ where // Exploitation mode is allowed only when there is enough points // evaluated and the Lipschitz constant is estimated. Then there is // randomness involved in choosing whether we explore or exploit. - if !initialization && *k != F::Scalar::zero() && !bernoulli.sample(rng) { + if !initialization && *k != F::Field::zero() && !bernoulli.sample(rng) { debug!("exploitation mode"); let mut tmp_best = ys[*best]; @@ -353,12 +350,12 @@ where // Do not fail the optimization on an error from the local // optimization. - match local_optimizer.next(f, dom, x) { + match local_optimizer.opt_next(f, dom, x) { Ok(error) => error, - Err(_) => f.apply(x)?, + Err(_) => f.apply(x), } } else { - f.apply(x)? + f.apply(x) }; // New point is better then the current best, so we update it. 
@@ -383,21 +380,21 @@ where impl Optimizer for Lipo where - F::Scalar: SampleUniform, - Standard: Distribution, + F::Field: SampleUniform, + Standard: Distribution, { const NAME: &'static str = "LIPO"; type Error = LipoError; - fn next( + fn opt_next( &mut self, f: &F, - dom: &Domain<::Scalar>, - x: &mut Vector<::Scalar, Dynamic, Sx>, - ) -> Result<::Scalar, Self::Error> + dom: &Domain<::Field>, + x: &mut Vector<::Field, Dynamic, Sx>, + ) -> Result<::Field, Self::Error> where - Sx: StorageMut<::Scalar, Dynamic> + IsContiguous, + Sx: StorageMut<::Field, Dynamic> + IsContiguous, { self.next_inner(f, dom, x) } @@ -405,26 +402,26 @@ where impl Solver for Lipo where - F::Scalar: SampleUniform, - Standard: Distribution, + F::Field: SampleUniform, + Standard: Distribution, { const NAME: &'static str = "LIPO"; type Error = LipoError; - fn next( + fn solve_next( &mut self, f: &F, - dom: &Domain<::Scalar>, - x: &mut Vector<::Scalar, Dynamic, Sx>, - fx: &mut Vector<::Scalar, Dynamic, Sfx>, + dom: &Domain<::Field>, + x: &mut Vector<::Field, Dynamic, Sx>, + fx: &mut Vector<::Field, Dynamic, Sfx>, ) -> Result<(), Self::Error> where - Sx: StorageMut<::Scalar, Dynamic> + IsContiguous, - Sfx: StorageMut<::Scalar, Dynamic>, + Sx: StorageMut<::Field, Dynamic> + IsContiguous, + Sfx: StorageMut<::Field, Dynamic>, { self.next_inner(f, dom, x)?; - f.eval(x, fx)?; + f.eval(x, fx); Ok(()) } } diff --git a/src/solver/nelder_mead.rs b/src/solver/nelder_mead.rs index 55f85a6..a5d09d4 100644 --- a/src/solver/nelder_mead.rs +++ b/src/solver/nelder_mead.rs @@ -26,15 +26,12 @@ use log::debug; use nalgebra::{ convert, storage::{Storage, StorageMut}, - Dim, DimName, Dynamic, IsContiguous, OVector, RealField, Vector, U1, + ComplexField, Dim, DimName, Dynamic, IsContiguous, OVector, RealField, Vector, U1, }; use num_traits::{One, Zero}; use thiserror::Error; -use crate::{ - core::{Domain, Function, FunctionResultExt, Optimizer, Problem, ProblemError, Solver, System}, - derivatives::EPSILON_SQRT, -}; +use crate::core::{Domain, Function, Optimizer, Problem, RealField as _, Solver, System}; /// Family of coefficients for reflection, expansion and contractions. #[derive(Debug, Clone, Copy)] @@ -61,15 +58,15 @@ pub struct NelderMeadOptions { /// balanced (see [`CoefficientsFamily`]). family: CoefficientsFamily, /// Coefficient for reflection operation. Default: `-1`. - reflection_coeff: F::Scalar, + reflection_coeff: F::Field, /// Coefficient for expansion operation. Default: `-2`. - expansion_coeff: F::Scalar, + expansion_coeff: F::Field, /// Coefficient for outer contraction operation. Default: `-0.5`. - outer_contraction_coeff: F::Scalar, + outer_contraction_coeff: F::Field, /// Coefficient for inner contraction operation. Default: `0.5`. - inner_contraction_coeff: F::Scalar, + inner_contraction_coeff: F::Field, /// Coefficient for shrinking operation. Default: `0.5`. 
- shrink_coeff: F::Scalar, + shrink_coeff: F::Field, } impl Default for NelderMeadOptions { @@ -86,7 +83,7 @@ impl Default for NelderMeadOptions { } impl NelderMeadOptions { - fn overwrite_coeffs(&mut self, dom: &Domain) { + fn overwrite_coeffs(&mut self, dom: &Domain) { let Self { family, reflection_coeff, @@ -105,14 +102,14 @@ impl NelderMeadOptions { *shrink_coeff = convert(0.5); } CoefficientsFamily::Balanced => { - let n: F::Scalar = convert(dom.dim() as f64); - let n_inv = F::Scalar::one() / n; + let n: F::Field = convert(dom.dim() as f64); + let n_inv = F::Field::one() / n; *reflection_coeff = convert(-1.0); *expansion_coeff = -(n_inv * convert(2.0) + convert(1.0)); - *outer_contraction_coeff = -(F::Scalar::one() - n_inv); + *outer_contraction_coeff = -(F::Field::one() - n_inv); *inner_contraction_coeff = -*outer_contraction_coeff; - *shrink_coeff = F::Scalar::one() - n_inv; + *shrink_coeff = F::Field::one() - n_inv; } CoefficientsFamily::GoldenSection => { let alpha = 1.0 / (0.5 * (5f64.sqrt() + 1.0)); @@ -132,26 +129,24 @@ impl NelderMeadOptions { /// Nelder-Mead solver. See [module](self) documentation for more details. pub struct NelderMead { options: NelderMeadOptions, - fx: OVector, - fx_best: OVector, - scale: OVector, - centroid: OVector, - reflection: OVector, - expansion: OVector, - contraction: OVector, - simplex: Vec>, - errors: Vec, + scale: OVector, + centroid: OVector, + reflection: OVector, + expansion: OVector, + contraction: OVector, + simplex: Vec>, + errors: Vec, sort_perm: Vec, } impl NelderMead { /// Initializes Nelder-Mead solver with default options. - pub fn new(f: &F, dom: &Domain) -> Self { + pub fn new(f: &F, dom: &Domain) -> Self { Self::with_options(f, dom, NelderMeadOptions::default()) } /// Initializes Nelder-Mead solver with given options. - pub fn with_options(_: &F, dom: &Domain, mut options: NelderMeadOptions) -> Self { + pub fn with_options(_: &F, dom: &Domain, mut options: NelderMeadOptions) -> Self { let dim = Dynamic::new(dom.dim()); options.overwrite_coeffs(dom); @@ -163,8 +158,6 @@ impl NelderMead { Self { options, - fx: OVector::zeros_generic(dim, U1::name()), - fx_best: OVector::zeros_generic(dim, U1::name()), scale, centroid: OVector::zeros_generic(dim, U1::name()), reflection: OVector::zeros_generic(dim, U1::name()), @@ -188,23 +181,23 @@ impl NelderMead { /// Error returned from [`NelderMead`] solver. #[derive(Debug, Error)] pub enum NelderMeadError { - /// Error that occurred when evaluating the system. - #[error("{0}")] - Problem(#[from] ProblemError), /// Simplex collapsed so it is impossible to make any progress. #[error("simplex collapsed")] SimplexCollapsed, + /// Simplex contains too many invalid values (NaN, infinity). + #[error("simplex contains too many invalid values")] + SimplexInvalid, } impl NelderMead { fn next_inner( &mut self, f: &F, - dom: &Domain, - x: &mut Vector, - ) -> Result + dom: &Domain, + x: &mut Vector, + ) -> Result where - Sx: StorageMut + IsContiguous, + Sx: StorageMut + IsContiguous, { let NelderMeadOptions { reflection_coeff, @@ -216,8 +209,6 @@ impl NelderMead { } = self.options; let Self { - fx, - fx_best, scale, simplex, errors, @@ -230,15 +221,13 @@ impl NelderMead { } = self; let n = dom.dim(); - let inf = convert(f64::INFINITY); if simplex.is_empty() { // Simplex initialization. // It is important to return early on error before the point is // added to the simplex. 
- let mut error_best = f.apply_eval(x, fx)?; - fx_best.copy_from(fx); + let mut error_best = f.apply(x); errors.push(error_best); simplex.push(x.clone_owned()); @@ -247,27 +236,9 @@ impl NelderMead { xi[j] += scale[j]; dom.project_in(&mut xi, j); - let error = match f.apply_eval(&xi, fx) { - Ok(error) => error, - // Do not fail when invalid value is encountered during - // building the simplex. Instead, treat it as infinity so - // that it is worse than (or "equal" to) any other error and - // hope that it gets replaced. The exception is the very - // point provided by the caller as it is expected to be - // valid and it is erroneous situation if it is not. - Err(ProblemError::InvalidValue) => inf, - Err(error) => { - // Clear the simplex so the solver is not in invalid - // state. - debug!("encountered unexpected error during simplex initialization"); - simplex.clear(); - errors.clear(); - return Err(error.into()); - } - }; + let error = f.apply(&xi); if error < error_best { - fx_best.copy_from(fx); error_best = error; } @@ -275,18 +246,18 @@ impl NelderMead { simplex.push(xi); } - let inf_error_count = errors.iter().filter(|e| **e == inf).count(); + let error_count = errors.iter().filter(|e| !e.is_finite()).count(); - if inf_error_count >= simplex.len() / 2 { + if error_count >= simplex.len() / 2 { // The simplex is too degenerate. debug!( "{} out of {} points in simplex have invalid value, returning error", - inf_error_count, + error_count, simplex.len() ); simplex.clear(); errors.clear(); - return Err(NelderMeadError::Problem(ProblemError::InvalidValue)); + return Err(NelderMeadError::SimplexInvalid); } sort_perm.extend(0..=n); @@ -300,7 +271,7 @@ impl NelderMead { } // Calculate the centroid. - centroid.fill(F::Scalar::zero()); + centroid.fill(F::Field::zero()); (0..n) .map(|i| &simplex[sort_perm[i]]) .for_each(|xi| *centroid += xi); @@ -332,7 +303,7 @@ impl NelderMead { // Perform one of possible simplex transformations. reflection.on_line2_mut(centroid, &simplex[sort_perm[n]], reflection_coeff); let reflection_not_feasible = dom.project(reflection); - let reflection_error = f.apply_eval(reflection, fx).ignore_invalid_value(inf)?; + let reflection_error = f.apply(reflection).nan_to_inf(); #[allow(clippy::suspicious_else_formatting)] let (transformation, not_feasible) = if errors[sort_perm[0]] <= reflection_error @@ -344,17 +315,13 @@ impl NelderMead { errors[sort_perm[n]] = reflection_error; (Transformation::Reflection, reflection_not_feasible) } else if reflection_error < errors[sort_perm[0]] { - fx_best.copy_from(fx); - // Reflected point is better than the current best. Try to go // farther along this direction. expansion.on_line2_mut(centroid, &simplex[sort_perm[n]], expansion_coeff); let expansion_not_feasible = dom.project(expansion); - let expansion_error = f.apply_eval(expansion, fx).ignore_invalid_value(inf)?; + let expansion_error = f.apply(expansion).nan_to_inf(); if expansion_error < reflection_error { - fx_best.copy_from(fx); - // Expansion indeed helped, replace the worst point. simplex[sort_perm[n]].copy_from(expansion); errors[sort_perm[n]] = expansion_error; @@ -377,13 +344,9 @@ impl NelderMead { // Try to perform outer contraction. 
contraction.on_line2_mut(centroid, &simplex[sort_perm[n]], outer_contraction_coeff); let contraction_not_feasible = dom.project(contraction); - let contraction_error = f.apply_eval(contraction, fx).ignore_invalid_value(inf)?; + let contraction_error = f.apply(contraction).nan_to_inf(); if contraction_error <= reflection_error { - if contraction_error < errors[sort_perm[0]] { - fx_best.copy_from(fx); - } - // Use the contracted point instead of the reflected point // because it's better. simplex[sort_perm[n]].copy_from(contraction); @@ -399,13 +362,9 @@ impl NelderMead { // Try to perform inner contraction. contraction.on_line2_mut(centroid, &simplex[sort_perm[n]], inner_contraction_coeff); let contraction_not_feasible = dom.project(contraction); - let contraction_error = f.apply_eval(contraction, fx).ignore_invalid_value(inf)?; + let contraction_error = f.apply(contraction).nan_to_inf(); if contraction_error <= errors[sort_perm[n]] { - if contraction_error < errors[sort_perm[0]] { - fx_best.copy_from(fx); - } - // The contracted point is better than the worst point. simplex[sort_perm[n]].copy_from(contraction); errors[sort_perm[n]] = contraction_error; @@ -430,11 +389,10 @@ impl NelderMead { for i in 1..=n { let xi = &mut simplex[sort_perm[i]]; xi.on_line_mut(contraction, shrink_coeff); - let error = f.apply_eval(xi, fx).ignore_invalid_value(inf)?; + let error = f.apply(xi).nan_to_inf(); errors[sort_perm[i]] = error; if error < error_best { - fx_best.copy_from(fx); error_best = error; } } @@ -474,7 +432,7 @@ impl NelderMead { // otherwise an error reduction was achieved. This criterion is // taken from "Less is more: Simplified Nelder-Mead method for large // unconstrained optimization". - let eps: F::Scalar = convert(EPSILON_SQRT); + let eps = F::Field::EPSILON_SQRT; let worst = errors[sort_perm[n]]; let best = errors[sort_perm[0]]; @@ -496,14 +454,14 @@ impl Optimizer for NelderMead { type Error = NelderMeadError; - fn next( + fn opt_next( &mut self, f: &F, - dom: &Domain, - x: &mut Vector, - ) -> Result + dom: &Domain, + x: &mut Vector, + ) -> Result where - Sx: StorageMut + IsContiguous, + Sx: StorageMut + IsContiguous, { self.next_inner(f, dom, x) } @@ -514,19 +472,20 @@ impl Solver for NelderMead { type Error = NelderMeadError; - fn next( + fn solve_next( &mut self, f: &F, - dom: &Domain, - x: &mut Vector, - fx: &mut Vector, + dom: &Domain, + x: &mut Vector, + fx: &mut Vector, ) -> Result<(), Self::Error> where - Sx: StorageMut + IsContiguous, - Sfx: StorageMut, + Sx: StorageMut + IsContiguous, + Sfx: StorageMut, { - self.next_inner(f, dom, x) - .map(|_| fx.copy_from(&self.fx_best)) + self.next_inner(f, dom, x)?; + f.eval(x, fx); + Ok(()) } } @@ -568,6 +527,21 @@ where } } +trait RealFieldNelderMeadExt { + fn nan_to_inf(self) -> Self; +} + +impl RealFieldNelderMeadExt for T { + fn nan_to_inf(self) -> Self { + if self.is_finite() { + self + } else { + // Not finite also covers NaN and negative infinity. + T::from_subset(&f64::INFINITY) + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/solver/steffensen.rs b/src/solver/steffensen.rs index 1e92c99..9c97a56 100644 --- a/src/solver/steffensen.rs +++ b/src/solver/steffensen.rs @@ -18,7 +18,7 @@ use getset::{CopyGetters, Setters}; use nalgebra::{storage::StorageMut, Dynamic, IsContiguous, Vector}; use thiserror::Error; -use crate::core::{Domain, Problem, ProblemError, Solver, System}; +use crate::core::{Domain, Problem, Solver, System}; /// Variant of the Steffenen's method. 
#[derive(Debug, Clone, Copy)] @@ -39,7 +39,7 @@ pub struct SteffensenOptions { /// [`SteffensenVariant`]). variant: SteffensenVariant, #[getset(skip)] - _phantom: PhantomData, + _phantom: PhantomData, } impl Default for SteffensenOptions { @@ -58,12 +58,12 @@ pub struct Steffensen { impl Steffensen { /// Initializes Steffensen solver with default options. - pub fn new(f: &F, dom: &Domain) -> Self { + pub fn new(f: &F, dom: &Domain) -> Self { Self::with_options(f, dom, SteffensenOptions::default()) } /// Initializes Steffensen solver with given options. - pub fn with_options(_f: &F, _dom: &Domain, options: SteffensenOptions) -> Self { + pub fn with_options(_f: &F, _dom: &Domain, options: SteffensenOptions) -> Self { Self { options } } @@ -74,9 +74,9 @@ impl Steffensen { /// Error returned from [`Steffensen`] solver. #[derive(Debug, Error)] pub enum SteffensenError { - /// Error that occurred when evaluating the system. - #[error("{0}")] - Problem(#[from] ProblemError), + /// System is not one-dimensional. + #[error("system is not one-dimensional")] + InvalidDimensionality, } impl Solver for Steffensen { @@ -84,21 +84,19 @@ impl Solver for Steffensen { type Error = SteffensenError; - fn next( + fn solve_next( &mut self, f: &F, - dom: &Domain, - x: &mut Vector, - fx: &mut Vector, + dom: &Domain, + x: &mut Vector, + fx: &mut Vector, ) -> Result<(), Self::Error> where - Sx: StorageMut + IsContiguous, - Sfx: StorageMut, + Sx: StorageMut + IsContiguous, + Sfx: StorageMut, { if dom.dim() != 1 { - return Err(SteffensenError::Problem( - ProblemError::InvalidDimensionality, - )); + return Err(SteffensenError::InvalidDimensionality); } let SteffensenOptions { variant, .. } = self.options; @@ -106,27 +104,27 @@ impl Solver for Steffensen { let x0 = x[0]; // Compute f(x). - f.eval(x, fx)?; + f.eval(x, fx); let fx0 = fx[0]; match variant { SteffensenVariant::Standard => { // Compute z = f(x + f(x)) and f(z). x[0] += fx0; - f.eval(x, fx)?; + f.eval(x, fx); let fz0 = fx[0]; // Compute the next point. x[0] = x0 - (fx0 * fx0) / (fz0 - fx0); // Compute f(x). - f.eval(x, fx)?; + f.eval(x, fx); } SteffensenVariant::Liu => { // Compute z = f(x + f(x)) and f(z). x[0] += fx0; let z0 = x[0]; - f.eval(x, fx)?; + f.eval(x, fx); let fz0 = fx[0]; // Compute f[x, z]. @@ -135,7 +133,7 @@ impl Solver for Steffensen { // Compute y = x - f(x) / f[x, z] and f(y). x[0] = x0 - fx0 / f_xz; let y0 = x[0]; - f.eval(x, fx)?; + f.eval(x, fx); let fy0 = fx[0]; // Compute f[x, y] and f[y, z]. @@ -146,7 +144,7 @@ impl Solver for Steffensen { x[0] = y0 - (f_xy - f_yz + f_xz) / (f_xy * f_xy) * fy0; // Compute f(x). - f.eval(x, fx)?; + f.eval(x, fx); } } diff --git a/src/solver/trust_region.rs b/src/solver/trust_region.rs index 0b02a87..e8dbe1b 100644 --- a/src/solver/trust_region.rs +++ b/src/solver/trust_region.rs @@ -37,10 +37,8 @@ use num_traits::{One, Zero}; use thiserror::Error; use crate::{ - core::{Domain, Function, Optimizer, Problem, ProblemError, Solver, System}, - derivatives::{ - Gradient, GradientError, Hessian, HessianError, Jacobian, JacobianError, EPSILON_SQRT, - }, + core::{Domain, Function, Optimizer, Problem, RealField as _, Solver, System}, + derivatives::{Gradient, Hessian, Jacobian}, }; /// Specification for initial value of trust region size. @@ -57,23 +55,23 @@ pub enum DeltaInit { #[getset(get_copy = "pub", set = "pub")] pub struct TrustRegionOptions { /// Minimum allowed trust region size. Default: `f64::EPSILON.sqrt()`. - delta_min: F::Scalar, + delta_min: F::Field, /// Maximum allowed trust region size. 
diff --git a/src/solver/trust_region.rs b/src/solver/trust_region.rs
index 0b02a87..e8dbe1b 100644
--- a/src/solver/trust_region.rs
+++ b/src/solver/trust_region.rs
@@ -37,10 +37,8 @@ use num_traits::{One, Zero};
 use thiserror::Error;

 use crate::{
-    core::{Domain, Function, Optimizer, Problem, ProblemError, Solver, System},
-    derivatives::{
-        Gradient, GradientError, Hessian, HessianError, Jacobian, JacobianError, EPSILON_SQRT,
-    },
+    core::{Domain, Function, Optimizer, Problem, RealField as _, Solver, System},
+    derivatives::{Gradient, Hessian, Jacobian},
 };

 /// Specification for initial value of trust region size.
@@ -57,23 +55,23 @@
 #[getset(get_copy = "pub", set = "pub")]
 pub struct TrustRegionOptions<F: Problem> {
     /// Minimum allowed trust region size. Default: `f64::EPSILON.sqrt()`.
-    delta_min: F::Scalar,
+    delta_min: F::Field,
     /// Maximum allowed trust region size. Default: `1e9`.
-    delta_max: F::Scalar,
+    delta_max: F::Field,
     /// Initial trust region size. Default: estimated (see [`DeltaInit`]).
-    delta_init: DeltaInit<F::Scalar>,
+    delta_init: DeltaInit<F::Field>,
     /// Minimum scaling factor for lambda in Levenberg-Marquardt step. Default:
     /// `1e-10`.
-    mu_min: F::Scalar,
+    mu_min: F::Field,
     /// Threshold for gain ratio to shrink trust region size if lower. Default:
     /// `0.25`.
-    shrink_thresh: F::Scalar,
+    shrink_thresh: F::Field,
     /// Threshold for gain ratio to expand trust region size if higher. Default:
     /// `0.75`.
-    expand_thresh: F::Scalar,
+    expand_thresh: F::Field,
     /// Threshold for gain ratio that needs to be exceeded to accept the
     /// calculated step. Default: `0.0001`.
-    accept_thresh: F::Scalar,
+    accept_thresh: F::Field,
     /// Number of step rejections that are allowed to happen before returning
     /// [`TrustRegionError::NoProgress`] error. Default: `10`.
     rejections_thresh: usize,
@@ -104,7 +102,7 @@ impl<F: Problem> TrustRegionOptions<F> {
 impl<F: Problem> Default for TrustRegionOptions<F> {
     fn default() -> Self {
         Self {
-            delta_min: convert(EPSILON_SQRT),
+            delta_min: F::Field::EPSILON_SQRT,
             delta_max: convert(1e9),
             delta_init: DeltaInit::Estimated,
             mu_min: convert(1e-10),
@@ -121,36 +119,36 @@ impl<F: Problem> Default for TrustRegionOptions<F> {
 /// Trust region solver. See [module](self) documentation for more details.
 pub struct TrustRegion<F: Problem> {
     options: TrustRegionOptions<F>,
-    delta: F::Scalar,
-    mu: F::Scalar,
-    scale: OVector<F::Scalar, Dynamic>,
+    delta: F::Field,
+    mu: F::Field,
+    scale: OVector<F::Field, Dynamic>,
     jac: Jacobian<F>,
     grad: Gradient<F>,
     hes: Hessian<F>,
-    q_tr_fx_neg: OVector<F::Scalar, Dynamic>,
-    newton: OVector<F::Scalar, Dynamic>,
-    grad_neg: OVector<F::Scalar, Dynamic>,
-    cauchy: OVector<F::Scalar, Dynamic>,
-    jac_tr_jac: OMatrix<F::Scalar, Dynamic, Dynamic>,
-    p: OVector<F::Scalar, Dynamic>,
-    temp: OVector<F::Scalar, Dynamic>,
+    q_tr_fx_neg: OVector<F::Field, Dynamic>,
+    newton: OVector<F::Field, Dynamic>,
+    grad_neg: OVector<F::Field, Dynamic>,
+    cauchy: OVector<F::Field, Dynamic>,
+    jac_tr_jac: OMatrix<F::Field, Dynamic, Dynamic>,
+    p: OVector<F::Field, Dynamic>,
+    temp: OVector<F::Field, Dynamic>,
     iter: usize,
     rejections_cnt: usize,
 }

 impl<F: Problem> TrustRegion<F> {
     /// Initializes trust region solver with default options.
-    pub fn new(f: &F, dom: &Domain<F::Scalar>) -> Self {
+    pub fn new(f: &F, dom: &Domain<F::Field>) -> Self {
         Self::with_options(f, dom, TrustRegionOptions::default())
     }

     /// Initializes trust region solver with given options.
-    pub fn with_options(f: &F, dom: &Domain<F::Scalar>, options: TrustRegionOptions<F>) -> Self {
+    pub fn with_options(f: &F, dom: &Domain<F::Field>, options: TrustRegionOptions<F>) -> Self {
         let dim = Dynamic::new(dom.dim());
         let delta_init = match options.delta_init {
             DeltaInit::Fixed(fixed) => fixed,
             // Zero is recognized in the function `next`.
-            DeltaInit::Estimated => F::Scalar::zero(),
+            DeltaInit::Estimated => F::Field::zero(),
         };
         let scale = dom
             .scale()
@@ -181,7 +179,7 @@ impl<F: Problem> TrustRegion<F> {
     pub fn reset(&mut self) {
         self.delta = match self.options.delta_init {
             DeltaInit::Fixed(fixed) => fixed,
-            DeltaInit::Estimated => F::Scalar::zero(),
+            DeltaInit::Estimated => F::Field::zero(),
         };
         self.mu = convert(0.5);
         self.iter = 1;
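Usage note for the options above: `#[getset(set = "pub")]` derives public setters named after the fields. A sketch of configuring a fixed initial region size (the setter names and the `gomez::solver` re-export paths are assumptions here):

```rust
use gomez::nalgebra as na;
use gomez::solver::{DeltaInit, TrustRegion, TrustRegionOptions};
use gomez::{Domain, Problem};

// Hedged sketch: configure a fixed initial region size and a higher
// rejection limit before constructing the solver.
fn configured<F: Problem>(f: &F, dom: &Domain<F::Field>) -> TrustRegion<F> {
    let mut options = TrustRegionOptions::default();
    options
        .set_delta_init(DeltaInit::Fixed(na::convert(1.0)))
        .set_rejections_thresh(20);
    TrustRegion::with_options(f, dom, options)
}
```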
#[error("neither newton nor steepest descent step can be taken from the point")] NoValidStep, @@ -217,16 +203,16 @@ impl Solver for TrustRegion { type Error = TrustRegionError; - fn next( + fn solve_next( &mut self, f: &F, - dom: &Domain, - x: &mut Vector, - fx: &mut Vector, + dom: &Domain, + x: &mut Vector, + fx: &mut Vector, ) -> Result<(), Self::Error> where - Sx: StorageMut + IsContiguous, - Sfx: StorageMut, + Sx: StorageMut + IsContiguous, + Sfx: StorageMut, { let TrustRegionOptions { delta_min, @@ -259,9 +245,9 @@ impl Solver for TrustRegion { } = self; #[allow(clippy::needless_late_init)] - let scaled_newton: &mut OVector; - let scale_inv2: &mut OVector; - let cauchy_scaled: &mut OVector; + let scaled_newton: &mut OVector; + let scale_inv2: &mut OVector; + let cauchy_scaled: &mut OVector; #[derive(Debug, Clone, Copy, PartialEq)] enum StepType { @@ -273,12 +259,12 @@ impl Solver for TrustRegion { } // Compute F(x) and F'(x). - f.eval(x, fx)?; - jac.compute(f, x, scale, fx)?; + f.eval(x, fx); + jac.compute(f, x, scale, fx); let fx_norm = fx.norm(); - let estimate_delta = *delta == F::Scalar::zero(); + let estimate_delta = *delta == F::Field::zero(); if estimate_delta { // Zero delta signifies that the initial delta is to be set // automatically and it has not been done yet. @@ -292,8 +278,8 @@ impl Solver for TrustRegion { // where K = 100. The approach is taken from GSL. for (j, col) in jac.column_iter().enumerate() { temp[j] = col.norm(); - if temp[j] == F::Scalar::zero() { - temp[j] = F::Scalar::one(); + if temp[j] == F::Field::zero() { + temp[j] = F::Field::one(); } } temp.component_mul_assign(x); @@ -301,7 +287,7 @@ impl Solver for TrustRegion { let factor = convert(100.0); *delta = temp.norm() * factor; - if *delta == F::Scalar::zero() { + if *delta == F::Field::zero() { *delta = factor; } } @@ -323,7 +309,7 @@ impl Solver for TrustRegion { "Newton step is invalid for ill-defined Jacobian (zero columns: {:?})", jac.column_iter() .enumerate() - .filter(|(_, col)| col.norm() == F::Scalar::zero()) + .filter(|(_, col)| col.norm() == F::Field::zero()) .map(|(i, _)| i) .collect::>() ); @@ -349,7 +335,7 @@ impl Solver for TrustRegion { let grad_norm = grad_neg.norm(); - if grad_norm == F::Scalar::zero() { + if grad_norm == F::Field::zero() { // Gradient is zero, it is useless to compute the dogleg step. // Instead, we take the Newton direction to the trust region // boundary. @@ -368,7 +354,7 @@ impl Solver for TrustRegion { // Compute D^(-2) (use p for storage). scale_inv2 = p; scale_inv2.copy_from(scale); - scale_inv2.apply(|s| *s = F::Scalar::one() / (*s * *s)); + scale_inv2.apply(|s| *s = F::Field::one() / (*s * *s)); // Compute g = -D^(-2) grad F, the steepest descent direction in // scaled space (use cauchy for storage). @@ -466,7 +452,7 @@ impl Solver for TrustRegion { #[allow(clippy::suspicious_operation_groupings)] let d = (b * b + a * c_neg).sqrt(); - let alpha = if b <= F::Scalar::zero() { + let alpha = if b <= F::Field::zero() { (-b + d) / a } else { c_neg / (b + d) @@ -508,10 +494,10 @@ impl Solver for TrustRegion { // from the solution (i.e., for large || F(x) ||). // Determine lambda. - let d = if fx_norm >= F::Scalar::one() { - F::Scalar::one() / fx_norm + let d = if fx_norm >= F::Field::one() { + F::Field::one() / fx_norm } else { - F::Scalar::one() + F::Scalar::one() / convert(*iter as f64) + F::Field::one() + F::Field::one() / convert(*iter as f64) }; let lambda = *mu * fx_norm.powf(d); @@ -580,7 +566,8 @@ impl Solver for TrustRegion { } // Compute F(x'). 
@@ -580,7 +566,8 @@
         }

         // Compute F(x').
-        let is_trial_valid = f.eval(x_trial, fx_trial).is_ok();
+        f.eval(x_trial, fx_trial);
+        let is_trial_valid = fx_trial.iter().all(|fxi| fxi.is_finite());
         let fx_trial_norm = fx_trial.norm();

         let gain_ratio = if is_trial_valid {
@@ -592,13 +579,13 @@
             let deny = if allow_ascent {
                 // If ascent is allowed, then check only for zero, which would
                 // make the gain ratio calculation ill-defined.
-                predicted == F::Scalar::zero()
+                predicted == F::Field::zero()
             } else {
                 // If ascent is not allowed, test positivity of the predicted
                 // gain. Note that even if the actual reduction was positive,
                 // the step would be rejected anyway because the gain ratio
                 // would be negative.
-                predicted <= F::Scalar::zero()
+                predicted <= F::Field::zero()
             };

             if deny {
@@ -607,7 +594,7 @@
                 } else {
                     debug!("predicted gain <= 0");
                 }
-                F::Scalar::zero()
+                F::Field::zero()
             } else {
                 let actual = fx_norm - fx_trial_norm;
                 let gain_ratio = actual / predicted;
@@ -617,7 +604,7 @@
             }
         } else {
             debug!("trial step is invalid, gain ratio = 0");
-            F::Scalar::zero()
+            F::Field::zero()
         };

         // Decide if the step is accepted or not.
@@ -698,14 +685,14 @@
 impl<F: Function> Optimizer<F> for TrustRegion<F> {
     type Error = TrustRegionError;

-    fn next<Sx>(
+    fn opt_next<Sx>(
         &mut self,
         f: &F,
-        dom: &Domain<<F as Problem>::Scalar>,
-        x: &mut Vector<<F as Problem>::Scalar, Dynamic, Sx>,
-    ) -> Result<<F as Problem>::Scalar, Self::Error>
+        dom: &Domain<<F as Problem>::Field>,
+        x: &mut Vector<<F as Problem>::Field, Dynamic, Sx>,
+    ) -> Result<<F as Problem>::Field, Self::Error>
     where
-        Sx: StorageMut<<F as Problem>::Scalar, Dynamic> + IsContiguous,
+        Sx: StorageMut<<F as Problem>::Field, Dynamic> + IsContiguous,
     {
         let TrustRegionOptions {
             delta_min,
@@ -734,9 +721,9 @@
         } = self;

         #[allow(clippy::needless_late_init)]
-        let scaled_newton: &mut OVector<F::Scalar, Dynamic>;
-        let scale_inv2_grad: &mut OVector<F::Scalar, Dynamic>;
-        let cauchy_scaled: &mut OVector<F::Scalar, Dynamic>;
+        let scaled_newton: &mut OVector<F::Field, Dynamic>;
+        let scale_inv2_grad: &mut OVector<F::Field, Dynamic>;
+        let cauchy_scaled: &mut OVector<F::Field, Dynamic>;

         #[derive(Debug, Clone, Copy, PartialEq)]
         enum StepType {
@@ -747,11 +734,11 @@
         }

         // Compute f(x), grad f(x) and H(x).
-        let mut fx = f.apply(x)?;
-        grad.compute(f, x, scale, fx)?;
-        hes.compute(f, x, scale, fx)?;
+        let mut fx = f.apply(x);
+        grad.compute(f, x, scale, fx);
+        hes.compute(f, x, scale, fx);

-        let estimate_delta = *delta == F::Scalar::zero();
+        let estimate_delta = *delta == F::Field::zero();
         if estimate_delta {
             *delta = grad.norm() * convert(0.1);
         }
@@ -774,7 +761,7 @@
                 "Newton step is invalid for ill-defined Hessian (zero columns: {:?})",
                 hes.column_iter()
                     .enumerate()
-                    .filter(|(_, col)| col.norm() == F::Scalar::zero())
+                    .filter(|(_, col)| col.norm() == F::Field::zero())
                     .map(|(i, _)| i)
                     .collect::<Vec<_>>()
             );
@@ -797,7 +784,7 @@
         let grad_norm = grad_neg.norm();

-        if grad_norm == F::Scalar::zero() {
+        if grad_norm == F::Field::zero() {
             // Gradient is zero, it is useless to compute the dogleg step.
             // Instead, we take the Newton direction to the trust region
             // boundary.
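The scaled-space quantities used below, || D^-1 grad || and D^(-2) grad, can be checked against a tiny concrete case; a standalone sketch with nalgebra and made-up numbers:

```rust
use nalgebra::dvector;

fn main() {
    // Hypothetical scaling D = diag(2, 4) and gradient g = (8, 16).
    let scale = dvector![2.0_f64, 4.0];
    let grad = dvector![8.0_f64, 16.0];

    // || D^-1 g ||, as computed before the Cauchy step.
    let scale_inv_grad_norm = grad.zip_map(&scale, |g, s| g / s).norm();
    assert_eq!(scale_inv_grad_norm, (16.0_f64 + 16.0).sqrt());

    // D^-2 g, the steepest descent direction in scaled space (up to sign).
    let scale_inv2_grad = grad.zip_map(&scale, |g, s| g / (s * s));
    assert_eq!(scale_inv2_grad, dvector![2.0, 1.0]);
}
```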
@@ -816,13 +803,13 @@
         // Compute || D^-1 grad || (use p for storage).
         scale_inv2_grad = p;
         scale_inv2_grad.copy_from(scale);
-        scale_inv2_grad.apply(|s| *s = F::Scalar::one() / *s);
+        scale_inv2_grad.apply(|s| *s = F::Field::one() / *s);
         scale_inv2_grad.component_mul_assign(grad_neg);
         let scale_inv_grad_norm = scale_inv2_grad.norm();

         // Compute D^(-2) grad.
         scale_inv2_grad.copy_from(scale);
-        scale_inv2_grad.apply(|s| *s = F::Scalar::one() / (*s * *s));
+        scale_inv2_grad.apply(|s| *s = F::Field::one() / (*s * *s));
         scale_inv2_grad.component_mul_assign(grad_neg);
         scale_inv2_grad.neg_mut();
@@ -836,11 +823,11 @@
         hes.mul_to(scale_inv2_grad, temp);
         let quadratic_form = scale_inv2_grad.dot(temp);

-        let tau = if quadratic_form <= F::Scalar::zero() {
-            F::Scalar::one()
+        let tau = if quadratic_form <= F::Field::zero() {
+            F::Field::one()
         } else {
             // tau = min(|| D^-1 grad ||^3 / (delta * grad^T D^-2 H D^-2 grad), 1).
-            (scale_inv_grad_norm.powi(3) / (*delta * quadratic_form)).min(F::Scalar::one())
+            (scale_inv_grad_norm.powi(3) / (*delta * quadratic_form)).min(F::Field::one())
         };

         let p = scale_inv2_grad;
@@ -913,7 +900,7 @@
         #[allow(clippy::suspicious_operation_groupings)]
         let d = (b * b + a * c_neg).sqrt();

-        let alpha = if b <= F::Scalar::zero() {
+        let alpha = if b <= F::Field::zero() {
             (-b + d) / a
         } else {
             c_neg / (b + d)
@@ -950,10 +937,8 @@
         }

         // Compute f(x').
-        let (fx_trial, is_trial_valid) = match f.apply(x_trial) {
-            Ok(fx_trial) => (fx_trial, true),
-            Err(_) => (F::Scalar::zero(), false),
-        };
+        let fx_trial = f.apply(x_trial);
+        let is_trial_valid = fx_trial.is_finite();

         let gain_ratio = if is_trial_valid {
             // Compute the gain ratio.
@@ -965,13 +950,13 @@
             let deny = if allow_ascent {
                 // If ascent is allowed, then check only for zero, which would
                 // make the gain ratio calculation ill-defined.
-                predicted == F::Scalar::zero()
+                predicted == F::Field::zero()
             } else {
                 // If ascent is not allowed, test positivity of the predicted
                 // gain. Note that even if the actual reduction was positive,
                 // the step would be rejected anyway because the gain ratio
                 // would be negative.
-                predicted <= F::Scalar::zero()
+                predicted <= F::Field::zero()
             };

             if deny {
@@ -980,7 +965,7 @@
                 } else {
                     debug!("predicted gain <= 0");
                 }
-                F::Scalar::zero()
+                F::Field::zero()
             } else {
                 let actual = fx - fx_trial;
                 let gain_ratio = actual / predicted;
@@ -990,7 +975,7 @@
             }
         } else {
             debug!("trial step is invalid, gain ratio = 0");
-            F::Scalar::zero()
+            F::Field::zero()
         };

         // Decide if the step is accepted or not.
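With both `Solver` and `Optimizer` implemented, one `TrustRegion` value serves either interface. A hedged sketch of the optimizer side, using the `Sphere` test function from src/testing.rs below (assumes `Optimizer` is re-exported at the crate root like `Solver`):

```rust
use gomez::nalgebra as na;
use gomez::solver::TrustRegion;
use gomez::testing::Sphere;
use gomez::{Optimizer, Problem};

fn main() -> Result<(), String> {
    let f = Sphere::new(2);
    let dom = f.domain();
    let mut optimizer = TrustRegion::new(&f, &dom);

    let mut x = na::dvector![10.0, -10.0];

    for i in 1..=100 {
        // opt_next returns the current function value f(x).
        let fx = optimizer
            .opt_next(&f, &dom, &mut x)
            .map_err(|err| err.to_string())?;
        println!("iter = {}, f(x) = {}", i, fx);
    }

    Ok(())
}
```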
diff --git a/src/testing.rs b/src/testing.rs
index 39997ee..cd5d2f9 100644
--- a/src/testing.rs
+++ b/src/testing.rs
@@ -23,49 +23,44 @@ use std::error::Error as StdError;

 use nalgebra::{
-    allocator::Allocator,
     dvector,
     storage::{Storage, StorageMut},
-    vector, DVector, DefaultAllocator, Dim, DimName, Dynamic, IsContiguous, OVector, Vector, U1,
-    U2,
+    DVector, Dim, DimName, Dynamic, IsContiguous, OVector, Vector, U1,
 };
 use thiserror::Error;

-use crate::core::{Domain, Function, Optimizer, Problem, ProblemError, Solver, System};
+use crate::core::{Domain, Function, Optimizer, Problem, Solver, System};

 /// Extension of the [`Problem`] trait that provides additional information that
 /// is useful for testing solvers.
 pub trait TestProblem: Problem {
     /// Standard initial values for the problem. Using the same initial values is
     /// essential for fair comparison of methods.
-    fn initials(&self) -> Vec<OVector<Self::Scalar, Dynamic>>;
+    fn initials(&self) -> Vec<OVector<Self::Field, Dynamic>>;
 }

 /// Extension of the [`System`] trait that provides additional information that
 /// is useful for testing solvers.
 pub trait TestSystem: System + TestProblem
 where
-    Self::Scalar: approx::RelativeEq,
+    Self::Field: approx::RelativeEq,
 {
     /// A set of roots (if known and finite). This is mostly just for
     /// information, for example to know how close a solver got even if it
     /// failed. For testing if a given point is root, [`TestSystem::is_root`]
     /// should be used.
-    fn roots(&self) -> Vec<OVector<Self::Scalar, Dynamic>> {
+    fn roots(&self) -> Vec<OVector<Self::Field, Dynamic>> {
         Vec::new()
     }

     /// Test if given point is a root of the system, given the tolerance `eps`.
-    fn is_root<Sx>(&self, x: &Vector<Self::Scalar, Dynamic, Sx>, eps: Self::Scalar) -> bool
+    fn is_root<Sx>(&self, x: &Vector<Self::Field, Dynamic, Sx>, eps: Self::Field) -> bool
     where
-        Sx: Storage<Self::Scalar, Dynamic> + IsContiguous,
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous,
     {
         let mut fx = x.clone_owned();
-        if self.eval(x, &mut fx).is_ok() {
-            fx.norm() <= eps
-        } else {
-            false
-        }
+        self.eval(x, &mut fx);
+        fx.norm() <= eps
     }
 }
@@ -73,20 +68,20 @@ where
 /// that is useful for testing optimizers.
 pub trait TestFunction: Function + TestProblem
 where
-    Self::Scalar: approx::RelativeEq,
+    Self::Field: approx::RelativeEq,
 {
     /// A set of global optima (if known and finite). This is mostly just for
     /// information, for example to know how close an optimizer got even if it
     /// failed. For testing if a given point is global optimum, [`TestFunction::is_optimum`]
     /// should be used.
-    fn optima(&self) -> Vec<OVector<Self::Scalar, Dynamic>> {
+    fn optima(&self) -> Vec<OVector<Self::Field, Dynamic>> {
         Vec::new()
     }

     /// Test if given point is a global optimum of the function, given the tolerance `eps`.
-    fn is_optimum<Sx>(&self, x: &Vector<Self::Scalar, Dynamic, Sx>, eps: Self::Scalar) -> bool
+    fn is_optimum<Sx>(&self, x: &Vector<Self::Field, Dynamic, Sx>, eps: Self::Field) -> bool
     where
-        Sx: Storage<Self::Scalar, Dynamic> + IsContiguous;
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous;
 }
@@ -127,6 +122,25 @@ impl ExtendedRosenbrock {
         assert!(alpha > 0.0, "alpha must be greater than zero");
         Self { n, alpha }
     }
+
+    fn residuals<'a, Sx>(&self, x: &'a Vector<f64, Dynamic, Sx>) -> impl Iterator<Item = f64> + 'a
+    where
+        Sx: Storage<f64, Dynamic> + IsContiguous,
+    {
+        let alpha = self.alpha;
+        (0..(self.n / 2)).flat_map(move |i| {
+            let i1 = 2 * i;
+            let i2 = 2 * i + 1;
+
+            let x1 = x[i1] * alpha;
+            let x2 = x[i2] / alpha;
+
+            let fx1 = 10.0 * (x2 - x1 * x1);
+            let fx2 = 1.0 - x1;
+
+            [fx1, fx2].into_iter()
+        })
+    }
 }

 impl Default for ExtendedRosenbrock {
@@ -136,9 +150,9 @@
 }

 impl Problem for ExtendedRosenbrock {
-    type Scalar = f64;
+    type Field = f64;

-    fn domain(&self) -> Domain<Self::Scalar> {
+    fn domain(&self) -> Domain<Self::Field> {
         (0..self.n)
             .map(|i| {
                 if i % 2 == 0 {
@@ -154,30 +168,25 @@ impl Problem for ExtendedRosenbrock {
 impl System for ExtendedRosenbrock {
     fn eval<Sx, Sfx>(
         &self,
-        x: &Vector<Self::Scalar, Dynamic, Sx>,
-        fx: &mut Vector<Self::Scalar, Dynamic, Sfx>,
-    ) -> Result<(), ProblemError>
-    where
-        Sx: Storage<Self::Scalar, Dynamic> + IsContiguous,
-        Sfx: StorageMut<Self::Scalar, Dynamic>,
+        x: &Vector<Self::Field, Dynamic, Sx>,
+        fx: &mut Vector<Self::Field, Dynamic, Sfx>,
+    ) where
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous,
+        Sfx: StorageMut<Self::Field, Dynamic>,
     {
-        for i in 0..(self.n / 2) {
-            let i1 = 2 * i;
-            let i2 = 2 * i + 1;
-
-            let x1 = x[i1] * self.alpha;
-            let x2 = x[i2] / self.alpha;
-
-            fx[i1] = 10.0 * (x2 - x1 * x1);
-            fx[i2] = 1.0 - x1;
-        }
+        eval(self.residuals(x), fx)
+    }

-        Ok(())
+    fn norm<Sx>(&self, x: &Vector<Self::Field, Dynamic, Sx>) -> Self::Field
+    where
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous,
+    {
+        norm(self.residuals(x))
     }
 }

 impl TestProblem for ExtendedRosenbrock {
-    fn initials(&self) -> Vec<OVector<Self::Scalar, Dynamic>> {
+    fn initials(&self) -> Vec<OVector<Self::Field, Dynamic>> {
         let init1 = DVector::from_iterator(
             self.n,
             (0..self.n).map(|i| if i % 2 == 0 { -1.2 } else { 1.0 }),
@@ -193,7 +202,7 @@ impl TestProblem for ExtendedRosenbrock {
 }

 impl TestSystem for ExtendedRosenbrock {
-    fn roots(&self) -> Vec<OVector<Self::Scalar, Dynamic>> {
+    fn roots(&self) -> Vec<OVector<Self::Field, Dynamic>> {
         let root = (0..self.n).map(|i| {
             if i % 2 == 0 {
                 1.0 / self.alpha
@@ -231,6 +240,25 @@ impl ExtendedPowell {
         assert!(n % 4 == 0, "n must be a multiple of 4");
         Self { n }
     }
+
+    fn residuals<'a, Sx>(&self, x: &'a Vector<f64, Dynamic, Sx>) -> impl Iterator<Item = f64> + 'a
+    where
+        Sx: Storage<f64, Dynamic> + IsContiguous,
+    {
+        (0..(self.n / 4)).flat_map(move |i| {
+            let i1 = 4 * i;
+            let i2 = 4 * i + 1;
+            let i3 = 4 * i + 2;
+            let i4 = 4 * i + 3;
+
+            let fx1 = x[i1] + 10.0 * x[i2];
+            let fx2 = 5f64.sqrt() * (x[i3] - x[i4]);
+            let fx3 = (x[i2] - 2.0 * x[i3]).powi(2);
+            let fx4 = 10f64.sqrt() * (x[i1] - x[i4]).powi(2);
+
+            [fx1, fx2, fx3, fx4].into_iter()
+        })
+    }
 }

 impl Default for ExtendedPowell {
@@ -240,9 +268,9 @@
 }

 impl Problem for ExtendedPowell {
-    type Scalar = f64;
+    type Field = f64;

-    fn domain(&self) -> Domain<Self::Scalar> {
+    fn domain(&self) -> Domain<Self::Field> {
         Domain::unconstrained(self.n)
     }
 }
@@ -250,31 +278,25 @@ impl Problem for ExtendedPowell {
 impl System for ExtendedPowell {
     fn eval<Sx, Sfx>(
         &self,
-        x: &Vector<Self::Scalar, Dynamic, Sx>,
-        fx: &mut Vector<Self::Scalar, Dynamic, Sfx>,
-    ) -> Result<(), ProblemError>
-    where
-        Sx: Storage<Self::Scalar, Dynamic> + IsContiguous,
-        Sfx: StorageMut<Self::Scalar, Dynamic>,
+        x: &Vector<Self::Field, Dynamic, Sx>,
+        fx: &mut Vector<Self::Field, Dynamic, Sfx>,
+    ) where
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous,
+        Sfx: StorageMut<Self::Field, Dynamic>,
    {
-        for i in 0..(self.n / 4) {
-            let i1 = 4 * i;
-            let i2 = 4 * i + 1;
-            let i3 = 4 * i + 2;
-            let i4 = 4 * i + 3;
-
-            fx[i1] = x[i1] + 10.0 * x[i2];
-            fx[i2] = 5f64.sqrt() * (x[i3] - x[i4]);
-            fx[i3] = (x[i2] - 2.0 * x[i3]).powi(2);
-            fx[i4] = 10f64.sqrt() * (x[i1] - x[i4]).powi(2);
-        }
+        eval(self.residuals(x), fx)
+    }

-        Ok(())
+    fn norm<Sx>(&self, x: &Vector<Self::Field, Dynamic, Sx>) -> Self::Field
+    where
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous,
+    {
+        norm(self.residuals(x))
    }
 }

 impl TestProblem for ExtendedPowell {
-    fn initials(&self) -> Vec<OVector<Self::Scalar, Dynamic>> {
+    fn initials(&self) -> Vec<OVector<Self::Field, Dynamic>> {
         let init = DVector::from_iterator(
             self.n,
             (0..self.n).map(|i| match i % 4 {
@@ -291,7 +313,7 @@ impl TestProblem for ExtendedPowell {
 }

 impl TestSystem for ExtendedPowell {
-    fn roots(&self) -> Vec<OVector<Self::Scalar, Dynamic>> {
+    fn roots(&self) -> Vec<OVector<Self::Field, Dynamic>> {
         vec![DVector::from_element(self.n, 0.0)]
     }
 }
@@ -313,6 +335,17 @@ impl BullardBiegler {
     pub fn new() -> Self {
         Self(())
     }
+
+    fn residuals<'a, Sx>(&self, x: &'a Vector<f64, Dynamic, Sx>) -> impl Iterator<Item = f64> + 'a
+    where
+        Sx: Storage<f64, Dynamic> + IsContiguous,
+    {
+        [
+            1e4 * x[0] * x[1] - 1.0,
+            (-x[0]).exp() + (-x[1]).exp() - 1.001,
+        ]
+        .into_iter()
+    }
 }

 impl Default for BullardBiegler {
@@ -322,9 +355,9 @@
 }

 impl Problem for BullardBiegler {
-    type Scalar = f64;
+    type Field = f64;

-    fn domain(&self) -> Domain<Self::Scalar> {
+    fn domain(&self) -> Domain<Self::Field> {
         [(5.45e-6, 4.553), (2.196e-3, 18.21)].into_iter().collect()
     }
 }
@@ -332,22 +365,25 @@ impl Problem for BullardBiegler {
 impl System for BullardBiegler {
     fn eval<Sx, Sfx>(
         &self,
-        x: &Vector<Self::Scalar, Dynamic, Sx>,
-        fx: &mut Vector<Self::Scalar, Dynamic, Sfx>,
-    ) -> Result<(), ProblemError>
-    where
-        Sx: Storage<Self::Scalar, Dynamic> + IsContiguous,
-        Sfx: StorageMut<Self::Scalar, Dynamic>,
+        x: &Vector<Self::Field, Dynamic, Sx>,
+        fx: &mut Vector<Self::Field, Dynamic, Sfx>,
+    ) where
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous,
+        Sfx: StorageMut<Self::Field, Dynamic>,
    {
-        fx[0] = 1e4 * x[0] * x[1] - 1.0;
-        fx[1] = (-x[0]).exp() + (-x[1]).exp() - 1.001;
+        eval(self.residuals(x), fx)
+    }

-        Ok(())
+    fn norm<Sx>(&self, x: &Vector<Self::Field, Dynamic, Sx>) -> Self::Field
+    where
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous,
+    {
+        norm(self.residuals(x))
    }
 }

 impl TestProblem for BullardBiegler {
-    fn initials(&self) -> Vec<OVector<Self::Scalar, Dynamic>> {
+    fn initials(&self) -> Vec<OVector<Self::Field, Dynamic>> {
         let init1 = dvector![0.1, 1.0];
         let init2 = dvector![1.0, 1.0];
         vec![init1, init2]
@@ -355,7 +391,7 @@ impl TestProblem for BullardBiegler {
 }

 impl TestSystem for BullardBiegler {
-    fn roots(&self) -> Vec<OVector<Self::Scalar, Dynamic>> {
+    fn roots(&self) -> Vec<OVector<Self::Field, Dynamic>> {
         vec![dvector![1.45e-5, 6.8933353]]
     }
 }
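The residuals-iterator refactor lets each test system state its equations once and derive both `eval` and `norm` from them. The same pattern outside the crate, as a standalone sketch (hypothetical `Parabolas` system; the local `eval`/`norm` mirror the private helpers added at the end of this file):

```rust
use nalgebra::{dvector, DVector};

// Hypothetical system: f1 = x1^2 - 1, f2 = x2^2 - 4.
struct Parabolas;

impl Parabolas {
    fn residuals<'a>(&self, x: &'a DVector<f64>) -> impl Iterator<Item = f64> + 'a {
        [x[0] * x[0] - 1.0, x[1] * x[1] - 4.0].into_iter()
    }

    // Write residuals into fx, as System::eval does via the helper.
    fn eval(&self, x: &DVector<f64>, fx: &mut DVector<f64>) {
        fx.iter_mut()
            .zip(self.residuals(x))
            .for_each(|(fxi, v)| *fxi = v);
    }

    // Compute ||f(x)|| without materializing the residual vector.
    fn norm(&self, x: &DVector<f64>) -> f64 {
        self.residuals(x).map(|v| v * v).sum::<f64>().sqrt()
    }
}

fn main() {
    let f = Parabolas;
    let x = dvector![1.0, 2.0];
    let mut fx = dvector![f64::NAN, f64::NAN];
    f.eval(&x, &mut fx);
    assert_eq!(fx, dvector![0.0, 0.0]);
    assert_eq!(f.norm(&dvector![2.0, 3.0]), (9.0_f64 + 25.0).sqrt());
}
```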
@@ -382,6 +418,13 @@ impl Sphere {
         assert!(n > 0, "n must be greater than zero");
         Self { n }
     }
+
+    fn residuals<'a, Sx>(&self, x: &'a Vector<f64, Dynamic, Sx>) -> impl Iterator<Item = f64> + 'a
+    where
+        Sx: Storage<f64, Dynamic> + IsContiguous,
+    {
+        x.iter().map(|xi| xi.powi(2))
+    }
 }

 impl Default for Sphere {
@@ -391,9 +434,9 @@
 }

 impl Problem for Sphere {
-    type Scalar = f64;
+    type Field = f64;

-    fn domain(&self) -> Domain<Self::Scalar> {
+    fn domain(&self) -> Domain<Self::Field> {
         Domain::unconstrained(self.n)
     }
 }
@@ -401,23 +444,25 @@ impl Problem for Sphere {
 impl System for Sphere {
     fn eval<Sx, Sfx>(
         &self,
-        x: &Vector<Self::Scalar, Dynamic, Sx>,
-        fx: &mut Vector<Self::Scalar, Dynamic, Sfx>,
-    ) -> Result<(), ProblemError>
-    where
-        Sx: Storage<Self::Scalar, Dynamic> + IsContiguous,
-        Sfx: StorageMut<Self::Scalar, Dynamic>,
+        x: &Vector<Self::Field, Dynamic, Sx>,
+        fx: &mut Vector<Self::Field, Dynamic, Sfx>,
+    ) where
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous,
+        Sfx: StorageMut<Self::Field, Dynamic>,
    {
-        for i in 0..self.n {
-            fx[i] = x[i].powi(2);
-        }
+        eval(self.residuals(x), fx)
+    }

-        Ok(())
+    fn norm<Sx>(&self, x: &Vector<Self::Field, Dynamic, Sx>) -> Self::Field
+    where
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous,
+    {
+        norm(self.residuals(x))
    }
 }

 impl TestProblem for Sphere {
-    fn initials(&self) -> Vec<OVector<Self::Scalar, Dynamic>> {
+    fn initials(&self) -> Vec<OVector<Self::Field, Dynamic>> {
         let init = DVector::from_iterator(
             self.n,
             (0..self.n).map(|i| if i % 2 == 0 { 10.0 } else { -10.0 }),
@@ -428,21 +473,21 @@ impl TestProblem for Sphere {
 }

 impl TestSystem for Sphere {
-    fn roots(&self) -> Vec<OVector<Self::Scalar, Dynamic>> {
+    fn roots(&self) -> Vec<OVector<Self::Field, Dynamic>> {
         vec![DVector::from_element(self.n, 0.0)]
     }
 }

 impl TestFunction for Sphere {
-    fn optima(&self) -> Vec<OVector<Self::Scalar, Dynamic>> {
+    fn optima(&self) -> Vec<OVector<Self::Field, Dynamic>> {
         <Sphere as TestSystem>::roots(self)
     }

-    fn is_optimum<Sx>(&self, x: &Vector<Self::Scalar, Dynamic, Sx>, eps: Self::Scalar) -> bool
+    fn is_optimum<Sx>(&self, x: &Vector<Self::Field, Dynamic, Sx>, eps: Self::Field) -> bool
     where
-        Sx: Storage<Self::Scalar, Dynamic> + IsContiguous,
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous,
     {
-        self.apply(x).map(|fx| fx.abs() <= eps).unwrap_or(false)
+        self.apply(x).abs() <= eps
     }
 }
@@ -467,6 +512,23 @@ impl Brown {
         assert!(n > 1, "n must be greater than one");
         Self { n }
     }
+
+    fn residuals<'a, Sx>(&self, x: &'a Vector<f64, Dynamic, Sx>) -> impl Iterator<Item = f64> + 'a
+    where
+        Sx: Storage<f64, Dynamic> + IsContiguous,
+    {
+        let n = self.n as f64;
+        let x_sum = x.sum();
+
+        let fx0 = x.iter().product::<f64>() - 1.0;
+        let fxs = x
+            .iter()
+            .skip(1)
+            .copied()
+            .map(move |xi| xi + x_sum - n + 1.0);
+
+        std::iter::once(fx0).chain(fxs)
+    }
 }

 impl Default for Brown {
@@ -476,9 +538,9 @@
 }

 impl Problem for Brown {
-    type Scalar = f64;
+    type Field = f64;

-    fn domain(&self) -> Domain<Self::Scalar> {
+    fn domain(&self) -> Domain<Self::Field> {
         Domain::unconstrained(self.n)
     }
 }
@@ -486,25 +548,25 @@ impl Problem for Brown {
 impl System for Brown {
     fn eval<Sx, Sfx>(
         &self,
-        x: &Vector<Self::Scalar, Dynamic, Sx>,
-        fx: &mut Vector<Self::Scalar, Dynamic, Sfx>,
-    ) -> Result<(), ProblemError>
-    where
-        Sx: Storage<Self::Scalar, Dynamic> + IsContiguous,
-        Sfx: StorageMut<Self::Scalar, Dynamic>,
+        x: &Vector<Self::Field, Dynamic, Sx>,
+        fx: &mut Vector<Self::Field, Dynamic, Sfx>,
+    ) where
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous,
+        Sfx: StorageMut<Self::Field, Dynamic>,
    {
-        fx[0] = x.iter().product::<f64>() - 1.0;
-
-        for i in 1..self.n {
-            fx[i] = x[i] + x.sum() - (self.n as f64 + 1.0);
-        }
+        eval(self.residuals(x), fx)
+    }

-        Ok(())
+    fn norm<Sx>(&self, x: &Vector<Self::Field, Dynamic, Sx>) -> Self::Field
+    where
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous,
+    {
+        norm(self.residuals(x))
    }
 }

 impl TestProblem for Brown {
-    fn initials(&self) -> Vec<OVector<Self::Scalar, Dynamic>> {
+    fn initials(&self) -> Vec<OVector<Self::Field, Dynamic>> {
         let init = DVector::zeros_generic(Dynamic::from_usize(self.n), U1::name());
         vec![init]
     }
@@ -533,6 +595,18 @@ impl Exponential {
         assert!(n > 0, "n must be greater than zero");
         Self { n }
     }
+
+    fn residuals<'a, Sx>(&self, x: &'a Vector<f64, Dynamic, Sx>) -> impl Iterator<Item = f64> + 'a
+    where
+        Sx: Storage<f64, Dynamic> + IsContiguous,
+    {
+        let x_sum = x.sum();
+
+        x.iter()
+            .copied()
+            .enumerate()
+            .map(move |(i, xi)| xi - (((i + 1) as f64) * x_sum).cos().exp())
+    }
 }

 impl Default for Exponential {
@@ -542,9 +616,9 @@
 }

 impl Problem for Exponential {
-    type Scalar = f64;
+    type Field = f64;

-    fn domain(&self) -> Domain<Self::Scalar> {
+    fn domain(&self) -> Domain<Self::Field> {
         Domain::unconstrained(2)
     }
 }
@@ -552,23 +626,25 @@ impl Problem for Exponential {
 impl System for Exponential {
     fn eval<Sx, Sfx>(
         &self,
-        x: &Vector<Self::Scalar, Dynamic, Sx>,
-        fx: &mut Vector<Self::Scalar, Dynamic, Sfx>,
-    ) -> Result<(), ProblemError>
-    where
-        Sx: Storage<Self::Scalar, Dynamic> + IsContiguous,
-        Sfx: StorageMut<Self::Scalar, Dynamic>,
+        x: &Vector<Self::Field, Dynamic, Sx>,
+        fx: &mut Vector<Self::Field, Dynamic, Sfx>,
+    ) where
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous,
+        Sfx: StorageMut<Self::Field, Dynamic>,
    {
-        for i in 0..self.n {
-            fx[i] = x[i] - (((i + 1) as f64) * x.sum()).cos().exp();
-        }
+        eval(self.residuals(x), fx)
+    }

-        Ok(())
+    fn norm<Sx>(&self, x: &Vector<Self::Field, Dynamic, Sx>) -> Self::Field
+    where
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous,
+    {
+        norm(self.residuals(x))
    }
 }

 impl TestProblem for Exponential {
-    fn initials(&self) -> Vec<OVector<Self::Scalar, Dynamic>> {
+    fn initials(&self) -> Vec<OVector<Self::Field, Dynamic>> {
         let init = DVector::zeros_generic(Dynamic::from_usize(self.n), U1::name());
         vec![init]
     }
@@ -597,9 +673,9 @@ impl Default for InfiniteSolutions {
 }

 impl Problem for InfiniteSolutions {
-    type Scalar = f64;
+    type Field = f64;

-    fn domain(&self) -> Domain<Self::Scalar> {
+    fn domain(&self) -> Domain<Self::Field> {
         Domain::unconstrained(self.n)
     }
 }
@@ -607,23 +683,25 @@ impl Problem for InfiniteSolutions {
 impl System for InfiniteSolutions {
     fn eval<Sx, Sfx>(
         &self,
-        _x: &Vector<Self::Scalar, Dynamic, Sx>,
-        fx: &mut Vector<Self::Scalar, Dynamic, Sfx>,
-    ) -> Result<(), ProblemError>
-    where
-        Sx: Storage<Self::Scalar, Dynamic> + IsContiguous,
-        Sfx: StorageMut<Self::Scalar, Dynamic>,
+        _x: &Vector<Self::Field, Dynamic, Sx>,
+        fx: &mut Vector<Self::Field, Dynamic, Sfx>,
+    ) where
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous,
+        Sfx: StorageMut<Self::Field, Dynamic>,
    {
-        for i in 0..self.n {
-            fx[i] = 0.0;
-        }
+        fx.fill(0.0);
+    }

-        Ok(())
+    fn norm<Sx>(&self, _: &Vector<Self::Field, Dynamic, Sx>) -> Self::Field
+    where
+        Sx: Storage<Self::Field, Dynamic> + IsContiguous,
+    {
+        0.0
    }
 }

 impl TestProblem for InfiniteSolutions {
-    fn initials(&self) -> Vec<OVector<Self::Scalar, Dynamic>> {
+    fn initials(&self) -> Vec<OVector<Self::Field, Dynamic>> {
         let init = DVector::zeros_generic(Dynamic::from_usize(self.n), U1::name());
         vec![init]
     }
@@ -646,12 +724,12 @@ pub enum TestingError<E> {
 /// A simple solver driver that can be used in tests.
 pub fn solve<F: TestSystem, S: Solver<F>>(
     f: &F,
-    dom: &Domain<F::Scalar>,
+    dom: &Domain<F::Field>,
     mut solver: S,
-    mut x: OVector<F::Scalar, Dynamic>,
+    mut x: OVector<F::Field, Dynamic>,
     max_iters: usize,
-    tolerance: F::Scalar,
-) -> Result<OVector<F::Scalar, Dynamic>, TestingError<S::Error>>
+    tolerance: F::Field,
+) -> Result<OVector<F::Field, Dynamic>, TestingError<S::Error>>
 where
     S::Error: StdError,
 {
@@ -659,7 +737,7 @@
     let mut iter = 0;

     loop {
-        solver.next(f, dom, &mut x, &mut fx)?;
+        solver.solve_next(f, dom, &mut x, &mut fx)?;

         if fx.norm() <= tolerance {
             return Ok(x);
@@ -676,20 +754,20 @@ where
 /// A simple optimizer driver that can be used in tests.
 pub fn optimize<F: TestFunction, O: Optimizer<F>>(
     f: &F,
-    dom: &Domain<F::Scalar>,
+    dom: &Domain<F::Field>,
     mut optimizer: O,
-    mut x: OVector<F::Scalar, Dynamic>,
-    min: F::Scalar,
+    mut x: OVector<F::Field, Dynamic>,
+    min: F::Field,
     max_iters: usize,
-    tolerance: F::Scalar,
-) -> Result<OVector<F::Scalar, Dynamic>, TestingError<O::Error>>
+    tolerance: F::Field,
+) -> Result<OVector<F::Field, Dynamic>, TestingError<O::Error>>
 where
     O::Error: StdError,
 {
     let mut iter = 0;

     loop {
-        let fx = optimizer.next(f, dom, &mut x)?;
+        let fx = optimizer.opt_next(f, dom, &mut x)?;

         if fx <= min + tolerance {
             // Converged.
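A sketch of how the `solve` driver above is typically called from a test (iteration budget and tolerances are arbitrary):

```rust
use gomez::solver::TrustRegion;
use gomez::testing::{solve, ExtendedRosenbrock, TestProblem, TestSystem};
use gomez::Problem;

#[test]
fn trust_region_on_extended_rosenbrock() {
    let f = ExtendedRosenbrock::default();
    let dom = f.domain();

    for x in f.initials() {
        let solver = TrustRegion::new(&f, &dom);
        // Budget of 100 iterations; ||F(x)|| <= 1e-10 counts as solved.
        let root = solve(&f, &dom, solver, x, 100, 1e-10).unwrap();
        assert!(f.is_root(&root, 1e-8));
    }
}
```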
@@ -708,22 +786,33 @@ where
 /// testing evolutionary/nature-inspired algorithms.
 pub fn iter<F: TestSystem, S: Solver<F>, G>(
     f: &F,
-    dom: &Domain<F::Scalar>,
+    dom: &Domain<F::Field>,
     mut solver: S,
-    mut x: OVector<F::Scalar, Dynamic>,
+    mut x: OVector<F::Field, Dynamic>,
     iters: usize,
     mut inspect: G,
 ) -> Result<(), S::Error>
 where
     S::Error: StdError,
-    G: FnMut(&S, &OVector<F::Scalar, Dynamic>, F::Scalar, usize),
+    G: FnMut(&S, &OVector<F::Field, Dynamic>, F::Field, usize),
 {
     let mut fx = x.clone_owned();

     for iter in 0..iters {
-        solver.next(f, dom, &mut x, &mut fx)?;
+        solver.solve_next(f, dom, &mut x, &mut fx)?;
         inspect(&solver, &x, fx.norm(), iter);
     }

     Ok(())
 }
+
+fn eval<Sfx>(residuals: impl Iterator<Item = f64>, fx: &mut Vector<f64, Dynamic, Sfx>)
+where
+    Sfx: StorageMut<f64, Dynamic>,
+{
+    fx.iter_mut().zip(residuals).for_each(|(fxi, v)| *fxi = v);
+}
+
+fn norm(residuals: impl Iterator<Item = f64>) -> f64 {
+    residuals.map(|v| v.powi(2)).sum::<f64>().sqrt()
+}
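Finally, a matching sketch for the `iter` driver above: its `inspect` callback makes it easy to record a convergence trace (solver choice and iteration count are arbitrary):

```rust
use gomez::solver::NelderMead;
use gomez::testing::{iter, ExtendedPowell, TestProblem};
use gomez::Problem;

fn main() {
    let f = ExtendedPowell::default();
    let dom = f.domain();
    let x = f.initials().remove(0);
    let solver = NelderMead::new(&f, &dom);

    // Record ||F(x)|| after each of 50 iterations.
    let mut trace = Vec::new();
    iter(&f, &dom, solver, x, 50, |_, _, norm, _| trace.push(norm))
        .expect("solver error");

    assert_eq!(trace.len(), 50);
    println!("final residual norm: {}", trace.last().unwrap());
}
```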