From 28c7387e475a87a7f35e8d430f6f2f5a2c550372 Mon Sep 17 00:00:00 2001
From: Miles Lubin
Date: Fri, 29 Jan 2016 21:58:36 -0500
Subject: [PATCH] fixes for 0.5 function changes

---
 test/brent.jl               |  4 ++--
 test/callbacks.jl           |  8 --------
 test/gradient_descent.jl    | 12 ++++++------
 test/nelder_mead.jl         |  8 ++++----
 test/newton.jl              | 16 ++++++++--------
 test/simulated_annealing.jl |  8 ++++----
 6 files changed, 24 insertions(+), 32 deletions(-)

diff --git a/test/brent.jl b/test/brent.jl
index cdd689e15..8cfd64424 100644
--- a/test/brent.jl
+++ b/test/brent.jl
@@ -1,6 +1,6 @@
-f(x) = 2x^2+3x+1
+f_b(x) = 2x^2+3x+1
 
-results = optimize(f, -2.0, 1.0, method = :brent)
+results = optimize(f_b, -2.0, 1.0, method = :brent)
 
 @assert results.converged
 @assert abs(results.minimum+0.75) < 1e-7
diff --git a/test/callbacks.jl b/test/callbacks.jl
index ee454c4da..9c16801ff 100644
--- a/test/callbacks.jl
+++ b/test/callbacks.jl
@@ -1,12 +1,4 @@
-function cb(tr::OptimizationTrace)
-    @test tr.states[end].iteration % 3 == 0
-end
-
-function cb(os::OptimizationState)
-    @test os.iteration % 3 == 0
-end
-
 for method in (:nelder_mead, :simulated_annealing)
 
     ot_run = false
 
diff --git a/test/gradient_descent.jl b/test/gradient_descent.jl
index 6719d8715..3a06c4b77 100644
--- a/test/gradient_descent.jl
+++ b/test/gradient_descent.jl
@@ -1,14 +1,14 @@
-function f_gd(x)
+function f_gd_1(x)
     (x[1] - 5.0)^2
 end
 
-function g_gd(x, storage)
+function g_gd_1(x, storage)
     storage[1] = 2.0 * (x[1] - 5.0)
 end
 
 initial_x = [0.0]
 
-d = DifferentiableFunction(f_gd, g_gd)
+d = DifferentiableFunction(f_gd_1, g_gd_1)
 results = Optim.gradient_descent(d, initial_x)
 
 @assert isempty(results.trace.states)
@@ -17,16 +17,16 @@
 
 eta = 0.9
 
-function f_gd(x)
+function f_gd_2(x)
     (1.0 / 2.0) * (x[1]^2 + eta * x[2]^2)
 end
 
-function g_gd(x, storage)
+function g_gd_2(x, storage)
     storage[1] = x[1]
     storage[2] = eta * x[2]
 end
 
-d = DifferentiableFunction(f_gd, g_gd)
+d = DifferentiableFunction(f_gd_2, g_gd_2)
 results = Optim.gradient_descent(d, [1.0, 1.0])
 
 @assert isempty(results.trace.states)
diff --git a/test/nelder_mead.jl b/test/nelder_mead.jl
index 176b86387..cc51de8b9 100644
--- a/test/nelder_mead.jl
+++ b/test/nelder_mead.jl
@@ -9,23 +9,23 @@ for (name, prob) in Optim.UnconstrainedProblems.examples
     end
 end
 
-function f(x::Vector)
+function f_nm(x::Vector)
     (100.0 - x[1])^2 + x[2]^2
 end
 
-function rosenbrock(x::Vector)
+function rosenbrock_nm(x::Vector)
     (1.0 - x[1])^2 + 100.0 * (x[2] - x[1]^2)^2
 end
 
 initial_x = [0.0, 0.0]
 
-results = Optim.nelder_mead(f, initial_x)
+results = Optim.nelder_mead(f_nm, initial_x)
 
 @assert results.f_converged
 @assert norm(results.minimum - [100.0, 0.0]) < 0.01
 @assert length(results.trace.states) == 0
 
-results = Optim.nelder_mead(rosenbrock, initial_x)
+results = Optim.nelder_mead(rosenbrock_nm, initial_x)
 
 @assert results.f_converged
 @assert norm(results.minimum - [1.0, 1.0]) < 0.01
diff --git a/test/newton.jl b/test/newton.jl
index 11bcdad5e..01e032757 100644
--- a/test/newton.jl
+++ b/test/newton.jl
@@ -1,18 +1,18 @@
 using Optim
 
-function f(x::Vector)
+function f_1(x::Vector)
     (x[1] - 5.0)^4
 end
 
-function g!(x::Vector, storage::Vector)
+function g!_1(x::Vector, storage::Vector)
     storage[1] = 4.0 * (x[1] - 5.0)^3
 end
 
-function h!(x::Vector, storage::Matrix)
+function h!_1(x::Vector, storage::Matrix)
     storage[1, 1] = 12.0 * (x[1] - 5.0)^2
 end
 
-d = TwiceDifferentiableFunction(f, g!, h!)
+d = TwiceDifferentiableFunction(f_1, g!_1, h!_1)
 results = Optim.newton(d, [0.0])
 
 @assert length(results.trace.states) == 0
@@ -21,23 +21,23 @@
 
 eta = 0.9
 
-function f(x::Vector)
+function f_2(x::Vector)
     (1.0 / 2.0) * (x[1]^2 + eta * x[2]^2)
 end
 
-function g!(x::Vector, storage::Vector)
+function g!_2(x::Vector, storage::Vector)
     storage[1] = x[1]
     storage[2] = eta * x[2]
 end
 
-function h!(x::Vector, storage::Matrix)
+function h!_2(x::Vector, storage::Matrix)
     storage[1, 1] = 1.0
     storage[1, 2] = 0.0
     storage[2, 1] = 0.0
     storage[2, 2] = eta
 end
 
-d = TwiceDifferentiableFunction(f, g!, h!)
+d = TwiceDifferentiableFunction(f_2, g!_2, h!_2)
 results = Optim.newton(d, [127.0, 921.0])
 @assert length(results.trace.states) == 0
 @assert results.gr_converged
diff --git a/test/simulated_annealing.jl b/test/simulated_annealing.jl
index bdf9df764..732002d93 100644
--- a/test/simulated_annealing.jl
+++ b/test/simulated_annealing.jl
@@ -1,15 +1,15 @@
 srand(1)
 
-function f(x::Vector)
+function f_s(x::Vector)
     (x[1] - 5.0)^4
 end
 
-results = Optim.simulated_annealing(f, [0.0])
+results = Optim.simulated_annealing(f_s, [0.0])
 @assert norm(results.minimum - [5.0]) < 0.1
 
-function rosenbrock(x::Vector)
+function rosenbrock_s(x::Vector)
     (1.0 - x[1])^2 + 100.0 * (x[2] - x[1]^2)^2
 end
 
-results = Optim.simulated_annealing(rosenbrock, [0.0, 0.0])
+results = Optim.simulated_annealing(rosenbrock_s, [0.0, 0.0])
 @assert norm(results.minimum - [1.0, 1.0]) < 0.1
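
Note: a minimal sketch (hypothetical snippet, not part of the patch) of the
collision the renames avoid. The test files are include()d into one scope,
and every `function f(...)` definition extends the generic function already
bound to `f`, so a later file's definition with the same signature silently
replaces the earlier method:

    f(x) = 2x^2 + 3x + 1   # defined by one test file (cf. brent.jl)
    f(x) = (x - 5.0)^4     # a later file reuses the name: method overwritten
    f(1.0)                 # returns 256.0, not 6.0 -- tests written against
                           # the first definition now exercise the wrong one

Giving each file's helpers a unique suffix (f_b, f_gd_1, f_nm, f_1, f_s, ...)
keeps the definitions independent.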