Merge pull request #13 from jrising/smoothopt
Adds smooth optimization functionality
  • Loading branch information
2 parents 1daf475 + b557a78 commit 87ba98e
Showing 5 changed files with 224 additions and 84 deletions.
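The headline change is a coarse-to-fine optimization layer (src/smooth.jl) built on a new paramtrans hook: the optimizer searches over a reduced vector of control values, and per-parameter transform functions expand them into full model parameters. A minimal sketch of the intended workflow, assuming a Mimi model m whose :policy component exposes a length-T :abatement parameter (construct_mymodel, :policy, :abatement, and total_cost are hypothetical placeholders, not names from this commit):

    using Mimi, OptiMimi

    m = construct_mymodel()                # hypothetical Mimi model

    # Per-name bounds are expanded across the parameter's dimensions internally.
    prob = problem(m, [:policy], [:abatement], [0.0], [1.0],
                   m -> -total_cost(m))    # objectives are maximized

    # initgen(n, guess) must return a zero-argument generator of length-n starting
    # points; guess is a linear interpolation of the previous refinement (or nothing).
    initgen(n, guess) = () -> isnothing(guess) ? fill(0.5, n) : collect(guess)

    soln = OptiMimi.piecewisesolution_single(prob, initgen)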
3 changes: 2 additions & 1 deletion Project.toml
@@ -3,12 +3,13 @@ uuid = "61d5e40c-f051-5c04-90a0-b0f47eb95ad5"

[deps]
BlackBoxOptim = "a134a8b2-14d6-55f6-9291-3336d3ab0209"
Clp = "e2554f3b-3117-50c0-817c-e040a3ddf72d"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
Dierckx = "39dd38d3-220a-591b-8e3c-4c3a8c710a94"
DiffResults = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
DiffRules = "b552c78f-8df3-52c6-915a-8e097449b14b"
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
Interpolations = "a98d9a8b-a2ab-59e6-89dd-64a1c18fca59"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MathProgBase = "fdba3010-5040-5b88-9595-932c9decdf73"
Mimi = "e4e893b0-ee5e-52ea-8111-44b3bdec128c"
98 changes: 60 additions & 38 deletions src/OptiMimi.jl
@@ -4,7 +4,7 @@ using NLopt
using ForwardDiff, DiffResults
using MathProgBase

import Mimi: Model
import Mimi: Model, AbstractModel

export problem, solution, unaryobjective, objevals, setparameters, nameindexes, sensitivity, uncertainproblem, setsolution

@@ -19,25 +19,28 @@ allverbose = false
objevals = 0

mutable struct OptimizationProblem
model::Model
model::AbstractModel
components::Vector{Symbol}
names::Vector{Symbol}
objective::Function
opt::Opt
constraints::Vector{Function}
paramtrans::Union{Vector{Function}, Nothing}
end

mutable struct BlackBoxOptimizationProblem{T}
algorithm::Symbol
model::Model
model::AbstractModel
components::Vector{Symbol}
names::Vector{Symbol}
objective::Function
lowers::Vector{T}
uppers::Vector{T}
paramtrans::Union{Vector{Function}, Nothing}
end

mutable struct LinprogOptimizationProblem{T}
model::Model
model::AbstractModel
components::Vector{Symbol}
names::Vector{Symbol}
objective::Function
@@ -47,10 +50,12 @@ mutable struct LinprogOptimizationProblem{T}
exuppers::Vector{T}
end

include("smooth.jl")

BlackBoxAlgorithms = [:separable_nes, :xnes, :dxnes]

"""Returns (ii, len, isscalar) with the index of each symbol and its length."""
function nameindexes(model::Model, components::Vector{Symbol}, names::Vector{Symbol}, chnl)
function nameindexes(model::AbstractModel, components::Vector{Symbol}, names::Vector{Symbol}, chnl)
for ii in 1:length(components)
dims = getdims(model, components[ii], names[ii])
if length(dims) == 0
@@ -64,24 +69,30 @@ end
"""
Initialize a model with the results from an optimization.
"""
function setparameters(model::Model, components::Vector{Symbol}, names::Vector{Symbol}, xx::Vector)
startindex = 1
for (ii, len, isscalar) in Channel((chnl) -> nameindexes(model, components, names, chnl))
if isscalar
set_or_update_param!(model, components[ii], names[ii], xx[startindex])
else
shape = getdims(model, components[ii], names[ii])
reshaped = reshape(collect(model.md.number_type, xx[startindex:(startindex+len - 1)]), tuple(shape...))
set_or_update_param!(model, components[ii], names[ii], reshaped)
function setparameters(model::AbstractModel, components::Vector{Symbol}, names::Vector{Symbol}, xx::Vector, paramtrans::Union{Vector{Function}, Nothing})
if isnothing(paramtrans)
startindex = 1
for (ii, len, isscalar) in Channel((chnl) -> nameindexes(model, components, names, chnl))
if isscalar
set_or_update_param!(model, components[ii], names[ii], xx[startindex])
else
shape = getdims(model, components[ii], names[ii])
reshaped = reshape(collect(model.md.number_type, xx[startindex:(startindex+len - 1)]), tuple(shape...))
set_or_update_param!(model, components[ii], names[ii], reshaped)
end
startindex += len
end
else
for (ii, len, isscalar) in Channel((chnl) -> nameindexes(model, components, names, chnl))
set_or_update_param!(model, components[ii], names[ii], paramtrans[ii](xx))
end
startindex += len
end
end
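# Note: when paramtrans is supplied, xx holds the reduced search variables (e.g.,
# interpolation knot values) and paramtrans[ii](xx) expands them into the full
# parameter for each component/name pair; otherwise xx is consumed directly, slice by slice.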

function sensitivity(model::Model, component::Symbol, parameter::Symbol, objective::Function, points::Vector{Float64})
function sensitivity(model::AbstractModel, component::Symbol, parameter::Symbol, objective::Function, points::Vector{Float64}, paramtrans::Union{Vector{Function}, Nothing}=nothing)
results = []
for point in points
setparameters(model, [component], [parameter], [point])
setparameters(model, [component], [parameter], [point], paramtrans)
run(model)
push!(results, objective(model))
end
@@ -90,7 +101,7 @@ function sensitivity(model::Model, component::Symbol, parameter::Symbol, objecti
end

"""Generate the form of objective function used by the optimization, taking parameters rather than a model."""
function unaryobjective(model::Model, components::Vector{Symbol}, names::Vector{Symbol}, objective::Function)
function unaryobjective(model::AbstractModel, components::Vector{Symbol}, names::Vector{Symbol}, objective::Function, paramtrans::Union{Vector{Function}, Nothing})
function my_objective(xx::Vector)
if allverbose
println(xx)
@@ -99,7 +110,7 @@ function unaryobjective(model::Model, components::Vector{Symbol}, names::Vector{
global objevals
objevals += 1

setparameters(model, components, names, xx)
setparameters(model, components, names, xx, paramtrans)
run(model)
objective(model)
end
@@ -108,8 +119,8 @@ function unaryobjective(model::Model, components::Vector{Symbol}, names::Vector{
end

"""Create an NLopt-style objective function which does not use its grad argument."""
function gradfreeobjective(model::Model, components::Vector{Symbol}, names::Vector{Symbol}, objective::Function)
myunaryobjective = unaryobjective(model, components, names, objective)
function gradfreeobjective(model::AbstractModel, components::Vector{Symbol}, names::Vector{Symbol}, objective::Function, paramtrans::Union{Vector{Function}, Nothing})
myunaryobjective = unaryobjective(model, components, names, objective, paramtrans)
function myobjective(xx::Vector, grad::Vector)
myunaryobjective(xx)
end
@@ -118,8 +129,8 @@ function gradfreeobjective(model::Model, components::Vector{Symbol}, names::Vect
end

"""Create an NLopt-style objective function which computes an autodiff gradient."""
function autodiffobjective(model::Model, components::Vector{Symbol}, names::Vector{Symbol}, objective::Function)
myunaryobjective = unaryobjective(model, components, names, objective)
function autodiffobjective(model::AbstractModel, components::Vector{Symbol}, names::Vector{Symbol}, objective::Function, paramtrans::Union{Vector{Function}, Nothing})
myunaryobjective = unaryobjective(model, components, names, objective, paramtrans)
function myobjective(xx::Vector, grad::Vector)
out = DiffResults.GradientResult(xx)
ForwardDiff.gradient!(out, myunaryobjective, xx)
@@ -134,7 +145,7 @@ function autodiffobjective(model::Model, components::Vector{Symbol}, names::Vect
end

"""Create a 0 point."""
function make0(model::Model, components::Vector{Symbol}, names::Vector{Symbol})
function make0(model::AbstractModel, components::Vector{Symbol}, names::Vector{Symbol})
initial = Float64[]
for (ii, len, isscalar) in Channel((chnl) -> nameindexes(model, components, names, chnl))
append!(initial, [0. for jj in 1:len])
@@ -144,7 +155,7 @@ function make0(model::Model, components::Vector{Symbol}, names::Vector{Symbol})
end

"""Expand parameter constraints to full vectors for every numerical parameter."""
function expandlimits(model::Model, components::Vector{Symbol}, names::Vector{Symbol}, lowers::Vector{T}, uppers::Vector{T}) where T <: Real
function expandlimits(model::AbstractModel, components::Vector{Symbol}, names::Vector{Symbol}, lowers::Vector{T}, uppers::Vector{T}) where T <: Real
my_lowers = T[]
my_uppers = T[]

@@ -160,21 +171,27 @@ function expandlimits(model::Model, components::Vector{Symbol}, names::Vector{Sy
end

"""Setup an optimization problem."""
function problem(model::Model, components::Vector{Symbol}, names::Vector{Symbol}, lowers::Vector{T}, uppers::Vector{T}, objective::Function; constraints::Vector{Function}=Function[], algorithm::Symbol=:LN_COBYLA_OR_LD_MMA) where T <: Real
my_lowers, my_uppers, totalvars = expandlimits(model, components, names, lowers, uppers)
function problem(model::AbstractModel, components::Vector{Symbol}, names::Vector{Symbol}, lowers::Vector{T}, uppers::Vector{T}, objective::Function; constraints::Vector{Function}=Function[], algorithm::Symbol=:LN_COBYLA_OR_LD_MMA, paramtrans::Union{Vector{Function}, Nothing}=nothing) where T <: Real
if isnothing(paramtrans)
my_lowers, my_uppers, totalvars = expandlimits(model, components, names, lowers, uppers)
else
my_lowers = lowers
my_uppers = uppers
totalvars = length(lowers)
end
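# Note: with paramtrans, the lowers/uppers given here (and hence totalvars) describe
# the reduced search vector handed to paramtrans, not the expanded model parameters.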

if algorithm == :GUROBI_LINPROG
# Make no changes to objective!
elseif model.md.number_type == Number
elseif hasfield(typeof(model), :md) && model.md.number_type == Number
if algorithm == :LN_COBYLA_OR_LD_MMA
algorithm = :LD_MMA
end
if string(algorithm)[2] == 'N'
warn("Model is autodifferentiable, but optimizing using a derivative-free algorithm.")
myobjective = gradfreeobjective(model, components, names, objective)
myobjective = gradfreeobjective(model, components, names, objective, paramtrans)
else
println("Using AutoDiff objective.")
myobjective = autodiffobjective(model, components, names, objective)
myobjective = autodiffobjective(model, components, names, objective, paramtrans)
end
else
if algorithm == :LN_COBYLA_OR_LD_MMA
@@ -184,7 +201,7 @@ function problem(model::Model, components::Vector{Symbol}, names::Vector{Symbol}
algorithm = :LN_COBYLA
end

myobjective = gradfreeobjective(model, components, names, objective)
myobjective = gradfreeobjective(model, components, names, objective, paramtrans)
end

if algorithm == :GUROBI_LINPROG
@@ -195,7 +212,7 @@ function problem(model::Model, components::Vector{Symbol}, names::Vector{Symbol}
warn("Functional constraints not supported for BBO algorithms.")
end

BlackBoxOptimizationProblem(algorithm, model, components, names, myobjective, my_lowers, my_uppers)
BlackBoxOptimizationProblem(algorithm, model, components, names, myobjective, my_lowers, my_uppers, paramtrans)
else
opt = Opt(algorithm, totalvars)
lower_bounds!(opt, my_lowers)
@@ -207,7 +224,7 @@ function problem(model::Model, components::Vector{Symbol}, names::Vector{Symbol}
for constraint in constraints
let this_constraint = constraint
function my_constraint(xx::Vector, grad::Vector)
setparameters(model, components, names, xx)
setparameters(model, components, names, xx, paramtrans)
run(model)
this_constraint(model)
end
@@ -216,7 +233,7 @@ function problem(model::Model, components::Vector{Symbol}, names::Vector{Symbol}
end
end

OptimizationProblem(model, components, names, opt, constraints)
OptimizationProblem(model, components, names, objective, opt, constraints, paramtrans)
end
end
end
@@ -236,7 +253,7 @@ function solution(optprob::OptimizationProblem, generator::Function; maxiter=Inf
while attempts < maxiter
initial = generator()

setparameters(optprob.model, optprob.components, optprob.names, initial)
setparameters(optprob.model, optprob.components, optprob.names, initial, optprob.paramtrans)
run(optprob.model)

valid = true
@@ -269,6 +286,7 @@ end
end
(minf,minx,ret) = optimize(optprob.opt, initial)

print(ret)
(minf, minx)
end

@@ -287,7 +305,7 @@ end


"""Setup an optimization problem."""
function problem(model::Model, components::Vector{Symbol}, names::Vector{Symbol}, lowers::Vector{T}, uppers::Vector{T}, objective::Function, objectiveconstraints::Vector{Function}, matrixconstraints::Vector{MatrixConstraintSet}) where T <: Real
function problem(model::AbstractModel, components::Vector{Symbol}, names::Vector{Symbol}, lowers::Vector{T}, uppers::Vector{T}, objective::Function, objectiveconstraints::Vector{Function}, matrixconstraints::Vector{MatrixConstraintSet}) where T <: Real
my_lowers, my_uppers, totalvars = expandlimits(model, components, names, lowers, uppers)
LinprogOptimizationProblem(model, components, names, objective, objectiveconstraints, matrixconstraints, my_lowers, my_uppers)
end
@@ -303,8 +321,8 @@ function solution(optprob::LinprogOptimizationProblem, verbose=false)
println("Optimizing...")
end

if optprob.model.md.number_type == Number
myobjective = unaryobjective(optprob.model, optprob.components, optprob.names, optprob.objective)
if hasfield(typeof(optprob.model), :md) && optprob.model.md.number_type == Number
myobjective = unaryobjective(optprob.model, optprob.components, optprob.names, optprob.objective, nothing)
f, b, A = lpconstraints(optprob.model, optprob.components, optprob.names, myobjective, objectiveconstraints)
else
f, b, A = lpconstraints(optprob.model, optprob.components, optprob.names, optprob.objective, optprob.objectiveconstraints)
Expand All @@ -319,4 +337,8 @@ function solution(optprob::LinprogOptimizationProblem, verbose=false)
sol.sol
end

function include_smooth()
include(joinpath(dirname(@__FILE__), "smooth.jl"))
end

end # module
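To illustrate the new keyword in isolation: a hedged sketch of calling problem() with an explicit paramtrans that interpolates a few knot values onto the model's full time dimension (m, :policy, :abatement, total_cost, and T are the same hypothetical placeholders as above):

    using Interpolations

    T = 60                            # length of the hypothetical time dimension
    knots = [1, 20, 40, T]            # four control points

    prob = problem(m, [:policy], [:abatement],
                   zeros(4), ones(4),             # bounds on the 4 knot values
                   m -> -total_cost(m);
                   paramtrans=convert(Vector{Function},
                                      [yy -> LinearInterpolation(knots, yy)(1:T)]))

    minf, minx = solution(prob, () -> fill(0.5, 4))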
1 change: 0 additions & 1 deletion src/linproghouse.jl
@@ -1,6 +1,5 @@
using MathProgBase
using DataFrames
using Clp
using SparseArrays, LinearAlgebra

import Base.*, Base.-, Base.+, Base./, Base.max
89 changes: 89 additions & 0 deletions src/smooth.jl
@@ -0,0 +1,89 @@
using Interpolations
import Dierckx
using Distributions

using NLopt
# import OptiMimi.OptimizationProblem, OptiMimi.getdims, OptiMimi.gradfreeobjective
# using Plots

function piecewisesolution_single(optprob::OptimizationProblem, initgen::Function, maxiter=Inf, verbose=false; reltol=1e-6)
len = getdims(optprob.model, optprob.components[1], optprob.names[1])[1]
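# Coarse-to-fine schedule: 1, 2, 4, ... segments, ending with len-1 segments
# (a knot at every time step).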
numsegs = Int64.([2 .^(0:(floor(log2(len-1))-1)); len-1])

last_xx = last_soln = nothing

for numseg in numsegs
print(numseg)
xx = trunc.(Int, range(1, stop=len, length=numseg+1))

keeps = [true for ii in 1:length(xx)]
if numseg > 2
# Drop xx where [LinearInterp - CubicInterp] < reltol
linears = LinearInterpolation(last_xx, last_soln)(xx)
cubics = Dierckx.evaluate(Dierckx.Spline1D(last_xx, last_soln, k=min(3, length(last_xx) - 1)), xx)
keeps = abs.(linears .- cubics) ./ ((linears .+ cubics) / 2) .> reltol
keeps[1] = true
keeps[length(keeps)] = true
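# (Endpoints are always kept so the interpolation spans the full horizon.)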

# Drop knots only if can be linearly guessed
for ii in 2:numseg
if xx[ii] in last_xx
jj = findfirst(last_xx .== xx[ii])
guess = LinearInterpolation([last_xx[jj-1], last_xx[jj+1]], [last_soln[jj-1], last_soln[jj+1]])(xx[ii])
if abs(guess - last_soln[jj]) / last_soln[jj] > reltol
keeps[ii] = true
end
end
end

println(keeps)
end

paramtrans = convert(Vector{Function}, [yy -> LinearInterpolation(xx[keeps], yy)(1:len)])

opt = Opt(optprob.opt.algorithm, sum(keeps))
lower_bounds!(opt, optprob.opt.lower_bounds[xx[keeps]])
upper_bounds!(opt, optprob.opt.upper_bounds[xx[keeps]])
xtol_rel!(opt, reltol)

myobjective = gradfreeobjective(optprob.model, optprob.components, optprob.names, optprob.objective, paramtrans)

max_objective!(opt, myobjective)

for constraint in optprob.constraints
let this_constraint = constraint
function my_constraint(xx::Vector, grad::Vector)
setparameters(optprob.model, optprob.components, optprob.names, xx, paramtrans)
run(optprob.model)
this_constraint(optprob.model)
end

inequality_constraint!(opt, my_constraint)
end
end

if sum(keeps) == len - 1
subprob = OptimizationProblem(optprob.model, optprob.components, optprob.names, optprob.objective, opt, optprob.constraints, nothing)
else
subprob = OptimizationProblem(optprob.model, optprob.components, optprob.names, optprob.objective, opt, optprob.constraints, paramtrans)
end

if isnothing(last_xx)
lin_yy = nothing
else
lin_yy = LinearInterpolation(last_xx, last_soln)(xx[keeps])
end
soln = solution(subprob, initgen(sum(keeps), lin_yy))

if numseg > 2
last_soln = LinearInterpolation(last_xx, last_soln)(xx)
last_soln[keeps] = soln[2]
else
last_soln = soln[2]
end
last_xx = xx
println([last_xx, last_soln])
end

last_soln
end
