diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 507e806..57e0ad8 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -30,7 +30,7 @@ jobs: # Steps represent a sequence of tasks that will be executed as part of the job steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Setup Julia environment # You may pin to the exact commit or the version. @@ -64,6 +64,6 @@ jobs: # process and submit the code coverage information - uses: julia-actions/julia-processcoverage@v1 - - uses: codecov/codecov-action@v1 + - uses: codecov/codecov-action@v3 with: file: lcov.info diff --git a/Project.toml b/Project.toml index 9174697..ada71fa 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "StateSpaceEcon" uuid = "e4c825b0-b65c-11ea-0b5a-6176b64e7b7f" authors = ["Atai Akunov ", "Boyan Bejanov ", "Nicholas Labelle St-Pierre "] -version = "0.4.1" +version = "0.5.0" [deps] DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab" @@ -10,19 +10,24 @@ ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" ModelBaseEcon = "1d16192e-b65e-11ea-11ed-0789cee22d2f" +Pardiso = "46dd5b70-b6fb-5a00-ae2d-e8fea33afaf2" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" +SuiteSparse = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb" TimeSeriesEcon = "8b6756d2-c55c-11ea-2998-5f67ea17da60" +TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" [compat] DiffResults = "1.0" ForwardDiff = "0.10" JLD2 = "0.4" -ModelBaseEcon = "0.5.2" +ModelBaseEcon = "0.6" +Pardiso = "0.5" Suppressor = "0.2" -TimeSeriesEcon = "0.5.1" +TimerOutputs = "0.5" +TimeSeriesEcon = "0.6" julia = "1.7" [extras] diff --git a/TODO.md b/TODO.md deleted file mode 100644 index 339d038..0000000 --- a/TODO.md +++ /dev/null @@ -1,2 +0,0 @@ - -1. diff --git a/src/Plans.jl b/src/Plans.jl index 650988e..ffacc80 100644 --- a/src/Plans.jl +++ b/src/Plans.jl @@ -78,7 +78,7 @@ function Plan(model::Model, range::AbstractUnitRange) range = (first(range)-model.maxlag):(last(range)+model.maxlead) local varshks = model.varshks local N = length(varshks) - local names = tuple(Symbol.(varshks)...) # force conversion of Vector{ModelSymbol} to NTuple{N,Symbol} + local names = tuple(Symbol.(varshks)...) # force conversion of Vector{ModelVariable} to NTuple{N,Symbol} p = Plan(range, NamedTuple{names}(1:N), falses(length(range), length(varshks))) for (ind, var) = enumerate(varshks) if isshock(var) || isexog(var) @@ -146,12 +146,16 @@ Base.setindex!(p::Plan, x, i...) = error("Cannot assign directly. Use `exogenize ####################################### # query the exo-end status of a variable -@inline Base.getindex(p::Plan{T}, vars::Symbol...) where {T} = begin +@inline Base.getindex(p::Plan, vars::Union{AbstractString,Symbol,ModelVariable}...) = Base.getindex(p, Symbol[vars...]) +@inline Base.getindex(p::Plan, vars::AbstractVector{<:Union{AbstractString,ModelVariable}}) = Base.getindex(p, Symbol[vars...]) +@inline Base.getindex(p::Plan{T}, vars::AbstractVector{Symbol}) where {T} = begin var_inds = Int[p.varshks[vars]...] Plan{T}(p.range, NamedTuple{(vars...,)}(eachindex(vars)), p.exogenous[:, var_inds]) end -@inline Base.getindex(p::Plan{T}, rng::AbstractUnitRange{T}, vars::Symbol...) 
where {T} = begin +@inline Base.getindex(p::Plan, rng::AbstractUnitRange, vars::Union{AbstractString,Symbol,ModelVariable}...) = Base.getindex(p, rng, Symbol[vars...]) +@inline Base.getindex(p::Plan, rng::AbstractUnitRange, vars::AbstractVector{<:Union{AbstractString,ModelVariable}}) = Base.getindex(p, rng, Symbol[vars...]) +@inline Base.getindex(p::Plan{T}, rng::AbstractUnitRange{T}, vars::AbstractVector{Symbol}) where {T} = begin rng.start < p.range.start && throw(BoundsError(p, rng.start)) rng.stop > p.range.stop && throw(BoundsError(p, rng.stop)) var_inds = Int[p.varshks[vars]...] diff --git a/src/StackedTimeSolver.jl b/src/StackedTimeSolver.jl index b0433c4..d5f394f 100644 --- a/src/StackedTimeSolver.jl +++ b/src/StackedTimeSolver.jl @@ -23,6 +23,9 @@ using Printf using ModelBaseEcon using TimeSeriesEcon +using TimerOutputs +const timer = TimerOutput() + using ..Plans import ..steadystatearray @@ -40,6 +43,14 @@ import ModelBaseEcon.hassolverdata import ModelBaseEcon.getsolverdata import ModelBaseEcon.setsolverdata! +using Pardiso + + +using SuiteSparse +using SuiteSparse.UMFPACK + +include("stackedtime/sparse.jl") + include("stackedtime/fctypes.jl") include("stackedtime/misc.jl") include("stackedtime/solverdata.jl") @@ -61,6 +72,9 @@ export FCMatchSSRate, fcslope, fcrate export FCConstRate, fcnatural export setfc +export use_pardiso, use_pardiso! +export use_umfpack, use_umfpack! + # the following are deprecated export seriesoverlay, dictoverlay diff --git a/src/firstorder/solve.jl b/src/firstorder/solve.jl index cdfd407..e4f45d9 100644 --- a/src/firstorder/solve.jl +++ b/src/firstorder/solve.jl @@ -50,7 +50,7 @@ function VarMaps(model::Model) ex_vars = Tuple{Symbol,Int}[] for mvar in model.allvars var = mvar.name - lags, leads = extrema(lag for eqn in model.equations + lags, leads = extrema(lag for eqn in values(model.equations) for (name, lag) in keys(eqn.tsrefs) if name == var) if isexog(mvar) || isshock(mvar) diff --git a/src/stackedtime/shockdecomp.jl b/src/stackedtime/shockdecomp.jl index 749e077..064e3bb 100644 --- a/src/stackedtime/shockdecomp.jl +++ b/src/stackedtime/shockdecomp.jl @@ -19,7 +19,7 @@ dynamic model for the given plan range and final condition. We verify the residual and issue a warning, but do not enforce this. See [`steadystatedata`](@ref). -As part of the algorithm we run a simulation with the given `plan`, +As part of the algorithm we run a simulation with the given `plan`, `exog_data` and `fctype`. See [`simulate`](@ref) for other options. !!! note @@ -37,7 +37,6 @@ function shockdecomp(m::Model, p::Plan, exog_data::SimData; verbose::Bool=getoption(m, :verbose, false), maxiter::Int=getoption(m, :maxiter, 20), tol::Float64=getoption(m, :tol, 1e-12), - sparse_solver::Function=(A, b) -> A \ b, linesearch::Bool=getoption(m, :linesearch, false), fctype::FinalCondition=getoption(m, :fctype, fcgiven), _debug=false @@ -75,11 +74,11 @@ function shockdecomp(m::Model, p::Plan, exog_data::SimData; stackedtime_R!(res_shocked, shocked, shocked, gdata) # Run the "shocked" simulation with the given exogenous data. if norm(res_shocked, Inf) > tol assign_exog_data!(shocked, exog_data, gdata) - sim_nr!(shocked, gdata, maxiter, tol, verbose, sparse_solver, linesearch) + sim_nr!(shocked, gdata, maxiter, tol, verbose, linesearch) end # We need the Jacobian matrix evaluated at the control solution. 
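# Illustrative usage sketch (assumes the `use_pardiso!`/`use_umfpack` helpers defined in
# src/stackedtime/sparse.jl further below): the backend used to factorize the stacked-time
# Jacobian is now selected through the model's `factorization` option or the global default,
# rather than being passed in as a `sparse_solver` function:
#
#     use_pardiso!(m)      # this model factorizes with MKL Pardiso
#     use_umfpack()        # or set the global default back to UMFPACK
#     result = shockdecomp(m, p, exog_data; fctype=fcnatural)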
- res_control, _ = stackedtime_RJ(control, control, gdata) + res_control, Jac_control = stackedtime_RJ(control, control, gdata) if norm(res_control, Inf) > tol # What to do if it's not a solution? We just give a warning for now, but maybe we should error()?! @warn "Control is not a solution:" norm(res_control, Inf) @@ -109,7 +108,7 @@ function shockdecomp(m::Model, p::Plan, exog_data::SimData; end # indices for the exogenous variable-points split into groups - exog_inds = Workspace(; + exog_inds = @views @inbounds Workspace(; # contributions of initial conditions init=vec(LI[init, :]), # contributions of final conditions @@ -129,7 +128,7 @@ function shockdecomp(m::Model, p::Plan, exog_data::SimData; # now do the decomposition begin - # we build a sparse matrix + # we build a sparse matrix SDI = Int[] # row indexes SDJ = Int[] # column indexes SDV = Float64[] # values @@ -149,7 +148,7 @@ function shockdecomp(m::Model, p::Plan, exog_data::SimData; continue end rsdv = result.sd[v.name] = MVTSeries(p.range, keys(exog_inds), zeros) - rsdv[pinit, :init] = delta[pinit, v.name] + rsdv.init[pinit] = view(delta[v.name], pinit) end else shex = filter(v -> isshock(v) || isexog(v), m.allvars) @@ -161,13 +160,13 @@ function shockdecomp(m::Model, p::Plan, exog_data::SimData; if haskey(id, v.name) # have initial decomposition idv = id[v.name] - if norm(delta[pinit, v.name]-sum(idv[pinit, :],dims=2),Inf)>tol + if norm(view(delta[v.name], pinit) - sum(view(idv, pinit, :), dims=2), Inf) > tol error("The given `initdecomp` does not add up for $(v.name).") end rsdv[pinit] .= idv for (colind, colname) in enumerate(keys(exog_inds)) if haskey(idv, colname) || colname ∈ shex - inds = LI[init, vind] + inds = @view LI[init, vind] append!(SDI, inds) append!(SDJ, fill(colind, size(inds))) append!(SDV, idv[pinit, colname].values) @@ -175,9 +174,9 @@ function shockdecomp(m::Model, p::Plan, exog_data::SimData; end else # no initial decomposition - it all goes in init - rsdv[pinit, :init] = delta[pinit, v.name] + rsdv.init[pinit] = view(delta[v.name], pinit) colind = 1 - inds = LI[init, vind] + inds = @view LI[init, vind] append!(SDI, inds) append!(SDJ, fill(colind, size(inds))) append!(SDV, delta[inds]) @@ -196,29 +195,27 @@ function shockdecomp(m::Model, p::Plan, exog_data::SimData; end SDMAT = Matrix(gdata.J * sparse(SDI, SDJ, -SDV, size(gdata.J, 2), length(exog_inds))) - ldiv!(gdata.J_factorized[], SDMAT) - + sf_solve!(Jac_control, SDMAT) end # now split and decorate the shock-decomposition matrix begin # in order to split the rows of SDMAT by variable, we need the inverse indexing map inv_endo_inds = zeros(Int, size(gdata.solve_mask)) - inv_endo_inds[gdata.solve_mask] .= 1:sum(gdata.solve_mask) + inv_endo_inds[gdata.solve_mask] = 1:sum(gdata.solve_mask) for (index, v) in enumerate(m.allvars) - v_inds = vec(LI[:, index]) - v_endo_mask = gdata.solve_mask[v_inds] + v_endo_mask = gdata.solve_mask[@view LI[:, index]] if !any(v_endo_mask) continue end - v_inv_endo_inds = inv_endo_inds[v_inds[v_endo_mask]] - v_data = result.sd[v] + v_inv_endo_inds = inv_endo_inds[@view LI[v_endo_mask, index]] + v_data = result.sd[v] # assign contributions to endogenous points v_data[v_endo_mask, :] = SDMAT[v_inv_endo_inds, :] # contributions of initial conditions already assigned # assign contributions to final conditions if fctype === fcgiven || fctype === fclevel - v_data[(begin-1).+term, :term] .= delta[v.name][term] + v_data.term[(begin-1).+term] = view(delta[v.name], term) elseif fctype === fcrate for tt in term v_data[begin-1+tt, :] = 
v_data[begin-2+tt, :] @@ -240,8 +237,8 @@ function shockdecomp(m::Model, p::Plan, exog_data::SimData; if deviation logvars = islog.(m.varshks) .| isneglog.(m.varshks) - result.s[:, logvars] ./= result.c[:, logvars] - result.s[:, .!logvars] .-= result.c[:, .!logvars] + @views result.s[:, logvars] ./= result.c[:, logvars] + @views result.s[:, .!logvars] .-= result.c[:, .!logvars] end if _debug diff --git a/src/stackedtime/simulate.jl b/src/stackedtime/simulate.jl index 1e1dc7c..6dc3fa9 100644 --- a/src/stackedtime/simulate.jl +++ b/src/stackedtime/simulate.jl @@ -10,7 +10,7 @@ end """ - sim_nr!(x, sd, maxiter, tol, verbose [, sparse_solver [, linesearch]]) + sim_nr!(x, sd, maxiter, tol, verbose [, linesearch]) Solve the simulation problem. * `x` - the array of data for the simulation. All initial, final and exogenous @@ -20,18 +20,15 @@ Solve the simulation problem. * `maxiter` - maximum number of iterations. * `tol` - desired accuracy. * `verbose` - whether or not to print progress information. - * `sparse_solver` (optional) - a function called to solve the linear system A - x = b for x. Defaults to A\\b * `linesearch::Bool` - When `true` the Newton-Raphson is modified to include a search along the descent direction for a sufficient decrease in f. It will do this at each iteration. Default is `false`. """ -function sim_nr!(x::AbstractArray{Float64}, sd::StackedTimeSolverData, - maxiter::Int64, tol::Float64, verbose::Bool, - sparse_solver::Function=(A, b) -> A \ b, linesearch::Bool=false) +@timeit_debug timer function sim_nr!(x::AbstractArray{Float64}, sd::StackedTimeSolverData, + maxiter::Int64, tol::Float64, verbose::Bool, linesearch::Bool=false) for it = 1:maxiter - Fx, Jx = stackedtime_RJ(x, x, sd) + Fx = stackedtime_R!(Vector{Float64}(undef, size(sd.J, 1)), x, x, sd) nFx = norm(Fx, Inf) if nFx < tol if verbose @@ -39,7 +36,9 @@ function sim_nr!(x::AbstractArray{Float64}, sd::StackedTimeSolverData, end return true end - Δx = sparse_solver(Jx, Fx) + Δx, Jx = stackedtime_RJ(x, x, sd) + Δx = sf_solve!(Jx, Δx) + λ = 1.0 if linesearch nf = norm(Fx) @@ -106,7 +105,6 @@ function simulate(m::Model, fctype=getoption(m, :fctype, fcgiven), expectation_horizon::Union{Nothing,Int64}=nothing, #= Newton-Raphson options =# - sparse_solver::Function=(A, b) -> A \ b, linesearch::Bool=getoption(m, :linesearch, false), warn_maxiter::Bool=getoption(getoption(m, :warn, Options()), :maxiter, false) ) @@ -127,7 +125,7 @@ function simulate(m::Model, NT = length(p_ant.range) nauxs = length(m.auxvars) nvarshks = length(m.varshks) - logvars = [islog(var) | isneglog(var) for var in m.varshks] + logvars = islog.(m.varshks) .| isneglog.(m.varshks) if size(exog_ant) != (NT, nvarshks) error("Incorrect dimensions of exog_data. Expected $((NT, nvarshks)), got $(size(exog_ant)).") @@ -144,8 +142,8 @@ function simulate(m::Model, if size(baseline) != (NT, nvarshks) error("Incorrect dimensions of baseline. 
Expected $((NT, nvarshks)), got $(size(baseline)).") end - exog_ant[:, logvars] .*= baseline[:, logvars] - exog_ant[:, .!logvars] .+= baseline[:, .!logvars] + @views exog_ant[:, logvars] .*= baseline[:, logvars] + @views exog_ant[:, .!logvars] .+= baseline[:, .!logvars] end exog_ant = ModelBaseEcon.update_auxvars(transform(exog_ant, m), m) @@ -161,7 +159,7 @@ function simulate(m::Model, if verbose @info "Simulating $(p_ant.range[1 + m.maxlag:NT - m.maxlead])" # anticipate gdata.FC end - converged = sim_nr!(x, gdata, maxiter, tol, verbose, sparse_solver, linesearch) + converged = sim_nr!(x, gdata, maxiter, tol, verbose, linesearch) if warn_maxiter && !converged @warn("Newton-Raphson reached maximum number of iterations (`maxiter`).") end @@ -194,14 +192,14 @@ function simulate(m::Model, if p_unant.range != p_ant.range error("Anticipated and unanticipated ranges don't match.") end - if size(exog_unant) != size(exog_ant) - error("Anticipated and unanticipated data don't match.") - end if deviation_unant - exog_unant[:, logvars] .*= baseline[:, logvars] - exog_unant[:, .!logvars] .+= baseline[:, .!logvars] + @views exog_unant[:, logvars] .*= baseline[:, logvars] + @views exog_unant[:, .!logvars] .+= baseline[:, .!logvars] end exog_unant = ModelBaseEcon.update_auxvars(transform(exog_unant, m), m) + if size(exog_unant) != size(exog_ant) + error("Anticipated and unanticipated data don't match.") + end else #=== prepare unanticipated data and plan (backward compatibility) ===# p_unant = p_ant @@ -211,7 +209,7 @@ function simulate(m::Model, x[sim, shkinds] .= 0 end - x[init, allvarinds] .= exog_ant[init, allvarinds] + x[init, allvarinds] = exog_ant[init, allvarinds] t0 = first(sim) T = last(sim) if expectation_horizon === nothing @@ -221,14 +219,8 @@ function simulate(m::Model, exog_inds = p_unant[t, Val(:inds)] psim = Plan(m, t:T) psim.exogenous .= p_ant.exogenous[begin+Int(t - t0):end, :] - if t == t0 - imaxiter = maxiter - itol = tol - elseif (maximum(abs, x[t, exog_inds] .- exog_unant[t, exog_inds]) < tol) #= && (psim[t0, Val(:inds)] == exog_inds) =# + if t !== t0 && (maximum(abs, x[t, exog_inds] - exog_unant[t, exog_inds]) < tol) #= && (psim[t0, Val(:inds)] == exog_inds) =# continue - else - imaxiter = 5 - itol = sqrt(tol) end setexog!(psim, t0, exog_inds) gdata = StackedTimeSolverData(m, psim, fctype, variant) @@ -238,11 +230,11 @@ function simulate(m::Model, xx = view(x, sim_range, :) assign_final_condition!(xx, exog_unant[sim_range, :], gdata) if verbose - @info "Simulating $(p_ant.range[t:T]) with $((itol, imaxiter))" # anticipate expectation_horizon gdata.FC + @info "Simulating $(p_ant.range[t:T]) with $((tol, maxiter))" # anticipate expectation_horizon gdata.FC end - converged = sim_nr!(xx, gdata, imaxiter, itol, verbose, sparse_solver, linesearch) + converged = sim_nr!(xx, gdata, maxiter, tol, verbose, linesearch) if warn_maxiter && !converged - @warn("Newton-Raphson reached maximum number of iterations (`imaxiter`).") + @warn("Newton-Raphson reached maximum number of iterations (`maxiter`).") end last_run = Workspace(; t, xx, gdata) end @@ -253,7 +245,7 @@ function simulate(m::Model, if verbose @info "Simulating $(p_ant.range[t:T]) with $((tol, maxiter))" # anticipate expectation_horizon gdata.FC end - converged = sim_nr!(xx, gdata, maxiter, tol, verbose, sparse_solver, linesearch) + converged = sim_nr!(xx, gdata, maxiter, tol, verbose, linesearch) if warn_maxiter && !converged @warn("Newton-Raphson reached maximum number of iterations (`maxiter`).") end @@ -287,7 +279,7 @@ function 
simulate(m::Model, if verbose @info "Simulating $(p_ant.range[t:T])" # anticipate expectation_horizon sdata.FC end - converged = sim_nr!(xx, sdata, maxiter, tol, verbose, sparse_solver, linesearch) + converged = sim_nr!(xx, sdata, maxiter, tol, verbose, linesearch) if warn_maxiter && !converged @warn("Newton-Raphson reached maximum number of iterations (`maxiter`).") end @@ -302,14 +294,14 @@ function simulate(m::Model, # these intermediate simulations are always with fcnatural, # have length equal to expectation_horizon and # only the first period is imposed - if (maximum(abs, x[t, exog_inds] .- exog_unant[t, exog_inds]) < tol) #= && (exog_inds == shkinds) =# + if (maximum(abs, x[t, exog_inds] - exog_unant[t, exog_inds]) < tol) #= && (exog_inds == shkinds) =# continue end psim1 = copy(psim) # the range of psim1 might extend beyond the range of p_ant. # we copy from p_ant as far as we have and copy the last line beyond that tmp_rng = t:min(t + expectation_horizon - 1, T) - psim1.exogenous[t0.+(0:length(tmp_rng)-1), :] .= p_ant.exogenous[tmp_rng, :] + psim1.exogenous[t0.+(0:length(tmp_rng)-1), :] = p_ant.exogenous[tmp_rng, :] # ===> must leave the psim1 plan empty beyond the end of p_ant # becasue we don't have data in exog_and for any exogenized @@ -335,7 +327,7 @@ function simulate(m::Model, if verbose @info("Simulating $(p_ant.range[t] .+ (0:expectation_horizon - 1))") # anticipate expectation_horizon sdata.FC end - converged = sim_nr!(xx, sdata, 5, sqrt(tol), verbose, sparse_solver, linesearch) + converged = sim_nr!(xx, sdata, maxiter, tol, verbose, linesearch) if warn_maxiter && !converged @warn("Newton-Raphson reached maximum number of iterations (`maxiter`).") end @@ -359,7 +351,7 @@ function simulate(m::Model, if verbose @info "Simulating $(p_ant.range[last_t + 1:T])" # anticipate expectation_horizon sdata.FC end - converged = sim_nr!(xx, sdata, maxiter, tol, verbose, sparse_solver, linesearch) + converged = sim_nr!(xx, sdata, maxiter, tol, verbose, linesearch) if warn_maxiter && !converged @warn("Newton-Raphson reached maximum number of iterations (`maxiter`).") end @@ -372,8 +364,8 @@ function simulate(m::Model, x = x[axes(exog_ant)...] x .= inverse_transform(x, m) if deviation - x[:, logvars] ./= baseline[:, logvars] - x[:, .!logvars] .-= baseline[:, .!logvars] + @views x[:, logvars] ./= baseline[:, logvars] + @views x[:, .!logvars] .-= baseline[:, .!logvars] end return x diff --git a/src/stackedtime/solverdata.jl b/src/stackedtime/solverdata.jl index af3a4b4..9e94267 100644 --- a/src/stackedtime/solverdata.jl +++ b/src/stackedtime/solverdata.jl @@ -68,7 +68,7 @@ function var_CiSc end """ assign_fc!(x::Vector, exog::Vector, vind::Int, sd::StackedTimeSolverData, fc::FinalCondition) - + Applying the final condition `fc` for variable with index `vind`. Exogenous data is provided in `exog` and stacked time solver data in `sd`. This function updates the solution vector `x` in place and returns `x`. @@ -84,7 +84,7 @@ function assign_fc! 
end ####### @inline function var_CiSc(::StackedTimeSolverData, ::ModelVariable, ::FCNone) - # No Jacobian correction + # No Jacobian correction return Dict{Int,Vector{Float64}}() end @@ -96,7 +96,7 @@ end ####### @inline function var_CiSc(::StackedTimeSolverData, ::ModelVariable, ::FCGiven) - # No Jacobian correction + # No Jacobian correction return Dict{Int,Vector{Float64}}() end @@ -111,7 +111,7 @@ end ####### @inline function var_CiSc(::StackedTimeSolverData, ::ModelVariable, ::FCMatchSSLevel) - # No Jacobian correction + # No Jacobian correction return Dict{Int,Vector{Float64}}() end @@ -319,7 +319,7 @@ function make_BI(JMAT::SparseMatrixCSC{Float64,Int}, II::AbstractVector{<:Abstra end StackedTimeSolverData(model::Model, plan::Plan, fctype::FinalCondition, variant::Symbol=model.options.variant) = StackedTimeSolverData(model, plan, setfc(model, fctype), variant) -function StackedTimeSolverData(model::Model, plan::Plan, fctype::AbstractVector{FinalCondition}, variant::Symbol=model.options.variant) +@timeit_debug timer function StackedTimeSolverData(model::Model, plan::Plan, fctype::AbstractVector{FinalCondition}, variant::Symbol=model.options.variant) evaldata = getevaldata(model, variant) var_to_idx = ModelBaseEcon.get_var_to_idx(model) @@ -368,8 +368,9 @@ function StackedTimeSolverData(model::Model, plan::Plan, fctype::AbstractVector{ # Prep the Jacobian matrix neq = 0 # running counter of equations added to matrix # Model equations are the same for each sim period, just shifted according to t - Jblock = [ti + NT * (var_to_idx[var] - 1) for eqn in equations for (var, ti) in keys(eqn.tsrefs)] - Iblock = [i for (i, eqn) in enumerate(equations) for _ in eqn.tsrefs] + Jblock = [ti + NT * (var_to_idx[var] - 1) for (_, eqn) in equations for (var, ti) in keys(eqn.tsrefs)] + Iblock = [i for (i, (_, eqn)) in enumerate(equations) for _ in eqn.tsrefs] + Tblock = -model.maxlag:model.maxlead for t in sim push!(TT, t .+ Tblock) @@ -394,7 +395,7 @@ function StackedTimeSolverData(model::Model, plan::Plan, fctype::AbstractVector{ # We also need the times of the final conditions push!(TT, term) - # + # exog_mask = falses(nunknowns * NT) # Initial conditions are set as exogenous values exog_mask[vec(LI[init, :])] .= true @@ -412,7 +413,7 @@ function StackedTimeSolverData(model::Model, plan::Plan, fctype::AbstractVector{ need_SS ? transform(steadystatearray(model, plan), model) : zeros(0, 0), islog.(model.allvars) .| isneglog.(model.allvars), islin.(model.allvars), evaldata, exog_mask, fc_mask, solve_mask, - Ref{Any}(nothing), getoption(model, :factorization, :lu)) + Ref{Any}(nothing), getoption(model, :factorization, :default)) return update_plan!(sd, model, plan; changed=true) end @@ -420,7 +421,7 @@ end """ assign_exog_data!(x::Matrix, exog::Matrix, sd::StackedTimeSolverData) -Assign the exogenous points into `x` according to the plan with which `sd` was created using +Assign the exogenous points into `x` according to the plan with which `sd` was created using exogenous data from `exog`. Also call [`assign_final_condition!`](@ref). !!! warning @@ -436,7 +437,7 @@ end """ assign_final_condition!(x::Matrix, exog::Matrix, sd::StackedTimeSolver) -Assign the final conditions into `x`. The final condition types for the different variables of the model +Assign the final conditions into `x`. The final condition types for the different variables of the model are stored in the the solver data `sd`. `exog` is used for [`fcgiven`](@ref). !!! 
warning @@ -455,7 +456,7 @@ end Compute the residual of the stacked time system at the given `point`. R is updated in place and returned. """ -function stackedtime_R!(res::AbstractArray{Float64,1}, point::AbstractArray{Float64}, exog_data::AbstractArray{Float64}, sd::StackedTimeSolverData) +@timeit_debug timer function stackedtime_R!(res::AbstractArray{Float64,1}, point::AbstractArray{Float64}, exog_data::AbstractArray{Float64}, sd::StackedTimeSolverData) @assert size(point) == size(exog_data) == (sd.NT, sd.NU) # point = reshape(point, sd.NT, sd.NU) # exog_data = reshape(exog_data, sd.NT, sd.NU) @@ -466,7 +467,7 @@ function stackedtime_R!(res::AbstractArray{Float64,1}, point::AbstractArray{Floa return res end -#= disable +#= disable # this is not necessary with log-transformed variables # we keep it because it might be necessary for other transformations in the future. @@ -573,7 +574,7 @@ function update_CiSc!(x, sd, ::Val{fcnatural}) end end return nothing -end +end =# @@ -583,7 +584,7 @@ end Compute the residual and Jacobian of the stacked time system at the given `point`. """ -function stackedtime_RJ(point::AbstractArray{Float64}, exog_data::AbstractArray{Float64}, sd::StackedTimeSolverData; +@timeit_debug timer function stackedtime_RJ(point::AbstractArray{Float64}, exog_data::AbstractArray{Float64}, sd::StackedTimeSolverData; debugging=false) nunknowns = sd.NU @assert size(point) == size(exog_data) == (sd.NT, nunknowns) @@ -615,20 +616,87 @@ function stackedtime_RJ(point::AbstractArray{Float64}, exog_data::AbstractArray{ else JJ = JAC[:, sd.solve_mask] end - try - # compute lu decomposition of the active part of J and cache it. - sd.J_factorized[] = sd.factorization == :qr ? qr(JJ) : lu(JJ) - catch e - if e isa SingularException - @error("The system is underdetermined with the given set of equations and final conditions.") - end - debugging && return RES, deepcopy(JJ) - rethrow() - end + # compute factorization of the active part of J and cache it. + sf_factorize!(Val(sd.factorization), sd.J_factorized, JJ) end return RES, sd.J_factorized[] end +function sf_factorize!(v::Val, Rf::Ref{Any}, A::SparseMatrixCSC) + if isnothing(Rf[]) + Rf[] = sf_prepare(v, A) + else + Rf[] = sf_factor!(Rf[], A) + end +end + + +#= KristofferC' implemetation (delete eventually, but keep it for now, just in case) + +using Pardiso + +mutable struct PardisoFactorization + ps::MKLPardisoSolver + J::SparseMatrixCSC + PardisoFactorization(ps::MKLPardisoSolver) = new(ps) +end + +@timeit_debug timer function pardiso_init() + ps = MKLPardisoSolver() + set_matrixtype!(ps, Pardiso.REAL_NONSYM) + pardisoinit(ps) + fix_iparm!(ps, :N) + # See https://www.intel.com/content/www/us/en/develop/documentation/onemkl-developer-reference-c/top/sparse-solver-routines/onemkl-pardiso-parallel-direct-sparse-solver-iface/pardiso-iparm-parameter.html + set_iparm!(ps, 2, 2) # The parallel (OpenMP) version of the nested dissection algorithm. 
+ psf = PardisoFactorization(ps) + finalizer(psf) do x + set_phase!(x.ps, Pardiso.RELEASE_ALL) + pardiso(x.ps) + end + return psf +end + + +# See https://github.com/JuliaSparse/Pardiso.jl/blob/master/examples/exampleunsym.jl +@timeit_debug timer function pardiso_factorize(JJ; psf::Union{Nothing,PardisoFactorization}) + reuse_ps = psf !== nothing + if psf === nothing + psf = pardiso_init() + end + + ps = psf.ps + psf.J = get_matrix(ps, JJ, :N) + + # No need to run the analysis phase if the sparsity pattern is unchanged + # and we reuse the same solver object + if !reuse_ps + set_phase!(ps, Pardiso.ANALYSIS) + pardiso(ps, psf.J, Float64[]) + end + set_phase!(ps, Pardiso.NUM_FACT) + pardiso(ps, psf.J, Float64[]) + return psf +end + +@timeit_debug timer function pardiso_solve!(ps::PardisoFactorization, x::AbstractArray) + ps, J = ps.ps, ps.J + set_phase!(ps, Pardiso.SOLVE_ITERATIVE_REFINE) + X = similar(x) # Solution is stored in X + pardiso(ps, X, J, x) + copy!(x, X) +end + + +@timeit_debug timer function _factorize(JJ; psf) + if USE_PARDISO_FOR_LU + return pardiso_factorize(JJ; psf) + else + return lu(JJ) + end +end + +=# + """ assign_update_step!(x::Array, lambda, dx, sd::StackedTimeSolverData) diff --git a/src/stackedtime/sparse.jl b/src/stackedtime/sparse.jl new file mode 100644 index 0000000..6cbbbde --- /dev/null +++ b/src/stackedtime/sparse.jl @@ -0,0 +1,195 @@ +################################################################################## +# This file is part of StateSpaceEcon.jl +# BSD 3-Clause License +# Copyright (c) 2020-2023, Bank of Canada +# All rights reserved. +################################################################################## + + +# selected sparse linear algebra library is a Symbol +const sf_libs = ( + :default, # + :umfpack, # use Julia's standard library (UMFPACK) + :pardiso, # use Pardiso - the one included with MKL +) + +global sf_default = :umfpack + +""" + use_umfpack() + +Set the default sparse factorization library to UMFPACK (the one used in Julia's +standard library). See also [`use_pardiso`](@ref). + +""" +@inline use_umfpack() = (global sf_default = :umfpack; nothing) + +""" + use_umfpack!(model) + +Instruct the stacked-time solver to use Pardiso with this model. See also +[`use_pardiso!`](@ref). +""" +@inline use_umfpack!(m::Model) = (m.options.factorization = :umfpack; m.options) +export use_umfpack, use_umfpack! + +""" + use_pardiso() + +Set the default sparse factorization library to Pardiso. See also +[`use_umfpack`](@ref). +""" +@inline use_pardiso() = (global sf_default = :pardiso; nothing) + +""" + use_pardiso!(model) + +Instruct the stacked-time solver to use Pardiso with this model. +""" +@inline use_pardiso!(m::Model) = (m.options.factorization = :pardiso; m.options) +export use_pardiso, use_pardiso! + +### API + +# a function to initialize a Factorization instance +# this is also a good place to do the symbolic analysis +# sf_prepare(A::SparseMatrixCSC, sparse_lib::Symbol=:default) = sf_prepare(Val(sparse_lib), A) +# sf_prepare(::Val{S}, args...) where {S} = throw(ArgumentError("Unknown sparse library $S. 
Try one of $(sf_libs).")) + +# a function to calculate the numerical factors +# sf_factor!(f::Factorization, A::SparseMatrixCSC) = throw(ArgumentError("Unknown factorization type $(typeof(f)).")) + +# a function to solve the linear system +# sf_solve!(f::Factorization, x::AbstractArray) = throw(ArgumentError("Unknown factorization type $(typeof(f)).")) + + +########################################################################### +### Default (UMFPACK) +sf_prepare(::Val{:default}, A::SparseMatrixCSC) = sf_prepare(Val(sf_default), A) + +function _sf_same_sparse_pattern(A::SparseMatrixCSC, B::SparseMatrixCSC) + return (A.m == B.m) && (A.n == B.n) && (A.colptr == B.colptr) && (A.rowval == B.rowval) +end + +macro _sf_check_factorize(exception, expression) + error = gensym("error") + return esc(quote + try + $expression + catch $error + if $error isa $exception + @error("The system is underdetermined with the given set of equations and final conditions.") + end + rethrow() + end + end) +end + +mutable struct LUFactorization{Tv<:Real} <: Factorization{Tv} + F::SuiteSparse.UMFPACK.UmfpackLU{Tv,Int} + A::SparseMatrixCSC{Tv,Int} +end + +@timeit_debug timer "sf_prepare_lu" function sf_prepare(::Val{:umfpack}, A::SparseMatrixCSC) + Tv = eltype(A) + F = @_sf_check_factorize(SingularException, @timeit_debug timer "_lu_full" lu(A)) + return LUFactorization{Tv}(F, A) +end + +@timeit_debug timer "sf_factor!_lu" function sf_factor!(f::LUFactorization, A::SparseMatrixCSC) + _A = f.A + if _sf_same_sparse_pattern(A, _A) + if A.nzval ≈ _A.nzval + # matrix hasn't changed significantly + nothing + else + # sparse pattern is the same, different numbers + f.A = A + @_sf_check_factorize(SingularException, @timeit_debug timer "_lu_num" lu!(f.F, A)) + end + else + # totally new matrix, start over + f.A = A + f.F = @_sf_check_factorize(SingularException, @timeit_debug timer "_lu_full" lu(A)) + end + return f +end + +@timeit_debug timer "sf_solve!_lu" sf_solve!(f::LUFactorization, x::AbstractArray) = (ldiv!(f.F, x); x) + +########################################################################### +### Pardiso (thanks to @KristofferC) + +# See https://github.com/JuliaSparse/Pardiso.jl/blob/master/examples/exampleunsym.jl +# See https://www.intel.com/content/www/us/en/develop/documentation/onemkl-developer-reference-c/top/sparse-solver-routines/onemkl-pardiso-parallel-direct-sparse-solver-iface/pardiso-iparm-parameter.html + +mutable struct PardisoFactorization{Tv<:Real} <: Factorization{Tv} + ps::MKLPardisoSolver + A::SparseMatrixCSC{Tv,Int} +end + +@timeit_debug timer "sf_prepare_par" function sf_prepare(::Val{:pardiso}, A::SparseMatrixCSC) + Tv = eltype(A) + ps = MKLPardisoSolver() + set_matrixtype!(ps, Pardiso.REAL_NONSYM) + pardisoinit(ps) + fix_iparm!(ps, :N) + set_iparm!(ps, 2, 3) # Select algorithm + pf = PardisoFactorization{Tv}(ps, get_matrix(ps, A, :N)) + finalizer(pf) do x + set_phase!(x.ps, Pardiso.RELEASE_ALL) + pardiso(x.ps) + end + _pardiso_full!(pf) + return pf +end + +@timeit_debug timer "_pardso_full" function _pardiso_full!(pf::PardisoFactorization) + # run the analysis phase + ps = pf.ps + @_sf_check_factorize Union{Pardiso.PardisoException,Pardiso.PardisoPosDefException} begin + set_phase!(ps, Pardiso.ANALYSIS_NUM_FACT) + pardiso(ps, pf.A, Float64[]) + end + return pf +end + +@timeit_debug timer "_pardso_num" function _pardiso_numeric!(pf::PardisoFactorization) + # run the analysis phase + ps = pf.ps + @_sf_check_factorize Union{Pardiso.PardisoException,Pardiso.PardisoPosDefException} begin + 
set_phase!(ps, Pardiso.NUM_FACT) + pardiso(ps, pf.A, Float64[]) + end + return pf +end + +@timeit_debug timer "sf_factor!_par" function sf_factor!(pf::PardisoFactorization, A::SparseMatrixCSC) + A = get_matrix(pf.ps, A, :N)::typeof(A) + _A = pf.A + if _sf_same_sparse_pattern(A, _A) + if A.nzval ≈ _A.nzval + # same matrix, factorization hasn't changed + nothing + else + # same sparsity pattern, but different numbers + pf.A = A + _pardiso_numeric!(pf) + end + else + # totally new matrix, start over + pf.A = A + _pardiso_full!(pf) + end + return pf +end + +@timeit_debug timer "sf_solve!_par" function sf_solve!(pf::PardisoFactorization, x::AbstractArray) + ps = pf.ps + @_sf_check_factorize Union{Pardiso.PardisoException,Pardiso.PardisoPosDefException} begin + set_phase!(ps, Pardiso.SOLVE_ITERATIVE_REFINE) + pardiso(ps, x, pf.A, copy(x)) + end + return x +end + diff --git a/src/stackedtime/stoch_simulate.jl b/src/stackedtime/stoch_simulate.jl index bea08b8..6da7a1e 100644 --- a/src/stackedtime/stoch_simulate.jl +++ b/src/stackedtime/stoch_simulate.jl @@ -8,7 +8,8 @@ # version of simulate for stochastic simulations # NOTE: This requires TimeSeriesEcon v0.5.1 where map(func, ::Workspace) returns a Workspace. -_make_result_container(shocks, basedata) = map(_ -> copy(basedata), shocks) +_make_result_container(shocks::Workspace, basedata)::Workspace = map(_ -> copy(basedata), shocks) +_make_result_container(shocks::Vector, basedata)::Vector{MaybeSimData} = convert(Vector{MaybeSimData}, map(_ -> copy(basedata), shocks)) function stoch_simulate(m::Model, p::Plan, baseline::SimData, shocks; check::Bool=false, @@ -19,11 +20,14 @@ function stoch_simulate(m::Model, p::Plan, baseline::SimData, shocks; maxiter::Int=m.options.maxiter, fctype=getoption(m, :fctype, fcgiven), #= Newton-Raphson options =# - sparse_solver::Function=\, linesearch::Bool=getoption(m, :linesearch, false), warn_maxiter::Bool=getoption(getoption(m, :warn, Options()), :maxiter, false) ) + if isempty(shocks) + return _make_result_container(shocks, baseline); + end + # get the range of all shocks realizations (this will be the full simulation range) shkrng = mapreduce(rangeof, union, values(shocks)) @@ -47,7 +51,7 @@ function stoch_simulate(m::Model, p::Plan, baseline::SimData, shocks; tinds = Plans._offset(p, rangeof(val)) vind = p.varshks[shk] if !all(p.exogenous[tinds, vind]) - throw(ArgumentError("$shk in shocks realization $key is endogenous in the given plan.")) + @warn "$shk in shocks[$key] is endogenous in the given plan." 
end end end @@ -122,7 +126,7 @@ function stoch_simulate(m::Model, p::Plan, baseline::SimData, shocks; # solve try - converged = sim_nr!(eₜ, dₜ, maxiter, tol, verbose, sparse_solver, linesearch) + converged = sim_nr!(eₜ, dₜ, maxiter, tol, verbose, linesearch) if warn_maxiter && !converged @warn("Newton-Raphson reached maximum number of iterations for $skey at $(t:sim_end)") end @@ -137,9 +141,10 @@ function stoch_simulate(m::Model, p::Plan, baseline::SimData, shocks; end # time loop # strip auxvar columns and inverse transform + have_auxs = (m.nauxs > 0) for (key, result) in pairs(results) isfailed(result) && continue - if m.nauxs > 0 + if have_auxs result = result[:, m.varshks] end results[key] = inverse_transform(result, m) diff --git a/src/steadystate/diagnose.jl b/src/steadystate/diagnose.jl index ac1d07b..ba96b5f 100644 --- a/src/steadystate/diagnose.jl +++ b/src/steadystate/diagnose.jl @@ -27,7 +27,7 @@ Standard options (default values from `model.options`) """ function check_sstate(model::Model; verbose::Bool = model.options.verbose, tol::Float64 = model.options.tol) # if parameters changed, see that constraints use the latest - for eqn in model.sstate.constraints + for eqn in values(model.sstate.constraints) ModelBaseEcon._update_eqn_params!(eqn.eval_resid, model.parameters) end R, J = global_SS_RJ(model.sstate.values, model) diff --git a/src/steadystate/global.jl b/src/steadystate/global.jl index 2883ba7..33e8923 100644 --- a/src/steadystate/global.jl +++ b/src/steadystate/global.jl @@ -14,11 +14,11 @@ function inadmissible_error(eqind, eqn, point, val) varsargs = (; (v => a for (v, a) in zip(vars, args))...) params = (; _getparams(eqn)...) if typeof(val) <: AbstractArray - @error "Singular gradient in steady state equation $eqind: $eqn" params varsargs val - error("Singular gradient in steady state equation $eqind: $eqn") + @error "Singular gradient in steady state equation $(eqn.name) => $eqn" params varsargs val + error("Singular gradient in steady state equation $(eqn.name) => $eqn") else - @error "Inadmissible point in steady state equation $eqind: $eqn" params varsargs val - error("Inadmissible point in steady state equation $eqind: $eqn") + @error "Inadmissible point in steady state equation $(eqn.name)) => $eqn" params varsargs val + error("Inadmissible point in steady state equation $(eqn.name) => $eqn") end end @@ -59,7 +59,7 @@ end When a model is given, we compute the residual of the entire steady state system. """ -global_SS_R!(resid::AbstractVector{Float64}, point::AbstractVector{Float64}, model::Model) = global_SS_R!(resid, point, ModelBaseEcon.alleqns(model.sstate)) +global_SS_R!(resid::AbstractVector{Float64}, point::AbstractVector{Float64}, model::Model) = global_SS_R!(resid, point, values(ModelBaseEcon.alleqns(model.sstate))) @assert precompile(global_SS_R!, (Vector{Float64}, Vector{Float64}, Model)) """ @@ -110,5 +110,5 @@ end When a model is given, we compute the residual of the entire steady state system. 
""" -global_SS_RJ(point::Vector{Float64}, model::Model) = global_SS_RJ(point, ModelBaseEcon.alleqns(model.sstate)) +global_SS_RJ(point::Vector{Float64}, model::Model) = global_SS_RJ(point, values(ModelBaseEcon.alleqns(model.sstate))) @assert precompile(global_SS_RJ, (Vector{Float64}, Model)) diff --git a/src/steadystate/initial.jl b/src/steadystate/initial.jl index f38bea0..4dc104b 100644 --- a/src/steadystate/initial.jl +++ b/src/steadystate/initial.jl @@ -75,7 +75,7 @@ function _do_update_auxvars_presolve!(model::Model, verbose::Bool, method::Symbo end # presolve only the steady state constraints (to apply the values of exog variables) if !isempty(ss.constraints) - for eqn in ss.constraints + for eqn in values(ss.constraints) ModelBaseEcon._update_eqn_params!(eqn.eval_resid, model.parameters) end presolve_sstate!(ss.constraints, ss.mask, ss.values; model.tol, verbose, method) diff --git a/src/steadystate/presolve.jl b/src/steadystate/presolve.jl index 427575c..7881864 100644 --- a/src/steadystate/presolve.jl +++ b/src/steadystate/presolve.jl @@ -1,10 +1,12 @@ ################################################################################## # This file is part of StateSpaceEcon.jl # BSD 3-Clause License -# Copyright (c) 2020-2022, Bank of Canada +# Copyright (c) 2020-2023, Bank of Canada # All rights reserved. ################################################################################## +using ModelBaseEcon.OrderedCollections + """ presolve_sstate!(model; ) presolve_sstate!(model, mask, values; ) @@ -24,7 +26,7 @@ loop. solved for. `mask` and `values` are both input and output data to the algorithm. Any values -that are successfully presolved are updated in place, and their `mask` enties +that are successfully pre-solved are updated in place, and their `mask` entries are set to `true`. ### Options @@ -51,13 +53,13 @@ function solve1d(sseqn, vals, ind, ::Val{:newton}, tol = 1e-12, maxiter = 5) end function _presolve_equations!(eqns, mask, values, method, verbose, tol) - eqns_solved = falses(size(eqns)) - eqns_resid = zeros(size(eqns)) + eqns_solved = falses(length(eqns)) + eqns_resid = zeros(length(eqns)) retval = false # return value: true if any mask changed, false otherwise updated = true # keep track if any mask changed within the loop while updated && !all(eqns_solved) updated = false # keep track if anything changes this outer iteration - for (eqn_idx, sseqn) in enumerate(eqns) + for (eqn_idx, (eqn_key, sseqn)) in enumerate(eqns) eqns_solved[eqn_idx] && continue # mask is true for variables that are already solved unsolved = .!mask[sseqn.vinds] @@ -69,7 +71,7 @@ function _presolve_equations!(eqns, mask, values, method, verbose, tol) # mark it solved either way, but issue a warning if residual is not zero eqns_solved[eqn_idx] = true if verbose && abs(R) > 100tol - @warn "Equation $eqn_idx has residual $R:\n $sseqn" + @warn "Equation $eqn_key has residual $R:\n $sseqn" end elseif nunsolved == 1 # only one variable left unsolved. 
call the 1d solver @@ -83,11 +85,11 @@ function _presolve_equations!(eqns, mask, values, method, verbose, tol) values[sseqn.vinds[ind]] = vals[ind] retval = updated = mask[sseqn.vinds[ind]] = true if verbose - @info "Presolved equation $eqn_idx for $(sseqn.vsyms[ind]) = $(vals[ind])\n $sseqn" + @info "Presolved equation $eqn_key for $(sseqn.vsyms[ind]) = $(vals[ind])\n $sseqn" end else if verbose - @info "Failed to presolve $eqn_idx for $(sseqn.vsyms[ind])\n $sseqn" + @info "Failed to presolve $eqn_key for $(sseqn.vsyms[ind])\n $sseqn" end end else @@ -106,10 +108,10 @@ presolve_sstate!(model::Model; kwargs...) = presolve_sstate!(model::Model, mask::AbstractVector{Bool}, values::AbstractVector{Float64}; verbose = model.verbose, tol = model.tol, _1dsolver = :bisect, method = _1dsolver) = _presolve_equations!(ModelBaseEcon.alleqns(model.sstate), mask, values, method, verbose, tol) -presolve_sstate!(eqns::Vector{SteadyStateEquation}, mask::AbstractVector{Bool}, values::AbstractVector{Float64}; +presolve_sstate!(eqns::OrderedDict{Symbol,SteadyStateEquation}, mask::AbstractVector{Bool}, values::AbstractVector{Float64}; verbose = false, tol = 1e-12, _1dsolver = :bisect, method = _1dsolver) = _presolve_equations!(eqns, mask, values, method, verbose, tol) @assert precompile(presolve_sstate!, (Model, Vector{Bool}, Vector{Float64})) -@assert precompile(presolve_sstate!, (Vector{SteadyStateEquation}, Vector{Bool}, Vector{Float64})) +@assert precompile(presolve_sstate!, (OrderedDict{Symbol,SteadyStateEquation}, Vector{Bool}, Vector{Float64})) diff --git a/src/steadystate/solverdata.jl b/src/steadystate/solverdata.jl index e716af8..5f8f01d 100644 --- a/src/steadystate/solverdata.jl +++ b/src/steadystate/solverdata.jl @@ -102,7 +102,7 @@ function SolverData(model::Model; presolve::Bool=true, ) # constructor where we're solving all equations for all variables local sstate = model.sstate - local alleqns = ModelBaseEcon.alleqns(sstate) + local alleqns = collect(values(ModelBaseEcon.alleqns(sstate))) local neqns = length(alleqns) local nvars = length(sstate.values) sd = SolverData(copy(model.sstate.values), Vector{Float64}(undef, neqns), # point and resid diff --git a/src/steadystate/sssolve.jl b/src/steadystate/sssolve.jl index 38c25cf..aebae15 100644 --- a/src/steadystate/sssolve.jl +++ b/src/steadystate/sssolve.jl @@ -43,7 +43,7 @@ function sssolve!(model::Model; # !! make sure constraints use the latest parameter values # !! before calling SolverData constructor, because it computes the initial residual - for eqn in ss.constraints + for eqn in values(ss.constraints) ModelBaseEcon._update_eqn_params!(eqn.eval_resid, model.parameters) end diff --git a/test/dynss.jl b/test/dynss.jl index bd9745a..a4cd834 100644 --- a/test/dynss.jl +++ b/test/dynss.jl @@ -1,12 +1,12 @@ ################################################################################## # This file is part of StateSpaceEcon.jl # BSD 3-Clause License -# Copyright (c) 2020-2022, Bank of Canada +# Copyright (c) 2020-2023, Bank of Canada # All rights reserved. 
################################################################################## @testset "dynss" begin - m = deepcopy(S1.model) + m = getS1() # reset parameters m.α = 0.5 m.β = 0.6 @@ -48,7 +48,7 @@ end @testset "dynss2" begin - m = deepcopy(S2.model) + m = getS2() m.α = 0.4 @@ -95,7 +95,7 @@ end @testset "dynss+" begin # tests to make sure that changes in parameter values are applied to steady state constraints correctly - local m = PC.model + local m = deepcopy(PC.model) m.sstate.values .= 0.0 @test begin sssolve!(m, presolve=false) diff --git a/test/logsimtests.jl b/test/logsimtests.jl index 06986a0..478ff26 100644 --- a/test/logsimtests.jl +++ b/test/logsimtests.jl @@ -1,7 +1,7 @@ ################################################################################## # This file is part of StateSpaceEcon.jl # BSD 3-Clause License -# Copyright (c) 2020-2022, Bank of Canada +# Copyright (c) 2020-2023, Bank of Canada # All rights reserved. ################################################################################## @@ -12,8 +12,8 @@ @shocks m Ex Ey @parameters m rate = 1.015 @equations m begin - @log X[t] = rate * X[t - 1] + Ex[t] - Y[t] = Y[t - 1] + rate + Ey[t] + @log X[t] = rate * X[t-1] + Ex[t] + Y[t] = Y[t-1] + rate + Ey[t] end @initialize m @@ -22,10 +22,10 @@ clear_sstate!(m) sssolve!(m) - @test m.sstate.X.level ≈ 1.0 + @test m.sstate.X.level ≈ 1.0 @test m.sstate.X.slope ≈ m.rate - @test m.sstate.Y.level + 1 ≈ 0.0 + 1 - @test m.sstate.Y.slope + 1 ≈ m.rate + 1 + @test m.sstate.Y.level + 1 ≈ 0.0 + 1 + @test m.sstate.Y.slope + 1 ≈ m.rate + 1 p = Plan(m, 1U:10U) @@ -39,7 +39,7 @@ @test k[s] == d[s] end @test k.X[1U] ≈ 1.0 - @test k.Y[1U] + 1 ≈ 1.0 + @test k.Y[1U] + 1 ≈ 1.0 @test pct(k.X, -1) ≈ TSeries(p.range, (m.rate - 1) * 100) @test diff(k.Y) ≈ TSeries(p.range, m.rate) @@ -65,8 +65,8 @@ @shocks m Ex Ey @parameters m rate = 1.015 @equations m begin - @log X[t + 1] = rate * X[t] + Ex[t] - Y[t + 1] = Y[t] + rate + Ey[t] + @log X[t+1] = rate * X[t] + Ex[t] + Y[t+1] = Y[t] + rate + Ey[t] end @initialize m @@ -77,8 +77,8 @@ sssolve!(m) @test m.sstate.X.level + 1 ≈ 1.0 + 1 @test m.sstate.X.slope + 1 ≈ m.rate + 1 - @test m.sstate.Y.level + 1 ≈ 0.0 + 1 - @test m.sstate.Y.slope + 1 ≈ m.rate + 1 + @test m.sstate.Y.level + 1 ≈ 0.0 + 1 + @test m.sstate.Y.slope + 1 ≈ m.rate + 1 p = Plan(m, 1U:10U) @@ -119,7 +119,7 @@ # update exogenous data accordingly ed[firstdate(p)] = k[firstdate(p)] - ed[lastdate(p),1:2] .= rand(2) + ed[lastdate(p), 1:2] .= rand(2) @test simulate(m, p, ed, fctype=fcslope) ≈ k @test simulate(m, p, ed, fctype=fcnatural) ≈ k diff --git a/test/misc.jl b/test/misc.jl index 9e4e1fe..6a50575 100644 --- a/test/misc.jl +++ b/test/misc.jl @@ -1,7 +1,7 @@ ################################################################################## # This file is part of StateSpaceEcon.jl # BSD 3-Clause License -# Copyright (c) 2020-2022, Bank of Canada +# Copyright (c) 2020-2023, Bank of Canada # All rights reserved. ################################################################################## @@ -13,8 +13,8 @@ end @testset "fcset" begin - m = deepcopy(E7A.model) - + m = getE7A() + empty!(m.sstate.constraints) @steadystate m lc = 1 @steadystate m linv = 1 diff --git a/test/modelchanges.jl b/test/modelchanges.jl new file mode 100644 index 0000000..e8789ba --- /dev/null +++ b/test/modelchanges.jl @@ -0,0 +1,530 @@ +# This set of tests reruns the existing test, but uses example models which have been constructed +# from the existing sample models. 
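# Roughly, each pass of the loop at the bottom of this file rebuilds every sample model
# from a randomly chosen, partially deleted one and reruns the shared test files against
# the result. A sketch of one round trip, using only helpers defined in this file:
#
#     m = deconstructedModel()    # a random sample model with its contents @delete-d
#     constructE1!(m)             # re-add parameters, variables, shocks and equations, then @reinitialize
#     # getE1() is then redefined to return such a model before including simtests.jl, etc.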
+ +function constructE1!(model::Model) + model.flags.linear = true + model.options.substitutions = false + + @parameters model begin + α = 0.5 + β = 0.5 + end + + @variables model y + + @shocks model y_shk + + @autoexogenize model y = y_shk + + @equations model begin + y[t] = α * y[t - 1] + β * y[t + 1] + y_shk[t] + end + + @reinitialize model + + return model +end + +function constructE2!(model::Model) + model.flags.linear = true + model.options.substitutions = false + + # add parameters + @parameters model begin + cp = [0.5, 0.02] + cr = [0.75, 1.5, 0.5] + cy = [0.5, -0.02] + end + + # add variables: a list of symbols + @variables model begin + pinf + rate + ygap + end + + # add shocks: a list of symbols + @shocks model begin + pinf_shk + rate_shk + ygap_shk + end + + # autoexogenize: define a mapping of variables to shocks + @autoexogenize model begin + pinf = pinf_shk + rate = rate_shk + ygap = ygap_shk + end + + # add equations: a sequence of expressions, such that + # use y[t+1] for expectations/leads + # use y[t] for contemporaneous + # use y[t-1] for lags + # each expression must have exactly one "=" + @equations model begin + pinf[t]=cp[1]*pinf[t-1]+(.98-cp[1])*pinf[t+1]+cp[2]*ygap[t]+pinf_shk[t] + rate[t]=cr[1]*rate[t-1]+(1-cr[1])*(cr[2]*pinf[t]+cr[3]*ygap[t])+rate_shk[t] + ygap[t]=cy[1]*ygap[t-1]+(.98-cy[1])*ygap[t+1]+cy[2]*(rate[t]-pinf[t+1])+ygap_shk[t] + end + + # call initialize! to construct internal structures + @reinitialize model + return model +end + +function constructE3!(model::Model) + model.flags.linear = true + model.options.substitutions = false + + # add parameters + @parameters model begin + cp = [0.5, 0.02] + cr = [0.75, 1.5, 0.5] + cy = [0.5, -0.02] + end + + # add variables: a list of symbols + @variables model begin + pinf + rate + ygap + end + + # add shocks: a list of symbols + @shocks model begin + pinf_shk + rate_shk + ygap_shk + end + + # autoexogenize: define a mapping of variables to shocks + @autoexogenize model begin + pinf = pinf_shk + rate = rate_shk + ygap = ygap_shk + end + + # add equations: a sequence of expressions, such that + # use y[t+1] for expectations/leads + # use y[t] for contemporaneous + # use y[t-1] for lags + # each expression must have exactly one "=" + @equations model begin + pinf[t]=cp[1]*pinf[t-1]+0.3*pinf[t+1]+0.05*pinf[t+2]+0.05*pinf[t+3]+cp[2]*ygap[t]+pinf_shk[t] + rate[t]=cr[1]*rate[t-1]+(1-cr[1])*(cr[2]*pinf[t]+cr[3]*ygap[t])+rate_shk[t] + ygap[t]=cy[1]/2*ygap[t-2]+cy[1]/2*ygap[t-1]+(.98-cy[1])*ygap[t+1]+cy[2]*(rate[t]-pinf[t+1])+ygap_shk[t] + end + + # call initialize! 
to construct internal structures + @reinitialize model + return model +end + +function constructE3nl!(model::Model) + model.flags.linear = false + model.options.maxiter = 200 + model.options.substitutions = false + + @parameters model begin + cp = [0.5, 0.02] + cr = [0.75, 1.5, 0.5] + cy = [0.5, -0.02] + end + + @variables model begin + pinf + rate + ygap + end + + @shocks model begin + pinf_shk + rate_shk + ygap_shk + end + + @autoexogenize model begin + pinf = pinf_shk + rate = rate_shk + ygap = ygap_shk + end + + @equations model begin + exp(pinf[t])=exp(cp[1]*pinf[t-1]+0.3*pinf[t+1]+0.05*pinf[t+2]+0.05*pinf[t+3]+cp[2]*ygap[t]+pinf_shk[t]) + rate[t]=cr[1]*rate[t-1]+(1-cr[1])*(cr[2]*pinf[t]+cr[3]*ygap[t])+rate_shk[t] + ygap[t]=cy[1]/2*ygap[t-2]+cy[1]/2*ygap[t-1]+(.98-cy[1])*ygap[t+1]+cy[2]*(rate[t]-pinf[t+1])+ygap_shk[t] + end + + @reinitialize model + return model +end + +function constructE6!(model::Model) + model.flags.linear = true + model.options.substitutions = false + + @parameters model begin + p_dlp = 0.0050000000000000 + p_dly = 0.0045000000000000 + end + + @variables model begin + dlp; dly; dlyn; lp; ly; lyn + end + + @shocks model begin + dlp_shk; dly_shk + end + + @autoexogenize model begin + ly = dly_shk + lp = dlp_shk + end + + @equations model begin + dly[t]=(1-0.2-0.2)*p_dly+0.2*dly[t-1]+0.2*dly[t+1]+dly_shk[t] + dlp[t]=(1-0.5)*p_dlp+0.1*dlp[t-2]+0.1*dlp[t-1]+0.1*dlp[t+1]+0.1*dlp[t+2]+0.1*dlp[t+3]+dlp_shk[t] + dlyn[t]=dly[t]+dlp[t] + ly[t]=ly[t-1]+dly[t] + lp[t]=lp[t-1]+dlp[t] + lyn[t]=lyn[t-1]+dlyn[t] + end + + @reinitialize model + return model +end + +function constructE7!(model::Model) + model.flags.linear = false + model.options.substitutions = true + + @parameters model begin + delta = 0.1000000000000000 + p_dlc_ss = 0.0040000000000000 + p_dlinv_ss = 0.0040000000000000 + p_growth = 0.0040000000000000 + end + + @variables model begin + dlc; dlinv; dly; lc; linv; + lk; ly; + end + + @shocks model begin + dlc_shk; dlinv_shk; + end + + @autoexogenize model begin + lc = dlc_shk + linv = dlinv_shk + end + + @equations model begin + dlc[t]=(1-0.2-0.2)*p_dlc_ss+0.2*dlc[t-1]+0.2*dlc[t+1]+dlc_shk[t] + dlinv[t]=(1-0.5)*p_dlinv_ss+0.1*dlinv[t-2]+0.1*dlinv[t-1]+0.1*dlinv[t+1]+0.1*dlinv[t+2]+0.1*dlinv[t+3]+dlinv_shk[t] + lc[t]=lc[t-1]+dlc[t] + linv[t]=linv[t-1]+dlinv[t] + ly[t]=log(exp(lc[t])+exp(linv[t])) + dly[t]=ly[t]-ly[t-1] + lk[t]=log((1-delta)*exp(lk[t-1])+exp(linv[t])) + end + + @reinitialize model + return model +end + +function constructE7A!(model::Model) + model.flags.linear = false + model.options.substitutions = false + + @parameters model begin + delta = 0.1000000000000000 + p_dlc_ss = 0.0040000000000000 + p_dlinv_ss = 0.0040000000000000 + p_growth = 0.0040000000000000 + end + + @variables model begin + dlc; dlinv; dly; lc; linv; + lk; ly; + end + + @shocks model begin + dlc_shk; dlinv_shk; + end + + @autoexogenize model begin + lc = dlc_shk + linv = dlinv_shk + end + + @equations model begin + dlc[t] = (1 - 0.2 - 0.2) * p_dlc_ss + 0.2 * dlc[t - 1] + 0.2 * dlc[t + 1] + dlc_shk[t] + dlinv[t] = (1 - 0.5) * p_dlinv_ss + 0.1 * dlinv[t - 2] + 0.1 * dlinv[t - 1] + 0.1 * dlinv[t + 1] + 0.1 * dlinv[t + 2] + 0.1 * dlinv[t + 3] + dlinv_shk[t] + lc[t] = lc[t - 1] + dlc[t] + linv[t] = linv[t - 1] + dlinv[t] + ly[t] = log(exp(lc[t]) + exp(linv[t])) + dly[t] = ly[t] - ly[t - 1] + lk[t] = log((1 - delta) * exp(lk[t - 1]) + exp(linv[t])) + end + + @reinitialize model + + @steadystate model linv = lc - 7; + @steadystate model lc = 14; + + return model +end + +function 
constructS1!(model::Model) + model.flags.linear = true + model.options.substitutions = false + + @variables model a b c + + @shocks model b_shk c_shk + + @parameters model begin + a_ss = 1.2 + α = 0.5 + β = 0.8 + q = 2 + end + + @equations model begin + a[t] = b[t] + c[t] + b[t] = @sstate(b) * (1 - α) + α * b[t-1] + b_shk[t] + c[t] = q * @sstate(b) * (1 - β) + β * c[t-1] + c_shk[t] + end + + @reinitialize model + + @steadystate model a = a_ss + + return model +end + +function constructS2!(model::Model) + model.flags.linear = false + model.options.substitutions = false + + @parameters model begin + α = 0.5 + x_ss = 3.1 + end + + @variables model begin + y + @log x + # @shock x_shk + end + + @shocks model begin + x_shk + end + + @autoexogenize model begin + x = x_shk + end + + @equations model begin + y[t] = (1 - α) * 2 * @sstate(x) + (α) * @movav(y[t-1], 4) + log(x[t]) = (1 - α) * log(x_ss) + (α) * @movav(log(x[t-1]), 2) + x_shk[t] + end + + @reinitialize model + + return model +end + +function deconstructE1!(model::Model) + @variables model @delete y + + @shocks model @delete y_shk + + @autoexogenize model begin + @delete y => y_shk + end + + @equations model begin + @delete _EQ1 + end +end + +function deconstructE2!(model::Model) + @variables model @delete pinf rate ygap + + @shocks model @delete pinf_shk rate_shk ygap_shk + + @autoexogenize model begin + @delete pinf = pinf_shk + @delete rate = rate_shk + @delete ygap = ygap_shk + end + + @equations model begin + @delete _EQ1 _EQ2 _EQ3 + end +end + +function deconstructE3!(model::Model) + @variables model @delete pinf rate ygap + + @shocks model @delete pinf_shk rate_shk ygap_shk + + @autoexogenize model begin + @delete pinf = pinf_shk + @delete rate = rate_shk + @delete ygap = ygap_shk + end + + @equations model begin + @delete _EQ1 _EQ2 _EQ3 + end +end + +function deconstructE3nl!(model::Model) + @variables model @delete pinf rate ygap + + @shocks model @delete pinf_shk rate_shk ygap_shk + + @autoexogenize model begin + @delete pinf = pinf_shk + @delete rate = rate_shk + @delete ygap = ygap_shk + end + + @equations model begin + @delete _EQ1 _EQ2 _EQ3 + end +end + +function deconstructE6!(model::Model) + @variables model @delete dlp dly dlyn lp ly lyn + + @shocks model @delete dlp_shk dly_shk + + @autoexogenize model begin + @delete ly => dly_shk + @delete lp => dlp_shk + end + + @equations model begin + @delete _EQ1 _EQ2 _EQ3 _EQ4 _EQ5 _EQ6 + end +end + +function deconstructE7!(model::Model) + @variables model @delete dlc dlinv dly lc linv lk ly + + @shocks model @delete dlc_shk dlinv_shk + + @autoexogenize model begin + @delete lc => dlc_shk + @delete linv => dlinv_shk + end + + @equations model begin + @delete _EQ1 _EQ2 _EQ3 _EQ4 _EQ5 _EQ6 _EQ7 + end +end + +function deconstructE7A!(model::Model) + @variables model @delete dlc dlinv dly lc linv lk ly + + @shocks model @delete dlc_shk dlinv_shk + + @autoexogenize model begin + @delete lc => dlc_shk + @delete linv => dlinv_shk + end + + @equations model begin + @delete _EQ1 _EQ2 _EQ3 _EQ4 _EQ5 _EQ6 _EQ7 + end + + @steadystate model begin + @delete _SSEQ1 _SSEQ2 + end +end + +function deconstructS1!(model::Model) + @variables model @delete a b c + + @shocks model @delete b_shk c_shk + + @equations model begin + @delete _EQ1 _EQ2 _EQ3 + end + + @steadystate model begin + @delete _SSEQ1 + end +end + +function deconstructS2!(model::Model) + @variables model @delete y x + + @shocks model @delete x_shk + + @autoexogenize model begin + @delete x => x_shk + end + + @equations model begin + 
@delete _EQ1 _EQ2 + end +end + + +function getInitialModel(s::Symbol) + s == :E1 && return E1.newmodel() + s == :E2 && return E2.newmodel() + s == :E3 && return E3.newmodel() + s == :E3nl && return E3nl.newmodel() + s == :E6 && return E6.newmodel() + s == :E7 && return E7.newmodel() + s == :E7A && return E7A.newmodel() + s == :S1 && return S1.newmodel() + s == :S2 && return S2.newmodel() +end + +function deconstructedModel() + symbols = (:E1, :E2, :E3, :E3nl, :E6, :E7, :E7A, :S1, :S2) + sym = symbols[rand(1:length(symbols))] + m = getInitialModel(sym) + + sym == :E1 && deconstructE1!(m) + sym == :E2 && deconstructE2!(m) + sym == :E3 && deconstructE3!(m) + sym == :E3nl && deconstructE3nl!(m) + sym == :E6 && deconstructE6!(m) + sym == :E7 && deconstructE7!(m) + sym == :E7A && deconstructE7A!(m) + sym == :S1 && deconstructS1!(m) + sym == :S2 && deconstructS2!(m) + + return m +end + +for i in 1:3 + @info "Model change variations $i" + global getE1() = constructE1!(deconstructedModel()) + global getE2() = constructE2!(deconstructedModel()) + global getE3() = constructE3!(deconstructedModel()) + global getE3nl() = constructE3nl!(deconstructedModel()) + global getE6() = constructE6!(deconstructedModel()) + global getE7() = constructE7!(deconstructedModel()) + global getE7A() = constructE7A!(deconstructedModel()) + global getS1() = constructS1!(deconstructedModel()) + global getS2() = constructS2!(deconstructedModel()) + + include("simtests.jl") + include("sim_fo.jl") + include("dynss.jl") + include("stochsims.jl") + include("sstests.jl") + include("misc.jl") +end + diff --git a/test/runtests.jl b/test/runtests.jl index b2d2210..2e33988 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -20,9 +20,20 @@ using ModelBaseEcon @using_example S1 @using_example S2 +getE1() = E1.newmodel() +getE2() = E2.newmodel() +getE3() = E3.newmodel() +getE3nl() = E3nl.newmodel() +getE6() = E6.newmodel() +getE7() = E7.newmodel() +getE7A() = E7A.newmodel() +getS1() = S1.newmodel() +getS2() = S2.newmodel() + using Test using Random using Suppressor +import Pardiso @testset "1dsolvers" begin # f(x) = (x-2)*(x-3) = a x^2 + b x + c with vals = [a, x, b, c] @@ -49,7 +60,7 @@ using Suppressor end @testset "Plans" begin - m = deepcopy(E1.model) + m = getE1() p = Plan(m, 1:3) @test first(p.range) == 0U @test last(p.range) == 4U @@ -145,7 +156,7 @@ include("simdatatests.jl") include("sstests.jl") @testset "misc" begin - m = deepcopy(E3.model) + m = getE3() sim = m.maxlag .+ (1:10) p = Plan(m, sim) @@ -215,11 +226,39 @@ end @test t1 == TSeries(1U, [1, 1, 3, 5, 5, 5, 5, 5]) end -include("simtests.jl") -include("logsimtests.jl") -include("sim_fo.jl") -include("shockdecomp.jl") -include("dynss.jl") include("misc.jl") -include("stochsims.jl") +@testset "sparse" begin + @test (use_pardiso(); StateSpaceEcon.StackedTimeSolver.sf_default == :pardiso) + @test (use_umfpack(); StateSpaceEcon.StackedTimeSolver.sf_default == :umfpack) + let m = Model() + @test (use_pardiso!(m); m.factorization == :pardiso) + @test (use_umfpack!(m); m.factorization == :umfpack) + end +end + +# make sure Pardiso runs deterministically (single thread) for the tests +Pardiso.set_nprocs_mkl!(1) + +for sfdef = QuoteNode.(StateSpaceEcon.StackedTimeSolver.sf_libs) + + sfdef.value == :default && continue + + # Pardiso on macOS is giving "Not enough memory". 
Disable for now + # sfdef.value == :pardiso && Sys.isapple() && continue + + @info "Using $(sfdef)" + + Core.eval(StateSpaceEcon.StackedTimeSolver, :(sf_default = $(sfdef))) + + include("simtests.jl") + include("logsimtests.jl") + include("sim_fo.jl") + include("shockdecomp.jl") + include("dynss.jl") + + include("stochsims.jl") + +end + +include("modelchanges.jl") \ No newline at end of file diff --git a/test/shockdecomp.jl b/test/shockdecomp.jl index ebbcf0f..e48eb60 100644 --- a/test/shockdecomp.jl +++ b/test/shockdecomp.jl @@ -1,10 +1,15 @@ +################################################################################## +# This file is part of StateSpaceEcon.jl +# BSD 3-Clause License +# Copyright (c) 2020-2023, Bank of Canada +# All rights reserved. +################################################################################## using LinearAlgebra using JLD2 @testset "shkdcmp" begin m = deepcopy(E7A.model) expected = Workspace(load("data/shkdcmp_E7A.jld2")) - TimeSeriesEcon.clean_old_frequencies!(expected) empty!(m.sstate.constraints) @steadystate m lc = 1 @@ -23,10 +28,10 @@ using JLD2 # shock on dly is half inventory half consumption dlinv_shk = result.sd.dlinv.dlinv_shk[result.sd.dlinv.dlinv_shk.>1e-9] # positive shock values dly_dlinv_shk = result.sd.dly.dlinv_shk[result.sd.dlinv.dlinv_shk.>1e-9] # same rows for dly - @test all(dly_dlinv_shk ./ dlinv_shk .≈ 0.5) + @test norm(dly_dlinv_shk ./ dlinv_shk .- 0.5, Inf) < 1e-7 dlc_shk = result.sd.dlc.dlc_shk[result.sd.dlc.dlc_shk.>1e-9] # positive shock values dly_dlc_shk = result.sd.dly.dlc_shk[result.sd.dlc.dlc_shk.>1e-9] # same rows for dly - @test all(dly_dlc_shk ./ dlc_shk .≈ 0.5) + @test norm(dly_dlc_shk ./ dlc_shk .- 0.5, Inf) < 1e-7 sd_zeros = fill!(similar(result.sd.dlc), 0) @@ -56,10 +61,10 @@ using JLD2 # and the same relation between shocks dlinv_shk = result_dev.sd.dlinv.dlinv_shk[result_dev.sd.dlinv.dlinv_shk.>1e-9] # positive shock values dly_dlinv_shk = result_dev.sd.dly.dlinv_shk[result_dev.sd.dlinv.dlinv_shk.>1e-9] # same rows for dly - @test all(dly_dlinv_shk ./ dlinv_shk .≈ 0.5) + @test norm(dly_dlinv_shk ./ dlinv_shk .- 0.5, Inf) < 1e-7 dlc_shk = result_dev.sd.dlc.dlc_shk[result_dev.sd.dlc.dlc_shk.>1e-9] # positive shock values dly_dlc_shk = result_dev.sd.dly.dlc_shk[result_dev.sd.dlc.dlc_shk.>1e-9] # same rows for dly - @test all(dly_dlc_shk ./ dlc_shk .≈ 0.5) + @test norm(dly_dlc_shk ./ dlc_shk .- 0.5, Inf) < 1e-7 # for consumption, term, init and nonlinear are zero @test norm(result_dev.sd.dlc.init, Inf) < 1e-12 @@ -106,7 +111,7 @@ using JLD2 result3fo = shockdecomp(m, p, exog_data3; solver=:firstorder) @test compare(result3, result3fo; ignoremissing=true, atol=2^10 * eps(), quiet=true) - # additive for linearized model + # additive for linearized model exog_data4 = copy(exog_data3) exog_data4[first(rng), m.shocks] .+= 0.1 result4 = shockdecomp(m, p, exog_data4; control=result3.s, variant=:linearize, fctype=fcnatural) diff --git a/test/sim_fo.jl b/test/sim_fo.jl index 9961a00..28e83cc 100644 --- a/test/sim_fo.jl +++ b/test/sim_fo.jl @@ -58,7 +58,7 @@ end end @testset "M.fo.unant" begin - run_fo_unant_tests(M.model) + run_fo_unant_tests(deepcopy(M.model)) end module R @@ -79,23 +79,23 @@ end @initialize model end @testset "R.fo.unant" begin - run_fo_unant_tests(R.model) + run_fo_unant_tests(deepcopy(R.model)) end @using_example E2 @testset "E2.fo.unant" begin - run_fo_unant_tests(E2.model) + run_fo_unant_tests(getE2()) end @using_example E3 @testset "E3.fo.unant" begin - run_fo_unant_tests(E3.model) + 
run_fo_unant_tests(getE3()) end @using_example E6 @testset "E6.fo.unant" begin - let m = E6.model + let m = getE6() # set slopes to 0, otherwise we're not allowed to linearize m.p_dly = 0 m.p_dlp = 0 @@ -131,6 +131,7 @@ end @testset "shkdcmp.fo" begin for m in (M.model, R.model) + m = deepcopy(m) m.rho = 0.6 empty!(m.sstate.constraints) test_shockdecomp_firstorder(m) @@ -139,10 +140,11 @@ end test_shockdecomp_firstorder(m, 1U:20U, fclevel) end for m in (E2.model, E3.model) + m = deepcopy(m) empty!(m.sstate.constraints) test_shockdecomp_firstorder(m, 1U:500U) end - let m = E6.model + let m = deepcopy(E6.model) # set slopes to 0, otherwise we're not allowed to linearize m.p_dly = 0 m.p_dlp = 0 @@ -189,6 +191,7 @@ end @testset "inidcmp.fo" begin for m in (M.model, R.model) + m = deepcopy(m) m.rho = 0.6 empty!(m.sstate.constraints) test_initdecomp_firstorder(m) @@ -196,11 +199,12 @@ end @steadystate m x = 6 test_initdecomp_firstorder(m) end - for m in (E2.model, E3.model) + for m in (getE2(), getE3()) + m = deepcopy(m) empty!(m.sstate.constraints) test_initdecomp_firstorder(m) end - let m = E6.model + let m = getE6() # set slopes to 0, otherwise we're not allowed to linearize m.p_dly = 0 m.p_dlp = 0 @@ -229,7 +233,7 @@ function test_initdecomp_stackedtime(m; nonlin=!m.linear, rng=2001Q1:2024Q4, fct p = Plan(m, rng) exog = steadystatedata(m, p) exog[begin:first(rng)-1, vars] = rand(m.maxlag, m.nvars) - # zero shocks + # zero shocks ref = shockdecomp(m, p, exog; solver, fctype) for v in vars, s in shks @test norm(ref.sd[v][:, s], Inf) < atol @@ -241,7 +245,7 @@ function test_initdecomp_stackedtime(m; nonlin=!m.linear, rng=2001Q1:2024Q4, fct exog1[begin:first(rng1)-1, vars] = ref.s exog1[rng1, shks] = ref.s[rng1, shks] if fctype === fclevel - exog1[last(rng1)+1:end,vars] = ref.s + exog1[last(rng1)+1:end, vars] = ref.s end res1 = shockdecomp(m, p1, exog1; solver, fctype, initdecomp=ref) res1a = shockdecomp(m, p1, exog1; solver, fctype) @@ -254,7 +258,7 @@ function test_initdecomp_stackedtime(m; nonlin=!m.linear, rng=2001Q1:2024Q4, fct # without initdecomp the resulting range is shorter # and the numbers will be identical for linear models @test rangeof(ref) ≠ rangeof(res1a) - @test compare(ref, res1a; atol, quiet=true) == !nonlin + @test compare(ref, res1a; atol, quiet=true, ignoremissing=true) == !nonlin #test 2 - only 1 shock at a time for shk in shks @@ -273,7 +277,7 @@ function test_initdecomp_stackedtime(m; nonlin=!m.linear, rng=2001Q1:2024Q4, fct exog1[begin:first(rng1)-1, shks] .= NaN exog1[rng1, shks] = ref.s[rng1, shks] if fctype === fclevel - exog1[last(rng1)+1:end,vars] = ref.s + exog1[last(rng1)+1:end, vars] = ref.s end res1 = shockdecomp(m, p1, exog1; solver, fctype, initdecomp=ref) res1a = shockdecomp(m, p1, exog1; solver, fctype) @@ -287,7 +291,7 @@ function test_initdecomp_stackedtime(m; nonlin=!m.linear, rng=2001Q1:2024Q4, fct # without initdecomp the resulting range is shorter # and the numbers are totally different @test rangeof(ref) ≠ rangeof(res1a) - @test compare(ref, res1a; atol, quiet=true, nans=true) == (m.maxlag == 0) + @test compare(ref, res1a; atol, quiet=true, nans=true, ignoremissing=true) == (m.maxlag == 0) end # test 3 - all shocks and init @@ -304,7 +308,7 @@ function test_initdecomp_stackedtime(m; nonlin=!m.linear, rng=2001Q1:2024Q4, fct exog1[begin:first(rng1)-1, shks] .= NaN # mask shocks in initial conditions exog1[rng1, shks] = ref.s[rng1, shks] if fctype === fclevel - exog1[last(rng1)+1:end,vars] = ref.s + exog1[last(rng1)+1:end, vars] = ref.s end res1 = 
shockdecomp(m, p1, exog1; solver, fctype, initdecomp=ref) @test rangeof(ref) == rangeof(res1) @@ -314,6 +318,7 @@ end @testset "inidcmp.st" begin for m in (M.model, R.model) + m = deepcopy(m) m.rho = 0.6 empty!(m.sstate.constraints) test_initdecomp_stackedtime(m) @@ -321,11 +326,12 @@ end @steadystate m x = 6 test_initdecomp_stackedtime(m, fctype=fclevel) end - for m in (E2.model, E3.model) + for m in (getE2(), getE3()) + m = deepcopy(m) empty!(m.sstate.constraints) test_initdecomp_stackedtime(m) end - let m = E6.model + let m = getE6() # set slopes to 0, otherwise we're not allowed to linearize m.p_dly = 0 m.p_dlp = 0 diff --git a/test/simdatatests.jl b/test/simdatatests.jl index 34982b2..33e0136 100644 --- a/test/simdatatests.jl +++ b/test/simdatatests.jl @@ -47,11 +47,11 @@ @test sd[:] == dta[:] @test sd[:a] isa TSeries && sd[:a].values == dta[:, 1] @test sd["b"] isa TSeries && sd["b"].values == dta[:, 2] - # + # sd.a = dta[:, 1] sd.b = dta[:, 2] @test sd[:] == dta[:] - # + # sd[:] = dta[:] sd.a = sd.b @test sd[:, 1] == sd[:, 2] @@ -70,7 +70,7 @@ @test_throws DimensionMismatch sd[2000Q3] = zeros(size(dta, 2) + 1) @test_throws ArgumentError sd[2000Y] @test_throws BoundsError sd[2010Q4] = rand(2) - # + # @test sd[2001Q1:2002Q4] isa SimData @test sd[2001Q1:2002Q4] ≈ sd[5:12, :] sd[2001Q1:2002Q4] = 1:16 @@ -95,7 +95,7 @@ @test similar(sd) isa typeof(sd) @test_throws BoundsError sd[1999Q1:2000Q4] = zeros(8, 2) @test_throws BoundsError sd[2004Q1:2013Q4, [:a, :b]] - # setindex with two + # setindex with two sd[2001Q1, (:b,)] = [3.5] @test sd[5, 2] == 3.5 sd[2000Q1:2001Q4, (:b,)] .= 3.7 diff --git a/test/simtests.jl b/test/simtests.jl index c97fd27..dfbc313 100644 --- a/test/simtests.jl +++ b/test/simtests.jl @@ -1,7 +1,7 @@ ################################################################################## # This file is part of StateSpaceEcon.jl # BSD 3-Clause License -# Copyright (c) 2020-2022, Bank of Canada +# Copyright (c) 2020-2023, Bank of Canada # All rights reserved. 
################################################################################## @@ -9,25 +9,25 @@ using LinearAlgebra using DelimitedFiles @testset "E1.simple" begin - m = deepcopy(E1.model) + m = getE1() m.α = 0.5 m.β = 1.0 - m.α for T = 6:10 - p = Plan(m, 2:T - 1) + p = Plan(m, 2:T-1) data = zerodata(m, p) data[1, :] = [1 0] # initial condition data[end, :] = [5 0] # final condition sim00 = simulate(m, p, data) - exp00 = hcat(1.0:(5.0 - 1.0) / (T - 1):5.0, zeros(T)) + exp00 = hcat(1.0:(5.0-1.0)/(T-1):5.0, zeros(T)) # @info "T=$T" sim00 exp00 data @test sim00 ≈ exp00 # solution piecewise linear y - one line before and another line after the shock data1 = copy(data) - shk = .1 + shk = 0.1 y2val = ((T - 2) * (1 + 2 * shk) + 5.0) / (T - 1) - data1[2U,:y_shk] = shk # shock at time 2 + data1[2U, :y_shk] = shk # shock at time 2 sim01 = simulate(m, p, data1) - exp01 = vcat([1.0 y2val:(5.0 - y2val) / (T - 2):5.0...], [0 shk zeros(T - 2)...])' + exp01 = vcat([1.0 y2val:(5.0-y2val)/(T-2):5.0...], [0 shk zeros(T - 2)...])' @test sim01 ≈ exp01 # exogenous-endogenous swap # - replicate the solution above backing out the shock that produces it @@ -43,36 +43,36 @@ using DelimitedFiles # test_simulation(E1.model, "data/M1_TestMatrix.csv") end -function test_simulation(m_in, path; atol = 1.0e-9) +function test_simulation(m_in, path; atol=1.0e-9) m = deepcopy(m_in) nvars = ModelBaseEcon.nvariables(m) nshks = ModelBaseEcon.nshocks(m) - # + # data_chk = readdlm(path, ',', Float64) IDX = size(data_chk, 1) - plan = Plan(m, 1 + m.maxlag:IDX - m.maxlead) - # + plan = Plan(m, 1+m.maxlag:IDX-m.maxlead) + # p01 = deepcopy(plan) - autoexogenize!(p01, m, m.maxlag + 1:IDX - m.maxlead) + autoexogenize!(p01, m, m.maxlag+1:IDX-m.maxlead) data01 = zeroarray(m, p01) - data01[1:m.maxlag,:] = data_chk[1:m.maxlag,:] - data01[end - m.maxlead + 1:end,:] = data_chk[end - m.maxlead + 1:end,:] - data01[m.maxlag + 1:end - m.maxlead,1:nvars] = data_chk[m.maxlag + 1:end - m.maxlead,1:nvars] + data01[1:m.maxlag, :] = data_chk[1:m.maxlag, :] + data01[end-m.maxlead+1:end, :] = data_chk[end-m.maxlead+1:end, :] + data01[m.maxlag+1:end-m.maxlead, 1:nvars] = data_chk[m.maxlag+1:end-m.maxlead, 1:nvars] res01 = simulate(m, p01, data01) - @test isapprox(res01, data_chk; atol = atol) - + @test isapprox(res01, data_chk; atol=atol) + p02 = deepcopy(plan) data02 = zeroarray(m, p02) - data02[1:m.maxlag,:] = data_chk[1:m.maxlag,:] - data02[end - m.maxlead + 1:end,:] = data_chk[end - m.maxlead + 1:end,:] - data02[m.maxlag + 1:end - m.maxlead,nvars .+ (1:nshks)] = data_chk[m.maxlag + 1:end - m.maxlead,nvars .+ (1:nshks)] + data02[1:m.maxlag, :] = data_chk[1:m.maxlag, :] + data02[end-m.maxlead+1:end, :] = data_chk[end-m.maxlead+1:end, :] + data02[m.maxlag+1:end-m.maxlead, nvars.+(1:nshks)] = data_chk[m.maxlag+1:end-m.maxlead, nvars.+(1:nshks)] res02 = simulate(m, p02, data02) - @test isapprox(res02, data_chk; atol = atol) + @test isapprox(res02, data_chk; atol=atol) # linesearch m.options.linesearch = true res01_line = simulate(m, p01, data01) - @test isapprox(res01_line, data_chk; atol = atol) + @test isapprox(res01_line, data_chk; atol=atol) m.options.linesearch = false # deviation @@ -80,42 +80,42 @@ function test_simulation(m_in, path; atol = 1.0e-9) data_chk_dev = deepcopy(data_chk) sssolve!(m) for (i, var) in enumerate(m.allvars) - data01_dev[:,i] .-= m.sstate[var].level - data_chk_dev[:,i] .-= m.sstate[var].level + data01_dev[:, i] .-= m.sstate[var].level + data_chk_dev[:, i] .-= m.sstate[var].level if m.sstate[var].slope != 0 period = 0 for 
j in p01.range - data01_dev[j,i] -= m.sstate[var].slope*period - data_chk_dev[j,i] -= m.sstate[var].slope*period + data01_dev[j, i] -= m.sstate[var].slope * period + data_chk_dev[j, i] -= m.sstate[var].slope * period period += 1 end end end res01_dev = simulate(m, p01, data01_dev; deviation=true) - @test isapprox(res01_dev, data_chk_dev; atol = atol) + @test isapprox(res01_dev, data_chk_dev; atol=atol) end @testset "E1.sim" begin - test_simulation(E1.model, "data/M1_TestData.csv") + test_simulation(getE1(), "data/M1_TestData.csv") end @testset "E2.sim" begin - test_simulation(E2.model, "data/M2_TestData.csv") + test_simulation(getE2(), "data/M2_TestData.csv") end @testset "E3.sim" begin - test_simulation(E3.model, "data/M3_TestData.csv") + test_simulation(getE3(), "data/M3_TestData.csv") end @testset "E6.sim" begin - test_simulation(E6.model, "data/M6_TestData.csv") + test_simulation(getE6(), "data/M6_TestData.csv") end ############################################################# # linearization tests @testset "linearize" begin - m3 = deepcopy(E3.model) + m3 = getE3() clear_sstate!(m3) @test_throws ModelBaseEcon.LinearizationError linearize!(m3) @@ -124,7 +124,7 @@ end test_simulation(m3, "data/M3_TestData.csv") - m3nl = deepcopy(E3nl.model) + m3nl = getE3nl() m3nl.sstate.values .= m3.sstate.values m3nl.sstate.mask .= m3.sstate.mask @test issssolved(m3nl) @@ -141,18 +141,18 @@ end @test isa(ModelBaseEcon.getevaldata(m3), ModelBaseEcon.LinearizedModelEvaluationData) test_simulation(m3, "data/M3_TestData.csv") - m7 = deepcopy(E7.model) + m7 = getE7() if !issssolved(m7) empty!(m7.sstate.constraints) - @steadystate m7 linv = lc - 7; - @steadystate m7 lc = 14; + @steadystate m7 linv = lc - 7 + @steadystate m7 lc = 14 clear_sstate!(m7) - sssolve!(m7); + sssolve!(m7) end # the aux variables are present @test_throws ModelBaseEcon.LinearizationError linearize!(m7) - m7a = deepcopy(E7A.model) + m7a = getE7A() clear_sstate!(m7a) sssolve!(m7a) # non-zeros linear growth in the steady state @@ -165,42 +165,43 @@ end # Tests of unanticipated shocks @testset "E1.unant" begin - m = deepcopy(E1.model) + m = getE1() m.α = m.β = 0.5 - p = Plan(m, 1:3); - data = zeroarray(m, p); - data[1,1] = 1.0; - data[end,1] = 5.0; - data[3,2] = 0.1; + p = Plan(m, 1:3) + data = zeroarray(m, p) + data[1, 1] = 1.0 + data[end, 1] = 5.0 + data[3, 2] = 0.1 res_u = simulate(m, p, data; anticipate=false) - true_u = Float64[1 0; 2 0; 47 / 15 0.1; 61 / 15 0; 5 0] + true_u = Float64[1 0; 2 0; 47/15 0.1; 61/15 0; 5 0] @test res_u ≈ true_u atol = 1e-12 end @testset "E2.unant" begin - m = deepcopy(E2.model) + m = getE2() nvars = length(m.variables) nshks = length(m.shocks) var_inds = 1:nvars shk_inds = nvars .+ (1:nshks) # Steady state - m.sstate.values .= 0; - m.sstate.mask .= true; + m.sstate.values .= 0 + m.sstate.mask .= true let atrue = readdlm("./data/M2_Ant.csv", ',', Float64), utrue = readdlm("./data/M2_Unant.csv", ',', Float64) # Set simulation ranges and plan IDX = size(atrue, 1) init = 1:m.maxlag - sim = 1 + m.maxlag:IDX - m.maxlead - term = IDX .+ (1 - m.maxlead:0) + sim = 1+m.maxlag:IDX-m.maxlead + term = IDX .+ (1-m.maxlead:0) p = Plan(m, sim) # Make up the exogenous data for the experiment let adata = zeroarray(m, p), udata = zeroarray(m, p) - adata[sim,shk_inds] .= atrue[sim,shk_inds] - udata[sim,shk_inds] .= utrue[sim,shk_inds] - adata[term,:] .= atrue[term,:] - udata[term,:] .= utrue[term,:] + + adata[sim, shk_inds] .= atrue[sim, shk_inds] + udata[sim, shk_inds] .= utrue[sim, shk_inds] + adata[term, :] .= atrue[term, 
:] + udata[term, :] .= utrue[term, :] # Run the simulations and test ares = simulate(m, p, adata) @test ares ≈ atrue atol = m.options.tol @@ -209,11 +210,12 @@ end end let adata = zeroarray(m, p), udata = zeroarray(m, p) + autoexogenize!(p, m, sim) - adata[sim,var_inds] .= atrue[sim,var_inds] - udata[sim,var_inds] .= utrue[sim,var_inds] - adata[term,:] .= atrue[term,:] - udata[term,:] .= utrue[term,:] + adata[sim, var_inds] .= atrue[sim, var_inds] + udata[sim, var_inds] .= utrue[sim, var_inds] + adata[term, :] .= atrue[term, :] + udata[term, :] .= utrue[term, :] # Run the simulations and test ares = simulate(m, p, adata) @test ares ≈ atrue atol = m.options.tol @@ -227,17 +229,17 @@ end # Set simulation ranges and plan IDX = size(atrue, 1) init = 1:m.maxlag - sim = 1 + m.maxlag:IDX - m.maxlead - term = IDX .+ (1 - m.maxlead:0) + sim = 1+m.maxlag:IDX-m.maxlead + term = IDX .+ (1-m.maxlead:0) p = Plan(m, sim) # Make up the exogenous data for the experiment - adata = zeroarray(m, p); - udata = zeroarray(m, p); + adata = zeroarray(m, p) + udata = zeroarray(m, p) ygap_ind = indexin([:ygap], m.variables)[1] adata[sim[1:s], ygap_ind] .= -0.10 udata[sim[1:s], ygap_ind] .= -0.10 - adata[term,:] .= atrue[term,:] - udata[term,:] .= utrue[term,:] + adata[term, :] .= atrue[term, :] + udata[term, :] .= utrue[term, :] exogenize!(p, :ygap, sim[1:s]) endogenize!(p, :ygap_shk, sim[1:s]) # Run the simulations and test @@ -249,29 +251,30 @@ end end @testset "E3.unant" begin - m = deepcopy(E3.model) + m = getE3() nvars = length(m.variables) nshks = length(m.shocks) var_inds = 1:nvars shk_inds = nvars .+ (1:nshks) # Steady state - m.sstate.values .= 0; - m.sstate.mask .= true; + m.sstate.values .= 0 + m.sstate.mask .= true let atrue = readdlm("./data/M3_Ant.csv", ',', Float64), utrue = readdlm("./data/M3_Unant.csv", ',', Float64) # Set simulation ranges and plan IDX = size(atrue, 1) init = 1:m.maxlag - sim = 1 + m.maxlag:IDX - m.maxlead - term = IDX .+ (1 - m.maxlead:0) + sim = 1+m.maxlag:IDX-m.maxlead + term = IDX .+ (1-m.maxlead:0) p = Plan(m, sim) # Make up the exogenous data for the experiment let adata = zeroarray(m, p), udata = zeroarray(m, p) - adata[sim,shk_inds] .= atrue[sim,shk_inds] - udata[sim,shk_inds] .= utrue[sim,shk_inds] - adata[term,:] .= atrue[term,:] - udata[term,:] .= utrue[term,:] + + adata[sim, shk_inds] .= atrue[sim, shk_inds] + udata[sim, shk_inds] .= utrue[sim, shk_inds] + adata[term, :] .= atrue[term, :] + udata[term, :] .= utrue[term, :] # Run the simulations and test ares = simulate(m, p, adata) @test ares ≈ atrue atol = m.options.tol @@ -280,11 +283,12 @@ end end let adata = zeroarray(m, p), udata = zeroarray(m, p) + autoexogenize!(p, m, sim) - adata[sim,var_inds] .= atrue[sim,var_inds] - udata[sim,var_inds] .= utrue[sim,var_inds] - adata[term,:] .= atrue[term,:] - udata[term,:] .= utrue[term,:] + adata[sim, var_inds] .= atrue[sim, var_inds] + udata[sim, var_inds] .= utrue[sim, var_inds] + adata[term, :] .= atrue[term, :] + udata[term, :] .= utrue[term, :] # Run the simulations and test ares = simulate(m, p, adata) @test ares ≈ atrue atol = m.options.tol @@ -292,24 +296,24 @@ end @test ures ≈ utrue atol = m.options.tol end # tests with different final conditions and expectation_horizon values. 
- for (fc, eh) = Iterators.product((fcgiven, fclevel, fcslope, fcnatural), ( nothing, 30, 50, 100)) + for (fc, eh) = Iterators.product((fcgiven, fclevel, fcslope, fcnatural), (nothing, 30, 50, 100)) # Make up the exogenous data for the experiment p = Plan(m, sim) udata = zeroarray(m, p) - udata[sim,shk_inds] .= utrue[sim,shk_inds] - udata[term,:] .= utrue[term,:] + udata[sim, shk_inds] .= utrue[sim, shk_inds] + udata[term, :] .= utrue[term, :] # Run the simulations and test ures = simulate(m, p, udata; anticipate=false, expectation_horizon=eh, fctype=fc) - @test ures ≈ utrue atol = m.options.tol rtol = maximum(abs, utrue[term,:]) + @test ures ≈ utrue atol = m.options.tol rtol = maximum(abs, utrue[term, :]) # Repeat with autoexogenize p = Plan(m, sim) udata = zeroarray(m, p) autoexogenize!(p, m, sim) - udata[sim,var_inds] .= utrue[sim,var_inds] - udata[term,:] .= utrue[term,:] + udata[sim, var_inds] .= utrue[sim, var_inds] + udata[term, :] .= utrue[term, :] # Run the simulations and test ures = simulate(m, p, udata; anticipate=false, expectation_horizon=eh, fctype=fc) - @test ures ≈ utrue atol = m.options.tol rtol = maximum(abs, utrue[term,:]) + @test ures ≈ utrue atol = m.options.tol rtol = maximum(abs, utrue[term, :]) end end for s in (1, 4) @@ -318,17 +322,17 @@ end # Set simulation ranges and plan IDX = size(atrue, 1) init = 1:m.maxlag - sim = 1 + m.maxlag:IDX - m.maxlead - term = IDX .+ (1 - m.maxlead:0) + sim = 1+m.maxlag:IDX-m.maxlead + term = IDX .+ (1-m.maxlead:0) p = Plan(m, sim) # Make up the exogenous data for the experiment - adata = zeroarray(m, p); - udata = zeroarray(m, p); + adata = zeroarray(m, p) + udata = zeroarray(m, p) ygap_ind = indexin([:ygap], m.variables)[1] adata[sim[1:s], ygap_ind] .= -0.10 udata[sim[1:s], ygap_ind] .= -0.10 - adata[term,:] .= atrue[term,:] - udata[term,:] .= utrue[term,:] + adata[term, :] .= atrue[term, :] + udata[term, :] .= utrue[term, :] exogenize!(p, :ygap, sim[1:s]) endogenize!(p, :ygap_shk, sim[1:s]) # Run the simulations and test @@ -342,17 +346,20 @@ end @testset "FCTypes" begin let m = Model() @variables m x - @equations m begin x[t] = x[t + 3] end + @equations m begin + x[t] = x[t+3] + end @initialize m p = Plan(m, 1:1) ed = zerodata(m, p) @test_throws ArgumentError simulate(m, p, ed, fctype=fcnatural) end - let m = deepcopy(E1.model) + let m = getE1() p = Plan(m, 3:17) ed = zerodata(m, p) ed .= rand(Float64, size(ed)) - @test_logs (:error, r"The system is underdetermined.*") (@test_throws SingularException simulate(m, p, ed, fctype=fcnatural)) + @test_logs (:error, r"The system is underdetermined.*") (@test_throws Union{SingularException,Pardiso.PardisoPosDefException,Pardiso.PardisoException} simulate(m, p, ed, fctype=fcnatural)) + sd = StateSpaceEcon.StackedTimeSolver.StackedTimeSolverData(m, p, fcgiven) @test_throws ErrorException StateSpaceEcon.StackedTimeSolver.update_plan!(sd, m, Plan(m, 3:8)) x = rand(Float64, size(ed)) @@ -360,10 +367,10 @@ end R2 = StateSpaceEcon.StackedTimeSolver.stackedtime_R!(similar(R1), x, ed, sd) @test R1 ≈ R2 end - let m = deepcopy(E3.model) + let m = getE3() p = Plan(m, 3:177) ed = zerodata(m, p) - ed[3U, m.shocks] .= 0.2 * rand(1,3) + ed[3U, m.shocks] .= 0.2 * rand(1, 3) ed[3U:end, m.variables] .= rand(178, 3) empty!(m.sstate.constraints) clear_sstate!(m) @@ -376,9 +383,9 @@ end s1 = simulate(m, p, ed, fctype=fclevel) s2 = simulate(m, p, ed, fctype=fcslope) s3 = simulate(m, p, ed, fctype=fcnatural) - @test maximum(abs, s1 - s2; dims = :) < 1e-8 - @test maximum(abs, s1 - s3; dims = :) < 1e-8 - @test 
maximum(abs, s3 - s2; dims = :) < 1e-8 + @test maximum(abs, s1 - s2; dims=:) < 1e-8 + @test maximum(abs, s1 - s3; dims=:) < 1e-8 + @test maximum(abs, s3 - s2; dims=:) < 1e-8 sd = StateSpaceEcon.StackedTimeSolver.StackedTimeSolverData(m, p, fcgiven) x = rand(Float64, size(ed)) @@ -386,9 +393,9 @@ end R2 = StateSpaceEcon.StackedTimeSolver.stackedtime_R!(similar(R1), x, ed, sd) @test R1 ≈ R2 end - let m = deepcopy(E6.model) + let m = getE6() empty!(m.sstate.constraints) - @steadystate m lp = 1.0 + @steadystate m lp = 1.0 @steadystate m lp + lyn = 1.5 @steadystate m lp + ly = 1.2 clear_sstate!(m) @@ -399,7 +406,7 @@ end ed.dly_shk[3] = rand() s2 = simulate(m, p, ed, fctype=fcslope) s3 = simulate(m, p, ed, fctype=fcnatural) - @test maximum(abs, s3 - s2; dims = :) < 1e-12 + @test maximum(abs, s3 - s2; dims=:) < 1e-12 sd = StateSpaceEcon.StackedTimeSolver.StackedTimeSolverData(m, p, fcgiven) x = rand(Float64, size(ed)) @@ -409,7 +416,7 @@ end end @testset "new.unant" begin - m = E1.model + m = getE1() m.α = m.β = 0.5 test_rng = 20Q1:22Q1 @@ -439,7 +446,7 @@ end # now attempt to recover shocks from solutions p_a2 = autoexogenize!(copy(p_a), m, test_rng) p_u2 = copy(p_a2) - # recover + # recover chk_a = let data = copy(res_a) data.y_shk[test_rng] .= 100rand(length(test_rng)) simulate(m, p_a2, data) @@ -469,99 +476,100 @@ end @testset "linesearch, deviation" begin # linesearch should get the same results - let m = deepcopy(E3nl.model) + let m = getE3nl() clear_sstate!(m) nvars = ModelBaseEcon.nvariables(m) nshks = ModelBaseEcon.nshocks(m) - # + # data_chk = readdlm("data/M3_TestData.csv", ',', Float64) IDX = size(data_chk, 1) - plan = Plan(m, 1 + m.maxlag:IDX - m.maxlead) - # + plan = Plan(m, 1+m.maxlag:IDX-m.maxlead) + # p01 = deepcopy(plan) - autoexogenize!(p01, m, m.maxlag + 1:IDX - m.maxlead) + autoexogenize!(p01, m, m.maxlag+1:IDX-m.maxlead) data01 = zeroarray(m, p01) - data01[1:m.maxlag,:] = data_chk[1:m.maxlag,:] - data01[end - m.maxlead + 1:end,:] = data_chk[end - m.maxlead + 1:end,:] - data01[m.maxlag + 1:end - m.maxlead,1:nvars] = data_chk[m.maxlag + 1:end - m.maxlead,1:nvars] + data01[1:m.maxlag, :] = data_chk[1:m.maxlag, :] + data01[end-m.maxlead+1:end, :] = data_chk[end-m.maxlead+1:end, :] + data01[m.maxlag+1:end-m.maxlead, 1:nvars] = data_chk[m.maxlag+1:end-m.maxlead, 1:nvars] initial_guess = similar(data01) initial_guess .= 1 - initial_guess[end-2:end,:] .= 0 #at the very end + initial_guess[end-2:end, :] .= 0 #at the very end res01 = simulate(m, p01, data01) res01_line = nothing m.options.linesearch = true out = @capture_err begin res01_line = simulate(m, p01, data01; initial_guess=initial_guess, verbose=true) end - + # line search should give the same results @test occursin("Linesearch success", out) - @test isapprox(res01, data_chk; atol = 1.0e-9) - @test isapprox(res01_line, data_chk; atol = 1.0e-9) - @test isapprox(res01, res01_line; atol = 1.0e-9) + @test isapprox(res01, data_chk; atol=1.0e-9) + @test isapprox(res01_line, data_chk; atol=1.0e-9) + @test isapprox(res01, res01_line; atol=1.0e-9) end - let m = deepcopy(E1.model) + let m = getE1() clear_sstate!(m) nvars = ModelBaseEcon.nvariables(m) nshks = ModelBaseEcon.nshocks(m) - # + # data_chk = readdlm("data/M1_TestData.csv", ',', Float64) IDX = size(data_chk, 1) - plan = Plan(m, 1 + m.maxlag:IDX - m.maxlead) - # + plan = Plan(m, 1+m.maxlag:IDX-m.maxlead) + # p01 = deepcopy(plan) - autoexogenize!(p01, m, m.maxlag + 1:IDX - m.maxlead) + autoexogenize!(p01, m, m.maxlag+1:IDX-m.maxlead) data01 = zeroarray(m, p01) - 
data01[1:m.maxlag,:] = data_chk[1:m.maxlag,:] - data01[end - m.maxlead + 1:end,:] = data_chk[end - m.maxlead + 1:end,:] - data01[m.maxlag + 1:end - m.maxlead,1:nvars] = data_chk[m.maxlag + 1:end - m.maxlead,1:nvars] + data01[1:m.maxlag, :] = data_chk[1:m.maxlag, :] + data01[end-m.maxlead+1:end, :] = data_chk[end-m.maxlead+1:end, :] + data01[m.maxlag+1:end-m.maxlead, 1:nvars] = data_chk[m.maxlag+1:end-m.maxlead, 1:nvars] # deviation gives the same results res01 = simulate(m, p01, data01) - res01_dev = simulate(m, p01, data01; deviation=true) - @test isapprox(res01_dev, data_chk; atol = 1.0e-9) - @test isapprox(res01_dev, res01; atol = 1.0e-9) + res01_dev = simulate(m, p01, data01; deviation=true) + @test isapprox(res01_dev, data_chk; atol=1.0e-9) + @test isapprox(res01_dev, res01; atol=1.0e-9) end - let m = deepcopy(E3.model) + let m = getE3() clear_sstate!(m) nvars = ModelBaseEcon.nvariables(m) nshks = ModelBaseEcon.nshocks(m) - # + # data_chk = readdlm("data/M3_TestData.csv", ',', Float64) IDX = size(data_chk, 1) - plan = Plan(m, 1 + m.maxlag:IDX - m.maxlead) - # + plan = Plan(m, 1+m.maxlag:IDX-m.maxlead) + # p01 = deepcopy(plan) - autoexogenize!(p01, m, m.maxlag + 1:IDX - m.maxlead) + autoexogenize!(p01, m, m.maxlag+1:IDX-m.maxlead) data01 = zeroarray(m, p01) - data01[1:m.maxlag,:] = data_chk[1:m.maxlag,:] - data01[end - m.maxlead + 1:end,:] = data_chk[end - m.maxlead + 1:end,:] - data01[m.maxlag + 1:end - m.maxlead,1:nvars] = data_chk[m.maxlag + 1:end - m.maxlead,1:nvars] + data01[1:m.maxlag, :] = data_chk[1:m.maxlag, :] + data01[end-m.maxlead+1:end, :] = data_chk[end-m.maxlead+1:end, :] + data01[m.maxlag+1:end-m.maxlead, 1:nvars] = data_chk[m.maxlag+1:end-m.maxlead, 1:nvars] # deviation do not give the same result res01 = simulate(m, p01, data01) res01_dev = simulate(m, p01, data01; deviation=true) - @test isapprox(res01, data_chk; atol = 1.0e-9) - @test !isapprox(res01_dev, data_chk; atol = 1.0e-9) - @test !isapprox(res01_dev, res01; atol = 1.0e-9) + @test isapprox(res01, data_chk; atol=1.0e-9) + @test !isapprox(res01_dev, data_chk; atol=1.0e-9) + @test !isapprox(res01_dev, res01; atol=1.0e-9) # subtract steady-state from data and results data02 = deepcopy(data01) data_chk02 = deepcopy(data_chk) sssolve!(m) for (i, var) in enumerate(m.allvars) - data02[:,i] .-= m.sstate[var].level - data_chk02[:,i] .-= m.sstate[var].level + data02[:, i] .-= m.sstate[var].level + data_chk02[:, i] .-= m.sstate[var].level end res02 = simulate(m, p01, data02) res02_dev = simulate(m, p01, data02; deviation=true) - # results equal new data_chk02 - @test isapprox(res01, data_chk02; atol = 1.0e-9) - @test isapprox(res02_dev, data_chk02; atol = 1.0e-9) - @test isapprox(res02_dev, res01; atol = 1.0e-9) + # results equal new data_chk02 + @test isapprox(res01, data_chk02; atol=1.0e-9) + @test isapprox(res02_dev, data_chk02; atol=1.0e-9) + @test isapprox(res02_dev, res01; atol=1.0e-9) end end + diff --git a/test/sstests.jl b/test/sstests.jl index 4a598c4..9147d2c 100644 --- a/test/sstests.jl +++ b/test/sstests.jl @@ -1,20 +1,21 @@ ################################################################################## # This file is part of StateSpaceEcon.jl # BSD 3-Clause License -# Copyright (c) 2020-2022, Bank of Canada +# Copyright (c) 2020-2023, Bank of Canada # All rights reserved. 
################################################################################## -empty!(E1.model.sstate.constraints) + @testset "E1.sstate" begin - let m = deepcopy(E1.model) + let m = getE1() + empty!(m.sstate.constraints) m.α = 0.5 m.β = 1.0 - m.α clear_sstate!(m) sssolve!(m) @test check_sstate(m) == 0 @test m.sstate.mask == [false, false, true, true] - # + # @steadystate m y = 1.2 m.α = 0.5 m.β = 1.0 - m.α @@ -23,7 +24,7 @@ empty!(E1.model.sstate.constraints) @test check_sstate(m) == 0 @test m.sstate.mask == [true, false, true, true] @test m.sstate.values[1] == 1.2 - # + # m.α = 0.4 m.β = 1.0 - m.α clear_sstate!(m) @@ -32,17 +33,17 @@ empty!(E1.model.sstate.constraints) @test check_sstate(m) == 0 @test m.sstate.mask == trues(4) @test m.sstate.values == [1.2, 0.0, 0.0, 0.0] - # + # empty!(m.sstate.constraints) m.α = 0.3 m.β = 1.0 - m.α clear_sstate!(m) @suppress begin - sssolve!(m; verbose=true, nropts = Options(linesearch = true)) + sssolve!(m; verbose=true, nropts=Options(linesearch=true)) end @test check_sstate(m) == 0 @test m.sstate.values[2] == 0.0 - # + # empty!(m.sstate.constraints) m.α = 0.3 m.β = 1.0 - m.α @@ -51,16 +52,16 @@ empty!(E1.model.sstate.constraints) end end -empty!(E1.model.sstate.constraints) @testset "E1.sstate, auto" begin - let m = deepcopy(E1.model) + let m = getE1() + empty!(m.sstate.constraints) m.α = 0.5 m.β = 1.0 - m.α clear_sstate!(m) sssolve!(m; method=:auto) @test check_sstate(m) == 0 @test m.sstate.mask == [true, true, true, true] # different from default - # + # @steadystate m y = 1.2 m.α = 0.5 m.β = 1.0 - m.α @@ -69,7 +70,7 @@ empty!(E1.model.sstate.constraints) @test check_sstate(m) == 0 @test m.sstate.mask == [true, true, true, true] # different from default @test m.sstate.values[1] == 1.2 - # + # m.α = 0.4 m.β = 1.0 - m.α clear_sstate!(m) @@ -78,7 +79,7 @@ empty!(E1.model.sstate.constraints) @test check_sstate(m) == 0 @test m.sstate.mask == trues(4) @test m.sstate.values == [1.2, 0.0, 0.0, 0.0] - # + # empty!(m.sstate.constraints) m.α = 0.3 m.β = 1.0 - m.α @@ -91,16 +92,16 @@ empty!(E1.model.sstate.constraints) end end -empty!(E1.model.sstate.constraints) @testset "E1.sstate, lm" begin - let m = deepcopy(E1.model) + let m = getE1() + empty!(m.sstate.constraints) m.α = 0.5 m.β = 1.0 - m.α clear_sstate!(m) sssolve!(m; method=:lm) @test check_sstate(m) == 0 @test m.sstate.mask == [true, true, true, true] # different from default - # + # @steadystate m y = 1.2 m.α = 0.5 m.β = 1.0 - m.α @@ -109,7 +110,7 @@ empty!(E1.model.sstate.constraints) @test check_sstate(m) == 0 @test m.sstate.mask == [true, true, true, true] # different from default @test m.sstate.values[1] == 1.2 - # + # m.α = 0.4 m.β = 1.0 - m.α clear_sstate!(m) @@ -118,7 +119,7 @@ empty!(E1.model.sstate.constraints) @test check_sstate(m) == 0 @test m.sstate.mask == trues(4) @test m.sstate.values == [1.2, 0.0, 0.0, 0.0] - # + # empty!(m.sstate.constraints) m.α = 0.3 m.β = 1.0 - m.α @@ -131,9 +132,9 @@ empty!(E1.model.sstate.constraints) end end -empty!(E2.model.sstate.constraints) @testset "E2.sstate" begin - let m = deepcopy(E2.model) + let m = getE2() + empty!(m.sstate.constraints) clear_sstate!(m) sssolve!(m) @test check_sstate(m) == 0 @@ -158,7 +159,7 @@ empty!(E2.model.sstate.constraints) end @testset "E6.sstate" begin - let m = deepcopy(E6.model) + let m = getE6() m.options.maxiter = 50 clear_sstate!(m) @@ -180,7 +181,7 @@ end end @testset "E7.sstate" begin - let m = deepcopy(E7.model) + let m = getE7() m.options.maxiter = 100 m.options.tol = 1e-9 @@ -195,7 +196,7 @@ end @steadystate m 
lc = 14 clear_sstate!(m) sssolve!(m) - @test check_sstate(m, tol = 10m.tol) == 0 + @test check_sstate(m, tol=10m.tol) == 0 @test all(m.sstate.mask) @test m.sstate.values ≈ [0.004, 0.0, 0.004, 0.0, 0.004, 0.0, 14.0, 0.004, 7.0, 0.004, 9.267287357063445, 0.004, 14.000911466453774, 0.004, 0, 0, 0, 0, 14.000911466453774, 0.004, 9.267287357063445, 0.004] @@ -241,7 +242,7 @@ end end @testset "SSTEST" begin - let m = SSTEST.model + let m = deepcopy(SSTEST.model) @test sum(issteady.(m.allvars)) == 2 clear_sstate!(m) @test issssolved(m) @@ -285,7 +286,7 @@ end @test check_sstate(m; verbose=true) == 6 end @test occursin("System may be inconsistent", out) - + end end @@ -296,12 +297,12 @@ end @variables m c @equations m begin a[t] = (a[t-1] + 10)^2 - b[t] = b[t-1]/0 + b[t] = b[t-1] / 0 b[t] = (a[t-1] + 10)^2 c[t] = a[t] + b[t] end @initialize m - + # tests @test sum(issteady.(m.allvars)) == 2 clear_sstate!(m) @@ -318,8 +319,8 @@ end end @testset "presolve, ssZeroSlope" begin - empty!(E1.model.sstate.constraints) - let m = deepcopy(E1.model) + let m = getE1() + empty!(m.sstate.constraints) m.α = 0.5 m.β = 1.0 - m.α clear_sstate!(m) @@ -328,8 +329,8 @@ end @test m.sstate.mask == [false, false, true, true] end - empty!(E1.model.sstate.constraints) - let m = deepcopy(E1.model) + let m = getE1() + empty!(m.sstate.constraints) m.α = 0.5 m.β = 1.0 - m.α m.flags.ssZeroSlope = true diff --git a/test/stochsims.jl b/test/stochsims.jl index 11e7642..0559883 100644 --- a/test/stochsims.jl +++ b/test/stochsims.jl @@ -8,7 +8,7 @@ @using_example E6 @testset "stochsims" begin - m = deepcopy(E6.model) + m = getE6() m.options.fctype = fcnatural @steadystate m lp = 0.6 @steadystate m ly = 0.3 @@ -41,11 +41,11 @@ db_1 = copy(db_ss) shk_1 = MVTSeries(srng, m.shocks, rand) db_1 .= db_1 .+ shk_1 - - @test simulate(m, plan, db_1; anticipate=false) ≈ stoch_simulate(m, plan, db_ss, shk_1)[1] - @test simulate(m, plan, db_1; anticipate=false) ≈ stoch_simulate(m, plan, rawdata(db_ss), shk_1)[1] - @test simulate(m, plan, db_1; anticipate=false) ≈ stoch_simulate(m, plan, Workspace(; pairs(db_ss)...), shk_1)[1] - + + @test simulate(m, plan, db_1; anticipate=false) ≈ stoch_simulate(m, plan, db_ss, shk_1)[1] + @test simulate(m, plan, db_1; anticipate=false) ≈ stoch_simulate(m, plan, rawdata(db_ss), shk_1)[1] + @test simulate(m, plan, db_1; anticipate=false) ≈ stoch_simulate(m, plan, Workspace(; pairs(db_ss)...), shk_1)[1] + db_1 = simulate(m, plan, db_1; anticipate=true) res = stoch_simulate(m, plan, db_1, shk_sample; check=true) for (r, s) in zip(res, shk_sample)