diff --git a/Project.toml b/Project.toml
index 06e2ee59..d5ba1185 100644
--- a/Project.toml
+++ b/Project.toml
@@ -11,6 +11,7 @@ NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
 NLPModelsModifiers = "e01155f1-5c6f-4375-a9d8-616dd036575f"
 Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 ProximalOperators = "a725b495-10eb-56fe-b38b-717eba820537"
+RegularizedProblems = "ea076b23-609f-44d2-bb12-a4ae45328278"
 ShiftedProximalOperators = "d4fd37fa-580c-4e43-9b30-361c21aae263"
 SolverCore = "ff4d7338-4cf1-434d-91df-b86cb86fb843"
 TSVD = "9449cd9e-2762-5aa3-a617-5413e99d722e"
@@ -20,7 +21,7 @@ LinearOperators = "2.7"
 NLPModels = "0.19, 0.20"
 NLPModelsModifiers = "0.7"
 ProximalOperators = "0.15"
-RegularizedProblems = "0.1"
+RegularizedProblems = "0.1.1"
 ShiftedProximalOperators = "0.2"
 SolverCore = "0.3.0"
 TSVD = "0.4"
diff --git a/src/R2_alg.jl b/src/R2_alg.jl
index 88481fc9..ede95e9c 100644
--- a/src/R2_alg.jl
+++ b/src/R2_alg.jl
@@ -1,9 +1,12 @@
-export R2
+export R2, R2Solver, solve!
 
-mutable struct R2Solver{R, S <: AbstractVector{R}} <: AbstractOptimizationSolver
+import SolverCore.solve!
+
+mutable struct R2Solver{R <: Real, G <: Union{ShiftedProximableFunction, Nothing}, S <: AbstractVector{R}} <: AbstractOptimizationSolver
   xk::S
   ∇fk::S
   mν∇fk::S
+  ψ::G
   xkn::S
   s::S
   has_bnds::Bool
@@ -20,7 +23,8 @@ function R2Solver(
   x0::S,
   options::ROSolverOptions,
   l_bound::S,
-  u_bound::S,
+  u_bound::S;
+  ψ = nothing,
 ) where {R <: Real, S <: AbstractVector{R}}
   maxIter = options.maxIter
   xk = similar(x0)
@@ -36,13 +40,14 @@ function R2Solver(
     l_bound_m_x = similar(xk, 0)
     u_bound_m_x = similar(xk, 0)
   end
-  Fobj_hist = zeros(R, maxIter)
-  Hobj_hist = zeros(R, maxIter)
-  Complex_hist = zeros(Int, maxIter)
+  Fobj_hist = zeros(R, maxIter + 2)
+  Hobj_hist = zeros(R, maxIter + 2)
+  Complex_hist = zeros(Int, maxIter + 2)
   return R2Solver(
     xk,
     ∇fk,
     mν∇fk,
+    ψ,
     xkn,
     s,
     has_bnds,
@@ -56,9 +61,55 @@ function R2Solver(
   )
 end
 
+function R2Solver(
+  reg_nlp::AbstractRegularizedNLPModel{T, V};
+  max_iter::Int = 10000,
+) where {T, V}
+  x0 = reg_nlp.model.meta.x0
+  l_bound = reg_nlp.model.meta.lvar
+  u_bound = reg_nlp.model.meta.uvar
+
+  xk = similar(x0)
+  ∇fk = similar(x0)
+  mν∇fk = similar(x0)
+  xkn = similar(x0)
+  s = zero(x0)
+  has_bnds = any(l_bound .!= T(-Inf)) || any(u_bound .!= T(Inf))
+  if has_bnds
+    l_bound_m_x = similar(xk)
+    u_bound_m_x = similar(xk)
+    @. l_bound_m_x = l_bound - x0
+    @. u_bound_m_x = u_bound - x0
+  else
+    l_bound_m_x = similar(xk, 0)
+    u_bound_m_x = similar(xk, 0)
+  end
+  Fobj_hist = zeros(T, max_iter + 2)
+  Hobj_hist = zeros(T, max_iter + 2)
+  Complex_hist = zeros(Int, max_iter + 2)
+
+  ψ = has_bnds ? shifted(reg_nlp.h, xk, l_bound_m_x, u_bound_m_x, reg_nlp.selected) : shifted(reg_nlp.h, xk)
+  return R2Solver(
+    xk,
+    ∇fk,
+    mν∇fk,
+    ψ,
+    xkn,
+    s,
+    has_bnds,
+    l_bound,
+    u_bound,
+    l_bound_m_x,
+    u_bound_m_x,
+    Fobj_hist,
+    Hobj_hist,
+    Complex_hist,
+  )
+end
+
 """
-    R2(nlp, h, options)
-    R2(f, ∇f!, h, options, x0)
+    R2(reg_nlp; kwargs...)
 
 A first-order quadratic regularization method for the problem
 
@@ -72,62 +123,84 @@ About each iterate xₖ, a step sₖ is computed as a solution of
 
     min  φ(s; xₖ) + ½ σₖ ‖s‖² + ψ(s; xₖ)
 
 where φ(s; xₖ) = f(xₖ) + ∇f(xₖ)ᵀs is the Taylor linear approximation of f about xₖ,
-ψ(s; xₖ) = h(xₖ + s), ‖⋅‖ is a user-defined norm and σₖ > 0 is the regularization parameter.
-
-### Arguments
-
-* `nlp::AbstractNLPModel`: a smooth optimization problem
-* `h`: a regularizer such as those defined in ProximalOperators
-* `options::ROSolverOptions`: a structure containing algorithmic parameters
-* `x0::AbstractVector`: an initial guess (in the second calling form)
-
-### Keyword Arguments
-
-* `x0::AbstractVector`: an initial guess (in the first calling form: default = `nlp.meta.x0`)
-* `selected::AbstractVector{<:Integer}`: (default `1:length(x0)`).
-
-The objective and gradient of `nlp` will be accessed.
-
-In the second form, instead of `nlp`, the user may pass in
-
-* `f` a function such that `f(x)` returns the value of f at x
-* `∇f!` a function to evaluate the gradient in place, i.e., such that `∇f!(g, x)` store ∇f(x) in `g`.
-
-### Return values
-
-* `xk`: the final iterate
-* `Fobj_hist`: an array with the history of values of the smooth objective
-* `Hobj_hist`: an array with the history of values of the nonsmooth objective
-* `Complex_hist`: an array with the history of number of inner iterations.
+ψ(s; xₖ) is either h(xₖ + s) or an approximation of h(xₖ + s), ‖⋅‖ is a user-defined norm and σₖ > 0 is the regularization parameter.
+
+For advanced usage, first define an `R2Solver` to preallocate the memory used in the algorithm, and then call `solve!`:
+
+    solver = R2Solver(reg_nlp)
+    solve!(solver, reg_nlp)
+
+    stats = GenericExecutionStats(reg_nlp.model)
+    solver = R2Solver(reg_nlp)
+    solve!(solver, reg_nlp, stats)
+
+# Arguments
+* `reg_nlp::AbstractRegularizedNLPModel{T, V}`: the problem to solve, see `RegularizedProblems.jl`, `NLPModels.jl`.
+
+# Keyword arguments
+- `x::V = reg_nlp.model.meta.x0`: the initial guess;
+- `atol::T = √eps(T)`: absolute tolerance;
+- `rtol::T = √eps(T)`: relative tolerance;
+- `neg_tol::T = eps(T)^(1 / 4)`: negative tolerance: the algorithm also stops when `ξₖ < 0` and `√(-ξₖ/νₖ) < neg_tol` (see below);
+- `max_eval::Int = -1`: maximum number of evaluations of the objective function (a negative number means unlimited);
+- `max_time::Float64 = 30.0`: maximum time limit in seconds;
+- `max_iter::Int = 10000`: maximum number of iterations;
+- `verbose::Int = 0`: if > 0, display iteration details every `verbose` iterations;
+- `σmin::T = eps(T)`: minimum value of the regularization parameter;
+- `η1::T = √√eps(T)`: successful iteration threshold;
+- `η2::T = T(0.9)`: very successful iteration threshold;
+- `ν::T = eps(T)^(1 / 5)`: multiplicative inverse of the regularization parameter: ν = 1/σ;
+- `γ::T = T(3)`: regularization parameter multiplier: σ := σ/γ when the iteration is very successful and σ := σγ when the iteration is unsuccessful.
+
+The algorithm stops either when `√(ξₖ/νₖ) < atol + rtol*√(ξ₀/ν₀)`, or when `ξₖ < 0` and `√(-ξₖ/νₖ) < neg_tol`, where ξₖ := f(xₖ) + h(xₖ) - φ(sₖ; xₖ) - ψ(sₖ; xₖ), and √(ξₖ/νₖ) is a stationarity measure.
+
+# Output
+The value returned is a `GenericExecutionStats`, see `SolverCore.jl`.
+
+# Callback
+The callback is called at each iteration.
+The expected signature of the callback is `callback(nlp, solver, stats)`, and its output is ignored.
+Changing any of the input arguments will affect the subsequent iterations.
+In particular, setting `stats.status = :user` will stop the algorithm.
+All relevant information should be available in `nlp` and `solver`.
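+For example, here is a sketch of a callback (illustrative only, with a hypothetical
+threshold `fmin` chosen by the user) that stops the algorithm as soon as the smooth
+part of the objective, exposed through the fields listed below, falls under `fmin`:
+
+    fmin = 1.0e-3  # hypothetical target value, not part of the API
+    solver = R2Solver(reg_nlp)
+    stats = GenericExecutionStats(reg_nlp.model)
+    cb = (nlp, solver, stats) -> begin
+      stats.solver_specific[:smooth_obj] < fmin && (stats.status = :user)
+    end
+    solve!(solver, reg_nlp, stats, callback = cb)
+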
+Notably, you can access, and modify, the following:
+- `solver.xk`: current iterate;
+- `solver.∇fk`: current gradient;
+- `stats`: structure holding the output of the algorithm (`GenericExecutionStats`), which contains, among other things:
+  - `stats.iter`: current iteration counter;
+  - `stats.objective`: current objective function value;
+  - `stats.solver_specific[:smooth_obj]`: current value of the smooth part of the objective function;
+  - `stats.solver_specific[:nonsmooth_obj]`: current value of the nonsmooth part of the objective function;
+  - `stats.status`: current status of the algorithm. Should be `:unknown` unless the algorithm has attained a stopping criterion. Changing this to anything other than `:unknown` will stop the algorithm, but you should use `:user` to properly indicate the intention;
+  - `stats.elapsed_time`: elapsed time in seconds.
 """
-function R2(nlp::AbstractNLPModel, args...; kwargs...)
+function R2(
+  nlp::AbstractNLPModel{R, V},
+  h,
+  options::ROSolverOptions{R};
+  kwargs...,
+) where {R <: Real, V}
   kwargs_dict = Dict(kwargs...)
+  selected = pop!(kwargs_dict, :selected, 1:nlp.meta.nvar)
   x0 = pop!(kwargs_dict, :x0, nlp.meta.x0)
-  xk, k, outdict = R2(
-    x -> obj(nlp, x),
-    (g, x) -> grad!(nlp, x, g),
-    args...,
-    x0,
-    nlp.meta.lvar,
-    nlp.meta.uvar;
-    kwargs_dict...,
+  reg_nlp = RegularizedNLPModel(nlp, h, selected)
+  stats = R2(
+    reg_nlp,
+    x = x0,
+    atol = options.ϵa,
+    rtol = options.ϵr,
+    neg_tol = options.neg_tol,
+    verbose = options.verbose,
+    max_iter = options.maxIter,
+    max_time = options.maxTime,
+    σmin = options.σmin,
+    η1 = options.η1,
+    η2 = options.η2,
+    ν = options.ν,
+    γ = options.γ,
   )
-  ξ = outdict[:ξ]
-  stats = GenericExecutionStats(nlp)
-  set_status!(stats, outdict[:status])
-  set_solution!(stats, xk)
-  set_objective!(stats, outdict[:fk] + outdict[:hk])
-  set_residuals!(stats, zero(eltype(xk)), ξ)
-  set_iter!(stats, k)
-  set_time!(stats, outdict[:elapsed_time])
-  set_solver_specific!(stats, :Fhist, outdict[:Fhist])
-  set_solver_specific!(stats, :Hhist, outdict[:Hhist])
-  set_solver_specific!(stats, :NonSmooth, outdict[:NonSmooth])
-  set_solver_specific!(stats, :SubsolverCounter, outdict[:Chist])
   return stats
 end
 
-# method without bounds
 function R2(
   f::F,
   ∇f!::G,
@@ -137,23 +210,35 @@ function R2(
   selected::AbstractVector{<:Integer} = 1:length(x0),
   kwargs...,
 ) where {F <: Function, G <: Function, H, R <: Real}
-  start_time = time()
-  elapsed_time = 0.0
-  solver = R2Solver(x0, options, similar(x0, 0), similar(x0, 0))
-  k, status, fk, hk, ξ = R2!(solver, f, ∇f!, h, options, x0; selected = selected)
-  elapsed_time = time() - start_time
-  outdict = Dict(
-    :Fhist => solver.Fobj_hist[1:k],
-    :Hhist => solver.Hobj_hist[1:k],
-    :Chist => solver.Complex_hist[1:k],
+  nlp = FirstOrderModel(f, ∇f!, x0)
+  reg_nlp = RegularizedNLPModel(nlp, h, selected)
+  stats = R2(
+    reg_nlp,
+    x = x0,
+    atol = options.ϵa,
+    rtol = options.ϵr,
+    neg_tol = options.neg_tol,
+    verbose = options.verbose,
+    max_iter = options.maxIter,
+    max_time = options.maxTime,
+    σmin = options.σmin,
+    η1 = options.η1,
+    η2 = options.η2,
+    ν = options.ν,
+    γ = options.γ,
+  )
+  outdict = Dict(
+    :Fhist => stats.solver_specific[:Fhist],
+    :Hhist => stats.solver_specific[:Hhist],
+    :Chist => stats.solver_specific[:SubsolverCounter],
     :NonSmooth => h,
-    :status => status,
-    :fk => fk,
-    :hk => hk,
-    :ξ => ξ,
-    :elapsed_time => elapsed_time,
+    :status => stats.status,
+    :fk => stats.solver_specific[:smooth_obj],
+    :hk => stats.solver_specific[:nonsmooth_obj],
+    :ξ => stats.solver_specific[:xi],
+    :elapsed_time => stats.elapsed_time,
  )
-  return solver.xk, k, outdict
+  return stats.solution, stats.iter, outdict
 end
 
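+# Example (illustrative sketch, not from the original documentation): the legacy
+# functional interface above still returns `(xk, k, outdict)` while delegating to
+# the new `R2(reg_nlp; kwargs...)` driver, e.g.,
+#
+#   f(x) = sum((x .- 1) .^ 2)
+#   ∇f!(g, x) = (g .= 2 .* (x .- 1); g)
+#   options = ROSolverOptions(verbose = 10)
+#   xk, k, outdict = R2(f, ∇f!, NormL1(1.0), options, zeros(2))
+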
 function R2(
   f::F,
   ∇f!::G,
@@ -167,53 +252,97 @@ function R2(
   selected::AbstractVector{<:Integer} = 1:length(x0),
   kwargs...,
 ) where {F <: Function, G <: Function, H, R <: Real}
-  start_time = time()
-  elapsed_time = 0.0
-  solver = R2Solver(x0, options, l_bound, u_bound)
-  k, status, fk, hk, ξ = R2!(solver, f, ∇f!, h, options, x0; selected = selected)
-  elapsed_time = time() - start_time
+  nlp = FirstOrderModel(f, ∇f!, x0, lvar = l_bound, uvar = u_bound)
+  reg_nlp = RegularizedNLPModel(nlp, h, selected)
+  stats = R2(
+    reg_nlp,
+    x = x0,
+    atol = options.ϵa,
+    rtol = options.ϵr,
+    neg_tol = options.neg_tol,
+    verbose = options.verbose,
+    max_iter = options.maxIter,
+    max_time = options.maxTime,
+    σmin = options.σmin,
+    η1 = options.η1,
+    η2 = options.η2,
+    ν = options.ν,
+    γ = options.γ,
+  )
   outdict = Dict(
-    :Fhist => solver.Fobj_hist[1:k],
-    :Hhist => solver.Hobj_hist[1:k],
-    :Chist => solver.Complex_hist[1:k],
+    :Fhist => stats.solver_specific[:Fhist],
+    :Hhist => stats.solver_specific[:Hhist],
+    :Chist => stats.solver_specific[:SubsolverCounter],
     :NonSmooth => h,
-    :status => status,
-    :fk => fk,
-    :hk => hk,
-    :ξ => ξ,
-    :elapsed_time => elapsed_time,
+    :status => stats.status,
+    :fk => stats.solver_specific[:smooth_obj],
+    :hk => stats.solver_specific[:nonsmooth_obj],
+    :ξ => stats.solver_specific[:xi],
+    :elapsed_time => stats.elapsed_time,
   )
-  return solver.xk, k, outdict
+  return stats.solution, stats.iter, outdict
 end
 
-function R2!(
-  solver::R2Solver{R},
-  f::F,
-  ∇f!::G,
-  h::H,
-  options::ROSolverOptions{R},
-  x0::AbstractVector{R};
-  selected::AbstractVector{<:Integer} = 1:length(x0),
-) where {F <: Function, G <: Function, H, R <: Real}
-  start_time = time()
-  elapsed_time = 0.0
-  ϵ = options.ϵa
-  ϵr = options.ϵr
-  neg_tol = options.neg_tol
-  verbose = options.verbose
-  maxIter = options.maxIter
-  maxTime = options.maxTime
-  σmin = options.σmin
-  η1 = options.η1
-  η2 = options.η2
-  ν = options.ν
-  γ = options.γ
-
-  # retrieve workspace
-  xk = solver.xk
-  xk .= x0
+
+function R2(reg_nlp::AbstractRegularizedNLPModel; kwargs...)
+  kwargs_dict = Dict(kwargs...)
+  max_iter = pop!(kwargs_dict, :max_iter, 10000)
+  solver = R2Solver(reg_nlp, max_iter = max_iter)
+  stats = GenericExecutionStats(reg_nlp.model)
+  cb = (nlp, solver, stats) -> begin
+    solver.Fobj_hist[stats.iter + 1] = stats.solver_specific[:smooth_obj]
+    solver.Hobj_hist[stats.iter + 1] = stats.solver_specific[:nonsmooth_obj]
+    solver.Complex_hist[stats.iter + 1] += 1
+  end
+  solve!(
+    solver,
+    reg_nlp,
+    stats;
+    callback = cb,
+    max_iter = max_iter,
+    kwargs...,
+  )
+  set_solver_specific!(stats, :Fhist, solver.Fobj_hist[1:(stats.iter + 1)])
+  set_solver_specific!(stats, :Hhist, solver.Hobj_hist[1:(stats.iter + 1)])
+  set_solver_specific!(stats, :SubsolverCounter, solver.Complex_hist[1:(stats.iter + 1)])
+  return stats
+end
+
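+# Example (illustrative sketch): the in-place interface on a basis-pursuit
+# denoising problem; `bpdn_model` is assumed to come from `RegularizedProblems.jl`
+# and `NormL1` from `ProximalOperators.jl`:
+#
+#   model, _, _ = bpdn_model()
+#   reg_nlp = RegularizedNLPModel(model, NormL1(1.0))
+#   solver = R2Solver(reg_nlp)
+#   stats = GenericExecutionStats(reg_nlp.model)
+#   solve!(solver, reg_nlp, stats, verbose = 10)
+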
+function SolverCore.solve!(
+  solver::R2Solver{T},
+  reg_nlp::AbstractRegularizedNLPModel{T, V},
+  stats::GenericExecutionStats{T, V};
+  callback = (args...) -> nothing,
+  x::V = reg_nlp.model.meta.x0,
+  atol::T = √eps(T),
+  rtol::T = √eps(T),
+  neg_tol::T = eps(T)^(1 / 4),
+  verbose::Int = 0,
+  max_iter::Int = 10000,
+  max_time::Float64 = 30.0,
+  max_eval::Int = -1,
+  σmin::T = eps(T),
+  η1::T = √√eps(T),
+  η2::T = T(0.9),
+  ν::T = eps(T)^(1 / 5),
+  γ::T = T(3),
+) where {T, V}
+
+  reset!(stats)
+
+  # Retrieve workspace
+  selected = reg_nlp.selected
+  h = reg_nlp.h
+  nlp = reg_nlp.model
+
+  xk = solver.xk .= x
+
+  # Make sure ψ has the correct shift
+  shift!(solver.ψ, xk)
+
   ∇fk = solver.∇fk
   mν∇fk = solver.mν∇fk
+  ψ = solver.ψ
   xkn = solver.xkn
   s = solver.s
   has_bnds = solver.has_bnds
@@ -223,103 +352,101 @@ function R2!(
     l_bound_m_x = solver.l_bound_m_x
     u_bound_m_x = solver.u_bound_m_x
   end
-  Fobj_hist = solver.Fobj_hist
-  Hobj_hist = solver.Hobj_hist
-  Complex_hist = solver.Complex_hist
-
-  if verbose == 0
-    ptf = Inf
-  elseif verbose == 1
-    ptf = round(maxIter / 10)
-  elseif verbose == 2
-    ptf = round(maxIter / 100)
-  else
-    ptf = 1
-  end
 
   # initialize parameters
+  improper = false
   hk = @views h(xk[selected])
   if hk == Inf
     verbose > 0 && @info "R2: finding initial guess where nonsmooth term is finite"
-    prox!(xk, h, x0, one(eltype(x0)))
+    prox!(xk, h, xk, one(T))
     hk = @views h(xk[selected])
     hk < Inf || error("prox computation must be erroneous")
     verbose > 0 && @debug "R2: found point where h has value" hk
   end
-  hk == -Inf && error("nonsmooth term is not proper")
-
-  if has_bnds
-    @. l_bound_m_x = l_bound - xk
-    @. u_bound_m_x = u_bound - xk
-    ψ = shifted(h, xk, l_bound_m_x, u_bound_m_x, selected)
-  else
-    ψ = shifted(h, xk)
-  end
+  improper = (hk == -Inf)
 
   if verbose > 0
-    #! format: off
-    @info @sprintf "%6s %8s %8s %7s %8s %7s %7s %7s %1s" "iter" "f(x)" "h(x)" "√(ξ/ν)" "ρ" "σ" "‖x‖" "‖s‖" ""
-    #! format: off
+    @info log_header(
+      [:iter, :fx, :hx, :xi, :ρ, :σ, :normx, :norms, :arrow],
+      [Int, Float64, Float64, Float64, Float64, Float64, Float64, Float64, Char],
+      hdr_override = Dict{Symbol, String}( # TODO: Add this as constant dict elsewhere
+        :iter => "iter",
+        :fx => "f(x)",
+        :hx => "h(x)",
+        :xi => "√(ξ/ν)",
+        :ρ => "ρ",
+        :σ => "σ",
+        :normx => "‖x‖",
+        :norms => "‖s‖",
+        :arrow => " ",
+      ),
+      colsep = 1,
+    )
  end
 
-  local ξ::R
-  k = 0
+  local ξ::T
+  local ρk::T
   σk = max(1 / ν, σmin)
   ν = 1 / σk
-  sqrt_ξ_νInv = one(R)
+  sqrt_ξ_νInv = one(T)
 
-  fk = f(xk)
-  ∇f!(∇fk, xk)
+  fk = obj(nlp, xk)
+  grad!(nlp, xk, ∇fk)
   @. mν∇fk = -ν * ∇fk
 
-  optimal = false
-  tired = maxIter > 0 && k ≥ maxIter || elapsed_time > maxTime
-
-  while !(optimal || tired)
-    k = k + 1
-    elapsed_time = time() - start_time
-    Fobj_hist[k] = fk
-    Hobj_hist[k] = hk
+  set_iter!(stats, 0)
+  start_time = time()
+  set_time!(stats, 0.0)
+  set_objective!(stats, fk + hk)
+  set_solver_specific!(stats, :smooth_obj, fk)
+  set_solver_specific!(stats, :nonsmooth_obj, hk)
+
+  φk(d) = dot(∇fk, d)
+  mk(d)::T = φk(d) + ψ(d)::T
+
+  prox!(s, ψ, mν∇fk, ν)
+  mks = mk(s)
+
+  ξ = hk - mks + max(1, abs(hk)) * 10 * eps()
+
+  sqrt_ξ_νInv = ξ ≥ 0 ? sqrt(ξ / ν) : sqrt(-ξ / ν)
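+  # ξ estimates the model decrease f(xk) + h(xk) - (f(xk) + φ(s; xk) + ψ(s; xk))
+  # produced by the prox-gradient step s (up to a rounding safeguard); since
+  # ν = 1/σ, √(ξ/ν) = √(σξ) is the stationarity measure used in the stopping
+  # tests below.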
+  atol += rtol * sqrt_ξ_νInv # make stopping test absolute and relative
+
+  solved = (ξ < 0 && sqrt_ξ_νInv ≤ neg_tol) || (ξ ≥ 0 && sqrt_ξ_νInv ≤ atol)
+  (ξ < 0 && sqrt_ξ_νInv > neg_tol) && error("R2: prox-gradient step should produce a decrease but ξ = $(ξ)")
+
+  set_solver_specific!(stats, :xi, sqrt_ξ_νInv)
+  set_status!(
+    stats,
+    get_status(
+      reg_nlp,
+      elapsed_time = stats.elapsed_time,
+      iter = stats.iter,
+      optimal = solved,
+      improper = improper,
+      max_eval = max_eval,
+      max_time = max_time,
+      max_iter = max_iter,
+    ),
+  )
+
+  callback(nlp, solver, stats)
 
-    # define model
-    φk(d) = dot(∇fk, d)
-    mk(d)::R = φk(d) + ψ(d)::R
+  done = stats.status != :unknown
 
-    prox!(s, ψ, mν∇fk, ν)
-    Complex_hist[k] += 1
-    mks = mk(s)
-    ξ = hk - mks + max(1, abs(hk)) * 10 * eps()
-    sqrt_ξ_νInv = ξ ≥ 0 ? sqrt(ξ / ν) : sqrt(-ξ / ν)
-
-    if ξ ≥ 0 && k == 1
-      ϵ += ϵr * sqrt_ξ_νInv # make stopping test absolute and relative
-    end
+  while !done
 
-    if (ξ < 0 && sqrt_ξ_νInv ≤ neg_tol) || (ξ ≥ 0 && sqrt_ξ_νInv ≤ ϵ)
-      optimal = true
-      continue
-    end
-
-    ξ > 0 || error("R2: prox-gradient step should produce a decrease but ξ = $(ξ)")
+    # Update xk and σk
     xkn .= xk .+ s
-    fkn = f(xkn)
+    fkn = obj(nlp, xkn)
     hkn = @views h(xkn[selected])
-    hkn == -Inf && error("nonsmooth term is not proper")
+    improper = (hkn == -Inf)
 
     Δobj = (fk + hk) - (fkn + hkn) + max(1, abs(fk + hk)) * 10 * eps()
-    ρk = Δobj / ξ
-
-    if (verbose > 0) && (k % ptf == 0)
-      #! format: off
-      σ_stat = (η2 ≤ ρk < Inf) ? "↘" : (ρk < η1 ? "↗" : "=")
-      @info @sprintf "%6d %8.1e %8.1e %7.1e %8.1e %7.1e %7.1e %7.1e %1s" k fk hk sqrt_ξ_νInv ρk σk norm(xk) norm(s) σ_stat
-
-      #! format: on
-    end
+    ρk = Δobj / ξ
 
-    if η2 ≤ ρk < Inf
-      σk = max(σk / γ, σmin)
-    end
+    verbose > 0 &&
+      stats.iter % verbose == 0 &&
+      @info log_row(Any[stats.iter, fk, hk, sqrt_ξ_νInv, ρk, σk, norm(xk), norm(s), (η2 ≤ ρk < Inf) ? "↘" : (ρk < η1 ? "↗" : "=")], colsep = 1)
 
     if η1 ≤ ρk < Inf
       xk .= xkn
@@ -330,41 +457,84 @@ function R2!(
      end
       fk = fkn
       hk = hkn
-      ∇f!(∇fk, xk)
+      grad!(nlp, xk, ∇fk)
       shift!(ψ, xk)
     end
 
+    if η2 ≤ ρk < Inf
+      σk = max(σk / γ, σmin)
+    end
     if ρk < η1 || ρk == Inf
      σk = σk * γ
    end
    ν = 1 / σk
-    tired = maxIter > 0 && k ≥ maxIter
-    if !tired
-      @. mν∇fk = -ν * ∇fk
-    end
+    @. mν∇fk = -ν * ∇fk
+
+    set_objective!(stats, fk + hk)
+    set_solver_specific!(stats, :smooth_obj, fk)
+    set_solver_specific!(stats, :nonsmooth_obj, hk)
+    set_iter!(stats, stats.iter + 1)
+    set_time!(stats, time() - start_time)
+
+    prox!(s, ψ, mν∇fk, ν)
+    mks = mk(s)
+
+    ξ = hk - mks + max(1, abs(hk)) * 10 * eps()
+    sqrt_ξ_νInv = ξ ≥ 0 ? sqrt(ξ / ν) : sqrt(-ξ / ν)
+    solved = (ξ < 0 && sqrt_ξ_νInv ≤ neg_tol) || (ξ ≥ 0 && sqrt_ξ_νInv ≤ atol)
+    (ξ < 0 && sqrt_ξ_νInv > neg_tol) && error("R2: prox-gradient step should produce a decrease but ξ = $(ξ)")
+
+    set_solver_specific!(stats, :xi, sqrt_ξ_νInv)
+    set_status!(
+      stats,
+      get_status(
+        reg_nlp,
+        elapsed_time = stats.elapsed_time,
+        iter = stats.iter,
+        optimal = solved,
+        improper = improper,
+        max_eval = max_eval,
+        max_time = max_time,
+        max_iter = max_iter,
+      ),
+    )
+
+    callback(nlp, solver, stats)
+
+    done = stats.status != :unknown
   end
 
-  if verbose > 0
-    if k == 1
-      @info @sprintf "%6d %8.1e %8.1e" k fk hk
-    elseif optimal
-      #! format: off
-      @info @sprintf "%6d %8.1e %8.1e %7.1e %8s %7.1e %7.1e %7.1e" k fk hk sqrt(ξ/ν) "" σk norm(xk) norm(s)
-      #! format: on
-      @info "R2: terminating with √(ξ/ν) = $(sqrt_ξ_νInv)"
-    end
-  end
+  if verbose > 0 && stats.status == :first_order
+    stats.iter > 0 &&
+      @info log_row(Any[stats.iter, fk, hk, sqrt_ξ_νInv, ρk, σk, norm(xk), norm(s), (η2 ≤ ρk < Inf) ? "↘" : (ρk < η1 ? "↗" : "=")], colsep = 1)
+    @info "R2: terminating with √(ξ/ν) = $(sqrt_ξ_νInv)"
+  end
 
-  status = if optimal
+  set_solution!(stats, xk)
+  return stats
+end
+
+function get_status(
+  reg_nlp::M;
+  elapsed_time = 0.0,
+  iter = 0,
+  optimal = false,
+  improper = false,
+  max_eval = Inf,
+  max_time = Inf,
+  max_iter = Inf,
+) where {M <: AbstractRegularizedNLPModel}
+  if optimal
     :first_order
-  elseif elapsed_time > maxTime
-    :max_time
-  elseif tired
+  elseif improper
+    :improper
+  elseif iter > max_iter
     :max_iter
+  elseif elapsed_time > max_time
+    :max_time
+  elseif neval_obj(reg_nlp.model) > max_eval && max_eval > -1
+    :max_eval
   else
-    :exception
+    :unknown
   end
-
-  return k, status, fk, hk, sqrt_ξ_νInv
-end
+end
\ No newline at end of file
diff --git a/src/RegularizedOptimization.jl b/src/RegularizedOptimization.jl
index 458a8cce..e92f21da 100644
--- a/src/RegularizedOptimization.jl
+++ b/src/RegularizedOptimization.jl
@@ -7,7 +7,7 @@ using LinearAlgebra, Logging, Printf
 using ProximalOperators, TSVD
 
 # dependencies from us
-using LinearOperators, NLPModels, NLPModelsModifiers, ShiftedProximalOperators, SolverCore
+using LinearOperators, NLPModels, NLPModelsModifiers, RegularizedProblems, ShiftedProximalOperators, SolverCore
 
 include("utils.jl")
 include("input_struct.jl")