diff --git a/.github/workflows/breakage.yml b/.github/workflows/breakage.yml index 4ecb3f1e..34b33ff7 100644 --- a/.github/workflows/breakage.yml +++ b/.github/workflows/breakage.yml @@ -10,7 +10,7 @@ jobs: strategy: fail-fast: false matrix: - pkg: [AmplNLReader, CUTEst, CaNNOLeS, NLPModelsIpopt, NLPModelsJuMP, QuadraticModels, SolverTools] + pkg: [NLPModelsModifiers, NLPModelsTest, AmplNLReader, CaNNOLeS, CUTEst, NLPModelsJuMP, QuadraticModels, SolverTools] pkgversion: ["master", "stable"] steps: - uses: actions/checkout@v2 diff --git a/LICENSE.md b/LICENSE.md index 230ba387..f72e45d3 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,4 +1,4 @@ -Copyright (c) 2015-2019: Dominique Orban and Abel Soares Siqueira +Copyright (c) 2015-2021: Dominique Orban and Abel Soares Siqueira NLPModels.jl is licensed under the [MPL version 2.0](https://www.mozilla.org/MPL/2.0/). diff --git a/Project.toml b/Project.toml index 9725a4f2..949d3626 100644 --- a/Project.toml +++ b/Project.toml @@ -4,15 +4,18 @@ version = "0.13.2" [deps] FastClosures = "9aa1b823-49e4-5ca5-8b0f-3971ec8bab6a" -ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" LinearOperators = "5c8ed15e-5a4c-59e4-a42b-c7e8811fb125" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [compat] FastClosures = "0.2.1, 0.3.0" -ForwardDiff = "0.9.0, 0.10.0" LinearOperators = "1.1.0" julia = "^1.3.0" + +[extras] +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[targets] +test = ["Test"] diff --git a/README.md b/README.md index 8031f040..5d1d9f46 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,6 @@ This package provides general guidelines to represent optimization problems in Julia and a standardized API to evaluate the functions and their derivatives. The main objective is to be able to rely on that API when designing optimization solvers in Julia. - ## How to Cite If you use NLPModels.jl in your work, please cite using the format given in [CITATION.bib](https://github.com/JuliaSmoothOptimizers/NLPModels.jl/blob/master/CITATION.bib). @@ -10,7 +9,7 @@ If you use NLPModels.jl in your work, please cite using the format given in [CIT [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.2558627.svg)](https://doi.org/10.5281/zenodo.2558627) [![GitHub release](https://img.shields.io/github/release/JuliaSmoothOptimizers/NLPModels.jl.svg)](https://github.com/JuliaSmoothOptimizers/NLPModels.jl/releases/latest) [![](https://img.shields.io/badge/docs-stable-3f51b5.svg)](https://JuliaSmoothOptimizers.github.io/NLPModels.jl/stable) -[![](https://img.shields.io/badge/docs-latest-3f51b5.svg)](https://JuliaSmoothOptimizers.github.io/NLPModels.jl/latest) +[![](https://img.shields.io/badge/docs-latest-3f51b5.svg)](https://JuliaSmoothOptimizers.github.io/NLPModels.jl/dev) [![codecov](https://codecov.io/gh/JuliaSmoothOptimizers/NLPModels.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/JuliaSmoothOptimizers/NLPModels.jl) ![CI](https://github.com/JuliaSmoothOptimizers/NLPModels.jl/workflows/CI/badge.svg?branch=master) @@ -24,8 +23,8 @@ Such instances are composed of * other data specific to the provenance of the problem. See the -[documentation](https://JuliaSmoothOptimizers.github.io/NLPModels.jl/latest) for -details on the models, a tutorial and the API. +[documentation](https://JuliaSmoothOptimizers.github.io/NLPModels.jl/dev) for +details on the models and the API. 
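To make the "standardized API" mentioned in the README hunk above concrete, here is a minimal hand-written model sketch. It is illustrative only and not part of this patch: `MyQuadratic` is a hypothetical type, and the `NLPModelMeta`/`Counters`/`increment!` usage simply mirrors what appears throughout this diff.

```julia
# A minimal hand-written model for min (x₁ - 1)² + 4(x₂ - 2)².
# Not part of this patch; `MyQuadratic` is a hypothetical example type.
using NLPModels

mutable struct MyQuadratic <: AbstractNLPModel
  meta :: NLPModelMeta
  counters :: Counters
end

MyQuadratic() = MyQuadratic(NLPModelMeta(2, x0 = zeros(2), name = "myquad"), Counters())

function NLPModels.obj(nlp :: MyQuadratic, x :: AbstractVector)
  increment!(nlp, :neval_obj)  # keep the evaluation counters honest
  return (x[1] - 1)^2 + 4 * (x[2] - 2)^2
end

function NLPModels.grad!(nlp :: MyQuadratic, x :: AbstractVector, g :: AbstractVector)
  increment!(nlp, :neval_grad)
  g[1] = 2 * (x[1] - 1)
  g[2] = 8 * (x[2] - 2)
  return g
end

nlp = MyQuadratic()
obj(nlp, nlp.meta.x0)   # 17.0
grad(nlp, nlp.meta.x0)  # [-2.0, -16.0]; the out-of-place grad falls back to grad!
```

Implementing only `obj` and `grad!` is enough for gradient-based solvers; the rest of the API can be added as needed.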
## Installation @@ -33,17 +32,11 @@ details on the models, a tutorial and the API. ```shell pkg> add NLPModels ``` -## External models +## Models -In addition to the models available in this package, there are some external models -for specific needs: +This package itself provides no models; instead, it defines the API that models, including manually written ones, must implement. -- [AmplNLReader.jl](https://github.com/JuliaSmoothOptimizers/AmplNLReader.jl): Interface - for [AMPL](http://www.ampl.com/); -- [CUTEst.jl](https://github.com/JuliaSmoothOptimizers/CUTEst.jl): Interface for CUTEst - problems; -- [NLPModelsJuMP.jl](https://github.com/JuliaSmoothOptimizers/NLPModelsJuMP.jl): - Converts MathOptInterface/JuMP models to and from NLPModels. +See [this page of the docs](https://JuliaSmoothOptimizers.github.io/NLPModels.jl/dev/models) for the list of packages that define models. ## Main Methods diff --git a/src/NLPModels.jl b/src/NLPModels.jl index 80163d0c..7f2ca6b7 100644 --- a/src/NLPModels.jl +++ b/src/NLPModels.jl @@ -1,19 +1,36 @@ module NLPModels -using LinearAlgebra, LinearOperators, Printf, SparseArrays, FastClosures +# stdlib +using LinearAlgebra, Printf, SparseArrays +# external +using FastClosures +# JSO +using LinearOperators -include("core/core.jl") +export AbstractNLPModel, AbstractNLSModel -include("autodiff_model.jl") -include("autodiff_nlsmodel.jl") -include("feasibility_form_nls.jl") -include("feasibility_residual.jl") -include("lls_model.jl") -include("qn_model.jl") -include("slack_model.jl") +# For documentation purpose +const OBJECTIVE_HESSIAN = raw""" +```math +σ ∇²f(x), +``` +with `σ = obj_weight` +""" +const LAGRANGIAN_HESSIAN = raw""" +```math +∇²L(x,y) = σ ∇²f(x) + \sum_i yᵢ ∇²cᵢ(x), +``` +with `σ = obj_weight` +""" -include("model-interaction.jl") +# Base type for an optimization model. +abstract type AbstractNLPModel end -include("dercheck.jl") +abstract type AbstractNLSModel <: AbstractNLPModel end + +for f in ["utils", "api", "counters", "meta", "show", "tools"] + include("nlp/$f.jl") + include("nls/$f.jl") +end end # module diff --git a/src/autodiff_model.jl b/src/autodiff_model.jl deleted file mode 100644 index cdaffb8b..00000000 --- a/src/autodiff_model.jl +++ /dev/null @@ -1,251 +0,0 @@ -using ForwardDiff - -export ADNLPModel - -mutable struct ADNLPModel <: AbstractNLPModel - meta :: NLPModelMeta - - counters :: Counters - - # Functions - f - c -end - -show_header(io :: IO, nlp :: ADNLPModel) = println(io, "ADNLPModel - Model with automatic differentiation") - -""" - ADNLPModel(f, x0) - ADNLPModel(f, x0, lvar, uvar) - ADNLPModel(f, x0, c, lcon, ucon) - ADNLPModel(f, x0, lvar, uvar, c, lcon, ucon) - -ADNLPModel is an AbstractNLPModel using ForwardDiff to compute the derivatives. -The problem is defined as - - min f(x) - s.to lcon ≤ c(x) ≤ ucon - lvar ≤ x ≤ uvar.
- -The following keyword arguments are available to all constructors: - -- `name`: The name of the model (default: "Generic") - -The following keyword arguments are available to the constructors for constrained problems: - -- `lin`: An array of indexes of the linear constraints (default: `Int[]`) -- `y0`: An inital estimate to the Lagrangian multipliers (default: zeros) -""" -function ADNLPModel end - -function ADNLPModel(f, x0::AbstractVector{T}; name::String = "Generic") where T - nvar = length(x0) - @lencheck nvar x0 - - nnzh = nvar * (nvar + 1) / 2 - - meta = NLPModelMeta(nvar, x0=x0, nnzh=nnzh, minimize=true, islp=false, name=name) - - return ADNLPModel(meta, Counters(), f, x->T[]) -end - -function ADNLPModel(f, x0::AbstractVector{T}, lvar::AbstractVector, uvar::AbstractVector; - name::String = "Generic") where T - nvar = length(x0) - @lencheck nvar x0 lvar uvar - - nnzh = nvar * (nvar + 1) / 2 - - meta = NLPModelMeta(nvar, x0=x0, lvar=lvar, uvar=uvar, nnzh=nnzh, minimize=true, islp=false, name=name) - - return ADNLPModel(meta, Counters(), f, x->T[]) -end - -function ADNLPModel(f, x0::AbstractVector{T}, c, lcon::AbstractVector, ucon::AbstractVector; - y0::AbstractVector=fill!(similar(lcon), zero(T)), - name::String = "Generic", lin::AbstractVector{<: Integer}=Int[]) where T - - nvar = length(x0) - ncon = length(lcon) - @lencheck nvar x0 - @lencheck ncon ucon y0 - - nnzh = nvar * (nvar + 1) / 2 - nnzj = nvar * ncon - - nln = setdiff(1:ncon, lin) - - meta = NLPModelMeta(nvar, x0=x0, ncon=ncon, y0=y0, lcon=lcon, ucon=ucon, - nnzj=nnzj, nnzh=nnzh, lin=lin, nln=nln, minimize=true, islp=false, name=name) - - return ADNLPModel(meta, Counters(), f, c) -end - -function ADNLPModel(f, x0::AbstractVector{T}, lvar::AbstractVector, uvar::AbstractVector, - c, lcon::AbstractVector, ucon::AbstractVector; - y0::AbstractVector=fill!(similar(lcon), zero(T)), - name::String = "Generic", lin::AbstractVector{<: Integer}=Int[]) where T - - nvar = length(x0) - ncon = length(lcon) - @lencheck nvar x0 lvar uvar - @lencheck ncon y0 ucon - - nnzh = nvar * (nvar + 1) / 2 - nnzj = nvar * ncon - - nln = setdiff(1:ncon, lin) - - meta = NLPModelMeta(nvar, x0=x0, lvar=lvar, uvar=uvar, ncon=ncon, y0=y0, - lcon=lcon, ucon=ucon, nnzj=nnzj, nnzh=nnzh, lin=lin, nln=nln, minimize=true, - islp=false, name=name) - - return ADNLPModel(meta, Counters(), f, c) -end - -function obj(nlp :: ADNLPModel, x :: AbstractVector) - @lencheck nlp.meta.nvar x - increment!(nlp, :neval_obj) - return nlp.f(x) -end - -function grad!(nlp :: ADNLPModel, x :: AbstractVector, g :: AbstractVector) - @lencheck nlp.meta.nvar x g - increment!(nlp, :neval_grad) - ForwardDiff.gradient!(g, nlp.f, x) - return g -end - -function cons!(nlp :: ADNLPModel, x :: AbstractVector, c :: AbstractVector) - @lencheck nlp.meta.nvar x - @lencheck nlp.meta.ncon c - increment!(nlp, :neval_cons) - c .= nlp.c(x) - return c -end - -function jac(nlp :: ADNLPModel, x :: AbstractVector) - @lencheck nlp.meta.nvar x - increment!(nlp, :neval_jac) - return ForwardDiff.jacobian(nlp.c, x) -end - -function jac_structure!(nlp :: ADNLPModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck nlp.meta.nnzj rows cols - m, n = nlp.meta.ncon, nlp.meta.nvar - I = ((i,j) for i = 1:m, j = 1:n) - rows .= getindex.(I, 1)[:] - cols .= getindex.(I, 2)[:] - return rows, cols -end - -function jac_coord!(nlp :: ADNLPModel, x :: AbstractVector, vals :: AbstractVector) - @lencheck nlp.meta.nvar x - @lencheck nlp.meta.nnzj vals - increment!(nlp, :neval_jac) - Jx = 
ForwardDiff.jacobian(nlp.c, x) - vals .= Jx[:] - return vals -end - -function jprod!(nlp :: ADNLPModel, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck nlp.meta.nvar x v - @lencheck nlp.meta.ncon Jv - increment!(nlp, :neval_jprod) - Jv .= ForwardDiff.derivative(t -> nlp.c(x + t * v), 0) - return Jv -end - -function jtprod!(nlp :: ADNLPModel, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck nlp.meta.nvar x Jtv - @lencheck nlp.meta.ncon v - increment!(nlp, :neval_jtprod) - Jtv .= ForwardDiff.gradient(x -> dot(nlp.c(x), v), x) - return Jtv -end - -function hess(nlp :: ADNLPModel, x :: AbstractVector; obj_weight :: Real = one(eltype(x))) - @lencheck nlp.meta.nvar x - increment!(nlp, :neval_hess) - ℓ(x) = obj_weight * nlp.f(x) - Hx = ForwardDiff.hessian(ℓ, x) - return tril(Hx) -end - -function hess(nlp :: ADNLPModel, x :: AbstractVector, y :: AbstractVector; obj_weight :: Real = one(eltype(x))) - @lencheck nlp.meta.nvar x - @lencheck nlp.meta.ncon y - increment!(nlp, :neval_hess) - ℓ(x) = obj_weight * nlp.f(x) + dot(nlp.c(x), y) - Hx = ForwardDiff.hessian(ℓ, x) - return tril(Hx) -end - -function hess_structure!(nlp :: ADNLPModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - n = nlp.meta.nvar - @lencheck nlp.meta.nnzh rows cols - I = ((i,j) for i = 1:n, j = 1:n if i ≥ j) - rows .= getindex.(I, 1) - cols .= getindex.(I, 2) - return rows, cols -end - -function hess_coord!(nlp :: ADNLPModel, x :: AbstractVector, vals :: AbstractVector; obj_weight :: Real = one(eltype(x))) - @lencheck nlp.meta.nvar x - @lencheck nlp.meta.nnzh vals - increment!(nlp, :neval_hess) - ℓ(x) = obj_weight * nlp.f(x) - Hx = ForwardDiff.hessian(ℓ, x) - k = 1 - for j = 1 : nlp.meta.nvar - for i = j : nlp.meta.nvar - vals[k] = Hx[i, j] - k += 1 - end - end - return vals -end - -function hess_coord!(nlp :: ADNLPModel, x :: AbstractVector, y :: AbstractVector, vals :: AbstractVector; obj_weight :: Real = one(eltype(x))) - @lencheck nlp.meta.nvar x - @lencheck nlp.meta.ncon y - @lencheck nlp.meta.nnzh vals - increment!(nlp, :neval_hess) - ℓ(x) = obj_weight * nlp.f(x) + dot(nlp.c(x), y) - Hx = ForwardDiff.hessian(ℓ, x) - k = 1 - for j = 1 : nlp.meta.nvar - for i = j : nlp.meta.nvar - vals[k] = Hx[i, j] - k += 1 - end - end - return vals -end - -function hprod!(nlp :: ADNLPModel, x :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight :: Real = one(eltype(x))) - n = nlp.meta.nvar - @lencheck n x v Hv - increment!(nlp, :neval_hprod) - ℓ(x) = obj_weight * nlp.f(x) - Hv .= ForwardDiff.derivative(t -> ForwardDiff.gradient(ℓ, x + t * v), 0) - return Hv -end - -function hprod!(nlp :: ADNLPModel, x :: AbstractVector, y :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight :: Real = one(eltype(x))) - n = nlp.meta.nvar - @lencheck n x v Hv - @lencheck nlp.meta.ncon y - increment!(nlp, :neval_hprod) - ℓ(x) = obj_weight * nlp.f(x) + dot(nlp.c(x), y) - Hv .= ForwardDiff.derivative(t -> ForwardDiff.gradient(ℓ, x + t * v), 0) - return Hv -end - -function ghjvprod!(nlp :: ADNLPModel, x :: AbstractVector, g :: AbstractVector, v :: AbstractVector, gHv :: AbstractVector) - @lencheck nlp.meta.nvar x g v - @lencheck nlp.meta.ncon gHv - increment!(nlp, :neval_hprod) - gHv .= ForwardDiff.derivative(t -> ForwardDiff.derivative(s -> nlp.c(x + s * g + t * v), 0), 0) - return gHv -end diff --git a/src/autodiff_nlsmodel.jl b/src/autodiff_nlsmodel.jl deleted file mode 100644 index 33746209..00000000 --- a/src/autodiff_nlsmodel.jl +++ 
/dev/null @@ -1,342 +0,0 @@ -using ForwardDiff - -export ADNLSModel - -mutable struct ADNLSModel <: AbstractNLSModel - meta :: NLPModelMeta - nls_meta :: NLSMeta - counters :: NLSCounters - - # Function - F - c -end - -show_header(io :: IO, nls :: ADNLSModel) = println(io, "ADNLSModel - Nonlinear least-squares model with automatic differentiation") - -""" - ADNLSModel(F, x0, nequ) - ADNLSModel(F, x0, nequ, lvar, uvar) - ADNLSModel(F, x0, nequ, c, lcon, ucon) - ADNLSModel(F, x0, nequ, lvar, uvar, c, lcon, ucon) - -ADNLSModel is an Nonlinear Least Squares model using ForwardDiff to -compute the derivatives. -The problem is defined as - - min ½‖F(x)‖² - s.to lcon ≤ c(x) ≤ ucon - lvar ≤ x ≤ uvar - -The following keyword arguments are available to all constructors: - -- `linequ`: An array of indexes of the linear equations (default: `Int[]`) -- `name`: The name of the model (default: "Generic") - -The following keyword arguments are available to the constructors for constrained problems: - -- `lin`: An array of indexes of the linear constraints (default: `Int[]`) -- `y0`: An inital estimate to the Lagrangian multipliers (default: zeros) -""" -function ADNLSModel end - -function ADNLSModel(F, x0 :: AbstractVector{T}, nequ :: Integer; - linequ :: AbstractVector{<: Integer} = Int[], - name :: String = "Generic", - ) where T - - nvar = length(x0) - - meta = NLPModelMeta(nvar, x0=x0, name=name) - nlnequ = setdiff(1:nequ, linequ) - nls_meta = NLSMeta(nequ, nvar, nnzj=nequ * nvar, nnzh=div(nvar * (nvar + 1), 2), lin=linequ, nln=nlnequ) - - return ADNLSModel(meta, nls_meta, NLSCounters(), F, x->T[]) -end - -function ADNLSModel(F, x0 :: AbstractVector{T}, nequ :: Integer, - lvar :: AbstractVector, uvar :: AbstractVector; - linequ :: AbstractVector{<: Integer} = Int[], - name :: String = "Generic", - ) where T - - nvar = length(x0) - @lencheck nvar lvar uvar - - meta = NLPModelMeta(nvar, x0=x0, lvar=lvar, uvar=uvar, name=name) - nlnequ = setdiff(1:nequ, linequ) - nls_meta = NLSMeta(nequ, nvar, nnzj=nequ * nvar, nnzh=div(nvar * (nvar + 1), 2), lin=linequ, nln=nlnequ) - - return ADNLSModel(meta, nls_meta, NLSCounters(), F, x->T[]) -end - -function ADNLSModel(F, x0 :: AbstractVector{T}, nequ :: Integer, - c, lcon :: AbstractVector, ucon :: AbstractVector; - y0 :: AbstractVector = fill!(similar(lcon), zero(T)), - lin :: AbstractVector{<: Integer} = Int[], - linequ :: AbstractVector{<: Integer} = Int[], - name :: String = "Generic", - ) where T - - nvar = length(x0) - ncon = length(lcon) - @lencheck ncon ucon y0 - nnzj = nvar * ncon - - nln = setdiff(1:ncon, lin) - meta = NLPModelMeta(nvar, x0=x0, ncon=ncon, y0=y0, lcon=lcon, ucon=ucon, - nnzj=nnzj, name=name, lin=lin, nln=nln) - nlnequ = setdiff(1:nequ, linequ) - nls_meta = NLSMeta(nequ, nvar, nnzj=nequ * nvar, nnzh=div(nvar * (nvar + 1), 2), lin=linequ, nln=nlnequ) - - return ADNLSModel(meta, nls_meta, NLSCounters(), F, c) -end - -function ADNLSModel(F, x0 :: AbstractVector{T}, nequ :: Integer, - lvar :: AbstractVector, uvar :: AbstractVector, - c, lcon :: AbstractVector, ucon :: AbstractVector; - y0 :: AbstractVector = fill!(similar(lcon), zero(T)), - lin :: AbstractVector{<: Integer} = Int[], - linequ :: AbstractVector{<: Integer} = Int[], - name :: String = "Generic", - ) where T - - nvar = length(x0) - ncon = length(lcon) - @lencheck nvar lvar uvar - @lencheck ncon ucon y0 - nnzj = nvar * ncon - - nln = setdiff(1:ncon, lin) - meta = NLPModelMeta(nvar, x0=x0, lvar=lvar, uvar=uvar, ncon=ncon, y0=y0, - lcon=lcon, ucon=ucon, nnzj=nnzj, name=name, lin=lin, 
nln=nln) - nlnequ = setdiff(1:nequ, linequ) - nls_meta = NLSMeta(nequ, nvar, nnzj=nequ * nvar, nnzh=div(nvar * (nvar + 1), 2), lin=linequ, nln=nlnequ) - - return ADNLSModel(meta, nls_meta, NLSCounters(), F, c) -end - -function residual!(nls :: ADNLSModel, x :: AbstractVector, Fx :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.nls_meta.nequ Fx - increment!(nls, :neval_residual) - Fx .= nls.F(x) - return Fx -end - -function jac_residual(nls :: ADNLSModel, x :: AbstractVector) - @lencheck nls.meta.nvar x - increment!(nls, :neval_jac_residual) - return ForwardDiff.jacobian(nls.F, x) -end - -function jac_structure_residual!(nls :: ADNLSModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck nls.nls_meta.nnzj rows cols - m, n = nls.nls_meta.nequ, nls.meta.nvar - I = ((i,j) for i = 1:m, j = 1:n) - rows .= getindex.(I, 1)[:] - cols .= getindex.(I, 2)[:] - return rows, cols -end - -function jac_coord_residual!(nls :: ADNLSModel, x :: AbstractVector, vals :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.nls_meta.nnzj vals - increment!(nls, :neval_jac_residual) - Jx = ForwardDiff.jacobian(nls.F, x) - vals .= Jx[:] - return vals -end - -function jprod_residual!(nls :: ADNLSModel, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck nls.meta.nvar x v - @lencheck nls.nls_meta.nequ Jv - increment!(nls, :neval_jprod_residual) - Jv .= ForwardDiff.derivative(t -> nls.F(x + t * v), 0) - return Jv -end - -function jtprod_residual!(nls :: ADNLSModel, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck nls.meta.nvar x Jtv - @lencheck nls.nls_meta.nequ v - increment!(nls, :neval_jtprod_residual) - Jtv .= ForwardDiff.gradient(x -> dot(nls.F(x), v), x) - return Jtv -end - -function hess_residual(nls :: ADNLSModel, x :: AbstractVector, v :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.nls_meta.nequ v - increment!(nls, :neval_hess_residual) - return tril(ForwardDiff.jacobian(x -> ForwardDiff.jacobian(nls.F, x)' * v, x)) -end - -function hess_structure_residual!(nls :: ADNLSModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck nls.nls_meta.nnzh rows cols - n = nls.meta.nvar - I = ((i,j) for i = 1:n, j = 1:n if i ≥ j) - rows .= getindex.(I, 1) - cols .= getindex.(I, 2) - return rows, cols -end - -function hess_coord_residual!(nls :: ADNLSModel, x :: AbstractVector, v :: AbstractVector, vals :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.nls_meta.nequ v - @lencheck nls.nls_meta.nnzh vals - increment!(nls, :neval_hess_residual) - Hx = ForwardDiff.jacobian(x->ForwardDiff.jacobian(nls.F, x)' * v, x) - k = 1 - for j = 1:nls.meta.nvar - for i = j:nls.meta.nvar - vals[k] = Hx[i,j] - k += 1 - end - end - return vals -end - -function jth_hess_residual(nls :: ADNLSModel, x :: AbstractVector, i :: Int) - @lencheck nls.meta.nvar x - increment!(nls, :neval_jhess_residual) - return tril(ForwardDiff.hessian(x->nls.F(x)[i], x)) -end - -function hprod_residual!(nls :: ADNLSModel, x :: AbstractVector, i :: Int, v :: AbstractVector, Hiv :: AbstractVector) - @lencheck nls.meta.nvar x v Hiv - increment!(nls, :neval_hprod_residual) - Hiv .= ForwardDiff.derivative(t -> ForwardDiff.gradient(x -> nls.F(x)[i], x + t * v), 0) - return Hiv -end - -function cons!(nls :: ADNLSModel, x :: AbstractVector, c :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.meta.ncon c - increment!(nls, :neval_cons) - c .= nls.c(x) - return c -end - -function jac(nls :: 
ADNLSModel, x :: AbstractVector) - @lencheck nls.meta.nvar x - increment!(nls, :neval_jac) - return ForwardDiff.jacobian(nls.c, x) -end - -function jac_structure!(nls :: ADNLSModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck nls.meta.nnzj rows cols - m, n = nls.meta.ncon, nls.meta.nvar - I = ((i,j) for i = 1:m, j = 1:n) - rows .= getindex.(I, 1)[:] - cols .= getindex.(I, 2)[:] - return rows, cols -end - -function jac_coord!(nls :: ADNLSModel, x :: AbstractVector, vals :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.meta.nnzj vals - Jx = ForwardDiff.jacobian(nls.c, x) - vals .= Jx[:] - return vals -end - -function jprod!(nls :: ADNLSModel, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck nls.meta.nvar x v - @lencheck nls.meta.ncon Jv - increment!(nls, :neval_jprod) - Jv .= ForwardDiff.derivative(t -> nls.c(x + t * v), 0) - return Jv -end - -function jtprod!(nls :: ADNLSModel, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck nls.meta.nvar x Jtv - @lencheck nls.meta.ncon v - increment!(nls, :neval_jtprod) - Jtv .= ForwardDiff.gradient(x -> dot(nls.c(x), v), x) - return Jtv -end - -function hess(nls :: ADNLSModel, x :: AbstractVector; obj_weight :: Real = one(eltype(x))) - @lencheck nls.meta.nvar x - increment!(nls, :neval_hess) - ℓ(x) = obj_weight * sum(nls.F(x).^2) / 2 - Hx = ForwardDiff.hessian(ℓ, x) - return tril(Hx) -end - -function hess(nls :: ADNLSModel, x :: AbstractVector, y :: AbstractVector; obj_weight :: Real = one(eltype(x))) - @lencheck nls.meta.nvar x - @lencheck nls.meta.ncon y - increment!(nls, :neval_hess) - ℓ(x) = obj_weight * sum(nls.F(x).^2) / 2 + dot(y, nls.c(x)) - Hx = ForwardDiff.hessian(ℓ, x) - return tril(Hx) -end - -function hess_structure!(nls :: ADNLSModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck nls.meta.nnzh rows cols - n = nls.meta.nvar - I = ((i,j) for i = 1:n, j = 1:n if i ≥ j) - rows .= getindex.(I, 1) - cols .= getindex.(I, 2) - return rows, cols -end - -function hess_coord!(nls :: ADNLSModel, x :: AbstractVector, vals :: AbstractVector; obj_weight :: Real = one(eltype(x))) - @lencheck nls.meta.nvar x - @lencheck nls.meta.nnzh vals - increment!(nls, :neval_hess) - ℓ(x) = obj_weight * sum(nls.F(x).^2) / 2 - Hx = ForwardDiff.hessian(ℓ, x) - k = 1 - for j = 1:nls.meta.nvar - for i = j:nls.meta.nvar - vals[k] = Hx[i,j] - k += 1 - end - end - return vals -end - -function hess_coord!(nls :: ADNLSModel, x :: AbstractVector, y :: AbstractVector, vals :: AbstractVector; obj_weight :: Real = one(eltype(x))) - @lencheck nls.meta.nvar x - @lencheck nls.meta.ncon y - @lencheck nls.meta.nnzh vals - increment!(nls, :neval_hess) - ℓ(x) = obj_weight * sum(nls.F(x).^2) / 2 + dot(y, nls.c(x)) - Hx = ForwardDiff.hessian(ℓ, x) - k = 1 - for j = 1:nls.meta.nvar - for i = j:nls.meta.nvar - vals[k] = Hx[i,j] - k += 1 - end - end - return vals -end - -function hprod!(nls :: ADNLSModel, x :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight = one(eltype(x))) - @lencheck nls.meta.nvar x v Hv - increment!(nls, :neval_hprod) - ℓ(x) = obj_weight * sum(nls.F(x).^2) / 2 - Hv .= ForwardDiff.derivative(t -> ForwardDiff.gradient(ℓ, x + t * v), 0) - return Hv -end - -function hprod!(nls :: ADNLSModel, x :: AbstractVector, y :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; - obj_weight = one(eltype(x))) - @lencheck nls.meta.nvar x v Hv - @lencheck nls.meta.ncon y - increment!(nls, :neval_hprod) - ℓ(x) = 
obj_weight * sum(nls.F(x).^2) / 2 + dot(y, nls.c(x)) - Hv .= ForwardDiff.derivative(t -> ForwardDiff.gradient(ℓ, x + t * v), 0) - return Hv -end - -function ghjvprod!(nls :: ADNLSModel, x :: AbstractVector, g :: AbstractVector, v :: AbstractVector, gHv :: AbstractVector) - @lencheck nls.meta.nvar x g v - @lencheck nls.meta.ncon gHv - increment!(nls, :neval_hprod) - gHv .= ForwardDiff.derivative(t -> ForwardDiff.derivative(s -> nls.c(x + s * g + t * v), 0), 0) - return gHv -end diff --git a/src/core/core.jl b/src/core/core.jl deleted file mode 100644 index 39f5e269..00000000 --- a/src/core/core.jl +++ /dev/null @@ -1,25 +0,0 @@ -export AbstractNLPModel, AbstractNLSModel - -# For documentation purpose -const OBJECTIVE_HESSIAN = raw""" -```math -σ ∇²f(x), -``` -with `σ = obj_weight` -""" -const LAGRANGIAN_HESSIAN = raw""" -```math -∇²L(x,y) = σ ∇²f(x) + \sum_i yᵢ ∇²cᵢ(x), -``` -with `σ = obj_weight` -""" - -# Base type for an optimization model. -abstract type AbstractNLPModel end - -abstract type AbstractNLSModel <: AbstractNLPModel end - -for f in ["utils", "api", "counters", "meta", "show", "tools", ] - include("nlp/$f.jl") - include("nls/$f.jl") -end \ No newline at end of file diff --git a/src/dercheck.jl b/src/dercheck.jl deleted file mode 100644 index 8dd0783d..00000000 --- a/src/dercheck.jl +++ /dev/null @@ -1,248 +0,0 @@ -# A simple derivative checker for AbstractNLPModels. -# D. Orban, March 2016. -# dominique.orban@gmail.com - -export gradient_check, jacobian_check, hessian_check, hessian_check_from_grad - - -""" - gradient_check(nlp; x=nlp.meta.x0, atol=1e-6, rtol=1e-4) - -Check the first derivatives of the objective at `x` against centered -finite differences. - -This function returns a dictionary indexed by components of the gradient for -which the relative error exceeds `rtol`. -""" -function gradient_check(nlp :: AbstractNLPModel; - x :: AbstractVector=nlp.meta.x0, - atol :: Float64=1.0e-6, rtol :: Float64=1.0e-4) - - # Optimal-ish step for second-order centered finite differences. - step = (eps(Float64) / 3)^(1/3) - - # Check objective gradient. - g_errs = Dict{Int, Float64}() - g = grad(nlp, x) - h = zeros(nlp.meta.nvar) - for i = 1 : nlp.meta.nvar - h[i] = step - dfdxi = (obj(nlp, x + h) - obj(nlp, x - h)) / 2 / step - err = abs(dfdxi - g[i]) - if err > atol + rtol * abs(dfdxi) - g_errs[i] = err - end - h[i] = 0 - end - return g_errs -end - - -""" - jacobian_check(nlp; x=nlp.meta.x0, atol=1e-6, rtol=1e-4) - -Check the first derivatives of the constraints at `x` against centered -finite differences. - -This function returns a dictionary indexed by (j, i) tuples such that the -relative error in the `i`-th partial derivative of the `j`-th constraint -exceeds `rtol`. -""" -function jacobian_check(nlp :: AbstractNLPModel; - x :: AbstractVector=nlp.meta.x0, - atol :: Float64=1.0e-6, rtol :: Float64=1.0e-4) - - # Fast exit if there are no constraints. - J_errs = Dict{Tuple{Int,Int}, Float64}() - nlp.meta.ncon > 0 || return J_errs - - # Optimal-ish step for second-order centered finite differences. - step = (eps(Float64) / 3)^(1/3) - - # Check constraints Jacobian. - J = jac(nlp, x) - h = zeros(nlp.meta.nvar) - cxph = zeros(nlp.meta.ncon) - cxmh = zeros(nlp.meta.ncon) - # Differentiate all constraints with respect to each variable in turn. 
- for i = 1 : nlp.meta.nvar - h[i] = step - cons!(nlp, x + h, cxph) - cons!(nlp, x - h, cxmh) - dcdxi = (cxph - cxmh) / 2 / step - for j = 1 : nlp.meta.ncon - err = abs(dcdxi[j] - J[j, i]) - if err > atol + rtol * abs(dcdxi[j]) - J_errs[(j, i)] = err - end - end - h[i] = 0 - end - return J_errs -end - - -""" - hessian_check(nlp; x=nlp.meta.x0, atol=1e-6, rtol=1e-4, sgn=1) - -Check the second derivatives of the objective and each constraints at `x` -against centered finite differences. This check does not rely on exactness of -the first derivatives, only on objective and constraint values. - -The `sgn` arguments refers to the formulation of the Lagrangian in the problem. -It should have a positive value if the Lagrangian is formulated as -```math -L(x,y) = f(x) + \\sum_j yⱼ cⱼ(x), -``` -and a negative value if the Lagrangian is formulated as -```math -L(x,y) = f(x) - \\sum_j yⱼ cⱼ(x). -``` -Only the sign of `sgn` is important. - -This function returns a dictionary indexed by functions. The 0-th function is -the objective while the k-th function (for k > 0) is the k-th constraint. The -values of the dictionary are dictionaries indexed by tuples (i, j) such that -the relative error in the second derivative ∂²fₖ/∂xᵢ∂xⱼ exceeds `rtol`. -""" -function hessian_check(nlp :: AbstractNLPModel; - x :: AbstractVector=nlp.meta.x0, - atol :: Float64=1.0e-6, rtol :: Float64=1.0e-4, - sgn :: Int=1) - - H_errs = Dict{Int, Dict{Tuple{Int,Int}, Float64}}() - - # Optimal-ish step for second-order centered finite differences. - step = eps(Float64)^(1/4) - sgn == 0 && error("sgn cannot be zero") - sgn = sign(sgn) - hi = zeros(nlp.meta.nvar) - hj = zeros(nlp.meta.nvar) - - k = 0 - H_errs[k] = Dict{Tuple{Int,Int}, Float64}() - H = hess(nlp, x) - for i = 1 : nlp.meta.nvar - hi[i] = step - for j = 1 : i - hj[j] = step - d2fdxidxj = (obj(nlp, x + hi + hj) - obj(nlp, x - hi + hj) - obj(nlp, x + hi - hj) + obj(nlp, x - hi - hj)) / 4 / step^2 - err = abs(d2fdxidxj - H[i, j]) - if err > atol + rtol * abs(d2fdxidxj) - H_errs[k][(i,j)] = err - end - hj[j] = 0 - end - hi[i] = 0 - end - - y = zeros(nlp.meta.ncon) - cxpp = zeros(nlp.meta.ncon) - cxmp = zeros(nlp.meta.ncon) - cxpm = zeros(nlp.meta.ncon) - cxmm = zeros(nlp.meta.ncon) - for k = 1 : nlp.meta.ncon - H_errs[k] = Dict{Tuple{Int,Int}, Float64}() - y[k] = sgn - Hk = hess(nlp, x, y, obj_weight=0.0) - for i = 1 : nlp.meta.nvar - hi[i] = step - for j = 1 : i - hj[j] = step - cons!(nlp, x + hi + hj, cxpp) - cons!(nlp, x - hi + hj, cxmp) - cons!(nlp, x + hi - hj, cxpm) - cons!(nlp, x - hi - hj, cxmm) - d2cdxidxj = (cxpp - cxmp - cxpm + cxmm) / 4 / step^2 - err = abs(d2cdxidxj[k] - Hk[i, j]) - if err > atol + rtol * abs(d2cdxidxj[k]) - println(d2cdxidxj[k], Hk[i, j]) - H_errs[k][(i,j)] = err - end - hj[j] = 0 - end - hi[i] = 0 - end - y[k] = 0 - end - - return H_errs -end - - -""" - hessian_check_from_grad(nlp; x=nlp.meta.x0, atol=1e-6, rtol=1e-4, sgn=1) - -Check the second derivatives of the objective and each constraints at `x` -against centered finite differences. This check assumes exactness of the first -derivatives. - -The `sgn` arguments refers to the formulation of the Lagrangian in the problem. -It should have a positive value if the Lagrangian is formulated as -```math -L(x,y) = f(x) + \\sum_j yⱼ cⱼ(x), -``` -and a negative value if the Lagrangian is formulated as -```math -L(x,y) = f(x) - \\sum_j yⱼ cⱼ(x). -``` -Only the sign of `sgn` is important. - -This function returns a dictionary indexed by functions. 
The 0-th function is -the objective while the k-th function (for k > 0) is the k-th constraint. The -values of the dictionary are dictionaries indexed by tuples (i, j) such that -the relative error in the second derivative ∂²fₖ/∂xᵢ∂xⱼ exceeds `rtol`. -""" -function hessian_check_from_grad(nlp :: AbstractNLPModel; - x :: AbstractVector=nlp.meta.x0, - atol :: Float64=1.0e-6, rtol :: Float64=1.0e-4, - sgn :: Int=1) - - H_errs = Dict{Int, Dict{Tuple{Int,Int}, Float64}}() - - # Optimal-ish step for second-order centered finite differences. - step = (eps(Float64) / 3)^(1/3) - sgn == 0 && error("sgn cannot be zero") - sgn = sign(sgn) - h = zeros(nlp.meta.nvar) - - k = 0 - H_errs[k] = Dict{Tuple{Int,Int}, Float64}() - H = hess(nlp, x) - gxph = zeros(nlp.meta.nvar) - gxmh = zeros(nlp.meta.nvar) - for i = 1 : nlp.meta.nvar - h[i] = step - grad!(nlp, x + h, gxph) - grad!(nlp, x - h, gxmh) - dgdxi = (gxph - gxmh) / 2 / step - for j = 1 : i - err = abs(dgdxi[j] - H[i, j]) - if err > atol + rtol * abs(dgdxi[j]) - H_errs[k][(i,j)] = err - end - end - h[i] = 0 - end - - y = zeros(nlp.meta.ncon) - for k = 1 : nlp.meta.ncon - H_errs[k] = Dict{Tuple{Int,Int}, Float64}() - y[k] = sgn - Hk = hess(nlp, x, y, obj_weight=0.0) - for i = 1 : nlp.meta.nvar - h[i] = step - dJdxi = (jac(nlp, x + h) - jac(nlp, x - h)) / 2 / step - for j = 1 : i - err = abs(dJdxi[k, j] - Hk[i, j]) - if err > atol + rtol * abs(dJdxi[k, j]) - H_errs[k][(i,j)] = err - end - end - h[i] = 0 - end - y[k] = 0 - end - - return H_errs -end diff --git a/src/feasibility_form_nls.jl b/src/feasibility_form_nls.jl deleted file mode 100644 index 506162da..00000000 --- a/src/feasibility_form_nls.jl +++ /dev/null @@ -1,331 +0,0 @@ -export FeasibilityFormNLS - - -"""Converts a nonlinear least-squares problem with residual ``F(x)`` to a nonlinear -optimization problem with constraints ``F(x) = r`` and objective ``\\tfrac{1}{2}\\|r\\|^2``. -In other words, converts -```math -\\begin{aligned} - \\min_x \\quad & \\tfrac{1}{2}\\|F(x)\\|^2 \\\\ -\\mathrm{s.t.} \\quad & c_L ≤ c(x) ≤ c_U \\\\ - & ℓ ≤ x ≤ u -\\end{aligned} -``` -to -```math -\\begin{aligned} - \\min_{x,r} \\quad & \\tfrac{1}{2}\\|r\\|^2 \\\\ -\\mathrm{s.t.} \\quad & F(x) - r = 0 \\\\ - & c_L ≤ c(x) ≤ c_U \\\\ - & ℓ ≤ x ≤ u -\\end{aligned} -``` -If you rather have the first problem, the `nls` model already works as an NLPModel of -that format. -""" -mutable struct FeasibilityFormNLS{M <: AbstractNLSModel} <: AbstractNLSModel - meta :: NLPModelMeta - nls_meta :: NLSMeta - internal :: M - counters :: NLSCounters -end - -show_header(io :: IO, nls :: FeasibilityFormNLS) = println(io, "FeasibilityFormNLS - Nonlinear least-squares moving the residual to constraints") - -""" - FeasibilityFormNLS(nls) - -Converts a nonlinear least-squares problem with residual `F(x)` to a nonlinear -optimization problem with constraints `F(x) = r` and objective `¹/₂‖r‖²`. -""" -function FeasibilityFormNLS(nls :: AbstractNLSModel; name="$(nls.meta.name)-ffnls") - nequ = nls.nls_meta.nequ - meta = nls.meta - nvar = meta.nvar + nequ - ncon = meta.ncon + nequ - nnzh = nls.nls_meta.nnzh + nequ + (meta.ncon == 0 ? 
0 : meta.nnzh) # Some indexes can be repeated - meta = NLPModelMeta(nvar, x0=[meta.x0; zeros(nequ)], - lvar=[meta.lvar; fill(-Inf, nequ)], - uvar=[meta.uvar; fill( Inf, nequ)], - ncon=ncon, - lcon=[zeros(nequ); meta.lcon], - ucon=[zeros(nequ); meta.ucon], - y0=[zeros(nequ); meta.y0], - lin=[nls.nls_meta.lin; meta.lin .+ nequ], - nln=[nls.nls_meta.nln; meta.nln .+ nequ], - nnzj=meta.nnzj + nls.nls_meta.nnzj + nequ, - nnzh=nnzh, - name=name - ) - nls_meta = NLSMeta(nequ, nvar, x0=[meta.x0; zeros(nequ)], nnzj=nequ, nnzh=0, lin=1:nequ, nln=Int[]) - - nlp = FeasibilityFormNLS{typeof(nls)}(meta, nls_meta, nls, NLSCounters()) - finalizer(nlp -> finalize(nlp.internal), nlp) - - return nlp -end - -function obj(nlp :: FeasibilityFormNLS, x :: AbstractVector) - @lencheck nlp.meta.nvar x - increment!(nlp, :neval_obj) - n = nlp.internal.meta.nvar - r = @view x[n+1:end] - return dot(r, r) / 2 -end - -function grad!(nlp :: FeasibilityFormNLS, x :: AbstractVector, g :: AbstractVector) - @lencheck nlp.meta.nvar x g - increment!(nlp, :neval_grad) - n = nlp.internal.meta.nvar - g[1:n] .= 0.0 - g[n+1:end] .= @view x[n+1:end] - return g -end - -function objgrad!(nlp :: FeasibilityFormNLS, x :: Array{Float64}, g :: Array{Float64}) - @lencheck nlp.meta.nvar x g - increment!(nlp, :neval_obj) - increment!(nlp, :neval_grad) - n = nlp.internal.meta.nvar - r = @view x[n+1:end] - f = dot(r, r) / 2 - g[1:n] .= 0.0 - g[n+1:end] .= @view x[n+1:end] - return f, g -end - -function cons!(nlp :: FeasibilityFormNLS, xr :: AbstractVector, c :: AbstractVector) - @lencheck nlp.meta.nvar xr - @lencheck nlp.meta.ncon c - increment!(nlp, :neval_cons) - n, m, ne = nlp.internal.meta.nvar, nlp.internal.meta.ncon, nlp.internal.nls_meta.nequ - x = @view xr[1:n] - r = @view xr[n+1:end] - residual!(nlp.internal, x, @view c[1:ne]) - c[1:ne] .-= r - if m > 0 - cons!(nlp.internal, x, @view c[ne+1:end]) - end - return c -end - -function jac_structure!(nlp :: FeasibilityFormNLS, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck nlp.meta.nnzj rows cols - n, m, ne = nlp.internal.meta.nvar, nlp.internal.meta.ncon, nlp.internal.nls_meta.nequ - nnzjF = nlp.internal.nls_meta.nnzj - @views jac_structure_residual!(nlp.internal, rows[1:nnzjF], cols[1:nnzjF]) - if m > 0 - idx = nnzjF .+ (1:nlp.internal.meta.nnzj) - @views jac_structure!(nlp.internal, rows[idx], cols[idx]) - rows[idx] .+= ne - end - rows[end-ne+1:end] .= 1:ne - cols[end-ne+1:end] .= n .+ (1:ne) - return rows, cols -end - -function jac_coord!(nlp :: FeasibilityFormNLS, xr :: AbstractVector, vals :: AbstractVector) - @lencheck nlp.meta.nvar xr - @lencheck nlp.meta.nnzj vals - n, m, ne = nlp.internal.meta.nvar, nlp.internal.meta.ncon, nlp.internal.nls_meta.nequ - x = @view xr[1:n] - nnzjF = nlp.internal.nls_meta.nnzj - nnzjc = m > 0 ? 
nlp.internal.meta.nnzj : 0 - I = 1:nnzjF - @views jac_coord_residual!(nlp.internal, x, vals[I]) - if m > 0 - I = nnzjF+1:nnzjF+nnzjc - @views jac_coord!(nlp.internal, x, vals[I]) - end - vals[nnzjF+nnzjc+1:nnzjF+nnzjc+ne] .= -1 - return vals -end - -function jprod!(nlp :: FeasibilityFormNLS, xr :: AbstractVector, v :: AbstractVector, jv :: AbstractVector) - @lencheck nlp.meta.nvar xr v - @lencheck nlp.meta.ncon jv - increment!(nlp, :neval_jprod) - n, m, ne = nlp.internal.meta.nvar, nlp.internal.meta.ncon, nlp.internal.nls_meta.nequ - x = @view xr[1:n] - @views jprod_residual!(nlp.internal, x, v[1:n], jv[1:ne]) - @views jv[1:ne] .-= v[n+1:end] - if m > 0 - @views jprod!(nlp.internal, x, v[1:n], jv[ne+1:end]) - end - return jv -end - -function jtprod!(nlp :: FeasibilityFormNLS, xr :: AbstractVector, v :: AbstractVector, jtv :: AbstractVector) - @lencheck nlp.meta.nvar xr jtv - @lencheck nlp.meta.ncon v - increment!(nlp, :neval_jtprod) - n, m, ne = nlp.internal.meta.nvar, nlp.internal.meta.ncon, nlp.internal.nls_meta.nequ - x = @view xr[1:n] - @views jtprod_residual!(nlp.internal, x, v[1:ne], jtv[1:n]) - if m > 0 - @views jtv[1:n] .+= jtprod(nlp.internal, x, v[ne+1:end]) - end - @views jtv[n+1:end] .= -v[1:ne] - return jtv -end - -function hess_structure!(nlp :: FeasibilityFormNLS, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck nlp.meta.nnzh rows cols - n, m, ne = nlp.internal.meta.nvar, nlp.internal.meta.ncon, nlp.internal.nls_meta.nequ - nnzhF = nlp.internal.nls_meta.nnzh - nnzhc = m > 0 ? nlp.internal.meta.nnzh : 0 - I = 1:nnzhF - @views hess_structure_residual!(nlp.internal, rows[I], cols[I]) - if m > 0 - I = nnzhF+1:nnzhF+nnzhc - @views hess_structure!(nlp.internal, rows[I], cols[I]) - end - I = nnzhF+nnzhc+1:nnzhF+nnzhc+ne - rows[I] .= n+1:n+ne - cols[I] .= n+1:n+ne - return rows, cols -end - -function hess_coord!(nlp :: FeasibilityFormNLS, xr :: AbstractVector, y :: AbstractVector, vals :: AbstractVector; - obj_weight :: Float64=1.0) - @lencheck nlp.meta.nvar xr - @lencheck nlp.meta.ncon y - @lencheck nlp.meta.nnzh vals - increment!(nlp, :neval_hess) - n, m, ne = nlp.internal.meta.nvar, nlp.internal.meta.ncon, nlp.internal.nls_meta.nequ - nnzhF = nlp.internal.nls_meta.nnzh - nnzhc = m > 0 ? nlp.internal.meta.nnzh : 0 - x = @view xr[1:n] - y1 = @view y[1:ne] - y2 = @view y[ne+1:ne+m] - I = 1:nnzhF - @views hess_coord_residual!(nlp.internal, x, y1, vals[I]) - if m > 0 - I = nnzhF+1:nnzhF+nnzhc - @views hess_coord!(nlp.internal, x, y2, vals[I], obj_weight=0.0) - end - vals[nnzhF+nnzhc+1:nnzhF+nnzhc+ne] .= obj_weight - return vals -end - -function hess(nlp :: FeasibilityFormNLS, xr :: AbstractVector; obj_weight :: Float64=1.0) - @lencheck nlp.meta.nvar xr - increment!(nlp, :neval_hess) - n, ne = nlp.internal.meta.nvar, nlp.internal.nls_meta.nequ - return [spzeros(n, n + ne); spzeros(ne, n) obj_weight * I] -end - -function hess(nlp :: FeasibilityFormNLS, xr :: AbstractVector, y :: AbstractVector; - obj_weight :: Float64=1.0) - @lencheck nlp.meta.nvar xr - @lencheck nlp.meta.ncon y - increment!(nlp, :neval_hess) - n, m, ne = nlp.internal.meta.nvar, nlp.internal.meta.ncon, nlp.internal.nls_meta.nequ - x = @view xr[1:n] - @views Hx = m > 0 ? 
hess(nlp.internal, x, y[ne+1:end], obj_weight=0.0) : spzeros(n, n) - Hx += hess_residual(nlp.internal, x, @view y[1:ne]) - return [Hx spzeros(n, ne); spzeros(ne, n) obj_weight * I] -end - -function hprod!(nlp :: FeasibilityFormNLS, xr :: AbstractVector, y :: AbstractVector, v :: AbstractVector, hv :: AbstractVector; - obj_weight :: Float64=1.0) - @lencheck nlp.meta.nvar xr v hv - @lencheck nlp.meta.ncon y - n, m, ne = nlp.internal.meta.nvar, nlp.internal.meta.ncon, nlp.internal.nls_meta.nequ - x = @view xr[1:n] - if m > 0 - @views hprod!(nlp.internal, x, y[ne+1:end], v[1:n], hv[1:n], obj_weight=0.0) - else - fill!(hv, 0.0) - end - for i = 1:ne - @views hv[1:n] .+= hprod_residual(nlp.internal, x, i, v[1:n]) * y[i] - end - @views hv[n+1:end] .= obj_weight * v[n+1:end] - return hv -end - -function ghjvprod!(nlp :: FeasibilityFormNLS, x :: AbstractVector, g :: AbstractVector, v :: AbstractVector, gHv :: AbstractVector) - @lencheck nlp.meta.nvar x g v - @lencheck nlp.meta.ncon gHv - increment!(nlp, :neval_hprod) - n, m, ne = nlp.internal.meta.nvar, nlp.internal.meta.ncon, nlp.internal.nls_meta.nequ - IF = 1:ne - Ic = ne+1:ne+m - gHv[IF] .= [dot(g[1:n], hprod_residual(nlp.internal, x[1:n], j, v[1:n])) for j in IF] - if m > 0 - @views ghjvprod!(nlp.internal, x[1:n], g[1:n], v[1:n], gHv[Ic]) - end - return gHv -end - -function residual!(nlp :: FeasibilityFormNLS, x :: AbstractVector, Fx :: AbstractVector) - @lencheck nlp.meta.nvar x - @lencheck nlp.nls_meta.nequ Fx - increment!(nlp, :neval_residual) - n = nlp.internal.meta.nvar - Fx .= @view x[n+1:end] - return Fx -end - -function jac_structure_residual!(nlp :: FeasibilityFormNLS, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck nlp.nls_meta.nnzj rows cols - n, ne = nlp.internal.meta.nvar, nlp.internal.nls_meta.nequ - rows .= 1:ne - cols .= n .+ (1:ne) - return rows, cols -end - -function jac_coord_residual!(nlp :: FeasibilityFormNLS, x :: AbstractVector, vals :: AbstractVector) - @lencheck nlp.meta.nvar x - @lencheck nlp.nls_meta.nnzj vals - increment!(nlp, :neval_jac_residual) - vals[1:nlp.nls_meta.nnzj] .= 1 - return vals -end - -function jprod_residual!(nlp :: FeasibilityFormNLS, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck nlp.meta.nvar x v - @lencheck nlp.nls_meta.nequ Jv - increment!(nlp, :neval_jprod_residual) - n = nlp.internal.meta.nvar - Jv .= @view v[n+1:end] - return Jv -end - -function jtprod_residual!(nlp :: FeasibilityFormNLS, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck nlp.meta.nvar x Jtv - @lencheck nlp.nls_meta.nequ v - increment!(nlp, :neval_jtprod_residual) - n, ne = nlp.internal.meta.nvar, nlp.internal.nls_meta.nequ - Jtv[1:n] .= 0.0 - Jtv[n+1:end] .= v - return Jtv -end - -function hess_structure_residual!(nlp :: FeasibilityFormNLS, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck nlp.nls_meta.nnzh rows cols - return rows, cols # Hessian of residual is zero; do not change rows and cols -end - -function hess_coord_residual!(nlp :: FeasibilityFormNLS, x :: AbstractVector, v :: AbstractVector, vals :: AbstractVector) - @lencheck nlp.meta.nvar x - @lencheck nlp.nls_meta.nequ v - @lencheck nlp.nls_meta.nnzh vals - increment!(nlp, :neval_hess_residual) - return vals -end - -function jth_hess_residual(nlp :: FeasibilityFormNLS, x :: AbstractVector, i :: Int) - @lencheck nlp.meta.nvar x - increment!(nlp, :neval_jhess_residual) - n = nlp.meta.nvar - return spzeros(n, n) -end - -function 
hprod_residual!(nlp :: FeasibilityFormNLS, x :: AbstractVector, i :: Int, v :: AbstractVector, Hiv :: AbstractVector) - @lencheck nlp.meta.nvar x v Hiv - increment!(nlp, :neval_hprod_residual) - fill!(Hiv, 0.0) - return Hiv -end diff --git a/src/feasibility_residual.jl b/src/feasibility_residual.jl deleted file mode 100644 index 89bf49cb..00000000 --- a/src/feasibility_residual.jl +++ /dev/null @@ -1,135 +0,0 @@ -export FeasibilityResidual - -# TODO: Extend to handle bounds -""" -A feasibility residual model is created from a NLPModel of the form -```math -\\begin{aligned} - \\min_x \\quad & f(x) \\\\ -\\mathrm{s.t.} \\quad & c_L ≤ c(x) ≤ c_U \\\\ - & \\ell ≤ x ≤ u, -\\end{aligned} -``` -by creating slack variables ``s = c(x)`` and defining an NLS problem from the equality constraints. -The resulting problem is a bound-constrained nonlinear least-squares problem with residual -function ``F(x,s) = c(x) - s``: -```math -\\begin{aligned} - \\min_{x,s} \\quad & \\tfrac{1}{2} \\|c(x) - s\\|^2 \\\\ -\\mathrm{s.t.} \\quad & \\ell ≤ x ≤ u \\\\ - & c_L ≤ s ≤ c_U. -\\end{aligned} -``` -Notice that this problem is an `AbstractNLSModel`, thus the residual value, Jacobian and Hessian are explicitly defined through the [NLS API](@ref nls-api). -The slack variables are created using SlackModel. -If ``\\ell_i = u_i``, no slack variable is created. -In particular, if there are only equality constrained of the form ``c(x) = 0``, the resulting NLS is simply ``\\min_x \\tfrac{1}{2}\\|c(x)\\|^2``. -""" -mutable struct FeasibilityResidual <: AbstractNLSModel - meta :: NLPModelMeta - nls_meta :: NLSMeta - counters :: NLSCounters - nlp :: AbstractNLPModel -end - -show_header(io :: IO, nls :: FeasibilityResidual) = println(io, "FeasibilityResidual - Nonlinear least-squares defined from constraints of another problem") - -function FeasibilityResidual(nlp :: AbstractNLPModel; name="$(nlp.meta.name)-feasres") - if !equality_constrained(nlp) - if unconstrained(nlp) - throw(ErrorException("Can't handle unconstrained problem")) - else - return FeasibilityResidual(SlackModel(nlp), name=name) - end - end - - m, n = nlp.meta.ncon, nlp.meta.nvar - # TODO: What is copied? 
- meta = NLPModelMeta(n, x0=nlp.meta.x0, name=name, lvar=nlp.meta.lvar, - uvar=nlp.meta.uvar, nnzj=0) - nls_meta = NLSMeta(m, n, nnzj=nlp.meta.nnzj, nnzh=nlp.meta.nnzh, lin=nlp.meta.lin, nln=nlp.meta.nln) - nls = FeasibilityResidual(meta, nls_meta, NLSCounters(), nlp) - finalizer(nls -> finalize(nls.nlp), nls) - - return nls -end - -function residual!(nls :: FeasibilityResidual, x :: AbstractVector, Fx :: AbstractVector) - increment!(nls, :neval_residual) - cons!(nls.nlp, x, Fx) - Fx .-= nls.nlp.meta.lcon - return Fx -end - -function jac_residual(nls :: FeasibilityResidual, x :: AbstractVector) - increment!(nls, :neval_jac_residual) - return jac(nls.nlp, x) -end - -function jac_structure_residual!(nls :: FeasibilityResidual, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - return jac_structure!(nls.nlp, rows, cols) -end - -function jac_coord_residual!(nls :: FeasibilityResidual, x :: AbstractVector, vals :: AbstractVector) - increment!(nls, :neval_jac_residual) - return jac_coord!(nls.nlp, x, vals) -end - -function jprod_residual!(nls :: FeasibilityResidual, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - increment!(nls, :neval_jprod_residual) - return jprod!(nls.nlp, x, v, Jv) -end - -function jtprod_residual!(nls :: FeasibilityResidual, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - increment!(nls, :neval_jtprod_residual) - return jtprod!(nls.nlp, x, v, Jtv) -end - -function hess_residual(nls :: FeasibilityResidual, x :: AbstractVector, v :: AbstractVector) - increment!(nls, :neval_hess_residual) - return hess(nls.nlp, x, v, obj_weight = 0.0) -end - -function hess_structure_residual!(nls :: FeasibilityResidual, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - return hess_structure!(nls.nlp, rows, cols) -end - -function hess_coord_residual!(nls :: FeasibilityResidual, x :: AbstractVector, v :: AbstractVector, vals :: AbstractVector) - increment!(nls, :neval_hess_residual) - return hess_coord!(nls.nlp, x, v, vals, obj_weight=0.0) -end - -function jth_hess_residual(nls :: FeasibilityResidual, x :: AbstractVector, i :: Int) - increment!(nls, :neval_jhess_residual) - y = zeros(nls.nls_meta.nequ) - y[i] = 1.0 - return hess(nls.nlp, x, y, obj_weight = 0.0) -end - -function hprod_residual!(nls :: FeasibilityResidual, x :: AbstractVector, i :: Int, v :: AbstractVector, Hiv :: AbstractVector) - increment!(nls, :neval_hprod_residual) - y = zeros(nls.nls_meta.nequ) - y[i] = 1.0 - return hprod!(nls.nlp, x, y, v, Hiv, obj_weight = 0.0) -end - -function hess(nls :: FeasibilityResidual, x :: AbstractVector; obj_weight :: Real=one(eltype(x))) - increment!(nls, :neval_hess) - cx = cons(nls.nlp, x) - Jx = jac(nls.nlp, x) - Hx = tril(Jx' * Jx) - Hx .+= hess(nls.nlp, x, cx, obj_weight=0.0) - return obj_weight * Hx -end - -function hprod!(nls :: FeasibilityResidual, x :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight :: Real=one(eltype(x))) - increment!(nls, :neval_hess) - cx = cons(nls.nlp, x) - Jv = jprod(nls.nlp, x, v) - jtprod!(nls.nlp, x, Jv, Hv) - Hiv = zeros(eltype(x), nls.meta.nvar) - hprod!(nls.nlp, x, cx, v, Hiv, obj_weight=0.0) - Hv .+= Hiv - Hv .*= obj_weight - return Hv -end diff --git a/src/lls_model.jl b/src/lls_model.jl deleted file mode 100644 index 920d91d8..00000000 --- a/src/lls_model.jl +++ /dev/null @@ -1,234 +0,0 @@ -export LLSModel - -""" - nls = LLSModel(A, b; lvar, uvar, C, lcon, ucon) - -Creates a Linear Least Squares model ``\\tfrac{1}{2}\\|Ax - b\\|^2`` with optional 
bounds -`lvar ≦ x ≦ uvar` and optional linear constraints `lcon ≦ Cx ≦ ucon`. -This problem is a nonlinear least-squares problem with residual given by ``F(x) = Ax - b``. -""" -mutable struct LLSModel <: AbstractNLSModel - meta :: NLPModelMeta - nls_meta :: NLSMeta - counters :: NLSCounters - - Arows :: Vector{Int} - Acols :: Vector{Int} - Avals :: Vector - b :: AbstractVector - Crows :: Vector{Int} - Ccols :: Vector{Int} - Cvals :: Vector -end - -show_header(io :: IO, nls :: LLSModel) = println(io, "LLSModel - Linear least-squares model") - -function LLSModel(A :: AbstractMatrix, b :: AbstractVector; - x0 :: AbstractVector = zeros(size(A,2)), - lvar :: AbstractVector = fill(-Inf, size(A, 2)), - uvar :: AbstractVector = fill(Inf, size(A, 2)), - C :: AbstractMatrix = Matrix{Float64}(undef, 0, 0), - lcon :: AbstractVector = Float64[], - ucon :: AbstractVector = Float64[], - y0 :: AbstractVector = zeros(size(C,1)), - name :: String = "generic-LLSModel" - ) - nvar = size(A, 2) - Arows, Acols, Avals = if A isa AbstractSparseMatrix - findnz(A) - else - m, n = size(A) - I = ((i,j) for i = 1:m, j = 1:n) - getindex.(I, 1)[:], getindex.(I, 2)[:], A[:] - end - Crows, Ccols, Cvals = if C isa AbstractSparseMatrix - findnz(C) - else - m, n = size(C) - I = ((i,j) for i = 1:m, j = 1:n) - getindex.(I, 1)[:], getindex.(I, 2)[:], C[:] - end - LLSModel(Arows, Acols, Avals, nvar, b, x0=x0, lvar=lvar, uvar=uvar, - Crows=Crows, Ccols=Ccols, Cvals=Cvals, lcon=lcon, ucon=ucon, y0=y0, name=name) -end - -function LLSModel(Arows :: AbstractVector{<: Integer}, - Acols :: AbstractVector{<: Integer}, - Avals :: AbstractVector, - nvar :: Integer, - b :: AbstractVector; - x0 :: AbstractVector = zeros(nvar), - lvar :: AbstractVector = fill(-Inf, nvar), - uvar :: AbstractVector = fill(Inf, nvar), - Crows :: AbstractVector{<: Integer} = Int[], - Ccols :: AbstractVector{<: Integer} = Int[], - Cvals :: AbstractVector = Float64[], - lcon :: AbstractVector = Float64[], - ucon :: AbstractVector = Float64[], - y0 :: AbstractVector = zeros(length(lcon)), - name :: String = "generic-LLSModel" - ) - - nequ = length(b) - ncon = length(lcon) - if !(ncon == length(ucon) == length(y0)) - error("The length of lcon, ucon and y0 must be the same") - end - nnzjF = length(Avals) - if !(nnzjF == length(Arows) == length(Acols)) - error("The length of Arows, Acols and Avals must be the same") - end - nnzj = length(Cvals) - if !(nnzj == length(Crows) == length(Ccols)) - error("The length of Crows, Ccols and Cvals must be the same") - end - - meta = NLPModelMeta(nvar, x0=x0, lvar=lvar, uvar=uvar, ncon=ncon, y0=y0, lin=1:ncon, - nln=Int[], lcon=lcon, ucon=ucon, nnzj=nnzj, nnzh=0, name=name) - - nls_meta = NLSMeta(nequ, nvar, nnzj=nnzjF, nnzh=0, lin=1:nequ, nln=Int[]) - - return LLSModel(meta, nls_meta, NLSCounters(), Arows, Acols, Avals, b, Crows, Ccols, Cvals) -end - -function residual!(nls :: LLSModel, x :: AbstractVector, Fx :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.nls_meta.nequ Fx - increment!(nls, :neval_residual) - coo_prod!(nls.Arows, nls.Acols, nls.Avals, x, Fx) - Fx .-= nls.b - return Fx -end - -function jac_structure_residual!(nls :: LLSModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck nls.nls_meta.nnzj rows - @lencheck nls.nls_meta.nnzj cols - rows .= nls.Arows - cols .= nls.Acols - return rows, cols -end - -function jac_coord_residual!(nls :: LLSModel, x :: AbstractVector, vals :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.nls_meta.nnzj vals - 
increment!(nls, :neval_jac_residual) - vals .= nls.Avals - return vals -end - -function jprod_residual!(nls :: LLSModel, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.meta.nvar v - @lencheck nls.nls_meta.nequ Jv - increment!(nls, :neval_jprod_residual) - coo_prod!(nls.Arows, nls.Acols, nls.Avals, v, Jv) - return Jv -end - -function jtprod_residual!(nls :: LLSModel, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.nls_meta.nequ v - @lencheck nls.meta.nvar Jtv - increment!(nls, :neval_jtprod_residual) - coo_prod!(nls.Acols, nls.Arows, nls.Avals, v, Jtv) - return Jtv -end - -function hess_residual(nls :: LLSModel, x :: AbstractVector, v :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.nls_meta.nequ v - increment!(nls, :neval_hess_residual) - n = nls.meta.nvar - return spzeros(n, n) -end - -function hess_structure_residual!(nls :: LLSModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck 0 rows - @lencheck 0 cols - return rows, cols -end - -function hess_coord_residual!(nls :: LLSModel, x :: AbstractVector, v :: AbstractVector, vals :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.nls_meta.nequ v - @lencheck 0 vals - increment!(nls, :neval_hess_residual) - return vals -end - -function jth_hess_residual(nls :: LLSModel, x :: AbstractVector, i :: Int) - @lencheck nls.meta.nvar x - increment!(nls, :neval_jhess_residual) - n = nls.meta.nvar - return spzeros(n, n) -end - -function hprod_residual!(nls :: LLSModel, x :: AbstractVector, i :: Int, v :: AbstractVector, Hiv :: AbstractVector) - @lencheck nls.meta.nvar x v Hiv - increment!(nls, :neval_hprod_residual) - fill!(Hiv, zero(eltype(x))) - return Hiv -end - -function cons!(nls :: LLSModel, x :: AbstractVector, c :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.meta.ncon c - increment!(nls, :neval_cons) - coo_prod!(nls.Crows, nls.Ccols, nls.Cvals, x, c) - return c -end - -function jac_structure!(nls :: LLSModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck nls.meta.nnzj rows cols - rows .= nls.Crows - cols .= nls.Ccols - return rows, cols -end - -function jac_coord!(nls :: LLSModel, x :: AbstractVector, vals :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.meta.nnzj vals - increment!(nls, :neval_jac) - vals .= nls.Cvals - return vals -end - -function jprod!(nls :: LLSModel, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck nls.meta.nvar x v - @lencheck nls.meta.ncon Jv - increment!(nls, :neval_jprod) - coo_prod!(nls.Crows, nls.Ccols, nls.Cvals, v, Jv) - return Jv -end - -function jtprod!(nls :: LLSModel, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck nls.meta.nvar x Jtv - @lencheck nls.meta.ncon v - increment!(nls, :neval_jtprod) - coo_prod!(nls.Ccols, nls.Crows, nls.Cvals, v, Jtv) - return Jtv -end - -function hprod!(nls :: LLSModel, x :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight = 1.0) - @lencheck nls.meta.nvar x v Hv - increment!(nls, :neval_hprod) - Av = zeros(nls.nls_meta.nequ) - coo_prod!(nls.Arows, nls.Acols, nls.Avals, v, Av) - coo_prod!(nls.Acols, nls.Arows, nls.Avals, Av, Hv) - Hv .*= obj_weight - return Hv -end - -function hprod!(nls :: LLSModel, x :: AbstractVector, y :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight = 1.0) - @lencheck nls.meta.nvar x v Hv - @lencheck nls.meta.ncon 
y - hprod!(nls, x, v, Hv, obj_weight=obj_weight) -end - -function ghjvprod!(nls :: LLSModel, x :: AbstractVector{T}, g :: AbstractVector{T}, v :: AbstractVector{T}, gHv :: AbstractVector{T}) where T - @lencheck nls.meta.nvar x g v - @lencheck nls.meta.ncon gHv - increment!(nls, :neval_hprod) - gHv .= zeros(T, nls.meta.ncon) - return gHv -end diff --git a/src/model-interaction.jl b/src/model-interaction.jl deleted file mode 100644 index bf05bdac..00000000 --- a/src/model-interaction.jl +++ /dev/null @@ -1,44 +0,0 @@ -function FeasibilityFormNLS(nls :: FeasibilityResidual; name="$(nls.meta.name)-ffnls") - meta = nls.nlp.meta - nequ = meta.ncon - nvar = meta.nvar + nequ - ncon = meta.ncon - nnzj = meta.nnzj + nequ - nnzh = meta.nnzh + nequ - meta = NLPModelMeta(nvar, x0=[meta.x0; zeros(nequ)], - lvar=[meta.lvar; fill(-Inf, nequ)], - uvar=[meta.uvar; fill( Inf, nequ)], - ncon=ncon, - lcon=meta.lcon, - ucon=meta.ucon, - y0=meta.y0, - lin=meta.lin, - nln=meta.nln, - nnzj=nnzj, nnzh=nnzh, - name=name - ) - nls_meta = NLSMeta(nequ, nvar, x0=[meta.x0; zeros(nequ)], nnzj=nequ, nnzh=0) - - nlp = FeasibilityFormNLS{FeasibilityResidual}(meta, nls_meta, nls, NLSCounters()) - finalizer(nlp -> finalize(nlp.internal), nlp) - - return nlp -end - -function hess_structure!(nlp :: FeasibilityFormNLS{LLSModel}, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck nlp.meta.nnzh rows cols - n, ne = nlp.internal.meta.nvar, nlp.internal.nls_meta.nequ - rows .= n+1:n+ne - cols .= n+1:n+ne - return rows, cols -end - -function hess_coord!(nlp :: FeasibilityFormNLS{LLSModel}, xr :: AbstractVector, y :: AbstractVector, vals :: AbstractVector; - obj_weight :: Float64=1.0) - @lencheck nlp.meta.nvar xr - @lencheck nlp.meta.ncon y - @lencheck nlp.meta.nnzh vals - increment!(nlp, :neval_hess) - vals .= obj_weight - return vals -end diff --git a/src/core/nlp/api.jl b/src/nlp/api.jl similarity index 100% rename from src/core/nlp/api.jl rename to src/nlp/api.jl diff --git a/src/core/nlp/counters.jl b/src/nlp/counters.jl similarity index 100% rename from src/core/nlp/counters.jl rename to src/nlp/counters.jl diff --git a/src/core/nlp/meta.jl b/src/nlp/meta.jl similarity index 100% rename from src/core/nlp/meta.jl rename to src/nlp/meta.jl diff --git a/src/core/nlp/show.jl b/src/nlp/show.jl similarity index 100% rename from src/core/nlp/show.jl rename to src/nlp/show.jl diff --git a/src/core/nlp/tools.jl b/src/nlp/tools.jl similarity index 100% rename from src/core/nlp/tools.jl rename to src/nlp/tools.jl diff --git a/src/core/nlp/utils.jl b/src/nlp/utils.jl similarity index 100% rename from src/core/nlp/utils.jl rename to src/nlp/utils.jl diff --git a/src/core/nls/api.jl b/src/nls/api.jl similarity index 100% rename from src/core/nls/api.jl rename to src/nls/api.jl diff --git a/src/core/nls/counters.jl b/src/nls/counters.jl similarity index 100% rename from src/core/nls/counters.jl rename to src/nls/counters.jl diff --git a/src/core/nls/meta.jl b/src/nls/meta.jl similarity index 100% rename from src/core/nls/meta.jl rename to src/nls/meta.jl diff --git a/src/core/nls/show.jl b/src/nls/show.jl similarity index 96% rename from src/core/nls/show.jl rename to src/nls/show.jl index a55ba6f9..e0eba4f9 100644 --- a/src/core/nls/show.jl +++ b/src/nls/show.jl @@ -21,7 +21,7 @@ function Base.show(io :: IO, nm :: NLSMeta) end function Base.show(io :: IO, m :: NLPModelMeta, nm :: NLSMeta) - println(" Problem name: $(m.name)") + println(io, " Problem name: $(m.name)") nlplines = lines_of_description(m) nlslines 
= lines_of_description(nm) append!(nlslines, repeat([""], length(nlplines) - length(nlslines))) diff --git a/src/core/nls/tools.jl b/src/nls/tools.jl similarity index 100% rename from src/core/nls/tools.jl rename to src/nls/tools.jl diff --git a/src/core/nls/utils.jl b/src/nls/utils.jl similarity index 100% rename from src/core/nls/utils.jl rename to src/nls/utils.jl diff --git a/src/qn_model.jl b/src/qn_model.jl deleted file mode 100644 index c475af71..00000000 --- a/src/qn_model.jl +++ /dev/null @@ -1,72 +0,0 @@ -export QuasiNewtonModel, LBFGSModel, LSR1Model - -abstract type QuasiNewtonModel <: AbstractNLPModel end - -mutable struct LBFGSModel <: QuasiNewtonModel - meta :: NLPModelMeta - model :: AbstractNLPModel - op :: LBFGSOperator -end - -mutable struct LSR1Model <: QuasiNewtonModel - meta :: NLPModelMeta - model :: AbstractNLPModel - op :: LSR1Operator -end - -"Construct a `LBFGSModel` from another type of model." -function LBFGSModel(nlp :: AbstractNLPModel; kwargs...) - op = LBFGSOperator(nlp.meta.nvar; kwargs...) - return LBFGSModel(nlp.meta, nlp, op) -end - -"Construct a `LSR1Model` from another type of nlp." -function LSR1Model(nlp :: AbstractNLPModel; kwargs...) - op = LSR1Operator(nlp.meta.nvar; kwargs...) - return LSR1Model(nlp.meta, nlp, op) -end - -show_header(io :: IO, nlp :: QuasiNewtonModel) = println(io, "$(typeof(nlp)) - A QuasiNewtonModel") - -function Base.show(io :: IO, nlp :: QuasiNewtonModel) - show_header(io, nlp) - show(io, nlp.meta) - show(io, nlp.model.counters) -end - -@default_counters QuasiNewtonModel model - -function reset_data!(nlp :: QuasiNewtonModel) - reset!(nlp.op) - return nlp -end - -# the following methods are not affected by the Hessian approximation -for meth in (:obj, :grad, :cons, :jac_coord, :jac) - @eval $meth(nlp :: QuasiNewtonModel, x :: AbstractVector) = $meth(nlp.model, x) -end -for meth in (:grad!, :cons!, :jprod, :jtprod, :objgrad, :objgrad!) - @eval $meth(nlp :: QuasiNewtonModel, x :: AbstractVector, y :: AbstractVector) = $meth(nlp.model, x, y) -end -for meth in (:jprod!, :jtprod!) - @eval $meth(nlp :: QuasiNewtonModel, x :: AbstractVector, y :: AbstractVector, z :: AbstractVector) = $meth(nlp.model, x, y, z) -end -jac_structure!(nlp :: QuasiNewtonModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) = jac_structure!(nlp.model, rows, cols) -jac_coord!(nlp :: QuasiNewtonModel, x :: AbstractVector, vals :: AbstractVector) = - jac_coord!(nlp.model, x, vals) - -# the following methods are affected by the Hessian approximation -hess_op(nlp :: QuasiNewtonModel, x :: AbstractVector; kwargs...) = nlp.op -hprod(nlp :: QuasiNewtonModel, x :: AbstractVector, v :: AbstractVector; kwargs...) = nlp.op * v -function hprod!(nlp :: QuasiNewtonModel, x :: AbstractVector, - v :: AbstractVector, Hv :: AbstractVector; kwargs...) - Hv[1:nlp.meta.nvar] .= nlp.op * v - return Hv -end - -function Base.push!(nlp :: QuasiNewtonModel, args...) - push!(nlp.op, args...) - return nlp -end - -# not implemented: hess_structure, hess_coord, hess, ghjvprod diff --git a/src/slack_model.jl b/src/slack_model.jl deleted file mode 100644 index 2b9b95c0..00000000 --- a/src/slack_model.jl +++ /dev/null @@ -1,442 +0,0 @@ -export SlackModel, SlackNLSModel - - -@doc raw"""A model whose only inequality constraints are bounds. - -Given a model, this type represents a second model in which slack variables are -introduced so as to convert linear and nonlinear inequality constraints to -equality constraints and bounds. 
More precisely, if the original model has the -form - -```math -\begin{aligned} - \min_x \quad & f(x)\\ -\mathrm{s.t.} \quad & c_L ≤ c(x) ≤ c_U,\\ - & ℓ ≤ x ≤ u, -\end{aligned} -``` - -the new model appears to the user as - -```math -\begin{aligned} - \min_X \quad & f(X)\\ -\mathrm{s.t.} \quad & g(X) = 0,\\ - & L ≤ X ≤ U. -\end{aligned} -``` - -The unknowns ``X = (x, s)`` contain the original variables and slack variables -``s``. The latter are such that the new model has the general form - -```math -\begin{aligned} - \min_x \quad & f(x)\\ -\mathrm{s.t.} \quad & c(x) - s = 0,\\ - & c_L ≤ s ≤ c_U,\\ - & ℓ ≤ x ≤ u. -\end{aligned} -``` - -although no slack variables are introduced for equality constraints. - -The slack variables are implicitly ordered as `[s(low), s(upp), s(rng)]`, where -`low`, `upp` and `rng` represent the indices of the constraints of the form -``c_L ≤ c(x) < ∞``, ``-∞ < c(x) ≤ c_U`` and -``c_L ≤ c(x) ≤ c_U``, respectively. -""" -mutable struct SlackModel <: AbstractNLPModel - meta :: NLPModelMeta - model :: AbstractNLPModel -end - -show_header(io :: IO, nlp :: SlackModel) = println(io, "SlackModel - Model with slack variables") - -function Base.show(io :: IO, nlp :: SlackModel) - show_header(io, nlp) - show(io, nlp.meta) - show(io, nlp.model.counters) -end - -"""Like `SlackModel`, this model converts inequalities into equalities and bounds. -""" -mutable struct SlackNLSModel <: AbstractNLSModel - meta :: NLPModelMeta - nls_meta :: NLSMeta - model :: AbstractNLPModel -end - -show_header(io :: IO, nls :: SlackNLSModel) = println(io, "SlackNLSModel - Nonlinear least-squares model with slack variables") - -function Base.show(io :: IO, nls :: SlackNLSModel) - show_header(io, nls) - show(io, nls.meta, nls.nls_meta) - show(io, nls.model.counters) -end - -function slack_meta(meta :: NLPModelMeta; name=meta.name * "-slack") - ns = meta.ncon - length(meta.jfix) - jlow = meta.jlow - jupp = meta.jupp - jrng = meta.jrng - T = eltype(meta.x0) - - # Don't introduce slacks for equality constraints! - lvar = [meta.lvar ; meta.lcon[[jlow ; jupp ; jrng]]] # l ≤ x and cₗ ≤ s - uvar = [meta.uvar ; meta.ucon[[jlow ; jupp ; jrng]]] # x ≤ u and s ≤ cᵤ - lcon = zeros(T, meta.ncon) - lcon[meta.jfix] = meta.lcon[meta.jfix] - ucon = zeros(T, meta.ncon) - ucon[meta.jfix] = meta.ucon[meta.jfix] - - return NLPModelMeta( - meta.nvar + ns, - x0=[meta.x0 ; zeros(T, ns)], - lvar=lvar, - uvar=uvar, - ncon=meta.ncon, - lcon=lcon, - ucon=ucon, - y0=meta.y0, - nnzj=meta.nnzj + ns, - nnzh=meta.nnzh, - lin=meta.lin, - nln=meta.nln, - name=name - ) -end - -"Construct a `SlackModel` from another type of model." 
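As an aside on the constructor deleted below: `SlackModel` returns the model unchanged when every constraint is an equality, and otherwise appends one slack per inequality. A minimal usage sketch, assuming the post-split package layout (`ADNLPModel` from ADNLPModels.jl and `SlackModel` from NLPModelsModifiers.jl, the presumed new homes of these types):

```julia
using NLPModels, ADNLPModels, NLPModelsModifiers

f(x) = (x[1] - 1)^2 + (x[2] - 2)^2
c(x) = [x[1] + x[2]; x[1] * x[2]]
# c₁ is an equality (lcon == ucon); c₂ is the inequality c₂(x) ≤ 4
nlp = ADNLPModel(f, zeros(2), c, [1.0; -Inf], [1.0; 4.0])

snlp = SlackModel(nlp)
snlp.meta.nvar                          # 3: one slack added, for c₂ only
all(snlp.meta.lcon .== snlp.meta.ucon)  # true: every constraint is now an equality
```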
-function SlackModel(model :: AbstractNLPModel; name=model.meta.name * "-slack") - model.meta.ncon == length(model.meta.jfix) && return model - - meta = slack_meta(model.meta, name=name) - - snlp = SlackModel(meta, model) - finalizer(nlp -> finalize(nlp.model), snlp) - - return snlp -end - -function SlackNLSModel(model :: AbstractNLSModel; name=model.meta.name * "-slack") - ns = model.meta.ncon - length(model.meta.jfix) - ns == 0 && return model - - meta = slack_meta(model.meta, name=name) - nls_meta = NLSMeta(model.nls_meta.nequ, - model.meta.nvar + ns, - x0=[model.meta.x0; zeros(eltype(model.meta.x0), ns)], - nnzj=model.nls_meta.nnzj, - nnzh=model.nls_meta.nnzh, - lin=model.nls_meta.lin, - nln=model.nls_meta.nln - ) - - snls = SlackNLSModel(meta, nls_meta, model) - finalizer(nls -> finalize(nls.model), snls) - - return snls -end - -const SlackModels = Union{SlackModel,SlackNLSModel} - -# retrieve counters from underlying model -@default_counters SlackModels model -@default_nlscounters SlackNLSModel model - -nls_meta(nlp :: SlackNLSModel) = nlp.nls_meta - -function obj(nlp :: SlackModels, x :: AbstractVector) - @lencheck nlp.meta.nvar x - # f(X) = f(x) - return obj(nlp.model, @view x[1:nlp.model.meta.nvar]) -end - -function grad!(nlp :: SlackModels, x :: AbstractVector, g :: AbstractVector) - @lencheck nlp.meta.nvar x g - # ∇f(X) = [∇f(x) ; 0] - n = nlp.model.meta.nvar - ns = nlp.meta.nvar - n - @views grad!(nlp.model, x[1:n], g[1:n]) - g[n+1:n+ns] .= 0 - return g -end - -function objgrad!(nlp :: SlackModels, x :: AbstractVector, g :: AbstractVector) - @lencheck nlp.meta.nvar x g - n = nlp.model.meta.nvar - ns = nlp.meta.nvar - n - @views f, _ = objgrad!(nlp.model, x[1:n], g[1:n]) - g[n+1:n+ns] .= 0 - return f, g -end - -function cons!(nlp :: SlackModels, x :: AbstractVector, c :: AbstractVector) - @lencheck nlp.meta.nvar x - @lencheck nlp.meta.ncon c - n = nlp.model.meta.nvar - ns = nlp.meta.nvar - n - nlow = length(nlp.model.meta.jlow) - nupp = length(nlp.model.meta.jupp) - nrng = length(nlp.model.meta.jrng) - @views begin - cons!(nlp.model, x[1:n], c) - c[nlp.model.meta.jlow] -= x[n+1:n+nlow] - c[nlp.model.meta.jupp] -= x[n+nlow+1:n+nlow+nupp] - c[nlp.model.meta.jrng] -= x[n+nlow+nupp+1:n+nlow+nupp+nrng] - end - return c -end - -function jac_structure!(nlp :: SlackModels, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck nlp.meta.nnzj rows cols - n = nlp.model.meta.nvar - ns = nlp.meta.nvar - n - nnzj = nlp.model.meta.nnzj - @views jac_structure!(nlp.model, rows[1:nnzj], cols[1:nnzj]) - jlow = nlp.model.meta.jlow - jupp = nlp.model.meta.jupp - jrng = nlp.model.meta.jrng - nj, lj = nnzj, length(jlow) - rows[nj+1:nj+lj] .= jlow - nj, lj = nj + lj, length(jupp) - rows[nj+1:nj+lj] .= jupp - nj, lj = nj + lj, length(jrng) - rows[nj+1:nj+lj] .= jrng - cols[nnzj+1:end] .= n+1:nlp.meta.nvar - return rows, cols -end - -function jac_coord!(nlp :: SlackModels, x :: AbstractVector, vals :: AbstractVector) - @lencheck nlp.meta.nvar x - @lencheck nlp.meta.nnzj vals - n = nlp.model.meta.nvar - nnzj = nlp.model.meta.nnzj - @views jac_coord!(nlp.model, x[1:n], vals[1:nnzj]) - vals[nnzj+1:nlp.meta.nnzj] .= -1 - return vals -end - -function jprod!(nlp :: SlackModels, x :: AbstractVector, v :: AbstractVector, jv :: AbstractVector) - # J(X) V = [J(x) -I] [vₓ] = J(x) vₓ - vₛ - # [vₛ] - @lencheck nlp.meta.nvar x v - @lencheck nlp.meta.ncon jv - n = nlp.model.meta.nvar - ns = nlp.meta.nvar - n - @views jprod!(nlp.model, x[1:n], v[1:n], jv) - k = 1 - # use 3 loops to avoid 
forming [jlow ; jupp ; jrng] - for j in nlp.model.meta.jlow - jv[j] -= v[n+k] - k += 1 - end - for j in nlp.model.meta.jupp - jv[j] -= v[n+k] - k += 1 - end - for j in nlp.model.meta.jrng - jv[j] -= v[n+k] - k += 1 - end - return jv -end - -function jtprod!(nlp :: SlackModels, x :: AbstractVector, v :: AbstractVector, jtv :: AbstractVector) - # J(X)ᵀ v = [J(x)ᵀ] v = [J(x)ᵀ v] - # [ -I ] [ -v ] - @lencheck nlp.meta.nvar x jtv - @lencheck nlp.meta.ncon v - n = nlp.model.meta.nvar - nlow = length(nlp.model.meta.jlow) - nupp = length(nlp.model.meta.jupp) - nrng = length(nlp.model.meta.jrng) - @views begin - jtprod!(nlp.model, x[1:n], v, jtv[1:n]) - jtv[n+1:n+nlow] = -v[nlp.model.meta.jlow] - jtv[n+nlow+1:n+nlow+nupp] = -v[nlp.model.meta.jupp] - jtv[n+nlow+nupp+1:nlp.meta.nvar] = -v[nlp.model.meta.jrng] - end - return jtv -end - -function hess_structure!(nlp :: SlackModels, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck nlp.meta.nnzh rows cols - return hess_structure!(nlp.model, rows, cols) -end - -function hess_coord!(nlp :: SlackModels, x :: AbstractVector, vals :: AbstractVector; - obj_weight :: Real=one(eltype(x))) - @lencheck nlp.meta.nvar x - @lencheck nlp.meta.nnzh vals - n = nlp.model.meta.nvar - return hess_coord!(nlp.model, view(x, 1:n), vals, obj_weight=obj_weight) -end - -function hess_coord!(nlp :: SlackModels, x :: AbstractVector, y :: AbstractVector, vals :: AbstractVector; - obj_weight :: Real=one(eltype(x))) - @lencheck nlp.meta.nvar x - @lencheck nlp.meta.ncon y - @lencheck nlp.meta.nnzh vals - n = nlp.model.meta.nvar - return hess_coord!(nlp.model, view(x, 1:n), y, vals, obj_weight=obj_weight) -end - -# Kept in case some model implements `hess` but not `hess_coord/structure` -function hess(nlp :: SlackModels, x :: AbstractVector{T}; kwargs...) where T - @lencheck nlp.meta.nvar x - n = nlp.model.meta.nvar - ns = nlp.meta.nvar - n - Hx = hess(nlp.model, view(x, 1:n); kwargs...) - return [Hx spzeros(T, n, ns); spzeros(T, ns, n + ns)] -end - -function hess(nlp :: SlackModels, x :: AbstractVector{T}, y :: AbstractVector{T}; kwargs...) where T - @lencheck nlp.meta.nvar x - @lencheck nlp.meta.ncon y - n = nlp.model.meta.nvar - ns = nlp.meta.nvar - n - Hx = hess(nlp.model, view(x, 1:n), y; kwargs...) 
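# The slack block of the Lagrangian Hessian is identically zero, because the
# slacks enter the constraints linearly; the dense fallback below therefore
# only pads Hx with ns zero rows and columns, i.e. [Hx 0; 0 0] in block form.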
- return [Hx spzeros(T, n, ns); spzeros(T, ns, n + ns)] -end - -function hprod!(nlp :: SlackModels, x :: AbstractVector, v :: AbstractVector, - hv :: AbstractVector; - obj_weight :: Real=one(eltype(x))) - @lencheck nlp.meta.nvar x v hv - n = nlp.model.meta.nvar - ns = nlp.meta.nvar - n - # using hv[1:n] doesn't seem to work here - @views hprod!(nlp.model, x[1:n], v[1:n], hv[1:n], obj_weight=obj_weight) - hv[n+1:nlp.meta.nvar] .= 0 - return hv -end - -function hprod!(nlp :: SlackModels, x :: AbstractVector, y :: AbstractVector, v :: AbstractVector, hv :: AbstractVector; obj_weight :: Real=one(eltype(x))) - @lencheck nlp.meta.nvar x v hv - @lencheck nlp.meta.ncon y - n = nlp.model.meta.nvar - ns = nlp.meta.nvar - n - # using hv[1:n] doesn't seem to work here - @views hprod!(nlp.model, x[1:n], y, v[1:n], hv[1:n], obj_weight=obj_weight) - hv[n+1:nlp.meta.nvar] .= 0 - return hv -end - -function ghjvprod!(nlp :: SlackModels, x :: AbstractVector, g :: AbstractVector, v :: AbstractVector, gHv :: AbstractVector) - @lencheck nlp.meta.nvar x g v - @lencheck nlp.meta.ncon gHv - n = nlp.model.meta.nvar - return ghjvprod!(nlp.model, view(x, 1:n), view(g, 1:n), view(v, 1:n), gHv) -end - -function residual!(nls :: SlackNLSModel, x :: AbstractVector, Fx :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.nls_meta.nequ Fx - return residual!(nls.model, view(x, 1:nls.model.meta.nvar), Fx) -end - -function jac_residual(nls :: SlackNLSModel, x :: AbstractVector{T}) where T - @lencheck nls.meta.nvar x - n = nls.model.meta.nvar - ns = nls.meta.nvar - n - ne = nls.nls_meta.nequ - Jx = jac_residual(nls.model, @view x[1:n]) - if issparse(Jx) - return [Jx spzeros(T, ne, ns)] - else - return [Jx zeros(T, ne, ns)] - end -end - -function jac_structure_residual!(nls :: SlackNLSModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck nls.nls_meta.nnzj rows - @lencheck nls.nls_meta.nnzj cols - return jac_structure_residual!(nls.model, rows, cols) -end - -function jac_coord_residual!(nls :: SlackNLSModel, x :: AbstractVector, vals :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.nls_meta.nnzj vals - return jac_coord_residual!(nls.model, view(x, 1:nls.model.meta.nvar), vals) -end - -function jprod_residual!(nls :: SlackNLSModel, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck nls.meta.nvar x v - @lencheck nls.nls_meta.nequ Jv - return jprod_residual!(nls.model, view(x, 1:nls.model.meta.nvar), - v[1:nls.model.meta.nvar], Jv) -end - -function jtprod_residual!(nls :: SlackNLSModel, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck nls.meta.nvar x Jtv - @lencheck nls.nls_meta.nequ v - n = nls.model.meta.nvar - ns = nls.meta.nvar - n - @views jtprod_residual!(nls.model, x[1:n], v, Jtv[1:n]) - Jtv[n+1:n+ns] .= 0 - return Jtv -end - -function jac_op_residual!(nls :: SlackNLSModel, x :: AbstractVector, - Jv :: AbstractVector, Jtv :: AbstractVector) - @lencheck nls.meta.nvar x Jtv - @lencheck nls.nls_meta.nequ Jv - prod = @closure v -> jprod_residual!(nls, x, v, Jv) - ctprod = @closure v -> jtprod_residual!(nls, x, v, Jtv) - return LinearOperator{eltype(x)}(nls_meta(nls).nequ, nls_meta(nls).nvar, - false, false, prod, ctprod, ctprod) -end - -function hess_residual(nls :: SlackNLSModel, x :: AbstractVector{T}, v :: AbstractVector{T}) where T - @lencheck nls.meta.nvar x - @lencheck nls.nls_meta.nequ v - n = nls.model.meta.nvar - ns = nls.meta.nvar - n - Hx = hess_residual(nls.model, view(x, 1:n), v) - if issparse(Hx) - 
return [Hx spzeros(T, n, ns); spzeros(T, ns, n + ns)] - else - return [Hx zeros(T, n, ns); zeros(T, ns, n + ns)] - end -end - -function hess_structure_residual!(nls :: SlackNLSModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck nls.nls_meta.nnzh rows cols - return hess_structure_residual!(nls.model, rows, cols) -end - -function hess_coord_residual!(nls :: SlackNLSModel, x :: AbstractVector, v :: AbstractVector, vals :: AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.nls_meta.nequ v - @lencheck nls.nls_meta.nnzh vals - return hess_coord_residual!(nls.model, view(x, 1:nls.model.meta.nvar), v, vals) -end - -function jth_hess_residual(nls :: SlackNLSModel, x :: AbstractVector{T}, i :: Int) where T - @lencheck nls.meta.nvar x - n = nls.model.meta.nvar - ns = nls.meta.nvar - n - Hx = jth_hess_residual(nls.model, view(x, 1:n), i) - if issparse(Hx) - return [Hx spzeros(T, n, ns); spzeros(T, ns, n + ns)] - else - return [Hx zeros(T, n, ns); zeros(T, ns, n + ns)] - end -end - -function hprod_residual!(nls :: SlackNLSModel, x :: AbstractVector, i :: Int, v :: AbstractVector, Hv :: AbstractVector) - @lencheck nls.meta.nvar x v Hv - n = nls.model.meta.nvar - ns = nls.meta.nvar - n - @views hprod_residual!(nls.model, x[1:n], i, v[1:n], Hv[1:n]) - Hv[n+1:n+ns] .= 0 - return Hv -end - -function hess_op_residual!(nls :: SlackNLSModel, x :: AbstractVector, i :: Int, Hiv :: AbstractVector) - @lencheck nls.meta.nvar x Hiv - prod = @closure v -> hprod_residual!(nls, x, i, v, Hiv) - return LinearOperator{eltype(x)}(nls_meta(nls).nvar, nls_meta(nls).nvar, - true, true, prod, prod, prod) -end diff --git a/test/TestUtils/TestUtils.jl b/test/TestUtils/TestUtils.jl deleted file mode 100644 index 74b21165..00000000 --- a/test/TestUtils/TestUtils.jl +++ /dev/null @@ -1,25 +0,0 @@ -module TestUtils - -using LinearAlgebra, SparseArrays, Test - -using NLPModels - -const nlp_problems = ["BROWNDEN", "HS5", "HS6", "HS10", "HS11", "HS14", "LINCON", "LINSV", "MGH01Feas"] -const nls_problems = ["LLS", "MGH01", "NLSHS20", "NLSLC"] - -# Including problems so that they won't be multiply loaded -# GENROSE does not have a manual version, so it's separate -for problem in nlp_problems ∪ ["GENROSE"] - include("nlp/problems/$(lowercase(problem)).jl") -end -for problem in nls_problems - include("nls/problems/$(lowercase(problem)).jl") -end - -for f in ["check-dimensions", "consistency", "multiple-precision", "view-subarray"] - include("nlp/$f.jl") - include("nls/$f.jl") -end -include("nlp/coord-memory.jl") - -end \ No newline at end of file diff --git a/test/TestUtils/nlp/check-dimensions.jl b/test/TestUtils/nlp/check-dimensions.jl deleted file mode 100644 index 78e1069b..00000000 --- a/test/TestUtils/nlp/check-dimensions.jl +++ /dev/null @@ -1,104 +0,0 @@ -export check_nlp_dimensions - -""" - check_nlp_dimensions(nlp; exclude_hess=false) - -Make sure NLP API functions will throw DimensionError if the inputs are not the correct dimension. 
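For example, calling any API function with a wrong-sized input must raise `DimensionError`, not a `BoundsError` or a silent wrong answer. A minimal sketch of what this helper asserts, assuming `ADNLPModel` from ADNLPModels.jl (the presumed new home of that type):

```julia
using NLPModels, Test
using ADNLPModels  # assumption: post-split home of ADNLPModel

nlp = ADNLPModel(x -> sum(x .^ 2), ones(3))
badx = zeros(nlp.meta.nvar + 1)  # deliberately the wrong length
@test_throws DimensionError obj(nlp, badx)
@test_throws DimensionError grad(nlp, badx)
@test_throws DimensionError grad!(nlp, ones(3), zeros(4))  # output buffers are checked too
```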
-To make this assertion in your code use - - @lencheck size input [more inputs separated by spaces] -""" -function check_nlp_dimensions(nlp; exclude_hess=false) - n, m = nlp.meta.nvar, nlp.meta.ncon - nnzh, nnzj = nlp.meta.nnzh, nlp.meta.nnzj - - x, badx = nlp.meta.x0, zeros(n + 1) - v, badv = ones(n), zeros(n + 1) - Hv, badHv = zeros(n), zeros(n + 1) - hrows, badhrows = zeros(Int, nnzh), zeros(Int, nnzh + 1) - hcols, badhcols = zeros(Int, nnzh), zeros(Int, nnzh + 1) - hvals, badhvals = zeros(nnzh), zeros(nnzh + 1) - @test_throws DimensionError obj(nlp, badx) - @test_throws DimensionError grad(nlp, badx) - @test_throws DimensionError grad!(nlp, badx, v) - @test_throws DimensionError grad!(nlp, x, badv) - @test_throws DimensionError hprod(nlp, badx, v) - @test_throws DimensionError hprod(nlp, x, badv) - @test_throws DimensionError hprod!(nlp, badx, v, Hv) - @test_throws DimensionError hprod!(nlp, x, badv, Hv) - @test_throws DimensionError hprod!(nlp, x, v, badHv) - @test_throws DimensionError hess_op(nlp, badx) - @test_throws DimensionError hess_op!(nlp, badx, Hv) - @test_throws DimensionError hess_op!(nlp, x, badHv) - @test_throws DimensionError hess_op!(nlp, badhrows, hcols, hvals, Hv) - @test_throws DimensionError hess_op!(nlp, hrows, badhcols, hvals, Hv) - @test_throws DimensionError hess_op!(nlp, hrows, hcols, badhvals, Hv) - @test_throws DimensionError hess_op!(nlp, hrows, hcols, hvals, badHv) - if !exclude_hess - @test_throws DimensionError hess(nlp, badx) - @test_throws DimensionError hess_structure!(nlp, badhrows, hcols) - @test_throws DimensionError hess_structure!(nlp, hrows, badhcols) - @test_throws DimensionError hess_coord!(nlp, badx, hvals) - @test_throws DimensionError hess_coord!(nlp, x, badhvals) - end - - if m > 0 - y, bady = nlp.meta.y0, zeros(m + 1) - w, badw = ones(m), zeros(m + 1) - Jv, badJv = zeros(m), zeros(m + 1) - Jtw, badJtw = zeros(n), zeros(n + 1) - jrows, badjrows = zeros(Int, nnzj), zeros(Int, nnzj + 1) - jcols, badjcols = zeros(Int, nnzj), zeros(Int, nnzj + 1) - jvals, badjvals = zeros(nnzj), zeros(nnzj + 1) - @test_throws DimensionError hprod(nlp, badx, y, v) - @test_throws DimensionError hprod(nlp, x, bady, v) - @test_throws DimensionError hprod(nlp, x, y, badv) - if !exclude_hess - @test_throws DimensionError hprod!(nlp, badx, y, v, Hv) - @test_throws DimensionError hprod!(nlp, x, bady, v, Hv) - @test_throws DimensionError hprod!(nlp, x, y, badv, Hv) - @test_throws DimensionError hprod!(nlp, x, y, v, badHv) - @test_throws DimensionError hess(nlp, badx, y) - @test_throws DimensionError hess(nlp, x, bady) - @test_throws DimensionError hess_op(nlp, badx, y) - @test_throws DimensionError hess_op(nlp, x, bady) - @test_throws DimensionError hess_op!(nlp, badx, y, Hv) - @test_throws DimensionError hess_op!(nlp, x, bady, Hv) - @test_throws DimensionError hess_op!(nlp, x, y, badHv) - @test_throws DimensionError hess_coord!(nlp, badx, y, hvals) - @test_throws DimensionError hess_coord!(nlp, x, bady, hvals) - @test_throws DimensionError hess_coord!(nlp, x, y, badhvals) - @test_throws DimensionError ghjvprod(nlp, badx, v, v) - @test_throws DimensionError ghjvprod(nlp, x, badv, v) - @test_throws DimensionError ghjvprod(nlp, x, v, badv) - end - @test_throws DimensionError cons(nlp, badx) - @test_throws DimensionError cons!(nlp, badx, w) - @test_throws DimensionError cons!(nlp, x, badw) - @test_throws DimensionError jac(nlp, badx) - @test_throws DimensionError jprod(nlp, badx, v) - @test_throws DimensionError jprod(nlp, x, badv) - @test_throws DimensionError 
jprod!(nlp, badx, v, Jv) - @test_throws DimensionError jprod!(nlp, x, badv, Jv) - @test_throws DimensionError jprod!(nlp, x, v, badJv) - @test_throws DimensionError jtprod(nlp, badx, w) - @test_throws DimensionError jtprod(nlp, x, badw) - @test_throws DimensionError jtprod!(nlp, badx, w, Jtw) - @test_throws DimensionError jtprod!(nlp, x, badw, Jtw) - @test_throws DimensionError jtprod!(nlp, x, w, badJtw) - @test_throws DimensionError jac_structure!(nlp, badjrows, jcols) - @test_throws DimensionError jac_structure!(nlp, jrows, badjcols) - @test_throws DimensionError jac_coord(nlp, badx) - @test_throws DimensionError jac_coord!(nlp, badx, jvals) - @test_throws DimensionError jac_coord!(nlp, x, badjvals) - @test_throws DimensionError jac_op(nlp, badx) - @test_throws DimensionError jac_op!(nlp, badx, Jv, Jtw) - @test_throws DimensionError jac_op!(nlp, x, badJv, Jtw) - @test_throws DimensionError jac_op!(nlp, x, Jv, badJtw) - @test_throws DimensionError jac_op!(nlp, badjrows, jcols, jvals, Jv, Jtw) - @test_throws DimensionError jac_op!(nlp, jrows, badjcols, jvals, Jv, Jtw) - @test_throws DimensionError jac_op!(nlp, jrows, jcols, badjvals, Jv, Jtw) - @test_throws DimensionError jac_op!(nlp, jrows, jcols, jvals, badJv, Jtw) - @test_throws DimensionError jac_op!(nlp, jrows, jcols, jvals, Jv, badJtw) - end -end diff --git a/test/TestUtils/nlp/consistency.jl b/test/TestUtils/nlp/consistency.jl deleted file mode 100644 index be5a028c..00000000 --- a/test/TestUtils/nlp/consistency.jl +++ /dev/null @@ -1,453 +0,0 @@ -export consistent_nlps - -""" - consistent_nlps(nlps; exclude=[ghjvprod], test_meta=true, test_slack=true, rtol=1.0e-8) - -Check that all models in the vector `nlps` are consistent, in the sense that -- Their counters are the same. -- Their `meta` information is the same. -- The API functions return the same output given the same input. - -In other words, if you create two models of the same problem, they should be consistent. - -The keyword `exclude` can be used to pass functions to be ignored when some of the models do not implement them.
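A sketch of the intended use, assuming these helpers keep their names once they move to NLPModelsTest.jl (their presumed destination):

```julia
using NLPModels, NLPModelsTest, ADNLPModels  # assumed post-split packages

# two formulations of min (1 - x₁)² + 100(x₂ - x₁²)²; the second instance
# stands in for, e.g., a hand-written model of the same problem
rosenbrock(x) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2
nlp1 = ADNLPModel(rosenbrock, [-1.2; 1.0])
nlp2 = ADNLPModel(rosenbrock, [-1.2; 1.0])

consistent_nlps([nlp1, nlp2])  # counters, meta and API outputs must all agree
```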
-""" -function consistent_nlps(nlps; exclude=[ghjvprod], test_meta=true, test_slack=true, rtol=1.0e-8) - consistent_counters(nlps) - test_meta && consistent_meta(nlps, rtol=rtol) - consistent_functions(nlps, rtol=rtol, exclude=exclude) - consistent_counters(nlps) - for nlp in nlps - reset!(nlp) - end - consistent_counters(nlps) - for nlp in nlps - @assert length(gradient_check(nlp)) == 0 - @assert length(jacobian_check(nlp)) == 0 - @assert sum(map(length, values(hessian_check(nlp)))) == 0 - @assert sum(map(length, values(hessian_check_from_grad(nlp)))) == 0 - end - - # Test Quasi-Newton models - qnmodels = [[LBFGSModel(nlp) for nlp in nlps]; - [LSR1Model(nlp) for nlp in nlps]] - consistent_functions([nlps; qnmodels], exclude=[hess, hess_coord, hprod, ghjvprod] ∪ exclude) - consistent_counters([nlps; qnmodels]) - - if test_slack && has_inequalities(nlps[1]) - reset!.(nlps) - slack_nlps = SlackModel.(nlps) - consistent_functions(slack_nlps, exclude=exclude) - consistent_counters(slack_nlps) - end -end - -function consistent_meta(nlps; rtol=1.0e-8) - fields = [:nvar, :x0, :lvar, :uvar, :ifix, :ilow, :iupp, :irng, :ifree, :ncon, - :y0] - N = length(nlps) - for field in fields - @testset "Field $field" begin - for i = 1:N-1 - fi = getfield(nlps[i].meta, field) - fj = getfield(nlps[i+1].meta, field) - @test isapprox(fi, fj, rtol=rtol) - end - end - end -end - -function consistent_counters(nlps) - N = length(nlps) - V = zeros(Int, N) - for field in fieldnames(Counters) - V = [eval(field)(nlp) for nlp in nlps] - @testset "Field $field" begin - for i = 1:N-1 - @test V[i] == V[i+1] - end - end - end - V = [sum_counters(nlp) for nlp in nlps] - @test all(V .== V[1]) -end - -function consistent_functions(nlps; rtol=1.0e-8, exclude=[]) - - N = length(nlps) - n = nlps[1].meta.nvar - m = nlps[1].meta.ncon - - tmp_n = zeros(n) - tmp_m = zeros(m) - tmp_nn = zeros(n,n) - - x = 10 * [-(-1.0)^i for i = 1:n] - - if !(obj in exclude) - fs = [obj(nlp, x) for nlp in nlps] - fmin = minimum(map(abs, fs)) - for i = 1:N - for j = i+1:N - @test isapprox(fs[i], fs[j], atol=rtol * max(fmin, 1.0)) - end - - if !(objcons in exclude) - # Test objcons for unconstrained problems - if m == 0 - f, c = objcons(nlps[i], x) - @test isapprox(fs[i], f, rtol=rtol) - @test c == [] - f, tmpc = objcons!(nlps[i], x, c) - @test isapprox(fs[i], f, rtol=rtol) - @test c == [] - @test tmpc == [] - end - end - end - end - - if !(grad in exclude) - gs = Any[grad(nlp, x) for nlp in nlps] - gmin = minimum(map(norm, gs)) - for i = 1:N - for j = i+1:N - @test isapprox(gs[i], gs[j], atol=rtol * max(gmin, 1.0)) - end - tmpg = grad!(nlps[i], x, tmp_n) - @test isapprox(gs[i], tmp_n, atol=rtol * max(gmin, 1.0)) - @test isapprox(tmpg, tmp_n, atol=rtol * max(gmin, 1.0)) - - if !(objgrad in exclude) - f, g = objgrad(nlps[i], x) - @test isapprox(fs[i], f, atol=rtol * max(abs(f), 1.0)) - @test isapprox(gs[i], g, atol=rtol * max(gmin, 1.0)) - f, tmpg = objgrad!(nlps[i], x, g) - @test isapprox(fs[i], f, atol=rtol * max(abs(f), 1.0)) - @test isapprox(gs[i], g, atol=rtol * max(gmin, 1.0)) - @test isapprox(g, tmpg, atol=rtol * max(gmin, 1.0)) - end - end - end - - if !(hess_coord in exclude) - Hs = Vector{Any}(undef, N) - for i = 1:N - V = hess_coord(nlps[i], x) - I, J = hess_structure(nlps[i]) - Hs[i] = sparse(I, J, V, n, n) - end - Hmin = minimum(map(norm, Hs)) - for i = 1:N - for j = i+1:N - @test isapprox(Hs[i], Hs[j], atol=rtol * max(Hmin, 1.0)) - end - V = hess_coord(nlps[i], x, obj_weight=0.0) - @test norm(V) ≈ 0 - σ = 3.14 - V = hess_coord(nlps[i], x, 
obj_weight=σ) - I, J = hess_structure(nlps[i]) - tmp_h = sparse(I, J, V, n, n) - @test isapprox(σ*Hs[i], tmp_h, atol=rtol * max(Hmin, 1.0)) - tmp_V = zeros(nlps[i].meta.nnzh) - hess_coord!(nlps[i], x, tmp_V, obj_weight=σ) - @test tmp_V == V - end - end - - if !(hess in exclude) - Hs = Any[hess(nlp, x) for nlp in nlps] - Hmin = minimum(map(norm, Hs)) - for i = 1:N - for j = i+1:N - @test isapprox(Hs[i], Hs[j], atol=rtol * max(Hmin, 1.0)) - end - tmp_nn = hess(nlps[i], x, obj_weight=0.0) - @test norm(tmp_nn) ≈ 0 - σ = 3.14 - tmp_nn = hess(nlps[i], x, obj_weight=σ) - @test isapprox(σ*Hs[i], tmp_nn, atol=rtol * max(Hmin, 1.0)) - end - end - - v = 10 * [-(-1.0)^i for i = 1:n] - - if !(hprod in exclude) - for σ = [1.0; 0.5; 0.0] - Hvs = Any[hprod(nlp, x, v, obj_weight=σ) for nlp in nlps] - Hopvs = Any[hess_op(nlp, x, obj_weight=σ) * v for nlp in nlps] - Hvmin = minimum(map(norm, Hvs)) - for i = 1:N - for j = i+1:N - @test isapprox(Hvs[i], Hvs[j], atol=rtol * max(Hvmin, 1.0)) - @test isapprox(Hvs[i], Hopvs[j], atol=rtol * max(Hvmin, 1.0)) - end - tmphv = hprod!(nlps[i], x, v, tmp_n, obj_weight=σ) - @test isapprox(Hvs[i], tmp_n, atol=rtol * max(Hvmin, 1.0)) - @test isapprox(tmphv, tmp_n, atol=rtol * max(Hvmin, 1.0)) - fill!(tmp_n, 0) - H = hess_op!(nlps[i], x, tmp_n, obj_weight=σ) - res = H * v - @test isapprox(res, Hvs[i], atol=rtol * max(Hvmin, 1.0)) - @test isapprox(res, tmp_n, atol=rtol * max(Hvmin, 1.0)) - - if !(hess_coord in exclude) - rows, cols = hess_structure(nlps[i]) - vals = hess_coord(nlps[i], x, obj_weight=σ) - hprod!(nlps[i], rows, cols, vals, v, tmp_n) - @test isapprox(Hvs[i], tmp_n, atol=rtol * max(Hvmin, 1.0)) - hprod!(nlps[i], x, rows, cols, v, tmp_n, obj_weight=σ) - @test isapprox(Hvs[i], tmp_n, atol=rtol * max(Hvmin, 1.0)) - - H = hess_op!(nlps[i], x, rows, cols, tmp_n, obj_weight=σ) - res = H * v - @test isapprox(Hvs[i], res, atol=rtol * max(Hvmin, 1.0)) - H = hess_op!(nlps[i], x, tmp_n, obj_weight=σ) - res = H * v - @test isapprox(Hvs[i], res, atol=rtol * max(Hvmin, 1.0)) - end - if σ == 1 # Check hprod! 
with default obj_weight - hprod!(nlps[i], x, v, tmp_n) - @test isapprox(Hvs[i], tmp_n, atol=rtol * max(Hvmin, 1.0)) - end - end - end - end - - if intersect([hess, hess_coord], exclude) == [] - for i = 1:N - nlp = nlps[i] - Hx = hess(nlp, x, obj_weight=0.5) - V = hess_coord(nlp, x, obj_weight=0.5) - I, J = hess_structure(nlp) - @test length(I) == length(J) == length(V) == nlp.meta.nnzh - @test sparse(I, J, V, n, n) == Hx - end - end - - if m > 0 - if !(cons in exclude) - cs = Any[cons(nlp, x) for nlp in nlps] - cls = [nlp.meta.lcon for nlp in nlps] - cus = [nlp.meta.ucon for nlp in nlps] - cmin = minimum(map(norm, cs)) - for i = 1:N - tmpc = cons!(nlps[i], x, tmp_m) - @test isapprox(cs[i], tmp_m, atol=rtol * max(cmin, 1.0)) - @test isapprox(tmpc, tmp_m, atol=rtol * max(cmin, 1.0)) - ci, li, ui = copy(cs[i]), cls[i], cus[i] - for k = 1:m - if li[k] > -Inf - ci[k] -= li[k] - elseif ui[k] < Inf - ci[k] -= ui[k] - end - end - for j = i+1:N - cj, lj, uj = copy(cs[j]), cls[j], cus[j] - for k = 1:m - if lj[k] > -Inf - cj[k] -= lj[k] - elseif uj[k] < Inf - cj[k] -= uj[k] - end - end - @test isapprox(norm(ci), norm(cj), atol=rtol * max(cmin, 1.0)) - end - - if !(objcons in exclude) - f, c = objcons(nlps[i], x) - @test isapprox(fs[i], f, atol=rtol * max(abs(f), 1.0)) - @test isapprox(cs[i],c, atol=rtol * max(cmin, 1.0)) - f, tmpc = objcons!(nlps[i], x, c) - @test isapprox(fs[i], f, atol=rtol * max(abs(f), 1.0)) - @test isapprox(cs[i],c, atol=rtol * max(cmin, 1.0)) - @test isapprox(c, tmpc, atol=rtol * max(cmin, 1.0)) - end - end - end - - if intersect([jac, jac_coord], exclude) == [] - Js = [jac(nlp, x) for nlp in nlps] - Jmin = minimum(map(norm, Js)) - for i = 1:N - vi = norm(Js[i]) - for j = i+1:N - @test isapprox(vi, norm(Js[j]), atol=rtol * max(Jmin, 1.0)) - end - V = jac_coord(nlps[i], x) - I, J = jac_structure(nlps[i]) - @test length(I) == length(J) == length(V) == nlps[i].meta.nnzj - @test isapprox(sparse(I, J, V, m, n), Js[i], atol=rtol * max(Jmin, 1.0)) - IS, JS = zeros(Int, nlps[i].meta.nnzj), zeros(Int, nlps[i].meta.nnzj) - jac_structure!(nlps[i], IS, JS) - @test IS == I - @test JS == J - tmp_V = zeros(nlps[i].meta.nnzj) - jac_coord!(nlps[i], x, tmp_V) - @test tmp_V == V - end - end - - if !(jprod in exclude) - Jops = Any[jac_op(nlp, x) for nlp in nlps] - Jps = Any[jprod(nlp, x, v) for nlp in nlps] - for i = 1:N - @test isapprox(Jps[i], Jops[i] * v, atol=rtol * max(Jmin, 1.0)) - vi = norm(Jps[i]) - for j = i+1:N - @test isapprox(vi, norm(Jps[j]), atol=rtol * max(Jmin, 1.0)) - end - tmpjv = jprod!(nlps[i], x, v, tmp_m) - @test isapprox(tmpjv, tmp_m, atol=rtol * max(Jmin, 1.0)) - @test isapprox(Jps[i], tmp_m, atol=rtol * max(Jmin, 1.0)) - fill!(tmp_m, 0) - J = jac_op!(nlps[i], x, tmp_m, tmp_n) - res = J * v - @test isapprox(res, Jps[i], atol=rtol * max(Jmin, 1.0)) - @test isapprox(res, tmp_m, atol=rtol * max(Jmin, 1.0)) - - if !(jac_coord in exclude) - rows, cols = jac_structure(nlps[i]) - vals = jac_coord(nlps[i], x) - jprod!(nlps[i], rows, cols, vals, v, tmp_m) - @test isapprox(Jps[i], tmp_m, atol=rtol * max(Jmin, 1.0)) - jprod!(nlps[i], x, rows, cols, v, tmp_m) - @test isapprox(Jps[i], tmp_m, atol=rtol * max(Jmin, 1.0)) - - J = jac_op!(nlps[i], x, rows, cols, tmp_m, tmp_n) - res = J * v - @test isapprox(res, Jps[i], atol=rtol * max(Jmin, 1.0)) - end - end - end - - if !(jtprod in exclude) - w = 10 * [-(-1.0)^i for i = 1:m] - Jtps = Any[jtprod(nlp, x, w) for nlp in nlps] - for i = 1:N - @test isapprox(Jtps[i], Jops[i]' * w, atol=rtol * max(Jmin, 1.0)) - vi = norm(Jtps[i]) - for j = i+1:N 
- @test isapprox(vi, norm(Jtps[j]), atol=rtol * max(Jmin, 1.0)) - end - tmpjtv = jtprod!(nlps[i], x, w, tmp_n) - @test isapprox(Jtps[i], tmp_n, atol=rtol * max(Jmin, 1.0)) - @test isapprox(tmpjtv, tmp_n, atol=rtol * max(Jmin, 1.0)) - fill!(tmp_n, 0) - J = jac_op!(nlps[i], x, tmp_m, tmp_n) - res = J' * w - @test isapprox(res, Jtps[i], atol=rtol * max(Jmin, 1.0)) - @test isapprox(res, tmp_n, atol=rtol * max(Jmin, 1.0)) - - if !(jac_coord in exclude) - rows, cols = jac_structure(nlps[i]) - vals = jac_coord(nlps[i], x) - jtprod!(nlps[i], rows, cols, vals, w, tmp_n) - @test isapprox(Jtps[i], tmp_n, atol=rtol * max(Jmin, 1.0)) - jtprod!(nlps[i], x, rows, cols, w, tmp_n) - @test isapprox(Jtps[i], tmp_n, atol=rtol * max(Jmin, 1.0)) - - J = jac_op!(nlps[i], x, rows, cols, tmp_m, tmp_n) - res = J' * w - @test isapprox(res, Jtps[i], atol=rtol * max(Jmin, 1.0)) - end - end - end - - y = 3.14 * ones(m) - - if !(hess_coord in exclude) - Ls = Vector{Any}(undef, N) - for i = 1:N - V = hess_coord(nlps[i], x, y) - I, J = hess_structure(nlps[i]) - Ls[i] = sparse(I, J, V, n, n) - end - Lmin = minimum(map(norm, Ls)) - for i = 1:N - for j = i+1:N - @test isapprox(Ls[i], Ls[j], atol=rtol * max(Lmin, 1.0)) - end - V = hess_coord(nlps[i], x, 0*y, obj_weight=0.0) - @test norm(V) ≈ 0 - σ = 3.14 - V = hess_coord(nlps[i], x, σ*y, obj_weight=σ) - I, J = hess_structure(nlps[i]) - tmp_h = sparse(I, J, V, n, n) - @test isapprox(σ*Ls[i], tmp_h, atol=rtol * max(Lmin, 1.0)) - tmp_V = zeros(nlps[i].meta.nnzh) - hess_coord!(nlps[i], x, σ*y, tmp_V, obj_weight=σ) - @test tmp_V == V - end - end - - if !(hess in exclude) - Ls = Any[hess(nlp, x, y) for nlp in nlps] - Lmin = minimum(map(norm, Ls)) - for i = 1:N - for j = i+1:N - @test isapprox(Ls[i], Ls[j], atol=rtol * max(Lmin, 1.0)) - end - tmp_nn = hess(nlps[i], x, 0*y, obj_weight = 0.0) - @test norm(tmp_nn) ≈ 0 - σ = 3.14 - tmp_nn = hess(nlps[i], x, σ*y, obj_weight = σ) - @test isapprox(σ*Ls[i], tmp_nn, atol=rtol * max(Hmin, 1.0)) - end - end - - if intersect([hess, hess_coord], exclude) == [] for i = 1:N - nlp = nlps[i] - Hx = hess(nlp, x, y, obj_weight=0.5) - V = hess_coord(nlp, x, y, obj_weight=0.5) - I, J = hess_structure(nlp) - @test length(I) == length(J) == length(V) == nlp.meta.nnzh - @test sparse(I, J, V, n, n) == Hx - end - end - - if !(hprod in exclude) - for σ = [1.0; 0.5; 0.0] - Lps = Any[hprod(nlp, x, y, v, obj_weight=σ) for nlp in nlps] - Hopvs = Any[hess_op(nlp, x, y, obj_weight=σ) * v for nlp in nlps] - Lpmin = minimum(map(norm, Lps)) - for i = 1:N - for j = i+1:N - @test isapprox(Lps[i], Lps[j], atol=rtol * max(Lpmin, 1.0)) - @test isapprox(Lps[i], Hopvs[j], atol=rtol * max(Lpmin, 1.0)) - end - - if !(hess_coord in exclude) - rows, cols = hess_structure(nlps[i]) - vals = hess_coord(nlps[i], x, y, obj_weight=σ) - hprod!(nlps[i], rows, cols, vals, v, tmp_n) - @test isapprox(Lps[i], tmp_n, atol=rtol * max(Lpmin, 1.0)) - hprod!(nlps[i], x, y, rows, cols, v, tmp_n, obj_weight=σ) - @test isapprox(Lps[i], tmp_n, atol=rtol * max(Lpmin, 1.0)) - - H = hess_op!(nlps[i], x, y, rows, cols, tmp_n, obj_weight=σ) - res = H * v - @test isapprox(Lps[i], res, atol=rtol * max(Lpmin, 1.0)) - H = hess_op!(nlps[i], x, y, tmp_n, obj_weight=σ) - res = H * v - @test isapprox(Lps[i], res, atol=rtol * max(Lpmin, 1.0)) - end - end - end - end - - g = 0.707 * ones(n) - - if !(ghjvprod in exclude) - Ls = Any[ghjvprod(nlp, x, g, v) for nlp in nlps] - Lmin = minimum(map(norm, Ls)) - for i = 1:N - for j = i+1:N - @test isapprox(Ls[i], Ls[j], atol=rtol * max(Lmin, 1.0)) - end - end - end - end - 
-end \ No newline at end of file diff --git a/test/TestUtils/nlp/coord-memory.jl b/test/TestUtils/nlp/coord-memory.jl deleted file mode 100644 index 08163817..00000000 --- a/test/TestUtils/nlp/coord-memory.jl +++ /dev/null @@ -1,38 +0,0 @@ -export coord_memory_nlp - -""" - coord_memory_nlp(nlp) - -Check that the memory allocated by the in-place coord methods is -sufficiently smaller than that of their allocating counterparts. -""" -function coord_memory_nlp(nlp :: AbstractNLPModel) - n = nlp.meta.nvar - m = nlp.meta.ncon - - x = 10 * [-(-1.0)^i for i = 1:n] - y = [-(-1.0)^i for i = 1:m] - - # Hessian unconstrained test - vals = hess_coord(nlp, x) - al1 = @allocated hess_coord(nlp, x) - V = zeros(nlp.meta.nnzh) - hess_coord!(nlp, x, V) - al2 = @allocated hess_coord!(nlp, x, V) - @test al2 < al1 - 50 - - if m > 0 - vals = hess_coord(nlp, x, y) - al1 = @allocated vals = hess_coord(nlp, x, y) - hess_coord!(nlp, x, y, V) - al2 = @allocated hess_coord!(nlp, x, y, V) - @test al2 < al1 - 50 - - vals = jac_coord(nlp, x) - al1 = @allocated vals = jac_coord(nlp, x) - V = zeros(nlp.meta.nnzj) - jac_coord!(nlp, x, vals) - al2 = @allocated jac_coord!(nlp, x, vals) - @test al2 < al1 - 50 - end -end \ No newline at end of file diff --git a/test/TestUtils/nlp/multiple-precision.jl b/test/TestUtils/nlp/multiple-precision.jl deleted file mode 100644 index 2fd644be..00000000 --- a/test/TestUtils/nlp/multiple-precision.jl +++ /dev/null @@ -1,47 +0,0 @@ -export multiple_precision_nlp - -""" - multiple_precision_nlp(nlp; precisions=[...]) - -Check that the output types of the NLP API functions match the input type. -In other words, make sure that the model handles multiple precisions. - -The array `precisions` lists the floating-point types to be tested. -Defaults to `[Float16, Float32, Float64, BigFloat]`.
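The check amounts to eltype-stability of every API function. A hand-rolled version of the core loop for an unconstrained model, again assuming `ADNLPModel` from ADNLPModels.jl:

```julia
using NLPModels, Test
using ADNLPModels  # assumption: post-split home of ADNLPModel

nlp = ADNLPModel(x -> sum(x .^ 4), ones(2))
for T in (Float32, Float64, BigFloat)
  x = ones(T, nlp.meta.nvar)
  @test typeof(obj(nlp, x)) == T   # the scalar output takes the input precision
  @test eltype(grad(nlp, x)) == T  # and so do vector outputs
end
```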
-""" -function multiple_precision_nlp(nlp :: AbstractNLPModel; - precisions :: Array = [Float16, Float32, Float64, BigFloat]) - for T in precisions - x = ones(T, nlp.meta.nvar) - @test typeof(obj(nlp, x)) == T - @test eltype(grad(nlp, x)) == T - @test eltype(hess(nlp, x)) == T - @test eltype(hess_op(nlp, x)) == T - rows, cols = hess_structure(nlp) - vals = hess_coord(nlp, x) - @test eltype(vals) == T - Hv = zeros(T, nlp.meta.nvar) - @test eltype(hess_op!(nlp, rows, cols, vals, Hv)) == T - if nlp.meta.ncon > 0 - y = ones(T, nlp.meta.ncon) - @test eltype(cons(nlp, x)) == T - @test eltype(jac(nlp, x)) == T - @test eltype(jac_op(nlp, x)) == T - rows, cols = jac_structure(nlp) - vals = jac_coord(nlp, x) - @test eltype(vals) == T - Av = zeros(T, nlp.meta.ncon) - Atv = zeros(T, nlp.meta.nvar) - @test eltype(jac_op!(nlp, rows, cols, vals, Av, Atv)) == T - @test eltype(hess(nlp, x, y)) == T - @test eltype(hess(nlp, x, y, obj_weight=one(T))) == T - @test eltype(hess_op(nlp, x, y)) == T - rows, cols = hess_structure(nlp) - vals = hess_coord(nlp, x, y) - @test eltype(vals) == T - Hv = zeros(T, nlp.meta.nvar) - @test eltype(hess_op!(nlp, rows, cols, vals, Hv)) == T - @test eltype(ghjvprod(nlp, x, x, x)) == T - end - end -end diff --git a/test/TestUtils/nlp/problems/brownden.jl b/test/TestUtils/nlp/problems/brownden.jl deleted file mode 100644 index 1c9bf7b6..00000000 --- a/test/TestUtils/nlp/problems/brownden.jl +++ /dev/null @@ -1,123 +0,0 @@ -export BROWNDEN, brownden_autodiff - -function brownden_autodiff() - - x0 = [25.0; 5.0; -5.0; -1.0] - f(x) = begin - T = eltype(x) - s = zero(T) - for i = 1:20 - s += ((x[1] + x[2] * T(i)/5 - exp(T(i)/5))^2 + (x[3] + x[4] * sin(T(i)/5) - cos(T(i)/5))^2)^2 - end - return s - end - - return ADNLPModel(f, x0, name="brownden_autodiff") -end - -""" - nlp = BROWNDEN() - -## Brown and Dennis function. - - Source: Problem 16 in - J.J. Moré, B.S. Garbow and K.E. Hillstrom, - "Testing Unconstrained Optimization Software", - ACM Transactions on Mathematical Software, vol. 7(1), pp. 
17-41, 1981 - - classification SUR2-AN-4-0 - -```math -\\min_x \\ \\sum_{i=1}^{20} \\left(\\left(x_1 + \\tfrac{i}{5} x_2 - e^{i / 5}\\right)^2 -+ \\left(x_3 + \\sin(\\tfrac{i}{5}) x_4 - \\cos(\\tfrac{i}{5})\\right)^2\\right)^2 -``` - -Starting point: `[25.0; 5.0; -5.0; -1.0]` -""" -mutable struct BROWNDEN <: AbstractNLPModel - meta :: NLPModelMeta - counters :: Counters -end - -function BROWNDEN() - meta = NLPModelMeta(4, x0=[25.0; 5.0; -5.0; -1.0], name="BROWNDEN_manual", nnzh=10) - - return BROWNDEN(meta, Counters()) -end - -function NLPModels.obj(nlp :: BROWNDEN, x :: AbstractVector{T}) where T - @lencheck 4 x - increment!(nlp, :neval_obj) - return sum(((x[1] + x[2] * T(i)/5 - exp(T(i)/5))^2 + (x[3] + x[4] * sin(T(i)/5) - cos(T(i)/5))^2)^2 for i = 1:20) -end - -function NLPModels.grad!(nlp :: BROWNDEN, x :: AbstractVector, gx :: AbstractVector) - @lencheck 4 x gx - increment!(nlp, :neval_grad) - α(x,i) = x[1] + x[2] * i/5 - exp(i/5) - β(x,i) = x[3] + x[4] * sin(i/5) - cos(i/5) - θ(x,i) = α(x,i)^2 + β(x,i)^2 - gx .= sum(4 * θ(x,i) * (α(x,i) * [1; i/5; 0; 0] + β(x,i) * [0; 0; 1; sin(i/5)]) for i = 1:20) - return gx -end - -function NLPModels.hess(nlp :: BROWNDEN, x :: AbstractVector{T}; obj_weight=1.0) where T - @lencheck 4 x - increment!(nlp, :neval_hess) - α(x,i) = x[1] + x[2] * T(i)/5 - exp(T(i)/5) - β(x,i) = x[3] + x[4] * sin(T(i)/5) - cos(T(i)/5) - Hx = zeros(T, 4, 4) - if obj_weight == 0 - return Hx - end - for i = 1:20 - αi, βi = α(x,i), β(x,i) - vi, wi = T[1; i/5; 0; 0], T[0; 0; 1; sin(i/5)] - zi = αi * vi + βi * wi - θi = αi^2 + βi^2 - Hx += (4vi * vi' + 4wi * wi') * θi + 8zi * zi' - end - return T(obj_weight) * tril(Hx) -end - -function NLPModels.hess_structure!(nlp :: BROWNDEN, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 10 rows cols - n = nlp.meta.nvar - I = ((i,j) for i = 1:n, j = 1:n if i ≥ j) - rows .= getindex.(I, 1) - cols .= getindex.(I, 2) - return rows, cols -end - -function NLPModels.hess_coord!(nlp :: BROWNDEN, x :: AbstractVector, vals :: AbstractVector; obj_weight=1.0) - @lencheck 4 x - @lencheck 10 vals - Hx = hess(nlp, x, obj_weight=obj_weight) - k = 1 - for j = 1:4 - for i = j:4 - vals[k] = Hx[i,j] - k += 1 - end - end - return vals -end - -function NLPModels.hprod!(nlp :: BROWNDEN, x :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 4 x v Hv - increment!(nlp, :neval_hprod) - α(x,i) = x[1] + x[2] * i/5 - exp(i/5) - β(x,i) = x[3] + x[4] * sin(i/5) - cos(i/5) - Hv .= 0 - if obj_weight == 0 - return Hv - end - for i = 1:20 - αi, βi = α(x,i), β(x,i) - vi, wi = [1; i/5; 0; 0], [0; 0; 1; sin(i/5)] - zi = αi * vi + βi * wi - θi = αi^2 + βi^2 - Hv .+= obj_weight * ((4 * dot(vi, v) * vi + 4 * dot(wi, v) * wi) * θi + 8 * dot(zi, v) * zi) - end - return Hv -end diff --git a/test/TestUtils/nlp/problems/genrose.jl b/test/TestUtils/nlp/problems/genrose.jl deleted file mode 100644 index f563990f..00000000 --- a/test/TestUtils/nlp/problems/genrose.jl +++ /dev/null @@ -1,56 +0,0 @@ -export genrose_autodiff - -# Generalized Rosenbrock function. -# -# Source: -# Y.-W. Shang and Y.-H. Qiu, -# A note on the extended Rosenbrock function, -# Evolutionary Computation, 14(1):119–126, 2006. -# -# Shang and Qiu claim the "extended" Rosenbrock function -# previously appeared in -# -# K. A. 
de Jong, -# An analysis of the behavior of a class of genetic -# adaptive systems, -# PhD Thesis, University of Michigan, Ann Arbor, -# Michigan, 1975, -# (http://hdl.handle.net/2027.42/4507) -# -# but I could not find it there, and in -# -# D. E. Goldberg, -# Genetic algorithms in search, optimization and -# machine learning, -# Reading, Massachusetts: Addison-Wesley, 1989, -# -# but I don't have access to that book. -# -# This unconstrained problem is analyzed in -# -# S. Kok and C. Sandrock, -# Locating and Characterizing the Stationary Points of -# the Extended Rosenbrock Function, -# Evolutionary Computation 17, 2009. -# https://dx.doi.org/10.1162%2Fevco.2009.17.3.437 -# -# classification SUR2-AN-V-0 -# -# D. Orban, Montreal, 08/2015. - -"Generalized Rosenbrock model in size `n`" -function genrose_autodiff(n :: Int=500) - - n < 2 && error("genrose: number of variables must be ≥ 2") - - x0 = [i/(n+1) for i = 1:n] - f(x::AbstractVector) = begin - s = 1.0 - for i = 1:n-1 - s += 100 * (x[i+1]-x[i]^2)^2 + (x[i]-1)^2 - end - return s - end - - return ADNLPModel(f, x0, name="genrose_autodiff") -end diff --git a/test/TestUtils/nlp/problems/hs10.jl b/test/TestUtils/nlp/problems/hs10.jl deleted file mode 100644 index 3e4729c9..00000000 --- a/test/TestUtils/nlp/problems/hs10.jl +++ /dev/null @@ -1,129 +0,0 @@ -export HS10, hs10_autodiff - -function hs10_autodiff() - - x0 = [-10.0; 10.0] - f(x) = x[1] - x[2] - c(x) = [-3 * x[1]^2 + 2 * x[1] * x[2] - x[2]^2 + 1] - lcon = [0.0] - ucon = [Inf] - - return ADNLPModel(f, x0, c, lcon, ucon, name="hs10_autodiff") -end - -""" - nlp = HS10() - -## Problem 10 in the Hock-Schittkowski suite - -```math -\\begin{aligned} -\\min \\quad & x_1 - x_2 \\\\ -\\text{s. to} \\quad & -3x_1^2 + 2x_1 x_2 - x_2^2 + 1 \\geq 0 -\\end{aligned} -``` - -Starting point: `[-10; 10]`. 
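A quick spot check of the `HS10` implementations in this hunk, the kind of agreement that `consistent_nlps` verifies systematically; assumes the manual `HS10` type defined just below is loaded:

```julia
using NLPModels, Test

nlp = HS10()
x0 = nlp.meta.x0                # [-10.0; 10.0]
@test obj(nlp, x0) ≈ -20.0      # x₁ - x₂
@test cons(nlp, x0) ≈ [-599.0]  # -3x₁² + 2x₁x₂ - x₂² + 1
```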
-""" -mutable struct HS10 <: AbstractNLPModel - meta :: NLPModelMeta - counters :: Counters -end - -function HS10() - meta = NLPModelMeta(2, ncon=1, x0=[-10.0; 10.0], - lcon=[0.0], ucon=[Inf], name="HS10_manual") - - return HS10(meta, Counters()) -end - -function NLPModels.obj(nlp :: HS10, x :: AbstractVector) - @lencheck 2 x - increment!(nlp, :neval_obj) - return x[1] - x[2] -end - -function NLPModels.grad!(nlp :: HS10, x :: AbstractVector{T}, gx :: AbstractVector{T}) where T - @lencheck 2 x gx - increment!(nlp, :neval_grad) - gx .= T[1; -1] - return gx -end - -function NLPModels.hess_structure!(nlp :: HS10, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 3 rows cols - rows[1] = 1; rows[2] = 2; rows[3] = 2 - cols[1] = 1; cols[2] = 1; cols[3] = 2 - return rows, cols -end - -function NLPModels.hess_coord!(nlp :: HS10, x :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=1.0) where T - @lencheck 2 x - @lencheck 3 vals - increment!(nlp, :neval_hess) - vals .= zero(T) - return vals -end - -function NLPModels.hess_coord!(nlp :: HS10, x :: AbstractVector{T}, y :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=1.0) where T - @lencheck 2 x - @lencheck 1 y - @lencheck 3 vals - increment!(nlp, :neval_hess) - vals .= T[-6, 2, -2] * y[1] - return vals -end - -function NLPModels.hprod!(nlp :: HS10, x :: AbstractVector, y :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight=1.0) - @lencheck 2 x v Hv - @lencheck 1 y - increment!(nlp, :neval_hprod) - Hv .= y[1] * [-6 * v[1] + 2 * v[2]; 2 * v[1] - 2 * v[2]] - return Hv -end - -function NLPModels.cons!(nlp :: HS10, x :: AbstractVector, cx :: AbstractVector) - @lencheck 2 x - @lencheck 1 cx - increment!(nlp, :neval_cons) - cx .= [-3 * x[1]^2 + 2 * x[1] * x[2] - x[2]^2 + 1] - return cx -end - -function NLPModels.jac_structure!(nlp :: HS10, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 2 rows cols - rows .= [1, 1] - cols .= [1, 2] - return rows, cols -end - -function NLPModels.jac_coord!(nlp :: HS10, x :: AbstractVector, vals :: AbstractVector) - @lencheck 2 x vals - increment!(nlp, :neval_jac) - vals .= [-6 * x[1] + 2 * x[2], 2 * x[1] - 2 * x[2]] - return vals -end - -function NLPModels.jprod!(nlp :: HS10, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck 2 x v - @lencheck 1 Jv - increment!(nlp, :neval_jprod) - Jv .= [(-6 * x[1] + 2 * x[2]) * v[1] + (2 * x[1] - 2 * x[2]) * v[2]] - return Jv -end - -function NLPModels.jtprod!(nlp :: HS10, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck 2 x Jtv - @lencheck 1 v - increment!(nlp, :neval_jtprod) - Jtv .= [-6 * x[1] + 2 * x[2]; 2 * x[1] - 2 * x[2]] * v[1] - return Jtv -end - -function NLPModels.ghjvprod!(nlp :: HS10, x :: AbstractVector, g :: AbstractVector, v :: AbstractVector, gHv :: AbstractVector) - @lencheck nlp.meta.nvar x g v - @lencheck nlp.meta.ncon gHv - increment!(nlp, :neval_hprod) - gHv .= [g[1] * (-6 * v[1] + 2 * v[2]) + g[2] * (2 * v[1] - 2 * v[2])] - return gHv -end diff --git a/test/TestUtils/nlp/problems/hs11.jl b/test/TestUtils/nlp/problems/hs11.jl deleted file mode 100644 index a21fbd55..00000000 --- a/test/TestUtils/nlp/problems/hs11.jl +++ /dev/null @@ -1,129 +0,0 @@ -export HS11, hs11_autodiff - -function hs11_autodiff() - - x0 = [4.9; 0.1] - f(x) = (x[1] - 5)^2 + x[2]^2 - 25 - c(x) = [-x[1]^2 + x[2]] - lcon = [-Inf] - ucon = [0.0] - - return ADNLPModel(f, x0, c, lcon, ucon, name="hs11_autodiff") - -end - -""" - nlp = HS11() - -## Problem 11 in the 
Hock-Schittkowski suite - -```math -\\begin{aligned} -\\min \\quad & (x_1 - 5)^2 + x_2^2 - 25 \\\\ -\\text{s. to} \\quad & -x_1^2 + x_2 \\leq 0 -\\end{aligned} -``` - -Starting point: `[-4.9; 0.1]`. -""" -mutable struct HS11 <: AbstractNLPModel - meta :: NLPModelMeta - counters :: Counters -end - -function HS11() - meta = NLPModelMeta(2, ncon=1, nnzh=2, nnzj=2, x0=[4.9; 0.1], lcon=[-Inf], ucon=[0.0], name="HS11_manual") - - return HS11(meta, Counters()) -end - -function NLPModels.obj(nlp :: HS11, x :: AbstractVector) - @lencheck 2 x - increment!(nlp, :neval_obj) - return (x[1] - 5)^2 + x[2]^2 - 25 -end - -function NLPModels.grad!(nlp :: HS11, x :: AbstractVector, gx :: AbstractVector) - @lencheck 2 x gx - increment!(nlp, :neval_grad) - gx .= [2 * (x[1] - 5); 2 * x[2]] - return gx -end - -function NLPModels.hess_structure!(nlp :: HS11, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 2 rows cols - rows[1] = 1; rows[2] = 2 - cols[1] = 1; cols[2] = 2 - return rows, cols -end - -function NLPModels.hess_coord!(nlp :: HS11, x :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x vals - increment!(nlp, :neval_hess) - vals .= 2obj_weight - return vals -end - -function NLPModels.hess_coord!(nlp :: HS11, x :: AbstractVector{T}, y :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x vals - @lencheck 1 y - increment!(nlp, :neval_hess) - vals .= 2obj_weight - vals[1] -= 2y[1] - return vals -end - -function NLPModels.hprod!(nlp :: HS11, x :: AbstractVector{T}, y :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x v Hv - @lencheck 1 y - increment!(nlp, :neval_hprod) - Hv .= 2obj_weight * v - Hv[1] -= 2y[1] * v[1] - return Hv -end - -function NLPModels.cons!(nlp :: HS11, x :: AbstractVector, cx :: AbstractVector) - @lencheck 2 x - @lencheck 1 cx - increment!(nlp, :neval_cons) - cx .= [-x[1]^2 + x[2]] - return cx -end - -function NLPModels.jac_structure!(nlp :: HS11, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 2 rows cols - rows .= [1, 1] - cols .= [1, 2] - return rows, cols -end - -function NLPModels.jac_coord!(nlp :: HS11, x :: AbstractVector, vals :: AbstractVector) - @lencheck 2 x vals - increment!(nlp, :neval_jac) - vals .= [-2 * x[1], 1] - return vals -end - -function NLPModels.jprod!(nlp :: HS11, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck 2 x v - @lencheck 1 Jv - increment!(nlp, :neval_jprod) - Jv .= [-2 * x[1] * v[1] + v[2]] - return Jv -end - -function NLPModels.jtprod!(nlp :: HS11, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck 2 x Jtv - @lencheck 1 v - increment!(nlp, :neval_jtprod) - Jtv .= [-2 * x[1]; 1] * v[1] - return Jtv -end - -function NLPModels.ghjvprod!(nlp :: HS11, x :: AbstractVector, g :: AbstractVector, v :: AbstractVector, gHv :: AbstractVector) - @lencheck nlp.meta.nvar x g v - @lencheck nlp.meta.ncon gHv - increment!(nlp, :neval_hprod) - gHv .= [-2 * g[1] * v[1]] - return gHv -end diff --git a/test/TestUtils/nlp/problems/hs14.jl b/test/TestUtils/nlp/problems/hs14.jl deleted file mode 100644 index 2cb78aa8..00000000 --- a/test/TestUtils/nlp/problems/hs14.jl +++ /dev/null @@ -1,127 +0,0 @@ -export HS14, hs14_autodiff - -function hs14_autodiff() - - x0 = [2.0; 2.0] - f(x) = (x[1] - 2)^2 + (x[2] - 1)^2 - c(x) = [x[1] - 2 * x[2] + 1; -x[1]^2/4 - x[2]^2 + 1] - lcon = [0.0; 0.0] - ucon = [0.0; Inf] - - return ADNLPModel(f, x0, c, lcon, ucon, 
name="hs14_autodiff") -end - -""" - nlp = HS14() - -## Problem 14 in the Hock-Schittkowski suite - -```math -\\begin{aligned} -\\min \\quad & (x_1 - 2)^2 + (x_2 - 1)^2 \\\\ -\\text{s. to} \\quad & x_1 - 2x_2 + 1 = 0 \\\\ -& -\\tfrac{1}{4} x_1^2 - x_2^2 + 1 \\geq 0 -\\end{aligned} -``` - -Starting point: `[2; 2]`. -""" -mutable struct HS14 <: AbstractNLPModel - meta :: NLPModelMeta - counters :: Counters -end - -function HS14() - meta = NLPModelMeta(2, nnzh=2, ncon=2, x0=[2.0; 2.0], lcon=[0.0; 0.0], ucon=[0.0; Inf], name="HS14_manual") - - return HS14(meta, Counters()) -end - -function NLPModels.obj(nlp :: HS14, x :: AbstractVector) - @lencheck 2 x - increment!(nlp, :neval_obj) - return (x[1] - 2)^2 + (x[2] - 1)^2 -end - -function NLPModels.grad!(nlp :: HS14, x :: AbstractVector, gx :: AbstractVector) - @lencheck 2 x gx - increment!(nlp, :neval_grad) - gx .= [2 * (x[1] - 2); 2 * (x[2] - 1)] - return gx -end - -function NLPModels.hess_structure!(nlp :: HS14, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 2 rows cols - rows[1] = 1; rows[2] = 2 - cols[1] = 1; cols[2] = 2 - return rows, cols -end - -function NLPModels.hess_coord!(nlp :: HS14, x :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x vals - increment!(nlp, :neval_hess) - vals .= 2obj_weight - return vals -end - -function NLPModels.hess_coord!(nlp :: HS14, x :: AbstractVector{T}, y :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x y vals - increment!(nlp, :neval_hess) - vals .= 2obj_weight - vals[1] -= y[2] / 2 - vals[2] -= 2y[2] - return vals -end - -function NLPModels.hprod!(nlp :: HS14, x :: AbstractVector{T}, y :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x y v Hv - increment!(nlp, :neval_hprod) - Hv .= 2obj_weight * v - Hv[1] -= y[2] * v[1] / 2 - Hv[2] -= 2y[2] * v[2] - return Hv -end - -function NLPModels.cons!(nlp :: HS14, x :: AbstractVector, cx :: AbstractVector) - @lencheck 2 x cx - increment!(nlp, :neval_cons) - cx .= [x[1] - 2 * x[2] + 1; -x[1]^2/4 - x[2]^2 + 1] - return cx -end - -function NLPModels.jac_structure!(nlp :: HS14, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 4 rows cols - rows .= [1, 2, 1, 2] - cols .= [1, 1, 2, 2] - return rows, cols -end - -function NLPModels.jac_coord!(nlp :: HS14, x :: AbstractVector, vals :: AbstractVector) - @lencheck 2 x - @lencheck 4 vals - increment!(nlp, :neval_jac) - vals .= [1, -x[1] / 2, -2, -2 * x[2]] - return vals -end - -function NLPModels.jprod!(nlp :: HS14, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck 2 x v Jv - increment!(nlp, :neval_jprod) - Jv .= [v[1] - 2 * v[2]; -x[1] * v[1] / 2 - 2 * x[2] * v[2]] - return Jv -end - -function NLPModels.jtprod!(nlp :: HS14, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck 2 x v Jtv - increment!(nlp, :neval_jtprod) - Jtv .= [v[1] - x[1] * v[2] / 2; -2 * v[1] - 2 * x[2] * v[2]] - return Jtv -end - -function NLPModels.ghjvprod!(nlp :: HS14, x :: AbstractVector{T}, g :: AbstractVector{T}, v :: AbstractVector{T}, gHv :: AbstractVector{T}) where T - @lencheck nlp.meta.nvar x g v - @lencheck nlp.meta.ncon gHv - increment!(nlp, :neval_hprod) - gHv .= [T(0); - g[1] * v[1] / 2 - 2 * g[2] * v[2]] - return gHv -end diff --git a/test/TestUtils/nlp/problems/hs5.jl b/test/TestUtils/nlp/problems/hs5.jl deleted file mode 100644 index 6fb4003e..00000000 --- a/test/TestUtils/nlp/problems/hs5.jl +++ 
/dev/null @@ -1,74 +0,0 @@ -export HS5, hs5_autodiff - -function hs5_autodiff() - - x0 = [0.0; 0.0] - f(x) = sin(x[1] + x[2]) + (x[1] - x[2])^2 - 3x[1] / 2 + 5x[2] / 2 + 1 - l = [-1.5; -3.0] - u = [4.0; 3.0] - - return ADNLPModel(f, x0, l, u, name="hs5_autodiff") -end - -""" - nlp = HS5() - -## Problem 5 in the Hock-Schittkowski suite - -```math -\\begin{aligned} -\\min \\quad & \\sin(x_1 + x_2) + (x_1 - x_2)^2 - \\tfrac{3}{2}x_1 + \\tfrac{5}{2}x_2 + 1 \\\\ -\\text{s. to} \\quad & -1.5 \\leq x_1 \\leq 4 \\\\ -& -3 \\leq x_2 \\leq 3 -\\end{aligned} -``` - -Starting point: `[0.0; 0.0]`. -""" -mutable struct HS5 <: AbstractNLPModel - meta :: NLPModelMeta - counters :: Counters -end - -function HS5() - meta = NLPModelMeta(2, x0=zeros(2), lvar=[-1.5; -3.0], uvar=[4.0; 3.0], name="HS5_manual") - - return HS5(meta, Counters()) -end - -function NLPModels.obj(nlp :: HS5, x :: AbstractVector) - @lencheck 2 x - increment!(nlp, :neval_obj) - return sin(x[1] + x[2]) + (x[1] - x[2])^2 - 3x[1] / 2 + 5x[2] / 2 + 1 -end - -function NLPModels.grad!(nlp :: HS5, x :: AbstractVector{T}, gx :: AbstractVector{T}) where T - @lencheck 2 x gx - increment!(nlp, :neval_grad) - gx .= cos(x[1] + x[2]) * ones(T, 2) + 2 * (x[1] - x[2]) * T[1; -1] + T[-1.5; 2.5] - return gx -end - -function NLPModels.hess_structure!(nlp :: HS5, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 3 rows cols - rows .= [1; 2; 2] - cols .= [1; 1; 2] - return rows, cols -end - -function NLPModels.hess_coord!(nlp :: HS5, x :: AbstractVector, vals :: AbstractVector; obj_weight=1.0) - @lencheck 2 x - @lencheck 3 vals - increment!(nlp, :neval_hess) - vals[1] = vals[3] = -sin(x[1] + x[2]) + 2 - vals[2] = -sin(x[1] + x[2]) - 2 - vals .*= obj_weight - return vals -end - -function NLPModels.hprod!(nlp :: HS5, x :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x v Hv - increment!(nlp, :neval_hprod) - Hv .= (- sin(x[1] + x[2]) * (v[1] + v[2]) * ones(T, 2) + 2 * [v[1] - v[2]; v[2] - v[1]]) * obj_weight - return Hv -end diff --git a/test/TestUtils/nlp/problems/hs6.jl b/test/TestUtils/nlp/problems/hs6.jl deleted file mode 100644 index 63106be9..00000000 --- a/test/TestUtils/nlp/problems/hs6.jl +++ /dev/null @@ -1,127 +0,0 @@ -export HS6, hs6_autodiff - -function hs6_autodiff() - x0 = [-1.2; 1.0] - f(x) = (1 - x[1])^2 - c(x) = [10 * (x[2] - x[1]^2)] - lcon = [0.0] - ucon = [0.0] - - return ADNLPModel(f, x0, c, lcon, ucon, name="hs6_autodiff") -end - -""" - nlp = HS6() - -## Problem 6 in the Hock-Schittkowski suite - -```math -\\begin{aligned} -\\min \\quad & (1 - x_1)^2 \\\\ -\\text{s. to} \\quad & 10 (x_2 - x_1^2) = 0 -\\end{aligned} -``` - -Starting point: `[-1.2; 1.0]`. 
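Coordinate output assembles into the full Jacobian in the usual way. A spot check of the manual `HS6` implementation, assuming the type defined just below is loaded:

```julia
using NLPModels, SparseArrays, Test

nlp = HS6()
x = nlp.meta.x0                 # [-1.2; 1.0]
rows, cols = jac_structure(nlp)
vals = jac_coord(nlp, x)
J = sparse(rows, cols, vals, nlp.meta.ncon, nlp.meta.nvar)
@test J ≈ [24.0 10.0]           # row vector ∇c(x)ᵀ = [-20x₁ 10]
@test jprod(nlp, x, ones(2)) ≈ J * ones(2)
```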
-""" -mutable struct HS6 <: AbstractNLPModel - meta :: NLPModelMeta - counters :: Counters -end - -function HS6() - meta = NLPModelMeta(2, ncon=1, nnzh=1, nnzj=2, x0=[-1.2; 1.0], lcon=[0.0], ucon=[0.0], name="HS6_manual") - - return HS6(meta, Counters()) -end - -function NLPModels.obj(nlp :: HS6, x :: AbstractVector) - @lencheck 2 x - increment!(nlp, :neval_obj) - return (1 - x[1])^2 -end - -function NLPModels.grad!(nlp :: HS6, x :: AbstractVector, gx :: AbstractVector) - @lencheck 2 x gx - increment!(nlp, :neval_grad) - gx .= [2 * (x[1] - 1); 0] - return gx -end - -function NLPModels.hess_structure!(nlp :: HS6, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 1 rows cols - rows[1] = 1 - cols[1] = 1 - return rows, cols -end - -function NLPModels.hess_coord!(nlp :: HS6, x :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x - @lencheck 1 vals - increment!(nlp, :neval_hess) - vals[1] = 2obj_weight - return vals -end - -function NLPModels.hess_coord!(nlp :: HS6, x :: AbstractVector{T}, y :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x - @lencheck 1 y vals - increment!(nlp, :neval_hess) - vals[1] = 2obj_weight - 20y[1] - return vals -end - -function NLPModels.hprod!(nlp :: HS6, x :: AbstractVector{T}, y :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x v Hv - @lencheck 1 y - increment!(nlp, :neval_hprod) - Hv .= [(2obj_weight - 20y[1]) * v[1]; 0] - return Hv -end - -function NLPModels.cons!(nlp :: HS6, x :: AbstractVector, cx :: AbstractVector) - @lencheck 2 x - @lencheck 1 cx - increment!(nlp, :neval_cons) - cx[1] = 10 * (x[2] - x[1]^2) - return cx -end - -function NLPModels.jac_structure!(nlp :: HS6, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 2 rows cols - rows[1:2] .= [1, 1] - cols[1:2] .= [1, 2] - return rows, cols -end - -function NLPModels.jac_coord!(nlp :: HS6, x :: AbstractVector, vals :: AbstractVector) - @lencheck 2 x vals - increment!(nlp, :neval_jac) - vals[1] = -20 * x[1] - vals[2] = 10 - return vals -end - -function NLPModels.jprod!(nlp :: HS6, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck 2 x v - @lencheck 1 Jv - increment!(nlp, :neval_jprod) - Jv .= [-20 * x[1] * v[1] + 10 * v[2]] - return Jv -end - -function NLPModels.jtprod!(nlp :: HS6, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck 2 x Jtv - @lencheck 1 v - increment!(nlp, :neval_jtprod) - Jtv .= [-20 * x[1]; 10] * v[1] - return Jtv -end - -function NLPModels.ghjvprod!(nlp :: HS6, x :: AbstractVector, g :: AbstractVector, v :: AbstractVector, gHv :: AbstractVector) - @lencheck nlp.meta.nvar x g v - @lencheck nlp.meta.ncon gHv - increment!(nlp, :neval_hprod) - gHv .= [-20 * g[1] * v[1]] - return gHv -end diff --git a/test/TestUtils/nlp/problems/lincon.jl b/test/TestUtils/nlp/problems/lincon.jl deleted file mode 100644 index 713da2e0..00000000 --- a/test/TestUtils/nlp/problems/lincon.jl +++ /dev/null @@ -1,181 +0,0 @@ -export LINCON, lincon_autodiff - -function lincon_autodiff() - - A = [1 2; 3 4] - b = [5; 6] - B = diagm([3 * i for i = 3:5]) - c = [1; 2; 3] - C = [0 -2; 4 0] - d = [1; -1] - - x0 = zeros(15) - f(x) = sum(i + x[i]^4 for i = 1:15) - con(x) = [15 * x[15]; - c' * x[10:12]; - d' * x[13:14]; - b' * x[8:9]; - C * x[6:7]; - A * x[1:2]; - B * x[3:5]] - - lcon = [22.0; 1.0; -Inf; -11.0; -d; -b; -Inf * ones(3)] - ucon = [22.0; Inf; 16.0; 9.0; -d; Inf * ones(2); c] - - 
return ADNLPModel(f, x0, con, lcon, ucon, name="lincon_autodiff") -end - -""" - nlp = LINCON() - -## Linearly constrained problem - -```math -\\begin{aligned} -\\min \\quad & (i + x_i^4) \\\\ -\\text{s. to} \\quad & x_{15} = 0 \\\\ -& x_{10} + 2x_{11} + 3x_{12} \\geq 1 \\\\ -& x_{13} - x_{14} \\leq 16 \\\\ -& -11 \\leq 5x_8 - 6x_9 \\leq 9 \\\\ -& -2x_7 = -1 \\\\ -& 4x_6 = 1 \\\\ -& x_1 + 2x_2 \\geq -5 \\\\ -& 3x_1 + 4x_2 \\geq -6 \\\\ -& 9x_3 \\leq 1 \\\\ -& 12x_4 \\leq 2 \\\\ -& 15x_5 \\leq 3 -\\end{aligned} -``` - -Starting point: `zeros(15)`. -""" -mutable struct LINCON <: AbstractNLPModel - meta :: NLPModelMeta - counters :: Counters -end - -function LINCON() - meta = NLPModelMeta(15, nnzh=15, nnzj=17, ncon=11, x0=zeros(15), lcon = [22.0; 1.0; -Inf; -11.0; -1.0; 1.0; -5.0; -6.0; -Inf * ones(3)], ucon=[22.0; Inf; 16.0; 9.0; -1.0; 1.0; Inf * ones(2); 1.0; 2.0; 3.0], name="LINCON_manual") - - return LINCON(meta, Counters()) -end - -function NLPModels.obj(nlp :: LINCON, x :: AbstractVector) - @lencheck 15 x - increment!(nlp, :neval_obj) - return sum(i + x[i]^4 for i = 1:nlp.meta.nvar) -end - -function NLPModels.grad!(nlp :: LINCON, x :: AbstractVector, gx :: AbstractVector) - @lencheck 15 x gx - increment!(nlp, :neval_grad) - gx .= [4 * x[i]^3 for i =1:nlp.meta.nvar] - return gx -end - -function NLPModels.hess_structure!(nlp :: LINCON, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 15 rows cols - for i = 1:nlp.meta.nnzh - rows[i] = i - cols[i] = i - end - return rows, cols -end - -function NLPModels.hess_coord!(nlp :: LINCON, x :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 15 x vals - increment!(nlp, :neval_hess) - for i = 1:nlp.meta.nnzh - vals[i] = 12 * obj_weight * x[i]^2 - end - return vals -end - -function NLPModels.hess_coord!(nlp :: LINCON, x :: AbstractVector{T}, y :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 15 x vals - @lencheck 11 y - hess_coord!(nlp, x, vals, obj_weight=obj_weight) -end - -function NLPModels.hprod!(nlp :: LINCON, x :: AbstractVector{T}, y :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 15 x v Hv - @lencheck 11 y - increment!(nlp, :neval_hprod) - for i=1:nlp.meta.nvar - Hv[i] = 12 * obj_weight * x[i]^2 * v[i] - end - return Hv -end - -function NLPModels.cons!(nlp :: LINCON, x :: AbstractVector, cx :: AbstractVector) - @lencheck 15 x - @lencheck 11 cx - increment!(nlp, :neval_cons) - cx .= [15 * x[15]; - [1; 2; 3]' * x[10:12]; - [1; -1]' * x[13:14]; - [5; 6]' * x[8:9]; - [0 -2; 4 0] * x[6:7]; - [1 2; 3 4] * x[1:2]; - diagm([3 * i for i = 3:5]) * x[3:5]] - return cx -end - -function NLPModels.jac_structure!(nlp :: LINCON, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 17 rows cols - rows .= [ 1, 2, 2, 2, 3, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 10, 11] - cols .= [15, 10, 11, 12, 13, 14, 8, 9, 7, 6, 1, 2, 1, 2, 3, 4, 5] - return rows, cols -end - -function NLPModels.jac_coord!(nlp :: LINCON, x :: AbstractVector, vals :: AbstractVector) - @lencheck 15 x - @lencheck 17 vals - increment!(nlp, :neval_jac) - vals .= eltype(x).([15, 1, 2, 3, 1, -1, 5, 6, -2, 4, 1, 2, 3, 4, 9, 12, 15]) - return vals -end - -function NLPModels.jprod!(nlp :: LINCON, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck 15 x v - @lencheck 11 Jv - increment!(nlp, :neval_jprod) - Jv[1] = 15 * v[15] - Jv[2] = [1; 2; 3]' * v[10:12] - Jv[3] = [1; -1]' * v[13:14] - Jv[4] = [5; 6]' * v[8:9] - 
Jv[5:6] = [0 -2; 4 0] * v[6:7] - Jv[7:8] = [1.0 2; 3 4] * v[1:2] - Jv[9:11] = diagm([3 * i for i = 3:5]) * v[3:5] - return Jv -end - -function NLPModels.jtprod!(nlp :: LINCON, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck 15 x Jtv - @lencheck 11 v - increment!(nlp, :neval_jtprod) - Jtv[1] = 1 * v[7] + 3 * v[8] - Jtv[2] = 2 * v[7] + 4 * v[8] - Jtv[3] = 9 * v[9] - Jtv[4] = 12 * v[10] - Jtv[5] = 15 * v[11] - Jtv[6] = 4 * v[6] - Jtv[7] = -2 * v[5] - Jtv[8] = 5 * v[4] - Jtv[9] = 6 * v[4] - Jtv[10] = 1 * v[2] - Jtv[11] = 2 * v[2] - Jtv[12] = 3 * v[2] - Jtv[13] = 1 * v[3] - Jtv[14] = -1 * v[3] - Jtv[15] = 15 * v[1] - return Jtv -end - -function NLPModels.ghjvprod!(nlp :: LINCON, x :: AbstractVector{T}, g :: AbstractVector{T}, v :: AbstractVector{T}, gHv :: AbstractVector{T}) where T - @lencheck nlp.meta.nvar x g v - @lencheck nlp.meta.ncon gHv - increment!(nlp, :neval_hprod) - gHv .= zeros(T, nlp.meta.ncon) - return gHv -end diff --git a/test/TestUtils/nlp/problems/linsv.jl b/test/TestUtils/nlp/problems/linsv.jl deleted file mode 100644 index e1b25417..00000000 --- a/test/TestUtils/nlp/problems/linsv.jl +++ /dev/null @@ -1,131 +0,0 @@ -export LINSV, linsv_autodiff - -function linsv_autodiff() - - x0 = zeros(2) - f(x) = x[1] - con(x) = [x[1] + x[2]; x[2]] - lcon = [3; 1] - ucon = [Inf; Inf] - - return ADNLPModel(f, x0, con, lcon, ucon, name="linsv_autodiff") -end - -""" - nlp = LINSV() - -## Linear problem - -```math -\\begin{aligned} -\\min \\quad & x_1 \\\\ -\\text{s. to} \\quad & x_1 + x_2 \\geq 3 \\\\ -& x_2 \\geq 1 -\\end{aligned} -``` - -Starting point: `[0; 0]`. -""" -mutable struct LINSV <: AbstractNLPModel - meta :: NLPModelMeta - counters :: Counters -end - -function LINSV() - meta = NLPModelMeta(2, nnzh=0, nnzj=3, ncon=2, x0=zeros(2), lcon = [3; 1], ucon=[Inf; Inf], name="LINSV_manual") - - return LINSV(meta, Counters()) -end - -function NLPModels.obj(nlp :: LINSV, x :: AbstractVector) - @lencheck 2 x - increment!(nlp, :neval_obj) - return x[1] -end - -function NLPModels.grad!(nlp :: LINSV, x :: AbstractVector, gx :: AbstractVector) - @lencheck 2 x gx - increment!(nlp, :neval_grad) - gx[1] = 1 - gx[2] = 0 - return gx -end - -function NLPModels.hess(nlp :: LINSV, x :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x - increment!(nlp, :neval_hess) - H = zeros(T, 2, 2) - return H -end - -function NLPModels.hess_structure!(nlp :: LINSV, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 0 rows cols - return rows, cols -end - -function NLPModels.hess_coord!(nlp :: LINSV, x :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x - @lencheck 0 vals - increment!(nlp, :neval_hess) - return vals -end - -function NLPModels.hess_coord!(nlp :: LINSV, x :: AbstractVector{T}, y :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x y - @lencheck 0 vals - increment!(nlp, :neval_hess) - return vals -end - -function NLPModels.hprod!(nlp :: LINSV, x :: AbstractVector{T}, y :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x y v Hv - increment!(nlp, :neval_hprod) - Hv .= 0 - return Hv -end - -function NLPModels.cons!(nlp :: LINSV, x :: AbstractVector, cx :: AbstractVector) - @lencheck 2 x cx - increment!(nlp, :neval_cons) - cx .= [x[1] + x[2]; x[2]] - return cx -end - -function NLPModels.jac_structure!(nlp :: LINSV, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 3 rows cols - rows .= [1, 
1, 2] - cols .= [1, 2, 2] - return rows, cols -end - -function NLPModels.jac_coord!(nlp :: LINSV, x :: AbstractVector, vals :: AbstractVector) - @lencheck 2 x - @lencheck 3 vals - increment!(nlp, :neval_jac) - vals .= eltype(x).([1, 1, 1]) - return vals -end - -function NLPModels.jprod!(nlp :: LINSV, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck 2 x v Jv - increment!(nlp, :neval_jprod) - Jv[1] = v[1] + v[2] - Jv[2] = v[2] - return Jv -end - -function NLPModels.jtprod!(nlp :: LINSV, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck 2 x v Jtv - increment!(nlp, :neval_jtprod) - Jtv[1] = v[1] - Jtv[2] = v[1] + v[2] - return Jtv -end - -function NLPModels.ghjvprod!(nlp :: LINSV, x :: AbstractVector{T}, g :: AbstractVector{T}, v :: AbstractVector{T}, gHv :: AbstractVector{T}) where T - @lencheck nlp.meta.nvar x g v - @lencheck nlp.meta.ncon gHv - increment!(nlp, :neval_hprod) - gHv .= zeros(T, nlp.meta.ncon) - return gHv -end diff --git a/test/TestUtils/nlp/problems/mgh01feas.jl b/test/TestUtils/nlp/problems/mgh01feas.jl deleted file mode 100644 index a472ce82..00000000 --- a/test/TestUtils/nlp/problems/mgh01feas.jl +++ /dev/null @@ -1,123 +0,0 @@ -export MGH01Feas, mgh01feas_autodiff - -function mgh01feas_autodiff() - - x0 = [-1.2; 1.0] - f(x) = zero(eltype(x)) - c(x) = [1 - x[1]; 10 * (x[2] - x[1]^2)] - lcon = zeros(2) - ucon = zeros(2) - - return ADNLPModel(f, x0, c, lcon, ucon, name="mgh01feas_autodiff") -end - -""" - nlp = MGH01Feas() - -## Rosenbrock function in feasibility format - - Source: Problem 1 in - J.J. Moré, B.S. Garbow and K.E. Hillstrom, - "Testing Unconstrained Optimization Software", - ACM Transactions on Mathematical Software, vol. 7(1), pp. 17-41, 1981 - -```math -\\begin{aligned} -\\min \\quad & 0 \\\\ -\\text{s. to} \\quad & 1 - x_1 = 0 \\\\ -& 10 (x_2 - x_1^2) = 0. -\\end{aligned} -``` - -Starting point: `[-1.2; 1]`. 
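A sketch of how this feasibility model is meant to be used (assuming the `MGH01Feas` type defined below is loaded together with NLPModels):

```julia
nlp = MGH01Feas()
x = nlp.meta.x0          # [-1.2; 1.0]
obj(nlp, x)              # always 0 — only the constraints matter
cons(nlp, x)             # [1 - x₁; 10(x₂ - x₁²)] = [2.2; -4.4]
jac_coord(nlp, x)        # [-1.0, 24.0, 10.0], in the order given by jac_structure!
```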
-""" -mutable struct MGH01Feas <: AbstractNLPModel - meta :: NLPModelMeta - counters :: Counters -end - -function MGH01Feas() - meta = NLPModelMeta(2, x0=[-1.2; 1.0], name="MGH01Feas_manual", ncon=2, lcon=zeros(2), ucon=zeros(2), nnzj=3, nnzh=1) - - return MGH01Feas(meta, Counters()) -end - -function NLPModels.obj(nlp :: MGH01Feas, x :: AbstractVector) - @lencheck 2 x - increment!(nlp, :neval_obj) - return zero(eltype(x)) -end - -function NLPModels.grad!(nlp :: MGH01Feas, x :: AbstractVector{T}, gx :: AbstractVector{T}) where T - @lencheck 2 x gx - increment!(nlp, :neval_grad) - gx .= 0 - return gx -end - -function NLPModels.cons!(nls :: MGH01Feas, x :: AbstractVector, cx :: AbstractVector) - @lencheck 2 x cx - increment!(nls, :neval_cons) - cx .= [1 - x[1]; 10 * (x[2] - x[1]^2)] - return cx -end - -# Jx = [-1 0; -20x₁ 10] -function NLPModels.jac_structure!(nls :: MGH01Feas, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck 3 rows cols - rows .= [1, 2, 2] - cols .= [1, 1, 2] - return rows, cols -end - -function NLPModels.jac_coord!(nls :: MGH01Feas, x :: AbstractVector, vals :: AbstractVector) - @lencheck 2 x - @lencheck 3 vals - increment!(nls, :neval_jac) - vals .= [-1, -20x[1], 10] - return vals -end - -function NLPModels.jprod!(nls :: MGH01Feas, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck 2 x v Jv - increment!(nls, :neval_jprod) - Jv .= [-v[1]; - 20 * x[1] * v[1] + 10 * v[2]] - return Jv -end - -function NLPModels.jtprod!(nls :: MGH01Feas, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck 2 x v Jtv - increment!(nls, :neval_jtprod) - Jtv .= [-v[1] - 20 * x[1] * v[2]; 10 * v[2]] - return Jtv -end - -function NLPModels.hess_structure!(nls :: MGH01Feas, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck 1 rows cols - rows[1] = 1 - cols[1] = 1 - return rows, cols -end - -function NLPModels.hess_coord!(nls :: MGH01Feas, x :: AbstractVector, y :: AbstractVector, vals :: AbstractVector; obj_weight::Real=one(eltype(x))) - @lencheck 2 x y - @lencheck 1 vals - increment!(nls, :neval_hess) - vals[1] = -20y[2] - return vals -end - -function NLPModels.hprod!(nls :: MGH01Feas, x :: AbstractVector, y :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight::Real=one(eltype(x))) - @lencheck 2 x y v Hv - increment!(nls, :neval_hprod) - Hv .= [-20y[2] * v[1]; 0] - return Hv -end - -function NLPModels.ghjvprod!(nls :: MGH01Feas, x :: AbstractVector{T}, g :: AbstractVector{T}, v :: AbstractVector{T}, gHv :: AbstractVector{T}) where T - @lencheck nls.meta.nvar x g v - @lencheck nls.meta.ncon gHv - increment!(nls, :neval_hprod) - gHv .= [T(0); - g[1] * 20v[1]] - return gHv -end diff --git a/test/TestUtils/nlp/view-subarray.jl b/test/TestUtils/nlp/view-subarray.jl deleted file mode 100644 index c54a46c2..00000000 --- a/test/TestUtils/nlp/view-subarray.jl +++ /dev/null @@ -1,120 +0,0 @@ -export view_subarray_nlp - -""" - view_subarray_nlp(nlp) - -Check that the API work with views, and that the results is correct. 
-""" -function view_subarray_nlp(nlp) - @testset "Test view subarray of NLPs" begin - n, m = nlp.meta.nvar, nlp.meta.ncon - N = 2n - Vidxs = [1:2:N, collect(N:-2:1)] - Cidxs = if m > 0 - N = 2m - [1:2:N, collect(N:-2:1)] - else - [] - end - - # Inputs - x = [-(-1.1)^i for i = 1:2n] # Instead of [1, -1, …], because it needs to - v = [-(-1.1)^i for i = 1:2n] # access different parts of the vector and - y = [-(-1.1)^i for i = 1:2m] # make a difference - - # Outputs - g = zeros(n) - g2 = zeros(2n) - c = zeros(m) - c2 = zeros(2m) - jv = zeros(m) - jv2 = zeros(2m) - jty = zeros(n) - jty2 = zeros(2n) - hv = zeros(n) - hv2 = zeros(2n) - - for I = Vidxs - xv = @view x[I] - for foo in (obj, grad, hess) - @test foo(nlp, x[I]) ≈ foo(nlp, xv) - end - - # Some NLS don't implement hess_coord - #vals1 = hess_coord(nlp, x[I]) - #vals2 = hess_coord(nlp, xv) - #@test vals1 ≈ vals2 - - if m > 0 - for foo in (cons, jac) - @test foo(nlp, x[I]) ≈ foo(nlp, xv) - end - vals1 = jac_coord(nlp, x[I]) - vals2 = jac_coord(nlp, xv) - @test vals1 ≈ vals2 - end - - for J = Cidxs - yv = @view y[J] - @test hess(nlp, x[I], y[J]) ≈ hess(nlp, xv, yv) - yv = @view y[J] - #vals1 = hess_coord(nlp, x[I], y[J]) - #vals2 = hess_coord(nlp, xv, yv) - #@test vals1 ≈ vals2 - end - - # Inplace methods can have input and output as view, so 4 possibilities - for J = Vidxs - gv = @view g2[J] - grad!(nlp, x[I], g) - grad!(nlp, x[I], gv); @test g ≈ g2[J] - grad!(nlp, xv, gv); @test g ≈ g2[J] - grad!(nlp, xv, g); @test g ≈ g2[J] - end - - for J = Cidxs - cv = @view c2[J] - cons!(nlp, x[I], c) - cons!(nlp, x[I], cv); @test c ≈ c2[J] - cons!(nlp, xv, cv); @test c ≈ c2[J] - cons!(nlp, xv, c); @test c ≈ c2[J] - end - - for J = Cidxs, K in Vidxs - vv = @view v[K] - jvv = @view jv2[J] - @test jprod(nlp, x[I], v[K]) ≈ jprod(nlp, xv, vv) - jprod!(nlp, x[I], v[K], jv) - jprod!(nlp, x[I], v[K], jvv); @test jv ≈ jv2[J] - jprod!(nlp, xv, vv, jvv); @test jv ≈ jv2[J] - jprod!(nlp, xv, vv, jv); @test jv ≈ jv2[J] - - yv = @view y[J] - jtyv = @view jty2[K] - @test jtprod(nlp, x[I], y[J]) ≈ jtprod(nlp, xv, yv) - jtprod!(nlp, x[I], y[J], jty) - jtprod!(nlp, x[I], y[J], jtyv); @test jty ≈ jty2[K] - jtprod!(nlp, xv, yv, jtyv); @test jty ≈ jty2[K] - jtprod!(nlp, xv, yv, jty); @test jty ≈ jty2[K] - end - - for J = Vidxs, K in Vidxs - vv = @view v[J] - hvv = @view hv2[K] - @test hprod(nlp, x[I], v[J]) ≈ hprod(nlp, xv, vv) - hprod!(nlp, x[I], v[J], hv) - hprod!(nlp, x[I], v[J], hvv); @test hv ≈ hv2[K] - hprod!(nlp, xv, vv, hvv); @test hv ≈ hv2[K] - hprod!(nlp, xv, vv, hv); @test hv ≈ hv2[K] - for P in Cidxs - yv = @view y[P] - @test hprod(nlp, x[I], y[P], v[J]) ≈ hprod(nlp, xv, yv, vv) - hprod!(nlp, x[I], y[P], v[J], hv) - hprod!(nlp, x[I], y[P], v[J], hvv); @test hv ≈ hv2[K] - hprod!(nlp, xv, yv, vv, hvv); @test hv ≈ hv2[K] - hprod!(nlp, xv, yv, vv, hv); @test hv ≈ hv2[K] - end - end - end - end -end \ No newline at end of file diff --git a/test/TestUtils/nls/check-dimensions.jl b/test/TestUtils/nls/check-dimensions.jl deleted file mode 100644 index afe7a6dd..00000000 --- a/test/TestUtils/nls/check-dimensions.jl +++ /dev/null @@ -1,100 +0,0 @@ -export check_nls_dimensions - -""" - check_nls_dimensions(nlp; exclude_hess=false) - -Make sure NLS API functions will throw DimensionError if the inputs are not the correct dimension. 
-To make this assertion in your code use - - @lencheck size input [more inputs separated by spaces] -""" -function check_nls_dimensions(nls) - n, m = nls.meta.nvar, nls_meta(nls).nequ - nnzh, nnzj = nls_meta(nls).nnzh, nls_meta(nls).nnzj - - x, badx = nls.meta.x0, zeros(n + 1) - Fx, badFx = zeros(m), zeros(m + 1) - v, badv = ones(n), zeros(n + 1) - w, badw = ones(m), zeros(m + 1) - Jv, badJv = zeros(m), zeros(m + 1) - Jtw, badJtw = zeros(n), zeros(n + 1) - Hv, badHv = zeros(n), zeros(n + 1) - jrows, badjrows = zeros(Int, nnzj), zeros(Int, nnzj + 1) - jcols, badjcols = zeros(Int, nnzj), zeros(Int, nnzj + 1) - jvals, badjvals = zeros(nnzj), zeros(nnzj + 1) - hrows, badhrows = zeros(Int, nnzh), zeros(Int, nnzh + 1) - hcols, badhcols = zeros(Int, nnzh), zeros(Int, nnzh + 1) - hvals, badhvals = zeros(nnzh), zeros(nnzh + 1) - - @test_throws DimensionError residual(nls, badx) - @test_throws DimensionError residual!(nls, badx, Fx) - @test_throws DimensionError residual!(nls, x, badFx) - @test_throws DimensionError jac_residual(nls, badx) - @test_throws DimensionError jac_structure_residual!(nls, badjrows, jcols) - @test_throws DimensionError jac_structure_residual!(nls, jrows, badjcols) - @test_throws DimensionError jac_coord_residual(nls, badx) - @test_throws DimensionError jac_coord_residual!(nls, badx, jvals) - @test_throws DimensionError jac_coord_residual!(nls, x, badjvals) - @test_throws DimensionError jprod_residual(nls, badx, v) - @test_throws DimensionError jprod_residual(nls, x, badv) - @test_throws DimensionError jprod_residual!(nls, badx, v, Jv) - @test_throws DimensionError jprod_residual!(nls, x, badv, Jv) - @test_throws DimensionError jprod_residual!(nls, x, v, badJv) - @test_throws DimensionError jprod_residual!(nls, badjrows, jcols, jvals, v, Jv) - @test_throws DimensionError jprod_residual!(nls, jrows, badjcols, jvals, v, Jv) - @test_throws DimensionError jprod_residual!(nls, jrows, jcols, badjvals, v, Jv) - @test_throws DimensionError jprod_residual!(nls, jrows, jcols, jvals, badv, Jv) - @test_throws DimensionError jprod_residual!(nls, jrows, jcols, jvals, v, badJv) - @test_throws DimensionError jprod_residual!(nls, badx, jrows, jcols, v, Jv) - @test_throws DimensionError jprod_residual!(nls, x, badjrows, jcols, v, Jv) - @test_throws DimensionError jprod_residual!(nls, x, jrows, badjcols, v, Jv) - @test_throws DimensionError jprod_residual!(nls, x, jrows, jcols, badv, Jv) - @test_throws DimensionError jprod_residual!(nls, x, jrows, jcols, v, badJv) - @test_throws DimensionError jtprod_residual(nls, badx, w) - @test_throws DimensionError jtprod_residual(nls, x, badw) - @test_throws DimensionError jtprod_residual!(nls, badx, w, Jtw) - @test_throws DimensionError jtprod_residual!(nls, x, badw, Jtw) - @test_throws DimensionError jtprod_residual!(nls, x, w, badJtw) - @test_throws DimensionError jtprod_residual!(nls, badjrows, jcols, jvals, w, Jtw) - @test_throws DimensionError jtprod_residual!(nls, jrows, badjcols, jvals, w, Jtw) - @test_throws DimensionError jtprod_residual!(nls, jrows, jcols, badjvals, w, Jtw) - @test_throws DimensionError jtprod_residual!(nls, jrows, jcols, jvals, badw, Jtw) - @test_throws DimensionError jtprod_residual!(nls, jrows, jcols, jvals, w, badJtw) - @test_throws DimensionError jtprod_residual!(nls, badx, jrows, jcols, w, Jtw) - @test_throws DimensionError jtprod_residual!(nls, x, badjrows, jcols, w, Jtw) - @test_throws DimensionError jtprod_residual!(nls, x, jrows, badjcols, w, Jtw) - @test_throws DimensionError jtprod_residual!(nls, x, jrows, jcols, 
badw, Jtw) - @test_throws DimensionError jtprod_residual!(nls, x, jrows, jcols, w, badJtw) - @test_throws DimensionError jac_op_residual(nls, badx) - @test_throws DimensionError jac_op_residual!(nls, badx, Jv, Jtw) - @test_throws DimensionError jac_op_residual!(nls, x, badJv, Jtw) - @test_throws DimensionError jac_op_residual!(nls, x, Jv, badJtw) - @test_throws DimensionError jac_op_residual!(nls, badjrows, jcols, jvals, Jv, Jtw) - @test_throws DimensionError jac_op_residual!(nls, jrows, badjcols, jvals, Jv, Jtw) - @test_throws DimensionError jac_op_residual!(nls, jrows, jcols, badjvals, Jv, Jtw) - @test_throws DimensionError jac_op_residual!(nls, jrows, jcols, jvals, badJv, Jtw) - @test_throws DimensionError jac_op_residual!(nls, jrows, jcols, jvals, Jv, badJtw) - @test_throws DimensionError jac_op_residual!(nls, badx, jrows, jcols, Jv, Jtw) - @test_throws DimensionError jac_op_residual!(nls, x, badjrows, jcols, Jv, Jtw) - @test_throws DimensionError jac_op_residual!(nls, x, jrows, badjcols, Jv, Jtw) - @test_throws DimensionError jac_op_residual!(nls, x, jrows, jcols, badJv, Jtw) - @test_throws DimensionError jac_op_residual!(nls, x, jrows, jcols, Jv, badJtw) - @test_throws DimensionError hess_residual(nls, badx, Fx) - @test_throws DimensionError hess_residual(nls, x, badFx) - @test_throws DimensionError hess_structure_residual!(nls, badhrows, hcols) - @test_throws DimensionError hess_structure_residual!(nls, hrows, badhcols) - @test_throws DimensionError hess_coord_residual(nls, badx, Fx) - @test_throws DimensionError hess_coord_residual(nls, x, badFx) - @test_throws DimensionError hess_coord_residual!(nls, badx, Fx, hvals) - @test_throws DimensionError hess_coord_residual!(nls, x, badFx, hvals) - @test_throws DimensionError hess_coord_residual!(nls, x, Fx, badhvals) - @test_throws DimensionError jth_hess_residual(nls, badx, 1) - @test_throws DimensionError hprod_residual(nls, badx, 1, v) - @test_throws DimensionError hprod_residual(nls, x, 1, badv) - @test_throws DimensionError hprod_residual!(nls, badx, 1, v, Hv) - @test_throws DimensionError hprod_residual!(nls, x, 1, badv, Hv) - @test_throws DimensionError hprod_residual!(nls, x, 1, v, badHv) - @test_throws DimensionError hess_op_residual(nls, badx, 1) - @test_throws DimensionError hess_op_residual!(nls, badx, 1, Hv) - @test_throws DimensionError hess_op_residual!(nls, x, 1, badHv) -end diff --git a/test/TestUtils/nls/consistency.jl b/test/TestUtils/nls/consistency.jl deleted file mode 100644 index 6a58ea88..00000000 --- a/test/TestUtils/nls/consistency.jl +++ /dev/null @@ -1,207 +0,0 @@ -import LinearAlgebra: I - -export consistent_nlss - -""" - consistent_nlss(nlps; exclude=[hess, hprod, hess_coord]) - -Check that the all `nls`s of the vector `nlss` are consistent, in the sense that -- Their counters are the same. -- Their `meta` information is the same. -- The API functions return the same output given the same input. - -In other words, if you create two models of the same problem, they should be consistent. - -By default, the functions `hess`, `hprod` and `hess_coord` (and therefore associated functions) are excluded from this check, since some models don't implement them. 
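A typical (illustrative) call compares two formulations of the same problem, e.g. the manual and automatic-differentiation versions defined elsewhere in these test utilities:

```julia
# Both models encode Rosenbrock in least-squares form, so all API outputs
# must coincide; hess-type functions are excluded by default.
consistent_nlss([MGH01(), mgh01_autodiff()])
```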
-""" -function consistent_nlss(nlss; exclude=[hess, hess_coord, ghjvprod], test_slack=true, test_ff=true) - consistent_nls_counters(nlss) - consistent_counters(nlss) - consistent_nls_functions(nlss, exclude=exclude) - consistent_nls_counters(nlss) - consistent_counters(nlss) - for nls in nlss - reset!(nls) - end - consistent_functions(nlss, exclude=exclude) - - if test_slack && has_inequalities(nlss[1]) - reset!.(nlss) - slack_nlss = SlackNLSModel.(nlss) - consistent_nls_functions(slack_nlss, exclude=exclude) - consistent_nls_counters(slack_nlss) - consistent_counters(slack_nlss) - consistent_functions(slack_nlss, exclude=exclude) - end - - if test_ff - reset!.(nlss) - ff_nlss = FeasibilityFormNLS.(nlss) - consistent_nls_functions(ff_nlss, exclude=exclude) - consistent_nls_counters(ff_nlss) - consistent_counters(ff_nlss) - consistent_functions(ff_nlss, exclude=exclude) - end -end - -function consistent_nls_counters(nlss) - N = length(nlss) - V = zeros(Int, N) - for field in fieldnames(NLSCounters) - field == :counters && continue - @testset "Field $field" begin - V = [eval(field)(nls) for nls in nlss] - @test all(V .== V[1]) - end - end - V = [sum_counters(nls) for nls in nlss] - @test all(V .== V[1]) -end - -function consistent_nls_functions(nlss; rtol=1.0e-8, exclude=[]) - N = length(nlss) - n = nls_meta(nlss[1]).nvar - m = nls_meta(nlss[1]).nequ - - tmp_n = zeros(n) - tmp_m = zeros(m) - - x = 10 * [-(-1.0)^i for i = 1:n] - - if !(residual in exclude) - Fs = Any[residual(nls, x) for nls in nlss] - for i = 1:N - for j = i+1:N - @test isapprox(Fs[i], Fs[j], rtol=rtol) - end - - r = residual!(nlss[i], x, tmp_m) - @test isapprox(r, Fs[i], rtol=rtol) - @test isapprox(Fs[i], tmp_m, rtol=rtol) - end - end - - if intersect([jac_residual,jac_coord_residual], exclude) == [] - Js = Any[jac_residual(nls, x) for nls in nlss] - for i = 1:N - for j = i+1:N - @test isapprox(Js[i], Js[j], rtol=rtol) - end - V = jac_coord_residual(nlss[i], x) - I, J = jac_structure_residual(nlss[i]) - @test length(I) == length(J) == length(V) == nlss[i].nls_meta.nnzj - I2, J2 = copy(I), copy(J) - jac_structure_residual!(nlss[i], I2, J2) - @test I == I2 - @test J == J2 - tmp_V = zeros(nlss[i].nls_meta.nnzj) - jac_coord_residual!(nlss[i], x, tmp_V) - @test tmp_V == V - end - end - - if intersect([jac_op_residual, jprod_residual, jtprod_residual], exclude) == [] - J_ops = Any[jac_op_residual(nls, x) for nls in nlss] - Jv, Jtv = zeros(m), zeros(n) - J_ops_inplace = Any[jac_op_residual!(nls, x, Jv, Jtv) for nls in nlss] - - v = [-(-1.0)^i for i = 1:n] - - Jps = Any[jprod_residual(nls, x, v) for nls in nlss] - for i = 1:N - for j = i+1:N - @test isapprox(Jps[i], Jps[j], rtol=rtol) - end - - jps = jprod_residual!(nlss[i], x, v, tmp_m) - @test isapprox(jps, Jps[i], rtol=rtol) - @test isapprox(Jps[i], tmp_m, rtol=rtol) - @test isapprox(Jps[i], J_ops[i] * v, rtol=rtol) - @test isapprox(Jps[i], J_ops_inplace[i] * v, rtol=rtol) - - rows, cols = jac_structure_residual(nlss[i]) - vals = jac_coord_residual(nlss[i], x) - jprod_residual!(nlss[i], rows, cols, vals, v, tmp_m) - @test isapprox(Jps[i], tmp_m, rtol=rtol) - jprod_residual!(nlss[i], x, rows, cols, v, tmp_m) - @test isapprox(Jps[i], tmp_m, rtol=rtol) - - J = jac_op_residual!(nlss[i], x, rows, cols, tmp_m, tmp_n) - res = J * v - @test isapprox(Jps[i], res, rtol=rtol) - end - - v = [-(-1.0)^i for i = 1:m] - - Jtps = Any[jtprod_residual(nls, x, v) for nls in nlss] - for i = 1:N - for j = i+1:N - @test isapprox(Jtps[i], Jtps[j], rtol=rtol) - end - - jtps = jtprod_residual!(nlss[i], 
x, v, tmp_n) - @test isapprox(jtps, Jtps[i], rtol=rtol) - @test isapprox(Jtps[i], tmp_n, rtol=rtol) - @test isapprox(Jtps[i], J_ops[i]' * v, rtol=rtol) - @test isapprox(Jtps[i], J_ops_inplace[i]' * v, rtol=rtol) - - rows, cols = jac_structure_residual(nlss[i]) - vals = jac_coord_residual(nlss[i], x) - jtprod_residual!(nlss[i], rows, cols, vals, v, tmp_n) - @test isapprox(Jtps[i], tmp_n, rtol=rtol) - jtprod_residual!(nlss[i], x, rows, cols, v, tmp_n) - @test isapprox(Jtps[i], tmp_n, rtol=rtol) - - J = jac_op_residual!(nlss[i], x, rows, cols, tmp_m, tmp_n) - res = J' * v - @test isapprox(Jtps[i], res, rtol=rtol) - end - end - - if intersect([hess_residual, hprod_residual, hess_op_residual], exclude) == [] - v = [-(-1.0)^i for i = 1:n] - w = [-(-1.0)^i for i = 1:m] - - Hs = Any[hess_residual(nls, x, w) for nls in nlss] - Hsi = Any[sum(jth_hess_residual(nls, x, i) * w[i] for i = 1:m) for nls in nlss] - for i = 1:N - for j = i+1:N - @test isapprox(Hs[i], Hs[j], rtol=rtol) - end - @test isapprox(Hs[i], Hsi[i], rtol=rtol) - if !(hess_coord_residual in exclude) - V = hess_coord_residual(nlss[i], x, w) - I, J = hess_structure_residual(nlss[i]) - @test length(I) == length(J) == length(V) == nlss[i].nls_meta.nnzh - @test sparse(I, J, V, n, n) == Hs[i] - I2, J2 = copy(I), copy(J) - hess_structure_residual!(nlss[i], I2, J2) - @test I == I2 - @test J == J2 - tmp_V = zeros(nlss[i].nls_meta.nnzh) - hess_coord_residual!(nlss[i], x, w, tmp_V) - @test tmp_V == V - end - end - - for k = 1:m - Hs = Any[jth_hess_residual(nls, x, k) for nls in nlss] - Hvs = Any[hprod_residual(nls, x, k, v) for nls in nlss] - Hops = Any[hess_op_residual(nls, x, k) for nls in nlss] - Hiv = zeros(n) - Hops_inplace = Any[hess_op_residual!(nls, x, k, Hiv) for nls in nlss] - for i = 1:N - for j = i+1:N - @test isapprox(Hs[i], Hs[j], rtol=rtol) - @test isapprox(Hvs[i], Hvs[j], rtol=rtol) - end - - hvs = hprod_residual!(nlss[i], x, k, v, tmp_n) - @test isapprox(hvs, Hvs[i], rtol=rtol) - @test isapprox(Hvs[i], tmp_n, rtol=rtol) - @test isapprox(Hvs[i], Hops[i] * v, rtol=rtol) - @test isapprox(Hvs[i], Hops_inplace[i] * v, rtol=rtol) - end - end - end -end diff --git a/test/TestUtils/nls/multiple-precision.jl b/test/TestUtils/nls/multiple-precision.jl deleted file mode 100644 index d25755ac..00000000 --- a/test/TestUtils/nls/multiple-precision.jl +++ /dev/null @@ -1,43 +0,0 @@ -export multiple_precision_nls - -""" - multiple_precision_nls(nls; precisions=[...]) - -Check that the NLS API functions output type are the same as the input. -In other words, make sure that the model handles multiple precisions. - -The array `precisions` are the tested floating point types. -Defaults to `[Float16, Float32, Float64, BigFloat]`. 
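The core of the check is type preservation; a condensed sketch for a given `AbstractNLSModel` `nls`:

```julia
for T in (Float16, Float32, Float64, BigFloat)
  x = ones(T, nls.meta.nvar)
  @assert typeof(obj(nls, x)) == T          # scalar outputs keep the input type
  @assert eltype(residual(nls, x)) == T     # vector outputs keep the input type
end
```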
-""" -function multiple_precision_nls(nls :: AbstractNLSModel; - precisions :: Array = [Float16, Float32, Float64, BigFloat]) - for T in precisions - x = ones(T, nls.meta.nvar) - @test eltype(residual(nls, x)) == T - @test eltype(jac_residual(nls, x)) == T - @test eltype(jac_op_residual(nls, x)) == T - rows, cols = jac_structure_residual(nls) - vals = jac_coord_residual(nls, x) - @test eltype(vals) == T - Av = zeros(T, nls.nls_meta.nequ) - Atv = zeros(T, nls.meta.nvar) - @test eltype(jac_op_residual!(nls, rows, cols, vals, Av, Atv)) == T - @test eltype(hess_residual(nls, x, ones(T, nls.nls_meta.nequ))) == T - for i = 1:nls.nls_meta.nequ - @test eltype(hess_op_residual(nls, x, i)) == T - end - @test typeof(obj(nls, x)) == T - @test eltype(grad(nls, x)) == T - if nls.meta.ncon > 0 - @test eltype(cons(nls, x)) == T - @test eltype(jac(nls, x)) == T - @test eltype(jac_op(nls, x)) == T - rows, cols = jac_structure(nls) - vals = jac_coord(nls, x) - @test eltype(vals) == T - Av = zeros(T, nls.meta.ncon) - Atv = zeros(T, nls.meta.nvar) - @test eltype(jac_op!(nls, rows, cols, vals, Av, Atv)) == T - end - end -end diff --git a/test/TestUtils/nls/problems/lls.jl b/test/TestUtils/nls/problems/lls.jl deleted file mode 100644 index b24d0f87..00000000 --- a/test/TestUtils/nls/problems/lls.jl +++ /dev/null @@ -1,203 +0,0 @@ -export LLS, LLS_special, lls_autodiff - -function lls_autodiff() - - x0 = [0.0; 0.0] - F(x) = [x[1] - x[2]; x[1] + x[2] - 2; x[2] - 2] - c(x) = [x[1] + x[2]] - lcon = [0.0] - ucon = [Inf] - - return ADNLSModel(F, x0, 3, c, lcon, ucon, name="lls_autodiff") -end - -function LLS_special() - return LLSModel([1.0 -1; 1 1; 0 1], [0.0; 2; 2], x0=zeros(2), C=[1.0 1], lcon=[0.0], ucon=[Inf], name="lls_LLSModel") -end - -""" - nls = LLS() - -## Linear least squares - -```math -\\begin{aligned} -\\min \\quad & \\tfrac{1}{2}\\| F(x) \\|^2 \\\\ -\\text{s. to} \\quad & x_1 + x_2 \\geq 0 -\\end{aligned} -``` -where -```math -F(x) = \\begin{bmatrix} -x_1 - x_2 \\\\ -x_1 + x_2 - 2 \\\\ -x_2 - 2 -\\end{bmatrix}. -``` - -Starting point: `[0; 0]`. 
-""" -mutable struct LLS <: AbstractNLSModel - meta :: NLPModelMeta - nls_meta :: NLSMeta - counters :: NLSCounters -end - -function LLS() - meta = NLPModelMeta(2, x0=zeros(2), name="LLS_manual", ncon=1, lcon=[0.0], ucon=[Inf], nnzj=2) - nls_meta = NLSMeta(3, 2, nnzj=5, nnzh=0) - - return LLS(meta, nls_meta, NLSCounters()) -end - -function NLPModels.residual!(nls :: LLS, x :: AbstractVector, Fx :: AbstractVector) - @lencheck 2 x - @lencheck 3 Fx - increment!(nls, :neval_residual) - Fx .= [x[1] - x[2]; x[1] + x[2] - 2; x[2] - 2] - return Fx -end - -function NLPModels.jac_structure_residual!(nls :: LLS, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck 5 rows cols - rows .= [1, 1, 2, 2, 3] - cols .= [1, 2, 1, 2, 2] - return rows, cols -end - -function NLPModels.jac_coord_residual!(nls :: LLS, x :: AbstractVector, vals :: AbstractVector) - @lencheck 2 x - @lencheck 5 vals - increment!(nls, :neval_jac_residual) - T = eltype(x) - vals .= T[1, -1, 1, 1, 1] - return vals -end - -function NLPModels.jprod_residual!(nls :: LLS, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck 2 x v - @lencheck 3 Jv - increment!(nls, :neval_jprod_residual) - Jv .= [v[1] - v[2]; v[1] + v[2]; v[2]] - return Jv -end - -function NLPModels.jtprod_residual!(nls :: LLS, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck 2 x Jtv - @lencheck 3 v - increment!(nls, :neval_jtprod_residual) - Jtv .= [v[1] + v[2]; -v[1] + v[2] + v[3]] - return Jtv -end - -function NLPModels.hess_structure_residual!(nls :: LLS, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck 0 rows cols - return rows, cols -end - -function NLPModels.hess_coord_residual!(nls :: LLS, x :: AbstractVector, v :: AbstractVector, vals :: AbstractVector) - @lencheck 2 x - @lencheck 3 v - @lencheck 0 vals - increment!(nls, :neval_hess_residual) - return vals -end - -function NLPModels.hprod_residual!(nls :: LLS, x :: AbstractVector, i :: Int, v :: AbstractVector, Hiv :: AbstractVector) - @lencheck 2 x v Hiv - increment!(nls, :neval_hprod_residual) - Hiv .= zero(eltype(x)) - return Hiv -end - -function NLPModels.cons!(nls :: LLS, x :: AbstractVector, cx :: AbstractVector) - @lencheck 2 x - @lencheck 1 cx - increment!(nls, :neval_cons) - cx[1] = x[1] + x[2] - return cx -end - -function NLPModels.jac_structure!(nls :: LLS, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck 2 rows cols - rows .= [1, 1] - cols .= [1, 2] - return rows, cols -end - -function NLPModels.jac_coord!(nls :: LLS, x :: AbstractVector, vals :: AbstractVector) - @lencheck 2 x vals - increment!(nls, :neval_jac) - T = eltype(x) - vals .= T[1, 1] - return vals -end - -function NLPModels.jprod!(nls :: LLS, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck 2 x v - @lencheck 1 Jv - increment!(nls, :neval_jprod) - Jv[1] = v[1] + v[2] - return Jv -end - -function NLPModels.jtprod!(nls :: LLS, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck 2 x Jtv - @lencheck 1 v - increment!(nls, :neval_jtprod) - Jtv .= v - return Jtv -end - -function NLPModels.hess(nls :: LLS, x :: AbstractVector{T}; obj_weight=1.0) where T - @lencheck 2 x - increment!(nls, :neval_hess) - return obj_weight * [2. 0.;0. 3.] 
-end - -function NLPModels.hess_structure!(nls :: LLS, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 3 rows cols - n = nls.meta.nvar - I = ((i,j) for i = 1:n, j = 1:n if i ≥ j) - rows .= getindex.(I, 1) - cols .= getindex.(I, 2) - return rows, cols -end - -function NLPModels.hess_coord!(nls :: LLS, x :: AbstractVector, vals :: AbstractVector; obj_weight=1.0) - @lencheck 2 x - @lencheck 3 vals - Hx = hess(nls, x, obj_weight=obj_weight) - k = 1 - for j = 1:2 - for i = j:2 - vals[k] = Hx[i,j] - k += 1 - end - end - return vals -end - -function NLPModels.hprod!(nls :: LLS, x :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x v Hv - increment!(nls, :neval_hprod) - Hv[1] = 2*obj_weight*v[1] - Hv[2] = 3*obj_weight*v[2] - return Hv -end - -function NLPModels.hprod!(nls :: LLS, x :: AbstractVector{T}, y :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x v Hv - increment!(nls, :neval_hprod) - Hv[1] = 2*obj_weight*v[1] - Hv[2] = 3*obj_weight*v[2] - return Hv -end - -function NLPModels.ghjvprod!(nls :: LLS, x :: AbstractVector{T}, g :: AbstractVector{T}, v :: AbstractVector{T}, gHv :: AbstractVector{T}) where T - @lencheck nls.meta.nvar x g v - @lencheck nls.meta.ncon gHv - increment!(nls, :neval_hprod) - gHv .= zeros(T, nls.meta.ncon) - return gHv -end diff --git a/test/TestUtils/nls/problems/mgh01.jl b/test/TestUtils/nls/problems/mgh01.jl deleted file mode 100644 index ab2702b1..00000000 --- a/test/TestUtils/nls/problems/mgh01.jl +++ /dev/null @@ -1,148 +0,0 @@ -export MGH01, MGH01_special, mgh01_autodiff - -function mgh01_autodiff() - - x0 = [-1.2; 1.0] - F(x) = [1 - x[1]; 10 * (x[2] - x[1]^2)] - - return ADNLSModel(F, x0, 2, name="mgh01_autodiff") -end - -MGH01_special() = FeasibilityResidual(MGH01Feas()) - -""" - nls = MGH01() - -## Rosenbrock function in nonlinear least squares format - - Source: Problem 1 in - J.J. Moré, B.S. Garbow and K.E. Hillstrom, - "Testing Unconstrained Optimization Software", - ACM Transactions on Mathematical Software, vol. 7(1), pp. 17-41, 1981 - -```math -\\begin{aligned} -\\min \\quad & \\tfrac{1}{2}\\| F(x) \\|^2 -\\end{aligned} -``` -where -```math -F(x) = \\begin{bmatrix} -1 - x_1 \\\\ -10 (x_2 - x_1^2) -\\end{bmatrix}. -``` - -Starting point: `[-1.2; 1]`. 
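The residual vanishes at the well-known solution (1, 1); a one-line sanity sketch using the manual model defined below:

```julia
nls = MGH01()
residual(nls, [1.0; 1.0])    # [0.0, 0.0] — so obj(nls, [1.0; 1.0]) == 0.0
```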
-""" -mutable struct MGH01 <: AbstractNLSModel - meta :: NLPModelMeta - nls_meta :: NLSMeta - counters :: NLSCounters -end - -function MGH01() - meta = NLPModelMeta(2, x0=[-1.2; 1.0], name="MGH01_manual") - nls_meta = NLSMeta(2, 2, nnzj=3, nnzh=1) - - return MGH01(meta, nls_meta, NLSCounters()) -end - -function NLPModels.residual!(nls :: MGH01, x :: AbstractVector, Fx :: AbstractVector) - @lencheck 2 x Fx - increment!(nls, :neval_residual) - Fx .= [1 - x[1]; 10 * (x[2] - x[1]^2)] - return Fx -end - -# Jx = [-1 0; -20x₁ 10] -function NLPModels.jac_structure_residual!(nls :: MGH01, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck 3 rows cols - rows .= [1, 2, 2] - cols .= [1, 1, 2] - return rows, cols -end - -function NLPModels.jac_coord_residual!(nls :: MGH01, x :: AbstractVector, vals :: AbstractVector) - @lencheck 2 x - @lencheck 3 vals - increment!(nls, :neval_jac_residual) - vals .= [-1, -20x[1], 10] - return vals -end - -function NLPModels.jprod_residual!(nls :: MGH01, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck 2 x v Jv - increment!(nls, :neval_jprod_residual) - Jv .= [-v[1]; - 20 * x[1] * v[1] + 10 * v[2]] - return Jv -end - -function NLPModels.jtprod_residual!(nls :: MGH01, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck 2 x v Jtv - increment!(nls, :neval_jtprod_residual) - Jtv .= [-v[1] - 20 * x[1] * v[2]; 10 * v[2]] - return Jtv -end - -function NLPModels.hess_structure_residual!(nls :: MGH01, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck 1 rows cols - rows[1] = 1 - cols[1] = 1 - return rows, cols -end - -function NLPModels.hess_coord_residual!(nls :: MGH01, x :: AbstractVector, v :: AbstractVector, vals :: AbstractVector) - @lencheck 2 x v - @lencheck 1 vals - increment!(nls, :neval_hess_residual) - vals[1] = -20v[2] - return vals -end - -function NLPModels.hprod_residual!(nls :: MGH01, x :: AbstractVector, i :: Int, v :: AbstractVector, Hiv :: AbstractVector) - @lencheck 2 x v Hiv - increment!(nls, :neval_hprod_residual) - if i == 2 - Hiv .= [-20v[1]; 0] - else - Hiv .= zero(eltype(x)) - end - return Hiv -end - -function NLPModels.hess(nls :: MGH01, x :: AbstractVector{T}; obj_weight=1.0) where T - @lencheck 2 x - increment!(nls, :neval_hess) - return obj_weight * [T(1)-200*x[2]+600*x[1]^2 T(0);-200*x[1] T(100)] -end - -function NLPModels.hess_structure!(nls :: MGH01, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 3 rows cols - n = nls.meta.nvar - I = ((i,j) for i = 1:n, j = 1:n if i ≥ j) - rows .= getindex.(I, 1) - cols .= getindex.(I, 2) - return rows, cols -end - -function NLPModels.hess_coord!(nls :: MGH01, x :: AbstractVector, vals :: AbstractVector; obj_weight=1.0) - @lencheck 2 x - @lencheck 3 vals - Hx = hess(nls, x, obj_weight=obj_weight) - k = 1 - for j = 1:2 - for i = j:2 - vals[k] = Hx[i,j] - k += 1 - end - end - return vals -end - -function NLPModels.hprod!(nls :: MGH01, x :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x v Hv - increment!(nls, :neval_hprod) - Hv .= obj_weight * [T(1)-200*x[2]+600*x[1]^2 -200*x[1];-200*x[1] T(100)] * v - return Hv -end diff --git a/test/TestUtils/nls/problems/nlshs20.jl b/test/TestUtils/nls/problems/nlshs20.jl deleted file mode 100644 index 7929d6d7..00000000 --- a/test/TestUtils/nls/problems/nlshs20.jl +++ /dev/null @@ -1,228 +0,0 @@ -export NLSHS20, nlshs20_autodiff - -function nlshs20_autodiff() - - x0 = [-2.0; 
1.0] - F(x) = [1 - x[1]; 10 * (x[2] - x[1]^2)] - lvar = [-0.5; -Inf] - uvar = [0.5; Inf] - c(x) = [x[1] + x[2]^2; x[1]^2 + x[2]; x[1]^2 + x[2]^2 - 1] - lcon = zeros(3) - ucon = fill(Inf, 3) - - return ADNLSModel(F, x0, 2, lvar, uvar, c, lcon, ucon, name="nlshs20_autodiff") -end - -""" - nls = NLSH20() - -## Problem 20 in the Hock-Schittkowski suite in nonlinear least squares format - -```math -\\begin{aligned} -\\min \\quad & \\tfrac{1}{2}\\| F(x) \\|^2 \\\\ -\\text{s. to} \\quad & x_1 + x_2^2 \\geq 0 \\\\ -& x_1^2 + x_2 \\geq 0 \\\\ -& x_1^2 + x_2^2 -1 \\geq 0 \\\\ -& -0.5 \\leq x_1 \\leq 0.5 -\\end{aligned} -``` -where -```math -F(x) = \\begin{bmatrix} -1 - x_1 \\\\ -10 (x_2 - x_1^2) -\\end{bmatrix}. -``` - -Starting point: `[-2; 1]`. -""" -mutable struct NLSHS20 <: AbstractNLSModel - meta :: NLPModelMeta - nls_meta :: NLSMeta - counters :: NLSCounters -end - -function NLSHS20() - meta = NLPModelMeta(2, x0=[-2.0; 1.0], name="NLSHS20_manual", lvar=[-0.5; -Inf], uvar=[0.5; Inf], ncon=3, lcon=zeros(3), ucon=fill(Inf, 3), nnzj=6) - nls_meta = NLSMeta(2, 2, nnzj=3, nnzh=1) - - return NLSHS20(meta, nls_meta, NLSCounters()) -end - -function NLPModels.residual!(nls :: NLSHS20, x :: AbstractVector, Fx :: AbstractVector) - @lencheck 2 x Fx - increment!(nls, :neval_residual) - Fx .= [1 - x[1]; 10 * (x[2] - x[1]^2)] - return Fx -end - -# Jx = [-1 0; -20x₁ 10] -function NLPModels.jac_structure_residual!(nls :: NLSHS20, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck 3 rows cols - rows .= [1, 2, 2] - cols .= [1, 1, 2] - return rows, cols -end - -function NLPModels.jac_coord_residual!(nls :: NLSHS20, x :: AbstractVector, vals :: AbstractVector) - @lencheck 2 x - @lencheck 3 vals - increment!(nls, :neval_jac_residual) - vals .= [-1, -20x[1], 10] - return vals -end - -function NLPModels.jprod_residual!(nls :: NLSHS20, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck 2 x v Jv - increment!(nls, :neval_jprod_residual) - Jv .= [-v[1]; - 20 * x[1] * v[1] + 10 * v[2]] - return Jv -end - -function NLPModels.jtprod_residual!(nls :: NLSHS20, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck 2 x v Jtv - increment!(nls, :neval_jtprod_residual) - Jtv .= [-v[1] - 20 * x[1] * v[2]; 10 * v[2]] - return Jtv -end - -function NLPModels.hess_structure_residual!(nls :: NLSHS20, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck 1 rows cols - rows[1] = 1 - cols[1] = 1 - return rows, cols -end - -function NLPModels.hess_coord_residual!(nls :: NLSHS20, x :: AbstractVector, v :: AbstractVector, vals :: AbstractVector) - @lencheck 2 x v - @lencheck 1 vals - increment!(nls, :neval_hess_residual) - vals[1] = -20v[2] - return vals -end - -function NLPModels.hprod_residual!(nls :: NLSHS20, x :: AbstractVector, i :: Int, v :: AbstractVector, Hiv :: AbstractVector) - @lencheck 2 x v Hiv - increment!(nls, :neval_hprod_residual) - if i == 2 - Hiv .= [-20v[1]; 0] - else - Hiv .= zero(eltype(x)) - end - return Hiv -end - -function NLPModels.cons!(nls :: NLSHS20, x :: AbstractVector, cx :: AbstractVector) - @lencheck 2 x - @lencheck 3 cx - increment!(nls, :neval_cons) - cx .= [x[1] + x[2]^2; x[1]^2 + x[2]; x[1]^2 + x[2]^2 - 1] - return cx -end - -function NLPModels.jac_structure!(nls :: NLSHS20, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck 6 rows cols - rows .= [1, 1, 2, 2, 3, 3] - cols .= [1, 2, 1, 2, 1, 2] - return rows, cols -end - -function NLPModels.jac_coord!(nls :: NLSHS20, 
x :: AbstractVector, vals :: AbstractVector) - @lencheck 2 x - @lencheck 6 vals - increment!(nls, :neval_jac) - vals .= [1, 2x[2], 2x[1], 1, 2x[1], 2x[2]] - return vals -end - -function NLPModels.jprod!(nls :: NLSHS20, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck 2 x v - @lencheck 3 Jv - increment!(nls, :neval_jprod) - Jv .= [v[1] + 2x[2] * v[2]; 2x[1] * v[1] + v[2]; 2x[1] * v[1] + 2x[2] * v[2]] - return Jv -end - -function NLPModels.jtprod!(nls :: NLSHS20, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck 2 x Jtv - @lencheck 3 v - increment!(nls, :neval_jtprod) - Jtv .= [v[1] + 2x[1] * (v[2] + v[3]); v[2] + 2x[2] * (v[1] + v[3])] - return Jtv -end - -function NLPModels.hess(nls :: NLSHS20, x :: AbstractVector{T}; obj_weight=1.0) where T - @lencheck 2 x - increment!(nls, :neval_hess) - return obj_weight * [T(1)-200*x[2]+600*x[1]^2 T(0);-200*x[1] T(100)] -end - -function NLPModels.hess(nls :: NLSHS20, x :: AbstractVector{T}, y :: AbstractVector{T}; obj_weight=1.0) where T - @lencheck 2 x - @lencheck 3 y - increment!(nls, :neval_hess) - return [obj_weight*(T(1)-200*x[2]+600*x[1]^2)+2*y[2]+2*y[3] T(0);-obj_weight*200*x[1] obj_weight*T(100)+2*y[1]+2*y[3]] -end - -function NLPModels.hess_structure!(nls :: NLSHS20, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 3 rows cols - n = nls.meta.nvar - I = ((i,j) for i = 1:n, j = 1:n if i ≥ j) - rows .= getindex.(I, 1) - cols .= getindex.(I, 2) - return rows, cols -end - -function NLPModels.hess_coord!(nls :: NLSHS20, x :: AbstractVector, vals :: AbstractVector; obj_weight=1.0) - @lencheck 2 x - @lencheck 3 vals - Hx = hess(nls, x, obj_weight=obj_weight) - k = 1 - for j = 1:2 - for i = j:2 - vals[k] = Hx[i,j] - k += 1 - end - end - return vals -end - -function NLPModels.hess_coord!(nls :: NLSHS20, x :: AbstractVector, y :: AbstractVector, vals :: AbstractVector; obj_weight=1.0) - @lencheck 2 x - @lencheck 3 y - @lencheck 3 vals - Hx = hess(nls, x, y, obj_weight=obj_weight) - k = 1 - for j = 1:2 - for i = j:2 - vals[k] = Hx[i,j] - k += 1 - end - end - return vals -end - -function NLPModels.hprod!(nls :: NLSHS20, x :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x v Hv - increment!(nls, :neval_hprod) - Hv .= obj_weight * [T(1)-200*x[2]+600*x[1]^2 -200*x[1];-200*x[1] T(100)] * v - return Hv -end - -function NLPModels.hprod!(nls :: NLSHS20, x :: AbstractVector{T}, y :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 2 x v Hv - increment!(nls, :neval_hprod) - Hv .= [obj_weight*(T(1)-200*x[2]+600*x[1]^2)+2*y[2]+2*y[3] -obj_weight*200*x[1];-obj_weight*200*x[1] obj_weight*T(100)+2*y[1]+2*y[3]] * v - return Hv -end - -function NLPModels.ghjvprod!(nls :: NLSHS20, x :: AbstractVector{T}, g :: AbstractVector{T}, v :: AbstractVector{T}, gHv :: AbstractVector{T}) where T - @lencheck nls.meta.nvar x g v - @lencheck nls.meta.ncon gHv - increment!(nls, :neval_hprod) - gHv[1] = g[2] * 2v[2] - gHv[2] = g[1] * 2v[1] - gHv[3] = g[1] * 2v[1] + g[2] * 2v[2] - return gHv -end diff --git a/test/TestUtils/nls/problems/nlslc.jl b/test/TestUtils/nls/problems/nlslc.jl deleted file mode 100644 index 36bfb6f7..00000000 --- a/test/TestUtils/nls/problems/nlslc.jl +++ /dev/null @@ -1,271 +0,0 @@ -export NLSLC, nlslc_autodiff - -function nlslc_autodiff() - - A = [1 2; 3 4] - b = [5; 6] - B = diagm([3 * i for i = 3:5]) - c = [1; 2; 3] - C = [0 -2; 4 0] - d = [1; -1] - - x0 = zeros(15) - 
F(x) = [x[i]^2 - i^2 for i=1:15] - con(x) = [15 * x[15]; - c' * x[10:12]; - d' * x[13:14]; - b' * x[8:9]; - C * x[6:7]; - A * x[1:2]; - B * x[3:5]] - - lcon = [22.0; 1.0; -Inf; -11.0; -d; -b; -Inf * ones(3)] - ucon = [22.0; Inf; 16.0; 9.0; -d; Inf * ones(2); c] - - return ADNLSModel(F, x0, 15, con, lcon, ucon, name="nlslincon_autodiff") -end - -""" - nls = NLSLC() - -## Linearly constrained nonlinear least squares problem - -```math -\\begin{aligned} -\\min \\quad & \\tfrac{1}{2}\\| F(x) \\|^2 \\\\ -\\text{s. to} \\quad & x_{15} = 0 \\\\ -& x_{10} + 2x_{11} + 3x_{12} \\geq 1 \\\\ -& x_{13} - x_{14} \\leq 16 \\\\ -& -11 \\leq 5x_8 - 6x_9 \\leq 9 \\\\ -& -2x_7 = -1 \\\\ -& 4x_6 = 1 \\\\ -& x_1 + 2x_2 \\geq -5 \\\\ -& 3x_1 + 4x_2 \\geq -6 \\\\ -& 9x_3 \\leq 1 \\\\ -& 12x_4 \\leq 2 \\\\ -& 15x_5 \\leq 3 -\\end{aligned} -``` -where -```math -F(x) = \\begin{bmatrix} -x_1^2 - 1 \\\\ -x_2^2 - 2^2 \\\\ -\\vdots \\\\ -x_{15}^2 - 15^2 -\\end{bmatrix} -``` - -Starting point: `zeros(15)`. -""" -mutable struct NLSLC <: AbstractNLSModel - meta :: NLPModelMeta - nls_meta :: NLSMeta - counters :: NLSCounters -end - -function NLSLC() - meta = NLPModelMeta(15, nnzj=17, ncon=11, x0=zeros(15), lcon = [22.0; 1.0; -Inf; -11.0; -1.0; 1.0; -5.0; -6.0; -Inf * ones(3)], ucon=[22.0; Inf; 16.0; 9.0; -1.0; 1.0; Inf * ones(2); 1.0; 2.0; 3.0], name="NLSLINCON") - nls_meta = NLSMeta(15, 15, nnzj=15, nnzh=15) - - return NLSLC(meta, nls_meta, NLSCounters()) -end - -function NLPModels.residual!(nls :: NLSLC, x :: AbstractVector, Fx :: AbstractVector) - @lencheck 15 x Fx - increment!(nls, :neval_residual) - Fx .= [x[i]^2 - i^2 for i = 1:nls.nls_meta.nequ] - return Fx -end - -function NLPModels.jac_structure_residual!(nls :: NLSLC, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck 15 rows cols - for i = 1:nls.nls_meta.nnzj - rows[i] = i - cols[i] = i - end - return rows, cols -end - -function NLPModels.jac_coord_residual!(nls :: NLSLC, x :: AbstractVector, vals :: AbstractVector) - @lencheck 15 x vals - increment!(nls, :neval_jac_residual) - vals .= [2 * x[i] for i = 1:nls.nls_meta.nnzj] - return vals -end - -function NLPModels.jprod_residual!(nls :: NLSLC, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck 15 x v Jv - increment!(nls, :neval_jprod_residual) - Jv .= [2 * x[i] * v[i] for i = 1:nls.nls_meta.nnzj] - return Jv -end - -function NLPModels.jtprod_residual!(nls :: NLSLC, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck 15 x v Jtv - increment!(nls, :neval_jtprod_residual) - Jtv .= [2 * x[i] * v[i] for i = 1:nls.nls_meta.nnzj] - return Jtv -end - -function NLPModels.hess_structure_residual!(nls :: NLSLC, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck 15 rows cols - for i = 1:nls.nls_meta.nnzh - rows[i] = i - cols[i] = i - end - return rows, cols -end - -function NLPModels.hess_coord_residual!(nls :: NLSLC, x :: AbstractVector, v :: AbstractVector, vals :: AbstractVector) - @lencheck 15 x v vals - increment!(nls, :neval_hess_residual) - vals .= [2 * v[i] for i = 1:nls.nls_meta.nnzh] - return vals -end - -function NLPModels.hprod_residual!(nls :: NLSLC, x :: AbstractVector, i :: Int, v :: AbstractVector, Hiv :: AbstractVector) - @lencheck 15 x v Hiv - increment!(nls, :neval_hprod_residual) - Hiv .= zero(eltype(x)) - Hiv[i] = 2 * v[i] - return Hiv -end - -function NLPModels.cons!(nls :: NLSLC, x :: AbstractVector, cx :: AbstractVector) - @lencheck 15 x - @lencheck 11 cx - increment!(nls, :neval_cons) - 
cx .= [15 * x[15]; - [1; 2; 3]' * x[10:12]; - [1; -1]' * x[13:14]; - [5; 6]' * x[8:9]; - [0 -2; 4 0] * x[6:7]; - [1 2; 3 4] * x[1:2]; - diagm([3 * i for i = 3:5]) * x[3:5]] - return cx -end - -function NLPModels.jac_structure!(nls :: NLSLC, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) - @lencheck 17 rows cols - rows .= [ 1, 2, 2, 2, 3, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 10, 11] - cols .= [15, 10, 11, 12, 13, 14, 8, 9, 7, 6, 1, 2, 1, 2, 3, 4, 5] - return rows, cols -end - -function NLPModels.jac_coord!(nls :: NLSLC, x :: AbstractVector, vals :: AbstractVector) - @lencheck 15 x - @lencheck 17 vals - increment!(nls, :neval_jac) - vals .= eltype(x).([15, 1, 2, 3, 1, -1, 5, 6, -2, 4, 1, 2, 3, 4, 9, 12, 15]) - return vals -end - -function NLPModels.jprod!(nls :: NLSLC, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) - @lencheck 15 x v - @lencheck 11 Jv - increment!(nls, :neval_jprod) - Jv[1] = 15 * v[15] - Jv[2] = [1; 2; 3]' * v[10:12] - Jv[3] = [1; -1]' * v[13:14] - Jv[4] = [5; 6]' * v[8:9] - Jv[5:6] = [0 -2; 4 0] * v[6:7] - Jv[7:8] = [1.0 2; 3 4] * v[1:2] - Jv[9:11] = diagm([3 * i for i = 3:5]) * v[3:5] - return Jv -end - -function NLPModels.jtprod!(nls :: NLSLC, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) - @lencheck 15 x Jtv - @lencheck 11 v - increment!(nls, :neval_jtprod) - Jtv[1] = 1 * v[7] + 3 * v[8] - Jtv[2] = 2 * v[7] + 4 * v[8] - Jtv[3] = 9 * v[9] - Jtv[4] = 12 * v[10] - Jtv[5] = 15 * v[11] - Jtv[6] = 4 * v[6] - Jtv[7] = -2 * v[5] - Jtv[8] = 5 * v[4] - Jtv[9] = 6 * v[4] - Jtv[10] = 1 * v[2] - Jtv[11] = 2 * v[2] - Jtv[12] = 3 * v[2] - Jtv[13] = 1 * v[3] - Jtv[14] = -1 * v[3] - Jtv[15] = 15 * v[1] - return Jtv -end - -function NLPModels.hess(nls :: NLSLC, x :: AbstractVector{T}; obj_weight=1.0) where T - @lencheck 15 x - increment!(nls, :neval_hess) - return obj_weight * diagm(0 => [6*x[i]^2-2*i^2 for i=1:15]) -end - -function NLPModels.hess(nls :: NLSLC, x :: AbstractVector{T}, y :: AbstractVector{T}; obj_weight=1.0) where T - @lencheck 15 x - @lencheck 11 y - increment!(nls, :neval_hess) - return hess(nls, x, obj_weight=obj_weight) -end - -function NLPModels.hess_structure!(nls :: NLSLC, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) - @lencheck 120 rows cols - n = nls.meta.nvar - I = ((i,j) for i = 1:n, j = 1:n if i ≥ j) - rows .= getindex.(I, 1) - cols .= getindex.(I, 2) - return rows, cols -end - -function NLPModels.hess_coord!(nls :: NLSLC, x :: AbstractVector, vals :: AbstractVector; obj_weight=1.0) - @lencheck 15 x - @lencheck 120 vals - Hx = hess(nls, x, obj_weight=obj_weight) - k = 1 - for j = 1:15 - for i = j:15 - vals[k] = Hx[i,j] - k += 1 - end - end - return vals -end - -function NLPModels.hess_coord!(nls :: NLSLC, x :: AbstractVector, y :: AbstractVector, vals :: AbstractVector; obj_weight=1.0) - @lencheck 15 x - @lencheck 11 y - @lencheck 120 vals - Hx = hess(nls, x, y, obj_weight=obj_weight) - k = 1 - for j = 1:15 - for i = j:15 - vals[k] = Hx[i,j] - k += 1 - end - end - return vals -end - -function NLPModels.hprod!(nls :: NLSLC, x :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 15 x v Hv - increment!(nls, :neval_hprod) - Hv .= obj_weight * [(6*x[i]^2-2*i^2)*v[i] for i=1:15] - return Hv -end - -function NLPModels.hprod!(nls :: NLSLC, x :: AbstractVector{T}, y :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T - @lencheck 15 x v Hv - @lencheck 11 y - increment!(nls, :neval_hprod) - return 
hprod!(nls, x, v, Hv, obj_weight=obj_weight) -end - -function NLPModels.ghjvprod!(nls :: NLSLC, x :: AbstractVector{T}, g :: AbstractVector{T}, v :: AbstractVector{T}, gHv :: AbstractVector{T}) where T - @lencheck nls.meta.nvar x g v - @lencheck nls.meta.ncon gHv - increment!(nls, :neval_hprod) - gHv .= zeros(T, nls.meta.ncon) - return gHv -end diff --git a/test/TestUtils/nls/view-subarray.jl b/test/TestUtils/nls/view-subarray.jl deleted file mode 100644 index 40179efe..00000000 --- a/test/TestUtils/nls/view-subarray.jl +++ /dev/null @@ -1,79 +0,0 @@ -export view_subarray_nls - -""" - view_subarray_nls(nls) - -Check that the API works with views and that the results are correct. -""" -function view_subarray_nls(nls) - @testset "Test view subarray of NLSs" begin - n, ne = nls.meta.nvar, nls.nls_meta.nequ - N = 2n - Vidxs = [1:n, n.+(1:n), 1:2:N, collect(N:-2:1)] - N = 2ne - Fidxs = [1:ne, ne.+(1:ne), 1:2:N, collect(N:-2:1)] - - # Inputs - x = [-(-1.1)^i for i = 1:2n] # Instead of [1, -1, …], because it needs to - v = [-(-1.1)^i for i = 1:2n] # access different parts of the vector and - y = [-(-1.1)^i for i = 1:2ne] # make a difference - - # Outputs - F = zeros(ne) - F2 = zeros(2ne) - jv = zeros(ne) - jv2 = zeros(2ne) - jty = zeros(n) - jty2 = zeros(2n) - hv = zeros(n) - hv2 = zeros(2n) - - for I = Vidxs - xv = @view x[I] - for foo in (residual, jac_residual) - @test foo(nls, x[I]) ≈ foo(nls, xv) - end - - # In-place methods can have input and output as views, so 4 possibilities - for J = Fidxs - Fv = @view F2[J] - residual!(nls, x[I], F) - residual!(nls, x[I], Fv); @test F ≈ F2[J] - residual!(nls, xv, Fv); @test F ≈ F2[J] - residual!(nls, xv, F); @test F ≈ F2[J] - end - - for J = Fidxs, K in Vidxs - vv = @view v[K] - jvv = @view jv2[J] - @test jprod_residual(nls, x[I], v[K]) ≈ jprod_residual(nls, xv, vv) - jprod_residual!(nls, x[I], v[K], jv) - jprod_residual!(nls, x[I], v[K], jvv); @test jv ≈ jv2[J] - jprod_residual!(nls, xv, vv, jvv); @test jv ≈ jv2[J] - jprod_residual!(nls, xv, vv, jv); @test jv ≈ jv2[J] - - yv = @view y[J] - jtyv = @view jty2[K] - @test jtprod_residual(nls, x[I], y[J]) ≈ jtprod_residual(nls, xv, yv) - jtprod_residual!(nls, x[I], y[J], jty) - jtprod_residual!(nls, x[I], y[J], jtyv); @test jty ≈ jty2[K] - jtprod_residual!(nls, xv, yv, jtyv); @test jty ≈ jty2[K] - jtprod_residual!(nls, xv, yv, jty); @test jty ≈ jty2[K] - end - - for i = 1:ne - @test jth_hess_residual(nls, x[I], i) ≈ jth_hess_residual(nls, xv, i) - - for J = Vidxs, K in Vidxs - vv = @view v[J] - hvv = @view hv2[K] - @test hprod_residual(nls, x[I], i, v[J]) ≈ hprod_residual(nls, xv, i, vv) - hprod_residual!(nls, x[I], i, v[J], hv) - hprod_residual!(nls, x[I], i, v[J], hvv); @test hv ≈ hv2[K] - hprod_residual!(nls, xv, i, vv, hvv); @test hv ≈ hv2[K] - hprod_residual!(nls, xv, i, vv, hv); @test hv ≈ hv2[K] - end - end - end - end -end \ No newline at end of file diff --git a/test/nlp/api.jl b/test/nlp/api.jl new file mode 100644 index 00000000..b92ff3e8 --- /dev/null +++ b/test/nlp/api.jl @@ -0,0 +1,87 @@ +@testset "NLP API test on a simple model" begin + f(x) = (x[1] - 2)^2 + (x[2] - 1)^2 + ∇f(x) = [2 * (x[1] - 2); 2 * (x[2] - 1)] + H(x) = [2.0 0; 0 2.0] + c(x) = [x[1] - 2x[2] + 1; -x[1]^2 / 4 - x[2]^2 + 1] + J(x) = [1.0 -2.0; -0.5x[1] -2.0x[2]] + H(x,y) = H(x) + y[2] * [-0.5 0; 0 -2.0] + + nlp = SimpleNLPModel() + n = nlp.meta.nvar + m = nlp.meta.ncon + + x = randn(n) + y = randn(m) + v = randn(n) + w = randn(m) + Jv = zeros(m) + Jtw = zeros(n) + Hv = zeros(n) + Hvals = zeros(nlp.meta.nnzh) + + # Basic methods 
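+  # Each call below exercises one entry point of the NLPModels API on SimpleNLPModel
+  # and compares it against the hand-written f, ∇f, H, c and J above. Note that
+  # hess returns only the lower triangle of the symmetric Hessian, which is why the
+  # reference values are wrapped in tril(...).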
+ @test obj(nlp, x) ≈ f(x) + @test grad(nlp, x) ≈ ∇f(x) + @test hess(nlp, x) ≈ tril(H(x)) + @test hprod(nlp, x, v) ≈ H(x) * v + @test cons(nlp, x) ≈ c(x) + @test jac(nlp, x) ≈ J(x) + @test jprod(nlp, x, v) ≈ J(x) * v + @test jtprod(nlp, x, w) ≈ J(x)' * w + @test hess(nlp, x, y) ≈ tril(H(x,y)) + @test hprod(nlp, x, y, v) ≈ H(x, y) * v + + # Increasing coverage + fx, cx = objcons(nlp, x) + @test fx ≈ f(x) + @test cx ≈ c(x) + fx, _ = objcons!(nlp, x, cx) + @test fx ≈ f(x) + @test cx ≈ c(x) + fx, gx = objgrad(nlp, x) + @test fx ≈ f(x) + @test gx ≈ ∇f(x) + fx, _ = objgrad!(nlp, x, gx) + @test fx ≈ f(x) + @test gx ≈ ∇f(x) + @test jprod!(nlp, jac_structure(nlp)..., jac_coord(nlp, x), v, Jv) ≈ J(x) * v + @test jprod!(nlp, x, jac_structure(nlp)..., v, Jv) ≈ J(x) * v + @test jtprod!(nlp, jac_structure(nlp)..., jac_coord(nlp, x), w, Jtw) ≈ J(x)' * w + @test jtprod!(nlp, x, jac_structure(nlp)..., w, Jtw) ≈ J(x)' * w + Jop = jac_op!(nlp, x, Jv, Jtw) + @test Jop * v ≈ J(x) * v + @test Jop' * w ≈ J(x)' * w + Jop = jac_op!(nlp, jac_structure(nlp)..., jac_coord(nlp, x), Jv, Jtw) + @test Jop * v ≈ J(x) * v + @test Jop' * w ≈ J(x)' * w + Jop = jac_op!(nlp, x, jac_structure(nlp)..., Jv, Jtw) + @test Jop * v ≈ J(x) * v + @test Jop' * w ≈ J(x)' * w + ghjv = zeros(m) + for j = 1:m + eⱼ = [i == j ? 1.0 : 0.0 for i = 1:m] + Cⱼ(x) = H(x, eⱼ) - H(x) + ghjv[j] = dot(gx, Cⱼ(x) * v) + end + @test ghjvprod(nlp, x, gx, v) ≈ ghjv + @test hess_coord!(nlp, x, Hvals) == hess_coord!(nlp, x, y * 0, Hvals) + @test hprod!(nlp, hess_structure(nlp)..., hess_coord(nlp, x), v, Hv) ≈ H(x) * v + @test hprod!(nlp, x, hess_structure(nlp)..., v, Hv) ≈ H(x) * v + @test hprod!(nlp, x, y, hess_structure(nlp)..., v, Hv) ≈ H(x, y) * v + Hop = hess_op(nlp, x) + @test Hop * v ≈ H(x) * v + Hop = hess_op!(nlp, x, Hv) + @test Hop * v ≈ H(x) * v + Hop = hess_op!(nlp, hess_structure(nlp)..., hess_coord(nlp, x), Hv) + @test Hop * v ≈ H(x) * v + Hop = hess_op!(nlp, x, hess_structure(nlp)..., Hv) + @test Hop * v ≈ H(x) * v + Hop = hess_op(nlp, x, y) + @test Hop * v ≈ H(x, y) * v + Hop = hess_op!(nlp, x, y, Hv) + @test Hop * v ≈ H(x, y) * v + Hop = hess_op!(nlp, hess_structure(nlp)..., hess_coord(nlp, x, y), Hv) + @test Hop * v ≈ H(x, y) * v + Hop = hess_op!(nlp, x, y, hess_structure(nlp)..., Hv) + @test Hop * v ≈ H(x, y) * v +end diff --git a/test/nlp/counters.jl b/test/nlp/counters.jl new file mode 100644 index 00000000..c0c1f57d --- /dev/null +++ b/test/nlp/counters.jl @@ -0,0 +1,13 @@ +@testset "Basic Counters check" begin + nlp = SimpleNLPModel() + + for counter in fieldnames(Counters) + @eval @test $counter($nlp) == 0 + end + + obj(nlp, nlp.meta.x0) + grad(nlp, nlp.meta.x0) + @test sum_counters(nlp) == 2 + reset!(nlp) + @test sum_counters(nlp) == 0 +end \ No newline at end of file diff --git a/test/nlp/dummy-model.jl b/test/nlp/dummy-model.jl new file mode 100644 index 00000000..f503f657 --- /dev/null +++ b/test/nlp/dummy-model.jl @@ -0,0 +1,28 @@ +mutable struct DummyModel <: AbstractNLPModel + meta :: NLPModelMeta +end + +@testset "Default methods throw MethodError on DummyModel since they're not defined" begin + + model = DummyModel(NLPModelMeta(1)) + @test_throws(MethodError, lagscale(model, 1.0)) + @test_throws(MethodError, obj(model, [0.0])) + @test_throws(MethodError, varscale(model, [0.0])) + @test_throws(MethodError, conscale(model, [0.0])) + @test_throws(MethodError, jac_structure(model, [0], [1])) + @test_throws(MethodError, hess_structure(model, [0], [1])) + @test_throws(MethodError, grad!(model, [0.0], [1.0])) + 
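+  # None of the calls above or below have fallback implementations in NLPModels,
+  # hence the MethodErrors. The jac_op/hess_op checks at the end of this testset
+  # are the exception: they wrap jprod!, jtprod! and hprod! in a lazy LinearOperator,
+  # so constructing the operator succeeds even though DummyModel implements none of
+  # those methods; only multiplying the operator would throw.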
@test_throws(MethodError, cons!(model, [0.0], [1.0])) + @test_throws(MethodError, jac_coord!(model, [0.0], [1.0])) + @test_throws(MethodError, jth_con(model, [0.0], 1)) + @test_throws(MethodError, jth_congrad(model, [0.0], 1)) + @test_throws(MethodError, jth_sparse_congrad(model, [0.0], 1)) + @test_throws(MethodError, jth_congrad!(model, [0.0], 1, [2.0])) + @test_throws(MethodError, jprod!(model, [0.0], [1.0], [2.0])) + @test_throws(MethodError, jtprod!(model, [0.0], [1.0], [2.0])) + @test_throws(MethodError, jth_hprod(model, [0.0], [1.0], 2)) + @test_throws(MethodError, jth_hprod!(model, [0.0], [1.0], 2, [3.0])) + @test_throws(MethodError, ghjvprod!(model, [0.0], [1.0], [2.0], [3.0])) + @assert isa(hess_op(model, [0.]), LinearOperator) + @assert isa(jac_op(model, [0.]), LinearOperator) +end \ No newline at end of file diff --git a/test/nlp/meta.jl b/test/nlp/meta.jl new file mode 100644 index 00000000..0e56d4cb --- /dev/null +++ b/test/nlp/meta.jl @@ -0,0 +1,3 @@ +@testset "A problem with zero variables doesn't make sense." begin + @test_throws ErrorException NLPModelMeta(0) +end \ No newline at end of file diff --git a/test/nlp/show.jl b/test/nlp/show.jl new file mode 100644 index 00000000..62c2ae09 --- /dev/null +++ b/test/nlp/show.jl @@ -0,0 +1,41 @@ +@testset "Increase coverage of nlp/show.jl" begin + nlp = SimpleNLPModel() + io = IOBuffer() + show(io, nlp) + showed = String(take!(io)) + expected = """ SimpleNLPModel + Problem name: Simple NLP Model + All variables: ████████████████████ 2 All constraints: ████████████████████ 2 + free: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 free: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + lower: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 lower: ██████████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1 + upper: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 upper: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + low/upp: ████████████████████ 2 low/upp: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + fixed: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 fixed: ██████████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1 + infeas: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 infeas: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + nnzh: ( 33.33% sparsity) 2 linear: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + nonlinear: ████████████████████ 2 + nnzj: ( 0.00% sparsity) 4 + + Counters: + obj: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 grad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 cons: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + jcon: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jgrad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jac: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + jprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jtprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 hess: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + hprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0""" + @test strip.(split(chomp(showed), "\n")) == strip.(split(chomp(expected), "\n")) + + io = IOBuffer() + show(io, NLPModelMeta(1)) + showed = String(take!(io)) + expected = """ Problem name: Generic + All variables: ████████████████████ 1 All constraints: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + free: ████████████████████ 1 free: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + lower: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 lower: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + upper: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 upper: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + low/upp: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 low/upp: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + fixed: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 fixed: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + infeas: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 infeas: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + nnzh: ( 0.00% sparsity) 1 linear: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + nonlinear: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + nnzj: (------% sparsity)\n\n""" + @test strip.(split(chomp(showed), "\n")) == strip.(split(chomp(expected), "\n")) +end \ No newline at end of file diff --git a/test/nlp/simple-model.jl b/test/nlp/simple-model.jl new file mode 100644 index 00000000..c5df62a8 --- /dev/null +++ b/test/nlp/simple-model.jl @@ -0,0 +1,105 @@ +""" + SimpleNLPModel <: AbstractNLPModel + +Simple model for testing 
purposes. +Modified problem 14 in the Hock-Schittkowski suite + + min (x₁ - 2)² + (x₂ - 1)² + s.to x₁ - 2x₂ + 1 = 0 + -x₁² / 4 - x₂² + 1 ≥ 0 + 0 ≤ x ≤ 1 + +x₀ = [2.0, 2.0]. +""" +mutable struct SimpleNLPModel <: AbstractNLPModel + meta :: NLPModelMeta + counters :: Counters +end + +function SimpleNLPModel() + meta = NLPModelMeta(2, nnzh=2, ncon=2, lvar=zeros(2), uvar=ones(2), x0=[2.0; 2.0], lcon=[0.0; 0.0], ucon=[0.0; Inf], name="Simple NLP Model") + + return SimpleNLPModel(meta, Counters()) +end + +function NLPModels.obj(nlp :: SimpleNLPModel, x :: AbstractVector) + @lencheck 2 x + increment!(nlp, :neval_obj) + return (x[1] - 2)^2 + (x[2] - 1)^2 +end + +function NLPModels.grad!(nlp :: SimpleNLPModel, x :: AbstractVector, gx :: AbstractVector) + @lencheck 2 x gx + increment!(nlp, :neval_grad) + gx .= [2 * (x[1] - 2); 2 * (x[2] - 1)] + return gx +end + +function NLPModels.hess_structure!(nlp :: SimpleNLPModel, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) + @lencheck 2 rows cols + rows[1] = 1; rows[2] = 2 + cols[1] = 1; cols[2] = 2 + return rows, cols +end + +function NLPModels.hess_coord!(nlp :: SimpleNLPModel, x :: AbstractVector{T}, y :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=one(T)) where T + @lencheck 2 x y vals + increment!(nlp, :neval_hess) + vals .= 2obj_weight + vals[1] -= y[2] / 2 + vals[2] -= 2y[2] + return vals +end + +function NLPModels.hprod!(nlp :: SimpleNLPModel, x :: AbstractVector{T}, y :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T + @lencheck 2 x y v Hv + increment!(nlp, :neval_hprod) + Hv .= 2obj_weight * v + Hv[1] -= y[2] * v[1] / 2 + Hv[2] -= 2y[2] * v[2] + return Hv +end + +function NLPModels.cons!(nlp :: SimpleNLPModel, x :: AbstractVector, cx :: AbstractVector) + @lencheck 2 x cx + increment!(nlp, :neval_cons) + cx .= [x[1] - 2 * x[2] + 1; -x[1]^2/4 - x[2]^2 + 1] + return cx +end + +function NLPModels.jac_structure!(nlp :: SimpleNLPModel, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) + @lencheck 4 rows cols + rows .= [1, 2, 1, 2] + cols .= [1, 1, 2, 2] + return rows, cols +end + +function NLPModels.jac_coord!(nlp :: SimpleNLPModel, x :: AbstractVector, vals :: AbstractVector) + @lencheck 2 x + @lencheck 4 vals + increment!(nlp, :neval_jac) + vals .= [1, -x[1] / 2, -2, -2 * x[2]] + return vals +end + +function NLPModels.jprod!(nlp :: SimpleNLPModel, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) + @lencheck 2 x v Jv + increment!(nlp, :neval_jprod) + Jv .= [v[1] - 2 * v[2]; -x[1] * v[1] / 2 - 2 * x[2] * v[2]] + return Jv +end + +function NLPModels.jtprod!(nlp :: SimpleNLPModel, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) + @lencheck 2 x v Jtv + increment!(nlp, :neval_jtprod) + Jtv .= [v[1] - x[1] * v[2] / 2; -2 * v[1] - 2 * x[2] * v[2]] + return Jtv +end + +function NLPModels.ghjvprod!(nlp :: SimpleNLPModel, x :: AbstractVector{T}, g :: AbstractVector{T}, v :: AbstractVector{T}, gHv :: AbstractVector{T}) where T + @lencheck nlp.meta.nvar x g v + @lencheck nlp.meta.ncon gHv + increment!(nlp, :neval_hprod) + gHv .= [T(0); - g[1] * v[1] / 2 - 2 * g[2] * v[2]] + return gHv +end diff --git a/test/nlp/tools.jl b/test/nlp/tools.jl new file mode 100644 index 00000000..8e86bcf9 --- /dev/null +++ b/test/nlp/tools.jl @@ -0,0 +1,35 @@ +@testset "Problem type functions" begin + foo_list = [has_bounds, bound_constrained, unconstrained, linearly_constrained, equality_constrained, inequality_constrained, has_equalities, has_inequalities] + meta_list = [ 
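+    # Sixteen metas covering the combinations of bound constraints (lvar/uvar),
+    # equality/inequality constraints (lcon/ucon) and linear constraint flags (lin).
+    # The Bool matrix `results` below records the expected value of each predicate
+    # in foo_list: one row per predicate, one column per meta.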
+    NLPModelMeta(2),
+    NLPModelMeta(2, lvar=zeros(2), uvar=ones(2)),
+    NLPModelMeta(2, ncon=1, lcon=[0.0], ucon=[0.0]),
+    NLPModelMeta(2, ncon=1, lcon=[0.0], ucon=[1.0]),
+    NLPModelMeta(2, ncon=1, lcon=[0.0], ucon=[Inf]),
+    NLPModelMeta(2, ncon=1, lcon=[-Inf], ucon=[0.0]),
+    NLPModelMeta(2, ncon=1, lcon=[0.0], ucon=[1.0], lin=[1]),
+    NLPModelMeta(2, ncon=2, lcon=[0.0, 0.0], ucon=[1.0, 1.0], lin=[1]),
+    NLPModelMeta(2, ncon=2, lcon=[0.0, 0.0], ucon=[1.0, 0.0], lin=[1]),
+    NLPModelMeta(2, lvar=zeros(2), uvar=ones(2), ncon=1, lcon=[0.0], ucon=[0.0]),
+    NLPModelMeta(2, lvar=zeros(2), uvar=ones(2), ncon=1, lcon=[0.0], ucon=[1.0]),
+    NLPModelMeta(2, lvar=zeros(2), uvar=ones(2), ncon=1, lcon=[0.0], ucon=[Inf]),
+    NLPModelMeta(2, lvar=zeros(2), uvar=ones(2), ncon=1, lcon=[-Inf], ucon=[0.0]),
+    NLPModelMeta(2, lvar=zeros(2), uvar=ones(2), ncon=1, lcon=[0.0], ucon=[1.0], lin=[1]),
+    NLPModelMeta(2, lvar=zeros(2), uvar=ones(2), ncon=2, lcon=[0.0, 0.0], ucon=[1.0, 1.0], lin=[1]),
+    NLPModelMeta(2, lvar=zeros(2), uvar=ones(2), ncon=2, lcon=[0.0, 0.0], ucon=[1.0, 0.0], lin=[1])
+  ]
+  results = Bool[
+    0 1 0 0 0 0 0 0 0 1 1 1 1 1 1 1
+    0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+    1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+    0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0
+    0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0
+    0 0 0 1 1 1 1 1 0 0 1 1 1 1 1 0
+    0 0 1 0 0 0 0 0 1 1 0 0 0 0 0 1
+    0 0 0 1 1 1 1 1 1 0 1 1 1 1 1 1
+  ]
+  for (i,f) in enumerate(foo_list), (j,meta) in enumerate(meta_list)
+    @test f(meta) == results[i,j]
+    @test f(DummyModel(meta)) == results[i,j]
+  end
+end \ No newline at end of file diff --git a/test/nlp/utils.jl b/test/nlp/utils.jl new file mode 100644 index 00000000..d1960df3 --- /dev/null +++ b/test/nlp/utils.jl @@ -0,0 +1,25 @@ +mutable struct SuperNLPModel <: AbstractNLPModel + model +end + +@testset "Testing @lencheck and @rangecheck" begin + x = zeros(2) + @lencheck 2 x + @test_throws DimensionError @lencheck 1 x + @test_throws DimensionError @lencheck 3 x + + @rangecheck 1 3 2 + @test_throws ErrorException @rangecheck 1 3 0 + @test_throws ErrorException @rangecheck 1 3 4 + + io = IOBuffer() + showerror(io, DimensionError(:A, 1, 2)) + @test String(take!(io)) == "DimensionError: Input A should have length 1 not 2" +end + +@testset "Increase coverage of @default_counters" begin + @default_counters SuperNLPModel model + nlp = SuperNLPModel(SimpleNLPModel()) + increment!(nlp, :neval_obj) + @test neval_obj(nlp.model) == 1 +end \ No newline at end of file diff --git a/test/nlp_testutils.jl b/test/nlp_testutils.jl deleted file mode 100644 index 80808ff9..00000000 --- a/test/nlp_testutils.jl +++ /dev/null @@ -1,25 +0,0 @@ -for problem in TestUtils.nlp_problems - @testset "Checking TestUtils tests on problem $problem" begin - nlp_ad = eval(Meta.parse(lowercase(problem) * "_autodiff"))() - nlp_man = eval(Meta.parse(problem))() - - show(IOBuffer(), nlp_ad) - - nlps = [nlp_ad, nlp_man] - @testset "Check Consistency" begin - consistent_nlps(nlps) - end - @testset "Check dimensions" begin - check_nlp_dimensions(nlp_ad) - end - @testset "Check multiple precision" begin - multiple_precision_nlp(nlp_ad) - end - @testset "Check view subarray" begin - view_subarray_nlp(nlp_ad) - end - @testset "Check coordinate memory" begin - coord_memory_nlp(nlp_ad) - end - end -end \ No newline at end of file diff --git a/test/nls/api.jl b/test/nls/api.jl new file mode 100644 index 00000000..5199accb --- /dev/null +++ b/test/nls/api.jl @@ -0,0 +1,144 @@ +@testset "NLS API test on a simple model" begin + + F(x) = [1 - x[1]; 10 * (x[2] - x[1]^2)] + JF(x) = [-1.0 
0.0; -20 * x[1] 10] + HF(x,w) = w[2] * [-20.0 0; 0 0] + c(x) = [x[1] + x[2]^2; x[1]^2 + x[2]; x[1]^2 + x[2]^2 - 1] + J(x) = [1.0 2x[2]; 2x[1] 1.0; 2x[1] 2x[2]] + + nls = SimpleNLSModel() + n = nls.meta.nvar + m = nls.meta.ncon + ne = nls_meta(nls).nequ + + x = randn(n) + v = randn(n) + w = randn(ne) + Jv = zeros(ne) + Jtw = zeros(n) + Hv = zeros(n) + + @test residual(nls, x) ≈ F(x) + @test jac_residual(nls, x) ≈ JF(x) + @test hess_residual(nls, x, w) ≈ HF(x, w) + @test jprod_residual(nls, x, v) ≈ JF(x) * v + @test jtprod_residual(nls, x, w) ≈ JF(x)' * w + @test jprod_residual!(nls, jac_structure_residual(nls)..., jac_coord_residual(nls, x), v, Jv) ≈ JF(x) * v + @test jtprod_residual!(nls, jac_structure_residual(nls)..., jac_coord_residual(nls, x), w, Jtw) ≈ JF(x)' * w + @test jprod_residual!(nls, x, jac_structure_residual(nls)..., v, Jv) ≈ JF(x) * v + @test jtprod_residual!(nls, x, jac_structure_residual(nls)..., w, Jtw) ≈ JF(x)' * w + Jop = jac_op_residual(nls, x) + @test Jop * v ≈ JF(x) * v + @test Jop' * w ≈ JF(x)' * w + Jop = jac_op_residual!(nls, x, Jv, Jtw) + @test Jop * v ≈ JF(x) * v + @test Jop' * w ≈ JF(x)' * w + Jop = jac_op_residual!(nls, jac_structure_residual(nls)..., jac_coord_residual(nls, x), Jv, Jtw) + @test Jop * v ≈ JF(x) * v + @test Jop' * w ≈ JF(x)' * w + Jop = jac_op_residual!(nls, x, jac_structure_residual(nls)..., Jv, Jtw) + @test Jop * v ≈ JF(x) * v + @test Jop' * w ≈ JF(x)' * w + I, J, V = findnz(sparse(HF(x, w))) + @test hess_structure_residual(nls) == (I, J) + @test hess_coord_residual(nls, x, w) ≈ V + for j = 1:ne + eⱼ = [i == j ? 1.0 : 0.0 for i = 1:ne] + @test jth_hess_residual(nls, x, j) ≈ HF(x, eⱼ) + @test hprod_residual(nls, x, j, v) ≈ HF(x, eⱼ) * v + Hop = hess_op_residual(nls, x, j) + @test Hop * v ≈ HF(x, eⱼ) * v + Hop = hess_op_residual!(nls, x, j, Hv) + @test Hop * v ≈ HF(x, eⱼ) * v + end +end + +@testset "NLP API test on a simple NLS model" begin + + F(x) = [1 - x[1]; 10 * (x[2] - x[1]^2)] + JF(x) = [-1.0 0.0; -20 * x[1] 10] + HF(x,w) = w[2] * [-20.0 0; 0 0] + f(x) = norm(F(x))^2 / 2 + ∇f(x) = JF(x)' * F(x) + H(x) = JF(x)' * JF(x) + HF(x, F(x)) + c(x) = [x[1] + x[2]^2; x[1]^2 + x[2]; x[1]^2 + x[2]^2 - 1] + J(x) = [1 2x[2]; 2x[1] 1; 2x[1] 2x[2]] + H(x,y) = H(x) + diagm(0 => [2y[2] + 2y[3]; 2y[1] + 2y[3]]) + + nls = SimpleNLSModel() + n = nls.meta.nvar + m = nls.meta.ncon + + x = randn(n) + y = randn(m) + v = randn(n) + w = randn(m) + Jv = zeros(m) + Jtw = zeros(n) + Hv = zeros(n) + Hvals = zeros(nls.meta.nnzh) + + fx, gx = objgrad!(nls, x, v) + @test obj(nls, x) ≈ norm(F(x))^2 / 2 ≈ fx ≈ f(x) + @test grad(nls, x) ≈ JF(x)' * F(x) ≈ gx ≈ ∇f(x) + @test hess(nls, x) ≈ tril(H(x)) + @test hprod(nls, x, v) ≈ H(x) * v + @test cons(nls, x) ≈ c(x) + @test jac(nls, x) ≈ J(x) + @test jprod(nls, x, v) ≈ J(x) * v + @test jtprod(nls, x, w) ≈ J(x)' * w + @test hess(nls, x, y) ≈ tril(H(x,y)) + @test hprod(nls, x, y, v) ≈ H(x, y) * v + fx, cx = objcons(nls, x) + @test fx ≈ f(x) + @test cx ≈ c(x) + fx, _ = objcons!(nls, x, cx) + @test fx ≈ f(x) + @test cx ≈ c(x) + fx, gx = objgrad(nls, x) + @test fx ≈ f(x) + @test gx ≈ ∇f(x) + fx, _ = objgrad!(nls, x, gx) + @test fx ≈ f(x) + @test gx ≈ ∇f(x) + @test jprod!(nls, jac_structure(nls)..., jac_coord(nls, x), v, Jv) ≈ J(x) * v + @test jprod!(nls, x, jac_structure(nls)..., v, Jv) ≈ J(x) * v + @test jtprod!(nls, jac_structure(nls)..., jac_coord(nls, x), w, Jtw) ≈ J(x)' * w + @test jtprod!(nls, x, jac_structure(nls)..., w, Jtw) ≈ J(x)' * w + Jop = jac_op!(nls, x, Jv, Jtw) + @test Jop * v ≈ J(x) * v + @test Jop' * w ≈ J(x)' * w + 
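+  # jac_op! can also be assembled from a precomputed sparsity structure and
+  # coordinate values, so the operator is rebuilt without re-evaluating the
+  # Jacobian at x: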
Jop = jac_op!(nls, jac_structure(nls)..., jac_coord(nls, x), Jv, Jtw) + @test Jop * v ≈ J(x) * v + @test Jop' * w ≈ J(x)' * w + Jop = jac_op!(nls, x, jac_structure(nls)..., Jv, Jtw) + @test Jop * v ≈ J(x) * v + @test Jop' * w ≈ J(x)' * w + ghjv = zeros(m) + for j = 1:m + eⱼ = [i == j ? 1.0 : 0.0 for i = 1:m] + Cⱼ(x) = H(x, eⱼ) - H(x) + ghjv[j] = dot(gx, Cⱼ(x) * v) + end + @test ghjvprod(nls, x, gx, v) ≈ ghjv + @test hess_coord!(nls, x, Hvals) == hess_coord!(nls, x, y * 0, Hvals) + @test hprod!(nls, hess_structure(nls)..., hess_coord(nls, x), v, Hv) ≈ H(x) * v + @test hprod!(nls, x, hess_structure(nls)..., v, Hv) ≈ H(x) * v + @test hprod!(nls, x, y, hess_structure(nls)..., v, Hv) ≈ H(x, y) * v + Hop = hess_op(nls, x) + @test Hop * v ≈ H(x) * v + Hop = hess_op!(nls, x, Hv) + @test Hop * v ≈ H(x) * v + Hop = hess_op!(nls, hess_structure(nls)..., hess_coord(nls, x), Hv) + @test Hop * v ≈ H(x) * v + Hop = hess_op!(nls, x, hess_structure(nls)..., Hv) + @test Hop * v ≈ H(x) * v + Hop = hess_op(nls, x, y) + @test Hop * v ≈ H(x, y) * v + Hop = hess_op!(nls, x, y, Hv) + @test Hop * v ≈ H(x, y) * v + Hop = hess_op!(nls, hess_structure(nls)..., hess_coord(nls, x, y), Hv) + @test Hop * v ≈ H(x, y) * v + Hop = hess_op!(nls, x, y, hess_structure(nls)..., Hv) + @test Hop * v ≈ H(x, y) * v +end \ No newline at end of file diff --git a/test/nls/counters.jl b/test/nls/counters.jl new file mode 100644 index 00000000..8a09a993 --- /dev/null +++ b/test/nls/counters.jl @@ -0,0 +1,12 @@ +@testset "Increase coverage of NLSCounters" begin + nls = SimpleNLSModel() + obj(nls, nls.meta.x0) + residual(nls, nls.meta.x0) + jac_residual(nls, nls.meta.x0) + @test neval_obj(nls) == 1 + @test neval_residual(nls) == 2 + @test neval_jac_residual(nls) == 1 + @test sum_counters(nls) == 4 + reset!(nls) + @test sum_counters(nls) == 0 +end \ No newline at end of file diff --git a/test/nls/show.jl b/test/nls/show.jl new file mode 100644 index 00000000..162d25af --- /dev/null +++ b/test/nls/show.jl @@ -0,0 +1,39 @@ +@testset "Increase coverage of nls/show.jl" begin + nls = SimpleNLSModel() + io = IOBuffer() + show(io, nls) + showed = String(take!(io)) + expected = """ SimpleNLSModel + Problem name: Simple NLS Model + All variables: ████████████████████ 2 All constraints: ████████████████████ 3 All residuals: ████████████████████ 2 + free: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 free: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 linear: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + lower: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 lower: ██████████████⋅⋅⋅⋅⋅⋅ 2 nonlinear: ████████████████████ 2 + upper: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 upper: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 nnzj: ( 25.00% sparsity) 3 + low/upp: ████████████████████ 2 low/upp: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 nnzh: ( 66.67% sparsity) 1 + fixed: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 fixed: ███████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1 + infeas: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 infeas: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + nnzh: ( 0.00% sparsity) 3 linear: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + nonlinear: ████████████████████ 3 + nnzj: ( 0.00% sparsity) 6 + + Counters: + obj: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 grad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 cons: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + jcon: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jgrad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jac: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + jprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jtprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 hess: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + hprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 residual: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + jac_residual: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jprod_residual: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jtprod_residual: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 + hess_residual: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jhess_residual: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 hprod_residual: 
⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 +""" + @test strip.(split(chomp(showed), "\n")) == strip.(split(chomp(expected), "\n")) + + + io = IOBuffer() + show(io, NLSMeta(1, 1)) + showed = String(take!(io)) + expected = """ All residuals: ████████████████████ 1 + linear: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 +nonlinear: ████████████████████ 1 + nnzj: ( 0.00% sparsity) 1 + nnzh: ( 0.00% sparsity) 1\n\n""" + @test strip.(split(chomp(showed), "\n")) == strip.(split(chomp(expected), "\n")) +end \ No newline at end of file diff --git a/test/nls/simple-model.jl b/test/nls/simple-model.jl new file mode 100644 index 00000000..3790d7a7 --- /dev/null +++ b/test/nls/simple-model.jl @@ -0,0 +1,209 @@ +""" + SimpleNLSModel <: AbstractNLSModel + +Simple NLSModel for testing purposes. +Modified problem 20 in the Hock-Schittkowski Suite. + + min ½‖F(x)‖² + s.to x₁ + x₂² ≥ 0 + x₁² + x₂ ≥ 0 + x₁² + x₂² = 1 + 0 ≤ x ≤ 1, + +where + + F(x) = [1 - x₁; 10 (x₂ - x₁²)] + +x₀ = ones(n). + +Modified SimpleNLSModel. +""" +mutable struct SimpleNLSModel <: AbstractNLSModel + meta :: NLPModelMeta + nls_meta :: NLSMeta + counters :: NLSCounters +end + +function SimpleNLSModel() + meta = NLPModelMeta(2, x0=ones(2), name="Simple NLS Model", lvar=zeros(2), uvar=ones(2), ncon=3, lcon=[0.0; 0.0; 1.0], ucon=[Inf; Inf; 1.0], nnzj=6) + nls_meta = NLSMeta(2, 2, nnzj=3, nnzh=1) + + return SimpleNLSModel(meta, nls_meta, NLSCounters()) +end + +function NLPModels.residual!(nls :: SimpleNLSModel, x :: AbstractVector, Fx :: AbstractVector) + @lencheck 2 x Fx + increment!(nls, :neval_residual) + Fx .= [1 - x[1]; 10 * (x[2] - x[1]^2)] + return Fx +end + +# Jx = [-1 0; -20x₁ 10] +function NLPModels.jac_structure_residual!(nls :: SimpleNLSModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) + @lencheck 3 rows cols + rows .= [1, 2, 2] + cols .= [1, 1, 2] + return rows, cols +end + +function NLPModels.jac_coord_residual!(nls :: SimpleNLSModel, x :: AbstractVector, vals :: AbstractVector) + @lencheck 2 x + @lencheck 3 vals + increment!(nls, :neval_jac_residual) + vals .= [-1, -20x[1], 10] + return vals +end + +function NLPModels.jprod_residual!(nls :: SimpleNLSModel, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) + @lencheck 2 x v Jv + increment!(nls, :neval_jprod_residual) + Jv .= [-v[1]; - 20 * x[1] * v[1] + 10 * v[2]] + return Jv +end + +function NLPModels.jtprod_residual!(nls :: SimpleNLSModel, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) + @lencheck 2 x v Jtv + increment!(nls, :neval_jtprod_residual) + Jtv .= [-v[1] - 20 * x[1] * v[2]; 10 * v[2]] + return Jtv +end + +function NLPModels.hess_structure_residual!(nls :: SimpleNLSModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) + @lencheck 1 rows cols + rows[1] = 1 + cols[1] = 1 + return rows, cols +end + +function NLPModels.hess_coord_residual!(nls :: SimpleNLSModel, x :: AbstractVector, v :: AbstractVector, vals :: AbstractVector) + @lencheck 2 x v + @lencheck 1 vals + increment!(nls, :neval_hess_residual) + vals[1] = -20v[2] + return vals +end + +function NLPModels.hprod_residual!(nls :: SimpleNLSModel, x :: AbstractVector, i :: Int, v :: AbstractVector, Hiv :: AbstractVector) + @lencheck 2 x v Hiv + increment!(nls, :neval_hprod_residual) + if i == 2 + Hiv .= [-20v[1]; 0] + else + Hiv .= zero(eltype(x)) + end + return Hiv +end + +function NLPModels.cons!(nls :: SimpleNLSModel, x :: AbstractVector, cx :: AbstractVector) + @lencheck 2 x + @lencheck 3 cx + increment!(nls, :neval_cons) + cx .= [x[1] + x[2]^2; x[1]^2 + x[2]; x[1]^2 + 
x[2]^2 - 1] + return cx +end + +function NLPModels.jac_structure!(nls :: SimpleNLSModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer}) + @lencheck 6 rows cols + rows .= [1, 1, 2, 2, 3, 3] + cols .= [1, 2, 1, 2, 1, 2] + return rows, cols +end + +function NLPModels.jac_coord!(nls :: SimpleNLSModel, x :: AbstractVector, vals :: AbstractVector) + @lencheck 2 x + @lencheck 6 vals + increment!(nls, :neval_jac) + vals .= [1, 2x[2], 2x[1], 1, 2x[1], 2x[2]] + return vals +end + +function NLPModels.jprod!(nls :: SimpleNLSModel, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector) + @lencheck 2 x v + @lencheck 3 Jv + increment!(nls, :neval_jprod) + Jv .= [v[1] + 2x[2] * v[2]; 2x[1] * v[1] + v[2]; 2x[1] * v[1] + 2x[2] * v[2]] + return Jv +end + +function NLPModels.jtprod!(nls :: SimpleNLSModel, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector) + @lencheck 2 x Jtv + @lencheck 3 v + increment!(nls, :neval_jtprod) + Jtv .= [v[1] + 2x[1] * (v[2] + v[3]); v[2] + 2x[2] * (v[1] + v[3])] + return Jtv +end + +function NLPModels.hess(nls :: SimpleNLSModel, x :: AbstractVector{T}; obj_weight=1.0) where T + @lencheck 2 x + increment!(nls, :neval_hess) + return obj_weight * [T(1)-200*x[2]+600*x[1]^2 T(0);-200*x[1] T(100)] +end + +function NLPModels.hess(nls :: SimpleNLSModel, x :: AbstractVector{T}, y :: AbstractVector{T}; obj_weight=1.0) where T + @lencheck 2 x + @lencheck 3 y + increment!(nls, :neval_hess) + return [obj_weight*(T(1)-200*x[2]+600*x[1]^2)+2*y[2]+2*y[3] T(0);-obj_weight*200*x[1] obj_weight*T(100)+2*y[1]+2*y[3]] +end + +function NLPModels.hess_structure!(nls :: SimpleNLSModel, rows :: AbstractVector{Int}, cols :: AbstractVector{Int}) + @lencheck 3 rows cols + n = nls.meta.nvar + I = ((i,j) for i = 1:n, j = 1:n if i ≥ j) + rows .= getindex.(I, 1) + cols .= getindex.(I, 2) + return rows, cols +end + +function NLPModels.hess_coord!(nls :: SimpleNLSModel, x :: AbstractVector, vals :: AbstractVector; obj_weight=1.0) + @lencheck 2 x + @lencheck 3 vals + Hx = hess(nls, x, obj_weight=obj_weight) + k = 1 + for j = 1:2 + for i = j:2 + vals[k] = Hx[i,j] + k += 1 + end + end + return vals +end + +function NLPModels.hess_coord!(nls :: SimpleNLSModel, x :: AbstractVector, y :: AbstractVector, vals :: AbstractVector; obj_weight=1.0) + @lencheck 2 x + @lencheck 3 y + @lencheck 3 vals + Hx = hess(nls, x, y, obj_weight=obj_weight) + k = 1 + for j = 1:2 + for i = j:2 + vals[k] = Hx[i,j] + k += 1 + end + end + return vals +end + +function NLPModels.hprod!(nls :: SimpleNLSModel, x :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T + @lencheck 2 x v Hv + increment!(nls, :neval_hprod) + Hv .= obj_weight * [T(1)-200*x[2]+600*x[1]^2 -200*x[1];-200*x[1] T(100)] * v + return Hv +end + +function NLPModels.hprod!(nls :: SimpleNLSModel, x :: AbstractVector{T}, y :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T + @lencheck 2 x v Hv + increment!(nls, :neval_hprod) + Hv .= [obj_weight*(T(1)-200*x[2]+600*x[1]^2)+2*y[2]+2*y[3] -obj_weight*200*x[1];-obj_weight*200*x[1] obj_weight*T(100)+2*y[1]+2*y[3]] * v + return Hv +end + +function NLPModels.ghjvprod!(nls :: SimpleNLSModel, x :: AbstractVector{T}, g :: AbstractVector{T}, v :: AbstractVector{T}, gHv :: AbstractVector{T}) where T + @lencheck nls.meta.nvar x g v + @lencheck nls.meta.ncon gHv + increment!(nls, :neval_hprod) + gHv[1] = g[2] * 2v[2] + gHv[2] = g[1] * 2v[1] + gHv[3] = g[1] * 2v[1] + g[2] * 2v[2] + return gHv +end \ No newline 
at end of file diff --git a/test/nls/utils.jl b/test/nls/utils.jl new file mode 100644 index 00000000..a365a245 --- /dev/null +++ b/test/nls/utils.jl @@ -0,0 +1,10 @@ +mutable struct SuperNLSModel <: AbstractNLSModel + model +end + +@testset "Increase coverage of default_nlscounters" begin + @default_nlscounters SuperNLSModel model + nls = SuperNLSModel(SimpleNLSModel()) + increment!(nls, :neval_residual) + @test neval_residual(nls.model) == 1 +end \ No newline at end of file diff --git a/test/nls_testutils.jl b/test/nls_testutils.jl deleted file mode 100644 index c8475dfc..00000000 --- a/test/nls_testutils.jl +++ /dev/null @@ -1,35 +0,0 @@ -for problem in TestUtils.nls_problems - @testset "Checking TestUtils tests on problem $problem" begin - nls_ad = eval(Meta.parse(lowercase(problem) * "_autodiff"))() - nls_man = eval(Meta.parse(problem))() - - nlss = AbstractNLSModel[nls_ad] - # *_special problems are variant definitions of a model - spc = "$(problem)_special" - if isdefined(TestUtils, Symbol(spc)) - push!(nlss, eval(Meta.parse(spc))()) - end - - for nls in nlss - show(IOBuffer(), nls) - end - - @testset "Check Consistency" begin - consistent_nlss([nlss; nls_man]) - end - @testset "Check dimensions" begin - check_nls_dimensions.(nlss) - check_nlp_dimensions.(nlss, exclude_hess=true) - end - @testset "Check multiple precision" begin - for nls in nlss - if typeof(nls) != LLSModel - multiple_precision_nls(nls) - end - end - end - @testset "Check view subarray" begin - view_subarray_nls.(nlss) - end - end -end \ No newline at end of file diff --git a/test/runtests.jl b/test/runtests.jl index 458e73fe..5ddfad05 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,70 +1,17 @@ -include("TestUtils/TestUtils.jl") -using .TestUtils - -using Test, NLPModels, LinearAlgebra, LinearOperators, Printf, SparseArrays - -@info("Testing printing of nlp.meta") -print(IOBuffer(), ADNLPModel(x->0, zeros(10), [-ones(5); -Inf*ones(5)], - [ones(3); Inf*ones(4); collect(2:4)], - name="Unconstrained example").meta) -print(IOBuffer(), ADNLPModel(x->0, zeros(10), x->[0.0;0.0;0.0], [0.0;0.0;-Inf], - [Inf;0.0;0.0], name="Constrained example").meta) - -# A problem with zero variables doesn't make sense. -@test_throws(ErrorException, NLPModelMeta(0)) - -# Default methods should throw MethodError since they're not defined -mutable struct DummyModel <: AbstractNLPModel - meta :: NLPModelMeta -end -model = DummyModel(NLPModelMeta(1)) -@test_throws(MethodError, lagscale(model, 1.0)) -for meth in [:obj, :varscale, :conscale] - @eval @test_throws(MethodError, $meth(model, [0.0])) -end -for meth in [:jac_structure!, :hess_structure!] - @eval @test_throws(MethodError, $meth(model, [0], [1])) -end -for meth in [:grad!, :cons!, :jac_coord!] - @eval @test_throws(MethodError, $meth(model, [0.0], [1.0])) -end -for meth in [:jth_con, :jth_congrad, :jth_sparse_congrad] - @eval @test_throws(MethodError, $meth(model, [0.0], 1)) -end -@test_throws(MethodError, jth_congrad!(model, [0.0], 1, [2.0])) -for meth in [:jprod!, :jtprod!] - @eval @test_throws(MethodError, $meth(model, [0.0], [1.0], [2.0])) -end -@test_throws(MethodError, jth_hprod(model, [0.0], [1.0], 2)) -@test_throws(MethodError, jth_hprod!(model, [0.0], [1.0], 2, [3.0])) -for meth in [:ghjvprod!] 
- @eval @test_throws(MethodError, $meth(model, [0.0], [1.0], [2.0], [3.0])) -end -@assert isa(hess_op(model, [0.]), LinearOperator) -@assert isa(jac_op(model, [0.]), LinearOperator) - -# ADNLPModel with no functions -model = ADNLPModel(x->dot(x,x), zeros(2), name="square") -@assert model.meta.name == "square" - -model = genrose_autodiff() -for counter in fieldnames(typeof(model.counters)) - @eval @assert $counter(model) == 0 -end - -obj(model, model.meta.x0) -@assert neval_obj(model) == 1 - -reset!(model) -@assert neval_obj(model) == 0 - -@test_throws(MethodError, jth_con(model, model.meta.x0, 1)) -include("test_tools.jl") - -include("test_slack_model.jl") -include("test_qn_model.jl") -include("nlp_testutils.jl") -include("nls_testutils.jl") -include("test_autodiff_model.jl") -include("test_nlsmodels.jl") -include("test_feasibility_form_nls.jl") \ No newline at end of file +using LinearAlgebra, LinearOperators, NLPModels, SparseArrays, Test + +include("nlp/simple-model.jl") +include("nlp/dummy-model.jl") + +include("nlp/api.jl") +include("nlp/counters.jl") +include("nlp/meta.jl") +include("nlp/show.jl") +include("nlp/tools.jl") +include("nlp/utils.jl") + +include("nls/simple-model.jl") +include("nls/api.jl") +include("nls/counters.jl") +include("nls/show.jl") +include("nls/utils.jl") \ No newline at end of file diff --git a/test/test-breakage-deploy.jl b/test/test-breakage-deploy.jl index 45147742..7310cd59 100644 --- a/test/test-breakage-deploy.jl +++ b/test/test-breakage-deploy.jl @@ -26,7 +26,7 @@ function test_breakage_deploy() badge_fail(x) = "![](https://img.shields.io/badge/$x-Fail-red)" badge(tf, x) = tf ? badge_pass(x) : badge_fail(x) - packages = ["AmplNLReader", "CUTEst", "CaNNOLeS", "NLPModelsIpopt", "NLPModelsJuMP", "QuadraticModels", "SolverTools"] + packages = [NLPModelsModifiers, NLPModelsTest, AmplNLReader, CaNNOLeS, CUTEst, NLPModelsJuMP, QuadraticModels, SolverTools] output = ":robot: Testing breakage of this pull request\n\n" output *= "| Package Name | master | stable |\n" diff --git a/test/test_autodiff_model.jl b/test/test_autodiff_model.jl deleted file mode 100644 index b8be0709..00000000 --- a/test/test_autodiff_model.jl +++ /dev/null @@ -1,50 +0,0 @@ -mutable struct LinearRegression - x :: Vector - y :: Vector -end - -function (regr::LinearRegression)(beta) - r = regr.y .- beta[1] - beta[2] * regr.x - return dot(r, r) / 2 -end - -function test_autodiff_model() - x0 = zeros(2) - f(x) = dot(x,x) - nlp = ADNLPModel(f, x0) - - c(x) = [sum(x) - 1] - nlp = ADNLPModel(f, x0, c, [0], [0]) - @test obj(nlp, x0) == f(x0) - - x = range(-1, stop=1, length=100) - y = 2x .+ 3 + randn(100) * 0.1 - regr = LinearRegression(x, y) - nlp = ADNLPModel(regr, ones(2)) - β = [ones(100) x] \ y - @test abs(obj(nlp, β) - norm(y .- β[1] - β[2] * x)^2 / 2) < 1e-12 - @test norm(grad(nlp, β)) < 1e-12 - - @testset "Constructors for ADNLPModel" begin - lvar, uvar, lcon, ucon, y0 = -ones(2), ones(2), -ones(1), ones(1), zeros(1) - badlvar, baduvar, badlcon, baducon, bady0 = -ones(3), ones(3), -ones(2), ones(2), zeros(2) - nlp = ADNLPModel(f, x0) - nlp = ADNLPModel(f, x0, lvar, uvar) - nlp = ADNLPModel(f, x0, c, lcon, ucon) - nlp = ADNLPModel(f, x0, c, lcon, ucon, y0=y0) - nlp = ADNLPModel(f, x0, lvar, uvar, c, lcon, ucon) - nlp = ADNLPModel(f, x0, lvar, uvar, c, lcon, ucon, y0=y0) - @test_throws DimensionError ADNLPModel(f, x0, badlvar, uvar) - @test_throws DimensionError ADNLPModel(f, x0, lvar, baduvar) - @test_throws DimensionError ADNLPModel(f, x0, c, badlcon, ucon) - @test_throws DimensionError 
ADNLPModel(f, x0, c, lcon, baducon) - @test_throws DimensionError ADNLPModel(f, x0, c, lcon, ucon, y0=bady0) - @test_throws DimensionError ADNLPModel(f, x0, badlvar, uvar, c, lcon, ucon) - @test_throws DimensionError ADNLPModel(f, x0, lvar, baduvar, c, lcon, ucon) - @test_throws DimensionError ADNLPModel(f, x0, lvar, uvar, c, badlcon, ucon) - @test_throws DimensionError ADNLPModel(f, x0, lvar, uvar, c, lcon, baducon) - @test_throws DimensionError ADNLPModel(f, x0, lvar, uvar, c, lcon, ucon, y0=bady0) - end -end - -test_autodiff_model() diff --git a/test/test_autodiff_nls_model.jl b/test/test_autodiff_nls_model.jl deleted file mode 100644 index b7952128..00000000 --- a/test/test_autodiff_nls_model.jl +++ /dev/null @@ -1,35 +0,0 @@ -function autodiff_nls_test() - @testset "autodiff_nls_test" begin - F(x) = [x[1] - 1; x[2] - x[1]^2] - nls = ADNLSModel(F, zeros(2), 2) - - @test isapprox(residual(nls, ones(2)), zeros(2), rtol=1e-8) - end - - @testset "Constructors for ADNLSModel" begin - F(x) = [x[1] - 1; x[2] - x[1]^2; x[1] * x[2]] - x0 = ones(2) - c(x) = [sum(x) - 1] - lvar, uvar, lcon, ucon, y0 = -ones(2), ones(2), -ones(1), ones(1), zeros(1) - badlvar, baduvar, badlcon, baducon, bady0 = -ones(3), ones(3), -ones(2), ones(2), zeros(2) - nlp = ADNLSModel(F, x0, 3) - nlp = ADNLSModel(F, x0, 3, lvar, uvar) - nlp = ADNLSModel(F, x0, 3, c, lcon, ucon) - nlp = ADNLSModel(F, x0, 3, c, lcon, ucon, y0=y0) - nlp = ADNLSModel(F, x0, 3, lvar, uvar, c, lcon, ucon) - nlp = ADNLSModel(F, x0, 3, lvar, uvar, c, lcon, ucon, y0=y0) - @test_throws DimensionError ADNLSModel(F, x0, 3, badlvar, uvar) - @test_throws DimensionError ADNLSModel(F, x0, 3, lvar, baduvar) - @test_throws DimensionError ADNLSModel(F, x0, 3, c, badlcon, ucon) - @test_throws DimensionError ADNLSModel(F, x0, 3, c, lcon, baducon) - @test_throws DimensionError ADNLSModel(F, x0, 3, c, lcon, ucon, y0=bady0) - @test_throws DimensionError ADNLSModel(F, x0, 3, badlvar, uvar, c, lcon, ucon) - @test_throws DimensionError ADNLSModel(F, x0, 3, lvar, baduvar, c, lcon, ucon) - @test_throws DimensionError ADNLSModel(F, x0, 3, lvar, uvar, c, badlcon, ucon) - @test_throws DimensionError ADNLSModel(F, x0, 3, lvar, uvar, c, lcon, baducon) - @test_throws DimensionError ADNLSModel(F, x0, 3, lvar, uvar, c, lcon, ucon, y0=bady0) - - end -end - -autodiff_nls_test() diff --git a/test/test_feasibility_form_nls.jl b/test/test_feasibility_form_nls.jl deleted file mode 100644 index 592450ef..00000000 --- a/test/test_feasibility_form_nls.jl +++ /dev/null @@ -1,80 +0,0 @@ -function test_nls_to_cons() - @testset "Test FeasibilityFormNLS consistency" begin - F1(x) = [x[1] - 1; 10 * (x[2] - x[1]^2)] - F2(x) = [x[1] * x[2] * x[3] * x[4] * x[5] - 1] - F3(x) = [x[1] + x[2] - 1; x[1]^2 + x[2]^2 - 2; x[1]^3 + x[2]^3 - 3] - c1(x) = [sum(x); x[1] * x[2] - 2] - for (F,n,ne) in [(F1,2,2), (F2,5,1), (F3,2,3)], - (c,m) in [(x->zeros(0),0), (c1,2)] - x0 = [-(1.0)^i for i = 1:n] - nls = ADNLSModel(F, x0, ne, c, zeros(m), zeros(m)) - - nlpcon = FeasibilityFormNLS(nls) - adnlp = ADNLPModel(x->sum(x[n+1:end].^2) / 2, [x0; zeros(ne)], - x->[F(x[1:n]) - x[n+1:end]; c(x[1:n])], - zeros(ne+m), zeros(ne+m)) - TestUtils.consistent_functions([nlpcon; adnlp], exclude=[ghjvprod]) - - adnls = ADNLSModel(x->x[n+1:end], [x0; zeros(ne)], ne, - x->[F(x[1:n]) - x[n+1:end]; c(x[1:n])], - zeros(ne+m), zeros(ne+m)) - TestUtils.consistent_functions([nlpcon; adnls], exclude=[ghjvprod]) - TestUtils.consistent_nls_functions([nlpcon; adnls]) - end - end - - @testset "Test FeasibilityFormNLS with LLSModel" begin - 
for n = [10; 30], ne = [10; 20; 30], m = [0; 20] - for T in [(rows,cols)->Matrix(1.0I, rows, cols) .+ 1, - (rows,cols)->sparse(1.0I, rows, cols) .+ 1, - (rows,cols)->sparse(1.0I, rows, cols) - ] - A = T(ne,n) - b = collect(1:ne) - C = m > 0 ? T(m,n) : zeros(0,n) - lls = LLSModel(A, b, C=C, lcon=zeros(m), ucon=zeros(m)) - nlpcon = FeasibilityFormNLS(lls) - Ine = spdiagm(0 => ones(ne)) - lls2 = LLSModel([spzeros(ne,n) Ine], zeros(ne), - C=[A -Ine; C spzeros(m,ne)], - lcon=[b; zeros(m)], ucon=[b; zeros(m)]) - - TestUtils.consistent_functions([nlpcon; lls2], exclude=[hess, hess_coord, ghjvprod]) - TestUtils.consistent_nls_functions([nlpcon; lls2]) - end - end - end - - @testset "Test FeasibilityFormNLS of a FeasibilityResidual" begin - c(x) = [x[1]^2 + x[2]^2 - 5; x[1] * x[2] - 2; x[1] - 1; x[2] - 1] - x0 = [0.5; 1.5] - nlp = ADNLPModel(x->0, x0, c, zeros(4), zeros(4)) - ffnls = FeasibilityFormNLS(FeasibilityResidual(nlp)) - nlp2 = ADNLSModel(x->x[3:6], [x0; zeros(4)], 4, - x->c(x[1:2]) - x[3:6], zeros(4), zeros(4)) - TestUtils.consistent_functions([ffnls; nlp2]) - TestUtils.consistent_nls_functions([ffnls; nlp2], exclude=[ghjvprod]) - - # The test belows verifies that the nnzj and nnzh information are not lost - n = 10 - m = 2n - A = [spdiagm(0 => 2 * ones(n), 1 => -ones(n-1), -1 => -ones(n-1)); -I] - b = zeros(m) - nlp = LLSModel(spzeros(0, n), zeros(0), C=A, lcon=b, ucon=b) - ffnls = FeasibilityFormNLS(FeasibilityResidual(nlp), name="feas-of-feas") - nlp2 = LLSModel([spzeros(m, n) I], zeros(m), - C=[A -I], lcon=b, ucon=b) - TestUtils.consistent_functions([ffnls; nlp2], exclude=[hess, hess_coord, ghjvprod]) - TestUtils.consistent_nls_functions([ffnls; nlp2]) - end - - @testset "FeasibilityFormNLS of an LLSModel should handle hess related function" begin - lls = LLSModel(rand(10, 5), rand(10), C=rand(2,5), lcon=zeros(2), ucon=zeros(2)) - nls = FeasibilityFormNLS(lls) - @test hess_structure(nls) == (6:15, 6:15) - @test hess_coord(nls, zeros(15)) == ones(10) - @test hess_coord(nls, zeros(15), obj_weight=0.3) == 0.3 * ones(10) - end -end - -test_nls_to_cons() diff --git a/test/test_feasibility_nls_model.jl b/test/test_feasibility_nls_model.jl deleted file mode 100644 index 61ff2a3a..00000000 --- a/test/test_feasibility_nls_model.jl +++ /dev/null @@ -1,21 +0,0 @@ -function feasibility_nls_test() - @testset "feasibility_nls_test" begin - nlp = ADNLPModel(x->0, zeros(2), x->[x[1] - 1; x[2] - x[1]^2], zeros(2), zeros(2)) - nls = FeasibilityResidual(nlp) - - @test isapprox(residual(nls, ones(2)), zeros(2), rtol=1e-8) - - nlp = ADNLPModel(x->0, zeros(2), [-0.3; -0.5], [1.2; 3.4], - x->[x[1] - 1; x[2] - x[1]^2], -ones(2), 2*ones(2)) - nls = FeasibilityResidual(nlp) - - @test nls.meta.nvar == 4 - @test nls.nls_meta.nequ == 2 - @test nls.meta.lvar == [-0.3; -0.5; -1.0; -1.0] - @test nls.meta.uvar == [ 1.2; 3.4; 2.0; 2.0] - @test isapprox(residual(nls, [1.0; 1.0; 0.0; 0.0]), zeros(2), rtol=1e-8) - @test isapprox(residual(nls, [0.0; 1.0; 2.0; 3.0]), [-3.0; -2.0], rtol=1e-8) - end -end - -feasibility_nls_test() diff --git a/test/test_lls_model.jl b/test/test_lls_model.jl deleted file mode 100644 index 49332f58..00000000 --- a/test/test_lls_model.jl +++ /dev/null @@ -1,34 +0,0 @@ -function lls_test() - @testset "lls_test" begin - for A = [Matrix(1.0I, 10, 3) .+ 1, sparse(1.0I, 10, 3) .+ 1], - C = [ones(1, 3), [ones(1,3); -I], sparse(ones(1,3))] - b = collect(1:10) - nequ, nvar = size(A) - ncon = size(C,1) - nls = LLSModel(A, b, C=C, lcon=zeros(ncon), ucon=zeros(ncon)) - x = [1.0; -1.0; 1.0] - - @test 
isapprox(A * x - b, residual(nls, x), rtol=1e-8) - @test A == jac_residual(nls, x) - I, J = jac_structure_residual(nls) - V = jac_coord_residual(nls, x) - @test A == sparse(I, J, V, nequ, nvar) - I, J = hess_structure_residual(nls) - V = hess_coord_residual(nls, x, ones(nequ)) - @test sparse(I, J, V, nvar, nvar) == zeros(nvar, nvar) - @test hess_residual(nls, x, ones(nequ)) == zeros(nvar,nvar) - for i = 1:nequ - @test isapprox(zeros(nvar, nvar), jth_hess_residual(nls, x, i), rtol=1e-8) - end - - I, J = jac_structure(nls) - V = jac_coord(nls, x) - @test sparse(I, J, V, ncon, nvar) == C - - @test nls.meta.nlin == length(nls.meta.lin) == ncon - @test nls.meta.nnln == length(nls.meta.nln) == 0 - end - end -end - -lls_test() diff --git a/test/test_nlsmodels.jl b/test/test_nlsmodels.jl deleted file mode 100644 index 76ebffe0..00000000 --- a/test/test_nlsmodels.jl +++ /dev/null @@ -1,16 +0,0 @@ -mutable struct DummyNLSModel <: AbstractNLSModel -end - -model = DummyNLSModel() - -for mtd in [:residual!, :jac_structure_residual!, :jac_coord_residual!, :hess_structure_residual!] - @eval @test_throws(MethodError, $mtd(model, [0], [1])) -end -for mtd in [:jprod_residual!, :jtprod_residual!, :hess_coord_residual!] - @eval @test_throws(MethodError, $mtd(model, [0], [1], [2])) -end -@test_throws(MethodError, hprod_residual!(model, [0], 1, [2], [3])) - -include("test_autodiff_nls_model.jl") -include("test_lls_model.jl") -include("test_feasibility_nls_model.jl") diff --git a/test/test_qn_model.jl b/test/test_qn_model.jl deleted file mode 100644 index 54cec4c2..00000000 --- a/test/test_qn_model.jl +++ /dev/null @@ -1,85 +0,0 @@ -using LinearOperators - -function check_qn_model(qnmodel) - rtol = 1e-8 - model = qnmodel.model - @assert typeof(qnmodel) <: NLPModels.QuasiNewtonModel - @assert qnmodel.meta.nvar == model.meta.nvar - @assert qnmodel.meta.ncon == model.meta.ncon - - x = [-(-1.0)^i for i = 1:qnmodel.meta.nvar] - - @assert isapprox(obj(model, x), obj(qnmodel, x), rtol=rtol) - @assert neval_obj(model) == 2 - - @assert isapprox(grad(model, x), grad(qnmodel, x), rtol=rtol) - @assert neval_grad(model) == 2 - - @assert isapprox(cons(model, x), cons(qnmodel, x), rtol=rtol) - @assert neval_cons(model) == 2 - - @assert isapprox(jac(model, x), jac(qnmodel, x), rtol=rtol) - @assert neval_jac(model) == 2 - - v = [-(-1.0)^i for i = 1:qnmodel.meta.nvar] - u = [-(-1.0)^i for i = 1:qnmodel.meta.ncon] - - @assert isapprox(jprod(model, x, v), jprod(qnmodel, x, v), rtol=rtol) - @assert neval_jprod(model) == 2 - - @assert isapprox(jtprod(model, x, u), jtprod(qnmodel, x, u), rtol=rtol) - @assert neval_jtprod(model) == 2 - - H = hess_op(qnmodel, x) - @assert typeof(H) <: LinearOperators.AbstractLinearOperator - @assert size(H) == (model.meta.nvar, model.meta.nvar) - @assert isapprox(H * v, hprod(qnmodel, x, v), rtol=rtol) - - g = grad(qnmodel, x) - gp = grad(qnmodel, x - g) - push!(qnmodel, -g, gp - g) # only testing that the call succeeds, not that the update is valid - # the quasi-Newton operator itself is tested in LinearOperators - - reset!(qnmodel) -end - -for problem in ["hs10", "hs11", "hs14", "lincon", "linsv"] - problem_f = eval(Symbol(problem * "_autodiff")) - nlp = problem_f() - @printf("Checking LBFGS formulation of %-8s\t", problem) - qn_model = LBFGSModel(nlp) - check_qn_model(qn_model) - qn_model = LBFGSModel(nlp, mem=2) - check_qn_model(qn_model) - @printf("✓\n") - @printf("Checking LSR1 formulation of %-8s\t", problem) - qn_model = LSR1Model(nlp) - check_qn_model(qn_model) - qn_model = LSR1Model(nlp, 
mem=2) - check_qn_model(qn_model) - @printf("✓\n") -end - -@testset "objgrad of a qnmodel" begin - struct OnlyObjgradModel <: AbstractNLPModel - meta :: NLPModelMeta - counters :: Counters - end - - function OnlyObjgradModel() - meta = NLPModelMeta(2) - OnlyObjgradModel(meta, Counters()) - end - - function NLPModels.objgrad!(:: OnlyObjgradModel, x :: AbstractVector, g :: AbstractVector) - f = (x[1] - 1)^2 + 100 * (x[2] - x[1]^2)^2 - g[1] = 2 * (x[1] - 1) - 400 * x[1] * (x[2] - x[1]^2) - g[2] = 200 * (x[2] - x[1]^2) - f, g - end - - nlp = LBFGSModel(OnlyObjgradModel()) - - @test objgrad!(nlp, nlp.meta.x0, zeros(2)) == objgrad!(nlp.model, nlp.meta.x0, zeros(2)) - @test objgrad(nlp, nlp.meta.x0) == objgrad(nlp.model, nlp.meta.x0) -end \ No newline at end of file diff --git a/test/test_slack_model.jl b/test/test_slack_model.jl deleted file mode 100644 index 0357c1f3..00000000 --- a/test/test_slack_model.jl +++ /dev/null @@ -1,120 +0,0 @@ -@testset "Slack model tests" begin - # an unconstrained problem should be returned unchanged - @printf("Checking slack formulation of genrose\t") - model = genrose_autodiff() - smodel = SlackModel(model) - @test smodel == model - @printf("✓\n") - - # a bound-constrained problem should be returned unchanged - @printf("Checking slack formulation of hs5\t") - model = hs5_autodiff() - smodel = SlackModel(model) - @test smodel == model - @printf("✓\n") - - # an equality-constrained problem should be returned unchanged - @printf("Checking slack formulation of hs6\t") - model = hs6_autodiff() - smodel = SlackModel(model) - @test smodel == model - @printf("✓\n") - - # test problems that actually have inequality constraints - - function check_slack_model(smodel) - rtol = 1e-8 - model = smodel.model - @test typeof(smodel) == NLPModels.SlackModel - n = model.meta.nvar # number of variables in original model - N = smodel.meta.nvar # number of variables in slack model - jlow = model.meta.jlow; nlow = length(jlow) - jupp = model.meta.jupp; nupp = length(jupp) - jrng = model.meta.jrng; nrng = length(jrng) - jfix = model.meta.jfix; nfix = length(jfix) - - @test N == n + model.meta.ncon - nfix - @test smodel.meta.ncon == model.meta.ncon - - x = [-(-1.0)^i for i = 1:N] - s = x[n+1:N] - y = [-(-1.0)^i for i = 1:smodel.meta.ncon] - - # slack variables do not influence objective value - @test isapprox(obj(model, x[1:n]), obj(smodel, x), rtol=rtol) - @test neval_obj(model) == 2 - - g = grad(model, x[1:n]) - G = grad(smodel, x) - @test isapprox(g, G[1:n], rtol=rtol) - @test all(i -> (i ≈ 0), G[n+1:N]) - @test neval_grad(model) == 2 - - h = hess(model, x[1:n], y) - H = hess(smodel, x, y) - @test isapprox(H[1:n, 1:n], h, rtol=rtol) - @test all(i -> (i ≈ 0), H[1:n, n+1:N]) - @test all(i -> (i ≈ 0), H[n+1:N, 1:n]) - @test all(i -> (i ≈ 0), H[n+1:N, n+1:N]) - @test neval_hess(model) == 2 - - v = [-(-1.0)^i for i = 1:N] - hv = hprod(model, x[1:n], y, v[1:n]) - HV = hprod(smodel, x, y, v) - @test isapprox(HV[1:n], hv, rtol=rtol) - @test all(i -> (i ≈ 0), HV[n+1:N]) - @test neval_hprod(model) == 2 - - c = cons(model, x[1:n]) - C = cons(smodel, x) - - # slack variables do not influence equality constraints - @test all(C[jfix] ≈ c[jfix]) - @test all(C[jlow] ≈ c[jlow] - s[1:nlow]) - @test all(C[jupp] ≈ c[jupp] - s[nlow+1:nlow+nupp]) - @test all(C[jrng] ≈ c[jrng] - s[nlow+nupp+1:nlow+nupp+nrng]) - @test neval_cons(model) == 2 - - j = jac(model, x[1:n]) - J = jac(smodel, x) - K = J[:, n+1:N] - @test all(J[:, 1:n] ≈ j) - k = 1 - for l in collect([jlow ; jupp ; jrng]) - @test J[l, n+k] ≈ -1 - 
K[l, k] = 0 - k += 1 - end - @test all(i -> (i ≈ 0), K) - @test neval_jac(model) == 2 - - v = [-(-1.0)^i for i = 1:N] - Jv = J * v - @test all(jprod(smodel, x, v) ≈ Jv) - jv = zeros(smodel.meta.ncon) - @test all(jprod!(smodel, x, v, jv) ≈ Jv) - - u = [-(-1.0)^i for i = 1:smodel.meta.ncon] - Jtu = J' * u - @test all(jtprod(smodel, x, u) ≈ Jtu) - jtu = zeros(N) - @test all(jtprod!(smodel, x, u, jtu) ≈ Jtu) - - reset!(smodel) - end - - for problem in ["hs10", "hs11", "hs14", "lincon", "linsv"] - @printf("Checking slack formulation of %-8s\t", problem) - problem_f = eval(Symbol(problem * "_autodiff")) - nlp = problem_f() - slack_model = SlackModel(nlp) - check_slack_model(slack_model) - @printf("✓\n") - end -end - -@testset "Test that type is maintained (#217)" begin - nlp = ADNLPModel(x -> dot(x, x), ones(Float16, 2), x->sum(x), [-1.0], [1.0]) - snlp = SlackModel(nlp) - @test eltype(snlp.meta.x0) == Float16 -end diff --git a/test/test_tools.jl b/test/test_tools.jl deleted file mode 100644 index fa786e45..00000000 --- a/test/test_tools.jl +++ /dev/null @@ -1,55 +0,0 @@ -nlp = ADNLPModel(x->dot(x,x), zeros(2)) -@test !has_bounds(nlp) -@test !bound_constrained(nlp) -@test unconstrained(nlp) -@test !linearly_constrained(nlp) -@test !equality_constrained(nlp) -@test !inequality_constrained(nlp) - -nlp = ADNLPModel(x->dot(x,x), zeros(2), zeros(2), zeros(2)) -@test has_bounds(nlp) -@test bound_constrained(nlp) -@test !unconstrained(nlp) -@test !linearly_constrained(nlp) -@test !equality_constrained(nlp) -@test !inequality_constrained(nlp) - -nlp = ADNLPModel(x->dot(x,x), zeros(2), x->[prod(x)-1], [0.0], [0.0]) -@test !has_bounds(nlp) -@test !bound_constrained(nlp) -@test !unconstrained(nlp) -@test !linearly_constrained(nlp) -@test equality_constrained(nlp) -@test !inequality_constrained(nlp) - -nlp = ADNLPModel(x->dot(x,x), zeros(2), x->[prod(x)-1], [0.0], [1.0]) -@test !has_bounds(nlp) -@test !bound_constrained(nlp) -@test !unconstrained(nlp) -@test !linearly_constrained(nlp) -@test !equality_constrained(nlp) -@test inequality_constrained(nlp) - -nlp = ADNLPModel(x->dot(x,x), zeros(2), x->[prod(x)-1; sum(x)-1], zeros(2), [0.0; Inf]) -@test !has_bounds(nlp) -@test !bound_constrained(nlp) -@test !unconstrained(nlp) -@test !linearly_constrained(nlp) -@test !equality_constrained(nlp) -@test !inequality_constrained(nlp) - -nlp = ADNLPModel(x->dot(x,x), zeros(2), x->[sum(x)-1], zeros(1), zeros(1), lin=[1]) -@test !has_bounds(nlp) -@test !bound_constrained(nlp) -@test !unconstrained(nlp) -@test linearly_constrained(nlp) -@test equality_constrained(nlp) -@test !inequality_constrained(nlp) - -nlp = ADNLPModel(x->dot(x,x), zeros(2), zeros(2), ones(2), x->[sum(x)-1], zeros(1), zeros(1), lin=[1]) -@test has_bounds(nlp) -@test !bound_constrained(nlp) -@test !unconstrained(nlp) -@test linearly_constrained(nlp) -@test equality_constrained(nlp) -@test !inequality_constrained(nlp)