♻️ Refactor multiple precision tests and fix some multiple precision issues
abelsiqueira committed Jan 25, 2020
1 parent 69de237 commit ecc7a32
Showing 8 changed files with 111 additions and 100 deletions.
49 changes: 23 additions & 26 deletions test/multiple-precision.jl
@@ -1,34 +1,31 @@
- function multiple_precision()
-   @testset "Test multiple precision models" begin
-     for T = [Float16, Float32, Float64, BigFloat]
-       nlp = ADNLPModel(x->sum(x.^4), ones(T, 2),
-                        c=x->[x[1]^2 + x[2]^2 - 1; x[1] * x[2]],
-                        lcon=zeros(T, 2), ucon=zeros(T, 2))
-       x = nlp.meta.x0
-       @test typeof(obj(nlp, x)) == T
-       @test eltype(grad(nlp, x)) == T
-       @test eltype(hess(nlp, x)) == T
-       @test eltype(hess(nlp, x, ones(T, 2))) == T
-       @test eltype(hess(nlp, x, ones(T, 2), obj_weight=one(T))) == T
+ function multiple_precision(nlp :: AbstractNLPModel;
+                             precisions :: Array = [Float16, Float32, Float64, BigFloat])
+   for T in precisions
+     x = ones(T, nlp.meta.nvar)
+     @test typeof(obj(nlp, x)) == T
+     @test eltype(grad(nlp, x)) == T
+     @test eltype(hess(nlp, x)) == T
+     if nlp.meta.ncon > 0
+       @test eltype(cons(nlp, x)) == T
+       @test eltype(jac(nlp, x)) == T
+       @test eltype(hess(nlp, x, ones(T, nlp.meta.ncon))) == T
+       @test eltype(hess(nlp, x, ones(T, nlp.meta.ncon), obj_weight=one(T))) == T
+     end
+   end
+ end

-       nls = ADNLSModel(x->[x[1] - 1; exp(x[2]) - x[1]; sin(x[1]) * x[2]], ones(T, 2), 3,
-                        c=x->[x[1]^2 + x[2]^2 - 1; x[1] * x[2]],
-                        lcon=zeros(T, 2), ucon=zeros(T, 2))
-       x = nlp.meta.x0
-       @test typeof(obj(nls, x)) == T
-       @test eltype(grad(nls, x)) == T
-       @test eltype(hess(nls, x)) == T
-       @test eltype(hess(nlp, x, ones(T, 2))) == T
-       @test eltype(hess(nlp, x, ones(T, 2), obj_weight=one(T))) == T
-       @test eltype(residual(nls, x)) == T
-       @test eltype(jac_residual(nls, x)) == T
-       @test eltype(hess_residual(nls, x, ones(T, 3))) == T
-     end
-   end
- end
-
- multiple_precision()
+ function multiple_precision(nls :: AbstractNLSModel;
+                             precisions :: Array = [Float16, Float32, Float64, BigFloat])
+   for T in precisions
+     x = ones(T, nls.meta.nvar)
+     @test eltype(residual(nls, x)) == T
+     @test eltype(jac_residual(nls, x)) == T
+     @test eltype(hess_residual(nls, x, ones(T, nls.nls_meta.nequ))) == T
+     @test typeof(obj(nls, x)) == T
+     @test eltype(grad(nls, x)) == T
+     if nls.meta.ncon > 0
+       @test eltype(cons(nls, x)) == T
+       @test eltype(jac(nls, x)) == T
+     end
+   end
+ end
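
For context, a minimal sketch (mine, not part of the commit) of how the refactored helper might be called from a package's test suite; the model below simply reuses the constructor call from the removed test, and precisions is the keyword introduced here:

using NLPModels, Test

nlp = ADNLPModel(x -> sum(x.^4), ones(2),
                 c = x -> [x[1]^2 + x[2]^2 - 1; x[1] * x[2]],
                 lcon = zeros(2), ucon = zeros(2))

multiple_precision(nlp)                                   # check the default list of types
multiple_precision(nlp, precisions = [Float32, Float64])  # or restrict it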
22 changes: 11 additions & 11 deletions test/problems/brownden.jl
@@ -10,10 +10,10 @@ function brownden_autodiff()

x0 = [25.0; 5.0; -5.0; -1.0]
f(x) = begin
- s = 0.0
+ T = eltype(x)
+ s = zero(T)
for i = 1:20
- s += ((x[1] + x[2] * i/5 - exp(i/5))^2 + (x[3] + x[4] * sin(i/5) -
-       cos(i/5))^2)^2
+ s += ((x[1] + x[2] * T(i)/5 - exp(T(i)/5))^2 + (x[3] + x[4] * sin(T(i)/5) - cos(T(i)/5))^2)^2
end
return s
end
@@ -32,9 +32,9 @@ function BROWNDEN()
return BROWNDEN(meta, Counters())
end

- function NLPModels.obj(nlp :: BROWNDEN, x :: AbstractVector)
+ function NLPModels.obj(nlp :: BROWNDEN, x :: AbstractVector{T}) where T
increment!(nlp, :neval_obj)
- return sum(((x[1] + x[2] * i/5 - exp(i/5))^2 + (x[3] + x[4] * sin(i/5) - cos(i/5))^2)^2 for i = 1:20)
+ return sum(((x[1] + x[2] * T(i)/5 - exp(T(i)/5))^2 + (x[3] + x[4] * sin(T(i)/5) - cos(T(i)/5))^2)^2 for i = 1:20)
end

function NLPModels.grad!(nlp :: BROWNDEN, x :: AbstractVector, gx :: AbstractVector)
@@ -46,22 +46,22 @@ function NLPModels.grad!(nlp :: BROWNDEN, x :: AbstractVector, gx :: AbstractVec
return gx
end

- function NLPModels.hess(nlp :: BROWNDEN, x :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hess(nlp :: BROWNDEN, x :: AbstractVector{T}; obj_weight=1.0) where T
increment!(nlp, :neval_hess)
- α(x,i) = x[1] + x[2] * i/5 - exp(i/5)
- β(x,i) = x[3] + x[4] * sin(i/5) - cos(i/5)
- Hx = zeros(4, 4)
+ α(x,i) = x[1] + x[2] * T(i)/5 - exp(T(i)/5)
+ β(x,i) = x[3] + x[4] * sin(T(i)/5) - cos(T(i)/5)
+ Hx = zeros(T, 4, 4)
if obj_weight == 0
return Hx
end
for i = 1:20
αi, βi = α(x,i), β(x,i)
- vi, wi = [1; i/5; 0; 0], [0; 0; 1; sin(i/5)]
+ vi, wi = T[1; i/5; 0; 0], T[0; 0; 1; sin(i/5)]
zi = αi * vi + βi * wi
θi = αi^2 + βi^2
Hx += (4vi * vi' + 4wi * wi') * θi + 8zi * zi'
end
- return obj_weight * tril(Hx)
+ return T(obj_weight) * tril(Hx)
end

function NLPModels.hess_structure!(nlp :: BROWNDEN, rows :: AbstractVector{Int}, cols :: AbstractVector{Int})
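
The brownden changes above all target the same pitfall; a small illustration of my own (not part of the commit) of why i/5 breaks the multiple-precision test while T(i)/5 and zero(T) do not:

# Int / Int is Float64 in Julia, so exp(i/5), sin(i/5) and cos(i/5) are
# Float64 and promote the whole objective, even for Float32 or BigFloat x.
x = ones(Float32, 4)
T = eltype(x)
i = 3
typeof(i/5)                                   # Float64
typeof(x[1] + x[2] * i/5 - exp(i/5))          # Float64 -- dragged up by exp(i/5)
typeof(x[1] + x[2] * T(i)/5 - exp(T(i)/5))    # Float32 -- stays in eltype(x)

s = 0.0;     s += x[1]^4; typeof(s)           # Float64 accumulator
s = zero(T); s += x[1]^4; typeof(s)           # Float32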
30 changes: 15 additions & 15 deletions test/problems/hs10.jl
@@ -5,7 +5,7 @@ function hs10_autodiff()

x0 = [-10.0; 10.0]
f(x) = x[1] - x[2]
- c(x) = [-3 * x[1]^2 + 2 * x[1] * x[2] - x[2]^2 + 1.0]
+ c(x) = [-3 * x[1]^2 + 2 * x[1] * x[2] - x[2]^2 + 1]
lcon = [0.0]
ucon = [Inf]

@@ -29,20 +29,20 @@ function NLPModels.obj(nlp :: HS10, x :: AbstractVector)
return x[1] - x[2]
end

- function NLPModels.grad!(nlp :: HS10, x :: AbstractVector, gx :: AbstractVector)
+ function NLPModels.grad!(nlp :: HS10, x :: AbstractVector{T}, gx :: AbstractVector{T}) where T
increment!(nlp, :neval_grad)
- gx .= [1.0; -1.0]
+ gx .= T[1; -1]
return gx
end

- function NLPModels.hess(nlp :: HS10, x :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hess(nlp :: HS10, x :: AbstractVector{T}; obj_weight=1.0) where T
increment!(nlp, :neval_hess)
- return spzeros(2, 2)
+ return spzeros(T, 2, 2)
end

- function NLPModels.hess(nlp :: HS10, x :: AbstractVector, y :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hess(nlp :: HS10, x :: AbstractVector{T}, y :: AbstractVector{T}; obj_weight=1.0) where T
increment!(nlp, :neval_hess)
- return y[1] * [-6.0 0.0; 2.0 -2.0]
+ return y[1] * T[-6.0 0.0; 2.0 -2.0]
end

function NLPModels.hess_structure!(nlp :: HS10, rows :: AbstractVector{Int}, cols :: AbstractVector{Int})
@@ -51,33 +51,33 @@ function NLPModels.hess_structure!(nlp :: HS10, rows :: AbstractVector{Int}, col
return rows, cols
end

- function NLPModels.hess_coord!(nlp :: HS10, x :: AbstractVector, vals :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hess_coord!(nlp :: HS10, x :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=1.0) where T
increment!(nlp, :neval_hess)
- vals .= 0.0
+ vals .= zero(T)
return vals
end

- function NLPModels.hess_coord!(nlp :: HS10, x :: AbstractVector, y :: AbstractVector, vals :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hess_coord!(nlp :: HS10, x :: AbstractVector{T}, y :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=1.0) where T
increment!(nlp, :neval_hess)
- vals .= [-6.0, 2.0, -2.0] * y[1]
+ vals .= T[-6, 2, -2] * y[1]
return vals
end

- function NLPModels.hprod!(nlp :: HS10, x :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hprod!(nlp :: HS10, x :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=1.0) where T
increment!(nlp, :neval_hprod)
- fill!(Hv, 0.0)
+ fill!(Hv, zero(T))
return Hv
end

function NLPModels.hprod!(nlp :: HS10, x :: AbstractVector, y :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight=1.0)
increment!(nlp, :neval_hprod)
- Hv[1:nlp.meta.nvar] .= y[1] * [-6.0 * v[1] + 2.0 * v[2]; 2.0 * v[1] - 2.0 * v[2]]
+ Hv[1:nlp.meta.nvar] .= y[1] * [-6 * v[1] + 2 * v[2]; 2 * v[1] - 2 * v[2]]
return Hv
end

function NLPModels.cons!(nlp :: HS10, x :: AbstractVector, cx :: AbstractVector)
increment!(nlp, :neval_cons)
- cx .= [-3 * x[1]^2 + 2 * x[1] * x[2] - x[2]^2 + 1.0]
+ cx .= [-3 * x[1]^2 + 2 * x[1] * x[2] - x[2]^2 + 1]
return cx
end

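
A short sketch (not from the commit; the helper name is made up) of the pattern applied throughout hs10 and the other problem files: annotating the input as AbstractVector{T} and building constants with T[...] or spzeros(T, ...) keeps every returned array in the caller's precision, whereas plain literals like [1.0; -1.0] are always Float64:

using SparseArrays

# Hypothetical helper returning a constant matrix in the caller's precision.
constant_matrix_like(x::AbstractVector{T}) where T = T[-6 0; 2 -2]

eltype(constant_matrix_like(ones(Float32, 2)))   # Float32
eltype(constant_matrix_like(ones(BigFloat, 2)))  # BigFloat
eltype([-6.0 0.0; 2.0 -2.0])                     # Float64, regardless of the caller
eltype(spzeros(Float16, 2, 2))                   # Float16, as in the new hess above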
22 changes: 11 additions & 11 deletions test/problems/hs11.jl
@@ -33,14 +33,14 @@ function NLPModels.grad!(nlp :: HS11, x :: AbstractVector, gx :: AbstractVector)
return gx
end

- function NLPModels.hess(nlp :: HS11, x :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hess(nlp :: HS11, x :: AbstractVector{T}; obj_weight=one(T)) where T
increment!(nlp, :neval_hess)
- return [2.0 0; 0 2] * obj_weight
+ return T[2 0; 0 2] * obj_weight
end

- function NLPModels.hess(nlp :: HS11, x :: AbstractVector, y :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hess(nlp :: HS11, x :: AbstractVector{T}, y :: AbstractVector{T}; obj_weight=one(T)) where T
increment!(nlp, :neval_hess)
- return y[1] * [-2.0 0; 0 0] + 2obj_weight*I
+ return y[1] * T[-2 0; 0 0] + 2obj_weight*I
end

function NLPModels.hess_structure!(nlp :: HS11, rows :: AbstractVector{Int}, cols :: AbstractVector{Int})
@@ -49,26 +49,26 @@ function NLPModels.hess_structure!(nlp :: HS11, rows :: AbstractVector{Int}, col
return rows, cols
end

- function NLPModels.hess_coord!(nlp :: HS11, x :: AbstractVector, vals :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hess_coord!(nlp :: HS11, x :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=one(T)) where T
increment!(nlp, :neval_hess)
vals .= 2obj_weight
return vals
end

- function NLPModels.hess_coord!(nlp :: HS11, x :: AbstractVector, y :: AbstractVector, vals :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hess_coord!(nlp :: HS11, x :: AbstractVector{T}, y :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=one(T)) where T
increment!(nlp, :neval_hess)
vals .= 2obj_weight
vals[1] -= 2y[1]
return vals
end

- function NLPModels.hprod!(nlp :: HS11, x :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hprod!(nlp :: HS11, x :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T
increment!(nlp, :neval_hprod)
Hv .= 2obj_weight * v
return Hv
end

- function NLPModels.hprod!(nlp :: HS11, x :: AbstractVector, y :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hprod!(nlp :: HS11, x :: AbstractVector{T}, y :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T
increment!(nlp, :neval_hprod)
Hv .= 2obj_weight * v
Hv[1] -= 2y[1] * v[1]
@@ -83,7 +83,7 @@ end

function NLPModels.jac(nlp :: HS11, x :: AbstractVector)
increment!(nlp, :neval_jac)
- return [-2 * x[1] 1.0]
+ return [-2 * x[1] 1]
end

function NLPModels.jac_structure!(nlp :: HS11, rows :: AbstractVector{Int}, cols :: AbstractVector{Int})
@@ -94,7 +94,7 @@ end

function NLPModels.jac_coord!(nlp :: HS11, x :: AbstractVector, vals :: AbstractVector)
increment!(nlp, :neval_jac)
- vals .= [-2 * x[1], 1.0]
+ vals .= [-2 * x[1], 1]
return vals
end

@@ -106,6 +106,6 @@ end

function NLPModels.jtprod!(nlp :: HS11, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector)
increment!(nlp, :neval_jtprod)
- Jtv .= [-2 * x[1]; 1.0] * v[1]
+ Jtv .= [-2 * x[1]; 1] * v[1]
return Jtv
end
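
In hs11 the obj_weight default also changed from 1.0 to one(T); a minimal sketch (illustrative names, not the package's methods) of what that buys:

# A Float64 default keyword re-promotes an otherwise T-typed Hessian;
# one(T) keeps the product in T.
hess_old(x::AbstractVector{T}; obj_weight = 1.0)    where T = T[2 0; 0 2] * obj_weight
hess_new(x::AbstractVector{T}; obj_weight = one(T)) where T = T[2 0; 0 2] * obj_weight

eltype(hess_old(ones(Float16, 2)))   # Float64 -- promoted by the default weight
eltype(hess_new(ones(Float16, 2)))   # Float16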
26 changes: 13 additions & 13 deletions test/problems/hs14.jl
@@ -32,14 +32,14 @@ function NLPModels.grad!(nlp :: HS14, x :: AbstractVector, gx :: AbstractVector)
return gx
end

- function NLPModels.hess(nlp :: HS14, x :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hess(nlp :: HS14, x :: AbstractVector{T}; obj_weight=one(T)) where T
increment!(nlp, :neval_hess)
- return [2.0 0; 0 2] * obj_weight
+ return T[2 0; 0 2] * obj_weight
end

- function NLPModels.hess(nlp :: HS14, x :: AbstractVector, y :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hess(nlp :: HS14, x :: AbstractVector{T}, y :: AbstractVector{T}; obj_weight=one(T)) where T
increment!(nlp, :neval_hess)
- return y[2] * [-0.5 0.0; 0.0 -2.0] + 2obj_weight * I
+ return y[2] * T[-0.5 0.0; 0.0 -2.0] + 2obj_weight * I
end

function NLPModels.hess_structure!(nlp :: HS14, rows :: AbstractVector{Int}, cols :: AbstractVector{Int})
@@ -48,30 +48,30 @@ function NLPModels.hess_structure!(nlp :: HS14, rows :: AbstractVector{Int}, col
return rows, cols
end

- function NLPModels.hess_coord!(nlp :: HS14, x :: AbstractVector, vals :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hess_coord!(nlp :: HS14, x :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=one(T)) where T
increment!(nlp, :neval_hess)
vals .= 2obj_weight
return vals
end

- function NLPModels.hess_coord!(nlp :: HS14, x :: AbstractVector, y :: AbstractVector, vals :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hess_coord!(nlp :: HS14, x :: AbstractVector{T}, y :: AbstractVector{T}, vals :: AbstractVector{T}; obj_weight=one(T)) where T
increment!(nlp, :neval_hess)
vals .= 2obj_weight
- vals[1] -= 0.5y[2]
- vals[2] -= 2.0y[2]
+ vals[1] -= y[2] / 2
+ vals[2] -= 2y[2]
return vals
end

- function NLPModels.hprod!(nlp :: HS14, x :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hprod!(nlp :: HS14, x :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T
increment!(nlp, :neval_hprod)
Hv .= 2obj_weight * v
return Hv
end

- function NLPModels.hprod!(nlp :: HS14, x :: AbstractVector, y :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hprod!(nlp :: HS14, x :: AbstractVector{T}, y :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T
increment!(nlp, :neval_hprod)
Hv .= 2obj_weight * v
- Hv[1] -= 0.5y[2] * v[1]
+ Hv[1] -= y[2] * v[1] / 2
Hv[2] -= 2y[2] * v[2]
return Hv
end
@@ -84,7 +84,7 @@ end

function NLPModels.jac(nlp :: HS14, x :: AbstractVector)
increment!(nlp, :neval_jac)
- return [1.0 -2.0; -x[1] / 2 -2 * x[2]]
+ return [1 -2; -x[1] / 2 -2 * x[2]]
end

function NLPModels.jac_structure!(nlp :: HS14, rows :: AbstractVector{Int}, cols :: AbstractVector{Int})
@@ -95,7 +95,7 @@ end

function NLPModels.jac_coord!(nlp :: HS14, x :: AbstractVector, vals :: AbstractVector)
increment!(nlp, :neval_jac)
- vals .= [1.0, -x[1] / 2, -2.0, -2 * x[2]]
+ vals .= [1, -x[1] / 2, -2, -2 * x[2]]
return vals
end

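
The hs14 edits swap Float64 coefficients such as 0.5y[2] and 2.0y[2] for y[2] / 2 and 2y[2]; a one-line illustration (not from the commit) of why the literal's type matters in reduced precision:

y = Float16(3)
typeof(0.5 * y)   # Float64 -- the Float64 literal wins the promotion
typeof(y / 2)     # Float16 -- dividing by an Int keeps the narrow type
typeof(2.0 * y)   # Float64
typeof(2 * y)     # Float16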
16 changes: 8 additions & 8 deletions test/problems/hs5.jl
@@ -4,7 +4,7 @@ using NLPModels: increment!
function hs5_autodiff()

x0 = [0.0; 0.0]
- f(x) = sin(x[1] + x[2]) + (x[1] - x[2])^2 - 1.5 * x[1] + 2.5 * x[2] + 1
+ f(x) = sin(x[1] + x[2]) + (x[1] - x[2])^2 - 3x[1] / 2 + 5x[2] / 2 + 1
l = [-1.5; -3.0]
u = [4.0; 3.0]

@@ -24,18 +24,18 @@ end

function NLPModels.obj(nlp :: HS5, x :: AbstractVector)
increment!(nlp, :neval_obj)
- return sin(x[1] + x[2]) + (x[1] - x[2])^2 - 1.5 * x[1] + 2.5 * x[2] + 1
+ return sin(x[1] + x[2]) + (x[1] - x[2])^2 - 3x[1] / 2 + 5x[2] / 2 + 1
end

- function NLPModels.grad!(nlp :: HS5, x :: AbstractVector, gx :: AbstractVector)
+ function NLPModels.grad!(nlp :: HS5, x :: AbstractVector{T}, gx :: AbstractVector{T}) where T
increment!(nlp, :neval_grad)
- gx .= cos(x[1] + x[2]) * ones(2) + 2 * (x[1] - x[2]) * [1.0; -1.0] + [-1.5; 2.5]
+ gx .= cos(x[1] + x[2]) * ones(T, 2) + 2 * (x[1] - x[2]) * T[1; -1] + T[-1.5; 2.5]
return gx
end

- function NLPModels.hess(nlp :: HS5, x :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hess(nlp :: HS5, x :: AbstractVector{T}; obj_weight=one(T)) where T
increment!(nlp, :neval_hess)
- return tril(-sin(x[1] + x[2])*ones(2, 2) + [2.0 -2.0; -2.0 2.0]) * obj_weight
+ return tril(-sin(x[1] + x[2])*ones(T, 2, 2) + T[2 -2; -2 2]) * obj_weight
end

function NLPModels.hess_structure!(nlp :: HS5, rows :: AbstractVector{Int}, cols :: AbstractVector{Int})
@@ -53,8 +53,8 @@ function NLPModels.hess_coord!(nlp :: HS5, x :: AbstractVector, vals :: Abstract
return vals
end

- function NLPModels.hprod!(nlp :: HS5, x :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight=1.0)
+ function NLPModels.hprod!(nlp :: HS5, x :: AbstractVector{T}, v :: AbstractVector{T}, Hv :: AbstractVector{T}; obj_weight=one(T)) where T
increment!(nlp, :neval_hprod)
- Hv .= (- sin(x[1] + x[2]) * (v[1] + v[2]) * ones(2) + 2 * [v[1] - v[2]; v[2] - v[1]]) * obj_weight
+ Hv .= (- sin(x[1] + x[2]) * (v[1] + v[2]) * ones(T, 2) + 2 * [v[1] - v[2]; v[2] - v[1]]) * obj_weight
return Hv
end