diff --git a/Project.toml b/Project.toml index ffaf7cb0..1a6289f1 100644 --- a/Project.toml +++ b/Project.toml @@ -14,7 +14,7 @@ SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b" [compat] DataFrames = "1" JLD2 = "0.4" -JuMP = "~0.19, ~0.20, ~0.21, ~0.22, 0.23, 1" +JuMP = "^1.15" Requires = "1" SpecialFunctions = "2" julia = "~1" diff --git a/src/ADNLPProblems/allinit.jl b/src/ADNLPProblems/allinit.jl index 62c00512..d2b2469e 100644 --- a/src/ADNLPProblems/allinit.jl +++ b/src/ADNLPProblems/allinit.jl @@ -14,21 +14,27 @@ function allinit(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wh lvar = -T(Inf) * ones(T, 4) uvar = T(Inf) * ones(T, 4) - function c!(cx, x) - cx[1] = x[2] - 1 - cx[2] = x[3] - cx[3] = x[4] - 2 - return cx - end + #function c!(cx, x) + # cx[3] = x[4] + # cx[1] = x[2] + # cx[2] = x[3] + # return cx + #end + A = T[ + 0 0 0 1; + 0 1 0 0; + 0 0 1 0 + ] return ADNLPModels.ADNLPModel!( f, x0, lvar, uvar, - c!, - T[0, -1e+10, 0], - T[Inf, 1, 0], + findnz(sparse(A))..., + (cx, x) -> cx, + T[2, 1, -1e+10], + T[2, Inf, 1], name = "allinit", minimize = true; kwargs..., diff --git a/src/ADNLPProblems/allinitc.jl b/src/ADNLPProblems/allinitc.jl index d48cae5d..c01dd1ef 100644 --- a/src/ADNLPProblems/allinitc.jl +++ b/src/ADNLPProblems/allinitc.jl @@ -15,21 +15,24 @@ function allinitc(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) w uvar = T(Inf) * ones(T, 4) function c!(cx, x) - cx[1] = x[2] - 1 - cx[2] = x[3] - cx[3] = x[4] - 2 - cx[4] = x[1]^2 + x[2]^2 - 1 + cx[1] = x[1]^2 + x[2]^2 return cx end + A = T[ + 0 0 0 1; + 0 1 0 0; + 0 0 1 0 + ] return ADNLPModels.ADNLPModel!( f, x0, lvar, uvar, + sparse(A), c!, - T[0, -1e+10, 0, -Inf], - T[Inf, 1, 0, 0], + T[2, 1, -1e+10, -Inf], + T[2, Inf, 1, 1], name = "allinitc", minimize = true; kwargs..., diff --git a/src/ADNLPProblems/alsotame.jl b/src/ADNLPProblems/alsotame.jl index e4124535..23535916 100644 --- a/src/ADNLPProblems/alsotame.jl +++ b/src/ADNLPProblems/alsotame.jl @@ -12,19 +12,22 @@ function alsotame(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
w function c!(cx, x) x, y = x[1], x[2] cx[1] = sin(-x + y - 1) - cx[2] = x - cx[3] = y return cx end + A = T[ + 1 0 0; + 0 1 0 + ] return ADNLPModels.ADNLPModel!( f, x0, lvar, uvar, + findnz(sparse(A))..., c!, - T[0, -2, -1.5], - T[0, 2, 1.5], + T[-2, -1.5, 0], + T[2, 1.5, 0], name = "alsotame", minimize = true; kwargs..., diff --git a/src/ADNLPProblems/avion2.jl b/src/ADNLPProblems/avion2.jl index dd40d10c..36f218c0 100644 --- a/src/ADNLPProblems/avion2.jl +++ b/src/ADNLPProblems/avion2.jl @@ -179,123 +179,13 @@ function avion2(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwa 1, 1, ] + A = sparse([1, 2, 3, 2, 4, 3, 1, 4, 4, 4, 4, 5, 5, 7, 10, 14, 6, 8, 14, 14, 6, 13, 15, 7, 7, 8, 15, 9, 15, 10, 15, 11, 15, 12, 15, 13, 15, 9, 14, 11, 14, 12, 14], [1, 1, 2, 5, 5, 6, 7, 7, 8, 9, 10, 10, + 19, 20, 20, 20, 22, 22, 22, 23, 24, 26, 31, 33, 34, 35, 35, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 47, 47, 48, 48, 49, 49], T[-0.13, -0.7, -1.0, 1.0, -2.0, 1.0, 1.0, -2.0, -2.0, -1.0, 1.0, -20.0, 1.0, -1.0, -0.043, 0.5, -2.0, -0.137, -1.0, 1.0, 1.0, -300.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -35.0, 660.0, -200.0, 95.0, -120.0, 70.0], 15, 49) function c!(cx, x) - SR, - LR, - PK, - EF, - SX, - LX, - SD, - SK, - ST, - SF, - LF, - AM, - CA, - CB, - SO, - SS, - IMPDER, - IMPK, - IMPFUS, - QI, - PT, - MV, - MC, - MD, - PD, - NS, - VS, - CR, - PM, - DV, - MZ, - VN, - QV, - QF, - IMPTRAIN, - IMPMOT, - IMPNMOT, - IMPPET, - IMPPIL, - IMPCAN, - IMPSNA, - MS, - EL, - DE, - DS, - IMPVOIL, - NM, - NP, - NG = x[1], - x[2], - x[3], - x[4], - x[5], - x[6], - x[7], - x[8], - x[9], - x[10], - x[11], - x[12], - x[13], - x[14], - x[15], - x[16], - x[17], - x[18], - x[19], - x[20], - x[21], - x[22], - x[23], - x[24], - x[25], - x[26], - x[27], - x[28], - x[29], - x[30], - x[31], - x[32], - x[33], - x[34], - x[35], - x[36], - x[37], - x[38], - x[39], - x[40], - x[41], - x[42], - x[43], - x[44], - x[45], - x[46], - x[47], - x[48], - x[49] - cx[1] = SD - (13 // 100) * SR - cx[2] = SX - (7 // 10) * SR - cx[3] = LX - LR - cx[4] = SF - ST - 2 * SD - 2 * SX - 2 * SK - cx[5] = IMPFUS - 20 * SF - cx[6] = MD - 2 * MV - cx[7] = QF - QI - QV - cx[8] = IMPTRAIN - (137 // 1000) * MV - cx[9] = IMPNMOT - 35 * NM - cx[10] = IMPPET - (43 // 1000) * QI - cx[11] = IMPPIL - 200 * NP - cx[12] = IMPCAN - 120 * NG - cx[13] = IMPSNA - 300 * NS - 400 - cx[14] = MC - MV + 95 * NP + 70 * NG + 660 * NM + (1 // 2) * QI - 380 - cx[15] = MZ - IMPTRAIN + IMPNMOT + IMPPET + IMPPIL + IMPCAN + IMPSNA + 290 return cx end - lcon = zeros(T, 15) - ucon = zeros(T, 15) + lcon = vcat(zeros(T, 12), 400, 380, -290) + ucon = vcat(zeros(T, 12), 400, 380, -290) lvar = T[ 10, 0, @@ -399,7 +289,7 @@ function avion2(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwa 2, ] - return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "avion2"; kwargs...) + return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, findnz(A)..., c!, lcon, ucon, name = "avion2"; kwargs...) end function avion2(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} diff --git a/src/ADNLPProblems/booth.jl b/src/ADNLPProblems/booth.jl index b81d8e67..aed920e9 100644 --- a/src/ADNLPProblems/booth.jl +++ b/src/ADNLPProblems/booth.jl @@ -6,17 +6,20 @@ function booth(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
wher end x0 = zeros(T, 2) + A = T[ + 1 2; + 2 1 + ] function c!(cx, x) - cx[1] = x[1] + 2 * x[2] - 7 - cx[2] = 2 * x[1] + x[2] - 5 return cx end return ADNLPModels.ADNLPModel!( f, x0, + findnz(sparse(A))..., c!, - zeros(T, 2), - zeros(T, 2), + T[7, 5], + T[7, 5], minimize = true, name = "booth"; kwargs..., diff --git a/src/ADNLPProblems/bqp1var.jl b/src/ADNLPProblems/bqp1var.jl index 2c315737..aa06739f 100644 --- a/src/ADNLPProblems/bqp1var.jl +++ b/src/ADNLPProblems/bqp1var.jl @@ -6,7 +6,6 @@ function bqp1var(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wh end x0 = T[0.25,] function c!(cx, x) - cx[1] = x[1] return cx end lcon = T[0.0,] @@ -15,6 +14,7 @@ function bqp1var(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wh return ADNLPModels.ADNLPModel!( f, x0, + findnz(sparse(ones(T, 1, 1)))..., c!, lcon, ucon, diff --git a/src/ADNLPProblems/britgas.jl b/src/ADNLPProblems/britgas.jl index 8ade774a..09dfca50 100644 --- a/src/ADNLPProblems/britgas.jl +++ b/src/ADNLPProblems/britgas.jl @@ -2296,1037 +2296,1037 @@ function britgas(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wh x[448], x[449], x[450] - cx[1] = + cx[25] = p1_1 / ((1) + (1) * p1_1) - p1_0 / ((1) + (1) * p1_0) - T(0.75) * q1_17_1 - T(0.75) * q1_2_1 + in1_1 - T(0.25) * q1_17_0 - T(0.25) * q1_2_0 - cx[2] = + cx[26] = p2_1 / ((1) + (1) * p2_1) - p2_0 / ((1) + (1) * p2_0) - T(0.75) * q2_3_1 + T(0.75) * q1_2_1 - T(0.25) * q2_3_0 + T(0.25) * q1_2_0 - 1 - cx[3] = + cx[27] = p3_1 / ((1) + (1) * p3_1) - p3_0 / ((1) + (1) * p3_0) - f3_4_1 + T(0.75) * q2_3_1 + T(0.25) * q2_3_0 - cx[4] = + cx[28] = p4_1 / ((1) + (1) * p4_1) - p4_0 / ((1) + (1) * p4_0) - T(0.75) * q4_5_1 + f3_4_1 - T(0.25) * q4_5_0 - cx[5] = + cx[29] = p5_1 / ((1) + (1) * p5_1) - p5_0 / ((1) + (1) * p5_0) - T(0.75) * q5_6_1 - f5_7_1 + T(0.75) * q4_5_1 - T(0.25) * q5_6_0 + T(0.25) * q4_5_0 - cx[6] = + cx[30] = p6_1 / ((1) + (1) * p6_1) - p6_0 / ((1) + (1) * p6_0) + T(0.75) * q5_6_1 + T(0.25) * q5_6_0 - 1 - cx[7] = + cx[31] = p7_1 / ((1) + (1) * p7_1) - p7_0 / ((1) + (1) * p7_0) - T(0.75) * q7_8_1 + f5_7_1 - T(0.25) * q7_8_0 - cx[8] = + cx[32] = p8_1 / ((1) + (1) * p8_1) - p8_0 / ((1) + (1) * p8_0) - T(0.75) * q8_9_1 - T(0.75) * q8_10_1 - T(0.75) * q8_11_1 + T(0.75) * q7_8_1 - T(0.25) * q8_9_0 - T(0.25) * q8_10_0 - T(0.25) * q8_11_0 + T(0.25) * q7_8_0 - cx[9] = + cx[33] = p9_1 / ((1) + (1) * p9_1) - p9_0 / ((1) + (1) * p9_0) + T(0.75) * q8_9_1 + T(0.25) * q8_9_0 - cx[10] = + cx[34] = p10_1 / ((1) + (1) * p10_1) - p10_0 / ((1) + (1) * p10_0) + T(0.75) * q8_10_1 + T(0.25) * q8_10_0 - 1 - cx[11] = + cx[35] = p11_1 / ((1) + (1) * p11_1) - p11_0 / ((1) + (1) * p11_0) - T(0.75) * q11_12_1 + T(0.75) * q8_11_1 - T(0.25) * q11_12_0 + T(0.25) * q8_11_0 - cx[12] = + cx[36] = p12_1 / ((1) + (1) * p12_1) - p12_0 / ((1) + (1) * p12_0) - T(0.75) * q12_13_1 + T(0.75) * q11_12_1 - T(0.25) * q12_13_0 + T(0.25) * q11_12_0 - cx[13] = + cx[37] = p13_1 / ((1) + (1) * p13_1) - p13_0 / ((1) + (1) * p13_0) - T(0.75) * q13_14_1 - T(0.75) * q13_15_1 + T(0.75) * q12_13_1 - T(0.25) * q13_14_0 - T(0.25) * q13_15_0 + T(0.25) * q12_13_0 - 1 - cx[14] = + cx[38] = p14_1 / ((1) + (1) * p14_1) - p14_0 / ((1) + (1) * p14_0) + T(0.75) * q13_14_1 + T(0.25) * q13_14_0 - cx[15] = + cx[39] = p15_1 / ((1) + (1) * p15_1) - p15_0 / ((1) + (1) * p15_0) - T(0.75) * q15_16_1 + T(0.75) * q13_15_1 - T(0.25) * q15_16_0 + T(0.25) * q13_15_0 - 1 - cx[16] = + cx[40] = p16_1 / ((1) + (1) * p16_1) - p16_0 / ((1) + (1) * p16_0) + T(0.75) * q15_16_1 + T(0.25) * q15_16_0 - out16_1 - cx[17] = 
+ cx[41] = p17_1 / ((1) + (1) * p17_1) - p17_0 / ((1) + (1) * p17_0) - T(0.75) * q17_18_1 + T(0.75) * q1_17_1 - T(0.25) * q17_18_0 + T(0.25) * q1_17_0 - 1 - cx[18] = + cx[42] = p18_1 / ((1) + (1) * p18_1) - p18_0 / ((1) + (1) * p18_0) - T(0.75) * q18_19_1 + T(0.75) * q17_18_1 - T(0.25) * q18_19_0 + T(0.25) * q17_18_0 - 1 - cx[19] = + cx[43] = p19_1 / ((1) + (1) * p19_1) - p19_0 / ((1) + (1) * p19_0) - f19_20_1 + T(0.75) * q18_19_1 + T(0.25) * q18_19_0 - cx[20] = + cx[44] = p20_1 / ((1) + (1) * p20_1) - p20_0 / ((1) + (1) * p20_0) - T(0.75) * q20_21_1 + f19_20_1 - T(0.25) * q20_21_0 - cx[21] = + cx[45] = p21_1 / ((1) + (1) * p21_1) - p21_0 / ((1) + (1) * p21_0) - T(0.75) * q21_22_1 + T(0.75) * q20_21_1 - T(0.25) * q21_22_0 + T(0.25) * q20_21_0 - 1 - cx[22] = + cx[46] = p22_1 / ((1) + (1) * p22_1) - p22_0 / ((1) + (1) * p22_0) - T(0.75) * q22_23_1 + T(0.75) * q21_22_1 - T(0.25) * q22_23_0 + T(0.25) * q21_22_0 - 1 - cx[23] = + cx[47] = p23_1 / ((1) + (1) * p23_1) - p23_0 / ((1) + (1) * p23_0) + T(0.75) * q22_23_1 + T(0.25) * q22_23_0 - out23_1 - cx[24] = p3_1 * r3_4_1 - p4_1 - cx[25] = p5_1 * r5_7_1 - p7_1 - cx[26] = p19_1 * r19_20_1 - p20_1 - cx[27] = + cx[1] = p3_1 * r3_4_1 - p4_1 + cx[2] = p5_1 * r5_7_1 - p7_1 + cx[3] = p19_1 * r19_20_1 - p20_1 + cx[48] = p1_1 * p1_1 - p2_1 * p2_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p1_1 + p2_1)) * ((abs(q1_2_1))^T(1.8539)) - cx[28] = + cx[49] = p1_1 * p1_1 - p17_1 * p17_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p1_1 + p17_1)) * ((abs(q1_17_1))^T(1.8539)) - cx[29] = + cx[50] = p2_1 * p2_1 - p3_1 * p3_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p2_1 + p3_1)) * ((abs(q2_3_1))^T(1.8539)) - cx[30] = + cx[51] = p4_1 * p4_1 - p5_1 * p5_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p4_1 + p5_1)) * ((abs(q4_5_1))^T(1.8539)) - cx[31] = + cx[52] = p5_1 * p5_1 - p6_1 * p6_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p5_1 + p6_1)) * ((abs(q5_6_1))^T(1.8539)) - cx[32] = + cx[53] = p7_1 * p7_1 - p8_1 * p8_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p7_1 + p8_1)) * ((abs(q7_8_1))^T(1.8539)) - cx[33] = + cx[54] = p8_1 * p8_1 - p9_1 * p9_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_1 + p9_1)) * ((abs(q8_9_1))^T(1.8539)) - cx[34] = + cx[55] = p8_1 * p8_1 - p10_1 * p10_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_1 + p10_1)) * ((abs(q8_10_1))^T(1.8539)) - cx[35] = + cx[56] = p8_1 * p8_1 - p11_1 * p11_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_1 + p11_1)) * ((abs(q8_11_1))^T(1.8539)) - cx[36] = + cx[57] = p11_1 * p11_1 - p12_1 * p12_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p11_1 + p12_1)) * ((abs(q11_12_1))^T(1.8539)) - cx[37] = + cx[58] = p12_1 * p12_1 - p13_1 * p13_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p12_1 + p13_1)) * ((abs(q12_13_1))^T(1.8539)) - cx[38] = + cx[59] = p13_1 * p13_1 - p14_1 * p14_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p13_1 + p14_1)) * ((abs(q13_14_1))^T(1.8539)) - cx[39] = + cx[60] = p13_1 * p13_1 - p15_1 * p15_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p13_1 + p15_1)) * ((abs(q13_15_1))^T(1.8539)) - cx[40] = + cx[61] = p15_1 * p15_1 - p16_1 * p16_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p15_1 + p16_1)) * ((abs(q15_16_1))^T(1.8539)) - cx[41] = + cx[62] = p17_1 * p17_1 - p18_1 * p18_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p17_1 + p18_1)) * ((abs(q17_18_1))^T(1.8539)) - cx[42] = + cx[63] = p18_1 * p18_1 - p19_1 * p19_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p18_1 + p19_1)) * ((abs(q18_19_1))^T(1.8539)) - cx[43] = + cx[64] = p20_1 * p20_1 - p21_1 * p21_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p20_1 + p21_1)) * ((abs(q20_21_1))^T(1.8539)) - cx[44] = + cx[65] = p21_1 * p21_1 - p22_1 * p22_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p21_1 + 
p22_1)) * ((abs(q21_22_1))^T(1.8539)) - cx[45] = + cx[66] = p22_1 * p22_1 - p23_1 * p23_1 - T(0.01) * ((1) + (T(0.5) * 1) * (p22_1 + p23_1)) * ((abs(q22_23_1))^T(1.8539)) - cx[46] = + cx[67] = p1_2 / ((1) + (1) * p1_2) - p1_1 / ((1) + (1) * p1_1) - T(0.75) * q1_17_2 - T(0.75) * q1_2_2 + in1_2 - T(0.25) * q1_17_1 - T(0.25) * q1_2_1 - cx[47] = + cx[68] = p2_2 / ((1) + (1) * p2_2) - p2_1 / ((1) + (1) * p2_1) - T(0.75) * q2_3_2 + T(0.75) * q1_2_2 - T(0.25) * q2_3_1 + T(0.25) * q1_2_1 - 1 - cx[48] = + cx[69] = p3_2 / ((1) + (1) * p3_2) - p3_1 / ((1) + (1) * p3_1) - f3_4_2 + T(0.75) * q2_3_2 + T(0.25) * q2_3_1 - cx[49] = + cx[70] = p4_2 / ((1) + (1) * p4_2) - p4_1 / ((1) + (1) * p4_1) - T(0.75) * q4_5_2 + f3_4_2 - T(0.25) * q4_5_1 - cx[50] = + cx[71] = p5_2 / ((1) + (1) * p5_2) - p5_1 / ((1) + (1) * p5_1) - T(0.75) * q5_6_2 - f5_7_2 + T(0.75) * q4_5_2 - T(0.25) * q5_6_1 + T(0.25) * q4_5_1 - cx[51] = + cx[72] = p6_2 / ((1) + (1) * p6_2) - p6_1 / ((1) + (1) * p6_1) + T(0.75) * q5_6_2 + T(0.25) * q5_6_1 - 1 - cx[52] = + cx[73] = p7_2 / ((1) + (1) * p7_2) - p7_1 / ((1) + (1) * p7_1) - T(0.75) * q7_8_2 + f5_7_2 - T(0.25) * q7_8_1 - cx[53] = + cx[74] = p8_2 / ((1) + (1) * p8_2) - p8_1 / ((1) + (1) * p8_1) - T(0.75) * q8_9_2 - T(0.75) * q8_10_2 - T(0.75) * q8_11_2 + T(0.75) * q7_8_2 - T(0.25) * q8_9_1 - T(0.25) * q8_10_1 - T(0.25) * q8_11_1 + T(0.25) * q7_8_1 - cx[54] = + cx[75] = p9_2 / ((1) + (1) * p9_2) - p9_1 / ((1) + (1) * p9_1) + T(0.75) * q8_9_2 + T(0.25) * q8_9_1 - cx[55] = + cx[76] = p10_2 / ((1) + (1) * p10_2) - p10_1 / ((1) + (1) * p10_1) + T(0.75) * q8_10_2 + T(0.25) * q8_10_1 - 1 - cx[56] = + cx[77] = p11_2 / ((1) + (1) * p11_2) - p11_1 / ((1) + (1) * p11_1) - T(0.75) * q11_12_2 + T(0.75) * q8_11_2 - T(0.25) * q11_12_1 + T(0.25) * q8_11_1 - cx[57] = + cx[78] = p12_2 / ((1) + (1) * p12_2) - p12_1 / ((1) + (1) * p12_1) - T(0.75) * q12_13_2 + T(0.75) * q11_12_2 - T(0.25) * q12_13_1 + T(0.25) * q11_12_1 - cx[58] = + cx[79] = p13_2 / ((1) + (1) * p13_2) - p13_1 / ((1) + (1) * p13_1) - T(0.75) * q13_14_2 - T(0.75) * q13_15_2 + T(0.75) * q12_13_2 - T(0.25) * q13_14_1 - T(0.25) * q13_15_1 + T(0.25) * q12_13_1 - 1 - cx[59] = + cx[80] = p14_2 / ((1) + (1) * p14_2) - p14_1 / ((1) + (1) * p14_1) + T(0.75) * q13_14_2 + T(0.25) * q13_14_1 - cx[60] = + cx[81] = p15_2 / ((1) + (1) * p15_2) - p15_1 / ((1) + (1) * p15_1) - T(0.75) * q15_16_2 + T(0.75) * q13_15_2 - T(0.25) * q15_16_1 + T(0.25) * q13_15_1 - 1 - cx[61] = + cx[82] = p16_2 / ((1) + (1) * p16_2) - p16_1 / ((1) + (1) * p16_1) + T(0.75) * q15_16_2 + T(0.25) * q15_16_1 - out16_2 - cx[62] = + cx[83] = p17_2 / ((1) + (1) * p17_2) - p17_1 / ((1) + (1) * p17_1) - T(0.75) * q17_18_2 + T(0.75) * q1_17_2 - T(0.25) * q17_18_1 + T(0.25) * q1_17_1 - 1 - cx[63] = + cx[84] = p18_2 / ((1) + (1) * p18_2) - p18_1 / ((1) + (1) * p18_1) - T(0.75) * q18_19_2 + T(0.75) * q17_18_2 - T(0.25) * q18_19_1 + T(0.25) * q17_18_1 - 1 - cx[64] = + cx[85] = p19_2 / ((1) + (1) * p19_2) - p19_1 / ((1) + (1) * p19_1) - f19_20_2 + T(0.75) * q18_19_2 + T(0.25) * q18_19_1 - cx[65] = + cx[86] = p20_2 / ((1) + (1) * p20_2) - p20_1 / ((1) + (1) * p20_1) - T(0.75) * q20_21_2 + f19_20_2 - T(0.25) * q20_21_1 - cx[66] = + cx[87] = p21_2 / ((1) + (1) * p21_2) - p21_1 / ((1) + (1) * p21_1) - T(0.75) * q21_22_2 + T(0.75) * q20_21_2 - T(0.25) * q21_22_1 + T(0.25) * q20_21_1 - 1 - cx[67] = + cx[88] = p22_2 / ((1) + (1) * p22_2) - p22_1 / ((1) + (1) * p22_1) - T(0.75) * q22_23_2 + T(0.75) * q21_22_2 - T(0.25) * q22_23_1 + T(0.25) * q21_22_1 - 1 - cx[68] = + cx[89] = p23_2 / ((1) + (1) * p23_2) - 
p23_1 / ((1) + (1) * p23_1) + T(0.75) * q22_23_2 + T(0.25) * q22_23_1 - out23_2 - cx[69] = p3_2 * r3_4_2 - p4_2 - cx[70] = p5_2 * r5_7_2 - p7_2 - cx[71] = p19_2 * r19_20_2 - p20_2 - cx[72] = + cx[4] = p3_2 * r3_4_2 - p4_2 + cx[5] = p5_2 * r5_7_2 - p7_2 + cx[6] = p19_2 * r19_20_2 - p20_2 + cx[90] = p1_2 * p1_2 - p2_2 * p2_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p1_2 + p2_2)) * ((abs(q1_2_2))^T(1.8539)) - cx[73] = + cx[91] = p1_2 * p1_2 - p17_2 * p17_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p1_2 + p17_2)) * ((abs(q1_17_2))^T(1.8539)) - cx[74] = + cx[92] = p2_2 * p2_2 - p3_2 * p3_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p2_2 + p3_2)) * ((abs(q2_3_2))^T(1.8539)) - cx[75] = + cx[93] = p4_2 * p4_2 - p5_2 * p5_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p4_2 + p5_2)) * ((abs(q4_5_2))^T(1.8539)) - cx[76] = + cx[94] = p5_2 * p5_2 - p6_2 * p6_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p5_2 + p6_2)) * ((abs(q5_6_2))^T(1.8539)) - cx[77] = + cx[95] = p7_2 * p7_2 - p8_2 * p8_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p7_2 + p8_2)) * ((abs(q7_8_2))^T(1.8539)) - cx[78] = + cx[96] = p8_2 * p8_2 - p9_2 * p9_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_2 + p9_2)) * ((abs(q8_9_2))^T(1.8539)) - cx[79] = + cx[97] = p8_2 * p8_2 - p10_2 * p10_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_2 + p10_2)) * ((abs(q8_10_2))^T(1.8539)) - cx[80] = + cx[98] = p8_2 * p8_2 - p11_2 * p11_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_2 + p11_2)) * ((abs(q8_11_2))^T(1.8539)) - cx[81] = + cx[99] = p11_2 * p11_2 - p12_2 * p12_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p11_2 + p12_2)) * ((abs(q11_12_2))^T(1.8539)) - cx[82] = + cx[100] = p12_2 * p12_2 - p13_2 * p13_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p12_2 + p13_2)) * ((abs(q12_13_2))^T(1.8539)) - cx[83] = + cx[101] = p13_2 * p13_2 - p14_2 * p14_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p13_2 + p14_2)) * ((abs(q13_14_2))^T(1.8539)) - cx[84] = + cx[102] = p13_2 * p13_2 - p15_2 * p15_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p13_2 + p15_2)) * ((abs(q13_15_2))^T(1.8539)) - cx[85] = + cx[103] = p15_2 * p15_2 - p16_2 * p16_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p15_2 + p16_2)) * ((abs(q15_16_2))^T(1.8539)) - cx[86] = + cx[104] = p17_2 * p17_2 - p18_2 * p18_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p17_2 + p18_2)) * ((abs(q17_18_2))^T(1.8539)) - cx[87] = + cx[105] = p18_2 * p18_2 - p19_2 * p19_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p18_2 + p19_2)) * ((abs(q18_19_2))^T(1.8539)) - cx[88] = + cx[106] = p20_2 * p20_2 - p21_2 * p21_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p20_2 + p21_2)) * ((abs(q20_21_2))^T(1.8539)) - cx[89] = + cx[107] = p21_2 * p21_2 - p22_2 * p22_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p21_2 + p22_2)) * ((abs(q21_22_2))^T(1.8539)) - cx[90] = + cx[108] = p22_2 * p22_2 - p23_2 * p23_2 - T(0.01) * ((1) + (T(0.5) * 1) * (p22_2 + p23_2)) * ((abs(q22_23_2))^T(1.8539)) - cx[91] = + cx[109] = p1_3 / ((1) + (1) * p1_3) - p1_2 / ((1) + (1) * p1_2) - T(0.75) * q1_17_3 - T(0.75) * q1_2_3 + in1_3 - T(0.25) * q1_17_2 - T(0.25) * q1_2_2 - cx[92] = + cx[110] = p2_3 / ((1) + (1) * p2_3) - p2_2 / ((1) + (1) * p2_2) - T(0.75) * q2_3_3 + T(0.75) * q1_2_3 - T(0.25) * q2_3_2 + T(0.25) * q1_2_2 - 1 - cx[93] = + cx[111] = p3_3 / ((1) + (1) * p3_3) - p3_2 / ((1) + (1) * p3_2) - f3_4_3 + T(0.75) * q2_3_3 + T(0.25) * q2_3_2 - cx[94] = + cx[112] = p4_3 / ((1) + (1) * p4_3) - p4_2 / ((1) + (1) * p4_2) - T(0.75) * q4_5_3 + f3_4_3 - T(0.25) * q4_5_2 - cx[95] = + cx[113] = p5_3 / ((1) + (1) * p5_3) - p5_2 / ((1) + (1) * p5_2) - T(0.75) * q5_6_3 - f5_7_3 + T(0.75) * q4_5_3 - T(0.25) * q5_6_2 + T(0.25) * q4_5_2 - cx[96] = + cx[114] = p6_3 / ((1) + (1) * p6_3) - p6_2 / ((1) + (1) * 
p6_2) + T(0.75) * q5_6_3 + T(0.25) * q5_6_2 - 1 - cx[97] = + cx[115] = p7_3 / ((1) + (1) * p7_3) - p7_2 / ((1) + (1) * p7_2) - T(0.75) * q7_8_3 + f5_7_3 - T(0.25) * q7_8_2 - cx[98] = + cx[116] = p8_3 / ((1) + (1) * p8_3) - p8_2 / ((1) + (1) * p8_2) - T(0.75) * q8_9_3 - T(0.75) * q8_10_3 - T(0.75) * q8_11_3 + T(0.75) * q7_8_3 - T(0.25) * q8_9_2 - T(0.25) * q8_10_2 - T(0.25) * q8_11_2 + T(0.25) * q7_8_2 - cx[99] = + cx[117] = p9_3 / ((1) + (1) * p9_3) - p9_2 / ((1) + (1) * p9_2) + T(0.75) * q8_9_3 + T(0.25) * q8_9_2 - cx[100] = + cx[118] = p10_3 / ((1) + (1) * p10_3) - p10_2 / ((1) + (1) * p10_2) + T(0.75) * q8_10_3 + T(0.25) * q8_10_2 - 1 - cx[101] = + cx[119] = p11_3 / ((1) + (1) * p11_3) - p11_2 / ((1) + (1) * p11_2) - T(0.75) * q11_12_3 + T(0.75) * q8_11_3 - T(0.25) * q11_12_2 + T(0.25) * q8_11_2 - cx[102] = + cx[120] = p12_3 / ((1) + (1) * p12_3) - p12_2 / ((1) + (1) * p12_2) - T(0.75) * q12_13_3 + T(0.75) * q11_12_3 - T(0.25) * q12_13_2 + T(0.25) * q11_12_2 - cx[103] = + cx[121] = p13_3 / ((1) + (1) * p13_3) - p13_2 / ((1) + (1) * p13_2) - T(0.75) * q13_14_3 - T(0.75) * q13_15_3 + T(0.75) * q12_13_3 - T(0.25) * q13_14_2 - T(0.25) * q13_15_2 + T(0.25) * q12_13_2 - 1 - cx[104] = + cx[122] = p14_3 / ((1) + (1) * p14_3) - p14_2 / ((1) + (1) * p14_2) + T(0.75) * q13_14_3 + T(0.25) * q13_14_2 - cx[105] = + cx[123] = p15_3 / ((1) + (1) * p15_3) - p15_2 / ((1) + (1) * p15_2) - T(0.75) * q15_16_3 + T(0.75) * q13_15_3 - T(0.25) * q15_16_2 + T(0.25) * q13_15_2 - 1 - cx[106] = + cx[124] = p16_3 / ((1) + (1) * p16_3) - p16_2 / ((1) + (1) * p16_2) + T(0.75) * q15_16_3 + T(0.25) * q15_16_2 - out16_3 - cx[107] = + cx[125] = p17_3 / ((1) + (1) * p17_3) - p17_2 / ((1) + (1) * p17_2) - T(0.75) * q17_18_3 + T(0.75) * q1_17_3 - T(0.25) * q17_18_2 + T(0.25) * q1_17_2 - 1 - cx[108] = + cx[126] = p18_3 / ((1) + (1) * p18_3) - p18_2 / ((1) + (1) * p18_2) - T(0.75) * q18_19_3 + T(0.75) * q17_18_3 - T(0.25) * q18_19_2 + T(0.25) * q17_18_2 - 1 - cx[109] = + cx[127] = p19_3 / ((1) + (1) * p19_3) - p19_2 / ((1) + (1) * p19_2) - f19_20_3 + T(0.75) * q18_19_3 + T(0.25) * q18_19_2 - cx[110] = + cx[128] = p20_3 / ((1) + (1) * p20_3) - p20_2 / ((1) + (1) * p20_2) - T(0.75) * q20_21_3 + f19_20_3 - T(0.25) * q20_21_2 - cx[111] = + cx[129] = p21_3 / ((1) + (1) * p21_3) - p21_2 / ((1) + (1) * p21_2) - T(0.75) * q21_22_3 + T(0.75) * q20_21_3 - T(0.25) * q21_22_2 + T(0.25) * q20_21_2 - 1 - cx[112] = + cx[130] = p22_3 / ((1) + (1) * p22_3) - p22_2 / ((1) + (1) * p22_2) - T(0.75) * q22_23_3 + T(0.75) * q21_22_3 - T(0.25) * q22_23_2 + T(0.25) * q21_22_2 - 1 - cx[113] = + cx[131] = p23_3 / ((1) + (1) * p23_3) - p23_2 / ((1) + (1) * p23_2) + T(0.75) * q22_23_3 + T(0.25) * q22_23_2 - out23_3 - cx[114] = p3_3 * r3_4_3 - p4_3 - cx[115] = p5_3 * r5_7_3 - p7_3 - cx[116] = p19_3 * r19_20_3 - p20_3 - cx[117] = + cx[7] = p3_3 * r3_4_3 - p4_3 + cx[8] = p5_3 * r5_7_3 - p7_3 + cx[9] = p19_3 * r19_20_3 - p20_3 + cx[132] = p1_3 * p1_3 - p2_3 * p2_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p1_3 + p2_3)) * ((abs(q1_2_3))^T(1.8539)) - cx[118] = + cx[133] = p1_3 * p1_3 - p17_3 * p17_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p1_3 + p17_3)) * ((abs(q1_17_3))^T(1.8539)) - cx[119] = + cx[134] = p2_3 * p2_3 - p3_3 * p3_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p2_3 + p3_3)) * ((abs(q2_3_3))^T(1.8539)) - cx[120] = + cx[135] = p4_3 * p4_3 - p5_3 * p5_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p4_3 + p5_3)) * ((abs(q4_5_3))^T(1.8539)) - cx[121] = + cx[136] = p5_3 * p5_3 - p6_3 * p6_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p5_3 + p6_3)) * ((abs(q5_6_3))^T(1.8539)) - cx[122] = + 
cx[137] = p7_3 * p7_3 - p8_3 * p8_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p7_3 + p8_3)) * ((abs(q7_8_3))^T(1.8539)) - cx[123] = + cx[138] = p8_3 * p8_3 - p9_3 * p9_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_3 + p9_3)) * ((abs(q8_9_3))^T(1.8539)) - cx[124] = + cx[139] = p8_3 * p8_3 - p10_3 * p10_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_3 + p10_3)) * ((abs(q8_10_3))^T(1.8539)) - cx[125] = + cx[140] = p8_3 * p8_3 - p11_3 * p11_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_3 + p11_3)) * ((abs(q8_11_3))^T(1.8539)) - cx[126] = + cx[141] = p11_3 * p11_3 - p12_3 * p12_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p11_3 + p12_3)) * ((abs(q11_12_3))^T(1.8539)) - cx[127] = + cx[142] = p12_3 * p12_3 - p13_3 * p13_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p12_3 + p13_3)) * ((abs(q12_13_3))^T(1.8539)) - cx[128] = + cx[143] = p13_3 * p13_3 - p14_3 * p14_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p13_3 + p14_3)) * ((abs(q13_14_3))^T(1.8539)) - cx[129] = + cx[144] = p13_3 * p13_3 - p15_3 * p15_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p13_3 + p15_3)) * ((abs(q13_15_3))^T(1.8539)) - cx[130] = + cx[145] = p15_3 * p15_3 - p16_3 * p16_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p15_3 + p16_3)) * ((abs(q15_16_3))^T(1.8539)) - cx[131] = + cx[146] = p17_3 * p17_3 - p18_3 * p18_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p17_3 + p18_3)) * ((abs(q17_18_3))^T(1.8539)) - cx[132] = + cx[147] = p18_3 * p18_3 - p19_3 * p19_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p18_3 + p19_3)) * ((abs(q18_19_3))^T(1.8539)) - cx[133] = + cx[148] = p20_3 * p20_3 - p21_3 * p21_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p20_3 + p21_3)) * ((abs(q20_21_3))^T(1.8539)) - cx[134] = + cx[149] = p21_3 * p21_3 - p22_3 * p22_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p21_3 + p22_3)) * ((abs(q21_22_3))^T(1.8539)) - cx[135] = + cx[150] = p22_3 * p22_3 - p23_3 * p23_3 - T(0.01) * ((1) + (T(0.5) * 1) * (p22_3 + p23_3)) * ((abs(q22_23_3))^T(1.8539)) - cx[136] = + cx[151] = p1_4 / ((1) + (1) * p1_4) - p1_3 / ((1) + (1) * p1_3) - T(0.75) * q1_17_4 - T(0.75) * q1_2_4 + in1_4 - T(0.25) * q1_17_3 - T(0.25) * q1_2_3 - cx[137] = + cx[152] = p2_4 / ((1) + (1) * p2_4) - p2_3 / ((1) + (1) * p2_3) - T(0.75) * q2_3_4 + T(0.75) * q1_2_4 - T(0.25) * q2_3_3 + T(0.25) * q1_2_3 - 1 - cx[138] = + cx[153] = p3_4 / ((1) + (1) * p3_4) - p3_3 / ((1) + (1) * p3_3) - f3_4_4 + T(0.75) * q2_3_4 + T(0.25) * q2_3_3 - cx[139] = + cx[154] = p4_4 / ((1) + (1) * p4_4) - p4_3 / ((1) + (1) * p4_3) - T(0.75) * q4_5_4 + f3_4_4 - T(0.25) * q4_5_3 - cx[140] = + cx[155] = p5_4 / ((1) + (1) * p5_4) - p5_3 / ((1) + (1) * p5_3) - T(0.75) * q5_6_4 - f5_7_4 + T(0.75) * q4_5_4 - T(0.25) * q5_6_3 + T(0.25) * q4_5_3 - cx[141] = + cx[156] = p6_4 / ((1) + (1) * p6_4) - p6_3 / ((1) + (1) * p6_3) + T(0.75) * q5_6_4 + T(0.25) * q5_6_3 - 1 - cx[142] = + cx[157] = p7_4 / ((1) + (1) * p7_4) - p7_3 / ((1) + (1) * p7_3) - T(0.75) * q7_8_4 + f5_7_4 - T(0.25) * q7_8_3 - cx[143] = + cx[158] = p8_4 / ((1) + (1) * p8_4) - p8_3 / ((1) + (1) * p8_3) - T(0.75) * q8_9_4 - T(0.75) * q8_10_4 - T(0.75) * q8_11_4 + T(0.75) * q7_8_4 - T(0.25) * q8_9_3 - T(0.25) * q8_10_3 - T(0.25) * q8_11_3 + T(0.25) * q7_8_3 - cx[144] = + cx[159] = p9_4 / ((1) + (1) * p9_4) - p9_3 / ((1) + (1) * p9_3) + T(0.75) * q8_9_4 + T(0.25) * q8_9_3 - cx[145] = + cx[160] = p10_4 / ((1) + (1) * p10_4) - p10_3 / ((1) + (1) * p10_3) + T(0.75) * q8_10_4 + T(0.25) * q8_10_3 - 1 - cx[146] = + cx[161] = p11_4 / ((1) + (1) * p11_4) - p11_3 / ((1) + (1) * p11_3) - T(0.75) * q11_12_4 + T(0.75) * q8_11_4 - T(0.25) * q11_12_3 + T(0.25) * q8_11_3 - cx[147] = + cx[162] = p12_4 / ((1) + (1) * p12_4) - p12_3 / ((1) + (1) * p12_3) 
- T(0.75) * q12_13_4 + T(0.75) * q11_12_4 - T(0.25) * q12_13_3 + T(0.25) * q11_12_3 - cx[148] = + cx[163] = p13_4 / ((1) + (1) * p13_4) - p13_3 / ((1) + (1) * p13_3) - T(0.75) * q13_14_4 - T(0.75) * q13_15_4 + T(0.75) * q12_13_4 - T(0.25) * q13_14_3 - T(0.25) * q13_15_3 + T(0.25) * q12_13_3 - 1 - cx[149] = + cx[164] = p14_4 / ((1) + (1) * p14_4) - p14_3 / ((1) + (1) * p14_3) + T(0.75) * q13_14_4 + T(0.25) * q13_14_3 - cx[150] = + cx[165] = p15_4 / ((1) + (1) * p15_4) - p15_3 / ((1) + (1) * p15_3) - T(0.75) * q15_16_4 + T(0.75) * q13_15_4 - T(0.25) * q15_16_3 + T(0.25) * q13_15_3 - 1 - cx[151] = + cx[166] = p16_4 / ((1) + (1) * p16_4) - p16_3 / ((1) + (1) * p16_3) + T(0.75) * q15_16_4 + T(0.25) * q15_16_3 - out16_4 - cx[152] = + cx[167] = p17_4 / ((1) + (1) * p17_4) - p17_3 / ((1) + (1) * p17_3) - T(0.75) * q17_18_4 + T(0.75) * q1_17_4 - T(0.25) * q17_18_3 + T(0.25) * q1_17_3 - 1 - cx[153] = + cx[168] = p18_4 / ((1) + (1) * p18_4) - p18_3 / ((1) + (1) * p18_3) - T(0.75) * q18_19_4 + T(0.75) * q17_18_4 - T(0.25) * q18_19_3 + T(0.25) * q17_18_3 - 1 - cx[154] = + cx[169] = p19_4 / ((1) + (1) * p19_4) - p19_3 / ((1) + (1) * p19_3) - f19_20_4 + T(0.75) * q18_19_4 + T(0.25) * q18_19_3 - cx[155] = + cx[170] = p20_4 / ((1) + (1) * p20_4) - p20_3 / ((1) + (1) * p20_3) - T(0.75) * q20_21_4 + f19_20_4 - T(0.25) * q20_21_3 - cx[156] = + cx[171] = p21_4 / ((1) + (1) * p21_4) - p21_3 / ((1) + (1) * p21_3) - T(0.75) * q21_22_4 + T(0.75) * q20_21_4 - T(0.25) * q21_22_3 + T(0.25) * q20_21_3 - 1 - cx[157] = + cx[172] = p22_4 / ((1) + (1) * p22_4) - p22_3 / ((1) + (1) * p22_3) - T(0.75) * q22_23_4 + T(0.75) * q21_22_4 - T(0.25) * q22_23_3 + T(0.25) * q21_22_3 - 1 - cx[158] = + cx[173] = p23_4 / ((1) + (1) * p23_4) - p23_3 / ((1) + (1) * p23_3) + T(0.75) * q22_23_4 + T(0.25) * q22_23_3 - out23_4 - cx[159] = p3_4 * r3_4_4 - p4_4 - cx[160] = p5_4 * r5_7_4 - p7_4 - cx[161] = p19_4 * r19_20_4 - p20_4 - cx[162] = + cx[10] = p3_4 * r3_4_4 - p4_4 + cx[11] = p5_4 * r5_7_4 - p7_4 + cx[12] = p19_4 * r19_20_4 - p20_4 + cx[174] = p1_4 * p1_4 - p2_4 * p2_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p1_4 + p2_4)) * ((abs(q1_2_4))^T(1.8539)) - cx[163] = + cx[175] = p1_4 * p1_4 - p17_4 * p17_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p1_4 + p17_4)) * ((abs(q1_17_4))^T(1.8539)) - cx[164] = + cx[176] = p2_4 * p2_4 - p3_4 * p3_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p2_4 + p3_4)) * ((abs(q2_3_4))^T(1.8539)) - cx[165] = + cx[177] = p4_4 * p4_4 - p5_4 * p5_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p4_4 + p5_4)) * ((abs(q4_5_4))^T(1.8539)) - cx[166] = + cx[178] = p5_4 * p5_4 - p6_4 * p6_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p5_4 + p6_4)) * ((abs(q5_6_4))^T(1.8539)) - cx[167] = + cx[179] = p7_4 * p7_4 - p8_4 * p8_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p7_4 + p8_4)) * ((abs(q7_8_4))^T(1.8539)) - cx[168] = + cx[180] = p8_4 * p8_4 - p9_4 * p9_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_4 + p9_4)) * ((abs(q8_9_4))^T(1.8539)) - cx[169] = + cx[181] = p8_4 * p8_4 - p10_4 * p10_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_4 + p10_4)) * ((abs(q8_10_4))^T(1.8539)) - cx[170] = + cx[182] = p8_4 * p8_4 - p11_4 * p11_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_4 + p11_4)) * ((abs(q8_11_4))^T(1.8539)) - cx[171] = + cx[183] = p11_4 * p11_4 - p12_4 * p12_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p11_4 + p12_4)) * ((abs(q11_12_4))^T(1.8539)) - cx[172] = + cx[184] = p12_4 * p12_4 - p13_4 * p13_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p12_4 + p13_4)) * ((abs(q12_13_4))^T(1.8539)) - cx[173] = + cx[185] = p13_4 * p13_4 - p14_4 * p14_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p13_4 + p14_4)) * 
((abs(q13_14_4))^T(1.8539)) - cx[174] = + cx[186] = p13_4 * p13_4 - p15_4 * p15_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p13_4 + p15_4)) * ((abs(q13_15_4))^T(1.8539)) - cx[175] = + cx[187] = p15_4 * p15_4 - p16_4 * p16_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p15_4 + p16_4)) * ((abs(q15_16_4))^T(1.8539)) - cx[176] = + cx[188] = p17_4 * p17_4 - p18_4 * p18_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p17_4 + p18_4)) * ((abs(q17_18_4))^T(1.8539)) - cx[177] = + cx[189] = p18_4 * p18_4 - p19_4 * p19_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p18_4 + p19_4)) * ((abs(q18_19_4))^T(1.8539)) - cx[178] = + cx[190] = p20_4 * p20_4 - p21_4 * p21_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p20_4 + p21_4)) * ((abs(q20_21_4))^T(1.8539)) - cx[179] = + cx[191] = p21_4 * p21_4 - p22_4 * p22_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p21_4 + p22_4)) * ((abs(q21_22_4))^T(1.8539)) - cx[180] = + cx[192] = p22_4 * p22_4 - p23_4 * p23_4 - T(0.01) * ((1) + (T(0.5) * 1) * (p22_4 + p23_4)) * ((abs(q22_23_4))^T(1.8539)) - cx[181] = + cx[193] = p1_5 / ((1) + (1) * p1_5) - p1_4 / ((1) + (1) * p1_4) - T(0.75) * q1_17_5 - T(0.75) * q1_2_5 + in1_5 - T(0.25) * q1_17_4 - T(0.25) * q1_2_4 - cx[182] = + cx[194] = p2_5 / ((1) + (1) * p2_5) - p2_4 / ((1) + (1) * p2_4) - T(0.75) * q2_3_5 + T(0.75) * q1_2_5 - T(0.25) * q2_3_4 + T(0.25) * q1_2_4 - 1 - cx[183] = + cx[195] = p3_5 / ((1) + (1) * p3_5) - p3_4 / ((1) + (1) * p3_4) - f3_4_5 + T(0.75) * q2_3_5 + T(0.25) * q2_3_4 - cx[184] = + cx[196] = p4_5 / ((1) + (1) * p4_5) - p4_4 / ((1) + (1) * p4_4) - T(0.75) * q4_5_5 + f3_4_5 - T(0.25) * q4_5_4 - cx[185] = + cx[197] = p5_5 / ((1) + (1) * p5_5) - p5_4 / ((1) + (1) * p5_4) - T(0.75) * q5_6_5 - f5_7_5 + T(0.75) * q4_5_5 - T(0.25) * q5_6_4 + T(0.25) * q4_5_4 - cx[186] = + cx[198] = p6_5 / ((1) + (1) * p6_5) - p6_4 / ((1) + (1) * p6_4) + T(0.75) * q5_6_5 + T(0.25) * q5_6_4 - 1 - cx[187] = + cx[199] = p7_5 / ((1) + (1) * p7_5) - p7_4 / ((1) + (1) * p7_4) - T(0.75) * q7_8_5 + f5_7_5 - T(0.25) * q7_8_4 - cx[188] = + cx[200] = p8_5 / ((1) + (1) * p8_5) - p8_4 / ((1) + (1) * p8_4) - T(0.75) * q8_9_5 - T(0.75) * q8_10_5 - T(0.75) * q8_11_5 + T(0.75) * q7_8_5 - T(0.25) * q8_9_4 - T(0.25) * q8_10_4 - T(0.25) * q8_11_4 + T(0.25) * q7_8_4 - cx[189] = + cx[201] = p9_5 / ((1) + (1) * p9_5) - p9_4 / ((1) + (1) * p9_4) + T(0.75) * q8_9_5 + T(0.25) * q8_9_4 - cx[190] = + cx[202] = p10_5 / ((1) + (1) * p10_5) - p10_4 / ((1) + (1) * p10_4) + T(0.75) * q8_10_5 + T(0.25) * q8_10_4 - 1 - cx[191] = + cx[203] = p11_5 / ((1) + (1) * p11_5) - p11_4 / ((1) + (1) * p11_4) - T(0.75) * q11_12_5 + T(0.75) * q8_11_5 - T(0.25) * q11_12_4 + T(0.25) * q8_11_4 - cx[192] = + cx[204] = p12_5 / ((1) + (1) * p12_5) - p12_4 / ((1) + (1) * p12_4) - T(0.75) * q12_13_5 + T(0.75) * q11_12_5 - T(0.25) * q12_13_4 + T(0.25) * q11_12_4 - cx[193] = + cx[205] = p13_5 / ((1) + (1) * p13_5) - p13_4 / ((1) + (1) * p13_4) - T(0.75) * q13_14_5 - T(0.75) * q13_15_5 + T(0.75) * q12_13_5 - T(0.25) * q13_14_4 - T(0.25) * q13_15_4 + T(0.25) * q12_13_4 - 1 - cx[194] = + cx[206] = p14_5 / ((1) + (1) * p14_5) - p14_4 / ((1) + (1) * p14_4) + T(0.75) * q13_14_5 + T(0.25) * q13_14_4 - cx[195] = + cx[207] = p15_5 / ((1) + (1) * p15_5) - p15_4 / ((1) + (1) * p15_4) - T(0.75) * q15_16_5 + T(0.75) * q13_15_5 - T(0.25) * q15_16_4 + T(0.25) * q13_15_4 - 1 - cx[196] = + cx[208] = p16_5 / ((1) + (1) * p16_5) - p16_4 / ((1) + (1) * p16_4) + T(0.75) * q15_16_5 + T(0.25) * q15_16_4 - out16_5 - cx[197] = + cx[209] = p17_5 / ((1) + (1) * p17_5) - p17_4 / ((1) + (1) * p17_4) - T(0.75) * q17_18_5 + T(0.75) * q1_17_5 - T(0.25) * q17_18_4 + T(0.25) * 
q1_17_4 - 1 - cx[198] = + cx[210] = p18_5 / ((1) + (1) * p18_5) - p18_4 / ((1) + (1) * p18_4) - T(0.75) * q18_19_5 + T(0.75) * q17_18_5 - T(0.25) * q18_19_4 + T(0.25) * q17_18_4 - 1 - cx[199] = + cx[211] = p19_5 / ((1) + (1) * p19_5) - p19_4 / ((1) + (1) * p19_4) - f19_20_5 + T(0.75) * q18_19_5 + T(0.25) * q18_19_4 - cx[200] = + cx[212] = p20_5 / ((1) + (1) * p20_5) - p20_4 / ((1) + (1) * p20_4) - T(0.75) * q20_21_5 + f19_20_5 - T(0.25) * q20_21_4 - cx[201] = + cx[213] = p21_5 / ((1) + (1) * p21_5) - p21_4 / ((1) + (1) * p21_4) - T(0.75) * q21_22_5 + T(0.75) * q20_21_5 - T(0.25) * q21_22_4 + T(0.25) * q20_21_4 - 1 - cx[202] = + cx[214] = p22_5 / ((1) + (1) * p22_5) - p22_4 / ((1) + (1) * p22_4) - T(0.75) * q22_23_5 + T(0.75) * q21_22_5 - T(0.25) * q22_23_4 + T(0.25) * q21_22_4 - 1 - cx[203] = + cx[215] = p23_5 / ((1) + (1) * p23_5) - p23_4 / ((1) + (1) * p23_4) + T(0.75) * q22_23_5 + T(0.25) * q22_23_4 - out23_5 - cx[204] = p3_5 * r3_4_5 - p4_5 - cx[205] = p5_5 * r5_7_5 - p7_5 - cx[206] = p19_5 * r19_20_5 - p20_5 - cx[207] = + cx[13] = p3_5 * r3_4_5 - p4_5 + cx[14] = p5_5 * r5_7_5 - p7_5 + cx[15] = p19_5 * r19_20_5 - p20_5 + cx[216] = p1_5 * p1_5 - p2_5 * p2_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p1_5 + p2_5)) * ((abs(q1_2_5))^T(1.8539)) - cx[208] = + cx[217] = p1_5 * p1_5 - p17_5 * p17_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p1_5 + p17_5)) * ((abs(q1_17_5))^T(1.8539)) - cx[209] = + cx[218] = p2_5 * p2_5 - p3_5 * p3_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p2_5 + p3_5)) * ((abs(q2_3_5))^T(1.8539)) - cx[210] = + cx[219] = p4_5 * p4_5 - p5_5 * p5_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p4_5 + p5_5)) * ((abs(q4_5_5))^T(1.8539)) - cx[211] = + cx[220] = p5_5 * p5_5 - p6_5 * p6_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p5_5 + p6_5)) * ((abs(q5_6_5))^T(1.8539)) - cx[212] = + cx[221] = p7_5 * p7_5 - p8_5 * p8_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p7_5 + p8_5)) * ((abs(q7_8_5))^T(1.8539)) - cx[213] = + cx[222] = p8_5 * p8_5 - p9_5 * p9_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_5 + p9_5)) * ((abs(q8_9_5))^T(1.8539)) - cx[214] = + cx[223] = p8_5 * p8_5 - p10_5 * p10_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_5 + p10_5)) * ((abs(q8_10_5))^T(1.8539)) - cx[215] = + cx[224] = p8_5 * p8_5 - p11_5 * p11_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_5 + p11_5)) * ((abs(q8_11_5))^T(1.8539)) - cx[216] = + cx[225] = p11_5 * p11_5 - p12_5 * p12_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p11_5 + p12_5)) * ((abs(q11_12_5))^T(1.8539)) - cx[217] = + cx[226] = p12_5 * p12_5 - p13_5 * p13_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p12_5 + p13_5)) * ((abs(q12_13_5))^T(1.8539)) - cx[218] = + cx[227] = p13_5 * p13_5 - p14_5 * p14_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p13_5 + p14_5)) * ((abs(q13_14_5))^T(1.8539)) - cx[219] = + cx[228] = p13_5 * p13_5 - p15_5 * p15_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p13_5 + p15_5)) * ((abs(q13_15_5))^T(1.8539)) - cx[220] = + cx[229] = p15_5 * p15_5 - p16_5 * p16_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p15_5 + p16_5)) * ((abs(q15_16_5))^T(1.8539)) - cx[221] = + cx[230] = p17_5 * p17_5 - p18_5 * p18_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p17_5 + p18_5)) * ((abs(q17_18_5))^T(1.8539)) - cx[222] = + cx[231] = p18_5 * p18_5 - p19_5 * p19_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p18_5 + p19_5)) * ((abs(q18_19_5))^T(1.8539)) - cx[223] = + cx[232] = p20_5 * p20_5 - p21_5 * p21_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p20_5 + p21_5)) * ((abs(q20_21_5))^T(1.8539)) - cx[224] = + cx[233] = p21_5 * p21_5 - p22_5 * p22_5 - T(0.01) * ((1) + (T(0.5) * 1) * (p21_5 + p22_5)) * ((abs(q21_22_5))^T(1.8539)) - cx[225] = + cx[234] = p22_5 * p22_5 - p23_5 * p23_5 - 
T(0.01) * ((1) + (T(0.5) * 1) * (p22_5 + p23_5)) * ((abs(q22_23_5))^T(1.8539)) - cx[226] = + cx[235] = p1_6 / ((1) + (1) * p1_6) - p1_5 / ((1) + (1) * p1_5) - T(0.75) * q1_17_6 - T(0.75) * q1_2_6 + in1_6 - T(0.25) * q1_17_5 - T(0.25) * q1_2_5 - cx[227] = + cx[236] = p2_6 / ((1) + (1) * p2_6) - p2_5 / ((1) + (1) * p2_5) - T(0.75) * q2_3_6 + T(0.75) * q1_2_6 - T(0.25) * q2_3_5 + T(0.25) * q1_2_5 - 1 - cx[228] = + cx[237] = p3_6 / ((1) + (1) * p3_6) - p3_5 / ((1) + (1) * p3_5) - f3_4_6 + T(0.75) * q2_3_6 + T(0.25) * q2_3_5 - cx[229] = + cx[238] = p4_6 / ((1) + (1) * p4_6) - p4_5 / ((1) + (1) * p4_5) - T(0.75) * q4_5_6 + f3_4_6 - T(0.25) * q4_5_5 - cx[230] = + cx[239] = p5_6 / ((1) + (1) * p5_6) - p5_5 / ((1) + (1) * p5_5) - T(0.75) * q5_6_6 - f5_7_6 + T(0.75) * q4_5_6 - T(0.25) * q5_6_5 + T(0.25) * q4_5_5 - cx[231] = + cx[240] = p6_6 / ((1) + (1) * p6_6) - p6_5 / ((1) + (1) * p6_5) + T(0.75) * q5_6_6 + T(0.25) * q5_6_5 - 1 - cx[232] = + cx[241] = p7_6 / ((1) + (1) * p7_6) - p7_5 / ((1) + (1) * p7_5) - T(0.75) * q7_8_6 + f5_7_6 - T(0.25) * q7_8_5 - cx[233] = + cx[242] = p8_6 / ((1) + (1) * p8_6) - p8_5 / ((1) + (1) * p8_5) - T(0.75) * q8_9_6 - T(0.75) * q8_10_6 - T(0.75) * q8_11_6 + T(0.75) * q7_8_6 - T(0.25) * q8_9_5 - T(0.25) * q8_10_5 - T(0.25) * q8_11_5 + T(0.25) * q7_8_5 - cx[234] = + cx[243] = p9_6 / ((1) + (1) * p9_6) - p9_5 / ((1) + (1) * p9_5) + T(0.75) * q8_9_6 + T(0.25) * q8_9_5 - cx[235] = + cx[244] = p10_6 / ((1) + (1) * p10_6) - p10_5 / ((1) + (1) * p10_5) + T(0.75) * q8_10_6 + T(0.25) * q8_10_5 - 1 - cx[236] = + cx[245] = p11_6 / ((1) + (1) * p11_6) - p11_5 / ((1) + (1) * p11_5) - T(0.75) * q11_12_6 + T(0.75) * q8_11_6 - T(0.25) * q11_12_5 + T(0.25) * q8_11_5 - cx[237] = + cx[246] = p12_6 / ((1) + (1) * p12_6) - p12_5 / ((1) + (1) * p12_5) - T(0.75) * q12_13_6 + T(0.75) * q11_12_6 - T(0.25) * q12_13_5 + T(0.25) * q11_12_5 - cx[238] = + cx[247] = p13_6 / ((1) + (1) * p13_6) - p13_5 / ((1) + (1) * p13_5) - T(0.75) * q13_14_6 - T(0.75) * q13_15_6 + T(0.75) * q12_13_6 - T(0.25) * q13_14_5 - T(0.25) * q13_15_5 + T(0.25) * q12_13_5 - 1 - cx[239] = + cx[248] = p14_6 / ((1) + (1) * p14_6) - p14_5 / ((1) + (1) * p14_5) + T(0.75) * q13_14_6 + T(0.25) * q13_14_5 - cx[240] = + cx[249] = p15_6 / ((1) + (1) * p15_6) - p15_5 / ((1) + (1) * p15_5) - T(0.75) * q15_16_6 + T(0.75) * q13_15_6 - T(0.25) * q15_16_5 + T(0.25) * q13_15_5 - 1 - cx[241] = + cx[250] = p16_6 / ((1) + (1) * p16_6) - p16_5 / ((1) + (1) * p16_5) + T(0.75) * q15_16_6 + T(0.25) * q15_16_5 - out16_6 - cx[242] = + cx[251] = p17_6 / ((1) + (1) * p17_6) - p17_5 / ((1) + (1) * p17_5) - T(0.75) * q17_18_6 + T(0.75) * q1_17_6 - T(0.25) * q17_18_5 + T(0.25) * q1_17_5 - 1 - cx[243] = + cx[252] = p18_6 / ((1) + (1) * p18_6) - p18_5 / ((1) + (1) * p18_5) - T(0.75) * q18_19_6 + T(0.75) * q17_18_6 - T(0.25) * q18_19_5 + T(0.25) * q17_18_5 - 1 - cx[244] = + cx[253] = p19_6 / ((1) + (1) * p19_6) - p19_5 / ((1) + (1) * p19_5) - f19_20_6 + T(0.75) * q18_19_6 + T(0.25) * q18_19_5 - cx[245] = + cx[254] = p20_6 / ((1) + (1) * p20_6) - p20_5 / ((1) + (1) * p20_5) - T(0.75) * q20_21_6 + f19_20_6 - T(0.25) * q20_21_5 - cx[246] = + cx[255] = p21_6 / ((1) + (1) * p21_6) - p21_5 / ((1) + (1) * p21_5) - T(0.75) * q21_22_6 + T(0.75) * q20_21_6 - T(0.25) * q21_22_5 + T(0.25) * q20_21_5 - 1 - cx[247] = + cx[256] = p22_6 / ((1) + (1) * p22_6) - p22_5 / ((1) + (1) * p22_5) - T(0.75) * q22_23_6 + T(0.75) * q21_22_6 - T(0.25) * q22_23_5 + T(0.25) * q21_22_5 - 1 - cx[248] = + cx[257] = p23_6 / ((1) + (1) * p23_6) - p23_5 / ((1) + (1) * p23_5) + T(0.75) * 
q22_23_6 + T(0.25) * q22_23_5 - out23_6 - cx[249] = p3_6 * r3_4_6 - p4_6 - cx[250] = p5_6 * r5_7_6 - p7_6 - cx[251] = p19_6 * r19_20_6 - p20_6 - cx[252] = + cx[16] = p3_6 * r3_4_6 - p4_6 + cx[17] = p5_6 * r5_7_6 - p7_6 + cx[18] = p19_6 * r19_20_6 - p20_6 + cx[258] = p1_6 * p1_6 - p2_6 * p2_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p1_6 + p2_6)) * ((abs(q1_2_6))^T(1.8539)) - cx[253] = + cx[259] = p1_6 * p1_6 - p17_6 * p17_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p1_6 + p17_6)) * ((abs(q1_17_6))^T(1.8539)) - cx[254] = + cx[260] = p2_6 * p2_6 - p3_6 * p3_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p2_6 + p3_6)) * ((abs(q2_3_6))^T(1.8539)) - cx[255] = + cx[261] = p4_6 * p4_6 - p5_6 * p5_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p4_6 + p5_6)) * ((abs(q4_5_6))^T(1.8539)) - cx[256] = + cx[262] = p5_6 * p5_6 - p6_6 * p6_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p5_6 + p6_6)) * ((abs(q5_6_6))^T(1.8539)) - cx[257] = + cx[263] = p7_6 * p7_6 - p8_6 * p8_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p7_6 + p8_6)) * ((abs(q7_8_6))^T(1.8539)) - cx[258] = + cx[264] = p8_6 * p8_6 - p9_6 * p9_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_6 + p9_6)) * ((abs(q8_9_6))^T(1.8539)) - cx[259] = + cx[265] = p8_6 * p8_6 - p10_6 * p10_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_6 + p10_6)) * ((abs(q8_10_6))^T(1.8539)) - cx[260] = + cx[266] = p8_6 * p8_6 - p11_6 * p11_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_6 + p11_6)) * ((abs(q8_11_6))^T(1.8539)) - cx[261] = + cx[267] = p11_6 * p11_6 - p12_6 * p12_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p11_6 + p12_6)) * ((abs(q11_12_6))^T(1.8539)) - cx[262] = + cx[268] = p12_6 * p12_6 - p13_6 * p13_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p12_6 + p13_6)) * ((abs(q12_13_6))^T(1.8539)) - cx[263] = + cx[269] = p13_6 * p13_6 - p14_6 * p14_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p13_6 + p14_6)) * ((abs(q13_14_6))^T(1.8539)) - cx[264] = + cx[270] = p13_6 * p13_6 - p15_6 * p15_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p13_6 + p15_6)) * ((abs(q13_15_6))^T(1.8539)) - cx[265] = + cx[271] = p15_6 * p15_6 - p16_6 * p16_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p15_6 + p16_6)) * ((abs(q15_16_6))^T(1.8539)) - cx[266] = + cx[272] = p17_6 * p17_6 - p18_6 * p18_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p17_6 + p18_6)) * ((abs(q17_18_6))^T(1.8539)) - cx[267] = + cx[273] = p18_6 * p18_6 - p19_6 * p19_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p18_6 + p19_6)) * ((abs(q18_19_6))^T(1.8539)) - cx[268] = + cx[274] = p20_6 * p20_6 - p21_6 * p21_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p20_6 + p21_6)) * ((abs(q20_21_6))^T(1.8539)) - cx[269] = + cx[275] = p21_6 * p21_6 - p22_6 * p22_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p21_6 + p22_6)) * ((abs(q21_22_6))^T(1.8539)) - cx[270] = + cx[276] = p22_6 * p22_6 - p23_6 * p23_6 - T(0.01) * ((1) + (T(0.5) * 1) * (p22_6 + p23_6)) * ((abs(q22_23_6))^T(1.8539)) - cx[271] = + cx[277] = p1_7 / ((1) + (1) * p1_7) - p1_6 / ((1) + (1) * p1_6) - T(0.75) * q1_17_7 - T(0.75) * q1_2_7 + in1_7 - T(0.25) * q1_17_6 - T(0.25) * q1_2_6 - cx[272] = + cx[278] = p2_7 / ((1) + (1) * p2_7) - p2_6 / ((1) + (1) * p2_6) - T(0.75) * q2_3_7 + T(0.75) * q1_2_7 - T(0.25) * q2_3_6 + T(0.25) * q1_2_6 - 1 - cx[273] = + cx[279] = p3_7 / ((1) + (1) * p3_7) - p3_6 / ((1) + (1) * p3_6) - f3_4_7 + T(0.75) * q2_3_7 + T(0.25) * q2_3_6 - cx[274] = + cx[280] = p4_7 / ((1) + (1) * p4_7) - p4_6 / ((1) + (1) * p4_6) - T(0.75) * q4_5_7 + f3_4_7 - T(0.25) * q4_5_6 - cx[275] = + cx[281] = p5_7 / ((1) + (1) * p5_7) - p5_6 / ((1) + (1) * p5_6) - T(0.75) * q5_6_7 - f5_7_7 + T(0.75) * q4_5_7 - T(0.25) * q5_6_6 + T(0.25) * q4_5_6 - cx[276] = + cx[282] = p6_7 / ((1) + (1) * p6_7) - p6_6 / ((1) + (1) * 
p6_6) + T(0.75) * q5_6_7 + T(0.25) * q5_6_6 - 1 - cx[277] = + cx[283] = p7_7 / ((1) + (1) * p7_7) - p7_6 / ((1) + (1) * p7_6) - T(0.75) * q7_8_7 + f5_7_7 - T(0.25) * q7_8_6 - cx[278] = + cx[284] = p8_7 / ((1) + (1) * p8_7) - p8_6 / ((1) + (1) * p8_6) - T(0.75) * q8_9_7 - T(0.75) * q8_10_7 - T(0.75) * q8_11_7 + T(0.75) * q7_8_7 - T(0.25) * q8_9_6 - T(0.25) * q8_10_6 - T(0.25) * q8_11_6 + T(0.25) * q7_8_6 - cx[279] = + cx[285] = p9_7 / ((1) + (1) * p9_7) - p9_6 / ((1) + (1) * p9_6) + T(0.75) * q8_9_7 + T(0.25) * q8_9_6 - cx[280] = + cx[286] = p10_7 / ((1) + (1) * p10_7) - p10_6 / ((1) + (1) * p10_6) + T(0.75) * q8_10_7 + T(0.25) * q8_10_6 - 1 - cx[281] = + cx[287] = p11_7 / ((1) + (1) * p11_7) - p11_6 / ((1) + (1) * p11_6) - T(0.75) * q11_12_7 + T(0.75) * q8_11_7 - T(0.25) * q11_12_6 + T(0.25) * q8_11_6 - cx[282] = + cx[288] = p12_7 / ((1) + (1) * p12_7) - p12_6 / ((1) + (1) * p12_6) - T(0.75) * q12_13_7 + T(0.75) * q11_12_7 - T(0.25) * q12_13_6 + T(0.25) * q11_12_6 - cx[283] = + cx[289] = p13_7 / ((1) + (1) * p13_7) - p13_6 / ((1) + (1) * p13_6) - T(0.75) * q13_14_7 - T(0.75) * q13_15_7 + T(0.75) * q12_13_7 - T(0.25) * q13_14_6 - T(0.25) * q13_15_6 + T(0.25) * q12_13_6 - 1 - cx[284] = + cx[290] = p14_7 / ((1) + (1) * p14_7) - p14_6 / ((1) + (1) * p14_6) + T(0.75) * q13_14_7 + T(0.25) * q13_14_6 - cx[285] = + cx[291] = p15_7 / ((1) + (1) * p15_7) - p15_6 / ((1) + (1) * p15_6) - T(0.75) * q15_16_7 + T(0.75) * q13_15_7 - T(0.25) * q15_16_6 + T(0.25) * q13_15_6 - 1 - cx[286] = + cx[292] = p16_7 / ((1) + (1) * p16_7) - p16_6 / ((1) + (1) * p16_6) + T(0.75) * q15_16_7 + T(0.25) * q15_16_6 - out16_7 - cx[287] = + cx[293] = p17_7 / ((1) + (1) * p17_7) - p17_6 / ((1) + (1) * p17_6) - T(0.75) * q17_18_7 + T(0.75) * q1_17_7 - T(0.25) * q17_18_6 + T(0.25) * q1_17_6 - 1 - cx[288] = + cx[294] = p18_7 / ((1) + (1) * p18_7) - p18_6 / ((1) + (1) * p18_6) - T(0.75) * q18_19_7 + T(0.75) * q17_18_7 - T(0.25) * q18_19_6 + T(0.25) * q17_18_6 - 1 - cx[289] = + cx[295] = p19_7 / ((1) + (1) * p19_7) - p19_6 / ((1) + (1) * p19_6) - f19_20_7 + T(0.75) * q18_19_7 + T(0.25) * q18_19_6 - cx[290] = + cx[296] = p20_7 / ((1) + (1) * p20_7) - p20_6 / ((1) + (1) * p20_6) - T(0.75) * q20_21_7 + f19_20_7 - T(0.25) * q20_21_6 - cx[291] = + cx[297] = p21_7 / ((1) + (1) * p21_7) - p21_6 / ((1) + (1) * p21_6) - T(0.75) * q21_22_7 + T(0.75) * q20_21_7 - T(0.25) * q21_22_6 + T(0.25) * q20_21_6 - 1 - cx[292] = + cx[298] = p22_7 / ((1) + (1) * p22_7) - p22_6 / ((1) + (1) * p22_6) - T(0.75) * q22_23_7 + T(0.75) * q21_22_7 - T(0.25) * q22_23_6 + T(0.25) * q21_22_6 - 1 - cx[293] = + cx[299] = p23_7 / ((1) + (1) * p23_7) - p23_6 / ((1) + (1) * p23_6) + T(0.75) * q22_23_7 + T(0.25) * q22_23_6 - out23_7 - cx[294] = p3_7 * r3_4_7 - p4_7 - cx[295] = p5_7 * r5_7_7 - p7_7 - cx[296] = p19_7 * r19_20_7 - p20_7 - cx[297] = + cx[19] = p3_7 * r3_4_7 - p4_7 + cx[20] = p5_7 * r5_7_7 - p7_7 + cx[21] = p19_7 * r19_20_7 - p20_7 + cx[300] = p1_7 * p1_7 - p2_7 * p2_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p1_7 + p2_7)) * ((abs(q1_2_7))^T(1.8539)) - cx[298] = + cx[301] = p1_7 * p1_7 - p17_7 * p17_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p1_7 + p17_7)) * ((abs(q1_17_7))^T(1.8539)) - cx[299] = + cx[302] = p2_7 * p2_7 - p3_7 * p3_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p2_7 + p3_7)) * ((abs(q2_3_7))^T(1.8539)) - cx[300] = + cx[303] = p4_7 * p4_7 - p5_7 * p5_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p4_7 + p5_7)) * ((abs(q4_5_7))^T(1.8539)) - cx[301] = + cx[304] = p5_7 * p5_7 - p6_7 * p6_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p5_7 + p6_7)) * ((abs(q5_6_7))^T(1.8539)) - cx[302] = 
+ cx[305] = p7_7 * p7_7 - p8_7 * p8_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p7_7 + p8_7)) * ((abs(q7_8_7))^T(1.8539)) - cx[303] = + cx[306] = p8_7 * p8_7 - p9_7 * p9_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_7 + p9_7)) * ((abs(q8_9_7))^T(1.8539)) - cx[304] = + cx[307] = p8_7 * p8_7 - p10_7 * p10_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_7 + p10_7)) * ((abs(q8_10_7))^T(1.8539)) - cx[305] = + cx[308] = p8_7 * p8_7 - p11_7 * p11_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p8_7 + p11_7)) * ((abs(q8_11_7))^T(1.8539)) - cx[306] = + cx[309] = p11_7 * p11_7 - p12_7 * p12_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p11_7 + p12_7)) * ((abs(q11_12_7))^T(1.8539)) - cx[307] = + cx[310] = p12_7 * p12_7 - p13_7 * p13_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p12_7 + p13_7)) * ((abs(q12_13_7))^T(1.8539)) - cx[308] = + cx[311] = p13_7 * p13_7 - p14_7 * p14_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p13_7 + p14_7)) * ((abs(q13_14_7))^T(1.8539)) - cx[309] = + cx[312] = p13_7 * p13_7 - p15_7 * p15_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p13_7 + p15_7)) * ((abs(q13_15_7))^T(1.8539)) - cx[310] = + cx[313] = p15_7 * p15_7 - p16_7 * p16_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p15_7 + p16_7)) * ((abs(q15_16_7))^T(1.8539)) - cx[311] = + cx[314] = p17_7 * p17_7 - p18_7 * p18_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p17_7 + p18_7)) * ((abs(q17_18_7))^T(1.8539)) - cx[312] = + cx[315] = p18_7 * p18_7 - p19_7 * p19_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p18_7 + p19_7)) * ((abs(q18_19_7))^T(1.8539)) - cx[313] = + cx[316] = p20_7 * p20_7 - p21_7 * p21_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p20_7 + p21_7)) * ((abs(q20_21_7))^T(1.8539)) - cx[314] = + cx[317] = p21_7 * p21_7 - p22_7 * p22_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p21_7 + p22_7)) * ((abs(q21_22_7))^T(1.8539)) - cx[315] = + cx[318] = p22_7 * p22_7 - p23_7 * p23_7 - T(0.01) * ((1) + (T(0.5) * 1) * (p22_7 + p23_7)) * ((abs(q22_23_7))^T(1.8539)) - cx[316] = + cx[319] = p1_8 / ((1) + (1) * p1_8) - p1_7 / ((1) + (1) * p1_7) - T(0.75) * q1_17_8 - T(0.75) * q1_2_8 + in1_8 - T(0.25) * q1_17_7 - T(0.25) * q1_2_7 - cx[317] = + cx[320] = p2_8 / ((1) + (1) * p2_8) - p2_7 / ((1) + (1) * p2_7) - T(0.75) * q2_3_8 + T(0.75) * q1_2_8 - T(0.25) * q2_3_7 + T(0.25) * q1_2_7 - 1 - cx[318] = + cx[321] = p3_8 / ((1) + (1) * p3_8) - p3_7 / ((1) + (1) * p3_7) - f3_4_8 + T(0.75) * q2_3_8 + T(0.25) * q2_3_7 - cx[319] = + cx[322] = p4_8 / ((1) + (1) * p4_8) - p4_7 / ((1) + (1) * p4_7) - T(0.75) * q4_5_8 + f3_4_8 - T(0.25) * q4_5_7 - cx[320] = + cx[323] = p5_8 / ((1) + (1) * p5_8) - p5_7 / ((1) + (1) * p5_7) - T(0.75) * q5_6_8 - f5_7_8 + T(0.75) * q4_5_8 - T(0.25) * q5_6_7 + T(0.25) * q4_5_7 - cx[321] = + cx[324] = p6_8 / ((1) + (1) * p6_8) - p6_7 / ((1) + (1) * p6_7) + T(0.75) * q5_6_8 + T(0.25) * q5_6_7 - 1 - cx[322] = + cx[325] = p7_8 / ((1) + (1) * p7_8) - p7_7 / ((1) + (1) * p7_7) - T(0.75) * q7_8_8 + f5_7_8 - T(0.25) * q7_8_7 - cx[323] = + cx[326] = p8_8 / ((1) + (1) * p8_8) - p8_7 / ((1) + (1) * p8_7) - T(0.75) * q8_9_8 - T(0.75) * q8_10_8 - T(0.75) * q8_11_8 + T(0.75) * q7_8_8 - T(0.25) * q8_9_7 - T(0.25) * q8_10_7 - T(0.25) * q8_11_7 + T(0.25) * q7_8_7 - cx[324] = + cx[327] = p9_8 / ((1) + (1) * p9_8) - p9_7 / ((1) + (1) * p9_7) + T(0.75) * q8_9_8 + T(0.25) * q8_9_7 - cx[325] = + cx[328] = p10_8 / ((1) + (1) * p10_8) - p10_7 / ((1) + (1) * p10_7) + T(0.75) * q8_10_8 + T(0.25) * q8_10_7 - 1 - cx[326] = + cx[329] = p11_8 / ((1) + (1) * p11_8) - p11_7 / ((1) + (1) * p11_7) - T(0.75) * q11_12_8 + T(0.75) * q8_11_8 - T(0.25) * q11_12_7 + T(0.25) * q8_11_7 - cx[327] = + cx[330] = p12_8 / ((1) + (1) * p12_8) - p12_7 / ((1) + (1) * 
p12_7) - T(0.75) * q12_13_8 + T(0.75) * q11_12_8 - T(0.25) * q12_13_7 + T(0.25) * q11_12_7 - cx[328] = + cx[331] = p13_8 / ((1) + (1) * p13_8) - p13_7 / ((1) + (1) * p13_7) - T(0.75) * q13_14_8 - T(0.75) * q13_15_8 + T(0.75) * q12_13_8 - T(0.25) * q13_14_7 - T(0.25) * q13_15_7 + T(0.25) * q12_13_7 - 1 - cx[329] = + cx[332] = p14_8 / ((1) + (1) * p14_8) - p14_7 / ((1) + (1) * p14_7) + T(0.75) * q13_14_8 + T(0.25) * q13_14_7 - cx[330] = + cx[333] = p15_8 / ((1) + (1) * p15_8) - p15_7 / ((1) + (1) * p15_7) - T(0.75) * q15_16_8 + T(0.75) * q13_15_8 - T(0.25) * q15_16_7 + T(0.25) * q13_15_7 - 1 - cx[331] = + cx[334] = p16_8 / ((1) + (1) * p16_8) - p16_7 / ((1) + (1) * p16_7) + T(0.75) * q15_16_8 + T(0.25) * q15_16_7 - out16_8 - cx[332] = + cx[335] = p17_8 / ((1) + (1) * p17_8) - p17_7 / ((1) + (1) * p17_7) - T(0.75) * q17_18_8 + T(0.75) * q1_17_8 - T(0.25) * q17_18_7 + T(0.25) * q1_17_7 - 1 - cx[333] = + cx[336] = p18_8 / ((1) + (1) * p18_8) - p18_7 / ((1) + (1) * p18_7) - T(0.75) * q18_19_8 + T(0.75) * q17_18_8 - T(0.25) * q18_19_7 + T(0.25) * q17_18_7 - 1 - cx[334] = + cx[337] = p19_8 / ((1) + (1) * p19_8) - p19_7 / ((1) + (1) * p19_7) - f19_20_8 + T(0.75) * q18_19_8 + T(0.25) * q18_19_7 - cx[335] = + cx[338] = p20_8 / ((1) + (1) * p20_8) - p20_7 / ((1) + (1) * p20_7) - T(0.75) * q20_21_8 + f19_20_8 - T(0.25) * q20_21_7 - cx[336] = + cx[339] = p21_8 / ((1) + (1) * p21_8) - p21_7 / ((1) + (1) * p21_7) - T(0.75) * q21_22_8 + T(0.75) * q20_21_8 - T(0.25) * q21_22_7 + T(0.25) * q20_21_7 - 1 - cx[337] = + cx[340] = p22_8 / ((1) + (1) * p22_8) - p22_7 / ((1) + (1) * p22_7) - T(0.75) * q22_23_8 + T(0.75) * q21_22_8 - T(0.25) * q22_23_7 + T(0.25) * q21_22_7 - 1 - cx[338] = + cx[341] = p23_8 / ((1) + (1) * p23_8) - p23_7 / ((1) + (1) * p23_7) + T(0.75) * q22_23_8 + T(0.25) * q22_23_7 - out23_8 - cx[339] = p3_8 * r3_4_8 - p4_8 - cx[340] = p5_8 * r5_7_8 - p7_8 - cx[341] = p19_8 * r19_20_8 - p20_8 + cx[22] = p3_8 * r3_4_8 - p4_8 + cx[23] = p5_8 * r5_7_8 - p7_8 + cx[24] = p19_8 * r19_20_8 - p20_8 cx[342] = p1_8 * p1_8 - p2_8 * p2_8 - T(0.01) * ((1) + (T(0.5) * 1) * (p1_8 + p2_8)) * ((abs(q1_2_8))^T(1.8539)) diff --git a/src/ADNLPProblems/bt1.jl b/src/ADNLPProblems/bt1.jl index a329b2aa..7620ffd0 100644 --- a/src/ADNLPProblems/bt1.jl +++ b/src/ADNLPProblems/bt1.jl @@ -7,10 +7,10 @@ function bt1(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where end x0 = T[0.08, 0.06] function c!(cx, x) - cx[1] = x[1]^2 + x[2]^2 - 1 + cx[1] = x[1]^2 + x[2]^2 return cx end - lcon = zeros(T, 1) - ucon = zeros(T, 1) + lcon = T[1] + ucon = T[1] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "bt1"; kwargs...) 
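
Aside on the bt1.jl hunk above: the change is a pure reformulation. The constant is moved out of the constraint body and into the bounds, so x[1]^2 + x[2]^2 - 1 = 0 becomes x[1]^2 + x[2]^2 = 1 and the feasible set is unchanged. A minimal sketch of the new form; the objective below is a placeholder, since f is not shown in this hunk, and the constructor call mirrors the one used in bt1.jl:

    using ADNLPModels

    f(x) = (x[1] - 1)^2 + (x[2] - 1)^2   # placeholder objective, not taken from the hunk
    x0 = [0.08, 0.06]

    # constraint body without the constant ...
    function c!(cx, x)
      cx[1] = x[1]^2 + x[2]^2
      return cx
    end

    # ... and the constant absorbed into the (equal) bounds: x1^2 + x2^2 = 1
    nlp = ADNLPModels.ADNLPModel!(f, x0, c!, [1.0], [1.0])
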
end diff --git a/src/ADNLPProblems/camshape.jl b/src/ADNLPProblems/camshape.jl index e16a5cbb..35b16126 100644 --- a/src/ADNLPProblems/camshape.jl +++ b/src/ADNLPProblems/camshape.jl @@ -11,25 +11,37 @@ function camshape(args...; n::Int = default_nvar, type::Type{T} = Float64, kwarg return -Ti(R_v * pi / n) * sum(y[i] for i = 1:n) end function c!(cx, y::V; n = n, R_max = R_max, R_min = R_min, θ = eltype(y)(θ)) where {V} - cx[1] = R_max - y[n] - cx[2] = y[1] - R_min - for i = 1:(n - 1) - cx[2 + i] = y[i + 1] - y[i] - end - cx[n + 2] = -R_min * y[1] - y[1] * y[2] + 2 * R_min * y[2] * cos(θ) - cx[n + 3] = -R_min^2 - R_min * y[1] + 2 * R_min * y[1] * cos(θ) - cx[n + 4] = -y[n - 1] * y[n] - y[n] * R_max + 2 * y[n - 1] * R_max * cos(θ) - cx[n + 5] = -2 * R_max * y[n] + 2 * y[n]^2 * cos(θ) + cx[1] = -R_min * y[1] - y[1] * y[2] + 2 * R_min * y[2] * cos(θ) # quadratic + cx[2] = -y[n - 1] * y[n] - y[n] * R_max + 2 * y[n - 1] * R_max * cos(θ) # quadratic + cx[3] = -2 * R_max * y[n] + 2 * y[n]^2 * cos(θ) # quadratic for i = 2:(n - 1) - cx[n + 4 + i] = -y[i - 1] * y[i] - y[i] * y[i + 1] + 2 * y[i - 1] * y[i + 1] * cos(θ) + cx[2 + i] = -y[i - 1] * y[i] - y[i] * y[i + 1] + 2 * y[i - 1] * y[i + 1] * cos(θ) # quadratic end return cx end + lcon = vcat(-T(Inf), T(-α * θ) * ones(T, n + 1), -T(Inf) * ones(T, n + 1)) + ucon = vcat(T(0), T(α * θ) * ones(T, n + 1), zeros(T, n + 1)) + + A = zeros(T, n + 2, n) + A[2,n] = -1 + lcon[2] -= R_max + ucon[2] -= R_max + A[3,1] = 1 + lcon[3] += R_min + ucon[3] += R_min + for i = 1:(n - 1) + A[3 + i, i + 1] = 1 + A[3 + i, i] = -1 + end + # cx[n + 3] = -R_min^2 - R_min * y[1] + 2 * R_min * y[1] * cos(θ) + A[1, 1] = -R_min + 2 * R_min * cos(θ) + lcon[1] += R_min^2 + ucon[1] += R_min^2 + lvar = T(R_min) * ones(T, n) uvar = T(R_max) * ones(T, n) - lcon = vcat(T(-α * θ) * ones(T, n + 1), -T(Inf) * ones(T, n + 2)) - ucon = vcat(T(α * θ) * ones(T, n + 1), zeros(T, n + 2)) + x0 = T((R_min + R_max) / 2) * ones(T, n) - return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "camshape", ; kwargs...) + return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, findnz(sparse(A))..., c!, lcon, ucon, name = "camshape", ; kwargs...) end diff --git a/src/ADNLPProblems/catenary.jl b/src/ADNLPProblems/catenary.jl index bcf82223..34d1c4f7 100644 --- a/src/ADNLPProblems/catenary.jl +++ b/src/ADNLPProblems/catenary.jl @@ -1,6 +1,6 @@ export catenary -function catenary(args...; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} +function catenary(args...; n::Int = default_nvar, type::Type{T} = Float64, Bl = 1, FRACT = 0.6, kwargs...) 
where {T} (n % 3 == 0) || @warn("catenary: number of variables adjusted to be a multiple of 3") n = 3 * max(1, div(n, 3)) (n < 6) || @warn("catenary: number of variables adjusted to be greater or equal to 6") @@ -8,8 +8,6 @@ function catenary(args...; n::Int = default_nvar, type::Type{T} = Float64, kwarg ## Model Parameters N = div(n, 3) - 2 - Bl = 1 - FRACT = 0.6 d = Bl * (N + 1) * FRACT gamma = 9.81 @@ -26,7 +24,7 @@ function catenary(args...; n::Int = default_nvar, type::Type{T} = Float64, kwarg cx[i] = (x[1 + 3 * i] - x[-2 + 3 * i])^2 + (x[2 + 3 * i] - x[-1 + 3 * i])^2 + - (x[3 + 3 * i] - x[3 * i])^2 - Bl^2 + (x[3 + 3 * i] - x[3 * i])^2 end return cx end @@ -38,8 +36,8 @@ function catenary(args...; n::Int = default_nvar, type::Type{T} = Float64, kwarg lvar[n - 2] = T(d) uvar[n - 2] = T(d) - lcon = zeros(T, N + 1) - ucon = zeros(T, N + 1) + lcon = zeros(T, N + 1) .+ Bl^2 + ucon = zeros(T, N + 1) .+ Bl^2 x0 = zeros(T, n) for i = 0:(N + 1) diff --git a/src/ADNLPProblems/elec.jl b/src/ADNLPProblems/elec.jl index 054c2659..058d7f77 100644 --- a/src/ADNLPProblems/elec.jl +++ b/src/ADNLPProblems/elec.jl @@ -15,13 +15,13 @@ function elec(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where # Define the constraints on these points (sum of the square of the coordinates = 1) function c!(cx, x; n = n) for k = 1:n - cx[k] = x[k]^2 + x[n + k]^2 + x[2n + k]^2 - 1 + cx[k] = x[k]^2 + x[n + k]^2 + x[2n + k]^2 end return cx end # bounds on the constraints - lcon = ucon = zeros(T, n) + lcon = ucon = ones(T, n) # building a feasible x0 range0 = T[i / n for i = 1:n] diff --git a/src/ADNLPProblems/hs10.jl b/src/ADNLPProblems/hs10.jl index 80e5394a..f40380f3 100644 --- a/src/ADNLPProblems/hs10.jl +++ b/src/ADNLPProblems/hs10.jl @@ -4,10 +4,10 @@ function hs10(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where x0 = T[-10; 10] f(x) = x[1] - x[2] function c!(cx, x) - cx[1] = -3 * x[1]^2 + 2 * x[1] * x[2] - x[2]^2 + 1 + cx[1] = -3 * x[1]^2 + 2 * x[1] * x[2] - x[2]^2 return cx end - lcon = T[0.0] + lcon = T[-1] ucon = T[Inf] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs10"; kwargs...) diff --git a/src/ADNLPProblems/hs100.jl b/src/ADNLPProblems/hs100.jl index 51552a72..a8e4d2e4 100644 --- a/src/ADNLPProblems/hs100.jl +++ b/src/ADNLPProblems/hs100.jl @@ -2,7 +2,6 @@ export hs100 function hs100(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} function f(x) - n = length(x) return (x[1] - 10)^2 + 5 * (x[2] - 12)^2 + x[3]^4 + @@ -13,13 +12,13 @@ function hs100(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher end x0 = T[1, 2, 0, 4, 0, 1, 1] function c!(cx, x) - cx[1] = 127 - 2 * x[1]^2 - 3 * x[2]^4 - x[3] - 4 * x[4]^2 - 5 * x[5] - cx[2] = 282 - 7 * x[1] - 3 * x[2] - 10 * x[3]^2 - x[4] + x[5] - cx[3] = 196 - 23 * x[1] - x[2]^2 - 6 * x[6]^2 + 8 * x[7] - cx[4] = -4 * x[1]^2 - x[2]^2 + 3 * x[1] * x[2] - 2 * x[3]^2 - 5 * x[6] + 11 * x[7] + cx[1] = - 7 * x[1] - 3 * x[2] - 10 * x[3]^2 - x[4] + x[5] + cx[2] = - 23 * x[1] - x[2]^2 - 6 * x[6]^2 + 8 * x[7] + cx[4] = 127 - 2 * x[1]^2 - 3 * x[2]^4 - x[3] - 4 * x[4]^2 - 5 * x[5] + cx[3] = -4 * x[1]^2 - x[2]^2 + 3 * x[1] * x[2] - 2 * x[3]^2 - 5 * x[6] + 11 * x[7] return cx end - lcon = zeros(T, 4) + lcon = T[-282, -196, 0.0, 0.0] ucon = T(Inf) * ones(T, 4) return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs100"; kwargs...) 
end diff --git a/src/ADNLPProblems/hs101.jl b/src/ADNLPProblems/hs101.jl index 4912ad8a..96f97b8c 100644 --- a/src/ADNLPProblems/hs101.jl +++ b/src/ADNLPProblems/hs101.jl @@ -26,7 +26,6 @@ function hs101(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher exposant_c4[3, :] = [-3, -2, 1, 0, 1, 0, 3 // 4] exposant_c4[4, :] = [0, 0, -2, 1, 0, 0, 1 // 2] function f(x) - n = length(x) f = 10 * prod(x[i]^exposant_f[1, i] for i = 1:7) + 15 * prod(x[i]^exposant_f[2, i] for i = 1:7) + diff --git a/src/ADNLPProblems/hs104.jl b/src/ADNLPProblems/hs104.jl index 5baed709..3e81b925 100644 --- a/src/ADNLPProblems/hs104.jl +++ b/src/ADNLPProblems/hs104.jl @@ -10,14 +10,14 @@ function hs104(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher lvar = T(0.1) * ones(T, 8) uvar = 10 * ones(T, 8) function c!(cx, x) - cx[1] = 1 - 0.0588 * x[5] * x[7] - 0.1 * x[1] - cx[2] = 1 - 0.0588 * x[6] * x[8] - 0.1 * x[1] - 0.1 * x[2] + cx[1] = -0.0588 * x[5] * x[7] - 0.1 * x[1] + cx[2] = -0.0588 * x[6] * x[8] - 0.1 * x[1] - 0.1 * x[2] cx[3] = 1 - 4 * x[3] / x[5] - 2 * x[3]^(-0.71) / x[5] - 0.0588 * x[3]^(-1.3) * x[7] cx[4] = 1 - 4 * x[4] / x[6] - 2 * x[4]^(-0.71) / x[6] - 0.0588 * x[4]^(-1.3) * x[8] cx[5] = 0.4 * (x[1] / x[7])^(0.67) + 0.4 * (x[2] / x[8])^(0.67) + 10 - x[1] - x[2] return cx end - lcon = vcat(zeros(T, 4), 1) + lcon = T[-1, -1, 0, 0, 1] ucon = vcat(T(Inf) * ones(T, 4), T(4.2)) return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs104"; kwargs...) end diff --git a/src/ADNLPProblems/hs106.jl b/src/ADNLPProblems/hs106.jl index 6554ef09..8f690d45 100644 --- a/src/ADNLPProblems/hs106.jl +++ b/src/ADNLPProblems/hs106.jl @@ -9,12 +9,12 @@ function hs106(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher lvar = T[100, 1000, 1000, 10, 10, 10, 10, 10] uvar = T[10000, 10000, 10000, 1000, 1000, 1000, 1000, 1000] function c!(cx, x) - cx[1] = x[1] * x[6] - 833.33252 * x[4] - 100 * x[1] + 83333.333 + cx[1] = x[1] * x[6] - 833.33252 * x[4] - 100 * x[1] cx[2] = x[2] * x[7] - 1250 * x[5] - x[2] * x[4] + 1250 * x[4] - cx[3] = x[3] * x[8] - 1250000 - x[3] * x[5] + 2500 * x[5] + cx[3] = x[3] * x[8] - x[3] * x[5] + 2500 * x[5] return cx end - lcon = vcat(-ones(T, 3), zeros(T, 3)) + lcon = vcat(-ones(T, 3), -83333333 // 1000, 0, 1250000) ucon = T(Inf) * ones(T, 6) return ADNLPModels.ADNLPModel!( f, diff --git a/src/ADNLPProblems/hs108.jl b/src/ADNLPProblems/hs108.jl index d6bf91b4..170ad9fb 100644 --- a/src/ADNLPProblems/hs108.jl +++ b/src/ADNLPProblems/hs108.jl @@ -10,21 +10,21 @@ function hs108(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
wher lvar = T[-Inf, -Inf, -Inf, -Inf, -Inf, -Inf, -Inf, -Inf, 0] uvar = T(Inf) * ones(T, 9) function c!(cx, x) - cx[1] = 1 - x[3]^2 - x[4]^2 - cx[2] = 1 - x[5]^2 - x[6]^2 - cx[3] = 1 - (x[1] - x[5])^2 - (x[2] - x[6])^2 - cx[4] = 1 - (x[1] - x[7])^2 - (x[2] - x[8])^2 - cx[5] = 1 - (x[3] - x[5])^2 - (x[4] - x[6])^2 - cx[6] = 1 - (x[3] - x[7])^2 - (x[4] - x[8])^2 + cx[1] = - x[3]^2 - x[4]^2 + cx[2] = - x[5]^2 - x[6]^2 + cx[3] = - (x[1] - x[5])^2 - (x[2] - x[6])^2 + cx[4] = - (x[1] - x[7])^2 - (x[2] - x[8])^2 + cx[5] = - (x[3] - x[5])^2 - (x[4] - x[6])^2 + cx[6] = - (x[3] - x[7])^2 - (x[4] - x[8])^2 cx[7] = x[3] * x[9] cx[8] = x[5] * x[8] - x[6] * x[7] - cx[9] = 1 - x[9]^2 - cx[10] = 1 - x[1]^2 - (x[2] - x[9])^2 + cx[9] = - x[9]^2 + cx[10] = - x[1]^2 - (x[2] - x[9])^2 cx[11] = x[1] * x[4] - x[2] * x[3] cx[12] = -x[5] * x[9] return cx end - lcon = zeros(T, 12) + lcon = T[-1, -1, -1, -1, -1, -1, 0, 0, -1, -1, 0, 0] ucon = T(Inf) * ones(T, 12) return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs108"; kwargs...) end diff --git a/src/ADNLPProblems/hs109.jl b/src/ADNLPProblems/hs109.jl index fe503850..b694e13e 100644 --- a/src/ADNLPProblems/hs109.jl +++ b/src/ADNLPProblems/hs109.jl @@ -12,8 +12,8 @@ function hs109(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher a = 50176 // 1000 b = sin(eltype(x)(25 // 100)) ci = cos(eltype(x)(25 // 100)) - cx[1] = 2250000 - x[1]^2 - x[8]^2 - cx[2] = 2250000 - x[2]^2 - x[9]^2 + cx[1] = - x[1]^2 - x[8]^2 + cx[2] = - x[2]^2 - x[9]^2 cx[3] = x[5] * x[6] * sin(-x[3] - 1 / 4) + x[5] * x[7] * sin(-x[4] - 1 / 4) + 2 * b * x[5]^2 - a * x[1] + 400 * a @@ -36,7 +36,7 @@ function hs109(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher 2 * ci * x[7]^2 + 0.7533e-3 * a * x[7]^2 return cx end - lcon = vcat(-T(0.55), zeros(T, 8)) + lcon = vcat(-T(0.55), - 2250000, -2250000, zeros(T, 6)) ucon = vcat(T(0.55), T(Inf), T(Inf), zeros(T, 6)) return ADNLPModels.ADNLPModel!( f, diff --git a/src/ADNLPProblems/hs113.jl b/src/ADNLPProblems/hs113.jl index 3d31ef7d..00e541da 100644 --- a/src/ADNLPProblems/hs113.jl +++ b/src/ADNLPProblems/hs113.jl @@ -16,14 +16,15 @@ function hs113(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher end x0 = T[2, 3, 5, 5, 1, 2, 7, 3, 6, 10] function c!(cx, x) - cx[1] = -3 * (x[1] - 2)^2 - 4 * (x[2] - 3)^2 - 2 * x[3]^2 + 7 * x[4] + 120 - cx[2] = -5 * x[1]^2 - 8 * x[2] - (x[3] - 6)^2 + 2 * x[4] + 40 - cx[3] = -0.5 * (x[1] - 8)^2 - 2 * (x[2] - 4)^2 - 3 * x[5]^2 + x[6] + 30 - cx[4] = -x[1]^2 - 2 * (x[2] - 2)^2 + 2 * x[1] * x[2] - 14 * x[5] + 6 * x[6] - cx[5] = 3 * x[1] - 6 * x[2] - 12 * (x[9] - 8)^2 + 7 * x[10] + cx[1] = -3 * (x[1]^2 - 4 * x[1]) - 4 * (x[2]^2 - 6 * x[2]) - 2 * x[3]^2 + 7 * x[4] + cx[2] = -5 * x[1]^2 - 8 * x[2] - (x[3]^2 - 12 * x[3]) + 2 * x[4] + cx[3] = -0.5 * (x[1]^2 - 16 * x[1]) - 2 * (x[2]^2 - 8 * x[2]) - 3 * x[5]^2 + x[6] + cx[4] = -x[1]^2 - 2 * (x[2]^2 - 4 * x[2]) + 2 * x[1] * x[2] - 14 * x[5] + 6 * x[6] + cx[5] = 3 * x[1] - 6 * x[2] - 12 * (x[9]^2 - 16 * x[9]) + 7 * x[10] return cx end - lcon = vcat(-105, 0, -12, zeros(T, 5)) + + lcon = T[-105, 0, -12, -72, -4, 34, 8, 768] ucon = T(Inf) * ones(T, 8) return ADNLPModels.ADNLPModel!( f, diff --git a/src/ADNLPProblems/hs114.jl b/src/ADNLPProblems/hs114.jl index 7766803e..a41c08a3 100644 --- a/src/ADNLPProblems/hs114.jl +++ b/src/ADNLPProblems/hs114.jl @@ -13,17 +13,17 @@ function hs114(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
wher uvar = T[2000, 16000, 120, 5000, 2000, 93, 95, 12, 4, 162] function c!(cx, x) g5 = 1.12 * x[1] + 0.13167 * x[1] * x[8] - 0.00667 * x[1] * x[8]^2 - a * x[4] - g6 = 57.425 + 1.098 * x[8] - 0.038 * x[8]^2 + 0.325 * x[6] - a * x[7] - cx[1] = g5 - cx[2] = g6 - cx[3] = -g5 + (1 / a - a) * x[4] - cx[4] = -g6 + (1 / a - a) * x[7] - cx[5] = 98000 * x[3] / (x[4] * x[9] + 1000 * x[3]) - x[6] - cx[6] = (x[2] + x[5]) / x[1] - x[8] + g6 = 1.098 * x[8] - 0.038 * x[8]^2 + 0.325 * x[6] - a * x[7] + cx[5] = g5 + cx[1] = g6 + cx[6] = -g5 + (1 / a - a) * x[4] + cx[2] = -g6 + (1 / a - a) * x[7] + cx[3] = 98000 * x[3] / (x[4] * x[9] + 1000 * x[3]) - x[6] + cx[4] = (x[2] + x[5]) / x[1] - x[8] return cx end - lcon = vcat(0, -T(35.82), 133, T(35.82), -133, zeros(T, 6)) - ucon = vcat(zero(T), T(Inf) * ones(T, 8), zeros(T, 2)) + lcon = vcat(0, -T(35.82), 133, T(35.82), -133, T[-57.425, 57.425, 0, 0, 0, 0]) + ucon = T[0, Inf, Inf, Inf, Inf, Inf, Inf, 0, 0, Inf, Inf] return ADNLPModels.ADNLPModel!( f, x0, diff --git a/src/ADNLPProblems/hs116.jl b/src/ADNLPProblems/hs116.jl index d3728fdc..1912757c 100644 --- a/src/ADNLPProblems/hs116.jl +++ b/src/ADNLPProblems/hs116.jl @@ -22,12 +22,12 @@ function hs116(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher cx[5] = x[12] - b * x[9] + ci * x[2] * x[9] cx[6] = x[11] - b * x[8] + ci * x[1] * x[8] cx[7] = x[5] * x[7] - x[1] * x[8] - x[4] * x[7] + x[4] * x[8] - cx[8] = 1 - a * (x[2] * x[9] + x[5] * x[8] - x[1] * x[8] - x[6] * x[9]) - x[5] - x[6] + cx[8] = - a * (x[2] * x[9] + x[5] * x[8] - x[1] * x[8] - x[6] * x[9]) - x[5] - x[6] cx[9] = x[2] * x[9] - x[3] * x[10] - x[6] * x[9] - 500 * x[2] + 500 * x[6] + x[2] * x[10] - cx[10] = x[2] - 0.9 - a * (x[2] * x[10] - x[3] * x[10]) + cx[10] = x[2] - a * (x[2] * x[10] - x[3] * x[10]) return cx end - lcon = vcat(zeros(T, 2), -1, 50, -T(Inf), zeros(T, 10)) + lcon = vcat(zeros(T, 2), -1, 50, -T(Inf), zeros(T, 7), T[-1; 0; 9 // 10]) ucon = vcat(T(Inf) * ones(T, 4), 250, T(Inf) * ones(T, 10)) return ADNLPModels.ADNLPModel!( f, diff --git a/src/ADNLPProblems/hs117.jl b/src/ADNLPProblems/hs117.jl index e7f32849..d9c02903 100644 --- a/src/ADNLPProblems/hs117.jl +++ b/src/ADNLPProblems/hs117.jl @@ -24,7 +24,7 @@ function hs117(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher d = [4, 8, 10, 6, 2] - e = [-15, -27, -36, -18, -12] + e = T[-15, -27, -36, -18, -12] function f(x; b = b, ci = ci, d = d) return -sum(b[j] * x[j] for j = 1:10) + sum(sum(ci[k, j] * x[10 + k] * x[10 + j] for k = 1:5) for j = 1:5) + @@ -36,12 +36,12 @@ function hs117(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher function c!(cx, x) for j = 1:5 cx[j] = - 2 * sum(ci[k, j] * x[10 + k] + 3 * d[j] * x[10 + j]^2 for k = 1:5) + e[j] - + 2 * sum(ci[k, j] * x[10 + k] + 3 * d[j] * x[10 + j]^2 for k = 1:5) - sum(a[k, j] * x[k] for k = 1:10) end return cx end - lcon = zeros(T, 5) + lcon = -e ucon = T(Inf) * ones(T, 5) return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs117"; kwargs...) end diff --git a/src/ADNLPProblems/hs12.jl b/src/ADNLPProblems/hs12.jl index 07ac3250..7e588f30 100644 --- a/src/ADNLPProblems/hs12.jl +++ b/src/ADNLPProblems/hs12.jl @@ -2,15 +2,14 @@ export hs12 function hs12(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} function f(x) - n = length(x) return x[1]^2 / 2 + x[2]^2 - x[1] * x[2] - 7 * x[1] - 7 * x[2] end x0 = zeros(T, 2) function c!(cx, x) - cx[1] = 4 * x[1]^2 + x[2]^2 - 25 + cx[1] = 4 * x[1]^2 + x[2]^2 return cx end lcon = -T(Inf) * ones(T, 1) - ucon = zeros(T, 1) + ucon = zeros(T, 1) .+ 25 return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs12"; kwargs...) end diff --git a/src/ADNLPProblems/hs14.jl b/src/ADNLPProblems/hs14.jl index 2f064aa5..61cd324c 100644 --- a/src/ADNLPProblems/hs14.jl +++ b/src/ADNLPProblems/hs14.jl @@ -9,11 +9,11 @@ function hs14(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwarg x0 = T[2; 2] f(x) = 1 // 2 * (x[1] - 2)^2 + 1 // 2 * (x[2] - 1)^2 function c!(cx, x) - cx[1] = x[1]^2 / 4 + x[2]^2 - 1 + cx[1] = x[1]^2 / 4 + x[2]^2 return cx end lcon = T[-1; -Inf] - ucon = T[-1; 0] + ucon = T[-1; 1] return ADNLPModels.ADNLPModel!( f, @@ -37,11 +37,11 @@ function hs14(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwarg return r end function c!(cx, x) - cx[1] = x[1]^2 / 4 + x[2]^2 - 1 + cx[1] = x[1]^2 / 4 + x[2]^2 return cx end lcon = T[-1; -Inf] - ucon = T[-1; 0] + ucon = T[-1; 1] return ADNLPModels.ADNLSModel!( F!, diff --git a/src/ADNLPProblems/hs15.jl b/src/ADNLPProblems/hs15.jl index ff8d34b8..0cdc9235 100644 --- a/src/ADNLPProblems/hs15.jl +++ b/src/ADNLPProblems/hs15.jl @@ -9,11 +9,11 @@ function hs15(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where lvar = -T(Inf) * ones(T, 2) uvar = T[0.5, Inf] function c!(cx, x) - cx[1] = x[1] * x[2] - 1 + cx[1] = x[1] * x[2] cx[2] = x[1] + x[2]^2 return cx end - lcon = zeros(T, 2) + lcon = T[1; 0] ucon = T(Inf) * ones(T, 2) return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs15"; kwargs...) end diff --git a/src/ADNLPProblems/hs18.jl b/src/ADNLPProblems/hs18.jl index b4efdf35..77bf4d6e 100644 --- a/src/ADNLPProblems/hs18.jl +++ b/src/ADNLPProblems/hs18.jl @@ -9,11 +9,11 @@ function hs18(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where lvar = T[2, 0] uvar = 50 * ones(T, 2) function c!(cx, x) - cx[1] = x[1] * x[2] - 25 - cx[2] = x[1]^2 + x[2]^2 - 25 + cx[1] = x[1] * x[2] + cx[2] = x[1]^2 + x[2]^2 return cx end - lcon = zeros(T, 2) + lcon = zeros(T, 2) .+ 25 ucon = T(Inf) * ones(T, 2) return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs18"; kwargs...) end diff --git a/src/ADNLPProblems/hs19.jl b/src/ADNLPProblems/hs19.jl index 026ffd7e..7e8a10e4 100644 --- a/src/ADNLPProblems/hs19.jl +++ b/src/ADNLPProblems/hs19.jl @@ -9,11 +9,11 @@ function hs19(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where lvar = T[13, 0] uvar = T[100, 100] function c!(cx, x) - cx[1] = (x[1] - 5)^2 + (x[2] - 5)^2 - 100 - cx[2] = (x[2] - 5)^2 + (x[1] - 6)^2 - 8281 // 100 + cx[1] = (x[1]^2 - 10 * x[1]) + (x[2]^2 - 10 * x[2]) + cx[2] = (x[2]^2 - 10 * x[2]) + (x[1]^2 - 12 * x[1]) return cx end - lcon = [zero(T), -T(Inf)] - ucon = [T(Inf), zero(T)] + lcon = [50, -T(Inf)] + ucon = [T(Inf), 2181 // 100] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs19"; kwargs...) end diff --git a/src/ADNLPProblems/hs20.jl b/src/ADNLPProblems/hs20.jl index 86b65986..3546f313 100644 --- a/src/ADNLPProblems/hs20.jl +++ b/src/ADNLPProblems/hs20.jl @@ -11,10 +11,10 @@ function hs20(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where function c!(cx, x) cx[1] = x[1] + x[2]^2 cx[2] = x[1]^2 + x[2] - cx[3] = x[1]^2 + x[2]^2 - 1 + cx[3] = x[1]^2 + x[2]^2 return cx end - lcon = zeros(T, 3) + lcon = T[0; 0; 1] ucon = T(Inf) * ones(T, 3) return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs20"; kwargs...) end diff --git a/src/ADNLPProblems/hs225.jl b/src/ADNLPProblems/hs225.jl index 9dead159..18b0758c 100644 --- a/src/ADNLPProblems/hs225.jl +++ b/src/ADNLPProblems/hs225.jl @@ -6,14 +6,14 @@ function hs225(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher end x0 = T[3, 1] function c!(cx, x) - cx[1] = x[1]^2 + x[2]^2 - 1 - cx[2] = 9 * x[1]^2 + x[2]^2 - 9 + cx[1] = x[1]^2 + x[2]^2 + cx[2] = 9 * x[1]^2 + x[2]^2 cx[3] = x[1]^2 - x[2] cx[4] = x[2]^2 - x[1] return cx end A = T[1 1] - lcon = vcat(1, zeros(T, 4)) + lcon = vcat(1, 1, 9, zeros(T, 2)) ucon = T(Inf) * ones(T, 5) return ADNLPModels.ADNLPModel!(f, x0, sparse(A), c!, lcon, ucon, name = "hs225"; kwargs...) end diff --git a/src/ADNLPProblems/hs226.jl b/src/ADNLPProblems/hs226.jl index 6aa3e700..2ec29fc6 100644 --- a/src/ADNLPProblems/hs226.jl +++ b/src/ADNLPProblems/hs226.jl @@ -10,10 +10,10 @@ function hs226(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher uvar = T[Inf, Inf] function c!(cx, x) cx[1] = x[1]^2 + x[2]^2 - cx[2] = 1 - x[1]^2 - x[2]^2 + cx[2] = - x[1]^2 - x[2]^2 return cx end - lcon = zeros(T, 2) + lcon = T[0; -1] ucon = T(Inf) * ones(T, 2) return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs226"; kwargs...) end diff --git a/src/ADNLPProblems/hs228.jl b/src/ADNLPProblems/hs228.jl index 5ad7d54c..e0ea523b 100644 --- a/src/ADNLPProblems/hs228.jl +++ b/src/ADNLPProblems/hs228.jl @@ -7,11 +7,11 @@ function hs228(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher end x0 = T[0, 0] function c!(cx, x) - cx[1] = -(x[1]^2 + x[2]^2) + 9 + cx[1] = -(x[1]^2 + x[2]^2) return cx end A = T[-1 -1] - lcon = T[-1; 0] + lcon = T[-1; -9] ucon = T[Inf; Inf] return ADNLPModels.ADNLPModel!(f, x0, sparse(A), c!, lcon, ucon, name = "hs228"; kwargs...) end diff --git a/src/ADNLPProblems/hs23.jl b/src/ADNLPProblems/hs23.jl index f8a513d4..0d6cb050 100644 --- a/src/ADNLPProblems/hs23.jl +++ b/src/ADNLPProblems/hs23.jl @@ -14,13 +14,13 @@ function hs23(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwarg lvar = -50 * ones(T, 2) uvar = 50 * ones(T, 2) function c!(cx, x) - cx[1] = x[1]^2 + x[2]^2 - 1 - cx[2] = 9 * x[1]^2 + x[2]^2 - 9 + cx[1] = x[1]^2 + x[2]^2 + cx[2] = 9 * x[1]^2 + x[2]^2 cx[3] = x[1]^2 - x[2] cx[4] = x[2]^2 - x[1] return cx end - lcon = vcat(one(T), zeros(T, 4)) + lcon = vcat(one(T), 1, 9, zeros(T, 2)) ucon = T(Inf) * ones(T, 5) return ADNLPModels.ADNLPModel!( f, diff --git a/src/ADNLPProblems/hs233.jl b/src/ADNLPProblems/hs233.jl index 7966a57a..4be753bb 100644 --- a/src/ADNLPProblems/hs233.jl +++ b/src/ADNLPProblems/hs233.jl @@ -6,10 +6,10 @@ function hs233(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher end x0 = T[1.2, 1] function c!(cx, x) - cx[1] = x[1]^2 + x[2]^2 - 1 // 4 + cx[1] = x[1]^2 + x[2]^2 return cx end - lcon = T[0] + lcon = T[1 // 4] ucon = T[Inf] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs233"; kwargs...) end diff --git a/src/ADNLPProblems/hs234.jl b/src/ADNLPProblems/hs234.jl index b0001e34..0f3f702b 100644 --- a/src/ADNLPProblems/hs234.jl +++ b/src/ADNLPProblems/hs234.jl @@ -8,10 +8,10 @@ function hs234(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
wher lvar = T[0.2, 0.2] uvar = T[2, 2] function c!(cx, x) - cx[1] = -x[1]^2 - x[2]^2 + 1 + cx[1] = -x[1]^2 - x[2]^2 return cx end - lcon = T[0] + lcon = T[-1] ucon = T[Inf] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs234"; kwargs...) end diff --git a/src/ADNLPProblems/hs235.jl b/src/ADNLPProblems/hs235.jl index c5c8159e..05dfcfdf 100644 --- a/src/ADNLPProblems/hs235.jl +++ b/src/ADNLPProblems/hs235.jl @@ -6,9 +6,9 @@ function hs235(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher end x0 = T[-2, 3, 1] function c!(cx, x) - cx[1] = x[1] + x[3]^2 + 1 + cx[1] = x[1] + x[3]^2 return cx end - lcon = ucon = T[0] + lcon = ucon = T[-1] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs235"; kwargs...) end diff --git a/src/ADNLPProblems/hs236.jl b/src/ADNLPProblems/hs236.jl index fbf57a24..155fb6cc 100644 --- a/src/ADNLPProblems/hs236.jl +++ b/src/ADNLPProblems/hs236.jl @@ -49,11 +49,11 @@ function hs236(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher lvar = T[0, 0] uvar = T[75, 65] function c!(cx, x) - cx[1] = x[1] * x[2] - 700 + cx[1] = x[1] * x[2] cx[2] = x[2] - 5 * (x[1] / 25)^2 return cx end - lcon = zeros(T, 2) + lcon = T[700, 0] ucon = T[Inf; Inf] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs236"; kwargs...) end diff --git a/src/ADNLPProblems/hs237.jl b/src/ADNLPProblems/hs237.jl index 2babcafb..974b0905 100644 --- a/src/ADNLPProblems/hs237.jl +++ b/src/ADNLPProblems/hs237.jl @@ -49,12 +49,12 @@ function hs237(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher lvar = T[54, -Inf] uvar = T[75, 65] function c!(cx, x) - cx[1] = x[1] * x[2] - 700 + cx[1] = x[1] * x[2] cx[2] = x[2] - 5 * (x[1] / 25)^2 - cx[3] = (x[2] - 50)^2 - 5 * (x[1] - 55) + cx[3] = x[2]^2 - 100 * x[2] - 5 * x[1] return cx end - lcon = zeros(T, 3) + lcon = T[700, 0, -2775] ucon = T[Inf; Inf; Inf] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs237"; kwargs...) end diff --git a/src/ADNLPProblems/hs238.jl b/src/ADNLPProblems/hs238.jl index ef97e1a7..295f730d 100644 --- a/src/ADNLPProblems/hs238.jl +++ b/src/ADNLPProblems/hs238.jl @@ -49,12 +49,12 @@ function hs238(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher lvar = T[-Inf; -Inf] uvar = T[75, 65] function c!(cx, x) - cx[1] = x[1] * x[2] - 700 + cx[1] = x[1] * x[2] cx[2] = x[2] - 5 * (x[1] / 25)^2 - cx[3] = (x[2] - 50)^2 - 5 * (x[1] - 55) + cx[3] = x[2]^2 - 100 * x[2] - 5 * x[1] return cx end - lcon = zeros(T, 3) + lcon = T[700, 0, -2775] ucon = T[Inf; Inf; Inf] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs238"; kwargs...) end diff --git a/src/ADNLPProblems/hs239.jl b/src/ADNLPProblems/hs239.jl index c773c0f0..a809a5a9 100644 --- a/src/ADNLPProblems/hs239.jl +++ b/src/ADNLPProblems/hs239.jl @@ -49,10 +49,10 @@ function hs239(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher lvar = T[0, 0] uvar = T[75, 65] function c!(cx, x) - cx[1] = x[1] * x[2] - 700 + cx[1] = x[1] * x[2] return cx end - lcon = zeros(T, 1) + lcon = T[700] ucon = T[Inf] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs239"; kwargs...) end diff --git a/src/ADNLPProblems/hs248.jl b/src/ADNLPProblems/hs248.jl index 27016ed2..84705f3e 100644 --- a/src/ADNLPProblems/hs248.jl +++ b/src/ADNLPProblems/hs248.jl @@ -5,13 +5,13 @@ function hs248(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...)
wher return -x[2] end x0 = T[-0.1, -1, 0.1] - lcon = T[-1; 0] - ucon = T[Inf; 0] + lcon = T[-1; 1] + ucon = T[Inf; 1] A = T[ 1 -2 ] function c!(cx, x) - cx[1] = x[1]^2 + x[2]^2 + x[3]^2 - 1 + cx[1] = x[1]^2 + x[2]^2 + x[3]^2 return cx end return ADNLPModels.ADNLPModel!(f, x0, sparse(A), c!, lcon, ucon, name = "hs248"; kwargs...) diff --git a/src/ADNLPProblems/hs249.jl b/src/ADNLPProblems/hs249.jl index b390e3c7..da7c28d0 100644 --- a/src/ADNLPProblems/hs249.jl +++ b/src/ADNLPProblems/hs249.jl @@ -8,10 +8,10 @@ function hs249(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher lvar = T[1, -Inf, -Inf] uvar = T[Inf, Inf, Inf] function c!(cx, x) - cx[1] = x[1]^2 + x[2]^2 - 1 + cx[1] = x[1]^2 + x[2]^2 return cx end - lcon = T[0] + lcon = T[1] ucon = T[Inf] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs249"; kwargs...) end diff --git a/src/ADNLPProblems/hs252.jl b/src/ADNLPProblems/hs252.jl index 66d2d5d0..f74cf461 100644 --- a/src/ADNLPProblems/hs252.jl +++ b/src/ADNLPProblems/hs252.jl @@ -6,9 +6,9 @@ function hs252(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher end x0 = T[-1, 2, 2] function c!(cx, x) - cx[1] = x[1] + x[3]^2 + 1 + cx[1] = x[1] + x[3]^2 return cx end - lcon = ucon = T[0] + lcon = ucon = T[-1] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs252"; kwargs...) end diff --git a/src/ADNLPProblems/hs254.jl b/src/ADNLPProblems/hs254.jl index 6a1b6a27..9a86fbff 100644 --- a/src/ADNLPProblems/hs254.jl +++ b/src/ADNLPProblems/hs254.jl @@ -9,10 +9,10 @@ function hs254(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher lvar = T[-Inf, -Inf, 1] uvar = T[Inf, Inf, Inf] function c!(cx, x) - cx[1] = x[2]^2 + x[3]^2 - 4 - cx[2] = x[3] - 1 - x[2]^2 + cx[1] = x[2]^2 + x[3]^2 + cx[2] = x[3] - x[2]^2 return cx end - lcon = ucon = zeros(T, 2) + lcon = ucon = T[4; 1] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs254"; kwargs...) end diff --git a/src/ADNLPProblems/hs263.jl b/src/ADNLPProblems/hs263.jl index 04ac1342..ba97c7b8 100644 --- a/src/ADNLPProblems/hs263.jl +++ b/src/ADNLPProblems/hs263.jl @@ -7,13 +7,13 @@ function hs263(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher end x0 = T[10, 10, 10, 10] function c!(cx, x) - cx[1] = x[2] - x[1]^3 + cx[4] = x[2] - x[1]^3 cx[2] = x[1]^2 - x[2] cx[3] = x[2] - x[1]^3 - x[3]^2 - cx[4] = x[1]^2 - x[2] - x[4]^2 + cx[1] = x[1]^2 - x[2] - x[4]^2 return cx end lcon = zeros(T, 4) - ucon = T[Inf; Inf; 0; 0] + ucon = T[0; Inf; 0; Inf] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs263"; kwargs...) end diff --git a/src/ADNLPProblems/hs264.jl b/src/ADNLPProblems/hs264.jl index 360cc7b0..a3730ae7 100644 --- a/src/ADNLPProblems/hs264.jl +++ b/src/ADNLPProblems/hs264.jl @@ -7,12 +7,12 @@ function hs264(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher end x0 = T[0, 0, 0, 0] function c!(cx, x) - cx[1] = -x[1]^2 - x[2]^2 - x[3]^2 - x[4]^2 - x[1] + x[2] + x[3] + x[4] + 8 - cx[2] = -x[1]^2 - 2 * x[2]^2 - x[3]^2 - 2 * x[4]^2 + x[1] + x[4] + 9 - cx[3] = -2 * x[1]^2 - x[2]^2 - x[3]^2 - 2 * x[1] + x[2] + x[4] + 5 + cx[1] = -x[1]^2 - x[2]^2 - x[3]^2 - x[4]^2 - x[1] + x[2] + x[3] + x[4] + cx[2] = -x[1]^2 - 2 * x[2]^2 - x[3]^2 - 2 * x[4]^2 + x[1] + x[4] + cx[3] = -2 * x[1]^2 - x[2]^2 - x[3]^2 - 2 * x[1] + x[2] + x[4] return cx end - lcon = T[0, 0, 0] + lcon = T[-8, -9, -5] ucon = T[Inf, Inf, Inf] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs264"; kwargs...) 
end diff --git a/src/ADNLPProblems/hs27.jl b/src/ADNLPProblems/hs27.jl index edd3ed23..2fe28d32 100644 --- a/src/ADNLPProblems/hs27.jl +++ b/src/ADNLPProblems/hs27.jl @@ -2,15 +2,15 @@ export hs27 function hs27(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} function c!(cx, x) - cx[1] = x[1] + x[3]^2 + 1 + cx[1] = x[1] + x[3]^2 return cx end return ADNLPModels.ADNLPModel!( x -> 1 // 100 * (x[1] - 1)^2 + (x[2] - x[1]^2)^2, 2ones(T, 3), c!, - zeros(T, 1), - zeros(T, 1), + zeros(T, 1) .- 1, + zeros(T, 1) .- 1, name = "hs27"; kwargs..., ) diff --git a/src/ADNLPProblems/hs29.jl b/src/ADNLPProblems/hs29.jl index 9b0d26f0..bff019e5 100644 --- a/src/ADNLPProblems/hs29.jl +++ b/src/ADNLPProblems/hs29.jl @@ -7,10 +7,10 @@ function hs29(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where end x0 = ones(T, 3) function c!(cx, x) - cx[1] = x[1]^2 + 2 * x[2]^2 + 4 * x[3]^2 - 48 + cx[1] = x[1]^2 + 2 * x[2]^2 + 4 * x[3]^2 return cx end lcon = [-T(Inf)] - ucon = zeros(T, 1) + ucon = zeros(T, 1) .+ 48 return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs29"; kwargs...) end diff --git a/src/ADNLPProblems/hs30.jl b/src/ADNLPProblems/hs30.jl index a29de1ce..12d7778e 100644 --- a/src/ADNLPProblems/hs30.jl +++ b/src/ADNLPProblems/hs30.jl @@ -14,10 +14,10 @@ function hs30(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwarg lvar = T[1, -10, -10] uvar = T[10, 10, 10] function c!(cx, x) - cx[1] = x[1]^2 + x[2]^2 - 1 + cx[1] = x[1]^2 + x[2]^2 return cx end - lcon = zeros(T, 1) + lcon = ones(T, 1) ucon = [T(Inf)] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs30"; kwargs...) end diff --git a/src/ADNLPProblems/hs31.jl b/src/ADNLPProblems/hs31.jl index 3768f908..3cdfd1b6 100644 --- a/src/ADNLPProblems/hs31.jl +++ b/src/ADNLPProblems/hs31.jl @@ -9,10 +9,10 @@ function hs31(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where lvar = T[-10, 1, -10] uvar = T[10, 10, 1] function c!(cx, x) - cx[1] = x[1] * x[2] - 1 + cx[1] = x[1] * x[2] return cx end - lcon = zeros(T, 1) + lcon = ones(T, 1) ucon = T[Inf] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs31"; kwargs...) end diff --git a/src/ADNLPProblems/hs316.jl b/src/ADNLPProblems/hs316.jl index 7b1f542e..8d5624a8 100644 --- a/src/ADNLPProblems/hs316.jl +++ b/src/ADNLPProblems/hs316.jl @@ -5,11 +5,10 @@ function hs316(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher return (x[1] - 20)^2 + (x[2] + 20)^2 end function c!(cx, x) - cx[1] = x[1]^2 / 100 + x[2]^2 / 100 - 1 + cx[1] = x[1]^2 / 100 + x[2]^2 / 100 return cx end x0 = zeros(T, 2) - lcon = zeros(T, 1) - ucon = zeros(T, 1) + lcon = ucon = T[1] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs316"; kwargs...) end diff --git a/src/ADNLPProblems/hs317.jl b/src/ADNLPProblems/hs317.jl index e81d5d96..cdf755c7 100644 --- a/src/ADNLPProblems/hs317.jl +++ b/src/ADNLPProblems/hs317.jl @@ -5,11 +5,10 @@ function hs317(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher return (x[1] - 20)^2 + (x[2] + 20)^2 end function c!(cx, x) - cx[1] = x[1]^2 / 100 + x[2]^2 / 64 - 1 + cx[1] = x[1]^2 / 100 + x[2]^2 / 64 return cx end x0 = zeros(T, 2) - lcon = zeros(T, 1) - ucon = zeros(T, 1) + lcon = ucon = T[1] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs317"; kwargs...) 
end diff --git a/src/ADNLPProblems/hs318.jl b/src/ADNLPProblems/hs318.jl index d33821ee..6523538f 100644 --- a/src/ADNLPProblems/hs318.jl +++ b/src/ADNLPProblems/hs318.jl @@ -5,11 +5,10 @@ function hs318(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher return (x[1] - 20)^2 + (x[2] + 20)^2 end function c!(cx, x) - cx[1] = x[1]^2 / 100 + x[2]^2 / 36 - 1 + cx[1] = x[1]^2 / 100 + x[2]^2 / 36 return cx end x0 = zeros(T, 2) - lcon = zeros(T, 1) - ucon = zeros(T, 1) + lcon = ucon = T[1] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs318"; kwargs...) end diff --git a/src/ADNLPProblems/hs319.jl b/src/ADNLPProblems/hs319.jl index dd66d186..b3cdb940 100644 --- a/src/ADNLPProblems/hs319.jl +++ b/src/ADNLPProblems/hs319.jl @@ -5,11 +5,10 @@ function hs319(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher return (x[1] - 20)^2 + (x[2] + 20)^2 end function c!(cx, x) - cx[1] = x[1]^2 / 100 + x[2]^2 / 16 - 1 + cx[1] = x[1]^2 / 100 + x[2]^2 / 16 return cx end x0 = zeros(T, 2) - lcon = zeros(T, 1) - ucon = zeros(T, 1) + lcon = ucon = T[1] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs319"; kwargs...) end diff --git a/src/ADNLPProblems/hs320.jl b/src/ADNLPProblems/hs320.jl index e50f900e..93fd03b0 100644 --- a/src/ADNLPProblems/hs320.jl +++ b/src/ADNLPProblems/hs320.jl @@ -5,11 +5,10 @@ function hs320(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher return (x[1] - 20)^2 + (x[2] + 20)^2 end function c!(cx, x) - cx[1] = x[1]^2 / 100 + x[2]^2 / 4 - 1 + cx[1] = x[1]^2 / 100 + x[2]^2 / 4 return cx end x0 = zeros(T, 2) - lcon = zeros(T, 1) - ucon = zeros(T, 1) + lcon = ucon = T[1] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs320"; kwargs...) end diff --git a/src/ADNLPProblems/hs321.jl b/src/ADNLPProblems/hs321.jl index e21c6aec..e363f74f 100644 --- a/src/ADNLPProblems/hs321.jl +++ b/src/ADNLPProblems/hs321.jl @@ -5,11 +5,10 @@ function hs321(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher return (x[1] - 20)^2 + (x[2] + 20)^2 end function c!(cx, x) - cx[1] = x[1]^2 / 100 + x[2]^2 - 1 + cx[1] = x[1]^2 / 100 + x[2]^2 return cx end x0 = zeros(T, 2) - lcon = zeros(T, 1) - ucon = zeros(T, 1) + lcon = ucon = T[1] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs321"; kwargs...) end diff --git a/src/ADNLPProblems/hs322.jl b/src/ADNLPProblems/hs322.jl index e80ee579..b6bca2c6 100644 --- a/src/ADNLPProblems/hs322.jl +++ b/src/ADNLPProblems/hs322.jl @@ -5,11 +5,10 @@ function hs322(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher return (x[1] - 20)^2 + (x[2] + 20)^2 end function c!(cx, x) - cx[1] = x[1]^2 / 100 + x[2]^2 * 100 - 1 + cx[1] = x[1]^2 / 100 + x[2]^2 * 100 return cx end x0 = zeros(T, 2) - lcon = zeros(T, 1) - ucon = zeros(T, 1) + lcon = ucon = T[1] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs322"; kwargs...) end diff --git a/src/ADNLPProblems/hs33.jl b/src/ADNLPProblems/hs33.jl index 37ba2b98..b7fd5901 100644 --- a/src/ADNLPProblems/hs33.jl +++ b/src/ADNLPProblems/hs33.jl @@ -9,11 +9,11 @@ function hs33(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where uvar = T[Inf, Inf, 5] lvar = zeros(T, 3) function c!(cx, x) - cx[1] = -x[3]^2 + x[2]^2 + x[1]^2 - cx[2] = x[1]^2 + x[2]^2 + x[3]^2 - 4 + cx[2] = -x[3]^2 + x[2]^2 + x[1]^2 + cx[1] = x[1]^2 + x[2]^2 + x[3]^2 return cx end - lcon = [-T(Inf), zero(T)] - ucon = [zero(T), T(Inf)] + lcon = [4, -T(Inf)] + ucon = [T(Inf), 0] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs33"; kwargs...) end diff --git a/src/ADNLPProblems/hs36.jl b/src/ADNLPProblems/hs36.jl index 070e4ffb..e31d5599 100644 --- a/src/ADNLPProblems/hs36.jl +++ b/src/ADNLPProblems/hs36.jl @@ -8,9 +8,9 @@ function hs36(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where x0 = 10 * ones(T, 3) lvar = zeros(T, 3) uvar = T[20, 11, 42] - function c(x) - n = length(x) - return [x[1] + 2 * x[2] + 2 * x[3]] + function c!(cx, x) + cx[1] = x[1] + 2 * x[2] + 2 * x[3] + return cx end lcon = [-T(Inf)] ucon = T[72] diff --git a/src/ADNLPProblems/hs37.jl b/src/ADNLPProblems/hs37.jl index 3b0c32fd..8adcf9e5 100644 --- a/src/ADNLPProblems/hs37.jl +++ b/src/ADNLPProblems/hs37.jl @@ -8,9 +8,9 @@ function hs37(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where x0 = 10 * ones(T, 3) lvar = zeros(T, 3) uvar = 42 * ones(T, 3) - function c(x) - n = length(x) - return [x[1] + 2 * x[2] + 2 * x[3]] + function c!(cx, x) + cx[1] = x[1] + 2 * x[2] + 2 * x[3] + return cx end lcon = zeros(T, 1) ucon = T[72] diff --git a/src/ADNLPProblems/hs378.jl b/src/ADNLPProblems/hs378.jl index 99d69d39..9f6d2013 100644 --- a/src/ADNLPProblems/hs378.jl +++ b/src/ADNLPProblems/hs378.jl @@ -23,7 +23,6 @@ function hs378(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) wher return cx end x0 = T(-2.3) * ones(T, 10) - lcon = zeros(T, 3) - ucon = zeros(T, 3) + lcon = ucon = zeros(T, 3) return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs378"; kwargs...) end diff --git a/src/ADNLPProblems/hs39.jl b/src/ADNLPProblems/hs39.jl index f58c68e6..e9217fbb 100644 --- a/src/ADNLPProblems/hs39.jl +++ b/src/ADNLPProblems/hs39.jl @@ -2,8 +2,8 @@ export hs39 function hs39(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} function c!(cx, x) - cx[1] = x[2] - x[1]^3 - x[3]^2 - cx[2] = x[1]^2 - x[2] - x[4]^2 + cx[2] = x[2] - x[1]^3 - x[3]^2 + cx[1] = x[1]^2 - x[2] - x[4]^2 return cx end return ADNLPModels.ADNLPModel!( diff --git a/src/ADNLPProblems/hs40.jl b/src/ADNLPProblems/hs40.jl index de6a14a5..a34239b0 100644 --- a/src/ADNLPProblems/hs40.jl +++ b/src/ADNLPProblems/hs40.jl @@ -2,9 +2,9 @@ export hs40 function hs40(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} function c!(cx, x) - cx[1] = x[1]^3 + x[2]^2 - 1 - cx[2] = x[4] * x[1]^2 - x[3] - cx[3] = x[4]^2 - x[2] + cx[2] = x[1]^3 + x[2]^2 - 1 + cx[3] = x[4] * x[1]^2 - x[3] + cx[1] = x[4]^2 - x[2] return cx end return ADNLPModels.ADNLPModel!( diff --git a/src/ADNLPProblems/hs42.jl b/src/ADNLPProblems/hs42.jl index aafa444b..2191e275 100644 --- a/src/ADNLPProblems/hs42.jl +++ b/src/ADNLPProblems/hs42.jl @@ -7,7 +7,7 @@ end function hs42(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} function c!(cx, x) - cx[1] = x[3]^2 + x[4]^2 - 2 + cx[1] = x[3]^2 + x[4]^2 return cx end return ADNLPModels.ADNLPModel!( @@ -18,8 +18,8 @@ function hs42(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwarg [1], T[1], c!, - T[2, 0], - T[2, 0], + T[2, 2], + T[2, 2], name = "hs42"; kwargs..., ) diff --git a/src/ADNLPProblems/hs43.jl b/src/ADNLPProblems/hs43.jl index d3dfdbce..193db340 100644 --- a/src/ADNLPProblems/hs43.jl +++ b/src/ADNLPProblems/hs43.jl @@ -7,12 +7,12 @@ function hs43(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where end x0 = zeros(T, 4) function c!(cx, x) - cx[1] = -8 + x[1]^2 + x[2]^2 + x[3]^2 + x[4]^2 + x[1] - x[2] + x[3] - x[4] - cx[2] = -10 + x[1]^2 + 2 * x[2]^2 + x[3]^2 + 2 * x[4]^2 - x[1] - x[4] - cx[3] = -5 + 2 * x[1]^2 + x[2]^2 + x[3]^2 + 2 * x[1] - x[2] - x[4] + cx[1] = x[1]^2 + x[2]^2 + x[3]^2 + x[4]^2 + x[1] - x[2] + x[3] - x[4] + cx[2] = x[1]^2 + 2 * x[2]^2 + x[3]^2 + 2 * x[4]^2 - x[1] - x[4] + cx[3] = 2 * x[1]^2 + x[2]^2 + x[3]^2 + 2 * x[1] - x[2] - x[4] return cx end lcon = -T(Inf) * ones(T, 3) - ucon = zeros(T, 3) + ucon = T[8; 10; 5] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs43"; kwargs...) end diff --git a/src/ADNLPProblems/hs47.jl b/src/ADNLPProblems/hs47.jl index 688edb4d..62ccda1b 100644 --- a/src/ADNLPProblems/hs47.jl +++ b/src/ADNLPProblems/hs47.jl @@ -1,18 +1,18 @@ export hs47 function hs47(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - function c!(cx, x) - cx[1] = x[1] + x[2]^2 + x[3]^3 - 3 - cx[2] = x[2] - x[3]^2 + x[4] - 1 - cx[3] = x[1] * x[5] - 1 + function c!(cx, x) + cx[3] = x[1] + x[2]^2 + x[3]^3 - 3 + cx[1] = x[2] - x[3]^2 + x[4] + cx[2] = x[1] * x[5] return cx end return ADNLPModels.ADNLPModel!( x -> (x[1] - x[2])^2 + (x[2] - x[3])^3 + (x[3] - x[4])^4 + (x[4] - x[5])^4, T[2, sqrt(2), -1, 2 - sqrt(2), 0.5], c!, - zeros(T, 3), - zeros(T, 3), + T[1; 1; 0], + T[1; 1; 0], name = "hs47"; kwargs..., ) diff --git a/src/ADNLPProblems/hs57.jl b/src/ADNLPProblems/hs57.jl index 061ddc0f..b45569e1 100644 --- a/src/ADNLPProblems/hs57.jl +++ b/src/ADNLPProblems/hs57.jl @@ -65,10 +65,10 @@ function hs57(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwarg lvar = T[0.4, -4] uvar = T(Inf) * ones(T, 2) function c!(cx, x) - cx[1] = 49 // 100 * x[2] - x[1] * x[2] - 9 // 100 + cx[1] = 49 // 100 * x[2] - x[1] * x[2] return cx end - lcon = zeros(T, 1) + lcon = zeros(T, 1) .+ 9 // 100 ucon = T(Inf) * ones(T, 1) return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs57"; kwargs...) end diff --git a/src/ADNLPProblems/hs59.jl b/src/ADNLPProblems/hs59.jl index dfcac7cc..5621e83d 100644 --- a/src/ADNLPProblems/hs59.jl +++ b/src/ADNLPProblems/hs59.jl @@ -21,12 +21,12 @@ function hs59(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where lvar = zeros(T, 2) uvar = T[75, 65] function c!(cx, x) - cx[1] = x[1] * x[2] - 700 + cx[1] = x[1] * x[2] cx[2] = x[2] - (x[1]^2) / 125 - cx[3] = (x[2] - 50)^2 - 5 * (x[1] - 55) + cx[3] = x[2]^2 - 100 * x[2] - 5 * x[1] return cx end - lcon = zeros(T, 3) + lcon = T[700; 0; -2775] ucon = T(Inf) * ones(T, 3) return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs59"; kwargs...) end diff --git a/src/ADNLPProblems/hs60.jl b/src/ADNLPProblems/hs60.jl index 7a0237c1..db2a95ee 100644 --- a/src/ADNLPProblems/hs60.jl +++ b/src/ADNLPProblems/hs60.jl @@ -12,7 +12,6 @@ function hs60(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where cx[1] = x[1] * (1 + x[2]^2) + x[3]^4 - 4 - 3 * sqrt(2) return cx end - lcon = zeros(T, 1) - ucon = zeros(T, 1) + lcon = ucon = T[0] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs60"; kwargs...) end diff --git a/src/ADNLPProblems/hs61.jl b/src/ADNLPProblems/hs61.jl index c2f9a2ad..83b1654a 100644 --- a/src/ADNLPProblems/hs61.jl +++ b/src/ADNLPProblems/hs61.jl @@ -11,7 +11,6 @@ function hs61(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where cx[2] = 4 * x[1] - x[3]^2 - 11 return cx end - lcon = zeros(T, 2) - ucon = zeros(T, 2) + lcon = ucon = T[0; 0] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs61"; kwargs...) end diff --git a/src/ADNLPProblems/hs63.jl b/src/ADNLPProblems/hs63.jl index 1a558510..2998f13b 100644 --- a/src/ADNLPProblems/hs63.jl +++ b/src/ADNLPProblems/hs63.jl @@ -2,7 +2,7 @@ export hs63 function hs63(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} function c!(cx, x) - cx[1] = x[1]^2 + x[2]^2 + x[3]^2 - 25 + cx[1] = x[1]^2 + x[2]^2 + x[3]^2 return cx end return ADNLPModels.ADNLPModel!( @@ -14,8 +14,8 @@ function hs63(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where [1; 2; 3], T[8; 14; 7], c!, - T[56, 0], - T[56, 0], + T[56, 25], + T[56, 25], name = "hs63"; kwargs..., ) diff --git a/src/ADNLPProblems/hs64.jl b/src/ADNLPProblems/hs64.jl index b6fc4860..2c857189 100644 --- a/src/ADNLPProblems/hs64.jl +++ b/src/ADNLPProblems/hs64.jl @@ -9,10 +9,10 @@ function hs64(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where lvar = T(1e-5) * ones(T, 3) uvar = T(Inf) * ones(T, 3) function c!(cx, x) - cx[1] = -1 + 4 / x[1] + 32 / x[2] + 120 / x[3] + cx[1] = 4 / x[1] + 32 / x[2] + 120 / x[3] - 1 return cx end lcon = T[-Inf] - ucon = zeros(T, 1) + ucon = T[0] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs64"; kwargs...) end diff --git a/src/ADNLPProblems/hs65.jl b/src/ADNLPProblems/hs65.jl index f30aa813..58e1b5b8 100644 --- a/src/ADNLPProblems/hs65.jl +++ b/src/ADNLPProblems/hs65.jl @@ -9,10 +9,10 @@ function hs65(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where lvar = T[-4.5, -4.5, -5] uvar = T[4.5, 4.5, 5] function c!(cx, x) - cx[1] = -48 + x[1]^2 + x[2]^2 + x[3]^2 + cx[1] = x[1]^2 + x[2]^2 + x[3]^2 return cx end lcon = T[-Inf] - ucon = zeros(T, 1) + ucon = T[48] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs65"; kwargs...) end diff --git a/src/ADNLPProblems/hs71.jl b/src/ADNLPProblems/hs71.jl index 7becd654..9660a7dd 100644 --- a/src/ADNLPProblems/hs71.jl +++ b/src/ADNLPProblems/hs71.jl @@ -9,11 +9,11 @@ function hs71(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where lvar = ones(T, 4) uvar = 5 * ones(T, 4) function c!(cx, x) - cx[1] = x[1] * x[2] * x[3] * x[4] - 25 - cx[2] = x[1]^2 + x[2]^2 + x[3]^2 + x[4]^2 - 40 + cx[2] = x[1] * x[2] * x[3] * x[4] - 25 + cx[1] = x[1]^2 + x[2]^2 + x[3]^2 + x[4]^2 return cx end - lcon = zeros(T, 2) - ucon = [T(Inf), zero(T)] + lcon = T[40; 0] + ucon = [40, T(Inf)] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs71"; kwargs...) end diff --git a/src/ADNLPProblems/hs72.jl b/src/ADNLPProblems/hs72.jl index 71a9e5d4..9bd48a24 100644 --- a/src/ADNLPProblems/hs72.jl +++ b/src/ADNLPProblems/hs72.jl @@ -9,8 +9,8 @@ function hs72(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where lvar = T(0.001) * ones(T, 4) uvar = T[(5 - i) * 1e5 for i = 1:4] function c!(cx, x) - cx[1] = -0.0401 + 4 / x[1] + 2.25 / x[2] + 1 / x[3] + 0.25 / x[4] - cx[2] = -0.010085 + 0.16 / x[1] + 0.36 / x[2] + 0.64 / x[3] + 0.64 / x[4] + cx[1] = + 4 / x[1] + 2.25 / x[2] + 1 / x[3] + 0.25 / x[4] - 0.0401 + cx[2] = + 0.16 / x[1] + 0.36 / x[2] + 0.64 / x[3] + 0.64 / x[4] - 0.010085 return cx end lcon = -T(Inf) * ones(T, 2) diff --git a/src/ADNLPProblems/hs78.jl b/src/ADNLPProblems/hs78.jl index de755fac..56edb945 100644 --- a/src/ADNLPProblems/hs78.jl +++ b/src/ADNLPProblems/hs78.jl @@ -7,12 +7,11 @@ function hs78(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where end x0 = T[-2, 1.5, 2, -1, -1] function c!(cx, x) - cx[1] = sum(x[i]^2 for i = 1:5) - 10 + cx[1] = sum(x[i]^2 for i = 1:5) cx[2] = x[2] * x[3] - 5 * x[4] * x[5] cx[3] = x[1]^3 + x[2]^3 + 1 return cx end - lcon = zeros(T, 3) - ucon = zeros(T, 3) + lcon = ucon = T[10, 0, 0] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "hs78"; kwargs...) end diff --git a/src/ADNLPProblems/hs79.jl b/src/ADNLPProblems/hs79.jl index d8b4c79d..47210d87 100644 --- a/src/ADNLPProblems/hs79.jl +++ b/src/ADNLPProblems/hs79.jl @@ -2,18 +2,17 @@ export hs79 function hs79(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} function c!(cx, x) - Ti = eltype(x) - cx[1] = x[1] + x[2]^2 + x[3]^3 - 2 - 3 * Ti(sqrt(2)) - cx[2] = x[2] - x[3]^2 + x[4] + 2 - 2 * Ti(sqrt(2)) - cx[3] = x[1] * x[5] - 2 + cx[3] = x[1] + x[2]^2 + x[3]^3 - (2 + 3 * T(sqrt(2))) + cx[1] = x[2] - x[3]^2 + x[4] + cx[2] = x[1] * x[5] return cx end return ADNLPModels.ADNLPModel!( x -> (x[1] - 1)^2 + (x[1] - x[2])^2 + (x[2] - x[3])^2 + (x[3] - x[4])^4 + (x[4] - x[5])^4, 2 * ones(T, 5), c!, - zeros(T, 3), - zeros(T, 3), + T[-2 + 2 * T(sqrt(2)), 2, 0], + T[-2 + 2 * T(sqrt(2)), 2, 0], name = "hs79"; kwargs..., ) diff --git a/src/ADNLPProblems/hs8.jl b/src/ADNLPProblems/hs8.jl index 1f6c4522..1320e4ae 100644 --- a/src/ADNLPProblems/hs8.jl +++ b/src/ADNLPProblems/hs8.jl @@ -2,16 +2,16 @@ export hs8 function hs8(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} function c!(cx, x) - cx[1] = x[1]^2 + x[2]^2 - 25 - cx[2] = x[1] * x[2] - 9 + cx[1] = x[1]^2 + x[2]^2 + cx[2] = x[1] * x[2] return cx end return ADNLPModels.ADNLPModel!( x -> -one(eltype(x)), T[2.0; 1.0], c!, - zeros(T, 2), - zeros(T, 2), + T[25; 9], + T[25; 9], name = "hs8"; kwargs..., ) diff --git a/src/ADNLPProblems/hs80.jl b/src/ADNLPProblems/hs80.jl index cb549e37..aed07d67 100644 --- a/src/ADNLPProblems/hs80.jl +++ b/src/ADNLPProblems/hs80.jl @@ -9,12 +9,12 @@ function hs80(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where lvar = T[-2.3, -2.3, -3.2, -3.2, -3.2] uvar = T[2.3, 2.3, 3.2, 3.2, 3.2] function c!(cx, x) - cx[1] = sum(x[i]^2 for i = 1:5) - 10 + cx[1] = sum(x[i]^2 for i = 1:5) cx[2] = x[2] * x[3] - 5 * x[4] * x[5] cx[3] = x[1]^3 + x[2]^3 + 1 return cx end - lcon = zeros(T, 3) - ucon = zeros(T, 3) + lcon = T[10, 0, 0] + ucon = T[10, 0, 0] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs80"; kwargs...) end diff --git a/src/ADNLPProblems/hs81.jl b/src/ADNLPProblems/hs81.jl index 4526c782..e6326247 100644 --- a/src/ADNLPProblems/hs81.jl +++ b/src/ADNLPProblems/hs81.jl @@ -9,12 +9,12 @@ function hs81(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where lvar = T[-2.3, -2.3, -3.2, -3.2, -3.2] uvar = T[2.3, 2.3, 3.2, 3.2, 3.2] function c!(cx, x) - cx[1] = sum(x[i]^2 for i = 1:5) - 10 + cx[1] = sum(x[i]^2 for i = 1:5) cx[2] = x[2] * x[3] - 5 * x[4] * x[5] cx[3] = x[1]^3 + x[2]^3 + 1 return cx end - lcon = zeros(T, 3) - ucon = zeros(T, 3) + lcon = T[10, 0, 0] + ucon = T[10, 0, 0] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs81"; kwargs...) end diff --git a/src/ADNLPProblems/hs83.jl b/src/ADNLPProblems/hs83.jl index 41fc1c3a..3b6e8f52 100644 --- a/src/ADNLPProblems/hs83.jl +++ b/src/ADNLPProblems/hs83.jl @@ -26,12 +26,12 @@ function hs83(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where ] function c!(cx, x, a = a) - cx[1] = a[1] + a[2] * x[2] * x[5] + a[3] * x[1] * x[4] - a[4] * x[3] * x[5] - cx[2] = a[5] + a[6] * x[2] * x[5] + a[7] * x[1] * x[2] - a[8] * x[3] * x[3] - 90 - cx[3] = a[9] + a[10] * x[3] * x[5] + a[11] * x[1] * x[3] - a[12] * x[3] * x[4] - 20 + cx[1] = a[2] * x[2] * x[5] + a[3] * x[1] * x[4] - a[4] * x[3] * x[5] + cx[2] = a[6] * x[2] * x[5] + a[7] * x[1] * x[2] - a[8] * x[3] * x[3] + cx[3] = a[10] * x[3] * x[5] + a[11] * x[1] * x[3] - a[12] * x[3] * x[4] return cx end - lcon = zeros(T, 3) - ucon = T[92, 20, 5] + lcon = T[-a[1], 90 - a[5], 20 - a[9]] + ucon = T[92 - a[1], 110 - a[5], 25 - a[9]] return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs83"; kwargs...) end diff --git a/src/ADNLPProblems/hs95.jl b/src/ADNLPProblems/hs95.jl index 9461a106..e38ef216 100644 --- a/src/ADNLPProblems/hs95.jl +++ b/src/ADNLPProblems/hs95.jl @@ -17,17 +17,16 @@ function hs95(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where cx[1] = 17.1 * x[1] + 38.2 * x[2] + 204.2 * x[3] + 212.3 * x[4] + 623.4 * x[5] + 1495.5 * x[6] - 169 * x[1] * x[3] - 3580 * x[3] * x[5] - 3810 * x[4] * x[5] - 18500 * x[4] * x[6] - - 24300 * x[5] * x[6] - 4.97 + 24300 * x[5] * x[6] cx[2] = 17.9 * x[1] + 36.8 * x[2] + 113.9 * x[3] + 169.7 * x[4] + 337.8 * x[5] + 1385.2 * x[6] - - 139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6] + 1.88 - cx[3] = -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5] + 29.08 + 139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6] + cx[3] = -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5] cx[4] = - 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6] + - 78.02 + 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6] return cx end - lcon = zeros(T, 4) + lcon = T[4.97, -1.88, -29.08, -78.02] ucon = T(Inf) * ones(T, 4) return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs95"; kwargs...) end diff --git a/src/ADNLPProblems/hs96.jl b/src/ADNLPProblems/hs96.jl index 9160b494..f73a1010 100644 --- a/src/ADNLPProblems/hs96.jl +++ b/src/ADNLPProblems/hs96.jl @@ -17,17 +17,16 @@ function hs96(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where cx[1] = 17.1 * x[1] + 38.2 * x[2] + 204.2 * x[3] + 212.3 * x[4] + 623.4 * x[5] + 1495.5 * x[6] - 169 * x[1] * x[3] - 3580 * x[3] * x[5] - 3810 * x[4] * x[5] - 18500 * x[4] * x[6] - - 24300 * x[5] * x[6] - 4.97 + 24300 * x[5] * x[6] cx[2] = 17.9 * x[1] + 36.8 * x[2] + 113.9 * x[3] + 169.7 * x[4] + 337.8 * x[5] + 1385.2 * x[6] - - 139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6] + 1.88 - cx[3] = -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5] + 69.08 + 139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6] + cx[3] = -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5] cx[4] = - 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6] + - 118.02 + 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6] return cx end - lcon = zeros(T, 4) + lcon = T[4.97, -1.88, -69.08, -118.02] ucon = T(Inf) * ones(T, 4) return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs96"; kwargs...) end diff --git a/src/ADNLPProblems/hs97.jl b/src/ADNLPProblems/hs97.jl index b80067cb..1aba173f 100644 --- a/src/ADNLPProblems/hs97.jl +++ b/src/ADNLPProblems/hs97.jl @@ -17,17 +17,16 @@ function hs97(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where cx[1] = 17.1 * x[1] + 38.2 * x[2] + 204.2 * x[3] + 212.3 * x[4] + 623.4 * x[5] + 1495.5 * x[6] - 169 * x[1] * x[3] - 3580 * x[3] * x[5] - 3810 * x[4] * x[5] - 18500 * x[4] * x[6] - - 24300 * x[5] * x[6] - 32.97 + 24300 * x[5] * x[6] cx[2] = 17.9 * x[1] + 36.8 * x[2] + 113.9 * x[3] + 169.7 * x[4] + 337.8 * x[5] + 1385.2 * x[6] - - 139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6] - 25.12 - cx[3] = -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5] + 29.08 + 139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6] + cx[3] = -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5] cx[4] = - 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6] + - 78.02 + 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6] return cx end - lcon = zeros(T, 4) + lcon = T[32.97, 25.12, -29.08, -78.02] ucon = T(Inf) * ones(T, 4) return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs97"; kwargs...) end diff --git a/src/ADNLPProblems/hs98.jl b/src/ADNLPProblems/hs98.jl index eab3aea4..7f5d90fb 100644 --- a/src/ADNLPProblems/hs98.jl +++ b/src/ADNLPProblems/hs98.jl @@ -17,17 +17,16 @@ function hs98(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where cx[1] = 17.1 * x[1] + 38.2 * x[2] + 204.2 * x[3] + 212.3 * x[4] + 623.4 * x[5] + 1495.5 * x[6] - 169 * x[1] * x[3] - 3580 * x[3] * x[5] - 3810 * x[4] * x[5] - 18500 * x[4] * x[6] - - 24300 * x[5] * x[6] - 32.97 + 24300 * x[5] * x[6] cx[2] = 17.9 * x[1] + 36.8 * x[2] + 113.9 * x[3] + 169.7 * x[4] + 337.8 * x[5] + 1385.2 * x[6] - - 139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6] - 25.12 - cx[3] = -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5] + 124.08 + 139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6] + cx[3] = -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5] cx[4] = - 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6] + - 173.02 + 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6] return cx end - lcon = zeros(T, 4) + lcon = T[32.97, 25.12, -124.08, -173.02] ucon = T(Inf) * ones(T, 4) return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "hs98"; kwargs...) end diff --git a/src/ADNLPProblems/marine.jl b/src/ADNLPProblems/marine.jl index 13ca70a8..b3b90c7a 100644 --- a/src/ADNLPProblems/marine.jl +++ b/src/ADNLPProblems/marine.jl @@ -139,11 +139,21 @@ function marine(; n::Int = default_nvar, nc::Int = 1, type::Type{T} = Float64, k ncon = (nh - 1) * ne + nh * nc + nh * nc * (ne - 2) + nh * nc lcon = ucon = zeros(T, ncon) - function c!(cx, x; ne = ne, nh = nh, nc = nc, h = h, fact = fact) + + A = zeros(T, (nh - 1) * ne, length(x0)) + ngm = 2 * ne - 1 + ngmv = ngm + nh * ne + for i = 1:(nh - 1), s = 1:ne + A[i + (s - 1) * (nh - 1), ngm + i + (s - 1) * nh] = 1 + A[i + (s - 1) * (nh - 1), ngm + i + (s - 1) * nh + 1] = -1 + for j = 1:nc + A[i + (s - 1) * (nh - 1), ngmv + i + (s - 1) * nh + (j - 1) * nc] = h / fact[j + 1] + end + end + + function c!(cx, x; ne::Int = ne, nh::Int = nh, nc::Int = nc, h::Rational{Int} = h, fact::Vector{Int} = fact, ngm::Int = ngm, ngmv::Int = ngmv) g = view(x, 1:(ne - 1)) - ngm = 2 * ne - 1 m = view(x, ne:ngm) - ngmv = ngm + nh * ne ngmw = ngmv + nh * nc * ne v = reshape_array(view(x, (ngm + 1):(ngmv)), (nh, ne)) w = reshape_array(view(x, (ngmv + 1):(ngmw)), (nh, nc, ne)) @@ -152,15 +162,14 @@ function marine(; n::Int = default_nvar, nc::Int = 1, type::Type{T} = Float64, k Duc = reshape_array(view(x, (nuc + 1):(nuc + nh * nc * ne)), (nh, nc, ne)) # continuity - for i = 1:(nh - 1), s = 1:ne - cx[i + (s - 1) * (nh - 1)] = - v[i, s] + h * sum(w[i, j, s] / fact[j + 1] for j = 1:nc) - v[i + 1, s] - end - ncx = (nh - 1) * ne + #for i = 1:(nh - 1), s = 1:ne + # cx[i + (s - 1) * (nh - 1)] = + # v[i, s] + h * sum(w[i, j, s] / fact[j + 1] for j = 1:nc) - v[i + 1, s] + #end for i = 1:nh, j = 1:nc - cx[ncx + i + (j - 1) * nh] = Duc[i, j, 1] + (m[1] + g[1]) * uc[i, j, 1] + cx[i + (j - 1) * nh] = Duc[i, j, 1] + (m[1] + g[1]) * uc[i, j, 1] end - ncx += nc * nh + ncx = nc * nh for i = 1:nh, j = 1:nc, s = 2:(ne - 1) cx[ncx + i + (j - 1) * nh + (s - 2) * nh * nc] = Duc[i, j, s] - g[s - 1] * uc[i, j, s - 1] + (m[s] + g[s]) * uc[i, j, s] @@ -173,5 +182,5 @@ function marine(; n::Int = default_nvar, nc::Int = 1, type::Type{T} = Float64, k return cx end - return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "marine"; kwargs...) + return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, findnz(sparse(A))..., c!, lcon, ucon, name = "marine"; kwargs...) 
end diff --git a/src/ADNLPProblems/polygon3.jl b/src/ADNLPProblems/polygon3.jl index 8cea88d2..3207b6d2 100644 --- a/src/ADNLPProblems/polygon3.jl +++ b/src/ADNLPProblems/polygon3.jl @@ -10,12 +10,12 @@ function polygon3(args...; n::Int = default_nvar, type::Type{T} = Float64, kwarg function c!(cx, y; N = N) @views x, y = y[1:N], y[(N + 1):end] for i = 1:N - cx[i] = x[i]^2 + y[i]^2 - 1 + cx[N + i] = x[i]^2 + y[i]^2 end for i = 1:(N - 1) - cx[N + i] = x[i] * y[i + 1] - y[i] * x[i + 1] + cx[i] = x[i] * y[i + 1] - y[i] * x[i + 1] end - cx[2 * N] = x[N] * y[1] - y[N] * x[1] + cx[N] = x[N] * y[1] - y[N] * x[1] return cx end xi = zeros(T, 2 * N) @@ -23,8 +23,8 @@ function polygon3(args...; n::Int = default_nvar, type::Type{T} = Float64, kwarg f, xi, c!, - vcat(-T(Inf) * ones(T, N), zeros(T, N)), - vcat(zeros(T, N), T(Inf) * ones(T, N)), + vcat(zeros(T, N), -T(Inf) * ones(T, N)), + vcat(T(Inf) * ones(T, N), ones(T, N)), name = "polygon3"; kwargs..., ) diff --git a/src/ADNLPProblems/robotarm.jl b/src/ADNLPProblems/robotarm.jl index 7a81742e..55fc063c 100644 --- a/src/ADNLPProblems/robotarm.jl +++ b/src/ADNLPProblems/robotarm.jl @@ -23,37 +23,41 @@ function robotarm(; n::Int = default_nvar, L = 4.5, type::Type{T} = Float64, kwa x[end] end + A = zeros(T, n, 9n + 1) + for i = 1:n + A[i, 6n + i] = L + end + # constraints function function c!(cx, x; L = L, n = n) # dynamic bounds on ρ_acc, θ_acc, ϕ_acc for i = 1:n - cx[i] = L * x[6n + i] - cx[n + i] = x[7n + i] * ((L - x[i])^3 + x[i]^3) / 3 * sin(x[2n + i])^2 - cx[2 * n + i] = x[8n + i] * ((L - x[i])^3 + x[i]^3) / 3 + cx[6 * n - 6 + i] = x[7n + i] * ((L - x[i])^3 + x[i]^3) / 3 * sin(x[2n + i])^2 + cx[7 * n - 6 + i] = x[8n + i] * ((L - x[i])^3 + x[i]^3) / 3 end for i = 1:(n - 1) - cx[3 * n + i] = x[1 + i] - x[i] - x[3n + i] * x[end] / n - cx[4 * n - 1 + i] = x[n + 1 + i] - x[n + i] - x[4n + i] * x[end] / n - cx[5 * n - 2 + i] = x[2n + 1 + i] - x[2n + i] - x[5n + i] * x[end] / n - cx[6 * n - 3 + i] = x[3n + 1 + i] - x[3n + i] - x[6n + i] * x[end] / n - cx[7 * n - 4 + i] = x[4n + 1 + i] - x[4n + i] - x[7n + i] * x[end] / n - cx[8 * n - 5 + i] = x[5n + 1 + i] - x[5n + i] - x[8n + i] * x[end] / n + cx[i] = x[1 + i] - x[i] - x[3n + i] * x[end] / n + cx[n - 1 + i] = x[n + 1 + i] - x[n + i] - x[4n + i] * x[end] / n + cx[2 * n - 2 + i] = x[2n + 1 + i] - x[2n + i] - x[5n + i] * x[end] / n + cx[3 * n - 3 + i] = x[3n + 1 + i] - x[3n + i] - x[6n + i] * x[end] / n + cx[4 * n - 4 + i] = x[4n + 1 + i] - x[4n + i] - x[7n + i] * x[end] / n + cx[5 * n - 5 + i] = x[5n + 1 + i] - x[5n + i] - x[8n + i] * x[end] / n end return cx end lcon = T[ -ones(n) + zeros(6N) -ones(n) -ones(n) - zeros(6N) ] ucon = T[ ones(n) + zeros(6N) ones(n) ones(n) - zeros(6N) ] # Building a feasible x0 @@ -91,5 +95,5 @@ function robotarm(; n::Int = default_nvar, L = 4.5, type::Type{T} = Float64, kwa uvar[8n] = lvar[8n + 1] = uvar[8n + 1] = lvar[9n] = uvar[9n] = T(0) - return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, c!, lcon, ucon, name = "robotarm"; kwargs...) + return ADNLPModels.ADNLPModel!(f, x0, lvar, uvar, findnz(sparse(A))..., c!, lcon, ucon, name = "robotarm"; kwargs...) 
end diff --git a/src/Meta/allinit.jl b/src/Meta/allinit.jl index 6e1d0bd9..9ded5f27 100644 --- a/src/Meta/allinit.jl +++ b/src/Meta/allinit.jl @@ -10,7 +10,7 @@ allinit_meta = Dict( :has_bounds => false, :has_fixed_variables => false, :objtype => :other, - :contype => :general, + :contype => :linear, :best_known_lower_bound => -Inf, :best_known_upper_bound => Inf, :is_feasible => missing, @@ -19,7 +19,7 @@ allinit_meta = Dict( ) get_allinit_nvar(; n::Integer = default_nvar, kwargs...) = 4 get_allinit_ncon(; n::Integer = default_nvar, kwargs...) = 3 -get_allinit_nlin(; n::Integer = default_nvar, kwargs...) = 0 -get_allinit_nnln(; n::Integer = default_nvar, kwargs...) = 3 +get_allinit_nlin(; n::Integer = default_nvar, kwargs...) = 3 +get_allinit_nnln(; n::Integer = default_nvar, kwargs...) = 0 get_allinit_nequ(; n::Integer = default_nvar, kwargs...) = 1 get_allinit_nineq(; n::Integer = default_nvar, kwargs...) = 2 diff --git a/src/Meta/allinitc.jl b/src/Meta/allinitc.jl index 1d27f8cf..9681c796 100644 --- a/src/Meta/allinitc.jl +++ b/src/Meta/allinitc.jl @@ -19,7 +19,7 @@ allinitc_meta = Dict( ) get_allinitc_nvar(; n::Integer = default_nvar, kwargs...) = 4 get_allinitc_ncon(; n::Integer = default_nvar, kwargs...) = 4 -get_allinitc_nlin(; n::Integer = default_nvar, kwargs...) = 0 -get_allinitc_nnln(; n::Integer = default_nvar, kwargs...) = 4 +get_allinitc_nlin(; n::Integer = default_nvar, kwargs...) = 3 +get_allinitc_nnln(; n::Integer = default_nvar, kwargs...) = 1 get_allinitc_nequ(; n::Integer = default_nvar, kwargs...) = 1 get_allinitc_nineq(; n::Integer = default_nvar, kwargs...) = 3 diff --git a/src/Meta/alsotame.jl b/src/Meta/alsotame.jl index c4d76c77..89100104 100644 --- a/src/Meta/alsotame.jl +++ b/src/Meta/alsotame.jl @@ -19,7 +19,7 @@ alsotame_meta = Dict( ) get_alsotame_nvar(; n::Integer = default_nvar, kwargs...) = 2 get_alsotame_ncon(; n::Integer = default_nvar, kwargs...) = 3 -get_alsotame_nlin(; n::Integer = default_nvar, kwargs...) = 0 -get_alsotame_nnln(; n::Integer = default_nvar, kwargs...) = 3 +get_alsotame_nlin(; n::Integer = default_nvar, kwargs...) = 2 +get_alsotame_nnln(; n::Integer = default_nvar, kwargs...) = 1 get_alsotame_nequ(; n::Integer = default_nvar, kwargs...) = 1 get_alsotame_nineq(; n::Integer = default_nvar, kwargs...) = 2 diff --git a/src/Meta/avion2.jl b/src/Meta/avion2.jl index 36cd8e70..a07cfe57 100644 --- a/src/Meta/avion2.jl +++ b/src/Meta/avion2.jl @@ -10,7 +10,7 @@ avion2_meta = Dict( :has_bounds => true, :has_fixed_variables => false, :objtype => :least_squares, - :contype => :general, + :contype => :linear, :best_known_lower_bound => -Inf, :best_known_upper_bound => Inf, :is_feasible => missing, @@ -19,8 +19,8 @@ avion2_meta = Dict( ) get_avion2_nvar(; n::Integer = default_nvar, kwargs...) = 49 get_avion2_ncon(; n::Integer = default_nvar, kwargs...) = 15 -get_avion2_nlin(; n::Integer = default_nvar, kwargs...) = 0 -get_avion2_nnln(; n::Integer = default_nvar, kwargs...) = 15 +get_avion2_nlin(; n::Integer = default_nvar, kwargs...) = 15 +get_avion2_nnln(; n::Integer = default_nvar, kwargs...) = 0 get_avion2_nequ(; n::Integer = default_nvar, kwargs...) = 15 get_avion2_nineq(; n::Integer = default_nvar, kwargs...) = 0 get_avion2_nls_nequ(; n::Integer = default_nvar, kwargs...) 
= 17 diff --git a/src/Meta/booth.jl b/src/Meta/booth.jl index 1ceaf651..0f52ad34 100644 --- a/src/Meta/booth.jl +++ b/src/Meta/booth.jl @@ -10,7 +10,7 @@ booth_meta = Dict( :has_bounds => false, :has_fixed_variables => false, :objtype => :other, - :contype => :general, + :contype => :linear, :best_known_lower_bound => -Inf, :best_known_upper_bound => Inf, :is_feasible => missing, @@ -19,7 +19,7 @@ booth_meta = Dict( ) get_booth_nvar(; n::Integer = default_nvar, kwargs...) = 2 get_booth_ncon(; n::Integer = default_nvar, kwargs...) = 2 -get_booth_nlin(; n::Integer = default_nvar, kwargs...) = 0 -get_booth_nnln(; n::Integer = default_nvar, kwargs...) = 2 +get_booth_nlin(; n::Integer = default_nvar, kwargs...) = 2 +get_booth_nnln(; n::Integer = default_nvar, kwargs...) = 0 get_booth_nequ(; n::Integer = default_nvar, kwargs...) = 2 get_booth_nineq(; n::Integer = default_nvar, kwargs...) = 0 diff --git a/src/Meta/bqp1var.jl b/src/Meta/bqp1var.jl index 32e0df48..7df14737 100644 --- a/src/Meta/bqp1var.jl +++ b/src/Meta/bqp1var.jl @@ -10,7 +10,7 @@ bqp1var_meta = Dict( :has_bounds => false, :has_fixed_variables => false, :objtype => :other, - :contype => :general, + :contype => :linear, :best_known_lower_bound => -Inf, :best_known_upper_bound => 0.3125, :is_feasible => true, @@ -19,7 +19,7 @@ bqp1var_meta = Dict( ) get_bqp1var_nvar(; n::Integer = default_nvar, kwargs...) = 1 get_bqp1var_ncon(; n::Integer = default_nvar, kwargs...) = 1 -get_bqp1var_nlin(; n::Integer = default_nvar, kwargs...) = 0 -get_bqp1var_nnln(; n::Integer = default_nvar, kwargs...) = 1 +get_bqp1var_nlin(; n::Integer = default_nvar, kwargs...) = 1 +get_bqp1var_nnln(; n::Integer = default_nvar, kwargs...) = 0 get_bqp1var_nequ(; n::Integer = default_nvar, kwargs...) = 0 get_bqp1var_nineq(; n::Integer = default_nvar, kwargs...) = 1 diff --git a/src/Meta/camshape.jl b/src/Meta/camshape.jl index fb3a3045..f7536c19 100644 --- a/src/Meta/camshape.jl +++ b/src/Meta/camshape.jl @@ -10,7 +10,7 @@ camshape_meta = Dict( :has_bounds => true, :has_fixed_variables => false, :objtype => :other, - :contype => :general, + :contype => :quadratic, :best_known_lower_bound => -Inf, :best_known_upper_bound => Inf, :is_feasible => missing, @@ -19,7 +19,7 @@ camshape_meta = Dict( ) get_camshape_nvar(; n::Integer = default_nvar, kwargs...) = 1 * n + 0 get_camshape_ncon(; n::Integer = default_nvar, kwargs...) = 2 * n + 3 -get_camshape_nlin(; n::Integer = default_nvar, kwargs...) = 0 -get_camshape_nnln(; n::Integer = default_nvar, kwargs...) = 2 * n + 3 +get_camshape_nlin(; n::Integer = default_nvar, kwargs...) = n + 2 +get_camshape_nnln(; n::Integer = default_nvar, kwargs...) = n + 1 get_camshape_nequ(; n::Integer = default_nvar, kwargs...) = 0 get_camshape_nineq(; n::Integer = default_nvar, kwargs...) = 2 * n + 3 diff --git a/src/Meta/marine.jl b/src/Meta/marine.jl index bb83bf5a..c32e85d7 100644 --- a/src/Meta/marine.jl +++ b/src/Meta/marine.jl @@ -21,9 +21,9 @@ get_marine_nvar(; n::Integer = default_nvar, nc::Int = 1, kwargs...) = 8 + 7 + Int(round((n - 2 * 8 + 1) / (3 * 8 * nc + 8))) * (8 + 3 * 8 * nc) get_marine_ncon(; n::Integer = default_nvar, nc::Int = 1, kwargs...) = Int(round((n - 2 * 8 + 1) / (3 * 8 * nc + 8))) * (8 + 2 * nc + nc * (8 - 2)) - 8 -get_marine_nlin(; n::Integer = default_nvar, nc::Int = 1, kwargs...) = 0 +get_marine_nlin(; n::Integer = default_nvar, nc::Int = 1, kwargs...) = 8 * (Int(round((n - 2 * 8 + 1) / (3 * 8 * 1 + 8))) - 1) # (nh - 1) * ne get_marine_nnln(; n::Integer = default_nvar, nc::Int = 1, kwargs...) 
= - Int(round((n - 2 * 8 + 1) / (3 * 8 * nc + 8))) * (8 + 2 * nc + nc * (8 - 2)) - 8 + Int(round((n - 2 * 8 + 1) / (3 * 8 * nc + 8))) * (2 * nc + nc * (8 - 2)) get_marine_nequ(; n::Integer = default_nvar, nc::Int = 1, kwargs...) = Int(round((n - 2 * 8 + 1) / (3 * 8 * nc + 8))) * (8 + 2 * nc + nc * (8 - 2)) - 8 get_marine_nineq(; n::Integer = default_nvar, nc::Int = 1, kwargs...) = 0 diff --git a/src/Meta/robotarm.jl b/src/Meta/robotarm.jl index c4aa14fb..50960a66 100644 --- a/src/Meta/robotarm.jl +++ b/src/Meta/robotarm.jl @@ -20,8 +20,8 @@ robotarm_meta = Dict( get_robotarm_nvar(; n::Integer = default_nvar, kwargs...) = 9 * (max(2, div(n, 9)) + 1) + 1 get_robotarm_ncon(; n::Integer = default_nvar, kwargs...) = 3 * (max(2, div(n, 9)) + 1) + 6 * max(2, div(n, 9)) -get_robotarm_nlin(; n::Integer = default_nvar, kwargs...) = 0 +get_robotarm_nlin(; n::Integer = default_nvar, kwargs...) = max(2, div(n, 9)) + 1 get_robotarm_nnln(; n::Integer = default_nvar, kwargs...) = - 3 * (max(2, div(n, 9)) + 1) + 6 * max(2, div(n, 9)) + 2 * (max(2, div(n, 9)) + 1) + 6 * max(2, div(n, 9)) get_robotarm_nequ(; n::Integer = default_nvar, kwargs...) = 6 * max(2, div(n, 9)) get_robotarm_nineq(; n::Integer = default_nvar, kwargs...) = 3 * (max(2, div(n, 9)) + 1) diff --git a/src/PureJuMP/AMPGO02.jl b/src/PureJuMP/AMPGO02.jl index dace80a1..c10ef1ee 100644 --- a/src/PureJuMP/AMPGO02.jl +++ b/src/PureJuMP/AMPGO02.jl @@ -14,7 +14,7 @@ function AMPGO02(args...; kwargs...) @variable(nlp, x, start = 2.7) - @NLobjective(nlp, Min, sin(x) + sin((10.0 / 3.0) * x)) + @objective(nlp, Min, sin(x) + sin((10.0 / 3.0) * x)) return nlp end diff --git a/src/PureJuMP/AMPGO03.jl b/src/PureJuMP/AMPGO03.jl index d3699b54..fbe60783 100644 --- a/src/PureJuMP/AMPGO03.jl +++ b/src/PureJuMP/AMPGO03.jl @@ -14,7 +14,7 @@ function AMPGO03(args...; kwargs...) @variable(nlp, x, start = -10.0) - @NLobjective( + @objective( nlp, Min, -( diff --git a/src/PureJuMP/AMPGO04.jl b/src/PureJuMP/AMPGO04.jl index 9d85b0ff..b637c2c8 100644 --- a/src/PureJuMP/AMPGO04.jl +++ b/src/PureJuMP/AMPGO04.jl @@ -14,7 +14,7 @@ function AMPGO04(args...; kwargs...) @variable(nlp, x, start = 1.9) - @NLobjective(nlp, Min, -(16 * x^2 - 24 * x + 5) * exp(-x)) + @objective(nlp, Min, -(16 * x^2 - 24 * x + 5) * exp(-x)) return nlp end diff --git a/src/PureJuMP/AMPGO05.jl b/src/PureJuMP/AMPGO05.jl index 174f0196..2b772e77 100644 --- a/src/PureJuMP/AMPGO05.jl +++ b/src/PureJuMP/AMPGO05.jl @@ -13,7 +13,7 @@ function AMPGO05(args...; kwargs...) @variable(nlp, x, start = 0.0) - @NLobjective(nlp, Min, -(1.4 - 3.0 * x) * sin(18.0 * x)) + @objective(nlp, Min, -(1.4 - 3.0 * x) * sin(18.0 * x)) return nlp end diff --git a/src/PureJuMP/AMPGO06.jl b/src/PureJuMP/AMPGO06.jl index 1b64ebce..06050e73 100644 --- a/src/PureJuMP/AMPGO06.jl +++ b/src/PureJuMP/AMPGO06.jl @@ -14,7 +14,7 @@ function AMPGO06(args...; kwargs...) @variable(nlp, x, start = -10.0) - @NLobjective(nlp, Min, -(x + sin(x)) * exp(-x^2)) + @objective(nlp, Min, -(x + sin(x)) * exp(-x^2)) return nlp end diff --git a/src/PureJuMP/AMPGO07.jl b/src/PureJuMP/AMPGO07.jl index 0408207b..64ccbe1a 100644 --- a/src/PureJuMP/AMPGO07.jl +++ b/src/PureJuMP/AMPGO07.jl @@ -14,7 +14,7 @@ function AMPGO07(args...; kwargs...) 
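  # In recent JuMP releases (the unified nonlinear interface, introduced around
  # JuMP 1.15) the generic @objective and @constraint macros accept nonlinear
  # expressions directly, so objectives such as sin(x) + sin((10/3) * x) below need
  # no legacy @NLobjective/@NLconstraint macro. A minimal sketch, assuming such a
  # JuMP version (toy model, not part of this collection):
  #
  #     using JuMP
  #     model = Model()
  #     @variable(model, x, start = 1.0)
  #     @objective(model, Min, sin(x) + x^2)      # nonlinear objective, plain macro
  #     @constraint(model, con1, exp(x) <= 2.0)   # nonlinear constraint, plain macro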
@variable(nlp, x, start = 2.7) - @NLobjective( + @objective( nlp, Min, ifelse(x <= 0.0, Inf, sin(x) + sin((10.0 / 3.0) * x) + log(abs(x)) - 0.84 * x + 3) diff --git a/src/PureJuMP/AMPGO08.jl b/src/PureJuMP/AMPGO08.jl index 72c14b87..9d17ed01 100644 --- a/src/PureJuMP/AMPGO08.jl +++ b/src/PureJuMP/AMPGO08.jl @@ -13,7 +13,7 @@ function AMPGO08(args...; kwargs...) @variable(nlp, x, start = -10.0) - @NLobjective( + @objective( nlp, Min, -( diff --git a/src/PureJuMP/AMPGO09.jl b/src/PureJuMP/AMPGO09.jl index e6ca23b0..67ad0ea9 100644 --- a/src/PureJuMP/AMPGO09.jl +++ b/src/PureJuMP/AMPGO09.jl @@ -14,7 +14,7 @@ function AMPGO09(args...; kwargs...) @variable(nlp, x, start = 3.1) - @NLobjective(nlp, Min, sin(x) + sin((2.0 / 3.0) * x)) + @objective(nlp, Min, sin(x) + sin((2.0 / 3.0) * x)) return nlp end diff --git a/src/PureJuMP/AMPGO10.jl b/src/PureJuMP/AMPGO10.jl index 3d47c448..dcd69271 100644 --- a/src/PureJuMP/AMPGO10.jl +++ b/src/PureJuMP/AMPGO10.jl @@ -14,7 +14,7 @@ function AMPGO10(args...; kwargs...) @variable(nlp, x, start = 0.0) - @NLobjective(nlp, Min, -x * sin(x)) + @objective(nlp, Min, -x * sin(x)) return nlp end diff --git a/src/PureJuMP/AMPGO11.jl b/src/PureJuMP/AMPGO11.jl index ab360b31..2b8234a9 100644 --- a/src/PureJuMP/AMPGO11.jl +++ b/src/PureJuMP/AMPGO11.jl @@ -14,7 +14,7 @@ function AMPGO11(args...; kwargs...) @variable(nlp, x, start = -pi) - @NLobjective(nlp, Min, 2 * cos(x) + cos(2 * x)) + @objective(nlp, Min, 2 * cos(x) + cos(2 * x)) return nlp end diff --git a/src/PureJuMP/AMPGO12.jl b/src/PureJuMP/AMPGO12.jl index bfe8bf44..6006f113 100644 --- a/src/PureJuMP/AMPGO12.jl +++ b/src/PureJuMP/AMPGO12.jl @@ -14,7 +14,7 @@ function AMPGO12(args...; kwargs...) @variable(nlp, x, start = 0.0) - @NLobjective(nlp, Min, sin(x)^3 + cos(x)^3) + @objective(nlp, Min, sin(x)^3 + cos(x)^3) return nlp end diff --git a/src/PureJuMP/AMPGO13.jl b/src/PureJuMP/AMPGO13.jl index d01e3681..42160c98 100644 --- a/src/PureJuMP/AMPGO13.jl +++ b/src/PureJuMP/AMPGO13.jl @@ -14,7 +14,7 @@ function AMPGO13(args...; kwargs...) @variable(nlp, x, start = 0.001) - @NLobjective(nlp, Min, ifelse(0.0 < x < 1.0, -(x^(2 / 3) + (1 - x^2)^(1 / 3)), Inf)) + @objective(nlp, Min, ifelse(0.0 < x < 1.0, -(x^(2 / 3) + (1 - x^2)^(1 / 3)), Inf)) return nlp end diff --git a/src/PureJuMP/AMPGO14.jl b/src/PureJuMP/AMPGO14.jl index 05dd7335..f1ed6ccd 100644 --- a/src/PureJuMP/AMPGO14.jl +++ b/src/PureJuMP/AMPGO14.jl @@ -14,7 +14,7 @@ function AMPGO14(args...; kwargs...) @variable(nlp, x, start = 0.0) - @NLobjective(nlp, Min, -exp(-x) * sin(2 * pi * x)) + @objective(nlp, Min, -exp(-x) * sin(2 * pi * x)) return nlp end diff --git a/src/PureJuMP/AMPGO15.jl b/src/PureJuMP/AMPGO15.jl index 53e54309..8cfa86d2 100644 --- a/src/PureJuMP/AMPGO15.jl +++ b/src/PureJuMP/AMPGO15.jl @@ -14,7 +14,7 @@ function AMPGO15(args...; kwargs...) @variable(nlp, x, start = -5.0) - @NLobjective(nlp, Min, (x^2 - 5 * x + 6) / (x^2 + 1)) + @objective(nlp, Min, (x^2 - 5 * x + 6) / (x^2 + 1)) return nlp end diff --git a/src/PureJuMP/AMPGO18.jl b/src/PureJuMP/AMPGO18.jl index 2ba75eed..0dfaf0ec 100644 --- a/src/PureJuMP/AMPGO18.jl +++ b/src/PureJuMP/AMPGO18.jl @@ -14,7 +14,7 @@ function AMPGO18(args...; kwargs...) 
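  # Piecewise expressions also go through the generic macros: inside @objective and
  # @constraint, `ifelse` and comparison operators are rewritten onto the nonlinear
  # operators of the new interface, so conditional objectives like the one below work
  # without an @NL macro. Illustrative sketch only (toy objective):
  #
  #     using JuMP
  #     model = Model()
  #     @variable(model, x, start = 0.0)
  #     @objective(model, Min, ifelse(x >= 0, x^2, -x))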
@variable(nlp, x, start = 0.0) - @NLobjective(nlp, Min, ifelse(x <= 3.0, (x - 2.0)^2, log(x - 2.0) + log(x - 2.0) + 1.0)) + @objective(nlp, Min, ifelse(x <= 3.0, (x - 2.0)^2, log(x - 2.0) + log(x - 2.0) + 1.0)) return nlp end diff --git a/src/PureJuMP/AMPGO20.jl b/src/PureJuMP/AMPGO20.jl index 006b60a8..f934724f 100644 --- a/src/PureJuMP/AMPGO20.jl +++ b/src/PureJuMP/AMPGO20.jl @@ -14,7 +14,7 @@ function AMPGO20(args...; kwargs...) @variable(nlp, x, start = -10.0) - @NLobjective(nlp, Min, -(x - sin(x)) * exp(-x^2)) + @objective(nlp, Min, -(x - sin(x)) * exp(-x^2)) return nlp end diff --git a/src/PureJuMP/AMPGO21.jl b/src/PureJuMP/AMPGO21.jl index a80bd0e4..09061485 100644 --- a/src/PureJuMP/AMPGO21.jl +++ b/src/PureJuMP/AMPGO21.jl @@ -14,7 +14,7 @@ function AMPGO21(args...; kwargs...) @variable(nlp, x, start = 0.0) - @NLobjective(nlp, Min, (x * sin(x)) + (x * cos(2 * x))) + @objective(nlp, Min, (x * sin(x)) + (x * cos(2 * x))) return nlp end diff --git a/src/PureJuMP/AMPGO22.jl b/src/PureJuMP/AMPGO22.jl index 8e96a470..52ecf460 100644 --- a/src/PureJuMP/AMPGO22.jl +++ b/src/PureJuMP/AMPGO22.jl @@ -14,7 +14,7 @@ function AMPGO22(args...; kwargs...) @variable(nlp, x, start = 0.0) - @NLobjective(nlp, Min, exp(-3.0 * x) - sin(x)^3) + @objective(nlp, Min, exp(-3.0 * x) - sin(x)^3) return nlp end diff --git a/src/PureJuMP/BOX2.jl b/src/PureJuMP/BOX2.jl index 1800c03b..655cc762 100644 --- a/src/PureJuMP/BOX2.jl +++ b/src/PureJuMP/BOX2.jl @@ -18,7 +18,7 @@ function BOX2(args...; n::Int = default_nvar, m::Int = 10, kwargs...) x0 = [0.0; 10.0; 1.0] @variable(nlp, x[i = 1:3], start = x0[i]) - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/BOX3.jl b/src/PureJuMP/BOX3.jl index b14a101b..73eee73e 100644 --- a/src/PureJuMP/BOX3.jl +++ b/src/PureJuMP/BOX3.jl @@ -23,7 +23,7 @@ function BOX3(args...; n::Int = default_nvar, m::Int = 2n, kwargs...) x0 = [0.0; 10.0; 20.0] @variable(nlp, x[i = 1:3], start = x0[i]) - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/Dus2_1.jl b/src/PureJuMP/Dus2_1.jl index d06b5510..9af8c5a9 100644 --- a/src/PureJuMP/Dus2_1.jl +++ b/src/PureJuMP/Dus2_1.jl @@ -14,7 +14,7 @@ function Dus2_1(args...; kwargs...) @variable(nlp, x, start = -1.0) - @NLobjective(nlp, Min, exp(x * (x - 1))) + @objective(nlp, Min, exp(x * (x - 1))) return nlp end diff --git a/src/PureJuMP/Dus2_3.jl b/src/PureJuMP/Dus2_3.jl index eb4f01eb..d4fccea5 100644 --- a/src/PureJuMP/Dus2_3.jl +++ b/src/PureJuMP/Dus2_3.jl @@ -13,7 +13,7 @@ function Dus2_3(args...; kwargs...) @variable(nlp, x, start = -2.0) - @NLobjective(nlp, Min, 1 - (1 / (5 * (x^2) - 6 * x + 5))) + @objective(nlp, Min, 1 - (1 / (5 * (x^2) - 6 * x + 5))) return nlp end diff --git a/src/PureJuMP/Dus2_9.jl b/src/PureJuMP/Dus2_9.jl index cdfe9169..5b0d0461 100644 --- a/src/PureJuMP/Dus2_9.jl +++ b/src/PureJuMP/Dus2_9.jl @@ -13,7 +13,7 @@ function Dus2_9(args...; kwargs...) @variable(nlp, x, start = 0.0) - @NLobjective(nlp, Min, 1 - 12 * x + 7.5 * x^2 - x^3) + @objective(nlp, Min, 1 - 12 * x + 7.5 * x^2 - x^3) return nlp end diff --git a/src/PureJuMP/Duscube.jl b/src/PureJuMP/Duscube.jl index 631bec6c..321b27f8 100644 --- a/src/PureJuMP/Duscube.jl +++ b/src/PureJuMP/Duscube.jl @@ -13,7 +13,7 @@ function Duscube(args...; kwargs...) 
@variable(nlp, x, start = 1.0) - @NLobjective(nlp, Min, x^3 - (x - 4)^2 - 100 * x) + @objective(nlp, Min, x^3 - (x - 4)^2 - 100 * x) return nlp end diff --git a/src/PureJuMP/NZF1.jl b/src/PureJuMP/NZF1.jl index 85ef14ef..f0eb746c 100644 --- a/src/PureJuMP/NZF1.jl +++ b/src/PureJuMP/NZF1.jl @@ -17,7 +17,7 @@ function NZF1(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 1) - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/aircrfta.jl b/src/PureJuMP/aircrfta.jl index 38020fe4..8783a28b 100644 --- a/src/PureJuMP/aircrfta.jl +++ b/src/PureJuMP/aircrfta.jl @@ -32,8 +32,8 @@ function aircrfta(; n::Int = default_nvar, kwargs...) @variable(nlp, 0.0 <= aileron <= 0.0, start = 0.0) @variable(nlp, 0.0 <= rudderdf <= 0.0, start = 0.0) - @NLobjective(nlp, Min, 0) - @NLconstraint( + @objective(nlp, Min, 0) + @constraint( nlp, cons1, ( @@ -42,7 +42,7 @@ function aircrfta(; n::Int = default_nvar, kwargs...) 684.4 * attckang * sslipang + 63.5 * pitchrat * attckang ) == 0 ) - @NLconstraint( + @constraint( nlp, cons2, ( @@ -51,7 +51,7 @@ function aircrfta(; n::Int = default_nvar, kwargs...) 0.173 * rollrate * sslipang ) == 0 ) - @NLconstraint( + @constraint( nlp, cons3, ( @@ -59,8 +59,8 @@ function aircrfta(; n::Int = default_nvar, kwargs...) 0.716 * rollrate * pitchrat - 1.578 * rollrate * attckang + 1.132 * pitchrat * attckang ) == 0 ) - @NLconstraint(nlp, cons4, (pitchrat - attckang - 1.168 * elevator - rollrate * sslipang) == 0) - @NLconstraint( + @constraint(nlp, cons4, (pitchrat - attckang - 1.168 * elevator - rollrate * sslipang) == 0) + @constraint( nlp, cons5, (-yawrate - 0.196 * sslipang - 0.0071 * aileron + rollrate * attckang) == 0 diff --git a/src/PureJuMP/allinit.jl b/src/PureJuMP/allinit.jl index 209be6e5..86209562 100644 --- a/src/PureJuMP/allinit.jl +++ b/src/PureJuMP/allinit.jl @@ -22,7 +22,7 @@ function allinit(; n::Int = default_nvar, kwargs...) @variable(nlp, -Inf <= x[1:4] <= Inf, start = 0.0) - @NLobjective( + @objective( nlp, Min, x[3] - 1 + x[1]^2 + x[2]^2 + (x[3] + x[4])^2 + sin(x[3])^2 + x[1]^2 * x[2]^2 + x[4] - 3 + @@ -33,9 +33,9 @@ function allinit(; n::Int = default_nvar, kwargs...) (x[1] - 4 + sin(x[4])^2 + x[2]^2 * x[3]^2)^2 + sin(x[4])^4 ) - @NLconstraint(nlp, cons1, x[2] - 1 >= 0) - @NLconstraint(nlp, cons2, -1e+10 <= x[3] <= 1) - @NLconstraint(nlp, cons3, x[4] - 2 == 0) + @constraint(nlp, cons3, x[4] - 2 == 0) + @constraint(nlp, cons1, x[2] - 1 >= 0) + @constraint(nlp, cons2, -1e+10 <= x[3] <= 1) return nlp end diff --git a/src/PureJuMP/allinitc.jl b/src/PureJuMP/allinitc.jl index 694fde42..03e8cc09 100644 --- a/src/PureJuMP/allinitc.jl +++ b/src/PureJuMP/allinitc.jl @@ -22,7 +22,7 @@ function allinitc(; n::Int = default_nvar, kwargs...) @variable(nlp, -Inf <= x[1:4] <= Inf, start = 0.0) - @NLobjective( + @objective( nlp, Min, x[3] - 1 + x[1]^2 + x[2]^2 + (x[3] + x[4])^2 + sin(x[3])^2 + x[1]^2 * x[2]^2 + x[4] - 3 + @@ -33,10 +33,10 @@ function allinitc(; n::Int = default_nvar, kwargs...) 
(x[1] - 4 + sin(x[4])^2 + x[2]^2 * x[3]^2)^2 + sin(x[4])^4 ) - @NLconstraint(nlp, cons1, x[2] - 1 >= 0) - @NLconstraint(nlp, cons2, -1e+10 <= x[3] <= 1) - @NLconstraint(nlp, cons3, x[4] - 2 == 0) - @NLconstraint(nlp, cons4, x[1]^2 + x[2]^2 - 1 <= 0) + @constraint(nlp, cons3, x[4] - 2 == 0) + @constraint(nlp, cons1, x[2] - 1 >= 0) + @constraint(nlp, cons2, -1e+10 <= x[3] <= 1) + @constraint(nlp, cons4, x[1]^2 + x[2]^2 - 1 <= 0) return nlp end diff --git a/src/PureJuMP/allinitu.jl b/src/PureJuMP/allinitu.jl index 216e95ed..ead60259 100644 --- a/src/PureJuMP/allinitu.jl +++ b/src/PureJuMP/allinitu.jl @@ -22,7 +22,7 @@ function allinitu(; n::Int = default_nvar, kwargs...) @variable(nlp, -Inf <= x[1:4] <= Inf, start = 0.0) - @NLobjective( + @objective( nlp, Min, x[3] - 1 + x[1]^2 + x[2]^2 + (x[3] + x[4])^2 + sin(x[3])^2 + x[1]^2 * x[2]^2 + x[4] - 3 + diff --git a/src/PureJuMP/alsotame.jl b/src/PureJuMP/alsotame.jl index 5d00bdd7..85382083 100644 --- a/src/PureJuMP/alsotame.jl +++ b/src/PureJuMP/alsotame.jl @@ -25,10 +25,10 @@ function alsotame(; n::Int = default_nvar, kwargs...) @variable(nlp, -Inf <= x <= Inf, start = 0.0) @variable(nlp, -Inf <= y <= Inf, start = 0.0) - @NLobjective(nlp, Min, exp(x - 2 * y)) - @NLconstraint(nlp, cons1, sin(-x + y - 1) == 0) - @NLconstraint(nlp, cons2, -2 <= x <= 2) - @NLconstraint(nlp, cons3, -1.5 <= y <= 1.5) + @objective(nlp, Min, exp(x - 2 * y)) + @constraint(nlp, cons1, sin(-x + y - 1) == 0) + @constraint(nlp, cons2, -2 <= x <= 2) + @constraint(nlp, cons3, -1.5 <= y <= 1.5) return nlp end diff --git a/src/PureJuMP/argauss.jl b/src/PureJuMP/argauss.jl index f95cdc78..2223f88d 100644 --- a/src/PureJuMP/argauss.jl +++ b/src/PureJuMP/argauss.jl @@ -48,8 +48,8 @@ function argauss(; n::Int = default_nvar, kwargs...) @variable(nlp, -Inf <= x[i = 1:3] <= Inf, start = xinit[i]) - @NLobjective(nlp, Min, 0) - @NLconstraint( + @objective(nlp, Min, 0) + @constraint( nlp, cons[i = 1:15], x[1] * exp(-0.5 * x[2] * (0.5 * (8 - i) - x[3])^2) - rhs[i] == 0 diff --git a/src/PureJuMP/arglina.jl b/src/PureJuMP/arglina.jl index a458f0cf..7828211e 100644 --- a/src/PureJuMP/arglina.jl +++ b/src/PureJuMP/arglina.jl @@ -20,7 +20,7 @@ function arglina(args...; n::Int = default_nvar, m::Int = 2n, kwargs...) @variable(nlp, x[j = 1:n], start = 1.0) - @NLobjective( + @objective( nlp, Min, 0.5 * sum((x[i] - 2 / m * sum(x[j] for j = 1:n) - 1)^2 for i = 1:n) + diff --git a/src/PureJuMP/arglinb.jl b/src/PureJuMP/arglinb.jl index b7630883..7e30e064 100644 --- a/src/PureJuMP/arglinb.jl +++ b/src/PureJuMP/arglinb.jl @@ -20,7 +20,7 @@ function arglinb(args...; n::Int = default_nvar, m::Int = 2n, kwargs...) @variable(nlp, x[j = 1:n], start = 1.0) - @NLobjective(nlp, Min, 0.5 * sum((i * sum(j * x[j] for j = 1:n) - 1)^2 for i = 1:m)) + @objective(nlp, Min, 0.5 * sum((i * sum(j * x[j] for j = 1:n) - 1)^2 for i = 1:m)) return nlp end diff --git a/src/PureJuMP/arglinc.jl b/src/PureJuMP/arglinc.jl index f436d4ba..a7436f0a 100644 --- a/src/PureJuMP/arglinc.jl +++ b/src/PureJuMP/arglinc.jl @@ -20,7 +20,7 @@ function arglinc(args...; n::Int = default_nvar, m::Int = 2n, kwargs...) @variable(nlp, x[j = 1:n], start = 1.0) - @NLobjective( + @objective( nlp, Min, 2 + sum(((i - 1) * sum(j * x[j] for j = 2:(n - 1)) - 1)^2 for i = 2:(m - 1)) diff --git a/src/PureJuMP/argtrig.jl b/src/PureJuMP/argtrig.jl index 384acfb3..8e70a521 100644 --- a/src/PureJuMP/argtrig.jl +++ b/src/PureJuMP/argtrig.jl @@ -26,7 +26,7 @@ function argtrig(args...; n::Int = default_nvar, m::Int = 2n, kwargs...) 
@variable(nlp, x[j = 1:n], start = 1 / n) - @NLobjective(nlp, Min, n - sum(cos(x[j]) + j * (1 - cos(x[j])) - sin(x[j]) for j = 1:n)) + @objective(nlp, Min, n - sum(cos(x[j]) + j * (1 - cos(x[j])) - sin(x[j]) for j = 1:n)) return nlp end diff --git a/src/PureJuMP/arwhead.jl b/src/PureJuMP/arwhead.jl index fad97958..f7dcf712 100644 --- a/src/PureJuMP/arwhead.jl +++ b/src/PureJuMP/arwhead.jl @@ -35,7 +35,7 @@ function arwhead(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 1.0) - @NLobjective(nlp, Min, sum((x[i]^2 + x[n]^2)^2 - 4 * x[i] + 3 for i = 1:(n - 1))) + @objective(nlp, Min, sum((x[i]^2 + x[n]^2)^2 - 4 * x[i] + 3 for i = 1:(n - 1))) return nlp end diff --git a/src/PureJuMP/avion2.jl b/src/PureJuMP/avion2.jl index d4350fee..2fa45437 100644 --- a/src/PureJuMP/avion2.jl +++ b/src/PureJuMP/avion2.jl @@ -67,7 +67,7 @@ function avion2(; n::Int = default_nvar, kwargs...) @variable(nlp, 1 <= NP <= 2, start = 1) @variable(nlp, 1 <= NG <= 2, start = 1) - @NLobjective( + @objective( nlp, Min, (SK - 0.01 * PK * SR)^2 + @@ -88,21 +88,21 @@ function avion2(; n::Int = default_nvar, kwargs...) (CB - 2 * (VN - CA * LF^3) / (LF^2 * (3 - SO * LF)))^2 + (IMPVOIL - 1.15 * SX * (15 + 0.15 * SX) * (8 + (MC * LX / (50 * SR * EL))^1.5))^2 ) - @NLconstraint(nlp, cons1, SD - 0.13 * SR == 0) - @NLconstraint(nlp, cons2, SX - 0.7 * SR == 0) - @NLconstraint(nlp, cons3, LX - LR == 0) - @NLconstraint(nlp, cons5, SF - ST - 2 * SD - 2 * SX - 2 * SK == 0) - @NLconstraint(nlp, cons11, IMPFUS - 20 * SF == 0) - @NLconstraint(nlp, cons12, MD - 2 * MV == 0) - @NLconstraint(nlp, cons15, QF - QI - QV == 0) - @NLconstraint(nlp, cons17, IMPTRAIN - 0.137 * MV == 0) - @NLconstraint(nlp, cons19, IMPNMOT - 35 * NM == 0) - @NLconstraint(nlp, cons20, IMPPET - 0.043 * QI == 0) - @NLconstraint(nlp, cons21, IMPPIL - 200 * NP == 0) - @NLconstraint(nlp, cons22, IMPCAN - 120 * NG == 0) - @NLconstraint(nlp, cons23, IMPSNA - 300 * NS - 400 == 0) - @NLconstraint(nlp, cons24, MC - MV + 95 * NP + 70 * NG + 660 * NM + 0.5 * QI - 380 == 0) - @NLconstraint(nlp, cons25, MZ - IMPTRAIN + IMPNMOT + IMPPET + IMPPIL + IMPCAN + IMPSNA + 290 == 0) + @constraint(nlp, cons1, SD - 0.13 * SR == 0) + @constraint(nlp, cons2, SX - 0.7 * SR == 0) + @constraint(nlp, cons3, LX - LR == 0) + @constraint(nlp, cons5, SF - ST - 2 * SD - 2 * SX - 2 * SK == 0) + @constraint(nlp, cons11, IMPFUS - 20 * SF == 0) + @constraint(nlp, cons12, MD - 2 * MV == 0) + @constraint(nlp, cons15, QF - QI - QV == 0) + @constraint(nlp, cons17, IMPTRAIN - 0.137 * MV == 0) + @constraint(nlp, cons19, IMPNMOT - 35 * NM == 0) + @constraint(nlp, cons20, IMPPET - 0.043 * QI == 0) + @constraint(nlp, cons21, IMPPIL - 200 * NP == 0) + @constraint(nlp, cons22, IMPCAN - 120 * NG == 0) + @constraint(nlp, cons23, IMPSNA - 300 * NS - 400 == 0) + @constraint(nlp, cons24, MC - MV + 95 * NP + 70 * NG + 660 * NM + 0.5 * QI - 380 == 0) + @constraint(nlp, cons25, MZ - IMPTRAIN + IMPNMOT + IMPPET + IMPPIL + IMPCAN + IMPSNA + 290 == 0) return nlp end diff --git a/src/PureJuMP/bard.jl b/src/PureJuMP/bard.jl index f1359f4d..dd54e0cf 100644 --- a/src/PureJuMP/bard.jl +++ b/src/PureJuMP/bard.jl @@ -20,7 +20,7 @@ function bard(args...; n::Int = default_nvar, m::Int = 2n, kwargs...) 
@variable(nlp, x[j = 1:3], start = 1.0) - @NLobjective( + @objective( nlp, Min, 0.5 * sum(y[i] - (x[1] + i / ((16 - i) * x[2] + min(i, 16 - i) * x[3])) for i = 1:15) diff --git a/src/PureJuMP/bdqrtic.jl b/src/PureJuMP/bdqrtic.jl index aef13033..12868389 100644 --- a/src/PureJuMP/bdqrtic.jl +++ b/src/PureJuMP/bdqrtic.jl @@ -33,7 +33,7 @@ function bdqrtic(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 1.0) - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/beale.jl b/src/PureJuMP/beale.jl index 074372d0..13b7eff7 100644 --- a/src/PureJuMP/beale.jl +++ b/src/PureJuMP/beale.jl @@ -18,7 +18,7 @@ function beale(args...; kwargs...) @variable(nlp, x[i = 1:2], start = 1.0) - @NLobjective( + @objective( nlp, Min, 0.5 * (1.5 - x[1] * (1.0 - x[2]))^2 + diff --git a/src/PureJuMP/bearing.jl b/src/PureJuMP/bearing.jl index e4cc6e94..86a3d47b 100644 --- a/src/PureJuMP/bearing.jl +++ b/src/PureJuMP/bearing.jl @@ -46,7 +46,7 @@ function bearing( # var v {i in 0..nx+1, 0..ny+1} >= 0; @variable(nlp, uvar[i, j] >= v[i = 1:(nx + 2), j = 1:(ny + 2)] >= 0, start = max(sin(i * hx), 0)) - @NLobjective( + @objective( nlp, Min, 0.5 * diff --git a/src/PureJuMP/bennett5.jl b/src/PureJuMP/bennett5.jl index a6289826..5e6315cb 100644 --- a/src/PureJuMP/bennett5.jl +++ b/src/PureJuMP/bennett5.jl @@ -199,7 +199,7 @@ function bennett5(args...; kwargs...) @variable(nlp, x[j = 1:3]) set_start_value.(x, [-2000, 50, 0.8]) # other initial points [-1500, 45, 0.85] - @NLobjective(nlp, Min, 0.5 * sum((y[i, 1] - x[1] * (x[2] + y[i, 2])^(-1 / x[3]))^2 for i = 1:154)) + @objective(nlp, Min, 0.5 * sum((y[i, 1] - x[1] * (x[2] + y[i, 2])^(-1 / x[3]))^2 for i = 1:154)) return nlp end diff --git a/src/PureJuMP/biggs5.jl b/src/PureJuMP/biggs5.jl index c1e1da4f..5234eb0f 100644 --- a/src/PureJuMP/biggs5.jl +++ b/src/PureJuMP/biggs5.jl @@ -23,7 +23,7 @@ function biggs5(args...; n::Int = default_nvar, m::Int = 13, kwargs...) x0 = [1.0; 2.0; 1.0; 1.0; 1.0; 1.0] @variable(nlp, x[i = 1:6], start = x0[i]) fix(x[6], 3) - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/biggs6.jl b/src/PureJuMP/biggs6.jl index c8d0edb3..7ad915b3 100644 --- a/src/PureJuMP/biggs6.jl +++ b/src/PureJuMP/biggs6.jl @@ -19,7 +19,7 @@ function biggs6(args...; n::Int = default_nvar, m::Int = 13, kwargs...) nlp = Model() x0 = [1.0; 2.0; 1.0; 1.0; 1.0; 1.0] @variable(nlp, x[i = 1:6], start = x0[i]) - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/booth.jl b/src/PureJuMP/booth.jl index 07299896..009312e4 100644 --- a/src/PureJuMP/booth.jl +++ b/src/PureJuMP/booth.jl @@ -25,9 +25,9 @@ function booth(; n::Int = default_nvar, kwargs...) @variable(nlp, -Inf <= x[1:2] <= Inf, start = 0.0) - @NLobjective(nlp, Min, 0) - @NLconstraint(nlp, cons1, (x[1] + 2 * x[2] - 7) == 0) - @NLconstraint(nlp, cons2, (2 * x[1] + x[2] - 5) == 0) + @objective(nlp, Min, 0) + @constraint(nlp, cons1, (x[1] + 2 * x[2] - 7) == 0) + @constraint(nlp, cons2, (2 * x[1] + x[2] - 5) == 0) return nlp end diff --git a/src/PureJuMP/boxbod.jl b/src/PureJuMP/boxbod.jl index 2952b487..2ddf9aa1 100644 --- a/src/PureJuMP/boxbod.jl +++ b/src/PureJuMP/boxbod.jl @@ -51,7 +51,7 @@ function boxbod(args...; kwargs...) 
@variable(nlp, x[j = 1:2]) set_start_value.(x, [1, 1]) # other: [100, 0.75] - @NLobjective(nlp, Min, 0.5 * sum((y[i, 1] - x[1] * (1 - exp(-x[2] * y[i, 2])))^2 for i = 1:6)) + @objective(nlp, Min, 0.5 * sum((y[i, 1] - x[1] * (1 - exp(-x[2] * y[i, 2])))^2 for i = 1:6)) return nlp end diff --git a/src/PureJuMP/bqp1var.jl b/src/PureJuMP/bqp1var.jl index e472ca32..e9def368 100644 --- a/src/PureJuMP/bqp1var.jl +++ b/src/PureJuMP/bqp1var.jl @@ -21,8 +21,8 @@ function bqp1var(; n::Int = default_nvar, kwargs...) @variable(nlp, -Inf <= x1 <= Inf, start = 0.25) - @NLobjective(nlp, Min, x1 + x1^2) - @NLconstraint(nlp, cons1, 0.0 <= x1 <= 0.5) + @objective(nlp, Min, x1 + x1^2) + @constraint(nlp, cons1, 0.0 <= x1 <= 0.5) return nlp end diff --git a/src/PureJuMP/britgas.jl b/src/PureJuMP/britgas.jl index b0d5681f..d3b47076 100644 --- a/src/PureJuMP/britgas.jl +++ b/src/PureJuMP/britgas.jl @@ -527,7 +527,7 @@ function britgas(; n::Int = default_nvar, kwargs...) @variable(nlp, 0.0 <= out16_8 <= Inf, start = 1.0) @variable(nlp, 0.0 <= out23_8 <= Inf, start = 1.0) - @NLobjective( + @objective( nlp, Min, f3_4_1 * ((abs(r3_4_1)^0.22e+0) - 1.0e+0) + @@ -555,2121 +555,2121 @@ function britgas(; n::Int = default_nvar, kwargs...) f5_7_8 * ((abs(r5_7_8)^0.22e+0) - 1.0e+0) + f19_20_8 * ((abs(r19_20_8)^0.22e+0) - 1.0e+0) ) - @NLconstraint( + @constraint( nlp, m1_1, p1_1 / ((1.0e+0) + (1.0e+0) * p1_1) - p1_0 / ((1.0e+0) + (1.0e+0) * p1_0) - 0.75 * q1_17_1 - 0.75 * q1_2_1 + in1_1 - 0.25 * q1_17_0 - 0.25 * q1_2_0 == 0 ) - @NLconstraint( + @constraint( nlp, m2_1, p2_1 / ((1.0e+0) + (1.0e+0) * p2_1) - p2_0 / ((1.0e+0) + (1.0e+0) * p2_0) - 0.75 * q2_3_1 + 0.75 * q1_2_1 - 0.25 * q2_3_0 + 0.25 * q1_2_0 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m3_1, p3_1 / ((1.0e+0) + (1.0e+0) * p3_1) - p3_0 / ((1.0e+0) + (1.0e+0) * p3_0) - f3_4_1 + 0.75 * q2_3_1 + 0.25 * q2_3_0 == 0 ) - @NLconstraint( + @constraint( nlp, m4_1, p4_1 / ((1.0e+0) + (1.0e+0) * p4_1) - p4_0 / ((1.0e+0) + (1.0e+0) * p4_0) - 0.75 * q4_5_1 + f3_4_1 - 0.25 * q4_5_0 == 0 ) - @NLconstraint( + @constraint( nlp, m5_1, p5_1 / ((1.0e+0) + (1.0e+0) * p5_1) - p5_0 / ((1.0e+0) + (1.0e+0) * p5_0) - 0.75 * q5_6_1 - f5_7_1 + 0.75 * q4_5_1 - 0.25 * q5_6_0 + 0.25 * q4_5_0 == 0 ) - @NLconstraint( + @constraint( nlp, m6_1, p6_1 / ((1.0e+0) + (1.0e+0) * p6_1) - p6_0 / ((1.0e+0) + (1.0e+0) * p6_0) + 0.75 * q5_6_1 + 0.25 * q5_6_0 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m7_1, p7_1 / ((1.0e+0) + (1.0e+0) * p7_1) - p7_0 / ((1.0e+0) + (1.0e+0) * p7_0) - 0.75 * q7_8_1 + f5_7_1 - 0.25 * q7_8_0 == 0 ) - @NLconstraint( + @constraint( nlp, m8_1, p8_1 / ((1.0e+0) + (1.0e+0) * p8_1) - p8_0 / ((1.0e+0) + (1.0e+0) * p8_0) - 0.75 * q8_9_1 - 0.75 * q8_10_1 - 0.75 * q8_11_1 + 0.75 * q7_8_1 - 0.25 * q8_9_0 - 0.25 * q8_10_0 - 0.25 * q8_11_0 + 0.25 * q7_8_0 == 0 ) - @NLconstraint( + @constraint( nlp, m9_1, p9_1 / ((1.0e+0) + (1.0e+0) * p9_1) - p9_0 / ((1.0e+0) + (1.0e+0) * p9_0) + 0.75 * q8_9_1 + 0.25 * q8_9_0 == 0 ) - @NLconstraint( + @constraint( nlp, m10_1, p10_1 / ((1.0e+0) + (1.0e+0) * p10_1) - p10_0 / ((1.0e+0) + (1.0e+0) * p10_0) + 0.75 * q8_10_1 + 0.25 * q8_10_0 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m11_1, p11_1 / ((1.0e+0) + (1.0e+0) * p11_1) - p11_0 / ((1.0e+0) + (1.0e+0) * p11_0) - 0.75 * q11_12_1 + 0.75 * q8_11_1 - 0.25 * q11_12_0 + 0.25 * q8_11_0 == 0 ) - @NLconstraint( + @constraint( nlp, m12_1, p12_1 / ((1.0e+0) + (1.0e+0) * p12_1) - p12_0 / ((1.0e+0) + (1.0e+0) * p12_0) - 0.75 * q12_13_1 + 0.75 * q11_12_1 - 0.25 * q12_13_0 + 0.25 * q11_12_0 == 0 ) - 
@NLconstraint( + @constraint( nlp, m13_1, p13_1 / ((1.0e+0) + (1.0e+0) * p13_1) - p13_0 / ((1.0e+0) + (1.0e+0) * p13_0) - 0.75 * q13_14_1 - 0.75 * q13_15_1 + 0.75 * q12_13_1 - 0.25 * q13_14_0 - 0.25 * q13_15_0 + 0.25 * q12_13_0 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m14_1, p14_1 / ((1.0e+0) + (1.0e+0) * p14_1) - p14_0 / ((1.0e+0) + (1.0e+0) * p14_0) + 0.75 * q13_14_1 + 0.25 * q13_14_0 == 0 ) - @NLconstraint( + @constraint( nlp, m15_1, p15_1 / ((1.0e+0) + (1.0e+0) * p15_1) - p15_0 / ((1.0e+0) + (1.0e+0) * p15_0) - 0.75 * q15_16_1 + 0.75 * q13_15_1 - 0.25 * q15_16_0 + 0.25 * q13_15_0 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m16_1, p16_1 / ((1.0e+0) + (1.0e+0) * p16_1) - p16_0 / ((1.0e+0) + (1.0e+0) * p16_0) + 0.75 * q15_16_1 + 0.25 * q15_16_0 - out16_1 == 0 ) - @NLconstraint( + @constraint( nlp, m17_1, p17_1 / ((1.0e+0) + (1.0e+0) * p17_1) - p17_0 / ((1.0e+0) + (1.0e+0) * p17_0) - 0.75 * q17_18_1 + 0.75 * q1_17_1 - 0.25 * q17_18_0 + 0.25 * q1_17_0 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m18_1, p18_1 / ((1.0e+0) + (1.0e+0) * p18_1) - p18_0 / ((1.0e+0) + (1.0e+0) * p18_0) - 0.75 * q18_19_1 + 0.75 * q17_18_1 - 0.25 * q18_19_0 + 0.25 * q17_18_0 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m19_1, p19_1 / ((1.0e+0) + (1.0e+0) * p19_1) - p19_0 / ((1.0e+0) + (1.0e+0) * p19_0) - f19_20_1 + 0.75 * q18_19_1 + 0.25 * q18_19_0 == 0 ) - @NLconstraint( + @constraint( nlp, m20_1, p20_1 / ((1.0e+0) + (1.0e+0) * p20_1) - p20_0 / ((1.0e+0) + (1.0e+0) * p20_0) - 0.75 * q20_21_1 + f19_20_1 - 0.25 * q20_21_0 == 0 ) - @NLconstraint( + @constraint( nlp, m21_1, p21_1 / ((1.0e+0) + (1.0e+0) * p21_1) - p21_0 / ((1.0e+0) + (1.0e+0) * p21_0) - 0.75 * q21_22_1 + 0.75 * q20_21_1 - 0.25 * q21_22_0 + 0.25 * q20_21_0 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m22_1, p22_1 / ((1.0e+0) + (1.0e+0) * p22_1) - p22_0 / ((1.0e+0) + (1.0e+0) * p22_0) - 0.75 * q22_23_1 + 0.75 * q21_22_1 - 0.25 * q22_23_0 + 0.25 * q21_22_0 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m23_1, p23_1 / ((1.0e+0) + (1.0e+0) * p23_1) - p23_0 / ((1.0e+0) + (1.0e+0) * p23_0) + 0.75 * q22_23_1 + 0.25 * q22_23_0 - out23_1 == 0 ) - @NLconstraint(nlp, c3_4_1, p3_1 * r3_4_1 - p4_1 == 0) - @NLconstraint(nlp, c5_7_1, p5_1 * r5_7_1 - p7_1 == 0) - @NLconstraint(nlp, c19_20_1, p19_1 * r19_20_1 - p20_1 == 0) - @NLconstraint( + @constraint(nlp, c3_4_1, p3_1 * r3_4_1 - p4_1 == 0) + @constraint(nlp, c5_7_1, p5_1 * r5_7_1 - p7_1 == 0) + @constraint(nlp, c19_20_1, p19_1 * r19_20_1 - p20_1 == 0) + @constraint( nlp, p1_2_1, p1_1 * p1_1 - p2_1 * p2_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p1_1 + p2_1)) * ((abs(q1_2_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p1_17_1, p1_1 * p1_1 - p17_1 * p17_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p1_1 + p17_1)) * ((abs(q1_17_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p2_3_1, p2_1 * p2_1 - p3_1 * p3_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p2_1 + p3_1)) * ((abs(q2_3_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p4_5_1, p4_1 * p4_1 - p5_1 * p5_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p4_1 + p5_1)) * ((abs(q4_5_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p5_6_1, p5_1 * p5_1 - p6_1 * p6_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p5_1 + p6_1)) * ((abs(q5_6_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p7_8_1, p7_1 * p7_1 - p8_1 * p8_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p7_1 + p8_1)) * ((abs(q7_8_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_9_1, p8_1 * p8_1 - p9_1 * p9_1 - 0.01 * ((1.0e+0) + 
(5.0e-1 * 1.0e+0) * (p8_1 + p9_1)) * ((abs(q8_9_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_10_1, p8_1 * p8_1 - p10_1 * p10_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_1 + p10_1)) * ((abs(q8_10_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_11_1, p8_1 * p8_1 - p11_1 * p11_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_1 + p11_1)) * ((abs(q8_11_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p11_12_1, p11_1 * p11_1 - p12_1 * p12_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p11_1 + p12_1)) * ((abs(q11_12_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p12_13_1, p12_1 * p12_1 - p13_1 * p13_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p12_1 + p13_1)) * ((abs(q12_13_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p13_14_1, p13_1 * p13_1 - p14_1 * p14_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p13_1 + p14_1)) * ((abs(q13_14_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p13_15_1, p13_1 * p13_1 - p15_1 * p15_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p13_1 + p15_1)) * ((abs(q13_15_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p15_16_1, p15_1 * p15_1 - p16_1 * p16_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p15_1 + p16_1)) * ((abs(q15_16_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p17_18_1, p17_1 * p17_1 - p18_1 * p18_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p17_1 + p18_1)) * ((abs(q17_18_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p18_19_1, p18_1 * p18_1 - p19_1 * p19_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p18_1 + p19_1)) * ((abs(q18_19_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p20_21_1, p20_1 * p20_1 - p21_1 * p21_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p20_1 + p21_1)) * ((abs(q20_21_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p21_22_1, p21_1 * p21_1 - p22_1 * p22_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p21_1 + p22_1)) * ((abs(q21_22_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p22_23_1, p22_1 * p22_1 - p23_1 * p23_1 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p22_1 + p23_1)) * ((abs(q22_23_1))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, m1_2, p1_2 / ((1.0e+0) + (1.0e+0) * p1_2) - p1_1 / ((1.0e+0) + (1.0e+0) * p1_1) - 0.75 * q1_17_2 - 0.75 * q1_2_2 + in1_2 - 0.25 * q1_17_1 - 0.25 * q1_2_1 == 0 ) - @NLconstraint( + @constraint( nlp, m2_2, p2_2 / ((1.0e+0) + (1.0e+0) * p2_2) - p2_1 / ((1.0e+0) + (1.0e+0) * p2_1) - 0.75 * q2_3_2 + 0.75 * q1_2_2 - 0.25 * q2_3_1 + 0.25 * q1_2_1 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m3_2, p3_2 / ((1.0e+0) + (1.0e+0) * p3_2) - p3_1 / ((1.0e+0) + (1.0e+0) * p3_1) - f3_4_2 + 0.75 * q2_3_2 + 0.25 * q2_3_1 == 0 ) - @NLconstraint( + @constraint( nlp, m4_2, p4_2 / ((1.0e+0) + (1.0e+0) * p4_2) - p4_1 / ((1.0e+0) + (1.0e+0) * p4_1) - 0.75 * q4_5_2 + f3_4_2 - 0.25 * q4_5_1 == 0 ) - @NLconstraint( + @constraint( nlp, m5_2, p5_2 / ((1.0e+0) + (1.0e+0) * p5_2) - p5_1 / ((1.0e+0) + (1.0e+0) * p5_1) - 0.75 * q5_6_2 - f5_7_2 + 0.75 * q4_5_2 - 0.25 * q5_6_1 + 0.25 * q4_5_1 == 0 ) - @NLconstraint( + @constraint( nlp, m6_2, p6_2 / ((1.0e+0) + (1.0e+0) * p6_2) - p6_1 / ((1.0e+0) + (1.0e+0) * p6_1) + 0.75 * q5_6_2 + 0.25 * q5_6_1 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m7_2, p7_2 / ((1.0e+0) + (1.0e+0) * p7_2) - p7_1 / ((1.0e+0) + (1.0e+0) * p7_1) - 0.75 * q7_8_2 + f5_7_2 - 0.25 * q7_8_1 == 0 ) - @NLconstraint( + @constraint( nlp, m8_2, p8_2 / ((1.0e+0) + (1.0e+0) * p8_2) - p8_1 / ((1.0e+0) + (1.0e+0) * p8_1) - 0.75 * q8_9_2 - 0.75 * q8_10_2 - 0.75 * q8_11_2 + 0.75 * q7_8_2 - 
0.25 * q8_9_1 - 0.25 * q8_10_1 - 0.25 * q8_11_1 + 0.25 * q7_8_1 == 0 ) - @NLconstraint( + @constraint( nlp, m9_2, p9_2 / ((1.0e+0) + (1.0e+0) * p9_2) - p9_1 / ((1.0e+0) + (1.0e+0) * p9_1) + 0.75 * q8_9_2 + 0.25 * q8_9_1 == 0 ) - @NLconstraint( + @constraint( nlp, m10_2, p10_2 / ((1.0e+0) + (1.0e+0) * p10_2) - p10_1 / ((1.0e+0) + (1.0e+0) * p10_1) + 0.75 * q8_10_2 + 0.25 * q8_10_1 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m11_2, p11_2 / ((1.0e+0) + (1.0e+0) * p11_2) - p11_1 / ((1.0e+0) + (1.0e+0) * p11_1) - 0.75 * q11_12_2 + 0.75 * q8_11_2 - 0.25 * q11_12_1 + 0.25 * q8_11_1 == 0 ) - @NLconstraint( + @constraint( nlp, m12_2, p12_2 / ((1.0e+0) + (1.0e+0) * p12_2) - p12_1 / ((1.0e+0) + (1.0e+0) * p12_1) - 0.75 * q12_13_2 + 0.75 * q11_12_2 - 0.25 * q12_13_1 + 0.25 * q11_12_1 == 0 ) - @NLconstraint( + @constraint( nlp, m13_2, p13_2 / ((1.0e+0) + (1.0e+0) * p13_2) - p13_1 / ((1.0e+0) + (1.0e+0) * p13_1) - 0.75 * q13_14_2 - 0.75 * q13_15_2 + 0.75 * q12_13_2 - 0.25 * q13_14_1 - 0.25 * q13_15_1 + 0.25 * q12_13_1 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m14_2, p14_2 / ((1.0e+0) + (1.0e+0) * p14_2) - p14_1 / ((1.0e+0) + (1.0e+0) * p14_1) + 0.75 * q13_14_2 + 0.25 * q13_14_1 == 0 ) - @NLconstraint( + @constraint( nlp, m15_2, p15_2 / ((1.0e+0) + (1.0e+0) * p15_2) - p15_1 / ((1.0e+0) + (1.0e+0) * p15_1) - 0.75 * q15_16_2 + 0.75 * q13_15_2 - 0.25 * q15_16_1 + 0.25 * q13_15_1 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m16_2, p16_2 / ((1.0e+0) + (1.0e+0) * p16_2) - p16_1 / ((1.0e+0) + (1.0e+0) * p16_1) + 0.75 * q15_16_2 + 0.25 * q15_16_1 - out16_2 == 0 ) - @NLconstraint( + @constraint( nlp, m17_2, p17_2 / ((1.0e+0) + (1.0e+0) * p17_2) - p17_1 / ((1.0e+0) + (1.0e+0) * p17_1) - 0.75 * q17_18_2 + 0.75 * q1_17_2 - 0.25 * q17_18_1 + 0.25 * q1_17_1 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m18_2, p18_2 / ((1.0e+0) + (1.0e+0) * p18_2) - p18_1 / ((1.0e+0) + (1.0e+0) * p18_1) - 0.75 * q18_19_2 + 0.75 * q17_18_2 - 0.25 * q18_19_1 + 0.25 * q17_18_1 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m19_2, p19_2 / ((1.0e+0) + (1.0e+0) * p19_2) - p19_1 / ((1.0e+0) + (1.0e+0) * p19_1) - f19_20_2 + 0.75 * q18_19_2 + 0.25 * q18_19_1 == 0 ) - @NLconstraint( + @constraint( nlp, m20_2, p20_2 / ((1.0e+0) + (1.0e+0) * p20_2) - p20_1 / ((1.0e+0) + (1.0e+0) * p20_1) - 0.75 * q20_21_2 + f19_20_2 - 0.25 * q20_21_1 == 0 ) - @NLconstraint( + @constraint( nlp, m21_2, p21_2 / ((1.0e+0) + (1.0e+0) * p21_2) - p21_1 / ((1.0e+0) + (1.0e+0) * p21_1) - 0.75 * q21_22_2 + 0.75 * q20_21_2 - 0.25 * q21_22_1 + 0.25 * q20_21_1 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m22_2, p22_2 / ((1.0e+0) + (1.0e+0) * p22_2) - p22_1 / ((1.0e+0) + (1.0e+0) * p22_1) - 0.75 * q22_23_2 + 0.75 * q21_22_2 - 0.25 * q22_23_1 + 0.25 * q21_22_1 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m23_2, p23_2 / ((1.0e+0) + (1.0e+0) * p23_2) - p23_1 / ((1.0e+0) + (1.0e+0) * p23_1) + 0.75 * q22_23_2 + 0.25 * q22_23_1 - out23_2 == 0 ) - @NLconstraint(nlp, c3_4_2, p3_2 * r3_4_2 - p4_2 == 0) - @NLconstraint(nlp, c5_7_2, p5_2 * r5_7_2 - p7_2 == 0) - @NLconstraint(nlp, c19_20_2, p19_2 * r19_20_2 - p20_2 == 0) - @NLconstraint( + @constraint(nlp, c3_4_2, p3_2 * r3_4_2 - p4_2 == 0) + @constraint(nlp, c5_7_2, p5_2 * r5_7_2 - p7_2 == 0) + @constraint(nlp, c19_20_2, p19_2 * r19_20_2 - p20_2 == 0) + @constraint( nlp, p1_2_2, p1_2 * p1_2 - p2_2 * p2_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p1_2 + p2_2)) * ((abs(q1_2_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p1_17_2, p1_2 * p1_2 - p17_2 * p17_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 
1.0e+0) * (p1_2 + p17_2)) * ((abs(q1_17_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p2_3_2, p2_2 * p2_2 - p3_2 * p3_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p2_2 + p3_2)) * ((abs(q2_3_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p4_5_2, p4_2 * p4_2 - p5_2 * p5_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p4_2 + p5_2)) * ((abs(q4_5_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p5_6_2, p5_2 * p5_2 - p6_2 * p6_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p5_2 + p6_2)) * ((abs(q5_6_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p7_8_2, p7_2 * p7_2 - p8_2 * p8_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p7_2 + p8_2)) * ((abs(q7_8_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_9_2, p8_2 * p8_2 - p9_2 * p9_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_2 + p9_2)) * ((abs(q8_9_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_10_2, p8_2 * p8_2 - p10_2 * p10_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_2 + p10_2)) * ((abs(q8_10_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_11_2, p8_2 * p8_2 - p11_2 * p11_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_2 + p11_2)) * ((abs(q8_11_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p11_12_2, p11_2 * p11_2 - p12_2 * p12_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p11_2 + p12_2)) * ((abs(q11_12_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p12_13_2, p12_2 * p12_2 - p13_2 * p13_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p12_2 + p13_2)) * ((abs(q12_13_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p13_14_2, p13_2 * p13_2 - p14_2 * p14_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p13_2 + p14_2)) * ((abs(q13_14_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p13_15_2, p13_2 * p13_2 - p15_2 * p15_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p13_2 + p15_2)) * ((abs(q13_15_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p15_16_2, p15_2 * p15_2 - p16_2 * p16_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p15_2 + p16_2)) * ((abs(q15_16_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p17_18_2, p17_2 * p17_2 - p18_2 * p18_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p17_2 + p18_2)) * ((abs(q17_18_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p18_19_2, p18_2 * p18_2 - p19_2 * p19_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p18_2 + p19_2)) * ((abs(q18_19_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p20_21_2, p20_2 * p20_2 - p21_2 * p21_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p20_2 + p21_2)) * ((abs(q20_21_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p21_22_2, p21_2 * p21_2 - p22_2 * p22_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p21_2 + p22_2)) * ((abs(q21_22_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p22_23_2, p22_2 * p22_2 - p23_2 * p23_2 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p22_2 + p23_2)) * ((abs(q22_23_2))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, m1_3, p1_3 / ((1.0e+0) + (1.0e+0) * p1_3) - p1_2 / ((1.0e+0) + (1.0e+0) * p1_2) - 0.75 * q1_17_3 - 0.75 * q1_2_3 + in1_3 - 0.25 * q1_17_2 - 0.25 * q1_2_2 == 0 ) - @NLconstraint( + @constraint( nlp, m2_3, p2_3 / ((1.0e+0) + (1.0e+0) * p2_3) - p2_2 / ((1.0e+0) + (1.0e+0) * p2_2) - 0.75 * q2_3_3 + 0.75 * q1_2_3 - 0.25 * q2_3_2 + 0.25 * q1_2_2 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m3_3, p3_3 / ((1.0e+0) + (1.0e+0) * p3_3) - p3_2 / ((1.0e+0) + (1.0e+0) * p3_2) - f3_4_3 + 0.75 * q2_3_3 + 0.25 * q2_3_2 == 0 ) - @NLconstraint( + @constraint( nlp, m4_3, p4_3 / ((1.0e+0) + (1.0e+0) 
* p4_3) - p4_2 / ((1.0e+0) + (1.0e+0) * p4_2) - 0.75 * q4_5_3 + f3_4_3 - 0.25 * q4_5_2 == 0 ) - @NLconstraint( + @constraint( nlp, m5_3, p5_3 / ((1.0e+0) + (1.0e+0) * p5_3) - p5_2 / ((1.0e+0) + (1.0e+0) * p5_2) - 0.75 * q5_6_3 - f5_7_3 + 0.75 * q4_5_3 - 0.25 * q5_6_2 + 0.25 * q4_5_2 == 0 ) - @NLconstraint( + @constraint( nlp, m6_3, p6_3 / ((1.0e+0) + (1.0e+0) * p6_3) - p6_2 / ((1.0e+0) + (1.0e+0) * p6_2) + 0.75 * q5_6_3 + 0.25 * q5_6_2 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m7_3, p7_3 / ((1.0e+0) + (1.0e+0) * p7_3) - p7_2 / ((1.0e+0) + (1.0e+0) * p7_2) - 0.75 * q7_8_3 + f5_7_3 - 0.25 * q7_8_2 == 0 ) - @NLconstraint( + @constraint( nlp, m8_3, p8_3 / ((1.0e+0) + (1.0e+0) * p8_3) - p8_2 / ((1.0e+0) + (1.0e+0) * p8_2) - 0.75 * q8_9_3 - 0.75 * q8_10_3 - 0.75 * q8_11_3 + 0.75 * q7_8_3 - 0.25 * q8_9_2 - 0.25 * q8_10_2 - 0.25 * q8_11_2 + 0.25 * q7_8_2 == 0 ) - @NLconstraint( + @constraint( nlp, m9_3, p9_3 / ((1.0e+0) + (1.0e+0) * p9_3) - p9_2 / ((1.0e+0) + (1.0e+0) * p9_2) + 0.75 * q8_9_3 + 0.25 * q8_9_2 == 0 ) - @NLconstraint( + @constraint( nlp, m10_3, p10_3 / ((1.0e+0) + (1.0e+0) * p10_3) - p10_2 / ((1.0e+0) + (1.0e+0) * p10_2) + 0.75 * q8_10_3 + 0.25 * q8_10_2 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m11_3, p11_3 / ((1.0e+0) + (1.0e+0) * p11_3) - p11_2 / ((1.0e+0) + (1.0e+0) * p11_2) - 0.75 * q11_12_3 + 0.75 * q8_11_3 - 0.25 * q11_12_2 + 0.25 * q8_11_2 == 0 ) - @NLconstraint( + @constraint( nlp, m12_3, p12_3 / ((1.0e+0) + (1.0e+0) * p12_3) - p12_2 / ((1.0e+0) + (1.0e+0) * p12_2) - 0.75 * q12_13_3 + 0.75 * q11_12_3 - 0.25 * q12_13_2 + 0.25 * q11_12_2 == 0 ) - @NLconstraint( + @constraint( nlp, m13_3, p13_3 / ((1.0e+0) + (1.0e+0) * p13_3) - p13_2 / ((1.0e+0) + (1.0e+0) * p13_2) - 0.75 * q13_14_3 - 0.75 * q13_15_3 + 0.75 * q12_13_3 - 0.25 * q13_14_2 - 0.25 * q13_15_2 + 0.25 * q12_13_2 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m14_3, p14_3 / ((1.0e+0) + (1.0e+0) * p14_3) - p14_2 / ((1.0e+0) + (1.0e+0) * p14_2) + 0.75 * q13_14_3 + 0.25 * q13_14_2 == 0 ) - @NLconstraint( + @constraint( nlp, m15_3, p15_3 / ((1.0e+0) + (1.0e+0) * p15_3) - p15_2 / ((1.0e+0) + (1.0e+0) * p15_2) - 0.75 * q15_16_3 + 0.75 * q13_15_3 - 0.25 * q15_16_2 + 0.25 * q13_15_2 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m16_3, p16_3 / ((1.0e+0) + (1.0e+0) * p16_3) - p16_2 / ((1.0e+0) + (1.0e+0) * p16_2) + 0.75 * q15_16_3 + 0.25 * q15_16_2 - out16_3 == 0 ) - @NLconstraint( + @constraint( nlp, m17_3, p17_3 / ((1.0e+0) + (1.0e+0) * p17_3) - p17_2 / ((1.0e+0) + (1.0e+0) * p17_2) - 0.75 * q17_18_3 + 0.75 * q1_17_3 - 0.25 * q17_18_2 + 0.25 * q1_17_2 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m18_3, p18_3 / ((1.0e+0) + (1.0e+0) * p18_3) - p18_2 / ((1.0e+0) + (1.0e+0) * p18_2) - 0.75 * q18_19_3 + 0.75 * q17_18_3 - 0.25 * q18_19_2 + 0.25 * q17_18_2 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m19_3, p19_3 / ((1.0e+0) + (1.0e+0) * p19_3) - p19_2 / ((1.0e+0) + (1.0e+0) * p19_2) - f19_20_3 + 0.75 * q18_19_3 + 0.25 * q18_19_2 == 0 ) - @NLconstraint( + @constraint( nlp, m20_3, p20_3 / ((1.0e+0) + (1.0e+0) * p20_3) - p20_2 / ((1.0e+0) + (1.0e+0) * p20_2) - 0.75 * q20_21_3 + f19_20_3 - 0.25 * q20_21_2 == 0 ) - @NLconstraint( + @constraint( nlp, m21_3, p21_3 / ((1.0e+0) + (1.0e+0) * p21_3) - p21_2 / ((1.0e+0) + (1.0e+0) * p21_2) - 0.75 * q21_22_3 + 0.75 * q20_21_3 - 0.25 * q21_22_2 + 0.25 * q20_21_2 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m22_3, p22_3 / ((1.0e+0) + (1.0e+0) * p22_3) - p22_2 / ((1.0e+0) + (1.0e+0) * p22_2) - 0.75 * q22_23_3 + 0.75 * q21_22_3 - 0.25 * q22_23_2 + 0.25 * q21_22_2 
- 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m23_3, p23_3 / ((1.0e+0) + (1.0e+0) * p23_3) - p23_2 / ((1.0e+0) + (1.0e+0) * p23_2) + 0.75 * q22_23_3 + 0.25 * q22_23_2 - out23_3 == 0 ) - @NLconstraint(nlp, c3_4_3, p3_3 * r3_4_3 - p4_3 == 0) - @NLconstraint(nlp, c5_7_3, p5_3 * r5_7_3 - p7_3 == 0) - @NLconstraint(nlp, c19_20_3, p19_3 * r19_20_3 - p20_3 == 0) - @NLconstraint( + @constraint(nlp, c3_4_3, p3_3 * r3_4_3 - p4_3 == 0) + @constraint(nlp, c5_7_3, p5_3 * r5_7_3 - p7_3 == 0) + @constraint(nlp, c19_20_3, p19_3 * r19_20_3 - p20_3 == 0) + @constraint( nlp, p1_2_3, p1_3 * p1_3 - p2_3 * p2_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p1_3 + p2_3)) * ((abs(q1_2_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p1_17_3, p1_3 * p1_3 - p17_3 * p17_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p1_3 + p17_3)) * ((abs(q1_17_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p2_3_3, p2_3 * p2_3 - p3_3 * p3_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p2_3 + p3_3)) * ((abs(q2_3_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p4_5_3, p4_3 * p4_3 - p5_3 * p5_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p4_3 + p5_3)) * ((abs(q4_5_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p5_6_3, p5_3 * p5_3 - p6_3 * p6_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p5_3 + p6_3)) * ((abs(q5_6_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p7_8_3, p7_3 * p7_3 - p8_3 * p8_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p7_3 + p8_3)) * ((abs(q7_8_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_9_3, p8_3 * p8_3 - p9_3 * p9_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_3 + p9_3)) * ((abs(q8_9_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_10_3, p8_3 * p8_3 - p10_3 * p10_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_3 + p10_3)) * ((abs(q8_10_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_11_3, p8_3 * p8_3 - p11_3 * p11_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_3 + p11_3)) * ((abs(q8_11_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p11_12_3, p11_3 * p11_3 - p12_3 * p12_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p11_3 + p12_3)) * ((abs(q11_12_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p12_13_3, p12_3 * p12_3 - p13_3 * p13_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p12_3 + p13_3)) * ((abs(q12_13_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p13_14_3, p13_3 * p13_3 - p14_3 * p14_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p13_3 + p14_3)) * ((abs(q13_14_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p13_15_3, p13_3 * p13_3 - p15_3 * p15_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p13_3 + p15_3)) * ((abs(q13_15_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p15_16_3, p15_3 * p15_3 - p16_3 * p16_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p15_3 + p16_3)) * ((abs(q15_16_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p17_18_3, p17_3 * p17_3 - p18_3 * p18_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p17_3 + p18_3)) * ((abs(q17_18_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p18_19_3, p18_3 * p18_3 - p19_3 * p19_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p18_3 + p19_3)) * ((abs(q18_19_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p20_21_3, p20_3 * p20_3 - p21_3 * p21_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p20_3 + p21_3)) * ((abs(q20_21_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p21_22_3, p21_3 * p21_3 - p22_3 * p22_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p21_3 + p22_3)) * ((abs(q21_22_3))^1.8539e+0) == 0 ) - 
@NLconstraint( + @constraint( nlp, p22_23_3, p22_3 * p22_3 - p23_3 * p23_3 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p22_3 + p23_3)) * ((abs(q22_23_3))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, m1_4, p1_4 / ((1.0e+0) + (1.0e+0) * p1_4) - p1_3 / ((1.0e+0) + (1.0e+0) * p1_3) - 0.75 * q1_17_4 - 0.75 * q1_2_4 + in1_4 - 0.25 * q1_17_3 - 0.25 * q1_2_3 == 0 ) - @NLconstraint( + @constraint( nlp, m2_4, p2_4 / ((1.0e+0) + (1.0e+0) * p2_4) - p2_3 / ((1.0e+0) + (1.0e+0) * p2_3) - 0.75 * q2_3_4 + 0.75 * q1_2_4 - 0.25 * q2_3_3 + 0.25 * q1_2_3 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m3_4, p3_4 / ((1.0e+0) + (1.0e+0) * p3_4) - p3_3 / ((1.0e+0) + (1.0e+0) * p3_3) - f3_4_4 + 0.75 * q2_3_4 + 0.25 * q2_3_3 == 0 ) - @NLconstraint( + @constraint( nlp, m4_4, p4_4 / ((1.0e+0) + (1.0e+0) * p4_4) - p4_3 / ((1.0e+0) + (1.0e+0) * p4_3) - 0.75 * q4_5_4 + f3_4_4 - 0.25 * q4_5_3 == 0 ) - @NLconstraint( + @constraint( nlp, m5_4, p5_4 / ((1.0e+0) + (1.0e+0) * p5_4) - p5_3 / ((1.0e+0) + (1.0e+0) * p5_3) - 0.75 * q5_6_4 - f5_7_4 + 0.75 * q4_5_4 - 0.25 * q5_6_3 + 0.25 * q4_5_3 == 0 ) - @NLconstraint( + @constraint( nlp, m6_4, p6_4 / ((1.0e+0) + (1.0e+0) * p6_4) - p6_3 / ((1.0e+0) + (1.0e+0) * p6_3) + 0.75 * q5_6_4 + 0.25 * q5_6_3 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m7_4, p7_4 / ((1.0e+0) + (1.0e+0) * p7_4) - p7_3 / ((1.0e+0) + (1.0e+0) * p7_3) - 0.75 * q7_8_4 + f5_7_4 - 0.25 * q7_8_3 == 0 ) - @NLconstraint( + @constraint( nlp, m8_4, p8_4 / ((1.0e+0) + (1.0e+0) * p8_4) - p8_3 / ((1.0e+0) + (1.0e+0) * p8_3) - 0.75 * q8_9_4 - 0.75 * q8_10_4 - 0.75 * q8_11_4 + 0.75 * q7_8_4 - 0.25 * q8_9_3 - 0.25 * q8_10_3 - 0.25 * q8_11_3 + 0.25 * q7_8_3 == 0 ) - @NLconstraint( + @constraint( nlp, m9_4, p9_4 / ((1.0e+0) + (1.0e+0) * p9_4) - p9_3 / ((1.0e+0) + (1.0e+0) * p9_3) + 0.75 * q8_9_4 + 0.25 * q8_9_3 == 0 ) - @NLconstraint( + @constraint( nlp, m10_4, p10_4 / ((1.0e+0) + (1.0e+0) * p10_4) - p10_3 / ((1.0e+0) + (1.0e+0) * p10_3) + 0.75 * q8_10_4 + 0.25 * q8_10_3 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m11_4, p11_4 / ((1.0e+0) + (1.0e+0) * p11_4) - p11_3 / ((1.0e+0) + (1.0e+0) * p11_3) - 0.75 * q11_12_4 + 0.75 * q8_11_4 - 0.25 * q11_12_3 + 0.25 * q8_11_3 == 0 ) - @NLconstraint( + @constraint( nlp, m12_4, p12_4 / ((1.0e+0) + (1.0e+0) * p12_4) - p12_3 / ((1.0e+0) + (1.0e+0) * p12_3) - 0.75 * q12_13_4 + 0.75 * q11_12_4 - 0.25 * q12_13_3 + 0.25 * q11_12_3 == 0 ) - @NLconstraint( + @constraint( nlp, m13_4, p13_4 / ((1.0e+0) + (1.0e+0) * p13_4) - p13_3 / ((1.0e+0) + (1.0e+0) * p13_3) - 0.75 * q13_14_4 - 0.75 * q13_15_4 + 0.75 * q12_13_4 - 0.25 * q13_14_3 - 0.25 * q13_15_3 + 0.25 * q12_13_3 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m14_4, p14_4 / ((1.0e+0) + (1.0e+0) * p14_4) - p14_3 / ((1.0e+0) + (1.0e+0) * p14_3) + 0.75 * q13_14_4 + 0.25 * q13_14_3 == 0 ) - @NLconstraint( + @constraint( nlp, m15_4, p15_4 / ((1.0e+0) + (1.0e+0) * p15_4) - p15_3 / ((1.0e+0) + (1.0e+0) * p15_3) - 0.75 * q15_16_4 + 0.75 * q13_15_4 - 0.25 * q15_16_3 + 0.25 * q13_15_3 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m16_4, p16_4 / ((1.0e+0) + (1.0e+0) * p16_4) - p16_3 / ((1.0e+0) + (1.0e+0) * p16_3) + 0.75 * q15_16_4 + 0.25 * q15_16_3 - out16_4 == 0 ) - @NLconstraint( + @constraint( nlp, m17_4, p17_4 / ((1.0e+0) + (1.0e+0) * p17_4) - p17_3 / ((1.0e+0) + (1.0e+0) * p17_3) - 0.75 * q17_18_4 + 0.75 * q1_17_4 - 0.25 * q17_18_3 + 0.25 * q1_17_3 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m18_4, p18_4 / ((1.0e+0) + (1.0e+0) * p18_4) - p18_3 / ((1.0e+0) + (1.0e+0) * p18_3) - 0.75 * q18_19_4 + 0.75 * q17_18_4 - 
0.25 * q18_19_3 + 0.25 * q17_18_3 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m19_4, p19_4 / ((1.0e+0) + (1.0e+0) * p19_4) - p19_3 / ((1.0e+0) + (1.0e+0) * p19_3) - f19_20_4 + 0.75 * q18_19_4 + 0.25 * q18_19_3 == 0 ) - @NLconstraint( + @constraint( nlp, m20_4, p20_4 / ((1.0e+0) + (1.0e+0) * p20_4) - p20_3 / ((1.0e+0) + (1.0e+0) * p20_3) - 0.75 * q20_21_4 + f19_20_4 - 0.25 * q20_21_3 == 0 ) - @NLconstraint( + @constraint( nlp, m21_4, p21_4 / ((1.0e+0) + (1.0e+0) * p21_4) - p21_3 / ((1.0e+0) + (1.0e+0) * p21_3) - 0.75 * q21_22_4 + 0.75 * q20_21_4 - 0.25 * q21_22_3 + 0.25 * q20_21_3 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m22_4, p22_4 / ((1.0e+0) + (1.0e+0) * p22_4) - p22_3 / ((1.0e+0) + (1.0e+0) * p22_3) - 0.75 * q22_23_4 + 0.75 * q21_22_4 - 0.25 * q22_23_3 + 0.25 * q21_22_3 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m23_4, p23_4 / ((1.0e+0) + (1.0e+0) * p23_4) - p23_3 / ((1.0e+0) + (1.0e+0) * p23_3) + 0.75 * q22_23_4 + 0.25 * q22_23_3 - out23_4 == 0 ) - @NLconstraint(nlp, c3_4_4, p3_4 * r3_4_4 - p4_4 == 0) - @NLconstraint(nlp, c5_7_4, p5_4 * r5_7_4 - p7_4 == 0) - @NLconstraint(nlp, c19_20_4, p19_4 * r19_20_4 - p20_4 == 0) - @NLconstraint( + @constraint(nlp, c3_4_4, p3_4 * r3_4_4 - p4_4 == 0) + @constraint(nlp, c5_7_4, p5_4 * r5_7_4 - p7_4 == 0) + @constraint(nlp, c19_20_4, p19_4 * r19_20_4 - p20_4 == 0) + @constraint( nlp, p1_2_4, p1_4 * p1_4 - p2_4 * p2_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p1_4 + p2_4)) * ((abs(q1_2_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p1_17_4, p1_4 * p1_4 - p17_4 * p17_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p1_4 + p17_4)) * ((abs(q1_17_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p2_3_4, p2_4 * p2_4 - p3_4 * p3_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p2_4 + p3_4)) * ((abs(q2_3_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p4_5_4, p4_4 * p4_4 - p5_4 * p5_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p4_4 + p5_4)) * ((abs(q4_5_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p5_6_4, p5_4 * p5_4 - p6_4 * p6_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p5_4 + p6_4)) * ((abs(q5_6_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p7_8_4, p7_4 * p7_4 - p8_4 * p8_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p7_4 + p8_4)) * ((abs(q7_8_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_9_4, p8_4 * p8_4 - p9_4 * p9_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_4 + p9_4)) * ((abs(q8_9_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_10_4, p8_4 * p8_4 - p10_4 * p10_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_4 + p10_4)) * ((abs(q8_10_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_11_4, p8_4 * p8_4 - p11_4 * p11_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_4 + p11_4)) * ((abs(q8_11_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p11_12_4, p11_4 * p11_4 - p12_4 * p12_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p11_4 + p12_4)) * ((abs(q11_12_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p12_13_4, p12_4 * p12_4 - p13_4 * p13_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p12_4 + p13_4)) * ((abs(q12_13_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p13_14_4, p13_4 * p13_4 - p14_4 * p14_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p13_4 + p14_4)) * ((abs(q13_14_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p13_15_4, p13_4 * p13_4 - p15_4 * p15_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p13_4 + p15_4)) * ((abs(q13_15_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p15_16_4, p15_4 * p15_4 - 
p16_4 * p16_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p15_4 + p16_4)) * ((abs(q15_16_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p17_18_4, p17_4 * p17_4 - p18_4 * p18_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p17_4 + p18_4)) * ((abs(q17_18_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p18_19_4, p18_4 * p18_4 - p19_4 * p19_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p18_4 + p19_4)) * ((abs(q18_19_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p20_21_4, p20_4 * p20_4 - p21_4 * p21_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p20_4 + p21_4)) * ((abs(q20_21_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p21_22_4, p21_4 * p21_4 - p22_4 * p22_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p21_4 + p22_4)) * ((abs(q21_22_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p22_23_4, p22_4 * p22_4 - p23_4 * p23_4 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p22_4 + p23_4)) * ((abs(q22_23_4))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, m1_5, p1_5 / ((1.0e+0) + (1.0e+0) * p1_5) - p1_4 / ((1.0e+0) + (1.0e+0) * p1_4) - 0.75 * q1_17_5 - 0.75 * q1_2_5 + in1_5 - 0.25 * q1_17_4 - 0.25 * q1_2_4 == 0 ) - @NLconstraint( + @constraint( nlp, m2_5, p2_5 / ((1.0e+0) + (1.0e+0) * p2_5) - p2_4 / ((1.0e+0) + (1.0e+0) * p2_4) - 0.75 * q2_3_5 + 0.75 * q1_2_5 - 0.25 * q2_3_4 + 0.25 * q1_2_4 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m3_5, p3_5 / ((1.0e+0) + (1.0e+0) * p3_5) - p3_4 / ((1.0e+0) + (1.0e+0) * p3_4) - f3_4_5 + 0.75 * q2_3_5 + 0.25 * q2_3_4 == 0 ) - @NLconstraint( + @constraint( nlp, m4_5, p4_5 / ((1.0e+0) + (1.0e+0) * p4_5) - p4_4 / ((1.0e+0) + (1.0e+0) * p4_4) - 0.75 * q4_5_5 + f3_4_5 - 0.25 * q4_5_4 == 0 ) - @NLconstraint( + @constraint( nlp, m5_5, p5_5 / ((1.0e+0) + (1.0e+0) * p5_5) - p5_4 / ((1.0e+0) + (1.0e+0) * p5_4) - 0.75 * q5_6_5 - f5_7_5 + 0.75 * q4_5_5 - 0.25 * q5_6_4 + 0.25 * q4_5_4 == 0 ) - @NLconstraint( + @constraint( nlp, m6_5, p6_5 / ((1.0e+0) + (1.0e+0) * p6_5) - p6_4 / ((1.0e+0) + (1.0e+0) * p6_4) + 0.75 * q5_6_5 + 0.25 * q5_6_4 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m7_5, p7_5 / ((1.0e+0) + (1.0e+0) * p7_5) - p7_4 / ((1.0e+0) + (1.0e+0) * p7_4) - 0.75 * q7_8_5 + f5_7_5 - 0.25 * q7_8_4 == 0 ) - @NLconstraint( + @constraint( nlp, m8_5, p8_5 / ((1.0e+0) + (1.0e+0) * p8_5) - p8_4 / ((1.0e+0) + (1.0e+0) * p8_4) - 0.75 * q8_9_5 - 0.75 * q8_10_5 - 0.75 * q8_11_5 + 0.75 * q7_8_5 - 0.25 * q8_9_4 - 0.25 * q8_10_4 - 0.25 * q8_11_4 + 0.25 * q7_8_4 == 0 ) - @NLconstraint( + @constraint( nlp, m9_5, p9_5 / ((1.0e+0) + (1.0e+0) * p9_5) - p9_4 / ((1.0e+0) + (1.0e+0) * p9_4) + 0.75 * q8_9_5 + 0.25 * q8_9_4 == 0 ) - @NLconstraint( + @constraint( nlp, m10_5, p10_5 / ((1.0e+0) + (1.0e+0) * p10_5) - p10_4 / ((1.0e+0) + (1.0e+0) * p10_4) + 0.75 * q8_10_5 + 0.25 * q8_10_4 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m11_5, p11_5 / ((1.0e+0) + (1.0e+0) * p11_5) - p11_4 / ((1.0e+0) + (1.0e+0) * p11_4) - 0.75 * q11_12_5 + 0.75 * q8_11_5 - 0.25 * q11_12_4 + 0.25 * q8_11_4 == 0 ) - @NLconstraint( + @constraint( nlp, m12_5, p12_5 / ((1.0e+0) + (1.0e+0) * p12_5) - p12_4 / ((1.0e+0) + (1.0e+0) * p12_4) - 0.75 * q12_13_5 + 0.75 * q11_12_5 - 0.25 * q12_13_4 + 0.25 * q11_12_4 == 0 ) - @NLconstraint( + @constraint( nlp, m13_5, p13_5 / ((1.0e+0) + (1.0e+0) * p13_5) - p13_4 / ((1.0e+0) + (1.0e+0) * p13_4) - 0.75 * q13_14_5 - 0.75 * q13_15_5 + 0.75 * q12_13_5 - 0.25 * q13_14_4 - 0.25 * q13_15_4 + 0.25 * q12_13_4 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m14_5, p14_5 / ((1.0e+0) + (1.0e+0) * p14_5) - p14_4 / ((1.0e+0) + (1.0e+0) * 
p14_4) + 0.75 * q13_14_5 + 0.25 * q13_14_4 == 0 ) - @NLconstraint( + @constraint( nlp, m15_5, p15_5 / ((1.0e+0) + (1.0e+0) * p15_5) - p15_4 / ((1.0e+0) + (1.0e+0) * p15_4) - 0.75 * q15_16_5 + 0.75 * q13_15_5 - 0.25 * q15_16_4 + 0.25 * q13_15_4 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m16_5, p16_5 / ((1.0e+0) + (1.0e+0) * p16_5) - p16_4 / ((1.0e+0) + (1.0e+0) * p16_4) + 0.75 * q15_16_5 + 0.25 * q15_16_4 - out16_5 == 0 ) - @NLconstraint( + @constraint( nlp, m17_5, p17_5 / ((1.0e+0) + (1.0e+0) * p17_5) - p17_4 / ((1.0e+0) + (1.0e+0) * p17_4) - 0.75 * q17_18_5 + 0.75 * q1_17_5 - 0.25 * q17_18_4 + 0.25 * q1_17_4 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m18_5, p18_5 / ((1.0e+0) + (1.0e+0) * p18_5) - p18_4 / ((1.0e+0) + (1.0e+0) * p18_4) - 0.75 * q18_19_5 + 0.75 * q17_18_5 - 0.25 * q18_19_4 + 0.25 * q17_18_4 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m19_5, p19_5 / ((1.0e+0) + (1.0e+0) * p19_5) - p19_4 / ((1.0e+0) + (1.0e+0) * p19_4) - f19_20_5 + 0.75 * q18_19_5 + 0.25 * q18_19_4 == 0 ) - @NLconstraint( + @constraint( nlp, m20_5, p20_5 / ((1.0e+0) + (1.0e+0) * p20_5) - p20_4 / ((1.0e+0) + (1.0e+0) * p20_4) - 0.75 * q20_21_5 + f19_20_5 - 0.25 * q20_21_4 == 0 ) - @NLconstraint( + @constraint( nlp, m21_5, p21_5 / ((1.0e+0) + (1.0e+0) * p21_5) - p21_4 / ((1.0e+0) + (1.0e+0) * p21_4) - 0.75 * q21_22_5 + 0.75 * q20_21_5 - 0.25 * q21_22_4 + 0.25 * q20_21_4 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m22_5, p22_5 / ((1.0e+0) + (1.0e+0) * p22_5) - p22_4 / ((1.0e+0) + (1.0e+0) * p22_4) - 0.75 * q22_23_5 + 0.75 * q21_22_5 - 0.25 * q22_23_4 + 0.25 * q21_22_4 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m23_5, p23_5 / ((1.0e+0) + (1.0e+0) * p23_5) - p23_4 / ((1.0e+0) + (1.0e+0) * p23_4) + 0.75 * q22_23_5 + 0.25 * q22_23_4 - out23_5 == 0 ) - @NLconstraint(nlp, c3_4_5, p3_5 * r3_4_5 - p4_5 == 0) - @NLconstraint(nlp, c5_7_5, p5_5 * r5_7_5 - p7_5 == 0) - @NLconstraint(nlp, c19_20_5, p19_5 * r19_20_5 - p20_5 == 0) - @NLconstraint( + @constraint(nlp, c3_4_5, p3_5 * r3_4_5 - p4_5 == 0) + @constraint(nlp, c5_7_5, p5_5 * r5_7_5 - p7_5 == 0) + @constraint(nlp, c19_20_5, p19_5 * r19_20_5 - p20_5 == 0) + @constraint( nlp, p1_2_5, p1_5 * p1_5 - p2_5 * p2_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p1_5 + p2_5)) * ((abs(q1_2_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p1_17_5, p1_5 * p1_5 - p17_5 * p17_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p1_5 + p17_5)) * ((abs(q1_17_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p2_3_5, p2_5 * p2_5 - p3_5 * p3_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p2_5 + p3_5)) * ((abs(q2_3_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p4_5_5, p4_5 * p4_5 - p5_5 * p5_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p4_5 + p5_5)) * ((abs(q4_5_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p5_6_5, p5_5 * p5_5 - p6_5 * p6_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p5_5 + p6_5)) * ((abs(q5_6_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p7_8_5, p7_5 * p7_5 - p8_5 * p8_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p7_5 + p8_5)) * ((abs(q7_8_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_9_5, p8_5 * p8_5 - p9_5 * p9_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_5 + p9_5)) * ((abs(q8_9_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_10_5, p8_5 * p8_5 - p10_5 * p10_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_5 + p10_5)) * ((abs(q8_10_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_11_5, p8_5 * p8_5 - p11_5 * p11_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_5 
+ p11_5)) * ((abs(q8_11_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p11_12_5, p11_5 * p11_5 - p12_5 * p12_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p11_5 + p12_5)) * ((abs(q11_12_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p12_13_5, p12_5 * p12_5 - p13_5 * p13_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p12_5 + p13_5)) * ((abs(q12_13_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p13_14_5, p13_5 * p13_5 - p14_5 * p14_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p13_5 + p14_5)) * ((abs(q13_14_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p13_15_5, p13_5 * p13_5 - p15_5 * p15_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p13_5 + p15_5)) * ((abs(q13_15_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p15_16_5, p15_5 * p15_5 - p16_5 * p16_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p15_5 + p16_5)) * ((abs(q15_16_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p17_18_5, p17_5 * p17_5 - p18_5 * p18_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p17_5 + p18_5)) * ((abs(q17_18_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p18_19_5, p18_5 * p18_5 - p19_5 * p19_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p18_5 + p19_5)) * ((abs(q18_19_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p20_21_5, p20_5 * p20_5 - p21_5 * p21_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p20_5 + p21_5)) * ((abs(q20_21_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p21_22_5, p21_5 * p21_5 - p22_5 * p22_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p21_5 + p22_5)) * ((abs(q21_22_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p22_23_5, p22_5 * p22_5 - p23_5 * p23_5 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p22_5 + p23_5)) * ((abs(q22_23_5))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, m1_6, p1_6 / ((1.0e+0) + (1.0e+0) * p1_6) - p1_5 / ((1.0e+0) + (1.0e+0) * p1_5) - 0.75 * q1_17_6 - 0.75 * q1_2_6 + in1_6 - 0.25 * q1_17_5 - 0.25 * q1_2_5 == 0 ) - @NLconstraint( + @constraint( nlp, m2_6, p2_6 / ((1.0e+0) + (1.0e+0) * p2_6) - p2_5 / ((1.0e+0) + (1.0e+0) * p2_5) - 0.75 * q2_3_6 + 0.75 * q1_2_6 - 0.25 * q2_3_5 + 0.25 * q1_2_5 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m3_6, p3_6 / ((1.0e+0) + (1.0e+0) * p3_6) - p3_5 / ((1.0e+0) + (1.0e+0) * p3_5) - f3_4_6 + 0.75 * q2_3_6 + 0.25 * q2_3_5 == 0 ) - @NLconstraint( + @constraint( nlp, m4_6, p4_6 / ((1.0e+0) + (1.0e+0) * p4_6) - p4_5 / ((1.0e+0) + (1.0e+0) * p4_5) - 0.75 * q4_5_6 + f3_4_6 - 0.25 * q4_5_5 == 0 ) - @NLconstraint( + @constraint( nlp, m5_6, p5_6 / ((1.0e+0) + (1.0e+0) * p5_6) - p5_5 / ((1.0e+0) + (1.0e+0) * p5_5) - 0.75 * q5_6_6 - f5_7_6 + 0.75 * q4_5_6 - 0.25 * q5_6_5 + 0.25 * q4_5_5 == 0 ) - @NLconstraint( + @constraint( nlp, m6_6, p6_6 / ((1.0e+0) + (1.0e+0) * p6_6) - p6_5 / ((1.0e+0) + (1.0e+0) * p6_5) + 0.75 * q5_6_6 + 0.25 * q5_6_5 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m7_6, p7_6 / ((1.0e+0) + (1.0e+0) * p7_6) - p7_5 / ((1.0e+0) + (1.0e+0) * p7_5) - 0.75 * q7_8_6 + f5_7_6 - 0.25 * q7_8_5 == 0 ) - @NLconstraint( + @constraint( nlp, m8_6, p8_6 / ((1.0e+0) + (1.0e+0) * p8_6) - p8_5 / ((1.0e+0) + (1.0e+0) * p8_5) - 0.75 * q8_9_6 - 0.75 * q8_10_6 - 0.75 * q8_11_6 + 0.75 * q7_8_6 - 0.25 * q8_9_5 - 0.25 * q8_10_5 - 0.25 * q8_11_5 + 0.25 * q7_8_5 == 0 ) - @NLconstraint( + @constraint( nlp, m9_6, p9_6 / ((1.0e+0) + (1.0e+0) * p9_6) - p9_5 / ((1.0e+0) + (1.0e+0) * p9_5) + 0.75 * q8_9_6 + 0.25 * q8_9_5 == 0 ) - @NLconstraint( + @constraint( nlp, m10_6, p10_6 / ((1.0e+0) + (1.0e+0) * p10_6) - p10_5 / ((1.0e+0) + (1.0e+0) * p10_5) + 0.75 * 
q8_10_6 + 0.25 * q8_10_5 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m11_6, p11_6 / ((1.0e+0) + (1.0e+0) * p11_6) - p11_5 / ((1.0e+0) + (1.0e+0) * p11_5) - 0.75 * q11_12_6 + 0.75 * q8_11_6 - 0.25 * q11_12_5 + 0.25 * q8_11_5 == 0 ) - @NLconstraint( + @constraint( nlp, m12_6, p12_6 / ((1.0e+0) + (1.0e+0) * p12_6) - p12_5 / ((1.0e+0) + (1.0e+0) * p12_5) - 0.75 * q12_13_6 + 0.75 * q11_12_6 - 0.25 * q12_13_5 + 0.25 * q11_12_5 == 0 ) - @NLconstraint( + @constraint( nlp, m13_6, p13_6 / ((1.0e+0) + (1.0e+0) * p13_6) - p13_5 / ((1.0e+0) + (1.0e+0) * p13_5) - 0.75 * q13_14_6 - 0.75 * q13_15_6 + 0.75 * q12_13_6 - 0.25 * q13_14_5 - 0.25 * q13_15_5 + 0.25 * q12_13_5 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m14_6, p14_6 / ((1.0e+0) + (1.0e+0) * p14_6) - p14_5 / ((1.0e+0) + (1.0e+0) * p14_5) + 0.75 * q13_14_6 + 0.25 * q13_14_5 == 0 ) - @NLconstraint( + @constraint( nlp, m15_6, p15_6 / ((1.0e+0) + (1.0e+0) * p15_6) - p15_5 / ((1.0e+0) + (1.0e+0) * p15_5) - 0.75 * q15_16_6 + 0.75 * q13_15_6 - 0.25 * q15_16_5 + 0.25 * q13_15_5 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m16_6, p16_6 / ((1.0e+0) + (1.0e+0) * p16_6) - p16_5 / ((1.0e+0) + (1.0e+0) * p16_5) + 0.75 * q15_16_6 + 0.25 * q15_16_5 - out16_6 == 0 ) - @NLconstraint( + @constraint( nlp, m17_6, p17_6 / ((1.0e+0) + (1.0e+0) * p17_6) - p17_5 / ((1.0e+0) + (1.0e+0) * p17_5) - 0.75 * q17_18_6 + 0.75 * q1_17_6 - 0.25 * q17_18_5 + 0.25 * q1_17_5 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m18_6, p18_6 / ((1.0e+0) + (1.0e+0) * p18_6) - p18_5 / ((1.0e+0) + (1.0e+0) * p18_5) - 0.75 * q18_19_6 + 0.75 * q17_18_6 - 0.25 * q18_19_5 + 0.25 * q17_18_5 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m19_6, p19_6 / ((1.0e+0) + (1.0e+0) * p19_6) - p19_5 / ((1.0e+0) + (1.0e+0) * p19_5) - f19_20_6 + 0.75 * q18_19_6 + 0.25 * q18_19_5 == 0 ) - @NLconstraint( + @constraint( nlp, m20_6, p20_6 / ((1.0e+0) + (1.0e+0) * p20_6) - p20_5 / ((1.0e+0) + (1.0e+0) * p20_5) - 0.75 * q20_21_6 + f19_20_6 - 0.25 * q20_21_5 == 0 ) - @NLconstraint( + @constraint( nlp, m21_6, p21_6 / ((1.0e+0) + (1.0e+0) * p21_6) - p21_5 / ((1.0e+0) + (1.0e+0) * p21_5) - 0.75 * q21_22_6 + 0.75 * q20_21_6 - 0.25 * q21_22_5 + 0.25 * q20_21_5 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m22_6, p22_6 / ((1.0e+0) + (1.0e+0) * p22_6) - p22_5 / ((1.0e+0) + (1.0e+0) * p22_5) - 0.75 * q22_23_6 + 0.75 * q21_22_6 - 0.25 * q22_23_5 + 0.25 * q21_22_5 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m23_6, p23_6 / ((1.0e+0) + (1.0e+0) * p23_6) - p23_5 / ((1.0e+0) + (1.0e+0) * p23_5) + 0.75 * q22_23_6 + 0.25 * q22_23_5 - out23_6 == 0 ) - @NLconstraint(nlp, c3_4_6, p3_6 * r3_4_6 - p4_6 == 0) - @NLconstraint(nlp, c5_7_6, p5_6 * r5_7_6 - p7_6 == 0) - @NLconstraint(nlp, c19_20_6, p19_6 * r19_20_6 - p20_6 == 0) - @NLconstraint( + @constraint(nlp, c3_4_6, p3_6 * r3_4_6 - p4_6 == 0) + @constraint(nlp, c5_7_6, p5_6 * r5_7_6 - p7_6 == 0) + @constraint(nlp, c19_20_6, p19_6 * r19_20_6 - p20_6 == 0) + @constraint( nlp, p1_2_6, p1_6 * p1_6 - p2_6 * p2_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p1_6 + p2_6)) * ((abs(q1_2_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p1_17_6, p1_6 * p1_6 - p17_6 * p17_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p1_6 + p17_6)) * ((abs(q1_17_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p2_3_6, p2_6 * p2_6 - p3_6 * p3_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p2_6 + p3_6)) * ((abs(q2_3_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p4_5_6, p4_6 * p4_6 - p5_6 * p5_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p4_6 + p5_6)) * 
((abs(q4_5_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p5_6_6, p5_6 * p5_6 - p6_6 * p6_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p5_6 + p6_6)) * ((abs(q5_6_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p7_8_6, p7_6 * p7_6 - p8_6 * p8_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p7_6 + p8_6)) * ((abs(q7_8_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_9_6, p8_6 * p8_6 - p9_6 * p9_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_6 + p9_6)) * ((abs(q8_9_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_10_6, p8_6 * p8_6 - p10_6 * p10_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_6 + p10_6)) * ((abs(q8_10_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_11_6, p8_6 * p8_6 - p11_6 * p11_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_6 + p11_6)) * ((abs(q8_11_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p11_12_6, p11_6 * p11_6 - p12_6 * p12_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p11_6 + p12_6)) * ((abs(q11_12_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p12_13_6, p12_6 * p12_6 - p13_6 * p13_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p12_6 + p13_6)) * ((abs(q12_13_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p13_14_6, p13_6 * p13_6 - p14_6 * p14_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p13_6 + p14_6)) * ((abs(q13_14_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p13_15_6, p13_6 * p13_6 - p15_6 * p15_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p13_6 + p15_6)) * ((abs(q13_15_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p15_16_6, p15_6 * p15_6 - p16_6 * p16_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p15_6 + p16_6)) * ((abs(q15_16_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p17_18_6, p17_6 * p17_6 - p18_6 * p18_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p17_6 + p18_6)) * ((abs(q17_18_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p18_19_6, p18_6 * p18_6 - p19_6 * p19_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p18_6 + p19_6)) * ((abs(q18_19_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p20_21_6, p20_6 * p20_6 - p21_6 * p21_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p20_6 + p21_6)) * ((abs(q20_21_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p21_22_6, p21_6 * p21_6 - p22_6 * p22_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p21_6 + p22_6)) * ((abs(q21_22_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p22_23_6, p22_6 * p22_6 - p23_6 * p23_6 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p22_6 + p23_6)) * ((abs(q22_23_6))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, m1_7, p1_7 / ((1.0e+0) + (1.0e+0) * p1_7) - p1_6 / ((1.0e+0) + (1.0e+0) * p1_6) - 0.75 * q1_17_7 - 0.75 * q1_2_7 + in1_7 - 0.25 * q1_17_6 - 0.25 * q1_2_6 == 0 ) - @NLconstraint( + @constraint( nlp, m2_7, p2_7 / ((1.0e+0) + (1.0e+0) * p2_7) - p2_6 / ((1.0e+0) + (1.0e+0) * p2_6) - 0.75 * q2_3_7 + 0.75 * q1_2_7 - 0.25 * q2_3_6 + 0.25 * q1_2_6 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m3_7, p3_7 / ((1.0e+0) + (1.0e+0) * p3_7) - p3_6 / ((1.0e+0) + (1.0e+0) * p3_6) - f3_4_7 + 0.75 * q2_3_7 + 0.25 * q2_3_6 == 0 ) - @NLconstraint( + @constraint( nlp, m4_7, p4_7 / ((1.0e+0) + (1.0e+0) * p4_7) - p4_6 / ((1.0e+0) + (1.0e+0) * p4_6) - 0.75 * q4_5_7 + f3_4_7 - 0.25 * q4_5_6 == 0 ) - @NLconstraint( + @constraint( nlp, m5_7, p5_7 / ((1.0e+0) + (1.0e+0) * p5_7) - p5_6 / ((1.0e+0) + (1.0e+0) * p5_6) - 0.75 * q5_6_7 - f5_7_7 + 0.75 * q4_5_7 - 0.25 * q5_6_6 + 0.25 * q4_5_6 == 0 ) - @NLconstraint( + @constraint( nlp, m6_7, p6_7 / ((1.0e+0) + 
(1.0e+0) * p6_7) - p6_6 / ((1.0e+0) + (1.0e+0) * p6_6) + 0.75 * q5_6_7 + 0.25 * q5_6_6 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m7_7, p7_7 / ((1.0e+0) + (1.0e+0) * p7_7) - p7_6 / ((1.0e+0) + (1.0e+0) * p7_6) - 0.75 * q7_8_7 + f5_7_7 - 0.25 * q7_8_6 == 0 ) - @NLconstraint( + @constraint( nlp, m8_7, p8_7 / ((1.0e+0) + (1.0e+0) * p8_7) - p8_6 / ((1.0e+0) + (1.0e+0) * p8_6) - 0.75 * q8_9_7 - 0.75 * q8_10_7 - 0.75 * q8_11_7 + 0.75 * q7_8_7 - 0.25 * q8_9_6 - 0.25 * q8_10_6 - 0.25 * q8_11_6 + 0.25 * q7_8_6 == 0 ) - @NLconstraint( + @constraint( nlp, m9_7, p9_7 / ((1.0e+0) + (1.0e+0) * p9_7) - p9_6 / ((1.0e+0) + (1.0e+0) * p9_6) + 0.75 * q8_9_7 + 0.25 * q8_9_6 == 0 ) - @NLconstraint( + @constraint( nlp, m10_7, p10_7 / ((1.0e+0) + (1.0e+0) * p10_7) - p10_6 / ((1.0e+0) + (1.0e+0) * p10_6) + 0.75 * q8_10_7 + 0.25 * q8_10_6 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m11_7, p11_7 / ((1.0e+0) + (1.0e+0) * p11_7) - p11_6 / ((1.0e+0) + (1.0e+0) * p11_6) - 0.75 * q11_12_7 + 0.75 * q8_11_7 - 0.25 * q11_12_6 + 0.25 * q8_11_6 == 0 ) - @NLconstraint( + @constraint( nlp, m12_7, p12_7 / ((1.0e+0) + (1.0e+0) * p12_7) - p12_6 / ((1.0e+0) + (1.0e+0) * p12_6) - 0.75 * q12_13_7 + 0.75 * q11_12_7 - 0.25 * q12_13_6 + 0.25 * q11_12_6 == 0 ) - @NLconstraint( + @constraint( nlp, m13_7, p13_7 / ((1.0e+0) + (1.0e+0) * p13_7) - p13_6 / ((1.0e+0) + (1.0e+0) * p13_6) - 0.75 * q13_14_7 - 0.75 * q13_15_7 + 0.75 * q12_13_7 - 0.25 * q13_14_6 - 0.25 * q13_15_6 + 0.25 * q12_13_6 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m14_7, p14_7 / ((1.0e+0) + (1.0e+0) * p14_7) - p14_6 / ((1.0e+0) + (1.0e+0) * p14_6) + 0.75 * q13_14_7 + 0.25 * q13_14_6 == 0 ) - @NLconstraint( + @constraint( nlp, m15_7, p15_7 / ((1.0e+0) + (1.0e+0) * p15_7) - p15_6 / ((1.0e+0) + (1.0e+0) * p15_6) - 0.75 * q15_16_7 + 0.75 * q13_15_7 - 0.25 * q15_16_6 + 0.25 * q13_15_6 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m16_7, p16_7 / ((1.0e+0) + (1.0e+0) * p16_7) - p16_6 / ((1.0e+0) + (1.0e+0) * p16_6) + 0.75 * q15_16_7 + 0.25 * q15_16_6 - out16_7 == 0 ) - @NLconstraint( + @constraint( nlp, m17_7, p17_7 / ((1.0e+0) + (1.0e+0) * p17_7) - p17_6 / ((1.0e+0) + (1.0e+0) * p17_6) - 0.75 * q17_18_7 + 0.75 * q1_17_7 - 0.25 * q17_18_6 + 0.25 * q1_17_6 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m18_7, p18_7 / ((1.0e+0) + (1.0e+0) * p18_7) - p18_6 / ((1.0e+0) + (1.0e+0) * p18_6) - 0.75 * q18_19_7 + 0.75 * q17_18_7 - 0.25 * q18_19_6 + 0.25 * q17_18_6 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m19_7, p19_7 / ((1.0e+0) + (1.0e+0) * p19_7) - p19_6 / ((1.0e+0) + (1.0e+0) * p19_6) - f19_20_7 + 0.75 * q18_19_7 + 0.25 * q18_19_6 == 0 ) - @NLconstraint( + @constraint( nlp, m20_7, p20_7 / ((1.0e+0) + (1.0e+0) * p20_7) - p20_6 / ((1.0e+0) + (1.0e+0) * p20_6) - 0.75 * q20_21_7 + f19_20_7 - 0.25 * q20_21_6 == 0 ) - @NLconstraint( + @constraint( nlp, m21_7, p21_7 / ((1.0e+0) + (1.0e+0) * p21_7) - p21_6 / ((1.0e+0) + (1.0e+0) * p21_6) - 0.75 * q21_22_7 + 0.75 * q20_21_7 - 0.25 * q21_22_6 + 0.25 * q20_21_6 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m22_7, p22_7 / ((1.0e+0) + (1.0e+0) * p22_7) - p22_6 / ((1.0e+0) + (1.0e+0) * p22_6) - 0.75 * q22_23_7 + 0.75 * q21_22_7 - 0.25 * q22_23_6 + 0.25 * q21_22_6 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m23_7, p23_7 / ((1.0e+0) + (1.0e+0) * p23_7) - p23_6 / ((1.0e+0) + (1.0e+0) * p23_6) + 0.75 * q22_23_7 + 0.25 * q22_23_6 - out23_7 == 0 ) - @NLconstraint(nlp, c3_4_7, p3_7 * r3_4_7 - p4_7 == 0) - @NLconstraint(nlp, c5_7_7, p5_7 * r5_7_7 - p7_7 == 0) - @NLconstraint(nlp, c19_20_7, p19_7 * r19_20_7 - 
p20_7 == 0) - @NLconstraint( + @constraint(nlp, c3_4_7, p3_7 * r3_4_7 - p4_7 == 0) + @constraint(nlp, c5_7_7, p5_7 * r5_7_7 - p7_7 == 0) + @constraint(nlp, c19_20_7, p19_7 * r19_20_7 - p20_7 == 0) + @constraint( nlp, p1_2_7, p1_7 * p1_7 - p2_7 * p2_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p1_7 + p2_7)) * ((abs(q1_2_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p1_17_7, p1_7 * p1_7 - p17_7 * p17_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p1_7 + p17_7)) * ((abs(q1_17_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p2_3_7, p2_7 * p2_7 - p3_7 * p3_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p2_7 + p3_7)) * ((abs(q2_3_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p4_5_7, p4_7 * p4_7 - p5_7 * p5_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p4_7 + p5_7)) * ((abs(q4_5_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p5_6_7, p5_7 * p5_7 - p6_7 * p6_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p5_7 + p6_7)) * ((abs(q5_6_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p7_8_7, p7_7 * p7_7 - p8_7 * p8_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p7_7 + p8_7)) * ((abs(q7_8_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_9_7, p8_7 * p8_7 - p9_7 * p9_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_7 + p9_7)) * ((abs(q8_9_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_10_7, p8_7 * p8_7 - p10_7 * p10_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_7 + p10_7)) * ((abs(q8_10_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_11_7, p8_7 * p8_7 - p11_7 * p11_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_7 + p11_7)) * ((abs(q8_11_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p11_12_7, p11_7 * p11_7 - p12_7 * p12_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p11_7 + p12_7)) * ((abs(q11_12_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p12_13_7, p12_7 * p12_7 - p13_7 * p13_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p12_7 + p13_7)) * ((abs(q12_13_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p13_14_7, p13_7 * p13_7 - p14_7 * p14_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p13_7 + p14_7)) * ((abs(q13_14_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p13_15_7, p13_7 * p13_7 - p15_7 * p15_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p13_7 + p15_7)) * ((abs(q13_15_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p15_16_7, p15_7 * p15_7 - p16_7 * p16_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p15_7 + p16_7)) * ((abs(q15_16_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p17_18_7, p17_7 * p17_7 - p18_7 * p18_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p17_7 + p18_7)) * ((abs(q17_18_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p18_19_7, p18_7 * p18_7 - p19_7 * p19_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p18_7 + p19_7)) * ((abs(q18_19_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p20_21_7, p20_7 * p20_7 - p21_7 * p21_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p20_7 + p21_7)) * ((abs(q20_21_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p21_22_7, p21_7 * p21_7 - p22_7 * p22_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p21_7 + p22_7)) * ((abs(q21_22_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p22_23_7, p22_7 * p22_7 - p23_7 * p23_7 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p22_7 + p23_7)) * ((abs(q22_23_7))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, m1_8, p1_8 / ((1.0e+0) + (1.0e+0) * p1_8) - p1_7 / ((1.0e+0) + (1.0e+0) * p1_7) - 0.75 * q1_17_8 - 0.75 * q1_2_8 + in1_8 - 0.25 * q1_17_7 - 0.25 
* q1_2_7 == 0 ) - @NLconstraint( + @constraint( nlp, m2_8, p2_8 / ((1.0e+0) + (1.0e+0) * p2_8) - p2_7 / ((1.0e+0) + (1.0e+0) * p2_7) - 0.75 * q2_3_8 + 0.75 * q1_2_8 - 0.25 * q2_3_7 + 0.25 * q1_2_7 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m3_8, p3_8 / ((1.0e+0) + (1.0e+0) * p3_8) - p3_7 / ((1.0e+0) + (1.0e+0) * p3_7) - f3_4_8 + 0.75 * q2_3_8 + 0.25 * q2_3_7 == 0 ) - @NLconstraint( + @constraint( nlp, m4_8, p4_8 / ((1.0e+0) + (1.0e+0) * p4_8) - p4_7 / ((1.0e+0) + (1.0e+0) * p4_7) - 0.75 * q4_5_8 + f3_4_8 - 0.25 * q4_5_7 == 0 ) - @NLconstraint( + @constraint( nlp, m5_8, p5_8 / ((1.0e+0) + (1.0e+0) * p5_8) - p5_7 / ((1.0e+0) + (1.0e+0) * p5_7) - 0.75 * q5_6_8 - f5_7_8 + 0.75 * q4_5_8 - 0.25 * q5_6_7 + 0.25 * q4_5_7 == 0 ) - @NLconstraint( + @constraint( nlp, m6_8, p6_8 / ((1.0e+0) + (1.0e+0) * p6_8) - p6_7 / ((1.0e+0) + (1.0e+0) * p6_7) + 0.75 * q5_6_8 + 0.25 * q5_6_7 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m7_8, p7_8 / ((1.0e+0) + (1.0e+0) * p7_8) - p7_7 / ((1.0e+0) + (1.0e+0) * p7_7) - 0.75 * q7_8_8 + f5_7_8 - 0.25 * q7_8_7 == 0 ) - @NLconstraint( + @constraint( nlp, m8_8, p8_8 / ((1.0e+0) + (1.0e+0) * p8_8) - p8_7 / ((1.0e+0) + (1.0e+0) * p8_7) - 0.75 * q8_9_8 - 0.75 * q8_10_8 - 0.75 * q8_11_8 + 0.75 * q7_8_8 - 0.25 * q8_9_7 - 0.25 * q8_10_7 - 0.25 * q8_11_7 + 0.25 * q7_8_7 == 0 ) - @NLconstraint( + @constraint( nlp, m9_8, p9_8 / ((1.0e+0) + (1.0e+0) * p9_8) - p9_7 / ((1.0e+0) + (1.0e+0) * p9_7) + 0.75 * q8_9_8 + 0.25 * q8_9_7 == 0 ) - @NLconstraint( + @constraint( nlp, m10_8, p10_8 / ((1.0e+0) + (1.0e+0) * p10_8) - p10_7 / ((1.0e+0) + (1.0e+0) * p10_7) + 0.75 * q8_10_8 + 0.25 * q8_10_7 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m11_8, p11_8 / ((1.0e+0) + (1.0e+0) * p11_8) - p11_7 / ((1.0e+0) + (1.0e+0) * p11_7) - 0.75 * q11_12_8 + 0.75 * q8_11_8 - 0.25 * q11_12_7 + 0.25 * q8_11_7 == 0 ) - @NLconstraint( + @constraint( nlp, m12_8, p12_8 / ((1.0e+0) + (1.0e+0) * p12_8) - p12_7 / ((1.0e+0) + (1.0e+0) * p12_7) - 0.75 * q12_13_8 + 0.75 * q11_12_8 - 0.25 * q12_13_7 + 0.25 * q11_12_7 == 0 ) - @NLconstraint( + @constraint( nlp, m13_8, p13_8 / ((1.0e+0) + (1.0e+0) * p13_8) - p13_7 / ((1.0e+0) + (1.0e+0) * p13_7) - 0.75 * q13_14_8 - 0.75 * q13_15_8 + 0.75 * q12_13_8 - 0.25 * q13_14_7 - 0.25 * q13_15_7 + 0.25 * q12_13_7 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m14_8, p14_8 / ((1.0e+0) + (1.0e+0) * p14_8) - p14_7 / ((1.0e+0) + (1.0e+0) * p14_7) + 0.75 * q13_14_8 + 0.25 * q13_14_7 == 0 ) - @NLconstraint( + @constraint( nlp, m15_8, p15_8 / ((1.0e+0) + (1.0e+0) * p15_8) - p15_7 / ((1.0e+0) + (1.0e+0) * p15_7) - 0.75 * q15_16_8 + 0.75 * q13_15_8 - 0.25 * q15_16_7 + 0.25 * q13_15_7 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m16_8, p16_8 / ((1.0e+0) + (1.0e+0) * p16_8) - p16_7 / ((1.0e+0) + (1.0e+0) * p16_7) + 0.75 * q15_16_8 + 0.25 * q15_16_7 - out16_8 == 0 ) - @NLconstraint( + @constraint( nlp, m17_8, p17_8 / ((1.0e+0) + (1.0e+0) * p17_8) - p17_7 / ((1.0e+0) + (1.0e+0) * p17_7) - 0.75 * q17_18_8 + 0.75 * q1_17_8 - 0.25 * q17_18_7 + 0.25 * q1_17_7 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m18_8, p18_8 / ((1.0e+0) + (1.0e+0) * p18_8) - p18_7 / ((1.0e+0) + (1.0e+0) * p18_7) - 0.75 * q18_19_8 + 0.75 * q17_18_8 - 0.25 * q18_19_7 + 0.25 * q17_18_7 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m19_8, p19_8 / ((1.0e+0) + (1.0e+0) * p19_8) - p19_7 / ((1.0e+0) + (1.0e+0) * p19_7) - f19_20_8 + 0.75 * q18_19_8 + 0.25 * q18_19_7 == 0 ) - @NLconstraint( + @constraint( nlp, m20_8, p20_8 / ((1.0e+0) + (1.0e+0) * p20_8) - p20_7 / ((1.0e+0) + (1.0e+0) * p20_7) - 0.75 * 
q20_21_8 + f19_20_8 - 0.25 * q20_21_7 == 0 ) - @NLconstraint( + @constraint( nlp, m21_8, p21_8 / ((1.0e+0) + (1.0e+0) * p21_8) - p21_7 / ((1.0e+0) + (1.0e+0) * p21_7) - 0.75 * q21_22_8 + 0.75 * q20_21_8 - 0.25 * q21_22_7 + 0.25 * q20_21_7 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m22_8, p22_8 / ((1.0e+0) + (1.0e+0) * p22_8) - p22_7 / ((1.0e+0) + (1.0e+0) * p22_7) - 0.75 * q22_23_8 + 0.75 * q21_22_8 - 0.25 * q22_23_7 + 0.25 * q21_22_7 - 1.0 == 0 ) - @NLconstraint( + @constraint( nlp, m23_8, p23_8 / ((1.0e+0) + (1.0e+0) * p23_8) - p23_7 / ((1.0e+0) + (1.0e+0) * p23_7) + 0.75 * q22_23_8 + 0.25 * q22_23_7 - out23_8 == 0 ) - @NLconstraint(nlp, c3_4_8, p3_8 * r3_4_8 - p4_8 == 0) - @NLconstraint(nlp, c5_7_8, p5_8 * r5_7_8 - p7_8 == 0) - @NLconstraint(nlp, c19_20_8, p19_8 * r19_20_8 - p20_8 == 0) - @NLconstraint( + @constraint(nlp, c3_4_8, p3_8 * r3_4_8 - p4_8 == 0) + @constraint(nlp, c5_7_8, p5_8 * r5_7_8 - p7_8 == 0) + @constraint(nlp, c19_20_8, p19_8 * r19_20_8 - p20_8 == 0) + @constraint( nlp, p1_2_8, p1_8 * p1_8 - p2_8 * p2_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p1_8 + p2_8)) * ((abs(q1_2_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p1_17_8, p1_8 * p1_8 - p17_8 * p17_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p1_8 + p17_8)) * ((abs(q1_17_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p2_3_8, p2_8 * p2_8 - p3_8 * p3_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p2_8 + p3_8)) * ((abs(q2_3_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p4_5_8, p4_8 * p4_8 - p5_8 * p5_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p4_8 + p5_8)) * ((abs(q4_5_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p5_6_8, p5_8 * p5_8 - p6_8 * p6_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p5_8 + p6_8)) * ((abs(q5_6_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p7_8_8, p7_8 * p7_8 - p8_8 * p8_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p7_8 + p8_8)) * ((abs(q7_8_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_9_8, p8_8 * p8_8 - p9_8 * p9_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_8 + p9_8)) * ((abs(q8_9_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_10_8, p8_8 * p8_8 - p10_8 * p10_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_8 + p10_8)) * ((abs(q8_10_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p8_11_8, p8_8 * p8_8 - p11_8 * p11_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p8_8 + p11_8)) * ((abs(q8_11_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p11_12_8, p11_8 * p11_8 - p12_8 * p12_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p11_8 + p12_8)) * ((abs(q11_12_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p12_13_8, p12_8 * p12_8 - p13_8 * p13_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p12_8 + p13_8)) * ((abs(q12_13_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p13_14_8, p13_8 * p13_8 - p14_8 * p14_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p13_8 + p14_8)) * ((abs(q13_14_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p13_15_8, p13_8 * p13_8 - p15_8 * p15_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p13_8 + p15_8)) * ((abs(q13_15_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p15_16_8, p15_8 * p15_8 - p16_8 * p16_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p15_8 + p16_8)) * ((abs(q15_16_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p17_18_8, p17_8 * p17_8 - p18_8 * p18_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p17_8 + p18_8)) * ((abs(q17_18_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p18_19_8, p18_8 * p18_8 - p19_8 * 
p19_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p18_8 + p19_8)) * ((abs(q18_19_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p20_21_8, p20_8 * p20_8 - p21_8 * p21_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p20_8 + p21_8)) * ((abs(q20_21_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p21_22_8, p21_8 * p21_8 - p22_8 * p22_8 - 0.01 * ((1.0e+0) + (5.0e-1 * 1.0e+0) * (p21_8 + p22_8)) * ((abs(q21_22_8))^1.8539e+0) == 0 ) - @NLconstraint( + @constraint( nlp, p22_23_8, p22_8 * p22_8 - p23_8 * p23_8 - diff --git a/src/PureJuMP/brownal.jl b/src/PureJuMP/brownal.jl index 87fc9d5d..31d9ddb8 100644 --- a/src/PureJuMP/brownal.jl +++ b/src/PureJuMP/brownal.jl @@ -19,7 +19,7 @@ function brownal(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 1 / 2) - @NLobjective( + @objective( nlp, Min, 0.5 * sum((x[i] + sum(x[j] for j = 1:n) - (n + 1))^2 for i = 1:(n - 1)) + diff --git a/src/PureJuMP/brownbs.jl b/src/PureJuMP/brownbs.jl index d4f2366c..6eca4d17 100644 --- a/src/PureJuMP/brownbs.jl +++ b/src/PureJuMP/brownbs.jl @@ -18,7 +18,7 @@ function brownbs(args...; kwargs...) @variable(nlp, x[i = 1:2], start = 1.0) - @NLobjective( + @objective( nlp, Min, 0.5 * (x[1] - 1e6)^2 + 0.5 * (x[2] - 2 * 1e-6)^2 + 0.5 * (x[1] * x[2] - 2)^2 diff --git a/src/PureJuMP/brownden.jl b/src/PureJuMP/brownden.jl index 84b2c9b4..24cba0fd 100644 --- a/src/PureJuMP/brownden.jl +++ b/src/PureJuMP/brownden.jl @@ -24,7 +24,7 @@ function brownden(args...; m::Int = default_nvar, kwargs...) t = Float64[i / 5 for i = 1:m] - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/broyden3d.jl b/src/PureJuMP/broyden3d.jl index ffc81dca..169e0fee 100644 --- a/src/PureJuMP/broyden3d.jl +++ b/src/PureJuMP/broyden3d.jl @@ -18,7 +18,7 @@ function broyden3d(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = -1.0) - @NLobjective( + @objective( nlp, Min, 0.5 * sum(((3 - 2 * x[i]) * x[i] - x[i - 1] - 2 * x[i + 1] + 1)^2 for i = 2:(n - 1)) + diff --git a/src/PureJuMP/broydn7d.jl b/src/PureJuMP/broydn7d.jl index a490c01e..5778efbc 100644 --- a/src/PureJuMP/broydn7d.jl +++ b/src/PureJuMP/broydn7d.jl @@ -54,7 +54,7 @@ function broydn7d(args...; n::Int = default_nvar, p::Float64 = 7 / 3, kwargs...) @variable(nlp, x[i = 1:n], start = (-1.0)) - @NLobjective( + @objective( nlp, Min, abs(1 - 2 * x[2] + (3 - x[1] / 2) * x[1])^p + diff --git a/src/PureJuMP/brybnd.jl b/src/PureJuMP/brybnd.jl index 1680a1c0..309d58f6 100644 --- a/src/PureJuMP/brybnd.jl +++ b/src/PureJuMP/brybnd.jl @@ -48,7 +48,7 @@ function brybnd(args...; n::Int = default_nvar, ml::Int = 5, mu::Int = 1, kwargs @variable(nlp, x[i = 1:n], start = (-1.0)) - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/bt1.jl b/src/PureJuMP/bt1.jl index 5958b326..f6bdcd6e 100644 --- a/src/PureJuMP/bt1.jl +++ b/src/PureJuMP/bt1.jl @@ -20,9 +20,9 @@ function bt1(args...; kwargs...) @variable(nlp, x[i = 1:2], start = x0[i]) - @NLobjective(nlp, Min, 100 * x[1]^2 + 100 * x[2]^2 - x[1] - 100) + @objective(nlp, Min, 100 * x[1]^2 + 100 * x[2]^2 - x[1] - 100) - @NLconstraint(nlp, constr1, x[1]^2 + x[2]^2 - 1.0 == 0) + @constraint(nlp, constr1, x[1]^2 + x[2]^2 - 1.0 == 0) return nlp end diff --git a/src/PureJuMP/camshape.jl b/src/PureJuMP/camshape.jl index 1f155a57..9e4d1cc6 100644 --- a/src/PureJuMP/camshape.jl +++ b/src/PureJuMP/camshape.jl @@ -21,19 +21,20 @@ function camshape(args...; n::Int = default_nvar, kwargs...) 
@variable(nlp, R_min <= x[i = 1:n] <= R_max, start = (R_min + R_max) / 2) - @NLobjective(nlp, Min, -R_v * pi / n * sum(x[i] for i = 1:n)) + @objective(nlp, Min, -R_v * pi / n * sum(x[i] for i = 1:n)) - @NLconstraint(nlp, -α * θ <= R_max - x[n] <= α * θ) - @NLconstraint(nlp, -α * θ <= x[1] - R_min <= α * θ) + @constraint(nlp, -α * θ <= R_max - x[n] <= α * θ) + @constraint(nlp, -α * θ <= x[1] - R_min <= α * θ) for i = 1:(n - 1) - @NLconstraint(nlp, -α * θ <= x[i + 1] - x[i] <= α * θ) + @constraint(nlp, -α * θ <= x[i + 1] - x[i] <= α * θ) end - @NLconstraint(nlp, -R_min * x[1] - x[1] * x[2] + 2 * R_min * x[2] * cos(θ) <= 0) - @NLconstraint(nlp, -R_min^2 - R_min * x[1] + 2 * R_min * x[1] * cos(θ) <= 0) - @NLconstraint(nlp, -x[n - 1] * x[n] - x[n] * R_max + 2 * x[n - 1] * R_max * cos(θ) <= 0) - @NLconstraint(nlp, -2 * R_max * x[n] + 2 * x[n]^2 * cos(θ) <= 0) + @constraint(nlp, -R_min^2 - R_min * x[1] + 2 * R_min * x[1] * cos(θ) <= 0) + # quadratic + @constraint(nlp, -R_min * x[1] - x[1] * x[2] + 2 * R_min * x[2] * cos(θ) <= 0) + @constraint(nlp, -x[n - 1] * x[n] - x[n] * R_max + 2 * x[n - 1] * R_max * cos(θ) <= 0) + @constraint(nlp, -2 * R_max * x[n] + 2 * x[n]^2 * cos(θ) <= 0) for i = 2:(n - 1) - @NLconstraint(nlp, -x[i - 1] * x[i] - x[i] * x[i + 1] + 2 * x[i - 1] * x[i + 1] * cos(θ) <= 0) + @constraint(nlp, -x[i - 1] * x[i] - x[i] * x[i + 1] + 2 * x[i - 1] * x[i + 1] * cos(θ) <= 0) end return nlp diff --git a/src/PureJuMP/catenary.jl b/src/PureJuMP/catenary.jl index 258ba2a5..4da81b57 100644 --- a/src/PureJuMP/catenary.jl +++ b/src/PureJuMP/catenary.jl @@ -16,7 +16,7 @@ export catenary -function catenary(args...; n::Int = default_nvar, kwargs...) +function catenary(args...; n::Int = default_nvar, Bl = 1.0, FRACT = 0.6, kwargs...) (n % 3 == 0) || @warn("catenary: number of variables adjusted to be a multiple of 3") n = 3 * max(1, div(n, 3)) (n < 6) || @warn("catenary: number of variables adjusted to be greater or equal to 6") @@ -25,8 +25,6 @@ function catenary(args...; n::Int = default_nvar, kwargs...) ## Model Parameters N = div(n, 3) - 2 - Bl = 1.0 - FRACT = 0.6 d = Bl * (N + 1) * FRACT gamma = 9.81 @@ -53,7 +51,7 @@ function catenary(args...; n::Int = default_nvar, kwargs...) @objective(nlp, Min, mg * x[2] / 2 + sum(mg * x[2 + 3 * i] for i = 1:N) + mg * x[3 * N + 5] / 2) - @NLconstraint( + @constraint( nlp, c[i = 1:(N + 1)], (x[1 + 3 * i] - x[-2 + 3 * i])^2 + diff --git a/src/PureJuMP/chain.jl b/src/PureJuMP/chain.jl index 6be8755a..befb5f6c 100644 --- a/src/PureJuMP/chain.jl +++ b/src/PureJuMP/chain.jl @@ -34,7 +34,7 @@ function chain(args...; n::Int = default_nvar, kwargs...) ) @variable(nlp, x3[k = 1:(nh + 1)], start = 4 * abs(b - a) * (k / nh - tmin)) - @NLobjective(nlp, Min, x2[nh + 1]) + @objective(nlp, Min, x2[nh + 1]) for j = 1:nh @constraint(nlp, x1[j + 1] - x1[j] - 1 / 2 * h * (u[j] + u[j + 1]) == 0) @@ -45,13 +45,13 @@ function chain(args...; n::Int = default_nvar, kwargs...) @constraint(nlp, x3[1] == 0) @constraint(nlp, x3[nh + 1] == L) - @NLconstraint( + @constraint( nlp, [j = 1:nh], x2[j + 1] - x2[j] - 1 / 2 * h * (x1[j] * sqrt(1 + u[j]^2) + x1[j + 1] * sqrt(1 + u[j + 1]^2)) == 0 ) - @NLconstraint( + @constraint( nlp, [j = 1:nh], x3[j + 1] - x3[j] - 1 / 2 * h * (sqrt(1 + u[j]^2) + sqrt(1 + u[j + 1]^2)) == 0 diff --git a/src/PureJuMP/chainwoo.jl b/src/PureJuMP/chainwoo.jl index d6183c91..0fc8cb89 100644 --- a/src/PureJuMP/chainwoo.jl +++ b/src/PureJuMP/chainwoo.jl @@ -49,7 +49,7 @@ function chainwoo(args...; n::Int = default_nvar, kwargs...) 
set_start_value(x[3], -3) set_start_value(x[4], -1) - @NLobjective( + @objective( nlp, Min, 1.0 + sum( diff --git a/src/PureJuMP/channel.jl b/src/PureJuMP/channel.jl index 42de77d6..cce63466 100644 --- a/src/PureJuMP/channel.jl +++ b/src/PureJuMP/channel.jl @@ -40,7 +40,7 @@ function channel(args...; n::Int = default_nvar, kwargs...) @variable(nlp, v[i = 1:nh, j = 1:nd], start = x0[i + (j - 1) * nh]) @variable(nlp, w[i = 1:nh, j = 1:nc], start = 0.0) - @NLobjective(nlp, Min, 1.0) + @objective(nlp, Min, 1.0) @constraint(nlp, v[1, 1] == bc[1, 1]) @constraint(nlp, v[1, 2] == bc[2, 1]) @@ -65,7 +65,7 @@ function channel(args...; n::Int = default_nvar, kwargs...) end for j = 1:nc, i = 1:nh - @NLconstraint( + @constraint( nlp, sum(w[i, k] * (ρ[j]^(k - 1) / prod(j for j = 1:(k - 1))) for k = 1:nc) - R * ( diff --git a/src/PureJuMP/chnrosnb_mod.jl b/src/PureJuMP/chnrosnb_mod.jl index b7371d97..8abab9b2 100644 --- a/src/PureJuMP/chnrosnb_mod.jl +++ b/src/PureJuMP/chnrosnb_mod.jl @@ -23,7 +23,7 @@ function chnrosnb_mod(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = -1.0) - @NLobjective( + @objective( nlp, Min, 16 * sum((x[i - 1] - x[i]^2)^2 * (1.5 + sin(i))^2 for i = 2:n) + diff --git a/src/PureJuMP/chwirut1.jl b/src/PureJuMP/chwirut1.jl index 700a95ca..aa9d4478 100644 --- a/src/PureJuMP/chwirut1.jl +++ b/src/PureJuMP/chwirut1.jl @@ -258,7 +258,7 @@ function chwirut1(args...; kwargs...) @variable(nlp, x[j = 1:3]) set_start_value.(x, [0.1, 0.01, 0.02]) # [0.15, 0.008, 0.010] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((y[i, 1] - exp(-y[i, 2] * x[1]) / exp(x[2] + x[3] * y[i, 2]))^2 for i = 1:214) diff --git a/src/PureJuMP/chwirut2.jl b/src/PureJuMP/chwirut2.jl index 731d00d4..cab1727d 100644 --- a/src/PureJuMP/chwirut2.jl +++ b/src/PureJuMP/chwirut2.jl @@ -99,7 +99,7 @@ function chwirut2(args...; kwargs...) @variable(nlp, x[j = 1:3]) set_start_value.(x, [0.1, 0.01, 0.02]) # other [0.15, 0.008, 0.010] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((y[i, 1] - exp(-y[i, 2] * x[1]) / exp(x[2] + x[3] * y[i, 2]))^2 for i = 1:54) diff --git a/src/PureJuMP/cliff.jl b/src/PureJuMP/cliff.jl index 0dd3f508..141b1016 100644 --- a/src/PureJuMP/cliff.jl +++ b/src/PureJuMP/cliff.jl @@ -18,7 +18,7 @@ function cliff(args...; kwargs...) @variable(nlp, x[i = 1:2], start = x0[i]) - @NLobjective(nlp, Min, (0.01 * x[1] - 0.03)^2 - x[1] + x[2] + exp(20 * (x[1] - x[2]))) + @objective(nlp, Min, (0.01 * x[1] - 0.03)^2 - x[1] + x[2] + exp(20 * (x[1] - x[2]))) return nlp end diff --git a/src/PureJuMP/clnlbeam.jl b/src/PureJuMP/clnlbeam.jl index 56c10363..3849c7fe 100644 --- a/src/PureJuMP/clnlbeam.jl +++ b/src/PureJuMP/clnlbeam.jl @@ -23,14 +23,14 @@ function clnlbeam(args...; n::Int = default_nvar, kwargs...) -0.05 <= x[1:(N + 1)] <= 0.05 u[1:(N + 1)] end) - @NLobjective( + @objective( model, Min, sum( 0.5 * h * (u[i + 1]^2 + u[i]^2) + 0.5 * alpha * h * (cos(t[i + 1]) + cos(t[i])) for i = 1:N ), ) - @NLconstraint(model, [i = 1:N], x[i + 1] - x[i] - 0.5 * h * (sin(t[i + 1]) + sin(t[i])) == 0,) + @constraint(model, [i = 1:N], x[i + 1] - x[i] - 0.5 * h * (sin(t[i + 1]) + sin(t[i])) == 0,) @constraint(model, [i = 1:N], t[i + 1] - t[i] - 0.5 * h * u[i + 1] - 0.5 * h * u[i] == 0,) return model diff --git a/src/PureJuMP/clplatea.jl b/src/PureJuMP/clplatea.jl index 7d71e537..011e0db9 100644 --- a/src/PureJuMP/clplatea.jl +++ b/src/PureJuMP/clplatea.jl @@ -36,7 +36,7 @@ function clplatea(args...; n::Int = default_nvar, wght::Float64 = -0.1, kwargs.. 
hp2 = 0.5 * p^2 - @NLobjective( + @objective( nlp, Min, (wght * x[p, p]) + diff --git a/src/PureJuMP/clplateb.jl b/src/PureJuMP/clplateb.jl index d2fe780a..575e9fe5 100644 --- a/src/PureJuMP/clplateb.jl +++ b/src/PureJuMP/clplateb.jl @@ -38,7 +38,7 @@ function clplateb(args...; n::Int = default_nvar, wght::Float64 = -0.1, kwargs.. hp2 = 0.5 * p^2 disw = wght / (p - 1) - @NLobjective( + @objective( nlp, Min, sum(disw * x[p, j] for j = 1:p) + diff --git a/src/PureJuMP/clplatec.jl b/src/PureJuMP/clplatec.jl index 9cb14349..274feda4 100644 --- a/src/PureJuMP/clplatec.jl +++ b/src/PureJuMP/clplatec.jl @@ -45,7 +45,7 @@ function clplatec( wr = wght * r wl = wght * l - @NLobjective( + @objective( nlp, Min, wr * x[p, p] + diff --git a/src/PureJuMP/controlinvestment.jl b/src/PureJuMP/controlinvestment.jl index 9961a25a..7a8d74a7 100644 --- a/src/PureJuMP/controlinvestment.jl +++ b/src/PureJuMP/controlinvestment.jl @@ -14,11 +14,11 @@ function controlinvestment(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[1:N], start = x0) @variable(nlp, 0 ≤ u[1:N] ≤ 1, start = 0) - @NLexpression(nlp, f[k = 1:N], (u[k] - 1) * x[k]) - @NLobjective(nlp, Min, 0.5 * h * sum(f[k] + f[k + 1] for k = 1:(N - 1))) + @expression(nlp, f[k = 1:N], (u[k] - 1) * x[k]) + @objective(nlp, Min, 0.5 * h * sum(f[k] + f[k + 1] for k = 1:(N - 1))) @constraint(nlp, x[1] == x0) - @NLconstraint( + @constraint( nlp, dx[k = 1:(N - 1)], x[k + 1] - x[k] - 0.5 * h * gamma * (u[k] * x[k] + u[k + 1] * x[k + 1]) == 0.0 diff --git a/src/PureJuMP/cosine.jl b/src/PureJuMP/cosine.jl index b7c297b8..a96bfbb4 100644 --- a/src/PureJuMP/cosine.jl +++ b/src/PureJuMP/cosine.jl @@ -24,7 +24,7 @@ function cosine(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 1) - @NLobjective(nlp, Min, sum(cos(x[i]^2 - 0.5 * x[i + 1]) for i = 1:(n - 1))) + @objective(nlp, Min, sum(cos(x[i]^2 - 0.5 * x[i + 1]) for i = 1:(n - 1))) return nlp end diff --git a/src/PureJuMP/cragglvy.jl b/src/PureJuMP/cragglvy.jl index 1772e68e..b17027fd 100644 --- a/src/PureJuMP/cragglvy.jl +++ b/src/PureJuMP/cragglvy.jl @@ -37,7 +37,7 @@ function cragglvy(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 2) set_start_value(x[1], 1) - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/cragglvy2.jl b/src/PureJuMP/cragglvy2.jl index 2c3a6fe9..366743e5 100644 --- a/src/PureJuMP/cragglvy2.jl +++ b/src/PureJuMP/cragglvy2.jl @@ -32,7 +32,7 @@ function cragglvy2(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 2) set_start_value(x[1], 1) - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/curly.jl b/src/PureJuMP/curly.jl index 7d12c9da..bb3719a2 100644 --- a/src/PureJuMP/curly.jl +++ b/src/PureJuMP/curly.jl @@ -33,9 +33,9 @@ function curly(args...; n::Int = default_nvar, b::Int = 10, kwargs...) @variable(nlp, x[i = 1:n], start = x0[i]) - @NLexpression(nlp, f[i = 1:n], sum(x[j] for j = i:min(i + b, n))) + @expression(nlp, f[i = 1:n], sum(x[j] for j = i:min(i + b, n))) - @NLobjective(nlp, Min, sum(f[i] * (f[i] * (f[i]^2 - 20) - 0.1) for i = 1:n)) + @objective(nlp, Min, sum(f[i] * (f[i] * (f[i]^2 - 20) - 0.1) for i = 1:n)) return nlp end diff --git a/src/PureJuMP/danwood.jl b/src/PureJuMP/danwood.jl index f3313815..37bb05e8 100644 --- a/src/PureJuMP/danwood.jl +++ b/src/PureJuMP/danwood.jl @@ -55,7 +55,7 @@ function danwood(args...; kwargs...) 
@variable(nlp, x[j = 1:2]) set_start_value.(x, [1, 5]) # other: [0.7, 4] - @NLobjective(nlp, Min, 0.5 * sum((y[i, 1] - x[1] * y[i, 1]^(x[2]))^2 for i = 1:6)) + @objective(nlp, Min, 0.5 * sum((y[i, 1] - x[1] * y[i, 1]^(x[2]))^2 for i = 1:6)) return nlp end diff --git a/src/PureJuMP/dixmaan_efgh.jl b/src/PureJuMP/dixmaan_efgh.jl index c5d0138e..39e2c597 100644 --- a/src/PureJuMP/dixmaan_efgh.jl +++ b/src/PureJuMP/dixmaan_efgh.jl @@ -41,7 +41,7 @@ function dixmaane( @variable(nlp, x[i = 1:n], start = 2) - @NLobjective( + @objective( nlp, Min, 1 + diff --git a/src/PureJuMP/dixmaan_ijkl.jl b/src/PureJuMP/dixmaan_ijkl.jl index 48cb34d8..11f29171 100644 --- a/src/PureJuMP/dixmaan_ijkl.jl +++ b/src/PureJuMP/dixmaan_ijkl.jl @@ -41,7 +41,7 @@ function dixmaani( @variable(nlp, x[i = 1:n], start = 2) - @NLobjective( + @objective( nlp, Min, 1 + diff --git a/src/PureJuMP/dixmaan_mnop.jl b/src/PureJuMP/dixmaan_mnop.jl index ef94f19a..c4537bcd 100644 --- a/src/PureJuMP/dixmaan_mnop.jl +++ b/src/PureJuMP/dixmaan_mnop.jl @@ -39,7 +39,7 @@ function dixmaanm( @variable(nlp, x[i = 1:n], start = 2) - @NLobjective( + @objective( nlp, Min, 1 + diff --git a/src/PureJuMP/dixon3dq.jl b/src/PureJuMP/dixon3dq.jl index 949cc757..6b7ee3b7 100644 --- a/src/PureJuMP/dixon3dq.jl +++ b/src/PureJuMP/dixon3dq.jl @@ -16,7 +16,7 @@ function dixon3dq(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = -1.0) - @NLobjective( + @objective( nlp, Min, 0.5 * (x[1] - 1.0)^2 + 0.5 * (x[n] - 1.0)^2 + 0.5 * sum((x[i] - x[i + 1])^2 for i = 2:(n - 1)) diff --git a/src/PureJuMP/dqdrtic.jl b/src/PureJuMP/dqdrtic.jl index 62445d2a..8d611f6a 100644 --- a/src/PureJuMP/dqdrtic.jl +++ b/src/PureJuMP/dqdrtic.jl @@ -16,7 +16,7 @@ function dqdrtic(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[j = 1:n], start = 3.0) - @NLobjective(nlp, Min, sum(x[i]^2 + 100 * (x[i + 1]^2 + x[i + 2]^2) for i = 1:(n - 2))) + @objective(nlp, Min, sum(x[i]^2 + 100 * (x[i + 1]^2 + x[i + 2]^2) for i = 1:(n - 2))) return nlp end diff --git a/src/PureJuMP/dqrtic.jl b/src/PureJuMP/dqrtic.jl index 3915e1d7..4efd23fd 100644 --- a/src/PureJuMP/dqrtic.jl +++ b/src/PureJuMP/dqrtic.jl @@ -29,7 +29,7 @@ function dqrtic(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 2) - @NLobjective(nlp, Min, sum((x[i] - i)^4 for i = 1:n)) + @objective(nlp, Min, sum((x[i] - i)^4 for i = 1:n)) return nlp end diff --git a/src/PureJuMP/eckerle4.jl b/src/PureJuMP/eckerle4.jl index bb235f2c..7b471f1c 100644 --- a/src/PureJuMP/eckerle4.jl +++ b/src/PureJuMP/eckerle4.jl @@ -79,7 +79,7 @@ function eckerle4(args...; kwargs...) @variable(nlp, x[j = 1:3]) set_start_value.(x, [1, 10, 500]) # other: [1.5, 5, 450] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((y[i, 1] - (x[1] / x[2] * exp(-0.5 * ((y[i, 2] - x[3]) / x[2])^2)))^2 for i = 1:35) diff --git a/src/PureJuMP/edensch.jl b/src/PureJuMP/edensch.jl index d1ff1a64..2f401839 100644 --- a/src/PureJuMP/edensch.jl +++ b/src/PureJuMP/edensch.jl @@ -32,7 +32,7 @@ function edensch(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 0) - @NLobjective( + @objective( nlp, Min, 16 + diff --git a/src/PureJuMP/eg2.jl b/src/PureJuMP/eg2.jl index 452c745b..1c4e49ac 100644 --- a/src/PureJuMP/eg2.jl +++ b/src/PureJuMP/eg2.jl @@ -32,7 +32,7 @@ function eg2(args...; n::Int = default_nvar, kwargs...) 
@variable(nlp, x[i = 1:n], start = 0) - @NLobjective(nlp, Min, sum(sin(x[1] + x[i]^2 - 1) for i = 1:(n - 1)) + 0.5 * sin(x[n]^2)) + @objective(nlp, Min, sum(sin(x[1] + x[i]^2 - 1) for i = 1:(n - 1)) + 0.5 * sin(x[n]^2)) return nlp end diff --git a/src/PureJuMP/elec.jl b/src/PureJuMP/elec.jl index b79c1549..a1f0ca3d 100644 --- a/src/PureJuMP/elec.jl +++ b/src/PureJuMP/elec.jl @@ -26,7 +26,7 @@ function elec(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:(3n)], start = x0[i]) - @NLobjective( + @objective( nlp, Min, sum( @@ -37,7 +37,7 @@ function elec(args...; n::Int = default_nvar, kwargs...) ) ) - @NLconstraint(nlp, [k = 1:n], x[k]^2 + x[n + k]^2 + x[2n + k]^2 == 1) + @constraint(nlp, [k = 1:n], x[k]^2 + x[n + k]^2 + x[2n + k]^2 == 1) return nlp end diff --git a/src/PureJuMP/engval1.jl b/src/PureJuMP/engval1.jl index e525c886..a3bcdefb 100644 --- a/src/PureJuMP/engval1.jl +++ b/src/PureJuMP/engval1.jl @@ -32,7 +32,7 @@ function engval1(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 2) - @NLobjective(nlp, Min, sum((x[i]^2 + x[i + 1]^2)^2 - 4 * x[i] + 3 for i = 1:(n - 1))) + @objective(nlp, Min, sum((x[i]^2 + x[i + 1]^2)^2 - 4 * x[i] + 3 for i = 1:(n - 1))) return nlp end diff --git a/src/PureJuMP/enso.jl b/src/PureJuMP/enso.jl index 85c30c47..1991f0dc 100644 --- a/src/PureJuMP/enso.jl +++ b/src/PureJuMP/enso.jl @@ -220,7 +220,7 @@ function enso(args...; kwargs...) set_start_value.(x, [11, 3, 0.5, 40, -0.7, -1.3, 25, -0.3, 1.4]) # other: [10, 3, 0.5, 44, -1.5, 0.5, 26, -0.1, 1.5] - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/errinros_mod.jl b/src/PureJuMP/errinros_mod.jl index 1bcf5a41..00c56c75 100644 --- a/src/PureJuMP/errinros_mod.jl +++ b/src/PureJuMP/errinros_mod.jl @@ -23,7 +23,7 @@ function errinros_mod(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = -1.0) - @NLobjective( + @objective( nlp, Min, 0.5 * sum((x[i - 1] - 16.0 * x[i]^2 * (1.5 + sin(i))^2)^2 for i = 2:n) + diff --git a/src/PureJuMP/extrosnb.jl b/src/PureJuMP/extrosnb.jl index 9aeb32ad..b2387296 100644 --- a/src/PureJuMP/extrosnb.jl +++ b/src/PureJuMP/extrosnb.jl @@ -30,7 +30,7 @@ function extrosnb(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = -1.0) # Strange to start at the solution? - @NLobjective(nlp, Min, 100.0 * sum((x[i] - x[i - 1]^2)^2 for i = 2:n) + (1.0 - x[1])^2) + @objective(nlp, Min, 100.0 * sum((x[i] - x[i - 1]^2)^2 for i = 2:n) + (1.0 - x[1])^2) return nlp end diff --git a/src/PureJuMP/fletcbv2.jl b/src/PureJuMP/fletcbv2.jl index 64e688a6..7f26cbe2 100644 --- a/src/PureJuMP/fletcbv2.jl +++ b/src/PureJuMP/fletcbv2.jl @@ -32,7 +32,7 @@ function fletcbv2(args...; n::Int = default_nvar, kwargs...) h = 1.0 / (n + 1) - @NLobjective( + @objective( nlp, Min, 0.5 * (x[1]^2 + sum((x[i] - x[i + 1])^2 for i = 1:(n - 1)) + x[n]^2) - diff --git a/src/PureJuMP/fletcbv3_mod.jl b/src/PureJuMP/fletcbv3_mod.jl index c703f5bb..00199e09 100644 --- a/src/PureJuMP/fletcbv3_mod.jl +++ b/src/PureJuMP/fletcbv3_mod.jl @@ -33,7 +33,7 @@ function fletcbv3_mod(args...; n::Int = default_nvar, kwargs...) p = 10.0^(-8) h = 1.0 / (n + 1) - @NLobjective( + @objective( nlp, Min, (p / 2.0) * (x[1]^2 + sum((x[i] - x[i + 1])^2 for i = 1:(n - 1)) + x[n]^2) - diff --git a/src/PureJuMP/fletchcr.jl b/src/PureJuMP/fletchcr.jl index 678fa198..c9c9cc85 100644 --- a/src/PureJuMP/fletchcr.jl +++ b/src/PureJuMP/fletchcr.jl @@ -40,7 +40,7 @@ function fletchcr(args...; n::Int = default_nvar, kwargs...) 
@variable(nlp, x[i = 1:n], start = 0.0) - @NLobjective(nlp, Min, 100.0 * sum((x[i + 1] - x[i] + 1 - x[i]^2)^2 for i = 1:(n - 1))) + @objective(nlp, Min, 100.0 * sum((x[i + 1] - x[i] + 1 - x[i]^2)^2 for i = 1:(n - 1))) return nlp end diff --git a/src/PureJuMP/fminsrf2.jl b/src/PureJuMP/fminsrf2.jl index 70a648c5..b6acb5a8 100644 --- a/src/PureJuMP/fminsrf2.jl +++ b/src/PureJuMP/fminsrf2.jl @@ -53,7 +53,7 @@ function fminsrf2(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:p, j = 1:p], start = x0[i, j]) - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/freuroth.jl b/src/PureJuMP/freuroth.jl index a1c3de3a..3568b52c 100644 --- a/src/PureJuMP/freuroth.jl +++ b/src/PureJuMP/freuroth.jl @@ -35,7 +35,7 @@ function freuroth(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = x0[i]) - @NLobjective( + @objective( nlp, Min, 0.5 * sum(((5.0 - x[i + 1]) * x[i + 1]^2 + x[i] - 2 * x[i + 1] - 13.0)^2 for i = 1:ngs) + diff --git a/src/PureJuMP/gauss1.jl b/src/PureJuMP/gauss1.jl index 08ea0c17..57841e11 100644 --- a/src/PureJuMP/gauss1.jl +++ b/src/PureJuMP/gauss1.jl @@ -295,7 +295,7 @@ function gauss1(args...; kwargs...) set_start_value.(x, [97, 0.009, 100, 65, 20, 70, 178, 16.5]) # other: [94, 0.0105, 99, 63, 25, 71, 180, 20] - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/gauss2.jl b/src/PureJuMP/gauss2.jl index 60bae9cd..b8b22a51 100644 --- a/src/PureJuMP/gauss2.jl +++ b/src/PureJuMP/gauss2.jl @@ -294,7 +294,7 @@ function gauss2(args...; kwargs...) set_start_value.(x, [96, 0.009, 103, 106, 18, 72, 151, 18]) # other: [98, 0.0105, 103, 105, 20, 73, 150, 20] - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/gauss3.jl b/src/PureJuMP/gauss3.jl index c5cc7fb7..03295966 100644 --- a/src/PureJuMP/gauss3.jl +++ b/src/PureJuMP/gauss3.jl @@ -294,7 +294,7 @@ function gauss3(args...; kwargs...) set_start_value.(x, [94.9, 0.009, 90.1, 113, 20, 73, 140, 20]) # other: [96, 0.0096, 80, 110, 25, 74, 139, 25] - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/gaussian.jl b/src/PureJuMP/gaussian.jl index f83b6b12..84d20ce9 100644 --- a/src/PureJuMP/gaussian.jl +++ b/src/PureJuMP/gaussian.jl @@ -32,7 +32,7 @@ function gaussian(args...; n::Int = default_nvar, kwargs...) 0.0009, ] - @NLobjective( + @objective( nlp, Min, 0.5 * (x[1] * exp(-x[2] / 2 * ((8 - 1) / 2 - x[3])^2) - y[1])^2 + diff --git a/src/PureJuMP/genhumps.jl b/src/PureJuMP/genhumps.jl index fa64f24e..274b74b6 100644 --- a/src/PureJuMP/genhumps.jl +++ b/src/PureJuMP/genhumps.jl @@ -27,7 +27,7 @@ function genhumps(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = x0[i]) - @NLobjective( + @objective( nlp, Min, sum((sin(ζ * x[i])^2 * sin(ζ * x[i + 1])^2 + 0.05 * (x[i]^2 + x[i + 1]^2)) for i = 1:(n - 1)) diff --git a/src/PureJuMP/genrose.jl b/src/PureJuMP/genrose.jl index d7be89bc..60c85b16 100644 --- a/src/PureJuMP/genrose.jl +++ b/src/PureJuMP/genrose.jl @@ -80,7 +80,7 @@ function genrose(args...; n::Int = default_nvar, kwargs...) # end @variable(nlp, x[i = 1:n], start = x0[i]) - @NLobjective( + @objective( nlp, Min, 1.0 + diff --git a/src/PureJuMP/genrose_nash.jl b/src/PureJuMP/genrose_nash.jl index d1ac1133..25a800a5 100644 --- a/src/PureJuMP/genrose_nash.jl +++ b/src/PureJuMP/genrose_nash.jl @@ -40,7 +40,7 @@ function genrose_nash(args...; n::Int = default_nvar, kwargs...) 
@variable(nlp, x[i = 1:n], start = (i / (n + 1))) - @NLobjective( + @objective( nlp, Min, 1.0 + 100 * sum((x[i] - x[i - 1]^2)^2 for i = 2:n) + sum((1.0 - x[i])^2 for i = 2:n) diff --git a/src/PureJuMP/gulf.jl b/src/PureJuMP/gulf.jl index d715c18f..e9fa9572 100644 --- a/src/PureJuMP/gulf.jl +++ b/src/PureJuMP/gulf.jl @@ -24,7 +24,7 @@ function gulf(args...; n::Int = default_nvar, m::Int = 100, kwargs...) @variable(nlp, x[j = 1:n]) set_start_value.(x, [5, 2.5, 0.15]) - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/hahn1.jl b/src/PureJuMP/hahn1.jl index 076e39e3..3ecfbe74 100644 --- a/src/PureJuMP/hahn1.jl +++ b/src/PureJuMP/hahn1.jl @@ -283,7 +283,7 @@ function hahn1(args...; kwargs...) set_start_value.(x, [10, -1, 0.05, -0.00001, -0.05, 0.001, -0.000001]) # other: [1, -0.1, 0.005, -0.000001, -0.005, 0.0001, -0.0000001] - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/helical.jl b/src/PureJuMP/helical.jl index 93e4224c..9619b0f6 100644 --- a/src/PureJuMP/helical.jl +++ b/src/PureJuMP/helical.jl @@ -9,7 +9,7 @@ function helical(args...; n::Int = default_nvar, kwargs...) nlp = Model() x0 = [-1.0; 0.0; 0.0] @variable(nlp, x[i = 1:3], start = x0[i]) - @NLobjective( + @objective( nlp, Min, (10 * (x[3] - 10 * (atan(x[2] / x[1]) - 0.25 * (x[1] - abs(x[1]) / x[1])) / (2 * pi)))^2 + diff --git a/src/PureJuMP/hs1.jl b/src/PureJuMP/hs1.jl index ad6c87ef..1364f2f1 100644 --- a/src/PureJuMP/hs1.jl +++ b/src/PureJuMP/hs1.jl @@ -20,7 +20,7 @@ function hs1(args...; kwargs...) lvar = [-Inf, -1.5] @variable(nlp, x[i = 1:2] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) + @objective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) return nlp end diff --git a/src/PureJuMP/hs10.jl b/src/PureJuMP/hs10.jl index d3a2a388..288d29a9 100644 --- a/src/PureJuMP/hs10.jl +++ b/src/PureJuMP/hs10.jl @@ -21,7 +21,7 @@ function hs10(args...; kwargs...) @objective(nlp, Min, x[1] - x[2]) - @NLconstraint(nlp, -3 * x[1]^2 + 2 * x[1] * x[2] - x[2]^2 + 1 ≥ 0) + @constraint(nlp, -3 * x[1]^2 + 2 * x[1] * x[2] - x[2]^2 + 1 ≥ 0) return nlp end diff --git a/src/PureJuMP/hs100.jl b/src/PureJuMP/hs100.jl index 35d918fc..eb54e520 100644 --- a/src/PureJuMP/hs100.jl +++ b/src/PureJuMP/hs100.jl @@ -19,12 +19,12 @@ function hs100(args...; kwargs...) x0 = [1, 2, 0, 4, 0, 1, 1] @variable(nlp, x[i = 1:7], start = x0[i]) - @NLconstraint(nlp, 127 - 2 * x[1]^2 - 3 * x[2]^4 - x[3] - 4 * x[4]^2 - 5 * x[5] ≥ 0) - @NLconstraint(nlp, 282 - 7 * x[1] - 3 * x[2] - 10 * x[3]^2 - x[4] + x[5] ≥ 0) - @NLconstraint(nlp, 196 - 23 * x[1] - x[2]^2 - 6 * x[6]^2 + 8 * x[7] ≥ 0) - @NLconstraint(nlp, -4 * x[1]^2 - x[2]^2 + 3 * x[1] * x[2] - 2 * x[3]^2 - 5 * x[6] + 11 * x[7] ≥ 0) + @constraint(nlp, 127 - 2 * x[1]^2 - 3 * x[2]^4 - x[3] - 4 * x[4]^2 - 5 * x[5] ≥ 0) + @constraint(nlp, 282 - 7 * x[1] - 3 * x[2] - 10 * x[3]^2 - x[4] + x[5] ≥ 0) + @constraint(nlp, 196 - 23 * x[1] - x[2]^2 - 6 * x[6]^2 + 8 * x[7] ≥ 0) + @constraint(nlp, -4 * x[1]^2 - x[2]^2 + 3 * x[1] * x[2] - 2 * x[3]^2 - 5 * x[6] + 11 * x[7] ≥ 0) - @NLobjective( + @objective( nlp, Min, (x[1] - 10)^2 + diff --git a/src/PureJuMP/hs101.jl b/src/PureJuMP/hs101.jl index bac58d16..b022a57e 100644 --- a/src/PureJuMP/hs101.jl +++ b/src/PureJuMP/hs101.jl @@ -26,7 +26,7 @@ function hs101(args...; kwargs...) 
exposant_f[3, :] = [-2, 1, 0, -1, -2, 1, 0] exposant_f[4, :] = [2, 2, -1, 0, 0.5, -2, 1] - @NLexpression( + @expression( nlp, f, 10 * prod(x[i]^exposant_f[1, i] for i = 1:7) + @@ -39,7 +39,7 @@ function hs101(args...; kwargs...) exposant_c1[1, :] = [0.5, 0, -1, 0, 0, -2, 1] exposant_c1[2, :] = [3, 1, -2, 0, 0, 1, 0.5] exposant_c1[3, :] = [0, -1, 1, -0.5, 0, 2 / 3, 1 / 4] - @NLconstraint( + @constraint( nlp, 1 - 0.5 * prod(x[i]^exposant_c1[1, i] for i = 1:7) - 0.7 * prod(x[i]^exposant_c1[2, i] for i = 1:7) - @@ -50,7 +50,7 @@ function hs101(args...; kwargs...) exposant_c2[1, :] = [-0.5, 1, -1, 0, -1, 1, 0] exposant_c2[2, :] = [0, 0, 1, -1, -1, 2, 0] exposant_c2[3, :] = [-1, 0.5, 0, -2, -1, 1 / 3, 0] - @NLconstraint( + @constraint( nlp, 1 - 1.3 * prod(x[i]^exposant_c2[1, i] for i = 1:7) - 0.8 * prod(x[i]^exposant_c2[2, i] for i = 1:7) - @@ -62,7 +62,7 @@ function hs101(args...; kwargs...) exposant_c3[2, :] = [0, 1, -0.5, 0, 1, -1, -0.5] exposant_c3[3, :] = [-1, 1, 0.5, 0, 1, 0, 0] exposant_c3[4, :] = [0, -2, 1, 0, 1, -1, 1] - @NLconstraint( + @constraint( nlp, 1 - 2 * prod(x[i]^exposant_c3[1, i] for i = 1:7) - 0.1 * prod(x[i]^exposant_c3[2, i] for i = 1:7) - 1 * prod(x[i]^exposant_c3[3, i] for i = 1:7) - @@ -74,7 +74,7 @@ function hs101(args...; kwargs...) exposant_c4[2, :] = [0.5, 2, 1, 1 / 3, -2 / 3, 0, 1 / 4] exposant_c4[3, :] = [-3, -2, 1, 0, 1, 0, 3 / 4] exposant_c4[4, :] = [0, 0, -2, 1, 0, 0, 0.5] - @NLconstraint( + @constraint( nlp, 1 - 0.2 * prod(x[i]^exposant_c4[1, i] for i = 1:7) - 0.3 * prod(x[i]^exposant_c4[2, i] for i = 1:7) - @@ -82,9 +82,9 @@ function hs101(args...; kwargs...) 0.5 * prod(x[i]^exposant_c4[4, i] for i = 1:7) ≥ 0 ) - @NLconstraint(nlp, 100 ≤ f ≤ 3000) + @constraint(nlp, 100 ≤ f ≤ 3000) - @NLobjective(nlp, Min, f) + @objective(nlp, Min, f) return nlp end diff --git a/src/PureJuMP/hs102.jl b/src/PureJuMP/hs102.jl index 3acad964..014e9b12 100644 --- a/src/PureJuMP/hs102.jl +++ b/src/PureJuMP/hs102.jl @@ -26,7 +26,7 @@ function hs102(args...; kwargs...) exposant_f[3, :] = [-2, 1, 0, -1, -2, 1, 0] exposant_f[4, :] = [2, 2, -1, 0, 0.5, -2, 1] - @NLexpression( + @expression( nlp, f, 10 * prod(x[i]^exposant_f[1, i] for i = 1:7) + @@ -39,7 +39,7 @@ function hs102(args...; kwargs...) exposant_c1[1, :] = [0.5, 0, -1, 0, 0, -2, 1] exposant_c1[2, :] = [3, 1, -2, 0, 0, 1, 0.5] exposant_c1[3, :] = [0, -1, 1, -0.5, 0, 2 / 3, 1 / 4] - @NLconstraint( + @constraint( nlp, 1 - 0.5 * prod(x[i]^exposant_c1[1, i] for i = 1:7) - 0.7 * prod(x[i]^exposant_c1[2, i] for i = 1:7) - @@ -50,7 +50,7 @@ function hs102(args...; kwargs...) exposant_c2[1, :] = [-0.5, 1, -1, 0, -1, 1, 0] exposant_c2[2, :] = [0, 0, 1, -1, -1, 2, 0] exposant_c2[3, :] = [-1, 0.5, 0, -2, -1, 1 / 3, 0] - @NLconstraint( + @constraint( nlp, 1 - 1.3 * prod(x[i]^exposant_c2[1, i] for i = 1:7) - 0.8 * prod(x[i]^exposant_c2[2, i] for i = 1:7) - @@ -62,7 +62,7 @@ function hs102(args...; kwargs...) exposant_c3[2, :] = [0, 1, -0.5, 0, 1, -1, -0.5] exposant_c3[3, :] = [-1, 1, 0.5, 0, 1, 0, 0] exposant_c3[4, :] = [0, -2, 1, 0, 1, -1, 1] - @NLconstraint( + @constraint( nlp, 1 - 2 * prod(x[i]^exposant_c3[1, i] for i = 1:7) - 0.1 * prod(x[i]^exposant_c3[2, i] for i = 1:7) - 1 * prod(x[i]^exposant_c3[3, i] for i = 1:7) - @@ -74,7 +74,7 @@ function hs102(args...; kwargs...) 
exposant_c4[2, :] = [0.5, 2, 1, 1 / 3, -2 / 3, 0, 1 / 4] exposant_c4[3, :] = [-3, -2, 1, 0, 1, 0, 3 / 4] exposant_c4[4, :] = [0, 0, -2, 1, 0, 0, 0.5] - @NLconstraint( + @constraint( nlp, 1 - 0.2 * prod(x[i]^exposant_c4[1, i] for i = 1:7) - 0.3 * prod(x[i]^exposant_c4[2, i] for i = 1:7) - @@ -82,9 +82,9 @@ function hs102(args...; kwargs...) 0.5 * prod(x[i]^exposant_c4[4, i] for i = 1:7) ≥ 0 ) - @NLconstraint(nlp, 100 ≤ f ≤ 3000) + @constraint(nlp, 100 ≤ f ≤ 3000) - @NLobjective(nlp, Min, f) + @objective(nlp, Min, f) return nlp end diff --git a/src/PureJuMP/hs103.jl b/src/PureJuMP/hs103.jl index 6a7d09cc..daee0005 100644 --- a/src/PureJuMP/hs103.jl +++ b/src/PureJuMP/hs103.jl @@ -26,7 +26,7 @@ function hs103(args...; kwargs...) exposant_f[3, :] = [-2, 1, 0, -1, -2, 1, 0] exposant_f[4, :] = [2, 2, -1, 0, 0.5, -2, 1] - @NLexpression( + @expression( nlp, f, 10 * prod(x[i]^exposant_f[1, i] for i = 1:7) + @@ -39,7 +39,7 @@ function hs103(args...; kwargs...) exposant_c1[1, :] = [0.5, 0, -1, 0, 0, -2, 1] exposant_c1[2, :] = [3, 1, -2, 0, 0, 1, 0.5] exposant_c1[3, :] = [0, -1, 1, -0.5, 0, 2 / 3, 1 / 4] - @NLconstraint( + @constraint( nlp, 1 - 0.5 * prod(x[i]^exposant_c1[1, i] for i = 1:7) - 0.7 * prod(x[i]^exposant_c1[2, i] for i = 1:7) - @@ -50,7 +50,7 @@ function hs103(args...; kwargs...) exposant_c2[1, :] = [-0.5, 1, -1, 0, -1, 1, 0] exposant_c2[2, :] = [0, 0, 1, -1, -1, 2, 0] exposant_c2[3, :] = [-1, 0.5, 0, -2, -1, 1 / 3, 0] - @NLconstraint( + @constraint( nlp, 1 - 1.3 * prod(x[i]^exposant_c2[1, i] for i = 1:7) - 0.8 * prod(x[i]^exposant_c2[2, i] for i = 1:7) - @@ -62,7 +62,7 @@ function hs103(args...; kwargs...) exposant_c3[2, :] = [0, 1, -0.5, 0, 1, -1, -0.5] exposant_c3[3, :] = [-1, 1, 0.5, 0, 1, 0, 0] exposant_c3[4, :] = [0, -2, 1, 0, 1, -1, 1] - @NLconstraint( + @constraint( nlp, 1 - 2 * prod(x[i]^exposant_c3[1, i] for i = 1:7) - 0.1 * prod(x[i]^exposant_c3[2, i] for i = 1:7) - 1 * prod(x[i]^exposant_c3[3, i] for i = 1:7) - @@ -74,7 +74,7 @@ function hs103(args...; kwargs...) exposant_c4[2, :] = [0.5, 2, 1, 1 / 3, -2 / 3, 0, 1 / 4] exposant_c4[3, :] = [-3, -2, 1, 0, 1, 0, 3 / 4] exposant_c4[4, :] = [0, 0, -2, 1, 0, 0, 0.5] - @NLconstraint( + @constraint( nlp, 1 - 0.2 * prod(x[i]^exposant_c4[1, i] for i = 1:7) - 0.3 * prod(x[i]^exposant_c4[2, i] for i = 1:7) - @@ -82,9 +82,9 @@ function hs103(args...; kwargs...) 0.5 * prod(x[i]^exposant_c4[4, i] for i = 1:7) ≥ 0 ) - @NLconstraint(nlp, 100 ≤ f ≤ 3000) + @constraint(nlp, 100 ≤ f ≤ 3000) - @NLobjective(nlp, Min, f) + @objective(nlp, Min, f) return nlp end diff --git a/src/PureJuMP/hs104.jl b/src/PureJuMP/hs104.jl index 3e06bd85..2e1cfc97 100644 --- a/src/PureJuMP/hs104.jl +++ b/src/PureJuMP/hs104.jl @@ -19,21 +19,21 @@ function hs104(args...; kwargs...) 
x0 = [6, 3, 0.4, 0.2, 6, 6, 1, 0.5] @variable(nlp, 0.1 ≤ x[i = 1:8] ≤ 10, start = x0[i]) - @NLexpression(nlp, f, 0.4 * (x[1] / x[7])^(0.67) + 0.4 * (x[2] / x[8])^(0.67) + 10 - x[1] - x[2]) + @expression(nlp, f, 0.4 * (x[1] / x[7])^(0.67) + 0.4 * (x[2] / x[8])^(0.67) + 10 - x[1] - x[2]) - @NLconstraint(nlp, 1 - 0.0588 * x[5] * x[7] - 0.1 * x[1] ≥ 0) - @NLconstraint(nlp, 1 - 0.0588 * x[6] * x[8] - 0.1 * x[1] - 0.1 * x[2] ≥ 0) - @NLconstraint( + @constraint(nlp, 1 - 0.0588 * x[5] * x[7] - 0.1 * x[1] ≥ 0) + @constraint(nlp, 1 - 0.0588 * x[6] * x[8] - 0.1 * x[1] - 0.1 * x[2] ≥ 0) + @constraint( nlp, 1 - 4 * x[3] / x[5] - 2 * x[3]^(-0.71) / x[5] - 0.0588 * x[3]^(-1.3) * x[7] ≥ 0 ) - @NLconstraint( + @constraint( nlp, 1 - 4 * x[4] / x[6] - 2 * x[4]^(-0.71) / x[6] - 0.0588 * x[4]^(-1.3) * x[8] ≥ 0 ) - @NLconstraint(nlp, 1 ≤ f ≤ 4.2) + @constraint(nlp, 1 ≤ f ≤ 4.2) - @NLobjective(nlp, Min, f) + @objective(nlp, Min, f) return nlp end diff --git a/src/PureJuMP/hs105.jl b/src/PureJuMP/hs105.jl index 7e846ed3..12ed6d7a 100644 --- a/src/PureJuMP/hs105.jl +++ b/src/PureJuMP/hs105.jl @@ -53,13 +53,13 @@ function hs105(args...; kwargs...) y[233] = 245 y[234:235] .= 250 - @NLexpression(nlp, a[i = 1:235], x[1] / x[6] * exp(-(y[i] - x[3])^2 / (2 * x[6]^2))) - @NLexpression(nlp, b[i = 1:235], x[2] / x[7] * exp(-(y[i] - x[4])^2 / (2 * x[7]^2))) - @NLexpression(nlp, c[i = 1:235], (1 - x[2] - x[1]) / x[8] * exp(-(y[i] - x[5])^2 / (2 * x[8]^2))) + @expression(nlp, a[i = 1:235], x[1] / x[6] * exp(-(y[i] - x[3])^2 / (2 * x[6]^2))) + @expression(nlp, b[i = 1:235], x[2] / x[7] * exp(-(y[i] - x[4])^2 / (2 * x[7]^2))) + @expression(nlp, c[i = 1:235], (1 - x[2] - x[1]) / x[8] * exp(-(y[i] - x[5])^2 / (2 * x[8]^2))) @constraint(nlp, 1 - x[1] - x[2] ≥ 0) - @NLobjective(nlp, Min, -sum(log((a[i] + b[i] + c[i]) / sqrt(2 * pi)) for i = 1:235)) + @objective(nlp, Min, -sum(log((a[i] + b[i] + c[i]) / sqrt(2 * pi)) for i = 1:235)) return nlp end diff --git a/src/PureJuMP/hs106.jl b/src/PureJuMP/hs106.jl index a08a9a8a..b4cf45de 100644 --- a/src/PureJuMP/hs106.jl +++ b/src/PureJuMP/hs106.jl @@ -24,9 +24,9 @@ function hs106(args...; kwargs...) @constraint(nlp, 1 - 0.0025 * (x[4] + x[6]) ≥ 0) @constraint(nlp, 1 - 0.0025 * (x[5] + x[7] - x[4]) ≥ 0) @constraint(nlp, 1 - 0.01 * (x[8] - x[5]) ≥ 0) - @NLconstraint(nlp, x[1] * x[6] - 833.33252 * x[4] - 100 * x[1] + 83333.333 ≥ 0) - @NLconstraint(nlp, x[2] * x[7] - 1250 * x[5] - x[2] * x[4] + 1250 * x[4] ≥ 0) - @NLconstraint(nlp, x[3] * x[8] - 1250000 - x[3] * x[5] + 2500 * x[5] ≥ 0) + @constraint(nlp, x[1] * x[6] - 833.33252 * x[4] - 100 * x[1] + 83333.333 ≥ 0) + @constraint(nlp, x[2] * x[7] - 1250 * x[5] - x[2] * x[4] + 1250 * x[4] ≥ 0) + @constraint(nlp, x[3] * x[8] - 1250000 - x[3] * x[5] + 2500 * x[5] ≥ 0) @objective(nlp, Min, x[1] + x[2] + x[3]) diff --git a/src/PureJuMP/hs107.jl b/src/PureJuMP/hs107.jl index b52bad68..b04d5ca3 100644 --- a/src/PureJuMP/hs107.jl +++ b/src/PureJuMP/hs107.jl @@ -24,48 +24,48 @@ function hs107(args...; kwargs...) 
c = (48.4 / 50.176) * sin(0.25) d = (48.4 / 50.176) * cos(0.25) - @NLexpression(nlp, y1, sin(x[8])) - @NLexpression(nlp, y2, cos(x[8])) - @NLexpression(nlp, y3, sin(x[9])) - @NLexpression(nlp, y4, cos(x[9])) - @NLexpression(nlp, y5, sin(x[8] - x[9])) - @NLexpression(nlp, y6, cos(x[8] - x[9])) + @expression(nlp, y1, sin(x[8])) + @expression(nlp, y2, cos(x[8])) + @expression(nlp, y3, sin(x[9])) + @expression(nlp, y4, cos(x[9])) + @expression(nlp, y5, sin(x[8] - x[9])) + @expression(nlp, y6, cos(x[8] - x[9])) - @NLconstraint( + @constraint( nlp, 0.4 - x[1] + 2 * c * x[5]^2 - x[5] * x[6] * (d * y1 + c * y2) - x[5] * x[7] * (d * y3 + c * y4) == 0 ) - @NLconstraint( + @constraint( nlp, 0.4 - x[2] + 2 * c * x[6]^2 + x[5] * x[6] * (d * y1 - c * y2) + x[6] * x[7] * (d * y5 - c * y6) == 0 ) - @NLconstraint( + @constraint( nlp, 0.8 + 2 * c * x[7]^2 + x[5] * x[7] * (d * y3 - c * y4) - x[6] * x[7] * (d * y5 + c * y6) == 0 ) - @NLconstraint( + @constraint( nlp, 0.2 - x[3] + 2 * d * x[5]^2 + x[5] * x[6] * (c * y1 - d * y2) + x[5] * x[7] * (c * y3 - d * y4) == 0 ) - @NLconstraint( + @constraint( nlp, 0.2 - x[4] + 2 * d * x[6]^2 - x[5] * x[6] * (c * y1 + d * y2) - x[6] * x[7] * (c * y5 + d * y6) == 0 ) - @NLconstraint( + @constraint( nlp, -0.337 + 2 * d * x[7]^2 - x[5] * x[7] * (c * y3 + d * y4) + x[6] * x[7] * (c * y5 - d * y6) == 0 ) - @NLobjective(nlp, Min, 3000 * x[1] + 1000 * x[1]^3 + 2000 * x[2] + 666.667 * x[2]^3) + @objective(nlp, Min, 3000 * x[1] + 1000 * x[1]^3 + 2000 * x[2] + 666.667 * x[2]^3) return nlp end diff --git a/src/PureJuMP/hs108.jl b/src/PureJuMP/hs108.jl index 84e923c4..4c0633b0 100644 --- a/src/PureJuMP/hs108.jl +++ b/src/PureJuMP/hs108.jl @@ -19,20 +19,20 @@ function hs108(args...; kwargs...) lvar = [-Inf, -Inf, -Inf, -Inf, -Inf, -Inf, -Inf, -Inf, 0] @variable(nlp, x[i = 1:9] ≥ lvar[i], start = 1) - @NLconstraint(nlp, 1 - x[3]^2 - x[4]^2 ≥ 0) - @NLconstraint(nlp, 1 - x[5]^2 - x[6]^2 ≥ 0) - @NLconstraint(nlp, 1 - (x[1] - x[5])^2 - (x[2] - x[6])^2 ≥ 0) - @NLconstraint(nlp, 1 - (x[1] - x[7])^2 - (x[2] - x[8])^2 ≥ 0) - @NLconstraint(nlp, 1 - (x[3] - x[5])^2 - (x[4] - x[6])^2 ≥ 0) - @NLconstraint(nlp, 1 - (x[3] - x[7])^2 - (x[4] - x[8])^2 ≥ 0) - @NLconstraint(nlp, x[3] * x[9] ≥ 0) - @NLconstraint(nlp, x[5] * x[8] - x[6] * x[7] ≥ 0) - @NLconstraint(nlp, 1 - x[9]^2 ≥ 0) - @NLconstraint(nlp, 1 - x[1]^2 - (x[2] - x[9])^2 ≥ 0) - @NLconstraint(nlp, x[1] * x[4] - x[2] * x[3] ≥ 0) - @NLconstraint(nlp, -x[5] * x[9] ≥ 0) + @constraint(nlp, 1 - x[3]^2 - x[4]^2 ≥ 0) + @constraint(nlp, 1 - x[5]^2 - x[6]^2 ≥ 0) + @constraint(nlp, 1 - (x[1] - x[5])^2 - (x[2] - x[6])^2 ≥ 0) + @constraint(nlp, 1 - (x[1] - x[7])^2 - (x[2] - x[8])^2 ≥ 0) + @constraint(nlp, 1 - (x[3] - x[5])^2 - (x[4] - x[6])^2 ≥ 0) + @constraint(nlp, 1 - (x[3] - x[7])^2 - (x[4] - x[8])^2 ≥ 0) + @constraint(nlp, x[3] * x[9] ≥ 0) + @constraint(nlp, x[5] * x[8] - x[6] * x[7] ≥ 0) + @constraint(nlp, 1 - x[9]^2 ≥ 0) + @constraint(nlp, 1 - x[1]^2 - (x[2] - x[9])^2 ≥ 0) + @constraint(nlp, x[1] * x[4] - x[2] * x[3] ≥ 0) + @constraint(nlp, -x[5] * x[9] ≥ 0) - @NLobjective( + @objective( nlp, Min, -0.5 * (x[1] * x[4] - x[2] * x[3] + x[3] * x[9] - x[5] * x[9] + x[5] * x[8] - x[6] * x[7]) diff --git a/src/PureJuMP/hs109.jl b/src/PureJuMP/hs109.jl index d7dc4325..d78ba29d 100644 --- a/src/PureJuMP/hs109.jl +++ b/src/PureJuMP/hs109.jl @@ -24,42 +24,42 @@ function hs109(args...; kwargs...) 
b = sin(0.25) c = cos(0.25) @constraint(nlp, -0.55 ≤ x[4] - x[3] ≤ 0.55) - @NLconstraint(nlp, 2250000 - x[1]^2 - x[8]^2 ≥ 0) - @NLconstraint(nlp, 2250000 - x[2]^2 - x[9]^2 ≥ 0) - @NLconstraint( + @constraint(nlp, 2250000 - x[1]^2 - x[8]^2 ≥ 0) + @constraint(nlp, 2250000 - x[2]^2 - x[9]^2 ≥ 0) + @constraint( nlp, x[5] * x[6] * sin(-x[3] - 1 / 4) + x[5] * x[7] * sin(-x[4] - 1 / 4) + 2 * b * x[5]^2 - a * x[1] + 400 * a == 0 ) - @NLconstraint( + @constraint( nlp, x[5] * x[6] * sin(x[3] - 1 / 4) + x[6] * x[7] * sin(x[3] - x[4] - 1 / 4) + 2 * b * x[6]^2 - a * x[2] + 400 * a == 0 ) - @NLconstraint( + @constraint( nlp, x[5] * x[7] * sin(x[4] - 1 / 4) + x[6] * x[7] * sin(x[4] - x[3] - 1 / 4) + 2 * b * x[7]^2 + 881.779 * a == 0 ) - @NLconstraint( + @constraint( nlp, a * x[8] + x[5] * x[6] * cos(-x[3] - 1 / 4) + x[5] * x[7] * cos(-x[4] - 1 / 4) - 200 * a - 2 * c * x[5]^2 + 0.7533e-3 * a * x[5]^2 == 0 ) - @NLconstraint( + @constraint( nlp, a * x[9] + x[5] * x[6] * cos(x[3] - 1 / 4) + x[6] * x[7] * cos(x[3] - x[4] - 1 / 4) - 200 * a - 2 * c * x[6]^2 + 0.7533e-3 * a * x[6]^2 == 0 ) - @NLconstraint( + @constraint( nlp, x[5] * x[7] * cos(x[4] - 1 / 4) + x[6] * x[7] * cos(x[4] - x[3] - 1 / 4) + 22.938 * a - 2 * c * x[7]^2 + 0.7533e-3 * a * x[7]^2 == 0 ) - @NLobjective(nlp, Min, 3 * x[1] + 1e-6 * x[1]^3 + 2 * x[2] + 0.522074e-6 * x[2]^3) + @objective(nlp, Min, 3 * x[1] + 1e-6 * x[1]^3 + 2 * x[2] + 0.522074e-6 * x[2]^3) return nlp end diff --git a/src/PureJuMP/hs11.jl b/src/PureJuMP/hs11.jl index fc6d83ec..aae78bb6 100644 --- a/src/PureJuMP/hs11.jl +++ b/src/PureJuMP/hs11.jl @@ -19,9 +19,9 @@ function hs11(args...; kwargs...) x0 = [4.9, 0.1] @variable(nlp, x[i = 1:2], start = x0[i]) - @NLobjective(nlp, Min, (x[1] - 5)^2 + x[2]^2 - 25) + @objective(nlp, Min, (x[1] - 5)^2 + x[2]^2 - 25) - @NLconstraint(nlp, x[1]^2 ≤ x[2]) + @constraint(nlp, x[1]^2 ≤ x[2]) return nlp end diff --git a/src/PureJuMP/hs110.jl b/src/PureJuMP/hs110.jl index 72af44bb..1939ce1d 100644 --- a/src/PureJuMP/hs110.jl +++ b/src/PureJuMP/hs110.jl @@ -18,7 +18,7 @@ function hs110(args...; kwargs...) nlp = Model() @variable(nlp, 2.001 ≤ x[i = 1:10] ≤ 9.999, start = 9) - @NLobjective( + @objective( nlp, Min, sum(log(x[i] - 2)^2 + log(10 - x[i])^2 for i = 1:10) - prod(x[i] for i = 1:10)^0.2 diff --git a/src/PureJuMP/hs111.jl b/src/PureJuMP/hs111.jl index dd03fe60..5aa0e677 100644 --- a/src/PureJuMP/hs111.jl +++ b/src/PureJuMP/hs111.jl @@ -19,17 +19,17 @@ function hs111(args...; kwargs...) c = [-6.089, -17.164, -34.054, -5.914, -24.721, -14.986, -24.100, -10.708, -26.662, -22.179] @variable(nlp, -100 ≤ x[i = 1:10] ≤ 100, start = -2.3) - @NLobjective( + @objective( nlp, Min, sum(exp(x[j]) * (c[j] + x[j] - log(sum(exp(x[k]) for k = 1:10))) for j = 1:10) ) - @NLconstraint(nlp, exp(x[1]) + 2 * exp(x[2]) + 2 * exp(x[3]) + exp(x[6]) + exp(x[10]) - 2 == 0) + @constraint(nlp, exp(x[1]) + 2 * exp(x[2]) + 2 * exp(x[3]) + exp(x[6]) + exp(x[10]) - 2 == 0) - @NLconstraint(nlp, exp(x[4]) + 2 * exp(x[5]) + exp(x[6]) + exp(x[7]) - 1 == 0) + @constraint(nlp, exp(x[4]) + 2 * exp(x[5]) + exp(x[6]) + exp(x[7]) - 1 == 0) - @NLconstraint(nlp, exp(x[3]) + exp(x[7]) + exp(x[8]) + 2 * exp(x[9]) + exp(x[10]) - 1 == 0) + @constraint(nlp, exp(x[3]) + exp(x[7]) + exp(x[8]) + 2 * exp(x[9]) + exp(x[10]) - 1 == 0) return nlp end diff --git a/src/PureJuMP/hs112.jl b/src/PureJuMP/hs112.jl index 5e1d3bb9..9411db65 100644 --- a/src/PureJuMP/hs112.jl +++ b/src/PureJuMP/hs112.jl @@ -19,7 +19,7 @@ function hs112(args...; kwargs...) 
c = [-6.089, -17.164, -34.054, -5.914, -24.721, -14.986, -24.100, -10.708, -26.662, -22.179] @variable(nlp, x[i = 1:10] ≥ 1.0e-6, start = 0.1) - @NLobjective(nlp, Min, sum(x[j] * (c[j] + log(x[j] / (sum(x[k] for k = 1:10)))) for j = 1:10)) + @objective(nlp, Min, sum(x[j] * (c[j] + log(x[j] / (sum(x[k] for k = 1:10)))) for j = 1:10)) @constraint(nlp, x[1] + 2 * x[2] + 2 * x[3] + x[6] + x[10] - 2 == 0) diff --git a/src/PureJuMP/hs113.jl b/src/PureJuMP/hs113.jl index ad5c692d..bec2acdc 100644 --- a/src/PureJuMP/hs113.jl +++ b/src/PureJuMP/hs113.jl @@ -22,13 +22,13 @@ function hs113(args...; kwargs...) @constraint(nlp, 105 - 4 * x[1] - 5 * x[2] + 3 * x[7] - 9 * x[8] ≥ 0) @constraint(nlp, -10 * x[1] + 8 * x[2] + 17 * x[7] - 2 * x[8] ≥ 0) @constraint(nlp, 8 * x[1] - 2 * x[2] - 5 * x[9] + 2 * x[10] + 12 ≥ 0) - @NLconstraint(nlp, -3 * (x[1] - 2)^2 - 4 * (x[2] - 3)^2 - 2 * x[3]^2 + 7 * x[4] + 120 ≥ 0) - @NLconstraint(nlp, -5 * x[1]^2 - 8 * x[2] - (x[3] - 6)^2 + 2 * x[4] + 40 ≥ 0) - @NLconstraint(nlp, -0.5 * (x[1] - 8)^2 - 2 * (x[2] - 4)^2 - 3 * x[5]^2 + x[6] + 30 ≥ 0) - @NLconstraint(nlp, -x[1]^2 - 2 * (x[2] - 2)^2 + 2 * x[1] * x[2] - 14 * x[5] + 6 * x[6] ≥ 0) - @NLconstraint(nlp, 3 * x[1] - 6 * x[2] - 12 * (x[9] - 8)^2 + 7 * x[10] ≥ 0) + @constraint(nlp, -3 * (x[1] - 2)^2 - 4 * (x[2] - 3)^2 - 2 * x[3]^2 + 7 * x[4] + 120 ≥ 0) + @constraint(nlp, -5 * x[1]^2 - 8 * x[2] - (x[3] - 6)^2 + 2 * x[4] + 40 ≥ 0) + @constraint(nlp, -0.5 * (x[1] - 8)^2 - 2 * (x[2] - 4)^2 - 3 * x[5]^2 + x[6] + 30 ≥ 0) + @constraint(nlp, -x[1]^2 - 2 * (x[2] - 2)^2 + 2 * x[1] * x[2] - 14 * x[5] + 6 * x[6] ≥ 0) + @constraint(nlp, 3 * x[1] - 6 * x[2] - 12 * (x[9] - 8)^2 + 7 * x[10] ≥ 0) - @NLobjective( + @objective( nlp, Min, x[1]^2 + x[2]^2 + x[1] * x[2] - 14 * x[1] - 16 * x[2] + diff --git a/src/PureJuMP/hs114.jl b/src/PureJuMP/hs114.jl index 28ee7044..b6537e39 100644 --- a/src/PureJuMP/hs114.jl +++ b/src/PureJuMP/hs114.jl @@ -26,22 +26,22 @@ function hs114(args...; kwargs...) 
@expression(nlp, g1, 35.82 - 0.222 * x[10] - b * x[9]) @expression(nlp, g2, -133 + 3 * x[7] - a * x[10]) - @NLexpression(nlp, g5, 1.12 * x[1] + 0.13167 * x[1] * x[8] - 0.00667 * x[1] * x[8]^2 - a * x[4]) - @NLexpression(nlp, g6, 57.425 + 1.098 * x[8] - 0.038 * x[8]^2 + 0.325 * x[6] - a * x[7]) + @expression(nlp, g5, 1.12 * x[1] + 0.13167 * x[1] * x[8] - 0.00667 * x[1] * x[8]^2 - a * x[4]) + @expression(nlp, g6, 57.425 + 1.098 * x[8] - 0.038 * x[8]^2 + 0.325 * x[6] - a * x[7]) @constraint(nlp, g1 ≥ 0) @constraint(nlp, g2 ≥ 0) @constraint(nlp, -g1 + x[9] * (1 / b - b) ≥ 0) @constraint(nlp, -g2 + (1 / a - a) * x[10] ≥ 0) - @NLconstraint(nlp, g5 ≥ 0) - @NLconstraint(nlp, g6 ≥ 0) - @NLconstraint(nlp, -g5 + (1 / a - a) * x[4] ≥ 0) - @NLconstraint(nlp, -g6 + (1 / a - a) * x[7] ≥ 0) + @constraint(nlp, g5 ≥ 0) + @constraint(nlp, g6 ≥ 0) + @constraint(nlp, -g5 + (1 / a - a) * x[4] ≥ 0) + @constraint(nlp, -g6 + (1 / a - a) * x[7] ≥ 0) @constraint(nlp, 1.22 * x[4] - x[1] - x[5] == 0) - @NLconstraint(nlp, 98000 * x[3] / (x[4] * x[9] + 1000 * x[3]) - x[6] == 0) - @NLconstraint(nlp, (x[2] + x[5]) / x[1] - x[8] == 0) + @constraint(nlp, 98000 * x[3] / (x[4] * x[9] + 1000 * x[3]) - x[6] == 0) + @constraint(nlp, (x[2] + x[5]) / x[1] - x[8] == 0) - @NLobjective(nlp, Min, 5.04 * x[1] + 0.035 * x[2] + 10 * x[3] + 3.36 * x[5] - 0.063 * x[4] * x[7]) + @objective(nlp, Min, 5.04 * x[1] + 0.035 * x[2] + 10 * x[3] + 3.36 * x[5] - 0.063 * x[4] * x[7]) return nlp end diff --git a/src/PureJuMP/hs116.jl b/src/PureJuMP/hs116.jl index 3553a71f..84d346a6 100644 --- a/src/PureJuMP/hs116.jl +++ b/src/PureJuMP/hs116.jl @@ -32,22 +32,22 @@ function hs116(args...; kwargs...) @constraint(nlp, x[2] - x[1] ≥ 0) @constraint(nlp, 1 - a * x[7] + a * x[8] ≥ 0) @constraint(nlp, x[11] + x[12] + x[13] ≥ 50) - @NLconstraint(nlp, x[13] - b * x[10] + c * x[3] * x[10] ≥ 0) - @NLconstraint(nlp, x[5] - d * x[2] - e * x[2] * x[5] + f * x[2]^2 ≥ 0) - @NLconstraint(nlp, x[6] - d * x[3] - e * x[3] * x[6] + f * x[3]^2 ≥ 0) - @NLconstraint(nlp, x[4] - d * x[1] - e * x[1] * x[4] + f * x[1]^2 ≥ 0) - @NLconstraint(nlp, x[12] - b * x[9] + c * x[2] * x[9] ≥ 0) - @NLconstraint(nlp, x[11] - b * x[8] + c * x[1] * x[8] ≥ 0) - @NLconstraint(nlp, x[5] * x[7] - x[1] * x[8] - x[4] * x[7] + x[4] * x[8] ≥ 0) - @NLconstraint( + @constraint(nlp, x[13] - b * x[10] + c * x[3] * x[10] ≥ 0) + @constraint(nlp, x[5] - d * x[2] - e * x[2] * x[5] + f * x[2]^2 ≥ 0) + @constraint(nlp, x[6] - d * x[3] - e * x[3] * x[6] + f * x[3]^2 ≥ 0) + @constraint(nlp, x[4] - d * x[1] - e * x[1] * x[4] + f * x[1]^2 ≥ 0) + @constraint(nlp, x[12] - b * x[9] + c * x[2] * x[9] ≥ 0) + @constraint(nlp, x[11] - b * x[8] + c * x[1] * x[8] ≥ 0) + @constraint(nlp, x[5] * x[7] - x[1] * x[8] - x[4] * x[7] + x[4] * x[8] ≥ 0) + @constraint( nlp, 1 - a * (x[2] * x[9] + x[5] * x[8] - x[1] * x[8] - x[6] * x[9]) - x[5] - x[6] ≥ 0 ) - @NLconstraint( + @constraint( nlp, x[2] * x[9] - x[3] * x[10] - x[6] * x[9] - 500 * x[2] + 500 * x[6] + x[2] * x[10] ≥ 0 ) - @NLconstraint(nlp, x[2] - 0.9 - a * (x[2] * x[10] - x[3] * x[10]) ≥ 0) + @constraint(nlp, x[2] - 0.9 - a * (x[2] * x[10] - x[3] * x[10]) ≥ 0) @constraint(nlp, x[11] + x[12] + x[13] ≤ 250) @objective(nlp, Min, x[11] + x[12] + x[13]) diff --git a/src/PureJuMP/hs117.jl b/src/PureJuMP/hs117.jl index 7efcb882..e8e003ad 100644 --- a/src/PureJuMP/hs117.jl +++ b/src/PureJuMP/hs117.jl @@ -45,14 +45,14 @@ function hs117(args...; kwargs...) 
e = [-15, -27, -36, -18, -12] for j = 1:5 - @NLconstraint( + @constraint( nlp, 2 * sum(c[k, j] * x[10 + k] + 3 * d[j] * x[10 + j]^2 for k = 1:5) + e[j] - sum(a[k, j] * x[k] for k = 1:10) ≥ 0 ) end - @NLobjective( + @objective( nlp, Min, -sum(b[j] * x[j] for j = 1:10) + diff --git a/src/PureJuMP/hs118.jl b/src/PureJuMP/hs118.jl index 65a12544..45078c3a 100644 --- a/src/PureJuMP/hs118.jl +++ b/src/PureJuMP/hs118.jl @@ -36,7 +36,7 @@ function hs118(args...; kwargs...) @constraint(nlp, x[10] + x[11] + x[12] - 50 ≥ 0) @constraint(nlp, x[13] + x[14] + x[15] - 85 ≥ 0) - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/hs119.jl b/src/PureJuMP/hs119.jl index b584bc77..2263268c 100644 --- a/src/PureJuMP/hs119.jl +++ b/src/PureJuMP/hs119.jl @@ -61,7 +61,7 @@ function hs119(args...; kwargs...) @constraint(nlp, sum(b[i, j] * x[j] - c[i] for j = 1:16) == 0) end - @NLobjective( + @objective( nlp, Min, sum(sum(a[i, j] * (x[i]^2 + x[i] + 1) * (x[j]^2 + x[j] + 1) for j = 1:16) for i = 1:16) diff --git a/src/PureJuMP/hs12.jl b/src/PureJuMP/hs12.jl index 5fb5c591..f4bd81de 100644 --- a/src/PureJuMP/hs12.jl +++ b/src/PureJuMP/hs12.jl @@ -18,9 +18,9 @@ function hs12(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:2], start = 0) - @NLobjective(nlp, Min, x[1]^2 / 2 + x[2]^2 - x[1] * x[2] - 7 * x[1] - 7 * x[2]) + @objective(nlp, Min, x[1]^2 / 2 + x[2]^2 - x[1] * x[2] - 7 * x[1] - 7 * x[2]) - @NLconstraint(nlp, 4 * x[1]^2 + x[2]^2 ≤ 25) + @constraint(nlp, 4 * x[1]^2 + x[2]^2 ≤ 25) return nlp end diff --git a/src/PureJuMP/hs13.jl b/src/PureJuMP/hs13.jl index 6836926c..968e1eb3 100644 --- a/src/PureJuMP/hs13.jl +++ b/src/PureJuMP/hs13.jl @@ -18,9 +18,9 @@ function hs13(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:2] ≥ 0, start = -2) - @NLobjective(nlp, Min, 0.5 * (x[1] - 2)^2 + 0.5 * x[2]^2) + @objective(nlp, Min, 0.5 * (x[1] - 2)^2 + 0.5 * x[2]^2) - @NLconstraint(nlp, (1 - x[1])^3 ≥ x[2]) + @constraint(nlp, (1 - x[1])^3 ≥ x[2]) return nlp end diff --git a/src/PureJuMP/hs14.jl b/src/PureJuMP/hs14.jl index 42c9cba1..98cd3071 100644 --- a/src/PureJuMP/hs14.jl +++ b/src/PureJuMP/hs14.jl @@ -18,11 +18,11 @@ function hs14(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:2], start = 2) - @NLconstraint(nlp, 0.25 * x[1]^2 + x[2]^2 - 1 ≤ 0) + @constraint(nlp, 0.25 * x[1]^2 + x[2]^2 - 1 ≤ 0) @constraint(nlp, x[1] - 2 * x[2] + 1 == 0) - @NLobjective(nlp, Min, 0.5 * (x[1] - 2)^2 + 0.5 * (x[2] - 1)^2) + @objective(nlp, Min, 0.5 * (x[1] - 2)^2 + 0.5 * (x[2] - 1)^2) return nlp end diff --git a/src/PureJuMP/hs15.jl b/src/PureJuMP/hs15.jl index fb58b6b3..17c35440 100644 --- a/src/PureJuMP/hs15.jl +++ b/src/PureJuMP/hs15.jl @@ -20,11 +20,11 @@ function hs15(args...; kwargs...) uvar = [0.5, Inf] @variable(nlp, x[i = 1:2] ≤ uvar[i], start = x0[i]) - @NLobjective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) + @objective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) - @NLconstraint(nlp, x[1] * x[2] - 1 ≥ 0) + @constraint(nlp, x[1] * x[2] - 1 ≥ 0) - @NLconstraint(nlp, x[1] + x[2]^2 ≥ 0) + @constraint(nlp, x[1] + x[2]^2 ≥ 0) return nlp end diff --git a/src/PureJuMP/hs16.jl b/src/PureJuMP/hs16.jl index 8424b970..8dac8659 100644 --- a/src/PureJuMP/hs16.jl +++ b/src/PureJuMP/hs16.jl @@ -21,11 +21,11 @@ function hs16(args...; kwargs...) 
uvar = [0.5, 1] @variable(nlp, lvar[i] ≤ x[i = 1:2] ≤ uvar[i], start = x0[i]) - @NLobjective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) + @objective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) - @NLconstraint(nlp, x[1]^2 + x[2] ≥ 0) + @constraint(nlp, x[1]^2 + x[2] ≥ 0) - @NLconstraint(nlp, x[1] + x[2]^2 ≥ 0) + @constraint(nlp, x[1] + x[2]^2 ≥ 0) return nlp end diff --git a/src/PureJuMP/hs17.jl b/src/PureJuMP/hs17.jl index daea5308..3f1e164d 100644 --- a/src/PureJuMP/hs17.jl +++ b/src/PureJuMP/hs17.jl @@ -21,11 +21,11 @@ function hs17(args...; kwargs...) uvar = [0.5, 1] @variable(nlp, lvar[i] ≤ x[i = 1:2] ≤ uvar[i], start = x0[i]) - @NLobjective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) + @objective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) - @NLconstraint(nlp, -x[1] + x[2]^2 ≥ 0) + @constraint(nlp, -x[1] + x[2]^2 ≥ 0) - @NLconstraint(nlp, x[1]^2 - x[2] ≥ 0) + @constraint(nlp, x[1]^2 - x[2] ≥ 0) return nlp end diff --git a/src/PureJuMP/hs18.jl b/src/PureJuMP/hs18.jl index 8e368c1c..c2d6d780 100644 --- a/src/PureJuMP/hs18.jl +++ b/src/PureJuMP/hs18.jl @@ -19,11 +19,11 @@ function hs18(args...; kwargs...) lvar = [2, 0] @variable(nlp, lvar[i] ≤ x[i = 1:2] ≤ 50, start = 2) - @NLobjective(nlp, Min, x[1]^2 / 100 + x[2]^2) + @objective(nlp, Min, x[1]^2 / 100 + x[2]^2) - @NLconstraint(nlp, x[1] * x[2] ≥ 25) + @constraint(nlp, x[1] * x[2] ≥ 25) - @NLconstraint(nlp, x[1]^2 + x[2]^2 ≥ 25) + @constraint(nlp, x[1]^2 + x[2]^2 ≥ 25) return nlp end diff --git a/src/PureJuMP/hs19.jl b/src/PureJuMP/hs19.jl index b1af9335..f36bbe3d 100644 --- a/src/PureJuMP/hs19.jl +++ b/src/PureJuMP/hs19.jl @@ -21,10 +21,10 @@ function hs19(args...; kwargs...) uvar = [100, 100] @variable(nlp, lvar[i] ≤ x[i = 1:2] ≤ uvar[i], start = x0[i]) - @NLconstraint(nlp, (x[1] - 5)^2 + (x[2] - 5)^2 - 100 ≥ 0) - @NLconstraint(nlp, (x[2] - 5)^2 + (x[1] - 6)^2 - 82.81 ≤ 0) + @constraint(nlp, (x[1] - 5)^2 + (x[2] - 5)^2 - 100 ≥ 0) + @constraint(nlp, (x[2] - 5)^2 + (x[1] - 6)^2 - 82.81 ≤ 0) - @NLobjective(nlp, Min, (x[1] - 10)^3 + (x[2] - 20)^3) + @objective(nlp, Min, (x[1] - 10)^3 + (x[2] - 20)^3) return nlp end diff --git a/src/PureJuMP/hs2.jl b/src/PureJuMP/hs2.jl index be363060..75f60d3c 100644 --- a/src/PureJuMP/hs2.jl +++ b/src/PureJuMP/hs2.jl @@ -20,7 +20,7 @@ function hs2(args...; kwargs...) lvar = [-Inf, 1.5] @variable(nlp, x[i = 1:2] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) + @objective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) return nlp end diff --git a/src/PureJuMP/hs20.jl b/src/PureJuMP/hs20.jl index d48d83e8..f5163d14 100644 --- a/src/PureJuMP/hs20.jl +++ b/src/PureJuMP/hs20.jl @@ -21,13 +21,13 @@ function hs20(args...; kwargs...) uvar = [0.5, Inf] @variable(nlp, lvar[i] ≤ x[i = 1:2] ≤ uvar[i], start = x0[i]) - @NLobjective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) + @objective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) - @NLconstraint(nlp, x[1] + x[2]^2 ≥ 0) + @constraint(nlp, x[1] + x[2]^2 ≥ 0) - @NLconstraint(nlp, x[1]^2 + x[2] ≥ 0) + @constraint(nlp, x[1]^2 + x[2] ≥ 0) - @NLconstraint(nlp, x[1]^2 + x[2]^2 ≥ 1) + @constraint(nlp, x[1]^2 + x[2]^2 ≥ 1) return nlp end diff --git a/src/PureJuMP/hs21.jl b/src/PureJuMP/hs21.jl index 2ca4b860..5c7f6bc0 100644 --- a/src/PureJuMP/hs21.jl +++ b/src/PureJuMP/hs21.jl @@ -22,7 +22,7 @@ function hs21(args...; kwargs...) 
@constraint(nlp, 10 * x[1] - x[2] - 10 ≥ 0) - @NLobjective(nlp, Min, 0.01 * x[1]^2 + x[2]^2 - 100) + @objective(nlp, Min, 0.01 * x[1]^2 + x[2]^2 - 100) return nlp end diff --git a/src/PureJuMP/hs219.jl b/src/PureJuMP/hs219.jl index edc9862b..66be3f09 100644 --- a/src/PureJuMP/hs219.jl +++ b/src/PureJuMP/hs219.jl @@ -18,9 +18,9 @@ function hs219(; n::Int = default_nvar, kwargs...) x0 = [10, 10, 10, 10] @variable(nlp, x[i = 1:4], start = x0[i]) - @NLconstraint(nlp, x[1]^2 - x[2] - x[4]^2 == 0) - @NLconstraint(nlp, x[2] - x[1]^3 - x[3]^2 == 0) + @constraint(nlp, x[1]^2 - x[2] - x[4]^2 == 0) + @constraint(nlp, x[2] - x[1]^3 - x[3]^2 == 0) - @NLobjective(nlp, Min, -x[1]) + @objective(nlp, Min, -x[1]) return nlp end diff --git a/src/PureJuMP/hs22.jl b/src/PureJuMP/hs22.jl index def4a350..82e607af 100644 --- a/src/PureJuMP/hs22.jl +++ b/src/PureJuMP/hs22.jl @@ -19,9 +19,9 @@ function hs22(args...; kwargs...) @variable(nlp, x[i = 1:2], start = 2) @constraint(nlp, x[1] + x[2] - 2 ≤ 0) - @NLconstraint(nlp, -x[1]^2 + x[2] ≥ 0) + @constraint(nlp, -x[1]^2 + x[2] ≥ 0) - @NLobjective(nlp, Min, 0.5 * (x[1] - 2)^2 + 0.5 * (x[2] - 1)^2) + @objective(nlp, Min, 0.5 * (x[1] - 2)^2 + 0.5 * (x[2] - 1)^2) return nlp end diff --git a/src/PureJuMP/hs220.jl b/src/PureJuMP/hs220.jl index 93d82345..f7fe7e82 100644 --- a/src/PureJuMP/hs220.jl +++ b/src/PureJuMP/hs220.jl @@ -20,8 +20,8 @@ function hs220(args...; kwargs...) lvar = [1, 0] @variable(nlp, x[i = 1:2] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, x[1]) - @NLconstraint(nlp, (x[1] - 1)^3 - x[2] == 0) + @objective(nlp, Min, x[1]) + @constraint(nlp, (x[1] - 1)^3 - x[2] == 0) return nlp end diff --git a/src/PureJuMP/hs221.jl b/src/PureJuMP/hs221.jl index 7d82c222..7013200c 100644 --- a/src/PureJuMP/hs221.jl +++ b/src/PureJuMP/hs221.jl @@ -20,8 +20,8 @@ function hs221(args...; kwargs...) lvar = [0, 0] @variable(nlp, x[i = 1:2] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, -x[1]) - @NLconstraint(nlp, (1 - x[1])^3 - x[2] >= 0) + @objective(nlp, Min, -x[1]) + @constraint(nlp, (1 - x[1])^3 - x[2] >= 0) return nlp end diff --git a/src/PureJuMP/hs222.jl b/src/PureJuMP/hs222.jl index e6dc927c..3f2337f7 100644 --- a/src/PureJuMP/hs222.jl +++ b/src/PureJuMP/hs222.jl @@ -20,8 +20,8 @@ function hs222(args...; kwargs...) lvar = [0, 0] @variable(nlp, x[i = 1:2] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, -x[1]) - @NLconstraint(nlp, (1 - x[1])^3 - x[2] + 0.125 >= 0) + @objective(nlp, Min, -x[1]) + @constraint(nlp, (1 - x[1])^3 - x[2] + 0.125 >= 0) return nlp end diff --git a/src/PureJuMP/hs223.jl b/src/PureJuMP/hs223.jl index 94a64ecc..14b6b8ac 100644 --- a/src/PureJuMP/hs223.jl +++ b/src/PureJuMP/hs223.jl @@ -21,9 +21,9 @@ function hs223(args...; kwargs...) uvar = [10, 10] @variable(nlp, uvar[i] ≥ x[i = 1:2] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, -x[1]) - @NLconstraint(nlp, exp(exp(x[1])) >= 0) - @NLconstraint(nlp, x[2] - exp(exp(x[1])) >= 0) + @objective(nlp, Min, -x[1]) + @constraint(nlp, exp(exp(x[1])) >= 0) + @constraint(nlp, x[2] - exp(exp(x[1])) >= 0) return nlp end diff --git a/src/PureJuMP/hs224.jl b/src/PureJuMP/hs224.jl index 96c7bc12..26ab094a 100644 --- a/src/PureJuMP/hs224.jl +++ b/src/PureJuMP/hs224.jl @@ -21,7 +21,7 @@ function hs224(args...; kwargs...) 
uvar = [6, 6] @variable(nlp, uvar[i] ≥ x[i = 1:2] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, 2 * x[1]^2 + x[2]^2 - 48 * x[1] - 40 * x[2]) + @objective(nlp, Min, 2 * x[1]^2 + x[2]^2 - 48 * x[1] - 40 * x[2]) @constraint(nlp, x[1] + 3 * x[2] >= 0) @constraint(nlp, 18 - x[1] - 3 * x[2] >= 0) @constraint(nlp, x[1] + x[2] >= 0) diff --git a/src/PureJuMP/hs225.jl b/src/PureJuMP/hs225.jl index 54730f93..6334bd0b 100644 --- a/src/PureJuMP/hs225.jl +++ b/src/PureJuMP/hs225.jl @@ -20,12 +20,12 @@ function hs225(args...; kwargs...) @variable(nlp, x[i = 1:2], start = x0[i]) - @NLobjective(nlp, Min, x[1]^2 + x[2]^2) + @objective(nlp, Min, x[1]^2 + x[2]^2) @constraint(nlp, x[1] + x[2] - 1 >= 0) - @NLconstraint(nlp, x[1]^2 + x[2]^2 - 1 >= 0) - @NLconstraint(nlp, 9 * x[1]^2 + x[2]^2 - 9 >= 0) - @NLconstraint(nlp, x[1]^2 - x[2] >= 0) - @NLconstraint(nlp, x[2]^2 - x[1] >= 0) + @constraint(nlp, x[1]^2 + x[2]^2 - 1 >= 0) + @constraint(nlp, 9 * x[1]^2 + x[2]^2 - 9 >= 0) + @constraint(nlp, x[1]^2 - x[2] >= 0) + @constraint(nlp, x[2]^2 - x[1] >= 0) return nlp end diff --git a/src/PureJuMP/hs226.jl b/src/PureJuMP/hs226.jl index b43c1187..ea6386c2 100644 --- a/src/PureJuMP/hs226.jl +++ b/src/PureJuMP/hs226.jl @@ -20,9 +20,9 @@ function hs226(args...; kwargs...) lvar = [0, 0] @variable(nlp, x[i = 1:2] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, -x[1] * x[2]) - @NLconstraint(nlp, x[1]^2 + x[2]^2 >= 0) - @NLconstraint(nlp, 1 - x[1]^2 - x[2]^2 >= 0) + @objective(nlp, Min, -x[1] * x[2]) + @constraint(nlp, x[1]^2 + x[2]^2 >= 0) + @constraint(nlp, 1 - x[1]^2 - x[2]^2 >= 0) return nlp end diff --git a/src/PureJuMP/hs227.jl b/src/PureJuMP/hs227.jl index 9aa8c1fb..b4db98e9 100644 --- a/src/PureJuMP/hs227.jl +++ b/src/PureJuMP/hs227.jl @@ -19,9 +19,9 @@ function hs227(args...; kwargs...) x0 = [0.5, 0.5] @variable(nlp, x[i = 1:2], start = x0[i]) - @NLobjective(nlp, Min, (x[1] - 2)^2 + (x[2] - 1)^2) - @NLconstraint(nlp, -x[1]^2 + x[2] >= 0) - @NLconstraint(nlp, x[1] - x[2]^2 >= 0) + @objective(nlp, Min, (x[1] - 2)^2 + (x[2] - 1)^2) + @constraint(nlp, -x[1]^2 + x[2] >= 0) + @constraint(nlp, x[1] - x[2]^2 >= 0) return nlp end diff --git a/src/PureJuMP/hs228.jl b/src/PureJuMP/hs228.jl index 3916a620..5da3e1f2 100644 --- a/src/PureJuMP/hs228.jl +++ b/src/PureJuMP/hs228.jl @@ -19,9 +19,9 @@ function hs228(args...; kwargs...) x0 = [0, 0] @variable(nlp, x[i = 1:2], start = x0[i]) - @NLobjective(nlp, Min, x[1]^2 + x[2]) + @objective(nlp, Min, x[1]^2 + x[2]) @constraint(nlp, -x[1] - x[2] + 1 >= 0) - @NLconstraint(nlp, -(x[1]^2 + x[2]^2) + 9 >= 0) + @constraint(nlp, -(x[1]^2 + x[2]^2) + 9 >= 0) return nlp end diff --git a/src/PureJuMP/hs229.jl b/src/PureJuMP/hs229.jl index 3c049229..f5a64a12 100644 --- a/src/PureJuMP/hs229.jl +++ b/src/PureJuMP/hs229.jl @@ -21,7 +21,7 @@ function hs229(args...; kwargs...) uvar = [2, 2] @variable(nlp, uvar[i] ≥ x[i = 1:2] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) + @objective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) return nlp end diff --git a/src/PureJuMP/hs23.jl b/src/PureJuMP/hs23.jl index c15e5c23..6f731cb4 100644 --- a/src/PureJuMP/hs23.jl +++ b/src/PureJuMP/hs23.jl @@ -20,12 +20,12 @@ function hs23(args...; kwargs...) 
@variable(nlp, -50 ≤ x[i = 1:2] ≤ 50, start = x0[i]) @constraint(nlp, x[1] + x[2] - 1 ≥ 0) - @NLconstraint(nlp, x[1]^2 + x[2]^2 - 1 ≥ 0) - @NLconstraint(nlp, 9 * x[1]^2 + x[2]^2 - 9 ≥ 0) - @NLconstraint(nlp, x[1]^2 - x[2] ≥ 0) - @NLconstraint(nlp, x[2]^2 - x[1] ≥ 0) + @constraint(nlp, x[1]^2 + x[2]^2 - 1 ≥ 0) + @constraint(nlp, 9 * x[1]^2 + x[2]^2 - 9 ≥ 0) + @constraint(nlp, x[1]^2 - x[2] ≥ 0) + @constraint(nlp, x[2]^2 - x[1] ≥ 0) - @NLobjective(nlp, Min, 0.5 * x[1]^2 + 0.5 * x[2]^2) + @objective(nlp, Min, 0.5 * x[1]^2 + 0.5 * x[2]^2) return nlp end diff --git a/src/PureJuMP/hs230.jl b/src/PureJuMP/hs230.jl index 89e47d2f..3c4aa9e7 100644 --- a/src/PureJuMP/hs230.jl +++ b/src/PureJuMP/hs230.jl @@ -19,9 +19,9 @@ function hs230(args...; kwargs...) x0 = [0, 0] @variable(nlp, x[i = 1:2], start = x0[i]) - @NLobjective(nlp, Min, x[2]) - @NLconstraint(nlp, -2 * x[1]^2 + x[1]^3 + x[2] >= 0) - @NLconstraint(nlp, -2 * (1 - x[1])^2 + (1 - x[1])^3 + x[2] >= 0) + @objective(nlp, Min, x[2]) + @constraint(nlp, -2 * x[1]^2 + x[1]^3 + x[2] >= 0) + @constraint(nlp, -2 * (1 - x[1])^2 + (1 - x[1])^3 + x[2] >= 0) return nlp end diff --git a/src/PureJuMP/hs231.jl b/src/PureJuMP/hs231.jl index 7ed7f3f1..9c598ff8 100644 --- a/src/PureJuMP/hs231.jl +++ b/src/PureJuMP/hs231.jl @@ -19,7 +19,7 @@ function hs231(args...; kwargs...) x0 = [-1.2, 1] @variable(nlp, x[i = 1:2], start = x0[i]) - @NLobjective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) + @objective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) @constraint(nlp, 1 / 3 * x[1] + x[2] + 0.1 >= 0) @constraint(nlp, -1 / 3 * x[1] + x[2] + 0.1 >= 0) diff --git a/src/PureJuMP/hs232.jl b/src/PureJuMP/hs232.jl index f0f9b5a5..d7e73dfc 100644 --- a/src/PureJuMP/hs232.jl +++ b/src/PureJuMP/hs232.jl @@ -20,7 +20,7 @@ function hs232(args...; kwargs...) lvar = [0, 0] @variable(nlp, x[i = 1:2] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, -(9 - (x[1] - 3)^2) * x[2]^3 / (27 * sqrt(3))) + @objective(nlp, Min, -(9 - (x[1] - 3)^2) * x[2]^3 / (27 * sqrt(3))) @constraint(nlp, x[1] / sqrt(3) - x[2] >= 0) @constraint(nlp, x[1] + sqrt(3) * x[2] >= 0) @constraint(nlp, 6 - x[1] - sqrt(3) * x[2] >= 0) diff --git a/src/PureJuMP/hs233.jl b/src/PureJuMP/hs233.jl index 1db8ef84..ccafae2b 100644 --- a/src/PureJuMP/hs233.jl +++ b/src/PureJuMP/hs233.jl @@ -19,7 +19,7 @@ function hs233(args...; kwargs...) x0 = [1.2, 1] @variable(nlp, x[i = 1:2], start = x0[i]) - @NLobjective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) - @NLconstraint(nlp, x[1]^2 + x[2]^2 - 0.25 >= 0) + @objective(nlp, Min, 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2) + @constraint(nlp, x[1]^2 + x[2]^2 - 0.25 >= 0) return nlp end diff --git a/src/PureJuMP/hs234.jl b/src/PureJuMP/hs234.jl index 8d30067e..3025d105 100644 --- a/src/PureJuMP/hs234.jl +++ b/src/PureJuMP/hs234.jl @@ -21,8 +21,8 @@ function hs234(args...; kwargs...) uvar = [2, 2] @variable(nlp, uvar[i] ≥ x[i = 1:2] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, (x[2] - x[1])^4 - (1 - x[1])) - @NLconstraint(nlp, -x[1]^2 - x[2]^2 + 1 >= 0) + @objective(nlp, Min, (x[2] - x[1])^4 - (1 - x[1])) + @constraint(nlp, -x[1]^2 - x[2]^2 + 1 >= 0) return nlp end diff --git a/src/PureJuMP/hs235.jl b/src/PureJuMP/hs235.jl index 5105a3a5..f690113a 100644 --- a/src/PureJuMP/hs235.jl +++ b/src/PureJuMP/hs235.jl @@ -19,7 +19,7 @@ function hs235(args...; kwargs...) 
x0 = [-2, 3, 1] @variable(nlp, x[i = 1:3], start = x0[i]) - @NLobjective(nlp, Min, 0.01 * (x[1] - 1)^2 + (x[2] - x[1]^2)^2) - @NLconstraint(nlp, x[1] + x[3]^2 + 1 == 0) + @objective(nlp, Min, 0.01 * (x[1] - 1)^2 + (x[2] - x[1]^2)^2) + @constraint(nlp, x[1] + x[3]^2 + 1 == 0) return nlp end diff --git a/src/PureJuMP/hs236.jl b/src/PureJuMP/hs236.jl index 03bf0fc7..b1cc31da 100644 --- a/src/PureJuMP/hs236.jl +++ b/src/PureJuMP/hs236.jl @@ -44,7 +44,7 @@ function hs236(args...; kwargs...) -2.8673112392 ] - @NLobjective( + @objective( nlp, Min, B[1] + @@ -68,7 +68,7 @@ function hs236(args...; kwargs...) B[19] * x[1] * x[2]^3 + B[20] * exp(0.0005 * x[1] * x[2]) ) - @NLconstraint(nlp, x[1] * x[2] - 700 >= 0) - @NLconstraint(nlp, x[2] - 5 * (x[1] / 25)^2 >= 0) + @constraint(nlp, x[1] * x[2] - 700 >= 0) + @constraint(nlp, x[2] - 5 * (x[1] / 25)^2 >= 0) return nlp end diff --git a/src/PureJuMP/hs237.jl b/src/PureJuMP/hs237.jl index 25781422..d2ff6c54 100644 --- a/src/PureJuMP/hs237.jl +++ b/src/PureJuMP/hs237.jl @@ -44,7 +44,7 @@ function hs237(args...; kwargs...) -2.8673112392 ] - @NLobjective( + @objective( nlp, Min, B[1] + @@ -68,9 +68,9 @@ function hs237(args...; kwargs...) B[19] * x[1] * x[2]^3 + B[20] * exp(0.0005 * x[1] * x[2]) ) - @NLconstraint(nlp, x[1] * x[2] - 700 >= 0) - @NLconstraint(nlp, x[2] - 5 * (x[1] / 25)^2 >= 0) - @NLconstraint(nlp, (x[2] - 50)^2 - 5 * (x[1] - 55) >= 0) + @constraint(nlp, x[1] * x[2] - 700 >= 0) + @constraint(nlp, x[2] - 5 * (x[1] / 25)^2 >= 0) + @constraint(nlp, (x[2] - 50)^2 - 5 * (x[1] - 55) >= 0) return nlp end diff --git a/src/PureJuMP/hs238.jl b/src/PureJuMP/hs238.jl index a1db020e..764da081 100644 --- a/src/PureJuMP/hs238.jl +++ b/src/PureJuMP/hs238.jl @@ -43,7 +43,7 @@ function hs238(args...; kwargs...) -2.8673112392 ] - @NLobjective( + @objective( nlp, Min, B[1] + @@ -67,9 +67,9 @@ function hs238(args...; kwargs...) B[19] * x[1] * x[2]^3 + B[20] * exp(0.0005 * x[1] * x[2]) ) - @NLconstraint(nlp, x[1] * x[2] - 700 >= 0) - @NLconstraint(nlp, x[2] - 5 * (x[1] / 25)^2 >= 0) - @NLconstraint(nlp, (x[2] - 50)^2 - 5 * (x[1] - 55) >= 0) + @constraint(nlp, x[1] * x[2] - 700 >= 0) + @constraint(nlp, x[2] - 5 * (x[1] / 25)^2 >= 0) + @constraint(nlp, (x[2] - 50)^2 - 5 * (x[1] - 55) >= 0) return nlp end diff --git a/src/PureJuMP/hs239.jl b/src/PureJuMP/hs239.jl index d0723199..b5d3865e 100644 --- a/src/PureJuMP/hs239.jl +++ b/src/PureJuMP/hs239.jl @@ -44,7 +44,7 @@ function hs239(args...; kwargs...) -2.8673112392 ] - @NLobjective( + @objective( nlp, Min, B[1] + @@ -68,7 +68,7 @@ function hs239(args...; kwargs...) B[19] * x[1] * x[2]^3 + B[20] * exp(0.0005 * x[1] * x[2]) ) - @NLconstraint(nlp, x[1] * x[2] - 700 >= 0) + @constraint(nlp, x[1] * x[2] - 700 >= 0) return nlp end diff --git a/src/PureJuMP/hs24.jl b/src/PureJuMP/hs24.jl index a596e954..03236ec0 100644 --- a/src/PureJuMP/hs24.jl +++ b/src/PureJuMP/hs24.jl @@ -22,7 +22,7 @@ function hs24(args...; kwargs...) @constraint(nlp, x[1] / sqrt(3) - x[2] ≥ 0) @constraint(nlp, 0 ≤ x[1] + sqrt(3) * x[2] ≤ 6) - @NLobjective(nlp, Min, 1 / (27 * sqrt(3)) * ((x[1] - 3)^2 - 9) * x[2]^3) + @objective(nlp, Min, 1 / (27 * sqrt(3)) * ((x[1] - 3)^2 - 9) * x[2]^3) return nlp end diff --git a/src/PureJuMP/hs240.jl b/src/PureJuMP/hs240.jl index 75e2c89c..e0ae7914 100644 --- a/src/PureJuMP/hs240.jl +++ b/src/PureJuMP/hs240.jl @@ -19,7 +19,7 @@ function hs240(args...; kwargs...) 
x0 = [100, -1, 2.5] @variable(nlp, x[i = 1:3], start = x0[i]) - @NLobjective(nlp, Min, (x[1] - x[2] + x[3])^2 + (-x[1] + x[2] + x[3])^2 + (x[1] + x[2] - x[3])^2) + @objective(nlp, Min, (x[1] - x[2] + x[3])^2 + (-x[1] + x[2] + x[3])^2 + (x[1] + x[2] - x[3])^2) return nlp end diff --git a/src/PureJuMP/hs241.jl b/src/PureJuMP/hs241.jl index 11ccc51b..4c8c2428 100644 --- a/src/PureJuMP/hs241.jl +++ b/src/PureJuMP/hs241.jl @@ -19,7 +19,7 @@ function hs241(args...; kwargs...) x0 = [1, 2, 0] @variable(nlp, x[i = 1:3], start = x0[i]) - @NLobjective( + @objective( nlp, Min, (x[1]^2 + x[2]^2 + x[3]^2 - 1)^2 + diff --git a/src/PureJuMP/hs242.jl b/src/PureJuMP/hs242.jl index 59979fb3..c5506a7b 100644 --- a/src/PureJuMP/hs242.jl +++ b/src/PureJuMP/hs242.jl @@ -22,7 +22,7 @@ function hs242(args...; kwargs...) @variable(nlp, uvar[i] ≥ x[i = 1:3] ≥ lvar[i], start = x0[i]) t = [(10 + i) / 100 for i = 1:10] - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/hs243.jl b/src/PureJuMP/hs243.jl index a21a4a29..f8a0f735 100644 --- a/src/PureJuMP/hs243.jl +++ b/src/PureJuMP/hs243.jl @@ -33,7 +33,7 @@ function hs243(args...; kwargs...) -0.869487 0.586387 0.289826 ] - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/hs244.jl b/src/PureJuMP/hs244.jl index a0b4c835..4a680d4d 100644 --- a/src/PureJuMP/hs244.jl +++ b/src/PureJuMP/hs244.jl @@ -19,7 +19,7 @@ function hs244(args...; kwargs...) x0 = [1, 2, 1] @variable(nlp, x[i = 1:3], start = x0[i]) - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/hs245.jl b/src/PureJuMP/hs245.jl index b25f62fe..607c5cd5 100644 --- a/src/PureJuMP/hs245.jl +++ b/src/PureJuMP/hs245.jl @@ -19,7 +19,7 @@ function hs245(args...; kwargs...) x0 = [0, 10, 20] @variable(nlp, x[i = 1:3], start = x0[i]) - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/hs246.jl b/src/PureJuMP/hs246.jl index 5dd2de44..39c61a69 100644 --- a/src/PureJuMP/hs246.jl +++ b/src/PureJuMP/hs246.jl @@ -19,7 +19,7 @@ function hs246(args...; kwargs...) x0 = [-1.2; 2; 0] @variable(nlp, x[i = 1:3], start = x0[i]) - @NLobjective(nlp, Min, 100 * (x[3] - ((x[1] + x[2]) / 2)^2)^2 + (1 - x[1])^2 + (1 - x[2])^2) + @objective(nlp, Min, 100 * (x[3] - ((x[1] + x[2]) / 2)^2)^2 + (1 - x[1])^2 + (1 - x[2])^2) return nlp end diff --git a/src/PureJuMP/hs248.jl b/src/PureJuMP/hs248.jl index 7f54f48b..ccb0008f 100644 --- a/src/PureJuMP/hs248.jl +++ b/src/PureJuMP/hs248.jl @@ -19,9 +19,9 @@ function hs248(args...; kwargs...) x0 = [-0.1, -1, 0.1] @variable(nlp, x[i = 1:3], start = x0[i]) - @NLobjective(nlp, Min, -x[2]) + @objective(nlp, Min, -x[2]) @constraint(nlp, 1 - 2 * x[2] + x[1] >= 0) - @NLconstraint(nlp, x[1]^2 + x[2]^2 + x[3]^2 - 1 == 0) + @constraint(nlp, x[1]^2 + x[2]^2 + x[3]^2 - 1 == 0) return nlp end diff --git a/src/PureJuMP/hs249.jl b/src/PureJuMP/hs249.jl index 226ab509..2ef1c84e 100644 --- a/src/PureJuMP/hs249.jl +++ b/src/PureJuMP/hs249.jl @@ -20,8 +20,8 @@ function hs249(args...; kwargs...) lvar = [1, -Inf, -Inf] @variable(nlp, x[i = 1:3] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, x[1]^2 + x[2]^2 + x[3]^2) - @NLconstraint(nlp, x[1]^2 + x[2]^2 - 1 >= 0) + @objective(nlp, Min, x[1]^2 + x[2]^2 + x[3]^2) + @constraint(nlp, x[1]^2 + x[2]^2 - 1 >= 0) return nlp end diff --git a/src/PureJuMP/hs25.jl b/src/PureJuMP/hs25.jl index 59ae0d07..483b9876 100644 --- a/src/PureJuMP/hs25.jl +++ b/src/PureJuMP/hs25.jl @@ -21,10 +21,10 @@ function hs25(args...; kwargs...) 
uvar = [100, 25.6, 5] @variable(nlp, lvar[i] ≤ x[i = 1:3] ≤ uvar[i], start = x0[i]) - @NLexpression(nlp, u[i = 1:99], 25 + (-50 * log(0.01 * i))^(2 / 3)) - @NLexpression(nlp, f[i = 1:99], -0.01 * i + exp(-1 / x[1] * (u[i] - x[2])^x[3])) + @expression(nlp, u[i = 1:99], 25 + (-50 * log(0.01 * i))^(2 / 3)) + @expression(nlp, f[i = 1:99], -0.01 * i + exp(-1 / x[1] * (u[i] - x[2])^x[3])) - @NLobjective(nlp, Min, sum(f[i]^2 for i = 1:99)) + @objective(nlp, Min, sum(f[i]^2 for i = 1:99)) return nlp end diff --git a/src/PureJuMP/hs250.jl b/src/PureJuMP/hs250.jl index 1f0ba5df..32f8820d 100644 --- a/src/PureJuMP/hs250.jl +++ b/src/PureJuMP/hs250.jl @@ -21,7 +21,7 @@ function hs250(args...; kwargs...) uvar = [20, 11, 42] @variable(nlp, uvar[i] ≥ x[i = 1:3] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, -x[1] * x[2] * x[3]) + @objective(nlp, Min, -x[1] * x[2] * x[3]) @constraint(nlp, x[1] + 2 * x[2] + 2 * x[3] >= 0) @constraint(nlp, 72 - x[1] - 2 * x[2] - 2 * x[3] >= 0) diff --git a/src/PureJuMP/hs251.jl b/src/PureJuMP/hs251.jl index 8e940b91..d3150335 100644 --- a/src/PureJuMP/hs251.jl +++ b/src/PureJuMP/hs251.jl @@ -21,7 +21,7 @@ function hs251(args...; kwargs...) uvar = [42, 42, 42] @variable(nlp, uvar[i] ≥ x[i = 1:3] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, -x[1] * x[2] * x[3]) + @objective(nlp, Min, -x[1] * x[2] * x[3]) @constraint(nlp, 72 - x[1] - 2 * x[2] - 2 * x[3] >= 0) return nlp diff --git a/src/PureJuMP/hs252.jl b/src/PureJuMP/hs252.jl index 88783461..ccc9c3d4 100644 --- a/src/PureJuMP/hs252.jl +++ b/src/PureJuMP/hs252.jl @@ -19,7 +19,7 @@ function hs252(args...; kwargs...) x0 = [-1, 2, 2] @variable(nlp, x[i = 1:3], start = x0[i]) - @NLobjective(nlp, Min, 0.01 * (x[1] - 1)^2 + (x[2] - x[1]^2)^2) - @NLconstraint(nlp, x[1] + x[3]^2 + 1 == 0) + @objective(nlp, Min, 0.01 * (x[1] - 1)^2 + (x[2] - x[1]^2)^2) + @constraint(nlp, x[1] + x[3]^2 + 1 == 0) return nlp end diff --git a/src/PureJuMP/hs253.jl b/src/PureJuMP/hs253.jl index 26c11b3d..f350ca6f 100644 --- a/src/PureJuMP/hs253.jl +++ b/src/PureJuMP/hs253.jl @@ -25,7 +25,7 @@ function hs253(args...; kwargs...) a2 = [0; 0; 10; 10; 0; 0; 10; 10] a3 = [0; 0; 0; 0; 10; 10; 10; 10] - @NLobjective( + @objective( nlp, Min, sum(c[j] * sqrt((a1[j] - x[1]^2 + (a2[j] - x[2])^2 + (a3[j] - x[3])^2)) for j = 1:8) diff --git a/src/PureJuMP/hs254.jl b/src/PureJuMP/hs254.jl index f2034814..35010432 100644 --- a/src/PureJuMP/hs254.jl +++ b/src/PureJuMP/hs254.jl @@ -20,9 +20,9 @@ function hs254(args...; kwargs...) lvar = [-Inf, -Inf, 1] @variable(nlp, x[i = 1:3] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, log(x[3]) - x[2]) - @NLconstraint(nlp, x[2]^2 + x[3]^2 - 4 == 0) - @NLconstraint(nlp, x[3] - 1 - x[2]^2 == 0) + @objective(nlp, Min, log(x[3]) - x[2]) + @constraint(nlp, x[2]^2 + x[3]^2 - 4 == 0) + @constraint(nlp, x[3] - 1 - x[2]^2 == 0) return nlp end diff --git a/src/PureJuMP/hs255.jl b/src/PureJuMP/hs255.jl index ec15af63..24b2763d 100644 --- a/src/PureJuMP/hs255.jl +++ b/src/PureJuMP/hs255.jl @@ -19,7 +19,7 @@ function hs255(args...; kwargs...) x0 = [-3, 1, -3, 1] @variable(nlp, x[i = 1:4], start = x0[i]) - @NLobjective( + @objective( nlp, Min, 100 * (x[2] - x[1]^2) + diff --git a/src/PureJuMP/hs256.jl b/src/PureJuMP/hs256.jl index 2c67dbbd..c54f7be0 100644 --- a/src/PureJuMP/hs256.jl +++ b/src/PureJuMP/hs256.jl @@ -19,7 +19,7 @@ function hs256(args...; kwargs...) 
x0 = [3, -1, 0, 1] @variable(nlp, x[i = 1:4], start = x0[i]) - @NLobjective( + @objective( nlp, Min, (x[1] + 10 * x[2])^2 + 5 * (x[3] - x[4])^2 + (x[2] - 2 * x[3])^4 + 10 * (x[1] - x[4])^4 diff --git a/src/PureJuMP/hs257.jl b/src/PureJuMP/hs257.jl index a118b639..2684372c 100644 --- a/src/PureJuMP/hs257.jl +++ b/src/PureJuMP/hs257.jl @@ -20,7 +20,7 @@ function hs257(args...; kwargs...) lvar = [0, -Inf, 0, -Inf] @variable(nlp, x[i = 1:4] ≥ lvar[i], start = x0[i]) - @NLobjective( + @objective( nlp, Min, 100 * (x[2] - x[1]^2)^2 + diff --git a/src/PureJuMP/hs258.jl b/src/PureJuMP/hs258.jl index d642d104..82fbf88e 100644 --- a/src/PureJuMP/hs258.jl +++ b/src/PureJuMP/hs258.jl @@ -20,7 +20,7 @@ function hs258(args...; kwargs...) @variable(nlp, x[i = 1:4], start = x0[i]) - @NLobjective( + @objective( nlp, Min, 100 * (x[2] - x[1]^2)^2 + diff --git a/src/PureJuMP/hs259.jl b/src/PureJuMP/hs259.jl index 5dd8a9a4..d145da3f 100644 --- a/src/PureJuMP/hs259.jl +++ b/src/PureJuMP/hs259.jl @@ -20,7 +20,7 @@ function hs259(args...; kwargs...) @variable(nlp, x[i = 1:4], start = x0[i]) - @NLobjective( + @objective( nlp, Min, 100 * (x[2] - x[1]^2)^2 + diff --git a/src/PureJuMP/hs26.jl b/src/PureJuMP/hs26.jl index 135c9fcf..3cadbb82 100644 --- a/src/PureJuMP/hs26.jl +++ b/src/PureJuMP/hs26.jl @@ -19,9 +19,9 @@ function hs26(args...; kwargs...) x0 = [-2.6, 2.0, 2.0] @variable(nlp, x[i = 1:3], start = x0[i]) - @NLobjective(nlp, Min, (x[1] - x[2])^2 + (x[2] - x[3])^4) + @objective(nlp, Min, (x[1] - x[2])^2 + (x[2] - x[3])^4) - @NLconstraint(nlp, constr1, (1 + x[2]^2) * x[1] + x[3]^4 - 3 == 0) + @constraint(nlp, constr1, (1 + x[2]^2) * x[1] + x[3]^4 - 3 == 0) return nlp end diff --git a/src/PureJuMP/hs260.jl b/src/PureJuMP/hs260.jl index 3dfa576f..f3ccc7a5 100644 --- a/src/PureJuMP/hs260.jl +++ b/src/PureJuMP/hs260.jl @@ -19,7 +19,7 @@ function hs260(args...; kwargs...) x0 = [-3, -1, -3, -1] @variable(nlp, x[i = 1:4], start = x0[i]) - @NLobjective( + @objective( nlp, Min, 100 * (x[2] - x[1]^2)^2 + diff --git a/src/PureJuMP/hs261.jl b/src/PureJuMP/hs261.jl index 7d00c433..ec6a5f16 100644 --- a/src/PureJuMP/hs261.jl +++ b/src/PureJuMP/hs261.jl @@ -19,7 +19,7 @@ function hs261(args...; kwargs...) x0 = [0, 0, 0, 0] @variable(nlp, x[i = 1:4], start = x0[i]) - @NLobjective( + @objective( nlp, Min, (exp(x[1]) - x[2])^4 + 100 * (x[2] - x[3])^6 + tan(x[3] - x[4])^4 + x[1]^8 + (x[4] - 1)^2 diff --git a/src/PureJuMP/hs262.jl b/src/PureJuMP/hs262.jl index ec9e850d..2a72a2ed 100644 --- a/src/PureJuMP/hs262.jl +++ b/src/PureJuMP/hs262.jl @@ -20,7 +20,7 @@ function hs262(args...; kwargs...) lvar = [0, 0, 0, 0] @variable(nlp, x[i = 1:4] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, -0.5 * x[1] - x[2] - 0.5 * x[3] - x[4]) + @objective(nlp, Min, -0.5 * x[1] - x[2] - 0.5 * x[3] - x[4]) @constraint(nlp, x[1] + x[2] + x[3] - 2 * x[4] - 6 == 0) @constraint(nlp, 10 - x[1] - x[2] - x[3] - x[4] >= 0) diff --git a/src/PureJuMP/hs263.jl b/src/PureJuMP/hs263.jl index 3a7ad9b5..954a5971 100644 --- a/src/PureJuMP/hs263.jl +++ b/src/PureJuMP/hs263.jl @@ -19,11 +19,11 @@ function hs263(args...; kwargs...) 
x0 = [10, 10, 10, 10] @variable(nlp, x[i = 1:4], start = x0[i]) - @NLobjective(nlp, Min, -x[1]) - @NLconstraint(nlp, x[2] - x[1]^3 >= 0) - @NLconstraint(nlp, x[1]^2 - x[2] >= 0) - @NLconstraint(nlp, x[2] - x[1]^3 - x[3]^2 == 0) - @NLconstraint(nlp, x[1]^2 - x[2] - x[4]^2 == 0) + @objective(nlp, Min, -x[1]) + @constraint(nlp, x[2] - x[1]^3 >= 0) + @constraint(nlp, x[1]^2 - x[2] >= 0) + @constraint(nlp, x[2] - x[1]^3 - x[3]^2 == 0) + @constraint(nlp, x[1]^2 - x[2] - x[4]^2 == 0) return nlp end diff --git a/src/PureJuMP/hs264.jl b/src/PureJuMP/hs264.jl index 97dad0cb..e66c90d7 100644 --- a/src/PureJuMP/hs264.jl +++ b/src/PureJuMP/hs264.jl @@ -20,13 +20,13 @@ function hs264(args...; kwargs...) @variable(nlp, x[i = 1:4], start = x0[i]) - @NLobjective( + @objective( nlp, Min, x[1]^2 + x[2]^2 + 2 * x[3]^2 + x[4]^2 - 5 * x[1] - 5 * x[2] - 21 * x[3] + 7 * x[4] ) - @NLconstraint(nlp, -x[1]^2 - x[2]^2 - x[3]^2 - x[4]^2 - x[1] + x[2] + x[3] + x[4] + 8 >= 0) - @NLconstraint(nlp, -x[1]^2 - 2 * x[2]^2 - x[3]^2 - 2 * x[4]^2 + x[1] + x[4] + 9 >= 0) - @NLconstraint(nlp, -2 * x[1]^2 - x[2]^2 - x[3]^2 - 2 * x[1] + x[2] + x[4] + 5 >= 0) + @constraint(nlp, -x[1]^2 - x[2]^2 - x[3]^2 - x[4]^2 - x[1] + x[2] + x[3] + x[4] + 8 >= 0) + @constraint(nlp, -x[1]^2 - 2 * x[2]^2 - x[3]^2 - 2 * x[4]^2 + x[1] + x[4] + 9 >= 0) + @constraint(nlp, -2 * x[1]^2 - x[2]^2 - x[3]^2 - 2 * x[1] + x[2] + x[4] + 5 >= 0) return nlp end diff --git a/src/PureJuMP/hs265.jl b/src/PureJuMP/hs265.jl index e72083f5..6970e76a 100644 --- a/src/PureJuMP/hs265.jl +++ b/src/PureJuMP/hs265.jl @@ -21,7 +21,7 @@ function hs265(args...; kwargs...) uvar = [1, 1, 1, 1] @variable(nlp, uvar[i] ≥ x[i = 1:4] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, sum(1 - exp(-10 * x[i] * exp(-x[i + 2])) for i = 1:2)) + @objective(nlp, Min, sum(1 - exp(-10 * x[i] * exp(-x[i + 2])) for i = 1:2)) @constraint(nlp, x[1] + x[2] - 1 == 0) @constraint(nlp, x[3] + x[4] - 1 == 0) diff --git a/src/PureJuMP/hs27.jl b/src/PureJuMP/hs27.jl index ed443d23..231d70d9 100644 --- a/src/PureJuMP/hs27.jl +++ b/src/PureJuMP/hs27.jl @@ -18,9 +18,9 @@ function hs27(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:3], start = 2) - @NLobjective(nlp, Min, 0.01 * (x[1] - 1)^2 + (x[2] - x[1]^2)^2) + @objective(nlp, Min, 0.01 * (x[1] - 1)^2 + (x[2] - x[1]^2)^2) - @NLconstraint(nlp, constr1, x[1] + x[3]^2 + 1 == 0) + @constraint(nlp, constr1, x[1] + x[3]^2 + 1 == 0) return nlp end diff --git a/src/PureJuMP/hs28.jl b/src/PureJuMP/hs28.jl index 31e78043..9aa494a7 100644 --- a/src/PureJuMP/hs28.jl +++ b/src/PureJuMP/hs28.jl @@ -21,7 +21,7 @@ function hs28(args...; kwargs...) @constraint(nlp, x[1] + 2 * x[2] + 3 * x[3] - 1 == 0) - @NLobjective(nlp, Min, 0.5 * (x[1] + x[2])^2 + 0.5 * (x[2] + x[3])^2) + @objective(nlp, Min, 0.5 * (x[1] + x[2])^2 + 0.5 * (x[2] + x[3])^2) return nlp end diff --git a/src/PureJuMP/hs29.jl b/src/PureJuMP/hs29.jl index 6acbdc57..cf43fadb 100644 --- a/src/PureJuMP/hs29.jl +++ b/src/PureJuMP/hs29.jl @@ -20,9 +20,9 @@ function hs29(args...; kwargs...) uvar = [Inf, Inf, Inf] @variable(nlp, lvar[i] ≤ x[i = 1:3] ≤ uvar[i], start = 1) - @NLconstraint(nlp, x[1]^2 + 2 * x[2]^2 + 4 * x[3]^2 - 48 ≤ 0) + @constraint(nlp, x[1]^2 + 2 * x[2]^2 + 4 * x[3]^2 - 48 ≤ 0) - @NLobjective(nlp, Min, -x[1] * x[2] * x[3]) + @objective(nlp, Min, -x[1] * x[2] * x[3]) return nlp end diff --git a/src/PureJuMP/hs3.jl b/src/PureJuMP/hs3.jl index b483572d..72f44e7e 100644 --- a/src/PureJuMP/hs3.jl +++ b/src/PureJuMP/hs3.jl @@ -20,7 +20,7 @@ function hs3(args...; kwargs...) 
lvar = [-Inf, 0] @variable(nlp, x[i = 1:2] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, x[2] + 0.00001 * (x[2] - x[1])^2) + @objective(nlp, Min, x[2] + 0.00001 * (x[2] - x[1])^2) return nlp end diff --git a/src/PureJuMP/hs30.jl b/src/PureJuMP/hs30.jl index 47b3e78d..56b13aa6 100644 --- a/src/PureJuMP/hs30.jl +++ b/src/PureJuMP/hs30.jl @@ -20,9 +20,9 @@ function hs30(args...; kwargs...) uvar = [10, 10, 10] @variable(nlp, lvar[i] ≤ x[i = 1:3] ≤ uvar[i], start = 1) - @NLconstraint(nlp, x[1]^2 + x[2]^2 - 1 ≥ 0) + @constraint(nlp, x[1]^2 + x[2]^2 - 1 ≥ 0) - @NLobjective(nlp, Min, 0.5 * sum(x[i]^2 for i = 1:3)) + @objective(nlp, Min, 0.5 * sum(x[i]^2 for i = 1:3)) return nlp end diff --git a/src/PureJuMP/hs31.jl b/src/PureJuMP/hs31.jl index 5bcac2dd..eae38245 100644 --- a/src/PureJuMP/hs31.jl +++ b/src/PureJuMP/hs31.jl @@ -20,9 +20,9 @@ function hs31(args...; kwargs...) uvar = [10, 10, 1] @variable(nlp, lvar[i] ≤ x[i = 1:3] ≤ uvar[i], start = 1) - @NLconstraint(nlp, x[1] * x[2] - 1 ≥ 0) + @constraint(nlp, x[1] * x[2] - 1 ≥ 0) - @NLobjective(nlp, Min, 9 * x[1]^2 + x[2]^2 + 9 * x[3]^2) + @objective(nlp, Min, 9 * x[1]^2 + x[2]^2 + 9 * x[3]^2) return nlp end diff --git a/src/PureJuMP/hs316.jl b/src/PureJuMP/hs316.jl index 7cd1e827..9ba7aea8 100644 --- a/src/PureJuMP/hs316.jl +++ b/src/PureJuMP/hs316.jl @@ -17,8 +17,8 @@ function hs316(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:2], start = 0) - @NLconstraint(nlp, x[1]^2 / 100 + x[2]^2 / 100 - 1 == 0) - @NLobjective(nlp, Min, (x[1] - 20)^2 + (x[2] + 20)^2) + @constraint(nlp, x[1]^2 / 100 + x[2]^2 / 100 - 1 == 0) + @objective(nlp, Min, (x[1] - 20)^2 + (x[2] + 20)^2) return nlp end diff --git a/src/PureJuMP/hs317.jl b/src/PureJuMP/hs317.jl index 3d924b67..bfedc26f 100644 --- a/src/PureJuMP/hs317.jl +++ b/src/PureJuMP/hs317.jl @@ -17,8 +17,8 @@ function hs317(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:2], start = 0) - @NLconstraint(nlp, x[1]^2 / 100 + x[2]^2 / 64 - 1 == 0) - @NLobjective(nlp, Min, (x[1] - 20)^2 + (x[2] + 20)^2) + @constraint(nlp, x[1]^2 / 100 + x[2]^2 / 64 - 1 == 0) + @objective(nlp, Min, (x[1] - 20)^2 + (x[2] + 20)^2) return nlp end diff --git a/src/PureJuMP/hs318.jl b/src/PureJuMP/hs318.jl index 748c3a32..0c6dc012 100644 --- a/src/PureJuMP/hs318.jl +++ b/src/PureJuMP/hs318.jl @@ -17,8 +17,8 @@ function hs318(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:2], start = 0) - @NLconstraint(nlp, x[1]^2 / 100 + x[2]^2 / 36 - 1 == 0) - @NLobjective(nlp, Min, (x[1] - 20)^2 + (x[2] + 20)^2) + @constraint(nlp, x[1]^2 / 100 + x[2]^2 / 36 - 1 == 0) + @objective(nlp, Min, (x[1] - 20)^2 + (x[2] + 20)^2) return nlp end diff --git a/src/PureJuMP/hs319.jl b/src/PureJuMP/hs319.jl index 1c890c56..a0f79bbe 100644 --- a/src/PureJuMP/hs319.jl +++ b/src/PureJuMP/hs319.jl @@ -17,8 +17,8 @@ function hs319(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:2], start = 0) - @NLconstraint(nlp, x[1]^2 / 100 + x[2]^2 / 16 - 1 == 0) - @NLobjective(nlp, Min, (x[1] - 20)^2 + (x[2] + 20)^2) + @constraint(nlp, x[1]^2 / 100 + x[2]^2 / 16 - 1 == 0) + @objective(nlp, Min, (x[1] - 20)^2 + (x[2] + 20)^2) return nlp end diff --git a/src/PureJuMP/hs32.jl b/src/PureJuMP/hs32.jl index 54d31a25..9a79a5f7 100644 --- a/src/PureJuMP/hs32.jl +++ b/src/PureJuMP/hs32.jl @@ -19,10 +19,10 @@ function hs32(args...; kwargs...) 
x0 = [0.1, 0.7, 0.2] @variable(nlp, x[i = 1:3] ≥ 0, start = x0[i]) - @NLconstraint(nlp, 6 * x[2] + 4 * x[3] - x[1]^3 - 3 ≥ 0) + @constraint(nlp, 6 * x[2] + 4 * x[3] - x[1]^3 - 3 ≥ 0) @constraint(nlp, -1 + x[1] + x[2] + x[3] == 0) - @NLobjective(nlp, Min, (x[1] + 3 * x[2] + x[3])^2 + 4 * (x[1] - x[2])^2) + @objective(nlp, Min, (x[1] + 3 * x[2] + x[3])^2 + 4 * (x[1] - x[2])^2) return nlp end diff --git a/src/PureJuMP/hs320.jl b/src/PureJuMP/hs320.jl index fbe65e5e..1acea65d 100644 --- a/src/PureJuMP/hs320.jl +++ b/src/PureJuMP/hs320.jl @@ -17,8 +17,8 @@ function hs320(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:2], start = 0) - @NLconstraint(nlp, x[1]^2 / 100 + x[2]^2 / 4 - 1 == 0) - @NLobjective(nlp, Min, (x[1] - 20)^2 + (x[2] + 20)^2) + @constraint(nlp, x[1]^2 / 100 + x[2]^2 / 4 - 1 == 0) + @objective(nlp, Min, (x[1] - 20)^2 + (x[2] + 20)^2) return nlp end diff --git a/src/PureJuMP/hs321.jl b/src/PureJuMP/hs321.jl index d790aa1a..1404c1dc 100644 --- a/src/PureJuMP/hs321.jl +++ b/src/PureJuMP/hs321.jl @@ -17,8 +17,8 @@ function hs321(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:2], start = 0) - @NLconstraint(nlp, x[1]^2 / 100 + x[2]^2 - 1 == 0) - @NLobjective(nlp, Min, (x[1] - 20)^2 + (x[2] + 20)^2) + @constraint(nlp, x[1]^2 / 100 + x[2]^2 - 1 == 0) + @objective(nlp, Min, (x[1] - 20)^2 + (x[2] + 20)^2) return nlp end diff --git a/src/PureJuMP/hs322.jl b/src/PureJuMP/hs322.jl index 5b6ff57b..07762e53 100644 --- a/src/PureJuMP/hs322.jl +++ b/src/PureJuMP/hs322.jl @@ -17,8 +17,8 @@ function hs322(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:2], start = 0) - @NLconstraint(nlp, x[1]^2 / 100 + x[2]^2 * 100 - 1 == 0) - @NLobjective(nlp, Min, (x[1] - 20)^2 + (x[2] + 20)^2) + @constraint(nlp, x[1]^2 / 100 + x[2]^2 * 100 - 1 == 0) + @objective(nlp, Min, (x[1] - 20)^2 + (x[2] + 20)^2) return nlp end diff --git a/src/PureJuMP/hs33.jl b/src/PureJuMP/hs33.jl index 91f3570a..60ec4f09 100644 --- a/src/PureJuMP/hs33.jl +++ b/src/PureJuMP/hs33.jl @@ -20,10 +20,10 @@ function hs33(args...; kwargs...) uvar = [Inf, Inf, 5] @variable(nlp, 0 ≤ x[i = 1:3] ≤ uvar[i], start = x0[i]) - @NLconstraint(nlp, -x[3]^2 + x[2]^2 + x[1]^2 ≤ 0) - @NLconstraint(nlp, x[1]^2 + x[2]^2 + x[3]^2 - 4 ≥ 0) + @constraint(nlp, -x[3]^2 + x[2]^2 + x[1]^2 ≤ 0) + @constraint(nlp, x[1]^2 + x[2]^2 + x[3]^2 - 4 ≥ 0) - @NLobjective(nlp, Min, (x[1] - 1) * (x[1] - 2) * (x[1] - 3) + x[3]) + @objective(nlp, Min, (x[1] - 1) * (x[1] - 2) * (x[1] - 3) + x[3]) return nlp end diff --git a/src/PureJuMP/hs34.jl b/src/PureJuMP/hs34.jl index 7b5a1cec..b95ea91b 100644 --- a/src/PureJuMP/hs34.jl +++ b/src/PureJuMP/hs34.jl @@ -20,8 +20,8 @@ function hs34(args...; kwargs...) uvar = [100, 100, 10] @variable(nlp, 0 ≤ x[i = 1:3] ≤ uvar[i], start = x0[i]) - @NLconstraint(nlp, x[2] - exp(x[1]) ≥ 0) - @NLconstraint(nlp, x[3] - exp(x[2]) ≥ 0) + @constraint(nlp, x[2] - exp(x[1]) ≥ 0) + @constraint(nlp, x[3] - exp(x[2]) ≥ 0) @objective(nlp, Min, -x[1]) diff --git a/src/PureJuMP/hs35.jl b/src/PureJuMP/hs35.jl index a8bdd5d3..fa362f84 100644 --- a/src/PureJuMP/hs35.jl +++ b/src/PureJuMP/hs35.jl @@ -20,7 +20,7 @@ function hs35(args...; kwargs...) @constraint(nlp, -3 + x[1] + x[2] + 2 * x[3] ≤ 0) - @NLobjective( + @objective( nlp, Min, 9 - 8 * x[1] - 6 * x[2] - 4 * x[3] + diff --git a/src/PureJuMP/hs36.jl b/src/PureJuMP/hs36.jl index 2089611e..88f6d52f 100644 --- a/src/PureJuMP/hs36.jl +++ b/src/PureJuMP/hs36.jl @@ -21,7 +21,7 @@ function hs36(args...; kwargs...) 
@constraint(nlp, -72 + x[1] + 2 * x[2] + 2 * x[3] ≤ 0) - @NLobjective(nlp, Min, -x[1] * x[2] * x[3]) + @objective(nlp, Min, -x[1] * x[2] * x[3]) return nlp end diff --git a/src/PureJuMP/hs37.jl b/src/PureJuMP/hs37.jl index a85de6ef..66048207 100644 --- a/src/PureJuMP/hs37.jl +++ b/src/PureJuMP/hs37.jl @@ -20,7 +20,7 @@ function hs37(args...; kwargs...) @constraint(nlp, 0 ≤ x[1] + 2 * x[2] + 2 * x[3] ≤ 72) - @NLobjective(nlp, Min, -x[1] * x[2] * x[3]) + @objective(nlp, Min, -x[1] * x[2] * x[3]) return nlp end diff --git a/src/PureJuMP/hs378.jl b/src/PureJuMP/hs378.jl index fc565de9..e5140af1 100644 --- a/src/PureJuMP/hs378.jl +++ b/src/PureJuMP/hs378.jl @@ -31,11 +31,11 @@ function hs378(args...; kwargs...) @variable(nlp, x[i = 1:10], start = -2.3) - @NLconstraint(nlp, exp(x[1]) + 2 * exp(x[2]) + 2 * exp(x[3]) + exp(x[6]) + exp(x[10]) - 2 == 0) - @NLconstraint(nlp, exp(x[4]) + 2 * exp(x[5]) + exp(x[6]) + exp(x[7]) - 1 == 0) - @NLconstraint(nlp, exp(x[3]) + exp(x[7]) + exp(x[8]) + 2 * exp(x[9]) + exp(x[10]) - 1 == 0) + @constraint(nlp, exp(x[1]) + 2 * exp(x[2]) + 2 * exp(x[3]) + exp(x[6]) + exp(x[10]) - 2 == 0) + @constraint(nlp, exp(x[4]) + 2 * exp(x[5]) + exp(x[6]) + exp(x[7]) - 1 == 0) + @constraint(nlp, exp(x[3]) + exp(x[7]) + exp(x[8]) + 2 * exp(x[9]) + exp(x[10]) - 1 == 0) - @NLobjective( + @objective( nlp, Min, sum(exp(x[i]) * (A[i] + x[i] - log(sum(exp(x[j]) for j = 1:10))) for i = 1:10) diff --git a/src/PureJuMP/hs38.jl b/src/PureJuMP/hs38.jl index a11f32db..6147716c 100644 --- a/src/PureJuMP/hs38.jl +++ b/src/PureJuMP/hs38.jl @@ -19,7 +19,7 @@ function hs38(args...; kwargs...) x0 = [-3, -1, -3, -1] @variable(nlp, -10 ≤ x[i = 1:4] ≤ 10, start = x0[i]) - @NLobjective( + @objective( nlp, Min, 100 * (x[2] - x[1]^2)^2 + diff --git a/src/PureJuMP/hs39.jl b/src/PureJuMP/hs39.jl index 5610a7b2..3d5240d6 100644 --- a/src/PureJuMP/hs39.jl +++ b/src/PureJuMP/hs39.jl @@ -20,9 +20,9 @@ function hs39(args...; kwargs...) @objective(nlp, Min, -x[1]) - @NLconstraint(nlp, x[2] - x[1]^3 - x[3]^2 == 0) + @constraint(nlp, x[2] - x[1]^3 - x[3]^2 == 0) - @NLconstraint(nlp, x[1]^2 - x[2] - x[4]^2 == 0) + @constraint(nlp, x[1]^2 - x[2] - x[4]^2 == 0) return nlp end diff --git a/src/PureJuMP/hs4.jl b/src/PureJuMP/hs4.jl index 3e2de26f..bccf71ea 100644 --- a/src/PureJuMP/hs4.jl +++ b/src/PureJuMP/hs4.jl @@ -20,7 +20,7 @@ function hs4(args...; kwargs...) lvar = [1, 0] @variable(nlp, x[i = 1:2] ≥ lvar[i], start = x0[i]) - @NLobjective(nlp, Min, (x[1] + 1)^3 / 3 + x[2]) + @objective(nlp, Min, (x[1] + 1)^3 / 3 + x[2]) return nlp end diff --git a/src/PureJuMP/hs40.jl b/src/PureJuMP/hs40.jl index 9430e79a..5f073ac4 100644 --- a/src/PureJuMP/hs40.jl +++ b/src/PureJuMP/hs40.jl @@ -18,13 +18,13 @@ function hs40(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:4], start = 0.8) - @NLobjective(nlp, Min, -x[1] * x[2] * x[3] * x[4]) + @objective(nlp, Min, -x[1] * x[2] * x[3] * x[4]) - @NLconstraint(nlp, x[1]^3 + x[2]^2 - 1 == 0) + @constraint(nlp, x[1]^3 + x[2]^2 - 1 == 0) - @NLconstraint(nlp, x[4] * x[1]^2 - x[3] == 0) + @constraint(nlp, x[4] * x[1]^2 - x[3] == 0) - @NLconstraint(nlp, x[4]^2 - x[2] == 0) + @constraint(nlp, x[4]^2 - x[2] == 0) return nlp end diff --git a/src/PureJuMP/hs41.jl b/src/PureJuMP/hs41.jl index fd4c15da..3020a868 100644 --- a/src/PureJuMP/hs41.jl +++ b/src/PureJuMP/hs41.jl @@ -21,7 +21,7 @@ function hs41(args...; kwargs...) 
@constraint(nlp, x[1] + 2 * x[2] + 2 * x[3] - x[4] == 0) - @NLobjective(nlp, Min, 2 - x[1] * x[2] * x[3]) + @objective(nlp, Min, 2 - x[1] * x[2] * x[3]) return nlp end diff --git a/src/PureJuMP/hs42.jl b/src/PureJuMP/hs42.jl index 4f601e05..5820852e 100644 --- a/src/PureJuMP/hs42.jl +++ b/src/PureJuMP/hs42.jl @@ -18,11 +18,11 @@ function hs42(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:4], start = 1) - @NLconstraint(nlp, x[3]^2 + x[4]^2 - 2 == 0) + @constraint(nlp, x[3]^2 + x[4]^2 - 2 == 0) @constraint(nlp, x[1] - 2 == 0) - @NLobjective( + @objective( nlp, Min, 0.5 * (x[1] - 1)^2 + 0.5 * (x[2] - 2)^2 + 0.5 * (x[3] - 3)^2 + 0.5 * (x[4] - 4)^2 diff --git a/src/PureJuMP/hs43.jl b/src/PureJuMP/hs43.jl index 17279e68..0ff1cf6b 100644 --- a/src/PureJuMP/hs43.jl +++ b/src/PureJuMP/hs43.jl @@ -18,11 +18,11 @@ function hs43(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:4], start = 0) - @NLconstraint(nlp, -8 + x[1]^2 + x[2]^2 + x[3]^2 + x[4]^2 + x[1] - x[2] + x[3] - x[4] ≤ 0) - @NLconstraint(nlp, -10 + x[1]^2 + 2 * x[2]^2 + x[3]^2 + 2 * x[4]^2 - x[1] - x[4] ≤ 0) - @NLconstraint(nlp, -5 + 2 * x[1]^2 + x[2]^2 + x[3]^2 + 2 * x[1] - x[2] - x[4] ≤ 0) + @constraint(nlp, -8 + x[1]^2 + x[2]^2 + x[3]^2 + x[4]^2 + x[1] - x[2] + x[3] - x[4] ≤ 0) + @constraint(nlp, -10 + x[1]^2 + 2 * x[2]^2 + x[3]^2 + 2 * x[4]^2 - x[1] - x[4] ≤ 0) + @constraint(nlp, -5 + 2 * x[1]^2 + x[2]^2 + x[3]^2 + 2 * x[1] - x[2] - x[4] ≤ 0) - @NLobjective( + @objective( nlp, Min, x[1]^2 + x[2]^2 + 2 * x[3]^2 + x[4]^2 - 5 * x[1] - 5 * x[2] - 21 * x[3] + 7 * x[4] diff --git a/src/PureJuMP/hs44.jl b/src/PureJuMP/hs44.jl index 60509828..f235226e 100644 --- a/src/PureJuMP/hs44.jl +++ b/src/PureJuMP/hs44.jl @@ -25,7 +25,7 @@ function hs44(args...; kwargs...) @constraint(nlp, -8 + x[3] + 2 * x[4] ≤ 0) @constraint(nlp, -5 + x[3] + x[4] ≤ 0) - @NLobjective(nlp, Min, x[1] - x[2] - x[3] - x[1] * x[3] + x[1] * x[4] + x[2] * x[3] - x[2] * x[4]) + @objective(nlp, Min, x[1] - x[2] - x[3] - x[1] * x[3] + x[1] * x[4] + x[2] * x[3] - x[2] * x[4]) return nlp end diff --git a/src/PureJuMP/hs45.jl b/src/PureJuMP/hs45.jl index 0d40bc1e..99abf436 100644 --- a/src/PureJuMP/hs45.jl +++ b/src/PureJuMP/hs45.jl @@ -18,7 +18,7 @@ function hs45(args...; kwargs...) nlp = Model() @variable(nlp, 0 ≤ x[i = 1:5] ≤ i, start = 2) - @NLobjective(nlp, Min, 2 - x[1] * x[2] * x[3] * x[4] * x[5] / 120) + @objective(nlp, Min, 2 - x[1] * x[2] * x[3] * x[4] * x[5] / 120) return nlp end diff --git a/src/PureJuMP/hs46.jl b/src/PureJuMP/hs46.jl index dd74c83a..dcb2e8a3 100644 --- a/src/PureJuMP/hs46.jl +++ b/src/PureJuMP/hs46.jl @@ -19,10 +19,10 @@ function hs46(args...; kwargs...) x0 = [sqrt(2) / 2, 1.75, 0.5, 2, 2] @variable(nlp, x[i = 1:5], start = x0[i]) - @NLconstraint(nlp, (x[1]^2) * x[4] + sin(x[4] - x[5]) - 1 == 0) - @NLconstraint(nlp, x[2] + (x[3]^4) * (x[4]^2) - 2 == 0) + @constraint(nlp, (x[1]^2) * x[4] + sin(x[4] - x[5]) - 1 == 0) + @constraint(nlp, x[2] + (x[3]^4) * (x[4]^2) - 2 == 0) - @NLobjective(nlp, Min, (x[1] - x[2])^2 + (x[3] - 1)^2 + (x[4] - 1)^4 + (x[5] - 1)^6) + @objective(nlp, Min, (x[1] - x[2])^2 + (x[3] - 1)^2 + (x[4] - 1)^4 + (x[5] - 1)^6) return nlp end diff --git a/src/PureJuMP/hs47.jl b/src/PureJuMP/hs47.jl index 29d68f8e..88da0bed 100644 --- a/src/PureJuMP/hs47.jl +++ b/src/PureJuMP/hs47.jl @@ -19,11 +19,11 @@ function hs47(args...; kwargs...) 
x0 = [2, sqrt(2), -1, 2 - sqrt(2), 0.5] @variable(nlp, x[i = 1:5], start = x0[i]) - @NLconstraint(nlp, x[1] + x[2]^2 + x[3]^3 - 3 == 0) - @NLconstraint(nlp, x[2] - x[3]^2 + x[4] - 1 == 0) - @NLconstraint(nlp, x[1] * x[5] - 1 == 0) + @constraint(nlp, x[1] + x[2]^2 + x[3]^3 - 3 == 0) + @constraint(nlp, x[2] - x[3]^2 + x[4] - 1 == 0) + @constraint(nlp, x[1] * x[5] - 1 == 0) - @NLobjective(nlp, Min, (x[1] - x[2])^2 + (x[2] - x[3])^3 + (x[3] - x[4])^4 + (x[4] - x[5])^4) + @objective(nlp, Min, (x[1] - x[2])^2 + (x[2] - x[3])^3 + (x[3] - x[4])^4 + (x[4] - x[5])^4) return nlp end diff --git a/src/PureJuMP/hs48.jl b/src/PureJuMP/hs48.jl index f5e0c259..1bcf73e2 100644 --- a/src/PureJuMP/hs48.jl +++ b/src/PureJuMP/hs48.jl @@ -22,7 +22,7 @@ function hs48(args...; kwargs...) @constraint(nlp, x[1] + x[2] + x[3] + x[4] + x[5] - 5 == 0) @constraint(nlp, x[3] - 2 * (x[4] + x[5]) + 3 == 0) - @NLobjective(nlp, Min, 0.5 * (x[1] - 1)^2 + 0.5 * (x[2] - x[3])^2 + 0.5 * (x[4] - x[5])^2) + @objective(nlp, Min, 0.5 * (x[1] - 1)^2 + 0.5 * (x[2] - x[3])^2 + 0.5 * (x[4] - x[5])^2) return nlp end diff --git a/src/PureJuMP/hs49.jl b/src/PureJuMP/hs49.jl index c1eceb5c..58c749ee 100644 --- a/src/PureJuMP/hs49.jl +++ b/src/PureJuMP/hs49.jl @@ -22,7 +22,7 @@ function hs49(args...; kwargs...) @constraint(nlp, x[1] + x[2] + x[3] + 4 * x[4] - 7 == 0) @constraint(nlp, x[3] + 5 * x[5] - 6 == 0) - @NLobjective(nlp, Min, (x[1] - x[2])^2 + (x[3] - 1)^2 + (x[4] - 1)^4 + (x[5] - 1)^6) + @objective(nlp, Min, (x[1] - x[2])^2 + (x[3] - 1)^2 + (x[4] - 1)^4 + (x[5] - 1)^6) return nlp end diff --git a/src/PureJuMP/hs5.jl b/src/PureJuMP/hs5.jl index cf105eb1..212b63da 100644 --- a/src/PureJuMP/hs5.jl +++ b/src/PureJuMP/hs5.jl @@ -20,7 +20,7 @@ function hs5(args...; kwargs...) uvar = [4, 3] @variable(nlp, lvar[i] ≤ x[i = 1:2] ≤ uvar[i], start = 0) - @NLobjective(nlp, Min, sin(x[1] + x[2]) + (x[1] - x[2])^2 - 1.5 * x[1] + 2.5 * x[2] + 1) + @objective(nlp, Min, sin(x[1] + x[2]) + (x[1] - x[2])^2 - 1.5 * x[1] + 2.5 * x[2] + 1) return nlp end diff --git a/src/PureJuMP/hs50.jl b/src/PureJuMP/hs50.jl index 70d32a22..97d743e3 100644 --- a/src/PureJuMP/hs50.jl +++ b/src/PureJuMP/hs50.jl @@ -23,7 +23,7 @@ function hs50(args...; kwargs...) @constraint(nlp, x[2] + 2 * x[3] + 3 * x[4] - 6 == 0) @constraint(nlp, x[3] + 2 * x[4] + 3 * x[5] - 6 == 0) - @NLobjective(nlp, Min, (x[1] - x[2])^2 + (x[2] - x[3])^2 + (x[3] - x[4])^4 + (x[4] - x[5])^2) + @objective(nlp, Min, (x[1] - x[2])^2 + (x[2] - x[3])^2 + (x[3] - x[4])^4 + (x[4] - x[5])^2) return nlp end diff --git a/src/PureJuMP/hs51.jl b/src/PureJuMP/hs51.jl index b4a1adb1..7a36d22b 100644 --- a/src/PureJuMP/hs51.jl +++ b/src/PureJuMP/hs51.jl @@ -23,7 +23,7 @@ function hs51(args...; kwargs...) @constraint(nlp, x[3] + x[4] - 2 * x[5] == 0) @constraint(nlp, x[2] - x[5] == 0) - @NLobjective( + @objective( nlp, Min, 0.5 * (x[1] - x[2])^2 + 0.5 * (x[2] + x[3] - 2)^2 + 0.5 * (x[4] - 1)^2 + 0.5 * (x[5] - 1)^2 diff --git a/src/PureJuMP/hs52.jl b/src/PureJuMP/hs52.jl index e1536966..7b58bf53 100644 --- a/src/PureJuMP/hs52.jl +++ b/src/PureJuMP/hs52.jl @@ -22,7 +22,7 @@ function hs52(args...; kwargs...) @constraint(nlp, x[3] + x[4] - 2 * x[5] == 0) @constraint(nlp, x[2] - x[5] == 0) - @NLobjective( + @objective( nlp, Min, 0.5 * (4 * x[1] - x[2])^2 + 0.5 * (x[2] + x[3] - 2)^2 + 0.5 * (x[4] - 1)^2 + 0.5 * (x[5] - 1)^2 diff --git a/src/PureJuMP/hs53.jl b/src/PureJuMP/hs53.jl index e4747d19..c9316236 100644 --- a/src/PureJuMP/hs53.jl +++ b/src/PureJuMP/hs53.jl @@ -22,7 +22,7 @@ function hs53(args...; kwargs...) 
@constraint(nlp, x[3] + x[4] - 2 * x[5] == 0) @constraint(nlp, x[2] - x[5] == 0) - @NLobjective( + @objective( nlp, Min, 0.5 * (x[1] - x[2])^2 + 0.5 * (x[2] + x[3] - 2)^2 + 0.5 * (x[4] - 1)^2 + 0.5 * (x[5] - 1)^2 diff --git a/src/PureJuMP/hs54.jl b/src/PureJuMP/hs54.jl index a9dc0223..7e392c01 100644 --- a/src/PureJuMP/hs54.jl +++ b/src/PureJuMP/hs54.jl @@ -23,7 +23,7 @@ function hs54(args...; kwargs...) @constraint(nlp, x[1] + 3 * x[2] == 0) - @NLexpression( + @expression( nlp, h, (((x[1] - 1e4)^2) / 6.4e7 + (x[1] - 1e4) * (x[2] - 1) / 2e4 + (x[2] - 1)^2) / 0.96 + @@ -33,7 +33,7 @@ function hs54(args...; kwargs...) ((x[6] - 1.e8)^2) / 2.5e17 ) - @NLobjective(nlp, Min, -exp(-h / 2)) + @objective(nlp, Min, -exp(-h / 2)) return nlp end diff --git a/src/PureJuMP/hs55.jl b/src/PureJuMP/hs55.jl index a8d2ca27..75fcfad7 100644 --- a/src/PureJuMP/hs55.jl +++ b/src/PureJuMP/hs55.jl @@ -27,7 +27,7 @@ function hs55(args...; kwargs...) @constraint(nlp, x[2] + x[5] - 2 == 0) @constraint(nlp, x[3] + x[6] - 2 == 0) - @NLobjective(nlp, Min, x[1] + 2 * x[2] + 4 * x[5] + exp(x[1] * x[4])) + @objective(nlp, Min, x[1] + 2 * x[2] + 4 * x[5] + exp(x[1] * x[4])) return nlp end diff --git a/src/PureJuMP/hs56.jl b/src/PureJuMP/hs56.jl index d1fc5f1e..b99a3cac 100644 --- a/src/PureJuMP/hs56.jl +++ b/src/PureJuMP/hs56.jl @@ -21,12 +21,12 @@ function hs56(args...; kwargs...) x0 = [1, 1, 1, a, a, a, b] @variable(nlp, x[i = 1:7], start = x0[i]) - @NLconstraint(nlp, x[1] - 4.2 * sin(x[4])^2 == 0) - @NLconstraint(nlp, x[2] - 4.2 * sin(x[5])^2 == 0) - @NLconstraint(nlp, x[3] - 4.2 * sin(x[6])^2 == 0) - @NLconstraint(nlp, x[1] + 2 * x[2] + 2 * x[3] - 7.2 * sin(x[7])^2 == 0) + @constraint(nlp, x[1] - 4.2 * sin(x[4])^2 == 0) + @constraint(nlp, x[2] - 4.2 * sin(x[5])^2 == 0) + @constraint(nlp, x[3] - 4.2 * sin(x[6])^2 == 0) + @constraint(nlp, x[1] + 2 * x[2] + 2 * x[3] - 7.2 * sin(x[7])^2 == 0) - @NLobjective(nlp, Min, -x[1] * x[2] * x[3]) + @objective(nlp, Min, -x[1] * x[2] * x[3]) return nlp end diff --git a/src/PureJuMP/hs57.jl b/src/PureJuMP/hs57.jl index cf2bfae1..8ba4fb81 100644 --- a/src/PureJuMP/hs57.jl +++ b/src/PureJuMP/hs57.jl @@ -73,11 +73,11 @@ function hs57(args...; kwargs...) b[41:42] .= 0.40 b[43:44] .= 0.39 - @NLconstraint(nlp, 0.49 * x[2] - x[1] * x[2] - 0.09 ≥ 0) + @constraint(nlp, 0.49 * x[2] - x[1] * x[2] - 0.09 ≥ 0) - @NLexpression(nlp, f[i = 1:44], b[i] - x[1] - (0.49 - x[1]) * exp(-x[2] * (a[i] - 8))) + @expression(nlp, f[i = 1:44], b[i] - x[1] - (0.49 - x[1]) * exp(-x[2] * (a[i] - 8))) - @NLobjective(nlp, Min, 0.5 * sum(f[i]^2 for i = 1:44)) + @objective(nlp, Min, 0.5 * sum(f[i]^2 for i = 1:44)) return nlp end diff --git a/src/PureJuMP/hs59.jl b/src/PureJuMP/hs59.jl index a9eee420..283ba69c 100644 --- a/src/PureJuMP/hs59.jl +++ b/src/PureJuMP/hs59.jl @@ -20,11 +20,11 @@ function hs59(args...; kwargs...) uvar = [75, 65] @variable(nlp, 0 ≤ x[i = 1:2] ≤ uvar[i], start = x0[i]) - @NLconstraint(nlp, x[1] * x[2] - 700 ≥ 0) - @NLconstraint(nlp, x[2] - (x[1]^2) / 125 ≥ 0) - @NLconstraint(nlp, (x[2] - 50)^2 - 5 * (x[1] - 55) ≥ 0) + @constraint(nlp, x[1] * x[2] - 700 ≥ 0) + @constraint(nlp, x[2] - (x[1]^2) / 125 ≥ 0) + @constraint(nlp, (x[2] - 50)^2 - 5 * (x[1] - 55) ≥ 0) - @NLobjective( + @objective( nlp, Min, -75.196 + 3.8112 * x[1] + 0.0020567 * x[1]^3 - 1.0345e-5 * x[1]^4 + 6.8306 * x[2] - diff --git a/src/PureJuMP/hs6.jl b/src/PureJuMP/hs6.jl index c3c94a3f..23014e4c 100644 --- a/src/PureJuMP/hs6.jl +++ b/src/PureJuMP/hs6.jl @@ -19,9 +19,9 @@ function hs6(args...; kwargs...) 
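# Named nonlinear expressions migrate the same way: @NLexpression becomes
# @expression, including indexed containers that are later summed in the
# objective. A sketch with placeholder data (y, t, p are illustrative only):
using JuMP
model = Model()
y = rand(5)
t = collect(1.0:5.0)
@variable(model, p[1:2], start = 0.5)
@expression(model, r[i = 1:5], y[i] - p[1] * exp(-p[2] * t[i]))  # residual expressions
@objective(model, Min, 0.5 * sum(r[i]^2 for i = 1:5))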
x0 = [-1.2, 1] @variable(nlp, x[i = 1:2], start = x0[i]) - @NLobjective(nlp, Min, 0.5 * (1 - x[1])^2) + @objective(nlp, Min, 0.5 * (1 - x[1])^2) - @NLconstraint(nlp, 10 * (x[2] - x[1]^2) == 0) + @constraint(nlp, 10 * (x[2] - x[1]^2) == 0) return nlp end diff --git a/src/PureJuMP/hs60.jl b/src/PureJuMP/hs60.jl index e8ac9a1f..f620d0eb 100644 --- a/src/PureJuMP/hs60.jl +++ b/src/PureJuMP/hs60.jl @@ -18,9 +18,9 @@ function hs60(args...; kwargs...) nlp = Model() @variable(nlp, -10 ≤ x[i = 1:3] ≤ 10, start = 2) - @NLconstraint(nlp, x[1] * (1 + x[2]^2) + x[3]^4 - 4 - 3 * sqrt(2) == 0) + @constraint(nlp, x[1] * (1 + x[2]^2) + x[3]^4 - 4 - 3 * sqrt(2) == 0) - @NLobjective(nlp, Min, (x[1] - 1)^2 + (x[1] - x[2])^2 + (x[2] - x[3])^4) + @objective(nlp, Min, (x[1] - 1)^2 + (x[1] - x[2])^2 + (x[2] - x[3])^4) return nlp end diff --git a/src/PureJuMP/hs61.jl b/src/PureJuMP/hs61.jl index 4868c1b0..12607ed9 100644 --- a/src/PureJuMP/hs61.jl +++ b/src/PureJuMP/hs61.jl @@ -21,7 +21,7 @@ function hs61(args...; kwargs...) @NLconstraint(nlp, 3 * x[1] - 2 * x[2]^2 - 7 == 0) @NLconstraint(nlp, 4 * x[1] - x[3]^2 - 11 == 0) - @NLobjective(nlp, Min, 4 * x[1]^2 + 2 * x[2]^2 + 2 * x[3]^2 - 33 * x[1] + 16 * x[2] - 24 * x[3]) + @objective(nlp, Min, 4 * x[1]^2 + 2 * x[2]^2 + 2 * x[3]^2 - 33 * x[1] + 16 * x[2] - 24 * x[3]) return nlp end diff --git a/src/PureJuMP/hs62.jl b/src/PureJuMP/hs62.jl index 07a4d1e7..e0806d8a 100644 --- a/src/PureJuMP/hs62.jl +++ b/src/PureJuMP/hs62.jl @@ -21,7 +21,7 @@ function hs62(args...; kwargs...) @constraint(nlp, x[1] + x[2] + x[3] - 1 == 0) - @NLobjective( + @objective( nlp, Min, -32.174 * ( diff --git a/src/PureJuMP/hs63.jl b/src/PureJuMP/hs63.jl index 2e8f67d4..ec62356d 100644 --- a/src/PureJuMP/hs63.jl +++ b/src/PureJuMP/hs63.jl @@ -19,9 +19,9 @@ function hs63(args...; kwargs...) @variable(nlp, x[i = 1:3] ≥ 0, start = 2) @constraint(nlp, 8 * x[1] + 14 * x[2] + 7 * x[3] - 56 == 0) - @NLconstraint(nlp, x[1]^2 + x[2]^2 + x[3]^2 - 25 == 0) + @constraint(nlp, x[1]^2 + x[2]^2 + x[3]^2 - 25 == 0) - @NLobjective(nlp, Min, 1000 - x[1]^2 - 2 * x[2]^2 - x[3]^2 - x[1] * x[2] - x[1] * x[3]) + @objective(nlp, Min, 1000 - x[1]^2 - 2 * x[2]^2 - x[3]^2 - x[1] * x[2] - x[1] * x[3]) return nlp end diff --git a/src/PureJuMP/hs64.jl b/src/PureJuMP/hs64.jl index 65d3559f..6d1188af 100644 --- a/src/PureJuMP/hs64.jl +++ b/src/PureJuMP/hs64.jl @@ -18,9 +18,9 @@ function hs64(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:3] ≥ 1e-5, start = 1) - @NLconstraint(nlp, -1 + 4 / x[1] + 32 / x[2] + 120 / x[3] ≤ 0) + @constraint(nlp, -1 + 4 / x[1] + 32 / x[2] + 120 / x[3] ≤ 0) - @NLobjective( + @objective( nlp, Min, 5 * x[1] + 50000 / x[1] + 20 * x[2] + 72000 / x[2] + 10 * x[3] + 144000 / x[3] diff --git a/src/PureJuMP/hs65.jl b/src/PureJuMP/hs65.jl index 2b95f233..b21ef38f 100644 --- a/src/PureJuMP/hs65.jl +++ b/src/PureJuMP/hs65.jl @@ -21,9 +21,9 @@ function hs65(args...; kwargs...) uvar = [4.5, 4.5, 5] @variable(nlp, lvar[i] ≤ x[i = 1:3] ≤ uvar[i], start = x0[i]) - @NLconstraint(nlp, -48 + x[1]^2 + x[2]^2 + x[3]^2 ≤ 0) + @constraint(nlp, -48 + x[1]^2 + x[2]^2 + x[3]^2 ≤ 0) - @NLobjective(nlp, Min, (x[1] - x[2])^2 + ((x[1] + x[2] - 10)^2) / 9 + (x[3] - 5)^2) + @objective(nlp, Min, (x[1] - x[2])^2 + ((x[1] + x[2] - 10)^2) / 9 + (x[3] - 5)^2) return nlp end diff --git a/src/PureJuMP/hs66.jl b/src/PureJuMP/hs66.jl index fe45a0fc..41ed94b3 100644 --- a/src/PureJuMP/hs66.jl +++ b/src/PureJuMP/hs66.jl @@ -20,8 +20,8 @@ function hs66(args...; kwargs...) 
uvar = [100, 100, 10] @variable(nlp, 0 ≤ x[i = 1:3] ≤ uvar[i], start = x0[i]) - @NLconstraint(nlp, x[2] - exp(x[1]) ≥ 0) - @NLconstraint(nlp, x[3] - exp(x[2]) ≥ 0) + @constraint(nlp, x[2] - exp(x[1]) ≥ 0) + @constraint(nlp, x[3] - exp(x[2]) ≥ 0) @objective(nlp, Min, 0.2 * x[3] - 0.8 * x[1]) diff --git a/src/PureJuMP/hs68.jl b/src/PureJuMP/hs68.jl index e1a37608..e18c4a38 100644 --- a/src/PureJuMP/hs68.jl +++ b/src/PureJuMP/hs68.jl @@ -29,11 +29,11 @@ function hs68(args...; kwargs...) @variable(nlp, lvar[i] ≤ x[i = 1:4] ≤ uvar[i], start = x0[i]) phi(t) = 1 // 2 * (erf(t / sqrt(2)) + 1) - register(nlp, :phi, 1, phi; autodiff = true) - @NLconstraint(nlp, x[3] - 2 * phi(x[2]) == 0) - @NLconstraint(nlp, x[4] - phi(-x[2] + d1 * sqrt(n1)) - phi(-x[2] - d1 * sqrt(n1)) == 0) + @expression(nlp, phi) + @constraint(nlp, x[3] - 2 * phi(x[2]) == 0) + @constraint(nlp, x[4] - phi(-x[2] + d1 * sqrt(n1)) - phi(-x[2] - d1 * sqrt(n1)) == 0) - @NLobjective( + @objective( nlp, Min, (a1 * n1 - (b1 * (exp(x[1] - 1) - x[3])) / (exp(x[1] - 1 + x[4])) * x[4]) / x[1] diff --git a/src/PureJuMP/hs69.jl b/src/PureJuMP/hs69.jl index 1443d725..7592c468 100644 --- a/src/PureJuMP/hs69.jl +++ b/src/PureJuMP/hs69.jl @@ -29,11 +29,11 @@ function hs69(args...; kwargs...) @variable(nlp, lvar[i] ≤ x[i = 1:4] ≤ uvar[i], start = x0[i]) phi(t) = 1 // 2 * (erf(t / sqrt(2)) + 1) - register(nlp, :phi, 1, phi; autodiff = true) - @NLconstraint(nlp, x[3] - 2 * phi(x[2]) == 0) - @NLconstraint(nlp, x[4] - phi(-x[2] + d1 * sqrt(n1)) - phi(-x[2] - d1 * sqrt(n1)) == 0) + @expression(nlp, phi) + @constraint(nlp, x[3] - 2 * phi(x[2]) == 0) + @constraint(nlp, x[4] - phi(-x[2] + d1 * sqrt(n1)) - phi(-x[2] - d1 * sqrt(n1)) == 0) - @NLobjective( + @objective( nlp, Min, (a1 * n1 - (b1 * (exp(x[1] - 1) - x[3])) / (exp(x[1] - 1 + x[4])) * x[4]) / x[1] diff --git a/src/PureJuMP/hs7.jl b/src/PureJuMP/hs7.jl index 78ac3140..776e1a67 100644 --- a/src/PureJuMP/hs7.jl +++ b/src/PureJuMP/hs7.jl @@ -18,9 +18,9 @@ function hs7(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:2], start = 2) - @NLobjective(nlp, Min, log(1 + x[1]^2) - x[2]) + @objective(nlp, Min, log(1 + x[1]^2) - x[2]) - @NLconstraint(nlp, (1 + x[1]^2)^2 + x[2]^2 - 4 == 0) + @constraint(nlp, (1 + x[1]^2)^2 + x[2]^2 - 4 == 0) return nlp end diff --git a/src/PureJuMP/hs70.jl b/src/PureJuMP/hs70.jl index 11a431ee..d7a60536 100644 --- a/src/PureJuMP/hs70.jl +++ b/src/PureJuMP/hs70.jl @@ -30,8 +30,8 @@ function hs70(args...; kwargs...) yobs[1:10] = [0.00189, 0.1038, 0.268, 0.506, 0.577, 0.604, 0.725, 0.898, 0.947, 0.845] yobs[11:19] = [0.702, 0.528, 0.385, 0.257, 0.159, 0.0869, 0.0453, 0.01509, 0.00189] - @NLexpression(nlp, b, x[3] + (1 - x[3]) * x[4]) - @NLexpression( + @expression(nlp, b, x[3] + (1 - x[3]) * x[4]) + @expression( nlp, ycal[i = 1:19], (1 + 1 / (12 * x[2])) * ( @@ -49,9 +49,9 @@ function hs70(args...; kwargs...) exp(x[1] - b * c[i] * x[1] / (7.658 * x[4])) ) - @NLconstraint(nlp, x[3] + (1 - x[3]) * x[4] ≥ 0) + @constraint(nlp, x[3] + (1 - x[3]) * x[4] ≥ 0) - @NLobjective(nlp, Min, sum((ycal[i] - yobs[i])^2 for i = 1:19)) + @objective(nlp, Min, sum((ycal[i] - yobs[i])^2 for i = 1:19)) return nlp end diff --git a/src/PureJuMP/hs71.jl b/src/PureJuMP/hs71.jl index b8d88894..ea216273 100644 --- a/src/PureJuMP/hs71.jl +++ b/src/PureJuMP/hs71.jl @@ -19,10 +19,10 @@ function hs71(args...; kwargs...) 
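# User-defined functions that were previously attached with
# register(model, :f, dim, f; autodiff = true) are declared with @operator in the
# unified interface, and the resulting operator symbol (not the raw Julia function)
# is what appears in constraints and objectives. Hedged sketch with an illustrative
# smooth function g:
using JuMP
model = Model()
@variable(model, x, start = 0.0)
g(t) = log(1 + t^2)                 # plain Julia function
@operator(model, op_g, 1, g)        # 1 = number of arguments
@constraint(model, op_g(x) <= 1.0)  # use op_g, not g, inside the model
@objective(model, Min, (x - 1)^2 + op_g(x))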
x0 = [1, 5, 5, 1] @variable(nlp, 1 ≤ x[i = 1:4] ≤ 5, start = x0[i]) - @NLconstraint(nlp, x[1] * x[2] * x[3] * x[4] - 25 ≥ 0) - @NLconstraint(nlp, x[1]^2 + x[2]^2 + x[3]^2 + x[4]^2 - 40 == 0) + @constraint(nlp, x[1] * x[2] * x[3] * x[4] - 25 ≥ 0) + @constraint(nlp, x[1]^2 + x[2]^2 + x[3]^2 + x[4]^2 - 40 == 0) - @NLobjective(nlp, Min, x[1] * x[4] * (x[1] + x[2] + x[3]) + x[3]) + @objective(nlp, Min, x[1] * x[4] * (x[1] + x[2] + x[3]) + x[3]) return nlp end diff --git a/src/PureJuMP/hs72.jl b/src/PureJuMP/hs72.jl index 1bcffb47..8e26f100 100644 --- a/src/PureJuMP/hs72.jl +++ b/src/PureJuMP/hs72.jl @@ -19,8 +19,8 @@ function hs72(args...; kwargs...) uvar = [(5 - i) * 1e5 for i = 1:4] @variable(nlp, 0.001 ≤ x[i = 1:4] ≤ uvar[i], start = 1) - @NLconstraint(nlp, -0.0401 + 4 / x[1] + 2.25 / x[2] + 1 / x[3] + 0.25 / x[4] ≤ 0) - @NLconstraint(nlp, -0.010085 + 0.16 / x[1] + 0.36 / x[2] + 0.64 / x[3] + 0.64 / x[4] ≤ 0) + @constraint(nlp, -0.0401 + 4 / x[1] + 2.25 / x[2] + 1 / x[3] + 0.25 / x[4] ≤ 0) + @constraint(nlp, -0.010085 + 0.16 / x[1] + 0.36 / x[2] + 0.64 / x[3] + 0.64 / x[4] ≤ 0) @objective(nlp, Min, 1 + x[1] + x[2] + x[3] + x[4]) diff --git a/src/PureJuMP/hs73.jl b/src/PureJuMP/hs73.jl index 3f0e49ad..6db9223f 100644 --- a/src/PureJuMP/hs73.jl +++ b/src/PureJuMP/hs73.jl @@ -19,7 +19,7 @@ function hs73(args...; kwargs...) @variable(nlp, x[i = 1:4] ≥ 0, start = 1) @constraint(nlp, 2.3 * x[1] + 5.6 * x[2] + 11.1 * x[3] + 1.3 * x[4] - 5 ≥ 0) - @NLconstraint( + @constraint( nlp, 12 * x[1] + 11.9 * x[2] + 41.8 * x[3] + 52.1 * x[4] - 21 - 1.645 * sqrt(0.28x[1]^2 + 0.19 * x[2]^2 + 20.5 * x[3]^2 + 0.62x[4]^2) ≥ 0 diff --git a/src/PureJuMP/hs74.jl b/src/PureJuMP/hs74.jl index 6852ff59..092ef3a6 100644 --- a/src/PureJuMP/hs74.jl +++ b/src/PureJuMP/hs74.jl @@ -22,11 +22,11 @@ function hs74(args...; kwargs...) @variable(nlp, lvar[i] ≤ x[i = 1:4] ≤ uvar[i], start = 0) @constraint(nlp, -a ≤ x[4] - x[3] ≤ a) - @NLconstraint(nlp, 1000 * sin(-x[3] - 0.25) + 1000 * sin(-x[4] - 0.25) + 894.8 - x[1] == 0) - @NLconstraint(nlp, 1000 * sin(x[3] - 0.25) + 1000 * sin(x[3] - x[4] - 0.25) + 894.8 - x[2] == 0) - @NLconstraint(nlp, 1000 * sin(x[4] - 0.25) + 1000 * sin(x[4] - x[3] - 0.25) + 1294.8 == 0) + @constraint(nlp, 1000 * sin(-x[3] - 0.25) + 1000 * sin(-x[4] - 0.25) + 894.8 - x[1] == 0) + @constraint(nlp, 1000 * sin(x[3] - 0.25) + 1000 * sin(x[3] - x[4] - 0.25) + 894.8 - x[2] == 0) + @constraint(nlp, 1000 * sin(x[4] - 0.25) + 1000 * sin(x[4] - x[3] - 0.25) + 1294.8 == 0) - @NLobjective(nlp, Min, 3 * x[1] + 1e-6 * x[1]^3 + 2 * x[2] + 1e-6 * (2 / 3) * x[2]^3) + @objective(nlp, Min, 3 * x[1] + 1e-6 * x[1]^3 + 2 * x[2] + 1e-6 * (2 / 3) * x[2]^3) return nlp end diff --git a/src/PureJuMP/hs75.jl b/src/PureJuMP/hs75.jl index 6f73a85c..ab845a02 100644 --- a/src/PureJuMP/hs75.jl +++ b/src/PureJuMP/hs75.jl @@ -22,11 +22,11 @@ function hs75(args...; kwargs...) 
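# Each of these constructors returns an unsolved JuMP Model, and solving a migrated
# problem is unchanged. Usage sketch assuming Ipopt is installed (any JuMP-compatible
# nonlinear solver would do):
using JuMP, Ipopt
nlp = hs71()                        # constructor defined above; qualify with its module if needed
set_optimizer(nlp, Ipopt.Optimizer)
set_silent(nlp)
optimize!(nlp)
value.(nlp[:x]), objective_value(nlp)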
@variable(nlp, lvar[i] ≤ x[i = 1:4] ≤ uvar[i], start = 0) @constraint(nlp, -a ≤ x[4] - x[3] ≤ a) - @NLconstraint(nlp, 1000 * sin(-x[3] - 0.25) + 1000 * sin(-x[4] - 0.25) + 894.8 - x[1] == 0) - @NLconstraint(nlp, 1000 * sin(x[3] - 0.25) + 1000 * sin(x[3] - x[4] - 0.25) + 894.8 - x[2] == 0) - @NLconstraint(nlp, 1000 * sin(x[4] - 0.25) + 1000 * sin(x[4] - x[3] - 0.25) + 1294.8 == 0) + @constraint(nlp, 1000 * sin(-x[3] - 0.25) + 1000 * sin(-x[4] - 0.25) + 894.8 - x[1] == 0) + @constraint(nlp, 1000 * sin(x[3] - 0.25) + 1000 * sin(x[3] - x[4] - 0.25) + 894.8 - x[2] == 0) + @constraint(nlp, 1000 * sin(x[4] - 0.25) + 1000 * sin(x[4] - x[3] - 0.25) + 1294.8 == 0) - @NLobjective(nlp, Min, 3 * x[1] + 1e-6 * x[1]^3 + 2 * x[2] + 1e-6 * (2 / 3) * x[2]^3) + @objective(nlp, Min, 3 * x[1] + 1e-6 * x[1]^3 + 2 * x[2] + 1e-6 * (2 / 3) * x[2]^3) return nlp end diff --git a/src/PureJuMP/hs76.jl b/src/PureJuMP/hs76.jl index e677f4fc..70b7e32f 100644 --- a/src/PureJuMP/hs76.jl +++ b/src/PureJuMP/hs76.jl @@ -22,7 +22,7 @@ function hs76(args...; kwargs...) @constraint(nlp, -4 + 3 * x[1] + x[2] + 2 * x[3] - x[4] ≤ 0) @constraint(nlp, x[2] + 4 * x[3] - 1.5 ≥ 0) - @NLobjective( + @objective( nlp, Min, x[1]^2 + 0.5 * x[2]^2 + x[3]^2 + 0.5 * x[4]^2 - x[1] * x[3] + x[3] * x[4] - x[1] - 3 * x[2] + diff --git a/src/PureJuMP/hs77.jl b/src/PureJuMP/hs77.jl index cf81bf59..79f733d2 100644 --- a/src/PureJuMP/hs77.jl +++ b/src/PureJuMP/hs77.jl @@ -18,10 +18,10 @@ function hs77(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:5], start = 2) - @NLconstraint(nlp, x[1]^2 * x[4] + sin(x[4] - x[5]) - 2 * sqrt(2) == 0) - @NLconstraint(nlp, x[2] + x[3]^4 * x[4]^2 - 8 - sqrt(2) == 0) + @constraint(nlp, x[1]^2 * x[4] + sin(x[4] - x[5]) - 2 * sqrt(2) == 0) + @constraint(nlp, x[2] + x[3]^4 * x[4]^2 - 8 - sqrt(2) == 0) - @NLobjective( + @objective( nlp, Min, (x[1] - 1)^2 + (x[1] - x[2])^2 + (x[3] - 1)^2 + (x[4] - 1)^4 + (x[5] - 1)^6 diff --git a/src/PureJuMP/hs78.jl b/src/PureJuMP/hs78.jl index ed436a84..eaef92d7 100644 --- a/src/PureJuMP/hs78.jl +++ b/src/PureJuMP/hs78.jl @@ -19,11 +19,11 @@ function hs78(args...; kwargs...) x0 = [-2, 1.5, 2, -1, -1] @variable(nlp, x[i = 1:5], start = x0[i]) - @NLconstraint(nlp, sum(x[i]^2 for i = 1:5) - 10 == 0) - @NLconstraint(nlp, x[2] * x[3] - 5 * x[4] * x[5] == 0) - @NLconstraint(nlp, x[1]^3 + x[2]^3 + 1 == 0) + @constraint(nlp, sum(x[i]^2 for i = 1:5) - 10 == 0) + @constraint(nlp, x[2] * x[3] - 5 * x[4] * x[5] == 0) + @constraint(nlp, x[1]^3 + x[2]^3 + 1 == 0) - @NLobjective(nlp, Min, prod(x[i] for i = 1:5)) + @objective(nlp, Min, prod(x[i] for i = 1:5)) return nlp end diff --git a/src/PureJuMP/hs79.jl b/src/PureJuMP/hs79.jl index 1e005231..ca1260cf 100644 --- a/src/PureJuMP/hs79.jl +++ b/src/PureJuMP/hs79.jl @@ -18,11 +18,11 @@ function hs79(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:5], start = 2) - @NLconstraint(nlp, x[1] + x[2]^2 + x[3]^3 - 2 - 3 * sqrt(2) == 0) - @NLconstraint(nlp, x[2] - x[3]^2 + x[4] + 2 - 2 * sqrt(2) == 0) - @NLconstraint(nlp, x[1] * x[5] - 2 == 0) + @constraint(nlp, x[1] + x[2]^2 + x[3]^3 - 2 - 3 * sqrt(2) == 0) + @constraint(nlp, x[2] - x[3]^2 + x[4] + 2 - 2 * sqrt(2) == 0) + @constraint(nlp, x[1] * x[5] - 2 == 0) - @NLobjective( + @objective( nlp, Min, (x[1] - 1)^2 + (x[1] - x[2])^2 + (x[2] - x[3])^2 + (x[3] - x[4])^4 + (x[4] - x[5])^4 diff --git a/src/PureJuMP/hs8.jl b/src/PureJuMP/hs8.jl index 25aa064b..51f4be3c 100644 --- a/src/PureJuMP/hs8.jl +++ b/src/PureJuMP/hs8.jl @@ -21,9 +21,9 @@ function hs8(args...; kwargs...) 
@objective(nlp, Min, -1) - @NLconstraint(nlp, constr1, x[1]^2 + x[2]^2 - 25 == 0) + @constraint(nlp, constr1, x[1]^2 + x[2]^2 - 25 == 0) - @NLconstraint(nlp, constr2, x[1] * x[2] - 9 == 0) + @constraint(nlp, constr2, x[1] * x[2] - 9 == 0) return nlp end diff --git a/src/PureJuMP/hs80.jl b/src/PureJuMP/hs80.jl index 1280e0ec..82a74c5a 100644 --- a/src/PureJuMP/hs80.jl +++ b/src/PureJuMP/hs80.jl @@ -21,11 +21,11 @@ function hs80(args...; kwargs...) uvar = [2.3, 2.3, 3.2, 3.2, 3.2] @variable(nlp, lvar[i] ≤ x[i = 1:5] ≤ uvar[i], start = x0[i]) - @NLconstraint(nlp, sum(x[i]^2 for i = 1:5) - 10 == 0) - @NLconstraint(nlp, x[2] * x[3] - 5 * x[4] * x[5] == 0) - @NLconstraint(nlp, x[1]^3 + x[2]^3 + 1 == 0) + @constraint(nlp, sum(x[i]^2 for i = 1:5) - 10 == 0) + @constraint(nlp, x[2] * x[3] - 5 * x[4] * x[5] == 0) + @constraint(nlp, x[1]^3 + x[2]^3 + 1 == 0) - @NLobjective(nlp, Min, exp(prod(x[i] for i = 1:5))) + @objective(nlp, Min, exp(prod(x[i] for i = 1:5))) return nlp end diff --git a/src/PureJuMP/hs81.jl b/src/PureJuMP/hs81.jl index 47ddefd6..1e636cdc 100644 --- a/src/PureJuMP/hs81.jl +++ b/src/PureJuMP/hs81.jl @@ -21,11 +21,11 @@ function hs81(args...; kwargs...) uvar = [2.3, 2.3, 3.2, 3.2, 3.2] @variable(nlp, lvar[i] ≤ x[i = 1:5] ≤ uvar[i], start = x0[i]) - @NLconstraint(nlp, sum(x[i]^2 for i = 1:5) - 10 == 0) - @NLconstraint(nlp, x[2] * x[3] - 5 * x[4] * x[5] == 0) - @NLconstraint(nlp, x[1]^3 + x[2]^3 + 1 == 0) + @constraint(nlp, sum(x[i]^2 for i = 1:5) - 10 == 0) + @constraint(nlp, x[2] * x[3] - 5 * x[4] * x[5] == 0) + @constraint(nlp, x[1]^3 + x[2]^3 + 1 == 0) - @NLobjective(nlp, Min, exp(prod(x[i] for i = 1:5)) - 0.5 * (x[1]^3 + x[2]^3 + 1)^2) + @objective(nlp, Min, exp(prod(x[i] for i = 1:5)) - 0.5 * (x[1]^3 + x[2]^3 + 1)^2) return nlp end diff --git a/src/PureJuMP/hs83.jl b/src/PureJuMP/hs83.jl index d2ed0426..2add7ce1 100644 --- a/src/PureJuMP/hs83.jl +++ b/src/PureJuMP/hs83.jl @@ -25,17 +25,17 @@ function hs83(args...; kwargs...) a[1:6] = [85.334407, 0.0056858, 0.0006262, 0.0022053, 80.51249, 0.0071317] a[7:12] = [0.0029955, 0.0021813, 9.300961, 0.0047026, 0.0012547, 0.0019085] - @NLconstraint(nlp, 0 ≤ a[1] + a[2] * x[2] * x[5] + a[3] * x[1] * x[4] - a[4] * x[3] * x[5] ≤ 92) - @NLconstraint( + @constraint(nlp, 0 ≤ a[1] + a[2] * x[2] * x[5] + a[3] * x[1] * x[4] - a[4] * x[3] * x[5] ≤ 92) + @constraint( nlp, 0 ≤ a[5] + a[6] * x[2] * x[5] + a[7] * x[1] * x[2] - a[8] * x[3] * x[3] - 90 ≤ 20 ) - @NLconstraint( + @constraint( nlp, 0 ≤ a[9] + a[10] * x[3] * x[5] + a[11] * x[1] * x[3] - a[12] * x[3] * x[4] - 20 ≤ 5 ) - @NLobjective( + @objective( nlp, Min, 5.3578547 * x[3]^2 + 0.8356891 * x[1] * x[5] + 37.293239 * x[1] - 40792.141 diff --git a/src/PureJuMP/hs84.jl b/src/PureJuMP/hs84.jl index 02ce5b89..db368757 100644 --- a/src/PureJuMP/hs84.jl +++ b/src/PureJuMP/hs84.jl @@ -26,7 +26,7 @@ function hs84(args...; kwargs...) a[8:14] = [2931.1506, -40.427932, 5106.192, 15711.36, -155011.1084, 4360.53352, 12.9492344] a[15:21] = [10236.884, 13176.786, -326669.5104, 7390.68412, -27.8986976, 16643.076, 30988.146] - @NLconstraint( + @constraint( nlp, 0 ≤ a[7] * x[1] + @@ -36,7 +36,7 @@ function hs84(args...; kwargs...) a[11] * x[1] * x[5] ≤ 294000 ) - @NLconstraint( + @constraint( nlp, 0 ≤ a[12] * x[1] + @@ -46,7 +46,7 @@ function hs84(args...; kwargs...) a[16] * x[1] * x[5] ≤ 294000 ) - @NLconstraint( + @constraint( nlp, 0 ≤ a[17] * x[1] + @@ -57,7 +57,7 @@ function hs84(args...; kwargs...) 
277200 ) - @NLobjective( + @objective( nlp, Min, -a[1] - a[2] * x[1] - a[3] * x[1] * x[2] - a[4] * x[1] * x[3] - a[5] * x[1] * x[4] - diff --git a/src/PureJuMP/hs86.jl b/src/PureJuMP/hs86.jl index 9cb489bc..6f046192 100644 --- a/src/PureJuMP/hs86.jl +++ b/src/PureJuMP/hs86.jl @@ -48,7 +48,7 @@ function hs86(args...; kwargs...) @constraint(nlp, sum(a[i, j] * x[j] for j = 1:5) - b[i] ≥ 0) end - @NLobjective( + @objective( nlp, Min, sum(e[j] * x[j] for j = 1:5) + diff --git a/src/PureJuMP/hs87.jl b/src/PureJuMP/hs87.jl index 8b40242d..ae41bb1c 100644 --- a/src/PureJuMP/hs87.jl +++ b/src/PureJuMP/hs87.jl @@ -25,35 +25,37 @@ function hs87(args...; kwargs...) d = cos(147588 // 100000) e = sin(147588 // 100000) - @NLconstraint(nlp, 300 - x[1] - 1 / a * x[3] * x[4] * cos(b - x[6]) + ci / a * d * x[3] == 0) - @NLconstraint(nlp, -x[2] - 1 / a * x[3] * x[4] * cos(b + x[6]) + ci / a * d * x[4]^2 == 0) - @NLconstraint(nlp, -x[5] - 1 / a * x[3] * x[4] * cos(b + x[6]) + ci / a * e * x[4]^2 == 0) - @NLconstraint(nlp, 200 - 1 / a * x[3] * x[4] * sin(b - x[6]) + ci / a * e * x[3]^2 == 0) + @constraint(nlp, 300 - x[1] - 1 / a * x[3] * x[4] * cos(b - x[6]) + ci / a * d * x[3] == 0) + @constraint(nlp, -x[2] - 1 / a * x[3] * x[4] * cos(b + x[6]) + ci / a * d * x[4]^2 == 0) + @constraint(nlp, -x[5] - 1 / a * x[3] * x[4] * cos(b + x[6]) + ci / a * e * x[4]^2 == 0) + @constraint(nlp, 200 - 1 / a * x[3] * x[4] * sin(b - x[6]) + ci / a * e * x[3]^2 == 0) - function f1(x) - return if 0 <= x <= 300 - 30 * x - elseif 300 <= x <= 400 - 31 * x + function f1(t) + return if 0 <= t <= 300 + 30 * t + elseif 300 <= t <= 400 + 31 * t else eltype(x)(Inf) end end - function f2(x) - return if 0 <= x <= 100 - 28 * x - elseif 100 <= x <= 200 - 29 * x - elseif 200 <= x <= 1000 - 30 * x + function f2(t) + return if 0 <= t <= 100 + 28 * t + elseif 100 <= t <= 200 + 29 * t + elseif 200 <= t <= 1000 + 30 * t else - eltype(x)(Inf) + eltype(t)(Inf) end end - register(nlp, :f1, 1, f1, autodiff = true) - register(nlp, :f2, 1, f2, autodiff = true) - @NLobjective(nlp, Min, f1(x[1]) + f2(x[2])) + @operator(nlp, op_f1, 1, f1) + @expression(nlp, op_f1) + @operator(nlp, op_f2, 1, f2) + @expression(nlp, op_f2) + @objective(nlp, Min, op_f1(x[1]) + op_f2(x[2])) return nlp end diff --git a/src/PureJuMP/hs9.jl b/src/PureJuMP/hs9.jl index f5a4ecd0..af7785e2 100644 --- a/src/PureJuMP/hs9.jl +++ b/src/PureJuMP/hs9.jl @@ -18,7 +18,7 @@ function hs9(args...; kwargs...) nlp = Model() @variable(nlp, x[i = 1:2], start = 0) - @NLobjective(nlp, Min, sin(π * x[1] / 12) * cos(π * x[2] / 16)) + @objective(nlp, Min, sin(π * x[1] / 12) * cos(π * x[2] / 16)) @constraint(nlp, constr1, 4 * x[1] - 3 * x[2] == 0) diff --git a/src/PureJuMP/hs93.jl b/src/PureJuMP/hs93.jl index 92e06883..38c9247b 100644 --- a/src/PureJuMP/hs93.jl +++ b/src/PureJuMP/hs93.jl @@ -19,14 +19,14 @@ function hs93(args...; kwargs...) x0 = [5.54, 4.4, 12.02, 11.82, 0.702, 0.852] @variable(nlp, x[i = 1:6] ≥ 0, start = x0[i]) - @NLconstraint(nlp, 0.001 * prod(x[i] for i = 1:6) - 2.07 ≥ 0) - @NLconstraint( + @constraint(nlp, 0.001 * prod(x[i] for i = 1:6) - 2.07 ≥ 0) + @constraint( nlp, 1 - 0.00062 * x[1] * x[4] * x[5]^2 * (x[1] + x[2] + x[3]) - 0.00058 * x[2] * x[3] * x[6]^2 * (x[1] + 1.57 * x[2] + x[4]) ≥ 0 ) - @NLobjective( + @objective( nlp, Min, 0.0204 * x[1] * x[4] * (x[1] + x[2] + x[3]) + diff --git a/src/PureJuMP/hs95.jl b/src/PureJuMP/hs95.jl index 707687e6..5521c67a 100644 --- a/src/PureJuMP/hs95.jl +++ b/src/PureJuMP/hs95.jl @@ -20,19 +20,19 @@ function hs95(args...; kwargs...) 
@variable(nlp, 0 ≤ x[i = 1:6] ≤ uvar[i], start = 0) b = [4.97, -1.88, -29.08, -78.02] - @NLconstraint( + @constraint( nlp, 17.1 * x[1] + 38.2 * x[2] + 204.2 * x[3] + 212.3 * x[4] + 623.4 * x[5] + 1495.5 * x[6] - 169 * x[1] * x[3] - 3580 * x[3] * x[5] - 3810 * x[4] * x[5] - 18500 * x[4] * x[6] - 24300 * x[5] * x[6] ≥ b[1] ) - @NLconstraint( + @constraint( nlp, 17.9 * x[1] + 36.8 * x[2] + 113.9 * x[3] + 169.7 * x[4] + 337.8 * x[5] + 1385.2 * x[6] - 139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6] ≥ b[2] ) - @NLconstraint(nlp, -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5] ≥ b[3]) - @NLconstraint( + @constraint(nlp, -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5] ≥ b[3]) + @constraint( nlp, 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6] ≥ b[4] ) diff --git a/src/PureJuMP/hs96.jl b/src/PureJuMP/hs96.jl index 54d9246a..a6e9bfbb 100644 --- a/src/PureJuMP/hs96.jl +++ b/src/PureJuMP/hs96.jl @@ -20,19 +20,19 @@ function hs96(args...; kwargs...) @variable(nlp, 0 ≤ x[i = 1:6] ≤ uvar[i], start = 0) b = [4.97, -1.88, -69.08, -118.02] - @NLconstraint( + @constraint( nlp, 17.1 * x[1] + 38.2 * x[2] + 204.2 * x[3] + 212.3 * x[4] + 623.4 * x[5] + 1495.5 * x[6] - 169 * x[1] * x[3] - 3580 * x[3] * x[5] - 3810 * x[4] * x[5] - 18500 * x[4] * x[6] - 24300 * x[5] * x[6] ≥ b[1] ) - @NLconstraint( + @constraint( nlp, 17.9 * x[1] + 36.8 * x[2] + 113.9 * x[3] + 169.7 * x[4] + 337.8 * x[5] + 1385.2 * x[6] - 139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6] ≥ b[2] ) - @NLconstraint(nlp, -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5] ≥ b[3]) - @NLconstraint( + @constraint(nlp, -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5] ≥ b[3]) + @constraint( nlp, 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6] ≥ b[4] ) diff --git a/src/PureJuMP/hs97.jl b/src/PureJuMP/hs97.jl index 67e1620c..20b862be 100644 --- a/src/PureJuMP/hs97.jl +++ b/src/PureJuMP/hs97.jl @@ -20,19 +20,19 @@ function hs97(args...; kwargs...) @variable(nlp, 0 ≤ x[i = 1:6] ≤ uvar[i], start = 0) b = [32.97, 25.12, -29.08, -78.02] - @NLconstraint( + @constraint( nlp, 17.1 * x[1] + 38.2 * x[2] + 204.2 * x[3] + 212.3 * x[4] + 623.4 * x[5] + 1495.5 * x[6] - 169 * x[1] * x[3] - 3580 * x[3] * x[5] - 3810 * x[4] * x[5] - 18500 * x[4] * x[6] - 24300 * x[5] * x[6] ≥ b[1] ) - @NLconstraint( + @constraint( nlp, 17.9 * x[1] + 36.8 * x[2] + 113.9 * x[3] + 169.7 * x[4] + 337.8 * x[5] + 1385.2 * x[6] - 139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6] ≥ b[2] ) - @NLconstraint(nlp, -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5] ≥ b[3]) - @NLconstraint( + @constraint(nlp, -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5] ≥ b[3]) + @constraint( nlp, 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6] ≥ b[4] ) diff --git a/src/PureJuMP/hs98.jl b/src/PureJuMP/hs98.jl index d9c90d0d..816efaac 100644 --- a/src/PureJuMP/hs98.jl +++ b/src/PureJuMP/hs98.jl @@ -20,19 +20,19 @@ function hs98(args...; kwargs...) 
@variable(nlp, 0 ≤ x[i = 1:6] ≤ uvar[i], start = 0) b = [32.97, 25.12, -124.08, -173.02] - @NLconstraint( + @constraint( nlp, 17.1 * x[1] + 38.2 * x[2] + 204.2 * x[3] + 212.3 * x[4] + 623.4 * x[5] + 1495.5 * x[6] - 169 * x[1] * x[3] - 3580 * x[3] * x[5] - 3810 * x[4] * x[5] - 18500 * x[4] * x[6] - 24300 * x[5] * x[6] ≥ b[1] ) - @NLconstraint( + @constraint( nlp, 17.9 * x[1] + 36.8 * x[2] + 113.9 * x[3] + 169.7 * x[4] + 337.8 * x[5] + 1385.2 * x[6] - 139 * x[1] * x[3] - 2450 * x[4] * x[5] - 16600 * x[4] * x[6] - 17200 * x[5] * x[6] ≥ b[2] ) - @NLconstraint(nlp, -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5] ≥ b[3]) - @NLconstraint( + @constraint(nlp, -273 * x[2] - 70 * x[4] - 819 * x[5] + 26000 * x[4] * x[5] ≥ b[3]) + @constraint( nlp, 159.9 * x[1] - 311 * x[2] + 587 * x[4] + 391 * x[5] + 2198 * x[6] - 14000 * x[1] * x[6] ≥ b[4] ) diff --git a/src/PureJuMP/hs99.jl b/src/PureJuMP/hs99.jl index 5067a44a..ea01f719 100644 --- a/src/PureJuMP/hs99.jl +++ b/src/PureJuMP/hs99.jl @@ -22,7 +22,7 @@ function hs99(args...; kwargs...) @variable(nlp, 0 ≤ x[i = 1:7] ≤ 1.58, start = 0.5) - @NLconstraint( + @constraint( nlp, sum( 0.5 * (t[i] - t[i - 1])^2 * (a[i] * sin(x[i - 1]) - b) + @@ -30,9 +30,9 @@ function hs99(args...; kwargs...) i = 2:8 ) - 1e5 == 0 ) - @NLconstraint(nlp, sum((t[i] - t[i - 1]) * (a[i] * sin(x[i - 1]) - b) for i = 2:8) - 1e3 == 0) + @constraint(nlp, sum((t[i] - t[i - 1]) * (a[i] * sin(x[i - 1]) - b) for i = 2:8) - 1e3 == 0) - @NLobjective(nlp, Min, -(sum(a[i] * (t[i] - t[i - 1]) * cos(x[i - 1]) for i = 2:8))^2) + @objective(nlp, Min, -(sum(a[i] * (t[i] - t[i - 1]) * cos(x[i - 1]) for i = 2:8))^2) return nlp end diff --git a/src/PureJuMP/indef_mod.jl b/src/PureJuMP/indef_mod.jl index 946c0b89..9e08386e 100644 --- a/src/PureJuMP/indef_mod.jl +++ b/src/PureJuMP/indef_mod.jl @@ -22,7 +22,7 @@ function indef_mod(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = i / (n + 1)) - @NLobjective( + @objective( nlp, Min, 100.0 * sum(sin(x[i] / 100.0) for i = 1:n) + diff --git a/src/PureJuMP/integreq.jl b/src/PureJuMP/integreq.jl index 9d8dc893..ffc07d19 100644 --- a/src/PureJuMP/integreq.jl +++ b/src/PureJuMP/integreq.jl @@ -24,7 +24,7 @@ function integreq(args...; n::Int = default_nvar, kwargs...) x0 = [j * h * (j * h - 1) for j = 1:n] set_start_value.(x, x0) - @NLobjective( + @objective( nlp, Min, 1 / 2 * sum( diff --git a/src/PureJuMP/jennrichsampson.jl b/src/PureJuMP/jennrichsampson.jl index 1d21231d..7818251a 100644 --- a/src/PureJuMP/jennrichsampson.jl +++ b/src/PureJuMP/jennrichsampson.jl @@ -11,7 +11,7 @@ function jennrichsampson(args...; n::Int = default_nvar, m::Int = 10, kwargs...) set_start_value(x1, 0.3) @variable(nlp, x2) set_start_value(x2, 0.4) - @NLobjective(nlp, Min, 0.5 * sum((2 + 2 * i - (exp(i * x1) + exp(i * x2)))^2 for i = 1:m)) + @objective(nlp, Min, 0.5 * sum((2 + 2 * i - (exp(i * x1) + exp(i * x2)))^2 for i = 1:m)) return nlp end diff --git a/src/PureJuMP/kirby2.jl b/src/PureJuMP/kirby2.jl index 6aa7329c..5f394ecc 100644 --- a/src/PureJuMP/kirby2.jl +++ b/src/PureJuMP/kirby2.jl @@ -194,7 +194,7 @@ function kirby2(args...; kwargs...) 
@variable(nlp, x[j = 1:5]) set_start_value.(x, [2, -0.1, 0.003, -0.001, 0.00001]) # other: [1.5, -0.15, 0.0025, -0.0015, 0.00002] - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/kowosb.jl b/src/PureJuMP/kowosb.jl index 36c83e0d..0db8ee6d 100644 --- a/src/PureJuMP/kowosb.jl +++ b/src/PureJuMP/kowosb.jl @@ -16,7 +16,7 @@ function kowosb(args...; n::Int = default_nvar, kwargs...) y = [0.1957, 0.1947, 0.1735, 0.1600, 0.0844, 0.0627, 0.0456, 0.0342, 0.0323, 0.0235, 0.0246] u = [4, 2, 1, 0.5, 0.25, 0.167, 0.125, 0.1, 0.833, 0.0714, 0.0625] - @NLobjective( + @objective( nlp, Min, 0.5 * diff --git a/src/PureJuMP/lanczos1.jl b/src/PureJuMP/lanczos1.jl index 35935a90..98af125a 100644 --- a/src/PureJuMP/lanczos1.jl +++ b/src/PureJuMP/lanczos1.jl @@ -68,7 +68,7 @@ function lanczos1(args...; kwargs...) @variable(nlp, x[j = 1:6]) set_start_value.(x, [1.2, 0.3, 5.6, 5.5, 6.5, 7.6]) # other: [0.5, 0.7, 3.6, 4.2, 4, 6.3] - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/lanczos2.jl b/src/PureJuMP/lanczos2.jl index d7dcd783..0838fa53 100644 --- a/src/PureJuMP/lanczos2.jl +++ b/src/PureJuMP/lanczos2.jl @@ -69,7 +69,7 @@ function lanczos2(args...; kwargs...) @variable(nlp, x[j = 1:6]) set_start_value.(x, [1.2, 0.3, 5.6, 5.5, 6.5, 7.6]) # other: [0.5, 0.7, 3.6, 4.2, 4, 6.3] - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/lanczos3.jl b/src/PureJuMP/lanczos3.jl index 897cb8c4..01cca7b7 100644 --- a/src/PureJuMP/lanczos3.jl +++ b/src/PureJuMP/lanczos3.jl @@ -71,7 +71,7 @@ function lanczos3(args...; kwargs...) @variable(nlp, x[j = 1:6]) set_start_value.(x, [1.2, 0.3, 5.6, 5.5, 6.5, 7.6]) # other: [0.5, 0.7, 3.6, 4.2, 4, 6.3] - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/liarwhd.jl b/src/PureJuMP/liarwhd.jl index d9cc9c16..6e2a1ce0 100644 --- a/src/PureJuMP/liarwhd.jl +++ b/src/PureJuMP/liarwhd.jl @@ -31,7 +31,7 @@ function liarwhd(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 4.0) - @NLobjective(nlp, Min, sum(4.0 * (x[i]^2 - x[1])^2 + (x[i] - 1)^2 for i = 1:n)) + @objective(nlp, Min, sum(4.0 * (x[i]^2 - x[1])^2 + (x[i] - 1)^2 for i = 1:n)) return nlp end diff --git a/src/PureJuMP/lincon.jl b/src/PureJuMP/lincon.jl index 42f2feb5..59035c24 100644 --- a/src/PureJuMP/lincon.jl +++ b/src/PureJuMP/lincon.jl @@ -24,7 +24,7 @@ function lincon(args...; n::Int = default_nvar, kwargs...) @constraint(nlp, sum(B[j, i] * x[2 + i] for i = 1:3) <= c[j]) end @constraint(nlp, -11 <= sum(b[i] * x[7 + i] for i = 1:2) <= 9) - @NLobjective(nlp, Min, sum(i + x[i]^4 for i = 1:15)) + @objective(nlp, Min, sum(i + x[i]^4 for i = 1:15)) return nlp end diff --git a/src/PureJuMP/marine.jl b/src/PureJuMP/marine.jl index e21f5a67..afd07b22 100644 --- a/src/PureJuMP/marine.jl +++ b/src/PureJuMP/marine.jl @@ -178,7 +178,7 @@ function marine(args...; n::Int = default_nvar, nc::Int = 1, kwargs...) @variable(nlp, Duc[1:nh, 1:nc, 1:ne]) set_start_value.(Duc, Duc0) - @NLobjective( + @objective( nlp, Min, sum( @@ -191,18 +191,18 @@ function marine(args...; n::Int = default_nvar, nc::Int = 1, kwargs...) 
) for j = 1:nm ) ) - @NLconstraint( + @constraint( nlp, [i = 1:(nh - 1), s = 1:ne], v[i, s] + h * sum(w[i, j, s] / fact[j + 1] for j = 1:nc) - v[i + 1, s] == 0 ) - @NLconstraint(nlp, [i = 1:nh, j = 1:nc], Duc[i, j, 1] + (m[1] + g[1]) * uc[i, j, 1] == 0) - @NLconstraint( + @constraint(nlp, [i = 1:nh, j = 1:nc], Duc[i, j, 1] + (m[1] + g[1]) * uc[i, j, 1] == 0) + @constraint( nlp, [i = 1:nh, j = 1:nc, s = 2:(ne - 1)], Duc[i, j, s] - g[s - 1] * uc[i, j, s - 1] + (m[s] + g[s]) * uc[i, j, s] == 0 ) - @NLconstraint( + @constraint( nlp, [i = 1:nh, j = 1:nc], Duc[i, j, ne] - g[ne - 1] * uc[i, j, ne - 1] + m[ne] * uc[i, j, ne] == 0 diff --git a/src/PureJuMP/meyer3.jl b/src/PureJuMP/meyer3.jl index 8f583044..310e8b19 100644 --- a/src/PureJuMP/meyer3.jl +++ b/src/PureJuMP/meyer3.jl @@ -40,7 +40,7 @@ function meyer3(args...; kwargs...) t = 45 .+ 5 * (1:16) - @NLobjective(nlp, Min, 0.5 * sum((x[1] * exp(x[2] / (t[i] + x[3])) - y[i])^2 for i = 1:16)) + @objective(nlp, Min, 0.5 * sum((x[1] * exp(x[2] / (t[i] + x[3])) - y[i])^2 for i = 1:16)) return nlp end diff --git a/src/PureJuMP/mgh01feas.jl b/src/PureJuMP/mgh01feas.jl index 4737326a..50969c42 100644 --- a/src/PureJuMP/mgh01feas.jl +++ b/src/PureJuMP/mgh01feas.jl @@ -8,7 +8,7 @@ function mgh01feas(args...; n::Int = default_nvar, kwargs...) @objective(nlp, Min, 0) @constraint(nlp, 1 - x1 == 0) - @NLconstraint(nlp, 10 * (x2 - x1^2) == 0) + @constraint(nlp, 10 * (x2 - x1^2) == 0) return nlp end diff --git a/src/PureJuMP/mgh09.jl b/src/PureJuMP/mgh09.jl index 979acaf4..a089c52b 100644 --- a/src/PureJuMP/mgh09.jl +++ b/src/PureJuMP/mgh09.jl @@ -60,7 +60,7 @@ function mgh09(args...; kwargs...) @variable(nlp, x[j = 1:4]) set_start_value.(x, [25, 39, 41.5, 39]) # other: [0.25, 0.39, 0.415, 0.39] - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/mgh10.jl b/src/PureJuMP/mgh10.jl index a4d07627..8c73cab9 100644 --- a/src/PureJuMP/mgh10.jl +++ b/src/PureJuMP/mgh10.jl @@ -66,7 +66,7 @@ function mgh10(args...; kwargs...) set_start_value.(x, [2, 400000, 25000]) # alternative: [0.02, 4000, 250] - @NLobjective(nlp, Min, 0.5 * sum((y[i, 1] - x[1] * exp(x[2] / (x[3] + y[i, 2])))^2 for i = 1:16)) + @objective(nlp, Min, 0.5 * sum((y[i, 1] - x[1] * exp(x[2] / (x[3] + y[i, 2])))^2 for i = 1:16)) return nlp end diff --git a/src/PureJuMP/mgh17.jl b/src/PureJuMP/mgh17.jl index e09766fb..2bc4176f 100644 --- a/src/PureJuMP/mgh17.jl +++ b/src/PureJuMP/mgh17.jl @@ -83,7 +83,7 @@ function mgh17(args...; kwargs...) @variable(nlp, x[j = 1:5]) set_start_value.(x, [50, 150, -100, 1, 2]) # other: [0.5, 1.5, -1, 0.01, 0.02] - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/misra1a.jl b/src/PureJuMP/misra1a.jl index 8d885773..c2d0d82c 100644 --- a/src/PureJuMP/misra1a.jl +++ b/src/PureJuMP/misra1a.jl @@ -58,7 +58,7 @@ function misra1a(args...; kwargs...) @variable(nlp, x[j = 1:2]) set_start_value.(x, [500, 0.0001]) - @NLobjective(nlp, Min, 0.5 * sum((y[i, 1] - x[1] * (1 - exp(-x[2] * y[i, 2])))^2 for i = 1:14)) + @objective(nlp, Min, 0.5 * sum((y[i, 1] - x[1] * (1 - exp(-x[2] * y[i, 2])))^2 for i = 1:14)) return nlp end diff --git a/src/PureJuMP/misra1b.jl b/src/PureJuMP/misra1b.jl index b77763e0..3237cbc9 100644 --- a/src/PureJuMP/misra1b.jl +++ b/src/PureJuMP/misra1b.jl @@ -59,7 +59,7 @@ function misra1b(args...; kwargs...) 
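# Constraints indexed over sets, as in the model above, keep their index sets in the
# second argument of @constraint after the migration. Minimal sketch with
# illustrative dimensions nh, ne:
using JuMP
nh, ne = 4, 3
model = Model()
@variable(model, v[1:nh, 1:ne], start = 1.0)
@constraint(model, [i = 1:(nh - 1), s = 1:ne], v[i, s]^2 - v[i + 1, s] == 0)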
@variable(nlp, x[j = 1:2]) set_start_value.(x, [500, 0.0001]) # other: [0.0001, 0.0002] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((y[i, 1] - x[1] * (1 - (1 + x[2] * y[i, 2] / 2)^(-2)))^2 for i = 1:14) diff --git a/src/PureJuMP/misra1c.jl b/src/PureJuMP/misra1c.jl index f27a0d5d..1415b4f4 100644 --- a/src/PureJuMP/misra1c.jl +++ b/src/PureJuMP/misra1c.jl @@ -58,7 +58,7 @@ function misra1c(args...; kwargs...) @variable(nlp, x[j = 1:2]) set_start_value.(x, [500, 0.0001]) # other: [600, 0.0002] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((y[i, 1] - x[1] * (1 - (1 + 2 * x[2] * y[i, 2])^(-1 / 2)))^2 for i = 1:14) diff --git a/src/PureJuMP/misra1d.jl b/src/PureJuMP/misra1d.jl index 6f8012aa..044a7ce1 100644 --- a/src/PureJuMP/misra1d.jl +++ b/src/PureJuMP/misra1d.jl @@ -55,7 +55,7 @@ function misra1d(args...; kwargs...) @variable(nlp, x[j = 1:2]) set_start_value.(x, [500, 0.0001]) # other: [450, 0.0003] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((y[i, 1] - x[1] * x[2] * y[i, 2] * (1 + x[2] * y[i, 2])^(-1))^2 for i = 1:14) diff --git a/src/PureJuMP/morebv.jl b/src/PureJuMP/morebv.jl index f28cf238..48e63925 100644 --- a/src/PureJuMP/morebv.jl +++ b/src/PureJuMP/morebv.jl @@ -36,7 +36,7 @@ function morebv(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = x0[i]) # AMPL starts at i*h*(i*h - 1), ref https://github.com/mpf/Optimization-Test-Problems/blob/master/cute/morebv.mod - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/nasty.jl b/src/PureJuMP/nasty.jl index 166654cf..32a54930 100644 --- a/src/PureJuMP/nasty.jl +++ b/src/PureJuMP/nasty.jl @@ -14,7 +14,7 @@ function nasty(args...; kwargs...) @variable(nlp, x[i = 1:2], start = x0[i]) - @NLobjective(nlp, Min, 0.5 * ((1.0e10 * x[1])^2 + x[2]^2)) + @objective(nlp, Min, 0.5 * ((1.0e10 * x[1])^2 + x[2]^2)) return nlp end diff --git a/src/PureJuMP/ncb20.jl b/src/PureJuMP/ncb20.jl index 7c875594..5ee28619 100644 --- a/src/PureJuMP/ncb20.jl +++ b/src/PureJuMP/ncb20.jl @@ -32,7 +32,7 @@ function ncb20(args...; n::Int = default_nvar, kwargs...) nlp = Model() @variable(nlp, x[i = 1:n], start = x0[i]) - @NLobjective( + @objective( nlp, Min, 2.0 + diff --git a/src/PureJuMP/ncb20b.jl b/src/PureJuMP/ncb20b.jl index a0fb948f..fe738fea 100644 --- a/src/PureJuMP/ncb20b.jl +++ b/src/PureJuMP/ncb20b.jl @@ -30,7 +30,7 @@ function ncb20b(args...; n::Int = default_nvar, kwargs...) nlp = Model() @variable(nlp, x[i = 1:n], start = x0[i]) - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/nelson.jl b/src/PureJuMP/nelson.jl index 9392c295..8d76b2f5 100644 --- a/src/PureJuMP/nelson.jl +++ b/src/PureJuMP/nelson.jl @@ -177,7 +177,7 @@ function nelson(args...; kwargs...) @variable(nlp, x[j = 1:3]) set_start_value.(x, [2, 0.0001, -0.01]) # other: [2.5, 0.000000005, -0.05] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((log(y[i, 1]) - (x[1] - x[2] * y[i, 2] * exp(-x[3] * y[i, 3])))^2 for i = 1:128) diff --git a/src/PureJuMP/noncvxu2.jl b/src/PureJuMP/noncvxu2.jl index 846a864a..3152b467 100644 --- a/src/PureJuMP/noncvxu2.jl +++ b/src/PureJuMP/noncvxu2.jl @@ -21,7 +21,7 @@ function noncvxu2(args...; n::Int = default_nvar, kwargs...) nlp = Model() @variable(nlp, x[i = 1:n], start = i) - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/noncvxun.jl b/src/PureJuMP/noncvxun.jl index 0c995f1e..e713c11f 100644 --- a/src/PureJuMP/noncvxun.jl +++ b/src/PureJuMP/noncvxun.jl @@ -21,7 +21,7 @@ function noncvxun(args...; n::Int = default_nvar, kwargs...) 
nlp = Model() @variable(nlp, x[i = 1:n], start = i) - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/nondia.jl b/src/PureJuMP/nondia.jl index fdb16dc6..b65b29d1 100644 --- a/src/PureJuMP/nondia.jl +++ b/src/PureJuMP/nondia.jl @@ -32,7 +32,7 @@ function nondia(args...; n::Int = default_nvar, kwargs...) nlp = Model() @variable(nlp, x[i = 1:n], start = -1.0) - @NLobjective(nlp, Min, (x[1] - 1.0)^2 + 100 * sum((x[1] - x[i]^2)^2 for i = 2:n)) + @objective(nlp, Min, (x[1] - 1.0)^2 + 100 * sum((x[1] - x[i]^2)^2 for i = 2:n)) return nlp end diff --git a/src/PureJuMP/nondquar.jl b/src/PureJuMP/nondquar.jl index 0dba4811..f48d2571 100644 --- a/src/PureJuMP/nondquar.jl +++ b/src/PureJuMP/nondquar.jl @@ -34,7 +34,7 @@ function nondquar(args...; n::Int = default_nvar, kwargs...) nlp = Model() @variable(nlp, x[i = 1:n], start = x0[i]) - @NLobjective( + @objective( nlp, Min, (x[1] - x[2])^2 + (x[n - 1] - x[n])^2 + sum((x[i] + x[i + 1] + x[n])^4 for i = 1:(n - 2)) diff --git a/src/PureJuMP/osborne1.jl b/src/PureJuMP/osborne1.jl index 3904fd6b..3291b6ea 100644 --- a/src/PureJuMP/osborne1.jl +++ b/src/PureJuMP/osborne1.jl @@ -49,7 +49,7 @@ function osborne1(args...; n::Int = default_nvar, kwargs...) nlp = Model() x0 = [0.5, 1.5, -1.0, 0.01, 0.02] @variable(nlp, x[i = 1:5], start = x0[i]) - @NLobjective( + @objective( nlp, Min, 0.5 * sum((y[j] - (x[1] + x[2] * exp(-j * x[4]) + x[3] * exp(-j * x[5])))^2 for j = 1:m) diff --git a/src/PureJuMP/osborne2.jl b/src/PureJuMP/osborne2.jl index 5600d279..b9b76713 100644 --- a/src/PureJuMP/osborne2.jl +++ b/src/PureJuMP/osborne2.jl @@ -81,7 +81,7 @@ function osborne2(args...; n::Int = default_nvar, kwargs...) nlp = Model() x0 = [1.3, 0.65, 0.65, 0.7, 0.6, 3, 5, 7, 2, 4.5, 5.5] @variable(nlp, x[i = 1:11], start = x0[i]) - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/palmer1c.jl b/src/PureJuMP/palmer1c.jl index 5a2eb238..02860c2e 100644 --- a/src/PureJuMP/palmer1c.jl +++ b/src/PureJuMP/palmer1c.jl @@ -94,7 +94,7 @@ function palmer1c(args...; kwargs...) 92.733676, ] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:35) diff --git a/src/PureJuMP/palmer1d.jl b/src/PureJuMP/palmer1d.jl index d48cf5a9..3ec56c13 100644 --- a/src/PureJuMP/palmer1d.jl +++ b/src/PureJuMP/palmer1d.jl @@ -94,7 +94,7 @@ function palmer1d(args...; kwargs...) 92.733676, ] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:7))^2 for i = 1:35) diff --git a/src/PureJuMP/palmer2c.jl b/src/PureJuMP/palmer2c.jl index be65745d..2f89b08d 100644 --- a/src/PureJuMP/palmer2c.jl +++ b/src/PureJuMP/palmer2c.jl @@ -70,7 +70,7 @@ function palmer2c(args...; kwargs...) 72.676767, ] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:23) diff --git a/src/PureJuMP/palmer3c.jl b/src/PureJuMP/palmer3c.jl index bb624c7d..785fedf4 100644 --- a/src/PureJuMP/palmer3c.jl +++ b/src/PureJuMP/palmer3c.jl @@ -70,7 +70,7 @@ function palmer3c(args...; kwargs...) 64.87939, ] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:23) diff --git a/src/PureJuMP/palmer4c.jl b/src/PureJuMP/palmer4c.jl index 6624c8d8..06e44811 100644 --- a/src/PureJuMP/palmer4c.jl +++ b/src/PureJuMP/palmer4c.jl @@ -70,7 +70,7 @@ function palmer4c(args...; kwargs...) 
67.27625, ] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:23) diff --git a/src/PureJuMP/palmer5c.jl b/src/PureJuMP/palmer5c.jl index 36b62ead..20091d4b 100644 --- a/src/PureJuMP/palmer5c.jl +++ b/src/PureJuMP/palmer5c.jl @@ -62,7 +62,7 @@ function palmer5c(args...; kwargs...) end end - @NLobjective(nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * t[i, 2 * j - 1] for j = 1:6))^2 for i = 1:12)) + @objective(nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * t[i, 2 * j - 1] for j = 1:6))^2 for i = 1:12)) return nlp end diff --git a/src/PureJuMP/palmer5d.jl b/src/PureJuMP/palmer5d.jl index 7d199bed..8cd3896b 100644 --- a/src/PureJuMP/palmer5d.jl +++ b/src/PureJuMP/palmer5d.jl @@ -47,7 +47,7 @@ function palmer5d(args...; kwargs...) 77.719674, ] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:4))^2 for i = 1:12) diff --git a/src/PureJuMP/palmer6c.jl b/src/PureJuMP/palmer6c.jl index c4057a43..bc38e0e5 100644 --- a/src/PureJuMP/palmer6c.jl +++ b/src/PureJuMP/palmer6c.jl @@ -50,7 +50,7 @@ function palmer6c(args...; kwargs...) 9.026378, ] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:13) diff --git a/src/PureJuMP/palmer7c.jl b/src/PureJuMP/palmer7c.jl index d5e914ca..f8b48aec 100644 --- a/src/PureJuMP/palmer7c.jl +++ b/src/PureJuMP/palmer7c.jl @@ -50,7 +50,7 @@ function palmer7c(args...; kwargs...) 117.630959, ] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:13) diff --git a/src/PureJuMP/palmer8c.jl b/src/PureJuMP/palmer8c.jl index 8c739281..ab12100c 100644 --- a/src/PureJuMP/palmer8c.jl +++ b/src/PureJuMP/palmer8c.jl @@ -48,7 +48,7 @@ function palmer8c(args...; kwargs...) 97.874528, ] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((Y[i] - sum(x[j] * X[i]^(2 * j - 2) for j = 1:8))^2 for i = 1:12) diff --git a/src/PureJuMP/penalty1.jl b/src/PureJuMP/penalty1.jl index 4534e9cb..8e705435 100644 --- a/src/PureJuMP/penalty1.jl +++ b/src/PureJuMP/penalty1.jl @@ -23,7 +23,7 @@ function penalty1(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[j = 1:n]) set_start_value.(x, [j for j = 1:n]) - @NLobjective( + @objective( nlp, Min, 1 / 2 * sum((a * (x[i] - 1))^2 for i = 1:n) + 1 / 2 * (sum(x[j]^2 for j = 1:n) - 1 / 4)^2 diff --git a/src/PureJuMP/penalty2.jl b/src/PureJuMP/penalty2.jl index 25bf0746..ea44e7ca 100644 --- a/src/PureJuMP/penalty2.jl +++ b/src/PureJuMP/penalty2.jl @@ -25,7 +25,7 @@ function penalty2(args...; n::Int = default_nvar, kwargs...) nlp = Model() @variable(nlp, x[i = 1:n], start = 1 / 2) - @NLobjective( + @objective( nlp, Min, (x[1] - 0.2)^2 + diff --git a/src/PureJuMP/penalty3.jl b/src/PureJuMP/penalty3.jl index cebd7e9b..8a1328e9 100644 --- a/src/PureJuMP/penalty3.jl +++ b/src/PureJuMP/penalty3.jl @@ -29,7 +29,7 @@ function penalty3(args...; n::Int = default_nvar, kwargs...) nlp = Model() @variable(nlp, x[i = 1:n], start = i / (n + 1)) - @NLobjective( + @objective( nlp, Min, 1.0 + diff --git a/src/PureJuMP/polygon.jl b/src/PureJuMP/polygon.jl index d1cf3153..b1767a91 100644 --- a/src/PureJuMP/polygon.jl +++ b/src/PureJuMP/polygon.jl @@ -28,10 +28,10 @@ function polygon(args...; n::Int = default_nvar, kwargs...) 
end for i = 1:(N - 1) for j = (i + 1):N - @NLconstraint(nlp, r[i]^2 + r[j]^2 - 2 * r[i] * r[j] * cos(θ[i] - θ[j]) - 1 <= 0) + @constraint(nlp, r[i]^2 + r[j]^2 - 2 * r[i] * r[j] * cos(θ[i] - θ[j]) - 1 <= 0) end end - @NLobjective(nlp, Min, -0.5 * sum(r[i] * r[i + 1] * sin(θ[i + 1] - θ[i]) for i = 1:(N - 1))) + @objective(nlp, Min, -0.5 * sum(r[i] * r[i + 1] * sin(θ[i + 1] - θ[i]) for i = 1:(N - 1))) return nlp end diff --git a/src/PureJuMP/polygon1.jl b/src/PureJuMP/polygon1.jl index 47f30978..38104341 100644 --- a/src/PureJuMP/polygon1.jl +++ b/src/PureJuMP/polygon1.jl @@ -25,7 +25,7 @@ function polygon1(args...; n::Int = default_nvar, kwargs...) @constraint(nlp, θ[i + 1] - θ[i] >= 0.0) end - @NLobjective( + @objective( nlp, Min, -0.5 * sum(r[i] * r[i + 1] * sin(θ[i + 1] - θ[i]) for i = 1:(N - 1)) - diff --git a/src/PureJuMP/polygon2.jl b/src/PureJuMP/polygon2.jl index f43be3ad..75924d51 100644 --- a/src/PureJuMP/polygon2.jl +++ b/src/PureJuMP/polygon2.jl @@ -17,7 +17,7 @@ function polygon2(args...; n::Int = default_nvar, kwargs...) # impose an order to the angles @constraint(nlp, sum(α) == 2π) - @NLobjective( + @objective( nlp, Min, -0.5 * sum(r[i] * r[i + 1] * sin(α[i]) for i = 1:(N - 1)) - 0.5 * r[1] * r[N] * sin(α[N]) diff --git a/src/PureJuMP/polygon3.jl b/src/PureJuMP/polygon3.jl index 44097ada..9188d237 100644 --- a/src/PureJuMP/polygon3.jl +++ b/src/PureJuMP/polygon3.jl @@ -15,16 +15,16 @@ function polygon3(args...; n::Int = default_nvar, kwargs...) @variable(nlp, y[1:N]) for i = 1:N - @NLconstraint(nlp, x[i]^2 + y[i]^2 - 1 <= 0) + @constraint(nlp, x[i]^2 + y[i]^2 - 1 <= 0) end # add ordering constraint to the vertices for i = 1:(N - 1) - @NLconstraint(nlp, x[i] * y[i + 1] - y[i] * x[i + 1] >= 0) + @constraint(nlp, x[i] * y[i + 1] - y[i] * x[i + 1] >= 0) end - @NLconstraint(nlp, x[N] * y[1] - y[N] * x[1] >= 0) + @constraint(nlp, x[N] * y[1] - y[N] * x[1] >= 0) - @NLobjective( + @objective( nlp, Min, -0.5 * sum(x[i] * y[i + 1] - y[i] * x[i + 1] for i = 1:(N - 1)) - diff --git a/src/PureJuMP/powellbs.jl b/src/PureJuMP/powellbs.jl index 3126ae58..9010976b 100644 --- a/src/PureJuMP/powellbs.jl +++ b/src/PureJuMP/powellbs.jl @@ -13,6 +13,6 @@ function powellbs(args...; kwargs...) nlp = Model() @variable(nlp, x1, start = 0) @variable(nlp, x2, start = 1) - @NLobjective(nlp, Min, 1 / 2 * ((10^(4) * x1 * x2 - 1)^2 + (exp(-x1) + exp(-x2) - 1.0001)^2)) + @objective(nlp, Min, 1 / 2 * ((10^(4) * x1 * x2 - 1)^2 + (exp(-x1) + exp(-x2) - 1.0001)^2)) return nlp end diff --git a/src/PureJuMP/powellsg.jl b/src/PureJuMP/powellsg.jl index f3b2b918..bcb9f7de 100644 --- a/src/PureJuMP/powellsg.jl +++ b/src/PureJuMP/powellsg.jl @@ -48,7 +48,7 @@ function powellsg(args...; n::Int = default_nvar, kwargs...) nlp = Model() @variable(nlp, x[i = 1:n], start = x0[i]) - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/power.jl b/src/PureJuMP/power.jl index 2b5578a9..f38630ae 100644 --- a/src/PureJuMP/power.jl +++ b/src/PureJuMP/power.jl @@ -17,7 +17,7 @@ function power(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 1.0) - @NLobjective(nlp, Min, 0.5 * (sum((i * x[i]^2) for i = 1:n))^2) + @objective(nlp, Min, 0.5 * (sum((i * x[i]^2) for i = 1:n))^2) return nlp end diff --git a/src/PureJuMP/quartc.jl b/src/PureJuMP/quartc.jl index 013c26a8..be702780 100644 --- a/src/PureJuMP/quartc.jl +++ b/src/PureJuMP/quartc.jl @@ -16,7 +16,7 @@ function quartc(args...; n::Int = default_nvar, kwargs...) 
@variable(nlp, x[i = 1:n], start = 2.0) - @NLobjective(nlp, Min, sum((x[i] - i)^4 for i = 1:n)) + @objective(nlp, Min, sum((x[i] - i)^4 for i = 1:n)) return nlp end diff --git a/src/PureJuMP/rat42.jl b/src/PureJuMP/rat42.jl index d9f112be..822a25b3 100644 --- a/src/PureJuMP/rat42.jl +++ b/src/PureJuMP/rat42.jl @@ -54,7 +54,7 @@ function rat42(args...; kwargs...) @variable(nlp, x[j = 1:3]) set_start_value.(x, [100, 1, 0.1]) # other: [75, 2.5, 0.07] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((y[i, 1] - x[1] / (1 + exp(x[2] - x[3] * y[i, 2])))^2 for i = 1:9) diff --git a/src/PureJuMP/rat43.jl b/src/PureJuMP/rat43.jl index d09edfb2..55fe3f9c 100644 --- a/src/PureJuMP/rat43.jl +++ b/src/PureJuMP/rat43.jl @@ -60,7 +60,7 @@ function rat43(args...; kwargs...) @variable(nlp, x[j = 1:4]) set_start_value.(x, [100, 10, 1, 1]) # other: [700, 5, 0.75, 1.3] - @NLobjective( + @objective( nlp, Min, 0.5 * sum((y[i, 1] - x[1] / ((1 + exp(x[2] - x[3] * y[i, 2]))^(1 / x[4])))^2 for i = 1:15) diff --git a/src/PureJuMP/robotarm.jl b/src/PureJuMP/robotarm.jl index 668d67c1..b6252bd9 100644 --- a/src/PureJuMP/robotarm.jl +++ b/src/PureJuMP/robotarm.jl @@ -59,31 +59,31 @@ function robotarm(; n::Int = default_nvar, L = 4.5, kwargs...) @objective(nlp, Min, x[end]) for j = 1:n - @NLconstraint(nlp, -1 <= L * x[6n + j] <= 1) + @constraint(nlp, -1 <= L * x[6n + j] <= 1) end for j = 1:n - @NLconstraint(nlp, -1 <= x[7n + j] * ((L - x[j])^3 + x[j]^3) / 3 * sin(x[2n + j])^2 <= 1) + @constraint(nlp, -1 <= x[7n + j] * ((L - x[j])^3 + x[j]^3) / 3 * sin(x[2n + j])^2 <= 1) end for j = 1:n - @NLconstraint(nlp, -1 <= x[8n + j] * ((L - x[j])^3 + x[j]^3) / 3 <= 1) + @constraint(nlp, -1 <= x[8n + j] * ((L - x[j])^3 + x[j]^3) / 3 <= 1) end for j = 1:(n - 1) - @NLconstraint(nlp, x[j + 1] - x[j] - x[3n + j] * x[end] / n == 0) + @constraint(nlp, x[j + 1] - x[j] - x[3n + j] * x[end] / n == 0) end for j = 1:(n - 1) - @NLconstraint(nlp, x[n + 1 + j] - x[n + j] - x[4n + j] * x[end] / n == 0) + @constraint(nlp, x[n + 1 + j] - x[n + j] - x[4n + j] * x[end] / n == 0) end for j = 1:(n - 1) - @NLconstraint(nlp, x[2n + 1 + j] - x[2n + j] - x[5n + j] * x[end] / n == 0) + @constraint(nlp, x[2n + 1 + j] - x[2n + j] - x[5n + j] * x[end] / n == 0) end for j = 1:(n - 1) - @NLconstraint(nlp, x[3n + 1 + j] - x[3n + j] - x[6n + j] * x[end] / n == 0) + @constraint(nlp, x[3n + 1 + j] - x[3n + j] - x[6n + j] * x[end] / n == 0) end for j = 1:(n - 1) - @NLconstraint(nlp, x[4n + 1 + j] - x[4n + j] - x[7n + j] * x[end] / n == 0) + @constraint(nlp, x[4n + 1 + j] - x[4n + j] - x[7n + j] * x[end] / n == 0) end for j = 1:(n - 1) - @NLconstraint(nlp, x[5n + 1 + j] - x[5n + j] - x[8n + j] * x[end] / n == 0) + @constraint(nlp, x[5n + 1 + j] - x[5n + j] - x[8n + j] * x[end] / n == 0) end return nlp diff --git a/src/PureJuMP/rozman1.jl b/src/PureJuMP/rozman1.jl index c41ac7a3..9531dbb8 100644 --- a/src/PureJuMP/rozman1.jl +++ b/src/PureJuMP/rozman1.jl @@ -70,7 +70,7 @@ function rozman1(args...; kwargs...) @variable(nlp, x[j = 1:4]) set_start_value.(x, [0.1, -0.00001, 1000, -100]) # other: [0.2, -0.000005, 1200, -150] - @NLobjective( + @objective( nlp, Min, 0.5 * diff --git a/src/PureJuMP/sbrybnd.jl b/src/PureJuMP/sbrybnd.jl index dbf4d255..32271f33 100644 --- a/src/PureJuMP/sbrybnd.jl +++ b/src/PureJuMP/sbrybnd.jl @@ -41,7 +41,7 @@ function sbrybnd(args...; n::Int = default_nvar, kwargs...) 
@variable(nlp, x[i = 1:n], start = 1.0 / p[i]) - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/schmvett.jl b/src/PureJuMP/schmvett.jl index ce897668..8dce714b 100644 --- a/src/PureJuMP/schmvett.jl +++ b/src/PureJuMP/schmvett.jl @@ -33,7 +33,7 @@ function schmvett(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 3.0) - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/scosine.jl b/src/PureJuMP/scosine.jl index 75ec56ee..92855613 100644 --- a/src/PureJuMP/scosine.jl +++ b/src/PureJuMP/scosine.jl @@ -35,7 +35,7 @@ function scosine(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 1.0 / p[i]) - @NLobjective(nlp, Min, sum(cos(p[i]^2 * x[i]^2 - p[i + 1] * x[i + 1] / 2.0) for i = 1:(n - 1))) + @objective(nlp, Min, sum(cos(p[i]^2 * x[i]^2 - p[i + 1] * x[i + 1] / 2.0) for i = 1:(n - 1))) return nlp end diff --git a/src/PureJuMP/shpak1.jl b/src/PureJuMP/shpak1.jl index b74a5e6a..caad9552 100644 --- a/src/PureJuMP/shpak1.jl +++ b/src/PureJuMP/shpak1.jl @@ -15,7 +15,7 @@ function Shpak1(args...; kwargs...) @variable(nlp, x, start = 2.7) - @NLobjective(nlp, Min, sin(x) + sin((10.0 / 3.0) * x)) + @objective(nlp, Min, sin(x) + sin((10.0 / 3.0) * x)) return nlp end diff --git a/src/PureJuMP/shpak2.jl b/src/PureJuMP/shpak2.jl index 266b5b7f..ce7c8033 100644 --- a/src/PureJuMP/shpak2.jl +++ b/src/PureJuMP/shpak2.jl @@ -16,7 +16,7 @@ function Shpak2(args...; kwargs...) @variable(nlp, x, start = 3.1) - @NLobjective(nlp, Min, sin(x) + sin((10.0 / 3.0) * x)) + @objective(nlp, Min, sin(x) + sin((10.0 / 3.0) * x)) return nlp end diff --git a/src/PureJuMP/shpak3.jl b/src/PureJuMP/shpak3.jl index 80846fcd..2cde6649 100644 --- a/src/PureJuMP/shpak3.jl +++ b/src/PureJuMP/shpak3.jl @@ -16,7 +16,7 @@ function Shpak3(args...; kwargs...) @variable(nlp, x, start = -10.0) - @NLobjective( + @objective( nlp, Min, 1 * sin(2 * x + 1) + diff --git a/src/PureJuMP/shpak4.jl b/src/PureJuMP/shpak4.jl index 11dd4696..84498d85 100644 --- a/src/PureJuMP/shpak4.jl +++ b/src/PureJuMP/shpak4.jl @@ -17,7 +17,7 @@ function Shpak4(args...; kwargs...) @variable(nlp, x, start = -1.0) - @NLobjective(nlp, Min, (x + sin(x)) * exp(-(x)^2)) + @objective(nlp, Min, (x + sin(x)) * exp(-(x)^2)) return nlp end diff --git a/src/PureJuMP/shpak5.jl b/src/PureJuMP/shpak5.jl index 850083ed..367c89b0 100644 --- a/src/PureJuMP/shpak5.jl +++ b/src/PureJuMP/shpak5.jl @@ -20,7 +20,7 @@ function Shpak5(args...; kwargs...) @variable(nlp, x, start = 0.0) - @NLobjective(nlp, Min, -sum(1.0 / (veck[i]^2 * (x - veca[i])^2 + vecc[i]) for i = 1:10)) + @objective(nlp, Min, -sum(1.0 / (veck[i]^2 * (x - veca[i])^2 + vecc[i]) for i = 1:10)) return nlp end diff --git a/src/PureJuMP/shpak6.jl b/src/PureJuMP/shpak6.jl index 0e217e27..f2ed7b8d 100644 --- a/src/PureJuMP/shpak6.jl +++ b/src/PureJuMP/shpak6.jl @@ -21,7 +21,7 @@ function Shpak6(args...; kwargs...) @variable(nlp, x, start = 0.0) - @NLobjective(nlp, Min, -sum(1.0 / (veck[i]^2 * (x - veca[i])^2 + vecc[i]) for i = 1:10)) + @objective(nlp, Min, -sum(1.0 / (veck[i]^2 * (x - veca[i])^2 + vecc[i]) for i = 1:10)) return nlp end diff --git a/src/PureJuMP/sinquad.jl b/src/PureJuMP/sinquad.jl index c480162c..cd18df0b 100644 --- a/src/PureJuMP/sinquad.jl +++ b/src/PureJuMP/sinquad.jl @@ -27,7 +27,7 @@ function sinquad(args...; n::Int = default_nvar, kwargs...) 
@variable(nlp, x[i = 1:n], start = 0.1) - @NLobjective( + @objective( nlp, Min, (x[1] - 1.0)^4 + diff --git a/src/PureJuMP/sparsine.jl b/src/PureJuMP/sparsine.jl index fd780781..bb884af3 100644 --- a/src/PureJuMP/sparsine.jl +++ b/src/PureJuMP/sparsine.jl @@ -23,7 +23,7 @@ function sparsine(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 0.5) - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/sparsqur.jl b/src/PureJuMP/sparsqur.jl index d5f66cd4..966c805b 100644 --- a/src/PureJuMP/sparsqur.jl +++ b/src/PureJuMP/sparsqur.jl @@ -23,7 +23,7 @@ function sparsqur(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 0.5) - @NLobjective( + @objective( nlp, Min, 1 / 8 * sum( diff --git a/src/PureJuMP/spmsrtls.jl b/src/PureJuMP/spmsrtls.jl index 7616481e..98ae91cc 100644 --- a/src/PureJuMP/spmsrtls.jl +++ b/src/PureJuMP/spmsrtls.jl @@ -28,7 +28,7 @@ function spmsrtls(args...; n::Int = default_nvar, kwargs...) nlp = Model() @variable(nlp, x[i = 1:n], start = x0[i]) - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/srosenbr.jl b/src/PureJuMP/srosenbr.jl index eb96c3da..fc971e61 100644 --- a/src/PureJuMP/srosenbr.jl +++ b/src/PureJuMP/srosenbr.jl @@ -30,7 +30,7 @@ function srosenbr(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = x0[i]) - @NLobjective( + @objective( nlp, Min, sum(100.0 * (x[2 * i] - x[2 * i - 1]^2)^2 + (x[2 * i - 1] - 1.0)^2 for i = 1:div(n, 2)) diff --git a/src/PureJuMP/tetra.jl b/src/PureJuMP/tetra.jl index 3f040579..712437d8 100644 --- a/src/PureJuMP/tetra.jl +++ b/src/PureJuMP/tetra.jl @@ -34,7 +34,7 @@ function tetra( @variable(nlp, lvar[i] <= x[i = 1:n] <= uvar[i], start = x0[i]) - @NLobjective( + @objective( nlp, Min, sum( @@ -62,7 +62,7 @@ function tetra( ) for e = 1:E - @NLconstraint( + @constraint( nlp, sum( (x[TETS[e + E] + N * i] - x[TETS[e] + N * i]) * diff --git a/src/PureJuMP/threepk.jl b/src/PureJuMP/threepk.jl index f9c52846..c27df67d 100644 --- a/src/PureJuMP/threepk.jl +++ b/src/PureJuMP/threepk.jl @@ -143,7 +143,7 @@ function threepk(; n::Int = default_nvar, kwargs...) @variable(nlp, 0.0 <= t3_5 <= Inf, start = 150.0) @variable(nlp, 0.0 <= t4_5 <= Inf, start = 20.0) - @NLobjective( + @objective( nlp, Min, (0.01 * t0_3 - 1.0) * (0.01 * t0_3 - 1.0) + diff --git a/src/PureJuMP/thurber.jl b/src/PureJuMP/thurber.jl index a655e7c5..2f28695c 100644 --- a/src/PureJuMP/thurber.jl +++ b/src/PureJuMP/thurber.jl @@ -83,7 +83,7 @@ function thurber(args...; kwargs...) set_start_value.(x, [1000, 1000, 400, 40, 0.7, 0.3, 0.03]) # other: [1300, 1500, 500, 75, 1, 0.4, 0.05] - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/tointgss.jl b/src/PureJuMP/tointgss.jl index 9cb09b8b..b78fbfef 100644 --- a/src/PureJuMP/tointgss.jl +++ b/src/PureJuMP/tointgss.jl @@ -32,7 +32,7 @@ function tointgss(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = 3.0) - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/tquartic.jl b/src/PureJuMP/tquartic.jl index c33c26a7..d2acf2dd 100644 --- a/src/PureJuMP/tquartic.jl +++ b/src/PureJuMP/tquartic.jl @@ -27,7 +27,7 @@ function tquartic(args...; n::Int = default_nvar, kwargs...) 
@variable(nlp, x[i = 1:n], start = 0.1) - @NLobjective( + @objective( nlp, Min, 0.5 * (x[1] - 1.0)^2 + 0.5 * sum((x[1]^2 - x[i + 1]^2)^2 for i = 1:(n - 2)) diff --git a/src/PureJuMP/triangle.jl b/src/PureJuMP/triangle.jl index 91cdde57..7356d2fc 100644 --- a/src/PureJuMP/triangle.jl +++ b/src/PureJuMP/triangle.jl @@ -26,7 +26,7 @@ function triangle(x0 = xe, TRIS::Vector{Int64} = Tr, Const::Vector{Int64} = Cons @variable(nlp, lvar[i] <= x[i = 1:n] <= uvar[i], start = x0[i]) - @NLobjective( + @objective( nlp, Min, sum( @@ -46,7 +46,7 @@ function triangle(x0 = xe, TRIS::Vector{Int64} = Tr, Const::Vector{Int64} = Cons ) for e = 1:E - @NLconstraint( + @constraint( nlp, 2 * ( (x[TRIS[e + E]] - x[TRIS[e]]) * (x[TRIS[e + 2 * E] + N] - x[TRIS[e] + N]) - diff --git a/src/PureJuMP/tridia.jl b/src/PureJuMP/tridia.jl index 8aecc508..f8cba1f6 100644 --- a/src/PureJuMP/tridia.jl +++ b/src/PureJuMP/tridia.jl @@ -29,7 +29,7 @@ function tridia( @variable(nlp, x[i = 1:n], start = 1.0) - @NLobjective(nlp, Min, γ * (x[1] * δ - 1.0)^2 + sum(i * (-β * x[i - 1] + α * x[i])^2 for i = 2:n)) + @objective(nlp, Min, γ * (x[1] * δ - 1.0)^2 + sum(i * (-β * x[i - 1] + α * x[i])^2 for i = 2:n)) return nlp end diff --git a/src/PureJuMP/vardim.jl b/src/PureJuMP/vardim.jl index 8d3ca50e..c5dc0a46 100644 --- a/src/PureJuMP/vardim.jl +++ b/src/PureJuMP/vardim.jl @@ -20,7 +20,7 @@ function vardim(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n], start = (1 - i / n)) - @NLobjective( + @objective( nlp, Min, sum((x[i] - 1)^2 for i = 1:n) + diff --git a/src/PureJuMP/vibrbeam.jl b/src/PureJuMP/vibrbeam.jl index c1c86aea..8d813277 100644 --- a/src/PureJuMP/vibrbeam.jl +++ b/src/PureJuMP/vibrbeam.jl @@ -135,7 +135,7 @@ function vibrbeam(args...; n::Int = default_nvar, kwargs...) x0 = [-3.5; 1; 0; 0; 1.7; 0; 0; 0] @variable(nlp, x[j = 1:8], start = x0[j]) - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/watson.jl b/src/PureJuMP/watson.jl index 155ca0d0..f1f6028c 100644 --- a/src/PureJuMP/watson.jl +++ b/src/PureJuMP/watson.jl @@ -24,7 +24,7 @@ function watson(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[j = 1:n], start = 0.0) - @NLobjective( + @objective( nlp, Min, 0.5 * sum( diff --git a/src/PureJuMP/woods.jl b/src/PureJuMP/woods.jl index eabcafac..8127b51a 100644 --- a/src/PureJuMP/woods.jl +++ b/src/PureJuMP/woods.jl @@ -50,7 +50,7 @@ function woods(args...; n::Int = default_nvar, kwargs...) @variable(nlp, x[i = 1:n]) set_start_value.(x, x0) - @NLobjective( + @objective( nlp, Min, sum( diff --git a/src/PureJuMP/zangwil3.jl b/src/PureJuMP/zangwil3.jl index e77f1692..7f04cdc8 100644 --- a/src/PureJuMP/zangwil3.jl +++ b/src/PureJuMP/zangwil3.jl @@ -20,7 +20,7 @@ function zangwil3(args...; kwargs...) 
@variable(nlp, x[i = 1:3], start = x0[i]) - @NLobjective(nlp, Min, 0) + @objective(nlp, Min, 0) @constraint(nlp, constr1, x[1] - x[2] + x[3] == 0) diff --git a/test/runtests.jl b/test/runtests.jl index 51a861e2..a57bd3d2 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -14,7 +14,6 @@ include("test_utils.jl") @test ndef == OptimizationProblems.PureJuMP.default_nvar @test ndef == OptimizationProblems.ADNLPProblems.default_nvar -# Fix: :hs247 @testset "problem: $prob" for prob in list_problems pb = string(prob) diff --git a/test/test_utils.jl b/test/test_utils.jl index 62d384ed..08dab5e0 100644 --- a/test/test_utils.jl +++ b/test/test_utils.jl @@ -98,26 +98,28 @@ function test_compatibility( x1 = nlp_ad.meta.x0 x2 = nlp_ad.meta.x0 .+ 0.01 n0 = max(abs(obj(nlp_ad, nlp_ad.meta.x0)), 1) - if !(prob in ["brownal"]) # precision issue + obj_tol = 1e-10 + if !(prob in [:triangle_pacman, :triangle_deer]) # precision issue if isnan(n0) @test isnan(obj(nlp_jump, x1)) else - @test isapprox(obj(nlp_ad, x1), obj(nlp_jump, x1), atol = 1e-14 * n0) + @test isapprox(obj(nlp_ad, x1), obj(nlp_jump, x1), atol = obj_tol * n0) end n0 = max(abs(obj(nlp_ad, x2)), 1) if isnan(n0) @test isnan(obj(nlp_jump, x2)) else - @test isapprox(obj(nlp_ad, x2), obj(nlp_jump, x2), atol = 1e-14 * n0) + @test isapprox(obj(nlp_ad, x2), obj(nlp_jump, x2), atol = obj_tol * n0) end end grad(nlp_ad, x1) # just test that it runs if nlp_ad.meta.ncon > 0 - @test nlp_ad.meta.lcon == nlp_jump.meta.lcon - @test nlp_ad.meta.ucon == nlp_jump.meta.ucon - @test all(isapprox.(cons(nlp_ad, x1), cons(nlp_jump, x1), atol = 1e-10 * n0)) - @test all(isapprox.(cons(nlp_ad, x2), cons(nlp_jump, x2), atol = 1e-10 * n0)) + cons_tol = 1e-10 + @test nlp_ad.meta.lcon ≈ nlp_jump.meta.lcon + @test nlp_ad.meta.ucon ≈ nlp_jump.meta.ucon + @test all(isapprox.(cons(nlp_ad, x1), cons(nlp_jump, x1), atol = cons_tol * n0)) + @test all(isapprox.(cons(nlp_ad, x2), cons(nlp_jump, x2), atol = cons_tol * n0)) @test nlp_jump.meta.lin == nlp_ad.meta.lin end
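
Note (illustration only, not part of the patch): every hunk above applies the same rewrite, replacing the legacy @NLobjective/@NLconstraint macros with @objective/@constraint, which accept nonlinear expressions directly in JuMP 1.15 and later. A minimal sketch of the pattern on a hypothetical two-variable model:

    using JuMP

    model = Model()
    @variable(model, x[1:2], start = 1.0)

    # legacy nonlinear interface (the form removed by this patch):
    #   @NLobjective(model, Min, (x[1] - 1)^2 + 100 * (x[2] - x[1]^2)^2)
    #   @NLconstraint(model, sin(x[1] + x[2]) <= 0.5)

    # new interface (JuMP >= 1.15): nonlinear expressions go straight into
    # @objective and @constraint, so no @NL* macro is needed.
    @objective(model, Min, (x[1] - 1)^2 + 100 * (x[2] - x[1]^2)^2)
    @constraint(model, sin(x[1] + x[2]) <= 0.5)

The objective and constraint expressions themselves are left untouched throughout the patch; only the macro names change.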