JuMP优化中的UndefVarError



我正试图使用JuMP在Julia中编写一个优化问题。目标函数有两个矩阵乘法
首先,将大小为 (10,) 的向量 w 乘以大小为 (20,10) 的矩阵 arr_C。因此,应该将 w 转置为大小 (1,10) 以执行矩阵乘法。
第二步,将大小为 (20,) 的向量 w_each_sim 乘以第一次乘法的结果,后者也是大小为 (20,) 的向量。因此,该乘法应该按 (1x20)x(20x1) 来实现,得到一个标量。请阅读到问题的最后一行,因为我根据建议应用了更新。我的第一次尝试如下:

julia> using JuMP, Ipopt
julia> a = rand(20, 10);
julia> b = rand(20); b = b./sum(b)
julia> function port_opt(
n_assets::Int8,
arr_C::Matrix{Float64},
w_each_sim::Vector{Float64})
"""
Calculate weight of each asset through optimization
Parameters
----------
n_assets::Int8 - number of assets
arr_C::Matrix{Float64} - array of C
w_each_sim::Vector{Float64} - weights of each similar TW
Returns
-------
w_opt::Vector{Float64} - weights of each asset
"""
model = Model(Ipopt.Optimizer)
@variable(model, 0<= w[1:n_assets] <=1)
# BUG: JuMP's legacy @NL* macros do not support array comprehensions such as
# `[w[i] for i in 1:n_assets]` — the macro rewriting cannot see the binding
# of `i`, which is what produces the `UndefVarError: i not defined` below.
# (A plain generator, `sum(w[i] for i in 1:n_assets)`, is the supported form.)
@NLconstraint(model, sum([w[i] for i in 1:n_assets]) == 1)
# BUG: @NLobjective accepts only scalar expressions — broadcasting log10.(…)
# over a vector and left-multiplying by the vector `w_each_sim` is rejected.
# NOTE(review): `arr_C[i]` also indexes the 20×10 matrix linearly (column
# order), not "row i" — presumably not what was intended; verify.
@NLobjective(model, Max, w_each_sim * log10.([w[i]*arr_C[i] for i in 1:n_assets]))
optimize!(model)
@show value.(w)
return value.(w)
end

julia> port_opt(Int8(10), a, b)
ERROR: UndefVarError: i not defined
Stacktrace:
[1] macro expansion
@ C:\Users\JL\.julia\packages\JuMP\Z1pVn\src\macros.jl:1834 [inlined]
[2] port_opt(n_assets::Int8, arr_C::Matrix{Float64}, w_each_sim::Vector{Float64})
@ Main e:\MyWork\paper\base.jl:237
[3] top-level scope
@ REPL[4]:1

问题出在@NLconstraint线路上。我如何才能使此代码正常工作并完成优化?

脂肪测试

正如@Shayan所建议的,我将目标函数纠正如下:

# Objective helper suggested in the comments: w' * arr_C' is the 1×20 row of
# portfolio values, log10 is applied elementwise, then weighted by w_each_sim.
function Obj(w, arr_C, w_each_sim)
first_expr = w'*arr_C'
second_expr = map(first_expr) do x
log10(x)
end
# BUG: `w_each_sim * second_expr` multiplies a 20-vector by a 1×20 row —
# an outer product (20×20 matrix), not the intended scalar; a dot product
# (`w_each_sim' * vec(second_expr)`) would be needed. Regardless, JuMP's
# @NLobjective cannot evaluate vector expressions on `w` (see error below).
return w_each_sim * second_expr
end
function port_opt(
n_assets::Int8,
arr_C::Matrix{Float64},
w_each_sim::Vector{Float64})

# (body elided in the original post — the unchanged model/variable setup)
....
....
@NLconstraint(model, sum(w[i] for i in 1:n_assets) == 1)
# BUG: `Obj` is handed the whole variable vector `w`; JuMP's nonlinear
# interface only accepts scalar expressions, which is exactly what the
# "Unexpected array VariableRef[...]" error below reports.
@NLobjective(model, Max, Obj(w, arr_C, w_each_sim))
optimize!(model)
@show value.(w)
return value.(w)
end
a, b = rand(20, 10), rand(20); b = b./sum(b);
port_opt(Int8(10), a, b)
# Threw this:
ERROR: Unexpected array VariableRef[w[1], w[2], w[3], w[4], w[5], w[6], w[7], w[8], w[9], w[10]] in nonlinear expression. Nonlinear expressions may contain only scalar expressions.

现在,根据@PrzemyslawSzufel的建议,我尝试了这个:

"""
    Obj(w, arr_C, w_each_sim)

Compute the portfolio objective `sum_i w_each_sim[i] * log(sum_j w[j] * arr_C[i, j])`.

Written to be fully generic over the element type of `w`, so it remains
differentiable when JuMP/ForwardDiff passes `Dual` numbers instead of
`Float64` (the quoted error message explicitly asks for `zeros(T, n)`-style
generic allocation).

# Arguments
- `w`: asset-weight vector (may carry ForwardDiff `Dual`s)
- `arr_C::Matrix{T}`: (n_sims × n_assets) matrix of C values
- `w_each_sim::Vector{T}`: per-simulation weights

# Returns
- the scalar objective value, of the promoted element type
"""
function Obj(w, arr_C::Matrix{T}, w_each_sim::Vector{T}) where {T<:Real}
    # Accumulate in a type that also covers eltype(w), not bare Float64,
    # so autodiff inputs are not truncated.
    S = promote_type(eltype(w), T)
    first_expr = zeros(S, length(w_each_sim))
    # BUG FIX: the original looped `for i ∈ size(w_each_sim, 1)`, which
    # iterates over the single integer n — only first_expr[n] was ever
    # filled, leaving log(0) = -Inf everywhere else.
    for i in eachindex(w_each_sim), j in eachindex(w)
        first_expr[i] += w[j] * arr_C[i, j]
    end
    second_expr = log.(first_expr)
    res = zero(S)  # was `res = 0` (Int), another Float64/Int type pin
    for idx in eachindex(w_each_sim)
        res += w_each_sim[idx] * second_expr[idx]
    end
    return res
end
function port_opt(
n_assets::Int8,
arr_C::Matrix{Float64},
w_each_sim::Vector{Float64})
# NOTE(review): no optimizer is attached (`Model()` instead of
# `Model(Ipopt.Optimizer)`) — optimize! could not solve even if the
# registration below succeeded.
model = Model()
@variable(model, 0<= w[1:n_assets] <=1)
# Splatting expresses the (linear) budget constraint without a
# comprehension: +(w...) is w[1] + w[2] + ... + w[n].
@NLconstraint(model, +(w...) == 1)
# BUG: a multivariate function registered with `register` must accept
# `n_assets` *scalar* arguments and be called as `Obj(w...)`; here the
# declared arity Int64(n_assets) disagrees with the 3-argument call on the
# next line, and Obj allocates Float64 storage internally — which is why
# the ForwardDiff registration error below is thrown.
register(model, :Obj, Int64(n_assets), Obj, autodiff=true)
@NLobjective(model, Max, Obj(w, arr_C, w_each_sim))
optimize!(model)
@show value.(w)
return value.(w)
end
a, b = rand(20, 10), rand(20); b = b./sum(b);
port_opt(Int8(10), a, b)
# threw this
ERROR: Unable to register the function :Obj because it does not support differentiation via ForwardDiff.
Common reasons for this include:
* the function assumes `Float64` will be passed as input, it must work for any
generic `Real` type.
* the function allocates temporary storage using `zeros(3)` or similar. This
defaults to `Float64`, so use `zeros(T, 3)` instead.

这是JuMP社区论坛上的提问:https://discourse.julialang.org/t/optimization-using-jump/89720

交叉发布我的解决方案:

using JuMP, Ipopt
# Example data: 20 simulations × 10 assets, and a normalized weight vector.
a = rand(20, 10)
b = rand(20); b = b./sum(b)
"""
Calculate weight of each asset through optimization
Parameters
----------
n_assets::Int8 - number of assets
arr_C::Matrix{Float64} - array of C
w_each_sim::Vector{Float64} - weights of each similar TW
Returns
-------
w_opt::Vector{Float64} - weights of each asset
"""
function port_opt(
n_assets::Int8,
arr_C::Matrix{Float64},
w_each_sim::Vector{Float64},
)
model = Model(Ipopt.Optimizer)
@variable(model, 0<= w[1:n_assets] <=1)
@NLconstraint(model, sum(w[i] for i in 1:n_assets) == 1)
@NLobjective(
model,
Max, 
sum(
w_each_sim[i] * log10(sum(w[j] * arr_C[i, j] for j in 1:size(arr_C, 2)))
for i in 1:length(w_each_sim)
)
)
optimize!(model)
return value.(w)
end
port_opt(Int8(10), a, b)

您需要将约束重新表述为:

@constraint(model, sum([w[i] for i in 1:n_assets]) == 1)

@NLconstraint(model, +(w...) == 1)

关于评论中指出的目标,向量乘法有问题。你想要的最有可能是下面这种写法(这很有效,模型也得到了解决):

@NLobjective(model, Max,sum(w_each_sim[i]*log(w[i]*arr_C[i]) for i in 1:n_assets))

编辑:

关于同一问题:https://discourse.julialang.org/t/optimization-using-jump/89720

最新更新