Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 10 additions & 3 deletions src/NonLinearProgram/nlp_utilities.jl
Original file line number Diff line number Diff line change
Expand Up @@ -489,9 +489,16 @@ function _compute_sensitivity(model::Model; tol = 1e-6)
# Dual bounds upper
∂s[((num_w+num_cons+num_lower+1):end), :] *= -_sense_multiplier

# dual wrt parameter
grad = _compute_gradient(model)
# `grad` = [∇ₓf(x,p); ∇ₚf(x,p)] where `x` is the primal vars, `p` is the params,
# and `f(x,p)` is the objective function. we extract the components
# so we can form `∇ₚfᵒ(x,p) = ∇ₓf(x,p) * ∇ₚxᵒ(p) + ∇ₚf(x,p) * ∇ₚpᵒ(p)`
# where `ᵒ` denotes "optimal". note that parameters are fixed, so
# pᵒ(p) = p and ∇ₚpᵒ(p) = 𝐈ₚ.
primal_idx = [i.value for i in model.cache.primal_vars]
df_dx = _compute_gradient(model)[primal_idx]
df_dp = df_dx'∂s[1:num_vars, :]
params_idx = [i.value for i in model.cache.params]
df_dx = grad[primal_idx] # ∇ₓf(x,p)
df_dp_direct = grad[params_idx] # ∇ₚf(x,p)
df_dp = df_dx'∂s[1:num_vars, :] + df_dp_direct' # ∇ₚfᵒ(x,p) = ∇ₓf(x,p) * ∇ₚxᵒ(p) + ∇ₚf(x,p) * 𝐈ₚ
return ∂s, df_dp
end
34 changes: 34 additions & 0 deletions test/nlp_program.jl
Original file line number Diff line number Diff line change
Expand Up @@ -740,6 +740,40 @@ function test_ObjectiveSensitivity_model2()
@test isapprox(dp, -1.5; atol = 1e-4)
end

function test_ObjectiveSensitivity_direct_param_contrib()
    # Objective f(x, p) = p^2 * x^2 with x ≥ 1 is minimized at x* = 1, so the
    # optimal value is p^2 and d/dp of the optimal objective is 2p. Because the
    # optimal x does not move with p here, this isolates the direct ∇ₚf(x, p)
    # contribution to the forward objective sensitivity.
    model = DiffOpt.nonlinear_diff_model(Ipopt.Optimizer)
    set_silent(model)

    base_p = 3.0
    @variable(model, p ∈ MOI.Parameter(base_p))
    @variable(model, x ≥ 1)
    @objective(model, Min, p^2 * x^2)

    optimize!(model)
    @assert is_solved_and_feasible(model)

    # Seed the forward mode with a perturbation direction for the parameter.
    direction = 0.1
    DiffOpt.set_forward_parameter(model, p, direction)
    DiffOpt.forward_differentiate!(model)

    sensitivity = MOI.get(model, DiffOpt.ForwardObjectiveSensitivity())
    # Analytic directional derivative: 2 * p * direction (≈ 0.6 at p = 3).
    @test isapprox(sensitivity, 2 * base_p * direction, atol = 1e-8)

    # Cross-check against a central finite difference of the optimal objective,
    # scaled by the same perturbation direction.
    step = 1e-6
    objective_at = function (val)
        set_parameter_value(p, val)
        optimize!(model)
        return objective_value(model)
    end
    fd_plus = direction * objective_at(base_p + step)
    fd_minus = direction * objective_at(base_p - step)
    fd_estimate = (fd_plus - fd_minus) / (2step)

    @test isapprox(sensitivity, fd_estimate, atol = 1e-4)
end
function test_ObjectiveSensitivity_subset_parameters()
# Model with 10 parameters, differentiate only w.r.t. 3rd and 7th
model = Model(() -> DiffOpt.diff_optimizer(Ipopt.Optimizer))
Expand Down
Loading