diff --git a/src/NonLinearProgram/nlp_utilities.jl b/src/NonLinearProgram/nlp_utilities.jl
index edcbe6e8..12e639fa 100644
--- a/src/NonLinearProgram/nlp_utilities.jl
+++ b/src/NonLinearProgram/nlp_utilities.jl
@@ -489,9 +489,16 @@ function _compute_sensitivity(model::Model; tol = 1e-6)
     # Dual bounds upper
     ∂s[((num_w+num_cons+num_lower+1):end), :] *= -_sense_multiplier
-    # dual wrt parameter
+    grad = _compute_gradient(model)
+    # `grad` = [∇ₓf(x,p); ∇ₚf(x,p)] where `x` is the primal vars, `p` is the params,
+    # and `f(x,p)` is the objective function. we extract the components
+    # so we can form `∇ₚfᵒ(x,p) = ∇ₓf(x,p) * ∇ₚxᵒ(p) + ∇ₚf(x,p) * ∇ₚpᵒ(p)`
+    # where `ᵒ` denotes "optimal". note that parameters are fixed, so
+    # pᵒ(p) = p and ∇ₚpᵒ(p) = 𝐈ₚ.
     primal_idx = [i.value for i in model.cache.primal_vars]
-    df_dx = _compute_gradient(model)[primal_idx]
-    df_dp = df_dx'∂s[1:num_vars, :]
+    params_idx = [i.value for i in model.cache.params]
+    df_dx = grad[primal_idx] # ∇ₓf(x,p)
+    df_dp_direct = grad[params_idx] # ∇ₚf(x,p)
+    df_dp = df_dx'∂s[1:num_vars, :] + df_dp_direct' # ∇ₚfᵒ(x,p) = ∇ₓf(x,p) * ∇ₚxᵒ(p) + ∇ₚf(x,p) * 𝐈ₚ
     return ∂s, df_dp
 end
 
diff --git a/test/nlp_program.jl b/test/nlp_program.jl
index c266c715..53fd9c12 100644
--- a/test/nlp_program.jl
+++ b/test/nlp_program.jl
@@ -740,6 +740,40 @@ function test_ObjectiveSensitivity_model2()
     @test isapprox(dp, -1.5; atol = 1e-4)
 end
 
+function test_ObjectiveSensitivity_direct_param_contrib()
+    model = DiffOpt.nonlinear_diff_model(Ipopt.Optimizer)
+    set_silent(model)
+
+    p_val = 3.0
+    @variable(model, p ∈ MOI.Parameter(p_val))
+    @variable(model, x ≥ 1)
+    @objective(model, Min, p^2 * x^2)
+
+    optimize!(model)
+    @assert is_solved_and_feasible(model)
+
+    Δp = 0.1
+    DiffOpt.set_forward_parameter(model, p, Δp)
+    DiffOpt.forward_differentiate!(model)
+
+    df_dp = MOI.get(model, DiffOpt.ForwardObjectiveSensitivity())
+    @test isapprox(df_dp, 2 * p_val * Δp, atol = 1e-8) # ≈ 0.6 for p=3
+
+    ε = 1e-6
+    df_dp_fdpos = begin
+        set_parameter_value(p, p_val + ε)
+        optimize!(model)
+        Δp * objective_value(model)
+    end
+    df_dp_fdneg = begin
+        set_parameter_value(p, p_val - ε)
+        optimize!(model)
+        Δp * objective_value(model)
+    end
+    df_dp_fd = (df_dp_fdpos - df_dp_fdneg) / (2ε)
+
+    @test isapprox(df_dp, df_dp_fd, atol = 1e-4)
+end
 function test_ObjectiveSensitivity_subset_parameters()
     # Model with 10 parameters, differentiate only w.r.t. 3rd and 7th
     model = Model(() -> DiffOpt.diff_optimizer(Ipopt.Optimizer))