Example #1
# NOTE: imports below follow the older PyNumero API used by these snippets;
# module paths may differ in your Pyomo version.
import numpy as np
import matplotlib.pyplot as plt
from pyomo.contrib.pynumero.interfaces.nlp import PyomoNLP
from pyomo.contrib.pynumero.interfaces.nlp_compositions import TwoStageStochasticNLP
from pyomo.contrib.pynumero.sparse import BlockSymMatrix

# The QP data G, A, b, c and the helper create_basic_dense_qp (which builds one
# Pyomo model per scenario) are assumed to be defined earlier in the source file.
models = []
scenarios = dict()
coupling_vars = dict()

# sample one right-hand side per scenario
n_scenarios = 5
np.random.seed(seed=985739465)
bs = [b + np.random.normal(scale=2.0, size=1) for i in range(n_scenarios)]

# build one PyomoNLP per scenario and record the coupling (first-stage) variable
for i in range(n_scenarios):
    instance = create_basic_dense_qp(G, A, bs[i], c)

    nlp = PyomoNLP(instance)
    models.append(instance)
    scenario_name = "s{}".format(i)
    scenarios[scenario_name] = nlp
    # x[0] is the first-stage variable shared across scenarios
    coupling_vars[scenario_name] = [nlp.variable_idx(instance.x[0])]

nlp = TwoStageStochasticNLP(scenarios, coupling_vars)

# initial primal and dual (multiplier) vectors of the composite NLP
x = nlp.x_init()
y = nlp.y_init()

jac_c = nlp.jacobian_c(x)
plt.spy(jac_c)
plt.title('Jacobian of the constraints\n')
plt.show()

hess_lag = nlp.hessian_lag(x, y)
plt.spy(hess_lag.tocoo())
plt.title('Hessian of the Lagrangian function\n')
plt.show()

# assemble the KKT (saddle-point) matrix; only the lower-triangular blocks are
# assigned, the upper block jac_c^T is implied by symmetry
kkt = BlockSymMatrix(2)
kkt[0, 0] = hess_lag
kkt[1, 0] = jac_c
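# For reference, the same [[H, J^T], [J, 0]] layout can be assembled with plain
# SciPy. This is an illustrative, self-contained sketch with made-up data; it
# does not use the PyNumero objects from the example above.
import scipy.sparse as sp

H_demo = sp.csr_matrix([[4.0, 1.0], [1.0, 2.0]])   # stand-in Hessian of the Lagrangian
J_demo = sp.csr_matrix([[1.0, 1.0]])               # stand-in constraint Jacobian

kkt_demo = sp.bmat([[H_demo, J_demo.T], [J_demo, None]], format='csc')
print(kkt_demo.toarray())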
Example #2
                                  model.x3 - 1 == 0)
    model.cost = aml.Objective(expr=model.x1**2 + model.x2**2 + model.x3**2)
    model.consteta1 = aml.Constraint(expr=model.eta1 == model.nominal_eta1)
    model.consteta2 = aml.Constraint(expr=model.eta2 == model.nominal_eta2)

    return model


#################################################################
# This snippet assumes pyomo.environ is imported as aml at the top of the file
# (not shown); the PyNumero imports below follow the older API these examples use.
from scipy.sparse.linalg import spsolve
from pyomo.contrib.pynumero.interfaces.nlp import PyomoNLP
from pyomo.contrib.pynumero.sparse import BlockMatrix, BlockSymMatrix

# solve the nominal problem (eta1 = 4.5, eta2 = 1.0)
m = create_model(4.5, 1.0)
opt = aml.SolverFactory('ipopt')
results = opt.solve(m, tee=True)

#################################################################
nlp = PyomoNLP(m)
x = nlp.x_init()
# compute_init_lam is a helper from the source example that estimates the
# constraint multipliers (duals) at x
y = compute_init_lam(nlp, x=x)

# constraint Jacobian and Hessian of the Lagrangian at the nominal solution
J = nlp.jacobian_g(x)
H = nlp.hessian_lag(x, y)

# KKT matrix [[H, J^T], [J, 0]]; only the lower-triangular blocks are assigned
M = BlockSymMatrix(2)
M[0, 0] = H
M[1, 0] = J

# Np collects the eta1/eta2 columns of the Hessian and Jacobian, i.e. the
# parameter block of the sensitivity system
Np = BlockMatrix(2, 1)
Np[0, 0] = nlp.hessian_lag(x, y, subset_variables_col=[m.eta1, m.eta2])
Np[1, 0] = nlp.jacobian_g(x, subset_variables=[m.eta1, m.eta2])

# solve M * ds = Np; the columns of ds relate changes in (x, y) to changes in
# eta1 and eta2
ds = spsolve(M.tocsc(), Np.tocsc())
print(nlp.variable_order())
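# The linear algebra behind the sensitivity step above, shown on a tiny
# self-contained parametric QP (illustrative data only, not the model from this
# example):  min 0.5*x'Gx + c'x  s.t.  Ax = b(p).  Differentiating the KKT
# conditions gives  [[G, A'], [A, 0]] [dx/dp; dy/dp] = [0; db/dp].
import numpy as np

G_demo = np.array([[2.0, 0.0], [0.0, 2.0]])
c_demo = np.array([-2.0, -2.0])
A_demo = np.array([[1.0, 1.0]])

def solve_kkt(p):
    # KKT system of the equality-constrained QP for parameter value p
    K = np.block([[G_demo, A_demo.T], [A_demo, np.zeros((1, 1))]])
    rhs = np.concatenate([-c_demo, [p]])
    return np.linalg.solve(K, rhs)

# sensitivity system: same KKT matrix, right-hand side [0; db/dp] with db/dp = 1
K = np.block([[G_demo, A_demo.T], [A_demo, np.zeros((1, 1))]])
ds_demo = np.linalg.solve(K, np.array([0.0, 0.0, 1.0]))

# first-order prediction of the solution at p = 1.1 from the solution at p = 1.0
print(solve_kkt(1.0) + 0.1 * ds_demo)   # equals solve_kkt(1.1) exactly for a QP
print(solve_kkt(1.1))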
Example #3
# same setup as Example #1: the imports, the QP data (G, A, bs, c), and the
# models/scenarios/coupling_vars containers are defined as above
for i in range(n_scenarios):
    instance = create_basic_dense_qp(G, A, bs[i], c)

    nlp = PyomoNLP(instance)
    models.append(instance)
    scenario_name = "s{}".format(i)
    scenarios[scenario_name] = nlp
    coupling_vars[scenario_name] = [nlp.variable_idx(instance.x[0])]

nlp = TwoStageStochasticNLP(scenarios, coupling_vars)

x = nlp.x_init()
y = nlp.y_init()

jac_c = nlp.jacobian_c(x)
plt.spy(jac_c)
plt.title('Jacobian of the constraints\n')
plt.show()

hess_lag = nlp.hessian_lag(x, y)
plt.spy(hess_lag.tocoo())
plt.title('Hessian of the Lagrangian function\n')
plt.show()

kkt = BlockSymMatrix(2)
kkt[0, 0] = hess_lag
kkt[1, 0] = jac_c
Example #4
    m.x[3].setlb(0.0)
    m.x[2].setub(100.0)
    m.obj = aml.Objective(expr=m.x[2]**2)
    return m


# solve the model with Ipopt (assumes pyomo.environ is imported as aml and
# PyomoNLP is imported from pyomo.contrib.pynumero as in the examples above)
model = create_basic_model()
solver = aml.SolverFactory('ipopt')
solver.solve(model, tee=True)

# build nlp initialized at the solution
nlp = PyomoNLP(model)

# get initial point
print(nlp.variable_order())
x0 = nlp.x_init()

# vectors containing only the finite lower and upper bounds ("condensed" form)
xl = nlp.xl(condensed=True)
xu = nlp.xu(condensed=True)

# expansion matrices relate the condensed vectors to the full variable space:
# Pxl^T x picks out the components of x that have a finite lower bound
# (and Pxu^T does the same for finite upper bounds)
Pxl = nlp.expansion_matrix_xl()
Pxu = nlp.expansion_matrix_xu()

# residuals to the lower and upper bounds (nonnegative when x0 is within bounds)
res_xl = Pxl.transpose() * x0 - xl
res_xu = xu - Pxu.transpose() * x0
print("Residuals lower bounds x-xl:", res_xl)
print("Residuals upper bounds xu-x:", res_xu)
Example #5
    m.x[2].setlb(0.0)
    m.x[3].setlb(0.0)
    m.x[2].setub(100.0)
    m.obj = aml.Objective(expr=m.x[2]**2)
    return m

# same workflow as Example #4 (see the comments there)
model = create_basic_model()
solver = aml.SolverFactory('ipopt')
solver.solve(model, tee=True)

# build nlp initialized at the solution
nlp = PyomoNLP(model)

# get initial point
print(nlp.variable_order())
x0 = nlp.x_init()

# vectors of finite lower and upper bounds
xl = nlp.xl(condensed=True)
xu = nlp.xu(condensed=True)

# build expansion matrices
Pxl = nlp.expansion_matrix_xl()
Pxu = nlp.expansion_matrix_xu()

# lower and upper bounds residual
res_xl = Pxl.transpose() * x0 - xl
res_xu = xu - Pxu.transpose() * x0
print("Residuals lower bounds x-xl:", res_xl)
print("Residuals upper bounds xu-x:", res_xu)