Example #1
    def test_composite_nlp(self):

        G = np.array([[6, 2, 1], [2, 5, 2], [1, 2, 4]])
        A = np.array([[1, 0, 1], [0, 1, 1]])
        b = np.array([3, 0])
        c = np.array([-8, -3, -3])

        scenarios = dict()
        coupling_vars = dict()
        n_scenarios = 2
        np.random.seed(seed=985739465)
        bs = [b, b + 0.001]

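        # Build one PyomoNLP per scenario; the two scenarios differ only in the
        # right-hand side (b versus b + 0.001).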
        for i in range(n_scenarios):
            instance = create_model3(G, A, bs[i], c)
            nlp = PyomoNLP(instance)
            scenario_name = "s{}".format(i)
            scenarios[scenario_name] = nlp
            coupling_vars[scenario_name] = [nlp.variable_idx(instance.x[0])]

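        # The composite NLP couples the scenarios through the shared first-stage
        # variable x[0], whose index is recorded in coupling_vars above.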
        nlp = TwoStageStochasticNLP(scenarios, coupling_vars)

        solver = CyIpoptSolver(nlp)
        x, info = solver.solve(tee=False)
        x_sol = np.array([
            2.00003846, -0.99996154, 0.99996154, 2.00003846, -0.99996154,
            1.00096154, 2.00003846
        ])

        self.assertTrue(np.allclose(x, x_sol, rtol=1e-4))
        self.assertAlmostEqual(nlp.objective(x), -6.99899, 3)
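These test snippets are excerpts: the imports and the create_model1/create_model2/create_model3 helpers are defined alongside the tests and are not shown here. Under the pre-refactor PyNumero layout that this code appears to target, the setup would look roughly like the sketch below; the module paths are assumptions and may differ between Pyomo versions.

import numpy as np
# assumed locations in the old PyNumero layout; verify against your Pyomo version
from pyomo.contrib.pynumero.interfaces.nlp import PyomoNLP
from pyomo.contrib.pynumero.interfaces.nlp_compositions import TwoStageStochasticNLP
from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import CyIpoptSolver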
Example #2
    def test_model2(self):
        model = create_model2()
        nlp = PyomoNLP(model)
        solver = CyIpoptSolver(nlp)
        x, info = solver.solve(tee=False)
        x_sol = np.array([3.0, 1.99997807])
        y_sol = np.array([0.00017543])
        self.assertTrue(np.allclose(x, x_sol, rtol=1e-4))
        self.assertAlmostEqual(nlp.objective(x), -31.000000057167462, 3)
        self.assertTrue(np.allclose(info['mult_g'], y_sol, rtol=1e-4))
Example #3
    def test_model1(self):
        model = create_model1()
        nlp = PyomoNLP(model)
        solver = CyIpoptSolver(nlp)
        x, info = solver.solve(tee=False)
        x_sol = np.array([3.85958688, 4.67936007, 3.10358931])
        y_sol = np.array([-1.0, 53.90357665])
        self.assertTrue(np.allclose(x, x_sol, rtol=1e-4))
        self.assertAlmostEqual(nlp.objective(x), -428.6362455416348)
        self.assertTrue(np.allclose(info['mult_g'], y_sol, rtol=1e-4))
Example #4
    def test_model3(self):
        G = np.array([[6, 2, 1], [2, 5, 2], [1, 2, 4]])
        A = np.array([[1, 0, 1], [0, 1, 1]])
        b = np.array([3, 0])
        c = np.array([-8, -3, -3])

        model = create_model3(G, A, b, c)
        nlp = PyomoNLP(model)
        solver = CyIpoptSolver(nlp)
        x, info = solver.solve(tee=False)
        x_sol = np.array([2.0, -1.0, 1.0])
        y_sol = np.array([-3., 2.])
        self.assertTrue(np.allclose(x, x_sol, rtol=1e-4))
        self.assertAlmostEqual(nlp.objective(x), -3.5, 3)
        self.assertTrue(np.allclose(info['mult_g'], y_sol, rtol=1e-4))
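create_model3 itself is not part of these excerpts. Judging from the data and the expected results (x = (2, -1, 1) with objective -3.5), it builds the standard equality-constrained dense QP min 0.5 x'Gx + c'x subject to Ax = b. A minimal sketch of such a helper, offered as an illustration rather than the original implementation (the function name and structure are assumptions):

import numpy as np
import pyomo.environ as aml

def create_dense_qp(G, A, b, c):
    # hypothetical stand-in for create_model3: min 0.5*x'Gx + c'x  s.t.  A x = b
    nx = G.shape[0]
    m = aml.ConcreteModel()
    m.x = aml.Var(range(nx), initialize=0.0)

    def eq_rule(m, i):
        return sum(float(A[i, j]) * m.x[j] for j in range(nx)) == float(b[i])
    m.equalities = aml.Constraint(range(A.shape[0]), rule=eq_rule)

    m.obj = aml.Objective(
        expr=0.5 * sum(m.x[i] * float(G[i, j]) * m.x[j]
                       for i in range(nx) for j in range(nx))
        + sum(float(c[j]) * m.x[j] for j in range(nx)))
    return m

Solving this sketch with Ipopt for the G, A, b, c above reproduces the expected solution x = (2, -1, 1) and objective value -3.5.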
Example #5
G = np.array([[6, 2, 1], [2, 5, 2], [1, 2, 4]])
A = np.array([[1, 0, 1], [0, 1, 1]])
b = np.array([3, 0])
c = np.array([-8, -3, -3])

models = []
scenarios = dict()
coupling_vars = dict()
n_scenarios = 5
np.random.seed(seed=985739465)
bs = [b + np.random.normal(scale=2.0, size=1) for i in range(n_scenarios)]

for i in range(n_scenarios):
    instance = create_basic_dense_qp(G, A, bs[i], c)

    nlp = PyomoNLP(instance)
    models.append(instance)
    scenario_name = "s{}".format(i)
    scenarios[scenario_name] = nlp
    coupling_vars[scenario_name] = [nlp.variable_idx(instance.x[0])]

nlp = TwoStageStochasticNLP(scenarios, coupling_vars)

x = nlp.x_init()
y = nlp.y_init()

jac_c = nlp.jacobian_c(x)
plt.spy(jac_c)
plt.title('Jacobian of the constraints\n')
plt.show()
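With 5 scenarios of 3 variables each plus a single first-stage copy of the coupling variable, the composite problem has 16 primal variables (the 7-entry solution vector in Example #1 shows the same pattern for 2 scenarios). A quick sanity check on the vectors built above, assuming x_init() and y_init() return flat numpy arrays as the rest of the snippet implies:

# composite problem sizes (counts inferred from the construction above)
print("primal variables:", x.size)   # expected 16 = 5 scenarios * 3 + 1 coupling variable
print("multipliers:", y.size)        # presumably 15 = 5*2 scenario equalities + 5 coupling constraints
print("constraint jacobian:", jac_c.shape)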
Example #6
File: derivatives.py, Project: CanLi1/pyomo
    m.init_condition_names = ['init_conditions']
    return m


instance = create_problem(0.0, 10.0)
# Discretize model using Orthogonal Collocation
discretizer = aml.TransformationFactory('dae.collocation')
discretizer.apply_to(instance, nfe=100, ncp=3, scheme='LAGRANGE-RADAU')
discretizer.reduce_collocation_points(instance,
                                      var=instance.u,
                                      ncp=1,
                                      contset=instance.t)

# Interface pyomo model with nlp
nlp = PyomoNLP(instance)
x = nlp.create_vector_x()
lam = nlp.create_vector_y()

# Evaluate jacobian
jac_c = nlp.jacobian_g(x)
plt.spy(jac_c)
plt.title('Jacobian of the constraints\n')
plt.show()

# Evaluate hessian of the lagrangian
hess_lag = nlp.hessian_lag(x, lam)
plt.spy(hess_lag)
plt.title('Hessian of the Lagrangian function\n')
plt.show()
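plt.spy accepts both dense arrays and scipy.sparse matrices, so the snippet does not show which type jacobian_g and hessian_lag return. Assuming they come back as scipy.sparse matrices (which the sparsity plots suggest), the problem size and fill-in can be inspected directly:

# dimensions and structural nonzeros (assumes scipy.sparse return types)
print("constraint jacobian:", jac_c.shape, "nnz =", jac_c.nnz)
print("hessian of the Lagrangian:", hess_lag.shape, "nnz =", hess_lag.nnz)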
Example #7
File: sensitivity.py, Project: CanLi1/pyomo
    model.const2 = aml.Constraint(expr=model.eta2 * model.x1 + model.x2 -
                                  model.x3 - 1 == 0)
    model.cost = aml.Objective(expr=model.x1**2 + model.x2**2 + model.x3**2)
    model.consteta1 = aml.Constraint(expr=model.eta1 == model.nominal_eta1)
    model.consteta2 = aml.Constraint(expr=model.eta2 == model.nominal_eta2)

    return model


#################################################################
m = create_model(4.5, 1.0)
opt = aml.SolverFactory('ipopt')
results = opt.solve(m, tee=True)

#################################################################
nlp = PyomoNLP(m)
x = nlp.x_init()
y = compute_init_lam(nlp, x=x)

J = nlp.jacobian_g(x)
H = nlp.hessian_lag(x, y)

M = BlockSymMatrix(2)
M[0, 0] = H
M[1, 0] = J

Np = BlockMatrix(2, 1)
Np[0, 0] = nlp.hessian_lag(x, y, subset_variables_col=[m.eta1, m.eta2])
Np[1, 0] = nlp.jacobian_g(x, subset_variables=[m.eta1, m.eta2])

ds = spsolve(M.tocsc(), Np.tocsc())
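Here M is the KKT matrix at the solution (the Hessian of the Lagrangian bordered by the constraint Jacobian) and Np holds the derivatives of the same quantities with respect to the parameter variables eta1 and eta2, so each column of ds carries the sensitivity of the primal-dual solution to one parameter. A small inspection sketch; the split below assumes ds has len(x) + len(y) rows and one column per parameter, and the sign convention depends on how the KKT system was assembled, so it is not asserted here:

# ds stacks primal and multiplier sensitivities, one column per parameter
ds_arr = ds.toarray() if hasattr(ds, "toarray") else np.asarray(ds)
print("sensitivity matrix shape:", ds_arr.shape)   # (len(x) + len(y), 2)
dxdp = ds_arr[:len(x), :]    # primal block: sensitivities of x to (eta1, eta2)
dydp = ds_arr[len(x):, :]    # multiplier block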
Example #8
File: derivatives.py, Project: Pyomo/pyomo
    m.integral = dae.Integral(m.t, wrt=m.t, rule=_int_rule)

    m.obj = aml.Objective(expr=m.integral)

    m.init_condition_names = ['init_conditions']
    return m

instance = create_problem(0.0, 10.0)
# Discretize model using Orthogonal Collocation
discretizer = aml.TransformationFactory('dae.collocation')
discretizer.apply_to(instance, nfe=100, ncp=3, scheme='LAGRANGE-RADAU')
discretizer.reduce_collocation_points(instance, var=instance.u, ncp=1, contset=instance.t)

# Interface pyomo model with nlp
nlp = PyomoNLP(instance)
x = nlp.create_vector_x()
lam = nlp.create_vector_y()

# Evaluate jacobian
jac_c = nlp.jacobian_g(x)
plt.spy(jac_c)
plt.title('Jacobian of the constraints\n')
plt.show()

# Evaluate hessian of the lagrangian
hess_lag = nlp.hessian_lag(x, lam)
plt.spy(hess_lag)
plt.title('Hessian of the Lagrangian function\n')
plt.show()
Example #9
File: feasibility.py, Project: CanLi1/pyomo
    m.d1 = aml.Constraint(expr=m.x[1] + m.x[2] <= 100.0)
    m.d2 = aml.Constraint(expr=m.x[2] + m.x[3] >= -100.0)
    m.d3 = aml.Constraint(expr=m.x[2] + m.x[3] + m.x[1] >= -500.0)
    m.x[2].setlb(0.0)
    m.x[3].setlb(0.0)
    m.x[2].setub(100.0)
    m.obj = aml.Objective(expr=m.x[2]**2)
    return m


model = create_basic_model()
solver = aml.SolverFactory('ipopt')
solver.solve(model, tee=True)

# build nlp initialized at the solution
nlp = PyomoNLP(model)

# get initial point
print(nlp.variable_order())
x0 = nlp.x_init()

# vectors of finite lower and upper bounds
xl = nlp.xl(condensed=True)
xu = nlp.xu(condensed=True)

# build expansion matrices
Pxl = nlp.expansion_matrix_xl()
Pxu = nlp.expansion_matrix_xu()

# lower and upper bounds residual
res_xl = Pxl.transpose() * x0 - xl
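Pxl and Pxu expand the condensed bound vectors back into the full variable space, so Pxl.transpose() * x0 picks out exactly the entries of x0 that have a finite lower bound. The original script presumably continues symmetrically for the upper bounds; a sketch of that continuation together with a simple feasibility check at the Ipopt solution:

# upper-bound residual, mirroring the lower-bound computation above
res_xu = xu - Pxu.transpose() * x0

# at a feasible point both residual vectors should be (numerically) nonnegative
print("lower bounds satisfied:", np.all(res_xl >= -1e-8))
print("upper bounds satisfied:", np.all(res_xu >= -1e-8))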
Example #10
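# Note: this excerpt starts mid-script; G, A and b are defined earlier
# (see Example #5 above).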
c = np.array([-8, -3, -3])

models = []
scenarios = dict()
coupling_vars = dict()
n_scenarios = 5
np.random.seed(seed=985739465)
bs = [b+np.random.normal(scale=2.0, size=1) for i in range(n_scenarios)]

for i in range(n_scenarios):
    instance = create_basic_dense_qp(G,
                                     A,
                                     bs[i],
                                     c)

    nlp = PyomoNLP(instance)
    models.append(instance)
    scenario_name = "s{}".format(i)
    scenarios[scenario_name] = nlp
    coupling_vars[scenario_name] = [nlp.variable_idx(instance.x[0])]

nlp = TwoStageStochasticNLP(scenarios, coupling_vars)

x = nlp.x_init()
y = nlp.y_init()

jac_c = nlp.jacobian_c(x)
plt.spy(jac_c)
plt.title('Jacobian of the constraints\n')
plt.show()
Example #11
File: feasibility.py, Project: Pyomo/pyomo
    m.c2 = aml.Constraint(expr=m.x[1] - m.x[3] - 0.5 == 0)
    m.d1 = aml.Constraint(expr=m.x[1] + m.x[2] <= 100.0)
    m.d2 = aml.Constraint(expr=m.x[2] + m.x[3] >= -100.0)
    m.d3 = aml.Constraint(expr=m.x[2] + m.x[3] + m.x[1] >= -500.0)
    m.x[2].setlb(0.0)
    m.x[3].setlb(0.0)
    m.x[2].setub(100.0)
    m.obj = aml.Objective(expr=m.x[2]**2)
    return m

model = create_basic_model()
solver = aml.SolverFactory('ipopt')
solver.solve(model, tee=True)

# build nlp initialized at the solution
nlp = PyomoNLP(model)

# get initial point
print(nlp.variable_order())
x0 = nlp.x_init()

# vectors of finite lower and upper bounds
xl = nlp.xl(condensed=True)
xu = nlp.xu(condensed=True)

# build expansion matrices
Pxl = nlp.expansion_matrix_xl()
Pxu = nlp.expansion_matrix_xu()

# lower and upper bounds residual
res_xl = Pxl.transpose() * x0 - xl