Code Example #1
    def test_compare_evaluations(self):
        A1 = 5
        A2 = 10
        c1 = 3
        c2 = 4
        N = 6
        dt = 1

        m = create_pyomo_model(A1, A2, c1, c2, N, dt)
        solver = pyo.SolverFactory('ipopt')
        solver.options['linear_solver'] = 'mumps'
        status = solver.solve(m, tee=False)
        m_nlp = PyomoNLP(m)

        mex = create_pyomo_external_grey_box_model(A1, A2, c1, c2, N, dt)
        # mex_nlp = PyomoGreyBoxNLP(mex)
        mex_nlp = PyomoNLPWithGreyBoxBlocks(mex)

        # get the variable and constraint order and create the maps
        # needed for reliable, order-independent comparisons
        m_x_order = m_nlp.primals_names()
        m_c_order = m_nlp.constraint_names()
        mex_x_order = mex_nlp.primals_names()
        mex_c_order = mex_nlp.constraint_names()

        x1list = [
            'h1[0]', 'h1[1]', 'h1[2]', 'h1[3]', 'h1[4]', 'h1[5]', 'h2[0]',
            'h2[1]', 'h2[2]', 'h2[3]', 'h2[4]', 'h2[5]', 'F1[1]', 'F1[2]',
            'F1[3]', 'F1[4]', 'F1[5]', 'F2[1]', 'F2[2]', 'F2[3]', 'F2[4]',
            'F2[5]', 'F12[0]', 'F12[1]', 'F12[2]', 'F12[3]', 'F12[4]',
            'F12[5]', 'Fo[0]', 'Fo[1]', 'Fo[2]', 'Fo[3]', 'Fo[4]', 'Fo[5]'
        ]
        x2list = [
            'egb.inputs[h1_0]', 'egb.inputs[h1_1]', 'egb.inputs[h1_2]',
            'egb.inputs[h1_3]', 'egb.inputs[h1_4]', 'egb.inputs[h1_5]',
            'egb.inputs[h2_0]', 'egb.inputs[h2_1]', 'egb.inputs[h2_2]',
            'egb.inputs[h2_3]', 'egb.inputs[h2_4]', 'egb.inputs[h2_5]',
            'egb.inputs[F1_1]', 'egb.inputs[F1_2]', 'egb.inputs[F1_3]',
            'egb.inputs[F1_4]', 'egb.inputs[F1_5]', 'egb.inputs[F2_1]',
            'egb.inputs[F2_2]', 'egb.inputs[F2_3]', 'egb.inputs[F2_4]',
            'egb.inputs[F2_5]', 'egb.outputs[F12_0]', 'egb.outputs[F12_1]',
            'egb.outputs[F12_2]', 'egb.outputs[F12_3]', 'egb.outputs[F12_4]',
            'egb.outputs[F12_5]', 'egb.outputs[Fo_0]', 'egb.outputs[Fo_1]',
            'egb.outputs[Fo_2]', 'egb.outputs[Fo_3]', 'egb.outputs[Fo_4]',
            'egb.outputs[Fo_5]'
        ]
        x1_x2_map = dict(zip(x1list, x2list))
        x1idx_x2idx_map = {
            i: mex_x_order.index(x1_x2_map[m_x_order[i]])
            for i in range(len(m_x_order))
        }

        c1list = [
            'h1bal[1]', 'h1bal[2]', 'h1bal[3]', 'h1bal[4]', 'h1bal[5]',
            'h2bal[1]', 'h2bal[2]', 'h2bal[3]', 'h2bal[4]', 'h2bal[5]',
            'F12con[0]', 'F12con[1]', 'F12con[2]', 'F12con[3]', 'F12con[4]',
            'F12con[5]', 'Focon[0]', 'Focon[1]', 'Focon[2]', 'Focon[3]',
            'Focon[4]', 'Focon[5]', 'min_inflow[1]', 'min_inflow[2]',
            'min_inflow[3]', 'min_inflow[4]', 'min_inflow[5]',
            'max_outflow[0]', 'max_outflow[1]', 'max_outflow[2]',
            'max_outflow[3]', 'max_outflow[4]', 'max_outflow[5]', 'h10', 'h20'
        ]
        c2list = [
            'egb.h1bal_1', 'egb.h1bal_2', 'egb.h1bal_3', 'egb.h1bal_4',
            'egb.h1bal_5', 'egb.h2bal_1', 'egb.h2bal_2', 'egb.h2bal_3',
            'egb.h2bal_4', 'egb.h2bal_5', 'egb.output_constraints[F12_0]',
            'egb.output_constraints[F12_1]', 'egb.output_constraints[F12_2]',
            'egb.output_constraints[F12_3]', 'egb.output_constraints[F12_4]',
            'egb.output_constraints[F12_5]', 'egb.output_constraints[Fo_0]',
            'egb.output_constraints[Fo_1]', 'egb.output_constraints[Fo_2]',
            'egb.output_constraints[Fo_3]', 'egb.output_constraints[Fo_4]',
            'egb.output_constraints[Fo_5]', 'min_inflow[1]', 'min_inflow[2]',
            'min_inflow[3]', 'min_inflow[4]', 'min_inflow[5]',
            'max_outflow[0]', 'max_outflow[1]', 'max_outflow[2]',
            'max_outflow[3]', 'max_outflow[4]', 'max_outflow[5]', 'h10', 'h20'
        ]
        c1_c2_map = dict(zip(c1list, c2list))
        c1idx_c2idx_map = {
            i: mex_c_order.index(c1_c2_map[m_c_order[i]])
            for i in range(len(m_c_order))
        }

        # get the primals from m and put them in the correct order for mex
        m_x = m_nlp.get_primals()
        mex_x = np.zeros(len(m_x))
        for i in range(len(m_x)):
            mex_x[x1idx_x2idx_map[i]] = m_x[i]

        # get the duals from m and put them in the correct order for mex
        m_lam = m_nlp.get_duals()
        mex_lam = np.zeros(len(m_lam))
        for i in range(len(m_lam)):
            mex_lam[c1idx_c2idx_map[i]] = m_lam[i]

        mex_nlp.set_primals(mex_x)
        mex_nlp.set_duals(mex_lam)

        m_obj = m_nlp.evaluate_objective()
        mex_obj = mex_nlp.evaluate_objective()
        self.assertAlmostEqual(m_obj, mex_obj, places=4)

        m_gobj = m_nlp.evaluate_grad_objective()
        mex_gobj = mex_nlp.evaluate_grad_objective()
        check_vectors_specific_order(self, m_gobj, m_x_order, mex_gobj,
                                     mex_x_order, x1_x2_map)

        m_c = m_nlp.evaluate_constraints()
        mex_c = mex_nlp.evaluate_constraints()
        check_vectors_specific_order(self, m_c, m_c_order, mex_c, mex_c_order,
                                     c1_c2_map)

        m_j = m_nlp.evaluate_jacobian()
        mex_j = mex_nlp.evaluate_jacobian().todense()
        check_sparse_matrix_specific_order(self, m_j, m_c_order, m_x_order,
                                           mex_j, mex_c_order, mex_x_order,
                                           c1_c2_map, x1_x2_map)

        m_h = m_nlp.evaluate_hessian_lag()
        mex_h = mex_nlp.evaluate_hessian_lag()
        check_sparse_matrix_specific_order(self, m_h, m_x_order, m_x_order,
                                           mex_h, mex_x_order, mex_x_order,
                                           x1_x2_map, x1_x2_map)

        # zero the matrix and re-evaluate in place through the out argument
        mex_h = 0 * mex_h
        mex_nlp.evaluate_hessian_lag(out=mex_h)
        check_sparse_matrix_specific_order(self, m_h, m_x_order, m_x_order,
                                           mex_h, mex_x_order, mex_x_order,
                                           x1_x2_map, x1_x2_map)
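The comparison helpers check_vectors_specific_order and check_sparse_matrix_specific_order are defined elsewhere in the test suite and are not shown here. A minimal sketch of the vector variant, assuming the call signature used above, could look like this:

def check_vectors_specific_order(tst, v1, v1order, v2, v2order, v1_v2_map):
    # hypothetical sketch: compare two vectors whose entries follow different
    # name orderings, translating v1 names into v2 names via v1_v2_map
    tst.assertEqual(len(v1), len(v2))
    for i, name in enumerate(v1order):
        j = v2order.index(v1_v2_map[name])
        tst.assertAlmostEqual(v1[i], v2[j], places=7)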
Code Example #2
File: feasibility.py  Project: CanLi1/pyomo-1
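# Fragment: assumes the earlier part of the script has already built
# nlp = PyomoNLP(model), x0 = nlp.get_primals(), the bound masks
# xlb_mask / xub_mask, and the compressed bounds compressed_xl / compressed_xu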
Cx_xl = build_compression_matrix(xlb_mask)
Cx_xu = build_compression_matrix(xub_mask)

# lower and upper bounds residual
res_xl = Cx_xl * x0 - compressed_xl
res_xu = compressed_xu - Cx_xu * x0
print("Residuals lower bounds x-xl:", res_xl)
print("Residuals upper bounds xu-x:", res_xu)

# set the value of the primals (we can skip the duals)
# here we set them to the initial values, but we could
# set them to anything
nlp.set_primals(x0)

# evaluate residual of equality constraints
print(nlp.constraint_names())
res_eq = nlp.evaluate_eq_constraints()
print("Residuals of equality constraints:", res_eq)

# evaluate residual of inequality constraints
res_ineq = nlp.evaluate_ineq_constraints()

# demonstrate the use of compression from full set of
# lower and upper bounds on the inequality constraints
# to only the finite values using masks
ineqlb_mask = build_bounds_mask(nlp.ineq_lb())
inequb_mask = build_bounds_mask(nlp.ineq_ub())
# get the compressed vector
compressed_ineq_lb = full_to_compressed(nlp.ineq_lb(), ineqlb_mask)
compressed_ineq_ub = full_to_compressed(nlp.ineq_ub(), inequb_mask)
# we can also build compression matrices
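The mask and compression helpers used here come from PyNumero's interface utilities. Conceptually they behave roughly like the following numpy sketch (an illustration under that assumption, not the library implementation):

import numpy as np
import scipy.sparse as sp

def sketch_build_bounds_mask(bounds):
    # True where a bound is finite, i.e. actually constraining
    return np.isfinite(np.asarray(bounds, dtype=float))

def sketch_full_to_compressed(full_vector, mask):
    # keep only the entries selected by the mask
    return np.asarray(full_vector, dtype=float)[mask]

def sketch_build_compression_matrix(mask):
    # 0/1 matrix C with shape (n_finite, n_full) such that C @ v == v[mask]
    rows = np.arange(int(np.sum(mask)))
    cols = np.flatnonzero(mask)
    data = np.ones(len(rows))
    return sp.coo_matrix((data, (rows, cols)), shape=(len(rows), len(mask)))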
Code Example #3
    def test_indices_methods(self):
        nlp = PyomoNLP(self.pm)

        # get_pyomo_variables
        variables = nlp.get_pyomo_variables()
        expected_ids = [id(self.pm.x[i]) for i in range(1, 10)]
        ids = [id(variables[i]) for i in range(9)]
        self.assertTrue(expected_ids == ids)

        variable_names = nlp.variable_names()
        expected_names = [self.pm.x[i].getname() for i in range(1, 10)]
        self.assertTrue(variable_names == expected_names)

        # get_pyomo_constraints
        constraints = nlp.get_pyomo_constraints()
        expected_ids = [id(self.pm.c[i]) for i in range(1, 10)]
        ids = [id(constraints[i]) for i in range(9)]
        self.assertTrue(expected_ids == ids)

        constraint_names = nlp.constraint_names()
        expected_names = [c.getname() for c in nlp.get_pyomo_constraints()]
        self.assertTrue(constraint_names == expected_names)

        # get_pyomo_equality_constraints
        eq_constraints = nlp.get_pyomo_equality_constraints()
        # 2 and 6 are the equality constraints
        eq_indices = [2, 6]  # "indices" here is a bit overloaded
        expected_eq_ids = [id(self.pm.c[i]) for i in eq_indices]
        eq_ids = [id(con) for con in eq_constraints]
        self.assertEqual(eq_ids, expected_eq_ids)

        eq_constraint_names = nlp.equality_constraint_names()
        expected_eq_names = [
            c.getname(fully_qualified=True)
            for c in nlp.get_pyomo_equality_constraints()
        ]
        self.assertEqual(eq_constraint_names, expected_eq_names)

        # get_pyomo_inequality_constraints
        ineq_constraints = nlp.get_pyomo_inequality_constraints()
        # 1, 3, 4, 5, 7, 8, and 9 are the inequality constraints
        ineq_indices = [1, 3, 4, 5, 7, 8, 9]
        expected_ineq_ids = [id(self.pm.c[i]) for i in ineq_indices]
        ineq_ids = [id(con) for con in ineq_constraints]
        self.assertEqual(ineq_ids, expected_ineq_ids)

        # get_primal_indices
        expected_primal_indices = [i for i in range(9)]
        self.assertTrue(
            expected_primal_indices == nlp.get_primal_indices([self.pm.x]))
        expected_primal_indices = [0, 3, 8, 4]
        variables = [self.pm.x[1], self.pm.x[4], self.pm.x[9], self.pm.x[5]]
        self.assertTrue(
            expected_primal_indices == nlp.get_primal_indices(variables))

        # get_constraint_indices
        expected_constraint_indices = [i for i in range(9)]
        self.assertTrue(expected_constraint_indices ==
                        nlp.get_constraint_indices([self.pm.c]))
        expected_constraint_indices = [0, 3, 8, 4]
        constraints = [self.pm.c[1], self.pm.c[4], self.pm.c[9], self.pm.c[5]]
        self.assertTrue(expected_constraint_indices ==
                        nlp.get_constraint_indices(constraints))

        # get_equality_constraint_indices
        pyomo_eq_indices = [2, 6]
        with self.assertRaises(KeyError):
            # At least one data object in container is not an equality
            nlp.get_equality_constraint_indices([self.pm.c])
        eq_constraints = [self.pm.c[i] for i in pyomo_eq_indices]
        expected_eq_indices = [0, 1]
        # ^indices in the list of equality constraints
        eq_constraint_indices = nlp.get_equality_constraint_indices(
            eq_constraints)
        self.assertEqual(expected_eq_indices, eq_constraint_indices)

        # get_inequality_constraint_indices
        pyomo_ineq_indices = [1, 3, 4, 5, 7, 9]
        with self.assertRaises(KeyError):
            # At least one data object in container is not an inequality
            nlp.get_inequality_constraint_indices([self.pm.c])
        ineq_constraints = [self.pm.c[i] for i in pyomo_ineq_indices]
        expected_ineq_indices = [0, 1, 2, 3, 4, 6]
        # ^indices in the list of inequality constraints; c[8] was deliberately left out
        ineq_constraint_indices = nlp.get_inequality_constraint_indices(
            ineq_constraints)
        self.assertEqual(expected_ineq_indices, ineq_constraint_indices)

        # extract_subvector_grad_objective
        expected_gradient = np.asarray(
            [2 * sum((i + 1) * (j + 1) for j in range(9)) for i in range(9)],
            dtype=np.float64)
        grad_obj = nlp.extract_subvector_grad_objective([self.pm.x])
        self.assertTrue(np.array_equal(expected_gradient, grad_obj))

        expected_gradient = np.asarray([
            2 * sum((i + 1) * (j + 1) for j in range(9)) for i in [0, 3, 8, 4]
        ],
                                       dtype=np.float64)
        variables = [self.pm.x[1], self.pm.x[4], self.pm.x[9], self.pm.x[5]]
        grad_obj = nlp.extract_subvector_grad_objective(variables)
        self.assertTrue(np.array_equal(expected_gradient, grad_obj))

        # extract_subvector_constraints
        expected_con = np.asarray(
            [45, 88, 3 * 45, 4 * 45, 5 * 45, 276, 7 * 45, 8 * 45, 9 * 45],
            dtype=np.float64)
        con = nlp.extract_subvector_constraints([self.pm.c])
        self.assertTrue(np.array_equal(expected_con, con))

        expected_con = np.asarray([45, 4 * 45, 9 * 45, 5 * 45],
                                  dtype=np.float64)
        constraints = [self.pm.c[1], self.pm.c[4], self.pm.c[9], self.pm.c[5]]
        con = nlp.extract_subvector_constraints(constraints)
        self.assertTrue(np.array_equal(expected_con, con))

        # extract_submatrix_jacobian
        expected_jac = [[(i) * (j) for j in range(1, 10)]
                        for i in range(1, 10)]
        expected_jac = np.asarray(expected_jac, dtype=np.float64)
        jac = nlp.extract_submatrix_jacobian(pyomo_variables=[self.pm.x],
                                             pyomo_constraints=[self.pm.c])
        dense_jac = jac.todense()
        self.assertTrue(np.array_equal(dense_jac, expected_jac))

        expected_jac = [[(i) * (j) for j in [1, 4, 9, 5]] for i in [2, 6, 4]]
        expected_jac = np.asarray(expected_jac, dtype=np.float64)
        variables = [self.pm.x[1], self.pm.x[4], self.pm.x[9], self.pm.x[5]]
        constraints = [self.pm.c[2], self.pm.c[6], self.pm.c[4]]
        jac = nlp.extract_submatrix_jacobian(pyomo_variables=variables,
                                             pyomo_constraints=constraints)
        dense_jac = jac.todense()
        self.assertTrue(np.array_equal(dense_jac, expected_jac))

        # extract_submatrix_hessian_lag
        expected_hess = [[2.0 * i * j for j in range(1, 10)]
                         for i in range(1, 10)]
        expected_hess = np.asarray(expected_hess, dtype=np.float64)
        hess = nlp.extract_submatrix_hessian_lag(
            pyomo_variables_rows=[self.pm.x], pyomo_variables_cols=[self.pm.x])
        dense_hess = hess.todense()
        self.assertTrue(np.array_equal(dense_hess, expected_hess))

        expected_hess = [[2.0 * i * j for j in [1, 4, 9, 5]]
                         for i in [1, 4, 9, 5]]
        expected_hess = np.asarray(expected_hess, dtype=np.float64)
        variables = [self.pm.x[1], self.pm.x[4], self.pm.x[9], self.pm.x[5]]
        hess = nlp.extract_submatrix_hessian_lag(
            pyomo_variables_rows=variables, pyomo_variables_cols=variables)
        dense_hess = hess.todense()
        self.assertTrue(np.array_equal(dense_hess, expected_hess))
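The fixture self.pm is built in the test's setUp and is not shown here. As a standalone illustration of the same submatrix API, a hypothetical toy model (assuming PyNumero's ASL extensions are installed) could be used like this:

import pyomo.environ as pyo
from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP

m = pyo.ConcreteModel()
m.x = pyo.Var([1, 2], initialize=1.0)
m.c = pyo.Constraint(expr=2 * m.x[1] + 3 * m.x[2] == 1)
m.obj = pyo.Objective(expr=m.x[1] ** 2 + m.x[2] ** 2)

nlp = PyomoNLP(m)
jac = nlp.extract_submatrix_jacobian(pyomo_variables=[m.x[2], m.x[1]],
                                     pyomo_constraints=[m.c])
# rows follow the constraint list, columns the requested variable order: [[3., 2.]]
print(jac.todense())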
Code Example #4
    def test_indices_methods(self):
        nlp = PyomoNLP(self.pm)

        # get_pyomo_variables
        variables = nlp.get_pyomo_variables()
        expected_ids = [id(self.pm.x[i]) for i in range(1, 10)]
        ids = [id(variables[i]) for i in range(9)]
        self.assertTrue(expected_ids == ids)

        variable_names = nlp.variable_names()
        expected_names = [self.pm.x[i].getname() for i in range(1, 10)]
        self.assertTrue(variable_names == expected_names)

        # get_pyomo_constraints
        constraints = nlp.get_pyomo_constraints()
        expected_ids = [id(self.pm.c[i]) for i in range(1, 10)]
        ids = [id(constraints[i]) for i in range(9)]
        self.assertTrue(expected_ids == ids)

        constraint_names = nlp.constraint_names()
        expected_names = [c.getname() for c in nlp.get_pyomo_constraints()]
        self.assertTrue(constraint_names == expected_names)

        # get_primal_indices
        expected_primal_indices = [i for i in range(9)]
        self.assertTrue(
            expected_primal_indices == nlp.get_primal_indices([self.pm.x]))
        expected_primal_indices = [0, 3, 8, 4]
        variables = [self.pm.x[1], self.pm.x[4], self.pm.x[9], self.pm.x[5]]
        self.assertTrue(
            expected_primal_indices == nlp.get_primal_indices(variables))

        # get_constraint_indices
        expected_constraint_indices = [i for i in range(9)]
        self.assertTrue(expected_constraint_indices ==
                        nlp.get_constraint_indices([self.pm.c]))
        expected_constraint_indices = [0, 3, 8, 4]
        constraints = [self.pm.c[1], self.pm.c[4], self.pm.c[9], self.pm.c[5]]
        self.assertTrue(expected_constraint_indices ==
                        nlp.get_constraint_indices(constraints))

        # extract_subvector_grad_objective
        expected_gradient = np.asarray(
            [2 * sum((i + 1) * (j + 1) for j in range(9)) for i in range(9)],
            dtype=np.float64)
        grad_obj = nlp.extract_subvector_grad_objective([self.pm.x])
        self.assertTrue(np.array_equal(expected_gradient, grad_obj))

        expected_gradient = np.asarray([
            2 * sum((i + 1) * (j + 1) for j in range(9)) for i in [0, 3, 8, 4]
        ],
                                       dtype=np.float64)
        variables = [self.pm.x[1], self.pm.x[4], self.pm.x[9], self.pm.x[5]]
        grad_obj = nlp.extract_subvector_grad_objective(variables)
        self.assertTrue(np.array_equal(expected_gradient, grad_obj))

        # extract_subvector_constraints
        expected_con = np.asarray(
            [45, 88, 3 * 45, 4 * 45, 5 * 45, 276, 7 * 45, 8 * 45, 9 * 45],
            dtype=np.float64)
        con = nlp.extract_subvector_constraints([self.pm.c])
        self.assertTrue(np.array_equal(expected_con, con))

        expected_con = np.asarray([45, 4 * 45, 9 * 45, 5 * 45],
                                  dtype=np.float64)
        constraints = [self.pm.c[1], self.pm.c[4], self.pm.c[9], self.pm.c[5]]
        con = nlp.extract_subvector_constraints(constraints)
        self.assertTrue(np.array_equal(expected_con, con))

        # extract_submatrix_jacobian
        expected_jac = [[(i) * (j) for j in range(1, 10)]
                        for i in range(1, 10)]
        expected_jac = np.asarray(expected_jac, dtype=np.float64)
        jac = nlp.extract_submatrix_jacobian(pyomo_variables=[self.pm.x],
                                             pyomo_constraints=[self.pm.c])
        dense_jac = jac.todense()
        self.assertTrue(np.array_equal(dense_jac, expected_jac))

        expected_jac = [[(i) * (j) for j in [1, 4, 9, 5]] for i in [2, 6, 4]]
        expected_jac = np.asarray(expected_jac, dtype=np.float64)
        variables = [self.pm.x[1], self.pm.x[4], self.pm.x[9], self.pm.x[5]]
        constraints = [self.pm.c[2], self.pm.c[6], self.pm.c[4]]
        jac = nlp.extract_submatrix_jacobian(pyomo_variables=variables,
                                             pyomo_constraints=constraints)
        dense_jac = jac.todense()
        self.assertTrue(np.array_equal(dense_jac, expected_jac))

        # extract_submatrix_hessian_lag
        expected_hess = [[2.0 * i * j for j in range(1, 10)]
                         for i in range(1, 10)]
        expected_hess = np.asarray(expected_hess, dtype=np.float64)
        hess = nlp.extract_submatrix_hessian_lag(
            pyomo_variables_rows=[self.pm.x], pyomo_variables_cols=[self.pm.x])
        dense_hess = hess.todense()
        self.assertTrue(np.array_equal(dense_hess, expected_hess))

        expected_hess = [[2.0 * i * j for j in [1, 4, 9, 5]]
                         for i in [1, 4, 9, 5]]
        expected_hess = np.asarray(expected_hess, dtype=np.float64)
        variables = [self.pm.x[1], self.pm.x[4], self.pm.x[9], self.pm.x[5]]
        hess = nlp.extract_submatrix_hessian_lag(
            pyomo_variables_rows=variables, pyomo_variables_cols=variables)
        dense_hess = hess.todense()
        self.assertTrue(np.array_equal(dense_hess, expected_hess))
Code Example #5
def main():
    model = create_basic_model()
    solver = pyo.SolverFactory('ipopt')
    solver.solve(model, tee=True)

    # build nlp initialized at the solution
    nlp = PyomoNLP(model)

    # get initial point
    print(nlp.primals_names())
    x0 = nlp.get_primals()

    # vectors of lower and upper bounds
    xl = nlp.primals_lb()
    xu = nlp.primals_ub()

    # demonstrate use of compression from full set of bounds
    # to only finite bounds using masks
    xlb_mask = build_bounds_mask(xl)
    xub_mask = build_bounds_mask(xu)
    # get the compressed vector
    compressed_xl = full_to_compressed(xl, xlb_mask)
    compressed_xu = full_to_compressed(xu, xub_mask)
    # we can also build compression matrices
    Cx_xl = build_compression_matrix(xlb_mask)
    Cx_xu = build_compression_matrix(xub_mask)

    # lower and upper bounds residual
    res_xl = Cx_xl * x0 - compressed_xl
    res_xu = compressed_xu - Cx_xu * x0
    print("Residuals lower bounds x-xl:", res_xl)
    print("Residuals upper bounds xu-x:", res_xu)

    # set the value of the primals (we can skip the duals)
    # here we set them to the initial values, but we could
    # set them to anything
    nlp.set_primals(x0)

    # evaluate residual of equality constraints
    print(nlp.constraint_names())
    res_eq = nlp.evaluate_eq_constraints()
    print("Residuals of equality constraints:", res_eq)

    # evaluate residual of inequality constraints
    res_ineq = nlp.evaluate_ineq_constraints()

    # demonstrate the use of compression from full set of
    # lower and upper bounds on the inequality constraints
    # to only the finite values using masks
    ineqlb_mask = build_bounds_mask(nlp.ineq_lb())
    inequb_mask = build_bounds_mask(nlp.ineq_ub())
    # get the compressed vector
    compressed_ineq_lb = full_to_compressed(nlp.ineq_lb(), ineqlb_mask)
    compressed_ineq_ub = full_to_compressed(nlp.ineq_ub(), inequb_mask)
    # we can also build compression matrices
    Cineq_ineqlb = build_compression_matrix(ineqlb_mask)
    Cineq_inequb = build_compression_matrix(inequb_mask)

    # lower and upper inequalities residual
    res_ineq_lb = Cineq_ineqlb * res_ineq - compressed_ineq_lb
    res_ineq_ub = compressed_ineq_ub - Cineq_inequb * res_ineq
    print("Residuals of inequality constraints lower bounds:", res_ineq_lb)
    print("Residuals of inequality constraints upper bounds:", res_ineq_ub)

    feasible = False
    if (np.all(res_xl >= 0) and np.all(res_xu >= 0)
            and np.all(res_ineq_lb >= 0) and np.all(res_ineq_ub >= 0)
            and np.allclose(res_eq, np.zeros(nlp.n_eq_constraints()), atol=1e-5)):
        feasible = True

    print("Is x0 feasible:", feasible)

    return feasible
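The listing reads as a standalone script, so it presumably ends with the usual entry-point guard:

if __name__ == '__main__':
    main()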
Code Example #6
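# Fragment: the objects below are assumed to have been built earlier in the
# script (not shown here), roughly as
#   nlp  - a PyomoNLP wrapping the Pyomo model m
#   H, J - nlp.evaluate_hessian_lag() and nlp.evaluate_jacobian()
#   M    - a BlockMatrix holding the KKT system, into which H and J are placed
#   x    - the current primal vector, e.g. nlp.get_primals()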
M[0, 0] = H
M[1, 0] = J

sens_vars = [m.eta1, m.eta2]
nsens = len(sens_vars)
nr = M.shape[0]

#Np = BlockMatrix(2, 1)
#Np[0, 0] = nlp.extract_submatrix_hessian_lag(pyomo_variables_rows=nlp.get_pyomo_variables(), pyomo_variables_cols=[m.eta1, m.eta2])
#Np[1, 0] = nlp.extract_submatrix_jacobian(pyomo_variables=[m.eta1, m.eta2], pyomo_constraints=nlp.get_pyomo_constraints())

# (superseded by the name-based construction below) this version would place the
# identity in the last nsens rows, assuming the sensitivity constraints come last
#Np = np.zeros((nr, nsens))
#Np[(nr - nsens):nr, :] = np.eye(nsens)

sens_cons = ['consteta1', 'consteta2']
clist = nlp.constraint_names()
nc = len(clist)
Np = np.zeros((nr, nsens))
for i, cons in enumerate(sens_cons):
    Np[nr - nc + clist.index(cons), i] = 1

ds = spsolve(M.tocsc(), Np)
print(nlp.variable_names())

#################################################################

p0 = np.array([pyo.value(m.nominal_eta1), pyo.value(m.nominal_eta2)])
p = np.array([4.45, 1.05])
dp = p - p0
dx = ds.dot(dp)[0:nlp.n_primals()]
new_x = x + dx
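A natural follow-up, not part of the fragment, would be to inspect the perturbed estimate; for example, assuming new_x is ordered like nlp.get_primals():

for name, val in zip(nlp.variable_names(), new_x):
    print(name, val)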