Example #1
    @classmethod
    def setUpClass(cls):

        # test problem 1
        G = np.array([[6, 2, 1], [2, 5, 2], [1, 2, 4]])
        A = np.array([[1, 0, 1], [0, 1, 1]])
        b = np.array([3, 0])
        c = np.array([-8, -3, -3])

        cls.model = create_basic_dense_qp(G, A, b, c)
        cls.pyomo_nlp = PyomoNLP(cls.model)
        cls.coupling_vars = cls.pyomo_nlp.get_primal_indices(
            [cls.model.x[0], cls.model.x[2]])
        cls.nlp = AdmmNLP(cls.pyomo_nlp, cls.coupling_vars, rho=2.0)

        # test problem 2
        cls.model2 = create_model2()
        cls.pyomo_nlp2 = PyomoNLP(cls.model2)
        cls.coupling_vars2 = cls.pyomo_nlp2.get_primal_indices(
            [cls.model2.x[1], cls.model2.x[3], cls.model2.x[5]])
        cls.nlp2 = AdmmNLP(cls.pyomo_nlp2, cls.coupling_vars2, rho=1.0)

        # test problem 3
        cls.model3 = create_basic_model()
        cls.pyomo_nlp3 = PyomoNLP(cls.model3)
        cls.coupling_vars3 = cls.pyomo_nlp3.get_primal_indices(
            [cls.model3.x[1]])
        cls.nlp3 = AdmmNLP(cls.pyomo_nlp3, cls.coupling_vars3, rho=1.0)
Example #2
    def __init__(
        self,
        input_vars,
        external_vars,
        residual_cons,
        external_cons,
        solver=None,
    ):
        if solver is None:
            solver = SolverFactory("ipopt")
        self._solver = solver

        # We only need this block to construct the NLP, which wouldn't
        # be necessary if we could compute Hessians of Pyomo constraints.
        self._block = create_subsystem_block(
            residual_cons + external_cons,
            input_vars + external_vars,
        )
        self._block._obj = Objective(expr=0.0)
        self._nlp = PyomoNLP(self._block)

        self._scc_list = list(
            generate_strongly_connected_components(external_cons,
                                                   variables=external_vars))

        assert len(external_vars) == len(external_cons)

        self.input_vars = input_vars
        self.external_vars = external_vars
        self.residual_cons = residual_cons
        self.external_cons = external_cons

        self.residual_con_multipliers = [None for _ in residual_cons]
        self.residual_scaling_factors = None
Example #3
def get_condition_number(model):
    if isinstance(model, pyo.Block):
        added_obj = False
        if len(list(model.component_data_objects(pyo.Objective,
                                                 active=True))) == 0:
            # PyomoNLP requires an objective; add a trivial one if absent.
            model._obj = pyo.Objective(expr=0.0)
            added_obj = True
        nlp = PyomoNLP(model)
        jacobian = nlp.evaluate_jacobian()
        if added_obj:
            model.del_component(model._obj)
    elif isinstance(model, sps.coo_matrix):
        jacobian = model
    else:
        raise ValueError(
            "model must be a Pyomo Block or a scipy.sparse coo_matrix")
    if jacobian.shape == (1, 1):
        return 1.0
    else:
        # HACK: Use dense linear algebra for minimum singular value
        #jjt = jacobian.dot(jacobian.transpose())
        #jjt_dense = jjt.toarray()
        #sv, _ = np.linalg.eig(jjt_dense)
        _, smin, _ = sps.linalg.svds(jacobian,
                                     k=24,
                                     which="SM",
                                     solver="lobpcg")
        _, smax, _ = sps.linalg.svds(jacobian,
                                     k=1,
                                     which="LM",
                                     solver="lobpcg")
        cond = smax[0] / smin[0]
        return cond
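A minimal usage sketch for get_condition_number. The random matrix below is hypothetical and only illustrates the sparse-matrix path; note that the svds call above uses k=24, so the input must have more than 24 rows and columns.

import numpy as np
import scipy.sparse as sps

rng = np.random.default_rng(0)
# Hypothetical 30x30 matrix; min(shape) must exceed k=24 for svds to work.
J = sps.coo_matrix(rng.standard_normal((30, 30)))
cond = get_condition_number(J)
print(f"Estimated condition number: {cond:.3e}")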
Example #4
    def test_options(self):
        model = create_model1()
        nlp = PyomoNLP(model)
        solver = CyIpoptSolver(CyIpoptNLP(nlp), options={'max_iter': 1})
        x, info = solver.solve(tee=False)
        nlp.set_primals(x)
        self.assertAlmostEqual(nlp.evaluate_objective(), -5.0879028e+02, places=5)
Example #5
    def test_solve_gsl_function(self):
        DLL = find_GSL()
        if not DLL:
            self.skipTest("Could not find the amplgsl.dll library")
        model = ConcreteModel()
        model.z_func = ExternalFunction(library=DLL, function="gsl_sf_gamma")
        model.x = Var(initialize=3, bounds=(1e-5, None))
        model.o = Objective(expr=model.z_func(model.x))
        nlp = PyomoNLP(model)
        # gamma(3) == 2
        self.assertAlmostEqual(nlp.evaluate_objective(), 2.0, places=7)
        assert "AMPLFUNC" not in os.environ
Example #6
    def test_rename(self):
        m = create_pyomo_model()
        nlp = PyomoNLP(m)
        expected_names = ['x[0]', 'x[1]', 'x[2]']
        self.assertEqual(nlp.primals_names(), expected_names)
        renamed_nlp = RenamedNLP(nlp, {
            'x[0]': 'y[0]',
            'x[1]': 'y[1]',
            'x[2]': 'y[2]'
        })
        expected_names = ['y[0]', 'y[1]', 'y[2]']
        self.assertEqual(renamed_nlp.primals_names(), expected_names)
Example #7
def test_have_pynumero(model):
    assert PyomoNLP is not None
    nlp = PyomoNLP(model)
    f = nlp.evaluate_objective()
    assert f == pytest.approx(-504.0)

    jac = nlp.evaluate_jacobian().toarray()
    assert jac[0][0] == pytest.approx(0)
    assert jac[0][1] == pytest.approx(8)
    assert jac[0][2] == pytest.approx(1)
    assert jac[1][0] == pytest.approx(8)
    assert jac[1][1] == pytest.approx(0)
    assert jac[1][2] == pytest.approx(1)
Example #8
def get_numeric_incidence_matrix(variables, constraints):
    """
    This function gets the numeric incidence matrix (Jacobian) of Pyomo
    constraints with respect to variables.
    """
    # NOTE: There are several ways to get a numeric incidence matrix
    # from a Pyomo model. Here we get the numeric incidence matrix by
    # creating a temporary block and using the PyNumero ASL interface.
    comps = list(variables) + list(constraints)
    _check_unindexed(comps)
    block = create_subsystem_block(constraints, variables)
    block._obj = Objective(expr=0)
    nlp = PyomoNLP(block)
    return nlp.extract_submatrix_jacobian(variables, constraints)
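A minimal usage sketch for get_numeric_incidence_matrix, assuming pyomo.environ is imported as pyo; the two-variable model is hypothetical and only illustrates the call signature.

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var([1, 2], initialize=1.0)
m.c1 = pyo.Constraint(expr=m.x[1] * m.x[2] == 1.0)
m.c2 = pyo.Constraint(expr=m.x[1] + 2.0 * m.x[2] == 3.0)

# Rows correspond to constraints, columns to variables.
J = get_numeric_incidence_matrix([m.x[1], m.x[2]], [m.c1, m.c2])
print(J.toarray())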
Example #9
    def test_model1_with_scaling(self):
        m = create_model1()
        m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT)
        m.scaling_factor[m.o] = 1e-6 # scale the objective
        m.scaling_factor[m.c] = 2.0  # scale the equality constraint
        m.scaling_factor[m.d] = 3.0  # scale the inequality constraint
        m.scaling_factor[m.x[1]] = 4.0  # scale one of the x variables

        cynlp = CyIpoptNLP(PyomoNLP(m))
        options = {'nlp_scaling_method': 'user-scaling',
                   'output_file': '_cyipopt-scaling.log',
                   'file_print_level': 10,
                   'max_iter': 0}
        solver = CyIpoptSolver(cynlp, options=options)
        x, info = solver.solve()

        with open('_cyipopt-scaling.log', 'r') as fd:
            solver_trace = fd.read()
        os.remove('_cyipopt-scaling.log')

        # check that the following strings appear in the captured log
        self.assertIn('nlp_scaling_method = user-scaling', solver_trace)
        self.assertIn('output_file = _cyipopt-scaling.log', solver_trace)
        self.assertIn('objective scaling factor = 1e-06', solver_trace)
        self.assertIn('x scaling provided', solver_trace)
        self.assertIn('c scaling provided', solver_trace)
        self.assertIn('d scaling provided', solver_trace)
        self.assertIn('DenseVector "x scaling vector" with 3 elements:', solver_trace)
        self.assertIn('x scaling vector[    1]= 1.0000000000000000e+00', solver_trace)
        self.assertIn('x scaling vector[    2]= 1.0000000000000000e+00', solver_trace)
        self.assertIn('x scaling vector[    3]= 4.0000000000000000e+00', solver_trace)
        self.assertIn('DenseVector "c scaling vector" with 1 elements:', solver_trace)
        self.assertIn('c scaling vector[    1]= 2.0000000000000000e+00', solver_trace)
        self.assertIn('DenseVector "d scaling vector" with 1 elements:', solver_trace)
        self.assertIn('d scaling vector[    1]= 3.0000000000000000e+00', solver_trace)
Example #10
    def test_perfect_matching(self):
        model = make_gas_expansion_model()
        model.obj = pyo.Objective(expr=0)
        nlp = PyomoNLP(model)
        igraph = IncidenceGraphInterface(nlp)

        # These are the variables and constraints of the square,
        # nonsingular subsystem
        variables = []
        variables.extend(model.P.values())
        variables.extend(model.T[i] for i in model.streams
                         if i != model.streams.first())
        variables.extend(model.rho[i] for i in model.streams
                         if i != model.streams.first())
        variables.extend(model.F[i] for i in model.streams
                         if i != model.streams.first())

        constraints = list(model.component_data_objects(pyo.Constraint))

        n_var = len(variables)
        matching = igraph.maximum_matching(variables, constraints)
        values = ComponentSet(matching.values())
        self.assertEqual(len(matching), n_var)
        self.assertEqual(len(values), n_var)

        # The subset of variables and equations we have identified
        # do not have a unique perfect matching. But we at least know
        # this much.
        self.assertIs(matching[model.ideal_gas[0]], model.P[0])
Example #11
def main(show_plot=True):
    if show_plot:
        import matplotlib.pyplot as plt

    instance = create_problem(0.0, 10.0)
    # Discretize model using Orthogonal Collocation
    discretizer = pyo.TransformationFactory('dae.collocation')
    discretizer.apply_to(instance, nfe=100, ncp=3, scheme='LAGRANGE-RADAU')
    discretizer.reduce_collocation_points(instance,
                                          var=instance.u,
                                          ncp=1,
                                          contset=instance.t)

    # Interface pyomo model with nlp
    nlp = PyomoNLP(instance)
    x = nlp.create_new_vector('primals')
    x.fill(1.0)
    nlp.set_primals(x)

    lam = nlp.create_new_vector('duals')
    lam.fill(1.0)
    nlp.set_duals(lam)

    # Evaluate jacobian
    jac = nlp.evaluate_jacobian()
    if show_plot:
        plt.spy(jac)
        plt.title('Jacobian of the constraints\n')
        plt.show()

    # Evaluate hessian of the lagrangian
    hess_lag = nlp.evaluate_hessian_lag()
    if show_plot:
        plt.spy(hess_lag)
        plt.title('Hessian of the Lagrangian function\n')
        plt.show()

    # Build KKT matrix
    kkt = BlockMatrix(2, 2)
    kkt.set_block(0, 0, hess_lag)
    kkt.set_block(1, 0, jac)
    kkt.set_block(0, 1, jac.transpose())
    if show_plot:
        plt.spy(kkt.tocoo())
        plt.title('KKT system\n')
        plt.show()
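A possible continuation of this example (not part of the original): the assembled KKT BlockMatrix can be converted to a standard SciPy sparse format and factorized. The zero right-hand side is a placeholder, and a real KKT system may need regularization before factorization succeeds.

import numpy as np
import scipy.sparse.linalg as spla

kkt_csc = kkt.tocoo().tocsc()
rhs = np.zeros(kkt_csc.shape[0])  # placeholder right-hand side
step = spla.splu(kkt_csc).solve(rhs)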
Example #12
    def test_nlp_active_error(self):
        m = pyo.ConcreteModel()
        m.v1 = pyo.Var()
        m.c1 = pyo.Constraint(expr=m.v1 == 1.0)
        m.c2 = pyo.Constraint(expr=m.v1 == 2.0)
        m._obj = pyo.Objective(expr=0.0)
        nlp = PyomoNLP(m)
        with self.assertRaisesRegex(ValueError, "inactive constraints"):
            igraph = IncidenceGraphInterface(nlp, active=False)
Example #13
    def test_nlp_fixed_error(self):
        m = pyo.ConcreteModel()
        m.v1 = pyo.Var()
        m.v2 = pyo.Var()
        m.c1 = pyo.Constraint(expr=m.v1 + m.v2 == 1.0)
        m.v2.fix(2.0)
        m._obj = pyo.Objective(expr=0.0)
        nlp = PyomoNLP(m)
        with self.assertRaisesRegex(ValueError, "fixed variables"):
            igraph = IncidenceGraphInterface(nlp, include_fixed=True)
Example #14
    def test_imperfect_matching(self):
        model = make_gas_expansion_model()
        model.obj = pyo.Objective(expr=0)
        nlp = PyomoNLP(model)
        igraph = IncidenceGraphInterface(nlp)

        n_eqn = len(list(model.component_data_objects(pyo.Constraint)))
        matching = igraph.maximum_matching()
        values = ComponentSet(matching.values())
        self.assertEqual(len(matching), n_eqn)
        self.assertEqual(len(values), n_eqn)
Example #15
    def test_invalid_bounds(self):
        m = pyo.ConcreteModel()
        m.x = pyo.Var([1, 2, 3], domain=pyo.NonNegativeReals)
        for i in m.x:
            m.x[i].ub = i - 2
            m.x[i].value = i
        m.i3 = pyo.Constraint(expr=m.x[2] + m.x[3] + m.x[1] >= -500.0)
        m.obj = pyo.Objective(expr=m.x[2]**2)
        with self.assertRaisesRegex(
                RuntimeError, "Some variables have lower bounds that "
                "are greater than the upper bounds"):
            nlp = PyomoNLP(m)
Example #16
    def test_nnz_hessian_lag(self):
        self.assertEqual(self.nlp.nnz_hessian_lag, 9)

        m = self.model2
        transform = AdmmModel()
        aug_model = transform.create_using(
            m,
            complicating_vars=[m.x[1], m.x[3], m.x[5]],
            # z_estimates=[1, 2, 3],
            # w_estimates=[1, 2, 3],
            rho=1.0)
        nl = PyomoNLP(aug_model)
        self.assertEqual(self.nlp2.nnz_hessian_lag, nl.nnz_hessian_lag)
Example #17
def get_numeric_incidence_matrix(variables, constraints):
    """
    This function gets the numeric incidence matrix (Jacobian) of Pyomo
    constraints with respect to variables.
    """
    # NOTE: There are several ways to get a numeric incidence matrix
    # from a Pyomo model. This function implements a somewhat roundabout
    # method, which is to construct a dummy Block with the necessary
    # variables and constraints, then construct a PyNumero PyomoNLP
    # from the block and have PyNumero evaluate the desired Jacobian
    # via ASL.
    comps = list(variables) + list(constraints)
    _check_unindexed(comps)
    M, N = len(constraints), len(variables)
    _block = Block()
    _block.construct()
    _block.obj = Objective(expr=0)
    _block.vars = Reference(variables)
    _block.cons = Reference(constraints)
    var_set = ComponentSet(variables)
    other_vars = []
    for con in constraints:
        for var in identify_variables(con.body, include_fixed=False):
            # Fixed vars will be ignored by the nl file write, so
            # there is no point to including them here.
            # A different method of assembling this matrix, e.g.
            # Pyomo's automatic differentiation, could support taking
            # derivatives with respect to fixed variables.
            if var not in var_set:
                other_vars.append(var)
                var_set.add(var)
    # These variables are necessary due to the nl writer's philosophy
    # about what constitutes a model. Note that we take derivatives with
    # respect to them even though this is not necessary. We could fix them
    # here to avoid doing this extra work, but that would alter the user's
    # model, which we would rather not do.
    _block.other_vars = Reference(other_vars)
    _nlp = PyomoNLP(_block)
    return _nlp.extract_submatrix_jacobian(variables, constraints)
Example #18
    def set_input_values(self, input_values):
        solver = self._solver
        external_cons = self.external_cons
        external_vars = self.external_vars
        input_vars = self.input_vars

        for var, val in zip(input_vars, input_values):
            var.set_value(val)

        _temp = create_subsystem_block(external_cons, variables=external_vars)
        possible_input_vars = ComponentSet(input_vars)
        #for var in _temp.input_vars.values():
        #    # TODO: Is this check necessary?
        #    assert var in possible_input_vars

        with TemporarySubsystemManager(to_fix=list(_temp.input_vars.values())):
            solver.solve(_temp)

        # Should we create the NLP from the original block or the temp block?
        # Need to create it from the original block because temp block won't
        # have residual constraints, whose derivatives are necessary.
        self._nlp = PyomoNLP(self._block)
Example #19
    def test_model2(self):
        model = create_model2()
        nlp = PyomoNLP(model)
        solver = CyIpoptSolver(CyIpoptNLP(nlp))
        x, info = solver.solve(tee=False)
        x_sol = np.array([3.0, 1.99997807])
        y_sol = np.array([0.00017543])
        self.assertTrue(np.allclose(x, x_sol, rtol=1e-4))
        nlp.set_primals(x)
        nlp.set_duals(y_sol)
        self.assertAlmostEqual(nlp.evaluate_objective(), -31.000000057167462, places=5)
        self.assertTrue(np.allclose(info['mult_g'], y_sol, rtol=1e-4))
Example #20
    def test_model1(self):
        model = create_model1()
        nlp = PyomoNLP(model)
        solver = CyIpoptSolver(CyIpoptNLP(nlp))
        x, info = solver.solve(tee=False)
        x_sol = np.array([3.85958688, 4.67936007, 3.10358931])
        y_sol = np.array([-1.0, 53.90357665])
        self.assertTrue(np.allclose(x, x_sol, rtol=1e-4))
        nlp.set_primals(x)
        nlp.set_duals(y_sol)
        self.assertAlmostEqual(nlp.evaluate_objective(), -428.6362455416348, places=5)
        self.assertTrue(np.allclose(info['mult_g'], y_sol, rtol=1e-4))
Example #21
    def calculate_duals(self):
        """Get duals of the current model

        :return: self.duals
        :rtype: dict
        """

        # For testing - very slow and should not be used!
        if self.kkt_method == 'pynumero':

            nlp = PyomoNLP(self.model_object)
            varList = nlp.get_pyomo_variables()
            conList = nlp.get_pyomo_constraints()
            duals = nlp.get_duals()

            J = nlp.extract_submatrix_jacobian(pyomo_variables=varList,
                                               pyomo_constraints=conList)
            H = nlp.extract_submatrix_hessian_lag(pyomo_variables_rows=varList,
                                                  pyomo_variables_cols=varList)
            J = csc_matrix(J)

            var_index_names = [v.name for v in varList]
            con_index_names = [v.name for v in conList]

            dummy_constraints = [
                f'{self.global_constraint_name}[{k}]'
                for k in self.parameter_set
            ]
            jac_row_ind = [con_index_names.index(d) for d in dummy_constraints]
            duals_imp = [duals[i] for i in jac_row_ind]

            self.duals = dict(zip(self.parameter_set, duals_imp))
            if self.verbose:
                print('The pynumero results are:')
                print(self.duals)

        else:

            self.duals = {
                key: self.model_object.dual[getattr(
                    self.model_object, self.global_constraint_name)[key]]
                for key, val in getattr(self.model_object,
                                        self.global_param_name).items()
            }

            if self.verbose:
                print('The duals are:')
                print(self.duals)

        self.delete_sol_files()

        return self.duals
Example #22
    def test_exception(self):
        model = make_gas_expansion_model()
        model.obj = pyo.Objective(expr=0)
        nlp = PyomoNLP(model)
        igraph = IncidenceGraphInterface(nlp)

        with self.assertRaises(ValueError) as exc:
            variables = [model.P]
            constraints = [model.ideal_gas]
            igraph.maximum_matching(variables, constraints)
        self.assertIn('must be unindexed', str(exc.exception))

        with self.assertRaises(ValueError) as exc:
            variables = [model.P]
            constraints = [model.ideal_gas]
            igraph.block_triangularize(variables, constraints)
        self.assertIn('must be unindexed', str(exc.exception))
Example #23
    def test_model3(self):
        G = np.array([[6, 2, 1], [2, 5, 2], [1, 2, 4]])
        A = np.array([[1, 0, 1], [0, 1, 1]])
        b = np.array([3, 0])
        c = np.array([-8, -3, -3])

        model = create_model3(G, A, b, c)
        nlp = PyomoNLP(model)
        solver = CyIpoptSolver(CyIpoptNLP(nlp))
        x, info = solver.solve(tee=False)
        x_sol = np.array([2.0, -1.0, 1.0])
        y_sol = np.array([-3., 2.])
        self.assertTrue(np.allclose(x, x_sol, rtol=1e-4))
        nlp.set_primals(x)
        nlp.set_duals(y_sol)
        self.assertAlmostEqual(nlp.evaluate_objective(), -3.5, places=5)
        self.assertTrue(np.allclose(info['mult_g'], y_sol, rtol=1e-4))
Example #24
    def _get_kkt_info(self, model):
        """Takes the model and uses PyNumero to get the jacobian and Hessian
        information as dataframes
        
        Args:
            model (pyomo ConcreteModel): A pyomo model instance of the current
            problem (used in calculating the reduced Hessian)
    
        Returns:
            
            KKT (pd.DataFrame): the KKT matrix as a dataframe
            
            H_df (pd.DataFrame): the Hessian as a dataframe
            
            J_df (pd.DataFrame): the jacobian as a dataframe
            
            var_index_names (list): the index of variables
            
            con_index_names (list): the index of constraints
            
        """
        nlp = PyomoNLP(model)
        varList = nlp.get_pyomo_variables()
        conList = nlp.get_pyomo_constraints()

        J = nlp.extract_submatrix_jacobian(pyomo_variables=varList,
                                           pyomo_constraints=conList)
        H = nlp.extract_submatrix_hessian_lag(pyomo_variables_rows=varList,
                                              pyomo_variables_cols=varList)

        var_index_names = [v.name for v in varList]
        con_index_names = [v.name for v in conList]

        J_df = pd.DataFrame(J.todense(),
                            columns=var_index_names,
                            index=con_index_names)
        H_df = pd.DataFrame(H.todense(),
                            columns=var_index_names,
                            index=var_index_names)

        var_index_names = pd.DataFrame(var_index_names)

        KKT_up = pd.merge(H_df,
                          J_df.transpose(),
                          left_index=True,
                          right_index=True)
        KKT = pd.concat((KKT_up, J_df))
        KKT = KKT.fillna(0)

        return KKT, H_df, J_df, var_index_names, con_index_names
Example #25
    def test_nlp_interface(self):
        nlp = PyomoNLP(self.pm)
        execute_extended_nlp_interface(self, nlp)
        self.assertTrue(nlp.pyomo_model() is self.pm)

        self.assertEqual(float(nlp.get_obj_scaling()), 5.0)

        xs = nlp.get_primals_scaling()
        expected_xs = np.asarray([2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0])
        self.assertTrue(np.array_equal(xs, expected_xs))

        cs = nlp.get_constraints_scaling()
        expected_cs = np.asarray([ 3.0,  6.0,  9.0, 12.0, 15.0, 18.0, 21.0, 24.0, 27.0 ])
        self.assertTrue(np.array_equal(cs, expected_cs))

        eqcs = nlp.get_eq_constraints_scaling()
        expected_eqcs = np.asarray([ 6.0, 18.0 ])
        self.assertTrue(np.array_equal(eqcs, expected_eqcs))

        ineqcs = nlp.get_ineq_constraints_scaling()
        expected_ineqcs = np.asarray([ 3.0,  9.0, 12.0, 15.0, 21.0, 24.0, 27.0 ])
        self.assertTrue(np.array_equal(ineqcs, expected_ineqcs))
Example #26
    def test_triangularize(self):
        N = 5
        model = make_gas_expansion_model(N)
        model.obj = pyo.Objective(expr=0)
        nlp = PyomoNLP(model)
        igraph = IncidenceGraphInterface(nlp)

        # These are the variables and constraints of the square,
        # nonsingular subsystem
        variables = []
        variables.extend(model.P.values())
        variables.extend(model.T[i] for i in model.streams
                         if i != model.streams.first())
        variables.extend(model.rho[i] for i in model.streams
                         if i != model.streams.first())
        variables.extend(model.F[i] for i in model.streams
                         if i != model.streams.first())

        constraints = list(model.component_data_objects(pyo.Constraint))

        var_block_map, con_block_map = igraph.block_triangularize(
            variables, constraints)
        var_values = set(var_block_map.values())
        con_values = set(con_block_map.values())
        self.assertEqual(len(var_values), N + 1)
        self.assertEqual(len(con_values), N + 1)

        self.assertEqual(var_block_map[model.P[0]], 0)

        for i in model.streams:
            if i != model.streams.first():
                self.assertEqual(var_block_map[model.rho[i]], i)
                self.assertEqual(var_block_map[model.T[i]], i)
                self.assertEqual(var_block_map[model.P[i]], i)
                self.assertEqual(var_block_map[model.F[i]], i)

                self.assertEqual(con_block_map[model.ideal_gas[i]], i)
                self.assertEqual(con_block_map[model.expansion[i]], i)
                self.assertEqual(con_block_map[model.mbal[i]], i)
                self.assertEqual(con_block_map[model.ebal[i]], i)
Example #27
def _get_kkt_matrix(model):
    """This uses pynumero to get the Hessian and Jacobian in order to build the
    KKT matrix
    
    Args:
        model (pyomo ConcreteModel): the current model used in the inner 
            problem optimization
            
    Returns:
        KKT (pandas.DataFrame): KKT matrix for the current iteration
        
        var_index_names (list): list of variable names
        
        con_index_names (list): list of constraint names
    
    """
    nlp = PyomoNLP(model)
    varList = nlp.get_pyomo_variables()
    conList = nlp.get_pyomo_constraints()
    duals = nlp.get_duals()
    
    J = nlp.extract_submatrix_jacobian(pyomo_variables=varList, pyomo_constraints=conList)
    H = nlp.extract_submatrix_hessian_lag(pyomo_variables_rows=varList, pyomo_variables_cols=varList)
    
    var_index_names = [v.name for v in varList]
    con_index_names = [v.name for v in conList]

    J_df = pd.DataFrame(J.todense(), columns=var_index_names, index=con_index_names)
    H_df = pd.DataFrame(H.todense(), columns=var_index_names, index=var_index_names)
    
    var_index_names = pd.DataFrame(var_index_names)
    
    KKT_up = pd.merge(H_df, J_df.transpose(), left_index=True, right_index=True)
    KKT = pd.concat((KKT_up, J_df))
    KKT = KKT.fillna(0)
    
    return KKT, var_index_names, con_index_names
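A minimal usage sketch, assuming a Pyomo model m that has already been solved so that its primal and dual values are meaningful:

KKT, var_index_names, con_index_names = _get_kkt_matrix(m)
print(KKT.shape)        # (n_vars + n_cons, n_vars + n_cons)
print(var_index_names)  # DataFrame of variable names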
Example #28
    def test_compare_evaluations(self):
        A1 = 5
        A2 = 10
        c1 = 3
        c2 = 4
        N = 6
        dt = 1

        m = create_pyomo_model(A1, A2, c1, c2, N, dt)
        solver = pyo.SolverFactory('ipopt')
        solver.options['linear_solver'] = 'mumps'
        status = solver.solve(m, tee=False)
        m_nlp = PyomoNLP(m)

        mex = create_pyomo_external_grey_box_model(A1, A2, c1, c2, N, dt)
        # mex_nlp = PyomoGreyBoxNLP(mex)
        mex_nlp = PyomoNLPWithGreyBoxBlocks(mex)

        # get the variable and constraint order and create the maps
        # needed for reliable, order-independent comparisons
        m_x_order = m_nlp.primals_names()
        m_c_order = m_nlp.constraint_names()
        mex_x_order = mex_nlp.primals_names()
        mex_c_order = mex_nlp.constraint_names()

        x1list = [
            'h1[0]', 'h1[1]', 'h1[2]', 'h1[3]', 'h1[4]', 'h1[5]', 'h2[0]',
            'h2[1]', 'h2[2]', 'h2[3]', 'h2[4]', 'h2[5]', 'F1[1]', 'F1[2]',
            'F1[3]', 'F1[4]', 'F1[5]', 'F2[1]', 'F2[2]', 'F2[3]', 'F2[4]',
            'F2[5]', 'F12[0]', 'F12[1]', 'F12[2]', 'F12[3]', 'F12[4]',
            'F12[5]', 'Fo[0]', 'Fo[1]', 'Fo[2]', 'Fo[3]', 'Fo[4]', 'Fo[5]'
        ]
        x2list = [
            'egb.inputs[h1_0]', 'egb.inputs[h1_1]', 'egb.inputs[h1_2]',
            'egb.inputs[h1_3]', 'egb.inputs[h1_4]', 'egb.inputs[h1_5]',
            'egb.inputs[h2_0]', 'egb.inputs[h2_1]', 'egb.inputs[h2_2]',
            'egb.inputs[h2_3]', 'egb.inputs[h2_4]', 'egb.inputs[h2_5]',
            'egb.inputs[F1_1]', 'egb.inputs[F1_2]', 'egb.inputs[F1_3]',
            'egb.inputs[F1_4]', 'egb.inputs[F1_5]', 'egb.inputs[F2_1]',
            'egb.inputs[F2_2]', 'egb.inputs[F2_3]', 'egb.inputs[F2_4]',
            'egb.inputs[F2_5]', 'egb.outputs[F12_0]', 'egb.outputs[F12_1]',
            'egb.outputs[F12_2]', 'egb.outputs[F12_3]', 'egb.outputs[F12_4]',
            'egb.outputs[F12_5]', 'egb.outputs[Fo_0]', 'egb.outputs[Fo_1]',
            'egb.outputs[Fo_2]', 'egb.outputs[Fo_3]', 'egb.outputs[Fo_4]',
            'egb.outputs[Fo_5]'
        ]
        x1_x2_map = dict(zip(x1list, x2list))
        x1idx_x2idx_map = {
            i: mex_x_order.index(x1_x2_map[m_x_order[i]])
            for i in range(len(m_x_order))
        }

        c1list = [
            'h1bal[1]', 'h1bal[2]', 'h1bal[3]', 'h1bal[4]', 'h1bal[5]',
            'h2bal[1]', 'h2bal[2]', 'h2bal[3]', 'h2bal[4]', 'h2bal[5]',
            'F12con[0]', 'F12con[1]', 'F12con[2]', 'F12con[3]', 'F12con[4]',
            'F12con[5]', 'Focon[0]', 'Focon[1]', 'Focon[2]', 'Focon[3]',
            'Focon[4]', 'Focon[5]', 'min_inflow[1]', 'min_inflow[2]',
            'min_inflow[3]', 'min_inflow[4]', 'min_inflow[5]',
            'max_outflow[0]', 'max_outflow[1]', 'max_outflow[2]',
            'max_outflow[3]', 'max_outflow[4]', 'max_outflow[5]', 'h10', 'h20'
        ]
        c2list = [
            'egb.h1bal_1', 'egb.h1bal_2', 'egb.h1bal_3', 'egb.h1bal_4',
            'egb.h1bal_5', 'egb.h2bal_1', 'egb.h2bal_2', 'egb.h2bal_3',
            'egb.h2bal_4', 'egb.h2bal_5', 'egb.output_constraints[F12_0]',
            'egb.output_constraints[F12_1]', 'egb.output_constraints[F12_2]',
            'egb.output_constraints[F12_3]', 'egb.output_constraints[F12_4]',
            'egb.output_constraints[F12_5]', 'egb.output_constraints[Fo_0]',
            'egb.output_constraints[Fo_1]', 'egb.output_constraints[Fo_2]',
            'egb.output_constraints[Fo_3]', 'egb.output_constraints[Fo_4]',
            'egb.output_constraints[Fo_5]', 'min_inflow[1]', 'min_inflow[2]',
            'min_inflow[3]', 'min_inflow[4]', 'min_inflow[5]',
            'max_outflow[0]', 'max_outflow[1]', 'max_outflow[2]',
            'max_outflow[3]', 'max_outflow[4]', 'max_outflow[5]', 'h10', 'h20'
        ]
        c1_c2_map = dict(zip(c1list, c2list))
        c1idx_c2idx_map = {
            i: mex_c_order.index(c1_c2_map[m_c_order[i]])
            for i in range(len(m_c_order))
        }

        # get the primals from m and put them in the correct order for mex
        m_x = m_nlp.get_primals()
        mex_x = np.zeros(len(m_x))
        for i in range(len(m_x)):
            mex_x[x1idx_x2idx_map[i]] = m_x[i]

        # get the duals from m and put them in the correct order for mex
        m_lam = m_nlp.get_duals()
        mex_lam = np.zeros(len(m_lam))
        for i in range(len(m_lam)):
            mex_lam[c1idx_c2idx_map[i]] = m_lam[i]

        mex_nlp.set_primals(mex_x)
        mex_nlp.set_duals(mex_lam)

        m_obj = m_nlp.evaluate_objective()
        mex_obj = mex_nlp.evaluate_objective()
        self.assertAlmostEqual(m_obj, mex_obj, places=4)

        m_gobj = m_nlp.evaluate_grad_objective()
        mex_gobj = mex_nlp.evaluate_grad_objective()
        check_vectors_specific_order(self, m_gobj, m_x_order, mex_gobj,
                                     mex_x_order, x1_x2_map)

        m_c = m_nlp.evaluate_constraints()
        mex_c = mex_nlp.evaluate_constraints()
        check_vectors_specific_order(self, m_c, m_c_order, mex_c, mex_c_order,
                                     c1_c2_map)

        m_j = m_nlp.evaluate_jacobian()
        mex_j = mex_nlp.evaluate_jacobian().todense()
        check_sparse_matrix_specific_order(self, m_j, m_c_order, m_x_order,
                                           mex_j, mex_c_order, mex_x_order,
                                           c1_c2_map, x1_x2_map)

        m_h = m_nlp.evaluate_hessian_lag()
        mex_h = mex_nlp.evaluate_hessian_lag()
        check_sparse_matrix_specific_order(self, m_h, m_x_order, m_x_order,
                                           mex_h, mex_x_order, mex_x_order,
                                           x1_x2_map, x1_x2_map)

        mex_h = 0 * mex_h
        mex_nlp.evaluate_hessian_lag(out=mex_h)
        check_sparse_matrix_specific_order(self, m_h, m_x_order, m_x_order,
                                           mex_h, mex_x_order, mex_x_order,
                                           x1_x2_map, x1_x2_map)
Example #29
    def __init__(self, pyomo_model):
        super(PyomoNLPWithGreyBoxBlocks, self).__init__()

        # get the list of all grey box blocks and build _ExternalGreyBoxAsNLP objects
        greybox_components = []
        # build a map from the names to the variable data objects
        # this is done over *all* variables in active blocks, even
        # if they are not included in this model
        self._pyomo_model_var_names_to_datas = None
        try:
            # We support external grey box models that are provided through
            # ExternalGreyBoxBlock modeling objects. We reclassify these as
            # Pyomo Block objects before building the PyomoNLP object to
            # expose any variables on the block to the underlying Pyomo
            # machinery
            for greybox in pyomo_model.component_objects(
                    ExternalGreyBoxBlock, descend_into=True):
                greybox.parent_block().reclassify_component_type(
                    greybox, pyo.Block)
                greybox_components.append(greybox)

            # store the pyomo model
            self._pyomo_model = pyomo_model
            # build a PyomoNLP object (will include the "pyomo"
            # part of the model only)
            self._pyomo_nlp = PyomoNLP(pyomo_model)
            self._pyomo_model_var_names_to_datas = {
                v.getname(fully_qualified=True): v
                for v in pyomo_model.component_data_objects(
                    ctype=pyo.Var, descend_into=True)
            }
            self._pyomo_model_constraint_names_to_datas = {
                c.getname(fully_qualified=True): c
                for c in pyomo_model.component_data_objects(
                    ctype=pyo.Constraint, descend_into=True)
            }

        finally:
            # Restore the ctypes of the ExternalGreyBoxBlock components
            for greybox in greybox_components:
                greybox.parent_block().reclassify_component_type(
                    greybox, ExternalGreyBoxBlock)

        if self._pyomo_nlp.n_primals() == 0:
            raise ValueError(
                "No variables were found in the Pyomo part of the model."
                " PyomoGreyBoxModel requires at least one variable"
                " to be active in a Pyomo objective or constraint")

        # build the list of NLP wrappers for the greybox objects
        greybox_nlps = []
        fixed_vars = []
        for greybox in greybox_components:
            # iterate through the data objects if component is indexed
            for data in greybox.values():
                if data.active:
                    # check that no variables are fixed
                    fixed_vars.extend(v for v in data.inputs.values() if v.fixed)
                    fixed_vars.extend(v for v in data.outputs.values() if v.fixed)
                    greybox_nlp = _ExternalGreyBoxAsNLP(data)
                    greybox_nlps.append(greybox_nlp)

        if fixed_vars:
            logging.getLogger(__name__).error('PyomoNLPWithGreyBoxBlocks found fixed variables for the'
                                              ' inputs and/or outputs of an ExternalGreyBoxBlock. This'
                                              ' is not currently supported. The fixed variables were:\n\t'
                                              + '\n\t'.join(f.getname(fully_qualified=True) for f in fixed_vars)
                                              )
            raise NotImplementedError('PyomoNLPWithGreyBoxBlocks does not support fixed inputs or outputs')

        # let's build up the union of all the primal variables names
        # RBP: Why use names here? Why not just ComponentSet of all
        # data objects?
        primals_names = set(self._pyomo_nlp.primals_names())
        for gbnlp in greybox_nlps:
            primals_names.update(gbnlp.primals_names())

        # sort the names for consistency run to run
        self._n_primals = len(primals_names)
        self._primals_names = primals_names = sorted(primals_names)
        self._pyomo_model_var_datas = [self._pyomo_model_var_names_to_datas[nm] for nm in self._primals_names]

        # get the names of all the constraints
        self._constraint_names = list(self._pyomo_nlp.constraint_names())
        self._constraint_datas = [self._pyomo_model_constraint_names_to_datas.get(nm) for nm in self._constraint_names]
        for gbnlp in greybox_nlps:
            self._constraint_names.extend(gbnlp.constraint_names())
            self._constraint_datas.extend([(gbnlp._block, nm) for nm in gbnlp.constraint_names()])
        self._n_constraints = len(self._constraint_names)

        self._has_hessian_support = True
        for nlp in greybox_nlps:
            if not nlp.has_hessian_support():
                self._has_hessian_support = False

        # wrap all the nlp objects with projected nlp objects
        self._pyomo_nlp = ProjectedNLP(self._pyomo_nlp, primals_names)
        for i,gbnlp in enumerate(greybox_nlps):
            greybox_nlps[i] = ProjectedNLP(greybox_nlps[i], primals_names)

        # build a list of all the nlps in order
        self._nlps = nlps = [self._pyomo_nlp]
        nlps.extend(greybox_nlps)

        # build the primal and dual inits and lb, ub vectors
        self._init_primals = self._pyomo_nlp.init_primals()
        self._primals_lb = self._pyomo_nlp.primals_lb()
        self._primals_ub = self._pyomo_nlp.primals_ub()
        for gbnlp in greybox_nlps:
            local = gbnlp.init_primals()
            mask = ~np.isnan(local)
            self._init_primals[mask] = local[mask]

            local = gbnlp.primals_lb()
            mask = ~np.isnan(local)
            self._primals_lb[mask] = np.maximum(self._primals_lb[mask], local[mask])

            local = gbnlp.primals_ub()
            mask = ~np.isnan(local)
            self._primals_ub[mask] = np.minimum(self._primals_ub[mask], local[mask])

        # all the nan's should be gone (every primal should be initialized)
        if np.any(np.isnan(self._init_primals)) \
           or np.any(np.isnan(self._primals_lb)) \
           or np.any(np.isnan(self._primals_ub)):
            raise ValueError('NaN values found in initialization of primals or'
                             ' primals_lb or primals_ub in _PyomoNLPWithGreyBoxBlocks.')

        self._init_duals = BlockVector(len(nlps))
        self._dual_values_blockvector = BlockVector(len(nlps))
        self._constraints_lb = BlockVector(len(nlps))
        self._constraints_ub = BlockVector(len(nlps))
        for i,nlp in enumerate(nlps):
            self._init_duals.set_block(i, nlp.init_duals())
            self._constraints_lb.set_block(i, nlp.constraints_lb())
            self._constraints_ub.set_block(i, nlp.constraints_ub())
            self._dual_values_blockvector.set_block(i, np.nan*np.zeros(nlp.n_constraints()))
        self._init_duals = self._init_duals.flatten()
        self._constraints_lb = self._constraints_lb.flatten()
        self._constraints_ub = self._constraints_ub.flatten()
        # verify that there are no nans in the init_duals
        if np.any(np.isnan(self._init_duals)) \
           or np.any(np.isnan(self._constraints_lb)) \
           or np.any(np.isnan(self._constraints_ub)):
            raise ValueError('NaN values found in initialization of duals or'
                             ' constraints_lb or constraints_ub in'
                             ' _PyomoNLPWithGreyBoxBlocks.')

        self._primal_values = np.nan*np.ones(self._n_primals)
        # set the values of the primals and duals to make sure initial
        # values get all the way through to the underlying models
        self.set_primals(self._init_primals)
        self.set_duals(self._init_duals)
        assert not np.any(np.isnan(self._primal_values))
        assert not np.any(np.isnan(self._dual_values_blockvector))

        # if any of the problem is scaled (i.e., one or more of primals,
        # constraints, or objective), then we want scaling factors for
        # all of them (defaulted to 1)
        need_scaling = False
        # objective is owned by self._pyomo_nlp, not in any of the greybox models
        self._obj_scaling = self._pyomo_nlp.get_obj_scaling()
        if self._obj_scaling is None:
            self._obj_scaling = 1.0
        else:
            need_scaling = True

        self._primals_scaling = np.ones(self.n_primals())
        scaling_suffix = pyomo_model.component('scaling_factor')
        if scaling_suffix and scaling_suffix.ctype is pyo.Suffix:
            need_scaling = True
            for i,v in enumerate(self._pyomo_model_var_datas):
                if v in scaling_suffix:
                    self._primals_scaling[i] = scaling_suffix[v]

        self._constraints_scaling = BlockVector(len(nlps))
        for i,nlp in enumerate(nlps):
            local_constraints_scaling = nlp.get_constraints_scaling()
            if local_constraints_scaling is None:
                self._constraints_scaling.set_block(i, np.ones(nlp.n_constraints()))
            else:
                self._constraints_scaling.set_block(i, local_constraints_scaling)
                need_scaling = True
        if need_scaling:
            self._constraints_scaling = self._constraints_scaling.flatten()
        else:
            self._obj_scaling = None
            self._primals_scaling = None
            self._constraints_scaling = None

        # compute the jacobian and the hessian to get nnz
        jac = self.evaluate_jacobian()
        self._nnz_jacobian = len(jac.data)

        self._sparse_hessian_summation = None
        self._nnz_hessian_lag = None
        if self._has_hessian_support:
            hess = self.evaluate_hessian_lag()
            self._nnz_hessian_lag = len(hess.data)
Example #30
def get_hessian_of_constraint(constraint, wrt1=None, wrt2=None, nlp=None):
    constraints = [constraint]
    if wrt1 is None and wrt2 is None:
        variables = list(
            identify_variables(constraint.expr, include_fixed=False))
        wrt1 = variables
        wrt2 = variables
    elif wrt1 is not None and wrt2 is not None:
        variables = wrt1 + wrt2
    elif wrt1 is not None:  # but wrt2 is None
        wrt2 = wrt1
        variables = wrt1
    else:
        # wrt2 is not None and wrt1 is None
        wrt1 = wrt2
        variables = wrt1

    if nlp is None:
        block = create_subsystem_block(constraints, variables=variables)
        # Could fix input_vars so I don't evaluate the Hessian with respect
        # to variables I don't care about...

        # HUGE HACK: Variables not included in a constraint are not written
        # to the nl file, so we cannot take the derivative with respect to
        # them, even though we know this derivative is zero. To work around,
        # we make sure all variables appear on the block in the form of a
        # dummy constraint. Then we can take derivatives of any constraint
        # with respect to them. Conveniently, the extract_submatrix_
        # call deals with extracting the variables and constraint we care
        # about, in the proper order.
        block._dummy_var = Var()
        block._dummy_con = Constraint(expr=sum(variables) == block._dummy_var)
        block._obj = Objective(expr=0.0)
        nlp = PyomoNLP(block)

    saved_duals = nlp.get_duals()
    saved_obj_factor = nlp.get_obj_factor()
    temp_duals = np.zeros(len(saved_duals))

    # NOTE: This makes some assumption about how the Lagrangian is constructed.
    # TODO: Define the convention we assume and convert if necessary.
    idx = nlp.get_constraint_indices(constraints)[0]
    temp_duals[idx] = 1.0
    nlp.set_duals(temp_duals)
    nlp.set_obj_factor(0.0)

    # NOTE: The returned matrix preserves explicit zeros. I.e. it contains
    # coordinates for every entry that could possibly be nonzero.
    submatrix = nlp.extract_submatrix_hessian_lag(wrt1, wrt2)

    nlp.set_obj_factor(saved_obj_factor)
    nlp.set_duals(saved_duals)
    return submatrix
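A minimal usage sketch for get_hessian_of_constraint with a hypothetical two-variable constraint; the returned COO matrix holds the second derivatives of the constraint body evaluated at the current variable values.

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(initialize=1.0)
m.y = pyo.Var(initialize=2.0)
m.c = pyo.Constraint(expr=m.x**2 * m.y == 8.0)

H = get_hessian_of_constraint(m.c)
# For f = x**2 * y, the Hessian is [[2y, 2x], [2x, 0]] (up to variable order).
print(H.toarray())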