Example #1
    def __init__(self, pyomo_model):
        super(PyomoNLPWithGreyBoxBlocks,self).__init__()

        # get the list of all grey box blocks and build _ExternalGreyBoxAsNLP objects
        greybox_components = []
        # build a map from the names to the variable data objects
        # this is done over *all* variables in active blocks, even
        # if they are not included in this model
        self._pyomo_model_var_names_to_datas = None
        try:
            # We support PyNumero's external grey box models when they are
            # provided through ExternalGreyBoxBlock components.
            # We temporarily reclassify these as Pyomo Block objects before
            # building the PyomoNLP object to expose any variables on the
            # block to the underlying Pyomo machinery.
            for greybox in pyomo_model.component_objects(
                    ExternalGreyBoxBlock, descend_into=True):
                greybox.parent_block().reclassify_component_type(
                    greybox, pyo.Block)
                greybox_components.append(greybox)

            # store the pyomo model
            self._pyomo_model = pyomo_model
            # build a PyomoNLP object (will include the "pyomo"
            # part of the model only)
            self._pyomo_nlp = PyomoNLP(pyomo_model)
            self._pyomo_model_var_names_to_datas = {
                v.getname(fully_qualified=True): v
                for v in pyomo_model.component_data_objects(
                    ctype=pyo.Var, descend_into=True)
            }
            self._pyomo_model_constraint_names_to_datas = {
                c.getname(fully_qualified=True): c
                for c in pyomo_model.component_data_objects(
                    ctype=pyo.Constraint, descend_into=True)
            }

        finally:
            # Restore the ctypes of the ExternalGreyBoxBlock components
            for greybox in greybox_components:
                greybox.parent_block().reclassify_component_type(
                    greybox, ExternalGreyBoxBlock)

        if self._pyomo_nlp.n_primals() == 0:
            raise ValueError(
                "No variables were found in the Pyomo part of the model."
                " PyomoGreyBoxModel requires at least one variable"
                " to be active in a Pyomo objective or constraint")

        # build the list of NLP wrappers for the greybox objects
        greybox_nlps = []
        fixed_vars = []
        for greybox in greybox_components:
            # iterate through the data objects if component is indexed
            for data in greybox.values():
                if data.active:
                    # check that no variables are fixed
                    fixed_vars.extend(v for v in data.inputs.values() if v.fixed)
                    fixed_vars.extend(v for v in data.outputs.values() if v.fixed)
                    greybox_nlp = _ExternalGreyBoxAsNLP(data)
                    greybox_nlps.append(greybox_nlp)

        if fixed_vars:
            logging.getLogger(__name__).error(
                'PyomoNLPWithGreyBoxBlocks found fixed variables for the'
                ' inputs and/or outputs of an ExternalGreyBoxBlock. This'
                ' is not currently supported. The fixed variables were:\n\t'
                + '\n\t'.join(f.getname(fully_qualified=True) for f in fixed_vars)
            )
            raise NotImplementedError(
                'PyomoNLPWithGreyBoxBlocks does not support fixed inputs'
                ' or outputs')

        # let's build up the union of all the primal variables names
        # RBP: Why use names here? Why not just ComponentSet of all
        # data objects?
        primals_names = set(self._pyomo_nlp.primals_names())
        for gbnlp in greybox_nlps:
            primals_names.update(gbnlp.primals_names())

        # sort the names for consistency run to run
        self._n_primals = len(primals_names)
        self._primals_names = primals_names = sorted(primals_names)
        self._pyomo_model_var_datas = [self._pyomo_model_var_names_to_datas[nm] for nm in self._primals_names]

        # get the names of all the constraints
        self._constraint_names = list(self._pyomo_nlp.constraint_names())
        self._constraint_datas = [self._pyomo_model_constraint_names_to_datas.get(nm) for nm in self._constraint_names]
        for gbnlp in greybox_nlps:
            self._constraint_names.extend(gbnlp.constraint_names())
            self._constraint_datas.extend([(gbnlp._block, nm) for nm in gbnlp.constraint_names()])
        self._n_constraints = len(self._constraint_names)

        self._has_hessian_support = True
        for nlp in greybox_nlps:
            if not nlp.has_hessian_support():
                self._has_hessian_support = False

        # wrap all the nlp objects with projected nlp objects
        self._pyomo_nlp = ProjectedNLP(self._pyomo_nlp, primals_names)
        for i,gbnlp in enumerate(greybox_nlps):
            greybox_nlps[i] = ProjectedNLP(greybox_nlps[i], primals_names)

        # build a list of all the nlps in order
        self._nlps = nlps = [self._pyomo_nlp]
        nlps.extend(greybox_nlps)

        # build the primal and dual inits and lb, ub vectors
        self._init_primals = self._pyomo_nlp.init_primals()
        self._primals_lb = self._pyomo_nlp.primals_lb()
        self._primals_ub = self._pyomo_nlp.primals_ub()
        for gbnlp in greybox_nlps:
            local = gbnlp.init_primals()
            mask = ~np.isnan(local)
            self._init_primals[mask] = local[mask]

            local = gbnlp.primals_lb()
            mask = ~np.isnan(local)
            self._primals_lb[mask] = np.maximum(self._primals_lb[mask], local[mask])

            local = gbnlp.primals_ub()
            mask = ~np.isnan(local)
            self._primals_ub[mask] = np.minimum(self._primals_ub[mask], local[mask])

        # all the nan's should be gone (every primal should be initialized)
        if np.any(np.isnan(self._init_primals)) \
           or np.any(np.isnan(self._primals_lb)) \
           or np.any(np.isnan(self._primals_ub)):
            raise ValueError('NaN values found in initialization of primals or'
                             ' primals_lb or primals_ub in PyomoNLPWithGreyBoxBlocks.')

        self._init_duals = BlockVector(len(nlps))
        self._dual_values_blockvector = BlockVector(len(nlps))
        self._constraints_lb = BlockVector(len(nlps))
        self._constraints_ub = BlockVector(len(nlps))
        for i,nlp in enumerate(nlps):
            self._init_duals.set_block(i, nlp.init_duals())
            self._constraints_lb.set_block(i, nlp.constraints_lb())
            self._constraints_ub.set_block(i, nlp.constraints_ub())
            self._dual_values_blockvector.set_block(i, np.nan*np.zeros(nlp.n_constraints()))
        self._init_duals = self._init_duals.flatten()
        self._constraints_lb = self._constraints_lb.flatten()
        self._constraints_ub = self._constraints_ub.flatten()
        # verify that there are no NaNs in the duals or constraint bounds
        if np.any(np.isnan(self._init_duals)) \
           or np.any(np.isnan(self._constraints_lb)) \
           or np.any(np.isnan(self._constraints_ub)):
            raise ValueError('NaN values found in initialization of duals or'
                             ' constraints_lb or constraints_ub in'
                             ' PyomoNLPWithGreyBoxBlocks.')

        self._primal_values = np.nan*np.ones(self._n_primals)
        # set the values of the primals and duals to make sure initial
        # values get all the way through to the underlying models
        self.set_primals(self._init_primals)
        self.set_duals(self._init_duals)
        assert not np.any(np.isnan(self._primal_values))
        assert not np.any(np.isnan(self._dual_values_blockvector))

        # if any of the problem is scaled (i.e., one or more of primals,
        # constraints, or objective), then we want scaling factors for
        # all of them (defaulted to 1)
        need_scaling = False
        # objective is owned by self._pyomo_nlp, not in any of the greybox models
        self._obj_scaling = self._pyomo_nlp.get_obj_scaling()
        if self._obj_scaling is None:
            self._obj_scaling = 1.0
        else:
            need_scaling = True

        self._primals_scaling = np.ones(self.n_primals())
        scaling_suffix = pyomo_model.component('scaling_factor')
        if scaling_suffix and scaling_suffix.ctype is pyo.Suffix:
            need_scaling = True
            for i,v in enumerate(self._pyomo_model_var_datas):
                if v in scaling_suffix:
                    self._primals_scaling[i] = scaling_suffix[v]

        self._constraints_scaling = BlockVector(len(nlps))
        for i,nlp in enumerate(nlps):
            local_constraints_scaling = nlp.get_constraints_scaling()
            if local_constraints_scaling is None:
                self._constraints_scaling.set_block(i, np.ones(nlp.n_constraints()))
            else:
                self._constraints_scaling.set_block(i, local_constraints_scaling)
                need_scaling = True
        if need_scaling:
            self._constraints_scaling = self._constraints_scaling.flatten()
        else:
            self._obj_scaling = None
            self._primals_scaling = None
            self._constraints_scaling = None

        # compute the jacobian and the hessian to get nnz
        jac = self.evaluate_jacobian()
        self._nnz_jacobian = len(jac.data)

        self._sparse_hessian_summation = None
        self._nnz_hessian_lag = None
        if self._has_hessian_support:
            hess = self.evaluate_hessian_lag()
            self._nnz_hessian_lag = len(hess.data)
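
The NaN-masked merging above (for init_primals, primals_lb, and primals_ub) is how the parent NLP reconciles values reported by several sub-NLPs over the union of primals. A minimal standalone sketch of the pattern, assuming only NumPy (the array values are illustrative, not taken from the class):

import numpy as np

# Parent lower bounds over the union of primals; a sub-NLP reports bounds
# only for the primals it knows about and NaN for the rest.
parent_lb = np.asarray([0.0, -1.0, -10.0])
local_lb = np.asarray([0.5, np.nan, 2.0])  # NaN = "no opinion" from this sub-NLP

# Keep the tightest (largest) lower bound wherever the sub-NLP has an opinion.
mask = ~np.isnan(local_lb)
parent_lb[mask] = np.maximum(parent_lb[mask], local_lb[mask])
print(parent_lb)  # [ 0.5 -1.   2. ]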
Example #2
    def test_projected(self):
        m = create_pyomo_model()
        nlp = PyomoNLP(m)
        projected_nlp = ProjectedNLP(nlp, ['x[0]', 'x[1]', 'x[2]'])
        expected_names = ['x[0]', 'x[1]', 'x[2]']
        self.assertEqual(projected_nlp.primals_names(), expected_names)
        self.assertTrue(
            np.array_equal(projected_nlp.get_primals(),
                           np.asarray([1.0, 2.0, 4.0])))
        self.assertTrue(
            np.array_equal(projected_nlp.evaluate_grad_objective(),
                           np.asarray([8.0, 1.0, 9.0])))
        self.assertEqual(projected_nlp.nnz_jacobian(), 5)
        self.assertEqual(projected_nlp.nnz_hessian_lag(), 6)

        J = projected_nlp.evaluate_jacobian()
        self.assertEqual(len(J.data), 5)
        denseJ = J.todense()
        expected_jac = np.asarray([[6.0, 1.0, 1.0], [1.0, 0.0, 1.0]])
        self.assertTrue(np.array_equal(denseJ, expected_jac))

        # test the use of "out"
        J = 0.0 * J
        projected_nlp.evaluate_jacobian(out=J)
        denseJ = J.todense()
        self.assertTrue(np.array_equal(denseJ, expected_jac))

        H = projected_nlp.evaluate_hessian_lag()
        self.assertEqual(len(H.data), 6)
        expectedH = np.asarray([[2.0, 1.0, 1.0], [1.0, 0.0, 0.0],
                                [1.0, 0.0, 2.0]])
        denseH = H.todense()
        self.assertTrue(np.array_equal(denseH, expectedH))

        # test the use of "out"
        H = 0.0 * H
        projected_nlp.evaluate_hessian_lag(out=H)
        denseH = H.todense()
        self.assertTrue(np.array_equal(denseH, expectedH))

        # now test a reordering
        projected_nlp = ProjectedNLP(nlp, ['x[0]', 'x[2]', 'x[1]'])
        expected_names = ['x[0]', 'x[2]', 'x[1]']
        self.assertEqual(projected_nlp.primals_names(), expected_names)
        self.assertTrue(
            np.array_equal(projected_nlp.get_primals(),
                           np.asarray([1.0, 4.0, 2.0])))
        self.assertTrue(
            np.array_equal(projected_nlp.evaluate_grad_objective(),
                           np.asarray([8.0, 9.0, 1.0])))
        self.assertEqual(projected_nlp.nnz_jacobian(), 5)
        self.assertEqual(projected_nlp.nnz_hessian_lag(), 6)

        J = projected_nlp.evaluate_jacobian()
        self.assertEqual(len(J.data), 5)
        denseJ = J.todense()
        expected_jac = np.asarray([[6.0, 1.0, 1.0], [1.0, 1.0, 0.0]])
        self.assertTrue(np.array_equal(denseJ, expected_jac))

        # test the use of "out"
        J = 0.0 * J
        projected_nlp.evaluate_jacobian(out=J)
        denseJ = J.todense()
        self.assertTrue(np.array_equal(denseJ, expected_jac))

        H = projected_nlp.evaluate_hessian_lag()
        self.assertEqual(len(H.data), 6)
        expectedH = np.asarray([[2.0, 1.0, 1.0], [1.0, 2.0, 0.0],
                                [1.0, 0.0, 0.0]])
        denseH = H.todense()
        self.assertTrue(np.array_equal(denseH, expectedH))

        # test the use of "out"
        H = 0.0 * H
        projected_nlp.evaluate_hessian_lag(out=H)
        denseH = H.todense()
        self.assertTrue(np.array_equal(denseH, expectedH))

        # now test an expansion
        projected_nlp = ProjectedNLP(nlp, ['x[0]', 'x[2]', 'y', 'x[1]'])
        expected_names = ['x[0]', 'x[2]', 'y', 'x[1]']
        self.assertEqual(projected_nlp.primals_names(), expected_names)
        np.testing.assert_equal(projected_nlp.get_primals(),
                                np.asarray([1.0, 4.0, np.nan, 2.0]))

        self.assertTrue(
            np.array_equal(projected_nlp.evaluate_grad_objective(),
                           np.asarray([8.0, 9.0, 0.0, 1.0])))
        self.assertEqual(projected_nlp.nnz_jacobian(), 5)
        self.assertEqual(projected_nlp.nnz_hessian_lag(), 6)

        J = projected_nlp.evaluate_jacobian()
        self.assertEqual(len(J.data), 5)
        denseJ = J.todense()
        expected_jac = np.asarray([[6.0, 1.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0]])
        self.assertTrue(np.array_equal(denseJ, expected_jac))

        # test the use of "out"
        J = 0.0 * J
        projected_nlp.evaluate_jacobian(out=J)
        denseJ = J.todense()
        self.assertTrue(np.array_equal(denseJ, expected_jac))

        H = projected_nlp.evaluate_hessian_lag()
        self.assertEqual(len(H.data), 6)
        expectedH = np.asarray([[2.0, 1.0, 0.0, 1.0], [1.0, 2.0, 0.0, 0.0],
                                [0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]])
        denseH = H.todense()
        self.assertTrue(np.array_equal(denseH, expectedH))

        # test the use of "out"
        H = 0.0 * H
        projected_nlp.evaluate_hessian_lag(out=H)
        denseH = H.todense()
        self.assertTrue(np.array_equal(denseH, expectedH))

        # now test a restriction (projection onto a subset of the primals)
        projected_nlp = ProjectedNLP(nlp, ['x[0]', 'x[2]'])
        expected_names = ['x[0]', 'x[2]']
        self.assertEqual(projected_nlp.primals_names(), expected_names)
        np.testing.assert_equal(projected_nlp.get_primals(),
                                np.asarray([1.0, 4.0]))

        self.assertTrue(
            np.array_equal(projected_nlp.evaluate_grad_objective(),
                           np.asarray([8.0, 9.0])))
        self.assertEqual(projected_nlp.nnz_jacobian(), 4)
        self.assertEqual(projected_nlp.nnz_hessian_lag(), 4)

        J = projected_nlp.evaluate_jacobian()
        self.assertEqual(len(J.data), 4)
        denseJ = J.todense()
        expected_jac = np.asarray([[6.0, 1.0], [1.0, 1.0]])
        self.assertTrue(np.array_equal(denseJ, expected_jac))

        # test the use of "out"
        J = 0.0 * J
        projected_nlp.evaluate_jacobian(out=J)
        denseJ = J.todense()
        self.assertTrue(np.array_equal(denseJ, expected_jac))

        H = projected_nlp.evaluate_hessian_lag()
        self.assertEqual(len(H.data), 4)
        expectedH = np.asarray([[2.0, 1.0], [1.0, 2.0]])
        denseH = H.todense()
        self.assertTrue(np.array_equal(denseH, expectedH))

        # test the use of "out"
        H = 0.0 * H
        projected_nlp.evaluate_hessian_lag(out=H)
        denseH = H.todense()
        self.assertTrue(np.array_equal(denseH, expectedH))
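
The reordering and expansion behavior exercised above comes from how ProjectedNLP maps the original primals onto the projected name list: known names are permuted, and names the original NLP does not provide (like 'y') show up as NaN in get_primals and as structurally empty rows/columns in the derivative objects. A self-contained sketch of that mapping, assuming only NumPy (project is a hypothetical helper, not part of PyNumero):

import numpy as np

def project(values, original_names, projected_names):
    # Map values from the original ordering into the projected ordering,
    # padding names the original NLP does not know about with NaN.
    lookup = {name: i for i, name in enumerate(original_names)}
    out = np.full(len(projected_names), np.nan)
    for j, name in enumerate(projected_names):
        if name in lookup:
            out[j] = values[lookup[name]]
    return out

vals = np.asarray([1.0, 2.0, 4.0])  # x[0], x[1], x[2]
print(project(vals, ['x[0]', 'x[1]', 'x[2]'], ['x[0]', 'x[2]', 'y', 'x[1]']))
# -> [ 1.  4. nan  2.], matching the expansion case in the test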
Example #3
class PyomoNLPWithGreyBoxBlocks(NLP):
    def __init__(self, pyomo_model):
        super(PyomoNLPWithGreyBoxBlocks,self).__init__()

        # get the list of all grey box blocks and build _ExternalGreyBoxAsNLP objects
        greybox_components = []
        # build a map from the names to the variable data objects
        # this is done over *all* variables in active blocks, even
        # if they are not included in this model
        self._pyomo_model_var_names_to_datas = None
        try:
            # We support PyNumero's external grey box models when they are
            # provided through ExternalGreyBoxBlock components.
            # We temporarily reclassify these as Pyomo Block objects before
            # building the PyomoNLP object to expose any variables on the
            # block to the underlying Pyomo machinery.
            for greybox in pyomo_model.component_objects(
                    ExternalGreyBoxBlock, descend_into=True):
                greybox.parent_block().reclassify_component_type(
                    greybox, pyo.Block)
                greybox_components.append(greybox)

            # store the pyomo model
            self._pyomo_model = pyomo_model
            # build a PyomoNLP object (will include the "pyomo"
            # part of the model only)
            self._pyomo_nlp = PyomoNLP(pyomo_model)
            self._pyomo_model_var_names_to_datas = {
                v.getname(fully_qualified=True): v
                for v in pyomo_model.component_data_objects(
                    ctype=pyo.Var, descend_into=True)
            }
            self._pyomo_model_constraint_names_to_datas = {
                c.getname(fully_qualified=True): c
                for c in pyomo_model.component_data_objects(
                    ctype=pyo.Constraint, descend_into=True)
            }

        finally:
            # Restore the ctypes of the ExternalGreyBoxBlock components
            for greybox in greybox_components:
                greybox.parent_block().reclassify_component_type(
                    greybox, ExternalGreyBoxBlock)

        if self._pyomo_nlp.n_primals() == 0:
            raise ValueError(
                "No variables were found in the Pyomo part of the model."
                " PyomoGreyBoxModel requires at least one variable"
                " to be active in a Pyomo objective or constraint")

        # build the list of NLP wrappers for the greybox objects
        greybox_nlps = []
        fixed_vars = []
        for greybox in greybox_components:
            # iterate through the data objects if component is indexed
            for data in greybox.values():
                if data.active:
                    # check that no variables are fixed
                    fixed_vars.extend(v for v in data.inputs.values() if v.fixed)
                    fixed_vars.extend(v for v in data.outputs.values() if v.fixed)
                    greybox_nlp = _ExternalGreyBoxAsNLP(data)
                    greybox_nlps.append(greybox_nlp)

        if fixed_vars:
            logging.getLogger(__name__).error(
                'PyomoNLPWithGreyBoxBlocks found fixed variables for the'
                ' inputs and/or outputs of an ExternalGreyBoxBlock. This'
                ' is not currently supported. The fixed variables were:\n\t'
                + '\n\t'.join(f.getname(fully_qualified=True) for f in fixed_vars)
            )
            raise NotImplementedError(
                'PyomoNLPWithGreyBoxBlocks does not support fixed inputs'
                ' or outputs')

        # let's build up the union of all the primal variables names
        # RBP: Why use names here? Why not just ComponentSet of all
        # data objects?
        primals_names = set(self._pyomo_nlp.primals_names())
        for gbnlp in greybox_nlps:
            primals_names.update(gbnlp.primals_names())

        # sort the names for consistency run to run
        self._n_primals = len(primals_names)
        self._primals_names = primals_names = sorted(primals_names)
        self._pyomo_model_var_datas = [self._pyomo_model_var_names_to_datas[nm] for nm in self._primals_names]

        # get the names of all the constraints
        self._constraint_names = list(self._pyomo_nlp.constraint_names())
        self._constraint_datas = [self._pyomo_model_constraint_names_to_datas.get(nm) for nm in self._constraint_names]
        for gbnlp in greybox_nlps:
            self._constraint_names.extend(gbnlp.constraint_names())
            self._constraint_datas.extend([(gbnlp._block, nm) for nm in gbnlp.constraint_names()])
        self._n_constraints = len(self._constraint_names)

        self._has_hessian_support = True
        for nlp in greybox_nlps:
            if not nlp.has_hessian_support():
                self._has_hessian_support = False

        # wrap all the nlp objects with projected nlp objects
        self._pyomo_nlp = ProjectedNLP(self._pyomo_nlp, primals_names)
        for i,gbnlp in enumerate(greybox_nlps):
            greybox_nlps[i] = ProjectedNLP(greybox_nlps[i], primals_names)

        # build a list of all the nlps in order
        self._nlps = nlps = [self._pyomo_nlp]
        nlps.extend(greybox_nlps)

        # build the primal and dual inits and lb, ub vectors
        self._init_primals = self._pyomo_nlp.init_primals()
        self._primals_lb = self._pyomo_nlp.primals_lb()
        self._primals_ub = self._pyomo_nlp.primals_ub()
        for gbnlp in greybox_nlps:
            local = gbnlp.init_primals()
            mask = ~np.isnan(local)
            self._init_primals[mask] = local[mask]

            local = gbnlp.primals_lb()
            mask = ~np.isnan(local)
            self._primals_lb[mask] = np.maximum(self._primals_lb[mask], local[mask])

            local = gbnlp.primals_ub()
            mask = ~np.isnan(local)
            self._primals_ub[mask] = np.minimum(self._primals_ub[mask], local[mask])

        # all the nan's should be gone (every primal should be initialized)
        if np.any(np.isnan(self._init_primals)) \
           or np.any(np.isnan(self._primals_lb)) \
           or np.any(np.isnan(self._primals_ub)):
            raise ValueError('NaN values found in initialization of primals or'
                             ' primals_lb or primals_ub in PyomoNLPWithGreyBoxBlocks.')

        self._init_duals = BlockVector(len(nlps))
        self._dual_values_blockvector = BlockVector(len(nlps))
        self._constraints_lb = BlockVector(len(nlps))
        self._constraints_ub = BlockVector(len(nlps))
        for i,nlp in enumerate(nlps):
            self._init_duals.set_block(i, nlp.init_duals())
            self._constraints_lb.set_block(i, nlp.constraints_lb())
            self._constraints_ub.set_block(i, nlp.constraints_ub())
            self._dual_values_blockvector.set_block(i, np.nan*np.zeros(nlp.n_constraints()))
        self._init_duals = self._init_duals.flatten()
        self._constraints_lb = self._constraints_lb.flatten()
        self._constraints_ub = self._constraints_ub.flatten()
        # verify that there are no NaNs in the duals or constraint bounds
        if np.any(np.isnan(self._init_duals)) \
           or np.any(np.isnan(self._constraints_lb)) \
           or np.any(np.isnan(self._constraints_ub)):
            raise ValueError('NaN values found in initialization of duals or'
                             ' constraints_lb or constraints_ub in'
                             ' PyomoNLPWithGreyBoxBlocks.')

        self._primal_values = np.nan*np.ones(self._n_primals)
        # set the values of the primals and duals to make sure initial
        # values get all the way through to the underlying models
        self.set_primals(self._init_primals)
        self.set_duals(self._init_duals)
        assert not np.any(np.isnan(self._primal_values))
        assert not np.any(np.isnan(self._dual_values_blockvector))

        # if any of the problem is scaled (i.e., one or more of primals,
        # constraints, or objective), then we want scaling factors for
        # all of them (defaulted to 1)
        need_scaling = False
        # objective is owned by self._pyomo_nlp, not in any of the greybox models
        self._obj_scaling = self._pyomo_nlp.get_obj_scaling()
        if self._obj_scaling is None:
            self._obj_scaling = 1.0
        else:
            need_scaling = True

        self._primals_scaling = np.ones(self.n_primals())
        scaling_suffix = pyomo_model.component('scaling_factor')
        if scaling_suffix and scaling_suffix.ctype is pyo.Suffix:
            need_scaling = True
            for i,v in enumerate(self._pyomo_model_var_datas):
                if v in scaling_suffix:
                    self._primals_scaling[i] = scaling_suffix[v]

        self._constraints_scaling = BlockVector(len(nlps))
        for i,nlp in enumerate(nlps):
            local_constraints_scaling = nlp.get_constraints_scaling()
            if local_constraints_scaling is None:
                self._constraints_scaling.set_block(i, np.ones(nlp.n_constraints()))
            else:
                self._constraints_scaling.set_block(i, local_constraints_scaling)
                need_scaling = True
        if need_scaling:
            self._constraints_scaling = self._constraints_scaling.flatten()
        else:
            self._obj_scaling = None
            self._primals_scaling = None
            self._constraints_scaling = None

        # compute the jacobian and the hessian to get nnz
        jac = self.evaluate_jacobian()
        self._nnz_jacobian = len(jac.data)

        self._sparse_hessian_summation = None
        self._nnz_hessian_lag = None
        if self._has_hessian_support:
            hess = self.evaluate_hessian_lag()
            self._nnz_hessian_lag = len(hess.data)

    # overloaded from NLP
    def n_primals(self):
        return self._n_primals

    # overloaded from NLP
    def primals_names(self):
        return self._primals_names

    # overloaded from NLP
    def n_constraints(self):
        return self._n_constraints

    # overloaded from NLP
    def constraint_names(self):
        return self._constraint_names

    # overloaded from NLP
    def nnz_jacobian(self):
        return self._nnz_jacobian

    # overloaded from NLP
    def nnz_hessian_lag(self):
        return self._nnz_hessian_lag

    # overloaded from NLP
    def primals_lb(self):
        return self._primals_lb

    # overloaded from NLP
    def primals_ub(self):
        return self._primals_ub

    # overloaded from NLP
    def constraints_lb(self):
        return self._constraints_lb

    # overloaded from NLP
    def constraints_ub(self):
        return self._constraints_ub

    # overloaded from NLP
    def init_primals(self):
        return self._init_primals

    # overloaded from NLP
    def init_duals(self):
        return self._init_duals

    # overloaded from NLP / Extended NLP
    def create_new_vector(self, vector_type):
        if vector_type == 'primals':
            return np.zeros(self.n_primals(), dtype=np.float64)
        elif vector_type == 'constraints' or vector_type == 'duals':
            return np.zeros(self.n_constraints(), dtype=np.float64)
        else:
            raise RuntimeError('Called create_new_vector with an unknown vector_type')

    # overloaded from NLP
    def set_primals(self, primals):
        np.copyto(self._primal_values, primals)
        for nlp in self._nlps:
            nlp.set_primals(primals)

    # overloaded from AslNLP
    def get_primals(self):
        return np.copy(self._primal_values)

    # overloaded from NLP
    def set_duals(self, duals):
        self._dual_values_blockvector.copyfrom(duals)
        for i,nlp in enumerate(self._nlps):
            nlp.set_duals(self._dual_values_blockvector.get_block(i))

    # overloaded from NLP
    def get_duals(self):
        return self._dual_values_blockvector.flatten()

    # overloaded from NLP
    def set_obj_factor(self, obj_factor):
        # objective is owned by the pyomo model
        self._pyomo_nlp.set_obj_factor(obj_factor)

    # overloaded from NLP
    def get_obj_factor(self):
        # objective is owned by the pyomo model
        return self._pyomo_nlp.get_obj_factor()

    # overloaded from NLP
    def get_obj_scaling(self):
        return self._obj_scaling

    # overloaded from NLP
    def get_primals_scaling(self):
        return self._primals_scaling

    # overloaded from NLP
    def get_constraints_scaling(self):
        return self._constraints_scaling

    # overloaded from NLP
    def evaluate_objective(self):
        # objective is owned by the pyomo model
        return self._pyomo_nlp.evaluate_objective()

    # overloaded from NLP
    def evaluate_grad_objective(self, out=None):
        return self._pyomo_nlp.evaluate_grad_objective(out=out)

    # overloaded from NLP
    def evaluate_constraints(self, out=None):
        # todo: implement the "out" version more efficiently
        ret = BlockVector(len(self._nlps))
        for i,nlp in enumerate(self._nlps):
            ret.set_block(i, nlp.evaluate_constraints())

        if out is not None:
            ret.copyto(out)
            return out

        return ret.flatten()

    # overloaded from NLP
    def evaluate_jacobian(self, out=None):
        ret = BlockMatrix(len(self._nlps),1)
        for i,nlp in enumerate(self._nlps):
            ret.set_block(i, 0, nlp.evaluate_jacobian())
        ret = ret.tocoo()

        if out is not None:
            assert np.array_equal(ret.row, out.row)
            assert np.array_equal(ret.col, out.col)
            np.copyto(out.data, ret.data)
            return out
        return ret

    def evaluate_hessian_lag(self, out=None):
        list_of_hessians = [nlp.evaluate_hessian_lag() for nlp in self._nlps]
        if self._sparse_hessian_summation is None:
            # This is assuming that the nonzero structures of Hessians
            # do not change
            self._sparse_hessian_summation = CondensedSparseSummation(list_of_hessians)
        ret = self._sparse_hessian_summation.sum(list_of_hessians)

        if out is not None:
            assert np.array_equal(ret.row, out.row)
            assert np.array_equal(ret.col, out.col)
            np.copyto(out.data, ret.data)
            return out
        return ret

    def report_solver_status(self, status_code, status_message):
        raise NotImplementedError('This is not yet implemented.')

    def load_state_into_pyomo(self, bound_multipliers=None):
        # load the values of the primals into the pyomo
        primals = self.get_primals()
        for value,vardata in zip(primals, self._pyomo_model_var_datas):
            vardata.set_value(value)

        # get the active suffixes
        m = self._pyomo_model
        model_suffixes = dict(
            pyo.suffix.active_import_suffix_generator(m))

        # We need to correct the sign of the multipliers based on whether we
        # are minimizing or maximizing. This is done in the ASL interface for
        # Ipopt, but does not appear to be done in cyipopt.
        obj_sign = 1.0
        objs = list(m.component_data_objects(ctype=pyo.Objective, descend_into=True))
        assert len(objs) == 1
        if objs[0].sense == pyo.maximize:
            obj_sign = -1.0

        if 'dual' in model_suffixes:
            model_suffixes['dual'].clear()
            dual_values = self._dual_values_blockvector.flatten()
            for value,t in zip(dual_values, self._constraint_datas):
                if type(t) is tuple:
                    model_suffixes['dual'].setdefault(t[0], {})[t[1]] = -obj_sign*value
                else:
                    # t is a constraint data
                    model_suffixes['dual'][t] = -obj_sign*value

        if 'ipopt_zL_out' in model_suffixes:
            model_suffixes['ipopt_zL_out'].clear()
            if bound_multipliers is not None:
                model_suffixes['ipopt_zL_out'].update(
                    zip(self._pyomo_model_var_datas, obj_sign*bound_multipliers[0]))
        if 'ipopt_zU_out' in model_suffixes:
            model_suffixes['ipopt_zU_out'].clear()
            if bound_multipliers is not None:
                model_suffixes['ipopt_zU_out'].update(
                    zip(self._pyomo_model_var_datas, -obj_sign*bound_multipliers[1]))
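
Note that load_state_into_pyomo only writes multipliers into suffixes already declared for import on the model. A minimal sketch of the declarations a caller would make beforehand, assuming the standard suffix names referenced in the method:

import pyomo.environ as pyo

m = pyo.ConcreteModel()
# Suffixes found by active_import_suffix_generator in load_state_into_pyomo:
m.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT)          # constraint multipliers
m.ipopt_zL_out = pyo.Suffix(direction=pyo.Suffix.IMPORT)  # lower-bound multipliers
m.ipopt_zU_out = pyo.Suffix(direction=pyo.Suffix.IMPORT)  # upper-bound multipliers
# ... build the model, construct the NLP, and solve ...
# nlp.load_state_into_pyomo(bound_multipliers=(zl_vector, zu_vector))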
Example #4
    def __init__(
        self,
        input_vars,
        external_vars,
        residual_cons,
        external_cons,
        use_cyipopt=None,
        solver=None,
    ):
        """
        Arguments:
        ----------
        input_vars: list
            List of variables sent to this system by the outer solver
        external_vars: list
            List of variables that are solved for internally by this system
        residual_cons: list
            List of equality constraints whose residuals are exposed to
            the outer solver
        external_cons: list
            List of equality constraints used to solve for the external
            variables
        use_cyipopt: bool
            Whether to use CyIpopt to solve strongly connected components of
            the implicit function that have dimension greater than one.
        solver: Pyomo solver object
            Used to solve strongly connected components of the implicit function
            that have dimension greater than one. Only used if use_cyipopt
            is False.

        """
        if use_cyipopt is None:
            use_cyipopt = cyipopt_available
        if use_cyipopt and not cyipopt_available:
            raise RuntimeError(
                "Constructing an ExternalPyomoModel with CyIpopt unavailable. "
                "Please set the use_cyipopt argument to False.")
        if solver is not None and use_cyipopt:
            raise RuntimeError(
                "Constructing an ExternalPyomoModel with a solver specified "
                "and use_cyipopt set to True. Please set use_cyipopt to False "
                "to use the desired solver.")
        elif solver is None and not use_cyipopt:
            solver = SolverFactory("ipopt")
        # If use_cyipopt is True, this solver is None and will not be used.
        self._solver = solver
        self._use_cyipopt = use_cyipopt

        # We only need this block to construct the NLP, which wouldn't
        # be necessary if we could compute Hessians of Pyomo constraints.
        self._block = create_subsystem_block(
            residual_cons + external_cons,
            input_vars + external_vars,
        )
        self._block._obj = Objective(expr=0.0)
        self._nlp = PyomoNLP(self._block)

        self._scc_list = list(
            generate_strongly_connected_components(external_cons,
                                                   variables=external_vars))

        if use_cyipopt:
            # Using CyIpopt allows us to solve inner problems without
            # costly rewriting of the nl file. It requires quite a bit
            # of preprocessing, however, to construct the ProjectedNLP
            # for each block of the decomposition.

            # Get "vector-valued" SCCs, those of dimension > 0.
            # We will solve these with a direct IPOPT interface, which requires
            # some preprocessing.
            self._vector_scc_list = [(scc, inputs)
                                     for scc, inputs in self._scc_list
                                     if len(scc.vars) > 1]

            # Need a dummy objective to create an NLP
            for scc, inputs in self._vector_scc_list:
                scc._obj = Objective(expr=0.0)

                # We need a scaling_factor suffix so the Pyomo NLPs created
                # from these blocks don't break when ProjectedNLP calls
                # get_primals_scaling.
                scc.scaling_factor = Suffix(direction=Suffix.EXPORT)
                # HACK: scaling_factor just needs to be nonempty.
                scc.scaling_factor[scc._obj] = 1.0

            # These are the "original NLPs" that will be projected
            self._vector_scc_nlps = [
                PyomoNLP(scc) for scc, inputs in self._vector_scc_list
            ]
            self._vector_scc_var_names = [[
                var.name for var in scc.vars.values()
            ] for scc, inputs in self._vector_scc_list]
            self._vector_proj_nlps = [
                ProjectedNLP(nlp, names) for nlp, names in zip(
                    self._vector_scc_nlps, self._vector_scc_var_names)
            ]

            # We will solve the ProjectedNLPs rather than the original NLPs
            self._cyipopt_nlps = [
                CyIpoptNLP(nlp) for nlp in self._vector_proj_nlps
            ]
            self._cyipopt_solvers = [
                CyIpoptSolver(nlp) for nlp in self._cyipopt_nlps
            ]
            self._vector_scc_input_coords = [
                nlp.get_primal_indices(inputs) for nlp, (scc, inputs) in zip(
                    self._vector_scc_nlps, self._vector_scc_list)
            ]

        assert len(external_vars) == len(external_cons)

        self.input_vars = input_vars
        self.external_vars = external_vars
        self.residual_cons = residual_cons
        self.external_cons = external_cons

        self.residual_con_multipliers = [None for _ in residual_cons]
        self.residual_scaling_factors = None
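
A hedged construction sketch for this wrapper (the class name ExternalPyomoModel matches the error messages in __init__; the import path and the tiny two-variable model are assumptions for illustration):

import pyomo.environ as pyo
from pyomo.contrib.pynumero.interfaces.external_pyomo_model import (
    ExternalPyomoModel,
)

m = pyo.ConcreteModel()
m.x = pyo.Var(initialize=1.0)  # input: controlled by the outer solver
m.y = pyo.Var(initialize=1.0)  # external: solved internally from external_cons
m.ext_con = pyo.Constraint(expr=m.y**3 + m.y == m.x)  # defines y implicitly
m.res_con = pyo.Constraint(expr=m.x + m.y == 3.0)     # residual seen outside

epm = ExternalPyomoModel(
    input_vars=[m.x],
    external_vars=[m.y],
    residual_cons=[m.res_con],
    external_cons=[m.ext_con],
    use_cyipopt=False,  # fall back to the SolverFactory('ipopt') inner solver
)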