# Imports reconstructed for this excerpt; paths assume Pyomo's PyNumero
# package layout. _ExternalGreyBoxAsNLP is assumed to be defined alongside
# this class in the same module.
import logging

import numpy as np

import pyomo.environ as pyo
from pyomo.contrib.pynumero.interfaces.nlp import NLP
from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP
from pyomo.contrib.pynumero.interfaces.nlp_projections import ProjectedNLP
from pyomo.contrib.pynumero.interfaces.external_grey_box import (
    ExternalGreyBoxBlock)
from pyomo.contrib.pynumero.interfaces.utils import CondensedSparseSummation
from pyomo.contrib.pynumero.sparse import BlockVector, BlockMatrix


class PyomoNLPWithGreyBoxBlocks(NLP):
    def __init__(self, pyomo_model):
        super(PyomoNLPWithGreyBoxBlocks, self).__init__()

        # get the list of all grey box blocks and build
        # _ExternalGreyBoxAsNLP objects
        greybox_components = []
        # build a map from the names to the variable data objects;
        # this is done over *all* variables in active blocks, even
        # if they are not included in this model
        self._pyomo_model_var_names_to_datas = None
        try:
            # We support PyNumero's ExternalGreyBoxModel objects that are
            # provided through ExternalGreyBoxBlock components.
            # We reclassify these as Pyomo Block objects before building the
            # PyomoNLP object to expose any variables on the block to
            # the underlying Pyomo machinery
            for greybox in pyomo_model.component_objects(
                    ExternalGreyBoxBlock, descend_into=True):
                greybox.parent_block().reclassify_component_type(
                    greybox, pyo.Block)
                greybox_components.append(greybox)

            # store the pyomo model
            self._pyomo_model = pyomo_model

            # build a PyomoNLP object (will include the "pyomo"
            # part of the model only)
            self._pyomo_nlp = PyomoNLP(pyomo_model)

            self._pyomo_model_var_names_to_datas = {
                v.getname(fully_qualified=True): v
                for v in pyomo_model.component_data_objects(
                    ctype=pyo.Var, descend_into=True)
            }
            self._pyomo_model_constraint_names_to_datas = {
                c.getname(fully_qualified=True): c
                for c in pyomo_model.component_data_objects(
                    ctype=pyo.Constraint, descend_into=True)
            }
        finally:
            # Restore the ctypes of the ExternalGreyBoxBlock components
            for greybox in greybox_components:
                greybox.parent_block().reclassify_component_type(
                    greybox, ExternalGreyBoxBlock)

        if self._pyomo_nlp.n_primals() == 0:
            raise ValueError(
                "No variables were found in the Pyomo part of the model."
                " PyomoNLPWithGreyBoxBlocks requires at least one variable"
                " to be active in a Pyomo objective or constraint")

        # build the list of NLP wrappers for the greybox objects
        greybox_nlps = []
        fixed_vars = []
        for greybox in greybox_components:
            # iterate through the data objects if component is indexed
            for data in greybox.values():
                if data.active:
                    # check that no variables are fixed
                    fixed_vars.extend(
                        v for v in data.inputs.values() if v.fixed)
                    fixed_vars.extend(
                        v for v in data.outputs.values() if v.fixed)
                    greybox_nlp = _ExternalGreyBoxAsNLP(data)
                    greybox_nlps.append(greybox_nlp)

        if fixed_vars:
            logging.getLogger(__name__).error(
                'PyomoNLPWithGreyBoxBlocks found fixed variables for the'
                ' inputs and/or outputs of an ExternalGreyBoxBlock. This'
                ' is not currently supported. The fixed variables were:\n\t'
                + '\n\t'.join(
                    f.getname(fully_qualified=True) for f in fixed_vars))
            raise NotImplementedError(
                'PyomoNLPWithGreyBoxBlocks does not support fixed inputs'
                ' or outputs')

        # build up the union of all the primal variable names
        # RBP: Why use names here? Why not just ComponentSet of all
        # data objects?
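        # Note (illustrative): if the Pyomo part of the model has primals
        # ['x', 'y'] and a grey box block has primals ['y', 'z'], the union
        # built below is the sorted list ['x', 'y', 'z']. Every sub-NLP is
        # then wrapped in a ProjectedNLP over this common ordering, so primal
        # vectors and Jacobian blocks from different sub-NLPs line up.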
        primals_names = set(self._pyomo_nlp.primals_names())
        for gbnlp in greybox_nlps:
            primals_names.update(gbnlp.primals_names())

        # sort the names for consistency run to run
        self._n_primals = len(primals_names)
        self._primals_names = primals_names = sorted(primals_names)
        self._pyomo_model_var_datas = [
            self._pyomo_model_var_names_to_datas[nm]
            for nm in self._primals_names]

        # get the names of all the constraints
        self._constraint_names = list(self._pyomo_nlp.constraint_names())
        self._constraint_datas = [
            self._pyomo_model_constraint_names_to_datas.get(nm)
            for nm in self._constraint_names]
        for gbnlp in greybox_nlps:
            self._constraint_names.extend(gbnlp.constraint_names())
            self._constraint_datas.extend(
                [(gbnlp._block, nm) for nm in gbnlp.constraint_names()])
        self._n_constraints = len(self._constraint_names)

        self._has_hessian_support = True
        for nlp in greybox_nlps:
            if not nlp.has_hessian_support():
                self._has_hessian_support = False

        # wrap all the nlp objects with projected nlp objects
        self._pyomo_nlp = ProjectedNLP(self._pyomo_nlp, primals_names)
        for i, gbnlp in enumerate(greybox_nlps):
            greybox_nlps[i] = ProjectedNLP(greybox_nlps[i], primals_names)

        # build a list of all the nlps in order
        self._nlps = nlps = [self._pyomo_nlp]
        nlps.extend(greybox_nlps)

        # build the primal and dual inits and lb, ub vectors
        self._init_primals = self._pyomo_nlp.init_primals()
        self._primals_lb = self._pyomo_nlp.primals_lb()
        self._primals_ub = self._pyomo_nlp.primals_ub()
        for gbnlp in greybox_nlps:
            local = gbnlp.init_primals()
            mask = ~np.isnan(local)
            self._init_primals[mask] = local[mask]

            local = gbnlp.primals_lb()
            mask = ~np.isnan(local)
            self._primals_lb[mask] = np.maximum(
                self._primals_lb[mask], local[mask])

            local = gbnlp.primals_ub()
            mask = ~np.isnan(local)
            self._primals_ub[mask] = np.minimum(
                self._primals_ub[mask], local[mask])

        # all the nan's should be gone (every primal should be initialized)
        if np.any(np.isnan(self._init_primals)) \
                or np.any(np.isnan(self._primals_lb)) \
                or np.any(np.isnan(self._primals_ub)):
            raise ValueError(
                'NaN values found in initialization of primals or'
                ' primals_lb or primals_ub in _PyomoNLPWithGreyBoxBlocks.')
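        # Note (illustrative): the masks in the loop above exist because a
        # ProjectedNLP reports NaN for primals its underlying NLP does not
        # know about, so only the entries each sub-NLP actually provides are
        # merged. Bounds are intersected: e.g., lower bounds of 0.0 (Pyomo
        # part) and 1.0 (grey box) for the same primal yield 1.0 via
        # np.maximum.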
        self._init_duals = BlockVector(len(nlps))
        self._dual_values_blockvector = BlockVector(len(nlps))
        self._constraints_lb = BlockVector(len(nlps))
        self._constraints_ub = BlockVector(len(nlps))
        for i, nlp in enumerate(nlps):
            self._init_duals.set_block(i, nlp.init_duals())
            self._constraints_lb.set_block(i, nlp.constraints_lb())
            self._constraints_ub.set_block(i, nlp.constraints_ub())
            self._dual_values_blockvector.set_block(
                i, np.nan * np.zeros(nlp.n_constraints()))
        self._init_duals = self._init_duals.flatten()
        self._constraints_lb = self._constraints_lb.flatten()
        self._constraints_ub = self._constraints_ub.flatten()
        # verify that there are no nans in the init_duals
        if np.any(np.isnan(self._init_duals)) \
                or np.any(np.isnan(self._constraints_lb)) \
                or np.any(np.isnan(self._constraints_ub)):
            raise ValueError(
                'NaN values found in initialization of duals or'
                ' constraints_lb or constraints_ub in'
                ' _PyomoNLPWithGreyBoxBlocks.')

        self._primal_values = np.nan * np.ones(self._n_primals)
        # set the values of the primals and duals to make sure initial
        # values get all the way through to the underlying models
        self.set_primals(self._init_primals)
        self.set_duals(self._init_duals)
        assert not np.any(np.isnan(self._primal_values))
        assert not np.any(np.isnan(self._dual_values_blockvector))

        # if any of the problem is scaled (i.e., one or more of primals,
        # constraints, or objective), then we want scaling factors for
        # all of them (defaulted to 1)
        need_scaling = False
        # objective is owned by self._pyomo_nlp, not in any of the
        # greybox models
        self._obj_scaling = self._pyomo_nlp.get_obj_scaling()
        if self._obj_scaling is None:
            self._obj_scaling = 1.0
        else:
            need_scaling = True

        self._primals_scaling = np.ones(self.n_primals())
        scaling_suffix = pyomo_model.component('scaling_factor')
        if scaling_suffix and scaling_suffix.ctype is pyo.Suffix:
            need_scaling = True
            for i, v in enumerate(self._pyomo_model_var_datas):
                if v in scaling_suffix:
                    self._primals_scaling[i] = scaling_suffix[v]

        self._constraints_scaling = BlockVector(len(nlps))
        for i, nlp in enumerate(nlps):
            local_constraints_scaling = nlp.get_constraints_scaling()
            if local_constraints_scaling is None:
                self._constraints_scaling.set_block(
                    i, np.ones(nlp.n_constraints()))
            else:
                self._constraints_scaling.set_block(
                    i, local_constraints_scaling)
                need_scaling = True
        if need_scaling:
            self._constraints_scaling = self._constraints_scaling.flatten()
        else:
            self._obj_scaling = None
            self._primals_scaling = None
            self._constraints_scaling = None

        # compute the jacobian and the hessian to get nnz
        jac = self.evaluate_jacobian()
        self._nnz_jacobian = len(jac.data)

        self._sparse_hessian_summation = None
        self._nnz_hessian_lag = None
        if self._has_hessian_support:
            hess = self.evaluate_hessian_lag()
            self._nnz_hessian_lag = len(hess.data)

    # overloaded from NLP
    def n_primals(self):
        return self._n_primals

    # overloaded from NLP
    def primals_names(self):
        return self._primals_names

    # overloaded from NLP
    def n_constraints(self):
        return self._n_constraints

    # overloaded from NLP
    def constraint_names(self):
        return self._constraint_names

    # overloaded from NLP
    def nnz_jacobian(self):
        return self._nnz_jacobian

    # overloaded from NLP
    def nnz_hessian_lag(self):
        return self._nnz_hessian_lag

    # overloaded from NLP
    def primals_lb(self):
        return self._primals_lb

    # overloaded from NLP
    def primals_ub(self):
        return self._primals_ub

    # overloaded from NLP
    def constraints_lb(self):
        return self._constraints_lb

    # overloaded from NLP
    def constraints_ub(self):
        return self._constraints_ub

    # overloaded from NLP
    def init_primals(self):
        return self._init_primals

    # overloaded from NLP
    def init_duals(self):
        return self._init_duals

    # overloaded from NLP / Extended NLP
    def create_new_vector(self, vector_type):
        if vector_type == 'primals':
            return np.zeros(self.n_primals(), dtype=np.float64)
        elif vector_type == 'constraints' or vector_type == 'duals':
            return np.zeros(self.n_constraints(), dtype=np.float64)
        else:
            raise RuntimeError(
                'Called create_new_vector with an unknown vector_type')

    # overloaded from NLP
    def set_primals(self, primals):
        np.copyto(self._primal_values, primals)
        for nlp in self._nlps:
            nlp.set_primals(primals)

    # overloaded from AslNLP
    def get_primals(self):
        return np.copy(self._primal_values)

    # overloaded from NLP
    def set_duals(self, duals):
        self._dual_values_blockvector.copyfrom(duals)
        for i, nlp in enumerate(self._nlps):
            nlp.set_duals(self._dual_values_blockvector.get_block(i))

    # overloaded from NLP
    def get_duals(self):
        return self._dual_values_blockvector.flatten()

    # overloaded from NLP
    def set_obj_factor(self, obj_factor):
        # objective is owned by the pyomo model
        self._pyomo_nlp.set_obj_factor(obj_factor)

    # overloaded from NLP
    def get_obj_factor(self):
        # objective is owned by the pyomo model
        return self._pyomo_nlp.get_obj_factor()

    # overloaded from NLP
    def get_obj_scaling(self):
        return self._obj_scaling

    # overloaded from NLP
    def get_primals_scaling(self):
        return self._primals_scaling
    # overloaded from NLP
    def get_constraints_scaling(self):
        return self._constraints_scaling

    # overloaded from NLP
    def evaluate_objective(self):
        # objective is owned by the pyomo model
        return self._pyomo_nlp.evaluate_objective()

    # overloaded from NLP
    def evaluate_grad_objective(self, out=None):
        return self._pyomo_nlp.evaluate_grad_objective(out=out)

    # overloaded from NLP
    def evaluate_constraints(self, out=None):
        # todo: implement the "out" version more efficiently
        ret = BlockVector(len(self._nlps))
        for i, nlp in enumerate(self._nlps):
            ret.set_block(i, nlp.evaluate_constraints())

        if out is not None:
            ret.copyto(out)
            return out

        return ret.flatten()

    # overloaded from NLP
    def evaluate_jacobian(self, out=None):
        ret = BlockMatrix(len(self._nlps), 1)
        for i, nlp in enumerate(self._nlps):
            ret.set_block(i, 0, nlp.evaluate_jacobian())
        ret = ret.tocoo()

        if out is not None:
            assert np.array_equal(ret.row, out.row)
            assert np.array_equal(ret.col, out.col)
            np.copyto(out.data, ret.data)
            return out
        return ret

    def evaluate_hessian_lag(self, out=None):
        list_of_hessians = [nlp.evaluate_hessian_lag() for nlp in self._nlps]
        if self._sparse_hessian_summation is None:
            # This is assuming that the nonzero structures of the Hessians
            # do not change
            self._sparse_hessian_summation = CondensedSparseSummation(
                list_of_hessians)
        ret = self._sparse_hessian_summation.sum(list_of_hessians)

        if out is not None:
            assert np.array_equal(ret.row, out.row)
            assert np.array_equal(ret.col, out.col)
            np.copyto(out.data, ret.data)
            return out
        return ret

    def report_solver_status(self, status_code, status_message):
        raise NotImplementedError('This is not yet implemented.')

    def load_state_into_pyomo(self, bound_multipliers=None):
        # load the values of the primals into the pyomo model
        primals = self.get_primals()
        for value, vardata in zip(primals, self._pyomo_model_var_datas):
            vardata.set_value(value)

        # get the active suffixes
        m = self._pyomo_model
        model_suffixes = dict(pyo.suffix.active_import_suffix_generator(m))

        # we need to correct the sign of the multipliers based on whether
        # we are minimizing or maximizing - this is done in the ASL
        # interface for Ipopt, but does not appear to be done in cyipopt.
        obj_sign = 1.0
        objs = list(m.component_data_objects(
            ctype=pyo.Objective, descend_into=True))
        assert len(objs) == 1
        if objs[0].sense == pyo.maximize:
            obj_sign = -1.0

        if 'dual' in model_suffixes:
            model_suffixes['dual'].clear()
            dual_values = self._dual_values_blockvector.flatten()
            for value, t in zip(dual_values, self._constraint_datas):
                if type(t) is tuple:
                    model_suffixes['dual'].setdefault(t[0], {})[t[1]] = (
                        -obj_sign * value)
                else:
                    # t is a constraint data object
                    model_suffixes['dual'][t] = -obj_sign * value

        if 'ipopt_zL_out' in model_suffixes:
            model_suffixes['ipopt_zL_out'].clear()
            if bound_multipliers is not None:
                model_suffixes['ipopt_zL_out'].update(
                    zip(self._pyomo_model_var_datas,
                        obj_sign * bound_multipliers[0]))

        if 'ipopt_zU_out' in model_suffixes:
            model_suffixes['ipopt_zU_out'].clear()
            if bound_multipliers is not None:
                model_suffixes['ipopt_zU_out'].update(
                    zip(self._pyomo_model_var_datas,
                        -obj_sign * bound_multipliers[1]))
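# ---------------------------------------------------------------------------
# The test below is excerpted from a unittest suite and calls a
# create_pyomo_model() helper that is not part of this excerpt. The sketch
# here is a reconstruction consistent with the values asserted in the test
# (initial point (1, 2, 4), objective gradient (8, 1, 9), a 2x3 Jacobian
# with 5 nonzeros); the constraint right-hand sides are immaterial to the
# asserted quantities.
# ---------------------------------------------------------------------------
import unittest


def create_pyomo_model():
    m = pyo.ConcreteModel()
    m.x = pyo.Var(range(3), initialize={0: 1.0, 1: 2.0, 2: 4.0})
    m.obj = pyo.Objective(
        expr=m.x[0]**2 + m.x[0]*m.x[1] + m.x[0]*m.x[2] + m.x[2]**2)
    m.con1 = pyo.Constraint(expr=m.x[0]*m.x[1] + m.x[0]*m.x[2] == 4)
    m.con2 = pyo.Constraint(expr=m.x[0] + m.x[2] == 4)
    return m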
# NOTE: the enclosing unittest.TestCase subclass is not part of this excerpt;
# the wrapper below is reconstructed so the method is runnable as written.
class TestProjectedNLP(unittest.TestCase):
    def test_projected(self):
        m = create_pyomo_model()
        nlp = PyomoNLP(m)
        projected_nlp = ProjectedNLP(nlp, ['x[0]', 'x[1]', 'x[2]'])
        expected_names = ['x[0]', 'x[1]', 'x[2]']
        self.assertEqual(projected_nlp.primals_names(), expected_names)
        self.assertTrue(np.array_equal(
            projected_nlp.get_primals(), np.asarray([1.0, 2.0, 4.0])))
        self.assertTrue(np.array_equal(
            projected_nlp.evaluate_grad_objective(),
            np.asarray([8.0, 1.0, 9.0])))
        self.assertEqual(projected_nlp.nnz_jacobian(), 5)
        self.assertEqual(projected_nlp.nnz_hessian_lag(), 6)

        J = projected_nlp.evaluate_jacobian()
        self.assertEqual(len(J.data), 5)
        denseJ = J.todense()
        expected_jac = np.asarray([[6.0, 1.0, 1.0],
                                   [1.0, 0.0, 1.0]])
        self.assertTrue(np.array_equal(denseJ, expected_jac))

        # test the use of "out"
        J = 0.0 * J
        projected_nlp.evaluate_jacobian(out=J)
        denseJ = J.todense()
        self.assertTrue(np.array_equal(denseJ, expected_jac))

        H = projected_nlp.evaluate_hessian_lag()
        self.assertEqual(len(H.data), 6)
        expectedH = np.asarray([[2.0, 1.0, 1.0],
                                [1.0, 0.0, 0.0],
                                [1.0, 0.0, 2.0]])
        denseH = H.todense()
        self.assertTrue(np.array_equal(denseH, expectedH))

        # test the use of "out"
        H = 0.0 * H
        projected_nlp.evaluate_hessian_lag(out=H)
        denseH = H.todense()
        self.assertTrue(np.array_equal(denseH, expectedH))

        # now test a reordering
        projected_nlp = ProjectedNLP(nlp, ['x[0]', 'x[2]', 'x[1]'])
        expected_names = ['x[0]', 'x[2]', 'x[1]']
        self.assertEqual(projected_nlp.primals_names(), expected_names)
        self.assertTrue(np.array_equal(
            projected_nlp.get_primals(), np.asarray([1.0, 4.0, 2.0])))
        self.assertTrue(np.array_equal(
            projected_nlp.evaluate_grad_objective(),
            np.asarray([8.0, 9.0, 1.0])))
        self.assertEqual(projected_nlp.nnz_jacobian(), 5)
        self.assertEqual(projected_nlp.nnz_hessian_lag(), 6)

        J = projected_nlp.evaluate_jacobian()
        self.assertEqual(len(J.data), 5)
        denseJ = J.todense()
        expected_jac = np.asarray([[6.0, 1.0, 1.0],
                                   [1.0, 1.0, 0.0]])
        self.assertTrue(np.array_equal(denseJ, expected_jac))

        # test the use of "out"
        J = 0.0 * J
        projected_nlp.evaluate_jacobian(out=J)
        denseJ = J.todense()
        self.assertTrue(np.array_equal(denseJ, expected_jac))

        H = projected_nlp.evaluate_hessian_lag()
        self.assertEqual(len(H.data), 6)
        expectedH = np.asarray([[2.0, 1.0, 1.0],
                                [1.0, 2.0, 0.0],
                                [1.0, 0.0, 0.0]])
        denseH = H.todense()
        self.assertTrue(np.array_equal(denseH, expectedH))

        # test the use of "out"
        H = 0.0 * H
        projected_nlp.evaluate_hessian_lag(out=H)
        denseH = H.todense()
        self.assertTrue(np.array_equal(denseH, expectedH))

        # now test an expansion (the projection introduces a new primal 'y')
        projected_nlp = ProjectedNLP(nlp, ['x[0]', 'x[2]', 'y', 'x[1]'])
        expected_names = ['x[0]', 'x[2]', 'y', 'x[1]']
        self.assertEqual(projected_nlp.primals_names(), expected_names)
        np.testing.assert_equal(
            projected_nlp.get_primals(),
            np.asarray([1.0, 4.0, np.nan, 2.0]))
        self.assertTrue(np.array_equal(
            projected_nlp.evaluate_grad_objective(),
            np.asarray([8.0, 9.0, 0.0, 1.0])))
        self.assertEqual(projected_nlp.nnz_jacobian(), 5)
        self.assertEqual(projected_nlp.nnz_hessian_lag(), 6)

        J = projected_nlp.evaluate_jacobian()
        self.assertEqual(len(J.data), 5)
        denseJ = J.todense()
        expected_jac = np.asarray([[6.0, 1.0, 0.0, 1.0],
                                   [1.0, 1.0, 0.0, 0.0]])
        self.assertTrue(np.array_equal(denseJ, expected_jac))

        # test the use of "out"
        J = 0.0 * J
        projected_nlp.evaluate_jacobian(out=J)
        denseJ = J.todense()
        self.assertTrue(np.array_equal(denseJ, expected_jac))

        H = projected_nlp.evaluate_hessian_lag()
        self.assertEqual(len(H.data), 6)
        expectedH = np.asarray([[2.0, 1.0, 0.0, 1.0],
                                [1.0, 2.0, 0.0, 0.0],
                                [0.0, 0.0, 0.0, 0.0],
                                [1.0, 0.0, 0.0, 0.0]])
        denseH = H.todense()
        self.assertTrue(np.array_equal(denseH, expectedH))

        # test the use of "out"
        H = 0.0 * H
        projected_nlp.evaluate_hessian_lag(out=H)
        denseH = H.todense()
        self.assertTrue(np.array_equal(denseH, expectedH))
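        # Note: for the expanded primal 'y' above, get_primals() reports NaN
        # (the underlying NLP has no such variable), while the objective
        # gradient entry and the matrix row/column for 'y' are structurally
        # zero.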
        # now test a restriction (projection onto a subset of the primals)
        projected_nlp = ProjectedNLP(nlp, ['x[0]', 'x[2]'])
        expected_names = ['x[0]', 'x[2]']
        self.assertEqual(projected_nlp.primals_names(), expected_names)
        np.testing.assert_equal(
            projected_nlp.get_primals(), np.asarray([1.0, 4.0]))
        self.assertTrue(np.array_equal(
            projected_nlp.evaluate_grad_objective(),
            np.asarray([8.0, 9.0])))
        self.assertEqual(projected_nlp.nnz_jacobian(), 4)
        self.assertEqual(projected_nlp.nnz_hessian_lag(), 4)

        J = projected_nlp.evaluate_jacobian()
        self.assertEqual(len(J.data), 4)
        denseJ = J.todense()
        expected_jac = np.asarray([[6.0, 1.0],
                                   [1.0, 1.0]])
        self.assertTrue(np.array_equal(denseJ, expected_jac))

        # test the use of "out"
        J = 0.0 * J
        projected_nlp.evaluate_jacobian(out=J)
        denseJ = J.todense()
        self.assertTrue(np.array_equal(denseJ, expected_jac))

        H = projected_nlp.evaluate_hessian_lag()
        self.assertEqual(len(H.data), 4)
        expectedH = np.asarray([[2.0, 1.0],
                                [1.0, 2.0]])
        denseH = H.todense()
        self.assertTrue(np.array_equal(denseH, expectedH))

        # test the use of "out"
        H = 0.0 * H
        projected_nlp.evaluate_hessian_lag(out=H)
        denseH = H.todense()
        self.assertTrue(np.array_equal(denseH, expectedH))
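# ---------------------------------------------------------------------------
# For reference, a minimal end-to-end sketch of how PyomoNLPWithGreyBoxBlocks
# (defined above) is typically constructed. This is illustrative only: the
# SqrtModel and example_usage names are hypothetical, running it requires the
# PyNumero ASL extension, and it assumes PyNumero's documented
# ExternalGreyBoxModel API (input_names, set_input_values,
# evaluate_equality_constraints, evaluate_jacobian_equality_constraints).
# ---------------------------------------------------------------------------
import scipy.sparse as sps

from pyomo.contrib.pynumero.interfaces.external_grey_box import (
    ExternalGreyBoxModel)


class SqrtModel(ExternalGreyBoxModel):
    """External model enforcing y**2 - u == 0 as an equality constraint."""

    def input_names(self):
        return ['u', 'y']

    def output_names(self):
        return []

    def equality_constraint_names(self):
        return ['sqrt_con']

    def finalize_block_construction(self, pyomo_block):
        # give the block's input variables initial values so that
        # init_primals() on the wrapper is well defined
        pyomo_block.inputs['u'].value = 4.0
        pyomo_block.inputs['y'].value = 2.0

    def set_input_values(self, input_values):
        self._u, self._y = input_values

    def set_equality_constraint_multipliers(self, eq_con_multipliers):
        # only needed for Hessian computations; stored but unused here
        self._multipliers = eq_con_multipliers

    def evaluate_equality_constraints(self):
        return np.asarray([self._y**2 - self._u], dtype=np.float64)

    def evaluate_jacobian_equality_constraints(self):
        # one row (sqrt_con), two columns (u, y): d/du = -1, d/dy = 2*y
        return sps.coo_matrix(
            (np.asarray([-1.0, 2.0 * self._y]),
             (np.asarray([0, 0]), np.asarray([0, 1]))),
            shape=(1, 2))


def example_usage():
    m = pyo.ConcreteModel()
    m.egb = ExternalGreyBoxBlock(external_model=SqrtModel())
    # at least one variable must appear in a regular Pyomo objective or
    # constraint for the wrapper to build its PyomoNLP part
    m.obj = pyo.Objective(
        expr=(m.egb.inputs['u'] - 9.0)**2 + (m.egb.inputs['y'] - 3.0)**2)

    nlp = PyomoNLPWithGreyBoxBlocks(m)
    print(nlp.primals_names())         # union of Pyomo and grey box primals
    print(nlp.evaluate_constraints())  # [0.0] at the initial point (4, 2)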