def get_kkt_info(self):
    """Collect KKT-system information (Jacobian, Hessian, duals, indices).

    Uses either PyNumero (``self.kkt_method == 'pynumero'``) or k_aug
    (``self.kkt_method == 'k_aug'``) on ``self.model_object`` and stores the
    result in ``self.kkt_data`` — this method mutates state and returns None.

    self.kkt_data (dict) is populated with the structure:

            {
            'J': J,                       # Jacobian (sparse)
            'H': H,                       # Hessian of the Lagrangian (sparse)
            'var_ind': var_index_names,   # Variable index (names, in order)
            'con_ind': con_index_names,   # Constraint index (names, in order)
            'duals': duals,               # Duals (None for the k_aug path)
            }

    :return: None
    """
    # Locate the solver scratch files (row/col/sol) for this run.
    self.get_file_info()

    if self.kkt_method == 'pynumero':
        nlp = PyomoNLP(self.model_object)
        varList = nlp.get_pyomo_variables()
        conList = nlp.get_pyomo_constraints()
        duals = nlp.get_duals()

        J = nlp.extract_submatrix_jacobian(pyomo_variables=varList,
                                           pyomo_constraints=conList)
        H = nlp.extract_submatrix_hessian_lag(pyomo_variables_rows=varList,
                                              pyomo_variables_cols=varList)
        J = csc_matrix(J)

        var_index_names = [v.name for v in varList]
        con_index_names = [v.name for v in conList]

    elif self.kkt_method == 'k_aug':
        kaug = SolverFactory('k_aug')
        # "deb_kkt" makes k_aug dump the KKT matrices to hess_debug.in /
        # jacobi_debug.in in the current working directory.
        kaug.options["deb_kkt"] = ""
        kaug.solve(self.model_object, tee=False)

        # Hessian entries are written 1-based as (row, col, value) triplets;
        # shift to 0-based indexing for scipy.
        hess = pd.read_csv('hess_debug.in', delim_whitespace=True,
                           header=None, skipinitialspace=True)
        hess.columns = ['irow', 'jcol', 'vals']
        hess.irow -= 1
        hess.jcol -= 1
        # Remove the scratch file once it has been consumed.
        os.unlink('hess_debug.in')

        # First row of jacobi_debug.in holds the matrix dimensions (m, n);
        # the remaining rows are 1-based (row, col, value) triplets.
        jac = pd.read_csv('jacobi_debug.in', delim_whitespace=True,
                          header=None, skipinitialspace=True)
        m = jac.iloc[0, 0]
        n = jac.iloc[0, 1]
        jac.drop(index=[0], inplace=True)
        jac.columns = ['irow', 'jcol', 'vals']
        jac.irow -= 1
        jac.jcol -= 1
        os.unlink('jacobi_debug.in')

        #try:
        #    duals = read_duals(stub + '.sol')
        #except:
        duals = None

        J = coo_matrix((jac.vals, (jac.irow, jac.jcol)), shape=(m, n))
        # k_aug writes only one triangle of the symmetric Hessian;
        # mirror the strict upper triangle to obtain the full matrix.
        Hess_coo = coo_matrix((hess.vals, (hess.irow, hess.jcol)),
                              shape=(n, n))
        H = Hess_coo + triu(Hess_coo, 1).T

        # The .col/.row files contain one name per line; ';' is a dummy
        # separator so pandas reads each line as a single field.
        var_index_names = pd.read_csv(self.sol_files['col'], sep=';',
                                      header=None)  # dummy sep
        con_index_names = pd.read_csv(self.sol_files['row'], sep=';',
                                      header=None)  # dummy sep

        var_index_names = [var_name for var_name in var_index_names[0]]
        # Last line of the row file is the objective, not a constraint.
        con_index_names = [
            con_name for con_name in con_index_names[0].iloc[:-1]
        ]
        # NOTE(review): con_index_number is computed but never used here.
        con_index_number = {v: k for k, v in enumerate(con_index_names)}

    self.delete_sol_files()

    self.kkt_data = {
        'J': J,
        'H': H,
        'var_ind': var_index_names,
        'con_ind': con_index_names,
        'duals': duals,
    }

    return None
return sol[nlp.n_primals():nlp.n_primals() + nlp.n_constraints()] ################################################################# m = create_model(4.5, 1.0) opt = pyo.SolverFactory('ipopt') results = opt.solve(m, tee=True) ################################################################# nlp = PyomoNLP(m) x = nlp.init_primals() y = compute_init_lam(nlp, x=x) nlp.set_primals(x) nlp.set_duals(y) J = nlp.extract_submatrix_jacobian(pyomo_variables=[m.x1, m.x2, m.x3], pyomo_constraints=[m.const1, m.const2]) H = nlp.extract_submatrix_hessian_lag(pyomo_variables_rows=[m.x1, m.x2, m.x3], pyomo_variables_cols=[m.x1, m.x2, m.x3]) M = BlockMatrix(2, 2) M.set_block(0, 0, H) M.set_block(1, 0, J) M.set_block(0, 1, J.transpose()) Np = BlockMatrix(2, 1) Np.set_block( 0, 0, nlp.extract_submatrix_hessian_lag(pyomo_variables_rows=[m.x1, m.x2, m.x3], pyomo_variables_cols=[m.eta1, m.eta2])) Np.set_block( 1, 0,
def test_indices_methods(self):
    """Verify PyomoNLP index-lookup and submatrix-extraction methods.

    Exercises variable/constraint ordering, equality/inequality
    partitioning, primal/constraint index lookup, and the gradient,
    constraint, Jacobian, and Hessian submatrix extractors on the
    test model ``self.pm`` (variables x[1..9], constraints c[1..9],
    where c[2] and c[6] are the equality constraints).
    """
    nlp = PyomoNLP(self.pm)

    # get_pyomo_variables
    variables = nlp.get_pyomo_variables()
    expected_ids = [id(self.pm.x[i]) for i in range(1, 10)]
    ids = [id(variables[i]) for i in range(9)]
    self.assertTrue(expected_ids == ids)

    variable_names = nlp.variable_names()
    expected_names = [self.pm.x[i].getname() for i in range(1, 10)]
    self.assertTrue(variable_names == expected_names)

    # get_pyomo_constraints
    constraints = nlp.get_pyomo_constraints()
    expected_ids = [id(self.pm.c[i]) for i in range(1, 10)]
    ids = [id(constraints[i]) for i in range(9)]
    self.assertTrue(expected_ids == ids)

    constraint_names = nlp.constraint_names()
    expected_names = [c.getname() for c in nlp.get_pyomo_constraints()]
    self.assertTrue(constraint_names == expected_names)

    # get_pyomo_equality_constraints
    eq_constraints = nlp.get_pyomo_equality_constraints()
    # 2 and 6 are the equality constraints
    eq_indices = [2, 6]  # "indices" here is a bit overloaded
    expected_eq_ids = [id(self.pm.c[i]) for i in eq_indices]
    eq_ids = [id(con) for con in eq_constraints]
    self.assertEqual(eq_ids, expected_eq_ids)

    eq_constraint_names = nlp.equality_constraint_names()
    expected_eq_names = [
        c.getname(fully_qualified=True)
        for c in nlp.get_pyomo_equality_constraints()
    ]
    self.assertEqual(eq_constraint_names, expected_eq_names)

    # get_pyomo_inequality_constraints
    ineq_constraints = nlp.get_pyomo_inequality_constraints()
    # 1, 3, 4, 5, 7, 8, and 9 are the inequality constraints
    ineq_indices = [1, 3, 4, 5, 7, 8, 9]
    expected_ineq_ids = [id(self.pm.c[i]) for i in ineq_indices]
    ineq_ids = [id(con) for con in ineq_constraints]
    # Fixed: this previously re-asserted eq_ids == expected_eq_ids, so the
    # inequality ids computed above were never actually checked.
    self.assertEqual(ineq_ids, expected_ineq_ids)

    # get_primal_indices
    expected_primal_indices = [i for i in range(9)]
    self.assertTrue(
        expected_primal_indices == nlp.get_primal_indices([self.pm.x]))
    expected_primal_indices = [0, 3, 8, 4]
    variables = [self.pm.x[1], self.pm.x[4], self.pm.x[9], self.pm.x[5]]
    self.assertTrue(
        expected_primal_indices == nlp.get_primal_indices(variables))

    # get_constraint_indices
    expected_constraint_indices = [i for i in range(9)]
    self.assertTrue(expected_constraint_indices ==
                    nlp.get_constraint_indices([self.pm.c]))
    expected_constraint_indices = [0, 3, 8, 4]
    constraints = [self.pm.c[1], self.pm.c[4], self.pm.c[9], self.pm.c[5]]
    self.assertTrue(expected_constraint_indices ==
                    nlp.get_constraint_indices(constraints))

    # get_equality_constraint_indices
    pyomo_eq_indices = [2, 6]
    with self.assertRaises(KeyError):
        # At least one data object in container is not an equality
        nlp.get_equality_constraint_indices([self.pm.c])
    eq_constraints = [self.pm.c[i] for i in pyomo_eq_indices]
    expected_eq_indices = [0, 1]
    # ^indices in the list of equality constraints
    eq_constraint_indices = nlp.get_equality_constraint_indices(
        eq_constraints)
    self.assertEqual(expected_eq_indices, eq_constraint_indices)

    # get_inequality_constraint_indices
    pyomo_ineq_indices = [1, 3, 4, 5, 7, 9]
    with self.assertRaises(KeyError):
        # At least one data object in container is not an inequality
        nlp.get_inequality_constraint_indices([self.pm.c])
    ineq_constraints = [self.pm.c[i] for i in pyomo_ineq_indices]
    expected_ineq_indices = [0, 1, 2, 3, 4, 6]
    # ^indices in the list of inequality constraints; didn't include 8
    ineq_constraint_indices = nlp.get_inequality_constraint_indices(
        ineq_constraints)
    self.assertEqual(expected_ineq_indices, ineq_constraint_indices)

    # extract_subvector_grad_objective
    expected_gradient = np.asarray(
        [2 * sum((i + 1) * (j + 1) for j in range(9)) for i in range(9)],
        dtype=np.float64)
    grad_obj = nlp.extract_subvector_grad_objective([self.pm.x])
    self.assertTrue(np.array_equal(expected_gradient, grad_obj))

    expected_gradient = np.asarray([
        2 * sum((i + 1) * (j + 1) for j in range(9)) for i in [0, 3, 8, 4]
    ], dtype=np.float64)
    variables = [self.pm.x[1], self.pm.x[4], self.pm.x[9], self.pm.x[5]]
    grad_obj = nlp.extract_subvector_grad_objective(variables)
    self.assertTrue(np.array_equal(expected_gradient, grad_obj))

    # extract_subvector_constraints
    expected_con = np.asarray(
        [45, 88, 3 * 45, 4 * 45, 5 * 45, 276, 7 * 45, 8 * 45, 9 * 45],
        dtype=np.float64)
    con = nlp.extract_subvector_constraints([self.pm.c])
    self.assertTrue(np.array_equal(expected_con, con))

    expected_con = np.asarray([45, 4 * 45, 9 * 45, 5 * 45],
                              dtype=np.float64)
    constraints = [self.pm.c[1], self.pm.c[4], self.pm.c[9], self.pm.c[5]]
    con = nlp.extract_subvector_constraints(constraints)
    self.assertTrue(np.array_equal(expected_con, con))

    # extract_submatrix_jacobian
    expected_jac = [[(i) * (j) for j in range(1, 10)]
                    for i in range(1, 10)]
    expected_jac = np.asarray(expected_jac, dtype=np.float64)
    jac = nlp.extract_submatrix_jacobian(pyomo_variables=[self.pm.x],
                                         pyomo_constraints=[self.pm.c])
    dense_jac = jac.todense()
    self.assertTrue(np.array_equal(dense_jac, expected_jac))

    expected_jac = [[(i) * (j) for j in [1, 4, 9, 5]] for i in [2, 6, 4]]
    expected_jac = np.asarray(expected_jac, dtype=np.float64)
    variables = [self.pm.x[1], self.pm.x[4], self.pm.x[9], self.pm.x[5]]
    constraints = [self.pm.c[2], self.pm.c[6], self.pm.c[4]]
    jac = nlp.extract_submatrix_jacobian(pyomo_variables=variables,
                                         pyomo_constraints=constraints)
    dense_jac = jac.todense()
    self.assertTrue(np.array_equal(dense_jac, expected_jac))

    # extract_submatrix_hessian_lag
    expected_hess = [[2.0 * i * j for j in range(1, 10)]
                     for i in range(1, 10)]
    expected_hess = np.asarray(expected_hess, dtype=np.float64)
    hess = nlp.extract_submatrix_hessian_lag(
        pyomo_variables_rows=[self.pm.x], pyomo_variables_cols=[self.pm.x])
    dense_hess = hess.todense()
    self.assertTrue(np.array_equal(dense_hess, expected_hess))

    expected_hess = [[2.0 * i * j for j in [1, 4, 9, 5]]
                     for i in [1, 4, 9, 5]]
    expected_hess = np.asarray(expected_hess, dtype=np.float64)
    variables = [self.pm.x[1], self.pm.x[4], self.pm.x[9], self.pm.x[5]]
    hess = nlp.extract_submatrix_hessian_lag(
        pyomo_variables_rows=variables, pyomo_variables_cols=variables)
    dense_hess = hess.todense()
    self.assertTrue(np.array_equal(dense_hess, expected_hess))
def test_indices_methods(self):
    """Check PyomoNLP index lookups and submatrix extraction.

    Covers get_pyomo_variables/constraints, the name accessors,
    get_primal_indices / get_constraint_indices, and the gradient,
    constraint, Jacobian, and Hessian submatrix extractors on the
    9-variable, 9-constraint test model ``self.pm``.
    """
    nlp = PyomoNLP(self.pm)

    # Variable ordering must match x[1]..x[9].
    pyomo_vars = nlp.get_pyomo_variables()
    self.assertTrue(
        [id(v) for v in pyomo_vars] ==
        [id(self.pm.x[k]) for k in range(1, 10)])

    self.assertTrue(
        nlp.variable_names() ==
        [self.pm.x[k].getname() for k in range(1, 10)])

    # Constraint ordering must match c[1]..c[9].
    pyomo_cons = nlp.get_pyomo_constraints()
    self.assertTrue(
        [id(c) for c in pyomo_cons] ==
        [id(self.pm.c[k]) for k in range(1, 10)])

    self.assertTrue(
        nlp.constraint_names() ==
        [c.getname() for c in nlp.get_pyomo_constraints()])

    # get_primal_indices: whole container, then a permuted subset.
    self.assertTrue(nlp.get_primal_indices([self.pm.x]) == list(range(9)))
    subset_vars = [self.pm.x[1], self.pm.x[4], self.pm.x[9], self.pm.x[5]]
    self.assertTrue(nlp.get_primal_indices(subset_vars) == [0, 3, 8, 4])

    # get_constraint_indices: whole container, then a permuted subset.
    self.assertTrue(
        nlp.get_constraint_indices([self.pm.c]) == list(range(9)))
    subset_cons = [self.pm.c[1], self.pm.c[4], self.pm.c[9], self.pm.c[5]]
    self.assertTrue(
        nlp.get_constraint_indices(subset_cons) == [0, 3, 8, 4])

    # extract_subvector_grad_objective: full gradient, then a subset.
    full_grad = np.asarray(
        [2 * sum((i + 1) * (j + 1) for j in range(9)) for i in range(9)],
        dtype=np.float64)
    self.assertTrue(np.array_equal(
        full_grad, nlp.extract_subvector_grad_objective([self.pm.x])))

    sub_grad = np.asarray(
        [2 * sum((i + 1) * (j + 1) for j in range(9))
         for i in [0, 3, 8, 4]],
        dtype=np.float64)
    self.assertTrue(np.array_equal(
        sub_grad, nlp.extract_subvector_grad_objective(subset_vars)))

    # extract_subvector_constraints: full residual vector, then a subset.
    full_body = np.asarray(
        [45, 88, 3 * 45, 4 * 45, 5 * 45, 276, 7 * 45, 8 * 45, 9 * 45],
        dtype=np.float64)
    self.assertTrue(np.array_equal(
        full_body, nlp.extract_subvector_constraints([self.pm.c])))

    sub_body = np.asarray([45, 4 * 45, 9 * 45, 5 * 45], dtype=np.float64)
    self.assertTrue(np.array_equal(
        sub_body, nlp.extract_subvector_constraints(subset_cons)))

    # extract_submatrix_jacobian: full matrix, then a rectangular subset.
    full_jac = np.asarray(
        [[r * c for c in range(1, 10)] for r in range(1, 10)],
        dtype=np.float64)
    jac = nlp.extract_submatrix_jacobian(
        pyomo_variables=[self.pm.x], pyomo_constraints=[self.pm.c])
    self.assertTrue(np.array_equal(jac.todense(), full_jac))

    sub_jac = np.asarray(
        [[r * c for c in [1, 4, 9, 5]] for r in [2, 6, 4]],
        dtype=np.float64)
    jac = nlp.extract_submatrix_jacobian(
        pyomo_variables=subset_vars,
        pyomo_constraints=[self.pm.c[2], self.pm.c[6], self.pm.c[4]])
    self.assertTrue(np.array_equal(jac.todense(), sub_jac))

    # extract_submatrix_hessian_lag: full matrix, then a square subset.
    full_hess = np.asarray(
        [[2.0 * r * c for c in range(1, 10)] for r in range(1, 10)],
        dtype=np.float64)
    hess = nlp.extract_submatrix_hessian_lag(
        pyomo_variables_rows=[self.pm.x], pyomo_variables_cols=[self.pm.x])
    self.assertTrue(np.array_equal(hess.todense(), full_hess))

    sub_hess = np.asarray(
        [[2.0 * r * c for c in [1, 4, 9, 5]] for r in [1, 4, 9, 5]],
        dtype=np.float64)
    hess = nlp.extract_submatrix_hessian_lag(
        pyomo_variables_rows=subset_vars,
        pyomo_variables_cols=subset_vars)
    self.assertTrue(np.array_equal(hess.todense(), sub_hess))
def main(plot_switch=False):
    """Run an NMPC simulation with pseudo-steady-state model reduction.

    Builds a controller model and a plant model, identifies "fast" states
    via eigenvalues of a reduced derivative Jacobian, reduces them to
    pseudo-steady state, solves a setpoint ("RTO") problem, then runs a
    closed-loop NMPC simulation with Gaussian noise on measurements and
    inputs.

    Args:
        plot_switch (bool): unused in the visible body — presumably
            toggles plotting elsewhere; TODO confirm.

    Relies on module-level names not shown here (make_model, solver,
    flatten_dae_components, categorize_dae_variables_and_constraints,
    ControllerBlock, DynamicBlock, apply_noise_with_bounds, VC, CC, ...).
    """
    # This tests the same model constructed in the test_nmpc_constructor_1 file
    m_controller = make_model(horizon=3, ntfe=30, ntcp=2, bounds=True)
    sample_time = 0.5
    m_plant = make_model(horizon=sample_time, ntfe=5, ntcp=2)
    time_plant = m_plant.fs.time

    solve_consistent_initial_conditions(m_plant, time_plant, solver)

    #####
    # Flatten and categorize controller model
    #####
    model = m_controller
    time = model.fs.time
    t0 = time.first()
    t1 = time[2]
    scalar_vars, dae_vars = flatten_dae_components(
        model,
        time,
        pyo.Var,
    )
    scalar_cons, dae_cons = flatten_dae_components(
        model,
        time,
        pyo.Constraint,
    )
    inputs = [
        model.fs.mixer.S_inlet.flow_vol,
        model.fs.mixer.E_inlet.flow_vol,
    ]
    measurements = [
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'C']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'E']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'S']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'P']),
        model.fs.cstr.outlet.temperature,
    ]
    # Solvent holdup is fixed and the redundant flow balance deactivated so
    # the DAE categorization is well posed.
    model.fs.cstr.control_volume.material_holdup[:, 'aq', 'Solvent'].fix()
    model.fs.cstr.total_flow_balance.deactivate()
    var_partition, con_partition = categorize_dae_variables_and_constraints(
        model,
        dae_vars,
        dae_cons,
        time,
        input_vars=inputs,
    )
    controller = ControllerBlock(
        model=model,
        time=time,
        measurements=measurements,
        category_dict={None: var_partition},
    )
    controller.construct()

    solve_consistent_initial_conditions(m_controller, time, solver)
    controller.initialize_to_initial_conditions()

    # A dummy objective is required so the model can be wrapped in a
    # PyomoNLP; it is removed again right after.
    m_controller._dummy_obj = pyo.Objective(expr=0)
    nlp = PyomoNLP(m_controller)
    igraph = IncidenceGraphInterface(nlp)
    m_controller.del_component(m_controller._dummy_obj)

    diff_vars = [var[t1] for var in var_partition[VC.DIFFERENTIAL]]
    alg_vars = [var[t1] for var in var_partition[VC.ALGEBRAIC]]
    deriv_vars = [var[t1] for var in var_partition[VC.DERIVATIVE]]
    diff_eqns = [con[t1] for con in con_partition[CC.DIFFERENTIAL]]
    alg_eqns = [con[t1] for con in con_partition[CC.ALGEBRAIC]]

    # Assemble and factorize "derivative Jacobian"
    dfdz = nlp.extract_submatrix_jacobian(diff_vars, diff_eqns)
    dfdy = nlp.extract_submatrix_jacobian(alg_vars, diff_eqns)
    dgdz = nlp.extract_submatrix_jacobian(diff_vars, alg_eqns)
    dgdy = nlp.extract_submatrix_jacobian(alg_vars, alg_eqns)
    dfdzdot = nlp.extract_submatrix_jacobian(deriv_vars, diff_eqns)

    # Eliminate algebraic variables: dydz = -dgdy^-1 * dgdz (sign handled
    # below via subtraction), then solve for dzdot/dz.
    fact = sps.linalg.splu(dgdy.tocsc())
    dydz = fact.solve(dgdz.toarray())
    deriv_jac = dfdz - dfdy.dot(dydz)
    fact = sps.linalg.splu(dfdzdot.tocsc())
    dzdotdz = -fact.solve(deriv_jac)

    # Use some heuristic on the eigenvalues of the derivative Jacobian
    # to identify fast states.
    w, V = np.linalg.eig(dzdotdz)
    w_max = np.max(np.abs(w))
    fast_modes, = np.where(np.abs(w) > w_max / 2)
    fast_states = []
    for idx in fast_modes:
        evec = V[:, idx]
        # States with a significant component in a fast eigenvector are
        # themselves considered fast.
        _fast_states, _ = np.where(np.abs(evec) > 0.5)
        fast_states.extend(_fast_states)
    fast_states = set(fast_states)

    # Store components necessary for model reduction in a model-
    # independent form.
    fast_state_derivs = [
        pyo.ComponentUID(var_partition[VC.DERIVATIVE][idx].referent,
                         context=model) for idx in fast_states
    ]
    fast_state_diffs = [
        pyo.ComponentUID(var_partition[VC.DIFFERENTIAL][idx].referent,
                         context=model) for idx in fast_states
    ]
    fast_state_discs = [
        pyo.ComponentUID(con_partition[CC.DISCRETIZATION][idx].referent,
                         context=model) for idx in fast_states
    ]

    # Perform pseudo-steady state model reduction on the fast states
    # and re-categorize
    for cuid in fast_state_derivs:
        var = cuid.find_component_on(m_controller)
        var.fix(0.0)
    for cuid in fast_state_diffs:
        var = cuid.find_component_on(m_controller)
        var[t0].unfix()
    for cuid in fast_state_discs:
        con = cuid.find_component_on(m_controller)
        con.deactivate()
    var_partition, con_partition = categorize_dae_variables_and_constraints(
        model,
        dae_vars,
        dae_cons,
        time,
        input_vars=inputs,
    )
    controller.del_component(model)

    # Re-construct controller block with new categorization
    measurements = [
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'C']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'E']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'S']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'P']),
    ]
    controller = ControllerBlock(
        model=model,
        time=time,
        measurements=measurements,
        category_dict={None: var_partition},
    )
    controller.construct()

    #####
    # Construct dynamic block for plant
    #####
    model = m_plant
    time = model.fs.time
    t0 = time.first()
    t1 = time[2]
    scalar_vars, dae_vars = flatten_dae_components(
        model,
        time,
        pyo.Var,
    )
    scalar_cons, dae_cons = flatten_dae_components(
        model,
        time,
        pyo.Constraint,
    )
    inputs = [
        model.fs.mixer.S_inlet.flow_vol,
        model.fs.mixer.E_inlet.flow_vol,
    ]
    measurements = [
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'C']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'E']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'S']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'P']),
    ]
    model.fs.cstr.control_volume.material_holdup[:, 'aq', 'Solvent'].fix()
    model.fs.cstr.total_flow_balance.deactivate()
    var_partition, con_partition = categorize_dae_variables_and_constraints(
        model,
        dae_vars,
        dae_cons,
        time,
        input_vars=inputs,
    )
    plant = DynamicBlock(
        model=model,
        time=time,
        measurements=measurements,
        category_dict={None: var_partition},
    )
    plant.construct()

    p_t0 = plant.time.first()
    c_t0 = controller.time.first()
    p_ts = plant.sample_points[1]
    c_ts = controller.sample_points[1]

    controller.set_sample_time(sample_time)
    plant.set_sample_time(sample_time)

    # We now perform the "RTO" calculation: Find the optimal steady state
    # to achieve the following setpoint
    setpoint = [
        (controller.mod.fs.cstr.outlet.conc_mol[0, 'P'], 0.4),
        #(controller.mod.fs.cstr.outlet.conc_mol[0, 'S'], 0.01),
        (controller.mod.fs.cstr.outlet.conc_mol[0, 'S'], 0.1),
        (controller.mod.fs.cstr.control_volume.energy_holdup[0, 'aq'], 300),
        (controller.mod.fs.mixer.E_inlet.flow_vol[0], 0.1),
        (controller.mod.fs.mixer.S_inlet.flow_vol[0], 2.0),
        (controller.mod.fs.cstr.volume[0], 1.0),
    ]
    setpoint_weights = [
        (controller.mod.fs.cstr.outlet.conc_mol[0, 'P'], 1.),
        (controller.mod.fs.cstr.outlet.conc_mol[0, 'S'], 1.),
        (controller.mod.fs.cstr.control_volume.energy_holdup[0, 'aq'], 1.),
        (controller.mod.fs.mixer.E_inlet.flow_vol[0], 1.),
        (controller.mod.fs.mixer.S_inlet.flow_vol[0], 1.),
        (controller.mod.fs.cstr.volume[0], 1.),
    ]

    # Some of the "differential variables" that have been fixed in the
    # model file are different from the measurements listed above. We
    # unfix them here so the RTO solve is not overconstrained.
    # (The RTO solve will only automatically unfix inputs and measurements.)
    controller.mod.fs.cstr.control_volume.material_holdup[0, ...].unfix()
    controller.mod.fs.cstr.control_volume.energy_holdup[0, ...].unfix()
    #controller.mod.fs.cstr.volume[0].unfix()
    controller.mod.fs.cstr.control_volume.material_holdup[0, 'aq',
                                                          'Solvent'].fix()

    controller.add_setpoint_objective(setpoint, setpoint_weights)
    controller.solve_setpoint(solver)

    # Now we are ready to construct the tracking NMPC problem
    tracking_weights = [
        *((v, 1.) for v in controller.vectors.differential[:, 0]),
        *((v, 1.) for v in controller.vectors.input[:, 0]),
    ]
    controller.add_tracking_objective(tracking_weights)
    controller.constrain_control_inputs_piecewise_constant()
    controller.initialize_to_initial_conditions()

    # Solve the first control problem
    controller.vectors.input[...].unfix()
    controller.vectors.input[:, 0].fix()
    solver.solve(controller, tee=True)

    # For a proper NMPC simulation, we must have noise.
    # We do this by treating inputs and measurements as Gaussian random
    # variables with the following variances (and bounds).
    cstr = controller.mod.fs.cstr
    variance = [
        (cstr.outlet.conc_mol[0.0, 'S'], 0.01),
        (cstr.outlet.conc_mol[0.0, 'E'], 0.005),
        (cstr.outlet.conc_mol[0.0, 'C'], 0.01),
        (cstr.outlet.conc_mol[0.0, 'P'], 0.005),
        (cstr.outlet.temperature[0.0], 1.),
        (cstr.volume[0.0], 0.05),
    ]
    controller.set_variance(variance)
    measurement_variance = [
        v.variance for v in controller.MEASUREMENT_BLOCK[:].var
    ]
    measurement_noise_bounds = [(0.0, var[c_t0].ub)
                                for var in controller.MEASUREMENT_BLOCK[:].var]

    mx = plant.mod.fs.mixer
    variance = [
        (mx.S_inlet_state[0.0].flow_vol, 0.02),
        (mx.E_inlet_state[0.0].flow_vol, 0.001),
    ]
    plant.set_variance(variance)
    input_variance = [v.variance for v in plant.INPUT_BLOCK[:].var]
    input_noise_bounds = [(0.0, var[p_t0].ub)
                          for var in plant.INPUT_BLOCK[:].var]

    # Fixed seed so the noisy closed-loop trajectory is reproducible.
    random.seed(100)

    # Extract inputs from controller and inject them into plant
    inputs = controller.generate_inputs_at_time(c_ts)
    plant.inject_inputs(inputs)

    # This "initialization" really simulates the plant with the new inputs.
    plant.vectors.input[:, :].fix()
    plant.initialize_by_solving_elements(solver)
    plant.vectors.input[:, :].fix()
    solver.solve(plant, tee=True)

    # Closed-loop NMPC iterations: measure -> add noise -> advance ->
    # solve controller -> inject noisy inputs -> simulate plant.
    for i in range(1, 11):
        print('\nENTERING NMPC LOOP ITERATION %s\n' % i)
        measured = plant.generate_measurements_at_time(p_ts)
        plant.advance_one_sample()
        plant.initialize_to_initial_conditions()
        measured = apply_noise_with_bounds(
            measured,
            measurement_variance,
            random.gauss,
            measurement_noise_bounds,
        )

        controller.advance_one_sample()
        controller.load_measurements(measured)

        solver.solve(controller, tee=True)

        inputs = controller.generate_inputs_at_time(c_ts)
        inputs = apply_noise_with_bounds(
            inputs,
            input_variance,
            random.gauss,
            input_noise_bounds,
        )

        plant.inject_inputs(inputs)

        plant.initialize_by_solving_elements(solver)
        solver.solve(plant)

    # NOTE(review): leftover interactive debugging — drops into pdb at the
    # end of the run; should probably be removed for non-interactive use.
    import pdb
    pdb.set_trace()
# Script fragment: assemble the KKT system and a null-space basis for a
# reduced-Hessian computation. Relies on `nlp` (a PyNumero NLP wrapper) and
# the Pyomo model `m` being defined earlier in the file.
x = nlp.init_primals()
y = nlp.init_duals()
nlp.set_primals(x)
nlp.set_duals(y)

# Full Jacobian and Hessian of the Lagrangian at the initial point.
J = nlp.evaluate_jacobian()
H = nlp.evaluate_hessian_lag()

# Symmetric 2x2 block KKT matrix: [[H, J^T], [J, 0]].
kkt = BlockSymMatrix(2)
kkt[0, 0] = H
kkt[1, 0] = J

# Partition primals into "decision" (d) and "basic" (b) variables with
# respect to the single constraint m.const1.
d_vars = [m.x2, m.x3]
nd = len(d_vars)
Ad = nlp.extract_submatrix_jacobian(pyomo_variables=d_vars,
                                    pyomo_constraints=[m.const1])
xd_indices = nlp.get_primal_indices(d_vars)

b_vars = [m.x1]
nb = len(b_vars)
Ab = nlp.extract_submatrix_jacobian(pyomo_variables=b_vars,
                                    pyomo_constraints=[m.const1])
xb_indices = nlp.get_primal_indices(b_vars)

# null space matrix: Z = [[-Ab^-1 Ad], [I]] spans the null space of [Ab Ad].
Z = BlockMatrix(2, 1)
Z[0, 0] = spsolve(-Ab.tocsc(), Ad.tocsc())
Z[1, 0] = identity(nd)
Z_sparse = Z.tocsr()
print("Null space matrix:\n", Z.toarray())

# computing reduced hessian with null space matrix
def get_kkt_info(self):
    """Takes the model and uses PyNumero or k_aug to get the jacobian and
    Hessian information as dataframes. This is done in place and does not
    return anything.

    kkt_data (dict): dictionary with the following structure:

            {
            'J': J,                       # Jacobian (sparse)
            'H': H,                       # Hessian of the Lagrangian (sparse)
            'var_ind': var_index_names,   # Variable index (names, in order)
            'con_ind': con_index_names,   # Constraint index (names, in order)
            'duals': duals,               # Duals (None for the k_aug path)
            }

    :return: None
    """
    # Locate the solver scratch files (row/col/sol) for this run.
    self.get_file_info()

    if self.kkt_method == 'pynumero':
        nlp = PyomoNLP(self.model_object)
        varList = nlp.get_pyomo_variables()
        conList = nlp.get_pyomo_constraints()
        duals = nlp.get_duals()

        J = nlp.extract_submatrix_jacobian(pyomo_variables=varList,
                                           pyomo_constraints=conList)
        H = nlp.extract_submatrix_hessian_lag(pyomo_variables_rows=varList,
                                              pyomo_variables_cols=varList)
        J = csc_matrix(J)

        var_index_names = [v.name for v in varList]
        con_index_names = [v.name for v in conList]

    elif self.kkt_method == 'k_aug':
        kaug = SolverFactory('k_aug')
        # "print_kkt" makes k_aug dump the KKT matrices into the GJH/
        # directory (H_print.txt, A_print.txt).
        kaug.options["print_kkt"] = ""
        kaug.solve(self.model_object, tee=True)

        kaug_files = Path('GJH')

        # The .col/.row files contain one name per line; ';' is a dummy
        # separator so pandas reads each line as a single field.
        var_index_names = pd.read_csv(self.sol_files['col'], sep=';',
                                      header=None)  # dummy sep
        con_index_names = pd.read_csv(self.sol_files['row'], sep=';',
                                      header=None)  # dummy sep

        var_index_names = [var_name for var_name in var_index_names[0]]
        # Last line of the row file is the objective, not a constraint.
        con_index_names = [
            con_name for con_name in con_index_names[0].iloc[:-1]
        ]
        # con_index_number = {v: k for k, v in enumerate(con_index_names)}

        n = len(var_index_names)
        m = len(con_index_names)
        print(f'size: vars: {n}, cons {m}')

        # Hessian entries are 1-based (row, col, value) triplets; shift to
        # 0-based for scipy.
        hess_file = kaug_files.joinpath('H_print.txt')
        hess = pd.read_csv(hess_file, delim_whitespace=True, header=None,
                           skipinitialspace=True)
        hess.columns = ['irow', 'jcol', 'vals']
        hess.irow -= 1
        hess.jcol -= 1
        # os.unlink(f'{kaug_files}hess_debug.in')

        jac_file = kaug_files.joinpath('A_print.txt')
        jac = pd.read_csv(jac_file, delim_whitespace=True, header=None,
                          skipinitialspace=True)
        jac.columns = ['irow', 'jcol', 'vals']
        jac.irow -= 1
        jac.jcol -= 1
        # os.unlink(f'{kaug_files}jacobi_debug.in')

        # try:
        #     duals = read_duals(stub + '.sol')
        # except:
        duals = None

        # NOTE(review): (jcol, irow) are deliberately swapped here —
        # presumably A_print.txt stores the Jacobian transposed; confirm
        # against the k_aug output format before changing.
        J = coo_matrix((jac.vals, (jac.jcol, jac.irow)), shape=(m, n))
        # k_aug writes only one triangle of the symmetric Hessian; mirror
        # the strict upper triangle to obtain the full matrix.
        Hess_coo = coo_matrix((hess.vals, (hess.irow, hess.jcol)),
                              shape=(n, n))
        H = Hess_coo + triu(Hess_coo, 1).T

        print('This sizes of H and J')
        print(H.shape)
        print(J.shape)

    self.delete_sol_files()

    self.kkt_data = {
        'J': J,
        'H': H,
        'var_ind': var_index_names,
        'con_ind': con_index_names,
        'duals': duals,
    }

    return None
def main():
    """Simulate a moving-bed reactor model and analyze algebraic Jacobians.

    Initializes and solves the model element-by-element, then, at each
    point in time, computes condition numbers of selected diagonal blocks
    of the algebraic Jacobian (gas/solid enthalpy and density blocks) and
    plots them. Finally, reports structural results (diagonal blocks,
    nonlinear blocks) and saves a spy plot of the block-triangularized
    algebraic Jacobian at a single time point.

    Relies on module-level names not shown here (get_model_for_simulation,
    load_inputs_into_model, initialize_by_time_element, try_factorization,
    plot_spy, get_polynomial_degree_wrt, VC, CC, ...).
    """
    horizon = 600.0
    ntfe = 40
    #horizon = 30.0
    #ntfe = 2
    t1 = 0.0
    ch4_cuid = "fs.MB.gas_inlet.mole_frac_comp[*,CH4]"
    co2_cuid = "fs.MB.gas_inlet.mole_frac_comp[*,CO2]"
    h2o_cuid = "fs.MB.gas_inlet.mole_frac_comp[*,H2O]"
    # Piecewise-constant inlet composition over the whole horizon.
    input_dict = {
        ch4_cuid: {
            (t1, horizon): 0.5
        },
        co2_cuid: {
            (t1, horizon): 0.5
        },
        h2o_cuid: {
            (t1, horizon): 0.0
        },
    }
    m, var_cat, con_cat = get_model_for_simulation(horizon, ntfe)
    time = m.fs.time
    load_inputs_into_model(m, time, input_dict)
    solver = pyo.SolverFactory("ipopt")
    solve_kwds = {"tee": True}
    res_list = initialize_by_time_element(m, time, solver=solver,
                                          solve_kwds=solve_kwds)
    res = solver.solve(m, **solve_kwds)
    msg = res if type(res) is str else res.solver.termination_condition
    print(horizon, ntfe, msg)

    # A dummy objective is needed so the model can be wrapped in a PyomoNLP.
    m._obj = pyo.Objective(expr=0.0)
    nlp = PyomoNLP(m)
    igraph = IncidenceGraphInterface()
    # TODO: I should be able to do categorization in the pre-time-discretized
    # model. This is somewhat nicer as the time points are all independent
    # in that case.
    solid_enth_conds = []
    gas_enth_conds = []
    solid_dens_conds = []
    gas_dens_conds = []
    for t in time:
        # Algebraic subsystem at time t: algebraic constraints and the
        # algebraic variables they contain.
        var_set = ComponentSet(var[t] for var in var_cat[VC.ALGEBRAIC])
        constraints = [con[t] for con in con_cat[CC.ALGEBRAIC] if t in con]
        variables = [
            var for var in _generate_variables_in_constraints(constraints)
            if var in var_set
        ]
        # The algebraic subsystem must be square and structurally
        # nonsingular (perfect matching).
        assert len(variables) == len(constraints)
        alg_jac = nlp.extract_submatrix_jacobian(variables, constraints)
        N, M = alg_jac.shape
        assert N == M
        matching = igraph.maximum_matching(variables, constraints)
        assert len(matching) == N
        try_factorization(alg_jac)
        # Condition number of the entire algebraic Jacobian seems
        # inconsistent, so I don't calculate it.
        #cond = np.linalg.cond(alg_jac.toarray())
        #cond = get_condition_number(alg_jac)

        # Block-triangular decomposition of the algebraic Jacobian.
        var_blocks, con_blocks = igraph.get_diagonal_blocks(
            variables, constraints)
        block_matrices = [
            nlp.extract_submatrix_jacobian(vars, cons)
            for vars, cons in zip(var_blocks, con_blocks)
        ]
        # Classify blocks by the names of the variables/constraints they
        # contain (enthalpy blocks via temperature vars, density blocks via
        # sum_component_eqn constraints).
        gas_enth_blocks = [
            i for i, (vars, cons) in enumerate(zip(var_blocks, con_blocks))
            if any("gas_phase" in var.name and "temperature" in var.name
                   for var in vars)
        ]
        solid_enth_blocks = [
            i for i, (vars, cons) in enumerate(zip(var_blocks, con_blocks))
            if any("solid_phase" in var.name and "temperature" in var.name
                   for var in vars)
        ]
        gas_dens_blocks = [
            i for i, (vars, cons) in enumerate(zip(var_blocks, con_blocks))
            if any("gas_phase" in con.name and "sum_component_eqn" in con.name
                   for con in cons)
        ]
        solid_dens_blocks = [
            i for i, (vars, cons) in enumerate(zip(var_blocks, con_blocks))
            if any(
                "solid_phase" in con.name and "sum_component_eqn" in con.name
                for con in cons)
        ]
        gas_enth_cond = [
            np.linalg.cond(block_matrices[i].toarray())
            for i in gas_enth_blocks
        ]
        solid_enth_cond = [
            np.linalg.cond(block_matrices[i].toarray())
            for i in solid_enth_blocks
        ]
        gas_dens_cond = [
            np.linalg.cond(block_matrices[i].toarray())
            for i in gas_dens_blocks
        ]
        solid_dens_cond = [
            np.linalg.cond(block_matrices[i].toarray())
            for i in solid_dens_blocks
        ]
        # Track the worst (largest) condition number per category per time.
        max_gas_enth_cond = max(gas_enth_cond)
        max_solid_enth_cond = max(solid_enth_cond)
        max_gas_dens_cond = max(gas_dens_cond)
        max_solid_dens_cond = max(solid_dens_cond)
        gas_enth_conds.append(max_gas_enth_cond)
        solid_enth_conds.append(max_solid_enth_cond)
        gas_dens_conds.append(max_gas_dens_cond)
        solid_dens_conds.append(max_solid_dens_cond)

    # Plot condition numbers over time
    plt.rcParams.update({"font.size": 16})
    fig = plt.figure()
    ax = fig.add_subplot()
    t_list = list(time)
    ax.plot(t_list, gas_enth_conds, label="Gas enth.", linewidth=3,
            linestyle="solid")
    ax.plot(t_list, solid_enth_conds, label="Solid enth.", linewidth=3,
            linestyle="dotted")
    ax.plot(t_list, gas_dens_conds, label="Gas dens.", linewidth=3,
            linestyle="dashed")
    ax.plot(t_list, solid_dens_conds, label="Solid dens.", linewidth=3,
            linestyle="dashdot")
    ax.set_yscale("log")
    ax.set_ylim(bottom=1.0, top=1e7)
    ax.set_xlabel("Time (s)")
    ax.set_ylabel("Condition number")
    fig.legend(loc="center right", bbox_to_anchor=(1.0, 0.65))
    fig.tight_layout()
    fig.show()
    fig.savefig("condition_over_time.png", transparent=True)

    # Generate some structural results with the incidence matrix at a single
    # point in time.
    t = time.at(2)
    var_set = ComponentSet(var[t] for var in var_cat[VC.ALGEBRAIC])
    constraints = [con[t] for con in con_cat[CC.ALGEBRAIC] if t in con]
    variables = [
        var for var in _generate_variables_in_constraints(constraints)
        if var in var_set
    ]
    alg_jac = nlp.extract_submatrix_jacobian(variables, constraints)
    var_blocks, con_blocks = igraph.get_diagonal_blocks(variables,
                                                        constraints)
    dim = len(constraints)
    n_blocks = len(var_blocks)
    print("Number of variables/constraints: %s" % dim)
    print("Number of diagonal blocks: %s" % n_blocks)
    # A block is nonlinear if its polynomial degree is > 1 or undefined.
    block_polynomial_degrees = [
        get_polynomial_degree_wrt(cons, vars)
        for cons, vars in zip(con_blocks, var_blocks)
    ]
    nonlinear_blocks = [
        i for i, d in enumerate(block_polynomial_degrees)
        if d is None or d > 1
    ]
    print("Number of nonlinear blocks: %s" % len(nonlinear_blocks))
    print("\nNonlinear blocks:")
    for i in nonlinear_blocks:
        vars = var_blocks[i]
        cons = con_blocks[i]
        dim = len(vars)
        print(" Block %s, dim = %s" % (i, dim))
        print(" Variables:")
        for var in vars:
            print(" %s" % var.name)
        print(" Constraints:")
        for con in cons:
            print(" %s" % con.name)

    # Spy plot of the Jacobian permuted into block-triangular form.
    ordered_variables = [var for vars in var_blocks for var in vars]
    ordered_constraints = [con for cons in con_blocks for con in cons]
    ordered_jacobian = nlp.extract_submatrix_jacobian(ordered_variables,
                                                      ordered_constraints)
    plt.rcParams.update({"font.size": 18})
    fig, ax = plot_spy(
        ordered_jacobian,
        markersize=3,
    )
    ax.xaxis.set_tick_params(bottom=False)
    ax.xaxis.set_label_position("top")
    ax.set_xticks([0, 200, 400, 600])
    ax.set_yticks([0, 200, 400, 600])
    ax.set_xlabel("Column (variable) coordinates")
    ax.set_ylabel("Row (equation) coordinates")
    fig.tight_layout()
    fig.savefig("block_triangular_alg_jac.png", transparent=True)
    fig.show()