def test_degenerate_solid_phase_model(self):
    """Check the Dulmage-Mendelsohn partition of the degenerate solid phase model."""
    m = make_degenerate_solid_phase_model()
    all_vars = list(m.component_data_objects(pyo.Var))
    all_cons = list(m.component_data_objects(pyo.Constraint))
    igraph = IncidenceGraphInterface()
    var_dmp, con_dmp = igraph.dulmage_mendelsohn(all_vars, all_cons)

    # Expected underconstrained subsystem: the flow variables, which are
    # constrained only by the flow equations.
    expected_under_vars = ComponentSet(m.flow_comp.values())
    expected_under_vars.add(m.flow)
    expected_under_cons = ComponentSet(m.flow_eqn.values())

    under_vars = var_dmp[0] + var_dmp[1]
    self.assertEqual(len(under_vars), len(expected_under_vars))
    for var in under_vars:
        self.assertIn(var, expected_under_vars)
    self.assertEqual(len(con_dmp[2]), len(expected_under_cons))
    for con in con_dmp[2]:
        self.assertIn(con, expected_under_cons)

    # Expected overconstrained subsystem: compositions and density,
    # constrained by holdup, density, and sum equations.
    expected_over_cons = ComponentSet(m.holdup_eqn.values())
    expected_over_cons.add(m.density_eqn)
    expected_over_cons.add(m.sum_eqn)
    expected_over_vars = ComponentSet(m.x.values())
    expected_over_vars.add(m.rho)

    self.assertEqual(len(var_dmp[2]), len(expected_over_vars))
    for var in var_dmp[2]:
        self.assertIn(var, expected_over_vars)
    over_cons = con_dmp[0] + con_dmp[1]
    self.assertEqual(len(over_cons), len(expected_over_cons))
    for con in over_cons:
        self.assertIn(con, expected_over_cons)
def test_perfect_matching(self):
    """A maximum matching of the square, nonsingular subsystem is perfect."""
    model = make_gas_expansion_model()
    igraph = IncidenceGraphInterface()

    # Variables of the square, nonsingular subsystem: every pressure,
    # plus T, rho, and F at all streams except the first.
    first = model.streams.first()
    variables = list(model.P.values())
    for comp in (model.T, model.rho, model.F):
        variables.extend(comp[i] for i in model.streams if i != first)

    constraints = list(model.component_data_objects(pyo.Constraint))
    n_var = len(variables)

    matching = igraph.maximum_matching(variables, constraints)
    matched_vars = ComponentSet(matching.values())
    # Perfect matching: every constraint matched with a distinct variable.
    self.assertEqual(len(matching), n_var)
    self.assertEqual(len(matched_vars), n_var)

    # The subset of variables and equations we have identified does not
    # have a unique perfect matching, but we at least know this much:
    # ideal_gas[0] can only be matched with P[0].
    self.assertIs(matching[model.ideal_gas[0]], model.P[0])
def test_imperfect_matching(self):
    """A matching on the full (non-square) model still covers every equation."""
    model = make_gas_expansion_model()
    igraph = IncidenceGraphInterface(model)
    n_eqn = len(list(model.component_data_objects(pyo.Constraint)))
    matching = igraph.maximum_matching()
    matched_vars = ComponentSet(matching.values())
    # Every equation is matched with a unique variable.
    self.assertEqual(len(matching), n_eqn)
    self.assertEqual(len(matched_vars), n_eqn)
def test_reference(self):
    """A Reference to a variable must not create a duplicate matrix column."""
    m = pyo.ConcreteModel()
    m.v1 = pyo.Var()
    m.ref = pyo.Reference(m.v1)
    m.c1 = pyo.Constraint(expr=m.v1 == 1.0)
    igraph = IncidenceGraphInterface(m)
    # One constraint, one (deduplicated) variable.
    self.assertEqual(igraph.incidence_matrix.shape, (1, 1))
def test_diagonal_blocks_with_cached_maps(self):
    """get_diagonal_blocks reuses the block maps cached by block_triangularize."""
    N = 5
    model = make_gas_expansion_model(N)
    igraph = IncidenceGraphInterface()

    # Variables of the square, nonsingular subsystem
    first = model.streams.first()
    variables = list(model.P.values())
    for comp in (model.T, model.rho, model.F):
        variables.extend(comp[i] for i in model.streams if i != first)
    constraints = list(model.component_data_objects(pyo.Constraint))

    igraph.block_triangularize(variables, constraints)
    var_blocks, con_blocks = igraph.get_diagonal_blocks(variables, constraints)
    # The triangularization above should have populated the caches.
    self.assertIsNot(igraph.row_block_map, None)
    self.assertIsNot(igraph.col_block_map, None)
    self.assertEqual(len(var_blocks), N + 1)
    self.assertEqual(len(con_blocks), N + 1)

    for i, (block_vars, block_cons) in enumerate(zip(var_blocks, con_blocks)):
        var_set = ComponentSet(block_vars)
        con_set = ComponentSet(block_cons)
        if i == 0:
            # The first diagonal block contains only P[0]/ideal_gas[0].
            self.assertEqual(ComponentSet([model.P[0]]), var_set)
            self.assertEqual(ComponentSet([model.ideal_gas[0]]), con_set)
        else:
            # Each later stream contributes a 4x4 diagonal block.
            pred_var_set = ComponentSet(
                [model.rho[i], model.T[i], model.P[i], model.F[i]]
            )
            pred_con_set = ComponentSet([
                model.ideal_gas[i],
                model.expansion[i],
                model.mbal[i],
                model.ebal[i],
            ])
            self.assertEqual(pred_var_set, var_set)
            self.assertEqual(pred_con_set, con_set)
def test_triangularize_submatrix(self):
    """Triangularize a nontrivial submatrix of a cached incidence matrix."""
    N = 5
    model = make_gas_expansion_model(N)
    igraph = IncidenceGraphInterface(model)

    # Variables and constraints of a square, nonsingular subsystem
    # covering only the second "half" of the streams.
    half = N // 2
    variables = list(model.P[i] for i in model.streams if i >= half)
    for comp in (model.T, model.rho, model.F):
        variables.extend(comp[i] for i in model.streams if i > half)

    constraints = list(model.ideal_gas[i] for i in model.streams if i >= half)
    for comp in (model.expansion, model.mbal, model.ebal):
        constraints.extend(comp[i] for i in model.streams if i > half)

    var_block_map, con_block_map = igraph.block_triangularize(
        variables, constraints
    )
    self.assertEqual(len(set(var_block_map.values())), (N - half) + 1)
    self.assertEqual(len(set(con_block_map.values())), (N - half) + 1)

    # P[half] forms the first diagonal block by itself; each subsequent
    # stream contributes one diagonal block.
    self.assertEqual(var_block_map[model.P[half]], 0)
    for i in model.streams:
        if i > half:
            idx = i - half
            self.assertEqual(var_block_map[model.rho[i]], idx)
            self.assertEqual(var_block_map[model.T[i]], idx)
            self.assertEqual(var_block_map[model.P[i]], idx)
            self.assertEqual(var_block_map[model.F[i]], idx)
            self.assertEqual(con_block_map[model.ideal_gas[i]], idx)
            self.assertEqual(con_block_map[model.expansion[i]], idx)
            self.assertEqual(con_block_map[model.mbal[i]], idx)
            self.assertEqual(con_block_map[model.ebal[i]], idx)
def test_nlp_active_error(self):
    """Requesting inactive constraints from a PyomoNLP raises ValueError."""
    m = pyo.ConcreteModel()
    m.v1 = pyo.Var()
    m.c1 = pyo.Constraint(expr=m.v1 == 1.0)
    m.c2 = pyo.Constraint(expr=m.v1 == 2.0)
    # PyNumero requires exactly one objective on the model.
    m._obj = pyo.Objective(expr=0.0)
    nlp = PyomoNLP(m)
    # An NLP only exposes active constraints, so asking for inactive
    # ones is an error.
    with self.assertRaisesRegex(ValueError, "inactive constraints"):
        IncidenceGraphInterface(nlp, active=False)
def test_nlp_fixed_error(self):
    """Requesting fixed variables from a PyomoNLP raises ValueError."""
    m = pyo.ConcreteModel()
    m.v1 = pyo.Var()
    m.v2 = pyo.Var()
    m.c1 = pyo.Constraint(expr=m.v1 + m.v2 == 1.0)
    m.v2.fix(2.0)
    # PyNumero requires exactly one objective on the model.
    m._obj = pyo.Objective(expr=0.0)
    nlp = PyomoNLP(m)
    # An NLP only exposes free variables, so asking to include fixed
    # ones is an error.
    with self.assertRaisesRegex(ValueError, "fixed variables"):
        IncidenceGraphInterface(nlp, include_fixed=True)
def test_triangularize(self):
    """Block-triangularize the square subsystem through a PyomoNLP interface."""
    N = 5
    model = make_gas_expansion_model(N)
    # PyNumero requires exactly one objective on the model.
    model.obj = pyo.Objective(expr=0)
    nlp = PyomoNLP(model)
    igraph = IncidenceGraphInterface(nlp)

    # Variables of the square, nonsingular subsystem
    first = model.streams.first()
    variables = list(model.P.values())
    for comp in (model.T, model.rho, model.F):
        variables.extend(comp[i] for i in model.streams if i != first)
    constraints = list(model.component_data_objects(pyo.Constraint))

    var_block_map, con_block_map = igraph.block_triangularize(
        variables, constraints
    )
    self.assertEqual(len(set(var_block_map.values())), N + 1)
    self.assertEqual(len(set(con_block_map.values())), N + 1)

    # P[0] is alone in block 0; every later stream i forms block i.
    self.assertEqual(var_block_map[model.P[0]], 0)
    for i in model.streams:
        if i != first:
            self.assertEqual(var_block_map[model.rho[i]], i)
            self.assertEqual(var_block_map[model.T[i]], i)
            self.assertEqual(var_block_map[model.P[i]], i)
            self.assertEqual(var_block_map[model.F[i]], i)
            self.assertEqual(con_block_map[model.ideal_gas[i]], i)
            self.assertEqual(con_block_map[model.expansion[i]], i)
            self.assertEqual(con_block_map[model.mbal[i]], i)
            self.assertEqual(con_block_map[model.ebal[i]], i)
def test_remove(self):
    """Removing nodes updates the Dulmage-Mendelsohn partition and matrix shape.

    Fix: the original built ``variables``/``constraints`` lists that were
    never used (the interface is constructed directly from the model);
    the dead locals are removed.
    """
    m = make_degenerate_solid_phase_model()
    igraph = IncidenceGraphInterface(m)

    var_dmp, con_dmp = igraph.dulmage_mendelsohn()
    var_con_set = ComponentSet(igraph.variables + igraph.constraints)
    underconstrained_set = ComponentSet(
        var_dmp.unmatched + var_dmp.underconstrained
    )
    self.assertIn(m.flow_comp[1], var_con_set)
    self.assertIn(m.flow_eqn[1], var_con_set)
    self.assertIn(m.flow_comp[1], underconstrained_set)

    N, M = igraph.incidence_matrix.shape

    # flow_comp[1] is underconstrained, but we think it should be
    # specified by flow_eqn[1], so we remove these from the incidence
    # matrix.
    vars_to_remove = [m.flow_comp[1]]
    cons_to_remove = [m.flow_eqn[1]]
    igraph.remove_nodes(vars_to_remove + cons_to_remove)

    var_dmp, con_dmp = igraph.dulmage_mendelsohn()
    var_con_set = ComponentSet(igraph.variables + igraph.constraints)
    underconstrained_set = ComponentSet(
        var_dmp.unmatched + var_dmp.underconstrained
    )
    self.assertNotIn(m.flow_comp[1], var_con_set)
    self.assertNotIn(m.flow_eqn[1], var_con_set)
    self.assertNotIn(m.flow_comp[1], underconstrained_set)

    # Removing one row and one column shrinks the matrix accordingly.
    N_new, M_new = igraph.incidence_matrix.shape
    self.assertEqual(N_new, N - len(cons_to_remove))
    self.assertEqual(M_new, M - len(vars_to_remove))
def test_exception(self):
    """Passing indexed (non-data) components raises an informative ValueError."""
    model = make_gas_expansion_model()
    igraph = IncidenceGraphInterface(model)

    # Indexed components must be expanded into data objects by the
    # caller; both entry points should reject them.
    variables = [model.P]
    constraints = [model.ideal_gas]

    with self.assertRaises(ValueError) as exc:
        igraph.maximum_matching(variables, constraints)
    self.assertIn('must be unindexed', str(exc.exception))

    with self.assertRaises(ValueError) as exc:
        igraph.block_triangularize(variables, constraints)
    self.assertIn('must be unindexed', str(exc.exception))
def test_remove(self):
    """Removing matched nodes from the gas expansion model's incidence graph."""
    model = make_gas_expansion_model()
    igraph = IncidenceGraphInterface(model)
    n_eqn = len(list(model.component_data_objects(pyo.Constraint)))

    matching = igraph.maximum_matching()
    matched_vars = ComponentSet(matching.values())
    self.assertEqual(len(matching), n_eqn)
    self.assertEqual(len(matched_vars), n_eqn)

    variable_set = ComponentSet(igraph.variables)
    self.assertIn(model.F[0], variable_set)
    self.assertIn(model.F[2], variable_set)

    var_dmp, con_dmp = igraph.dulmage_mendelsohn()
    underconstrained_set = ComponentSet(
        var_dmp.unmatched + var_dmp.underconstrained
    )
    self.assertIn(model.F[0], underconstrained_set)
    self.assertIn(model.F[2], underconstrained_set)

    N, M = igraph.incidence_matrix.shape

    # Say we know that these variables and constraints should
    # be matched...
    vars_to_remove = [model.F[0], model.F[2]]
    cons_to_remove = (model.mbal[1], model.mbal[2])
    igraph.remove_nodes(vars_to_remove, cons_to_remove)

    variable_set = ComponentSet(igraph.variables)
    self.assertNotIn(model.F[0], variable_set)
    self.assertNotIn(model.F[2], variable_set)

    var_dmp, con_dmp = igraph.dulmage_mendelsohn()
    underconstrained_set = ComponentSet(
        var_dmp.unmatched + var_dmp.underconstrained
    )
    self.assertNotIn(model.F[0], underconstrained_set)
    self.assertNotIn(model.F[2], underconstrained_set)

    # The matrix loses one row per removed constraint and one column
    # per removed variable.
    N_new, M_new = igraph.incidence_matrix.shape
    self.assertEqual(N_new, N - len(cons_to_remove))
    self.assertEqual(M_new, M - len(vars_to_remove))
def main(plot_switch=False):
    """Run an NMPC simulation with eigenvalue-based fast-state model reduction.

    Builds controller and plant models, categorizes their DAE variables and
    constraints, identifies "fast" states from the eigenvalues of a reduced
    derivative Jacobian, removes them via a pseudo-steady-state reduction,
    then runs a closed-loop NMPC simulation with Gaussian input/measurement
    noise.

    Fixes relative to the original:
    - ``_fast_states, _ = np.where(...)`` unpacked a 1-tuple (``np.where``
      on a 1-D condition returns one index array) into two targets, which
      raises ValueError; changed to a single-target unpack, matching the
      ``fast_modes, = np.where(...)`` idiom used just above.
    - Removed the leftover debugging breakpoint (``import pdb;
      pdb.set_trace()``) at the end of the function.
    """
    # This tests the same model constructed in the test_nmpc_constructor_1 file
    m_controller = make_model(horizon=3, ntfe=30, ntcp=2, bounds=True)
    sample_time = 0.5
    m_plant = make_model(horizon=sample_time, ntfe=5, ntcp=2)
    time_plant = m_plant.fs.time

    solve_consistent_initial_conditions(m_plant, time_plant, solver)

    #####
    # Flatten and categorize controller model
    #####
    model = m_controller
    time = model.fs.time
    t0 = time.first()
    t1 = time[2]
    scalar_vars, dae_vars = flatten_dae_components(
        model,
        time,
        pyo.Var,
    )
    scalar_cons, dae_cons = flatten_dae_components(
        model,
        time,
        pyo.Constraint,
    )
    inputs = [
        model.fs.mixer.S_inlet.flow_vol,
        model.fs.mixer.E_inlet.flow_vol,
    ]
    measurements = [
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'C']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'E']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'S']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'P']),
        model.fs.cstr.outlet.temperature,
    ]
    model.fs.cstr.control_volume.material_holdup[:, 'aq', 'Solvent'].fix()
    model.fs.cstr.total_flow_balance.deactivate()
    var_partition, con_partition = categorize_dae_variables_and_constraints(
        model,
        dae_vars,
        dae_cons,
        time,
        input_vars=inputs,
    )
    controller = ControllerBlock(
        model=model,
        time=time,
        measurements=measurements,
        category_dict={None: var_partition},
    )
    controller.construct()

    solve_consistent_initial_conditions(m_controller, time, solver)
    controller.initialize_to_initial_conditions()

    # PyNumero requires exactly one objective; add a temporary dummy.
    m_controller._dummy_obj = pyo.Objective(expr=0)
    nlp = PyomoNLP(m_controller)
    igraph = IncidenceGraphInterface(nlp)
    m_controller.del_component(m_controller._dummy_obj)
    diff_vars = [var[t1] for var in var_partition[VC.DIFFERENTIAL]]
    alg_vars = [var[t1] for var in var_partition[VC.ALGEBRAIC]]
    deriv_vars = [var[t1] for var in var_partition[VC.DERIVATIVE]]
    diff_eqns = [con[t1] for con in con_partition[CC.DIFFERENTIAL]]
    alg_eqns = [con[t1] for con in con_partition[CC.ALGEBRAIC]]

    # Assemble and factorize "derivative Jacobian"
    dfdz = nlp.extract_submatrix_jacobian(diff_vars, diff_eqns)
    dfdy = nlp.extract_submatrix_jacobian(alg_vars, diff_eqns)
    dgdz = nlp.extract_submatrix_jacobian(diff_vars, alg_eqns)
    dgdy = nlp.extract_submatrix_jacobian(alg_vars, alg_eqns)
    dfdzdot = nlp.extract_submatrix_jacobian(deriv_vars, diff_eqns)
    fact = sps.linalg.splu(dgdy.tocsc())
    dydz = fact.solve(dgdz.toarray())
    deriv_jac = dfdz - dfdy.dot(dydz)
    fact = sps.linalg.splu(dfdzdot.tocsc())
    dzdotdz = -fact.solve(deriv_jac)

    # Use some heuristic on the eigenvalues of the derivative Jacobian
    # to identify fast states.
    w, V = np.linalg.eig(dzdotdz)
    w_max = np.max(np.abs(w))
    fast_modes, = np.where(np.abs(w) > w_max / 2)
    fast_states = []
    for idx in fast_modes:
        evec = V[:, idx]
        # States with a significant component in a fast eigenvector are
        # considered fast. np.where on a 1-D condition returns a 1-tuple.
        _fast_states, = np.where(np.abs(evec) > 0.5)
        fast_states.extend(_fast_states)
    fast_states = set(fast_states)

    # Store components necessary for model reduction in a model-
    # independent form.
    fast_state_derivs = [
        pyo.ComponentUID(
            var_partition[VC.DERIVATIVE][idx].referent, context=model)
        for idx in fast_states
    ]
    fast_state_diffs = [
        pyo.ComponentUID(
            var_partition[VC.DIFFERENTIAL][idx].referent, context=model)
        for idx in fast_states
    ]
    fast_state_discs = [
        pyo.ComponentUID(
            con_partition[CC.DISCRETIZATION][idx].referent, context=model)
        for idx in fast_states
    ]

    # Perform pseudo-steady state model reduction on the fast states
    # and re-categorize
    for cuid in fast_state_derivs:
        var = cuid.find_component_on(m_controller)
        var.fix(0.0)
    for cuid in fast_state_diffs:
        var = cuid.find_component_on(m_controller)
        var[t0].unfix()
    for cuid in fast_state_discs:
        con = cuid.find_component_on(m_controller)
        con.deactivate()
    var_partition, con_partition = categorize_dae_variables_and_constraints(
        model,
        dae_vars,
        dae_cons,
        time,
        input_vars=inputs,
    )
    controller.del_component(model)

    # Re-construct controller block with new categorization
    measurements = [
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'C']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'E']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'S']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'P']),
    ]
    controller = ControllerBlock(
        model=model,
        time=time,
        measurements=measurements,
        category_dict={None: var_partition},
    )
    controller.construct()

    #####
    # Construct dynamic block for plant
    #####
    model = m_plant
    time = model.fs.time
    t0 = time.first()
    t1 = time[2]
    scalar_vars, dae_vars = flatten_dae_components(
        model,
        time,
        pyo.Var,
    )
    scalar_cons, dae_cons = flatten_dae_components(
        model,
        time,
        pyo.Constraint,
    )
    inputs = [
        model.fs.mixer.S_inlet.flow_vol,
        model.fs.mixer.E_inlet.flow_vol,
    ]
    measurements = [
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'C']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'E']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'S']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'P']),
    ]
    model.fs.cstr.control_volume.material_holdup[:, 'aq', 'Solvent'].fix()
    model.fs.cstr.total_flow_balance.deactivate()

    var_partition, con_partition = categorize_dae_variables_and_constraints(
        model,
        dae_vars,
        dae_cons,
        time,
        input_vars=inputs,
    )
    plant = DynamicBlock(
        model=model,
        time=time,
        measurements=measurements,
        category_dict={None: var_partition},
    )
    plant.construct()

    p_t0 = plant.time.first()
    c_t0 = controller.time.first()
    p_ts = plant.sample_points[1]
    c_ts = controller.sample_points[1]

    controller.set_sample_time(sample_time)
    plant.set_sample_time(sample_time)

    # We now perform the "RTO" calculation: Find the optimal steady state
    # to achieve the following setpoint
    setpoint = [
        (controller.mod.fs.cstr.outlet.conc_mol[0, 'P'], 0.4),
        #(controller.mod.fs.cstr.outlet.conc_mol[0, 'S'], 0.01),
        (controller.mod.fs.cstr.outlet.conc_mol[0, 'S'], 0.1),
        (controller.mod.fs.cstr.control_volume.energy_holdup[0, 'aq'], 300),
        (controller.mod.fs.mixer.E_inlet.flow_vol[0], 0.1),
        (controller.mod.fs.mixer.S_inlet.flow_vol[0], 2.0),
        (controller.mod.fs.cstr.volume[0], 1.0),
    ]
    setpoint_weights = [
        (controller.mod.fs.cstr.outlet.conc_mol[0, 'P'], 1.),
        (controller.mod.fs.cstr.outlet.conc_mol[0, 'S'], 1.),
        (controller.mod.fs.cstr.control_volume.energy_holdup[0, 'aq'], 1.),
        (controller.mod.fs.mixer.E_inlet.flow_vol[0], 1.),
        (controller.mod.fs.mixer.S_inlet.flow_vol[0], 1.),
        (controller.mod.fs.cstr.volume[0], 1.),
    ]

    # Some of the "differential variables" that have been fixed in the
    # model file are different from the measurements listed above. We
    # unfix them here so the RTO solve is not overconstrained.
    # (The RTO solve will only automatically unfix inputs and measurements.)
    controller.mod.fs.cstr.control_volume.material_holdup[0, ...].unfix()
    controller.mod.fs.cstr.control_volume.energy_holdup[0, ...].unfix()
    #controller.mod.fs.cstr.volume[0].unfix()
    controller.mod.fs.cstr.control_volume.material_holdup[
        0, 'aq', 'Solvent'].fix()

    controller.add_setpoint_objective(setpoint, setpoint_weights)
    controller.solve_setpoint(solver)

    # Now we are ready to construct the tracking NMPC problem
    tracking_weights = [
        *((v, 1.) for v in controller.vectors.differential[:, 0]),
        *((v, 1.) for v in controller.vectors.input[:, 0]),
    ]
    controller.add_tracking_objective(tracking_weights)
    controller.constrain_control_inputs_piecewise_constant()
    controller.initialize_to_initial_conditions()

    # Solve the first control problem
    controller.vectors.input[...].unfix()
    controller.vectors.input[:, 0].fix()
    solver.solve(controller, tee=True)

    # For a proper NMPC simulation, we must have noise.
    # We do this by treating inputs and measurements as Gaussian random
    # variables with the following variances (and bounds).
    cstr = controller.mod.fs.cstr
    variance = [
        (cstr.outlet.conc_mol[0.0, 'S'], 0.01),
        (cstr.outlet.conc_mol[0.0, 'E'], 0.005),
        (cstr.outlet.conc_mol[0.0, 'C'], 0.01),
        (cstr.outlet.conc_mol[0.0, 'P'], 0.005),
        (cstr.outlet.temperature[0.0], 1.),
        (cstr.volume[0.0], 0.05),
    ]
    controller.set_variance(variance)
    measurement_variance = [
        v.variance for v in controller.MEASUREMENT_BLOCK[:].var
    ]
    measurement_noise_bounds = [
        (0.0, var[c_t0].ub) for var in controller.MEASUREMENT_BLOCK[:].var
    ]
    mx = plant.mod.fs.mixer
    variance = [
        (mx.S_inlet_state[0.0].flow_vol, 0.02),
        (mx.E_inlet_state[0.0].flow_vol, 0.001),
    ]
    plant.set_variance(variance)
    input_variance = [v.variance for v in plant.INPUT_BLOCK[:].var]
    input_noise_bounds = [
        (0.0, var[p_t0].ub) for var in plant.INPUT_BLOCK[:].var
    ]

    random.seed(100)

    # Extract inputs from controller and inject them into plant
    inputs = controller.generate_inputs_at_time(c_ts)
    plant.inject_inputs(inputs)

    # This "initialization" really simulates the plant with the new inputs.
    plant.vectors.input[:, :].fix()
    plant.initialize_by_solving_elements(solver)
    plant.vectors.input[:, :].fix()
    solver.solve(plant, tee=True)

    for i in range(1, 11):
        print('\nENTERING NMPC LOOP ITERATION %s\n' % i)
        measured = plant.generate_measurements_at_time(p_ts)
        plant.advance_one_sample()
        plant.initialize_to_initial_conditions()
        measured = apply_noise_with_bounds(
            measured,
            measurement_variance,
            random.gauss,
            measurement_noise_bounds,
        )

        controller.advance_one_sample()
        controller.load_measurements(measured)

        solver.solve(controller, tee=True)

        inputs = controller.generate_inputs_at_time(c_ts)
        inputs = apply_noise_with_bounds(
            inputs,
            input_variance,
            random.gauss,
            input_noise_bounds,
        )
        plant.inject_inputs(inputs)

        plant.initialize_by_solving_elements(solver)
        solver.solve(plant)
def categorize_dae_variables_and_constraints(
    model,
    dae_vars,
    dae_cons,
    time,
    index=None,
    input_vars=None,
    disturbance_vars=None,
    input_cons=None,
    active_inequalities=None,
):
    """Partition time-indexed variables and constraints into DAE categories.

    Returns a pair of dicts mapping variable categories (input,
    differential, derivative, algebraic, disturbance, unused) and
    constraint categories (input, differential, discretization,
    algebraic, unused) to lists of time-indexed components.
    """
    # Index that we access when we need to work with a specific data
    # object. This would be less necessary if constructing CUIDs was
    # efficient, or if we could do the equivalent of `identify_variables`
    # in a templatized constraint.
    if index is not None:
        t1 = index
    else:
        # Use the first non-initial time point as a "representative
        # index." Don't use get_finite_elements so this will be valid
        # for general ordered sets.
        t1 = time.at(2)

    input_vars = [] if input_vars is None else input_vars
    input_cons = [] if input_cons is None else input_cons
    disturbance_vars = [] if disturbance_vars is None else disturbance_vars
    active_inequalities = (
        [] if active_inequalities is None else active_inequalities
    )

    # We will check these sets to determine which components
    # are inputs and disturbances.
    #
    # NOTE: Specified input vars/cons and disturbance vars should be
    # in the form of components indexed only by time. The user can
    # accomplish this easily with the `Reference` function.
    #
    input_var_set = ComponentSet(inp[t1] for inp in input_vars)
    disturbance_var_set = ComponentSet(dist[t1] for dist in disturbance_vars)
    input_con_set = ComponentSet(inp[t1] for inp in input_cons)
    active_inequality_set = ComponentSet(
        con[t1] for con in active_inequalities
    )

    # Filter vars and cons for duplicates. We assume that if two
    # components refer to the same data object at the representative
    # index t1, they are effectively "the same" component and only one
    # needs to be kept.
    seen_ids = set()
    filtered_vars = []
    duplicate_vars = []
    for var in dae_vars:
        data_id = id(var[t1])
        if data_id in seen_ids:
            duplicate_vars.append(var)
        else:
            seen_ids.add(data_id)
            filtered_vars.append(var)
    filtered_cons = []
    duplicate_cons = []
    for con in dae_cons:
        data_id = id(con[t1])
        if data_id in seen_ids:
            duplicate_cons.append(con)
        else:
            seen_ids.add(data_id)
            filtered_cons.append(con)
    dae_vars = filtered_vars
    dae_cons = filtered_cons

    # Filter out inputs and disturbances. These are "not variables"
    # for the sake of having a square DAE model.
    dae_vars = [
        var for var in dae_vars
        if var[t1] not in input_var_set
        and var[t1] not in disturbance_var_set
    ]
    dae_cons = [
        con for con in dae_cons
        if con[t1] not in input_con_set
        and (con[t1].equality or con[t1] in active_inequality_set)
    ]

    # Map representative data objects back to their time-indexed containers.
    dae_map = ComponentMap()
    dae_map.update((var[t1], var) for var in dae_vars)
    dae_map.update((con[t1], con) for con in dae_cons)

    # Map each derivative to its (unique) differential equation.
    diff_eqn_map = ComponentMap()
    for con in dae_cons:
        condata = con[t1]
        is_diff, deriv = _identify_derivative_if_differential(condata, time)
        if is_diff:
            diff_eqn_map[deriv] = condata

    potential_deriv = []
    potential_diff_var = []
    potential_disc = []
    potential_diff_eqn = []
    for var in dae_vars:
        vardata = var[t1]
        if vardata in diff_eqn_map:
            # This check ensures that vardata is differential wrt time
            # and participates in exactly one non-discretization equation.
            # This equation is that derivative's "differential equation."
            diff_vardata = _get_state_vardata(vardata)
            if diff_vardata in dae_map:
                # May not be the case if diff_var is an input...
                potential_diff_var.append(dae_map[diff_vardata])
                potential_diff_eqn.append(dae_map[diff_eqn_map[vardata]])
                potential_deriv.append(var)
                potential_disc.append(dae_map[_get_disc_eq(vardata)])

    # PyNumero requires exactly one objective on the model.
    dummy_obj = False
    if len(list(model.component_objects(Objective, active=True))) == 0:
        dummy_obj = True
        model._temp_dummy_obj = Objective(expr=0)

    igraph = IncidenceGraphInterface()
    variables = [var[t1] for var in dae_vars]
    constraints = [con[t1] for con in dae_cons]
    present_cons = [con for con in constraints if con.active]
    active_var_set = ComponentSet(
        var for con in present_cons
        for var in identify_variables(con.expr, include_fixed=False)
    )
    # Only variables that actually appear in an active constraint are
    # "present"; fixed vars and inactive constraints are excluded here.
    present_vars = [var for var in variables if var in active_var_set]

    var_block_map, con_block_map = igraph.block_triangularize(
        present_vars,
        present_cons,
    )

    derivdatas = []
    diff_vardatas = []
    discdatas = []
    diff_condatas = []
    candidates = zip(
        potential_deriv, potential_disc, potential_diff_var, potential_diff_eqn
    )
    for deriv, disc, diff_var, diff_con in candidates:
        derivdata = deriv[t1]
        discdata = disc[t1]
        diff_vardata = diff_var[t1]
        diff_condata = diff_con[t1]
        # Check:
        if (
            # a. Variables are actually used (not fixed), and
            #    constraints are active
            derivdata in var_block_map
            and diff_vardata in var_block_map
            and discdata in con_block_map
            and diff_condata in con_block_map
            # b. The diff var can be matched with the disc eqn and
            #    the deriv var can be matched with the diff eqn.
            and (var_block_map[diff_vardata] == con_block_map[discdata])
            and (var_block_map[derivdata] == con_block_map[diff_condata])
        ):
            # Under these conditions, assuming the Jacobian of the diff eqns
            # with respect to the derivatives is nonsingular, a sufficient
            # condition for nonsingularity (of the submodel with fixed inputs
            # at t1) is that the Jacobian of algebraic variables with respect
            # to algebraic equations is nonsingular.
            derivdatas.append(derivdata)
            diff_vardatas.append(diff_vardata)
            discdatas.append(discdata)
            diff_condatas.append(diff_condata)

    derivs = [dae_map[vardata] for vardata in derivdatas]
    diff_vars = [dae_map[vardata] for vardata in diff_vardatas]
    discs = [dae_map[condata] for condata in discdatas]
    diff_cons = [dae_map[condata] for condata in diff_condatas]

    not_alg_set = ComponentSet(
        derivdatas + diff_vardatas + discdatas + diff_condatas
    )
    alg_vars = []
    unused_vars = []
    for vardata in variables:
        var = dae_map[vardata]
        if vardata not in var_block_map:
            unused_vars.append(var)
        elif vardata not in not_alg_set:
            alg_vars.append(var)
        # else var is differential, derivative, input, or disturbance
    alg_cons = []
    unused_cons = []
    for condata in constraints:
        con = dae_map[condata]
        if condata not in con_block_map:
            unused_cons.append(con)
        elif condata not in not_alg_set:
            alg_cons.append(con)
        # else con is differential or discretization (or a constraint
        # on inputs)

    if dummy_obj:
        model.del_component(model._temp_dummy_obj)

    var_category_dict = {
        VC.INPUT: input_vars,
        VC.DIFFERENTIAL: diff_vars,
        VC.DERIVATIVE: derivs,
        VC.ALGEBRAIC: alg_vars,
        VC.DISTURBANCE: disturbance_vars,
        VC.UNUSED: unused_vars,
    }
    con_category_dict = {
        CC.INPUT: input_cons,
        CC.DIFFERENTIAL: diff_cons,
        CC.DISCRETIZATION: discs,
        CC.ALGEBRAIC: alg_cons,
        CC.UNUSED: unused_cons,
    }
    return var_category_dict, con_category_dict
def test_remove_no_matrix(self):
    """Removing nodes before any matrix is cached raises RuntimeError."""
    m = pyo.ConcreteModel()
    m.v1 = pyo.Var()
    igraph = IncidenceGraphInterface()
    with self.assertRaisesRegex(RuntimeError, "no incidence matrix"):
        igraph.remove_nodes([m.v1])
def generate_strongly_connected_components(
    constraints,
    variables=None,
    include_fixed=False,
):
    """
    Performs a block triangularization of the incidence matrix of the
    provided constraints and variables, and yields a block that contains
    the constraints and variables of each diagonal block (strongly
    connected component).

    Arguments
    ---------
    constraints: List of Pyomo constraint data objects
        Constraints used to generate strongly connected components.
    variables: List of Pyomo variable data objects
        Variables that may participate in strongly connected components.
        If not provided, all variables in the constraints will be used.
    include_fixed: Bool
        Indicates whether fixed variables will be included when
        identifying variables in constraints.

    Yields
    ------
    Blocks containing the variables and constraints of every strongly
    connected component, in a topological order, as well as the
    "input variables" for that block

    """
    if variables is None:
        # Collect, in order of first appearance, every variable that
        # participates in some constraint.
        seen = ComponentSet()
        variables = []
        for con in constraints:
            for var in identify_variables(
                con.expr,
                include_fixed=include_fixed,
            ):
                if var not in seen:
                    variables.append(var)
                    seen.add(var)

    # The system must be square for a block triangularization.
    assert len(variables) == len(constraints)
    igraph = IncidenceGraphInterface()
    var_block_map, con_block_map = igraph.block_triangularize(
        variables=variables,
        constraints=constraints,
    )
    n_blocks = len(set(var_block_map.values()))

    # Bucket variables and constraints by their diagonal block index.
    var_blocks = [[] for _ in range(n_blocks)]
    con_blocks = [[] for _ in range(n_blocks)]
    for var, b in var_block_map.items():
        var_blocks[b].append(var)
    for con, b in con_block_map.items():
        con_blocks[b].append(con)

    subsets = list(zip(con_blocks, var_blocks))
    for block, inputs in generate_subsystem_blocks(
        subsets,
        include_fixed=include_fixed,
    ):
        # TODO: How does len scale for reference-to-list?
        assert len(block.vars) == len(block.cons)
        yield (block, inputs)
def main():
    """Simulate the moving-bed model and study condition numbers over time.

    Solves the dynamic model, computes condition numbers of the diagonal
    blocks of the algebraic Jacobian at every time point, plots them, and
    prints structural results (nonlinear blocks, block-triangular spy plot)
    at a single representative time point.
    """
    horizon = 600.0
    ntfe = 40
    #horizon = 30.0
    #ntfe = 2
    t1 = 0.0
    ch4_cuid = "fs.MB.gas_inlet.mole_frac_comp[*,CH4]"
    co2_cuid = "fs.MB.gas_inlet.mole_frac_comp[*,CO2]"
    h2o_cuid = "fs.MB.gas_inlet.mole_frac_comp[*,H2O]"
    input_dict = {
        ch4_cuid: {(t1, horizon): 0.5},
        co2_cuid: {(t1, horizon): 0.5},
        h2o_cuid: {(t1, horizon): 0.0},
    }
    m, var_cat, con_cat = get_model_for_simulation(horizon, ntfe)
    time = m.fs.time
    load_inputs_into_model(m, time, input_dict)

    solver = pyo.SolverFactory("ipopt")
    solve_kwds = {"tee": True}
    res_list = initialize_by_time_element(
        m, time, solver=solver, solve_kwds=solve_kwds
    )
    res = solver.solve(m, **solve_kwds)
    msg = res if type(res) is str else res.solver.termination_condition
    print(horizon, ntfe, msg)

    # PyNumero requires exactly one objective on the model.
    m._obj = pyo.Objective(expr=0.0)
    nlp = PyomoNLP(m)
    igraph = IncidenceGraphInterface()

    # TODO: I should be able to do categorization in the pre-time-discretized
    # model. This is somewhat nicer as the time points are all independent
    # in that case.
    solid_enth_conds = []
    gas_enth_conds = []
    solid_dens_conds = []
    gas_dens_conds = []
    for t in time:
        var_set = ComponentSet(var[t] for var in var_cat[VC.ALGEBRAIC])
        constraints = [con[t] for con in con_cat[CC.ALGEBRAIC] if t in con]
        variables = [
            var for var in _generate_variables_in_constraints(constraints)
            if var in var_set
        ]
        assert len(variables) == len(constraints)
        alg_jac = nlp.extract_submatrix_jacobian(variables, constraints)
        N, M = alg_jac.shape
        assert N == M
        matching = igraph.maximum_matching(variables, constraints)
        assert len(matching) == N
        try_factorization(alg_jac)
        # Condition number of the entire algebraic Jacobian seems
        # inconsistent, so I don't calculate it.
        #cond = np.linalg.cond(alg_jac.toarray())
        #cond = get_condition_number(alg_jac)
        var_blocks, con_blocks = igraph.get_diagonal_blocks(
            variables, constraints
        )
        block_matrices = [
            nlp.extract_submatrix_jacobian(blk_vars, blk_cons)
            for blk_vars, blk_cons in zip(var_blocks, con_blocks)
        ]
        # Locate the diagonal blocks for each physical subsystem by
        # matching component names.
        gas_enth_blocks = [
            i for i, (blk_vars, blk_cons)
            in enumerate(zip(var_blocks, con_blocks))
            if any("gas_phase" in var.name and "temperature" in var.name
                   for var in blk_vars)
        ]
        solid_enth_blocks = [
            i for i, (blk_vars, blk_cons)
            in enumerate(zip(var_blocks, con_blocks))
            if any("solid_phase" in var.name and "temperature" in var.name
                   for var in blk_vars)
        ]
        gas_dens_blocks = [
            i for i, (blk_vars, blk_cons)
            in enumerate(zip(var_blocks, con_blocks))
            if any("gas_phase" in con.name and "sum_component_eqn" in con.name
                   for con in blk_cons)
        ]
        solid_dens_blocks = [
            i for i, (blk_vars, blk_cons)
            in enumerate(zip(var_blocks, con_blocks))
            if any("solid_phase" in con.name
                   and "sum_component_eqn" in con.name
                   for con in blk_cons)
        ]
        gas_enth_cond = [
            np.linalg.cond(block_matrices[i].toarray())
            for i in gas_enth_blocks
        ]
        solid_enth_cond = [
            np.linalg.cond(block_matrices[i].toarray())
            for i in solid_enth_blocks
        ]
        gas_dens_cond = [
            np.linalg.cond(block_matrices[i].toarray())
            for i in gas_dens_blocks
        ]
        solid_dens_cond = [
            np.linalg.cond(block_matrices[i].toarray())
            for i in solid_dens_blocks
        ]
        # Track the worst-conditioned block of each type at this time.
        gas_enth_conds.append(max(gas_enth_cond))
        solid_enth_conds.append(max(solid_enth_cond))
        gas_dens_conds.append(max(gas_dens_cond))
        solid_dens_conds.append(max(solid_dens_cond))

    # Plot condition numbers over time
    plt.rcParams.update({"font.size": 16})
    fig = plt.figure()
    ax = fig.add_subplot()
    t_list = list(time)
    ax.plot(t_list, gas_enth_conds, label="Gas enth.",
            linewidth=3, linestyle="solid")
    ax.plot(t_list, solid_enth_conds, label="Solid enth.",
            linewidth=3, linestyle="dotted")
    ax.plot(t_list, gas_dens_conds, label="Gas dens.",
            linewidth=3, linestyle="dashed")
    ax.plot(t_list, solid_dens_conds, label="Solid dens.",
            linewidth=3, linestyle="dashdot")
    ax.set_yscale("log")
    ax.set_ylim(bottom=1.0, top=1e7)
    ax.set_xlabel("Time (s)")
    ax.set_ylabel("Condition number")
    fig.legend(loc="center right", bbox_to_anchor=(1.0, 0.65))
    fig.tight_layout()
    fig.show()
    fig.savefig("condition_over_time.png", transparent=True)

    # Generate some structural results with the incidence matrix at a single
    # point in time.
    t = time.at(2)
    var_set = ComponentSet(var[t] for var in var_cat[VC.ALGEBRAIC])
    constraints = [con[t] for con in con_cat[CC.ALGEBRAIC] if t in con]
    variables = [
        var for var in _generate_variables_in_constraints(constraints)
        if var in var_set
    ]
    alg_jac = nlp.extract_submatrix_jacobian(variables, constraints)
    var_blocks, con_blocks = igraph.get_diagonal_blocks(variables, constraints)
    dim = len(constraints)
    n_blocks = len(var_blocks)
    print("Number of variables/constraints: %s" % dim)
    print("Number of diagonal blocks: %s" % n_blocks)
    block_polynomial_degrees = [
        get_polynomial_degree_wrt(blk_cons, blk_vars)
        for blk_cons, blk_vars in zip(con_blocks, var_blocks)
    ]
    nonlinear_blocks = [
        i for i, d in enumerate(block_polynomial_degrees)
        if d is None or d > 1
    ]
    print("Number of nonlinear blocks: %s" % len(nonlinear_blocks))
    print("\nNonlinear blocks:")
    for i in nonlinear_blocks:
        blk_vars = var_blocks[i]
        blk_cons = con_blocks[i]
        dim = len(blk_vars)
        print(" Block %s, dim = %s" % (i, dim))
        print(" Variables:")
        for var in blk_vars:
            print(" %s" % var.name)
        print(" Constraints:")
        for con in blk_cons:
            print(" %s" % con.name)

    # Permute the Jacobian into block-triangular order and plot it.
    ordered_variables = [var for blk in var_blocks for var in blk]
    ordered_constraints = [con for blk in con_blocks for con in blk]
    ordered_jacobian = nlp.extract_submatrix_jacobian(
        ordered_variables, ordered_constraints
    )
    plt.rcParams.update({"font.size": 18})
    fig, ax = plot_spy(
        ordered_jacobian,
        markersize=3,
    )
    ax.xaxis.set_tick_params(bottom=False)
    ax.xaxis.set_label_position("top")
    ax.set_xticks([0, 200, 400, 600])
    ax.set_yticks([0, 200, 400, 600])
    ax.set_xlabel("Column (variable) coordinates")
    ax.set_ylabel("Row (equation) coordinates")
    fig.tight_layout()
    fig.savefig("block_triangular_alg_jac.png", transparent=True)
    fig.show()