def _repns_match(repn, test_repn):
    if not len(repn.linear_vars) == len(test_repn.linear_vars):
        return False
    coef_map = ComponentMap(
        (var, coef) for var, coef in zip(repn.linear_vars, repn.linear_coefs)
    )
    for var, coef in zip(test_repn.linear_vars, test_repn.linear_coefs):
        if not coef_map.get(var, 0) == coef:
            return False
    return True
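
# A minimal usage sketch (not part of the original test module): compare the
# standard representations of two algebraically equivalent linear expressions.
# It assumes generate_standard_repn from pyomo.repn; the model and the
# expressions below are illustrative only.
from pyomo.environ import ConcreteModel, Var
from pyomo.repn import generate_standard_repn

_m = ConcreteModel()
_m.x = Var()
_m.y = Var()

_repn_a = generate_standard_repn(2 * _m.x + 3 * _m.y)
_repn_b = generate_standard_repn(3 * _m.y + 2 * _m.x)

# Same variables and coefficients, regardless of term ordering
assert _repns_match(_repn_a, _repn_b)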
def test_row_ordering(self):
    model = ConcreteModel()
    model.a = Var()

    components = {}
    components["obj"] = Objective(expr=model.a)
    components["con1"] = Constraint(expr=model.a >= 0)
    components["con2"] = Constraint(expr=model.a <= 1)
    components["con3"] = Constraint(expr=(0, model.a, 1))
    components["con4"] = Constraint([1, 2], rule=lambda m, i: model.a == i)

    # add components in random order
    random_order = list(components.keys())
    random.shuffle(random_order)
    for key in random_order:
        model.add_component(key, components[key])

    # reverse the symbol and index order
    row_order = ComponentMap()
    row_order[model.con1] = 100
    row_order[model.con2] = 2
    row_order[model.con3] = 1
    row_order[model.con4[1]] = 0
    row_order[model.con4[2]] = -1

    self._check_baseline(model, row_order=row_order)
def _replace_parameters_in_constraints(self, variableSubMap):
    instance = self.model_instance
    block = self.block
    # Visitor that we will use to replace user-provided parameters
    # in the objective and the constraints.
    param_replacer = ExpressionReplacementVisitor(
        substitute=variableSubMap,
        remove_named_expressions=True,
    )
    # TODO: Flag to ExpressionReplacementVisitor to only replace
    # named expressions if a node has been replaced within that
    # expression.

    new_old_comp_map = ComponentMap()

    # clone Objective, add to Block, and update any Expressions
    for obj in list(instance.component_data_objects(Objective,
                                                    active=True,
                                                    descend_into=True)):
        tempName = unique_component_name(block, obj.local_name)
        new_expr = param_replacer.dfs_postorder_stack(obj.expr)
        block.add_component(tempName, Objective(expr=new_expr))
        new_old_comp_map[block.component(tempName)] = obj
        obj.deactivate()

    # clone Constraints, add to Block, and update any Expressions
    #
    # Unfortunate that this deactivates and replaces constraints
    # even if they don't contain the parameters.
    #
    old_con_list = list(instance.component_data_objects(Constraint,
                                                        active=True,
                                                        descend_into=True))
    last_idx = 0
    for con in old_con_list:
        if con.equality or con.lower is None or con.upper is None:
            new_expr = param_replacer.dfs_postorder_stack(con.expr)
            block.constList.add(expr=new_expr)
            last_idx += 1
            new_old_comp_map[block.constList[last_idx]] = con
        else:
            # Constraint must be a ranged inequality, break into
            # separate constraints
            new_body = param_replacer.dfs_postorder_stack(con.body)
            new_lower = param_replacer.dfs_postorder_stack(con.lower)
            new_upper = param_replacer.dfs_postorder_stack(con.upper)

            # Add constraint for lower bound
            block.constList.add(expr=(new_lower <= new_body))
            last_idx += 1
            new_old_comp_map[block.constList[last_idx]] = con

            # Add constraint for upper bound
            block.constList.add(expr=(new_body <= new_upper))
            last_idx += 1
            new_old_comp_map[block.constList[last_idx]] = con

        con.deactivate()

    return new_old_comp_map
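
# A minimal sketch (illustrative, not part of the original module) of the
# substitution mechanism used above: an id-keyed map passed to
# ExpressionReplacementVisitor swaps a mutable Param for a Var inside an
# expression.  The import path and the dfs_postorder_stack call mirror the
# interface used in this module and may differ across Pyomo versions.
from pyomo.environ import ConcreteModel, Param, Var
from pyomo.core.expr.visitor import ExpressionReplacementVisitor

_m = ConcreteModel()
_m.p = Param(initialize=2.0, mutable=True)
_m.x = Var()
_m.v_p = Var(initialize=2.0)  # variable that stands in for the parameter

_replacer = ExpressionReplacementVisitor(
    substitute={id(_m.p): _m.v_p},
    remove_named_expressions=True,
)
_new_expr = _replacer.dfs_postorder_stack(_m.p * _m.x + 1)
# _new_expr now references _m.v_p wherever _m.p appeared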
def test_column_ordering_linear(self):
    model = ConcreteModel()
    model.a = Var()
    model.b = Var()
    model.c = Var()
    terms = [model.a, model.b, model.c]
    model.obj = Objective(expr=self._gen_expression(terms))
    model.con = Constraint(expr=self._gen_expression(terms) <= 1)

    # reverse the symbolic ordering
    column_order = ComponentMap()
    column_order[model.a] = 2
    column_order[model.b] = 1
    column_order[model.c] = 0

    self._check_baseline(model, column_order=column_order)
def test_cnf(self):
    m = ConcreteModel()
    m.Y1 = BooleanVar()
    m.Y2 = BooleanVar()

    implication = implies(m.Y1, m.Y2)
    x = to_cnf(implication)[0]
    _check_equivalent(self, implication, x)

    atleast_expr = atleast(1, m.Y1, m.Y2)
    x = to_cnf(atleast_expr)[0]
    self.assertIs(atleast_expr, x)  # should be no change

    nestedatleast = implies(m.Y1, atleast_expr)
    m.extraY = BooleanVarList()
    indicator_map = ComponentMap()
    x = to_cnf(nestedatleast, m.extraY, indicator_map)
    self.assertEqual(str(x[0]), "extraY[1] ∨ ~Y1")
    self.assertIs(indicator_map[m.extraY[1]], atleast_expr)
def sipopt(instance, paramSubList, perturbList,
           cloneModel=True, streamSoln=False, keepfiles=False):
    """This function accepts a Pyomo ConcreteModel and a list of
    parameters, along with their corresponding perturbation list.  The
    model is then converted into the structure required to call sIPOPT
    to obtain an approximate perturbed solution with updated bounds on
    the decision variables.

    Parameters
    ----------
    instance: ConcreteModel
        pyomo model object
    paramSubList: list
        list of mutable parameters
    perturbList: list
        list of perturbed parameter values
    cloneModel: bool, optional
        indicator to clone the model.  If set to False, the original
        model will be altered
    streamSoln: bool, optional
        indicator to stream IPOPT solution
    keepfiles: bool, optional
        preserve solver interface files

    Returns
    -------
    model: ConcreteModel
        The model modified for use with sipopt.  The returned model has
        the following :class:`Suffix` members defined for reading the
        sIPOPT results:

        - ``model.sens_sol_state_1``: the approximated results at the
          perturbation point
        - ``model.sens_sol_state_1_z_L``: the updated lower-bound
          multipliers
        - ``model.sens_sol_state_1_z_U``: the updated upper-bound
          multipliers

    Raises
    ------
    ValueError
        if the lengths of paramSubList and perturbList do not match
    ValueError
        if an entry of paramSubList or perturbList is not a Param
    ValueError
        if a parameter in paramSubList is not mutable
    """
    # Verify user inputs
    if len(paramSubList) != len(perturbList):
        raise ValueError("Length of paramSubList argument does not equal "
                         "length of perturbList")

    for pp in paramSubList:
        if pp.ctype is not Param:
            raise ValueError("paramSubList argument is expecting a list "
                             "of Params")

    for pp in paramSubList:
        if not pp._mutable:
            raise ValueError("parameters within paramSubList must be mutable")

    for pp in perturbList:
        if pp.ctype is not Param:
            raise ValueError("perturbList argument is expecting a list "
                             "of Params")

    # Add a model block to compartmentalize all sipopt data
    b = Block()
    block_name = unique_component_name(instance, '_sipopt_data')
    instance.add_component(block_name, b)

    # Based on user input, clone the model or use the original model
    # for the analysis
    if cloneModel:
        b.tmp_lists = (paramSubList, perturbList)
        m = instance.clone()
        instance.del_component(block_name)
        b = getattr(m, block_name)
        paramSubList, perturbList = b.tmp_lists
        del b.tmp_lists
    else:
        m = instance

    # Generate component maps for associating Variables to perturbations
    varSubList = []
    for parameter in paramSubList:
        tempName = unique_component_name(b, parameter.local_name)
        b.add_component(tempName, Var(parameter.index_set()))
        myVar = b.component(tempName)
        varSubList.append(myVar)

    # Note: substitutions are not currently compatible with
    #       ComponentMap [ECSA 2018/11/23], this relates to Issue #755
    paramCompMap = ComponentMap(zip(paramSubList, varSubList))
    variableSubMap = {}  # variableSubMap = ComponentMap()
    paramPerturbMap = ComponentMap(zip(paramSubList, perturbList))
    perturbSubMap = {}  # perturbSubMap = ComponentMap()

    paramDataList = []
    for parameter in paramSubList:
        # Loop over each ParamData in the Param Component
        #
        # Note: Sets are unordered in Pyomo.  For this to be
        # deterministic, we need to sort the index (otherwise, the
        # ordering of things in the paramDataList may change).  We use
        # sorted_robust to guard against mixed-type Sets in Python 3.x
        for kk in sorted_robust(parameter):
            variableSubMap[id(parameter[kk])] = paramCompMap[parameter][kk]
            perturbSubMap[id(parameter[kk])] = paramPerturbMap[parameter][kk]
            paramDataList.append(parameter[kk])

    # clone Objective, add to Block, and update any Expressions
    for cc in list(m.component_data_objects(Objective,
                                            active=True,
                                            descend_into=True)):
        tempName = unique_component_name(m, cc.local_name)
        b.add_component(
            tempName,
            Objective(expr=ExpressionReplacementVisitor(
                substitute=variableSubMap,
                remove_named_expressions=True).dfs_postorder_stack(cc.expr)))
        cc.deactivate()

    # clone Constraints, add to Block, and update any Expressions
    b.constList = ConstraintList()
    for cc in list(m.component_data_objects(Constraint,
                                            active=True,
                                            descend_into=True)):
        if cc.equality:
            b.constList.add(expr=ExpressionReplacementVisitor(
                substitute=variableSubMap,
                remove_named_expressions=True).dfs_postorder_stack(cc.expr))
        else:
            if cc.lower is None or cc.upper is None:
                b.constList.add(expr=ExpressionReplacementVisitor(
                    substitute=variableSubMap,
                    remove_named_expressions=True).dfs_postorder_stack(cc.expr))
            else:
                # Constraint must be a ranged inequality, break into
                # separate constraints

                # Add constraint for lower bound
                b.constList.add(expr=ExpressionReplacementVisitor(
                    substitute=variableSubMap,
                    remove_named_expressions=True).dfs_postorder_stack(cc.lower)
                    <= ExpressionReplacementVisitor(
                        substitute=variableSubMap,
                        remove_named_expressions=True).dfs_postorder_stack(cc.body))

                # Add constraint for upper bound
                b.constList.add(expr=ExpressionReplacementVisitor(
                    substitute=variableSubMap,
                    remove_named_expressions=True).dfs_postorder_stack(cc.upper)
                    >= ExpressionReplacementVisitor(
                        substitute=variableSubMap,
                        remove_named_expressions=True).dfs_postorder_stack(cc.body))
        cc.deactivate()

    # paramData to varData constraint list
    b.paramConst = ConstraintList()
    for ii in paramDataList:
        jj = variableSubMap[id(ii)]
        b.paramConst.add(ii == jj)

    # Create the ipopt_sens (aka sIPOPT) solver plugin using the ASL interface
    opt = SolverFactory('ipopt_sens', solver_io='nl')

    if not opt.available(False):
        raise ImportError('ipopt_sens is not available')

    # Declare Suffixes
    m.sens_state_0 = Suffix(direction=Suffix.EXPORT)
    m.sens_state_1 = Suffix(direction=Suffix.EXPORT)
    m.sens_state_value_1 = Suffix(direction=Suffix.EXPORT)
    m.sens_init_constr = Suffix(direction=Suffix.EXPORT)

    m.sens_sol_state_1 = Suffix(direction=Suffix.IMPORT)
    m.sens_sol_state_1_z_L = Suffix(direction=Suffix.IMPORT)
    m.sens_sol_state_1_z_U = Suffix(direction=Suffix.IMPORT)

    # set sIPOPT data
    opt.options['run_sens'] = 'yes'

    # for reasons that are not entirely clear, ipopt_sens requires
    # the indices to start at 1
    kk = 1
    for ii in paramDataList:
        m.sens_state_0[variableSubMap[id(ii)]] = kk
        m.sens_state_1[variableSubMap[id(ii)]] = kk
        m.sens_state_value_1[variableSubMap[id(ii)]] = \
            value(perturbSubMap[id(ii)])
        m.sens_init_constr[b.paramConst[kk]] = kk
        kk += 1

    # Send the model to ipopt_sens and collect the solution
    results = opt.solve(m, keepfiles=keepfiles, tee=streamSoln)

    return m
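
# A minimal usage sketch for sipopt (not part of the original module).  The
# model, parameter names, and perturbation values are illustrative only, and
# the call assumes the ipopt_sens executable is available on the system path.
from pyomo.environ import ConcreteModel, Param, Var, Objective, Constraint

_m = ConcreteModel()
_m.p = Param(initialize=1.0, mutable=True)       # nominal parameter value
_m.p_pert = Param(initialize=1.1, mutable=True)  # perturbed parameter value
_m.x = Var(initialize=1.0, bounds=(0, 10))
_m.obj = Objective(expr=(_m.x - _m.p) ** 2)
_m.con = Constraint(expr=_m.x + _m.p <= 10)

_m_sens = sipopt(_m, [_m.p], [_m.p_pert], cloneModel=True, streamSoln=False)

# The approximated solution at the perturbed point is reported through the
# imported suffixes declared by sipopt.
for _var, _val in _m_sens.sens_sol_state_1.items():
    print(_var.name, _val)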
def to_matrix_form(model):
    """
    Converts a concrete Pyomo model with a linear objective and
    linear constraints into matrix form.

    Args:
        model: A concrete Pyomo model.

    Returns:
        Objects that define the following LP representation:

            min(max)  c0 + c^T x
                s.t.  bL <= Ax <= bU
                      xL <=  x <= xU

        where:
          c0: scalar representing the aggregation of all constants
              found in the objective expression
          c:  nvars-length list of objective coefficients
          bL: ncons-length list of constraint lower bounds
          bU: ncons-length list of constraint upper bounds
          A:  3-tuple consisting of list objects (data, indices,
              indptr) defining a sparse matrix in Compressed Sparse
              Row format
          xL: nvars-length list of variable lower bounds
          xU: nvars-length list of variable upper bounds

        In addition, the following mapping objects are returned:
          vartocol: maps model variable objects to their integer
                    column index in the A matrix.
                    E.g., vartocol[model.x[5]] # -> 0
          contorow: maps model constraint objects to their integer
                    row index in the A matrix.
                    E.g., contorow[model.c] # -> 19

        All variable and constraint bound vectors will contain values
        of float('-inf') and float('inf') where the corresponding
        bound does not exist.
    """

    # Assign each variable a deterministic symbol (an index in a
    # list) so that we can guarantee the same matrix ordering for a
    # given Pyomo model.  We can not assign a column index until
    # after collecting the list of variables that are actually used.
    sortOrder = SortComponents.indices | SortComponents.alphabetical
    all_blocks = [_b for _b in
                  model.block_data_objects(active=True, sort=sortOrder)]
    VarSymbolToVarObject = []
    for block in all_blocks:
        VarSymbolToVarObject.extend(
            block.component_data_objects(Var,
                                         sort=sortOrder,
                                         descend_into=False))
    VarIDToVarSymbol = dict(
        (id(var), index) for index, var in enumerate(VarSymbolToVarObject))

    # Loop over the objective and constraints to generate the cost
    # vector and matrix rows.  Raise an exception if any nonlinear
    # expressions are encountered.
    negative_infinity = float('-inf')
    positive_infinity = float('inf')
    nobjs = 0
    referenced_var_symbols = set()
    A_indptr = [0]
    A_indices = []
    A_data = []
    bL = []
    bU = []
    c_sparse = {}
    c0 = 0.0
    RowIndexToConstraintObject = []
    for block in all_blocks:

        for objective in block.component_data_objects(Objective,
                                                       active=True,
                                                       sort=sortOrder,
                                                       descend_into=False):
            nobjs += 1
            if nobjs > 1:
                raise ValueError("This function does not support "
                                 "multiple objectives")
            polynomial_degree = objective.expr.polynomial_degree()
            if (polynomial_degree != 0) and (polynomial_degree != 1):
                raise ValueError("This function does not support "
                                 "nonlinear objectives")

            canonical_repn = generate_canonical_repn(objective.expr)
            variables = canonical_repn.variables
            coefficients = canonical_repn.linear
            if variables is not None:
                for var, coef in zip(variables, coefficients):
                    var_symbol = VarIDToVarSymbol[id(var)]
                    c_sparse[var_symbol] = coef
                    referenced_var_symbols.add(var_symbol)
            if canonical_repn.constant is not None:
                c0 = value(canonical_repn.constant)

        for sosconstraint in block.component_data_objects(SOSConstraint,
                                                          active=True,
                                                          sort=sortOrder,
                                                          descend_into=False):
            raise ValueError("This function does not "
                             "support SOSConstraints")

        for constraint in block.component_data_objects(Constraint,
                                                       active=True,
                                                       sort=sortOrder,
                                                       descend_into=False):
            polynomial_degree = constraint.body.polynomial_degree()
            if (polynomial_degree != 0) and (polynomial_degree != 1):
                raise ValueError("This function does not support "
                                 "nonlinear constraints")

            RowIndexToConstraintObject.append(constraint)

            canonical_repn = generate_canonical_repn(constraint.body)
            variables = canonical_repn.variables
            coefficients = canonical_repn.linear

            row_variable_symbols = []
            row_coefficients = []
            if variables is not None:
                row_variable_symbols = \
                    [VarIDToVarSymbol[id(var)] for var in variables]
                referenced_var_symbols.update(row_variable_symbols)
                row_coefficients = coefficients

            A_indptr.append(A_indptr[-1] + len(row_variable_symbols))
            A_indices.extend(row_variable_symbols)
            A_data.extend(row_coefficients)

            L = negative_infinity
            U = positive_infinity
            constant = 0.0
            if constraint.lower is not None:
                L = value(constraint.lower)
            if constraint.upper is not None:
                U = value(constraint.upper)
            if canonical_repn.constant is not None:
                constant = value(canonical_repn.constant)
            bL.append(L - constant)
            bU.append(U - constant)

    ncols = len(referenced_var_symbols)

    # Assign a column index to the set of referenced variables
    ColumnIndexToVarSymbol = sorted(referenced_var_symbols)
    VarSymbolToColumnIndex = dict(
        (symbol, col) for col, symbol in enumerate(ColumnIndexToVarSymbol))
    A_indices = [VarSymbolToColumnIndex[symbol] for symbol in A_indices]
    ColumnIndexToVarObject = [VarSymbolToVarObject[var_symbol]
                              for var_symbol in ColumnIndexToVarSymbol]

    # Convert the sparse cost vector into a dense list based on the
    # variable column id assignments.
    c = [0.0 for j in range(ncols)]
    for var_symbol, coef in c_sparse.items():
        c[VarSymbolToColumnIndex[var_symbol]] = coef

    # Generate dense xL and xU variable bound lists based on the
    # variable column id assignments
    xL = [negative_infinity for j in range(ncols)]
    xU = [positive_infinity for j in range(ncols)]
    for j, var in enumerate(ColumnIndexToVarObject):
        if var.lb is not None:
            xL[j] = value(var.lb)
        if var.ub is not None:
            xU[j] = value(var.ub)

    # Generate the component maps that allow one to recover the
    # row/column index from a constraint/variable object.  The
    # reverse maps are easy enough to generate from these two maps
    # if needed.
    vartocol = ComponentMap((var, j) for j, var
                            in enumerate(ColumnIndexToVarObject))
    contorow = ComponentMap((con, i) for i, con
                            in enumerate(RowIndexToConstraintObject))

    return (c0, c, bL, bU,
            (A_data, A_indices, A_indptr),
            xL, xU, vartocol, contorow)
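
# A minimal usage sketch for to_matrix_form (not part of the original module);
# the small LP below is illustrative only.
from pyomo.environ import ConcreteModel, Var, Objective, Constraint

_m = ConcreteModel()
_m.x = Var(bounds=(0, 10))
_m.y = Var(bounds=(0, None))
_m.obj = Objective(expr=2 * _m.x + 3 * _m.y + 1)
_m.c1 = Constraint(expr=_m.x + _m.y <= 5)
_m.c2 = Constraint(expr=(1, _m.x - _m.y, 4))

(_c0, _c, _bL, _bU,
 (_A_data, _A_indices, _A_indptr),
 _xL, _xU, _vartocol, _contorow) = to_matrix_form(_m)

print(_c0)               # constant term collected from the objective
print(_c)                # dense objective coefficients, column ordered
print(_bL, _bU)          # row bounds, with +/-inf where a side is absent
print(_vartocol[_m.x])   # column index assigned to _m.x
print(_contorow[_m.c1])  # row index assigned to _m.c1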