def preprocess_block_objectives(block, idMap=None):

    # Get/Create the ComponentMap for the canonical_repn
    if not hasattr(block, '_canonical_repn'):
        block._canonical_repn = ComponentMap()
    block_canonical_repn = block._canonical_repn

    for objective_data in block.component_data_objects(Objective,
                                                       active=True,
                                                       descend_into=False):

        if objective_data.expr is None:
            raise ValueError("No expression has been defined for objective %s"
                             % (objective_data.name))

        try:
            objective_data_repn = generate_canonical_repn(objective_data.expr,
                                                          idMap=idMap)
        except Exception:
            err = sys.exc_info()[1]
            logging.getLogger('pyomo.core').error(
                "exception generating a canonical representation for "
                "objective %s: %s" % (objective_data.name, str(err)))
            raise

        block_canonical_repn[objective_data] = objective_data_repn
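# --- Usage sketch (not from the original source) -----------------------------
# A minimal example of pulling the linear representation of an objective, in
# the spirit of preprocess_block_objectives() above.  This sketch assumes a
# newer Pyomo in which generate_standard_repn (the successor of
# generate_canonical_repn) is importable from pyomo.repn.
from pyomo.environ import ConcreteModel, Var, Objective
from pyomo.repn import generate_standard_repn

def _sketch_objective_repn():
    m = ConcreteModel()
    m.x = Var()
    m.y = Var()
    m.obj = Objective(expr=3 * m.x + 2 * m.y + 5)

    repn = generate_standard_repn(m.obj.expr)
    print("constant:", repn.constant)          # -> 5
    for var, coef in zip(repn.linear_vars, repn.linear_coefs):
        print(var.name, coef)                  # -> x 3 and y 2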
def constraint_generator():
    for block in all_blocks:

        gen_con_canonical_repn = \
            getattr(block, "_gen_con_canonical_repn", True)

        # Get/Create the ComponentMap for the repn
        if not hasattr(block, '_canonical_repn'):
            block._canonical_repn = ComponentMap()
        block_canonical_repn = block._canonical_repn

        for constraint_data in block.component_data_objects(
                Constraint,
                active=True,
                sort=sortOrder,
                descend_into=False):

            if isinstance(constraint_data, LinearCanonicalRepn):
                canonical_repn = constraint_data
            else:
                if gen_con_canonical_repn:
                    canonical_repn = generate_canonical_repn(
                        constraint_data.body)
                    block_canonical_repn[constraint_data] = canonical_repn
                else:
                    canonical_repn = block_canonical_repn[constraint_data]

            yield constraint_data, canonical_repn
def evaluate(expr, seconds):

    gc.collect()
    _clear_expression_pool()
    start = time.time()
    #
    expr_ = expr.clone()
    #
    stop = time.time()
    seconds['clone'] = stop - start

    gc.collect()
    _clear_expression_pool()
    start = time.time()
    #
    d_ = expr.polynomial_degree()
    #
    stop = time.time()
    seconds['polynomial_degree'] = stop - start

    if False:
        gc.collect()
        _clear_expression_pool()
        start = time.time()
        #
        s_ = expr.to_string()
        #
        stop = time.time()
        seconds['to_string'] = stop - start

    gc.collect()
    _clear_expression_pool()
    start = time.time()
    #
    s_ = expr.is_constant()
    #
    stop = time.time()
    seconds['is_constant'] = stop - start

    gc.collect()
    _clear_expression_pool()
    start = time.time()
    #
    s_ = expr.is_fixed()
    #
    stop = time.time()
    seconds['is_fixed'] = stop - start

    try:
        gc.collect()
        _clear_expression_pool()
        start = time.time()
        #
        r_ = generate_canonical_repn(expr)
        #
        stop = time.time()
        seconds['generate_canonical'] = stop - start
    except:
        seconds['generate_canonical'] = -1

    return seconds
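# --- Usage sketch (not from the original source) -----------------------------
# A hypothetical driver for the evaluate() timing harness above.  It assumes
# evaluate(), _clear_expression_pool() and generate_canonical_repn() are in
# scope in the same script; quicksum builds a large linear expression cheaply.
def time_linear_sum(N=10000):
    from pyomo.environ import ConcreteModel, Var, RangeSet, quicksum
    m = ConcreteModel()
    m.I = RangeSet(N)
    m.x = Var(m.I)
    expr = quicksum(i * m.x[i] for i in m.I)
    seconds = evaluate(expr, {})
    for op in sorted(seconds):
        print("%-20s %10.4f s" % (op, seconds[op]))
    return seconds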
def constraint_generator():
    for block in all_blocks:

        gen_con_canonical_repn = \
            getattr(block, "_gen_con_canonical_repn", True)

        # Get/Create the ComponentMap for the repn
        if not hasattr(block, '_canonical_repn'):
            block._canonical_repn = ComponentMap()
        block_canonical_repn = block._canonical_repn

        for constraint_data in block.component_data_objects(
                Constraint,
                active=True,
                sort=sortOrder,
                descend_into=False):

            if (not constraint_data.has_lb()) and \
               (not constraint_data.has_ub()):
                assert not constraint_data.equality
                continue  # non-binding, so skip

            if constraint_data._linear_canonical_form:
                canonical_repn = constraint_data.canonical_form()
            elif isinstance(constraint_data, LinearCanonicalRepn):
                canonical_repn = constraint_data
            else:
                if gen_con_canonical_repn:
                    canonical_repn = generate_canonical_repn(
                        constraint_data.body)
                    block_canonical_repn[constraint_data] = canonical_repn
                else:
                    canonical_repn = block_canonical_repn[constraint_data]

            yield constraint_data, canonical_repn
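# --- Usage sketch (not from the original source) -----------------------------
# The generator above skips non-binding constraints and caches one
# representation per constraint in a ComponentMap stored on the owning block.
# The sketch below shows the same skip-and-cache pattern against the newer
# generate_standard_repn API; ComponentMap is assumed importable from
# pyomo.common.collections on recent Pyomo versions.
from pyomo.environ import Constraint
from pyomo.common.collections import ComponentMap
from pyomo.repn import generate_standard_repn

def cached_constraint_repns(model):
    repn_cache = ComponentMap()
    for con in model.component_data_objects(Constraint, active=True):
        if (not con.has_lb()) and (not con.has_ub()):
            continue  # non-binding, so skip it
        if con not in repn_cache:
            repn_cache[con] = generate_standard_repn(con.body)
        yield con, repn_cache[con]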
def coef_via_pyomo(CostExpression):
    # NOTE: relies on a 'symbol_map' available in the enclosing scope that maps
    #       Pyomo variables to their scenario-tree symbols.
    canonical_repn = generate_canonical_repn(CostExpression.expr)

    cost_coefficients = {}
    var_names = {}
    for (index, variable) in enumerate(canonical_repn.variables):
        variable_id = symbol_map.getSymbol(variable)
        cost_coefficients[variable_id] = canonical_repn.linear[index]
        var_names[variable_id] = variable.name

    return (cost_coefficients, var_names)
def return_c_vector(block, unfixed):
    # Note that this function is adapted from the function collect_linear_terms
    # defined in pyomo/repn/collect.py.
    from pyutilib.misc import Bunch
    from pyomo.core.base import Var, Constraint, Objective, maximize, minimize
    from pyomo.repn import generate_canonical_repn

    #
    # Variables are constraints of block
    # Constraints are unfixed variables of block and the parent model.
    #
    vnames = set()
    for (name, data) in block.component_map(Constraint, active=True).items():
        vnames.add((name, data.is_indexed()))
    cnames = set(unfixed)
    for (name, data) in block.component_map(Var, active=True).items():
        cnames.add((name, data.is_indexed()))
    #
    A = {}
    b_coef = {}
    c_rhs = {}
    c_sense = {}
    d_sense = None
    v_domain = {}
    #
    # Collect objective
    #
    for (oname, odata) in block.component_map(Objective, active=True).items():
        for ndx in odata:
            if odata[ndx].sense == maximize:
                o_terms = generate_canonical_repn(-1 * odata[ndx].expr,
                                                  compute_values=False)
                d_sense = minimize
            else:
                o_terms = generate_canonical_repn(odata[ndx].expr,
                                                  compute_values=False)
                d_sense = maximize
            for i in range(len(o_terms.variables)):
                c_rhs[o_terms.variables[i].parent_component().local_name,
                      o_terms.variables[i].index()] = o_terms.linear[i]
        # Stop after the first objective
        break

    return c_rhs
def preprocess_constraint(block,
                          constraint,
                          idMap=None,
                          block_canonical_repn=None):

    from pyomo.repn.beta.matrix import MatrixConstraint
    if isinstance(constraint, MatrixConstraint):
        return

    # Get/Create the ComponentMap for the canonical_repn
    if not hasattr(block, '_canonical_repn'):
        block._canonical_repn = ComponentMap()
    block_canonical_repn = block._canonical_repn

    for index, constraint_data in iteritems(constraint):

        if not constraint_data.active:
            continue

        if isinstance(constraint_data, LinearCanonicalRepn):
            continue

        if constraint_data.body is None:
            raise ValueError("No expression has been defined for "
                             "the body of constraint %s, index=%s"
                             % (str(constraint.name), str(index)))

        # FIXME: This is a huge hack to keep canonical_repn from trying to
        #        generate representations of Constraints with Connectors
        #        (which will be deactivated once they have been expanded
        #        anyways).  This can go away when preprocess is moved out of
        #        the model.create() phase and into the future model
        #        validation phase. (ZBF)
        ignore_connector = False
        if hasattr(constraint_data.body, "_args") and \
           constraint_data.body._args is not None:
            for arg in constraint_data.body._args:
                if arg.__class__ is pyomo.core.base.connector.SimpleConnector:
                    ignore_connector = True
        if ignore_connector:
            #print "Ignoring",constraint.name,index
            continue

        try:
            canonical_repn = generate_canonical_repn(constraint_data.body,
                                                     idMap=idMap)
        except Exception:
            logging.getLogger('pyomo.core').error(
                "exception generating a canonical representation for "
                "constraint %s (index %s)"
                % (str(constraint.name), str(index)))
            raise

        block_canonical_repn[constraint_data] = canonical_repn
def _get_expr_from_pyomo_expr(self, expr, max_degree=2):
    repn = generate_canonical_repn(expr)

    try:
        gurobi_expr, referenced_vars = \
            self._get_expr_from_pyomo_repn(repn, max_degree)
    except DegreeError as e:
        msg = e.args[0]
        msg += '\nexpr: {0}'.format(expr)
        raise DegreeError(msg)

    return gurobi_expr, referenced_vars
def _xfrm_bilinearities(self, dual):
    """
    Replace bilinear terms in constraints with disjunctions
    """
    for (name, data) in dual.component_map(Constraint, active=True).items():
        for ndx in data:
            con = data[ndx]
            degree = con.body.polynomial_degree()
            if degree > 2:
                raise RuntimeError(
                    "Cannot transform a model with polynomial degree %d"
                    % degree)
            if degree == 2:
                terms = generate_canonical_repn(con.body)
                for term in terms:
                    print("%s %s %s" % (name, ndx, term))
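# --- Usage sketch (not from the original source) -----------------------------
# _xfrm_bilinearities() keys off polynomial_degree() to decide whether a
# constraint body is linear (1), quadratic/bilinear (2), or unsupported (>2).
# A tiny illustration of those degrees:
from pyomo.environ import ConcreteModel, Var

def _sketch_polynomial_degree():
    m = ConcreteModel()
    m.x = Var()
    m.y = Var()
    print((2 * m.x + m.y).polynomial_degree())      # 1  (linear)
    print((m.x * m.y).polynomial_degree())          # 2  (bilinear)
    print((m.x * m.y * m.y).polynomial_degree())    # 3  (rejected above)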
def set_rho_values(ph, scenario_tree, scenario, cost_expr):
    """Set values for rho for this model, based on linear coefficients in
    the provided expression.
    """

    # This Rho coefficient is set to 1.0 to implement the CP(1.0) strategy
    # that Watson & Woodruff report as a good trade off between convergence
    # to the extensive form optimum and number of PH iterations.
    rho_coefficient = 1.0

    scenario_instance = scenario._instance
    symbol_map = scenario_instance._ScenarioTreeSymbolMap

    # 'newPyomo' is expected to be a module-level flag indicating whether
    # generate_standard_repn (newer Pyomo) is available.
    if newPyomo:
        standard_repn = generate_standard_repn(cost_expr.expr)
        if standard_repn.nonlinear_vars or standard_repn.quadratic_vars:
            raise ValueError("This code does not work with nonlinear models.")
    else:
        standard_repn = generate_canonical_repn(cost_expr.expr)
        standard_repn.linear_vars = standard_repn.variables
        standard_repn.linear_coefs = standard_repn.linear

    cost_coefficients = {}
    var_names = {}
    for (variable, coef) in \
            zip(standard_repn.linear_vars, standard_repn.linear_coefs):
        variable_id = symbol_map.getSymbol(variable)
        cost_coefficients[variable_id] = coef
        var_names[variable_id] = variable.name

    for variable_id in cost_coefficients:
        set_rho = False
        for tree_node in scenario._node_list:
            if variable_id in tree_node._standard_variable_ids:
                ph.setRhoOneScenario(
                    tree_node,
                    scenario,
                    variable_id,
                    cost_coefficients[variable_id] * rho_coefficient)
                set_rho = True
                break
        if not set_rho:
            print("Warning! Could not find tree node for variable {}; "
                  "rho not set.".format(var_names[variable_id]))

    return (cost_coefficients, var_names)
def _estimate_M(self, expr, name):
    # Calculate a best guess at M
    repn = generate_canonical_repn(expr)
    M = [0, 0]

    if isinstance(repn, LinearCanonicalRepn):
        if repn.constant is not None:
            for i in (0, 1):
                if M[i] is not None:
                    M[i] += repn.constant

        for i, coef in enumerate(repn.linear or []):
            var = repn.variables[i]
            coef = repn.linear[i]
            bounds = (value(var.lb), value(var.ub))
            for i in (0, 1):
                # reverse the bounds if the coefficient is negative
                if coef > 0:
                    j = i
                else:
                    j = 1 - i
                if bounds[i] is not None:
                    M[j] += value(bounds[i]) * coef
                else:
                    raise GDP_Error(
                        "Cannot estimate M for "
                        "expressions with unbounded variables."
                        "\n\t(found unbounded var %s while processing "
                        "constraint %s)" % (var.name, name))
    else:
        raise GDP_Error("Cannot estimate M for nonlinear "
                        "expressions.\n\t(found while processing "
                        "constraint %s)" % name)

    return tuple(M)
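# --- Usage sketch (not from the original source) -----------------------------
# The same interval-arithmetic idea as _estimate_M(), written against the
# newer generate_standard_repn API.  For a body 2*x - 3*y with x in [0, 10]
# and y in [-5, 5] it returns (-15, 35): the lower estimate uses x=0, y=5 and
# the upper estimate uses x=10, y=-5.
from pyomo.environ import ConcreteModel, Var, value
from pyomo.repn import generate_standard_repn

def estimate_big_m(expr):
    repn = generate_standard_repn(expr)
    if repn.nonlinear_expr is not None or repn.quadratic_vars:
        raise ValueError("only linear expressions are handled in this sketch")
    M = [repn.constant, repn.constant]
    for var, coef in zip(repn.linear_vars, repn.linear_coefs):
        lb, ub = value(var.lb), value(var.ub)
        if lb is None or ub is None:
            raise ValueError("unbounded variable %s" % var.name)
        # a negative coefficient swaps which bound feeds which side
        M[0] += coef * (lb if coef > 0 else ub)
        M[1] += coef * (ub if coef > 0 else lb)
    return tuple(M)

# m = ConcreteModel(); m.x = Var(bounds=(0, 10)); m.y = Var(bounds=(-5, 5))
# estimate_big_m(2*m.x - 3*m.y)   # -> (-15, 35)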
def _print_model_LP(self, model, output_file, solver_capability, labeler, output_fixed_variable_bounds=False, file_determinism=1, row_order=None, column_order=None, skip_trivial_constraints=False, force_objective_constant=False, include_all_variable_bounds=False): symbol_map = SymbolMap() variable_symbol_map = SymbolMap() # NOTE: we use createSymbol instead of getSymbol because we # know whether or not the symbol exists, and don't want # to the overhead of error/duplicate checking. # cache frequently called functions create_symbol_func = SymbolMap.createSymbol create_symbols_func = SymbolMap.createSymbols alias_symbol_func = SymbolMap.alias variable_label_pairs = [] # populate the symbol map in a single pass. #objective_list, constraint_list, sosconstraint_list, variable_list \ # = self._populate_symbol_map(model, # symbol_map, # labeler, # variable_symbol_map, # file_determinism=file_determinism) sortOrder = SortComponents.unsorted if file_determinism >= 1: sortOrder = sortOrder | SortComponents.indices if file_determinism >= 2: sortOrder = sortOrder | SortComponents.alphabetical # # Create variable symbols (and cache the block list) # all_blocks = [] variable_list = [] for block in model.block_data_objects(active=True, sort=sortOrder): all_blocks.append(block) for vardata in block.component_data_objects(Var, active=True, sort=sortOrder, descend_into=False): variable_list.append(vardata) variable_label_pairs.append( (vardata, create_symbol_func(symbol_map, vardata, labeler))) variable_symbol_map.addSymbols(variable_label_pairs) # and extract the information we'll need for rapid labeling. object_symbol_dictionary = symbol_map.byObject variable_symbol_dictionary = variable_symbol_map.byObject # cache - these are called all the time. print_expr_canonical = self._print_expr_canonical # print the model name and the source, so we know roughly where # it came from. # # NOTE: this *must* use the "\* ... *\" comment format: the GLPK # LP parser does not correctly handle other formats (notably, "%"). 
output_file.write("\\* Source Pyomo model name=%s *\\\n\n" % (model.name, )) # # Objective # supports_quadratic_objective = \ solver_capability('quadratic_objective') numObj = 0 onames = [] for block in all_blocks: gen_obj_canonical_repn = \ getattr(block, "_gen_obj_canonical_repn", True) # Get/Create the ComponentMap for the repn if not hasattr(block, '_canonical_repn'): block._canonical_repn = ComponentMap() block_canonical_repn = block._canonical_repn for objective_data in block.component_data_objects( Objective, active=True, sort=sortOrder, descend_into=False): numObj += 1 onames.append(objective_data.name) if numObj > 1: raise ValueError( "More than one active objective defined for input " "model '%s'; Cannot write legal LP file\n" "Objectives: %s" % (model.name, ' '.join(onames))) create_symbol_func(symbol_map, objective_data, labeler) symbol_map.alias(objective_data, '__default_objective__') if objective_data.is_minimizing(): output_file.write("min \n") else: output_file.write("max \n") if gen_obj_canonical_repn: canonical_repn = \ generate_canonical_repn(objective_data.expr) block_canonical_repn[objective_data] = canonical_repn else: canonical_repn = block_canonical_repn[objective_data] degree = canonical_degree(canonical_repn) if degree == 0: logger.warning( "Constant objective detected, replacing " "with a placeholder to prevent solver failure.") force_objective_constant = True elif degree == 2: if not supports_quadratic_objective: raise RuntimeError( "Selected solver is unable to handle " "objective functions with quadratic terms. " "Objective at issue: %s." % objective_data.name) elif degree != 1: raise RuntimeError( "Cannot write legal LP file. Objective '%s' " "has nonlinear terms that are not quadratic." % objective_data.name) output_file.write( object_symbol_dictionary[id(objective_data)] + ':\n') offset = print_expr_canonical( canonical_repn, output_file, object_symbol_dictionary, variable_symbol_dictionary, True, column_order, force_objective_constant=force_objective_constant) if numObj == 0: raise ValueError( "ERROR: No objectives defined for input model '%s'; " " cannot write legal LP file" % str(model.name)) # Constraints # # If there are no non-trivial constraints, you'll end up with an empty # constraint block. CPLEX is OK with this, but GLPK isn't. And # eliminating the constraint block (i.e., the "s.t." line) causes GLPK # to whine elsewhere. Output a warning if the constraint block is empty, # so users can quickly determine the cause of the solve failure. 
output_file.write("\n") output_file.write("s.t.\n") output_file.write("\n") have_nontrivial = False supports_quadratic_constraint = solver_capability( 'quadratic_constraint') def constraint_generator(): for block in all_blocks: gen_con_canonical_repn = \ getattr(block, "_gen_con_canonical_repn", True) # Get/Create the ComponentMap for the repn if not hasattr(block, '_canonical_repn'): block._canonical_repn = ComponentMap() block_canonical_repn = block._canonical_repn for constraint_data in block.component_data_objects( Constraint, active=True, sort=sortOrder, descend_into=False): if isinstance(constraint_data, LinearCanonicalRepn): canonical_repn = constraint_data else: if gen_con_canonical_repn: canonical_repn = generate_canonical_repn( constraint_data.body) block_canonical_repn[ constraint_data] = canonical_repn else: canonical_repn = block_canonical_repn[ constraint_data] yield constraint_data, canonical_repn if row_order is not None: sorted_constraint_list = list(constraint_generator()) sorted_constraint_list.sort(key=lambda x: row_order[x[0]]) def yield_all_constraints(): for constraint_data, canonical_repn in sorted_constraint_list: yield constraint_data, canonical_repn else: yield_all_constraints = constraint_generator # FIXME: This is a hack to get nested blocks working... eq_string_template = "= %" + self._precision_string + '\n' geq_string_template = ">= %" + self._precision_string + '\n\n' leq_string_template = "<= %" + self._precision_string + '\n\n' for constraint_data, canonical_repn in yield_all_constraints(): have_nontrivial = True degree = canonical_degree(canonical_repn) # # Write constraint # # There are conditions, e.g., when fixing variables, under which # a constraint block might be empty. Ignore these, for both # practical reasons and the fact that the CPLEX LP format # requires a variable in the constraint body. It is also # possible that the body of the constraint consists of only a # constant, in which case the "variable" of if degree == 0: if skip_trivial_constraints: continue elif degree == 2: if not supports_quadratic_constraint: raise ValueError( "Solver unable to handle quadratic expressions. Constraint" " at issue: '%s'" % (constraint_data.name)) elif degree != 1: raise ValueError( "Cannot write legal LP file. Constraint '%s' has a body " "with nonlinear terms." 
% (constraint_data.name)) # Create symbol con_symbol = create_symbol_func(symbol_map, constraint_data, labeler) if constraint_data.equality: label = 'c_e_' + con_symbol + '_' alias_symbol_func(symbol_map, constraint_data, label) output_file.write(label + ':\n') offset = print_expr_canonical(canonical_repn, output_file, object_symbol_dictionary, variable_symbol_dictionary, False, column_order) bound = constraint_data.lower bound = self._get_bound(bound) - offset output_file.write(eq_string_template % (_no_negative_zero(bound))) output_file.write("\n") else: if constraint_data.lower is not None: if constraint_data.upper is not None: label = 'r_l_' + con_symbol + '_' else: label = 'c_l_' + con_symbol + '_' alias_symbol_func(symbol_map, constraint_data, label) output_file.write(label + ':\n') offset = print_expr_canonical(canonical_repn, output_file, object_symbol_dictionary, variable_symbol_dictionary, False, column_order) bound = constraint_data.lower bound = self._get_bound(bound) - offset output_file.write(geq_string_template % (_no_negative_zero(bound))) if constraint_data.upper is not None: if constraint_data.lower is not None: label = 'r_u_' + con_symbol + '_' else: label = 'c_u_' + con_symbol + '_' alias_symbol_func(symbol_map, constraint_data, label) output_file.write(label + ':\n') offset = print_expr_canonical(canonical_repn, output_file, object_symbol_dictionary, variable_symbol_dictionary, False, column_order) bound = constraint_data.upper bound = self._get_bound(bound) - offset output_file.write(leq_string_template % (_no_negative_zero(bound))) if not have_nontrivial: logger.warning('Empty constraint block written in LP format ' \ '- solver may error') # the CPLEX LP format doesn't allow constants in the objective (or # constraint body), which is a bit silly. To avoid painful # book-keeping, we introduce the following "variable", constrained # to the value 1. This is used when quadratic terms are present. # worst-case, if not used, is that CPLEX easily pre-processes it out. prefix = "" output_file.write('%sc_e_ONE_VAR_CONSTANT: \n' % prefix) output_file.write('%sONE_VAR_CONSTANT = 1.0\n' % prefix) output_file.write("\n") # SOS constraints # # For now, we write out SOS1 and SOS2 constraints in the cplex format # # All Component objects are stored in model._component, which is a # dictionary of {class: {objName: object}}. # # Consider the variable X, # # model.X = Var(...) # # We print X to CPLEX format as X(i,j,k,...) where i, j, k, ... are the # indices of X. # SOSlines = StringIO() sos1 = solver_capability("sos1") sos2 = solver_capability("sos2") writtenSOS = False for block in all_blocks: for soscondata in block.component_data_objects(SOSConstraint, active=True, sort=sortOrder, descend_into=False): create_symbol_func(symbol_map, soscondata, labeler) level = soscondata.level if (level == 1 and not sos1) or \ (level == 2 and not sos2) or \ (level > 2): raise ValueError( "Solver does not support SOS level %s constraints" % (level)) if writtenSOS == False: SOSlines.write("SOS\n") writtenSOS = True # This updates the referenced_variable_ids, just in case # there is a variable that only appears in an # SOSConstraint, in which case this needs to be known # before we write the "bounds" section (Cplex does not # handle this correctly, Gurobi does) self.printSOS(symbol_map, labeler, variable_symbol_map, soscondata, SOSlines) # # Bounds # output_file.write("bounds\n") # Scan all variables even if we're only writing a subset of them. 
# required because we don't store maps by variable type currently. # FIXME: This is a hack to get nested blocks working... lb_string_template = "%" + self._precision_string + " <= " ub_string_template = " <= %" + self._precision_string + "\n" # Track the number of integer and binary variables, so you can # output their status later. integer_vars = [] binary_vars = [] for vardata in variable_list: # TODO: We could just loop over the set of items in # self._referenced_variable_ids, except this is # a dictionary that is hashed by id(vardata) # which would make the bounds section # nondeterministic (bad for unit testing) if (not include_all_variable_bounds) and \ (id(vardata) not in self._referenced_variable_ids): continue if vardata.fixed: if not output_fixed_variable_bounds: raise ValueError( "Encountered a fixed variable (%s) inside an active " "objective or constraint expression on model %s, which is " "usually indicative of a preprocessing error. Use the " "IO-option 'output_fixed_variable_bounds=True' to suppress " "this error and fix the variable by overwriting its bounds " "in the LP file." % (vardata.name, model.name)) if vardata.value is None: raise ValueError( "Variable cannot be fixed to a value of None.") vardata_lb = value(vardata.value) vardata_ub = value(vardata.value) else: vardata_lb = self._get_bound(vardata.lb) vardata_ub = self._get_bound(vardata.ub) name_to_output = variable_symbol_dictionary[id(vardata)] # track the number of integer and binary variables, so we know whether # to output the general / binary sections below. if vardata.is_binary(): binary_vars.append(name_to_output) elif vardata.is_integer(): integer_vars.append(name_to_output) elif not vardata.is_continuous(): raise TypeError( "Invalid domain type for variable with name '%s'. " "Variable is not continuous, integer, or binary." % (vardata.name)) # in the CPLEX LP file format, the default variable # bounds are 0 and +inf. These bounds are in # conflict with Pyomo, which assumes -inf and +inf # (which we would argue is more rational). 
output_file.write(" ") if (vardata_lb is not None) and (vardata_lb != -infinity): output_file.write(lb_string_template % (_no_negative_zero(vardata_lb))) else: output_file.write(" -inf <= ") if name_to_output == "e": raise ValueError( "Attempting to write variable with name 'e' in a CPLEX LP " "formatted file will cause a parse failure due to confusion with " "numeric values expressed in scientific notation") output_file.write(name_to_output) if (vardata_ub is not None) and (vardata_ub != infinity): output_file.write(ub_string_template % (_no_negative_zero(vardata_ub))) else: output_file.write(" <= +inf\n") if len(integer_vars) > 0: output_file.write("general\n") for var_name in integer_vars: output_file.write(' %s\n' % var_name) if len(binary_vars) > 0: output_file.write("binary\n") for var_name in binary_vars: output_file.write(' %s\n' % var_name) # Write the SOS section output_file.write(SOSlines.getvalue()) # # wrap-up # output_file.write("end\n") # Clean up the symbol map to only contain variables referenced # in the active constraints **Note**: warm start method may # rely on this for choosing the set of potential warm start # variables vars_to_delete = set(variable_symbol_map.byObject.keys()) - \ set(self._referenced_variable_ids.keys()) sm_byObject = symbol_map.byObject sm_bySymbol = symbol_map.bySymbol var_sm_byObject = variable_symbol_map.byObject for varid in vars_to_delete: symbol = var_sm_byObject[varid] del sm_byObject[varid] del sm_bySymbol[symbol] del variable_symbol_map return symbol_map
def to_standard_form(self): """ Produces a standard-form representation of the model. Returns the coefficient matrix (A), the cost vector (c), and the constraint vector (b), where the 'standard form' problem is min/max c'x s.t. Ax = b x >= 0 All three returned values are instances of the array.array class, and store Python floats (C doubles). """ from pyomo.repn import generate_canonical_repn # We first need to create an map of all variables to their column # number colID = {} ID2name = {} id = 0 tmp = self.variables().keys() tmp.sort() for v in tmp: colID[v] = id ID2name[id] = v id += 1 # First we go through the constraints and introduce slack and excess # variables to eliminate inequality constraints # # N.B. Structure heirarchy: # # active_components: {class: {attr_name: object}} # object -> Constraint: ._data: {ndx: _ConstraintData} # _ConstraintData: .lower, .body, .upper # # So, altogether, we access a lower bound via # # model.component_map(active=True)[Constraint]['con_name']['index'].lower # # {le,ge,eq}Constraints are # {constraint_name: {index: {variable_or_none: coefficient}} objects # that represent each constraint. None in the innermost dictionary # represents the constant term. # # i.e. # # min x1 + 2*x2 + x4 # s.t. x1 = 1 # x2 + 3*x3 <= -1 # x1 + x4 >= 3 # x1 + 2*x2 + + 3*x4 >= 0 # # # would be represented as (modulo the names of the variables, # constraints, and indices) # # eqConstraints = {'c1': {None: {'x1':1, None:-1}}} # leConstraints = {'c2': {None: {'x2':1, 'x3':3, None:1}}} # geConstraints = {'c3': {None: {'x1':1, 'x4':1, None:-3}}, # 'c4': {None: {'x1':1, 'x2':2, 'x4':1, None:0}}} # # Note the we have the luxury of dealing only with linear terms. var_id_map = {} leConstraints = {} geConstraints = {} eqConstraints = {} objectives = {} # For each registered component for c in self.component_map(active=True): # Get all subclasses of Constraint if issubclass(c, Constraint): cons = self.component_map(c, active=True) # Get the name of the constraint, and the constraint set itself for con_set_name in cons: con_set = cons[con_set_name] # For each indexed constraint in the constraint set for ndx in con_set._data: con = con_set._data[ndx] # Process the body terms = self._process_canonical_repn( generate_canonical_repn(con.body, var_id_map)) # Process the bounds of the constraint if con.equality: # Equality constraint, only check lower bound lb = self._process_canonical_repn( generate_canonical_repn(con.lower, var_id_map)) # Update terms for k in lb: v = lb[k] if k in terms: terms[k] -= v else: terms[k] = -v # Add constraint to equality constraints eqConstraints[(con_set_name, ndx)] = terms else: # Process upper bounds (<= constraints) if con.upper is not None: # Less than or equal to constraint tmp = dict(terms) ub = self._process_canonical_repn( generate_canonical_repn(con.upper, var_id_map)) # Update terms for k in ub: if k in terms: tmp[k] -= ub[k] else: tmp[k] = -ub[k] # Add constraint to less than or equal to # constraints leConstraints[(con_set_name, ndx)] = tmp # Process lower bounds (>= constraints) if con.lower is not None: # Less than or equal to constraint tmp = dict(terms) lb = self._process_canonical_repn( generate_canonical_repn(con.lower, var_id_map)) # Update terms for k in lb: if k in terms: tmp[k] -= lb[k] else: tmp[k] = -lb[k] # Add constraint to less than or equal to # constraints geConstraints[(con_set_name, ndx)] = tmp elif issubclass(c, Objective): # Process objectives objs = self.component_map(c, active=True) # Get the name of the objective, and the 
objective set itself for obj_set_name in objs: obj_set = objs[obj_set_name] # For each indexed objective in the objective set for ndx in obj_set._data: obj = obj_set._data[ndx] # Process the objective terms = self._process_canonical_repn( generate_canonical_repn(obj.expr, var_id_map)) objectives[(obj_set_name, ndx)] = terms # We now have all the constraints. Add a slack variable for every # <= constraint and an excess variable for every >= constraint. nSlack = len(leConstraints) nExcess = len(geConstraints) nConstraints = len(leConstraints) + len(geConstraints) + \ len(eqConstraints) nVariables = len(colID) + nSlack + nExcess nRegVariables = len(colID) # Make the arrays coefficients = array.array("d", [0] * nConstraints * nVariables) constraints = array.array("d", [0] * nConstraints) costs = array.array("d", [0] * nVariables) # Populate the coefficient matrix constraintID = 0 # Add less than or equal to constraints for ndx in leConstraints: con = leConstraints[ndx] for termKey in con: coef = con[termKey] if termKey is None: # Constraint coefficient constraints[constraintID] = -coef else: # Variable coefficient col = colID[termKey] coefficients[constraintID * nVariables + col] = coef # Add the slack coefficients[constraintID*nVariables + nRegVariables + \ constraintID] = 1 constraintID += 1 # Add greater than or equal to constraints for ndx in geConstraints: con = geConstraints[ndx] for termKey in con: coef = con[termKey] if termKey is None: # Constraint coefficient constraints[constraintID] = -coef else: # Variable coefficient col = colID[termKey] coefficients[constraintID * nVariables + col] = coef # Add the slack coefficients[constraintID*nVariables + nRegVariables + \ constraintID] = -1 constraintID += 1 # Add equality constraints for ndx in eqConstraints: con = eqConstraints[ndx] for termKey in con: coef = con[termKey] if termKey is None: # Constraint coefficient constraints[constraintID] = -coef else: # Variable coefficient col = colID[termKey] coefficients[constraintID * nVariables + col] = coef constraintID += 1 # Determine cost coefficients for obj_name in objectives: obj = objectives[obj_name]() for var in obj: costs[colID[var]] = obj[var] # Print the model # # The goal is to print # # var1 var2 var3 ... # +-- --+ # | cost1 cost2 cost3 ...| # +-- --+ # +-- --+ +-- --+ # con1 | coef11 coef12 coef13 ...| | eq1 | # con2 | coef21 coef22 coef23 ...| | eq2 | # con2 | coef31 coef32 coef33 ...| | eq3 | # . | . . . . | | . | # . | . . . . | | . | # . | . . . . | | . 
| constraintPadding = 2 numFmt = "% 1.4f" altFmt = "% 1.1g" maxColWidth = max(len(numFmt % 0.0), len(altFmt % 0.0)) maxConstraintColWidth = max(len(numFmt % 0.0), len(altFmt % 0.0)) # Generate constraint names maxConNameLen = 0 conNames = [] for name in leConstraints: strName = str(name) if len(strName) > maxConNameLen: maxConNameLen = len(strName) conNames.append(strName) for name in geConstraints: strName = str(name) if len(strName) > maxConNameLen: maxConNameLen = len(strName) conNames.append(strName) for name in eqConstraints: strName = str(name) if len(strName) > maxConNameLen: maxConNameLen = len(strName) conNames.append(strName) # Generate the variable names varNames = [None] * len(colID) for name in colID: tmp_name = " " + name if len(tmp_name) > maxColWidth: maxColWidth = len(tmp_name) varNames[colID[name]] = tmp_name for i in xrange(0, nSlack): tmp_name = " _slack_%i" % i if len(tmp_name) > maxColWidth: maxColWidth = len(tmp_name) varNames.append(tmp_name) for i in xrange(0, nExcess): tmp_name = " _excess_%i" % i if len(tmp_name) > maxColWidth: maxColWidth = len(tmp_name) varNames.append(tmp_name) # Variable names line = " " * maxConNameLen + (" " * constraintPadding) + " " for col in xrange(0, nVariables): # Format entry token = varNames[col] # Pad with trailing whitespace token += " " * (maxColWidth - len(token)) # Add to line line += " " + token + " " print(line + '\n') # Cost vector print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \ " "*((maxColWidth+2)*nVariables - 4) + "--+" + '\n') line = " " * maxConNameLen + (" " * constraintPadding) + "|" for col in xrange(0, nVariables): # Format entry token = numFmt % costs[col] if len(token) > maxColWidth: token = altFmt % costs[col] # Pad with trailing whitespace token += " " * (maxColWidth - len(token)) # Add to line line += " " + token + " " line += "|" print(line + '\n') print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \ " "*((maxColWidth+2)*nVariables - 4) + "--+"+'\n') # Constraints print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \ " "*((maxColWidth+2)*nVariables - 4) + "--+" + \ (" "*constraintPadding) + "+--" + \ (" "*(maxConstraintColWidth-1)) + "--+"+'\n') for row in xrange(0, nConstraints): # Print constraint name line = conNames[row] + (" " * constraintPadding) + ( " " * (maxConNameLen - len(conNames[row]))) + "|" # Print each coefficient for col in xrange(0, nVariables): # Format entry token = numFmt % coefficients[nVariables * row + col] if len(token) > maxColWidth: token = altFmt % coefficients[nVariables * row + col] # Pad with trailing whitespace token += " " * (maxColWidth - len(token)) # Add to line line += " " + token + " " line += "|" + (" " * constraintPadding) + "|" # Add constraint vector token = numFmt % constraints[row] if len(token) > maxConstraintColWidth: token = altFmt % constraints[row] # Pad with trailing whitespace token += " " * (maxConstraintColWidth - len(token)) line += " " + token + " |" print(line + '\n') print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \ " "*((maxColWidth+2)*nVariables - 4) + "--+" + \ (" "*constraintPadding) + "+--" + (" "*(maxConstraintColWidth-1))\ + "--+"+'\n') return (coefficients, costs, constraints)
def compile_objective(self, pyomo_instance):

    from pyomo.core.base import Objective
    from pyomo.repn import canonical_is_constant, LinearCanonicalRepn, canonical_degree

    if self._active_cplex_instance is None:
        raise RuntimeError("***The CPLEXPersistent solver plugin "
                           "cannot compile objective - no "
                           "instance is presently compiled")

    cplex_instance = self._active_cplex_instance

    self._has_quadratic_objective = False

    cntr = 0
    for block in pyomo_instance.block_data_objects(active=True):

        gen_obj_canonical_repn = \
            getattr(block, "_gen_obj_canonical_repn", True)

        # Get/Create the ComponentMap for the repn
        if not hasattr(block, '_canonical_repn'):
            block._canonical_repn = ComponentMap()
        block_canonical_repn = block._canonical_repn

        for obj_data in block.component_data_objects(Objective,
                                                     active=True,
                                                     descend_into=False):

            cntr += 1
            if cntr > 1:
                raise ValueError(
                    "Multiple active objectives found on Pyomo instance '%s'. "
                    "Solver '%s' will only handle a single active objective"
                    % (pyomo_instance.name, self.type))

            if obj_data.is_minimizing():
                cplex_instance.objective.set_sense(
                    cplex_instance.objective.sense.minimize)
            else:
                cplex_instance.objective.set_sense(
                    cplex_instance.objective.sense.maximize)

            cplex_instance.objective.set_name(
                self._symbol_map.getSymbol(obj_data, self._labeler))

            if gen_obj_canonical_repn:
                obj_repn = generate_canonical_repn(obj_data.expr)
                block_canonical_repn[obj_data] = obj_repn
            else:
                obj_repn = block_canonical_repn[obj_data]

            if (isinstance(obj_repn, LinearCanonicalRepn) and
                ((obj_repn.linear == None) or
                 (len(obj_repn.linear) == 0))) or \
                canonical_is_constant(obj_repn):
                print("Warning: Constant objective detected, replacing "
                      "with a placeholder to prevent solver failure.")

                offset = obj_repn.constant
                if offset is None:
                    offset = 0.0
                objective_expression = [("ONE_VAR_CONSTANT", offset)]
                cplex_instance.objective.set_linear(objective_expression)

            else:

                if isinstance(obj_repn, LinearCanonicalRepn):
                    objective_expression, offset = \
                        self._encode_constraint_body_linear_specialized(
                            obj_repn,
                            self._labeler,
                            use_variable_names=False,
                            cplex_variable_name_index_map=self._cplex_variable_ids,
                            as_pairs=True)
                    if offset != 0.0:
                        objective_expression.append(
                            (self._cplex_variable_ids["ONE_VAR_CONSTANT"], offset))
                    cplex_instance.objective.set_linear(objective_expression)

                else:
                    #Linear terms
                    if 1 in obj_repn:
                        objective_expression, offset = \
                            self._encode_constraint_body_linear(obj_repn,
                                                                self._labeler,
                                                                as_pairs=True)
                        if offset != 0.0:
                            objective_expression.append(
                                ("ONE_VAR_CONSTANT", offset))
                        cplex_instance.objective.set_linear(objective_expression)

                    #Quadratic terms
                    if 2 in obj_repn:
                        self._has_quadratic_objective = True
                        objective_expression = \
                            self._encode_constraint_body_quadratic(obj_repn,
                                                                   self._labeler,
                                                                   as_triples=True,
                                                                   is_obj=2.0)
                        cplex_instance.objective.\
                            set_quadratic_coefficients(objective_expression)

                    degree = canonical_degree(obj_repn)
                    if (degree is None) or (degree > 2):
                        raise ValueError(
                            "CPLEXPersistent plugin does not support general nonlinear "
                            "objective expressions (only linear or quadratic).\n"
                            "Objective: %s" % (obj_data.name))
def _add_optimality_conditions(self, instance, submodel): """ Add optimality conditions for the submodel This assumes that the original model has the form: min c1*x + d1*y A3*x <= b3 A1*x + B1*y <= b1 min c2*x + d2*y y >= 0 A2*x + B2*y <= b2 NOTE THE VARIABLE BOUNDS! """ # # Populate the block with the linear constraints. # Note that we don't simply clone the current block. # We need to collect a single set of equations that # can be easily expressed. # d2 = {} B2 = {} vtmp = {} utmp = {} sids_set = set() sids_list = [] # block = Block(concrete=True) block.u = VarList() block.v = VarList() block.c1 = ConstraintList() block.c2 = ComplementarityList() block.c3 = ComplementarityList() # # Collect submodel objective terms # for odata in submodel.component_data_objects(Objective, active=True): if odata.sense == maximize: d_sense = -1 else: d_sense = 1 # # Iterate through the variables in the canonical representation # o_terms = generate_canonical_repn(odata.expr, compute_values=False) for i in range(len(o_terms.variables)): var = o_terms.variables[i] if var.parent_component().local_name in self._fixed_upper_vars: # # Skip fixed upper variables # continue # # Store the coefficient for the variable. The coefficient is # negated if the objective is maximized. # id_ = id(var) d2[id_] = d_sense * o_terms.linear[i] if not id_ in sids_set: sids_set.add(id_) sids_list.append(id_) # Stop after the first objective break # # Iterate through all lower level variables, adding dual variables # and complementarity slackness conditions for y bound constraints # for vcomponent in instance.component_objects(Var, active=True): if vcomponent.local_name in self._fixed_upper_vars: # # Skip fixed upper variables # continue for ndx in vcomponent: # # For each index, get the bounds for the variable # lb, ub = vcomponent[ndx].bounds if not lb is None: # # Add the complementarity slackness condition for a lower bound # v = block.v.add() block.c3.add( complements(vcomponent[ndx] >= lb, v >= 0) ) else: v = None if not ub is None: # # Add the complementarity slackness condition for an upper bound # w = block.v.add() vtmp[id(vcomponent[ndx])] = w block.c3.add( complements(vcomponent[ndx] <= ub, w >= 0) ) else: w = None if not (v is None and w is None): # # Record the variables for which complementarity slackness conditions # were created. 
# id_ = id(vcomponent[ndx]) vtmp[id_] = (v,w) if not id_ in sids_set: sids_set.add(id_) sids_list.append(id_) # # Iterate through all constraints, adding dual variables and # complementary slackness conditions (for inequality constraints) # for cdata in submodel.component_data_objects(Constraint, active=True): if cdata.equality: # Don't add a complementary slackness condition for an equality constraint u = block.u.add() utmp[id(cdata)] = (None,u) else: if not cdata.lower is None: # # Add the complementarity slackness condition for a greater-than inequality # u = block.u.add() block.c2.add( complements(- cdata.body <= - cdata.lower, u >= 0) ) else: u = None if not cdata.upper is None: # # Add the complementarity slackness condition for a less-than inequality # w = block.u.add() block.c2.add( complements(cdata.body <= cdata.upper, w >= 0) ) else: w = None if not (u is None and w is None): utmp[id(cdata)] = (u,w) # # Store the coefficients for the contraint variables that are not fixed # c_terms = generate_canonical_repn(cdata.body, compute_values=False) for i in range(len(c_terms.variables)): var = c_terms.variables[i] if var.parent_component().local_name in self._fixed_upper_vars: continue id_ = id(var) B2.setdefault(id_,{}).setdefault(id(cdata),c_terms.linear[i]) if not id_ in sids_set: sids_set.add(id_) sids_list.append(id_) # # Generate stationarity equations # tmp__ = (None, None) for vid in sids_list: exp = d2.get(vid,0) # lb_dual, ub_dual = vtmp.get(vid, tmp__) if vid in vtmp: if not lb_dual is None: exp -= lb_dual # dual for variable lower bound if not ub_dual is None: exp += ub_dual # dual for variable upper bound # B2_ = B2.get(vid,{}) utmp_keys = list(utmp.keys()) if self._deterministic: utmp_keys.sort(key=lambda x:utmp[x][0].local_name if utmp[x][1] is None else utmp[x][1].local_name) for uid in utmp_keys: if uid in B2_: lb_dual, ub_dual = utmp[uid] if not lb_dual is None: exp -= B2_[uid] * lb_dual if not ub_dual is None: exp += B2_[uid] * ub_dual if type(exp) in six.integer_types or type(exp) is float: # TODO: Annotate the model as unbounded raise IOError("Unbounded variable without side constraints") else: block.c1.add( exp == 0 ) # # Return block # return block
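# --- Usage sketch (not from the original source) -----------------------------
# _add_optimality_conditions() above encodes the lower-level KKT conditions
# with ComplementarityList entries.  The sketch below shows the basic building
# block from pyomo.mpec: complements(expr1, expr2) states that both conditions
# hold and at least one of them is tight.
from pyomo.environ import ConcreteModel, Var
from pyomo.mpec import Complementarity, complements

def _sketch_bound_complementarity():
    m = ConcreteModel()
    m.y = Var()          # lower-level primal variable with bound y >= 0
    m.v = Var()          # dual multiplier for that bound
    # y >= 0 is complementary to v >= 0: v can only be positive when y sits at
    # its bound (y == 0), mirroring block.c3.add(complements(...)) above.
    m.compl = Complementarity(expr=complements(m.y >= 0, m.v >= 0))
    return m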
def _replace_bilinear(self, expr, instance):
    idMap = {}
    terms = generate_canonical_repn(expr, idMap=idMap)
    # Constant
    if 0 in terms:
        e = terms[0][None]
    else:
        e = 0
    # Linear terms
    if 1 in terms:
        for key in terms[1]:
            e += terms[1][key] * idMap[key]
    # Quadratic terms
    if 2 in terms:
        for key in terms[2]:
            vars = []
            for v in key:
                vars.append(idMap[v])
            coef = terms[2][key]
            #
            if isinstance(vars[0].domain, BooleanSet):
                instance.bilinear_data_.vlist_boolean.append(vars[0])
                v = instance.bilinear_data_.vlist.add()
                bounds = vars[1].bounds
                v.setlb(bounds[0])
                v.setub(bounds[1])
                id = len(instance.bilinear_data_.vlist)
                instance.bilinear_data_.index.add(id)
                # First disjunct
                d0 = instance.bilinear_data_.disjuncts_[id, 0]
                d0.c1 = Constraint(expr=vars[0] == 1)
                d0.c2 = Constraint(expr=v == coef * vars[1])
                # Second disjunct
                d1 = instance.bilinear_data_.disjuncts_[id, 1]
                d1.c1 = Constraint(expr=vars[0] == 0)
                d1.c2 = Constraint(expr=v == 0)
                # Disjunction
                instance.bilinear_data_.disjunction_data[id] = \
                    [instance.bilinear_data_.disjuncts_[id, 0],
                     instance.bilinear_data_.disjuncts_[id, 1]]
                # The disjunctive variable is the expression
                e += v
            #
            elif isinstance(vars[1].domain, BooleanSet):
                instance.bilinear_data_.vlist_boolean.append(vars[1])
                v = instance.bilinear_data_.vlist.add()
                bounds = vars[0].bounds
                v.setlb(bounds[0])
                v.setub(bounds[1])
                id = len(instance.bilinear_data_.vlist)
                instance.bilinear_data_.index.add(id)
                # First disjunct
                d0 = instance.bilinear_data_.disjuncts_[id, 0]
                d0.c1 = Constraint(expr=vars[1] == 1)
                d0.c2 = Constraint(expr=v == coef * vars[0])
                # Second disjunct
                d1 = instance.bilinear_data_.disjuncts_[id, 1]
                d1.c1 = Constraint(expr=vars[1] == 0)
                d1.c2 = Constraint(expr=v == 0)
                # Disjunction
                instance.bilinear_data_.disjunction_data[id] = \
                    [instance.bilinear_data_.disjuncts_[id, 0],
                     instance.bilinear_data_.disjuncts_[id, 1]]
                # The disjunctive variable is the expression
                e += v
            else:
                # If neither variable is boolean, just reinsert the original
                # bilinear term
                e += coef * vars[0] * vars[1]
    #
    return e
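# --- Usage sketch (not from the original source) -----------------------------
# The transformation above replaces a binary-times-continuous product x*y with
# a new variable v and a two-disjunct GDP: x == 1 forces v == y, and x == 0
# forces v == 0.  A hand-built version of that pattern for a single product,
# using pyomo.gdp directly:
from pyomo.environ import ConcreteModel, Var, Binary, Constraint
from pyomo.gdp import Disjunct, Disjunction

def _sketch_bilinear_disjunction():
    m = ConcreteModel()
    m.x = Var(domain=Binary)
    m.y = Var(bounds=(0, 10))
    m.v = Var(bounds=(0, 10))          # stands in for the product x*y

    m.d_on = Disjunct()
    m.d_on.fix_x = Constraint(expr=m.x == 1)
    m.d_on.link = Constraint(expr=m.v == m.y)

    m.d_off = Disjunct()
    m.d_off.fix_x = Constraint(expr=m.x == 0)
    m.d_off.zero = Constraint(expr=m.v == 0)

    m.choice = Disjunction(expr=[m.d_on, m.d_off])
    # ...use m.v wherever x*y appeared, then apply e.g. the gdp.bigm
    # transformation before handing the model to a MIP solver.
    return m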
# required because we don't store maps by variable type currently. # FIXME: This is a hack to get nested blocks working... lb_string_template = "%"+self._precision_string+" <= " ub_string_template = " <= %"+self._precision_string+"\n" # Track the number of integer and binary variables, so you can # output their status later. integer_vars = [] binary_vars = [] for vardata in variable_list: # TODO: We could just loop over the set of items in # self._referenced_variable_ids, except this is # a dictionary that is hashed by id(vardata) # which would make the bounds section # nondeterministic (bad for unit testing) if (not include_all_variable_bounds) and \ (id(vardata) not in self._referenced_variable_ids): continue if vardata.fixed: if not output_fixed_variable_bounds: raise ValueError( "Encountered a fixed variable (%s) inside an active " "objective or constraint expression on model %s, which is " "usually indicative of a preprocessing error. Use the " "IO-option 'output_fixed_variable_bounds=True' to suppress " "this error and fix the variable by overwriting its bounds " "in the LP file." % (vardata.name, model.name)) if vardata.value is None: raise ValueError("Variable cannot be fixed to a value of None.") vardata_lb = value(vardata.value) vardata_ub = value(vardata.value) else: vardata_lb = self._get_bound(vardata.lb) vardata_ub = self._get_bound(vardata.ub) name_to_output = variable_symbol_dictionary[id(vardata)] # track the number of integer and binary variables, so we know whether # to output the general / binary sections below. if vardata.is_integer(): integer_vars.append(name_to_output) elif vardata.is_binary(): binary_vars.append(name_to_output) elif not vardata.is_continuous(): raise TypeError("Invalid domain type for variable with name '%s'. " "Variable is not continuous, integer, or binary." % (vardata.name)) # in the CPLEX LP file format, the default variable # bounds are 0 and +inf. These bounds are in # conflict with Pyomo, which assumes -inf and +inf # (which we would argue is more rational). 
output_file.write(" ") if (vardata_lb is not None) and (vardata_lb != -infinity): output_file.write(lb_string_template % (_no_negative_zero(vardata_lb))) else: output_file.write(" -inf <= ") if name_to_output == "e": raise ValueError( "Attempting to write variable with name 'e' in a CPLEX LP " "formatted file will cause a parse failure due to confusion with " "numeric values expressed in scientific notation") output_file.write(name_to_output) if (vardata_ub is not None) and (vardata_ub != infinity): output_file.write(ub_string_template % (_no_negative_zero(vardata_ub))) else: output_file.write(" <= +inf\n") if len(integer_vars) > 0: output_file.write("general\n") for var_name in integer_vars: output_file.write(' %s\n' % var_name) if len(binary_vars) > 0: output_file.write("binary\n") for var_name in binary_vars: output_file.write(' %s\n' % var_name) # Write the SOS section output_file.write(SOSlines.getvalue()) # # wrap-up # output_file.write("end\n") # Clean up the symbol map to only contain variables referenced # in the active constraints **Note**: warm start method may # rely on this for choosing the set of potential warm start # variables vars_to_delete = set(variable_symbol_map.byObject.keys()) - \ set(self._referenced_variable_ids.keys()) sm_byObject = symbol_map.byObject sm_bySymbol = symbol_map.bySymbol var_sm_byObject = variable_symbol_map.byObject for varid in vars_to_delete: symbol = var_sm_byObject[varid] del sm_byObject[varid] del sm_bySymbol[symbol] del variable_symbol_map return symbol_map
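# Hedged usage sketch (not part of the writer above): roughly how the LP
# writer is reached through Pyomo's public modeling API.  The file name and
# io_options shown are illustrative assumptions; the writer itself is
# selected from the ".lp" suffix.
from pyomo.environ import (ConcreteModel, Var, Objective, Constraint,
                           NonNegativeReals)

example_model = ConcreteModel()
example_model.x = Var(within=NonNegativeReals)
example_model.y = Var(bounds=(0, 5))
example_model.obj = Objective(expr=2 * example_model.x + 3 * example_model.y)
example_model.con = Constraint(expr=example_model.x + example_model.y <= 4)

# symbolic_solver_labels makes the writer use a text labeler, so rows and
# columns in the LP file carry component names instead of x1, x2, ...
example_model.write("example.lp",
                    io_options={"symbolic_solver_labels": True})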
def _populate_gurobi_instance (self, pyomo_instance): from pyomo.core.base import Var, Objective, Constraint, SOSConstraint from pyomo.repn import LinearCanonicalRepn, canonical_degree try: grbmodel = Model(name=pyomo_instance.name) except Exception: e = sys.exc_info()[1] msg = 'Unable to create Gurobi model. Have you installed the Python'\ '\n bindings for Gurobi?\n\n\tError message: %s' raise Exception(msg % e) if self._symbolic_solver_labels: labeler = TextLabeler() else: labeler = NumericLabeler('x') # cache to avoid dictionary getitem calls in the loops below. self_symbol_map = self._symbol_map = SymbolMap() pyomo_instance.solutions.add_symbol_map(self_symbol_map) self._smap_id = id(self_symbol_map) # we use this when iterating over the constraints because it # will have a much smaller hash table, we also use this for # the warm start code after it is cleaned to only contain # variables referenced in the constraints self_variable_symbol_map = self._variable_symbol_map = SymbolMap() var_symbol_pairs = [] # maps _VarData labels to the corresponding Gurobi variable object pyomo_gurobi_variable_map = {} self._referenced_variable_ids.clear() # cache to avoid dictionary getitem calls in the loop below. grb_infinity = GRB.INFINITY for var_value in pyomo_instance.component_data_objects(Var, active=True): lb = -grb_infinity ub = grb_infinity if (var_value.lb is not None) and (var_value.lb != -infinity): lb = value(var_value.lb) if (var_value.ub is not None) and (var_value.ub != infinity): ub = value(var_value.ub) # _VarValue objects will not be in the symbol map yet, so # avoid some checks. var_value_label = self_symbol_map.createSymbol(var_value, labeler) var_symbol_pairs.append((var_value, var_value_label)) # be sure to impart the integer and binary nature of any variables if var_value.is_integer(): var_type = GRB.INTEGER elif var_value.is_binary(): var_type = GRB.BINARY elif var_value.is_continuous(): var_type = GRB.CONTINUOUS else: raise TypeError("Invalid domain type for variable with name '%s'. 
" "Variable is not continuous, integer, or binary.") pyomo_gurobi_variable_map[var_value_label] = \ grbmodel.addVar(lb=lb, \ ub=ub, \ vtype=var_type, \ name=var_value_label) self_variable_symbol_map.addSymbols(var_symbol_pairs) grbmodel.update() # The next loop collects the following component types from the model: # - SOSConstraint # - Objective # - Constraint sos1 = self._capabilities.sos1 sos2 = self._capabilities.sos2 modelSOS = ModelSOS() objective_cntr = 0 # Track the range constraints and their associated variables added by gurobi self._last_native_var_idx = grbmodel.NumVars-1 range_var_idx = grbmodel.NumVars _self_range_con_var_pairs = self._range_con_var_pairs = [] for block in pyomo_instance.block_data_objects(active=True): gen_obj_canonical_repn = \ getattr(block, "_gen_obj_canonical_repn", True) gen_con_canonical_repn = \ getattr(block, "_gen_con_canonical_repn", True) # Get/Create the ComponentMap for the repn if not hasattr(block,'_canonical_repn'): block._canonical_repn = ComponentMap() block_canonical_repn = block._canonical_repn # SOSConstraints for soscondata in block.component_data_objects(SOSConstraint, active=True, descend_into=False): level = soscondata.level if (level == 1 and not sos1) or \ (level == 2 and not sos2) or \ (level > 2): raise RuntimeError( "Solver does not support SOS level %s constraints" % (level,)) modelSOS.count_constraint(self_symbol_map, labeler, self_variable_symbol_map, pyomo_gurobi_variable_map, soscondata) # Objective for obj_data in block.component_data_objects(Objective, active=True, descend_into=False): if objective_cntr > 1: raise ValueError( "Multiple active objectives found on Pyomo instance '%s'. " "Solver '%s' will only handle a single active objective" \ % (pyomo_instance.cname(True), self.type)) sense = GRB_MIN if (obj_data.is_minimizing()) else GRB_MAX grbmodel.ModelSense = sense obj_expr = LinExpr() if gen_obj_canonical_repn: obj_repn = generate_canonical_repn(obj_data.expr) block_canonical_repn[obj_data] = obj_repn else: obj_repn = block_canonical_repn[obj_data] if isinstance(obj_repn, LinearCanonicalRepn): if obj_repn.constant != None: obj_expr.addConstant(obj_repn.constant) if obj_repn.linear != None: for i in xrange(len(obj_repn.linear)): var_coefficient = obj_repn.linear[i] var_value = obj_repn.variables[i] self._referenced_variable_ids.add(id(var_value)) label = self_variable_symbol_map.getSymbol(var_value) obj_expr.addTerms(var_coefficient, pyomo_gurobi_variable_map[label]) else: if 0 in obj_repn: # constant term obj_expr.addConstant(obj_repn[0][None]) if 1 in obj_repn: # first-order terms hash_to_variable_map = obj_repn[-1] for var_hash, var_coefficient in iteritems(obj_repn[1]): vardata = hash_to_variable_map[var_hash] self._referenced_variable_ids.add(id(vardata)) label = self_variable_symbol_map.getSymbol(vardata) obj_expr.addTerms(var_coefficient, pyomo_gurobi_variable_map[label]) if 2 in obj_repn: obj_expr = QuadExpr(obj_expr) hash_to_variable_map = obj_repn[-1] for quad_repn, coef in iteritems(obj_repn[2]): gurobi_expr = QuadExpr(coef) for var_hash, exponent in iteritems(quad_repn): vardata = hash_to_variable_map[var_hash] self._referenced_variable_ids.add(id(vardata)) gurobi_var = pyomo_gurobi_variable_map\ [self_variable_symbol_map.\ getSymbol(vardata)] gurobi_expr *= gurobi_var if exponent == 2: gurobi_expr *= gurobi_var obj_expr += gurobi_expr degree = canonical_degree(obj_repn) if (degree is None) or (degree > 2): raise ValueError( "gurobi_direct plugin does not support general nonlinear " "objective expressions 
(only linear or quadratic).\n" "Objective: %s" % (obj_data.cname(True))) # need to cache the objective label, because the # GUROBI python interface doesn't track this. # _ObjectiveData objects will not be in the symbol map # yet, so avoid some checks. self._objective_label = \ self_symbol_map.createSymbol(obj_data, labeler) grbmodel.setObjective(obj_expr, sense=sense) # Constraint for constraint_data in block.component_data_objects(Constraint, active=True, descend_into=False): if (constraint_data.lower is None) and \ (constraint_data.upper is None): continue # not binding at all, don't bother con_repn = None if isinstance(constraint_data, LinearCanonicalRepn): con_repn = constraint_data else: if gen_con_canonical_repn: con_repn = generate_canonical_repn(constraint_data.body) block_canonical_repn[constraint_data] = con_repn else: con_repn = block_canonical_repn[constraint_data] offset = 0.0 # _ConstraintData objects will not be in the symbol # map yet, so avoid some checks. constraint_label = \ self_symbol_map.createSymbol(constraint_data, labeler) trivial = False if isinstance(con_repn, LinearCanonicalRepn): # # optimization (these might be generated on the fly) # constant = con_repn.constant coefficients = con_repn.linear variables = con_repn.variables if constant is not None: offset = constant expr = LinExpr() + offset if coefficients is not None: linear_coefs = list() linear_vars = list() for i in xrange(len(coefficients)): var_coefficient = coefficients[i] var_value = variables[i] self._referenced_variable_ids.add(id(var_value)) label = self_variable_symbol_map.getSymbol(var_value) linear_coefs.append(var_coefficient) linear_vars.append(pyomo_gurobi_variable_map[label]) expr += LinExpr(linear_coefs, linear_vars) else: trivial = True else: if 0 in con_repn: offset = con_repn[0][None] expr = LinExpr() + offset if 1 in con_repn: # first-order terms linear_coefs = list() linear_vars = list() hash_to_variable_map = con_repn[-1] for var_hash, var_coefficient in iteritems(con_repn[1]): var = hash_to_variable_map[var_hash] self._referenced_variable_ids.add(id(var)) label = self_variable_symbol_map.getSymbol(var) linear_coefs.append( var_coefficient ) linear_vars.append( pyomo_gurobi_variable_map[label] ) expr += LinExpr(linear_coefs, linear_vars) if 2 in con_repn: # quadratic constraint if _GUROBI_VERSION_MAJOR < 5: raise ValueError( "The gurobi_direct plugin does not handle quadratic " "constraint expressions for Gurobi major versions " "< 5. 
Current version: Gurobi %s.%s%s" % (gurobi.version())) expr = QuadExpr(expr) hash_to_variable_map = con_repn[-1] for quad_repn, coef in iteritems(con_repn[2]): gurobi_expr = QuadExpr(coef) for var_hash, exponent in iteritems(quad_repn): vardata = hash_to_variable_map[var_hash] self._referenced_variable_ids.add(id(vardata)) gurobi_var = pyomo_gurobi_variable_map\ [self_variable_symbol_map.\ getSymbol(vardata)] gurobi_expr *= gurobi_var if exponent == 2: gurobi_expr *= gurobi_var expr += gurobi_expr degree = canonical_degree(con_repn) if (degree is None) or (degree > 2): raise ValueError( "gurobi_direct plugin does not support general nonlinear " "constraint expressions (only linear or quadratic).\n" "Constraint: %s" % (constraint_data.cname(True))) if (not trivial) or (not self._skip_trivial_constraints): if constraint_data.equality: sense = GRB.EQUAL bound = self._get_bound(constraint_data.lower) grbmodel.addConstr(lhs=expr, sense=sense, rhs=bound, name=constraint_label) else: # L <= body <= U if (constraint_data.upper is not None) and \ (constraint_data.lower is not None): grb_con = grbmodel.addRange( expr, self._get_bound(constraint_data.lower), self._get_bound(constraint_data.upper), constraint_label) _self_range_con_var_pairs.append((grb_con,range_var_idx)) range_var_idx += 1 # body <= U elif constraint_data.upper is not None: bound = self._get_bound(constraint_data.upper) if bound < float('inf'): grbmodel.addConstr( lhs=expr, sense=GRB.LESS_EQUAL, rhs=bound, name=constraint_label ) # L <= body else: bound = self._get_bound(constraint_data.lower) if bound > -float('inf'): grbmodel.addConstr( lhs=expr, sense=GRB.GREATER_EQUAL, rhs=bound, name=constraint_label ) if modelSOS.sosType: for key in modelSOS.sosType: grbmodel.addSOS(modelSOS.sosType[key], \ modelSOS.varnames[key], \ modelSOS.weights[key] ) self._referenced_variable_ids.update(modelSOS.varids[key]) for var_id in self._referenced_variable_ids: varname = self._variable_symbol_map.byObject[var_id] vardata = self._variable_symbol_map.bySymbol[varname]() if vardata.fixed: if not self._output_fixed_variable_bounds: raise ValueError("Encountered a fixed variable (%s) inside an active objective " "or constraint expression on model %s, which is usually indicative of " "a preprocessing error. Use the IO-option 'output_fixed_variable_bounds=True' " "to suppress this error and fix the variable by overwriting its bounds in " "the Gurobi instance." % (vardata.cname(True),pyomo_instance.cname(True),)) grbvar = pyomo_gurobi_variable_map[varname] grbvar.setAttr(GRB.Attr.UB, vardata.value) grbvar.setAttr(GRB.Attr.LB, vardata.value) grbmodel.update() self._gurobi_instance = grbmodel self._pyomo_gurobi_variable_map = pyomo_gurobi_variable_map
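# Hedged usage sketch for the direct Gurobi interface built above.  The
# SolverFactory registration name and solver_io value are assumptions based
# on the usual Pyomo registration for this plugin; gurobipy must be
# importable for the Model()/addVar()/addConstr() calls above to succeed.
from pyomo.environ import (ConcreteModel, Var, Objective, Constraint,
                           SolverFactory, NonNegativeReals)

m = ConcreteModel()
m.x = Var(within=NonNegativeReals)
m.y = Var(within=NonNegativeReals)
m.obj = Objective(expr=m.x + m.y)
m.con = Constraint(expr=2 * m.x + m.y >= 3)

opt = SolverFactory("gurobi", solver_io="python")
results = opt.solve(m)  # internally builds the Gurobi model as shown above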
def _xform_constraint(self, _name, constraint, varMap, disjunct, block): lin_body_map = getattr(block, "lin_body", None) for cname, c in iteritems(constraint._data): name = _name + ('.%s' % (cname, ) if cname else '') if (not lin_body_map is None) and ( not lin_body_map.get(c) is None): raise GDP_Error('GDP(cHull) cannot process linear ' \ 'constraint bodies (yet) (found at ' + name + ').') constant = 0 try: cannonical = generate_canonical_repn(c.body) if isinstance(cannonical, LinearCanonicalRepn): NL = False else: NL = canonical_is_nonlinear(cannonical) except: NL = True # We need to evaluate teh expression at the origin *before* # we substitute the expression variables with the # disaggregated variables if NL and self._mode == NL_Mode_FurmanSawayaGrossmann: h_0 = value( self._eval_at_origin(NL, c.body.clone(), disjunct.indicator_var, varMap)) expr = self._var_subst(NL, c.body, disjunct.indicator_var, varMap) if NL: y = disjunct.indicator_var if self._mode == NL_Mode_LeeGrossmann: expr = expr * y elif self._mode == NL_Mode_GrossmannLee: expr = (y + EPS) * expr elif self._mode == NL_Mode_FurmanSawayaGrossmann: expr = ((1 - EPS) * y + EPS) * expr - EPS * h_0 * (1 - y) else: raise RuntimeError("Unknown NL CHull mode") else: # We need to make sure to pull out the constant terms # from the expression and put them into the lb/ub if cannonical.constant == None: constant = 0 else: constant = cannonical.constant if c.lower is not None: if __debug__ and logger.isEnabledFor(logging.DEBUG): logger.debug( "GDP(cHull): Promoting constraint " + "'%s' as '%s_lo'", name, name) bound = c.lower() - constant if bound != 0: newC = Constraint( expr = bound*disjunct.indicator_var \ <= expr - constant ) else: newC = Constraint(expr=bound <= expr - constant) block.add_component(name + "_lo", newC) newC.construct() if c.upper is not None: if __debug__ and logger.isEnabledFor(logging.DEBUG): logger.debug( "GDP(cHull): Promoting constraint " + "'%s' as '%s_hi'", name, name) bound = c.upper() - constant if bound != 0: newC = Constraint( expr = expr - constant <= \ bound*disjunct.indicator_var ) else: newC = Constraint(expr=expr - constant <= bound) block.add_component(name + "_hi", newC) newC.construct()
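# Hedged illustration (not part of the transformation above) of the three
# nonlinear cHull modes applied to a constraint body expr with disjunct
# indicator y.  EPS and h_0 stand in for the module-level tolerance and the
# value of the body evaluated at the origin, as used above.
def _chull_mode_sketch(expr, y, h_0, EPS=1.0e-4):
    # Lee-Grossmann: scale the (substituted) body by the indicator
    lee_grossmann = expr * y
    # Grossmann-Lee: perturb the indicator to avoid degeneracy at y == 0
    grossmann_lee = (y + EPS) * expr
    # Furman-Sawaya-Grossmann: perturbed scaling plus a correction term so
    # the relaxation is exact when y is 0 or 1
    furman_sawaya_grossmann = ((1 - EPS) * y + EPS) * expr - EPS * h_0 * (1 - y)
    return lee_grossmann, grossmann_lee, furman_sawaya_grossmann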
def _populate_gurobi_instance(self, pyomo_instance): from pyomo.core.base import Var, Objective, Constraint, SOSConstraint from pyomo.repn import LinearCanonicalRepn, canonical_degree try: grbmodel = Model(name=pyomo_instance.name) except Exception: e = sys.exc_info()[1] msg = 'Unable to create Gurobi model. Have you installed the Python'\ '\n bindings for Gurobi?\n\n\tError message: %s' raise Exception(msg % e) if self._symbolic_solver_labels: labeler = TextLabeler() else: labeler = NumericLabeler('x') # cache to avoid dictionary getitem calls in the loops below. self_symbol_map = self._symbol_map = SymbolMap() pyomo_instance.solutions.add_symbol_map(self_symbol_map) self._smap_id = id(self_symbol_map) # we use this when iterating over the constraints because it # will have a much smaller hash table, we also use this for # the warm start code after it is cleaned to only contain # variables referenced in the constraints self_variable_symbol_map = self._variable_symbol_map = SymbolMap() var_symbol_pairs = [] # maps _VarData labels to the corresponding Gurobi variable object pyomo_gurobi_variable_map = {} self._referenced_variable_ids.clear() # cache to avoid dictionary getitem calls in the loop below. grb_infinity = GRB.INFINITY for var_value in pyomo_instance.component_data_objects(Var, active=True): lb = -grb_infinity ub = grb_infinity if (var_value.lb is not None) and (var_value.lb != -infinity): lb = value(var_value.lb) if (var_value.ub is not None) and (var_value.ub != infinity): ub = value(var_value.ub) # _VarValue objects will not be in the symbol map yet, so # avoid some checks. var_value_label = self_symbol_map.createSymbol(var_value, labeler) var_symbol_pairs.append((var_value, var_value_label)) # be sure to impart the integer and binary nature of any variables if var_value.is_integer(): var_type = GRB.INTEGER elif var_value.is_binary(): var_type = GRB.BINARY elif var_value.is_continuous(): var_type = GRB.CONTINUOUS else: raise TypeError( "Invalid domain type for variable with name '%s'. 
" "Variable is not continuous, integer, or binary.") pyomo_gurobi_variable_map[var_value_label] = \ grbmodel.addVar(lb=lb, \ ub=ub, \ vtype=var_type, \ name=var_value_label) self_variable_symbol_map.addSymbols(var_symbol_pairs) grbmodel.update() # The next loop collects the following component types from the model: # - SOSConstraint # - Objective # - Constraint sos1 = self._capabilities.sos1 sos2 = self._capabilities.sos2 modelSOS = ModelSOS() objective_cntr = 0 # Track the range constraints and their associated variables added by gurobi self._last_native_var_idx = grbmodel.NumVars - 1 range_var_idx = grbmodel.NumVars _self_range_con_var_pairs = self._range_con_var_pairs = [] for block in pyomo_instance.block_data_objects(active=True): gen_obj_canonical_repn = \ getattr(block, "_gen_obj_canonical_repn", True) gen_con_canonical_repn = \ getattr(block, "_gen_con_canonical_repn", True) # Get/Create the ComponentMap for the repn if not hasattr(block, '_canonical_repn'): block._canonical_repn = ComponentMap() block_canonical_repn = block._canonical_repn # SOSConstraints for soscondata in block.component_data_objects(SOSConstraint, active=True, descend_into=False): level = soscondata.level if (level == 1 and not sos1) or \ (level == 2 and not sos2) or \ (level > 2): raise RuntimeError( "Solver does not support SOS level %s constraints" % (level, )) modelSOS.count_constraint(self_symbol_map, labeler, self_variable_symbol_map, pyomo_gurobi_variable_map, soscondata) # Objective for obj_data in block.component_data_objects(Objective, active=True, descend_into=False): if objective_cntr > 1: raise ValueError( "Multiple active objectives found on Pyomo instance '%s'. " "Solver '%s' will only handle a single active objective" \ % (pyomo_instance.cname(True), self.type)) sense = GRB_MIN if (obj_data.is_minimizing()) else GRB_MAX grbmodel.ModelSense = sense obj_expr = LinExpr() if gen_obj_canonical_repn: obj_repn = generate_canonical_repn(obj_data.expr) block_canonical_repn[obj_data] = obj_repn else: obj_repn = block_canonical_repn[obj_data] if isinstance(obj_repn, LinearCanonicalRepn): if obj_repn.constant != None: obj_expr.addConstant(obj_repn.constant) if obj_repn.linear != None: for i in xrange(len(obj_repn.linear)): var_coefficient = obj_repn.linear[i] var_value = obj_repn.variables[i] self._referenced_variable_ids.add(id(var_value)) label = self_variable_symbol_map.getSymbol( var_value) obj_expr.addTerms(var_coefficient, pyomo_gurobi_variable_map[label]) else: if 0 in obj_repn: # constant term obj_expr.addConstant(obj_repn[0][None]) if 1 in obj_repn: # first-order terms hash_to_variable_map = obj_repn[-1] for var_hash, var_coefficient in iteritems( obj_repn[1]): vardata = hash_to_variable_map[var_hash] self._referenced_variable_ids.add(id(vardata)) label = self_variable_symbol_map.getSymbol(vardata) obj_expr.addTerms(var_coefficient, pyomo_gurobi_variable_map[label]) if 2 in obj_repn: obj_expr = QuadExpr(obj_expr) hash_to_variable_map = obj_repn[-1] for quad_repn, coef in iteritems(obj_repn[2]): gurobi_expr = QuadExpr(coef) for var_hash, exponent in iteritems(quad_repn): vardata = hash_to_variable_map[var_hash] self._referenced_variable_ids.add(id(vardata)) gurobi_var = pyomo_gurobi_variable_map\ [self_variable_symbol_map.\ getSymbol(vardata)] gurobi_expr *= gurobi_var if exponent == 2: gurobi_expr *= gurobi_var obj_expr += gurobi_expr degree = canonical_degree(obj_repn) if (degree is None) or (degree > 2): raise ValueError( "gurobi_direct plugin does not support general nonlinear " "objective 
expressions (only linear or quadratic).\n" "Objective: %s" % (obj_data.cname(True))) # need to cache the objective label, because the # GUROBI python interface doesn't track this. # _ObjectiveData objects will not be in the symbol map # yet, so avoid some checks. self._objective_label = \ self_symbol_map.createSymbol(obj_data, labeler) grbmodel.setObjective(obj_expr, sense=sense) # Constraint for constraint_data in block.component_data_objects( Constraint, active=True, descend_into=False): if (constraint_data.lower is None) and \ (constraint_data.upper is None): continue # not binding at all, don't bother con_repn = None if isinstance(constraint_data, LinearCanonicalRepn): con_repn = constraint_data else: if gen_con_canonical_repn: con_repn = generate_canonical_repn( constraint_data.body) block_canonical_repn[constraint_data] = con_repn else: con_repn = block_canonical_repn[constraint_data] offset = 0.0 # _ConstraintData objects will not be in the symbol # map yet, so avoid some checks. constraint_label = \ self_symbol_map.createSymbol(constraint_data, labeler) trivial = False if isinstance(con_repn, LinearCanonicalRepn): # # optimization (these might be generated on the fly) # constant = con_repn.constant coefficients = con_repn.linear variables = con_repn.variables if constant is not None: offset = constant expr = LinExpr() + offset if coefficients is not None: linear_coefs = list() linear_vars = list() for i in xrange(len(coefficients)): var_coefficient = coefficients[i] var_value = variables[i] self._referenced_variable_ids.add(id(var_value)) label = self_variable_symbol_map.getSymbol( var_value) linear_coefs.append(var_coefficient) linear_vars.append( pyomo_gurobi_variable_map[label]) expr += LinExpr(linear_coefs, linear_vars) else: trivial = True else: if 0 in con_repn: offset = con_repn[0][None] expr = LinExpr() + offset if 1 in con_repn: # first-order terms linear_coefs = list() linear_vars = list() hash_to_variable_map = con_repn[-1] for var_hash, var_coefficient in iteritems( con_repn[1]): var = hash_to_variable_map[var_hash] self._referenced_variable_ids.add(id(var)) label = self_variable_symbol_map.getSymbol(var) linear_coefs.append(var_coefficient) linear_vars.append( pyomo_gurobi_variable_map[label]) expr += LinExpr(linear_coefs, linear_vars) if 2 in con_repn: # quadratic constraint if _GUROBI_VERSION_MAJOR < 5: raise ValueError( "The gurobi_direct plugin does not handle quadratic " "constraint expressions for Gurobi major versions " "< 5. 
Current version: Gurobi %s.%s%s" % (gurobi.version())) expr = QuadExpr(expr) hash_to_variable_map = con_repn[-1] for quad_repn, coef in iteritems(con_repn[2]): gurobi_expr = QuadExpr(coef) for var_hash, exponent in iteritems(quad_repn): vardata = hash_to_variable_map[var_hash] self._referenced_variable_ids.add(id(vardata)) gurobi_var = pyomo_gurobi_variable_map\ [self_variable_symbol_map.\ getSymbol(vardata)] gurobi_expr *= gurobi_var if exponent == 2: gurobi_expr *= gurobi_var expr += gurobi_expr degree = canonical_degree(con_repn) if (degree is None) or (degree > 2): raise ValueError( "gurobi_direct plugin does not support general nonlinear " "constraint expressions (only linear or quadratic).\n" "Constraint: %s" % (constraint_data.cname(True))) if (not trivial) or (not self._skip_trivial_constraints): if constraint_data.equality: sense = GRB.EQUAL bound = self._get_bound(constraint_data.lower) grbmodel.addConstr(lhs=expr, sense=sense, rhs=bound, name=constraint_label) else: # L <= body <= U if (constraint_data.upper is not None) and \ (constraint_data.lower is not None): grb_con = grbmodel.addRange( expr, self._get_bound(constraint_data.lower), self._get_bound(constraint_data.upper), constraint_label) _self_range_con_var_pairs.append( (grb_con, range_var_idx)) range_var_idx += 1 # body <= U elif constraint_data.upper is not None: bound = self._get_bound(constraint_data.upper) if bound < float('inf'): grbmodel.addConstr(lhs=expr, sense=GRB.LESS_EQUAL, rhs=bound, name=constraint_label) # L <= body else: bound = self._get_bound(constraint_data.lower) if bound > -float('inf'): grbmodel.addConstr(lhs=expr, sense=GRB.GREATER_EQUAL, rhs=bound, name=constraint_label) if modelSOS.sosType: for key in modelSOS.sosType: grbmodel.addSOS(modelSOS.sosType[key], \ modelSOS.varnames[key], \ modelSOS.weights[key] ) self._referenced_variable_ids.update(modelSOS.varids[key]) for var_id in self._referenced_variable_ids: varname = self._variable_symbol_map.byObject[var_id] vardata = self._variable_symbol_map.bySymbol[varname]() if vardata.fixed: if not self._output_fixed_variable_bounds: raise ValueError( "Encountered a fixed variable (%s) inside an active objective " "or constraint expression on model %s, which is usually indicative of " "a preprocessing error. Use the IO-option 'output_fixed_variable_bounds=True' " "to suppress this error and fix the variable by overwriting its bounds in " "the Gurobi instance." % ( vardata.cname(True), pyomo_instance.cname(True), )) grbvar = pyomo_gurobi_variable_map[varname] grbvar.setAttr(GRB.Attr.UB, vardata.value) grbvar.setAttr(GRB.Attr.LB, vardata.value) grbmodel.update() self._gurobi_instance = grbmodel self._pyomo_gurobi_variable_map = pyomo_gurobi_variable_map
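# Hedged illustration of the general (non-LinearCanonicalRepn) canonical
# representation consumed by the objective/constraint loops above: a dict
# keyed by polynomial degree, with key -1 mapping variable hashes back to
# the variable objects.  The layout is inferred from how the loops above
# read it; the helper name is illustrative.
def _walk_general_canonical_repn(repn):
    terms = []
    var_map = repn.get(-1, {})
    if 0 in repn:
        # constant term
        terms.append(("constant", repn[0][None]))
    if 1 in repn:
        # linear terms: {var_hash: coefficient}
        for var_hash, coef in repn[1].items():
            terms.append(("linear", var_map[var_hash], coef))
    if 2 in repn:
        # quadratic terms: {{var_hash: exponent}: coefficient}
        for quad_key, coef in repn[2].items():
            term_vars = [(var_map[h], exponent)
                         for h, exponent in quad_key.items()]
            terms.append(("quadratic", term_vars, coef))
    return terms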
def _print_model_MPS(self, model, output_file, solver_capability, labeler, output_fixed_variable_bounds=False, file_determinism=1, row_order=None, column_order=None, skip_trivial_constraints=False, force_objective_constant=False, include_all_variable_bounds=False, skip_objective_sense=False): symbol_map = SymbolMap() variable_symbol_map = SymbolMap() # NOTE: we use createSymbol instead of getSymbol because we # know whether or not the symbol exists, and don't want # to the overhead of error/duplicate checking. # cache frequently called functions extract_variable_coefficients = self._extract_variable_coefficients create_symbol_func = SymbolMap.createSymbol create_symbols_func = SymbolMap.createSymbols alias_symbol_func = SymbolMap.alias variable_label_pairs = [] sortOrder = SortComponents.unsorted if file_determinism >= 1: sortOrder = sortOrder | SortComponents.indices if file_determinism >= 2: sortOrder = sortOrder | SortComponents.alphabetical # # Create variable symbols (and cache the block list) # all_blocks = [] variable_list = [] for block in model.block_data_objects(active=True, sort=sortOrder): all_blocks.append(block) for vardata in block.component_data_objects( Var, active=True, sort=sortOrder, descend_into=False): variable_list.append(vardata) variable_label_pairs.append( (vardata,create_symbol_func(symbol_map, vardata, labeler))) variable_symbol_map.addSymbols(variable_label_pairs) # and extract the information we'll need for rapid labeling. object_symbol_dictionary = symbol_map.byObject variable_symbol_dictionary = variable_symbol_map.byObject # sort the variable ordering by the user # column_order ComponentMap if column_order is not None: variable_list.sort(key=lambda _x: column_order[_x]) # prepare to hold the sparse columns variable_to_column = ComponentMap( (vardata, i) for i, vardata in enumerate(variable_list)) # add one position for ONE_VAR_CONSTANT column_data = [[] for i in xrange(len(variable_list)+1)] quadobj_data = [] quadmatrix_data = [] # constraint rhs rhs_data = [] # print the model name and the source, so we know # roughly where output_file.write("* Source: Pyomo MPS Writer\n") output_file.write("* Format: Free MPS\n") output_file.write("*\n") output_file.write("NAME %s\n" % (model.name,)) # # ROWS section # objective_label = None numObj = 0 onames = [] for block in all_blocks: gen_obj_canonical_repn = \ getattr(block, "_gen_obj_canonical_repn", True) # Get/Create the ComponentMap for the repn if not hasattr(block,'_canonical_repn'): block._canonical_repn = ComponentMap() block_canonical_repn = block._canonical_repn for objective_data in block.component_data_objects( Objective, active=True, sort=sortOrder, descend_into=False): numObj += 1 onames.append(objective_data.cname()) if numObj > 1: raise ValueError( "More than one active objective defined for input " "model '%s'; Cannot write legal MPS file\n" "Objectives: %s" % (model.cname(True), ' '.join(onames))) objective_label = create_symbol_func(symbol_map, objective_data, labeler) symbol_map.alias(objective_data, '__default_objective__') if not skip_objective_sense: output_file.write("OBJSENSE\n") if objective_data.is_minimizing(): output_file.write(" MIN\n") else: output_file.write(" MAX\n") # This section is not recognized by the COIN-OR # MPS reader #output_file.write("OBJNAME\n") #output_file.write(" %s\n" % (objective_label)) output_file.write("ROWS\n") output_file.write(" N %s\n" % (objective_label)) if gen_obj_canonical_repn: canonical_repn = \ generate_canonical_repn(objective_data.expr) 
block_canonical_repn[objective_data] = canonical_repn else: canonical_repn = block_canonical_repn[objective_data] degree = canonical_degree(canonical_repn) if degree == 0: print("Warning: Constant objective detected, replacing " "with a placeholder to prevent solver failure.") force_objective_constant = True elif (degree != 1) and (degree != 2): raise RuntimeError( "Cannot write legal MPS file. Objective '%s' " "has nonlinear terms that are not quadratic." % objective_data.cname(True)) constant = extract_variable_coefficients( objective_label, canonical_repn, column_data, quadobj_data, variable_to_column) if force_objective_constant or (constant != 0.0): # ONE_VAR_CONSTANT column_data[-1].append((objective_label, constant)) if numObj == 0: raise ValueError( "Cannot write legal MPS file: No objective defined " "for input model '%s'." % str(model)) assert objective_label is not None # Constraints def constraint_generator(): for block in all_blocks: gen_con_canonical_repn = \ getattr(block, "_gen_con_canonical_repn", True) # Get/Create the ComponentMap for the repn if not hasattr(block,'_canonical_repn'): block._canonical_repn = ComponentMap() block_canonical_repn = block._canonical_repn for constraint_data in block.component_data_objects( Constraint, active=True, sort=sortOrder, descend_into=False): if isinstance(constraint_data, LinearCanonicalRepn): canonical_repn = constraint_data else: if gen_con_canonical_repn: canonical_repn = generate_canonical_repn( constraint_data.body) block_canonical_repn[constraint_data] = canonical_repn else: canonical_repn = block_canonical_repn[constraint_data] yield constraint_data, canonical_repn if row_order is not None: sorted_constraint_list = list(constraint_generator()) sorted_constraint_list.sort(key=lambda x: row_order[x[0]]) def yield_all_constraints(): for constraint_data, canonical_repn in sorted_constraint_list: yield constraint_data, canonical_repn else: yield_all_constraints = constraint_generator for constraint_data, canonical_repn in yield_all_constraints(): degree = canonical_degree(canonical_repn) # Write constraint if degree == 0: if skip_trivial_constraints: continue elif (degree != 1) and (degree != 2): raise RuntimeError( "Cannot write legal MPS file. Constraint '%s' " "has nonlinear terms that are not quadratic." 
% constraint_data.cname(True)) # Create symbol con_symbol = create_symbol_func(symbol_map, constraint_data, labeler) if constraint_data.equality: label = 'c_e_' + con_symbol + '_' alias_symbol_func(symbol_map, constraint_data, label) output_file.write(" E %s\n" % (label)) offset = extract_variable_coefficients( label, canonical_repn, column_data, quadmatrix_data, variable_to_column) bound = constraint_data.lower bound = self._get_bound(bound) - offset rhs_data.append((label, bound)) else: if constraint_data.lower is not None: if constraint_data.upper is not None: label = 'r_l_' + con_symbol + '_' else: label = 'c_l_' + con_symbol + '_' alias_symbol_func(symbol_map, constraint_data, label) output_file.write(" G %s\n" % (label)) offset = extract_variable_coefficients( label, canonical_repn, column_data, quadmatrix_data, variable_to_column) bound = constraint_data.lower bound = self._get_bound(bound) - offset rhs_data.append((label, bound)) if constraint_data.upper is not None: if constraint_data.lower is not None: label = 'r_u_' + con_symbol + '_' else: label = 'c_u_' + con_symbol + '_' alias_symbol_func(symbol_map, constraint_data, label) output_file.write(" L %s\n" % (label)) offset = extract_variable_coefficients( label, canonical_repn, column_data, quadmatrix_data, variable_to_column) bound = constraint_data.upper bound = self._get_bound(bound) - offset rhs_data.append((label, bound)) if len(column_data[-1]) > 0: # ONE_VAR_CONSTANT = 1 output_file.write(" E c_e_ONE_VAR_CONSTANT\n") column_data[-1].append(("c_e_ONE_VAR_CONSTANT",1)) rhs_data.append(("c_e_ONE_VAR_CONSTANT",1)) # # COLUMNS section # column_template = " %s %s %"+self._precision_string+"\n" output_file.write("COLUMNS\n") cnt = 0 for vardata in variable_list: col_entries = column_data[variable_to_column[vardata]] cnt += 1 if len(col_entries) > 0: var_label = variable_symbol_dictionary[id(vardata)] for i, (row_label, coef) in enumerate(col_entries): output_file.write(column_template % (var_label, row_label, coef)) elif include_all_variable_bounds: # the column is empty, so add a (0 * var) # term to the objective # * Note that some solvers (e.g., Gurobi) # will accept an empty column as a line # with just the column name. 
This doesn't # seem to work for CPLEX 12.6, so I am # doing it this way so that it will work for both var_label = variable_symbol_dictionary[id(vardata)] output_file.write(column_template % (var_label, objective_label, 0)) assert cnt == len(column_data)-1 if len(column_data[-1]) > 0: col_entries = column_data[-1] var_label = "ONE_VAR_CONSTANT" for i, (row_label, coef) in enumerate(col_entries): output_file.write(column_template % (var_label, row_label, coef)) # # RHS section # rhs_template = " RHS %s %"+self._precision_string+"\n" output_file.write("RHS\n") for i, (row_label, rhs) in enumerate(rhs_data): output_file.write(rhs_template % (row_label, rhs)) # SOS constraints SOSlines = StringIO() sos1 = solver_capability("sos1") sos2 = solver_capability("sos2") for block in all_blocks: for soscondata in block.component_data_objects( SOSConstraint, active=True, sort=sortOrder, descend_into=False): create_symbol_func(symbol_map, soscondata, labeler) level = soscondata.level if (level == 1 and not sos1) or \ (level == 2 and not sos2) or \ (level > 2): raise ValueError( "Solver does not support SOS level %s constraints" % (level)) # This updates the referenced_variable_ids, just in case # there is a variable that only appears in an # SOSConstraint, in which case this needs to be known # before we write the "bounds" section (Cplex does not # handle this correctly, Gurobi does) self._printSOS(symbol_map, labeler, variable_symbol_map, soscondata, SOSlines) # # BOUNDS section # entry_template = "%s %"+self._precision_string+"\n" output_file.write("BOUNDS\n") for vardata in variable_list: if include_all_variable_bounds or \ (id(vardata) in self._referenced_variable_ids): var_label = variable_symbol_dictionary[id(vardata)] if vardata.fixed: if not output_fixed_variable_bounds: raise ValueError( "Encountered a fixed variable (%s) inside an active " "objective or constraint expression on model %s, which is " "usually indicative of a preprocessing error. Use the " "IO-option 'output_fixed_variable_bounds=True' to suppress " "this error and fix the variable by overwriting its bounds " "in the MPS file." % (vardata.cname(True), model.cname(True))) if vardata.value is None: raise ValueError("Variable cannot be fixed to a value of None.") output_file.write((" FX BOUND "+entry_template) % (var_label, value(vardata.value))) continue vardata_lb = self._get_bound(vardata.lb) vardata_ub = self._get_bound(vardata.ub) # Make it harder for -0 to show up in # the output. This makes file diffing # for test baselines slightly less # annoying if vardata_lb == 0: vardata_lb = 0 if vardata_ub == 0: vardata_ub = 0 unbounded_lb = (vardata_lb is None) or (vardata_lb == -infinity) unbounded_ub = (vardata_ub is None) or (vardata_ub == infinity) treat_as_integer = False if vardata.is_binary(): if (vardata_lb == 0) and (vardata_ub == 1): output_file.write(" BV BOUND %s\n" % (var_label)) continue else: # so we can add bounds treat_as_integer = True if treat_as_integer or vardata.is_integer(): # Indicating unbounded integers is tricky because # the only way to indicate a variable is integer # is using the bounds section. 
Thus, we signify # infinity with a large number (10E20) # * Note: Gurobi allows values like inf and -inf # but CPLEX 12.6 does not, so I am just # using a large value if not unbounded_lb: output_file.write((" LI BOUND "+entry_template) % (var_label, vardata_lb)) else: output_file.write(" LI BOUND %s -10E20\n" % (var_label)) if not unbounded_ub: output_file.write((" UI BOUND "+entry_template) % (var_label, vardata_ub)) else: output_file.write(" UI BOUND %s 10E20\n" % (var_label)) else: assert vardata.is_continuous() if unbounded_lb and unbounded_ub: output_file.write(" FR BOUND %s\n" % (var_label)) else: if not unbounded_lb: output_file.write((" LO BOUND "+entry_template) % (var_label, vardata_lb)) else: output_file.write(" MI BOUND %s\n" % (var_label)) if not unbounded_ub: output_file.write((" UP BOUND "+entry_template) % (var_label, vardata_ub)) # # SOS section # output_file.write(SOSlines.getvalue()) # Formatting of the next two sections comes from looking # at Gurobi and Cplex output # # QUADOBJ section # if len(quadobj_data) > 0: assert len(quadobj_data) == 1 # it looks like the COIN-OR MPS Reader only # recognizes QUADOBJ (Gurobi and Cplex seem to # be okay with this) output_file.write("QUADOBJ\n") #output_file.write("QMATRIX\n") label, quad_terms = quadobj_data[0] assert label == objective_label for (var1, var2), coef in sorted(quad_terms, key=lambda _x: (variable_to_column[_x[0][0]], variable_to_column[_x[0][1]])): var1_label = variable_symbol_dictionary[id(var1)] var2_label = variable_symbol_dictionary[id(var2)] # Don't forget that a quadratic objective is always # assumed to be divided by 2 if var1_label == var2_label: output_file.write(column_template % (var1_label, var2_label, coef * 2)) else: # the matrix needs to be symmetric so split # the coefficient (but remember it is divided by 2) output_file.write(column_template % (var1_label, var2_label, coef)) output_file.write(column_template % (var2_label, var1_label, coef)) # # QCMATRIX section # if len(quadmatrix_data) > 0: for row_label, quad_terms in quadmatrix_data: output_file.write("QCMATRIX %s\n" % (row_label)) for (var1, var2), coef in sorted(quad_terms, key=lambda _x: (variable_to_column[_x[0][0]], variable_to_column[_x[0][1]])): var1_label = variable_symbol_dictionary[id(var1)] var2_label = variable_symbol_dictionary[id(var2)] if var1_label == var2_label: output_file.write(column_template % (var1_label, var2_label, coef)) else: # the matrix needs to be symmetric so split # the coefficient output_file.write(column_template % (var1_label, var2_label, coef * 0.5)) output_file.write(column_template % (var2_label, var1_label, coef * 0.5)) output_file.write("ENDATA\n") # Clean up the symbol map to only contain variables referenced # in the active constraints **Note**: warm start method may # rely on this for choosing the set of potential warm start # variables vars_to_delete = set(variable_symbol_map.byObject.keys()) - \ set(self._referenced_variable_ids.keys()) sm_byObject = symbol_map.byObject sm_bySymbol = symbol_map.bySymbol var_sm_byObject = variable_symbol_map.byObject for varid in vars_to_delete: symbol = var_sm_byObject[varid] del sm_byObject[varid] del sm_bySymbol[symbol] del variable_symbol_map return symbol_map
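# Hedged illustration of the column-major bookkeeping used by the MPS writer
# above: coefficients are accumulated per variable (with one trailing slot
# reserved for ONE_VAR_CONSTANT) and emitted later under COLUMNS.  The row
# and variable names below are illustrative.
variables = ["x", "y"]
variable_to_column = {name: i for i, name in enumerate(variables)}
column_data = [[] for _ in range(len(variables) + 1)]

# constraint c1: 2*x + 3*y >= 1 contributes one entry per variable column
for var_name, coef in [("x", 2.0), ("y", 3.0)]:
    column_data[variable_to_column[var_name]].append(("c1", coef))

# an objective constant is routed through the ONE_VAR_CONSTANT column
column_data[-1].append(("obj", 4.0))

for col_name, entries in zip(variables + ["ONE_VAR_CONSTANT"], column_data):
    for row_name, coef in entries:
        print("    %s %s %r" % (col_name, row_name, coef))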
def _xform_constraint(self, _name, constraint, varMap, disjunct, block): lin_body_map = getattr(block,"lin_body",None) for cname, c in iteritems(constraint._data): name = _name + ('.%s' % (cname,) if cname else '') if (not lin_body_map is None) and (not lin_body_map.get(c) is None): raise GDP_Error('GDP(cHull) cannot process linear ' \ 'constraint bodies (yet) (found at ' + name + ').') constant = 0 try: cannonical = generate_canonical_repn(c.body) if isinstance(cannonical, LinearCanonicalRepn): NL = False else: NL = canonical_is_nonlinear(cannonical) except: NL = True # We need to evaluate teh expression at the origin *before* # we substitute the expression variables with the # disaggregated variables if NL and self._mode == NL_Mode_FurmanSawayaGrossmann: h_0 = value( self._eval_at_origin( NL, c.body.clone(), disjunct.indicator_var, varMap ) ) expr = self._var_subst(NL, c.body, disjunct.indicator_var, varMap) if NL: y = disjunct.indicator_var if self._mode == NL_Mode_LeeGrossmann: expr = expr * y elif self._mode == NL_Mode_GrossmannLee: expr = (y + EPS) * expr elif self._mode == NL_Mode_FurmanSawayaGrossmann: expr = ((1-EPS)*y + EPS)*expr - EPS*h_0*(1-y) else: raise RuntimeError("Unknown NL CHull mode") else: # We need to make sure to pull out the constant terms # from the expression and put them into the lb/ub if cannonical.constant == None: constant = 0 else: constant = cannonical.constant if c.lower is not None: if __debug__ and logger.isEnabledFor(logging.DEBUG): logger.debug("GDP(cHull): Promoting constraint " + "'%s' as '%s_lo'", name, name) bound = c.lower() - constant if bound != 0: newC = Constraint( expr = bound*disjunct.indicator_var \ <= expr - constant ) else: newC = Constraint( expr = bound <= expr - constant ) block.add_component( name+"_lo", newC ) newC.construct() if c.upper is not None: if __debug__ and logger.isEnabledFor(logging.DEBUG): logger.debug("GDP(cHull): Promoting constraint " + "'%s' as '%s_hi'", name, name) bound = c.upper() - constant if bound != 0: newC = Constraint( expr = expr - constant <= \ bound*disjunct.indicator_var ) else: newC = Constraint( expr = expr - constant <= bound ) block.add_component( name+"_hi", newC ) newC.construct()
def compile_instance(self, pyomo_instance, symbolic_solver_labels=False, output_fixed_variable_bounds=False, skip_trivial_constraints=False): from pyomo.core.base import Var, Constraint, SOSConstraint from pyomo.repn import canonical_is_constant, LinearCanonicalRepn, canonical_degree self._symbolic_solver_labels = symbolic_solver_labels self._output_fixed_variable_bounds = output_fixed_variable_bounds self._skip_trivial_constraints = skip_trivial_constraints self._has_quadratic_constraints = False self._has_quadratic_objective = False used_sos_constraints = False self._active_cplex_instance = cplex.Cplex() if self._symbolic_solver_labels: labeler = self._labeler = TextLabeler() else: labeler = self._labeler = NumericLabeler('x') self._symbol_map = SymbolMap() self._instance = pyomo_instance pyomo_instance.solutions.add_symbol_map(self._symbol_map) self._smap_id = id(self._symbol_map) # we use this when iterating over the constraints because it # will have a much smaller hash table, we also use this for # the warm start code after it is cleaned to only contain # variables referenced in the constraints self._variable_symbol_map = SymbolMap() # cplex wants the caller to set the problem type, which is (for # current purposes) strictly based on variable type counts. num_binary_variables = 0 num_integer_variables = 0 num_continuous_variables = 0 ############################################# # populate the variables in the cplex model # ############################################# var_names = [] var_lbs = [] var_ubs = [] var_types = [] self._referenced_variable_ids.clear() # maps pyomo var data labels to the corresponding CPLEX variable id. self._cplex_variable_ids.clear() # cached in the loop below - used to update the symbol map # immediately following loop termination. var_label_pairs = [] for var_data in pyomo_instance.component_data_objects(Var, active=True): if var_data.fixed and not self._output_fixed_variable_bounds: # if a variable is fixed, and we're preprocessing # fixed variables (as in not outputting them), there # is no need to add them to the compiled model. continue var_name = self._symbol_map.getSymbol(var_data, labeler) var_names.append(var_name) var_label_pairs.append((var_data, var_name)) self._cplex_variable_ids[var_name] = len(self._cplex_variable_ids) if (var_data.lb is None) or (var_data.lb == -infinity): var_lbs.append(-cplex.infinity) else: var_lbs.append(value(var_data.lb)) if (var_data.ub is None) or (var_data.ub == infinity): var_ubs.append(cplex.infinity) else: var_ubs.append(value(var_data.ub)) if var_data.is_integer(): var_types.append(self._active_cplex_instance.variables.type.integer) num_integer_variables += 1 elif var_data.is_binary(): var_types.append(self._active_cplex_instance.variables.type.binary) num_binary_variables += 1 elif var_data.is_continuous(): var_types.append(self._active_cplex_instance.variables.type.continuous) num_continuous_variables += 1 else: raise TypeError("Invalid domain type for variable with name '%s'. 
" "Variable is not continuous, integer, or binary.") self._active_cplex_instance.variables.add(names=var_names, lb=var_lbs, ub=var_ubs, types=var_types) self._active_cplex_instance.variables.add(lb=[1], ub=[1], names=["ONE_VAR_CONSTANT"]) self._cplex_variable_ids["ONE_VAR_CONSTANT"] = len(self._cplex_variable_ids) self._variable_symbol_map.addSymbols(var_label_pairs) self._cplex_variable_names = self._active_cplex_instance.variables.get_names() ######################################################## # populate the standard constraints in the cplex model # ######################################################## expressions = [] senses = [] rhss = [] range_values = [] names = [] qexpressions = [] qlinears = [] qsenses = [] qrhss = [] qnames = [] for block in pyomo_instance.block_data_objects(active=True): gen_con_canonical_repn = \ getattr(block, "_gen_con_canonical_repn", True) # Get/Create the ComponentMap for the repn if not hasattr(block,'_canonical_repn'): block._canonical_repn = ComponentMap() block_canonical_repn = block._canonical_repn for con in block.component_data_objects(Constraint, active=True, descend_into=False): if (con.lower is None) and \ (con.upper is None): continue # not binding at all, don't bother con_repn = None if isinstance(con, LinearCanonicalRepn): con_repn = con else: if gen_con_canonical_repn: con_repn = generate_canonical_repn(con.body) block_canonical_repn[con] = con_repn else: con_repn = block_canonical_repn[con] # There are conditions, e.g., when fixing variables, under which # a constraint block might be empty. Ignore these, for both # practical reasons and the fact that the CPLEX LP format # requires a variable in the constraint body. It is also # possible that the body of the constraint consists of only a # constant, in which case the "variable" of if isinstance(con_repn, LinearCanonicalRepn): if (con_repn.linear is None) and \ self._skip_trivial_constraints: continue else: # we shouldn't come across a constant canonical repn # that is not LinearCanonicalRepn assert not canonical_is_constant(con_repn) name = self._symbol_map.getSymbol(con, labeler) expr = None qexpr = None quadratic = False if isinstance(con_repn, LinearCanonicalRepn): expr, offset = \ self._encode_constraint_body_linear_specialized(con_repn, labeler, use_variable_names=False, cplex_variable_name_index_map=self._cplex_variable_ids) else: degree = canonical_degree(con_repn) if degree == 2: quadratic = True elif (degree != 0) or (degree != 1): raise ValueError( "CPLEXPersistent plugin does not support general nonlinear " "constraint expression (only linear or quadratic).\n" "Constraint: %s" % (con.cname(True))) expr, offset = self._encode_constraint_body_linear(con_repn, labeler) if quadratic: if expr is None: expr = cplex.SparsePair(ind=[0],val=[0.0]) self._has_quadratic_constraints = True qexpr = self._encode_constraint_body_quadratic(con_repn,labeler) qnames.append(name) if con.equality: # equality constraint. qsenses.append('E') qrhss.append(self._get_bound(con.lower) - offset) elif (con.lower is not None) and (con.upper is not None): raise RuntimeError( "The CPLEXDirect plugin can not translate range " "constraints containing quadratic expressions.") elif con.lower is not None: assert con.upper is None qsenses.append('G') qrhss.append(self._get_bound(con.lower) - offset) else: qsenses.append('L') qrhss.append(self._get_bound(con.upper) - offset) qlinears.append(expr) qexpressions.append(qexpr) else: names.append(name) expressions.append(expr) if con.equality: # equality constraint. 
senses.append('E') rhss.append(self._get_bound(con.lower) - offset) range_values.append(0.0) elif (con.lower is not None) and (con.upper is not None): # ranged constraint. senses.append('R') lower_bound = self._get_bound(con.lower) - offset upper_bound = self._get_bound(con.upper) - offset rhss.append(lower_bound) range_values.append(upper_bound - lower_bound) elif con.lower is not None: senses.append('G') rhss.append(self._get_bound(con.lower) - offset) range_values.append(0.0) else: senses.append('L') rhss.append(self._get_bound(con.upper) - offset) range_values.append(0.0) ################################################### # populate the SOS constraints in the cplex model # ################################################### # SOS constraints - largely taken from cpxlp.py so updates there, # should be applied here # TODO: Allow users to specify the variables coefficients for custom # branching/set orders - refer to cpxlp.py sosn = self._capabilities.sosn sos1 = self._capabilities.sos1 sos2 = self._capabilities.sos2 modelSOS = ModelSOS() for soscondata in pyomo_instance.component_data_objects(SOSConstraint, active=True): level = soscondata.level if (level == 1 and not sos1) or \ (level == 2 and not sos2) or \ (level > 2 and not sosn): raise Exception("Solver does not support SOS level %s constraints" % (level,)) modelSOS.count_constraint(self._symbol_map, labeler, self._variable_symbol_map, soscondata) if modelSOS.sosType: for key in modelSOS.sosType: self._active_cplex_instance.SOS.add(type = modelSOS.sosType[key], name = modelSOS.sosName[key], SOS = [modelSOS.varnames[key], modelSOS.weights[key]]) self._referenced_variable_ids.update(modelSOS.varids[key]) used_sos_constraints = True self._active_cplex_instance.linear_constraints.add( lin_expr=expressions, senses=senses, rhs=rhss, range_values=range_values, names=names) for index in xrange(len(qexpressions)): self._active_cplex_instance.quadratic_constraints.add( lin_expr=qlinears[index], quad_expr=qexpressions[index], sense=qsenses[index], rhs=qrhss[index], name=qnames[index]) ############################################# # populate the objective in the cplex model # ############################################# self.compile_objective(pyomo_instance) ################################################ # populate the problem type in the cplex model # ################################################ # This gets rid of the annoying "Freeing MIP data." message. 
def _filter_freeing_mip_data(val): if val.strip() == 'Freeing MIP data.': return "" return val self._active_cplex_instance.set_warning_stream(sys.stderr, fn=_filter_freeing_mip_data) if (self._has_quadratic_objective is True) or \ (self._has_quadratic_constraints is True): if (num_integer_variables > 0) or \ (num_binary_variables > 0) or \ (used_sos_constraints): if self._has_quadratic_constraints is True: self._active_cplex_instance.set_problem_type( self._active_cplex_instance.problem_type.MIQCP) else: self._active_cplex_instance.set_problem_type( self._active_cplex_instance.problem_type.MIQP) else: if self._has_quadratic_constraints is True: self._active_cplex_instance.set_problem_type( self._active_cplex_instance.problem_type.QCP) else: self._active_cplex_instance.set_problem_type( self._active_cplex_instance.problem_type.QP) elif (num_integer_variables > 0) or \ (num_binary_variables > 0) or \ (used_sos_constraints): self._active_cplex_instance.set_problem_type( self._active_cplex_instance.problem_type.MILP) else: self._active_cplex_instance.set_problem_type( self._active_cplex_instance.problem_type.LP) # restore the warning stream without our filter function self._active_cplex_instance.set_warning_stream(sys.stderr)
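# Illustrative sketch (not part of the plugin): the problem-type decision
# above, reduced to a pure function.  The helper name and the returned string
# labels are assumptions for illustration only; the plugin itself calls
# set_problem_type() with the corresponding cplex.Cplex().problem_type
# constants.
def _sketch_select_problem_type(has_quadratic_objective,
                                has_quadratic_constraints,
                                num_integer_variables,
                                num_binary_variables,
                                used_sos_constraints):
    discrete = (num_integer_variables > 0
                or num_binary_variables > 0
                or used_sos_constraints)
    if has_quadratic_objective or has_quadratic_constraints:
        if discrete:
            return 'MIQCP' if has_quadratic_constraints else 'MIQP'
        return 'QCP' if has_quadratic_constraints else 'QP'
    return 'MILP' if discrete else 'LP'

# Example: a quadratic objective, no quadratic constraints, and two binary
# variables yields a MIQP.
assert _sketch_select_problem_type(True, False, 0, 2, False) == 'MIQP'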
def compile_instance(self, pyomo_instance, symbolic_solver_labels=False, output_fixed_variable_bounds=False, skip_trivial_constraints=False): from pyomo.core.base import Var, Constraint, SOSConstraint from pyomo.repn import canonical_is_constant, LinearCanonicalRepn, canonical_degree self._symbolic_solver_labels = symbolic_solver_labels self._output_fixed_variable_bounds = output_fixed_variable_bounds self._skip_trivial_constraints = skip_trivial_constraints self._has_quadratic_constraints = False self._has_quadratic_objective = False self._active_cplex_instance = CPLEXDirect._cplex_module.Cplex() if self._symbolic_solver_labels: labeler = self._labeler = TextLabeler() else: labeler = self._labeler = NumericLabeler('x') self._symbol_map = SymbolMap() self._instance = pyomo_instance if isinstance(pyomo_instance, IBlockStorage): # BIG HACK if not hasattr(pyomo_instance, "._symbol_maps"): setattr(pyomo_instance, "._symbol_maps", {}) getattr(pyomo_instance, "._symbol_maps")[id(self._symbol_map)] = \ self._symbol_map else: pyomo_instance.solutions.add_symbol_map(self._symbol_map) self._smap_id = id(self._symbol_map) # we use this when iterating over the constraints because it # will have a much smaller hash table, we also use this for # the warm start code after it is cleaned to only contain # variables referenced in the constraints self._variable_symbol_map = SymbolMap() # cplex wants the caller to set the problem type, which is (for # current purposes) strictly based on variable type counts. self._num_binary_variables = 0 self._num_integer_variables = 0 self._num_continuous_variables = 0 self._used_sos_constraints = False ############################################# # populate the variables in the cplex model # ############################################# var_names = [] var_lbs = [] var_ubs = [] var_types = [] self._referenced_variable_ids.clear() # maps pyomo var data labels to the corresponding CPLEX variable id. self._cplex_variable_ids.clear() # cached in the loop below - used to update the symbol map # immediately following loop termination. var_label_pairs = [] for var_data in pyomo_instance.component_data_objects(Var, active=True): if var_data.fixed and not self._output_fixed_variable_bounds: # if a variable is fixed, and we're preprocessing # fixed variables (as in not outputting them), there # is no need to add them to the compiled model. continue var_name = self._symbol_map.getSymbol(var_data, labeler) var_names.append(var_name) var_label_pairs.append((var_data, var_name)) self._cplex_variable_ids[var_name] = len(self._cplex_variable_ids) if not var_data.has_lb(): var_lbs.append(-CPLEXDirect._cplex_module.infinity) else: var_lbs.append(value(var_data.lb)) if not var_data.has_ub(): var_ubs.append(CPLEXDirect._cplex_module.infinity) else: var_ubs.append(value(var_data.ub)) if var_data.is_integer(): var_types.append(self._active_cplex_instance.variables.type.integer) self._num_integer_variables += 1 elif var_data.is_binary(): var_types.append(self._active_cplex_instance.variables.type.binary) self._num_binary_variables += 1 elif var_data.is_continuous(): var_types.append(self._active_cplex_instance.variables.type.continuous) self._num_continuous_variables += 1 else: raise TypeError("Invalid domain type for variable with name '%s'. 
" "Variable is not continuous, integer, or binary.") self._active_cplex_instance.variables.add(names=var_names, lb=var_lbs, ub=var_ubs, types=var_types) self._active_cplex_instance.variables.add(lb=[1], ub=[1], names=["ONE_VAR_CONSTANT"]) self._cplex_variable_ids["ONE_VAR_CONSTANT"] = len(self._cplex_variable_ids) self._variable_symbol_map.addSymbols(var_label_pairs) self._cplex_variable_names = self._active_cplex_instance.variables.get_names() ######################################################## # populate the standard constraints in the cplex model # ######################################################## expressions = [] senses = [] rhss = [] range_values = [] names = [] qexpressions = [] qlinears = [] qsenses = [] qrhss = [] qnames = [] for block in pyomo_instance.block_data_objects(active=True): gen_con_canonical_repn = \ getattr(block, "_gen_con_canonical_repn", True) # Get/Create the ComponentMap for the repn if not hasattr(block,'_canonical_repn'): block._canonical_repn = ComponentMap() block_canonical_repn = block._canonical_repn for con in block.component_data_objects(Constraint, active=True, descend_into=False): if (not con.has_lb()) and \ (not con.has_ub()): assert not con.equality continue # not binding at all, don't bother con_repn = None if con._linear_canonical_form: con_repn = con.canonical_form() elif isinstance(con, LinearCanonicalRepn): con_repn = con else: if gen_con_canonical_repn: con_repn = generate_canonical_repn(con.body) block_canonical_repn[con] = con_repn else: con_repn = block_canonical_repn[con] # There are conditions, e.g., when fixing variables, under which # a constraint block might be empty. Ignore these, for both # practical reasons and the fact that the CPLEX LP format # requires a variable in the constraint body. It is also # possible that the body of the constraint consists of only a # constant, in which case the "variable" of if isinstance(con_repn, LinearCanonicalRepn): if self._skip_trivial_constraints and \ ((con_repn.linear is None) or \ (len(con_repn.linear) == 0)): continue else: # we shouldn't come across a constant canonical repn # that is not LinearCanonicalRepn assert not canonical_is_constant(con_repn) name = self._symbol_map.getSymbol(con, labeler) expr = None qexpr = None quadratic = False if isinstance(con_repn, LinearCanonicalRepn): expr, offset = \ self._encode_constraint_body_linear_specialized(con_repn, labeler, use_variable_names=False, cplex_variable_name_index_map=self._cplex_variable_ids) else: degree = canonical_degree(con_repn) if degree == 2: quadratic = True elif (degree != 0) or (degree != 1): raise ValueError( "CPLEXPersistent plugin does not support general nonlinear " "constraint expression (only linear or quadratic).\n" "Constraint: %s" % (con.name)) expr, offset = self._encode_constraint_body_linear(con_repn, labeler) if quadratic: if expr is None: expr = CPLEXDirect._cplex_module.SparsePair(ind=[0],val=[0.0]) self._has_quadratic_constraints = True qexpr = self._encode_constraint_body_quadratic(con_repn,labeler) qnames.append(name) if con.equality: # equality constraint. 
qsenses.append('E') qrhss.append(self._get_bound(con.lower) - offset) elif con.has_lb() and con.has_ub(): raise RuntimeError( "The CPLEXDirect plugin can not translate range " "constraints containing quadratic expressions.") elif con.has_lb(): assert not con.has_ub() qsenses.append('G') qrhss.append(self._get_bound(con.lower) - offset) else: assert con.has_ub() qsenses.append('L') qrhss.append(self._get_bound(con.upper) - offset) qlinears.append(expr) qexpressions.append(qexpr) else: names.append(name) expressions.append(expr) if con.equality: # equality constraint. senses.append('E') rhss.append(self._get_bound(con.lower) - offset) range_values.append(0.0) elif con.has_lb() and con.has_ub(): # ranged constraint. senses.append('R') lower_bound = self._get_bound(con.lower) - offset upper_bound = self._get_bound(con.upper) - offset rhss.append(lower_bound) range_values.append(upper_bound - lower_bound) elif con.has_lb(): senses.append('G') rhss.append(self._get_bound(con.lower) - offset) range_values.append(0.0) else: assert con.has_ub() senses.append('L') rhss.append(self._get_bound(con.upper) - offset) range_values.append(0.0) ################################################### # populate the SOS constraints in the cplex model # ################################################### # SOS constraints - largely taken from cpxlp.py so updates there, # should be applied here # TODO: Allow users to specify the variables coefficients for custom # branching/set orders - refer to cpxlp.py sosn = self._capabilities.sosn sos1 = self._capabilities.sos1 sos2 = self._capabilities.sos2 modelSOS = ModelSOS() for soscondata in pyomo_instance.component_data_objects(SOSConstraint, active=True): level = soscondata.level if (level == 1 and not sos1) or \ (level == 2 and not sos2) or \ (level > 2 and not sosn): raise Exception("Solver does not support SOS level %s constraints" % (level,)) modelSOS.count_constraint(self._symbol_map, labeler, self._variable_symbol_map, soscondata) if modelSOS.sosType: for key in modelSOS.sosType: self._active_cplex_instance.SOS.add(type = modelSOS.sosType[key], name = modelSOS.sosName[key], SOS = [modelSOS.varnames[key], modelSOS.weights[key]]) self._referenced_variable_ids.update(modelSOS.varids[key]) self._used_sos_constraints = True self._active_cplex_instance.linear_constraints.add( lin_expr=expressions, senses=senses, rhs=rhss, range_values=range_values, names=names) for index in xrange(len(qexpressions)): self._active_cplex_instance.quadratic_constraints.add( lin_expr=qlinears[index], quad_expr=qexpressions[index], sense=qsenses[index], rhs=qrhss[index], name=qnames[index]) ############################################# # populate the objective in the cplex model # ############################################# self.compile_objective(pyomo_instance)
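# Illustrative sketch (the helper name is hypothetical, not plugin API): how
# one constraint's bounds become a (sense, rhs, range) triple for the parallel
# lists passed to linear_constraints.add() above.  CPLEX interprets a ranged
# row with sense 'R', right-hand side r, and range value v >= 0 as
# r <= body <= r + v, which is why the code stores (lower - offset) in rhss
# and (upper - lower) in range_values.
def _sketch_encode_bounds(lower, upper, offset, equality=False):
    if equality:
        return 'E', lower - offset, 0.0
    if (lower is not None) and (upper is not None):
        return 'R', lower - offset, (upper - offset) - (lower - offset)
    if lower is not None:
        return 'G', lower - offset, 0.0
    return 'L', upper - offset, 0.0

# Example: 2 <= body <= 5, with a constant of 1 folded out of the body,
# becomes sense 'R', rhs 1.0, range 3.0.
assert _sketch_encode_bounds(2.0, 5.0, 1.0) == ('R', 1.0, 3.0)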
def to_standard_form(self): """ Produces a standard-form representation of the model. Returns the coefficient matrix (A), the cost vector (c), and the constraint vector (b), where the 'standard form' problem is min/max c'x s.t. Ax = b x >= 0 All three returned values are instances of the array.array class, and store Python floats (C doubles). """ from pyomo.repn import generate_canonical_repn # We first need to create a map of all variables to their column # number colID = {} ID2name = {} id = 0 tmp = self.variables().keys() tmp.sort() for v in tmp: colID[v] = id ID2name[id] = v id += 1 # First we go through the constraints and introduce slack and excess # variables to eliminate inequality constraints # # N.B. Structure hierarchy: # # active_components: {class: {attr_name: object}} # object -> Constraint: ._data: {ndx: _ConstraintData} # _ConstraintData: .lower, .body, .upper # # So, altogether, we access a lower bound via # # model.component_map(active=True)[Constraint]['con_name']['index'].lower # # {le,ge,eq}Constraints are # {constraint_name: {index: {variable_or_none: coefficient}}} objects # that represent each constraint. None in the innermost dictionary # represents the constant term. # # i.e. # # min x1 + 2*x2 + x4 # s.t. x1 = 1 # x2 + 3*x3 <= -1 # x1 + x4 >= 3 # x1 + 2*x2 + + 3*x4 >= 0 # # # would be represented as (modulo the names of the variables, # constraints, and indices) # # eqConstraints = {'c1': {None: {'x1':1, None:-1}}} # leConstraints = {'c2': {None: {'x2':1, 'x3':3, None:1}}} # geConstraints = {'c3': {None: {'x1':1, 'x4':1, None:-3}}, # 'c4': {None: {'x1':1, 'x2':2, 'x4':1, None:0}}} # # Note that we have the luxury of dealing only with linear terms. var_id_map = {} leConstraints = {} geConstraints = {} eqConstraints = {} objectives = {} # For each registered component for c in self.component_map(active=True): # Get all subclasses of Constraint if issubclass(c, Constraint): cons = self.component_map(c, active=True) # Get the name of the constraint, and the constraint set itself for con_set_name in cons: con_set = cons[con_set_name] # For each indexed constraint in the constraint set for ndx in con_set._data: con = con_set._data[ndx] # Process the body terms = self._process_canonical_repn( generate_canonical_repn(con.body, var_id_map)) # Process the bounds of the constraint if con.equality: # Equality constraint, only check lower bound lb = self._process_canonical_repn( generate_canonical_repn(con.lower, var_id_map)) # Update terms for k in lb: v = lb[k] if k in terms: terms[k] -= v else: terms[k] = -v # Add constraint to equality constraints eqConstraints[(con_set_name, ndx)] = terms else: # Process upper bounds (<= constraints) if con.upper is not None: # Less than or equal to constraint tmp = dict(terms) ub = self._process_canonical_repn( generate_canonical_repn(con.upper, var_id_map)) # Update terms for k in ub: if k in terms: tmp[k] -= ub[k] else: tmp[k] = -ub[k] # Add constraint to less than or equal to # constraints leConstraints[(con_set_name, ndx)] = tmp # Process lower bounds (>= constraints) if con.lower is not None: # Greater than or equal to constraint tmp = dict(terms) lb = self._process_canonical_repn( generate_canonical_repn(con.lower, var_id_map)) # Update terms for k in lb: if k in terms: tmp[k] -= lb[k] else: tmp[k] = -lb[k] # Add constraint to greater than or equal to # constraints geConstraints[(con_set_name, ndx)] = tmp elif issubclass(c, Objective): # Process objectives objs = self.component_map(c, active=True) # Get the name of the objective, and the 
objective set itself for obj_set_name in objs: obj_set = objs[obj_set_name] # For each indexed objective in the objective set for ndx in obj_set._data: obj = obj_set._data[ndx] # Process the objective terms = self._process_canonical_repn( generate_canonical_repn(obj.expr, var_id_map)) objectives[(obj_set_name, ndx)] = terms # We now have all the constraints. Add a slack variable for every # <= constraint and an excess variable for every >= constraint. nSlack = len(leConstraints) nExcess = len(geConstraints) nConstraints = len(leConstraints) + len(geConstraints) + \ len(eqConstraints) nVariables = len(colID) + nSlack + nExcess nRegVariables = len(colID) # Make the arrays coefficients = array.array("d", [0]*nConstraints*nVariables) constraints = array.array("d", [0]*nConstraints) costs = array.array("d", [0]*nVariables) # Populate the coefficient matrix constraintID = 0 # Add less than or equal to constraints for ndx in leConstraints: con = leConstraints[ndx] for termKey in con: coef = con[termKey] if termKey is None: # Constant term; moves to the constraint (RHS) vector constraints[constraintID] = -coef else: # Variable coefficient col = colID[termKey] coefficients[constraintID*nVariables + col] = coef # Add the slack coefficients[constraintID*nVariables + nRegVariables + \ constraintID] = 1 constraintID += 1 # Add greater than or equal to constraints for ndx in geConstraints: con = geConstraints[ndx] for termKey in con: coef = con[termKey] if termKey is None: # Constant term; moves to the constraint (RHS) vector constraints[constraintID] = -coef else: # Variable coefficient col = colID[termKey] coefficients[constraintID*nVariables + col] = coef # Add the excess coefficients[constraintID*nVariables + nRegVariables + \ constraintID] = -1 constraintID += 1 # Add equality constraints for ndx in eqConstraints: con = eqConstraints[ndx] for termKey in con: coef = con[termKey] if termKey is None: # Constant term; moves to the constraint (RHS) vector constraints[constraintID] = -coef else: # Variable coefficient col = colID[termKey] coefficients[constraintID*nVariables + col] = coef constraintID += 1 # Determine cost coefficients for obj_name in objectives: obj = objectives[obj_name] for var in obj: costs[colID[var]] = obj[var] # Print the model # # The goal is to print # # var1 var2 var3 ... # +-- --+ # | cost1 cost2 cost3 ...| # +-- --+ # +-- --+ +-- --+ # con1 | coef11 coef12 coef13 ...| | eq1 | # con2 | coef21 coef22 coef23 ...| | eq2 | # con3 | coef31 coef32 coef33 ...| | eq3 | # . | . . . . | | . | # . | . . . . | | . | # . | . . . . | | . 
| constraintPadding = 2 numFmt = "% 1.4f" altFmt = "% 1.1g" maxColWidth = max(len(numFmt % 0.0), len(altFmt % 0.0)) maxConstraintColWidth = max(len(numFmt % 0.0), len(altFmt % 0.0)) # Generate constraint names maxConNameLen = 0 conNames = [] for name in leConstraints: strName = str(name) if len(strName) > maxConNameLen: maxConNameLen = len(strName) conNames.append(strName) for name in geConstraints: strName = str(name) if len(strName) > maxConNameLen: maxConNameLen = len(strName) conNames.append(strName) for name in eqConstraints: strName = str(name) if len(strName) > maxConNameLen: maxConNameLen = len(strName) conNames.append(strName) # Generate the variable names varNames = [None]*len(colID) for name in colID: tmp_name = " " + name if len(tmp_name) > maxColWidth: maxColWidth = len(tmp_name) varNames[colID[name]] = tmp_name for i in xrange(0, nSlack): tmp_name = " _slack_%i" % i if len(tmp_name) > maxColWidth: maxColWidth = len(tmp_name) varNames.append(tmp_name) for i in xrange(0, nExcess): tmp_name = " _excess_%i" % i if len(tmp_name) > maxColWidth: maxColWidth = len(tmp_name) varNames.append(tmp_name) # Variable names line = " "*maxConNameLen + (" "*constraintPadding) + " " for col in xrange(0, nVariables): # Format entry token = varNames[col] # Pad with trailing whitespace token += " "*(maxColWidth - len(token)) # Add to line line += " " + token + " " print(line+'\n') # Cost vector print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \ " "*((maxColWidth+2)*nVariables - 4) + "--+" + '\n') line = " "*maxConNameLen + (" "*constraintPadding) + "|" for col in xrange(0, nVariables): # Format entry token = numFmt % costs[col] if len(token) > maxColWidth: token = altFmt % costs[col] # Pad with trailing whitespace token += " "*(maxColWidth - len(token)) # Add to line line += " " + token + " " line += "|" print(line+'\n') print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \ " "*((maxColWidth+2)*nVariables - 4) + "--+"+'\n') # Constraints print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \ " "*((maxColWidth+2)*nVariables - 4) + "--+" + \ (" "*constraintPadding) + "+--" + \ (" "*(maxConstraintColWidth-1)) + "--+"+'\n') for row in xrange(0, nConstraints): # Print constraint name line = conNames[row] + (" "*constraintPadding) + (" "*(maxConNameLen - len(conNames[row]))) + "|" # Print each coefficient for col in xrange(0, nVariables): # Format entry token = numFmt % coefficients[nVariables*row + col] if len(token) > maxColWidth: token = altFmt % coefficients[nVariables*row + col] # Pad with trailing whitespace token += " "*(maxColWidth - len(token)) # Add to line line += " " + token + " " line += "|" + (" "*constraintPadding) + "|" # Add constraint vector token = numFmt % constraints[row] if len(token) > maxConstraintColWidth: token = altFmt % constraints[row] # Pad with trailing whitespace token += " "*(maxConstraintColWidth - len(token)) line += " " + token + " |" print(line+'\n') print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \ " "*((maxColWidth+2)*nVariables - 4) + "--+" + \ (" "*constraintPadding) + "+--" + (" "*(maxConstraintColWidth-1))\ + "--+"+'\n') return (coefficients, costs, constraints)
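# Illustrative sketch (independent of to_standard_form): the slack/excess
# column layout described in the docstring above, applied to a two-constraint
# example.  Regular variables occupy the first columns; each <= row then gets
# a +1 slack column and each >= row a -1 excess column, so every inequality
# becomes an equality over nonnegative variables.  Handling of the constant
# term here follows the usual textbook form rather than the exact bookkeeping
# in the method.
import array

#   x1 + 3*x2 <= 4   ->   x1 + 3*x2 + s1      = 4
#  2*x1 -  x2 >= 1   ->  2*x1 -  x2      - e1 = 1
n_reg = 2
rows = [([1.0, 3.0], '<=', 4.0), ([2.0, -1.0], '>=', 1.0)]
n_vars = n_reg + len(rows)          # one extra column per inequality
A = array.array('d', [0.0] * (len(rows) * n_vars))
b = array.array('d', [0.0] * len(rows))
for i, (coefs, sense, rhs) in enumerate(rows):
    for j, c in enumerate(coefs):
        A[i * n_vars + j] = c
    A[i * n_vars + n_reg + i] = 1.0 if sense == '<=' else -1.0
    b[i] = rhs

assert list(A) == [1.0, 3.0, 1.0, 0.0, 2.0, -1.0, 0.0, -1.0]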
def collect_linear_terms(block, unfixed): # # Variables are constraints of block # Constraints are unfixed variables of block and the parent model. # vnames = set() for (name, data) in block.component_map(Constraint, active=True).items(): vnames.add((name, data.is_indexed())) cnames = set(unfixed) for (name, data) in block.component_map(Var, active=True).items(): cnames.add((name, data.is_indexed())) # A = {} b_coef = {} c_rhs = {} c_sense = {} d_sense = None v_domain = {} # # Collect objective # for (oname, odata) in block.component_map(Objective, active=True).items(): for ndx in odata: if odata[ndx].sense == maximize: o_terms = generate_canonical_repn(-1 * odata[ndx].expr, compute_values=False) d_sense = minimize else: o_terms = generate_canonical_repn(odata[ndx].expr, compute_values=False) d_sense = maximize for i in range(len(o_terms.variables)): c_rhs[o_terms.variables[i].parent_component().name, o_terms.variables[i].index()] = o_terms.linear[i] # Stop after the first objective break # # Collect constraints # for (name, data) in block.component_map(Constraint, active=True).items(): for ndx in data: con = data[ndx] body_terms = generate_canonical_repn(con.body, compute_values=False) lower_terms = generate_canonical_repn( con.lower, compute_values=False) if not con.lower is None else None upper_terms = generate_canonical_repn( con.upper, compute_values=False) if not con.upper is None else None # if body_terms.constant is None: body_terms.constant = 0 if not lower_terms is None and not lower_terms.variables is None: raise RuntimeError( "Error during dualization: Constraint '%s' has a lower bound that is non-constant" % name) if not upper_terms is None and not upper_terms.variables is None: raise RuntimeError( "Error during dualization: Constraint '%s' has an upper bound that is non-constant" % name) # for i in range(len(body_terms.variables)): varname = body_terms.variables[i].parent_component().name varndx = body_terms.variables[i].index() A.setdefault(body_terms.variables[i].parent_component().name, {}).setdefault(varndx, []).append( Bunch(coef=body_terms.linear[i], var=name, ndx=ndx)) # if not con.equality: # # Inequality constraint # if lower_terms is None or lower_terms.constant is None: # # body <= upper # v_domain[name, ndx] = -1 b_coef[name, ndx] = upper_terms.constant - body_terms.constant elif upper_terms is None or upper_terms.constant is None: # # lower <= body # v_domain[name, ndx] = 1 b_coef[name, ndx] = lower_terms.constant - body_terms.constant else: # # lower <= body <= upper # # Dual for lower bound # ndx_ = tuple(list(ndx) + ['lb']) v_domain[name, ndx_] = 1 b_coef[name, ndx_] = lower_terms.constant - body_terms.constant # # Dual for upper bound # ndx_ = tuple(list(ndx) + ['ub']) v_domain[name, ndx_] = -1 b_coef[name, ndx_] = upper_terms.constant - body_terms.constant else: # # Equality constraint # v_domain[name, ndx] = 0 b_coef[name, ndx] = lower_terms.constant - body_terms.constant # # Collect bound constraints # def all_vars(block): """ This conditionally chains together the active variables in the current block with the active variables in all of the parent blocks (if any exist). 
""" while not block is None: for (name, data) in block.component_map(Var, active=True).items(): yield (name, data) block = block.parent_block() for (name, data) in all_vars(block): # # Skip fixed variables (in the parent) # if not (name, data.is_indexed()) in cnames: continue # # Iterate over all variable indices # for ndx in data: var = data[ndx] bounds = var.bounds if bounds[0] is None and bounds[1] is None: c_sense[name, ndx] = 'e' elif bounds[0] is None: if bounds[1] == 0.0: c_sense[name, ndx] = 'g' else: c_sense[name, ndx] = 'e' # # Add constraint that defines the upper bound # name_ = name + "_upper_" varname = data.parent_component().name varndx = data[ndx].index() A.setdefault(varname, {}).setdefault(varndx, []).append( Bunch(coef=1.0, var=name_, ndx=ndx)) # v_domain[name_, ndx] = -1 b_coef[name_, ndx] = bounds[1] elif bounds[1] is None: if bounds[0] == 0.0: c_sense[name, ndx] = 'l' else: c_sense[name, ndx] = 'e' # # Add constraint that defines the lower bound # name_ = name + "_lower_" varname = data.parent_component().name varndx = data[ndx].index() A.setdefault(varname, {}).setdefault(varndx, []).append( Bunch(coef=1.0, var=name_, ndx=ndx)) # v_domain[name_, ndx] = 1 b_coef[name_, ndx] = bounds[0] else: # Bounded above and below c_sense[name, ndx] = 'e' # # Add constraint that defines the upper bound # name_ = name + "_upper_" varname = data.parent_component().name varndx = data[ndx].index() A.setdefault(varname, {}).setdefault(varndx, []).append( Bunch(coef=1.0, var=name_, ndx=ndx)) # v_domain[name_, ndx] = -1 b_coef[name_, ndx] = bounds[1] # # Add constraint that defines the lower bound # name_ = name + "_lower_" varname = data.parent_component().name varndx = data[ndx].index() A.setdefault(varname, {}).setdefault(varndx, []).append( Bunch(coef=1.0, var=name_, ndx=ndx)) # v_domain[name_, ndx] = 1 b_coef[name_, ndx] = bounds[0] # return (A, b_coef, c_rhs, c_sense, d_sense, vnames, cnames, v_domain)
def collect_linear_terms(block, unfixed): # # Variables are constraints of block # Constraints are unfixed variables of block and the parent model. # vnames = set() for (name, data) in block.component_map(Constraint, active=True).items(): vnames.add((name, data.is_indexed())) cnames = set(unfixed) for (name, data) in block.component_map(Var, active=True).items(): cnames.add((name, data.is_indexed())) # A = {} b_coef = {} c_rhs = {} c_sense = {} d_sense = None v_domain = {} # # Collect objective # for (oname, odata) in block.component_map(Objective, active=True).items(): for ndx in odata: if odata[ndx].sense == maximize: o_terms = generate_canonical_repn(-1*odata[ndx].expr, compute_values=False) d_sense = minimize else: o_terms = generate_canonical_repn(odata[ndx].expr, compute_values=False) d_sense = maximize for i in range(len(o_terms.variables)): c_rhs[ o_terms.variables[i].parent_component().name, o_terms.variables[i].index() ] = o_terms.linear[i] # Stop after the first objective break # # Collect constraints # for (name, data) in block.component_map(Constraint, active=True).items(): for ndx in data: con = data[ndx] body_terms = generate_canonical_repn(con.body, compute_values=False) lower_terms = generate_canonical_repn(con.lower, compute_values=False) if not con.lower is None else None upper_terms = generate_canonical_repn(con.upper, compute_values=False) if not con.upper is None else None # if body_terms.constant is None: body_terms.constant = 0 if not lower_terms is None and not lower_terms.variables is None: raise RuntimeError("Error during dualization: Constraint '%s' has a lower bound that is non-constant" % name) if not upper_terms is None and not upper_terms.variables is None: raise RuntimeError("Error during dualization: Constraint '%s' has an upper bound that is non-constant" % name) # for i in range(len(body_terms.variables)): varname = body_terms.variables[i].parent_component().name varndx = body_terms.variables[i].index() A.setdefault(body_terms.variables[i].parent_component().name, {}).setdefault(varndx,[]).append( Bunch(coef=body_terms.linear[i], var=name, ndx=ndx) ) # if not con.equality: # # Inequality constraint # if lower_terms is None or lower_terms.constant is None: # # body <= upper # v_domain[name, ndx] = -1 b_coef[name,ndx] = upper_terms.constant - body_terms.constant elif upper_terms is None or upper_terms.constant is None: # # lower <= body # v_domain[name, ndx] = 1 b_coef[name,ndx] = lower_terms.constant - body_terms.constant else: # # lower <= body <= upper # # Dual for lower bound # ndx_ = tuple(list(ndx) + ['lb']) v_domain[name, ndx_] = 1 b_coef[name,ndx_] = lower_terms.constant - body_terms.constant # # Dual for upper bound # ndx_ = tuple(list(ndx) + ['ub']) v_domain[name, ndx_] = -1 b_coef[name,ndx_] = upper_terms.constant - body_terms.constant else: # # Equality constraint # v_domain[name, ndx] = 0 b_coef[name,ndx] = lower_terms.constant - body_terms.constant # # Collect bound constraints # def all_vars(block): """ This conditionally chains together the active variables in the current block with the active variables in all of the parent blocks (if any exist). 
""" while not block is None: for (name, data) in block.component_map(Var, active=True).items(): yield (name, data) block = block.parent_block() for (name, data) in all_vars(block): # # Skip fixed variables (in the parent) # if not (name, data.is_indexed()) in cnames: continue # # Iterate over all variable indices # for ndx in data: var = data[ndx] bounds = var.bounds if bounds[0] is None and bounds[1] is None: c_sense[name,ndx] = 'e' elif bounds[0] is None: if bounds[1] == 0.0: c_sense[name,ndx] = 'g' else: c_sense[name,ndx] = 'e' # # Add constraint that defines the upper bound # name_ = name + "_upper_" varname = data.parent_component().name varndx = data[ndx].index() A.setdefault(varname, {}).setdefault(varndx,[]).append( Bunch(coef=1.0, var=name_, ndx=ndx) ) # v_domain[name_,ndx] = -1 b_coef[name_,ndx] = bounds[1] elif bounds[1] is None: if bounds[0] == 0.0: c_sense[name,ndx] = 'l' else: c_sense[name,ndx] = 'e' # # Add constraint that defines the lower bound # name_ = name + "_lower_" varname = data.parent_component().name varndx = data[ndx].index() A.setdefault(varname, {}).setdefault(varndx,[]).append( Bunch(coef=1.0, var=name_, ndx=ndx) ) # v_domain[name_,ndx] = 1 b_coef[name_,ndx] = bounds[0] else: # Bounded above and below c_sense[name,ndx] = 'e' # # Add constraint that defines the upper bound # name_ = name + "_upper_" varname = data.parent_component().name varndx = data[ndx].index() A.setdefault(varname, {}).setdefault(varndx,[]).append( Bunch(coef=1.0, var=name_, ndx=ndx) ) # v_domain[name_,ndx] = -1 b_coef[name_,ndx] = bounds[1] # # Add constraint that defines the lower bound # name_ = name + "_lower_" varname = data.parent_component().name varndx = data[ndx].index() A.setdefault(varname, {}).setdefault(varndx,[]).append( Bunch(coef=1.0, var=name_, ndx=ndx) ) # v_domain[name_,ndx] = 1 b_coef[name_,ndx] = bounds[0] # return (A, b_coef, c_rhs, c_sense, d_sense, vnames, cnames, v_domain)