def test_unique_component_name(self):
    # unique_component_name: names not already on the model are returned
    # unchanged; clashing names (attributes or components) get a "_<digit>"
    # suffix appended until the result is unused.
    m = ConcreteModel()
    m.x = 5
    m.y = Var()
    # 'z' is free: returned as-is
    name = unique_component_name(m, 'z')
    self.assertEqual(name, 'z')
    # 'x' clashes with the plain attribute m.x
    name = unique_component_name(m, 'x')
    self.assertEqual(len(name), 3)
    self.assertEqual(name[:2], 'x_')
    self.assertIn(name[2], '0123456789')
    # 'y' clashes with the Var component m.y
    name = unique_component_name(m, 'y')
    self.assertEqual(len(name), 3)
    self.assertEqual(name[:2], 'y_')
    self.assertIn(name[2], '0123456789')
    # 'component' clashes with the Block method of the same name
    name = unique_component_name(m, 'component')
    self.assertEqual(len(name), 11)
    self.assertEqual(name[:10], 'component_')
    self.assertIn(name[10], '0123456789')
    # Fill in y_0 .. y_9 so every single-digit suffix collides; the
    # generated name must then grow to a two-digit suffix.
    for i in range(10):
        setattr(m, 'y_%s' % i, 0)
    name = unique_component_name(m, 'y')
    self.assertEqual(len(name), 4)
    self.assertEqual(name[:2], 'y_')
    self.assertIn(name[2], '0123456789')
    self.assertIn(name[3], '0123456789')
def _add_sensitivity_data(self, param_list):
    # Populate block._sens_data_list with (VarData, ParamData, index in
    # param_list, component index) tuples.  Each mutable Param in param_list
    # gets a mirroring Var added to the block; each fixed Var gets a
    # mirroring mutable Param.  Raises ValueError for immutable Params or
    # unfixed Vars.
    block = self.block
    sens_data_list = block._sens_data_list
    for i, comp in enumerate(param_list):
        if comp.ctype is Param:
            parent = comp.parent_component()
            if not parent.mutable:
                raise ValueError(
                    "Parameters within paramList must be mutable. "
                    "Got %s, which is not mutable." % comp.name
                )
            # Add a Var mirroring the Param (same index set, initialized
            # to the Param's current values):
            if comp.is_indexed():
                d = {k: value(comp[k]) for k in comp.index_set()}
                var = Var(comp.index_set(), initialize=d)
            else:
                d = value(comp)
                var = Var(initialize=d)
            name = self.get_default_var_name(parent.local_name)
            name = unique_component_name(block, name)
            block.add_component(name, var)

            if comp.is_indexed():
                sens_data_list.extend(
                    (var[idx], param, i, idx)
                    for idx, param in _generate_component_items(comp)
                )
            else:
                sens_data_list.append((var, comp, i, _NotAnIndex))

        elif comp.ctype is Var:
            parent = comp.parent_component()
            # Every data member must be fixed for the Var to act as a
            # "parameter" in the sensitivity problem.
            for _, data in _generate_component_items(comp):
                if not data.fixed:
                    raise ValueError(
                        "Specified \"parameter\" variables must be "
                        "fixed. Got %s, which is not fixed." % comp.name
                    )
            # Add a mutable Param mirroring the Var:
            if comp.is_indexed():
                d = {k: value(comp[k]) for k in comp.index_set()}
                param = Param(comp.index_set(), mutable=True, initialize=d)
            else:
                d = value(comp)
                param = Param(mutable=True, initialize=d)
            name = self.get_default_param_name(parent.local_name)
            name = unique_component_name(block, name)
            block.add_component(name, param)

            if comp.is_indexed():
                sens_data_list.extend(
                    (var, param[idx], i, idx)
                    for idx, var in _generate_component_items(comp)
                )
            else:
                sens_data_list.append((comp, param, i, _NotAnIndex))
def __init__(self, model, decision_variables, ext_fcn_surrogate_map_rule, config):
    """Set up the trust-region interface around a clone of ``model``.

    The decision-variable references are temporarily attached to the
    original model so that ``clone()`` remaps them onto the cloned
    instance; the temporary attribute is removed from the original
    afterwards.
    """
    self.original_model = model
    self.config = config

    # Stash the decision variables on the original model so the clone
    # carries remapped references to them, then read them back.
    stash_name = unique_component_name(self.original_model, 'tmp')
    setattr(self.original_model, stash_name, decision_variables)
    self.model = self.original_model.clone()
    self.decision_variables = getattr(self.model, stash_name)
    delattr(self.original_model, stash_name)

    # Bookkeeping block for transformation data on the cloned model.
    self.data = Block()
    self.model.add_component(
        unique_component_name(self.model, 'trf_data'), self.data)

    self.basis_expression_rule = ext_fcn_surrogate_map_rule
    self.efSet = None
    self.solver = SolverFactory(self.config.solver)
def _Combine(port, name, index_set):
    # Expand the "inlet" side of a port member: create one expanded
    # variable (evar) per incoming arc and tie them to the member via the
    # constraint "var == sum(evars)".  Returns the list of created evars
    # (empty when no sources, or when the 1-to-1 shortcut applies).
    port_parent = port.parent_block()
    var = port.vars[name]
    in_vars = []
    sources = port.sources(active=True)

    if not len(sources):
        return in_vars

    if len(sources) == 1 and len(sources[0].source.dests(active=True)) == 1:
        # This is a 1-to-1 connection, no need for evar, just equality.
        arc = sources[0]
        Port._add_equality_constraint(arc, name, index_set)
        return in_vars

    for arc in sources:
        eblock = arc.expanded_block
        # Make and record new variables for every arc with this member.
        evar = Port._create_evar(port.vars[name], name, eblock, index_set)
        in_vars.append(evar)

    # Create constraint: var == sum of evars
    # Same logic as Port._Split
    cname = unique_component_name(
        port_parent,
        "%s_%s_insum" % (alphanum_label_from_name(port.local_name), name))

    def rule(m, *args):
        # args is empty for a scalar member; otherwise it is the member's
        # index and is used to index both the evars and the port var.
        if len(args):
            return sum(evar[args] for evar in in_vars) == var[args]
        else:
            return sum(evar for evar in in_vars) == var

    con = Constraint(index_set, rule=rule)
    port_parent.add_component(cname, con)
    return in_vars
def _transform_boolean_varData(self, bool_vardata, new_varlists):
    # Associate a Binary variable with bool_vardata, creating (at most) one
    # VarList per parent BooleanVar component for this transformation call.
    #
    # This transformation tries to group the binaries it creates for indexed
    # BooleanVars onto the same VarList. This won't work across separate
    # calls to the transformation, but within one call it's fine. So we have
    # two cases: 1) either we have created a VarList for this
    # BooleanVarData's parent_component, but have yet to add its binary to
    # said list, or 2) we have neither the binary nor the VarList
    parent_component = bool_vardata.parent_component()
    new_varlist = new_varlists.get(parent_component)
    if new_varlist is None and \
            bool_vardata.get_associated_binary() is None:
        # Case 2) we have neither the VarList nor an associated binary
        parent_block = bool_vardata.parent_block()
        new_var_list_name = unique_component_name(
            parent_block, parent_component.local_name + '_asbinary')
        new_varlist = VarList(domain=Binary)
        setattr(parent_block, new_var_list_name, new_varlist)
        new_varlists[parent_component] = new_varlist

    if bool_vardata.get_associated_binary() is None:
        # Case 1) we already have a VarList, but need to create the
        # associated binary
        new_binary_vardata = new_varlist.add()
        bool_vardata.associate_binary_var(new_binary_vardata)
        # Mirror the BooleanVar's current value and fixed status onto the
        # new binary.
        if bool_vardata.value is not None:
            new_binary_vardata.value = int(bool_vardata.value)
        if bool_vardata.fixed:
            new_binary_vardata.fix()
def _add_xor_constraint(self, disjunction, transBlock):
    # Put the disjunction constraint on the transformation block and
    # determine whether it is an OR or XOR constraint.
    # We never do this for just a DisjunctionData because we need to know
    # about the index set of its parent component (so that we can make the
    # index of this constraint match). So if we called this on a
    # DisjunctionData, we did something wrong.
    assert isinstance(disjunction, Disjunction)

    # first check if the constraint already exists
    if disjunction._algebraic_constraint is not None:
        # _algebraic_constraint is a weakref; dereference before returning
        return disjunction._algebraic_constraint()

    # add the XOR (or OR) constraints to parent block (with unique name)
    # It's indexed if this is an IndexedDisjunction, not otherwise
    orC = Constraint(disjunction.index_set()) if \
        disjunction.is_indexed() else Constraint()
    # The name used to indicate if there were OR or XOR disjunctions,
    # however now that Disjunctions are allowed to mix the state we
    # can no longer make that distinction in the name.
    #    nm = '_xor' if xor else '_or'
    nm = '_xor'
    orCname = unique_component_name(
        transBlock,
        disjunction.getname(fully_qualified=True,
                            name_buffer=NAME_BUFFER) + nm)
    transBlock.add_component(orCname, orC)
    # record the constraint on the disjunction (weakly, so the constraint
    # can be garbage collected with its block)
    disjunction._algebraic_constraint = weakref_ref(orC)

    return orC
def _apply_to(self, model, **kwds):
    # Associate a Binary variable with every BooleanVar data member on the
    # model (one VarList per BooleanVar component, created lazily), then
    # convert logical constraints to linear ones in the global context and
    # within each active Disjunct.
    for boolean_var in model.component_objects(
            ctype=BooleanVar, descend_into=(Block, Disjunct)):
        new_varlist = None
        for bool_vardata in boolean_var.values():
            if new_varlist is None and bool_vardata.get_associated_binary(
                    ) is None:
                # First data member needing a binary: create the VarList
                # on the model under a non-clashing name.
                new_var_list_name = unique_component_name(
                    model, boolean_var.local_name + '_asbinary')
                new_varlist = VarList(domain=Binary)
                setattr(model, new_var_list_name, new_varlist)

            if bool_vardata.get_associated_binary() is None:
                new_binary_vardata = new_varlist.add()
                bool_vardata.associate_binary_var(new_binary_vardata)
                # Mirror value and fixed status onto the new binary.
                if bool_vardata.value is not None:
                    new_binary_vardata.value = int(bool_vardata.value)
                if bool_vardata.fixed:
                    new_binary_vardata.fix()

    # Process statements in global (entire model) context
    _process_logical_constraints_in_logical_context(model)

    # Process statements that appear in disjuncts
    for disjunct in model.component_data_objects(
            Disjunct, descend_into=(Block, Disjunct), active=True):
        _process_logical_constraints_in_logical_context(disjunct)
def _apply_to_impl(self, instance):
    # Main transformation driver: cache the partition configuration, build
    # a throwaway model of References to all global Constraints/Vars (for
    # the bounds-computation callback), then transform each preprocessed
    # target.
    self.variable_partitions = self._config.variable_partitions if \
        self._config.variable_partitions is not \
        None else {}
    self.partitioning_method = self._config.variable_partitioning_method

    # create a model to store the global constraints on that we will pass to
    # the compute_bounds_method, for if it wants them. We're making it a
    # separate model because we don't need it again
    global_constraints = ConcreteModel()
    for cons in instance.component_objects(
            Constraint, active=True, descend_into=Block,
            sort=SortComponents.deterministic):
        global_constraints.add_component(
            unique_component_name(
                global_constraints,
                cons.getname(fully_qualified=True,
                             name_buffer=NAME_BUFFER)),
            Reference(cons))
    for var in instance.component_objects(
            Var, descend_into=(Block, Disjunct),
            sort=SortComponents.deterministic):
        global_constraints.add_component(
            unique_component_name(
                global_constraints,
                var.getname(fully_qualified=True,
                            name_buffer=NAME_BUFFER)),
            Reference(var))
    self._global_constraints = global_constraints

    # we can support targets as usual.
    targets = self._config.targets
    knownBlocks = {}
    if targets is None:
        targets = (instance, )
    # Disjunctions in targets will transform their Disjuncts which will in
    # turn transform all the GDP components declared on themselves. So we
    # only need to list root nodes of the GDP tree as targets, and
    # everything will be transformed (and in the correct order)
    targets = self._preprocess_targets(targets, instance, knownBlocks)
    for t in targets:
        if t.ctype is Disjunction:
            # After preprocessing, we know that this is not indexed.
            self._transform_disjunctionData(t, t.index())
        else:
            # We know this is a DisjunctData after preprocessing
            self._transform_blockData(t)
def _add_relaxation_block(self, instance, name):
    """Create a transformation Block on ``instance`` and return it.

    The block is added under a unique name derived from ``name`` and the
    (name, block) pair is returned.
    """
    # [fix] The ``name`` argument was accepted and documented ("a unique
    # name based on name") but ignored in favor of a hardcoded string;
    # honor the caller-supplied base name instead.
    transBlockName = unique_component_name(instance, name)
    transBlock = Block()
    instance.add_component(transBlockName, transBlock)
    return transBlockName, transBlock
def _transform_block_components(self, block, disjunct, bigM, arg_list,
                                suffix_list):
    # Transform the contents of ``block`` (which lives on ``disjunct``),
    # first hoisting any already-transformed nested disjunctions' data up
    # to the parent block, then adding References to local variables, then
    # dispatching each active component to its registered handler.
    #
    # We find any transformed disjunctions that might be here because we
    # need to move their transformation blocks up onto the parent block
    # before we transform anything else on this block. Note that we do this
    # before we create references to local variables because we do not want
    # duplicate references to indicator variables and local variables on
    # nested disjuncts.
    disjunctBlock = disjunct._transformation_block()
    destinationBlock = disjunctBlock.parent_block()
    for obj in block.component_data_objects(
            Disjunction,
            sort=SortComponents.deterministic,
            descend_into=(Block)):
        if obj.algebraic_constraint is None:
            # This could be bad if it's active since that means its
            # untransformed, but we'll wait to yell until the next loop
            continue
        # get this disjunction's relaxation block.
        transBlock = obj.algebraic_constraint().parent_block()
        # move transBlock up to parent component
        self._transfer_transBlock_data(transBlock, destinationBlock)
        # we leave the transformation block because it still has the XOR
        # constraints, which we want to be on the parent disjunct.

    # Find all the variables declared here (including the indicator_var) and
    # add a reference on the transformation block so these will be
    # accessible when the Disjunct is deactivated. We don't descend into
    # Disjuncts because we just moved the references to their local
    # variables up in the previous loop.
    varRefBlock = disjunctBlock.localVarReferences
    for v in block.component_objects(Var, descend_into=Block, active=None):
        varRefBlock.add_component(
            unique_component_name(
                varRefBlock,
                v.getname(fully_qualified=True,
                          name_buffer=NAME_BUFFER)),
            Reference(v))

    # Now look through the component map of block and transform everything
    # we have a handler for. Yell if we don't know how to handle it. (Note
    # that because we only iterate through active components, this means
    # non-ActiveComponent types cannot have handlers.)
    for obj in block.component_objects(active=True, descend_into=False):
        handler = self.handlers.get(obj.ctype, None)
        if not handler:
            # handler is None: unknown type -> error.  handler is False:
            # a type registered as deliberately ignored -> skip.
            if handler is None:
                raise GDP_Error(
                    "No BigM transformation handler registered "
                    "for modeling components of type %s. If your "
                    "disjuncts contain non-GDP Pyomo components that "
                    "require transformation, please transform them first."
                    % obj.ctype)
            continue
        # obj is what we are transforming, we pass disjunct
        # through so that we will have access to the indicator
        # variables down the line.
        handler(obj, disjunct, bigM, arg_list, suffix_list)
def _replace_parameters_in_constraints(self, variableSubMap):
    # Clone every active Objective and Constraint onto self.block with the
    # user-declared parameters replaced (via variableSubMap) by variables,
    # deactivating the originals.  Returns a ComponentMap from each new
    # component to the original it replaces.
    # NOTE(review): assumes block.constList (a ConstraintList) already
    # exists on self.block — confirm against the caller.
    instance = self.model_instance
    block = self.block
    # Visitor that we will use to replace user-provided parameters
    # in the objective and the constraints.
    param_replacer = ExpressionReplacementVisitor(
        substitute=variableSubMap,
        remove_named_expressions=True,
    )
    # TODO: Flag to ExpressionReplacementVisitor to only replace
    # named expressions if a node has been replaced within that
    # expression.

    new_old_comp_map = ComponentMap()

    # clone Objective, add to Block, and update any Expressions
    for obj in list(instance.component_data_objects(Objective,
                                                    active=True,
                                                    descend_into=True)):
        tempName = unique_component_name(block, obj.local_name)
        new_expr = param_replacer.dfs_postorder_stack(obj.expr)
        block.add_component(tempName, Objective(expr=new_expr))
        new_old_comp_map[block.component(tempName)] = obj
        obj.deactivate()

    # clone Constraints, add to Block, and update any Expressions
    #
    # Unfortunate that this deactivates and replaces constraints
    # even if they don't contain the parameters.
    #
    old_con_list = list(instance.component_data_objects(
        Constraint, active=True, descend_into=True))
    last_idx = 0
    for con in old_con_list:
        if (con.equality or con.lower is None or con.upper is None):
            # Equality or one-sided inequality: replace as a single
            # constraint.
            new_expr = param_replacer.dfs_postorder_stack(con.expr)
            block.constList.add(expr=new_expr)
            last_idx += 1
            new_old_comp_map[block.constList[last_idx]] = con
        else:
            # Constraint must be a ranged inequality, break into
            # separate constraints
            new_body = param_replacer.dfs_postorder_stack(con.body)
            new_lower = param_replacer.dfs_postorder_stack(con.lower)
            new_upper = param_replacer.dfs_postorder_stack(con.upper)

            # Add constraint for lower bound
            block.constList.add(expr=(new_lower <= new_body))
            last_idx += 1
            new_old_comp_map[block.constList[last_idx]] = con

            # Add constraint for upper bound
            block.constList.add(expr=(new_body <= new_upper))
            last_idx += 1
            new_old_comp_map[block.constList[last_idx]] = con
        con.deactivate()

    return new_old_comp_map
def _transform_block_components(self, block, disjunct, var_substitute_map,
                                zero_substitute_map):
    # As opposed to bigm, in hull the only special thing we need to do for
    # nested Disjunctions is to make sure that we move up local var
    # references and also references to the disaggregated variables so that
    # all will be accessible after we transform this Disjunct. The indicator
    # variables and disaggregated variables of the inner disjunction will
    # need to be disaggregated again, but the transformed constraints will
    # not be. But this way nothing will get double-bigm-ed. (If an
    # untransformed disjunction is lurking here, we will catch it below).
    disjunctBlock = disjunct._transformation_block()
    destinationBlock = disjunctBlock.parent_block()
    for obj in block.component_data_objects(
            Disjunction,
            sort=SortComponents.deterministic,
            descend_into=(Block)):
        if obj.algebraic_constraint is None:
            # This could be bad if it's active since that means its
            # untransformed, but we'll wait to yell until the next loop
            continue
        # get this disjunction's relaxation block.
        transBlock = obj.algebraic_constraint().parent_block()
        self._transfer_var_references(transBlock, destinationBlock)

    # add references to all local variables on block (including the
    # indicator_var). Note that we do this after we have moved up the
    # transformation blocks for nested disjunctions, so that we don't have
    # duplicate references.
    varRefBlock = disjunctBlock.localVarReferences
    for v in block.component_objects(Var, descend_into=Block, active=None):
        varRefBlock.add_component(
            unique_component_name(
                varRefBlock,
                v.getname(fully_qualified=True,
                          name_buffer=NAME_BUFFER)),
            Reference(v))

    # Look through the component map of block and transform everything we
    # have a handler for. Yell if we don't know how to handle it. (Note that
    # because we only iterate through active components, this means
    # non-ActiveComponent types cannot have handlers.)
    for obj in block.component_objects(active=True, descend_into=False):
        handler = self.handlers.get(obj.ctype, None)
        if not handler:
            # handler is None: unknown type -> error.  handler is False:
            # a type registered as deliberately ignored -> skip.
            if handler is None:
                raise GDP_Error(
                    "No hull transformation handler registered "
                    "for modeling components of type %s. If your "
                    "disjuncts contain non-GDP Pyomo components that "
                    "require transformation, please transform them first."
                    % obj.ctype)
            continue
        # obj is what we are transforming, we pass disjunct
        # through so that we will have access to the indicator
        # variables down the line.
        handler(obj, disjunct, var_substitute_map, zero_substitute_map)
def _create_transformation_block(self, context):
    """Attach and return a fresh logic_to_linear transformation Block.

    The block carries a ConstraintList for the linearized constraints plus
    BooleanVarList/VarList containers for any augmented variables.
    """
    block = Block(doc="Transformation objects for logic_to_linear")
    setattr(context,
            unique_component_name(context, 'logic_to_linear'),
            block)
    block.transformed_constraints = ConstraintList()
    block.augmented_vars = BooleanVarList()
    block.augmented_vars_asbinary = VarList(domain=Binary)
    return block
def _add_blocks(self, instance):
    """Attach an expanded Block (indexed like the arc) for every arc."""
    # iterate over component_objects so we can make indexed blocks
    for arc in instance.component_objects(**obj_iter_kwds):
        expanded = Block(arc.index_set())
        parent = arc.parent_block()
        parent.add_component(
            unique_component_name(parent, "%s_expanded" % arc.local_name),
            expanded)
        arc._expanded_block = expanded
        if arc.is_indexed():
            # point each ArcData at its corresponding BlockData
            for idx in arc:
                arc[idx]._expanded_block = expanded[idx]
def _add_blocks(self, instance):
    """Create one expanded Block per arc, mirroring the arc's index set."""
    # iterate over component_objects so we can make indexed blocks
    for arc in instance.component_objects(**obj_iter_kwds):
        new_block = Block(arc.index_set())
        block_name = unique_component_name(
            arc.parent_block(), "%s_expanded" % arc.local_name)
        arc.parent_block().add_component(block_name, new_block)
        arc._expanded_block = new_block
        if not arc.is_indexed():
            continue
        # wire each ArcData to the matching BlockData
        for i in arc:
            arc[i]._expanded_block = new_block[i]
def _add_transformation_block(self, instance):
    """Attach and return the bigm transformation Block on ``instance``.

    The block holds an integer-indexed Block for relaxed disjuncts and the
    {'lb', 'ub'} set used to index bound constraints.
    """
    relaxation = Block()
    instance.add_component(
        unique_component_name(instance, '_pyomo_gdp_bigm_relaxation'),
        relaxation)
    relaxation.relaxedDisjuncts = Block(NonNegativeIntegers)
    relaxation.lbub = Set(initialize=['lb', 'ub'])
    return relaxation
def compute_optimal_bounds(expr, global_constraints, opt):
    """Returns a tuple (LB, UB) where LB and UB are the results of
    minimizing and maximizing expr over the variable bounds and the
    constraints on the global_constraints block. Note that if expr is
    nonlinear, even if one of the min and max problems is convex, the
    other won't be!

    Arguments:
    ----------
    expr : The subexpression whose bounds we will return
    global_constraints : A Block which contains the global Constraints and
                         Vars of the original model
    opt : A configured SolverFactory to use to minimize and maximize expr
          over the set defined by global_constraints. Note that if expr is
          nonlinear, opt will need to be capable of optimizing nonconvex
          problems.
    """
    if opt is None:
        raise GDP_Error("No solver was specified to optimize the "
                        "subproblems for computing expression bounds! "
                        "Please specify a configured solver in the "
                        "'compute_bounds_solver' argument if using "
                        "'compute_optimal_bounds.'")

    # add temporary objective and calculate bounds
    obj = Objective(expr=expr)
    global_constraints.add_component(
        unique_component_name(global_constraints, "tmp_obj"), obj)
    # Solve first minimizing, to get a lower bound
    results = opt.solve(global_constraints)
    if verify_successful_solve(results) is not NORMAL:
        # [fix] the adjacent string literals used to concatenate without a
        # space, logging "expression <expr>did not solve normally".
        logger.warning("Problem to find lower bound for expression %s "
                       "did not solve normally.\n\n%s" % (expr, results))
        LB = None
    else:
        # This has some risks, if you're using a solver the gives a lower
        # bound, getting that would be better. But this is why this is a
        # callback.
        LB = value(obj.expr)
    # Now solve maximizing, to get an upper bound
    obj.sense = maximize
    results = opt.solve(global_constraints)
    if verify_successful_solve(results) is not NORMAL:
        # [fix] same missing-space message defect as above.
        logger.warning("Problem to find upper bound for expression %s "
                       "did not solve normally.\n\n%s" % (expr, results))
        UB = None
    else:
        UB = value(obj.expr)

    # clean up the temporary objective
    global_constraints.del_component(obj)
    del obj

    return (LB, UB)
def set_value(self, expr):
    # Build this DisjunctionData's disjunct list from ``expr``: each entry
    # may be an existing Disjunct (appended directly) or a relational
    # expression / iterable of relational expressions (wrapped in an
    # auto-created Disjunct holding a ConstraintList).
    for e in expr:
        # The user gave us a proper Disjunct block
        # [ESJ 06/21/2019] This is really an issue with the reclassifier,
        # but in the case where you are iteratively adding to an
        # IndexedDisjunct indexed by Any which has already been transformed,
        # the new Disjuncts are Blocks already. This catches them for who
        # they are anyway.
        if isinstance(e, _DisjunctData):
        #if hasattr(e, 'type') and e.ctype == Disjunct:
            self.disjuncts.append(e)
            continue
        # The user was lazy and gave us a single constraint
        # expression or an iterable of expressions
        expressions = []
        if hasattr(e, '__iter__'):
            e_iter = e
        else:
            e_iter = [e]
        for _tmpe in e_iter:
            try:
                isexpr = _tmpe.is_expression_type()
            except AttributeError:
                isexpr = False
            if not isexpr or not _tmpe.is_relational():
                # only mention the containing iterable when e itself was
                # the iterable
                msg = "\n\tin %s" % (type(e), ) if e_iter is e else ""
                raise ValueError(
                    "Unexpected term for Disjunction %s.\n"
                    "\tExpected a Disjunct object, relational expression, "
                    "or iterable of\n"
                    "\trelational expressions but got %s%s"
                    % (self.name, type(_tmpe), msg))
            else:
                expressions.append(_tmpe)

        comp = self.parent_component()
        if comp._autodisjuncts is None:
            # lazily create the Any-indexed Disjunct container used to
            # hold auto-generated disjuncts
            b = self.parent_block()
            comp._autodisjuncts = Disjunct(Any)
            b.add_component(
                unique_component_name(b, comp.local_name + "_disjuncts"),
                comp._autodisjuncts)
            # TODO: I am not at all sure why we need to
            # explicitly construct this block - that should
            # happen automatically.
            comp._autodisjuncts.construct()
        disjunct = comp._autodisjuncts[len(comp._autodisjuncts)]
        disjunct.constraint = c = ConstraintList()
        for e in expressions:
            c.add(e)
        self.disjuncts.append(disjunct)
def _process_bilinear_constraints(block, v1, v2, var_values, bilinear_constrs):
    # Reformulate the bilinear term v1 * v2 (with v1 effectively discrete,
    # taking values in var_values) by introducing binary selection
    # variables and per-value increments of v2.  Returns False when v2
    # lacks the bounds required for the reformulation.
    # TODO check that the appropriate variable bounds exist.
    if not (v2.has_lb() and v2.has_ub()):
        logger.warning(
            textwrap.dedent("""\
            Attempting to transform bilinear term {v1} * {v2} using effectively
            discrete variable {v1}, but {v2} is missing a lower or upper bound:
            ({v2lb}, {v2ub}).
            """.format(v1=v1, v2=v2, v2lb=v2.lb, v2ub=v2.ub)).strip())
        return False
    blk = Block()
    unique_name = unique_component_name(
        block,
        ("%s_%s_bilinear" % (v1.local_name, v2.local_name)).replace(
            '[', '').replace(']', ''))
    block._induced_linearity_info.add_component(unique_name, blk)
    # TODO think about not using floats as indices in a set
    blk.valid_values = Set(initialize=sorted(var_values))
    blk.x_active = Var(blk.valid_values, domain=Binary, initialize=1)
    blk.v_increment = Var(blk.valid_values, domain=v2.domain,
                          bounds=(v2.lb, v2.ub), initialize=v2.value)
    # v2 is the sum of its per-value increments
    blk.v_defn = Constraint(expr=v2 == summation(blk.v_increment))

    @blk.Constraint(blk.valid_values)
    def v_lb(blk, val):
        return v2.lb * blk.x_active[val] <= blk.v_increment[val]

    @blk.Constraint(blk.valid_values)
    def v_ub(blk, val):
        return blk.v_increment[val] <= v2.ub * blk.x_active[val]

    # exactly one discrete value may be selected
    blk.select_one_value = Constraint(expr=summation(blk.x_active) == 1)
    # Categorize as case 1 or case 2
    for bilinear_constr in bilinear_constrs:
        # repn = generate_standard_repn(bilinear_constr.body)

        # Case 1: no other variables besides bilinear term in constraint. v1
        # (effectively discrete variable) is positive.
        # if (len(repn.quadratic_vars) == 1 and len(repn.linear_vars) == 0
        #         and repn.nonlinear_expr is None):
        #     _reformulate_case_1(v1, v2, discrete_constr, bilinear_constr)

        # NOTE: Case 1 is left unimplemented for now, because it involves
        # some messier logic with respect to how the transformation needs
        # to happen.

        # Case 2: this is everything else, but do we want to have a special
        # case if there are nonlinear expressions involved with the
        # constraint?
        pass
        _reformulate_case_2(blk, v1, v2, bilinear_constr)
    pass
def _create_using(self, model, **kwds):
    """Clone ``model``, apply the transformation to the clone, return it.

    The keyword arguments are temporarily stashed on the original model so
    that clone() remaps any references they hold to components of the new
    instance; transformation users cannot rely on argument side effects.
    """
    stash = unique_component_name(model, '_kwds')
    setattr(model, stash, kwds)
    instance = model.clone()
    # pull the (remapped) kwds off the clone and remove the stash from
    # both models
    kwds = getattr(instance, stash)
    delattr(model, stash)
    delattr(instance, stash)

    self._apply_to(instance, **kwds)
    return instance
def _add_dual_suffix(self, rHull):
    # Ensure rHull has an IMPORT "dual" Suffix.  rHull is our model and we
    # aren't giving it back (unless in the future we add a callback to do
    # basic steps to it...), so we just check if dual is there. If it's a
    # Suffix, we'll borrow it. If it's something else we'll rename it and
    # add the Suffix.
    dual = rHull.component("dual")
    if dual is None:
        rHull.dual = Suffix(direction=Suffix.IMPORT)
    else:
        if dual.ctype is Suffix:
            # an importable Suffix already exists; reuse it
            return
        # "dual" exists but is some other component: move it out of the
        # way (re-added under a unique name) and claim the "dual" name
        # for the Suffix.
        rHull.del_component(dual)
        rHull.dual = Suffix(direction=Suffix.IMPORT)
        rHull.add_component(unique_component_name(rHull, "dual"), dual)
def transform_to_standard_form(model): """ Recast all model inequality constraints of the form `a <= g(v)` (`<= b`) to the 'standard' form `a - g(v) <= 0` (and `g(v) - b <= 0`), in which `v` denotes all model variables and `a` and `b` are contingent on model parameters. Parameters ---------- model : ConcreteModel The model to search for constraints. This will descend into all active Blocks and sub-Blocks as well. Note ---- If `a` and `b` are identical and the constraint is not classified as an equality (i.e. the `equality` attribute of the constraint object is `False`), then the constraint is recast to the equality `g(v) == a`. """ # Note: because we will be adding / modifying the number of # constraints, we want to resolve the generator to a list before # starting. cons = list(model.component_data_objects( Constraint, descend_into=True, active=True)) for con in cons: if not con.equality: has_lb = con.lower is not None has_ub = con.upper is not None if has_lb and has_ub: if con.lower is con.upper: # recast as equality Constraint con.set_value(con.lower == con.body) else: # range inequality; split into two Constraints. uniq_name = unique_component_name(model, con.name + '_lb') model.add_component( uniq_name, Constraint(expr=con.lower - con.body <= 0) ) con.set_value(con.body - con.upper <= 0) elif has_lb: # not in standard form; recast. con.set_value(con.lower - con.body <= 0) elif has_ub: # move upper bound to body. con.set_value(con.body - con.upper <= 0) else: # unbounded constraint: deactivate con.deactivate()
def _getXorConstraint(self, disjunction):
    # Put the disjunction constraint on its parent block and
    # determine whether it is an OR or XOR constraint.

    # We never do this for just a DisjunctionData because we need
    # to know about the index set of its parent component. So if
    # we called this on a DisjunctionData, we did something wrong.
    assert isinstance(disjunction, Disjunction)
    parent = disjunction.parent_block()
    if hasattr(parent, "_gdp_transformation_info"):
        infodict = parent._gdp_transformation_info
        if type(infodict) is not dict:
            raise GDP_Error(
                "Component %s contains an attribute named "
                "_gdp_transformation_info. The transformation requires "
                "that it can create this attribute!" % parent.name)
        try:
            # On the off-chance that another GDP transformation went
            # first, the infodict may exist, but the specific map we
            # want will not be present
            orConstraintMap = infodict['disjunction_or_constraint']
        except KeyError:
            orConstraintMap = infodict['disjunction_or_constraint'] \
                = ComponentMap()
    else:
        infodict = parent._gdp_transformation_info = {}
        orConstraintMap = infodict['disjunction_or_constraint'] \
            = ComponentMap()

    # If the Constraint already exists, return it
    if disjunction in orConstraintMap:
        return orConstraintMap[disjunction]

    # add the XOR (or OR) constraints to parent block (with unique name)
    # It's indexed if this is an IndexedDisjunction, not otherwise
    orC = Constraint(disjunction.index_set()) if \
        disjunction.is_indexed() else Constraint()
    # The name used to indicate if there were OR or XOR disjunctions,
    # however now that Disjunctions are allowed to mix the state we
    # can no longer make that distinction in the name.
    #    nm = '_xor' if xor else '_or'
    nm = '_xor'
    orCname = unique_component_name(
        parent,
        '_gdp_bigm_relaxation_' + disjunction.getname(
            fully_qualified=True, name_buffer=NAME_BUFFER) + nm)
    parent.add_component(orCname, orC)
    orConstraintMap[disjunction] = orC
    return orC
def _process_bilinear_constraints(block, v1, v2, var_values, bilinear_constrs):
    # Reformulate the bilinear term v1 * v2 (with v1 effectively discrete,
    # taking values in var_values) via binary selection variables and
    # per-value increments of v2.  Returns False when v2 lacks the bounds
    # required for the reformulation.
    # TODO check that the appropriate variable bounds exist.
    if not (v2.has_lb() and v2.has_ub()):
        logger.warning(textwrap.dedent("""\
            Attempting to transform bilinear term {v1} * {v2} using effectively
            discrete variable {v1}, but {v2} is missing a lower or upper bound:
            ({v2lb}, {v2ub}).
            """.format(v1=v1, v2=v2, v2lb=v2.lb, v2ub=v2.ub)).strip())
        return False
    blk = Block()
    unique_name = unique_component_name(
        block,
        ("%s_%s_bilinear" % (v1.local_name, v2.local_name))
        .replace('[', '').replace(']', ''))
    block._induced_linearity_info.add_component(unique_name, blk)
    # TODO think about not using floats as indices in a set
    blk.valid_values = Set(initialize=var_values)
    blk.x_active = Var(blk.valid_values, domain=Binary, initialize=1)
    blk.v_increment = Var(
        blk.valid_values, domain=v2.domain,
        bounds=(v2.lb, v2.ub), initialize=v2.value)
    # v2 is the sum of its per-value increments
    blk.v_defn = Constraint(expr=v2 == summation(blk.v_increment))

    @blk.Constraint(blk.valid_values)
    def v_lb(blk, val):
        return v2.lb * blk.x_active[val] <= blk.v_increment[val]

    @blk.Constraint(blk.valid_values)
    def v_ub(blk, val):
        return blk.v_increment[val] <= v2.ub * blk.x_active[val]

    # exactly one discrete value may be selected
    blk.select_one_value = Constraint(expr=summation(blk.x_active) == 1)
    # Categorize as case 1 or case 2
    for bilinear_constr in bilinear_constrs:
        # repn = generate_standard_repn(bilinear_constr.body)

        # Case 1: no other variables besides bilinear term in constraint. v1
        # (effectively discrete variable) is positive.
        # if (len(repn.quadratic_vars) == 1 and len(repn.linear_vars) == 0
        #         and repn.nonlinear_expr is None):
        #     _reformulate_case_1(v1, v2, discrete_constr, bilinear_constr)

        # NOTE: Case 1 is left unimplemented for now, because it involves
        # some messier logic with respect to how the transformation needs
        # to happen.

        # Case 2: this is everything else, but do we want to have a special
        # case if there are nonlinear expressions involved with the
        # constraint?
        pass
        _reformulate_case_2(blk, v1, v2, bilinear_constr)
    pass
def _get_transformation_block(self, block):
    """Return (creating on first use) the reformulation Block for ``block``."""
    existing = self._transformation_blocks.get(block)
    if existing is not None:
        return existing

    # create a transformation block on which we will create the
    # reformulated GDP...
    transformation_block = Block()
    self._transformation_blocks[block] = transformation_block
    block.add_component(
        unique_component_name(
            block, '_pyomo_gdp_partition_disjuncts_reformulation'),
        transformation_block)

    transformation_block.indicator_var_equalities = LogicalConstraint(
        NonNegativeIntegers)
    return transformation_block
def _getXorConstraint(self, disjunction):
    # Put the disjunction constraint on its parent block and
    # determine whether it is an OR or XOR constraint.

    # We never do this for just a DisjunctionData because we need
    # to know about the index set of its parent component. So if
    # we called this on a DisjunctionData, we did something wrong.
    assert isinstance(disjunction, Disjunction)
    parent = disjunction.parent_block()
    if hasattr(parent, "_gdp_transformation_info"):
        infodict = parent._gdp_transformation_info
        if type(infodict) is not dict:
            raise GDP_Error(
                "Component %s contains an attribute named "
                "_gdp_transformation_info. The transformation requires "
                "that it can create this attribute!" % parent.name)
        try:
            # On the off-chance that another GDP transformation went
            # first, the infodict may exist, but the specific map we
            # want will not be present
            orConstraintMap = infodict['disjunction_or_constraint']
        except KeyError:
            orConstraintMap = infodict['disjunction_or_constraint'] \
                = ComponentMap()
    else:
        infodict = parent._gdp_transformation_info = {}
        orConstraintMap = infodict['disjunction_or_constraint'] \
            = ComponentMap()

    # If the Constraint already exists, return it
    if disjunction in orConstraintMap:
        return orConstraintMap[disjunction]

    # add the XOR (or OR) constraints to parent block (with unique name)
    # It's indexed if this is an IndexedDisjunction, not otherwise
    orC = Constraint(disjunction.index_set()) if \
        disjunction.is_indexed() else Constraint()
    # The name used to indicate if there were OR or XOR disjunctions,
    # however now that Disjunctions are allowed to mix the state we
    # can no longer make that distinction in the name.
    #    nm = '_xor' if xor else '_or'
    nm = '_xor'
    orCname = unique_component_name(
        parent, '_gdp_bigm_relaxation_' + disjunction.name + nm)
    parent.add_component(orCname, orC)
    orConstraintMap[disjunction] = orC
    return orC
def replace_uncertain_bounds_with_constraints(model, uncertain_params):
    """
    For variables of which the bounds are dependent on the parameters
    in the list `uncertain_params`, remove the bounds and add explicit
    variable bound inequality constraints.

    :param model: Model in which to make the bounds/constraint replacements
    :type model: class:`pyomo.core.base.PyomoModel.ConcreteModel`
    :param uncertain_params: List of uncertain model parameters
    :type uncertain_params: list
    """
    uncertain_param_set = ComponentSet(uncertain_params)

    # component for explicit inequality constraints
    uncertain_var_bound_constrs = ConstraintList()
    model.add_component(
        unique_component_name(model, 'uncertain_var_bound_cons'),
        uncertain_var_bound_constrs)

    # get all variables in active objective and constraint expression(s)
    vars_in_cons = ComponentSet(get_vars_from_component(model, Constraint))
    vars_in_obj = ComponentSet(get_vars_from_component(model, Objective))

    for v in vars_in_cons | vars_in_obj:
        # get mutable parameters in variable bounds expressions
        ub = v.upper
        mutable_params_ub = ComponentSet(identify_mutable_parameters(ub))
        lb = v.lower
        mutable_params_lb = ComponentSet(identify_mutable_parameters(lb))

        # add explicit inequality constraint(s), remove variable bound(s)
        if mutable_params_ub & uncertain_param_set:
            # a domain upper bound combines with a declared bound as min()
            if type(ub) is NPV_MinExpression:
                upper_bounds = ub.args
            else:
                upper_bounds = (ub,)
            for u_bnd in upper_bounds:
                uncertain_var_bound_constrs.add(v - u_bnd <= 0)
            v.setub(None)
        if mutable_params_lb & uncertain_param_set:
            # BUG FIX: this branch previously inspected ``type(ub)``; the
            # lower bound expression ``lb`` is the one that may be an
            # NPV_MaxExpression (domain/declared bounds combine as max()).
            if type(lb) is NPV_MaxExpression:
                lower_bounds = lb.args
            else:
                lower_bounds = (lb,)
            for l_bnd in lower_bounds:
                uncertain_var_bound_constrs.add(l_bnd - v <= 0)
            v.setlb(None)
def __call__(self):
    """Create (and associate) a binary Var standing in for this BooleanVar.

    Emits a deprecation warning: callers should associate their own
    binaries for BooleanVars not used in logical expressions.
    """
    deprecation_warning(
        "Relying on core.logical_to_linear to transform "
        "BooleanVars that do not appear in LogicalConstraints "
        "is deprecated. Please associate your own binaries if "
        "you have BooleanVars not used in logical expressions.",
        version='6.2')
    boolvar = self._boolvar()
    parent = boolvar.parent_block()
    binary = Var(domain=Binary)
    parent.add_component(
        unique_component_name(parent, boolvar.local_name + "_asbinary"),
        binary)
    # Clear the stale association before registering the new binary.
    boolvar._associated_binary = None
    boolvar.associate_binary_var(binary)
    return binary
def set_value(self, expr):
    """Set the disjuncts of this Disjunction from the iterable ``expr``.

    Each element may be a Disjunct block (used directly), a relational
    expression, or an iterable of relational expressions; the latter two
    are wrapped in automatically-created Disjuncts.
    """
    for e in expr:
        # The user gave us a proper Disjunct block
        if hasattr(e, 'type') and e.type() == Disjunct:
            self.disjuncts.append(e)
            continue
        # The user was lazy and gave us a single constraint
        # expression or an iterable of expressions
        expressions = []
        if hasattr(e, '__iter__'):
            e_iter = e
        else:
            e_iter = [e]
        for _tmpe in e_iter:
            try:
                isexpr = _tmpe.is_expression_type()
            except AttributeError:
                isexpr = False
            if not isexpr or not _tmpe.is_relational():
                # Only mention the containing iterable when one was given.
                msg = "\n\tin %s" % (type(e),) if e_iter is e else ""
                raise ValueError(
                    "Unexpected term for Disjunction %s.\n"
                    "\tExpected a Disjunct object, relational expression, "
                    "or iterable of\n"
                    "\trelational expressions but got %s%s"
                    % (self.name, type(_tmpe), msg)
                )
            else:
                expressions.append(_tmpe)

        comp = self.parent_component()
        # Lazily create the container of auto-generated Disjuncts.
        if comp._autodisjuncts is None:
            b = self.parent_block()
            comp._autodisjuncts = Disjunct(Any)
            b.add_component(
                unique_component_name(b, comp.local_name + "_disjuncts"),
                comp._autodisjuncts
            )
            # TODO: I am not at all sure why we need to
            # explicitly construct this block - that should
            # happen automatically.
            comp._autodisjuncts.construct()
        disjunct = comp._autodisjuncts[len(comp._autodisjuncts)]
        disjunct.constraint = c = ConstraintList()
        for e in expressions:
            c.add(e)
        self.disjuncts.append(disjunct)
def set_value(self, expr):
    """Set the disjuncts of this Disjunction from the iterable ``expr``.

    Accepts Disjunct blocks directly; relational expressions (or
    iterables of them) are wrapped in automatically-created Disjuncts.
    """
    for term in expr:
        # A ready-made Disjunct block can be appended as-is.
        if hasattr(term, 'type') and term.type() == Disjunct:
            self.disjuncts.append(term)
            continue
        # Otherwise expect one relational expression, or an iterable
        # of relational expressions, to wrap in an auto-Disjunct.
        candidates = term if hasattr(term, '__iter__') else [term]
        relational_exprs = []
        for item in candidates:
            try:
                is_expr = item.is_expression_type()
            except AttributeError:
                is_expr = False
            if is_expr and item.is_relational():
                relational_exprs.append(item)
            else:
                msg = "\n\tin %s" % (type(term), ) if candidates is term else ""
                raise ValueError(
                    "Unexpected term for Disjunction %s.\n"
                    "\tExpected a Disjunct object, relational expression, "
                    "or iterable of\n"
                    "\trelational expressions but got %s%s"
                    % (self.name, type(item), msg))

        comp = self.parent_component()
        if comp._autodisjuncts is None:
            # First auto-Disjunct: create the indexed container.
            blk = self.parent_block()
            comp._autodisjuncts = Disjunct(Any)
            blk.add_component(
                unique_component_name(blk, comp.local_name + "_disjuncts"),
                comp._autodisjuncts)
            # TODO: I am not at all sure why we need to
            # explicitly construct this block - that should
            # happen automatically.
            comp._autodisjuncts.construct()
        new_disjunct = comp._autodisjuncts[len(comp._autodisjuncts)]
        new_disjunct.constraint = con_list = ConstraintList()
        for rel in relational_exprs:
            con_list.add(rel)
        self.disjuncts.append(new_disjunct)
def _add_transformation_block(self, instance):
    """Create and attach the block holding all hull-transformed components."""
    blk = Block()
    instance.add_component(
        unique_component_name(instance, '_pyomo_gdp_hull_reformulation'),
        blk)
    blk.relaxedDisjuncts = Block(NonNegativeIntegers)
    blk.lbub = Set(initialize=['lb', 'ub', 'eq'])
    # All disaggregation constraints for any transformed Disjunctions
    # are collected here.
    blk.disaggregationConstraints = Constraint(NonNegativeIntegers, Any)
    # Maps srcVar -> {srcDisjunction: its disaggregation constraint}.
    blk._disaggregationConstraintMap = ComponentMap()
    return blk
def prune_possible_values(block_scope, possible_values, config):
    """Prune ``possible_values`` by solving a feasibility problem per value.

    A clone of the top-level model is solved once for each candidate
    value; values whose fixing makes the model infeasible are removed.
    ``possible_values`` is updated in place and also returned.
    """
    # Prune the set of possible values by solving a series of feasibility
    # problems
    top_level_scope = block_scope.model()
    tmp_name = unique_component_name(
        top_level_scope, '_induced_linearity_prune_data')
    tmp_orig_blk = Block()
    setattr(top_level_scope, tmp_name, tmp_orig_blk)
    # Stash the data on a block so clone() maps it onto cloned components.
    tmp_orig_blk._possible_values = possible_values
    tmp_orig_blk._possible_value_vars = list(v for v in possible_values)
    tmp_orig_blk._tmp_block_scope = (block_scope, )
    model = top_level_scope.clone()
    tmp_clone_blk = getattr(model, tmp_name)
    for obj in model.component_data_objects(Objective, active=True):
        obj.deactivate()
    # Only keep (at most) linear constraints for the feasibility checks.
    for constr in model.component_data_objects(
            Constraint, active=True, descend_into=(Block, Disjunct)):
        if constr.body.polynomial_degree() not in (1, 0):
            constr.deactivate()
    if block_scope.ctype == Disjunct:
        # NOTE(review): assumes the bigm relaxation is only needed when the
        # scope is a Disjunct — confirm the transform belongs inside this if.
        disj = tmp_clone_blk._tmp_block_scope[0]
        disj.indicator_var.fix(1)
        TransformationFactory('gdp.bigm').apply_to(model)
    tmp_clone_blk.test_feasible = Constraint()
    tmp_clone_blk._obj = Objective(expr=1)
    for eff_discr_var, vals in tmp_clone_blk._possible_values.items():
        val_feasible = {}
        for val in vals:
            # Fix the candidate value via an equality and test feasibility.
            tmp_clone_blk.test_feasible.set_value(eff_discr_var == val)
            with SuppressConstantObjectiveWarning():
                res = SolverFactory(config.pruning_solver).solve(model)
            if res.solver.termination_condition is tc.infeasible:
                val_feasible[val] = False
        # Values never proven infeasible are kept (default True).
        tmp_clone_blk._possible_values[eff_discr_var] = set(
            v for v in tmp_clone_blk._possible_values[eff_discr_var]
            if val_feasible.get(v, True))
    # Copy the pruned sets back onto the original model's variables.
    for i, var in enumerate(tmp_orig_blk._possible_value_vars):
        possible_values[var] = tmp_clone_blk._possible_values[
            tmp_clone_blk._possible_value_vars[i]]
    return possible_values
def rename_components(model, component_list, prefix):
    """
    Rename components in component_list using the prefix AND
    unique_component_name

    Parameters
    ----------
    model : Pyomo model (or Block)
       The variables, constraints and objective will be renamed on this model
    component_list : iterable
       Iterable of components to rename (materialized internally, so a
       generator is safe to pass)
    prefix : str
       The prefix to use when building the new names

    Examples
    --------
    >>> c_list = list(model.component_objects(ctype=Var, descend_into=True))
    >>> rename_components(model, component_list=c_list, prefix='special_')

    Returns
    -------
       ComponentMap : maps the renamed Component objects
          to a string that provides their old fully qualified names
    """
    # Materialize the iterable up front: a generator over
    # model.component_objects would otherwise see the components we add
    # during renaming and could loop forever (closes the old ToDo).
    component_list = list(component_list)

    name_map = ComponentMap()
    for c in component_list:
        # get the parent block - we will use this to ensure new names are
        # unique and to add the new "scaled" components
        parent = c.parent_block()
        old_name = c.name
        new_name = unique_component_name(parent, prefix + c.local_name)
        parent.del_component(c)
        parent.add_component(new_name, c)
        name_map[c] = old_name
    return name_map
def prune_possible_values(block_scope, possible_values, config):
    """Prune ``possible_values`` by solving a feasibility problem per value.

    A clone of the top-level model is solved once per candidate value;
    values whose fixing renders the model infeasible are dropped.
    ``possible_values`` is updated in place and returned.
    """
    # Prune the set of possible values by solving a series of feasibility
    # problems
    top_level_scope = block_scope.model()
    tmp_name = unique_component_name(
        top_level_scope, '_induced_linearity_prune_data')
    tmp_orig_blk = Block()
    setattr(top_level_scope, tmp_name, tmp_orig_blk)
    # Stash the data on a block so clone() maps it onto cloned components.
    tmp_orig_blk._possible_values = possible_values
    tmp_orig_blk._possible_value_vars = list(v for v in possible_values)
    tmp_orig_blk._tmp_block_scope = (block_scope,)
    model = top_level_scope.clone()
    tmp_clone_blk = getattr(model, tmp_name)
    for obj in model.component_data_objects(Objective, active=True):
        obj.deactivate()
    # Keep only (at most) linear constraints for the feasibility checks.
    for constr in model.component_data_objects(
            Constraint, active=True, descend_into=(Block, Disjunct)):
        if constr.body.polynomial_degree() not in (1, 0):
            constr.deactivate()
    if block_scope.type() == Disjunct:
        # NOTE(review): assumes the bigm relaxation is only needed when the
        # scope is a Disjunct — confirm the transform belongs inside this if.
        disj = tmp_clone_blk._tmp_block_scope[0]
        disj.indicator_var.fix(1)
        TransformationFactory('gdp.bigm').apply_to(model)
    tmp_clone_blk.test_feasible = Constraint()
    tmp_clone_blk._obj = Objective(expr=1)
    for eff_discr_var, vals in tmp_clone_blk._possible_values.items():
        val_feasible = {}
        for val in vals:
            # Fix the candidate value via an equality and test feasibility.
            tmp_clone_blk.test_feasible.set_value(eff_discr_var == val)
            with SuppressConstantObjectiveWarning():
                res = SolverFactory(config.pruning_solver).solve(model)
            if res.solver.termination_condition is tc.infeasible:
                val_feasible[val] = False
        # Values never proven infeasible are kept (default True).
        tmp_clone_blk._possible_values[eff_discr_var] = set(
            v for v in tmp_clone_blk._possible_values[eff_discr_var]
            if val_feasible.get(v, True))
    # Copy the pruned sets back onto the original model's variables.
    for i, var in enumerate(tmp_orig_blk._possible_value_vars):
        possible_values[var] = tmp_clone_blk._possible_values[
            tmp_clone_blk._possible_value_vars[i]]
    return possible_values
def add_local_external_functions(block):
    """Declare an ExternalFunction on ``block`` for every distinct external
    function referenced by its active Constraints and Expressions.

    Returns a dict mapping (library, function-name) to the new component.
    """
    # Collect every external-function expression appearing on the block.
    ef_exprs = []
    for comp in block.component_data_objects(
        (Constraint, Expression), active=True
    ):
        ef_exprs.extend(identify_external_functions(comp.expr))

    # De-duplicate by (library, function) while keeping first-seen order.
    seen = set()
    unique_functions = []
    for expr in ef_exprs:
        fcn = expr._fcn
        key = (fcn._library, fcn._function)
        if key not in seen:
            seen.add(key)
            unique_functions.append(key)

    fcn_comp_map = {}
    for lib, fname in unique_functions:
        new_comp = ExternalFunction(library=lib, function=fname)
        block.add_component(
            unique_component_name(block, "_" + fname), new_comp)
        fcn_comp_map[lib, fname] = new_comp
    return fcn_comp_map
def transformForTrustRegion(self, model, eflist):
    """Transform ``model`` into the form required by the TRF method.

    Arguments:
    model  : pyomo model containing ExternalFunctions
    eflist : a list of the external functions that will be
             handled with TRF method rather than calls to compiled code
    """
    efSet = set([id(x) for x in eflist])
    TRF = Block()

    # Get all variables
    # NOTE(review): ``Set()`` here is the imported (presumably Pyomo) Set
    # used like a plain python set of ids — confirm this is intentional.
    seenVar = Set()
    allVariables = []
    for var in model.component_data_objects(Var):
        if id(var) not in seenVar:
            seenVar.add(id(var))
            allVariables.append(var)

    # This assumes that an external function call is present, required!
    model.add_component(unique_component_name(model, 'tR'), TRF)
    TRF.y = VarList()
    TRF.x = VarList()
    TRF.conset = ConstraintList()
    TRF.external_fcns = []
    TRF.exfn_xvars = []

    # TODO: Copy constraints onto block so that transformation can be
    # reversed.
    # Replace external-function calls in every active constraint body
    # and objective expression (substituteEF also populates
    # TRF.external_fcns / TRF.exfn_xvars as a side effect of the calls).
    for con in model.component_data_objects(Constraint, active=True):
        con.set_value((con.lower, self.substituteEF(con.body, TRF, efSet),
                       con.upper))
    for obj in model.component_data_objects(Objective, active=True):
        obj.set_value(self.substituteEF(obj.expr, TRF, efSet))
        ## Assume only one active objective function here
        self.objective = obj

    # Normalize to a minimization problem.
    if self.objective.sense == maximize:
        self.objective.expr = -1 * self.objective.expr
        self.objective.sense = minimize

    # xvars and zvars are lists of x and z variables as in the paper
    TRF.xvars = []
    TRF.zvars = []
    seenVar = Set()
    for varss in TRF.exfn_xvars:
        for var in varss:
            if id(var) not in seenVar:
                seenVar.add(id(var))
                TRF.xvars.append(var)
    # Everything not an external-function input is a z variable.
    for var in allVariables:
        if id(var) not in seenVar:
            seenVar.add(id(var))
            TRF.zvars.append(var)

    # TODO: build dict for exfn_xvars
    # assume it is not bottleneck of the code
    # Map each external function's input vars to their indices in TRF.xvars.
    self.exfn_xvars_ind = []
    for varss in TRF.exfn_xvars:
        listtmp = []
        for var in varss:
            for i in range(len(TRF.xvars)):
                if (id(var) == id(TRF.xvars[i])):
                    listtmp.append(i)
                    break
        self.exfn_xvars_ind.append(listtmp)

    return TRF
def _Split(port, name, index_set, include_splitfrac=False,
           write_var_sum=True):
    """Expand an Extensive port member ``name`` across all outgoing arcs.

    Creates one evar per destination arc plus either a variable-sum
    constraint (var == sum of evars) or a split-fraction sum constraint,
    depending on ``write_var_sum``. Returns the list of created evars.
    """
    port_parent = port.parent_block()
    var = port.vars[name]
    out_vars = []
    no_splitfrac = False
    dests = port.dests(active=True)

    if not len(dests):
        return out_vars

    if len(dests) == 1:
        # No need for splitting on one outlet.
        # Make sure they do not try to fix splitfrac not at 1.
        splitfracspec = port.get_split_fraction(dests[0])
        if splitfracspec is not None:
            if splitfracspec[0] != 1 and splitfracspec[1] == True:
                raise ValueError(
                    "Cannot fix splitfrac not at 1 for port '%s' with a "
                    "single dest '%s'" % (port.name, dests[0].name))

        no_splitfrac = True

        if len(dests[0].destination.sources(active=True)) == 1:
            # This is a 1-to-1 connection, no need for evar, just equality.
            arc = dests[0]
            Port._add_equality_constraint(arc, name, index_set)
            return out_vars

    for arc in dests:
        eblock = arc.expanded_block

        # Make and record new variables for every arc with this member.
        evar = Port._create_evar(port.vars[name], name, eblock, index_set)
        out_vars.append(evar)

        if no_splitfrac:
            continue

        # Create and potentially initialize split fraction variables.
        # This function will be called for every Extensive member of this
        # port, but we only need one splitfrac variable per arc, so check
        # if it already exists before making a new one. However, we do not
        # need a splitfrac if there is only one Extensive data object,
        # so first check whether or not we need it.
        if eblock.component("splitfrac") is None:
            if not include_splitfrac:
                # Count Extensive data objects; more than one means we
                # genuinely need splitfrac variables.
                num_data_objs = 0
                for k, v in iteritems(port.vars):
                    if port.is_extensive(k):
                        if v.is_indexed():
                            num_data_objs += len(v)
                        else:
                            num_data_objs += 1
                        if num_data_objs > 1:
                            break

                if num_data_objs <= 1:
                    # Do not make splitfrac, do not make split constraints.
                    # Make sure they didn't specify splitfracs.
                    # This inner loop will only run once.
                    for arc in dests:
                        if port.get_split_fraction(arc) is not None:
                            raise ValueError(
                                "Cannot specify splitfracs for port '%s' "
                                "(found arc '%s') because this port only "
                                "has one variable. To have control over "
                                "splitfracs, please pass the "
                                " include_splitfrac=True argument."
                                % (port.name, arc.name))
                    no_splitfrac = True
                    continue

            eblock.splitfrac = Var()
            splitfracspec = port.get_split_fraction(arc)
            if splitfracspec is not None:
                eblock.splitfrac = splitfracspec[0]
                if splitfracspec[1]:
                    eblock.splitfrac.fix()

        # Create constraint for this member using splitfrac.
        cname = "%s_split" % name

        def rule(m, *args):
            if len(args):
                return evar[args] == eblock.splitfrac * var[args]
            else:
                return evar == eblock.splitfrac * var
        con = Constraint(index_set, rule=rule)
        eblock.add_component(cname, con)

    if write_var_sum:
        # Create var total sum constraint: var == sum of evars
        # Need to alphanum port name in case it is indexed.
        cname = unique_component_name(
            port_parent, "%s_%s_outsum" %
            (alphanum_label_from_name(port.local_name), name))

        def rule(m, *args):
            if len(args):
                return sum(evar[args] for evar in out_vars) == var[args]
            else:
                return sum(evar for evar in out_vars) == var
        con = Constraint(index_set, rule=rule)
        port_parent.add_component(cname, con)
    else:
        # OR create constraint on splitfrac vars: sum == 1
        if no_splitfrac:
            raise ValueError(
                "Cannot choose to write split fraction sum constraint for "
                "ports with a single destination or a single Extensive "
                "variable.\nSplit fractions are skipped in this case to "
                "simplify the model.\nPlease use write_var_sum=True on "
                "this port (the default).")
        cname = unique_component_name(
            port_parent, "%s_frac_sum" %
            alphanum_label_from_name(port.local_name))
        con = Constraint(expr=
            sum(a.expanded_block.splitfrac for a in dests) == 1)
        port_parent.add_component(cname, con)

    return out_vars
def _apply_to(self, instance, **kwds):
    """Add nonnegative slack variables to the targeted constraints and
    replace the active objective with minimization of the slack sum.

    kwds: targets — optional list of ComponentUIDs naming the
    constraints to relax; defaults to every constraint on the instance.
    """
    targets = kwds.pop('targets', None)
    if kwds:
        logger.warning("Unrecognized keyword arguments in add slack "
                       "variable transformation:\n%s" %
                       ('\n'.join(iterkeys(kwds)),))
    if targets is None:
        constraintDatas = instance.component_data_objects(
            Constraint, descend_into=True)
    else:
        constraintDatas = []
        for cuid in targets:
            cons = cuid.find_component(instance)
            # Expand indexed constraints into their data objects.
            if cons.is_indexed():
                for i in cons:
                    constraintDatas.append(cons[i])
            else:
                constraintDatas.append(cons)

    # deactivate the objective
    for o in instance.component_data_objects(Objective):
        o.deactivate()

    # create block where we can add slack variables safely
    xblockname = unique_component_name(instance, "_core_add_slack_variables")
    instance.add_component(xblockname, Block())
    xblock = instance.component(xblockname)

    obj_expr = 0
    for cons in constraintDatas:
        if (cons.lower is not None and cons.upper is not None) and \
                value(cons.lower) > value(cons.upper):
            # this is a structural infeasibility so slacks aren't going to
            # help:
            raise RuntimeError("Lower bound exceeds upper bound in "
                               "constraint %s" % cons.name)
        if not cons.active:
            continue
        if cons.lower is not None:
            # we add positive slack variable to body:
            # declare positive slack
            varName = "_slack_plus_" + cons.name
            posSlack = Var(within=NonNegativeReals)
            xblock.add_component(varName, posSlack)
            # add positive slack to body expression
            cons._body += posSlack
            # penalize slack in objective
            obj_expr += posSlack
        if cons.upper is not None:
            # we subtract a positive slack variable from the body:
            # declare slack
            varName = "_slack_minus_" + cons.name
            negSlack = Var(within=NonNegativeReals)
            xblock.add_component(varName, negSlack)
            # add negative slack to body expression
            cons._body -= negSlack
            # add slack to objective
            obj_expr += negSlack

    # make a new objective that minimizes sum of slack variables
    xblock._slack_objective = Objective(expr=obj_expr)
def _apply_to(self, instance, **kwds):
    """Apply the big-M relaxation to ``instance`` (or to the targets
    named in the config), creating a transformation block to hold the
    relaxed disjuncts.
    """
    config = self.CONFIG(kwds.pop('options', {}))

    # We will let args override suffixes and estimate as a last
    # resort. More specific args/suffixes override ones anywhere in
    # the tree. Suffixes lower down in the tree override ones higher
    # up.
    if 'default_bigM' in kwds:
        # NOTE(review): logger.warn is itself deprecated in the stdlib;
        # logger.warning would be the modern spelling.
        logger.warn("DEPRECATED: the 'default_bigM=' argument has been "
                    "replaced by 'bigM='")
        config.bigM = kwds.pop('default_bigM')

    config.set_value(kwds)
    bigM = config.bigM

    # make a transformation block to put transformed disjuncts on
    transBlockName = unique_component_name(
        instance, '_pyomo_gdp_bigm_relaxation')
    transBlock = Block()
    instance.add_component(transBlockName, transBlock)
    transBlock.relaxedDisjuncts = Block(Any)
    transBlock.lbub = Set(initialize=['lb', 'ub'])
    # this is a dictionary for keeping track of IndexedDisjuncts
    # and IndexedDisjunctions so that, at the end of the
    # transformation, we can check that the ones with no active
    # DisjstuffDatas are deactivated.
    transBlock.disjContainers = ComponentSet()

    targets = config.targets
    if targets is None:
        targets = (instance, )
        _HACK_transform_whole_instance = True
    else:
        _HACK_transform_whole_instance = False
    for _t in targets:
        t = _t.find_component(instance)
        if t is None:
            raise GDP_Error(
                "Target %s is not a component on the instance!" % _t)

        # Dispatch on the target's type; containers and data objects
        # go through different transformation entry points.
        if t.type() is Disjunction:
            if t.parent_component() is t:
                self._transformDisjunction(t, transBlock, bigM)
            else:
                self._transformDisjunctionData(
                    t, transBlock, bigM, t.index())
        elif t.type() in (Block, Disjunct):
            if t.parent_component() is t:
                self._transformBlock(t, transBlock, bigM)
            else:
                self._transformBlockData(t, transBlock, bigM)
        else:
            raise GDP_Error(
                "Target %s was not a Block, Disjunct, or Disjunction. "
                "It was of type %s and can't be transformed."
                % (t.name, type(t)))

    # Go through our dictionary of indexed things and deactivate
    # the containers that don't have any active guys inside of
    # them. So the invalid component logic will tell us if we
    # missed something getting transformed.
    for obj in transBlock.disjContainers:
        if not obj.active:
            continue
        for i in obj:
            if obj[i].active:
                break
        else:
            # HACK due to active flag implementation.
            #
            # Ideally we would not have to do any of this (an
            # ActiveIndexedComponent would get its active status by
            # querying the active status of all the contained Data
            # objects). As a fallback, we would like to call:
            #
            # obj._deactivate_without_fixing_indicator()
            #
            # However, the straightforward implementation of that
            # method would have unintended side effects (fixing the
            # contained _DisjunctData's indicator_vars!) due to our
            # class hierarchy. Instead, we will directly call the
            # relevant base class (safe-ish since we are verifying
            # that all the contained _DisjunctionData are
            # deactivated directly above).
            ActiveComponent.deactivate(obj)

    # HACK for backwards compatibility with the older GDP transformations
    #
    # Until the writers are updated to find variables on things
    # other than active blocks, we need to reclassify the Disjuncts
    # as Blocks after transformation so that the writer will pick up
    # all the variables that it needs (in this case, indicator_vars).
    if _HACK_transform_whole_instance:
        HACK_GDP_Disjunct_Reclassifier().apply_to(instance)
def solve(self, model, **kwds):
    """Solve a GDP ``model`` by best-first branch and bound over its
    disjunctions, using ``config.solver`` for the node subproblems.

    Returns a SolverResults object; the best solution's values are
    copied back onto ``model``.
    """
    config = self.CONFIG(kwds.pop('options', {}))
    config.set_value(kwds)

    # Validate model to be used with gdpbb
    self.validate_model(model)
    # Set solver as an MINLP
    solver = SolverFactory(config.solver)
    solve_data = GDPbbSolveData()
    solve_data.timing = Container()
    solve_data.original_model = model
    solve_data.results = SolverResults()

    old_logger_level = config.logger.getEffectiveLevel()
    with time_code(solve_data.timing, 'total'), \
            restore_logger_level(config.logger), \
            create_utility_block(model, 'GDPbb_utils', solve_data):
        if config.tee and old_logger_level > logging.INFO:
            # If the logger does not already include INFO, include it.
            config.logger.setLevel(logging.INFO)
        config.logger.info(
            "Starting GDPbb version %s using %s as subsolver"
            % (".".join(map(str, self.version())), config.solver)
        )

        # Setup results
        solve_data.results.solver.name = 'GDPbb - %s' % (str(config.solver))
        setup_results_object(solve_data, config)

        # Initialize list containing indicator vars for reupdating model
        # after solving
        indicator_list_name = unique_component_name(model, "_indicator_list")
        indicator_vars = []
        for disjunction in model.component_data_objects(
                ctype=Disjunction, active=True):
            for disjunct in disjunction.disjuncts:
                indicator_vars.append(disjunct.indicator_var)
        setattr(model, indicator_list_name, indicator_vars)

        # get objective sense
        objectives = model.component_data_objects(Objective, active=True)
        obj = next(objectives, None)
        # obj_sign normalizes the heap ordering to minimization.
        obj_sign = 1 if obj.sense == minimize else -1
        solve_data.results.problem.sense = obj.sense

        # clone original model for root node of branch and bound
        root = model.clone()

        # set up lists to keep track of which disjunctions have been
        # covered.

        # this list keeps track of the original disjunctions that were
        # active and are soon to be inactive
        root.GDPbb_utils.unenforced_disjunctions = list(
            disjunction
            for disjunction in root.GDPbb_utils.disjunction_list
            if disjunction.active
        )

        # this list keeps track of the disjunctions that have been
        # activated by the branch and bound
        root.GDPbb_utils.curr_active_disjunctions = []

        # deactivate all disjunctions in the model
        # self.indicate(root)
        for djn in root.GDPbb_utils.unenforced_disjunctions:
            djn.deactivate()
        # Deactivate all disjuncts in model. To be reactivated when
        # disjunction is reactivated.
        for disj in root.component_data_objects(Disjunct, active=True):
            disj._deactivate_without_fixing_indicator()

        # Satisfiability check would go here

        # solve the root node
        config.logger.info("Solving the root node.")
        obj_value, result, _ = self.subproblem_solve(root, solver, config)

        # initialize minheap for Branch and Bound algorithm
        # Heap structure: (ordering tuple, model)
        # Ordering tuple: (objective value, disjunctions_left, -counter)
        #  - select solutions with lower objective value,
        #    then fewer disjunctions left to explore (depth first),
        #    then more recently encountered (tiebreaker)
        heap = []
        counter = 0
        disjunctions_left = len(root.GDPbb_utils.unenforced_disjunctions)
        heapq.heappush(
            heap,
            ((obj_sign * obj_value, disjunctions_left, -counter),
             root, result, root.GDPbb_utils.variable_list))

        # loop to branch through the tree
        while len(heap) > 0:
            # pop best model off of heap
            sort_tup, mdl, mdl_results, vars = heapq.heappop(heap)
            old_obj_val, disjunctions_left, _ = sort_tup
            config.logger.info(
                "Exploring node with LB %.10g and %s inactive "
                "disjunctions." % (old_obj_val, disjunctions_left))

            # if all the originally active disjunctions are active, solve
            # and return solution
            if disjunctions_left == 0:
                config.logger.info("Model solved.")
                # Model is solved. Copy over solution values.
                for orig_var, soln_var in zip(
                        model.GDPbb_utils.variable_list, vars):
                    orig_var.value = soln_var.value

                solve_data.results.problem.lower_bound = \
                    mdl_results.problem.lower_bound
                solve_data.results.problem.upper_bound = \
                    mdl_results.problem.upper_bound
                solve_data.results.solver.timing = solve_data.timing
                solve_data.results.solver.termination_condition = \
                    mdl_results.solver.termination_condition
                return solve_data.results

            # Branch: activate the next disjunction on this node's model.
            next_disjunction = mdl.GDPbb_utils.unenforced_disjunctions.pop(0)
            config.logger.info("Activating disjunction %s"
                               % next_disjunction.name)
            next_disjunction.activate()
            mdl.GDPbb_utils.curr_active_disjunctions.append(next_disjunction)
            djn_left = len(mdl.GDPbb_utils.unenforced_disjunctions)
            for disj in next_disjunction.disjuncts:
                disj._activate_without_unfixing_indicator()
                if not disj.indicator_var.fixed:
                    disj.indicator_var = 0  # initially set all indicator vars to zero
            added_disj_counter = 0
            for disj in next_disjunction.disjuncts:
                # Clone a child node with this disjunct selected.
                if not disj.indicator_var.fixed:
                    disj.indicator_var = 1
                mnew = mdl.clone()
                if not disj.indicator_var.fixed:
                    disj.indicator_var = 0

                # Check feasibility
                if config.check_sat and \
                        satisfiable(mnew, config.logger) is False:
                    # problem is not satisfiable. Skip this disjunct.
                    continue

                obj_value, result, vars = self.subproblem_solve(
                    mnew, solver, config)
                counter += 1
                ordering_tuple = (obj_sign * obj_value, djn_left, -counter)
                heapq.heappush(heap, (ordering_tuple, mnew, result, vars))
                added_disj_counter = added_disj_counter + 1
            config.logger.info(
                "Added %s new nodes with %s relaxed disjunctions to the "
                "heap. Size now %s." % (
                    added_disj_counter, djn_left, len(heap)))
def sipopt(instance, paramSubList, perturbList,
           cloneModel=True, streamSoln=False, keepfiles=False):
    """Convert a Pyomo model and run the sIPOPT sensitivity solver.

    This function accepts a Pyomo ConcreteModel, a list of parameters,
    along with their corresponding perturbation list. The model is then
    converted into the design structure required to call sipopt to get
    an approximation of the perturbed solution with updated bounds on
    the decision variables.

    Arguments:
        instance     : ConcreteModel: pyomo model object
        paramSubList : list of mutable Params to substitute
                       (raises ValueError on non-Param or immutable entries)
        perturbList  : list of Params holding the perturbed values;
                       must have the same length as paramSubList
        cloneModel   : boolean : default=True; work on a clone of the
                       model (False alters the original model)
        streamSoln   : boolean : default=False; stream the IPOPT solution
        keepfiles    : boolean : default=False; keep intermediate files

    Returns:
        m : the converted model with sensitivity results attached as
            Suffixes: m.sens_sol_state_1 (approximate perturbed solution),
            m.sens_sol_state_1_z_L / m.sens_sol_state_1_z_U (updated
            lower/upper bound multipliers).
    """
    # Verify User Inputs
    if len(paramSubList) != len(perturbList):
        raise ValueError("Length of paramSubList argument does not equal "
                         "length of perturbList")

    for pp in paramSubList:
        if pp.type() is not Param:
            raise ValueError(
                "paramSubList argument is expecting a list of Params")

    for pp in paramSubList:
        if not pp._mutable:
            raise ValueError(
                "parameters within paramSubList must be mutable")

    for pp in perturbList:
        if pp.type() is not Param:
            raise ValueError(
                "perturbList argument is expecting a list of Params")

    # Add model block to compartmentalize all sipopt data
    b = Block()
    block_name = unique_component_name(instance, '_sipopt_data')
    instance.add_component(block_name, b)

    # Based on user input clone model or use original model for analysis
    if cloneModel:
        # Stash the lists on the block so clone() maps them onto the
        # cloned components, then recover them from the clone.
        b.tmp_lists = (paramSubList, perturbList)
        m = instance.clone()
        instance.del_component(block_name)
        b = getattr(m, block_name)
        paramSubList, perturbList = b.tmp_lists
        del b.tmp_lists
    else:
        m = instance

    # Generate component maps for associating Variables to perturbations
    varSubList = []
    for parameter in paramSubList:
        tempName = unique_component_name(b, parameter.local_name)
        b.add_component(tempName, Var(parameter.index_set()))
        myVar = b.component(tempName)
        varSubList.append(myVar)

    # Note: substitutions are not currently compatible with
    #       ComponentMap [ECSA 2018/11/23], this relates to Issue #755
    paramCompMap = ComponentMap(zip(paramSubList, varSubList))
    variableSubMap = {}
    paramPerturbMap = ComponentMap(zip(paramSubList, perturbList))
    perturbSubMap = {}

    paramDataList = []
    for parameter in paramSubList:
        # Loop over each ParamData in the Param Component
        #
        # Note: Sets are unordered in Pyomo. For this to be
        # deterministic, we need to sort the index (otherwise, the
        # ordering of things in the paramDataList may change). We use
        # sorted_robust to guard against mixed-type Sets in Python 3.x
        for kk in sorted_robust(parameter):
            variableSubMap[id(parameter[kk])] = paramCompMap[parameter][kk]
            perturbSubMap[id(parameter[kk])] = paramPerturbMap[parameter][kk]
            paramDataList.append(parameter[kk])

    # clone Objective, add to Block, and update any Expressions
    for cc in list(m.component_data_objects(Objective,
                                            active=True,
                                            descend_into=True)):
        tempName = unique_component_name(m, cc.local_name)
        b.add_component(tempName, Objective(
            expr=ExpressionReplacementVisitor(
                substitute=variableSubMap,
                remove_named_expressions=True).dfs_postorder_stack(cc.expr)))
        cc.deactivate()

    # clone Constraints, add to Block, and update any Expressions
    b.constList = ConstraintList()
    for cc in list(m.component_data_objects(Constraint,
                                            active=True,
                                            descend_into=True)):
        if cc.equality:
            b.constList.add(expr=ExpressionReplacementVisitor(
                substitute=variableSubMap,
                remove_named_expressions=True).dfs_postorder_stack(cc.expr))
        else:
            try:
                # BUG FIX: this previously read
                # "ExpresssionReplacementVisitor" (a NameError), so the
                # bare except below unconditionally routed every
                # inequality through the bound-splitting fallback.
                b.constList.add(expr=ExpressionReplacementVisitor(
                    substitute=variableSubMap,
                    remove_named_expressions=True).dfs_postorder_stack(
                        cc.expr))
            except Exception:
                # Params in either the upper or lower bounds of a ranged
                # inequality will result in an invalid expression (you
                # cannot have variables in the bounds of a constraint).
                # If we hit that problem, we will break up the ranged
                # inequality into separate constraints.

                # Note that the test for lower / upper == None is probably
                # not necessary, as the only way we should get here is if
                # this is a ranged inequality and we are attempting to
                # insert a variable into either the lower or upper bound.
                if cc.lower is not None:
                    b.constList.add(expr=ExpressionReplacementVisitor(
                        substitute=variableSubMap,
                        remove_named_expressions=True).dfs_postorder_stack(
                            cc.lower) <= ExpressionReplacementVisitor(
                        substitute=variableSubMap,
                        remove_named_expressions=True).dfs_postorder_stack(
                            cc.body))
                if cc.upper is not None:
                    b.constList.add(expr=ExpressionReplacementVisitor(
                        substitute=variableSubMap,
                        remove_named_expressions=True).dfs_postorder_stack(
                            cc.upper) >= ExpressionReplacementVisitor(
                        substitute=variableSubMap,
                        remove_named_expressions=True).dfs_postorder_stack(
                            cc.body))
        cc.deactivate()

    # paramData to varData constraint list
    b.paramConst = ConstraintList()
    for ii in paramDataList:
        jj = variableSubMap[id(ii)]
        b.paramConst.add(ii == jj)

    # Create the ipopt_sens (aka sIPOPT) solver plugin using the ASL
    # interface
    opt = SolverFactory('ipopt_sens', solver_io='nl')
    if not opt.available(False):
        raise ImportError('ipopt_sens is not available')

    # Declare Suffixes
    m.sens_state_0 = Suffix(direction=Suffix.EXPORT)
    m.sens_state_1 = Suffix(direction=Suffix.EXPORT)
    m.sens_state_value_1 = Suffix(direction=Suffix.EXPORT)
    m.sens_init_constr = Suffix(direction=Suffix.EXPORT)
    m.sens_sol_state_1 = Suffix(direction=Suffix.IMPORT)
    m.sens_sol_state_1_z_L = Suffix(direction=Suffix.IMPORT)
    m.sens_sol_state_1_z_U = Suffix(direction=Suffix.IMPORT)

    # set sIPOPT data
    opt.options['run_sens'] = 'yes'

    # for reasons that are not entirely clear,
    # ipopt_sens requires the indices to start at 1
    kk = 1
    for ii in paramDataList:
        m.sens_state_0[variableSubMap[id(ii)]] = kk
        m.sens_state_1[variableSubMap[id(ii)]] = kk
        m.sens_state_value_1[variableSubMap[id(ii)]] = \
            value(perturbSubMap[id(ii)])
        m.sens_init_constr[b.paramConst[kk]] = kk
        kk += 1

    # Send the model to the ipopt_sens and collect the solution
    results = opt.solve(m, keepfiles=keepfiles, tee=streamSoln)

    return m
def _transform_disjunct(self, obj, transBlock, varSet, localVars):
    """Relax a single Disjunct using the convex hull (chull) reformulation.

    Creates a relaxation block on ``transBlock``, adds a disaggregated
    variable (plus indicator-scaled bound constraints) for every variable
    in ``varSet``, adds indicator-scaled bound constraints for every
    variable in ``localVars``, then transforms the disjunct's components
    with the disaggregation substitution maps and deactivates the disjunct.

    Args:
        obj: the DisjunctData to relax.
        transBlock: the transformation block holding ``relaxedDisjuncts``,
            ``lbub``, and ``disjContainers``.
        varSet: variables to disaggregate (must be bounded).
        localVars: variables local to this disjunct; they are not
            disaggregated, only given indicator-scaled bound constraints.

    Raises:
        GDP_Error: if ``obj`` carries a non-dict ``_gdp_transformation_info``,
            if it was deactivated inconsistently, or if a variable in
            ``varSet``/``localVars`` is missing a bound.
    """
    if hasattr(obj, "_gdp_transformation_info"):
        infodict = obj._gdp_transformation_info
        # If the user has something with our name that is not a dict, we
        # scream. If they have a dict with this name then we are just going
        # to use it...
        if type(infodict) is not dict:
            raise GDP_Error(
                "Disjunct %s contains an attribute named "
                "_gdp_transformation_info. The transformation requires "
                "that it can create this attribute!" % obj.name)
    else:
        infodict = obj._gdp_transformation_info = {}

    # deactivated means either we've already transformed or user deactivated
    if not obj.active:
        if obj.indicator_var.is_fixed():
            if value(obj.indicator_var) == 0:
                # The user cleanly deactivated the disjunct: there
                # is nothing for us to do here.
                return
            else:
                raise GDP_Error(
                    "The disjunct %s is deactivated, but the "
                    "indicator_var is fixed to %s. This makes no sense."
                    % ( obj.name, value(obj.indicator_var) ))
        if not infodict.get('relaxed', False):
            raise GDP_Error(
                "The disjunct %s is deactivated, but the "
                "indicator_var is not fixed and the disjunct does not "
                "appear to have been relaxed. This makes no sense."
                % ( obj.name, ))

    if 'chull' in infodict:
        # we've transformed it (with CHull), so don't do it again.
        return

    # add reference to original disjunct to info dict on
    # transformation block
    relaxedDisjuncts = transBlock.relaxedDisjuncts
    relaxationBlock = relaxedDisjuncts[len(relaxedDisjuncts)]
    # forward map (relaxation block -> source objects)
    relaxationBlockInfo = relaxationBlock._gdp_transformation_info = {
        'src': obj,
        'srcVars': ComponentMap(),
        'srcConstraints': ComponentMap(),
        'boundConstraintToSrcVar': ComponentMap(),
    }
    # reverse map (source disjunct -> relaxation artifacts)
    infodict['chull'] = chull = {
        'relaxationBlock': relaxationBlock,
        'relaxedConstraints': ComponentMap(),
        'disaggregatedVars': ComponentMap(),
        'bigmConstraints': ComponentMap(),
    }

    # if this is a disjunctData from an indexed disjunct, we are
    # going to want to check at the end that the container is
    # deactivated if everything in it is. So we save it in our
    # dictionary of things to check if it isn't there already.
    disjParent = obj.parent_component()
    if disjParent.is_indexed() and \
       disjParent not in transBlock.disjContainers:
        transBlock.disjContainers.add(disjParent)

    # add the disaggregated variables and their bigm constraints
    # to the relaxationBlock
    for var in varSet:
        lb = var.lb
        ub = var.ub
        if lb is None or ub is None:
            raise GDP_Error("Variables that appear in disjuncts must be "
                            "bounded in order to use the chull "
                            "transformation! Missing bound for %s."
                            % (var.name))
        # Disaggregated variable bounds are widened to include 0 so the
        # variable can vanish when the indicator is 0.
        disaggregatedVar = Var(within=Reals,
                               bounds=(min(0, lb), max(0, ub)),
                               initialize=var.value)
        # naming conflicts are possible here since this is a bunch
        # of variables from different blocks coming together, so we
        # get a unique name
        disaggregatedVarName = unique_component_name(
            relaxationBlock, var.local_name)
        relaxationBlock.add_component(
            disaggregatedVarName, disaggregatedVar)
        chull['disaggregatedVars'][var] = disaggregatedVar
        relaxationBlockInfo['srcVars'][disaggregatedVar] = var

        # indicator-scaled bound constraints:
        #   y*lb <= disaggregated var <= y*ub
        bigmConstraint = Constraint(transBlock.lbub)
        relaxationBlock.add_component(
            disaggregatedVarName + "_bounds", bigmConstraint)
        # NOTE: truthiness check — a bound of exactly 0 adds no constraint
        # (the disaggregated variable's own bounds already cover it).
        if lb:
            bigmConstraint.add(
                'lb', obj.indicator_var*lb <= disaggregatedVar)
        if ub:
            bigmConstraint.add(
                'ub', disaggregatedVar <= obj.indicator_var*ub)
        chull['bigmConstraints'][var] = bigmConstraint
        relaxationBlockInfo['boundConstraintToSrcVar'][bigmConstraint] = var

    for var in localVars:
        lb = var.lb
        ub = var.ub
        if lb is None or ub is None:
            raise GDP_Error("Variables that appear in disjuncts must be "
                            "bounded in order to use the chull "
                            "transformation! Missing bound for %s."
                            % (var.name))
        # Widen the local variable's own bounds to include 0 so that it
        # can take the value 0 when the indicator variable is 0.
        if value(lb) > 0:
            var.setlb(0)
        if value(ub) < 0:
            var.setub(0)

        # naming conflicts are possible here since this is a bunch
        # of variables from different blocks coming together, so we
        # get a unique name
        conName = unique_component_name(
            relaxationBlock, var.local_name+"_bounds")
        bigmConstraint = Constraint(transBlock.lbub)
        relaxationBlock.add_component(conName, bigmConstraint)
        bigmConstraint.add('lb', obj.indicator_var*lb <= var)
        bigmConstraint.add('ub', var <= obj.indicator_var*ub)
        chull['bigmConstraints'][var] = bigmConstraint
        relaxationBlockInfo['boundConstraintToSrcVar'][bigmConstraint] = var

    # substitution maps keyed by id() of the original variable objects:
    # original var -> disaggregated var, and original var -> 0
    var_substitute_map = dict((id(v), newV) for v, newV in
                              iteritems(chull['disaggregatedVars']))
    zero_substitute_map = dict((id(v), ZeroConstant) for v, newV in
                               iteritems(chull['disaggregatedVars']))
    # local vars are not disaggregated, but still map to 0 at the origin
    zero_substitute_map.update((id(v), ZeroConstant)
                               for v in localVars)

    # Transform each component within this disjunct
    self._transform_block_components(obj, obj, infodict, var_substitute_map,
                                     zero_substitute_map)

    # deactivate disjunct so we know we've relaxed it
    obj._deactivate_without_fixing_indicator()
    infodict['relaxed'] = True
def _xform_constraint(self, obj, disjunct, infodict, var_substitute_map,
                      zero_substitute_map):
    """Relax one constraint from a disjunct using the chull reformulation.

    For each active ConstraintData in ``obj``, substitutes the original
    variables with their disaggregated counterparts (scaled by the
    indicator variable ``y`` using the configured perspective-function
    mode for nonlinear bodies) and adds the resulting 'lb'/'ub'/'eq'
    constraints to the relaxation block.

    Args:
        obj: the (possibly indexed) Constraint being transformed.
        disjunct: the DisjunctData that owns ``obj`` (supplies
            ``indicator_var``).
        infodict: the disjunct's ``_gdp_transformation_info`` dict; its
            'chull' entry must already exist.
        var_substitute_map: id(original var) -> disaggregated var.
        zero_substitute_map: id(original var) -> 0 constant, used to
            evaluate the body at the origin (h_0).

    Raises:
        RuntimeError: if the configured perspective function mode is
            unrecognized.
    """
    # we will put a new transformed constraint on the relaxation block.
    relaxationBlock = infodict['chull']['relaxationBlock']
    transBlock = relaxationBlock.parent_block()

    # Though rare, it is possible to get naming conflicts here
    # since constraints from all blocks are getting moved onto the
    # same block. So we get a unique name
    name = unique_component_name(relaxationBlock, obj.name)

    if obj.is_indexed():
        try:
            newConstraint = Constraint(obj.index_set(), transBlock.lbub)
        # Fixed: was a bare "except:", which would also swallow
        # KeyboardInterrupt/SystemExit; TypeError is what a non-concrete
        # index set raises here (matches the bigm transformation).
        except TypeError:
            # The original constraint may have been indexed by a
            # non-concrete set (like an Any). We will give up on
            # strict index verification and just blindly proceed.
            newConstraint = Constraint(Any)
    else:
        newConstraint = Constraint(transBlock.lbub)
    relaxationBlock.add_component(name, newConstraint)
    # add mapping of original constraint to transformed constraint
    # in transformation info dictionary
    infodict['chull']['relaxedConstraints'][obj] = newConstraint
    # add mapping of transformed constraint back to original constraint (we
    # know that the info dict is already created because this only got
    # called if we were transforming a disjunct...)
    relaxationBlock._gdp_transformation_info['srcConstraints'][
        newConstraint] = obj

    for i in sorted(iterkeys(obj)):
        c = obj[i]
        if not c.active:
            continue

        NL = c.body.polynomial_degree() not in (0,1)
        EPS = self._config.EPS
        mode = self._config.perspective_function

        # We need to evaluate the expression at the origin *before*
        # we substitute the expression variables with the
        # disaggregated variables
        if not NL or mode == "FurmanSawayaGrossmann":
            h_0 = clone_without_expression_components(
                c.body, substitute=zero_substitute_map)

        y = disjunct.indicator_var
        if NL:
            if mode == "LeeGrossmann":
                # classic perspective: y * h(v/y)
                sub_expr = clone_without_expression_components(
                    c.body,
                    substitute=dict(
                        (var, subs/y)
                        for var, subs in iteritems(var_substitute_map))
                )
                expr = sub_expr * y
            elif mode == "GrossmannLee":
                # epsilon-regularized perspective: (y+eps) * h(v/(y+eps))
                sub_expr = clone_without_expression_components(
                    c.body,
                    substitute=dict(
                        (var, subs/(y + EPS))
                        for var, subs in iteritems(var_substitute_map))
                )
                expr = (y + EPS) * sub_expr
            elif mode == "FurmanSawayaGrossmann":
                # exact-at-y=0/1 approximation using h_0
                sub_expr = clone_without_expression_components(
                    c.body,
                    substitute=dict(
                        (var, subs/((1 - EPS)*y + EPS))
                        for var, subs in iteritems(var_substitute_map))
                )
                expr = ((1-EPS)*y + EPS)*sub_expr - EPS*h_0*(1-y)
            else:
                raise RuntimeError("Unknown NL CHull mode")
        else:
            expr = clone_without_expression_components(
                c.body, substitute=var_substitute_map)

        if c.equality:
            if NL:
                newConsExpr = expr == c.lower*y
            else:
                v = list(EXPR.identify_variables(expr))
                if len(v) == 1 and not c.lower:
                    # Setting a variable to 0 in a disjunct is
                    # *very* common. We should recognize that in
                    # that structure, the disaggregated variable
                    # will also be fixed to 0.
                    v[0].fix(0)
                    continue
                newConsExpr = expr - (1-y)*h_0 == c.lower*y

            if obj.is_indexed():
                newConstraint.add((i, 'eq'), newConsExpr)
            else:
                newConstraint.add('eq', newConsExpr)
            continue

        if c.lower is not None:
            # TODO: At the moment there is no reason for this to be in both
            # lower and upper... I think there could be though if I say what
            # the new constraint is going to be or something.
            if __debug__ and logger.isEnabledFor(logging.DEBUG):
                logger.debug("GDP(cHull): Transforming constraint " +
                             "'%s'", c.name)
            if NL:
                newConsExpr = expr >= c.lower*y
            else:
                newConsExpr = expr - (1-y)*h_0 >= c.lower*y

            if obj.is_indexed():
                newConstraint.add((i, 'lb'), newConsExpr)
            else:
                newConstraint.add('lb', newConsExpr)

        if c.upper is not None:
            if __debug__ and logger.isEnabledFor(logging.DEBUG):
                logger.debug("GDP(cHull): Transforming constraint " +
                             "'%s'", c.name)
            if NL:
                newConsExpr = expr <= c.upper*y
            else:
                newConsExpr = expr - (1-y)*h_0 <= c.upper*y

            if obj.is_indexed():
                newConstraint.add((i, 'ub'), newConsExpr)
            else:
                newConstraint.add('ub', newConsExpr)
def _apply_to(self, model, **kwds):
    """Apply the transformation to the given model.

    Reformulates every free, bounded, non-negative integer variable on
    the model (including inside Disjuncts) as a weighted sum of new
    binary variables, then relaxes the original variable's domain to
    NonNegativeReals. Reformulation artifacts live on a new Block added
    to the model under a unique name.

    Raises:
        ValueError: if an integer variable is unbounded or can be
            negative.
    """
    config = self.CONFIG(kwds.pop('options', {}))
    config.set_value(kwds)

    integer_vars = list(
        v for v in model.component_data_objects(
            ctype=Var, descend_into=(Block, Disjunct))
        if v.is_integer() and not v.fixed)
    if len(integer_vars) == 0:
        logger.info(
            "Model has no free integer variables. No reformulation needed.")
        return

    # collect the integer variables that actually appear in active
    # constraints (so unused ones can optionally be skipped)
    vars_on_constr = ComponentSet()
    for c in model.component_data_objects(
            ctype=Constraint, descend_into=(Block, Disjunct), active=True):
        vars_on_constr.update(
            v for v in identify_variables(c.body, include_fixed=False)
            if v.is_integer())

    if config.ignore_unused:
        num_vars_not_on_constr = len(integer_vars) - len(vars_on_constr)
        if num_vars_not_on_constr > 0:
            # Fixed: the %s placeholder previously had no argument, so the
            # literal "%s" was logged instead of the count.
            logger.info(
                "%s integer variables on the model are not attached to any constraints. "
                "Ignoring unused variables."
                % num_vars_not_on_constr)
        integer_vars = list(vars_on_constr)

    logger.info(
        "Reformulating integer variables using the %s strategy."
        % config.strategy)

    # Set up reformulation block
    blk_name = unique_component_name(model, "_int_to_binary_reform")
    reform_block = Block(
        doc="Holds variables and constraints for reformulating "
            "integer variables to binary variables."
    )
    setattr(model, blk_name, reform_block)

    reform_block.int_var_set = RangeSet(0, len(integer_vars) - 1)

    reform_block.new_binary_var = Var(
        Any, domain=Binary, dense=False,
        doc="Binary variable with index (int_var_idx, idx)")
    reform_block.integer_to_binary_constraint = Constraint(
        reform_block.int_var_set,
        doc="Equality constraints mapping the binary variable values "
            "to the integer variable value.")

    # check that variables are bounded and non-negative
    for idx, int_var in enumerate(integer_vars):
        if not (int_var.has_lb() and int_var.has_ub()):
            raise ValueError(
                "Integer variable %s is missing an "
                "upper or lower bound. LB: %s; UB: %s. "
                "Integer to binary reformulation does not support unbounded integer variables."
                % (int_var.name, int_var.lb, int_var.ub))
        if int_var.lb < 0:
            raise ValueError(
                "Integer variable %s can be negative. "
                "Integer to binary reformulation currently only supports non-negative integer "
                "variables." % (int_var.name,)
            )
        # do the reformulation
        highest_power = int(floor(log(value(int_var.ub), 2)))
        # TODO potentially fragile due to floating point

        # int_var == sum_p b[idx, p] * 2**p  (binary expansion)
        reform_block.integer_to_binary_constraint.add(
            idx, expr=int_var == sum(
                reform_block.new_binary_var[idx, pwr] * (2 ** pwr)
                for pwr in range(0, highest_power + 1)))

        # Relax the original integer variable
        int_var.domain = NonNegativeReals

    logger.info(
        "Reformulated %s integer variables using "
        "%s binary variables and %s constraints."
        % (len(integer_vars), len(reform_block.new_binary_var),
           len(reform_block.integer_to_binary_constraint)))
def _validate_and_expand_port_set(self, ports):
    """Validate that all ports in a set match, filling in "auto" vars.

    Builds a reference map of every variable name appearing on any port,
    verifies that all other ports carry compatible entries (same
    indexedness, same length, same index set, same rule), and creates
    replicated variables on the parent blocks of empty/partial ports so
    that every port exposes every field.

    Args:
        ports: iterable of Port objects to validate against each other.

    Returns:
        dict mapping var name -> (var, length-or--1, owning port, rule).
        Empty if no port has any assigned variables (a warning is logged).

    Raises:
        ValueError: on any mismatch between ports in the set.
    """
    ref = {}
    # First, go through the ports and get the superset of all fields
    for p in ports:
        for k, v in iteritems(p.vars):
            if k in ref:
                # We have already seen this var
                continue
            if v is None:
                # This is an implicit var
                continue
            # OK: New var, so add it to the reference list
            # (-1 length flags a non-indexed var)
            _len = (
                -1 if not v.is_indexed()
                else len(v))
            ref[k] = (v, _len, p, p.rule_for(k))

    if not ref:
        # Fixed: previously iterated itervalues(ports), which raises
        # AttributeError for the list/tuple of ports this method receives
        # (every other loop here iterates ports directly).
        logger.warning(
            "Cannot identify a reference port: no ports "
            "in the port set have assigned variables:\n\t(%s)"
            % ', '.join(sorted(p.name for p in ports)))
        return ref

    # Now make sure that ports match
    empty_or_partial = []
    for p in ports:
        p_is_partial = False
        if not p.vars:
            # This is an empty port and should be defined with
            # "auto" vars
            empty_or_partial.append(p)
            continue

        for k, v in iteritems(ref):
            if k not in p.vars:
                raise ValueError(
                    "Port mismatch: Port '%s' missing variable "
                    "'%s' (appearing in reference port '%s')" %
                    (p.name, k, v[2].name))
            _v = p.vars[k]
            if _v is None:
                if not p_is_partial:
                    empty_or_partial.append(p)
                    p_is_partial = True
                continue
            _len = (
                -1 if not _v.is_indexed()
                else len(_v))
            # XOR: exactly one of the two is indexed
            if (_len >= 0) ^ (v[1] >= 0):
                raise ValueError(
                    "Port mismatch: Port variable '%s' mixing "
                    "indexed and non-indexed targets on ports '%s' "
                    "and '%s'" %
                    (k, v[2].name, p.name))
            if _len >= 0 and _len != v[1]:
                raise ValueError(
                    "Port mismatch: Port variable '%s' index "
                    "mismatch (%s elements in reference port '%s', "
                    "but %s elements in port '%s')" %
                    (k, v[1], v[2].name, _len, p.name))
            # symmetric difference of index sets must be empty
            if v[1] >= 0 and len(v[0].index_set() ^ _v.index_set()):
                raise ValueError(
                    "Port mismatch: Port variable '%s' has "
                    "mismatched indices on ports '%s' and '%s'" %
                    (k, v[2].name, p.name))
            if p.rule_for(k) is not v[3]:
                raise ValueError(
                    "Port mismatch: Port variable '%s' has "
                    "different rules on ports '%s' and '%s'" %
                    (k, v[2].name, p.name))

    # as we are adding things to the model, sort by key so that
    # the order things are added is deterministic
    sorted_refs = sorted(iteritems(ref))
    if len(empty_or_partial) > 1:
        # This is expensive (names aren't cheap), but does result in
        # a deterministic ordering
        empty_or_partial.sort(key=lambda x: x.getname(
            fully_qualified=True, name_buffer=self._name_buffer))

    # Fill in any empty ports
    for p in empty_or_partial:
        block = p.parent_block()
        for k, v in sorted_refs:
            if k in p.vars and p.vars[k] is not None:
                continue

            vname = unique_component_name(
                block,
                '%s_auto_%s' % (p.getname(
                    fully_qualified=True,
                    name_buffer=self._name_buffer), k))

            new_var = replicate_var(v[0], vname, block)

            # add this new variable to the port so that it has a rule
            p.add(new_var, k, rule=v[3])

    return ref
def _xform_constraint(self, obj, disjunct, infodict, bigMargs, suffix_list):
    """Relax one constraint from a disjunct using the big-M reformulation.

    For each active ConstraintData in ``obj``, resolves an M value (from
    the transformation arguments, then BigM suffixes, then estimation
    from variable bounds) and adds the relaxed
    ``lower <= body - M*(1-y)`` / ``body - M*(1-y) <= upper`` constraints
    to the relaxation block.

    Args:
        obj: the (possibly indexed) Constraint being transformed.
        disjunct: the DisjunctData that owns ``obj`` (supplies
            ``indicator_var``).
        infodict: the disjunct's ``_gdp_transformation_info`` dict; its
            'bigm' entry must already exist.
        bigMargs: the bigM argument mapping passed to the transformation.
        suffix_list: BigM Suffix components to consult.

    Raises:
        GDP_Error: if M is malformed or cannot be determined.
    """
    # add constraint to the transformation block, we'll transform it there.
    relaxationBlock = infodict['bigm']['relaxationBlock']
    transBlock = relaxationBlock.parent_block()

    # Though rare, it is possible to get naming conflicts here
    # since constraints from all blocks are getting moved onto the
    # same block. So we get a unique name
    name = unique_component_name(relaxationBlock, obj.name)

    if obj.is_indexed():
        try:
            newConstraint = Constraint(obj.index_set(), transBlock.lbub)
        except TypeError:
            # The original constraint may have been indexed by a
            # non-concrete set (like an Any). We will give up on
            # strict index verification and just blindly proceed.
            newConstraint = Constraint(Any)
    else:
        newConstraint = Constraint(transBlock.lbub)
    relaxationBlock.add_component(name, newConstraint)
    # add mapping of original constraint to transformed constraint
    # in transformation info dictionary
    infodict['bigm']['relaxedConstraints'][obj] = newConstraint
    # add mapping of transformed constraint back to original constraint (we
    # know that the info dict is already created because this only got
    # called if we were transforming a disjunct...)
    relaxationBlock._gdp_transformation_info['srcConstraints'][
        newConstraint] = obj

    for i in sorted(iterkeys(obj)):
        c = obj[i]
        if not c.active:
            continue

        # first, we see if an M value was specified in the arguments.
        # (This returns None if not)
        M = self._get_M_from_args(c, bigMargs)

        if __debug__ and logger.isEnabledFor(logging.DEBUG):
            logger.debug("GDP(BigM): The value for M for constraint %s "
                         "from the BigM argument is %s." % (obj.name,
                                                            str(M)))

        # if we didn't get something from args, try suffixes:
        if M is None:
            M = self._get_M_from_suffixes(c, suffix_list)

        if __debug__ and logger.isEnabledFor(logging.DEBUG):
            logger.debug("GDP(BigM): The value for M for constraint %s "
                         "after checking suffixes is %s." % (obj.name,
                                                             str(M)))

        # Normalize a scalar M to a (lower, upper) pair
        if not isinstance(M, (tuple, list)):
            if M is None:
                M = (None, None)
            else:
                try:
                    M = (-M, M)
                # Fixed: was a bare "except:", which also intercepts
                # KeyboardInterrupt/SystemExit before re-raising.
                except Exception:
                    logger.error("Error converting scalar M-value %s "
                                 "to (-M,M). Is %s not a numeric type?"
                                 % (M, type(M)))
                    raise
        if len(M) != 2:
            raise GDP_Error("Big-M %s for constraint %s is not of "
                            "length two. "
                            "Expected either a single value or "
                            "tuple or list of length two for M."
                            % (str(M), name))

        # Estimate any missing M from variable bounds
        if c.lower is not None and M[0] is None:
            M = (self._estimate_M(c.body, name)[0] - c.lower, M[1])
        if c.upper is not None and M[1] is None:
            M = (M[0], self._estimate_M(c.body, name)[1] - c.upper)

        if __debug__ and logger.isEnabledFor(logging.DEBUG):
            logger.debug("GDP(BigM): The value for M for constraint %s "
                         "after estimating (if needed) is %s."
                         % (obj.name, str(M)))

        # Handle indices for both SimpleConstraint and IndexedConstraint
        if i.__class__ is tuple:
            i_lb = i + ('lb',)
            i_ub = i + ('ub',)
        elif obj.is_indexed():
            i_lb = (i, 'lb',)
            i_ub = (i, 'ub',)
        else:
            i_lb = 'lb'
            i_ub = 'ub'

        if c.lower is not None:
            if M[0] is None:
                raise GDP_Error("Cannot relax disjunctive constraint %s "
                                "because M is not defined." % name)
            M_expr = M[0] * (1 - disjunct.indicator_var)
            newConstraint.add(i_lb, c.lower <= c.body - M_expr)
        if c.upper is not None:
            if M[1] is None:
                raise GDP_Error("Cannot relax disjunctive constraint %s "
                                "because M is not defined." % name)
            M_expr = M[1] * (1 - disjunct.indicator_var)
            newConstraint.add(i_ub, c.body - M_expr <= c.upper)
def _getDisjunctionConstraints(self, disjunction):
    """Return (and lazily create) the XOR/OR and disaggregation
    constraints for ``disjunction``, placed on its parent block.

    Both constraints are cached in the parent block's
    ``_gdp_transformation_info`` dict so repeated calls return the
    same components.

    Args:
        disjunction: the Disjunction component (never a DisjunctionData).

    Returns:
        tuple ``(orC, disaggregationConstraint)``.

    Raises:
        GDP_Error: if the parent block has a non-dict attribute named
            ``_gdp_transformation_info``.
    """
    # Put the disjunction constraint on its parent block
    # We never do this for just a DisjunctionData because we need
    # to know about the index set of its parent component. So if
    # we called this on a DisjunctionData, we did something wrong.
    assert isinstance(disjunction, Disjunction)
    parent = disjunction.parent_block()
    if hasattr(parent, "_gdp_transformation_info"):
        infodict = parent._gdp_transformation_info
        if type(infodict) is not dict:
            raise GDP_Error(
                "Component %s contains an attribute named "
                "_gdp_transformation_info. The transformation requires "
                "that it can create this attribute!" % parent.name)
        try:
            # On the off-chance that another GDP transformation went
            # first, the infodict may exist, but the specific map we
            # want will not be present
            orConstraintMap = infodict['disjunction_or_constraint']
        except KeyError:
            orConstraintMap = infodict['disjunction_or_constraint'] \
                              = ComponentMap()
        try:
            disaggregationConstraintMap = infodict[
                'disjunction_disaggregation_constraints']
        except KeyError:
            disaggregationConstraintMap = infodict[
                'disjunction_disaggregation_constraints'] \
                = ComponentMap()
    else:
        # first transformation to touch this block: create everything
        infodict = parent._gdp_transformation_info = {}
        orConstraintMap = infodict['disjunction_or_constraint'] \
                          = ComponentMap()
        disaggregationConstraintMap = infodict[
            'disjunction_disaggregation_constraints'] \
            = ComponentMap()

    if disjunction in disaggregationConstraintMap:
        disaggregationConstraint = disaggregationConstraintMap[disjunction]
    else:
        # add the disaggregation constraint
        disaggregationConstraint \
            = disaggregationConstraintMap[disjunction] = Constraint(Any)
        parent.add_component(
            unique_component_name(parent, '_gdp_chull_relaxation_' + \
                                  disjunction.name + '_disaggregation'),
            disaggregationConstraint)

    # If the Constraint already exists, return it
    if disjunction in orConstraintMap:
        orC = orConstraintMap[disjunction]
    else:
        # add the XOR (or OR) constraints to parent block (with
        # unique name) It's indexed if this is an
        # IndexedDisjunction, not otherwise
        orC = Constraint(disjunction.index_set()) if \
            disjunction.is_indexed() else Constraint()
        parent.add_component(
            unique_component_name(parent, '_gdp_chull_relaxation_' +
                                  disjunction.name + '_xor'),
            orC)
        orConstraintMap[disjunction] = orC

    return orC, disaggregationConstraint