def test_var_aggregate(self):
    """Test for transitivity in a variable equality set."""
    m = self.build_model()
    TransformationFactory('contrib.aggregate_vars').apply_to(m)
    z_to_vars = m._var_aggregator_info.z_to_vars
    var_to_z = m._var_aggregator_info.var_to_z
    z = m._var_aggregator_info.z
    self.assertEqual(z_to_vars[z[1]], ComponentSet([m.v3, m.v4]))
    self.assertEqual(z_to_vars[z[2]],
                     ComponentSet([m.x[1], m.x[2], m.x[3], m.x[4]]))
    self.assertEqual(z_to_vars[z[3]], ComponentSet([m.y[1], m.y[2]]))
    self.assertIs(var_to_z[m.v3], z[1])
    self.assertIs(var_to_z[m.v4], z[1])
    self.assertIs(var_to_z[m.x[1]], z[2])
    self.assertIs(var_to_z[m.x[2]], z[2])
    self.assertIs(var_to_z[m.x[3]], z[2])
    self.assertIs(var_to_z[m.x[4]], z[2])
    self.assertIs(var_to_z[m.y[1]], z[3])
    self.assertIs(var_to_z[m.y[2]], z[3])
    self.assertEqual(z[1].value, 2)
    self.assertEqual(z[1].lb, 2)
    self.assertEqual(z[1].ub, 4)
    self.assertEqual(z[3].value, 3.5)
def detect_unfixed_discrete_vars(model):
    """Detect unfixed discrete variables in use on the model."""
    var_set = ComponentSet()
    for constr in model.component_data_objects(
            Constraint, active=True, descend_into=True):
        var_set.update(
            v for v in EXPR.identify_variables(
                constr.body, include_fixed=False)
            if v.is_binary())
    return var_set
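
# A minimal usage sketch (hypothetical toy model; assumes the pyomo.environ
# symbols shown are available to the caller). Note that, as written, the
# helper only collects *binary* variables appearing in active constraints.
#
#     from pyomo.environ import ConcreteModel, Var, Constraint, Binary
#
#     m = ConcreteModel()
#     m.y = Var(domain=Binary)
#     m.x = Var()
#     m.c = Constraint(expr=m.x + m.y <= 1)
#     unfixed = detect_unfixed_discrete_vars(m)  # ComponentSet([m.y])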
def visit(self, node, values):
    if node.__class__ is not EXPR.ExternalFunctionExpression:
        return node
    if id(node._fcn) not in self.efSet:
        return node
    # At this point we know this is an ExternalFunctionExpression
    # node that we want to replace with an auxiliary variable (y)
    new_args = []
    seen = ComponentSet()
    # TODO: support more than PythonCallbackFunctions
    assert isinstance(node._fcn, PythonCallbackFunction)
    #
    # Note: the first argument to PythonCallbackFunction is the
    # function ID.  Since we are going to complain about constant
    # parameters, we need to skip the first argument when processing
    # the argument list.  This is really not good: we should allow
    # for constant arguments to the functions, and we should relax
    # the restriction that the external functions implement the
    # PythonCallbackFunction API (that restriction leads to
    # unfortunate things later; i.e., accessing the private _fcn
    # attribute below).
    for arg in list(values)[1:]:
        if type(arg) in nonpyomo_leaf_types or arg.is_fixed():
            # We currently do not allow constants or parameters for
            # the external functions.
            raise RuntimeError(
                "TrustRegion does not support black boxes with "
                "constant or parameter inputs\n\tExpression: %s"
                % (node,))
        if arg.is_expression_type():
            # All expressions (including simple linear expressions)
            # are replaced with a single auxiliary variable (and
            # eventually an additional constraint equating the
            # auxiliary variable to the original expression)
            _x = self.trf.x.add()
            _x.set_value(value(arg))
            self.trf.conset.add(_x == arg)
            new_args.append(_x)
        else:
            # The only thing left is bare variables: check for
            # duplicates.
            if arg in seen:
                raise RuntimeError(
                    "TrustRegion does not support black boxes with "
                    "duplicate input arguments\n\tExpression: %s"
                    % (node,))
            seen.add(arg)
            new_args.append(arg)
    _y = self.trf.y.add()
    self.trf.external_fcns.append(node)
    self.trf.exfn_xvars.append(new_args)
    return _y
def _process_activated_container(blk):
    """Process a container object, returning the new components found."""
    new_fixed_true_disjuncts = ComponentSet(
        disj for disj in blk.component_data_objects(Disjunct, active=True)
        if disj.indicator_var.value == 1 and disj.indicator_var.fixed)
    new_activated_disjunctions = ComponentSet(
        blk.component_data_objects(Disjunction, active=True))
    new_activated_disjuncts = ComponentSet(
        disj for disjtn in new_activated_disjunctions
        for disj in _activated_disjuncts_in_disjunction(disjtn))
    new_activated_constraints = ComponentSet(
        blk.component_data_objects(Constraint, active=True))
    return (new_activated_disjunctions, new_fixed_true_disjuncts,
            new_activated_disjuncts, new_activated_constraints)
def _build_equality_set(model):
    """Construct an equality set map.

    Maps all variables to the set of variables that are linked to them by
    equality. Mapping takes place using id(). That is, if you have x = y,
    then you would have id(x) -> ComponentSet([x, y]) and
    id(y) -> ComponentSet([x, y]) in the mapping.

    """
    # Map of variables to their equality set (ComponentSet)
    eq_var_map = ComponentMap()

    # Loop through all the active constraints in the model
    for constraint in model.component_data_objects(
            ctype=Constraint, active=True, descend_into=True):
        eq_linked_vars = _get_equality_linked_variables(constraint)
        if not eq_linked_vars:
            continue  # if we get an empty tuple, skip to next constraint.
        v1, v2 = eq_linked_vars
        set1 = eq_var_map.get(v1, ComponentSet((v1, v2)))
        set2 = eq_var_map.get(v2, (v2,))

        # if set1 and set2 are equivalent, skip to next constraint.
        if set1 is set2:
            continue

        # add all elements of set2 to set 1
        set1.update(set2)
        # Update all elements to point to set 1
        for v in set1:
            eq_var_map[v] = set1

    return eq_var_map
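
# A small illustration of the mapping built above (hypothetical model;
# assumes _get_equality_linked_variables recognizes simple x == y
# constraints):
#
#     from pyomo.environ import ConcreteModel, Var, Constraint
#
#     m = ConcreteModel()
#     m.x = Var()
#     m.y = Var()
#     m.z = Var()
#     m.c1 = Constraint(expr=m.x == m.y)
#     m.c2 = Constraint(expr=m.y == m.z)
#     eq_map = _build_equality_set(m)
#     # eq_map[m.x], eq_map[m.y], and eq_map[m.z] all refer to the same
#     # ComponentSet([m.x, m.y, m.z]); variables that are not linked by
#     # any equality constraint do not appear in the map.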
def add_integer_cut(var_values, solve_data, config, feasible=False):
    """Add an integer cut to the linear GDP model."""
    m = solve_data.linear_GDP
    GDPopt = m.GDPopt_utils
    var_value_is_one = ComponentSet()
    var_value_is_zero = ComponentSet()
    for var, val in zip(GDPopt.working_var_list, var_values):
        if not var.is_binary():
            continue
        if var.fixed:
            if val is not None and var.value != val:
                # val needs to be None or match var.value. Otherwise, we
                # have a contradiction.
                raise ValueError(
                    "Fixed variable %s has value %s != "
                    "provided value of %s." % (var.name, var.value, val))
            val = var.value
        # TODO we can also add a check to skip binary variables that are
        # not an indicator_var on disjuncts.
        if fabs(val - 1) <= config.integer_tolerance:
            var_value_is_one.add(var)
        elif fabs(val) <= config.integer_tolerance:
            var_value_is_zero.add(var)
        else:
            raise ValueError(
                'Binary %s = %s is not 0 or 1' % (var.name, val))

    if not (var_value_is_one or var_value_is_zero):
        # if no remaining binary variables, then terminate algorithm.
        config.logger.info(
            'Adding integer cut to a model without binary variables. '
            'Model is now infeasible.')
        if solve_data.objective_sense == minimize:
            solve_data.LB = float('inf')
        else:
            solve_data.UB = float('-inf')
        return False

    int_cut = (sum(1 - v for v in var_value_is_one) +
               sum(v for v in var_value_is_zero)) >= 1

    if not feasible:
        config.logger.info('Adding integer cut')
        GDPopt.integer_cuts.add(expr=int_cut)
    else:
        backtracking_enabled = (
            "disabled" if GDPopt.no_backtracking.active else "allowed")
        config.logger.info(
            'Registering explored configuration. '
            'Backtracking is currently %s.' % backtracking_enabled)
        GDPopt.no_backtracking.add(expr=int_cut)
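
# The cut built above is the standard "no-good" integer cut for the
# current binary assignment: with B1 the binaries currently at 1 and B0
# those currently at 0,
#
#     sum(1 - y for y in B1) + sum(y for y in B0) >= 1
#
# is violated only by that exact assignment, so adding it to the linear
# GDP model excludes the current configuration from being revisited
# (or, when feasible=True, records it on GDPopt.no_backtracking instead).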
def test_equality_set(self):
    """Test for equality set map generation."""
    m = self.build_model()
    eq_var_map = _build_equality_set(m)
    self.assertIsNone(eq_var_map.get(m.z1, None))
    self.assertIsNone(eq_var_map.get(m.v1, None))
    self.assertIsNone(eq_var_map.get(m.v2, None))
    self.assertEqual(eq_var_map[m.v3], ComponentSet([m.v3, m.v4]))
    self.assertEqual(eq_var_map[m.v4], ComponentSet([m.v3, m.v4]))
    self.assertEqual(eq_var_map[m.x[1]],
                     ComponentSet([m.x[1], m.x[2], m.x[3], m.x[4]]))
    self.assertEqual(eq_var_map[m.x[2]],
                     ComponentSet([m.x[1], m.x[2], m.x[3], m.x[4]]))
    self.assertEqual(eq_var_map[m.x[3]],
                     ComponentSet([m.x[1], m.x[2], m.x[3], m.x[4]]))
    self.assertEqual(eq_var_map[m.x[4]],
                     ComponentSet([m.x[1], m.x[2], m.x[3], m.x[4]]))
    self.assertEqual(eq_var_map[m.y[1]], ComponentSet([m.y[1], m.y[2]]))
    self.assertEqual(eq_var_map[m.y[2]], ComponentSet([m.y[1], m.y[2]]))
def build_ordered_component_lists(model, prefix='working'):
    """Define lists used for future data transfer."""
    GDPopt = model.GDPopt_utils
    var_set = ComponentSet()
    setattr(
        GDPopt, '%s_constraints_list' % prefix, list(
            model.component_data_objects(
                ctype=Constraint, active=True,
                descend_into=(Block, Disjunct))))
    setattr(
        GDPopt, '%s_disjuncts_list' % prefix, list(
            model.component_data_objects(
                ctype=Disjunct, descend_into=(Block, Disjunct))))
    setattr(
        GDPopt, '%s_disjunctions_list' % prefix, list(
            model.component_data_objects(
                ctype=Disjunction, active=True,
                descend_into=(Disjunct, Block))))

    # Identify the non-fixed variables in (potentially) active constraints
    for constr in getattr(GDPopt, '%s_constraints_list' % prefix):
        for v in EXPR.identify_variables(constr.body, include_fixed=False):
            var_set.add(v)
    # Disjunct indicator variables might not appear in active constraints.
    # In fact, if we consider them Logical variables, they should not
    # appear in active algebraic constraints. For now, they need to be
    # added to the variable set.
    for disj in getattr(GDPopt, '%s_disjuncts_list' % prefix):
        var_set.add(disj.indicator_var)

    # We use component_data_objects rather than list(var_set) in order to
    # preserve a deterministic ordering.
    setattr(
        GDPopt, '%s_var_list' % prefix, list(
            v for v in model.component_data_objects(
                ctype=Var, descend_into=(Block, Disjunct))
            if v in var_set))
    setattr(
        GDPopt, '%s_nonlinear_constraints' % prefix, [
            v for v in getattr(GDPopt, '%s_constraints_list' % prefix)
            if v.body.polynomial_degree() not in (0, 1)])
def _transformDisjunctionData(self, obj, transBlock, index):
    # Convex hull doesn't work if this is an or constraint. So if
    # xor is false, give up
    if not obj.xor:
        raise GDP_Error(
            "Cannot do convex hull transformation for disjunction %s "
            "with or constraint. Must be an xor!" % obj.name)

    parent_component = obj.parent_component()
    transBlock.disjContainers.add(parent_component)
    orConstraint, disaggregationConstraint \
        = self._getDisjunctionConstraints(parent_component)

    # We first go through and collect all the variables that we
    # are going to disaggregate.
    varOrder_set = ComponentSet()
    varOrder = []
    varsByDisjunct = ComponentMap()
    for disjunct in obj.disjuncts:
        # This is crazy, but if the disjunct has been previously
        # relaxed, the disjunct *could* be deactivated.
        not_active = not disjunct.active
        if not_active:
            disjunct._activate_without_unfixing_indicator()
        try:
            disjunctVars = varsByDisjunct[disjunct] = ComponentSet()
            for cons in disjunct.component_data_objects(
                    Constraint, active=True,
                    sort=SortComponents.deterministic,
                    descend_into=Block):
                # we aren't going to disaggregate fixed variables.
                # This means there is trouble if they are unfixed
                # later...
                for var in EXPR.identify_variables(
                        cons.body, include_fixed=False):
                    # Note the use of a list so that we will
                    # eventually disaggregate the vars in a
                    # deterministic order (the order that we found
                    # them)
                    disjunctVars.add(var)
                    if var not in varOrder_set:
                        varOrder.append(var)
                        varOrder_set.add(var)
        finally:
            if not_active:
                disjunct._deactivate_without_fixing_indicator()

    # We will only disaggregate variables that
    #  1) appear in multiple disjuncts, or
    #  2) are not contained in this disjunct, or
    #  3) are not themselves disaggregated variables
    varSet = []
    localVars = ComponentMap((d, []) for d in obj.disjuncts)
    for var in varOrder:
        disjuncts = [d for d in varsByDisjunct if var in varsByDisjunct[d]]
        if len(disjuncts) > 1:
            varSet.append(var)
        elif self._contained_in(var, disjuncts[0]):
            localVars[disjuncts[0]].append(var)
        elif self._contained_in(var, transBlock):
            # There is nothing to do here: these are already
            # disaggregated vars that can/will be forced to 0 when
            # their disjunct is not active.
            pass
        else:
            varSet.append(var)

    # Now that we know who we need to disaggregate, we will do it
    # while we also transform the disjuncts.
    or_expr = 0
    for disjunct in obj.disjuncts:
        or_expr += disjunct.indicator_var
        self._transform_disjunct(disjunct, transBlock, varSet,
                                 localVars[disjunct])
    orConstraint.add(index, (or_expr, 1))

    for i, var in enumerate(varSet):
        disaggregatedExpr = 0
        for disjunct in obj.disjuncts:
            if 'chull' not in disjunct._gdp_transformation_info:
                if not disjunct.indicator_var.is_fixed() \
                        or value(disjunct.indicator_var) != 0:
                    raise RuntimeError(
                        "GDP chull: disjunct was not relaxed, but "
                        "does not appear to be correctly deactivated.")
                continue
            disaggregatedVar = disjunct._gdp_transformation_info['chull'][
                'disaggregatedVars'][var]
            disaggregatedExpr += disaggregatedVar
        if type(index) is tuple:
            consIdx = index + (i,)
        elif parent_component.is_indexed():
            consIdx = (index,) + (i,)
        else:
            consIdx = i

        disaggregationConstraint.add(consIdx, var == disaggregatedExpr)
def _apply_to(self, instance, **kwds):
    self._config = self.CONFIG(kwds.pop('options', {}))
    self._config.set_value(kwds)

    # make a transformation block
    transBlockName = unique_component_name(
        instance, '_pyomo_gdp_chull_relaxation')
    transBlock = Block()
    instance.add_component(transBlockName, transBlock)
    transBlock.relaxedDisjuncts = Block(Any)
    transBlock.lbub = Set(initialize=['lb', 'ub', 'eq'])
    transBlock.disjContainers = ComponentSet()

    targets = self._config.targets
    if targets is None:
        targets = (instance, )
        _HACK_transform_whole_instance = True
    else:
        _HACK_transform_whole_instance = False
    for _t in targets:
        t = _t.find_component(instance)
        if t is None:
            raise GDP_Error(
                "Target %s is not a component on the instance!" % _t)

        if t.type() is Disjunction:
            if t.parent_component() is t:
                self._transformDisjunction(t, transBlock)
            else:
                self._transformDisjunctionData(t, transBlock, t.index())
        elif t.type() in (Block, Disjunct):
            if t.parent_component() is t:
                self._transformBlock(t, transBlock)
            else:
                self._transformBlockData(t, transBlock)
        else:
            raise GDP_Error(
                "Target %s was not a Block, Disjunct, or Disjunction. "
                "It was of type %s and can't be transformed"
                % (t.name, type(t)))

    # Go through our set of indexed containers and deactivate the ones
    # that don't have any active members inside of them, so that the
    # invalid component logic will tell us if we missed something getting
    # transformed.
    for obj in transBlock.disjContainers:
        if not obj.active:
            continue
        for i in obj:
            if obj[i].active:
                break
        else:
            # HACK due to active flag implementation.
            #
            # Ideally we would not have to do any of this (an
            # ActiveIndexedComponent would get its active status by
            # querying the active status of all the contained Data
            # objects).  As a fallback, we would like to call:
            #
            #     obj._deactivate_without_fixing_indicator()
            #
            # However, the straightforward implementation of that
            # method would have unintended side effects (fixing the
            # contained _DisjunctData's indicator_vars!) due to our
            # class hierarchy.  Instead, we will directly call the
            # relevant base class (safe-ish since we are verifying
            # that all the contained _DisjunctionData are
            # deactivated directly above).
            ActiveComponent.deactivate(obj)

    # HACK for backwards compatibility with the older GDP transformations
    #
    # Until the writers are updated to find variables on things other
    # than active blocks, we need to reclassify the Disjuncts as Blocks
    # after transformation so that the writer will pick up all the
    # variables that it needs (in this case, indicator_vars).
    if _HACK_transform_whole_instance:
        HACK_GDP_Disjunct_Reclassifier().apply_to(instance)
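
# Typical entry point (a sketch; assumes this transformation is registered
# with the TransformationFactory under the 'gdp.chull' name, consistent
# with the '_pyomo_gdp_chull_relaxation' block name above):
#
#     from pyomo.environ import TransformationFactory
#
#     TransformationFactory('gdp.chull').apply_to(model)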
def _apply_to(self, model, detect_fixed_vars=True):
    """Apply the transformation to the given model."""
    # Generate the equality sets
    eq_var_map = _build_equality_set(model)

    # Detect and process fixed variables.
    if detect_fixed_vars:
        _fix_equality_fixed_variables(model)

    # Generate aggregation infrastructure
    model._var_aggregator_info = Block(
        doc="Holds information for the variable aggregation "
        "transformation system.")
    z = model._var_aggregator_info.z = VarList(doc="Aggregated variables.")
    # Map of the aggregate var to the equality set (ComponentSet)
    z_to_vars = model._var_aggregator_info.z_to_vars = ComponentMap()
    # Map of variables to their corresponding aggregate var
    var_to_z = model._var_aggregator_info.var_to_z = ComponentMap()
    processed_vars = ComponentSet()

    # TODO This iteration is sorted by the variable name of the key in
    # order to preserve determinism. Unfortunately, var.name is an
    # expensive operation right now.
    for var, eq_set in sorted(eq_var_map.items(),
                              key=lambda tup: tup[0].name):
        if var in processed_vars:
            continue  # Skip already-processed variables

        # This would be weird. The variable hasn't been processed, but
        # is in the map. Raise an exception.
        assert var_to_z.get(var, None) is None

        z_agg = z.add()
        z_to_vars[z_agg] = eq_set
        var_to_z.update(ComponentMap((v, z_agg) for v in eq_set))

        # Set the bounds of the aggregate variable based on the bounds
        # of the variables in its equality set.
        z_agg.setlb(max_if_not_None(v.lb for v in eq_set if v.has_lb()))
        z_agg.setub(min_if_not_None(v.ub for v in eq_set if v.has_ub()))

        # Set the fixed status of the aggregate var
        fixed_vars = [v for v in eq_set if v.fixed]
        if fixed_vars:
            # Check to make sure all the fixed values are the same.
            if any(var.value != fixed_vars[0].value
                   for var in fixed_vars[1:]):
                raise ValueError(
                    "Aggregate variable for equality set is fixed to "
                    "multiple different values: %s" % (fixed_vars,))
            z_agg.fix(fixed_vars[0].value)

            # Check that the fixed value lies within bounds.
            if z_agg.has_lb() and z_agg.value < value(z_agg.lb):
                raise ValueError(
                    "Aggregate variable for equality set is fixed to "
                    "a value less than its lower bound: %s < LB %s"
                    % (z_agg.value, value(z_agg.lb)))
            if z_agg.has_ub() and z_agg.value > value(z_agg.ub):
                raise ValueError(
                    "Aggregate variable for equality set is fixed to "
                    "a value greater than its upper bound: %s > UB %s"
                    % (z_agg.value, value(z_agg.ub)))
        else:
            # Set the value to be the average of the values within the
            # bounds only if the value is not already fixed.
            values_within_bounds = [
                v.value for v in eq_set if (
                    v.value is not None and
                    ((z_agg.has_lb() and v.value >= value(z_agg.lb))
                     or not z_agg.has_lb()) and
                    ((z_agg.has_ub() and v.value <= value(z_agg.ub))
                     or not z_agg.has_ub()))]
            num_vals = len(values_within_bounds)
            z_agg.value = (
                sum(val for val in values_within_bounds) / num_vals) \
                if num_vals > 0 else None

        processed_vars.update(eq_set)

    # Do the substitution
    substitution_map = {id(var): z_var for var, z_var in var_to_z.items()}
    for constr in model.component_data_objects(
            ctype=Constraint, active=True):
        new_body = ExpressionReplacementVisitor(
            substitute=substitution_map).dfs_postorder_stack(constr.body)
        constr.set_value((constr.lower, new_body, constr.upper))

    for objective in model.component_data_objects(
            ctype=Objective, active=True):
        new_expr = ExpressionReplacementVisitor(
            substitute=substitution_map).dfs_postorder_stack(objective.expr)
        objective.set_value(new_expr)
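
# Usage sketch, consistent with the test_var_aggregate test above (assumes
# the transformation is registered as 'contrib.aggregate_vars'):
#
#     from pyomo.environ import TransformationFactory
#
#     TransformationFactory('contrib.aggregate_vars').apply_to(model)
#     info = model._var_aggregator_info
#     # info.z is the VarList of aggregate variables, info.var_to_z maps
#     # each original variable to its aggregate, and info.z_to_vars maps
#     # each aggregate back to its equality set.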
def build_model_size_report(model):
    """Build a model size report object."""
    report = ModelSizeReport()
    activated_disjunctions = ComponentSet()
    activated_disjuncts = ComponentSet()
    fixed_true_disjuncts = ComponentSet()
    activated_constraints = ComponentSet()
    activated_vars = ComponentSet()
    new_containers = (model,)

    while new_containers:
        new_activated_disjunctions = ComponentSet()
        new_activated_disjuncts = ComponentSet()
        new_fixed_true_disjuncts = ComponentSet()
        new_activated_constraints = ComponentSet()

        for container in new_containers:
            (next_activated_disjunctions,
             next_fixed_true_disjuncts,
             next_activated_disjuncts,
             next_activated_constraints
             ) = _process_activated_container(container)
            new_activated_disjunctions.update(next_activated_disjunctions)
            new_activated_disjuncts.update(next_activated_disjuncts)
            new_fixed_true_disjuncts.update(next_fixed_true_disjuncts)
            new_activated_constraints.update(next_activated_constraints)

        new_containers = ((new_activated_disjuncts - activated_disjuncts) |
                          (new_fixed_true_disjuncts - fixed_true_disjuncts))

        activated_disjunctions.update(new_activated_disjunctions)
        activated_disjuncts.update(new_activated_disjuncts)
        fixed_true_disjuncts.update(new_fixed_true_disjuncts)
        activated_constraints.update(new_activated_constraints)
        activated_vars.update(
            var for constr in new_activated_constraints
            for var in EXPR.identify_variables(
                constr.body, include_fixed=False))
        activated_vars.update(
            disj.indicator_var for disj in activated_disjuncts)

    report.activated = Container()
    report.activated.variables = len(activated_vars)
    report.activated.binary_variables = sum(
        1 for v in activated_vars if v.is_binary())
    report.activated.integer_variables = sum(
        1 for v in activated_vars if v.is_integer())
    report.activated.continuous_variables = sum(
        1 for v in activated_vars if v.is_continuous())
    report.activated.disjunctions = len(activated_disjunctions)
    report.activated.disjuncts = len(activated_disjuncts)
    report.activated.constraints = len(activated_constraints)
    report.activated.nonlinear_constraints = sum(
        1 for c in activated_constraints
        if c.body.polynomial_degree() not in (1, 0))

    report.overall = Container()
    block_like = (Block, Disjunct)
    all_vars = ComponentSet(
        model.component_data_objects(Var, descend_into=block_like))
    report.overall.variables = len(all_vars)
    report.overall.binary_variables = sum(
        1 for v in all_vars if v.is_binary())
    report.overall.integer_variables = sum(
        1 for v in all_vars if v.is_integer())
    report.overall.continuous_variables = sum(
        1 for v in all_vars if v.is_continuous())
    report.overall.disjunctions = sum(
        1 for d in model.component_data_objects(
            Disjunction, descend_into=block_like))
    report.overall.disjuncts = sum(
        1 for d in model.component_data_objects(
            Disjunct, descend_into=block_like))
    report.overall.constraints = sum(
        1 for c in model.component_data_objects(
            Constraint, descend_into=block_like))
    report.overall.nonlinear_constraints = sum(
        1 for c in model.component_data_objects(
            Constraint, descend_into=block_like)
        if c.body.polynomial_degree() not in (1, 0))

    report.warning = Container()
    report.warning.unassociated_disjuncts = sum(
        1 for d in model.component_data_objects(
            Disjunct, descend_into=block_like)
        if not d.indicator_var.fixed and d not in activated_disjuncts)

    return report
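
# Usage sketch (hypothetical model `m`); the report exposes the counters
# populated above under report.activated, report.overall, and
# report.warning:
#
#     report = build_model_size_report(m)
#     print(report.activated.constraints,
#           report.overall.binary_variables,
#           report.warning.unassociated_disjuncts)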
def add_outer_approximation_cuts(var_values, duals, solve_data, config):
    """Add outer approximation cuts to the linear GDP model."""
    m = solve_data.linear_GDP
    GDPopt = m.GDPopt_utils
    sign_adjust = -1 if GDPopt.objective.sense == minimize else 1

    # copy values over
    for var, val in zip(GDPopt.working_var_list, var_values):
        if val is not None and not var.fixed:
            var.value = val

    # TODO some kind of special handling if the dual is phenomenally small?
    config.logger.debug('Adding OA cuts.')
    nonlinear_constraints = ComponentSet(
        GDPopt.working_nonlinear_constraints)
    counter = 0
    for constr, dual_value in zip(GDPopt.working_constraints_list, duals):
        if dual_value is None or constr not in nonlinear_constraints:
            continue

        # Determine if the user pre-specified that OA cuts should not be
        # generated for the given constraint.
        parent_block = constr.parent_block()
        ignore_set = getattr(parent_block, 'GDPopt_ignore_OA', None)
        config.logger.debug('Ignore_set %s' % ignore_set)
        if (ignore_set and (constr in ignore_set or
                            constr.parent_component() in ignore_set)):
            config.logger.debug(
                'OA cut addition for %s skipped because it is in '
                'the ignore set.' % constr.name)
            continue

        config.logger.debug(
            "Adding OA cut for %s with dual value %s"
            % (constr.name, dual_value))

        # TODO make this more efficient by not having to use
        # differentiate() at each iteration.
        constr_vars = list(EXPR.identify_variables(constr.body))
        jac_list = differentiate(constr.body, wrt_list=constr_vars)
        jacobians = ComponentMap(zip(constr_vars, jac_list))

        # Create a block on which to put outer approximation cuts.
        oa_utils = parent_block.component('GDPopt_OA')
        if oa_utils is None:
            oa_utils = parent_block.GDPopt_OA = Block(
                doc="Block holding outer approximation cuts "
                "and associated data.")
            oa_utils.GDPopt_OA_cuts = ConstraintList()
            oa_utils.GDPopt_OA_slacks = VarList(
                bounds=(0, config.max_slack),
                domain=NonNegativeReals, initialize=0)

        oa_cuts = oa_utils.GDPopt_OA_cuts
        slack_var = oa_utils.GDPopt_OA_slacks.add()
        oa_cuts.add(
            expr=copysign(1, sign_adjust * dual_value) * (
                value(constr.body) + sum(
                    value(jacobians[var]) * (var - value(var))
                    for var in constr_vars)) + slack_var <= 0)
        counter += 1

    config.logger.info('Added %s OA cuts' % counter)
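
# Each cut added above is a first-order (outer) approximation of a
# nonlinear constraint body g(x) about the current point x_k:
#
#     copysign(1, sign_adjust * dual) * (
#         g(x_k) + sum_j [dg/dx_j(x_k)] * (x_j - x_k_j)) + slack <= 0
#
# where the sign comes from the constraint's dual value (adjusted for the
# objective sense) and the nonnegative slack variable, bounded by
# config.max_slack, keeps the linear relaxation from becoming infeasible.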
def _apply_to(self, instance, targets=None, **kwds):
    config = self.CONFIG().set_value(kwds.pop('options', {}))

    # For now, we're not accepting options. We will let args override
    # suffixes and estimate as a last resort. More specific args/suffixes
    # override ones anywhere in the tree. Suffixes lower down in the tree
    # override ones higher up.
    if 'default_bigM' in kwds:
        logger.warn("DEPRECATED: the 'default_bigM=' argument has been "
                    "replaced by 'bigM='")
        config.bigM = kwds.pop('default_bigM')

    config.set_value(kwds)
    bigM = config.bigM

    # make a transformation block to put transformed disjuncts on
    transBlockName = unique_component_name(
        instance, '_pyomo_gdp_bigm_relaxation')
    transBlock = Block()
    instance.add_component(transBlockName, transBlock)
    transBlock.relaxedDisjuncts = Block(Any)
    transBlock.lbub = Set(initialize=['lb', 'ub'])
    # this is a set for keeping track of IndexedDisjuncts and
    # IndexedDisjunctions so that, at the end of the transformation, we
    # can check that the ones with no active DisjunctData/DisjunctionData
    # members are deactivated.
    transBlock.disjContainers = ComponentSet()

    if targets is None:
        targets = (instance, )
        _HACK_transform_whole_instance = True
    else:
        _HACK_transform_whole_instance = False
    for _t in targets:
        t = _t.find_component(instance)
        if t is None:
            raise GDP_Error(
                "Target %s is not a component on the instance!" % _t)
        if not t.active:
            continue

        if t.type() is Disjunction:
            if t.parent_component() is t:
                self._transformDisjunction(t, transBlock, bigM)
            else:
                self._transformDisjunctionData(
                    t, transBlock, bigM, t.index())
        elif t.type() in (Block, Disjunct):
            if t.parent_component() is t:
                self._transformBlock(t, transBlock, bigM)
            else:
                self._transformBlockData(t, transBlock, bigM)
        else:
            raise GDP_Error(
                "Target %s was not a Block, Disjunct, or Disjunction. "
                "It was of type %s and can't be transformed."
                % (t.name, type(t)))

    # Go through our set of indexed containers and deactivate the ones
    # that don't have any active members inside of them, so that the
    # invalid component logic will tell us if we missed something getting
    # transformed.
    for obj in transBlock.disjContainers:
        if not obj.active:
            continue
        for i in obj:
            if obj[i].active:
                break
        else:
            # HACK due to active flag implementation.
            #
            # Ideally we would not have to do any of this (an
            # ActiveIndexedComponent would get its active status by
            # querying the active status of all the contained Data
            # objects).  As a fallback, we would like to call:
            #
            #     obj._deactivate_without_fixing_indicator()
            #
            # However, the straightforward implementation of that
            # method would have unintended side effects (fixing the
            # contained _DisjunctData's indicator_vars!) due to our
            # class hierarchy.  Instead, we will directly call the
            # relevant base class (safe-ish since we are verifying
            # that all the contained _DisjunctionData are
            # deactivated directly above).
            ActiveComponent.deactivate(obj)

    # HACK for backwards compatibility with the older GDP transformations
    #
    # Until the writers are updated to find variables on things other
    # than active blocks, we need to reclassify the Disjuncts as Blocks
    # after transformation so that the writer will pick up all the
    # variables that it needs (in this case, indicator_vars).
    if _HACK_transform_whole_instance:
        HACK_GDP_Disjunct_Reclassifier().apply_to(instance)
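
# Typical entry point (a sketch; 'gdp.bigm' is the TransformationFactory
# name for this relaxation, and bigM= is the argument referenced in the
# deprecation message above):
#
#     from pyomo.environ import TransformationFactory
#
#     TransformationFactory('gdp.bigm').apply_to(model, bigM=100)
#     # If bigM is omitted, values are taken from BigM Suffixes, with
#     # estimation used only as a last resort (see the precedence comment
#     # at the top of this method).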