def get_active_hx(instance):
    """Partition heat-exchanger indices into active and inactive sets.

    An exchanger counts as active when its selection variable's value
    is at least ``active_lb``; results are keyed by exchanger kind
    (``stream_hx``, ``cu_hx``, ``hu_hx``).
    """
    active_hx = {stream_hx: [], cu_hx: [], hu_hx: []}
    inactive_hx = {stream_hx: [], cu_hx: [], hu_hx: []}
    # (variable container, exchanger-kind key) pairs, checked in order.
    groups = ((instance.z, stream_hx),
              (instance.z_cu, cu_hx),
              (instance.z_hu, hu_hx))
    for var, kind in groups:
        for index in var:
            if value(var[index]) >= active_lb:
                active_hx[kind].append(index)
            else:
                inactive_hx[kind].append(index)
    return active_hx, inactive_hx
def lb(self):
    """Return the lower bound for this variable.

    The result is the tighter of the explicit bound (``self._lb``)
    and the domain's lower bound; ``None`` means unbounded below.
    """
    domain_lb, _ = self.domain.bounds()
    if self._lb is None:
        return domain_lb
    explicit_lb = value(self._lb)
    if domain_lb is None:
        return explicit_lb
    return max(explicit_lb, domain_lb)
def ub(self):
    """Return the upper bound for this variable.

    The result is the tighter of the explicit bound (``self._ub``)
    and the domain's upper bound; ``None`` means unbounded above.
    """
    _, domain_ub = self.domain.bounds()
    if self._ub is None:
        return domain_ub
    explicit_ub = value(self._ub)
    if domain_ub is None:
        return explicit_ub
    return min(explicit_ub, domain_ub)
def uslack(self):
    """
    Returns the value of U-f(x) for constraints of the form:
        (L <=) f(x) <= U
        U >= f(x) (>= L)
    """
    upper = self.upper
    if upper is None:
        # No upper bound: infinite slack.
        return float('inf')
    return value(upper) - value(self.body)
def _line_generator(k, v):
    """Yield (member-name, formatted-value) pairs for v's variables."""
    for member_name, member in sorted(iteritems(v.vars)):
        if member is None:
            rendered = '-'
        elif member.is_expression() or not member.is_indexed():
            rendered = str(value(member))
        else:
            # Indexed member: render as a dict-style listing.
            entries = ', '.join('%r: %r' % (idx, value(member[idx]))
                                for idx in sorted(member._data))
            rendered = "{%s}" % (entries,)
        yield member_name, rendered
def _pprint(self):
    """Print component information."""
    def _row(k, v):
        return [value(v.lb), v.value, value(v.ub),
                v.fixed, v.stale, v.domain]
    headers = [
        ("Size", len(self)),
        ("Index", self._index if self.is_indexed() else None),
    ]
    columns = ("Lower", "Value", "Upper", "Fixed", "Stale", "Domain")
    return (headers, self._data.items(), columns, _row)
def _line_generator(k, v):
    """Yield (member-name, formatted-value) pairs for v's variables."""
    for member_name, member in sorted(iteritems(v.vars)):
        if member is None:
            rendered = '-'
        elif not hasattr(member, 'is_indexed') or not member.is_indexed():
            # Scalar (or non-component) member: print its value directly.
            rendered = str(value(member))
        else:
            entries = ', '.join('%r: %r' % (idx, value(member[idx]))
                                for idx in sorted(member._data))
            rendered = "{%s}" % (entries,)
        yield member_name, rendered
def lslack(self):
    """
    Returns the value of f(x)-L for constraints of the form:
        L <= f(x) (<= U)
        (U >=) f(x) >= L
    """
    lower = self.lower
    if lower is None:
        # No lower bound: infinite slack.
        return float('-inf')
    return value(self.body) - value(lower)
def uslack(self):
    """
    Returns the value of U-f(x) for constraints of the form:
        (L <=) f(x) <= U
        U >= f(x) (>= L)
    """
    if self.upper is not None:
        return value(self.upper) - value(self.body)
    # Unbounded above: slack is infinite.
    return float('inf')
def lslack(self):
    """
    Returns the value of f(x)-L for constraints of the form:
        L <= f(x) (<= U)
        (U >=) f(x) >= L
    """
    if self.lower is not None:
        return value(self.body) - value(self.lower)
    # Unbounded below: slack is infinite.
    return float('-inf')
def construct(self, values=None):
    """ Initialize set data """
    if self._constructed:
        return
    timer = ConstructionTimer(self)
    self._constructed = True
    #
    # We call value() here for cases like Expressions, mutable
    # Params and the like
    #
    self._start_val = value(self._start)
    self._end_val = value(self._end)
    self._step_val = value(self._step)
    #
    # The set generates integer values if the starting value,
    # step and end value are all integers. Otherwise, the set
    # generates real values.
    #
    # BUG FIX: test the *resolved* step value (self._step_val), not the
    # raw self._step: an integer-valued mutable Param or Expression step
    # would otherwise misclassify the set as Reals.
    if type(self._start_val) is int and type(self._step_val) is int \
            and type(self._end_val) is int:
        self.domain = Integers
    else:
        self.domain = Reals
    #
    # Compute the set length and upper bound
    #
    if self.filter is None and self.validate is None:
        #
        # Directly compute the number of elements in the set, from
        # which the upper-bound is computed.
        #
        # The 1e-7 fudge guards against floating-point round-off when
        # the range divides evenly by the step.
        self._len = int(
            math.floor(
                (self._end_val - self._start_val + self._step_val + 1e-7)
                // self._step_val))
        ub = self._start_val + (self._len - 1) * self._step_val
    else:
        #
        # Iterate through the set to compute the upper bound
        # and number of elements.
        #
        ub = self._start_val
        ctr = 0
        for i in self:
            ub = i
            ctr += 1
        self._len = ctr
    #
    # Set the bounds information
    #
    self._bounds = (self._start_val, ub)
    timer.report()
def construct(self, values=None):
    """ Initialize set data """
    if self._constructed:
        return
    self._constructed = True
    #
    # We call value() here for cases like Expressions, mutable
    # Params and the like
    #
    self._start_val = value(self._start)
    self._end_val = value(self._end)
    self._step_val = value(self._step)
    #
    # The set generates integer values if the starting value,
    # step and end value are all integers. Otherwise, the set
    # generates real values.
    #
    # BUG FIX: test the *resolved* step value (self._step_val), not the
    # raw self._step: an integer-valued mutable Param or Expression step
    # would otherwise misclassify the set as Reals.
    if type(self._start_val) is int and type(self._step_val) is int \
            and type(self._end_val) is int:
        self.domain = Integers
    else:
        self.domain = Reals
    #
    # Compute the set length and upper bound
    #
    if self.filter is None and self.validate is None:
        #
        # Directly compute the number of elements in the set, from
        # which the upper-bound is computed.
        #
        # The 1e-7 fudge guards against floating-point round-off when
        # the range divides evenly by the step.
        self._len = int(
            math.floor(
                (self._end_val - self._start_val + self._step_val + 1e-7)
                // self._step_val))
        ub = self._start_val + (self._len - 1) * self._step_val
    else:
        #
        # Iterate through the set to compute the upper bound
        # and number of elements.
        #
        ub = self._start_val
        ctr = 0
        for i in self:
            ub = i
            ctr += 1
        self._len = ctr
    #
    # Set the bounds information
    #
    self._bounds = (self._start_val, ub)
def _pprint(self):
    """Print component information."""
    def _row(k, v):
        return [value(v.lb), v.value, value(v.ub),
                v.fixed, v.stale, v.domain]
    meta = [
        ("Size", len(self)),
        ("Index", self._index if self.is_indexed() else None),
    ]
    # Only report units when the component actually carries them.
    if self._units is not None:
        meta.append(('Units', str(self._units)))
    columns = ("Lower", "Value", "Upper", "Fixed", "Stale", "Domain")
    return (meta, self._data.items(), columns, _row)
def store_to(self, results, cuid=False, skip_stale_vars=False):
    """Populate ``results.solution`` from the solutions held here.

    Clears any solutions already on ``results`` and inserts one
    Solution() per entry in ``self.solutions``, copying objective,
    variable, and constraint values.  When ``cuid`` is True, symbols
    are labeled with ComponentUIDs instead of component names.  Stale
    variables are omitted when ``skip_stale_vars`` is True.
    """
    instance = self._instance()
    results.solution.clear()
    results._smap_id = None
    for soln_ in self.solutions:
        soln = Solution()
        soln._cuid = cuid
        # Carry over solver metadata (status, message, gap, ...).
        for key, val in soln_._metadata.items():
            setattr(soln, key, val)
        if cuid:
            labeler = CuidLabeler()
        else:
            labeler = CNameLabeler()
        sm = SymbolMap()
        # Objectives: always re-evaluate 'Value' from the live model.
        entry = soln_._entry['objective']
        for obj in instance.component_data_objects(Objective, active=True):
            vals = entry.get(id(obj), None)
            if vals is None:
                vals = {}
            else:
                # Stored entries are (weakref, value-dict) pairs.
                vals = vals[1]
            vals['Value'] = value(obj)
            soln.objective[sm.getSymbol(obj, labeler)] = vals
        # Variables: same pattern, optionally skipping stale values.
        entry = soln_._entry['variable']
        for obj in instance.component_data_objects(Var, active=True):
            if obj.stale and skip_stale_vars:
                continue
            vals = entry.get(id(obj), None)
            if vals is None:
                vals = {}
            else:
                vals = vals[1]
            vals['Value'] = value(obj)
            soln.variable[sm.getSymbol(obj, labeler)] = vals
        # Constraints: only copy entries the stored solution already has.
        entry = soln_._entry['constraint']
        for obj in instance.component_data_objects(Constraint, active=True):
            vals = entry.get(id(obj), None)
            if vals is None:
                continue
            else:
                vals = vals[1]
            soln.constraint[sm.getSymbol(obj, labeler)] = vals
        results.solution.insert(soln)
def _pprint(self):
    """Print component information."""
    def _row(k, v):
        return [value(v.lb), v.value, value(v.ub),
                v.fixed, v.stale, v.domain]
    headers = [
        ("Size", len(self)),
        ("Index", self._index if self.is_indexed() else None),
    ]
    columns = ("Lower", "Value", "Upper", "Fixed", "Stale", "Domain")
    return (headers, iteritems(self._data), columns, _row)
def set_value(self, val, valid=False):
    """
    Set the value of this numeric object, after validating its value.
    If the 'valid' flag is True, then the validation step is skipped.
    """
    if not valid and val is not None:
        # TODO: warn/error: check if this Var has units: assigning
        # a dimensionless value to a united variable should be an error
        if type(val) not in native_numeric_types:
            parent = self.parent_component()
            if parent._units is not None:
                # Convert the incoming quantity into this Var's units.
                val = units.convert_value(
                    num_value=value(val),
                    from_units=units.get_units(val),
                    to_units=parent._units)
        if val not in self.domain:
            raise ValueError("Numeric value `%s` (%s) is not in "
                             "domain %s for Var %s" %
                             (val, type(val), self.domain, self.name))
    self.value = val
    self.stale = False
def _pprint(self):
    """Print component information."""
    def _row(k, v):
        return [k, value(v.lb), v.value, value(v.ub),
                v.fixed, v.stale, v.domain]
    index = self._index if self._index != UnindexedComponent_set else None
    columns = ("Key", "Lower", "Value", "Upper", "Fixed", "Stale", "Domain")
    return ([("Size", len(self)), ("Index", index)],
            iteritems(self._data), columns, _row)
def _pprint(self):
    """Print component information."""
    def _row(k, v):
        return [k, value(v.lb), v.value, value(v.ub), v.fixed, v.stale]
    index = self._index if self._index != UnindexedComponent_set else None
    domain = (None if self._domain_init_value is None
              else self._domain_init_value.name)
    headers = [("Size", len(self)), ("Index", index), ("Domain", domain)]
    columns = ("Key", "Lower", "Value", "Upper", "Fixed", "Stale")
    return (headers, iteritems(self._data), columns, _row)
def slack(self):
    """
    Returns the smaller of lslack and uslack values
    """
    body_val = value(self.body)
    if self.lower is None:
        return value(self.upper) - body_val
    if self.upper is None:
        return body_val - value(self.lower)
    # Both bounds present: report the tighter (smaller) slack.
    return min(value(self.upper) - body_val,
               body_val - value(self.lower))
def get_new_tangent_points(instance, active_hx):
    """Collect updated tangent points for every active heat exchanger.

    Stream exchangers get an ordered (larger, smaller) pair of the two
    approach temperatures; utility exchangers get a single value.
    """
    new_tangents = {stream_hx: {}, cu_hx: {}, hu_hx: {}}
    for index in active_hx[stream_hx]:
        first = value(instance.dt[index])
        # The companion point shares the first two index positions and
        # increments the third.
        second = value(instance.dt[(index[0], index[1], index[2] + 1)])
        # Store the pair ordered largest-first.
        pair = (first, second) if first >= second else (second, first)
        new_tangents[stream_hx][index] = pair
    for index in active_hx[cu_hx]:
        new_tangents[cu_hx][index] = value(instance.dt_cu[index])
    for index in active_hx[hu_hx]:
        new_tangents[hu_hx][index] = value(instance.dt_hu[index])
    return new_tangents
def value(self, val):
    """Set the value for this variable."""
    if type(val) in native_numeric_types:
        # TODO: warn/error: check if this Var has units: assigning
        # a dimensionless value to a united variable should be an error
        pass
    elif val is not None and self.parent_component()._units is not None:
        # Convert the incoming quantity into this Var's units.
        target_units = self.parent_component()._units
        val = units.convert_value(num_value=value(val),
                                  from_units=units.get_units(val),
                                  to_units=target_units)
    self._value = val
def display(self, prefix="", ostream=None):
    """
    Print component state information

    This duplicates logic in Component.pprint()
    """
    if not self.active:
        return
    if ostream is None:
        ostream = sys.stdout
    tab = " "
    ostream.write(prefix + self.local_name + " : ")
    ostream.write("Size=" + str(len(self)))
    ostream.write("\n")
    tabular_writer(
        ostream, prefix + tab,
        ((k, v) for k, v in iteritems(self._data) if v.active),
        ("Lower", "Body", "Upper"),
        lambda k, v: [value(v.lower), v.body(), value(v.upper)])
def _pprint(self):
    """
    Return data that will be printed for this component.
    """
    if type(self._default_val) is types.FunctionType:
        default = "(function)"
    else:
        default = self._default_val
    meta = [
        ("Size", len(self)),
        ("Index", self._index if self.is_indexed() else None),
        ("Domain", self.domain.name),
        ("Default", default),
        ("Mutable", self._mutable),
    ]
    return (meta, self.sparse_iteritems(), ("Value",),
            lambda k, v: [value(v)])
def _pprint(self):
    """
    Return data that will be printed for this component.
    """
    index = self._index if self._index != UnindexedComponent_set else None
    if type(self._default_val) is types.FunctionType:
        default = "(function)"
    else:
        default = self._default_val
    meta = [("Size", len(self)),
            ("Index", index),
            ("Domain", self.domain.name),
            ("Default", default),
            ("Mutable", self._mutable)]
    return (meta, self.sparse_iteritems(), ("Value",),
            lambda k, v: [value(v)])
def _pprint(self):
    """
    Return data that will be printed for this component.
    """
    if self._default_val is _NotValid:
        default = "None"  # for backwards compatibility in reporting
    elif type(self._default_val) is types.FunctionType:
        default = "(function)"
    else:
        default = str(self._default_val)
    meta = [
        ("Size", len(self)),
        ("Index", self._index if self.is_indexed() else None),
        ("Domain", self.domain.name),
        ("Default", default),
        ("Mutable", self._mutable),
    ]
    return (meta, self.sparse_iteritems(), ("Value",),
            lambda k, v: [value(v)])
def _get_bound(expr, bound_type, block_bounds=None):
    """Returns the bound of the expression or variable.

    bound_type is 'lb' or 'ub'.  block_bounds optionally maps variable
    local names to {'lb': ..., 'ub': ...} overrides; the tighter of the
    override and the variable's own bound is returned.

    Note: the mutable ``{}`` default was replaced with a ``None``
    sentinel (classic shared-default pitfall); behavior is unchanged.
    """
    if block_bounds is None:
        block_bounds = {}
    # --- TODO: This is a hack to enable support for floats or ints
    from pyomo.core.base.numvalue import is_constant as const, value
    if const(expr):
        return value(expr)
    # ---
    # if expr.is_constant():
    #     return expr.value
    from pyomo.core.base.var import _GeneralVarData
    if isinstance(expr, _GeneralVarData):
        bound_entry = block_bounds.get(expr.local_name,
                                       {'lb': None, 'ub': None})
        block_bound = bound_entry[bound_type]
        if block_bound is not None:
            # Tighten with the block-level override.
            if bound_type == 'lb':
                return max(block_bound, getattr(expr, bound_type))
            elif bound_type == 'ub':
                return min(block_bound, getattr(expr, bound_type))
        return getattr(expr, bound_type)
    from pyomo.core.base.expr import _SumExpression
    if isinstance(expr, _SumExpression):
        # Positive coefficients keep the bound direction; negative ones
        # flip it (lb <-> ub).
        bnd = sum(_get_bound(expr._args[i], bound_type, block_bounds)
                  * expr._coef[i]
                  for i in range(len(expr._args)) if expr._coef[i] > 0) + \
            sum(_get_bound(expr._args[i], _invert_bound(bound_type),
                           block_bounds) * expr._coef[i]
                for i in range(len(expr._args)) if expr._coef[i] < 0) + \
            expr._const
        return bnd
    from pyomo.core.base.expr_coopr3 import _ProductExpression
    if isinstance(expr, _ProductExpression):
        if len(expr._numerator) == 1 and len(expr._denominator) == 0:
            return expr._coef * _get_bound(
                expr._numerator[0],
                bound_type if expr._coef >= 0 else _invert_bound(bound_type),
                block_bounds)
        # else: (We don't support the expression)
    # else: (We don't recognize the expression)
    raise NotImplementedError(
        'Cannot determine {} for unrecognized expression {}'.format(
            bound_type, expr))
def display(self, prefix="", ostream=None):
    """Provide a verbose display of this object"""
    if not self.active:
        return
    if ostream is None:
        ostream = sys.stdout
    tab = " "
    info = [("Size", len(self)),
            ("Index", self._index if self.is_indexed() else None),
            ("Active", self.active)]
    ostream.write(prefix + self.local_name + " : ")
    ostream.write(", ".join("%s=%s" % (k, v) for k, v in info))
    ostream.write("\n")
    tabular_writer(
        ostream, prefix + tab,
        ((k, v) for k, v in iteritems(self._data) if v.active),
        ("Active", "Value"),
        lambda k, v: [v.active, value(v)])
def display(self, prefix="", ostream=None):
    """Provide a verbose display of this object"""
    if not self.active:
        return
    if ostream is None:
        ostream = sys.stdout
    tab = " "
    index = self._index if self._index != UnindexedComponent_set else None
    info = [("Size", len(self)),
            ("Index", index),
            ("Active", self.active)]
    ostream.write(prefix + self.local_name + " : ")
    ostream.write(", ".join("%s=%s" % (k, v) for k, v in info))
    ostream.write("\n")
    tabular_writer(
        ostream, prefix + tab,
        ((k, v) for k, v in iteritems(self._data) if v.active),
        ("Active", "Value"),
        lambda k, v: [v.active, value(v)])
def _setitem_when_not_present(self, index, value, _check_domain=True):
    """Insert a new Param value at ``index``, creating the data slot.

    NOTE: the ``value`` parameter shadows the module-level value()
    function; it is the raw value being stored, not a callable here.
    """
    #
    # We need to ensure that users don't override the value for immutable
    # parameters.
    #
    if self._constructed and not self._mutable:
        _raise_modifying_immutable_error(self, index)
    #
    # Params should contain *values*. Note that if we just call
    # value(), then that forces the value to be a numeric value.
    # Notably, we allow Params with domain==Any to hold strings, tuples,
    # etc. The following lets us use NumericValues to initialize
    # Params, but is optimized to check for "known" native types to
    # bypass a potentially expensive isinstance()==False call.
    #
    if value.__class__ not in native_types:
        if isinstance(value, NumericValue):
            value = value()
    #
    # Set the value depending on the type of param value.
    #
    try:
        if index is None and not self.is_indexed():
            # Scalar Param: the component itself holds the data.
            self._data[None] = self
            self.set_value(value, index)
            return self
        elif self._mutable:
            obj = self._data[index] = _ParamData(self)
            obj.set_value(value, index)
            return obj
        else:
            self._data[index] = value
            # Because we do not have a _ParamData, we cannot rely on the
            # validation that occurs in _ParamData.set_value()
            self._validate_value(index, value, _check_domain)
            return value
    except:
        # Roll back the partial insertion, then re-raise whatever
        # failed (validation error, etc.) to the caller.
        del self._data[index]
        raise
def plotTotalEnergy(data, model):
    """Plot demand versus the energy delivered by the purchased
    forward products, one point per time period.  Returns pyplot so
    the caller can show or save the figure."""
    product_volumes = [value(v) for v in model.x_vars.values()]
    # forwardCharVectors is keyed 1-based on (product, period).
    energy_per_period = [
        sum(product_volumes[f] * data.forwardCharVectors[f + 1, t + 1]
            for f in range(data.numberOfForwardProducts))
        for t in range(data.numberOfTimePeriods)]
    font_size = 18
    pyplot.figure(1, figsize=(10, 5))
    pyplot.subplots_adjust(bottom=0.3)
    pyplot.plot(data.timePeriodNames, data.demandAsArray, 'b',
                data.timePeriodNames, energy_per_period, 'r')
    pyplot.xticks(fontsize=font_size, rotation=45)
    pyplot.yticks(fontsize=font_size)
    return pyplot
def _setitem_when_not_present(self, index, value, _check_domain=True):
    """Insert a new Param value at ``index``, creating the data slot.

    NOTE: the ``value`` parameter shadows the module-level value()
    function; it is the raw value being stored, not a callable here.
    """
    #
    # We need to ensure that users don't override the value for immutable
    # parameters.
    #
    if self._constructed and not self._mutable:
        _raise_modifying_immutable_error(self, index)
    #
    # Params should contain *values*. Note that if we just call
    # value(), then that forces the value to be a numeric value.
    # Notably, we allow Params with domain==Any to hold strings, tuples,
    # etc. The following lets us use NumericValues to initialize
    # Params, but is optimized to check for "known" native types to
    # bypass a potentially expensive isinstance()==False call.
    #
    if value.__class__ not in native_types:
        if isinstance(value, NumericValue):
            value = value()
    #
    # Set the value depending on the type of param value.
    #
    try:
        if index is None and not self.is_indexed():
            self._data[None] = self
            self.set_value(value, index)
            return self
        elif self._mutable:
            obj = self._data[index] = _ParamData(self)
            obj.set_value(value, index)
            return obj
        else:
            self._data[index] = value
            # Because we do not have a _ParamData, we cannot rely on the
            # validation that occurs in _ParamData.set_value()
            self._validate_value(index, value, _check_domain)
            return value
    except:
        # BUG FIX: after rolling back the partial insertion we must
        # re-raise; the original code silently swallowed the error,
        # unlike the identical sibling implementation elsewhere in
        # this file.
        del self._data[index]
        raise
def setlb(self, val):
    """
    Set the lower bound for this variable after validating that
    the value is fixed (or None).
    """
    # Note: is_fixed(None) returns True
    if not is_fixed(val):
        raise ValueError(
            "Non-fixed input of type '%s' supplied as variable lower "
            "bound - legal types must be fixed expressions or variables."
            % (type(val),))
    if val is not None and type(val) not in native_numeric_types:
        # TODO: warn/error: check if this Var has units: assigning
        # a dimensionless value to a united variable should be an error
        parent = self.parent_component()
        if parent._units is not None:
            # Convert the bound into this Var's units.
            val = units.convert_value(num_value=value(val),
                                      from_units=units.get_units(val),
                                      to_units=parent._units)
    self._lb = val
def __init__(self, expr):
    """Resolve the arguments of a template expression, substituting
    values where possible and keeping IndexTemplate placeholders."""
    self._base = expr._base
    self._args = []
    _hash = [id(self._base)]
    for x in expr._args:
        try:
            # BUG FIX: remember the currently-active logging disable
            # level and restore it in the finally clause.  The original
            # reset to logging.NOTSET, which clobbered any disable
            # level the surrounding application had set (the sibling
            # implementation in this file already does it this way).
            active_level = logging.root.manager.disable
            logging.disable(logging.CRITICAL)
            val = value(x)
            self._args.append(val)
            _hash.append(val)
        except TemplateExpressionError as e:
            if x is not e.template:
                raise TypeError(
                    "Cannot use the param substituter with expression "
                    "templates\nwhere the component index has the "
                    "IndexTemplate in an expression.\n\tFound in %s"
                    % (expr,))
            self._args.append(e.template)
            _hash.append(id(e.template._set))
        finally:
            logging.disable(active_level)
    self._hash = tuple(_hash)
def _setitem_impl(self, index, obj, value):
    """The __setitem__ method performs significant validation around the
    input indices, particularly when the index value is new.  In
    various contexts, we don't need to incur this overhead
    (e.g. during initialization).  The _setitem_impl assumes the
    input value is in the set native_types
    """
    # Immutable Params may not be re-assigned after construction.
    if self._constructed and not self._mutable:
        _raise_modifying_immutable_error(self, index)
    # Unwrap NumericValue objects so Params store plain *values*; the
    # native_types membership test cheaply skips known scalar types
    # before paying for an isinstance() check.
    if value.__class__ not in native_types:
        if isinstance(value, NumericValue):
            value = value()
    if self._mutable:
        obj.set_value(value, index)
        return obj
    # Without a _ParamData wrapper we cannot rely on the validation in
    # _ParamData.set_value(), so validate here ourselves.
    self._data[index] = value
    self._validate_value(index, value)
    return value
def __init__(self, expr):
    """Resolve the arguments of a template expression, substituting
    values where possible and keeping IndexTemplate placeholders."""
    self._base = expr._base
    self._args = []
    _hash = [ id(self._base) ]
    for x in expr._args:
        try:
            # Save the currently-active logging disable level so the
            # finally clause can restore it exactly; resetting to
            # NOTSET would clobber a level set by the application.
            active_level = logging.root.manager.disable
            logging.disable(logging.CRITICAL)
            val = value(x)
            self._args.append(val)
            _hash.append(val)
        except TemplateExpressionError as e:
            # A bare IndexTemplate is kept symbolically; anything more
            # complex (template buried in an expression) is an error.
            if x is not e.template:
                raise TypeError(
                    "Cannot use the param substituter with expression "
                    "templates\nwhere the component index has the "
                    "IndexTemplate in an expression.\n\tFound in %s" % (
                    expr, ))
            self._args.append(e.template)
            _hash.append(id(e.template._set))
        finally:
            logging.disable(active_level)
    self._hash = tuple(_hash)
def test_parameters(self):
    """Verify model scalar parameters and index sets against the data.

    Uses assertEqual: the assertEquals alias is deprecated and was
    removed in Python 3.12.
    """
    #print self.model.D_params[1]
    self.assertEqual(
        value(self.model.t[None]), self.data.numberOfTimePeriods,
        "Incorrect value for parameter 'h'" + ", expected: " +
        str(self.data.numberOfTimePeriods) + ", actual: " +
        str(value(self.model.t[None])))
    self.assertEqual(
        value(self.model.f[None]), self.data.numberOfForwardProducts,
        "Incorrect value for parameter 'f'" + ", expected: " +
        str(self.data.numberOfForwardProducts) + ", actual: " +
        str(value(self.model.f[None])))
    self.assertEqual(
        value(self.model.s[None]), self.data.numberOfHpfcVectors,
        "Incorrect value for parameter 's'" + ", expected: " +
        str(self.data.numberOfHpfcVectors) + ", actual: " +
        str(value(self.model.s[None])))
    self.assertEqual(
        (self.model.T.first(), self.model.T.last()), (1, 24),
        "Incorrect value for set 'H', expected: (1, 24), actual: " +
        str((self.model.T.first(), self.model.T.last())))
    self.assertEqual(
        (self.model.F.first(), self.model.F.last()), (1, 3),
        "Incorrect value for set 'F', expected: (1, 3), actual: " +
        str((self.model.F.first(), self.model.F.last())))
    self.assertEqual(
        (self.model.S.first(), self.model.S.last()), (1, 4),
        "Incorrect value for set 'S', expected: (1, 4), actual: " +
        str((self.model.S.first(), self.model.S.last())))
def has_lb(self):
    """Returns :const:`False` when the lower bound is
    :const:`None` or negative infinity"""
    lb = self.lb
    if lb is None:
        return False
    return value(lb) != float('-inf')
def _populate_glpk_instance(self, model):
    """Translate a Pyomo model into a GLPK problem object.

    Builds columns from active Var components, rows from active
    Constraint components, loads the coefficient matrix and the
    (linear) objective, and stores the GLPK handle plus row/column
    label maps on self.
    """
    from pyomo.core.base import Var, Objective, Constraint, SOSConstraint

    try:
        lp = glp_create_prob()
    except Exception:
        e = sys.exc_info()[1]
        msg = 'Unable to create GLPK problem instance. Have you installed' \
              '\n the Python bindings for GLPK?\n\n\tError message: %s'
        raise Exception(msg % e)

    objective = sorted(
        model.component_map(Objective, active=True).values())[0]
    # so we can correctly map the solution to the correct objective
    # label in _postsolve
    lp.objective_name = sorted(
        model.component_map(Objective, active=True).keys())[0]
    sense = GLP_MAX
    if objective.is_minimizing():
        sense = GLP_MIN

    constraint_list = model.component_map(Constraint, active=True)
    variable_list = model.component_map(Var, active=True)
    num_constraints = model.statistics.number_of_constraints
    num_variables = model.statistics.number_of_variables

    sosn = self._capabilities.sosn
    sos1 = self._capabilities.sos1
    sos2 = self._capabilities.sos2

    for soscondata in model.component_data_objects(SOSConstraint,
                                                   active=True):
        raise Exception(
            "Solver: glpk_direct does not support SOSConstraint "
            "declarations")

    glp_set_prob_name(lp, model.name)

    glp_set_obj_dir(lp, sense)
    glp_add_rows(lp, num_constraints)
    glp_add_cols(lp, num_variables)

    # 1 extra because GLPK's arrays in this context are 1-based, not
    # 0-based
    coef_count = num_constraints * num_variables + 1
    Ai = intArray(coef_count)
    Aj = intArray(coef_count)
    Ar = doubleArray(coef_count)

    row = col = coef_count = 0
    colvar_map = dict()
    rowvar_map = dict()

    # In matrix parlance, variables are columns
    for name in variable_list:
        var_set = variable_list[name]
        for ii in var_set:
            var = var_set[ii]
            if var.fixed is True:
                continue

            lb = ub = 0.0
            if var.lb is None and var.ub is None:
                var_type = GLP_FR
            elif var.lb is None:
                var_type = GLP_UB
                ub = value(var.ub)
            elif var.ub is None:
                var_type = GLP_LO
                lb = value(var.lb)
            else:
                var_type = GLP_DB
                lb = value(var.lb)
                ub = value(var.ub)

            col += 1
            colvar_map[var.label] = col

            # the name is perhaps not necessary, but for completeness ...
            glp_set_col_name(lp, col, var.label)
            glp_set_col_bnds(lp, col, var_type, lb, ub)

            # Be sure to impart the integer and binary nature of any
            # variables
            if var.is_integer():
                glp_set_col_kind(lp, col, GLP_IV)
            elif var.is_binary():
                glp_set_col_kind(lp, col, GLP_BV)
            elif var.is_continuous():
                glp_set_col_kind(lp, col, GLP_CV)  # continuous
            else:
                raise TypeError(
                    "Invalid domain type for variable with name '%s'. "
                    "Variable is not continuous, integer, or binary.")

    model_canonical_repn = getattr(model, "_canonical_repn", None)
    if model_canonical_repn is None:
        raise ValueError("No _canonical_repn ComponentMap was found on "
                         "block with name %s. Did you forget to preprocess?"
                         % (model.name))

    for name in constraint_list:
        constraint_set = constraint_list[name]

        for ii in constraint_set:
            constraint = constraint_set[ii]
            if not constraint.active:
                continue
            elif constraint.lower is None and constraint.upper is None:
                continue

            expression = model_canonical_repn.get(constraint)
            # BUG FIX: test the looked-up *expression* for None; the
            # original tested `constraint is None`, which can never be
            # true here, so a missing canonical repn slipped through.
            if expression is None:
                raise ValueError(
                    "No entry found in _canonical_repn ComponentMap on "
                    "block %s for active constraint with name %s. "
                    "Did you forget to preprocess?"
                    % (model.name, constraint.name))

            offset = 0.0
            if 0 in expression:
                offset = expression[0][None]

            lbound = ubound = -offset

            if constraint.equality:
                var_type = GLP_FX  # Fixed
                lbound = ubound = constraint.lower() - offset
            elif constraint.lower is None:
                var_type = GLP_UP  # Upper bounded only
                ubound += constraint.upper()
            elif constraint.upper is None:
                var_type = GLP_LO  # Lower bounded only
                lbound += constraint.lower()
            else:
                var_type = GLP_DB  # Double bounded
                lbound += constraint.lower()
                ubound += constraint.upper()

            row += 1
            rowvar_map[constraint.label] = row

            # just as with variables, set the name just for
            # completeness ...
            glp_set_row_name(lp, row, constraint.label)
            glp_set_row_bnds(lp, row, var_type, lbound, ubound)

            if 1 in expression:  # first-order terms
                keys = sorted(expression[1].keys())
                for var_key in keys:
                    index = var_key.keys()[0]
                    var = expression[-1][index]
                    coef = expression[1][var_key]
                    col = colvar_map[var.label]

                    coef_count += 1
                    Ai[coef_count] = row
                    Aj[coef_count] = col
                    Ar[coef_count] = coef

    # with the rows and columns named and bounded, load the coefficients
    glp_load_matrix(lp, coef_count, Ai, Aj, Ar)

    for key in objective:
        expression = model_canonical_repn.get(objective[key])
        if expression is None:
            raise ValueError(
                "No entry found in _canonical_repn ComponentMap on "
                "block %s for active objective with name %s. "
                "Did you forget to preprocess?"
                % (model.name, objective[key].name))

        if expression.is_constant():
            msg = "Ignoring objective '%s[%s]' which is constant"
            logger.warning(msg % (str(objective), str(key)))
            continue

        if 1 in expression:  # first-order terms
            keys = sorted(expression[1].keys())
            for var_key in keys:
                index = var_key.keys()[0]
                label = expression[-1][index].label
                coef = expression[1][var_key]
                col = colvar_map[label]
                glp_set_obj_coef(lp, col, coef)

        elif -1 in expression:
            pass

        else:
            msg = "Nonlinear objective to GLPK. GLPK can only handle " \
                  "linear problems."
            raise RuntimeError(msg)

    self._glpk_instance = lp
    self._glpk_rowvar_map = rowvar_map
    self._glpk_colvar_map = colvar_map
def has_ub(self):
    """Returns :const:`False` when the upper bound is
    :const:`None` or positive infinity"""
    ub = self.upper
    if ub is None:
        return False
    return value(ub) != float('inf')
def has_lb(self):
    """Returns :const:`False` when the lower bound is
    :const:`None` or negative infinity"""
    lb = self.lower
    if lb is None:
        return False
    return value(lb) != float('-inf')
def add_solution(self, solution, smap_id, delete_symbol_map=True,
                 cache=None, ignore_invalid_labels=False,
                 ignore_missing_symbols=True,
                 default_variable_value=None):
    """Record a solver Solution on this model; returns its index.

    Maps the solution's symbols back onto model components either via
    a stored symbol map (``smap_id``) or, when ``smap_id`` is None, by
    matching CUID/string labels against the model (using ``cache`` to
    avoid rebuilding the label lookup across calls).
    """
    instance = self._instance()

    soln = ModelSolution()
    soln._metadata['status'] = solution.status
    if not type(solution.message) is UndefinedData:
        soln._metadata['message'] = solution.message
    if not type(solution.gap) is UndefinedData:
        soln._metadata['gap'] = solution.gap

    if smap_id is None:
        #
        # Cache symbol names, which might be re-used in subsequent
        # calls to add_solution()
        #
        if cache is None:
            cache = {}
        if solution._cuid:
            #
            # Loading a solution with CUID keys
            #
            if len(cache) == 0:
                for obj in instance.component_data_objects(Var):
                    cache[ComponentUID(obj)] = obj
                for obj in instance.component_data_objects(
                        Objective, active=True):
                    cache[ComponentUID(obj)] = obj
                for obj in instance.component_data_objects(
                        Constraint, active=True):
                    cache[ComponentUID(obj)] = obj
            for name in ['problem', 'objective', 'variable', 'constraint']:
                tmp = soln._entry[name]
                for cuid, val in getattr(solution, name).items():
                    obj = cache.get(cuid, None)
                    if obj is None:
                        if ignore_invalid_labels:
                            continue
                        raise RuntimeError(
                            "CUID %s is missing from model %s"
                            % (str(cuid), instance.name))
                    # Entries are (weakref-to-component, value) pairs.
                    tmp[id(obj)] = (weakref_ref(obj), val)
        else:
            #
            # Loading a solution with string keys
            #
            if len(cache) == 0:
                for obj in instance.component_data_objects(Var):
                    cache[obj.name] = obj
                for obj in instance.component_data_objects(
                        Objective, active=True):
                    cache[obj.name] = obj
                for obj in instance.component_data_objects(
                        Constraint, active=True):
                    cache[obj.name] = obj
            for name in ['problem', 'objective', 'variable', 'constraint']:
                tmp = soln._entry[name]
                for symb, val in getattr(solution, name).items():
                    obj = cache.get(symb, None)
                    if obj is None:
                        if ignore_invalid_labels:
                            continue
                        raise RuntimeError(
                            "Symbol %s is missing from model %s"
                            % (symb, instance.name))
                    tmp[id(obj)] = (weakref_ref(obj), val)
    else:
        #
        # Map solution
        #
        smap = self.symbol_map[smap_id]
        for name in ['problem', 'objective', 'variable', 'constraint']:
            tmp = soln._entry[name]
            for symb, val in getattr(solution, name).items():
                if symb in smap.bySymbol:
                    obj = smap.bySymbol[symb]
                elif symb in smap.aliases:
                    obj = smap.aliases[symb]
                elif ignore_missing_symbols:
                    continue
                else:                                   #pragma:nocover
                    #
                    # This should never happen ...
                    #
                    raise RuntimeError(
                        "ERROR: Symbol %s is missing from "
                        "model %s when loading with a symbol map!"
                        % (symb, instance.name))
                # smap entries are already weakrefs; dereference for id().
                tmp[id(obj())] = (obj, val)
    #
    # Wrap up
    #
    if delete_symbol_map:
        self.delete_symbol_map(smap_id)

    #
    # Collect fixed variables
    #
    tmp = soln._entry['variable']
    for vdata in instance.component_data_objects(Var):
        id_ = id(vdata)
        if vdata.fixed:
            tmp[id_] = (weakref_ref(vdata), {'Value': value(vdata)})
        elif (default_variable_value is not None) and \
                (smap_id is not None) and \
                (id_ in smap.byObject) and \
                (id_ not in tmp):
            tmp[id_] = (weakref_ref(vdata), {
                'Value': default_variable_value
            })

    self.solutions.append(soln)
    return len(self.solutions) - 1
def _initialize_members(self, init_set):
    """Initialize variable data for all indices in a set.

    Applies, in order: domain initialization, value initialization, and
    bound initialization.  Each phase prefers a rule (``*_init_rule``)
    over a constant (``*_init_value``), and each handles the indexed and
    singleton cases separately.

    Parameters
    ----------
    init_set : iterable of indices (keys into ``self._data``) to
        initialize.
    """
    #
    # Initialize domains
    #
    if self._domain_init_rule is not None:
        #
        # Initialize domains with a rule
        #
        if self.is_indexed():
            for ndx in init_set:
                self._data[ndx].domain = \
                    apply_indexed_rule(self,
                                       self._domain_init_rule,
                                       self._parent(),
                                       ndx)
        else:
            self.domain = self._domain_init_rule(self._parent())
    else:
        if self.is_indexed():
            # Optimization: It is assumed self._domain_init_value
            #               is used when the _GeneralVarData objects
            #               are created. This avoids an unnecessary
            #               loop over init_set, which can significantly
            #               speed up construction of variables with large
            #               index sets.
            pass
        else:
            # the above optimization does not apply for
            # singleton objects (trying to do so breaks
            # some of the pickle tests)
            self.domain = self._domain_init_value
    #
    # Initialize values
    #
    if self._value_init_rule is not None:
        #
        # Initialize values with a rule
        #
        if self.is_indexed():
            for key in init_set:
                vardata = self._data[key]
                val = apply_indexed_rule(self,
                                         self._value_init_rule,
                                         self._parent(),
                                         key)
                # value() reduces a possibly-symbolic rule result to a number.
                val = value(val)
                vardata.set_value(val)
        else:
            val = self._value_init_rule(self._parent())
            val = value(val)
            self.set_value(val)
    elif self._value_init_value is not None:
        #
        # Initialize values with a value
        #
        if self._value_init_value.__class__ is dict:
            for key in init_set:
                # Skip indices that are not in the
                # dictionary. This arises when
                # initializing VarList objects with a
                # dictionary.
                if not key in self._value_init_value:
                    continue
                val = self._value_init_value[key]
                vardata = self._data[key]
                vardata.set_value(val)
        else:
            # A single (possibly symbolic) value shared by all indices;
            # evaluate it once, outside the loop.
            val = value(self._value_init_value)
            for key in init_set:
                vardata = self._data[key]
                vardata.set_value(val)
    #
    # Initialize bounds
    #
    if self._bounds_init_rule is not None:
        #
        # Initialize bounds with a rule
        #
        # The rule must return an (lb, ub) pair.
        if self.is_indexed():
            for key in init_set:
                vardata = self._data[key]
                (lb, ub) = apply_indexed_rule(self,
                                              self._bounds_init_rule,
                                              self._parent(),
                                              key)
                vardata.setlb(lb)
                vardata.setub(ub)
        else:
            (lb, ub) = self._bounds_init_rule(self._parent())
            self.setlb(lb)
            self.setub(ub)
    elif self._bounds_init_value is not None:
        #
        # Initialize bounds with a value
        #
        (lb, ub) = self._bounds_init_value
        for key in init_set:
            vardata = self._data[key]
            vardata.setlb(lb)
            vardata.setub(ub)
def compile_block_linear_constraints(parent_block,
                                     constraint_name,
                                     skip_trivial_constraints=False,
                                     single_precision_storage=False,
                                     verbose=False,
                                     descend_into=True):
    """Replace all active linear constraints on a block with a single
    compiled MatrixConstraint stored in CSR (compressed sparse row) form.

    Linear constraints (polynomial degree <= 1) are collected, their
    rows appended to a sparse matrix, the original Constraint objects
    removed from the model, and a new MatrixConstraint component named
    ``constraint_name`` added to ``parent_block``.

    Parameters
    ----------
    parent_block : constructed Block whose constraints are compiled.
    constraint_name : name for the new MatrixConstraint component.
    skip_trivial_constraints : if True, constraints with no variables
        are dropped instead of stored as empty rows.
    single_precision_storage : store coefficients/ranges as C floats
        ('f') instead of doubles ('d').
    verbose : print timing and size diagnostics.
    descend_into : also process active sub-blocks.

    Raises
    ------
    RuntimeError : if ``parent_block`` is not constructed.
    ValueError : (from the local ``_get_bound``) if a constraint bound
        is not fixed.
    """
    if verbose:
        print("")
        print("Compiling linear constraints on block with name: %s"
              % (parent_block.name))

    if not parent_block.is_constructed():
        raise RuntimeError(
            "Attempting to compile block '%s' with unconstructed "
            "component(s)" % (parent_block.name))

    #
    # Linear MatrixConstraint in CSR format
    #
    SparseMat_pRows = []
    SparseMat_jCols = []
    SparseMat_Vals = []
    Ranges = []
    RangeTypes = []

    def _get_bound(exp):
        # Reduce a constraint bound expression to a number; bounds must
        # be fixed (no free variables) to be compiled.
        if exp is None:
            return None
        if is_fixed(exp):
            return value(exp)
        raise ValueError("non-fixed bound: " + str(exp))

    start_time = time.time()
    if verbose:
        print("Sorting active blocks...")

    # Deterministic component ordering so repeated compilations produce
    # identical matrices.
    sortOrder = SortComponents.indices | SortComponents.alphabetical
    all_blocks = [_b for _b in parent_block.block_data_objects(
        active=True,
        sort=sortOrder,
        descend_into=descend_into)]

    stop_time = time.time()
    if verbose:
        print("Time to sort active blocks: %.2f seconds"
              % (stop_time - start_time))

    start_time = time.time()
    if verbose:
        print("Collecting variables on active blocks...")

    #
    # First Pass: assign each variable a deterministic id
    # (an index in a list)
    #
    VarSymbolToVarObject = []
    for block in all_blocks:
        VarSymbolToVarObject.extend(
            block.component_data_objects(Var,
                                         sort=sortOrder,
                                         descend_into=False))
    VarIDToVarSymbol = \
        dict((id(vardata), index)
             for index, vardata in enumerate(VarSymbolToVarObject))

    stop_time = time.time()
    if verbose:
        print("Time to collect variables on active blocks: %.2f seconds"
              % (stop_time - start_time))

    start_time = time.time()
    if verbose:
        print("Compiling active linear constraints...")

    #
    # Second Pass: collect and remove active linear constraints
    #
    constraint_data_to_remove = []
    empty_constraint_containers_to_remove = []
    constraint_containers_to_remove = []
    constraint_containers_to_check = set()
    referenced_variable_symbols = set()
    nnz = 0
    nrows = 0
    # CSR row-pointer array starts with a leading 0 sentinel.
    SparseMat_pRows = [0]
    for block in all_blocks:

        # Drop stale cached representations on the block, if any.
        if hasattr(block, '_canonical_repn'):
            del block._canonical_repn
        if hasattr(block, '_ampl_repn'):
            del block._ampl_repn

        for constraint in block.component_objects(Constraint,
                                                  active=True,
                                                  sort=sortOrder,
                                                  descend_into=False):

            assert not isinstance(constraint, MatrixConstraint)

            if len(constraint) == 0:
                empty_constraint_containers_to_remove.append(
                    (block, constraint))
            else:
                singleton = isinstance(constraint, SimpleConstraint)

                for index, constraint_data in iteritems(constraint):

                    # Only linear (degree <= 1) rows are compiled;
                    # nonlinear constraints stay on the model untouched.
                    if constraint_data.body.polynomial_degree() <= 1:

                        # collect for removal
                        if singleton:
                            constraint_containers_to_remove.append(
                                (block, constraint))
                        else:
                            constraint_data_to_remove.append(
                                (constraint, index))
                            constraint_containers_to_check.add(
                                (block, constraint))

                        canonical_repn = generate_canonical_repn(
                            constraint_data.body)

                        assert isinstance(canonical_repn,
                                          LinearCanonicalRepn)

                        row_variable_symbols = []
                        row_coefficients = []
                        if canonical_repn.variables is None:
                            # Trivial (constant-body) constraint.  NOTE:
                            # when skipped it has already been queued for
                            # removal above, so it is dropped entirely.
                            if skip_trivial_constraints:
                                continue
                        else:
                            row_variable_symbols = \
                                [VarIDToVarSymbol[id(vardata)]
                                 for vardata in canonical_repn.variables]
                            referenced_variable_symbols.update(
                                row_variable_symbols)
                            assert canonical_repn.linear is not None
                            row_coefficients = canonical_repn.linear

                        SparseMat_pRows.append(SparseMat_pRows[-1] + \
                                               len(row_variable_symbols))
                        SparseMat_jCols.extend(row_variable_symbols)
                        SparseMat_Vals.extend(row_coefficients)

                        nnz += len(row_variable_symbols)
                        nrows += 1

                        # Fold the constant part of the body into the
                        # row bounds (L - c <= Ax <= U - c).
                        L = _get_bound(constraint_data.lower)
                        U = _get_bound(constraint_data.upper)
                        constant = value(canonical_repn.constant)
                        if constant is None:
                            constant = 0

                        Ranges.append(L - constant if (L is not None) else 0)
                        Ranges.append(U - constant if (U is not None) else 0)
                        if (L is not None) and \
                           (U is not None) and \
                           (not constraint_data.equality):
                            RangeTypes.append(MatrixConstraint.LowerBound |
                                              MatrixConstraint.UpperBound)
                        elif constraint_data.equality:
                            RangeTypes.append(MatrixConstraint.Equality)
                        elif L is not None:
                            assert U is None
                            RangeTypes.append(MatrixConstraint.LowerBound)
                        else:
                            assert U is not None
                            RangeTypes.append(MatrixConstraint.UpperBound)

                        # Start freeing up memory
                        constraint_data.set_value(None)

    ncols = len(referenced_variable_symbols)

    stop_time = time.time()
    if verbose:
        print("Time to compile active linear constraints: %.2f seconds"
              % (stop_time - start_time))

    start_time = time.time()
    if verbose:
        print("Removing compiled constraint objects...")

    #
    # Remove compiled constraints
    #
    constraints_removed = 0
    constraint_containers_removed = 0
    for block, constraint in empty_constraint_containers_to_remove:
        block.del_component(constraint)
        constraint_containers_removed += 1
    for constraint, index in constraint_data_to_remove:
        del constraint[index]
        constraints_removed += 1
    for block, constraint in constraint_containers_to_remove:
        block.del_component(constraint)
        constraints_removed += 1
        constraint_containers_removed += 1
    # Containers that had some (but maybe not all) data compiled: remove
    # the container only if it is now empty.
    for block, constraint in constraint_containers_to_check:
        if len(constraint) == 0:
            block.del_component(constraint)
            constraint_containers_removed += 1

    stop_time = time.time()
    if verbose:
        print("Eliminated %s constraints and %s Constraint container objects"
              % (constraints_removed, constraint_containers_removed))
        print("Time to remove compiled constraint objects: %.2f seconds"
              % (stop_time - start_time))

    start_time = time.time()
    if verbose:
        print("Assigning variable column indices...")

    #
    # Assign a column index to the set of referenced variables
    #
    # Only variables that actually appear in a compiled row get a
    # column; jCols are remapped from variable symbols to columns.
    ColumnIndexToVarSymbol = sorted(referenced_variable_symbols)
    VarSymbolToColumnIndex = dict(
        (symbol, column)
        for column, symbol in enumerate(ColumnIndexToVarSymbol))
    SparseMat_jCols = [VarSymbolToColumnIndex[symbol]
                       for symbol in SparseMat_jCols]
    del VarSymbolToColumnIndex
    ColumnIndexToVarObject = [VarSymbolToVarObject[var_symbol]
                              for var_symbol in ColumnIndexToVarSymbol]

    stop_time = time.time()
    if verbose:
        print("Time to assign variable column indices: %.2f seconds"
              % (stop_time - start_time))

    start_time = time.time()
    if verbose:
        print("Converting compiled constraint data to array storage...")
        print(" - Using %s precision for numeric values"
              % ('single' if single_precision_storage else 'double'))

    #
    # Convert to array storage
    #
    # array.array stores unboxed C values: 'L' unsigned long indices,
    # 'f'/'d' coefficients, 'B' unsigned char range-type flags.
    number_storage = 'f' if single_precision_storage else 'd'
    SparseMat_pRows = array.array('L', SparseMat_pRows)
    SparseMat_jCols = array.array('L', SparseMat_jCols)
    SparseMat_Vals = array.array(number_storage, SparseMat_Vals)
    Ranges = array.array(number_storage, Ranges)
    RangeTypes = array.array('B', RangeTypes)

    stop_time = time.time()
    if verbose:
        storage_bytes = \
            SparseMat_pRows.buffer_info()[1] * SparseMat_pRows.itemsize + \
            SparseMat_jCols.buffer_info()[1] * SparseMat_jCols.itemsize + \
            SparseMat_Vals.buffer_info()[1] * SparseMat_Vals.itemsize + \
            Ranges.buffer_info()[1] * Ranges.itemsize + \
            RangeTypes.buffer_info()[1] * RangeTypes.itemsize
        print("Sparse Matrix Dimension:")
        print(" - Rows: " + str(nrows))
        print(" - Cols: " + str(ncols))
        print(" - Nonzeros: " + str(nnz))
        print("Compiled Data Storage: " + str(_label_bytes(storage_bytes)))
        print("Time to convert compiled constraint data to "
              "array storage: %.2f seconds" % (stop_time - start_time))

    parent_block.add_component(constraint_name,
                               MatrixConstraint(nrows,
                                                ncols,
                                                nnz,
                                                SparseMat_pRows,
                                                SparseMat_jCols,
                                                SparseMat_Vals,
                                                Ranges,
                                                RangeTypes,
                                                ColumnIndexToVarObject))
def _convert_external_setup_without_cleanup(
        worker,
        scenario,
        output_directory,
        firststage_var_suffix,
        enforce_derived_nonanticipativity,
        io_options):
    """Convert one scenario of a two-stage PySP model to DDSIP input files.

    Writes, into ``output_directory``: an ordered LP file for the
    scenario, a symbol file pairing LP labels with scenario-tree ids,
    ``.sc``/``.sc.struct`` files describing the stochastic RHS, matrix
    coefficients, and objective costs, and a deterministic LP (with all
    stochastic values zeroed) for debugging.

    Parameters
    ----------
    worker : PySP scenario-tree worker (provides ``scenario_tree``).
    scenario : scenario whose instantiated model is converted.
    output_directory : existing directory for all output files.
    firststage_var_suffix : suffix appended to first-stage variable
        labels so DDSIP can identify them.
    enforce_derived_nonanticipativity : passed to stage-mapping helpers.
    io_options : dict of LP writer options (copied; not mutated).

    Returns
    -------
    tuple of counts: (first-stage vars, second-stage vars, first-stage
    constraints, second-stage constraints, stochastic costs, stochastic
    RHS entries, stochastic matrix entries).

    Raises
    ------
    RuntimeError : for missing/invalid stochastic annotations or
        nonlinear constraints/objectives.
    ValueError : if stochastic variable bounds are annotated (not
        supported by the DDSIP writer).

    Fix: the "no active Constraint objects were recovered" error in the
    stochastic-matrix branch previously reported
    ``StochasticConstraintBoundsAnnotation`` (copy-paste from the RHS
    branch); it now reports ``StochasticConstraintBodyAnnotation``.
    """
    import pyomo.environ
    assert os.path.exists(output_directory)
    io_options = dict(io_options)
    scenario_tree = worker.scenario_tree
    reference_model = scenario._instance
    rootnode = scenario_tree.findRootNode()
    firststage = scenario_tree.stages[0]
    secondstage = scenario_tree.stages[1]
    constraint_name_buffer = {}
    objective_name_buffer = {}
    variable_name_buffer = {}

    all_constraints = list(
        con for con in reference_model.component_data_objects(
            Constraint,
            active=True,
            descend_into=True))

    #
    # Check for model annotations
    #
    stochastic_rhs = locate_annotations(
        reference_model,
        StochasticConstraintBoundsAnnotation,
        max_allowed=1)
    if len(stochastic_rhs) == 0:
        stochastic_rhs = None
        stochastic_rhs_entries = {}
        empty_rhs_annotation = False
    else:
        assert len(stochastic_rhs) == 1
        stochastic_rhs = stochastic_rhs[0][1]
        if stochastic_rhs.has_declarations:
            empty_rhs_annotation = False
            stochastic_rhs_entries = stochastic_rhs.expand_entries()
            # sort for deterministic output ordering
            stochastic_rhs_entries.sort(
                key=lambda x: x[0].getname(True, constraint_name_buffer))
            if len(stochastic_rhs_entries) == 0:
                raise RuntimeError(
                    "The %s annotation was declared "
                    "with external entries but no active Constraint "
                    "objects were recovered from those entries."
                    % (StochasticConstraintBoundsAnnotation.__name__))
        else:
            # empty annotation: treat every active constraint as stochastic
            empty_rhs_annotation = True
            stochastic_rhs_entries = tuple(
                (con, stochastic_rhs.default)
                for con in all_constraints)

    stochastic_matrix = locate_annotations(
        reference_model,
        StochasticConstraintBodyAnnotation,
        max_allowed=1)
    if len(stochastic_matrix) == 0:
        stochastic_matrix = None
        stochastic_matrix_entries = {}
        empty_matrix_annotation = False
    else:
        assert len(stochastic_matrix) == 1
        stochastic_matrix = stochastic_matrix[0][1]
        if stochastic_matrix.has_declarations:
            empty_matrix_annotation = False
            stochastic_matrix_entries = stochastic_matrix.expand_entries()
            stochastic_matrix_entries.sort(
                key=lambda x: x[0].getname(True, constraint_name_buffer))
            if len(stochastic_matrix_entries) == 0:
                # BUG FIX: report the Body annotation, not the Bounds
                # annotation (copy-paste error from the RHS branch above).
                raise RuntimeError(
                    "The %s annotation was declared "
                    "with external entries but no active Constraint "
                    "objects were recovered from those entries."
                    % (StochasticConstraintBodyAnnotation.__name__))
        else:
            empty_matrix_annotation = True
            stochastic_matrix_entries = tuple(
                (con, stochastic_matrix.default)
                for con in all_constraints)

    stochastic_constraint_ids = set()
    stochastic_constraint_ids.update(
        id(con) for con, _ in stochastic_rhs_entries)
    stochastic_constraint_ids.update(
        id(con) for con, _ in stochastic_matrix_entries)

    stochastic_objective = locate_annotations(
        reference_model,
        StochasticObjectiveAnnotation,
        max_allowed=1)
    if len(stochastic_objective) == 0:
        stochastic_objective = None
    else:
        assert len(stochastic_objective) == 1
        stochastic_objective = stochastic_objective[0][1]

    stochastic_varbounds = locate_annotations(
        reference_model,
        StochasticVariableBoundsAnnotation)
    if len(stochastic_varbounds) > 0:
        raise ValueError(
            "The DDSIP writer does not currently support "
            "stochastic variable bounds. Invalid annotation type: %s"
            % (StochasticVariableBoundsAnnotation.__name__))

    if (stochastic_rhs is None) and \
       (stochastic_matrix is None) and \
       (stochastic_objective is None):
        raise RuntimeError(
            "No stochastic annotations found. DDSIP "
            "conversion requires at least one of the following "
            "annotation types:\n - %s\n - %s\n - %s"
            % (StochasticConstraintBoundsAnnotation.__name__,
               StochasticConstraintBodyAnnotation.__name__,
               StochasticObjectiveAnnotation.__name__))

    # Build (and numerically evaluate) standard repns for every
    # constraint/objective; these are mutated below to zero out
    # stochastic values in the core file.
    assert not hasattr(reference_model, "_repn")
    repn_cache = build_repns(reference_model)
    assert hasattr(reference_model, "_repn")
    assert not reference_model._gen_obj_repn
    assert not reference_model._gen_con_repn
    # compute values
    for block_repns in repn_cache.values():
        for repn in block_repns.values():
            repn.constant = value(repn.constant)
            repn.linear_coefs = [value(c) for c in repn.linear_coefs]
            repn.quadratic_coefs = [value(c) for c in repn.quadratic_coefs]

    #
    # Write the LP file once to obtain the symbol map
    #
    output_filename = os.path.join(output_directory,
                                   scenario.name+".lp.setup")
    with WriterFactory("lp") as writer:
        assert 'column_order' not in io_options
        assert 'row_order' not in io_options
        output_fname, symbol_map = writer(reference_model,
                                          output_filename,
                                          lambda x: True,
                                          io_options)
        assert output_fname == output_filename
    _safe_remove_file(output_filename)

    StageToVariableMap = map_variable_stages(
        scenario,
        scenario_tree,
        symbol_map,
        enforce_derived_nonanticipativity=enforce_derived_nonanticipativity)
    firststage_variable_ids = \
        set(id(var) for symbol, var, scenario_tree_id
            in StageToVariableMap[firststage.name])
    secondstage_variable_ids = \
        set(id(var) for symbol, var, scenario_tree_id
            in StageToVariableMap[secondstage.name])

    StageToConstraintMap = \
        map_constraint_stages(
            scenario,
            scenario_tree,
            symbol_map,
            stochastic_constraint_ids,
            firststage_variable_ids,
            secondstage_variable_ids)
    secondstage_constraint_ids = \
        set(id(con) for symbols, con
            in StageToConstraintMap[secondstage.name])

    assert len(scenario_tree.stages) == 2
    firststage = scenario_tree.stages[0]
    secondstage = scenario_tree.stages[1]

    #
    # Make sure the objective references all first stage variables.
    # We do this by directly modifying the _repn of the
    # objective which the LP/MPS writer will reference next time we call
    # it. In addition, make sure that the first second-stage variable
    # in our column ordering also appears in the objective so that
    # ONE_VAR_CONSTANT does not get identified as the first
    # second-stage variable.
    # ** Just do NOT preprocess again until we call the writer **
    #
    objective_object = scenario._instance_objective
    assert objective_object is not None
    objective_block = objective_object.parent_block()
    objective_repn = repn_cache[id(objective_block)][objective_object]

    #
    # Create column (variable) ordering maps for LP/MPS files
    #
    column_order = ComponentMap()
    firststage_variable_count = 0
    secondstage_variable_count = 0
    # first-stage variables
    for column_index, (symbol, var, scenario_tree_id) \
            in enumerate(StageToVariableMap[firststage.name]):
        column_order[var] = column_index
        firststage_variable_count += 1
    # second-stage variables
    for column_index, (symbol, var, scenario_tree_id) \
            in enumerate(StageToVariableMap[secondstage.name],
                         len(column_order)):
        column_order[var] = column_index
        secondstage_variable_count += 1
    # account for the ONE_VAR_CONSTANT second-stage variable
    # added by the LP writer
    secondstage_variable_count += 1

    #
    # Create row (constraint) ordering maps for LP/MPS files
    #
    firststage_constraint_count = 0
    secondstage_constraint_count = 0
    row_order = ComponentMap()
    # first-stage constraints
    for row_index, (symbols, con) \
            in enumerate(StageToConstraintMap[firststage.name]):
        row_order[con] = row_index
        firststage_constraint_count += len(symbols)
    # second-stage constraints
    for row_index, (symbols, con) \
            in enumerate(StageToConstraintMap[secondstage.name],
                         len(row_order)):
        row_order[con] = row_index
        secondstage_constraint_count += len(symbols)
    # account for the ONE_VAR_CONSTANT = 1 second-stage constraint
    # added by the LP writer
    secondstage_constraint_count += 1

    #
    # Create a custom labeler that allows DDSIP to identify
    # first-stage variables
    #
    if io_options.pop('symbolic_solver_labels', False):
        _labeler = TextLabeler()
    else:
        _labeler = NumericLabeler('x')
    labeler = lambda x: _labeler(x) + \
              ("" if ((not isinstance(x, _VarData)) or \
                      (id(x) not in firststage_variable_ids)) else \
               firststage_var_suffix)

    #
    # Write the ordered LP/MPS file
    #
    output_filename = os.path.join(output_directory,
                                   scenario.name+".lp")
    symbols_filename = os.path.join(output_directory,
                                    scenario.name+".lp.symbols")
    with WriterFactory("lp") as writer:
        assert 'column_order' not in io_options
        assert 'row_order' not in io_options
        assert 'labeler' not in io_options
        assert 'force_objective_constant' not in io_options
        io_options['column_order'] = column_order
        io_options['row_order'] = row_order
        io_options['force_objective_constant'] = True
        io_options['labeler'] = labeler
        output_fname, symbol_map = writer(reference_model,
                                          output_filename,
                                          lambda x: True,
                                          io_options)
        assert output_fname == output_filename

    # write the lp file symbol paired with the scenario
    # tree id for each variable in the root node
    with open(symbols_filename, "w") as f:
        st_symbol_map = reference_model._ScenarioTreeSymbolMap
        lines = []
        for id_ in sorted(rootnode._variable_ids):
            var = st_symbol_map.bySymbol[id_]
            if not var.is_expression_type():
                lp_label = symbol_map.byObject[id(var)]
                lines.append("%s %s\n" % (lp_label, id_))
        f.writelines(lines)

    # re-generate these maps as the LP/MPS symbol map
    # is likely different
    StageToVariableMap = map_variable_stages(
        scenario,
        scenario_tree,
        symbol_map,
        enforce_derived_nonanticipativity=enforce_derived_nonanticipativity)

    StageToConstraintMap = map_constraint_stages(
        scenario,
        scenario_tree,
        symbol_map,
        stochastic_constraint_ids,
        firststage_variable_ids,
        secondstage_variable_ids)

    # generate a few data structures that are used
    # when writing the .sc files
    constraint_symbols = ComponentMap(
        (con, symbols) for stage_name in StageToConstraintMap
        for symbols, con in StageToConstraintMap[stage_name])

    #
    # Write the body of the .sc files
    #
    modified_constraint_lb = ComponentMap()
    modified_constraint_ub = ComponentMap()

    #
    # Stochastic RHS
    #
    # **NOTE: In the code that follows we assume the LP
    #         writer always moves constraint body
    #         constants to the rhs and that the lower part
    #         of any range constraints are written before
    #         the upper part.
    #
    stochastic_rhs_count = 0
    with open(os.path.join(output_directory,
                           scenario.name+".rhs.sc.struct"),'w') as f_rhs_struct:
        with open(os.path.join(output_directory,
                               scenario.name+".rhs.sc"),'w') as f_rhs:
            scenario_probability = scenario.probability
            rhs_struct_template = " %s\n"
            rhs_template = " %.17g\n"
            f_rhs.write("scen\n%.17g\n"
                        % (_no_negative_zero(scenario_probability)))
            if stochastic_rhs is not None:
                for con, include_bound in stochastic_rhs_entries:
                    assert isinstance(con, _ConstraintData)
                    if not empty_rhs_annotation:
                        # verify that this constraint was
                        # flagged by PySP or the user as second-stage
                        if id(con) not in secondstage_constraint_ids:
                            raise RuntimeError(
                                "The constraint %s has been declared "
                                "in the %s annotation but it was not identified as "
                                "a second-stage constraint. To correct this issue, "
                                "remove the constraint from this annotation."
                                % (con.name,
                                   StochasticConstraintBoundsAnnotation.__name__))

                    constraint_repn = \
                        repn_cache[id(con.parent_block())][con]
                    if not constraint_repn.is_linear():
                        raise RuntimeError("Only linear constraints are "
                                           "accepted for conversion to DDSIP format. "
                                           "Constraint %s is not linear."
                                           % (con.name))

                    body_constant = constraint_repn.constant
                    # We are going to rewrite the core problem file
                    # with all stochastic values set to zero. This will
                    # allow an easy test for missing user annotations.
                    constraint_repn.constant = 0
                    if body_constant is None:
                        body_constant = 0.0
                    symbols = constraint_symbols[con]
                    assert len(symbols) > 0
                    for con_label in symbols:
                        if con_label.startswith('c_e_') or \
                           con_label.startswith('c_l_'):
                            assert (include_bound is True) or \
                                   (include_bound[0] is True)
                            stochastic_rhs_count += 1
                            f_rhs_struct.write(rhs_struct_template % (con_label))
                            f_rhs.write(rhs_template %
                                        (_no_negative_zero(
                                            value(con.lower) - \
                                            value(body_constant))))
                            # We are going to rewrite the core problem file
                            # with all stochastic values set to zero. This will
                            # allow an easy test for missing user annotations.
                            modified_constraint_lb[con] = con.lower
                            con._lower = _deterministic_check_constant
                            if con_label.startswith('c_e_'):
                                modified_constraint_ub[con] = con.upper
                                con._upper = _deterministic_check_constant
                        elif con_label.startswith('r_l_') :
                            if (include_bound is True) or \
                               (include_bound[0] is True):
                                stochastic_rhs_count += 1
                                f_rhs_struct.write(rhs_struct_template % (con_label))
                                f_rhs.write(rhs_template %
                                            (_no_negative_zero(
                                                value(con.lower) - \
                                                value(body_constant))))
                                # We are going to rewrite the core problem file
                                # with all stochastic values set to zero. This will
                                # allow an easy test for missing user annotations.
                                modified_constraint_lb[con] = con.lower
                                con._lower = _deterministic_check_constant
                        elif con_label.startswith('c_u_'):
                            assert (include_bound is True) or \
                                   (include_bound[1] is True)
                            stochastic_rhs_count += 1
                            f_rhs_struct.write(rhs_struct_template % (con_label))
                            f_rhs.write(rhs_template %
                                        (_no_negative_zero(
                                            value(con.upper) - \
                                            value(body_constant))))
                            # We are going to rewrite the core problem file
                            # with all stochastic values set to zero. This will
                            # allow an easy test for missing user annotations.
                            modified_constraint_ub[con] = con.upper
                            con._upper = _deterministic_check_constant
                        elif con_label.startswith('r_u_'):
                            if (include_bound is True) or \
                               (include_bound[1] is True):
                                stochastic_rhs_count += 1
                                f_rhs_struct.write(rhs_struct_template % (con_label))
                                f_rhs.write(rhs_template %
                                            (_no_negative_zero(
                                                value(con.upper) - \
                                                value(body_constant))))
                                # We are going to rewrite the core problem file
                                # with all stochastic values set to zero. This will
                                # allow an easy test for missing user annotations.
                                modified_constraint_ub[con] = con.upper
                                con._upper = _deterministic_check_constant
                        else:
                            assert False

    #
    # Stochastic Matrix
    #
    stochastic_matrix_count = 0
    with open(os.path.join(output_directory,
                           scenario.name+".matrix.sc.struct"),'w') as f_mat_struct:
        with open(os.path.join(output_directory,
                               scenario.name+".matrix.sc"),'w') as f_mat:
            scenario_probability = scenario.probability
            matrix_struct_template = " %s %s\n"
            matrix_template = " %.17g\n"
            f_mat.write("scen\n")
            if stochastic_matrix is not None:
                for con, var_list in stochastic_matrix_entries:
                    assert isinstance(con, _ConstraintData)
                    if not empty_matrix_annotation:
                        # verify that this constraint was
                        # flagged by PySP or the user as second-stage
                        if id(con) not in secondstage_constraint_ids:
                            raise RuntimeError(
                                "The constraint %s has been declared "
                                "in the %s annotation but it was not identified as "
                                "a second-stage constraint. To correct this issue, "
                                "remove the constraint from this annotation."
                                % (con.name,
                                   StochasticConstraintBodyAnnotation.__name__))
                    constraint_repn = \
                        repn_cache[id(con.parent_block())][con]
                    if not constraint_repn.is_linear():
                        raise RuntimeError("Only linear constraints are "
                                           "accepted for conversion to DDSIP format. "
                                           "Constraint %s is not linear."
                                           % (con.name))
                    assert len(constraint_repn.linear_vars) > 0
                    if var_list is None:
                        var_list = constraint_repn.linear_vars
                    assert len(var_list) > 0
                    symbols = constraint_symbols[con]
                    # sort the variable list by the column ordering
                    # so that we have deterministic output
                    var_list = list(var_list)
                    var_list.sort(key=lambda _v: column_order[_v])
                    new_coefs = list(constraint_repn.linear_coefs)
                    for var in var_list:
                        assert isinstance(var, _VarData)
                        assert not var.fixed
                        var_coef = None
                        for i, (_var, coef) in enumerate(
                                zip(constraint_repn.linear_vars,
                                    constraint_repn.linear_coefs)):
                            if _var is var:
                                var_coef = coef
                                # We are going to rewrite with core problem file
                                # with all stochastic values set to zero. This will
                                # allow an easy test for missing user annotations.
                                new_coefs[i] = _deterministic_check_value
                                break
                        if var_coef is None:
                            raise RuntimeError(
                                "The coefficient for variable %s has "
                                "been marked as stochastic in constraint %s using "
                                "the %s annotation, but the variable does not appear"
                                " in the canonical constraint expression."
                                % (var.name,
                                   con.name,
                                   StochasticConstraintBodyAnnotation.__name__))
                        var_label = symbol_map.byObject[id(var)]
                        for con_label in symbols:
                            stochastic_matrix_count += 1
                            f_mat_struct.write(matrix_struct_template
                                               % (con_label, var_label))
                            f_mat.write(matrix_template
                                        % (_no_negative_zero(value(var_coef))))
                    constraint_repn.linear_coefs = tuple(new_coefs)

    #
    # Stochastic Objective
    #
    stochastic_cost_count = 0
    with open(os.path.join(output_directory,
                           scenario.name+".cost.sc.struct"),'w') as f_obj_struct:
        with open(os.path.join(output_directory,
                               scenario.name+".cost.sc"),'w') as f_obj:
            obj_struct_template = " %s\n"
            obj_template = " %.17g\n"
            f_obj.write("scen\n")
            if stochastic_objective is not None:
                if stochastic_objective.has_declarations:
                    sorted_values = stochastic_objective.expand_entries()
                    assert len(sorted_values) <= 1
                    if len(sorted_values) == 0:
                        raise RuntimeError(
                            "The %s annotation was declared "
                            "with external entries but no active Objective "
                            "objects were recovered from those entries."
                            % (StochasticObjectiveAnnotation.__name__))
                    obj, (objective_variables, include_constant) = \
                        sorted_values[0]
                    assert obj is objective_object
                else:
                    objective_variables, include_constant = \
                        stochastic_objective.default
                if not objective_repn.is_linear():
                    raise RuntimeError("Only linear stochastic objectives are "
                                       "accepted for conversion to DDSIP format. "
                                       "Objective %s is not linear."
                                       % (objective_object.name))
                if objective_variables is None:
                    objective_variables = objective_repn.linear_vars
                stochastic_objective_label = symbol_map.byObject[id(objective_object)]
                # sort the variable list by the column ordering
                # so that we have deterministic output
                objective_variables = list(objective_variables)
                objective_variables.sort(key=lambda _v: column_order[_v])
                assert (len(objective_variables) > 0) or include_constant
                new_coefs = list(objective_repn.linear_coefs)
                for var in objective_variables:
                    assert isinstance(var, _VarData)
                    var_coef = None
                    for i, (_var, coef) in enumerate(
                            zip(objective_repn.linear_vars,
                                objective_repn.linear_coefs)):
                        if _var is var:
                            var_coef = coef
                            # We are going to rewrite the core problem file
                            # with all stochastic values set to zero. This will
                            # allow an easy test for missing user annotations.
                            new_coefs[i] = _deterministic_check_value
                            break
                    if var_coef is None:
                        raise RuntimeError(
                            "The coefficient for variable %s has "
                            "been marked as stochastic in objective %s using "
                            "the %s annotation, but the variable does not appear"
                            " in the canonical objective expression."
                            % (var.name,
                               objective_object.name,
                               StochasticObjectiveAnnotation.__name__))
                    var_label = symbol_map.byObject[id(var)]
                    stochastic_cost_count += 1
                    f_obj_struct.write(obj_struct_template % (var_label))
                    f_obj.write(obj_template
                                % (_no_negative_zero(value(var_coef))))
                objective_repn.linear_coefs = tuple(new_coefs)
                if include_constant:
                    obj_constant = objective_repn.constant
                    # We are going to rewrite the core problem file
                    # with all stochastic values set to zero. This will
                    # allow an easy test for missing user annotations.
                    objective_repn.constant = _deterministic_check_value
                    if obj_constant is None:
                        obj_constant = 0.0
                    stochastic_cost_count += 1
                    f_obj_struct.write(obj_struct_template % ("ONE_VAR_CONSTANT"))
                    f_obj.write(obj_template % (_no_negative_zero(obj_constant)))

    #
    # Write the deterministic part of the LP/MPS-file to its own
    # file for debugging purposes
    #
    reference_model_name = reference_model.name
    reference_model._name = "ZeroStochasticData"
    det_output_filename = os.path.join(output_directory,
                                       scenario.name+".lp.det")
    with WriterFactory("lp") as writer:
        output_fname, symbol_map = writer(reference_model,
                                          det_output_filename,
                                          lambda x: True,
                                          io_options)
        assert output_fname == det_output_filename
    reference_model._name = reference_model_name

    # reset bounds on any constraints that were modified
    for con, lower in iteritems(modified_constraint_lb):
        con._lower = as_numeric(lower)
    for con, upper in iteritems(modified_constraint_ub):
        con._upper = as_numeric(upper)

    return (firststage_variable_count,
            secondstage_variable_count,
            firststage_constraint_count,
            secondstage_constraint_count,
            stochastic_cost_count,
            stochastic_rhs_count,
            stochastic_matrix_count)
def has_ub(self):
    """Return :const:`True` if this variable has an upper bound that is
    neither :const:`None` nor positive infinity; otherwise return
    :const:`False`."""
    bound = self.ub
    if bound is None:
        return False
    return value(bound) != float('inf')
def _convert_external_setup_without_cleanup(
        worker,
        scenario,
        output_directory,
        firststage_var_suffix,
        enforce_derived_nonanticipativity,
        io_options):
    """Convert a single PySP scenario instance to DDSIP input files.

    Writes the scenario's LP file (with first-stage variables suffixed so
    DDSIP can identify them) plus the ``.rhs.sc``, ``.matrix.sc`` and
    ``.cost.sc`` stochastic-data files into ``output_directory``. As a side
    effect, all stochastic coefficients/bounds in the scenario instance are
    overwritten with deterministic placeholder values so that a
    "zero stochastic data" LP can be written for debugging; constraint
    bounds are restored before returning.

    Parameters
    ----------
    worker : scenario-tree worker object providing ``scenario_tree``
    scenario : the scenario whose instance is converted
    output_directory : existing directory that receives all output files
    firststage_var_suffix : string appended to first-stage variable labels
    enforce_derived_nonanticipativity : bool, forwarded to stage mapping
    io_options : dict of LP writer options (copied; not mutated)

    Returns
    -------
    tuple : (firststage_variable_count, secondstage_variable_count,
             firststage_constraint_count, secondstage_constraint_count,
             stochastic_cost_count, stochastic_rhs_count,
             stochastic_matrix_count)

    Raises
    ------
    RuntimeError : missing/inconsistent stochastic annotations, or a
        nonlinear constraint/objective marked stochastic
    ValueError : a StochasticVariableBoundsAnnotation is present
        (unsupported by the DDSIP writer)
    """
    # side-effect import: registers the LP writer plugins
    import pyomo.environ
    assert os.path.exists(output_directory)
    io_options = dict(io_options)
    scenario_tree = worker.scenario_tree
    reference_model = scenario._instance
    rootnode = scenario_tree.findRootNode()
    firststage = scenario_tree.stages[0]
    secondstage = scenario_tree.stages[1]
    constraint_name_buffer = {}
    all_constraints = list(
        con for con in reference_model.component_data_objects(
            Constraint,
            active=True,
            descend_into=True))

    #
    # Check for model annotations
    #
    stochastic_rhs = locate_annotations(
        reference_model,
        StochasticConstraintBoundsAnnotation,
        max_allowed=1)
    if len(stochastic_rhs) == 0:
        stochastic_rhs = None
        stochastic_rhs_entries = {}
        empty_rhs_annotation = False
    else:
        assert len(stochastic_rhs) == 1
        stochastic_rhs = stochastic_rhs[0][1]
        if stochastic_rhs.has_declarations:
            empty_rhs_annotation = False
            stochastic_rhs_entries = stochastic_rhs.expand_entries()
            # sort by name for deterministic output ordering
            stochastic_rhs_entries.sort(
                key=lambda x: x[0].getname(True, constraint_name_buffer))
            if len(stochastic_rhs_entries) == 0:
                raise RuntimeError(
                    "The %s annotation was declared "
                    "with external entries but no active Constraint "
                    "objects were recovered from those entries."
                    % (StochasticConstraintBoundsAnnotation.__name__))
        else:
            # empty annotation: treat every active constraint as stochastic
            empty_rhs_annotation = True
            stochastic_rhs_entries = tuple(
                (con, stochastic_rhs.default)
                for con in all_constraints)

    stochastic_matrix = locate_annotations(
        reference_model,
        StochasticConstraintBodyAnnotation,
        max_allowed=1)
    if len(stochastic_matrix) == 0:
        stochastic_matrix = None
        stochastic_matrix_entries = {}
        empty_matrix_annotation = False
    else:
        assert len(stochastic_matrix) == 1
        stochastic_matrix = stochastic_matrix[0][1]
        if stochastic_matrix.has_declarations:
            empty_matrix_annotation = False
            stochastic_matrix_entries = stochastic_matrix.expand_entries()
            stochastic_matrix_entries.sort(
                key=lambda x: x[0].getname(True, constraint_name_buffer))
            if len(stochastic_matrix_entries) == 0:
                # BUG FIX: this error previously reported
                # StochasticConstraintBoundsAnnotation (copy-paste from the
                # RHS branch); this branch concerns the body annotation.
                raise RuntimeError(
                    "The %s annotation was declared "
                    "with external entries but no active Constraint "
                    "objects were recovered from those entries."
                    % (StochasticConstraintBodyAnnotation.__name__))
        else:
            empty_matrix_annotation = True
            stochastic_matrix_entries = tuple(
                (con, stochastic_matrix.default)
                for con in all_constraints)

    stochastic_constraint_ids = set()
    stochastic_constraint_ids.update(
        id(con) for con, _ in stochastic_rhs_entries)
    stochastic_constraint_ids.update(
        id(con) for con, _ in stochastic_matrix_entries)

    stochastic_objective = locate_annotations(
        reference_model,
        StochasticObjectiveAnnotation,
        max_allowed=1)
    if len(stochastic_objective) == 0:
        stochastic_objective = None
    else:
        assert len(stochastic_objective) == 1
        stochastic_objective = stochastic_objective[0][1]

    stochastic_varbounds = locate_annotations(
        reference_model,
        StochasticVariableBoundsAnnotation)
    if len(stochastic_varbounds) > 0:
        raise ValueError(
            "The DDSIP writer does not currently support "
            "stochastic variable bounds. Invalid annotation type: %s"
            % (StochasticVariableBoundsAnnotation.__name__))

    if (stochastic_rhs is None) and \
       (stochastic_matrix is None) and \
       (stochastic_objective is None):
        raise RuntimeError(
            "No stochastic annotations found. DDSIP "
            "conversion requires at least one of the following "
            "annotation types:\n - %s\n - %s\n - %s"
            % (StochasticConstraintBoundsAnnotation.__name__,
               StochasticConstraintBodyAnnotation.__name__,
               StochasticObjectiveAnnotation.__name__))

    assert not hasattr(reference_model, "_repn")
    repn_cache = build_repns(reference_model)
    assert hasattr(reference_model, "_repn")
    assert not reference_model._gen_obj_repn
    assert not reference_model._gen_con_repn
    # collapse all repn coefficients to plain numeric values
    for block_repns in repn_cache.values():
        for repn in block_repns.values():
            repn.constant = value(repn.constant)
            repn.linear_coefs = [value(c) for c in repn.linear_coefs]
            repn.quadratic_coefs = [value(c) for c in repn.quadratic_coefs]

    #
    # Write the LP file once to obtain the symbol map
    #
    output_filename = os.path.join(output_directory,
                                   scenario.name + ".lp.setup")
    with WriterFactory("lp") as writer:
        assert 'column_order' not in io_options
        assert 'row_order' not in io_options
        output_fname, symbol_map = writer(reference_model,
                                          output_filename,
                                          lambda x: True,
                                          io_options)
        assert output_fname == output_filename
    _safe_remove_file(output_filename)

    StageToVariableMap = map_variable_stages(
        scenario,
        scenario_tree,
        symbol_map,
        enforce_derived_nonanticipativity=enforce_derived_nonanticipativity)
    firststage_variable_ids = \
        set(id(var) for symbol, var, scenario_tree_id
            in StageToVariableMap[firststage.name])
    secondstage_variable_ids = \
        set(id(var) for symbol, var, scenario_tree_id
            in StageToVariableMap[secondstage.name])

    StageToConstraintMap = \
        map_constraint_stages(
            scenario,
            scenario_tree,
            symbol_map,
            stochastic_constraint_ids,
            firststage_variable_ids,
            secondstage_variable_ids)
    secondstage_constraint_ids = \
        set(id(con) for symbols, con
            in StageToConstraintMap[secondstage.name])

    assert len(scenario_tree.stages) == 2
    firststage = scenario_tree.stages[0]
    secondstage = scenario_tree.stages[1]

    #
    # Make sure the objective references all first stage variables.
    # We do this by directly modifying the _repn of the
    # objective which the LP/MPS writer will reference next time we call
    # it. In addition, make sure that the first second-stage variable
    # in our column ordering also appears in the objective so that
    # ONE_VAR_CONSTANT does not get identified as the first
    # second-stage variable.
    # ** Just do NOT preprocess again until we call the writer **
    #
    objective_object = scenario._instance_objective
    assert objective_object is not None
    objective_block = objective_object.parent_block()
    objective_repn = repn_cache[id(objective_block)][objective_object]

    #
    # Create column (variable) ordering maps for LP/MPS files
    #
    column_order = ComponentMap()
    firststage_variable_count = 0
    secondstage_variable_count = 0
    # first-stage variables
    for column_index, (symbol, var, scenario_tree_id) \
            in enumerate(StageToVariableMap[firststage.name]):
        column_order[var] = column_index
        firststage_variable_count += 1
    # second-stage variables
    for column_index, (symbol, var, scenario_tree_id) \
            in enumerate(StageToVariableMap[secondstage.name],
                         len(column_order)):
        column_order[var] = column_index
        secondstage_variable_count += 1
    # account for the ONE_VAR_CONSTANT second-stage variable
    # added by the LP writer
    secondstage_variable_count += 1

    #
    # Create row (constraint) ordering maps for LP/MPS files
    #
    firststage_constraint_count = 0
    secondstage_constraint_count = 0
    row_order = ComponentMap()
    # first-stage constraints
    for row_index, (symbols, con) \
            in enumerate(StageToConstraintMap[firststage.name]):
        row_order[con] = row_index
        firststage_constraint_count += len(symbols)
    # second-stage constraints
    for row_index, (symbols, con) \
            in enumerate(StageToConstraintMap[secondstage.name],
                         len(row_order)):
        row_order[con] = row_index
        secondstage_constraint_count += len(symbols)
    # account for the ONE_VAR_CONSTANT = 1 second-stage constraint
    # added by the LP writer
    secondstage_constraint_count += 1

    #
    # Create a custom labeler that allows DDSIP to identify
    # first-stage variables
    #
    if io_options.pop('symbolic_solver_labels', False):
        _labeler = TextLabeler()
    else:
        _labeler = NumericLabeler('x')
    labeler = lambda x: _labeler(x) + \
              ("" if ((not isinstance(x, _VarData)) or \
                      (id(x) not in firststage_variable_ids)) else \
               firststage_var_suffix)

    #
    # Write the ordered LP/MPS file
    #
    output_filename = os.path.join(output_directory,
                                   scenario.name + ".lp")
    symbols_filename = os.path.join(output_directory,
                                    scenario.name + ".lp.symbols")
    with WriterFactory("lp") as writer:
        assert 'column_order' not in io_options
        assert 'row_order' not in io_options
        assert 'labeler' not in io_options
        assert 'force_objective_constant' not in io_options
        io_options['column_order'] = column_order
        io_options['row_order'] = row_order
        io_options['force_objective_constant'] = True
        io_options['labeler'] = labeler
        output_fname, symbol_map = writer(reference_model,
                                          output_filename,
                                          lambda x: True,
                                          io_options)
        assert output_fname == output_filename
        # write the lp file symbol paired with the scenario
        # tree id for each variable in the root node
        with open(symbols_filename, "w") as f:
            st_symbol_map = reference_model._ScenarioTreeSymbolMap
            lines = []
            for id_ in sorted(rootnode._variable_ids):
                var = st_symbol_map.bySymbol[id_]
                if not var.is_expression_type():
                    lp_label = symbol_map.byObject[id(var)]
                    lines.append("%s %s\n" % (lp_label, id_))
            f.writelines(lines)

    # re-generate these maps as the LP/MPS symbol map
    # is likely different
    StageToVariableMap = map_variable_stages(
        scenario,
        scenario_tree,
        symbol_map,
        enforce_derived_nonanticipativity=enforce_derived_nonanticipativity)
    StageToConstraintMap = map_constraint_stages(
        scenario,
        scenario_tree,
        symbol_map,
        stochastic_constraint_ids,
        firststage_variable_ids,
        secondstage_variable_ids)

    # generate a few data structures that are used
    # when writing the .sc files
    constraint_symbols = ComponentMap(
        (con, symbols) for stage_name in StageToConstraintMap
        for symbols, con in StageToConstraintMap[stage_name])

    #
    # Write the body of the .sc files
    #
    modified_constraint_lb = ComponentMap()
    modified_constraint_ub = ComponentMap()

    #
    # Stochastic RHS
    #
    # **NOTE: In the code that follows we assume the LP
    #         writer always moves constraint body
    #         constants to the rhs and that the lower part
    #         of any range constraints are written before
    #         the upper part.
    #
    stochastic_rhs_count = 0
    with open(os.path.join(output_directory,
                           scenario.name + ".rhs.sc.struct"),
              'w') as f_rhs_struct:
        with open(os.path.join(output_directory,
                               scenario.name + ".rhs.sc"),
                  'w') as f_rhs:
            scenario_probability = scenario.probability
            rhs_struct_template = " %s\n"
            rhs_template = " %.17g\n"
            f_rhs.write("scen\n%.17g\n"
                        % (_no_negative_zero(scenario_probability)))
            if stochastic_rhs is not None:
                for con, include_bound in stochastic_rhs_entries:
                    assert isinstance(con, _ConstraintData)
                    if not empty_rhs_annotation:
                        # verify that this constraint was
                        # flagged by PySP or the user as second-stage
                        if id(con) not in secondstage_constraint_ids:
                            raise RuntimeError(
                                "The constraint %s has been declared "
                                "in the %s annotation but it was not identified as "
                                "a second-stage constraint. To correct this issue, "
                                "remove the constraint from this annotation."
                                % (con.name,
                                   StochasticConstraintBoundsAnnotation.__name__))
                    constraint_repn = \
                        repn_cache[id(con.parent_block())][con]
                    if not constraint_repn.is_linear():
                        raise RuntimeError(
                            "Only linear constraints are "
                            "accepted for conversion to DDSIP format. "
                            "Constraint %s is not linear."
                            % (con.name))
                    body_constant = constraint_repn.constant
                    # We are going to rewrite the core problem file
                    # with all stochastic values set to zero. This will
                    # allow an easy test for missing user annotations.
                    constraint_repn.constant = 0
                    if body_constant is None:
                        body_constant = 0.0
                    symbols = constraint_symbols[con]
                    assert len(symbols) > 0
                    for con_label in symbols:
                        if con_label.startswith('c_e_') or \
                           con_label.startswith('c_l_'):
                            assert (include_bound is True) or \
                                   (include_bound[0] is True)
                            stochastic_rhs_count += 1
                            f_rhs_struct.write(rhs_struct_template
                                               % (con_label))
                            f_rhs.write(rhs_template %
                                        (_no_negative_zero(
                                            value(con.lower) - \
                                            value(body_constant))))
                            # save the true bound so it can be restored
                            # after the zero-stochastic core is written
                            modified_constraint_lb[con] = con.lower
                            con._lower = _deterministic_check_constant
                            if con_label.startswith('c_e_'):
                                modified_constraint_ub[con] = con.upper
                                con._upper = _deterministic_check_constant
                        elif con_label.startswith('r_l_'):
                            if (include_bound is True) or \
                               (include_bound[0] is True):
                                stochastic_rhs_count += 1
                                f_rhs_struct.write(rhs_struct_template
                                                   % (con_label))
                                f_rhs.write(rhs_template %
                                            (_no_negative_zero(
                                                value(con.lower) - \
                                                value(body_constant))))
                                modified_constraint_lb[con] = con.lower
                                con._lower = _deterministic_check_constant
                        elif con_label.startswith('c_u_'):
                            assert (include_bound is True) or \
                                   (include_bound[1] is True)
                            stochastic_rhs_count += 1
                            f_rhs_struct.write(rhs_struct_template
                                               % (con_label))
                            f_rhs.write(rhs_template %
                                        (_no_negative_zero(
                                            value(con.upper) - \
                                            value(body_constant))))
                            modified_constraint_ub[con] = con.upper
                            con._upper = _deterministic_check_constant
                        elif con_label.startswith('r_u_'):
                            if (include_bound is True) or \
                               (include_bound[1] is True):
                                stochastic_rhs_count += 1
                                f_rhs_struct.write(rhs_struct_template
                                                   % (con_label))
                                f_rhs.write(rhs_template %
                                            (_no_negative_zero(
                                                value(con.upper) - \
                                                value(body_constant))))
                                modified_constraint_ub[con] = con.upper
                                con._upper = _deterministic_check_constant
                        else:
                            # unrecognized LP writer label prefix
                            assert False

    #
    # Stochastic Matrix
    #
    stochastic_matrix_count = 0
    with open(os.path.join(output_directory,
                           scenario.name + ".matrix.sc.struct"),
              'w') as f_mat_struct:
        with open(os.path.join(output_directory,
                               scenario.name + ".matrix.sc"),
                  'w') as f_mat:
            scenario_probability = scenario.probability
            matrix_struct_template = " %s %s\n"
            matrix_template = " %.17g\n"
            f_mat.write("scen\n")
            if stochastic_matrix is not None:
                for con, var_list in stochastic_matrix_entries:
                    assert isinstance(con, _ConstraintData)
                    if not empty_matrix_annotation:
                        # verify that this constraint was
                        # flagged by PySP or the user as second-stage
                        if id(con) not in secondstage_constraint_ids:
                            raise RuntimeError(
                                "The constraint %s has been declared "
                                "in the %s annotation but it was not identified as "
                                "a second-stage constraint. To correct this issue, "
                                "remove the constraint from this annotation."
                                % (con.name,
                                   StochasticConstraintBodyAnnotation.__name__))
                    constraint_repn = \
                        repn_cache[id(con.parent_block())][con]
                    if not constraint_repn.is_linear():
                        raise RuntimeError(
                            "Only linear constraints are "
                            "accepted for conversion to DDSIP format. "
                            "Constraint %s is not linear."
                            % (con.name))
                    assert len(constraint_repn.linear_vars) > 0
                    if var_list is None:
                        var_list = constraint_repn.linear_vars
                    assert len(var_list) > 0
                    symbols = constraint_symbols[con]
                    # sort the variable list by the column ordering
                    # so that we have deterministic output
                    var_list = list(var_list)
                    var_list.sort(key=lambda _v: column_order[_v])
                    new_coefs = list(constraint_repn.linear_coefs)
                    for var in var_list:
                        assert isinstance(var, _VarData)
                        assert not var.fixed
                        var_coef = None
                        for i, (_var, coef) in enumerate(
                                zip(constraint_repn.linear_vars,
                                    constraint_repn.linear_coefs)):
                            if _var is var:
                                var_coef = coef
                                # We are going to rewrite the core problem file
                                # with all stochastic values set to zero. This will
                                # allow an easy test for missing user annotations.
                                new_coefs[i] = _deterministic_check_value
                                break
                        if var_coef is None:
                            raise RuntimeError(
                                "The coefficient for variable %s has "
                                "been marked as stochastic in constraint %s using "
                                "the %s annotation, but the variable does not appear"
                                " in the canonical constraint expression."
                                % (var.name,
                                   con.name,
                                   StochasticConstraintBodyAnnotation.__name__))
                        var_label = symbol_map.byObject[id(var)]
                        for con_label in symbols:
                            stochastic_matrix_count += 1
                            f_mat_struct.write(matrix_struct_template
                                               % (con_label, var_label))
                            f_mat.write(matrix_template
                                        % (_no_negative_zero(value(var_coef))))
                    constraint_repn.linear_coefs = tuple(new_coefs)

    #
    # Stochastic Objective
    #
    stochastic_cost_count = 0
    with open(os.path.join(output_directory,
                           scenario.name + ".cost.sc.struct"),
              'w') as f_obj_struct:
        with open(os.path.join(output_directory,
                               scenario.name + ".cost.sc"),
                  'w') as f_obj:
            obj_struct_template = " %s\n"
            obj_template = " %.17g\n"
            f_obj.write("scen\n")
            if stochastic_objective is not None:
                if stochastic_objective.has_declarations:
                    sorted_values = stochastic_objective.expand_entries()
                    assert len(sorted_values) <= 1
                    if len(sorted_values) == 0:
                        raise RuntimeError(
                            "The %s annotation was declared "
                            "with external entries but no active Objective "
                            "objects were recovered from those entries."
                            % (StochasticObjectiveAnnotation.__name__))
                    obj, (objective_variables, include_constant) = \
                        sorted_values[0]
                    assert obj is objective_object
                else:
                    objective_variables, include_constant = \
                        stochastic_objective.default
                if not objective_repn.is_linear():
                    raise RuntimeError(
                        "Only linear stochastic objectives are "
                        "accepted for conversion to DDSIP format. "
                        "Objective %s is not linear."
                        % (objective_object.name))
                if objective_variables is None:
                    objective_variables = objective_repn.linear_vars
                # sort the variable list by the column ordering
                # so that we have deterministic output
                objective_variables = list(objective_variables)
                objective_variables.sort(key=lambda _v: column_order[_v])
                assert (len(objective_variables) > 0) or include_constant
                new_coefs = list(objective_repn.linear_coefs)
                for var in objective_variables:
                    assert isinstance(var, _VarData)
                    var_coef = None
                    for i, (_var, coef) in enumerate(
                            zip(objective_repn.linear_vars,
                                objective_repn.linear_coefs)):
                        if _var is var:
                            var_coef = coef
                            # We are going to rewrite the core problem file
                            # with all stochastic values set to zero. This will
                            # allow an easy test for missing user annotations.
                            new_coefs[i] = _deterministic_check_value
                            break
                    if var_coef is None:
                        raise RuntimeError(
                            "The coefficient for variable %s has "
                            "been marked as stochastic in objective %s using "
                            "the %s annotation, but the variable does not appear"
                            " in the canonical objective expression."
                            % (var.name,
                               objective_object.name,
                               StochasticObjectiveAnnotation.__name__))
                    var_label = symbol_map.byObject[id(var)]
                    stochastic_cost_count += 1
                    f_obj_struct.write(obj_struct_template % (var_label))
                    f_obj.write(obj_template
                                % (_no_negative_zero(value(var_coef))))
                objective_repn.linear_coefs = tuple(new_coefs)
                if include_constant:
                    obj_constant = objective_repn.constant
                    # We are going to rewrite the core problem file
                    # with all stochastic values set to zero. This will
                    # allow an easy test for missing user annotations.
                    objective_repn.constant = _deterministic_check_value
                    if obj_constant is None:
                        obj_constant = 0.0
                    stochastic_cost_count += 1
                    f_obj_struct.write(obj_struct_template
                                       % ("ONE_VAR_CONSTANT"))
                    f_obj.write(obj_template
                                % (_no_negative_zero(obj_constant)))

    #
    # Write the deterministic part of the LP/MPS-file to its own
    # file for debugging purposes
    #
    reference_model_name = reference_model.name
    reference_model._name = "ZeroStochasticData"
    det_output_filename = os.path.join(output_directory,
                                       scenario.name + ".lp.det")
    with WriterFactory("lp") as writer:
        output_fname, symbol_map = writer(reference_model,
                                          det_output_filename,
                                          lambda x: True,
                                          io_options)
        assert output_fname == det_output_filename
    reference_model._name = reference_model_name

    # reset bounds on any constraints that were modified
    for con, lower in iteritems(modified_constraint_lb):
        con._lower = as_numeric(lower)
    for con, upper in iteritems(modified_constraint_ub):
        con._upper = as_numeric(upper)

    return (firststage_variable_count,
            secondstage_variable_count,
            firststage_constraint_count,
            secondstage_constraint_count,
            stochastic_cost_count,
            stochastic_rhs_count,
            stochastic_matrix_count)
def compile_block_linear_constraints(parent_block,
                                     constraint_name,
                                     skip_trivial_constraints=False,
                                     single_precision_storage=False,
                                     verbose=False,
                                     descend_into=True):
    """Replace the active linear constraints on a block with a single
    compiled MatrixConstraint stored in CSR array form.

    All active linear (polynomial degree <= 1) constraints found on
    ``parent_block`` (and sub-blocks when ``descend_into`` is True) are
    collected into sparse-matrix arrays, deleted from the model, and
    replaced by one MatrixConstraint component added under
    ``constraint_name``. NOTE: this destructively modifies the model —
    compiled constraint data objects have their values freed and their
    containers removed.

    Parameters:
        parent_block: constructed block whose constraints are compiled
        constraint_name: name for the new MatrixConstraint component
        skip_trivial_constraints: drop constraints with no variables
        single_precision_storage: store coefficients as 'f' instead of 'd'
        verbose: print timing/size diagnostics
        descend_into: also compile constraints on sub-blocks

    Raises:
        RuntimeError: if the block is not constructed
        ValueError: if a constraint bound is a non-fixed expression
    """
    if verbose:
        print("")
        print("Compiling linear constraints on block with name: %s"
              % (parent_block.name))

    if not parent_block.is_constructed():
        raise RuntimeError(
            "Attempting to compile block '%s' with unconstructed "
            "component(s)" % (parent_block.name))

    #
    # Linear MatrixConstraint in CSR format
    #
    SparseMat_pRows = []
    SparseMat_jCols = []
    SparseMat_Vals = []
    Ranges = []
    RangeTypes = []

    def _get_bound(exp):
        # Return the numeric value of a (fixed) bound expression,
        # None for a missing bound.
        if exp is None:
            return None
        if is_fixed(exp):
            return value(exp)
        raise ValueError("non-fixed bound: " + str(exp))

    start_time = time.time()
    if verbose:
        print("Sorting active blocks...")

    # deterministic traversal order so compiled row/column ids are stable
    sortOrder = SortComponents.indices | SortComponents.alphabetical
    all_blocks = [_b for _b in parent_block.block_data_objects(
        active=True,
        sort=sortOrder,
        descend_into=descend_into)]

    stop_time = time.time()
    if verbose:
        print("Time to sort active blocks: %.2f seconds"
              % (stop_time-start_time))

    start_time = time.time()
    if verbose:
        print("Collecting variables on active blocks...")

    #
    # First Pass: assign each variable a deterministic id
    # (an index in a list)
    #
    VarSymbolToVarObject = []
    for block in all_blocks:
        VarSymbolToVarObject.extend(
            block.component_data_objects(Var,
                                         sort=sortOrder,
                                         descend_into=False))
    VarIDToVarSymbol = \
        dict((id(vardata), index)
             for index, vardata in enumerate(VarSymbolToVarObject))

    stop_time = time.time()
    if verbose:
        print("Time to collect variables on active blocks: %.2f seconds"
              % (stop_time-start_time))

    start_time = time.time()
    if verbose:
        print("Compiling active linear constraints...")

    #
    # Second Pass: collect and remove active linear constraints
    #
    constraint_data_to_remove = []
    empty_constraint_containers_to_remove = []
    constraint_containers_to_remove = []
    constraint_containers_to_check = set()
    referenced_variable_symbols = set()
    nnz = 0
    nrows = 0
    # CSR row-pointer array; starts with the mandatory leading 0
    SparseMat_pRows = [0]
    for block in all_blocks:

        # drop any stale cached repns on the block
        if hasattr(block, '_canonical_repn'):
            del block._canonical_repn
        if hasattr(block, '_ampl_repn'):
            del block._ampl_repn

        for constraint in block.component_objects(Constraint,
                                                  active=True,
                                                  sort=sortOrder,
                                                  descend_into=False):

            assert not isinstance(constraint, MatrixConstraint)

            if len(constraint) == 0:
                empty_constraint_containers_to_remove.append(
                    (block, constraint))
            else:
                singleton = isinstance(constraint, SimpleConstraint)

                for index, constraint_data in iteritems(constraint):

                    # only degree <= 1 (linear/constant) bodies are compiled
                    if constraint_data.body.polynomial_degree() <= 1:

                        # collect for removal
                        if singleton:
                            constraint_containers_to_remove.append(
                                (block, constraint))
                        else:
                            constraint_data_to_remove.append(
                                (constraint, index))
                            constraint_containers_to_check.add(
                                (block, constraint))

                        canonical_repn = generate_canonical_repn(
                            constraint_data.body)

                        assert isinstance(canonical_repn,
                                          LinearCanonicalRepn)

                        row_variable_symbols = []
                        row_coefficients = []
                        if canonical_repn.variables is None:
                            # trivial (no-variable) row: either skip it or
                            # emit an empty row below
                            if skip_trivial_constraints:
                                continue
                        else:
                            row_variable_symbols = \
                                [VarIDToVarSymbol[id(vardata)]
                                 for vardata in canonical_repn.variables]
                            referenced_variable_symbols.update(
                                row_variable_symbols)
                            assert canonical_repn.linear is not None
                            row_coefficients = canonical_repn.linear

                        SparseMat_pRows.append(SparseMat_pRows[-1] + \
                                               len(row_variable_symbols))
                        SparseMat_jCols.extend(row_variable_symbols)
                        SparseMat_Vals.extend(row_coefficients)

                        nnz += len(row_variable_symbols)
                        nrows += 1

                        L = _get_bound(constraint_data.lower)
                        U = _get_bound(constraint_data.upper)
                        # NOTE(review): if canonical_repn.constant is None
                        # this calls value(None) before the None check —
                        # the fallback below looks unreachable; confirm
                        # value(None) returns None in this Pyomo version.
                        constant = value(canonical_repn.constant)
                        if constant is None:
                            constant = 0

                        # body constant is folded into the bounds (RHS)
                        Ranges.append(L - constant if (L is not None) else 0)
                        Ranges.append(U - constant if (U is not None) else 0)
                        if (L is not None) and \
                           (U is not None) and \
                           (not constraint_data.equality):
                            RangeTypes.append(MatrixConstraint.LowerBound |
                                              MatrixConstraint.UpperBound)
                        elif constraint_data.equality:
                            RangeTypes.append(MatrixConstraint.Equality)
                        elif L is not None:
                            assert U is None
                            RangeTypes.append(MatrixConstraint.LowerBound)
                        else:
                            assert U is not None
                            RangeTypes.append(MatrixConstraint.UpperBound)

                        # Start freeing up memory
                        constraint_data.set_value(None)

    ncols = len(referenced_variable_symbols)

    stop_time = time.time()
    if verbose:
        print("Time to compile active linear constraints: %.2f seconds"
              % (stop_time-start_time))

    start_time = time.time()
    if verbose:
        print("Removing compiled constraint objects...")

    #
    # Remove compiled constraints
    #
    constraints_removed = 0
    constraint_containers_removed = 0
    for block, constraint in empty_constraint_containers_to_remove:
        block.del_component(constraint)
        constraint_containers_removed += 1
    for constraint, index in constraint_data_to_remove:
        del constraint[index]
        constraints_removed += 1
    for block, constraint in constraint_containers_to_remove:
        block.del_component(constraint)
        constraints_removed += 1
        constraint_containers_removed += 1
    # containers that lost some (but maybe not all) members: delete
    # only if now empty
    for block, constraint in constraint_containers_to_check:
        if len(constraint) == 0:
            block.del_component(constraint)
            constraint_containers_removed += 1

    stop_time = time.time()
    if verbose:
        print("Eliminated %s constraints and %s Constraint container objects"
              % (constraints_removed, constraint_containers_removed))
        print("Time to remove compiled constraint objects: %.2f seconds"
              % (stop_time-start_time))

    start_time = time.time()
    if verbose:
        print("Assigning variable column indices...")

    #
    # Assign a column index to the set of referenced variables
    #
    ColumnIndexToVarSymbol = sorted(referenced_variable_symbols)
    VarSymbolToColumnIndex = dict(
        (symbol, column)
        for column, symbol in enumerate(ColumnIndexToVarSymbol))
    # remap CSR column ids from global variable symbols to the compact
    # referenced-variable column space
    SparseMat_jCols = [VarSymbolToColumnIndex[symbol]
                       for symbol in SparseMat_jCols]
    del VarSymbolToColumnIndex
    ColumnIndexToVarObject = [VarSymbolToVarObject[var_symbol]
                              for var_symbol in ColumnIndexToVarSymbol]

    stop_time = time.time()
    if verbose:
        print("Time to assign variable column indices: %.2f seconds"
              % (stop_time-start_time))

    start_time = time.time()
    if verbose:
        print("Converting compiled constraint data to array storage...")
        print(" - Using %s precision for numeric values"
              % ('single' if single_precision_storage else 'double'))

    #
    # Convert to array storage
    #
    number_storage = 'f' if single_precision_storage else 'd'
    SparseMat_pRows = array.array('L', SparseMat_pRows)
    SparseMat_jCols = array.array('L', SparseMat_jCols)
    SparseMat_Vals = array.array(number_storage, SparseMat_Vals)
    Ranges = array.array(number_storage, Ranges)
    RangeTypes = array.array('B', RangeTypes)

    stop_time = time.time()
    if verbose:
        storage_bytes = \
            SparseMat_pRows.buffer_info()[1] * SparseMat_pRows.itemsize + \
            SparseMat_jCols.buffer_info()[1] * SparseMat_jCols.itemsize + \
            SparseMat_Vals.buffer_info()[1] * SparseMat_Vals.itemsize + \
            Ranges.buffer_info()[1] * Ranges.itemsize + \
            RangeTypes.buffer_info()[1] * RangeTypes.itemsize
        print("Sparse Matrix Dimension:")
        print(" - Rows: "+str(nrows))
        print(" - Cols: "+str(ncols))
        print(" - Nonzeros: "+str(nnz))
        print("Compiled Data Storage: "+str(_label_bytes(storage_bytes)))
        print("Time to convert compiled constraint data to "
              "array storage: %.2f seconds" % (stop_time-start_time))

    parent_block.add_component(constraint_name,
                               MatrixConstraint(nrows, ncols, nnz,
                                                SparseMat_pRows,
                                                SparseMat_jCols,
                                                SparseMat_Vals,
                                                Ranges,
                                                RangeTypes,
                                                ColumnIndexToVarObject))
def _initialize_members(self, init_set):
    """Initialize variable data for all indices in a set.

    For every index in ``init_set``, the domain, value, and bounds of the
    corresponding variable data are initialized from whichever of the
    ``self._domain_init_*`` / ``self._value_init_*`` /
    ``self._bounds_init_*`` configuration attributes was supplied at
    construction (rule attributes take precedence over plain values for
    domains and bounds; a plain value takes precedence for values).
    """
    #
    # Initialize domains
    #
    if self._domain_init_rule is not None:
        #
        # Initialize domains with a rule
        #
        if self.is_indexed():
            for ndx in init_set:
                self._data[ndx].domain = \
                    apply_indexed_rule(self,
                                       self._domain_init_rule,
                                       self._parent(),
                                       ndx)
        else:
            # scalar variable: call the rule without an index
            self.domain = self._domain_init_rule(self._parent())
    else:
        #
        # Initialize domains with a value
        #
        if self.is_indexed():
            for ndx in init_set:
                self._data[ndx].domain = self._domain_init_value
        else:
            self.domain = self._domain_init_value

    #
    # Initialize values
    #
    if self._value_init_value is not None:
        #
        # Initialize values with a value
        #
        if self._value_init_value.__class__ is dict:
            for key in init_set:
                #
                # Skip indices that are not in the dictionary. This arises when
                # initializing VarList objects with a dictionary.
                #
                if not key in self._value_init_value:
                    continue
                val = self._value_init_value[key]
                vardata = self._data[key]
                vardata.set_value(val)
        else:
            # a single (possibly expression-valued) initializer applied
            # uniformly; resolve it to a number once, outside the loop
            val = value(self._value_init_value)
            for key in init_set:
                vardata = self._data[key]
                vardata.set_value(val)
    elif self._value_init_rule is not None:
        #
        # Initialize values with a rule
        #
        if self.is_indexed():
            for key in init_set:
                vardata = self._data[key]
                val = apply_indexed_rule(self,
                                         self._value_init_rule,
                                         self._parent(),
                                         key)
                val = value(val)
                vardata.set_value(val)
        else:
            val = self._value_init_rule(self._parent())
            val = value(val)
            self.set_value(val)

    #
    # Initialize bounds
    #
    if self._bounds_init_rule is not None:
        #
        # Initialize bounds with a rule
        #
        if self.is_indexed():
            for key in init_set:
                vardata = self._data[key]
                # rule must return a (lower, upper) pair
                (lb, ub) = apply_indexed_rule(self,
                                              self._bounds_init_rule,
                                              self._parent(),
                                              key)
                vardata.setlb(lb)
                vardata.setub(ub)
        else:
            (lb, ub) = self._bounds_init_rule(self._parent())
            self.setlb(lb)
            self.setub(ub)
    elif self._bounds_init_value is not None:
        #
        # Initialize bounds with a value
        #
        (lb, ub) = self._bounds_init_value
        for key in init_set:
            vardata = self._data[key]
            vardata.setlb(lb)
            vardata.setub(ub)
def _get_bound(self, exp): if exp is None: return None if is_fixed(exp): return value(exp) raise ValueError("non-fixed bound: " + str(exp))