def check_variable_exists(backend_model, constraint, variable, idx=None):
    """
    Search for the existence of a decision variable in a Pyomo constraint.

    Parameters
    ----------
    backend_model : Pyomo ConcreteModel
    constraint : str
        Name of the constraint (or expression) that may exist in the backend.
    variable : str
        Variable name (or substring of a name) to search for among the
        variables appearing in the constraint expression.
    idx : constraint index, optional
        If given, only the constraint at this index is searched; otherwise
        all indices of the constraint are searched.
    """
    if getattr(backend_model, constraint) in backend_model.component_objects(
        ctype=po.Constraint
    ):
        expression_accessor = "body"
    elif getattr(backend_model, constraint) in backend_model.component_objects(
        ctype=po.Expression
    ):
        expression_accessor = "value"
    if idx is not None:
        if idx in getattr(backend_model, constraint)._index:
            variables = identify_variables(
                getattr(getattr(backend_model, constraint)[idx], expression_accessor)
            )
            return any(variable in j.getname() for j in list(variables))
        else:
            return False
    else:
        exists = []
        for v in getattr(backend_model, constraint).values():
            variables = identify_variables(getattr(v, expression_accessor))
            exists.append(any(variable in j.getname() for j in list(variables)))
        return any(exists)
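# A minimal usage sketch for check_variable_exists, assuming `po` is pyomo.environ
# (or pyomo.core) and `identify_variables` is already in scope as in the helper
# above; the model and component names here are purely illustrative.
import pyomo.environ as po

backend_model = po.ConcreteModel()
backend_model.energy_cap = po.Var(initialize=2.0)
backend_model.carrier_prod = po.Var([0, 1], initialize=1.0)
backend_model.energy_balance = po.Constraint(
    [0, 1], rule=lambda m, t: m.carrier_prod[t] <= m.energy_cap
)

# 'energy_cap' appears in the body of at least one index of the constraint
assert check_variable_exists(backend_model, "energy_balance", "energy_cap")
# a variable name that never appears is not found
assert not check_variable_exists(backend_model, "energy_balance", "storage_cap")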
def test_constraintSub(self): m = ri.create_model() m.pert_a = Param(initialize=0.01) m.pert_b = Param(initialize=1.01) m_sipopt = sensitivity_calculation('sipopt', m, [m.a, m.b], [m.pert_a, m.pert_b]) # verify substitutions in equality constraint self.assertTrue(m_sipopt.C_equal.lower.ctype is Param and m_sipopt.C_equal.upper.ctype is Param) self.assertFalse(m_sipopt.C_equal.active) self.assertTrue( m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[3].lower == 0.0 and m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[3].upper == 0.0 and len( list( identify_variables( m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[3].body))) == 2) # verify substitutions in one-sided bounded constraint self.assertTrue(m_sipopt.C_singleBnd.lower is None and m_sipopt.C_singleBnd.upper.ctype is Param) self.assertFalse(m_sipopt.C_singleBnd.active) self.assertTrue( m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[4].lower is None and m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[4].upper == 0.0 and len( list( identify_variables( m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[4].body))) == 2) # verify substitutions in ranged inequality constraint self.assertTrue(m_sipopt.C_rangedIn.lower.ctype is Param and m_sipopt.C_rangedIn.upper.ctype is Param) self.assertFalse(m_sipopt.C_rangedIn.active) self.assertTrue( m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[1].lower is None and m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[1].upper == 0.0 and len( list( identify_variables( m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[1].body))) == 2) self.assertTrue( m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[2].lower is None and m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[2].upper == 0.0 and len( list( identify_variables( m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[2].body))) == 2)
def test_create_abstract_from_rule(self): def make_invalid(m): m.I = RangeSet(3) m.x = Var(m.I) m.c = Constraint(expr=sum(m.x[i] for i in m.I) >= 0) def make(m): m.I = RangeSet(3) m.x = Var(m.I) def c(b): return sum(m.x[i] for i in m.I) >= 0 m.c = Constraint(rule=c) with self.assertRaisesRegexp( ValueError, 'x\[1\]: The component has not been constructed.'): model = AbstractModel(rule=make_invalid) instance = model.create_instance() model = AbstractModel(rule=make) instance = model.create_instance() self.assertEqual([x.local_name for x in model.component_objects()], []) self.assertEqual([x.local_name for x in instance.component_objects()], ['I', 'x', 'c']) self.assertEqual(len(list(EXPR.identify_variables(instance.c.body))), 3) model = AbstractModel(rule=make) model.y = Var() instance = model.create_instance() self.assertEqual([x.local_name for x in instance.component_objects()], ['y', 'I', 'x', 'c']) self.assertEqual(len(list(EXPR.identify_variables(instance.c.body))), 3)
def test_create_abstract_from_rule(self): def make_invalid(m): m.I = RangeSet(3) m.x = Var(m.I) m.c = Constraint( expr=sum(m.x[i] for i in m.I) >= 0 ) def make(m): m.I = RangeSet(3) m.x = Var(m.I) def c(b): return sum(m.x[i] for i in m.I) >= 0 m.c = Constraint( rule=c ) model = AbstractModel(rule=make_invalid) self.assertRaises(RuntimeError, model.create_instance) model = AbstractModel(rule=make) instance = model.create_instance() self.assertEqual( [x.local_name for x in model.component_objects()], [] ) self.assertEqual( [x.local_name for x in instance.component_objects()], ['I','x','c'] ) self.assertEqual( len(list(EXPR.identify_variables(instance.c.body))), 3 ) model = AbstractModel(rule=make) model.y = Var() instance = model.create_instance() self.assertEqual( [x.local_name for x in instance.component_objects()], ['y','I','x','c'] ) self.assertEqual( len(list(EXPR.identify_variables(instance.c.body))), 3 )
def build_ordered_component_lists(model, solve_data, prefix='working'): """Define lists used for future data transfer. Also attaches ordered lists of the variables, constraints, disjuncts, and disjunctions to the model so that they can be used for mapping back and forth. """ util_blk = getattr(model, solve_data.util_block_name) var_set = ComponentSet() setattr( util_blk, '%s_constraints_list' % prefix, list( model.component_data_objects( ctype=Constraint, active=True, descend_into=(Block, Disjunct)))) setattr( util_blk, '%s_disjuncts_list' % prefix, list( model.component_data_objects( ctype=Disjunct, descend_into=(Block, Disjunct)))) setattr( util_blk, '%s_disjunctions_list' % prefix, list( model.component_data_objects( ctype=Disjunction, active=True, descend_into=(Disjunct, Block)))) # Identify the non-fixed variables in (potentially) active constraints and # objective functions for constr in getattr(util_blk, '%s_constraints_list' % prefix): for v in EXPR.identify_variables(constr.body, include_fixed=False): var_set.add(v) for obj in model.component_data_objects(ctype=Objective, active=True): for v in EXPR.identify_variables(obj.expr, include_fixed=False): var_set.add(v) # Disjunct indicator variables might not appear in active constraints. In # fact, if we consider them Logical variables, they should not appear in # active algebraic constraints. For now, they need to be added to the # variable set. for disj in getattr(util_blk, '%s_disjuncts_list' % prefix): var_set.add(disj.indicator_var) # We use component_data_objects rather than list(var_set) in order to # preserve a deterministic ordering. var_list = list( v for v in model.component_data_objects( ctype=Var, descend_into=(Block, Disjunct)) if v in var_set) setattr(util_blk, '%s_var_list' % prefix, var_list) setattr(util_blk, '%s_binary_vars' % prefix, list( v for v in var_list if v.domain == Binary)) setattr( util_blk, '%s_nonlinear_constraints' % prefix, [ v for v in getattr(util_blk, '%s_constraints_list' % prefix) if v.body.polynomial_degree() not in (0, 1)])
def detect_unfixed_discrete_vars(model): """Detect unfixed discrete variables in use on the model.""" var_set = ComponentSet() for constr in model.component_data_objects( Constraint, active=True, descend_into=True): var_set.update( v for v in EXPR.identify_variables( constr.body, include_fixed=False) if not v.is_continuous()) for obj in model.component_data_objects(Objective, active=True): var_set.update(v for v in EXPR.identify_variables(obj.expr, include_fixed=False) if not v.is_continuous()) return var_set
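# A small illustrative check of detect_unfixed_discrete_vars, assuming the
# Pyomo imports the helper already relies on (ComponentSet, EXPR); the model
# below is made up.
from pyomo.environ import ConcreteModel, Var, Constraint, Objective, Integers

m = ConcreteModel()
m.x = Var()                      # continuous, should be ignored
m.y = Var(domain=Integers)       # unfixed integer, should be detected
m.c = Constraint(expr=m.x + m.y >= 1)
m.obj = Objective(expr=m.x)

discrete = detect_unfixed_discrete_vars(m)
assert m.y in discrete and m.x not in discrete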
def load_values(self, port, default, fixed, use_guesses): sources = port.sources() for name, index, obj in port.iter_vars(fixed=False, names=True): evars = None if port.is_extensive(name): # collect evars if there are any evars = [arc.expanded_block.component(name) for arc in sources] if evars[0] is None: # no evars, so this arc is 1-to-1 evars = None else: try: # index into them if necessary, now that # we know they are not None for j in range(len(evars)): evars[j] = evars[j][index] except AttributeError: pass if evars is not None: for evar in evars: if evar.is_fixed(): continue self.check_value_fix(port, evar, default, fixed, use_guesses, extensive=True) # now all evars should be fixed so combine them # and fix the value of the extensive port member self.combine_and_fix(port, name, obj, evars, fixed) else: if obj.is_expression_type(): for var in identify_variables(obj, include_fixed=False): self.check_value_fix(port, var, default, fixed, use_guesses) else: self.check_value_fix(port, obj, default, fixed, use_guesses)
def add_oa_cut(var_values, duals, solve_data, config): m = solve_data.mip MindtPy = m.MindtPy_utils MindtPy.MindtPy_linear_cuts.nlp_iters.add(solve_data.nlp_iter) sign_adjust = -1 if solve_data.objective_sense == minimize else 1 # Copy values over for var, val in zip(MindtPy.variable_list, var_values): if val is not None and not var.fixed: var.value = val # generate new constraints # TODO some kind of special handling if the dual is phenomenally small? jacs = solve_data.jacobians for constr, dual_value in zip(MindtPy.constraint_list, duals): if constr.body.polynomial_degree() in (1, 0): continue rhs = ((0 if constr.upper is None else constr.upper) + (0 if constr.lower is None else constr.lower)) # Properly handle equality constraints and ranged inequalities # TODO special handling for ranged inequalities? a <= x <= b rhs = constr.lower if constr.has_lb() and constr.has_ub() else rhs slack_var = MindtPy.MindtPy_linear_cuts.slack_vars.add() MindtPy.MindtPy_linear_cuts.oa_cuts.add( expr=copysign(1, sign_adjust * dual_value) * (sum( value(jacs[constr][var]) * (var - value(var)) for var in list(EXPR.identify_variables(constr.body))) + value(constr.body) - rhs) - slack_var <= 0)
def generate_linear_repn(expr, evaluate=False):
    """
    Given an expression containing UncParam, return its linear representation.

    :param expr: A pyomo expression
    :type expr: class:`pyomo.core.expr.numeric_expr.ExpressionBase`
    :param evaluate: If true, evaluate fixed expressions
    :type evaluate: bool, optional

    :return: Standard representation containing a constant term (wrt UncParam),
        linear coefficients, and the corresponding UncParam objects.
    :rtype: pyomo.repn.StandardRepn

    """
    # Fix all Var types (and set values if necessary)
    uncparam_list = []
    var_list = []
    _fixed = []
    for v in identify_variables(expr):
        if isinstance(v.parent_component(), UncParam):
            uncparam_list.append(v)
        else:
            var_list.append(v)
            if not v.fixed:
                v.fix()
                _fixed.append(v)

    repn = generate_standard_repn(expr, compute_values=False, quadratic=False)

    for v in _fixed:
        v.unfix()

    return repn
def disjunctive_obbt(model, solver): """Provides Optimality-based bounds tightening to a model using a solver.""" model._disjuncts_to_process = list(model.component_data_objects( ctype=Disjunct, active=True, descend_into=(Block, Disjunct), descent_order=TraversalStrategy.BreadthFirstSearch)) if model.type() == Disjunct: model._disjuncts_to_process.insert(0, model) linear_var_set = ComponentSet() for constr in model.component_data_objects( Constraint, active=True, descend_into=(Block, Disjunct)): if constr.body.polynomial_degree() in linear_degrees: linear_var_set.update(identify_variables(constr.body, include_fixed=False)) model._disj_bnds_linear_vars = list(linear_var_set) for disj_idx, disjunct in enumerate(model._disjuncts_to_process): var_bnds = obbt_disjunct(model, disj_idx, solver) if var_bnds is not None: # Add bounds to the disjunct if not hasattr(disjunct, '_disj_var_bounds'): # No bounds had been computed before. Attach the bounds dictionary. disjunct._disj_var_bounds = var_bnds else: # Update the bounds dictionary. for var, new_bnds in var_bnds.items(): old_lb, old_ub = disjunct._disj_var_bounds.get(var, (-inf, inf)) new_lb, new_ub = new_bnds disjunct._disj_var_bounds[var] = (max(old_lb, new_lb), min(old_ub, new_ub)) else: disjunct.deactivate() # prune disjunct
def add_var_bound(solve_data, config): """This function will add bounds for variables in nonlinear constraints if they are not bounded. This is to avoid an unbounded main problem in the LP/NLP algorithm. Thus, the model will be updated to include bounds for the unbounded variables in nonlinear constraints. Parameters ---------- solve_data : MindtPySolveData Data container that holds solve-instance data. config : ConfigBlock The specific configurations for MindtPy. """ m = solve_data.working_model MindtPy = m.MindtPy_utils for c in MindtPy.nonlinear_constraint_list: for var in EXPR.identify_variables(c.body): if var.has_lb() and var.has_ub(): continue elif not var.has_lb(): if var.is_integer(): var.setlb(-config.integer_var_bound - 1) else: var.setlb(-config.continuous_var_bound - 1) elif not var.has_ub(): if var.is_integer(): var.setub(config.integer_var_bound) else: var.setub(config.continuous_var_bound)
def calc_jacobians(solve_data, config): """Generates a map of jacobians for the variables in the model. This function generates a map of jacobians corresponding to the variables in the model and adds this ComponentMap to solve_data. Parameters ---------- solve_data : MindtPySolveData Data container that holds solve-instance data. config : ConfigBlock The specific configurations for MindtPy. """ # Map nonlinear_constraint --> Map( # variable --> jacobian of constraint wrt. variable) solve_data.jacobians = ComponentMap() if config.differentiate_mode == 'reverse_symbolic': mode = differentiate.Modes.reverse_symbolic elif config.differentiate_mode == 'sympy': mode = differentiate.Modes.sympy for c in solve_data.mip.MindtPy_utils.nonlinear_constraint_list: vars_in_constr = list(EXPR.identify_variables(c.body)) jac_list = differentiate(c.body, wrt_list=vars_in_constr, mode=mode) solve_data.jacobians[c] = ComponentMap( (var, jac_wrt_var) for var, jac_wrt_var in zip(vars_in_constr, jac_list))
def var_bound_add(solve_data, config): """ This function will add bounds for variables in nonlinear constraints if they are not bounded. (This is to avoid an unbounded master problem in the LP/NLP algorithm.) Thus, the model will be updated to include bounds for the unbounded variables in nonlinear constraints. Parameters ---------- solve_data: MindtPy Data Container data container that holds solve-instance data config: ConfigBlock contains the specific configurations for the algorithm """ m = solve_data.working_model MindtPy = m.MindtPy_utils for c in MindtPy.constraint_list: if c.body.polynomial_degree() not in (1, 0): for var in list(EXPR.identify_variables(c.body)): if var.has_lb() and var.has_ub(): continue elif not var.has_lb(): if var.is_integer(): var.setlb(-config.integer_var_bound - 1) else: var.setlb(-config.continuous_var_bound - 1) elif not var.has_ub(): if var.is_integer(): var.setub(config.integer_var_bound) else: var.setub(config.continuous_var_bound)
def _estimate_M(self, expr, name): # If there are fixed variables here, unfix them for this calculation, # and we'll restore them at the end. fixed_vars = ComponentMap() if not self.assume_fixed_vars_permanent: for v in EXPR.identify_variables(expr, include_fixed=True): if v.fixed: fixed_vars[v] = value(v) v.fixed = False expr_lb, expr_ub = compute_bounds_on_expr(expr) if expr_lb is None or expr_ub is None: raise GDP_Error("Cannot estimate M for unbounded " "expressions.\n\t(found while processing " "constraint '%s'). Please specify a value of M " "or ensure all variables that appear in the " "constraint are bounded." % name) else: M = (expr_lb, expr_ub) # clean up if we unfixed things (fixed_vars is empty if we were assuming # fixed vars are fixed for life) for v, val in fixed_vars.items(): v.fix(val) return tuple(M)
def calc_jacobians(solve_data, config): """ Generates a map of jacobians for the variables in the model This function generates a map of jacobians corresponding to the variables in the model and adds this ComponentMap to solve_data Parameters ---------- solve_data: MindtPy Data Container data container that holds solve-instance data config: MindtPy configurations contains the specific configurations for the algorithm """ # Map nonlinear_constraint --> Map( # variable --> jacobian of constraint wrt. variable) solve_data.jacobians = ComponentMap() if config.differentiate_mode == "reverse_symbolic": mode = differentiate.Modes.reverse_symbolic elif config.differentiate_mode == "sympy": mode = differentiate.Modes.sympy for c in solve_data.mip.MindtPy_utils.constraint_list: if c.body.polynomial_degree() in (1, 0): continue # skip linear constraints vars_in_constr = list(EXPR.identify_variables(c.body)) jac_list = differentiate(c.body, wrt_list=vars_in_constr, mode=mode) solve_data.jacobians[c] = ComponentMap( (var, jac_wrt_var) for var, jac_wrt_var in zip(vars_in_constr, jac_list))
def taylor_series_expansion(expr,
                            diff_mode=differentiate.Modes.reverse_numeric,
                            order=1):
    """
    Generate a taylor series approximation for expr.

    Parameters
    ----------
    expr: pyomo.core.expr.numeric_expr.ExpressionBase
    diff_mode: pyomo.core.expr.calculus.derivatives.Modes
        The method for differentiation.
    order: The order of the taylor series expansion.
        If order is not 1, then symbolic differentiation must be used
        (differentiate.Modes.reverse_symbolic or differentiate.Modes.sympy).

    Returns
    -------
    res: pyomo.core.expr.numeric_expr.ExpressionBase
    """
    if order < 0:
        raise ValueError(
            'Cannot compute taylor series expansion of order {0}'.format(
                str(order)))
    if order != 1 and diff_mode is differentiate.Modes.reverse_numeric:
        logger.warning(
            'taylor_series_expansion can only use symbolic differentiation for orders larger than 1'
        )
        diff_mode = differentiate.Modes.reverse_symbolic
    e_vars = list(identify_variables(expr=expr, include_fixed=False))

    res = value(expr)
    if order >= 1:
        derivs = differentiate(expr=expr, wrt_list=e_vars, mode=diff_mode)
        res += sum(
            value(derivs[i]) * (e_vars[i] - e_vars[i].value)
            for i in range(len(e_vars)))

    """
    This last bit of code is just for higher order taylor series expansions.
    The recursive function _loop modifies derivs in place so that derivs
    becomes a list of lists of lists... However, _loop is also a generator
    so that we don't have to loop through it twice. _loop yields two lists.
    The first is a list of indices corresponding to the first k-1 variables
    that differentiation is being done with respect to. The second is a list
    of derivatives. Each entry in this list is the derivative with respect to
    the first k-1 variables and the kth variable, whose index matches the
    index in _derivs.
    """
    if order >= 2:
        for n in range(2, order + 1):
            coef = 1.0 / math.factorial(n)
            for ndx_list, _derivs in _loop(derivs, e_vars, diff_mode, list()):
                tmp = coef
                for ndx in ndx_list:
                    tmp *= (e_vars[ndx] - e_vars[ndx].value)
                res += tmp * sum(
                    value(_derivs[i]) * (e_vars[i] - e_vars[i].value)
                    for i in range(len(e_vars)))

    return res
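# A first-order usage sketch of taylor_series_expansion as defined above,
# using a plain Pyomo model; around x = 2 the expansion of x**2 is the
# linear expression 4 + 4*(x - 2).
from pyomo.environ import ConcreteModel, Var, value

m = ConcreteModel()
m.x = Var(initialize=2.0)

approx = taylor_series_expansion(m.x ** 2)   # default order=1
m.x.value = 3.0
assert abs(value(approx) - 8.0) < 1e-8       # 4 + 4*(3 - 2) = 8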
def make2dPlot(expr, numticks=10, show_plot=False):
    mc_ccVals = [None] * (numticks + 1)
    mc_cvVals = [None] * (numticks + 1)
    aff_cc = [None] * (numticks + 1)
    aff_cv = [None] * (numticks + 1)
    fvals = [None] * (numticks + 1)
    mc_expr = mc(expr)
    x = next(identify_variables(expr))  # get the first variable
    tick_length = (x.ub - x.lb) / numticks
    xaxis = [x.lb + tick_length * n for n in range(numticks + 1)]

    x_val = value(x)  # initial value of x
    cc = mc_expr.subcc()  # Concave overestimator subgradient at x_val
    cv = mc_expr.subcv()  # Convex underestimator subgradient at x_val
    f_cc = mc_expr.concave()  # Concave overestimator value at x_val
    f_cv = mc_expr.convex()  # Convex underestimator value at x_val
    for i, x_tick in enumerate(xaxis):
        aff_cc[i] = cc[x] * (x_tick - x_val) + f_cc
        aff_cv[i] = cv[x] * (x_tick - x_val) + f_cv
        mc_expr.changePoint(x, x_tick)
        mc_ccVals[i] = mc_expr.concave()
        mc_cvVals[i] = mc_expr.convex()
        fvals[i] = value(expr)
    if show_plot:
        import matplotlib.pyplot as plt  # deferred import; only needed for plotting
        plt.plot(xaxis, fvals, 'r', xaxis, mc_ccVals, 'b--', xaxis,
                 mc_cvVals, 'b--', xaxis, aff_cc, 'k|', xaxis, aff_cv, 'k|')
        plt.show()
    return mc_ccVals, mc_cvVals, aff_cc, aff_cv
def make2dPlot(expr, numticks=10, show_plot=False): mc_ccVals = [None] * (numticks + 1) mc_cvVals = [None] * (numticks + 1) aff_cc = [None] * (numticks + 1) aff_cv = [None] * (numticks + 1) fvals = [None] * (numticks + 1) mc_expr = mc(expr) x = next(identify_variables(expr)) # get the first variable tick_length = (x.ub - x.lb) / numticks xaxis = [x.lb + tick_length * n for n in range(numticks + 1)] x_val = value(x) # initial value of x cc = mc_expr.subcc() # Concave overestimator subgradient at x_val cv = mc_expr.subcv() # Convex underestimator subgradient at x_val f_cc = mc_expr.concave() # Concave overestimator value at x_val f_cv = mc_expr.convex() # Convex underestimator value at x_val for i, x_tick in enumerate(xaxis): aff_cc[i] = cc[x] * (x_tick - x_val) + f_cc aff_cv[i] = cv[x] * (x_tick - x_val) + f_cv mc_expr.changePoint(x, x_tick) mc_ccVals[i] = mc_expr.concave() mc_cvVals[i] = mc_expr.convex() fvals[i] = value(expr) if show_plot: import matplotlib.pyplot as plt plt.plot(xaxis, fvals, 'r', xaxis, mc_ccVals, 'b--', xaxis, mc_cvVals, 'b--', xaxis, aff_cc, 'k|', xaxis, aff_cv, 'k|') plt.show() return mc_ccVals, mc_cvVals, aff_cc, aff_cv
def get_vars_from_components(block, ctype, include_fixed=True, active=None,
                             sort=False, descend_into=Block,
                             descent_order=None):
    """Returns a generator of all the Var objects which are used in
    Constraint expressions on the block. By default, this recurses into
    sub-blocks.

    Args:
        block: The Block to search for components whose expressions contain
            Vars
        ctype: The type of component from which to get Vars, assumed to have
            an expr attribute.
        include_fixed: Whether or not to include fixed variables
        active: Whether to find Vars that appear in Constraints accessible
            via the active tree
        sort: sort method for iterating through Constraint objects
        descend_into: Ctypes to descend into when finding Constraints
        descent_order: Traversal strategy for finding the objects of type
            ctype
    """
    seen = set()
    for constraint in block.component_data_objects(ctype,
                                                   active=active,
                                                   sort=sort,
                                                   descend_into=descend_into,
                                                   descent_order=descent_order):
        for var in EXPR.identify_variables(constraint.expr,
                                           include_fixed=include_fixed):
            if id(var) not in seen:
                seen.add(id(var))
                yield var
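# A brief sketch of calling get_vars_from_components on a small model,
# assuming the imports the generator already uses (Block, EXPR); the
# component names are illustrative.
from pyomo.environ import ConcreteModel, Var, Constraint

m = ConcreteModel()
m.x = Var()
m.y = Var()
m.z = Var()   # never used in a constraint, so it is not yielded
m.c1 = Constraint(expr=m.x + m.y <= 1)
m.c2 = Constraint(expr=m.x >= 0)

used = list(get_vars_from_components(m, Constraint, active=True))
assert set(id(v) for v in used) == {id(m.x), id(m.y)}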
def iter_vars(self, expr_vars=False, fixed=True, with_names=False): """ Iterate through every member of the port, going through the indices of indexed members. If expr_vars, call identify_variables on expression type members. If not fixed, exclude fixed variables/expressions. """ for name, mem in iteritems(self.vars): if not mem.is_indexed(): itr = (mem, ) else: itr = itervalues(mem) for v in itr: if not fixed and v.is_fixed(): continue if v.is_expression_type() and expr_vars: for var in identify_variables(v, include_fixed=fixed): if with_names: yield name, var else: yield var else: if with_names: yield name, v else: yield v
def iter_vars(self, expr_vars=False, fixed=None, names=False): """ Iterate through every member of the port, going through the indices of indexed members. Arguments --------- expr_vars: `bool` If True, call `identify_variables` on expression type members fixed: `bool` Only include variables/expressions with this type of fixed names: `bool` If True, yield (name, var/expr) pairs """ for name, mem in iteritems(self.vars): if not mem.is_indexed(): itr = (mem,) else: itr = itervalues(mem) for v in itr: if fixed is not None and v.is_fixed() != fixed: continue if expr_vars and v.is_expression_type(): for var in identify_variables(v): if fixed is not None and var.is_fixed() != fixed: continue if names: yield name, var else: yield var else: if names: yield name, v else: yield v
def add_objective_linearization(solve_data, config): """ If objective is nonlinear, then this function adds a linearized objective. This function should be used to initialize the ECP method. Parameters ---------- solve_data: MindtPy Data Container data container that holds solve-instance data config: ConfigBlock contains the specific configurations for the algorithm """ m = solve_data.working_model MindtPy = m.MindtPy_utils solve_data.mip_iter += 1 gen = (obj for obj in MindtPy.jacs if obj is MindtPy.MindtPy_objective_expr) MindtPy.MindtPy_linear_cuts.mip_iters.add(solve_data.mip_iter) sign_adjust = 1 if MindtPy.obj.sense == minimize else -1 # generate new constraints # TODO some kind of special handling if the dual is phenomenally small? for obj in gen: c = MindtPy.MindtPy_linear_cuts.ecp_cuts.add(expr=sign_adjust * sum( value(MindtPy.jacs[obj][id(var)]) * (var - value(var)) for var in list(EXPR.identify_variables(obj.body))) + value(obj.body) <= 0) MindtPy.ECP_constr_map[obj, solve_data.mip_iter] = c
def grad_fd(c, scaled=False, h=1e-6):
    """Finite difference the gradient for a constraint, objective, or named
    expression. This is only for use in examining scaling. For faster, more
    accurate gradients refer to pynumero.

    Args:
        c: constraint, objective, or named expression to evaluate
        scaled: if True calculate the scaled grad (default=False)
        h: step size for calculating finite differenced derivatives

    Returns:
        (list of gradient values, list of variables in the constraint)
        The order of the variables corresponds to the gradient values.
    """
    try:
        ex = c.body
    except AttributeError:
        ex = c.expr
    vars = list(EXPR.identify_variables(ex))
    grad = [None] * len(vars)
    r = {}
    if scaled:
        # If you want the scaled grad, put in the variable scaling transform
        orig = [pyo.value(v) for v in vars]
        for i, v in enumerate(vars):
            try:
                sf = v.parent_block().scaling_factor.get(v, 1)
            except AttributeError:
                sf = 1
            r[id(v)] = v / sf
            v.value = orig[i] * sf
        vis = EXPR.ExpressionReplacementVisitor(
            substitute=r,
            remove_named_expressions=True,
        )
        e = vis.dfs_postorder_stack(ex)
    else:
        e = ex
    for i, v in enumerate(vars):
        ov = pyo.value(v)  # original variable value
        f1 = pyo.value(e)
        v.value = ov + h
        f2 = pyo.value(e)
        v.value = ov
        if scaled:
            try:
                sf = c.parent_block().scaling_factor.get(c, 1)
            except AttributeError:
                sf = 1
            grad[i] = sf * (f2 - f1) / h
        else:
            grad[i] = (f2 - f1) / h
    if scaled:
        for i, v in enumerate(vars):
            v.value = orig[i]
    return grad, vars
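# A quick finite-difference check of grad_fd on a toy constraint, assuming
# `pyo` is pyomo.environ as in the helper above; the gradient of x**2 + 3*y
# at (x, y) = (2, 1) should come out near (4, 3).
import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(initialize=2.0)
m.y = pyo.Var(initialize=1.0)
m.c = pyo.Constraint(expr=m.x ** 2 + 3 * m.y <= 10)

grad, variables = grad_fd(m.c, scaled=False, h=1e-6)
g = {id(v): gi for gi, v in zip(grad, variables)}
assert abs(g[id(m.x)] - 4.0) < 1e-4   # d/dx (x**2 + 3*y) = 2*x = 4
assert abs(g[id(m.y)] - 3.0) < 1e-4   # d/dy (x**2 + 3*y) = 3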
def build_ordered_component_lists(model, prefix='working'): """Define lists used for future data transfer.""" GDPopt = model.GDPopt_utils var_set = ComponentSet() setattr( GDPopt, '%s_constraints_list' % prefix, list( model.component_data_objects(ctype=Constraint, active=True, descend_into=(Block, Disjunct)))) setattr( GDPopt, '%s_disjuncts_list' % prefix, list( model.component_data_objects(ctype=Disjunct, descend_into=(Block, Disjunct)))) setattr( GDPopt, '%s_disjunctions_list' % prefix, list( model.component_data_objects(ctype=Disjunction, active=True, descend_into=(Disjunct, Block)))) # Identify the non-fixed variables in (potentially) active constraints and # objective functions for constr in getattr(GDPopt, '%s_constraints_list' % prefix): for v in EXPR.identify_variables(constr.body, include_fixed=False): var_set.add(v) for obj in model.component_data_objects(ctype=Objective, active=True): for v in EXPR.identify_variables(obj.expr, include_fixed=False): var_set.add(v) # Disjunct indicator variables might not appear in active constraints. In # fact, if we consider them Logical variables, they should not appear in # active algebraic constraints. For now, they need to be added to the # variable set. for disj in getattr(GDPopt, '%s_disjuncts_list' % prefix): var_set.add(disj.indicator_var) # We use component_data_objects rather than list(var_set) in order to # preserve a deterministic ordering. setattr( GDPopt, '%s_var_list' % prefix, list(v for v in model.component_data_objects( ctype=Var, descend_into=(Block, Disjunct)) if v in var_set)) setattr(GDPopt, '%s_nonlinear_constraints' % prefix, [ v for v in getattr(GDPopt, '%s_constraints_list' % prefix) if v.body.polynomial_degree() not in (0, 1) ])
def build_ordered_component_lists(model, solve_data): """Define lists used for future data transfer. Also attaches ordered lists of the variables, constraints, disjuncts, and disjunctions to the model so that they can be used for mapping back and forth. """ util_blk = getattr(model, solve_data.util_block_name) var_set = ComponentSet() setattr( util_blk, 'constraint_list', list( model.component_data_objects( ctype=Constraint, active=True, descend_into=(Block, Disjunct)))) setattr( util_blk, 'disjunct_list', list( model.component_data_objects( ctype=Disjunct, descend_into=(Block, Disjunct)))) setattr( util_blk, 'disjunction_list', list( model.component_data_objects( ctype=Disjunction, active=True, descend_into=(Disjunct, Block)))) # Identify the non-fixed variables in (potentially) active constraints and # objective functions for constr in getattr(util_blk, 'constraint_list'): for v in identify_variables(constr.body, include_fixed=False): var_set.add(v) for obj in model.component_data_objects(ctype=Objective, active=True): for v in identify_variables(obj.expr, include_fixed=False): var_set.add(v) # Disjunct indicator variables might not appear in active constraints. In # fact, if we consider them Logical variables, they should not appear in # active algebraic constraints. For now, they need to be added to the # variable set. for disj in getattr(util_blk, 'disjunct_list'): var_set.add(disj.indicator_var) # We use component_data_objects rather than list(var_set) in order to # preserve a deterministic ordering. var_list = list( v for v in model.component_data_objects( ctype=Var, descend_into=(Block, Disjunct)) if v in var_set) setattr(util_blk, 'variable_list', var_list)
def _transform_constraintData(self, logical_constraint, new_varlists, transBlocks): # first find all the relevant BooleanVars and associate a binary (if # they don't have one already) for bool_vardata in identify_variables(logical_constraint.expr): if bool_vardata.ctype is BooleanVar: self._transform_boolean_varData(bool_vardata, new_varlists) # now create a transformation block on the constraint's parent block (if # we don't have one already) parent_block = logical_constraint.parent_block() xfrm_block = transBlocks.get(parent_block) if xfrm_block is None: xfrm_block = self._create_transformation_block(parent_block) transBlocks[parent_block] = xfrm_block new_constrlist = xfrm_block.transformed_constraints new_boolvarlist = xfrm_block.augmented_vars new_varlist = xfrm_block.augmented_vars_asbinary old_boolvarlist_length = len(new_boolvarlist) indicator_map = ComponentMap() cnf_statements = to_cnf(logical_constraint.body, new_boolvarlist, indicator_map) logical_constraint.deactivate() # Associate new Boolean vars to new binary variables num_new = len(new_boolvarlist) - old_boolvarlist_length list_o_vars = list(new_boolvarlist.values()) if num_new: for bool_vardata in list_o_vars[-num_new:]: new_binary_vardata = new_varlist.add() bool_vardata.associate_binary_var(new_binary_vardata) # Add constraints associated with each CNF statement for cnf_statement in cnf_statements: for linear_constraint in _cnf_to_linear_constraint_list( cnf_statement): new_constrlist.add(expr=linear_constraint) # Add bigM associated with special atoms # Note: this ad-hoc reformulation may be revisited for tightness in the # future. old_varlist_length = len(new_varlist) for indicator_var, special_atom in indicator_map.items(): for linear_constraint in _cnf_to_linear_constraint_list( special_atom, indicator_var, new_varlist): new_constrlist.add(expr=linear_constraint) # Previous step may have added auxiliary binaries. Associate augmented # Booleans to them. num_new = len(new_varlist) - old_varlist_length list_o_vars = list(new_varlist.values()) if num_new: for binary_vardata in list_o_vars[-num_new:]: new_bool_vardata = new_boolvarlist.add() new_bool_vardata.associate_binary_var(binary_vardata)
def getHessian(objective): gradient = getGradient(objective) varList = list(identify_variables(objective.expr)) hessian = [ differentiate(gradient[i], wrt_list=varList) for i, v in enumerate(varList) ] return hessian
def test_1(self):
    '''
    The simplest case: the black box has only two inputs and only one
    black box is involved.
    '''
    def blackbox(a, b):
        return sin(a - b)
    m = self.m
    bb = ExternalFunction(blackbox)
    m.eflist = [bb]
    m.c1 = Constraint(expr=m.x[0] * m.z[0]**2 + bb(m.x[0], m.x[1])
                      == 2*sqrt(2.0))
    pI = PyomoInterface(m, [bb], ConfigBlock())
    self.assertEqual(pI.lx, 2)
    self.assertEqual(pI.ly, 1)
    self.assertEqual(pI.lz, 3)
    self.assertEqual(len(list(identify_variables(m.c1.body))), 3)
    self.assertEqual(len(list(identify_variables(m.c2.body))), 2)
def free_variables_in_active_equalities_set(blk):
    """
    Return a ComponentSet of the unfixed variables contained in active
    equality constraints.
    """
    vin = ComponentSet()
    for c in active_equalities(blk):
        for v in identify_variables(c.body):
            if not v.fixed:
                vin.add(v)
    return vin
def test_create_concrete_from_rule(self): def make(m): m.I = RangeSet(3) m.x = Var(m.I) m.c = Constraint( expr=sum(m.x[i] for i in m.I) >= 0 ) model = ConcreteModel(rule=make) self.assertEqual( [x.local_name for x in model.component_objects()], ['I','x','c'] ) self.assertEqual( len(list(EXPR.identify_variables(model.c.body))), 3 )
def _generate_additively_separable_repn(nonlinear_part): if nonlinear_part.__class__ is not EXPR.SumExpression: # This isn't separable, so we just have the one expression return { 'nonlinear_vars': [tuple(v for v in EXPR.identify_variables(nonlinear_part))], 'nonlinear_exprs': [nonlinear_part] } # else, it was a SumExpression, and we will break it into the summands, # recording which variables are there. nonlinear_decomp = {'nonlinear_vars': [], 'nonlinear_exprs': []} for summand in nonlinear_part.args: nonlinear_decomp['nonlinear_exprs'].append(summand) nonlinear_decomp['nonlinear_vars'].append( tuple(v for v in EXPR.identify_variables(summand))) return nonlinear_decomp
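# A sketch of what _generate_additively_separable_repn returns for a
# separable expression, assuming Pyomo builds x*y + z**2 as a SumExpression
# (as the class check above expects); names are illustrative.
from pyomo.environ import ConcreteModel, Var

m = ConcreteModel()
m.x = Var()
m.y = Var()
m.z = Var()

decomp = _generate_additively_separable_repn(m.x * m.y + m.z ** 2)
# Expected: two summands, the first touching x and y, the second only z.
print(len(decomp['nonlinear_exprs']))              # 2
print([len(t) for t in decomp['nonlinear_vars']])  # [2, 1]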
def test_2(self):
    '''
    The simplest case: the black box has only one input, and that input is
    given by a formula.
    '''
    def blackbox(a):
        return sin(a)
    m = self.m
    bb = ExternalFunction(blackbox)
    m.eflist = [bb]
    m.c1 = Constraint(expr=m.x[0] * m.z[0]**2 + bb(m.x[0] - m.x[1])
                      == 2*sqrt(2.0))
    pI = PyomoInterface(m, [bb], ConfigBlock())
    self.assertEqual(pI.lx, 1)
    self.assertEqual(pI.ly, 1)
    self.assertEqual(pI.lz, 5)
    self.assertEqual(len(list(identify_variables(m.c1.body))), 3)
    self.assertEqual(len(list(identify_variables(m.c2.body))), 2)
    self.assertEqual(len(m.tR.conset), 1)
    self.assertEqual(len(list(identify_variables(m.tR.conset[1].body))), 3)
def detect_unfixed_discrete_vars(model): """Detect unfixed discrete variables in use on the model.""" var_set = ComponentSet() for constr in model.component_data_objects( Constraint, active=True, descend_into=True): var_set.update( v for v in EXPR.identify_variables( constr.body, include_fixed=False) if not v.is_continuous()) return var_set
def test_constraintSub(self): m = ri.create_model() m.pert_a = Param(initialize=0.01) m.pert_b = Param(initialize=1.01) m_sipopt = sipopt(m,[m.a,m.b], [m.pert_a,m.pert_b]) #verify substitutions in equality constraint self.assertTrue(m_sipopt.C_equal.lower.type() is Param and m_sipopt.C_equal.upper.type() is Param) self.assertFalse(m_sipopt.C_equal.active) self.assertTrue(m_sipopt._sipopt_data.constList[3].lower == 0.0 and m_sipopt._sipopt_data.constList[3].upper == 0.0 and len(list(identify_variables( m_sipopt._sipopt_data.constList[3].body))) == 2) #verify substitutions in one-sided bounded constraint self.assertTrue(m_sipopt.C_singleBnd.lower is None and m_sipopt.C_singleBnd.upper.type() is Param) self.assertFalse(m_sipopt.C_singleBnd.active) self.assertTrue(m_sipopt._sipopt_data.constList[4].lower is None and m_sipopt._sipopt_data.constList[4].upper == 0.0 and len(list(identify_variables( m_sipopt._sipopt_data.constList[4].body))) == 2) #verify substitutions in ranged inequality constraint self.assertTrue(m_sipopt.C_rangedIn.lower.type() is Param and m_sipopt.C_rangedIn.upper.type() is Param) self.assertFalse(m_sipopt.C_rangedIn.active) self.assertTrue(m_sipopt._sipopt_data.constList[1].lower is None and m_sipopt._sipopt_data.constList[1].upper == 0.0 and len(list(identify_variables( m_sipopt._sipopt_data.constList[1].body))) == 2) self.assertTrue(m_sipopt._sipopt_data.constList[2].lower is None and m_sipopt._sipopt_data.constList[2].upper == 0.0 and len(list(identify_variables( m_sipopt._sipopt_data.constList[2].body))) == 2)
def _estimate_M(self, expr, name): # If there are fixed variables here, unfix them for this calculation, # and we'll restore them at the end. fixed_vars = ComponentMap() if not self.assume_fixed_vars_permanent: for v in EXPR.identify_variables(expr, include_fixed=True): if v.fixed: fixed_vars[v] = value(v) v.fixed = False # Calculate a best guess at M repn = generate_standard_repn(expr, quadratic=False) M = [0, 0] if not repn.is_nonlinear(): if repn.constant is not None: for i in (0, 1): if M[i] is not None: M[i] += repn.constant for i, coef in enumerate(repn.linear_coefs or []): var = repn.linear_vars[i] bounds = (value(var.lb), value(var.ub)) for i in (0, 1): # reverse the bounds if the coefficient is negative if coef > 0: j = i else: j = 1 - i if bounds[i] is not None: M[j] += value(bounds[i]) * coef else: raise GDP_Error( "Cannot estimate M for " "expressions with unbounded variables." "\n\t(found unbounded var '%s' while processing " "constraint '%s')" % (var.name, name)) else: # expression is nonlinear. Try using `contrib.fbbt` to estimate. expr_lb, expr_ub = compute_bounds_on_expr(expr) if expr_lb is None or expr_ub is None: raise GDP_Error("Cannot estimate M for unbounded nonlinear " "expressions.\n\t(found while processing " "constraint '%s')" % name) else: M = (expr_lb, expr_ub) # clean up if we unfixed things (fixed_vars is empty if we were assuming # fixed vars are fixed for life) for v, val in fixed_vars.items(): v.fix(val) return tuple(M)
def make_separation_objective_functions(model, config): """ Inequality constraints referencing control variables, state variables, or uncertain parameters must be separated against in separation problem. """ performance_constraints = [] for c in model.component_data_objects(Constraint, active=True, descend_into=True): _vars = ComponentSet(identify_variables(expr=c.expr)) uncertain_params_in_expr = list( v for v in model.util.uncertain_param_vars.values() if v in _vars) state_vars_in_expr = list(v for v in model.util.state_vars if v in _vars) second_stage_variables_in_expr = list( v for v in model.util.second_stage_variables if v in _vars) if not c.equality and (uncertain_params_in_expr or state_vars_in_expr or second_stage_variables_in_expr): # This inequality constraint depends on uncertain parameters therefore it must be separated against performance_constraints.append(c) elif not c.equality and not (uncertain_params_in_expr or state_vars_in_expr or second_stage_variables_in_expr): c.deactivate( ) # These are x \in X constraints, not active in separation because x is fixed to x* from previous master model.util.performance_constraints = performance_constraints model.util.separation_objectives = [] map_obj_to_constr = ComponentMap() if len(model.util.performance_constraints) == 0: raise ValueError( "No performance constraints identified for the postulated robust optimization problem." ) for idx, c in enumerate(performance_constraints): # Separation objective constraints standardized to be MAXIMIZATION of <= constraints c.deactivate() if c.upper is not None: # This is an <= constraint, maximized in separation obj = Objective(expr=c.body - c.upper, sense=maximize) map_obj_to_constr[c] = obj model.add_component("separation_obj_" + str(idx), obj) model.util.separation_objectives.append(obj) elif c.lower is not None: # This is an >= constraint, not supported raise ValueError( "All inequality constraints in model must be in standard form (<= RHS)" ) model.util.map_obj_to_constr = map_obj_to_constr for obj in model.util.separation_objectives: obj.deactivate() return
def calc_jacobians(solve_data, config): """Generate a map of jacobians.""" # Map nonlinear_constraint --> Map( # variable --> jacobian of constraint wrt. variable) solve_data.jacobians = ComponentMap() for c in solve_data.mip.MindtPy_utils.constraint_list: if c.body.polynomial_degree() in (1, 0): continue # skip linear constraints vars_in_constr = list(EXPR.identify_variables(c.body)) jac_list = differentiate(c.body, wrt_list=vars_in_constr) solve_data.jacobians[c] = ComponentMap( (var, jac_wrt_var) for var, jac_wrt_var in zip(vars_in_constr, jac_list))
def obbt_disjunct(orig_model, idx, solver): model = orig_model.clone() # Fix the disjunct to be active disjunct = model._disjuncts_to_process[idx] disjunct.indicator_var.fix(1) for obj in model.component_data_objects(Objective, active=True): obj.deactivate() # Deactivate nonlinear constraints for constr in model.component_data_objects( Constraint, active=True, descend_into=(Block, Disjunct)): if constr.body.polynomial_degree() not in linear_degrees: constr.deactivate() # Only look at the variables participating in active constraints within the scope relevant_var_set = ComponentSet() for constr in disjunct.component_data_objects(Constraint, active=True): relevant_var_set.update(identify_variables(constr.body, include_fixed=False)) TransformationFactory('gdp.bigm').apply_to(model) model._var_bounding_obj = Objective(expr=1, sense=minimize) for var in relevant_var_set: model._var_bounding_obj.set_value(expr=var) var_lb = solve_bounding_problem(model, solver) if var_lb is None: return None # bounding problem infeasible model._var_bounding_obj.set_value(expr=-var) var_ub = solve_bounding_problem(model, solver) if var_ub is None: return None # bounding problem infeasible else: var_ub = -var_ub # sign correction var.setlb(var_lb) var.setub(var_ub) # Maps original variable --> (new computed LB, new computed UB) var_bnds = ComponentMap( ((orig_var, ( clone_var.lb if clone_var.has_lb() else -inf, clone_var.ub if clone_var.has_ub() else inf)) for orig_var, clone_var in zip( orig_model._disj_bnds_linear_vars, model._disj_bnds_linear_vars) if clone_var in relevant_var_set) ) return var_bnds
def test_zero_term_removal(self): """Test for removing zero terms from linear constraints.""" m = ConcreteModel() m.v0 = Var() m.v1 = Var() m.v2 = Var() m.v3 = Var() m.c = Constraint(expr=m.v0 == m.v1 * m.v2 + m.v3) m.c2 = Constraint(expr=m.v1 * m.v2 + m.v3 <= m.v0) m.c3 = Constraint(expr=m.v0 <= m.v1 * m.v2 + m.v3) m.c4 = Constraint(expr=EXPR.inequality(1, m.v1 * m.v2 + m.v3, 3)) m.v1.fix(0) TransformationFactory('contrib.remove_zero_terms').apply_to(m) m.v1.unfix() # Check that the term no longer exists self.assertFalse(any(id(m.v1) == id(v) for v in EXPR.identify_variables(m.c.body))) self.assertFalse(any(id(m.v1) == id(v) for v in EXPR.identify_variables(m.c2.body))) self.assertFalse(any(id(m.v1) == id(v) for v in EXPR.identify_variables(m.c3.body))) self.assertFalse(any(id(m.v1) == id(v) for v in EXPR.identify_variables(m.c4.body)))
def _transformDisjunctionData(self, obj, transBlock, index): # Convex hull doesn't work if this is an or constraint. So if # xor is false, give up if not obj.xor: raise GDP_Error("Cannot do convex hull transformation for " "disjunction %s with or constraint. Must be an xor!" % obj.name) parent_component = obj.parent_component() transBlock.disjContainers.add(parent_component) orConstraint, disaggregationConstraint \ = self._getDisjunctionConstraints(parent_component) # We first go through and collect all the variables that we # are going to disaggregate. varOrder_set = ComponentSet() varOrder = [] varsByDisjunct = ComponentMap() for disjunct in obj.disjuncts: # This is crazy, but if the disjunct has been previously # relaxed, the disjunct *could* be deactivated. not_active = not disjunct.active if not_active: disjunct._activate_without_unfixing_indicator() try: disjunctVars = varsByDisjunct[disjunct] = ComponentSet() for cons in disjunct.component_data_objects( Constraint, active = True, sort=SortComponents.deterministic, descend_into=Block): # we aren't going to disaggregate fixed # variables. This means there is trouble if they are # unfixed later... for var in EXPR.identify_variables( cons.body, include_fixed=False): # Note the use of a list so that we will # eventually disaggregate the vars in a # deterministic order (the order that we found # them) disjunctVars.add(var) if var not in varOrder_set: varOrder.append(var) varOrder_set.add(var) finally: if not_active: disjunct._deactivate_without_fixing_indicator() # We will only disaggregate variables that # 1) appear in multiple disjuncts, or # 2) are not contained in this disjunct, or # 3) are not themselves disaggregated variables varSet = [] localVars = ComponentMap((d,[]) for d in obj.disjuncts) for var in varOrder: disjuncts = [d for d in varsByDisjunct if var in varsByDisjunct[d]] if len(disjuncts) > 1: varSet.append(var) elif self._contained_in(var, disjuncts[0]): localVars[disjuncts[0]].append(var) elif self._contained_in(var, transBlock): # There is nothing to do here: these are already # disaggregated vars that can/will be forced to 0 when # their disjunct is not active. pass else: varSet.append(var) # Now that we know who we need to disaggregate, we will do it # while we also transform the disjuncts. or_expr = 0 for disjunct in obj.disjuncts: or_expr += disjunct.indicator_var self._transform_disjunct(disjunct, transBlock, varSet, localVars[disjunct]) orConstraint.add(index, (or_expr, 1)) for i, var in enumerate(varSet): disaggregatedExpr = 0 for disjunct in obj.disjuncts: if 'chull' not in disjunct._gdp_transformation_info: if not disjunct.indicator_var.is_fixed() \ or value(disjunct.indicator_var) != 0: raise RuntimeError( "GDP chull: disjunct was not relaxed, but " "does not appear to be correctly deactivated.") continue disaggregatedVar = disjunct._gdp_transformation_info['chull'][ 'disaggregatedVars'][var] disaggregatedExpr += disaggregatedVar if type(index) is tuple: consIdx = index + (i,) elif parent_component.is_indexed(): consIdx = (index,) + (i,) else: consIdx = i disaggregationConstraint.add( consIdx, var == disaggregatedExpr)
def _apply_to(self, model, **kwds):
    """Apply the transformation to the given model."""
    config = self.CONFIG(kwds.pop('options', {}))
    config.set_value(kwds)
    integer_vars = list(
        v for v in model.component_data_objects(
            ctype=Var, descend_into=(Block, Disjunct))
        if v.is_integer() and not v.fixed)
    if len(integer_vars) == 0:
        logger.info(
            "Model has no free integer variables. No reformulation needed.")
        return
    vars_on_constr = ComponentSet()
    for c in model.component_data_objects(
            ctype=Constraint, descend_into=(Block, Disjunct), active=True):
        vars_on_constr.update(
            v for v in identify_variables(c.body, include_fixed=False)
            if v.is_integer())
    if config.ignore_unused:
        num_vars_not_on_constr = len(integer_vars) - len(vars_on_constr)
        if num_vars_not_on_constr > 0:
            logger.info(
                "%s integer variables on the model are not attached to any "
                "constraints. Ignoring unused variables."
                % num_vars_not_on_constr)
        integer_vars = list(vars_on_constr)

    logger.info(
        "Reformulating integer variables using the %s strategy."
        % config.strategy)

    # Set up reformulation block
    blk_name = unique_component_name(model, "_int_to_binary_reform")
    reform_block = Block(
        doc="Holds variables and constraints for reformulating "
        "integer variables to binary variables."
    )
    setattr(model, blk_name, reform_block)

    reform_block.int_var_set = RangeSet(0, len(integer_vars) - 1)
    reform_block.new_binary_var = Var(
        Any, domain=Binary, dense=False,
        doc="Binary variable with index (int_var_idx, idx)")
    reform_block.integer_to_binary_constraint = Constraint(
        reform_block.int_var_set,
        doc="Equality constraints mapping the binary variable values "
        "to the integer variable value.")

    # check that variables are bounded and non-negative
    for idx, int_var in enumerate(integer_vars):
        if not (int_var.has_lb() and int_var.has_ub()):
            raise ValueError(
                "Integer variable %s is missing an "
                "upper or lower bound. LB: %s; UB: %s. "
                "Integer to binary reformulation does not support unbounded "
                "integer variables." % (int_var.name, int_var.lb, int_var.ub))
        if int_var.lb < 0:
            raise ValueError(
                "Integer variable %s can be negative. "
                "Integer to binary reformulation currently only supports "
                "non-negative integer variables." % (int_var.name,)
            )
        # do the reformulation
        highest_power = int(floor(log(value(int_var.ub), 2)))
        # TODO potentially fragile due to floating point
        reform_block.integer_to_binary_constraint.add(
            idx, expr=int_var == sum(
                reform_block.new_binary_var[idx, pwr] * (2 ** pwr)
                for pwr in range(0, highest_power + 1)))

        # Relax the original integer variable
        int_var.domain = NonNegativeReals

    logger.info(
        "Reformulated %s integer variables using "
        "%s binary variables and %s constraints."
        % (len(integer_vars), len(reform_block.new_binary_var),
           len(reform_block.integer_to_binary_constraint)))
def make3dPlot(expr, numticks=30, show_plot=False): ccSurf = [None] * ((numticks + 1)**2) cvSurf = [None] * ((numticks + 1)**2) fvals = [None] * ((numticks + 1)**2) xaxis2d = [None] * ((numticks + 1)**2) yaxis2d = [None] * ((numticks + 1)**2) ccAffine = [None] * ((numticks + 1)**2) cvAffine = [None] * ((numticks + 1)**2) eqn = mc(expr) vars = identify_variables(expr) x = next(vars) y = next(vars) x_tick_length = (x.ub - x.lb) / numticks y_tick_length = (y.ub - y.lb) / numticks xaxis = [x.lb + x_tick_length * n for n in range(numticks + 1)] yaxis = [y.lb + y_tick_length * n for n in range(numticks + 1)] # Making the affine tangent planes ccSlope = eqn.subcc() cvSlope = eqn.subcv() x_val = value(x) y_val = value(y) f_cc = eqn.concave() f_cv = eqn.convex() # To Visualize Concave Affine Plane for different points for i, x_tick in enumerate(xaxis): eqn.changePoint(x, x_tick) for j, y_tick in enumerate(yaxis): ccAffine[i + (numticks + 1) * j] = ( ccSlope[x] * (x_tick - x_val) + ccSlope[y] * (y_tick - y_val) + f_cc) cvAffine[i + (numticks + 1) * j] = ( cvSlope[x] * (x_tick - x_val) + cvSlope[y] * (y_tick - y_val) + f_cv) xaxis2d[i + (numticks + 1) * j] = x_tick yaxis2d[i + (numticks + 1) * j] = y_tick eqn.changePoint(y, y_tick) ccSurf[i + (numticks + 1) * j] = eqn.concave() cvSurf[i + (numticks + 1) * j] = eqn.convex() fvals[i + (numticks + 1) * j] = value(expr) if show_plot: import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D assert Axes3D # silence pyflakes # Plotting Solutions in 3D fig = plt.figure() ax = fig.add_subplot(1, 1, 1, projection='3d') ax.scatter(xaxis2d, yaxis2d, cvSurf, color='b') ax.scatter(xaxis2d, yaxis2d, fvals, color='r') ax.scatter(xaxis2d, yaxis2d, ccSurf, color='b') # To Visualize Concave Affine Plane for different points ax.scatter(xaxis2d, yaxis2d, cvAffine, color='k') # Create a better view ax.view_init(10, 270) plt.show() return ccSurf, cvSurf, ccAffine, cvAffine
def build_model_size_report(model): """Build a model size report object.""" report = ModelSizeReport() activated_disjunctions = ComponentSet() activated_disjuncts = ComponentSet() fixed_true_disjuncts = ComponentSet() activated_constraints = ComponentSet() activated_vars = ComponentSet() new_containers = (model,) while new_containers: new_activated_disjunctions = ComponentSet() new_activated_disjuncts = ComponentSet() new_fixed_true_disjuncts = ComponentSet() new_activated_constraints = ComponentSet() for container in new_containers: (next_activated_disjunctions, next_fixed_true_disjuncts, next_activated_disjuncts, next_activated_constraints ) = _process_activated_container(container) new_activated_disjunctions.update(next_activated_disjunctions) new_activated_disjuncts.update(next_activated_disjuncts) new_fixed_true_disjuncts.update(next_fixed_true_disjuncts) new_activated_constraints.update(next_activated_constraints) new_containers = ((new_activated_disjuncts - activated_disjuncts) | (new_fixed_true_disjuncts - fixed_true_disjuncts)) activated_disjunctions.update(new_activated_disjunctions) activated_disjuncts.update(new_activated_disjuncts) fixed_true_disjuncts.update(new_fixed_true_disjuncts) activated_constraints.update(new_activated_constraints) activated_vars.update( var for constr in activated_constraints for var in EXPR.identify_variables( constr.body, include_fixed=False)) activated_vars.update( disj.indicator_var for disj in activated_disjuncts) report.activated = Container() report.activated.variables = len(activated_vars) report.activated.binary_variables = sum( 1 for v in activated_vars if v.is_binary()) report.activated.integer_variables = sum( 1 for v in activated_vars if v.is_integer()) report.activated.continuous_variables = sum( 1 for v in activated_vars if v.is_continuous()) report.activated.disjunctions = len(activated_disjunctions) report.activated.disjuncts = len(activated_disjuncts) report.activated.constraints = len(activated_constraints) report.activated.nonlinear_constraints = sum( 1 for c in activated_constraints if c.body.polynomial_degree() not in (1, 0)) report.overall = Container() block_like = (Block, Disjunct) all_vars = ComponentSet( model.component_data_objects(Var, descend_into=block_like)) report.overall.variables = len(all_vars) report.overall.binary_variables = sum(1 for v in all_vars if v.is_binary()) report.overall.integer_variables = sum( 1 for v in all_vars if v.is_integer()) report.overall.continuous_variables = sum( 1 for v in all_vars if v.is_continuous()) report.overall.disjunctions = sum( 1 for d in model.component_data_objects( Disjunction, descend_into=block_like)) report.overall.disjuncts = sum( 1 for d in model.component_data_objects( Disjunct, descend_into=block_like)) report.overall.constraints = sum( 1 for c in model.component_data_objects( Constraint, descend_into=block_like)) report.overall.nonlinear_constraints = sum( 1 for c in model.component_data_objects( Constraint, descend_into=block_like) if c.body.polynomial_degree() not in (1, 0)) report.warning = Container() report.warning.unassociated_disjuncts = sum( 1 for d in model.component_data_objects( Disjunct, descend_into=block_like) if not d.indicator_var.fixed and d not in activated_disjuncts) return report
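# A short usage sketch of build_model_size_report on a plain model with no
# disjunctions (so activated and overall counts coincide); assumes the same
# imports and helpers the function above already relies on. Names are
# illustrative.
from pyomo.environ import ConcreteModel, Var, Constraint, Binary

m = ConcreteModel()
m.x = Var()
m.y = Var(domain=Binary)
m.c = Constraint(expr=m.x + m.y <= 1)

report = build_model_size_report(m)
print(report.activated.variables)         # 2 (x and y appear in an active constraint)
print(report.activated.binary_variables)  # 1
print(report.overall.constraints)         # 1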
assert(list(EXPR.identify_components(e, s)) == [M.p]) # @ex8 #--------------------------------------------- # @ex9 from pyomo.core.expr import current as EXPR M = ConcreteModel() M.x = Var() M.y = Var() e = M.x+M.y M.y.value = 1 M.y.fixed = True assert(set(id(v) for v in EXPR.identify_variables(e)) == set([id(M.x), id(M.y)])) assert(set(id(v) for v in EXPR.identify_variables(e, include_fixed=False)) == set([id(M.x)])) # @ex9 #--------------------------------------------- # @visitor1 from pyomo.core.expr import current as EXPR class SizeofVisitor(EXPR.SimpleExpressionVisitor): def __init__(self): self.counter = 0 def visit(self, node): self.counter += 1
def _xform_constraint(self, obj, disjunct, infodict, var_substitute_map, zero_substitute_map): # we will put a new transformed constraint on the relaxation block. relaxationBlock = infodict['chull']['relaxationBlock'] transBlock = relaxationBlock.parent_block() varMap = infodict['chull']['disaggregatedVars'] # Though rare, it is possible to get naming conflicts here # since constraints from all blocks are getting moved onto the # same block. So we get a unique name name = unique_component_name(relaxationBlock, obj.name) if obj.is_indexed(): try: newConstraint = Constraint(obj.index_set(), transBlock.lbub) except: # The original constraint may have been indexed by a # non-concrete set (like an Any). We will give up on # strict index verification and just blindly proceed. newConstraint = Constraint(Any) else: newConstraint = Constraint(transBlock.lbub) relaxationBlock.add_component(name, newConstraint) # add mapping of original constraint to transformed constraint # in transformation info dictionary infodict['chull']['relaxedConstraints'][obj] = newConstraint # add mapping of transformed constraint back to original constraint (we # know that the info dict is already created because this only got # called if we were transforming a disjunct...) relaxationBlock._gdp_transformation_info['srcConstraints'][ newConstraint] = obj for i in sorted(iterkeys(obj)): c = obj[i] if not c.active: continue NL = c.body.polynomial_degree() not in (0,1) EPS = self._config.EPS mode = self._config.perspective_function # We need to evaluate the expression at the origin *before* # we substitute the expression variables with the # disaggregated variables if not NL or mode == "FurmanSawayaGrossmann": h_0 = clone_without_expression_components( c.body, substitute=zero_substitute_map) y = disjunct.indicator_var if NL: if mode == "LeeGrossmann": sub_expr = clone_without_expression_components( c.body, substitute=dict( (var, subs/y) for var, subs in iteritems(var_substitute_map) ) ) expr = sub_expr * y elif mode == "GrossmannLee": sub_expr = clone_without_expression_components( c.body, substitute=dict( (var, subs/(y + EPS)) for var, subs in iteritems(var_substitute_map) ) ) expr = (y + EPS) * sub_expr elif mode == "FurmanSawayaGrossmann": sub_expr = clone_without_expression_components( c.body, substitute=dict( (var, subs/((1 - EPS)*y + EPS)) for var, subs in iteritems(var_substitute_map) ) ) expr = ((1-EPS)*y + EPS)*sub_expr - EPS*h_0*(1-y) else: raise RuntimeError("Unknown NL CHull mode") else: expr = clone_without_expression_components( c.body, substitute=var_substitute_map) if c.equality: if NL: newConsExpr = expr == c.lower*y else: v = list(EXPR.identify_variables(expr)) if len(v) == 1 and not c.lower: # Setting a variable to 0 in a disjunct is # *very* common. We should recognize that in # that structure, the disaggregated variable # will also be fixed to 0. v[0].fix(0) continue newConsExpr = expr - (1-y)*h_0 == c.lower*y if obj.is_indexed(): newConstraint.add((i, 'eq'), newConsExpr) else: newConstraint.add('eq', newConsExpr) continue if c.lower is not None: # TODO: At the moment there is no reason for this to be in both # lower and upper... I think there could be though if I say what # the new constraint is going to be or something. 
if __debug__ and logger.isEnabledFor(logging.DEBUG): logger.debug("GDP(cHull): Transforming constraint " + "'%s'", c.name) if NL: newConsExpr = expr >= c.lower*y else: newConsExpr = expr - (1-y)*h_0 >= c.lower*y if obj.is_indexed(): newConstraint.add((i, 'lb'), newConsExpr) else: newConstraint.add('lb', newConsExpr) if c.upper is not None: if __debug__ and logger.isEnabledFor(logging.DEBUG): logger.debug("GDP(cHull): Transforming constraint " + "'%s'", c.name) if NL: newConsExpr = expr <= c.upper*y else: newConsExpr = expr - (1-y)*h_0 <= c.upper*y if obj.is_indexed(): newConstraint.add((i, 'ub'), newConsExpr) else: newConstraint.add('ub', newConsExpr)
def _collect_variables(exp): ans = {} for var in EXPR.identify_variables(exp): ans[id(var)] = var return ans