Example #1
 def _apply_to(self, model, **kwds):
     submodel_name = kwds.pop('submodel', None)
     use_dual_objective = kwds.pop('use_dual_objective', False)
     #
     # Process options
     #
     submodel = self._preprocess('pao.bilevel.linear_dual',
                                 model,
                                 sub=submodel_name)
     self._fix_all()
     #
     # Generate the dual block
     #
     transform = TransformationFactory('pao.duality.linear_dual')
     dual = transform.create_using(submodel, fixed=self._fixed_vardata)
     #
     # Figure out which objective is being used
     #
     if use_dual_objective:
         #
         # Deactivate the upper-level objective
         #
         # TODO: Warn if there are multiple objectives?
         #
         for odata in submodel._parent().component_map(
                 Objective, active=True).values():
             odata.deactivate()
     else:
         #
         # Add a constraint that maps the dual objective to the primal objective
         #
         # NOTE: It might be numerically more stable to replace the upper
         # objective with a variable, and then set the dual equal to that variable.
         # But that transformation would not be limited to the submodel.  If that's
         # an issue for a user, they can make that change, and see the benefit.
         #
         dual_obj = None
         for odata in dual.component_objects(Objective, active=True):
             dual_obj = odata
             dual_obj.deactivate()
             break
         primal_obj = None
         for odata in submodel.component_objects(Objective, active=True):
             primal_obj = odata
             break
         dual.equiv_objs = Constraint(expr=dual_obj.expr == primal_obj.expr)
     #
     # Add the dual block
     #
     setattr(model, self._submodel + '_dual', dual)
     model.reclassify_component_type(self._submodel + '_dual', Block)
     #
     # Unfix the upper variables
     #
     self._unfix_all()
     #
     # Disable the original submodel
     #
     # Q: Are the last steps redundant?  Will we recurse into deactivated blocks?
     #
     submodel.deactivate()
     for data in submodel.component_map(active=True).values():
         if not isinstance(data, Var) and not isinstance(data, Set):
             data.deactivate()
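A note on the branch above (an interpretation of the code, not wording from the transformation itself): when use_dual_objective is False, the dual.equiv_objs constraint pins the dual objective to the lower-level primal objective. Combined with primal and dual feasibility this is strong duality, so lower-level optimality is enforced while the original upper-level objective remains the active one.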
Example #2
def log_lp(m: Block,
           tri: qhull.Delaunay,
           values: List[float],
           input: List[SimpleVar] = None,
           output: SimpleVar = None,
           bound: str = 'eq',
           **kw):
    num = kw["num"]
    values = np.array(values).tolist()
    ndim = len(input)
    npoints = len(tri.points)
    pointsT = list(zip(*tri.points))
    dims = list(range(ndim))
    vertices = list(range(npoints))
    bound = bound.lower()
    # Grid indices aligned, vertex by vertex, with the npoints triangulation points
    # vertices_idx = list(zip(*generate_points([list(range(num)) for _ in range(ndim)])))
    vertices_idx = generate_points([list(range(num))
                                    for _ in range(ndim)]).tolist()
    if len(vertices_idx) != len(vertices):
        raise RuntimeError("生成的的tri需要是米字形!")
    # (9a)
    m.lmbda = Var(vertices, domain=NonNegativeReals)  # nonnegative
    m.a0 = Constraint(dims,
                      rule=lambda m, d: sum(m.lmbda[v] * pointsT[d][v]
                                            for v in vertices) == input[d])
    m.a_sum = Expression(expr=sum(m.lmbda[v] * values[v] for v in vertices))
    if bound == 'eq':
        m.a1 = Constraint(expr=output == m.a_sum)
    elif bound == 'lb':
        m.a1 = Constraint(expr=output <= m.a_sum)
    elif bound == 'ub':
        m.a1 = Constraint(expr=output >= m.a_sum)
    else:
        raise RuntimeError("bound值错误!bound=" + bound)
    # (9b)
    m.b = Constraint(expr=sum(m.lmbda[v] for v in vertices) == 1)

    # (9c)

    # Constraints (a) and (b) are the same as in the cc formulation
    K = num - 1  # K must be even
    N = list(range(1, ndim + 1))
    log2K = math.ceil(math.log2(K))
    L = list(range(1, log2K + 1))
    G = generate_gray_code(log2K)

    def O(l, b):
        # The k == 0 / k == K checks mean the first and last breakpoints have no
        # neighbouring interval on one side, so we skip that test to avoid indexing out of range
        res = []
        for k in range(K + 1):
            if (k == 0 or G[k - 1][l - 1] == b) and (k == K
                                                     or G[k][l - 1] == b):
                res.append(k)
        return res
        # return [k for k in range(K + 1) if (k == 0 or G[k][l] == b) and (k == K or G[k + 1][l] == b)]

    m.y_s1 = Var(N, L, domain=NonNegativeReals, bounds=(0, 1))  # binary (relaxed to [0, 1] here)
    m.c1_s1 = Constraint(N,
                         L,
                         rule=lambda m, s1, s2: sum(
                             m.lmbda[v]
                             for v, idx in zip(vertices, vertices_idx)
                             if idx[s1 - 1] in O(s2, 1)) <= m.y_s1[s1, s2])
    m.c2_s1 = Constraint(N,
                         L,
                         rule=lambda m, s1, s2: sum(
                             m.lmbda[v]
                             for v, idx in zip(vertices, vertices_idx)
                             if idx[s1 - 1] in O(s2, 0)) <= 1 - m.y_s1[s1, s2])
    S2 = [(s1, s2) for s1 in N for s2 in N if s1 < s2]
    S2_idx = list(range(len(S2)))
    m.y_s2 = Var(S2_idx, domain=NonNegativeReals, bounds=(0, 1))
    m.c1_s2 = Constraint(
        S2_idx,
        rule=lambda m, i: sum(m.lmbda[v]
                              for v, idx in zip(vertices, vertices_idx)
                              if idx[S2[i][0] - 1] % 2 == 0 and idx[S2[i][
                                  1] - 1] % 2 == 1) <= m.y_s2[i])
    m.c2_s2 = Constraint(
        S2_idx,
        rule=lambda m, i: sum(m.lmbda[v]
                              for v, idx in zip(vertices, vertices_idx)
                              if idx[S2[i][0] - 1] % 2 == 1 and idx[S2[i][
                                  1] - 1] % 2 == 0) <= 1 - m.y_s2[i])
    return m
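A minimal usage sketch for log_lp (hypothetical, not from the source): it assumes the module-level helpers generate_points and generate_gray_code referenced above are importable, and that Delaunay on a regular grid keeps the input point order, so the vertex enumeration matches generate_points.

import numpy as np
from scipy.spatial import Delaunay
from pyomo.environ import ConcreteModel, Var, Block

num = 5                                   # breakpoints per dimension (num - 1 must be even)
grid = np.array([[i, j] for i in range(num) for j in range(num)])
tri = Delaunay(grid)                      # qhull triangulation of the regular grid
values = [float(i ** 2 + j ** 2) for i, j in grid]   # sampled function values, one per vertex

model = ConcreteModel()
model.x = Var(range(2), bounds=(0, num - 1))   # domain variables
model.fx = Var()                               # range variable
model.pw = Block()
log_lp(model.pw, tri, values,
       input=[model.x[0], model.x[1]], output=model.fx,
       bound='eq', num=num)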
Example #3
    def _xform_constraint(self, obj, disjunct, infodict, var_substitute_map,
                          zero_substitute_map):
        # we will put a new transformed constraint on the relaxation block.
        relaxationBlock = infodict['chull']['relaxationBlock']
        transBlock = relaxationBlock.parent_block()
        varMap = infodict['chull']['disaggregatedVars']

        # Though rare, it is possible to get naming conflicts here
        # since constraints from all blocks are getting moved onto the
        # same block. So we get a unique name
        name = unique_component_name(
            relaxationBlock,
            obj.getname(fully_qualified=True, name_buffer=NAME_BUFFER))

        if obj.is_indexed():
            try:
                newConstraint = Constraint(obj.index_set(), transBlock.lbub)
            except:
                # The original constraint may have been indexed by a
                # non-concrete set (like an Any).  We will give up on
                # strict index verification and just blindly proceed.
                newConstraint = Constraint(Any)
        else:
            newConstraint = Constraint(transBlock.lbub)
        relaxationBlock.add_component(name, newConstraint)
        # add mapping of original constraint to transformed constraint
        # in transformation info dictionary
        infodict['chull']['relaxedConstraints'][obj] = newConstraint
        # add mapping of transformed constraint back to original constraint (we
        # know that the info dict is already created because this only got
        # called if we were transforming a disjunct...)
        relaxationBlock._gdp_transformation_info['srcConstraints'][
            newConstraint] = obj

        for i in sorted(iterkeys(obj)):
            c = obj[i]
            if not c.active:
                continue

            NL = c.body.polynomial_degree() not in (0, 1)
            EPS = self._config.EPS
            mode = self._config.perspective_function

            # We need to evaluate the expression at the origin *before*
            # we substitute the expression variables with the
            # disaggregated variables
            if not NL or mode == "FurmanSawayaGrossmann":
                h_0 = clone_without_expression_components(
                    c.body, substitute=zero_substitute_map)

            y = disjunct.indicator_var
            if NL:
                if mode == "LeeGrossmann":
                    sub_expr = clone_without_expression_components(
                        c.body,
                        substitute=dict(
                            (var, subs / y)
                            for var, subs in iteritems(var_substitute_map)))
                    expr = sub_expr * y
                elif mode == "GrossmannLee":
                    sub_expr = clone_without_expression_components(
                        c.body,
                        substitute=dict(
                            (var, subs / (y + EPS))
                            for var, subs in iteritems(var_substitute_map)))
                    expr = (y + EPS) * sub_expr
                elif mode == "FurmanSawayaGrossmann":
                    sub_expr = clone_without_expression_components(
                        c.body,
                        substitute=dict(
                            (var, subs / ((1 - EPS) * y + EPS))
                            for var, subs in iteritems(var_substitute_map)))
                    expr = (
                        (1 - EPS) * y + EPS) * sub_expr - EPS * h_0 * (1 - y)
                else:
                    raise RuntimeError("Unknown NL CHull mode")
            else:
                expr = clone_without_expression_components(
                    c.body, substitute=var_substitute_map)

            if c.equality:
                if NL:
                    newConsExpr = expr == c.lower * y
                else:
                    v = list(EXPR.identify_variables(expr))
                    if len(v) == 1 and not c.lower:
                        # Setting a variable to 0 in a disjunct is
                        # *very* common.  We should recognize that in
                        # that structure, the disaggregated variable
                        # will also be fixed to 0.
                        v[0].fix(0)
                        continue
                    newConsExpr = expr - (1 - y) * h_0 == c.lower * y

                if obj.is_indexed():
                    newConstraint.add((i, 'eq'), newConsExpr)
                else:
                    newConstraint.add('eq', newConsExpr)
                continue

            if c.lower is not None:
                # TODO: At the moment there is no reason for this to be in both
                # lower and upper... I think there could be though if I say what
                # the new constraint is going to be or something.
                if __debug__ and logger.isEnabledFor(logging.DEBUG):
                    _name = c.getname(fully_qualified=True,
                                      name_buffer=NAME_BUFFER)
                    logger.debug(
                        "GDP(cHull): Transforming constraint " + "'%s'", _name)
                if NL:
                    newConsExpr = expr >= c.lower * y
                else:
                    newConsExpr = expr - (1 - y) * h_0 >= c.lower * y

                if obj.is_indexed():
                    newConstraint.add((i, 'lb'), newConsExpr)
                else:
                    newConstraint.add('lb', newConsExpr)

            if c.upper is not None:
                if __debug__ and logger.isEnabledFor(logging.DEBUG):
                    _name = c.getname(fully_qualified=True,
                                      name_buffer=NAME_BUFFER)
                    logger.debug(
                        "GDP(cHull): Transforming constraint " + "'%s'", _name)
                if NL:
                    newConsExpr = expr <= c.upper * y
                else:
                    newConsExpr = expr - (1 - y) * h_0 <= c.upper * y

                if obj.is_indexed():
                    newConstraint.add((i, 'ub'), newConsExpr)
                else:
                    newConstraint.add('ub', newConsExpr)
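For reference, the three NL modes above correspond to standard perspective-function relaxations (an interpretation of the code, using names from the literature): writing the constraint body as h and the disaggregated variables as nu, "LeeGrossmann" builds y * h(nu / y), "GrossmannLee" the regularized (y + EPS) * h(nu / (y + EPS)), and "FurmanSawayaGrossmann" builds ((1 - EPS) * y + EPS) * h(nu / ((1 - EPS) * y + EPS)) - EPS * h_0 * (1 - y), where h_0 is the body evaluated at the origin. That is why h_0 must be computed before the disaggregated-variable substitution.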
Example #4
    def _transform_disjunct(self, obj, transBlock, varSet, localVars):
        # deactivated should only come from the user
        if not obj.active:
            if obj.indicator_var.is_fixed():
                if value(obj.indicator_var) == 0:
                    # The user cleanly deactivated the disjunct: there
                    # is nothing for us to do here.
                    return
                else:
                    raise GDP_Error(
                        "The disjunct '%s' is deactivated, but the "
                        "indicator_var is fixed to %s. This makes no sense." %
                        (obj.name, value(obj.indicator_var)))
            if obj._transformation_block is None:
                raise GDP_Error(
                    "The disjunct '%s' is deactivated, but the "
                    "indicator_var is not fixed and the disjunct does not "
                    "appear to have been relaxed. This makes no sense. "
                    "(If the intent is to deactivate the disjunct, fix its "
                    "indicator_var to 0.)" % (obj.name, ))

        if obj._transformation_block is not None:
            # we've transformed it, which means this is the second time it's
            # appearing in a Disjunction
            raise GDP_Error(
                "The disjunct '%s' has been transformed, but a disjunction "
                "it appears in has not. Putting the same disjunct in "
                "multiple disjunctions is not supported." % obj.name)

        # create a relaxation block for this disjunct
        relaxedDisjuncts = transBlock.relaxedDisjuncts
        relaxationBlock = relaxedDisjuncts[len(relaxedDisjuncts)]

        relaxationBlock.localVarReferences = Block()

        # Put the disaggregated variables all on their own block so that we can
        # isolate the name collisions and still have complete control over the
        # names on this block. (This is for peace of mind now, but will matter
        # in the future for adding the binaries corresponding to Boolean
        # indicator vars.)
        relaxationBlock.disaggregatedVars = Block()

        # add the map that will link back and forth between transformed
        # constraints and their originals.
        relaxationBlock._constraintMap = {
            'srcConstraints': ComponentMap(),
            'transformedConstraints': ComponentMap()
        }
        # Map between disaggregated variables for this disjunct and their
        # originals
        relaxationBlock._disaggregatedVarMap = {
            'srcVar': ComponentMap(),
            'disaggregatedVar': ComponentMap(),
        }
        # Map between disaggregated variables and their lb*indicator <= var <=
        # ub*indicator constraints
        relaxationBlock._bigMConstraintMap = ComponentMap()

        # add mappings to source disjunct (so we'll know we've relaxed)
        obj._transformation_block = weakref_ref(relaxationBlock)
        relaxationBlock._srcDisjunct = weakref_ref(obj)

        # add Suffix to the relaxation block that disaggregated variables are
        # local (in case this is nested in another Disjunct)
        local_var_set = None
        parent_disjunct = obj.parent_block()
        while parent_disjunct is not None:
            if parent_disjunct.ctype is Disjunct:
                break
            parent_disjunct = parent_disjunct.parent_block()
        if parent_disjunct is not None:
            localVarSuffix = relaxationBlock.LocalVars = Suffix(
                direction=Suffix.LOCAL)
            local_var_set = localVarSuffix[parent_disjunct] = ComponentSet()

        # add the disaggregated variables and their bigm constraints
        # to the relaxationBlock
        for var in varSet:
            lb = var.lb
            ub = var.ub
            if lb is None or ub is None:
                raise GDP_Error("Variables that appear in disjuncts must be "
                                "bounded in order to use the hull "
                                "transformation! Missing bound for %s." %
                                (var.name))

            disaggregatedVar = Var(within=Reals,
                                   bounds=(min(0, lb), max(0, ub)),
                                   initialize=var.value)
            # naming conflicts are possible here since this is a bunch
            # of variables from different blocks coming together, so we
            # get a unique name
            disaggregatedVarName = unique_component_name(
                relaxationBlock.disaggregatedVars,
                var.getname(fully_qualified=False, name_buffer=NAME_BUFFER),
            )
            relaxationBlock.disaggregatedVars.add_component(
                disaggregatedVarName, disaggregatedVar)
            # mark this as local because we won't re-disaggregate if this is a
            # nested disjunction
            if local_var_set is not None:
                local_var_set.add(disaggregatedVar)
            # store the mappings from variables to their disaggregated selves on
            # the transformation block.
            relaxationBlock._disaggregatedVarMap['disaggregatedVar'][
                var] = disaggregatedVar
            relaxationBlock._disaggregatedVarMap['srcVar'][
                disaggregatedVar] = var

            bigmConstraint = Constraint(transBlock.lbub)
            relaxationBlock.add_component(disaggregatedVarName + "_bounds",
                                          bigmConstraint)
            if lb:
                bigmConstraint.add('lb',
                                   obj.indicator_var * lb <= disaggregatedVar)
            if ub:
                bigmConstraint.add('ub',
                                   disaggregatedVar <= obj.indicator_var * ub)

            relaxationBlock._bigMConstraintMap[
                disaggregatedVar] = bigmConstraint

        for var in localVars:
            lb = var.lb
            ub = var.ub
            if lb is None or ub is None:
                raise GDP_Error("Variables that appear in disjuncts must be "
                                "bounded in order to use the hull "
                                "transformation! Missing bound for %s." %
                                (var.name))
            if value(lb) > 0:
                var.setlb(0)
            if value(ub) < 0:
                var.setub(0)

            # map it to itself
            relaxationBlock._disaggregatedVarMap['disaggregatedVar'][var] = var
            relaxationBlock._disaggregatedVarMap['srcVar'][var] = var

            # naming conflicts are possible here since this is a bunch
            # of variables from different blocks coming together, so we
            # get a unique name
            conName = unique_component_name(
                relaxationBlock,
                var.getname(fully_qualified=False, name_buffer=NAME_BUFFER) + \
                "_bounds")
            bigmConstraint = Constraint(transBlock.lbub)
            relaxationBlock.add_component(conName, bigmConstraint)
            if lb:
                bigmConstraint.add('lb', obj.indicator_var * lb <= var)
            if ub:
                bigmConstraint.add('ub', var <= obj.indicator_var * ub)
            relaxationBlock._bigMConstraintMap[var] = bigmConstraint

        var_substitute_map = dict((id(v), newV) for v, newV in iteritems(
            relaxationBlock._disaggregatedVarMap['disaggregatedVar']))
        zero_substitute_map = dict((id(v), ZeroConstant) for v, newV in \
                                   iteritems(
                                       relaxationBlock._disaggregatedVarMap[
                                           'disaggregatedVar']))
        zero_substitute_map.update((id(v), ZeroConstant) for v in localVars)

        # Transform each component within this disjunct
        self._transform_block_components(obj, obj, var_substitute_map,
                                         zero_substitute_map)

        # deactivate disjunct so writers can be happy
        obj._deactivate_without_fixing_indicator()
Example #5
    def disj2_rule(disjunct):
        m = disjunct.model()

        def c_rule(d, s):
            return m.a[s] <= 3
        disjunct.c = Constraint(m.s, rule=c_rule)
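A hedged sketch of how a rule like disj2_rule is typically attached to a model (the sets, bounds, companion disjunct, and choice of the big-M relaxation are illustrative assumptions, not part of the original example):

from pyomo.environ import ConcreteModel, Set, Var, Constraint, TransformationFactory
from pyomo.gdp import Disjunct, Disjunction

m = ConcreteModel()
m.s = Set(initialize=[1, 2])
m.a = Var(m.s, bounds=(0, 10))

def disj1_rule(disjunct):
    mdl = disjunct.model()
    disjunct.c = Constraint(mdl.s, rule=lambda d, s: mdl.a[s] >= 4)

m.d1 = Disjunct(rule=disj1_rule)
m.d2 = Disjunct(rule=disj2_rule)    # the rule defined above
m.disjunction = Disjunction(expr=[m.d1, m.d2])

# Relax the disjunction (here with big-M) before handing the model to a MILP solver
TransformationFactory('gdp.bigm').apply_to(m)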
Example #6
    def _getDisjunctionConstraints(self, disjunction):
        # Put the disjunction constraint on its parent block

        # We never do this for just a DisjunctionData because we need
        # to know about the index set of its parent component. So if
        # we called this on a DisjunctionData, we did something wrong.
        assert isinstance(disjunction, Disjunction)
        parent = disjunction.parent_block()
        if hasattr(parent, "_gdp_transformation_info"):
            infodict = parent._gdp_transformation_info
            if type(infodict) is not dict:
                raise GDP_Error(
                    "Component %s contains an attribute named "
                    "_gdp_transformation_info. The transformation requires "
                    "that it can create this attribute!" % parent.name)
            try:
                # On the off-chance that another GDP transformation went
                # first, the infodict may exist, but the specific map we
                # want will not be present
                orConstraintMap = infodict['disjunction_or_constraint']
            except KeyError:
                orConstraintMap = infodict['disjunction_or_constraint'] \
                                  = ComponentMap()
            try:
                disaggregationConstraintMap = infodict[
                    'disjunction_disaggregation_constraints']
            except KeyError:
                disaggregationConstraintMap = infodict[
                    'disjunction_disaggregation_constraints'] \
                    = ComponentMap()
        else:
            infodict = parent._gdp_transformation_info = {}
            orConstraintMap = infodict['disjunction_or_constraint'] \
                              = ComponentMap()
            disaggregationConstraintMap = infodict[
                'disjunction_disaggregation_constraints'] \
                = ComponentMap()

        if disjunction in disaggregationConstraintMap:
            disaggregationConstraint = disaggregationConstraintMap[disjunction]
        else:
            # add the disaggregation constraint
            disaggregationConstraint \
                = disaggregationConstraintMap[disjunction] = Constraint(Any)
            parent.add_component(
                unique_component_name(
                    parent, '_gdp_chull_relaxation_' + disjunction.getname(
                        fully_qualified=True, name_buffer=NAME_BUFFER) +
                    '_disaggregation'), disaggregationConstraint)

        # If the Constraint already exists, return it
        if disjunction in orConstraintMap:
            orC = orConstraintMap[disjunction]
        else:
            # add the XOR (or OR) constraints to parent block (with
            # unique name) It's indexed if this is an
            # IndexedDisjunction, not otherwise
            orC = Constraint(disjunction.index_set()) if \
                  disjunction.is_indexed() else Constraint()
            parent.add_component(
                unique_component_name(
                    parent, '_gdp_chull_relaxation_' +
                    disjunction.getname(fully_qualified=True,
                                        name_buffer=NAME_BUFFER) + '_xor'),
                orC)
            orConstraintMap[disjunction] = orC

        return orC, disaggregationConstraint
Example #7
 def disjunct_rule(d, i, k):
     m = d.model()
     if k == 'a':
         d.cons_a = Constraint(expr=m.x[i] >= 5)
     if k == 'b':
         d.cons_b = Constraint(expr=m.x[i] <= 0)
Example #8
 def d2_rule(disjunct, flag):
     if not flag:
         disjunct.c = Constraint(expr=m.a >= 30)
     else:
         disjunct.c = Constraint(expr=m.a == 100)
Example #9
def define_model(**kwds):

    model = ConcreteModel()

    model.x = Var(INDEX_SET, bounds=(-5, 4))  # domain variable
    model.Fx = Var(INDEX_SET)  # range variable
    model.p = Param(INDEX_SET, initialize=1.0)

    model.obj = Objective(expr=sum_product(model.Fx),
                          sense=kwds.pop('sense', maximize))

    model.piecewise = Piecewise(INDEX_SET,
                                model.Fx,
                                model.x,
                                pw_pts=DOMAIN_PTS,
                                f_rule=F,
                                **kwds)

    # Fix the answers for testing purposes
    model.set_answer_constraint1 = Constraint(expr=model.x[1, 0] == -5.0)
    model.set_answer_constraint2 = Constraint(expr=model.x[2, 0] == -3.0)
    model.set_answer_constraint3 = Constraint(expr=model.x[3, 0] == -2.5)
    model.set_answer_constraint4 = Constraint(expr=model.x[4, 0] == -1.5)
    model.set_answer_constraint5 = Constraint(expr=model.x[5, 0] == 2.0)
    model.set_answer_constraint6 = Constraint(expr=model.x[6, 0] == 3.5)
    model.set_answer_constraint7 = Constraint(expr=model.x[7, 0] == 4.0)
    model.set_answer_constraint8 = Constraint(expr=model.x[1, 1] == -5.0)
    model.set_answer_constraint9 = Constraint(expr=model.x[2, 1] == -3.0)
    model.set_answer_constraint10 = Constraint(expr=model.x[3, 1] == -2.5)
    model.set_answer_constraint11 = Constraint(expr=model.x[4, 1] == -1.5)
    model.set_answer_constraint12 = Constraint(expr=model.x[5, 1] == 2.0)
    model.set_answer_constraint13 = Constraint(expr=model.x[6, 1] == 3.5)
    model.set_answer_constraint14 = Constraint(expr=model.x[7, 1] == 4.0)

    return model
Example #10
 def d_rule(d, j):
     m = d.model()
     d.c = Constraint(m.I[:j], rule=c_rule)
Example #11
    def criticalityCheck(self, x, y, z, rom_params, worstcase=False, M=[0.0]):

        model = self.model

        self.setVarValue(x=x, y=y, z=z)
        self.setBound(x, y, z, 1e10)
        self.deactiveExtraConObj()
        self.activateRomCons(x, rom_params)

        optGJH = SolverFactory('contrib.gjh')
        optGJH.solve(model, tee=False, symbolic_solver_labels=True)
        g, J, varlist, conlist = model._gjh_info

        l = ConcreteModel()
        l.v = Var(varlist, domain=Reals)
        for i in varlist:
            #dummy = model.find_component(i)
            l.v[i] = 0.0
            l.v[i].setlb(-1.0)
            l.v[i].setub(1.0)
        if worstcase:
            if M.all() == 0.0:
                print(
                    'WARNING: worstcase criticality was requested but Jacobian error bound is zero'
                )
            l.t = Var(range(0, self.ly), domain=Reals)
            for i in range(0, self.ly):
                l.t[i].setlb(-M[i])
                l.t[i].setub(M[i])

        def linConMaker(l, i):
            # i should be range(len(conlist) - 1)
            # because last element of conlist is the objective
            con_i = model.find_component(conlist[i])

            isEquality = con_i.equality

            isROM = False

            if conlist[i][:7] == '.' + self.TRF.name + '.rom':
                isROM = True
                romIndex = int("".join(filter(str.isdigit, conlist[i])))  # Python 3: filter returns an iterator

            # This is very inefficient
            # Fix this later if these problems get big
            # This is the ith row of J times v
            Jv = sum(x[2] * l.v[varlist[x[1]]] for x in J if x[0] == i)

            if isEquality:
                if worstcase and isROM:
                    return Jv + l.t[romIndex] == 0
                else:
                    return Jv == 0
            else:
                lo = con_i.lower
                up = con_i.upper
                if lo is not None:
                    level = lo.value - con_i.lslack()
                    if up is not None:
                        return (lo.value <= level + Jv <= up.value)
                    else:
                        return (lo.value <= level + Jv)
                elif up is not None:
                    level = up.value - con_i.uslack()
                    return (level + Jv <= up.value)
                else:
                    raise Exception(
                        "This constraint seems to be neither equality or inequality: "
                        + conlist(i))

        l.lincons = Constraint(range(len(conlist) - 1), rule=linConMaker)

        l.obj = Objective(expr=sum(x[1] * l.v[varlist[x[0]]] for x in g))

        # Calculate gradient norm for scaling purposes
        gfnorm = sqrt(sum(x[1]**2 for x in g))

        opt = SolverFactory(self.solver, solver_io=self.solver_io)
        opt.options['halt_on_ampl_error'] = 'yes'
        opt.options['max_iter'] = 5000
        results = opt.solve(l,
                            keepfiles=self.keepfiles,
                            tee=self.stream_solver)

        if ((results.solver.status == SolverStatus.ok)
                and (results.solver.termination_condition
                     == TerminationCondition.optimal)):
            l.solutions.load_from(results)
            if gfnorm > 1:
                return True, abs(l.obj()) / gfnorm
            else:
                return True, abs(l.obj())
        else:
            print("Waring: Crticality check fails with solver Status: " +
                  str(results.solver.status))
            print("And Termination Conditions: " +
                  str(results.solver.termination_condition))
            return False, infinity
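A reading of the auxiliary problem above (an interpretation of the code): the model l minimizes the linearized objective g·v over step directions v with each component in [-1, 1] (plus ROM error terms t bounded by ±M in the worst-case variant), subject to the linearized constraints remaining feasible. The absolute optimal value, scaled by the gradient norm when that norm exceeds 1, is returned as the criticality measure.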
Example #12
def device_scheduler(  # noqa C901
    device_constraints: List[pd.DataFrame],
    ems_constraints: pd.DataFrame,
    commitment_quantities: List[pd.Series],
    commitment_downwards_deviation_price: Union[List[pd.Series], List[float]],
    commitment_upwards_deviation_price: Union[List[pd.Series], List[float]],
) -> Tuple[List[pd.Series], float, SolverResults]:
    """This generic device scheduler is able to handle an EMS with multiple devices,
    with various types of constraints on the EMS level and on the device level,
    and with multiple market commitments on the EMS level.
    A typical example is a house with many devices.
    The commitments are assumed to be with regard to the flow of energy to the device (positive for consumption,
    negative for production). The solver minimises the costs of deviating from the commitments.

    Device constraints are on a device level. Handled constraints (listed by column name):
        max: maximum stock assuming an initial stock of zero (e.g. in MWh or boxes)
        min: minimum stock assuming an initial stock of zero
        equals: exact amount of stock (we do this by clamping min and max)
        derivative max: maximum flow (e.g. in MW or boxes/h)
        derivative min: minimum flow
        derivative equals: exact amount of flow (we do this by clamping derivative min and derivative max)
        derivative down efficiency: ratio of downwards flows (flow into EMS : flow out of device)
        derivative up efficiency: ratio of upwards flows (flow into device : flow out of EMS)
    EMS constraints are on an EMS level. Handled constraints (listed by column name):
        derivative max: maximum flow
        derivative min: minimum flow
    Commitments are on an EMS level. Parameter explanations:
        commitment_quantities: amounts of flow specified in commitments (both previously ordered and newly requested)
            - e.g. in MW or boxes/h
        commitment_downwards_deviation_price: penalty for downwards deviations of the flow
            - e.g. in EUR/MW or EUR/(boxes/h)
            - either a single value (same value for each flow value) or a Series (different value for each flow value)
        commitment_upwards_deviation_price: penalty for upwards deviations of the flow

    All Series and DataFrames should have the same resolution.

    For now, we pass in the various constraints and prices as separate variables, from which we make a MultiIndex
    DataFrame. Later we could pass in a MultiIndex DataFrame directly.
    """

    # If the EMS has no devices, don't bother
    if len(device_constraints) == 0:
        return [], 0, SolverResults()

    # Check if commitments have the same time window and resolution as the constraints
    start = device_constraints[0].index.to_pydatetime()[0]
    resolution = pd.to_timedelta(device_constraints[0].index.freq)
    end = device_constraints[0].index.to_pydatetime()[-1] + resolution
    if len(commitment_quantities) != 0:
        start_c = commitment_quantities[0].index.to_pydatetime()[0]
        resolution_c = pd.to_timedelta(commitment_quantities[0].index.freq)
        end_c = commitment_quantities[0].index.to_pydatetime()[-1] + resolution
        if not (start_c == start and end_c == end):
            raise Exception(
                "Not implemented for different time windows.\n(%s,%s)\n(%s,%s)"
                % (start, end, start_c, end_c))
        if resolution_c != resolution:
            raise Exception(
                "Not implemented for different resolutions.\n%s\n%s" %
                (resolution, resolution_c))

    # Turn prices per commitment into prices per commitment flow
    if len(commitment_downwards_deviation_price) != 0:
        if all(
                isinstance(price, float)
                for price in commitment_downwards_deviation_price):
            commitment_downwards_deviation_price = [
                initialize_series(price, start, end, resolution)
                for price in commitment_downwards_deviation_price
            ]
    if len(commitment_upwards_deviation_price) != 0:
        if all(
                isinstance(price, float)
                for price in commitment_upwards_deviation_price):
            commitment_upwards_deviation_price = [
                initialize_series(price, start, end, resolution)
                for price in commitment_upwards_deviation_price
            ]

    model = ConcreteModel()

    # Add indices for devices (d), datetimes (j) and commitments (c)
    model.d = RangeSet(0, len(device_constraints) - 1, doc="Set of devices")
    model.j = RangeSet(0,
                       len(device_constraints[0].index.to_pydatetime()) - 1,
                       doc="Set of datetimes")
    model.c = RangeSet(0,
                       len(commitment_quantities) - 1,
                       doc="Set of commitments")

    # Add parameters
    def price_down_select(m, c, j):
        return commitment_downwards_deviation_price[c].iloc[j]

    def price_up_select(m, c, j):
        return commitment_upwards_deviation_price[c].iloc[j]

    def commitment_quantity_select(m, c, j):
        return commitment_quantities[c].iloc[j]

    def device_max_select(m, d, j):
        max_v = device_constraints[d]["max"].iloc[j]
        equal_v = device_constraints[d]["equals"].iloc[j]
        if np.isnan(max_v) and np.isnan(equal_v):
            return infinity
        else:
            return np.nanmin([max_v, equal_v])

    def device_min_select(m, d, j):
        min_v = device_constraints[d]["min"].iloc[j]
        equal_v = device_constraints[d]["equals"].iloc[j]
        if np.isnan(min_v) and np.isnan(equal_v):
            return -infinity
        else:
            return np.nanmax([min_v, equal_v])

    def device_derivative_max_select(m, d, j):
        max_v = device_constraints[d]["derivative max"].iloc[j]
        equal_v = device_constraints[d]["derivative equals"].iloc[j]
        if np.isnan(max_v) and np.isnan(equal_v):
            return infinity
        else:
            return np.nanmin([max_v, equal_v])

    def device_derivative_min_select(m, d, j):
        min_v = device_constraints[d]["derivative min"].iloc[j]
        equal_v = device_constraints[d]["derivative equals"].iloc[j]
        if np.isnan(min_v) and np.isnan(equal_v):
            return -infinity
        else:
            return np.nanmax([min_v, equal_v])

    def ems_derivative_max_select(m, j):
        v = ems_constraints["derivative max"].iloc[j]
        if np.isnan(v):
            return infinity
        else:
            return v

    def ems_derivative_min_select(m, j):
        v = ems_constraints["derivative min"].iloc[j]
        if np.isnan(v):
            return -infinity
        else:
            return v

    def device_derivative_down_efficiency(m, d, j):
        try:
            return device_constraints[d]["derivative down efficiency"].iloc[j]
        except KeyError:
            return 1

    def device_derivative_up_efficiency(m, d, j):
        try:
            return device_constraints[d]["derivative up efficiency"].iloc[j]
        except KeyError:
            return 1

    model.up_price = Param(model.c, model.j, initialize=price_up_select)
    model.down_price = Param(model.c, model.j, initialize=price_down_select)
    model.commitment_quantity = Param(model.c,
                                      model.j,
                                      initialize=commitment_quantity_select)
    model.device_max = Param(model.d, model.j, initialize=device_max_select)
    model.device_min = Param(model.d, model.j, initialize=device_min_select)
    model.device_derivative_max = Param(
        model.d, model.j, initialize=device_derivative_max_select)
    model.device_derivative_min = Param(
        model.d, model.j, initialize=device_derivative_min_select)
    model.ems_derivative_max = Param(model.j,
                                     initialize=ems_derivative_max_select)
    model.ems_derivative_min = Param(model.j,
                                     initialize=ems_derivative_min_select)
    model.device_derivative_down_efficiency = Param(
        model.d, model.j, initialize=device_derivative_down_efficiency)
    model.device_derivative_up_efficiency = Param(
        model.d, model.j, initialize=device_derivative_up_efficiency)

    # Add variables
    model.ems_power = Var(model.d, model.j, domain=Reals, initialize=0)
    model.device_power_down = Var(model.d,
                                  model.j,
                                  domain=NonPositiveReals,
                                  initialize=0)
    model.device_power_up = Var(model.d,
                                model.j,
                                domain=NonNegativeReals,
                                initialize=0)
    model.commitment_downwards_deviation = Var(model.c,
                                               model.j,
                                               domain=NonPositiveReals,
                                               initialize=0)
    model.commitment_upwards_deviation = Var(model.c,
                                             model.j,
                                             domain=NonNegativeReals,
                                             initialize=0)

    # Add constraints as a tuple of (lower bound, value, upper bound)
    def device_bounds(m, d, j):
        return (
            m.device_min[d, j],
            sum(m.device_power_down[d, k] + m.device_power_up[d, k]
                for k in range(0, j + 1)),
            m.device_max[d, j],
        )

    def device_derivative_bounds(m, d, j):
        return (
            m.device_derivative_min[d, j],
            m.device_power_down[d, j] + m.device_power_up[d, j],
            m.device_derivative_max[d, j],
        )

    def device_down_derivative_bounds(m, d, j):
        return (
            m.device_derivative_min[d, j],
            m.device_power_down[d, j],
            0,
        )

    def device_up_derivative_bounds(m, d, j):
        return (
            0,
            m.device_power_up[d, j],
            m.device_derivative_max[d, j],
        )

    def ems_derivative_bounds(m, j):
        return m.ems_derivative_min[j], sum(
            m.ems_power[:, j]), m.ems_derivative_max[j]

    def ems_flow_commitment_equalities(m, j):
        """Couple EMS flows (sum over devices) to commitments."""
        return (
            0,
            sum(m.commitment_quantity[:, j]) +
            sum(m.commitment_downwards_deviation[:, j]) +
            sum(m.commitment_upwards_deviation[:, j]) - sum(m.ems_power[:, j]),
            0,
        )

    def device_derivative_equalities(m, d, j):
        """Couple device flows to EMS flows per device, applying efficiencies."""
        return (
            0,
            m.device_power_up[d, j] / m.device_derivative_up_efficiency[d, j] +
            m.device_power_down[d, j] *
            m.device_derivative_down_efficiency[d, j] - m.ems_power[d, j],
            0,
        )

    model.device_energy_bounds = Constraint(model.d,
                                            model.j,
                                            rule=device_bounds)
    model.device_power_bounds = Constraint(model.d,
                                           model.j,
                                           rule=device_derivative_bounds)
    model.device_power_down_bounds = Constraint(
        model.d, model.j, rule=device_down_derivative_bounds)
    model.device_power_up_bounds = Constraint(model.d,
                                              model.j,
                                              rule=device_up_derivative_bounds)
    model.ems_power_bounds = Constraint(model.j, rule=ems_derivative_bounds)
    model.ems_power_commitment_equalities = Constraint(
        model.j, rule=ems_flow_commitment_equalities)
    model.device_power_equalities = Constraint(
        model.d, model.j, rule=device_derivative_equalities)

    # Add objective
    def cost_function(m):
        costs = 0
        for c in m.c:
            for j in m.j:
                costs += m.commitment_downwards_deviation[c,
                                                          j] * m.down_price[c,
                                                                            j]
                costs += m.commitment_upwards_deviation[c, j] * m.up_price[c,
                                                                           j]
        return costs

    model.costs = Objective(rule=cost_function, sense=minimize)

    # Solve
    results = SolverFactory(
        current_app.config.get("FLEXMEASURES_LP_SOLVER")).solve(model)

    planned_costs = value(model.costs)
    planned_power_per_device = []
    for d in model.d:
        planned_device_power = [
            model.device_power_down[d, j].value +
            model.device_power_up[d, j].value for j in model.j
        ]
        planned_power_per_device.append(
            pd.Series(
                index=pd.date_range(start=start,
                                    end=end,
                                    freq=to_offset(resolution),
                                    closed="left"),
                data=planned_device_power,
            ))

    # model.pprint()
    # print(results.solver.termination_condition)
    # print(planned_costs)
    # model.display()
    return planned_power_per_device, planned_costs, results
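A hedged sketch of the input frames the docstring above describes: the column names come from the code, while the device, horizon, and numbers are invented. Actually running device_scheduler additionally needs the surrounding package (for example initialize_series and the Flask current_app configuration naming the LP solver).

import pandas as pd

index = pd.date_range("2021-01-01 00:00", periods=4, freq="15min")

# One device, e.g. a small battery. 'equals' and 'derivative equals' stay NaN,
# meaning no exact stock or flow requirement; the efficiency columns are omitted
# entirely, so the scheduler falls back to a ratio of 1.
battery = pd.DataFrame(index=index, dtype=float,
                       columns=["max", "min", "equals",
                                "derivative max", "derivative min",
                                "derivative equals"])
battery["max"] = 2.0               # stock ceiling relative to the start (e.g. MWh)
battery["min"] = -2.0              # stock floor
battery["derivative max"] = 0.5    # maximum charge rate (e.g. MW)
battery["derivative min"] = -0.5   # maximum discharge rate

# EMS-level flow limits (e.g. grid connection capacity)
ems = pd.DataFrame(index=index, dtype=float,
                   columns=["derivative max", "derivative min"])
ems["derivative max"] = 1.0
ems["derivative min"] = -1.0

# One commitment of zero flow, with asymmetric deviation prices
commitment_quantities = [pd.Series(0.0, index=index)]
commitment_downwards_deviation_price = [pd.Series(5.0, index=index)]
commitment_upwards_deviation_price = [pd.Series(6.0, index=index)]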
Example #13
def handle_subproblem_optimal(fixed_nlp, solve_data, config, cb_opt=None, fp=False):
    """This function copies the result of the NLP solver function ('solve_subproblem') to the working model, updates
    the bounds, adds OA and no-good cuts, and then stores the new solution if it is the new best solution. This
    function handles the result of the latest iteration of solving the NLP subproblem given an optimal solution.

    Args:
        fixed_nlp (Pyomo model): integer-variable-fixed NLP model.
        solve_data (MindtPySolveData): data container that holds solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.
        cb_opt (SolverFactory, optional): the gurobi_persistent solver. Defaults to None.
        fp (bool, optional): whether this call happens inside the feasibility pump loop. Defaults to False.
    """
    copy_var_list_values(
        fixed_nlp.MindtPy_utils.variable_list,
        solve_data.working_model.MindtPy_utils.variable_list,
        config)
    if config.calculate_dual:
        for c in fixed_nlp.tmp_duals:
            if fixed_nlp.dual.get(c, None) is None:
                fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c]
        dual_values = list(fixed_nlp.dual[c]
                           for c in fixed_nlp.MindtPy_utils.constraint_list)
    else:
        dual_values = None
    main_objective = fixed_nlp.MindtPy_utils.objective_list[-1]
    update_primal_bound(solve_data, value(main_objective.expr))
    if solve_data.solution_improved:
        solve_data.best_solution_found = fixed_nlp.clone()
        solve_data.best_solution_found_time = get_main_elapsed_time(
            solve_data.timing)
        if config.strategy == 'GOA':
            if solve_data.objective_sense == minimize:
                solve_data.num_no_good_cuts_added.update(
                    {solve_data.UB: len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts)})
            else:
                solve_data.num_no_good_cuts_added.update(
                    {solve_data.LB: len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts)})

        # add obj increasing constraint for fp
        if fp:
            solve_data.mip.MindtPy_utils.cuts.del_component(
                'improving_objective_cut')
            if solve_data.objective_sense == minimize:
                solve_data.mip.MindtPy_utils.cuts.improving_objective_cut = Constraint(expr=solve_data.mip.MindtPy_utils.objective_value
                                                                                       <= solve_data.UB - config.fp_cutoffdecr*max(1, abs(solve_data.UB)))
            else:
                solve_data.mip.MindtPy_utils.cuts.improving_objective_cut = Constraint(expr=solve_data.mip.MindtPy_utils.objective_value
                                                                                       >= solve_data.LB + config.fp_cutoffdecr*max(1, abs(solve_data.UB)))

    # Add the linear cut
    if config.strategy == 'OA' or fp:
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_oa_cuts(solve_data.mip, dual_values, solve_data, config, cb_opt)
    elif config.strategy == 'GOA':
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_affine_cuts(solve_data, config)
    # elif config.strategy == 'PSC':
    #     # !!THIS SEEMS LIKE A BUG!! - mrmundt #
    #     add_psc_cut(solve_data, config)
    # elif config.strategy == 'GBD':
    #     # !!THIS SEEMS LIKE A BUG!! - mrmundt #
    #     add_gbd_cut(solve_data, config)

    var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list)
    if config.add_no_good_cuts:
        add_no_good_cuts(var_values, solve_data, config)

    config.call_after_subproblem_feasible(fixed_nlp, solve_data)

    config.logger.info(solve_data.fixed_nlp_log_formatter.format('*' if solve_data.solution_improved else ' ',
                                                                 solve_data.nlp_iter if not fp else solve_data.fp_iter,
                                                                 'Fixed NLP',
                                                                 value(
                                                                     main_objective.expr),
                                                                 solve_data.LB,
                                                                 solve_data.UB,
                                                                 solve_data.rel_gap,
                                                                 get_main_elapsed_time(solve_data.timing)))
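A note on the feasibility-pump branch above (an interpretation of the code): when fp is set and the incumbent improves, the improving_objective_cut re-added to the MIP forces the next iterate's objective value to beat the current bound by at least fp_cutoffdecr times max(1, the bound's magnitude), which keeps the pump from stalling at the same objective value.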
Example #14
    def _xform_constraint(self, obj, disjunct, infodict, var_substitute_map,
                          zero_substitute_map):
        # we will put a new transformed constraint on the relaxation block.
        relaxationBlock = infodict['chull']['relaxationBlock']
        transBlock = relaxationBlock.parent_block()
        varMap = infodict['chull']['disaggregatedVars']

        # Though rare, it is possible to get naming conflicts here
        # since constraints from all blocks are getting moved onto the
        # same block. So we get a unique name
        name = unique_component_name(relaxationBlock, obj.name)

        if obj.is_indexed():
            try:
                newConstraint = Constraint(obj.index_set(), transBlock.lbub)
            except:
                # The original constraint may have been indexed by a
                # non-concrete set (like an Any).  We will give up on
                # strict index verification and just blindly proceed.
                newConstraint = Constraint(Any)
        else:
            newConstraint = Constraint(transBlock.lbub)
        relaxationBlock.add_component(name, newConstraint)
        # add mapping of original constraint to transformed constraint
        # in transformation info dictionary
        infodict['chull']['relaxedConstraints'][obj] = newConstraint
        # add mapping of transformed constraint back to original constraint (we
        # know that the info dict is already created because this only got
        # called if we were transforming a disjunct...)
        relaxationBlock._gdp_transformation_info['srcConstraints'][
            newConstraint] = obj

        for i in sorted(iterkeys(obj)):
            c = obj[i]
            if not c.active:
                continue

            NL = c.body.polynomial_degree() not in (0,1)
            EPS = self._config.EPS
            mode = self._config.perspective_function

            # We need to evaluate the expression at the origin *before*
            # we substitute the expression variables with the
            # disaggregated variables
            if not NL or mode == "FurmanSawayaGrossmann":
                h_0 = clone_without_expression_components(
                    c.body, substitute=zero_substitute_map)

            y = disjunct.indicator_var
            if NL:
                if mode == "LeeGrossmann":
                    sub_expr = clone_without_expression_components(
                        c.body,
                        substitute=dict(
                            (var,  subs/y)
                            for var, subs in iteritems(var_substitute_map) )
                    )
                    expr = sub_expr * y
                elif mode == "GrossmannLee":
                    sub_expr = clone_without_expression_components(
                        c.body,
                        substitute=dict(
                            (var, subs/(y + EPS))
                            for var, subs in iteritems(var_substitute_map) )
                    )
                    expr = (y + EPS) * sub_expr
                elif mode == "FurmanSawayaGrossmann":
                    sub_expr = clone_without_expression_components(
                        c.body,
                        substitute=dict(
                            (var, subs/((1 - EPS)*y + EPS))
                            for var, subs in iteritems(var_substitute_map) )
                    )
                    expr = ((1-EPS)*y + EPS)*sub_expr - EPS*h_0*(1-y)
                else:
                    raise RuntimeError("Unknown NL CHull mode")
            else:
                expr = clone_without_expression_components(
                    c.body, substitute=var_substitute_map)

            if c.equality:
                if NL:
                    newConsExpr = expr == c.lower*y
                else:
                    v = list(EXPR.identify_variables(expr))
                    if len(v) == 1 and not c.lower:
                        # Setting a variable to 0 in a disjunct is
                        # *very* common.  We should recognize that in
                        # that structure, the disaggregated variable
                        # will also be fixed to 0.
                        v[0].fix(0)
                        continue
                    newConsExpr = expr - (1-y)*h_0 == c.lower*y

                if obj.is_indexed():
                    newConstraint.add((i, 'eq'), newConsExpr)
                else:
                    newConstraint.add('eq', newConsExpr)
                continue

            if c.lower is not None:
                # TODO: At the moment there is no reason for this to be in both
                # lower and upper... I think there could be though if I say what
                # the new constraint is going to be or something.
                if __debug__ and logger.isEnabledFor(logging.DEBUG):
                    logger.debug("GDP(cHull): Transforming constraint " +
                                 "'%s'", c.name)
                if NL:
                    newConsExpr = expr >= c.lower*y
                else:
                    newConsExpr = expr - (1-y)*h_0 >= c.lower*y

                if obj.is_indexed():
                    newConstraint.add((i, 'lb'), newConsExpr)
                else:
                    newConstraint.add('lb', newConsExpr)

            if c.upper is not None:
                if __debug__ and logger.isEnabledFor(logging.DEBUG):
                    logger.debug("GDP(cHull): Transforming constraint " +
                                 "'%s'", c.name)
                if NL:
                    newConsExpr = expr <= c.upper*y
                else:
                    newConsExpr = expr - (1-y)*h_0 <= c.upper*y

                if obj.is_indexed():
                    newConstraint.add((i, 'ub'), newConsExpr)
                else:
                    newConstraint.add('ub', newConsExpr)
Example #15
 def d_rule(disjunct, flag, s, t):
     m = disjunct.model()
     if flag:
         disjunct.c = Constraint(expr=m.a[s, t] == 0)
     else:
         disjunct.c = Constraint(expr=m.a[s, t] >= 5)
Example #16
 def _d(d, i):
     d.x = Var(range(i))
     d.silly = Constraint(expr=d.indicator_var == i)
Example #17
 def disjunct_rule(d, s, flag):
     m = d.model()
     if flag:
         d.c = Constraint(expr=m.a[s] >= 6)
     else:
         d.c = Constraint(expr=m.a[s] <= 3)
Example #18
 def innerdisj_rule(d, flag):
     m = d.model()
     if flag:
         d.c = Constraint(expr=m.x >= 2)
     else:
         d.c = Constraint(expr=m.x == 0)
Example #19
 def disjunct(disjunct, flag):
     m = disjunct.model()
     if flag:
         disjunct.c = Constraint(expr=m.a <= 3)
     else:
         disjunct.c = Constraint(expr=m.a == 0)
Example #20
def handle_subproblem_optimal(fixed_nlp, solve_data, config, fp=False):
    """
    This function copies the result of the NLP solver function ('solve_subproblem') to the working model, updates
    the bounds, adds OA and no-good cuts, and then stores the new solution if it is the new best solution. This
    function handles the result of the latest iteration of solving the NLP subproblem given an optimal solution.

    Parameters
    ----------
    fixed_nlp: Pyomo model
        Fixed-NLP from the model
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                         solve_data.working_model.MindtPy_utils.variable_list,
                         config)
    if config.calculate_dual:
        for c in fixed_nlp.tmp_duals:
            if fixed_nlp.dual.get(c, None) is None:
                fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c]
        dual_values = list(fixed_nlp.dual[c]
                           for c in fixed_nlp.MindtPy_utils.constraint_list)
    else:
        dual_values = None
    main_objective = fixed_nlp.MindtPy_utils.objective_list[-1]
    if solve_data.objective_sense == minimize:
        solve_data.UB = min(value(main_objective.expr), solve_data.UB)
        solve_data.solution_improved = solve_data.UB < solve_data.UB_progress[
            -1]
        solve_data.UB_progress.append(solve_data.UB)
    else:
        solve_data.LB = max(value(main_objective.expr), solve_data.LB)
        solve_data.solution_improved = solve_data.LB > solve_data.LB_progress[
            -1]
        solve_data.LB_progress.append(solve_data.LB)
    config.logger.info(
        'Fixed-NLP {}: OBJ: {}  LB: {}  UB: {}  TIME: {}s'.format(
            solve_data.nlp_iter if not fp else solve_data.fp_iter,
            value(main_objective.expr), solve_data.LB, solve_data.UB,
            round(get_main_elapsed_time(solve_data.timing), 2)))

    if solve_data.solution_improved:
        solve_data.best_solution_found = fixed_nlp.clone()
        solve_data.best_solution_found_time = get_main_elapsed_time(
            solve_data.timing)
        if config.strategy == 'GOA':
            if solve_data.objective_sense == minimize:
                solve_data.num_no_good_cuts_added.update({
                    solve_data.UB:
                    len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts)
                })
            else:
                solve_data.num_no_good_cuts_added.update({
                    solve_data.LB:
                    len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts)
                })

        # add obj increasing constraint for fp
        if fp:
            solve_data.mip.MindtPy_utils.cuts.del_component(
                'improving_objective_cut')
            if solve_data.objective_sense == minimize:
                solve_data.mip.MindtPy_utils.cuts.improving_objective_cut = Constraint(
                    expr=solve_data.mip.MindtPy_utils.objective_value <=
                    solve_data.UB -
                    config.fp_cutoffdecr * max(1, abs(solve_data.UB)))
            else:
                solve_data.mip.MindtPy_utils.cuts.improving_objective_cut = Constraint(
                    expr=solve_data.mip.MindtPy_utils.objective_value >=
                    solve_data.LB +
                    config.fp_cutoffdecr * max(1, abs(solve_data.LB)))

    # Add the linear cut
    if config.strategy == 'OA' or fp:
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
    elif config.strategy == 'GOA':
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_affine_cuts(solve_data, config)
    # elif config.strategy == 'PSC':
    #     # !!THIS SEEMS LIKE A BUG!! - mrmundt #
    #     add_psc_cut(solve_data, config)
    # elif config.strategy == 'GBD':
    #     # !!THIS SEEMS LIKE A BUG!! - mrmundt #
    #     add_gbd_cut(solve_data, config)

    var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list)
    if config.add_no_good_cuts:
        add_no_good_cuts(var_values, solve_data, config, feasible=True)

    config.call_after_subproblem_feasible(fixed_nlp, solve_data)
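This handler is invoked internally by MindtPy after each fixed-NLP subproblem; a user only sees the solve() entry point. A hedged sketch of that entry point follows; the tiny convex MINLP and the solver choices (glpk, ipopt) are illustrative assumptions.

from pyomo.environ import (ConcreteModel, Var, Objective, Constraint, Binary,
                           SolverFactory)

m = ConcreteModel()
m.x = Var(bounds=(0, 10))
m.y = Var(domain=Binary)
m.c = Constraint(expr=m.x + m.y >= 1)
m.obj = Objective(expr=(m.x - 2) ** 2 + m.y)     # convex nonlinear objective
# strategy='GOA' would exercise the affine-cut branch above instead of OA cuts
SolverFactory('mindtpy').solve(m, strategy='OA',
                               mip_solver='glpk', nlp_solver='ipopt')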
Example #21
def disjunct1(disjunct, s, flag):
    m = disjunct.model()
    if not flag:
        disjunct.c = Constraint(expr=m.a[s] == 0)
    else:
        disjunct.c = Constraint(expr=m.a[s] >= 7)
Example #22
    def _transform_disjunct(self, obj, transBlock, varSet, localVars):
        if hasattr(obj, "_gdp_transformation_info"):
            infodict = obj._gdp_transformation_info
            # If the user has something with our name that is not a dict, we
            # scream. If they have a dict with this name then we are just going
            # to use it...
            if type(infodict) is not dict:
                raise GDP_Error(
                    "Disjunct %s contains an attribute named "
                    "_gdp_transformation_info. The transformation requires "
                    "that it can create this attribute!" % obj.name)
        else:
            infodict = obj._gdp_transformation_info = {}
        # deactivated means either we've already transformed or user deactivated
        if not obj.active:
            if obj.indicator_var.is_fixed():
                if value(obj.indicator_var) == 0:
                    # The user cleanly deactivated the disjunct: there
                    # is nothing for us to do here.
                    return
                else:
                    raise GDP_Error(
                        "The disjunct %s is deactivated, but the "
                        "indicator_var is fixed to %s. This makes no sense." %
                        (obj.name, value(obj.indicator_var)))
            if not infodict.get('relaxed', False):
                raise GDP_Error(
                    "The disjunct %s is deactivated, but the "
                    "indicator_var is not fixed and the disjunct does not "
                    "appear to have been relaxed. This makes no sense." %
                    (obj.name, ))

        if 'chull' in infodict:
            # we've transformed it (with CHull), so don't do it again.
            return

        # add reference to original disjunct to info dict on
        # transformation block
        relaxedDisjuncts = transBlock.relaxedDisjuncts
        relaxationBlock = relaxedDisjuncts[len(relaxedDisjuncts)]
        relaxationBlockInfo = relaxationBlock._gdp_transformation_info = {
            'src': obj,
            'srcVars': ComponentMap(),
            'srcConstraints': ComponentMap(),
            'boundConstraintToSrcVar': ComponentMap(),
        }
        infodict['chull'] = chull = {
            'relaxationBlock': relaxationBlock,
            'relaxedConstraints': ComponentMap(),
            'disaggregatedVars': ComponentMap(),
            'bigmConstraints': ComponentMap(),
        }

        # if this is a disjunctData from an indexed disjunct, we are
        # going to want to check at the end that the container is
        # deactivated if everything in it is. So we save it in our
        # dictionary of things to check if it isn't there already.
        disjParent = obj.parent_component()
        if disjParent.is_indexed() and \
           disjParent not in transBlock.disjContainers:
            transBlock.disjContainers.add(disjParent)

        # add the disaggregated variables and their bigm constraints
        # to the relaxationBlock
        for var in varSet:
            lb = var.lb
            ub = var.ub
            if lb is None or ub is None:
                raise GDP_Error("Variables that appear in disjuncts must be "
                                "bounded in order to use the chull "
                                "transformation! Missing bound for %s." %
                                (var.name))

            disaggregatedVar = Var(within=Reals,
                                   bounds=(min(0, lb), max(0, ub)),
                                   initialize=var.value)
            # naming conflicts are possible here since this is a bunch
            # of variables from different blocks coming together, so we
            # get a unique name
            disaggregatedVarName = unique_component_name(
                relaxationBlock,
                var.getname(fully_qualified=False, name_buffer=NAME_BUFFER),
            )
            relaxationBlock.add_component(disaggregatedVarName,
                                          disaggregatedVar)
            chull['disaggregatedVars'][var] = disaggregatedVar
            relaxationBlockInfo['srcVars'][disaggregatedVar] = var

            bigmConstraint = Constraint(transBlock.lbub)
            relaxationBlock.add_component(disaggregatedVarName + "_bounds",
                                          bigmConstraint)
            if lb:
                bigmConstraint.add('lb',
                                   obj.indicator_var * lb <= disaggregatedVar)
            if ub:
                bigmConstraint.add('ub',
                                   disaggregatedVar <= obj.indicator_var * ub)
            chull['bigmConstraints'][var] = bigmConstraint
            relaxationBlockInfo['boundConstraintToSrcVar'][
                bigmConstraint] = var

        for var in localVars:
            lb = var.lb
            ub = var.ub
            if lb is None or ub is None:
                raise GDP_Error("Variables that appear in disjuncts must be "
                                "bounded in order to use the chull "
                                "transformation! Missing bound for %s." %
                                (var.name))
            if value(lb) > 0:
                var.setlb(0)
            if value(ub) < 0:
                var.setub(0)

            # naming conflicts are possible here since this is a bunch
            # of variables from different blocks coming together, so we
            # get a unique name
            conName = unique_component_name(
                relaxationBlock,
                var.getname(fully_qualified=False, name_buffer=NAME_BUFFER) +
                "_bounds")
            bigmConstraint = Constraint(transBlock.lbub)
            relaxationBlock.add_component(conName, bigmConstraint)
            bigmConstraint.add('lb', obj.indicator_var * lb <= var)
            bigmConstraint.add('ub', var <= obj.indicator_var * ub)
            chull['bigmConstraints'][var] = bigmConstraint
            relaxationBlockInfo['boundConstraintToSrcVar'][
                bigmConstraint] = var

        var_substitute_map = dict(
            (id(v), newV) for v, newV in iteritems(chull['disaggregatedVars']))
        zero_substitute_map = dict(
            (id(v), ZeroConstant)
            for v, newV in iteritems(chull['disaggregatedVars']))
        zero_substitute_map.update((id(v), ZeroConstant) for v in localVars)

        # Transform each component within this disjunct
        self._transform_block_components(obj, obj, infodict,
                                         var_substitute_map,
                                         zero_substitute_map)

        # deactivate disjunct so we know we've relaxed it
        obj._deactivate_without_fixing_indicator()
        infodict['relaxed'] = True
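A minimal sketch of applying the convex-hull (chull) reformulation that this method implements; the two-disjunct model below is an illustrative assumption. As the GDP_Error above enforces, every variable appearing in a disjunct must be bounded.

from pyomo.environ import ConcreteModel, Var, TransformationFactory
from pyomo.gdp import Disjunction

m = ConcreteModel()
m.x = Var(bounds=(0, 8))                        # bounds are required for the hull
m.disj = Disjunction(expr=[[m.x <= 2], [m.x >= 5]])
# registered as 'gdp.chull' in older Pyomo releases, 'gdp.hull' in newer ones
TransformationFactory('gdp.hull').apply_to(m)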
Example #23
def disjunct2_rule(disjunct, flag):
    m = disjunct.model()
    if not flag:
        disjunct.c = Constraint(expr=m.b[0].x <= 0)
    else:
        disjunct.c = Constraint(expr=m.b[0].x >= 0)
Example #24
    def _transform_constraint(self, obj, disjunct, bigMargs, arg_list,
                              disjunct_suffix_list):
        # add constraint to the transformation block, we'll transform it there.
        transBlock = disjunct._transformation_block()
        bigm_src = transBlock.bigm_src
        constraintMap = self._get_constraint_map_dict(transBlock)

        disjunctionRelaxationBlock = transBlock.parent_block()
        # Though rare, it is possible to get naming conflicts here
        # since constraints from all blocks are getting moved onto the
        # same block. So we get a unique name
        cons_name = obj.getname(fully_qualified=True, name_buffer=NAME_BUFFER)
        name = unique_component_name(transBlock, cons_name)

        if obj.is_indexed():
            newConstraint = Constraint(obj.index_set(),
                                       disjunctionRelaxationBlock.lbub)
            # we map the container of the original to the container of the
            # transformed constraint. Don't do this if obj is a SimpleConstraint
            # because we will treat that like a _ConstraintData and map to a
            # list of transformed _ConstraintDatas
            constraintMap['transformedConstraints'][obj] = newConstraint
        else:
            newConstraint = Constraint(disjunctionRelaxationBlock.lbub)
        transBlock.add_component(name, newConstraint)
        # add mapping of transformed constraint to original constraint
        constraintMap['srcConstraints'][newConstraint] = obj

        for i in sorted(iterkeys(obj)):
            c = obj[i]
            if not c.active:
                continue

            # first, we see if an M value was specified in the arguments.
            # (This returns None if not)
            M = self._get_M_from_args(c, bigMargs, arg_list, bigm_src)

            if __debug__ and logger.isEnabledFor(logging.DEBUG):
                _name = obj.getname(fully_qualified=True,
                                    name_buffer=NAME_BUFFER)
                logger.debug("GDP(BigM): The value for M for constraint '%s' "
                             "from the BigM argument is %s." %
                             (cons_name, str(M)))

            # if we didn't get something from args, try suffixes:
            if M is None:
                # first get anything parent to c but below disjunct
                suffix_list = self._get_bigm_suffix_list(
                    c.parent_block(), stopping_block=disjunct)
                # prepend that to what we already collected for the disjunct.
                suffix_list.extend(disjunct_suffix_list)
                M = self._get_M_from_suffixes(c, suffix_list, bigm_src)

            if __debug__ and logger.isEnabledFor(logging.DEBUG):
                _name = obj.getname(fully_qualified=True,
                                    name_buffer=NAME_BUFFER)
                logger.debug("GDP(BigM): The value for M for constraint '%s' "
                             "after checking suffixes is %s." %
                             (cons_name, str(M)))

            if not isinstance(M, (tuple, list)):
                if M is None:
                    M = (None, None)
                else:
                    try:
                        M = (-M, M)
                    except:
                        logger.error("Error converting scalar M-value %s "
                                     "to (-M,M).  Is %s not a numeric type?" %
                                     (M, type(M)))
                        raise
            if len(M) != 2:
                raise GDP_Error("Big-M %s for constraint %s is not of "
                                "length two. "
                                "Expected either a single value or "
                                "tuple or list of length two for M." %
                                (str(M), name))

            if c.lower is not None and M[0] is None:
                M = (self._estimate_M(c.body, name)[0] - c.lower, M[1])
                bigm_src[c] = M
            if c.upper is not None and M[1] is None:
                M = (M[0], self._estimate_M(c.body, name)[1] - c.upper)
                bigm_src[c] = M

            if __debug__ and logger.isEnabledFor(logging.DEBUG):
                _name = obj.getname(fully_qualified=True,
                                    name_buffer=NAME_BUFFER)
                logger.debug("GDP(BigM): The value for M for constraint '%s' "
                             "after estimating (if needed) is %s." %
                             (cons_name, str(M)))

            # Handle indices for both SimpleConstraint and IndexedConstraint
            if i.__class__ is tuple:
                i_lb = i + ('lb', )
                i_ub = i + ('ub', )
            elif obj.is_indexed():
                i_lb = (
                    i,
                    'lb',
                )
                i_ub = (
                    i,
                    'ub',
                )
            else:
                i_lb = 'lb'
                i_ub = 'ub'

            if c.lower is not None:
                if M[0] is None:
                    raise GDP_Error("Cannot relax disjunctive constraint '%s' "
                                    "because M is not defined." % name)
                M_expr = M[0] * (1 - disjunct.indicator_var)
                newConstraint.add(i_lb, c.lower <= c.body - M_expr)
                constraintMap['transformedConstraints'][c] = [
                    newConstraint[i_lb]
                ]
                constraintMap['srcConstraints'][newConstraint[i_lb]] = c
            if c.upper is not None:
                if M[1] is None:
                    raise GDP_Error("Cannot relax disjunctive constraint '%s' "
                                    "because M is not defined." % name)
                M_expr = M[1] * (1 - disjunct.indicator_var)
                newConstraint.add(i_ub, c.body - M_expr <= c.upper)
                transformed = constraintMap['transformedConstraints'].get(c)
                if transformed is not None:
                    constraintMap['transformedConstraints'][c].append(
                        newConstraint[i_ub])
                else:
                    constraintMap['transformedConstraints'][c] = [
                        newConstraint[i_ub]
                    ]
                constraintMap['srcConstraints'][newConstraint[i_ub]] = c

            # deactivate because we relaxed
            c.deactivate()
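A short sketch of how an M value reaches _get_M_from_args and _get_M_from_suffixes above; the model and the value 100 are illustrative assumptions.

from pyomo.environ import ConcreteModel, Var, Suffix, TransformationFactory
from pyomo.gdp import Disjunction

m = ConcreteModel()
m.x = Var()
m.disj = Disjunction(expr=[[m.x <= 2], [m.x >= 5]])

# Option 1: pass Big-M values as an argument (None is the default key)
TransformationFactory('gdp.bigm').apply_to(m, bigM={None: 100})

# Option 2 (instead of the call above): declare a BigM Suffix for the lookup
# m.BigM = Suffix(direction=Suffix.LOCAL)
# m.BigM[None] = 100
# TransformationFactory('gdp.bigm').apply_to(m)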
Example #25
def innerdisj_rule(disjunct, flag):
    m = disjunct.model()
    if flag:
        disjunct.c = Constraint(expr=m.z >= 5)
    else:
        disjunct.c = Constraint(expr=m.z == 0)
Example #26
    def _transform_constraint(self, obj, disjunct, var_substitute_map,
                              zero_substitute_map):
        # we will put a new transformed constraint on the relaxation block.
        relaxationBlock = disjunct._transformation_block()
        transBlock = relaxationBlock.parent_block()
        varMap = relaxationBlock._disaggregatedVarMap['disaggregatedVar']
        constraintMap = relaxationBlock._constraintMap

        # Though rare, it is possible to get naming conflicts here
        # since constraints from all blocks are getting moved onto the
        # same block. So we get a unique name
        name = unique_component_name(
            relaxationBlock,
            obj.getname(fully_qualified=True, name_buffer=NAME_BUFFER))

        if obj.is_indexed():
            newConstraint = Constraint(obj.index_set(), transBlock.lbub)
        else:
            newConstraint = Constraint(transBlock.lbub)
        relaxationBlock.add_component(name, newConstraint)
        # map the containers:
        # add mapping of original constraint to transformed constraint
        if obj.is_indexed():
            constraintMap['transformedConstraints'][obj] = newConstraint
        # add mapping of transformed constraint container back to original
        # constraint container (or SimpleConstraint)
        constraintMap['srcConstraints'][newConstraint] = obj

        for i in sorted(iterkeys(obj)):
            c = obj[i]
            if not c.active:
                continue

            NL = c.body.polynomial_degree() not in (0, 1)
            EPS = self._config.EPS
            mode = self._config.perspective_function

            # We need to evaluate the expression at the origin *before*
            # we substitute the expression variables with the
            # disaggregated variables
            if not NL or mode == "FurmanSawayaGrossmann":
                h_0 = clone_without_expression_components(
                    c.body, substitute=zero_substitute_map)

            y = disjunct.indicator_var
            if NL:
                if mode == "LeeGrossmann":
                    sub_expr = clone_without_expression_components(
                        c.body,
                        substitute=dict(
                            (var, subs / y)
                            for var, subs in iteritems(var_substitute_map)))
                    expr = sub_expr * y
                elif mode == "GrossmannLee":
                    sub_expr = clone_without_expression_components(
                        c.body,
                        substitute=dict(
                            (var, subs / (y + EPS))
                            for var, subs in iteritems(var_substitute_map)))
                    expr = (y + EPS) * sub_expr
                elif mode == "FurmanSawayaGrossmann":
                    sub_expr = clone_without_expression_components(
                        c.body,
                        substitute=dict(
                            (var, subs / ((1 - EPS) * y + EPS))
                            for var, subs in iteritems(var_substitute_map)))
                    expr = (
                        (1 - EPS) * y + EPS) * sub_expr - EPS * h_0 * (1 - y)
                else:
                    raise RuntimeError("Unknown NL Hull mode")
            else:
                expr = clone_without_expression_components(
                    c.body, substitute=var_substitute_map)

            if c.equality:
                if NL:
                    # ESJ TODO: This can't happen right? This is the only
                    # obvious case where someone has messed up, but this has to
                    # be nonconvex, right? Shouldn't we tell them?
                    newConsExpr = expr == c.lower * y
                else:
                    v = list(EXPR.identify_variables(expr))
                    if len(v) == 1 and not c.lower:
                        # Setting a variable to 0 in a disjunct is
                        # *very* common.  We should recognize that in
                        # that structure, the disaggregated variable
                        # will also be fixed to 0.
                        v[0].fix(0)
                        # ESJ: If you ask where the transformed constraint is,
                        # the answer is nowhere. Really, it is in the bounds of
                        # this variable, so I'm going to return
                        # it. Alternatively we could return an empty list, but I
                        # think I like this better.
                        constraintMap['transformedConstraints'][c] = [v[0]]
                        # Reverse map also (this is strange)
                        constraintMap['srcConstraints'][v[0]] = c
                        continue
                    newConsExpr = expr - (1 - y) * h_0 == c.lower * y

                if obj.is_indexed():
                    newConstraint.add((i, 'eq'), newConsExpr)
                    # map the _ConstraintDatas (we mapped the container above)
                    constraintMap['transformedConstraints'][c] = [
                        newConstraint[i, 'eq']
                    ]
                    constraintMap['srcConstraints'][newConstraint[i, 'eq']] = c
                else:
                    newConstraint.add('eq', newConsExpr)
                    # map to the _ConstraintData (And yes, for
                    # SimpleConstraints, this is overwriting the map to the
                    # container we made above, and that is what I want to
                    # happen. SimpleConstraints will map to lists. For
                    # IndexedConstraints, we can map the container to the
                    # container, but more importantly, we are mapping the
                    # _ConstraintDatas to each other above)
                    constraintMap['transformedConstraints'][c] = [
                        newConstraint['eq']
                    ]
                    constraintMap['srcConstraints'][newConstraint['eq']] = c

                continue

            if c.lower is not None:
                if __debug__ and logger.isEnabledFor(logging.DEBUG):
                    _name = c.getname(fully_qualified=True,
                                      name_buffer=NAME_BUFFER)
                    logger.debug(
                        "GDP(Hull): Transforming constraint " + "'%s'", _name)
                if NL:
                    newConsExpr = expr >= c.lower * y
                else:
                    newConsExpr = expr - (1 - y) * h_0 >= c.lower * y

                if obj.is_indexed():
                    newConstraint.add((i, 'lb'), newConsExpr)
                    constraintMap['transformedConstraints'][c] = [
                        newConstraint[i, 'lb']
                    ]
                    constraintMap['srcConstraints'][newConstraint[i, 'lb']] = c
                else:
                    newConstraint.add('lb', newConsExpr)
                    constraintMap['transformedConstraints'][c] = [
                        newConstraint['lb']
                    ]
                    constraintMap['srcConstraints'][newConstraint['lb']] = c

            if c.upper is not None:
                if __debug__ and logger.isEnabledFor(logging.DEBUG):
                    _name = c.getname(fully_qualified=True,
                                      name_buffer=NAME_BUFFER)
                    logger.debug(
                        "GDP(Hull): Transforming constraint " + "'%s'", _name)
                if NL:
                    newConsExpr = expr <= c.upper * y
                else:
                    newConsExpr = expr - (1 - y) * h_0 <= c.upper * y

                if obj.is_indexed():
                    newConstraint.add((i, 'ub'), newConsExpr)
                    # map (we have to account for the fact that we might have
                    # created a list above)
                    transformed = constraintMap['transformedConstraints'].get(
                        c)
                    if transformed is not None:
                        transformed.append(newConstraint[i, 'ub'])
                    else:
                        constraintMap['transformedConstraints'][c] = [
                            newConstraint[i, 'ub']
                        ]
                    constraintMap['srcConstraints'][newConstraint[i, 'ub']] = c
                else:
                    newConstraint.add('ub', newConsExpr)
                    transformed = constraintMap['transformedConstraints'].get(
                        c)
                    if transformed is not None:
                        transformed.append(newConstraint['ub'])
                    else:
                        constraintMap['transformedConstraints'][c] = [
                            newConstraint['ub']
                        ]
                    constraintMap['srcConstraints'][newConstraint['ub']] = c

        # deactivate now that we have transformed
        obj.deactivate()
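A hedged sketch of selecting the perspective-function mode handled above. The keyword names mirror the config entries this method reads (perspective_function, EPS); whether they can be passed to apply_to in exactly this form depends on the Pyomo release, so treat them as assumptions.

from pyomo.environ import ConcreteModel, Var, TransformationFactory
from pyomo.gdp import Disjunction

m = ConcreteModel()
m.x = Var(bounds=(0, 8))
m.disj = Disjunction(expr=[[m.x ** 2 <= 1], [m.x >= 5]])   # one nonlinear disjunct
# perspective_function and EPS are assumed keyword names (see lead-in)
TransformationFactory('gdp.hull').apply_to(
    m, perspective_function='FurmanSawayaGrossmann', EPS=1e-4)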
Example #27
def innerdisjunct0(disjunct):
    m = disjunct.model()
    disjunct.c = Constraint(expr=m.x <= 2)
Example #28
    def _transform_constraint(self, cons, disjunct, transformed_disjunct,
                              transBlock, partition):
        instance = disjunct.model()
        cons_name = cons.getname(fully_qualified=True, name_buffer=NAME_BUFFER)

        # create place on transformed Disjunct for the new constraint and
        # for the auxiliary variables
        transformed_constraint = Constraint(NonNegativeIntegers)
        transformed_disjunct.add_component(
            unique_component_name(transformed_disjunct, cons_name),
            transformed_constraint)
        aux_vars = Var(NonNegativeIntegers, dense=False)
        transformed_disjunct.add_component(
            unique_component_name(transformed_disjunct,
                                  cons_name + "_aux_vars"), aux_vars)

        # create a place on the transBlock for the split constraints
        split_constraints = Constraint(NonNegativeIntegers)
        transBlock.add_component(
            unique_component_name(transBlock,
                                  cons_name + "_split_constraints"),
            split_constraints)

        # this is a list which might have two constraints in it if we had
        # both a lower and upper value.
        leq_constraints = self._get_leq_constraints(cons)
        for (body, rhs) in leq_constraints:
            repn = generate_standard_repn(body, compute_values=True)
            nonlinear_repn = None
            if repn.nonlinear_expr is not None:
                nonlinear_repn = _generate_additively_separable_repn(
                    repn.nonlinear_expr)
            split_exprs = []
            split_aux_vars = []
            vars_not_accounted_for = ComponentSet(
                v for v in EXPR.identify_variables(body, include_fixed=False))
            vars_accounted_for = ComponentSet()
            for idx, var_list in enumerate(partition):
                # we are going to recreate the piece of the expression
                # involving the vars in var_list
                split_exprs.append(0)
                expr = split_exprs[-1]
                for i, v in enumerate(repn.linear_vars):
                    if v in var_list:
                        expr += repn.linear_coefs[i] * v
                        vars_accounted_for.add(v)
                for i, (v1, v2) in enumerate(repn.quadratic_vars):
                    if v1 in var_list:
                        if v2 not in var_list:
                            raise GDP_Error("Variables '%s' and '%s' are "
                                            "multiplied in Constraint '%s', "
                                            "but they are in different "
                                            "partitions! Please ensure that "
                                            "all the constraints in the "
                                            "disjunction are "
                                            "additively separable with "
                                            "respect to the specified "
                                            "partition." %
                                            (v1.name, v2.name, cons.name))
                        expr += repn.quadratic_coefs[i] * v1 * v2
                        vars_accounted_for.add(v1)
                        vars_accounted_for.add(v2)
                if nonlinear_repn is not None:
                    for i, expr_var_set in enumerate(
                            nonlinear_repn['nonlinear_vars']):
                        # check if v_list is a subset of var_list. If it is
                        # not and there is no intersection, we move on. If
                        # it is not and there is an intersection, we raise
                        # an error: It's not a valid partition. If it is,
                        # then we add this piece of the expression.
                        # subset?
                        if all(v in var_list for v in list(expr_var_set)):
                            expr += nonlinear_repn['nonlinear_exprs'][i]
                            for var in expr_var_set:
                                vars_accounted_for.add(var)
                        # intersection?
                        elif len(ComponentSet(expr_var_set) & var_list) != 0:
                            raise GDP_Error(
                                "Variables which appear in the "
                                "expression %s are in different "
                                "partitions, but this "
                                "expression doesn't appear "
                                "additively separable. Please "
                                "expand it if it is additively "
                                "separable or, more likely, "
                                "ensure that all the "
                                "constraints in the disjunction "
                                "are additively separable with "
                                "respect to the specified "
                                "partition. If you did not "
                                "specify a partition, only "
                                "a value of P, note that to "
                                "automatically partition the "
                                "variables, we assume all the "
                                "expressions are additively "
                                "separable." %
                                nonlinear_repn['nonlinear_exprs'][i])

                expr_lb, expr_ub = self._config.compute_bounds_method(
                    expr, self._global_constraints,
                    self._config.compute_bounds_solver)
                if expr_lb is None or expr_ub is None:
                    raise GDP_Error("Expression %s from constraint '%s' "
                                    "is unbounded! Please ensure all "
                                    "variables that appear "
                                    "in the constraint are bounded or "
                                    "specify compute_bounds_method="
                                    "compute_optimal_bounds"
                                    " if the expression is bounded by the "
                                    "global constraints." % (expr, cons.name))
                # if the expression was empty wrt the partition, we don't
                # need to bother with any of this. The aux_var doesn't need
                # to exist because it would be 0.
                if type(expr) is not int or expr != 0:
                    aux_var = aux_vars[len(aux_vars)]
                    aux_var.setlb(expr_lb)
                    aux_var.setub(expr_ub)
                    split_aux_vars.append(aux_var)
                    split_constraints[len(split_constraints)] = expr <= aux_var

            if len(vars_accounted_for) < len(vars_not_accounted_for):
                orphans = vars_not_accounted_for - vars_accounted_for
                orphan_string = ""
                for v in orphans:
                    orphan_string += "'%s', " % v.name
                orphan_string = orphan_string[:-2]
                raise GDP_Error("Partition specified for disjunction "
                                "containing Disjunct '%s' does not "
                                "include all the variables that appear "
                                "in the disjunction. The following "
                                "variables are not assigned to any part "
                                "of the partition: %s" %
                                (disjunct.name, orphan_string))
            transformed_constraint[len(transformed_constraint)] = (
                sum(v for v in split_aux_vars) <= rhs - repn.constant)
        # deactivate the constraint since we've transformed it
        cons.deactivate()
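A hedged sketch of driving the partition-based transformation above. The transformation name and the variable_partitions keyword are assumptions inferred from the config entries this method references; confirm both against the Pyomo release in use.

from pyomo.environ import ConcreteModel, Var, TransformationFactory
from pyomo.gdp import Disjunction

m = ConcreteModel()
m.x = Var(bounds=(0, 4))
m.y = Var(bounds=(0, 4))
# each disjunct is additively separable with respect to the partition [[x], [y]]
m.disj = Disjunction(expr=[[m.x ** 2 + m.y ** 2 <= 4], [m.x + m.y >= 6]])
# 'gdp.partition_disjuncts' and variable_partitions are assumed names (see lead-in)
TransformationFactory('gdp.partition_disjuncts').apply_to(
    m, variable_partitions=[[m.x], [m.y]])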
Example #29
def innerdisjunct1(disjunct):
    m = disjunct.model()
    disjunct.c = Constraint(expr=m.x >= 4)
Example #30
    def _transform_disjunct(self, obj, transBlock, varSet, localVars):
        if hasattr(obj, "_gdp_transformation_info"):
            infodict = obj._gdp_transformation_info
            # If the user has something with our name that is not a dict, we
            # scream. If they have a dict with this name then we are just going
            # to use it...
            if type(infodict) is not dict:
                raise GDP_Error(
                    "Disjunct %s contains an attribute named "
                    "_gdp_transformation_info. The transformation requires "
                    "that it can create this attribute!" % obj.name)
        else:
            infodict = obj._gdp_transformation_info = {}
        # deactivated means either we've already transformed or user deactivated
        if not obj.active:
            if obj.indicator_var.is_fixed():
                if value(obj.indicator_var) == 0:
                    # The user cleanly deactivated the disjunct: there
                    # is nothing for us to do here.
                    return
                else:
                    raise GDP_Error(
                        "The disjunct %s is deactivated, but the "
                        "indicator_var is fixed to %s. This makes no sense."
                        % ( obj.name, value(obj.indicator_var) ))
            if not infodict.get('relaxed', False):
                raise GDP_Error(
                    "The disjunct %s is deactivated, but the "
                    "indicator_var is not fixed and the disjunct does not "
                    "appear to have been relaxed. This makes no sense."
                    % ( obj.name, ))

        if 'chull' in infodict:
            # we've transformed it (with CHull), so don't do it again.
            return

        # add reference to original disjunct to info dict on
        # transformation block
        relaxedDisjuncts = transBlock.relaxedDisjuncts
        relaxationBlock = relaxedDisjuncts[len(relaxedDisjuncts)]
        relaxationBlockInfo = relaxationBlock._gdp_transformation_info = {
            'src': obj,
            'srcVars': ComponentMap(),
            'srcConstraints': ComponentMap(),
            'boundConstraintToSrcVar': ComponentMap(),
        }
        infodict['chull'] = chull = {
            'relaxationBlock': relaxationBlock,
            'relaxedConstraints': ComponentMap(),
            'disaggregatedVars': ComponentMap(),
            'bigmConstraints': ComponentMap(),
        }

        # if this is a disjunctData from an indexed disjunct, we are
        # going to want to check at the end that the container is
        # deactivated if everything in it is. So we save it in our
        # dictionary of things to check if it isn't there already.
        disjParent = obj.parent_component()
        if disjParent.is_indexed() and \
           disjParent not in transBlock.disjContainers:
            transBlock.disjContainers.add(disjParent)

        # add the disaggregated variables and their bigm constraints
        # to the relaxationBlock
        for var in varSet:
            lb = var.lb
            ub = var.ub
            if lb is None or ub is None:
                raise GDP_Error("Variables that appear in disjuncts must be "
                                "bounded in order to use the chull "
                                "transformation! Missing bound for %s."
                                % (var.name))

            disaggregatedVar = Var(within=Reals,
                                   bounds=(min(0, lb), max(0, ub)),
                                   initialize=var.value)
            # naming conflicts are possible here since this is a bunch
            # of variables from different blocks coming together, so we
            # get a unique name
            disaggregatedVarName = unique_component_name(
                relaxationBlock, var.local_name)
            relaxationBlock.add_component(
                disaggregatedVarName, disaggregatedVar)
            chull['disaggregatedVars'][var] = disaggregatedVar
            relaxationBlockInfo['srcVars'][disaggregatedVar] = var

            bigmConstraint = Constraint(transBlock.lbub)
            relaxationBlock.add_component(
                disaggregatedVarName + "_bounds", bigmConstraint)
            if lb:
                bigmConstraint.add(
                    'lb', obj.indicator_var*lb <= disaggregatedVar)
            if ub:
                bigmConstraint.add(
                    'ub', disaggregatedVar <= obj.indicator_var*ub)
            chull['bigmConstraints'][var] = bigmConstraint
            relaxationBlockInfo['boundConstraintToSrcVar'][bigmConstraint] = var

        for var in localVars:
            lb = var.lb
            ub = var.ub
            if lb is None or ub is None:
                raise GDP_Error("Variables that appear in disjuncts must be "
                                "bounded in order to use the chull "
                                "transformation! Missing bound for %s."
                                % (var.name))
            if value(lb) > 0:
                var.setlb(0)
            if value(ub) < 0:
                var.setub(0)

            # naming conflicts are possible here since this is a bunch
            # of variables from different blocks coming together, so we
            # get a unique name
            conName = unique_component_name(
                relaxationBlock, var.local_name+"_bounds")
            bigmConstraint = Constraint(transBlock.lbub)
            relaxationBlock.add_component(conName, bigmConstraint)
            bigmConstraint.add('lb', obj.indicator_var*lb <= var)
            bigmConstraint.add('ub', var <= obj.indicator_var*ub)
            chull['bigmConstraints'][var] = bigmConstraint
            relaxationBlockInfo['boundConstraintToSrcVar'][bigmConstraint] = var

        var_substitute_map = dict((id(v), newV) for v, newV in
                                  iteritems(chull['disaggregatedVars']))
        zero_substitute_map = dict((id(v), ZeroConstant) for v, newV in
                                   iteritems(chull['disaggregatedVars']))
        zero_substitute_map.update((id(v), ZeroConstant)
                                   for v in localVars)

        # Transform each component within this disjunct
        self._transform_block_components(obj, obj, infodict, var_substitute_map,
                                         zero_substitute_map)

        # deactivate disjunct so we know we've relaxed it
        obj._deactivate_without_fixing_indicator()
        infodict['relaxed'] = True
Example #31
def d1_rule(disjunct, flag):
    m = disjunct.model()
    if flag:
        disjunct.c = Constraint(expr=m.a == 0)
    else:
        disjunct.c = Constraint(expr=m.a >= 5)
Example #32
    def _xform_constraint(self, obj, disjunct, infodict,
                          bigMargs, suffix_list):
        # add constraint to the transformation block, we'll transform it there.

        relaxationBlock = infodict['bigm']['relaxationBlock']
        transBlock = relaxationBlock.parent_block()
        # Though rare, it is possible to get naming conflicts here
        # since constraints from all blocks are getting moved onto the
        # same block. So we get a unique name
        name = unique_component_name(relaxationBlock, obj.name)

        if obj.is_indexed():
            try:
                newConstraint = Constraint(obj.index_set(), transBlock.lbub)
            except TypeError:
                # The original constraint may have been indexed by a
                # non-concrete set (like an Any).  We will give up on
                # strict index verification and just blindly proceed.
                newConstraint = Constraint(Any)
        else:
            newConstraint = Constraint(transBlock.lbub)
        relaxationBlock.add_component(name, newConstraint)
        # add mapping of original constraint to transformed constraint
        # in transformation info dictionary
        infodict['bigm']['relaxedConstraints'][obj] = newConstraint
        # add mapping of transformed constraint back to original constraint (we
        # know that the info dict is already created because this only got
        # called if we were transforming a disjunct...)
        relaxationBlock._gdp_transformation_info['srcConstraints'][
            newConstraint] = obj

        for i in sorted(iterkeys(obj)):
            c = obj[i]
            if not c.active:
                continue

            # first, we see if an M value was specified in the arguments.
            # (This returns None if not)
            M = self._get_M_from_args(c, bigMargs)

            if __debug__ and logger.isEnabledFor(logging.DEBUG):
                logger.debug("GDP(BigM): The value for M for constraint %s "
                             "from the BigM argument is %s." % (obj.name,
                                                                str(M)))

            # if we didn't get something from args, try suffixes:
            if M is None:
                M = self._get_M_from_suffixes(c, suffix_list)

            if __debug__ and logger.isEnabledFor(logging.DEBUG):
                logger.debug("GDP(BigM): The value for M for constraint %s "
                             "after checking suffixes is %s." % (obj.name,
                                                                 str(M)))

            if not isinstance(M, (tuple, list)):
                if M is None:
                    M = (None, None)
                else:
                    try:
                        M = (-M, M)
                    except:
                        logger.error("Error converting scalar M-value %s "
                                     "to (-M,M).  Is %s not a numeric type?"
                                     % (M, type(M)))
                        raise
            if len(M) != 2:
                raise GDP_Error("Big-M %s for constraint %s is not of "
                                "length two. "
                                "Expected either a single value or "
                                "tuple or list of length two for M."
                                % (str(M), name))

            if c.lower is not None and M[0] is None:
                M = (self._estimate_M(c.body, name)[0] - c.lower, M[1])
            if c.upper is not None and M[1] is None:
                M = (M[0], self._estimate_M(c.body, name)[1] - c.upper)

            if __debug__ and logger.isEnabledFor(logging.DEBUG):
                logger.debug("GDP(BigM): The value for M for constraint %s "
                             "after estimating (if needed) is %s." %
                             (obj.name, str(M)))

            # Handle indices for both SimpleConstraint and IndexedConstraint
            if i.__class__ is tuple:
                i_lb = i + ('lb',)
                i_ub = i + ('ub',)
            elif obj.is_indexed():
                i_lb = (i, 'lb',)
                i_ub = (i, 'ub',)
            else:
                i_lb = 'lb'
                i_ub = 'ub'

            if c.lower is not None:
                if M[0] is None:
                    raise GDP_Error("Cannot relax disjunctive constraint %s "
                                    "because M is not defined." % name)
                M_expr = M[0] * (1 - disjunct.indicator_var)
                newConstraint.add(i_lb, c.lower <= c.body - M_expr)
            if c.upper is not None:
                if M[1] is None:
                    raise GDP_Error("Cannot relax disjunctive constraint %s "
                                    "because M is not defined." % name)
                M_expr = M[1] * (1 - disjunct.indicator_var)
                newConstraint.add(i_ub, c.body - M_expr <= c.upper)
Example #33
    def test_numerical_instability_early_elimination(self):
        # A more subtle numerical problem is that, in infinite precision, a
        # variable might be eliminated early. However, if this goes wrong, the
        # result can be unexpected (including getting no constraints when some
        # are expected.)
        m = ConcreteModel()
        m.x = Var()
        m.x0 = Var()
        m.y = Var()
        
        # we'll pretend that the 1.123e-9 is noise from previous calculations
        m.cons1 = Constraint(expr=0 <= (4.27 + 1.123e-9)*m.x + 13*m.y - m.x0)
        m.cons2 = Constraint(expr=m.x0 >= 12*m.y + 4.27*m.x)

        fme = TransformationFactory('contrib.fourier_motzkin_elimination')
        
        # doing my own clones because I want assertIs tests
        first = m.clone()
        second = m.clone()
        third = m.clone()

        fme.apply_to(first, vars_to_eliminate=[first.x0], zero_tolerance=1e-10)
        constraints = first._pyomo_contrib_fme_transformation.\
                      projected_constraints
        cons = constraints[1]
        self.assertEqual(cons.lower, 0)
        repn = generate_standard_repn(cons.body)
        self.assertTrue(repn.is_linear())
        self.assertEqual(repn.constant, 0)
        self.assertEqual(len(repn.linear_coefs), 2) # x is still around
        self.assertIs(repn.linear_vars[0], first.x)
        self.assertAlmostEqual(repn.linear_coefs[0], 1.123e-9)
        self.assertIs(repn.linear_vars[1], first.y)
        self.assertEqual(repn.linear_coefs[1], 1)
        self.assertIsNone(cons.upper)

        # so just to drive home the point, this results in no constraints:
        # (Though note that this only happens if x0 is the first variable to be
        # projected out)
        fme.apply_to(second, vars_to_eliminate=[second.x0, second.x],
                     zero_tolerance=1e-10)
        self.assertEqual(len(second._pyomo_contrib_fme_transformation.\
                             projected_constraints), 0)
        
        # but in this version, we assume that x is already gone...
        fme.apply_to(third, vars_to_eliminate=[third.x0], verbose=True,
                     zero_tolerance=1e-8)
        constraints = third._pyomo_contrib_fme_transformation.\
                      projected_constraints
        cons = constraints[1]
        self.assertEqual(cons.lower, 0)
        self.assertIs(cons.body, third.y)
        self.assertIsNone(cons.upper)

        # and this is exactly the same as the above:
        fme.apply_to(m, vars_to_eliminate=[m.x0, m.x], verbose=True,
                     zero_tolerance=1e-8)
        constraints = m._pyomo_contrib_fme_transformation.projected_constraints
        cons = constraints[1]
        self.assertEqual(cons.lower, 0)
        self.assertIs(cons.body, m.y)
        self.assertIsNone(cons.upper)
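As a quick reference, the basic call pattern exercised by the test above, condensed into a standalone sketch; it simply projects x0 out of the two linear constraints.

from pyomo.environ import ConcreteModel, Constraint, TransformationFactory, Var

m = ConcreteModel()
m.x = Var()
m.x0 = Var()
m.y = Var()
m.cons1 = Constraint(expr=0 <= 4.27 * m.x + 13 * m.y - m.x0)
m.cons2 = Constraint(expr=m.x0 >= 12 * m.y + 4.27 * m.x)

fme = TransformationFactory('contrib.fourier_motzkin_elimination')
fme.apply_to(m, vars_to_eliminate=[m.x0])
# the projected constraints end up on
# m._pyomo_contrib_fme_transformation.projected_constraints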