Example #1
class BigM_Transformation(Transformation):
    """Relax disjunctive model using big-M terms.

    Relaxes a disjunctive model into an algebraic model by adding Big-M
    terms to all disjunctive constraints.

    This transformation accepts the following keyword arguments:
        bigM: A user-specified value (or dict) of M values to use (see below)
        targets: the targets to transform [default: the instance]

    M values are determined as follows:
       1) if the constraint appears in the bigM argument dict
       2) if the constraint parent_component appears in the bigM
          argument dict
       3) if any block which is an ancestor to the constraint appears in 
          the bigM argument dict
       4) if 'None' is in the bigM argument dict
       5) if the constraint or the constraint parent_component appear in
          a BigM Suffix attached to any parent_block() beginning with the
          constraint's parent_block and moving up to the root model.
       6) if None appears in a BigM Suffix attached to any
          parent_block() between the constraint and the root model.
       7) if the constraint is linear, estimate M using the variable bounds

    M values may be a single value or a 2-tuple specifying the M for the
    lower bound and the upper bound of the constraint body.

    Specifying "bigM=N" is automatically mapped to "bigM={None: N}".

    The transformation will create a new Block with a unique
    name beginning "_pyomo_gdp_bigm_relaxation".  That Block will
    contain an indexed Block named "relaxedDisjuncts", which will hold
    the relaxed disjuncts.  This block is indexed by an integer
    indicating the order in which the disjuncts were relaxed.
    Each block has a dictionary "_constraintMap":
    
        'srcConstraints': ComponentMap(<transformed constraint>:
                                       <src constraint>)
        'transformedConstraints': ComponentMap(<src constraint>:
                                               <transformed constraint>)

    All transformed Disjuncts will have a pointer to the block their transformed
    constraints are on, and all transformed Disjunctions will have a 
    pointer to the corresponding OR or XOR constraint.

    """

    CONFIG = ConfigBlock("gdp.bigm")
    CONFIG.declare('targets', ConfigValue(
        default=None,
        domain=target_list,
        description="target or list of targets that will be relaxed",
        doc="""

        This specifies the list of components to relax. If None (default), the
        entire model is transformed. Note that if the transformation is done out
        of place, the list of targets should be attached to the model before it
        is cloned, and the list will specify the targets on the cloned
        instance."""
    ))
    CONFIG.declare('bigM', ConfigValue(
        default=None,
        domain=_to_dict,
        description="Big-M value used for constraint relaxation",
        doc="""

        A user-specified value, dict, or ComponentMap of M values that override
        M-values found through model Suffixes or that would otherwise be
        calculated using variable domains."""
    ))

    def __init__(self):
        """Initialize transformation object."""
        super(BigM_Transformation, self).__init__()
        self.handlers = {
            Constraint:  self._transform_constraint,
            Var:         False, # Note that if a Var appears on a Disjunct, we
                                # still treat its bounds as global. If the
                                # intent is for its bounds to be on the
                                # disjunct, it should be declared with no bounds
                                # and the bounds should be set in constraints on
                                # the Disjunct.
            Connector:   False,
            Expression:  False,
            Suffix:      False,
            Param:       False,
            Set:         False,
            RangeSet:    False,
            Disjunction: self._warn_for_active_disjunction,
            Disjunct:    self._warn_for_active_disjunct,
            Block:       self._transform_block_on_disjunct,
        }

    def _get_bigm_suffix_list(self, block):
        # Note that you can only specify suffixes on BlockData objects or
        # SimpleBlocks. Though it is possible at this point to stick them
        # on whatever components you want, we won't pick them up.
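        # A usage sketch of the kind of Suffix this walk collects (names are
        # illustrative, not part of this module):
        #     m.BigM = Suffix(direction=Suffix.LOCAL)
        #     m.BigM[None] = 100          # default M for this block's subtree
        #     m.BigM[m.d.c] = (-50, 50)   # per-constraint (lb, ub) pair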
        suffix_list = []
        while block is not None:
            bigm = block.component('BigM')
            if type(bigm) is Suffix:
                suffix_list.append(bigm)
            block = block.parent_block()
        return suffix_list

    def _get_bigm_arg_list(self, bigm_args, block):
        # Gather what we know about blocks from args exactly once. We'll still
        # check for constraints at transformation time, but if that check
        # fails, we've already preprocessed the time-consuming traversal up
        # the tree.
        arg_list = []
        if bigm_args is None:
            return arg_list
        while block is not None:
            if block in bigm_args:
                arg_list.append({block: bigm_args[block]})
            block = block.parent_block()
        return arg_list

    def _apply_to(self, instance, **kwds):
        assert not NAME_BUFFER
        self.used_args = ComponentMap() # If everything was sure to go well,
                                        # this could be a dictionary. But if
                                        # someone messes up and gives us a Var
                                        # as a key in bigMargs, I need the error
                                        # not to be when I try to put it into
                                        # this map!
        try:
            self._apply_to_impl(instance, **kwds)
        finally:
            # Clear the global name buffer now that we are done
            NAME_BUFFER.clear()
            # same for our bookkeeping about what we used from bigM arg dict
            self.used_args.clear()
    
    def _apply_to_impl(self, instance, **kwds):
        config = self.CONFIG(kwds.pop('options', {}))

        # We will let args override suffixes and estimate as a last
        # resort. More specific args/suffixes override ones anywhere in
        # the tree. Suffixes lower down in the tree override ones higher
        # up.
        if 'default_bigM' in kwds:
            logger.warn("DEPRECATED: the 'default_bigM=' argument has been "
                        "replaced by 'bigM='")
            config.bigM = kwds.pop('default_bigM')

        config.set_value(kwds)
        bigM = config.bigM

        targets = config.targets
        if targets is None:
            targets = (instance, )
            _HACK_transform_whole_instance = True
        else:
            _HACK_transform_whole_instance = False
        # We need to check that all the targets are in fact on instance. As we
        # do this, we will use knownBlocks below to cache components we know
        # to be in the tree rooted at instance.
        knownBlocks = {}
        for t in targets:
            # check that t is in fact a child of instance
            if not is_child_of(parent=instance, child=t,
                               knownBlocks=knownBlocks):
                raise GDP_Error("Target %s is not a component on instance %s!"
                                % (t.name, instance.name))
            elif t.ctype is Disjunction:
                if t.parent_component() is t:
                    self._transform_disjunction(t, bigM)
                else:
                    self._transform_disjunctionData( t, bigM, t.index())
            elif t.ctype in (Block, Disjunct):
                if t.parent_component() is t:
                    self._transform_block(t, bigM)
                else:
                    self._transform_blockData(t, bigM)
            else:
                raise GDP_Error(
                    "Target %s was not a Block, Disjunct, or Disjunction. "
                    "It was of type %s and can't be transformed."
                    % (t.name, type(t)))

        # issue warnings about anything that was in the bigM args dict that we
        # didn't use
        if bigM is not None:
            unused_args = ComponentSet(bigM.keys()) - \
                          ComponentSet(self.used_args.keys())
            if len(unused_args) > 0:
                warning_msg = ("Unused arguments in the bigM map! "
                               "These arguments were not used by the "
                               "transformation:\n")
                for component in unused_args:
                    if hasattr(component, 'name'):
                        warning_msg += "\t%s\n" % component.name
                    else:
                        warning_msg += "\t%s\n" % component
                logger.warning(warning_msg)

        # HACK for backwards compatibility with the older GDP transformations
        #
        # Until the writers are updated to find variables on things
        # other than active blocks, we need to reclassify the Disjuncts
        # as Blocks after transformation so that the writer will pick up
        # all the variables that it needs (in this case, indicator_vars).
        if _HACK_transform_whole_instance:
            HACK_GDP_Disjunct_Reclassifier().apply_to(instance)

    def _add_transformation_block(self, instance):
        # make a transformation block on instance to put transformed disjuncts
        # on
        transBlockName = unique_component_name(
            instance,
            '_pyomo_gdp_bigm_relaxation')
        transBlock = Block()
        instance.add_component(transBlockName, transBlock)
        transBlock.relaxedDisjuncts = Block(Any)
        transBlock.lbub = Set(initialize=['lb', 'ub'])

        return transBlock

    def _transform_block(self, obj, bigM):
        for i in sorted(iterkeys(obj)):
            self._transform_blockData(obj[i], bigM)

    def _transform_blockData(self, obj, bigM):
        # Transform every (active) disjunction in the block
        for disjunction in obj.component_objects(
                Disjunction,
                active=True,
                sort=SortComponents.deterministic,
                descend_into=(Block, Disjunct),
                descent_order=TraversalStrategy.PostfixDFS):
            self._transform_disjunction(disjunction, bigM)

    def _add_xor_constraint(self, disjunction, transBlock):
        # Put the disjunction constraint on the transformation block and
        # determine whether it is an OR or XOR constraint.

        # We never do this for just a DisjunctionData because we need to know
        # about the index set of its parent component (so that we can make the
        # index of this constraint match). So if we called this on a
        # DisjunctionData, we did something wrong.
        assert isinstance(disjunction, Disjunction)

        # first check if the constraint already exists
        if disjunction._algebraic_constraint is not None:
            return disjunction._algebraic_constraint()

        # add the XOR (or OR) constraints to parent block (with unique name)
        # It's indexed if this is an IndexedDisjunction, not otherwise
        orC = Constraint(disjunction.index_set()) if \
            disjunction.is_indexed() else Constraint()
        # The name used to indicate whether the disjunction was an OR or an
        # XOR constraint; however, now that Disjunctions are allowed to mix
        # the two, we can no longer make that distinction in the name.
        #    nm = '_xor' if xor else '_or'
        nm = '_xor'
        orCname = unique_component_name( transBlock, disjunction.getname(
            fully_qualified=True, name_buffer=NAME_BUFFER) + nm)
        transBlock.add_component(orCname, orC)
        disjunction._algebraic_constraint = weakref_ref(orC)

        return orC

    def _transform_disjunction(self, obj, bigM):
        if not obj.active:
            return

        # if this is an IndexedDisjunction we have seen in a prior call to the
        # transformation, we already have a transformation block for it. We'll
        # use that.
        if obj._algebraic_constraint is not None:
            transBlock = obj._algebraic_constraint().parent_block()
        else:
            transBlock = self._add_transformation_block(obj.parent_block())

        # If this is an IndexedDisjunction, we have to create the XOR constraint
        # here because we want its index to match the disjunction. In any case,
        # we might as well.
        xorConstraint = self._add_xor_constraint(obj, transBlock)

        # relax each of the disjunctionDatas
        for i in sorted(iterkeys(obj)):
            self._transform_disjunctionData(obj[i], bigM, i, xorConstraint,
                                            transBlock)

        # deactivate so the writers don't scream
        obj.deactivate()

    def _transform_disjunctionData(self, obj, bigM, index, xorConstraint=None,
                                   transBlock=None):
        if not obj.active:
            return  # Do not process a deactivated disjunction 
        # We won't have these arguments if this got called straight from
        # targets. Otherwise, we created them earlier and have just been
        # passing them through.
        if transBlock is None:
            # It's possible that we have already created a transformation block
            # for another disjunctionData from this same container. If that's
            # the case, let's use the same transformation block. (Else it will
            # be really confusing that the XOR constraint goes to that old block
            # but we create a new one here.)
            if obj.parent_component()._algebraic_constraint is not None:
                transBlock = obj.parent_component()._algebraic_constraint().\
                             parent_block()
            else:
                transBlock = self._add_transformation_block(obj.parent_block())
        if xorConstraint is None:
            xorConstraint = self._add_xor_constraint(obj.parent_component(),
                                                     transBlock)

        xor = obj.xor
        or_expr = 0
        # Just because it's unlikely this is what someone meant to do...    
        if len(obj.disjuncts) == 0:
            raise GDP_Error("Disjunction %s is empty. This is " 
                            "likely indicative of a modeling error."  %
                            obj.getname(fully_qualified=True,
                                        name_buffer=NAME_BUFFER))
        for disjunct in obj.disjuncts:
            or_expr += disjunct.indicator_var
            # make suffix list. (We don't need it until we are
            # transforming constraints, but it gets created at the
            # disjunct level, so more efficient to make it here and
            # pass it down.)
            suffix_list = self._get_bigm_suffix_list(disjunct)
            arg_list = self._get_bigm_arg_list(bigM, disjunct)
            # relax the disjunct
            self._transform_disjunct(disjunct, transBlock, bigM, arg_list,
                                     suffix_list)

        # add or (or xor) constraint
        if xor:
            xorConstraint[index] = or_expr == 1
        else:
            xorConstraint[index] = or_expr >= 1
        # Mark the DisjunctionData as transformed by mapping it to its XOR
        # constraint.
        obj._algebraic_constraint = weakref_ref(xorConstraint[index])
        
        # and deactivate for the writers
        obj.deactivate()

    def _transform_disjunct(self, obj, transBlock, bigM, arg_list, suffix_list):
        # deactivated -> either we've already transformed or user deactivated
        if not obj.active:
            if obj.indicator_var.is_fixed():
                if value(obj.indicator_var) == 0:
                    # The user cleanly deactivated the disjunct: there
                    # is nothing for us to do here.
                    return
                else:
                    raise GDP_Error(
                        "The disjunct %s is deactivated, but the "
                        "indicator_var is fixed to %s. This makes no sense."
                        % ( obj.name, value(obj.indicator_var) ))
            if obj._transformation_block is None:
                raise GDP_Error(
                    "The disjunct %s is deactivated, but the "
                    "indicator_var is not fixed and the disjunct does not "
                    "appear to have been relaxed. This makes no sense. "
                    "(If the intent is to deactivate the disjunct, fix its "
                    "indicator_var to 0.)"
                    % ( obj.name, ))
                
        if obj._transformation_block is not None:
            # we've transformed it, which means this is the second time it's
            # appearing in a Disjunction
            raise GDP_Error(
                    "The disjunct %s has been transformed, but a disjunction "
                    "it appears in has not. Putting the same disjunct in "
                    "multiple disjunctions is not supported." % obj.name)

        # add reference to original disjunct on transformation block
        relaxedDisjuncts = transBlock.relaxedDisjuncts
        relaxationBlock = relaxedDisjuncts[len(relaxedDisjuncts)]
        # we will keep a map of constraints (hashable, ha!) to a tuple to
        # indicate where their m value came from, either (arg dict, key) if it
        # came from args, (Suffix, key) if it came from Suffixes, or (M_lower,
        # M_upper) if we calculated it ourselves. I am keeping it here because I
        # want it to move with the disjunct transformation blocks in the case of
        # nested constraints, to make it easier to query.
        relaxationBlock.bigm_src = {}
        obj._transformation_block = weakref_ref(relaxationBlock)
        relaxationBlock._srcDisjunct = weakref_ref(obj)

        # This is crazy, but if the disjunction has been previously
        # relaxed, the disjunct *could* be deactivated.  This is a big
        # deal for CHull, as it uses the component_objects /
        # component_data_objects generators.  For BigM, that is OK,
        # because we never use those generators with active=True.  I am
        # only noting it here for the future when someone (me?) is
        # comparing the two relaxations.
        #
        # Transform each component within this disjunct
        self._transform_block_components(obj, obj, bigM, arg_list, suffix_list)

        # deactivate disjunct to keep the writers happy
        obj._deactivate_without_fixing_indicator()

    def _transform_block_components(self, block, disjunct, bigM, arg_list,
                                    suffix_list):
        # We first need to find any transformed disjunctions that might be here
        # because we need to move their transformation blocks up onto the parent
        # block before we transform anything else on this block 
        destinationBlock = disjunct._transformation_block().parent_block()
        for obj in block.component_data_objects(
                Disjunction, 
                sort=SortComponents.deterministic, 
                descend_into=(Block)):
            if obj.algebraic_constraint is None:
                # This could be bad if it's active, since that means it's
                # untransformed, but we'll wait to yell until the next loop
                continue
            # get this disjunction's relaxation block.
            transBlock = obj.algebraic_constraint().parent_block()
            
            # move transBlock up to parent component
            self._transfer_transBlock_data(transBlock, destinationBlock)
            # we leave the transformation block because it still has the XOR
            # constraints, which we want to be on the parent disjunct.

        # Now look through the component map of block and transform everything
        # we have a handler for. Yell if we don't know how to handle it. (Note
        # that because we only iterate through active components, this means
        # non-ActiveComponent types cannot have handlers.)
        for obj in block.component_objects(active=True, descend_into=False):
            handler = self.handlers.get(obj.ctype, None)
            if not handler:
                if handler is None:
                    raise GDP_Error(
                        "No BigM transformation handler registered "
                        "for modeling components of type %s. If your " 
                        "disjuncts contain non-GDP Pyomo components that "
                        "require transformation, please transform them first."
                        % obj.ctype)
                continue
            # obj is what we are transforming, we pass disjunct
            # through so that we will have access to the indicator
            # variables down the line.
            handler(obj, disjunct, bigM, arg_list, suffix_list)

    def _transfer_transBlock_data(self, fromBlock, toBlock):
        # We know that we have a list of transformed disjuncts on both. We need
        # to move those over. We know the XOR constraints are on the block, and
        # we need to leave those on the disjunct.
        disjunctList = toBlock.relaxedDisjuncts
        for idx, disjunctBlock in iteritems(fromBlock.relaxedDisjuncts):
            newblock = disjunctList[len(disjunctList)]
            newblock.transfer_attributes_from(disjunctBlock)

            # update the mappings
            original = disjunctBlock._srcDisjunct()
            original._transformation_block = weakref_ref(newblock)
            newblock._srcDisjunct = weakref_ref(original)

        # we delete this container because we just moved everything out
        del fromBlock.relaxedDisjuncts

        # Note that we could handle other components here if we ever needed
        # to, but we control what is on the transformation block and
        # currently everything is on the blocks that we just moved...

    def _warn_for_active_disjunction(self, disjunction, disjunct, bigMargs,
                                     arg_list, suffix_list):
        # this should only have gotten called if the disjunction is active
        assert disjunction.active
        problemdisj = disjunction
        if disjunction.is_indexed():
            for i in sorted(iterkeys(disjunction)):
                if disjunction[i].active:
                    # a _DisjunctionData is active, we will yell about
                    # it specifically.
                    problemdisj = disjunction[i]
                    break

        parentblock = problemdisj.parent_block()
        # the disjunction should only have been active if it wasn't transformed
        assert problemdisj.algebraic_constraint is None
        _probDisjName = problemdisj.getname(
            fully_qualified=True, name_buffer=NAME_BUFFER)
        raise GDP_Error("Found untransformed disjunction %s in disjunct %s! "
                        "The disjunction must be transformed before the "
                        "disjunct. If you are using targets, put the "
                        "disjunction before the disjunct in the list."
                        % (_probDisjName, disjunct.name))

    def _warn_for_active_disjunct(self, innerdisjunct, outerdisjunct, bigMargs,
                                  arg_list, suffix_list):
        assert innerdisjunct.active
        problemdisj = innerdisjunct
        if innerdisjunct.is_indexed():
            for i in sorted(iterkeys(innerdisjunct)):
                if innerdisjunct[i].active:
                    # This shouldn't be true; we will complain about it.
                    problemdisj = innerdisjunct[i]
                    break

        raise GDP_Error("Found active disjunct {0} in disjunct {1}! "
                        "Either {0} "
                        "is not in a disjunction or the disjunction it is in "
                        "has not been transformed. "
                        "{0} needs to be deactivated "
                        "or its disjunction transformed before {1} can be "
                        "transformed.".format(problemdisj.name,
                                              outerdisjunct.name))

    def _transform_block_on_disjunct(self, block, disjunct, bigMargs, arg_list,
                                     suffix_list):
        # We look through everything on the component map of the block
        # and transform it just as we would if it was on the disjunct
        # directly.  (We are passing the disjunct through so that when
        # we find constraints, _xform_constraint will have access to
        # the correct indicator variable.)
        for i in sorted(iterkeys(block)):
            self._transform_block_components( block[i], disjunct, bigMargs,
                                              arg_list, suffix_list)

    def _get_constraint_map_dict(self, transBlock):
        if not hasattr(transBlock, "_constraintMap"):
            transBlock._constraintMap = {
                'srcConstraints': ComponentMap(),
                'transformedConstraints': ComponentMap()}
        return transBlock._constraintMap

    def _transform_constraint(self, obj, disjunct, bigMargs, arg_list,
                              suffix_list):
        # add constraint to the transformation block, we'll transform it there.
        transBlock = disjunct._transformation_block()
        bigm_src = transBlock.bigm_src
        constraintMap = self._get_constraint_map_dict(transBlock)
        
        disjunctionRelaxationBlock = transBlock.parent_block()
        # Though rare, it is possible to get naming conflicts here
        # since constraints from all blocks are getting moved onto the
        # same block. So we get a unique name
        cons_name = obj.getname(fully_qualified=True, name_buffer=NAME_BUFFER)
        name = unique_component_name(transBlock, cons_name)

        if obj.is_indexed():
            try:
                newConstraint = Constraint(obj.index_set(),
                                           disjunctionRelaxationBlock.lbub)
            # HACK: We get burned by #191 here... When #1319 is merged we
            # can revisit this and I think stop catching the AttributeError.
            except (TypeError, AttributeError):
                # The original constraint may have been indexed by a
                # non-concrete set (like an Any).  We will give up on
                # strict index verification and just blindly proceed.
                newConstraint = Constraint(Any)
        else:
            newConstraint = Constraint(disjunctionRelaxationBlock.lbub)
        transBlock.add_component(name, newConstraint)
        # add mapping of original constraint to transformed constraint
        constraintMap['srcConstraints'][newConstraint] = obj
        constraintMap['transformedConstraints'][obj] = newConstraint

        for i in sorted(iterkeys(obj)):
            c = obj[i]
            if not c.active:
                continue

            # first, we see if an M value was specified in the arguments.
            # (This returns None if not)
            M = self._get_M_from_args(c, bigMargs, arg_list, bigm_src)

            if __debug__ and logger.isEnabledFor(logging.DEBUG):
                _name = obj.getname(
                    fully_qualified=True, name_buffer=NAME_BUFFER)
                logger.debug("GDP(BigM): The value for M for constraint %s "
                             "from the BigM argument is %s." % (cons_name,
                                                                str(M)))

            # if we didn't get something from args, try suffixes:
            if M is None:
                M = self._get_M_from_suffixes(c, suffix_list, bigm_src)

            if __debug__ and logger.isEnabledFor(logging.DEBUG):
                _name = obj.getname(
                    fully_qualified=True, name_buffer=NAME_BUFFER)
                logger.debug("GDP(BigM): The value for M for constraint %s "
                             "after checking suffixes is %s." % (cons_name,
                                                                 str(M)))

            if not isinstance(M, (tuple, list)):
                if M is None:
                    M = (None, None)
                else:
                    try:
                        M = (-M, M)
                    except:
                        logger.error("Error converting scalar M-value %s "
                                     "to (-M,M).  Is %s not a numeric type?"
                                     % (M, type(M)))
                        raise
            if len(M) != 2:
                raise GDP_Error("Big-M %s for constraint %s is not of "
                                "length two. "
                                "Expected either a single value or "
                                "tuple or list of length two for M."
                                % (str(M), name))

            if c.lower is not None and M[0] is None:
                M = (self._estimate_M(c.body, name)[0] - c.lower, M[1])
                bigm_src[c] = M
            if c.upper is not None and M[1] is None:
                M = (M[0], self._estimate_M(c.body, name)[1] - c.upper)
                bigm_src[c] = M

            if __debug__ and logger.isEnabledFor(logging.DEBUG):
                _name = obj.getname(
                    fully_qualified=True, name_buffer=NAME_BUFFER)
                logger.debug("GDP(BigM): The value for M for constraint %s "
                             "after estimating (if needed) is %s." %
                             (cons_name, str(M)))

            # Handle indices for both SimpleConstraint and IndexedConstraint
            if i.__class__ is tuple:
                i_lb = i + ('lb',)
                i_ub = i + ('ub',)
            elif obj.is_indexed():
                i_lb = (i, 'lb',)
                i_ub = (i, 'ub',)
            else:
                i_lb = 'lb'
                i_ub = 'ub'

            if c.lower is not None:
                if M[0] is None:
                    raise GDP_Error("Cannot relax disjunctive constraint %s "
                                    "because M is not defined." % name)
                M_expr = M[0] * (1 - disjunct.indicator_var)
                newConstraint.add(i_lb, c.lower <= c.body - M_expr)
            if c.upper is not None:
                if M[1] is None:
                    raise GDP_Error("Cannot relax disjunctive constraint %s "
                                    "because M is not defined." % name)
                M_expr = M[1] * (1 - disjunct.indicator_var)
                newConstraint.add(i_ub, c.body - M_expr <= c.upper)
            # deactivate because we relaxed
            c.deactivate()

    def _get_M_from_args(self, constraint, bigMargs, arg_list, bigm_src):
        # check args: we first look in the keys for constraint and
        # constraintdata. In the absence of those, we traverse up the blocks,
        # and as a last resort check for a value for None
        if bigMargs is None:
            return None

        # check for the constraint itself and its container
        parent = constraint.parent_component()
        if constraint in bigMargs:
            m = bigMargs[constraint]
            self.used_args[constraint] = m
            bigm_src[constraint] = (bigMargs, constraint)
            return m
        elif parent in bigMargs:
            m = bigMargs[parent]
            self.used_args[parent] = m
            bigm_src[constraint] = (bigMargs, parent)
            return m

        # use the precomputed traversal up the blocks
        for arg in arg_list:
            for block, val in iteritems(arg):
                self.used_args[block] = val
                bigm_src[constraint] = (bigMargs, block)
                return val
                
        # last check for value for None!
        if None in bigMargs:
            m = bigMargs[None]
            self.used_args[None] = m
            bigm_src[constraint] = (bigMargs, None)
            return m
        return None

    def _get_M_from_suffixes(self, constraint, suffix_list, bigm_src):
        M = None
        # first we check if the constraint or its parent is a key in any of the
        # suffix lists
        for bigm in suffix_list:
            if constraint in bigm:
                M = bigm[constraint]
                bigm_src[constraint] = (bigm, constraint)
                break

            # if the constraint is indexed, check for the parent component
            if constraint.parent_component() in bigm:
                M = bigm[constraint.parent_component()]
                bigm_src[constraint] = (bigm, constraint.parent_component())
                break

        # if we didn't get an M that way, traverse upwards through the blocks
        # and see if None has a value on any of them.
        if M is None:
            for bigm in suffix_list:
                if None in bigm:
                    M = bigm[None]
                    bigm_src[constraint] = (bigm, None)
                    break
        return M

    def _estimate_M(self, expr, name):
        # Calculate a best guess at M
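        # Worked example (illustrative, not from any particular model): for a
        # linear body x + 2*y with x in [0, 5] and y in [0, 3], the loop below
        # gives M = (0*1 + 0*2, 5*1 + 3*2) = (0, 11), i.e. the bounds on the
        # expression implied by the variable bounds.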
        repn = generate_standard_repn(expr, quadratic=False)
        M = [0, 0]

        if not repn.is_nonlinear():
            if repn.constant is not None:
                for i in (0, 1):
                    if M[i] is not None:
                        M[i] += repn.constant

            for i, coef in enumerate(repn.linear_coefs or []):
                var = repn.linear_vars[i]
                bounds = (value(var.lb), value(var.ub))
                for k in (0, 1):
                    # reverse the bounds if the coefficient is negative
                    if coef > 0:
                        j = k
                    else:
                        j = 1 - k

                    if bounds[k] is not None:
                        M[j] += value(bounds[k]) * coef
                    else:
                        raise GDP_Error(
                            "Cannot estimate M for "
                            "expressions with unbounded variables."
                            "\n\t(found unbounded var %s while processing "
                            "constraint %s)" % (var.name, name))
        else:
            # expression is nonlinear. Try using `contrib.fbbt` to estimate.
            expr_lb, expr_ub = compute_bounds_on_expr(expr)
            if expr_lb is None or expr_ub is None:
                raise GDP_Error("Cannot estimate M for unbounded nonlinear "
                                "expressions.\n\t(found while processing "
                                "constraint %s)" % name)
            else:
                M = (expr_lb, expr_ub)

        return tuple(M)

    # These are all functions to retrieve transformed components from original
    # ones and vice versa.
    def get_src_disjunct(self, transBlock):
        """Return the Disjunct object whose transformed components are on
        transBlock.

        Parameters
        ----------
        transBlock: _BlockData which is in the relaxedDisjuncts IndexedBlock
                    on a transformation block.
        """
        try:
            return transBlock._srcDisjunct()
        except:
            raise GDP_Error("Block %s doesn't appear to be a transformation "
                            "block for a disjunct. No source disjunct found." 
                            "\n\t(original error: %s)" 
                            % (transBlock.name, sys.exc_info()[1]))

    def get_src_constraint(self, transformedConstraint):
        """Return the original Constraint whose transformed counterpart is
        transformedConstraint

        Parameters
        ----------
        transformedConstraint: Constraint, which must be a component on one of
                               the BlockDatas in the relaxedDisjuncts Block of
                               a transformation block
        """
        transBlock = transformedConstraint.parent_block()
        # This should be our block, so if it's not, the user messed up and gave
        # us the wrong thing. If they happen to also have a _constraintMap then
        # the world is really against us.
        if not hasattr(transBlock, "_constraintMap"):
            raise GDP_Error("Constraint %s is not a transformed constraint" 
                            % transformedConstraint.name)
        # if something goes wrong here, it's a bug in the mappings.
        return transBlock._constraintMap['srcConstraints'][transformedConstraint]

    def _find_parent_disjunct(self, constraint):
        # traverse up until we find the disjunct this constraint lives on
        parent_disjunct = constraint.parent_block()
        while not isinstance(parent_disjunct, _DisjunctData):
            if parent_disjunct is None:
                raise GDP_Error(
                    "Constraint %s is not on a disjunct and so was not "
                    "transformed" % constraint.name)
            parent_disjunct = parent_disjunct.parent_block()

        return parent_disjunct

    def _get_constraint_transBlock(self, constraint):
        parent_disjunct = self._find_parent_disjunct(constraint)
        # we know from _find_parent_disjunct that parent_disjunct is a Disjunct,
        # so the below is OK
        transBlock = parent_disjunct._transformation_block
        if transBlock is None:
            raise GDP_Error("Constraint %s is on a disjunct which has not been "
                            "transformed" % constraint.name)
        # if it's not None, it's the weakref we wanted.
        transBlock = transBlock()

        return transBlock

    def get_transformed_constraint(self, srcConstraint):
        """Return the transformed version of srcConstraint

        Parameters
        ----------
        srcConstraint: Constraint, which must be in the subtree of a
                       transformed Disjunct
        """
        transBlock = self._get_constraint_transBlock(srcConstraint)
        
        if hasattr(transBlock, "_constraintMap") and transBlock._constraintMap[
                'transformedConstraints'].get(srcConstraint):
            return transBlock._constraintMap['transformedConstraints'][
                srcConstraint]
        raise GDP_Error("Constraint %s has not been transformed." 
                        % srcConstraint.name)

    def get_src_disjunction(self, xor_constraint):
        """Return the Disjunction corresponding to xor_constraint

        Parameters
        ----------
        xor_constraint: Constraint, which must be the logical constraint 
                        (located on the transformation block) of some 
                        Disjunction
        """
        # NOTE: This is indeed a linear search through the Disjunctions on the
        # model. I am leaving it this way on the assumption that asking XOR
        # constraints for their Disjunction is not going to be a common
        # question. If we ever need efficiency then we should store a reverse
        # map from the XOR constraint to the Disjunction on the transformation
        # block while we do the transformation. And then this method could query
        # that map.
        m = xor_constraint.model()
        for disjunction in m.component_data_objects(Disjunction):
            if disjunction._algebraic_constraint:
                if disjunction._algebraic_constraint() is xor_constraint:
                    return disjunction
        raise GDP_Error("It appears that %s is not an XOR or OR constraint "
                        "resulting from transforming a Disjunction."
                        % xor_constraint.name)

    def get_m_value_src(self, constraint):
        """Return a tuple indicating how the M value used to transform 
        constraint was specified. (In particular, this can be used to 
        verify which BigM Suffixes were actually necessary to the 
        transformation.)

        If the M value came from an arg, returns (bigm_arg_dict, key), where 
        bigm_arg_dict is the dictionary itself and key is the key in that 
        dictionary which gave us the M value.

        If the M value came from a Suffix, returns (suffix, key) where suffix 
        is the BigM suffix used and key is the key in that Suffix.

        If the transformation calculated the value, returns (M_lower, M_upper),
        where M_lower is the float we calculated for the lower bound constraint
        and M_upper is the value calculated for the upper bound constraint.

        Parameters
        ----------
        constraint: Constraint, which must be in the subtree of a transformed 
                    Disjunct
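
        For example (a sketch; the transformation object and model components
        below are illustrative, not defined here):

            bigm = TransformationFactory('gdp.bigm')
            bigm.apply_to(m, bigM={None: 100})
            bigm.get_m_value_src(m.d1.c)  # returns (the bigM mapping, None)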
        """
        transBlock = self._get_constraint_transBlock(constraint)
        # This is a KeyError if it fails, but it is also my fault if it
        # fails... (That is, it's a bug in the mapping.)
        return transBlock.bigm_src[constraint]
Example #2
class XpressDirect(DirectSolver):
    def __init__(self, **kwds):
        if 'type' not in kwds:
            kwds['type'] = 'xpress_direct'
        super(XpressDirect, self).__init__(**kwds)
        self._pyomo_var_to_solver_var_map = ComponentMap()
        self._solver_var_to_pyomo_var_map = ComponentMap()
        self._pyomo_con_to_solver_con_map = dict()
        self._solver_con_to_pyomo_con_map = ComponentMap()

        self._name = None
        try:
            import xpress
            self._xpress = xpress
            self._python_api_exists = True
            self._version = tuple(
                int(k) for k in self._xpress.getversion().split('.'))
            self._name = "Xpress %s.%s.%s" % self._version
            self._version_major = self._version[0]
            # In versions prior to 34, xpress raised a RuntimeError, but more
            # recent versions raise an xpress.ModelError. We'll cache the
            # appropriate one here.
            if self._version_major < 34:
                self._XpressException = RuntimeError
            else:
                self._XpressException = xpress.ModelError
        except ImportError:
            self._python_api_exists = False
        except Exception as e:
            # other forms of exceptions can be thrown by the xpress python
            # import. For example, an xpress.InterfaceError exception is thrown
            # if the Xpress license is not valid. Unfortunately, you can't
            # import without a license, which means we can't test for the
            # exception above!
            print("Import of xpress failed - xpress message=" + str(e) + "\n")
            self._python_api_exists = False

        self._range_constraints = set()

        # TODO: this isn't a limit of XPRESS, which implements an SLP
        #       method for NLPs. But it is a limit of *this* interface
        self._max_obj_degree = 2
        self._max_constraint_degree = 2

        # There does not seem to be an easy way to get the
        # wallclock time out of xpress, so we will measure it
        # ourselves
        self._opt_time = None

        # Note: Undefined capabilities default to None
        self._capabilities.linear = True
        self._capabilities.quadratic_objective = True
        self._capabilities.quadratic_constraint = True
        self._capabilities.integer = True
        self._capabilities.sos1 = True
        self._capabilities.sos2 = True

    def _apply_solver(self):
        if not self._save_results:
            for block in self._pyomo_model.block_data_objects(
                    descend_into=True, active=True):
                for var in block.component_data_objects(
                        ctype=pyomo.core.base.var.Var,
                        descend_into=False,
                        active=True,
                        sort=False):
                    var.stale = True

        self._solver_model.setlogfile(self._log_file)
        if self._keepfiles:
            print("Solver log file: " + self.log_file)

        # setting a log file in xpress disables all output
        # this callback prints all messages to stdout
        if self._tee:
            self._solver_model.addcbmessage(_print_message, None, 0)

        # set xpress options
        # if the user specifies a 'mipgap', set it, and
        # set xpress's related options to 0.
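        # A usage sketch of passing options (typical Pyomo idiom; the option
        # names below are illustrative):
        #     opt = SolverFactory('xpress_direct')
        #     opt.options['mipgap'] = 0.01   # handled specially here
        #     opt.options['maxtime'] = 60    # passed through setControl below
        #     results = opt.solve(m, tee=True)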
        if self.options.mipgap is not None:
            self._solver_model.setControl('miprelstop',
                                          float(self.options.mipgap))
            self._solver_model.setControl('miprelcutoff', 0.0)
            self._solver_model.setControl('mipaddcutoff', 0.0)
        # xpress is picky about the type which is passed
        # into a control. So we will infer and cast
        # get the xpress valid controls
        xp_controls = self._xpress.controls
        for key, option in self.options.items():
            if key == 'mipgap':  # handled above
                continue
            try:
                self._solver_model.setControl(key, option)
            except self._XpressException:
                # take another try, converting the option to the control's
                # type; if it cannot be converted, re-raise the original
                # xpress error
                contr_type = type(getattr(xp_controls, key))
                if not _is_convertable(contr_type, option):
                    raise
                self._solver_model.setControl(key, contr_type(option))

        start_time = time.time()
        self._solver_model.solve()
        self._opt_time = time.time() - start_time

        self._solver_model.setlogfile('')
        if self._tee:
            self._solver_model.removecbmessage(_print_message, None)

        # FIXME: can we get a return code indicating if XPRESS had a significant failure?
        return Bunch(rc=None, log=None)

    def _get_expr_from_pyomo_repn(self, repn, max_degree=2):
        referenced_vars = ComponentSet()

        degree = repn.polynomial_degree()
        if (degree is None) or (degree > max_degree):
            raise DegreeError(
                'XpressDirect does not support expressions of degree {0}.'.
                format(degree))

        # NOTE: xpress's python interface only allows for expressions
        #       with native numeric types. Others, like numpy.float64,
        #       will cause an exception when constructing expressions
        if len(repn.linear_vars) > 0:
            referenced_vars.update(repn.linear_vars)
            new_expr = self._xpress.Sum(
                float(coef) * self._pyomo_var_to_solver_var_map[var]
                for coef, var in zip(repn.linear_coefs, repn.linear_vars))
        else:
            new_expr = 0.0

        for coef, (x, y) in zip(repn.quadratic_coefs, repn.quadratic_vars):
            new_expr += float(coef) * self._pyomo_var_to_solver_var_map[
                x] * self._pyomo_var_to_solver_var_map[y]
            referenced_vars.add(x)
            referenced_vars.add(y)

        new_expr += repn.constant

        return new_expr, referenced_vars

    def _get_expr_from_pyomo_expr(self, expr, max_degree=2):
        if max_degree == 2:
            repn = generate_standard_repn(expr, quadratic=True)
        else:
            repn = generate_standard_repn(expr, quadratic=False)

        try:
            xpress_expr, referenced_vars = self._get_expr_from_pyomo_repn(
                repn, max_degree)
        except DegreeError as e:
            msg = e.args[0]
            msg += '\nexpr: {0}'.format(expr)
            raise DegreeError(msg)

        return xpress_expr, referenced_vars

    def _add_var(self, var):
        varname = self._symbol_map.getSymbol(var, self._labeler)
        vartype = self._xpress_vartype_from_var(var)
        if var.has_lb():
            lb = value(var.lb)
        else:
            lb = -self._xpress.infinity
        if var.has_ub():
            ub = value(var.ub)
        else:
            ub = self._xpress.infinity
        if var.is_fixed():
            lb = value(var.value)
            ub = value(var.value)

        xpress_var = self._xpress.var(name=varname,
                                      lb=lb,
                                      ub=ub,
                                      vartype=vartype)
        self._solver_model.addVariable(xpress_var)

        ## bounds on binary variables don't seem to be set correctly
        ## by the method above
        if vartype == self._xpress.binary:
            if lb == ub:
                self._solver_model.chgbounds([xpress_var], ['B'], [lb])
            else:
                self._solver_model.chgbounds([xpress_var, xpress_var],
                                             ['L', 'U'], [lb, ub])

        self._pyomo_var_to_solver_var_map[var] = xpress_var
        self._solver_var_to_pyomo_var_map[xpress_var] = var
        self._referenced_variables[var] = 0

    def _set_instance(self, model, kwds={}):
        self._range_constraints = set()
        DirectOrPersistentSolver._set_instance(self, model, kwds)
        self._pyomo_con_to_solver_con_map = dict()
        self._solver_con_to_pyomo_con_map = ComponentMap()
        self._pyomo_var_to_solver_var_map = ComponentMap()
        self._solver_var_to_pyomo_var_map = ComponentMap()
        try:
            if model.name is not None:
                self._solver_model = self._xpress.problem(name=model.name)
            else:
                self._solver_model = self._xpress.problem()
        except Exception:
            e = sys.exc_info()[1]
            msg = ("Unable to create Xpress model. "
                   "Have you installed the Python "
                   "bindings for Xpress?\n\n\t" +
                   "Error message: {0}".format(e))
            raise Exception(msg)
        self._add_block(model)

    def _add_block(self, block):
        DirectOrPersistentSolver._add_block(self, block)

    def _add_constraint(self, con):
        if not con.active:
            return None

        if is_fixed(con.body):
            if self._skip_trivial_constraints:
                return None

        conname = self._symbol_map.getSymbol(con, self._labeler)

        if con._linear_canonical_form:
            xpress_expr, referenced_vars = self._get_expr_from_pyomo_repn(
                con.canonical_form(), self._max_constraint_degree)
        else:
            xpress_expr, referenced_vars = self._get_expr_from_pyomo_expr(
                con.body, self._max_constraint_degree)

        if con.has_lb():
            if not is_fixed(con.lower):
                raise ValueError("Lower bound of constraint {0} "
                                 "is not constant.".format(con))
        if con.has_ub():
            if not is_fixed(con.upper):
                raise ValueError("Upper bound of constraint {0} "
                                 "is not constant.".format(con))

        if con.equality:
            xpress_con = self._xpress.constraint(body=xpress_expr,
                                                 sense=self._xpress.eq,
                                                 rhs=value(con.lower),
                                                 name=conname)
        elif con.has_lb() and con.has_ub():
            xpress_con = self._xpress.constraint(body=xpress_expr,
                                                 sense=self._xpress.range,
                                                 lb=value(con.lower),
                                                 ub=value(con.upper),
                                                 name=conname)
            self._range_constraints.add(xpress_con)
        elif con.has_lb():
            xpress_con = self._xpress.constraint(body=xpress_expr,
                                                 sense=self._xpress.geq,
                                                 rhs=value(con.lower),
                                                 name=conname)
        elif con.has_ub():
            xpress_con = self._xpress.constraint(body=xpress_expr,
                                                 sense=self._xpress.leq,
                                                 rhs=value(con.upper),
                                                 name=conname)
        else:
            raise ValueError("Constraint does not have a lower "
                             "or an upper bound: {0} \n".format(con))

        self._solver_model.addConstraint(xpress_con)

        for var in referenced_vars:
            self._referenced_variables[var] += 1
        self._vars_referenced_by_con[con] = referenced_vars
        self._pyomo_con_to_solver_con_map[con] = xpress_con
        self._solver_con_to_pyomo_con_map[xpress_con] = con

    def _add_sos_constraint(self, con):
        if not con.active:
            return None

        conname = self._symbol_map.getSymbol(con, self._labeler)
        level = con.level
        if level not in [1, 2]:
            raise ValueError("Solver does not support SOS "
                             "level {0} constraints".format(level))

        xpress_vars = []
        weights = []

        self._vars_referenced_by_con[con] = ComponentSet()

        if hasattr(con, 'get_items'):
            # aml sos constraint
            sos_items = list(con.get_items())
        else:
            # kernel sos constraint
            sos_items = list(con.items())

        for v, w in sos_items:
            self._vars_referenced_by_con[con].add(v)
            xpress_vars.append(self._pyomo_var_to_solver_var_map[v])
            self._referenced_variables[v] += 1
            weights.append(w)

        xpress_con = self._xpress.sos(xpress_vars, weights, level, conname)
        self._solver_model.addSOS(xpress_con)
        self._pyomo_con_to_solver_con_map[con] = xpress_con
        self._solver_con_to_pyomo_con_map[xpress_con] = con

    def _xpress_vartype_from_var(self, var):
        """
        This function takes a pyomo variable and returns the appropriate xpress variable type
        :param var: pyomo.core.base.var.Var
        :return: xpress.continuous or xpress.binary or xpress.integer
        """
        if var.is_binary():
            vartype = self._xpress.binary
        elif var.is_integer():
            vartype = self._xpress.integer
        elif var.is_continuous():
            vartype = self._xpress.continuous
        else:
            raise ValueError(
                'Variable domain type is not recognized for {0}'.format(
                    var.domain))
        return vartype

    def _set_objective(self, obj):
        if self._objective is not None:
            for var in self._vars_referenced_by_obj:
                self._referenced_variables[var] -= 1
            self._vars_referenced_by_obj = ComponentSet()
            self._objective = None

        if obj.active is False:
            raise ValueError('Cannot add inactive objective to solver.')

        if obj.sense == minimize:
            sense = self._xpress.minimize
        elif obj.sense == maximize:
            sense = self._xpress.maximize
        else:
            raise ValueError('Objective sense is not recognized: {0}'.format(
                obj.sense))

        xpress_expr, referenced_vars = self._get_expr_from_pyomo_expr(
            obj.expr, self._max_obj_degree)

        for var in referenced_vars:
            self._referenced_variables[var] += 1

        self._solver_model.setObjective(xpress_expr, sense=sense)
        self._objective = obj
        self._vars_referenced_by_obj = referenced_vars

    def _postsolve(self):
        # the only suffixes that we extract from XPRESS are
        # constraint duals, constraint slacks, and variable
        # reduced-costs. scan through the solver suffix list
        # and throw an exception if the user has specified
        # any others.
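        # A usage sketch (typical Pyomo idiom): the user requests these by
        # declaring IMPORT suffixes on the model, e.g.
        #     model.dual = Suffix(direction=Suffix.IMPORT)
        #     model.rc = Suffix(direction=Suffix.IMPORT)
        #     model.slack = Suffix(direction=Suffix.IMPORT)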
        extract_duals = False
        extract_slacks = False
        extract_reduced_costs = False
        for suffix in self._suffixes:
            flag = False
            if re.match(suffix, "dual"):
                extract_duals = True
                flag = True
            if re.match(suffix, "slack"):
                extract_slacks = True
                flag = True
            if re.match(suffix, "rc"):
                extract_reduced_costs = True
                flag = True
            if not flag:
                raise RuntimeError(
                    "***The xpress_direct solver plugin cannot extract solution suffix="
                    + suffix)

        xprob = self._solver_model
        xp = self._xpress
        xprob_attrs = xprob.attributes

        ## XPRESS's status codes depend on this
        ## (number of integer vars > 0) or (number of special order sets > 0)
        is_mip = (xprob_attrs.mipents > 0) or (xprob_attrs.sets > 0)

        if is_mip:
            if extract_reduced_costs:
                logger.warning("Cannot get reduced costs for MIP.")
            if extract_duals:
                logger.warning("Cannot get duals for MIP.")
            extract_reduced_costs = False
            extract_duals = False

        self.results = SolverResults()
        soln = Solution()

        self.results.solver.name = self._name
        self.results.solver.wallclock_time = self._opt_time

        if is_mip:
            status = xprob_attrs.mipstatus
            mip_sols = xprob_attrs.mipsols
            if status == xp.mip_not_loaded:
                self.results.solver.status = SolverStatus.aborted
                self.results.solver.termination_message = "Model is not loaded; no solution information is available."
                self.results.solver.termination_condition = TerminationCondition.error
                soln.status = SolutionStatus.unknown
            # no MIP solution yet: the initial LP did not solve
            # (mip_lp_not_optimal), the LP solved but no integer solution was
            # found (mip_lp_optimal), or the global search started but is
            # incomplete (mip_no_sol_found)
            elif status == xp.mip_lp_not_optimal \
                    or status == xp.mip_lp_optimal \
                    or status == xp.mip_no_sol_found:
                self.results.solver.status = SolverStatus.aborted
                self.results.solver.termination_message = "Model is loaded, but no solution information is available."
                self.results.solver.termination_condition = TerminationCondition.error
                soln.status = SolutionStatus.unknown
            elif status == xp.mip_solution:  # some solution available
                self.results.solver.status = SolverStatus.warning
                self.results.solver.termination_message = "Unable to satisfy optimality tolerances; a sub-optimal " \
                                                          "solution is available."
                self.results.solver.termination_condition = TerminationCondition.other
                soln.status = SolutionStatus.feasible
            elif status == xp.mip_infeas:  # MIP proven infeasible
                self.results.solver.status = SolverStatus.warning
                self.results.solver.termination_message = "Model was proven to be infeasible"
                self.results.solver.termination_condition = TerminationCondition.infeasible
                soln.status = SolutionStatus.infeasible
            elif status == xp.mip_optimal:  # optimal
                self.results.solver.status = SolverStatus.ok
                self.results.solver.termination_message = "Model was solved to optimality (subject to tolerances), " \
                                                          "and an optimal solution is available."
                self.results.solver.termination_condition = TerminationCondition.optimal
                soln.status = SolutionStatus.optimal
            elif status == xp.mip_unbounded and mip_sols > 0:
                self.results.solver.status = SolverStatus.warning
                self.results.solver.termination_message = "LP relaxation was proven to be unbounded, " \
                                                          "but a solution is available."
                self.results.solver.termination_condition = TerminationCondition.unbounded
                soln.status = SolutionStatus.unbounded
            elif status == xp.mip_unbounded and mip_sols <= 0:
                self.results.solver.status = SolverStatus.warning
                self.results.solver.termination_message = "LP relaxation was proven to be unbounded."
                self.results.solver.termination_condition = TerminationCondition.unbounded
                soln.status = SolutionStatus.unbounded
            else:
                self.results.solver.status = SolverStatus.error
                self.results.solver.termination_message = \
                    ("Unhandled Xpress solve status "
                     "("+str(status)+")")
                self.results.solver.termination_condition = TerminationCondition.error
                soln.status = SolutionStatus.error
        else:  ## an LP, we'll check the lpstatus
            status = xprob_attrs.lpstatus
            if status == xp.lp_unstarted:
                self.results.solver.status = SolverStatus.aborted
                self.results.solver.termination_message = "Model is not loaded; no solution information is available."
                self.results.solver.termination_condition = TerminationCondition.error
                soln.status = SolutionStatus.unknown
            elif status == xp.lp_optimal:
                self.results.solver.status = SolverStatus.ok
                self.results.solver.termination_message = "Model was solved to optimality (subject to tolerances), " \
                                                          "and an optimal solution is available."
                self.results.solver.termination_condition = TerminationCondition.optimal
                soln.status = SolutionStatus.optimal
            elif status == xp.lp_infeas:
                self.results.solver.status = SolverStatus.warning
                self.results.solver.termination_message = "Model was proven to be infeasible"
                self.results.solver.termination_condition = TerminationCondition.infeasible
                soln.status = SolutionStatus.infeasible
            elif status == xp.lp_cutoff:
                self.results.solver.status = SolverStatus.ok
                self.results.solver.termination_message = "Optimal objective for model was proven to be worse than the " \
                                                          "cutoff value specified; a solution is available."
                self.results.solver.termination_condition = TerminationCondition.minFunctionValue
                soln.status = SolutionStatus.optimal
            elif status == xp.lp_unfinished:
                self.results.solver.status = SolverStatus.aborted
                self.results.solver.termination_message = "Optimization was terminated by the user."
                self.results.solver.termination_condition = TerminationCondition.error
                soln.status = SolutionStatus.error
            elif status == xp.lp_unbounded:
                self.results.solver.status = SolverStatus.warning
                self.results.solver.termination_message = "Model was proven to be unbounded."
                self.results.solver.termination_condition = TerminationCondition.unbounded
                soln.status = SolutionStatus.unbounded
            elif status == xp.lp_cutoff_in_dual:
                self.results.solver.status = SolverStatus.ok
                self.results.solver.termination_message = "Xpress reported the LP was cutoff in the dual."
                self.results.solver.termination_condition = TerminationCondition.minFunctionValue
                soln.status = SolutionStatus.optimal
            elif status == xp.lp_unsolved:
                self.results.solver.status = SolverStatus.error
                self.results.solver.termination_message = "Optimization was terminated due to unrecoverable numerical " \
                                                          "difficulties."
                self.results.solver.termination_condition = TerminationCondition.error
                soln.status = SolutionStatus.error
            elif status == xp.lp_nonconvex:
                self.results.solver.status = SolverStatus.error
                self.results.solver.termination_message = "Optimization was terminated because nonconvex quadratic data " \
                                                          "were found."
                self.results.solver.termination_condition = TerminationCondition.error
                soln.status = SolutionStatus.error
            else:
                self.results.solver.status = SolverStatus.error
                self.results.solver.termination_message = \
                    ("Unhandled Xpress solve status "
                     "("+str(status)+")")
                self.results.solver.termination_condition = TerminationCondition.error
                soln.status = SolutionStatus.error

        self.results.problem.name = xprob_attrs.matrixname

        if xprob_attrs.objsense == 1.0:
            self.results.problem.sense = minimize
        elif xprob_attrs.objsense == -1.0:
            self.results.problem.sense = maximize
        else:
            raise RuntimeError(
                'Unrecognized Xpress objective sense: {0}'.format(
                    xprob_attrs.objsense))

        self.results.problem.upper_bound = None
        self.results.problem.lower_bound = None
        if not is_mip:  #LP or continuous problem
            try:
                self.results.problem.upper_bound = xprob_attrs.lpobjval
                self.results.problem.lower_bound = xprob_attrs.lpobjval
            except (self._XpressException, AttributeError):
                pass
        elif xprob_attrs.objsense == 1.0:  # minimizing MIP
            try:
                self.results.problem.upper_bound = xprob_attrs.mipbestobjval
            except (self._XpressException, AttributeError):
                pass
            try:
                self.results.problem.lower_bound = xprob_attrs.bestbound
            except (self._XpressException, AttributeError):
                pass
        elif xprob_attrs.objsense == -1.0:  # maximizing MIP
            try:
                self.results.problem.upper_bound = xprob_attrs.bestbound
            except (self._XpressException, AttributeError):
                pass
            try:
                self.results.problem.lower_bound = xprob_attrs.mipbestobjval
            except (self._XpressException, AttributeError):
                pass
        else:
            raise RuntimeError(
                'Unrecognized xpress objective sense: {0}'.format(
                    xprob_attrs.objsense))

        try:
            soln.gap = self.results.problem.upper_bound - self.results.problem.lower_bound
        except TypeError:
            soln.gap = None

        self.results.problem.number_of_constraints = xprob_attrs.rows + xprob_attrs.sets + xprob_attrs.qconstraints
        self.results.problem.number_of_nonzeros = xprob_attrs.elems
        self.results.problem.number_of_variables = xprob_attrs.cols
        self.results.problem.number_of_integer_variables = xprob_attrs.mipents
        self.results.problem.number_of_continuous_variables = xprob_attrs.cols - xprob_attrs.mipents
        self.results.problem.number_of_objectives = 1
        self.results.problem.number_of_solutions = xprob_attrs.mipsols if is_mip else 1

        # if a solve was stopped by a limit, we still need to check to
        # see if there is a solution available - this may not always
        # be the case, both in LP and MIP contexts.
        if self._save_results:
            """
            The code in this if statement is only needed for backwards compatibility. It is more efficient to set
            _save_results to False and use load_vars, load_duals, etc.
            """
            if xprob_attrs.lpstatus in \
                    [xp.lp_optimal, xp.lp_cutoff, xp.lp_cutoff_in_dual] or \
                    xprob_attrs.mipsols > 0:
                soln_variables = soln.variable
                soln_constraints = soln.constraint

                xpress_vars = list(self._solver_var_to_pyomo_var_map.keys())
                var_vals = xprob.getSolution(xpress_vars)
                for xpress_var, val in zip(xpress_vars, var_vals):
                    pyomo_var = self._solver_var_to_pyomo_var_map[xpress_var]
                    if self._referenced_variables[pyomo_var] > 0:
                        pyomo_var.stale = False
                        soln_variables[xpress_var.name] = {"Value": val}

                if extract_reduced_costs:
                    vals = xprob.getRCost(xpress_vars)
                    for xpress_var, val in zip(xpress_vars, vals):
                        pyomo_var = self._solver_var_to_pyomo_var_map[
                            xpress_var]
                        if self._referenced_variables[pyomo_var] > 0:
                            soln_variables[xpress_var.name]["Rc"] = val

                if extract_duals or extract_slacks:
                    xpress_cons = list(
                        self._solver_con_to_pyomo_con_map.keys())
                    for con in xpress_cons:
                        soln_constraints[con.name] = {}

                if extract_duals:
                    vals = xprob.getDual(xpress_cons)
                    for val, con in zip(vals, xpress_cons):
                        soln_constraints[con.name]["Dual"] = val

                if extract_slacks:
                    vals = xprob.getSlack(xpress_cons)
                    for con, val in zip(xpress_cons, vals):
                        if con in self._range_constraints:
                            ## for xpress, the slack on a range constraint
                            ## is based on the upper bound
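                            ## Xpress reports val as the slack with respect to
                            ## the upper bound (ub_s = ub - body).  Recover the
                            ## body value, derive the lower-bound slack
                            ## lb_s = lb - body, and report whichever side has
                            ## the larger magnitude.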
                            lb = con.lb
                            ub = con.ub
                            ub_s = val
                            expr_val = ub - ub_s
                            lb_s = lb - expr_val
                            if abs(ub_s) > abs(lb_s):
                                soln_constraints[con.name]["Slack"] = ub_s
                            else:
                                soln_constraints[con.name]["Slack"] = lb_s
                        else:
                            soln_constraints[con.name]["Slack"] = val

        elif self._load_solutions:
            if xprob_attrs.lpstatus == xp.lp_optimal and \
                    ((not is_mip) or (xprob_attrs.mipsols > 0)):

                self._load_vars()

                if extract_reduced_costs:
                    self._load_rc()

                if extract_duals:
                    self._load_duals()

                if extract_slacks:
                    self._load_slacks()

        self.results.solution.insert(soln)

        # finally, clean any temporary files registered with the temp file
        # manager, created/populated *directly* by this plugin.
        TempfileManager.pop(remove=not self._keepfiles)
        return DirectOrPersistentSolver._postsolve(self)

    def warm_start_capable(self):
        return True

    def _warm_start(self):
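        # Gather the current value of every mapped Pyomo variable and pass the
        # values and their Xpress columns to addmipsol as a MIP start.  This
        # hook is triggered when a user calls solve(..., warmstart=True) on a
        # solver that reports warm_start_capable().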
        mipsolval = list()
        mipsolcol = list()
        for pyomo_var, xpress_var in self._pyomo_var_to_solver_var_map.items():
            if pyomo_var.value is not None:
                mipsolval.append(value(pyomo_var))
                mipsolcol.append(xpress_var)
        self._solver_model.addmipsol(mipsolval, mipsolcol)

    def _load_vars(self, vars_to_load=None):
        var_map = self._pyomo_var_to_solver_var_map
        ref_vars = self._referenced_variables
        if vars_to_load is None:
            vars_to_load = var_map.keys()

        xpress_vars_to_load = [
            var_map[pyomo_var] for pyomo_var in vars_to_load
        ]
        vals = self._solver_model.getSolution(xpress_vars_to_load)

        for var, val in zip(vars_to_load, vals):
            if ref_vars[var] > 0:
                var.stale = False
                var.value = val

    def _load_rc(self, vars_to_load=None):
        if not hasattr(self._pyomo_model, 'rc'):
            self._pyomo_model.rc = Suffix(direction=Suffix.IMPORT)
        var_map = self._pyomo_var_to_solver_var_map
        ref_vars = self._referenced_variables
        rc = self._pyomo_model.rc
        if vars_to_load is None:
            vars_to_load = var_map.keys()

        xpress_vars_to_load = [
            var_map[pyomo_var] for pyomo_var in vars_to_load
        ]
        vals = self._solver_model.getRCost(xpress_vars_to_load)

        for var, val in zip(vars_to_load, vals):
            if ref_vars[var] > 0:
                rc[var] = val

    def _load_duals(self, cons_to_load=None):
        if not hasattr(self._pyomo_model, 'dual'):
            self._pyomo_model.dual = Suffix(direction=Suffix.IMPORT)
        con_map = self._pyomo_con_to_solver_con_map
        dual = self._pyomo_model.dual

        if cons_to_load is None:
            cons_to_load = con_map.keys()

        xpress_cons_to_load = [
            con_map[pyomo_con] for pyomo_con in cons_to_load
        ]
        vals = self._solver_model.getDual(xpress_cons_to_load)

        for pyomo_con, val in zip(cons_to_load, vals):
            dual[pyomo_con] = val

    def _load_slacks(self, cons_to_load=None):
        if not hasattr(self._pyomo_model, 'slack'):
            self._pyomo_model.slack = Suffix(direction=Suffix.IMPORT)
        con_map = self._pyomo_con_to_solver_con_map
        slack = self._pyomo_model.slack

        if cons_to_load is None:
            cons_to_load = con_map.keys()

        xpress_cons_to_load = [
            con_map[pyomo_con] for pyomo_con in cons_to_load
        ]
        vals = self._solver_model.getSlack(xpress_cons_to_load)

        for pyomo_con, xpress_con, val in zip(cons_to_load,
                                              xpress_cons_to_load, vals):
            if xpress_con in self._range_constraints:
                ## for xpress, the slack on a range constraint
                ## is based on the upper bound
                lb = xpress_con.lb
                ub = xpress_con.ub
                ub_s = val
                expr_val = ub - ub_s
                lb_s = lb - expr_val
                if abs(ub_s) > abs(lb_s):
                    slack[pyomo_con] = ub_s
                else:
                    slack[pyomo_con] = lb_s
            else:
                slack[pyomo_con] = val

    def load_duals(self, cons_to_load=None):
        """
        Load the duals into the 'dual' suffix. The 'dual' suffix must live on the parent model.

        Parameters
        ----------
        cons_to_load: list of Constraint
        """
        self._load_duals(cons_to_load)

    def load_rc(self, vars_to_load=None):
        """
        Load the reduced costs into the 'rc' suffix. The 'rc' suffix must live on the parent model.

        Parameters
        ----------
        vars_to_load: list of Var
        """
        self._load_rc(vars_to_load)

    def load_slacks(self, cons_to_load=None):
        """
        Load the values of the slack variables into the 'slack' suffix. The 'slack' suffix must live on the parent
        model.

        Parameters
        ----------
        cons_to_load: list of Constraint
        """
        self._load_slacks(cons_to_load)
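The loading methods above are driven by import suffixes declared on the parent model. A minimal usage sketch, assuming the FICO Xpress Python bindings and a license are available (the model and component names are illustrative):

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(bounds=(0, 10))
m.y = pyo.Var(bounds=(0, 10))
m.c = pyo.Constraint(expr=m.x + 2 * m.y <= 8)
m.obj = pyo.Objective(expr=3 * m.x + 4 * m.y, sense=pyo.maximize)

# Import suffixes on the parent model ask the plugin to extract duals,
# reduced costs, and slacks (for LPs; _postsolve disables duals/rc for MIPs).
m.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT)
m.rc = pyo.Suffix(direction=pyo.Suffix.IMPORT)
m.slack = pyo.Suffix(direction=pyo.Suffix.IMPORT)

opt = pyo.SolverFactory('xpress_direct')
results = opt.solve(m)
print(results.solver.termination_condition)
print(m.dual[m.c], m.rc[m.x], m.slack[m.c])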
Example #3
0
 def test_keys(self):
     cmap = ComponentMap(self._components)
     self.assertEqual(
         sorted(cmap.keys(), key=id),
         sorted(list(c for c, val in self._components), key=id))
Example #4
0
 def test_keys(self):
     cmap = ComponentMap(self._components)
     self.assertEqual(sorted(cmap.keys(), key=id),
                      sorted(list(c for c,val in self._components),
                             key=id))
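Both tests sort keys by id() because ComponentMap hashes Pyomo components by object identity rather than by value, so structurally identical components remain distinct keys. A small sketch (the ComponentMap import path may differ across Pyomo versions):

import pyomo.environ as pyo
from pyomo.common.collections import ComponentMap

m = pyo.ConcreteModel()
m.x = pyo.Var()
m.y = pyo.Var()

cmap = ComponentMap([(m.x, 1), (m.y, 2)])
# Keys are compared by identity, so id() gives a stable sort key.
assert [id(v) for v in sorted(cmap.keys(), key=id)] == sorted([id(m.x), id(m.y)])
assert cmap[m.x] == 1 and m.y in cmap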
Example #5
0
    def categorize_variables(model, initial_inputs):
        """Creates lists of time-only-slices of the different types of variables
        in a model, given knowledge of which are inputs. These lists are added 
        as attributes to the model's namespace.

        Possible variable categories are:

            - INPUT --- Those specified by the user to be inputs
            - DERIVATIVE --- Those declared as Pyomo DerivativeVars, whose 
                             "state variable" is not fixed, except possibly as an
                             initial condition
            - DIFFERENTIAL --- Those referenced as the "state variable" by an
                               unfixed (except possibly as an initial condition)
                               DerivativeVar
            - FIXED --- Those that are fixed at non-initial time points. These
                        are typically disturbances, design variables, or 
                        uncertain parameters.
            - ALGEBRAIC --- Unfixed, time-indexed variables that are neither
                            inputs nor referenced by an unfixed derivative.
            - SCALAR --- Variables unindexed by time. These could be variables
                         that refer to a specific point in time (initial or
                         final conditions), averages over time, or truly 
                         time-independent variables like diameter.

        Args:
            model : Model whose variables will be flattened and categorized
            initial_inputs : List of VarData objects that are input variables
                             at the initial time point

        """
        namespace = getattr(model,
                DynamicBase.get_namespace_name())
        time = namespace.get_time()
        t0 = time.first()
        t1 = time.get_finite_elements()[1]
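        # t1 is the first finite element after the initial time; a variable's
        # fixed status there is used below as a proxy for being fixed at
        # non-initial time points.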
        deriv_vars = []
        diff_vars = []
        input_vars = []
        alg_vars = []
        fixed_vars = []

        ic_vars = []

        # Create list of time-only-slices of time indexed variables
        # (And list of VarData objects for scalar variables)
        scalar_vars, dae_vars = flatten_dae_variables(model, time)

        dae_map = ComponentMap([(v[t0], v) for v in dae_vars])
        t0_vardata = list(dae_map.keys())
        namespace.dae_vars = list(dae_map.values())
        namespace.scalar_vars = \
            NMPCVarGroup(
                list(ComponentMap([(v, v) for v in scalar_vars]).values()),
                index_set=None, is_scalar=True)
        namespace.n_scalar_vars = \
                namespace.scalar_vars.n_vars
        input_set = ComponentSet(initial_inputs)
        updated_input_set = ComponentSet(initial_inputs)

        # Iterate over initial vardata, popping from dae map when an input,
        # derivative, or differential var is found.
        for var0 in t0_vardata:
            if var0 in updated_input_set:
                input_set.remove(var0)
                time_slice = dae_map.pop(var0)
                input_vars.append(time_slice)

            parent = var0.parent_component()
            if not isinstance(parent, DerivativeVar):
                continue
            if time not in ComponentSet(parent.get_continuousset_list()):
                continue
            index0 = var0.index()
            var1 = dae_map[var0][t1]
            index1 = var1.index()
            state = parent.get_state_var()

            if state[index1].fixed:
                # Assume state var is fixed everywhere, so derivative
                # 'isn't really' a derivative.
                # Should be safe to remove state from dae_map here
                state_slice = dae_map.pop(state[index0])
                fixed_vars.append(state_slice)
                continue
            if state[index0] in input_set:
                # If differential variable is an input, then this DerivativeVar
                # is 'not really a derivative'
                continue

            deriv_slice = dae_map.pop(var0)

            if var1.fixed:
                # Assume derivative has been fixed everywhere.
                # Add to list of fixed variables, and don't remove its state variable.
                fixed_vars.append(deriv_slice)
            elif var0.fixed:
                # In this case the derivative has been used as an initial condition. 
                # Still want to include it in the list of derivatives.
                ic_vars.append(deriv_slice)
                state_slice = dae_map.pop(state[index0])
                if state[index0].fixed:
                    ic_vars.append(state_slice)
                deriv_vars.append(deriv_slice)
                diff_vars.append(state_slice)
            else:
                # Neither is fixed. This should be the most common case.
                state_slice = dae_map.pop(state[index0])
                if state[index0].fixed:
                    ic_vars.append(state_slice)
                deriv_vars.append(deriv_slice)
                diff_vars.append(state_slice)

        # input_set still contains any initial inputs that were never located
        # among the t0 variable data.
        if input_set:
            raise RuntimeError('Not all inputs could be found')
        assert len(deriv_vars) == len(diff_vars)

        for var0, time_slice in dae_map.items():
            var1 = time_slice[t1]
            # If the variable is still in the list of time-indexed vars,
            # it must either be fixed (not a var) or be an algebraic var
            if var1.fixed:
                fixed_vars.append(time_slice)
            else:
                if var0.fixed:
                    ic_vars.append(time_slice)
                alg_vars.append(time_slice)

        namespace.deriv_vars = NMPCVarGroup(deriv_vars, time)
        namespace.diff_vars = NMPCVarGroup(diff_vars, time)
        namespace.n_diff_vars = len(diff_vars)
        namespace.n_deriv_vars = len(deriv_vars)
        assert (namespace.n_diff_vars == 
                namespace.n_deriv_vars)
                
        # ic_vars will not be stored as a NMPCVarGroup - don't want to store
        # all the info twice
        namespace.ic_vars = ic_vars
        namespace.n_ic_vars = len(ic_vars)
        #assert model.n_dv == len(ic_vars)
        # Would like this to be true, but accurately detecting differential
        # variables that are not implicitly fixed (by fixing some input)
        # is difficult
        # Also, a categorization can have no input vars and still be
        # valid for MHE

        namespace.input_vars = NMPCVarGroup(input_vars, time)
        namespace.n_input_vars = len(input_vars)

        namespace.alg_vars = NMPCVarGroup(alg_vars, time)
        namespace.n_alg_vars = len(alg_vars)

        namespace.fixed_vars = NMPCVarGroup(fixed_vars, time)
        namespace.n_fixed_vars = len(fixed_vars)

        namespace.variables_categorized = True
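The categorization above builds on flatten_dae_variables, which splits a model's variables into scalar VarData and time-only slices. A minimal sketch of that step, assuming pyomo.dae is available (the flatten utility's module path and name have changed between Pyomo versions):

import pyomo.environ as pyo
from pyomo.dae import ContinuousSet, DerivativeVar
from pyomo.dae.flatten import flatten_dae_variables

m = pyo.ConcreteModel()
m.time = ContinuousSet(bounds=(0, 1))
m.x = pyo.Var(m.time)                    # differential variable
m.dxdt = DerivativeVar(m.x, wrt=m.time)  # its derivative
m.u = pyo.Var(m.time)                    # a candidate input
m.d = pyo.Var()                          # scalar, time-independent variable

# scalar_vars holds VarData such as m.d; dae_vars holds time-only slices
# (objects indexable by a time point) for m.x, m.dxdt, and m.u.
scalar_vars, dae_vars = flatten_dae_variables(m, m.time)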