Example #1
def _generate_filtered_docstring():
    cfg = PyROS.CONFIG()
    del cfg['first_stage_variables']
    del cfg['second_stage_variables']
    del cfg['uncertain_params']
    del cfg['uncertainty_set']
    del cfg['local_solver']
    del cfg['global_solver']
    return add_docstring_list(PyROS.solve.__doc__, cfg, indent_by=8)
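All of these examples lean on add_docstring_list, which appends a formatted keyword-argument listing (generated from a Pyomo config object) to an existing docstring; Example #1 simply filters a PyROS config before generating that listing. A minimal, self-contained sketch of the idea follows. It uses a plain dict instead of a real ConfigBlock, and the helper name append_keyword_doc is illustrative, not part of Pyomo.

# Illustrative sketch only -- not Pyomo's implementation. It mimics what
# add_docstring_list does: append a "Keyword Arguments" section built from
# option metadata to an existing docstring.
def append_keyword_doc(doc, options, indent_by=4):
    """``options`` is assumed to map option name -> (default, description)."""
    pad = " " * indent_by
    lines = ["", pad + "Keyword Arguments", pad + "-----------------"]
    for name, (default, descr) in options.items():
        lines.append(pad + "%s : %s (default=%r)" % (name, descr, default))
    return (doc or "") + "\n".join(lines) + "\n"

if __name__ == "__main__":
    opts = {
        "tolerance": (1e-13, "tolerance on bound equality"),
        "tmp": (False, "store old values so the transformation can be reverted"),
    }
    print(append_keyword_doc("Detect fixed variables.\n", opts))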
Example #2
class FixedVarDetector(IsomorphicTransformation):
    """Detects variables that are de-facto fixed but not considered fixed.

    For each variable :math:`v` found on the model, check to see if its lower
    bound :math:`v^{LB}` is within some tolerance of its upper bound
    :math:`v^{UB}`. If so, fix the variable to the value of :math:`v^{LB}`.

    Keyword arguments below are specified for the ``apply_to`` and
    ``create_using`` functions.

    """

    CONFIG = ConfigBlock("FixedVarDetector")
    CONFIG.declare(
        "tmp",
        ConfigValue(
            default=False,
            domain=bool,
            description="True to store the set of transformed variables and "
            "their old values so that they can be restored."))
    CONFIG.declare(
        "tolerance",
        ConfigValue(default=1E-13,
                    domain=NonNegativeFloat,
                    description="tolerance on bound equality (LB == UB)"))

    __doc__ = add_docstring_list(__doc__, CONFIG)

    alias('contrib.detect_fixed_vars',
          doc=textwrap.fill(textwrap.dedent(__doc__.strip())))

    def _apply_to(self, instance, **kwargs):
        config = self.CONFIG(kwargs)

        if config.tmp:
            instance._xfrm_detect_fixed_vars_old_values = ComponentMap()

        for var in instance.component_data_objects(ctype=Var,
                                                   descend_into=True):
            if var.fixed or var.lb is None or var.ub is None:
                # if the variable is already fixed, or if it is missing a
                # bound, we skip it.
                continue
            if fabs(value(var.lb) - value(var.ub)) <= config.tolerance:
                if config.tmp:
                    instance._xfrm_detect_fixed_vars_old_values[var] = \
                        var.value
                var.fix(var.lb)

    def revert(self, instance):
        """Revert variables fixed by the transformation."""
        for var, var_value in iteritems(
                instance._xfrm_detect_fixed_vars_old_values):
            var.unfix()
            var.set_value(var_value)

        del instance._xfrm_detect_fixed_vars_old_values
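A minimal usage sketch of the transformation above, assuming a standard Pyomo installation in which the 'contrib.detect_fixed_vars' alias is registered; the toy model is illustrative only.

from pyomo.environ import ConcreteModel, TransformationFactory, Var

m = ConcreteModel()
m.x = Var(bounds=(2.0, 2.0))   # lb == ub within tolerance, so effectively fixed
m.y = Var(bounds=(0.0, 10.0))

xfrm = TransformationFactory('contrib.detect_fixed_vars')
xfrm.apply_to(m, tmp=True)     # tmp=True stores old values for a later revert()
assert m.x.fixed and not m.y.fixed

xfrm.revert(m)                 # unfix x and restore its stored value
assert not m.x.fixed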
Example #3
    def _generate_solve_docstring(cls):
        """
        Generate the docstring for cls.solve, including the description
        of the keyword arguments defined by a pyomo configuration object.

        Parameters
        ----------
        cls:
            The subclass of SolverAPI whose docstring is being generated
        """
        cls.solve.__doc__ = SolverAPI.__solve_doc__.format(
            add_docstring_list("", cls.config, 8))
Example #4
class InducedLinearity(IsomorphicTransformation):
    """Reformulate nonlinear constraints with induced linearity.

    Finds continuous variables :math:`v` where :math:`v = d_1 + d_2 + d_3`,
    where :math:`d`'s are discrete variables. These continuous variables may
    participate nonlinearly in other expressions, which may then be induced to
    be linear.

    The overall algorithm flow can be summarized as:

    1. Detect effectively discrete variables and the constraints that
       imply discreteness.
    2. Determine the set of valid values for each effectively discrete variable.
    3. Find nonlinear expressions in which effectively discrete variables
       participate.
    4. Reformulate nonlinear expressions appropriately.

    .. note:: Tasks 1 & 2 must incorporate scoping considerations (Disjuncts)

    Keyword arguments below are specified for the ``apply_to`` and
    ``create_using`` functions.

    """

    CONFIG = ConfigBlock("contrib.induced_linearity")
    CONFIG.declare(
        'equality_tolerance',
        ConfigValue(default=1E-6,
                    domain=NonNegativeFloat,
                    description="Tolerance on equality constraints."))
    CONFIG.declare(
        'pruning_solver',
        ConfigValue(default='glpk',
                    description="Solver to use when pruning possible values."))

    __doc__ = add_docstring_list(__doc__, CONFIG)

    alias('contrib.induced_linearity',
          doc=textwrap.fill(textwrap.dedent(__doc__.strip())))

    def _apply_to(self, model, **kwds):
        """Apply the transformation to the given model."""
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        _process_container(model, config)
        _process_subcontainers(model, config)
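A usage sketch for the transformation above, assuming the 'contrib.induced_linearity' alias is registered and a pruning MIP solver such as glpk is installed. The tiny model only illustrates the v = d1 + d2 structure described in the docstring.

from pyomo.environ import (Binary, ConcreteModel, Constraint, Objective,
                           TransformationFactory, Var)

m = ConcreteModel()
m.d1 = Var(domain=Binary)
m.d2 = Var(domain=Binary)
m.v = Var(bounds=(0, 2))
m.defn = Constraint(expr=m.v == m.d1 + m.d2)      # v is effectively discrete
m.nonlin = Constraint(expr=m.v ** 2 + m.d1 <= 3)  # v participates nonlinearly
m.obj = Objective(expr=m.v)

TransformationFactory('contrib.induced_linearity').apply_to(
    m, pruning_solver='glpk', equality_tolerance=1e-6)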
Example #5
class GDPoptSolver(object):
    """Decomposition solver for Generalized Disjunctive Programming (GDP) problems.

    The GDPopt (Generalized Disjunctive Programming optimizer) solver applies a
    variety of decomposition-based approaches to solve Generalized Disjunctive
    Programming (GDP) problems. GDP models can include nonlinear, continuous
    variables and constraints, as well as logical conditions.

    These approaches include:

    - Logic-based outer approximation (LOA)
    - Logic-based branch-and-bound (LBB)
    - Partial surrogate cuts [pending]
    - Generalized Bender decomposition [pending]

    This solver implementation was developed by Carnegie Mellon University in the
    research group of Ignacio Grossmann.

    For nonconvex problems, LOA may not report rigorous lower/upper bounds.

    Questions: Please make a post at StackOverflow and/or contact Qi Chen
    <https://github.com/qtothec>.

    Several key GDPopt components were prototyped by BS and MS students:

    - Logic-based branch and bound: Sunjeev Kale
    - MC++ interface: Johnny Bates
    - LOA set-covering initialization: Eloy Fernandez

    """

    # Declare configuration options for the GDPopt solver
    CONFIG = _get_GDPopt_config()

    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        This function performs all of the GDPopt solver setup and problem
        validation. It then calls upon helper functions to construct the
        initial master approximation and iteration loop.

        Args:
            model (Block): a Pyomo model or block to be solved

        """
        config = self.CONFIG(kwds.pop('options', {}), preserve_implicit=True)
        config.set_value(kwds)

        with setup_solver_environment(model, config) as solve_data:
            self._log_solver_intro_message(config)
            solve_data.results.solver.name = 'GDPopt %s - %s' % (str(
                self.version()), config.strategy)

            # Verify that objective has correct form
            process_objective(solve_data, config)

            # Presolve LP or NLP problems using subsolvers
            presolved, presolve_results = presolve_lp_nlp(solve_data, config)
            if presolved:
                # TODO merge the solver results
                return presolve_results  # problem presolved

            if solve_data.active_strategy in {'LOA', 'GLOA'}:
                # Initialize the master problem
                with time_code(solve_data.timing, 'initialization'):
                    GDPopt_initialize_master(solve_data, config)

                # Algorithm main loop
                with time_code(solve_data.timing, 'main loop'):
                    GDPopt_iteration_loop(solve_data, config)
            elif solve_data.active_strategy == 'LBB':
                _perform_branch_and_bound(solve_data)

        return solve_data.results

    """Support use as a context manager under current solver API"""

    def __enter__(self):
        return self

    def __exit__(self, t, v, traceback):
        pass

    def available(self, exception_flag=True):
        """Check if solver is available.

        TODO: For now, it is always available. However, sub-solvers may not
        always be available, and so this should reflect that possibility.

        """
        return True

    def version(self):
        """Return a 3-tuple describing the solver version."""
        return __version__

    def _log_solver_intro_message(self, config):
        config.logger.info(
            "Starting GDPopt version %s using %s algorithm" %
            (".".join(map(str, self.version())), config.strategy))
        mip_args_output = StringIO()
        nlp_args_output = StringIO()
        minlp_args_output = StringIO()
        lminlp_args_output = StringIO()
        config.mip_solver_args.display(ostream=mip_args_output)
        config.nlp_solver_args.display(ostream=nlp_args_output)
        config.minlp_solver_args.display(ostream=minlp_args_output)
        config.local_minlp_solver_args.display(ostream=lminlp_args_output)
        mip_args_text = indent(mip_args_output.getvalue().rstrip(),
                               prefix=" " * 2 + " - ")
        nlp_args_text = indent(nlp_args_output.getvalue().rstrip(),
                               prefix=" " * 2 + " - ")
        minlp_args_text = indent(minlp_args_output.getvalue().rstrip(),
                                 prefix=" " * 2 + " - ")
        lminlp_args_text = indent(lminlp_args_output.getvalue().rstrip(),
                                  prefix=" " * 2 + " - ")
        mip_args_text = "" if len(
            mip_args_text.strip()) == 0 else "\n" + mip_args_text
        nlp_args_text = "" if len(
            nlp_args_text.strip()) == 0 else "\n" + nlp_args_text
        minlp_args_text = "" if len(
            minlp_args_text.strip()) == 0 else "\n" + minlp_args_text
        lminlp_args_text = "" if len(
            lminlp_args_text.strip()) == 0 else "\n" + lminlp_args_text
        config.logger.info("""
Subsolvers:
- MILP: {milp}{milp_args}
- NLP: {nlp}{nlp_args}
- MINLP: {minlp}{minlp_args}
- local MINLP: {lminlp}{lminlp_args}
            """.format(
            milp=config.mip_solver,
            milp_args=mip_args_text,
            nlp=config.nlp_solver,
            nlp_args=nlp_args_text,
            minlp=config.minlp_solver,
            minlp_args=minlp_args_text,
            lminlp=config.local_minlp_solver,
            lminlp_args=lminlp_args_text,
        ).strip())
        to_cite_text = """
If you use this software, you may cite the following:
- Implementation:
Chen, Q; Johnson, ES; Siirola, JD; Grossmann, IE.
Pyomo.GDP: Disjunctive Models in Python. 
Proc. of the 13th Intl. Symposium on Process Systems Eng.
San Diego, 2018.
        """.strip()
        if config.strategy == "LOA":
            to_cite_text += "\n"
            to_cite_text += """
- LOA algorithm:
Türkay, M; Grossmann, IE.
Logic-based MINLP algorithms for the optimal synthesis of process networks.
Comp. and Chem. Eng. 1996, 20(8), 959–978.
DOI: 10.1016/0098-1354(95)00219-7.
            """.strip()
        elif config.strategy == "GLOA":
            to_cite_text += "\n"
            to_cite_text += """
- GLOA algorithm:
Lee, S; Grossmann, IE.
A Global Optimization Algorithm for Nonconvex Generalized Disjunctive Programming and Applications to Process Systems.
Comp. and Chem. Eng. 2001, 25, 1675-1697.
DOI: 10.1016/S0098-1354(01)00732-3.
            """.strip()
        elif config.strategy == "LBB":
            to_cite_text += "\n"
            to_cite_text += """
- LBB algorithm:
Lee, S; Grossmann, IE.
New algorithms for nonlinear generalized disjunctive programming.
Comp. and Chem. Eng. 2000, 24, 2125-2141.
DOI: 10.1016/S0098-1354(00)00581-0.
            """.strip()
        config.logger.info(to_cite_text)

    _metasolver = False

    if six.PY2:
        __doc__ = """
    Keyword arguments below are specified for the :code:`solve` function.
        
    """ + add_docstring_list(__doc__, CONFIG)
Example #6


if six.PY3:
    # Add the CONFIG arguments to the solve method docstring
    GDPoptSolver.solve.__doc__ = add_docstring_list(GDPoptSolver.solve.__doc__,
                                                    GDPoptSolver.CONFIG,
                                                    indent_by=8)
Example #7
class ConstraintToVarBoundTransform(IsomorphicTransformation):
    """Change constraints to be a bound on the variable.

    Looks for constraints of the form :math:`k*v + c_1 \\leq c_2` and, when
    :math:`k > 0`, tightens the upper bound on :math:`v` to
    :math:`(c_2 - c_1)/k` if that is tighter than the existing bound.
    Analogous updates are applied to lower bounds and to constraints with
    :math:`k < 0`.

    Keyword arguments below are specified for the ``apply_to`` and
    ``create_using`` functions.

    """

    CONFIG = ConfigBlock("ConstraintToVarBounds")
    CONFIG.declare(
        "tolerance",
        ConfigValue(
            default=1E-13,
            domain=NonNegativeFloat,
            description="tolerance on bound equality (:math:`LB = UB`)"))
    CONFIG.declare(
        "detect_fixed",
        ConfigValue(default=True,
                    domain=bool,
                    description="If True, fix variable when "
                    ":math:`| LB - UB | \\leq tolerance`."))

    __doc__ = add_docstring_list(__doc__, CONFIG)

    def _apply_to(self, model, **kwds):
        config = self.CONFIG(kwds)

        for constr in model.component_data_objects(ctype=Constraint,
                                                   active=True,
                                                   descend_into=True):
            # Check if the constraint is k * x + c1 <= c2 or c2 <= k * x + c1
            repn = generate_standard_repn(constr.body)
            if not repn.is_linear() or len(repn.linear_vars) != 1:
                # Skip nonlinear constraints, trivial constraints, and those
                # that involve more than one variable.
                continue
            else:
                var = repn.linear_vars[0]
                const = repn.constant
                coef = float(repn.linear_coefs[0])

            if coef == 0:
                # Skip trivial constraints
                continue
            elif coef > 0:
                if constr.has_ub():
                    new_ub = (value(constr.upper) - const) / coef
                    var_ub = float('inf') if var.ub is None else var.ub
                    var.setub(min(var_ub, new_ub))
                if constr.has_lb():
                    new_lb = (value(constr.lower) - const) / coef
                    var_lb = float('-inf') if var.lb is None else var.lb
                    var.setlb(max(var_lb, new_lb))
            elif coef < 0:
                if constr.has_ub():
                    new_lb = (value(constr.upper) - const) / coef
                    var_lb = float('-inf') if var.lb is None else var.lb
                    var.setlb(max(var_lb, new_lb))
                if constr.has_lb():
                    new_ub = (value(constr.lower) - const) / coef
                    var_ub = float('inf') if var.ub is None else var.ub
                    var.setub(min(var_ub, new_ub))

            if var.is_integer() or var.is_binary():
                # Make sure that the lb and ub are integral. If a bound lies
                # within tolerance of an integer, snap to that integer rather
                # than over-tightening.
                if var.has_lb():
                    var.setlb(
                        int(
                            min(math.ceil(var.lb - config.tolerance),
                                math.ceil(var.lb))))
                if var.has_ub():
                    var.setub(
                        int(
                            max(math.floor(var.ub + config.tolerance),
                                math.floor(var.ub))))

            if var is not None and var.value is not None:
                _adjust_var_value_if_not_feasible(var)

            if (config.detect_fixed and var.has_lb() and var.has_ub() and
                    fabs(value(var.lb) - value(var.ub)) <= config.tolerance):
                var.fix(var.lb)

            constr.deactivate()
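A small sketch of the effect of the transformation above, applied directly through the class (no factory alias appears in this snippet); the single-variable constraint collapses into a variable bound.

from pyomo.environ import ConcreteModel, Constraint, Var

m = ConcreteModel()
m.v = Var()
# 2*v + 1 <= 7 is equivalent to v <= 3, so v receives an upper bound of 3.
m.c = Constraint(expr=2 * m.v + 1 <= 7)

ConstraintToVarBoundTransform().apply_to(m)   # class defined above
assert m.v.ub == 3.0
assert not m.c.active   # the original constraint is deactivated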
Example #8
class TrivialConstraintDeactivator(IsomorphicTransformation):
    """Deactivates trivial constraints.

    Trivial constraints take form :math:`k_1 = k_2` or :math:`k_1 \\leq k_2`,
    where :math:`k_1` and :math:`k_2` are constants. These constraints
    typically arise when variables are fixed.

    Keyword arguments below are specified for the ``apply_to`` and
    ``create_using`` functions.

    """

    CONFIG = ConfigBlock("TrivialConstraintDeactivator")
    CONFIG.declare(
        "tmp",
        ConfigValue(default=False,
                    domain=bool,
                    description=
                    "True to store a set of transformed constraints for future"
                    " reversion of the transformation."))
    CONFIG.declare(
        "ignore_infeasible",
        ConfigValue(
            default=False,
            domain=bool,
            description="True to skip over trivial constraints that are "
            "infeasible instead of raising a ValueError."))
    CONFIG.declare(
        "return_trivial",
        ConfigValue(default=[],
                    description="a list to which the deactivated trivial "
                    "constraints are appended (side effect)"))
    CONFIG.declare(
        "tolerance",
        ConfigValue(default=1E-13,
                    domain=NonNegativeFloat,
                    description="tolerance on constraint violations"))

    __doc__ = add_docstring_list(__doc__, CONFIG)

    def _apply_to(self, instance, **kwargs):
        config = self.CONFIG(kwargs)
        if config.tmp and not hasattr(instance,
                                      '_tmp_trivial_deactivated_constrs'):
            instance._tmp_trivial_deactivated_constrs = ComponentSet()
        elif config.tmp:
            logger.warning(
                'Deactivating trivial constraints on the block {} for which '
                'trivial constraints were previously deactivated. '
                'Reversion will affect all deactivated constraints.'.format(
                    instance.name))

        # Trivial constraints are those that do not contain any variables,
        # i.e., the polynomial degree is 0.
        trivial_constraints = (
            constr for constr in instance.component_data_objects(
                ctype=Constraint, active=True, descend_into=True)
            if constr.body.polynomial_degree() == 0)

        for constr in trivial_constraints:
            # We need to check each constraint to make sure it is not violated.
            constr_lb = value(
                constr.lower) if constr.has_lb() else float('-inf')
            constr_ub = value(
                constr.upper) if constr.has_ub() else float('inf')
            constr_value = value(constr.body)

            # Check if the lower bound is violated outside a given tolerance
            if (constr_value + config.tolerance <= constr_lb):
                if config.ignore_infeasible:
                    continue
                else:
                    raise ValueError(
                        'Trivial constraint {} violates LB {} ≤ BODY {}.'.
                        format(constr.name, constr_lb, constr_value))

            # Check if the upper bound is violated outside a given tolerance
            if (constr_value >= constr_ub + config.tolerance):
                if config.ignore_infeasible:
                    continue
                else:
                    raise ValueError(
                        'Trivial constraint {} violates BODY {} ≤ UB {}.'.
                        format(constr.name, constr_value, constr_ub))

            # Constraint is not infeasible. Deactivate it.
            if config.tmp:
                instance._tmp_trivial_deactivated_constrs.add(constr)
            config.return_trivial.append(constr)
            constr.deactivate()

    def revert(self, instance):
        """Revert constraints deactivated by the transformation.

        Args:
            instance: the model instance on which trivial constraints were
                earlier deactivated.
        """
        for constr in instance._tmp_trivial_deactivated_constrs:
            constr.activate()
        del instance._tmp_trivial_deactivated_constrs
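A usage sketch for the deactivator above: once x is fixed, the constraint body has polynomial degree 0, so it is deactivated and reported through return_trivial (toy model; the class defined above is assumed to be importable).

from pyomo.environ import ConcreteModel, Constraint, Var

m = ConcreteModel()
m.x = Var()
m.x.fix(1.0)
m.c = Constraint(expr=m.x <= 5)   # trivial once x is fixed

trivial = []
xfrm = TrivialConstraintDeactivator()   # class defined above
xfrm.apply_to(m, tmp=True, return_trivial=trivial)
assert not m.c.active and len(trivial) == 1

xfrm.revert(m)            # reactivate constraints deactivated with tmp=True
assert m.c.active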
Example #9
class VariableBoundStripper(NonIsomorphicTransformation):
    """Strips bounds from variables.

    Keyword arguments below are specified for the ``apply_to`` and
    ``create_using`` functions.

    """

    CONFIG = ConfigBlock()
    CONFIG.declare(
        "strip_domains",
        ConfigValue(
            default=True,
            domain=bool,
            description="strip the domain for discrete variables as well"))
    CONFIG.declare(
        "reversible",
        ConfigValue(
            default=False,
            domain=bool,
            description="Whether the bound stripping will be temporary. "
            "If so, store information for reversion."))

    __doc__ = add_docstring_list(__doc__, CONFIG)

    alias('contrib.strip_var_bounds',
          doc=textwrap.fill(textwrap.dedent(__doc__.strip())))

    def _apply_to(self, instance, **kwds):
        config = self.CONFIG(kwds)
        if config.reversible:
            if any(
                    hasattr(instance, map_name) for map_name in [
                        '_tmp_var_bound_strip_lb', '_tmp_var_bound_strip_ub',
                        '_tmp_var_bound_strip_domain'
                    ]):
                raise RuntimeError(
                    'Variable stripping reversion component maps already '
                    'exist. Did you already apply a temporary transformation '
                    'without a subsequent reversion?')
            # Component maps to store data for reversion.
            instance._tmp_var_bound_strip_lb = ComponentMap()
            instance._tmp_var_bound_strip_ub = ComponentMap()
            instance._tmp_var_bound_strip_domain = ComponentMap()
        for var in instance.component_data_objects(ctype=Var):
            if config.strip_domains and not var.domain == Reals:
                if config.reversible:
                    instance._tmp_var_bound_strip_domain[var] = var.domain
                var.domain = Reals
            if var.has_lb():
                if config.reversible:
                    instance._tmp_var_bound_strip_lb[var] = var.lb
                var.setlb(None)
            if var.has_ub():
                if config.reversible:
                    instance._tmp_var_bound_strip_ub[var] = var.ub
                var.setub(None)

    def revert(self, instance):
        """Revert variable bounds and domains changed by the transformation."""
        for var in instance.component_data_objects(ctype=Var,
                                                   descend_into=True):
            if var in instance._tmp_var_bound_strip_lb:
                var.setlb(instance._tmp_var_bound_strip_lb[var])
            if var in instance._tmp_var_bound_strip_ub:
                var.setub(instance._tmp_var_bound_strip_ub[var])
            if var in instance._tmp_var_bound_strip_domain:
                var.domain = instance._tmp_var_bound_strip_domain[var]
        del instance._tmp_var_bound_strip_lb
        del instance._tmp_var_bound_strip_ub
        del instance._tmp_var_bound_strip_domain
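A usage sketch for the bound stripper above: with reversible=True the original bounds and domain are stored so that revert() can restore them (toy model; the class defined above is assumed to be importable).

from pyomo.environ import ConcreteModel, NonNegativeIntegers, Var

m = ConcreteModel()
m.x = Var(domain=NonNegativeIntegers, bounds=(1, 5))

xfrm = VariableBoundStripper()   # class defined above
xfrm.apply_to(m, reversible=True)
assert m.x.lb is None and m.x.ub is None   # bounds (and domain) stripped

xfrm.revert(m)
assert m.x.lb == 1 and m.x.ub == 5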
Example #10
class ReplaceVariables(NonIsomorphicTransformation):
    """Replace variables in a model or block with other variables.

    Keyword arguments below are specified for the ``apply_to(instance, **kwargs)``
    method.

    """
    CONFIG = ConfigBlock()
    CONFIG.declare(
        "substitute",
        ConfigValue(default=[],
                    description=
                    "List-like of tuples where the first item in a tuple is a "
                    "Pyomo variable to be replaced and the second item in the "
                    "tuple is a Pyomo variable to replace it with. This "
                    "transformation is not reversible."))

    __doc__ = add_docstring_list(__doc__, CONFIG)

    @staticmethod
    def replace(instance, substitute):
        # Create the replacement dict. Do some argument validation and indexed
        # var handling
        d = {}
        for r in substitute:
            if not (_is_var(r[0]) and _is_var(r[1])):
                raise TypeError(
                    "Replace only allows variables to be replaced, {} is type {}"
                    " and {} is type {}".format(r[0], type(r[0]), r[1],
                                                type(r[1])))
            if r[0].is_indexed() != r[1].is_indexed():
                raise TypeError(
                    "IndexedVars must be replaced by IndexedVars, {} is type {}"
                    " and {} is type {}".format(r[0], type(r[0]), r[1],
                                                type(r[1])))
            if r[0].is_indexed() and r[1].is_indexed():
                if not r[0].index_set().issubset(r[1].index_set()):
                    raise ValueError("The index set of {} must be a subset of"
                                     " {}.".format(r[0], r[1]))
                for i in r[0]:
                    d[id(r[0][i])] = r[1][i]
            else:
                #scalar replace
                d[id(r[0])] = r[1]

        # Replacement Visitor
        vis = EXPR.ExpressionReplacementVisitor(
            substitute=d,
            descend_into_named_expressions=True,
            remove_named_expressions=False,
        )

        # Do replacements in Expressions, Constraints, and Objectives
        for c in instance.component_data_objects(
            (Constraint, Expression, Objective),
                descend_into=True,
                active=True):
            c.set_value(expr=vis.dfs_postorder_stack(c.expr))

    def _apply_to(self, instance, **kwds):
        """
        Apply the transformation.  This is called by ``apply_to`` in the
        superclass, and should not be called directly.  ``apply_to`` takes the
        same arguments.

        Args:
            instance: A block or model to apply the transformation to
            substitute: A list-like of two-element list-likes.  Each two-element
                list-like specifies a replacement of the first variable by the
                second.  SimpleVar, IndexedVar, _GeneralVarData, and Reference are
                all accepted types.

        Returns:
            None
        """
        config = self.CONFIG(kwds)
        self.replace(instance, config.substitute)
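A usage sketch for the substitution above: every occurrence of x in constraints and the objective is rewritten in terms of y (toy model; the class defined above is assumed to be importable, and recall the transformation is not reversible).

from pyomo.environ import ConcreteModel, Constraint, Objective, Var

m = ConcreteModel()
m.x = Var()
m.y = Var()
m.c = Constraint(expr=m.x + 3 <= 10)
m.obj = Objective(expr=m.x ** 2)

ReplaceVariables().apply_to(m, substitute=[(m.x, m.y)])   # class defined above
# m.c and m.obj are now expressed in terms of m.y instead of m.x.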
Example #11
class GDPoptSolver(object):
    """Decomposition solver for Generalized Disjunctive Programming (GDP) problems.

    The GDPopt (Generalized Disjunctive Programming optimizer) solver applies a
    variety of decomposition-based approaches to solve Generalized Disjunctive
    Programming (GDP) problems. GDP models can include nonlinear, continuous
    variables and constraints, as well as logical conditions.

    These approaches include:

    - Outer approximation
    - Partial surrogate cuts [pending]
    - Generalized Bender decomposition [pending]

    This solver implementation was developed by Carnegie Mellon University in the
    research group of Ignacio Grossmann.

    For nonconvex problems, the bounds self.LB and self.UB may not be rigorous.

    Questions: Please make a post at StackOverflow and/or contact Qi Chen
    <https://github.com/qtothec>.

    Keyword arguments below are specified for the :code:`solve` function.

    """

    _metasolver = False

    CONFIG = ConfigBlock("GDPopt")
    CONFIG.declare("iterlim", ConfigValue(
        default=30, domain=NonNegativeInt,
        description="Iteration limit."
    ))
    CONFIG.declare("strategy", ConfigValue(
        default="LOA", domain=In(["LOA", "GLOA"]),
        description="Decomposition strategy to use."
    ))
    CONFIG.declare("init_strategy", ConfigValue(
        default="set_covering", domain=In(valid_init_strategies.keys()),
        description="Initialization strategy to use.",
        doc="""Selects the initialization strategy to use when generating
        the initial cuts to construct the master problem."""
    ))
    CONFIG.declare("custom_init_disjuncts", ConfigList(
        # domain=ComponentSets of Disjuncts,
        default=None,
        description="List of disjunct sets to use for initialization."
    ))
    CONFIG.declare("max_slack", ConfigValue(
        default=1000, domain=NonNegativeFloat,
        description="Upper bound on slack variables for OA"
    ))
    CONFIG.declare("OA_penalty_factor", ConfigValue(
        default=1000, domain=NonNegativeFloat,
        description="Penalty multiplication term for slack variables on the "
        "objective value."
    ))
    CONFIG.declare("set_cover_iterlim", ConfigValue(
        default=8, domain=NonNegativeInt,
        description="Limit on the number of set covering iterations."
    ))
    CONFIG.declare("mip_solver", ConfigValue(
        default="gurobi",
        description="Mixed integer linear solver to use."
    ))
    CONFIG.declare("mip_presolve", ConfigValue(
        default=True,
        description="Flag to enable or diable Pyomo MIP presolve. Default=True.",
        domain=bool
    ))
    mip_solver_args = CONFIG.declare(
        "mip_solver_args", ConfigBlock(implicit=True))
    CONFIG.declare("nlp_solver", ConfigValue(
        default="ipopt",
        description="Nonlinear solver to use"))
    nlp_solver_args = CONFIG.declare(
        "nlp_solver_args", ConfigBlock(implicit=True))
    CONFIG.declare("subproblem_presolve", ConfigValue(
        default=True,
        description="Flag to enable or disable subproblem presolve. Default=True.",
        domain=bool
    ))
    CONFIG.declare("minlp_solver", ConfigValue(
        default="baron",
        description="MINLP solver to use"
    ))
    minlp_solver_args = CONFIG.declare(
        "minlp_solver_args", ConfigBlock(implicit=True))
    CONFIG.declare("call_before_master_solve", ConfigValue(
        default=_DoNothing,
        description="callback hook before calling the master problem solver"
    ))

    CONFIG.declare("call_after_master_solve", ConfigValue(
        default=_DoNothing,
        description="callback hook after a solution of the master problem"
    ))
    CONFIG.declare("call_before_subproblem_solve", ConfigValue(
        default=_DoNothing,
        description="callback hook before calling the subproblem solver"
    ))
    CONFIG.declare("call_after_subproblem_solve", ConfigValue(
        default=_DoNothing,
        description="callback hook after a solution of the "
        "nonlinear subproblem"
    ))
    CONFIG.declare("call_after_subproblem_feasible", ConfigValue(
        default=_DoNothing,
        description="callback hook after feasible solution of "
        "the nonlinear subproblem"
    ))
    CONFIG.declare("algorithm_stall_after", ConfigValue(
        default=2,
        description="number of non-improving master iterations after which "
        "the algorithm will stall and exit."
    ))
    CONFIG.declare("tee", ConfigValue(
        default=False,
        description="Stream output to terminal.",
        domain=bool
    ))
    CONFIG.declare("logger", ConfigValue(
        default='pyomo.contrib.gdpopt',
        description="The logger object or name to use for reporting.",
        domain=a_logger
    ))
    CONFIG.declare("calc_disjunctive_bounds", ConfigValue(
        default=False,
        description="Calculate special disjunctive variable bounds for GLOA. False by default.",
        domain=bool
    ))
    CONFIG.declare("obbt_disjunctive_bounds", ConfigValue(
        default=False,
        description="Use optimality-based bounds tightening rather than feasibility-based bounds tightening "
        "to compute disjunctive variable bounds. False by default.",
        domain=bool
    ))
    CONFIG.declare("bound_tolerance", ConfigValue(
        default=1E-6, domain=NonNegativeFloat,
        description="Tolerance for bound convergence."
    ))
    CONFIG.declare("small_dual_tolerance", ConfigValue(
        default=1E-8,
        description="When generating cuts, small duals multiplied "
        "by expressions can cause problems. Exclude all duals "
        "smaller in absolue value than the following."
    ))
    CONFIG.declare("integer_tolerance", ConfigValue(
        default=1E-5,
        description="Tolerance on integral values."
    ))
    CONFIG.declare("constraint_tolerance", ConfigValue(
        default=1E-6,
        description="Tolerance on constraint satisfaction."
    ))
    CONFIG.declare("variable_tolerance", ConfigValue(
        default=1E-8,
        description="Tolerance on variable bounds."
    ))
    CONFIG.declare("zero_tolerance", ConfigValue(
        default=1E-15,
        description="Tolerance on variable equal to zero."))
    CONFIG.declare("round_discrete_vars", ConfigValue(
        default=True,
        description="flag to round subproblem discrete variable values to the nearest integer. "
        "Rounding is done before fixing disjuncts."
    ))
    CONFIG.declare("force_subproblem_nlp", ConfigValue(
        default=False,
        description="Force subproblems to be NLP, even if discrete variables exist."
    ))

    __doc__ = add_docstring_list(__doc__, CONFIG)

    def available(self, exception_flag=True):
        """Check if solver is available.

        TODO: For now, it is always available. However, sub-solvers may not
        always be available, and so this should reflect that possibility.

        """
        return True

    def version(self):
        """Return a 3-tuple describing the solver version."""
        return __version__

    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        This function performs all of the GDPopt solver setup and problem
        validation. It then calls upon helper functions to construct the
        initial master approximation and iteration loop.

        Args:
            model (Block): a Pyomo model or block to be solved

        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        solve_data = GDPoptSolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total'), \
                restore_logger_level(config.logger), \
                create_utility_block(model, 'GDPopt_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info(
                "Starting GDPopt version %s using %s algorithm"
                % (".".join(map(str, self.version())), config.strategy)
            )
            config.logger.info(
                """
If you use this software, you may cite the following:
- Implementation:
    Chen, Q; Johnson, ES; Siirola, JD; Grossmann, IE.
    Pyomo.GDP: Disjunctive Models in Python. 
    Proc. of the 13th Intl. Symposium on Process Systems Eng.
    San Diego, 2018.
- LOA algorithm:
    Türkay, M; Grossmann, IE.
    Logic-based MINLP algorithms for the optimal synthesis of process networks.
    Comp. and Chem. Eng. 1996, 20(8), 959–978.
    DOI: 10.1016/0098-1354(95)00219-7.
- GLOA algorithm:
    Lee, S; Grossmann, IE.
    A Global Optimization Algorithm for Nonconvex Generalized Disjunctive Programming and Applications to Process Systems
    Comp. and Chem. Eng. 2001, 25, 1675-1697.
    DOI: 10.1016/S0098-1354(01)00732-3
                """.strip()
            )
            solve_data.results.solver.name = 'GDPopt %s - %s' % (
                str(self.version()), config.strategy)

            solve_data.original_model = model
            solve_data.working_model = model.clone()
            GDPopt = solve_data.working_model.GDPopt_utils
            setup_results_object(solve_data, config)

            solve_data.current_strategy = config.strategy

            # Verify that objective has correct form
            process_objective(solve_data, config)

            # Save model initial values. These are used later to initialize NLP
            # subproblems.
            solve_data.initial_var_values = list(
                v.value for v in GDPopt.variable_list)
            solve_data.best_solution_found = None

            # Validate the model to ensure that GDPopt is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Integer cuts exclude particular discrete decisions
            GDPopt.integer_cuts = ConstraintList(doc='integer cuts')

            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default, unless the initial model has no
            # discrete decisions.

            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary GDPopt_integer_cuts ConstraintList.
            GDPopt.no_backtracking = ConstraintList(
                doc='explored integer cuts')

            # Set up iteration counters
            solve_data.master_iteration = 0
            solve_data.mip_iteration = 0
            solve_data.nlp_iteration = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.iteration_log = {}

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.feasible_solution_improved = False

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                GDPopt_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                GDPopt_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in working model
                copy_var_list_values(
                    from_list=solve_data.best_solution_found.GDPopt_utils.variable_list,
                    to_list=GDPopt.variable_list,
                    config=config)
                # Update values in original model
                copy_var_list_values(
                    GDPopt.variable_list,
                    solve_data.original_model.GDPopt_utils.variable_list,
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total

        solve_data.results.solver.iterations = solve_data.master_iteration

        return solve_data.results

    #
    # Support "with" statements.
    #
    def __enter__(self):
        return self

    def __exit__(self, t, v, traceback):
        pass
Example #12
            solve_data.results.solver.wallclock_time = solve_data.timing.total
            solve_data.results.solver.iterations = solve_data.mip_iter
            solve_data.results.solver.num_infeasible_nlp_subproblem = solve_data.nlp_infeasible_counter
            solve_data.results.solver.best_solution_found_time = solve_data.best_solution_found_time
            solve_data.results.solver.primal_integral = get_primal_integral(solve_data, config)
            solve_data.results.solver.dual_integral = get_dual_integral(solve_data, config)
            solve_data.results.solver.primal_dual_gap_integral = solve_data.results.solver.primal_integral + \
                solve_data.results.solver.dual_integral
            config.logger.info(' {:<25}:   {:>7.4f} '.format(
                'Primal-dual gap integral', solve_data.results.solver.primal_dual_gap_integral))

            if config.single_tree:
                solve_data.results.solver.num_nodes = solve_data.nlp_iter - \
                    (1 if config.init_strategy == 'rNLP' else 0)

        return solve_data.results

    #
    # Support 'with' statements.
    #
    def __enter__(self):
        return self

    def __exit__(self, t, v, traceback):
        pass


# Add the CONFIG arguments to the solve method docstring
MindtPySolver.solve.__doc__ = add_docstring_list(
    MindtPySolver.solve.__doc__, MindtPySolver.CONFIG, indent_by=8)
Example #13
class GDPoptSolver(object):
    """Decomposition solver for Generalized Disjunctive Programming (GDP) problems.

    The GDPopt (Generalized Disjunctive Programming optimizer) solver applies a
    variety of decomposition-based approaches to solve Generalized Disjunctive
    Programming (GDP) problems. GDP models can include nonlinear, continuous
    variables and constraints, as well as logical conditions.

    These approaches include:

    - Outer approximation
    - Partial surrogate cuts [pending]
    - Generalized Bender decomposition [pending]

    This solver implementation was developed by Carnegie Mellon University in the
    research group of Ignacio Grossmann.

    For nonconvex problems, the bounds self.LB and self.UB may not be rigorous.

    Questions: Please make a post at StackOverflow and/or contact Qi Chen
    <https://github.com/qtothec>.

    Keyword arguments below are specified for the :code:`solve` function.

    """

    _metasolver = False

    CONFIG = ConfigBlock("GDPopt")
    CONFIG.declare(
        "iterlim",
        ConfigValue(default=30,
                    domain=NonNegativeInt,
                    description="Iteration limit."))
    CONFIG.declare(
        "strategy",
        ConfigValue(default="LOA",
                    domain=In(["LOA", "GLOA"]),
                    description="Decomposition strategy to use."))
    CONFIG.declare(
        "init_strategy",
        ConfigValue(
            default="set_covering",
            domain=In(valid_init_strategies.keys()),
            description="Initialization strategy to use.",
            doc="""Selects the initialization strategy to use when generating
        the initial cuts to construct the master problem."""))
    CONFIG.declare(
        "custom_init_disjuncts",
        ConfigList(
            # domain=ComponentSets of Disjuncts,
            default=None,
            description="List of disjunct sets to use for initialization."))
    CONFIG.declare(
        "max_slack",
        ConfigValue(default=1000,
                    domain=NonNegativeFloat,
                    description="Upper bound on slack variables for OA"))
    CONFIG.declare(
        "OA_penalty_factor",
        ConfigValue(
            default=1000,
            domain=NonNegativeFloat,
            description="Penalty multiplication term for slack variables on the "
            "objective value."))
    CONFIG.declare(
        "set_cover_iterlim",
        ConfigValue(
            default=8,
            domain=NonNegativeInt,
            description="Limit on the number of set covering iterations."))
    CONFIG.declare(
        "mip_solver",
        ConfigValue(default="gurobi",
                    description="Mixed integer linear solver to use."))
    CONFIG.declare(
        "mip_presolve",
        ConfigValue(
            default="true",
            description=
            "Flag to enable or diable Pyomo MIP presolve. Default=True.",
            domain=bool))
    mip_solver_args = CONFIG.declare("mip_solver_args",
                                     ConfigBlock(implicit=True))
    CONFIG.declare(
        "nlp_solver",
        ConfigValue(default="ipopt", description="Nonlinear solver to use"))
    nlp_solver_args = CONFIG.declare("nlp_solver_args",
                                     ConfigBlock(implicit=True))
    CONFIG.declare(
        "nlp_presolve",
        ConfigValue(
            default=True,
            description="Flag to enable or disable NLP presolve. Default=True.",
            domain=bool))
    CONFIG.declare(
        "call_before_master_solve",
        ConfigValue(
            default=_DoNothing,
            description="callback hook before calling the master problem solver"
        ))
    CONFIG.declare(
        "call_after_master_solve",
        ConfigValue(
            default=_DoNothing,
            description="callback hook after a solution of the master problem")
    )
    CONFIG.declare(
        "call_before_subproblem_solve",
        ConfigValue(
            default=_DoNothing,
            description="callback hook before calling the subproblem solver"))
    CONFIG.declare(
        "call_after_subproblem_solve",
        ConfigValue(default=_DoNothing,
                    description="callback hook after a solution of the "
                    "nonlinear subproblem"))
    CONFIG.declare(
        "call_after_subproblem_feasible",
        ConfigValue(default=_DoNothing,
                    description="callback hook after feasible solution of "
                    "the nonlinear subproblem"))
    CONFIG.declare(
        "algorithm_stall_after",
        ConfigValue(
            default=2,
            description="number of non-improving master iterations after which "
            "the algorithm will stall and exit."))
    CONFIG.declare(
        "tee",
        ConfigValue(default=False,
                    description="Stream output to terminal.",
                    domain=bool))
    CONFIG.declare(
        "logger",
        ConfigValue(
            default='pyomo.contrib.gdpopt',
            description="The logger object or name to use for reporting.",
            domain=a_logger))
    CONFIG.declare(
        "bound_tolerance",
        ConfigValue(default=1E-6,
                    domain=NonNegativeFloat,
                    description="Tolerance for bound convergence."))
    CONFIG.declare(
        "small_dual_tolerance",
        ConfigValue(default=1E-8,
                    description="When generating cuts, small duals multiplied "
                    "by expressions can cause problems. Exclude all duals "
                    "smaller in absolue value than the following."))
    CONFIG.declare(
        "integer_tolerance",
        ConfigValue(default=1E-5, description="Tolerance on integral values."))
    CONFIG.declare(
        "constraint_tolerance",
        ConfigValue(default=1E-6,
                    description="Tolerance on constraint satisfaction."))
    CONFIG.declare(
        "variable_tolerance",
        ConfigValue(default=1E-8, description="Tolerance on variable bounds."))
    CONFIG.declare(
        "zero_tolerance",
        ConfigValue(default=1E-15,
                    description="Tolerance on variable equal to zero."))
    CONFIG.declare(
        "round_NLP_binaries",
        ConfigValue(
            default=True,
            description="flag to round binary values to exactly 0 or 1. "
            "Rounding is done before fixing disjuncts."))
    CONFIG.declare(
        "reformulate_integer_vars_using",
        ConfigValue(
            default=None,
            description="The method to use for reformulating integer variables "
            "into binary for this solver."))

    __doc__ = add_docstring_list(__doc__, CONFIG)

    def available(self, exception_flag=True):
        """Check if solver is available.

        TODO: For now, it is always available. However, sub-solvers may not
        always be available, and so this should reflect that possibility.

        """
        return True

    def version(self):
        """Return a 3-tuple describing the solver version."""
        return __version__

    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        This function performs all of the GDPopt solver setup and problem
        validation. It then calls upon helper functions to construct the
        initial master approximation and iteration loop.

        Args:
            model (Block): a Pyomo model or block to be solved

        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        solve_data = GDPoptSolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total'), \
                restore_logger_level(config.logger), \
                create_utility_block(model, 'GDPopt_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info("---Starting GDPopt---")

            solve_data.original_model = model

            build_ordered_component_lists(model, solve_data, prefix='orig')
            solve_data.working_model = model.clone()
            GDPopt = solve_data.working_model.GDPopt_utils
            record_original_model_statistics(solve_data, config)

            solve_data.current_strategy = config.strategy

            # Reformulate integer variables to binary
            reformulate_integer_variables(solve_data.working_model, config)
            process_objective(solve_data, config)

            # Save ordered lists of main modeling components, so that data can
            # be easily transferred between future model clones.
            build_ordered_component_lists(solve_data.working_model,
                                          solve_data,
                                          prefix='working')
            record_working_model_statistics(solve_data, config)
            solve_data.results.solver.name = 'GDPopt %s - %s' % (str(
                self.version()), config.strategy)

            # Save model initial values. These are used later to initialize NLP
            # subproblems.
            solve_data.initial_var_values = list(
                v.value for v in GDPopt.working_var_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = solve_data.initial_var_values

            # Validate the model to ensure that GDPopt is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Maps in order to keep track of certain generated constraints
            GDPopt.oa_cut_map = ComponentMap()

            # Integer cuts exclude particular discrete decisions
            GDPopt.integer_cuts = ConstraintList(doc='integer cuts')

            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default, unless the initial model has no
            # discrete decisions.

            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary GDPopt_integer_cuts ConstraintList.
            GDPopt.no_backtracking = ConstraintList(
                doc='explored integer cuts')

            # Set up iteration counters
            solve_data.master_iteration = 0
            solve_data.mip_iteration = 0
            solve_data.nlp_iteration = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.iteration_log = {}

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.feasible_solution_improved = False

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                GDPopt_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                GDPopt_iteration_loop(solve_data, config)

            # Update values in working model
            copy_var_list_values(from_list=solve_data.best_solution_found,
                                 to_list=GDPopt.working_var_list,
                                 config=config)
            GDPopt.objective_value.set_value(
                value(solve_data.working_objective_expr, exception=False))

            # Update values in original model
            copy_var_list_values(
                GDPopt.orig_var_list,
                solve_data.original_model.GDPopt_utils.orig_var_list, config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing

        return solve_data.results

    #
    # Support "with" statements.
    #
    def __enter__(self):
        return self

    def __exit__(self, t, v, traceback):
        pass
Example #14
class FixedVarPropagator(IsomorphicTransformation):
    """Propagate variable fixing for equalities of type :math:`x = y`.

    If :math:`x` is fixed and :math:`y` is not fixed, then this transformation
    will fix :math:`y` to the value of :math:`x`.

    This transformation can also be performed as a temporary transformation,
    whereby the transformed variables are saved and can be later unfixed.

    Keyword arguments below are specified for the ``apply_to`` and
    ``create_using`` functions.

    """

    CONFIG = ConfigBlock()
    CONFIG.declare("tmp", ConfigValue(
        default=False, domain=bool,
        description="True to store the set of transformed variables and "
        "their old states so that they can be later restored."
    ))

    __doc__ = add_docstring_list(__doc__, CONFIG)

    def _apply_to(self, instance, **kwds):
        config = self.CONFIG(kwds)
        if config.tmp and not hasattr(instance, '_tmp_propagate_fixed'):
            instance._tmp_propagate_fixed = ComponentSet()
        eq_var_map, relevant_vars = _build_equality_set(instance)
        #: ComponentSet: The set of all fixed variables
        fixed_vars = ComponentSet((v for v in relevant_vars if v.fixed))
        newly_fixed = _detect_fixed_variables(instance)
        if config.tmp:
            instance._tmp_propagate_fixed.update(newly_fixed)
        fixed_vars.update(newly_fixed)
        processed = ComponentSet()
        # Go through each fixed variable to propagate the 'fixed' status to all
        # equality-linked variables.
        for v1 in fixed_vars:
            # If we have already processed the variable, skip it.
            if v1 in processed:
                continue

            eq_set = eq_var_map.get(v1, ComponentSet([v1]))
            for v2 in eq_set:
                if (v2.fixed and value(v1) != value(v2)):
                    raise InfeasibleConstraintException(
                        'Variables {} and {} have conflicting fixed '
                        'values of {} and {}, but are linked by '
                        'equality constraints.'
                        .format(v1.name,
                                v2.name,
                                value(v1),
                                value(v2)))
                elif not v2.fixed:
                    v2.fix(value(v1))
                    if config.tmp:
                        instance._tmp_propagate_fixed.add(v2)
            # Add all variables in the equality set to the set of processed
            # variables.
            processed.update(eq_set)

    def revert(self, instance):
        """Revert variables fixed by the transformation."""
        for var in instance._tmp_propagate_fixed:
            var.unfix()
        del instance._tmp_propagate_fixed
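
A short sketch of applying the transformation above temporarily and then
reverting it; the registration name 'contrib.propagate_fixed_vars' is an
assumption, since the alias declaration is not shown in this snippet.

from pyomo.environ import ConcreteModel, Constraint, TransformationFactory, Var

m = ConcreteModel()
m.x = Var()
m.y = Var()
m.c = Constraint(expr=m.x == m.y)
m.x.fix(3.0)

xfrm = TransformationFactory('contrib.propagate_fixed_vars')
xfrm.apply_to(m, tmp=True)   # y is now fixed to 3.0 as well
xfrm.revert(m)               # y is unfixed again; x remains fixed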
Exemplo n.º 15
0
class VarBoundPropagator(IsomorphicTransformation):
    """Propagate variable bounds for equalities of type :math:`x = y`.

    If :math:`x` has a tighter bound than :math:`y`, then this transformation
    will adjust the bounds on :math:`y` to match those of :math:`x`.

    Keyword arguments below are specified for the ``apply_to`` and
    ``create_using`` functions.

    """

    CONFIG = ConfigBlock()
    CONFIG.declare("tmp", ConfigValue(
        default=False, domain=bool,
        description="True to store the set of transformed variables and "
        "their old states so that they can be later restored."
    ))

    __doc__ = add_docstring_list(__doc__, CONFIG)

    def _apply_to(self, instance, **kwds):
        config = self.CONFIG(kwds)
        if config.tmp and not hasattr(instance, '_tmp_propagate_original_bounds'):
            instance._tmp_propagate_original_bounds = Suffix(
                direction=Suffix.LOCAL)
        eq_var_map, relevant_vars = _build_equality_set(instance)
        processed = ComponentSet()
        # Go through each variable in an equality set to propagate the variable
        # bounds to all equality-linked variables.
        for var in relevant_vars:
            # If we have already processed the variable, skip it.
            if var in processed:
                continue

            var_equality_set = eq_var_map.get(var, ComponentSet([var]))

            #: variable lower bounds in the equality set
            lbs = [v.lb for v in var_equality_set if v.has_lb()]
            max_lb = max(lbs) if len(lbs) > 0 else None
            #: variable upper bounds in the equality set
            ubs = [v.ub for v in var_equality_set if v.has_ub()]
            min_ub = min(ubs) if len(ubs) > 0 else None

            # Check for error due to bound cross-over
            if max_lb is not None and min_ub is not None and max_lb > min_ub:
                # the lower bound is above the upper bound. Raise an exception.
                # get variable with the highest lower bound
                v1 = next(v for v in var_equality_set if v.lb == max_lb)
                # get variable with the lowest upper bound
                v2 = next(v for v in var_equality_set if v.ub == min_ub)
                raise InfeasibleConstraintException(
                    'Variable {} has a lower bound {} '
                    '> the upper bound {} of variable {}, '
                    'but they are linked by equality constraints.'
                    .format(v1.name, value(v1.lb), value(v2.ub), v2.name))

            for v in var_equality_set:
                if config.tmp:
                    # TODO warn if overwriting
                    instance._tmp_propagate_original_bounds[v] = (
                        v.lb, v.ub)
                v.setlb(max_lb)
                v.setub(min_ub)

            processed.update(var_equality_set)

    def revert(self, instance):
        """Revert variable bounds."""
        for v in instance._tmp_propagate_original_bounds:
            old_LB, old_UB = instance._tmp_propagate_original_bounds[v]
            v.setlb(old_LB)
            v.setub(old_UB)
        del instance._tmp_propagate_original_bounds
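
An analogous sketch for the bound-propagation transformation above; the
registration name 'contrib.propagate_eq_var_bounds' is an assumption, since
the alias declaration is not shown in this snippet.

from pyomo.environ import ConcreteModel, Constraint, TransformationFactory, Var

m = ConcreteModel()
m.x = Var(bounds=(0, 10))
m.y = Var(bounds=(2, 8))
m.c = Constraint(expr=m.x == m.y)

xfrm = TransformationFactory('contrib.propagate_eq_var_bounds')
xfrm.apply_to(m, tmp=True)
print(m.x.bounds, m.y.bounds)  # both variables now have bounds (2, 8)
xfrm.revert(m)                 # original bounds restored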
Exemplo n.º 16
0
class MultiStart(object):
    """Solver wrapper that initializes at multiple starting points.

    # TODO: also return appropriate duals

    For theoretical underpinning, see
    https://www.semanticscholar.org/paper/How-many-random-restarts-are-enough-Dick-Wong/55b248b398a03dc1ac9a65437f88b835554329e0

    Keyword arguments below are specified for the ``solve`` function.

    """

    CONFIG = ConfigBlock("MultiStart")
    CONFIG.declare(
        "strategy",
        ConfigValue(
            default="rand",
            domain=In([
                "rand", "midpoint_guess_and_bound", "rand_guess_and_bound",
                "rand_distributed"
            ]),
            description="Specify the restart strategy. Defaults to rand.",
            doc="""Specify the restart strategy.

        - "rand": random choice between variable bounds
        - "midpoint_guess_and_bound": midpoint between current value and farthest bound
        - "rand_guess_and_bound": random choice between current value and farthest bound
        - "rand_distributed": random choice among evenly distributed values
        """))
    CONFIG.declare(
        "solver",
        ConfigValue(default="ipopt",
                    description="solver to use, defaults to ipopt"))
    CONFIG.declare(
        "solver_args",
        ConfigValue(
            default={},
            description="Dictionary of keyword arguments to pass to the solver."
        ))
    CONFIG.declare(
        "iterations",
        ConfigValue(
            default=10,
            description="Specify the number of iterations, defaults to 10. "
            "If -1 is specified, the high confidence stopping rule will be used"
        ))
    CONFIG.declare(
        "stopping_mass",
        ConfigValue(
            default=0.5,
            description="Maximum allowable estimated missing mass of optima.",
            doc="""Maximum allowable estimated missing mass of optima for the
        high confidence stopping rule, only used with the random strategy.
        The lower the parameter, the stricter the rule.
        Value bounded in (0, 1]."""))
    CONFIG.declare(
        "stopping_delta",
        ConfigValue(
            default=0.5,
            description=
            "1 minus the confidence level required for the stopping rule.",
            doc=
            """1 minus the confidence level required for the stopping rule for the
        high confidence stopping rule, only used with the random strategy.
        The lower the parameter, the stricter the rule.
        Value bounded in (0, 1]."""))
    CONFIG.declare(
        "suppress_unbounded_warning",
        ConfigValue(
            default=False,
            domain=bool,
            description=
            "True to suppress warning for skipping unbounded variables."))
    CONFIG.declare(
        "HCS_max_iterations",
        ConfigValue(
            default=1000,
            description=
            "Maximum number of iterations before interrupting the high confidence stopping rule."
        ))
    CONFIG.declare(
        "HCS_tolerance",
        ConfigValue(
            default=0,
            description=
            "Tolerance on HCS objective value equality. Defaults to Python float equality precision."
        ))

    __doc__ = add_docstring_list(__doc__, CONFIG)

    def available(self, exception_flag=True):
        """Check if solver is available.

        TODO: For now, it is always available. However, sub-solvers may not
        always be available, and so this should reflect that possibility.

        """
        return True

    def solve(self, model, **kwds):
        # initialize keyword args
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)

        # initialize the solver
        solver = SolverFactory(config.solver)

        # Model sense
        objectives = model.component_data_objects(Objective, active=True)
        obj = next(objectives, None)
        if next(objectives, None) is not None:
            raise RuntimeError(
                "Multistart solver is unable to handle model with multiple active objectives."
            )
        if obj is None:
            raise RuntimeError(
                "Multistart solver is unable to handle model with no active objective."
            )

        # store objective values and objective/result information for best
        # solution obtained
        objectives = []
        obj_sign = 1 if obj.sense == minimize else -1
        best_objective = float('inf') * obj_sign
        best_model = model
        best_result = None

        try:
            # create temporary variable list for value transfer
            tmp_var_list_name = unique_component_name(model, "_vars_list")
            setattr(
                model, tmp_var_list_name,
                list(model.component_data_objects(ctype=Var,
                                                  descend_into=True)))

            best_result = result = solver.solve(model, **config.solver_args)
            if (result.solver.status is SolverStatus.ok
                    and result.solver.termination_condition is tc.optimal):
                # use the objective located above rather than assuming the
                # model has an objective literally named 'obj'
                obj_val = value(obj.expr)
                best_objective = obj_val
                objectives.append(obj_val)
            num_iter = 0
            max_iter = config.iterations
            # if HCS rule is specified, reinitialize completely randomly until
            # rule specifies stopping
            using_HCS = config.iterations == -1
            HCS_completed = False
            if using_HCS:
                assert config.strategy == "rand", \
                    "High confidence stopping rule requires rand strategy."
                max_iter = config.HCS_max_iterations

            while num_iter < max_iter:
                if using_HCS and should_stop(objectives, config.stopping_mass,
                                             config.stopping_delta,
                                             config.HCS_tolerance):
                    HCS_completed = True
                    break
                num_iter += 1
                # at first iteration, solve the originally passed model
                m = model.clone() if num_iter > 1 else model
                reinitialize_variables(m, config)
                result = solver.solve(m, **config.solver_args)
                if (result.solver.status is SolverStatus.ok
                        and result.solver.termination_condition is tc.optimal):
                    # look up the active objective on the (possibly cloned)
                    # model rather than assuming it is named 'obj'
                    m_obj = next(m.component_data_objects(
                        Objective, active=True))
                    obj_val = value(m_obj.expr)
                    objectives.append(obj_val)
                    if obj_val * obj_sign < obj_sign * best_objective:
                        # objective has improved
                        best_objective = obj_val
                        best_model = m
                        best_result = result
                if num_iter == 1:
                    # if it's the first iteration, set the best_model and
                    # best_result regardless of solution status in case the
                    # model is infeasible.
                    best_model = m
                    best_result = result

            if using_HCS and not HCS_completed:
                logger.warning(
                    "High confidence stopping rule was unable to complete "
                    "after %s iterations. To increase this limit, change the "
                    "HCS_max_iterations flag." % num_iter)

            # if no better result was found than initial solve, then return
            # that without needing to copy variables.
            if best_model is model:
                return best_result

            # reassign the given model's vars to the best model's vars
            orig_var_list = getattr(model, tmp_var_list_name)
            best_soln_var_list = getattr(best_model, tmp_var_list_name)
            for orig_var, new_var in zip(orig_var_list, best_soln_var_list):
                if not orig_var.is_fixed():
                    orig_var.value = new_var.value

            return best_result
        finally:
            # Remove temporary variable list
            delattr(model, tmp_var_list_name)

    def __enter__(self):
        return self

    def __exit__(self, t, v, traceback):
        pass
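
A minimal sketch of driving the wrapper above, assuming it is registered with
the SolverFactory as 'multistart' and that ipopt is installed; the model and
option values are illustrative only.

from pyomo.environ import (ConcreteModel, Objective, SolverFactory, Var,
                           maximize, sin)

m = ConcreteModel()
m.x = Var(bounds=(0, 10), initialize=1.0)
# multimodal objective: several local optima over the bounds
m.obj = Objective(expr=m.x * sin(m.x), sense=maximize)

result = SolverFactory('multistart').solve(
    m, solver='ipopt', strategy='rand', iterations=20)
print(m.x.value, m.obj())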
Exemplo n.º 17
0
def _generate_filtered_docstring():
    cfg = _trf_config()
    return add_docstring_list(TrustRegionSolver.solve.__doc__,
                              cfg,
                              indent_by=8)
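
The helper above only formats an existing docstring; a small self-contained
sketch of the same pattern, assuming add_docstring_list is importable from
pyomo.common.config as in the other examples:

from pyomo.common.config import ConfigBlock, ConfigValue, add_docstring_list

CONFIG = ConfigBlock("Demo")
CONFIG.declare("max_iter", ConfigValue(
    default=50, domain=int, description="Maximum number of iterations."))

def demo_solve():
    """Solve the demo problem.

    """

# Append a generated "Keyword Arguments" section describing CONFIG entries
demo_solve.__doc__ = add_docstring_list(demo_solve.__doc__, CONFIG, indent_by=8)
print(demo_solve.__doc__)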
Exemplo n.º 18
0
                orphans = vars_not_accounted_for - vars_accounted_for
                orphan_string = ", ".join("'%s'" % v.name for v in orphans)
                raise GDP_Error("Partition specified for disjunction "
                                "containing Disjunct '%s' does not "
                                "include all the variables that appear "
                                "in the disjunction. The following "
                                "variables are not assigned to any part "
                                "of the partition: %s" %
                                (disjunct.name, orphan_string))
            transformed_constraint[len(transformed_constraint)] = (
                sum(v for v in split_aux_vars) <= rhs - repn.constant)
        # deactivate the constraint since we've transformed it
        cons.deactivate()

    def _warn_for_active_disjunct(self, disjunct, parent_disjunct,
                                  transformed_parent_disjunct, transBlock,
                                  partition):
        _warn_for_active_disjunct(disjunct, parent_disjunct, NAME_BUFFER)


# Add the CONFIG arguments to the transformation's docstring
PartitionDisjuncts_Transformation.__doc__ = add_docstring_list(
    PartitionDisjuncts_Transformation.__doc__,
    PartitionDisjuncts_Transformation.CONFIG,
    indent_by=8)
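
A hedged usage sketch for the transformation above; the registration name
'gdp.partition_disjuncts' and the num_partitions keyword are assumptions
about this API, since the CONFIG declarations are not shown in this snippet.

from pyomo.environ import TransformationFactory

# 'model' is a hypothetical GDP model; every variable appearing in a
# disjunction must be covered by the partition, or the GDP_Error raised
# above is triggered.
TransformationFactory('gdp.partition_disjuncts').apply_to(
    model, num_partitions=2)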