Example #1
class TrustRegionSolver(plugin.Plugin):
    """
    A trust region filter method for black box / glass box optimization.
    Solves nonlinear optimization problems containing external function calls
    through automatic construction of reduced models (ROMs), also known as
    surrogate models.
    Currently implements linear and quadratic reduced models.
    See Eason and Biegler (2016), AIChE Journal, for more details.

    Arguments:
    """
    #    + param.CONFIG.generate_yaml_template()

    plugin.implements(IOptSolver)
    plugin.alias(
        'trustregion',
        doc='Trust region filter method for black box/glass box optimization')

    def available(self, exception_flag=True):
        """Check if solver is available.

        TODO: For now, it is always available. However, sub-solvers may not
        always be available, and so this should reflect that possibility.

        """
        return True

    def version(self):
        """Return a 3-tuple describing the solver version."""
        return __version__

    def solve(self, model, eflist, **kwds):
        assert not kwds
        #config = param.CONFIG(kwds)
        return TRF(model, eflist)  #, config)
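A minimal usage sketch, assuming the plugin is registered under the 'trustregion' alias shown above; 'model' and 'eflist' are placeholders for a Pyomo model with external function calls and the list of its external function objects, built elsewhere:

from pyomo.environ import SolverFactory

# 'model' is a Pyomo model containing external (black box) function calls;
# 'eflist' collects the corresponding external function objects.
solver = SolverFactory('trustregion')
if solver.available():
    results = solver.solve(model, eflist)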
Example #2
class InitMidpoint(IsomorphicTransformation):
    """Initializes non-fixed variables to the midpoint of their bounds.

    - If the variable does not have bounds, set the value to zero.
    - If the variable is missing one bound, set the value to that of the
        existing bound.
    """

    alias('contrib.init_vars_midpoint',
          doc=textwrap.fill(textwrap.dedent(__doc__.strip())))

    def _apply_to(self, instance, overwrite=False):
        """Apply the transformation.

        Kwargs:
            overwrite: if False, transformation will not overwrite existing
                variable values.
        """
        for var in instance.component_data_objects(ctype=Var,
                                                   descend_into=True):
            if var.fixed:
                continue
            if var.value is not None and not overwrite:
                continue
            if var.lb is None and var.ub is None:
                # If LB and UB do not exist, set variable value to 0
                var.set_value(0)
            elif var.lb is None:
                # if one bound does not exist, set variable value to the other
                var.set_value(value(var.ub))
            elif var.ub is None:
                # if one bound does not exist, set variable value to the other
                var.set_value(value(var.lb))
            else:
                var.set_value((value(var.lb) + value(var.ub)) / 2.)
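A minimal sketch of applying this transformation through the TransformationFactory (assuming the alias above is registered):

from pyomo.environ import ConcreteModel, Var, TransformationFactory

m = ConcreteModel()
m.x = Var(bounds=(2, 6))       # both bounds -> midpoint 4
m.y = Var(bounds=(None, 3))    # only an upper bound -> 3
m.z = Var()                    # no bounds -> 0

TransformationFactory('contrib.init_vars_midpoint').apply_to(m)
print(m.x.value, m.y.value, m.z.value)   # expected: 4.0 3 0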
Example #3
class MPEC3_Transformation(Transformation):

    alias('mpec.standard_form',
          doc="Standard reformulation of complementarity condition")

    def __init__(self):
        super(MPEC3_Transformation, self).__init__()

    def _apply_to(self, instance, **kwds):
        options = kwds.pop('options', {})
        #
        # Iterate over the model finding Complementarity components
        #
        for block in instance.block_data_objects(
                active=True, sort=SortComponents.deterministic):
            for complementarity in block.component_objects(Complementarity,
                                                           active=True,
                                                           descend_into=False):
                for index in sorted(iterkeys(complementarity)):
                    _data = complementarity[index]
                    if not _data.active:
                        continue
                    _data.to_standard_form()
                    #
                block.reclassify_component_type(complementarity, Block)
Example #4
class sqlite3_db_Table(db_Table):

    alias('sqlite3', "sqlite3 database interface")

    def __init__(self):
        db_Table.__init__(self)
        self.using = 'sqlite3'

    def available(self):
        return sqlite3_available

    def requirements(self):
        return 'sqlite3'

    def connect(self, connection, options):
        assert (options['using'] == 'sqlite3')

        filename = connection
        if not os.path.exists(filename):
            raise Exception("No such file: " + filename)

        con = sqlite3.connect(filename)
        if options.text_factory:
            con.text_factory = options.text_factory
        return con
Example #5
class FixDiscreteVars(Transformation):

    alias('core.fix_discrete',
          doc="Fix known discrete domains to continuous counterparts")

    def __init__(self):
        super(FixDiscreteVars, self).__init__()

    def _apply_to(self, model, **kwds):
        options = kwds.pop('options', {})
        if kwds.get('undo', options.get('undo', False)):
            for v in model._fixed_discrete_vars[None]:
                v.unfix()
            model.del_component("_fixed_discrete_vars")
            return

        fixed_vars = []
        _base_model_vars = model.component_data_objects(Var,
                                                        active=True,
                                                        descend_into=True)
        for var in _base_model_vars:
            if var.domain in _discrete_relaxation_map and not var.is_fixed():
                fixed_vars.append(var)
                var.fix()
        model._fixed_discrete_vars = Suffix(direction=Suffix.LOCAL)
        model._fixed_discrete_vars[None] = fixed_vars
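A minimal sketch (alias assumed registered with the TransformationFactory); passing undo=True reverses a previous application:

from pyomo.environ import ConcreteModel, Var, Binary, TransformationFactory

m = ConcreteModel()
m.y = Var(domain=Binary, initialize=1)

xfrm = TransformationFactory('core.fix_discrete')
xfrm.apply_to(m)              # m.y is fixed at its current value (1)
xfrm.apply_to(m, undo=True)   # unfixes the variables fixed above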
Example #6
class InitZero(IsomorphicTransformation):
    """Initializes non-fixed variables to zeros.

    - If setting the variable value to zero will violate a bound, set the
        variable value to the relevant bound value.

    """

    alias('contrib.init_vars_zero',
          doc=textwrap.fill(textwrap.dedent(__doc__.strip())))

    def __init__(self):
        """Initialize the transformation."""
        super(InitZero, self).__init__()

    def _apply_to(self, instance, overwrite=False):
        """Apply the transformation.

        Kwargs:
            overwrite: if False, transformation will not overwrite existing
                variable values.
        """
        for var in instance.component_data_objects(ctype=Var,
                                                   descend_into=True):
            if var.fixed:
                continue
            if var.value is not None and not overwrite:
                continue
            if var.lb is not None and value(var.lb) > 0:
                var.set_value(value(var.lb))
            elif var.ub is not None and value(var.ub) < 0:
                var.set_value(value(var.ub))
            else:
                var.set_value(0)
Example #7
class MockGLPK(GLPKSHELL_old, MockMIP):
    """A Mock GLPK solver used for testing
    """

    alias('_mock_glpk')

    def __init__(self, **kwds):
        try:
            GLPKSHELL_old.__init__(self, **kwds)
        except ApplicationError:  #pragma:nocover
            pass  #pragma:nocover
        MockMIP.__init__(self, "glpk")

    def available(self, exception_flag=True):
        return GLPKSHELL_old.available(self, exception_flag)

    def create_command_line(self, executable, problem_files):
        command = GLPKSHELL_old.create_command_line(self, executable,
                                                    problem_files)
        MockMIP.create_command_line(self, executable, problem_files)
        return command

    def executable(self):
        return MockMIP.executable(self)

    def _execute_command(self, cmd):
        return MockMIP._execute_command(self, cmd)

    def _convert_problem(self, args, pformat, valid_pformats):
        if pformat in [ProblemFormat.mps, ProblemFormat.cpxlp]:
            return (args, pformat, None)
        else:
            return (args, ProblemFormat.cpxlp, None)
Example #8
class LinearDual_BilevelTransformation(Base_BilevelTransformation):

    alias('bilevel.linear_dual', doc="Dualize a SubModel block")

    def __init__(self):
        super(LinearDual_BilevelTransformation, self).__init__()

    def _apply_to(self, instance, **kwds):
        #
        # Process options
        #
        submodel = self._preprocess('bilevel.linear_dual', instance, **kwds)
        self._fix_all()
        #
        # Generate the dual
        #
        setattr(instance, self._submodel+'_dual', self._dualize(submodel, self._unfixed_upper_vars))
        instance.reclassify_component_type(self._submodel+'_dual', Block)
        #
        # Deactivate the original subproblem and upper-level objective
        #
        for (oname, odata) in submodel._parent().component_map(Objective, active=True).items():
            odata.deactivate()
        submodel.deactivate()
        #
        # Unfix the upper variables
        #
        self._unfix_all()
        #
        # Disable the original submodel
        #
        sub = getattr(instance,self._submodel)
        # TODO: Cache the list of components that were deactivated
        for (name, data) in sub.component_map(active=True).items():
            if not isinstance(data,Var) and not isinstance(data, Set):
                data.deactivate()


    def _dualize(self, submodel, unfixed):
        """
        Generate the dual of a submodel
        """ 
        transform = TransformationFactory('core.linear_dual')
        return transform._dualize(submodel, unfixed)

    def _xfrm_bilinearities(self, dual):
        """
        Replace bilinear terms in constraints with disjunctions
        """ 
        for (name, data) in dual.component_map(Constraint, active=True).items():
            for ndx in data:
                con = data[ndx]
                degree = con.body.polynomial_degree()
                if degree > 2:
                    raise "RuntimeError: Cannot transform a model with polynomial degree %d" % degree
                if degree == 2:
                    terms = generate_standard_repn(con.body)
                    for i, var in enumerate(terms.quadratic_vars):
                        print("%s %s %s" % (i, str(var), str(terms.quadratic_coefs[i])))
Example #9
class CSVTable(TableData):

    alias("csv", "CSV file interface")

    def __init__(self):
        TableData.__init__(self)

    def open(self):
        if self.filename is None:  #pragma:nocover
            raise IOError("No filename specified")

    def close(self):
        self.FILE.close()

    def read(self):
        if not os.path.exists(self.filename):  #pragma:nocover
            raise IOError("Cannot find file '%s'" % self.filename)
        self.FILE = open(self.filename, 'r')
        tmp = []
        for tokens in csv.reader(self.FILE):
            if tokens != ['']:
                tmp.append(tokens)
        self.FILE.close()
        if len(tmp) == 0:
            raise IOError("Empty *.csv file")
        elif len(tmp) == 1:
            if not self.options.param is None:
                if type(self.options.param) in (list, tuple):
                    p = self.options.param[0]
                else:
                    p = self.options.param
                if isinstance(p, Param):
                    self.options.model = p.model()
                    p = p.local_name
                self._info = ["param", p, ":=", tmp[0][0]]
            elif len(self.options.symbol_map) == 1:
                self._info = [
                    "param",
                    self.options.symbol_map[self.options.symbol_map.keys()[0]],
                    ":=", tmp[0][0]
                ]
            else:
                raise IOError(
                    "Data looks like a parameter, but multiple parameter names have been specified: %s"
                    % str(self.options.symbol_map))
        else:
            self._set_data(tmp[0], tmp[1:])

    def write(self, data):
        if self.options.set is None and self.options.param is None:
            raise IOError("Unspecified model component")
        self.FILE = open(self.filename, 'w')
        table = self._get_table()
        writer = csv.writer(self.FILE)
        writer.writerows(table)
        self.FILE.close()
Example #10
class Xfrm_PyomoTransformation(Transformation):

    alias('contrib.example.xfrm', doc="An example of a transformation in a pyomo.contrib package")

    def __init__(self):
        super(Xfrm_PyomoTransformation, self).__init__()

    def create_using(self, instance, **kwds):
        # This transformation doesn't do anything...
        return instance
Example #11
class TestSolver2(pyomo.opt.OptSolver):

    alias('stest2')

    def __init__(self, **kwds):
        kwds['type'] = 'stest_type'
        pyomo.opt.OptSolver.__init__(self, **kwds)

    def enabled(self):
        return False
Example #12
class StandardForm(IsomorphicTransformation):
    """
    Produces a standard-form representation of the model. This form has 
    the coefficient matrix (A), the cost vector (c), and the
    constraint vector (b), where the 'standard form' problem is

    min/max c'x
    s.t.    Ax = b
            x >= 0

    Options
        slack_names         Default auxiliary_slack
        excess_names        Default auxiliary_excess
        lb_names            Default _lower_bound
        ub_names            Default _upper_bound
        pos_suffix          Default _plus
        neg_suffix          Default _minus
    """

    alias("core.standard_form",
          doc="Create an equivalent LP model in standard form.")

    def __init__(self, **kwds):
        kwds['name'] = "standard_form"
        super(StandardForm, self).__init__(**kwds)

    def _create_using(self, model, **kwds):
        """
        Transform a model to standard form.
        """

        # Optional naming schemes to pass to EqualityTransform
        eq_kwds = {}
        eq_kwds["slack_names"] = kwds.pop("slack_names", "auxiliary_slack")
        eq_kwds["excess_names"] = kwds.pop("excess_names", "auxiliary_excess")
        eq_kwds["lb_names"] = kwds.pop("lb_names", "_lower_bound")
        eq_kwds["ub_names"] = kwds.pop("ub_names", "_upper_bound")

        # Optional naming schemes to pass to NonNegativeTransformation
        nn_kwds = {}
        nn_kwds["pos_suffix"] = kwds.pop("pos_suffix", "_plus")
        nn_kwds["neg_suffix"] = kwds.pop("neg_suffix", "_minus")

        nonneg = NonNegativeTransformation()
        equality = EqualityTransform()

        # Since NonNegativeTransform introduces new constraints
        # (that aren't equality constraints) we call it first.
        #
        # EqualityTransform introduces new variables, but they are
        # constrained to be nonnegative.
        sf = nonneg(model, **nn_kwds)
        sf = equality(sf, **eq_kwds)

        return sf
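A minimal sketch, assuming the 'core.standard_form' alias is registered; create_using returns a transformed copy and leaves the original model untouched:

from pyomo.environ import (ConcreteModel, Var, Objective, Constraint,
                           NonNegativeReals, TransformationFactory)

m = ConcreteModel()
m.x = Var(domain=NonNegativeReals)
m.y = Var(domain=NonNegativeReals)
m.obj = Objective(expr=2 * m.x + 3 * m.y)
m.con = Constraint(expr=m.x + m.y >= 1)

sf = TransformationFactory('core.standard_form').create_using(m)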
Example #13
class SolverManager_Serial(AsynchronousSolverManager):

    alias("serial", doc="Synchronously execute solvers locally")

    def clear(self):
        """
        Clear manager state
        """
        super(SolverManager_Serial, self).clear()
        self.results = OrderedDict()

    def _perform_queue(self, ah, *args, **kwds):
        """
        Perform the queue operation.  This method returns the ActionHandle,
        and the ActionHandle status indicates whether the queue was successful.
        """

        opt = kwds.pop('solver', kwds.pop('opt', None))
        if opt is None:
            raise ActionManagerError(
                "No solver passed to %s, use keyword option 'solver'" %
                (type(self).__name__))

        time_start = time.time()
        if isinstance(opt, string_types):
            with pyomo.opt.SolverFactory(opt) as _opt:
                results = _opt.solve(*args, **kwds)
        else:
            results = opt.solve(*args, **kwds)
        results.pyomo_solve_time = time.time() - time_start

        self.results[ah.id] = results
        ah.status = ActionStatus.done
        self.event_handle[ah.id].update(ah)

        return ah

    def _perform_wait_any(self):
        """
        Perform the wait_any operation.  This method returns an
        ActionHandle with the results of waiting.  If None is returned
        then the ActionManager assumes that it can call this method again.
        Note that an ActionHandle can be returned with a dummy value,
        to indicate an error.
        """
        if len(self.results) > 0:
            ah_id, result = self.results.popitem(last=False)
            self.results[ah_id] = result
            return self.event_handle[ah_id]
        return ActionHandle(error=True,
                            explanation=("No queued evaluations available in "
                                         "the 'serial' solver manager, which "
                                         "executes solvers synchronously"))
Example #14
class FixedVarDetector(IsomorphicTransformation):
    """Detects variables that are de-facto fixed but not considered fixed.

    Descends through the model. For each variable found, check to see if var.lb
    is within some tolerance of var.ub. If so, fix the variable to the value of
    var.lb.

    """

    alias('contrib.detect_fixed_vars',
          doc=textwrap.fill(textwrap.dedent(__doc__.strip())))

    def __init__(self):
        """Initialize the transformation."""
        super(FixedVarDetector, self).__init__()

    def _apply_to(self, instance, **kwargs):
        """Apply the transformation.

        Args:
            instance: Pyomo model object to transform.

        Kwargs:
            tmp: True to store the set of transformed variables and their old
                values so that they can be restored.
            tolerance: tolerance on bound equality (LB == UB). Default 1E-13.
        """
        tmp = kwargs.pop('tmp', False)
        tol = kwargs.pop('tolerance', 1E-13)

        if tmp:
            instance._xfrm_detect_fixed_vars_old_values = ComponentMap()

        for var in instance.component_data_objects(ctype=Var,
                                                   descend_into=True):
            if var.fixed or var.lb is None or var.ub is None:
                # if the variable is already fixed, or if it is missing a
                # bound, we skip it.
                continue
            if fabs(value(var.lb - var.ub)) <= tol:
                if tmp:
                    instance._xfrm_detect_fixed_vars_old_values[var] = \
                        var.value
                var.fix(var.lb)

    def revert(self, instance):
        """Revert variables fixed by the transformation."""
        for var, var_value in iteritems(
                instance._xfrm_detect_fixed_vars_old_values):
            var.unfix()
            var.set_value(var_value)

        del instance._xfrm_detect_fixed_vars_old_values
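A minimal sketch (alias assumed registered); with tmp=True the original values are cached so the transformation can be reverted:

from pyomo.environ import ConcreteModel, Var, TransformationFactory

m = ConcreteModel()
m.x = Var(bounds=(5, 5))     # lb == ub, so x is de facto fixed

xfrm = TransformationFactory('contrib.detect_fixed_vars')
xfrm.apply_to(m, tmp=True)   # m.x is now fixed at 5
xfrm.revert(m)               # unfix m.x and restore its previous value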
Example #15
class pymysql_db_Table(db_Table):

    alias('pymysql', "pymysql database interface")

    def __init__(self):
        db_Table.__init__(self)
        self.using = 'pymysql'

    def available(self):
        return pymysql_available

    def requirements(self):
        return 'pymysql'
Example #16
class PyomoDataCommands(Plugin):

    alias("dat", "Pyomo data command file interface")

    implements(IDataManager, service=False)

    def __init__(self):
        self._info = []
        self.options = Options()

    def available(self):
        return True

    def initialize(self, **kwds):
        self.filename = kwds.pop('filename')
        self.add_options(**kwds)

    def add_options(self, **kwds):
        self.options.update(kwds)

    def open(self):
        if self.filename is None:  #pragma:nocover
            raise IOError("No filename specified")
        if not os.path.exists(self.filename):  #pragma:nocover
            raise IOError("Cannot find file '%s'" % self.filename)

    def close(self):
        pass

    def read(self):
        """
        This function does nothing, since executing Pyomo data commands
        both reads and processes the data all at once.
        """
        pass

    def write(self, data):  #pragma:nocover
        """
        This function does nothing, because we cannot write to a *.dat file.
        """
        pass

    def process(self, model, data, default):
        """
        Read Pyomo data commands and process the data.
        """
        _process_include(['include', self.filename], model, data, default,
                         self.options)

    def clear(self):
        self._info = []
Example #17
class RemoveZeroTerms(IsomorphicTransformation):
    """Looks for 0 * var in a constraint and removes it.

    Currently limited to processing linear constraints of the form x1 = 0 *
    x3 (e.g., arising from a constraint x1 = x2 * x3 after x2.fix(0)).

    TODO: support nonlinear expressions

    """

    alias('contrib.remove_zero_terms',
          doc=textwrap.fill(textwrap.dedent(__doc__.strip())))

    def __init__(self, *args, **kwargs):
        """Initialize the transformation."""
        super(RemoveZeroTerms, self).__init__(*args, **kwargs)

    def _apply_to(self, model):
        """Apply the transformation."""
        m = model

        for constr in m.component_data_objects(ctype=Constraint,
                                               active=True,
                                               descend_into=True):
            if not constr.body.polynomial_degree() == 1:
                continue  # we currently only process linear constraints
            repn = generate_canonical_repn(constr.body)

            # get the index of all nonzero coefficient variables
            nonzero_vars_indx = [
                i for i, _ in enumerate(repn.variables)
                if not repn.linear[i] == 0
            ]
            const = repn.constant if repn.constant is not None else 0

            # reconstitute the constraint, including only variable terms with
            # nonzero coefficients
            constr_body = sum(repn.linear[i] * repn.variables[i]
                              for i in nonzero_vars_indx) + const
            if constr.equality:
                constr.set_value(constr_body == constr.upper)
            elif constr.has_lb() and not constr.has_ub():
                constr.set_value(constr_body >= constr.lower)
            elif constr.has_ub() and not constr.has_lb():
                constr.set_value(constr_body <= constr.upper)
            else:
                # constraint is a bounded inequality of form a <= x <= b.
                # I don't think this is a great idea, but ¯\_(ツ)_/¯
                constr.set_value(constr.lower <= constr_body <= constr.upper)
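A minimal sketch of the situation described in the docstring (alias assumed registered):

from pyomo.environ import ConcreteModel, Var, Constraint, TransformationFactory

m = ConcreteModel()
m.x1 = Var()
m.x2 = Var()
m.x3 = Var()
m.c = Constraint(expr=m.x1 == m.x2 * m.x3)

m.x2.fix(0)   # the constraint body is now effectively x1 == 0 * x3
TransformationFactory('contrib.remove_zero_terms').apply_to(m)
# the zero-coefficient term is dropped, leaving x1 == 0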
Example #18
    class SheetTable_xlsm(SheetTable):

        alias("xlsm", "Excel XLSM file interface")

        def __init__(self):
            if win32com_available and _excel_available:
                SheetTable.__init__(self, ctype='win32com')
            else:
                SheetTable.__init__(self, ctype='openpyxl')

        def available(self):
            return win32com_available or openpyxl_available

        def requirements(self):
            return "win32com or openpyxl"
Example #19
    class SheetTable_xls(SheetTable):

        alias("xls", "Excel XLS file interface")

        def __init__(self):
            if win32com_available and _excel_available:
                SheetTable.__init__(self, ctype='win32com')
            else:
                SheetTable.__init__(self, ctype='xlrd')

        def available(self):
            return win32com_available or xlrd_available

        def requirements(self):
            return "win32com or xlrd"
Example #20
    class SheetTable_xlsm(pyodbc_db_base):

        alias("xlsm", "Excel XLSM file interface")

        def __init__(self):
            pyodbc_db_base.__init__(self)

        def requirements(self):
            return "pyodbc or pypyodbc"

        def open(self):
            if self.filename is None:
                raise IOError("No filename specified")
            if not os.path.exists(self.filename):
                raise IOError("Cannot find file '%s'" % self.filename)
            return pyodbc_db_base.open(self)
Example #21
class pypyodbc_db_Table(pyodbc_db_Table):

    alias('pypyodbc', "%s database interface" % 'pypyodbc')

    def __init__(self):
        pyodbc_db_Table.__init__(self)
        self.using = 'pypyodbc'

    def available(self):
        return pypyodbc_available

    def requirements(self):
        return 'pypyodbc'

    def connect(self, connection, options):
        assert (options['using'] == 'pypyodbc')

        return pyodbc_db_Table.connect(self, connection, options)
Example #22
class HACK_GDP_Disjunct_Reclassifier(Transformation):
    """Reclassify Disjuncts to Blocks.

    HACK: this will reclassify all Disjuncts to Blocks so the current writers
    can find the variables

    """

    alias('gdp.reclassify',
          doc=textwrap.fill(textwrap.dedent(__doc__.strip())))

    def _apply_to(self, instance, **kwds):
        assert not kwds
        disjunct_generator = instance.component_objects(
            Disjunct, descend_into=(Block, Disjunct))
        for disjunct_component in disjunct_generator:
            for disjunct in itervalues(disjunct_component._data):
                if disjunct.active:
                    logger.error("""
                    Reclassifying active Disjunct "%s" as a Block.  This
                    is generally an error as it indicates that the model
                    was not completely relaxed before applying the
                    gdp.reclassify transformation""" % (disjunct.name, ))

            # Reclassify this disjunct as a block
            disjunct_component.parent_block().reclassify_component_type(
                disjunct_component, Block)
            disjunct_component._activate_without_unfixing_indicator()

            # Deactivate all constraints.  Note that we only need to
            # descend into blocks: we will catch disjuncts in the outer
            # loop.
            #
            # Note that we defer this until AFTER we reactivate the
            # block, as the component_objects generator will not
            # return anything when active=True and the block is
            # deactivated.
            for disjunct in itervalues(disjunct_component._data):
                cons_in_disjunct = disjunct.component_objects(
                    Constraint, descend_into=Block, active=True)
                for con in cons_in_disjunct:
                    con.deactivate()
Example #23
class RelaxDiscreteVars(Transformation):

    alias('core.relax_discrete',
          doc="Relax known discrete domains to continuous counterparts")

    def __init__(self):
        super(RelaxDiscreteVars, self).__init__()

    def _apply_to(self, model, **kwds):
        options = kwds.pop('options', {})
        if kwds.get('undo', options.get('undo', False)):
            for v, d in itervalues(model._relaxed_discrete_vars[None]):
                v.domain = d
            model.del_component("_relaxed_discrete_vars")
            return

        # Relax the model
        relaxed_vars = {}
        _base_model_vars = model.component_data_objects(Var,
                                                        active=True,
                                                        descend_into=True)
        for var in _base_model_vars:
            if var.domain in _discrete_relaxation_map:
                if var.domain is Binary or var.domain is Boolean:
                    var.setlb(0)
                    var.setub(1)
                # Note: some indexed components can only have their
                # domain set on the parent component (the individual
                # indices cannot be set independently)
                _c = var.parent_component()
                if id(_c) in _discrete_relaxation_map:
                    continue
                try:
                    _domain = var.domain
                    var.domain = _discrete_relaxation_map[_domain]
                    relaxed_vars[id(var)] = (var, _domain)
                except:
                    _domain = _c.domain
                    _c.domain = _discrete_relaxation_map[_domain]
                    relaxed_vars[id(_c)] = (_c, _domain)
        model._relaxed_discrete_vars = Suffix(direction=Suffix.LOCAL)
        model._relaxed_discrete_vars[None] = relaxed_vars
Example #24
class ResultsReader_json(results.AbstractResultsReader):
    """
    Class that reads in a *.jsn file and generates a
    SolverResults object.
    """

    alias(str(ResultsFormat.json))

    def __init__(self):
        results.AbstractResultsReader.__init__(self, ResultsFormat.json)

    def __call__(self, filename, res=None, soln=None, suffixes=[]):
        """
        Parse a *.results file
        """
        if res is None:
            res = SolverResults()
        #
        res.read(filename, using_yaml=False)
        return res
Example #25
class HACK_GDP_Var_Mover(Transformation):
    """Move indicator vars to top block.

    HACK: this will move all indicator variables on the model to the top block
    so the writers can find them.

    """

    alias('gdp.varmover', doc=textwrap.fill(textwrap.dedent(__doc__.strip())))

    def _apply_to(self, instance, **kwds):
        assert not kwds
        count = 0
        disjunct_generator = instance.component_data_objects(
            Disjunct, descend_into=(Block, Disjunct))
        for disjunct in disjunct_generator:
            count += 1
            var = disjunct.indicator_var
            var.doc = "%s(Moved from %s)" % (
                var.doc + " " if var.doc else "", var.name, )
            disjunct.del_component(var)
            instance.add_component("_gdp_moved_IV_%s" % (count,), var)
Example #26
class RelaxIntegrality(NonIsomorphicTransformation):
    """
    This plugin relaxes integrality in a Pyomo model.
    """

    alias('core.relax_integrality',\
          doc="Create a model where integer variables are replaced with real variables.")

    def __init__(self, **kwds):
        kwds['name'] = "relax_integrality"
        super(RelaxIntegrality, self).__init__(**kwds)

    def _apply_to(self, model, **kwds):
        #
        # Iterate over all variables, replacing the domain with a real-valued domain
        # and setting appropriate bounds.
        #
        for var in model.component_data_objects(Var):
            # var.bounds returns the tightest of the domain
            # vs user-supplied lower and upper bounds
            lb, ub = var.bounds
            var.domain = Reals
            var.setlb(lb)
            var.setub(ub)
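A minimal sketch (alias assumed registered):

from pyomo.environ import ConcreteModel, Var, Integers, TransformationFactory

m = ConcreteModel()
m.n = Var(domain=Integers, bounds=(0, 10))

TransformationFactory('core.relax_integrality').apply_to(m)
# m.n now has domain Reals with bounds (0, 10)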
Example #27
class CuttingPlane_Transformation(Transformation):

    alias('gdp.cuttingplane',
          doc="Relaxes a linear disjunctive model by "
          "adding cuts from convex hull to Big-M relaxation.")

    def __init__(self):
        super(CuttingPlane_Transformation, self).__init__()

    def _apply_to(self, instance, bigM=None, **kwds):
        options = kwds.pop('options', {})

        if kwds:
            logger.warning(
                "GDP(CuttingPlanes): unrecognized keyword arguments:"
                "\n%s" % ('\n'.join(iterkeys(kwds)), ))
        if options:
            logger.warning("GDP(CuttingPlanes): unrecognized options:\n%s" %
                           ('\n'.join(iterkeys(options)), ))

        instance_rBigM, instance_rCHull, var_info, transBlockName \
            = self._setup_subproblems(instance, bigM)

        self._generate_cuttingplanes(instance, instance_rBigM, instance_rCHull,
                                     var_info, transBlockName)

    def _setup_subproblems(self, instance, bigM):
        # create transformation block
        transBlockName, transBlock = self._add_relaxation_block(
            instance, '_pyomo_gdp_cuttingplane_relaxation')

        # We store a list of all vars so that we can efficiently
        # generate maps among the subproblems
        transBlock.all_vars = list(v for v in instance.component_data_objects(
            Var,
            descend_into=(Block, Disjunct),
            sort=SortComponents.deterministic) if not v.is_fixed())

        # we'll store all the cuts we add together
        transBlock.cuts = Constraint(Any)

        # get bigM and chull relaxations
        bigMRelaxation = TransformationFactory('gdp.bigm')
        chullRelaxation = TransformationFactory('gdp.chull')
        relaxIntegrality = TransformationFactory('core.relax_integrality')

        # HACK: for the current writers, we need to also apply gdp.reclassify so
        # that the indicator variables stay where they are in the big M model
        # (since that is what we are eventually going to solve after we add our
        # cuts).
        reclassify = TransformationFactory('gdp.reclassify')

        #
        # Generate the CHull relaxation (used for the separation
        # problem to generate cutting planes)
        #
        instance_rCHull = chullRelaxation.create_using(instance)
        # This relies on relaxIntegrality relaxing variables on deactivated
        # blocks, which should be fine.
        reclassify.apply_to(instance_rCHull)
        relaxIntegrality.apply_to(instance_rCHull)

        #
        # Reformulate the instance using the BigM relaxation (this will
        # be the final instance returned to the user)
        #
        bigMRelaxation.apply_to(instance, bigM=bigM)
        reclassify.apply_to(instance)

        #
        # Generate the continuous relaxation of the BigM transformation
        #
        instance_rBigM = relaxIntegrality.create_using(instance)

        #
        # Add the xstar parameter for the CHull problem
        #
        transBlock_rCHull = instance_rCHull.component(transBlockName)
        #
        # this will hold the solution to rbigm each time we solve it. We
        # add it to the transformation block so that we don't have to
        # worry about name conflicts.
        transBlock_rCHull.xstar = Param(range(len(transBlock.all_vars)),
                                        mutable=True,
                                        default=None)

        transBlock_rBigM = instance_rBigM.component(transBlockName)

        #
        # Generate the mapping between the variables on all the
        # instances and the xstar parameter
        #
        var_info = tuple(
            (v, transBlock_rBigM.all_vars[i], transBlock_rCHull.all_vars[i],
             transBlock_rCHull.xstar[i])
            for i, v in enumerate(transBlock.all_vars))

        #
        # Add the separation objective to the chull subproblem
        #
        self._add_separation_objective(var_info, transBlock_rCHull)

        return instance_rBigM, instance_rCHull, var_info, transBlockName

    def _generate_cuttingplanes(self, instance, instance_rBigM,
                                instance_rCHull, var_info, transBlockName):

        opt = SolverFactory(SOLVER)

        improving = True
        iteration = 0
        prev_obj = float("inf")
        epsilon = 0.01

        transBlock = instance.component(transBlockName)
        transBlock_rBigM = instance_rBigM.component(transBlockName)

        # We try to grab the first active objective. If there is more
        # than one, the writer will yell when we try to solve below. If
        # there are 0, we will yell here.
        rBigM_obj = next(
            instance_rBigM.component_data_objects(Objective, active=True),
            None)
        if rBigM_obj is None:
            raise GDP_Error("Cannot apply cutting planes transformation "
                            "without an active objective in the model!")

        while (improving):
            # solve rBigM, solution is xstar
            results = opt.solve(instance_rBigM, tee=stream_solvers)
            if verify_successful_solve(results) is not NORMAL:
                logger.warning("GDP.cuttingplane: Relaxed BigM subproblem "
                               "did not solve normally. Stopping cutting "
                               "plane generation.\n\n%s" % (results, ))
                return

            rBigM_objVal = value(rBigM_obj)
            logger.warning("gdp.cuttingplane: rBigM objective = %s" %
                           (rBigM_objVal, ))

            # copy over xstar
            for x_bigm, x_rbigm, x_chull, x_star in var_info:
                x_star.value = x_rbigm.value
                # initialize the X values
                x_chull.value = x_rbigm.value

            # solve separation problem to get xhat.
            results = opt.solve(instance_rCHull, tee=stream_solvers)
            if verify_successful_solve(results) is not NORMAL:
                logger.warning("GDP.cuttingplane: CHull separation subproblem "
                               "did not solve normally. Stopping cutting "
                               "plane generation.\n\n%s" % (results, ))
                return

            self._add_cut(var_info, transBlock, transBlock_rBigM)

            # decide whether or not to keep going: check absolute difference
            # close to 0, relative difference further from 0.
            obj_diff = prev_obj - rBigM_objVal
            improving = math.isinf(obj_diff) or \
                        ( abs(obj_diff) > epsilon if abs(rBigM_objVal) < 1 else
                          abs(obj_diff/prev_obj) > epsilon )

            prev_obj = rBigM_objVal
            iteration += 1

    def _add_relaxation_block(self, instance, name):
        # Creates a transformation block with a unique name derived from
        # '_pyomo_gdp_cuttingplane_relaxation' (the 'name' argument is
        # currently unused), adds it to instance, and returns (name, block).
        transBlockName = unique_component_name(
            instance, '_pyomo_gdp_cuttingplane_relaxation')
        transBlock = Block()
        instance.add_component(transBlockName, transBlock)
        return transBlockName, transBlock

    def _add_separation_objective(self, var_info, transBlock_rCHull):
        # Deactivate any/all other objectives
        for o in transBlock_rCHull.model().component_data_objects(Objective):
            o.deactivate()

        obj_expr = 0
        for x_bigm, x_rbigm, x_chull, x_star in var_info:
            obj_expr += (x_chull - x_star)**2
        # add separation objective to transformation block
        transBlock_rCHull.separation_objective = Objective(expr=obj_expr)

    def _add_cut(self, var_info, transBlock, transBlock_rBigM):
        # add cut to BM and rBM
        cut_number = len(transBlock.cuts)
        logger.warning("gdp.cuttingplane: Adding cut %s to BM model." %
                       (cut_number, ))

        cutexpr_bigm = 0
        cutexpr_rBigM = 0
        for x_bigm, x_rbigm, x_chull, x_star in var_info:
            # xhat = x_chull.value
            cutexpr_bigm += (x_chull.value - x_star.value) * (x_bigm -
                                                              x_chull.value)
            cutexpr_rBigM += (x_chull.value - x_star.value) * (x_rbigm -
                                                               x_chull.value)

        transBlock.cuts.add(cut_number, cutexpr_bigm >= 0)
        transBlock_rBigM.cuts.add(cut_number, cutexpr_rBigM >= 0)
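A minimal usage sketch; the transformation rewrites the GDP model in place as a Big-M model plus cutting planes and assumes the sub-solver named by SOLVER is available. 'model' is a placeholder for a linear GDP model built with pyomo.gdp components:

from pyomo.environ import TransformationFactory, SolverFactory

TransformationFactory('gdp.cuttingplane').apply_to(model)
results = SolverFactory('glpk').solve(model)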
Example #28
class DualTransformation(IsomorphicTransformation):
    """
    Creates the LP dual of a Pyomo model, built from an intermediate
    standard-form representation of the original model

    Options
        dual_constraint_suffix      Defaults to _constraint
        dual_variable_prefix        Defaults to p_
        slack_names                 Defaults to auxiliary_slack
        excess_names                Defaults to auxiliary_excess
        lb_names                    Defaults to _lower_bound
        ub_names                    Defaults to _upper_bound
        pos_suffix                  Defaults to _plus
        neg_suffix                  Defaults to _minus
    """

    alias("core.lagrangian_dual", doc="Create the LP dual model.")

    def __init__(self, **kwds):
        kwds['name'] = "linear_dual"
        super(DualTransformation, self).__init__(**kwds)

    def _create_using(self, model, **kwds):
        """
        Transform a model to its Lagrangian dual.
        """

        # Optional naming schemes for dual variables and constraints
        constraint_suffix = kwds.pop("dual_constraint_suffix", "_constraint")
        variable_prefix = kwds.pop("dual_variable_prefix", "p_")

        # Optional naming schemes to pass to StandardForm
        sf_kwds = {}
        sf_kwds["slack_names"] = kwds.pop("slack_names", "auxiliary_slack")
        sf_kwds["excess_names"] = kwds.pop("excess_names", "auxiliary_excess")
        sf_kwds["lb_names"] = kwds.pop("lb_names", "_lower_bound")
        sf_kwds["ub_names"] = kwds.pop("ub_names", "_upper_bound")
        sf_kwds["pos_suffix"] = kwds.pop("pos_suffix", "_plus")
        sf_kwds["neg_suffix"] = kwds.pop("neg_suffix", "_minus")

        # Get the standard form model
        sf_transform = StandardForm()
        sf = sf_transform(model, **sf_kwds)

        # Roughly, parse the objectives and constraints to form A, b, and c of
        #
        # min  c'x
        # s.t. Ax  = b
        #       x >= 0
        #
        # and create a new model from them.

        # We use sparse matrix representations

        # {constraint_name: {variable_name: coefficient}}
        A = _sparse(lambda: _sparse(0))

        # {constraint_name: coefficient}
        b = _sparse(0)

        # {variable_name: coefficient}
        c = _sparse(0)

        # Walk constraints
        for (con_name, con_array) in sf.component_map(Constraint, active=True).items():
            for con in (con_array[ndx] for ndx in con_array._index):
                # The qualified constraint name
                cname = "%s%s" % (variable_prefix, con.local_name)

                # Process the body of the constraint
                body_terms = process_canonical_repn(
                    generate_standard_repn(con.body))

                # Add a numeric constant to the 'b' vector, if present
                b[cname] -= body_terms.pop(None, 0)

                # Add variable coefficients to the 'A' matrix
                row = _sparse(0)
                for (vname, coef) in body_terms.items():
                    row["%s%s" % (vname, constraint_suffix)] += coef

                # Process the upper bound of the constraint. We rely on
                # StandardForm to produce equality constraints, thus
                # requiring us only to check the lower bounds.
                lower_terms = process_canonical_repn(
                    generate_standard_repn(con.lower))

                # Add a numeric constant to the 'b' matrix, if present
                b[cname] += lower_terms.pop(None, 0)

                # Add any variables to the 'A' matrix, if present
                for (vname, coef) in lower_terms.items():
                    row["%s%s" % (vname, constraint_suffix)] -= coef

                A[cname] = row

        # Walk objectives. Multiply all coefficients by the objective's 'sense'
        # to convert maximizing objectives to minimizing ones.
        for (obj_name, obj_array) in sf.component_map(Objective, active=True).items():
            for obj in (obj_array[ndx] for ndx in obj_array._index):
                # The qualified objective name

                # Process the objective
                terms = process_canonical_repn(
                    generate_standard_repn(obj.expr))

                # Add coefficients
                for (name, coef) in terms.items():
                    c["%s%s" % (name, constraint_suffix)] += coef*obj_array.sense

        # Form the dual
        dual = AbstractModel()

        # Make constraint index set
        constraint_set_init = []
        for (var_name, var_array) in sf.component_map(Var, active=True).items():
            for var in (var_array[ndx] for ndx in var_array._index):
                constraint_set_init.append("%s%s" %
                                           (var.local_name, constraint_suffix))

        # Make variable index set
        variable_set_init = []
        dual_variable_roots = []
        for (con_name, con_array) in sf.component_map(Constraint, active=True).items():
            for con in (con_array[ndx] for ndx in con_array._index):
                dual_variable_roots.append(con.local_name)
                variable_set_init.append("%s%s" % (variable_prefix, con.local_name))

        # Create the dual Set and Var objects
        dual.var_set = Set(initialize=variable_set_init)
        dual.con_set = Set(initialize=constraint_set_init)
        dual.vars = Var(dual.var_set)

        # Make the dual constraints
        def constraintRule(A, c, ndx, model):
            return sum(A[v][ndx] * model.vars[v] for v in model.var_set) <= \
                   c[ndx]
        dual.cons = Constraint(dual.con_set,
                               rule=partial(constraintRule, A, c))

        # Make the dual objective (maximizing)
        def objectiveRule(b, model):
            return sum(b[v] * model.vars[v] for v in model.var_set)
        dual.obj = Objective(rule=partial(objectiveRule, b), sense=maximize)

        return dual.create()
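For reference, the model constructed above corresponds to the textbook LP duality pair (the dual variables are free because the primal constraints are equalities):

    primal:  min  c'x            dual:  max  b'y
             s.t. Ax  = b               s.t. A'y <= c
                   x >= 0                     y free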
Example #29
class EqualityTransform(IsomorphicTransformation):
    """
    Creates a new, equivalent model by introducing slack and excess variables
    to eliminate inequality constraints.
    """

    alias(
        "core.add_slack_vars",
        doc=
        "Create an equivalent model by introducing slack variables to eliminate inequality constraints."
    )

    def __init__(self, **kwds):
        kwds["name"] = kwds.pop("name", "add_slack_vars")
        super(EqualityTransform, self).__init__(**kwds)

    def _create_using(self, model, **kwds):
        """
        Eliminate inequality constraints.

        Required arguments:

          model The model to transform.

        Optional keyword arguments:

          slack_suffix   Suffix for auxiliary slack variable names.
                         Default is 'slack'.
          excess_suffix  Suffix for auxiliary excess variable names.
                         Default is 'excess'.
          lb_suffix      Suffix applied to constraints converted from
                         lower bounds.  Default is 'lb'.
          ub_suffix      Suffix applied to constraints converted from
                         upper bounds.  Default is 'ub'.
        """

        # Optional naming schemes
        slack_suffix = kwds.pop("slack_suffix", "slack")
        excess_suffix = kwds.pop("excess_suffix", "excess")
        lb_suffix = kwds.pop("lb_suffix", "lb")
        ub_suffix = kwds.pop("ub_suffix", "ub")

        equality = model.clone()
        components = collectAbstractComponents(equality)

        #
        # Fix all Constraint objects
        #
        for con_name in components["Constraint"]:
            con = equality.__getattribute__(con_name)

            #
            # Get all _ConstraintData objects
            #
            # We need to get the keys ahead of time because we are modifying
            # con._data on-the-fly.
            #
            indices = con._data.keys()
            for (ndx, cdata) in [(ndx, con._data[ndx]) for ndx in indices]:

                qualified_con_name = create_name(con_name, ndx)

                # Do nothing with equality constraints
                if cdata.equality:
                    continue

                # Add an excess variable if the lower bound exists
                if cdata.lower is not None:

                    # Make the excess variable
                    excess_name = "%s_%s" % (qualified_con_name, excess_suffix)
                    equality.__setattr__(excess_name,
                                         Var(within=NonNegativeReals))

                    # Make a new lower bound constraint
                    lb_name = "%s_%s" % (create_name("", ndx), lb_suffix)
                    excess = equality.__getattribute__(excess_name)
                    new_expr = (cdata.lower == cdata.body - excess)
                    con.add(lb_name, new_expr)

                # Add a slack variable if the upper bound exists
                if cdata.upper is not None:

                    # Make the slack variable
                    slack_name = "%s_%s" % (qualified_con_name, slack_suffix)
                    equality.__setattr__(slack_name,
                                         Var(within=NonNegativeReals))

                    # Make a new upper bound constraint
                    ub_name = "%s_%s" % (create_name("", ndx), ub_suffix)
                    slack = equality.__getattribute__(slack_name)
                    new_expr = (cdata.upper == cdata.body + slack)
                    con.add(ub_name, new_expr)

                # Since we explicitly `continue` for equality constraints, we
                # can safely remove the old _ConstraintData object
                del con._data[ndx]

        return equality.create()
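In terms of a single inequality constraint, the rewrite applied above is:

    L <= body        becomes    body - excess == L,  excess >= 0
    body <= U        becomes    body + slack  == U,  slack  >= 0

so every remaining constraint in the returned model is an equality.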
Example #30
class pyodbc_db_Table(db_Table):

    alias('pyodbc', "%s database interface" % 'pyodbc')

    _drivers = {
        'mdb': "Microsoft Access Driver (*.mdb)",
        'xls': "Microsoft Excel Driver (*.xls, *.xlsx, *.xlsm, *.xlsb)",
        'xlsx': "Microsoft Excel Driver (*.xls, *.xlsx, *.xlsm, *.xlsb)",
        'xlsm': "Microsoft Excel Driver (*.xls, *.xlsx, *.xlsm, *.xlsb)",
        'xlsb': "Microsoft Excel Driver (*.xls, *.xlsx, *.xlsm, *.xlsb)",
        'mysql': "MySQL"
    }

    def __init__(self):
        db_Table.__init__(self)
        self.using = 'pyodbc'

    def available(self):
        return pyodbc_available

    def requirements(self):
        return 'pyodbc'

    def connect(self, connection, options):
        if not options.driver is None:
            ctype = options.driver
        elif '.' in connection:
            ctype = connection.split('.')[-1]
        elif 'mysql' in connection.lower():
            ctype = 'mysql'
        else:
            ctype = ''
        extras = {}
        if ctype in [
                'xls', 'xlsx', 'xlsm', 'xlsb', 'excel'
        ] or '.xls' in connection or '.xlsx' in connection or '.xlsm' in connection or '.xlsb' in connection:
            extras['autocommit'] = True

        connection = self.create_connection_string(ctype, connection, options)
        try:
            conn = db_Table.connect(self, connection, options, extras)
        except TypeError:
            raise
        except Exception:
            e = sys.exc_info()[1]
            code = e.args[0]
            if code == 'IM002' or code == '08001':
                # Need a DSN! Try to add it to $HOME/.odbc.ini ...
                odbcIniPath = os.path.join(os.environ['HOME'], '.odbc.ini')
                if os.path.exists(odbcIniPath):
                    shutil.copy(odbcIniPath, odbcIniPath + '.orig')
                    config = ODBCConfig(filename=odbcIniPath)
                else:
                    config = ODBCConfig()

                dsninfo = self.create_dsn_dict(connection, config)
                dsnid = re.sub(
                    '[^A-Za-z0-9]', '',
                    dsninfo['Database'])  # Strip filenames of funny characters
                dsn = 'PYOMO{0}'.format(dsnid)

                config.add_source(dsn, dsninfo['Driver'])
                config.add_source_spec(dsn, dsninfo)
                config.write(odbcIniPath)

                connstr = "DRIVER={{{0}}};DSN={1}".format(
                    dsninfo['Driver'], dsn)
                conn = db_Table.connect(
                    self, connstr, options,
                    extras)  # Will raise its own exception on failure

            # Propagate the exception
            else:
                raise

        return conn

    def create_dsn_dict(self, argstr, existing_config):
        result = {}

        parts = argstr.split(';')
        argdict = {}
        for part in parts:
            if len(part) > 0 and '=' in part:
                key, val = part.split('=', 1)
                argdict[key.lower().strip()] = val.strip()

        if 'driver' in argdict:
            result['Driver'] = "{0}".format(argdict['driver']).strip("{}")

        if 'dsn' in argdict:
            if argdict['dsn'] in existing_config.source_specs:
                return existing_config.source_specs[argdict['dsn']]
            else:
                import logging
                logger = logging.getLogger("pyomo.core")
                logger.warning(
                    "DSN with name {0} not found. Attempting to continue with options..."
                    .format(argdict['dsn']))

        if 'dbq' in argdict:
            # Using a file for db access.
            if 'Driver' not in result:
                result['Driver'] = self._drivers[argdict['dbq'].split('.')
                                                 [-1].lower()]
            result['Database'] = argdict['dbq']
            result['Server'] = 'localhost'
            result['User'] = ''
            result['Password'] = ''
            result['Port'] = '5432'
            result['Description'] = argdict['dbq']
            for k in argdict.keys():
                if k.capitalize() not in result:
                    result[k.capitalize()] = argdict[k]
        else:
            if 'Driver' not in result:
                raise Exception(
                    "No driver specified, and no DBQ to infer from")
            elif result['Driver'].lower() == "mysql":
                result['Driver'] = "MySQL"
                result['Server'] = argdict.get('server', 'localhost')
                result['Database'] = argdict.get('database', '')
                result['Port'] = argdict.get('port', '3306')
                result['Socket'] = argdict.get('socket', '')
                result['Option'] = argdict.get('option', '')
                result['Stmt'] = argdict.get('stmt', '')
                result['User'] = argdict.get('user', '')
                result['Password'] = argdict.get('password', '')
                result['Description'] = argdict.get('description', '')
            else:
                raise Exception(
                    "Unknown driver type '{0}' for database connection".format(
                        result['Driver']))

        return result

    def create_connection_string(self, ctype, connection, options):
        if ctype in ['xls', 'xlsx', 'xlsm', 'xlsb', 'excel']:
            return "DRIVER={Microsoft Excel Driver (*.xls, *.xlsx, *.xlsm, *.xlsb)}; Dbq=%s;" % connection
        if ctype in ['mdb', 'access']:
            return "DRIVER={Microsoft Access Driver (*.mdb)}; Dbq=%s;" % connection
        return connection