Example #1
def test_solvers(options=None, argv=None):
    """
    This is the function executed by the command
        pyomo test-solvers [solver ...]
    """
    global rootdir
    rootdir = os.getcwd()
    if argv is None:
        if options.debug:
            if len(options.solver) == 0:
                print("Testing all solvers")
            else:
                print("Testing solver", options.solver[0])
        # Override the value of sys.argv, which is used by unittest.main()
        sys.argv=['test_solver']
    else:
        sys.argv=argv
    # Create the tests defined in the YAML configuration file
    autotest_options = Options()
    autotest_options.testname_format = "%s_TEST_%s"
    pyutilib.autotest.create_test_suites(filename=currdir+'test_solvers.yml', _globals=globals(), options=autotest_options)
    # Execute the tests, using a custom test runner
    runner = SolverTestRunner()
    runner.options = options
    unittest.main(module=globals()['__name__'], testRunner=runner)
Example #2
def help_environment():
    cmddir = os.path.dirname(os.path.abspath(sys.executable))+os.sep
    info = Options()
    #
    info.python = Options()
    info.python.version = '%d.%d.%d' % sys.version_info[:3]
    info.python.executable = sys.executable
    info.python.platform = sys.platform
    try:
        packages = []
        import pip
        for package in pip.get_installed_distributions():
            packages.append( Options(name=package.project_name, version=package.version) )
        info.python.packages = packages
    except:
        pass
    #
    info.environment = Options()
    path = os.environ.get('PATH', None)
    if not path is None:
        info.environment['shell path'] = path.split(os.pathsep)
    info.environment['python path'] = sys.path
    #
    print('#')
    print('# Information About the Python and Shell Environment')
    print('#')
    print(str(info))
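Below is a minimal sketch of the Options container these examples build on. It assumes PyUtilib's Options (importable here as pyutilib.misc.Options; newer Pyomo offers the equivalent pyomo.common.collections.Bunch): a dict subclass that also supports attribute-style access and renders its contents via str(), which is what help_environment ultimately prints.

from pyutilib.misc import Options  # assumed import path for the PyUtilib-era code above

info = Options()
info.python = Options()
info.python.version = '3.7.0'    # placeholder values, not probed from the running system
info.python.platform = 'linux'
print(info.python.version)       # attribute access doubles as item lookup: info['python']['version']
print(str(info))                 # renders the nested options block, like the final print above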
Example #3
def create_test_suites(filename=None, config=None, _globals=None, options=None):
    if options is None:  #pragma:nocover
        options = Options()
    #
    # Add categories specified by the PYUTILIB_AUTOTEST_CATEGORIES
    # or PYUTILIB_UNITTEST_CATEGORIES environment variables
    #
    if options is None or options.categories is None or len(
            options.categories) == 0:
        options.categories = set()
        if 'PYUTILIB_AUTOTEST_CATEGORIES' in os.environ:
            for cat in re.split(',',
                                os.environ['PYUTILIB_AUTOTEST_CATEGORIES']):
                if cat != '':
                    options.categories.add(cat.strip())
        elif 'PYUTILIB_UNITTEST_CATEGORIES' in os.environ:
            for cat in re.split(',',
                                os.environ['PYUTILIB_UNITTEST_CATEGORIES']):
                if cat != '':
                    options.categories.add(cat.strip())
    #
    if not filename is None:
        if options.currdir is None:
            options.currdir = dirname(abspath(filename)) + os.sep
        #
        ep = ExtensionPoint(plugins.ITestParser)
        ftype = os.path.splitext(filename)[1]
        if not ftype == '':
            ftype = ftype[1:]
        service = ep.service(ftype)
        if service is None:
            raise IOError(
                "Unknown file type.  Cannot load test configuration from file '%s'"
                % filename)
        config = service.load_test_config(filename)
    #service.print_test_config(config)
    validate_test_config(config)
    #
    # Evaluate Python expressions
    #
    for item in config.get('python', []):
        try:
            exec(item, _globals)
        except Exception:
            err = sys.exc_info()[1]
            print("ERROR executing '%s'" % item)
            print("  Exception: %s" % str(err))
    #
    # Create test driver, which is put in the global namespace
    #
    driver = plugins.TestDriverFactory(config['driver'])
    if driver is None:
        raise IOError("Unexpected test driver '%s'" % config['driver'])
    _globals["test_driver"] = driver
    #
    # Generate suite
    #
    for suite in config.get('suites', {}):
        create_test_suite(suite, config, _globals, options)
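The category filter above is driven by an environment variable; here is a hedged sketch of exercising it (the variable name comes straight from the code, the category tags are invented):

import os

os.environ['PYUTILIB_AUTOTEST_CATEGORIES'] = 'smoke, nightly'   # hypothetical category tags
# With options.categories left empty, create_test_suites() now fills
# options.categories with {'smoke', 'nightly'} before building the suites.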
Example #4
class PyomoDataCommands(Plugin):

    alias("dat", "Pyomo data command file interface")

    implements(IDataManager, service=False)

    def __init__(self):
        self._info = []
        self.options = Options()

    def available(self):
        return True

    def initialize(self, **kwds):
        self.filename = kwds.pop("filename")
        self.add_options(**kwds)

    def add_options(self, **kwds):
        self.options.update(kwds)

    def open(self):
        if self.filename is None:  # pragma:nocover
            raise IOError("No filename specified")
        if not os.path.exists(self.filename):  # pragma:nocover
            raise IOError("Cannot find file '%s'" % self.filename)

    def close(self):
        pass

    def read(self):
        """
        This function does nothing, since executing Pyomo data commands
        both reads and processes the data all at once.
        """
        pass

    def write(self, data):  # pragma:nocover
        """
        This function does nothing, because we cannot write to a *.dat file.
        """
        pass

    def process(self, model, data, default):
        """
        Read Pyomo data commands and process the data.
        """
        _process_include(["include", self.filename], model, data, default, self.options)

    def clear(self):
        self._info = []
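A hedged usage sketch of the IDataManager protocol this plugin implements (initialize, open, process, close); the model object and the .dat filename are placeholders:

model = None                              # placeholder; in practice an AbstractModel being populated
dm = PyomoDataCommands()
dm.initialize(filename='example.dat')     # hypothetical data command file
dm.open()                                 # verifies that the file exists
data = {}
dm.process(model, data, default=None)     # reads and applies the data commands
dm.close()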
Example #5
    def __init__(self):
        """
        Constructor
        """
        self._info = None
        self._data = None
        self.options = Options()
        self.options.ncolumns = 1
Example #6
def configure_loggers(options=None, reset=False):
    if reset:
        options = Options()
        options.runtime = Options()
        options.runtime.logging = 'quiet'
        logging.getLogger('pyomo.core').handlers = []
        logging.getLogger('pyomo').handlers = []
        logging.getLogger('pyutilib').handlers = []
    #
    # Configure the logger
    #
    if options.runtime is None:
        options.runtime = Options()
    if options.runtime.logging == 'quiet':
        logging.getLogger('pyomo.opt').setLevel(logging.ERROR)
        logging.getLogger('pyomo.core').setLevel(logging.ERROR)
        logging.getLogger('pyomo').setLevel(logging.ERROR)
        logging.getLogger('pyutilib').setLevel(logging.ERROR)
    elif options.runtime.logging == 'warning':
        logging.getLogger('pyomo.opt').setLevel(logging.WARNING)
        logging.getLogger('pyomo.core').setLevel(logging.WARNING)
        logging.getLogger('pyomo').setLevel(logging.WARNING)
        logging.getLogger('pyutilib').setLevel(logging.WARNING)
    elif options.runtime.logging == 'info':
        logging.getLogger('pyomo.opt').setLevel(logging.INFO)
        logging.getLogger('pyomo.core').setLevel(logging.INFO)
        logging.getLogger('pyomo').setLevel(logging.INFO)
        logging.getLogger('pyutilib').setLevel(logging.INFO)
    elif options.runtime.logging == 'verbose':
        logger.setLevel(logging.DEBUG)
        logging.getLogger('pyomo').setLevel(logging.DEBUG)
        logging.getLogger('pyutilib').setLevel(logging.DEBUG)
    elif options.runtime.logging == 'debug':
        logging.getLogger('pyomo.opt').setLevel(logging.DEBUG)
        logging.getLogger('pyomo.core').setLevel(logging.DEBUG)
        logging.getLogger('pyomo').setLevel(logging.DEBUG)
        logging.getLogger('pyutilib').setLevel(logging.DEBUG)
    if options.runtime.logfile:
        logging.getLogger('pyomo.opt').handlers = []
        logging.getLogger('pyomo.core').handlers = []
        logging.getLogger('pyomo').handlers = []
        logging.getLogger('pyutilib').handlers = []
        logging.getLogger('pyomo.core').addHandler( logging.FileHandler(options.runtime.logfile, 'w'))
        logging.getLogger('pyomo').addHandler( logging.FileHandler(options.runtime.logfile, 'w'))
        logging.getLogger('pyutilib').addHandler( logging.FileHandler(options.runtime.logfile, 'w'))
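A usage sketch, assuming the same Options container used throughout these examples:

opts = Options()
opts.runtime = Options()
opts.runtime.logging = 'debug'    # one of: quiet, warning, info, verbose, debug
opts.runtime.logfile = None       # or a path, to route log records to a file
configure_loggers(opts)           # raises the pyomo/pyutilib loggers to DEBUG
configure_loggers(reset=True)     # later: clear handlers and fall back to the quiet defaults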
Example #7
    def setUp(self, testcase, options):
        global tmpdir
        tmpdir = os.getcwd()
        os.chdir(options.currdir)
        pyutilib.services.TempfileManager.push()
        pyutilib.services.TempfileManager.sequential_files(0)
        pyutilib.services.TempfileManager.tempdir = options.currdir
        #
        if ':' in options.solver:
            solver, sub_solver = options.solver.split(':')
            if options.solver_options is None:
                _options = Options()
            else:
                _options = options.solver_options
            _options.solver = sub_solver
            testcase.opt = pyomo.opt.SolverFactory(solver, options=_options)
        else:
            testcase.opt = pyomo.opt.SolverFactory(options.solver, options=options.solver_options)
        if testcase.opt is None or not testcase.opt.available(False):
            testcase.skipTest('Solver %s is not available' % options.solver)
Example #8
def initialize(**kwds):
    obj = Options(**kwds)
    #
    # Set obj.available
    #
    opt = None
    try:
        opt = SolverFactory(obj.name, solver_io=obj.io)
    except:
        pass
    if opt is None or isinstance(opt, UnknownSolver):
        obj.available = False
    elif (obj.name == "gurobi") and (not GUROBISHELL.license_is_valid()):
        obj.available = False
    elif (obj.name == "baron") and (not BARONSHELL.license_is_valid()):
        obj.available = False
    else:
        obj.available = (opt.available(exception_flag=False)) and (
            (not hasattr(opt, "executable")) or (opt.executable() is not None)
        )
    #
    # Check capabilities
    #
    if obj.available:
        for _c in obj.capabilities:
            if not _c in opt._capabilities:
                raise ValueError("Solver %s does not support capability %s!" % (obj.name, _c))
        #
        # Get version
        #
        obj.version = opt.version()
    return obj
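A hedged sketch of calling this helper: the keyword names mirror the attributes read above (name, io, capabilities), and the capability strings are borrowed from the _capabilities blocks shown in later examples.

case = initialize(name='glpk', io='lp', capabilities=set(['linear', 'integer']))
if case.available:
    print(case.name, case.version)
else:
    print(case.name, 'is not installed or not licensed')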
Example #9
    def test_t1(self):
        # Run a simple model
        model = ConcreteModel()
        model.A = RangeSet(1,4)
        model.x = Var(model.A, bounds=(-1,1))
        def obj_rule(model):
            return sum_product(model.x)
        model.obj = Objective(rule=obj_rule)
        def c_rule(model):
            expr = 0
            for i in model.A:
                expr += i*model.x[i]
            return expr == 0
        model.c = Constraint(rule=c_rule)

        #
        data = Options()
        data.suffixes = {}
        data.solver_options = {}
        data.warmstart_filename = None
        data.filename = currdir+'t1.lp'
        model.write(data['filename'])
        INPUT = open(data['filename'],'r')
        data['file'] = INPUT.read()
        INPUT.close()
        data['opt'] = 'glpk'
        data.kwds = {}
        #
        results = self.worker.process(data)

        # Decode, evaluate and unpickle results
        if using_pyro4:
            # These two conversions are in place to unwrap
            # the hacks placed in the pyro_mip_server
            # before transmitting the results
            # object. These hacks are put in place to
            # avoid errors when transmitting the pickled
            # form of the results object with the default Pyro4
            # serializer (Serpent)
            if six.PY3:
                results = base64.decodebytes(
                    ast.literal_eval(results))
            else:
                results = base64.decodestring(results)

        results = pickle.loads(results)

        #
        results.write(filename=currdir+"t1.out", format='json')
        self.assertMatchesJsonBaseline(currdir+"t1.out",currdir+"t1.txt", tolerance=1e-4)
        self.assertEqual(results._smap_id, None)
        os.remove(data['filename'])
Example #10
    def __init__(self, **kwds):
        configure_gurobi_direct()
        #
        # Call base class constructor
        #
        kwds['type'] = 'gurobi_direct'
        OptSolver.__init__(self, **kwds)

        self._model = None

        # a dictionary that maps pyomo _VarData labels to the
        # corresponding Gurobi variable object. created each time
        # _populate_gurobi_instance is called.
        self._pyomo_gurobi_variable_map = None

        # this interface doesn't use files, but we can create a log
        # file if requested
        self._keepfiles = False
        # do we warmstart
        self._warm_start_solve = False
        # io_options
        self._symbolic_solver_labels = False
        self._output_fixed_variable_bounds = False
        self._skip_trivial_constraint = False

        # Note: Undefined capabilities default to 'None'
        self._capabilities = Options()
        self._capabilities.linear = True
        self._capabilities.quadratic_objective = True
        self._capabilities.quadratic_constraint = True
        self._capabilities.integer = True
        self._capabilities.sos1 = True
        self._capabilities.sos2 = True

        # collection of id(_VarData).
        self._referenced_variable_ids = set()
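As the comment above notes, capabilities that are never assigned default to None: reading an unset attribute from the Options container returns None rather than raising, so capability checks reduce to simple truth tests. A small sketch:

caps = Options()
caps.linear = True
caps.integer = True
print(bool(caps.linear))            # True: explicitly declared
print(caps.quadratic_constraint)    # None: never set, i.e. treated as unsupported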
Example #11
def test_scenarios(arg=None):
    """
    Generate scenarios
    """
    for model in sorted(test_models()):
        _model = test_models(model)
        if not arg is None and not arg(_model):
            continue
        for solver, io in sorted(test_solver_cases()):
            _solver_case = test_solver_cases(solver, io)

            # Skip this test case if the solver doesn't support the
            # capabilities required by the model
            if not _model.capabilities.issubset(_solver_case.capabilities):
                continue

            # Set status values for expected failures
            status = 'ok'
            msg = ""
            if not _solver_case.available:
                status = 'skip'
                msg = "Skipping test because solver %s (%s) is unavailable" % (
                    solver, io)
            if (solver, io, _model.description) in ExpectedFailures:
                case = ExpectedFailures[solver, io, _model.description]
                if _solver_case.version is not None and\
                   case[0](_solver_case.version):
                    status = 'expected failure'
                    msg = case[1]

            # Return scenario dimensions and scenario information
            yield (model, solver, io), Options(status=status,
                                               msg=msg,
                                               model=_model,
                                               solver=None,
                                               testcase=_solver_case)
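A usage sketch: the generator yields a (model, solver, io) key plus an Options record, which is exactly how run_test_scenarios consumes it in a later example.

for (model, solver, io), case in test_scenarios():
    if case.status == 'skip':
        continue
    print(model, solver, io, case.status, case.msg)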
Example #12
    def __init__(self, **kwds):
        configure_glpk()
        #
        # Call base constructor
        #
        kwds['type'] = 'glpk'
        SystemCallSolver.__init__(self, **kwds)
        #
        # Valid problem formats, and valid results for each format
        #
        self._valid_problem_formats = [
            ProblemFormat.mod, ProblemFormat.cpxlp, ProblemFormat.mps
        ]
        self._valid_result_formats = {
            ProblemFormat.mod: ResultsFormat.soln,
            ProblemFormat.cpxlp: ResultsFormat.soln,
            ProblemFormat.mps: ResultsFormat.soln,
        }
        self.set_problem_format(ProblemFormat.cpxlp)

        # Note: Undefined capabilities default to 'None'
        self._capabilities = Options()
        self._capabilities.linear = True
        self._capabilities.integer = True
Example #13
def configure_loggers(options=None, shutdown=False):
    if shutdown:
        options = Options()
        options.runtime = Options()
        options.runtime.logging = 'quiet'
        if configure_loggers.fileLogger is not None:
            logging.getLogger('pyomo').handlers = []
            logging.getLogger('pyutilib').handlers = []
            configure_loggers.fileLogger.close()
            configure_loggers.fileLogger = None
            # TBD: This seems dangerous in Windows, as the process will
            # have multiple open file handles pointing to the same file.
            pyutilib.misc.reset_redirect()

    #
    # Configure the logger
    #
    if options.runtime is None:
        options.runtime = Options()
    if options.runtime.logging == 'quiet':
        logging.getLogger('pyomo').setLevel(logging.ERROR)
    elif options.runtime.logging == 'warning':
        logging.getLogger('pyomo').setLevel(logging.WARNING)
    elif options.runtime.logging == 'info':
        logging.getLogger('pyomo').setLevel(logging.INFO)
        logging.getLogger('pyutilib').setLevel(logging.INFO)
    elif options.runtime.logging == 'verbose':
        logging.getLogger('pyomo').setLevel(logging.DEBUG)
        logging.getLogger('pyutilib').setLevel(logging.DEBUG)
    elif options.runtime.logging == 'debug':
        logging.getLogger('pyomo').setLevel(logging.DEBUG)
        logging.getLogger('pyutilib').setLevel(logging.DEBUG)

    if options.runtime.logfile:
        configure_loggers.fileLogger \
            = logging.FileHandler(options.runtime.logfile, 'w')
        logging.getLogger('pyomo').handlers = []
        logging.getLogger('pyutilib').handlers = []
        logging.getLogger('pyomo').addHandler(configure_loggers.fileLogger)
        logging.getLogger('pyutilib').addHandler(configure_loggers.fileLogger)
        # TBD: This seems dangerous in Windows, as the process will
        # have multiple open file handles pointing to the same file.
        pyutilib.misc.setup_redirect(options.runtime.logfile)
Example #14
def initialize(**kwds):
    obj = Options(**kwds)
    #
    # Set obj.available
    #
    opt = None
    try:
        opt = SolverFactory(obj.name, solver_io=obj.io)
    except:
        pass
    if opt is None or isinstance(opt, UnknownSolver):
        obj.available = False
    elif (obj.name == "gurobi") and \
       (not GUROBISHELL.license_is_valid()):
        obj.available = False
    elif (obj.name == "baron") and \
       (not BARONSHELL.license_is_valid()):
        obj.available = False
    else:
        obj.available = \
            (opt.available(exception_flag=False)) and \
            ((not hasattr(opt,'executable')) or \
            (opt.executable() is not None))
    #
    # Check capabilities, even if the solver is not available
    #
    if not (opt is None or isinstance(opt, UnknownSolver)):
        for _c in obj.capabilities:
            if not _c in opt._capabilities:
                raise ValueError("Solver %s does not support capability %s!" %
                                 (obj.name, _c))
    #
    # Get version
    #
    if obj.available:
        obj.version = opt.version()
    return obj
Example #15
class TableData(object):
    """
    A class used to read/write data from/to a table in an external
    data source.
    """

    def __init__(self):
        """
        Constructor
        """
        self._info=None
        self._data=None
        self.options = Options()
        self.options.ncolumns = 1

    def available(self):
        """
        Returns:
            Return :const:`True` if the data manager is available.
        """
        return True

    def initialize(self, **kwds):
        """
        Initialize the data manager with keyword arguments.

        The `filename` argument is recognized here, and other arguments
        are passed to the :func:`add_options` method.
        """
        self.filename = kwds.pop('filename')
        self.add_options(**kwds)

    def add_options(self, **kwds):
        """
        Add the keyword options to the :class:`Options` object in this
        object.
        """
        self.options.update(kwds)

    def open(self):                        #pragma:nocover
        """
        Open the data manager.
        """
        pass

    def read(self):                         #pragma:nocover
        """
        Read data from the data manager.
        """
        return False

    def write(self, data):                  #pragma:nocover
        """
        Write data to the data manager.
        """
        return False

    def close(self):                        #pragma:nocover
        """
        Close the data manager.
        """
        pass

    def process(self, model, data, default):
        """
        Process the data that was extracted from this data manager and
        return it.
        """
        if model is None:
            model = self.options.model
        if not self.options.namespace in data:
            data[self.options.namespace] = {}
        return _process_data(
          self._info,
          model,
          data[self.options.namespace],
          default,
          self.filename,
          index=self.options.index,
          set=self.options.set,
          param=self.options.param,
          ncolumns = self.options.ncolumns)

    def clear(self):
        """
        Clear the data that was extracted from this table
        """
        self._info = None

    def _set_data(self, headers, rows):
        from pyomo.core.base.sets import Set
        from pyomo.core.base.param import Param

        header_index = []
        if self.options.select is None:
            for i in xrange(len(headers)):
                header_index.append(i)
        else:
            for i in self.options.select:
                try:
                    header_index.append(headers.index(str(i)))
                except:
                    print("Model declaration '%s' not found in returned query columns" %str(i))
                    raise
        self.options.ncolumns = len(headers)

        if not self.options.param is None:
            if not type(self.options.param) in (list, tuple):
                self.options.param = (self.options.param,)
            _params = []
            for p in self.options.param:
                if isinstance(p, Param):
                    self.options.model = p.model()
                    _params.append(p.local_name)
                else:
                    _params.append(p)
            self.options.param = tuple(_params)

        if isinstance(self.options.set, Set):
            self.options.model = self.options.set.model()
            self.options.set = self.options.set.local_name

        if isinstance(self.options.index, Set):
            self.options.model = self.options.index.model()
            self.options.index = self.options.index.local_name

        elif type(self.options.index) in [tuple, list]:
            tmp = []
            for val in self.options.index:
                if isinstance(val, Set):
                    tmp.append(val.local_name)
                    self.options.model = val.model()
                else:
                    tmp.append(val)
            self.options.index = tuple(tmp)

        if self.options.format is None:
            if not self.options.set is None:
                self.options.format = 'set'
            elif not self.options.param is None:
                self.options.format = 'table'
            if self.options.format is None:
                raise ValueError("Unspecified format and  data option")
        elif self.options.set is None and self.options.param is None:
            msg = "Must specify the set or parameter option for data"
            raise IOError(msg)

        if self.options.format == 'set':
            if not self.options.index is None:
                msg = "Cannot specify index for data with the 'set' format: %s"
                raise IOError(msg % str(self.options.index))

            self._info = ["set",self.options.set,":="]
            for row in rows:
                if self.options.ncolumns > 1:
                    self._info.append(tuple(row))
                else:
                    self._info.extend(row)

        elif self.options.format == 'set_array':
            if not self.options.index is None:
                msg = "Cannot specify index for data with the 'set_array' "   \
                      'format: %s'
                raise IOError(msg % str(self.options.index))

            self._info = ["set",self.options.set, ":"]
            self._info.extend(headers[1:])
            self._info.append(":=")
            for row in rows:
                self._info.extend(row)

        elif self.options.format == 'transposed_array':
            self._info = ["param",self.options.param[0],"(tr)",":"]
            self._info.extend(headers[1:])
            self._info.append(":=")
            for row in rows:
                self._info.extend(row)

        elif self.options.format == 'array':
            self._info = ["param",self.options.param[0],":"]
            self._info.extend(headers[1:])
            self._info.append(":=")
            for row in rows:
                self._info.extend(row)

        elif self.options.format == 'table':
            if self.options.index is not None:
                self._info = ["param",":",self.options.index,":"]
            else:
                self._info = ["param",":"]
            for param in self.options.param:
                self._info.append(param)
            self._info.append(":=")
            for row in rows:
                for i in header_index:
                    self._info.append(row[i])
            self.options.ncolumns = len(header_index)
        else:
            msg = "Unknown parameter format: '%s'"
            raise ValueError(msg % self.options.format)

    def _get_table(self):
        from pyomo.core.expr import value

        tmp = []
        if not self.options.columns is None:
            tmp.append(self.options.columns)
        if not self.options.set is None:
            # Create column names
            if self.options.columns is None:
                cols = []
                for i in xrange(self.options.set.dimen):
                    cols.append(self.options.set.local_name+str(i))
                tmp.append(cols)
            # Get rows
            if not self.options.sort is None:
                for data in sorted(self.options.set):
                    if self.options.set.dimen > 1:
                        tmp.append(list(data))
                    else:
                        tmp.append([data])
            else:
                for data in self.options.set:
                    if self.options.set.dimen > 1:
                        tmp.append(list(data))
                    else:
                        tmp.append([data])
        elif not self.options.param is None:
            if type(self.options.param) in (list,tuple):
                _param = self.options.param
            else:
                _param = [self.options.param]
            tmp = []
            # Collect data
            for index in _param[0]:
                if index is None:
                    row = []
                elif type(index) in (list,tuple):
                    row = list(index)
                else:
                    row = [index]
                for param in _param:
                    row.append(value(param[index]))
                tmp.append(row)
            # Create column names
            if self.options.columns is None:
                cols = []
                for i in xrange(len(tmp[0])-len(_param)):
                    cols.append('I'+str(i))
                for param in _param:
                    cols.append(param)
                tmp = [cols] + tmp
        return tmp
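A hedged sketch of how a concrete subclass might hand tabular data to _set_data; the column names and values are invented, and the 'table' format yields the param-style token stream that process() later passes to Pyomo's data processor.

td = TableData()
td.add_options(param='demand', format='table')               # hypothetical parameter name
td._set_data(['city', 'demand'], [['NYC', 10], ['LA', 20]])
print(td._info)   # expected: ['param', ':', 'demand', ':=', 'NYC', 10, 'LA', 20]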
Example #16
class YamlDictionary(object):

    def __init__(self):
        self._info = {}
        self.options = Options()

    def available(self):
        return yaml_available

    def requirements(self):
        return "pyyaml"

    def initialize(self, **kwds):
        self.filename = kwds.pop('filename')
        self.add_options(**kwds)

    def add_options(self, **kwds):
        self.options.update(kwds)

    def open(self):
        if self.filename is None:
            raise IOError("No filename specified")

    def close(self):
        pass

    def read(self):
        """
        This function loads data from a YAML file and tuplizes the nested
        dictionaries and lists of lists.
        """
        if not os.path.exists(self.filename):
            raise IOError("Cannot find file '%s'" % self.filename)
        INPUT = open(self.filename, 'r')
        jdata = yaml.load(INPUT)
        INPUT.close()
        if jdata is None:
            raise IOError("Empty YAML file")
        self._info = {}
        for k,v in jdata.items():
            self._info[k] = tuplize(v)

    def write(self, data):
        """
        This function creates a YAML file for the specified data.
        """
        with open(self.filename, 'w') as OUTPUT:
            jdata = {}
            if self.options.data is None:
                for k,v in data.items():
                    jdata[k] = detuplize(v)
            elif type(self.options.data) in (list, tuple):
                for k in self.options.data:
                    jdata[k] = detuplize(data[k], sort=self.options.sort)
            else:
                k = self.options.data
                jdata[k] = detuplize(data[k])
            yaml.dump(jdata, OUTPUT)

    def process(self, model, data, default):
        """
        Set the data for the selected components
        """
        if not self.options.namespace in data:
            data[self.options.namespace] = {}
        #
        try:
            if self.options.data is None:
                for key in self._info:
                    self._set_data(data, self.options.namespace, key, self._info[key])
            elif type(self.options.data) in (list, tuple):
                for key in self.options.data:
                    self._set_data(data, self.options.namespace, key, self._info[key])
            else:
                key = self.options.data
                self._set_data(data, self.options.namespace, key, self._info[key])
        except KeyError:
            raise IOError("Data value for '%s' is not available in YAML file '%s'" % (key, self.filename))

    def _set_data(self, data, namespace, name, value):
        if type(value) is dict:
            data[namespace][name] = value
        else:
            data[namespace][name] = {None: value}

    def clear(self):
        self._info = {}
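A hedged round-trip sketch (assumes PyYAML is installed; the filename and values are placeholders, and tuplize/detuplize are the helpers used by read and write above):

ydm = YamlDictionary()
ydm.initialize(filename='scratch.yaml')        # placeholder path
ydm.write({'demand': {'NYC': 10, 'LA': 20}})   # detuplizes and dumps to YAML
ydm.read()                                     # reloads and re-tuplizes
data = {}
ydm.process(None, data, None)                  # copies values under data[self.options.namespace]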
Example #17
def run_test_scenarios(options):
    logging.disable(logging.WARNING)

    solvers = set(options.solver)
    stat = {}

    for key, test_case in test_scenarios():
        model, solver, io = key
        if len(solvers) > 0 and not solver in solvers:
            continue
        if test_case.status == 'skip':
            continue

        # Create the model test class
        model_class = test_case.model()
        # Create the model instance
        model_class.generate_model()
        model_class.warmstart_model()
        # Solve
        symbolic_labels = False
        load_solutions = False
        opt, results = model_class.solve(solver, io,
                                         test_case.testcase.io_options, {},
                                         symbolic_labels, load_solutions)

        termination_condition = results['Solver'][0]['termination condition']
        # Validate solution status
        try:
            model_class.post_solve_test_validation(None, results)
        except:
            if test_case.status == 'expected failure':
                stat[key] = (True, "Expected failure")
            else:
                stat[key] = (False, "Unexpected termination condition: %s" %
                             str(termination_condition))
            continue
        if termination_condition == TerminationCondition.unbounded or \
           termination_condition == TerminationCondition.infeasible:
            # Unbounded or Infeasible
            stat[key] = (True, "")
        else:
            # Validate the solution returned by the solver
            if isinstance(model_class.model, IBlock):
                model_class.model.load_solution(results.solution)
            else:
                model_class.model.solutions.load_from(
                    results,
                    default_variable_value=opt.default_variable_value())
            rc = model_class.validate_current_solution(
                suffixes=model_class.test_suffixes)

            if test_case.status == 'expected failure':
                if rc[0] is True:
                    stat[key] = (False, "Unexpected success")
                else:
                    stat[key] = (True, "Expected failure")
            else:
                if rc[0] is True:
                    stat[key] = (True, "")
                else:
                    stat[key] = (False, "Unexpected failure")

    if options.verbose:
        print("---------------")
        print(" Test Failures")
        print("---------------")
    nfail = 0
    #
    # Summarize the runtime statistics, by solver
    #
    summary = {}
    total = Options(NumEPass=0, NumEFail=0, NumUPass=0, NumUFail=0)
    for key in stat:
        model, solver, io = key
        if not solver in summary:
            summary[solver] = Options(NumEPass=0,
                                      NumEFail=0,
                                      NumUPass=0,
                                      NumUFail=0)
        _pass, _str = stat[key]
        if _pass:
            if _str == "Expected failure":
                summary[solver].NumEFail += 1
            else:
                summary[solver].NumEPass += 1
        else:
            nfail += 1
            if _str == "Unexpected failure":
                summary[solver].NumUFail += 1
                if options.verbose:
                    print("- Unexpected Test Failure: " +
                          ", ".join((model, solver, io)))
            else:
                summary[solver].NumUPass += 1
                if options.verbose:
                    print("- Unexpected Test Success: " +
                          ", ".join((model, solver, io)))
    if options.verbose:
        if nfail == 0:
            print("- NONE")
        print("")

    stream = sys.stdout
    maxSolverNameLen = max([max(len(name) for name in summary), len("Solver")])
    fmtStr = "{{0:<{0}}}| {{1:>8}} | {{2:>8}} | {{3:>10}} | {{4:>10}} | {{5:>13}}\n".format(
        maxSolverNameLen + 2)
    #
    stream.write("\n")
    stream.write("Solver Test Summary\n")
    stream.write("=" * (maxSolverNameLen + 66) + "\n")
    stream.write(
        fmtStr.format("Solver", "# Pass", "# Fail", "# OK Fail", "# Bad Pass",
                      "% OK"))
    stream.write("=" * (maxSolverNameLen + 66) + "\n")
    #
    for _solver in sorted(summary):
        ans = summary[_solver]
        total.NumEPass += ans.NumEPass
        total.NumEFail += ans.NumEFail
        total.NumUPass += ans.NumUPass
        total.NumUFail += ans.NumUFail
        stream.write(
            fmtStr.format(
                _solver, str(ans.NumEPass), str(ans.NumUFail),
                str(ans.NumEFail), str(ans.NumUPass),
                str(
                    int(100.0 * (ans.NumEPass + ans.NumEFail) /
                        (ans.NumEPass + ans.NumEFail + ans.NumUFail +
                         ans.NumUPass)))))
    #
    stream.write("=" * (maxSolverNameLen + 66) + "\n")
    stream.write(
        fmtStr.format(
            "TOTALS", str(total.NumEPass), str(total.NumUFail),
            str(total.NumEFail), str(total.NumUPass),
            str(
                int(100.0 * (total.NumEPass + total.NumEFail) /
                    (total.NumEPass + total.NumEFail + total.NumUFail +
                     total.NumUPass)))))
    stream.write("=" * (maxSolverNameLen + 66) + "\n")

    logging.disable(logging.NOTSET)
Example #18
class JSONDictionary(Plugin):

    alias("json", "JSON file interface")

    implements(IDataManager, service=False)

    def __init__(self):
        self._info = {}
        self.options = Options()

    def available(self):
        return True

    def initialize(self, **kwds):
        self.filename = kwds.pop('filename')
        self.add_options(**kwds)

    def add_options(self, **kwds):
        self.options.update(kwds)

    def open(self):
        if self.filename is None:
            raise IOError("No filename specified")

    def close(self):
        pass

    def read(self):
        """
        This function loads data from a JSON file and tuplizes the nested
        dictionaries and lists of lists.
        """
        if not os.path.exists(self.filename):
            raise IOError("Cannot find file '%s'" % self.filename)
        INPUT = open(self.filename, 'r')
        jdata = json.load(INPUT)
        INPUT.close()
        if jdata is None or len(jdata) == 0:
            raise IOError("Empty JSON data file")
        self._info = {}
        for k,v in jdata.items():
            self._info[k] = tuplize(v)

    def write(self, data):
        """
        This function creates a JSON file for the specified data.
        """
        with open(self.filename, 'w') as OUTPUT:
            jdata = {}
            if self.options.data is None:
                for k,v in data.items():
                    jdata[k] = detuplize(v)
            elif type(self.options.data) in (list, tuple):
                for k in self.options.data:
                    jdata[k] = detuplize(data[k])
            else:
                k = self.options.data
                jdata[k] = detuplize(data[k])
            json.dump(jdata, OUTPUT)

    def process(self, model, data, default):
        """
        Set the data for the selected components
        """
        if not self.options.namespace in data:
            data[self.options.namespace] = {}
        #
        try:
            if self.options.data is None:
                for key in self._info:
                    self._set_data(data, self.options.namespace, key, self._info[key])
            elif type(self.options.data) in (list, tuple):
                for key in self.options.data:
                    self._set_data(data, self.options.namespace, key, self._info[key])
            else:
                key = self.options.data
                self._set_data(data, self.options.namespace, key, self._info[key])
        except KeyError:
            raise IOError("Data value for '%s' is not available in JSON file '%s'" % (key, self.filename))

    def _set_data(self, data, namespace, name, value):
        if type(value) is dict:
            data[namespace][name] = value
        else:
            data[namespace][name] = {None: value}

    def clear(self):
        self._info = {}
Example #19
class TableData(object):
    """
    A class used to read/write data from/to a table in an external
    data source.
    """
    def __init__(self):
        """
        Constructor
        """
        self._info = None
        self._data = None
        self.options = Options()
        self.options.ncolumns = 1

    def available(self):
        """
        Returns:
            Return :const:`True` if the data manager is available.
        """
        return True

    def initialize(self, **kwds):
        """
        Initialize the data manager with keyword arguments.

        The `filename` argument is recognized here, and other arguments
        are passed to the :func:`add_options` method.
        """
        self.filename = kwds.pop('filename')
        self.add_options(**kwds)

    def add_options(self, **kwds):
        """
        Add the keyword options to the :class:`Options` object in this
        object.
        """
        self.options.update(kwds)

    def open(self):  #pragma:nocover
        """
        Open the data manager.
        """
        pass

    def read(self):  #pragma:nocover
        """
        Read data from the data manager.
        """
        return False

    def write(self, data):  #pragma:nocover
        """
        Write data to the data manager.
        """
        return False

    def close(self):  #pragma:nocover
        """
        Close the data manager.
        """
        pass

    def process(self, model, data, default):
        """
        Process the data that was extracted from this data manager and
        return it.
        """
        if model is None:
            model = self.options.model
        if not self.options.namespace in data:
            data[self.options.namespace] = {}
        return _process_data(self._info,
                             model,
                             data[self.options.namespace],
                             default,
                             self.filename,
                             index=self.options.index,
                             set=self.options.set,
                             param=self.options.param,
                             ncolumns=self.options.ncolumns)

    def clear(self):
        """
        Clear the data that was extracted from this table
        """
        self._info = None

    def _set_data(self, headers, rows):
        from pyomo.core.base.sets import Set
        from pyomo.core.base.param import Param

        header_index = []
        if self.options.select is None:
            for i in xrange(len(headers)):
                header_index.append(i)
        else:
            for i in self.options.select:
                try:
                    header_index.append(headers.index(str(i)))
                except:
                    print(
                        "Model declaration '%s' not found in returned query columns"
                        % str(i))
                    raise
        self.options.ncolumns = len(headers)

        if not self.options.param is None:
            if not type(self.options.param) in (list, tuple):
                self.options.param = (self.options.param, )
            _params = []
            for p in self.options.param:
                if isinstance(p, Param):
                    self.options.model = p.model()
                    _params.append(p.local_name)
                else:
                    _params.append(p)
            self.options.param = tuple(_params)

        if isinstance(self.options.set, Set):
            self.options.model = self.options.set.model()
            self.options.set = self.options.set.local_name

        if isinstance(self.options.index, Set):
            self.options.model = self.options.index.model()
            self.options.index = self.options.index.local_name

        elif type(self.options.index) in [tuple, list]:
            tmp = []
            for val in self.options.index:
                if isinstance(val, Set):
                    tmp.append(val.local_name)
                    self.options.model = val.model()
                else:
                    tmp.append(val)
            self.options.index = tuple(tmp)

        if self.options.format is None:
            if not self.options.set is None:
                self.options.format = 'set'
            elif not self.options.param is None:
                self.options.format = 'table'
            if self.options.format is None:
                raise ValueError("Unspecified format and  data option")
        elif self.options.set is None and self.options.param is None:
            msg = "Must specify the set or parameter option for data"
            raise IOError(msg)

        if self.options.format == 'set':
            if not self.options.index is None:
                msg = "Cannot specify index for data with the 'set' format: %s"
                raise IOError(msg % str(self.options.index))

            self._info = ["set", self.options.set, ":="]
            for row in rows:
                if self.options.ncolumns > 1:
                    self._info.append(tuple(row))
                else:
                    self._info.extend(row)

        elif self.options.format == 'set_array':
            if not self.options.index is None:
                msg = "Cannot specify index for data with the 'set_array' "   \
                      'format: %s'
                raise IOError(msg % str(self.options.index))

            self._info = ["set", self.options.set, ":"]
            self._info.extend(headers[1:])
            self._info.append(":=")
            for row in rows:
                self._info.extend(row)

        elif self.options.format == 'transposed_array':
            self._info = ["param", self.options.param[0], "(tr)", ":"]
            self._info.extend(headers[1:])
            self._info.append(":=")
            for row in rows:
                self._info.extend(row)

        elif self.options.format == 'array':
            self._info = ["param", self.options.param[0], ":"]
            self._info.extend(headers[1:])
            self._info.append(":=")
            for row in rows:
                self._info.extend(row)

        elif self.options.format == 'table':
            if self.options.index is not None:
                self._info = ["param", ":", self.options.index, ":"]
            else:
                self._info = ["param", ":"]
            for param in self.options.param:
                self._info.append(param)
            self._info.append(":=")
            for row in rows:
                for i in header_index:
                    self._info.append(row[i])
            self.options.ncolumns = len(header_index)
        else:
            msg = "Unknown parameter format: '%s'"
            raise ValueError(msg % self.options.format)

    def _get_table(self):
        from pyomo.core.expr import value

        tmp = []
        if not self.options.columns is None:
            tmp.append(self.options.columns)
        if not self.options.set is None:
            # Create column names
            if self.options.columns is None:
                cols = []
                for i in xrange(self.options.set.dimen):
                    cols.append(self.options.set.local_name + str(i))
                tmp.append(cols)
            # Get rows
            if not self.options.sort is None:
                for data in sorted(self.options.set):
                    if self.options.set.dimen > 1:
                        tmp.append(list(data))
                    else:
                        tmp.append([data])
            else:
                for data in self.options.set:
                    if self.options.set.dimen > 1:
                        tmp.append(list(data))
                    else:
                        tmp.append([data])
        elif not self.options.param is None:
            if type(self.options.param) in (list, tuple):
                _param = self.options.param
            else:
                _param = [self.options.param]
            tmp = []
            # Collect data
            for index in _param[0]:
                if index is None:
                    row = []
                elif type(index) in (list, tuple):
                    row = list(index)
                else:
                    row = [index]
                for param in _param:
                    row.append(value(param[index]))
                tmp.append(row)
            # Create column names
            if self.options.columns is None:
                cols = []
                for i in xrange(len(tmp[0]) - len(_param)):
                    cols.append('I' + str(i))
                for param in _param:
                    cols.append(param)
                tmp = [cols] + tmp
        return tmp
Example #20
    def min_solve_solver(self, knapsack_data, active_item_list, model_data,
                         existance, output_directory, sub_problem, time,
                         max_duration, penval):

        ##Solver info
        opt = SolverFactory(self.solver)
        options = Options()
        opt.options.mip_tolerances_mipgap = self.mipgap

        ################################################################
        ### 			Generate Model (Model Specific)
        ################################################################
        if self.model_type == 'PRDP':
            ### Define Item List
            items = knapsack_data.ItemList

            ### Generate Model
            if 'greedy' in self._opts:
                from Core.Solvers.KDA.KDA_PRDP_Functions import PRDP_Min_Solve_Model_Generator_Greedy
                model = PRDP_Min_Solve_Model_Generator_Greedy(
                    knapsack_data, model_data, active_item_list, existance,
                    sub_problem, time, max_duration, self._opts, penval)
            else:
                from Core.Solvers.KDA.KDA_PRDP_Functions import PRDP_Min_Solve_Model_Generator
                model = PRDP_Min_Solve_Model_Generator(
                    knapsack_data, model_data, active_item_list, existance,
                    sub_problem, time, max_duration, self._opts, penval)

        ### Initiate Timer
        st = timer.clock()

        ################################################################
        ### 					Create Instance
        ################################################################

        for (i, j) in self.fixed_items[sub_problem]:
            model.x[i].value = j
            model.x[i].fixed = True

        model.preprocess()

        ################################################################
        ###						 Solve Model
        ################################################################
        results = opt.solve(model)

        ### Take finish Time
        ft = timer.clock()

        ################################################################
        ### 					Process Results
        ################################################################
        ### Make Output Directory
        if 'quiet' not in self._opts:
            if not os.path.exists(output_directory):
                os.makedirs(output_directory)

        ### Load Results
        model.solutions.load_from(results)

        #### Save problem information
        if 'quiet' not in self._opts:
            save_file = "IntKs_" + str(sub_problem) + "_" + str(time) + ".json"
            results.write(filename=os.path.join(output_directory, save_file))

        ### Create Performance variables for problem runtime and item management
        self.temp = ()
        self.Item_Selection = ()

        ## Store the items packed in the knapsack in temporary variable
        for i in items:
            if model.x[i].value == 1:

                ########################################################
                ### 		Store Results (Model Specific)
                ########################################################
                if self.model_type == 'PRDP':
                    from Core.Solvers.KDA.KDA_PRDP_Functions import results_processing
                    obj1, obj2 = results_processing(knapsack_data, model_data,
                                                    i, time)

                ### Items Selected
                self.Item_Selection += (obj1, )

                ### Append item
                self.temp += (obj2, )

        if time == 0:
            self.temp_dict[sub_problem] = self.temp
            self.temp_item_selection[sub_problem] = self.Item_Selection

        ## Store solve time
        self.run_time[(sub_problem, time)] = ft - st
Example #21
class PersistentSolver(DirectOrPersistentSolver):
    """
    A base class for persistent solvers. Direct solver interfaces do not use any file io.
    Rather, they interface directly with the python bindings for the specific solver. Persistent solver interfaces
    are similar except that they "remember" their model. Thus, persistent solver interfaces allow incremental changes
    to the solver model (e.g., the gurobi python model or the cplex python model). Note that users are responsible
    for notifying the persistent solver interfaces when changes are made to the corresponding pyomo model.

    Keyword Arguments
    -----------------
    type: str
        String indicating the class type of the solver instance.
    name: str
        String representing either the class type of the solver instance or an assigned name.
    doc: str
        Documentation for the solver
    options: dict
        Dictionary of solver options
    """
    def _presolve(self, **kwds):
        DirectOrPersistentSolver._presolve(self, **kwds)

    def set_instance(self, model, **kwds):
        """
        This method is used to translate the Pyomo model provided to an instance of the solver's Python model. This
        discards any existing model and starts from scratch.

        Parameters
        ----------
        model: ConcreteModel
            The pyomo model to be used with the solver.

        Keyword Arguments
        -----------------
        symbolic_solver_labels: bool
            If True, the solver's components (e.g., variables, constraints) will be given names that correspond to
            the Pyomo component names.
        skip_trivial_constraints: bool
            If True, then any constraints with a constant body will not be added to the solver model.
            Be careful with this. If a trivial constraint is skipped then that constraint cannot be removed from
            a persistent solver (an error will be raised if a user tries to remove a non-existent constraint).
        output_fixed_variable_bounds: bool
            If False then an error will be raised if a fixed variable is used in one of the solver constraints.
            This is useful for catching bugs. Ordinarily a fixed variable should appear as a constant value in the
            solver constraints. If True, then the error will not be raised.
        """
        return self._set_instance(model, kwds)

    def add_block(self, block):
        """Add a single Pyomo Block to the solver's model.

        This will keep any existing model components intact.

        Parameters
        ----------
        block: Block (scalar Block or single _BlockData)

        """
        if self._pyomo_model is None:
            raise RuntimeError(
                'You must call set_instance before calling add_block.')
        # see PR #366 for discussion about handling indexed
        # objects and keeping compatibility with the
        # pyomo.kernel objects
        #if block.is_indexed():
        #    for sub_block in block.values():
        #        self._add_block(block)
        #    return
        self._add_block(block)

    def set_objective(self, obj):
        """
        Set the solver's objective. Note that, at least for now, any existing objective will be discarded. Other than
        that, any existing model components will remain intact.

        Parameters
        ----------
        obj: Objective
        """
        if self._pyomo_model is None:
            raise RuntimeError(
                'You must call set_instance before calling set_objective.')
        return self._set_objective(obj)

    def add_constraint(self, con):
        """Add a single constraint to the solver's model.

        This will keep any existing model components intact.

        Parameters
        ----------
        con: Constraint (scalar Constraint or single _ConstraintData)

        """
        if self._pyomo_model is None:
            raise RuntimeError(
                'You must call set_instance before calling add_constraint.')
        # see PR #366 for discussion about handling indexed
        # objects and keeping compatibility with the
        # pyomo.kernel objects
        #if con.is_indexed():
        #    for child_con in con.values():
        #        self._add_constraint(child_con)
        #else:
        self._add_constraint(con)

    def add_var(self, var):
        """Add a single variable to the solver's model.

        This will keep any existing model components intact.

        Parameters
        ----------
        var: Var

        """
        if self._pyomo_model is None:
            raise RuntimeError(
                'You must call set_instance before calling add_var.')
        if id(self._pyomo_model) != id(var.model()):
            raise RuntimeError(
                'The pyomo var must be attached to the solver model')
        # see PR #366 for discussion about handling indexed
        # objects and keeping compatibility with the
        # pyomo.kernel objects
        #if var.is_indexed():
        #    for child_var in var.values():
        #        self._add_var(child_var)
        #else:
        self._add_var(var)

    def add_sos_constraint(self, con):
        """Add a single SOS constraint to the solver's model (if supported).

        This will keep any existing model components intact.

        Parameters
        ----------
        con: SOSConstraint

        """
        if self._pyomo_model is None:
            raise RuntimeError(
                'You must call set_instance before calling add_sos_constraint.'
            )
        # see PR #366 for discussion about handling indexed
        # objects and keeping compatibility with the
        # pyomo.kernel objects
        #if con.is_indexed():
        #    for child_con in con.values():
        #        self._add_sos_constraint(child_con)
        #else:
        self._add_sos_constraint(con)

    def add_column(self, model, var, obj_coef, constraints, coefficients):
        """Add a column to the solver's and Pyomo model

        This will add the Pyomo variable var to the solver's
        model, and put the coefficients on the associated 
        constraints in the solver model. If the obj_coef is
        not zero, it will add obj_coef*var to the objective 
        of both the Pyomo and solver's model.

        Parameters
        ----------
        model: pyomo ConcreteModel to which the column will be added
        var: Var (scalar Var or single _VarData)
        obj_coef: float, pyo.Param
        constraints: list of scalar Constraints of single _ConstraintDatas  
        coefficients: list of the coefficient to put on var in the associated constraint

        """
        if self._pyomo_model is None:
            raise RuntimeError(
                'You must call set_instance before calling add_column.')
        if id(self._pyomo_model) != id(model):
            raise RuntimeError(
                'The pyomo model which the column is being added to '
                'must be the same as the pyomo model attached to this '
                'PersistentSolver instance; i.e., the same pyomo model '
                'used in set_instance.')
        if id(self._pyomo_model) != id(var.model()):
            raise RuntimeError(
                'The pyomo var must be attached to the solver model')
        if var in self._pyomo_var_to_solver_var_map:
            raise RuntimeError(
                'The pyomo var must not have been already added to '
                'the solver model')
        if len(constraints) != len(coefficients):
            raise RuntimeError(
                'The list of constraints and the list of coefficients '
                'must be of equal length')
        obj_coef, constraints, coefficients = self._add_and_collect_column_data(
            var, obj_coef, constraints, coefficients)
        self._add_column(var, obj_coef, constraints, coefficients)
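
    # A hypothetical column-generation sketch for add_column, kept as a
    # comment. The solver name 'gurobi_persistent', the model m, and the
    # constraint m.demand are assumptions (import pyomo.environ as pyo is
    # assumed), not part of this interface:
    #
    #   opt = pyo.SolverFactory('gurobi_persistent')
    #   opt.set_instance(m)                      # m already has constraint m.demand
    #   m.new_route = pyo.Var(within=pyo.NonNegativeReals)
    #   opt.add_column(m, m.new_route, obj_coef=5.0,
    #                  constraints=[m.demand], coefficients=[1.0])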

    """ This method should be implemented by subclasses."""

    def _add_column(self, var, obj_coef, constraints, coefficients):
        raise NotImplementedError(
            'This method should be implemented by subclasses.')

    def _add_and_collect_column_data(self, var, obj_coef, constraints,
                                     coefficients):
        """
        Update the Pyomo objective function and constraints, and update
        the _vars_referenced_by maps.

        Returns the column and objective coefficient data to pass to the solver
        """
        ## process the objective
        if obj_coef.__class__ in native_numeric_types and obj_coef == 0.:
            pass  ## nothing to do
        else:
            self._objective.expr += obj_coef * var
            self._vars_referenced_by_obj.add(var)
            obj_coef = _convert_to_const(obj_coef)

        ## add the constraints, collect the
        ## column information
        coeff_list = list()
        constr_list = list()
        for val, c in zip(coefficients, constraints):
            c._body += val * var
            self._vars_referenced_by_con[c].add(var)

            cval = _convert_to_const(val)
            coeff_list.append(cval)
            constr_list.append(self._pyomo_con_to_solver_con_map[c])

        return obj_coef, constr_list, coeff_list

    """ This method should be implemented by subclasses."""

    def _remove_constraint(self, solver_con):
        raise NotImplementedError(
            'This method should be implemented by subclasses.')

    """ This method should be implemented by subclasses."""

    def _remove_sos_constraint(self, solver_sos_con):
        raise NotImplementedError(
            'This method should be implemented by subclasses.')

    """ This method should be implemented by subclasses."""

    def _remove_var(self, solver_var):
        raise NotImplementedError(
            'This method should be implemented by subclasses.')

    def remove_block(self, block):
        """Remove a single block from the solver's model.

        This will keep any other model components intact.

        WARNING: Users must call remove_block BEFORE modifying the block.

        Parameters
        ----------
        block: Block (scalar Block or a single _BlockData)

        """
        # see PR #366 for discussion about handling indexed
        # objects and keeping compatibility with the
        # pyomo.kernel objects
        #if block.is_indexed():
        #    for sub_block in block.values():
        #        self.remove_block(sub_block)
        #    return
        for sub_block in block.block_data_objects(descend_into=True,
                                                  active=True):
            for con in sub_block.component_data_objects(ctype=Constraint,
                                                        descend_into=False,
                                                        active=True):
                self.remove_constraint(con)

            for con in sub_block.component_data_objects(ctype=SOSConstraint,
                                                        descend_into=False,
                                                        active=True):
                self.remove_sos_constraint(con)

        for var in block.component_data_objects(ctype=Var,
                                                descend_into=True,
                                                active=True):
            self.remove_var(var)

    def remove_constraint(self, con):
        """Remove a single constraint from the solver's model.

        This will keep any other model components intact.

        Parameters
        ----------
        con: Constraint (scalar Constraint or single _ConstraintData)

        """
        # see PR #366 for discussion about handling indexed
        # objects and keeping compatibility with the
        # pyomo.kernel objects
        #if con.is_indexed():
        #    for child_con in con.values():
        #        self.remove_constraint(child_con)
        #    return
        solver_con = self._pyomo_con_to_solver_con_map[con]
        self._remove_constraint(solver_con)
        self._symbol_map.removeSymbol(con)
        self._labeler.remove_obj(con)
        for var in self._vars_referenced_by_con[con]:
            self._referenced_variables[var] -= 1
        del self._vars_referenced_by_con[con]
        del self._pyomo_con_to_solver_con_map[con]
        del self._solver_con_to_pyomo_con_map[solver_con]

    def remove_sos_constraint(self, con):
        """Remove a single SOS constraint from the solver's model.

        This will keep any other model components intact.

        Parameters
        ----------
        con: SOSConstraint

        """
        # see PR #366 for discussion about handling indexed
        # objects and keeping compatibility with the
        # pyomo.kernel objects
        #if con.is_indexed():
        #    for child_con in con.values():
        #        self.remove_sos_constraint(child_con)
        #    return
        solver_con = self._pyomo_con_to_solver_con_map[con]
        self._remove_sos_constraint(solver_con)
        self._symbol_map.removeSymbol(con)
        self._labeler.remove_obj(con)
        for var in self._vars_referenced_by_con[con]:
            self._referenced_variables[var] -= 1
        del self._vars_referenced_by_con[con]
        del self._pyomo_con_to_solver_con_map[con]
        del self._solver_con_to_pyomo_con_map[solver_con]

    def remove_var(self, var):
        """Remove a single variable from the solver's model.

        This will keep any other model components intact.

        Parameters
        ----------
        var: Var (scalar Var or single _VarData)

        """
        # see PR #366 for discussion about handling indexed
        # objects and keeping compatibility with the
        # pyomo.kernel objects
        #if var.is_indexed():
        #    for child_var in var.values():
        #        self.remove_var(child_var)
        #    return
        if self._referenced_variables[var] != 0:
            raise ValueError(
                'Cannot remove Var {0} because it is still referenced by the '.
                format(var) + 'objective or one or more constraints')
        solver_var = self._pyomo_var_to_solver_var_map[var]
        self._remove_var(solver_var)
        self._symbol_map.removeSymbol(var)
        self._labeler.remove_obj(var)
        del self._referenced_variables[var]
        del self._pyomo_var_to_solver_var_map[var]
        del self._solver_var_to_pyomo_var_map[solver_var]

    """ This method should be implemented by subclasses."""

    def update_var(self, var):
        """
        Update a variable in the solver's model. This will update bounds, fix/unfix the variable as needed, and update
        the variable type.

        Parameters
        ----------
        var: Var
        """
        raise NotImplementedError(
            'This method should be implemented by subclasses.')

    def solve(self, *args, **kwds):
        """
        Solve the model.

        Keyword Arguments
        -----------------
        suffixes: list of str
            The strings should represent suffixes supported by the solver. Examples include 'dual', 'slack', and 'rc'.
        options: dict
            Dictionary of solver options. See the solver documentation for possible solver options.
        warmstart: bool
            If True, the solver will be warmstarted.
        keepfiles: bool
            If True, the solver log file will be saved.
        logfile: str
            Name to use for the solver log file.
        load_solutions: bool
            If True and a solution exists, the solution will be loaded into the Pyomo model.
        report_timing: bool
            If True, then timing information will be printed.
        tee: bool
            If True, then the solver log will be printed.
        """
        if self._pyomo_model is None:
            msg = 'Please use set_instance to set the instance before calling solve with the persistent'
            msg += ' solver interface.'
            raise RuntimeError(msg)
        if len(args) != 0:
            if self._pyomo_model is not args[0]:
                msg = 'The problem instance provided to the solve method is not the same as the instance provided'
                msg += ' to the set_instance method in the persistent solver interface. '
                raise ValueError(msg)

        self.available(exception_flag=True)

        # Collect suffix names to try and import from solution.
        if isinstance(self._pyomo_model, _BlockData):
            model_suffixes = list(name for (
                name,
                comp) in active_import_suffix_generator(self._pyomo_model))

        else:
            assert isinstance(self._pyomo_model, IBlock)
            model_suffixes = list(
                comp.storage_key for comp in import_suffix_generator(
                    self._pyomo_model, active=True, descend_into=False))

        if len(model_suffixes) > 0:
            kwds_suffixes = kwds.setdefault('suffixes', [])
            for name in model_suffixes:
                if name not in kwds_suffixes:
                    kwds_suffixes.append(name)

        #
        # Handle ephemeral solvers options here. These
        # will override whatever is currently in the options
        # dictionary, but we will reset these options to
        # their original value at the end of this method.
        #

        orig_options = self.options

        self.options = Options()
        self.options.update(orig_options)
        self.options.update(kwds.pop('options', {}))
        self.options.update(
            self._options_string_to_dict(kwds.pop('options_string', '')))
        try:

            # we're good to go.
            initial_time = time.time()

            self._presolve(**kwds)

            presolve_completion_time = time.time()
            if self._report_timing:
                print("      %6.2f seconds required for presolve" %
                      (presolve_completion_time - initial_time))

            if self._pyomo_model is not None:
                self._initialize_callbacks(self._pyomo_model)

            _status = self._apply_solver()
            if hasattr(self, '_transformation_data'):
                del self._transformation_data
            if not hasattr(_status, 'rc'):
                logger.warning(
                    "Solver (%s) did not return a solver status code.\n"
                    "This is indicative of an internal solver plugin error.\n"
                    "Please report this to the Pyomo developers." % self.name)
            elif _status.rc:
                logger.error("Solver (%s) returned non-zero return code (%s)" %
                             (
                                 self.name,
                                 _status.rc,
                             ))
                if self._tee:
                    logger.error(
                        "See the solver log above for diagnostic information.")
                elif hasattr(_status, 'log') and _status.log:
                    logger.error("Solver log:\n" + str(_status.log))
                raise ApplicationError("Solver (%s) did not exit normally" %
                                       self.name)
            solve_completion_time = time.time()
            if self._report_timing:
                print("      %6.2f seconds required for solver" %
                      (solve_completion_time - presolve_completion_time))

            result = self._postsolve()
            # ***********************************************************
            # The following code is only needed for backwards compatibility of load_solutions=False.
            # If we ever only want to support the load_vars, load_duals, etc. methods, then this can be deleted.
            if self._save_results:
                result._smap_id = self._smap_id
                result._smap = None
                _model = self._pyomo_model
                if _model:
                    if isinstance(_model, IBlock):
                        if len(result.solution) == 1:
                            result.solution(0).symbol_map = \
                                getattr(_model, "._symbol_maps")[result._smap_id]
                            result.solution(0).default_variable_value = \
                                self._default_variable_value
                            if self._load_solutions:
                                _model.load_solution(result.solution(0))
                        else:
                            assert len(result.solution) == 0
                        # see the hack in the write method
                        # we don't want this to stick around on the model
                        # after the solve
                        assert len(getattr(_model, "._symbol_maps")) == 1
                        delattr(_model, "._symbol_maps")
                        del result._smap_id
                        if self._load_solutions and \
                           (len(result.solution) == 0):
                            logger.error("No solution is available")
                    else:
                        if self._load_solutions:
                            _model.solutions.load_from(
                                result,
                                select=self._select_index,
                                default_variable_value=self.
                                _default_variable_value)
                            result._smap_id = None
                            result.solution.clear()
                        else:
                            result._smap = _model.solutions.symbol_map[
                                self._smap_id]
                            _model.solutions.delete_symbol_map(self._smap_id)
            # ********************************************************
            postsolve_completion_time = time.time()

            if self._report_timing:
                print("      %6.2f seconds required for postsolve" %
                      (postsolve_completion_time - solve_completion_time))

        finally:
            #
            # Reset the options dict
            #
            self.options = orig_options

        return result

    def has_instance(self):
        """
        True if set_instance has been called and this solver interface has a pyomo model and a solver model.

        Returns
        -------
        tmp: bool
        """
        return self._pyomo_model is not None
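
A minimal end-to-end sketch of how the persistent interface above is typically driven. The solver name 'gurobi_persistent' is an assumption (any installed persistent plugin, e.g. 'cplex_persistent', works the same way), and the small model is made up for illustration.

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(bounds=(0, 10))
m.y = pyo.Var(bounds=(0, 10))
m.obj = pyo.Objective(expr=2 * m.x + 3 * m.y, sense=pyo.maximize)
m.c1 = pyo.Constraint(expr=m.x + m.y <= 10)

opt = pyo.SolverFactory('gurobi_persistent')
opt.set_instance(m)              # build the solver model once
opt.solve(load_solutions=True)   # solve and load values back into m

# incrementally edit the Pyomo model and push only the changes
m.c2 = pyo.Constraint(expr=m.x - m.y <= 2)
opt.add_constraint(m.c2)
opt.remove_constraint(m.c1)      # call remove_* BEFORE deleting/modifying m.c1
opt.solve()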
# create the constraints
model.ca_bal = Constraint(expr = (0 == model.sv * caf \
                 - model.sv * model.ca - k1 * model.ca \
                 -  2.0 * k3 * model.ca ** 2.0))

model.cb_bal = Constraint(expr=(0 == -model.sv * model.cb \
                 + k1 * model.ca - k2 * model.cb))

model.cc_bal = Constraint(expr=(0 == -model.sv * model.cc \
                 + k2 * model.cb))

model.cd_bal = Constraint(expr=(0 == -model.sv * model.cd \
                 + k3 * model.ca ** 2.0))

# setup the solver options
options = Options()
options.solver = 'ipopt'
options.quiet = True

# run the sequence of square problems
instance = model.create()
instance.sv.fixed = True
sv_values = [1.0 + v * 0.05 for v in range(1, 20)]
print "   ", 'sv'.rjust(10), 'cb'.rjust(10)
for sv_value in sv_values:
    instance.sv = sv_value
    results, opt = \
        scripting.util.apply_optimizer(options, instance)
    instance.load(results)
    print "   ", str(instance.sv.value).rjust(10),\
        str(instance.cb.value).rjust(15)
Exemple #23
0
def run(argv, _globals=None):
    #
    # Set sys.argv to the value specified by the user
    #
    sys.argv = argv
    #
    # Create the option parser
    #
    parser = optparse.OptionParser()
    parser.remove_option('-h')
    #
    parser.add_option('-h','--help',
        action='store_true',
        dest='help',
        default=False,
        help='Print command options')
    #
    parser.add_option('-d','--debug',
        action='store_true',
        dest='debug',
        default=False,
        help='Set debugging flag')
    #
    parser.add_option('-v','--verbose',
        action='store_true',
        dest='verbose',
        default=False,
        help='Verbose output')
    #
    parser.add_option('-q','--quiet',
        action='store_true',
        dest='quiet',
        default=False,
        help='Minimal output')
    #
    parser.add_option('-f','--failfast',
        action='store_true',
        dest='failfast',
        default=False,
        help='Stop on first failure')
    #
    parser.add_option('-c','--catch',
        action='store_true',
        dest='catch',
        default=False,
        help='Catch control-C and display results')
    #
    parser.add_option('-b','--buffer',
        action='store_true',
        dest='buffer',
        default=False,
        help='Buffer stdout and stderr during test runs')
    #
    parser.add_option('--cat', '--category',
        action='append',
        dest='categories',
        default=[],
        help='Define a list of categories that filter the execution of test suites')
    #
    parser.add_option('--help-suites',
        action='store_true',
        dest='help_suites',
        default=False,
        help='Print the test suites that can be executed')
    #
    parser.add_option('--help-tests',
        action='store',
        dest='help_tests',
        default=None,
        help='Print the tests in the specified test suite')
    #
    parser.add_option('--help-categories',
        action='store_true',
        dest='help_categories',
        default=False,
        help='Print the test suite categories that can be specified')
    #
    # Parse the argument list and print help info if needed
    #
    _options, args = parser.parse_args(sys.argv)
    if _options.help:
        parser.print_help()

        print("""
Examples:
  %s                               - run all test suites
  %s MyTestCase.testSomething      - run MyTestCase.testSomething
  %s MyTestCase                    - run all 'test*' test methods
                                               in MyTestCase
""" % (args[0],args[0],args[0]))
        return
    #
    # If no value for _globals is specified, then we use the current context.
    #
    if _globals is None:
        _globals=globals()
    #
    # Set up an Options object and create test suites from the specified
    # configuration files.
    #
    options = Options()
    options.debug = _options.debug
    options.verbose = _options.verbose
    options.quiet = _options.quiet
    options.categories = _options.categories
    _argv = []
    for arg in args[1:]:
        if os.path.exists(arg):
            create_test_suites(filename=arg, _globals=_globals, options=options)
        else:
            _argv.append(arg)
    #
    # Collect information about the test suites:  suite names and categories
    #
    suites = []
    categories = set()
    for key in _globals.keys():
        if type(_globals[key]) is type and issubclass(_globals[key], unittest.TestCase):
            suites.append(key)
            for c in _globals[key].suite_categories:
                categories.add(c)
    #
    # Process the --help-tests option
    #
    if _options.help_tests and not _globals is None:
        suite = _globals.get(_options.help_tests, None)
        if not type(suite) is type:
            print("Test suite '%s' not found!" % str(_options.help_tests))
            return cleanup(_globals, suites)
        tests = []
        for item in dir(suite):
            if item.startswith('test'):
                tests.append(item)
        print("")
        if len(tests) > 0:
            print("Tests defined in test suite '%s':" % _options.help_tests)
            for tmp in sorted(tests):
                print("    "+tmp)
        else:
            print("No tests defined in test suite '%s':" % _options.help_tests)
        print("")
        return cleanup(_globals, suites)
    #
    # Process the --help-suites and --help-categories options
    #
    if (_options.help_suites or _options.help_categories) and not _globals is None:
        if _options.help_suites:
            print("")
            if len(suites) > 0:
                print("Test suites defined in '%s':" % os.path.basename(argv[0]))
                for suite in sorted(suites):
                    print("    "+suite)
            else:
                print("No test suites defined in '%s'!" % os.path.basename(argv[0]))
            print("")
        if _options.help_categories:
            tmp = list(categories)
            print("")
            if len(tmp) > 0:
                print("Test suite categories defined in '%s':" % os.path.basename(argv[0]))
                for c in sorted(tmp):
                    print("    "+c)
            else:
                print("No test suite categories defined in '%s':" % os.path.basename(argv[0]))
            print("")
        return cleanup(_globals, suites)
    #
    # Reset the value of sys.argv per the expectations of the unittest module
    #
    tmp = [args[0]]
    if _options.quiet:
        tmp.append('-q')
    if _options.verbose or _options.debug:
        tmp.append('-v')
    if _options.failfast:
        tmp.append('-f')
    if _options.catch:
        tmp.append('-c')
    if _options.buffer:
        tmp.append('-b')
    tmp += _argv
    sys.argv = tmp
    #
    # Execute the unittest main function to run tests
    #
    unittest.main(module=_globals['__name__'])
    cleanup(_globals, suites)
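
A hypothetical driver for the runner above. The script name and 'suites.yml' are assumptions; the YAML file is expected to define test suites consumable by create_test_suites.

if __name__ == '__main__':
    # run every suite defined in suites.yml, restricted to the 'smoke'
    # category, and stop on the first failure
    run(['runtests', '--cat', 'smoke', '--failfast', 'suites.yml'])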
Exemple #24
0
        self.model = ConcreteModel()
        self.model.x = Var()

    def compare(self):
        S = Pyomo2FuncDesigner(self.model)
        self.assertAlmostEqual(self.model.f(), S.f(S.initial_point))

    def tearDown(self):
        self.model = None


@unittest.nottest
def expr_test(self, name):
    options = self.get_options(name)
    self.model.x.value = options.x
    if name == 'pow':
        self.model.f = Objective(expr=options.fn(self.model.x, 2))
    else:
        self.model.f = Objective(expr=options.fn(self.model.x))
    self.compare()


for i in range(len(fns)):
    options = Options()
    options.fn = fns[i]
    options.x = xs[i]
    Tests.add_fn_test(fn=expr_test, name=fns[i].__name__, options=options)

if __name__ == "__main__":
    unittest.main()
Exemple #25
0
         (5,1,4),(5,4,8),(5,6,3),(5,9,1),
         (6,1,7),(6,5,2),(6,9,6),
         (7,2,6),(7,7,2),(7,8,8),
         (8,4,4),(8,5,1),(8,6,9),(8,9,5),
         (9,5,8),(9,8,7),(9,9,9)]

# create the empty list of cuts to start
cut_on = []
cut_off = []

done = False
while not done:
    model = create_sudoku_model(cut_on, cut_off, board)
    instance = model.create()
    
    options = Options()
    options.solver = 'glpk'
    options.quiet = True
    #options.tee = True

    results, opt = scripting.util.apply_optimizer(options, instance)
    instance.load(results)

    if str(results.Solution.Status) != 'optimal':
        break

    # add cuts
    new_cut_on = []
    new_cut_off = []
    for r in instance.ROWS:
        for c in instance.COLS:
Exemple #26
0
    def solve(self, *args, **kwds):
        """ Solve the problem """

        self.available(exception_flag=True)
        #
        # If the inputs are models, then validate that they have been
        # constructed! Collect suffix names to try and import from solution.
        #
        from pyomo.core.base.block import _BlockData
        import pyomo.core.base.suffix
        from pyomo.core.kernel.block import IBlock
        import pyomo.core.kernel.suffix
        _model = None
        for arg in args:
            if isinstance(arg, (_BlockData, IBlock)):
                if isinstance(arg, _BlockData):
                    if not arg.is_constructed():
                        raise RuntimeError(
                            "Attempting to solve model=%s with unconstructed "
                            "component(s)" % (arg.name, ))

                _model = arg
                # import suffixes must be on the top-level model
                if isinstance(arg, _BlockData):
                    model_suffixes = list(name for (name,comp) \
                                          in pyomo.core.base.suffix.\
                                          active_import_suffix_generator(arg))
                else:
                    assert isinstance(arg, IBlock)
                    model_suffixes = list(comp.storage_key for comp
                                          in pyomo.core.kernel.suffix.\
                                          import_suffix_generator(arg,
                                                                  active=True,
                                                                  descend_into=False))

                if len(model_suffixes) > 0:
                    kwds_suffixes = kwds.setdefault('suffixes', [])
                    for name in model_suffixes:
                        if name not in kwds_suffixes:
                            kwds_suffixes.append(name)

        #
        # Handle ephemeral solvers options here. These
        # will override whatever is currently in the options
        # dictionary, but we will reset these options to
        # their original value at the end of this method.
        #

        orig_options = self.options

        self.options = Options()
        self.options.update(orig_options)
        self.options.update(kwds.pop('options', {}))
        self.options.update(
            self._options_string_to_dict(kwds.pop('options_string', '')))
        try:

            # we're good to go.
            initial_time = time.time()

            self._presolve(*args, **kwds)

            presolve_completion_time = time.time()
            if self._report_timing:
                print("      %6.2f seconds required for presolve" %
                      (presolve_completion_time - initial_time))

            if not _model is None:
                self._initialize_callbacks(_model)

            _status = self._apply_solver()
            if hasattr(self, '_transformation_data'):
                del self._transformation_data
            if not hasattr(_status, 'rc'):
                logger.warning(
                    "Solver (%s) did not return a solver status code.\n"
                    "This is indicative of an internal solver plugin error.\n"
                    "Please report this to the Pyomo developers." % self.name)
            elif _status.rc:
                logger.error("Solver (%s) returned non-zero return code (%s)" %
                             (
                                 self.name,
                                 _status.rc,
                             ))
                if self._tee:
                    logger.error(
                        "See the solver log above for diagnostic information.")
                elif hasattr(_status, 'log') and _status.log:
                    logger.error("Solver log:\n" + str(_status.log))
                raise ApplicationError("Solver (%s) did not exit normally" %
                                       self.name)
            solve_completion_time = time.time()
            if self._report_timing:
                print("      %6.2f seconds required for solver" %
                      (solve_completion_time - presolve_completion_time))

            result = self._postsolve()
            result._smap_id = self._smap_id
            result._smap = None
            if _model:
                if isinstance(_model, IBlock):
                    if len(result.solution) == 1:
                        result.solution(0).symbol_map = \
                            getattr(_model, "._symbol_maps")[result._smap_id]
                        result.solution(0).default_variable_value = \
                            self._default_variable_value
                        if self._load_solutions:
                            _model.load_solution(result.solution(0))
                    else:
                        assert len(result.solution) == 0
                    # see the hack in the write method
                    # we don't want this to stick around on the model
                    # after the solve
                    assert len(getattr(_model, "._symbol_maps")) == 1
                    delattr(_model, "._symbol_maps")
                    del result._smap_id
                    if self._load_solutions and \
                       (len(result.solution) == 0):
                        logger.error("No solution is available")
                else:
                    if self._load_solutions:
                        _model.solutions.load_from(result,
                                                   select=self._select_index,
                                                   default_variable_value=self.
                                                   _default_variable_value)
                        result._smap_id = None
                        result.solution.clear()
                    else:
                        result._smap = _model.solutions.symbol_map[
                            self._smap_id]
                        _model.solutions.delete_symbol_map(self._smap_id)
            postsolve_completion_time = time.time()

            if self._report_timing:
                print("      %6.2f seconds required for postsolve" %
                      (postsolve_completion_time - solve_completion_time))

        finally:
            #
            # Reset the options dict
            #
            self.options = orig_options

        return result
Exemple #27
0
def _process_table(cmd, _model, _data, _default, options=None):
    #print("TABLE %s" % cmd)
    #
    _options = {}
    _set = OrderedDict()
    _param = OrderedDict()
    _labels = []

    _cmd = cmd[1]
    _cmd_len = len(_cmd)
    name = None
    i = 0
    while i < _cmd_len:
        try:
            #print("CMD i=%s cmd=%s" % (i, _cmd[i:]))
            #
            # This should not be error prone, so we treat errors
            # with a general exception
            #

            #
            # Processing labels
            #
            if _cmd[i] == ':':
                i += 1
                while i < _cmd_len:
                    _labels.append(_cmd[i])
                    i += 1
                continue
            #
            # Processing options
            #
            name = _cmd[i]
            if i + 1 == _cmd_len:
                _param[name] = []
                _labels = ['Z']
                i += 1
                continue
            if _cmd[i + 1] == '=':
                if type(_cmd[i + 2]) is list:
                    _set[name] = _cmd[i + 2]
                else:
                    _options[name] = _cmd[i + 2]
                i += 3
                continue
            # This should be a parameter declaration
            if not type(_cmd[i + 1]) is tuple:
                raise IOError
            if i + 2 < _cmd_len and _cmd[i + 2] == '=':
                _param[name] = (_cmd[i + 1], _cmd[i + 3][0])
                i += 4
            else:
                _param[name] = _cmd[i + 1]
                i += 2
        except:
            raise IOError("Error parsing table options: %s" % name)

    #print("_options %s" % _options)
    #print("_set %s" % _set)
    #print("_param %s" % _param)
    #print("_labels %s" % _labels)


#
    options = Options(**_options)
    for key in options:
        if not key in ['columns']:
            raise ValueError("Unknown table option '%s'" % key)
    #
    ncolumns = options.columns
    if ncolumns is None:
        ncolumns = len(_labels)
        if ncolumns == 0:
            if not (len(_set) == 1 and len(_set[list(_set.keys())[0]]) == 0):
                raise IOError(
                    "Must specify either the 'columns' option or column headers"
                )
            else:
                ncolumns = 1
    else:
        ncolumns = int(ncolumns)
    #
    data = cmd[2]
    Ldata = len(cmd[2])
    #
    cmap = {}
    if len(_labels) == 0:
        for i in range(ncolumns):
            cmap[i + 1] = i
        for label in _param:
            ndx = cmap[_param[label][1]]
            if ndx < 0 or ndx >= ncolumns:
                raise IOError("Bad column value %s for data %s" %
                              (str(ndx), label))
            cmap[label] = ndx
            _param[label] = _param[label][0]
    else:
        i = 0
        for label in _labels:
            cmap[label] = i
            i += 1
    #print("CMAP %s" % cmap)
    #
    #print("_param %s" % _param)
    #print("_set %s" % _set)
    for sname in _set:
        # Creating set sname
        cols = _set[sname]
        tmp = []
        for col in cols:
            if not col in cmap:
                raise IOError(
                    "Unexpected table column '%s' for index set '%s'" %
                    (col, sname))
            tmp.append(cmap[col])
        if not sname in cmap:
            cmap[sname] = tmp
        cols = flatten(tmp)
        #
        _cmd = ['set', sname, ':=']
        i = 0
        while i < Ldata:
            row = []
            #print("COLS %s  NCOLS %d" % (cols, ncolumns))
            for col in cols:
                #print("Y %s %s" % (i, col))
                row.append(data[i + col])
            if len(row) > 1:
                _cmd.append(tuple(row))
            else:
                _cmd.append(row[0])
            i += ncolumns
        #print("_data %s" % _data)
        _process_set(_cmd, _model, _data)
    #
    #print("CMAP %s" % cmap)
    _i = 0
    if ncolumns == 0:
        raise IOError
    for vname in _param:
        _i += 1
        # create value vname
        cols = _param[vname]
        tmp = []
        for col in cols:
            #print("COL %s" % col)
            if not col in cmap:
                raise IOError(
                    "Unexpected table column '%s' for table value '%s'" %
                    (col, vname))
            tmp.append(cmap[col])
        #print("X %s %s" % (len(cols), tmp))
        cols = flatten(tmp)
        #print("X %s" % len(cols))
        #print("VNAME %s %s" % (vname, cmap[vname]))
        if vname in cmap:
            cols.append(cmap[vname])
        else:
            cols.append(ncolumns - 1 - (len(_param) - _i))
        #print("X %s" % len(cols))
        #
        _cmd = ['param', vname, ':=']
        i = 0
        while i < Ldata:
            #print("HERE %s %s %s" % (i, cols, ncolumns))
            for col in cols:
                _cmd.append(data[i + col])
            i += ncolumns
        #print("HERE %s" % _cmd)
        #print("_data %s" % _data)
        _process_param(_cmd, _model, _data, None, ncolumns=len(cols))
Exemple #28
0
class YamlDictionary(Plugin):

    alias("yaml", "YAML file interface")

    implements(IDataManager, service=False)

    def __init__(self):
        self._info = {}
        self.options = Options()

    def available(self):
        return yaml_available

    def requirements(self):
        return "pyyaml"

    def initialize(self, **kwds):
        self.filename = kwds.pop('filename')
        self.add_options(**kwds)

    def add_options(self, **kwds):
        self.options.update(kwds)

    def open(self):
        if self.filename is None:
            raise IOError("No filename specified")

    def close(self):
        pass

    def read(self):
        """
        This function loads data from a YAML file and tuplizes the nested
        dictionaries and lists of lists.
        """
        if not os.path.exists(self.filename):
            raise IOError("Cannot find file '%s'" % self.filename)
        INPUT = open(self.filename, 'r')
        jdata = yaml.load(INPUT)
        INPUT.close()
        if jdata is None:
            raise IOError("Empty YAML file")
        self._info = {}
        for k, v in jdata.items():
            self._info[k] = tuplize(v)

    def write(self, data):
        """
        This function creates a YAML file for the specified data.
        """
        with open(self.filename, 'w') as OUTPUT:
            jdata = {}
            if self.options.data is None:
                for k, v in data.items():
                    jdata[k] = detuplize(v)
            elif type(self.options.data) in (list, tuple):
                for k in self.options.data:
                    jdata[k] = detuplize(data[k], sort=self.options.sort)
            else:
                k = self.options.data
                jdata[k] = detuplize(data[k])
            yaml.dump(jdata, OUTPUT)

    def process(self, model, data, default):
        """
        Set the data for the selected components
        """
        if not self.options.namespace in data:
            data[self.options.namespace] = {}
        #
        try:
            if self.options.data is None:
                for key in self._info:
                    self._set_data(data, self.options.namespace, key,
                                   self._info[key])
            elif type(self.options.data) in (list, tuple):
                for key in self.options.data:
                    self._set_data(data, self.options.namespace, key,
                                   self._info[key])
            else:
                key = self.options.data
                self._set_data(data, self.options.namespace, key,
                               self._info[key])
        except KeyError:
            raise IOError(
                "Data value for '%s' is not available in YAML file '%s'" %
                (key, self.filename))

    def _set_data(self, data, namespace, name, value):
        if type(value) is dict:
            data[namespace][name] = value
        else:
            data[namespace][name] = {None: value}

    def clear(self):
        self._info = {}
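
A minimal sketch of loading model data through the YAML interface above via a DataPortal. It assumes pyyaml is installed and that a file 'data.yaml' exists with entries such as 'A: [1, 2, 3]' and 'p: {1: 10, 2: 20, 3: 30}'.

import pyomo.environ as pyo

model = pyo.AbstractModel()
model.A = pyo.Set()
model.p = pyo.Param(model.A)

data = pyo.DataPortal()
data.load(filename='data.yaml')   # dispatched to the 'yaml' data manager by extension
instance = model.create_instance(data)
instance.pprint()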
Exemple #29
0
def _process_load(cmd, _model, _data, _default, options=None):
    #print("LOAD %s" % cmd)
    from pyomo.core import Set

    _cmd_len = len(cmd)
    _options = {}
    _options['filename'] = cmd[1]
    i = 2
    while cmd[i] != ':':
        _options[cmd[i]] = cmd[i + 2]
        i += 3
    i += 1
    _Index = (None, [])
    if type(cmd[i]) is tuple:
        _Index = (None, cmd[i])
        i += 1
    elif i + 1 < _cmd_len and cmd[i + 1] == '=':
        _Index = (cmd[i], cmd[i + 2])
        i += 3
    _smap = OrderedDict()
    while i < _cmd_len:
        if i + 2 < _cmd_len and cmd[i + 1] == '=':
            _smap[cmd[i + 2]] = cmd[i]
            i += 3
        else:
            _smap[cmd[i]] = cmd[i]
            i += 1

    if len(cmd) < 2:
        raise IOError("The 'load' command must specify a filename")

    options = Options(**_options)
    for key in options:
        if not key in [
                'range', 'filename', 'format', 'using', 'driver', 'query',
                'table', 'user', 'password', 'database'
        ]:
            raise ValueError("Unknown load option '%s'" % key)

    global Filename
    Filename = options.filename

    global Lineno
    Lineno = 0
    #
    # TODO: process mapping info
    #
    if options.using is None:
        tmp = options.filename.split(".")[-1]
        data = DataManagerFactory(tmp)
        if (data is None) or \
           isinstance(data, UnknownDataManager):
            raise pyutilib.common.ApplicationError(
                "Data manager '%s' is not available." % tmp)
    else:
        try:
            data = DataManagerFactory(options.using)
        except:
            data = None
        if (data is None) or \
           isinstance(data, UnknownDataManager):
            raise pyutilib.common.ApplicationError(
                "Data manager '%s' is not available." % options.using)
    set_name = None
    param_name = None
    #
    # Create symbol map
    #
    symb_map = _smap
    if len(symb_map) == 0:
        raise IOError(
            "Must specify at least one set or parameter name that will be loaded"
        )
    #
    # Process index data
    #
    _index = None
    index_name = _Index[0]
    _select = None
    #
    # Set the 'set name' based on the format
    #
    _set = None
    if options.format == 'set' or options.format == 'set_array':
        if len(_smap) != 1:
            raise IOError(
                "A single set name must be specified when using format '%s'" %
                options.format)
        set_name = list(_smap.keys())[0]
        _set = set_name
    #
    # Set the 'param name' based on the format
    #
    _param = None
    if options.format == 'transposed_array' or options.format == 'array' or options.format == 'param':
        if len(_smap) != 1:
            raise IOError(
                "A single parameter name must be specified when using format '%s'"
                % options.format)
    if options.format in ('transposed_array', 'array', 'param', None):
        if _Index[0] is None:
            _index = None
        else:
            _index = _Index[0]
        _param = []
        _select = list(_Index[1])
        for key in _smap:
            _param.append(_smap[key])
            _select.append(key)
    if options.format in ('transposed_array', 'array'):
        _select = None

    #print "YYY", _param, options
    if not _param is None and len(
            _param) == 1 and not _model is None and isinstance(
                getattr(_model, _param[0]), Set):
        _select = None
        _set = _param[0]
        _param = None
        _index = None

    #print "SELECT", _param, _select
    #
    data.initialize(model=options.model,
                    filename=options.filename,
                    index=_index,
                    index_name=index_name,
                    param_name=symb_map,
                    set=_set,
                    param=_param,
                    format=options.format,
                    range=options.range,
                    query=options.query,
                    using=options.using,
                    table=options.table,
                    select=_select,
                    user=options.user,
                    password=options.password,
                    database=options.database)
    #
    data.open()
    try:
        data.read()
    except Exception:
        data.close()
        raise
    data.close()
    data.process(_model, _data, _default)
Exemple #30
0
class JSONDictionary(Plugin):

    alias("json", "JSON file interface")

    implements(IDataManager, service=False)

    def __init__(self):
        self._info = {}
        self.options = Options()

    def available(self):
        return True

    def initialize(self, **kwds):
        self.filename = kwds.pop('filename')
        self.add_options(**kwds)

    def add_options(self, **kwds):
        self.options.update(kwds)

    def open(self):
        if self.filename is None:
            raise IOError("No filename specified")

    def close(self):
        pass

    def read(self):
        """
        This function loads data from a JSON file and tuplizes the nested
        dictionaries and lists of lists.
        """
        if not os.path.exists(self.filename):
            raise IOError("Cannot find file '%s'" % self.filename)
        INPUT = open(self.filename, 'r')
        if six.PY2 and self.options.convert_unicode:

            def _byteify(data, ignore_dicts=False):
                if isinstance(data, six.text_type):
                    return data.encode('utf-8')
                if isinstance(data, list):
                    return [_byteify(item, True) for item in data]
                if isinstance(data, dict) and not ignore_dicts:
                    return dict((_byteify(key, True), _byteify(value, True))
                                for (key, value) in data.iteritems())
                return data

            jdata = json.load(INPUT, object_hook=_byteify)
        else:
            jdata = json.load(INPUT)
        INPUT.close()
        if jdata is None or len(jdata) == 0:
            raise IOError("Empty JSON data file")
        self._info = {}
        for k, v in jdata.items():
            self._info[k] = tuplize(v)

    def write(self, data):
        """
        This function creates a JSON file for the specified data.
        """
        with open(self.filename, 'w') as OUTPUT:
            jdata = {}
            if self.options.data is None:
                for k, v in data.items():
                    jdata[k] = detuplize(v)
            elif type(self.options.data) in (list, tuple):
                for k in self.options.data:
                    jdata[k] = detuplize(data[k], sort=self.options.sort)
            else:
                k = self.options.data
                jdata[k] = detuplize(data[k])
            json.dump(jdata, OUTPUT)

    def process(self, model, data, default):
        """
        Set the data for the selected components
        """
        if not self.options.namespace in data:
            data[self.options.namespace] = {}
        #
        try:
            if self.options.data is None:
                for key in self._info:
                    self._set_data(data, self.options.namespace, key,
                                   self._info[key])
            elif type(self.options.data) in (list, tuple):
                for key in self.options.data:
                    self._set_data(data, self.options.namespace, key,
                                   self._info[key])
            else:
                key = self.options.data
                self._set_data(data, self.options.namespace, key,
                               self._info[key])
        except KeyError:
            raise IOError(
                "Data value for '%s' is not available in JSON file '%s'" %
                (key, self.filename))

    def _set_data(self, data, namespace, name, value):
        if type(value) is dict:
            data[namespace][name] = value
        else:
            data[namespace][name] = {None: value}

    def clear(self):
        self._info = {}
Exemple #31
0
    def solve(self, *args, **kwds):
        """
        Solve the model.

        Keyword Arguments
        -----------------
        suffixes: list of str
            The strings should represent suffixes supported by the solver. Examples include 'dual', 'slack', and 'rc'.
        options: dict
            Dictionary of solver options. See the solver documentation for possible solver options.
        warmstart: bool
            If True, the solver will be warmstarted.
        keepfiles: bool
            If True, the solver log file will be saved.
        logfile: str
            Name to use for the solver log file.
        load_solutions: bool
            If True and a solution exists, the solution will be loaded into the Pyomo model.
        report_timing: bool
            If True, then timing information will be printed.
        tee: bool
            If True, then the solver log will be printed.
        """
        if self._pyomo_model is None:
            msg = 'Please use set_instance to set the instance before calling solve with the persistent'
            msg += ' solver interface.'
            raise RuntimeError(msg)
        if len(args) != 0:
            if self._pyomo_model is not args[0]:
                msg = 'The problem instance provided to the solve method is not the same as the instance provided'
                msg += ' to the set_instance method in the persistent solver interface. '
                raise ValueError(msg)

        self.available(exception_flag=True)

        # Collect suffix names to try and import from solution.
        if isinstance(self._pyomo_model, _BlockData):
            model_suffixes = list(name for (
                name,
                comp) in active_import_suffix_generator(self._pyomo_model))

        else:
            assert isinstance(self._pyomo_model, IBlock)
            model_suffixes = list(
                comp.storage_key for comp in import_suffix_generator(
                    self._pyomo_model, active=True, descend_into=False))

        if len(model_suffixes) > 0:
            kwds_suffixes = kwds.setdefault('suffixes', [])
            for name in model_suffixes:
                if name not in kwds_suffixes:
                    kwds_suffixes.append(name)

        #
        # Handle ephemeral solvers options here. These
        # will override whatever is currently in the options
        # dictionary, but we will reset these options to
        # their original value at the end of this method.
        #

        orig_options = self.options

        self.options = Options()
        self.options.update(orig_options)
        self.options.update(kwds.pop('options', {}))
        self.options.update(
            self._options_string_to_dict(kwds.pop('options_string', '')))
        try:

            # we're good to go.
            initial_time = time.time()

            self._presolve(**kwds)

            presolve_completion_time = time.time()
            if self._report_timing:
                print("      %6.2f seconds required for presolve" %
                      (presolve_completion_time - initial_time))

            if self._pyomo_model is not None:
                self._initialize_callbacks(self._pyomo_model)

            _status = self._apply_solver()
            if hasattr(self, '_transformation_data'):
                del self._transformation_data
            if not hasattr(_status, 'rc'):
                logger.warning(
                    "Solver (%s) did not return a solver status code.\n"
                    "This is indicative of an internal solver plugin error.\n"
                    "Please report this to the Pyomo developers." % self.name)
            elif _status.rc:
                logger.error("Solver (%s) returned non-zero return code (%s)" %
                             (
                                 self.name,
                                 _status.rc,
                             ))
                if self._tee:
                    logger.error(
                        "See the solver log above for diagnostic information.")
                elif hasattr(_status, 'log') and _status.log:
                    logger.error("Solver log:\n" + str(_status.log))
                raise ApplicationError("Solver (%s) did not exit normally" %
                                       self.name)
            solve_completion_time = time.time()
            if self._report_timing:
                print("      %6.2f seconds required for solver" %
                      (solve_completion_time - presolve_completion_time))

            result = self._postsolve()
            # ***********************************************************
            # The following code is only needed for backwards compatibility of load_solutions=False.
            # If we ever only want to support the load_vars, load_duals, etc. methods, then this can be deleted.
            if self._save_results:
                result._smap_id = self._smap_id
                result._smap = None
                _model = self._pyomo_model
                if _model:
                    if isinstance(_model, IBlock):
                        if len(result.solution) == 1:
                            result.solution(0).symbol_map = \
                                getattr(_model, "._symbol_maps")[result._smap_id]
                            result.solution(0).default_variable_value = \
                                self._default_variable_value
                            if self._load_solutions:
                                _model.load_solution(result.solution(0))
                        else:
                            assert len(result.solution) == 0
                        # see the hack in the write method
                        # we don't want this to stick around on the model
                        # after the solve
                        assert len(getattr(_model, "._symbol_maps")) == 1
                        delattr(_model, "._symbol_maps")
                        del result._smap_id
                        if self._load_solutions and \
                           (len(result.solution) == 0):
                            logger.error("No solution is available")
                    else:
                        if self._load_solutions:
                            _model.solutions.load_from(
                                result,
                                select=self._select_index,
                                default_variable_value=self.
                                _default_variable_value)
                            result._smap_id = None
                            result.solution.clear()
                        else:
                            result._smap = _model.solutions.symbol_map[
                                self._smap_id]
                            _model.solutions.delete_symbol_map(self._smap_id)
            # ********************************************************
            postsolve_completion_time = time.time()

            if self._report_timing:
                print("      %6.2f seconds required for postsolve" %
                      (postsolve_completion_time - solve_completion_time))

        finally:
            #
            # Reset the options dict
            #
            self.options = orig_options

        return result
Exemple #32
0
# Mimic the pyomo script
from pyomo.environ import *
from pyutilib.misc import Options

# set high level options that mimic the pyomo command line
options = Options()
options.model_file = 'DiseaseEstimation.py'
options.data_files = ['DiseaseEstimation.dat']
options.solver = 'ipopt'
options.solver_io = 'nl'
#options.keepfiles = True
#options.tee = True

# mimic the set of function calls done by pyomo command line
scripting.util.setup_environment(options)

# the following imports the model found in options.model_file,
# sets this to options.usermodel, and executes preprocessors
scripting.util.apply_preprocessing(options, parser=None)

# create the wrapper for the model, the data, the instance, and the options
model_data = scripting.util.create_model(options)
instance = model_data.instance

# solve
results, opt = scripting.util.apply_optimizer(options, instance)

# the following simply outputs the final time elapsed
scripting.util.finalize(options)

# load results into instance and print
Exemple #33
0
# Objective
#model.obj = Objective(expr = math.sqrt(((model.p - model.x)**2) + ((model.q - model.y)**2)))
model.obj = Objective(expr = (((model.p - model.x)**2) + ((model.q - model.y)**2))**0.5)

# Constraints
model.KeineAhnung = Constraint(expr = ((model.x / model.length)**2) + ((model.y / model.width)**2) - 1 >= 0)

model.pprint()

model.skip_canonical_repn = True # for nonlinear models

instance=model.create()

SolverName = "asl"
so = Options()
so.solver = "ipopt"
opt=SolverFactory(SolverName, options=so)

if opt is None:
    print("Could not construct solver %s : %s" % (SolverName, so.solver))
    sys.exit(1)

results=opt.solve(instance)
results.write()
instance.load(results) # put results in model

# because we know there is a variable named x
x_var = getattr(instance, "x")
x_val = x_var()
Example #34
0
class TableData(Plugin):
    """
    An object that imports data from a table in an external data source.
    """

    implements(IDataManager, service=False)

    def __init__(self):
        """
        Constructor
        """
        self._info = None
        self._data = None
        self.options = Options()
        self.options.ncolumns = 1

    def available(self):
        return True

    def initialize(self, **kwds):
        self.filename = kwds.pop('filename')
        self.add_options(**kwds)

    def add_options(self, **kwds):
        self.options.update(kwds)

    def open(self):  #pragma:nocover
        """
        Open the table
        """
        pass

    def read(self):  #pragma:nocover
        """
        Read data from the table
        """
        return False

    def write(self, data):  #pragma:nocover
        """
        Write data from the table
        """
        return False

    def close(self):  #pragma:nocover
        """
        Close the table
        """
        pass

    def process(self, model, data, default):
        """
        Return the data that was extracted from this table
        """
        if model is None:
            model = self.options.model
        if not self.options.namespace in data:
            data[self.options.namespace] = {}
        return _process_data(self._info,
                             model,
                             data[self.options.namespace],
                             default,
                             self.filename,
                             index=self.options.index,
                             set=self.options.set,
                             param=self.options.param,
                             ncolumns=self.options.ncolumns)

    def clear(self):
        """
        Clear the data that was extracted from this table
        """
        self._info = None

    def _set_data(self, headers, rows):
        header_index = []
        if self.options.select is None:
            for i in xrange(len(headers)):
                header_index.append(i)
        else:
            for i in self.options.select:
                header_index.append(headers.index(str(i)))
        self.options.ncolumns = len(headers)

        if not self.options.param is None:
            if not type(self.options.param) in (list, tuple):
                self.options.param = (self.options.param, )
            _params = []
            for p in self.options.param:
                if isinstance(p, Param):
                    self.options.model = p.model()
                    _params.append(p.name)
                else:
                    _params.append(p)
            self.options.param = tuple(_params)

        if isinstance(self.options.set, Set):
            self.options.model = self.options.set.model()
            self.options.set = self.options.set.name

        if isinstance(self.options.index, Set):
            self.options.model = self.options.index.model()
            self.options.index = self.options.index.name

        if self.options.format is None:
            if not self.options.set is None:
                self.options.format = 'set'
            elif not self.options.param is None:
                self.options.format = 'table'
            if self.options.format is None:
                raise ValueError("Unspecified format and  data option")
        elif self.options.set is None and self.options.param is None:
            msg = "Must specify the set or parameter option for data"
            raise IOError(msg)

        if self.options.format == 'set':
            if not self.options.index is None:
                msg = "Cannot specify index for data with the 'set' format: %s"
                raise IOError(msg % str(self.options.index))

            self._info = ["set", self.options.set, ":="]
            for row in rows:
                if self.options.ncolumns > 1:
                    self._info.append(tuple(row))
                else:
                    self._info.extend(row)

        elif self.options.format == 'set_array':
            if not self.options.index is None:
                msg = "Cannot specify index for data with the 'set_array' "   \
                      'format: %s'
                raise IOError(msg % str(self.options.index))

            self._info = ["set", self.options.set, ":"]
            self._info.extend(headers[1:])
            self._info.append(":=")
            for row in rows:
                self._info.extend(row)

        elif self.options.format == 'transposed_array':
            self._info = ["param", self.options.param[0], "(tr)", ":"]
            self._info.extend(headers[1:])
            self._info.append(":=")
            for row in rows:
                self._info.extend(row)

        elif self.options.format == 'array':
            self._info = ["param", self.options.param[0], ":"]
            self._info.extend(headers[1:])
            self._info.append(":=")
            for row in rows:
                self._info.extend(row)

        elif self.options.format == 'table':
            if self.options.index is not None:
                self._info = ["param", ":", self.options.index, ":"]
            else:
                self._info = ["param", ":"]
            for param in self.options.param:
                self._info.append(param)
            self._info.append(":=")
            for row in rows:
                for i in header_index:
                    self._info.append(row[i])
            self.options.ncolumns = len(header_index)
        else:
            msg = "Unknown parameter format: '%s'"
            raise ValueError(msg % self.options.format)

    def get_table(self):
        tmp = []
        if not self.options.columns is None:
            tmp.append(self.options.columns)
        if not self.options.set is None:
            # Create column names
            if self.options.columns is None:
                cols = []
                for i in xrange(self.options.set.dimen):
                    cols.append(self.options.set.name + str(i))
                tmp.append(cols)
            # Get rows
            for data in self.options.set:
                if self.options.set.dimen > 1:
                    tmp.append(list(data))
                else:
                    tmp.append([data])
        elif not self.options.param is None:
            if type(self.options.param) in (list, tuple):
                _param = self.options.param
            else:
                _param = [self.options.param]
            tmp = []
            # Collect data
            for index in _param[0]:
                if index is None:
                    row = []
                elif type(index) in (list, tuple):
                    row = list(index)
                else:
                    row = [index]
                for param in _param:
                    row.append(value(param[index]))
                tmp.append(row)
            # Create column names
            if self.options.columns is None:
                cols = []
                for i in xrange(len(tmp[0]) - len(_param)):
                    cols.append('I' + str(i))
                for param in _param:
                    cols.append(param)
                tmp = [cols] + tmp
        return tmp
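To make the _set_data translation concrete, the following sketch shows the intermediate _info list it builds for two of the formats handled above. The header names and values are invented for illustration only.

# Hypothetical illustration of the _info list produced by _set_data (values invented).
#
# 'table' format with param=('demand',), index=None, select=None:
#   headers = ['city', 'demand'], rows = [['NYC', 10], ['LA', 20]]
#   -> _info = ['param', ':', 'demand', ':=', 'NYC', 10, 'LA', 20]
#
# 'set' format for a two-dimensional set named 'S':
#   headers = ['i', 'j'], rows = [[1, 'a'], [2, 'b']]
#   -> _info = ['set', 'S', ':=', (1, 'a'), (2, 'b')]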
Example #35
0
                case = MissingSuffixFailures[solver, io, _model.description]
                if _solver_case.version is not None and\
                   case[0](_solver_case.version):
                    if type(case[1]) is dict:
                        exclude_suffixes.update(case[1])
                    else:
                        for x in case[1]:
                            exclude_suffixes[x] = (True, {})
                    msg = case[2]

            # Return scenario dimensions and scenario information
            yield (model, solver,
                   io), Options(status=status,
                                msg=msg,
                                model=_model,
                                solver=None,
                                testcase=_solver_case,
                                demo_limits=_solver_case.demo_limits,
                                exclude_suffixes=exclude_suffixes)


@unittest.nottest
def run_test_scenarios(options):
    logging.disable(logging.WARNING)

    solvers = set(options.solver)
    stat = {}

    for key, test_case in test_scenarios():
        model, solver, io = key
        if len(solvers) > 0 and not solver in solvers:
Example #36
0
def _process_table(cmd, _model, _data, _default, options=None):
    #print "TABLE", cmd
    #
    options = Options(**cmd[1])
    for key in options:
        if not key in ['columns']:
            raise ValueError("Unknown table option '%s'" % key)
    #
    ncolumns=options.columns
    if ncolumns is None:
        ncolumns = len(cmd[4])
        if ncolumns == 0:
            if not (len(cmd[3]) == 1 and len(cmd[3][cmd[3].keys()[0]]) == 0):
                raise IOError("Must specify either the 'columns' option or column headers")
            else:
                ncolumns=1
    else:
        ncolumns = int(ncolumns)
    #
    data = cmd[5]
    Ldata = len(cmd[5])
    #
    cmap = {}
    if len(cmd[4]) == 0:
        for i in range(ncolumns):
            cmap[str(i+1)] = i
        for label in cmd[3]:
            ndx = int(cmd[3][label][1])-1
            if ndx < 0 or ndx >= ncolumns:
                raise IOError("Bad column value %s for data %s" % (str(ndx), label))
            cmap[label] = ndx
            cmd[3][label] = cmd[3][label][0]
    else:
        i = 0
        for label in cmd[4]:
            cmap[label] = i
            i += 1
    #
    for sname in cmd[2]:
        # Creating set sname
        cols = cmd[2][sname]
        tmp = []
        for col in cols:
            if not col in cmap:
                raise IOError("Unexpected table column '%s' for index set '%s'" % (col, sname))
            tmp.append(cmap[col])
        cmap[sname] = tmp
        cols = tmp
        # 
        _cmd = ['set', sname, ':=']
        i = 0
        while i < Ldata:
            row = []
            for col in cols:
                row.append( data[i+col] )
            if len(row) > 1:
                _cmd.append( tuple(row) )
            else:
                _cmd.append( row[0] )
            i += ncolumns
        _process_set(_cmd, _model, _data)
    #
    _i=0
    #print "HERE ncol", ncolumns
    if ncolumns == 0:
        raise IOError
    for vname in cmd[3]:
        _i += 1
        # create value vname
        cols = cmd[3][vname]
        tmp = []
        for col in cols:
            if not col in cmap:
                raise IOError("Unexpected table column '%s' for table value '%s'" % (col, vname))
            tmp.append(cmap[col])
        cols = flatten(tmp)
        if vname in cmap:
            cols.append(cmap[vname])
        else:
            cols.append( ncolumns-1 - (len(cmd[3])-_i) )
        #
        _cmd = ['param', vname, ':=']
        i = 0
        while i < Ldata:
            #print "HERE", i, cols, ncolumns
            for col in cols:
                _cmd.append( data[i+col] )
            i += ncolumns
        _process_param(_cmd, _model, _data, None, ncolumns=len(cols))
Example #37
0
 def __init__(self):
     self._info = {}
     self.options = Options()
Example #38
0
def create_test_suite(suite, config, _globals, options):
    #
    # Skip suite creation if the options categories do not intersect with the list of test suite categories
    #
    if len(options.categories) > 0:
        flag = False
        for cat in options.categories:
            if cat in config['suites'][suite].get('categories',[]):
                flag = True
                break
        if not flag:
            return
    #
    # Create test driver
    #
    if suite in _globals:
        raise IOError("Cannot create suite '%s' since there is another symbol with that name in the global namespace!" % suite)
    def setUpClassFn(cls):
        options = cls._options[None]
        cls._test_driver.setUpClass(cls,options)
    _globals[suite] = type(str(suite),(unittest.TestCase,),{'setUpClass': classmethod(setUpClassFn)})
    _globals[suite]._options[None] = options
    setattr(_globals[suite],'_test_driver', _globals['test_driver'])
    setattr(_globals[suite],'suite_categories', config['suites'][suite].get('categories',[]))
    #
    # Create test functions
    #
    tests = []
    if 'tests' in config['suites'][suite]:
        for item in config['suites'][suite]['tests']:
            tests.append( (item['solver'], item['problem'], item) )
    else:
        for solver in config['suites'][suite]['solvers']:
            for problem in config['suites'][suite]['problems']:
                tests.append( (solver, problem, {}) )
    #
    for solver, problem, item in tests:
        ##sname = solver
        if options.testname_format is None:
            test_name = solver+"_"+problem
        else:
            test_name = options.testname_format % (solver, problem)
        #
        def fn(testcase, name, suite):
            options = testcase._options[suite,name]
            fn.test_driver.setUp(testcase, options)
            ans = fn.test_driver.run_test(testcase, name, options)
            fn.test_driver.tearDown(testcase, options)
            return ans
        fn.test_driver = _globals['test_driver']
        #
        _options = Options()
        #
        problem_options = config['suites'][suite]['problems'][problem]
        if not problem_options is None and 'problem' in problem_options:
            _problem = problem_options['problem']
        else:
            _problem = problem
        for attr,value in config['problems'].get(_problem,{}).items():
            _options[attr] = _str(value)
        if not problem_options is None:
            for attr,value in problem_options.items():
                _options[attr] = _str(value)
        #
        solver_options = config['suites'][suite]['solvers'][solver]
        if not solver_options is None and 'solver' in solver_options:
            _solver = solver_options['solver']
        else:
            _solver = solver
        _name = _solver
        for attr,value in config['solvers'].get(_solver,{}).items():
            _options[attr] = _str(value)
            if attr == 'name':
                _name = value
        if not solver_options is None:
            for attr,value in solver_options.items():
                _options[attr] = _str(value)
        #
        for key in item:
            if key not in ['problem','solver']:
                _options[key] = _str(item[key])
        #
        _options.solver = _str(_name)
        _options.problem = _str(_problem)
        _options.suite = _str(suite)
        _options.currdir = _str(options.currdir)
        #
        _globals[suite].add_fn_test(name=test_name, fn=fn, suite=suite, options=_options)
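create_test_suite walks a nested configuration dictionary loaded from the test configuration file. Below is a minimal sketch of the layout it expects, inferred only from the keys referenced above; every suite, solver, and problem name is invented.

# Hypothetical configuration dict consistent with the lookups in create_test_suite.
config = {
    'problems': {
        'p1': {'files': 'p1.py'},        # arbitrary attributes copied into _options
    },
    'solvers': {
        'glpk': {'name': 'glpk'},        # optional 'name' overrides the solver label
    },
    'suites': {
        'mip': {
            'categories': ['smoke'],
            'solvers': {'glpk': None},   # per-suite solver overrides (or None)
            'problems': {'p1': None},    # per-suite problem overrides (or None)
        },
    },
}
# With options.testname_format = "%s_TEST_%s" the generated test is named
# "glpk_TEST_p1"; with no format it would be "glpk_p1".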
Example #39
0
class JSONDictionary(object):

    def __init__(self):
        self._info = {}
        self.options = Options()

    def available(self):
        return True

    def initialize(self, **kwds):
        self.filename = kwds.pop('filename')
        self.add_options(**kwds)

    def add_options(self, **kwds):
        self.options.update(kwds)

    def open(self):
        if self.filename is None:
            raise IOError("No filename specified")

    def close(self):
        pass

    def read(self):
        """
        This function loads data from a JSON file and tuplizes the nested
        dictionaries and lists of lists.
        """
        if not os.path.exists(self.filename):
            raise IOError("Cannot find file '%s'" % self.filename)
        INPUT = open(self.filename, 'r')
        if six.PY2 and self.options.convert_unicode:
            def _byteify(data, ignore_dicts=False):
                if isinstance(data, six.text_type):
                    return data.encode('utf-8') 
                if isinstance(data, list):
                    return [ _byteify(item, True) for item in data ]
                if isinstance(data, dict) and not ignore_dicts:
                    return dict( (_byteify(key, True), _byteify(value, True)) for (key, value) in data.iteritems() )
                return data
            jdata = json.load(INPUT, object_hook=_byteify)
        else:
            jdata = json.load(INPUT)
        INPUT.close()
        if jdata is None or len(jdata) == 0:
            raise IOError("Empty JSON data file")
        self._info = {}
        for k,v in jdata.items():
            self._info[k] = tuplize(v)

    def write(self, data):
        """
        This function creates a JSON file for the specified data.
        """
        with open(self.filename, 'w') as OUTPUT:
            jdata = {}
            if self.options.data is None:
                for k,v in data.items():
                    jdata[k] = detuplize(v)
            elif type(self.options.data) in (list, tuple):
                for k in self.options.data:
                    jdata[k] = detuplize(data[k], sort=self.options.sort)
            else:
                k = self.options.data
                jdata[k] = detuplize(data[k])
            json.dump(jdata, OUTPUT)

    def process(self, model, data, default):
        """
        Set the data for the selected components
        """
        if not self.options.namespace in data:
            data[self.options.namespace] = {}
        #
        try:
            if self.options.data is None:
                for key in self._info:
                    self._set_data(data, self.options.namespace, key, self._info[key])
            elif type(self.options.data) in (list, tuple):
                for key in self.options.data:
                    self._set_data(data, self.options.namespace, key, self._info[key])
            else:
                key = self.options.data
                self._set_data(data, self.options.namespace, key, self._info[key])
        except KeyError:
            raise IOError("Data value for '%s' is not available in JSON file '%s'" % (key, self.filename))

    def _set_data(self, data, namespace, name, value):
        if type(value) is dict:
            data[namespace][name] = value
        else:
            data[namespace][name] = {None: value}

    def clear(self):
        self._info = {}
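A minimal usage sketch for JSONDictionary, under the assumptions implied by the class above; the file name and its contents are invented for illustration.

# Hypothetical usage (file name and contents invented).
# example.json contains:  {"A": [1, 2, 3], "p": {"1": 10, "2": 20}}
jd = JSONDictionary()
jd.initialize(filename='example.json')  # store the filename plus any extra options
jd.open()                               # raises IOError if no filename was given
jd.read()                               # load the JSON data and tuplize nested values
data = {}
jd.process(None, data, None)            # copy the values into data[self.options.namespace]
jd.close()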
Example #40
0
def convert_dakota(options=Options(), parser=None):
    #
    # Import plugins
    #
    import pyomo.environ

    model_file = os.path.basename(options.model.save_file)
    model_file_no_ext = os.path.splitext(model_file)[0]

    #
    # Set options for writing the .nl and related files
    #

    # By default replace .py with .nl
    if options.model.save_file is None:
       options.model.save_file = model_file_no_ext + '.nl'
    options.model.save_format = ProblemFormat.nl
    # Dakota requires .row/.col files
    options.model.symbolic_solver_labels = True

    #
    # Call the core converter
    #
    model_data = convert(options, parser)

    #
    # Generate Dakota input file fragments for the Vars, Objectives, Constraints
    #

    # TODO: the converted model doesn't expose the right symbol_map
    #       for only the vars active in the .nl

    model = model_data.instance

    # Easy way
    #print "VARIABLE:"
    #lines = open(options.save_model.replace('.nl','.col'),'r').readlines()
    #for varName in lines:
    #    varName = varName.strip()
    #    var = model_data.symbol_map.getObject(varName)
    #    print "'%s': %s" % (varName, var)
    #    #print var.pprint()

    # Hard way
    variables = 0
    var_descriptors = []
    var_lb = []
    var_ub = []
    var_initial = []
    tmpDict = model_data.symbol_map.getByObjectDictionary()
    for var in model.component_data_objects(Var, active=True):
        if id(var) in tmpDict:
            variables += 1
            var_descriptors.append(var.cname(True))

            # apply user bound, domain bound, or infinite
            _lb, _ub = var.bounds
            if _lb is not None:
                var_lb.append(str(_lb))
            else:
                var_lb.append("-inf")

            if _ub is not None:
                var_ub.append(str(_ub))
            else:
                var_ub.append("inf")

            try:
                val = value(var)
            except:
                val = None
            var_initial.append(str(val))

    objectives = 0
    obj_descriptors = []
    for obj in model.component_data_objects(Objective, active=True):
        objectives += 1
        obj_descriptors.append(obj.cname(True))

    constraints = 0
    cons_descriptors = []
    cons_lb = []
    cons_ub = []
    for con in model.component_data_objects(Constraint, active=True):
        constraints += 1
        cons_descriptors.append(con.cname(True))
        if con.lower is not None:
            cons_lb.append(str(con.lower))
        else:
            cons_lb.append("-inf")
        if con.upper is not None:
            cons_ub.append(str(con.upper))
        else:
            cons_ub.append("inf")

    # Write the Dakota input file fragments

    dakfrag = open(model_file_no_ext + ".dak", 'w')

    dakfrag.write("#--- Dakota variables block ---#\n")
    dakfrag.write("variables\n")
    dakfrag.write("  continuous_design " + str(variables) + '\n')
    dakfrag.write("    descriptors\n")
    for vd in var_descriptors:
        dakfrag.write("      '%s'\n" % vd)
    dakfrag.write("    lower_bounds " + " ".join(var_lb) + '\n')
    dakfrag.write("    upper_bounds " + " ".join(var_ub) + '\n')
    dakfrag.write("    initial_point " + " ".join(var_initial) + '\n')

    dakfrag.write("#--- Dakota interface block ---#\n")
    dakfrag.write("interface\n")
    dakfrag.write("  algebraic_mappings = '" + options.model.save_file  + "'\n")

    dakfrag.write("#--- Dakota responses block ---#\n")
    dakfrag.write("responses\n")
    dakfrag.write("  objective_functions " + str(objectives) + '\n')

    if (constraints > 0):
        dakfrag.write("  nonlinear_inequality_constraints " + str(constraints) + '\n')
        dakfrag.write("    lower_bounds " + " ".join(cons_lb) + '\n')
        dakfrag.write("    upper_bounds " + " ".join(cons_ub) + '\n')

    dakfrag.write("    descriptors\n")
    for od in obj_descriptors:
        dakfrag.write("      '%s'\n" % od)
    if (constraints > 0):
        for cd in cons_descriptors:
            dakfrag.write("      '%s'\n" % cd)

    # TODO: detect whether gradient information available in model
    dakfrag.write("  analytic_gradients\n")
    dakfrag.write("  no_hessians\n")

    dakfrag.close()

    sys.stdout.write( "Dakota input fragment written to file '%s'\n" 
                      % (model_file_no_ext + ".dak",) )
    return model_data
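For orientation, this is roughly the Dakota fragment the write calls above would produce for a hypothetical model with two variables, one objective, and one inequality constraint; the names, bounds, and initial point are invented.

#--- Dakota variables block ---#
variables
  continuous_design 2
    descriptors
      'x'
      'y'
    lower_bounds 0.0 -inf
    upper_bounds 10.0 inf
    initial_point 1.0 0.0
#--- Dakota interface block ---#
interface
  algebraic_mappings = 'model.nl'
#--- Dakota responses block ---#
responses
  objective_functions 1
  nonlinear_inequality_constraints 1
    lower_bounds -inf
    upper_bounds 0.0
    descriptors
      'obj'
      'c1'
  analytic_gradients
  no_hessians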
Example #41
0
        self.model.x = Var()

    def compare(self):
        S = Pyomo2FuncDesigner(self.model)
        self.assertAlmostEqual(self.model.f(), S.f(S.initial_point))

    def tearDown(self):
        self.model = None


@unittest.nottest
def expr_test(self, name):
    options = self.get_options(name)
    self.model.x.value = options.x
    if name == 'pow':
        self.model.f = Objective(expr=options.fn(self.model.x, 2))
    else:
        self.model.f = Objective(expr=options.fn(self.model.x))
    self.compare()
    


for i in range(len(fns)):
    options = Options()
    options.fn = fns[i]
    options.x  = xs[i]
    Tests.add_fn_test(fn=expr_test, name=fns[i].__name__, options=options)    

if __name__ == "__main__":
    unittest.main()
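The loop above refers to fns and xs lists defined outside this excerpt. As a sketch only, assuming Pyomo's intrinsic expression functions are available, such lists could look like this; the particular functions and starting values are invented.

# Hypothetical definitions for the fns/xs lists used by the loop above.
from pyomo.environ import sin, cos, exp, log

fns = [sin, cos, exp, log, pow]   # pow exercises the two-argument branch in expr_test
xs  = [0.5, 0.5, 1.0, 2.0, 2.0]   # one starting value per function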
Example #42
0
class TableData(Plugin):
    """
    An object that imports data from a table in an external data source.
    """

    implements(IDataManager, service=False)

    def __init__(self):
        """
        Constructor
        """
        self._info=None
        self._data=None
        self.options = Options()
        self.options.ncolumns = 1

    def available(self):
        return True

    def initialize(self, **kwds):
        self.filename = kwds.pop('filename')
        self.add_options(**kwds)

    def add_options(self, **kwds):
        self.options.update(kwds)

    def open(self):                        #pragma:nocover
        """
        Open the table
        """
        pass

    def read(self):                         #pragma:nocover
        """
        Read data from the table
        """
        return False

    def write(self, data):                  #pragma:nocover
        """
        Write data from the table
        """
        return False

    def close(self):                        #pragma:nocover
        """
        Close the table
        """
        pass

    def process(self, model, data, default):
        """
        Return the data that was extracted from this table
        """
        if model is None:
            model = self.options.model
        if not self.options.namespace in data:
            data[self.options.namespace] = {}
        return _process_data(
          self._info,
          model,
          data[self.options.namespace],
          default,
          self.filename,
          index=self.options.index,
          set=self.options.set,
          param=self.options.param,
          ncolumns = self.options.ncolumns)

    def clear(self):
        """
        Clear the data that was extracted from this table
        """
        self._info = None

    def _set_data(self, headers, rows):
        header_index = []
        if self.options.select is None:
            for i in xrange(len(headers)):
                header_index.append(i)
        else:
            for i in self.options.select:
                header_index.append(headers.index(str(i)))
        self.options.ncolumns = len(headers)

        if not self.options.param is None:
            if not type(self.options.param) in (list, tuple):
                self.options.param = (self.options.param,)
            _params = []
            for p in self.options.param:
                if isinstance(p, Param):
                    self.options.model = p.model()
                    _params.append(p.local_name)
                else:
                    _params.append(p)
            self.options.param = tuple(_params)

        if isinstance(self.options.set, Set):
            self.options.model = self.options.set.model()
            self.options.set = self.options.set.local_name

        if isinstance(self.options.index, Set):
            self.options.model = self.options.index.model()
            self.options.index = self.options.index.local_name

        if self.options.format is None:
            if not self.options.set is None:
                self.options.format = 'set'
            elif not self.options.param is None:
                self.options.format = 'table'
            if self.options.format is None:
                raise ValueError("Unspecified format and  data option")
        elif self.options.set is None and self.options.param is None:
            msg = "Must specify the set or parameter option for data"
            raise IOError(msg)

        if self.options.format == 'set':
            if not self.options.index is None:
                msg = "Cannot specify index for data with the 'set' format: %s"
                raise IOError(msg % str(self.options.index))

            self._info = ["set",self.options.set,":="]
            for row in rows:
                if self.options.ncolumns > 1:
                    self._info.append(tuple(row))
                else:
                    self._info.extend(row)

        elif self.options.format == 'set_array':
            if not self.options.index is None:
                msg = "Cannot specify index for data with the 'set_array' "   \
                      'format: %s'
                raise IOError(msg % str(self.options.index))

            self._info = ["set",self.options.set, ":"]
            self._info.extend(headers[1:])
            self._info.append(":=")
            for row in rows:
                self._info.extend(row)

        elif self.options.format == 'transposed_array':
            self._info = ["param",self.options.param[0],"(tr)",":"]
            self._info.extend(headers[1:])
            self._info.append(":=")
            for row in rows:
                self._info.extend(row)

        elif self.options.format == 'array':
            self._info = ["param",self.options.param[0],":"]
            self._info.extend(headers[1:])
            self._info.append(":=")
            for row in rows:
                self._info.extend(row)

        elif self.options.format == 'table':
            if self.options.index is not None:
                self._info = ["param",":",self.options.index,":"]
            else:
                self._info = ["param",":"]
            for param in self.options.param:
                self._info.append(param)
            self._info.append(":=")
            for row in rows:
                for i in header_index:
                    self._info.append(row[i])
            self.options.ncolumns = len(header_index)
        else:
            msg = "Unknown parameter format: '%s'"
            raise ValueError(msg % self.options.format)

    def get_table(self):
        tmp = []
        if not self.options.columns is None:
            tmp.append(self.options.columns)
        if not self.options.set is None:
            # Create column names
            if self.options.columns is None:
                cols = []
                for i in xrange(self.options.set.dimen):
                    cols.append(self.options.set.local_name+str(i))
                tmp.append(cols)
            # Get rows
            if not self.options.sort is None:
                for data in sorted(self.options.set):
                    if self.options.set.dimen > 1:
                        tmp.append(list(data))
                    else:
                        tmp.append([data])
            else:
                for data in self.options.set:
                    if self.options.set.dimen > 1:
                        tmp.append(list(data))
                    else:
                        tmp.append([data])
        elif not self.options.param is None:
            if type(self.options.param) in (list,tuple):
                _param = self.options.param
            else:
                _param = [self.options.param]
            tmp = []
            # Collect data
            for index in _param[0]:
                if index is None:
                    row = []
                elif type(index) in (list,tuple):
                    row = list(index)
                else:
                    row = [index]
                for param in _param:
                    row.append(value(param[index]))
                tmp.append(row)
            # Create column names
            if self.options.columns is None:
                cols = []
                for i in xrange(len(tmp[0])-len(_param)):
                    cols.append('I'+str(i))
                for param in _param:
                    cols.append(param)
                tmp = [cols] + tmp
        return tmp
Example #43
0
 def __init__(self):
     self._info = {}
     self.options = Options()
Example #44
0
            #return Constraint.Skip
        return expr >= 1
    model.cover = Constraint(model.I, rule=cover_rule)

    #
    print_model_stats(model_options, model)
    return model

def test_model(options=None):
    model = pyomo_create_model(model_options=options)
    #print_model_stats(options, model)

if __name__ == '__main__':
    test_model()
    #
    options = Options()
    options.type = 'fixed_set_size'
    options.m = 11
    options.n = 21
    options.rho = 0.3
    test_model(options)
    #
    options = Options()
    options.type = 'fixed_element_coverage'
    test_model(options)
    #
    options = Options()
    options.m = 100
    options.n = 200
    options.type = 'fixed_probability'
    test_model(options)
Example #45
0
    def __init__(self, **kwds):
        OptSolver.__init__(self, **kwds)

        self._pyomo_model = None
        """The pyomo model being solved."""

        self._solver_model = None
        """The python instance of the solver model (e.g., the gurobipy Model instance)."""

        self._symbol_map = SymbolMap()
        """A symbol map used to map between pyomo components and their names used with the solver."""

        self._labeler = None
        """The labeler for creating names for the solver model components."""

        self._pyomo_var_to_solver_var_map = ComponentMap()
        """A dictionary mapping pyomo Var's to the solver variables."""

        self._pyomo_con_to_solver_con_map = ComponentMap()
        """A dictionary mapping pyomo constraints to solver constraints."""

        self._vars_referenced_by_con = ComponentMap()
        """A dictionary mapping constraints to a ComponentSet containt the pyomo variables referenced by that
        constraint. This is primarily needed for the persistent solvers. When a constraint is deleted, we need
        to decrement the number of times those variables are referenced (see self._referenced_variables)."""

        self._vars_referenced_by_obj = ComponentSet()
        """A set containing the pyomo variables referenced by that the objective.
        This is primarily needed for the persistent solvers. When a the objective is deleted, we need
        to decrement the number of times those variables are referenced (see self._referenced_variables)."""

        self._objective = None
        """The pyomo Objective object currently being used with the solver."""

        self.results = None
        """A results object return from the solve method."""

        self._skip_trivial_constraints = False
        """A bool. If True, then any constraints with a constant body will not be added to the solver model.
        Be careful with this. If a trivial constraint is skipped then that constraint cannot be removed from
        a persistent solver (an error will be raised if a user tries to remove a non-existent constraint)."""

        self._output_fixed_variable_bounds = False
        """A bool. If False then an error will be raised if a fixed variable is used in one of the solver constraints.
        This is useful for catching bugs. Ordinarily a fixed variable should appear as a constant value in the
        solver constraints. If True, then the error will not be raised."""

        self._python_api_exists = False
        """A bool indicating whether or not the python api is available for the specified solver."""

        self._version = None
        """The version of the solver."""

        self._version_major = None
        """The major version of the solver. For example, if using Gurobi 7.0.2, then _version_major is 7."""

        self._symbolic_solver_labels = False
        """A bool. If true then the solver components will be given names corresponding to the pyomo component names."""

        self._capabilites = Options()

        self._referenced_variables = ComponentMap()
        """dict: {var: count} where count is the number of constraints/objective referencing the var"""

        self._keepfiles = False
        """A bool. If True, then the solver log will be saved."""

        self._save_results = True
        """A bool. This is used for backwards compatability. If True, the solution will be loaded into the Solution
Example #46
0
def pyomo_create_model(options=None, model_options=None):
    if model_options is None:
        model_options = Options()
    if model_options.type is None:
        model_options.type = 'fixed_set_size'
    #
    # m - number of elements
    #
    m = 100 if model_options.m is None else model_options.m
    #
    # n - number of sets
    #
    n = 200 if model_options.n is None else model_options.n
    seed = 9090 if model_options.seed is None else model_options.seed
    random.seed(seed)
    #
    if model_options.type == 'fixed_set_size':
        #
        # p   - fixed number elements per set
        # rho - fixed fraction of elements per set
        #
        p = model_options.p
        if p is None:
            if model_options.rho is None:
                p = int(math.ceil(m * 0.7))
            else:
                p = int(math.ceil(m * model_options.rho))
        #
        def S_rule(model):
            ans = set()
            for j in xrange(1,n+1):
                tmp = list(range(1,m+1))
                random.shuffle( tmp )
                for i in range(0,p):
                    ans.add( (tmp[i], j) )
            return ans
    elif model_options.type == 'fixed_element_coverage':
        #
        # p   - fixed number of sets that cover each element
        # rho - fixed fraction of sets that cover each element
        #
        p = model_options.p
        if p is None:
            if model_options.rho is None:
                p = int(math.ceil(n * 0.4))
            else:
                p = int(math.ceil(n * model_options.rho))
        #
        def S_rule(model):
            ans = set()
            for i in xrange(1,m+1):
                tmp = list(range(1,n+1))
                random.shuffle( tmp )
                for j in range(0,p):
                    ans.add( (i, tmp[j]) )
            return ans
    elif model_options.type == 'fixed_probability':
        #
        # rho - probability of selecting element for a set
        #
        rho = 0.3 if model_options.rho is None else model_options.rho
        #
        def S_rule(model):
            ans = set()
            for j in xrange(1,n+1):
                for i in xrange(1,m+1):
                    if random.uniform(0,1) < rho:
                        ans.add( (i, j) )
            return ans
    elif model_options.type == 'fixed_fill':
        #
        # rho - |S|/(I*J)
        #
        rho = 0.3 if model_options.rho is None else model_options.rho
        #
        def S_rule(model):
            ans = set()
            for j in xrange(1,n+1):
                for i in xrange(1,m+1):
                    if random.uniform(0,1) < rho:
                        ans.add( (i, j) )
            return ans
    #
    # CREATE MODEL
    #
    model = ConcreteModel()
    #
    # (i,j) in S if element i in set j
    #
    model.S = Set(dimen=2, initialize=S_rule)
    #
    # Dynamically create the I and J index sets, since
    # some rows or columns of S may not be populated.
    #
    def I_rule(model):
        return set((i for (i,j) in model.S))
    model.I = Set(initialize=I_rule)
    def J_rule(model):
        return set((j for (i,j) in model.S))
    model.J = Set(initialize=J_rule)
    #
    # Weights
    #
    model.w = Param(model.J, within=NonNegativeReals, initialize=1.0)
    #
    # Set selection binary variables
    #
    model.x = Var(model.J, within=Binary)
    #
    # Objective
    #
    def cost_rule(model):
        return sum_product(model.w, model.x)
    model.cost = Objective(rule=cost_rule)

    #
    # Constraint
    #
    def cover_rule(model, i):
        expr = 0
        for j in model.x:
            if (i,j) in model.S:
                expr += model.x[j]
        #
        # WEH - this check is not needed, since I is constructed dynamically
        #
        #if expr is 0:
            #return Constraint.Skip
        return expr >= 1
    model.cover = Constraint(model.I, rule=cover_rule)

    #
    print_model_stats(model_options, model)
    return model
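A quick sanity check of the sizing logic above, using illustrative numbers:

# fixed_set_size with m=100, p=None, rho=0.3          -> p = ceil(100 * 0.3) = 30 elements per set
# fixed_element_coverage with n=200, p=None, rho=None -> p = ceil(200 * 0.4) = 80 covering sets per element
# fixed_probability with rho=0.3                      -> each (i, j) pair is included with probability 0.3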
Example #47
0
except:
    IPython_available = False
else:
    ipshell = IPShellEmbed([''],
                           banner='\n# Dropping into Python interpreter',
                           exit_msg='\n# Leaving Interpreter, back to Pyomo\n')

from pyutilib.misc import Options
try:
    from pympler import muppy
    from pympler import summary
    from pympler.asizeof import *
    pympler_available = True
except:
    pympler_available = False
memory_data = Options()

import pyutilib.misc
from pyomo.util.plugin import ExtensionPoint, Plugin, implements
from pyutilib.misc import Container
from pyutilib.services import TempfileManager

from pyomo.opt import ProblemFormat
from pyomo.opt.base import SolverFactory
from pyomo.opt.parallel import SolverManagerFactory
from pyomo.core import *
from pyomo.core.base import TextLabeler
import pyomo.core.base

filter_excepthook = False
modelapi = {
Example #48
0
# create the constraints
model.ca_bal = Constraint(expr = (0 == model.sv * caf \
                 - model.sv * model.ca - k1 * model.ca \
                 -  2.0 * k3 * model.ca ** 2.0))

model.cb_bal = Constraint(expr=(0 == -model.sv * model.cb \
                 + k1 * model.ca - k2 * model.cb))

model.cc_bal = Constraint(expr=(0 == -model.sv * model.cc \
                 + k2 * model.cb))

model.cd_bal = Constraint(expr=(0 == -model.sv * model.cd \
                 + k3 * model.ca ** 2.0))

# setup the solver options
options = Options()
options.solver = 'ipopt'
options.quiet = True

# run the sequence of square problems
instance = model.create()
instance.sv.fixed = True
sv_values = [1.0 + v * 0.05 for v in range(1, 20)]
print "   ", 'sv'.rjust(10), 'cb'.rjust(10)
for sv_value in sv_values:
    instance.sv = sv_value
    results, opt = \
        scripting.util.apply_optimizer(options, instance)
    instance.load(results)
    print "   ", str(instance.sv.value).rjust(10),\
        str(instance.cb.value).rjust(15)
Example #49
0
def run_pyomo(options=Options(), parser=None):
    data = Options(options=options)

    if options.model.filename == '':
        parser.print_help()
        return Container()

    try:
        pyomo.scripting.util.setup_environment(data)

        pyomo.scripting.util.apply_preprocessing(data, parser=parser)
    except:
        # TBD: I should be able to call this function in the case of
        #      an exception to perform cleanup. However, as it stands
        #      calling finalize with its default keyword value for
        #      model(=None) results in a different error related to
        #      task port values.  Not sure how to interpret that.
        pyomo.scripting.util.finalize(data,
                                      model=ConcreteModel(),
                                      instance=None,
                                      results=None)
        raise
    else:
        if data.error:
            # TBD: I should be able to call this function in the case of
            #      an exception to perform cleanup. However, as it stands
            #      calling finalize with its default keyword value for
            #      model(=None) results in a different error related to
            #      task port values.  Not sure how to interpret that.
            pyomo.scripting.util.finalize(data,
                                          model=ConcreteModel(),
                                          instance=None,
                                          results=None)
            return Container()  #pragma:nocover

    try:
        model_data = pyomo.scripting.util.create_model(data)
    except:
        # TBD: I should be able to call this function in the case of
        #      an exception to perform cleanup. However, as it stands
        #      calling finalize with its default keyword value for
        #      model(=None) results in a different error related to
        #      task port values.  Not sure how to interpret that.
        pyomo.scripting.util.finalize(data,
                                      model=ConcreteModel(),
                                      instance=None,
                                      results=None)
        raise
    else:
        if (((not options.runtime.logging == 'debug') and \
             options.model.save_file) or \
            options.runtime.only_instance):
            pyomo.scripting.util.finalize(data,
                                          model=model_data.model,
                                          instance=model_data.instance,
                                          results=None)
            return Container(instance=model_data.instance)

    try:
        opt_data = pyomo.scripting.util.apply_optimizer(
            data, instance=model_data.instance)

        pyomo.scripting.util.process_results(data,
                                             instance=model_data.instance,
                                             results=opt_data.results,
                                             opt=opt_data.opt)

        pyomo.scripting.util.apply_postprocessing(data,
                                                  instance=model_data.instance,
                                                  results=opt_data.results)
    except:
        # TBD: I should be able to call this function in the case of
        #      an exception to perform cleanup. However, as it stands
        #      calling finalize with its default keyword value for
        #      model(=None) results in a different error related to
        #      task port values.  Not sure how to interpret that.
        pyomo.scripting.util.finalize(data,
                                      model=ConcreteModel(),
                                      instance=None,
                                      results=None)
        raise
    else:
        pyomo.scripting.util.finalize(data,
                                      model=model_data.model,
                                      instance=model_data.instance,
                                      results=opt_data.results)

        return Container(options=options,
                         instance=model_data.instance,
                         results=opt_data.results,
                         local=opt_data.local)
Example #50
0
def run_command(command=None,
                parser=None,
                args=None,
                name='unknown',
                data=None,
                options=None):
    """
    Execute a function that processes command-line arguments and
    then calls a command-line driver.

    This function provides a generic facility for executing a command
    function.  It is segregated from the driver to enable profiling of
    the command-line execution.

    Required:
        command:    The name of a function that will be executed to process the command-line
                    options with a parser object.
        parser:     The parser object that is used by the command-line function.

    Optional:
        options:    If this is not None, then ignore the args option and use
                    this to specify command options.
        args:       Command-line arguments that are parsed.  If this value is `None`, then the
                    arguments in `sys.argv` are used to parse the command-line.
        name:       The name of the command-line tool (used in error messages).
        data:       A container of labeled data.

    Returned:
        retval:     Return values from the command-line execution.
        errorcode:  0 if Pyomo ran successfully
    """
    #
    # Parse command-line options
    #
    retval = None
    errorcode = 0
    if options is None:
        try:
            if type(args) is argparse.Namespace:
                _options = args
            else:
                _options = parser.parse_args(args=args)
            # Replace the parser options object with a pyutilib.misc.Options object
            options = pyutilib.misc.Options()
            for key in dir(_options):
                if key[0] != '_':
                    val = getattr(_options, key)
                    if not isinstance(val, types.MethodType):
                        options[key] = val
        except SystemExit:
            # the parser throws a system exit if "-h" is specified - catch
            # it to exit gracefully.
            return Container(retval=retval, errorcode=errorcode)
    #
    # Configure loggers
    #
    fileLogger = configure_loggers(options=options)
    #
    # Setup I/O redirect to a file
    #
    if fileLogger is not None:
        # TBD: This seems dangerous in Windows, as the process will
        # have multiple open file handles pointing to the same file.
        pyutilib.misc.setup_redirect(options.runtime.logfile)
    #
    # Call the main Pyomo runner with profiling
    #
    TempfileManager.push()
    pcount = options.runtime.profile_count
    if pcount > 0:
        if not pstats_available:
            msg = "Cannot use the 'profile' option.  The Python 'pstats' "    \
                  'package cannot be imported!'
            if fileLogger is not None:
                pyutilib.misc.reset_redirect()
                fileLogger.close()
                configure_loggers(options=Options(), reset=True)
            raise ValueError(msg)
        tfile = TempfileManager.create_tempfile(suffix=".profile")
        tmp = profile.runctx(
            command.__name__ + '(options=options,parser=parser)',
            command.__globals__, locals(), tfile)
        p = pstats.Stats(tfile).strip_dirs()
        p.sort_stats('time', 'cumulative')
        p = p.print_stats(pcount)
        p.print_callers(pcount)
        p.print_callees(pcount)
        p = p.sort_stats('cumulative', 'calls')
        p.print_stats(pcount)
        p.print_callers(pcount)
        p.print_callees(pcount)
        p = p.sort_stats('calls')
        p.print_stats(pcount)
        p.print_callers(pcount)
        p.print_callees(pcount)
        retval = tmp
    else:
        #
        # Call the main Pyomo runner without profiling
        #
        TempfileManager.push()
        try:
            retval = command(options=options, parser=parser)
        except SystemExit:
            err = sys.exc_info()[1]
            #
            # If debugging is enabled or the 'catch' option is specified, then
            # exit.  Otherwise, print an "Exiting..." message.
            #
            if __debug__ and (options.runtime.logging == 'debug'
                              or options.runtime.catch_errors):
                if fileLogger is not None:
                    pyutilib.misc.reset_redirect()
                    fileLogger.close()
                    configure_loggers(options=Options(), reset=True)
                sys.exit(0)
            print('Exiting %s: %s' % (name, str(err)))
            errorcode = err.code
        except Exception:
            err = sys.exc_info()[1]
            #
            # If debugging is enabled or the 'catch' option is specified, then
            # pass the exception up the chain (to pyomo_excepthook)
            #
            if __debug__ and (options.runtime.logging == 'debug'
                              or options.runtime.catch_errors):
                if fileLogger is not None:
                    pyutilib.misc.reset_redirect()
                    fileLogger.close()
                    configure_loggers(options=Options(), reset=True)
                TempfileManager.pop(remove=not options.runtime.keep_files)
                raise

            if not options.model is None and not options.model.save_file is None:
                model = "model " + options.model.save_file
            else:
                model = "model"

            global filter_excepthook
            if filter_excepthook:
                action = "loading"
            else:
                action = "running"

            msg = "Unexpected exception while %s %s:\n" % (action, model)
            #
            # This handles the case where the error is propagated by a KeyError.
            # KeyError likes to pass raw strings that don't handle newlines
            # (they translate "\n" to "\\n"), as well as tacking on single
            # quotes at either end of the error message. This undoes all that.
            #
            errStr = str(err)
            if type(err) == KeyError and errStr != "None":
                errStr = str(err).replace(r"\n", "\n")[1:-1]

            logging.getLogger('pyomo.core').error(msg + errStr)
            errorcode = 1

    if fileLogger is not None:
        pyutilib.misc.reset_redirect()
        fileLogger.close()
        configure_loggers(options=Options(), reset=True)

    if options.runtime.disable_gc:
        gc.enable()
    TempfileManager.pop(remove=not options.runtime.keep_files)
    return Container(retval=retval, errorcode=errorcode)
Example #51
0
def X_process_load(cmd, _model, _data, _default, options=None):
    #logger.warning("WARNING: the 'import' data command is deprecated")

    #print "LOAD",cmd
    options = Options(**cmd[1])
    for key in options:
        if not key in ['range','filename','format','using','driver','query','table','user','password']:
            raise ValueError("Unknown import option '%s'" % key)

    global Filename
    Filename = cmd[4]
    global Lineno
    Lineno = 0

    #
    # TODO: process mapping info
    #
    if options.using is None:
        tmp = options.filename.split(".")[-1]
        data = DataManagerFactory(tmp)
        if data is None:
            raise pyutilib.common.ApplicationError("Data manager '%s' is not available." % tmp)
    else:
        data = DataManagerFactory(options.using)
        if data is None:
            raise pyutilib.common.ApplicationError("Data manager '%s' is not available." % options.using)
    set_name=None
    param_name=None
    #
    # Create symbol map
    #
    symb_map = cmd[3]
    if len(symb_map) == 0:
        raise IOError("Must specify at least one set or parameter name that will be imported")
    #
    # Process index data
    #
    _index=None
    index_name=cmd[2][0]
    _select = None
    #
    # Set the 'set name' based on the format
    #
    _set = None
    if options.format == 'set' or options.format == 'set_array':
        if len(cmd[3]) != 1:
            raise IOError("A single set name must be specified when using format '%s'" % options.format)
        set_name=cmd[3].keys()[0]
        _set = set_name
    #
    # Set the 'param name' based on the format
    #
    _param = None
    if options.format == 'transposed_array' or options.format == 'array' or options.format == 'param':
        if len(cmd[3]) != 1:
            raise IOError("A single parameter name must be specified when using format '%s'" % options.format)
    if options.format in ('transposed_array', 'array', 'param', None):
        if cmd[2][0] is None:
            _index = None
        else:
            _index = cmd[2][0]
        _param = []
        _select = cmd[2][1]
        for key in cmd[3]:
            _param.append( cmd[3][key] )
            _select.append( key )
    if options.format in ('transposed_array', 'array'):
        _select = None

    #print "YYY", _param, options
    if not _param is None and len(_param) == 1 and not _model is None and isinstance(getattr(_model, _param[0]), Set):
        _select = None
        _set = _param[0]
        _param = None
        _index = None

    #print "SELECT", _param, _select
    #
    data.initialize(model=options.model, filename=options.filename, index=_index, index_name=index_name, param_name=symb_map, set=_set, param=_param, format=options.format, range=options.range, query=options.query, using=options.using, table=options.table, select=_select)
    #
    data.open()
    try:
        data.read()
    except Exception:
        data.close()
        raise
    data.close()
    data.process(_model, _data, _default)
Example #52
0
def deterministic_PRDP_solve_with_return(mipgap, model_data, output_directory):
	### Start Solution Timer
	start_time = timer.clock()
	init_mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

	##Solver Choice
	opt = SolverFactory("cplex")
	options = Options()
	opt.options.mip_tolerances_mipgap = mipgap
	
	
	##########################################
	### Generate Scenario
	##########################################
	
	#### Problem Info For Scenario Generation
	num_product = len(model_data._data['product'][None])
	prod = model_data._data['product'][None]
	
	num_trial = len(model_data._data['trial'][None])
	sg = model_data._data['trial'][None]
	
	prob = model_data._data['probability']
	num_ts = len(model_data._data['time_step'][None])
	
	### Generate all possible outcomes
	Outcomes = itertools.product(range(num_trial + 1), repeat = num_product)
	Outcomes = tuple(Outcomes)
	
	### From Outcomes Name and Generate Scenarios
	scenario = 1
	List_of_Scenarios = {}
	SS=[]
	
	for items in Outcomes:
		scenario_name = scenario
		List_of_Scenarios[scenario_name] = scenario_class.scenario(items,prob, prod,sg)
		SS.append(scenario_name)
		scenario += 1
	
	##########################################################
	### Input Parameters to Solver
	##########################################################
	
	rev_max = {}
	gammaL = {}
	gammaD = {}
	duration = {}
	trial_cost = {}
	revenue_max = {}
	success = {}
	rev_run = {}
	rev_open = {}
	discounting_factor ={}
	
	##Set product
	product = model_data._data['product'][None]
		
	##Set stage_gate
	stage_gate = model_data._data['trial'][None]
	
	## Set time step
	time_step = model_data._data['time_step'][None]
	
	##Set resource type
	resource_type = model_data._data['resource_type'][None]
	
	## Set duration
	duration = model_data._data['trial_duration']
	
	## Set trial cost
	trial_cost = model_data._data['trial_cost']
	
	## Set Discount Values
	for items in model_data._data['gammaL']:
		gammaL[items[0]] = model_data._data['gammaL'][items]
		
	for items in model_data._data['gammaD']:
		gammaD[items[0]] = model_data._data['gammaD'][items]
	
	## Set Maximum Revenue	
	for items in model_data._data['maximum_revenue']:
		revenue_max[items[0]] = model_data._data['maximum_revenue'][items]
		
	## Set Last Trial
	last_trial = len(stage_gate)
	
	last_time_step = len(time_step)
	
	##Calculate Success matrix
	success = M2S_item.calc_success(product, num_trial, List_of_Scenarios)
	
	## Calculate running rev
	rev_run = M2S_item.calc_rr(revenue_max,gammaL,duration, product, stage_gate, time_step)
		
	##Calculate open rev  
	rev_open = M2S_item.calc_openrev(revenue_max,gammaL,duration, product, stage_gate, time_step, last_time_step)
	
	##Calculate Discounting Factor
	discounting_factor = M2S_item.calc_discounting_factor(revenue_max,gammaL,trial_cost, product, stage_gate, last_time_step)
	
	
	## Set Probabilities and Outcomes	
	pb = {}
	outcome = {}
	for s in SS:
		pb[s] = List_of_Scenarios[s].probability
		outcome[s] = List_of_Scenarios[s].outcome
			
	resource_max = {}
	for items in model_data._data['max_resource']:
		resource_max[items[0]] = model_data._data['max_resource'][items]
		
	resource_required = {}
	resource_required = model_data._data['resource_requirement']
	
	#######################################################################
	### Generate Non-Anticipativity Constraints
	#######################################################################

	OC = {}
	for s in SS:
		OC[s] = [] 
		for i in prod:
			OC[s].append(List_of_Scenarios[s].outcome[prod.index(i)])

	phi = {}
	phii = {}
	phij = {}
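	# phi marks pairs of scenarios (s, sp) whose outcome vectors differ by exactly
	# one trial in a single product's entry; phii and phij record the
	# distinguishing product and trial.  These indicators are passed to
	# defunction.de below to build the non-anticipativity constraints.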

		
	for s in SS:
		for sp in SS:
			if sp > s:
				for i in prod:
					OCtest = list(OC[s])
					OCtest[prod.index(i)] += 1
					OCtest2 = list(OC[s])
					OCtest2[prod.index(i)] += -1
					if OCtest == OC[sp]:
						trl = OC[s][prod.index(i)] + 1
						phi[(s,sp)] = 1
						phii[(s,sp)] = i
						phij[(s,sp)] = trl
					if OCtest2 == OC[sp]:
						trl = OC[sp][prod.index(i)] + 1
						phi[(s,sp)] = 1
						phii[(s,sp)] = i
						phij[(s,sp)] = trl
						
	
	############################################
	### Solve Model
	############################################
	model = defunction.de(
		prod, sg, time_step, resource_type, SS, resource_max, gammaL, gammaD,
		duration, trial_cost, resource_required, revenue_max, pb, success,
		last_time_step, last_trial, rev_run, rev_open, discounting_factor,
		phi, phii, phij, outcome)
	
	sttmr = timer.clock()
	results= opt.solve(model)
	fttmr = timer.clock()
	fin_mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
	
	model.solutions.load_from(results)	
	
	Scenario_Results = {}	
	for t in time_step:
		for s in SS:
			for i in product:
				for j in stage_gate:
					if model.Decision_X[i,j,t,s].value == 1:
						# Record each (product, trial, time) combination selected
						# in at least one scenario
						Scenario_Results.setdefault((i,j,t), 1)
		
	### Make Output Directory
	if not os.path.exists(output_directory):
		os.makedirs(output_directory)					
	
	save_file = "Deterministic_Solution"
	results.write(filename = os.path.join(output_directory, save_file))
						

	Finish_Time = timer.clock()
	Total_Solve_Time = fttmr - sttmr
	Total_Time = Finish_Time - start_time
	Objective_Value = results['Problem'][0]['Lower bound']
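	# The solver's reported bound is used as the ENPV objective value here; with
	# a nonzero mipgap the incumbent objective may differ from the true optimum
	# by up to that relative gap.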
	
	### Generate New File Name
	save_file = "Output" 
		
	### Open the save file and write the solution summary
	with open(os.path.join(output_directory, save_file), "w") as f:
		f.write('Total Solve Time: ' + str(Total_Solve_Time) + '\n')
		f.write('Total Time: ' + str(Total_Time) + '\n')
		f.write('ENPV: ' + str(Objective_Value) + '\n')
		f.write('Total Memory: ' + str(fin_mem - init_mem) + '\n')
		f.write(str(Scenario_Results) + '\n')
	
	from Core.Solvers.MSSP.MSSP_Results_Object import MSSP_Results_Object
	return_object = MSSP_Results_Object(Objective_Value, Total_Solve_Time,(fin_mem-init_mem), Total_Time)
	 
	return return_object
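A minimal, hypothetical driver for the routine above. 'load_problem_data' is only a placeholder for however the surrounding project builds the Pyomo data object consumed here, and the gap value and output path are illustrative.

# Hypothetical usage sketch -- the loader and paths are placeholders, not part of the source.
model_data = load_problem_data('PRDP_data.dat')
results_object = deterministic_PRDP_solve_with_return(
	mipgap=0.01,
	model_data=model_data,
	output_directory='output/deterministic')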