Beispiel #1
0
def test_solvers(options=None, argv=None):
    """
    This is the function executed by the command
        pyomo test-solvers [solver ...]

    Creates test suites from the 'test_solvers.yml' configuration file
    and executes them via unittest with a custom SolverTestRunner.

    NOTE(review): when ``argv`` is None, ``options`` is dereferenced
    without a None check -- confirm callers always supply ``options``
    in that case.
    """
    global rootdir
    rootdir = os.getcwd()
    if argv is None:
        if options.debug:
            if len(options.solver) == 0:
                print("Testing all solvers")
            else:
                print("Testing solver", options.solver[0])
        # Over-ride the value of sys.argv, which is used by unittest.main()
        sys.argv = ['test_solver']
    else:
        sys.argv = argv
    # Create the tests defined in the YAML configuration file
    autotest_options = Options()
    autotest_options.testname_format = "%s_TEST_%s"
    pyutilib.autotest.create_test_suites(filename=currdir + 'test_solvers.yml',
                                         _globals=globals(),
                                         options=autotest_options)
    # Execute the tests, using a custom test runner
    runner = SolverTestRunner()
    runner.options = options
    unittest.main(module=globals()['__name__'], testRunner=runner)
Beispiel #2
0
 def __init__(self):
     """Initialize an empty instance with default options."""
     # No data has been loaded yet.
     self._info = None
     self._data = None
     # Default to single-column output.
     opts = Options()
     opts.ncolumns = 1
     self.options = opts
Beispiel #3
0
 def __init__(self, options):
     """Store the option block, filling in defaults where missing."""
     # Fall back to an empty option container when none was given.
     options = Options() if options is None else options
     # Guarantee the nested 'runtime' option block exists.
     if options.runtime is None:
         options.runtime = Options()
     self.options = options
     # Log-file handler and saved state are created lazily.
     self.fileLogger = None
     self.original = None
Beispiel #4
0
def help_environment():
    """Print a summary of the Python interpreter and shell environment."""
    info = Options()
    #
    # Python interpreter details
    #
    info.python = Options()
    info.python.version = '%d.%d.%d' % sys.version_info[:3]
    info.python.executable = sys.executable
    info.python.platform = sys.platform
    # Best-effort: record installed packages when pip supports it.
    try:
        import pip
        info.python.packages = [
            Options(name=pkg.project_name, version=pkg.version)
            for pkg in pip.get_installed_distributions()
        ]
    except:
        pass
    #
    # Shell environment
    #
    info.environment = Options()
    path = os.environ.get('PATH', None)
    if path is not None:
        info.environment['shell path'] = path.split(os.pathsep)
    info.environment['python path'] = sys.path
    #
    print('#')
    print('# Information About the Python and Shell Environment')
    print('#')
    print(str(info))
Beispiel #5
0
def convert(options=Options(), parser=None, model_format=None):
    """Create a model and save it in the requested problem format.

    NOTE(review): the default ``options=Options()`` is a mutable default
    evaluated once at definition time and mutated below -- confirm
    whether callers rely on that shared instance.
    """
    global _format
    if not model_format is None:
        _format = model_format
    #
    # Import plugins
    #
    import pyomo.environ

    # Derive a default save-file name from the target format.
    if options.model.save_file is None:
        if _format == ProblemFormat.cpxlp:
            options.model.save_file = 'unknown.lp'
        else:
            options.model.save_file = 'unknown.' + str(_format)
    options.model.save_format = _format

    data = Options(options=options)

    model_data = None
    try:
        pyomo.scripting.util.setup_environment(data)

        pyomo.scripting.util.apply_preprocessing(data, parser=parser)

        # Preprocessing flagged an error; return an empty container.
        if data.error:
            return Container()

        model_data = pyomo.scripting.util.create_model(data)

        model_data.options = options
    except:

        # TBD: I should be able to call this function in the case of
        #      an exception to perform cleanup. However, as it stands
        #      calling finalize with its default keyword value for
        #      model(=None) results in an a different error related to
        #      task port values.  Not sure how to interpret that.
        pyomo.scripting.util.finalize(data,
                                      model=ConcreteModel(),
                                      instance=None,
                                      results=None)
        raise

    else:

        pyomo.scripting.util.finalize(data, model=model_data.model)

    return model_data
Beispiel #6
0
    def __init__(self, **kwds):
        """Pass kwds to update the options attribute after setting defaults"""
        self.cache = {}
        self.options = Options()
        # Default settings; any of these may be overridden by kwds below.
        defaults = {
            "graph": None,
            "tear_set": None,
            "select_tear_method": "mip",
            "run_first_pass": True,
            "solve_tears": True,
            "guesses": ComponentMap(),
            "default_guess": None,
            "almost_equal_tol": 1.0E-8,
            "log_info": False,
            "tear_method": "Direct",
            "iterLim": 40,
            "tol": 1.0E-5,
            "tol_type": "abs",
            "report_diffs": False,
            "accel_min": -5,
            "accel_max": 0,
            "tear_solver": "cplex",
            "tear_solver_io": None,
            "tear_solver_options": {},
        }
        self.options.update(defaults)
        self.options.update(kwds)
Beispiel #7
0
    def __init__(self, **kwds):
        """Construct the Xpress shell-solver interface."""
        # Initialize the licensed system-call base class as an 'xpress'
        # solver.
        kwds['type'] = 'xpress'
        ILMLicensedSystemCallSolver.__init__(self, **kwds)

        # LP vs. MIP matters on the Xpress command line and when reading
        # the solution file.
        self.is_mip = kwds.pop('is_mip', False)

        # Problem formats we can emit, and the results format for each.
        self._valid_problem_formats = [ProblemFormat.cpxlp, ProblemFormat.mps]
        self._valid_result_formats = {
            ProblemFormat.cpxlp: [ResultsFormat.soln],
            ProblemFormat.mps: [ResultsFormat.soln],
        }
        self.set_problem_format(ProblemFormat.cpxlp)

        # Declared solver capabilities (undefined ones default to None).
        self._capabilities = Options()
        for feature in ('linear', 'quadratic_objective',
                        'quadratic_constraint', 'integer', 'sos1', 'sos2'):
            setattr(self._capabilities, feature, True)
Beispiel #8
0
    def __init__(self, **kwargs):
        """Construct the GLPK shell-solver interface."""
        configure_glpk()

        # Initialize the system-call base class as a 'glpk' solver.
        kwargs['type'] = 'glpk'
        SystemCallSolver.__init__(self, **kwargs)

        self._rawfile = None

        # Problem formats we can emit, and the results format for each.
        self._valid_problem_formats = [
            ProblemFormat.cpxlp, ProblemFormat.mps, ProblemFormat.mod
        ]
        self._valid_result_formats = {}
        for fmt in (ProblemFormat.mod, ProblemFormat.cpxlp, ProblemFormat.mps):
            self._valid_result_formats[fmt] = ResultsFormat.soln
        self.set_problem_format(ProblemFormat.cpxlp)

        # Declared solver capabilities (undefined ones default to None).
        self._capabilities = Options()
        self._capabilities.linear = True
        self._capabilities.integer = True
Beispiel #9
0
    def __init__(self, **kwds):
        """Construct the BARON shell-solver interface."""
        # Initialize the system-call base class as a 'baron' solver.
        kwds['type'] = 'baron'
        SystemCallSolver.__init__(self, **kwds)

        # Timing file produced during a solve; created lazily.
        self._tim_file = None

        # BAR is the only problem format; results come back as SOLN files.
        self._valid_problem_formats = [ProblemFormat.bar]
        self._valid_result_formats = {ProblemFormat.bar: [ResultsFormat.soln]}
        self.set_problem_format(ProblemFormat.bar)

        # Declared capabilities (undefined ones default to None).
        self._capabilities = Options()
        for feature, supported in (('linear', True),
                                   ('quadratic_objective', True),
                                   ('quadratic_constraint', True),
                                   ('integer', True),
                                   ('sos1', False),
                                   ('sos2', False)):
            setattr(self._capabilities, feature, supported)

        # CLH: Copied from cpxlp.py, the cplex file writer.
        # Keven Hunter made a nice point about using %.16g in his attachment
        # to ticket #4319.  Using %.17g instead mocks the behavior of %r
        # (i.e., float('%r'%<number>) == <number>) with the added benefit
        # of outputting (+/-).  The only case where this fails to mock the
        # behavior of %r is for large (long) integers (L), which is a rare
        # case to run into and is probably indicative of other issues with
        # the model.
        # *** NOTE ***: If you use 'r' or 's' here, it will break code that
        #               relies on using '%+' before the formatting character
        #               and you will need to go add extra logic to output
        #               the number's sign.
        self._precision_string = '.17g'
Beispiel #10
0
    def __init__(self, **kwds):
        """Construct the CPLEX shell-solver interface."""
        # Initialize the licensed system-call base class as a 'cplex'
        # solver.
        kwds['type'] = 'cplex'
        ILMLicensedSystemCallSolver.__init__(self, **kwds)

        # Transient warm-start state shared across _presolve,
        # _apply_solver, and _postsolve (eventually this belongs in a
        # common base class).
        self._warm_start_solve = False
        # Temporary name of the MST warm-start file, if any.
        self._warm_start_file_name = None

        # Problem formats we can emit, and the results format for each.
        self._valid_problem_formats = [ProblemFormat.cpxlp, ProblemFormat.mps]
        self._valid_result_formats = {
            ProblemFormat.cpxlp: [ResultsFormat.soln],
            ProblemFormat.mps: [ResultsFormat.soln],
        }
        self.set_problem_format(ProblemFormat.cpxlp)

        # Declared capabilities (undefined ones default to None).
        self._capabilities = Options()
        for feature in ('linear', 'quadratic_objective',
                        'quadratic_constraint', 'integer', 'sos1', 'sos2'):
            setattr(self._capabilities, feature, True)
Beispiel #11
0
 def __init__(self, **kwds):
     """Construct the generic ASL solver interface."""
     # Default the solver type to 'asl' unless the caller specified one.
     kwds.setdefault("type", "asl")
     SystemCallSolver.__init__(self, **kwds)
     # This interface dispatches to an underlying AMPL-solver executable.
     self._metasolver = True
     #
     # NL is the only problem format; results come back as SOL files.
     #
     self._valid_problem_formats = [ProblemFormat.nl]
     self._valid_result_formats = {ProblemFormat.nl: [ResultsFormat.sol]}
     self.set_problem_format(ProblemFormat.nl)
     #
     # Declared capabilities (undefined ones default to None).
     #
     self._capabilities = Options()
     for feature in ('linear', 'integer', 'quadratic_objective',
                     'quadratic_constraint', 'sos1', 'sos2'):
         setattr(self._capabilities, feature, True)
Beispiel #12
0
class PyomoDataCommands(object):
    """Data manager plugin that executes Pyomo *.dat command files."""

    def __init__(self):
        self._info = []
        self.options = Options()

    def available(self):
        # This data manager has no external dependencies.
        return True

    def initialize(self, **kwargs):
        # 'filename' is required; all remaining keywords become options.
        self.filename = kwargs.pop('filename')
        self.add_options(**kwargs)

    def add_options(self, **kwargs):
        self.options.update(kwargs)

    def open(self):
        if self.filename is None:  #pragma:nocover
            raise IOError("No filename specified")
        if not os.path.exists(self.filename):  #pragma:nocover
            raise IOError("Cannot find file '%s'" % self.filename)

    def close(self):
        pass

    def read(self):
        """
        This function does nothing, since executing Pyomo data commands
        both reads and processes the data all at once.
        """
        pass

    def write(self, data):  #pragma:nocover
        """
        This function does nothing, because we cannot write to a *.dat file.
        """
        pass

    def process(self, model, data, default):
        """
        Read Pyomo data commands and process the data.
        """
        _process_include(['include', self.filename], model, data, default,
                         self.options)

    def clear(self):
        self._info = []
Beispiel #13
0
def results_schema():
    """Print the predefined schema of a SolverResults object."""
    # Emit a usage line when extra command-line arguments are present.
    if len(sys.argv) > 1:
        print(
            "results_schema  - Print the predefined schema in a SolverResults object"
        )
    schema_options = Options(schema=True)
    results = SolverResults()
    repn = results._repn_(schema_options)
    results.pprint(sys.stdout, schema_options, repn=repn)
Beispiel #14
0
def initialize(**kwds):
    """Build an Options object describing a solver's availability.

    Keyword arguments seed the object (typically 'name', 'io', and
    'capabilities').  Sets obj.available, obj.demo_limits, and (when
    available) obj.version.

    Raises ValueError when a requested capability is not supported by
    the solver.
    """
    obj = Options(**kwds)
    #
    # Set obj.available
    #
    try:
        opt = SolverFactory(obj.name, solver_io=obj.io)
    except Exception:
        # SolverFactory can fail for unknown or misconfigured solvers;
        # treat any failure as "solver not available".  (Was a bare
        # except, which also swallowed KeyboardInterrupt/SystemExit.)
        opt = None

    if opt is None or isinstance(opt, UnknownSolver):
        obj.available = False
    elif not opt.available(exception_flag=False):
        obj.available = False
    elif hasattr(opt, 'executable') and opt.executable() is None:
        obj.available = False
    elif not opt.license_is_valid() \
         and obj.name not in licensed_solvers_with_demo_mode:
        obj.available = False
    else:
        obj.available = True
    #
    # Set the limits for the solver's "demo" (unlicensed) mode:
    #   ( nVars, nCons, nNonZeros )
    obj.demo_limits = (None, None, None)
    if obj.available:
        if obj.name == "baron" and not opt.license_is_valid():
            obj.demo_limits = (10, 10, 50)
    #
    # Check capabilities, even if the solver is not available
    #
    if not (opt is None or isinstance(opt, UnknownSolver)):
        for _c in obj.capabilities:
            if _c not in opt._capabilities:
                raise ValueError("Solver %s does not support capability %s!" %
                                 (obj.name, _c))
    #
    # Get version
    #
    if obj.available:
        obj.version = opt.version()
    return obj
Beispiel #15
0
    def test_t1(self):
        """Round-trip a simple LP through the MIP server worker and
        compare the results against a JSON baseline."""
        # Run a simple model
        model = ConcreteModel()
        model.A = RangeSet(1, 4)
        model.x = Var(model.A, bounds=(-1, 1))

        def obj_rule(model):
            return sum_product(model.x)

        model.obj = Objective(rule=obj_rule)

        def c_rule(model):
            expr = 0
            for i in model.A:
                expr += i * model.x[i]
            return expr == 0

        model.c = Constraint(rule=c_rule)

        #
        # Build the request payload: LP file contents plus the solver
        # name and (empty) option dictionaries expected by the worker.
        data = Options()
        data.suffixes = {}
        data.solver_options = {}
        data.warmstart_filename = None
        data.filename = currdir + 't1.lp'
        model.write(data['filename'])
        INPUT = open(data['filename'], 'r')
        data['file'] = INPUT.read()
        INPUT.close()
        data['opt'] = 'glpk'
        data.kwds = {}
        #
        results = self.worker.process(data)

        # Decode, evaluate and unpickle results
        if using_pyro4:
            # These two conversions are in place to unwrap
            # the hacks placed in the pyro_mip_server
            # before transmitting the results
            # object. These hacks are put in place to
            # avoid errors when transmitting the pickled
            # form of the results object with the default Pyro4
            # serializer (Serpent)
            if six.PY3:
                results = base64.decodebytes(ast.literal_eval(results))
            else:
                results = base64.decodestring(results)

        results = pickle.loads(results)

        #
        # Write the results as JSON and compare against the baseline
        # within tolerance; then clean up the generated LP file.
        results.write(filename=currdir + "t1.out", format='json')
        self.assertMatchesJsonBaseline(currdir + "t1.out",
                                       currdir + "t1.txt",
                                       tolerance=1e-4)
        self.assertEqual(results._smap_id, None)
        os.remove(data['filename'])
Beispiel #16
0
 def setUp(self, testcase, options):
     """Prepare the working directory and construct the solver plugin."""
     global tmpdir
     tmpdir = os.getcwd()
     os.chdir(options.currdir)
     # Use sequential temporary files rooted in the test directory.
     pyutilib.services.TempfileManager.push()
     pyutilib.services.TempfileManager.sequential_files(0)
     pyutilib.services.TempfileManager.tempdir = options.currdir
     #
     # A 'solver:subsolver' spec selects a sub-solver via solver options.
     if ':' in options.solver:
         solver, sub_solver = options.solver.split(':')
         _options = (Options() if options.solver_options is None
                     else options.solver_options)
         _options.solver = sub_solver
         testcase.opt = pyomo.opt.SolverFactory(solver, options=_options)
     else:
         testcase.opt = pyomo.opt.SolverFactory(
             options.solver, options=options.solver_options)
     # Skip the test outright when the solver cannot be used.
     if testcase.opt is None or not testcase.opt.available(False):
         testcase.skipTest('Solver %s is not available' % options.solver)
Beispiel #17
0
def initialize(**kwds):
    """Build an Options object describing a solver's availability.

    Keyword arguments seed the object (typically 'name', 'io', and
    'capabilities').  Sets obj.available, obj.demo_limits, and (when
    available) obj.version.

    Raises ValueError when a requested capability is not supported by
    the solver.
    """
    obj = Options(**kwds)
    #
    # Set the limits for the solver's "demo" (unlicensed) mode:
    #   ( nVars, nCons, nNonZeros )
    obj.demo_limits = (None, None, None)
    if (obj.name == "baron") and \
       (not BARONSHELL.license_is_valid()):
        obj.demo_limits = (10, 10, 50)
    #
    #
    # Set obj.available
    #
    opt = None
    try:
        opt = SolverFactory(obj.name, solver_io=obj.io)
    except Exception:
        # SolverFactory can fail for unknown or misconfigured solvers;
        # treat any failure as "solver not available".  (Was a bare
        # except, which also swallowed KeyboardInterrupt/SystemExit.)
        pass
    if opt is None or isinstance(opt, UnknownSolver):
        obj.available = False
    elif (obj.name == "gurobi") and \
       (not GUROBISHELL.license_is_valid()):
        obj.available = False
    elif (obj.name in {"mosek_direct", "mosek_persistent"}) and \
       (not MOSEKDirect.license_is_valid()):
        obj.available = False
    else:
        obj.available = \
            (opt.available(exception_flag=False)) and \
            ((not hasattr(opt,'executable')) or \
            (opt.executable() is not None))
    #
    # Check capabilities, even if the solver is not available
    #
    if not (opt is None or isinstance(opt, UnknownSolver)):
        for _c in obj.capabilities:
            if _c not in opt._capabilities:
                raise ValueError("Solver %s does not support capability %s!" %
                                 (obj.name, _c))
    #
    # Get version
    #
    if obj.available:
        obj.version = opt.version()
    return obj
Beispiel #18
0
def run_convert(options=None, parser=None):
    """Convert a Pyomo model to the format implied by the options.

    Dispatches to convert_dakota for the 'dakota' format, otherwise to
    the generic convert routine.  Raises RuntimeError when the target
    format is missing or unrecognized.
    """
    from pyomo.scripting.convert import convert, convert_dakota
    # BUG FIX: the default used to be a shared, mutable Options()
    # instance evaluated once at definition time and mutated below;
    # create a fresh one per call instead.
    if options is None:
        options = Options()
    # Infer the save format from the file extension when not given.
    if options.model.save_format is None and options.model.save_file:
        options.model.save_format = options.model.save_file.split('.')[-1]
    #
    _format = guess_format(options.model.save_format)

    if options.model.save_format == 'dakota':
        return convert_dakota(options, parser)
    elif _format is None:
        if options.model.save_format is None:
            raise RuntimeError("Unspecified target conversion format!")
        else:
            raise RuntimeError("Unrecognized target conversion format (%s)!" %
                               (options.model.save_format, ))
    else:
        return convert(options, parser, _format)
Beispiel #19
0
    def __init__(self, **kwds):
        """Construct the CBC shell-solver interface."""
        # Initialize the base class as a 'cbc' solver.
        kwds['type'] = 'cbc'
        super(CBCSHELL, self).__init__(**kwds)

        # Transient warm-start state shared by _presolve/_apply_solver/
        # _postsolve (eventually this belongs in a common base class).
        self._warm_start_solve = False
        # Temporary name of the SOLN warm-start file, if any.
        self._warm_start_file_name = None

        # NL input / SOL output are only usable when this CBC build was
        # compiled with the ASL and is not too old.
        asl_capable = (_cbc_compiled_with_asl is not False) and \
            (_cbc_old_version is not True)

        self._valid_problem_formats = [ProblemFormat.cpxlp, ProblemFormat.mps]
        if asl_capable:
            self._valid_problem_formats.append(ProblemFormat.nl)

        self._valid_result_formats = {}
        self._valid_result_formats[ProblemFormat.cpxlp] = [ResultsFormat.soln]
        if asl_capable:
            self._valid_result_formats[ProblemFormat.nl] = [ResultsFormat.sol]
        self._valid_result_formats[ProblemFormat.mps] = [ResultsFormat.soln]

        # Undefined capabilities default to 'None'.
        self._capabilities = Options()
        self._capabilities.linear = True
        self._capabilities.integer = True
        # The quadratic capabilities may actually be true, but the
        # solution file contains oddities (extra variables not in the
        # symbol map?) that this plugin does not handle correctly.
        self._capabilities.quadratic_objective = False
        self._capabilities.quadratic_constraint = False
        # set_problem_format updates these flags: CBC handles SOS
        # constraints through the NL format, but currently not through
        # the LP format.
        self._capabilities.sos1 = False
        self._capabilities.sos2 = False

        self.set_problem_format(ProblemFormat.cpxlp)
Beispiel #20
0
    def __init__(self, **kwds):
        """Construct the direct GLPK solver interface."""
        # Initialize the base class as a 'glpk_direct' solver.
        kwds['type'] = 'glpk_direct'
        OptSolver.__init__(self, **kwds)

        # Transient warm-start flag shared across _presolve,
        # _apply_solver, and _postsolve (eventually this belongs in a
        # common base class).
        self.warm_start_solve = False
        self._timelimit = None

        # Declared capabilities (undefined ones default to None).
        self._capabilities = Options()
        for feature in ('linear', 'integer'):
            setattr(self._capabilities, feature, True)
Beispiel #21
0
    def __init__(self, **kwds):
        """Construct the PICO shell-solver interface."""
        # Initialize the base class as a 'pico' solver.
        kwds["type"] = "pico"
        SystemCallSolver.__init__(self, **kwds)

        # Problem formats we can emit, and the results format for each.
        self._valid_problem_formats = [
            ProblemFormat.cpxlp, ProblemFormat.nl, ProblemFormat.mps
        ]
        self._valid_result_formats = {
            ProblemFormat.cpxlp: [ResultsFormat.soln],
            ProblemFormat.nl: [ResultsFormat.sol],
            ProblemFormat.mps: [ResultsFormat.soln],
        }
        self.set_problem_format(ProblemFormat.cpxlp)

        # Declared capabilities (undefined ones default to None).
        self._capabilities = Options()
        self._capabilities.linear = True
        self._capabilities.integer = True
Beispiel #22
0
    def __init__(self, **kwds):
        """Construct the IPOPT shell-solver interface."""
        # Initialize the base class as an 'ipopt' solver.
        kwds["type"] = "ipopt"
        super(IPOPT, self).__init__(**kwds)

        # NL is the only problem format; results come back as SOL files.
        self._valid_problem_formats = [ProblemFormat.nl]
        self._valid_result_formats = {ProblemFormat.nl: [ResultsFormat.sol]}
        self.set_problem_format(ProblemFormat.nl)

        # Declared capabilities (undefined ones default to None):
        # continuous NLPs only -- no integers, no SOS.
        self._capabilities = Options()
        for feature, supported in (('linear', True),
                                   ('integer', False),
                                   ('quadratic_objective', True),
                                   ('quadratic_constraint', True),
                                   ('sos1', False),
                                   ('sos2', False)):
            setattr(self._capabilities, feature, supported)
Beispiel #23
0
def configure_loggers(options=None, shutdown=False):
    """Configure the 'pyomo' and 'pyutilib' loggers from options.runtime.

    When ``shutdown`` is True, the supplied options are ignored, logging
    is forced to 'quiet', and any file logger installed by a previous
    call is detached and closed.

    NOTE(review): ``configure_loggers.fileLogger`` is a function
    attribute -- confirm it is initialized elsewhere before the first
    shutdown call.  Also assumes ``options`` is not None when
    ``shutdown`` is False.
    """
    if shutdown:
        # Reset to quiet logging and tear down the file logger, if any.
        options = Options()
        options.runtime = Options()
        options.runtime.logging = 'quiet'
        if configure_loggers.fileLogger is not None:
            logging.getLogger('pyomo').handlers = []
            logging.getLogger('pyutilib').handlers = []
            configure_loggers.fileLogger.close()
            configure_loggers.fileLogger = None
            # TBD: This seems dangerous in Windows, as the process will
            # have multiple open file handles pointing to the same file.
            reset_redirect()

    #
    # Configure the logger
    #
    if options.runtime is None:
        options.runtime = Options()
    if options.runtime.logging == 'quiet':
        logging.getLogger('pyomo').setLevel(logging.ERROR)
    elif options.runtime.logging == 'warning':
        logging.getLogger('pyomo').setLevel(logging.WARNING)
    elif options.runtime.logging == 'info':
        logging.getLogger('pyomo').setLevel(logging.INFO)
        logging.getLogger('pyutilib').setLevel(logging.INFO)
    elif options.runtime.logging == 'verbose':
        logging.getLogger('pyomo').setLevel(logging.DEBUG)
        logging.getLogger('pyutilib').setLevel(logging.DEBUG)
    elif options.runtime.logging == 'debug':
        logging.getLogger('pyomo').setLevel(logging.DEBUG)
        logging.getLogger('pyutilib').setLevel(logging.DEBUG)

    # Redirect all pyomo/pyutilib log records to the requested log file.
    if options.runtime.logfile:
        configure_loggers.fileLogger \
            = logging.FileHandler(options.runtime.logfile, 'w')
        logging.getLogger('pyomo').handlers = []
        logging.getLogger('pyutilib').handlers = []
        logging.getLogger('pyomo').addHandler(configure_loggers.fileLogger)
        logging.getLogger('pyutilib').addHandler(configure_loggers.fileLogger)
        # TBD: This seems dangerous in Windows, as the process will
        # have multiple open file handles pointing to the same file.
        setup_redirect(options.runtime.logfile)
Beispiel #24
0
def run_test_scenarios(options):
    """Run every registered solver test scenario and print a per-solver
    summary table of expected/unexpected passes and failures.

    Scenarios are filtered to the solvers named in ``options.solver``
    (all solvers when that list is empty); 'skip' scenarios are ignored.
    """
    # Silence warnings while the scenarios run; re-enabled at the end.
    logging.disable(logging.WARNING)

    solvers = set(options.solver)
    # stat maps (model, solver, io) -> (passed: bool, reason: str)
    stat = {}

    for key, test_case in test_scenarios():
        model, solver, io = key
        if len(solvers) > 0 and not solver in solvers:
            continue
        if test_case.status == 'skip':
            continue

        # Create the model test class
        model_class = test_case.model()
        # Create the model instance
        model_class.generate_model()
        model_class.warmstart_model()
        # Solve
        symbolic_labels = False
        load_solutions = False
        opt, results = model_class.solve(solver, io,
                                         test_case.testcase.io_options, {},
                                         symbolic_labels, load_solutions)

        termination_condition = results['Solver'][0]['termination condition']
        # Validate solution status
        try:
            model_class.post_solve_test_validation(None, results)
        except:
            # Validation failed: expected for 'expected failure' cases,
            # otherwise record the unexpected termination condition.
            if test_case.status == 'expected failure':
                stat[key] = (True, "Expected failure")
            else:
                stat[key] = (False, "Unexpected termination condition: %s" %
                             str(termination_condition))
            continue
        if termination_condition == TerminationCondition.unbounded or \
           termination_condition == TerminationCondition.infeasible:
            # Unbounded or Infeasible
            stat[key] = (True, "")
        else:
            # Validate the solution returned by the solver
            if isinstance(model_class.model, IBlock):
                model_class.model.load_solution(results.solution)
            else:
                model_class.model.solutions.load_from(
                    results,
                    default_variable_value=opt.default_variable_value())
            rc = model_class.validate_current_solution(
                suffixes=model_class.test_suffixes)

            # Reconcile the validation outcome with the scenario's
            # expected status.
            if test_case.status == 'expected failure':
                if rc[0] is True:
                    stat[key] = (False, "Unexpected success")
                else:
                    stat[key] = (True, "Expected failure")
            else:
                if rc[0] is True:
                    stat[key] = (True, "")
                else:
                    stat[key] = (False, "Unexpected failure")

    if options.verbose:
        print("---------------")
        print(" Test Failures")
        print("---------------")
    nfail = 0
    #
    # Summarize the runtime statistics, by solver
    #
    summary = {}
    total = Options(NumEPass=0, NumEFail=0, NumUPass=0, NumUFail=0)
    for key in stat:
        model, solver, io = key
        if not solver in summary:
            summary[solver] = Options(NumEPass=0,
                                      NumEFail=0,
                                      NumUPass=0,
                                      NumUFail=0)
        _pass, _str = stat[key]
        if _pass:
            if _str == "Expected failure":
                summary[solver].NumEFail += 1
            else:
                summary[solver].NumEPass += 1
        else:
            nfail += 1
            if _str == "Unexpected failure":
                summary[solver].NumUFail += 1
                if options.verbose:
                    print("- Unexpected Test Failure: " +
                          ", ".join((model, solver, io)))
            else:
                summary[solver].NumUPass += 1
                if options.verbose:
                    print("- Unexpected Test Success: " +
                          ", ".join((model, solver, io)))
    if options.verbose:
        if nfail == 0:
            print("- NONE")
        print("")

    # Render the fixed-width summary table, sized to the longest
    # solver name.
    stream = sys.stdout
    maxSolverNameLen = max([max(len(name) for name in summary), len("Solver")])
    fmtStr = "{{0:<{0}}}| {{1:>8}} | {{2:>8}} | {{3:>10}} | {{4:>10}} | {{5:>13}}\n".format(
        maxSolverNameLen + 2)
    #
    stream.write("\n")
    stream.write("Solver Test Summary\n")
    stream.write("=" * (maxSolverNameLen + 66) + "\n")
    stream.write(
        fmtStr.format("Solver", "# Pass", "# Fail", "# OK Fail", "# Bad Pass",
                      "% OK"))
    stream.write("=" * (maxSolverNameLen + 66) + "\n")
    #
    for _solver in sorted(summary):
        ans = summary[_solver]
        total.NumEPass += ans.NumEPass
        total.NumEFail += ans.NumEFail
        total.NumUPass += ans.NumUPass
        total.NumUFail += ans.NumUFail
        stream.write(
            fmtStr.format(
                _solver, str(ans.NumEPass), str(ans.NumUFail),
                str(ans.NumEFail), str(ans.NumUPass),
                str(
                    int(100.0 * (ans.NumEPass + ans.NumEFail) /
                        (ans.NumEPass + ans.NumEFail + ans.NumUFail +
                         ans.NumUPass)))))
    #
    stream.write("=" * (maxSolverNameLen + 66) + "\n")
    stream.write(
        fmtStr.format(
            "TOTALS", str(total.NumEPass), str(total.NumUFail),
            str(total.NumEFail), str(total.NumUPass),
            str(
                int(100.0 * (total.NumEPass + total.NumEFail) /
                    (total.NumEPass + total.NumEFail + total.NumUFail +
                     total.NumUPass)))))
    stream.write("=" * (maxSolverNameLen + 66) + "\n")

    # Restore normal logging.
    logging.disable(logging.NOTSET)
Beispiel #25
0
    def __init__(self, **kwds):
        OptSolver.__init__(self, **kwds)

        self._pyomo_model = None
        """The pyomo model being solved."""

        self._solver_model = None
        """The python instance of the solver model (e.g., the gurobipy Model instance)."""

        self._symbol_map = SymbolMap()
        """A symbol map used to map between pyomo components and their names used with the solver."""

        self._labeler = None
        """The labeler for creating names for the solver model components."""

        self._pyomo_var_to_solver_var_map = ComponentMap()
        self._solver_var_to_pyomo_var_map = dict()
        """A dictionary mapping pyomo Var's to the solver variables."""

        self._pyomo_con_to_solver_con_map = dict()
        self._solver_con_to_pyomo_con_map = dict()
        """A dictionary mapping pyomo constraints to solver constraints."""

        self._vars_referenced_by_con = ComponentMap()
        """A dictionary mapping constraints to a ComponentSet containt the pyomo variables referenced by that
        constraint. This is primarily needed for the persistent solvers. When a constraint is deleted, we need
        to decrement the number of times those variables are referenced (see self._referenced_variables)."""

        self._vars_referenced_by_obj = ComponentSet()
        """A set containing the pyomo variables referenced by that the objective.
        This is primarily needed for the persistent solvers. When a the objective is deleted, we need
        to decrement the number of times those variables are referenced (see self._referenced_variables)."""

        self._objective = None
        """The pyomo Objective object currently being used with the solver."""

        self.results = None
        """A results object return from the solve method."""

        self._skip_trivial_constraints = False
        """A bool. If True, then any constraints with a constant body will not be added to the solver model.
        Be careful with this. If a trivial constraint is skipped then that constraint cannot be removed from
        a persistent solver (an error will be raised if a user tries to remove a non-existent constraint)."""

        self._output_fixed_variable_bounds = False
        """A bool. If False then an error will be raised if a fixed variable is used in one of the solver constraints.
        This is useful for catching bugs. Ordinarily a fixed variable should appear as a constant value in the
        solver constraints. If True, then the error will not be raised."""

        self._python_api_exists = False
        """A bool indicating whether or not the python api is available for the specified solver."""

        self._version = None
        """The version of the solver."""

        self._version_major = None
        """The major version of the solver. For example, if using Gurobi 7.0.2, then _version_major is 7."""

        self._symbolic_solver_labels = False
        """A bool. If true then the solver components will be given names corresponding to the pyomo component names."""

        self._capabilites = Options()

        self._referenced_variables = ComponentMap()
        """dict: {var: count} where count is the number of constraints/objective referencing the var"""

        self._keepfiles = False
        """A bool. If True, then the solver log will be saved."""

        self._save_results = True
        """A bool. This is used for backwards compatability. If True, the solution will be loaded into the Solution
Beispiel #26
0
def _process_load(cmd, _model, _data, _default, options=None):
    #print("LOAD %s" % cmd)
    from pyomo.core import Set

    _cmd_len = len(cmd)
    _options = {}
    _options['filename'] = cmd[1]
    i = 2
    while cmd[i] != ':':
        _options[cmd[i]] = cmd[i + 2]
        i += 3
    i += 1
    _Index = (None, [])
    if type(cmd[i]) is tuple:
        _Index = (None, cmd[i])
        i += 1
    elif i + 1 < _cmd_len and cmd[i + 1] == '=':
        _Index = (cmd[i], cmd[i + 2])
        i += 3
    _smap = OrderedDict()
    while i < _cmd_len:
        if i + 2 < _cmd_len and cmd[i + 1] == '=':
            _smap[cmd[i + 2]] = cmd[i]
            i += 3
        else:
            _smap[cmd[i]] = cmd[i]
            i += 1

    if len(cmd) < 2:
        raise IOError("The 'load' command must specify a filename")

    options = Options(**_options)
    for key in options:
        if not key in [
                'range', 'filename', 'format', 'using', 'driver', 'query',
                'table', 'user', 'password', 'database'
        ]:
            raise ValueError("Unknown load option '%s'" % key)

    global Filename
    Filename = options.filename

    global Lineno
    Lineno = 0
    #
    # TODO: process mapping info
    #
    if options.using is None:
        tmp = options.filename.split(".")[-1]
        data = DataManagerFactory(tmp)
        if (data is None) or \
           isinstance(data, UnknownDataManager):
            raise ApplicationError("Data manager '%s' is not available." % tmp)
    else:
        try:
            data = DataManagerFactory(options.using)
        except:
            data = None
        if (data is None) or \
           isinstance(data, UnknownDataManager):
            raise ApplicationError("Data manager '%s' is not available." %
                                   options.using)
    set_name = None
    #
    # Create symbol map
    #
    symb_map = _smap
    if len(symb_map) == 0:
        raise IOError(
            "Must specify at least one set or parameter name that will be loaded"
        )
    #
    # Process index data
    #
    _index = None
    index_name = _Index[0]
    _select = None
    #
    # Set the 'set name' based on the format
    #
    _set = None
    if options.format == 'set' or options.format == 'set_array':
        if len(_smap) != 1:
            raise IOError(
                "A single set name must be specified when using format '%s'" %
                options.format)
        set_name = list(_smap.keys())[0]
        _set = set_name
    #
    # Set the 'param name' based on the format
    #
    _param = None
    if options.format == 'transposed_array' or options.format == 'array' or options.format == 'param':
        if len(_smap) != 1:
            raise IOError(
                "A single parameter name must be specified when using format '%s'"
                % options.format)
    if options.format in ('transposed_array', 'array', 'param', None):
        if _Index[0] is None:
            _index = None
        else:
            _index = _Index[0]
        _param = []
        _select = list(_Index[1])
        for key in _smap:
            _param.append(_smap[key])
            _select.append(key)
    if options.format in ('transposed_array', 'array'):
        _select = None

    #print "YYY", _param, options
    if not _param is None and len(
            _param) == 1 and not _model is None and isinstance(
                getattr(_model, _param[0]), Set):
        _select = None
        _set = _param[0]
        _param = None
        _index = None

    #print "SELECT", _param, _select
    #
    data.initialize(model=options.model,
                    filename=options.filename,
                    index=_index,
                    index_name=index_name,
                    param_name=symb_map,
                    set=_set,
                    param=_param,
                    format=options.format,
                    range=options.range,
                    query=options.query,
                    using=options.using,
                    table=options.table,
                    select=_select,
                    user=options.user,
                    password=options.password,
                    database=options.database)
    #
    data.open()
    try:
        data.read()
    except Exception:
        data.close()
        raise
    data.close()
    data.process(_model, _data, _default)
Beispiel #27
0
def _process_table(cmd, _model, _data, _default, options=None):
    #print("TABLE %s" % cmd)
    #
    _options = {}
    _set = OrderedDict()
    _param = OrderedDict()
    _labels = []

    _cmd = cmd[1]
    _cmd_len = len(_cmd)
    name = None
    i = 0
    while i < _cmd_len:
        try:
            #print("CMD i=%s cmd=%s" % (i, _cmd[i:]))
            #
            # This should not be error prone, so we treat errors
            # with a general exception
            #

            #
            # Processing labels
            #
            if _cmd[i] == ':':
                i += 1
                while i < _cmd_len:
                    _labels.append(_cmd[i])
                    i += 1
                continue
            #
            # Processing options
            #
            name = _cmd[i]
            if i + 1 == _cmd_len:
                _param[name] = []
                _labels = ['Z']
                i += 1
                continue
            if _cmd[i + 1] == '=':
                if type(_cmd[i + 2]) is list:
                    _set[name] = _cmd[i + 2]
                else:
                    _options[name] = _cmd[i + 2]
                i += 3
                continue
            # This should be a parameter declaration
            if not type(_cmd[i + 1]) is tuple:
                raise IOError
            if i + 2 < _cmd_len and _cmd[i + 2] == '=':
                _param[name] = (_cmd[i + 1], _cmd[i + 3][0])
                i += 4
            else:
                _param[name] = _cmd[i + 1]
                i += 2
        except:
            raise IOError("Error parsing table options: %s" % name)

    #print("_options %s" % _options)
    #print("_set %s" % _set)
    #print("_param %s" % _param)
    #print("_labels %s" % _labels)


#
    options = Options(**_options)
    for key in options:
        if not key in ['columns']:
            raise ValueError("Unknown table option '%s'" % key)
    #
    ncolumns = options.columns
    if ncolumns is None:
        ncolumns = len(_labels)
        if ncolumns == 0:
            if not (len(_set) == 1 and len(_set[_set.keys()[0]]) == 0):
                raise IOError(
                    "Must specify either the 'columns' option or column headers"
                )
            else:
                ncolumns = 1
    else:
        ncolumns = int(ncolumns)
    #
    data = cmd[2]
    Ldata = len(cmd[2])
    #
    cmap = {}
    if len(_labels) == 0:
        for i in range(ncolumns):
            cmap[i + 1] = i
        for label in _param:
            ndx = cmap[_param[label][1]]
            if ndx < 0 or ndx >= ncolumns:
                raise IOError("Bad column value %s for data %s" %
                              (str(ndx), label))
            cmap[label] = ndx
            _param[label] = _param[label][0]
    else:
        i = 0
        for label in _labels:
            cmap[label] = i
            i += 1
    #print("CMAP %s" % cmap)
    #
    #print("_param %s" % _param)
    #print("_set %s" % _set)
    for sname in _set:
        # Creating set sname
        cols = _set[sname]
        tmp = []
        for col in cols:
            if not col in cmap:
                raise IOError(
                    "Unexpected table column '%s' for index set '%s'" %
                    (col, sname))
            tmp.append(cmap[col])
        if not sname in cmap:
            cmap[sname] = tmp
        cols = list(flatten_tuple(tmp))
        #
        _cmd = ['set', sname, ':=']
        i = 0
        while i < Ldata:
            row = []
            #print("COLS %s  NCOLS %d" % (cols, ncolumns))
            for col in cols:
                #print("Y %s %s" % (i, col))
                row.append(data[i + col])
            if len(row) > 1:
                _cmd.append(tuple(row))
            else:
                _cmd.append(row[0])
            i += ncolumns
        #print("_data %s" % _data)
        _process_set(_cmd, _model, _data)
    #
    #print("CMAP %s" % cmap)
    _i = 0
    if ncolumns == 0:
        raise IOError
    for vname in _param:
        _i += 1
        # create value vname
        cols = _param[vname]
        tmp = []
        for col in cols:
            #print("COL %s" % col)
            if not col in cmap:
                raise IOError(
                    "Unexpected table column '%s' for table value '%s'" %
                    (col, vname))
            tmp.append(cmap[col])
        #print("X %s %s" % (len(cols), tmp))
        cols = list(flatten_tuple(tmp))
        #print("X %s" % len(cols))
        #print("VNAME %s %s" % (vname, cmap[vname]))
        if vname in cmap:
            cols.append(cmap[vname])
        else:
            cols.append(ncolumns - 1 - (len(_param) - _i))
        #print("X %s" % len(cols))
        #
        _cmd = ['param', vname, ':=']
        i = 0
        while i < Ldata:
            #print("HERE %s %s %s" % (i, cols, ncolumns))
            for col in cols:
                _cmd.append(data[i + col])
            i += ncolumns
        #print("HERE %s" % _cmd)
        #print("_data %s" % _data)
        _process_param(_cmd, _model, _data, None, ncolumns=len(cols))
Beispiel #28
0
import sys
import pyomo.environ
from pyomo.opt import SolverFactory
from pyomo.common.collections import Options

from indexnonlin import model

# Display the abstract model before creating a concrete instance.
model.pprint()

model.skip_canonical_repn = True # for nonlinear models

instance = model.create()

# Configure the ASL interface to run the ipopt executable.
solver_name = "asl"
solver_opts = Options()
solver_opts.solver = "ipopt"
opt = SolverFactory(solver_name, options=solver_opts)
if opt is None:
    print("Could not construct solver %s:%s" % (solver_name, solver_opts.solver))
    sys.exit(1)

# Solve, report, and push the solution back onto the instance.
results = opt.solve(instance)
results.write()
instance.load(results) # put results in model

# because we know there is a variable named x
x_var = instance.x
# because we know there is an index named xAxis
x_val = x_var["xAxis"]()
Beispiel #29
0
def convert_dakota(options=None, parser=None):
    """Convert a Pyomo model into an AMPL .nl file plus a Dakota input fragment.

    Writes the model in .nl format (with .row/.col symbol files, enabled via
    symbolic solver labels) and emits a ``<model>.dak`` file containing Dakota
    'variables', 'interface' and 'responses' blocks built from the model's
    active Vars, Objectives, and Constraints.

    Args:
        options: an Options container with conversion settings; this function
            reads and writes ``options.model.*``.  A fresh Options() is
            created when None.
        parser: passed through unchanged to the core convert() routine.

    Returns:
        The model-data object produced by convert().
    """
    #
    # Import plugins
    #
    import pyomo.environ

    # Create a fresh container per call.  The original signature used a
    # mutable default argument (options=Options()) which this function then
    # mutated, leaking configuration state between calls.
    if options is None:
        options = Options()

    # NOTE(review): if options.model.save_file is None, this basename call
    # fails before the default below can be applied -- the default-filename
    # logic looks circular; confirm the intended source of the default name.
    model_file = os.path.basename(options.model.save_file)
    model_file_no_ext = os.path.splitext(model_file)[0]

    #
    # Set options for writing the .nl and related files
    #

    # By default replace .py with .nl
    if options.model.save_file is None:
        options.model.save_file = model_file_no_ext + '.nl'
    options.model.save_format = ProblemFormat.nl
    # Dakota requires .row/.col files
    options.model.symbolic_solver_labels = True

    #
    # Call the core converter
    #
    model_data = convert(options, parser)

    #
    # Generate Dakota input file fragments for the Vars, Objectives, Constraints
    #

    # TODO: the converted model doesn't expose the right symbol_map
    #       for only the vars active in the .nl

    model = model_data.instance

    # Collect descriptors, bounds, and initial values for the variables that
    # actually appear in the .nl file (i.e. are present in the symbol map).
    variables = 0
    var_descriptors = []
    var_lb = []
    var_ub = []
    var_initial = []
    tmpDict = model_data.symbol_map.getByObjectDictionary()
    for var in model.component_data_objects(Var, active=True):
        if id(var) in tmpDict:
            variables += 1
            var_descriptors.append(var.name)

            # apply user bound, domain bound, or infinite
            _lb, _ub = var.bounds
            var_lb.append(str(_lb) if _lb is not None else "-inf")
            var_ub.append(str(_ub) if _ub is not None else "inf")

            # An uninitialized variable has no value; record 'None'.
            try:
                val = value(var)
            except Exception:
                val = None
            var_initial.append(str(val))

    objectives = 0
    obj_descriptors = []
    for obj in model.component_data_objects(Objective, active=True):
        objectives += 1
        obj_descriptors.append(obj.name)

    constraints = 0
    cons_descriptors = []
    cons_lb = []
    cons_ub = []
    for con in model.component_data_objects(Constraint, active=True):
        constraints += 1
        cons_descriptors.append(con.name)
        cons_lb.append(str(con.lower) if con.lower is not None else "-inf")
        cons_ub.append(str(con.upper) if con.upper is not None else "inf")

    # Write the Dakota input file fragments.  A context manager guarantees
    # the file is closed even if a write fails (the original relied on an
    # explicit close() that was skipped on error).
    with open(model_file_no_ext + ".dak", 'w') as dakfrag:
        dakfrag.write("#--- Dakota variables block ---#\n")
        dakfrag.write("variables\n")
        dakfrag.write("  continuous_design " + str(variables) + '\n')
        dakfrag.write("    descriptors\n")
        for vd in var_descriptors:
            dakfrag.write("      '%s'\n" % vd)
        dakfrag.write("    lower_bounds " + " ".join(var_lb) + '\n')
        dakfrag.write("    upper_bounds " + " ".join(var_ub) + '\n')
        dakfrag.write("    initial_point " + " ".join(var_initial) + '\n')

        dakfrag.write("#--- Dakota interface block ---#\n")
        dakfrag.write("interface\n")
        dakfrag.write("  algebraic_mappings = '" + options.model.save_file + "'\n")

        dakfrag.write("#--- Dakota responses block ---#\n")
        dakfrag.write("responses\n")
        dakfrag.write("  objective_functions " + str(objectives) + '\n')

        if (constraints > 0):
            dakfrag.write("  nonlinear_inequality_constraints " +
                          str(constraints) + '\n')
            dakfrag.write("    lower_bounds " + " ".join(cons_lb) + '\n')
            dakfrag.write("    upper_bounds " + " ".join(cons_ub) + '\n')

        # Response descriptors: objectives first, then constraints.
        dakfrag.write("    descriptors\n")
        for od in obj_descriptors:
            dakfrag.write("      '%s'\n" % od)
        if (constraints > 0):
            for cd in cons_descriptors:
                dakfrag.write("      '%s'\n" % cd)

        # TODO: detect whether gradient information available in model
        dakfrag.write("  analytic_gradients\n")
        dakfrag.write("  no_hessians\n")

    sys.stdout.write("Dakota input fragment written to file '%s'\n" %
                     (model_file_no_ext + ".dak", ))
    return model_data
Beispiel #30
0
                case = MissingSuffixFailures[solver, io, _model.description]
                if _solver_case.version is not None and\
                   case[0](_solver_case.version):
                    if type(case[1]) is dict:
                        exclude_suffixes.update(case[1])
                    else:
                        for x in case[1]:
                            exclude_suffixes[x] = (True, {})
                    msg = case[2]

            # Return scenario dimensions and scenario information
            yield (model, solver,
                   io), Options(status=status,
                                msg=msg,
                                model=_model,
                                solver=None,
                                testcase=_solver_case,
                                demo_limits=_solver_case.demo_limits,
                                exclude_suffixes=exclude_suffixes)


@unittest.nottest
def run_test_scenarios(options):
    logging.disable(logging.WARNING)

    solvers = set(options.solver)
    stat = {}

    for key, test_case in test_scenarios():
        model, solver, io = key
        if len(solvers) > 0 and not solver in solvers: