Example #1
def test_scenarios(arg=None):
    """
    Generate scenarios
    """
    for model in sorted(test_models()):
        _model = test_models(model)
        if arg is not None and not arg(_model):
            continue
        for solver, io in sorted(test_solver_cases()):
            _solver_case = test_solver_cases(solver, io)

            # Skip this test case if the solver doesn't support the
            # capabilities required by the model
            if not _model.capabilities.issubset(_solver_case.capabilities):
                continue

            # Set status values for expected failures
            status = 'ok'
            msg = ""
            if not _solver_case.available:
                status = 'skip'
                msg = "Skipping test because solver %s (%s) is unavailable" \
                    % (solver, io)
            if (solver, io, model) in ExpectedFailures:
                case = ExpectedFailures[solver, io, model]
                if _solver_case.version is not None and \
                   case[0](_solver_case.version):
                    status = 'expected failure'
                    msg = case[1]

            # Return scenario dimensions and scenario information
            yield (model, solver, io), Options(
                status=status, msg=msg, model=_model, solver=None,
                testcase=_solver_case)
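
For orientation, here is a minimal, hypothetical sketch of how a generator like this is consumed; the capability filter passed as arg is an illustrative assumption (the snippet only requires a callable that takes the model object):

# Hypothetical usage sketch: iterate all scenarios, skip the ones the
# generator marked 'skip', and keep only models advertising an 'integer'
# capability (the capability name is an illustrative assumption).
for (model, solver, io), info in test_scenarios(
        arg=lambda m: 'integer' in m.capabilities):
    if info.status == 'skip':
        continue
    print(model, solver, io, info.status, info.msg)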
Example #2
def test_scenarios(arg=None):
    """
    Generate scenarios
    """
    for model in sorted(test_models()):
        _model = test_models(model)
        if arg is not None and not arg(_model):
            continue
        for solver, io in sorted(test_solver_cases()):
            _solver_case = test_solver_cases(solver, io)

            # Skip this test case if the solver doesn't support the
            # capabilities required by the model
            if not _model.capabilities.issubset(_solver_case.capabilities):
                continue

            # Set status values for expected failures
            status = 'ok'
            msg = ""
            if not _solver_case.available:
                status = 'skip'
                msg = "Skipping test because solver %s (%s) is unavailable" \
                    % (solver, io)
            if (solver, io, _model.description) in ExpectedFailures:
                case = ExpectedFailures[solver, io, _model.description]
                if _solver_case.version is not None and \
                   case[0](_solver_case.version):
                    status = 'expected failure'
                    msg = case[1]

            # Return scenario dimensions and scenario information
            yield (model, solver, io), Options(
                status=status, msg=msg, model=_model, solver=None,
                testcase=_solver_case)
Example #3
def test_scenarios(arg=None):
    """
    Generate scenarios
    """
    for model in sorted(test_models()):
        _model = test_models(model)
        if arg is not None and not arg(_model):
            continue
        for solver, io in sorted(test_solver_cases()):
            _solver_case = test_solver_cases(solver, io)
            _ver = _solver_case.version

            # Skip this test case if the solver doesn't support the
            # capabilities required by the model
            if not _model.capabilities.issubset(_solver_case.capabilities):
                continue

            # Set status values for expected failures
            exclude_suffixes = {}
            status = 'ok'
            msg = ""
            case_skip = SkipTests.get((solver, io, _model.description), None)
            case_suffix = MissingSuffixFailures.get(
                (solver, io, _model.description), None)
            case_fail = ExpectedFailures.get((solver, io, _model.description),
                                             None)
            if not _solver_case.available:
                status = 'skip'
                msg = ("Skipping test because solver %s (%s) is unavailable" %
                       (solver, io))
            elif (case_skip is not None and _ver is not None
                  and case_skip[0](_ver)):
                status = 'skip'
                msg = case_skip[1]
            elif (case_fail is not None and _ver is not None
                  and case_fail[0](_ver)):
                status = 'expected failure'
                msg = case_fail[1]
            elif (case_suffix is not None and _ver is not None
                  and case_suffix[0](_ver)):
                if type(case_suffix[1]) is dict:
                    exclude_suffixes.update(case_suffix[1])
                else:
                    for x in case_suffix[1]:
                        exclude_suffixes[x] = (True, {})
                msg = case_suffix[2]

            # Return scenario dimensions and scenario information
            yield (model, solver, io), Bunch(
                status=status,
                msg=msg,
                model=_model,
                solver=None,
                testcase=_solver_case,
                demo_limits=_solver_case.demo_limits,
                exclude_suffixes=exclude_suffixes)
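
The three lookup tables consulted above share a key shape of (solver, io, model description) and store a version predicate in slot 0. A hedged sketch of plausible entries follows; the solver, model, and message strings are illustrative, not taken from the real tables:

# Illustrative entries only; the tuple shapes mirror the lookups above:
# case[0] is a predicate applied to the solver version, case[1] is the
# message (SkipTests/ExpectedFailures) or the suffixes to exclude
# (MissingSuffixFailures, where case[2] is then the message).
ExpectedFailures = {
    ('some_solver', 'lp', 'some_model'):
        (lambda v: v < (1, 2, 0), "illustrative failure reason"),
}
MissingSuffixFailures = {
    ('some_solver', 'nl', 'some_model'):
        (lambda v: True, {'dual': (True, {})}, "illustrative suffix note"),
}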
Example #4
def test_scenarios(arg=None):
    """
    Generate scenarios
    """
    for model in sorted(test_models()):
        _model = test_models(model)
        if arg is not None and not arg(_model):
            continue
        for solver, io in sorted(test_solver_cases()):
            _solver_case = test_solver_cases(solver, io)

            # Skip this test case if the solver doesn't support the
            # capabilities required by the model
            if not _model.capabilities.issubset(_solver_case.capabilities):
                continue

            # Set status values for expected failures
            exclude_suffixes = {}
            status = 'ok'
            msg = ""
            if not _solver_case.available:
                status = 'skip'
                msg = "Skipping test because solver %s (%s) is unavailable" % (
                    solver, io)
            if (solver, io, _model.description) in ExpectedFailures:
                case = ExpectedFailures[solver, io, _model.description]
                if _solver_case.version is not None and\
                   case[0](_solver_case.version):
                    status = 'expected failure'
                    msg = case[1]
            if (solver, io, _model.description) in MissingSuffixFailures:
                case = MissingSuffixFailures[solver, io, _model.description]
                if _solver_case.version is not None and\
                   case[0](_solver_case.version):
                    if type(case[1]) is dict:
                        exclude_suffixes.update(case[1])
                    else:
                        for x in case[1]:
                            exclude_suffixes[x] = (True, {})
                    msg = case[2]

            # Return scenario dimensions and scenario information
            # (the excerpt was truncated here; the yield is restored to
            # match the pattern of the parallel examples above)
            yield (model, solver, io), Options(
                status=status, msg=msg, model=_model, solver=None,
                testcase=_solver_case, exclude_suffixes=exclude_suffixes)
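
The dict-or-sequence handling of case[1] above can be read as a small normalization step; restated standalone as a self-contained sketch:

# Standalone restatement of the suffix normalization above: a dict entry
# is merged as-is, while a plain sequence of suffix names is expanded to
# the equivalent {name: (True, {})} form.
def normalize_exclude_suffixes(spec):
    exclude = {}
    if isinstance(spec, dict):
        exclude.update(spec)
    else:
        for name in spec:
            exclude[name] = (True, {})
    return exclude

assert normalize_exclude_suffixes(['dual', 'rc']) == {
    'dual': (True, {}), 'rc': (True, {})}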
Example #5
    if is_expected_failure:
        @unittest.expectedFailure
        def failing_pickle_test(self):
            return pickle_test(self)
        # Return a test that is expected to fail
        return failing_pickle_test

    return pickle_test

cls = None

#
# Create test driver classes for each test model
#
driver = {}
for model in test_models():
    # Get the test case for the model
    case = test_models(model)

    # Create the test class
    name = "Test_%s" % model
    if new_available:
        # Python 2: build the class with the (since removed) 'new' module
        cls = new.classobj(name, (unittest.TestCase,), {})
    else:
        # Python 3: types.new_class replaces new.classobj
        cls = types.new_class(name, (unittest.TestCase,))
    cls = unittest.category(*case.level)(cls)
    driver[model] = cls
    globals()[name] = cls
#
# Iterate through all test scenarios and add test methods
#
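
The trailing comment refers to a step not shown in this excerpt. A hypothetical sketch of how the scenario loop could attach test methods to the driver classes; make_test is an assumed helper name, not part of the snippet:

# Hypothetical sketch of the omitted step: one test method per scenario,
# attached to the per-model driver class built above.
def make_test(model, solver, io):
    def test(self):
        pass  # placeholder body; a real test would exercise the scenario
    return test

for (model, solver, io), info in test_scenarios():
    setattr(driver[model], "test_%s_%s" % (solver, io),
            make_test(model, solver, io))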
Example #6
        def failing_pickle_test(self):
            return pickle_test(self)

        # Return a test that is expected to fail
        return failing_pickle_test

    return pickle_test


cls = None

#
# Create test driver classes for each test model
#
driver = {}
for model in test_models():
    # Get the test case for the model
    case = test_models(model)

    # Create the test class
    name = "Test_%s" % model
    if new_available:
        # Python 2: build the class with the (since removed) 'new' module
        cls = new.classobj(name, (unittest.TestCase,), {})
    else:
        # Python 3: types.new_class replaces new.classobj
        cls = types.new_class(name, (unittest.TestCase,))
        cls.__module__ = __name__
    cls = unittest.category(*case.level)(cls)
    driver[model] = cls
    globals()[name] = cls
#
# Iterate through all test scenarios and add test methods
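
The new_available guard exists because the Python 2 'new' module was removed in Python 3. On Python 3 alone, either types.new_class or the three-argument type constructor builds the same empty TestCase subclass:

import types
import unittest

# Two equivalent Python 3 ways to build the empty per-model test class.
name = "Test_example"
cls_a = types.new_class(name, (unittest.TestCase,))
cls_a.__module__ = __name__
cls_b = type(name, (unittest.TestCase,), {'__module__': __name__})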
Example #7
    stream.write(
        fmtStr.format(
            "TOTALS", str(total.NumEPass), str(total.NumUFail),
            str(total.NumEFail), str(total.NumUPass),
            str(
                int(100.0 * (total.NumEPass + total.NumEFail) /
                    (total.NumEPass + total.NumEFail + total.NumUFail +
                     total.NumUPass)))))
    stream.write("=" * (maxSolverNameLen + 66) + "\n")

    logging.disable(logging.NOTSET)


if __name__ == "__main__":
    from pyomo.solvers.tests.models.base import test_models

    print("")
    print("Testing model generation")
    print("-" * 30)
    for key in sorted(test_models()):
        print(key)
        obj = test_models(key)()
        obj.generate_model()
        obj.warmstart_model()

    print("")
    print("Testing scenario generation")
    print("-" * 30)
    for key, value in test_scenarios():
        print(", ".join(key))
        print("   %s: %s" % (value.status, value.msg))
Example #8
        total.NumUPass += ans.NumUPass
        total.NumUFail += ans.NumUFail
        stream.write(
            fmtStr.format(
                _solver, str(ans.NumEPass), str(ans.NumUFail),
                str(ans.NumEFail), str(ans.NumUPass),
                str(100.0 * (ans.NumEPass + ans.NumEFail) /
                    (ans.NumEPass + ans.NumEFail + ans.NumUFail +
                     ans.NumUPass))))
    #
    stream.write("=" * (maxSolverNameLen + 66) + "\n")
    stream.write(
        fmtStr.format(
            "TOTALS", str(total.NumEPass), str(total.NumUFail),
            str(total.NumEFail), str(total.NumUPass),
            str(100.0 * (total.NumEPass + total.NumEFail) /
                (total.NumEPass + total.NumEFail + total.NumUFail +
                 total.NumUPass))))
    stream.write("=" * (maxSolverNameLen + 66) + "\n")

    logger.setLevel(_level)


if __name__ == "__main__":
    from pyomo.solvers.tests.models.base import test_models

    print("")
    print("Testing model generation")
    print("-"*30)
    for key in sorted(test_models()):
        print(key)
        obj = test_models(key)()
        obj.generate_model()
        obj.warmstart_model()

    print("")
    print("Testing scenario generation")
    print("-"*30)
    for key, value in test_scenarios():
        print(", ".join(key))
        print("   %s: %s" % (value.status, value.msg))