Example #1
def test_solvers(options=None, argv=None):
    """
    This is the function executed by the command
        pyomo test-solvers [solver ...]
    """
    global rootdir
    rootdir = os.getcwd()
    if argv is None:
        if options.debug:
            if len(options.solver) == 0:
                print("Testing all solvers")
            else:
                print("Testing solver", options.solver[0])
        # Override the value of sys.argv, which is used by unittest.main()
        sys.argv = ['test_solver']
    else:
        sys.argv = argv
    # Create the tests defined in the YAML configuration file
    autotest_options = Options()
    autotest_options.testname_format = "%s_TEST_%s"
    pyutilib.autotest.create_test_suites(
        filename=currdir + 'test_solvers.yml',
        _globals=globals(),
        options=autotest_options)
    # Execute the tests, using a custom test runner
    runner = SolverTestRunner()
    runner.options = options
    unittest.main(module=globals()['__name__'], testRunner=runner)
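A minimal sketch of invoking this entry point directly, assuming an argparse-style options object; the Namespace fields (debug, solver) mirror the attributes the function reads and are illustrative, not part of a documented API:

from argparse import Namespace

# Hypothetical driver: mimic the options object that the
# 'pyomo test-solvers' command-line parser would produce.
opts = Namespace(debug=True, solver=['glpk'])
test_solvers(options=opts)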
Example #2
    if new_available:
        cls = new.classobj(name, (unittest.TestCase,), {})
    else:
        cls = types.new_class(name, (unittest.TestCase,))
    cls = unittest.category(*case.level)(cls)
    driver[model] = cls
    globals()[name] = cls
#
# Iterate through all test scenarios and add test methods
#
for key, value in test_scenarios(lambda c: c.test_pickling):
    model, solver, io = key
    cls = driver[model]
    # Symbolic labels
    test_name = "test_" + solver + "_" + io + "_symbolic_labels"
    test_method = create_test_method(model, solver, io, value, True)
    if test_method is not None:
        setattr(cls, test_name, test_method)
    # Non-symbolic labels
    test_name = "test_" + solver + "_" + io + "_nonsymbolic_labels"
    test_method = create_test_method(model, solver, io, value, False)
    if test_method is not None:
        setattr(cls, test_name, test_method)

# Reset the cls variable, since it holds a unittest.TestCase subclass.
# This prevents the class from being collected and run twice by unittest.
cls = None

if __name__ == "__main__":
    unittest.main()
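For context, here is a self-contained sketch of the dynamic test-generation pattern used above, with a hypothetical factory standing in for create_test_method; the scenario arguments and the test body are illustrative assumptions:

import unittest

def make_test_method(model, solver, io, symbolic):
    # Hypothetical factory: returns a test function to attach to the
    # class, or None to signal that the scenario should be skipped.
    def test(self):
        self.assertIn(symbolic, (True, False))
    test.__doc__ = "Round-trip %s with %s/%s (%s labels)" % (
        model, solver, io, "symbolic" if symbolic else "non-symbolic")
    return test

cls = type("TestRoundTrip", (unittest.TestCase,), {})
method = make_test_method("lp1", "glpk", "lp", True)
if method is not None:
    setattr(cls, "test_glpk_lp_symbolic_labels", method)
globals()["TestRoundTrip"] = cls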
Example #3
    if '--include' in sys.argv:
        _test_name_wildcard_include = []
        while '--include' in sys.argv:
            idx = sys.argv.index('--include')
            _test_name_wildcard_include.append(sys.argv[idx + 1])
            sys.argv.remove('--include')
            sys.argv.remove(_test_name_wildcard_include[-1])
    if '--exclude' in sys.argv:
        _test_name_wildcard_exclude = []
        while '--exclude' in sys.argv:
            idx = sys.argv.index('--exclude')
            _test_name_wildcard_exclude.append(sys.argv[idx + 1])
            sys.argv.remove('--exclude')
            sys.argv.remove(_test_name_wildcard_exclude[-1])
    if '--disable-stdout-test' in sys.argv:
        sys.argv.remove('--disable-stdout-test')
        _disable_stdout_test = True

    print("Including all tests matching wildcard: '%s'" % _test_name_wildcard_include)
    print("Excluding all tests matching wildcard: '%s'" % _test_name_wildcard_exclude)

    tester = unittest.main(exit=False)
    if tester.result.failures or tester.result.skipped or tester.result.errors:
        with open('UnitTestNoPass.txt','w') as f:
            f.write("Failures:\n")
            for res in tester.result.failures:
                f.write('.'.join(res[0].id().split('.')[-2:])+' ')
            f.write("\n\nSkipped:\n")
            for res in tester.result.skipped:
                f.write('.'.join(res[0].id().split('.')[-2:])+' ')
            f.write("\n\nErrors:\n")
            for res in tester.result.errors:
                f.write('.'.join(res[0].id().split('.')[-2:])+' ')
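A sketch of how the collected wildcards might be consulted at run time, assuming the test cases check them in setUp; the fnmatch-based helper and the module-level defaults are illustrative, not taken from the original:

import fnmatch
import unittest

_test_name_wildcard_include = ['*']
_test_name_wildcard_exclude = []

class FilteredTest(unittest.TestCase):

    def setUp(self):
        # Skip any test whose short name fails the include/exclude filters.
        name = self.id().split('.')[-1]
        if not any(fnmatch.fnmatch(name, pat)
                   for pat in _test_name_wildcard_include):
            self.skipTest("not matched by an --include wildcard")
        if any(fnmatch.fnmatch(name, pat)
               for pat in _test_name_wildcard_exclude):
            self.skipTest("matched by an --exclude wildcard")

    def test_example(self):
        self.assertTrue(True)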
Example #4
        exp = pyomo.core.base.expr._IntrinsicFunctionExpression('sum', 1, [MockFixedValue()], sum)
        exp_ar = pyomo.repn.generate_ampl_repn(exp)

        self.assertIsNotNone(exp_ar)
        self.assertIsInstance(exp_ar, pyomo.core.ampl.ampl_representation)

        self.assertEqual(type(exp), type(exp_ar._nonlinear_expr))
        self.assertEqual(exp.name, exp_ar._nonlinear_expr.name)
        self.assertEqual(0, len(exp_ar._nonlinear_vars))

    def testFixedValue(self):
        val = MockFixedValue()
        val_ar = pyomo.repn.generate_ampl_repn(val)

        self.assertIsInstance(val_ar, pyomo.core.ampl.ampl_representation)
        self.assertEqual(MockFixedValue.value, val_ar._constant)

    def testCombinedProductSum(self):
        x = pyomo.core.base.var.Var()
        y = pyomo.core.base.var.Var()
        z = pyomo.core.base.var.Var()
        exp = x * y + z

        exp_ar = pyomo.repn.generate_ampl_repn(exp)

        self.assertIsInstance(exp_ar, pyomo.core.ampl.ampl_representation)
        self.assertTrue(exp_ar.is_nonlinear())

if __name__ == "__main__":
    unittest.main(verbosity=2)
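The MockFixedValue helper these tests rely on is defined elsewhere in the suite; a plausible minimal version, modeled on Pyomo's fixed-value protocol (is_fixed plus call-to-evaluate), with 42 as an arbitrary constant:

class MockFixedValue(object):
    # Hypothetical stand-in for a fixed value: reports itself as fixed
    # and evaluates to a class-level constant.
    value = 42

    def is_fixed(self):
        return True

    def __call__(self, exception=True):
        return self.value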
Example #5
def run(argv, _globals=None):
    #
    # Set sys.argv to the value specified by the user
    #
    sys.argv = argv
    #
    # Create the option parser
    #
    parser = optparse.OptionParser()
    parser.remove_option('-h')
    #
    parser.add_option('-h','--help',
        action='store_true',
        dest='help',
        default=False,
        help='Print command options')
    #
    parser.add_option('-d','--debug',
        action='store_true',
        dest='debug',
        default=False,
        help='Set debugging flag')
    #
    parser.add_option('-v','--verbose',
        action='store_true',
        dest='verbose',
        default=False,
        help='Verbose output')
    #
    parser.add_option('-q','--quiet',
        action='store_true',
        dest='quiet',
        default=False,
        help='Minimal output')
    #
    parser.add_option('-f','--failfast',
        action='store_true',
        dest='failfast',
        default=False,
        help='Stop on first failure')
    #
    parser.add_option('-c','--catch',
        action='store_true',
        dest='catch',
        default=False,
        help='Catch control-C and display results')
    #
    parser.add_option('-b','--buffer',
        action='store_true',
        dest='buffer',
        default=False,
        help='Buffer stdout and stderr during test runs')
    #
    parser.add_option('--cat', '--category',
        action='append',
        dest='categories',
        default=[],
        help='Define a list of categories that filter the execution of test suites')
    #
    parser.add_option('--help-suites',
        action='store_true',
        dest='help_suites',
        default=False,
        help='Print the test suites that can be executed')
    #
    parser.add_option('--help-tests',
        action='store',
        dest='help_tests',
        default=None,
        help='Print the tests in the specified test suite')
    #
    parser.add_option('--help-categories',
        action='store_true',
        dest='help_categories',
        default=False,
        help='Print the test suite categories that can be specified')
    #
    # Parse the argument list and print help info if needed
    #
    _options, args = parser.parse_args(sys.argv)
    if _options.help:
        parser.print_help()

        print("""
Examples:
  %s                               - run all test suites
  %s MyTestCase.testSomething      - run MyTestCase.testSomething
  %s MyTestCase                    - run all 'test*' test methods
                                               in MyTestCase
""" % (args[0],args[0],args[0]))
        return
    #
    # If no value for _globals is specified, then we use the current context.
    #
    if _globals is None:
        _globals=globals()
    #
    # Set up an Options object and create test suites from the specified
    # configuration files.
    #
    options = Options()
    options.debug = _options.debug
    options.verbose = _options.verbose
    options.quiet = _options.quiet
    options.categories = _options.categories
    _argv = []
    for arg in args[1:]:
        if os.path.exists(arg):
            create_test_suites(filename=arg, _globals=_globals, options=options)
        else:
            _argv.append(arg)
    #
    # Collect information about the test suites:  suite names and categories
    #
    suites = []
    categories = set()
    for key in _globals.keys():
        if type(_globals[key]) is type and issubclass(_globals[key], unittest.TestCase):
            suites.append(key)
            for c in _globals[key].suite_categories:
                categories.add(c)
    #
    # Process the --help-tests option
    #
    if _options.help_tests and _globals is not None:
        suite = _globals.get(_options.help_tests, None)
        if type(suite) is not type:
            print("Test suite '%s' not found!" % str(_options.help_tests))
            return cleanup(_globals, suites)
        tests = []
        for item in dir(suite):
            if item.startswith('test'):
                tests.append(item)
        print("")
        if len(tests) > 0:
            print("Tests defined in test suite '%s':" % _options.help_tests)
            for tmp in sorted(tests):
                print("    "+tmp)
        else:
            print("No tests defined in test suite '%s'!" % _options.help_tests)
        print("")
        return cleanup(_globals, suites)
    #
    # Process the --help-suites and --help-categories options
    #
    if (_options.help_suites or _options.help_categories) and _globals is not None:
        if _options.help_suites:
            print("")
            if len(suites) > 0:
                print("Test suites defined in '%s':" % os.path.basename(argv[0]))
                for suite in sorted(suites):
                    print("    "+suite)
            else:
                print("No test suites defined in '%s'!" % os.path.basename(argv[0]))
            print("")
        if _options.help_categories:
            tmp = list(categories)
            print("")
            if len(tmp) > 0:
                print("Test suite categories defined in '%s':" % os.path.basename(argv[0]))
                for c in sorted(tmp):
                    print("    "+c)
            else:
                print("No test suite categories defined in '%s'!" % os.path.basename(argv[0]))
            print("")
        return cleanup(_globals, suites)
    #
    # Reset the value of sys.argv per the expectations of the unittest module
    #
    tmp = [args[0]]
    if _options.quiet:
        tmp.append('-q')
    if _options.verbose or _options.debug:
        tmp.append('-v')
    if _options.failfast:
        tmp.append('-f')
    if _options.catch:
        tmp.append('-c')
    if _options.buffer:
        tmp.append('-b')
    tmp += _argv
    sys.argv = tmp
    #
    # Execute the unittest main function to run tests
    #
    unittest.main(module=_globals['__name__'])
    cleanup(_globals, suites)
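A sketch of a driver script built on this function; the file names and category are placeholders, and the call assumes run() is importable from (or defined in) the driver module:

if __name__ == '__main__':
    # Forward the real command line, e.g.:
    #   python test_driver.py --cat smoke suites.yml
    import sys
    run(sys.argv)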
Example #6
        _slicer = self.m.b[1, :].c[:, 4].component('y', False)
        self.assertIsInstance(_slicer, _IndexedComponent_slicer)
        _slicer.call_errors_generate_exceptions = False
        ans = [str(x) for x in _slicer]
        self.assertEqual(ans, [])

        _slicer = self.m.b[1, :].c[:, 4].x[1]
        self.assertIsInstance(_slicer, _IndexedComponent_slicer)
        _slicer.key_errors_generate_exceptions = True
        self.assertRaises(KeyError, next, _slicer)

        _slicer = self.m.b[1, :].c[:, 4].y
        self.assertIsInstance(_slicer, _IndexedComponent_slicer)
        _slicer.attribute_errors_generate_exceptions = True
        self.assertRaises(AttributeError, next, _slicer)

        _slicer = self.m.b[1, :].c[:, 4].component('y', False)
        self.assertIsInstance(_slicer, _IndexedComponent_slicer)
        _slicer.call_errors_generate_exceptions = True
        self.assertRaises(TypeError, next, _slicer)

        _slicer = self.m.b[1, :].c[:, 4].component()
        self.assertIsInstance(_slicer, _IndexedComponent_slicer)
        _slicer.call_errors_generate_exceptions = True
        self.assertRaises(TypeError, next, _slicer)


if __name__ == "__main__":
    unittest.main()
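The model fixture these assertions run against is not shown; a minimal sketch of a compatible model using only public Pyomo modeling objects, with index sets and component names inferred from the slices above (the original test's index sets may differ):

import pyomo.environ as pyo

def build_model():
    m = pyo.ConcreteModel()

    def c_rule(c, k, l):
        c.x = pyo.Var([1, 2])
        c.y = pyo.Var()

    def b_rule(b, i, j):
        b.c = pyo.Block([3, 4], [4, 5], rule=c_rule)

    # Two-level indexed blocks, so that m.b[1, :].c[:, 4] is a valid slice.
    m.b = pyo.Block([1, 2], [2, 3], rule=b_rule)
    return m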
Example #7
    def testSum(self):
        expr = sum(self.model.z[i] for i in self.model.s)
        self.assertAmplRepnMatch(gar(expr), cgar(expr))

    def testCompoundSum(self):
        expr = sum(self.model.z[i] for i in self.model.s) + self.model.x / self.model.y
        self.assertAmplRepnMatch(gar(expr), cgar(expr))

    def testSumExpression(self):
        expr = sum(self.model.w[i] / self.model.z[i] ** 3 for i in self.model.s)
        self.assertAmplRepnMatch(gar(expr), cgar(expr))

    def testSumExpressionParam(self):
        expr = sum(value(self.model.q[i]) / self.model.z[i] ** 3 for i in self.model.s)
        self.assertAmplRepnMatch(gar(expr), cgar(expr))

    def testCantilvrConstraintExpr(self):
        # Originally from the pyomo.data.cute cantilvr model.
        expr = sum(value(self.model.q[i]) / self.model.z[i] ** 3 for i in self.model.s) - 1.0
        self.assertAmplRepnMatch(gar(expr), cgar(expr))

    def testCantilvrObjective(self):
        # Originally from the pyomo.data.cute cantilvr model.
        # Exposes a problem in linear product handling, if one is present.
        expr = sum(self.model.z[i] for i in self.model.s) * 0.0624
        self.assertAmplRepnMatch(gar(expr), cgar(expr))

if __name__ == "__main__":
    unittest.main(verbosity=2)
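The assertAmplRepnMatch helper is defined elsewhere in the suite; a plausible sketch that compares only the repn attributes exercised in these examples (_constant, _nonlinear_vars, is_nonlinear); the original comparison is likely more thorough:

class AmplRepnCompareMixin(object):
    # Hypothetical helper: assert that two AMPL representations of the
    # same expression agree on the attributes checked above.
    def assertAmplRepnMatch(self, repn_a, repn_b):
        self.assertEqual(repn_a._constant, repn_b._constant)
        self.assertEqual(repn_a.is_nonlinear(), repn_b.is_nonlinear())
        self.assertEqual(len(repn_a._nonlinear_vars),
                         len(repn_b._nonlinear_vars))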