def run_tests(self, suite):
     runner = unittest.TextTestRunner(stream=StringIO())
     # Run tests across 2 processes
     concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(2))
     result = runner.run(concurrent_suite)
     self.assertEqual(result.testsRun, suite.countTestCases())
     return result
Example #2
def run_test_suites(sanity_tests, single_tests, parallel_tests):
    all_successful = False
    sanity_runner = unittest.TextTestRunner(verbosity=255, failfast=True)
    sanity_result = sanity_runner.run(sanity_tests)
    if sanity_result.wasSuccessful():
        print('running %u tests in parallel and %u tests serial' %
              (parallel_tests.countTestCases(), single_tests.countTestCases()))
        results = []
        if parallel_tests.countTestCases():
            max_parallel_tests = min(parallel_tests.countTestCases(),
                                     MAX_PARALLEL_TESTS)
            parallel_runner = unittest.TextTestRunner(verbosity=255)
            parallel_suite = ConcurrentTestSuite(
                parallel_tests, fork_for_tests(max_parallel_tests))
            results.append(parallel_runner.run(parallel_suite))
        # TODO: Tests that are serialized generally depend on hardcoded ports.
        # Make them use dynamic ports.
        if single_tests.countTestCases():
            single_runner = unittest.TextTestRunner(verbosity=255)
            results.append(single_runner.run(single_tests))
        all_successful = True
        for result in results:
            if not result.wasSuccessful():
                all_successful = False
                print(result.printErrors())
    else:
        print('sanity tests failed - test environment not correct')
    return all_successful
def setup():
    suite = unittest.TestSuite((
        unittest.TestLoader().loadTestsFromTestCase(ForkingWorkersTestCase),
        unittest.TestLoader().loadTestsFromTestCase(PartitionTestCase),
    ))
    concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(2))
    return concurrent_suite
Example #4
def main(argv):
    parser = argparse.ArgumentParser(prog=argv[0],
        description='run CAmkES tests')
    parser.add_argument('--jobs', '-j', nargs='?', type=int,
        help='parallelise test execution')
    parser.add_argument('--verbosity', '-v', default=1, type=int, help="Verbosity to run tests. 0 = quiet. 1 = default. 2 = verbose")
    parser.add_argument('test', nargs='*', choices=TESTS+['all'], default='all', help='run a specific category of tests')
    parser.add_argument('--capdl-python', help='Deprecated. Using this argument has no effect.')
    options = parser.parse_args(argv[1:])

    if options.jobs is None:
        # Maximum parallelism.
        options.jobs = multiprocessing.cpu_count()

    # work out which tests to run
    if options.test == 'all' or 'all' in options.test:
        test_packages = TESTS
    else:
        test_packages = options.test

    # load the tests we want to run
    loader = unittest.TestLoader()
    test_suite = unittest.TestSuite()
    for v in test_packages:
        test_suite.addTests(loader.discover('camkes.' + v, top_level_dir=os.path.dirname(ME)))

    concurrent_suite = ConcurrentTestSuite(test_suite, fork_for_tests(options.jobs))
    runner = unittest.TextTestRunner(verbosity=options.verbosity)
    result = runner.run(concurrent_suite)
    if result.wasSuccessful():
        return 0
    return 1
 def run_tests(self, suite):
     runner = unittest.TextTestRunner(stream=StringIO())
     # Run tests across 2 processes
     concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(2))
     result = runner.run(concurrent_suite)
     self.assertEqual(result.testsRun, suite.countTestCases())
     return result
Example #6
def runtests(processes=8):
    """
        Run the crypto library's unit tests. Will run tests in parallel if the `concurrencytest`
        module is installed. Defaults to eight processes.

        Example:

        >>> from crypto.tests import runtests
        >>> runtests()
        ...
    """
    import unittest
    loader = unittest.TestLoader()
    # Discover all tests in the current directory that are prefixed with `test`. Also discovers
    # the doctests loaded by defining a load_tests(...) function in each submodule's __init__.py
    suite = loader.discover('.', pattern='test*.py')
    runner = unittest.runner.TextTestRunner()
    try:
        from concurrencytest import ConcurrentTestSuite, fork_for_tests
        concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(processes))
        runner.run(concurrent_suite)
    except ImportError:
        runner.run(suite)
    # Prevent calling sys.exit() just in case the user is running the tests from an interpreter.
    unittest.main(exit=False)
def main(args):
    """
    Parse the command-line arguments, build the test suite, and run it, in parallel if a process count is given.
    """
    process = 0
    iter_num = 0
    if len(args) > 1:
        iter_num = int(args[0])
        process = int(args[1])
    else:
        iter_num = int(args[0])

    # load a TestSuite with iter_num copies of the test case for the demo
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for _ in range(iter_num):
        suite.addTests(loader.loadTestsFromTestCase(MemcacheConcurrencyTestCase))
    print('Loaded %d test cases...' % suite.countTestCases())
 
    runner = unittest.TextTestRunner()
    
    # Run the tests in parallel across the requested number of processes
    if process:
        print('\nRun same tests with %s processes: ' % process)
        concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(process))
        runner.run(concurrent_suite)
    else:
        # Sequential run (one by one)
        print('\nRun tests sequentially:')
        runner.run(suite)
Example #8
def execute_test_class(test_class):
	loader = unittest.TestLoader()
	suite = unittest.TestSuite()
	suite.addTests(loader.loadTestsFromTestCase(test_class))
	runner = unittest.TextTestRunner()
	concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(50))
	runner.run(concurrent_suite)
Example #9
 def multi_processing(self):
     print("multi_processing")
     runner = unittest.TextTestRunner()
     suite = unittest.TestLoader().loadTestsFromTestCase(test_name)
     concurrent_suite = ConcurrentTestSuite(suite,
                                            fork_for_tests(self.processing))
     runner.run(concurrent_suite)
Example #10
def RunTests(debug, processes, args):
    """Run the functional tests and any embedded doctests

    Args:
        debug: True to enable debugging, which shows a full stack trace on error
        args: List of positional args provided to binman. This can hold a test
            name to execute (as in 'binman -t testSections', for example)
        processes: Number of processes to use to run tests (None=same as #CPUs)
    """
    import elf_test
    import entry_test
    import fdt_test
    import ftest
    import image_test
    import test
    import doctest

    result = unittest.TestResult()
    for module in []:
        suite = doctest.DocTestSuite(module)
        suite.run(result)

    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')

    # Run the entry tests first, since these need to be the first to import the
    # 'entry' module.
    test_name = args and args[0] or None
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for module in (entry_test.TestEntry, ftest.TestFunctional, fdt_test.TestFdt,
                   elf_test.TestElf, image_test.TestImage):
        if test_name:
            try:
                suite.addTests(loader.loadTestsFromName(test_name, module))
            except AttributeError:
                continue
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))
    if use_concurrent and processes != 1:
        concurrent_suite = ConcurrentTestSuite(suite,
                fork_for_tests(processes or multiprocessing.cpu_count()))
        concurrent_suite.run(result)
    else:
        suite.run(result)

    print(result)
    for test, err in result.errors:
        print(test.id(), err)
    for test, err in result.failures:
        print(err, result.failures)
    if result.errors or result.failures:
        print('binman tests FAILED')
        return 1
    return 0
def main():
    runner = unittest.TextTestRunner()
    suite = unittest.TestSuite((
        unittest.TestLoader().loadTestsFromTestCase(ForkingWorkersTestCase),
        unittest.TestLoader().loadTestsFromTestCase(PartitionTestCase),
    ))
    concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests())
    result = runner.run(concurrent_suite)
    return len(result.errors) + len(result.failures)
def main():
    runner = unittest.TextTestRunner()
    suite = unittest.TestSuite((
        unittest.TestLoader().loadTestsFromTestCase(ForkingWorkersTestCase),
        unittest.TestLoader().loadTestsFromTestCase(PartitionTestCase),
    ))
    concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests())
    result = runner.run(concurrent_suite)
    return len(result.errors) + len(result.failures)
def run_parallel_test_suites(root_tmpdir, resultclass, parallel_tests):
    results = []
    if parallel_tests.countTestCases():
        max_parallel_tests = min(parallel_tests.countTestCases(), max_loadavg())
        print('running maximum of %u parallel tests' % max_parallel_tests)
        parallel_runner = test_runner(root_tmpdir, resultclass)
        parallel_suite = ConcurrentTestSuite(
            parallel_tests, fork_for_tests(max_parallel_tests))
        results.append(parallel_runner.run(parallel_suite))
    return results
Example #14
def run_parallel_test_suites(root_tmpdir, resultclass, parallel_tests):
    results = []
    if parallel_tests.countTestCases():
        max_parallel_tests = min(parallel_tests.countTestCases(), max_loadavg())
        print('running maximum of %u parallel tests' % max_parallel_tests)
        parallel_runner = test_runner(root_tmpdir, resultclass)
        parallel_suite = ConcurrentTestSuite(
            parallel_tests, fork_for_tests(max_parallel_tests))
        results.append(parallel_runner.run(parallel_suite))
    return results
Example #15
def main():
    print("\n###################### running tests ##########################\n")

    s = unittest.defaultTestLoader.discover('', pattern="Test*")
    concurrent_suite = ConcurrentTestSuite(s, fork_for_tests(16))
    r = unittest.TextTestRunner()

    res = r.run(concurrent_suite)
    if len(res.errors) + len(res.failures) > 0:
        sys.exit(1)
Example #16
def RunTestSuites(result, debug, verbosity, test_preserve_dirs, processes,
                  test_name, toolpath, test_class_list):
    """Run a series of test suites and collect the results

    Args:
        result: A unittest.TestResult object to add the results to
        debug: True to enable debugging, which shows a full stack trace on error
        verbosity: Verbosity level to use (0-4)
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (in args[0]) it also preserves
            the output directory for this test. Both directories are displayed
            on the command line.
        processes: Number of processes to use to run tests (None=same as #CPUs)
        test_name: Name of test to run, or None for all
        toolpath: List of paths to use for tools
        test_class_list: List of test classes to run
    """
    for module in []:
        suite = doctest.DocTestSuite(module)
        suite.run(result)

    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
    if verbosity:
        sys.argv.append('-v%d' % verbosity)
    if toolpath:
        for path in toolpath:
            sys.argv += ['--toolpath', path]

    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for module in test_class_list:
        # Tell the test module about our arguments, if it is interested
        if hasattr(module, 'setup_test_args'):
            setup_test_args = getattr(module, 'setup_test_args')
            setup_test_args(preserve_indir=test_preserve_dirs,
                            preserve_outdirs=test_preserve_dirs
                            and test_name is not None,
                            toolpath=toolpath,
                            verbosity=verbosity)
        if test_name:
            try:
                suite.addTests(loader.loadTestsFromName(test_name, module))
            except AttributeError:
                continue
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))
    if use_concurrent and processes != 1:
        concurrent_suite = ConcurrentTestSuite(
            suite, fork_for_tests(processes or multiprocessing.cpu_count()))
        concurrent_suite.run(result)
    else:
        suite.run(result)
 def test_run_default_concurrency(self):
     runner = unittest.TextTestRunner(stream=StringIO())
     suite = unittest.TestLoader().loadTestsFromTestCase(BothPass)
     # Run across all CPUs/processors in the machine
     concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests())
     result = runner.run(concurrent_suite)
     self.assertTrue(result.wasSuccessful())
     self.assertEqual(result.testsRun, suite.countTestCases())
     self.assertEqual(result.errors, [])
     self.assertEqual(result.failures, [])
     self.assertEqual(result.skipped, [])
 def test_run_default_concurrency(self):
     runner = unittest.TextTestRunner(stream=StringIO())
     suite = unittest.TestLoader().loadTestsFromTestCase(BothPass)
     # Run across all CPUs/processors in the machine
     concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests())
     result = runner.run(concurrent_suite)
     self.assertTrue(result.wasSuccessful())
     self.assertEqual(result.testsRun, suite.countTestCases())
     self.assertEqual(result.errors, [])
     self.assertEqual(result.failures, [])
     self.assertEqual(result.skipped, [])
Example #19
    def _parallelize(suite):
        def fdopen(fd, mode, *kwds):
            stream = orig_fdopen(fd, mode)
            atexit.register(stream.close)
            return stream

        # Monkey patch concurrencytest lib bug (fdopen() stream not closed).
        # https://github.com/cgoldberg/concurrencytest/issues/11
        orig_fdopen = os.fdopen
        concurrencytest.os.fdopen = fdopen
        forker = concurrencytest.fork_for_tests(NWORKERS)
        return concurrencytest.ConcurrentTestSuite(suite, forker)
Example #20
def test_Trigger():
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTests(loader.loadTestsFromTestCase(TestAuthWithGoogleAuth))
    suite.addTests(loader.loadTestsFromTestCase(TestAuthWithOAuth2Client))
    suite.addTests(loader.loadTestsFromTestCase(TestAuthWithoutAuth))
    suite.addTests(loader.loadTestsFromTestCase(TestGoogleAuthWithoutHttplib2))
    suite.addTests(loader.loadTestsFromTestCase(TestMediaUpload))

    logger.debug('Loaded %d test cases...' % suite.countTestCases())
    runner = unittest.TextTestRunner()
    logger.debug('\nRun same tests with 8 processes:')
    concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(8))
    runner.run(concurrent_suite)
Example #21
def runsuite(suite, parallel):
    suite_to_run = suite
    if parallel > 1:
        from concurrencytest import ConcurrentTestSuite, fork_for_tests
        if not WiredTigerTestCase._globalSetup:
            WiredTigerTestCase.globalSetup()
        WiredTigerTestCase._concurrent = True
        suite_to_run = ConcurrentTestSuite(suite, fork_for_tests(parallel))
    try:
        return unittest.TextTestRunner(
            verbosity=WiredTigerTestCase._verbose).run(suite_to_run)
    except BaseException as e:
        # This should not happen for regular test errors, unittest should catch everything
        print('ERROR: running test: ', e)
        raise e
Example #22
def runsuite(suite, parallel):
    suite_to_run = suite
    if parallel > 1:
        from concurrencytest import ConcurrentTestSuite, fork_for_tests
        if not WiredTigerTestCase._globalSetup:
            WiredTigerTestCase.globalSetup()
        WiredTigerTestCase._concurrent = True
        suite_to_run = ConcurrentTestSuite(suite, fork_for_tests(parallel))
    try:
        return unittest.TextTestRunner(
            verbosity=WiredTigerTestCase._verbose).run(suite_to_run)
    except BaseException as e:
        # This should not happen for regular test errors, unittest should catch everything
        print('ERROR: running test: ', e)
        raise e
Example #23
def main(argv):
    parser = argparse.ArgumentParser(prog=argv[0],
                                     description='run CAmkES tests')
    parser.add_argument('--jobs',
                        '-j',
                        nargs='?',
                        type=int,
                        help='parallelise test execution')
    parser.add_argument(
        '--verbosity',
        '-v',
        default=1,
        type=int,
        help="Verbosity to run tests. 0 = quiet. 1 = default. 2 = verbose")
    parser.add_argument('test',
                        nargs='*',
                        choices=TESTS + ['all'],
                        default='all',
                        help='run a specific category of tests')
    parser.add_argument('--capdl-python',
                        help='Deprecated. Using this argument has no effect.')
    options = parser.parse_args(argv[1:])

    if options.jobs is None:
        # Maximum parallelism.
        options.jobs = multiprocessing.cpu_count()

    # work out which tests to run
    if options.test == 'all' or 'all' in options.test:
        test_packages = TESTS
    else:
        test_packages = options.test

    # load the tests we want to run
    loader = unittest.TestLoader()
    test_suite = unittest.TestSuite()
    for v in test_packages:
        test_suite.addTests(
            loader.discover('camkes.' + v, top_level_dir=os.path.dirname(ME)))

    concurrent_suite = ConcurrentTestSuite(test_suite,
                                           fork_for_tests(options.jobs))
    runner = unittest.TextTestRunner(verbosity=options.verbosity)
    result = runner.run(concurrent_suite)
    if result.wasSuccessful():
        return 0
    return 1
Example #24
def runsuite(suite, parallel):
    suite_to_run = suite
    if parallel > 1:
        from concurrencytest import ConcurrentTestSuite, fork_for_tests
        if not WiredTigerTestCase._globalSetup:
            WiredTigerTestCase.globalSetup()
        WiredTigerTestCase._concurrent = True
        suite_to_run = ConcurrentTestSuite(suite, fork_for_tests(parallel))
    try:
        if WiredTigerTestCase._randomseed:
            WiredTigerTestCase.prout("Starting test suite with seedw={0} and seedz={1}. Rerun this test with -seed {0}.{1} to get the same randomness"
                .format(str(WiredTigerTestCase._seeds[0]), str(WiredTigerTestCase._seeds[1])))
        return unittest.TextTestRunner(
            verbosity=WiredTigerTestCase._verbose).run(suite_to_run)
    except BaseException as e:
        # This should not happen for regular test errors, unittest should catch everything
        print('ERROR: running test: ', e)
        raise e
Example #25
def main(argv):
    parser = argparse.ArgumentParser(prog=argv[0],
                                     description='Run capdl tests')
    parser.add_argument('--verbosity', '-v', default=1, type=int,
                        help="Verbosity to run tests. 0 = quiet. 1 = default. 2 = verbose")
    options = parser.parse_args(argv[1:])

    # load the tests we want to run
    loader = unittest.TestLoader()
    test_suite = unittest.TestSuite()
    print("Looking for tests in {0}".format(os.path.dirname(ME)))
    test_suite.addTests(loader.discover(os.path.dirname(ME), pattern="*.py"))

    concurrent_suite = ConcurrentTestSuite(test_suite, fork_for_tests(multiprocessing.cpu_count()))
    runner = unittest.TextTestRunner(verbosity=options.verbosity)
    result = runner.run(concurrent_suite)
    if result.wasSuccessful():
        return 0
    return 1
Example #26
def main():
    # runner = TextTestRunner(verbosity=2, failfast=True)
    runner = TextTestRunner(verbosity=2)

    try:
        from concurrencytest import ConcurrentTestSuite, fork_for_tests
        useParallerlTest = True
    except ImportError:
        # concurrencytest is not installed, use regular test runner
        useParallerlTest = False
    # useParallerlTest = False

    if useParallerlTest:
        concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests())
        res = runner.run(concurrent_suite)
    else:
        res = runner.run(suite)
    if not res.wasSuccessful():
        sys.exit(1)
Example #27
def runtests(processes=4):
    """
        Run Bitfield's unit tests. Will run the tests in parallel if the `concurrencytest` library
        is installed. Will run serially otherwise.
    """
    # Discover all tests in the current directory that are prefixed with `test`. Also discovers
    # the doctests loaded by defining a load_tests(...) function in the module __init__.py
    loader = unittest.TestLoader()
    doctest_suite = loader.discover('.', pattern='test*.py')
    runner = unittest.runner.TextTestRunner()

    try:
        from concurrencytest import ConcurrentTestSuite, fork_for_tests
        concurrent_doctest_suite = ConcurrentTestSuite(
            doctest_suite, fork_for_tests(processes))
        runner.run(concurrent_doctest_suite)
    except ImportError:
        runner.run(doctest_suite)

    # Prevent calling sys.exit() just in case the user is running the tests from an interpreter.
    unittest.main(exit=False)
def parallel_run(verbose):
    if verbose:
        print("[runCompleteTestSuite] verbose run not implemented "
              "for parallel version")
        return
    try:
        from concurrencytest import ConcurrentTestSuite, fork_for_tests
    except ImportError as e:
        print("Need to install the module concurrencytest.")
        print("pip install --user concurrencytest")
        return
    from smodels.tools import runtime
    suite = unittest.TestLoader().discover("./")
    ncpus = runtime.nCPUs()
    ## "shuffle" the tests, so that the heavy tests get distributed
    ## more evenly among threads (didn't help, so I commented it out)
    #suite._tests = [ item for sublist in [ suite._tests[x::ncpus] \
    #    for x in range(ncpus) ] for item in sublist ]
    concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(ncpus))
    runner = unittest.TextTestRunner()
    runner.run(concurrent_suite)
Example #29
def parallel_run ( verbose ):
    if verbose:
        print ("[runCompleteTestSuite] verbose run not implemented "
               "for parallel version" )
        return
    try:
        from concurrencytest import ConcurrentTestSuite, fork_for_tests
    except ImportError as e:
        print ( "Need to install the module concurrencytest." )
        print ( "pip install --user concurrencytest" )
        return
    from smodels.tools import runtime
    suite = unittest.TestLoader().discover("./") 
    ncpus = runtime.nCPUs()
    ## "shuffle" the tests, so that the heavy tests get distributed
    ## more evenly among threads (didn't help, so I commented it out)
    #suite._tests = [ item for sublist in [ suite._tests[x::ncpus] \
    #    for x in range(ncpus) ] for item in sublist ]
    concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests( ncpus ))
    runner = unittest.TextTestRunner()
    runner.run(concurrent_suite)
Example #30
def execute():
    test_list = []
    setup(test_list)
    for t in test_list:
        test_name = 'test_%s' % t[0]
        test = test_generator(t[1], t[2])
        setattr(TestSequence, test_name, test)

    fast = unittest.makeSuite(TestSequence, 'test')
    print('\nRun mutant tests across 3 processes:')

    with open("output.log", 'w') as f:
        runner = unittest.TextTestRunner(f)
        concurrent_suite = ConcurrentTestSuite(fast, fork_for_tests(3))
        output = runner.run(concurrent_suite)

    with open("output.log", 'r') as f:
        lines = f.readlines()
        i = 1
        with open("results.txt", 'w') as w:
            for c in lines[0].strip():
                if c == '.':
                    w.write(f"\nMutant #{i} was not killed")
                    print(f"Mutant #{i} was not killed")
                else:
                    w.write(f"\nMutant #{i} was killed")
                    print(f"Mutant #{i} was killed")

                w.write(f"\nvector is {l[i - 1]}\n")
                print(f"vector is {l[i - 1]}\n")
                i = i + 1

    tot_tests = output.testsRun
    tot_fail = len(output.failures)
    mut_cov = (tot_fail / tot_tests) * 100

    with open("results.txt", "a") as w:
        w.write("\nMutant Coverage: %{0:.2f}".format(mut_cov))
    print("Mutant Coverage: %{0:.2f}".format(mut_cov))
Example #31
def runsuite(suite, parallel):
    suite_to_run = suite
    if parallel > 1:
        try:
            from concurrencytest import ConcurrentTestSuite, fork_for_tests
        except ImportError:
            print('ERROR: additional python modules must be installed\n' +
                  '       to use the "--parallel N" option.  Consult\n' +
                  '       the WiredTiger HOWTO:RunTheTestSuite wiki page.\n')
            raise

        if not WiredTigerTestCase._globalSetup:
            WiredTigerTestCase.globalSetup()
        WiredTigerTestCase._concurrent = True
        suite_to_run = ConcurrentTestSuite(suite, fork_for_tests(parallel))
    try:
        return unittest.TextTestRunner(
            verbosity=WiredTigerTestCase._verbose).run(suite_to_run)
    except BaseException as e:
        # This should not happen for regular test errors, unittest should catch everything
        print('ERROR: running test: ', e)
        raise e
Example #32
def runsuite(suite, parallel):
    suite_to_run = suite
    if parallel > 1:
        try:
            from concurrencytest import ConcurrentTestSuite, fork_for_tests
        except ImportError:
            print ('ERROR: additional python modules must be installed\n' +
                   '       to use the "--parallel N" option.  Consult\n' +
                   '       the WiredTiger HOWTO:RunTheTestSuite wiki page.\n')
            raise

        if not WiredTigerTestCase._globalSetup:
            WiredTigerTestCase.globalSetup()
        WiredTigerTestCase._concurrent = True
        suite_to_run = ConcurrentTestSuite(suite, fork_for_tests(parallel))
    try:
        return unittest.TextTestRunner(
            verbosity=WiredTigerTestCase._verbose).run(suite_to_run)
    except BaseException as e:
        # This should not happen for regular test errors, unittest should catch everything
        print('ERROR: running test: ', e)
        raise e
Example #33
def run_test_suites(sanity_tests, single_tests, parallel_tests):
    sanity = False
    all_successful = False
    failures = []
    sanity_runner = unittest.TextTestRunner(verbosity=255, failfast=True)
    sanity_result = sanity_runner.run(sanity_tests)
    max_parallel_tests = multiprocessing.cpu_count() * 3
    if sanity_result.wasSuccessful():
        sanity = True
        print('running %u tests in parallel and %u tests serial' % (
            parallel_tests.countTestCases(), single_tests.countTestCases()))
        results = []
        if parallel_tests.countTestCases():
            print('running maximum of %u parallel tests' % max_parallel_tests)
            max_parallel_tests = min(parallel_tests.countTestCases(), max_parallel_tests)
            parallel_runner = unittest.TextTestRunner(verbosity=255)
            parallel_suite = ConcurrentTestSuite(
                parallel_tests, fork_for_tests(max_parallel_tests))
            results.append(parallel_runner.run(parallel_suite))
        # TODO: Tests that are serialized generally depend on hardcoded ports.
        # Make them use dynamic ports.
        if single_tests.countTestCases():
            single_runner = unittest.TextTestRunner(verbosity=255)
            results.append(single_runner.run(single_tests))
        all_successful = True
        for result in results:
            if not result.wasSuccessful():
                for failure in result.failures + result.errors:
                    description = failure[0].shortDescription()
                    if description:
                        failed_name = '-'.join(description.split('.')[1:])
                        failures.append(failed_name)
                all_successful = False
                print(result.printErrors())
    else:
        print('sanity tests failed - test environment not correct')
    return (sanity, all_successful, failures)
Example #34
def main():
    # the following monkeypatching of the path would 
    # make the execution in subdirectories impossible 
    #print("\n###################### running single tests ##########################\n")
    #s1=unittest.TestSuite()
    #from Test_smooth_model_run import TestSmoothModelRun
    #s1.addTest(TestSmoothModelRun('test_linearize'))
    #r = unittest.TextTestRunner()
    #res = r.run(s1)
    #if len(res.errors) + len(res.failures) > 0:
    #    sys.exit(1)

    print("\n###################### running tests ##########################\n")

    s = unittest.defaultTestLoader.discover('', pattern='Test*')
    #p = unittest.defaultTestLoader.discover('', pattern='Pinned_Test*')
    #s.addTests(p)
    #concurrent_suite = s
    concurrent_suite = ConcurrentTestSuite(s, fork_for_tests(64))
    r = unittest.TextTestRunner()

    res = r.run(concurrent_suite)
    if len(res.errors) + len(res.failures) > 0:
        sys.exit(1)
        Smat = get_shift_matrix(K_ortho_Geier, ex_Geier, ey_Geier)

        f = io.StringIO()
        with redirect_stdout(f):
            print_as_vector(Shift_ortho_Geier, 's')
        out = f.getvalue()

        f2 = io.StringIO()
        with redirect_stdout(f2):
            print_as_vector(Smat[3:, 3:], 's')
        out2 = f2.getvalue()

        assert out == out2

# PyCharm runs them sequentially
# python -m unittest tests/test_example_unit_tests_parallel_run.py # sequential as well
# python tests/test_example_unit_tests_parallel_run.py # concurrent run :)


if __name__ == '__main__':
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner()
    # Run the same tests across all available CPU cores
    cores = multiprocessing.cpu_count()
    print(f'\nRunning tests on {cores} cores:')
    suite = unittest.TestLoader().loadTestsFromTestCase(TestMatrixGenerator)
    concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(cores))
    # concurrent_suite.addTests(loader.loadTestsFromTestCase(TestMatrixGenerator))  # add more :>
    runner.run(concurrent_suite)
Example #36
def main(arguments):
    """
    Begins running unit tests.
    argv[1] if used must be the source filename else os_env.yaml will be
    leveraged instead
    argv[2] if used must be the proxy server <host>:<port>
    """
    logger.info('Starting test suite')

    log_level = LOG_LEVELS.get(arguments.log_level, logging.DEBUG)

    flavor_metadata = None
    if arguments.flavor_metadata:
        flavor_metadata = {
            'metadata': {
                'hw:mem_page_size': arguments.flavor_metadata
            }
        }

    image_metadata = None
    if arguments.image_metadata_file:
        image_metadata = file_utils.read_yaml(arguments.image_metadata_file)

    concurrent_suite = None
    sequential_suite = None

    if arguments.env and arguments.ext_net:
        unit = arguments.include_unit != ARG_NOT_SET
        connection = arguments.include_connection != ARG_NOT_SET
        api = arguments.include_api != ARG_NOT_SET
        integration = arguments.include_integration != ARG_NOT_SET
        ci = arguments.continuous_integration != ARG_NOT_SET
        staging = arguments.include_staging != ARG_NOT_SET
        if (not unit and not connection and not api and not integration
                and not staging and not ci):
            unit = True
            connection = True
            api = True
            integration = True

        concurrent_suite = __create_concurrent_test_suite(
            arguments.env, arguments.ext_net, arguments.proxy,
            arguments.ssh_proxy_cmd, unit, connection, api, integration,
            staging, flavor_metadata, image_metadata,
            arguments.floating_ips != ARG_NOT_SET, ci, log_level)

        if (arguments.include_integration != ARG_NOT_SET
                and arguments.floating_ips != ARG_NOT_SET):
            sequential_suite = __create_sequential_test_suite(
                arguments.env, arguments.ext_net, arguments.proxy,
                arguments.ssh_proxy_cmd, integration, flavor_metadata,
                image_metadata, arguments.floating_ips != ARG_NOT_SET,
                log_level)
    else:
        logger.error('Environment file or external network not defined')
        exit(1)

    i = 0
    while i < int(arguments.num_runs):
        i += 1

        if concurrent_suite:
            logger.info('Running Concurrent Tests')
            concurrent_runner = unittest.TextTestRunner(verbosity=2)
            concurrent_suite = ConcurrentTestSuite(
                concurrent_suite, fork_for_tests(int(arguments.threads)))
            concurrent_results = concurrent_runner.run(concurrent_suite)
            __output_results(concurrent_results)

            if ((concurrent_results.errors
                 and len(concurrent_results.errors) > 0)
                    or (concurrent_results.failures
                        and len(concurrent_results.failures) > 0)):
                logger.error('See above for test failures')
                exit(1)
            else:
                logger.info(
                    'Concurrent tests completed successfully in run #%s', i)

        if sequential_suite:
            logger.info('Running Sequential Tests')
            sequential_runner = unittest.TextTestRunner(verbosity=2)
            sequential_results = sequential_runner.run(sequential_suite)
            __output_results(sequential_results)

            if ((sequential_results.errors
                 and len(sequential_results.errors) > 0)
                    or (sequential_results.failures
                        and len(sequential_results.failures) > 0)):
                logger.error('See above for test failures')
                exit(1)
            else:
                logger.info(
                    'Sequential tests completed successfully in run #%s', i)

    logger.info('Successful completion of %s test runs', i)
    exit(0)
Example #37
def main():
    # pylint: disable=import-outside-toplevel
    if "BUNDLED_LIB_LOCATION" in os.environ:
        sys.path.insert(0, os.environ["BUNDLED_LIB_LOCATION"])

    if "--installed" in sys.argv:
        sys.path.append(PACKAGE_DIR)

        from pcs import settings

        if settings.pcs_bundled_packages_dir not in sys.path:
            sys.path.insert(0, settings.pcs_bundled_packages_dir)

        from pcs_test.tools import pcs_runner

        pcs_runner.test_installed = True
    else:
        sys.path.insert(0, PACKAGE_DIR)
        from pcs import settings

        settings.pcs_data_dir = os.path.join(PACKAGE_DIR, "data")

    run_concurrently = can_concurrency and "--no-parallel" not in sys.argv

    explicitly_enumerated_tests = [
        prepare_test_name(arg) for arg in sys.argv[1:] if arg not in (
            "-v",
            "--all-but",
            "--fast-info",  # show a traceback immediatelly after the test fails
            "--last-slash",
            "--list",
            "--no-parallel",
            "--traceback-highlight",
            "--traditional-verbose",
            "--vanilla",
            "--installed",
            "--tier0",
            "--tier1",
        )
    ]

    tier = None
    if "--tier0" in sys.argv:
        tier = 0
    elif "--tier1" in sys.argv:
        tier = 1

    discovered_tests = discover_tests(explicitly_enumerated_tests,
                                      "--all-but" in sys.argv,
                                      tier=tier)
    if "--list" in sys.argv:
        test_list = tests_from_suite(discovered_tests)
        print("\n".join(sorted(test_list)))
        print("{0} tests found".format(len(test_list)))
        sys.exit()

    tests_to_run = discovered_tests
    tier1_fixtures_cleanup = None
    if tier1_fixtures_needed(tests_to_run):
        tier1_fixtures_cleanup = run_tier1_fixtures(
            run_concurrently=run_concurrently)
    if run_concurrently:
        tests_to_run = ConcurrentTestSuite(
            discovered_tests,
            concurrencytest.fork_for_tests(),
        )

    use_improved_result_class = (sys.stdout.isatty() and sys.stderr.isatty()
                                 and "--vanilla" not in sys.argv)

    ResultClass = unittest.TextTestResult
    if use_improved_result_class:
        from pcs_test.tools.color_text_runner import get_text_test_result_class

        ResultClass = get_text_test_result_class(
            slash_last_fail_in_overview=("--last-slash" in sys.argv),
            traditional_verbose=(
                "--traditional-verbose" in sys.argv or
                # temporary workaround - our verbose writer is not compatible with
                # running tests in parallel, use our traditional writer
                (run_concurrently and "-v" in sys.argv)),
            traceback_highlight=("--traceback-highlight" in sys.argv),
            fast_info=("--fast-info" in sys.argv),
        )

    test_runner = unittest.TextTestRunner(
        verbosity=2 if "-v" in sys.argv else 1, resultclass=ResultClass)
    test_result = test_runner.run(tests_to_run)
    if tier1_fixtures_cleanup:
        tier1_fixtures_cleanup()
    if not test_result.wasSuccessful():
        sys.exit(1)
        for scoring_algorithm in scoring_algorithms:
            file_name = "{} {} {}.csv".format(
                scoring_algorithm.value, pairing_algorithm.value, correct_rate_str
            )
            report_path = "{}/{}".format(REPORT_FOLDER, file_name)
            with open(report_path, "w+") as csvfile:
                out = csv.writer(csvfile)
                heading = []
                for index in range(1, 101):
                    heading.append("Round {}".format(index))
                out.writerow(heading)

            AlgorithmValidityTests.SCORING_ALGORITHM = scoring_algorithm
            AlgorithmValidityTests.PAIRING_ALGORITHM = pairing_algorithm
            AlgorithmValidityTests.REPORT_PATH = report_path
            AlgorithmValidityTests.WINNER_SELECTOR = winner_selector
            AlgorithmValidityTests.CORRECT_RATE = correct_rate

            loader = unittest.TestLoader()
            test = loader.loadTestsFromName('test_random_students_perform_comparisons', AlgorithmValidityTests)

            suite = unittest.TestSuite()
            for _ in range(REPETITIONS):
                suite.addTest(test)

            runner = unittest.TextTestRunner()
            concurrent_suite = concurrencytest.ConcurrentTestSuite(suite, concurrencytest.fork_for_tests(CONCURRENCY))
            print("Starting Test for {}".format(file_name))
            runner.run(concurrent_suite)
            print("Finished Test for {}".format(file_name))
            print("")
Example #39
def RunTests(debug, verbosity, processes, test_preserve_dirs, args, toolpath):
    """Run the functional tests and any embedded doctests

    Args:
        debug: True to enable debugging, which shows a full stack trace on error
        verbosity: Verbosity level to use
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (in args[0]) it also preserves
            the output directory for this test. Both directories are displayed
            on the command line.
        processes: Number of processes to use to run tests (None=same as #CPUs)
        args: List of positional args provided to binman. This can hold a test
            name to execute (as in 'binman test testSections', for example)
        toolpath: List of paths to use for tools
    """
    import cbfs_util_test
    import elf_test
    import entry_test
    import fdt_test
    import ftest
    import image_test
    import test
    import doctest

    result = unittest.TestResult()
    for module in []:
        suite = doctest.DocTestSuite(module)
        suite.run(result)

    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
    if verbosity:
        sys.argv.append('-v%d' % verbosity)
    if toolpath:
        for path in toolpath:
            sys.argv += ['--toolpath', path]

    # Run the entry tests first, since these need to be the first to import the
    # 'entry' module.
    test_name = args and args[0] or None
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for module in (entry_test.TestEntry, ftest.TestFunctional,
                   fdt_test.TestFdt, elf_test.TestElf, image_test.TestImage,
                   cbfs_util_test.TestCbfs):
        # Tell the test module about our arguments, if it is interested
        if hasattr(module, 'setup_test_args'):
            setup_test_args = getattr(module, 'setup_test_args')
            setup_test_args(preserve_indir=test_preserve_dirs,
                            preserve_outdirs=test_preserve_dirs
                            and test_name is not None,
                            toolpath=toolpath,
                            verbosity=verbosity)
        if test_name:
            try:
                suite.addTests(loader.loadTestsFromName(test_name, module))
            except AttributeError:
                continue
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))
    if use_concurrent and processes != 1:
        concurrent_suite = ConcurrentTestSuite(
            suite, fork_for_tests(processes or multiprocessing.cpu_count()))
        concurrent_suite.run(result)
    else:
        suite.run(result)

    # Remove errors which just indicate a missing test. Since Python v3.5, if an
    # ImportError or AttributeError occurs while traversing a name, a synthetic
    # test that raises that error when run is returned. These errors are
    # included in the errors accumulated by result.errors.
    if test_name:
        errors = []
        for test, err in result.errors:
            if ("has no attribute '%s'" % test_name) not in err:
                errors.append((test, err))
            result.testsRun -= 1
        result.errors = errors

    print(result)
    for test, err in result.errors:
        print(test.id(), err)
    for test, err in result.failures:
        print(err, result.failures)
    if result.skipped:
        print('%d binman test%s SKIPPED:' %
              (len(result.skipped), 's' if len(result.skipped) > 1 else ''))
        for skip_info in result.skipped:
            print('%s: %s' % (skip_info[0], skip_info[1]))
    if result.errors or result.failures:
        print('binman tests FAILED')
        return 1
    return 0
Example #40
                files_patched.append(l[len("patching file '"):-1])
            elif l.startswith("patching file "):
                files_patched.append(l[len("patching file "):])
        print(result[0])
        if args.add:
            subprocess.call(["git", "add"] + files_patched)
            print("Done.\nAdded to git:\n" + '\n'.join(files_patched))
        else:
            print("Done.\nYou now need to run:\n" +
                  '\n'.join('git add ' + x for x in files_patched))
    else:
        test_loader = unittest2.TestLoader()
        all_test_names = ["test_" + t.name for t in TEST_CASES]
        test_loader.sortTestMethodsUsing = lambda x, y: cmp(
            all_test_names.index(x), all_test_names.index(y))
        suite = test_loader.loadTestsFromTestCase(IntegrationTests)
        if args.jobs > 1:
            suite = ConcurrentTestSuite(suite, fork_for_tests(args.jobs))
        if hasattr(sys.stderr, "isatty") and sys.stderr.isatty():
            test_result = colour_runner.runner.ColourTextTestRunner(
                verbosity=2).run(suite)
        else:
            test_result = unittest2.TextTestRunner(verbosity=2).run(suite)
        if not test_result.wasSuccessful():
            print('\n***\nRun one of these:\n' +
                  './integration_tests.py --fix\n' +
                  './integration_tests.py --fix --add\n' + '***\n')
            exit(1)
        else:
            exit(0)
        sql.upload_dataset()

    # Create and run suite
    suite = unittest.TestSuite()

    for test in to_run:
        suite.addTest(SQLShare(test))

    result = unittest.TestResult(verbosity=2)
    runner = unittest.TextTestRunner(stream=sys.stdout)
    #runner.run(suite)

    # Concurrency
    if settings['concurrent_tests']:
        from concurrencytest import ConcurrentTestSuite, fork_for_tests
        concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(settings['concurrent_tests']))
        runner.run(concurrent_suite)
    else:
        runner.run(suite)
        

    for dataset in to_upload:
        dataset_name = dataset['dataset_name']
        if dataset_name is not sql.to_delete_dataset:
            sql.open_dataset(dataset_name)
            sql.delete_dataset()

    sql.tearDown()
    del sql    
Example #42
#!/usr/bin/python
# vim: set expandtab ts=4
import unittest
from concurrencytest import ConcurrentTestSuite, fork_for_tests
import IndexedTest
import ChangeOfBaseTests

if __name__ == '__main__':
    s = unittest.TestLoader().loadTestsFromTestCase(IndexedTest.IndexedTest)
    suites = [ChangeOfBaseTests.suite()]
    for suite in suites:
        s.addTests(suite)
    # Run same tests across 16 processes
    concurrent_suite = ConcurrentTestSuite(s, fork_for_tests(16))
    runner = unittest.TextTestRunner()
    runner.run(concurrent_suite)

import unittest, sys

import testcases.regression_test_1
import testcases.regression_test_2

from concurrencytest import ConcurrentTestSuite, fork_for_tests

if __name__ == '__main__':
    loader = unittest.TestLoader()
    suite = unittest.BaseTestSuite()

    suite.addTests(loader.loadTestsFromModule(testcases.regression_test_1))
    suite.addTests(loader.loadTestsFromModule(testcases.regression_test_2)) 
       

    runner = unittest.TextTestRunner()

    concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(10))
    result = runner.run(concurrent_suite)

    if result.wasSuccessful():
        sys.exit(0)

    sys.exit(1)
    
Example #44
    def assert_equal(self, iteration, a, b):
        if iteration == self.iteration_max: return True #self.assertEqual(a, b)
        return a == b

if __name__ == '__main__':
    TestCodeSnippets.dump = True
    os.system('rm -r paper_statistics/pyct_run_pyct*')
    os.system('mkdir -p paper_statistics/pyct_run_pyct')

    # load the TestSuite
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTests(loader.loadTestsFromTestCase(TestCodeSnippets))

    # start preparation
    def job():
        while len(next(os.walk('paper_statistics/pyct_run_pyct'))[2]) != suite.countTestCases(): pass
        os.system('echo "ID|Function|Line Coverage|Time (sec.)|# of SMT files|# of SAT|Time of SAT|# of UNSAT|Time of UNSAT|# of OTHERWISE|Time of OTHERWISE" > paper_statistics/pyct_run_pyct.csv')
        os.system('cat paper_statistics/pyct_run_pyct/*.csv >> paper_statistics/pyct_run_pyct.csv')
        os.system('rm -r paper_statistics/pyct_run_pyct')
        pid = os.getpid()
        os.kill(pid, signal.SIGTERM) #or signal.SIGKILL
    t = threading.Thread(target = job)
    t.start()

    # run the TestSuite
    concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(args.num))
    result = unittest.TextTestRunner().run(concurrent_suite)
    result.stop()

    print('Finish the integration test!!!')
Example #45
    explicitly_enumerated_tests, "--all-but" in sys.argv
)
if "--list" in sys.argv:
    test_list = tests_from_suite(discovered_tests)
    print("\n".join(sorted(test_list)))
    print("{0} tests found".format(len(test_list)))
    sys.exit()

tests_to_run = discovered_tests
if run_concurrently:
    # replace the partitioning function with our own
    concurrencytest.partition_tests = partition_tests
    tests_to_run = ConcurrentTestSuite(
        discovered_tests,
        # the number doesn't matter, we do our own partitioning which ignores it
        concurrencytest.fork_for_tests(1)
    )
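
# Illustrative sketch only (not from the original source): concurrencytest's
# partition_tests(suite, count) must return one list of tests per forked worker,
# and the project's own replacement is defined elsewhere in the file (not shown
# in this snippet). A hypothetical version that groups tests by TestCase class
# and ignores the requested count might look like this (assumes `unittest` is
# already imported, as elsewhere in this snippet):
def _example_partition_tests(suite, count):
    def flatten(tests):
        # A TestSuite iterates over its members, which may themselves be suites.
        for item in tests:
            if isinstance(item, unittest.TestSuite):
                yield from flatten(item)
            else:
                yield item

    groups = {}
    for test in flatten(suite):
        groups.setdefault(type(test), []).append(test)
    # One partition per test class; the requested count is deliberately ignored.
    return list(groups.values())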


use_improved_result_class = (
    sys.stdout.isatty()
    and
    sys.stderr.isatty()
    and
    "--vanilla" not in sys.argv
)

resultclass = unittest.TextTestResult
if use_improved_result_class:
    from pcs_test.tools.color_text_runner import get_text_test_result_class
    resultclass = get_text_test_result_class(
Example #46
    explicitly_enumerated_tests, "--all-but" in sys.argv
)
if "--list" in sys.argv:
    test_list = tests_from_suite(discovered_tests)
    print("\n".join(sorted(test_list)))
    print("{0} tests found".format(len(test_list)))
    sys.exit()

tests_to_run = discovered_tests
if run_concurrently:
    # replace the partitioning function with our own
    concurrencytest.partition_tests = partition_tests
    tests_to_run = ConcurrentTestSuite(
        discovered_tests,
        # the number doesn't matter, we do our own partitioning which ignores it
        concurrencytest.fork_for_tests(1)
    )


use_improved_result_class = (
    sys.stdout.isatty()
    and
    sys.stderr.isatty()
    and
    "--vanilla" not in sys.argv
)

resultclass = unittest.TextTestResult
if use_improved_result_class:
    from pcs.test.tools.color_text_runner import get_text_test_result_class
    resultclass = get_text_test_result_class(
Example #47
def testSuiteFromTCs(*tcs):
    loader = TestLoader()
    loadedTcs = [loader.loadTestsFromTestCase(tc) for tc in tcs]
    suite = TestSuite(loadedTcs)
    return suite


suite = testSuiteFromTCs(
    JsonWriterTC,
    VcdParserTC,
    VcdWriterTC,
)


if __name__ == '__main__':
    runner = TextTestRunner(verbosity=2)

    try:
        from concurrencytest import ConcurrentTestSuite, fork_for_tests
        useParallerlTest = True
    except ImportError:
        # concurrencytest is not installed, use regular test runner
        useParallerlTest = False

    if useParallerlTest:
        # Run same tests across 4 processes
        concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests())
        runner.run(concurrent_suite)
    else:
        sys.exit(not runner.run(suite).wasSuccessful())
Example #48
        sql.upload_dataset()

    # Create and run suite
    suite = unittest.TestSuite()

    for test in to_run:
        suite.addTest(SQLShare(test))

    result = unittest.TestResult(verbosity=2)
    runner = unittest.TextTestRunner(stream=sys.stdout)
    #runner.run(suite)

    # Concurrency
    if settings['concurrent_tests']:
        from concurrencytest import ConcurrentTestSuite, fork_for_tests
        concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(settings['concurrent_tests']))
        runner.run(concurrent_suite)
    else:
        runner.run(suite)
        

    for dataset in to_upload:
        dataset_name = dataset['dataset_name']
        if dataset_name is not sql.to_delete_dataset:
            sql.open_dataset(dataset_name)
            sql.delete_dataset()

    sql.tearDown()
    del sql