def run_test_concurrently(self, test, success):
    res = results.TextTestResult(StringIO(), verbosity=0)
    suite = unittest.TestSuite([test])
    # Run tests across 2 processes
    concurrent_suite = testtools.ConcurrentTestSuite(
        suite, concurrency.fork_for_tests(2))
    res.startTestRun()
    concurrent_suite.run(res)
    res.stopTestRun()
    self.assertEqual(success, res.wasSuccessful())
    self.assertEqual(1, res.testsRun)
    return res
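This helper is an excerpt: `results`, `concurrency` (providing `fork_for_tests`), `StringIO`, `unittest` and `testtools` come from the surrounding module's imports. A minimal, self-contained sketch of the same pattern, using only the standard library, testtools, and a simple thread-based splitter (`split_suite` and `ExampleTest` below are illustrative names, not part of testtools):

import unittest
from io import StringIO

import testtools
from testtools.testsuite import iterate_tests


class ExampleTest(unittest.TestCase):
    def test_passes(self):
        self.assertTrue(True)


def split_suite(suite):
    # ConcurrentTestSuite calls this with the original suite and runs each
    # returned suite in its own thread; here every test gets its own suite.
    return [unittest.TestSuite([case]) for case in iterate_tests(suite)]


suite = unittest.TestSuite([ExampleTest('test_passes')])
concurrent_suite = testtools.ConcurrentTestSuite(suite, split_suite)
res = testtools.TextTestResult(StringIO())
res.startTestRun()
try:
    concurrent_suite.run(res)
finally:
    res.stopTestRun()
print(res.wasSuccessful(), res.testsRun)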
Example #2
def test(suite, result, processes):
    """Test `suite`, emitting results to `result`.

    :param suite: The test suite to run.
    :param result: The test result to which to report.
    :param processes: The number of processes to split up tests amongst.
    :return: A boolean signalling success or not.
    """
    split = make_splitter(processes)
    suite = testtools.ConcurrentTestSuite(suite, split)

    result.startTestRun()
    try:
        suite.run(result)
    finally:
        result.stopTestRun()

    return result.wasSuccessful()
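`make_splitter` is defined elsewhere in the surrounding module and is not shown; conceptually it returns a callable suitable for `ConcurrentTestSuite`'s make_tests hook, partitioning the suite into `processes` sub-suites. A hypothetical sketch of such a splitter, dealing tests out round-robin:

import itertools
import unittest

from testtools.testsuite import iterate_tests


def make_splitter(processes):
    """Return a callable that splits a suite into `processes` sub-suites."""
    def split(suite):
        partitions = [unittest.TestSuite() for _ in range(processes)]
        # Deal tests out round-robin; ConcurrentTestSuite runs each
        # partition concurrently and merges the results.
        for partition, case in zip(itertools.cycle(partitions),
                                   iterate_tests(suite)):
            partition.addTest(case)
        return partitions
    return split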
Example #3
            config = pycallgraph.Config(max_depth=10)
            config.trace_filter = pycallgraph.GlobbingFilter(exclude=[
                'pycallgraph.*',
                'unittest.*',
                'testtools.*',
                'nova.tests.unit.*',
            ])
            graphviz = output.GraphvizOutput(output_file='scheduler.png')

            with pycallgraph.PyCallGraph(output=graphviz, config=config):
                per_request_ms = run_test()

        else:
            per_request_ms = run_test()

        # This has proved to be around 1 ms on a random dev box
        # But this is here so you can do simple performance testing easily.
        self.assertLess(per_request_ms, 1000)


if __name__ == '__main__':
    # A handy tool to help profile the scheduler's performance
    ENABLE_PROFILER = True
    import unittest
    suite = unittest.TestSuite()
    test = "test_performance_check_select_destination"
    test_case = CachingSchedulerTestCase(test)
    suite.addTest(test_case)
    runner = unittest.TextTestRunner()
    runner.run(suite)
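The block above is an excerpt: `run_test`, `CachingSchedulerTestCase` and the enclosing test method are defined elsewhere. A self-contained sketch of the same pycallgraph pattern, profiling an arbitrary callable and writing the call graph to a PNG (assumes the third-party pycallgraph package and Graphviz are installed; file name and filter are illustrative):

import pycallgraph
from pycallgraph.output import GraphvizOutput

config = pycallgraph.Config(max_depth=5)
config.trace_filter = pycallgraph.GlobbingFilter(exclude=['pycallgraph.*'])
graphviz = GraphvizOutput(output_file='profile.png')

# Everything called inside the context manager is traced and rendered
# to profile.png when the block exits.
with pycallgraph.PyCallGraph(output=graphviz, config=config):
    sum(i * i for i in range(10000))  # stand-in for the code under test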
Example #4
def runtests(test_regexps,
             results_directory,
             out,
             test_dir='.',
             collect_only=False,
             browser_factory=None,
             report_format='console',
             shared_directory=None,
             screenshots_on=False,
             concurrency_num=1,
             failfast=False,
             debug=False,
             extended=False,
             includes=None,
             excludes=None):
    if not os.path.isdir(test_dir):
        raise RuntimeError('Specified directory %r does not exist' %
                           (test_dir, ))
    if browser_factory is None and collect_only is False:
        raise RuntimeError('A browser must be specified')
    shared_directory = find_shared_directory(test_dir, shared_directory)
    config.shared_directory = shared_directory
    if shared_directory is not None:
        sys.path.append(shared_directory)

    loader = loaders.SSTestLoader(results_directory, browser_factory,
                                  screenshots_on, debug, extended)
    alltests = loader.suiteClass()
    alltests.addTests(loader.discoverTestsFromTree(test_dir))
    alltests = filters.include_regexps(test_regexps, alltests)
    alltests = filters.exclude_regexps(excludes, alltests)

    if not alltests.countTestCases():
        # FIXME: Really needed? Can't we just rely on the number of tests
        # run? -- vila 2013-06-04
        raise RuntimeError('Did not find any tests')

    if collect_only:
        for t in testtools.testsuite.iterate_tests(alltests):
            out.write(t.id() + '\n')
        return 0

    txt_res = results.TextTestResult(out, failfast=failfast, verbosity=2)
    if report_format == 'xml':
        results_file = os.path.join(results_directory, 'results.xml')
        xml_stream = open(results_file, 'wb')
        result = testtools.testresult.MultiTestResult(
            txt_res, junitxml.JUnitXmlResult(xml_stream))
        result.failfast = failfast
    else:
        result = txt_res

    if concurrency_num == 1:
        suite = alltests
    else:
        suite = testtools.ConcurrentTestSuite(
            alltests, concurrency.fork_for_tests(concurrency_num))

    result.startTestRun()
    try:
        suite.run(result)
    except KeyboardInterrupt:
        out.write('Test run interrupted\n')
    result.stopTestRun()

    return len(result.failures) + len(result.errors)
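For a full run the return value is the combined count of failures and errors, so it can be passed straight to `sys.exit`; with `collect_only=True` it just writes the matching test ids to `out` and returns 0, and no browser factory is required (per the check at the top of the function). A hypothetical invocation in that listing mode, with illustrative directory names and regexp:

import sys

if __name__ == '__main__':
    exit_code = runtests(
        test_regexps=['login.*'],
        results_directory='results',
        out=sys.stdout,
        test_dir='tests',
        collect_only=True,
    )
    sys.exit(exit_code)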