    def RunShardedTests(self):
        """Runs tests in parallel using a pool of workers.

        Returns:
          A TestResults object aggregating the results from all test runs.
        """
        logging.warning('*' * 80)
        logging.warning('Sharding across %d devices.',
                        len(self.attached_devices))
        logging.warning('Note that the output is not synchronized.')
        logging.warning('Look for the "Final result" banner at the end.')
        logging.warning('*' * 80)
        all_passed = []
        test_results = TestResults()
        tests_to_run = self.tests
        for retry in xrange(self.retries):
            logging.warning('Try %d of %d', retry + 1, self.retries)
            self._SetupSharding(tests_to_run)
            test_runners = self._MakeTestRunners(self.attached_devices)
            logging.warning('Starting...')
            pool = multiprocessing.Pool(len(self.attached_devices),
                                        SetTestsContainer,
                                        [PythonTestSharder.tests_container])

            # List of TestResults objects from each test execution.
            try:
                results_lists = pool.map(_DefaultRunnable, test_runners)
            except Exception:
                logging.exception('Unable to run tests. Something with the '
                                  'PythonTestRunners has gone wrong.')
                raise FatalTestException(
                    'PythonTestRunners were unable to run tests.')

            test_results = TestResults.FromTestResults(results_lists)
            # Accumulate passing results.
            all_passed += test_results.ok
            # If we have failed tests, map them to tests to retry.
            failed_tests = test_results.GetAllBroken()
            tests_to_run = self._GetTestsToRetry(self.tests, failed_tests)

            # Bail out early if we have no more tests. This can happen if all
            # tests pass before we're out of retries, for example.
            if not tests_to_run:
                break

        # all_passed has accumulated all passing test results.
        # test_results will have the results from the most recent run, which
        # could include a variety of failure modes (unknown, crashed, failed,
        # etc).
        final_results = test_results
        final_results.ok = all_passed

        return final_results
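
# A minimal, standalone sketch of the retry loop in RunShardedTests above,
# using hypothetical names (retry_until_green, run_once, _fake_run) rather
# than the real sharder internals: passing tests accumulate across attempts,
# only broken tests are retried, and the loop exits early once nothing is
# left to run.
def retry_until_green(tests, run_once, retries=3):
    """run_once(tests) -> (passed, broken); both are lists of test names."""
    all_passed = []
    broken = []
    to_run = list(tests)
    for _ in range(retries):
        passed, broken = run_once(to_run)
        all_passed.extend(passed)
        to_run = broken
        if not to_run:
            break
    return all_passed, broken

# Usage: 'b' fails on the first attempt and passes on the retry.
_attempts = {'b': 0}

def _fake_run(tests):
    passed, broken = [], []
    for t in tests:
        if t == 'b' and _attempts['b'] == 0:
            _attempts['b'] += 1
            broken.append(t)
        else:
            passed.append(t)
    return passed, broken

print(retry_until_green(['a', 'b'], _fake_run))  # -> (['a', 'b'], [])
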
    def RunTests(self):
        """Runs tests from the shared pool of tests, aggregating results.

        Returns:
          A TestResults object aggregating the results of every test this
          runner executed.
        """
        tests = PythonTestSharder.tests_container

        results = []
        for t in tests:
            res = CallPythonTest(t, self.options)
            results.append(res)

        return TestResults.FromTestResults(results)
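
# A self-contained sketch of the Pool-initializer pattern used above: the
# pool is created with an initializer that stashes a shared, process-safe
# container in a per-worker global (mirroring SetTestsContainer and
# PythonTestSharder.tests_container), and each worker then drains tests from
# it. The names _worker_tests, _init_worker and _drain are hypothetical, and
# a managed Queue stands in for the real shared container.
try:
    import queue  # Python 3.
except ImportError:
    import Queue as queue  # Python 2.
import multiprocessing

_worker_tests = None  # Per-process global, set by the Pool initializer.

def _init_worker(shared_tests):
    global _worker_tests
    _worker_tests = shared_tests

def _drain(worker_id):
    # Pull tests until the shared queue is empty, so faster workers naturally
    # pick up more of the remaining work.
    ran = []
    while True:
        try:
            ran.append(_worker_tests.get_nowait())
        except queue.Empty:
            return worker_id, ran

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    shared = manager.Queue()
    for name in ('testFoo', 'testBar', 'testBaz'):
        shared.put(name)
    pool = multiprocessing.Pool(2, _init_worker, [shared])
    print(pool.map(_drain, range(2)))
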
Example #3
def SummarizeResults(java_results, python_results, annotation):
    """Summarize the results from the various test types.

    Args:
      java_results: A TestResults object with Java test case results.
      python_results: A TestResults object with Python test case results.
      annotation: The annotation used for these results.

    Returns:
      A tuple (all_results, summary_string, num_failing).
    """
    all_results = TestResults.FromTestResults([java_results, python_results])
    summary_string = all_results.LogFull('Instrumentation', annotation)
    num_failing = (len(all_results.failed) + len(all_results.crashed) +
                   len(all_results.unknown))
    return all_results, summary_string, num_failing
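
# A self-contained sketch of the aggregation SummarizeResults relies on,
# assuming (based on the usage above) that TestResults.FromTestResults
# concatenates each result bucket across the given result sets. FakeResults
# and merge_buckets are hypothetical stand-ins, not the real TestResults API.
import collections

FakeResults = collections.namedtuple(
    'FakeResults', ['ok', 'failed', 'crashed', 'unknown'])

def merge_buckets(result_sets):
    merged = dict(ok=[], failed=[], crashed=[], unknown=[])
    for r in result_sets:
        for bucket in merged:
            merged[bucket].extend(getattr(r, bucket))
    return FakeResults(**merged)

java_res = FakeResults(ok=['J.ok'], failed=['J.bad'], crashed=[], unknown=[])
python_res = FakeResults(ok=['P.ok'], failed=[], crashed=['P.crash'],
                         unknown=[])
both = merge_buckets([java_res, python_res])
num_failing = len(both.failed) + len(both.crashed) + len(both.unknown)
print(num_failing)  # -> 2
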
def DispatchInstrumentationTests(options):
    """Dispatches the Java and Python instrumentation tests, sharding if possible.

    Uses the logging module to print the combined final results and summary
    of the Java and Python tests. If the java_only option is set, only the
    Java tests run. If the python_only option is set, only the Python tests
    run. If neither is set, both Java and Python tests run.

    Args:
      options: Command-line options for running the Java and Python tests.

    Returns:
      An integer representing the number of failing tests.
    """
    if not options.keep_test_server_ports:
        # Reset the test port allocation. It's important to do it before starting
        # to dispatch any tests.
        if not ports.ResetTestServerPortAllocation():
            raise Exception('Failed to reset test server port.')

    start_date = int(time.time() * 1000)
    java_results = TestResults()
    python_results = TestResults()

    if options.run_java_tests:
        java_results = run_java_tests.DispatchJavaTests(
            options, [
                apk_info.ApkInfo(options.test_apk_path,
                                 options.test_apk_jar_path)
            ])
    if options.run_python_tests:
        python_results = run_python_tests.DispatchPythonTests(options)

    all_results, summary_string, num_failing = SummarizeResults(
        java_results, python_results, options.annotation)

    if options.flakiness_dashboard_server:
        flakiness_dashboard_results_uploader.Upload(
            options.flakiness_dashboard_server,
            'Chromium_Android_Instrumentation',
            TestResults.FromTestResults([java_results, python_results]))

    return num_failing
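
# One plausible way a caller might consume the failing-test count returned
# above (hypothetical wrapper, not part of the original script): clamp it to
# a process exit status, since shells only see the low byte of an exit code.
import sys

def _exit_code_from_failures(num_failing):
    return min(num_failing, 1)  # 0 == success, 1 == at least one failure.

if __name__ == '__main__':
    # In the real script this would come from DispatchInstrumentationTests();
    # a literal is used here purely for illustration.
    sys.exit(_exit_code_from_failures(2))
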
def DispatchInstrumentationTests(options):
    """Dispatches the Java and Python instrumentation tests, sharding if possible.

    Uses the logging module to print the combined final results and summary
    of the Java and Python tests. If the java_only option is set, only the
    Java tests run. If the python_only option is set, only the Python tests
    run. If neither is set, both Java and Python tests run.

    Args:
      options: Command-line options for running the Java and Python tests.

    Returns:
      An integer representing the number of broken tests.
    """
    if not options.keep_test_server_ports:
        # Reset the test port allocation. It's important to do it before starting
        # to dispatch any tests.
        if not ports.ResetTestServerPortAllocation():
            raise Exception('Failed to reset test server port.')

    start_date = int(time.time() * 1000)
    java_results = TestResults()
    python_results = TestResults()

    if options.run_java_tests:
        java_results = run_java_tests.DispatchJavaTests(
            options, [
                apk_info.ApkInfo(options.test_apk_path,
                                 options.test_apk_jar_path)
            ])
    if options.run_python_tests:
        python_results = run_python_tests.DispatchPythonTests(options)

    all_results = TestResults.FromTestResults([java_results, python_results])

    all_results.LogFull(test_type='Instrumentation',
                        test_package=options.test_apk,
                        annotation=options.annotation,
                        build_type=options.build_type,
                        flakiness_server=options.flakiness_dashboard_server)

    return len(all_results.GetAllBroken())
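
# The two DispatchInstrumentationTests variants above count broken tests
# differently in form only: the first sums the failed, crashed and unknown
# buckets itself, the second delegates to GetAllBroken(). The sketch below
# spells out that assumed equivalence; _get_all_broken and FakeResults are
# hypothetical stand-ins, not the real TestResults API.
import collections

FakeResults = collections.namedtuple(
    'FakeResults', ['ok', 'failed', 'crashed', 'unknown'])

def _get_all_broken(results):
    # Assumed behaviour of TestResults.GetAllBroken(): every non-passing test.
    return results.failed + results.crashed + results.unknown

r = FakeResults(ok=['t1'], failed=['t2'], crashed=['t3'], unknown=[])
broken = _get_all_broken(r)
assert len(broken) == len(r.failed) + len(r.crashed) + len(r.unknown)  # 2
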
Example #6
def RunTests(exe, device, test_suite, gtest_filter, test_arguments, rebaseline,
             timeout, performance_test, cleanup_test_files, tool,
             log_dump_name, fast_and_loose):
    """Runs the tests.

    Args:
      exe: Whether to use the exe-based test runner.
      device: Device on which to run the tests.
      test_suite: A specific test suite to run, empty to run all.
      gtest_filter: A gtest_filter flag.
      test_arguments: Additional arguments to pass to the test binary.
      rebaseline: Whether or not to run tests in isolation and update the
        filter.
      timeout: Timeout for each test.
      performance_test: Whether or not this is a performance test.
      cleanup_test_files: Whether or not to clean up test files on the device.
      tool: Name of the Valgrind tool.
      log_dump_name: Name of the log dump file.
      fast_and_loose: If set, skip copying data files.

    Returns:
      A TestResults object.
    """
    results = []

    if test_suite:
        if not os.path.exists(test_suite):
            logging.critical('Unrecognized test suite %s, supported: %s',
                             test_suite, _TEST_SUITES)
            if test_suite in _TEST_SUITES:
                logging.critical(
                    '(Remember to include the path: out/Release/%s)',
                    test_suite)
            test_suite_basename = os.path.basename(test_suite)
            if test_suite_basename in _TEST_SUITES:
                logging.critical('Try "make -j15 %s"', test_suite_basename)
            else:
                logging.critical('Unrecognized test suite, supported: %s',
                                 _TEST_SUITES)
            return TestResults.FromRun([], [BaseTestResult(test_suite, '')],
                                       False, False)
        fully_qualified_test_suites = [test_suite]
    else:
        fully_qualified_test_suites = FullyQualifiedTestSuites(
            exe, _TEST_SUITES)
    debug_info_list = []
    print 'Known suites: ' + str(_TEST_SUITES)
    print 'Running these: ' + str(fully_qualified_test_suites)
    for t in fully_qualified_test_suites:
        buildbot_report.PrintNamedStep('Test suite %s' % os.path.basename(t))
        test = SingleTestRunner(device, t, gtest_filter, test_arguments,
                                timeout, rebaseline, performance_test,
                                cleanup_test_files, tool, 0,
                                bool(log_dump_name), fast_and_loose)
        test.Run()

        results += [test.test_results]
        # Collect debug info.
        debug_info_list += [test.dump_debug_info]
        if rebaseline:
            test.UpdateFilter(test.test_results.failed)
        test.test_results.LogFull('Unit test', os.path.basename(t))
    # Zip all debug info outputs into a file named by log_dump_name.
    debug_info.GTestDebugInfo.ZipAndCleanResults(
        os.path.join(constants.CHROME_DIR, 'out', 'Release',
                     'debug_info_dumps'), log_dump_name,
        [d for d in debug_info_list if d])

    PrintAnnotationForTestResults(test.test_results)

    return TestResults.FromTestResults(results)
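
# A standalone sketch of the suite-path check at the top of RunTests above;
# validate_suite_path and _KNOWN_SUITES are hypothetical stand-ins for the
# real logic and _TEST_SUITES. An unrecognized path is rejected, with a more
# specific hint when the suite name is known but not found at that path.
import os

_KNOWN_SUITES = ['base_unittests', 'net_unittests']

def validate_suite_path(path):
    if os.path.exists(path):
        return path
    basename = os.path.basename(path)
    if basename in _KNOWN_SUITES:
        raise ValueError('Known suite %s not found at %s; check the path '
                         '(e.g. out/Release/%s) or build it first.'
                         % (basename, path, basename))
    raise ValueError('Unrecognized suite %r, supported: %s'
                     % (path, _KNOWN_SUITES))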