def RunTests(self):
    """Runs tests on a single device.

    Returns:
      A TestResults object.
    """
    try:
        self.test_package.CreateTestRunnerScript(self._gtest_filter,
                                                 self._test_arguments)
        self.test_results = self.test_package.RunTestsAndListResults()
    except errors.DeviceUnresponsiveError as e:
        # Only swallow the error if the device has really dropped off the
        # bus; if it is still attached, the failure is genuine, so re-raise.
        if android_commands.IsDeviceAttached(self.device):
            raise

        # TODO(frankf): We should report these as "skipped" not "failures".
        # Wrap each test named in the filter as a placeholder failure.
        logging.warning(e)
        failed_tests = [BaseTestResult(t, '')
                        for t in self._gtest_filter.split(':')]
        self.test_results = TestResults.FromRun(
            failed=failed_tests, device_exception=self.device)

    return self.test_results
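
A minimal, standalone sketch of the fallback above: when the device drops off the bus mid-run, every test named in the colon-separated gtest filter is recorded as a placeholder failure. The filter string is made up, and plain (name, log) tuples stand in for BaseTestResult.

gtest_filter = 'FooTest.Bar:BazTest.Qux'

# Mirrors failed_tests above; the tuples are hypothetical stand-ins for
# BaseTestResult(name, log).
placeholder_failures = [(name, '') for name in gtest_filter.split(':')]
print(placeholder_failures)  # [('FooTest.Bar', ''), ('BazTest.Qux', '')]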
Example #2
def RunTests(exe, device, test_suite, gtest_filter, test_arguments, rebaseline,
             timeout, performance_test, cleanup_test_files, tool,
             log_dump_name, fast_and_loose):
    """Runs the tests.

  Args:
    exe: boolean to state if we are using the exe based test runner
    device: Device to run the tests.
    test_suite: A specific test suite to run, empty to run all.
    gtest_filter: A gtest_filter flag.
    test_arguments: Additional arguments to pass to the test binary.
    rebaseline: Whether or not to run tests in isolation and update the filter.
    timeout: Timeout for each test.
    performance_test: Whether or not performance test(s).
    cleanup_test_files: Whether or not to cleanup test files on device.
    tool: Name of the Valgrind tool.
    log_dump_name: Name of log dump file.
    fast_and_loose: if set, skip copying data files.

  Returns:
    A TestResults object.
  """
    results = []

    if test_suite:
        if not os.path.exists(test_suite):
            logging.critical('Unrecognized test suite %s, supported: %s',
                             test_suite, _TEST_SUITES)
            if test_suite in _TEST_SUITES:
                logging.critical(
                    '(Remember to include the path: out/Release/%s)',
                    test_suite)
            test_suite_basename = os.path.basename(test_suite)
            if test_suite_basename in _TEST_SUITES:
                logging.critical('Try "make -j15 %s"', test_suite_basename)
            else:
                logging.critical('Unrecognized test suite, supported: %s',
                                 _TEST_SUITES)
            return TestResults.FromRun([], [BaseTestResult(test_suite, '')],
                                       False, False)
        fully_qualified_test_suites = [test_suite]
    else:
        fully_qualified_test_suites = FullyQualifiedTestSuites(
            exe, _TEST_SUITES)
    debug_info_list = []
    print('Known suites: ' + str(_TEST_SUITES))
    print('Running these: ' + str(fully_qualified_test_suites))
    for t in fully_qualified_test_suites:
        buildbot_report.PrintNamedStep('Test suite %s' % os.path.basename(t))
        test = SingleTestRunner(device, t, gtest_filter, test_arguments,
                                timeout, rebaseline, performance_test,
                                cleanup_test_files, tool, 0,
                                bool(log_dump_name), fast_and_loose)
        test.Run()

        results += [test.test_results]
        # Collect debug info.
        debug_info_list += [test.dump_debug_info]
        if rebaseline:
            test.UpdateFilter(test.test_results.failed)
        test.test_results.LogFull('Unit test', os.path.basename(t))
    # Zip all debug info outputs into a file named by log_dump_name.
    debug_info.GTestDebugInfo.ZipAndCleanResults(
        os.path.join(constants.CHROME_DIR, 'out', 'Release',
                     'debug_info_dumps'), log_dump_name,
        [d for d in debug_info_list if d])

    PrintAnnotationForTestResults(test.test_results)

    return TestResults.FromTestResults(results)
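
A hypothetical invocation of the runner above, assuming the module's own imports, an attached device, and a built out/Release suite; every value below is a placeholder.

results = RunTests(exe=False,
                   device='0123456789ABCDEF',
                   test_suite='out/Release/base_unittests',
                   gtest_filter='',
                   test_arguments='',
                   rebaseline=False,
                   timeout=60,
                   performance_test=False,
                   cleanup_test_files=True,
                   tool='',
                   log_dump_name=None,
                   fast_and_loose=False)
print('%d test(s) failed' % len(results.failed))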
Example #3
    def _WatchTestOutput(self, p):
        """Watches the test output.

        Args:
          p: The process generating output, as created by pexpect.spawn.
        """
        ok_tests = []
        failed_tests = []
        crashed_tests = []
        timed_out = False
        overall_fail = False

        # Test case statuses.
        re_run = re.compile(r'\[ RUN      \] ?(.*)\r\n')
        re_fail = re.compile(r'\[  FAILED  \] ?(.*)\r\n')
        re_ok = re.compile(r'\[       OK \] ?(.*?) .*\r\n')

        # Test run statuses.
        re_passed = re.compile(r'\[  PASSED  \] ?(.*)\r\n')
        re_runner_fail = re.compile(r'\[ RUNNER_FAILED \] ?(.*)\r\n')
        # Signal handlers are installed before starting tests
        # to output the CRASHED marker when a crash happens.
        re_crash = re.compile(r'\[ CRASHED      \](.*)\r\n')

        try:
            while True:
                found = p.expect([re_run, re_passed, re_runner_fail],
                                 timeout=self.timeout)
                if found == 1:  # re_passed
                    break
                elif found == 2:  # re_runner_fail
                    overall_fail = True
                    break
                else:  # re_run
                    if self.dump_debug_info:
                        self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')

                    full_test_name = p.match.group(1).replace('\r', '')
                    found = p.expect([re_ok, re_fail, re_crash],
                                     timeout=self.timeout)
                    if found == 0:  # re_ok
                        if full_test_name == p.match.group(1).replace(
                                '\r', ''):
                            ok_tests += [
                                BaseTestResult(full_test_name, p.before)
                            ]
                    elif found == 2:  # re_crash
                        crashed_tests += [
                            BaseTestResult(full_test_name, p.before)
                        ]
                        overall_fail = True
                        break
                    else:  # re_fail
                        failed_tests += [
                            BaseTestResult(full_test_name, p.before)
                        ]
        except pexpect.EOF:
            logging.error('Test terminated - EOF')
            raise errors.DeviceUnresponsiveError('Device may be offline')
        except pexpect.TIMEOUT:
            logging.error('Test terminated after %d second timeout.',
                          self.timeout)
            timed_out = True
        finally:
            p.close()

        ret_code = self._GetGTestReturnCode()
        if ret_code:
            logging.critical(
                'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s',
                ret_code, p.before, p.after)
            overall_fail = True

        # Create TestResults and return
        return TestResults.FromRun(ok=ok_tests,
                                   failed=failed_tests,
                                   crashed=crashed_tests,
                                   timed_out=timed_out,
                                   overall_fail=overall_fail)
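
A standalone sketch of the parsing above, run against canned output instead of a live pexpect child. The test names are invented, but the bracketed markers follow the standard gtest format that the regexes expect.

import re

re_run = re.compile(r'\[ RUN      \] ?(.*)\r\n')
re_ok = re.compile(r'\[       OK \] ?(.*?) .*\r\n')
re_fail = re.compile(r'\[  FAILED  \] ?(.*)\r\n')

sample = ('[ RUN      ] FooTest.Bar\r\n'
          '[       OK ] FooTest.Bar (12 ms)\r\n'
          '[ RUN      ] FooTest.Baz\r\n'
          '[  FAILED  ] FooTest.Baz\r\n')

print(re_run.findall(sample))   # ['FooTest.Bar', 'FooTest.Baz']
print(re_ok.findall(sample))    # ['FooTest.Bar']
print(re_fail.findall(sample))  # ['FooTest.Baz']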