Example #1
  def RebaselineTests(self):
    """Runs all available tests, restarting in case of failures."""
    if self._gtest_filter:
      all_tests = set(self._gtest_filter.split(':'))
    else:
      all_tests = set(self.test_package.GetAllTests())
    failed_results = set()
    executed_results = set()
    while True:
      executed_names = set([f.name for f in executed_results])
      self._gtest_filter = ':'.join(all_tests - executed_names)
      self.RunTestsWithFilter()
      failed_results.update(self.test_results.crashed,
                            self.test_results.failed)
      executed_results.update(self.test_results.crashed,
                              self.test_results.failed,
                              self.test_results.ok)
      executed_names = set([f.name for f in executed_results])
      logging.info('*' * 80)
      logging.info(self.device)
      logging.info('Executed: ' + str(len(executed_names)) + ' of ' +
                   str(len(all_tests)))
      logging.info('Failed so far: ' + str(len(failed_results)) + ' ' +
                   str([f.name for f in failed_results]))
      logging.info('Remaining: ' + str(len(all_tests - executed_names)) + ' ' +
                   str(all_tests - executed_names))
      logging.info('*' * 80)
      if executed_names == all_tests:
        break
    self.test_results = TestResults.FromRun(
        ok=list(executed_results - failed_results),
        failed=list(failed_results))
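
The heart of RebaselineTests is the retry loop: after every run (which may end early on a crash), the gtest filter is rebuilt from the tests that have not executed yet, so each iteration only re-runs the remainder. A minimal standalone sketch of that pattern, where run_batch is a hypothetical stand-in for RunTestsWithFilter that returns (ok_names, failed_names) sets for one batch; failed_names is assumed to include crashed tests:

import logging

def run_until_all_executed(all_tests, run_batch):
    # all_tests: set of test names; run_batch: callable taking a
    # colon-separated gtest filter and returning (ok_names, failed_names).
    executed, failed = set(), set()
    while executed != all_tests:
        ok_names, failed_names = run_batch(':'.join(all_tests - executed))
        executed |= ok_names | failed_names
        failed |= failed_names
        logging.info('Executed %d of %d; failed so far: %d',
                     len(executed), len(all_tests), len(failed))
    return all_tests - failed, failed

As in the original, the loop assumes each batch makes progress; a batch that executes nothing would spin forever.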
Example #2
    def RunTests(self):
        """Runs all tests (in rebaseline mode, runs each test in isolation).

    Returns:
      A TestResults object.
    """
        try:
            if self.test_package.rebaseline:
                self.RebaselineTests()
            else:
                if not self._gtest_filter:
                    self._gtest_filter = (
                        '-' + ':'.join(self.GetDisabledTests()) + ':' +
                        ':'.join([
                            '*.' + x + '*'
                            for x in self.test_package.GetDisabledPrefixes()
                        ]))
                self.RunTestsWithFilter()
        except errors.DeviceUnresponsiveError as e:
            # Swallow the error only if the device has really detached;
            # otherwise something else went wrong, so re-raise.
            if android_commands.IsDeviceAttached(self.device):
                raise

            # Wrap the results
            logging.warning(e)
            failed_tests = []
            for t in self._gtest_filter.split(':'):
                failed_tests += [BaseTestResult(t, '')]
            self.test_results = TestResults.FromRun(
                failed=failed_tests, device_exception=self.device)

        return self.test_results
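
The else branch builds a negative gtest filter: a leading '-' turns the colon-separated list into an exclusion list, and each disabled prefix becomes a '*.' + prefix + '*' wildcard pattern. A small sketch of that construction (the function name is illustrative):

def build_disabled_filter(disabled_tests, disabled_prefixes):
    # gtest treats a filter that starts with '-' as an exclusion list;
    # patterns are colon-separated and may contain '*' wildcards.
    patterns = list(disabled_tests) + ['*.' + p + '*'
                                       for p in disabled_prefixes]
    return '-' + ':'.join(patterns)

# build_disabled_filter(['Foo.Bar'], ['FLAKY_', 'FAILS_'])
#   -> '-Foo.Bar:*.FLAKY_*:*.FAILS_*'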
Example #3
    def RunTests(self):
        """Runs tests on a single device.

    Returns:
      A TestResults object.
    """
        try:
            self.test_package.CreateTestRunnerScript(self._gtest_filter,
                                                     self._test_arguments)
            self.test_results = self.test_package.RunTestsAndListResults()
        except errors.DeviceUnresponsiveError as e:
            # Swallow the error only if the device has really detached;
            # otherwise something else went wrong, so re-raise.
            if android_commands.IsDeviceAttached(self.device):
                raise

            # TODO(frankf): We should report these as "skipped" not "failures".
            # Wrap the results
            logging.warning(e)
            failed_tests = []
            for t in self._gtest_filter.split(':'):
                failed_tests += [BaseTestResult(t, '')]
            self.test_results = TestResults.FromRun(
                failed=failed_tests, device_exception=self.device)

        return self.test_results
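
When the device drops off the bus mid-run, every test named in the current filter is wrapped as a failure (the TODO notes these would be better reported as skips). A self-contained sketch of that wrapping, with a namedtuple standing in for the BaseTestResult type used above:

import collections

# Stand-in for BaseTestResult (test name + raw log text).
BaseTestResult = collections.namedtuple('BaseTestResult', ['name', 'log'])

def filter_to_failed_results(gtest_filter):
    # Every test in the colon-separated filter is marked failed with an
    # empty log, since the device vanished before producing output.
    return [BaseTestResult(t, '') for t in gtest_filter.split(':') if t]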
Example #4
def RunTests(device,
             test_suite,
             gtest_filter,
             test_arguments,
             rebaseline,
             timeout,
             performance_test,
             cleanup_test_files,
             tool,
             log_dump_name,
             apk,
             annotate=False):
    """Runs the tests.

  Args:
    device: Device to run the tests.
    test_suite: A specific test suite to run, empty to run all.
    gtest_filter: A gtest_filter flag.
    test_arguments: Additional arguments to pass to the test binary.
    rebaseline: Whether or not to run tests in isolation and update the filter.
    timeout: Timeout for each test.
    performance_test: Whether or not performance test(s).
    cleanup_test_files: Whether or not to cleanup test files on device.
    tool: Name of the Valgrind tool.
    log_dump_name: Name of log dump file.
    apk: boolean to state if we are using the apk based test runner
    annotate: should we print buildbot-style annotations?

  Returns:
    A TestResults object.
  """
    results = []

    if test_suite:
        global _TEST_SUITES
        if (not os.path.exists(test_suite)
                and os.path.splitext(test_suite)[1] != '.apk'):
            logging.critical('Unrecognized test suite %s, supported: %s',
                             test_suite, _TEST_SUITES)
            if test_suite in _TEST_SUITES:
                logging.critical(
                    '(Remember to include the path: out/Release/%s)',
                    test_suite)
            return TestResults.FromRun(failed=[BaseTestResult(test_suite, '')])
        fully_qualified_test_suites = [test_suite]
    else:
        fully_qualified_test_suites = FullyQualifiedTestSuites(apk)
    debug_info_list = []
    print 'Known suites: ' + str(_TEST_SUITES)
    print 'Running these: ' + str(fully_qualified_test_suites)
    for t in fully_qualified_test_suites:
        if annotate:
            print '@@@BUILD_STEP Test suite %s@@@' % os.path.basename(t)
        test = SingleTestRunner(device, t, gtest_filter, test_arguments,
                                timeout, rebaseline, performance_test,
                                cleanup_test_files, tool, 0,
                                bool(log_dump_name))
        test.Run()

        results += [test.test_results]
        # Collect debug info.
        debug_info_list += [test.dump_debug_info]
        if rebaseline:
            test.UpdateFilter(test.test_results.failed)
        test.test_results.LogFull()
    # Zip all debug info outputs into a file named by log_dump_name.
    debug_info.GTestDebugInfo.ZipAndCleanResults(
        os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release',
                     'debug_info_dumps'), log_dump_name,
        [d for d in debug_info_list if d])

    if annotate:
        PrintAnnotationForTestResults(test.test_results)

    return TestResults.FromTestResults(results)
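
A hypothetical invocation of RunTests; the device serial and suite path are purely illustrative:

results = RunTests('0123456789ABCDEF',           # device serial
                   'out/Release/base_unittests',  # test_suite
                   '',                            # gtest_filter: everything
                   '',                            # test_arguments
                   False,                         # rebaseline
                   60,                            # timeout (seconds)
                   False,                         # performance_test
                   True,                          # cleanup_test_files
                   '',                            # tool: no Valgrind tool
                   '',                            # log_dump_name: no dump
                   False,                         # apk
                   annotate=True)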
Example #5
  def _WatchTestOutput(self, p):
    """Watches the test output.

    Args:
      p: the process generating output as created by pexpect.spawn.
    """
    ok_tests = []
    failed_tests = []
    crashed_tests = []
    timed_out = False
    overall_fail = False
    re_run = re.compile(r'\[ RUN      \] ?(.*)\r\n')
    # APK tests rely on the END tag.
    re_end = re.compile(r'\[ END      \] ?(.*)\r\n')
    # Signal handlers are installed before starting tests
    # to output the CRASHED marker when a crash happens.
    re_crash = re.compile(r'\[ CRASHED      \](.*)\r\n')
    re_fail = re.compile(r'\[  FAILED  \] ?(.*)\r\n')
    re_runner_fail = re.compile(r'\[ RUNNER_FAILED \] ?(.*)\r\n')
    re_ok = re.compile(r'\[       OK \] ?(.*)\r\n')
    (io_stats_before, ready_to_continue) = self._BeginGetIOStats()
    while ready_to_continue:
      found = p.expect([re_run, pexpect.EOF, re_end, re_runner_fail],
                       timeout=self.timeout)
      if found == 1:  # matched pexpect.EOF
        break
      if found == 2:  # matched END.
        break
      if found == 3:  # RUNNER_FAILED
        logging.error('RUNNER_FAILED')
        overall_fail = True
        break
      if self.dump_debug_info:
        self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')
      full_test_name = p.match.group(1)
      found = p.expect([re_ok, re_fail, re_crash, pexpect.EOF,
                        pexpect.TIMEOUT], timeout=self.timeout)
      if found == 0:  # re_ok
        ok_tests += [BaseTestResult(full_test_name.replace('\r', ''),
                                    p.before)]
        continue
      if found == 2:  # re_crash
        crashed_tests += [BaseTestResult(full_test_name.replace('\r', ''),
                                         p.before)]
        overall_fail = True
        break
      # The test failed.
      failed_tests += [BaseTestResult(full_test_name.replace('\r', ''),
                                      p.before)]
      if found >= 3:
        # The test bailed out (i.e., didn't print OK or FAIL).
        if found == 4:  # pexpect.TIMEOUT
          logging.error('Test terminated after %d second timeout.',
                        self.timeout)
          timed_out = True
        break
    p.close()
    if not self.rebaseline and ready_to_continue:
      ok_tests += self._EndGetIOStats(io_stats_before)
      ret_code = self._GetGTestReturnCode()
      if ret_code:
        failed_tests += [BaseTestResult(
            'gtest exit code: %d' % ret_code,
            'pexpect.before: %s\npexpect.after: %s' % (p.before, p.after))]
    # Create TestResults and return.
    return TestResults.FromRun(ok=ok_tests,
                               failed=failed_tests,
                               crashed=crashed_tests,
                               timed_out=timed_out,
                               overall_fail=overall_fail)
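
The method drives pexpect in a two-stage loop: first wait for a [ RUN ] line (or a run-level terminator), then wait for that test's own verdict line. A minimal, self-contained sketch of the same pattern, assuming pexpect 4.x and any binary that emits gtest-style output; a hung test surfaces here as an uncaught pexpect.TIMEOUT, which Example #6 below handles explicitly:

import re
import pexpect

def parse_gtest_stream(cmd, timeout=60):
    re_run = re.compile(r'\[ RUN      \] ?(.*)\r\n')
    re_ok = re.compile(r'\[       OK \] ?(.*)\r\n')
    re_fail = re.compile(r'\[  FAILED  \] ?(.*)\r\n')
    ok, failed = [], []
    p = pexpect.spawn(cmd, encoding='utf-8')
    while True:
        # Stage 1: wait for the next test to start, or for the stream to end.
        if p.expect([re_run, pexpect.EOF], timeout=timeout) == 1:
            break
        name = p.match.group(1).replace('\r', '')
        # Stage 2: wait for this test's own verdict line.
        verdict = p.expect([re_ok, re_fail, pexpect.EOF], timeout=timeout)
        if verdict == 0:
            ok.append(name)
        else:
            failed.append(name)  # FAILED, or the binary died mid-test
            if verdict == 2:
                break
    p.close()
    return ok, failed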
Example #6
  def _WatchTestOutput(self, p):
    """Watches the test output.
    Args:
      p: the process generating output as created by pexpect.spawn.
    """
    ok_tests = []
    failed_tests = []
    crashed_tests = []
    timed_out = False
    overall_fail = False

    # Test case statuses.
    re_run = re.compile(r'\[ RUN      \] ?(.*)\r\n')
    re_fail = re.compile(r'\[  FAILED  \] ?(.*)\r\n')
    re_ok = re.compile(r'\[       OK \] ?(.*?) .*\r\n')

    # Test run statuses.
    re_passed = re.compile(r'\[  PASSED  \] ?(.*)\r\n')
    re_runner_fail = re.compile(r'\[ RUNNER_FAILED \] ?(.*)\r\n')
    # Signal handlers are installed before starting tests
    # to output the CRASHED marker when a crash happens.
    re_crash = re.compile(r'\[ CRASHED      \](.*)\r\n')

    try:
      while True:
        found = p.expect([re_run, re_passed, re_runner_fail],
                         timeout=self.timeout)
        if found == 1:  # re_passed
          break
        elif found == 2:  # re_runner_fail
          overall_fail = True
          break
        else:  # re_run
          if self.dump_debug_info:
            self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')

          full_test_name = p.match.group(1).replace('\r', '')
          found = p.expect([re_ok, re_fail, re_crash], timeout=self.timeout)
          if found == 0:  # re_ok
            if full_test_name == p.match.group(1).replace('\r', ''):
              ok_tests += [BaseTestResult(full_test_name, p.before)]
          elif found == 2:  # re_crash
            crashed_tests += [BaseTestResult(full_test_name, p.before)]
            overall_fail = True
            break
          else:  # re_fail
            failed_tests += [BaseTestResult(full_test_name, p.before)]
    except pexpect.EOF:
      logging.error('Test terminated - EOF')
      raise errors.DeviceUnresponsiveError('Device may be offline')
    except pexpect.TIMEOUT:
      logging.error('Test terminated after %d second timeout.',
                    self.timeout)
      timed_out = True
    finally:
      p.close()

    ret_code = self._GetGTestReturnCode()
    if ret_code:
      logging.critical(
          'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s',
          ret_code, p.before, p.after)
      overall_fail = True

    # Create TestResults and return
    return TestResults.FromRun(ok=ok_tests, failed=failed_tests,
                               crashed=crashed_tests, timed_out=timed_out,
                               overall_fail=overall_fail)
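
Compared with Example #5, this version moves EOF and TIMEOUT handling out of the index-based expect branches and into exceptions: EOF is escalated as errors.DeviceUnresponsiveError (the device itself likely died), while TIMEOUT is recorded as a hung test without aborting the caller. A small sketch of that translation, with RuntimeError standing in for the project's DeviceUnresponsiveError:

import pexpect

def expect_or_flag_timeout(p, patterns, timeout):
    # Returns (match_index, timed_out). EOF is escalated because it usually
    # means the device went away; RuntimeError stands in for the
    # errors.DeviceUnresponsiveError raised in the original.
    try:
        return p.expect(patterns, timeout=timeout), False
    except pexpect.EOF:
        raise RuntimeError('Device may be offline')
    except pexpect.TIMEOUT:
        return None, True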