Example #1
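A method that runs all available tests, re-running with a narrowed gtest filter after each crash or failure until every test has executed, then stores the combined results.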
 def RebaselineTests(self):
     """Runs all available tests, restarting in case of failures."""
     if self._gtest_filter:
         all_tests = set(self._gtest_filter.split(':'))
     else:
         all_tests = set(self.test_package.GetAllTests())
     failed_results = set()
     executed_results = set()
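     # Each pass runs only the tests that have not yet executed; a crash
     # aborts the test binary, so several passes may be needed.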
     while True:
         executed_names = set([f.name for f in executed_results])
         self._gtest_filter = ':'.join(all_tests - executed_names)
         self.RunTestsWithFilter()
         failed_results.update(self.test_results.crashed,
                               self.test_results.failed)
         executed_results.update(self.test_results.crashed,
                                 self.test_results.failed,
                                 self.test_results.ok)
         executed_names = set([f.name for f in executed_results])
         logging.info('*' * 80)
         logging.info(self.device)
         logging.info('Executed: %d of %d', len(executed_names),
                      len(all_tests))
         logging.info('Failed so far: %d %s', len(failed_results),
                      [f.name for f in failed_results])
         logging.info('Remaining: %d %s', len(all_tests - executed_names),
                      all_tests - executed_names)
         logging.info('*' * 80)
         if executed_names == all_tests:
             break
     self.test_results = TestResults.FromOkAndFailed(
         list(executed_results - failed_results), list(failed_results))
Example #2
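A driver that runs one or more test suites on a device via SingleTestRunner, collects debug info from each run, and merges the per-suite results into a single TestResults object.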
def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline,
             timeout, performance_test, cleanup_test_files, tool,
             log_dump_name):
  """Runs the tests.

  Args:
    device: Device to run the tests.
    test_suite: A specific test suite to run, empty to run all.
    gtest_filter: A gtest_filter flag.
    test_arguments: Additional arguments to pass to the test binary.
    rebaseline: Whether or not to run tests in isolation and update the filter.
    timeout: Timeout for each test.
    performance_test: Whether or not to run performance tests.
    cleanup_test_files: Whether or not to clean up test files on the device.
    tool: Name of the Valgrind tool.
    log_dump_name: Name of log dump file.

  Returns:
    A TestResults object.
  """
  results = []

  if test_suite:
    global _TEST_SUITES
    if not os.path.exists(test_suite):
      logging.critical('Unrecognized test suite, supported: %s',
                       _TEST_SUITES)
      if test_suite in _TEST_SUITES:
        logging.critical('(Remember to include the path: out/Release/%s)',
                         test_suite)
      return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')])
    _TEST_SUITES = [test_suite]
  else:
    # If not specified, assume the test suites are in out/Release
    test_suite_dir = os.path.abspath(os.path.join(run_tests_helper.CHROME_DIR,
        'out', 'Release'))
    _TEST_SUITES = [os.path.join(test_suite_dir, t) for t in _TEST_SUITES]
  debug_info_list = []
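  # Run each suite in sequence; unless rebaselining, stop at the first
  # suite that reports failures.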
  for t in _TEST_SUITES:
    test = SingleTestRunner(device, t, gtest_filter, test_arguments,
                            timeout, rebaseline, performance_test,
                            cleanup_test_files, tool, bool(log_dump_name))
    test.RunTests()
    results += [test.test_results]
    # Collect debug info.
    debug_info_list += [test.dump_debug_info]
    if rebaseline:
      test.UpdateFilter(test.test_results.failed)
    elif test.test_results.failed:
      # Stop running test if encountering failed test.
      test.test_results.LogFull()
      break
  # Zip all debug info outputs into a file named by log_dump_name.
  debug_info.GTestDebugInfo.ZipAndCleanResults(
      os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release',
          'debug_info_dumps'),
      log_dump_name, [d for d in debug_info_list if d])
  return TestResults.FromTestResults(results)
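A minimal invocation sketch for the function above; the device serial, suite path, and timeout are hypothetical values, not taken from the original source:

results = RunTests('emulator-5554',               # hypothetical device serial
                   'out/Release/base_unittests',  # hypothetical suite path
                   gtest_filter='', test_arguments='', rebaseline=False,
                   timeout=60, performance_test=False,
                   cleanup_test_files=True, tool='', log_dump_name=None)
results.LogFull()  # TestResults.LogFull() is used the same way in Example #4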
Example #3
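A method that watches the test binary's console output through pexpect, classifying each test as passed or failed and detecting crashes, runner failures, and timeouts.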
 def _WatchTestOutput(self, p):
   """Watches the test output.
   Args:
     p: the process generating output as created by pexpect.spawn.
   """
   ok_tests = []
   failed_tests = []
   timed_out = False
   overall_fail = False
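   # gtest prints one status marker per test; each pattern captures the
   # test name that follows the marker.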
   re_run = re.compile(r'\[ RUN      \] ?(.*)\r\n')
   # APK tests rely on the END tag.
   re_end = re.compile(r'\[ END      \] ?(.*)\r\n')
   re_fail = re.compile(r'\[  FAILED  \] ?(.*)\r\n')
   re_runner_fail = re.compile(r'\[ RUNNER_FAILED \] ?(.*)\r\n')
   re_ok = re.compile(r'\[       OK \] ?(.*)\r\n')
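   # Snapshot I/O statistics before the run; the delta is appended as an
   # extra result once the tests finish.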
   (io_stats_before, ready_to_continue) = self._BeginGetIOStats()
   while ready_to_continue:
     found = p.expect([re_run, pexpect.EOF, re_end, re_runner_fail],
                      timeout=self.timeout)
     if found == 1:  # matched pexpect.EOF
       break
     if found == 2:  # matched END.
       break
     if found == 3:  # RUNNER_FAILED
       logging.error('RUNNER_FAILED')
       overall_fail = True
       break
     if self.dump_debug_info:
       self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')
     full_test_name = p.match.group(1)
     found = p.expect([re_ok, re_fail, pexpect.EOF, pexpect.TIMEOUT],
                      timeout=self.timeout)
     if found == 0:  # re_ok
       ok_tests += [BaseTestResult(full_test_name.replace('\r', ''),
                                   p.before)]
       continue
     failed_tests += [BaseTestResult(full_test_name.replace('\r', ''),
                                     p.before)]
     if found >= 2:
       # The test crashed / bailed out (i.e., didn't print OK or FAIL).
       if found == 3:  # pexpect.TIMEOUT
         logging.error('Test terminated after %d second timeout.',
                       self.timeout)
         timed_out = True
       break
   p.close()
   if not self.rebaseline and ready_to_continue:
     ok_tests += self._EndGetIOStats(io_stats_before)
     ret_code = self._GetGTestReturnCode()
     if ret_code:
       failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code,
                                       'pexpect.before: %s'
                                       '\npexpect.after: %s'
                                       % (p.before,
                                          p.after))]
   return TestResults.FromOkAndFailed(ok_tests, failed_tests,
                                      timed_out, overall_fail)
Example #4
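A later variant of the RunTests driver from Example #2 that adds support for APK-based test suites and optional buildbot-style step annotations.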
def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline,
             timeout, performance_test, cleanup_test_files, tool,
             log_dump_name, apk, annotate=False):
  """Runs the tests.

  Args:
    device: Device to run the tests.
    test_suite: A specific test suite to run, empty to run all.
    gtest_filter: A gtest_filter flag.
    test_arguments: Additional arguments to pass to the test binary.
    rebaseline: Whether or not to run tests in isolation and update the filter.
    timeout: Timeout for each test.
    performance_test: Whether or not to run performance tests.
    cleanup_test_files: Whether or not to clean up test files on the device.
    tool: Name of the Valgrind tool.
    log_dump_name: Name of log dump file.
    apk: Boolean indicating whether to use the APK-based test runner.
    annotate: Whether to print buildbot-style annotations.

  Returns:
    A TestResults object.
  """
  results = []

  if test_suite:
    global _TEST_SUITES
    if (not os.path.exists(test_suite) and
        os.path.splitext(test_suite)[1] != '.apk'):
      logging.critical('Unrecognized test suite %s, supported: %s',
                       test_suite, _TEST_SUITES)
      if test_suite in _TEST_SUITES:
        logging.critical('(Remember to include the path: out/Release/%s)',
                         test_suite)
      return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')],
                                         False, False)
    fully_qualified_test_suites = [test_suite]
  else:
    fully_qualified_test_suites = FullyQualifiedTestSuites(apk)
  debug_info_list = []
  print 'Known suites: ' + str(_TEST_SUITES)
  print 'Running these: ' + str(fully_qualified_test_suites)
  for t in fully_qualified_test_suites:
    if annotate:
      print '@@@BUILD_STEP Test suite %s@@@' % os.path.basename(t)
    test = SingleTestRunner(device, t, gtest_filter, test_arguments,
                            timeout, rebaseline, performance_test,
                            cleanup_test_files, tool, 0, bool(log_dump_name))
    test.Run()

    results += [test.test_results]
    # Collect debug info.
    debug_info_list += [test.dump_debug_info]
    if rebaseline:
      test.UpdateFilter(test.test_results.failed)
    test.test_results.LogFull()
  # Zip all debug info outputs into a file named by log_dump_name.
  debug_info.GTestDebugInfo.ZipAndCleanResults(
      os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release',
          'debug_info_dumps'),
      log_dump_name, [d for d in debug_info_list if d])

  if annotate:
    if test.test_results.timed_out:
      print '@@@STEP_WARNINGS@@@'
    elif test.test_results.failed or test.test_results.overall_fail:
      print '@@@STEP_FAILURE@@@'
    else:
      print 'Step success!'  # No annotation needed

  return TestResults.FromTestResults(results)
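As with Example #2, a hedged invocation sketch; every argument value below is hypothetical:

results = RunTests('emulator-5554', 'out/Release/base_unittests', '', '',
                   rebaseline=False, timeout=60, performance_test=False,
                   cleanup_test_files=True, tool='', log_dump_name=None,
                   apk=False, annotate=True)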