def _ProcessResults(self, result, start_ms, duration_ms):
    """Translates a Java test result into a Python result for this test.

    The TestRunner class that we use under the covers returns a test result
    for that specific Java test. However, to make reporting clearer, this
    method abstracts away that detail and reports the outcome as a result of
    this particular Python test case, while still including the Java stack
    trace on failure.

    Args:
      result: TestResults with a single Java test result
      start_ms: the time the test started
      duration_ms: the length of the test

    Returns:
      A TestResults object containing a result for this Python test.
    """
    test_results = TestResults()

    # If our test is among the broken results, then it crashed or failed.
    broken = result.GetAllBroken()
    if broken:
      # Since we have run only one test, take the first and only item.
      single_result = broken[0]

      log = single_result.log
      if not log:
        log = 'No logging information.'

      short_error_msg = log.split('\n')[0]
      # err_info is ostensibly for Sponge to consume; it's a short error
      # message and a longer one.
      err_info = (short_error_msg, log)

      python_result = SingleTestResult(self.qualified_name, start_ms,
                                       duration_ms,
                                       PYTHON,
                                       log,
                                       err_info)

      # Figure out where the test belonged. There's probably a cleaner way of
      # doing this.
      if single_result in result.crashed:
        test_results.crashed = [python_result]
      elif single_result in result.failed:
        test_results.failed = [python_result]
      elif single_result in result.unknown:
        test_results.unknown = [python_result]

    else:
      python_result = SingleTestResult(self.qualified_name, start_ms,
                                       duration_ms,
                                       PYTHON)
      test_results.ok = [python_result]

    return test_results
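
The bucketing above relies on the project's TestResults and SingleTestResult classes, which are not shown here. As a rough, self-contained sketch of the same idea, the stand-in classes below (hypothetical, not the real API) classify a single broken result and build the (short message, full log) pair:

# Minimal sketch of the result-bucketing idea above; FakeResult/FakeResults
# are made-up stand-ins, not the real TestResults API.
class FakeResult(object):
  def __init__(self, name, log=''):
    self.name = name
    self.log = log

class FakeResults(object):
  def __init__(self, crashed=None, failed=None, unknown=None):
    self.crashed = crashed or []
    self.failed = failed or []
    self.unknown = unknown or []

  def GetAllBroken(self):
    return self.crashed + self.failed + self.unknown

def Bucket(result):
  broken = result.GetAllBroken()
  if not broken:
    return 'ok', None
  single = broken[0]
  log = single.log or 'No logging information.'
  err_info = (log.split('\n')[0], log)  # (short message, full log)
  if single in result.crashed:
    return 'crashed', err_info
  elif single in result.failed:
    return 'failed', err_info
  return 'unknown', err_info

print(Bucket(FakeResults(failed=[FakeResult('testFoo', 'AssertionError\n...')])))
# -> ('failed', ('AssertionError', 'AssertionError\n...'))
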
Example 2
  def RunShardedTests(self):
    """Runs the tests in all connected devices.

    Returns:
      A TestResults object.
    """
    logging.warning('*' * 80)
    logging.warning('Sharding across %d devices.', len(self.attached_devices))
    logging.warning('Note that the output is not synchronized.')
    logging.warning('Look for the "Final result" banner at the end.')
    logging.warning('*' * 80)
    final_results = TestResults()
    for retry in xrange(self.retries):
      logging.warning('Try %d of %d', retry + 1, self.retries)
      self.SetupSharding(self.tests)
      test_runners = []
      for index, device in enumerate(self.attached_devices):
        logging.warning('*' * 80)
        logging.warning('Creating shard %d for %s', index, device)
        logging.warning('*' * 80)
        test_runner = self.CreateShardedTestRunner(device, index)
        test_runners += [test_runner]
      logging.warning('Starting...')
      pool = multiprocessing.Pool(len(self.attached_devices),
                                  SetTestsContainer,
                                  [BaseTestSharder.tests_container])
      # pool.map() can't be interrupted by KeyboardInterrupt (a known Python
      # bug), so use map_async() and get() with a timeout instead.
      async_results = pool.map_async(_ShardedTestRunnable, test_runners)
      results_lists = async_results.get(999999)

      test_results = TestResults.FromTestResults(results_lists)
      # Re-check the attached devices, since some may have gone offline.
      retry_devices = set(android_commands.GetAttachedDevices())
      # Remove devices that had exceptions.
      retry_devices -= TestResults.DeviceExceptions(results_lists)
      # Retry on devices that didn't have any exception.
      self.attached_devices = list(retry_devices)
      if (retry == self.retries - 1 or
          len(self.attached_devices) == 0):
        all_passed = final_results.ok + test_results.ok
        final_results = test_results
        final_results.ok = all_passed
        break
      else:
        final_results.ok += test_results.ok
        self.tests = []
        for t in test_results.GetAllBroken():
          self.tests += [t.name]
        if not self.tests:
          break
    self.OnTestsCompleted(test_runners, final_results)
    return final_results
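
The map_async() call above works around a long-standing CPython quirk noted in the comment: a pool blocked in a bare map() can be hard to interrupt with Ctrl-C, whereas map_async().get(timeout) keeps KeyboardInterrupt deliverable to the main process. A minimal, self-contained sketch of that pattern, using a trivial stand-in task instead of _ShardedTestRunnable:

import multiprocessing

def _Work(n):
  # Stand-in for _ShardedTestRunnable: any picklable module-level callable.
  return n * n

if __name__ == '__main__':
  pool = multiprocessing.Pool(4)
  try:
    # map_async() plus get() with a large timeout lets Ctrl-C reach the
    # main process; a bare pool.map() can block it on some Python versions.
    results = pool.map_async(_Work, range(10)).get(999999)
    print(results)
  except KeyboardInterrupt:
    pool.terminate()
    raise
  else:
    pool.close()
  finally:
    pool.join()
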
  def RunShardedTests(self):
    """Runs tests in parallel using a pool of workers.

    Returns:
      A list of test results aggregated from all test runs.
    """
    logging.warning('*' * 80)
    logging.warning('Sharding across %d devices.', len(self.attached_devices))
    logging.warning('Note that the output is not synchronized.')
    logging.warning('Look for the "Final result" banner at the end.')
    logging.warning('*' * 80)
    all_passed = []
    test_results = TestResults()
    tests_to_run = self.tests
    for retry in xrange(self.retries):
      logging.warning('Try %d of %d', retry + 1, self.retries)
      self._SetupSharding(tests_to_run)
      test_runners = self._MakeTestRunners(self.attached_devices)
      logging.warning('Starting...')
      pool = multiprocessing.Pool(len(self.attached_devices),
                                  SetTestsContainer,
                                  [PythonTestSharder.tests_container])

      # List of TestResults objects from each test execution.
      try:
        results_lists = pool.map(_DefaultRunnable, test_runners)
      except Exception:
        logging.exception('Unable to run tests. Something with the '
                          'PythonTestRunners has gone wrong.')
        raise FatalTestException('PythonTestRunners were unable to run tests.')

      test_results = TestResults.FromTestResults(results_lists)
      # Accumulate passing results.
      all_passed += test_results.ok
      # If we have failed tests, map them to tests to retry.
      failed_tests = test_results.GetAllBroken()
      tests_to_run = self._GetTestsToRetry(self.tests,
                                           failed_tests)

      # Bail out early if we have no more tests. This can happen if all tests
      # pass before we're out of retries, for example.
      if not tests_to_run:
        break

    # all_passed has accumulated all passing test results.
    # test_results will have the results from the most recent run, which could
    # include a variety of failure modes (unknown, crashed, failed, etc).
    final_results = test_results
    final_results.ok = all_passed

    return final_results
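
The loop above accumulates passing results on every attempt and re-runs only the broken tests, stopping early once nothing is left to retry. A stripped-down, self-contained sketch of that accumulate-and-retry shape (run_tests is a hypothetical stand-in for one sharded pass):

def RetryBroken(all_tests, run_tests, retries=3):
  # run_tests(tests) -> (passed, broken); both are lists of test names.
  passed = []
  to_run = list(all_tests)
  for attempt in range(retries):
    newly_passed, broken = run_tests(to_run)
    passed += newly_passed
    to_run = broken
    if not to_run:
      break
  return passed, to_run  # to_run holds whatever is still broken.

# Example: a flaky suite where 'b' passes on the second attempt.
attempts = {'b': 0}
def FlakyRun(tests):
  ok, bad = [], []
  for t in tests:
    if t == 'b' and attempts['b'] == 0:
      attempts['b'] += 1
      bad.append(t)
    else:
      ok.append(t)
  return ok, bad

print(RetryBroken(['a', 'b', 'c'], FlakyRun))  # -> (['a', 'c', 'b'], [])
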
Example 4
    def RunShardedTests(self):
        """Runs the tests in all connected devices.

        Returns:
          A TestResults object.
        """
        logging.warning("*" * 80)
        logging.warning("Sharding in " + str(len(self.attached_devices)) + " devices.")
        logging.warning("Note that the output is not synchronized.")
        logging.warning('Look for the "Final result" banner in the end.')
        logging.warning("*" * 80)
        final_results = TestResults()
        for retry in xrange(self.retries):
            logging.warning("Try %d of %d", retry + 1, self.retries)
            self.SetupSharding(self.tests)
            test_runners = []
            for index, device in enumerate(self.attached_devices):
                logging.warning("*" * 80)
                logging.warning("Creating shard %d for %s", index, device)
                logging.warning("*" * 80)
                test_runner = self.CreateShardedTestRunner(device, index)
                test_runners += [test_runner]
            logging.warning("Starting...")
            pool = multiprocessing.Pool(
                len(self.attached_devices), SetTestsContainer, [BaseTestSharder.tests_container]
            )
            results_lists = pool.map(_ShardedTestRunnable, test_runners)
            test_results = TestResults.FromTestResults(results_lists)
            if retry == self.retries - 1:
                all_passed = final_results.ok + test_results.ok
                final_results = test_results
                final_results.ok = all_passed
                break
            else:
                final_results.ok += test_results.ok
                self.tests = []
                for t in test_results.GetAllBroken():
                    self.tests += [t.name]
                if not self.tests:
                    break
        self.OnTestsCompleted(test_runners, final_results)
        return final_results
Example 5
  def __init__(self, options, device, tests_iter, coverage, shard_index, apks,
               ports_to_forward):
    """Create a new TestRunner.

    Args:
      options: An options object with the following required attributes:
      -  build_type: 'Release' or 'Debug'.
      -  install_apk: Whether to re-install the APK before running.
      -  save_perf_json: Whether or not to save the JSON file from UI perf
            tests.
      -  screenshot_failures: Whether to take a screenshot on test failure.
      -  tool: Name of the Valgrind tool.
      -  wait_for_debugger: blocks until the debugger is connected.
      -  disable_assertions: Whether to disable java assertions on the device.
      device: Attached android device.
      tests_iter: A list of tests to be run.
      coverage: Whether to collect coverage information.
      shard_index: shard # for this TestRunner, used to create unique port
          numbers.
      apks: A list of ApkInfo objects to be installed. The first element
            should be the test APK; the rest may be APKs used by the test.
            The default is ChromeTest.apk.
      ports_to_forward: A list of port numbers for which to set up forwarders.
                        Can be optionally requested by a test case.
    Raises:
      FatalTestException: if coverage metadata is not available.
    """
    BaseTestRunner.__init__(
        self, device, options.tool, shard_index, options.build_type)

    if not apks:
      apks = [apk_info.ApkInfo(options.test_apk_path,
                               options.test_apk_jar_path)]

    self.build_type = options.build_type
    self.install_apk = options.install_apk
    self.save_perf_json = options.save_perf_json
    self.screenshot_failures = options.screenshot_failures
    self.wait_for_debugger = options.wait_for_debugger
    self.disable_assertions = options.disable_assertions

    self.tests_iter = tests_iter
    self.coverage = coverage
    self.apks = apks
    self.test_apk = apks[0]
    self.instrumentation_class_path = self.test_apk.GetPackageName()
    self.ports_to_forward = ports_to_forward

    self.test_results = TestResults()
    self.forwarder = None

    if self.coverage:
      if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
        os.remove(TestRunner._COVERAGE_MERGED_FILENAME)
      if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH):
        raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
                                 ' : Coverage meta info [' +
                                 TestRunner._COVERAGE_META_INFO_PATH +
                                 '] does not exist.')
      if (not TestRunner._COVERAGE_WEB_ROOT_DIR or
          not os.path.exists(TestRunner._COVERAGE_WEB_ROOT_DIR)):
        raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
                                 ' : Path specified in $EMMA_WEB_ROOTDIR [' +
                                 TestRunner._COVERAGE_WEB_ROOT_DIR +
                                 '] does not exist.')
Example 6
def DispatchJavaTests(options, apks):
  """Dispatches Java tests onto connected device(s).

  If possible, this method will attempt to shard the tests across
  all connected devices. Otherwise, it dispatches and runs the tests on one
  device.

  Args:
    options: Command line options.
    apks: list of APKs to use.

  Returns:
    A TestResults object holding the results of the Java tests.

  Raises:
    FatalTestException: when there are no attached devices.
  """
  test_apk = apks[0]
  if options.annotation:
    available_tests = test_apk.GetAnnotatedTests(options.annotation)
    if len(options.annotation) == 1 and options.annotation[0] == 'SmallTest':
      tests_without_annotation = [
          m for m in
          test_apk.GetTestMethods()
          if not test_apk.GetTestAnnotations(m) and
          not apk_info.ApkInfo.IsPythonDrivenTest(m)]
      if tests_without_annotation:
        tests_without_annotation.sort()
        logging.warning('The following tests do not contain any annotation. '
                        'Assuming "SmallTest":\n%s',
                        '\n'.join(tests_without_annotation))
        available_tests += tests_without_annotation
  else:
    available_tests = [m for m in test_apk.GetTestMethods()
                       if not apk_info.ApkInfo.IsPythonDrivenTest(m)]
  coverage = os.environ.get('EMMA_INSTRUMENT') == 'true'

  tests = []
  if options.test_filter:
    # |available_tests| are in adb instrument format: package.path.class#test.
    filter_without_hash = options.test_filter.replace('#', '.')
    tests = [t for t in available_tests
             if filter_without_hash in t.replace('#', '.')]
  else:
    tests = available_tests

  if not tests:
    logging.warning('No Java tests to run with current args.')
    return TestResults()

  tests *= options.number_of_runs

  attached_devices = android_commands.GetAttachedDevices()
  test_results = TestResults()

  if not attached_devices:
    raise FatalTestException('You have no devices attached or visible!')
  if options.device:
    attached_devices = [options.device]

  logging.info('Will run: %s', str(tests))

  if len(attached_devices) > 1 and (coverage or options.wait_for_debugger):
    logging.warning('Coverage / debugger cannot be sharded; '
                    'using first available device.')
    attached_devices = attached_devices[:1]
  sharder = TestSharder(attached_devices, options, tests, apks)
  test_results = sharder.RunShardedTests()
  return test_results
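
The test_filter handling above compares names with '#' normalized to '.', so a filter written in either package.Class#test or package.Class.test form matches. A tiny standalone illustration of that normalization (the test names below are made up):

available_tests = [
    'org.chromium.Foo#testBar',   # hypothetical instrumentation test names
    'org.chromium.Foo#testBaz',
    'org.chromium.Qux#testBar',
]

def FilterTests(tests, test_filter):
  # Match with '#' normalized to '.' on both sides, as in DispatchJavaTests.
  filter_without_hash = test_filter.replace('#', '.')
  return [t for t in tests if filter_without_hash in t.replace('#', '.')]

print(FilterTests(available_tests, 'Foo#testBar'))
# -> ['org.chromium.Foo#testBar']
print(FilterTests(available_tests, 'testBar'))
# -> ['org.chromium.Foo#testBar', 'org.chromium.Qux#testBar']
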
Example 7
def RunTests(device,
             test_suite,
             gtest_filter,
             test_arguments,
             rebaseline,
             timeout,
             performance_test,
             cleanup_test_files,
             tool,
             log_dump_name,
             apk,
             annotate=False):
    """Runs the tests.

    Args:
      device: Device to run the tests on.
      test_suite: A specific test suite to run, empty to run all.
      gtest_filter: A gtest_filter flag.
      test_arguments: Additional arguments to pass to the test binary.
      rebaseline: Whether to run tests in isolation and update the filter.
      timeout: Timeout for each test.
      performance_test: Whether this is a performance test.
      cleanup_test_files: Whether to clean up test files on the device.
      tool: Name of the Valgrind tool.
      log_dump_name: Name of the log dump file.
      apk: Whether to use the APK-based test runner.
      annotate: Whether to print buildbot-style annotations.

    Returns:
      A TestResults object.
    """
    results = []

    if test_suite:
        global _TEST_SUITES
        if (not os.path.exists(test_suite)
                and not os.path.splitext(test_suite)[1] == '.apk'):
            logging.critical('Unrecognized test suite %s, supported: %s' %
                             (test_suite, _TEST_SUITES))
            if test_suite in _TEST_SUITES:
                logging.critical(
                    '(Remember to include the path: out/Release/%s)',
                    test_suite)
            return TestResults.FromRun(failed=[BaseTestResult(test_suite, '')])
        fully_qualified_test_suites = [test_suite]
    else:
        fully_qualified_test_suites = FullyQualifiedTestSuites(apk)
    debug_info_list = []
    print 'Known suites: ' + str(_TEST_SUITES)
    print 'Running these: ' + str(fully_qualified_test_suites)
    for t in fully_qualified_test_suites:
        if annotate:
            print '@@@BUILD_STEP Test suite %s@@@' % os.path.basename(t)
        test = SingleTestRunner(device, t, gtest_filter, test_arguments,
                                timeout, rebaseline, performance_test,
                                cleanup_test_files, tool, 0,
                                not not log_dump_name)
        test.Run()

        results += [test.test_results]
        # Collect debug info.
        debug_info_list += [test.dump_debug_info]
        if rebaseline:
            test.UpdateFilter(test.test_results.failed)
        test.test_results.LogFull()
    # Zip all debug info outputs into a file named by log_dump_name.
    debug_info.GTestDebugInfo.ZipAndCleanResults(
        os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release',
                     'debug_info_dumps'), log_dump_name,
        [d for d in debug_info_list if d])

    if annotate:
        if test.test_results.timed_out:
            print '@@@STEP_WARNINGS@@@'
        elif test.test_results.failed:
            print '@@@STEP_FAILURE@@@'
        elif test.test_results.overall_fail:
            print '@@@STEP_FAILURE@@@'
        else:
            print 'Step success!'  # No annotation needed

    return TestResults.FromTestResults(results)
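
The @@@BUILD_STEP / @@@STEP_WARNINGS / @@@STEP_FAILURE strings printed above are buildbot annotations: the buildbot log parser picks them up to name steps and mark warnings or failures. A small stand-alone sketch of emitting the same markers from arbitrary results (the results object here is a made-up stand-in, not the real TestResults class):

import os

class FakeResults(object):
  # Stand-in for TestResults; only the fields the annotation logic reads.
  def __init__(self, timed_out=False, failed=(), overall_fail=False):
    self.timed_out = timed_out
    self.failed = list(failed)
    self.overall_fail = overall_fail

def AnnotateSuite(suite_path, results):
  # Buildbot recognizes these @@@...@@@ markers in stdout.
  print('@@@BUILD_STEP Test suite %s@@@' % os.path.basename(suite_path))
  if results.timed_out:
    print('@@@STEP_WARNINGS@@@')
  elif results.failed or results.overall_fail:
    print('@@@STEP_FAILURE@@@')

AnnotateSuite('out/Release/base_unittests', FakeResults(failed=['Foo.Bar']))
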
Example 8
  def _WatchTestOutput(self, p):
      """Watches the test output.

      Args:
        p: the process generating output as created by pexpect.spawn.

      Returns:
        A TestResults object.
      """
     ok_tests = []
     failed_tests = []
     crashed_tests = []
     timed_out = False
     overall_fail = False
     re_run = re.compile('\[ RUN      \] ?(.*)\r\n')
     # APK tests rely on the PASSED tag.
     re_passed = re.compile('\[  PASSED  \] ?(.*)\r\n')
     # Signal handlers are installed before starting tests
     # to output the CRASHED marker when a crash happens.
     re_crash = re.compile('\[ CRASHED      \](.*)\r\n')
     re_fail = re.compile('\[  FAILED  \] ?(.*)\r\n')
     re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n')
     re_ok = re.compile('\[       OK \] ?(.*?) .*\r\n')
     io_stats_before = self._BeginGetIOStats()
     try:
         while True:
             found = p.expect([re_run, re_passed, re_runner_fail],
                              timeout=self.timeout)
             if found == 1:  # matched PASSED.
                 break
             if found == 2:  # RUNNER_FAILED
                 logging.error('RUNNER_FAILED')
                 overall_fail = True
                 break
             if self.dump_debug_info:
                 self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')
             full_test_name = p.match.group(1).replace('\r', '')
             found = p.expect([re_ok, re_fail, re_crash],
                              timeout=self.timeout)
             if found == 0:  # re_ok
                 if full_test_name == p.match.group(1).replace('\r', ''):
                     ok_tests += [BaseTestResult(full_test_name, p.before)]
                     continue
             if found == 2:  # re_crash
                 crashed_tests += [BaseTestResult(full_test_name, p.before)]
                 overall_fail = True
                 break
             # The test failed.
             failed_tests += [BaseTestResult(full_test_name, p.before)]
     except pexpect.EOF:
         logging.error('Test terminated - EOF')
     except pexpect.TIMEOUT:
         logging.error('Test terminated after %d second timeout.',
                       self.timeout)
         timed_out = True
     finally:
         p.close()
     if not self.rebaseline:
         ok_tests += self._EndGetIOStats(io_stats_before)
         ret_code = self._GetGTestReturnCode()
         if ret_code:
             failed_tests += [
                 BaseTestResult(
                     'gtest exit code: %d' % ret_code, 'pexpect.before: %s'
                     '\npexpect.after: %s' % (p.before, p.after))
             ]
     # Create TestResults and return
     return TestResults.FromRun(ok=ok_tests,
                                failed=failed_tests,
                                crashed=crashed_tests,
                                timed_out=timed_out,
                                overall_fail=overall_fail)
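
The regexes above drive a small state machine over gtest's console output: [ RUN ] opens a test, then [ OK ]/[ FAILED ]/[ CRASHED ] closes it, and [ PASSED ]/[ RUNNER_FAILED ] ends the run. The snippet below applies the same patterns to a canned transcript with plain re instead of pexpect, just to show what they capture:

import re

sample_output = (
    '[ RUN      ] FooTest.Bar\r\n'
    '[       OK ] FooTest.Bar (12 ms)\r\n'
    '[ RUN      ] FooTest.Baz\r\n'
    '[  FAILED  ] FooTest.Baz (3 ms)\r\n'
    '[  PASSED  ] 1 test.\r\n'
)

re_run = re.compile(r'\[ RUN      \] ?(.*)\r\n')
re_ok = re.compile(r'\[       OK \] ?(.*?) .*\r\n')
re_fail = re.compile(r'\[  FAILED  \] ?(.*)\r\n')

print(re_run.findall(sample_output))   # ['FooTest.Bar', 'FooTest.Baz']
print(re_ok.findall(sample_output))    # ['FooTest.Bar']
print(re_fail.findall(sample_output))  # ['FooTest.Baz (3 ms)']
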
  def RunShardedTests(self):
    """Runs the tests in all connected devices.

    Returns:
      A TestResults object.
    """
    logging.warning('*' * 80)
    logging.warning('Sharding across %d devices.', len(self.attached_devices))
    logging.warning('Note that the output is not synchronized.')
    logging.warning('Look for the "Final result" banner at the end.')
    logging.warning('*' * 80)
    final_results = TestResults()
    self._KillHostForwarder()
    for retry in xrange(self.retries):
      logging.warning('Try %d of %d', retry + 1, self.retries)
      self.SetupSharding(self.tests)
      test_runners = []

      # Try to create N shards, retrying on failure.
      try:
        for index, device in enumerate(self.attached_devices):
          logging.warning('*' * 80)
          logging.warning('Creating shard %d for %s', index, device)
          logging.warning('*' * 80)
          test_runner = self.CreateShardedTestRunner(device, index)
          test_runners += [test_runner]
      except errors.DeviceUnresponsiveError as e:
        logging.critical('****Failed to create a shard: [%s]', e)
        self.attached_devices.remove(device)
        continue

      logging.warning('Starting...')
      pool = multiprocessing.Pool(len(self.attached_devices),
                                  SetTestsContainer,
                                  [BaseTestSharder.tests_container])
      # pool.map() can't be interrupted by KeyboardInterrupt (a known Python
      # bug), so use map_async() and get() with a timeout instead.
      async_results = pool.map_async(_ShardedTestRunnable, test_runners)
      try:
        results_lists = async_results.get(999999)
      except errors.DeviceUnresponsiveError as e:
        logging.critical('****Failed to run test: [%s]', e)
        self.attached_devices = android_commands.GetAttachedDevices()
        continue
      test_results = TestResults.FromTestResults(results_lists)
      # Re-check the attached devices, since some may have gone offline.
      retry_devices = set(android_commands.GetAttachedDevices())
      # Remove devices that had exceptions.
      retry_devices -= TestResults.DeviceExceptions(results_lists)
      # Retry on devices that didn't have any exception.
      self.attached_devices = list(retry_devices)
      if (retry == self.retries - 1 or
          len(self.attached_devices) == 0):
        all_passed = final_results.ok + test_results.ok
        final_results = test_results
        final_results.ok = all_passed
        break
      else:
        final_results.ok += test_results.ok
        # Timed out tests are not reported in GetAllBroken().
        if test_results.timed_out:
          final_results.timed_out = True
        self.tests = []
        for t in test_results.GetAllBroken():
          self.tests += [t.name]
        if not self.tests:
          break
    else:
      # We ran out of retries, possibly because we ran out of healthy
      # devices. There's no recovery at this point.
      raise Exception('Unrecoverable error while retrying test runs.')
    self.OnTestsCompleted(test_runners, final_results)
    self._KillHostForwarder()
    return final_results
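
The else: clause on the retry loop above is Python's for/else construct: the else body runs only when the loop finishes without hitting break, i.e. when every retry was consumed without reaching a terminal state. A tiny standalone illustration of that control flow (flaky_call is a made-up stand-in):

def RunWithRetries(flaky_call, retries):
  for attempt in range(retries):
    if flaky_call(attempt):
      result = attempt
      break  # Success: skip the else clause entirely.
  else:
    # Only reached when the loop exhausted all retries without a break,
    # mirroring the "ran out of retries" branch in RunShardedTests.
    raise Exception('Unrecoverable error while retrying test runs.')
  return result

print(RunWithRetries(lambda attempt: attempt == 2, retries=5))  # -> 2
# RunWithRetries(lambda attempt: False, retries=5) would raise instead.
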
Example 10
def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline,
             timeout, performance_test, cleanup_test_files, tool,
             log_dump_name):
    """Runs the tests.

    Args:
      device: Device to run the tests on.
      test_suite: A specific test suite to run, empty to run all.
      gtest_filter: A gtest_filter flag.
      test_arguments: Additional arguments to pass to the test binary.
      rebaseline: Whether to run tests in isolation and update the filter.
      timeout: Timeout for each test.
      performance_test: Whether this is a performance test.
      cleanup_test_files: Whether to clean up test files on the device.
      tool: Name of the Valgrind tool.
      log_dump_name: Name of the log dump file.

    Returns:
      A TestResults object.
    """
    results = []

    if test_suite:
        global _TEST_SUITES
        if not os.path.exists(test_suite):
            logging.critical('Unrecognized test suite, supported: %s' %
                             _TEST_SUITES)
            if test_suite in _TEST_SUITES:
                logging.critical(
                    '(Remember to include the path: out/Release/%s)',
                    test_suite)
            return TestResults.FromOkAndFailed(
                [], [BaseTestResult(test_suite, '')])
        _TEST_SUITES = [test_suite]
    else:
        # If not specified, assume the test suites are in out/Release
        test_suite_dir = os.path.abspath(
            os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release'))
        _TEST_SUITES = [os.path.join(test_suite_dir, t) for t in _TEST_SUITES]
    debug_info_list = []
    print _TEST_SUITES  # So it shows up in buildbot output
    for t in _TEST_SUITES:
        test = SingleTestRunner(device, t, gtest_filter, test_arguments,
                                timeout, rebaseline, performance_test,
                                cleanup_test_files, tool,
                                not not log_dump_name)
        test.RunTests()
        results += [test.test_results]
        # Collect debug info.
        debug_info_list += [test.dump_debug_info]
        if rebaseline:
            test.UpdateFilter(test.test_results.failed)
        elif test.test_results.failed:
            # Stop running tests upon encountering a failed test.
            test.test_results.LogFull()
            break
    # Zip all debug info outputs into a file named by log_dump_name.
    debug_info.GTestDebugInfo.ZipAndCleanResults(
        os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release',
                     'debug_info_dumps'), log_dump_name,
        [d for d in debug_info_list if d])
    return TestResults.FromTestResults(results)
Example 11
def CallPythonTest(test, options):
    """Invokes a test function and translates Python exceptions into test results.

  This method invokes SetUp()/TearDown() on the test. It is intended to be
  resilient to exceptions in SetUp(), the test itself, and TearDown(). Any
  Python exception means the test is marked as failed, and the test result will
  contain information about the exception.

  If SetUp() raises an exception, the test is not run.

  If TearDown() raises an exception, the test is treated as a failure. However,
  if the test itself raised an exception beforehand, that stack trace will take
  precedence whether or not TearDown() also raised an exception.

  shard_index is not applicable in single-device scenarios, when test execution
  is serial rather than parallel. Tests can use this to bring up servers with
  unique port numbers, for example. See also python_test_sharder.

  Args:
    test: an object which is ostensibly a subclass of PythonTestBase.
    options: Options to use for setting up tests.

  Returns:
    A TestResults object which contains any results produced by the test or, in
    the case of a Python exception, the Python exception info.
  """

    start_date_ms = int(time.time()) * 1000
    failed = False

    try:
        test.SetUp(options)
    except Exception:
        failed = True
        logging.exception(
            'Caught exception while trying to run SetUp() for test: ' +
            test.qualified_name)
        # Tests whose SetUp() method has failed are likely to fail, or at least
        # yield invalid results.
        exc_info = sys.exc_info()
        return TestResults.FromPythonException(test.qualified_name,
                                               start_date_ms, exc_info)

    try:
        result = test.Run()
    except Exception:
        # Setting this lets TearDown() avoid stomping on our stack trace from Run()
        # should TearDown() also raise an exception.
        failed = True
        logging.exception('Caught exception while trying to run test: ' +
                          test.qualified_name)
        exc_info = sys.exc_info()
        result = TestResults.FromPythonException(test.qualified_name,
                                                 start_date_ms, exc_info)

    try:
        test.TearDown()
    except Exception:
        logging.exception(
            'Caught exception while trying to run TearDown() for test: ' +
            test.qualified_name)
        if not failed:
            # Don't stomp the error during the test if TearDown blows up. This is a
            # trade-off: if the test fails, this will mask any problem with TearDown
            # until the test is fixed.
            exc_info = sys.exc_info()
            result = TestResults.FromPythonException(test.qualified_name,
                                                     start_date_ms, exc_info)

    return result
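
CallPythonTest's core shape is a three-stage wrapper: a SetUp failure short-circuits, a Run failure is recorded, and a TearDown failure only overrides the result when Run succeeded. A compact, self-contained sketch of that precedence logic with a dummy test object (names here are made up, not the real PythonTestBase API):

import logging

def CallSafely(test):
  # Returns a short status string instead of a TestResults object.
  try:
    test.SetUp()
  except Exception:
    logging.exception('SetUp failed; skipping the test.')
    return 'setup-failed'

  status = 'ok'
  try:
    test.Run()
  except Exception:
    logging.exception('Test failed.')
    status = 'run-failed'

  try:
    test.TearDown()
  except Exception:
    logging.exception('TearDown failed.')
    if status == 'ok':
      # Only report the TearDown error if the test itself passed,
      # so a Run() stack trace is never masked.
      status = 'teardown-failed'
  return status

class DummyTest(object):
  def SetUp(self): pass
  def Run(self): raise ValueError('boom')
  def TearDown(self): raise RuntimeError('also boom')

print(CallSafely(DummyTest()))  # -> 'run-failed' (Run's failure wins)
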
Example 12
    def RunShardedTests(self):
        """Runs the tests in all connected devices.

        Returns:
          A TestResults object.
        """
        logging.warning('*' * 80)
        logging.warning('Sharding across %d devices.',
                        len(self.attached_devices))
        logging.warning('Note that the output is not synchronized.')
        logging.warning('Look for the "Final result" banner at the end.')
        logging.warning('*' * 80)
        final_results = TestResults()
        self._KillHostForwarder()
        for retry in xrange(self.retries):
            logging.warning('Try %d of %d', retry + 1, self.retries)
            self.SetupSharding(self.tests)
            test_runners = []

            # Try to create N shards, retrying on failure.
            try:
                for index, device in enumerate(self.attached_devices):
                    logging.warning('*' * 80)
                    logging.warning('Creating shard %d for %s', index, device)
                    logging.warning('*' * 80)
                    test_runner = self.CreateShardedTestRunner(device, index)
                    test_runners += [test_runner]
            except errors.DeviceUnresponsiveError as e:
                logging.critical('****Failed to create a shard: [%s]', e)
                self.attached_devices.remove(device)
                continue

            logging.warning('Starting...')
            pool = multiprocessing.Pool(len(self.attached_devices),
                                        SetTestsContainer,
                                        [BaseTestSharder.tests_container])
            # pool.map() can't be interrupted by KeyboardInterrupt (a known
            # Python bug), so use map_async() and get() with a timeout instead.
            async_results = pool.map_async(_ShardedTestRunnable, test_runners)
            try:
                results_lists = async_results.get(999999)
            except errors.DeviceUnresponsiveError as e:
                logging.critical('****Failed to run test: [%s]', e)
                self.attached_devices = android_commands.GetAttachedDevices()
                continue
            test_results = TestResults.FromTestResults(results_lists)
            # Re-check the attached devices, since some may have gone offline.
            retry_devices = set(android_commands.GetAttachedDevices())
            # Remove devices that had exceptions.
            retry_devices -= TestResults.DeviceExceptions(results_lists)
            # Retry on devices that didn't have any exception.
            self.attached_devices = list(retry_devices)
            if (retry == self.retries - 1 or len(self.attached_devices) == 0):
                all_passed = final_results.ok + test_results.ok
                final_results = test_results
                final_results.ok = all_passed
                break
            else:
                final_results.ok += test_results.ok
                self.tests = []
                for t in test_results.GetAllBroken():
                    self.tests += [t.name]
                if not self.tests:
                    break
        else:
            # We ran out of retries, possibly because we ran out of healthy
            # devices. There's no recovery at this point.
            raise Exception('Unrecoverable error while retrying test runs.')
        self.OnTestsCompleted(test_runners, final_results)
        self._KillHostForwarder()
        return final_results
Example 13
  def _WatchTestOutput(self, p):
    """Watches the test output.
    Args:
      p: the process generating output as created by pexpect.spawn.
    """
    ok_tests = []
    failed_tests = []
    crashed_tests = []
    timed_out = False
    overall_fail = False

    # Test case statuses.
    re_run = re.compile('\[ RUN      \] ?(.*)\r\n')
    re_fail = re.compile('\[  FAILED  \] ?(.*)\r\n')
    re_ok = re.compile('\[       OK \] ?(.*?) .*\r\n')

    # Test run statuses.
    re_passed = re.compile('\[  PASSED  \] ?(.*)\r\n')
    re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n')
    # Signal handlers are installed before starting tests
    # to output the CRASHED marker when a crash happens.
    re_crash = re.compile('\[ CRASHED      \](.*)\r\n')

    try:
      while True:
        found = p.expect([re_run, re_passed, re_runner_fail],
                         timeout=self.timeout)
        if found == 1:  # re_passed
          break
        elif found == 2:  # re_runner_fail
          overall_fail = True
          break
        else:  # re_run
          if self.dump_debug_info:
            self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')

          full_test_name = p.match.group(1).replace('\r', '')
          found = p.expect([re_ok, re_fail, re_crash], timeout=self.timeout)
          if found == 0:  # re_ok
            if full_test_name == p.match.group(1).replace('\r', ''):
              ok_tests += [BaseTestResult(full_test_name, p.before)]
          elif found == 2:  # re_crash
            crashed_tests += [BaseTestResult(full_test_name, p.before)]
            overall_fail = True
            break
          else:  # re_fail
            failed_tests += [BaseTestResult(full_test_name, p.before)]
    except pexpect.EOF:
      logging.error('Test terminated - EOF')
      raise errors.DeviceUnresponsiveError('Device may be offline')
    except pexpect.TIMEOUT:
      logging.error('Test terminated after %d second timeout.',
                    self.timeout)
      timed_out = True
    finally:
      p.close()

    ret_code = self._GetGTestReturnCode()
    if ret_code:
      logging.critical(
          'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s',
          ret_code, p.before, p.after)
      overall_fail = True

    # Create TestResults and return
    return TestResults.FromRun(ok=ok_tests, failed=failed_tests,
                               crashed=crashed_tests, timed_out=timed_out,
                               overall_fail=overall_fail)