Example #1
    def pMap(self, f, *args, **kwargs):
        """Map a function across the current wrapped objects in parallel.

        This calls f(o, *args, **kwargs) for each o in the set of wrapped
        objects.

        Note that this call is asynchronous. Call pFinish on the return value
        to block until the call finishes.

        Args:
          f: The function to call.
          args: The positional args to pass to f.
          kwargs: The keyword args to pass to f.

        Returns:
          A Parallelizer wrapping the ReraiserThreadGroup running the map in
          parallel.
        """
        self._assertNoShadow('pMap')
        r = type(self)(self._orig_objs)
        r._objs = reraiser_thread.ReraiserThreadGroup([
            reraiser_thread.ReraiserThread(f,
                                           args=tuple([o] + list(args)),
                                           kwargs=kwargs,
                                           name='%s(%s)' % (f.__name__, d))
            for d, o in zip(self._orig_objs, self._objs)
        ])
        r._objs.StartAll()
        return r
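
For reference, a minimal stand-in for the fan-out pattern above, built on the standard library rather than the devil API; unlike pMap it blocks until every call finishes, since result() joins each worker:

from concurrent.futures import ThreadPoolExecutor

def pmap_sketch(objs, f, *args, **kwargs):
    # One worker per wrapped object, mirroring the one-ReraiserThread-per-
    # object fan-out above. fut.result() re-raises a worker's exception in
    # the caller, which is what ReraiserThreadGroup provides via JoinAll().
    with ThreadPoolExecutor(max_workers=max(len(objs), 1)) as pool:
        futures = [pool.submit(f, o, *args, **kwargs) for o in objs]
        return [fut.result() for fut in futures]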
Example #2
    def __call__(self, *args, **kwargs):
        """Emulate calling |self| with |args| and |kwargs|.

        Note that this call is asynchronous. Call pFinish on the return value
        to block until the call finishes.

        Returns:
          A Parallelizer wrapping the ReraiserThreadGroup running the call in
          parallel.

        Raises:
          AttributeError: if the wrapped objects aren't callable.
        """
        self.pGet(None)

        for o in self._objs:
            if not callable(o):
                raise AttributeError("'%s' is not callable" % o.__name__)

        r = type(self)(self._orig_objs)
        r._objs = reraiser_thread.ReraiserThreadGroup([
            reraiser_thread.ReraiserThread(o,
                                           args=args,
                                           kwargs=kwargs,
                                           name='%s.%s' % (str(d), o.__name__))
            for d, o in zip(self._orig_objs, self._objs)
        ])
        r._objs.StartAll()
        return r
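
The asynchronous contract here (start now, block later via pFinish) can be sketched with plain threads; start_calls_sketch is a hypothetical helper, not part of the Parallelizer API:

import threading

def start_calls_sketch(objs, *args, **kwargs):
    # Start one thread per wrapped callable and return the threads without
    # joining; the caller decides when to block, which is the role pFinish
    # plays for the Parallelizer above.
    threads = [threading.Thread(target=o, args=args, kwargs=kwargs)
               for o in objs]
    for t in threads:
        t.start()
    return threads

# e.g.: for t in start_calls_sketch([print, print], 'ping'): t.join()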
Example #3
def _CreateRunners(runner_factory, devices, timeout=None):
  """Creates a test runner for each device and calls SetUp() in parallel.

  Note: if a device is unresponsive the corresponding TestRunner will not be
    included in the returned list.

  Args:
    runner_factory: Callable that takes a device and index and returns a
      TestRunner object.
    devices: List of device serial numbers as strings.
    timeout: Watchdog timeout in seconds, defaults to the default timeout.

  Returns:
    A list of TestRunner objects.
  """
  logging.warning('Creating %s test %s.', len(devices),
                  'runners' if len(devices) != 1 else 'runner')
  runners = []
  counter = _ThreadSafeCounter()
  threads = reraiser_thread.ReraiserThreadGroup(
      [reraiser_thread.ReraiserThread(_SetUp,
                                      [runner_factory, d, runners, counter],
                                      name=str(d)[-4:])
       for d in devices])
  threads.StartAll()
  threads.JoinAll(watchdog_timer.WatchdogTimer(timeout))
  return runners
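
The _ThreadSafeCounter used above is not shown on this page; a plausible sketch (class and method names assumed for illustration, not verified against the source) is:

import threading

class ThreadSafeCounterSketch(object):
  """Hypothetical counter handing out consecutive integers under a lock,
  so concurrent _SetUp calls can claim unique runner indices."""

  def __init__(self):
    self._lock = threading.Lock()
    self._value = 0

  def GetAndIncrement(self):
    with self._lock:
      value = self._value
      self._value += 1
    return value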
Example #4
    def testJoinRaise(self):
        def f():
            raise TestException

        group = reraiser_thread.ReraiserThreadGroup(
            [reraiser_thread.ReraiserThread(f) for _ in range(5)])
        group.StartAll()
        with self.assertRaises(TestException):
            group.JoinAll()
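
This test relies on JoinAll() re-raising an exception from a worker thread in the joining thread. A minimal sketch of that mechanism, independent of the devil implementation:

import threading

class ReraisingThreadSketch(threading.Thread):
    def __init__(self, func):
        super(ReraisingThreadSketch, self).__init__()
        self._func = func
        self._exc = None

    def run(self):
        # Capture rather than swallow: threading.Thread would otherwise
        # print the traceback and drop the exception.
        try:
            self._func()
        except BaseException as e:
            self._exc = e

    def JoinAndReraise(self, timeout=None):
        self.join(timeout)
        if self._exc is not None:
            raise self._exc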
Example #5
def _RunAllTests(runners, test_collection_factory, num_retries, timeout=None,
                 tag_results_with_device=False):
  """Run all tests using the given TestRunners.

  Args:
    runners: A list of TestRunner objects.
    test_collection_factory: A callable to generate a TestCollection object for
        each test runner.
    num_retries: Number of retries for a test.
    timeout: Watchdog timeout in seconds.
    tag_results_with_device: If True, appends the name of the device on which
        the test was run to the test name. Used when replicating to identify
        which device ran each copy of the test, and to ensure each copy of the
        test is recorded separately.

  Returns:
    A tuple of (TestRunResults object, exit code)
  """
  logging.warning('Running tests with %s test %s.',
                  len(runners), 'runners' if len(runners) != 1 else 'runner')
  results = []
  exit_code = 0
  run_results = base_test_result.TestRunResults()
  watcher = watchdog_timer.WatchdogTimer(timeout)
  test_collections = [test_collection_factory() for _ in runners]

  threads = [
      reraiser_thread.ReraiserThread(
          _RunTestsFromQueue,
          [r, tc, results, watcher, num_retries, tag_results_with_device],
          name=r.device_serial[-4:])
      for r, tc in zip(runners, test_collections)]

  workers = reraiser_thread.ReraiserThreadGroup(threads)
  workers.StartAll()

  try:
    workers.JoinAll(watcher)
  except device_errors.CommandFailedError:
    logging.exception('Command failed on device.')
  except device_errors.CommandTimeoutError:
    logging.exception('Command timed out on device.')
  except device_errors.DeviceUnreachableError:
    logging.exception('Device became unreachable.')

  if not all((len(tc) == 0 for tc in test_collections)):
    logging.error('Only ran %d tests (all devices are likely offline).',
                  len(results))
    for tc in test_collections:
      run_results.AddResults(base_test_result.BaseTestResult(
          t, base_test_result.ResultType.UNKNOWN) for t in tc.test_names())

  for r in results:
    run_results.AddTestRunResults(r)
  if not run_results.DidRunPass():
    exit_code = constants.ERROR_EXIT_CODE
  return (run_results, exit_code)
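
_RunTestsFromQueue is not shown here; its work-stealing shape is roughly the following sketch, where runner.RunTest and the shared test queue are assumptions for illustration:

import queue

def run_tests_from_queue_sketch(runner, tests, results):
  # Each runner drains one shared queue, so faster devices naturally
  # pick up more of the remaining tests.
  while True:
    try:
      test = tests.get_nowait()
    except queue.Empty:
      return
    results.append(runner.RunTest(test))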
Example #6
    def testInit(self):
        ran = [False] * 5

        def f(i):
            ran[i] = True

        group = reraiser_thread.ReraiserThreadGroup(
            [reraiser_thread.ReraiserThread(f, args=[i]) for i in range(5)])
        group.StartAll()
        group.JoinAll()
        for v in ran:
            self.assertTrue(v)
Example #7
def _TearDownRunners(runners, timeout=None):
  """Calls TearDown() for each test runner in parallel.

  Args:
    runners: A list of TestRunner objects.
    timeout: Watchdog timeout in seconds, defaults to the default timeout.
  """
  threads = reraiser_thread.ReraiserThreadGroup(
      [reraiser_thread.ReraiserThread(r.TearDown, name=r.device_serial[-4:])
       for r in runners])
  threads.StartAll()
  threads.JoinAll(watchdog_timer.WatchdogTimer(timeout))
Example #8
  def testJoinTimeout(self):
    def f():
      pass

    event = threading.Event()

    def g():
      event.wait()

    group = reraiser_thread.ReraiserThreadGroup(
        [reraiser_thread.ReraiserThread(g),
         reraiser_thread.ReraiserThread(f)])
    group.StartAll()
    with self.assertRaises(reraiser_thread.TimeoutError):
      group.JoinAll(watchdog_timer.WatchdogTimer(0.01))
    event.set()
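
What JoinAll does with a WatchdogTimer can be approximated with one shared deadline across all joins; a sketch:

import time

def join_all_with_deadline(threads, seconds):
  # Every join consumes the same budget; a thread still alive once the
  # deadline passes turns into an error, as in the test above.
  deadline = time.monotonic() + seconds
  for t in threads:
    t.join(max(0.0, deadline - time.monotonic()))
    if t.is_alive():
      raise TimeoutError('thread %s outlived the deadline' % t.name)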
Example #9
    def _RunTest(self, device, test):
        extras = {}

        # Provide package name under test for apk_under_test.
        if self._test_instance.apk_under_test:
            package_name = self._test_instance.apk_under_test.GetPackageName()
            extras[_EXTRA_PACKAGE_UNDER_TEST] = package_name

        flags_to_add = []
        test_timeout_scale = None
        if self._test_instance.coverage_directory:
            coverage_basename = '%s.exec' % (
                '%s_%s_group' %
                (test[0]['class'], test[0]['method']) if isinstance(
                    test, list) else '%s_%s' % (test['class'], test['method']))
            extras['coverage'] = 'true'
            coverage_directory = os.path.join(device.GetExternalStoragePath(),
                                              'chrome', 'test', 'coverage')
            if not device.PathExists(coverage_directory):
                device.RunShellCommand(['mkdir', '-p', coverage_directory],
                                       check_return=True)
            coverage_device_file = os.path.join(coverage_directory,
                                                coverage_basename)
            extras['coverageFile'] = coverage_device_file
        # Save screenshot if screenshot dir is specified (save locally) or if
        # a GS bucket is passed (save in cloud).
        screenshot_device_file = device_temp_file.DeviceTempFile(
            device.adb, suffix='.png', dir=device.GetExternalStoragePath())
        extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name

        # Set up the screenshot directory. This needs to be done for each test so
        # that we only get screenshots created by that test. It has to be on
        # external storage since the default location doesn't allow file creation
        # from the instrumentation test app on Android L and M.
        ui_capture_dir = device_temp_file.NamedDeviceTemporaryDirectory(
            device.adb, dir=device.GetExternalStoragePath())
        extras[EXTRA_UI_CAPTURE_DIR] = ui_capture_dir.name

        if self._env.trace_output:
            trace_device_file = device_temp_file.DeviceTempFile(
                device.adb,
                suffix='.json',
                dir=device.GetExternalStoragePath())
            extras[EXTRA_TRACE_FILE] = trace_device_file.name

        if isinstance(test, list):
            if not self._test_instance.driver_apk:
                raise Exception('driver_apk does not exist. '
                                'Please build it and try again.')
            if any(t.get('is_junit4') for t in test):
                raise Exception('driver apk does not support JUnit4 tests')

            def name_and_timeout(t):
                n = instrumentation_test_instance.GetTestName(t)
                i = self._GetTimeoutFromAnnotations(t['annotations'], n)
                return (n, i)

            test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

            test_name = ','.join(test_names)
            test_display_name = test_name
            target = '%s/%s' % (self._test_instance.driver_package,
                                self._test_instance.driver_name)
            extras.update(
                self._test_instance.GetDriverEnvironmentVars(
                    test_list=test_names))
            timeout = sum(timeouts)
        else:
            test_name = instrumentation_test_instance.GetTestName(test)
            test_display_name = self._GetUniqueTestName(test)
            if test['is_junit4']:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.junit4_runner_class)
            else:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.junit3_runner_class)
            extras['class'] = test_name
            if 'flags' in test and test['flags']:
                flags_to_add.extend(test['flags'])
            timeout = self._GetTimeoutFromAnnotations(test['annotations'],
                                                      test_display_name)

            test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
                test['annotations'])
            if test_timeout_scale and test_timeout_scale != 1:
                valgrind_tools.SetChromeTimeoutScale(
                    device,
                    test_timeout_scale * self._test_instance.timeout_scale)

        if self._test_instance.wait_for_java_debugger:
            timeout = None
        logging.info('preparing to run %s: %s', test_display_name, test)

        render_tests_device_output_dir = None
        if _IsRenderTest(test):
            # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
            render_tests_device_output_dir = posixpath.join(
                device.GetExternalStoragePath(), 'render_test_output_dir')
            flags_to_add.append('--render-test-output-dir=%s' %
                                render_tests_device_output_dir)

        if flags_to_add:
            self._CreateFlagChangerIfNeeded(device)
            self._flag_changers[str(device)].PushFlags(add=flags_to_add)

        time_ms = lambda: int(time.time() * 1e3)
        start_ms = time_ms()

        stream_name = 'logcat_%s_%s_%s' % (test_name.replace(
            '#', '.'), time.strftime('%Y%m%dT%H%M%S-UTC',
                                     time.gmtime()), device.serial)

        with ui_capture_dir:
            with self._env.output_manager.ArchivedTempfile(
                    stream_name, 'logcat') as logcat_file:
                try:
                    with logcat_monitor.LogcatMonitor(
                            device.adb,
                            filter_specs=local_device_environment.
                            LOGCAT_FILTERS,
                            output_file=logcat_file.name,
                            transform_func=self._test_instance.
                            MaybeDeobfuscateLines,
                            check_error=False) as logmon:
                        with _LogTestEndpoints(device, test_name):
                            with contextlib_ext.Optional(
                                    trace_event.trace(test_name),
                                    self._env.trace_output):
                                output = device.StartInstrumentation(
                                    target,
                                    raw=True,
                                    extras=extras,
                                    timeout=timeout,
                                    retries=0)
                finally:
                    logmon.Close()

            if logcat_file.Link():
                logging.info('Logcat saved to %s', logcat_file.Link())

            duration_ms = time_ms() - start_ms

            with contextlib_ext.Optional(trace_event.trace('ProcessResults'),
                                         self._env.trace_output):
                output = self._test_instance.MaybeDeobfuscateLines(output)
                # TODO(jbudorick): Make instrumentation tests output a JSON so this
                # doesn't have to parse the output.
                result_code, result_bundle, statuses = (
                    self._test_instance.ParseAmInstrumentRawOutput(output))
                results = self._test_instance.GenerateTestResults(
                    result_code, result_bundle, statuses, start_ms,
                    duration_ms, device.product_cpu_abi,
                    self._test_instance.symbolizer)

            if self._env.trace_output:
                self._SaveTraceData(trace_device_file, device, test['class'])

            def restore_flags():
                if flags_to_add:
                    self._flag_changers[str(device)].Restore()

            def restore_timeout_scale():
                if test_timeout_scale:
                    valgrind_tools.SetChromeTimeoutScale(
                        device, self._test_instance.timeout_scale)

            def handle_coverage_data():
                if self._test_instance.coverage_directory:
                    try:
                        if not os.path.exists(
                                self._test_instance.coverage_directory):
                            os.makedirs(self._test_instance.coverage_directory)
                        device.PullFile(coverage_device_file,
                                        self._test_instance.coverage_directory)
                        device.RemovePath(coverage_device_file, True)
                    except (OSError, base_error.BaseError) as e:
                        logging.warning(
                            'Failed to handle coverage data after tests: %s',
                            e)

            def handle_render_test_data():
                if _IsRenderTest(test):
                    # Render tests do not cause test failure by default. So we have to
                    # check to see if any failure images were generated even if the test
                    # does not fail.
                    try:
                        self._ProcessRenderTestResults(
                            device, render_tests_device_output_dir, results)
                    finally:
                        device.RemovePath(render_tests_device_output_dir,
                                          recursive=True,
                                          force=True)

            def pull_ui_screen_captures():
                screenshots = []
                for filename in device.ListDirectory(ui_capture_dir.name):
                    if filename.endswith('.json'):
                        screenshots.append(pull_ui_screenshot(filename))
                if screenshots:
                    json_archive_name = 'ui_capture_%s_%s.json' % (
                        test_name.replace('#', '.'),
                        time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()))
                    with self._env.output_manager.ArchivedTempfile(
                            json_archive_name, 'ui_capture',
                            output_manager.Datatype.JSON) as json_archive:
                        json.dump(screenshots, json_archive)
                    for result in results:
                        result.SetLink('ui screenshot', json_archive.Link())

            def pull_ui_screenshot(filename):
                source_dir = ui_capture_dir.name
                json_path = posixpath.join(source_dir, filename)
                json_data = json.loads(device.ReadFile(json_path))
                image_file_path = posixpath.join(source_dir,
                                                 json_data['location'])
                with self._env.output_manager.ArchivedTempfile(
                        json_data['location'], 'ui_capture',
                        output_manager.Datatype.PNG) as image_archive:
                    device.PullFile(image_file_path, image_archive.name)
                json_data['image_link'] = image_archive.Link()
                return json_data

            # While constructing the TestResult objects, we can parallelize several
            # steps that involve ADB. These steps should NOT depend on any info in
            # the results! Things such as whether the test CRASHED have not yet been
            # determined.
            post_test_steps = [
                restore_flags, restore_timeout_scale, handle_coverage_data,
                handle_render_test_data, pull_ui_screen_captures
            ]
            if self._env.concurrent_adb:
                post_test_step_thread_group = reraiser_thread.ReraiserThreadGroup(
                    reraiser_thread.ReraiserThread(f) for f in post_test_steps)
                post_test_step_thread_group.StartAll(will_block=True)
            else:
                for step in post_test_steps:
                    step()

        for result in results:
            if logcat_file:
                result.SetLink('logcat', logcat_file.Link())

        # Update the result name if the test used flags.
        if flags_to_add:
            for r in results:
                if r.GetName() == test_name:
                    r.SetName(test_display_name)

        # Add UNKNOWN results for any missing tests.
        iterable_test = test if isinstance(test, list) else [test]
        test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
        results_names = set(r.GetName() for r in results)
        results.extend(
            base_test_result.BaseTestResult(
                u, base_test_result.ResultType.UNKNOWN)
            for u in test_names.difference(results_names))

        # Update the result type if we detect a crash.
        try:
            if DidPackageCrashOnDevice(self._test_instance.test_package,
                                       device):
                for r in results:
                    if r.GetType() == base_test_result.ResultType.UNKNOWN:
                        r.SetType(base_test_result.ResultType.CRASH)
        except device_errors.CommandTimeoutError:
            logging.warning(
                'timed out when detecting/dismissing error dialogs')
            # Attach screenshot to the test to help with debugging the dialog boxes.
            self._SaveScreenshot(device, screenshot_device_file,
                                 test_display_name, results,
                                 'dialog_box_screenshot')

        # Handle failures by:
        #   - optionally taking a screenshot
        #   - logging the raw output at INFO level
        #   - clearing the application state while persisting permissions
        if any(r.GetType() not in (base_test_result.ResultType.PASS,
                                   base_test_result.ResultType.SKIP)
               for r in results):
            self._SaveScreenshot(device, screenshot_device_file,
                                 test_display_name, results,
                                 'post_test_screenshot')

            logging.info('detected failure in %s. raw output:',
                         test_display_name)
            for l in output:
                logging.info('  %s', l)
            if (not self._env.skip_clear_data
                    and self._test_instance.package_info):
                permissions = (
                    self._test_instance.apk_under_test.GetPermissions()
                    if self._test_instance.apk_under_test else None)
                device.ClearApplicationState(
                    self._test_instance.package_info.package,
                    permissions=permissions)
        else:
            logging.debug('raw output from %s:', test_display_name)
            for l in output:
                logging.debug('  %s', l)
        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True,
                            tombstone_symbolizer=self._test_instance.symbolizer
                        )
                        tombstone_filename = 'tombstones_%s_%s' % (
                            time.strftime('%Y%m%dT%H%M%S-UTC',
                                          time.gmtime()), device.serial)
                        with self._env.output_manager.ArchivedTempfile(
                                tombstone_filename,
                                'tombstones') as tombstone_file:
                            tombstone_file.write(
                                '\n'.join(resolved_tombstones))
                        result.SetLink('tombstones', tombstone_file.Link())
        if self._env.concurrent_adb:
            post_test_step_thread_group.JoinAll()
        return results, None
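
The concurrent_adb branch above starts the post-test cleanup steps in parallel and joins them only after the results have been assembled. A stand-in for that deferred-join pattern, using the standard library:

from concurrent.futures import ThreadPoolExecutor

def start_post_test_steps(steps):
    # Kick off the independent ADB-bound cleanup steps in parallel and
    # return a callable that joins them, re-raising the first failure.
    pool = ThreadPoolExecutor(max_workers=max(len(steps), 1))
    futures = [pool.submit(step) for step in steps]

    def join_all():
        try:
            for fut in futures:
                fut.result()
        finally:
            pool.shutdown()

    return join_all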
Example #10
    def _RunTest(self, device, test):
        extras = {}

        flags_to_add = []
        test_timeout_scale = None
        if self._test_instance.coverage_directory:
            coverage_basename = '%s.ec' % ('%s_group' %
                                           test[0]['method'] if isinstance(
                                               test, list) else test['method'])
            extras['coverage'] = 'true'
            coverage_directory = os.path.join(device.GetExternalStoragePath(),
                                              'chrome', 'test', 'coverage')
            coverage_device_file = os.path.join(coverage_directory,
                                                coverage_basename)
            extras['coverageFile'] = coverage_device_file
        # Save screenshot if screenshot dir is specified (save locally) or if
        # a GS bucket is passed (save in cloud).
        screenshot_device_file = None
        if (self._test_instance.screenshot_dir
                or self._test_instance.gs_results_bucket):
            screenshot_device_file = device_temp_file.DeviceTempFile(
                device.adb, suffix='.png', dir=device.GetExternalStoragePath())
            extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name

        extras[EXTRA_UI_CAPTURE_DIR] = self._ui_capture_dir[device]

        if isinstance(test, list):
            if not self._test_instance.driver_apk:
                raise Exception('driver_apk does not exist. '
                                'Please build it and try again.')
            if any(t.get('is_junit4') for t in test):
                raise Exception('driver apk does not support JUnit4 tests')

            def name_and_timeout(t):
                n = instrumentation_test_instance.GetTestName(t)
                i = self._GetTimeoutFromAnnotations(t['annotations'], n)
                return (n, i)

            test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

            test_name = ','.join(test_names)
            test_display_name = test_name
            target = '%s/%s' % (self._test_instance.driver_package,
                                self._test_instance.driver_name)
            extras.update(
                self._test_instance.GetDriverEnvironmentVars(
                    test_list=test_names))
            timeout = sum(timeouts)
        else:
            test_name = instrumentation_test_instance.GetTestName(test)
            test_display_name = self._GetUniqueTestName(test)
            if test['is_junit4']:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner_junit4)
            else:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner)
            extras['class'] = test_name
            if 'flags' in test and test['flags']:
                flags_to_add.extend(test['flags'])
            timeout = self._GetTimeoutFromAnnotations(test['annotations'],
                                                      test_display_name)

            test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
                test['annotations'])
            if test_timeout_scale and test_timeout_scale != 1:
                valgrind_tools.SetChromeTimeoutScale(
                    device,
                    test_timeout_scale * self._test_instance.timeout_scale)

        logging.info('preparing to run %s: %s', test_display_name, test)

        render_tests_device_output_dir = None
        if _IsRenderTest(test):
            # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
            render_tests_device_output_dir = posixpath.join(
                device.GetExternalStoragePath(), 'render_test_output_dir')
            flags_to_add.append('--render-test-output-dir=%s' %
                                render_tests_device_output_dir)

        if flags_to_add:
            self._CreateFlagChangerIfNeeded(device)
            self._flag_changers[str(device)].PushFlags(add=flags_to_add)

        time_ms = lambda: int(time.time() * 1e3)
        start_ms = time_ms()

        stream_name = 'logcat_%s_%s_%s' % (test_name.replace(
            '#', '.'), time.strftime('%Y%m%dT%H%M%S-UTC',
                                     time.gmtime()), device.serial)
        logmon = logdog_logcat_monitor.LogdogLogcatMonitor(
            device.adb, stream_name, filter_specs=LOGCAT_FILTERS)

        with contextlib_ext.Optional(logmon,
                                     self._test_instance.should_save_logcat):
            with _LogTestEndpoints(device, test_name):
                with contextlib_ext.Optional(trace_event.trace(test_name),
                                             self._env.trace_output):
                    output = device.StartInstrumentation(target,
                                                         raw=True,
                                                         extras=extras,
                                                         timeout=timeout,
                                                         retries=0)

        logcat_url = logmon.GetLogcatURL()
        duration_ms = time_ms() - start_ms

        # TODO(jbudorick): Make instrumentation tests output a JSON so this
        # doesn't have to parse the output.
        result_code, result_bundle, statuses = (
            self._test_instance.ParseAmInstrumentRawOutput(output))
        results = self._test_instance.GenerateTestResults(
            result_code, result_bundle, statuses, start_ms, duration_ms)

        def restore_flags():
            if flags_to_add:
                self._flag_changers[str(device)].Restore()

        def restore_timeout_scale():
            if test_timeout_scale:
                valgrind_tools.SetChromeTimeoutScale(
                    device, self._test_instance.timeout_scale)

        def handle_coverage_data():
            if self._test_instance.coverage_directory:
                device.PullFile(coverage_directory,
                                self._test_instance.coverage_directory)
                device.RunShellCommand('rm -f %s' %
                                       posixpath.join(coverage_directory, '*'),
                                       check_return=True,
                                       shell=True)

        def handle_render_test_data():
            if _IsRenderTest(test):
                # Render tests do not cause test failure by default. So we have to check
                # to see if any failure images were generated even if the test does not
                # fail.
                try:
                    self._ProcessRenderTestResults(
                        device, render_tests_device_output_dir, results)
                finally:
                    device.RemovePath(render_tests_device_output_dir,
                                      recursive=True,
                                      force=True)

        # While constructing the TestResult objects, we can parallelize several
        # steps that involve ADB. These steps should NOT depend on any info in
        # the results! Things such as whether the test CRASHED have not yet been
        # determined.
        post_test_steps = [
            restore_flags, restore_timeout_scale, handle_coverage_data,
            handle_render_test_data
        ]
        if self._env.concurrent_adb:
            post_test_step_thread_group = reraiser_thread.ReraiserThreadGroup(
                reraiser_thread.ReraiserThread(f) for f in post_test_steps)
            post_test_step_thread_group.StartAll(will_block=True)
        else:
            for step in post_test_steps:
                step()

        for result in results:
            if logcat_url:
                result.SetLink('logcat', logcat_url)

        # Update the result name if the test used flags.
        if flags_to_add:
            for r in results:
                if r.GetName() == test_name:
                    r.SetName(test_display_name)

        # Add UNKNOWN results for any missing tests.
        iterable_test = test if isinstance(test, list) else [test]
        test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
        results_names = set(r.GetName() for r in results)
        results.extend(
            base_test_result.BaseTestResult(
                u, base_test_result.ResultType.UNKNOWN)
            for u in test_names.difference(results_names))

        # Update the result type if we detect a crash.
        if DidPackageCrashOnDevice(self._test_instance.test_package, device):
            for r in results:
                if r.GetType() == base_test_result.ResultType.UNKNOWN:
                    r.SetType(base_test_result.ResultType.CRASH)

        # Handle failures by:
        #   - optionally taking a screenshot
        #   - logging the raw output at INFO level
        #   - clearing the application state while persisting permissions
        if any(r.GetType() not in (base_test_result.ResultType.PASS,
                                   base_test_result.ResultType.SKIP)
               for r in results):
            with contextlib_ext.Optional(
                    tempfile_ext.NamedTemporaryDirectory(),
                    self._test_instance.screenshot_dir is None
                    and self._test_instance.gs_results_bucket
            ) as screenshot_host_dir:
                screenshot_host_dir = (self._test_instance.screenshot_dir
                                       or screenshot_host_dir)
                self._SaveScreenshot(device, screenshot_host_dir,
                                     screenshot_device_file, test_display_name,
                                     results)

            logging.info('detected failure in %s. raw output:',
                         test_display_name)
            for l in output:
                logging.info('  %s', l)
            if (not self._env.skip_clear_data
                    and self._test_instance.package_info):
                permissions = (
                    self._test_instance.apk_under_test.GetPermissions()
                    if self._test_instance.apk_under_test else None)
                device.ClearApplicationState(
                    self._test_instance.package_info.package,
                    permissions=permissions)
        else:
            logging.debug('raw output from %s:', test_display_name)
            for l in output:
                logging.debug('  %s', l)
        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True)
                        stream_name = 'tombstones_%s_%s' % (time.strftime(
                            '%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial)
                        tombstones_url = logdog_helper.text(
                            stream_name, '\n'.join(resolved_tombstones))
                    result.SetLink('tombstones', tombstones_url)

        if self._env.concurrent_adb:
            post_test_step_thread_group.JoinAll()
        return results, None
Example #11
    def SetUp(self):
        self._allow_upload = True
        self._thread_group = reraiser_thread.ReraiserThreadGroup()
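
SetUp only creates an empty group here; threads are presumably added and joined later. A hypothetical grow-then-join group illustrating that lifecycle (the Add/JoinAll names are assumed, not taken from this page):

import threading

class ThreadGroupSketch(object):
    def __init__(self):
        self._threads = []

    def Add(self, func, *args):
        # Start work as it appears (e.g. an upload) and remember the thread.
        t = threading.Thread(target=func, args=args)
        self._threads.append(t)
        t.start()

    def JoinAll(self):
        for t in self._threads:
            t.join()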