    def StopTracing(self):
        assert self.is_tracing_running, 'Can only stop tracing when tracing is on.'
        self._IssueClockSyncMarker()
        builder = self._current_state.builder

        raised_exception_messages = []
        for agent in self._active_agents_instances + [self]:
            try:
                with trace_event.trace('StopAgentTracing',
                                       agent=str(agent.__class__.__name__)):
                    agent.StopAgentTracing()
            except Exception:  # pylint: disable=broad-except
                raised_exception_messages.append(''.join(
                    traceback.format_exception(*sys.exc_info())))

        for agent in self._active_agents_instances + [self]:
            try:
                with trace_event.trace('CollectAgentTraceData',
                                       agent=str(agent.__class__.__name__)):
                    agent.CollectAgentTraceData(builder)
            except Exception:  # pylint: disable=broad-except
                raised_exception_messages.append(''.join(
                    traceback.format_exception(*sys.exc_info())))

        self._telemetry_info = None
        self._active_agents_instances = []
        self._current_state = None

        if raised_exception_messages:
            raise exceptions.TracingException(
                'Exceptions raised when trying to stop tracing:\n' +
                '\n'.join(raised_exception_messages))

        return builder.AsData()
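
The two passes above (stop every agent, then collect from every agent) run against a shared agent interface. A minimal sketch of that interface, inferred only from the method names used throughout these examples; the real telemetry base class may differ:

# Minimal sketch of the tracing-agent interface these controller methods
# drive. Method names come from the examples in this listing; everything
# else (docstrings, defaults) is an assumption, not the real base class.
class TracingAgent(object):
  def __init__(self, platform_backend):
    self._platform_backend = platform_backend

  def StartAgentTracing(self, config, timeout):
    """Begin collecting data; return True if tracing actually started."""
    raise NotImplementedError

  def StopAgentTracing(self):
    """Stop collecting trace data."""
    raise NotImplementedError

  def CollectAgentTraceData(self, trace_data_builder):
    """Add the collected data to trace_data_builder."""
    raise NotImplementedError

  def SupportsExplicitClockSync(self):
    return False

  def RecordClockSyncMarker(self, sync_id, record_issuer_clock_sync_marker):
    """Write a clock-sync marker, then report the issue time back."""
    raise NotImplementedError
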
  def FlushTracing(self, discard_current=False):
    assert self.is_tracing_running, 'Can only flush tracing when tracing is on.'
    self._IssueClockSyncMarker()

    raised_exception_messages = []

    # pylint: disable=redefined-variable-type
    # See: https://github.com/PyCQA/pylint/issues/710
    if discard_current:
      trace_builder = _TraceDataDiscarder()
    else:
      trace_builder = self._current_state.builder

    # Flushing the controller's pytrace is not supported.
    for agent in self._active_agents_instances:
      try:
        if agent.SupportsFlushingAgentTracing():
          with trace_event.trace('FlushAgentTracing',
                                 agent=str(agent.__class__.__name__)):
            with self._CollectNonfatalException('FlushAgentTracing'):
              agent.FlushAgentTracing(self._current_state.config,
                                      self._current_state.timeout,
                                      trace_builder)
      except Exception: # pylint: disable=broad-except
        raised_exception_messages.append(
            ''.join(traceback.format_exception(*sys.exc_info())))

    if raised_exception_messages:
      raise exceptions.TracingException(
          'Exceptions raised when trying to flush tracing:\n' +
          '\n'.join(raised_exception_messages))
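
When discard_current is set, the flush is redirected into a builder that simply drops the data. A plausible sketch of _TraceDataDiscarder, assuming builders expose an AddTraceFor(part, value) entry point; the real class may implement more of the builder interface:

class _TraceDataDiscarder(object):
  """Stand-in builder that accepts trace data and drops it (sketch)."""

  def AddTraceFor(self, trace_part, value):
    # Accept and intentionally discard the agent's data.
    del trace_part, value
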
Example 3
    def FlushTracing(self):
        assert self.is_tracing_running, 'Can only flush tracing when tracing is on.'
        self._IssueClockSyncMarker()

        raised_exception_messages = []
        # Flushing the controller's pytrace is not supported.
        for agent in self._active_agents_instances:
            try:
                if agent.SupportsFlushingAgentTracing():
                    with trace_event.trace('FlushAgentTracing',
                                           agent=str(
                                               agent.__class__.__name__)):
                        with self._CollectNonfatalException(
                                'FlushAgentTracing'):
                            agent.FlushAgentTracing(
                                self._current_state.config,
                                self._current_state.timeout,
                                self._current_state.builder)
            except Exception:  # pylint: disable=broad-except
                raised_exception_messages.append(''.join(
                    traceback.format_exception(*sys.exc_info())))

        if raised_exception_messages:
            raise exceptions.TracingException(
                'Exceptions raised when trying to flush tracing:\n' +
                '\n'.join(raised_exception_messages))
    def _RunSingleTest(self, test):
        self._test_instance.WriteBuildBotJson(self._output_dir)

        timeout = self._tests[test].get('timeout', self._timeout)
        cmd = self._CreateCmd(test)
        cwd = os.path.abspath(host_paths.DIR_SOURCE_ROOT)

        self._LogTest(test, cmd, timeout)

        try:
            start_time = time.time()

            with contextlib_ext.Optional(trace_event.trace(test),
                                         self._env.trace_output):
                exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
                    cmd, timeout, cwd=cwd, shell=True)
            end_time = time.time()
            chart_json_output = self._test_instance.ReadChartjsonOutput(
                self._output_dir)
            if exit_code == 0:
                result_type = base_test_result.ResultType.PASS
            else:
                result_type = base_test_result.ResultType.FAIL
        except cmd_helper.TimeoutError as e:
            end_time = time.time()
            exit_code = -1
            output = e.output
            chart_json_output = ''
            result_type = base_test_result.ResultType.TIMEOUT
        return self._ProcessTestResult(test, cmd, start_time, end_time,
                                       exit_code, output, chart_json_output,
                                       result_type)
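
contextlib_ext.Optional enters the wrapped context manager only when its second argument is truthy, so the trace_event.trace(test) span is emitted only when self._env.trace_output is set. A minimal sketch of how such a helper can be written:

import contextlib

@contextlib.contextmanager
def Optional(cm, condition):
  # Enter |cm| only when |condition| is truthy; otherwise act as a no-op.
  if condition:
    with cm:
      yield cm
  else:
    yield None
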
    def StartTracing(self, config, timeout):
        if self.is_tracing_running:
            return False

        assert isinstance(config, tracing_config.TracingConfig)
        assert len(self._active_agents_instances) == 0

        self._current_state = _TracingState(config, timeout)
        # Hack: the Chrome tracing agent may depend only on the number of live
        # Chrome devtools processes rather than on the platform (when startup
        # tracing is not supported), so add it to the list of supported agents
        # here if it is not already present.
        if (chrome_tracing_agent.ChromeTracingAgent.IsSupported(
                self._platform_backend)
                and not chrome_tracing_agent.ChromeTracingAgent
                in self._supported_agents_classes):
            self._supported_agents_classes.append(
                chrome_tracing_agent.ChromeTracingAgent)

        self.StartAgentTracing(config, timeout)
        for agent_class in self._supported_agents_classes:
            agent = agent_class(self._platform_backend)
            with trace_event.trace('StartAgentTracing',
                                   agent=str(agent.__class__.__name__)):
                started = agent.StartAgentTracing(config, timeout)
            if started:
                self._active_agents_instances.append(agent)
        return True
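
For orientation, this is the call sequence a client of the controller follows, shown with a stand-in class so the sketch is self-contained; only the method names and return conventions mirror the examples in this listing:

# Hypothetical end-to-end usage with a fake controller; how the real
# controller instance is constructed is not shown in these examples.
class _FakeTracingController(object):
  def StartTracing(self, config, timeout):
    print('StartTracing(timeout=%s)' % timeout)
    return True

  def FlushTracing(self):
    print('FlushTracing()')

  def StopTracing(self):
    print('StopTracing()')
    return {'traceEvents': []}

controller = _FakeTracingController()
if controller.StartTracing(config=None, timeout=60):
  controller.FlushTracing()   # optional mid-run checkpoint
  trace = controller.StopTracing()
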
Example 6
  def _ArchiveLogcat(self, device, test_name):
    stream_name = 'logcat_%s_%s_%s' % (
        test_name.replace('#', '.'),
        time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
        device.serial)

    logcat_file = None
    logmon = None
    try:
      with self._env.output_manager.ArchivedTempfile(
          stream_name, 'logcat') as logcat_file:
        with logcat_monitor.LogcatMonitor(
            device.adb,
            filter_specs=local_device_environment.LOGCAT_FILTERS,
            output_file=logcat_file.name,
            transform_func=self._test_instance.MaybeDeobfuscateLines,
            check_error=False) as logmon:
          with _LogTestEndpoints(device, test_name):
            with contextlib_ext.Optional(
                trace_event.trace(test_name),
                self._env.trace_output):
              yield logcat_file
    finally:
      if logmon:
        logmon.Close()
      if logcat_file and logcat_file.Link():
        logging.info('Logcat saved to %s', logcat_file.Link())
  def StartTracing(self, config, timeout):
    if self.is_tracing_running:
      return False

    assert isinstance(config, tracing_config.TracingConfig)
    assert len(self._active_agents_instances) == 0

    self._current_state = _TracingState(config, timeout)
    # Hack: the Chrome tracing agent may depend only on the number of live
    # Chrome devtools processes rather than on the platform (when startup
    # tracing is not supported), so add it to the list of supported agents
    # here if it is not already present.
    if (chrome_tracing_agent.ChromeTracingAgent.IsSupported(
        self._platform_backend) and
        not chrome_tracing_agent.ChromeTracingAgent in
        self._supported_agents_classes):
      self._supported_agents_classes.append(
          chrome_tracing_agent.ChromeTracingAgent)

    self.StartAgentTracing(config, timeout)
    for agent_class in self._supported_agents_classes:
      agent = agent_class(self._platform_backend)
      with trace_event.trace('StartAgentTracing',
                             agent=str(agent.__class__.__name__)):
        with self._CollectNonfatalException('StartAgentTracing'):
          if agent.StartAgentTracing(config, timeout):
            self._active_agents_instances.append(agent)

    return True
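
This variant wraps each agent call in _CollectNonfatalException, and the StopTracing variant later in this listing returns self._nonfatal_exceptions alongside the trace data. A guess at the helper's shape; which exception types count as nonfatal, and the record format, are assumptions here:

import contextlib
import sys
import traceback

class NonFatalTracingException(Exception):
  """Hypothetical marker type for errors that should not abort tracing."""

@contextlib.contextmanager
def _CollectNonfatalException(self, stage_name):
  # Method sketch: record exceptions deemed nonfatal and let everything
  # else propagate to the caller's broad except clause.
  try:
    yield
  except NonFatalTracingException:
    self._nonfatal_exceptions.append(
        (stage_name, ''.join(traceback.format_exception(*sys.exc_info()))))
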
  def _RunSingleTest(self, test):
    self._test_instance.WriteBuildBotJson(self._output_dir)

    timeout = self._tests[test].get('timeout', self._timeout)
    cmd = self._CreateCmd(test)
    cwd = os.path.abspath(host_paths.DIR_SOURCE_ROOT)

    self._LogTest(test, cmd, timeout)

    try:
      start_time = time.time()

      with contextlib_ext.Optional(
          trace_event.trace(test),
          self._env.trace_output):
        exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
            cmd, timeout, cwd=cwd, shell=True)
      end_time = time.time()
      chart_json_output = self._test_instance.ReadChartjsonOutput(
          self._output_dir)
      if exit_code == 0:
        result_type = base_test_result.ResultType.PASS
      else:
        result_type = base_test_result.ResultType.FAIL
    except cmd_helper.TimeoutError as e:
      end_time = time.time()
      exit_code = -1
      output = e.output
      chart_json_output = ''
      result_type = base_test_result.ResultType.TIMEOUT
    return self._ProcessTestResult(test, cmd, start_time, end_time, exit_code,
                                   output, chart_json_output, result_type)
    def _ArchiveLogcat(self, device, test):
        if isinstance(test, str):
            desc = test
        else:
            desc = hash(tuple(test))

        stream_name = 'logcat_%s_%s_%s' % (
            desc, time.strftime('%Y%m%dT%H%M%S-UTC',
                                time.gmtime()), device.serial)

        logcat_file = None
        logmon = None
        try:
            with self._env.output_manager.ArchivedTempfile(
                    stream_name, 'logcat') as logcat_file:
                with logcat_monitor.LogcatMonitor(
                        device.adb,
                        filter_specs=local_device_environment.LOGCAT_FILTERS,
                        output_file=logcat_file.name,
                        check_error=False) as logmon:
                    with contextlib_ext.Optional(trace_event.trace(str(test)),
                                                 self._env.trace_output):
                        yield logcat_file
        finally:
            if logmon:
                logmon.Close()
            if logcat_file and logcat_file.Link():
                logging.info('Logcat saved to %s', logcat_file.Link())
    def testAddTraceEvent(self):
        self.agent.StartAgentTracing(self.config, timeout=10)
        with trace_event.trace('test-marker'):
            pass
        self.agent.StopAgentTracing()
        trace = FakeTraceDataBuilder()
        self.agent.CollectAgentTraceData(trace)
        self.assertIn('test-marker', trace.GetEventNames())
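
FakeTraceDataBuilder is a test double. A minimal stand-in consistent with the usage above; the AddTraceFor signature and the assumption that collected values are dicts in Chrome's trace format are both inferred, not taken from the real class:

class FakeTraceDataBuilder(object):
  """In-memory test double for a trace-data builder (sketch)."""

  def __init__(self):
    self._events = []

  def AddTraceFor(self, trace_part, value):
    # Assumes |value| is a dict in Chrome's JSON trace format.
    del trace_part
    self._events.extend(value.get('traceEvents', []))

  def GetEventNames(self):
    return [event.get('name') for event in self._events]
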
Example 11
    def testAddTraceEvent(self):
        self.agent.StartAgentTracing(self.config, timeout=10)
        with trace_event.trace('test-marker'):
            pass
        self.agent.StopAgentTracing()
        with trace_data.TraceDataBuilder() as builder:
            self.agent.CollectAgentTraceData(builder)
            trace = builder.AsData().GetTraceFor(trace_data.TELEMETRY_PART)
        self.assertIn('test-marker', GetEventNames(trace))
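
Here the test goes through the real trace_data.TraceDataBuilder plus a free GetEventNames helper. A plausible one-liner over a trace in Chrome's JSON trace format (the dict shape is an assumption):

def GetEventNames(trace):
  # Assumes |trace| is a dict in Chrome's JSON trace format.
  return [event['name'] for event in trace['traceEvents']]
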
    def _IssueClockSyncMarker(self):
        with self._DisableGarbageCollection():
            for agent in self._active_agents_instances:
                if agent.SupportsExplicitClockSync():
                    sync_id = self._GenerateClockSyncId()
                    with trace_event.trace('RecordClockSyncMarker',
                                           agent=str(agent.__class__.__name__),
                                           sync_id=sync_id):
                        agent.RecordClockSyncMarker(
                            sync_id, self._RecordIssuerClockSyncMarker)
  def _IssueClockSyncMarker(self):
    with self._DisableGarbageCollection():
      for agent in self._active_agents_instances:
        if agent.SupportsExplicitClockSync():
          sync_id = self._GenerateClockSyncId()
          with trace_event.trace(
              'RecordClockSyncMarker',
              agent=str(agent.__class__.__name__),
              sync_id=sync_id):
            agent.RecordClockSyncMarker(sync_id,
                                        self._RecordIssuerClockSyncMarker)
  def _IssueClockSyncMarker(self):
    if not telemetry_tracing_agent.IsAgentEnabled():
      return

    with _DisableGarbageCollection():
      for agent in self._active_agents_instances:
        if agent.SupportsExplicitClockSync():
          sync_id = _GenerateClockSyncId()
          with trace_event.trace('RecordClockSyncMarker',
                                 agent=str(agent.__class__.__name__),
                                 sync_id=sync_id):
            agent.RecordClockSyncMarker(
                sync_id, telemetry_tracing_agent.RecordIssuerClockSyncMarker)
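
An agent that supports explicit clock sync writes a marker into its own trace stream and then reports when it did so, which lets traces from different agents be aligned on a shared timeline afterwards. A sketch of the agent side; the (sync_id, timestamp) callback signature is an assumption based on how these examples pass the recorder around:

import time

class ClockSyncingAgent(object):
  """Sketch of the clock-sync half of an agent (assumed callback shape)."""

  def SupportsExplicitClockSync(self):
    return True

  def RecordClockSyncMarker(self, sync_id, record_issuer_clock_sync_marker):
    issue_ts = time.time()
    self._WriteMarkerToAgentTrace(sync_id)  # hypothetical helper
    # Report back when the marker was issued; the (sync_id, timestamp)
    # signature is an assumption, not the verified telemetry API.
    record_issuer_clock_sync_marker(sync_id, issue_ts)

  def _WriteMarkerToAgentTrace(self, sync_id):
    pass  # Placeholder: a real agent writes the marker into its own trace.
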
Example 16
    def _IssueClockSyncMarker(self):
        recorder = self._GetActiveTelemetryTracingAgent()
        if recorder is None:
            return

        with _DisableGarbageCollection():
            for agent in self._active_agents_instances:
                if agent.SupportsExplicitClockSync():
                    sync_id = _GenerateClockSyncId()
                    with trace_event.trace('RecordClockSyncMarker',
                                           agent=str(agent.__class__.__name__),
                                           sync_id=sync_id):
                        agent.RecordClockSyncMarker(
                            sync_id, recorder.RecordIssuerClockSyncMarker)
Example 17
  def StopTracing(self):
    assert self.is_tracing_running, 'Can only stop tracing when tracing is on.'
    self._IssueClockSyncMarker()
    builder = self._current_state.builder

    raised_exception_messages = []
    for agent in self._active_agents_instances + [self]:
      try:
        with trace_event.trace('StopAgentTracing',
                               agent=str(agent.__class__.__name__)):
          with self._CollectNonfatalException('StopAgentTracing'):
            agent.StopAgentTracing()
      except Exception: # pylint: disable=broad-except
        raised_exception_messages.append(
            ''.join(traceback.format_exception(*sys.exc_info())))

    for agent in self._active_agents_instances + [self]:
      try:
        with trace_event.trace('CollectAgentTraceData',
                               agent=str(agent.__class__.__name__)):
          with self._CollectNonfatalException('CollectAgentTraceData'):
            agent.CollectAgentTraceData(builder)
      except Exception: # pylint: disable=broad-except
        raised_exception_messages.append(
            ''.join(traceback.format_exception(*sys.exc_info())))

    self._telemetry_info = None
    self._active_agents_instances = []
    self._current_state = None

    if raised_exception_messages:
      raise exceptions.TracingException(
          'Exceptions raised when trying to stop tracing:\n' +
          '\n'.join(raised_exception_messages))

    return (builder.AsData(), self._nonfatal_exceptions)
    def StartTracing(self, config, timeout):
        if self.is_tracing_running:
            return False

        assert isinstance(config, tracing_config.TracingConfig)
        assert len(self._active_agents_instances) == 0

        self._current_state = _TracingState(config, timeout)

        self.StartAgentTracing(config, timeout)
        for agent_class in _TRACING_AGENT_CLASSES:
            if agent_class.IsSupported(self._platform_backend):
                agent = agent_class(self._platform_backend)
                with trace_event.trace('StartAgentTracing',
                                       agent=str(agent.__class__.__name__)):
                    if agent.StartAgentTracing(config, timeout):
                        self._active_agents_instances.append(agent)

        return True
Example 19
    def testTrace(self):
        with self._test_trace():
            with trace_event.trace('test_event', this='that'):
                pass
            trace_event.trace_flush()
            with open(self._log_path, 'r') as f:
                log_output = json.loads(f.read() + ']')
                self.assertEquals(len(log_output), 3)
                current_entry = log_output.pop(0)
                self.assertEquals(current_entry['category'], 'process_argv')
                self.assertEquals(current_entry['name'], 'process_argv')
                self.assertTrue(current_entry['args']['argv'])
                self.assertEquals(current_entry['ph'], 'M')
                current_entry = log_output.pop(0)
                self.assertEquals(current_entry['category'], 'python')
                self.assertEquals(current_entry['name'], 'test_event')
                self.assertEquals(current_entry['args']['this'], '\'that\'')
                self.assertEquals(current_entry['ph'], 'B')
                current_entry = log_output.pop(0)
                self.assertEquals(current_entry['category'], 'python')
                self.assertEquals(current_entry['name'], 'test_event')
                self.assertEquals(current_entry['args'], {})
                self.assertEquals(current_entry['ph'], 'E')
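
The json.loads(f.read() + ']') trick works because the trace log is written as an unterminated JSON array of event objects. A self-contained illustration of the format the assertions above expect; the field values here are illustrative, and real events carry additional fields such as pid, tid and ts:

import json

log_text = (
    '[{"ph": "M", "category": "process_argv", "name": "process_argv",'
    ' "args": {"argv": ["run_tests"]}},'
    '{"ph": "B", "category": "python", "name": "test_event",'
    ' "args": {"this": "\'that\'"}},'
    '{"ph": "E", "category": "python", "name": "test_event", "args": {}}')
events = json.loads(log_text + ']')  # same closing-bracket trick as above
assert [e['ph'] for e in events] == ['M', 'B', 'E']
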
Example 20
  def FlushTracing(self):
    assert self.is_tracing_running, 'Can only flush tracing when tracing is on.'
    self._IssueClockSyncMarker()

    raised_exception_messages = []
    # Flushing the controller's pytrace is not supported.
    for agent in self._active_agents_instances:
      try:
        if agent.SupportsFlushingAgentTracing():
          with trace_event.trace('FlushAgentTracing',
                                 agent=str(agent.__class__.__name__)):
            with self._CollectNonfatalException('FlushAgentTracing'):
              agent.FlushAgentTracing(self._current_state.config,
                                      self._current_state.timeout,
                                      self._current_state.builder)
      except Exception: # pylint: disable=broad-except
        raised_exception_messages.append(
            ''.join(traceback.format_exception(*sys.exc_info())))

    if raised_exception_messages:
      raise exceptions.TracingException(
          'Exceptions raised when trying to flush tracing:\n' +
          '\n'.join(raised_exception_messages))
Example 21
  def testTrace(self):
    with self._test_trace():
      with trace_event.trace('test_event', this='that'):
        pass
      trace_event.trace_flush()
      with open(self._log_path, 'r') as f:
        log_output = json.loads(f.read() + ']')
        self.assertEquals(len(log_output), 3)
        current_entry = log_output.pop(0)
        self.assertEquals(current_entry['category'], 'process_argv')
        self.assertEquals(current_entry['name'], 'process_argv')
        self.assertTrue(current_entry['args']['argv'])
        self.assertEquals(current_entry['ph'], 'M')
        current_entry = log_output.pop(0)
        self.assertEquals(current_entry['category'], 'python')
        self.assertEquals(current_entry['name'], 'test_event')
        self.assertEquals(current_entry['args']['this'], '\'that\'')
        self.assertEquals(current_entry['ph'], 'B')
        current_entry = log_output.pop(0)
        self.assertEquals(current_entry['category'], 'python')
        self.assertEquals(current_entry['name'], 'test_event')
        self.assertEquals(current_entry['args'], {})
        self.assertEquals(current_entry['ph'], 'E')
Example 22
    def _RunTest(self, device, test):
        extras = {}

        flags_to_add = []
        test_timeout_scale = None
        if self._test_instance.coverage_directory:
            coverage_basename = '%s.ec' % ('%s_group' %
                                           test[0]['method'] if isinstance(
                                               test, list) else test['method'])
            extras['coverage'] = 'true'
            coverage_directory = os.path.join(device.GetExternalStoragePath(),
                                              'chrome', 'test', 'coverage')
            coverage_device_file = os.path.join(coverage_directory,
                                                coverage_basename)
            extras['coverageFile'] = coverage_device_file
        # Save screenshot if screenshot dir is specified (save locally) or if
        # a GS bucket is passed (save in cloud).
        screenshot_device_file = None
        if (self._test_instance.screenshot_dir
                or self._test_instance.gs_results_bucket):
            screenshot_device_file = device_temp_file.DeviceTempFile(
                device.adb, suffix='.png', dir=device.GetExternalStoragePath())
            extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name

        extras[EXTRA_UI_CAPTURE_DIR] = self._ui_capture_dir[device]

        if isinstance(test, list):
            if not self._test_instance.driver_apk:
                raise Exception('driver_apk does not exist. '
                                'Please build it and try again.')
            if any(t.get('is_junit4') for t in test):
                raise Exception('driver apk does not support JUnit4 tests')

            def name_and_timeout(t):
                n = instrumentation_test_instance.GetTestName(t)
                i = self._GetTimeoutFromAnnotations(t['annotations'], n)
                return (n, i)

            test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

            test_name = ','.join(test_names)
            test_display_name = test_name
            target = '%s/%s' % (self._test_instance.driver_package,
                                self._test_instance.driver_name)
            extras.update(
                self._test_instance.GetDriverEnvironmentVars(
                    test_list=test_names))
            timeout = sum(timeouts)
        else:
            test_name = instrumentation_test_instance.GetTestName(test)
            test_display_name = self._GetUniqueTestName(test)
            if test['is_junit4']:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner_junit4)
            else:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner)
            extras['class'] = test_name
            if 'flags' in test and test['flags']:
                flags_to_add.extend(test['flags'])
            timeout = self._GetTimeoutFromAnnotations(test['annotations'],
                                                      test_display_name)

            test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
                test['annotations'])
            if test_timeout_scale and test_timeout_scale != 1:
                valgrind_tools.SetChromeTimeoutScale(
                    device,
                    test_timeout_scale * self._test_instance.timeout_scale)

        logging.info('preparing to run %s: %s', test_display_name, test)

        render_tests_device_output_dir = None
        if _IsRenderTest(test):
            # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
            render_tests_device_output_dir = posixpath.join(
                device.GetExternalStoragePath(), 'render_test_output_dir')
            flags_to_add.append('--render-test-output-dir=%s' %
                                render_tests_device_output_dir)

        if flags_to_add:
            self._CreateFlagChangerIfNeeded(device)
            self._flag_changers[str(device)].PushFlags(add=flags_to_add)

        time_ms = lambda: int(time.time() * 1e3)
        start_ms = time_ms()

        stream_name = 'logcat_%s_%s_%s' % (test_name.replace(
            '#', '.'), time.strftime('%Y%m%dT%H%M%S-UTC',
                                     time.gmtime()), device.serial)
        logmon = logdog_logcat_monitor.LogdogLogcatMonitor(
            device.adb, stream_name, filter_specs=LOGCAT_FILTERS)

        with contextlib_ext.Optional(logmon,
                                     self._test_instance.should_save_logcat):
            with _LogTestEndpoints(device, test_name):
                with contextlib_ext.Optional(trace_event.trace(test_name),
                                             self._env.trace_output):
                    output = device.StartInstrumentation(target,
                                                         raw=True,
                                                         extras=extras,
                                                         timeout=timeout,
                                                         retries=0)

        logcat_url = logmon.GetLogcatURL()
        duration_ms = time_ms() - start_ms

        # TODO(jbudorick): Make instrumentation tests output a JSON so this
        # doesn't have to parse the output.
        result_code, result_bundle, statuses = (
            self._test_instance.ParseAmInstrumentRawOutput(output))
        results = self._test_instance.GenerateTestResults(
            result_code, result_bundle, statuses, start_ms, duration_ms)

        def restore_flags():
            if flags_to_add:
                self._flag_changers[str(device)].Restore()

        def restore_timeout_scale():
            if test_timeout_scale:
                valgrind_tools.SetChromeTimeoutScale(
                    device, self._test_instance.timeout_scale)

        def handle_coverage_data():
            if self._test_instance.coverage_directory:
                device.PullFile(coverage_directory,
                                self._test_instance.coverage_directory)
                device.RunShellCommand('rm -f %s' %
                                       posixpath.join(coverage_directory, '*'),
                                       check_return=True,
                                       shell=True)

        def handle_render_test_data():
            if _IsRenderTest(test):
                # Render tests do not fail the run by default, so we have to
                # check whether any failure images were generated even if the
                # test itself did not fail.
                try:
                    self._ProcessRenderTestResults(
                        device, render_tests_device_output_dir, results)
                finally:
                    device.RemovePath(render_tests_device_output_dir,
                                      recursive=True,
                                      force=True)

        # While constructing the TestResult objects, we can parallelize several
        # steps that involve ADB. These steps should NOT depend on any info in
        # the results! Things such as whether the test CRASHED have not yet been
        # determined.
        post_test_steps = [
            restore_flags, restore_timeout_scale, handle_coverage_data,
            handle_render_test_data
        ]
        if self._env.concurrent_adb:
            post_test_step_thread_group = reraiser_thread.ReraiserThreadGroup(
                reraiser_thread.ReraiserThread(f) for f in post_test_steps)
            post_test_step_thread_group.StartAll(will_block=True)
        else:
            for step in post_test_steps:
                step()

        for result in results:
            if logcat_url:
                result.SetLink('logcat', logcat_url)

        # Update the result name if the test used flags.
        if flags_to_add:
            for r in results:
                if r.GetName() == test_name:
                    r.SetName(test_display_name)

        # Add UNKNOWN results for any missing tests.
        iterable_test = test if isinstance(test, list) else [test]
        test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
        results_names = set(r.GetName() for r in results)
        results.extend(
            base_test_result.BaseTestResult(
                u, base_test_result.ResultType.UNKNOWN)
            for u in test_names.difference(results_names))

        # Update the result type if we detect a crash.
        if DidPackageCrashOnDevice(self._test_instance.test_package, device):
            for r in results:
                if r.GetType() == base_test_result.ResultType.UNKNOWN:
                    r.SetType(base_test_result.ResultType.CRASH)

        # Handle failures by:
        #   - optionally taking a screenshot
        #   - logging the raw output at INFO level
        #   - clearing the application state while persisting permissions
        if any(r.GetType() not in (base_test_result.ResultType.PASS,
                                   base_test_result.ResultType.SKIP)
               for r in results):
            with contextlib_ext.Optional(
                    tempfile_ext.NamedTemporaryDirectory(),
                    self._test_instance.screenshot_dir is None
                    and self._test_instance.gs_results_bucket
            ) as screenshot_host_dir:
                screenshot_host_dir = (self._test_instance.screenshot_dir
                                       or screenshot_host_dir)
                self._SaveScreenshot(device, screenshot_host_dir,
                                     screenshot_device_file, test_display_name,
                                     results)

            logging.info('detected failure in %s. raw output:',
                         test_display_name)
            for l in output:
                logging.info('  %s', l)
            if (not self._env.skip_clear_data
                    and self._test_instance.package_info):
                permissions = (
                    self._test_instance.apk_under_test.GetPermissions()
                    if self._test_instance.apk_under_test else None)
                device.ClearApplicationState(
                    self._test_instance.package_info.package,
                    permissions=permissions)
        else:
            logging.debug('raw output from %s:', test_display_name)
            for l in output:
                logging.debug('  %s', l)
        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True)
                        stream_name = 'tombstones_%s_%s' % (time.strftime(
                            '%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial)
                        tombstones_url = logdog_helper.text(
                            stream_name, '\n'.join(resolved_tombstones))
                    result.SetLink('tombstones', tombstones_url)

        if self._env.concurrent_adb:
            post_test_step_thread_group.JoinAll()
        return results, None
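
When concurrent_adb is enabled, the post-test steps run on ReraiserThreads, whose point is that an exception raised on a worker is re-raised in the thread that joins it instead of being lost. A condensed sketch of that behavior; the real devil implementation adds thread groups, timeouts and watchers, and its method names differ:

import sys
import threading

class ReraiserThread(threading.Thread):
  """Thread that stores exceptions and re-raises them on join (sketch)."""

  def __init__(self, func):
    super(ReraiserThread, self).__init__()
    self._func = func
    self._exc_info = None

  def run(self):
    try:
      self._func()
    except Exception:  # pylint: disable=broad-except
      self._exc_info = sys.exc_info()

  def JoinAndReraise(self):
    # Hypothetical method name; re-raises without the original traceback
    # for the sake of a short, two-version-compatible sketch.
    self.join()
    if self._exc_info:
      raise self._exc_info[1]
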
Example 23
    def child_function():
        with trace_event.trace('child_event'):
            pass
  def _RunTest(self, device, test):
    extras = {}

    flags_to_add = []
    test_timeout_scale = None
    if self._test_instance.coverage_directory:
      coverage_basename = '%s.ec' % ('%s_group' % test[0]['method']
          if isinstance(test, list) else test['method'])
      extras['coverage'] = 'true'
      coverage_directory = os.path.join(
          device.GetExternalStoragePath(), 'chrome', 'test', 'coverage')
      coverage_device_file = os.path.join(
          coverage_directory, coverage_basename)
      extras['coverageFile'] = coverage_device_file
    # Save screenshot if screenshot dir is specified (save locally) or if
    # a GS bucket is passed (save in cloud).
    screenshot_device_file = device_temp_file.DeviceTempFile(
        device.adb, suffix='.png', dir=device.GetExternalStoragePath())
    extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name

    # Set up the screenshot directory. This needs to be done for each test so
    # that we only get screenshots created by that test. It has to be on
    # external storage since the default location doesn't allow file creation
    # from the instrumentation test app on Android L and M.
    ui_capture_dir = device_temp_file.NamedDeviceTemporaryDirectory(
        device.adb,
        dir=device.GetExternalStoragePath())
    extras[EXTRA_UI_CAPTURE_DIR] = ui_capture_dir.name

    if self._env.trace_output:
      trace_device_file = device_temp_file.DeviceTempFile(
          device.adb, suffix='.json', dir=device.GetExternalStoragePath())
      extras[EXTRA_TRACE_FILE] = trace_device_file.name

    if isinstance(test, list):
      if not self._test_instance.driver_apk:
        raise Exception('driver_apk does not exist. '
                        'Please build it and try again.')
      if any(t.get('is_junit4') for t in test):
        raise Exception('driver apk does not support JUnit4 tests')

      def name_and_timeout(t):
        n = instrumentation_test_instance.GetTestName(t)
        i = self._GetTimeoutFromAnnotations(t['annotations'], n)
        return (n, i)

      test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

      test_name = ','.join(test_names)
      test_display_name = test_name
      target = '%s/%s' % (
          self._test_instance.driver_package,
          self._test_instance.driver_name)
      extras.update(
          self._test_instance.GetDriverEnvironmentVars(
              test_list=test_names))
      timeout = sum(timeouts)
    else:
      test_name = instrumentation_test_instance.GetTestName(test)
      test_display_name = self._GetUniqueTestName(test)
      if test['is_junit4']:
        target = '%s/%s' % (
            self._test_instance.test_package,
            self._test_instance.junit4_runner_class)
      else:
        target = '%s/%s' % (
            self._test_instance.test_package,
            self._test_instance.junit3_runner_class)
      extras['class'] = test_name
      if 'flags' in test and test['flags']:
        flags_to_add.extend(test['flags'])
      timeout = self._GetTimeoutFromAnnotations(
        test['annotations'], test_display_name)

      test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
          test['annotations'])
      if test_timeout_scale and test_timeout_scale != 1:
        valgrind_tools.SetChromeTimeoutScale(
            device, test_timeout_scale * self._test_instance.timeout_scale)

    if self._test_instance.wait_for_java_debugger:
      timeout = None
    logging.info('preparing to run %s: %s', test_display_name, test)

    render_tests_device_output_dir = None
    if _IsRenderTest(test):
      # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
      render_tests_device_output_dir = posixpath.join(
          device.GetExternalStoragePath(),
          'render_test_output_dir')
      flags_to_add.append('--render-test-output-dir=%s' %
                          render_tests_device_output_dir)

    if flags_to_add:
      self._CreateFlagChangerIfNeeded(device)
      self._flag_changers[str(device)].PushFlags(add=flags_to_add)

    time_ms = lambda: int(time.time() * 1e3)
    start_ms = time_ms()

    stream_name = 'logcat_%s_%s_%s' % (
        test_name.replace('#', '.'),
        time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
        device.serial)

    with ui_capture_dir:
      with self._env.output_manager.ArchivedTempfile(
          stream_name, 'logcat') as logcat_file:
        try:
          with logcat_monitor.LogcatMonitor(
              device.adb,
              filter_specs=local_device_environment.LOGCAT_FILTERS,
              output_file=logcat_file.name,
              transform_func=self._test_instance.MaybeDeobfuscateLines
              ) as logmon:
            with _LogTestEndpoints(device, test_name):
              with contextlib_ext.Optional(
                  trace_event.trace(test_name),
                  self._env.trace_output):
                output = device.StartInstrumentation(
                    target, raw=True, extras=extras, timeout=timeout, retries=0)
        finally:
          logmon.Close()

      if logcat_file.Link():
        logging.info('Logcat saved to %s', logcat_file.Link())

      duration_ms = time_ms() - start_ms

      with contextlib_ext.Optional(
          trace_event.trace('ProcessResults'),
          self._env.trace_output):
        output = self._test_instance.MaybeDeobfuscateLines(output)
        # TODO(jbudorick): Make instrumentation tests output a JSON so this
        # doesn't have to parse the output.
        result_code, result_bundle, statuses = (
            self._test_instance.ParseAmInstrumentRawOutput(output))
        results = self._test_instance.GenerateTestResults(
            result_code, result_bundle, statuses, start_ms, duration_ms,
            device.product_cpu_abi, self._test_instance.symbolizer)

      if self._env.trace_output:
        self._SaveTraceData(trace_device_file, device, test['class'])

      def restore_flags():
        if flags_to_add:
          self._flag_changers[str(device)].Restore()

      def restore_timeout_scale():
        if test_timeout_scale:
          valgrind_tools.SetChromeTimeoutScale(
              device, self._test_instance.timeout_scale)

      def handle_coverage_data():
        if self._test_instance.coverage_directory:
          device.PullFile(coverage_directory,
              self._test_instance.coverage_directory)
          device.RunShellCommand(
              'rm -f %s' % posixpath.join(coverage_directory, '*'),
              check_return=True, shell=True)

      def handle_render_test_data():
        if _IsRenderTest(test):
          # Render tests do not fail the run by default, so we have to check
          # whether any failure images were generated even if the test itself
          # did not fail.
          try:
            self._ProcessRenderTestResults(
                device, render_tests_device_output_dir, results)
          finally:
            device.RemovePath(render_tests_device_output_dir,
                              recursive=True, force=True)

      def pull_ui_screen_captures():
        screenshots = []
        for filename in device.ListDirectory(ui_capture_dir.name):
          if filename.endswith('.json'):
            screenshots.append(pull_ui_screenshot(filename))
        if screenshots:
          json_archive_name = 'ui_capture_%s_%s.json' % (
              test_name.replace('#', '.'),
              time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()))
          with self._env.output_manager.ArchivedTempfile(
              json_archive_name, 'ui_capture', output_manager.Datatype.JSON
              ) as json_archive:
            json.dump(screenshots, json_archive)
          for result in results:
            result.SetLink('ui screenshot', json_archive.Link())

      def pull_ui_screenshot(filename):
        source_dir = ui_capture_dir.name
        json_path = posixpath.join(source_dir, filename)
        json_data = json.loads(device.ReadFile(json_path))
        image_file_path = posixpath.join(source_dir, json_data['location'])
        with self._env.output_manager.ArchivedTempfile(
            json_data['location'], 'ui_capture', output_manager.Datatype.PNG
            ) as image_archive:
          device.PullFile(image_file_path, image_archive.name)
        json_data['image_link'] = image_archive.Link()
        return json_data

      # While constructing the TestResult objects, we can parallelize several
      # steps that involve ADB. These steps should NOT depend on any info in
      # the results! Things such as whether the test CRASHED have not yet been
      # determined.
      post_test_steps = [restore_flags, restore_timeout_scale,
                         handle_coverage_data, handle_render_test_data,
                         pull_ui_screen_captures]
      if self._env.concurrent_adb:
        post_test_step_thread_group = reraiser_thread.ReraiserThreadGroup(
            reraiser_thread.ReraiserThread(f) for f in post_test_steps)
        post_test_step_thread_group.StartAll(will_block=True)
      else:
        for step in post_test_steps:
          step()

    for result in results:
      if logcat_file:
        result.SetLink('logcat', logcat_file.Link())

    # Update the result name if the test used flags.
    if flags_to_add:
      for r in results:
        if r.GetName() == test_name:
          r.SetName(test_display_name)

    # Add UNKNOWN results for any missing tests.
    iterable_test = test if isinstance(test, list) else [test]
    test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
    results_names = set(r.GetName() for r in results)
    results.extend(
        base_test_result.BaseTestResult(u, base_test_result.ResultType.UNKNOWN)
        for u in test_names.difference(results_names))

    # Update the result type if we detect a crash.
    try:
      if DidPackageCrashOnDevice(self._test_instance.test_package, device):
        for r in results:
          if r.GetType() == base_test_result.ResultType.UNKNOWN:
            r.SetType(base_test_result.ResultType.CRASH)
    except device_errors.CommandTimeoutError:
      logging.warning('timed out when detecting/dismissing error dialogs')
      # Attach screenshot to the test to help with debugging the dialog boxes.
      self._SaveScreenshot(device, screenshot_device_file, test_display_name,
                           results, 'dialog_box_screenshot')

    # Handle failures by:
    #   - optionally taking a screenshot
    #   - logging the raw output at INFO level
    #   - clearing the application state while persisting permissions
    if any(r.GetType() not in (base_test_result.ResultType.PASS,
                               base_test_result.ResultType.SKIP)
           for r in results):
      self._SaveScreenshot(device, screenshot_device_file, test_display_name,
                           results, 'post_test_screenshot')

      logging.info('detected failure in %s. raw output:', test_display_name)
      for l in output:
        logging.info('  %s', l)
      if (not self._env.skip_clear_data
          and self._test_instance.package_info):
        permissions = (
            self._test_instance.apk_under_test.GetPermissions()
            if self._test_instance.apk_under_test
            else None)
        device.ClearApplicationState(self._test_instance.package_info.package,
                                     permissions=permissions)
    else:
      logging.debug('raw output from %s:', test_display_name)
      for l in output:
        logging.debug('  %s', l)
    if self._test_instance.store_tombstones:
      tombstones_url = None
      for result in results:
        if result.GetType() == base_test_result.ResultType.CRASH:
          if not tombstones_url:
            resolved_tombstones = tombstones.ResolveTombstones(
                device,
                resolve_all_tombstones=True,
                include_stack_symbols=False,
                wipe_tombstones=True,
                tombstone_symbolizer=self._test_instance.symbolizer)
            tombstone_filename = 'tombstones_%s_%s' % (
                time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
                device.serial)
            with self._env.output_manager.ArchivedTempfile(
                tombstone_filename, 'tombstones') as tombstone_file:
              tombstone_file.write('\n'.join(resolved_tombstones))
            result.SetLink('tombstones', tombstone_file.Link())
    if self._env.concurrent_adb:
      post_test_step_thread_group.JoinAll()
    return results, None
Example 25
    def _RunTest(self, device, test):
        extras = {}

        flags = None
        test_timeout_scale = None
        if self._test_instance.coverage_directory:
            coverage_basename = '%s.ec' % ('%s_group' %
                                           test[0]['method'] if isinstance(
                                               test, list) else test['method'])
            extras['coverage'] = 'true'
            coverage_directory = os.path.join(device.GetExternalStoragePath(),
                                              'chrome', 'test', 'coverage')
            coverage_device_file = os.path.join(coverage_directory,
                                                coverage_basename)
            extras['coverageFile'] = coverage_device_file

        if isinstance(test, list):
            if not self._test_instance.driver_apk:
                raise Exception('driver_apk does not exist. '
                                'Please build it and try again.')
            if any(t.get('is_junit4') for t in test):
                raise Exception('driver apk does not support JUnit4 tests')

            def name_and_timeout(t):
                n = instrumentation_test_instance.GetTestName(t)
                i = self._GetTimeoutFromAnnotations(t['annotations'], n)
                return (n, i)

            test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

            test_name = ','.join(test_names)
            test_display_name = test_name
            target = '%s/%s' % (self._test_instance.driver_package,
                                self._test_instance.driver_name)
            extras.update(
                self._test_instance.GetDriverEnvironmentVars(
                    test_list=test_names))
            timeout = sum(timeouts)
        else:
            test_name = instrumentation_test_instance.GetTestName(test)
            test_display_name = self._GetUniqueTestName(test)
            if test['is_junit4']:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner_junit4)
            else:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner)
            extras['class'] = test_name
            if 'flags' in test:
                flags = test['flags']
            timeout = self._GetTimeoutFromAnnotations(test['annotations'],
                                                      test_display_name)

            test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
                test['annotations'])
            if test_timeout_scale and test_timeout_scale != 1:
                valgrind_tools.SetChromeTimeoutScale(
                    device,
                    test_timeout_scale * self._test_instance.timeout_scale)

        logging.info('preparing to run %s: %s', test_display_name, test)

        if flags:
            self._CreateFlagChangerIfNeeded(device)
            self._flag_changers[str(device)].PushFlags(add=flags.add,
                                                       remove=flags.remove)

        try:
            device.RunShellCommand(
                ['log', '-p', 'i', '-t', _TAG,
                 'START %s' % test_name],
                check_return=True)
            logcat_url = None
            time_ms = lambda: int(time.time() * 1e3)
            start_ms = time_ms()

            stream_name = 'logcat_%s_%s_%s' % (test_name.replace(
                '#', '.'), time.strftime('%Y%m%dT%H%M%S',
                                         time.localtime()), device.serial)
            with contextlib_ext.Optional(
                    logdog_logcat_monitor.LogdogLogcatMonitor(
                        device.adb, stream_name),
                    self._test_instance.should_save_logcat) as logmon:
                with contextlib_ext.Optional(trace_event.trace(test_name),
                                             self._env.trace_output):
                    output = device.StartInstrumentation(target,
                                                         raw=True,
                                                         extras=extras,
                                                         timeout=timeout,
                                                         retries=0)
                if logmon:
                    logcat_url = logmon.GetLogcatURL()
        finally:
            device.RunShellCommand(
                ['log', '-p', 'i', '-t', _TAG,
                 'END %s' % test_name],
                check_return=True)
            duration_ms = time_ms() - start_ms
            if flags:
                self._flag_changers[str(device)].Restore()
            if test_timeout_scale:
                valgrind_tools.SetChromeTimeoutScale(
                    device, self._test_instance.timeout_scale)

        # TODO(jbudorick): Make instrumentation tests output a JSON so this
        # doesn't have to parse the output.
        result_code, result_bundle, statuses = (
            self._test_instance.ParseAmInstrumentRawOutput(output))
        results = self._test_instance.GenerateTestResults(
            result_code, result_bundle, statuses, start_ms, duration_ms)
        for result in results:
            if logcat_url:
                result.SetLink('logcat', logcat_url)

        # Update the result name if the test used flags.
        if flags:
            for r in results:
                if r.GetName() == test_name:
                    r.SetName(test_display_name)

        # Add UNKNOWN results for any missing tests.
        iterable_test = test if isinstance(test, list) else [test]
        test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
        results_names = set(r.GetName() for r in results)
        results.extend(
            base_test_result.BaseTestResult(
                u, base_test_result.ResultType.UNKNOWN)
            for u in test_names.difference(results_names))

        # Update the result type if we detect a crash.
        if DidPackageCrashOnDevice(self._test_instance.test_package, device):
            for r in results:
                if r.GetType() == base_test_result.ResultType.UNKNOWN:
                    r.SetType(base_test_result.ResultType.CRASH)

        # Handle failures by:
        #   - optionally taking a screenshot
        #   - logging the raw output at INFO level
        #   - clearing the application state while persisting permissions
        if any(r.GetType() not in (base_test_result.ResultType.PASS,
                                   base_test_result.ResultType.SKIP)
               for r in results):
            if self._test_instance.screenshot_dir:
                file_name = '%s-%s.png' % (
                    test_display_name,
                    time.strftime('%Y%m%dT%H%M%S', time.localtime()))
                saved_dir = device.TakeScreenshot(
                    os.path.join(self._test_instance.screenshot_dir,
                                 file_name))
                logging.info('Saved screenshot for %s to %s.',
                             test_display_name, saved_dir)
            logging.info('detected failure in %s. raw output:',
                         test_display_name)
            for l in output:
                logging.info('  %s', l)
            if (not self._env.skip_clear_data
                    and self._test_instance.package_info):
                permissions = (
                    self._test_instance.apk_under_test.GetPermissions()
                    if self._test_instance.apk_under_test else None)
                device.ClearApplicationState(
                    self._test_instance.package_info.package,
                    permissions=permissions)

        else:
            logging.debug('raw output from %s:', test_display_name)
            for l in output:
                logging.debug('  %s', l)
        if self._test_instance.coverage_directory:
            device.PullFile(coverage_directory,
                            self._test_instance.coverage_directory)
            device.RunShellCommand('rm -f %s' %
                                   os.path.join(coverage_directory, '*'))
        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True)
                        stream_name = 'tombstones_%s_%s' % (time.strftime(
                            '%Y%m%dT%H%M%S', time.localtime()), device.serial)
                        tombstones_url = logdog_helper.text(
                            stream_name, resolved_tombstones)
                    result.SetLink('tombstones', tombstones_url)
        return results, None
    def _RunTest(self, device, test):
        # Run the test.
        timeout = (self._test_instance.shard_timeout *
                   self.GetTool(device).GetTimeoutScale())
        if self._test_instance.store_tombstones:
            tombstones.ClearAllTombstones(device)
        with device_temp_file.DeviceTempFile(
                adb=device.adb,
                dir=self._delegate.ResultsDirectory(device),
                suffix='.xml') as device_tmp_results_file:

            flags = list(self._test_instance.flags)
            if self._test_instance.enable_xml_result_parsing:
                flags.append('--gtest_output=xml:%s' %
                             device_tmp_results_file.name)

            logging.info('flags:')
            for f in flags:
                logging.info('  %s', f)

            with contextlib_ext.Optional(trace_event.trace(str(test)),
                                         self._env.trace_output):
                output = self._delegate.Run(test,
                                            device,
                                            flags=' '.join(flags),
                                            timeout=timeout,
                                            retries=0)

            if self._test_instance.enable_xml_result_parsing:
                gtest_xml = device.ReadFile(device_tmp_results_file.name,
                                            as_root=True)

        for s in self._servers[str(device)]:
            s.Reset()
        if self._test_instance.app_files:
            self._delegate.PullAppFiles(device, self._test_instance.app_files,
                                        self._test_instance.app_file_dir)
        if not self._env.skip_clear_data:
            self._delegate.Clear(device)

        # Parse the output.
        # TODO(jbudorick): Transition test scripts away from parsing stdout.
        if self._test_instance.enable_xml_result_parsing:
            results = gtest_test_instance.ParseGTestXML(gtest_xml)
        else:
            results = gtest_test_instance.ParseGTestOutput(output)

        # Check whether there are any crashed testcases.
        self._crashes.update(
            r.GetName() for r in results
            if r.GetType() == base_test_result.ResultType.CRASH)

        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True)
                        stream_name = 'tombstones_%s_%s' % (time.strftime(
                            '%Y%m%dT%H%M%S', time.localtime()), device.serial)
                        tombstones_url = logdog_helper.text(
                            stream_name, resolved_tombstones)
                    result.SetLink('tombstones', tombstones_url)

        tests_stripped_disabled_prefix = set()
        for t in test:
            tests_stripped_disabled_prefix.add(
                gtest_test_instance.TestNameWithoutDisabledPrefix(t))
        not_run_tests = tests_stripped_disabled_prefix.difference(
            set(r.GetName() for r in results))
        return results, list(not_run_tests) if results else None
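The tombstone blocks in these listings resolve a device's tombstones at most once per run and then attach the same uploaded LogDog URL to every crashed result. A minimal sketch of that resolve-once, link-many pattern; resolve and upload are stand-ins for tombstones.ResolveTombstones and logdog_helper.text, and the sketch assumes resolve() yields text lines:

def link_tombstones(crashed_results, resolve, upload):
  # Resolve and upload tombstones on the first crash only; every later
  # crashed result reuses the same stream URL.
  url = None
  for result in crashed_results:
    if url is None:
      url = upload('tombstones', '\n'.join(resolve()))
    result.SetLink('tombstones', url)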
Example 27
  def _RunTest(self, device, test):
    # Run the test.
    timeout = (self._test_instance.shard_timeout
               * self.GetTool(device).GetTimeoutScale())
    if self._test_instance.store_tombstones:
      tombstones.ClearAllTombstones(device)
    with device_temp_file.DeviceTempFile(
        adb=device.adb,
        dir=self._delegate.ResultsDirectory(device),
        suffix='.xml') as device_tmp_results_file:

      flags = list(self._test_instance.flags)
      if self._test_instance.enable_xml_result_parsing:
        flags.append('--gtest_output=xml:%s' % device_tmp_results_file.name)

      logging.info('flags:')
      for f in flags:
        logging.info('  %s', f)

      with contextlib_ext.Optional(
          trace_event.trace(str(test)),
          self._env.trace_output):
        output = self._delegate.Run(
            test, device, flags=' '.join(flags),
            timeout=timeout, retries=0)

      if self._test_instance.enable_xml_result_parsing:
        gtest_xml = device.ReadFile(
            device_tmp_results_file.name,
            as_root=True)

    for s in self._servers[str(device)]:
      s.Reset()
    if self._test_instance.app_files:
      self._delegate.PullAppFiles(device, self._test_instance.app_files,
                                  self._test_instance.app_file_dir)
    if not self._env.skip_clear_data:
      self._delegate.Clear(device)

    for l in output:
      logging.info(l)

    # Parse the output.
    # TODO(jbudorick): Transition test scripts away from parsing stdout.
    if self._test_instance.enable_xml_result_parsing:
      results = gtest_test_instance.ParseGTestXML(gtest_xml)
    else:
      results = gtest_test_instance.ParseGTestOutput(output)

    # Check whether there are any crashed testcases.
    self._crashes.update(r.GetName() for r in results
                         if r.GetType() == base_test_result.ResultType.CRASH)

    if self._test_instance.store_tombstones:
      tombstones_url = None
      for result in results:
        if result.GetType() == base_test_result.ResultType.CRASH:
          if not tombstones_url:
            resolved_tombstones = tombstones.ResolveTombstones(
                device,
                resolve_all_tombstones=True,
                include_stack_symbols=False,
                wipe_tombstones=True)
            stream_name = 'tombstones_%s_%s' % (
                time.strftime('%Y%m%dT%H%M%S', time.localtime()),
                device.serial)
            tombstones_url = logdog_helper.text(
                stream_name, '\n'.join(resolved_tombstones))
          result.SetLink('tombstones', tombstones_url)

    tests_stripped_disabled_prefix = set()
    for t in test:
      tests_stripped_disabled_prefix.add(
          gtest_test_instance.TestNameWithoutDisabledPrefix(t))
    not_run_tests = tests_stripped_disabled_prefix.difference(
        set(r.GetName() for r in results))
    return results, list(not_run_tests) if results else None
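Several listings wrap a context manager in contextlib_ext.Optional(cm, condition) so that it is applied only when the condition is truthy, e.g. tracing only when self._env.trace_output is set. A simplified re-implementation to illustrate the idea; the real devil helper may differ in what it yields when the condition is false:

import contextlib

@contextlib.contextmanager
def optional(cm, condition):
  # Apply |cm| only when |condition| is truthy; otherwise do nothing.
  if condition:
    with cm as result:
      yield result
  else:
    yield None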
Example 28
  def _RunTest(self, device, test):
    # Run the test.
    timeout = (self._test_instance.shard_timeout
               * self.GetTool(device).GetTimeoutScale())
    if self._test_instance.wait_for_java_debugger:
      timeout = None
    if self._test_instance.store_tombstones:
      tombstones.ClearAllTombstones(device)
    with device_temp_file.DeviceTempFile(
        adb=device.adb,
        dir=self._delegate.ResultsDirectory(device),
        suffix='.xml') as device_tmp_results_file:
      with contextlib_ext.Optional(
          device_temp_file.NamedDeviceTemporaryDirectory(
              adb=device.adb, dir='/sdcard/'),
          self._test_instance.gs_test_artifacts_bucket) as test_artifacts_dir:
        with (contextlib_ext.Optional(
            device_temp_file.DeviceTempFile(
                adb=device.adb, dir=self._delegate.ResultsDirectory(device)),
            self._test_instance.isolated_script_test_perf_output)
            ) as isolated_script_test_perf_output:

          flags = list(self._test_instance.flags)
          if self._test_instance.enable_xml_result_parsing:
            flags.append('--gtest_output=xml:%s' % device_tmp_results_file.name)

          if self._test_instance.gs_test_artifacts_bucket:
            flags.append('--test_artifacts_dir=%s' % test_artifacts_dir.name)

          if self._test_instance.isolated_script_test_perf_output:
            flags.append('--isolated_script_test_perf_output=%s'
                         % isolated_script_test_perf_output.name)

          logging.info('flags:')
          for f in flags:
            logging.info('  %s', f)

          stream_name = 'logcat_%s_%s_%s' % (
              hash(tuple(test)),
              time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
              device.serial)

          with self._env.output_manager.ArchivedTempfile(
              stream_name, 'logcat') as logcat_file:
            with logcat_monitor.LogcatMonitor(
                device.adb,
                filter_specs=local_device_environment.LOGCAT_FILTERS,
                output_file=logcat_file.name) as logmon:
              with contextlib_ext.Optional(
                  trace_event.trace(str(test)),
                  self._env.trace_output):
                output = self._delegate.Run(
                    test, device, flags=' '.join(flags),
                    timeout=timeout, retries=0)
            logmon.Close()

          if logcat_file.Link():
            logging.info('Logcat saved to %s', logcat_file.Link())

          if self._test_instance.enable_xml_result_parsing:
            try:
              gtest_xml = device.ReadFile(
                  device_tmp_results_file.name,
                  as_root=True)
            except device_errors.CommandFailedError as e:
              logging.warning(
                  'Failed to pull gtest results XML file %s: %s',
                  device_tmp_results_file.name,
                  str(e))
              gtest_xml = None

          if self._test_instance.isolated_script_test_perf_output:
            try:
              device.PullFile(
                  isolated_script_test_perf_output.name,
                  self._test_instance.isolated_script_test_perf_output)
            except device_errors.CommandFailedError as e:
              logging.warning(
                  'Failed to pull chartjson results %s: %s',
                  isolated_script_test_perf_output.name, str(e))

          test_artifacts_url = self._UploadTestArtifacts(device,
                                                         test_artifacts_dir)

    for s in self._servers[str(device)]:
      s.Reset()
    if self._test_instance.app_files:
      self._delegate.PullAppFiles(device, self._test_instance.app_files,
                                  self._test_instance.app_file_dir)
    if not self._env.skip_clear_data:
      self._delegate.Clear(device)

    for l in output:
      logging.info(l)

    # Parse the output.
    # TODO(jbudorick): Transition test scripts away from parsing stdout.
    if self._test_instance.enable_xml_result_parsing:
      results = gtest_test_instance.ParseGTestXML(gtest_xml)
    else:
      results = gtest_test_instance.ParseGTestOutput(
          output, self._test_instance.symbolizer, device.product_cpu_abi)

    tombstones_url = None
    for r in results:
      if logcat_file:
        r.SetLink('logcat', logcat_file.Link())

      if self._test_instance.gs_test_artifacts_bucket:
        r.SetLink('test_artifacts', test_artifacts_url)

      if r.GetType() == base_test_result.ResultType.CRASH:
        self._crashes.add(r.GetName())
        if self._test_instance.store_tombstones:
          if not tombstones_url:
            resolved_tombstones = tombstones.ResolveTombstones(
                device,
                resolve_all_tombstones=True,
                include_stack_symbols=False,
                wipe_tombstones=True)
            stream_name = 'tombstones_%s_%s' % (
                time.strftime('%Y%m%dT%H%M%S', time.localtime()),
                device.serial)
            tombstones_url = logdog_helper.text(
                stream_name, '\n'.join(resolved_tombstones))
          r.SetLink('tombstones', tombstones_url)

    tests_stripped_disabled_prefix = set()
    for t in test:
      tests_stripped_disabled_prefix.add(
          gtest_test_instance.TestNameWithoutDisabledPrefix(t))
    not_run_tests = tests_stripped_disabled_prefix.difference(
        set(r.GetName() for r in results))
    return results, list(not_run_tests) if results else None
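The closing bookkeeping in the gtest listings strips gtest's DISABLED_/FLAKY_ prefixes from the requested names and diffs them against the names that actually produced results, so tests that never ran can be reported. A hedged sketch of that computation, with strip_prefix standing in for gtest_test_instance.TestNameWithoutDisabledPrefix:

def not_run_tests(requested, results, strip_prefix):
  # Requested tests that produced no result at all.
  requested_names = set(strip_prefix(t) for t in requested)
  ran_names = set(r.GetName() for r in results)
  return requested_names - ran_names

Note that the listings return None instead of a list when there were no results at all, presumably so an empty run is treated as a whole-shard failure rather than a set of individually skipped tests.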
Example 29
def child_function():
  with trace_event.trace('child_event'):
    pass
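trace_event.trace opens an event on entering the with block and closes it on exit, so nested blocks become parent/child spans in the trace. A minimal sketch of nesting the child event above under a parent, assuming catapult's py_trace_event package is importable; the enable/disable calls are shown only as comments since their exact API is an assumption:

from py_trace_event import trace_event  # assumed importable from catapult

def child_function():
  with trace_event.trace('child_event'):
    pass

def parent_function():
  # 'child_event' appears nested under 'parent_event' in the output.
  with trace_event.trace('parent_event'):
    child_function()

# trace_event.trace_enable('example_trace.json')  # start recording (assumed API)
# parent_function()
# trace_event.trace_disable()                     # flush and stop (assumed API)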
Example 30
    def _RunTest(self, device, test):
        extras = {}

        # Provide package name under test for apk_under_test.
        if self._test_instance.apk_under_test:
            package_name = self._test_instance.apk_under_test.GetPackageName()
            extras[_EXTRA_PACKAGE_UNDER_TEST] = package_name

        flags_to_add = []
        test_timeout_scale = None
        if self._test_instance.coverage_directory:
            coverage_basename = '%s.exec' % (
                '%s_%s_group' %
                (test[0]['class'], test[0]['method']) if isinstance(
                    test, list) else '%s_%s' % (test['class'], test['method']))
            extras['coverage'] = 'true'
            coverage_directory = os.path.join(device.GetExternalStoragePath(),
                                              'chrome', 'test', 'coverage')
            if not device.PathExists(coverage_directory):
                device.RunShellCommand(['mkdir', '-p', coverage_directory],
                                       check_return=True)
            coverage_device_file = os.path.join(coverage_directory,
                                                coverage_basename)
            extras['coverageFile'] = coverage_device_file
        # Save screenshot if screenshot dir is specified (save locally) or if
        # a GS bucket is passed (save in cloud).
        screenshot_device_file = device_temp_file.DeviceTempFile(
            device.adb, suffix='.png', dir=device.GetExternalStoragePath())
        extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name

        # Set up the screenshot directory. This needs to be done for each test so
        # that we only get screenshots created by that test. It has to be on
        # external storage since the default location doesn't allow file creation
        # from the instrumentation test app on Android L and M.
        ui_capture_dir = device_temp_file.NamedDeviceTemporaryDirectory(
            device.adb, dir=device.GetExternalStoragePath())
        extras[EXTRA_UI_CAPTURE_DIR] = ui_capture_dir.name

        if self._env.trace_output:
            trace_device_file = device_temp_file.DeviceTempFile(
                device.adb,
                suffix='.json',
                dir=device.GetExternalStoragePath())
            extras[EXTRA_TRACE_FILE] = trace_device_file.name

        if isinstance(test, list):
            if not self._test_instance.driver_apk:
                raise Exception('driver_apk does not exist. '
                                'Please build it and try again.')
            if any(t.get('is_junit4') for t in test):
                raise Exception('driver apk does not support JUnit4 tests')

            def name_and_timeout(t):
                n = instrumentation_test_instance.GetTestName(t)
                i = self._GetTimeoutFromAnnotations(t['annotations'], n)
                return (n, i)

            test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

            test_name = ','.join(test_names)
            test_display_name = test_name
            target = '%s/%s' % (self._test_instance.driver_package,
                                self._test_instance.driver_name)
            extras.update(
                self._test_instance.GetDriverEnvironmentVars(
                    test_list=test_names))
            timeout = sum(timeouts)
        else:
            test_name = instrumentation_test_instance.GetTestName(test)
            test_display_name = self._GetUniqueTestName(test)
            if test['is_junit4']:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.junit4_runner_class)
            else:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.junit3_runner_class)
            extras['class'] = test_name
            if 'flags' in test and test['flags']:
                flags_to_add.extend(test['flags'])
            timeout = self._GetTimeoutFromAnnotations(test['annotations'],
                                                      test_display_name)

            test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
                test['annotations'])
            if test_timeout_scale and test_timeout_scale != 1:
                valgrind_tools.SetChromeTimeoutScale(
                    device,
                    test_timeout_scale * self._test_instance.timeout_scale)

        if self._test_instance.wait_for_java_debugger:
            timeout = None
        logging.info('preparing to run %s: %s', test_display_name, test)

        render_tests_device_output_dir = None
        if _IsRenderTest(test):
            # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
            render_tests_device_output_dir = posixpath.join(
                device.GetExternalStoragePath(), 'render_test_output_dir')
            flags_to_add.append('--render-test-output-dir=%s' %
                                render_tests_device_output_dir)

        if flags_to_add:
            self._CreateFlagChangerIfNeeded(device)
            self._flag_changers[str(device)].PushFlags(add=flags_to_add)

        time_ms = lambda: int(time.time() * 1e3)
        start_ms = time_ms()

        stream_name = 'logcat_%s_%s_%s' % (test_name.replace(
            '#', '.'), time.strftime('%Y%m%dT%H%M%S-UTC',
                                     time.gmtime()), device.serial)

        with ui_capture_dir:
            with self._env.output_manager.ArchivedTempfile(
                    stream_name, 'logcat') as logcat_file:
                try:
                    with logcat_monitor.LogcatMonitor(
                            device.adb,
                            filter_specs=local_device_environment.
                            LOGCAT_FILTERS,
                            output_file=logcat_file.name,
                            transform_func=self._test_instance.
                            MaybeDeobfuscateLines,
                            check_error=False) as logmon:
                        with _LogTestEndpoints(device, test_name):
                            with contextlib_ext.Optional(
                                    trace_event.trace(test_name),
                                    self._env.trace_output):
                                output = device.StartInstrumentation(
                                    target,
                                    raw=True,
                                    extras=extras,
                                    timeout=timeout,
                                    retries=0)
                finally:
                    logmon.Close()

            if logcat_file.Link():
                logging.info('Logcat saved to %s', logcat_file.Link())

            duration_ms = time_ms() - start_ms

            with contextlib_ext.Optional(trace_event.trace('ProcessResults'),
                                         self._env.trace_output):
                output = self._test_instance.MaybeDeobfuscateLines(output)
                # TODO(jbudorick): Make instrumentation tests output a JSON so this
                # doesn't have to parse the output.
                result_code, result_bundle, statuses = (
                    self._test_instance.ParseAmInstrumentRawOutput(output))
                results = self._test_instance.GenerateTestResults(
                    result_code, result_bundle, statuses, start_ms,
                    duration_ms, device.product_cpu_abi,
                    self._test_instance.symbolizer)

            if self._env.trace_output:
                self._SaveTraceData(trace_device_file, device, test['class'])

            def restore_flags():
                if flags_to_add:
                    self._flag_changers[str(device)].Restore()

            def restore_timeout_scale():
                if test_timeout_scale:
                    valgrind_tools.SetChromeTimeoutScale(
                        device, self._test_instance.timeout_scale)

            def handle_coverage_data():
                if self._test_instance.coverage_directory:
                    try:
                        if not os.path.exists(
                                self._test_instance.coverage_directory):
                            os.makedirs(self._test_instance.coverage_directory)
                        device.PullFile(coverage_device_file,
                                        self._test_instance.coverage_directory)
                        device.RemovePath(coverage_device_file, True)
                    except (OSError, base_error.BaseError) as e:
                        logging.warning(
                            'Failed to handle coverage data after tests: %s',
                            e)

            def handle_render_test_data():
                if _IsRenderTest(test):
                    # Render tests do not cause test failure by default. So we have to
                    # check to see if any failure images were generated even if the test
                    # does not fail.
                    try:
                        self._ProcessRenderTestResults(
                            device, render_tests_device_output_dir, results)
                    finally:
                        device.RemovePath(render_tests_device_output_dir,
                                          recursive=True,
                                          force=True)

            def pull_ui_screen_captures():
                screenshots = []
                for filename in device.ListDirectory(ui_capture_dir.name):
                    if filename.endswith('.json'):
                        screenshots.append(pull_ui_screenshot(filename))
                if screenshots:
                    json_archive_name = 'ui_capture_%s_%s.json' % (
                        test_name.replace('#', '.'),
                        time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()))
                    with self._env.output_manager.ArchivedTempfile(
                            json_archive_name, 'ui_capture',
                            output_manager.Datatype.JSON) as json_archive:
                        json.dump(screenshots, json_archive)
                    for result in results:
                        result.SetLink('ui screenshot', json_archive.Link())

            def pull_ui_screenshot(filename):
                source_dir = ui_capture_dir.name
                json_path = posixpath.join(source_dir, filename)
                json_data = json.loads(device.ReadFile(json_path))
                image_file_path = posixpath.join(source_dir,
                                                 json_data['location'])
                with self._env.output_manager.ArchivedTempfile(
                        json_data['location'], 'ui_capture',
                        output_manager.Datatype.PNG) as image_archive:
                    device.PullFile(image_file_path, image_archive.name)
                json_data['image_link'] = image_archive.Link()
                return json_data

            # While constructing the TestResult objects, we can parallelize several
            # steps that involve ADB. These steps should NOT depend on any info in
            # the results! Things such as whether the test CRASHED have not yet been
            # determined.
            post_test_steps = [
                restore_flags, restore_timeout_scale, handle_coverage_data,
                handle_render_test_data, pull_ui_screen_captures
            ]
            if self._env.concurrent_adb:
                post_test_step_thread_group = reraiser_thread.ReraiserThreadGroup(
                    reraiser_thread.ReraiserThread(f) for f in post_test_steps)
                post_test_step_thread_group.StartAll(will_block=True)
            else:
                for step in post_test_steps:
                    step()

        for result in results:
            if logcat_file:
                result.SetLink('logcat', logcat_file.Link())

        # Update the result name if the test used flags.
        if flags_to_add:
            for r in results:
                if r.GetName() == test_name:
                    r.SetName(test_display_name)

        # Add UNKNOWN results for any missing tests.
        iterable_test = test if isinstance(test, list) else [test]
        test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
        results_names = set(r.GetName() for r in results)
        results.extend(
            base_test_result.BaseTestResult(
                u, base_test_result.ResultType.UNKNOWN)
            for u in test_names.difference(results_names))

        # Update the result type if we detect a crash.
        try:
            if DidPackageCrashOnDevice(self._test_instance.test_package,
                                       device):
                for r in results:
                    if r.GetType() == base_test_result.ResultType.UNKNOWN:
                        r.SetType(base_test_result.ResultType.CRASH)
        except device_errors.CommandTimeoutError:
            logging.warning(
                'timed out when detecting/dismissing error dialogs')
            # Attach screenshot to the test to help with debugging the dialog boxes.
            self._SaveScreenshot(device, screenshot_device_file,
                                 test_display_name, results,
                                 'dialog_box_screenshot')

        # Handle failures by:
        #   - optionally taking a screenshot
        #   - logging the raw output at INFO level
        #   - clearing the application state while persisting permissions
        if any(r.GetType() not in (base_test_result.ResultType.PASS,
                                   base_test_result.ResultType.SKIP)
               for r in results):
            self._SaveScreenshot(device, screenshot_device_file,
                                 test_display_name, results,
                                 'post_test_screenshot')

            logging.info('detected failure in %s. raw output:',
                         test_display_name)
            for l in output:
                logging.info('  %s', l)
            if (not self._env.skip_clear_data
                    and self._test_instance.package_info):
                permissions = (
                    self._test_instance.apk_under_test.GetPermissions()
                    if self._test_instance.apk_under_test else None)
                device.ClearApplicationState(
                    self._test_instance.package_info.package,
                    permissions=permissions)
        else:
            logging.debug('raw output from %s:', test_display_name)
            for l in output:
                logging.debug('  %s', l)
        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True,
                            tombstone_symbolizer=self._test_instance.symbolizer
                        )
                        tombstone_filename = 'tombstones_%s_%s' % (
                            time.strftime('%Y%m%dT%H%M%S-UTC',
                                          time.gmtime()), device.serial)
                        with self._env.output_manager.ArchivedTempfile(
                                tombstone_filename,
                                'tombstones') as tombstone_file:
                            tombstone_file.write(
                                '\n'.join(resolved_tombstones))
                        result.SetLink('tombstones', tombstone_file.Link())
        if self._env.concurrent_adb:
            post_test_step_thread_group.JoinAll()
        return results, None
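When concurrent_adb is set, the post-test steps above run on a devil ReraiserThreadGroup, which starts them in parallel and re-raises any worker exception when JoinAll is called. A standard-library sketch of the same shape, minus the exception re-raising that reraiser_thread provides:

import threading

def start_post_test_steps(steps, concurrent):
  # Run each step inline, or start one thread per step and let the
  # caller join them later (mirroring StartAll/JoinAll above).
  if not concurrent:
    for step in steps:
      step()
    return []
  threads = [threading.Thread(target=step) for step in steps]
  for t in threads:
    t.start()
  return threads

As the comment in the listing notes, these steps must not read the parsed results: crash status is only determined afterwards.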
Example 31
    def _RunTest(self, device, test):
        # Run the test.
        timeout = (self._test_instance.shard_timeout *
                   self.GetTool(device).GetTimeoutScale())
        if self._test_instance.wait_for_java_debugger:
            timeout = None
        if self._test_instance.store_tombstones:
            tombstones.ClearAllTombstones(device)
        test_perf_output_filename = next(self._test_perf_output_filenames)

        with device_temp_file.DeviceTempFile(
                adb=device.adb,
                dir=self._delegate.ResultsDirectory(device),
                suffix='.xml') as device_tmp_results_file:
            with contextlib_ext.Optional(
                    device_temp_file.NamedDeviceTemporaryDirectory(
                        adb=device.adb, dir='/sdcard/'), self._test_instance.
                    gs_test_artifacts_bucket) as test_artifacts_dir:
                with (contextlib_ext.Optional(
                        device_temp_file.DeviceTempFile(
                            adb=device.adb,
                            dir=self._delegate.ResultsDirectory(device)),
                        test_perf_output_filename)
                      ) as isolated_script_test_perf_output:

                    flags = list(self._test_instance.flags)
                    if self._test_instance.enable_xml_result_parsing:
                        flags.append('--gtest_output=xml:%s' %
                                     device_tmp_results_file.name)

                    if self._test_instance.gs_test_artifacts_bucket:
                        flags.append('--test_artifacts_dir=%s' %
                                     test_artifacts_dir.name)

                    if test_perf_output_filename:
                        flags.append('--isolated_script_test_perf_output=%s' %
                                     isolated_script_test_perf_output.name)

                    logging.info('flags:')
                    for f in flags:
                        logging.info('  %s', f)

                    stream_name = 'logcat_%s_%s_%s' % (
                        hash(tuple(test)),
                        time.strftime('%Y%m%dT%H%M%S-UTC',
                                      time.gmtime()), device.serial)

                    with self._env.output_manager.ArchivedTempfile(
                            stream_name, 'logcat') as logcat_file:
                        with logcat_monitor.LogcatMonitor(
                                device.adb,
                                filter_specs=local_device_environment.
                                LOGCAT_FILTERS,
                                output_file=logcat_file.name,
                                check_error=False) as logmon:
                            with contextlib_ext.Optional(
                                    trace_event.trace(str(test)),
                                    self._env.trace_output):
                                output = self._delegate.Run(
                                    test,
                                    device,
                                    flags=' '.join(flags),
                                    timeout=timeout,
                                    retries=0)
                        logmon.Close()

                    if logcat_file.Link():
                        logging.info('Logcat saved to %s', logcat_file.Link())

                    if self._test_instance.enable_xml_result_parsing:
                        try:
                            gtest_xml = device.ReadFile(
                                device_tmp_results_file.name, as_root=True)
                        except device_errors.CommandFailedError as e:
                            logging.warning(
                                'Failed to pull gtest results XML file %s: %s',
                                device_tmp_results_file.name, str(e))
                            gtest_xml = None

                    if test_perf_output_filename:
                        try:
                            device.PullFile(
                                isolated_script_test_perf_output.name,
                                test_perf_output_filename)
                        except device_errors.CommandFailedError as e:
                            logging.warning(
                                'Failed to pull chartjson results %s: %s',
                                isolated_script_test_perf_output.name, str(e))

                    test_artifacts_url = self._UploadTestArtifacts(
                        device, test_artifacts_dir)

        for s in self._servers[str(device)]:
            s.Reset()
        if self._test_instance.app_files:
            self._delegate.PullAppFiles(device, self._test_instance.app_files,
                                        self._test_instance.app_file_dir)
        if not self._env.skip_clear_data:
            self._delegate.Clear(device)

        for l in output:
            logging.info(l)

        # Parse the output.
        # TODO(jbudorick): Transition test scripts away from parsing stdout.
        if self._test_instance.enable_xml_result_parsing:
            results = gtest_test_instance.ParseGTestXML(gtest_xml)
        else:
            results = gtest_test_instance.ParseGTestOutput(
                output, self._test_instance.symbolizer, device.product_cpu_abi)

        tombstones_url = None
        for r in results:
            if logcat_file:
                r.SetLink('logcat', logcat_file.Link())

            if self._test_instance.gs_test_artifacts_bucket:
                r.SetLink('test_artifacts', test_artifacts_url)

            if r.GetType() == base_test_result.ResultType.CRASH:
                self._crashes.add(r.GetName())
                if self._test_instance.store_tombstones:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True)
                        stream_name = 'tombstones_%s_%s' % (time.strftime(
                            '%Y%m%dT%H%M%S', time.localtime()), device.serial)
                        tombstones_url = logdog_helper.text(
                            stream_name, '\n'.join(resolved_tombstones))
                    r.SetLink('tombstones', tombstones_url)

        tests_stripped_disabled_prefix = set()
        for t in test:
            tests_stripped_disabled_prefix.add(
                gtest_test_instance.TestNameWithoutDisabledPrefix(t))
        not_run_tests = tests_stripped_disabled_prefix.difference(
            set(r.GetName() for r in results))
        return results, list(not_run_tests) if results else None
Example 32
  def _RunTest(self, device, test):
    extras = {}

    flags = None
    test_timeout_scale = None
    if self._test_instance.coverage_directory:
      coverage_basename = '%s.ec' % ('%s_group' % test[0]['method']
          if isinstance(test, list) else test['method'])
      extras['coverage'] = 'true'
      coverage_directory = os.path.join(
          device.GetExternalStoragePath(), 'chrome', 'test', 'coverage')
      coverage_device_file = os.path.join(
          coverage_directory, coverage_basename)
      extras['coverageFile'] = coverage_device_file

    if isinstance(test, list):
      if not self._test_instance.driver_apk:
        raise Exception('driver_apk does not exist. '
                        'Please build it and try again.')
      if any(t.get('is_junit4') for t in test):
        raise Exception('driver apk does not support JUnit4 tests')

      def name_and_timeout(t):
        n = instrumentation_test_instance.GetTestName(t)
        i = self._GetTimeoutFromAnnotations(t['annotations'], n)
        return (n, i)

      test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

      test_name = ','.join(test_names)
      test_display_name = test_name
      target = '%s/%s' % (
          self._test_instance.driver_package,
          self._test_instance.driver_name)
      extras.update(
          self._test_instance.GetDriverEnvironmentVars(
              test_list=test_names))
      timeout = sum(timeouts)
    else:
      test_name = instrumentation_test_instance.GetTestName(test)
      test_display_name = self._GetUniqueTestName(test)
      if test['is_junit4']:
        target = '%s/%s' % (
            self._test_instance.test_package,
            self._test_instance.test_runner_junit4)
      else:
        target = '%s/%s' % (
            self._test_instance.test_package, self._test_instance.test_runner)
      extras['class'] = test_name
      if 'flags' in test:
        flags = test['flags']
      timeout = self._GetTimeoutFromAnnotations(
        test['annotations'], test_display_name)

      test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
          test['annotations'])
      if test_timeout_scale and test_timeout_scale != 1:
        valgrind_tools.SetChromeTimeoutScale(
            device, test_timeout_scale * self._test_instance.timeout_scale)

    logging.info('preparing to run %s: %s', test_display_name, test)

    if flags:
      self._CreateFlagChangerIfNeeded(device)
      self._flag_changers[str(device)].PushFlags(
        add=flags.add, remove=flags.remove)

    try:
      device.RunShellCommand(
          ['log', '-p', 'i', '-t', _TAG, 'START %s' % test_name],
          check_return=True)
      time_ms = lambda: int(time.time() * 1e3)
      start_ms = time_ms()

      stream_name = 'logcat_%s_%s_%s' % (
          test_name.replace('#', '.'),
          time.strftime('%Y%m%dT%H%M%S', time.localtime()),
          device.serial)
      logmon = logdog_logcat_monitor.LogdogLogcatMonitor(
          device.adb, stream_name, filter_specs=LOGCAT_FILTERS)

      with contextlib_ext.Optional(
          logmon, self._test_instance.should_save_logcat):
        with contextlib_ext.Optional(
            trace_event.trace(test_name),
            self._env.trace_output):
          output = device.StartInstrumentation(
              target, raw=True, extras=extras, timeout=timeout, retries=0)
      logcat_url = logmon.GetLogcatURL()
    finally:
      device.RunShellCommand(
          ['log', '-p', 'i', '-t', _TAG, 'END %s' % test_name],
          check_return=True)
      duration_ms = time_ms() - start_ms
      if flags:
        self._flag_changers[str(device)].Restore()
      if test_timeout_scale:
        valgrind_tools.SetChromeTimeoutScale(
            device, self._test_instance.timeout_scale)

    # TODO(jbudorick): Make instrumentation tests output a JSON so this
    # doesn't have to parse the output.
    result_code, result_bundle, statuses = (
        self._test_instance.ParseAmInstrumentRawOutput(output))
    results = self._test_instance.GenerateTestResults(
        result_code, result_bundle, statuses, start_ms, duration_ms)
    for result in results:
      if logcat_url:
        result.SetLink('logcat', logcat_url)

    # Update the result name if the test used flags.
    if flags:
      for r in results:
        if r.GetName() == test_name:
          r.SetName(test_display_name)

    # Add UNKNOWN results for any missing tests.
    iterable_test = test if isinstance(test, list) else [test]
    test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
    results_names = set(r.GetName() for r in results)
    results.extend(
        base_test_result.BaseTestResult(u, base_test_result.ResultType.UNKNOWN)
        for u in test_names.difference(results_names))

    # Update the result type if we detect a crash.
    if DidPackageCrashOnDevice(self._test_instance.test_package, device):
      for r in results:
        if r.GetType() == base_test_result.ResultType.UNKNOWN:
          r.SetType(base_test_result.ResultType.CRASH)

    # Handle failures by:
    #   - optionally taking a screenshot
    #   - logging the raw output at INFO level
    #   - clearing the application state while persisting permissions
    if any(r.GetType() not in (base_test_result.ResultType.PASS,
                               base_test_result.ResultType.SKIP)
           for r in results):
      if self._test_instance.screenshot_dir:
        file_name = '%s-%s.png' % (
            test_display_name,
            time.strftime('%Y%m%dT%H%M%S', time.localtime()))
        saved_dir = device.TakeScreenshot(
            os.path.join(self._test_instance.screenshot_dir, file_name))
        logging.info(
            'Saved screenshot for %s to %s.',
            test_display_name, saved_dir)
      logging.info('detected failure in %s. raw output:', test_display_name)
      for l in output:
        logging.info('  %s', l)
      if (not self._env.skip_clear_data
          and self._test_instance.package_info):
        permissions = (
            self._test_instance.apk_under_test.GetPermissions()
            if self._test_instance.apk_under_test
            else None)
        device.ClearApplicationState(self._test_instance.package_info.package,
                                     permissions=permissions)

    else:
      logging.debug('raw output from %s:', test_display_name)
      for l in output:
        logging.debug('  %s', l)
    if self._test_instance.coverage_directory:
      device.PullFile(coverage_directory,
          self._test_instance.coverage_directory)
      device.RunShellCommand(
          'rm -f %s' % posixpath.join(coverage_directory, '*'),
          check_return=True, shell=True)
    if self._test_instance.store_tombstones:
      tombstones_url = None
      for result in results:
        if result.GetType() == base_test_result.ResultType.CRASH:
          if not tombstones_url:
            resolved_tombstones = tombstones.ResolveTombstones(
                device,
                resolve_all_tombstones=True,
                include_stack_symbols=False,
                wipe_tombstones=True)
            stream_name = 'tombstones_%s_%s' % (
                time.strftime('%Y%m%dT%H%M%S', time.localtime()),
                device.serial)
            tombstones_url = logdog_helper.text(
                stream_name, resolved_tombstones)
          result.SetLink('tombstones', tombstones_url)
    return results, None
Example 33
  def _RunTest(self, device, test):
    extras = {}

    flags_to_add = []
    flags_to_remove = []
    test_timeout_scale = None
    if self._test_instance.coverage_directory:
      coverage_basename = '%s.ec' % ('%s_group' % test[0]['method']
          if isinstance(test, list) else test['method'])
      extras['coverage'] = 'true'
      coverage_directory = os.path.join(
          device.GetExternalStoragePath(), 'chrome', 'test', 'coverage')
      coverage_device_file = os.path.join(
          coverage_directory, coverage_basename)
      extras['coverageFile'] = coverage_device_file
    # Save screenshot if screenshot dir is specified (save locally) or if
    # a GS bucket is passed (save in cloud).
    screenshot_device_file = None
    if (self._test_instance.screenshot_dir or
        self._test_instance.gs_results_bucket):
      screenshot_device_file = device_temp_file.DeviceTempFile(
          device.adb, suffix='.png', dir=device.GetExternalStoragePath())
      extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name

    extras[EXTRA_UI_CAPTURE_DIR] = self._ui_capture_dir[device]

    if isinstance(test, list):
      if not self._test_instance.driver_apk:
        raise Exception('driver_apk does not exist. '
                        'Please build it and try again.')
      if any(t.get('is_junit4') for t in test):
        raise Exception('driver apk does not support JUnit4 tests')

      def name_and_timeout(t):
        n = instrumentation_test_instance.GetTestName(t)
        i = self._GetTimeoutFromAnnotations(t['annotations'], n)
        return (n, i)

      test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

      test_name = ','.join(test_names)
      test_display_name = test_name
      target = '%s/%s' % (
          self._test_instance.driver_package,
          self._test_instance.driver_name)
      extras.update(
          self._test_instance.GetDriverEnvironmentVars(
              test_list=test_names))
      timeout = sum(timeouts)
    else:
      test_name = instrumentation_test_instance.GetTestName(test)
      test_display_name = self._GetUniqueTestName(test)
      if test['is_junit4']:
        target = '%s/%s' % (
            self._test_instance.test_package,
            self._test_instance.test_runner_junit4)
      else:
        target = '%s/%s' % (
            self._test_instance.test_package, self._test_instance.test_runner)
      extras['class'] = test_name
      if 'flags' in test:
        flags_to_add.extend(test['flags'].add)
        flags_to_remove.extend(test['flags'].remove)
      timeout = self._GetTimeoutFromAnnotations(
        test['annotations'], test_display_name)

      test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
          test['annotations'])
      if test_timeout_scale and test_timeout_scale != 1:
        valgrind_tools.SetChromeTimeoutScale(
            device, test_timeout_scale * self._test_instance.timeout_scale)

    logging.info('preparing to run %s: %s', test_display_name, test)

    render_tests_device_output_dir = None
    if _IsRenderTest(test):
      # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
      render_tests_device_output_dir = posixpath.join(
          device.GetExternalStoragePath(),
          'render_test_output_dir')
      flags_to_add.append('--render-test-output-dir=%s' %
                          render_tests_device_output_dir)

    if flags_to_add or flags_to_remove:
      self._CreateFlagChangerIfNeeded(device)
      self._flag_changers[str(device)].PushFlags(
        add=flags_to_add, remove=flags_to_remove)

    time_ms = lambda: int(time.time() * 1e3)
    start_ms = time_ms()

    stream_name = 'logcat_%s_%s_%s' % (
        test_name.replace('#', '.'),
        time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
        device.serial)
    logmon = logdog_logcat_monitor.LogdogLogcatMonitor(
        device.adb, stream_name, filter_specs=LOGCAT_FILTERS)

    with contextlib_ext.Optional(
        logmon, self._test_instance.should_save_logcat):
      with _LogTestEndpoints(device, test_name):
        with contextlib_ext.Optional(
            trace_event.trace(test_name),
            self._env.trace_output):
          output = device.StartInstrumentation(
              target, raw=True, extras=extras, timeout=timeout, retries=0)

    logcat_url = logmon.GetLogcatURL()
    duration_ms = time_ms() - start_ms
    if flags_to_add or flags_to_remove:
      self._flag_changers[str(device)].Restore()
    if test_timeout_scale:
      valgrind_tools.SetChromeTimeoutScale(
          device, self._test_instance.timeout_scale)

    # TODO(jbudorick): Make instrumentation tests output a JSON so this
    # doesn't have to parse the output.
    result_code, result_bundle, statuses = (
        self._test_instance.ParseAmInstrumentRawOutput(output))
    results = self._test_instance.GenerateTestResults(
        result_code, result_bundle, statuses, start_ms, duration_ms)
    for result in results:
      if logcat_url:
        result.SetLink('logcat', logcat_url)

    if _IsRenderTest(test):
      # Render tests do not cause test failure by default. So we have to check
      # to see if any failure images were generated even if the test does not
      # fail.
      try:
        self._ProcessRenderTestResults(
            device, render_tests_device_output_dir, results)
      finally:
        device.RemovePath(render_tests_device_output_dir,
                          recursive=True, force=True)

    # Update the result name if the test used flags.
    if flags_to_add or flags_to_remove:
      for r in results:
        if r.GetName() == test_name:
          r.SetName(test_display_name)

    # Add UNKNOWN results for any missing tests.
    iterable_test = test if isinstance(test, list) else [test]
    test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
    results_names = set(r.GetName() for r in results)
    results.extend(
        base_test_result.BaseTestResult(u, base_test_result.ResultType.UNKNOWN)
        for u in test_names.difference(results_names))

    # Update the result type if we detect a crash.
    if DidPackageCrashOnDevice(self._test_instance.test_package, device):
      for r in results:
        if r.GetType() == base_test_result.ResultType.UNKNOWN:
          r.SetType(base_test_result.ResultType.CRASH)

    # Handle failures by:
    #   - optionally taking a screenshot
    #   - logging the raw output at INFO level
    #   - clearing the application state while persisting permissions
    if any(r.GetType() not in (base_test_result.ResultType.PASS,
                               base_test_result.ResultType.SKIP)
           for r in results):
      with contextlib_ext.Optional(
          tempfile_ext.NamedTemporaryDirectory(),
          self._test_instance.screenshot_dir is None and
              self._test_instance.gs_results_bucket) as screenshot_host_dir:
        screenshot_host_dir = (
            self._test_instance.screenshot_dir or screenshot_host_dir)
        self._SaveScreenshot(device, screenshot_host_dir,
                             screenshot_device_file, test_display_name,
                             results)

      logging.info('detected failure in %s. raw output:', test_display_name)
      for l in output:
        logging.info('  %s', l)
      if (not self._env.skip_clear_data
          and self._test_instance.package_info):
        permissions = (
            self._test_instance.apk_under_test.GetPermissions()
            if self._test_instance.apk_under_test
            else None)
        device.ClearApplicationState(self._test_instance.package_info.package,
                                     permissions=permissions)
    else:
      logging.debug('raw output from %s:', test_display_name)
      for l in output:
        logging.debug('  %s', l)
    if self._test_instance.coverage_directory:
      device.PullFile(coverage_directory,
          self._test_instance.coverage_directory)
      device.RunShellCommand(
          'rm -f %s' % posixpath.join(coverage_directory, '*'),
          check_return=True, shell=True)
    if self._test_instance.store_tombstones:
      tombstones_url = None
      for result in results:
        if result.GetType() == base_test_result.ResultType.CRASH:
          if not tombstones_url:
            resolved_tombstones = tombstones.ResolveTombstones(
                device,
                resolve_all_tombstones=True,
                include_stack_symbols=False,
                wipe_tombstones=True)
            stream_name = 'tombstones_%s_%s' % (
                time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
                device.serial)
            tombstones_url = logdog_helper.text(
                stream_name, '\n'.join(resolved_tombstones))
          result.SetLink('tombstones', tombstones_url)
    return results, None
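One behavioral difference between the last two listings: the first (Example 32) restores pushed flags and the timeout scale in a finally block, while the second (Example 33) restores them only after the instrumentation call returns, so an exception there could leak flags into the next test. A hedged context-manager sketch that keeps the restore unconditional, using the PushFlags/Restore methods the listings themselves call:

import contextlib

@contextlib.contextmanager
def pushed_flags(flag_changer, add=None, remove=None):
  # Push per-test command-line flags and always restore them, even if
  # the test run raises.
  flag_changer.PushFlags(add=add or [], remove=remove or [])
  try:
    yield
  finally:
    flag_changer.Restore()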