Example 1
def _handle_perf_logs(benchmark_directory_map, extra_links):
    """ Upload benchmark logs to logdog and add a page entry for them. """
    begin_time = time.time()
    benchmark_logs_links = collections.defaultdict(list)

    for benchmark_name, directories in benchmark_directory_map.iteritems():
        for directory in directories:
            benchmark_log_file = os.path.join(directory, 'benchmark_log.txt')
            if os.path.exists(benchmark_log_file):
                with open(benchmark_log_file) as f:
                    uploaded_link = logdog_helper.text(
                        name=_generate_unique_logdog_filename(benchmark_name),
                        data=f.read())
                    benchmark_logs_links[benchmark_name].append(uploaded_link)

    logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Logs')
    logdog_stream = logdog_helper.text(logdog_file_name,
                                       json.dumps(benchmark_logs_links,
                                                  sort_keys=True,
                                                  indent=4,
                                                  separators=(',', ': ')),
                                       content_type=JSON_CONTENT_TYPE)
    extra_links['Benchmarks logs'] = logdog_stream
    end_time = time.time()
    print_duration('Generating perf log streams', begin_time, end_time)
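Every snippet on this page funnels its data through logdog_helper.text and treats the return value as a link that is then stored in extra_links. A minimal sketch of that calling pattern, using a stand-in stub for the helper (the stub and its URL shape are assumptions for illustration; the real helper uploads the text to a LogDog stream and returns the stream's URL):

import json

def text(name, data, content_type=None):
    # Stand-in for logdog_helper.text: upload `data` under stream `name` and
    # return a link to it. The URL below is purely hypothetical.
    return 'https://logs.example.com/%s' % name

benchmark_logs_links = {'speedometer2': ['https://logs.example.com/speedometer2_0']}
extra_links = {}
extra_links['Benchmarks logs'] = text(
    'Benchmarks_Logs_0',
    json.dumps(benchmark_logs_links, sort_keys=True, indent=4,
               separators=(',', ': ')),
    content_type='application/json')  # assumed value of JSON_CONTENT_TYPE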
Example 2
def _handle_perf_results(
    benchmark_enabled_map, benchmark_directory_map, configuration_name,
    build_properties, service_account_file, extra_links):
  """
    Upload perf results to the perf dashboard.

    This method also uploads the perf results to logdog and adds the
    resulting link to |extra_links|.

    Returns:
      0 if the upload to the perf dashboard succeeded, 1 otherwise.
  """
  tmpfile_dir = tempfile.mkdtemp('resultscache')
  try:
    # Upload all eligible benchmarks to the perf dashboard
    logdog_dict = {}
    logdog_stream = None
    logdog_label = 'Results Dashboard'
    upload_fail = False
    with oauth_api.with_access_token(service_account_file) as oauth_file:
      for benchmark_name, directories in benchmark_directory_map.iteritems():
        if not benchmark_enabled_map[benchmark_name]:
          continue
        # There are potentially multiple directories with results; rewrite and
        # merge them if necessary
        results_filename = None
        if len(directories) > 1:
          merge_perf_dir = os.path.join(
              os.path.abspath(tmpfile_dir), benchmark_name)
          if not os.path.exists(merge_perf_dir):
            os.makedirs(merge_perf_dir)
          results_filename = os.path.join(
              merge_perf_dir, 'merged_perf_results.json')
          _merge_perf_results(results_filename, directories)
        else:
          # It was only written to one shard, use that shard's data
          results_filename = join(directories[0], 'perf_results.json')
        print 'Uploading perf results from %s benchmark' % benchmark_name
        upload_fail = _upload_and_write_perf_data_to_logfile(
            benchmark_name, results_filename, configuration_name,
            build_properties, oauth_file, tmpfile_dir, logdog_dict,
            ('.reference' in benchmark_name))

    logdog_file_name = _generate_unique_logdog_filename('Results_Dashboard_')
    logdog_stream = logdog_helper.text(logdog_file_name,
        json.dumps(logdog_dict, sort_keys=True,
                   indent=4, separators=(',', ': ')),
        content_type=JSON_CONTENT_TYPE)
    if upload_fail:
      logdog_label += ' Upload Failure'
    extra_links[logdog_label] = logdog_stream
    if upload_fail:
      return 1
    return 0
  finally:
    shutil.rmtree(tmpfile_dir)
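Example 2 calls _merge_perf_results when more than one shard produced a perf_results.json, but that helper is not shown on this page. Purely to illustrate the shape of such a step (the real Chromium merge is format-aware; this naive version is only an assumption), a merge could look like:

import json
import os

def merge_json_result_lists(output_path, directories):
    # Naive illustration: assumes each shard wrote a JSON list and simply
    # concatenates them into one file at output_path.
    merged = []
    for directory in directories:
        with open(os.path.join(directory, 'perf_results.json')) as f:
            merged.extend(json.load(f))
    with open(output_path, 'w') as f:
        json.dump(merged, f, indent=4)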
Example 3
def _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links):
    begin_time = time.time()
    with open(benchmarks_shard_map_file) as f:
        benchmarks_shard_data = f.read()
        logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Shard_Map')
        logdog_stream = logdog_helper.text(
            logdog_file_name, benchmarks_shard_data, content_type=JSON_CONTENT_TYPE)
        extra_links['Benchmarks shard map'] = logdog_stream
    end_time = time.time()
    print_duration('Generating benchmark shard map stream', begin_time, end_time)
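_generate_unique_logdog_filename appears in nearly every example, but its body is not shown. Since LogDog stream names must be unique per invocation, a plausible sketch (an assumption, not the real helper) is a caller-supplied prefix plus a UUID, in the same spirit as the uuid.uuid4() file naming used later on this page (Example 13):

import uuid

def generate_unique_logdog_filename(name_prefix):
    # Hypothetical: suffix the prefix with a UUID so repeated runs do not
    # collide on the same stream name.
    return '%s_%s' % (name_prefix, uuid.uuid4())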
Example 4
def _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links):
  begin_time = time.time()
  with open(benchmarks_shard_map_file) as f:
    benchmarks_shard_data = json.load(f)
    logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Shard_Map')
    logdog_stream = logdog_helper.text(
        logdog_file_name, json.dumps(benchmarks_shard_data, sort_keys=True,
                                     indent=4, separators=(',', ': ')),
        content_type=JSON_CONTENT_TYPE)
    extra_links['Benchmarks shard map'] = logdog_stream
  end_time = time.time()
  print_duration('Generating benchmark shard map stream', begin_time, end_time)
Example 5
def _handle_perf_logs(benchmark_directory_list, extra_links):
    """ Upload benchmark logs to logdog and add a page entry for them. """

    benchmark_logs_links = {}

    for directory in benchmark_directory_list:
        # Obtain the test name we are running
        benchmark_name = _get_benchmark_name(directory)
        with open(join(directory, 'benchmark_log.txt')) as f:
            uploaded_link = logdog_helper.text(
                name=_generate_unique_logdog_filename(benchmark_name),
                data=f.read())
            benchmark_logs_links[benchmark_name] = uploaded_link

    logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Logs')
    logdog_stream = logdog_helper.text(
        logdog_file_name,
        json.dumps(benchmark_logs_links,
                   sort_keys=True,
                   indent=4,
                   separators=(',', ': ')))
    extra_links['Benchmarks logs'] = logdog_stream
Example 6
def _handle_perf_logs(benchmark_directory_map, extra_links):
  """ Upload benchmark logs to logdog and add a page entry for them. """

  benchmark_logs_links = {}

  for benchmark_name, directories in benchmark_directory_map.iteritems():
    for directory in directories:
      with open(join(directory, 'benchmark_log.txt')) as f:
        uploaded_link = logdog_helper.text(
            name=_generate_unique_logdog_filename(benchmark_name),
            data=f.read())
        if benchmark_name in benchmark_logs_links.keys():
          benchmark_logs_links[benchmark_name].append(uploaded_link)
        else:
          benchmark_logs_links[benchmark_name] = [uploaded_link]

  logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Logs')
  logdog_stream = logdog_helper.text(
      logdog_file_name, json.dumps(benchmark_logs_links, sort_keys=True,
                                   indent=4, separators=(',', ': ')),
      content_type=JSON_CONTENT_TYPE)
  extra_links['Benchmarks logs'] = logdog_stream
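Example 6 grows each benchmark's list of links by checking for the key first; Example 1 reaches the same result with collections.defaultdict(list), which removes the branch. The two accumulation styles are interchangeable:

import collections

benchmark_logs_links = collections.defaultdict(list)
# Inside the loop no key check is needed; missing keys start as empty lists.
benchmark_logs_links['speedometer2'].append('link-1')  # hypothetical values
benchmark_logs_links['speedometer2'].append('link-2')
# defaultdict is a dict subclass, so json.dumps serializes it unchanged.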
Example 7
def _handle_perf_results(benchmark_enabled_map, benchmark_directory_list,
                         configuration_name, build_properties,
                         service_account_file, extra_links):
    """
    Upload perf results to the perf dashboard.

    This method also uploads the perf results to logdog and adds the
    resulting link to |extra_links|.

    Returns:
      0 if the upload to the perf dashboard succeeded, 1 otherwise.
    """
    tmpfile_dir = tempfile.mkdtemp('resultscache')
    try:
        # Upload all eligible benchmarks to the perf dashboard
        logdog_dict = {}
        logdog_stream = None
        logdog_label = 'Results Dashboard'
        upload_fail = False
        with oauth_api.with_access_token(service_account_file) as oauth_file:
            for directory in benchmark_directory_list:
                benchmark_name = _get_benchmark_name(directory)
                if not benchmark_enabled_map[benchmark_name]:
                    continue
                print 'Uploading perf results from %s benchmark' % benchmark_name
                upload_fail = _upload_and_write_perf_data_to_logfile(
                    benchmark_name, directory, configuration_name,
                    build_properties, oauth_file, tmpfile_dir, logdog_dict,
                    ('.reference' in benchmark_name))

        logdog_file_name = _generate_unique_logdog_filename(
            'Results_Dashboard_')
        logdog_stream = logdog_helper.text(
            logdog_file_name,
            json.dumps(logdog_dict,
                       sort_keys=True,
                       indent=4,
                       separators=(',', ': ')))
        if upload_fail:
            logdog_label += ' Upload Failure'
        extra_links[logdog_label] = logdog_stream
        if upload_fail:
            return 1
        return 0
    finally:
        shutil.rmtree(tmpfile_dir)
Example 8
    def _RunTest(self, device, test):
        extras = {}

        flags = None
        test_timeout_scale = None
        if self._test_instance.coverage_directory:
            coverage_basename = '%s.ec' % ('%s_group' %
                                           test[0]['method'] if isinstance(
                                               test, list) else test['method'])
            extras['coverage'] = 'true'
            coverage_directory = os.path.join(device.GetExternalStoragePath(),
                                              'chrome', 'test', 'coverage')
            coverage_device_file = os.path.join(coverage_directory,
                                                coverage_basename)
            extras['coverageFile'] = coverage_device_file

        if isinstance(test, list):
            if not self._test_instance.driver_apk:
                raise Exception('driver_apk does not exist. '
                                'Please build it and try again.')
            if any(t.get('is_junit4') for t in test):
                raise Exception('driver apk does not support JUnit4 tests')

            def name_and_timeout(t):
                n = instrumentation_test_instance.GetTestName(t)
                i = self._GetTimeoutFromAnnotations(t['annotations'], n)
                return (n, i)

            test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

            test_name = ','.join(test_names)
            test_display_name = test_name
            target = '%s/%s' % (self._test_instance.driver_package,
                                self._test_instance.driver_name)
            extras.update(
                self._test_instance.GetDriverEnvironmentVars(
                    test_list=test_names))
            timeout = sum(timeouts)
        else:
            test_name = instrumentation_test_instance.GetTestName(test)
            test_display_name = self._GetUniqueTestName(test)
            if test['is_junit4']:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner_junit4)
            else:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner)
            extras['class'] = test_name
            if 'flags' in test:
                flags = test['flags']
            timeout = self._GetTimeoutFromAnnotations(test['annotations'],
                                                      test_display_name)

            test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
                test['annotations'])
            if test_timeout_scale and test_timeout_scale != 1:
                valgrind_tools.SetChromeTimeoutScale(
                    device,
                    test_timeout_scale * self._test_instance.timeout_scale)

        logging.info('preparing to run %s: %s', test_display_name, test)

        if flags:
            self._CreateFlagChangerIfNeeded(device)
            self._flag_changers[str(device)].PushFlags(add=flags.add,
                                                       remove=flags.remove)

        try:
            device.RunShellCommand(
                ['log', '-p', 'i', '-t', _TAG,
                 'START %s' % test_name],
                check_return=True)
            logcat_url = None
            time_ms = lambda: int(time.time() * 1e3)
            start_ms = time_ms()

            stream_name = 'logcat_%s_%s_%s' % (test_name.replace(
                '#', '.'), time.strftime('%Y%m%dT%H%M%S',
                                         time.localtime()), device.serial)
            with contextlib_ext.Optional(
                    logdog_logcat_monitor.LogdogLogcatMonitor(
                        device.adb, stream_name),
                    self._test_instance.should_save_logcat) as logmon:
                with contextlib_ext.Optional(trace_event.trace(test_name),
                                             self._env.trace_output):
                    output = device.StartInstrumentation(target,
                                                         raw=True,
                                                         extras=extras,
                                                         timeout=timeout,
                                                         retries=0)
                if logmon:
                    logcat_url = logmon.GetLogcatURL()
        finally:
            device.RunShellCommand(
                ['log', '-p', 'i', '-t', _TAG,
                 'END %s' % test_name],
                check_return=True)
            duration_ms = time_ms() - start_ms
            if flags:
                self._flag_changers[str(device)].Restore()
            if test_timeout_scale:
                valgrind_tools.SetChromeTimeoutScale(
                    device, self._test_instance.timeout_scale)

        # TODO(jbudorick): Make instrumentation tests output a JSON so this
        # doesn't have to parse the output.
        result_code, result_bundle, statuses = (
            self._test_instance.ParseAmInstrumentRawOutput(output))
        results = self._test_instance.GenerateTestResults(
            result_code, result_bundle, statuses, start_ms, duration_ms)
        for result in results:
            if logcat_url:
                result.SetLink('logcat', logcat_url)

        # Update the result name if the test used flags.
        if flags:
            for r in results:
                if r.GetName() == test_name:
                    r.SetName(test_display_name)

        # Add UNKNOWN results for any missing tests.
        iterable_test = test if isinstance(test, list) else [test]
        test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
        results_names = set(r.GetName() for r in results)
        results.extend(
            base_test_result.BaseTestResult(
                u, base_test_result.ResultType.UNKNOWN)
            for u in test_names.difference(results_names))

        # Update the result type if we detect a crash.
        if DidPackageCrashOnDevice(self._test_instance.test_package, device):
            for r in results:
                if r.GetType() == base_test_result.ResultType.UNKNOWN:
                    r.SetType(base_test_result.ResultType.CRASH)

        # Handle failures by:
        #   - optionally taking a screenshot
        #   - logging the raw output at INFO level
        #   - clearing the application state while persisting permissions
        if any(r.GetType() not in (base_test_result.ResultType.PASS,
                                   base_test_result.ResultType.SKIP)
               for r in results):
            if self._test_instance.screenshot_dir:
                file_name = '%s-%s.png' % (
                    test_display_name,
                    time.strftime('%Y%m%dT%H%M%S', time.localtime()))
                saved_dir = device.TakeScreenshot(
                    os.path.join(self._test_instance.screenshot_dir,
                                 file_name))
                logging.info('Saved screenshot for %s to %s.',
                             test_display_name, saved_dir)
            logging.info('detected failure in %s. raw output:',
                         test_display_name)
            for l in output:
                logging.info('  %s', l)
            if (not self._env.skip_clear_data
                    and self._test_instance.package_info):
                permissions = (
                    self._test_instance.apk_under_test.GetPermissions()
                    if self._test_instance.apk_under_test else None)
                device.ClearApplicationState(
                    self._test_instance.package_info.package,
                    permissions=permissions)

        else:
            logging.debug('raw output from %s:', test_display_name)
            for l in output:
                logging.debug('  %s', l)
        if self._test_instance.coverage_directory:
            device.PullFile(coverage_directory,
                            self._test_instance.coverage_directory)
            device.RunShellCommand('rm -f %s' %
                                   os.path.join(coverage_directory, '*'))
        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True)
                        stream_name = 'tombstones_%s_%s' % (time.strftime(
                            '%Y%m%dT%H%M%S', time.localtime()), device.serial)
                        tombstones_url = logdog_helper.text(
                            stream_name, resolved_tombstones)
                    result.SetLink('tombstones', tombstones_url)
        return results, None
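Several of the _RunTest variants wrap a context manager in contextlib_ext.Optional(cm, condition) and then check whether the as-target is set (e.g. `if logmon:`). Judging only from that usage, the helper enters the wrapped manager when the condition is truthy and yields None otherwise; a rough approximation under that assumption:

import contextlib

@contextlib.contextmanager
def optional(cm, condition):
    # Approximation of contextlib_ext.Optional as used above: enter `cm` only
    # when `condition` is truthy, otherwise yield None so callers can test it.
    if condition:
        with cm as entered:
            yield entered
    else:
        yield None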
Example 9
    def _RunTest(self, device, test):
        # Run the test.
        timeout = (self._test_instance.shard_timeout *
                   self.GetTool(device).GetTimeoutScale())
        if self._test_instance.wait_for_java_debugger:
            timeout = None
        if self._test_instance.store_tombstones:
            tombstones.ClearAllTombstones(device)
        test_perf_output_filename = next(self._test_perf_output_filenames)

        with device_temp_file.DeviceTempFile(
                adb=device.adb,
                dir=self._delegate.ResultsDirectory(device),
                suffix='.xml') as device_tmp_results_file:
            with contextlib_ext.Optional(
                    device_temp_file.NamedDeviceTemporaryDirectory(
                        adb=device.adb, dir='/sdcard/'), self._test_instance.
                    gs_test_artifacts_bucket) as test_artifacts_dir:
                with (contextlib_ext.Optional(
                        device_temp_file.DeviceTempFile(
                            adb=device.adb,
                            dir=self._delegate.ResultsDirectory(device)),
                        test_perf_output_filename)
                      ) as isolated_script_test_perf_output:

                    flags = list(self._test_instance.flags)
                    if self._test_instance.enable_xml_result_parsing:
                        flags.append('--gtest_output=xml:%s' %
                                     device_tmp_results_file.name)

                    if self._test_instance.gs_test_artifacts_bucket:
                        flags.append('--test_artifacts_dir=%s' %
                                     test_artifacts_dir.name)

                    if test_perf_output_filename:
                        flags.append('--isolated_script_test_perf_output=%s' %
                                     isolated_script_test_perf_output.name)

                    logging.info('flags:')
                    for f in flags:
                        logging.info('  %s', f)

                    stream_name = 'logcat_%s_%s_%s' % (
                        hash(tuple(test)),
                        time.strftime('%Y%m%dT%H%M%S-UTC',
                                      time.gmtime()), device.serial)

                    with self._env.output_manager.ArchivedTempfile(
                            stream_name, 'logcat') as logcat_file:
                        with logcat_monitor.LogcatMonitor(
                                device.adb,
                                filter_specs=local_device_environment.
                                LOGCAT_FILTERS,
                                output_file=logcat_file.name,
                                check_error=False) as logmon:
                            with contextlib_ext.Optional(
                                    trace_event.trace(str(test)),
                                    self._env.trace_output):
                                output = self._delegate.Run(
                                    test,
                                    device,
                                    flags=' '.join(flags),
                                    timeout=timeout,
                                    retries=0)
                        logmon.Close()

                    if logcat_file.Link():
                        logging.info('Logcat saved to %s', logcat_file.Link())

                    if self._test_instance.enable_xml_result_parsing:
                        try:
                            gtest_xml = device.ReadFile(
                                device_tmp_results_file.name, as_root=True)
                        except device_errors.CommandFailedError as e:
                            logging.warning(
                                'Failed to pull gtest results XML file %s: %s',
                                device_tmp_results_file.name, str(e))
                            gtest_xml = None

                    if test_perf_output_filename:
                        try:
                            device.PullFile(
                                isolated_script_test_perf_output.name,
                                test_perf_output_filename)
                        except device_errors.CommandFailedError as e:
                            logging.warning(
                                'Failed to pull chartjson results %s: %s',
                                isolated_script_test_perf_output.name, str(e))

                    test_artifacts_url = self._UploadTestArtifacts(
                        device, test_artifacts_dir)

        for s in self._servers[str(device)]:
            s.Reset()
        if self._test_instance.app_files:
            self._delegate.PullAppFiles(device, self._test_instance.app_files,
                                        self._test_instance.app_file_dir)
        if not self._env.skip_clear_data:
            self._delegate.Clear(device)

        for l in output:
            logging.info(l)

        # Parse the output.
        # TODO(jbudorick): Transition test scripts away from parsing stdout.
        if self._test_instance.enable_xml_result_parsing:
            results = gtest_test_instance.ParseGTestXML(gtest_xml)
        else:
            results = gtest_test_instance.ParseGTestOutput(
                output, self._test_instance.symbolizer, device.product_cpu_abi)

        tombstones_url = None
        for r in results:
            if logcat_file:
                r.SetLink('logcat', logcat_file.Link())

            if self._test_instance.gs_test_artifacts_bucket:
                r.SetLink('test_artifacts', test_artifacts_url)

            if r.GetType() == base_test_result.ResultType.CRASH:
                self._crashes.add(r.GetName())
                if self._test_instance.store_tombstones:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True)
                        stream_name = 'tombstones_%s_%s' % (time.strftime(
                            '%Y%m%dT%H%M%S', time.localtime()), device.serial)
                        tombstones_url = logdog_helper.text(
                            stream_name, '\n'.join(resolved_tombstones))
                    r.SetLink('tombstones', tombstones_url)

        tests_stripped_disabled_prefix = set()
        for t in test:
            tests_stripped_disabled_prefix.add(
                gtest_test_instance.TestNameWithoutDisabledPrefix(t))
        not_run_tests = tests_stripped_disabled_prefix.difference(
            set(r.GetName() for r in results))
        return results, list(not_run_tests) if results else None
Example 10
  def _RunTest(self, device, test):
    # Run the test.
    timeout = (self._test_instance.shard_timeout
               * self.GetTool(device).GetTimeoutScale())
    if self._test_instance.store_tombstones:
      tombstones.ClearAllTombstones(device)
    with device_temp_file.DeviceTempFile(
        adb=device.adb,
        dir=self._delegate.ResultsDirectory(device),
        suffix='.xml') as device_tmp_results_file:

      flags = list(self._test_instance.flags)
      if self._test_instance.enable_xml_result_parsing:
        flags.append('--gtest_output=xml:%s' % device_tmp_results_file.name)

      logging.info('flags:')
      for f in flags:
        logging.info('  %s', f)

      with contextlib_ext.Optional(
          trace_event.trace(str(test)),
          self._env.trace_output):
        output = self._delegate.Run(
            test, device, flags=' '.join(flags),
            timeout=timeout, retries=0)

      if self._test_instance.enable_xml_result_parsing:
        gtest_xml = device.ReadFile(
            device_tmp_results_file.name,
            as_root=True)

    for s in self._servers[str(device)]:
      s.Reset()
    if self._test_instance.app_files:
      self._delegate.PullAppFiles(device, self._test_instance.app_files,
                                  self._test_instance.app_file_dir)
    if not self._env.skip_clear_data:
      self._delegate.Clear(device)

    for l in output:
      logging.info(l)

    # Parse the output.
    # TODO(jbudorick): Transition test scripts away from parsing stdout.
    if self._test_instance.enable_xml_result_parsing:
      results = gtest_test_instance.ParseGTestXML(gtest_xml)
    else:
      results = gtest_test_instance.ParseGTestOutput(output)

    # Check whether there are any crashed testcases.
    self._crashes.update(r.GetName() for r in results
                         if r.GetType() == base_test_result.ResultType.CRASH)

    if self._test_instance.store_tombstones:
      tombstones_url = None
      for result in results:
        if result.GetType() == base_test_result.ResultType.CRASH:
          if not tombstones_url:
            resolved_tombstones = tombstones.ResolveTombstones(
                device,
                resolve_all_tombstones=True,
                include_stack_symbols=False,
                wipe_tombstones=True)
            stream_name = 'tombstones_%s_%s' % (
                time.strftime('%Y%m%dT%H%M%S', time.localtime()),
                device.serial)
            tombstones_url = logdog_helper.text(
                stream_name, '\n'.join(resolved_tombstones))
          result.SetLink('tombstones', tombstones_url)

    tests_stripped_disabled_prefix = set()
    for t in test:
      tests_stripped_disabled_prefix.add(
          gtest_test_instance.TestNameWithoutDisabledPrefix(t))
    not_run_tests = tests_stripped_disabled_prefix.difference(
        set(r.GetName() for r in results))
    return results, list(not_run_tests) if results else None
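The gtest _RunTest variants above compare the requested tests against the parsed results only after stripping gtest's disabled marker via TestNameWithoutDisabledPrefix. That helper is not shown here; assuming it only removes the conventional DISABLED_ prefix from the suite and test parts of a 'Suite.Test' name (the real helper may handle more prefixes), it behaves roughly like:

def strip_disabled_prefix(test_name):
    # Illustrative only: drop a leading DISABLED_ from each dotted component.
    return '.'.join(
        part[len('DISABLED_'):] if part.startswith('DISABLED_') else part
        for part in test_name.split('.'))

# strip_disabled_prefix('FooTest.DISABLED_Bar') -> 'FooTest.Bar'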
Example 11
    def _RunTest(self, device, test):
        # Run the test.
        timeout = (self._test_instance.shard_timeout *
                   self.GetTool(device).GetTimeoutScale())
        if self._test_instance.store_tombstones:
            tombstones.ClearAllTombstones(device)
        with device_temp_file.DeviceTempFile(
                adb=device.adb,
                dir=self._delegate.ResultsDirectory(device),
                suffix='.xml') as device_tmp_results_file:

            flags = list(self._test_instance.flags)
            if self._test_instance.enable_xml_result_parsing:
                flags.append('--gtest_output=xml:%s' %
                             device_tmp_results_file.name)

            logging.info('flags:')
            for f in flags:
                logging.info('  %s', f)

            with contextlib_ext.Optional(trace_event.trace(str(test)),
                                         self._env.trace_output):
                output = self._delegate.Run(test,
                                            device,
                                            flags=' '.join(flags),
                                            timeout=timeout,
                                            retries=0)

            if self._test_instance.enable_xml_result_parsing:
                gtest_xml = device.ReadFile(device_tmp_results_file.name,
                                            as_root=True)

        for s in self._servers[str(device)]:
            s.Reset()
        if self._test_instance.app_files:
            self._delegate.PullAppFiles(device, self._test_instance.app_files,
                                        self._test_instance.app_file_dir)
        if not self._env.skip_clear_data:
            self._delegate.Clear(device)

        # Parse the output.
        # TODO(jbudorick): Transition test scripts away from parsing stdout.
        if self._test_instance.enable_xml_result_parsing:
            results = gtest_test_instance.ParseGTestXML(gtest_xml)
        else:
            results = gtest_test_instance.ParseGTestOutput(output)

        # Check whether there are any crashed testcases.
        self._crashes.update(
            r.GetName() for r in results
            if r.GetType() == base_test_result.ResultType.CRASH)

        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True)
                        stream_name = 'tombstones_%s_%s' % (time.strftime(
                            '%Y%m%dT%H%M%S', time.localtime()), device.serial)
                        tombstones_url = logdog_helper.text(
                            stream_name, resolved_tombstones)
                    result.SetLink('tombstones', tombstones_url)

        tests_stripped_disabled_prefix = set()
        for t in test:
            tests_stripped_disabled_prefix.add(
                gtest_test_instance.TestNameWithoutDisabledPrefix(t))
        not_run_tests = tests_stripped_disabled_prefix.difference(
            set(r.GetName() for r in results))
        return results, list(not_run_tests) if results else None
Example 12
  def _Archive(self):
    with open(self.name, 'r') as f:
      logdog_helper.text(self._stream_name, f.read())
Example 13
def _handle_perf_results(benchmark_enabled_map, benchmark_directory_map,
                         configuration_name, build_properties, extra_links,
                         output_results_dir):
    """
    Upload perf results to the perf dashboard.

    This method also uploads the perf results to logdog and adds the
    resulting link to |extra_links|.

    Returns:
      (return_code, benchmark_upload_result_map)
      return_code is 0 if the upload to the perf dashboard succeeded, 1
        otherwise.
      benchmark_upload_result_map is a dictionary describing which benchmarks
        were successfully uploaded.
    """
    begin_time = time.time()
    # Upload all eligible benchmarks to the perf dashboard
    results_dict = {}

    invocations = []
    for benchmark_name, directories in benchmark_directory_map.iteritems():
        if not benchmark_enabled_map.get(benchmark_name, False):
            continue
        # Create a place to write the perf results that will be written out
        # to logdog.
        output_json_file = os.path.join(output_results_dir,
                                        (str(uuid.uuid4()) + benchmark_name))
        results_dict[benchmark_name] = output_json_file
        invocations.append((benchmark_name, directories, configuration_name,
                            build_properties, output_json_file))

    # Kick off the uploads in multiple processes
    pool = multiprocessing.Pool(_GetCpuCount())
    try:
        async_result = pool.map_async(_upload_individual_benchmark,
                                      invocations)
        # TODO(crbug.com/947035): What timeout is reasonable?
        results = async_result.get(timeout=4000)
    except multiprocessing.TimeoutError:
        logging.error(
            'Failed uploading benchmarks to perf dashboard in parallel')
        results = []
        for benchmark_name in benchmark_directory_map:
            results.append((benchmark_name, False))
    finally:
        pool.terminate()

    # Keep a mapping of benchmarks to their upload results
    benchmark_upload_result_map = {}
    for r in results:
        benchmark_upload_result_map[r[0]] = r[1]

    logdog_dict = {}
    upload_failures_counter = 0
    logdog_stream = None
    logdog_label = 'Results Dashboard'
    for benchmark_name, output_file in results_dict.iteritems():
        upload_succeed = benchmark_upload_result_map[benchmark_name]
        if not upload_succeed:
            upload_failures_counter += 1
        is_reference = '.reference' in benchmark_name
        _write_perf_data_to_logfile(benchmark_name,
                                    output_file,
                                    configuration_name,
                                    build_properties,
                                    logdog_dict,
                                    is_reference,
                                    upload_failure=not upload_succeed)

    logdog_file_name = _generate_unique_logdog_filename('Results_Dashboard_')
    logdog_stream = logdog_helper.text(logdog_file_name,
                                       json.dumps(dict(logdog_dict),
                                                  sort_keys=True,
                                                  indent=4,
                                                  separators=(',', ': ')),
                                       content_type=JSON_CONTENT_TYPE)
    if upload_failures_counter > 0:
        logdog_label += (' %s merge script perf data upload failures' %
                         upload_failures_counter)
    extra_links[logdog_label] = logdog_stream
    end_time = time.time()
    print_duration('Uploading results to perf dashboard', begin_time, end_time)
    if upload_failures_counter > 0:
        return 1, benchmark_upload_result_map
    return 0, benchmark_upload_result_map
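Example 13 fans the uploads out with multiprocessing.Pool.map_async, so _upload_individual_benchmark has to accept one invocation tuple and return a (benchmark_name, success) pair, which is what benchmark_upload_result_map is built from. A hypothetical worker with that contract (the actual dashboard upload is elided):

def upload_one_benchmark(invocation):
    # Hypothetical stand-in for _upload_individual_benchmark, matching the
    # invocation tuples built in Example 13.
    (benchmark_name, directories, configuration_name,
     build_properties, output_json_file) = invocation
    try:
        # ... upload the results found under `directories` to the dashboard
        # and write the per-benchmark log to output_json_file ...
        return benchmark_name, True
    except Exception:
        return benchmark_name, False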
Example 14
def _handle_perf_results(benchmark_enabled_map, benchmark_directory_map,
                         configuration_name, build_properties,
                         service_account_file, extra_links):
    """
    Upload perf results to the perf dashboard.

    This method also uploads the perf results to logdog and adds the
    resulting link to |extra_links|.

    Returns:
      0 if the upload to the perf dashboard succeeded, 1 otherwise.
    """
    begin_time = time.time()
    tmpfile_dir = tempfile.mkdtemp('outputresults')
    try:
        # Upload all eligible benchmarks to the perf dashboard
        results_dict = {}

        invocations = []
        for benchmark_name, directories in benchmark_directory_map.iteritems():
            if not benchmark_enabled_map.get(benchmark_name, False):
                continue
            # Create a place to write the perf results that will be written
            # out to logdog.
            output_json_file = os.path.join(
                tmpfile_dir, (str(uuid.uuid4()) + benchmark_name))
            results_dict[benchmark_name] = output_json_file
            invocations.append(
                (benchmark_name, directories, configuration_name,
                 build_properties, output_json_file, service_account_file))

        # Kick off the uploads in multiple processes
        pool = mp.Pool()
        try:
            async_result = pool.map_async(_upload_individual_benchmark,
                                          invocations)
            results = async_result.get(timeout=2000)
        except mp.TimeoutError:
            logging.error(
                'Failed uploading benchmarks to perf dashboard in parallel')
            pool.terminate()
            results = []
            for benchmark_name in benchmark_directory_map:
                results.append((benchmark_name, False))

        # Keep a mapping of benchmarks to their upload results
        benchmark_upload_result_map = {}
        for r in results:
            benchmark_upload_result_map[r[0]] = bool(r[1])

        logdog_dict = {}
        upload_failures_counter = 0
        logdog_stream = None
        logdog_label = 'Results Dashboard'
        for benchmark_name, output_file in results_dict.iteritems():
            failure = benchmark_upload_result_map[benchmark_name]
            if failure:
                upload_failures_counter += 1
            is_reference = '.reference' in benchmark_name
            _write_perf_data_to_logfile(benchmark_name, output_file,
                                        configuration_name, build_properties,
                                        logdog_dict, is_reference, failure)

        logdog_file_name = _generate_unique_logdog_filename(
            'Results_Dashboard_')
        logdog_stream = logdog_helper.text(logdog_file_name,
                                           json.dumps(dict(logdog_dict),
                                                      sort_keys=True,
                                                      indent=4,
                                                      separators=(',', ': ')),
                                           content_type=JSON_CONTENT_TYPE)
        if upload_failures_counter > 0:
            logdog_label += (' Upload Failure (%s benchmark upload failures)' %
                             upload_failures_counter)
        extra_links[logdog_label] = logdog_stream
        end_time = time.time()
        print_duration('Uploading results to perf dashboard', begin_time,
                       end_time)
        if upload_failures_counter > 0:
            return 1
        return 0
    finally:
        shutil.rmtree(tmpfile_dir)
Example 15
    def _Archive(self):
        with open(self.name, 'r') as f:
            logdog_helper.text(self._stream_name, f.read())
Example 16
  def _RunTest(self, device, test):
    # Run the test.
    timeout = (self._test_instance.shard_timeout
               * self.GetTool(device).GetTimeoutScale())
    if self._test_instance.wait_for_java_debugger:
      timeout = None
    if self._test_instance.store_tombstones:
      tombstones.ClearAllTombstones(device)
    with device_temp_file.DeviceTempFile(
        adb=device.adb,
        dir=self._delegate.ResultsDirectory(device),
        suffix='.xml') as device_tmp_results_file:
      with contextlib_ext.Optional(
          device_temp_file.NamedDeviceTemporaryDirectory(
              adb=device.adb, dir='/sdcard/'),
          self._test_instance.gs_test_artifacts_bucket) as test_artifacts_dir:
        with (contextlib_ext.Optional(
            device_temp_file.DeviceTempFile(
                adb=device.adb, dir=self._delegate.ResultsDirectory(device)),
            self._test_instance.isolated_script_test_perf_output)
            ) as isolated_script_test_perf_output:

          flags = list(self._test_instance.flags)
          if self._test_instance.enable_xml_result_parsing:
            flags.append('--gtest_output=xml:%s' % device_tmp_results_file.name)

          if self._test_instance.gs_test_artifacts_bucket:
            flags.append('--test_artifacts_dir=%s' % test_artifacts_dir.name)

          if self._test_instance.isolated_script_test_perf_output:
            flags.append('--isolated_script_test_perf_output=%s'
                         % isolated_script_test_perf_output.name)

          logging.info('flags:')
          for f in flags:
            logging.info('  %s', f)

          stream_name = 'logcat_%s_%s_%s' % (
              hash(tuple(test)),
              time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
              device.serial)

          with self._env.output_manager.ArchivedTempfile(
              stream_name, 'logcat') as logcat_file:
            with logcat_monitor.LogcatMonitor(
                device.adb,
                filter_specs=local_device_environment.LOGCAT_FILTERS,
                output_file=logcat_file.name) as logmon:
              with contextlib_ext.Optional(
                  trace_event.trace(str(test)),
                  self._env.trace_output):
                output = self._delegate.Run(
                    test, device, flags=' '.join(flags),
                    timeout=timeout, retries=0)
            logmon.Close()

          if logcat_file.Link():
            logging.info('Logcat saved to %s', logcat_file.Link())

          if self._test_instance.enable_xml_result_parsing:
            try:
              gtest_xml = device.ReadFile(
                  device_tmp_results_file.name,
                  as_root=True)
            except device_errors.CommandFailedError as e:
              logging.warning(
                  'Failed to pull gtest results XML file %s: %s',
                  device_tmp_results_file.name,
                  str(e))
              gtest_xml = None

          if self._test_instance.isolated_script_test_perf_output:
            try:
              device.PullFile(
                  isolated_script_test_perf_output.name,
                  self._test_instance.isolated_script_test_perf_output)
            except device_errors.CommandFailedError as e:
              logging.warning(
                  'Failed to pull chartjson results %s: %s',
                  isolated_script_test_perf_output.name, str(e))

          test_artifacts_url = self._UploadTestArtifacts(device,
                                                         test_artifacts_dir)

    for s in self._servers[str(device)]:
      s.Reset()
    if self._test_instance.app_files:
      self._delegate.PullAppFiles(device, self._test_instance.app_files,
                                  self._test_instance.app_file_dir)
    if not self._env.skip_clear_data:
      self._delegate.Clear(device)

    for l in output:
      logging.info(l)

    # Parse the output.
    # TODO(jbudorick): Transition test scripts away from parsing stdout.
    if self._test_instance.enable_xml_result_parsing:
      results = gtest_test_instance.ParseGTestXML(gtest_xml)
    else:
      results = gtest_test_instance.ParseGTestOutput(
          output, self._test_instance.symbolizer, device.product_cpu_abi)

    tombstones_url = None
    for r in results:
      if logcat_file:
        r.SetLink('logcat', logcat_file.Link())

      if self._test_instance.gs_test_artifacts_bucket:
        r.SetLink('test_artifacts', test_artifacts_url)

      if r.GetType() == base_test_result.ResultType.CRASH:
        self._crashes.add(r.GetName())
        if self._test_instance.store_tombstones:
          if not tombstones_url:
            resolved_tombstones = tombstones.ResolveTombstones(
                device,
                resolve_all_tombstones=True,
                include_stack_symbols=False,
                wipe_tombstones=True)
            stream_name = 'tombstones_%s_%s' % (
                time.strftime('%Y%m%dT%H%M%S', time.localtime()),
                device.serial)
            tombstones_url = logdog_helper.text(
                stream_name, '\n'.join(resolved_tombstones))
          r.SetLink('tombstones', tombstones_url)

    tests_stripped_disabled_prefix = set()
    for t in test:
      tests_stripped_disabled_prefix.add(
          gtest_test_instance.TestNameWithoutDisabledPrefix(t))
    not_run_tests = tests_stripped_disabled_prefix.difference(
        set(r.GetName() for r in results))
    return results, list(not_run_tests) if results else None
Example 17
    def _RunTest(self, device, test):
        extras = {}

        flags_to_add = []
        test_timeout_scale = None
        if self._test_instance.coverage_directory:
            coverage_basename = '%s.ec' % ('%s_group' %
                                           test[0]['method'] if isinstance(
                                               test, list) else test['method'])
            extras['coverage'] = 'true'
            coverage_directory = os.path.join(device.GetExternalStoragePath(),
                                              'chrome', 'test', 'coverage')
            coverage_device_file = os.path.join(coverage_directory,
                                                coverage_basename)
            extras['coverageFile'] = coverage_device_file
        # Save screenshot if screenshot dir is specified (save locally) or if
        # a GS bucket is passed (save in cloud).
        screenshot_device_file = None
        if (self._test_instance.screenshot_dir
                or self._test_instance.gs_results_bucket):
            screenshot_device_file = device_temp_file.DeviceTempFile(
                device.adb, suffix='.png', dir=device.GetExternalStoragePath())
            extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name

        extras[EXTRA_UI_CAPTURE_DIR] = self._ui_capture_dir[device]

        if isinstance(test, list):
            if not self._test_instance.driver_apk:
                raise Exception('driver_apk does not exist. '
                                'Please build it and try again.')
            if any(t.get('is_junit4') for t in test):
                raise Exception('driver apk does not support JUnit4 tests')

            def name_and_timeout(t):
                n = instrumentation_test_instance.GetTestName(t)
                i = self._GetTimeoutFromAnnotations(t['annotations'], n)
                return (n, i)

            test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

            test_name = ','.join(test_names)
            test_display_name = test_name
            target = '%s/%s' % (self._test_instance.driver_package,
                                self._test_instance.driver_name)
            extras.update(
                self._test_instance.GetDriverEnvironmentVars(
                    test_list=test_names))
            timeout = sum(timeouts)
        else:
            test_name = instrumentation_test_instance.GetTestName(test)
            test_display_name = self._GetUniqueTestName(test)
            if test['is_junit4']:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner_junit4)
            else:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner)
            extras['class'] = test_name
            if 'flags' in test and test['flags']:
                flags_to_add.extend(test['flags'])
            timeout = self._GetTimeoutFromAnnotations(test['annotations'],
                                                      test_display_name)

            test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
                test['annotations'])
            if test_timeout_scale and test_timeout_scale != 1:
                valgrind_tools.SetChromeTimeoutScale(
                    device,
                    test_timeout_scale * self._test_instance.timeout_scale)

        logging.info('preparing to run %s: %s', test_display_name, test)

        render_tests_device_output_dir = None
        if _IsRenderTest(test):
            # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
            render_tests_device_output_dir = posixpath.join(
                device.GetExternalStoragePath(), 'render_test_output_dir')
            flags_to_add.append('--render-test-output-dir=%s' %
                                render_tests_device_output_dir)

        if flags_to_add:
            self._CreateFlagChangerIfNeeded(device)
            self._flag_changers[str(device)].PushFlags(add=flags_to_add)

        time_ms = lambda: int(time.time() * 1e3)
        start_ms = time_ms()

        stream_name = 'logcat_%s_%s_%s' % (test_name.replace(
            '#', '.'), time.strftime('%Y%m%dT%H%M%S-UTC',
                                     time.gmtime()), device.serial)
        logmon = logdog_logcat_monitor.LogdogLogcatMonitor(
            device.adb, stream_name, filter_specs=LOGCAT_FILTERS)

        with contextlib_ext.Optional(logmon,
                                     self._test_instance.should_save_logcat):
            with _LogTestEndpoints(device, test_name):
                with contextlib_ext.Optional(trace_event.trace(test_name),
                                             self._env.trace_output):
                    output = device.StartInstrumentation(target,
                                                         raw=True,
                                                         extras=extras,
                                                         timeout=timeout,
                                                         retries=0)

        logcat_url = logmon.GetLogcatURL()
        duration_ms = time_ms() - start_ms

        # TODO(jbudorick): Make instrumentation tests output a JSON so this
        # doesn't have to parse the output.
        result_code, result_bundle, statuses = (
            self._test_instance.ParseAmInstrumentRawOutput(output))
        results = self._test_instance.GenerateTestResults(
            result_code, result_bundle, statuses, start_ms, duration_ms)

        def restore_flags():
            if flags_to_add:
                self._flag_changers[str(device)].Restore()

        def restore_timeout_scale():
            if test_timeout_scale:
                valgrind_tools.SetChromeTimeoutScale(
                    device, self._test_instance.timeout_scale)

        def handle_coverage_data():
            if self._test_instance.coverage_directory:
                device.PullFile(coverage_directory,
                                self._test_instance.coverage_directory)
                device.RunShellCommand('rm -f %s' %
                                       posixpath.join(coverage_directory, '*'),
                                       check_return=True,
                                       shell=True)

        def handle_render_test_data():
            if _IsRenderTest(test):
                # Render tests do not cause test failure by default. So we have to check
                # to see if any failure images were generated even if the test does not
                # fail.
                try:
                    self._ProcessRenderTestResults(
                        device, render_tests_device_output_dir, results)
                finally:
                    device.RemovePath(render_tests_device_output_dir,
                                      recursive=True,
                                      force=True)

        # While constructing the TestResult objects, we can parallelize several
        # steps that involve ADB. These steps should NOT depend on any info in
        # the results! Things such as whether the test CRASHED have not yet been
        # determined.
        post_test_steps = [
            restore_flags, restore_timeout_scale, handle_coverage_data,
            handle_render_test_data
        ]
        if self._env.concurrent_adb:
            post_test_step_thread_group = reraiser_thread.ReraiserThreadGroup(
                reraiser_thread.ReraiserThread(f) for f in post_test_steps)
            post_test_step_thread_group.StartAll(will_block=True)
        else:
            for step in post_test_steps:
                step()

        for result in results:
            if logcat_url:
                result.SetLink('logcat', logcat_url)

        # Update the result name if the test used flags.
        if flags_to_add:
            for r in results:
                if r.GetName() == test_name:
                    r.SetName(test_display_name)

        # Add UNKNOWN results for any missing tests.
        iterable_test = test if isinstance(test, list) else [test]
        test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
        results_names = set(r.GetName() for r in results)
        results.extend(
            base_test_result.BaseTestResult(
                u, base_test_result.ResultType.UNKNOWN)
            for u in test_names.difference(results_names))

        # Update the result type if we detect a crash.
        if DidPackageCrashOnDevice(self._test_instance.test_package, device):
            for r in results:
                if r.GetType() == base_test_result.ResultType.UNKNOWN:
                    r.SetType(base_test_result.ResultType.CRASH)

        # Handle failures by:
        #   - optionally taking a screenshot
        #   - logging the raw output at INFO level
        #   - clearing the application state while persisting permissions
        if any(r.GetType() not in (base_test_result.ResultType.PASS,
                                   base_test_result.ResultType.SKIP)
               for r in results):
            with contextlib_ext.Optional(
                    tempfile_ext.NamedTemporaryDirectory(),
                    self._test_instance.screenshot_dir is None
                    and self._test_instance.gs_results_bucket
            ) as screenshot_host_dir:
                screenshot_host_dir = (self._test_instance.screenshot_dir
                                       or screenshot_host_dir)
                self._SaveScreenshot(device, screenshot_host_dir,
                                     screenshot_device_file, test_display_name,
                                     results)

            logging.info('detected failure in %s. raw output:',
                         test_display_name)
            for l in output:
                logging.info('  %s', l)
            if (not self._env.skip_clear_data
                    and self._test_instance.package_info):
                permissions = (
                    self._test_instance.apk_under_test.GetPermissions()
                    if self._test_instance.apk_under_test else None)
                device.ClearApplicationState(
                    self._test_instance.package_info.package,
                    permissions=permissions)
        else:
            logging.debug('raw output from %s:', test_display_name)
            for l in output:
                logging.debug('  %s', l)
        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True)
                        stream_name = 'tombstones_%s_%s' % (time.strftime(
                            '%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial)
                        tombstones_url = logdog_helper.text(
                            stream_name, '\n'.join(resolved_tombstones))
                    result.SetLink('tombstones', tombstones_url)

        if self._env.concurrent_adb:
            post_test_step_thread_group.JoinAll()
        return results, None
Example 18
  def _RunTest(self, device, test):
    extras = {}

    flags_to_add = []
    test_timeout_scale = None
    if self._test_instance.coverage_directory:
      coverage_basename = '%s.ec' % ('%s_group' % test[0]['method']
          if isinstance(test, list) else test['method'])
      extras['coverage'] = 'true'
      coverage_directory = os.path.join(
          device.GetExternalStoragePath(), 'chrome', 'test', 'coverage')
      coverage_device_file = os.path.join(
          coverage_directory, coverage_basename)
      extras['coverageFile'] = coverage_device_file
    # Save screenshot if screenshot dir is specified (save locally) or if
    # a GS bucket is passed (save in cloud).
    screenshot_device_file = None
    if (self._test_instance.screenshot_dir or
        self._test_instance.gs_results_bucket):
      screenshot_device_file = device_temp_file.DeviceTempFile(
          device.adb, suffix='.png', dir=device.GetExternalStoragePath())
      extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name

    extras[EXTRA_UI_CAPTURE_DIR] = self._ui_capture_dir[device]

    if self._env.trace_output:
      trace_device_file = device_temp_file.DeviceTempFile(
          device.adb, suffix='.json', dir=device.GetExternalStoragePath())
      extras[EXTRA_TRACE_FILE] = trace_device_file.name

    if isinstance(test, list):
      if not self._test_instance.driver_apk:
        raise Exception('driver_apk does not exist. '
                        'Please build it and try again.')
      if any(t.get('is_junit4') for t in test):
        raise Exception('driver apk does not support JUnit4 tests')

      def name_and_timeout(t):
        n = instrumentation_test_instance.GetTestName(t)
        i = self._GetTimeoutFromAnnotations(t['annotations'], n)
        return (n, i)

      test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

      test_name = ','.join(test_names)
      test_display_name = test_name
      target = '%s/%s' % (
          self._test_instance.driver_package,
          self._test_instance.driver_name)
      extras.update(
          self._test_instance.GetDriverEnvironmentVars(
              test_list=test_names))
      timeout = sum(timeouts)
    else:
      test_name = instrumentation_test_instance.GetTestName(test)
      test_display_name = self._GetUniqueTestName(test)
      if test['is_junit4']:
        target = '%s/%s' % (
            self._test_instance.test_package,
            self._test_instance.junit4_runner_class)
      else:
        target = '%s/%s' % (
            self._test_instance.test_package,
            self._test_instance.junit3_runner_class)
      extras['class'] = test_name
      if 'flags' in test and test['flags']:
        flags_to_add.extend(test['flags'])
      timeout = self._GetTimeoutFromAnnotations(
        test['annotations'], test_display_name)

      test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
          test['annotations'])
      if test_timeout_scale and test_timeout_scale != 1:
        valgrind_tools.SetChromeTimeoutScale(
            device, test_timeout_scale * self._test_instance.timeout_scale)

    logging.info('preparing to run %s: %s', test_display_name, test)

    render_tests_device_output_dir = None
    if _IsRenderTest(test):
      # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
      render_tests_device_output_dir = posixpath.join(
          device.GetExternalStoragePath(),
          'render_test_output_dir')
      flags_to_add.append('--render-test-output-dir=%s' %
                          render_tests_device_output_dir)

    if flags_to_add:
      self._CreateFlagChangerIfNeeded(device)
      self._flag_changers[str(device)].PushFlags(add=flags_to_add)

    time_ms = lambda: int(time.time() * 1e3)
    start_ms = time_ms()

    stream_name = 'logcat_%s_%s_%s' % (
        test_name.replace('#', '.'),
        time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
        device.serial)
    logmon = logdog_logcat_monitor.LogdogLogcatMonitor(
        device.adb, stream_name, filter_specs=LOGCAT_FILTERS)

    with contextlib_ext.Optional(
        logmon, self._test_instance.should_save_logcat):
      with _LogTestEndpoints(device, test_name):
        with contextlib_ext.Optional(
            trace_event.trace(test_name),
            self._env.trace_output):
          output = device.StartInstrumentation(
              target, raw=True, extras=extras, timeout=timeout, retries=0)

    logcat_url = logmon.GetLogcatURL()
    duration_ms = time_ms() - start_ms

    if self._env.trace_output:
      self._SaveTraceData(trace_device_file, device, test['class'])

    # TODO(jbudorick): Make instrumentation tests output a JSON so this
    # doesn't have to parse the output.
    result_code, result_bundle, statuses = (
        self._test_instance.ParseAmInstrumentRawOutput(output))
    results = self._test_instance.GenerateTestResults(
        result_code, result_bundle, statuses, start_ms, duration_ms)

    def restore_flags():
      if flags_to_add:
        self._flag_changers[str(device)].Restore()

    def restore_timeout_scale():
      if test_timeout_scale:
        valgrind_tools.SetChromeTimeoutScale(
            device, self._test_instance.timeout_scale)

    def handle_coverage_data():
      if self._test_instance.coverage_directory:
        device.PullFile(coverage_directory,
            self._test_instance.coverage_directory)
        device.RunShellCommand(
            'rm -f %s' % posixpath.join(coverage_directory, '*'),
            check_return=True, shell=True)

    def handle_render_test_data():
      if _IsRenderTest(test):
        # Render tests do not cause a test failure by default, so we have to
        # check whether any failure images were generated even if the test did
        # not fail.
        try:
          self._ProcessRenderTestResults(
              device, render_tests_device_output_dir, results)
        finally:
          device.RemovePath(render_tests_device_output_dir,
                            recursive=True, force=True)

    # While constructing the TestResult objects, we can parallelize several
    # steps that involve ADB. These steps should NOT depend on any info in
    # the results! Things such as whether the test CRASHED have not yet been
    # determined.
    post_test_steps = [restore_flags, restore_timeout_scale,
                       handle_coverage_data, handle_render_test_data]
    if self._env.concurrent_adb:
      post_test_step_thread_group = reraiser_thread.ReraiserThreadGroup(
          reraiser_thread.ReraiserThread(f) for f in post_test_steps)
      post_test_step_thread_group.StartAll(will_block=True)
    else:
      for step in post_test_steps:
        step()

    for result in results:
      if logcat_url:
        result.SetLink('logcat', logcat_url)

    # Update the result name if the test used flags.
    if flags_to_add:
      for r in results:
        if r.GetName() == test_name:
          r.SetName(test_display_name)

    # Add UNKNOWN results for any missing tests.
    iterable_test = test if isinstance(test, list) else [test]
    test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
    results_names = set(r.GetName() for r in results)
    results.extend(
        base_test_result.BaseTestResult(u, base_test_result.ResultType.UNKNOWN)
        for u in test_names.difference(results_names))

    # Update the result type if we detect a crash.
    if DidPackageCrashOnDevice(self._test_instance.test_package, device):
      for r in results:
        if r.GetType() == base_test_result.ResultType.UNKNOWN:
          r.SetType(base_test_result.ResultType.CRASH)

    # Handle failures by:
    #   - optionally taking a screenshot
    #   - logging the raw output at INFO level
    #   - clearing the application state while persisting permissions
    if any(r.GetType() not in (base_test_result.ResultType.PASS,
                               base_test_result.ResultType.SKIP)
           for r in results):
      with contextlib_ext.Optional(
          tempfile_ext.NamedTemporaryDirectory(),
          self._test_instance.screenshot_dir is None and
              self._test_instance.gs_results_bucket) as screenshot_host_dir:
        screenshot_host_dir = (
            self._test_instance.screenshot_dir or screenshot_host_dir)
        self._SaveScreenshot(device, screenshot_host_dir,
                             screenshot_device_file, test_display_name,
                             results)

      logging.info('detected failure in %s. raw output:', test_display_name)
      for l in output:
        logging.info('  %s', l)
      if (not self._env.skip_clear_data
          and self._test_instance.package_info):
        permissions = (
            self._test_instance.apk_under_test.GetPermissions()
            if self._test_instance.apk_under_test
            else None)
        device.ClearApplicationState(self._test_instance.package_info.package,
                                     permissions=permissions)
    else:
      logging.debug('raw output from %s:', test_display_name)
      for l in output:
        logging.debug('  %s', l)
    if self._test_instance.store_tombstones:
      tombstones_url = None
      for result in results:
        if result.GetType() == base_test_result.ResultType.CRASH:
          if not tombstones_url:
            resolved_tombstones = tombstones.ResolveTombstones(
                device,
                resolve_all_tombstones=True,
                include_stack_symbols=False,
                wipe_tombstones=True,
                tombstone_symbolizer=self._test_instance.symbolizer)
            stream_name = 'tombstones_%s_%s' % (
                time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
                device.serial)
            tombstones_url = logdog_helper.text(
                stream_name, '\n'.join(resolved_tombstones))
          result.SetLink('tombstones', tombstones_url)

    if self._env.concurrent_adb:
      post_test_step_thread_group.JoinAll()
    return results, None
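
The post-test steps above (restoring flags and the timeout scale, pulling coverage data, processing render-test output) are either run inline or handed to reraiser_thread.ReraiserThreadGroup when concurrent_adb is enabled, and only joined after the results have been assembled. Below is a minimal sketch of that fan-out/join idea using plain threading; it is not the reraiser_thread implementation (which also re-raises worker exceptions on join), and run_post_test_steps, restore_flags and pull_coverage are illustrative names.

import threading


def run_post_test_steps(steps, concurrent=False):
  # Minimal sketch of the fan-out/join pattern: start every step, let the
  # caller keep working, then join later. Unlike reraiser_thread, this does
  # NOT re-raise exceptions from the worker threads.
  if not concurrent:
    for step in steps:
      step()
    return lambda: None
  threads = [threading.Thread(target=step) for step in steps]
  for t in threads:
    t.start()

  def join_all():
    for t in threads:
      t.join()
  return join_all


def restore_flags():
  print('restoring flags')


def pull_coverage():
  print('pulling coverage data')


join_post_test_steps = run_post_test_steps(
    [restore_flags, pull_coverage], concurrent=True)
# ... build the TestResult objects here; they must not depend on these steps ...
join_post_test_steps()
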
Example No. 19
0
def _process_perf_results(output_json, configuration_name,
                          service_account_file, build_properties,
                          task_output_dir):
    """Process one or more perf JSON results.

  Consists of merging the json-test-format output and uploading the perf test
  output (chartjson and histogram).

  Each directory in the task_output_dir represents one benchmark
  that was run. Within this directory, there is a subdirectory with the name
  of the benchmark that was run. In that subdirectory, there is a
  perftest-output.json file containing the performance results in histogram
  or dashboard json format and an output.json file containing the json test
  results for the benchmark.
  """
    directory_list = [
        f for f in listdir(task_output_dir)
        if not isfile(join(task_output_dir, f))
    ]
    benchmark_directory_list = []
    for directory in directory_list:
        benchmark_directory_list += [
            join(task_output_dir, directory, f)
            for f in listdir(join(task_output_dir, directory))
        ]

    # We need to keep track of disabled benchmarks so we don't try to
    # upload the results.
    test_results_list = []
    tmpfile_dir = tempfile.mkdtemp('resultscache')
    upload_failure = False

    build_properties = json.loads(build_properties)
    if not configuration_name:
        # We are deprecating perf-id; see crbug.com/817823.
        configuration_name = build_properties['buildername']

    try:
        logdog_dict = {}
        with oauth_api.with_access_token(service_account_file) as oauth_file:
            for directory in benchmark_directory_list:
                # Obtain the test name we are running
                benchmark_name = basename(directory).replace(" benchmark", "")
                is_ref = '.reference' in benchmark_name
                disabled = False
                with open(join(directory, 'test_results.json')) as json_data:
                    json_results = json.load(json_data)
                    if not json_results:
                        # The output is null, meaning the test didn't produce
                        # any results. Report the problem and continue loading
                        # the rest of the test results.
                        print 'No results produced for %s, skipping upload' % directory
                        continue
                    if json_results.get('version') == 3:
                        # Non-telemetry tests don't write json test results,
                        # but if they executed they are enabled and will
                        # generate chartjson results.
                        if not bool(json_results.get('tests')):
                            disabled = True
                    if not is_ref:
                        # We don't need to upload reference build data to the
                        # flakiness dashboard since we don't monitor the ref build
                        test_results_list.append(json_results)
                if disabled:
                    # We don't upload disabled benchmarks
                    print 'Benchmark %s disabled' % benchmark_name
                    continue

                print 'Uploading perf results from %s benchmark' % benchmark_name

                upload_fail = _upload_and_write_perf_data_to_logfile(
                    benchmark_name, directory, configuration_name,
                    build_properties, oauth_file, tmpfile_dir, logdog_dict,
                    is_ref)
                upload_failure = upload_failure or upload_fail

            logdog_label = 'Results Dashboard'
            logdog_file_name = 'Results_Dashboard_' + str(uuid.uuid4())
            if upload_failure:
                logdog_label += ' Upload Failure'
            _merge_json_output(
                output_json, test_results_list,
                logdog_helper.text(
                    logdog_file_name,
                    json.dumps(logdog_dict,
                               sort_keys=True,
                               indent=4,
                               separators=(',', ':'))), logdog_label)
    finally:
        shutil.rmtree(tmpfile_dir)
    return upload_failure
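
As the docstring above describes, task_output_dir contains one directory per shard, each holding one subdirectory per benchmark. The function builds benchmark_directory_list by listing the non-file entries of task_output_dir and then every entry inside them. A standalone sketch of that traversal, assuming the same layout (collect_benchmark_dirs and the example path are made up):

from os import listdir
from os.path import isfile, join


def collect_benchmark_dirs(task_output_dir):
  # Keep only the subdirectories of task_output_dir (one per shard), then
  # gather every entry inside those shards as a benchmark directory.
  shard_dirs = [
      f for f in listdir(task_output_dir)
      if not isfile(join(task_output_dir, f))
  ]
  benchmark_dirs = []
  for shard in shard_dirs:
    benchmark_dirs += [
        join(task_output_dir, shard, f)
        for f in listdir(join(task_output_dir, shard))
    ]
  return benchmark_dirs


# Example call (path is hypothetical):
# print(collect_benchmark_dirs('/tmp/task_output_dir'))
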
  def _RunTest(self, device, test):
    extras = {}

    flags = None
    test_timeout_scale = None
    if self._test_instance.coverage_directory:
      coverage_basename = '%s.ec' % ('%s_group' % test[0]['method']
          if isinstance(test, list) else test['method'])
      extras['coverage'] = 'true'
      coverage_directory = os.path.join(
          device.GetExternalStoragePath(), 'chrome', 'test', 'coverage')
      coverage_device_file = os.path.join(
          coverage_directory, coverage_basename)
      extras['coverageFile'] = coverage_device_file

    if isinstance(test, list):
      if not self._test_instance.driver_apk:
        raise Exception('driver_apk does not exist. '
                        'Please build it and try again.')
      if any(t.get('is_junit4') for t in test):
        raise Exception('driver apk does not support JUnit4 tests')

      def name_and_timeout(t):
        n = instrumentation_test_instance.GetTestName(t)
        i = self._GetTimeoutFromAnnotations(t['annotations'], n)
        return (n, i)

      test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

      test_name = ','.join(test_names)
      test_display_name = test_name
      target = '%s/%s' % (
          self._test_instance.driver_package,
          self._test_instance.driver_name)
      extras.update(
          self._test_instance.GetDriverEnvironmentVars(
              test_list=test_names))
      timeout = sum(timeouts)
    else:
      test_name = instrumentation_test_instance.GetTestName(test)
      test_display_name = self._GetUniqueTestName(test)
      if test['is_junit4']:
        target = '%s/%s' % (
            self._test_instance.test_package,
            self._test_instance.test_runner_junit4)
      else:
        target = '%s/%s' % (
            self._test_instance.test_package, self._test_instance.test_runner)
      extras['class'] = test_name
      if 'flags' in test:
        flags = test['flags']
      timeout = self._GetTimeoutFromAnnotations(
        test['annotations'], test_display_name)

      test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
          test['annotations'])
      if test_timeout_scale and test_timeout_scale != 1:
        valgrind_tools.SetChromeTimeoutScale(
            device, test_timeout_scale * self._test_instance.timeout_scale)

    logging.info('preparing to run %s: %s', test_display_name, test)

    if flags:
      self._CreateFlagChangerIfNeeded(device)
      self._flag_changers[str(device)].PushFlags(
        add=flags.add, remove=flags.remove)

    try:
      device.RunShellCommand(
          ['log', '-p', 'i', '-t', _TAG, 'START %s' % test_name],
          check_return=True)
      time_ms = lambda: int(time.time() * 1e3)
      start_ms = time_ms()

      stream_name = 'logcat_%s_%s_%s' % (
          test_name.replace('#', '.'),
          time.strftime('%Y%m%dT%H%M%S', time.localtime()),
          device.serial)
      logmon = logdog_logcat_monitor.LogdogLogcatMonitor(
          device.adb, stream_name, filter_specs=LOGCAT_FILTERS)

      with contextlib_ext.Optional(
          logmon, self._test_instance.should_save_logcat):
        with contextlib_ext.Optional(
            trace_event.trace(test_name),
            self._env.trace_output):
          output = device.StartInstrumentation(
              target, raw=True, extras=extras, timeout=timeout, retries=0)
      logcat_url = logmon.GetLogcatURL()
    finally:
      device.RunShellCommand(
          ['log', '-p', 'i', '-t', _TAG, 'END %s' % test_name],
          check_return=True)
      duration_ms = time_ms() - start_ms
      if flags:
        self._flag_changers[str(device)].Restore()
      if test_timeout_scale:
        valgrind_tools.SetChromeTimeoutScale(
            device, self._test_instance.timeout_scale)

    # TODO(jbudorick): Make instrumentation tests output a JSON so this
    # doesn't have to parse the output.
    result_code, result_bundle, statuses = (
        self._test_instance.ParseAmInstrumentRawOutput(output))
    results = self._test_instance.GenerateTestResults(
        result_code, result_bundle, statuses, start_ms, duration_ms)
    for result in results:
      if logcat_url:
        result.SetLink('logcat', logcat_url)

    # Update the result name if the test used flags.
    if flags:
      for r in results:
        if r.GetName() == test_name:
          r.SetName(test_display_name)

    # Add UNKNOWN results for any missing tests.
    iterable_test = test if isinstance(test, list) else [test]
    test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
    results_names = set(r.GetName() for r in results)
    results.extend(
        base_test_result.BaseTestResult(u, base_test_result.ResultType.UNKNOWN)
        for u in test_names.difference(results_names))

    # Update the result type if we detect a crash.
    if DidPackageCrashOnDevice(self._test_instance.test_package, device):
      for r in results:
        if r.GetType() == base_test_result.ResultType.UNKNOWN:
          r.SetType(base_test_result.ResultType.CRASH)

    # Handle failures by:
    #   - optionally taking a screenshot
    #   - logging the raw output at INFO level
    #   - clearing the application state while persisting permissions
    if any(r.GetType() not in (base_test_result.ResultType.PASS,
                               base_test_result.ResultType.SKIP)
           for r in results):
      if self._test_instance.screenshot_dir:
        file_name = '%s-%s.png' % (
            test_display_name,
            time.strftime('%Y%m%dT%H%M%S', time.localtime()))
        saved_dir = device.TakeScreenshot(
            os.path.join(self._test_instance.screenshot_dir, file_name))
        logging.info(
            'Saved screenshot for %s to %s.',
            test_display_name, saved_dir)
      logging.info('detected failure in %s. raw output:', test_display_name)
      for l in output:
        logging.info('  %s', l)
      if (not self._env.skip_clear_data
          and self._test_instance.package_info):
        permissions = (
            self._test_instance.apk_under_test.GetPermissions()
            if self._test_instance.apk_under_test
            else None)
        device.ClearApplicationState(self._test_instance.package_info.package,
                                     permissions=permissions)

    else:
      logging.debug('raw output from %s:', test_display_name)
      for l in output:
        logging.debug('  %s', l)
    if self._test_instance.coverage_directory:
      device.PullFile(coverage_directory,
          self._test_instance.coverage_directory)
      device.RunShellCommand(
          'rm -f %s' % posixpath.join(coverage_directory, '*'),
          check_return=True, shell=True)
    if self._test_instance.store_tombstones:
      tombstones_url = None
      for result in results:
        if result.GetType() == base_test_result.ResultType.CRASH:
          if not tombstones_url:
            resolved_tombstones = tombstones.ResolveTombstones(
                device,
                resolve_all_tombstones=True,
                include_stack_symbols=False,
                wipe_tombstones=True)
            stream_name = 'tombstones_%s_%s' % (
                time.strftime('%Y%m%dT%H%M%S', time.localtime()),
                device.serial)
            tombstones_url = logdog_helper.text(
                stream_name, resolved_tombstones)
          result.SetLink('tombstones', tombstones_url)
    return results, None
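
Logcat and tombstone uploads in the examples above derive their logdog stream names from the test name, a timestamp and the device serial; the earlier _RunTest example timestamps streams in UTC with a '-UTC' suffix, while this one uses local time. A minimal sketch of the UTC variant follows; logcat_stream_name and the sample test name and serial are hypothetical.

import time


def logcat_stream_name(test_name, serial):
  # '#' is replaced with '.' in the test name (presumably to keep the stream
  # name valid), and the timestamp plus device serial keep it unique.
  return 'logcat_%s_%s_%s' % (
      test_name.replace('#', '.'),
      time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
      serial)


print(logcat_stream_name('org.chromium.Example#testFoo', '0123456789ABCDEF'))
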
Example No. 21
0
def _process_perf_results(output_json, configuration_name,
                          service_account_file,
                          build_properties, task_output_dir,
                          smoke_test_mode):
  """Process one or more perf JSON results.

  Consists of merging the json-test-format output and uploading the perf test
  output (chartjson and histogram).

  Each directory in the task_output_dir represents one benchmark
  that was run. Within this directory, there is a subdirectory with the name
  of the benchmark that was run. In that subdirectory, there is a
  perftest-output.json file containing the performance results in histogram
  or dashboard json format and an output.json file containing the json test
  results for the benchmark.
  """
  directory_list = [
      f for f in listdir(task_output_dir)
      if not isfile(join(task_output_dir, f))
  ]
  benchmark_directory_list = []
  for directory in directory_list:
    benchmark_directory_list += [
      join(task_output_dir, directory, f)
      for f in listdir(join(task_output_dir, directory))
    ]

  test_results_list = []
  tmpfile_dir = tempfile.mkdtemp('resultscache')
  upload_failure = False

  build_properties = json.loads(build_properties)
  if not configuration_name:
    # We are deprecating perf-id; see crbug.com/817823.
    configuration_name = build_properties['buildername']

  try:
    # First obtain the list of json test results to merge
    # and determine the status of each benchmark
    benchmark_enabled_map = _handle_perf_json_test_results(
        benchmark_directory_list, test_results_list)

    # Upload all eligible benchmarks to the perf dashboard
    logdog_dict = {}
    logdog_stream = None
    logdog_label = 'Results Dashboard'
    if not smoke_test_mode:
      with oauth_api.with_access_token(service_account_file) as oauth_file:
        for directory in benchmark_directory_list:
          benchmark_name = _get_benchmark_name(directory)
          if not benchmark_enabled_map[benchmark_name]:
            continue
          print 'Uploading perf results from %s benchmark' % benchmark_name
          upload_fail = _upload_and_write_perf_data_to_logfile(
              benchmark_name, directory, configuration_name, build_properties,
              oauth_file, tmpfile_dir, logdog_dict,
              ('.reference' in benchmark_name))
          upload_failure = upload_failure or upload_fail

      logdog_file_name = 'Results_Dashboard_' + str(uuid.uuid4())
      logdog_stream = logdog_helper.text(logdog_file_name,
          json.dumps(logdog_dict, sort_keys=True,
              indent=4, separators=(',', ':')))
      if upload_failure:
        logdog_label += ' Upload Failure'

    # Finally, merge all test results json and write out to output location
    _merge_json_output(output_json, test_results_list,
                       logdog_stream, logdog_label)
  finally:
    shutil.rmtree(tmpfile_dir)
  return upload_failure
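
This last example delegates the enabled/disabled decision to _handle_perf_json_test_results, which is not shown here. Based on the version-3 check in the earlier example (a benchmark whose json test results contain no tests is treated as disabled), a hypothetical sketch of building benchmark_enabled_map might look like the following; build_benchmark_enabled_map and the benchmark-name derivation are borrowed from that earlier example, not taken from the actual helper.

import json
from os.path import basename, join


def build_benchmark_enabled_map(benchmark_directory_list):
  # Hypothetical sketch only; the real _handle_perf_json_test_results is not
  # shown in these examples. A benchmark is treated as disabled when its
  # version-3 json test results contain no tests, mirroring the check in the
  # earlier example.
  enabled_map = {}
  for directory in benchmark_directory_list:
    benchmark_name = basename(directory).replace(' benchmark', '')
    with open(join(directory, 'test_results.json')) as f:
      json_results = json.load(f)
    if not json_results:
      # No results were produced for this benchmark; skip it here, as the
      # earlier example skips its upload.
      continue
    disabled = (json_results.get('version') == 3
                and not bool(json_results.get('tests')))
    enabled_map[benchmark_name] = not disabled
  return enabled_map
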