def _GetJsonTestList(possible_browser, possible_reference_browser,
                     test_classes, num_shards):
  """Returns a list of all enabled tests in a JSON format expected by buildbots.

  JSON format (see build/android/pylib/perf/test_runner.py):
  { "version": <int>,
    "steps": {
      <string>: {
        "device_affinity": <int>,
        "cmd": <string>,
        "perf_dashboard_id": <string>,
      },
      ...
    }
  }
  """
  output = {
    'version': 1,
    'steps': {
    }
  }
  for test_class in test_classes:
    if not issubclass(test_class, benchmark.Benchmark):
      continue
    if not decorators.IsEnabled(test_class, possible_browser):
      continue

    base_name = test_class.Name()
    base_cmd = [sys.executable, os.path.realpath(sys.argv[0]),
                '-v', '--output-format=buildbot', base_name]
    perf_dashboard_id = base_name
    # TODO(tonyg): Currently we set the device affinity to a stable hash of the
    # test name. This somewhat evenly distributes benchmarks among the requested
    # number of shards. However, it is far from optimal in terms of cycle time.
    # We should add a test size decorator (e.g. small, medium, large) and let
    # that inform sharding.
    device_affinity = int(hashlib.sha1(base_name).hexdigest(), 16) % num_shards

    output['steps'][base_name] = {
      'cmd': ' '.join(base_cmd + [
            '--browser=%s' % possible_browser.browser_type]),
      'device_affinity': device_affinity,
      'perf_dashboard_id': perf_dashboard_id,
    }
    if (possible_reference_browser and
        decorators.IsEnabled(test_class, possible_reference_browser)):
      output['steps'][base_name + '.reference'] = {
        'cmd': ' '.join(base_cmd + [
              '--browser=reference', '--output-trace-tag=_ref']),
        'device_affinity': device_affinity,
        'perf_dashboard_id': perf_dashboard_id,
      }

  return json.dumps(output, indent=2, sort_keys=True)
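The TODO above describes the sharding scheme: hashing the benchmark name with SHA-1 yields a device affinity that is deterministic for a given name and shard count, so a benchmark always lands on the same shard. A minimal standalone sketch of that idea follows; the helper name and the sample benchmark names are illustrative, not taken from the source.

import hashlib

def _StableDeviceAffinity(benchmark_name, num_shards):
  # The SHA-1 digest of the name is deterministic, so the same benchmark maps
  # to the same shard on every run for a given shard count.
  digest = hashlib.sha1(benchmark_name.encode('utf-8')).hexdigest()
  return int(digest, 16) % num_shards

# Illustrative names only; the real names come from test_class.Name().
for name in ['smoothness.top_25', 'page_cycler.typical_25', 'dromaeo.domcore']:
  print('%s -> shard %d' % (name, _StableDeviceAffinity(name, 8)))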
Example 2
    def RunTest(self):
        class SinglePageBenchmark(benchmark_class):  # pylint: disable=no-init
            def CreateStorySet(self, options):
                # pylint: disable=super-on-old-class
                story_set = super(SinglePageBenchmark,
                                  self).CreateStorySet(options)
                stories_to_remove = [
                    s for s in story_set.stories if s != story_to_smoke_test
                ]
                for s in stories_to_remove:
                    story_set.RemoveStory(s)
                assert story_set.stories
                return story_set

        options = GenerateBenchmarkOptions(benchmark_class)
        possible_browser = browser_finder.FindBrowser(options)
        if possible_browser is None:
            self.skipTest('Cannot find the browser to run the test.')
        if (SinglePageBenchmark.ShouldDisable(possible_browser)
                or not decorators.IsEnabled(benchmark_class,
                                            possible_browser)[0]):
            self.skipTest('Benchmark %s is disabled' %
                          SinglePageBenchmark.Name())

        if self.id() in _DISABLED_TESTS:
            self.skipTest('Test is explicitly disabled')

        self.assertEqual(0,
                         SinglePageBenchmark().Run(options),
                         msg='Failed: %s' % benchmark_class)
Example 3
 def IsTestSelected(test):
     if len(args) != 0:
         found = False
         for name in args:
             if name in test.id():
                 found = True
         if not found:
             return False
     if default_options.run_disabled_tests:
         return True
     # pylint: disable=W0212
     if not hasattr(test, '_testMethodName'):
         return True
     method = getattr(test, test._testMethodName)
     return decorators.IsEnabled(method, options.GetBrowserType(), platform)
Example 4
    def _GetPossibleBrowser(self, test, finder_options):
        """Return a possible_browser with the given options for |test|. """
        possible_browser = self._FindBrowser(finder_options)
        finder_options.browser_options.browser_type = (
            possible_browser.browser_type)

        enabled, msg = decorators.IsEnabled(test, possible_browser)
        if not enabled and not finder_options.run_disabled_tests:
            logging.warning(msg)
            logging.warning('You are trying to run a disabled test.')

        if possible_browser.IsRemote():
            possible_browser.RunRemote()
            sys.exit(0)
        return possible_browser
Example 5
    def _GetPossibleBrowser(self, test, finder_options):
        """Return a possible_browser with the given options for |test|. """
        possible_browser = self._FindBrowser(finder_options)
        finder_options.browser_options.browser_type = (
            possible_browser.browser_type)

        (enabled, msg) = decorators.IsEnabled(test, possible_browser)
        if (not enabled and not finder_options.run_disabled_tests):
            logging.warning(msg)
            logging.warning('You are trying to run a disabled test.')
            logging.warning(
                'Pass --also-run-disabled-tests to squelch this message.')
            sys.exit(0)

        if possible_browser.IsRemote():
            possible_browser.RunRemote()
            sys.exit(0)
        return possible_browser
Example 6
    def RunTest(self):
        class SinglePageBenchmark(benchmark_class):  # pylint: disable=no-init
            def CreateStorySet(self, options):
                # pylint: disable=super-on-old-class
                story_set = super(SinglePageBenchmark,
                                  self).CreateStorySet(options)
                stories_to_remove = [
                    s for s in story_set.stories if s != story_to_smoke_test
                ]
                for s in stories_to_remove:
                    story_set.RemoveStory(s)
                assert story_set.stories
                return story_set

        options = GenerateBenchmarkOptions(benchmark_class)

        # Prevent benchmarks from accidentally trying to upload too much data to the
        # chromeperf dashboard. The number of values uploaded is equal to (the
        # average number of values produced by a single story) * (1 + (the number of
        # stories)). The "1 + " accounts for values summarized across all stories.
        # We can approximate "the average number of values produced by a single
        # story" as the number of values produced by the given story.
        # pageset_repeat doesn't matter because values are summarized across
        # repetitions before uploading.
        story_set = benchmark_class().CreateStorySet(options)
        SinglePageBenchmark.MAX_NUM_VALUES = MAX_NUM_VALUES / len(
            story_set.stories)
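        # Hypothetical numbers, to make the cap concrete: if MAX_NUM_VALUES
        # were 50000 and the full story set had 25 stories, each single-story
        # smoke run would be capped at 50000 / 25 == 2000 values.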

        possible_browser = browser_finder.FindBrowser(options)
        if possible_browser is None:
            self.skipTest('Cannot find the browser to run the test.')
        if (SinglePageBenchmark.ShouldDisable(possible_browser)
                or not decorators.IsEnabled(benchmark_class,
                                            possible_browser)[0]):
            self.skipTest('Benchmark %s is disabled' %
                          SinglePageBenchmark.Name())

        if self.id() in _DISABLED_TESTS:
            self.skipTest('Test is explicitly disabled')

        self.assertEqual(0,
                         SinglePageBenchmark().Run(options),
                         msg='Failed: %s' % benchmark_class)
Example 7
def PrintBenchmarkList(benchmarks, possible_browser, output_pipe=sys.stdout):
    """ Print benchmarks that are not filtered in the same order of benchmarks in
  the |benchmarks| list.

  Args:
    benchmarks: the list of benchmarks to be printed (in the same order of the
      list).
    possible_browser: the possible_browser instance that's used for checking
      which benchmarks are enabled.
    output_pipe: the stream in which benchmarks are printed on.
  """
    if not benchmarks:
        print >> output_pipe, 'No benchmarks found!'
        return
    non_benchmarks = [b for b in benchmarks
                      if not issubclass(b, benchmark.Benchmark)]
    assert not non_benchmarks, (
        '|benchmarks| param contains non benchmark classes: %s' % non_benchmarks)

    # Align the benchmark names to the longest one.
    format_string = '  %%-%ds %%s' % max(len(b.Name()) for b in benchmarks)
    disabled_benchmarks = []

    print >> output_pipe, 'Available benchmarks %sare:' % (
        'for %s ' % possible_browser.browser_type if possible_browser else '')
    for benchmark_class in benchmarks:
        if possible_browser and not decorators.IsEnabled(
                benchmark_class, possible_browser)[0]:
            disabled_benchmarks.append(benchmark_class)
            continue
        print >> output_pipe, format_string % (benchmark_class.Name(),
                                               benchmark_class.Description())

    if disabled_benchmarks:
        print >> output_pipe
        print >> output_pipe, (
            'Disabled benchmarks for %s are (force run with -d):' %
            possible_browser.browser_type)
        for benchmark_class in disabled_benchmarks:
            print >> output_pipe, format_string % (
                benchmark_class.Name(), benchmark_class.Description())
    print >> output_pipe, (
        'Pass --browser to list benchmarks for another browser.')
    print >> output_pipe
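The format_string construction used above (and again in a later example) relies on doubled % placeholders; here is a short self-contained sketch of what it expands to, using made-up benchmark names and descriptions.

names = ['speedometer', 'octane']
# The longest name is 11 characters, so format_string becomes '  %-11s %s'.
format_string = '  %%-%ds %%s' % max(len(n) for n in names)
print(format_string % ('octane', 'A made-up description.'))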
Example 8
 def IsTestSelected(test):
   if selected_tests:
     found = False
     for name in selected_tests:
       if selected_tests_are_exact:
         if name == test.id():
           found = True
       else:
         if name in test.id():
           found = True
     if not found:
       return False
   if run_disabled_tests:
     return True
   # pylint: disable=W0212
   if not hasattr(test, '_testMethodName'):
     return True
   method = getattr(test, test._testMethodName)
   return decorators.IsEnabled(method, possible_browser)
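A small self-contained sketch of the exact-versus-substring selection logic above; the test id is made up.

def _NameSelectsTest(name, test_id, exact):
  # Exact mode requires the full unittest id; otherwise any substring matches.
  return name == test_id if exact else name in test_id

test_id = 'benchmark_smoke_unittest.BenchmarkSmokeTest.testFoo'
print(_NameSelectsTest('testFoo', test_id, exact=False))  # True
print(_NameSelectsTest('testFoo', test_id, exact=True))   # False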
Example 9
def _GetJsonTestList(possible_browser, test_classes, num_shards):
    """Returns a list of all enabled tests in a JSON format expected by buildbots.

  JSON format (see build/android/pylib/perf/test_runner.py):
  { "version": int,
    "steps": {
      "foo": {
        "device_affinity": int,
        "cmd": "script_to_execute foo"
      },
      "bar": {
        "device_affinity": int,
        "cmd": "script_to_execute bar"
      }
    }
  }
  """
    output = {'version': 1, 'steps': {}}
    for test_class in test_classes:
        if not issubclass(test_class, benchmark.Benchmark):
            continue
        if not decorators.IsEnabled(test_class, possible_browser):
            continue
        name = test_class.Name()
        output['steps'][name] = {
            'cmd': ' '.join([
                sys.executable, os.path.realpath(sys.argv[0]),
                '--browser=%s' % possible_browser.browser_type,
                '-v', '--output-format=buildbot', name]),
            # TODO(tonyg): Currently we set the device affinity to a stable
            # hash of the test name. This somewhat evenly distributes
            # benchmarks among the requested number of shards. However, it is
            # far from optimal in terms of cycle time. We should add a test
            # size decorator (e.g. small, medium, large) and let that inform
            # sharding.
            'device_affinity':
                int(hashlib.sha1(name).hexdigest(), 16) % num_shards,
        }
    return json.dumps(output, indent=2, sort_keys=True)
Example 10
  def _GetPossibleBrowser(self, test, finder_options):
    """Return a possible_browser with the given options. """
    possible_browser = browser_finder.FindBrowser(finder_options)
    if not possible_browser:
      raise browser_finder_exceptions.BrowserFinderException(
          'No browser found.\n\nAvailable browsers:\n%s\n' %
          '\n'.join(browser_finder.GetAllAvailableBrowserTypes(finder_options)))
    finder_options.browser_options.browser_type = (
        possible_browser.browser_type)

    (enabled, msg) = decorators.IsEnabled(test, possible_browser)
    if (not enabled and
        not finder_options.run_disabled_tests):
      logging.warning(msg)
      logging.warning('You are trying to run a disabled test.')
      logging.warning('Pass --also-run-disabled-tests to squelch this message.')
      sys.exit(0)

    if possible_browser.IsRemote():
      possible_browser.RunRemote()
      sys.exit(0)
    return possible_browser
Example 11
def _PrintBenchmarkList(benchmarks, possible_browser):
    if not benchmarks:
        print >> sys.stderr, 'No benchmarks found!'
        return

    # Align the benchmark names to the longest one.
    format_string = '  %%-%ds %%s' % max(len(b.Name()) for b in benchmarks)

    filtered_benchmarks = [
        benchmark_class for benchmark_class in benchmarks
        if issubclass(benchmark_class, benchmark.Benchmark)
    ]
    disabled_benchmarks = []
    if filtered_benchmarks:
        print >> sys.stderr, 'Available benchmarks %sare:' % (
            'for %s ' %
            possible_browser.browser_type if possible_browser else '')
        for benchmark_class in sorted(filtered_benchmarks,
                                      key=lambda b: b.Name()):
            if possible_browser and not decorators.IsEnabled(
                    benchmark_class, possible_browser):
                disabled_benchmarks.append(benchmark_class)
                continue
            print >> sys.stderr, format_string % (
                benchmark_class.Name(), benchmark_class.Description())

        if disabled_benchmarks:
            print >> sys.stderr, (
                'Disabled benchmarks for %s are (force run with -d): ' %
                possible_browser.browser_type)
            for benchmark_class in disabled_benchmarks:
                print >> sys.stderr, format_string % (
                    benchmark_class.Name(), benchmark_class.Description())
        print >> sys.stderr, (
            'Pass --browser to list benchmarks for another browser.')
        print >> sys.stderr
Example 12
def _IsBenchmarkEnabled(benchmark_class, possible_browser):
    return (issubclass(benchmark_class, benchmark.Benchmark)
            and not benchmark_class.ShouldDisable(possible_browser)
            and decorators.IsEnabled(benchmark_class, possible_browser)[0])
Example 13
  try:
    possible_browser = browser_finder.FindBrowser(finder_options)
  except browser_finder.BrowserTypeRequiredException, e:
    sys.stderr.write(str(e) + '\n')
    sys.exit(-1)
  if not possible_browser:
    sys.stderr.write(
        'No browser found. Available browsers:\n%s\n' %
        '\n'.join(browser_finder.GetAllAvailableBrowserTypes(finder_options)))
    sys.exit(-1)

  browser_options = possible_browser.finder_options.browser_options
  browser_options.browser_type = possible_browser.browser_type
  test.CustomizeBrowserOptions(browser_options)

  if (not decorators.IsEnabled(test, possible_browser) and
      not finder_options.run_disabled_tests):
    logging.warning('You are trying to run a disabled test.')
    logging.warning('Pass --also-run-disabled-tests to squelch this message.')
    return

  if possible_browser.IsRemote():
    possible_browser.RunRemote()
    sys.exit(0)

  # Reorder page set based on options.
  pages = _ShuffleAndFilterPageSet(page_set, finder_options)

  if not finder_options.use_live_sites:
    _UpdateCredentials(page_set)
    if browser_options.wpr_mode != wpr_modes.WPR_RECORD:
Example 14
 def MockPredicate(test):
   method = getattr(test, test._testMethodName)
   return decorators.IsEnabled(method, MockPossibleBrowser(
       browser_type, os_name, os_version_name, supports_tab_control))
Example 15
def _GetJsonBenchmarkList(possible_browser, possible_reference_browser,
                          benchmark_classes, num_shards):
    """Returns a list of all enabled benchmarks in a JSON format expected by
  buildbots.

  JSON format (see build/android/pylib/perf/benchmark_runner.py):
  { "version": <int>,
    "steps": {
      <string>: {
        "device_affinity": <int>,
        "cmd": <string>,
        "perf_dashboard_id": <string>,
      },
      ...
    }
  }
  """
    output = {'version': 1, 'steps': {}}
    for benchmark_class in benchmark_classes:
        if not issubclass(benchmark_class, benchmark.Benchmark):
            continue
        enabled, _ = decorators.IsEnabled(benchmark_class, possible_browser)
        if not enabled:
            continue

        base_name = benchmark_class.Name()
        base_cmd = [
            sys.executable,
            os.path.realpath(sys.argv[0]), '-v', '--output-format=chartjson',
            '--upload-results', base_name
        ]
        perf_dashboard_id = base_name
        # TODO(fmeawad): Currently we set the device affinity to a stable hash of
        # the benchmark name. This somewhat evenly distributes benchmarks among the
        # requested number of shards. However, it is far from optimal in terms of
        # cycle time.  We should add a benchmark size decorator (e.g. small, medium,
        # large) and let that inform sharding.

        # Based on the current timings, we shift the result of the hash function to
        # achieve better load balancing. Those shift values are to be revised when
        # necessary. (See tools/build/scripts/tools/perf/chrome-perf-step-timings.py
        # for more details)
        hash_shift = {2: 47, 5: 56, 8: 50}
        shift = hash_shift.get(num_shards, 0)
        base_name_hash = hashlib.sha1(base_name).hexdigest()
        device_affinity = (int(base_name_hash, 16) >> shift) % num_shards

        output['steps'][base_name] = {
            'cmd': ' '.join(
                base_cmd + ['--browser=%s' % possible_browser.browser_type]),
            'device_affinity': device_affinity,
            'perf_dashboard_id': perf_dashboard_id,
        }
        if possible_reference_browser:
            enabled, _ = decorators.IsEnabled(benchmark_class,
                                              possible_reference_browser)
            if enabled:
                output['steps'][base_name + '.reference'] = {
                    'cmd': ' '.join(base_cmd + [
                        '--browser=reference', '--output-trace-tag=_ref']),
                    'device_affinity': device_affinity,
                    'perf_dashboard_id': perf_dashboard_id,
                }

    return json.dumps(output, indent=2, sort_keys=True)
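Unlike the plain modulo used in the earlier examples, this version shifts the digest right before reducing it, using the per-shard-count table from the comment above. A brief sketch of the difference; the benchmark names are illustrative.

import hashlib

HASH_SHIFT = {2: 47, 5: 56, 8: 50}

def _ShiftedAffinity(name, num_shards):
  shift = HASH_SHIFT.get(num_shards, 0)
  digest = int(hashlib.sha1(name.encode('utf-8')).hexdigest(), 16)
  # Discarding the low bits changes which shard each name lands on; per the
  # comment above, the shift values were tuned against measured step timings.
  return (digest >> shift) % num_shards

for name in ['octane', 'speedometer', 'jetstream']:
  plain = int(hashlib.sha1(name.encode('utf-8')).hexdigest(), 16) % 5
  print('%s: plain %d, shifted %d' % (name, plain, _ShiftedAffinity(name, 5)))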
Example 16
 def MockPredicate(test):
     method = getattr(test, test._testMethodName)
     return decorators.IsEnabled(method, browser_type, platform)
Example 17
    # Create a possible_browser with the given options.
    try:
        possible_browser = browser_finder.FindBrowser(finder_options)
    except browser_finder.BrowserTypeRequiredException, e:
        sys.stderr.write(str(e) + '\n')
        sys.exit(-1)
    if not possible_browser:
        sys.stderr.write('No browser found. Available browsers:\n' + '\n'.join(
            browser_finder.GetAllAvailableBrowserTypes(finder_options)) + '\n')
        sys.exit(-1)

    browser_options = possible_browser.finder_options.browser_options
    browser_options.browser_type = possible_browser.browser_type
    test.CustomizeBrowserOptions(browser_options)

    should_run = decorators.IsEnabled(test, possible_browser)

    should_run = should_run or finder_options.run_disabled_tests

    if not should_run:
        logging.warning('You are trying to run a disabled test.')
        logging.warning(
            'Pass --also-run-disabled-tests to squelch this message.')
        return

    # Reorder page set based on options.
    pages = _ShuffleAndFilterPageSet(page_set, finder_options)

    if (not finder_options.use_live_sites
            and browser_options.wpr_mode != wpr_modes.WPR_RECORD):
        _UpdatePageSetArchivesIfChanged(page_set)
Example 18
    # Create a possible_browser with the given options.
    test.CustomizeBrowserOptions(finder_options)
    try:
        possible_browser = browser_finder.FindBrowser(finder_options)
    except browser_finder.BrowserTypeRequiredException, e:
        sys.stderr.write(str(e) + '\n')
        sys.exit(1)
    if not possible_browser:
        sys.stderr.write('No browser found. Available browsers:\n' + '\n'.join(
            browser_finder.GetAllAvailableBrowserTypes(finder_options)) + '\n')
        sys.exit(1)

    browser_options.browser_type = possible_browser.browser_type

    if not decorators.IsEnabled(test, browser_options.browser_type,
                                possible_browser.platform):
        return results

    # Reorder page set based on options.
    pages = _ShuffleAndFilterPageSet(page_set, finder_options)

    if (not finder_options.allow_live_sites
            and browser_options.wpr_mode != wpr_modes.WPR_RECORD):
        pages = _CheckArchives(page_set, pages, results)

    # Verify credentials path.
    credentials_path = None
    if page_set.credentials_path:
        credentials_path = os.path.join(os.path.dirname(page_set.file_path),
                                        page_set.credentials_path)
        if not os.path.exists(credentials_path):