Example 1
0
def main():
    """Validate the expectations file against all known benchmarks.

    Parses the tagged expectations file, then checks story names and
    component tags. Returns the parser's error code on a parse failure,
    otherwise 0.
    """
    with open(path_util.GetExpectationsPath()) as fh:
        raw_data = fh.read()
    expectations = typ_expectations_parser.TestExpectations()
    error_code, error_msg = expectations.parse_tagged_list(raw_data)
    if error_code:
        logging.error(error_msg)
        return error_code
    all_benchmarks = benchmark_finders.GetAllBenchmarks()
    validate_story_names(all_benchmarks, expectations)
    validate_expectations_component_tags(expectations)
    return 0
def main(args):
  """List or validate disabled stories from the expectations file.

  Args:
    args: command-line argument strings (excluding the program name).

  Returns:
    0 on success; argparse exits the process itself on bad flags.
  """
  parser = argparse.ArgumentParser(
      description=('Tests if disabled stories exist.'))
  parser.add_argument(
      '--list', action='store_true', default=False,
      help=('Prints list of disabled stories.'))
  options = parser.parse_args(args)
  benchmarks = benchmark_finders.GetAllBenchmarks()
  with open(path_util.GetExpectationsPath()) as fp:
    raw_expectations_data = fp.read()
  if options.list:
    stories = GetDisabledStories(benchmarks, raw_expectations_data)
    # Parenthesized single-argument print works identically on Python 2 and
    # Python 3; the bare `print x` statement form is a SyntaxError on Python 3.
    print(json.dumps(stories, sort_keys=True, indent=4,
                     separators=(',', ': ')))
  else:
    validate_story_names(benchmarks, raw_expectations_data)
  return 0
Example 3
0
def main(args):
    """List or validate disabled stories, and check benchmark decorators.

    Args:
      args: command-line argument strings (excluding the program name).

    Returns:
      0 on success; argparse exits the process itself on bad flags.
    """
    parser = argparse.ArgumentParser(
        description=('Tests if disabled stories exist.'))
    parser.add_argument('--list',
                        action='store_true',
                        default=False,
                        help=('Prints list of disabled stories.'))
    options = parser.parse_args(args)
    benchmarks = benchmark_finders.GetAllBenchmarks()

    if options.list:
        stories = GetDisabledStories(benchmarks)
        # Parenthesized single-argument print works identically on Python 2
        # and Python 3; the bare `print x` statement is a SyntaxError on
        # Python 3.
        print(json.dumps(stories,
                         sort_keys=True,
                         indent=4,
                         separators=(',', ': ')))
    else:
        validate_story_names(benchmarks)
        check_decorators(benchmarks)
    return 0
Example 4
0
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import urllib

from core import benchmark_finders
from core import benchmark_utils

from telemetry.story import story_filter


# Location of the shard-map files, resolved relative to this module.
_SHARD_MAP_DIR = os.path.join(os.path.dirname(__file__), 'shard_maps')

# Lookup table: benchmark name -> benchmark class.
_ALL_BENCHMARKS_BY_NAMES = {
    b.Name(): b for b in benchmark_finders.GetAllBenchmarks()}

# Official benchmarks, excluding any whose name marks them as unscheduled.
OFFICIAL_BENCHMARKS = frozenset(
    b for b in benchmark_finders.GetOfficialBenchmarks()
    if not b.Name().startswith('UNSCHEDULED_'))
CONTRIB_BENCHMARKS = frozenset(benchmark_finders.GetContribBenchmarks())
# Union of everything that may be scheduled.
ALL_SCHEDULEABLE_BENCHMARKS = OFFICIAL_BENCHMARKS | CONTRIB_BENCHMARKS
# Sentinel story name; presumably identifies gtest-style benchmarks —
# confirm at call sites.
GTEST_STORY_NAME = '_gtest_'


def _IsPlatformSupported(benchmark, platform):
  supported = benchmark.GetSupportedPlatformNames(benchmark.SUPPORTED_PLATFORMS)
  return 'all' in supported or platform in supported


class PerfPlatform(object):
def main(args):
  """Validate story names across all benchmarks.

  The command-line arguments are accepted but deliberately ignored.
  Returns 0.
  """
  del args  # unused
  validate_story_names(benchmark_finders.GetAllBenchmarks())
  return 0