Example #1
def main(args):
    parser = argparse.ArgumentParser(
        description='Fetch the dependencies of perf benchmark(s).')
    parser.add_argument('benchmark_name', type=str, nargs='?')
    parser.add_argument('--force',
                        '-f',
                        help=('Force fetching all the benchmarks when '
                              'benchmark_name is not specified'),
                        action='store_true',
                        default=False)
    # --output-deps writes the dependencies to a JSON file; CrOS autotest's
    # telemetry_runner parses that output to upload the dependencies to the DUT.
    # Example output of `fetch_benchmark_deps.py --output-deps=deps octane`:
    # {'octane': ['tools/perf/page_sets/data/octane_002.wprgo']}
    parser.add_argument('--output-deps',
                        help=('Output dependencies to a json file'))
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        dest='verbosity',
                        default=0,
                        help='Increase verbosity level (repeat as needed)')

    options = parser.parse_args(args)

    if options.verbosity >= 2:
        logging.getLogger().setLevel(logging.DEBUG)
    elif options.verbosity:
        logging.getLogger().setLevel(logging.INFO)
    else:
        logging.getLogger().setLevel(logging.WARNING)

    deps = {}
    if options.benchmark_name:
        perf_dir = path_util.GetPerfDir()
        benchmark_dirs = [
            os.path.join(perf_dir, 'benchmarks'),
            os.path.join(perf_dir, 'contrib')
        ]
        config = chromium_config.ChromiumConfig(
            top_level_dir=perf_dir,
            benchmark_dirs=benchmark_dirs)
        benchmark = benchmark_runner.GetBenchmarkByName(
            options.benchmark_name, config)
        if not benchmark:
            raise ValueError('No such benchmark: %s' % options.benchmark_name)
        deps[benchmark.Name()] = _FetchDepsForBenchmark(benchmark)
    else:
        if not options.force:
            raw_input(
                'No benchmark name is specified. Fetching all benchmark deps. '
                'Press enter to continue...')
        for b in benchmark_finders.GetOfficialBenchmarks():
            deps[b.Name()] = _FetchDepsForBenchmark(b)

    if options.output_deps:
        with open(options.output_deps, 'w') as outfile:
            json.dump(deps, outfile)
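
For context, a minimal sketch of how this entry point is typically invoked as a script; the sys.argv wiring below is an assumption, since the example only shows main(args) itself.

import sys

if __name__ == '__main__':
    # Hypothetical wiring: pass everything after the script name to main(), so
    # e.g. `fetch_benchmark_deps.py octane --output-deps=deps` writes
    # {'octane': [...]} to the file named 'deps'.
    sys.exit(main(sys.argv[1:]))
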
Example #2
def _get_telemetry_perf_benchmarks_metadata():
    """Maps each official Telemetry benchmark name to its BenchmarkMetadata."""
    metadata = {}
    for benchmark in benchmark_finders.GetOfficialBenchmarks():
        benchmark_name = benchmark.Name()
        emails = decorators.GetEmails(benchmark)
        if emails:
            emails = ', '.join(emails)
        metadata[benchmark_name] = BenchmarkMetadata(
            emails=emails,
            component=decorators.GetComponent(benchmark),
            documentation_url=decorators.GetDocumentationLink(benchmark),
            stories=benchmark_utils.GetBenchmarkStoryInfo(benchmark()))
    return metadata
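
As an illustration of how the returned mapping might be consumed, here is a hedged sketch; the namedtuple definition mirrors the keyword arguments used above but is an assumption, since the real BenchmarkMetadata class is defined elsewhere.

import collections

# Assumed record shape, matching the keyword arguments passed above.
BenchmarkMetadata = collections.namedtuple(
    'BenchmarkMetadata',
    ['emails', 'component', 'documentation_url', 'stories'])

def print_benchmark_owners(metadata):
    # Lists each benchmark with its owner emails (already comma-joined above).
    for name, info in sorted(metadata.items()):
        print('%s: %s' % (name, info.emails or 'no owners listed'))
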
Example #3
def _get_telemetry_perf_benchmarks_metadata():
    metadata = {}
    benchmark_list = benchmark_finders.GetOfficialBenchmarks()

    for benchmark in benchmark_list:
        emails = decorators.GetEmails(benchmark)
        if emails:
            emails = ', '.join(emails)
        tags_set = benchmark_utils.GetStoryTags(benchmark())
        metadata[benchmark.Name()] = BenchmarkMetadata(
            emails, decorators.GetComponent(benchmark),
            decorators.GetDocumentationLink(benchmark), ','.join(tags_set))
    return metadata
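
Compared with Example #2, this variant builds BenchmarkMetadata positionally and stores the story tags as a single comma-joined string rather than full story info. A small hedged sketch of reading that string back out; 'tags' is an illustrative attribute name, since the real field name is not shown in the snippet.

def benchmarks_with_tag(metadata, tag):
    # 'tags' stands in for whichever field receives ','.join(tags_set) above.
    return sorted(name for name, info in metadata.items()
                  if tag in info.tags.split(','))
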
Example #4
import os
import urllib

from core import benchmark_finders
from core import benchmark_utils

from telemetry.story import story_filter


_SHARD_MAP_DIR = os.path.join(os.path.dirname(__file__), 'shard_maps')

_ALL_BENCHMARKS_BY_NAMES = dict(
    (b.Name(), b) for b in benchmark_finders.GetAllBenchmarks())

OFFICIAL_BENCHMARKS = frozenset(
    b for b in benchmark_finders.GetOfficialBenchmarks()
    if not b.Name().startswith('UNSCHEDULED_'))
CONTRIB_BENCHMARKS = frozenset(benchmark_finders.GetContribBenchmarks())
ALL_SCHEDULEABLE_BENCHMARKS = OFFICIAL_BENCHMARKS | CONTRIB_BENCHMARKS
GTEST_STORY_NAME = '_gtest_'


def _IsPlatformSupported(benchmark, platform):
  # True if the benchmark runs on all platforms or explicitly lists |platform|.
  supported = benchmark.GetSupportedPlatformNames(benchmark.SUPPORTED_PLATFORMS)
  return 'all' in supported or platform in supported
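
# Hedged usage sketch (not part of the original module): filter the
# scheduleable benchmarks down to one platform. The platform string
# 'android' is an illustrative assumption.
#
#   android_benchmarks = [
#       b for b in ALL_SCHEDULEABLE_BENCHMARKS
#       if _IsPlatformSupported(b, 'android')
#   ]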


class PerfPlatform(object):
  def __init__(self,
               name,
               description,