Example #1
def main(args, output):
    parser = argparse.ArgumentParser(
        description='Fetch the dependencies of perf benchmark(s).')
    parser.add_argument('benchmark_name', type=str, nargs='?')
    parser.add_argument('--force',
                        '-f',
                        help=('Force fetching all the benchmarks when '
                              'benchmark_name is not specified'),
                        action='store_true',
                        default=False)

    options = parser.parse_args(args)

    if options.benchmark_name:
        config = chromium_config.ChromiumConfig(
            top_level_dir=path_util.GetPerfDir(),
            benchmark_dirs=[
                os.path.join(path_util.GetPerfDir(), 'benchmarks')
            ])
        benchmark = benchmark_runner.GetBenchmarkByName(
            options.benchmark_name, config)
        if not benchmark:
            raise ValueError('No such benchmark: %s' % options.benchmark_name)
        FetchDepsForBenchmark(benchmark, output)
    else:
        if not options.force:
            raw_input(
                'No benchmark name is specified. Fetching all benchmark deps. '
                'Press enter to continue...')
        for b in benchmark_finders.GetAllPerfBenchmarks():
            print >> output, ('Fetch dependencies for benchmark %s' % b.Name())
            FetchDepsForBenchmark(b, output)
Example #2
def main(args):
    parser = argparse.ArgumentParser(
        description='Fetch the dependencies of perf benchmark(s).')
    parser.add_argument('benchmark_name', type=str, nargs='?')
    parser.add_argument('--force',
                        '-f',
                        help=('Force fetching all the benchmarks when '
                              'benchmark_name is not specified'),
                        action='store_true',
                        default=False)
    # Flag --output-deps: output the dependencies to a JSON file; CrOS autotest's
    # telemetry_runner parses this output to upload the dependencies to the DUT.
    # Example output, fetch_benchmark_deps.py --output-deps=deps octane:
    # {'octane': ['tools/perf/page_sets/data/octane_002.wprgo']}
    parser.add_argument('--output-deps',
                        help=('Output dependencies to a json file'))
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        dest='verbosity',
                        help='Increase verbosity level (repeat as needed)')

    options = parser.parse_args(args)

    if options.verbosity >= 2:
        logging.getLogger().setLevel(logging.DEBUG)
    elif options.verbosity:
        logging.getLogger().setLevel(logging.INFO)
    else:
        logging.getLogger().setLevel(logging.WARNING)

    deps = {}
    if options.benchmark_name:
        perf_dir = path_util.GetPerfDir()
        benchmark_dirs = [
            os.path.join(perf_dir, 'benchmarks'),
            os.path.join(perf_dir, 'contrib')
        ]
        config = chromium_config.ChromiumConfig(
            top_level_dir=path_util.GetPerfDir(),
            benchmark_dirs=benchmark_dirs)
        benchmark = benchmark_runner.GetBenchmarkByName(
            options.benchmark_name, config)
        if not benchmark:
            raise ValueError('No such benchmark: %s' % options.benchmark_name)
        deps[benchmark.Name()] = _FetchDepsForBenchmark(benchmark)
    else:
        if not options.force:
            raw_input(
                'No benchmark name is specified. Fetching all benchmark deps. '
                'Press enter to continue...')
        for b in benchmark_finders.GetOfficialBenchmarks():
            deps[b.Name()] = _FetchDepsForBenchmark(b)

    if options.output_deps:
        with open(options.output_deps, 'w') as outfile:
            json.dump(deps, outfile)
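
A rough invocation sketch for the example above (the __main__ wrapper itself is an assumption; the script name fetch_benchmark_deps.py and the sample command are taken from the comment in the code, and imports such as sys are assumed to be in place):

if __name__ == '__main__':
    # For example: python fetch_benchmark_deps.py --output-deps=deps octane
    sys.exit(main(sys.argv[1:]))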
Example #3
def ProcessCommandLineArgs(cls, parser, options, extra_args, environment):
    del environment  # unused
    for arg in extra_args:
        if arg == '--browser' or arg.startswith('--browser='):
            parser.error(
                '--browser=... is not allowed when running trybot.')
    all_benchmarks = discover.DiscoverClasses(
        start_dir=path_util.GetPerfBenchmarksDir(),
        top_level_dir=path_util.GetPerfDir(),
        base_class=benchmark.Benchmark).values()
    all_benchmark_names = [b.Name() for b in all_benchmarks]
    all_benchmarks_by_names = {b.Name(): b for b in all_benchmarks}
    benchmark_class = all_benchmarks_by_names.get(options.benchmark_name,
                                                  None)
    if not benchmark_class:
        possible_benchmark_names = matching.GetMostLikelyMatchedObject(
            all_benchmark_names, options.benchmark_name)
        parser.error(
            'No benchmark named "%s". Do you mean any of those benchmarks '
            'below?\n%s' %
            (options.benchmark_name, '\n'.join(possible_benchmark_names)))
    is_benchmark_disabled, reason = cls.IsBenchmarkDisabledOnTrybotPlatform(
        benchmark_class, options.trybot)
    also_run_disabled_option = '--also-run-disabled-tests'
    if is_benchmark_disabled and also_run_disabled_option not in extra_args:
        parser.error('%s To run the benchmark on trybot anyway, add '
                     '%s option.' % (reason, also_run_disabled_option))
Example #4
def GetDefaultChromiumConfig():
    return ChromiumConfig(benchmark_dirs=[
        path_util.GetOfficialBenchmarksDir(),
        path_util.GetContribDir()
    ],
                          top_level_dir=path_util.GetPerfDir(),
                          expectations_files=[path_util.GetExpectationsPath()])
Example #5
def main(args):
    parser = argparse.ArgumentParser(
        description='Fetch the dependencies of perf benchmark(s).')
    parser.add_argument('benchmark_name', type=str, nargs='?')
    parser.add_argument('--force',
                        '-f',
                        help=('Force fetching all the benchmarks when '
                              'benchmark_name is not specified'),
                        action='store_true',
                        default=False)
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        dest='verbosity',
                        help='Increase verbosity level (repeat as needed)')

    options = parser.parse_args(args)

    if options.verbosity >= 2:
        logging.getLogger().setLevel(logging.DEBUG)
    elif options.verbosity:
        logging.getLogger().setLevel(logging.INFO)
    else:
        logging.getLogger().setLevel(logging.WARNING)

    if options.benchmark_name:
        perf_dir = path_util.GetPerfDir()
        benchmark_dirs = [
            os.path.join(perf_dir, 'benchmarks'),
            os.path.join(perf_dir, 'contrib')
        ]
        config = chromium_config.ChromiumConfig(
            top_level_dir=path_util.GetPerfDir(),
            benchmark_dirs=benchmark_dirs)
        benchmark = benchmark_runner.GetBenchmarkByName(
            options.benchmark_name, config)
        if not benchmark:
            raise ValueError('No such benchmark: %s' % options.benchmark_name)
        FetchDepsForBenchmark(benchmark)
    else:
        if not options.force:
            raw_input(
                'No benchmark name is specified. Fetching all benchmark deps. '
                'Press enter to continue...')
        for b in benchmark_finders.GetAllPerfBenchmarks():
            logging.info('Fetch dependencies for benchmark %s', b.Name())
            FetchDepsForBenchmark(b)
Example #6
def main():
    config = chromium_config.ChromiumConfig(
        benchmark_dirs=[
            path_util.GetOfficialBenchmarksDir(),
            path_util.GetContribDir()
        ],
        top_level_dir=path_util.GetPerfDir(),
        expectations_files=[path_util.GetExpectationsPath()])
    return benchmark_runner.main(config)
Example #7
def main(output=sys.stdout):
    config = chromium_config.ChromiumConfig(
        top_level_dir=path_util.GetPerfDir(),
        benchmark_dirs=[os.path.join(path_util.GetPerfDir(), 'benchmarks')])

    name = sys.argv[1]
    benchmark = benchmark_runner.GetBenchmarkByName(name, config)
    if not benchmark:
        raise ValueError('No such benchmark: %s' % name)

    # Download files according to specified benchmark.
    story_set = benchmark().CreateStorySet(None)

    _FetchDependenciesIfNeeded(story_set)

    # Print files downloaded.
    deps = _EnumerateDependencies(story_set)
    for dep in deps:
        print >> output, dep
Example #8
def _GetAllSystemHealthBenchmarks():
    all_perf_benchmarks = discover.DiscoverClasses(
        path_util.GetPerfBenchmarksDir(),
        path_util.GetPerfDir(),
        benchmark_module.Benchmark,
        index_by_class_name=True).values()
    return [
        b for b in all_perf_benchmarks
        if sys.modules[b.__module__] == system_health_benchmark
    ]
Example #9
def GetContribBenchmarks():
  """Returns the list of all contrib benchmarks.
  The benchmarks are sorted by name.
  """
  benchmarks = discover.DiscoverClasses(
      start_dir=path_util.GetContribDir(),
      top_level_dir=path_util.GetPerfDir(),
      base_class=benchmark_module.Benchmark,
      index_by_class_name=True).values()
  benchmarks.sort(key=lambda b: b.Name())
  return benchmarks
Example #10
def ProcessCommandLineArgs(cls, parser, options, extra_args, environment):
    del environment  # unused
    for arg in extra_args:
        if arg == '--browser' or arg.startswith('--browser='):
            parser.error(
                '--browser=... is not allowed when running trybot.')
    all_benchmarks = discover.DiscoverClasses(
        start_dir=path_util.GetPerfBenchmarksDir(),
        top_level_dir=path_util.GetPerfDir(),
        base_class=benchmark.Benchmark).values()
    all_benchmark_names = [b.Name() for b in all_benchmarks]
    if options.benchmark_name not in all_benchmark_names:
        possible_benchmark_names = matching.GetMostLikelyMatchedObject(
            all_benchmark_names, options.benchmark_name)
        parser.error(
            'No benchmark named "%s". Do you mean any of those benchmarks '
            'below?\n%s' %
            (options.benchmark_name, '\n'.join(possible_benchmark_names)))
Example #11
def GetAllContribBenchmarks():
  return discover.DiscoverClasses(
      start_dir=path_util.GetPerfContribDir(),
      top_level_dir=path_util.GetPerfDir(),
      base_class=benchmark_module.Benchmark,
      index_by_class_name=True).values()
Example #12
def GetBenchmarksInSubDirectory(directory):
  return discover.DiscoverClasses(
    start_dir=directory,
    top_level_dir=path_util.GetPerfDir(),
    base_class=benchmark_module.Benchmark,
    index_by_class_name=True).values()
Example #13
def _GetAllPerfBenchmarks():
    return discover.DiscoverClasses(path_util.GetPerfBenchmarksDir(),
                                    path_util.GetPerfDir(),
                                    benchmark_module.Benchmark,
                                    index_by_class_name=True).values()
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import csv
import sys

from core import path_util
sys.path.insert(1, path_util.GetPerfDir())  # To resolve perf imports
sys.path.insert(1, path_util.GetTelemetryDir())  # To resolve telemetry imports
import page_sets
from page_sets.system_health import expectations

def IterAllSystemHealthStories():
  for s in page_sets.SystemHealthStorySet(platform='desktop'):
    yield s
  for s in page_sets.SystemHealthStorySet(platform='mobile'):
    if len(s.SUPPORTED_PLATFORMS) < 2:
      yield s
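
Since this script imports csv, the stories are presumably written out in CSV form; a minimal consumption sketch for IterAllSystemHealthStories (the helper name, the single 'story' column, and writing to sys.stdout are assumptions, not part of this file):

def _PrintStoryNames():
  # Hypothetical helper: emit one CSV row per system health story name.
  writer = csv.writer(sys.stdout)
  writer.writerow(['story'])
  for story in IterAllSystemHealthStories():
    writer.writerow([story.name])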


def PopulateExpectations(all_expectations):
  """Accepts Expectations and parses out the storyname and disabled platforms.

  Args:
    all_expectations = {
        story_name: [[conditions], reason]}
    conditions: list of disabled platforms for story_name
    reason: Bug referencing why the test is disabled on the platform

  Returns: