Example #1
 def ProcessCommandLineArgs(cls, parser, options, extra_args, environment):
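     """Reject --browser, resolve the requested benchmark by name (suggesting
     close matches on failure), and require --also-run-disabled-tests before
     running a benchmark that is disabled on the chosen trybot platform."""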
     del environment  # unused
     for arg in extra_args:
         if arg == '--browser' or arg.startswith('--browser='):
             parser.error(
                 '--browser=... is not allowed when running trybot.')
     all_benchmarks = benchmark_finders.GetAllPerfBenchmarks()
     all_benchmarks.extend(benchmark_finders.GetAllContribBenchmarks())
     all_benchmark_names = [b.Name() for b in all_benchmarks]
     all_benchmarks_by_names = {b.Name(): b for b in all_benchmarks}
     benchmark_class = all_benchmarks_by_names.get(options.benchmark_name,
                                                   None)
     if not benchmark_class:
         possible_benchmark_names = matching.GetMostLikelyMatchedObject(
             all_benchmark_names, options.benchmark_name)
         parser.error(
             'No benchmark named "%s". Do you mean any of those benchmarks '
             'below?\n%s' %
             (options.benchmark_name, '\n'.join(possible_benchmark_names)))
     is_benchmark_disabled, reason = cls.IsBenchmarkDisabledOnTrybotPlatform(
         benchmark_class, options.trybot)
     also_run_disabled_option = '--also-run-disabled-tests'
     if is_benchmark_disabled and also_run_disabled_option not in extra_args:
         parser.error('%s To run the benchmark on trybot anyway, add '
                      '%s option.' % (reason, also_run_disabled_option))
Example #2
def main(args, output):
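    """Fetch deps for the named benchmark, or for all perf benchmarks after a
    confirmation prompt (skipped with --force), writing progress to output."""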
    parser = argparse.ArgumentParser(
        description='Fetch the dependencies of perf benchmark(s).')
    parser.add_argument('benchmark_name', type=str, nargs='?')
    parser.add_argument('--force',
                        '-f',
                        help=('Force fetching all the benchmarks when '
                              'benchmark_name is not specified'),
                        action='store_true',
                        default=False)

    options = parser.parse_args(args)

    if options.benchmark_name:
        config = chromium_config.ChromiumConfig(
            top_level_dir=path_util.GetPerfDir(),
            benchmark_dirs=[
                os.path.join(path_util.GetPerfDir(), 'benchmarks')
            ])
        benchmark = benchmark_runner.GetBenchmarkByName(
            options.benchmark_name, config)
        if not benchmark:
            raise ValueError('No such benchmark: %s' % options.benchmark_name)
        FetchDepsForBenchmark(benchmark, output)
    else:
        if not options.force:
            raw_input(
                'No benchmark name is specified. Fetching all benchmark deps. '
                'Press enter to continue...')
        for b in benchmark_finders.GetAllPerfBenchmarks():
            print >> output, ('Fetch dependencies for benchmark %s' % b.Name())
            FetchDepsForBenchmark(b, output)
Example #3
def main(args):
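    """Fetch benchmark deps; optionally write a {benchmark name: deps} map to a
    JSON file via --output-deps, with -v/--verbose controlling log verbosity."""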
    parser = argparse.ArgumentParser(
        description='Fetch the dependencies of perf benchmark(s).')
    parser.add_argument('benchmark_name', type=str, nargs='?')
    parser.add_argument('--force',
                        '-f',
                        help=('Force fetching all the benchmarks when '
                              'benchmark_name is not specified'),
                        action='store_true',
                        default=False)
    # Flag --output-deps: output the dependencies to a json file, CrOS autotest
    # telemetry_runner parses the output to upload the dependencies to the DUT.
    # Example output, fetch_benchmark_deps.py --output-deps=deps octane:
    # {'octane': ['tools/perf/page_sets/data/octane_002.wprgo']}
    parser.add_argument('--output-deps',
                        help=('Output dependencies to a json file'))
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        dest='verbosity',
                        help='Increase verbosity level (repeat as needed)')

    options = parser.parse_args(args)

    if options.verbosity >= 2:
        logging.getLogger().setLevel(logging.DEBUG)
    elif options.verbosity:
        logging.getLogger().setLevel(logging.INFO)
    else:
        logging.getLogger().setLevel(logging.WARNING)

    deps = {}
    if options.benchmark_name:
        perf_dir = path_util.GetPerfDir()
        benchmark_dirs = [
            os.path.join(perf_dir, 'benchmarks'),
            os.path.join(perf_dir, 'contrib')
        ]
        config = chromium_config.ChromiumConfig(
            top_level_dir=path_util.GetPerfDir(),
            benchmark_dirs=benchmark_dirs)
        benchmark = benchmark_runner.GetBenchmarkByName(
            options.benchmark_name, config)
        if not benchmark:
            raise ValueError('No such benchmark: %s' % options.benchmark_name)
        deps[benchmark.Name()] = _FetchDepsForBenchmark(benchmark)
    else:
        if not options.force:
            raw_input(
                'No benchmark name is specified. Fetching all benchmark deps. '
                'Press enter to continue...')
        for b in benchmark_finders.GetAllPerfBenchmarks():
            deps[b.Name()] = _FetchDepsForBenchmark(b)

    if options.output_deps:
        with open(options.output_deps, 'w') as outfile:
            json.dump(deps, outfile)
Example #4
def _get_telemetry_perf_benchmarks_metadata():
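    """Map each perf benchmark name to its BenchmarkMetadata: owner emails,
    component, documentation link, and story tags."""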
    metadata = {}
    benchmark_list = benchmark_finders.GetAllPerfBenchmarks()

    for benchmark in benchmark_list:
        emails = decorators.GetEmails(benchmark)
        if emails:
            emails = ', '.join(emails)
        tags_set = benchmark_utils.GetStoryTags(benchmark())
        metadata[benchmark.Name()] = BenchmarkMetadata(
            emails, decorators.GetComponent(benchmark),
            decorators.GetDocumentationLink(benchmark), ','.join(tags_set))
    return metadata
Example #5
def main(args):
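    """Fetch deps for the named benchmark, or for every perf benchmark when no
    name is given (prompting for confirmation unless --force is passed)."""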
    parser = argparse.ArgumentParser(
        description='Fetch the dependencies of perf benchmark(s).')
    parser.add_argument('benchmark_name', type=str, nargs='?')
    parser.add_argument('--force',
                        '-f',
                        help=('Force fetching all the benchmarks when '
                              'benchmark_name is not specified'),
                        action='store_true',
                        default=False)
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        dest='verbosity',
                        help='Increase verbosity level (repeat as needed)')

    options = parser.parse_args(args)

    if options.verbosity >= 2:
        logging.getLogger().setLevel(logging.DEBUG)
    elif options.verbosity:
        logging.getLogger().setLevel(logging.INFO)
    else:
        logging.getLogger().setLevel(logging.WARNING)

    if options.benchmark_name:
        perf_dir = path_util.GetPerfDir()
        benchmark_dirs = [
            os.path.join(perf_dir, 'benchmarks'),
            os.path.join(perf_dir, 'contrib')
        ]
        config = chromium_config.ChromiumConfig(
            top_level_dir=path_util.GetPerfDir(),
            benchmark_dirs=benchmark_dirs)
        benchmark = benchmark_runner.GetBenchmarkByName(
            options.benchmark_name, config)
        if not benchmark:
            raise ValueError('No such benchmark: %s' % options.benchmark_name)
        FetchDepsForBenchmark(benchmark)
    else:
        if not options.force:
            raw_input(
                'No benchmark name is specified. Fetching all benchmark deps. '
                'Press enter to continue...')
        for b in benchmark_finders.GetAllPerfBenchmarks():
            logging.info('Fetch dependencies for benchmark %s', b.Name())
            FetchDepsForBenchmark(b)
Example #6

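# Benchmarks excluded from the perf-waterfall benchmark set built below.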
UNSCHEDULED_TELEMETRY_BENCHMARKS = set([
    'experimental.startup.android.coldish'
])


_SHARD_MAP_DIR = os.path.join(os.path.dirname(__file__), 'shard_maps')


_ALL_TELEMETRY_BENCHMARKS_BY_NAMES = dict(
    (b.Name(), b) for b in benchmark_finders.GetAllBenchmarks())


_ALL_PERF_WATERFALL_TELEMETRY_BENCHMARKS = frozenset(
    s for s in benchmark_finders.GetAllPerfBenchmarks() if s.Name() not in
    UNSCHEDULED_TELEMETRY_BENCHMARKS)


_ANDROID_GO_BENCHMARK_NAMES = {
    'memory.top_10_mobile',
    'system_health.memory_mobile',
    'system_health.common_mobile',
    'power.typical_10_mobile',
    'startup.mobile',
    'system_health.webview_startup',
    'v8.browsing_mobile',
    'speedometer',
    'speedometer2'
}
Example #7
import urllib

from core import benchmark_finders

UNSCHEDULED_TELEMETRY_BENCHMARKS = set([
    'experimental.startup.android.coldish',
    'experimental.startup.mobile',
])

_SHARD_MAP_DIR = os.path.join(os.path.dirname(__file__), 'shard_maps')

_ALL_TELEMETRY_BENCHMARKS_BY_NAMES = dict(
    (b.Name(), b) for b in benchmark_finders.GetAllBenchmarks())

_ALL_PERF_WATERFALL_TELEMETRY_BENCHMARKS = frozenset(
    s for s in benchmark_finders.GetAllPerfBenchmarks()
    if s.Name() not in UNSCHEDULED_TELEMETRY_BENCHMARKS)

_ANDROID_GO_BENCHMARK_NAMES = {
    'memory.top_10_mobile', 'system_health.memory_mobile',
    'system_health.common_mobile', 'power.typical_10_mobile',
    'start_with_url.cold.startup_pages', 'start_with_url.warm.startup_pages',
    'system_health.webview_startup', 'v8.browsing_mobile', 'speedometer',
    'speedometer2'
}


class PerfPlatform(object):
    def __init__(self,
                 name,
                 description,