Example #1
def main(args, output):
    parser = argparse.ArgumentParser(
        description='Fetch the dependencies of perf benchmark(s).')
    parser.add_argument('benchmark_name', type=str, nargs='?')
    parser.add_argument('--force',
                        '-f',
                        help=('Force fetching all the benchmarks when '
                              'benchmark_name is not specified'),
                        action='store_true',
                        default=False)

    options = parser.parse_args(args)

    if options.benchmark_name:
        config = chromium_config.ChromiumConfig(
            top_level_dir=path_util.GetPerfDir(),
            benchmark_dirs=[
                os.path.join(path_util.GetPerfDir(), 'benchmarks')
            ])
        benchmark = benchmark_runner.GetBenchmarkByName(
            options.benchmark_name, config)
        if not benchmark:
            raise ValueError('No such benchmark: %s' % options.benchmark_name)
        FetchDepsForBenchmark(benchmark, output)
    else:
        if not options.force:
            raw_input(
                'No benchmark name is specified. Fetching all benchmark deps. '
                'Press enter to continue...')
        for b in benchmark_finders.GetAllPerfBenchmarks():
            print >> output, ('Fetch dependencies for benchmark %s' % b.Name())
            FetchDepsForBenchmark(b, output)
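The entry point above takes the argument list and an output stream explicitly. A minimal driver sketch (the __main__ guard below is not part of the original listing, only an assumed wiring) would pass the process arguments and stdout:

import sys

if __name__ == '__main__':
    # Hand the command-line arguments and stdout to the entry point shown above.
    main(sys.argv[1:], sys.stdout)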
Example #2
def main(args):
    parser = argparse.ArgumentParser(
        description='Fetch the dependencies of perf benchmark(s).')
    parser.add_argument('benchmark_name', type=str, nargs='?')
    parser.add_argument('--force',
                        '-f',
                        help=('Force fetching all the benchmarks when '
                              'benchmark_name is not specified'),
                        action='store_true',
                        default=False)
    # Flag --output-deps: write the dependencies to a JSON file. CrOS autotest's
    # telemetry_runner parses this output to upload the dependencies to the DUT.
    # Example output, fetch_benchmark_deps.py --output-deps=deps octane:
    # {'octane': ['tools/perf/page_sets/data/octane_002.wprgo']}
    parser.add_argument('--output-deps',
                        help=('Output dependencies to a json file'))
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        dest='verbosity',
                        help='Increase verbosity level (repeat as needed)')

    options = parser.parse_args(args)

    if options.verbosity >= 2:
        logging.getLogger().setLevel(logging.DEBUG)
    elif options.verbosity:
        logging.getLogger().setLevel(logging.INFO)
    else:
        logging.getLogger().setLevel(logging.WARNING)

    deps = {}
    if options.benchmark_name:
        perf_dir = path_util.GetPerfDir()
        benchmark_dirs = [
            os.path.join(perf_dir, 'benchmarks'),
            os.path.join(perf_dir, 'contrib')
        ]
        config = chromium_config.ChromiumConfig(
            top_level_dir=path_util.GetPerfDir(),
            benchmark_dirs=benchmark_dirs)
        benchmark = benchmark_runner.GetBenchmarkByName(
            options.benchmark_name, config)
        if not benchmark:
            raise ValueError('No such benchmark: %s' % options.benchmark_name)
        deps[benchmark.Name()] = _FetchDepsForBenchmark(benchmark)
    else:
        if not options.force:
            raw_input(
                'No benchmark name is specified. Fetching all benchmark deps. '
                'Press enter to continue...')
        for b in benchmark_finders.GetOfficialBenchmarks():
            deps[b.Name()] = _FetchDepsForBenchmark(b)

    if options.output_deps:
        with open(options.output_deps, 'w') as outfile:
            json.dump(deps, outfile)
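The comments above describe the shape of the --output-deps JSON: a mapping from benchmark name to a list of dependency paths. As a consumer-side sketch (the 'deps.json' filename is only a placeholder for whatever path was passed to --output-deps), the file can be read back like this:

import json

# 'deps.json' stands in for the path given to --output-deps.
with open('deps.json') as infile:
    deps = json.load(infile)

# e.g. {'octane': ['tools/perf/page_sets/data/octane_002.wprgo']}
for benchmark_name, paths in sorted(deps.items()):
    print('%s needs %d file(s)' % (benchmark_name, len(paths)))
    for path in paths:
        print('  %s' % path)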
Example #3
def main(args):
    parser = argparse.ArgumentParser(
        description='Fetch the dependencies of perf benchmark(s).')
    parser.add_argument('benchmark_name', type=str, nargs='?')
    parser.add_argument('--force',
                        '-f',
                        help=('Force fetching all the benchmarks when '
                              'benchmark_name is not specified'),
                        action='store_true',
                        default=False)
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        dest='verbosity',
                        help='Increase verbosity level (repeat as needed)')

    options = parser.parse_args(args)

    if options.verbosity >= 2:
        logging.getLogger().setLevel(logging.DEBUG)
    elif options.verbosity:
        logging.getLogger().setLevel(logging.INFO)
    else:
        logging.getLogger().setLevel(logging.WARNING)

    if options.benchmark_name:
        perf_dir = path_util.GetPerfDir()
        benchmark_dirs = [
            os.path.join(perf_dir, 'benchmarks'),
            os.path.join(perf_dir, 'contrib')
        ]
        config = chromium_config.ChromiumConfig(
            top_level_dir=path_util.GetPerfDir(),
            benchmark_dirs=benchmark_dirs)
        benchmark = benchmark_runner.GetBenchmarkByName(
            options.benchmark_name, config)
        if not benchmark:
            raise ValueError('No such benchmark: %s' % options.benchmark_name)
        FetchDepsForBenchmark(benchmark)
    else:
        if not options.force:
            raw_input(
                'No benchmark name is specified. Fetching all benchmark deps. '
                'Press enter to continue...')
        for b in benchmark_finders.GetAllPerfBenchmarks():
            logging.info('Fetch dependencies for benchmark %s', b.Name())
            FetchDepsForBenchmark(b)
Example #4
def main(output=sys.stdout):
    config = chromium_config.ChromiumConfig(
        top_level_dir=path_util.GetPerfDir(),
        benchmark_dirs=[os.path.join(path_util.GetPerfDir(), 'benchmarks')])

    name = sys.argv[1]
    benchmark = benchmark_runner.GetBenchmarkByName(name, config)
    if not benchmark:
        raise ValueError('No such benchmark: %s' % name)

    # Download files according to specified benchmark.
    story_set = benchmark().CreateStorySet(None)

    _FetchDependenciesIfNeeded(story_set)

    # Print files downloaded.
    deps = _EnumerateDependencies(story_set)
    for dep in deps:
        print >> output, dep
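Unlike the earlier variants, this version reads the benchmark name straight from sys.argv[1] and prints each downloaded dependency to the given stream, so a driver sketch (again, not part of the original listing) is simply:

if __name__ == '__main__':
    # Expects the benchmark name as the first command-line argument;
    # output defaults to sys.stdout.
    main()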