Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output', required=True)
    # These two flags are passed in from the swarming recipe
    # but will no longer be needed when we migrate to this new recipe.
    # For now we need to recognize them so they don't get passed
    # through to telemetry.
    parser.add_argument('--isolated-script-test-chartjson-output',
                        required=False)
    parser.add_argument('--isolated-script-test-perf-output', required=False)

    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    # TODO(eyaich) We could potentially assume this based on shards == 1 since
    # benchmarks will always have multiple shards.
    parser.add_argument('--non-telemetry',
                        help='Type of perf test',
                        type=bool,
                        default=False)
    parser.add_argument('--testing',
                        help='Testing instance',
                        type=bool,
                        default=False)

    args, rest_args = parser.parse_known_args()
    isolated_out_dir = os.path.dirname(args.isolated_script_test_output)

    if args.non_telemetry:
        # For non-telemetry tests, the benchmark name is the name of the
        # executable.
        benchmark_name = rest_args[0]
        return_code, charts, output_json = run_gtest_perf_test.execute_perf_test(
            args, rest_args)

        write_results(benchmark_name, charts, output_json, isolated_out_dir,
                      True)
    else:
        # First determine what shard we are running on to know how to
        # index into the bot map to get list of benchmarks to run.
        total_shards = None
        shard_index = None

        env = os.environ.copy()
        if 'GTEST_TOTAL_SHARDS' in env:
            total_shards = env['GTEST_TOTAL_SHARDS']
        if 'GTEST_SHARD_INDEX' in env:
            shard_index = env['GTEST_SHARD_INDEX']

        if not (total_shards and shard_index):
            raise Exception('Shard indicators must be present for perf tests')

        sharding_map_path = get_sharding_map_path(total_shards, args.testing)
        with open(sharding_map_path) as f:
            sharding_map = json.load(f)
        sharding = sharding_map[shard_index]['benchmarks']
        return_code = 0

        for benchmark in sharding:
            return_code = (execute_benchmark(benchmark, isolated_out_dir, args,
                                             rest_args, False) or return_code)
            # We ignore the return code of the reference build since we do not
            # monitor it.
            execute_benchmark(benchmark, isolated_out_dir, args, rest_args,
                              True)

    return return_code
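
The shard lookup above reads sharding_map[shard_index]['benchmarks'], which implies a JSON file keyed by shard index (a string, since GTEST_SHARD_INDEX comes from the environment) with a list of benchmark names per shard. A minimal sketch of that assumed shape follows; the benchmark names and values are illustrative, only the structure is inferred from the lookup in main().

import json

# Hypothetical sharding map contents; only the shape is inferred from
# main(), the benchmark names below are illustrative.
EXAMPLE_MAP = """
{
  "0": {"benchmarks": ["speedometer", "blink_perf.layout"]},
  "1": {"benchmarks": ["octane"]}
}
"""

sharding_map = json.loads(EXAMPLE_MAP)
shard_index = "0"  # GTEST_SHARD_INDEX arrives as a string, so keys are strings
print(sharding_map[shard_index]['benchmarks'])
# -> ['speedometer', 'blink_perf.layout']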
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output', required=True)
    # These two flags are passed in from the swarming recipe
    # but will no longer be needed when we migrate to this new recipe.
    # For now we need to recognize them so they don't get passed
    # through to telemetry.
    parser.add_argument('--isolated-script-test-chartjson-output',
                        required=False)
    parser.add_argument('--isolated-script-test-perf-output', required=False)

    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    parser.add_argument('--non-telemetry',
                        help='Type of perf test',
                        type=bool,
                        default=False)
    parser.add_argument('--testing',
                        help='Test run; execute a subset of tests',
                        type=bool,
                        default=False)
    parser.add_argument(
        '--benchmarks',
        help='Comma-separated list of benchmark names'
        ' to run in lieu of indexing into our benchmark bot maps',
        required=False)
    parser.add_argument('--output-format', action='append')

    args, rest_args = parser.parse_known_args()
    isolated_out_dir = os.path.dirname(args.isolated_script_test_output)
    return_code = 0

    if args.non_telemetry:
        # For non-telemetry tests, the benchmark name is the name of the
        # executable.
        benchmark_name = rest_args[0]
        return_code, charts, output_json = run_gtest_perf_test.execute_perf_test(
            args, rest_args)

        write_results(benchmark_name, charts, output_json, isolated_out_dir,
                      True)
    else:
        # If the user has supplied a list of benchmark names, execute those instead
        # of the entire suite of benchmarks.
        if args.benchmarks:
            benchmarks = args.benchmarks.split(',')
            for benchmark in benchmarks:
                return_code = (execute_benchmark(benchmark, isolated_out_dir,
                                                 args, rest_args, False)
                               or return_code)
        else:
            # First determine what shard we are running on to know how to
            # index into the bot map to get list of benchmarks to run.
            total_shards = None
            shard_index = None

            env = os.environ.copy()
            if 'GTEST_TOTAL_SHARDS' in env:
                total_shards = env['GTEST_TOTAL_SHARDS']
            if 'GTEST_SHARD_INDEX' in env:
                shard_index = env['GTEST_SHARD_INDEX']

            if not (total_shards and shard_index):
                raise Exception(
                    'Shard indicators must be present for perf tests')

            sharding_map_path = get_sharding_map_path(total_shards,
                                                      args.testing)
            with open(sharding_map_path) as f:
                sharding_map = json.load(f)
            sharding = sharding_map[shard_index]['benchmarks']

            # We don't execute tests on the reference build on android webview
            # since telemetry doesn't support it.  See crbug.com/612455
            is_webview = any(
                ('browser' in a and 'webview' in a) for a in rest_args)

            for benchmark in sharding:
                # Run each benchmark twice: once on the browser under test and
                # once on the reference build.
                return_code = (execute_benchmark(benchmark, isolated_out_dir,
                                                 args, rest_args, False)
                               or return_code)
                # We ignore the return code of the reference build since we do not
                # monitor it.
                if not is_webview:
                    execute_benchmark(benchmark, isolated_out_dir, args,
                                      rest_args, True)

    return return_code
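
This revision adds a --benchmarks bypass and a webview check. A small sketch of both, assuming an illustrative script name, benchmark names, and browser flag value; only the flags defined in the parser above are taken from the code.

# Hypothetical invocation that skips the GTEST_* shard lookup entirely
# (script name and benchmark names are assumptions):
#   run_performance_tests.py \
#       --isolated-script-test-output=/tmp/out/output.json \
#       --benchmarks=speedometer,blink_perf.layout -- --browser=release

# With --benchmarks set, main() simply splits the comma-separated list and
# runs each name through execute_benchmark():
benchmarks_arg = "speedometer,blink_perf.layout"
print(benchmarks_arg.split(','))  # ['speedometer', 'blink_perf.layout']

# The webview check scans the passthrough args for a browser flag that names
# webview; the flag value below is illustrative.
rest_args = ['--browser=android-webview']
is_webview = any(('browser' in a and 'webview' in a) for a in rest_args)
print(is_webview)  # True -> reference-build runs are skipped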
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output', required=True)
    # These two flags are passed in from the swarming recipe
    # but will no longer be needed when we migrate to this new recipe.
    # For now we need to recognize them so they don't get passed
    # through to telemetry.
    parser.add_argument('--isolated-script-test-chartjson-output',
                        required=False)
    parser.add_argument('--isolated-script-test-perf-output', required=False)

    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    parser.add_argument('--non-telemetry',
                        help='Type of perf test',
                        type=bool,
                        default=False)
    parser.add_argument(
        '--benchmarks',
        help='Comma-separated list of benchmark names'
        ' to run in lieu of indexing into our benchmark bot maps',
        required=False)
    # Some executions may have a different sharding scheme and/or set of tests.
    # These files must live in src/tools/perf/core/
    parser.add_argument('--test-shard-map-filename', type=str, required=False)
    parser.add_argument('--output-format', action='append')
    parser.add_argument('--run-ref-build',
                        help='Run test on reference browser',
                        action='store_true')

    args, rest_args = parser.parse_known_args()
    isolated_out_dir = os.path.dirname(args.isolated_script_test_output)
    return_code = 0

    if args.non_telemetry:
        # For non-telemetry tests, the benchmark name is the name of the
        # executable.
        benchmark_name = rest_args[0]
        return_code, charts, output_json = run_gtest_perf_test.execute_perf_test(
            args, rest_args)

        write_results(benchmark_name,
                      charts,
                      output_json,
                      benchmark_log='Not available for C++ perf test',
                      isolated_out_dir=isolated_out_dir,
                      encoded=True)
    else:
        # If the user has supplied a list of benchmark names, execute those instead
        # of the entire suite of benchmarks.
        if args.benchmarks:
            benchmarks = args.benchmarks.split(',')
            for benchmark in benchmarks:
                return_code = (execute_benchmark(benchmark, isolated_out_dir,
                                                 args, rest_args, False)
                               or return_code)
        else:
            # First determine what shard we are running on to know how to
            # index into the bot map to get list of benchmarks to run.
            total_shards = None
            shard_index = None

            env = os.environ.copy()
            if 'GTEST_TOTAL_SHARDS' in env:
                total_shards = env['GTEST_TOTAL_SHARDS']
            if 'GTEST_SHARD_INDEX' in env:
                shard_index = env['GTEST_SHARD_INDEX']

            if not (total_shards and shard_index):
                raise Exception(
                    'Shard indicators must be present for perf tests')

            sharding_map_path = get_sharding_map_path(args)
            with open(sharding_map_path) as f:
                sharding_map = json.load(f)
            sharding = sharding_map[shard_index]['benchmarks']

            for benchmark, stories in sharding.items():
                # Run each benchmark twice: once on the browser under test and
                # once on the reference build.
                return_code = (execute_benchmark(benchmark,
                                                 isolated_out_dir,
                                                 args,
                                                 rest_args,
                                                 False,
                                                 stories=stories)
                               or return_code)
                # We ignore the return code of the reference build since we do not
                # monitor it.
                if args.run_ref_build:
                    execute_benchmark(benchmark,
                                      isolated_out_dir,
                                      args,
                                      rest_args,
                                      True,
                                      stories=stories)

    return return_code
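
Because this version iterates the shard's benchmarks as (benchmark, stories) pairs and passes each value through as stories=stories, the per-shard 'benchmarks' entry is now a mapping from benchmark name to a story descriptor rather than a flat list. A sketch of that assumed shape follows; the descriptor fields and names are illustrative, only the benchmark-to-stories mapping is inferred from the loop above.

# Hypothetical shard entry for story-level sharding; only the outer shape
# (shard index -> 'benchmarks' -> {benchmark: stories}) is inferred from
# main(), the story descriptor fields are illustrative.
sharding_map = {
    "0": {
        "benchmarks": {
            "speedometer": {"begin": 0, "end": 10},
            "blink_perf.layout": {"begin": 0, "end": 25},
        }
    }
}

sharding = sharding_map["0"]['benchmarks']
for benchmark, stories in sharding.items():
    # Each pair would feed execute_benchmark(..., stories=stories).
    print(benchmark, stories)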