Code Example #1
import argparse
import logging
import os


def main():
    # 1. GET USER INPUTS
    parser = argparse.ArgumentParser(
        description='Run all the MXNet operator benchmarks')

    parser.add_argument(
        '--ctx',
        type=str,
        default='cpu',
        help='Global context to run all benchmarks. By default, cpu on a '
        'CPU machine, gpu(0) on a GPU machine. '
        'Valid Inputs - cpu, gpu, gpu(0), gpu(1)...')
    parser.add_argument(
        '--dtype',
        type=str,
        default='float32',
        help='DType (Precision) to run benchmarks. By default, '
        'float32. Valid Inputs - float32, float64, int32, '
        'int64')
    parser.add_argument(
        '-f',
        '--output-format',
        type=str,
        default='json',
        choices=['json', 'md'],
        help='Benchmark result output format. By default, json. '
        'Valid Inputs - json, md')

    parser.add_argument('-o',
                        '--output-file',
                        type=str,
                        default='./mxnet_operator_benchmarks.json',
                        help='Name and path for the '
                        'output file.')

    args = parser.parse_args()
    logging.info(
        f"Running MXNet operator benchmarks with the following options: {args}"
    )
    assert not os.path.isfile(
        args.output_file), f"Output file {args.output_file} already exists."

    # 2. RUN BENCHMARKS
    ctx = _parse_mxnet_context(args.ctx)
    dtype = args.dtype
    final_benchmark_results = run_all_mxnet_operator_benchmarks(ctx=ctx,
                                                                dtype=dtype)

    # 3. PREPARE OUTPUTS
    run_time_features = get_current_runtime_features()
    save_to_file(final_benchmark_results, args.output_file, args.output_format,
                 run_time_features)

    # 4. Generate list of MXNet operators not covered in benchmarks
    ops_not_covered = get_operators_with_no_benchmark(
        final_benchmark_results.keys())
    for idx, op in enumerate(ops_not_covered):
        print(f"{idx}. {op}")

    return 0
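
For reference, a possible invocation of this script from the command line, assuming it is saved as opperf.py (the file name is an assumption; the flags are exactly those defined by the parser above):

    $ python opperf.py --ctx gpu --dtype float64 -f md -o ./op_benchmarks.md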
Code Example #2
    # NOTE: this example is an excerpt; it resumes inside main() at the
    # '--output-format' argument definition (cf. Code Example #1).
        default='json',
        help='Benchmark result output format. By default, json. '
        'Valid Inputs - json, md')

    parser.add_argument('--output-file',
                        type=str,
                        default='./mxnet_operator_benchmarks.json',
                        help='Name and path for the '
                        'output file.')

    user_options = parser.parse_args()
    logging.info(
        f"Running MXNet operator benchmarks with the following options: {user_options}"
    )

    # 2. RUN BENCHMARKS
    ctx = _parse_mxnet_context(user_options.ctx)
    dtype = user_options.dtype
    final_benchmark_results = run_all_mxnet_operator_benchmarks(ctx=ctx,
                                                                dtype=dtype)

    # 3. PREPARE OUTPUTS
    save_to_file(final_benchmark_results, user_options.output_file,
                 user_options.output_format)

    # 4. Generate list of MXNet operators not covered in benchmarks
    ops_not_covered = get_operators_with_no_benchmark(
        final_benchmark_results.keys())
    for idx, op in enumerate(ops_not_covered):
        print(f"{idx}. {op}")
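
The helper save_to_file is not shown in these excerpts. Below is a minimal sketch of what its JSON branch could look like, assuming the results are a plain dict; the name and signature are taken from the call above, but the body is an illustration, not the repository's implementation:

import json


def save_to_file(benchmark_results, output_file, output_format):
    # Illustrative sketch only; the real helper lives elsewhere in the package.
    if output_format == 'json':
        with open(output_file, 'w') as result_file:
            json.dump(benchmark_results, result_file, indent=4)
    elif output_format == 'md':
        # A Markdown writer would render the same dict as a table instead.
        raise NotImplementedError("md output is out of scope for this sketch")
    else:
        raise ValueError(f"Unsupported output format: {output_format}")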
Code Example #3
import argparse
import logging
import os


def main():
    # 1. GET USER INPUTS
    parser = argparse.ArgumentParser(
        description='Run all the MXNet operator benchmarks')

    parser.add_argument(
        '--ctx',
        type=str,
        default='cpu',
        help='Global context to run all benchmarks. By default, cpu on a '
        'CPU machine, gpu(0) on a GPU machine. '
        'Valid Inputs - cpu, gpu, gpu(0), gpu(1)...')
    parser.add_argument(
        '--dtype',
        type=str,
        default='float32',
        help='DType (Precision) to run benchmarks. By default, '
        'float32. Valid Inputs - float32, float64, int32, '
        'int64')
    parser.add_argument(
        '-f',
        '--output-format',
        type=str,
        default='json',
        choices=['json', 'md'],
        help='Benchmark result output format. By default, json. '
        'Valid Inputs - json, md')

    parser.add_argument('-o',
                        '--output-file',
                        type=str,
                        default='./mxnet_operator_benchmarks.json',
                        help='Name and path for the '
                        'output file.')

    parser.add_argument('-p',
                        '--profiler',
                        type=str,
                        default='native',
                        help='Use built-in CPP profiler (native) or Python '
                        'time module. '
                        'Valid Inputs - native, python')

    parser.add_argument('-w',
                        '--warmup',
                        type=int,
                        default=25,
                        help='Number of times to run for warmup. '
                        'Valid Inputs - positive integers')

    parser.add_argument('-r',
                        '--runs',
                        type=int,
                        default=100,
                        help='Number of runs to capture benchmark results. '
                        'Valid Inputs - positive integers')

    args = parser.parse_args()
    logging.info(
        f"Running MXNet operator benchmarks with the following options: {args}"
    )
    assert not os.path.isfile(
        args.output_file), f"Output file {args.output_file} already exists."

    # 2. RUN BENCHMARKS
    ctx = _parse_mxnet_context(args.ctx)
    dtype = args.dtype
    profiler = args.profiler
    warmup = args.warmup
    runs = args.runs
    benchmark_results = run_all_mxnet_operator_benchmarks(ctx=ctx,
                                                          dtype=dtype,
                                                          profiler=profiler,
                                                          warmup=warmup,
                                                          runs=runs)

    # Sort benchmark results alphabetically by op name
    final_benchmark_results = dict(sorted(benchmark_results.items()))

    # 3. PREPARE OUTPUTS
    run_time_features = get_current_runtime_features()
    save_to_file(final_benchmark_results, args.output_file, args.output_format,
                 run_time_features, profiler)

    # 4. Generate list of MXNet operators not covered in benchmarks
    ops_not_covered = get_operators_with_no_benchmark(
        final_benchmark_results.keys())
    for idx, op in enumerate(ops_not_covered):
        print(f"{idx}. {op}")

    return 0
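
All three examples rely on _parse_mxnet_context to turn the --ctx string into an MXNet context object. The implementation is not shown; a minimal sketch under the assumption that the valid inputs are those listed in the --ctx help text (the parsing logic here is illustrative, only mx.cpu() and mx.gpu() are real MXNet APIs):

import re

import mxnet as mx


def _parse_mxnet_context(ctx):
    # Illustrative sketch; the real parser lives elsewhere in the package.
    if ctx == 'cpu':
        return mx.cpu()
    if ctx == 'gpu':
        return mx.gpu(0)
    # Accept strings of the form 'gpu(N)', e.g. 'gpu(1)'.
    match = re.match(r'gpu\((\d+)\)$', ctx)
    if match:
        return mx.gpu(int(match.group(1)))
    raise ValueError(
        f"Invalid context '{ctx}'. Valid inputs: cpu, gpu, gpu(0), gpu(1)...")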