Example #1

import tests.benchmarks.utils as utils    # test-local helper module; path assumed
from superbench.benchmarks import BenchmarkRegistry, BenchmarkType, Framework, Platform, ReturnCode
from superbench.benchmarks.micro_benchmarks.sharding_matmul import ShardingMatmul, ShardingMode    # module path assumed
from superbench.common.utils import network


def test_pytorch_sharding_matmul():
    """Test pytorch-sharding-matmul benchmark."""
    context = BenchmarkRegistry.create_benchmark_context(
        'sharding-matmul',
        platform=Platform.CUDA,
        parameters='--run_count 2 --num_steps 20',
        framework=Framework.PYTORCH)

    assert (BenchmarkRegistry.is_benchmark_context_valid(context))

    port = network.get_free_port()
    assert (port)
    utils.setup_simulated_ddp_distributed_env(1, 0, port)
    benchmark = BenchmarkRegistry.launch_benchmark(context)

    # Check basic information.
    assert (benchmark)
    assert (isinstance(benchmark, ShardingMatmul))
    assert (benchmark.name == 'pytorch-sharding-matmul')
    assert (benchmark.type == BenchmarkType.MICRO)

    # Check predefined parameters of sharding-matmul benchmark.
    assert (benchmark._args.mode == [
        ShardingMode.ALLREDUCE, ShardingMode.ALLGATHER
    ])

    # Check parameters specified in BenchmarkContext.
    assert (benchmark._args.run_count == 2)
    assert (benchmark._args.num_steps == 20)

    # Check results and metrics.
    assert (benchmark.run_count == 2)
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    for metric in ['allreduce_time', 'allgather_time']:
        assert (len(benchmark.raw_data[metric]) == benchmark.run_count)
        assert (len(benchmark.raw_data[metric][0]) == benchmark._args.num_steps)
        assert (len(benchmark.result[metric]) == benchmark.run_count)

    utils.clean_simulated_ddp_distributed_env()
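
The utils.setup_simulated_ddp_distributed_env and utils.clean_simulated_ddp_distributed_env helpers are test-local and not shown in this example. A minimal sketch of what such helpers could look like, assuming torch.distributed picks up the usual MASTER_ADDR/MASTER_PORT/WORLD_SIZE/RANK environment variables (the names and behavior here are assumptions, not the repo's actual helpers):

import os


def setup_simulated_ddp_distributed_env(world_size, rank, port):
    """Hypothetical sketch: export the env vars torch.distributed reads."""
    os.environ['WORLD_SIZE'] = str(world_size)
    os.environ['RANK'] = str(rank)
    os.environ['LOCAL_RANK'] = str(rank)
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = str(port)


def clean_simulated_ddp_distributed_env():
    """Hypothetical sketch: remove the simulated env vars again."""
    for var in ('WORLD_SIZE', 'RANK', 'LOCAL_RANK', 'MASTER_ADDR', 'MASTER_PORT'):
        os.environ.pop(var, None)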

Example #2

import numbers

from superbench.benchmarks import BenchmarkRegistry, BenchmarkType, Platform, ReturnCode    # import path assumed


def test_cublas_functions():
    """Test cublas-function benchmark."""
    # Test for default configuration
    context = BenchmarkRegistry.create_benchmark_context(
        'cublas-function',
        platform=Platform.CUDA,
        parameters='--num_warmup 10 --num_steps 10 --num_in_step 100')

    assert (BenchmarkRegistry.is_benchmark_context_valid(context))

    benchmark = BenchmarkRegistry.launch_benchmark(context)

    # Check basic information.
    assert (benchmark)
    assert (benchmark.name == 'cublas-function')
    assert (benchmark.type == BenchmarkType.MICRO)

    # Check parameters specified in BenchmarkContext.
    assert (benchmark._args.num_warmup == 10)
    assert (benchmark._args.num_steps == 10)
    assert (benchmark._args.num_in_step == 100)

    # Check results and metrics.
    assert (benchmark.run_count == 1)
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    assert ('raw_output_0' in benchmark.raw_data)
    assert (len(benchmark.raw_data['raw_output_0']) == 1)
    assert (isinstance(benchmark.raw_data['raw_output_0'][0], str))

    assert (19 <= len(benchmark.result))
    for metric in list(benchmark.result.keys()):
        assert (len(benchmark.result[metric]) == 1)
        assert (isinstance(benchmark.result[metric][0], numbers.Number))
        if metric != 'return_code':
            assert (len(benchmark.raw_data[metric][0]) == benchmark._args.num_steps)

    # Test for custom configuration
    custom_config_str = '{"name":"cublasCgemm","m":512,"n":512,"k":32,"transa":1,"transb":0}'
    context = BenchmarkRegistry.create_benchmark_context(
        'cublas-function',
        platform=Platform.CUDA,
        parameters='--num_warmup 10 --num_steps 10 --num_in_step 100 '
        '--config_json_str ' + custom_config_str)

    assert (BenchmarkRegistry.is_benchmark_context_valid(context))

    benchmark = BenchmarkRegistry.launch_benchmark(context)

    # Check basic information.
    assert (benchmark)
    assert (benchmark.name == 'cublas-function')
    assert (benchmark.type == BenchmarkType.MICRO)

    # Check parameters specified in BenchmarkContext.
    assert (benchmark._args.num_warmup == 10)
    assert (benchmark._args.num_steps == 10)
    assert (benchmark._args.num_in_step == 100)

    # Check results and metrics.
    assert (benchmark.run_count == 1)
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    assert ('raw_output_0' in benchmark.raw_data)
    assert (len(benchmark.raw_data['raw_output_0']) == 1)
    assert (isinstance(benchmark.raw_data['raw_output_0'][0], str))

    assert (1 + benchmark.default_metric_count == len(benchmark.result))
    for metric in list(benchmark.result.keys()):
        assert (len(benchmark.result[metric]) == 1)
        assert (isinstance(benchmark.result[metric][0], numbers.Number))
        if metric != 'return_code':
            assert (len(benchmark.raw_data[metric][0]) == benchmark._args.num_steps)
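
Rather than hand-writing custom_config_str, the same compact JSON can be built from a dict; a small sketch (separators=(',', ':') keeps the output whitespace-free, so the JSON survives being embedded in the space-delimited parameters string):

import json

# Build the cuBLAS custom config from a dict; compact separators emit no
# spaces, so the string can be appended to the parameters line intact.
custom_config = {'name': 'cublasCgemm', 'm': 512, 'n': 512, 'k': 32, 'transa': 1, 'transb': 0}
custom_config_str = json.dumps(custom_config, separators=(',', ':'))
assert custom_config_str == '{"name":"cublasCgemm","m":512,"n":512,"k":32,"transa":1,"transb":0}'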

Example #3

import numbers

from superbench.benchmarks import BenchmarkRegistry, BenchmarkType, Platform, ReturnCode    # import path assumed


def test_cudnn_functions():
    """Test cudnn-function benchmark."""
    # Test for default configuration
    context = BenchmarkRegistry.create_benchmark_context(
        'cudnn-function',
        platform=Platform.CUDA,
        parameters='--num_warmup 10 --num_steps 10 --num_in_step 100')

    assert (BenchmarkRegistry.is_benchmark_context_valid(context))

    benchmark = BenchmarkRegistry.launch_benchmark(context)

    # Check basic information.
    assert (benchmark)
    assert (benchmark.name == 'cudnn-function')
    assert (benchmark.type == BenchmarkType.MICRO)

    # Check parameters specified in BenchmarkContext.
    assert (benchmark._args.num_warmup == 10)
    assert (benchmark._args.num_steps == 10)
    assert (benchmark._args.num_in_step == 100)

    # Check results and metrics.
    assert (benchmark.run_count == 1)
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    assert ('raw_output_0' in benchmark.raw_data)
    assert (len(benchmark.raw_data['raw_output_0']) == 1)
    assert (isinstance(benchmark.raw_data['raw_output_0'][0], str))

    assert (18 <= len(benchmark.result))
    for metric in list(benchmark.result.keys()):
        assert (len(benchmark.result[metric]) == 1)
        assert (isinstance(benchmark.result[metric][0], numbers.Number))
        if metric != 'return_code':
            assert (len(benchmark.raw_data[metric][0]) == benchmark._args.num_steps)

    # Test for custom configuration
    custom_config_str = '{"algo":0,"arrayLength":2,"convType":0,"dilationA":[1,1],"filterStrideA":[1,1],' \
        + '"filterDims":[32,128,3,3],"inputDims":[32,128,14,14],"inputStride":[25088,196,14,1],"inputType":0,'\
        + '"mode":1,"name":"cudnnConvolutionBackwardFilter","outputDims":[32,32,14,14],'\
        + '"outputStride":[6272,196,14,1],"padA":[1,1],"tensorOp":false}'

    context = BenchmarkRegistry.create_benchmark_context(
        'cudnn-function',
        platform=Platform.CUDA,
        parameters='--num_warmup 10 --num_steps 10 --num_in_step 100 '
        '--config_json_str ' + custom_config_str)

    assert (BenchmarkRegistry.is_benchmark_context_valid(context))

    benchmark = BenchmarkRegistry.launch_benchmark(context)

    # Check basic information.
    assert (benchmark)
    assert (benchmark.name == 'cudnn-function')
    assert (benchmark.type == BenchmarkType.MICRO)

    # Check parameters specified in BenchmarkContext.
    assert (benchmark._args.num_warmup == 10)
    assert (benchmark._args.num_steps == 10)
    assert (benchmark._args.num_in_step == 100)

    # Check results and metrics.
    assert (benchmark.run_count == 1)
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    assert ('raw_output_0' in benchmark.raw_data)
    assert (len(benchmark.raw_data['raw_output_0']) == 1)
    assert (isinstance(benchmark.raw_data['raw_output_0'][0], str))

    assert (1 + benchmark.default_metric_count == len(benchmark.result))
    for metric in list(benchmark.result.keys()):
        assert (len(benchmark.result[metric]) == 1)
        assert (isinstance(benchmark.result[metric][0], numbers.Number))
        if metric != 'return_code':
            assert (len(benchmark.raw_data[metric][0]) == benchmark._args.num_steps)
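
The string-concatenated cuDNN config above is equally well expressed as a dict; a sketch that reproduces the same compact JSON (again using separators to avoid spaces in the serialized string):

import json

# The cuDNN convolution config as a dict, serialized without spaces so it can
# ride inside the space-delimited parameters string.
cudnn_config = {
    'algo': 0,
    'arrayLength': 2,
    'convType': 0,
    'dilationA': [1, 1],
    'filterStrideA': [1, 1],
    'filterDims': [32, 128, 3, 3],
    'inputDims': [32, 128, 14, 14],
    'inputStride': [25088, 196, 14, 1],
    'inputType': 0,
    'mode': 1,
    'name': 'cudnnConvolutionBackwardFilter',
    'outputDims': [32, 32, 14, 14],
    'outputStride': [6272, 196, 14, 1],
    'padA': [1, 1],
    'tensorOp': False,
}
custom_config_str = json.dumps(cudnn_config, separators=(',', ':'))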