Example #1
    def exec(self):
        """Run the SuperBench benchmarks locally."""
        for benchmark_name in self._sb_benchmarks:
            if benchmark_name not in self._sb_enabled:
                continue
            benchmark_config = self._sb_benchmarks[benchmark_name]
            benchmark_results = list()
            self.__create_benchmark_dir(benchmark_name)
            cwd = os.getcwd()
            os.chdir(self.__get_benchmark_dir(benchmark_name))

            monitor = None
            if self.__get_rank_id() == 0 and self._sb_monitor_config \
                    and self._sb_monitor_config.enable:
                if self.__get_platform() == Platform.CUDA:
                    monitor = Monitor(
                        None,
                        int(self._sb_monitor_config.sample_duration or 10),
                        int(self._sb_monitor_config.sample_interval or 1),
                        self.__get_monitor_path(benchmark_name))
                    monitor.start()
                else:
                    logger.warning(
                        'Monitor is not supported on ROCm/CPU platforms.')

            benchmark_real_name = benchmark_name.split(':')[0]
            for framework in benchmark_config.frameworks or [
                    Framework.NONE.value
            ]:
                if benchmark_real_name == 'model-benchmarks' or (
                        ':' not in benchmark_name
                        and benchmark_name.endswith('_models')):
                    for model in benchmark_config.models:
                        full_name = f'{benchmark_name}/{framework}-{model}'
                        logger.info('Executor is going to execute %s.',
                                    full_name)
                        context = BenchmarkRegistry.create_benchmark_context(
                            model,
                            platform=self.__get_platform(),
                            framework=Framework(framework.lower()),
                            parameters=self.__get_arguments(
                                benchmark_config.parameters))
                        result = self.__exec_benchmark(full_name, context)
                        benchmark_results.append(result)
                else:
                    full_name = benchmark_name
                    logger.info('Executor is going to execute %s.', full_name)
                    context = BenchmarkRegistry.create_benchmark_context(
                        benchmark_real_name,
                        platform=self.__get_platform(),
                        framework=Framework(framework.lower()),
                        parameters=self.__get_arguments(
                            benchmark_config.parameters))
                    result = self.__exec_benchmark(full_name, context)
                    benchmark_results.append(result)

            if monitor:
                monitor.stop()
            self.__write_benchmark_results(benchmark_name, benchmark_results)
            os.chdir(cwd)
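For reference, the per-benchmark flow that exec() drives reduces to the context-create-and-launch pattern used by the standalone examples below. This is a minimal sketch, assuming __exec_benchmark ultimately delegates to BenchmarkRegistry.launch_benchmark; the benchmark name and parameters are illustrative stand-ins for values the executor derives from its configuration:

from superbench.benchmarks import BenchmarkRegistry, Framework, Platform

# Illustrative stand-ins for the values the executor derives from its config.
context = BenchmarkRegistry.create_benchmark_context(
    'kernel-launch',                 # benchmark_real_name
    platform=Platform.CUDA,          # self.__get_platform()
    framework=Framework.NONE,        # default when no framework is configured
    parameters='--run_count 1')      # self.__get_arguments(benchmark_config.parameters)
result = BenchmarkRegistry.launch_benchmark(context)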
Example #2
def test_kernel_launch_overhead():
    """Test kernel-launch benchmark."""
    context = BenchmarkRegistry.create_benchmark_context(
        'kernel-launch',
        parameters='--num_warmup 200 --num_steps 20000 --interval 100')

    assert (BenchmarkRegistry.is_benchmark_context_valid(context))

    benchmark = BenchmarkRegistry.launch_benchmark(context)

    # Check basic information.
    assert (benchmark)
    assert (benchmark.name == 'kernel-launch')
    assert (benchmark.type == BenchmarkType.MICRO)

    # Check parameters specified in BenchmarkContext.
    assert (benchmark._args.num_warmup == 200)
    assert (benchmark._args.num_steps == 20000)
    assert (benchmark._args.interval == 100)

    # Check results and metrics.
    assert (benchmark.run_count == 1)
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    assert ('raw_output_0' in benchmark.raw_data)
    assert (len(benchmark.raw_data['raw_output_0']) == 1)
    assert (isinstance(benchmark.raw_data['raw_output_0'][0], str))
    for metric in ['event_time', 'wall_time']:
        assert (metric in benchmark.result)
        assert (len(benchmark.result[metric]) == 1)
        assert (isinstance(benchmark.result[metric][0], numbers.Number))
Example #3
def test_pytorch_computation_communication_overlap_normal():
    """Test pytorch-computation-communication-overlap benchmark on distributed normal case."""
    context = BenchmarkRegistry.create_benchmark_context(
        'computation-communication-overlap',
        parameters='--num_warmup 5 --num_steps 10 --ratio 5',
        framework=Framework.PYTORCH
    )
    world_size = 2
    assert (BenchmarkRegistry.is_benchmark_context_valid(context))
    results = utils.simulated_ddp_distributed_benchmark(context, world_size)
    assert (results)
    for benchmark in results:
        # Check basic information.
        assert (benchmark)
        assert (isinstance(benchmark, ComputationCommunicationOverlap))
        assert (benchmark.name == 'pytorch-computation-communication-overlap')
        assert (benchmark.type == BenchmarkType.MICRO)

        # Check predefined parameters of computation-communication-overlap benchmark.
        assert (benchmark._args.kernel == [ComputationKernelType.MUL, ComputationKernelType.MATMUL])

        # Check parameters specified in BenchmarkContext.
        assert (benchmark._args.num_steps == 10)

        # Check results and metrics.
        assert (benchmark.run_count == 1)
        assert (benchmark.return_code == ReturnCode.SUCCESS)

        assert (len(benchmark.raw_data) == len(benchmark._args.kernel))
        assert (len(benchmark.result) == len(benchmark._args.kernel) + benchmark.default_metric_count)
Example #4
def test_pytorch_computation_communication_overlap_fake_distributed():
    """Test pytorch-computation-communication-overlap benchmark on single gpu."""
    context = BenchmarkRegistry.create_benchmark_context(
        'computation-communication-overlap',
        parameters='--num_warmup 5 --num_steps 10 --ratio 5',
        framework=Framework.PYTORCH
    )
    port = network.get_free_port()
    assert (port)
    utils.setup_simulated_ddp_distributed_env(1, 0, port)
    benchmark = BenchmarkRegistry.launch_benchmark(context)

    # Check basic information.
    assert (benchmark)
    assert (isinstance(benchmark, ComputationCommunicationOverlap))
    assert (benchmark.name == 'pytorch-computation-communication-overlap')
    assert (benchmark.type == BenchmarkType.MICRO)

    # Check predefined parameters of computation-communication-overlap benchmark.
    assert (benchmark._args.kernel == [ComputationKernelType.MUL, ComputationKernelType.MATMUL])

    # Check parameters specified in BenchmarkContext.
    assert (benchmark._args.num_steps == 10)

    # Check results and metrics.
    assert (benchmark.run_count == 1)
    assert (benchmark.return_code == ReturnCode.SUCCESS)

    assert (len(benchmark.raw_data) == len(benchmark._args.kernel))
    assert (len(benchmark.result) == len(benchmark._args.kernel) + benchmark.default_metric_count)
    utils.clean_simulated_ddp_distributed_env()
Example #5
def test_pytorch_matmul():
    """Test pytorch-matmul benchmark."""
    context = BenchmarkRegistry.create_benchmark_context(
        'matmul',
        platform=Platform.CUDA,
        parameters='--run_count 2 --num_steps 20',
        framework=Framework.PYTORCH)

    assert (BenchmarkRegistry.is_benchmark_context_valid(context))

    benchmark = BenchmarkRegistry.launch_benchmark(context)

    # Check basic information.
    assert (benchmark)
    assert (benchmark.name == 'pytorch-matmul')
    assert (benchmark.type == BenchmarkType.MICRO)

    # Check predefined parameters of matmul benchmark (nosharding mode).
    assert (benchmark._args.mode == [ShardingMode.NOSHARDING])

    # Check parameters specified in BenchmarkContext.
    assert (benchmark._args.run_count == 2)
    assert (benchmark._args.num_steps == 20)

    # Check results and metrics.
    assert (benchmark.run_count == 2)
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    assert (len(benchmark.raw_data['nosharding_time']) == benchmark.run_count)
    assert (len(
        benchmark.raw_data['nosharding_time'][0]) == benchmark._args.num_steps)
    assert (len(benchmark.result['nosharding_time']) == benchmark.run_count)
Example #6
def benchmark_list_params_command_handler(name=None):
    """List parameters for benchmarks which match the regular expression.

    Args:
        name (str, optional): Benchmark name or regular expression. Defaults to None.

    Raises:
        CLIError: If cannot find the matching benchmark.
    """
    for benchmark_name in benchmark_list_command_handler(name):
        format_help = ''
        for platform in Platform:
            if platform in BenchmarkRegistry.benchmarks[benchmark_name]:
                format_help = BenchmarkRegistry.get_benchmark_configurable_settings(
                    BenchmarkRegistry.create_benchmark_context(benchmark_name, platform=platform)
                )
                break
        print(
            (
                f'=== {benchmark_name} ===\n\n'
                f'{format_help}\n\n'
                f'default values:\n'
                f'{pformat(BenchmarkRegistry.benchmarks[benchmark_name]["predefine_param"])}\n'
            )
        )
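A minimal usage sketch for the handler above; the name pattern is illustrative and assumes the handler is imported alongside the other SuperBench CLI handlers:

# Print configurable settings and default values for every registered benchmark
# whose name matches the given pattern (pattern chosen for illustration only).
benchmark_list_params_command_handler(name='kernel-launch')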
Example #7
def test_register_benchmark():
    """Test interface BenchmarkRegistry.register_benchmark()."""
    # Register the benchmark for all platforms when using the default platform.
    BenchmarkRegistry.register_benchmark('accumulation', AccumulationBenchmark)
    for platform in Platform:
        context = BenchmarkRegistry.create_benchmark_context('accumulation',
                                                             platform=platform)
        assert (BenchmarkRegistry.is_benchmark_registered(context))

    # Register the benchmark for the CUDA platform when platform=Platform.CUDA is specified.
    BenchmarkRegistry.register_benchmark('accumulation-cuda',
                                         AccumulationBenchmark,
                                         platform=Platform.CUDA)
    context = BenchmarkRegistry.create_benchmark_context(
        'accumulation-cuda', platform=Platform.CUDA)
    assert (BenchmarkRegistry.is_benchmark_registered(context))
    context = BenchmarkRegistry.create_benchmark_context(
        'accumulation-cuda', platform=Platform.ROCM)
    assert (BenchmarkRegistry.is_benchmark_registered(context) is False)
Example #8
def test_is_benchmark_context_valid():
    """Test interface BenchmarkRegistry.is_benchmark_context_valid()."""
    # Positive case.
    context = BenchmarkRegistry.create_benchmark_context('accumulation',
                                                         platform=Platform.CPU)
    assert (BenchmarkRegistry.is_benchmark_context_valid(context))

    # Negative case.
    context = 'context'
    assert (BenchmarkRegistry.is_benchmark_context_valid(context) is False)
    context = None
    assert (BenchmarkRegistry.is_benchmark_context_valid(context) is False)
Example #9
def test_pytorch_bert_base():
    """Test pytorch-bert-base benchmark."""
    context = BenchmarkRegistry.create_benchmark_context(
        'bert-base',
        platform=Platform.CUDA,
        parameters=
        '--batch_size 1 --num_classes 5 --seq_len 8 --num_warmup 2 --num_steps 4 \
            --model_action train inference',
        framework=Framework.PYTORCH)

    assert (BenchmarkRegistry.is_benchmark_context_valid(context))

    benchmark = BenchmarkRegistry.launch_benchmark(context)

    # Check basic information.
    assert (benchmark)
    assert (isinstance(benchmark, PytorchBERT))
    assert (benchmark.name == 'pytorch-bert-base')
    assert (benchmark.type == BenchmarkType.MODEL)

    # Check predefined parameters of bert-base model.
    assert (benchmark._args.hidden_size == 768)
    assert (benchmark._args.num_hidden_layers == 12)
    assert (benchmark._args.num_attention_heads == 12)
    assert (benchmark._args.intermediate_size == 3072)

    # Check parameters specified in BenchmarkContext.
    assert (benchmark._args.batch_size == 1)
    assert (benchmark._args.num_classes == 5)
    assert (benchmark._args.seq_len == 8)
    assert (benchmark._args.num_warmup == 2)
    assert (benchmark._args.num_steps == 4)

    # Check dataset scale.
    assert (len(benchmark._dataset) == benchmark._args.sample_count *
            benchmark._world_size)

    # Check results and metrics.
    assert (benchmark.run_count == 1)
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    for metric in [
            'fp32_train_step_time', 'fp32_train_throughput',
            'fp16_train_step_time', 'fp16_train_throughput',
            'fp32_inference_step_time', 'fp32_inference_throughput',
            'fp16_inference_step_time', 'fp16_inference_throughput'
    ]:
        assert (len(benchmark.raw_data[metric]) == benchmark.run_count)
        assert (len(
            benchmark.raw_data[metric][0]) == benchmark._args.num_steps)
        assert (len(benchmark.result[metric]) == benchmark.run_count)
Example #10
    def test_tcp_connectivity(self):
        """Test tcp-connectivity benchmark."""
        context = BenchmarkRegistry.create_benchmark_context(
            'tcp-connectivity',
            parameters=
            '--hostfile /tmp/superbench/hostfile.test --port 80 --parallel 2',
        )
        assert (BenchmarkRegistry.is_benchmark_context_valid(context))
        benchmark = BenchmarkRegistry.launch_benchmark(context)

        # Check basic information.
        assert (benchmark)
        assert (isinstance(benchmark, TCPConnectivityBenchmark))
        assert (benchmark.name == 'tcp-connectivity')
        assert (benchmark.type == BenchmarkType.MICRO)

        # Check parameters specified in BenchmarkContext.
        assert (benchmark._args.hostfile == '/tmp/superbench/hostfile.test')
        assert (benchmark._args.port == 80)
        assert (benchmark._args.count == 10)
        assert (benchmark._args.timeout == 1)
        assert (benchmark._args.parallel == 2)

        print(benchmark.result)
        assert (benchmark.result)

        # Check results and metrics.
        assert (benchmark.result['api.github.com_successed_count'][0] == 10)
        assert (benchmark.result['api.github.com_failed_count'][0] == 0)
        assert (benchmark.result['api.github.com_success_rate'][0] == 100.0)
        assert (isinstance(benchmark.result['api.github.com_time_min'][0],
                           numbers.Number))
        assert (isinstance(benchmark.result['api.github.com_time_max'][0],
                           numbers.Number))
        assert (isinstance(benchmark.result['api.github.com_time_avg'][0],
                           numbers.Number))
        assert (isinstance(benchmark.result['localhost_successed_count'][0],
                           numbers.Number))
        assert (isinstance(benchmark.result['localhost_failed_count'][0],
                           numbers.Number))
        assert (isinstance(benchmark.result['localhost_time_max'][0],
                           numbers.Number))
        assert (isinstance(benchmark.result['localhost_time_min'][0],
                           numbers.Number))
        assert (isinstance(benchmark.result['localhost_time_avg'][0],
                           numbers.Number))
        assert (benchmark.return_code == ReturnCode.SUCCESS)
Example #11
def run_pytorch_lstm(parameters='', check_metrics=[]):
    """Test pytorch-lstm benchmark."""
    context = BenchmarkRegistry.create_benchmark_context(
        'lstm',
        platform=Platform.CUDA,
        parameters=parameters,
        framework=Framework.PYTORCH)

    assert (BenchmarkRegistry.is_benchmark_context_valid(context))

    benchmark = BenchmarkRegistry.launch_benchmark(context)

    # Check basic information.
    assert (benchmark)
    assert (isinstance(benchmark, PytorchLSTM))
    assert (benchmark.name == 'pytorch-lstm')
    assert (benchmark.type == BenchmarkType.MODEL)

    # Check predefined parameters of lstm model.
    assert (benchmark._args.input_size == 256)
    assert (benchmark._args.hidden_size == 1024)
    assert (benchmark._args.num_layers == 8)

    # Check parameters specified in BenchmarkContext.
    assert (benchmark._args.batch_size == 1)
    assert (benchmark._args.num_classes == 5)
    assert (benchmark._args.seq_len == 8)
    assert (benchmark._args.num_warmup == 2)
    assert (benchmark._args.num_steps == 4)

    # Check dataset scale.
    assert (len(benchmark._dataset) == benchmark._args.sample_count *
            benchmark._world_size)

    # Check results and metrics.
    assert (benchmark.run_count == 1)
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    for metric in check_metrics:
        assert (len(benchmark.raw_data[metric]) == benchmark.run_count)
        assert (len(
            benchmark.raw_data[metric][0]) == benchmark._args.num_steps)
        assert (len(benchmark.result[metric]) == benchmark.run_count)
Example #12
def test_get_benchmark_configurable_settings():
    """Test BenchmarkRegistry interface.

    BenchmarkRegistry.get_benchmark_configurable_settings().
    """
    # Register benchmarks for testing.
    BenchmarkRegistry.register_benchmark('accumulation', AccumulationBenchmark)

    context = BenchmarkRegistry.create_benchmark_context('accumulation',
                                                         platform=Platform.CPU)
    settings = BenchmarkRegistry.get_benchmark_configurable_settings(context)

    expected = """optional arguments:
  --duration int     The elapsed time of benchmark in seconds.
  --log_raw_data     Log raw data into file instead of saving it into result
                     object.
  --lower_bound int  The lower bound for accumulation.
  --run_count int    The run count of benchmark.
  --upper_bound int  The upper bound for accumulation."""
    assert (settings == expected)
Example #13
def run_pytorch_cnn(models=[], parameters='', check_metrics=[]):
    """Run pytorch cnn benchmarks."""
    for model in models:
        context = BenchmarkRegistry.create_benchmark_context(
            model,
            platform=Platform.CUDA,
            parameters=parameters,
            framework=Framework.PYTORCH)

        assert (BenchmarkRegistry.is_benchmark_context_valid(context))

        benchmark = BenchmarkRegistry.launch_benchmark(context)

        # Check basic information.
        assert (benchmark)
        assert (isinstance(benchmark, PytorchCNN))
        assert (benchmark.name == 'pytorch-' + model)
        assert (benchmark.type == BenchmarkType.MODEL)

        # Check predefined parameters of the CNN model under test.
        assert (benchmark._args.model_type == model)

        # Check parameters specified in BenchmarkContext.
        assert (benchmark._args.batch_size == 1)
        assert (benchmark._args.image_size == 224)
        assert (benchmark._args.num_classes == 5)
        assert (benchmark._args.num_warmup == 2)
        assert (benchmark._args.num_steps == 4)

        # Check Dataset.
        assert (len(benchmark._dataset) == benchmark._args.sample_count *
                benchmark._world_size)

        # Check results and metrics.
        assert (benchmark.run_count == 1)
        assert (benchmark.return_code == ReturnCode.SUCCESS)
        for metric in check_metrics:
            assert (len(benchmark.raw_data[metric]) == benchmark.run_count)
            assert (len(
                benchmark.raw_data[metric][0]) == benchmark._args.num_steps)
            assert (len(benchmark.result[metric]) == benchmark.run_count)
Example #14
def create_benchmark(params='--num_steps 8'):
    """Register and create benchmark."""
    # Register the FakeModelBenchmark benchmark.
    BenchmarkRegistry.register_benchmark(
        'pytorch-fake-model',
        FakeModelBenchmark,
        parameters='--hidden_size 2',
        platform=Platform.CUDA,
    )
    context = BenchmarkRegistry.create_benchmark_context(
        'fake-model',
        platform=Platform.CUDA,
        parameters=params,
        framework=Framework.PYTORCH)
    name = BenchmarkRegistry._BenchmarkRegistry__get_benchmark_name(context)
    assert (name)
    (benchmark_class, predefine_params
     ) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(
         name, context.platform)
    assert (benchmark_class)
    return benchmark_class(name, predefine_params + ' ' + context.parameters)
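A minimal usage sketch for the helper above, assuming FakeModelBenchmark exposes the same _preprocess()/_args interface as the other benchmarks on this page:

# Build the fake benchmark and verify that predefined and user-specified
# parameters are merged (assertions reflect the parameters registered above).
benchmark = create_benchmark(params='--num_steps 8')
assert (benchmark._preprocess())
assert (benchmark._args.hidden_size == 2)   # predefined parameter
assert (benchmark._args.num_steps == 8)     # parameter passed via params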
Example #15
def test_pytorch_sharding_matmul():
    """Test pytorch-sharding-matmul benchmark."""
    context = BenchmarkRegistry.create_benchmark_context(
        'sharding-matmul',
        platform=Platform.CUDA,
        parameters='--run_count 2 --num_steps 20',
        framework=Framework.PYTORCH)

    assert (BenchmarkRegistry.is_benchmark_context_valid(context))

    port = network.get_free_port()
    assert (port)
    utils.setup_simulated_ddp_distributed_env(1, 0, port)
    benchmark = BenchmarkRegistry.launch_benchmark(context)

    # Check basic information.
    assert (benchmark)
    assert (isinstance(benchmark, ShardingMatmul))
    assert (benchmark.name == 'pytorch-sharding-matmul')
    assert (benchmark.type == BenchmarkType.MICRO)

    # Check predefined parameters of sharding-matmul benchmark.
    assert (benchmark._args.mode == [
        ShardingMode.ALLREDUCE, ShardingMode.ALLGATHER
    ])

    # Check parameters specified in BenchmarkContext.
    assert (benchmark._args.run_count == 2)
    assert (benchmark._args.num_steps == 20)

    # Check results and metrics.
    assert (benchmark.run_count == 2)
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    for metric in ['allreduce_time', 'allgather_time']:
        assert (len(benchmark.raw_data[metric]) == benchmark.run_count)
        assert (len(
            benchmark.raw_data[metric][0]) == benchmark._args.num_steps)
        assert (len(benchmark.result[metric]) == benchmark.run_count)

    utils.clean_simulated_ddp_distributed_env()
Example #16
def test_get_benchmark_name():
    """Test interface BenchmarkRegistry.get_benchmark_name()."""
    # Register benchmarks for testing.
    benchmark_names = [
        'accumulation', 'pytorch-accumulation', 'tf1-accumulation',
        'onnxruntime-accumulation'
    ]
    for name in benchmark_names:
        BenchmarkRegistry.register_benchmark(name, AccumulationBenchmark)

    # Test benchmark name for different Frameworks.
    benchmark_frameworks = [
        Framework.NONE, Framework.PYTORCH, Framework.TENSORFLOW1,
        Framework.ONNXRUNTIME
    ]
    for i in range(len(benchmark_names)):
        context = BenchmarkRegistry.create_benchmark_context(
            'accumulation',
            platform=Platform.CPU,
            framework=benchmark_frameworks[i])
        name = BenchmarkRegistry._BenchmarkRegistry__get_benchmark_name(
            context)
        assert (name == benchmark_names[i])
Example #17
def test_pytorch_empty_cache():
    """Test PytorchBase class."""
    # Register mnist benchmark.
    BenchmarkRegistry.register_benchmark('pytorch-mnist', PytorchMNIST)

    # Test emptying the cache by manually calling torch.cuda.empty_cache().
    parameters = '--batch_size 32 --num_warmup 8 --num_steps 64 --model_action train'
    benchmark = PytorchMNIST('pytorch-mnist', parameters=parameters)
    assert (benchmark)
    assert (benchmark._preprocess())
    assert (benchmark._benchmark())
    del benchmark
    assert (torch.cuda.memory_stats()['reserved_bytes.all.current'] > 0)
    torch.cuda.empty_cache()
    assert (torch.cuda.memory_stats()['reserved_bytes.all.current'] == 0)

    # Test automatic cache emptying.
    context = BenchmarkRegistry.create_benchmark_context(
        'pytorch-mnist', parameters='--batch_size 32 --num_warmup 8 --num_steps 64 --model_action train'
    )

    benchmark = BenchmarkRegistry.launch_benchmark(context)
    assert (benchmark)
    assert (torch.cuda.memory_stats()['reserved_bytes.all.current'] == 0)
Example #18
import argparse

from superbench.benchmarks import Platform, Framework, BenchmarkRegistry
from superbench.common.utils import logger

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--distributed',
                        action='store_true',
                        default=False,
                        help='Whether to enable distributed training.')
    args = parser.parse_args()

    # Specify the model name and benchmark parameters.
    model_name = 'gpt2-large'
    parameters = '--batch_size 1 --duration 120 --seq_len 128 --precision float32 --run_count 2'
    if args.distributed:
        parameters += ' --distributed_impl ddp --distributed_backend nccl'

    # Create context for gpt2-large benchmark and run it for 120 * 2 seconds.
    context = BenchmarkRegistry.create_benchmark_context(
        model_name,
        platform=Platform.CUDA,
        parameters=parameters,
        framework=Framework.PYTORCH)

    benchmark = BenchmarkRegistry.launch_benchmark(context)
    if benchmark:
        logger.info('benchmark: {}, return code: {}, result: {}'.format(
            benchmark.name, benchmark.return_code, benchmark.result))
Example #19
def test_cublas_functions():
    """Test cublas-function benchmark."""
    # Test for default configuration
    context = BenchmarkRegistry.create_benchmark_context(
        'cublas-function',
        platform=Platform.CUDA,
        parameters='--num_warmup 10 --num_steps 10 --num_in_step 100')

    assert (BenchmarkRegistry.is_benchmark_context_valid(context))

    benchmark = BenchmarkRegistry.launch_benchmark(context)

    # Check basic information.
    assert (benchmark)
    assert (benchmark.name == 'cublas-function')
    assert (benchmark.type == BenchmarkType.MICRO)

    # Check parameters specified in BenchmarkContext.
    assert (benchmark._args.num_warmup == 10)
    assert (benchmark._args.num_steps == 10)
    assert (benchmark._args.num_in_step == 100)

    # Check results and metrics.
    assert (benchmark.run_count == 1)
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    assert ('raw_output_0' in benchmark.raw_data)
    assert (len(benchmark.raw_data['raw_output_0']) == 1)
    assert (isinstance(benchmark.raw_data['raw_output_0'][0], str))

    assert (19 <= len(benchmark.result))
    for metric in list(benchmark.result.keys()):
        assert (len(benchmark.result[metric]) == 1)
        assert (isinstance(benchmark.result[metric][0], numbers.Number))
        if metric != 'return_code':
            assert (len(
                benchmark.raw_data[metric][0]) == benchmark._args.num_steps)

    # Test for custom configuration
    custom_config_str = '{"name":"cublasCgemm","m":512,"n":512,"k":32,"transa":1,"transb":0}'
    context = BenchmarkRegistry.create_benchmark_context(
        'cublas-function',
        platform=Platform.CUDA,
        parameters=
        '--num_warmup 10 --num_steps 10 --num_in_step 100 --config_json_str ' +
        custom_config_str)

    assert (BenchmarkRegistry.is_benchmark_context_valid(context))

    benchmark = BenchmarkRegistry.launch_benchmark(context)

    # Check basic information.
    assert (benchmark)
    assert (benchmark.name == 'cublas-function')
    assert (benchmark.type == BenchmarkType.MICRO)

    # Check parameters specified in BenchmarkContext.
    assert (benchmark._args.num_warmup == 10)
    assert (benchmark._args.num_steps == 10)
    assert (benchmark._args.num_in_step == 100)

    # Check results and metrics.
    assert (benchmark.run_count == 1)
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    assert ('raw_output_0' in benchmark.raw_data)
    assert (len(benchmark.raw_data['raw_output_0']) == 1)
    assert (isinstance(benchmark.raw_data['raw_output_0'][0], str))

    assert (1 + benchmark.default_metric_count == len(benchmark.result))
    for metric in list(benchmark.result.keys()):
        assert (len(benchmark.result[metric]) == 1)
        assert (isinstance(benchmark.result[metric][0], numbers.Number))
        if metric != 'return_code':
            assert (len(
                benchmark.raw_data[metric][0]) == benchmark._args.num_steps)
Example #20
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Micro benchmark example for disk performance.

Commands to run:
  python3 examples/benchmarks/memory_bw_latency_performance.py
"""

from superbench.benchmarks import BenchmarkRegistry, Platform
from superbench.common.utils import logger

if __name__ == '__main__':
    context = BenchmarkRegistry.create_benchmark_context(
        'cpu-memory-bw-latency',
        platform=Platform.CPU,
        parameters='--tests bandwidth_matrix latency_matrix max_bandwidth')

    benchmark = BenchmarkRegistry.launch_benchmark(context)
    if benchmark:
        logger.info('benchmark: {}, return code: {}, result: {}'.format(
            benchmark.name, benchmark.return_code, benchmark.result))
Example #21
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Micro benchmark example for GPU copy bandwidth performance.

Commands to run:
  python3 examples/benchmarks/gpu_copy_bw_performance.py
"""

from superbench.benchmarks import BenchmarkRegistry, Platform
from superbench.common.utils import logger

if __name__ == '__main__':
    context = BenchmarkRegistry.create_benchmark_context(
        'gpu-copy-bw',
        platform=Platform.CUDA,
        parameters='--mem_type htod dtoh dtod --copy_type sm dma')
    # For a ROCm environment, specify the benchmark name and platform as follows.
    # context = BenchmarkRegistry.create_benchmark_context(
    #     'gpu-copy-bw', platform=Platform.ROCM, parameters='--mem_type htod dtoh dtod --copy_type sm dma'
    # )
    # For a bidirectional test, specify the parameters as follows.
    # parameters='--mem_type htod dtod --copy_type sm dma --bidirectional'
    # To enable data checking, please add '--check_data'.

    benchmark = BenchmarkRegistry.launch_benchmark(context)
    if benchmark:
        logger.info('benchmark: {}, return code: {}, result: {}'.format(
            benchmark.name, benchmark.return_code, benchmark.result))
Example #22
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""Microbenchmark benchmark example for TCP connectivity.

Commands to run:
  python3 examples/benchmarks/tcp_connectivity.py
"""

from superbench.benchmarks import BenchmarkRegistry
from superbench.common.utils import logger

if __name__ == '__main__':
    context = BenchmarkRegistry.create_benchmark_context(
        'tcp-connectivity', parameters='--hostfile /tmp/superbench/hostfile.test --port 80 --parallel 1'
    )

    benchmark = BenchmarkRegistry.launch_benchmark(context)
    if benchmark:
        logger.info(
            'benchmark: {}, return code: {}, result: {}'.format(
                benchmark.name, benchmark.return_code, benchmark.result
            )
        )
Example #23
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Micro benchmark example for cudnn performance benchmark.

Commands to run:
  python3 examples/benchmarks/cudnn_function.py
"""

from superbench.benchmarks import BenchmarkRegistry, Platform
from superbench.common.utils import logger

if __name__ == '__main__':
    parameters = '--num_warmup 8 --num_steps 100 --num_in_step 1000'
    context = BenchmarkRegistry.create_benchmark_context(
        'cudnn-function', platform=Platform.CUDA, parameters=parameters)

    benchmark = BenchmarkRegistry.launch_benchmark(context)
    if benchmark:
        logger.info('benchmark: {}, return code: {}, result: {}'.format(
            benchmark.name, benchmark.return_code, benchmark.result))
Example #24
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""Micro benchmark example for GPU-Burn.

Commands to run:
  python3 examples/benchmarks/gpu_burn_test.py
"""

from superbench.benchmarks import BenchmarkRegistry, Platform
from superbench.common.utils import logger

if __name__ == '__main__':
    context = BenchmarkRegistry.create_benchmark_context(
        'gpu-burn', platform=Platform.CUDA, parameters='--doubles --tensor_core --time 10'
    )

    benchmark = BenchmarkRegistry.launch_benchmark(context)
    if benchmark:
        logger.info(
            'benchmark: {}, return code: {}, result: {}'.format(
                benchmark.name, benchmark.return_code, benchmark.result
            )
        )
Example #25
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""Micro benchmark example for sharding-matmul with pytorch.

Commands to run:
  python3 -m torch.distributed.launch --nproc_per_node=8 examples/benchmarks/sharding_matmul.py
"""

from superbench.benchmarks import Framework, BenchmarkRegistry
from superbench.common.utils import logger

if __name__ == '__main__':
    context = BenchmarkRegistry.create_benchmark_context(
        'sharding-matmul', parameters='--num_steps 20', framework=Framework.PYTORCH
    )

    benchmark = BenchmarkRegistry.launch_benchmark(context)
    if benchmark:
        logger.info(
            'benchmark: {}, return code: {}, result: {}'.format(
                benchmark.name, benchmark.return_code, benchmark.result
            )
        )
Example #26
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Model benchmark example for Cutlass GEMM FLOPs performance.

Commands to run:
  python3 examples/benchmarks/gemm_flops_cuda_performance.py
"""

from superbench.benchmarks import BenchmarkRegistry, Platform
from superbench.common.utils import logger

if __name__ == '__main__':
    parameters = '--n 16384 --k 16384 --m 16384'
    context = BenchmarkRegistry.create_benchmark_context(
        'gemm-flops', platform=Platform.CUDA, parameters=parameters)

    benchmark = BenchmarkRegistry.launch_benchmark(context)
    if benchmark:
        logger.info('benchmark: {}, return code: {}, result: {}'.format(
            benchmark.name, benchmark.return_code, benchmark.result))
Example #27
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Model benchmark example for kernel launch overhead.

Commands to run:
  python3 examples/benchmarks/kernel_launch_overhead.py
"""

from superbench.benchmarks import BenchmarkRegistry
from superbench.common.utils import logger

if __name__ == '__main__':
    context = BenchmarkRegistry.create_benchmark_context('kernel-launch')

    benchmark = BenchmarkRegistry.launch_benchmark(context)
    if benchmark:
        logger.info('benchmark: {}, return code: {}, result: {}'.format(
            benchmark.name, benchmark.return_code, benchmark.result))
Example #28
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Micro benchmark example for ONNXRuntime inference performance.

Commands to run:
  python3 examples/benchmarks/ort_inference_performance.py
"""

from superbench.benchmarks import BenchmarkRegistry, Platform
from superbench.common.utils import logger

if __name__ == '__main__':
    context = BenchmarkRegistry.create_benchmark_context(
        'ort-inference',
        platform=Platform.CUDA,
        parameters='--pytorch_models resnet50 resnet101 --precision float16')
    benchmark = BenchmarkRegistry.launch_benchmark(context)
    if benchmark:
        logger.info('benchmark: {}, return code: {}, result: {}'.format(
            benchmark.name, benchmark.return_code, benchmark.result))
Example #29
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Micro benchmark example for IB loopback performance.

Commands to run:
  python3 examples/benchmarks/ib_loopback_performance.py
"""

from superbench.benchmarks import BenchmarkRegistry
from superbench.common.utils import logger

if __name__ == '__main__':
    context = BenchmarkRegistry.create_benchmark_context('ib-loopback')

    benchmark = BenchmarkRegistry.launch_benchmark(context)
    if benchmark:
        logger.info('benchmark: {}, return code: {}, result: {}'.format(
            benchmark.name, benchmark.return_code, benchmark.result))
Example #30
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Micro benchmark example for device memory bandwidth performance.

Commands to run:
  python3 examples/benchmarks/rocm_memory_bw_performance.py
"""

from superbench.benchmarks import BenchmarkRegistry, Platform
from superbench.common.utils import logger

if __name__ == '__main__':
    context = BenchmarkRegistry.create_benchmark_context(
        'mem-bw', platform=Platform.ROCM)

    benchmark = BenchmarkRegistry.launch_benchmark(context)
    if benchmark:
        logger.info('benchmark: {}, return code: {}, result: {}'.format(
            benchmark.name, benchmark.return_code, benchmark.result))