def benchmark_list_params_command_handler(name=None):
    """List parameters for benchmarks which match the regular expression.

    Args:
        name (str, optional): Benchmark name or regular expression. Defaults to None.

    Raises:
        CLIError: If cannot find the matching benchmark.
    """
    for matched_name in benchmark_list_command_handler(name):
        registered_platforms = BenchmarkRegistry.benchmarks[matched_name]
        settings_help = ''
        # Probe platforms in declaration order; the first one with a registered
        # implementation supplies the configurable-settings help text.
        for candidate_platform in Platform:
            if candidate_platform in registered_platforms:
                context = BenchmarkRegistry.create_benchmark_context(matched_name, platform=candidate_platform)
                settings_help = BenchmarkRegistry.get_benchmark_configurable_settings(context)
                break
        print(
            (
                f'=== {matched_name} ===\n\n'
                f'{settings_help}\n\n'
                f'default values:\n'
                f'{pformat(registered_platforms["predefine_param"])}\n'
            )
        )
def test_get_benchmark_configurable_settings():
    """Test BenchmarkRegistry interface.

    BenchmarkRegistry.get_benchmark_configurable_settings().
    """
    # Register benchmarks for testing.
    BenchmarkRegistry.register_benchmark('accumulation', AccumulationBenchmark)

    benchmark_context = BenchmarkRegistry.create_benchmark_context('accumulation', platform=Platform.CPU)
    actual_settings = BenchmarkRegistry.get_benchmark_configurable_settings(benchmark_context)

    # Expected argparse-style help output, flags sorted alphabetically.
    expected_settings = (
        'optional arguments:\n'
        '  --duration int        The elapsed time of benchmark in seconds.\n'
        '  --log_raw_data        Log raw data into file instead of saving it into result object.\n'
        '  --lower_bound int     The lower bound for accumulation.\n'
        '  --run_count int       The run count of benchmark.\n'
        '  --upper_bound int     The upper bound for accumulation.'
    )
    assert actual_settings == expected_settings