def main():
    """Main program driving measurement of benchmark size"""
    # Establish the root directory of the repository, since we know this file
    # is in that directory.
    gp['rootdir'] = os.path.abspath(os.path.dirname(__file__))

    # Parse arguments using standard technology
    parser = build_parser()
    args = parser.parse_args()

    # Establish logging
    setup_logging(args.logdir, 'size')
    log_args(args)

    # Check args are OK (have to have logging and build directory set up first)
    validate_args(args)

    # Find the benchmarks
    benchmarks = find_benchmarks()
    log_benchmarks(benchmarks)

    # Collect the size data for the benchmarks
    raw_data, rel_data = collect_data(benchmarks)

    # We can't compute geometric SD on the fly, so we need to collect all the
    # data and then process it in two passes. We could do the first processing
    # as we collect the data, but it is clearer to do the three things
    # separately. Given the size of datasets with which we are concerned the
    # compute overhead is not significant.
    if raw_data:
        embench_stats(benchmarks, raw_data, rel_data)
        log.info('All benchmarks sized successfully')
    else:
        log.info('ERROR: Failed to compute size benchmarks')
        sys.exit(1)
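# For context, a minimal sketch of the kind of parser build_parser() above
# might return. This is an assumption for illustration, not the real Embench
# parser: only --logdir is implied by main() itself (it passes args.logdir to
# setup_logging()); the other option name and the defaults are hypothetical.
import argparse

def build_parser_sketch():
    """Illustrative argparse parser covering options main() relies on."""
    parser = argparse.ArgumentParser(description='Measure benchmark size')
    parser.add_argument(
        '--logdir', default='logs',
        help='Directory in which to store log files'
    )
    parser.add_argument(
        '--builddir', default='bd',
        help='Directory holding the built benchmarks (hypothetical option)'
    )
    return parser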
def main():
    """Main program driving measurement of benchmark speed"""
    # Establish the root directory of the repository, since we know this file
    # is in that directory.
    gp['rootdir'] = os.path.abspath(os.path.dirname(__file__))

    # Parse arguments common to all speed testers, and get list of those
    # remaining.
    args, remnant = get_common_args()

    # Establish logging
    setup_logging(args.logdir, 'speed')
    log_args(args)

    # Check args are OK (have to have logging and build directory set up first)
    validate_args(args)

    # Find the benchmarks
    benchmarks = find_benchmarks()
    log_benchmarks(benchmarks)

    # Collect the speed data for the benchmarks, passing on any remaining args.
    raw_data, rel_data = collect_data(benchmarks, remnant)

    # We can't compute geometric SD on the fly, so we need to collect all the
    # data and then process it in two passes. We could do the first processing
    # as we collect the data, but it is clearer to do the three things
    # separately. Given the size of datasets with which we are concerned the
    # compute overhead is not significant.
    if raw_data:
        if gp['output_format'] != output_format.BASELINE:
            opt_comma = ',' if args.json_comma else ''
            embench_stats(benchmarks, raw_data, rel_data, 'speed', opt_comma)
            log.info('All benchmarks run successfully')
    else:
        log.info('ERROR: Failed to compute speed benchmarks')
        sys.exit(1)
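# The two-pass computation described in the comment above can be sketched as
# follows. This is an illustrative example, not the embench_stats
# implementation: the geometric mean is exp(mean(log x)) and the geometric
# standard deviation is exp(stdev(log x)), so the SD needs the mean of the
# logs before it can make its second pass over the data.
import math

def geometric_stats(values):
    """Return (geometric mean, geometric SD) of a list of positive numbers."""
    logs = [math.log(v) for v in values]
    # First pass: the arithmetic mean of the logs gives the geometric mean.
    log_mean = sum(logs) / len(logs)
    # Second pass: the dispersion of the logs gives the geometric SD
    # (population variance is used here for simplicity).
    log_var = sum((lg - log_mean) ** 2 for lg in logs) / len(logs)
    return math.exp(log_mean), math.exp(math.sqrt(log_var))

# Example: geometric_stats([0.8, 1.0, 1.25]) returns (1.0, ~1.20).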