def main():
    parser = argparse.ArgumentParser(
        description="Report the weight of a microbenchmark"
    )
    parser.add_argument("functions_txt")
    parser.add_argument("performance_csv")
    parser.add_argument("cluster_json")
    parser.add_argument("function_name")
    parser.add_argument("invocation", type=int)
    args = parser.parse_args()

    cluster_info = ClusteringInformation.from_file(args.cluster_json)
    invocations = load_invocations_from_file(args.performance_csv)

    # Look up the weight of the requested function. Only assign it on a name
    # match; the original assigned on every row, so a missing function
    # silently inherited the last row's weight.
    weight = None
    with open(args.functions_txt) as file:
        for line in file.readlines()[1:]:  # skip the header row
            _, name, _, _, raw_weight, _, _ = line.split("\t")
            if name == args.function_name:
                # Strip the trailing "%" and normalize to a 0-1 fraction.
                weight = float(raw_weight[:-1]) / 100
                break

    function = Function(cluster_info, invocations, weight)
    print(function.get_weight_of_invocation(args.invocation))
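# A minimal sketch of the functions.txt layout that main() above assumes: a
# header row followed by seven tab-separated columns, with the function name
# in the second column and the weight in the fifth as a percentage string
# such as "12.34%". The field values below are illustrative assumptions, not
# confirmed by the source.
example_line = "0\tmy_function\t0x400000\t100\t12.34%\t0\t0\n"
_, name, _, _, raw_weight, _, _ = example_line.split("\t")
assert name == "my_function"
assert abs(float(raw_weight[:-1]) / 100 - 0.1234) < 1e-9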
def use_instr_dbscan(args):
    if not (0 < args.minimum_distance_percentage <= 100):
        chop_print(
            "ERROR: minimum_distance_percentage should be in (0,100] range"
        )
        exit(1)
    if not (0 < args.maximum_distance_percentage <= 100):
        chop_print(
            "ERROR: maximum_distance_percentage should be in (0,100] range"
        )
        exit(1)
    if not (0 < args.minimum_cluster_size_percentage <= 100):
        chop_print(
            "ERROR: minimum_cluster_size_percentage should be in (0,100] range"
        )
        exit(1)
    if args.uniq_value_threshold <= 0:
        chop_print("ERROR: uniq_value_threshold should be > 0")
        exit(1)
    if args.minimum_cluster_count < 0:
        chop_print("ERROR: minimum_cluster_count should be >= 0")
        exit(1)
    if args.maximum_cluster_count <= 0:
        chop_print("ERROR: maximum_cluster_count should be > 0")
        exit(1)
    if args.maximum_cluster_count <= args.minimum_cluster_count:
        chop_print(
            "ERROR: maximum_cluster_count should be greater than"
            " minimum_cluster_count"
        )
        exit(1)
    if args.maximum_distance_percentage <= args.minimum_distance_percentage:
        chop_print(
            "ERROR: maximum_distance_percentage should be greater than"
            " minimum_distance_percentage"
        )
        exit(1)

    invocations = load_invocations_from_file(args.perf_invok_csv)
    cluster_info = dbscan_instr(
        invocations,
        plot_path=args.plot_path,
        minimum_distance_percentage=args.minimum_distance_percentage,
        uniq_value_threshold=args.uniq_value_threshold,
        maximum_distance_percentage=args.maximum_distance_percentage,
        minimum_cluster_size_percentage=args.minimum_cluster_size_percentage,
        minimum_cluster_count=args.minimum_cluster_count,
        maximum_cluster_count=args.maximum_cluster_count,
        benchmark_name=args.benchmark_name,
        function_name=args.function_name,
    )
    cluster_info.to_file(args.output)
    chop_print("Results written to %s" % args.output)
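# The validation branches above repeat one pattern six times: a (0,100] range
# check followed by the same error-and-exit sequence. A hedged refactor
# sketch that consolidates the pattern; the helper name check_percentage is
# an assumption, not part of the original code, and it reuses the existing
# chop_print helper.
def check_percentage(value, name):
    # Reject values outside the (0,100] range with the original error style.
    if not (0 < value <= 100):
        chop_print("ERROR: %s should be in (0,100] range" % name)
        exit(1)

# Usage, mirroring the first check in use_instr_dbscan:
#   check_percentage(args.minimum_distance_percentage,
#                    "minimum_distance_percentage")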
def use_instr_ipc_2d_density(args):
    if args.max_clusters <= 0:
        chop_print("ERROR: max_clusters should be > 0")
        exit(1)
    if not (0 < args.min_clusters_weight_percentage <= 100):
        chop_print(
            "ERROR: min_clusters_weight_percentage should be in (0,100] range"
        )
        exit(1)
    if not (0 < args.target_coverage_percentage <= 100):
        chop_print(
            "ERROR: target_coverage_percentage should be in (0,100] range"
        )
        exit(1)
    if not (0 < args.outlier_percent <= 100):
        chop_print("ERROR: outlier_percent should be in (0,100] range")
        exit(1)
    if args.outlier_minsize_threshold <= 0:
        chop_print("ERROR: outlier_minsize_threshold should be > 0")
        exit(1)
    if not (0 < args.minimum_granularity_percentage <= 100):
        chop_print(
            "ERROR: minimum_granularity_percentage should be in (0,100] range"
        )
        exit(1)
    if not (0 < args.granularity_step_percentage <= 100):
        chop_print(
            "ERROR: granularity_step_percentage should be in (0,100] range"
        )
        exit(1)

    invocations = load_invocations_from_file(args.perf_invok_csv)
    cluster_info = brute_force_2d_density(
        invocations,
        None,
        plot_path=args.plot_path,
        max_clusters=args.max_clusters,
        min_clusters_weight_percentage=args.min_clusters_weight_percentage,
        target_coverage_percentage=args.target_coverage_percentage,
        outlier_percent=args.outlier_percent,
        outlier_minsize_threshold=args.outlier_minsize_threshold,
        minimum_granularity_percentage=args.minimum_granularity_percentage,
        granularity_step_percentage=args.granularity_step_percentage,
        benchmark_name=args.benchmark_name,
        function_name=args.function_name,
    )
    cluster_info.to_file(args.output)
    chop_print("Results written to %s" % args.output)
def main():
    parser = argparse.ArgumentParser(
        description="Inspect ChopStix traces either generated during tracing"
        " or by the chop-perf-invok support tool."
    )
    parser.add_argument(
        "-t",
        "--input_type",
        help="Input file type",
        type=str,
        default="ipc",
        choices=["trace", "ipc"],
    )
    parser.add_argument("input_file")
    args = parser.parse_args()

    chop_print("Parsing: '%s'" % args.input_file)

    if args.input_type == "trace":
        trace = Trace(args.input_file)
        print("Input file: %s" % args.input_file)
        print("Subtrace Count: %d" % trace.get_subtrace_count())
        print("Invocation Count: %d" % trace.get_invocation_count())
        print("Distinct Invocations Count: %d"
              % trace.get_invocation_set_count())
        exit(0)
    else:
        trace = load_invocations_from_file(args.input_file)
        print("Input file: %s" % args.input_file)
        print("Distinct Invocations Count: %d" % len(trace))

        # Gather each per-invocation metric into an array so the summary
        # statistics below can be computed uniformly.
        metrics = []
        instr = np.array(
            [invocation.metrics.instructions for invocation in trace])
        metrics.append(("Instructions", instr))
        ipcs = np.array([invocation.metrics.ipc for invocation in trace])
        metrics.append(("IPC", ipcs))
        cycles = np.array([invocation.metrics.cycles for invocation in trace])
        metrics.append(("Cycles", cycles))
        mem_instrs = np.array(
            [invocation.metrics.mem_instrs for invocation in trace])
        metrics.append(("Memory instructions", mem_instrs))
        misses = np.array([invocation.metrics.misses for invocation in trace])
        metrics.append(("Misses", misses))

        for metric, value in metrics:
            print("Average %s per invocation: %.2f"
                  % (metric, np.average(value)))
            print("Stdev of %s per invocation: %.2f"
                  % (metric, np.std(value)))
            with warnings.catch_warnings():
                # Suppress the divide-by-zero warning when the average is 0.
                warnings.simplefilter("ignore")
                print("%%Stdev of %s per invocation: %.2f %%"
                      % (metric, 100 * np.std(value) / np.average(value)))
        exit(0)
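# Example invocations of the inspection tool above. The script name
# inspect_traces.py is an assumption; the flags and defaults come directly
# from the argparse definition in main():
#
#   python inspect_traces.py -t trace chopstix.trace
#   python inspect_traces.py -t ipc perf_invok.csv
#   python inspect_traces.py perf_invok.csv   # "ipc" is the default type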
def load_function_data(results_path, benchmark):
    with open(os.path.join(results_path, "functions.txt")) as file:
        for line in file.readlines()[1:]:  # skip the header row
            _, name, _, _, weight, _, _ = line.split("\t")
            # Strip the trailing "%" and normalize to a 0-1 fraction.
            weight = float(weight[:-1]) / 100
            cluster_info = ClusteringInformation.from_file(
                os.path.join(results_path, name + "_cluster.json"))
            invocations = load_invocations_from_file(
                os.path.join(results_path, name, "benchmark.csv"))
            function = Function(cluster_info, invocations, weight)
            benchmark.add_function(name, function)
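# load_function_data() above implies this results directory layout,
# reconstructed from its os.path.join calls (a sketch, not confirmed by any
# other part of the source):
#
#   results_path/
#     functions.txt          # tab-separated table, one row per function
#     <name>_cluster.json    # clustering information for function <name>
#     <name>/benchmark.csv   # per-invocation measurements for <name>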
def use_ipc_dbscan(args):
    if args.epsilon is not None:
        # The guard rejects epsilon <= 0, so the message says "> 0" (the
        # original said ">= 0", contradicting the check).
        if args.epsilon <= 0:
            chop_print("ERROR: Epsilon should be > 0")
            exit(1)

    invocations = load_invocations_from_file(args.perf_invok_csv)
    cluster_info = dbscan_ipc(
        invocations,
        args.epsilon,
        plot_path=args.plot_path,
        benchmark_name=args.benchmark_name,
        function_name=args.function_name,
    )
    cluster_info.to_file(args.output)
    chop_print("Results written to %s" % args.output)
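# In DBSCAN, epsilon is the maximum distance between two samples for one to
# be considered in the neighborhood of the other, which is why it must be
# strictly positive. The guard above only validates epsilon when it is
# provided, which suggests (an assumption, not confirmed here) that passing
# None lets dbscan_ipc choose a default value itself.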
def use_ipc(args):
    invocations = load_invocations_from_file(args.perf_csv)
    cluster_info = dbscan_ipc(invocations, args.epsilon)
    cluster_info.to_file(args.output)