def gen_mpstats(benchmarks): print "parsing..." jgraph_input = [] stacks = [] for benchmark in benchmarks: bars = [] for config in configs: print " %s %s" % (benchmark, config) filenames = glob.glob(benchmark + "/" + benchmark + "*-" + config + "-*.xterm") for filename in filenames: stats = get_mpstats(filename) stats = stats[12:] stats.reverse() config_label = string.split(config, "-")[0] bar = [config_label] + mfgraph.stack_bars(stats) bars.append(bar) stacks.append([benchmark] + bars) jgraph_input.append( mfgraph.stacked_bar_graph( stacks, bar_segment_labels=["idle", "wait", "system", "user"], title="utilization", ylabel="percent", colors=[".8 .8 .8", "0 .5 0", "0 0 1", "1 0 0"], patterns=["stripe 45", "solid", "solid", "solid"])) column_label = "CPU minor-faults major-faults cross-calls interrupts interrupts-as-threads context-switches involuntary-context-switches migrations mutex-spins read-write-spins system-calls usr sys wt idl" column_name = string.split(column_label) column_name = map(string.strip, column_name) for column in range(1, 12): stacks = [] for benchmark in benchmarks: bars = [] for config in configs: filenames = glob.glob(benchmark + "/" + benchmark + "*-" + config + "-*.xterm") for filename in filenames: stats = get_mpstats(filename) config_label = string.split(config, "-")[0] bars.append([config_label, stats[column]]) stacks.append([benchmark] + bars) jgraph_input.append( mfgraph.stacked_bar_graph( stacks, title=column_name[column], ylabel="operations/second (for each processor)", colors=["0 0 1"], patterns=["solid"])) return jgraph_input
def gen_mpstats(benchmarks): print "parsing..." jgraph_input = [] stacks = []; for benchmark in benchmarks: bars = [] for config in configs: print " %s %s" % (benchmark, config) filenames = glob.glob(benchmark + "/" + benchmark + "*-" + config + "-*.xterm") for filename in filenames: stats = get_mpstats(filename) stats = stats[12:] stats.reverse() config_label = string.split(config, "-")[0] bar = [config_label] + mfgraph.stack_bars(stats) bars.append(bar) stacks.append([benchmark] + bars) jgraph_input.append(mfgraph.stacked_bar_graph(stacks, bar_segment_labels = ["idle", "wait", "system", "user"], title = "utilization", ylabel = "percent", colors = [".8 .8 .8", "0 .5 0", "0 0 1", "1 0 0"], patterns = ["stripe 45", "solid", "solid", "solid"])) column_label = "CPU minor-faults major-faults cross-calls interrupts interrupts-as-threads context-switches involuntary-context-switches migrations mutex-spins read-write-spins system-calls usr sys wt idl" column_name = string.split(column_label) column_name = map(string.strip, column_name) for column in range(1, 12): stacks = [] for benchmark in benchmarks: bars = [] for config in configs: filenames = glob.glob(benchmark + "/" + benchmark + "*-" + config + "-*.xterm") for filename in filenames: stats = get_mpstats(filename) config_label = string.split(config, "-")[0] bars.append([config_label, stats[column]]) stacks.append([benchmark] + bars) jgraph_input.append(mfgraph.stacked_bar_graph(stacks, title = column_name[column], ylabel = "operations/second (for each processor)", colors = ["0 0 1"], patterns = ["solid"])) return jgraph_input
def gen_scale(benchmarks): configs = ["1p", "2p", "4p", "8p", "16p"] base_config = "1p" parameter = "Ruby_cycles" stacks = [] print "parsing..." for benchmark in benchmarks: assoc_data = {} for config in configs: sys.stderr.write(" %s %s\n" % (benchmark, config)) numbers = [] filenames = glob.glob(benchmark + "/*-" + config + "-*.stats") for filename in filenames: lines = mfgraph.grep(filename, parameter) line = lines[0] numbers.append(float(string.split(line)[1])) med = mfgraph.median(numbers) assoc_data[config] = med mfgraph.normalize(assoc_data, base_config) bars = [] stack_data = [benchmark] for config in configs: bars.append([config, assoc_data[config]]) stacks.append([benchmark] + bars) print "done." return [ mfgraph.stacked_bar_graph( stacks, title="Scalability", ylabel="normalized runtime", colors=["0 0 1"], patterns=["solid"] ) ]
def generate_cpi(jgraphs, results_dir): data = [] files = glob.glob("%s/*.opal" % results_dir) #print files for file in files: print file cpi = get_cpi(file) width = get_issue_width(file) data.append([width, cpi]) #data.sort() #cpi_data = mfgraph.merge_data(group_title = "CPI", data = data) cpi_bars = make_bars(fields=["CPI"], data=data) cpi_bars.sort() cpi_bars.insert(0, "CPI") graph_bars = [cpi_bars] #draw_std_line(jgraphs, #"CPI vs. Instruction Window Size", #"Issue Window Size", #"CPI", #lines) #print graph_bars jgraphs.append( mfgraph.stacked_bar_graph( graph_bars, title="CPI vs Window Size", bar_segment_labels=["CPI"], ylabel="CPI", xsize=4.0, ysize=1.778, ))
def gen_scale(benchmarks): configs = ["1p", "2p", "4p", "8p", "16p"] base_config = "1p" parameter = "Ruby_cycles" stacks = []; print "parsing..." for benchmark in benchmarks: assoc_data = {}; for config in configs: sys.stderr.write(" %s %s\n" % (benchmark, config)) numbers = [] filenames = glob.glob(benchmark + "/*-" + config + "-*.stats") for filename in filenames: lines = mfgraph.grep(filename, parameter); line = lines[0] numbers.append(float(string.split(line)[1])) med = mfgraph.median(numbers) assoc_data[config] = med; mfgraph.normalize(assoc_data, base_config) bars = [] stack_data = [benchmark] for config in configs: bars.append([config, assoc_data[config]]) stacks.append([benchmark] + bars) print "done." return [mfgraph.stacked_bar_graph(stacks, title = "Scalability", ylabel = "normalized runtime", colors = ["0 0 1"], patterns = ["solid"])]
def generate_reasons_for_fetch_stall(jgraphs, results_dir):
    """Append a stacked graph of fetch-stall reasons across *.opal files.

    jgraphs: list of jgraph input strings (mutated in place).
    results_dir: directory containing the *.opal files.
    """
    stall_fields = ["I-Cache Miss", "Squash", "I-TLB Miss",
                    "Window Full", "Fetch Barrier"]
    data = []
    files = glob.glob("%s/*.opal" % results_dir)
    for file in files:
        # renamed local: original bound the builtin name `tuple`
        stall_tuple = get_reasons_for_fetch_stall(file)
        data.append(stall_tuple)
    fetch_data = make_bars(fields=stall_fields, data=data)
    fetch_bars = make_stacked(fetch_data)
    fetch_bars.sort()  # order bars by window width
    fetch_bars.insert(0, "Window Width")
    graph_bars = [fetch_bars]
    jgraphs.append(mfgraph.stacked_bar_graph(
        graph_bars,
        title="Reasons for Fetch Stall vs. Window Width",
        bar_segment_labels=stall_fields,
        ylabel="%",
        xsize=4.0,
        ysize=1.778))
def generate_time_breakdown(jgraphs, results_dir):
    """Append a stacked execution-time-breakdown graph for ECperf results.

    jgraphs: list of jgraph input strings (mutated in place).
    results_dir: directory scanned by get_time_breakdown_data().
    """
    # Gather ECperf data
    tuple_list = get_time_breakdown_data(results_dir)
    bar_fields = ["usr", "sys", "wt", "idl", "gc_idle"]
    ecperf_data = make_bars(fields=bar_fields, data=tuple_list)
    ecperf_bars = make_stacked(ecperf_data)
    ecperf_bars.sort()
    ecperf_bars.insert(0, "ECperf")
    ecperf_data.sort()
    ecperf_data.insert(0, "ECperf")
    graph_bars = [ecperf_bars]
    jgraphs.append(mfgraph.stacked_bar_graph(
        graph_bars,
        title="",
        bar_segment_labels=["User", "System", "I/O", "Idle", "GC Idle"],
        ylabel="Execution Time (%)",
        ymax=100,
        ymin=0,
        xsize=4.0,
        ysize=1.778,
        title_fontsize="12",
        label_fontsize="9",
        bar_name_font_size="9",
        legend_fontsize="9",
        stack_name_font_size="10",
        stack_name_location=9,
        bar_space=1.2,
        colors=["1 1 1", ".8 .8 .8", ".6 .6 .6", ".4 .4 .4", "0 0 0"]))
def generate_cpi(jgraphs, results_dir): data = [] files = glob.glob("%s/*.opal" % results_dir) #print files for file in files: print file cpi = get_cpi(file) width = get_issue_width(file) data.append([width, cpi]) #data.sort() #cpi_data = mfgraph.merge_data(group_title = "CPI", data = data) cpi_bars = make_bars(fields=["CPI"], data=data) cpi_bars.sort() cpi_bars.insert(0, "CPI") graph_bars = [cpi_bars] #draw_std_line(jgraphs, #"CPI vs. Instruction Window Size", #"Issue Window Size", #"CPI", #lines) #print graph_bars jgraphs.append(mfgraph.stacked_bar_graph(graph_bars, title = "CPI vs Window Size", bar_segment_labels = ["CPI"], ylabel = "CPI", xsize = 4.0, ysize = 1.778, ))
def touched_by(): stats = [ "touched_by_block_address:", "touched_by_weighted_block_address:", "touched_by_macroblock_address:", "touched_by_weighted_macroblock_address:", # "touched_by_pc_address:", # "touched_by_weighted_pc_address:", ] stats_names = { "touched_by_block_address:" : "(a) Percent blocks touched by n processors", "touched_by_weighted_block_address:": "(b) Percent of misses to blocks touched by n processors", "touched_by_macroblock_address:": "(c) Percent of macroblocks touched by n processors", "touched_by_weighted_macroblock_address:": "(d) Percent of misses to macroblocks touched by n processors", } jgraph_input = [] cols = 1 row_space = 2.9 col_space = 3 num = 0 for stat in stats: bars = [] for benchmark in benchmarks: print stat, benchmark group = [benchmark_names[benchmark]] filenames = glob.glob("%s/*trace-profiler*.stats" % benchmark) for filename in filenames: line = mfgraph.grep(filename, stat)[0] line = string.replace(line, "]", "") line = string.replace(line, "[", "") data = string.split(line)[2:] data = map(float, data) normalize_list(data) for index in range(len(data)): if index+1 in [1, 4, 8, 12, 16]: group.append(["%d" % (index+1), data[index]*100.0]) else: group.append(["", data[index]*100.0]) bars.append(group) jgraph_input.append(mfgraph.stacked_bar_graph(bars, title = stats_names[stat], xsize = 6.5, ysize = 2, xlabel = "", ylabel = "", stack_space = 3, x_translate = (num % cols) * col_space, y_translate = (num / cols) * -row_space, )) num += 1 mfgraph.run_jgraph("\n".join(jgraph_input), "touched-by")
def gen_misses(benchmarks): configs = [ "1p-MOSI_bcast_opt", "2p-MOSI_bcast_opt", "4p-MOSI_bcast_opt", "8p-MOSI_bcast_opt", "16p-MOSI_bcast_opt" ] parameters = [ "Request_type_IFETCH", "Request_type_LD", "Request_type_ST", "Request_type_ATOMIC" ] stacks = [] for benchmark in benchmarks: bars = [] for config in configs: print " %s %s" % (benchmark, config) filenames = glob.glob(benchmark + "/*-" + config + "-*.stats") for filename in filenames: numbers = [] lines = mfgraph.grep(filename, "instruction_executed") line = string.split(lines[0]) insn = long(line[1]) for parameter in parameters: lines = mfgraph.grep(filename, parameter) line = string.split(lines[0]) map(string.strip, line) numbers.append(1000.0 * (float(line[1]) / insn)) numbers = mfgraph.stack_bars(numbers) config_label = string.split(config, "-")[0] bars.append([config_label] + numbers) stacks.append([benchmark] + bars) labels = [] for label in parameters: labels.append(string.split(label, "_")[2]) return [ mfgraph.stacked_bar_graph( stacks, bar_segment_labels=labels, title="Breakdown of misses", ylabel="misses per thousand instructions", patterns=["solid"], ) ]
def gen_sharing(benchmarks, normalize=1): configs = ["1p", "2p", "4p", "8p", "16p"] stacks = [] for benchmark in benchmarks: bars = [] for config in configs: print " %s %s" % (benchmark, config) filenames = glob.glob(benchmark + "/*-" + config + "-*.stats") for filename in filenames: lines = mfgraph.grep(filename, "instruction_executed") line = string.split(lines[0]) insn = long(line[1]) numbers = get_sharing(filename) if normalize: sum = reduce(lambda x, y: x + y, numbers) for index in range(len(numbers)): numbers[index] = (numbers[index] / sum) * 100.0 else: for index in range(len(numbers)): numbers[index] = (numbers[index] / float(insn)) * 1000.0 numbers = mfgraph.stack_bars(numbers) bars.append([config] + numbers) stacks.append([benchmark] + bars) if normalize: y_axis_label = "percent of misses" else: y_axis_label = "misses per thousand instructions", return [ mfgraph.stacked_bar_graph( stacks, bar_segment_labels=labels, title="Breakdown of misses", ylabel=y_axis_label, patterns=[ "stripe -45", "stripe -45", "stripe -45", "solid", "solid" ], ) ]
def generate_macro_bar():
    """Graph normalized performance of coherence protocols (16p, 1600 BW),
    normalized to MOSI_bcast_opt_4, with error bars from the stddev.

    Writes the graph via mfgraph.run_jgraph("bash-macro-talk-bars").
    Expects module globals benchmarks/protocol_name/workload_name and get_data.
    """
    processor = 16
    bandwidth = 1600
    stat = "Ruby_cycles"
    stacks = []
    for benchmark in benchmarks[1:]:  # first benchmark deliberately skipped
        bars = []
        modules = ["MOSI_bcast_opt_4", "MOSI_GS_4", "MOSI_mcast_aggr_4"]
        norm = mfgraph.average(get_data(benchmark, processor=processor,
                                        module="MOSI_bcast_opt_4",
                                        bandwidth=bandwidth, stat=stat))
        for module in modules:
            if module == "MOSI_mcast_aggr_4":
                bars.append(["", 0])  # placeholder bar, no data plotted
            else:
                data = get_data(benchmark, processor=processor, module=module,
                                bandwidth=bandwidth, stat=stat)
                value = mfgraph.average(data)
                stddev = mfgraph.stddev(data)
                # NOTE(review): the original had an if/else intending to plot
                # error bars only when stddev exceeded 1% of the value, but
                # both branches were identical; the unconditional form below
                # preserves that actual behavior.
                bars.append([protocol_name[module],
                             [norm / value,
                              norm / (value + stddev),
                              norm / (value - stddev)]])
        stacks.append([workload_name[benchmark]] + bars)
    jgraph_input = mfgraph.stacked_bar_graph(
        stacks,
        colors=["1 0 0", "0 .5 0", "0 0 1", "0 1 1", "1 0 1"],
        patterns=["solid", "stripe -45"],
        ymax=1.5,
        xsize=2.7,
        ysize=2.3,
        label_fontsize="9",
        hash_label_fontsize="9",
        stack_name_font_size="9",
        bar_name_font_size="9",
        bar_name_rotate=90.0,
        stack_name_location=28.0,
        ylabel="normalized performance",
        yhash=0.2,
        ymhash=1)
    mfgraph.run_jgraph(jgraph_input, "bash-macro-talk-bars")
def gen_sharing_histo():
    """Graph GETS/GETX sharer-count histograms as percent of all misses,
    bucketed into 0/1/2/3-7/8+ sharers; writes via run_jgraph("sharers").

    Expects module globals benchmarks/benchmark_names and helpers
    get_histo/get_data.
    """
    bars = []
    for benchmark in benchmarks:
        filenames = glob.glob("*/%s-gs320*.stats" % benchmark)
        if len(filenames) != 1:
            continue  # require exactly one matching stats file
        filename = filenames[0]
        gets_dist = get_histo(filename, "gets_sharing_histogram:")
        getx_dist = get_histo(filename, "getx_sharing_histogram:")
        total_misses = get_data(filename, "Total_misses:")
        gets_dist = map(lambda x: 100.0 * (float(x) / total_misses), gets_dist)
        gets_dist += [0] * 14  # fill in the end with zeros
        getx_dist = map(lambda x: 100.0 * (float(x) / total_misses), getx_dist)
        # bucket sharer counts: 0, 1, 2, 3-7, 8+
        getx_dist = getx_dist[0:3] \
            + [reduce(lambda x, y: x + y, getx_dist[3:7])] \
            + [reduce(lambda x, y: x + y, getx_dist[7:])]
        gets_dist = gets_dist[0:3] \
            + [reduce(lambda x, y: x + y, gets_dist[3:7])] \
            + [reduce(lambda x, y: x + y, gets_dist[7:])]
        labels = ["0", "1", "2", "3-7", "8+"]
        bars.append([benchmark_names[benchmark]]
                    + map(lambda l, gets, getx: (l, gets + getx, getx),
                          labels, gets_dist, getx_dist))
    jgraph_input = mfgraph.stacked_bar_graph(
        bars,
        bar_segment_labels=["Get shared", "Get exclusive"],
        xsize=7,
        ysize=3,
        ylabel="Percent of all misses",
        colors=["1 0 0", "0 0 1"],
        patterns=["solid", "stripe -45"],
        bar_name_font_size="10",
        stack_name_location=10.0,
        legend_x="31",
        legend_y="87")
    mfgraph.run_jgraph(jgraph_input, "sharers")
def gen_protocol(benchmarks): # configs = ["8p-perfect", "8p-MOSI_bcast_opt", "8p-MOSI_GS"] configs = ["8p-MOSI_bcast_opt", "8p-MOSI_GS"] base_config = "8p-MOSI_bcast_opt" parameter = "Ruby_cycles" stacks = []; print "parsing..." for benchmark in benchmarks: assoc_data = {}; for config in configs: sys.stderr.write(" %s %s\n" % (benchmark, config)) numbers = [] filenames = glob.glob(benchmark + "/*-" + config + "-*.stats") for filename in filenames: lines = mfgraph.grep(filename, parameter); line = lines[0] numbers.append(float(string.split(line)[1])) print numbers med = mfgraph.median(numbers) stddev = mfgraph.stddev(numbers) min_val = min(numbers) max_val = max(numbers) assoc_data[config] = [med, min_val, max_val]; mfgraph.normalize(assoc_data, base_config) bars = [] stack_data = [benchmark] for config in configs: bars.append([config, assoc_data[config]]) stacks.append([benchmark] + bars) print "done." print stacks return [mfgraph.stacked_bar_graph(stacks, title = "Snooping vs Directory", ylabel = "normalized runtime", colors = [".5 .5 1"], patterns = ["solid"])]
def gen_cache_state(benchmarks): configs = ["1p", "2p", "4p", "8p", "16p"] parameters = ["GETS NP", "GETS I", "GETX NP", "GETX I", "GETX S", "GETX O"] stacks = [] for benchmark in benchmarks: bars = [] for config in configs: filenames = glob.glob(benchmark + "/*-" + config + "-*.stats") print " %s %s" % (benchmark, config), filenames numbers = [] for parameter in parameters: sum = 0 for filename in filenames: # print benchmark, config, parameter, filename lines = mfgraph.grep(filename, "instruction_executed") line = string.split(lines[0]) insn = long(line[1]) lines = mfgraph.grep(filename, parameter) for line in lines: fields = string.split(line) # print fields sum += int(fields[3]) numbers.append(float(sum) / float(insn)) numbers = mfgraph.stack_bars(numbers) bars.append([config] + numbers) stacks.append([benchmark] + bars) labels = [] for label in parameters: labels.append(label) return [ mfgraph.stacked_bar_graph( stacks, bar_segment_labels=labels, title="Cache Misses by state", ylabel="Number", ) ]
def generate_reasons_for_fetch_stall(jgraphs, results_dir):
    """Append a stacked graph of fetch-stall reasons across *.opal files.

    jgraphs: list of jgraph input strings (mutated in place).
    results_dir: directory containing the *.opal files.
    """
    stall_fields = ["I-Cache Miss", "Squash", "I-TLB Miss",
                    "Window Full", "Fetch Barrier"]
    data = []
    files = glob.glob("%s/*.opal" % results_dir)
    for file in files:
        # renamed local: original bound the builtin name `tuple`
        stall_tuple = get_reasons_for_fetch_stall(file)
        data.append(stall_tuple)
    fetch_data = make_bars(fields=stall_fields, data=data)
    fetch_bars = make_stacked(fetch_data)
    fetch_bars.sort()  # order bars by window width
    fetch_bars.insert(0, "Window Width")
    graph_bars = [fetch_bars]
    jgraphs.append(mfgraph.stacked_bar_graph(
        graph_bars,
        title="Reasons for Fetch Stall vs. Window Width",
        bar_segment_labels=stall_fields,
        ylabel="%",
        xsize=4.0,
        ysize=1.778))
def generate_reasons_for_retire_stall(jgraphs, results_dir):
    """Append a stacked graph of retire-stall reasons across *.opal files.

    jgraphs: list of jgraph input strings (mutated in place).
    results_dir: directory containing the *.opal files.
    """
    data = []
    files = glob.glob("%s/*.opal" % results_dir)
    for file in files:
        # appended directly: original bound the builtin name `tuple`
        data.append(get_reasons_for_retire_stall(file))
    retire_data = make_bars(fields=["Updating", "Squash", "Limit"], data=data)
    retire_bars = make_stacked(retire_data)
    retire_bars.sort()
    retire_bars.insert(0, "JBB")
    graph_bars = [retire_bars]
    jgraphs.append(mfgraph.stacked_bar_graph(
        graph_bars,
        title="Reasons for Retire Stall vs. Window Width",
        bar_segment_labels=["Updating", "Squash", "Limit"],
        ylabel="%",
        xsize=4.0,
        ysize=1.778))
def generate_bar_example(jgraphs):
    """Append a small demo stacked-bar graph.

    Each bar is [name, seg1, seg2, seg3]; a segment given as a 3-element list
    ([value, lo, hi]) is rendered with an error bar.
    """
    bars = [
        ["group1",
         ["bar1", 20, 10, 5],
         ["bar2", 10, 5, 2.5]],
        ["group2",
         ["bar1", 80, 40, 10],
         ["bar2", [100, 90, 110], 50, 10],  # note, this has an error bar
         ["bar3", 30, 25, 5]],
    ]
    jgraphs.append(mfgraph.stacked_bar_graph(
        bars,
        bar_segment_labels=["segment1", "segment2", "segment3"],
        xsize=5.0))
def generate_reasons_for_retire_stall(jgraphs, results_dir):
    """Append a stacked graph of retire-stall reasons across *.opal files.

    jgraphs: list of jgraph input strings (mutated in place).
    results_dir: directory containing the *.opal files.
    """
    data = []
    files = glob.glob("%s/*.opal" % results_dir)
    for file in files:
        # appended directly: original bound the builtin name `tuple`
        data.append(get_reasons_for_retire_stall(file))
    retire_data = make_bars(fields=["Updating", "Squash", "Limit"], data=data)
    retire_bars = make_stacked(retire_data)
    retire_bars.sort()
    retire_bars.insert(0, "JBB")
    graph_bars = [retire_bars]
    jgraphs.append(mfgraph.stacked_bar_graph(
        graph_bars,
        title="Reasons for Retire Stall vs. Window Width",
        bar_segment_labels=["Updating", "Squash", "Limit"],
        ylabel="%",
        xsize=4.0,
        ysize=1.778))
def gen_cache_state(benchmarks): configs = ["1p", "2p", "4p", "8p", "16p"] parameters = ["GETS NP", "GETS I", "GETX NP", "GETX I", "GETX S", "GETX O"] stacks = [] for benchmark in benchmarks: bars = [] for config in configs: filenames = glob.glob(benchmark + "/*-" + config + "-*.stats") print " %s %s" % (benchmark, config), filenames numbers = [] for parameter in parameters: sum = 0 for filename in filenames: # print benchmark, config, parameter, filename lines = mfgraph.grep(filename, "instruction_executed"); line = string.split(lines[0]) insn = long(line[1]) lines = mfgraph.grep(filename, parameter); for line in lines: fields = string.split(line) # print fields sum += int(fields[3]) numbers.append(float(sum)/float(insn)) numbers = mfgraph.stack_bars(numbers) bars.append([config] + numbers) stacks.append([benchmark] + bars) labels = [] for label in parameters: labels.append(label) return [mfgraph.stacked_bar_graph(stacks, bar_segment_labels = labels, title = "Cache Misses by state", ylabel = "Number", )]
def gen_data_size(benchmarks): configs = ["1p-MOSI_bcast_opt", "2p-MOSI_bcast_opt", "4p-MOSI_bcast_opt", "8p-MOSI_bcast_opt", "16p-MOSI_bcast_opt"] parameters = ["C GET_INSTR", "C GETS", "C GETX"] stacks = [] for benchmark in benchmarks: bars = [] for config in configs: print " %s %s" % (benchmark, config) filenames = glob.glob(benchmark + "/*-" + config + "-*.stats") for filename in filenames: numbers = [] for parameter in parameters: lines = mfgraph.grep(filename, parameter); line = string.split(lines[0]) map(string.strip, line) num = string.split(line[2], "%") num = (64L*(long(num[0])))/(1024.0*1024.0) numbers.append(num) numbers = mfgraph.stack_bars(numbers) number = reduce(lambda x,y:x+y, numbers) config_label = string.split(config, "-")[0] bars.append([config_label] + [number]) stacks.append([benchmark] + bars) # labels = [] # for label in parameters: # labels.append(string.split(label)[1]) return [mfgraph.stacked_bar_graph(stacks, # bar_segment_labels = labels, title = "Memory touched", ylabel = "Mbytes", patterns = ["solid"], xsize = 8.5, )]
def gen_sharing(benchmarks, normalize = 1): configs = ["1p", "2p", "4p", "8p", "16p"] stacks = [] for benchmark in benchmarks: bars = [] for config in configs: print " %s %s" % (benchmark, config) filenames = glob.glob(benchmark + "/*-" + config + "-*.stats") for filename in filenames: lines = mfgraph.grep(filename, "instruction_executed"); line = string.split(lines[0]) insn = long(line[1]) numbers = get_sharing(filename) if normalize: sum = reduce(lambda x,y: x+y, numbers) for index in range(len(numbers)): numbers[index] = (numbers[index] / sum) * 100.0 else: for index in range(len(numbers)): numbers[index] = (numbers[index] / float(insn)) * 1000.0 numbers = mfgraph.stack_bars(numbers) bars.append([config] + numbers) stacks.append([benchmark] + bars) if normalize: y_axis_label = "percent of misses" else: y_axis_label = "misses per thousand instructions", return [mfgraph.stacked_bar_graph(stacks, bar_segment_labels = labels, title = "Breakdown of misses", ylabel = y_axis_label, patterns = ["stripe -45", "stripe -45", "stripe -45", "solid", "solid"], )]
def generate_time_breakdown(jgraphs, results_dir):
    """Append a stacked execution-time-breakdown graph for ECperf results.

    jgraphs: list of jgraph input strings (mutated in place).
    results_dir: directory scanned by get_time_breakdown_data().
    """
    # Gather ECperf data
    tuple_list = get_time_breakdown_data(results_dir)
    bar_fields = ["usr", "sys", "wt", "idl", "gc_idle"]
    ecperf_data = make_bars(fields=bar_fields, data=tuple_list)
    ecperf_bars = make_stacked(ecperf_data)
    ecperf_bars.sort()
    ecperf_bars.insert(0, "ECperf")
    ecperf_data.sort()
    ecperf_data.insert(0, "ECperf")
    graph_bars = [ecperf_bars]
    jgraphs.append(mfgraph.stacked_bar_graph(
        graph_bars,
        title="",
        bar_segment_labels=["User", "System", "I/O", "Idle", "GC Idle"],
        ylabel="Execution Time (%)",
        ymax=100,
        ymin=0,
        xsize=4.0,
        ysize=1.778,
        title_fontsize="12",
        label_fontsize="9",
        bar_name_font_size="9",
        legend_fontsize="9",
        stack_name_font_size="10",
        stack_name_location=9,
        bar_space=1.2,
        colors=["1 1 1", ".8 .8 .8", ".6 .6 .6", ".4 .4 .4", "0 0 0"]))
def gen_sharing_histo():
    """Graph GETS/GETX sharer-count histograms as percent of all misses,
    bucketed into 0/1/2/3-7/8+ sharers; writes via run_jgraph("sharers").

    Expects module globals benchmarks/benchmark_names and helpers
    get_histo/get_data.
    """
    bars = []
    for benchmark in benchmarks:
        filenames = glob.glob("*/%s-gs320*.stats" % benchmark)
        if len(filenames) != 1:
            continue  # require exactly one matching stats file
        filename = filenames[0]
        gets_dist = get_histo(filename, "gets_sharing_histogram:")
        getx_dist = get_histo(filename, "getx_sharing_histogram:")
        total_misses = get_data(filename, "Total_misses:")
        gets_dist = map(lambda x: 100.0 * (float(x) / total_misses), gets_dist)
        gets_dist += [0] * 14  # fill in the end with zeros
        getx_dist = map(lambda x: 100.0 * (float(x) / total_misses), getx_dist)
        # bucket sharer counts: 0, 1, 2, 3-7, 8+
        getx_dist = getx_dist[0:3] \
            + [reduce(lambda x, y: x + y, getx_dist[3:7])] \
            + [reduce(lambda x, y: x + y, getx_dist[7:])]
        gets_dist = gets_dist[0:3] \
            + [reduce(lambda x, y: x + y, gets_dist[3:7])] \
            + [reduce(lambda x, y: x + y, gets_dist[7:])]
        labels = ["0", "1", "2", "3-7", "8+"]
        bars.append([benchmark_names[benchmark]]
                    + map(lambda l, gets, getx: (l, gets + getx, getx),
                          labels, gets_dist, getx_dist))
    jgraph_input = mfgraph.stacked_bar_graph(
        bars,
        bar_segment_labels=["Get shared", "Get exclusive"],
        xsize=7,
        ysize=3,
        ylabel="Percent of all misses",
        colors=["1 0 0", "0 0 1"],
        patterns=["solid", "stripe -45"],
        bar_name_font_size="10",
        stack_name_location=10.0,
        legend_x="31",
        legend_y="87")
    mfgraph.run_jgraph(jgraph_input, "sharers")
def gen_misses(benchmarks): configs = ["1p-MOSI_bcast_opt", "2p-MOSI_bcast_opt", "4p-MOSI_bcast_opt", "8p-MOSI_bcast_opt", "16p-MOSI_bcast_opt"] parameters = ["Request_type_IFETCH", "Request_type_LD", "Request_type_ST", "Request_type_ATOMIC"] stacks = [] for benchmark in benchmarks: bars = [] for config in configs: print " %s %s" % (benchmark, config) filenames = glob.glob(benchmark + "/*-" + config + "-*.stats") for filename in filenames: numbers = [] lines = mfgraph.grep(filename, "instruction_executed"); line = string.split(lines[0]) insn = long(line[1]) for parameter in parameters: lines = mfgraph.grep(filename, parameter); line = string.split(lines[0]) map(string.strip, line) numbers.append(1000.0*(float(line[1])/insn)) numbers = mfgraph.stack_bars(numbers) config_label = string.split(config, "-")[0] bars.append([config_label] + numbers) stacks.append([benchmark] + bars) labels = [] for label in parameters: labels.append(string.split(label, "_")[2]) return [mfgraph.stacked_bar_graph(stacks, bar_segment_labels = labels, title = "Breakdown of misses", ylabel = "misses per thousand instructions", patterns = ["solid"], )]
def touched_by(): stats = [ "touched_by_block_address:", "touched_by_weighted_block_address:", "touched_by_macroblock_address:", "touched_by_weighted_macroblock_address:", # "touched_by_pc_address:", # "touched_by_weighted_pc_address:", ] stats_names = { "touched_by_block_address:": "(a) Percent blocks touched by n processors", "touched_by_weighted_block_address:": "(b) Percent of misses to blocks touched by n processors", "touched_by_macroblock_address:": "(c) Percent of macroblocks touched by n processors", "touched_by_weighted_macroblock_address:": "(d) Percent of misses to macroblocks touched by n processors", } jgraph_input = [] cols = 1 row_space = 2.9 col_space = 3 num = 0 for stat in stats: bars = [] for benchmark in benchmarks: print stat, benchmark group = [benchmark_names[benchmark]] filenames = glob.glob("%s/*trace-profiler*.stats" % benchmark) for filename in filenames: line = mfgraph.grep(filename, stat)[0] line = string.replace(line, "]", "") line = string.replace(line, "[", "") data = string.split(line)[2:] data = map(float, data) normalize_list(data) for index in range(len(data)): if index + 1 in [1, 4, 8, 12, 16]: group.append(["%d" % (index + 1), data[index] * 100.0]) else: group.append(["", data[index] * 100.0]) bars.append(group) jgraph_input.append( mfgraph.stacked_bar_graph( bars, title=stats_names[stat], xsize=6.5, ysize=2, xlabel="", ylabel="", stack_space=3, x_translate=(num % cols) * col_space, y_translate=(num / cols) * -row_space, )) num += 1 mfgraph.run_jgraph("\n".join(jgraph_input), "touched-by")
def instant_sharers(): jgraph_input = [] gets_map = { "N N": 1, "Y N": 0, } getx_map = { "N N N N": 1, "N N N Y": 1, "N N Y N": 1, "N N Y Y": 1, # "N Y N N" : "X", # "N Y N Y" : "X", # "N Y Y N" : "X", "N Y Y Y": 0, "Y N N N": 0, "Y N N Y": 0, "Y N Y N": 0, "Y N Y Y": 0, # "Y Y N N" : "X", # "Y Y N Y" : "X", # "Y Y Y N" : "X", # "Y Y Y Y" : "X", } cols = 2 row_space = 2.9 col_space = 3 num = 0 bars = [] for benchmark in benchmarks: getx_dist = [0] * 16 gets_dist = [0] * 16 print benchmark group = [benchmark_names[benchmark]] filename = glob.glob("%s/*trace-profiler*.stats" % benchmark)[0] gets_mode = 0 sum = 0 for line in open(filename).readlines(): line = string.strip(line) line = string.translate(line, string.maketrans("][", " ")) # set mode str = "Total requests: " if line[0:len(str)] == str: total_requests = int(string.split(line)[2]) if line == "GETS message classifications:": gets_mode = 1 if line == "": gets_mode = 0 if gets_mode == 1: #gets key = line[0:3] if gets_map.has_key(key): parts = string.split(line) sum += int(parts[2]) # no histogram data = parts[2:3] # shift if one if gets_map[key] == 1: data = [0] + data data = map(int, data) add_list(gets_dist, data) else: #getx key = line[0:7] if getx_map.has_key(key): parts = string.split(line) sum += int(parts[4]) if len(parts) > 10: # histogram data = parts[19:] else: # no histogram data = parts[4:5] # shift if one if getx_map[key] == 1: data = [0] + data data = map(int, data) add_list(getx_dist, data) for i in range(len(getx_dist)): gets_dist[i] = 100.0 * ((gets_dist[i] + getx_dist[i]) / float(sum)) getx_dist[i] = 100.0 * (getx_dist[i] / float(sum)) getx_dist = getx_dist[0:3] + [ reduce(lambda x, y: x + y, getx_dist[3:7]) ] + [reduce(lambda x, y: x + y, getx_dist[7:])] gets_dist = gets_dist[0:3] + [ reduce(lambda x, y: x + y, gets_dist[3:7]) ] + [reduce(lambda x, y: x + y, gets_dist[7:])] print getx_dist print gets_dist print "indirections:", 100.0 - gets_dist[0] labels = ["0", "1", "2", "3-7", ">8"] ## 
labels = [] ## for i in range(16): ## if i in [0, 3, 7, 11, 15]: ## labels.append("%d" % i) ## else: ## labels.append("") bars.append([benchmark_names[benchmark]] + map(None, labels, gets_dist, getx_dist)) print bars jgraph_input.append( mfgraph.stacked_bar_graph( bars, bar_segment_labels=["Get shared", "Get exclusive"], xsize=7, ysize=3, ylabel="Percent of all misses", legend_x="25", legend_y="90", )) mfgraph.run_jgraph("\n".join(jgraph_input), "instant-sharers")
def touched_by(): stats = [ "touched_by_block_address:", "touched_by_weighted_block_address:", # "touched_by_macroblock_address:", # "touched_by_weighted_macroblock_address:", # "touched_by_supermacroblock_address:", # "touched_by_weighted_supermacroblock_address:", # "last_n_block_touched_by", # "last_n_macroblock_touched_by", ] yaxis_names = [ "Percent of all blocks", "Percent of all misses", "Percent of all macroblocks", "Percent of all misses", "Percent of all macroblocks", "Percent of all misses", # "Percent", # "Percent", ] stats_names = { "touched_by_block_address:" : "(a) Percent of data blocks (64B) touched by n processors", "touched_by_weighted_block_address:": "(b) Percent of misses to data blocks (64B) touched by n processors", # "touched_by_macroblock_address:": "(c) Percent of data macroblocks (1024B) touched by n processors", # "touched_by_weighted_macroblock_address:": "(d) Percent of misses to data macroblocks (1024B) touched by n processors", # "touched_by_supermacroblock_address:": "(e) Percent of 4kB macroblocks touched by n processors", # "touched_by_weighted_supermacroblock_address:": "(f) Percent of misses to 4kB macroblocks touched by n processors", # "last_n_block_touched_by" : "(e) Percent of misses touched by n processors in the last 64 misses to the block", # "last_n_macroblock_touched_by" : "(f) Percent of misses touched by n processors in the last 64 misses to the macroblock", } jgraph_input = [] cols = 1 row_space = 2.2 col_space = 3 num = 0 for stat in stats: bars = [] for benchmark in benchmarks: print stat, benchmark group = [benchmark_names[benchmark]] filenames = glob.glob("*/%s-*gs320*.stats" % benchmark) print benchmark, filenames for filename in filenames: line = mfgraph.grep(filename, stat)[0] line = string.replace(line, "]", "") line = string.replace(line, "[", "") data = string.split(line)[2:] # data = string.split(line)[14:] data = map(float, data) print data # new_data = [] # sum = 0.0 # for item in data: # 
new_data.append(item + sum) # sum += item # data = new_data print data # for index in range(len(data)): # data[index] = data[index]/sum print data normalize_list(data) for index in range(len(data)): if index+1 in [1, 4, 8, 12, 16]: group.append(["%d" % (index+1), data[index]*100.0]) else: group.append(["", data[index]*100.0]) bars.append(group) jgraph_input.append(mfgraph.stacked_bar_graph(bars, title = stats_names[stat], title_fontsize = "12", title_font = "Times-Roman", title_y = -25.0, xsize = 6.5, ysize = 1.5, xlabel = "", ymax = 100.01, ylabel = yaxis_names[num], colors = [".5 .5 .5"], patterns = ["solid"], stack_name_location = 12.0, stack_space = 3, x_translate = (num % cols) * col_space, y_translate = (num / cols) * -row_space, )) num += 1 mfgraph.run_jgraph("\n".join(jgraph_input), "touched-by")
def touched_by():
    """Graph the fraction of data blocks (and of misses) touched by n processors.

    Parses */<benchmark>-*gs320*.stats files with mfgraph.grep, normalizes
    each touched-by histogram, and writes one stacked-bar panel per
    statistic to a jgraph file named "touched-by".
    """
    # NOTE(review): duplicate definition -- an identical touched_by() appears
    # earlier in this file; this later copy shadows it at import time.
    stats = [
        "touched_by_block_address:",
        "touched_by_weighted_block_address:",
        # "touched_by_macroblock_address:",
        # "touched_by_weighted_macroblock_address:",
        # "touched_by_supermacroblock_address:",
        # "touched_by_weighted_supermacroblock_address:",
        # "last_n_block_touched_by",
        # "last_n_macroblock_touched_by",
    ]
    # Y-axis captions indexed by graph number; the extra entries correspond
    # to the commented-out statistics above.
    yaxis_names = [
        "Percent of all blocks",
        "Percent of all misses",
        "Percent of all macroblocks",
        "Percent of all misses",
        "Percent of all macroblocks",
        "Percent of all misses",
        # "Percent",
        # "Percent",
    ]
    # Per-statistic panel titles.
    stats_names = {
        "touched_by_block_address:": "(a) Percent of data blocks (64B) touched by n processors",
        "touched_by_weighted_block_address:": "(b) Percent of misses to data blocks (64B) touched by n processors",
        # "touched_by_macroblock_address:": "(c) Percent of data macroblocks (1024B) touched by n processors",
        # "touched_by_weighted_macroblock_address:": "(d) Percent of misses to data macroblocks (1024B) touched by n processors",
        # "touched_by_supermacroblock_address:": "(e) Percent of 4kB macroblocks touched by n processors",
        # "touched_by_weighted_supermacroblock_address:": "(f) Percent of misses to 4kB macroblocks touched by n processors",
        # "last_n_block_touched_by" : "(e) Percent of misses touched by n processors in the last 64 misses to the block",
        # "last_n_macroblock_touched_by" : "(f) Percent of misses touched by n processors in the last 64 misses to the macroblock",
    }
    jgraph_input = []
    # Panel layout: a single column, each panel translated down by row_space.
    cols = 1
    row_space = 2.2
    col_space = 3
    num = 0  # index of the current panel (selects y-axis label and offsets)
    for stat in stats:
        bars = []
        for benchmark in benchmarks:
            print stat, benchmark
            group = [benchmark_names[benchmark]]
            filenames = glob.glob("*/%s-*gs320*.stats" % benchmark)
            print benchmark, filenames
            for filename in filenames:
                # First matching stats line, with '[' and ']' removed.
                line = mfgraph.grep(filename, stat)[0]
                line = string.replace(line, "]", "")
                line = string.replace(line, "[", "")
                data = string.split(line)[2:]
                # data = string.split(line)[14:]
                data = map(float, data)
                print data
                # new_data = []
                # sum = 0.0
                # for item in data:
                #     new_data.append(item + sum)
                #     sum += item
                # data = new_data
                print data
                # for index in range(len(data)):
                #     data[index] = data[index]/sum
                print data
                # Scale the histogram in place so its entries sum to 1.
                normalize_list(data)
                for index in range(len(data)):
                    # Label only selected processor counts to keep the axis legible.
                    if index + 1 in [1, 4, 8, 12, 16]:
                        group.append(["%d" % (index + 1), data[index] * 100.0])
                    else:
                        group.append(["", data[index] * 100.0])
            bars.append(group)
        jgraph_input.append(
            mfgraph.stacked_bar_graph(
                bars,
                title=stats_names[stat],
                title_fontsize="12",
                title_font="Times-Roman",
                title_y=-25.0,
                xsize=6.5,
                ysize=1.5,
                xlabel="",
                ymax=100.01,
                ylabel=yaxis_names[num],
                colors=[".5 .5 .5"],
                patterns=["solid"],
                stack_name_location=12.0,
                stack_space=3,
                x_translate=(num % cols) * col_space,
                y_translate=(num / cols) * -row_space,
            ))
        num += 1
    mfgraph.run_jgraph("\n".join(jgraph_input), "touched-by")
def instant_sharers(): jgraph_input = [] gets_map = { "N N" : 1, "Y N" : 0, } getx_map = { "N N N N" : 1, "N N N Y" : 1, "N N Y N" : 1, "N N Y Y" : 1, # "N Y N N" : "X", # "N Y N Y" : "X", # "N Y Y N" : "X", "N Y Y Y" : 0, "Y N N N" : 0, "Y N N Y" : 0, "Y N Y N" : 0, "Y N Y Y" : 0, # "Y Y N N" : "X", # "Y Y N Y" : "X", # "Y Y Y N" : "X", # "Y Y Y Y" : "X", } cols = 2 row_space = 2.9 col_space = 3 num = 0 bars = [] for benchmark in benchmarks: getx_dist = [0] * 16 gets_dist = [0] * 16 print benchmark group = [benchmark_names[benchmark]] filename = glob.glob("%s/*trace-profiler*.stats" % benchmark)[0] gets_mode = 0 sum = 0 for line in open(filename).readlines(): line = string.strip(line) line = string.translate(line, string.maketrans("][", " ")) # set mode str = "Total requests: " if line[0:len(str)] == str: total_requests = int(string.split(line)[2]) if line == "GETS message classifications:": gets_mode = 1; if line == "": gets_mode = 0; if gets_mode == 1: #gets key = line[0:3] if gets_map.has_key(key): parts = string.split(line) sum += int(parts[2]) # no histogram data = parts[2:3] # shift if one if gets_map[key] == 1: data = [0] + data data = map(int, data) add_list(gets_dist, data) else: #getx key = line[0:7] if getx_map.has_key(key): parts = string.split(line) sum += int(parts[4]) if len(parts) > 10: # histogram data = parts[19:] else: # no histogram data = parts[4:5] # shift if one if getx_map[key] == 1: data = [0] + data data = map(int, data) add_list(getx_dist, data) for i in range(len(getx_dist)): gets_dist[i] = 100.0 * ((gets_dist[i]+getx_dist[i]) / float(sum)) getx_dist[i] = 100.0 * (getx_dist[i] / float(sum)) getx_dist = getx_dist[0:3] + [reduce(lambda x,y:x+y, getx_dist[3:7])] + [reduce(lambda x,y:x+y, getx_dist[7:])] gets_dist = gets_dist[0:3] + [reduce(lambda x,y:x+y, gets_dist[3:7])] + [reduce(lambda x,y:x+y, gets_dist[7:])] print getx_dist print gets_dist print "indirections:", 100.0-gets_dist[0] labels = ["0", "1", "2", "3-7", ">8"] ## labels = [] 
## for i in range(16): ## if i in [0, 3, 7, 11, 15]: ## labels.append("%d" % i) ## else: ## labels.append("") bars.append([benchmark_names[benchmark]] + map(None, labels, gets_dist, getx_dist)) print bars jgraph_input.append(mfgraph.stacked_bar_graph(bars, bar_segment_labels = ["Get shared", "Get exclusive"], xsize = 7, ysize = 3, ylabel = "Percent of all misses", legend_x = "25", legend_y = "90", )) mfgraph.run_jgraph("\n".join(jgraph_input), "instant-sharers")
def generate_bar_example():
    """Build a log-scale stacked-bar jgraph of total runtimes for the four
    hash-table configurations.

    Reads the module-level result lists hash_table0_results ..
    hash_table3_results (one entry per benchmark; the module-level `i`
    gives the benchmark count -- NOTE(review): confirm).  Runs longer than
    the 43200 s cutoff are clipped to the cutoff and annotated with a
    rotated "inf" label at the bar's x position.

    Returns: a one-element list holding the jgraph text (graph plus the
    "inf" annotation commands).
    """
    TIMEOUT = 43200       # 12-hour experiment cutoff (seconds)
    BAR_SPACING = 1.1     # x distance between bars within one benchmark group
    GROUP_SPACING = 2.15  # x distance from a group's last bar to the next group
    # The four per-configuration result tables; previously this body was
    # copy-pasted once per table.
    result_tables = [hash_table0_results, hash_table1_results,
                     hash_table2_results, hash_table3_results]
    stacks = []
    tempval = 0.25        # x position of the current bar (used for "inf" labels)
    output_list = ""      # extra jgraph commands for the "inf" annotations
    for j in range(i):
        bars = []
        for table_index in range(len(result_tables)):
            value = result_tables[table_index][j]
            numbers = []
            if float(value) > TIMEOUT:
                # Clip timed-out runs to the cutoff and mark them "inf".
                output_list = output_list + "graph 2 newstring fontsize 9 x " + str(tempval) + " y 107 hjc vjt rotate 90.0 : " + "inf" + "\n"
                numbers.append(TIMEOUT)
            else:
                numbers.append(value)
            bars.append([""] + mfgraph.stack_bars(numbers))
            # Advance to the next bar; after the last bar of a group the
            # larger group gap is applied below instead.
            if table_index < len(result_tables) - 1:
                tempval += BAR_SPACING
        stacks.append([benchmarks[j]] + bars)
        tempval += GROUP_SPACING
    return [mfgraph.stacked_bar_graph(stacks,
                                      bar_segment_labels = labels,
                                      title = " ",
                                      title_y = 140,
                                      title_fontsize = "5",
                                      ylabel = "Total Time (sec)",
                                      # xlabel = "Number of pointer jumps",
                                      colors = ["0.375 0.375 0.375", "0.875 0.875 0.875", "0 0 0", "0.625 0.625 0.625"],
                                      legend_x = "2",
                                      legend_y = "125",
                                      legend_type = "Manual",
                                      legend_type_x = [0, 20, 0, 20],
                                      legend_type_y = [10, 10, 0, 0],
                                      clip = 300,
                                      ysize = 1.1,
                                      xsize = 6,
                                      ymax = 43200,
                                      patterns = ["solid"],
                                      stack_name_rotate = 25.0,
                                      stack_name_font_size = "6",  # benchmark names
                                      label_fontsize = "6",        # y-axis name
                                      legend_fontsize = "6",       # legend entries
                                      ylog = 10,
                                      ymin = 10,
                                      yhash_marks = [100, 1000, 10000, 43200],
                                      yhash_names = ["100", "1000", "10000", "43200"],
                                      ) + output_list]