Example #1
def touched_by():
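    # For each "touched_by" statistic, grep every benchmark's *trace-profiler*.stats
    # file, normalize the per-processor-count histogram, and emit a stacked bar
    # graph of the percentage of blocks/misses touched by n processors.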
    stats = [
        "touched_by_block_address:",
        "touched_by_weighted_block_address:",
        "touched_by_macroblock_address:",
        "touched_by_weighted_macroblock_address:",
        #    "touched_by_pc_address:",
        #    "touched_by_weighted_pc_address:",
        ]
    
    stats_names = {
        "touched_by_block_address:" : "(a) Percent blocks touched by n processors",
        "touched_by_weighted_block_address:": "(b) Percent of misses to blocks touched by n processors",
        "touched_by_macroblock_address:": "(c) Percent of macroblocks touched by n processors",
        "touched_by_weighted_macroblock_address:": "(d) Percent of misses to macroblocks touched by n processors",
        }

    jgraph_input = []

    cols = 1
    row_space = 2.9
    col_space = 3

    num = 0
    for stat in stats:
        bars = []
        for benchmark in benchmarks:
            print stat, benchmark
            group = [benchmark_names[benchmark]]
            filenames = glob.glob("%s/*trace-profiler*.stats" % benchmark)
            for filename in filenames:
                line = mfgraph.grep(filename, stat)[0]
                line = string.replace(line, "]", "")
                line = string.replace(line, "[", "")
                data = string.split(line)[2:]
                data = map(float, data)
                normalize_list(data)
                for index in range(len(data)):
                    if index+1 in [1, 4, 8, 12, 16]:
                        group.append(["%d" % (index+1), data[index]*100.0])
                    else:
                        group.append(["", data[index]*100.0])

            bars.append(group)

        jgraph_input.append(mfgraph.stacked_bar_graph(bars,
                                                      title = stats_names[stat],
                                                      xsize = 6.5,
                                                      ysize = 2,
                                                      xlabel = "",
                                                      ylabel = "",
                                                      stack_space = 3,
                                                      x_translate = (num % cols) * col_space,
                                                      y_translate = (num / cols) * -row_space,
                                                      ))
        num += 1

    mfgraph.run_jgraph("\n".join(jgraph_input), "touched-by")
Example #2
def run_trace(name, pred_lst):
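    # One scatter plot per benchmark: control bandwidth vs. indirections for each
    # predictor in pred_lst, tiled into a grid and written to a single jgraph file
    # under /p/multifacet/papers/mask-prediction/graphs.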
    jgraph_input = []
    cols = 2
    row_space = 2.8
    col_space = 2.8
    num = 0
    for bench in benchmarks:
        lines = []
        for predictor in pred_lst:
            predictor_data = get_data(bench, predictor)[0]
            (predictor, bandwidth, runtime) = predictor_data
            if not (predictor in pred_lst):
                continue
            line = [
                predictor_map[predictor], [float(bandwidth),
                                           float(runtime)]
            ]
            #        line = [predictor, [float(bandwidth), float(runtime)]]
            lines.append(line)

        legend_hack = ""
        if (num == 4):
            legend_hack = "yes"

        print legend_hack

        jgraph_input.append(
            mfgraph.line_graph(
                lines,
                title=benchmark_map[bench],
                title_fontsize="12",
                title_font="Times-Roman",
                xsize=1.8,
                ysize=1.8,
                xlabel="control bandwidth (normalized to Broadcast)",
                ylabel="indirections (normalized to Directory)",
                label_fontsize="10",
                label_font="Times-Roman",
                legend_fontsize="10",
                legend_font="Times-Roman",
                linetype=["none"],
                marktype=["circle", "box", "diamond", "triangle", "triangle"],
                mrotate=[0, 0, 0, 0, 180],
                colors=["0 0 0"],
                xmin=0,
                x_translate=(num % cols) * col_space,
                y_translate=(num / cols) * -row_space,
                line_thickness=1.0,
                legend_hack=legend_hack,
                legend_x="150",
                #                                           legend_y = "",
            ))

        num += 1

    mfgraph.run_jgraph("\n".join(jgraph_input),
                       "/p/multifacet/papers/mask-prediction/graphs/%s" % name)
Example #3
def generate_micro4(stat, ylabel, transformation, ymax, legend_x, legend_y):
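    # One graph per bandwidth setting: think time (x) vs. the requested statistic (y)
    # for each protocol module at 64 processors; think times above 1000 cycles are
    # skipped, and Ruby_cycles values get an extra rescaling before plotting.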
    benchmark = "microbenchmark"
    processor = 64
    ## Generate bandwidth available vs performance (one graph per processor count)
    for bandwidth in bandwidth_list:
        jgraph_input = ""
        lines = []
        for module in modules_list:
            points = []
            for think_time in think_time_list:
                if think_time > 1000:
                    continue
                data = get_data(benchmark,
                                think_time=think_time,
                                processor=processor,
                                module=module,
                                bandwidth=bandwidth,
                                stat=stat)
                if len(data) > 0:
                    value = mfgraph.average(data)
                    if (stat == "Ruby_cycles"):
                        points.append([
                            think_time, ((value / 10000.0) - think_time) / 2.0
                        ])
                    else:
                        points.append([think_time, value])

            lines.append([protocol_name[module]] + points)

        transformation(lines)
        xlabel = "think time (cycles)"
        jgraph_input += mfgraph.line_graph(
            lines,
            title_fontsize="12",
            title_font="Times-Roman",
            ymax=ymax,
            xlabel=xlabel,
            ylabel=ylabel,
            label_fontsize="9",
            xsize=1.8,
            ysize=1.8,
            xmin=0.0,
            legend_x=legend_x,
            legend_y=legend_y,
            legend_fontsize="8",
            #                                           marktype = ["circle"],
            #                                           marksize = .03,
        )

        if stat == "links_utilized_percent":
            jgraph_input += "newcurve clip pts 0.1 75 1000 75 linetype solid linethickness 1 marktype none gray .75\n"
            jgraph_input += "newstring x 950 y 76 fontsize 8 : 75%\n"

        mfgraph.run_jgraph(
            jgraph_input, "bash-microbenchmark-thinktime-%d-%s" %
            (bandwidth, string.split(ylabel)[0]))
Example #4
def generate_micro3(stat, ylabel, transformation, ymax, legend_x, legend_y):
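    # One graph per bandwidth setting: processor count (log-scale x) vs. the
    # requested statistic for each protocol module, averaged over the matching runs.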
    benchmark = "microbenchmark"
    ## Generate bandwidth available vs performance (one graph per processor count)
    for bandwidth in bandwidth_list:
        jgraph_input = ""
        lines = []
        for module in modules_list:
            points = []
            for processor in processor_list:
                data = get_data(benchmark,
                                processor=processor,
                                module=module,
                                bandwidth=bandwidth,
                                stat=stat)
                if len(data) > 0:
                    value = mfgraph.average(data)
                    points.append([processor, value])

            lines.append([protocol_name[module]] + points)

        transformation(lines)
        xlabel = "processors"
        if ylabel == "performance":
            ylabel = "performance per processor"
        jgraph_input += mfgraph.line_graph(
            lines,
            title_fontsize="12",
            title_font="Times-Roman",
            ymax=ymax,
            xlabel=xlabel,
            ylabel=ylabel,
            label_fontsize="9",
            xsize=1.8,
            ysize=1.8,
            xlog=10,
            xmin=3.0,
            legend_x=4,
            legend_y=legend_y,
            legend_fontsize="8",
            marktype=["circle"],
            marksize=.03,
            hash_marks=map(str, processor_list),
        )

        if stat == "links_utilized_percent":
            jgraph_input += "newcurve clip pts 0.1 75 512 75 linetype solid linethickness 1 marktype none gray .75\n"
            jgraph_input += "newstring x 512 y 76 fontsize 8 : 75%\n"

        mfgraph.run_jgraph(
            jgraph_input, "bash-microbenchmark-processors-%d-%s" %
            (bandwidth, string.split(ylabel)[0]))
Example #5
def run_trace(name, pred_lst):
    jgraph_input = []
    cols = 2
    row_space = 2.8
    col_space = 2.8
    num = 0
    for bench in benchmarks:
        lines = []
        for predictor in pred_lst:
            predictor_data = get_data(bench, predictor)[0]
            (predictor, bandwidth, runtime) = predictor_data
            if not (predictor in pred_lst):
                continue
            line = [predictor_map[predictor], [float(bandwidth), float(runtime)]]
    #        line = [predictor, [float(bandwidth), float(runtime)]]
            lines.append(line)

        legend_hack = ""
        if (num == 4):
            legend_hack = "yes"

        print legend_hack

        jgraph_input.append(mfgraph.line_graph(lines,
                                               title = benchmark_map[bench],
                                               title_fontsize = "12",
                                               title_font = "Times-Roman",
                                               xsize = 1.8,
                                               ysize = 1.8,
                                               xlabel = "control bandwidth (normalized to Broadcast)",
                                               ylabel = "indirections (normalized to Directory)",
                                               label_fontsize = "10",
                                               label_font = "Times-Roman",
                                               legend_fontsize = "10",
                                               legend_font = "Times-Roman",
                                               linetype = ["none"],
                                               marktype = ["circle", "box", "diamond", "triangle", "triangle"],
                                               mrotate = [0, 0, 0, 0, 180],
                                               colors = ["0 0 0"], 
                                               xmin = 0,
                                               x_translate = (num % cols) * col_space,
                                               y_translate = (num / cols) * -row_space,
                                               line_thickness = 1.0,
                                               legend_hack = legend_hack,
                                               legend_x = "150",
    #                                           legend_y = "",
                                               ))

        num += 1

    mfgraph.run_jgraph("\n".join(jgraph_input), "/p/multifacet/papers/mask-prediction/graphs/%s" % name)
Example #6
def generate_micro1(stat, ylabel, transformation, ymax, legend_x, legend_y):
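    # Single graph at 64 processors: endpoint bandwidth (MB/s, log-scale x, capped
    # at 30000) vs. the requested statistic, one line per protocol module.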
    jgraph_input = ""
    ## Generate bandwidth available vs performance (one graph per processor count)
    benchmark = "microbenchmark"
    processor = 64
    lines = []
    for module in modules_list:
        points = []
        for bandwidth in bandwidth_list:
            if bandwidth > 30000:
                continue
            data = get_data(benchmark,
                            processor=processor,
                            module=module,
                            bandwidth=bandwidth,
                            stat=stat)
            if len(data) > 0:
                value = mfgraph.average(data)
                points.append([bandwidth, value])
        lines.append([protocol_name[module]] + points)

    transformation(lines)
    xlabel = "endpoint bandwidth available (MB/second)"
    jgraph_input += mfgraph.line_graph(
        lines,
        ymax=ymax,
        xlabel=xlabel,
        ylabel=ylabel,
        label_fontsize="9",
        xsize=1.8,
        ysize=1.8,
        xlog=10,
        xmin=90.0,
        xmax=30000.0,
        legend_x=legend_x,
        legend_y=legend_y,
        legend_fontsize="8",
        ylabel_location=18.0,
    )

    if stat == "links_utilized_percent":
        jgraph_input += "newcurve clip pts 0.1 75 200000 75 linetype solid linethickness 1 marktype none gray .75\n"
        jgraph_input += "newstring x 20000 y 76 fontsize 8 : 75%\n"

    mfgraph.run_jgraph(
        jgraph_input,
        "bash-microbench-basic-%d-%s" % (processor, string.split(ylabel)[0]))
Example #7
def generate_micro4(stat, ylabel, transformation, ymax, legend_x, legend_y):
    benchmark = "microbenchmark"
    processor = 64
    ## Generate bandwidth available vs performance (one graph per processor count)
    for bandwidth in bandwidth_list:
        jgraph_input = ""
        lines = []
        for module in modules_list:
            points = []
            for think_time in think_time_list:
                if think_time > 1000:
                    continue
                data = get_data(benchmark, think_time=think_time, processor=processor, module=module, bandwidth=bandwidth, stat=stat)
                if len(data) > 0:
                    value = mfgraph.average(data)
                    if (stat == "Ruby_cycles"):
                        points.append([think_time, ((value/10000.0)-think_time)/2.0])
                    else:
                        points.append([think_time, value])
                        
            lines.append([protocol_name[module]] + points)
            
        transformation(lines)
        xlabel = "think time (cycles)"
        jgraph_input += mfgraph.line_graph(lines,
                                           title_fontsize = "12",
                                           title_font = "Times-Roman",
                                           ymax = ymax,
                                           xlabel = xlabel,
                                           ylabel = ylabel,
                                           label_fontsize = "9",
                                           xsize = 1.8,
                                           ysize = 1.8,
                                           xmin = 0.0,
                                           legend_x = legend_x,
                                           legend_y = legend_y,
                                           legend_fontsize = "8",
#                                           marktype = ["circle"],
#                                           marksize = .03,
                                           )
        
        if stat == "links_utilized_percent":
            jgraph_input += "newcurve clip pts 0.1 75 1000 75 linetype solid linethickness 1 marktype none gray .75\n"
            jgraph_input += "newstring x 950 y 76 fontsize 8 : 75%\n"
        
        mfgraph.run_jgraph(jgraph_input, "bash-microbenchmark-thinktime-%d-%s" % (bandwidth, string.split(ylabel)[0]))
Example #8
def generate_micro3(stat, ylabel, transformation, ymax, legend_x, legend_y):
    benchmark = "microbenchmark"
    ## Generate bandwidth available vs performance (one graph per processor count)
    for bandwidth in bandwidth_list:
        jgraph_input = ""
        lines = []
        for module in modules_list:
            points = []
            for processor in processor_list:
                data = get_data(benchmark, processor=processor, module=module, bandwidth=bandwidth, stat=stat)
                if len(data) > 0:
                    value = mfgraph.average(data)
                    points.append([processor, value])
                        
            lines.append([protocol_name[module]] + points)
            
        transformation(lines)
        xlabel = "processors"
        if ylabel == "performance":
            ylabel = "performance per processor"
        jgraph_input += mfgraph.line_graph(lines,
                                           title_fontsize = "12",
                                           title_font = "Times-Roman",
                                           ymax = ymax,
                                           xlabel = xlabel,
                                           ylabel = ylabel,
                                           label_fontsize = "9",
                                           xsize = 1.8,
                                           ysize = 1.8,
                                           xlog = 10,
                                           xmin = 3.0,
                                           legend_x = 4,
                                           legend_y = legend_y,
                                           legend_fontsize = "8",
                                           marktype = ["circle"],
                                           marksize = .03,
                                           hash_marks = map(str, processor_list),
                                           )
        
        if stat == "links_utilized_percent":
            jgraph_input += "newcurve clip pts 0.1 75 512 75 linetype solid linethickness 1 marktype none gray .75\n"
            jgraph_input += "newstring x 512 y 76 fontsize 8 : 75%\n"
        
        mfgraph.run_jgraph(jgraph_input, "bash-microbenchmark-processors-%d-%s" % (bandwidth, string.split(ylabel)[0]))
Example #9
def generate_macro_bar():
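    # Bar chart of normalized performance (MOSI_bcast_opt_4 cycles divided by each
    # module's cycles) for every macro benchmark, with error bars derived from one
    # standard deviation; MOSI_mcast_aggr_4 is left as an empty placeholder bar.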
    processor = 16
    bandwidth = 1600
    stat = "Ruby_cycles"

    stacks = []
    for benchmark in benchmarks[1:]:
        bars = []
        modules = ["MOSI_bcast_opt_4", "MOSI_GS_4", "MOSI_mcast_aggr_4"]
        #norm = mfgraph.average(get_data(benchmark, processor=processor, module="MOSI_mcast_aggr_4", bandwidth=bandwidth, stat=stat))
        norm = mfgraph.average(get_data(benchmark, processor=processor, module="MOSI_bcast_opt_4", bandwidth=bandwidth, stat=stat))
        for module in modules:

            if module == "MOSI_mcast_aggr_4":
                bars.append(["", 0])
            else:
                data = get_data(benchmark, processor=processor, module=module, bandwidth=bandwidth, stat=stat)
                value = mfgraph.average(data)
                stddev = mfgraph.stddev(data)
                if (stddev/value)*100.0 > 1.0: # only plot error bars if they are more than 1%
                    bars.append([protocol_name[module], [norm/value, norm/(value+stddev), norm/(value-stddev)]])
                else:
                    bars.append([protocol_name[module], norm/value])
        stacks.append([workload_name[benchmark]] + bars)
    jgraph_input = mfgraph.stacked_bar_graph(stacks,
                                             colors = ["1 0 0", "0 .5 0", "0 0 1", "0 1 1", "1 0 1"],
#                                             colors = [".85 .85 .85", ".5 .5 .5", ".45 .45 .45"],
                                             patterns = ["solid", "stripe -45"],
                                             ymax = 1.5,
                                             xsize = 2.7,
                                             ysize = 2.3,
                                             label_fontsize = "9",
                                             hash_label_fontsize = "9",
                                             stack_name_font_size = "9",
                                             bar_name_font_size = "9",
                                             bar_name_rotate = 90.0,
                                             stack_name_location = 28.0,
                                             ylabel = "normalized performance",
                                             yhash = 0.2,
                                             ymhash = 1,
                                             )
    mfgraph.run_jgraph(jgraph_input, "bash-macro-talk-bars")
Example #10
def gen_sharing_histo():
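    # Stacked-bar histogram of sharers per miss, split into GETS and GETX, read
    # from each benchmark's *-gs320*.stats file and bucketed per the labels
    # 0, 1, 2, 3-7, 8+.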
    bars = []
    for benchmark in benchmarks:
        filenames = glob.glob("*/%s-gs320*.stats" % benchmark)
        if len(filenames) != 1:
            continue
        filename = filenames[0]

        gets_dist = get_histo(filename, "gets_sharing_histogram:")
        getx_dist = get_histo(filename, "getx_sharing_histogram:")
        total_misses = get_data(filename, "Total_misses:")
        gets_dist = map(lambda x: 100.0 * (float(x) / total_misses), gets_dist)
        gets_dist += [0] * 14  # fill in the end with zeros
        getx_dist = map(lambda x: 100.0 * (float(x) / total_misses), getx_dist)

        getx_dist = getx_dist[0:3] + [
            reduce(lambda x, y: x + y, getx_dist[3:7])
        ] + [reduce(lambda x, y: x + y, getx_dist[7:])]
        gets_dist = gets_dist[0:3] + [
            reduce(lambda x, y: x + y, gets_dist[3:7])
        ] + [reduce(lambda x, y: x + y, gets_dist[7:])]

        #        getx_dist = mfgraph.stack_bars(getx_dist)
        #        gets_dist = mfgraph.stack_bars(gets_dist)

        labels = ["0", "1", "2", "3-7", "8+"]
        bars.append([benchmark_names[benchmark]] +
                    map(lambda l, gets, getx:
                        (l, gets + getx, getx), labels, gets_dist, getx_dist))
    jgraph_input = mfgraph.stacked_bar_graph(
        bars,
        bar_segment_labels=["Get shared", "Get exclusive"],
        xsize=7,
        ysize=3,
        ylabel="Percent of all misses",
        colors=["1 0 0", "0 0 1"],
        patterns=["solid", "stripe -45"],
        bar_name_font_size="10",
        stack_name_location=10.0,
        legend_x="31",
        legend_y="87",
    )
    mfgraph.run_jgraph(jgraph_input, "sharers")
Example #11
def generate_micro1(stat, ylabel, transformation, ymax, legend_x, legend_y):
    jgraph_input = ""
    ## Generate bandwidth available vs performance (one graph per processor count)
    benchmark = "microbenchmark"
    processor = 64
    lines = []
    for module in modules_list:
        points = []
        for bandwidth in bandwidth_list:
            if bandwidth > 30000:
                continue
            data = get_data(benchmark, processor=processor, module=module, bandwidth=bandwidth, stat=stat)
            if len(data) > 0:
                value = mfgraph.average(data)
                points.append([bandwidth, value])
        lines.append([protocol_name[module]] + points)

    transformation(lines)
    xlabel = "endpoint bandwidth available (MB/second)"
    jgraph_input += mfgraph.line_graph(lines,
                                       ymax = ymax,
                                       xlabel = xlabel,
                                       ylabel = ylabel,
                                       label_fontsize = "9",
                                       xsize = 1.8,
                                       ysize = 1.8,
                                       xlog = 10,
                                       xmin = 90.0,
                                       xmax = 30000.0,
                                       legend_x = legend_x,
                                       legend_y = legend_y,
                                       legend_fontsize = "8",
                                       ylabel_location = 18.0,
                                      )
        
    if stat == "links_utilized_percent":
        jgraph_input += "newcurve clip pts 0.1 75 200000 75 linetype solid linethickness 1 marktype none gray .75\n"
        jgraph_input += "newstring x 20000 y 76 fontsize 8 : 75%\n"
    
    mfgraph.run_jgraph(jgraph_input, "bash-microbench-basic-%d-%s" % (processor, string.split(ylabel)[0]))
Example #12
def gen_sharing_histo():
    bars = []
    for benchmark in benchmarks:
        filenames = glob.glob("*/%s-gs320*.stats" % benchmark)
        if len(filenames) != 1:
            continue
        filename = filenames[0]

        gets_dist = get_histo(filename, "gets_sharing_histogram:")
        getx_dist = get_histo(filename, "getx_sharing_histogram:")
        total_misses = get_data(filename, "Total_misses:")
        gets_dist = map(lambda x : 100.0 * (float(x) / total_misses), gets_dist)
        gets_dist += [0] * 14  # fill in the end with zeros
        getx_dist = map(lambda x : 100.0 * (float(x) / total_misses), getx_dist)

        getx_dist = getx_dist[0:3] + [reduce(lambda x,y:x+y, getx_dist[3:7])] + [reduce(lambda x,y:x+y, getx_dist[7:])]
        gets_dist = gets_dist[0:3] + [reduce(lambda x,y:x+y, gets_dist[3:7])] + [reduce(lambda x,y:x+y, gets_dist[7:])]

#        getx_dist = mfgraph.stack_bars(getx_dist)
#        gets_dist = mfgraph.stack_bars(gets_dist)
        
        labels = ["0", "1", "2", "3-7", "8+"]
        bars.append([benchmark_names[benchmark]] + map(lambda l, gets, getx : (l, gets+getx, getx), labels, gets_dist, getx_dist))
    jgraph_input = mfgraph.stacked_bar_graph(bars,
                                             bar_segment_labels = ["Get shared", "Get exclusive"],
                                             xsize = 7,
                                             ysize = 3,
                                             ylabel = "Percent of all misses",
                                             colors = ["1 0 0", "0 0 1"],
                                             patterns = ["solid", "stripe -45"],
                                             bar_name_font_size = "10",
                                             stack_name_location = 10.0,
                                             legend_x = "31",
                                             legend_y = "87",
                                             )
    mfgraph.run_jgraph(jgraph_input, "sharers")
Example #13
#!/s/std/bin/python
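# Top-level driver: concatenates the jgraph fragments produced by the scale,
# data_size, misses, sharing, and mpstat modules for the benchmarks below and
# renders them as one multi-page "data" graph.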
import sys, string, os, glob, re, mfgraph

import misses, mpstat, scale, data_size, cache_state, sharing, protocol

benchmarks = [
    "static_web_12500",
    "dynamic_web_100",
    "java_oltp_100000",
    "oltp_1000a",
    "oltp_1000b",
    "oltp_1000c",
    "oltp_1000d",
    "barnes_128k",
    "ocean_514",
    ]

jgraph_input = []
#jgraph_input += protocol.gen_protocol(benchmarks)
jgraph_input += scale.gen_scale(benchmarks)
jgraph_input += data_size.gen_data_size(benchmarks)
jgraph_input += misses.gen_misses(benchmarks)
jgraph_input += sharing.gen_sharing(benchmarks, normalize=0)
jgraph_input += sharing.gen_sharing(benchmarks, normalize=1)
jgraph_input += mpstat.gen_mpstats(benchmarks)

mfgraph.run_jgraph("newpage\n".join(jgraph_input), "data")
Example #14
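# Fragment: the tail of a run_trace-style per-benchmark loop (compare Example #2);
# lines, bench, num, cols, row_space, col_space, and legend_hack are defined by
# the omitted code above, and the combined graph is written to .../graphs/fullsystem.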
    print legend_hack
    
    jgraph_input.append(mfgraph.line_graph(lines,
                                           title = benchmark_map[bench],
                                           title_fontsize = "12",
                                           title_font = "Times-Roman",
                                           xsize = 1.8,
                                           ysize = 1.8,
                                           xlabel = "control bandwidth (normalized to Broadcast)",
                                           ylabel = "runtime (normalized to Directory)",
                                           label_fontsize = "10",
                                           label_font = "Times-Roman",
                                           legend_fontsize = "10",
                                           legend_font = "Times-Roman",
                                           linetype = ["none"],
                                           marktype = ["circle", "box", "diamond", "triangle", "triangle"],
                                           mrotate = [0, 0, 0, 0, 180],
                                           colors = ["0 0 0"], 
                                           xmin = 0,
                                           x_translate = (num % cols) * col_space,
                                           y_translate = (num / cols) * -row_space,
                                           line_thickness = 1.0,
                                           legend_hack = legend_hack,
                                           legend_x = "150",
#                                           legend_y = "",
                                           ))
    
    num += 1

mfgraph.run_jgraph("\n".join(jgraph_input), "/p/multifacet/papers/mask-prediction/graphs/fullsystem")
Example #15
def touched_by():
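    # Variant of touched_by() in Example #1: plots only the 64B-block statistics
    # (blocks touched, and misses weighted by touches), reading the *-gs320*.stats
    # files and leaving several additional statistics commented out.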
    stats = [
        "touched_by_block_address:",
        "touched_by_weighted_block_address:",
        #        "touched_by_macroblock_address:",
        #        "touched_by_weighted_macroblock_address:",
        #        "touched_by_supermacroblock_address:",
        #        "touched_by_weighted_supermacroblock_address:",
        #        "last_n_block_touched_by",
        #        "last_n_macroblock_touched_by",
    ]

    yaxis_names = [
        "Percent of all blocks",
        "Percent of all misses",
        "Percent of all macroblocks",
        "Percent of all misses",
        "Percent of all macroblocks",
        "Percent of all misses",
        #        "Percent",
        #        "Percent",
    ]

    stats_names = {
        "touched_by_block_address:":
        "(a) Percent of data blocks (64B) touched by n processors",
        "touched_by_weighted_block_address:":
        "(b) Percent of misses to data blocks (64B) touched by n processors",
        #        "touched_by_macroblock_address:": "(c) Percent of data macroblocks (1024B) touched by n processors",
        #        "touched_by_weighted_macroblock_address:": "(d) Percent of misses to data macroblocks (1024B) touched by n processors",
        #        "touched_by_supermacroblock_address:": "(e) Percent of 4kB macroblocks touched by n processors",
        #        "touched_by_weighted_supermacroblock_address:": "(f) Percent of misses to 4kB macroblocks touched by n processors",
        #        "last_n_block_touched_by" : "(e) Percent of misses touched by n processors in the last 64 misses to the block",
        #        "last_n_macroblock_touched_by" : "(f) Percent of misses touched by n processors in the last 64 misses to the macroblock",
    }

    jgraph_input = []

    cols = 1
    row_space = 2.2
    col_space = 3

    num = 0
    for stat in stats:
        bars = []
        for benchmark in benchmarks:
            print stat, benchmark
            group = [benchmark_names[benchmark]]
            filenames = glob.glob("*/%s-*gs320*.stats" % benchmark)
            print benchmark, filenames
            for filename in filenames:
                line = mfgraph.grep(filename, stat)[0]
                line = string.replace(line, "]", "")
                line = string.replace(line, "[", "")
                data = string.split(line)[2:]
                #                data = string.split(line)[14:]
                data = map(float, data)
                print data

                #                  new_data = []
                #                  sum = 0.0
                #                  for item in data:
                #                      new_data.append(item + sum)
                #                      sum += item
                #                  data = new_data
                print data

                #                  for index in range(len(data)):
                #                      data[index] = data[index]/sum

                print data
                normalize_list(data)
                for index in range(len(data)):
                    if index + 1 in [1, 4, 8, 12, 16]:
                        group.append(["%d" % (index + 1), data[index] * 100.0])
                    else:
                        group.append(["", data[index] * 100.0])

            bars.append(group)

        jgraph_input.append(
            mfgraph.stacked_bar_graph(
                bars,
                title=stats_names[stat],
                title_fontsize="12",
                title_font="Times-Roman",
                title_y=-25.0,
                xsize=6.5,
                ysize=1.5,
                xlabel="",
                ymax=100.01,
                ylabel=yaxis_names[num],
                colors=[".5 .5 .5"],
                patterns=["solid"],
                stack_name_location=12.0,
                stack_space=3,
                x_translate=(num % cols) * col_space,
                y_translate=(num / cols) * -row_space,
            ))
        num += 1

    mfgraph.run_jgraph("\n".join(jgraph_input), "touched-by")
Example #16
def generate_micro2(stat, ylabel, transformation, ymax, legend_x, legend_y):
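    # Threshold sensitivity study: endpoint bandwidth vs. the requested statistic
    # for MOSI_bcast_opt_1, MOSI_GS_1, and MOSI_mcast_aggr_1 at 55%/75%/95%
    # thresholds; norm_module is temporarily pointed at the default-threshold
    # multicast line while the transformation runs.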

    jgraph_input = ""
    ## Generate threshold variation
    benchmark = "microbenchmark"
    processor = 64

    lines = []
    for module in ["MOSI_bcast_opt_1"]:
        points = []
        for bandwidth in bandwidth_list:
            if bandwidth > 30000:
                continue
            value = mfgraph.average(
                get_data(benchmark,
                         module=module,
                         bandwidth=bandwidth,
                         stat=stat))
            if (value != 0):
                points.append([bandwidth, value])

        lines.append([protocol_name[module]] + points)

    for threshold in [0.55, 0.75, 0.95]:
        points = []
        for bandwidth in bandwidth_list:
            if bandwidth > 30000:
                continue
            value = mfgraph.average(
                get_data(benchmark,
                         module="MOSI_mcast_aggr_1",
                         bandwidth=bandwidth,
                         threshold=threshold,
                         stat=stat))
            if (value != 0):
                points.append([bandwidth, value])

        lines.append([
            protocol_name["MOSI_mcast_aggr_1"] + ": %2.0f%%" %
            (threshold * 100)
        ] + points)

    for module in ["MOSI_GS_1"]:
        points = []
        for bandwidth in bandwidth_list:
            if bandwidth > 30000:
                continue
            value = mfgraph.average(
                get_data(benchmark,
                         module=module,
                         bandwidth=bandwidth,
                         stat=stat))
            if (value != 0):
                points.append([bandwidth, value])

        lines.append([protocol_name[module]] + points)

    global norm_module
    old_norm_module = norm_module
    norm_module = protocol_name["MOSI_mcast_aggr_1"] + ": %2.0f%%" % (
        default_threshold * 100)
    transformation(lines)
    norm_module = old_norm_module

    xlabel = "endpoint bandwidth available (MB/second)"
    jgraph_input += mfgraph.line_graph(
        lines,
        title_fontsize="12",
        title_font="Times-Roman",
        ymax=ymax,
        xlabel=xlabel,
        ylabel=ylabel,
        label_fontsize="9",
        xsize=1.8,
        ysize=1.8,
        xlog=10,
        xmin=90.0,
        xmax=30000.0,
        legend_x=legend_x,
        legend_y=legend_y,
        legend_fontsize="8",
        ylabel_location=18.0,
    )

    mfgraph.run_jgraph(
        jgraph_input, "bash-microbench-threshold-%d-%s" %
        (processor, string.split(ylabel)[0]))
Example #17
def generate_micro1(stat, ylabel, transformation, ymax, legend_x, legend_y):
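    # Talk version of generate_micro1: emits three bandwidth-vs-statistic graphs --
    # MOSI_bcast_opt_1 and MOSI_GS_1 only, all modules, and a "maximum" curve that
    # takes the smaller of the two MOSI_bcast_opt_1/MOSI_GS_1 averages at each point.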
    jgraph_input = ""
    ## Generate bandwidth available vs performance (one graph per processor count)
    benchmark = "microbenchmark"
    processor = 64
    lines = []
    for module in ["MOSI_bcast_opt_1", "MOSI_GS_1"]:
        points = []
        for bandwidth in bandwidth_list:
            if bandwidth > 30000:
                continue
            data = get_data(benchmark, processor=processor, module=module, bandwidth=bandwidth, stat=stat)
            if len(data) > 0:
                value = mfgraph.average(data)
                points.append([bandwidth, value])
        lines.append([protocol_name[module]] + points)

    transformation(lines)
    xlabel = "endpoint bandwidth available (MB/second)"
    jgraph_input += mfgraph.line_graph(lines,
                                       ymax = ymax,
                                       xlabel = xlabel,
                                       ylabel = ylabel,
                                       label_fontsize = "9",
                                       xsize = 1.8,
                                       ysize = 1.8,
                                       xlog = 10,
                                       xmin = 90.0,
                                       xmax = 30000.0,
                                       legend_x = legend_x,
                                       legend_y = legend_y,
                                       legend_fontsize = "8",
                                       ylabel_location = 18.0,
                                       colors = ["1 0 0", "0 .5 0", "0 1 1", "1 0 1"],
                                       linetype = ["dotted", "longdash", "dotdash", "dashed"],
                                      )
        
    if stat == "links_utilized_percent":
        jgraph_input += "newcurve clip pts 0.1 75 200000 75 linetype solid linethickness 1 marktype none gray .75\n"
        jgraph_input += "newstring x 20000 y 76 fontsize 8 : 75%\n"
    
    mfgraph.run_jgraph(jgraph_input, "bash-microbench-talk-two-%d-%s" % (processor, string.split(ylabel)[0]))

############ repeated code

    jgraph_input = ""
    ## Generate bandwidth available vs performance (one graph per processor count)
    benchmark = "microbenchmark"
    processor = 64
    lines = []
    for module in modules_list:
        points = []
        for bandwidth in bandwidth_list:
            if bandwidth > 30000:
                continue
            data = get_data(benchmark, processor=processor, module=module, bandwidth=bandwidth, stat=stat)
            if len(data) > 0:
                value = mfgraph.average(data)
                points.append([bandwidth, value])
        lines.append([protocol_name[module]] + points)

    transformation(lines)
    xlabel = "endpoint bandwidth available (MB/second)"
    jgraph_input += mfgraph.line_graph(lines,
                                       ymax = ymax,
                                       xlabel = xlabel,
                                       ylabel = ylabel,
                                       label_fontsize = "9",
                                       xsize = 1.8,
                                       ysize = 1.8,
                                       xlog = 10,
                                       xmin = 90.0,
                                       xmax = 30000.0,
                                       legend_x = legend_x,
                                       legend_y = legend_y,
                                       legend_fontsize = "8",
                                       ylabel_location = 18.0,
                                      )
        
    if stat == "links_utilized_percent":
        jgraph_input += "newcurve clip pts 0.1 75 200000 75 linetype solid linethickness 1 marktype none gray .75\n"
        jgraph_input += "newstring x 20000 y 76 fontsize 8 : 75%\n"
    
    mfgraph.run_jgraph(jgraph_input, "bash-microbench-talk-three-%d-%s" % (processor, string.split(ylabel)[0]))

############ repeated code

    jgraph_input = ""
    ## Generate bandwidth available vs performance (one graph per processor count)
    benchmark = "microbenchmark"
    processor = 64
    lines = []
    for module in ["MOSI_bcast_opt_1", "maximum", "MOSI_GS_1"]:
        points = []
        for bandwidth in bandwidth_list:
            if bandwidth > 30000:
                continue

            if module == "maximum":
                data1 = get_data(benchmark, processor=processor, module="MOSI_bcast_opt_1", bandwidth=bandwidth, stat=stat)
                data2 = get_data(benchmark, processor=processor, module="MOSI_GS_1", bandwidth=bandwidth, stat=stat)

                if len(data1) > 0:
                    value1 = mfgraph.average(data1)
                
                if len(data2) > 0:
                    value2 = mfgraph.average(data2)

                value = min([value1, value2])
                points.append([bandwidth, value])
            else:
                data = get_data(benchmark, processor=processor, module=module, bandwidth=bandwidth, stat=stat)
                
                if len(data) > 0:
                    value = mfgraph.average(data)
                    points.append([bandwidth, value])

        lines.append([protocol_name[module]] + points)

    transformation(lines)
    xlabel = "endpoint bandwidth available (MB/second)"
    jgraph_input += mfgraph.line_graph(lines,
                                       ymax = ymax,
                                       xlabel = xlabel,
                                       ylabel = ylabel,
                                       label_fontsize = "9",
                                       xsize = 1.8,
                                       ysize = 1.8,
                                       xlog = 10,
                                       xmin = 90.0,
                                       xmax = 30000.0,
                                       legend_x = legend_x,
                                       legend_y = legend_y,
                                       legend_fontsize = "8",
                                       ylabel_location = 18.0,
                                      )
        
    if stat == "links_utilized_percent":
        jgraph_input += "newcurve clip pts 0.1 75 200000 75 linetype solid linethickness 1 marktype none gray .75\n"
        jgraph_input += "newstring x 20000 y 76 fontsize 8 : 75%\n"
    
    mfgraph.run_jgraph(jgraph_input, "bash-microbench-talk-three-max-%d-%s" % (processor, string.split(ylabel)[0]))
Example #18
def generate_micro2(stat, ylabel, transformation, ymax, legend_x, legend_y):

    jgraph_input = ""
    ## Generate threshold variation
    benchmark = "microbenchmark"
    processor = 64

    lines = []
    for module in ["MOSI_bcast_opt_1"]:
        points = []
        for bandwidth in bandwidth_list:
            if bandwidth > 30000:
                continue
            value = mfgraph.average(get_data(benchmark, module=module, bandwidth=bandwidth, stat=stat))
            if (value != 0):
                points.append([bandwidth, value])
                
        lines.append([protocol_name[module]] + points)

    for threshold in [0.55, 0.75, 0.95]:
        points = []
        for bandwidth in bandwidth_list:
            if bandwidth > 30000:
                continue
            value = mfgraph.average(get_data(benchmark, module="MOSI_mcast_aggr_1", bandwidth=bandwidth, threshold=threshold, stat=stat))
            if (value != 0):
                points.append([bandwidth, value])

        lines.append([protocol_name["MOSI_mcast_aggr_1"] + ": %2.0f%%" % (threshold*100)] + points)

    for module in ["MOSI_GS_1"]:
        points = []
        for bandwidth in bandwidth_list:
            if bandwidth > 30000:
                continue
            value = mfgraph.average(get_data(benchmark, module=module, bandwidth=bandwidth, stat=stat))
            if (value != 0):
                points.append([bandwidth, value])
                
        lines.append([protocol_name[module]] + points)

    global norm_module
    old_norm_module = norm_module
    norm_module = protocol_name["MOSI_mcast_aggr_1"] + ": %2.0f%%" % (default_threshold*100)
    transformation(lines)
    norm_module = old_norm_module

    xlabel = "endpoint bandwidth available (MB/second)"
    jgraph_input += mfgraph.line_graph(lines,
                                       title_fontsize = "12",
                                       title_font = "Times-Roman",
                                       ymax = ymax,
                                       xlabel = xlabel,
                                       ylabel = ylabel,
                                       label_fontsize = "9",
                                       xsize = 1.8,
                                       ysize = 1.8,
                                       xlog = 10,
                                       xmin = 90.0,
                                       xmax = 30000.0,
                                       legend_x = legend_x,
                                       legend_y = legend_y,
                                       legend_fontsize = "8",
                                       ylabel_location = 18.0,
                                       )

    mfgraph.run_jgraph(jgraph_input, "bash-microbench-threshold-%d-%s" % (processor, string.split(ylabel)[0]))
Example #19
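# Fragment: the mfgraph.line_graph(...) call and loop tail from a run_trace-style
# function; the enclosing jgraph_input.append( and the code that builds lines,
# bench, num, cols, row_space, col_space, and legend_hack are omitted.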
        mfgraph.line_graph(
            lines,
            title=benchmark_map[bench],
            title_fontsize="12",
            title_font="Times-Roman",
            xsize=1.8,
            ysize=1.8,
            xlabel="control bandwidth (normalized to Broadcast)",
            ylabel="indirections (normalized to Directory)",
            label_fontsize="10",
            label_font="Times-Roman",
            legend_fontsize="10",
            legend_font="Times-Roman",
            linetype=["none"],
            marktype=["circle", "box", "diamond", "triangle", "triangle"],
            mrotate=[0, 0, 0, 0, 180],
            colors=["0 0 0"],
            xmin=0,
            x_translate=(num % cols) * col_space,
            y_translate=(num / cols) * -row_space,
            line_thickness=1.0,
            legend_hack=legend_hack,
            legend_x="150",
            #                                           legend_y = "",
        ))

    num += 1

mfgraph.run_jgraph("\n".join(jgraph_input),
                   "/p/multifacet/papers/mask-prediction/graphs/predsize")
Example #20
def make_graph(graph):
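    # graph is [name, benchmark_list, series, ...]; each series is a list of
    # (predictor, (color, fill)) pairs.  Builds one scatter panel per benchmark,
    # plotting either control messages vs. indirections or normalized traffic vs.
    # normalized runtime, with a distinct mark shape per predictor.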
    name = graph[0]
    print name
    name = "graphs/" + name
    benchmarks = graph[1]
    data_points = graph[2:]

    ## calculate the line configurations
    marktype_lst = []
    mrotate_lst = []
    color_lst = []
    fill_lst = []
    linetype_lst = []

    for series in data_points:
        ## the lines connecting the points        
        marktype_lst.append("none")
        mrotate_lst.append(0)
        color_lst.append(grey)
        fill_lst.append(grey)
        linetype_lst.append("solid")

        ## the data points
        for (predictor, mark) in series:
            if predictor in ("None", "AlwaysBroadcast"):
                marktype_lst.append("none")
                mrotate_lst.append(0)
                color_lst.append(black)
                fill_lst.append(black)
                linetype_lst.append("dotted")

            (marktype, mrotate) = predictor_to_shape(predictor)
            marktype_lst.append(marktype)
            mrotate_lst.append(mrotate)

            (color, fill) = mark
            color_lst.append(color)
            fill_lst.append(fill)
            linetype_lst.append("none")
        
    ## Make the graphs
    all_data = []
    all_parameters = []

    for benchmark in benchmarks:
        print " ", benchmark
        # collect data
        data = []

        for series in data_points:
            segments = []
            points = []
            for data_point in series:
                predictor = data_point[0]
                predictor_desc = predictor_name_transform(predictor)

                # get the data
                #lst = (-1, -1, -1, -1)
                pred_data = get_maskpred_data(benchmark, predictor)
                dir_data = get_maskpred_data(benchmark, "None")
                bcast_data = get_maskpred_data(benchmark, "AlwaysBroadcast")

                if None in (pred_data, dir_data, bcast_data):
                    x_value = -1
                    y_value = -1
                else:
                    (control_msgs_per_miss, indirections, cycles, total_bandwidth) = pred_data
                    (dir_control_msgs_per_miss, dir_indirections, dir_cycles, dir_total_bandwidth) = dir_data
                    (bcast_control_msgs_per_miss, bcast_indirections, bcast_cycles, bcast_total_bandwidth) = bcast_data

                    normalized_cycles = 100*cycles/dir_cycles  # correct one
                    normalized_bandwidth = 100*total_bandwidth/bcast_total_bandwidth # correct one

                    if not runtime:
                        x_value = control_msgs_per_miss
                        y_value = indirections

                    else:
                        x_value = normalized_bandwidth
                        y_value = normalized_cycles

                print "   ", predictor, "->", benchmark, predictor_desc, x_value, y_value
                if predictor == "None":
                    points.append(["", [x_value, y_value], [x_value, 0]])
                if predictor == "AlwaysBroadcast":
                    points.append(["", [x_value, y_value], [0, y_value]])
                points.append([predictor_desc, [x_value, y_value]])
                segments.append([x_value, y_value])

            data.append([""] + segments)
            for line in points:
                data.append(line)
            
        # graph the data
        all_data.append(data)
        all_parameters.append({ "title" : benchmark_names[benchmark] })

    # only display the legend on the last graph
##     all_parameters[-1]["legend"] = "on"
##     if len(benchmarks) == 6:
##         all_parameters[-1]["legend_x"] = -80
##         all_parameters[-1]["legend_y"] = -70
##         all_parameters[-1]["legend_default"] = "hjl vjt"
        
    if not runtime:
        xlabel = "request messages per miss"
        ylabel = "indirections (percent of all misses)"
    else:
        xlabel = "normalized traffic per miss"
        ylabel = "normalized runtime"
        
    output = mfgraph.multi_graph(all_data,
                                 all_parameters,
                                 legend = "off",
                                 xsize = 1.8,
                                 ysize = 1.8,
                                 xlabel = xlabel,
                                 ylabel = ylabel,
                                 linetype = linetype_lst,
                                 colors = color_lst,
                                 fills = fill_lst,
                                 xmin = 0.0,
                                 ymin = 0.0,
                                 cols = 3,
                                 #ymax = 100.0,
                                 marktype = marktype_lst,
                                 mrotate = mrotate_lst,
                                 title_fontsize = "12",
                                 legend_hack = "yes",
                                 )

    mfgraph.run_jgraph(output, name)
Example #21
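# Fragment: same loop tail as Example #19, but the y axis is normalized runtime
# and the combined graph is written to .../graphs/fullsystem.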
        mfgraph.line_graph(
            lines,
            title=benchmark_map[bench],
            title_fontsize="12",
            title_font="Times-Roman",
            xsize=1.8,
            ysize=1.8,
            xlabel="control bandwidth (normalized to Broadcast)",
            ylabel="runtime (normalized to Directory)",
            label_fontsize="10",
            label_font="Times-Roman",
            legend_fontsize="10",
            legend_font="Times-Roman",
            linetype=["none"],
            marktype=["circle", "box", "diamond", "triangle", "triangle"],
            mrotate=[0, 0, 0, 0, 180],
            colors=["0 0 0"],
            xmin=0,
            x_translate=(num % cols) * col_space,
            y_translate=(num / cols) * -row_space,
            line_thickness=1.0,
            legend_hack=legend_hack,
            legend_x="150",
            #                                           legend_y = "",
        ))

    num += 1

mfgraph.run_jgraph("\n".join(jgraph_input),
                   "/p/multifacet/papers/mask-prediction/graphs/fullsystem")
Example #22
def cumulative():
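    # Cumulative-distribution plot: for each statistic, the percentage of all
    # sharing misses covered by the first n blocks / macroblocks / static
    # instructions, one panel per statistic with one line per benchmark.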
    stats = [
        "^block_address ",
        "^macroblock_address ",
#        "^supermacroblock_address ",
        "^pc_address ",
        ]
    
    stat_name = {
        "^block_address " : "Number of data blocks (64B)",
        "^macroblock_address " : "Number of data macroblocks (1024B)",
        "^supermacroblock_address " : "Number of data macroblocks (4096B)",
        "^pc_address " : "Number of static instructions",
        }

    jgraph_input = []

    cols = 3
    row_space = 2.9
    col_space = 3

    num = 0
    for stat in stats:
        graph_lines = []
        for benchmark in benchmarks:
            print stat, benchmark
            group = [benchmark_names[benchmark]]
            filenames = glob.glob("*/%s-gs320*.stats" % benchmark)
            for filename in filenames:
                print filename

                command = 'egrep "^Total_data_misses_block_address" %s' % (filename)
                results = os.popen(command, "r")
                line = results.readlines()[0]
                total_misses = float(string.split(line)[1])
                
                command = 'egrep "^sharing_misses:" %s' % (filename)
                results = os.popen(command, "r")
                line = results.readlines()[0]
                total_sharing_misses = float(string.split(line)[1])

                sharing_misses = get_cumulative(filename, 16, total_sharing_misses, stat)
                line = [benchmark_names[benchmark]]
#                points = range(0, 100) + range(100, len(sharing_misses), 10)
                points = range(1, len(sharing_misses), 100)
                for i in points:
                    line.append([i+1, sharing_misses[i]])
                graph_lines.append(line)

        jgraph_input.append(mfgraph.line_graph(graph_lines,
                                               ymax = 100.0,
#                                               xlog = 10,
                                               xmax = 10000,
                                               title = "",
                                               title_fontsize = "12",
                                               title_font = "Times-Roman",
                                               xsize = 2.0,
                                               ysize = 2.5,
                                               xlabel = stat_name[stat],
                                               ylabel = "Percent of all sharing misses (cumulative)",
                                               label_fontsize = "10",
                                               label_font = "Times-Roman",
                                               legend_fontsize = "10",
                                               legend_font = "Times-Roman",
                                               legend_x = "50",
                                               legend_y = "20",
                                               x_translate = (num % cols) * col_space,
                                               y_translate = (num / cols) * -row_space,
                                               line_thickness = 1.0,
                                               ))

        num += 1

    mfgraph.run_jgraph("\n".join(jgraph_input), "cumulative")
Example #23
def instant_sharers():
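    # Parses the GETS/GETX message-classification tables in each benchmark's
    # trace-profiler stats and plots a stacked-bar histogram of the sharer-count
    # distribution at the time of each miss, split into GETS and GETX.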
    jgraph_input = []

    gets_map = {
        "N N": 1,
        "Y N": 0,
    }

    getx_map = {
        "N N N N": 1,
        "N N N Y": 1,
        "N N Y N": 1,
        "N N Y Y": 1,

        #        "N Y N N" : "X",
        #        "N Y N Y" : "X",
        #        "N Y Y N" : "X",
        "N Y Y Y": 0,
        "Y N N N": 0,
        "Y N N Y": 0,
        "Y N Y N": 0,
        "Y N Y Y": 0,

        #        "Y Y N N" : "X",
        #        "Y Y N Y" : "X",
        #        "Y Y Y N" : "X",
        #        "Y Y Y Y" : "X",
    }

    cols = 2
    row_space = 2.9
    col_space = 3

    num = 0
    bars = []
    for benchmark in benchmarks:
        getx_dist = [0] * 16
        gets_dist = [0] * 16

        print benchmark
        group = [benchmark_names[benchmark]]
        filename = glob.glob("%s/*trace-profiler*.stats" % benchmark)[0]
        gets_mode = 0
        sum = 0
        for line in open(filename).readlines():
            line = string.strip(line)
            line = string.translate(line, string.maketrans("][", "  "))

            # set mode (avoid shadowing the builtin str)
            prefix = "Total requests: "
            if line[0:len(prefix)] == prefix:
                total_requests = int(string.split(line)[2])

            if line == "GETS message classifications:":
                gets_mode = 1

            if line == "":
                gets_mode = 0

            if gets_mode == 1:
                #gets
                key = line[0:3]
                if gets_map.has_key(key):
                    parts = string.split(line)
                    sum += int(parts[2])
                    # no histogram
                    data = parts[2:3]

                    # shift if one
                    if gets_map[key] == 1:
                        data = [0] + data

                    data = map(int, data)
                    add_list(gets_dist, data)
            else:
                #getx
                key = line[0:7]
                if getx_map.has_key(key):
                    parts = string.split(line)
                    sum += int(parts[4])
                    if len(parts) > 10:
                        # histogram
                        data = parts[19:]
                    else:
                        # no histogram
                        data = parts[4:5]

                    # shift if one
                    if getx_map[key] == 1:
                        data = [0] + data

                    data = map(int, data)
                    add_list(getx_dist, data)

        for i in range(len(getx_dist)):
            gets_dist[i] = 100.0 * ((gets_dist[i] + getx_dist[i]) / float(sum))
            getx_dist[i] = 100.0 * (getx_dist[i] / float(sum))

        getx_dist = getx_dist[0:3] + [
            reduce(lambda x, y: x + y, getx_dist[3:7])
        ] + [reduce(lambda x, y: x + y, getx_dist[7:])]
        gets_dist = gets_dist[0:3] + [
            reduce(lambda x, y: x + y, gets_dist[3:7])
        ] + [reduce(lambda x, y: x + y, gets_dist[7:])]

        print getx_dist
        print gets_dist
        print "indirections:", 100.0 - gets_dist[0]

        labels = ["0", "1", "2", "3-7", ">8"]
        ##         labels = []
        ##         for i in range(16):
        ##             if i in [0, 3, 7, 11, 15]:
        ##                 labels.append("%d" % i)
        ##             else:
        ##                 labels.append("")

        bars.append([benchmark_names[benchmark]] +
                    map(None, labels, gets_dist, getx_dist))
    print bars
    jgraph_input.append(
        mfgraph.stacked_bar_graph(
            bars,
            bar_segment_labels=["Get shared", "Get exclusive"],
            xsize=7,
            ysize=3,
            ylabel="Percent of all misses",
            legend_x="25",
            legend_y="90",
        ))

    mfgraph.run_jgraph("\n".join(jgraph_input), "instant-sharers")
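# add_list() is used above but defined elsewhere in the original script.
# A minimal sketch consistent with how it is called here: element-wise
# accumulation of a (possibly shorter) parsed histogram into the running
# distribution.
def add_list(accumulator, values):
    for index in range(len(values)):
        accumulator[index] += values[index]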
Example #24
0
def touched_by():
    stats = [
        "touched_by_block_address:",
        "touched_by_weighted_block_address:",
#        "touched_by_macroblock_address:",
#        "touched_by_weighted_macroblock_address:",
#        "touched_by_supermacroblock_address:",
#        "touched_by_weighted_supermacroblock_address:",
#        "last_n_block_touched_by",
#        "last_n_macroblock_touched_by",
        ]
    
    yaxis_names = [
        "Percent of all blocks",
        "Percent of all misses",
        "Percent of all macroblocks",
        "Percent of all misses",
        "Percent of all macroblocks",
        "Percent of all misses",
#        "Percent",
#        "Percent",
        ]

    stats_names = {
        "touched_by_block_address:" : "(a) Percent of data blocks (64B) touched by n processors",
        "touched_by_weighted_block_address:": "(b) Percent of misses to data blocks (64B) touched by n processors",
#        "touched_by_macroblock_address:": "(c) Percent of data macroblocks (1024B) touched by n processors",
#        "touched_by_weighted_macroblock_address:": "(d) Percent of misses to data macroblocks (1024B) touched by n processors",
#        "touched_by_supermacroblock_address:": "(e) Percent of 4kB macroblocks touched by n processors",
#        "touched_by_weighted_supermacroblock_address:": "(f) Percent of misses to 4kB macroblocks touched by n processors",
#        "last_n_block_touched_by" : "(e) Percent of misses touched by n processors in the last 64 misses to the block",
#        "last_n_macroblock_touched_by" : "(f) Percent of misses touched by n processors in the last 64 misses to the macroblock",
        }

    jgraph_input = []

    cols = 1
    row_space = 2.2
    col_space = 3

    num = 0
    for stat in stats:
        bars = []
        for benchmark in benchmarks:
            print stat, benchmark
            group = [benchmark_names[benchmark]]
            filenames = glob.glob("*/%s-*gs320*.stats" % benchmark)
            print benchmark, filenames
            for filename in filenames:
                line = mfgraph.grep(filename, stat)[0]
                line = string.replace(line, "]", "")
                line = string.replace(line, "[", "")
                data = string.split(line)[2:]
#                data = string.split(line)[14:]
                data = map(float, data)
                print data

#                  new_data = []
#                  sum = 0.0
#                  for item in data:
#                      new_data.append(item + sum)
#                      sum += item
#                  data = new_data
                print data
                
#                  for index in range(len(data)):
#                      data[index] = data[index]/sum

                print data
                normalize_list(data)
                for index in range(len(data)):
                    if index+1 in [1, 4, 8, 12, 16]:
                        group.append(["%d" % (index+1), data[index]*100.0])
                    else:
                        group.append(["", data[index]*100.0])

            bars.append(group)

        jgraph_input.append(mfgraph.stacked_bar_graph(bars,
                                                      title = stats_names[stat],
                                                      title_fontsize = "12",
                                                      title_font = "Times-Roman",
                                                      title_y = -25.0,
                                                      xsize = 6.5,
                                                      ysize = 1.5,
                                                      xlabel = "",
                                                      ymax = 100.01,
                                                      ylabel = yaxis_names[num],
                                                      colors = [".5 .5 .5"],
                                                      patterns = ["solid"],
                                                      stack_name_location = 12.0,
                                                      stack_space = 3,
                                                      x_translate = (num % cols) * col_space,
                                                      y_translate = (num / cols) * -row_space,
                                                      ))
        num += 1

    mfgraph.run_jgraph("\n".join(jgraph_input), "touched-by")
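# normalize_list() is not shown in this excerpt.  A minimal sketch, assuming
# it rescales the list in place so the entries sum to 1 (each entry is then
# multiplied by 100.0 above to get a percentage):
def normalize_list(values):
    total = float(sum(values))
    if total > 0.0:
        for index in range(len(values)):
            values[index] = values[index] / total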
Example #25
0
                                      "ECACHE_CTOC_PER_INST",
                                      "CTOC/Transaction", "Misses/Transaction")

    #     generate_generic_cpustat(jgraph_input, results_dir, "ADDRESSBUS_UTIL", "Bus Utilization", "")
    #     generate_generic_cpustat(jgraph_input, results_dir, "BRANCH_RATE", "Branch Rate", "Branches/Instruction")
    #     generate_generic_cpustat(jgraph_input, results_dir, "BRANCH_MISSRATE", "Branch Missrate", "")
    #     generate_generic_cpustat(jgraph_input, results_dir, "BRANCH_STR", "Branch Stall Rate", "Cycles/Instruction")
    #     generate_generic_cpustat(jgraph_input, results_dir, "BRANCH_TAKENRATE", "Branch Taken Rate", "")

    #     generate_generic_cpustat(jgraph_input, results_dir, "STOREQUEUE_STR", "Store Queue Stall Rate", "Cycles/Instruction")
    #     generate_generic_cpustat(jgraph_input, results_dir, "IU_RAWSTALLRATE", "IU Raw Stall Rate", "Cycles/Instruction")
    #     generate_generic_cpustat(jgraph_input, results_dir, "FP_RAWSTALLRATE", "FP Raw Stall Rate", "Cycles/Instruction")

    #     generate_generic_cpustat(jgraph_input, results_dir, "MCU_READS0", "MCU Reads S0", "")
    #     generate_generic_cpustat(jgraph_input, results_dir, "MCU_READS1", "MCU Reads S1", "")
    #     generate_generic_cpustat(jgraph_input, results_dir, "MCU_READS2", "MCU Reads S2", "")
    #     generate_generic_cpustat(jgraph_input, results_dir, "MCU_READS3", "MCU Reads S3", "")

    #     generate_generic_cpustat(jgraph_input, results_dir, "MCU_WRITES0", "MCU Writes S0", "")
    #     generate_generic_cpustat(jgraph_input, results_dir, "MCU_WRITES1", "MCU Writes S1", "")
    #     generate_generic_cpustat(jgraph_input, results_dir, "MCU_WRITES2", "MCU Writes S2", "")
    #     #generate_generic_cpustat(jgraph_input, results_dir, "MCU_WRITES3", "MCU Writes S3", "")

    #     generate_generic_cpustat(jgraph_input, results_dir, "MCU_BANK0STALL", "MCU Bank-0 Stall", "")
    #     generate_generic_cpustat(jgraph_input, results_dir, "MCU_BANK1STALL", "MCU Bank-1 Stall", "")
    #     generate_generic_cpustat(jgraph_input, results_dir, "MCU_BANK2STALL", "MCU Bank-2 Stall", "")
    #     generate_generic_cpustat(jgraph_input, results_dir, "MCU_BANK3STALL", "MCU Bank-3 Stall", "")

    print "Graph Inputs Ready...Running Jgraph."
    mfgraph.run_jgraph("newpage\n".join(jgraph_input), "ecperf")
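# generate_generic_cpustat_per_xact() is defined elsewhere; the sketch below
# is only a guess at its shape, modeled on the other generate_* helpers in
# these examples (grep a statistic out of the result files, then append an
# mfgraph graph).  The file pattern, the column index, and the omission of
# the per-transaction normalization are all assumptions.
import glob, os, string
import mfgraph

def generate_generic_cpustat_per_xact(jgraph_input, results_dir, stat, title, ylabel):
    bars = []
    for filename in glob.glob("%s/*.stats" % results_dir):      # assumed layout
        lines = mfgraph.grep(filename, stat)
        if not lines:
            continue                                             # stat missing
        value = float(string.split(lines[0])[1])                 # assumed column
        bars.append([os.path.basename(filename), ["", value]])
    jgraph_input.append(mfgraph.stacked_bar_graph(bars,
                                                  title = title,
                                                  ylabel = ylabel))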
Example #26
0
def generate_bar_example():
    return [mfgraph.stacked_bar_graph(stacks,
                                     bar_segment_labels = labels,
                                     title = "   ",
                                     title_y = 140,
                                     title_fontsize = "5",
                                     ylabel = "Total Time (sec)",
                                     #xlabel = "Number of pointer jumps",                                                                                                                                   
                                     colors = ["0.375 0.375 0.375", "0.875 0.875 0.875", "0 0 0", "0.625 0.625 0.625"],
                                     legend_x = "2",
                                     legend_y = "125",
                                     legend_type = "Manual",
                                     legend_type_x=[0, 20, 0, 20],
                                     legend_type_y=[10, 10, 0, 0] ,
                                     clip = 300,
                                     ysize = 1.1,
                                     xsize = 6,
                                     ymax = 43200,
                                     patterns = ["solid"],
                                     stack_name_rotate = 25.0,
                                     stack_name_font_size = "6", #bmarks names                                                                                                                              
                                     label_fontsize = "6", #y-axis name                                                                                                                                     
                                     legend_fontsize = "6", #label names                                                                                                                                    
                                     ylog = 10,
                                     ymin = 10,
                                     yhash_marks = [100, 1000, 10000, 43200],
                                     yhash_names = ["100", "1000", "10000", "43200"],
                                     ) + output_list]

jgraphString = generate_bar_example()
mfgraph.run_jgraph("newpage\n".join(jgraphString), "TimeComparison")
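# The part of generate_bar_example() that builds `stacks`, `labels`, and
# `output_list` is not part of this excerpt.  Based on the bar format used in
# the other stacked_bar_graph examples, the expected shapes are roughly:
#
#   labels = ["segment1", "segment2", ...]           # one label per bar segment
#   stacks = [["group1", ["bar1", 20, 10], ...],     # one list per group of bars
#             ["group2", ["bar1", 80, 40], ...]]
#   output_list = []                                  # extra raw jgraph text
#                                                     # appended to the graph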
Example #27
0
def maskpred_traces():

    ## read in all data
    global data_map
    data_map = {}
    for benchmark in benchmarks:
        print benchmark
        data_map[benchmark] = {}  # init map

        # gs320 directory protocol
        filenames = glob.glob("*/%s-*gs320*.stats" % benchmark)
        for filename in filenames:
            control_line = mfgraph.grep(filename,
                                        "  switch_16_messages_out_Control")[0]
            control_line = string.replace(control_line, "[", " ")
            control_line = string.split(control_line)

            data_line = mfgraph.grep(filename,
                                     "  switch_16_messages_out_Data")[0]
            data_line = string.replace(data_line, "[", " ")
            data_line = string.split(data_line)

            # to calculate the request bandwidth, we add up all the
            # outgoing control messages and subtract the number of PutXs
            # (which conveniently happens to be the number of virtual
            # network zero data messages sent).  We need to subtract out
            # the number of PutXs, since the GS320 protocol sends a
            # forwarded control message to 'ack' a PutX from a processor.
            control_msgs = float(control_line[1]) - float(data_line[2])

            sharing_misses = get_data(filename, "sharing_misses:")
            total_misses = get_data(filename, "Total_misses:")
            control_msgs_per_miss = control_msgs / total_misses
            cycles = get_data(filename, "Ruby_cycles:")

            if not data_map[benchmark].has_key("Directory"):
                data_map[benchmark]["Directory"] = []
            indirections = 100.0 * (sharing_misses / total_misses)
            data_map[benchmark]["Directory"].append(
                [control_msgs_per_miss, indirections, cycles])

        # mask prediction data
        filenames = glob.glob("*/%s-*mcast*.stats" % benchmark)
        filenames.sort()
        for filename in filenames:
            predictor = string.split(filename, "-")[3]

            # calculate indirections
            data_lines = mfgraph.grep(filename, "multicast_retries:")

            if not data_lines:
                continue  # missing data

            lst = string.split(string.replace(data_lines[0], "]", ""))
            total_misses = float(lst[6])
            total_misses_alt = get_data(filename, "Total_misses:")
            non_retry = float(lst[13])
            retry = total_misses - non_retry
            indirections = 100.0 * (retry / total_misses)
            cycles = get_data(filename, "Ruby_cycles:")

            # calculate bandwidth
            data_lines = mfgraph.grep(filename,
                                      "  switch_16_messages_out_Control:")

            if not data_lines:
                continue  # missing data

            lst = string.split(string.replace(data_lines[0], "]", ""))
            control_msgs = float(lst[1])
            control_msgs_per_miss = control_msgs / total_misses

            print "  ", predictor, "->", benchmark, control_msgs_per_miss, indirections, cycles
            if not data_map[benchmark].has_key(predictor):
                data_map[benchmark][predictor] = []
            data_map[benchmark][predictor].append(
                [control_msgs_per_miss, indirections, cycles])

    ## Make the graphs
    all_data = []
    all_parameters = []
    for benchmark in benchmarks:
        print benchmark
        # collect data
        data = []

        # mask prediction data
        for predictor in data_map[benchmark].keys():
            include_list = []
            include_list.append("^Directory")
            include_list.append("^AlwaysBroadcast")
            #include_list.append("^AlwaysUnicast")

            # Graph1
            #              include_list.append("Counter:Implicit:5:DataBlock:0:0")
            #              include_list.append("Owner:Implicit:DataBlock:0:0")
            #              include_list.append("BroadcastCounter:Implicit:DataBlock:0:0")
            #              include_list.append("OwnerGroup:Implicit:DataBlock:0:0")
            #              include_list.append("OwnerBroadcast:Implicit:DataBlock:0:0")

            # Graph2
            #              include_list.append("Counter:Implicit:5:.*:0:0")
            #              include_list.append("Owner:Implicit:.*:0:0")
            #              include_list.append("BroadcastCounter:Implicit:.*:0:0")
            #              include_list.append("OwnerGroup:Implicit:.*:0:0")
            #              include_list.append("OwnerBroadcast:Implicit:.*:0:0")

            # Graph3

            #            include_list.append("Owner:Implicit:DataBlock:.*:0")
            #            include_list.append("Counter:Implicit:5:DataBlock:.*:0")
            #            include_list.append("OwnerGroup:Implicit:DataBlock:.*:0")
            #            include_list.append("OwnerGroupMod:Implicit:DataBlock:.*:0")
            #            include_list.append("OwnerBroadcast:Implicit:DataBlock:.*:0")
            #            include_list.append("BroadcastCounter:Implicit:DataBlock:.*:0")

            # Graph4
            #            include_list.append("Owner:Implicit:DataBlock:4:.*")
            #            include_list.append("Counter:Implicit:5:DataBlock:4:.*")
            #            include_list.append("BroadcastCounter:Implicit:DataBlock:4:.*")
            #            include_list.append("OwnerGroup:Implicit:DataBlock:4:.*")
            #            include_list.append("OwnerGroupMod:Implicit:DataBlock:4:.*")
            #            include_list.append("OwnerBroadcast:Implicit:DataBlock:4:.*")

            include_list.append("^StickySpatial:Both:1:DataBlock:0:.*")
            #            include_list.append("^OwnerGroup:.*:DataBlock:0:0")

            #            include_list.append("^Owner:Implicit:DataBlock:4:.*")
            #            include_list.append("^Counter:.*:5:DataBlock:0:0")
            #            include_list.append("^OwnerGroup:.*:DataBlock:0:0")
            #            include_list.append("^BroadcastCounter:Implicit:DataBlock:4:.*")
            #            include_list.append("^OwnerGroup:Implicit:DataBlock:.*:0")
            #            include_list.append("^OwnerGroupMod:Implicit:DataBlock:0:0")
            #            include_list.append("^OwnerBroadcast:Implicit:DataBlock:0:0")
            #            include_list.append("^OwnerBroadcastMod:Implicit:DataBlock:0:0")

            #            include_list.append("^BroadcastCounter:Implicit:1:DataBlock:4:.*")
            #            include_list.append("^OwnerGroup:Implicit:DataBlock:[06]:0")
            #            include_list.append("^BroadcastCounter:Implicit:1:DataBlock:0:0")
            #            include_list.append("^Counter:Implicit:1:DataBlock:[06]:0")
            #            include_list.append("^Pairwise:Implicit:DataBlock:4:.*")
            #            include_list.append("^StickySpatial:Implicit:2:DataBlock:0:0")

            #            include_list.append("^Counter:Implicit:1:DataBlock:.*:0")
            #            include_list.append("^BroadcastCounter:Implicit:1:DataBlock:.*:0")
            #            include_list.append("^Pairwise:Implicit:DataBlock:.*:0")

            #            include_list.append("^Counter:Implicit:1:PC:.*:0")
            #            include_list.append("^BroadcastCounter:Implicit:1:PC:.*:0")
            #            include_list.append("^Pairwise:Implicit:PC:.*:0")

            #            include_list.append("^StickySpatial:.*:0:DataBlock:8")

            include = 0  # false
            for pat in include_list:
                if re.compile(pat).search(predictor):
                    include = 1  # true
            if not include:
                #                print "  ", predictor, "-> skipped"
                continue

            predictor_desc = predictor_name_transform(predictor)

            (control_msgs_per_miss, indirections,
             cycles) = get_maskpred_data(benchmark, predictor)
            (dir_control_msgs_per_miss, dir_indirections,
             dir_cycles) = get_maskpred_data(benchmark, "Directory")
            #            indirections = 100*indirections/dir_indirections

            print "  ", predictor, "->", benchmark, predictor_desc, control_msgs_per_miss, indirections
            data.append(
                [predictor_desc, [control_msgs_per_miss, indirections]])

        # graph the data
        all_data.append(data)
        all_parameters.append({"title": benchmark_names[benchmark]})

    # only display the legend on the last graph
    all_parameters[-1]["legend"] = "on"

    output = mfgraph.multi_graph(
        all_data,
        all_parameters,
        legend="off",
        xsize=1.8,
        ysize=1.8,
        xlabel="control messages per miss",
        ylabel="indirections (percent of all misses)",
        #                                 linetype = ["dotted"] + (["none"] * 10),
        linetype=["none"] * 10,
        colors=["1 0 0", "0 0 1", "0 .5 0", "0 0 0", "1 0 1"],
        fills=["1 0 0", "0 0 1", "0 .5 0", "0 .5 1", "1 0 1"],
        xmin=0.0,
        ymin=0.0,
        cols=3,
        #                    ymax = 100.0,
        marktype=(["circle", "box", "diamond", "triangle"] * 10),
        #                                 marktype = ["none"] + (["circle", "box", "diamond", "triangle"] * 10),
        title_fontsize="12",
        legend_hack="yes",
    )

    mfgraph.run_jgraph(output, "traces")
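# get_maskpred_data() is not shown above.  A minimal sketch, assuming it just
# averages the [control_msgs_per_miss, indirections, cycles] entries that the
# loop above accumulated in data_map for one benchmark/predictor pair:
import mfgraph

def get_maskpred_data(benchmark, predictor):
    entries = data_map[benchmark][predictor]
    control = mfgraph.average([e[0] for e in entries])
    indirections = mfgraph.average([e[1] for e in entries])
    cycles = mfgraph.average([e[2] for e in entries])
    return (control, indirections, cycles)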
Example #28
0
def generate_macro(scale, benchmarks, stat, ylabel, transformation, ymax):
    cols = 3
    row_space = 2.2
    col_space = 2.3
    jgraph_input = ""
    num = 0
    ## Generate bandwidth available vs performance (one graph per processor count)
    for benchmark in benchmarks:
        processor = 16
        lines = []
        if scale == 4:
            modules = "MOSI_bcast_opt_4", "MOSI_mcast_aggr_4", "MOSI_GS_4",

        if scale == 1:
            modules = "MOSI_bcast_opt_1", "MOSI_mcast_aggr_1", "MOSI_GS_1",

        for module in modules:
            points = []
            for bandwidth in bandwidth_list:
                if bandwidth < 600:
                    continue
                if bandwidth > 12800:
                    continue
                data = get_data(benchmark,
                                processor=processor,
                                module=module,
                                bandwidth=bandwidth,
                                stat=stat)
                if len(data) > 0:
                    value = mfgraph.average(data)
                    stddev = mfgraph.stddev(data)
                    if (
                            stddev / value
                    ) * 100.0 > 1.0 and benchmark != "microbenchmark":  # only plot error bars if they are more than 1%
                        points.append(
                            [bandwidth, value, value + stddev, value - stddev])
                    else:
                        points.append([bandwidth, value])

            lines.append([protocol_name[module]] + points)

        transformation(lines)

        # don't plot marks for the microbenchmark
        benchmark_marktype = ["circle"]
        if benchmark == "microbenchmark":
            benchmark_marktype = ["none"]

        xlabel = "endpoint bandwidth available (MB/second)"
        jgraph_input += mfgraph.line_graph(
            lines,
            #title = "%s: %dx%d processors" % (workload_name[benchmark], scale, processor),
            title="%s" % (workload_name[benchmark]),
            title_fontsize="10",
            title_font="Times-Roman",
            ymax=ymax,
            xlabel=xlabel,
            ylabel=ylabel,
            label_fontsize="9",
            xsize=1.8,
            ysize=1.4,
            xlog=10,
            xmin=450.0,
            xmax=12800.0,
            legend_x="2500",
            legend_y=".18",
            legend_fontsize="8",
            marktype=benchmark_marktype,
            marksize=.03,
            x_translate=(num % cols) * col_space,
            y_translate=(num / cols) * -row_space,
            ylabel_location=18.0,
        )

        if stat == "links_utilized_percent":
            jgraph_input += "newcurve clip pts 0.1 75 11000 75 linetype solid linethickness 1 marktype none gray .75\n"
            jgraph_input += "newstring x 10000 y 76 fontsize 8 : 75%\n"

        num += 1

    mfgraph.run_jgraph(
        jgraph_input,
        "bash-macrobenchmarks-%d-%s" % (scale, string.split(ylabel)[0]))
Example #29
0
def instant_sharers():
    jgraph_input = []

    gets_map = {
        "N N" : 1,
        "Y N" : 0,
        }

    getx_map = {
        "N N N N" : 1,
        "N N N Y" : 1,
        "N N Y N" : 1,
        "N N Y Y" : 1,

#        "N Y N N" : "X",
#        "N Y N Y" : "X",
#        "N Y Y N" : "X",
        "N Y Y Y" : 0,

        "Y N N N" : 0,
        "Y N N Y" : 0,
        "Y N Y N" : 0,
        "Y N Y Y" : 0,
        
#        "Y Y N N" : "X",
#        "Y Y N Y" : "X",
#        "Y Y Y N" : "X",
#        "Y Y Y Y" : "X",
        }
    
    cols = 2
    row_space = 2.9
    col_space = 3

    num = 0
    bars = []
    for benchmark in benchmarks:
        getx_dist = [0] * 16
        gets_dist = [0] * 16
        
        print benchmark
        group = [benchmark_names[benchmark]]
        filename = glob.glob("%s/*trace-profiler*.stats" % benchmark)[0]
        gets_mode = 0
        sum = 0
        for line in open(filename).readlines():
            line = string.strip(line)
            line = string.translate(line, string.maketrans("][", "  "))

            # set mode
            str = "Total requests: "
            if line[0:len(str)] == str:
                total_requests = int(string.split(line)[2])

            if line == "GETS message classifications:":
                gets_mode = 1
            
            if line == "":
                gets_mode = 0

            if gets_mode == 1:
                #gets
                key = line[0:3]
                if gets_map.has_key(key):
                    parts = string.split(line)
                    sum += int(parts[2])
                    # no histogram
                    data = parts[2:3]

                    # shift if one
                    if gets_map[key] == 1:
                        data = [0] + data

                    data = map(int, data)
                    add_list(gets_dist, data)
            else:
                #getx
                key = line[0:7]
                if getx_map.has_key(key):
                    parts = string.split(line)
                    sum += int(parts[4])
                    if len(parts) > 10:
                        # histogram
                        data = parts[19:]
                    else:
                        # no histogram
                        data = parts[4:5]

                    # shift if one
                    if getx_map[key] == 1:
                        data = [0] + data

                    data = map(int, data)
                    add_list(getx_dist, data)

        for i in range(len(getx_dist)):
            gets_dist[i] = 100.0 * ((gets_dist[i]+getx_dist[i]) / float(sum))
            getx_dist[i] = 100.0 * (getx_dist[i] / float(sum))

        getx_dist = getx_dist[0:3] + [reduce(lambda x,y:x+y, getx_dist[3:7])] + [reduce(lambda x,y:x+y, getx_dist[7:])]
        gets_dist = gets_dist[0:3] + [reduce(lambda x,y:x+y, gets_dist[3:7])] + [reduce(lambda x,y:x+y, gets_dist[7:])]

        print getx_dist
        print gets_dist
        print "indirections:", 100.0-gets_dist[0]

        labels = ["0", "1", "2", "3-7", ">8"]
##         labels = []
##         for i in range(16):
##             if i in [0, 3, 7, 11, 15]:
##                 labels.append("%d" % i)
##             else:
##                 labels.append("")

        bars.append([benchmark_names[benchmark]] + map(None, labels, gets_dist, getx_dist))
    print bars
    jgraph_input.append(mfgraph.stacked_bar_graph(bars,
                                                  bar_segment_labels = ["Get shared", "Get exclusive"],
                                                  xsize = 7,
                                                  ysize = 3,
                                                  ylabel = "Percent of all misses",
                                                  legend_x = "25",
                                                  legend_y = "90",
                        ))
    
    mfgraph.run_jgraph("\n".join(jgraph_input), "instant-sharers")
Example #30
0
def generate_macro(scale, benchmarks, stat, ylabel, transformation, ymax):
    cols = 3
    row_space = 2.2
    col_space = 2.3
    jgraph_input = ""
    num = 0
    ## Generate bandwidth available vs performance (one graph per processor count)
    for benchmark in benchmarks:
        processor = 16
        lines = []
        if scale == 4:
            modules = "MOSI_bcast_opt_4", "MOSI_mcast_aggr_4", "MOSI_GS_4", 

        if scale == 1:
            modules = "MOSI_bcast_opt_1", "MOSI_mcast_aggr_1", "MOSI_GS_1", 

        for module in modules:
            points = []
            for bandwidth in bandwidth_list:
                if bandwidth < 600:
                    continue
                if bandwidth > 12800:
                    continue
                data = get_data(benchmark, processor=processor, module=module, bandwidth=bandwidth, stat=stat)
                if len(data) > 0:
                    value = mfgraph.average(data)
                    stddev = mfgraph.stddev(data)
                    if (stddev/value)*100.0 > 1.0 and benchmark != "microbenchmark": # only plot error bars if they are more than 1%
                        points.append([bandwidth, value, value+stddev, value-stddev])
                    else:
                        points.append([bandwidth, value])
                        
            lines.append([protocol_name[module]] + points)
            
        transformation(lines)

        # don't plot marks for the microbenchmark
        benchmark_marktype = ["circle"]
        if benchmark == "microbenchmark":
            benchmark_marktype = ["none"]
            
        xlabel = "endpoint bandwidth available (MB/second)"
        jgraph_input += mfgraph.line_graph(lines,
                                           #title = "%s: %dx%d processors" % (workload_name[benchmark], scale, processor),
                                           title = "%s" % (workload_name[benchmark]),
                                           title_fontsize = "10",
                                           title_font = "Times-Roman",
                                           ymax = ymax,
                                           xlabel = xlabel,
                                           ylabel = ylabel,
                                           label_fontsize = "9",
                                           xsize = 1.8,
                                           ysize = 1.4,
                                           xlog = 10,
                                           xmin = 450.0,
                                           xmax = 12800.0,
                                           legend_x = "2500",
                                           legend_y = ".18",
                                           legend_fontsize = "8",
                                           marktype = benchmark_marktype,
                                           marksize = .03,
                                           x_translate = (num % cols) * col_space,
                                           y_translate = (num / cols) * -row_space,
                                           ylabel_location = 18.0,
                                           )
        
        if stat == "links_utilized_percent":
            jgraph_input += "newcurve clip pts 0.1 75 11000 75 linetype solid linethickness 1 marktype none gray .75\n"
            jgraph_input += "newstring x 10000 y 76 fontsize 8 : 75%\n"
        
        num += 1
        
    mfgraph.run_jgraph(jgraph_input, "bash-macrobenchmarks-%d-%s" % (scale, string.split(ylabel)[0]))
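# The keyword-argument form of get_data() used by generate_macro() is defined
# elsewhere.  The sketch below is only an assumption about its behaviour: it
# returns one float per simulation run for the requested statistic, so that
# mfgraph.average() and mfgraph.stddev() can be applied above.  The file
# naming pattern and column index are invented for illustration.
import glob, string
import mfgraph

def get_data(benchmark, processor, module, bandwidth, stat):
    values = []
    pattern = "%s/*%dp*%s*%d*.stats" % (benchmark, processor, module, bandwidth)
    for filename in glob.glob(pattern):
        lines = mfgraph.grep(filename, stat)
        if lines:
            values.append(float(string.split(lines[0])[1]))
    return values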
Example #31
0
def maskpred_traces():

    ## read in all data
    global data_map
    data_map = {}
    for benchmark in benchmarks:
        print benchmark
        data_map[benchmark] = {} # init map
        
        # gs320 directory protocol
        filenames = glob.glob("*/%s-*gs320*.stats" % benchmark)
        for filename in filenames:
            control_line = mfgraph.grep(filename, "  switch_16_messages_out_Control")[0]
            control_line = string.replace(control_line, "[", " ")
            control_line = string.split(control_line)

            data_line = mfgraph.grep(filename, "  switch_16_messages_out_Data")[0]
            data_line = string.replace(data_line, "[", " ")
            data_line = string.split(data_line)

            # to calculate the request bandwidth, we add up all the
            # outgoing control messages and subtract the number of PutXs
            # (which conveniently happens to be the number of virtual
            # network zero data messages sent).  We need to subtract out
            # the number of PutXs, since the GS320 protocol sends a
            # forwarded control message to 'ack' a PutX from a processor.
            control_msgs = float(control_line[1]) - float(data_line[2])

            sharing_misses = get_data(filename, "sharing_misses:")
            total_misses = get_data(filename, "Total_misses:")
            control_msgs_per_miss = control_msgs/total_misses
            cycles = get_data(filename, "Ruby_cycles:")

            if not data_map[benchmark].has_key("Directory"):
                data_map[benchmark]["Directory"] = []
            indirections = 100.0 * (sharing_misses/total_misses)
            data_map[benchmark]["Directory"].append([control_msgs_per_miss, indirections, cycles])
    
        # mask prediction data
        filenames = glob.glob("*/%s-*mcast*.stats" % benchmark)
        filenames.sort()
        for filename in filenames:
            predictor = string.split(filename, "-")[3]
            
            # calculate indirections
            data_lines = mfgraph.grep(filename, "multicast_retries:")

            if not data_lines:
                continue # missing data

            lst = string.split(string.replace(data_lines[0], "]", ""))
            total_misses = float(lst[6])
            total_misses_alt = get_data(filename, "Total_misses:")
            non_retry = float(lst[13])
            retry = total_misses - non_retry
            indirections = 100.0 * (retry/total_misses)
            cycles = get_data(filename, "Ruby_cycles:")

            # calculate bandwidth
            data_lines = mfgraph.grep(filename, "  switch_16_messages_out_Control:")

            if not data_lines:
                continue # missing data

            lst = string.split(string.replace(data_lines[0], "]", ""))
            control_msgs = float(lst[1])
            control_msgs_per_miss = control_msgs/total_misses

            print "  ", predictor, "->", benchmark, control_msgs_per_miss, indirections, cycles
            if not data_map[benchmark].has_key(predictor):
                data_map[benchmark][predictor] = []
            data_map[benchmark][predictor].append([control_msgs_per_miss, indirections, cycles])

    ## Make the graphs
    all_data = []
    all_parameters = []
    for benchmark in benchmarks:
        print benchmark
        # collect data
        data = []

        # mask prediction data
        for predictor in data_map[benchmark].keys():
            include_list = []
            include_list.append("^Directory")
            include_list.append("^AlwaysBroadcast")
            #include_list.append("^AlwaysUnicast")
            
# Graph1
#              include_list.append("Counter:Implicit:5:DataBlock:0:0")
#              include_list.append("Owner:Implicit:DataBlock:0:0")
#              include_list.append("BroadcastCounter:Implicit:DataBlock:0:0")
#              include_list.append("OwnerGroup:Implicit:DataBlock:0:0")
#              include_list.append("OwnerBroadcast:Implicit:DataBlock:0:0")

# Graph2
#              include_list.append("Counter:Implicit:5:.*:0:0")
#              include_list.append("Owner:Implicit:.*:0:0")
#              include_list.append("BroadcastCounter:Implicit:.*:0:0")
#              include_list.append("OwnerGroup:Implicit:.*:0:0")
#              include_list.append("OwnerBroadcast:Implicit:.*:0:0")

# Graph3

#            include_list.append("Owner:Implicit:DataBlock:.*:0")
#            include_list.append("Counter:Implicit:5:DataBlock:.*:0")
#            include_list.append("OwnerGroup:Implicit:DataBlock:.*:0")
#            include_list.append("OwnerGroupMod:Implicit:DataBlock:.*:0")
#            include_list.append("OwnerBroadcast:Implicit:DataBlock:.*:0")
#            include_list.append("BroadcastCounter:Implicit:DataBlock:.*:0")

# Graph4
#            include_list.append("Owner:Implicit:DataBlock:4:.*")
#            include_list.append("Counter:Implicit:5:DataBlock:4:.*")
#            include_list.append("BroadcastCounter:Implicit:DataBlock:4:.*")
#            include_list.append("OwnerGroup:Implicit:DataBlock:4:.*")
#            include_list.append("OwnerGroupMod:Implicit:DataBlock:4:.*")
#            include_list.append("OwnerBroadcast:Implicit:DataBlock:4:.*")

            include_list.append("^StickySpatial:Both:1:DataBlock:0:.*")
#            include_list.append("^OwnerGroup:.*:DataBlock:0:0")

#            include_list.append("^Owner:Implicit:DataBlock:4:.*")
#            include_list.append("^Counter:.*:5:DataBlock:0:0")
#            include_list.append("^OwnerGroup:.*:DataBlock:0:0")
#            include_list.append("^BroadcastCounter:Implicit:DataBlock:4:.*")
#            include_list.append("^OwnerGroup:Implicit:DataBlock:.*:0")
#            include_list.append("^OwnerGroupMod:Implicit:DataBlock:0:0")
#            include_list.append("^OwnerBroadcast:Implicit:DataBlock:0:0")
#            include_list.append("^OwnerBroadcastMod:Implicit:DataBlock:0:0")

#            include_list.append("^BroadcastCounter:Implicit:1:DataBlock:4:.*")
#            include_list.append("^OwnerGroup:Implicit:DataBlock:[06]:0")
#            include_list.append("^BroadcastCounter:Implicit:1:DataBlock:0:0")
#            include_list.append("^Counter:Implicit:1:DataBlock:[06]:0")
#            include_list.append("^Pairwise:Implicit:DataBlock:4:.*")
#            include_list.append("^StickySpatial:Implicit:2:DataBlock:0:0")

#            include_list.append("^Counter:Implicit:1:DataBlock:.*:0")
#            include_list.append("^BroadcastCounter:Implicit:1:DataBlock:.*:0")
#            include_list.append("^Pairwise:Implicit:DataBlock:.*:0")

#            include_list.append("^Counter:Implicit:1:PC:.*:0")
#            include_list.append("^BroadcastCounter:Implicit:1:PC:.*:0")
#            include_list.append("^Pairwise:Implicit:PC:.*:0")

#            include_list.append("^StickySpatial:.*:0:DataBlock:8")
            
            include = 0 # false
            for pat in include_list:
                if re.compile(pat).search(predictor):
                    include = 1 # true
            if not include:
#                print "  ", predictor, "-> skipped"
                continue
            
            predictor_desc = predictor_name_transform(predictor)

            (control_msgs_per_miss, indirections, cycles) = get_maskpred_data(benchmark, predictor)
            (dir_control_msgs_per_miss, dir_indirections, dir_cycles) = get_maskpred_data(benchmark, "Directory")
#            indirections = 100*indirections/dir_indirections
            
            print "  ", predictor, "->", benchmark, predictor_desc, control_msgs_per_miss, indirections
            data.append([predictor_desc, [control_msgs_per_miss, indirections]])

        # graph the data
        all_data.append(data)
        all_parameters.append({ "title" : benchmark_names[benchmark] })

    # only display the legend on the last graph
    all_parameters[-1]["legend"] = "on"

    output = mfgraph.multi_graph(all_data,
                                 all_parameters,
                                 legend = "off",
                                 xsize = 1.8,
                                 ysize = 1.8,
                                 xlabel = "control messages per miss",
                                 ylabel = "indirections (percent of all misses)",
#                                 linetype = ["dotted"] + (["none"] * 10),
                                 linetype = ["none"] * 10,
                                 colors = ["1 0 0", "0 0 1", "0 .5 0", "0 0 0", "1 0 1"],
                                 fills = ["1 0 0", "0 0 1", "0 .5 0", "0 .5 1", "1 0 1"],
                                 xmin = 0.0,
                                 ymin = 0.0,
                                 cols = 3,
                                 #                    ymax = 100.0,
                                 marktype = (["circle", "box", "diamond", "triangle"] * 10),
#                                 marktype = ["none"] + (["circle", "box", "diamond", "triangle"] * 10),
                                 title_fontsize = "12",
                                 legend_hack = "yes",
                                 )

    mfgraph.run_jgraph(output, "traces")
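# predictor_name_transform() is defined elsewhere.  A rough sketch, assuming
# it just shortens the colon-separated predictor configuration string to its
# leading field so the legend stays readable; the real mapping used for the
# graphs may differ.
import string

def predictor_name_transform(predictor):
    if predictor in ("Directory", "AlwaysBroadcast", "AlwaysUnicast"):
        return predictor
    # e.g. "StickySpatial:Both:1:DataBlock:0:0" -> "StickySpatial"
    return string.split(predictor, ":")[0]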
Example #32
0
    jgraphs.append(
        mfgraph.line_graph(lines,
                           title="INDIRECT Branch Prediction Accuracy",
                           xlabel="PHT Bits",
                           ylabel="% Correct",
                           xsize=6,
                           ysize=4.5,
                           ymin=80,
                           ymax=100))
    #draw_std_line(jgraphs, "Branch Prediction Accuracy", "PHT Bits", "% Correct", lines)


jgraph_input = []
#results_dir = "/p/multifacet/projects/ecperf/multifacet/workloads/ecperf/"
opal_output_dir = "/p/multifacet/projects/ecperf/multifacet/condor/results/"

if len(sys.argv) == 2:
    run_dir = sys.argv[1]

    results_dir = opal_output_dir + run_dir
    print results_dir
    generate_cpi(jgraph_input, results_dir)
    generate_reasons_for_fetch_stall(jgraph_input, results_dir)
    generate_reasons_for_retire_stall(jgraph_input, results_dir)
    generate_pcond_branch_prediction_rate(jgraph_input, results_dir)

    print "Graph Inputs Ready...Running Jgraph."
    mfgraph.run_jgraph("newpage\n".join(jgraph_input), "opal-" + run_dir)
else:
    print "usage: opal.py <benchmark>"
Example #33
0
    print legend_hack
    
    jgraph_input.append(mfgraph.line_graph(lines,
                                           title = benchmark_map[bench],
                                           title_fontsize = "12",
                                           title_font = "Times-Roman",
                                           xsize = 1.8,
                                           ysize = 1.8,
                                           xlabel = "control bandwidth (normalized to Broadcast)",
                                           ylabel = "indirections (normalized to Directory)",
                                           label_fontsize = "10",
                                           label_font = "Times-Roman",
                                           legend_fontsize = "10",
                                           legend_font = "Times-Roman",
                                           linetype = ["none"],
                                           marktype = ["circle", "box", "diamond", "triangle", "triangle"],
                                           mrotate = [0, 0, 0, 0, 180],
                                           colors = ["0 0 0"], 
                                           xmin = 0,
                                           x_translate = (num % cols) * col_space,
                                           y_translate = (num / cols) * -row_space,
                                           line_thickness = 1.0,
                                           legend_hack = legend_hack,
                                           legend_x = "150",
#                                           legend_y = "",
                                           ))
    
    num += 1

mfgraph.run_jgraph("\n".join(jgraph_input), "/p/multifacet/papers/mask-prediction/graphs/predsize")
Example #34
0
    generate_generic_cpustat_per_xact(jgraph_input, results_dir, "ECACHE_CTOC_PER_INST", "CTOC/Transaction", "Misses/Transaction")
    
#     generate_generic_cpustat(jgraph_input, results_dir, "ADDRESSBUS_UTIL", "Bus Utilization", "")
#     generate_generic_cpustat(jgraph_input, results_dir, "BRANCH_RATE", "Branch Rate", "Branches/Instruction")
#     generate_generic_cpustat(jgraph_input, results_dir, "BRANCH_MISSRATE", "Branch Missrate", "")
#     generate_generic_cpustat(jgraph_input, results_dir, "BRANCH_STR", "Branch Stall Rate", "Cycles/Instruction")
#     generate_generic_cpustat(jgraph_input, results_dir, "BRANCH_TAKENRATE", "Branch Taken Rate", "")

#     generate_generic_cpustat(jgraph_input, results_dir, "STOREQUEUE_STR", "Store Queue Stall Rate", "Cycles/Instruction")
#     generate_generic_cpustat(jgraph_input, results_dir, "IU_RAWSTALLRATE", "IU Raw Stall Rate", "Cycles/Instruction")
#     generate_generic_cpustat(jgraph_input, results_dir, "FP_RAWSTALLRATE", "FP Raw Stall Rate", "Cycles/Instruction")

#     generate_generic_cpustat(jgraph_input, results_dir, "MCU_READS0", "MCU Reads S0", "")
#     generate_generic_cpustat(jgraph_input, results_dir, "MCU_READS1", "MCU Reads S1", "")
#     generate_generic_cpustat(jgraph_input, results_dir, "MCU_READS2", "MCU Reads S2", "")
#     generate_generic_cpustat(jgraph_input, results_dir, "MCU_READS3", "MCU Reads S3", "")

#     generate_generic_cpustat(jgraph_input, results_dir, "MCU_WRITES0", "MCU Writes S0", "")
#     generate_generic_cpustat(jgraph_input, results_dir, "MCU_WRITES1", "MCU Writes S1", "")
#     generate_generic_cpustat(jgraph_input, results_dir, "MCU_WRITES2", "MCU Writes S2", "")
#     #generate_generic_cpustat(jgraph_input, results_dir, "MCU_WRITES3", "MCU Writes S3", "")

#     generate_generic_cpustat(jgraph_input, results_dir, "MCU_BANK0STALL", "MCU Bank-0 Stall", "")
#     generate_generic_cpustat(jgraph_input, results_dir, "MCU_BANK1STALL", "MCU Bank-1 Stall", "")
#     generate_generic_cpustat(jgraph_input, results_dir, "MCU_BANK2STALL", "MCU Bank-2 Stall", "")
#     generate_generic_cpustat(jgraph_input, results_dir, "MCU_BANK3STALL", "MCU Bank-3 Stall", "")
    
    print "Graph Inputs Ready...Running Jgraph."
    mfgraph.run_jgraph("newpage\n".join(jgraph_input), "ecperf")

Example #35
0
def cumulative():
    stats = [
        "^block_address ",
        "^macroblock_address ",
        "^pc_address ",
    ]

    stat_name = {
        "^block_address ": "Number of data blocks (64B)",
        "^macroblock_address ": "Number of data macroblocks (1024B)",
        "^pc_address ": "Number of instructions",
    }

    jgraph_input = []

    cols = 3
    row_space = 2.9
    col_space = 3

    num = 0
    for stat in stats:
        graph_lines = []
        for benchmark in benchmarks:
            print stat, benchmark
            group = [benchmark_names[benchmark]]
            filenames = glob.glob("%s/*trace-profiler*.stats" % benchmark)
            for filename in filenames:
                print filename

                command = 'egrep "^Total_data_misses_block_address" %s' % (
                    filename)
                results = os.popen(command, "r")
                line = results.readlines()[0]
                total_misses = float(string.split(line)[1])

                command = 'egrep "^sharing_misses:" %s' % (filename)
                results = os.popen(command, "r")
                line = results.readlines()[0]
                total_sharing_misses = float(string.split(line)[1])

                sharing_misses = get_cumulative(filename, 16,
                                                total_sharing_misses, stat)
                line = [benchmark_names[benchmark]]
                #                points = range(0, 100) + range(100, len(sharing_misses), 10)
                points = range(1, len(sharing_misses), 100)
                for i in points:
                    line.append([i + 1, sharing_misses[i]])
                graph_lines.append(line)

        jgraph_input.append(
            mfgraph.line_graph(
                graph_lines,
                ymax=100.0,
                #                                               xlog = 10,
                xmax=10000,
                title="",
                title_fontsize="12",
                title_font="Times-Roman",
                xsize=2.0,
                ysize=2.5,
                xlabel=stat_name[stat],
                ylabel="Percent of all sharing misses (cumulative)",
                label_fontsize="10",
                label_font="Times-Roman",
                legend_fontsize="10",
                legend_font="Times-Roman",
                legend_x="4000",
                legend_y="20",
                x_translate=(num % cols) * col_space,
                y_translate=(num / cols) * -row_space,
                line_thickness=1.0,
            ))

        ##         jgraph_input.append(mfgraph.stacked_bar_graph(bars,
        ##                                                       title = stats_names[stat],
        ##                                                       xsize = 6.5,
        ##                                                       ysize = 2,
        ##                                                       xlabel = "",
        ##                                                       ylabel = "",
        ##                                                       stack_space = 3,
        ##                                                       ))
        num += 1

    mfgraph.run_jgraph("\n".join(jgraph_input), "cumulative")
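# get_cumulative() is not part of this excerpt.  The sketch below is an
# assumption about what it computes, based on how its result is plotted
# above: one cumulative-percentage value per address, with the hottest
# addresses first.  The column holding each address's sharing-miss count
# (and the second argument, unused here) are guesses.
import string
import mfgraph

def get_cumulative(filename, processors, total_sharing_misses, stat_pattern):
    counts = []
    for line in mfgraph.grep(filename, stat_pattern):
        parts = string.split(string.translate(line, string.maketrans("][", "  ")))
        counts.append(float(parts[1]))           # assumed: per-address miss count
    counts.sort()
    counts.reverse()                             # hottest addresses first
    cumulative = []
    running = 0.0
    for count in counts:
        running += count
        cumulative.append(100.0 * running / total_sharing_misses)
    return cumulative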
Example #36
0
def generate_bar_example(jgraphs):
    bars = [
        [
            "group1",
            ["bar1", 20, 10, 5],
            ["bar2", 10, 5, 2.5],
        ],
        [
            "group2",
            ["bar1", 80, 40, 10],
            ["bar2", [100, 90, 110], 50, 10],  # note, this has an error bar
            ["bar3", 30, 25, 5],
        ]
    ]

    jgraphs.append(
        mfgraph.stacked_bar_graph(
            bars,
            bar_segment_labels=["segment1", "segment2", "segment3"],
            xsize=5.0,
        ))


jgraph_input = []
for graph in graphs:
    generate_line_example(jgraph_input, graph)
generate_bar_example(jgraph_input)

mfgraph.run_jgraph("newpage\n".join(jgraph_input), "examples")
Example #37
0
def touched_by():
    stats = [
        "touched_by_block_address:",
        "touched_by_weighted_block_address:",
        "touched_by_macroblock_address:",
        "touched_by_weighted_macroblock_address:",
        #    "touched_by_pc_address:",
        #    "touched_by_weighted_pc_address:",
    ]

    stats_names = {
        "touched_by_block_address:":
        "(a) Percent blocks touched by n processors",
        "touched_by_weighted_block_address:":
        "(b) Percent of misses to blocks touched by n processors",
        "touched_by_macroblock_address:":
        "(c) Percent of macroblocks touched by n processors",
        "touched_by_weighted_macroblock_address:":
        "(d) Percent of misses to macroblocks touched by n processors",
    }

    jgraph_input = []

    cols = 1
    row_space = 2.9
    col_space = 3

    num = 0
    for stat in stats:
        bars = []
        for benchmark in benchmarks:
            print stat, benchmark
            group = [benchmark_names[benchmark]]
            filenames = glob.glob("%s/*trace-profiler*.stats" % benchmark)
            for filename in filenames:
                line = mfgraph.grep(filename, stat)[0]
                line = string.replace(line, "]", "")
                line = string.replace(line, "[", "")
                data = string.split(line)[2:]
                data = map(float, data)
                normalize_list(data)
                for index in range(len(data)):
                    if index + 1 in [1, 4, 8, 12, 16]:
                        group.append(["%d" % (index + 1), data[index] * 100.0])
                    else:
                        group.append(["", data[index] * 100.0])

            bars.append(group)

        jgraph_input.append(
            mfgraph.stacked_bar_graph(
                bars,
                title=stats_names[stat],
                xsize=6.5,
                ysize=2,
                xlabel="",
                ylabel="",
                stack_space=3,
                x_translate=(num % cols) * col_space,
                y_translate=(num / cols) * -row_space,
            ))
        num += 1

    mfgraph.run_jgraph("\n".join(jgraph_input), "touched-by")
Example #38
0
                                      ylabel = "% Correct",
                                      xsize = 6,
                                      ysize = 4.5,
                                      ymin = 80,
                                      ymax = 100
                                      ))
    #draw_std_line(jgraphs, "Branch Prediction Accuracy", "PHT Bits", "% Correct", lines)

    

jgraph_input = []
#results_dir = "/p/multifacet/projects/ecperf/multifacet/workloads/ecperf/"
opal_output_dir = "/p/multifacet/projects/ecperf/multifacet/condor/results/"

if len(sys.argv) == 2:
    run_dir = sys.argv[1]
    
    results_dir = opal_output_dir + run_dir
    print results_dir
    generate_cpi(jgraph_input, results_dir)
    generate_reasons_for_fetch_stall(jgraph_input, results_dir)
    generate_reasons_for_retire_stall(jgraph_input, results_dir)
    generate_pcond_branch_prediction_rate(jgraph_input, results_dir)
    
    print "Graph Inputs Ready...Running Jgraph."
    mfgraph.run_jgraph("newpage\n".join(jgraph_input), "opal-" + run_dir)
else:
    print "usage: opal.py <benchmark>"