Example #1
File: opal.py Project: dberc/tpzsimul.gems
def get_reasons_for_fetch_stall(output_file):
    grep_lines = mfgraph.grep(output_file, ".*Fetch i-cache miss.*")
    line = string.split(grep_lines[0])
    value_str = string.split(line[6], "%")
    icache_miss = float(value_str[0])

    grep_lines = mfgraph.grep(output_file, ".*Fetch squash.*")
    line = string.split(grep_lines[0])
    value_str = string.split(line[5], "%")
    squash = float(value_str[0])

    grep_lines = mfgraph.grep(output_file, ".*Fetch I-TLB miss.*")
    line = string.split(grep_lines[0])
    value_str = string.split(line[6], "%")
    itlb_miss = float(value_str[0])

    grep_lines = mfgraph.grep(output_file, ".*Window Full.*")
    line = string.split(grep_lines[0])
    value_str = string.split(line[5], "%")
    window_full = float(value_str[0])

    grep_lines = mfgraph.grep(output_file, ".*Fetch Barrier.*")
    line = string.split(grep_lines[0])
    value_str = string.split(line[5], "%")
    barrier = float(value_str[0])

    width = get_issue_width(output_file)
    return [width, icache_miss, squash, itlb_miss, window_full, barrier]
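
Every example in this listing leans on the helper mfgraph.grep, whose definition is not shown. Below is a minimal sketch of the behavior the call sites imply (return every line of a file whose text matches a regular expression); the body is an assumption, only the signature comes from the examples.

import re

def grep(filename, pattern):
    # Assumed behavior: return all lines of `filename` matching `pattern`
    # (regular-expression search, not a plain substring match).
    compiled = re.compile(pattern)
    matching = []
    for line in open(filename):
        if compiled.search(line):
            matching.append(line.rstrip("\n"))
    return matching
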
Example #2
File: ecperf.py Project: vnaveen0/nachos
def get_tuning_data(results_dir):
    map = {}
    data = []

    directories = glob.glob(results_dir + "/*")
    for dir in directories:

        filename = dir + "/ECperf.summary"
        if os.access(filename, os.F_OK):
            grep_lines = mfgraph.grep(filename, ".+Metric.+")
            line = string.split(grep_lines[0])
            throughput = float(line[3])

            filename = dir + "/config.summary"
            grep_lines = mfgraph.grep(filename, "Thread Count")
            line = string.split(grep_lines[0])
            threads = int(line[2])

            grep_lines = mfgraph.grep(filename, "Tx Rate")
            line = string.split(grep_lines[0])
            tx_rate = int(line[2])

            procs = ecperf_get_num_procs(directory=dir)

            filename = dir + "/Mfg.summary"
            grep_lines = mfgraph.grep(filename, "ECPerf Requirement for 90")
            line = string.split(grep_lines[0])
            passed_mfg = line[6]

            filename = dir + "/Orders.summary"
            grep_lines = mfgraph.grep(filename, "ECPerf Requirement for 90")
            line = string.split(grep_lines[0])
            passed_ords = line[6]

            tuple = [
                procs, throughput, tx_rate, threads, passed_ords, passed_mfg,
                os.path.basename(dir)
            ]

            data.append(tuple)

        #         key = threads
        #         if map.has_key(key):
        #             map[key].append(tuple)
        #         else:
        #             map[key] = []
        #             map[key].append(tuple)

    data.sort()

    print "Run Directory: %s" % results_dir
    print "procs score tx_rate threads ords mfg directory"
    for tuple in data:
        print "%d %.1f %d %d %s %s %s" % (tuple[0], tuple[1], tuple[2],
                                          tuple[3], tuple[4], tuple[5],
                                          tuple[6])

    return data
Example #3
File: scale.py Project: vnaveen0/nachos
def gen_scale(benchmarks):
    configs = ["1p", "2p", "4p", "8p", "16p"]

    base_config = "1p"

    parameter = "Ruby_cycles"

    stacks = []
    print "parsing..."
    for benchmark in benchmarks:
        assoc_data = {}
        for config in configs:
            sys.stderr.write("  %s %s\n" % (benchmark, config))
            numbers = []
            filenames = glob.glob(benchmark + "/*-" + config + "-*.stats")
            for filename in filenames:
                lines = mfgraph.grep(filename, parameter)
                line = lines[0]
                numbers.append(float(string.split(line)[1]))
            med = mfgraph.median(numbers)
            assoc_data[config] = med
        mfgraph.normalize(assoc_data, base_config)

        bars = []
        stack_data = [benchmark]
        for config in configs:
            bars.append([config, assoc_data[config]])
        stacks.append([benchmark] + bars)
    print "done."

    return [mfgraph.stacked_bar_graph(stacks,
                                      title = "Scalability",
                                      ylabel = "normalized runtime",
                                      colors = ["0 0 1"],
                                      patterns = ["solid"])]
Example #4
def get_c2c(cpustat_file):
    grep_lines = mfgraph.grep(cpustat_file, ".+total.+pic0=Instr_cnt,pic1=EC_snoop_cb,sys")
    c2c = 0
    for g_line in grep_lines:
        line = string.split(g_line)
        c2c += long(line[5])
    return c2c
Example #5
File: scale.py Project: iniverno/RnR-LLC
def gen_scale(benchmarks):
    configs = ["1p", "2p", "4p", "8p", "16p"]

    base_config = "1p"

    parameter = "Ruby_cycles"

    stacks = []
    print "parsing..."
    for benchmark in benchmarks:
        assoc_data = {}
        for config in configs:
            sys.stderr.write("  %s %s\n" % (benchmark, config))
            numbers = []
            filenames = glob.glob(benchmark + "/*-" + config + "-*.stats")
            for filename in filenames:
                lines = mfgraph.grep(filename, parameter)
                line = lines[0]
                numbers.append(float(string.split(line)[1]))
            med = mfgraph.median(numbers)
            assoc_data[config] = med
        mfgraph.normalize(assoc_data, base_config)

        bars = []
        stack_data = [benchmark]
        for config in configs:
            bars.append([config, assoc_data[config]])
        stacks.append([benchmark] + bars)
    print "done."

    return [
        mfgraph.stacked_bar_graph(
            stacks, title="Scalability", ylabel="normalized runtime", colors=["0 0 1"], patterns=["solid"]
        )
    ]
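
gen_scale also relies on mfgraph.median and mfgraph.normalize, neither of which appears in this listing. The sketches below are one plausible reading (both bodies are assumptions): median picks the middle of the per-run samples, and normalize divides each configuration's value by the base configuration's so the 1p bar lands at 1.0.

def median(numbers):
    # Assumed: middle value of the samples (mean of the two middle values
    # for an even-length list).
    values = sorted(numbers)
    n = len(values)
    if n % 2 == 1:
        return values[n // 2]
    return (values[n // 2 - 1] + values[n // 2]) / 2.0

def normalize(assoc_data, base_config):
    # Assumed: divide each entry by the base configuration's value, in place.
    base = assoc_data[base_config]
    for key in assoc_data.keys():
        assoc_data[key] = assoc_data[key] / base

Note that gen_protocol further down passes [median, min, max] lists as the values, so the real helper presumably also handles element-wise division.
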
Example #6
File: opal.py Project: dberc/tpzsimul.gems
def get_cpi(output_file):
    print output_file
    grep_lines = mfgraph.grep(output_file, ".*Instruction per cycle.*")
    print grep_lines
    line = string.split(grep_lines[0])
    ipc = float(line[4])
    cpi = 1.0/ipc
    return cpi
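
get_cpi assumes the IPC value is the fifth whitespace-separated token of the matching line. A hypothetical stats line (not taken from real Opal output) makes the indexing concrete:

import string

line = string.split("Instruction per cycle (IPC): 1.25")  # hypothetical line
ipc = float(line[4])  # line[4] == "1.25"
cpi = 1.0 / ipc       # 0.8
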
Example #7
def touched_by():
    stats = [
        "touched_by_block_address:",
        "touched_by_weighted_block_address:",
        "touched_by_macroblock_address:",
        "touched_by_weighted_macroblock_address:",
        #    "touched_by_pc_address:",
        #    "touched_by_weighted_pc_address:",
        ]
    
    stats_names = {
        "touched_by_block_address:" : "(a) Percent blocks touched by n processors",
        "touched_by_weighted_block_address:": "(b) Percent of misses to blocks touched by n processors",
        "touched_by_macroblock_address:": "(c) Percent of macroblocks touched by n processors",
        "touched_by_weighted_macroblock_address:": "(d) Percent of misses to macroblocks touched by n processors",
        }

    jgraph_input = []

    cols = 1
    row_space = 2.9
    col_space = 3

    num = 0
    for stat in stats:
        bars = []
        for benchmark in benchmarks:
            print stat, benchmark
            group = [benchmark_names[benchmark]]
            filenames = glob.glob("%s/*trace-profiler*.stats" % benchmark)
            for filename in filenames:
                line = mfgraph.grep(filename, stat)[0]
                line = string.replace(line, "]", "")
                line = string.replace(line, "[", "")
                data = string.split(line)[2:]
                data = map(float, data)
                normalize_list(data)
                for index in range(len(data)):
                    if index+1 in [1, 4, 8, 12, 16]:
                        group.append(["%d" % (index+1), data[index]*100.0])
                    else:
                        group.append(["", data[index]*100.0])

            bars.append(group)

        jgraph_input.append(mfgraph.stacked_bar_graph(bars,
                                                      title = stats_names[stat],
                                                      xsize = 6.5,
                                                      ysize = 2,
                                                      xlabel = "",
                                                      ylabel = "",
                                                      stack_space = 3,
                                                      x_translate = (num % cols) * col_space,
                                                      y_translate = (num / cols) * -row_space,
                                                      ))
        num += 1

    mfgraph.run_jgraph("\n".join(jgraph_input), "touched-by")
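
normalize_list is another helper that never appears in this listing. Because the caller multiplies each entry by 100.0 to plot percentages, a plausible sketch (an assumption, not the original code) scales the list in place so it sums to 1.0:

def normalize_list(data):
    # Assumed: in-place scaling so the entries sum to 1.0; the caller then
    # multiplies each entry by 100.0 to get percentages.
    total = sum(data)
    for index in range(len(data)):
        data[index] = data[index] / total
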
Example #8
File: misses.py Project: vnaveen0/nachos
def gen_misses(benchmarks):
    configs = [
        "1p-MOSI_bcast_opt", "2p-MOSI_bcast_opt", "4p-MOSI_bcast_opt",
        "8p-MOSI_bcast_opt", "16p-MOSI_bcast_opt"
    ]

    parameters = [
        "Request_type_IFETCH", "Request_type_LD", "Request_type_ST",
        "Request_type_ATOMIC"
    ]

    stacks = []
    for benchmark in benchmarks:
        bars = []
        for config in configs:
            print "  %s %s" % (benchmark, config)
            filenames = glob.glob(benchmark + "/*-" + config + "-*.stats")
            for filename in filenames:
                numbers = []
                lines = mfgraph.grep(filename, "instruction_executed")
                line = string.split(lines[0])
                insn = long(line[1])
                for parameter in parameters:
                    lines = mfgraph.grep(filename, parameter)
                    line = string.split(lines[0])
                    line = map(string.strip, line)
                    numbers.append(1000.0 * (float(line[1]) / insn))
                numbers = mfgraph.stack_bars(numbers)
                config_label = string.split(config, "-")[0]
                bars.append([config_label] + numbers)
        stacks.append([benchmark] + bars)

    labels = []
    for label in parameters:
        labels.append(string.split(label, "_")[2])

    return [
        mfgraph.stacked_bar_graph(
            stacks,
            bar_segment_labels=labels,
            title="Breakdown of misses",
            ylabel="misses per thousand instructions",
            patterns=["solid"],
        )
    ]
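
mfgraph.stack_bars is undefined here as well. Since the numbers feed a stacked bar graph, the helper presumably converts per-segment values into cumulative heights; a minimal sketch under that assumption:

def stack_bars(numbers):
    # Assumed: running totals, the form a stacked-bar plotter usually wants
    # ([1.0, 2.0, 3.0] -> [1.0, 3.0, 6.0]).
    stacked = []
    total = 0.0
    for value in numbers:
        total += value
        stacked.append(total)
    return stacked
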
Example #9
def get_data(filename, pattern, index=1):
    data = mfgraph.grep(filename, pattern)
    if data:
        line = data[0]
        line = string.replace(line, "]", " ")
        line = string.replace(line, "[", " ")
        return float(string.split(line)[index])
    else:
        return None
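
get_data is the general-purpose extractor behind several of the larger examples; maskpred_traces below calls it like this:

sharing_misses = get_data(filename, "sharing_misses:")
total_misses = get_data(filename, "Total_misses:")
cycles = get_data(filename, "Ruby_cycles:")
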
Example #10
File: opal.py Project: dberc/tpzsimul.gems
def get_reasons_for_retire_stall(output_file):
    grep_lines = mfgraph.grep(output_file, ".*Retire Updating.*")
    line = string.split(grep_lines[0])
    value_str = string.split(line[5], "%")
    updating = float(value_str[0])

    grep_lines = mfgraph.grep(output_file, ".*Retire Squash.*")
    line = string.split(grep_lines[0])
    value_str = string.split(line[5], "%")
    squash = float(value_str[0])

    grep_lines = mfgraph.grep(output_file, ".*Retire Limit.*")
    line = string.split(grep_lines[0])
    value_str = string.split(line[5], "%")
    limit = float(value_str[0])

    width = get_issue_width(output_file)
    return [width, updating, squash, limit]
Example #11
File: ecperf.py Project: vnaveen0/nachos
def ecperf_get_avg_mem_use_after(directory):
    filename = directory + "/gc.summary"
    grep_lines = mfgraph.grep(filename, ".+after.+")
    if (grep_lines != []):
        line = string.split(grep_lines[0])
        mem_use = float(line[2]) / 1024
        return mem_use
    else:
        return 0
Example #12
File: ecperf.py Project: vnaveen0/nachos
def ecperf_get_transactions(directory):
    filename = directory + "/Orders.summary"
    if os.access(filename, os.F_OK):
        grep_lines = mfgraph.grep(filename, "Total number of transactions")
        line = string.split(grep_lines[0])
        orders = int(line[5])

        filename = directory + "/Mfg.summary"
        if os.access(filename, os.F_OK):
            grep_lines = mfgraph.grep(filename,
                                      "Total Number of WorkOrders Processed")
            line = string.split(grep_lines[0])
            work_ords = int(line[6])
            transactions = work_ords + orders
            print "transactions: %d" % transactions
            return transactions

    return 0
Example #13
def ecperf_get_avg_mem_use_before(directory):
    filename = directory + "/gc.summary"
    grep_lines = mfgraph.grep(filename, ".+before.+");
    if(grep_lines != []):
        line = string.split(grep_lines[0])
        mem_use = float(line[2])/1024
        return mem_use
    else:
        return 0
Example #14
File: ecperf.py Project: vnaveen0/nachos
def ecperf_get_num_procs(directory):
    filename = directory + "/config.summary"
    if os.access(filename, os.F_OK):
        grep_lines = mfgraph.grep(filename, "Processors")
        line = string.split(grep_lines[0])
        procs = int(line[1])
        return procs
    else:
        return 0
Example #15
File: ecperf.py Project: vnaveen0/nachos
def ecperf_get_throughput(directory):
    filename = directory + "/ECperf.summary"
    if os.access(filename, os.F_OK):
        grep_lines = mfgraph.grep(filename, ".+Metric.+")
        line = string.split(grep_lines[0])
        throughput = float(line[3])
        return throughput
    else:
        return 0
Example #16
File: ecperf.py Project: vnaveen0/nachos
def ecperf_get_stdystate(directory):
    filename = directory + "/ECperf.summary"
    if os.access(filename, os.F_OK):
        grep_lines = mfgraph.grep(filename, "stdyState")
        line = string.split(grep_lines[0])
        stdy_state = int(line[4])
        return stdy_state

    return 0
Example #17
def get_l2_miss_ratio(cpustat_file):
    ratio = 0.0
    grep_lines = mfgraph.grep(cpustat_file, ".+total.+pic0=EC_ref,pic1=EC_hit,sys")
    for g_line in grep_lines:
        line = string.split(g_line)
        refs = float(line[3])
        hits = float(line[4])
        ratio = (refs - hits)/refs
        #print "ratio: %f" % ratio
    return ratio
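
With hypothetical counter values the arithmetic reads as follows (EC_ref is taken to be L2 references and EC_hit L2 hits, so refs minus hits is the miss count; note that only the last matching line's ratio is returned):

refs = 120000.0  # hypothetical EC_ref count
hits = 102000.0  # hypothetical EC_hit count
ratio = (refs - hits) / refs  # 0.15, i.e. 15% of L2 references miss
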
Example #18
def gen_cache_state(benchmarks):
    configs = ["1p", "2p", "4p", "8p", "16p"]
    parameters = ["GETS NP", "GETS I", "GETX NP", "GETX I", "GETX S", "GETX O"]

    stacks = []
    for benchmark in benchmarks:
        bars = []
        for config in configs:
            filenames = glob.glob(benchmark + "/*-" + config + "-*.stats")
            print "  %s %s" % (benchmark, config), filenames
            numbers = []
            for parameter in parameters:
                sum = 0
                for filename in filenames:
                    #                    print benchmark, config, parameter, filename
                    lines = mfgraph.grep(filename, "instruction_executed")
                    line = string.split(lines[0])
                    insn = long(line[1])

                    lines = mfgraph.grep(filename, parameter)
                    for line in lines:
                        fields = string.split(line)
                        #                        print fields
                        sum += int(fields[3])
                numbers.append(float(sum) / float(insn))

            numbers = mfgraph.stack_bars(numbers)
            bars.append([config] + numbers)
        stacks.append([benchmark] + bars)

    labels = []
    for label in parameters:
        labels.append(label)

    return [
        mfgraph.stacked_bar_graph(
            stacks,
            bar_segment_labels=labels,
            title="Cache Misses by state",
            ylabel="Number",
        )
    ]
Example #19
def get_l2_misses(cpustat_file):
    l2_misses = 0
    grep_lines = mfgraph.grep(cpustat_file, ".+total.+pic0=EC_ref,pic1=EC_hit,sys")
    count = 0
    for g_line in grep_lines:
        line = string.split(g_line)
        l2_misses += (long(line[3]) - long(line[4]))
        count += 1
    if count == 0:
        return -1
    else:
        return l2_misses/count
Example #20
File: opal.py Project: dberc/tpzsimul.gems
def get_branch_prediction_rate_data(branch_type, predictor_type):
    data = []
    files = glob.glob("%s/*.opal" % results_dir)
    for file in files:
        grep_lines = mfgraph.grep(file, ".*Configuration File named.*")
        if not re.match(".*branchpredictors.*", grep_lines[0]):
            #print "Non-branch pred config: " + grep_lines[0]
            continue

        grep_lines = mfgraph.grep(file, "BRANCHPRED_TYPE.*")
        if not re.match(".*" + predictor_type + ".*", grep_lines[0]):
            #print "branch pred doesn't match (%s)" % grep_lines[0]
            continue

        pht_bits = int(extract_value(file, "BRANCHPRED_PHT_BITS", 2))
        grep_lines = mfgraph.grep(file, ".*%s.*" % branch_type)
        
        line = string.split(grep_lines[2]) # use total count
        value_str = string.split(line[7], "%")
        pct_right = float(value_str[0])

        data.append([pht_bits, pct_right])
    return data
Example #21
def get_instructions(cpustat_file):
    grep_lines = mfgraph.grep(cpustat_file, ".+total.+pic0=Cycle_cnt,pic1=Instr_cnt")
    count = 0
    instructions = 0
    for g_line in grep_lines:
        #print "line: " + g_line
        line = string.split(g_line)
        instructions += long(line[4])
        count += 1

    if count == 0:
        return -1
    else:
        return instructions/count
Example #22
File: sharing.py Project: vnaveen0/nachos
def gen_sharing(benchmarks, normalize=1):
    configs = ["1p", "2p", "4p", "8p", "16p"]

    stacks = []
    for benchmark in benchmarks:
        bars = []
        for config in configs:
            print "  %s %s" % (benchmark, config)
            filenames = glob.glob(benchmark + "/*-" + config + "-*.stats")
            for filename in filenames:
                lines = mfgraph.grep(filename, "instruction_executed")
                line = string.split(lines[0])
                insn = long(line[1])
                numbers = get_sharing(filename)

                if normalize:
                    sum = reduce(lambda x, y: x + y, numbers)
                    for index in range(len(numbers)):
                        numbers[index] = (numbers[index] / sum) * 100.0
                else:
                    for index in range(len(numbers)):
                        numbers[index] = (numbers[index] /
                                          float(insn)) * 1000.0

                numbers = mfgraph.stack_bars(numbers)
                bars.append([config] + numbers)
        stacks.append([benchmark] + bars)

    if normalize:
        y_axis_label = "percent of misses"
    else:
        y_axis_label = "misses per thousand instructions",

    return [
        mfgraph.stacked_bar_graph(
            stacks,
            bar_segment_labels=labels,
            title="Breakdown of misses",
            ylabel=y_axis_label,
            patterns=[
                "stripe -45", "stripe -45", "stripe -45", "solid", "solid"
            ],
        )
    ]
Example #23
def gen_protocol(benchmarks):
#    configs = ["8p-perfect", "8p-MOSI_bcast_opt", "8p-MOSI_GS"]
    configs = ["8p-MOSI_bcast_opt", "8p-MOSI_GS"]

    base_config = "8p-MOSI_bcast_opt"

    parameter = "Ruby_cycles"

    stacks = []
    print "parsing..."
    for benchmark in benchmarks:
        assoc_data = {}
        for config in configs:
            sys.stderr.write("  %s %s\n" % (benchmark, config))
            numbers = []
            filenames = glob.glob(benchmark + "/*-" + config + "-*.stats")
            for filename in filenames:
                lines = mfgraph.grep(filename, parameter)
                line = lines[0]
                numbers.append(float(string.split(line)[1]))
            print numbers
            med = mfgraph.median(numbers)
            stddev = mfgraph.stddev(numbers)
            min_val = min(numbers)
            max_val = max(numbers)
            assoc_data[config] = [med, min_val, max_val]
        mfgraph.normalize(assoc_data, base_config)

        bars = []
        stack_data = [benchmark]
        for config in configs:
            bars.append([config, assoc_data[config]])
        stacks.append([benchmark] + bars)
    print "done."

    print stacks

    return [mfgraph.stacked_bar_graph(stacks,
                                      title = "Snooping vs Directory",
                                      ylabel = "normalized runtime",
                                      colors = [".5 .5 1"],
                                      patterns = ["solid"])]
Example #24
File: ecperf.py Project: vnaveen0/nachos
def get_gc_time_per_xact_data(results_dir):
    data = []
    directories = glob.glob(results_dir + "/*")
    for dir in directories:
        procs = ecperf_get_num_procs(directory=dir)

        filename = dir + "/gc.summary"
        if os.access(filename, os.F_OK):
            grep_lines = mfgraph.grep(filename, ".+time.+")
            line = string.split(grep_lines[0])
            gc_time = float(line[3])

            transactions = ecperf_get_transactions(directory=dir)
            if transactions == 0:
                continue

            tuple = [procs, (gc_time / transactions)]

            data.append(tuple)
    return data
Example #25
def gen_data_size(benchmarks):
    configs = ["1p-MOSI_bcast_opt", "2p-MOSI_bcast_opt", "4p-MOSI_bcast_opt", "8p-MOSI_bcast_opt", "16p-MOSI_bcast_opt"]

    parameters = ["C  GET_INSTR", "C  GETS", "C  GETX"]
    
    stacks = []
    for benchmark in benchmarks:
        bars = []
        for config in configs:
            print "  %s %s" % (benchmark, config)
            filenames = glob.glob(benchmark + "/*-" + config + "-*.stats")
            for filename in filenames:
                numbers = []
                for parameter in parameters:
                    lines = mfgraph.grep(filename, parameter)
                    line = string.split(lines[0])
                    line = map(string.strip, line)
                    num = string.split(line[2], "%")
                    num = (64L * long(num[0])) / (1024.0 * 1024.0)
                    numbers.append(num)

                numbers = mfgraph.stack_bars(numbers)
                number = reduce(lambda x, y: x + y, numbers)
                config_label = string.split(config, "-")[0]
                bars.append([config_label] + [number])
        stacks.append([benchmark] + bars)

#    labels = []
#    for label in parameters:

#        labels.append(string.split(label)[1])

    return [mfgraph.stacked_bar_graph(stacks,
#                                      bar_segment_labels = labels,
                                      title = "Memory touched",
                                      ylabel = "Mbytes",
                                      patterns = ["solid"],
                                      xsize = 8.5,
                                      )]
Example #26
File: ecperf.py Project: vnaveen0/nachos
def get_time_breakdown_data(results_dir):
    tuple_list = []
    directories = glob.glob(results_dir + "/*")
    for dir in directories:
        #print "working in directory %s." % dir
        procs = ecperf_get_num_procs(directory=dir)

        filename = dir + "/appserver_mpstat.summary"
        if os.access(filename, os.F_OK):
            grep_lines = mfgraph.grep(filename, "AVG.+")
            line = string.split(grep_lines[0])
            if (len(line) > 3):
                usr = float(line[12])
                sys = float(line[13])
                wt = float(line[14])
                idl = float(line[15])
            else:
                usr = 0
                sys = 0
                wt = 0
                idl = 0

            gc_time = ecperf_get_gc_time(dir)
            run_time = ecperf_get_stdystate(dir)
            if run_time == 0:
                continue

            proc_ratio = float(procs - 1) / procs
            gc_idle = proc_ratio * (gc_time / run_time) * 100

            adj_idl = idl - gc_idle
            if adj_idl < 0:
                adj_idl = 0
                gc_idle = idl

            tuple = [procs, usr, sys, wt, adj_idl, gc_idle]
            tuple_list.append(tuple)
    return tuple_list
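
The garbage-collection adjustment merits a worked example. A plausible reading of proc_ratio is that during a collection one processor runs the collector while the other procs - 1 sit idle, so that share of the GC time is reclassified from generic idle to GC-induced idle (all numbers below are hypothetical):

procs = 8
gc_time = 30.0    # hypothetical seconds spent in GC
run_time = 600.0  # hypothetical steady-state seconds
idl = 20.0        # hypothetical mpstat idle percentage

proc_ratio = float(procs - 1) / procs              # 0.875
gc_idle = proc_ratio * (gc_time / run_time) * 100  # 4.375 percent
adj_idl = idl - gc_idle                            # 15.625 percent left as true idle
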
Example #27
def touched_by():
    stats = [
        "touched_by_block_address:",
        "touched_by_weighted_block_address:",
        #        "touched_by_macroblock_address:",
        #        "touched_by_weighted_macroblock_address:",
        #        "touched_by_supermacroblock_address:",
        #        "touched_by_weighted_supermacroblock_address:",
        #        "last_n_block_touched_by",
        #        "last_n_macroblock_touched_by",
    ]

    yaxis_names = [
        "Percent of all blocks",
        "Percent of all misses",
        "Percent of all macroblocks",
        "Percent of all misses",
        "Percent of all macroblocks",
        "Percent of all misses",
        #        "Percent",
        #        "Percent",
    ]

    stats_names = {
        "touched_by_block_address:":
        "(a) Percent of data blocks (64B) touched by n processors",
        "touched_by_weighted_block_address:":
        "(b) Percent of misses to data blocks (64B) touched by n processors",
        #        "touched_by_macroblock_address:": "(c) Percent of data macroblocks (1024B) touched by n processors",
        #        "touched_by_weighted_macroblock_address:": "(d) Percent of misses to data macroblocks (1024B) touched by n processors",
        #        "touched_by_supermacroblock_address:": "(e) Percent of 4kB macroblocks touched by n processors",
        #        "touched_by_weighted_supermacroblock_address:": "(f) Percent of misses to 4kB macroblocks touched by n processors",
        #        "last_n_block_touched_by" : "(e) Percent of misses touched by n processors in the last 64 misses to the block",
        #        "last_n_macroblock_touched_by" : "(f) Percent of misses touched by n processors in the last 64 misses to the macroblock",
    }

    jgraph_input = []

    cols = 1
    row_space = 2.2
    col_space = 3

    num = 0
    for stat in stats:
        bars = []
        for benchmark in benchmarks:
            print stat, benchmark
            group = [benchmark_names[benchmark]]
            filenames = glob.glob("*/%s-*gs320*.stats" % benchmark)
            print benchmark, filenames
            for filename in filenames:
                line = mfgraph.grep(filename, stat)[0]
                line = string.replace(line, "]", "")
                line = string.replace(line, "[", "")
                data = string.split(line)[2:]
                #                data = string.split(line)[14:]
                data = map(float, data)
                print data

                #                  new_data = []
                #                  sum = 0.0
                #                  for item in data:
                #                      new_data.append(item + sum)
                #                      sum += item
                #                  data = new_data
                print data

                #                  for index in range(len(data)):
                #                      data[index] = data[index]/sum

                print data
                normalize_list(data)
                for index in range(len(data)):
                    if index + 1 in [1, 4, 8, 12, 16]:
                        group.append(["%d" % (index + 1), data[index] * 100.0])
                    else:
                        group.append(["", data[index] * 100.0])

            bars.append(group)

        jgraph_input.append(
            mfgraph.stacked_bar_graph(
                bars,
                title=stats_names[stat],
                title_fontsize="12",
                title_font="Times-Roman",
                title_y=-25.0,
                xsize=6.5,
                ysize=1.5,
                xlabel="",
                ymax=100.01,
                ylabel=yaxis_names[num],
                colors=[".5 .5 .5"],
                patterns=["solid"],
                stack_name_location=12.0,
                stack_space=3,
                x_translate=(num % cols) * col_space,
                y_translate=(num / cols) * -row_space,
            ))
        num += 1

    mfgraph.run_jgraph("\n".join(jgraph_input), "touched-by")
Example #28
def maskpred_traces():

    ## read in all data
    global data_map
    data_map = {}
    for benchmark in benchmarks:
        print benchmark
        data_map[benchmark] = {} # init map
        
        # gs320 directory protocol
        filenames = glob.glob("*/%s-*gs320*.stats" % benchmark)
        for filename in filenames:
            control_line = mfgraph.grep(filename, "  switch_16_messages_out_Control")[0]
            control_line = string.replace(control_line, "[", " ")
            control_line = string.split(control_line)

            data_line = mfgraph.grep(filename, "  switch_16_messages_out_Data")[0]
            data_line = string.replace(data_line, "[", " ")
            data_line = string.split(data_line)

            # to calculate the request bandwidth, we add up all the
            # outgoing control messages and subtract the number of PutXs
            # (which conveniently happens to be the number of virtual
            # network zero data messages sent.  We need to subtract out
            # the number of PutXs, since the GS320 protocol sends a
            # forwarded control message to 'ack' a PutX from a processor.
            control_msgs = float(control_line[1]) - float(data_line[2])

            sharing_misses = get_data(filename, "sharing_misses:")
            total_misses = get_data(filename, "Total_misses:")
            control_msgs_per_miss = control_msgs/total_misses
            cycles = get_data(filename, "Ruby_cycles:")

            if not data_map[benchmark].has_key("Directory"):
                data_map[benchmark]["Directory"] = []
            indirections = 100.0 * (sharing_misses/total_misses)
            data_map[benchmark]["Directory"].append([control_msgs_per_miss, indirections, cycles])
    
        # mask prediction data
        filenames = glob.glob("*/%s-*mcast*.stats" % benchmark)
        filenames.sort()
        for filename in filenames:
            predictor = string.split(filename, "-")[3]
            
            # calculate indirections
            data_lines = mfgraph.grep(filename, "multicast_retries:")

            if not data_lines:
                continue # missing data

            lst = string.split(string.replace(data_lines[0], "]", ""))
            total_misses = float(lst[6])
            total_misses_alt = get_data(filename, "Total_misses:")
            non_retry = float(lst[13])
            retry = total_misses - non_retry
            indirections = 100.0 * (retry/total_misses)
            cycles = get_data(filename, "Ruby_cycles:")

            # calculate bandwidth
            data_lines = mfgraph.grep(filename, "  switch_16_messages_out_Control:")

            if not data_lines:
                continue # missing data

            lst = string.split(string.replace(data_lines[0], "]", ""))
            control_msgs = float(lst[1])
            control_msgs_per_miss = control_msgs/total_misses

            print "  ", predictor, "->", benchmark, control_msgs_per_miss, indirections, cycles
            if not data_map[benchmark].has_key(predictor):
                data_map[benchmark][predictor] = []
            data_map[benchmark][predictor].append([control_msgs_per_miss, indirections, cycles])

    ## Make the graphs
    all_data = []
    all_parameters = []
    for benchmark in benchmarks:
        print benchmark
        # collect data
        data = []

        # mask prediction data
        for predictor in data_map[benchmark].keys():
            include_list = []
            include_list.append("^Directory")
            include_list.append("^AlwaysBroadcast")
            #include_list.append("^AlwaysUnicast")
            
# Graph1
#              include_list.append("Counter:Implicit:5:DataBlock:0:0")
#              include_list.append("Owner:Implicit:DataBlock:0:0")
#              include_list.append("BroadcastCounter:Implicit:DataBlock:0:0")
#              include_list.append("OwnerGroup:Implicit:DataBlock:0:0")
#              include_list.append("OwnerBroadcast:Implicit:DataBlock:0:0")

# Graph2
#              include_list.append("Counter:Implicit:5:.*:0:0")
#              include_list.append("Owner:Implicit:.*:0:0")
#              include_list.append("BroadcastCounter:Implicit:.*:0:0")
#              include_list.append("OwnerGroup:Implicit:.*:0:0")
#              include_list.append("OwnerBroadcast:Implicit:.*:0:0")

# Graph3

#            include_list.append("Owner:Implicit:DataBlock:.*:0")
#            include_list.append("Counter:Implicit:5:DataBlock:.*:0")
#            include_list.append("OwnerGroup:Implicit:DataBlock:.*:0")
#            include_list.append("OwnerGroupMod:Implicit:DataBlock:.*:0")
#            include_list.append("OwnerBroadcast:Implicit:DataBlock:.*:0")
#            include_list.append("BroadcastCounter:Implicit:DataBlock:.*:0")

# Graph4
#            include_list.append("Owner:Implicit:DataBlock:4:.*")
#            include_list.append("Counter:Implicit:5:DataBlock:4:.*")
#            include_list.append("BroadcastCounter:Implicit:DataBlock:4:.*")
#            include_list.append("OwnerGroup:Implicit:DataBlock:4:.*")
#            include_list.append("OwnerGroupMod:Implicit:DataBlock:4:.*")
#            include_list.append("OwnerBroadcast:Implicit:DataBlock:4:.*")

            include_list.append("^StickySpatial:Both:1:DataBlock:0:.*")
#            include_list.append("^OwnerGroup:.*:DataBlock:0:0")

#            include_list.append("^Owner:Implicit:DataBlock:4:.*")
#            include_list.append("^Counter:.*:5:DataBlock:0:0")
#            include_list.append("^OwnerGroup:.*:DataBlock:0:0")
#            include_list.append("^BroadcastCounter:Implicit:DataBlock:4:.*")
#            include_list.append("^OwnerGroup:Implicit:DataBlock:.*:0")
#            include_list.append("^OwnerGroupMod:Implicit:DataBlock:0:0")
#            include_list.append("^OwnerBroadcast:Implicit:DataBlock:0:0")
#            include_list.append("^OwnerBroadcastMod:Implicit:DataBlock:0:0")

#            include_list.append("^BroadcastCounter:Implicit:1:DataBlock:4:.*")
#            include_list.append("^OwnerGroup:Implicit:DataBlock:[06]:0")
#            include_list.append("^BroadcastCounter:Implicit:1:DataBlock:0:0")
#            include_list.append("^Counter:Implicit:1:DataBlock:[06]:0")
#            include_list.append("^Pairwise:Implicit:DataBlock:4:.*")
#            include_list.append("^StickySpatial:Implicit:2:DataBlock:0:0")

#            include_list.append("^Counter:Implicit:1:DataBlock:.*:0")
#            include_list.append("^BroadcastCounter:Implicit:1:DataBlock:.*:0")
#            include_list.append("^Pairwise:Implicit:DataBlock:.*:0")

#            include_list.append("^Counter:Implicit:1:PC:.*:0")
#            include_list.append("^BroadcastCounter:Implicit:1:PC:.*:0")
#            include_list.append("^Pairwise:Implicit:PC:.*:0")

#            include_list.append("^StickySpatial:.*:0:DataBlock:8")
            
            include = 0 # false
            for pat in include_list:
                if re.compile(pat).search(predictor):
                    include = 1 # true
            if not include:
#                print "  ", predictor, "-> skipped"
                continue
            
            predictor_desc = predictor_name_transform(predictor)

            (control_msgs_per_miss, indirections, cycles) = get_maskpred_data(benchmark, predictor)
            (dir_control_msgs_per_miss, dir_indirections, dir_cycles) = get_maskpred_data(benchmark, "Directory")
#            indirections = 100*indirections/dir_indirections
            
            print "  ", predictor, "->", benchmark, predictor_desc, control_msgs_per_miss, indirections
            data.append([predictor_desc, [control_msgs_per_miss, indirections]])

        # graph the data
        all_data.append(data)
        all_parameters.append({ "title" : benchmark_names[benchmark] })

    # only display the legend on the last graph
    all_parameters[-1]["legend"] = "on"

    output = mfgraph.multi_graph(all_data,
                                 all_parameters,
                                 legend = "off",
                                 xsize = 1.8,
                                 ysize = 1.8,
                                 xlabel = "control messages per miss",
                                 ylabel = "indirections (percent of all misses)",
#                                 linetype = ["dotted"] + (["none"] * 10),
                                 linetype = ["none"] * 10,
                                 colors = ["1 0 0", "0 0 1", "0 .5 0", "0 0 0", "1 0 1"],
                                 fills = ["1 0 0", "0 0 1", "0 .5 0", "0 .5 1", "1 0 1"],
                                 xmin = 0.0,
                                 ymin = 0.0,
                                 cols = 3,
                                 #                    ymax = 100.0,
                                 marktype = (["circle", "box", "diamond", "triangle"] * 10),
#                                 marktype = ["none"] + (["circle", "box", "diamond", "triangle"] * 10),
                                 title_fontsize = "12",
                                 legend_hack = "yes",
                                 )

    mfgraph.run_jgraph(output, "traces")
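
With hypothetical totals, the two metrics that maskpred_traces plots for the directory protocol work out as follows (the formulas mirror the code above):

control_total = 120000.0  # hypothetical switch_16_messages_out_Control total
putx_count = 20000.0      # hypothetical vnet-0 data messages, equal to the PutX count
sharing_misses = 30000.0  # hypothetical
total_misses = 100000.0   # hypothetical

control_msgs_per_miss = (control_total - putx_count) / total_misses  # 1.0
indirections = 100.0 * (sharing_misses / total_misses)               # 30.0 percent
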
Example #29
File: ecperf.py Project: vnaveen0/nachos
def ecperf_get_gc_time(directory):
    filename = directory + "/gc.summary"
    grep_lines = mfgraph.grep(filename, ".+time.+")
    line = string.split(grep_lines[0])
    gc_time = float(line[3])
    return gc_time
Example #30
File: ecperf.py Project: vnaveen0/nachos
def ecperf_get_tx_rate(directory):
    filename = directory + "/config.summary"
    grep_lines = mfgraph.grep(filename, "Tx")
    line = string.split(grep_lines[0])
    rate = int(line[2])
    return rate
Example #31
def extract_value(filename, key, position):
    grep_lines = mfgraph.grep(filename, ".*" + key + ".*")
    line = string.split(grep_lines[0])
    return line[position]
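
extract_value is the last of the small helpers; get_branch_prediction_rate_data above uses it to pull a configuration value by token position, e.g.:

pht_bits = int(extract_value(file, "BRANCHPRED_PHT_BITS", 2))
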
예제 #46
0
def maskpred_traces():

    ## read in all data
    global data_map
    data_map = {}
    for benchmark in benchmarks:
        print benchmark
        data_map[benchmark] = {}  # init map

        # gs320 directory protocol
        filenames = glob.glob("*/%s-*gs320*.stats" % benchmark)
        for filename in filenames:
            control_line = mfgraph.grep(filename,
                                        "  switch_16_messages_out_Control")[0]
            control_line = string.replace(control_line, "[", " ")
            control_line = string.split(control_line)

            data_line = mfgraph.grep(filename,
                                     "  switch_16_messages_out_Data")[0]
            data_line = string.replace(data_line, "[", " ")
            data_line = string.split(data_line)

            # to calculate the request bandwidth, we add up all the
            # outgoing control messages and subtract the number of PutXs
            # (which conveniently happens to be the number of virtual
            # network zero data messages sent.  We need to subtract out
            # the number of PutXs, since the GS320 protocol sends a
            # forwarded control message to 'ack' a PutX from a processor.
            control_msgs = float(control_line[1]) - float(data_line[2])

            sharing_misses = get_data(filename, "sharing_misses:")
            total_misses = get_data(filename, "Total_misses:")
            control_msgs_per_miss = control_msgs / total_misses
            cycles = get_data(filename, "Ruby_cycles:")

            if not data_map[benchmark].has_key("Directory"):
                data_map[benchmark]["Directory"] = []
            indirections = 100.0 * (sharing_misses / total_misses)
            data_map[benchmark]["Directory"].append(
                [control_msgs_per_miss, indirections, cycles])

        # mask prediction data
        filenames = glob.glob("*/%s-*mcast*.stats" % benchmark)
        filenames.sort()
        for filename in filenames:
            predictor = string.split(filename, "-")[3]

            # calculate indirections
            data_lines = mfgraph.grep(filename, "multicast_retries:")

            if not data_lines:
                continue  # missing data

            lst = string.split(string.replace(data_lines[0], "]", ""))
            total_misses = float(lst[6])
            total_misses_alt = get_data(filename, "Total_misses:")
            non_retry = float(lst[13])
            retry = total_misses - non_retry
            indirections = 100.0 * (retry / total_misses)
            cycles = get_data(filename, "Ruby_cycles:")

            # calculate bandwidth
            data_lines = mfgraph.grep(filename,
                                      "  switch_16_messages_out_Control:")

            if not data_lines:
                continue  # missing data

            lst = string.split(string.replace(data_lines[0], "]", ""))
            control_msgs = float(lst[1])
            control_msgs_per_miss = control_msgs / total_misses

            print "  ", predictor, "->", benchmark, control_msgs_per_miss, indirections, cycles
            if not data_map[benchmark].has_key(predictor):
                data_map[benchmark][predictor] = []
            data_map[benchmark][predictor].append(
                [control_msgs_per_miss, indirections, cycles])

    ## Make the graphs
    all_data = []
    all_parameters = []
    for benchmark in benchmarks:
        print benchmark
        # collect data
        data = []

        # mask prediction data
        for predictor in data_map[benchmark].keys():
            include_list = []
            include_list.append("^Directory")
            include_list.append("^AlwaysBroadcast")
            # include_list.append("^AlwaysUnicast")

            # Alternative pattern sets for other graph variants, kept
            # commented out for reference.

            # Graph 1
            # include_list.append("Counter:Implicit:5:DataBlock:0:0")
            # include_list.append("Owner:Implicit:DataBlock:0:0")
            # include_list.append("BroadcastCounter:Implicit:DataBlock:0:0")
            # include_list.append("OwnerGroup:Implicit:DataBlock:0:0")
            # include_list.append("OwnerBroadcast:Implicit:DataBlock:0:0")

            # Graph 2
            # include_list.append("Counter:Implicit:5:.*:0:0")
            # include_list.append("Owner:Implicit:.*:0:0")
            # include_list.append("BroadcastCounter:Implicit:.*:0:0")
            # include_list.append("OwnerGroup:Implicit:.*:0:0")
            # include_list.append("OwnerBroadcast:Implicit:.*:0:0")

            # Graph 3
            # include_list.append("Owner:Implicit:DataBlock:.*:0")
            # include_list.append("Counter:Implicit:5:DataBlock:.*:0")
            # include_list.append("OwnerGroup:Implicit:DataBlock:.*:0")
            # include_list.append("OwnerGroupMod:Implicit:DataBlock:.*:0")
            # include_list.append("OwnerBroadcast:Implicit:DataBlock:.*:0")
            # include_list.append("BroadcastCounter:Implicit:DataBlock:.*:0")

            # Graph 4
            # include_list.append("Owner:Implicit:DataBlock:4:.*")
            # include_list.append("Counter:Implicit:5:DataBlock:4:.*")
            # include_list.append("BroadcastCounter:Implicit:DataBlock:4:.*")
            # include_list.append("OwnerGroup:Implicit:DataBlock:4:.*")
            # include_list.append("OwnerGroupMod:Implicit:DataBlock:4:.*")
            # include_list.append("OwnerBroadcast:Implicit:DataBlock:4:.*")

            include_list.append("^StickySpatial:Both:1:DataBlock:0:.*")

            # Assorted other variants, also kept for reference:
            # include_list.append("^OwnerGroup:.*:DataBlock:0:0")
            # include_list.append("^Owner:Implicit:DataBlock:4:.*")
            # include_list.append("^Counter:.*:5:DataBlock:0:0")
            # include_list.append("^BroadcastCounter:Implicit:DataBlock:4:.*")
            # include_list.append("^OwnerGroup:Implicit:DataBlock:.*:0")
            # include_list.append("^OwnerGroupMod:Implicit:DataBlock:0:0")
            # include_list.append("^OwnerBroadcast:Implicit:DataBlock:0:0")
            # include_list.append("^OwnerBroadcastMod:Implicit:DataBlock:0:0")
            # include_list.append("^BroadcastCounter:Implicit:1:DataBlock:4:.*")
            # include_list.append("^OwnerGroup:Implicit:DataBlock:[06]:0")
            # include_list.append("^BroadcastCounter:Implicit:1:DataBlock:0:0")
            # include_list.append("^Counter:Implicit:1:DataBlock:[06]:0")
            # include_list.append("^Pairwise:Implicit:DataBlock:4:.*")
            # include_list.append("^StickySpatial:Implicit:2:DataBlock:0:0")
            # include_list.append("^Counter:Implicit:1:DataBlock:.*:0")
            # include_list.append("^BroadcastCounter:Implicit:1:DataBlock:.*:0")
            # include_list.append("^Pairwise:Implicit:DataBlock:.*:0")
            # include_list.append("^Counter:Implicit:1:PC:.*:0")
            # include_list.append("^BroadcastCounter:Implicit:1:PC:.*:0")
            # include_list.append("^Pairwise:Implicit:PC:.*:0")
            # include_list.append("^StickySpatial:.*:0:DataBlock:8")
            include = 0  # false
            for pat in include_list:
                if re.compile(pat).search(predictor):
                    include = 1  # true
            if not include:
                # print "  ", predictor, "-> skipped"
                continue

            predictor_desc = predictor_name_transform(predictor)

            (control_msgs_per_miss, indirections,
             cycles) = get_maskpred_data(benchmark, predictor)
            # Directory baseline; only needed when normalizing
            # indirections against it (disabled below).
            (dir_control_msgs_per_miss, dir_indirections,
             dir_cycles) = get_maskpred_data(benchmark, "Directory")
            # indirections = 100 * indirections / dir_indirections

            print "  ", predictor, "->", benchmark, predictor_desc, control_msgs_per_miss, indirections
            data.append(
                [predictor_desc, [control_msgs_per_miss, indirections]])

        # graph the data
        all_data.append(data)
        all_parameters.append({"title": benchmark_names[benchmark]})

    # only display the legend on the last graph
    all_parameters[-1]["legend"] = "on"

    output = mfgraph.multi_graph(
        all_data,
        all_parameters,
        legend="off",
        xsize=1.8,
        ysize=1.8,
        xlabel="control messages per miss",
        ylabel="indirections (percent of all misses)",
        # linetype=["dotted"] + (["none"] * 10),
        linetype=["none"] * 10,
        colors=["1 0 0", "0 0 1", "0 .5 0", "0 0 0", "1 0 1"],
        fills=["1 0 0", "0 0 1", "0 .5 0", "0 .5 1", "1 0 1"],
        xmin=0.0,
        ymin=0.0,
        cols=3,
        # ymax=100.0,
        marktype=(["circle", "box", "diamond", "triangle"] * 10),
        # marktype=["none"] + (["circle", "box", "diamond", "triangle"] * 10),
        title_fontsize="12",
        legend_hack="yes",
    )

    mfgraph.run_jgraph(output, "traces")
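The code above calls get_maskpred_data and predictor_name_transform, neither of which is defined in this excerpt. Minimal sketches of what they might look like (hypothetical, not the original helpers), assuming get_maskpred_data averages the [control_msgs_per_miss, indirections, cycles] lists accumulated in data_map and predictor_name_transform shortens a raw predictor id for display:

# Hypothetical sketch: get_maskpred_data is not shown in this excerpt.
# Assumes data_map[benchmark][predictor] holds one or more
# [control_msgs_per_miss, indirections, cycles] lists, as built above.
def get_maskpred_data(benchmark, predictor):
    runs = data_map[benchmark][predictor]
    sums = [0.0, 0.0, 0.0]
    for run in runs:
        for i in range(3):
            sums[i] += run[i]
    count = float(len(runs))
    return (sums[0] / count, sums[1] / count, sums[2] / count)

# Hypothetical sketch: predictor_name_transform is not shown either.
# Assumed to turn an id like "StickySpatial:Both:1:DataBlock:0:0"
# into a short display name; the real mapping may differ.
def predictor_name_transform(predictor):
    return string.split(predictor, ":")[0]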
Example #47
def touched_by():
    stats = [
        "touched_by_block_address:",
        "touched_by_weighted_block_address:",
        # "touched_by_macroblock_address:",
        # "touched_by_weighted_macroblock_address:",
        # "touched_by_supermacroblock_address:",
        # "touched_by_weighted_supermacroblock_address:",
        # "last_n_block_touched_by",
        # "last_n_macroblock_touched_by",
        ]

    # One label per stat, in order; the extra entries pair with the
    # commented-out stats above.
    yaxis_names = [
        "Percent of all blocks",
        "Percent of all misses",
        "Percent of all macroblocks",
        "Percent of all misses",
        "Percent of all macroblocks",
        "Percent of all misses",
        # "Percent",
        # "Percent",
        ]

    stats_names = {
        "touched_by_block_address:": "(a) Percent of data blocks (64B) touched by n processors",
        "touched_by_weighted_block_address:": "(b) Percent of misses to data blocks (64B) touched by n processors",
        # "touched_by_macroblock_address:": "(c) Percent of data macroblocks (1024B) touched by n processors",
        # "touched_by_weighted_macroblock_address:": "(d) Percent of misses to data macroblocks (1024B) touched by n processors",
        # "touched_by_supermacroblock_address:": "(e) Percent of 4kB macroblocks touched by n processors",
        # "touched_by_weighted_supermacroblock_address:": "(f) Percent of misses to 4kB macroblocks touched by n processors",
        # "last_n_block_touched_by": "(e) Percent of misses touched by n processors in the last 64 misses to the block",
        # "last_n_macroblock_touched_by": "(f) Percent of misses touched by n processors in the last 64 misses to the macroblock",
        }

    jgraph_input = []

    cols = 1
    row_space = 2.2
    col_space = 3

    num = 0
    for stat in stats:
        bars = []
        for benchmark in benchmarks:
            print stat, benchmark
            group = [benchmark_names[benchmark]]
            filenames = glob.glob("*/%s-*gs320*.stats" % benchmark)
            print benchmark, filenames
            for filename in filenames:
                line = mfgraph.grep(filename, stat)[0]
                line = string.replace(line, "]", "")
                line = string.replace(line, "[", "")
                data = string.split(line)[2:]
                data = map(float, data)
                print data
                normalize_list(data)
                for index in range(len(data)):
                    if index+1 in [1, 4, 8, 12, 16]:
                        group.append(["%d" % (index+1), data[index]*100.0])
                    else:
                        group.append(["", data[index]*100.0])

            bars.append(group)

        jgraph_input.append(mfgraph.stacked_bar_graph(bars,
                                                      title = stats_names[stat],
                                                      title_fontsize = "12",
                                                      title_font = "Times-Roman",
                                                      title_y = -25.0,
                                                      xsize = 6.5,
                                                      ysize = 1.5,
                                                      xlabel = "",
                                                      ymax = 100.01,
                                                      ylabel = yaxis_names[num],
                                                      colors = [".5 .5 .5"],
                                                      patterns = ["solid"],
                                                      stack_name_location = 12.0,
                                                      stack_space = 3,
                                                      x_translate = (num % cols) * col_space,
                                                      y_translate = (num / cols) * -row_space,
                                                      ))
        num += 1

    mfgraph.run_jgraph("\n".join(jgraph_input), "touched-by")
Example #48
def get_histo(filename, pattern):
    # Return the histogram buckets (still as strings) from the first
    # line matching pattern, skipping the first 13 tokens of the line.
    line = mfgraph.grep(filename, pattern)[0]
    line = string.replace(line, "]", " ")
    line = string.split(line)
    return line[13:]
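A hedged usage sketch of get_histo; the path and stat name below are illustrative, not from the original scripts:

# Illustrative only: path and stat name are made up.
buckets = map(float, get_histo("run0/ruby.stats", "miss_histogram:"))
print buckets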
Example #49
def ecperf_get_gc_time(directory):
    filename = directory + "/gc.summary"
    grep_lines = mfgraph.grep(filename, ".+time.+")
    line = string.split(grep_lines[0])
    gc_time = float(line[3])
    return gc_time
Example #51
def ecperf_get_tx_rate(directory):
    filename = directory + "/config.summary"
    grep_lines = mfgraph.grep(filename, "Tx")
    line = string.split(grep_lines[0])
    rate = int(line[2])
    return rate
Example #52
def get_data(filename, pattern):
    # Return the second token of the first line matching pattern, as a float.
    data = mfgraph.grep(filename, pattern)
    return float(string.split(data[0])[1])
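This is the single-value helper the mask-prediction code above relies on (e.g. for "Ruby_cycles:" and "Total_misses:"). A hedged usage sketch with an illustrative path:

# Illustrative only: the path is made up; the stat name appears above.
cycles = get_data("run0/ruby.stats", "Ruby_cycles:")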
Example #53
def get_issue_width(output_file):
    # The width is parsed as the third token of the IWINDOW_WIN_SIZE line.
    grep_lines = mfgraph.grep(output_file, ".*IWINDOW_WIN_SIZE.*")
    line = string.split(grep_lines[0])
    width = int(line[2])
    return width
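A hedged usage sketch of get_issue_width; the path is illustrative:

# Illustrative only: the path is made up.
width = get_issue_width("run0/opal.output")
print "issue width:", width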
Example #54
File: maskpred.py Project: vnaveen0/nachos
def touched_by():
    stats = [
        "touched_by_block_address:",
        "touched_by_weighted_block_address:",
        "touched_by_macroblock_address:",
        "touched_by_weighted_macroblock_address:",
        # "touched_by_pc_address:",
        # "touched_by_weighted_pc_address:",
    ]

    stats_names = {
        "touched_by_block_address:":
        "(a) Percent blocks touched by n processors",
        "touched_by_weighted_block_address:":
        "(b) Percent of misses to blocks touched by n processors",
        "touched_by_macroblock_address:":
        "(c) Percent of macroblocks touched by n processors",
        "touched_by_weighted_macroblock_address:":
        "(d) Percent of misses to macroblocks touched by n processors",
    }

    jgraph_input = []

    cols = 1
    row_space = 2.9
    col_space = 3

    num = 0
    for stat in stats:
        bars = []
        for benchmark in benchmarks:
            print stat, benchmark
            group = [benchmark_names[benchmark]]
            filenames = glob.glob("%s/*trace-profiler*.stats" % benchmark)
            for filename in filenames:
                line = mfgraph.grep(filename, stat)[0]
                line = string.replace(line, "]", "")
                line = string.replace(line, "[", "")
                data = string.split(line)[2:]
                data = map(float, data)
                normalize_list(data)
                for index in range(len(data)):
                    if index + 1 in [1, 4, 8, 12, 16]:
                        group.append(["%d" % (index + 1), data[index] * 100.0])
                    else:
                        group.append(["", data[index] * 100.0])

            bars.append(group)

        jgraph_input.append(
            mfgraph.stacked_bar_graph(
                bars,
                title=stats_names[stat],
                xsize=6.5,
                ysize=2,
                xlabel="",
                ylabel="",
                stack_space=3,
                x_translate=(num % cols) * col_space,
                y_translate=(num / cols) * -row_space,
            ))
        num += 1

    mfgraph.run_jgraph("\n".join(jgraph_input), "touched-by")