def _startSniper(options, bench, workload, trial):
        cmdLine = Sniper.PARSEC_SNIPER + "/bin/parsecmgmt -a run -p "
        if Benchmark.isParsecBenchmark(bench):
            cmdLine += bench
        else:
            util.raiseError("Invalid bench: ", bench)

        cmd_options = Sniper.CMD_LINE_OPTIONS
        # Pass Sniper configuration file
        if options.cores == 8:
            cmd_options += " -c arc-8"
        elif options.cores == 16:
            cmd_options += " -c arc-16"
        elif options.cores == 32:
            cmd_options += " -c arc-32"
        else:
            util.raiseError("Unknown number of cores: %s" % (options.cores))

        # Append benchmark to output directory name, since otherwise Sniper will overwrite contents
        out_dir = options.getExpOutputDir() + "-" + bench

        cmdLine += (" -c gcc-hooks -i " + workload + " -n " + str(options.pinThreads) +
                    ''' -s "''' + Sniper.SNIPER_ROOT + Sniper.FILE_SEP + Sniper.SNIPER_EXEC +
                    " -n " + str(options.pinThreads) + " -d " + out_dir + cmd_options)

        if options.roiOnly and bench != "vips":
            cmdLine += " --roi"
        cmdLine += ''' -- "'''

        if options.verbose >= 2 or options.printOnly:
            print(Sniper.__outputPrefix() + cmdLine)
        if not options.printOnly:
            Sniper.sniperIDsList.append(subprocess.Popen(cmdLine, shell=True))
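For orientation, a sketch of the rough shape of the command string this helper assembles; every concrete value below (paths, benchmark, thread count) is a made-up placeholder, not taken from the project:

# Rough shape of the assembled command line (placeholders only, not real paths):
# <PARSEC_SNIPER>/bin/parsecmgmt -a run -p <bench> -c gcc-hooks -i <workload> -n <pinThreads>
#   -s "<SNIPER_ROOT>/<SNIPER_EXEC> -n <pinThreads> -d <out_dir>-<bench> <CMD_LINE_OPTIONS> -c arc-<cores> [--roi] -- "
example_cmd = ('/opt/parsec/bin/parsecmgmt -a run -p blackscholes -c gcc-hooks -i simsmall -n 8'
               ' -s "/opt/sniper/run-sniper -n 8 -d /tmp/exp-blackscholes -c arc-8 --roi -- "')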
Example 2
    def collectTask(options):
        CollectTask.__printTaskInfoStart(options)

        cwd = os.getcwd()
        os.chdir(options.getExpOutputDir())
        resSet = []  # list of dictionaries

        try:
            workloadTuple = options.getWorkloadTuple()
            benchTuple = options.getBenchTuple()

            for w in workloadTuple:
                for num in range(1, options.trials + 1):
                    for b in tuple(benchTuple):
                        path = RunTask.getPathPortion(b, w, num)
                        if not os.path.exists(path):
                            util.raiseError(
                                "Output file path not present: ",
                                options.getExpOutputDir() +
                                CollectTask.FILE_SEP + path)

                        dic = {}
                        # Create key/value pairs and add to the dict
                        dic["bench"] = b
                        dic["trial"] = str(num)
                        dic["workload"] = w
                        for tool in options.getToolsTuple():
                            resSet = CollectTask.__collectResult(
                                path, tool, dic, resSet)

        finally:
            os.chdir(cwd)
            CollectTask.__printTaskInfoEnd(options)
        return resSet
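A sketch of the rough shape of one entry in the returned resSet; the "tool" key and the per-CPU stat dictionaries are assumptions inferred from how postProcessResults indexes these entries later, and all values are made up:

# Hypothetical shape of one resSet entry (values made up; stat keys omitted):
example_entry = {
    "bench": "blackscholes",   # benchmark name
    "trial": "1",              # trial number, stored as a string
    "workload": "simsmall",    # workload size
    "tool": "viser8",          # added by __collectResult (assumption)
    # plus one dictionary of stat key/value pairs per CPU id, including the
    # aggregated entry under BackendSimulator.GLOBAL_CPUID_VAL
}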
Example 3
    def __processSimOutput(path, tool, di_stats, simType):
        _str_fileName = path + CollectTask.FILE_SEP + tool + "-stats.py"
        if not os.path.isfile(_str_fileName):
            configType = ""
            if simType == SimulatorType.MESI:
                configType = "MESI"
            elif simType == SimulatorType.VISER:
                configType = "Viser"
            elif simType == SimulatorType.RCCSI:
                configType = "RCC-SI"
            elif simType == SimulatorType.PAUSE:
                configType = "Pause"
            util.raiseError(configType + " simulator stats file not present:",
                            _str_fileName)
        di_stats = BackendSimulator.parseStats(_str_fileName, di_stats)

        # Compute on-chip and off-chip network bandwidth.
        globalStats = di_stats[BackendSimulator.GLOBAL_CPUID_VAL]
        numSeconds = globalStats[
            SK.BANDWIDTH_CYCLE_COUNT_KEY] / CollectTask.CLK_FREQUENCY
        numOnChipBytes = (
            globalStats[SK.ONCHIP_NETWORKMSG_SIZE_16BYTES_FLITS_KEY] *
            CollectTask.NUM_BYTES_FLIT)
        onChipBW = numOnChipBytes / (numSeconds * math.pow(2, 30))
        globalStats[SK.SUM_REQD_ONCHIPBW_16BYTES_FLITS_KEY] = onChipBW

        # Off-chip traffic is computed during postProcessResults. So bandwidth can only be
        # computed after that.

        # numOffChipBytes = (
        #     globalStats[SK.LLC_MEM_MSG_SIZE_16BYTES_FLITS_KEY] * CollectTask.NUM_BYTES_FLIT)
        # offChipBW = numOffChipBytes / (numSeconds * math.pow(2, 30))
        # globalStats[SK.SUM_REQD_OFFCHIPBW_64BYTES_FLITS_KEY] = offChipBW

        return di_stats
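As a standalone check of the arithmetic above, a minimal sketch with made-up numbers; CLK_FREQUENCY and NUM_BYTES_FLIT stand in for the CollectTask constants, and their values here are assumptions:

import math

# Made-up inputs; the real values come from the parsed stats and CollectTask.
CLK_FREQUENCY = 1.6e9       # cycles per second (assumed clock)
NUM_BYTES_FLIT = 16         # bytes per on-chip flit (assumed, matches the 16-byte flit keys)
cycle_count = 2_000_000_000
onchip_flits = 500_000_000

num_seconds = cycle_count / CLK_FREQUENCY
num_onchip_bytes = onchip_flits * NUM_BYTES_FLIT
onchip_bw = num_onchip_bytes / (num_seconds * math.pow(2, 30))  # GiB/s
print(round(onchip_bw, 2))  # ~5.96 GiB/s for these inputs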
Example 4
 def getUser(self):
     if "USER" not in self.config:
         util.raiseError("CONFIG file does not contain USER section.")
     for key in self.config["USER"]:
         if key != Config.str_allowedUserKey.lower():
             util.raiseError("Invalid key in USER section in CONFIG file.")
     return self.config["USER"][key]
Example 5
 def __processPintoolOutput(path, tool, di_stats):
     """Parse the given output file and populate and return di_stats."""
     _str_fileName = path + CollectTask.FILE_SEP + tool + "-stats.output"
     if not os.path.isfile(_str_fileName):
         util.raiseError("Pintool stats file not present: ",
                         _str_fileName,
                         stack=False)
     di_stats = Pintool.parseStats(_str_fileName, di_stats)
     return di_stats
Example 6
    def getEmails(self):
        if "EMAIL" not in self.config:
            util.raiseError("CONFIG file does not contain EMAIL section.")

        for key in self.config["EMAIL"]:
            if key != Config.str_allowedEmailKey.lower():
                util.raiseError("Invalid key in EMAIL section in CONFIG file.")

        return self.config["EMAIL"][key]
Example 7
 def __buildPintool(options):
     if options.verbose >= 1:
         print(BuildTask.__outputPrefix() + "Building Pintool.")
     os.chdir(BuildTask.PINTOOL_ROOT)
     try:
         subprocess.check_call(["make"])
     except CalledProcessError:
         util.raiseError("Building Pintool failed.")
     if options.verbose >= 1:
         print(BuildTask.__outputPrefix() + "Done building Pintool.")
Example 8
def __mergeAverage(rs, key):
    """Merge the result set, and return the average."""
    fl_sum = 0.0
    tmp = {}
    try:
        for d in rs:
            fl_sum += d.get(key)
        tmp[key] = fl_sum / len(rs)
    except (ZeroDivisionError, TypeError) as e:
        util.raiseError(repr(e) + ", Key:" + key, stack=True)
    return tmp
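A usage sketch for the helper above, with a hypothetical result set and a hypothetical stat key:

# Hypothetical result set: three trials of the same stat key.
rs = [
    {"ExecutionCycleCount": 1000.0},
    {"ExecutionCycleCount": 1200.0},
    {"ExecutionCycleCount": 1100.0},
]
# __mergeAverage(rs, "ExecutionCycleCount") -> {"ExecutionCycleCount": 1100.0}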
Example 9
    def checkSyncBlock(self):
        if "SYNC" not in self.config:
            util.raiseError("CONFIG file does not contain SYNC section.")

        for key in self.config["SYNC"]:
            found = False
            for akey in Config.tup_allowedSyncKeys:
                if key.lower() == akey.lower():
                    found = True
            if not found:
                util.raiseError("Invalid key in SYNC section in CONFIG file.")
Example 10
    def __init__(self):
        cwd = os.getcwd()
        try:
            os.chdir(Config.VISER_EXP)
            if not os.path.exists(Config.CONFIG):
                util.raiseError("CONFIG file missing in ", Config.VISER_EXP,
                                " directory.")

            self.config = configparser.ConfigParser()
            self.config.read(Config.CONFIG)
        finally:
            os.chdir(cwd)
Example 11
 def parseStats(fileName, di_store):
     try:
         # Use 'with' so the stats file is closed even if parsing fails
         with open(fileName) as f:  # 'r' mode is implicit if omitted
             for line in f:
                 coreid, d = BackendSimulator.__processLine(line)
                 if not d:
                     continue  # Comment line
                 if coreid in di_store:
                     di_store[coreid].update(d)
                 else:
                     di_store[coreid] = d
     except RuntimeError as e:
         # Does not catch all exceptions
         # http://stackoverflow.com/questions/18982610/difference-between-except-and-except-exception-as-e-in-python
         print("Exception thrown:", fileName)
         print(line)
         util.raiseError(repr(e), stack=True)
     return di_store
Example 12
    def parseDetailedStats(filename, d):
        """This only supports Viser configs that have an AIM."""
        f = open(filename)
        inProcessor = False
        inAIM = False
        num_l3s = 0
        for line in f:
            line = line.strip()

            if line == "Processor:":
                inProcessor = True
                continue
            elif ":" in line:
                inProcessor = False

            if "L3" in line:
                num_l3s += 1
                if num_l3s == 3:
                    inAIM = True

            if inAIM and "Runtime Dynamic" in line:
                d[EnergyStatsKeys.AIM_DYNAMIC_POWER] = Mcpat.__getValue(line)
            elif inAIM and "Subthreshold Leakage with power gating" in line:
                d[EnergyStatsKeys.AIM_STATIC_POWER] = Mcpat.__getValue(line)

            if inProcessor and "Area" in line:
                d[EnergyStatsKeys.AREA] = Mcpat.__getValue(line)
            elif inProcessor and "Subthreshold Leakage with power gating" in line:
                d[EnergyStatsKeys.STATIC_POWER] = Mcpat.__getValue(line)
            elif inProcessor and "Runtime Dynamic" in line:
                d[EnergyStatsKeys.DYNAMIC_POWER] = Mcpat.__getValue(line)

        try:
            if "idealaim" in filename and "32" in filename:
                assert num_l3s == 4
            else:
                assert num_l3s == 3
        except AssertionError:
            print("File name: ", filename)
            util.raiseError("Number of l3s %s do not match" % (num_l3s))

        return d
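For orientation, an illustrative, hand-written excerpt of the kind of report lines the parser above keys on; it is built only from the substrings the code matches ("Processor:", "Area", "Subthreshold Leakage with power gating", "Runtime Dynamic", "L3"), and the numbers and layout are made up rather than copied from real McPAT output:

# Hand-written illustration only; numbers and surrounding layout are made up.
example_report = """\
Processor:
  Area = 220.5 mm^2
  Subthreshold Leakage with power gating = 20.1 W
  Runtime Dynamic = 55.3 W
Total Cores:
  ...
L3
L3
L3
  Subthreshold Leakage with power gating = 0.8 W
  Runtime Dynamic = 2.4 W
"""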
Example 13
 def emailTask(options):
     EmailTask.__printTaskInfoStart(options)
     str_emails = options.config.getEmails()
     # Construct message
     body = options.getExpCommand()
     try:
         # s = smtplib.SMTP("rain.cse.ohio-state.edu", 25, "localhost")
         s = smtplib.SMTP("localhost")
         for email in ast.literal_eval(str_emails):
             # Build a fresh message for each recipient; msg was never
             # constructed in the original snippet, so a plain-text MIME
             # message (email.mime.text.MIMEText) is assumed here
             msg = MIMEText(body)
             msg['Subject'] = Header(
                 socket.gethostname() + ": Viser experiment is done",
                 'utf-8')
             msg['From'] = "*****@*****.**"
             msg['To'] = email
             s.send_message(msg)
         s.quit()
         EmailTask.__printTaskInfoEnd(options)
     except ConnectionRefusedError as e:
         util.raiseError(" Sending email failed..." + repr(e))
Example 14
 def __buildSimulatorHelper(path):
     os.chdir(path)
     rc = os.system("ant build")
     if rc != 0:
         util.raiseError("Failed building the simulator.")
Example 15
    def postProcessResults(options, resultSet):
        # The result set is a list of all experiments. It should be a product of
        # (#benchmarks x #trials x #configs).

        DEBUG = False

        # Replace the memory access computation for ARC configurations
        for exp in resultSet:
            if util.isPintool(exp["tool"]):
                continue

            global_data = exp[BackendSimulator.GLOBAL_CPUID_VAL]

            if util.isViserConfig(exp["tool"]):
                llc_misses = (global_data[SK.L3_READ_MISSES_KEY] +
                              global_data[SK.L3_WRITE_MISSES_KEY])
                aim_misses = (global_data[VK.AIM_READ_MISSES_KEY] +
                              global_data[VK.AIM_WRITE_MISSES_KEY])
                llc_evictions = global_data[SK.L3_LINE_EVICTIONS_KEY]
                llc_dirty_evictions = global_data[
                    SK.L3_DIRTY_LINE_EVICTIONS_KEY]
                aim_evictions = global_data[VK.AIM_LINE_EVICTIONS_KEY]
                # Communication between the memory subsystem and the controller is in 64-byte lines
                aim_64bytes_lines = math.ceil(
                    util.getARCAIMLineSize(exp["tool"]) /
                    Constants.DATA_LINE_SIZE)
                mem_reads = llc_misses + aim_misses * aim_64bytes_lines
                mem_writes = llc_dirty_evictions + aim_evictions * aim_64bytes_lines
                if util.isViserIdealAIMConfig(exp["tool"]):
                    ideal_aim_64bytes_lines = math.ceil(
                        (util.getARCAIMLineSize(exp["tool"]) +
                         Constants.DATA_LINE_SIZE) / Constants.DATA_LINE_SIZE)
                    mem_reads = llc_misses * ideal_aim_64bytes_lines
                    # The data and AIM lines are together, so writes cannot distinguish between the
                    # two parts
                    # mem_writes = llc_dirty_evictions + llc_evictions * aim_64bytes_lines
                    mem_writes = llc_evictions * ideal_aim_64bytes_lines
                global_data[SK.MEM_64BYTES_READS_KEY] = mem_reads
                global_data[SK.MEM_64BYTES_WRITES_KEY] = mem_writes
                global_data[
                    SK.MEM_64BYTES_ACCESSES_KEY] = mem_reads + mem_writes
                if DEBUG:
                    print("Config:", exp["tool"])
                    print("Benchmark:", exp["bench"])
                    print("AIM evictions:", aim_evictions)
                    print("AIM misses:", aim_misses)
                    print("LLC misses:", llc_misses)
                    print("LLC evictions:", llc_evictions)
                    print("LLC dirty evictions:", llc_dirty_evictions)
                    print("Mem accesses:", mem_reads + mem_writes)
                    print("Size of AIM line in 64 Bytes:", aim_64bytes_lines)
            elif util.isCEConfigWithAIM(exp["tool"]):
                llc_misses = (global_data[SK.L3_READ_MISSES_KEY] +
                              global_data[SK.L3_WRITE_MISSES_KEY])
                llc_evictions = global_data[SK.L3_LINE_EVICTIONS_KEY]
                llc_dirty_evictions = global_data[
                    SK.L3_DIRTY_LINE_EVICTIONS_KEY]
                aim_misses = (global_data[VK.AIM_READ_MISSES_KEY] +
                              global_data[VK.AIM_WRITE_MISSES_KEY])
                aim_evictions = global_data[VK.AIM_LINE_EVICTIONS_KEY]
                aim_64bytes_lines = math.ceil(
                    util.getCEAIMLineSize(exp["tool"]) /
                    Constants.DATA_LINE_SIZE)
                mem_reads = llc_misses + aim_misses * aim_64bytes_lines
                mem_writes = llc_dirty_evictions + aim_evictions * aim_64bytes_lines
                global_data[SK.MEM_64BYTES_READS_KEY] = mem_reads
                global_data[SK.MEM_64BYTES_WRITES_KEY] = mem_writes
                global_data[
                    SK.MEM_64BYTES_ACCESSES_KEY] = mem_reads + mem_writes
                if DEBUG:
                    print("Config:", exp["tool"])
                    print("Benchmark:", exp["bench"])
                    print("AIM evictions:", aim_evictions)
                    print("AIM misses:", aim_misses)
                    print("LLC misses:", llc_misses)
                    print("LLC evictions:", llc_evictions)
                    print("LLC dirty evictions:", llc_dirty_evictions)
                    print("Mem accesses:", mem_reads + mem_writes)
                    print("Size of AIM line in 64 Bytes:", aim_64bytes_lines)
            elif util.isCEConfigWithoutAIM(exp["tool"]):
                llc_misses = (global_data[SK.L3_READ_MISSES_KEY] +
                              global_data[SK.L3_WRITE_MISSES_KEY])
                llc_evictions = global_data[SK.L3_LINE_EVICTIONS_KEY]
                llc_dirty_evictions = global_data[
                    SK.L3_DIRTY_LINE_EVICTIONS_KEY]
                l2_evictions = global_data[SK.L2_LINE_EVICTIONS_KEY]
                l2_misses = global_data[SK.L2_READ_MISSES_KEY] + global_data[
                    SK.L2_WRITE_MISSES_KEY]
                # CE only transmits read and write metadata per core but that only
                # impacts on-chip traffic. The off-chip traffic computation assumes the memory
                # controller granularity is 64 Bytes.
                ce_line_size = math.ceil((Constants.RD_MD_BYTES_PER_LINE +
                                          Constants.WR_MD_BYTES_PER_LINE) /
                                         Constants.NUM_BYTES_MEM_FLIT)
                aim_64bytes_lines = math.ceil(
                    util.getCEAIMLineSize(exp["tool"]) /
                    Constants.DATA_LINE_SIZE)
                mem_reads = llc_misses + l2_misses * aim_64bytes_lines
                mem_writes = llc_dirty_evictions + l2_evictions * aim_64bytes_lines
                global_data[SK.MEM_64BYTES_READS_KEY] = mem_reads
                global_data[SK.MEM_64BYTES_WRITES_KEY] = mem_writes
                global_data[
                    SK.MEM_64BYTES_ACCESSES_KEY] = mem_reads + mem_writes
                if DEBUG:
                    print("Config:", exp["tool"])
                    print("Benchmark:", exp["bench"])
                    print("LLC misses:", llc_misses)
                    print("LLC evictions:", llc_evictions)
                    print("LLC dirty evictions:", llc_dirty_evictions)
                    print("L2 misses:", l2_misses)
                    print("L2 evictions:", l2_evictions)
                    print("Mem accesses:", mem_reads + mem_writes)
                    print("Size of CE line in 64 Bytes:", ce_line_size)
                    print("Size of AIM line in 64 Bytes:", aim_64bytes_lines)
            elif util.isMESIConfig(exp["tool"]):
                llc_misses = (global_data[SK.L3_READ_MISSES_KEY] +
                              global_data[SK.L3_WRITE_MISSES_KEY])
                llc_dirty_evictions = global_data[
                    SK.L3_DIRTY_LINE_EVICTIONS_KEY]
                mem_reads = llc_misses
                mem_writes = llc_dirty_evictions
                global_data[SK.MEM_64BYTES_READS_KEY] = mem_reads
                global_data[SK.MEM_64BYTES_WRITES_KEY] = mem_writes
                global_data[
                    SK.MEM_64BYTES_ACCESSES_KEY] = mem_reads + mem_writes
            else:
                util.raiseError("unknown config %s" % (exp["tool"]))

        # Now we have an estimate of the off-chip memory access
        for exp in resultSet:
            if util.isPintool(exp["tool"]):
                continue
            global_data = exp[BackendSimulator.GLOBAL_CPUID_VAL]
            numOffChipBytes = (global_data[SK.MEM_64BYTES_ACCESSES_KEY] *
                               CollectTask.NUM_BYTES_MEM_FLIT)
            numSeconds = (global_data[SK.BANDWIDTH_CYCLE_COUNT_KEY] /
                          CollectTask.CLK_FREQUENCY)
            offChipBW = numOffChipBytes / (numSeconds * math.pow(2, 30))
            global_data[SK.SUM_REQD_OFFCHIPBW_64BYTES_FLITS_KEY] = offChipBW

        # Fill in the max required bandwidth computation

        # Check whether any of the configs exceed the available bandwidth. If yes, then we round
        # off the bandwidth value and scale the execution cycles.
        for exp in resultSet:
            if not util.isPintool(exp["tool"]):
                globalStats = exp[BackendSimulator.GLOBAL_CPUID_VAL]
                offChipBW = globalStats[
                    SK.SUM_REQD_OFFCHIPBW_64BYTES_FLITS_KEY]
                onChipBW = globalStats[SK.SUM_REQD_ONCHIPBW_16BYTES_FLITS_KEY]

                if DEBUG or (onChipBW > Constants.ONCHIP_BW) or (
                        offChipBW > Constants.OFFCHIP_BW):
                    print("Benchmark:%s Tool: %s OnChipBW: %s OffChipBW: %s" %
                          (exp["bench"], exp["tool"], onChipBW, offChipBW))

                if onChipBW > Constants.ONCHIP_BW:
                    frac = (onChipBW / Constants.ONCHIP_BW)
                    numCycles = globalStats[SK.BANDWIDTH_CYCLE_COUNT_KEY]
                    newNumCycles = int(round(frac * numCycles))
                    if util.isViserConfig(exp["tool"]):
                        reg = globalStats[VK.REG_EXEC_BW_CYCLE_COUNT_KEY]
                        pre = globalStats[VK.PRE_COMMIT_BW_CYCLE_COUNT_KEY]
                        post = globalStats[VK.POST_COMMIT_BW_CYCLE_COUNT_KEY]
                        rv = globalStats[VK.READ_VALIDATION_BW_CYCLE_COUNT_KEY]
                        assert reg + pre + post + rv == numCycles
                        globalStats[VK.REG_EXEC_BW_CYCLE_COUNT_KEY] = int(
                            round(reg * frac))
                        reg = globalStats[VK.REG_EXEC_BW_CYCLE_COUNT_KEY]
                        globalStats[VK.PRE_COMMIT_BW_CYCLE_COUNT_KEY] = int(
                            round(pre * frac))
                        pre = globalStats[VK.PRE_COMMIT_BW_CYCLE_COUNT_KEY]
                        globalStats[
                            VK.READ_VALIDATION_BW_CYCLE_COUNT_KEY] = int(
                                round(rv * frac))
                        rv = globalStats[VK.READ_VALIDATION_BW_CYCLE_COUNT_KEY]
                        globalStats[VK.POST_COMMIT_BW_CYCLE_COUNT_KEY] = (
                            newNumCycles - reg - pre - rv)
                    else:
                        coh = globalStats[MK.COHERENCE_EXEC_CYCLE_COUNT_KEY]
                        ex = globalStats[MK.MEM_EXEC_CYCLE_COUNT_KEY]
                        assert coh + ex == numCycles
                        globalStats[MK.COHERENCE_EXEC_CYCLE_COUNT_KEY] = int(
                            round(coh * frac))
                        coh = globalStats[MK.COHERENCE_EXEC_CYCLE_COUNT_KEY]
                        globalStats[MK.MEM_EXEC_CYCLE_COUNT_KEY] = (
                            newNumCycles - coh)
                    print(
                        "Scaling cycles for [%s, %s] by %s because on-chip bandwidth exceeds "
                        "available limit" % (exp["bench"], exp["tool"], frac))
                    print("Old cycles: %s new scaled cycles: %s" %
                          (numCycles, newNumCycles))
                    globalStats[SK.BANDWIDTH_CYCLE_COUNT_KEY] = newNumCycles
                    # globalStats[SK.SUM_REQD_ONCHIPBW_16BYTES_FLITS_KEY] = Constants.ONCHIP_BW

                if offChipBW > Constants.OFFCHIP_BW:
                    frac = (offChipBW / Constants.OFFCHIP_BW)
                    numCycles = globalStats[SK.BANDWIDTH_CYCLE_COUNT_KEY]
                    newNumCycles = int(round(numCycles * frac))
                    if util.isViserConfig(exp["tool"]):
                        reg = globalStats[VK.REG_EXEC_BW_CYCLE_COUNT_KEY]
                        pre = globalStats[VK.PRE_COMMIT_BW_CYCLE_COUNT_KEY]
                        post = globalStats[VK.POST_COMMIT_BW_CYCLE_COUNT_KEY]
                        rv = globalStats[VK.READ_VALIDATION_BW_CYCLE_COUNT_KEY]
                        try:
                            assert reg + pre + post + rv == numCycles
                        except AssertionError:
                            print(
                                "Reg: %s Pre: %s RV: %s Post: %s Sum: %s Total: %s"
                                % (reg, pre, rv, post, reg + pre + rv + post,
                                   numCycles))
                            sys.exit()

                        globalStats[VK.REG_EXEC_BW_CYCLE_COUNT_KEY] = int(
                            round(reg * frac))
                        reg = globalStats[VK.REG_EXEC_BW_CYCLE_COUNT_KEY]
                        globalStats[VK.PRE_COMMIT_BW_CYCLE_COUNT_KEY] = int(
                            round(pre * frac))
                        pre = globalStats[VK.PRE_COMMIT_BW_CYCLE_COUNT_KEY]
                        globalStats[
                            VK.READ_VALIDATION_BW_CYCLE_COUNT_KEY] = int(
                                round(rv * frac))
                        rv = globalStats[VK.READ_VALIDATION_BW_CYCLE_COUNT_KEY]
                        globalStats[VK.POST_COMMIT_BW_CYCLE_COUNT_KEY] = (
                            newNumCycles - reg - pre - rv)
                        post = globalStats[VK.POST_COMMIT_BW_CYCLE_COUNT_KEY]
                    else:
                        coh = globalStats[MK.COHERENCE_EXEC_CYCLE_COUNT_KEY]
                        ex = globalStats[MK.MEM_EXEC_CYCLE_COUNT_KEY]
                        try:
                            assert coh + ex == numCycles
                        except AssertionError:
                            print("Coh: %s Ex: %s NumCycles: %s" %
                                  (coh, ex, numCycles))
                            sys.exit()

                        globalStats[MK.COHERENCE_EXEC_CYCLE_COUNT_KEY] = int(
                            round(coh * frac))
                        coh = globalStats[MK.COHERENCE_EXEC_CYCLE_COUNT_KEY]
                        globalStats[MK.MEM_EXEC_CYCLE_COUNT_KEY] = (
                            newNumCycles - coh)
                    print(
                        "Scaling cycles for [%s, %s] by %s because off-chip bandwidth exceeds "
                        "available limit" % (exp["bench"], exp["tool"], frac))
                    print("Old cycles: %s new scaled cycles: %s" %
                          (numCycles, newNumCycles))
                    globalStats[SK.BANDWIDTH_CYCLE_COUNT_KEY] = newNumCycles
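The scaling step above boils down to: if the required bandwidth exceeds the available budget, stretch the cycle count by the same ratio. A minimal numeric sketch; the bandwidth budget and measured values below are made up, not the project's actual ONCHIP_BW/OFFCHIP_BW constants:

# Made-up numbers illustrating the scaling rule used above.
AVAILABLE_BW = 64.0          # GiB/s budget (placeholder)
required_bw = 80.0           # GiB/s demanded by the config (placeholder)
num_cycles = 1_500_000_000

if required_bw > AVAILABLE_BW:
    frac = required_bw / AVAILABLE_BW           # 1.25
    num_cycles = int(round(frac * num_cycles))  # 1_875_000_000
print(num_cycles)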
Example 16
    def collectMcpatResults(options, resSet):
        cwd = os.getcwd()
        odir = (options.getExpProductsDir() + os.sep +
                McPATTask.McPAT_ROOT_DIR + os.sep +
                McPATTask.McPAT_OUTPUT_FILES)
        os.chdir(odir)
        energyStats = []  # list of dictionaries

        try:
            for w in options.getWorkloadTuple():
                for b in tuple(options.getBenchTuple()):
                    dic = {}
                    dic["bench"] = b
                    dic["workload"] = w
                    for t in options.getToolsTuple():
                        if util.isPintool(t):
                            continue
                        statsFile = b + '-' + t + '-' + w + '.mcpat'

                        if not os.path.exists(statsFile):
                            util.raiseError(
                                "[error] Mcpat output file not present: ",
                                statsFile)

                        mergedDic = dic.copy()
                        mergedDic["tool"] = t
                        li_di_bench = ResultSet.limitResultSetWithDict(
                            resSet, mergedDic)
                        merged_cycles = merge.merge(
                            li_di_bench, SK.BANDWIDTH_CYCLE_COUNT_KEY)[
                                SK.BANDWIDTH_CYCLE_COUNT_KEY]

                        merged_bf_energy = 0.0
                        if not util.isOnlyCEConfigNoAIM(
                                options.getToolsTuple()):
                            merged_bf_energy = merge.merge(
                                li_di_bench, VK.BLOOM_FILTER_TOTAL_ENERGY)[
                                    VK.BLOOM_FILTER_TOTAL_ENERGY]

                        dynamic_aim_energy = 0
                        static_aim_energy = 0
                        simDic = {}
                        if util.isViserConfig(t) or util.isCEConfigWithAIM(t):
                            simDic = Mcpat.parseDetailedStats(
                                statsFile, simDic)
                            if not CollectTask.ADD_AIM_McPAT:  # Estimate from the simulator
                                # dynamic_aim_energy = (merge.merge(
                                #     li_di_bench,
                                #     VK.AIM_DYNAMIC_TOTAL_ENERGY)[VK.AIM_DYNAMIC_TOTAL_ENERGY])
                                simDic[EK.AIM_STATIC_POWER] = 0
                                simDic[EK.AIM_DYNAMIC_POWER] = 0
                            else:
                                dynamic_aim_energy = (
                                    simDic[EK.AIM_DYNAMIC_POWER] *
                                    merged_cycles / CollectTask.CLK_FREQUENCY)
                                static_aim_energy = (
                                    simDic[EK.AIM_STATIC_POWER] *
                                    merged_cycles / CollectTask.CLK_FREQUENCY)
                        else:
                            simDic = Mcpat.parseTerseStats(statsFile, simDic)

                        simDic[EK.STATIC_ENERGY] = (simDic[EK.STATIC_POWER] *
                                                    merged_cycles /
                                                    CollectTask.CLK_FREQUENCY)
                        simDic[EK.DYNAMIC_ENERGY] = (simDic[EK.DYNAMIC_POWER] *
                                                     merged_cycles /
                                                     CollectTask.CLK_FREQUENCY)
                        simDic[EK.BLOOM_FILTER_ENERGY] = merged_bf_energy
                        simDic[EK.AIM_STATIC_ENERGY] = static_aim_energy
                        simDic[EK.AIM_DYNAMIC_ENERGY] = dynamic_aim_energy
                        # McPAT output already includes the AIM component
                        simDic[EK.TOTAL_ENERGY] = (simDic[EK.STATIC_ENERGY] +
                                                   simDic[EK.DYNAMIC_ENERGY] +
                                                   merged_bf_energy)

                        # Union the two dictionaries
                        energyStats.append({**mergedDic, **simDic})

        finally:
            os.chdir(cwd)
        return energyStats
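The energy terms above are power-times-time conversions: a component's energy is its McPAT power multiplied by the merged cycle count divided by the clock frequency. A tiny standalone sketch with made-up numbers; CLK_FREQUENCY stands in for CollectTask's constant and its value here is an assumption:

# Energy = power * (cycles / clock frequency); all numbers are made up.
CLK_FREQUENCY = 1.6e9        # Hz (assumed)
static_power = 20.0          # W, from the McPAT report (hypothetical)
dynamic_power = 55.0         # W (hypothetical)
merged_cycles = 2_000_000_000

seconds = merged_cycles / CLK_FREQUENCY
static_energy = static_power * seconds     # Joules
dynamic_energy = dynamic_power * seconds   # Joules
print(static_energy + dynamic_energy)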
Example 17
    def parse(self, options):
        # Check if environment variables are properly defined

        di_options = vars(self.parser.parse_args())
        options.setOptions(di_options)
        if options.verbose >= 2:
            options.printOptions()

        # Sanity checks
        if not Project.isSupportedProject(options.getProject()):
            util.raiseError("Invalid project: ", options.getProject())

        for t in options.getTasksTuple():
            # Allow an empty task, e.g., "clean,"
            if t not in CmdLine.allowedTasks and t:
                util.raiseError("Invalid task: ", t)

        for t in options.getToolsTuple():
            if t not in CmdLine.allowedTools:
                util.raiseError("Invalid tool: ", t)

        options.removeBenchDuplicates()

        for b in options.getBenchTuple():
            if (not Benchmark.isHTTPDBenchmark(b)
                    and not Benchmark.isParsecBenchmark(b)
                    and not Benchmark.isSplash2xBenchmark(b)):
                util.raiseError("Invalid bench: ", b)

        if options.parallelBenches > len(
                options.getBenchTuple()) or options.parallelBenches < 1:
            util.raiseError(
                "Invalid parallelBenches (should be within [1,benchNum]): ",
                str(options.parallelBenches))

        for w in options.getWorkloadTuple():
            if w not in CmdLine.allowedSizes:
                util.raiseError("Invalid workload size: ", w)

        # if "run" is there in "tasks", then "tools" should have pintool and
        # at least one simulator
        if "run" in options.getTasksTuple():
            if "sniper" in options.getToolsTuple() and len(
                    options.getToolsTuple()) > 1:
                util.raiseError("Sniper can be the only tool running!")
            if options.getSimulatorsTuple():
                if "pintool" not in options.getToolsTuple():
                    util.raiseError(
                        "The Pintool frontend is required to run the backend simulators."
                    )

        # "Result" task requires bench and trials option
        if "result" in options.getTasksTuple():
            if not options.getBenchTuple():
                util.raiseError("No benchmark specified.")
            if options.trials == 0:
                util.raiseError("Number of trials unspecified.")
            if not options.getWorkloadTuple():
                util.raiseError("No workload size specified.")

        # Limited safety check for matching cores and configurations
        if "run" in options.getTasksTuple():
            if options.pinThreads == 16 or options.pinThreads == 32:
                for t in options.getToolsTuple():
                    if not util.isPintool(t) and not util.isSniperConfig(t):
                        if str(options.pinThreads) not in t:
                            util.raiseError(
                                "Check tool and threads combination: ", t,
                                str(options.pinThreads))

        # # # SB: Need to polish this more
        # for t in options.getToolsTuple():
        #     if (("16" in t and options.pinThreads != 16) or
        #         ("32" in t and options.pinThreads != 32)):
        #         util.raiseError(("Check tool and threads combination: "), t,
        #                         str(options.pinThreads))

        # Lockstep execution only makes sense if there is at least one backend
        # along with the pintool
        if options.lockstep and not options.getSimulatorsTuple():
            util.raiseError(
                "Lockstep execution only makes sense if there is at least one backend."
            )