"--noanno",
                  dest="noanno",
                  action="store_true",
                  help="Turn off plot annotations")
parser.add_option("-p", "--plotname", dest="plotname", default="",
                  help="string put in the middle of the output files. If nothing is provided, then" +\
                       "a concatination of all the configs in the graph are used.")

(options, args) = parser.parse_args()
common.load_defined_yamls()

benchmarks = []
options.hardware_dir = common.dir_option_test(options.hardware_dir,
                                              "../../run_hw/", this_directory)
options.data_mappings = common.file_option_test(options.data_mappings,
                                                "correl_mappings.py",
                                                this_directory)
options.blacklist = common.file_option_test(options.blacklist, "",
                                            this_directory)

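# Compile each line of the optional blacklist file into a regex.
# Entries matching these patterns are presumably filtered out of the
# hardware results gathered below.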
blacklist = []
if options.blacklist != "":
    for bannedname in open(options.blacklist).readlines():
        bannedname = bannedname.strip()
        blacklist.append(re.compile(bannedname))

logger = Logger(options.verbose)

# Get the hardware Data
logger.log("Getting HW data\n")
hw_data = {}
# Example #2
    if len(all_logfiles) == 0:
        exit("ERROR - No Logfiles in " + logfiles_directory)
    if options.sim_name != "":
        named_sim = []
        for logf in all_logfiles:
            match_str = r".*\/sim_log\.{0}\..*".format(options.sim_name)
            if re.match(match_str, logf):
                named_sim.append(logf)
        if len(named_sim) == 0:
            exit(
                "Could not find logfiles for job with the name \"{0}\"".format(
                    options.sim_name))
        all_logfiles = named_sim
    parsed_logfiles.append(max(all_logfiles, key=os.path.getmtime))
elif options.logfile == "all":
    parsed_logfiles = [os.path.join(logfiles_directory, f) \
                    for f in os.listdir(logfiles_directory) if(re.match(r'sim_log.*\.latest',f))]
else:
    parsed_logfiles.append(
        common.file_option_test(options.logfile, "", this_directory))

print "Using logfiles " + str(parsed_logfiles)

options.run_dir = common.dir_option_test(
    options.run_dir, this_directory + ("../../sim_run_%s/" % cuda_version),
    this_directory)

if not os.path.isdir(options.run_dir):
    exit(
        options.run_dir +
        " does not exist - specify the run directory where the benchmark/config dirs exist"
    )

# Searches the output file for these strings.
# If they exist, it assumes that the functional
# test implemented in the CPU program has passed.
# Searches the output file and the stderr file for these strings.
# If they exist, then something has gone wrong.
status_strings = { "passed" : "FUNC_TEST_PASSED",
# Example #3
options.sim_name = options.sim_name.strip()

common.load_defined_yamls()

cuda_version = common.get_cuda_version(this_directory)
options.run_dir = common.dir_option_test(
    options.run_dir, this_directory + ("../../sim_run_%s/" % cuda_version),
    this_directory)
if not os.path.isdir(options.run_dir):
    exit(
        options.run_dir +
        " does not exist - specify the run directory where the benchmark/config dirs exist"
    )

options.stats_yml = common.file_option_test(
    options.stats_yml,
    os.path.join(this_directory, "stats", "example_stats.yml"), this_directory)
stat_map = {}
configs = []
apps_and_args = []
exes_and_args = []
specific_jobIds = {}

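# Every entry under the stats yaml's 'collect' key is treated as a regular
# expression; matching statistics are pulled from the simulator output.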
stats_to_pull = {}
stats_yaml = yaml.load(open(options.stats_yml), Loader=yaml.FullLoader)  # explicit Loader required by PyYAML >= 6
stats = {}
for stat in stats_yaml['collect']:
    stats_to_pull[stat] = re.compile(stat)

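# Both a config list and a benchmark suite list are required in order to
# enumerate the (app, config) pairs that make up the run.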
if options.configs_list != "" and options.benchmark_list != "":
    for app in common.gen_apps_from_suite_list(
parser.add_option("-c", "--csv_file", dest="csv_file",
                  help="File to parse",
                  default="")
parser.add_option("-p", "--publish_path", dest="publish_path",
                  help="After the htmls are generated - they will get published here."+\
                  " Assumes you can scp to this directory.",
                  default="")
parser.add_option("-w", "--publish_web", dest="publish_web",
                  help="After the htmls are generated - they will get published here."+\
                  " Assumes you can scp to this directory.",
                  default="")
parser.add_option("-P", "--plotname", dest="plotname",
                  help="String appended to the filenames",
                  default="")
(options, args) = parser.parse_args()
options.csv_file = common.file_option_test( options.csv_file, "", this_directory )

all_stats = get_csv_data(options.csv_file)

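# One grouped-bar trace is emitted per config for every stat in the CSV,
# cycling through the color palette below.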
colors = ['#0F8C79','#BD2D28','#E3BA22','#E6842A','#137B80','#8E6C8A','#9A3E25','#E6842A']
stat_count = 0
for stat, value in all_stats.items():
    traces = []
    cfg_count = 0
    apps, data = value
    for k, v in data.items():
        traces.append(Bar(
            x= apps,
            y= v,
            name=k,
            marker=Marker(color=colors[cfg_count % len(colors)]),
# Example #5
            if os.path.lexists(os.path.join(this_run_dir, "data")):
                os.remove(os.path.join(this_run_dir, "data"))
            os.symlink(benchmark_data_dir, os.path.join(this_run_dir, "data"))
        except common.PathMissing:
            pass

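        # Refresh the top-level "data_dirs" link so this run directory also
        # points at the whole benchmark data tree.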
        all_data_link = os.path.join(this_run_dir, "data_dirs")
        if os.path.lexists(all_data_link):
            os.remove(all_data_link)
        top_data_dir_path = common.dir_option_test(ddir, "", this_directory)
        os.symlink(top_data_dir_path, all_data_link)

        if args is None:
            args = ""

        exec_path = common.file_option_test(os.path.join(edir, exe), "",
                                            this_directory)
        sh_contents = ""
        kernel_number = ""
        if ('mlperf' in exec_path):
            exec_path = "sh " + exec_path
            # For MLPerf we limit the number of profiled kernels to 1000 by default.
            # This can be overridden by explicitly indicating the number of kernels via the -c argument.
            kernel_number = ' -c 1000 '
        if (options.kernel_number > 0):
            kernel_number = ' -c ' + str(options.kernel_number) + ' '

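        # When "other_stats" are collected, wrap the app in an nvprof invocation:
        # a 30-minute timeout guards against hung runs, and the per-kernel GPU
        # trace (microsecond units, all metrics, no demangling) is written as
        # CSV to this run's logfile.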
        if "other_stats" in options.collect:
            if not options.disable_nvprof:
                sh_contents += "\nexport CUDA_VERSION=\"" + cuda_version + "\"; export CUDA_VISIBLE_DEVICES=\"" + options.device_num +\
                    "\" ; timeout 30m nvprof --concurrent-kernels off --print-gpu-trace -u us --metrics all --demangling off --csv --log-file " +\
                    os.path.join(this_run_dir,logfile) + " " + exec_path + " " + str(args) + " "
# Example #6
def main():
    parser = OptionParser()
    parser.add_option("-s", "--selfTest", dest="selfTest",
                  help="launched the selftester.", action="store_true")
    parser.add_option("-f", "--file", dest="file",
                  help="File with the processes to manage.", default=procManStateFile)
    parser.add_option("-t", "--sleepTime", dest="sleepTime",
                  help="Tune how often. ProcMan looks for completed jobs",
                  type=int, default=30)
    parser.add_option("-c", "--cores", dest="cores",
                  help="how many cores to use",
                  type=int, default=psutil.cpu_count())
    parser.add_option("-S", "--start", dest="start",action="store_true",
                  help="Just spawn the manager")
    parser.add_option("-p", "--printState", dest="printState",action="store_true",
                  help="Print the state of the manager")
    parser.add_option("-k", "--kill", dest="kill",action="store_true",
                  help="Kill all managed processes")
    parser.add_option("-j", "--procManForJob", dest="procManForJob",default=None, type=int,
                  help="Return the path of the pickle file for the ProcMan managing this job." )
    (options, args) = parser.parse_args()

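    # Dispatch on the mutually exclusive modes: self-test, kill, print state,
    # spawn the manager, look up a job's ProcMan, submit a single job file, or
    # (default) tick an existing ProcMan until all of its jobs complete.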
    if options.selfTest:
        selfTest()
    elif options.kill:
        procmanfiles = glob.glob(options.file + ".*")
        for f in procmanfiles:
            print("Killing active jobs in Procman: {0}".format(os.path.basename(f)))
            procMan = pickle.load(open(f, 'rb'))
            procMan.killJobs()
    elif options.printState:
        numProcMans = 0
        numQueued = 0
        numActive = 0
        numComplete = 0
        procmanfiles = glob.glob(options.file + ".*")
        if len(procmanfiles) == 0:
            print("Nothing Active")
        else:
            for f in procmanfiles:
                numProcMans += 1
                procMan = pickle.load(open(f, 'rb'))
                numQueued += len(procMan.queuedJobs)
                numActive += len(procMan.activeJobs)
                numComplete += len(procMan.completeJobs)
                print("Procman: {0}".format(os.path.basename(f)))
                print(procMan.getState())
            print("Total Procmans={0}, Total Queued={1}, Total Running={2}, Total Complete={3}"\
                .format(numProcMans, numQueued, numActive, numComplete))
    elif options.start:
        if not os.path.exists(options.file):
            sys.exit("Nothing to start: {0} does not exist".format(options.file))
        procMan = pickle.load(open(options.file, 'rb'))
        if procMan.jobLimit != int(options.cores):
            procMan.jobLimit = int(options.cores)
            procMan.saveState()
        procMan.spawnProcMan(options.sleepTime)
        procMan.clear()
        procMan.saveState()
    elif options.procManForJob is not None:
        procmanfiles = glob.glob(options.file + ".*")
        for f in procmanfiles:
            procMan = pickle.load(open(f, 'rb'))
            j = procMan.getJob(options.procManForJob)
            if j is not None:
                print(procMan.pickleFile)
                break
    elif len(args) == 1:
        # To make this work the same as torque and slurm - if you just give it one argument,
        # we assume it's a pointer to a job file you want to submit.
        if os.path.exists(options.file):
            procMan = pickle.load(open(options.file, 'rb'))
            if not procMan.mutable:
                sys.exit("Error - this procman has already started")
        else:
            procMan = ProcMan(options.cores)
        exec_file = args[0]
        st = os.stat(exec_file)
        os.chmod(exec_file, st.st_mode | stat.S_IEXEC)

        # slurmToJob: recover job metadata from the #SBATCH directives in the
        # submitted script (name, --output, --error) and substitute the job id
        # for $SLURM_JOB_ID.
        job = Job("","",os.getcwd(),exec_file)
        job.id = procMan.queueJob(job)
        contents = ""
        for line in open(exec_file).readlines():
            if line.startswith("#SBATCH"):
                nameMatch = re.match(r"#SBATCH -J (.*)", line.strip())
                if nameMatch:
                    job.name = nameMatch.group(1)
                outFMatch = re.match(r"#SBATCH --output=(.*)", line.strip())
                if outFMatch:
                    job.outF = outFMatch.group(1)
                errFMatch = re.match(r"#SBATCH --error=(.*)", line.strip())
                if errFMatch:
                    job.errF = errFMatch.group(1)
            line = re.sub(r"\$SLURM_JOB_ID", str(job.id), line)
            contents += line
        with open(exec_file, "w+") as f:
            f.write(contents)

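        # Mirror slurm's filename convention: "%j" in --output/--error expands
        # to the job id.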
        job.outF = os.path.expandvars(re.sub(r"%j", str(job.id), job.outF))
        job.errF = os.path.expandvars(re.sub(r"%j", str(job.id), job.errF))
        procMan.saveState()
        print(job.id)
    else:
        options.file = common.file_option_test( options.file, "", this_directory )
        if options.file == "":
            sys.exit("Please specify the file containing the processes to manage with -f.")
        procMan = pickle.load(open(options.file, 'rb'))
        if procMan.tickingProcess is not None:
            sys.exit("This procman is already running {0}".format(os.path.basename(options.file)))
        procMan.pickleFile = options.file
        os.remove(options.file)
        if len(procMan.queuedJobs) > 0:
            while not procMan.complete():
                procMan.tick()
                procMan.saveState()
                time.sleep(options.sleepTime)
            os.remove(procMan.pickleFile)
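# A minimal usage sketch, assuming this script is installed as procman.py
# (the filename is hypothetical):
#   procman.py job.sh   # queue a slurm-style job script and print its id
#   procman.py -S       # spawn the manager so it starts ticking
#   procman.py -p       # print the state of every active ProcMan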
# Example #7
parser.add_option("-c",
                  "--csv_files",
                  dest="csv_files",
                  help="Files to merge",
                  default="")
parser.add_option("-R",
                  "--configs_as_rows",
                  dest="configs_as_rows",
                  help="When printing merged files, are configs as rows?",
                  action="store_true")
(options, args) = parser.parse_args()

csv_files = []
for csvf in options.csv_files.split(","):
    try:
        csv_files.append(common.file_option_test(csvf, "", this_directory))
    except common.PathMissing as e:
        print("Warning path {0}. Continuing".format(e), file=sys.stderr)

stats_per_file = {}
for csvf in csv_files:
    stats_per_file[csvf] = get_csv_data_for_merge(csvf)

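# Merge across files: collect the union of apps/args, stat names, and configs
# seen in any input CSV so the merged table covers everything.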
new_stats = {}
new_configs = []
union_apps_args = set()
union_stats = set()
union_configs = set()
for csvf in csv_files:
    (all_named_kernels, stat_map, apps_and_args, configs, stats,
     gpgpu_build_nums) = stats_per_file[csvf]
        exit("ERROR - No Logfiles in " + logfiles_directory)
    if options.sim_name != "":
        named_sim = []
        for logf in all_logfiles:
            match_str = r".*\/sim_log\.{0}\..*".format( options.sim_name )
            if re.match( match_str, logf ):
                named_sim.append( logf )
        if len( named_sim ) == 0:
            exit( "Could not find logfiles for job with the name \"{0}\"".format( options.sim_name ) )
        all_logfiles = named_sim
    parsed_logfiles.append(max(all_logfiles, key=os.path.getmtime))
elif options.logfile == "all":
    parsed_logfiles = [os.path.join(logfiles_directory, f) \
                    for f in os.listdir(logfiles_directory) if(re.match(r'sim_log.*\.latest',f))]
else:
    parsed_logfiles.append(common.file_option_test( options.logfile, "", this_directory ))

print "Using logfiles " + str(parsed_logfiles)

options.run_dir = common.dir_option_test( options.run_dir, this_directory + ("../../sim_run_%s/"%cuda_version),
                                          this_directory )

if not os.path.isdir(options.run_dir):
    exit(options.run_dir + " does not exist - specify the run directory where the benchmark/config dirs exist")

# Searches the output file for these strings.
# If they exist, it assumes that the functional
# Test implemented in the CPU program has passed
# Searches the output file and the stderr file for these strings
# If they exists, then something has gone wrong.
status_strings = { "passed" : "FUNC_TEST_PASSED",
# If the user does not specify a so file, then use the one in the git repo and copy it out.
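# The .so is staged under <run_directory>/gpgpu-sim-builds/<version_string>,
# presumably so each run keeps pointing at the exact simulator build it was
# launched with even if the checkout is rebuilt later.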
options.so_dir = common.dir_option_test(
    options.so_dir,
    os.path.join(os.getenv("GPGPUSIM_ROOT"), "lib",
                 os.getenv("GPGPUSIM_CONFIG")), this_directory)
so_path = os.path.join(options.so_dir, "libcudart.so")
version_string = extract_so_name(so_path)
running_so_dir = os.path.join(options.run_directory, "gpgpu-sim-builds",
                              version_string)
if not os.path.exists(running_so_dir):
    os.makedirs(running_so_dir)
    shutil.copy(so_path, running_so_dir)
options.so_dir = running_so_dir

options.benchmark_file = common.file_option_test(
    options.benchmark_file,
    os.path.join(this_directory, "regression_recipies", "rodinia_2.0-ft",
                 "benchmarks.yml"), this_directory)
options.configs_file = common.file_option_test(
    options.configs_file,
    os.path.join(this_directory, "regression_recipies", "rodinia_2.0-ft",
                 "configs.yml"), this_directory)

# Test for the existence of torque on the system
if not any([
        os.path.isfile(os.path.join(p, "qsub"))
        for p in os.getenv("PATH").split(os.pathsep)
]):
    exit(
        "ERROR - Cannot find qsub in PATH... Is torque installed on this machine?"
    )