def init(benchmark, acf=createChoiceSiteMutators, taf=createTunableMutators):
    """Set up tuning of *benchmark* and return (candidate, tester).

    Builds the starting Candidate from either the --seed config file or the
    benchmark's default config, attaches choice-site and tunable mutators
    produced by the factories *acf* / *taf*, and snapshots config/state files
    into the storage directory unless --delete_output_dir is set.
    """
    if config.debug:
        # Debug mode: verbose logging and stop-on-crash for inspection.
        logging.basicConfig(level=logging.DEBUG)
        config.pause_on_crash = True
    if not config.threads:
        config.threads = pbutil.cpuCount()
    # Promote the warning categories listed in --abort_on to hard errors.
    for warning_name in filter(len, config.abort_on.split(',')):
        warnings.simplefilter('error', getattr(tunerwarnings, warning_name))
    infoxml = TrainingInfo(pbutil.benchmarkToInfo(benchmark))
    if not config.main:
        config.main = mainname([pbutil.benchmarkToBin(benchmark)])
    tester = CandidateTester(benchmark, config.min_input_size)
    # Seed configuration: explicit --seed file wins over the default config.
    if config.seed is not None:
        cfg = configtool.ConfigFile(config.seed)
    else:
        cfg = defaultConfigFile(pbutil.benchmarkToBin(tester.app))
    candidate = Candidate(cfg, infoxml.transform(config.main))
    # Mutators come from both the global section and the main transform.
    addMutators(candidate, infoxml.globalsec(), acf, taf)
    addMutators(candidate, infoxml.transform(config.main), acf, taf)
    candidate.addMutator(mutators.MultiMutator(2))
    if not config.delete_output_dir:
        # Record the run's inputs for later reproduction.
        storagedirs.cur.dumpConfig()
        storagedirs.cur.dumpGitStatus()
        storagedirs.cur.saveFile(pbutil.benchmarkToInfo(benchmark))
        storagedirs.cur.saveFile(pbutil.benchmarkToBin(benchmark))
    return candidate, tester
def init(benchmark, tester_lambda=None, pop_lambda=None, hlconfig_lambda=None,
         config_lambda=None):
    """Set up tuning of *benchmark*; return (candidate, tester, hlconfig).

    Each *_lambda argument, when given, overrides the default construction of
    the corresponding object (tester, population, high-level config, config
    file), which allows callers to inject alternative implementations.
    """
    if config.debug:
        # Debug mode: verbose logging and stop-on-crash for inspection.
        logging.basicConfig(level=logging.DEBUG)
        config.pause_on_crash = True
    if not config.threads:
        config.threads = pbutil.cpuCount()
    # Promote the warning categories listed in --abort_on to hard errors.
    for category_name in filter(len, config.abort_on.split(',')):
        warnings.simplefilter('error', getattr(tunerwarnings, category_name))
    if hlconfig_lambda is not None:
        hlconfig = hlconfig_lambda()
    else:
        infoxml = TrainingInfo(pbutil.benchmarkToInfo(benchmark))
        hlconfig = HighLevelConfig(infoxml)
    if not config.main:
        # Only derive the main name from the binary in the fully-default setup.
        if tester_lambda is None and pop_lambda is None and hlconfig_lambda is None:
            config.main = mainname([pbutil.benchmarkToBin(benchmark)])
    if tester_lambda is not None:
        tester = tester_lambda(benchmark, config.min_input_size)
    else:
        tester = CandidateTester(benchmark, config.min_input_size)
    if config_lambda is not None:
        cfg = config_lambda()
    elif config.seed is None:
        cfg = defaultConfigFile(pbutil.benchmarkToBin(tester.app))
    else:
        cfg = configtool.ConfigFile(config.seed)
    candidate = Candidate(cfg)
    if hlconfig_lambda is None:
        if not config.delete_output_dir:
            # Record the run's inputs for later reproduction.
            storagedirs.cur.dumpConfig()
            storagedirs.cur.dumpGitStatus()
            storagedirs.cur.saveFile(pbutil.benchmarkToInfo(benchmark))
            storagedirs.cur.saveFile(pbutil.benchmarkToBin(benchmark))
        # Fixed-accuracy benchmarks cannot meet an accuracy target; drop it.
        if not infoxml.transform(config.main).isVariableAccuracy() and config.accuracy_target:
            logging.info("clearing accuracy_target")
            config.accuracy_target = None
    return candidate, tester, hlconfig
def init(benchmark, tester_lambda=None, pop_lambda=None, hlconfig_lambda=None,
         config_lambda=None):
    """Prepare autotuning of *benchmark*; return (candidate, tester, hlconfig).

    The optional *_lambda callables replace the default factories for the
    tester, population, high-level config, and config file respectively.
    """
    if config.debug:
        logging.basicConfig(level=logging.DEBUG)
        config.pause_on_crash = True  # halt on crashes while debugging
    if not config.threads:
        config.threads = pbutil.cpuCount()
    # Turn each warning class named in --abort_on into a hard error.
    for name in filter(len, config.abort_on.split(',')):
        warnings.simplefilter('error', getattr(tunerwarnings, name))
    if hlconfig_lambda is None:
        infoxml = TrainingInfo(pbutil.benchmarkToInfo(benchmark))
        hlconfig = HighLevelConfig(infoxml)
    else:
        hlconfig = hlconfig_lambda()
    # Derive config.main from the binary only when nothing was injected.
    all_defaults = (tester_lambda is None and pop_lambda is None
                    and hlconfig_lambda is None)
    if not config.main and all_defaults:
        config.main = mainname([pbutil.benchmarkToBin(benchmark)])
    tester_factory = tester_lambda if tester_lambda is not None else CandidateTester
    tester = tester_factory(benchmark, config.min_input_size)
    if config_lambda is not None:
        cfg = config_lambda()
    else:
        if config.seed is None:
            cfg = defaultConfigFile(pbutil.benchmarkToBin(tester.app))
        else:
            cfg = configtool.ConfigFile(config.seed)
    candidate = Candidate(cfg)
    if hlconfig_lambda is None:
        if not config.delete_output_dir:
            # Snapshot inputs so the run can be reproduced later.
            storagedirs.cur.dumpConfig()
            storagedirs.cur.dumpGitStatus()
            storagedirs.cur.saveFile(pbutil.benchmarkToInfo(benchmark))
            storagedirs.cur.saveFile(pbutil.benchmarkToBin(benchmark))
        # A fixed-accuracy transform can never hit an accuracy target.
        if not infoxml.transform(config.main).isVariableAccuracy() and config.accuracy_target:
            logging.info("clearing accuracy_target")
            config.accuracy_target = None
    return candidate, tester, hlconfig
def main(argv): t1=time.time() global app global cfg global ignore_list global defaultArgs global substderr global options config_tool_path = os.path.split(argv[0])[0] + "/configtool.py" fast = False parser = optparse.OptionParser(usage="usage: %prog [options] BENCHMARK") parser.add_option("--min", type="int", dest="min", default=1) parser.add_option("-n", "--random", "--max", type="int", dest="n", default=-1) parser.add_option("--offset", type="int", dest="offset", default=0) parser.add_option("--max-sec", type="float", dest="maxsec", default=0) parser.add_option("-d", "--debug", action="store_true", dest="debug", default=False) parser.add_option("-f", "--fast", action="store_true", dest="fast", default=False) parser.add_option("--threads", type="int", dest="threads", default=pbutil.cpuCount()) parser.add_option("-c", "--config", dest="config", default=None) parser.add_option("--noisolation", action="store_true", dest="noisolation", default=False) parser.add_option("--print", action="store_true", dest="justprint", default=False) parser.add_option("--time", action="store_true", dest="time", default=False) parser.add_option("--acctrials", type="int", dest="acctrials", default=None) parser.add_option("--accimprovetries", type="int", dest="accimprovetries", default=None) parser.add_option("--trials", type="int", dest="trials", default=None) parser.add_option("--trials-sec", type="float", dest="trialssec", default=None) parser.add_option("--trials-max", type="int", dest="trialsmax", default=None) parser.add_option("--transform", dest="transform", default=None) options,args = parser.parse_args() if len(args) != 1: parser.error("expected benchmark name as arg") cfg=options.config app=args[0] pbutil.chdirToPetabricksRoot() pbutil.compilePetabricks() app = pbutil.normalizeBenchmarkName(app) pbutil.compileBenchmarks([app]) if options.debug: substderr = sys.__stderr__ if cfg is None: cfg = pbutil.benchmarkToCfg(app) defaultArgs = ['--config='+cfg, 
'--threads=%d'%options.threads, '--offset=%d'%options.offset, '--min=%d'%options.min] if options.noisolation: defaultArgs.append("--noisolation") if options.acctrials is not None: defaultArgs.append("--acctrials=%d"%options.acctrials) if options.trials is not None: defaultArgs.append("--trials=%d"%options.trials) if options.trialssec is not None: defaultArgs.append("--trials-sec=%f"%options.trialssec) if options.trialsmax is not None: defaultArgs.append("--trials-max=%d"%options.trialsmax) if options.accimprovetries is not None: defaultArgs.append("--accimprovetries=%d"%options.accimprovetries) getIgnoreList() try: infoxml = parse(pbutil.benchmarkToInfo(app)) except: print "Cannot parse:", pbutil.benchmarkToInfo(app) sys.exit(-1) #print "Reseting config entries" #reset() #build index of transforms for t in infoxml.getElementsByTagName("transform"): transforms[nameof(t)]=t if t.getAttribute("templateChoice")=="0": transforms[t.getAttribute("templateName")] = t if options.transform is None: maintx = transforms[mainname()] else: maintx = transforms[options.transform] print "Call tree:" walkCallTree(maintx, fnup=printTx) print print "Autotuning:" progress.status("building work queue") if options.n <= 0: tasks.append(TuneTask("determineInputSizes", determineInputSizes)) if options.time: tasks.append(TuneTask("runTimingTest", lambda:runTimingTest(maintx))) #build list of tasks if not options.fast: walkCallTree(maintx, lambda tx, depth, loops: enqueueAutotuneCmds(tx, maintx, 1, depth, loops)) walkCallTree(maintx, lambda tx, depth, loops: enqueueAutotuneCmds(tx, maintx, 2, depth, loops)) if options.time: tasks.append(TuneTask("runTimingTest", lambda:runTimingTest(maintx))) progress.status("autotuning") while len(tasks)>0: w1=remainingTaskWeight() task=tasks.pop(0) w2=remainingTaskWeight() progress.remaining(w1, w2) task.run() progress.clear() t2=time.time() sec=t2-t1 print "autotuning took %.2f sec"%(t2-t1) for k,v in taskStats.items(): print " %.2f sec in %s"%(v.sec, k) 
sec -= v.sec print " %.2f sec in unknown"%sec names=taskStats.keys() weights=map(lambda x: x.sec/float(max(x.count, 1)), taskStats.values()) scale=len(weights)/sum(weights) print "Suggested weights:" print "taskStats = {" + ", ".join(map(lambda i: "'%s':TaskStats(%.2f)"%(names[i], scale*weights[i]), xrange(len(names)))) + "}"
def main(argv): t1 = time.time() global app global cfg global ignore_list global defaultArgs global substderr global options config_tool_path = os.path.split(argv[0])[0] + "/configtool.py" fast = False parser = optparse.OptionParser(usage="usage: %prog [options] BENCHMARK") parser.add_option("--min", type="int", dest="min", default=1) parser.add_option("-n", "--random", "--max", type="int", dest="n", default=-1) parser.add_option("--offset", type="int", dest="offset", default=0) parser.add_option("--max-sec", type="float", dest="maxsec", default=0) parser.add_option("-d", "--debug", action="store_true", dest="debug", default=False) parser.add_option("-f", "--fast", action="store_true", dest="fast", default=False) parser.add_option("--threads", type="int", dest="threads", default=pbutil.cpuCount()) parser.add_option("-c", "--config", dest="config", default=None) parser.add_option("--noisolation", action="store_true", dest="noisolation", default=False) parser.add_option("--print", action="store_true", dest="justprint", default=False) parser.add_option("--time", action="store_true", dest="time", default=False) parser.add_option("--acctrials", type="int", dest="acctrials", default=None) parser.add_option("--accimprovetries", type="int", dest="accimprovetries", default=None) parser.add_option("--trials", type="int", dest="trials", default=None) parser.add_option("--trials-sec", type="float", dest="trialssec", default=None) parser.add_option("--trials-max", type="int", dest="trialsmax", default=None) parser.add_option("--transform", dest="transform", default=None) options, args = parser.parse_args() if len(args) != 1: parser.error("expected benchmark name as arg") cfg = options.config app = args[0] pbutil.chdirToPetabricksRoot() pbutil.compilePetabricks() app = pbutil.normalizeBenchmarkName(app) pbutil.compileBenchmarks([app]) if options.debug: substderr = sys.__stderr__ if cfg is None: cfg = pbutil.benchmarkToCfg(app) defaultArgs = [ '--config=' + cfg, '--threads=%d' % 
options.threads, '--offset=%d' % options.offset, '--min=%d' % options.min ] if options.noisolation: defaultArgs.append("--noisolation") if options.acctrials is not None: defaultArgs.append("--acctrials=%d" % options.acctrials) if options.trials is not None: defaultArgs.append("--trials=%d" % options.trials) if options.trialssec is not None: defaultArgs.append("--trials-sec=%f" % options.trialssec) if options.trialsmax is not None: defaultArgs.append("--trials-max=%d" % options.trialsmax) if options.accimprovetries is not None: defaultArgs.append("--accimprovetries=%d" % options.accimprovetries) getIgnoreList() try: infoxml = parse(pbutil.benchmarkToInfo(app)) except: print "Cannot parse:", pbutil.benchmarkToInfo(app) sys.exit(-1) #print "Reseting config entries" #reset() #build index of transforms for t in infoxml.getElementsByTagName("transform"): transforms[nameof(t)] = t if t.getAttribute("templateChoice") == "0": transforms[t.getAttribute("templateName")] = t if options.transform is None: maintx = transforms[mainname()] else: maintx = transforms[options.transform] print "Call tree:" walkCallTree(maintx, fnup=printTx) print print "Autotuning:" progress.status("building work queue") if options.n <= 0: tasks.append(TuneTask("determineInputSizes", determineInputSizes)) if options.time: tasks.append(TuneTask("runTimingTest", lambda: runTimingTest(maintx))) #build list of tasks if not options.fast: walkCallTree( maintx, lambda tx, depth, loops: enqueueAutotuneCmds( tx, maintx, 1, depth, loops)) walkCallTree( maintx, lambda tx, depth, loops: enqueueAutotuneCmds( tx, maintx, 2, depth, loops)) if options.time: tasks.append(TuneTask("runTimingTest", lambda: runTimingTest(maintx))) progress.status("autotuning") while len(tasks) > 0: w1 = remainingTaskWeight() task = tasks.pop(0) w2 = remainingTaskWeight() progress.remaining(w1, w2) task.run() progress.clear() t2 = time.time() sec = t2 - t1 print "autotuning took %.2f sec" % (t2 - t1) for k, v in taskStats.items(): print " 
%.2f sec in %s" % (v.sec, k) sec -= v.sec print " %.2f sec in unknown" % sec names = taskStats.keys() weights = map(lambda x: x.sec / float(max(x.count, 1)), taskStats.values()) scale = len(weights) / sum(weights) print "Suggested weights:" print "taskStats = {" + ", ".join( map(lambda i: "'%s':TaskStats(%.2f)" % (names[i], scale * weights[i]), xrange(len(names)))) + "}"