Example #1
def init(benchmark, acf=createChoiceSiteMutators, taf=createTunableMutators):
  if config.debug:
    logging.basicConfig(level=logging.DEBUG)
    config.pause_on_crash = True
  if not config.threads:
    config.threads = pbutil.cpuCount()
  for k in filter(len, config.abort_on.split(',')):
    warnings.simplefilter('error', getattr(tunerwarnings,k))
  infoxml = TrainingInfo(pbutil.benchmarkToInfo(benchmark))
  if not config.main:
    config.main = mainname([pbutil.benchmarkToBin(benchmark)])
  tester = CandidateTester(benchmark, config.min_input_size)
  if config.seed is None:
    cfg = defaultConfigFile(pbutil.benchmarkToBin(tester.app))
  else:
    cfg = configtool.ConfigFile(config.seed)
  candidate = Candidate(cfg, infoxml.transform(config.main))
  addMutators(candidate, infoxml.globalsec(), acf, taf)
  addMutators(candidate, infoxml.transform(config.main), acf, taf)
  candidate.addMutator(mutators.MultiMutator(2))
  if not config.delete_output_dir:
    storagedirs.cur.dumpConfig()
    storagedirs.cur.dumpGitStatus()
    storagedirs.cur.saveFile(pbutil.benchmarkToInfo(benchmark))
    storagedirs.cur.saveFile(pbutil.benchmarkToBin(benchmark))
  return candidate, tester
Example #2
def init(benchmark,
         tester_lambda=None,
         pop_lambda=None,
         hlconfig_lambda=None,
         config_lambda=None):
    if config.debug:
        logging.basicConfig(level=logging.DEBUG)
        config.pause_on_crash = True
    if not config.threads:
        config.threads = pbutil.cpuCount()
    for k in filter(len, config.abort_on.split(',')):
        warnings.simplefilter('error', getattr(tunerwarnings, k))
    if hlconfig_lambda is not None:
        hlconfig = hlconfig_lambda()
    else:
        infoxml = TrainingInfo(pbutil.benchmarkToInfo(benchmark))
        hlconfig = HighLevelConfig(infoxml)
    if not config.main:
        if tester_lambda is None and pop_lambda is None and hlconfig_lambda is None:
            config.main = mainname([pbutil.benchmarkToBin(benchmark)])
    if tester_lambda is not None:
        tester = tester_lambda(benchmark, config.min_input_size)
    else:
        tester = CandidateTester(benchmark, config.min_input_size)
    if config_lambda is not None:
        cfg = config_lambda()
    else:
        if config.seed is None:
            cfg = defaultConfigFile(pbutil.benchmarkToBin(tester.app))
        else:
            cfg = configtool.ConfigFile(config.seed)
    candidate = Candidate(cfg)
    if hlconfig_lambda is None:
        if not config.delete_output_dir:
            storagedirs.cur.dumpConfig()
            storagedirs.cur.dumpGitStatus()
            storagedirs.cur.saveFile(pbutil.benchmarkToInfo(benchmark))
            storagedirs.cur.saveFile(pbutil.benchmarkToBin(benchmark))
        if not infoxml.transform(
                config.main).isVariableAccuracy() and config.accuracy_target:
            logging.info("clearing accuracy_target")
            config.accuracy_target = None
    return candidate, tester, hlconfig
Example #3
def testBenchmark(b):
  name=b[0]
  bin=pbutil.benchmarkToBin(name)
  cfg=pbutil.benchmarkToCfg(name)

  if not os.path.isfile(bin):
    return False
  
  #build cmd
  hash=name
  iofiles=[]
  for x in b[1:]:
    iofiles.append(resolveInputPath(x))
    hash+=" "+os.path.basename(x)
  outfile="./testdata/.output/"+re.sub("[ /.]",'_',hash)
  iofiles.append(outfile+".latest")

  try:
    cmd=[bin, '--fixedrandom', '--config=%s.cfg'%outfile, '--reset']
    if run(cmd) != 0:
      print "ERROR: reset config failed"
      return False
  except OSError:
    print "ERROR: program not runnable"
    return False

  if os.path.isfile("%s.cfg.default"%outfile):
    shutil.copy("%s.cfg.default"%outfile, "%s.cfg"%outfile)

  try:
    infoxml=parse(pbutil.benchmarkToInfo(name))
  except:
    print "invalid *.info file"
    return False

  def test():
    cmd=[bin, '--fixedrandom', '--config=%s.cfg'%outfile]
    cmd.extend(iofiles)
    rv = run(cmd)
    if rv != 0:
      print "run FAILED (status=%d, cmd=%s)"%(rv, ' '.join(cmd))
      return False

    if diffFiles(outfile, outfile+".latest"):
      time.sleep(0.1) #try letting the filesystem settle down
      if diffFiles(outfile, outfile+".latest"):
        print "run FAILED (wrong output)"
        return False
    
    print "run PASSED"
    return True

  return test()
Example #4
def init(benchmark, tester_lambda=None, pop_lambda=None, hlconfig_lambda=None, config_lambda=None):
  if config.debug:
    logging.basicConfig(level=logging.DEBUG)
    config.pause_on_crash = True
  if not config.threads:
    config.threads = pbutil.cpuCount()
  for k in filter(len, config.abort_on.split(',')):
    warnings.simplefilter('error', getattr(tunerwarnings,k))
  if hlconfig_lambda is not None:
    hlconfig = hlconfig_lambda()
  else:
    infoxml = TrainingInfo(pbutil.benchmarkToInfo(benchmark))
    hlconfig = HighLevelConfig(infoxml)
  if not config.main:
    if tester_lambda is None and pop_lambda is None and hlconfig_lambda is None:
      config.main = mainname([pbutil.benchmarkToBin(benchmark)])
  if tester_lambda is not None:
    tester = tester_lambda(benchmark, config.min_input_size)
  else:
    tester = CandidateTester(benchmark, config.min_input_size)
  if config_lambda is not None:
    cfg = config_lambda()
  else:
    if config.seed is None:
      cfg = defaultConfigFile(pbutil.benchmarkToBin(tester.app))
    else:
      cfg = configtool.ConfigFile(config.seed)
  candidate = Candidate(cfg)
  if hlconfig_lambda is None:
    if not config.delete_output_dir:
      storagedirs.cur.dumpConfig()
      storagedirs.cur.dumpGitStatus()
      storagedirs.cur.saveFile(pbutil.benchmarkToInfo(benchmark))
      storagedirs.cur.saveFile(pbutil.benchmarkToBin(benchmark))
    if not infoxml.transform(config.main).isVariableAccuracy() and config.accuracy_target:
      logging.info("clearing accuracy_target")
      config.accuracy_target = None
  return candidate, tester, hlconfig
Example #5
def main(benchmark, n, filename):
    if os.path.isdir(filename):
        filename = os.path.join(filename, 'stats/candidatelog.csv')
    f = open(filename)
    infoxml = TrainingInfo(pbutil.benchmarkToInfo(benchmark))
    main = mainname([pbutil.benchmarkToBin(benchmark)])
    infoxml = infoxml.transform(main)
    binpath = pbutil.benchmarkToBin(benchmark)
    tester = CandidateTester(benchmark, n)
    root = os.path.dirname(filename)

    def findconfig(c):
        if c[0] == '/':
            c = c[1:]
        if os.path.isfile(os.path.join(root, c)):
            return os.path.join(root, c)
        if os.path.isfile(os.path.join(root, '..', c)):
            return os.path.join(root, '..', c)
        return None

    rows = list(csv.DictReader(f))
    for i, row in enumerate(rows):
        if options.onlyrounds \
            and i+1<len(rows) \
            and row.has_key('round_number') \
            and rows[i+1]['round_number']==row['round_number']:
            continue
        config = findconfig(row['config_path'])
        row['tests'] = int(row['tests_complete']) + int(
            row['tests_timeout']) + int(row['tests_crashed'])
        candidate = Candidate(ConfigFile(config), infoxml)
        while candidate.numTests(n) < options.trials:
            try:
                tester.testN(candidate, options.trials, options.timeout)
            except candidatetester.CrashException, e:
                print >> sys.stderr, e
        try:
            row['minperf'] = candidate.metrics[0][n].min()
            row['perf_on_%d' % n], row['perf_on_%d_ci' %
                                       n] = candidate.metrics[0][n].interval(
                                           options.confidence)
            row['invperf'] = 1.0 / row['perf_on_%d' % n]
        except Exception, e:
            row['minperf'] = -1
            row['perf_on_%d' % n] = -1
            print >> sys.stderr, e
Example #6
def main(argv):
  if len(argv) != 2:
    print "No program specified."
    print
    print __doc__
    sys.exit(1)
  app = argv[1]
  app = pbutil.normalizeBenchmarkName(app)

  info_file = pbutil.benchmarkToInfo(app)
  try:
    infoxml = minidom.parse(info_file)
  except:
    print "Parse error while parsing .info XML file:", info_file
    sys.exit(1)

  print_report(app, infoxml)
Example #7
def main(benchmark, n, filename):
  if os.path.isdir(filename):
    filename=os.path.join(filename, 'stats/candidatelog.csv')
  f = open(filename)
  infoxml = TrainingInfo(pbutil.benchmarkToInfo(benchmark))
  main = mainname([pbutil.benchmarkToBin(benchmark)])
  infoxml = infoxml.transform(main)
  binpath=pbutil.benchmarkToBin(benchmark)
  tester = CandidateTester(benchmark, n)
  root = os.path.dirname(filename)
  def findconfig(c):
    if c[0]=='/':
      c=c[1:]
    if os.path.isfile(os.path.join(root, c)):
      return os.path.join(root, c)
    if os.path.isfile(os.path.join(root, '..', c)):
      return os.path.join(root, '..', c)
    return None
  rows = list(csv.DictReader(f))
  for i, row in enumerate(rows):
    if options.onlyrounds \
        and i+1<len(rows) \
        and row.has_key('round_number') \
        and rows[i+1]['round_number']==row['round_number']:
      continue
    config = findconfig(row['config_path'])
    row['tests'] = int(row['tests_complete'])+int(row['tests_timeout'])+int(row['tests_crashed'])
    candidate = Candidate(ConfigFile(config), infoxml)
    while candidate.numTests(n)<options.trials:
      try:
        tester.testN(candidate, options.trials, options.timeout)
      except candidatetester.CrashException, e:
        print >>sys.stderr, e
    try:
      row['minperf'] = candidate.metrics[0][n].min()
      row['perf_on_%d'%n], row['perf_on_%d_ci'%n] = candidate.metrics[0][n].interval(options.confidence)
      row['invperf']=1.0/row['perf_on_%d'%n]
    except Exception,e:
      row['minperf'] = -1
      row['perf_on_%d'%n] = -1
      print >>sys.stderr, e
Example #8
def main(argv):
  t1=time.time()

  global app
  global cfg 
  global ignore_list
  global defaultArgs
  global substderr
  global options 

  config_tool_path = os.path.split(argv[0])[0] + "/configtool.py"
  fast = False

  parser = optparse.OptionParser(usage="usage: %prog [options] BENCHMARK")
  parser.add_option("--min", type="int", dest="min", default=1)
  parser.add_option("-n", "--random", "--max", type="int", dest="n", default=-1)
  parser.add_option("--offset", type="int", dest="offset", default=0)
  parser.add_option("--max-sec", type="float", dest="maxsec", default=0)
  parser.add_option("-d", "--debug",  action="store_true", dest="debug", default=False)
  parser.add_option("-f", "--fast",  action="store_true", dest="fast", default=False)
  parser.add_option("--threads",      type="int", dest="threads", default=pbutil.cpuCount())
  parser.add_option("-c", "--config", dest="config", default=None)
  parser.add_option("--noisolation", action="store_true", dest="noisolation", default=False)
  parser.add_option("--print", action="store_true", dest="justprint", default=False)
  parser.add_option("--time", action="store_true", dest="time", default=False)
  parser.add_option("--acctrials", type="int", dest="acctrials", default=None)
  parser.add_option("--accimprovetries", type="int", dest="accimprovetries", default=None)
  parser.add_option("--trials", type="int", dest="trials", default=None)
  parser.add_option("--trials-sec", type="float", dest="trialssec", default=None)
  parser.add_option("--trials-max", type="int", dest="trialsmax", default=None)
  parser.add_option("--transform", dest="transform", default=None)
  options,args = parser.parse_args()

  if len(args) != 1:
    parser.error("expected benchmark name as arg")

  cfg=options.config
  app=args[0]

  pbutil.chdirToPetabricksRoot()
  pbutil.compilePetabricks()
  app = pbutil.normalizeBenchmarkName(app)
  pbutil.compileBenchmarks([app])
  
  if options.debug:
    substderr = sys.__stderr__

  if cfg is None:
    cfg = pbutil.benchmarkToCfg(app)

  defaultArgs = ['--config='+cfg, '--threads=%d'%options.threads, '--offset=%d'%options.offset, '--min=%d'%options.min]

  if options.noisolation:
    defaultArgs.append("--noisolation")

  if options.acctrials is not None:
    defaultArgs.append("--acctrials=%d"%options.acctrials)
  if options.trials is not None:
    defaultArgs.append("--trials=%d"%options.trials)
  if options.trialssec is not None:
    defaultArgs.append("--trials-sec=%f"%options.trialssec)
  if options.trialsmax is not None:
    defaultArgs.append("--trials-max=%d"%options.trialsmax)
  if options.accimprovetries is not None:
    defaultArgs.append("--accimprovetries=%d"%options.accimprovetries)

  getIgnoreList()

  try:
    infoxml = parse(pbutil.benchmarkToInfo(app))
  except:
    print "Cannot parse:", pbutil.benchmarkToInfo(app)
    sys.exit(-1)

 #print "Reseting config entries"
 #reset()

  #build index of transforms
  for t in infoxml.getElementsByTagName("transform"):
    transforms[nameof(t)]=t
    if t.getAttribute("templateChoice")=="0":
      transforms[t.getAttribute("templateName")] = t

  if options.transform is None:
    maintx = transforms[mainname()]
  else:
    maintx = transforms[options.transform]
  
  print "Call tree:"
  walkCallTree(maintx, fnup=printTx)
  print
  print "Autotuning:"

  progress.status("building work queue")
 
  if options.n <= 0:
    tasks.append(TuneTask("determineInputSizes", determineInputSizes))
    
  if options.time:
    tasks.append(TuneTask("runTimingTest", lambda:runTimingTest(maintx)))

  #build list of tasks
  if not options.fast:
    walkCallTree(maintx, lambda tx, depth, loops: enqueueAutotuneCmds(tx, maintx, 1, depth, loops))
  walkCallTree(maintx, lambda tx, depth, loops: enqueueAutotuneCmds(tx, maintx, 2, depth, loops))
  
  if options.time:
    tasks.append(TuneTask("runTimingTest", lambda:runTimingTest(maintx)))

  progress.status("autotuning")

  while len(tasks)>0:
    w1=remainingTaskWeight()
    task=tasks.pop(0)
    w2=remainingTaskWeight()
    progress.remaining(w1, w2)
    task.run()
  progress.clear()

  t2=time.time()
  sec=t2-t1

  print "autotuning took %.2f sec"%(t2-t1)
  for k,v in taskStats.items():
    print "  %.2f sec in %s"%(v.sec, k)
    sec -= v.sec
  print "  %.2f sec in unknown"%sec
  
  names=taskStats.keys()
  weights=map(lambda x: x.sec/float(max(x.count, 1)), taskStats.values())
  scale=len(weights)/sum(weights)
  print "Suggested weights:"
  print "taskStats = {" + ", ".join(map(lambda i: "'%s':TaskStats(%.2f)"%(names[i], scale*weights[i]), xrange(len(names)))) + "}"
Example #9
def testBenchmark(b):
    name = b[0]
    bin = pbutil.benchmarkToBin(name)
    cfg = pbutil.benchmarkToCfg(name)

    if not os.path.isfile(bin):
        return False

    #build cmd
    hash = name
    iofiles = []
    for x in b[1:]:
        iofiles.append(resolveInputPath(x))
        hash += " " + os.path.basename(x)
    outfile = "./testdata/.output/" + re.sub("[ /.]", '_', hash)
    iofiles.append(outfile + ".latest")

    try:
        cmd = [bin, '--fixedrandom', '--config=%s.cfg' % outfile, '--reset']
        if run(cmd) != 0:
            print "ERROR: reset config failed"
            return False
    except OSError:
        print "ERROR: program not runnable"
        return False

    if os.path.isfile("%s.cfg.default" % outfile):
        shutil.copy("%s.cfg.default" % outfile, "%s.cfg" % outfile)

    try:
        infoxml = parse(pbutil.benchmarkToInfo(name))
    except:
        print "invalid *.info file"
        return False

    def test():
        if isFloatingPoint() and os.path.exists(outfile + ".float"):
            ext = ".float"
            print "FLOAT"
        else:
            ext = ""

        #run cpu config
        cmd = [bin, '--fixedrandom', '--config=%s.cfg' % outfile]
        cmd.extend(iofiles)
        t1 = time.time()
        rv = run(cmd)
        t2 = time.time()
        if rv != 0:
            print "run FAILED (status=%d, cmd=%s)" % (rv, ' '.join(cmd))
            return False

        if diffFiles(outfile + ext, outfile + ".latest"):
            time.sleep(0.1)  #try letting the filesystem settle down
            if diffFiles(outfile + ext, outfile + ".latest"):
                print "run FAILED (wrong output)"
                return False

        print "run PASSED (took %.2fs)" % (t2 - t1)

        if (not haveOpenCL()) or (not os.path.exists(outfile + ".gpucfg")):
            return True

        #run gpu config
        cmd = [bin, '--fixedrandom', '--config=%s.gpucfg' % outfile]
        cmd.extend(iofiles)
        t1 = time.time()
        rv = run(cmd)
        t2 = time.time()
        if rv != 0:
            print "gpu FAILED (status=%d, cmd=%s)" % (rv, ' '.join(cmd))
            return False

        if diffFiles(outfile + ext, outfile + ".latest"):
            time.sleep(0.1)  #try letting the filesystem settle down
            if diffFiles(outfile + ext, outfile + ".latest"):
                print "gpu FAILED (wrong output)"
                return False

        print "gpu PASSED (took %.2fs)" % (t2 - t1)
        return True

    return test()
Example #10
def main(argv):
    t1 = time.time()

    global app
    global cfg
    global ignore_list
    global defaultArgs
    global substderr
    global options

    config_tool_path = os.path.split(argv[0])[0] + "/configtool.py"
    fast = False

    parser = optparse.OptionParser(usage="usage: %prog [options] BENCHMARK")
    parser.add_option("--min", type="int", dest="min", default=1)
    parser.add_option("-n",
                      "--random",
                      "--max",
                      type="int",
                      dest="n",
                      default=-1)
    parser.add_option("--offset", type="int", dest="offset", default=0)
    parser.add_option("--max-sec", type="float", dest="maxsec", default=0)
    parser.add_option("-d",
                      "--debug",
                      action="store_true",
                      dest="debug",
                      default=False)
    parser.add_option("-f",
                      "--fast",
                      action="store_true",
                      dest="fast",
                      default=False)
    parser.add_option("--threads",
                      type="int",
                      dest="threads",
                      default=pbutil.cpuCount())
    parser.add_option("-c", "--config", dest="config", default=None)
    parser.add_option("--noisolation",
                      action="store_true",
                      dest="noisolation",
                      default=False)
    parser.add_option("--print",
                      action="store_true",
                      dest="justprint",
                      default=False)
    parser.add_option("--time",
                      action="store_true",
                      dest="time",
                      default=False)
    parser.add_option("--acctrials",
                      type="int",
                      dest="acctrials",
                      default=None)
    parser.add_option("--accimprovetries",
                      type="int",
                      dest="accimprovetries",
                      default=None)
    parser.add_option("--trials", type="int", dest="trials", default=None)
    parser.add_option("--trials-sec",
                      type="float",
                      dest="trialssec",
                      default=None)
    parser.add_option("--trials-max",
                      type="int",
                      dest="trialsmax",
                      default=None)
    parser.add_option("--transform", dest="transform", default=None)
    options, args = parser.parse_args()

    if len(args) != 1:
        parser.error("expected benchmark name as arg")

    cfg = options.config
    app = args[0]

    pbutil.chdirToPetabricksRoot()
    pbutil.compilePetabricks()
    app = pbutil.normalizeBenchmarkName(app)
    pbutil.compileBenchmarks([app])

    if options.debug:
        substderr = sys.__stderr__

    if cfg is None:
        cfg = pbutil.benchmarkToCfg(app)

    defaultArgs = [
        '--config=' + cfg,
        '--threads=%d' % options.threads,
        '--offset=%d' % options.offset,
        '--min=%d' % options.min
    ]

    if options.noisolation:
        defaultArgs.append("--noisolation")

    if options.acctrials is not None:
        defaultArgs.append("--acctrials=%d" % options.acctrials)
    if options.trials is not None:
        defaultArgs.append("--trials=%d" % options.trials)
    if options.trialssec is not None:
        defaultArgs.append("--trials-sec=%f" % options.trialssec)
    if options.trialsmax is not None:
        defaultArgs.append("--trials-max=%d" % options.trialsmax)
    if options.accimprovetries is not None:
        defaultArgs.append("--accimprovetries=%d" % options.accimprovetries)

    getIgnoreList()

    try:
        infoxml = parse(pbutil.benchmarkToInfo(app))
    except:
        print "Cannot parse:", pbutil.benchmarkToInfo(app)
        sys.exit(-1)

#print "Reseting config entries"
#reset()

#build index of transforms
    for t in infoxml.getElementsByTagName("transform"):
        transforms[nameof(t)] = t
        if t.getAttribute("templateChoice") == "0":
            transforms[t.getAttribute("templateName")] = t

    if options.transform is None:
        maintx = transforms[mainname()]
    else:
        maintx = transforms[options.transform]

    print "Call tree:"
    walkCallTree(maintx, fnup=printTx)
    print
    print "Autotuning:"

    progress.status("building work queue")

    if options.n <= 0:
        tasks.append(TuneTask("determineInputSizes", determineInputSizes))

    if options.time:
        tasks.append(TuneTask("runTimingTest", lambda: runTimingTest(maintx)))

    #build list of tasks
    if not options.fast:
        walkCallTree(
            maintx, lambda tx, depth, loops: enqueueAutotuneCmds(
                tx, maintx, 1, depth, loops))
    walkCallTree(
        maintx, lambda tx, depth, loops: enqueueAutotuneCmds(
            tx, maintx, 2, depth, loops))

    if options.time:
        tasks.append(TuneTask("runTimingTest", lambda: runTimingTest(maintx)))

    progress.status("autotuning")

    while len(tasks) > 0:
        w1 = remainingTaskWeight()
        task = tasks.pop(0)
        w2 = remainingTaskWeight()
        progress.remaining(w1, w2)
        task.run()
    progress.clear()

    t2 = time.time()
    sec = t2 - t1

    print "autotuning took %.2f sec" % (t2 - t1)
    for k, v in taskStats.items():
        print "  %.2f sec in %s" % (v.sec, k)
        sec -= v.sec
    print "  %.2f sec in unknown" % sec

    names = taskStats.keys()
    weights = map(lambda x: x.sec / float(max(x.count, 1)), taskStats.values())
    scale = len(weights) / sum(weights)
    print "Suggested weights:"
    print "taskStats = {" + ", ".join(
        map(lambda i: "'%s':TaskStats(%.2f)" %
            (names[i], scale * weights[i]), xrange(len(names)))) + "}"