Example 1
def recompile():
    pbutil.chdirToPetabricksRoot()
    config.benchmark = pbutil.normalizeBenchmarkName(config.benchmark)
    config.output_cfg = pbutil.benchmarkToCfg(config.benchmark)
    if config.recompile:
        pbutil.compilePetabricks()
        pbutil.compileBenchmarks([config.benchmark])
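
For context, a hedged sketch of how recompile() might be driven, assuming config is the tuner's global configuration module: the attribute names (benchmark, recompile, output_cfg) come from the snippet above, while the wrapper itself is purely illustrative.

def prepare_benchmark(benchmark_name, rebuild=True):
    # Illustrative wrapper (not part of the original script): populate the
    # attributes recompile() reads, run it, and hand back the path of the
    # generated configuration file.
    config.benchmark = benchmark_name
    config.recompile = rebuild
    recompile()
    return config.output_cfg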
Example 2
def recompile():
  pbutil.chdirToPetabricksRoot()
  config.benchmark = pbutil.normalizeBenchmarkName(config.benchmark)
  config.output_cfg = pbutil.benchmarkToCfg(config.benchmark)
  if config.recompile:
    pbutil.compilePetabricks()
    pbutil.compileBenchmarks([config.benchmark])
Example 3
def print_tunables(benchmark, choice2id2label):
  cfg_file = pbutil.benchmarkToCfg(benchmark)
  def fix_rule_numbers(key, val):
    """If the tunable is an algorithm choice, convert the index to the label."""
    if key in choice2id2label:
      return (key, choice2id2label[key][int(val)])
    else:
      return (key, val)
  configtool.processConfigFile(cfg_file, "/dev/stdout", [fix_rule_numbers])
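
A hedged sketch of the mapping print_tunables expects: the nesting (tunable name -> choice index -> label) is what fix_rule_numbers relies on above, but the concrete key and label strings below are made-up placeholders.

# Illustrative only: tunable name -> {algorithm-choice index: human-readable label}
choice2id2label = {
  "SortSubArray_rule": {0: "insertion_sort", 1: "quick_sort", 2: "merge_sort"},
}
# With this table, fix_rule_numbers("SortSubArray_rule", "2") would return
# ("SortSubArray_rule", "merge_sort"); keys not in the table pass through unchanged.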
Example 4
def testBenchmark(b):
  name=b[0]
  bin=pbutil.benchmarkToBin(name)
  cfg=pbutil.benchmarkToCfg(name)

  if not os.path.isfile(bin):
    return False
  
  #build cmd
  hash=name
  iofiles=[]
  for x in b[1:]:
    iofiles.append(resolveInputPath(x))
    hash+=" "+os.path.basename(x)
  outfile="./testdata/.output/"+re.sub("[ /.]",'_',hash)
  iofiles.append(outfile+".latest")

  try:
    cmd=[bin, '--fixedrandom', '--config=%s.cfg'%outfile, '--reset']
    if run(cmd) != 0:
      print "ERROR: reset config failed"
      return False
  except OSError:
    print "ERROR: program not runnable"
    return False

  if os.path.isfile("%s.cfg.default"%outfile):
    shutil.copy("%s.cfg.default"%outfile, "%s.cfg"%outfile)

  try:
    infoxml=parse(pbutil.benchmarkToInfo(name))
  except Exception:
    print "invalid *.info file"
    return False

  def test():
    cmd=[bin, '--fixedrandom', '--config=%s.cfg'%outfile]
    cmd.extend(iofiles)
    rv = run(cmd)
    if rv != 0:
      print "run FAILED (status=%d, cmd=%s)"%(rv, ' '.join(cmd))
      return False

    if diffFiles(outfile, outfile+".latest"):
      time.sleep(0.1) #try letting the filesystem settle down
      if diffFiles(outfile, outfile+".latest"):
        print "run FAILED (wrong output)"
        return False
    
    print "run PASSED"
    return True

  return test()
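
testBenchmark() relies on two helpers defined elsewhere in the test driver: run() and diffFiles(). A minimal sketch of plausible implementations, inferred only from the calling conventions visible above (an exit status compared against 0, and a truthy result when the files differ); the bodies are assumptions, not the original code.

import filecmp
import subprocess

def run(cmd):
  # Execute the benchmark command and return its exit status (0 means success).
  return subprocess.call(cmd)

def diffFiles(a, b):
  # Return True when the two output files differ (or one of them is missing).
  try:
    return not filecmp.cmp(a, b, shallow=False)
  except OSError:
    return True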
Example 5
def main(argv):
  t1=time.time()

  global app
  global cfg 
  global ignore_list
  global defaultArgs
  global substderr
  global options 

  config_tool_path = os.path.split(argv[0])[0] + "/configtool.py"
  fast = False

  parser = optparse.OptionParser(usage="usage: %prog [options] BENCHMARK")
  parser.add_option("--min", type="int", dest="min", default=1)
  parser.add_option("-n", "--random", "--max", type="int", dest="n", default=-1)
  parser.add_option("--offset", type="int", dest="offset", default=0)
  parser.add_option("--max-sec", type="float", dest="maxsec", default=0)
  parser.add_option("-d", "--debug",  action="store_true", dest="debug", default=False)
  parser.add_option("-f", "--fast",  action="store_true", dest="fast", default=False)
  parser.add_option("--threads",      type="int", dest="threads", default=pbutil.cpuCount())
  parser.add_option("-c", "--config", dest="config", default=None)
  parser.add_option("--noisolation", action="store_true", dest="noisolation", default=False)
  parser.add_option("--print", action="store_true", dest="justprint", default=False)
  parser.add_option("--time", action="store_true", dest="time", default=False)
  parser.add_option("--acctrials", type="int", dest="acctrials", default=None)
  parser.add_option("--accimprovetries", type="int", dest="accimprovetries", default=None)
  parser.add_option("--trials", type="int", dest="trials", default=None)
  parser.add_option("--trials-sec", type="float", dest="trialssec", default=None)
  parser.add_option("--trials-max", type="int", dest="trialsmax", default=None)
  parser.add_option("--transform", dest="transform", default=None)
  options,args = parser.parse_args()

  if len(args) != 1:
    parser.error("expected benchmark name as arg")

  cfg=options.config
  app=args[0]

  pbutil.chdirToPetabricksRoot()
  pbutil.compilePetabricks()
  app = pbutil.normalizeBenchmarkName(app)
  pbutil.compileBenchmarks([app])
  
  if options.debug:
    substderr = sys.__stderr__

  if cfg is None:
    cfg = pbutil.benchmarkToCfg(app)

  defaultArgs = ['--config='+cfg, '--threads=%d'%options.threads, '--offset=%d'%options.offset, '--min=%d'%options.min]

  if options.noisolation:
    defaultArgs.append("--noisolation")

  if options.acctrials is not None:
    defaultArgs.append("--acctrials=%d"%options.acctrials)
  if options.trials is not None:
    defaultArgs.append("--trials=%d"%options.trials)
  if options.trialssec is not None:
    defaultArgs.append("--trials-sec=%f"%options.trialssec)
  if options.trialsmax is not None:
    defaultArgs.append("--trials-max=%d"%options.trialsmax)
  if options.accimprovetries is not None:
    defaultArgs.append("--accimprovetries=%d"%options.accimprovetries)

  getIgnoreList()

  try:
    infoxml = parse(pbutil.benchmarkToInfo(app))
  except Exception:
    print "Cannot parse:", pbutil.benchmarkToInfo(app)
    sys.exit(-1)

  #print "Resetting config entries"
  #reset()

  #build index of transforms
  for t in infoxml.getElementsByTagName("transform"):
    transforms[nameof(t)]=t
    if t.getAttribute("templateChoice")=="0":
      transforms[t.getAttribute("templateName")] = t

  if options.transform is None:
    maintx = transforms[mainname()]
  else:
    maintx = transforms[options.transform]
  
  print "Call tree:"
  walkCallTree(maintx, fnup=printTx)
  print
  print "Autotuning:"

  progress.status("building work queue")
 
  if options.n <= 0:
    tasks.append(TuneTask("determineInputSizes", determineInputSizes))
    
  if options.time:
    tasks.append(TuneTask("runTimingTest", lambda:runTimingTest(maintx)))

  #build list of tasks
  if not options.fast:
    walkCallTree(maintx, lambda tx, depth, loops: enqueueAutotuneCmds(tx, maintx, 1, depth, loops))
  walkCallTree(maintx, lambda tx, depth, loops: enqueueAutotuneCmds(tx, maintx, 2, depth, loops))
  
  if options.time:
    tasks.append(TuneTask("runTimingTest", lambda:runTimingTest(maintx)))

  progress.status("autotuning")

  while len(tasks)>0:
    w1=remainingTaskWeight()
    task=tasks.pop(0)
    w2=remainingTaskWeight()
    progress.remaining(w1, w2)
    task.run()
  progress.clear()

  t2=time.time()
  sec=t2-t1

  print "autotuning took %.2f sec"%(t2-t1)
  for k,v in taskStats.items():
    print "  %.2f sec in %s"%(v.sec, k)
    sec -= v.sec
  print "  %.2f sec in unknown"%sec
  
  names=taskStats.keys()
  weights=map(lambda x: x.sec/float(max(x.count, 1)), taskStats.values())
  scale=len(weights)/sum(weights)
  print "Suggested weights:"
  print "taskStats = {" + ", ".join(map(lambda i: "'%s':TaskStats(%.2f)"%(names[i], scale*weights[i]), xrange(len(names)))) + "}"
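
The queue loop above drives TuneTask objects and reports per-task timings through taskStats, both defined elsewhere in the autotuner. A minimal sketch of the shape implied by the calls in main() (a run() method, and TaskStats entries exposing sec, count, and a constructor weight); the bookkeeping details are assumptions.

class TaskStats:
  def __init__(self, weight=1.0):
    # weight feeds remainingTaskWeight(); sec and count accumulate measured runtimes.
    self.weight = weight
    self.sec = 0.0
    self.count = 0

class TuneTask:
  def __init__(self, name, fn):
    self.name = name
    self.fn = fn
  def run(self):
    # Time the task body and fold the result into the global taskStats table.
    t0 = time.time()
    self.fn()
    stats = taskStats.setdefault(self.name, TaskStats())
    stats.sec += time.time() - t0
    stats.count += 1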
Example 6
def testBenchmark(b):
    name = b[0]
    bin = pbutil.benchmarkToBin(name)
    cfg = pbutil.benchmarkToCfg(name)

    if not os.path.isfile(bin):
        return False

    #build cmd
    hash = name
    iofiles = []
    for x in b[1:]:
        iofiles.append(resolveInputPath(x))
        hash += " " + os.path.basename(x)
    outfile = "./testdata/.output/" + re.sub("[ /.]", '_', hash)
    iofiles.append(outfile + ".latest")

    try:
        cmd = [bin, '--fixedrandom', '--config=%s.cfg' % outfile, '--reset']
        if run(cmd) != 0:
            print "ERROR: reset config failed"
            return False
    except OSError:
        print "ERROR: program not runnable"
        return False

    if os.path.isfile("%s.cfg.default" % outfile):
        shutil.copy("%s.cfg.default" % outfile, "%s.cfg" % outfile)

    try:
        infoxml = parse(pbutil.benchmarkToInfo(name))
    except Exception:
        print "invalid *.info file"
        return False

    def test():
        if isFloatingPoint() and os.path.exists(outfile + ".float"):
            ext = ".float"
            print "FLOAT"
        else:
            ext = ""

        #run cpu config
        cmd = [bin, '--fixedrandom', '--config=%s.cfg' % outfile]
        cmd.extend(iofiles)
        t1 = time.time()
        rv = run(cmd)
        t2 = time.time()
        if rv != 0:
            print "run FAILED (status=%d, cmd=%s)" % (rv, ' '.join(cmd))
            return False

        if diffFiles(outfile + ext, outfile + ".latest"):
            time.sleep(0.1)  #try letting the filesystem settle down
            if diffFiles(outfile + ext, outfile + ".latest"):
                print "run FAILED (wrong output)"
                return False

        print "run PASSED (took %.2fs)" % (t2 - t1)

        if (not haveOpenCL()) or (not os.path.exists(outfile + ".gpucfg")):
            return True

        #run gpu config
        cmd = [bin, '--fixedrandom', '--config=%s.gpucfg' % outfile]
        cmd.extend(iofiles)
        t1 = time.time()
        rv = run(cmd)
        t2 = time.time()
        if rv != 0:
            print "gpu FAILED (status=%d, cmd=%s)" % (rv, ' '.join(cmd))
            return False

        if diffFiles(outfile + ext, outfile + ".latest"):
            time.sleep(0.1)  #try letting the filesystem settle down
            if diffFiles(outfile + ext, outfile + ".latest"):
                print "gpu FAILED (wrong output)"
                return False

        print "gpu PASSED (took %.2fs)" % (t2 - t1)
        return True

    return test()
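
Both test variants call resolveInputPath() to turn the entries of b[1:] into usable paths. A hedged sketch of such a helper; the lookup rule (keep an existing path, otherwise fall back to the benchmark test data directory) is an assumption suggested by the ./testdata/.output/ prefix used above.

def resolveInputPath(path):
    # Illustrative resolution only: prefer paths that already exist, otherwise
    # assume the input file lives under ./testdata/.
    if os.path.isfile(path):
        return path
    return os.path.join("./testdata", path)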
Example 7
def main(argv):
    t1 = time.time()

    global app
    global cfg
    global ignore_list
    global defaultArgs
    global substderr
    global options

    config_tool_path = os.path.split(argv[0])[0] + "/configtool.py"
    fast = False

    parser = optparse.OptionParser(usage="usage: %prog [options] BENCHMARK")
    parser.add_option("--min", type="int", dest="min", default=1)
    parser.add_option("-n",
                      "--random",
                      "--max",
                      type="int",
                      dest="n",
                      default=-1)
    parser.add_option("--offset", type="int", dest="offset", default=0)
    parser.add_option("--max-sec", type="float", dest="maxsec", default=0)
    parser.add_option("-d",
                      "--debug",
                      action="store_true",
                      dest="debug",
                      default=False)
    parser.add_option("-f",
                      "--fast",
                      action="store_true",
                      dest="fast",
                      default=False)
    parser.add_option("--threads",
                      type="int",
                      dest="threads",
                      default=pbutil.cpuCount())
    parser.add_option("-c", "--config", dest="config", default=None)
    parser.add_option("--noisolation",
                      action="store_true",
                      dest="noisolation",
                      default=False)
    parser.add_option("--print",
                      action="store_true",
                      dest="justprint",
                      default=False)
    parser.add_option("--time",
                      action="store_true",
                      dest="time",
                      default=False)
    parser.add_option("--acctrials",
                      type="int",
                      dest="acctrials",
                      default=None)
    parser.add_option("--accimprovetries",
                      type="int",
                      dest="accimprovetries",
                      default=None)
    parser.add_option("--trials", type="int", dest="trials", default=None)
    parser.add_option("--trials-sec",
                      type="float",
                      dest="trialssec",
                      default=None)
    parser.add_option("--trials-max",
                      type="int",
                      dest="trialsmax",
                      default=None)
    parser.add_option("--transform", dest="transform", default=None)
    options, args = parser.parse_args()

    if len(args) != 1:
        parser.error("expected benchmark name as arg")

    cfg = options.config
    app = args[0]

    pbutil.chdirToPetabricksRoot()
    pbutil.compilePetabricks()
    app = pbutil.normalizeBenchmarkName(app)
    pbutil.compileBenchmarks([app])

    if options.debug:
        substderr = sys.__stderr__

    if cfg is None:
        cfg = pbutil.benchmarkToCfg(app)

    defaultArgs = [
        '--config=' + cfg,
        '--threads=%d' % options.threads,
        '--offset=%d' % options.offset,
        '--min=%d' % options.min
    ]

    if options.noisolation:
        defaultArgs.append("--noisolation")

    if options.acctrials is not None:
        defaultArgs.append("--acctrials=%d" % options.acctrials)
    if options.trials is not None:
        defaultArgs.append("--trials=%d" % options.trials)
    if options.trialssec is not None:
        defaultArgs.append("--trials-sec=%f" % options.trialssec)
    if options.trialsmax is not None:
        defaultArgs.append("--trials-max=%d" % options.trialsmax)
    if options.accimprovetries is not None:
        defaultArgs.append("--accimprovetries=%d" % options.accimprovetries)

    getIgnoreList()

    try:
        infoxml = parse(pbutil.benchmarkToInfo(app))
    except Exception:
        print "Cannot parse:", pbutil.benchmarkToInfo(app)
        sys.exit(-1)

    #print "Resetting config entries"
    #reset()

    #build index of transforms
    for t in infoxml.getElementsByTagName("transform"):
        transforms[nameof(t)] = t
        if t.getAttribute("templateChoice") == "0":
            transforms[t.getAttribute("templateName")] = t

    if options.transform is None:
        maintx = transforms[mainname()]
    else:
        maintx = transforms[options.transform]

    print "Call tree:"
    walkCallTree(maintx, fnup=printTx)
    print
    print "Autotuning:"

    progress.status("building work queue")

    if options.n <= 0:
        tasks.append(TuneTask("determineInputSizes", determineInputSizes))

    if options.time:
        tasks.append(TuneTask("runTimingTest", lambda: runTimingTest(maintx)))

    #build list of tasks
    if not options.fast:
        walkCallTree(
            maintx, lambda tx, depth, loops: enqueueAutotuneCmds(
                tx, maintx, 1, depth, loops))
    walkCallTree(
        maintx, lambda tx, depth, loops: enqueueAutotuneCmds(
            tx, maintx, 2, depth, loops))

    if options.time:
        tasks.append(TuneTask("runTimingTest", lambda: runTimingTest(maintx)))

    progress.status("autotuning")

    while len(tasks) > 0:
        w1 = remainingTaskWeight()
        task = tasks.pop(0)
        w2 = remainingTaskWeight()
        progress.remaining(w1, w2)
        task.run()
    progress.clear()

    t2 = time.time()
    sec = t2 - t1

    print "autotuning took %.2f sec" % (t2 - t1)
    for k, v in taskStats.items():
        print "  %.2f sec in %s" % (v.sec, k)
        sec -= v.sec
    print "  %.2f sec in unknown" % sec

    names = taskStats.keys()
    weights = map(lambda x: x.sec / float(max(x.count, 1)), taskStats.values())
    scale = len(weights) / sum(weights)
    print "Suggested weights:"
    print "taskStats = {" + ", ".join(
        map(lambda i: "'%s':TaskStats(%.2f)" %
            (names[i], scale * weights[i]), xrange(len(names)))) + "}"
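
To make the final weight suggestion concrete, a small worked example with assumed timings (the numbers are illustrative; the formula is exactly the one printed above): given two task types measuring 4.0 s over 2 runs and 1.0 s over 1 run, the per-run costs are 2.0 and 1.0, scale = 2 / 3.0, and the suggested weights come out to roughly 1.33 and 0.67, so they average to 1.

# Illustrative check of the weight formula used in main() (inputs are assumed).
per_run = [4.0 / 2, 1.0 / 1]              # sec / max(count, 1) per task type
scale = len(per_run) / sum(per_run)       # 2 / 3.0
suggested = [scale * w for w in per_run]  # approximately [1.33, 0.67]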