Example #1
def recompile():
    # Rebuild the PetaBricks compiler and (optionally) the chosen benchmark,
    # recording where the benchmark's generated .cfg file lives.
    pbutil.chdirToPetabricksRoot()
    config.benchmark = pbutil.normalizeBenchmarkName(config.benchmark)
    config.output_cfg = pbutil.benchmarkToCfg(config.benchmark)
    if config.recompile:
        pbutil.compilePetabricks()
        pbutil.compileBenchmarks([config.benchmark])
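A minimal driver sketch for this helper, assuming the `config` module exposes the fields the function reads (`benchmark`, `recompile`) and writes (`output_cfg`); the benchmark name here is hypothetical:

import config   # assumed: the tuner's global-settings module used above
import pbutil

config.benchmark = 'multiply'   # hypothetical benchmark name
config.recompile = True
recompile()
print "config written to:", config.output_cfg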
Example #2
def main(argv):
  if len(argv) != 2:
    print "No program specified."
    print
    print __doc__
    sys.exit(1)
  app = argv[1]
  app = pbutil.normalizeBenchmarkName(app)

  info_file = pbutil.benchmarkToInfo(app)
  try:
    infoxml = minidom.parse(info_file)
  except Exception:  # narrowed from a bare except:, which would also swallow KeyboardInterrupt
    print "Parse error while parsing .info XML file:", info_file
    sys.exit(1)

  print_report(app, infoxml)
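A still narrower version of that guard would name the two likely failures, a missing file and malformed XML; the path below is hypothetical:

import sys
from xml.dom import minidom
from xml.parsers.expat import ExpatError

info_file = 'examples/simple/add.info'   # hypothetical .info path
try:
  infoxml = minidom.parse(info_file)
except (IOError, ExpatError) as e:
  print "Parse error while parsing .info XML file:", info_file, "-", e
  sys.exit(1)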
Example #3
    headers = ['time', 'minperf', 'perf_on_%d' % n, 'perf_on_%d_ci' % n,
               'tests', 'candidates', 'input_size', 'invperf', 'tests_timeout']
    print '#', ','.join(headers)
    t = csv.DictWriter(sys.stdout, headers, extrasaction='ignore')
    t.writerows(rows)


if __name__ == "__main__":
    from optparse import OptionParser
    parser = OptionParser(
        usage="usage: graphgen.py [options] benchmark candidatelog.csv")
    parser.add_option('--trials', type='int', default=10)
    parser.add_option('--confidence', type='float', default=.95)
    parser.add_option('--timeout', type='float', default=5.0)
    parser.add_option('--onlyrounds', type='int', default=True)  # note: the default is the bool True, not an int
    parser.add_option('-n', type='int', default=1024)

    warnings.simplefilter('ignore', tunerwarnings.TooManyTrials)

    (options, args) = parser.parse_args()
    if len(args) != 2:
        parser.print_usage()
        sys.exit(1)
    benchmark = args[0]
    config = os.path.abspath(args[1])
    pbutil.chdirToPetabricksRoot()
    #pbutil.compilePetabricks();
    benchmark = pbutil.normalizeBenchmarkName(benchmark)
    #pbutil.compileBenchmarks([benchmark])
    storagedirs.callWithLogDir(lambda: main(benchmark, options.n, config),
                               '/tmp', True)
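`extrasaction='ignore'` is what lets `DictWriter` accept rows carrying more keys than the chosen headers; a self-contained sketch:

import csv
import sys

rows = [{'time': 0.5, 'tests': 3, 'debug_note': 'dropped'}]
headers = ['time', 'tests']
print '#', ','.join(headers)   # same comment-style header row as above
w = csv.DictWriter(sys.stdout, headers, extrasaction='ignore')
w.writerows(rows)              # the extra 'debug_note' key is silently skipped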
Example #4
def main(argv):
  t1=time.time()

  global app
  global cfg 
  global ignore_list
  global defaultArgs
  global substderr
  global options 

  config_tool_path = os.path.split(argv[0])[0] + "/configtool.py"
  fast = False

  parser = optparse.OptionParser(usage="usage: %prog [options] BENCHMARK")
  parser.add_option("--min", type="int", dest="min", default=1)
  parser.add_option("-n", "--random", "--max", type="int", dest="n", default=-1)
  parser.add_option("--offset", type="int", dest="offset", default=0)
  parser.add_option("--max-sec", type="float", dest="maxsec", default=0)
  parser.add_option("-d", "--debug",  action="store_true", dest="debug", default=False)
  parser.add_option("-f", "--fast",  action="store_true", dest="fast", default=False)
  parser.add_option("--threads",      type="int", dest="threads", default=pbutil.cpuCount())
  parser.add_option("-c", "--config", dest="config", default=None)
  parser.add_option("--noisolation", action="store_true", dest="noisolation", default=False)
  parser.add_option("--print", action="store_true", dest="justprint", default=False)
  parser.add_option("--time", action="store_true", dest="time", default=False)
  parser.add_option("--acctrials", type="int", dest="acctrials", default=None)
  parser.add_option("--accimprovetries", type="int", dest="accimprovetries", default=None)
  parser.add_option("--trials", type="int", dest="trials", default=None)
  parser.add_option("--trials-sec", type="float", dest="trialssec", default=None)
  parser.add_option("--trials-max", type="int", dest="trialsmax", default=None)
  parser.add_option("--transform", dest="transform", default=None)
  options,args = parser.parse_args()

  if len(args) != 1:
    parser.error("expected benchmark name as arg")

  cfg=options.config
  app=args[0]

  pbutil.chdirToPetabricksRoot()
  pbutil.compilePetabricks()
  app = pbutil.normalizeBenchmarkName(app)
  pbutil.compileBenchmarks([app])
  
  if options.debug:
    substderr = sys.__stderr__

  if cfg is None:
    cfg = pbutil.benchmarkToCfg(app)

  defaultArgs = ['--config='+cfg, '--threads=%d'%options.threads, '--offset=%d'%options.offset, '--min=%d'%options.min]

  if options.noisolation:
    defaultArgs.append("--noisolation")

  if options.acctrials is not None:
    defaultArgs.append("--acctrials=%d"%options.acctrials)
  if options.trials is not None:
    defaultArgs.append("--trials=%d"%options.trials)
  if options.trialssec is not None:
    defaultArgs.append("--trials-sec=%f"%options.trialssec)
  if options.trialsmax is not None:
    defaultArgs.append("--trials-max=%d"%options.trialsmax)
  if options.accimprovetries is not None:
    defaultArgs.append("--accimprovetries=%d"%options.accimprovetries)

  getIgnoreList()

  try:
    infoxml = parse(pbutil.benchmarkToInfo(app))
  except Exception:  # narrowed from a bare except:, which would also swallow KeyboardInterrupt
    print "Cannot parse:", pbutil.benchmarkToInfo(app)
    sys.exit(-1)

  #print "Resetting config entries"
  #reset()

  #build index of transforms
  for t in infoxml.getElementsByTagName("transform"):
    transforms[nameof(t)]=t
    if t.getAttribute("templateChoice")=="0":
      transforms[t.getAttribute("templateName")] = t

  if options.transform is None:
    maintx = transforms[mainname()]
  else:
    maintx = transforms[options.transform]
  
  print "Call tree:"
  walkCallTree(maintx, fnup=printTx)
  print
  print "Autotuning:"

  progress.status("building work queue")
 
  if options.n <= 0:
    tasks.append(TuneTask("determineInputSizes", determineInputSizes))
    
  if options.time:
    tasks.append(TuneTask("runTimingTest", lambda:runTimingTest(maintx)))

  #build list of tasks
  if not options.fast:
    walkCallTree(maintx, lambda tx, depth, loops: enqueueAutotuneCmds(tx, maintx, 1, depth, loops))
  walkCallTree(maintx, lambda tx, depth, loops: enqueueAutotuneCmds(tx, maintx, 2, depth, loops))
  
  if options.time:
    tasks.append(TuneTask("runTimingTest", lambda:runTimingTest(maintx)))

  progress.status("autotuning")

  while len(tasks)>0:
    w1=remainingTaskWeight()
    task=tasks.pop(0)
    w2=remainingTaskWeight()
    progress.remaining(w1, w2)
    task.run()
  progress.clear()

  t2=time.time()
  sec=t2-t1

  print "autotuning took %.2f sec"%(t2-t1)
  for k,v in taskStats.items():
    print "  %.2f sec in %s"%(v.sec, k)
    sec -= v.sec
  print "  %.2f sec in unknown"%sec
  
  names=taskStats.keys()
  weights=map(lambda x: x.sec/float(max(x.count, 1)), taskStats.values())
  scale=len(weights)/sum(weights)
  print "Suggested weights:"
  print "taskStats = {" + ", ".join(map(lambda i: "'%s':TaskStats(%.2f)"%(names[i], scale*weights[i]), xrange(len(names)))) + "}"
Example #5
        else:
          break
      warnings.warn(ComparisonFailed(self.n, a, b))
      return 0
    return compare

  def cleanup(self):
    if config.cleanup_inputs:
      storagedirs.clearInputs()
      self.inputs=[]

if __name__ == "__main__":
  print "TESTING CANDIDATETESTER"
  pbutil.chdirToPetabricksRoot()
  pbutil.compilePetabricks()
  benchmark=pbutil.normalizeBenchmarkName('multiply')
  pbutil.compileBenchmarks([benchmark])
  tester = CandidateTester(benchmark, 768)
  try:
    candidate = Candidate(defaultConfigFile(pbutil.benchmarkToBin(tester.app)))
    candidate2 = Candidate(defaultConfigFile(pbutil.benchmarkToBin(tester.app)))
    candidate2.config['MatrixMultiplyTransposed_0_lvl1_rule']=1
    tester.test(candidate)
    tester.test(candidate)
    tester.test(candidate)
    tester.test(candidate2)
    tester.test(candidate2)
    tester.test(candidate2)
    print candidate.metrics[0]
    print candidate2.metrics[0]
    print str(candidate.metrics[0][768])
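The `try:` block here is cut off by the excerpt; a plausible completion (an assumption, not the original file) pairs it with `finally:` so generated test inputs are always released via the `cleanup()` defined above:

tester = CandidateTester(benchmark, 768)
try:
  candidate = Candidate(defaultConfigFile(pbutil.benchmarkToBin(tester.app)))
  tester.test(candidate)
  print candidate.metrics[0]
finally:
  tester.cleanup()   # assumed pairing; mirrors config.cleanup_inputs above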
Example #6
  if sys.argv[1] == "--help":
    print "**** USAGE: ",sys.argv[0], "[BENCHMARK] [args]"
    print "****    1) Compile pbc"
    print "****    2) If BENCHMARK is given, compile BENCHMARK"
    print "****    3) If args are given, run BENCHMARK"
    sys.exit(1)
  else:
    benchmark=sys.argv[1]
    if os.path.isfile(benchmark) or os.path.isfile(benchmark+".pbcc"):
      benchmark=os.path.abspath(benchmark)

pbutil.chdirToPetabricksRoot()
pbutil.compilePetabricks()

if benchmark is not None:
  benchmark=pbutil.normalizeBenchmarkName(relpath(benchmark))

cmd=[]
if gdb:
  cmd.extend(["/usr/bin/gdb", "--args"])

if len(sys.argv)==2:
  cmd.extend(["./src/pbc", pbutil.benchmarkToSrc(benchmark)])
  print " ".join(cmd)
  os.execv(cmd[0], cmd)
elif len(sys.argv)>2:
  pbutil.compileBenchmarks([benchmark])
  cmd.extend([pbutil.benchmarkToBin(benchmark)])
  cmd.extend(sys.argv[2:])
  print " ".join(cmd)
  os.execv(cmd[0], cmd)
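Both branches end in `os.execv`, which replaces the current Python process with the target command, so nothing after it ever runs; note that the argument list passed as the second parameter must include `argv[0]` itself. A tiny standalone sketch:

import os

cmd = ['/bin/echo', 'hello', 'world']
print " ".join(cmd)    # echo the command line first, as the script above does
os.execv(cmd[0], cmd)  # replaces this process image; no code below executes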