Example #1
def autotuneCutoffBinarySearch(tx, tunable, n, min=0, max=-1):
  progress.push()
  progress.status("* optimize " + nameof(tx) + " tunable " + nameof(tunable))
  val, impact = optimizeParam(tx, n, nameof(tunable), min, max, best=(-1, goodtimelimit()))
  print "* optimize " + nameof(tx) + " tunable " + nameof(tunable) + " = %d "%val + "(impact=%.2f)"%impact
  setConfigVal(tunable, val)
  progress.pop()
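
Example #1 (like most of the snippets below) relies on a small progress-reporting helper: nested push()/pop() scopes, a one-line status(), a fractional remaining() estimate, and discrete remainingTicks()/tick() work units. The stub below is a minimal sketch inferred only from the calls made in these examples; it is not the real PetaBricks progress module, which may differ.

# Hypothetical stub of the progress interface assumed by the examples;
# the real module ships with the PetaBricks scripts.
import sys

_scopes = []

def push():                # open a nested progress scope
  _scopes.append(None)

def pop():                 # close the innermost scope
  _scopes.pop()

def status(msg):           # one-line status text shown to the user
  sys.stderr.write(msg + "\n")

def remaining(amount):     # fraction (or unit count) of work still left
  pass

def remainingTicks(n):     # declare n discrete work units
  pass

def tick():                # mark one work unit as finished
  pass
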
Example #2
def autotuneAlgchoice(tx, site, ctx, n, cutoffs):
    progress.push()
    cmd = mkcmd([
        "--transform=" + nameof(ctx), "--autotune",
        "--autotune-transform=" + nameof(tx),
        "--autotune-site=%d" % site,
        "--max=%d" % n,
        "--max-sec=%d" % goodtimelimit()
    ])
    for x in cutoffs:
        cmd.append("--autotune-tunable=" + nameof(x))
    if options.debug or options.justprint:
        print ' '.join(cmd)
    if options.justprint:
        progress.pop()
        return True
    runsCur = 1
    runsLast = 1
    inputCur = 1
    #progress formula assumes linear runtime of program
    calcprog = lambda: 1.0 - (math.log(inputCur, 2) + min(
        1.0, runsCur / float(runsLast))) / (math.log(n, 2) + 1)
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=substderr)
    pfx = "tuning " + nameof(tx) + ":%d - " % site
    if site == -1:
        pfx = "tuning %d cutoffs in %s - " % (len(cutoffs), nameof(tx))
    str = ""
    progress.status(pfx)
    while True:
        line = p.stdout.readline()
        if line == "":
            break
        #BEGIN ITERATION .... / 4096
        m = iterRe.match(line)
        if m:
            inputCur = int(m.group(1))
            runsLast = runsCur
            runsCur = 0
            progress.remaining(calcprog())
        #SLOT[0] KEEP .... = 1.25
        m = slotRe.match(line)
        if m:
            if m.group(1) == "0":
                str = m.group(3)
                progress.status(pfx + str)
        #  * TRY ...
        m = runRe.match(line)
        if m:
            runsCur += 1
            progress.remaining(calcprog())
    progress.pop()
    if p.wait() == 0:
        print "* " + pfx + str
        return True
    else:
        print 'FAILURE OF TUNING STEP:', ' '.join(cmd)
        return False
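
autotuneAlgchoice() drives an external tuning run as a subprocess and screen-scrapes its stdout with three regular expressions (iterRe, slotRe, runRe) defined elsewhere in the module. The patterns below are a hedged reconstruction from the inline comments ("BEGIN ITERATION .... / 4096", "SLOT[0] KEEP .... = 1.25", "* TRY ...") and from the capture groups the parser reads; the real definitions may differ.

# Hypothetical reconstruction of the patterns consumed above; only the
# group positions used by autotuneAlgchoice() are known from the code,
# the rest is guessed from the comments.
import re

iterRe = re.compile(r"BEGIN ITERATION\s+(\d+)\s*/\s*\d+")            # group(1): current input size
slotRe = re.compile(r"SLOT\[(\d+)\]\s+(\w+)\s+(.*\S)\s*=\s*[\d.]+")  # group(1): slot, group(3): kept choice
runRe  = re.compile(r"\s*\* TRY")                                    # one candidate configuration tried
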
Example #3
def runTimingTest(tx):
  progress.push()
  progress.remaining(1)
  progress.status("running timing test")
  t=timingRun(tx, options.n)
  progress.remaining(0)
  if len(results)>0:
    speedup=results[-1]/t
    print "* timing test... %.4f (%.2fx speedup)"%(t, speedup)
  else:
    print "* initial timing test... %.4lf s"%t
  results.append(t)
  progress.pop()
Example #4
def runTimingTest(tx):
    progress.push()
    progress.remaining(1)
    progress.status("running timing test")
    t = timingRun(tx, options.n)
    progress.remaining(0)
    if len(results) > 0:
        speedup = results[-1] / t
        print "* timing test... %.4f (%.2fx speedup)" % (t, speedup)
    else:
        print "* initial timing test... %.4lf s" % t
    results.append(t)
    progress.pop()
Example #5
def autotuneCutoffBinarySearch(tx, tunable, n, min=0, max=-1):
    progress.push()
    progress.status("* optimize " + nameof(tx) + " tunable " + nameof(tunable))
    val, impact = optimizeParam(tx,
                                n,
                                nameof(tunable),
                                min,
                                max,
                                best=(-1, goodtimelimit()))
    print "* optimize " + nameof(tx) + " tunable " + nameof(
        tunable) + " = %d " % val + "(impact=%.2f)" % impact
    setConfigVal(tunable, val)
    progress.pop()
Example #6
def autotuneAlgchoice(tx, site, ctx, n, cutoffs):
  progress.push()
  cmd=mkcmd(["--transform="+nameof(ctx), "--autotune", "--autotune-transform="+nameof(tx),  "--autotune-site=%d"%site,
             "--max=%d"%n, "--max-sec=%d"%goodtimelimit()])
  for x in cutoffs:
    cmd.append("--autotune-tunable="+nameof(x))
  if options.debug or options.justprint:
    print ' '.join(cmd)
  if options.justprint:
    progress.pop()
    return True
  runsCur=1
  runsLast=1
  inputCur=1
  #progress formula assumes linear runtime of program
  calcprog=lambda: 1.0 - (math.log(inputCur,2) + min(1.0,runsCur/float(runsLast)))/(math.log(n,2)+1)
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=substderr)
  pfx="tuning "+nameof(tx)+":%d - "%site
  if site == -1:
    pfx="tuning %d cutoffs in %s - " % (len(cutoffs), nameof(tx))
  str=""
  progress.status(pfx)
  while True:
    line=p.stdout.readline()
    if line == "":
      break
    #BEGIN ITERATION .... / 4096
    m=iterRe.match(line)
    if m:
      inputCur=int(m.group(1))
      runsLast=runsCur
      runsCur=0
      progress.remaining(calcprog())
    #SLOT[0] KEEP .... = 1.25
    m=slotRe.match(line)
    if m:
      if m.group(1)=="0":
        str=m.group(3)
        progress.status(pfx+str)
    #  * TRY ... 
    m=runRe.match(line)
    if m:
      runsCur+=1
      progress.remaining(calcprog())
  progress.pop()
  if p.wait()==0:
    print "* "+pfx+str
    return True
  else:
    print 'FAILURE OF TUNING STEP:', ' '.join(cmd)
    return False
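
The calcprog lambda in both copies of autotuneAlgchoice() estimates remaining work by treating each doubling of the input size as one of log2(n) + 1 equal steps, with runsCur/runsLast as the fraction completed inside the current step (it assumes consecutive iterations need a similar number of runs). A quick numeric check of the formula with illustrative values:

# Worked check of the calcprog formula: n = 4096, halfway through the
# runs at input size 64.
import math
n, inputCur, runsCur, runsLast = 4096, 64, 10, 20
remaining = 1.0 - (math.log(inputCur, 2) + min(1.0, runsCur / float(runsLast))) / (math.log(n, 2) + 1)
print remaining   # (6 + 0.5) of 13 steps done, so 0.5 remains
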
Example #7
def main():
  warnings.simplefilter('ignore', tunerwarnings.NewProgramCrash)
  warnings.simplefilter('ignore', tunerwarnings.TargetNotMet)
  warnings.simplefilter('ignore', tunerwarnings.NanAccuracy)

  progress.push()
  progress.status("compiling benchmarks")

  pbutil.chdirToPetabricksRoot()
  pbutil.compilePetabricks()

  r, lines = pbutil.loadAndCompileBenchmarks("./scripts/pbbenchmark.tests")

  if filter(lambda x: x.rv!=0, r):
    print "compile failed"
    sys.exit(1)

  print 
  print "All scores are relative performance to a baseline system."
  print "Higher is better."
  print

  baselines = dict()
  for line in csv.reader(open("./testdata/configs/baselines.csv")):
    if len(line)>=3:
      baselines[line[0]] = line[1:]

  benchmarks=[]
  for benchmark, cfg, n, accTarg in lines:
    try:
      baseline = baselines[benchmark]
    except KeyError:
      baseline = (1.0, 1.0)
    benchmarks.append(Benchmark(benchmark, cfg, n, accTarg, baseline[0], baseline[1]))


  print LONGBAR
  print "Fixed (no autotuning) scores:"
  print SHORTBAR
  progress.remainingTicks(len(benchmarks)+3)
  progress.tick()
  for b in benchmarks:
    progress.status("running fixed "+fmtCfg(b.cfg))
    b.runFixed()
    b.printFixed()
  progress.tick()
  score_fixed = geomean(map(Benchmark.scoreFixed, benchmarks))

  print SHORTBAR
  print "Fixed Score (pbbenchmark v%s): %.2f" % (VERSION, geomean(map(Benchmark.scoreFixed, benchmarks)))
  print LONGBAR
  print



  print LONGBAR
  print "Tuned scores:"
  print SHORTBAR
  for b in benchmarks:
    progress.status("running tuned "+fmtCfg(b.cfg))
    progress.status("autotuning")
    b.autotune()
    b.runTuned()
    b.printTuned()
    progress.tick()
  
  score_tuned = geomean(map(Benchmark.scoreTuned, benchmarks))
  score_training_time = geomean(map(Benchmark.scoreTrainingTime, benchmarks))

  print SHORTBAR
  print "Tuned Score (pbbenchmark v%s): %.2f" % (VERSION, score_tuned)
  print "Training Time Score (pbbenchmark v%s): %.2f" % (VERSION, score_training_time)
  print LONGBAR
  print

  if DEBUG:
    print LONGBAR
    print "Debug:"
    print SHORTBAR
    for b in benchmarks:
      b.printDebug()
    print LONGBAR
    print

  fd = open("./testdata/configs/baselines.csv.latest", "w")
  for b in benchmarks:
    print >>fd, "%s, %f, %f" % (b.benchmark, b.tuned_perf['average'], b.tuning_time)
  fd.close()

  if not os.path.isdir(LOGDIR):
    os.mkdir(LOGDIR)
  
  for b in benchmarks:
    writelog(expandLog(b.cfg), b.logEntry())
    
  writelog(expandLog('scores.log'), {
      'version'             : VERSION,
      'score_fixed'         : score_fixed,
      'score_tuned'         : score_tuned,
      'score_training_time' : score_training_time,
      'hostname'            : socket.gethostname(),
      'timestamp'           : TIMESTAMP,
    })
  
  progress.tick()
  progress.status("done")
  progress.pop()
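
The fixed and tuned scores above are combined across benchmarks with geomean(), whose definition is not part of these excerpts. A minimal stand-in using the standard geometric-mean definition (exp of the mean of logs, assuming all scores are positive) could look like this; the actual helper in the benchmark scripts may be written differently.

# Minimal geometric-mean helper matching the geomean() calls above;
# assumes every score is positive.
import math

def geomean(values):
  values = list(values)
  return math.exp(sum(math.log(v) for v in values) / len(values))
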
Example #8
      #move completed jobs to jobs_done list
      newdone=filter(lambda x: x.pid is None, jobs_running)
      jobs_running = filter(lambda x: x.pid is not None, jobs_running)
      jobs_done.extend(newdone)
      jobs_done.sort()
      updatestatus(True)
  except KeyboardInterrupt:
    for j in jobs_running:
      j.kill()
      j.addmsg("INTERRUPTED")
    jobs_done.extend(jobs_running)
    jobs_done.sort()
    updatestatus()
    raise
  updatestatus()
  progress.pop()
  return jobs_done

def getscriptpath():
  try:
    import configtool
    m=re.search('''from ['"](.*)['"]''', str(configtool))
    return os.path.dirname(m.group(1))
  except:
    return os.path.abspath(os.path.dirname(sys.argv[0]))
    
def chdirToPetabricksRoot():
  old = os.getcwd()
  new = getscriptpath()
  isCurDirOk = lambda: os.path.isfile("src/compiler/pbc.cpp")
  if not isCurDirOk():
Example #9
def main():
    warnings.simplefilter('ignore', tunerwarnings.NewProgramCrash)
    warnings.simplefilter('ignore', tunerwarnings.SmallInputProgramCrash)
    warnings.simplefilter('ignore', tunerwarnings.TargetNotMet)
    warnings.simplefilter('ignore', tunerwarnings.NanAccuracy)

    #Parse input options
    from optparse import OptionParser
    parser = OptionParser(usage="usage: pbbenchmark [options]")
    parser.add_option("--learning",
                      action="store_true",
                      dest="learning",
                      default=False,
                      help="enable heuristics learning")
    parser.add_option(
        "--heuristics",
        type="string",
        help=
        "name of the file containing the set of heuristics to use. Automatically enables --learning",
        default=None)

    (options, args) = parser.parse_args()

    if options.heuristics:
        options.learning = True

    if options.learning:
        print "Learning of heuristics is ACTIVE"
        if options.heuristics:
            print "Using heuristics file: " + str(options.heuristics)
        else:
            print "Using only heuristics in the database"

    progress.push()
    progress.status("compiling benchmarks")

    pbutil.chdirToPetabricksRoot()
    pbutil.compilePetabricks()

    global REV
    try:
        REV = pbutil.getRevision()
    except:
        pass

    r, lines = pbutil.loadAndCompileBenchmarks(
        "./scripts/pbbenchmark.tests",
        searchterms=sys.argv[1:],
        learning=options.learning,
        heuristicSetFileName=options.heuristics)

    if filter(lambda x: x.rv != 0, r):
        print "compile failed"
        sys.exit(1)

    print
    print "All scores are relative performance to a baseline system."
    print "Higher is better."
    print

    baselines = dict()
    for line in csv.reader(open("./testdata/configs/baselines.csv")):
        if len(line) >= 3:
            baselines[line[0]] = line[1:]

    benchmarks = []
    for benchmark, cfg, n, accTarg in lines:
        try:
            baseline = baselines[benchmark]
        except KeyError:
            baseline = (1.0, 1.0)
        benchmarks.append(
            Benchmark(benchmark, cfg, n, accTarg, baseline[0], baseline[1]))

        progress.remainingTicks(len(benchmarks))

    #print LONGBAR
    #print "Fixed (no autotuning) scores:"
    #print SHORTBAR
    #for b in benchmarks:
    #  progress.status("running fixed "+fmtCfg(b.cfg))
    #  b.runFixed()
    #  b.printFixed()
    #score_fixed = geomean(map(Benchmark.scoreFixed, benchmarks))

    #print SHORTBAR
    #print "Fixed Score (pbbenchmark v%s): %.2f" % (VERSION, geomean(map(Benchmark.scoreFixed, benchmarks)))
    #print LONGBAR
    #print

    print LONGBAR
    print "Tuned scores:"
    print SHORTBAR
    for b in benchmarks:
        progress.status("running tuned " + fmtCfg(b.cfg))
        progress.status("autotuning")
        b.autotune()
        b.runTuned()
        b.printTuned()
        progress.tick()

    score_tuned = geomean(map(Benchmark.scoreTuned, benchmarks))
    score_training_time = geomean(map(Benchmark.scoreTrainingTime, benchmarks))

    print SHORTBAR
    print "Tuned Score (pbbenchmark v%s): %.2f" % (VERSION, score_tuned)
    print "Training Time Score (pbbenchmark v%s): %.2f" % (VERSION,
                                                           score_training_time)
    print LONGBAR
    print

    if DEBUG:
        print LONGBAR
        print "Debug:"
        print SHORTBAR
        for b in benchmarks:
            b.printDebug()
        print LONGBAR
        print

    fd = open("./testdata/configs/baselines.csv.latest", "w")
    for b in benchmarks:
        print >> fd, "%s, %f, %f" % (b.benchmark, b.tuned_perf['average'],
                                     b.tuning_time)
    fd.close()

    if not os.path.isdir(LOGDIR):
        os.mkdir(LOGDIR)

    for b in benchmarks:
        writelog(expandLog(b.cfg), b.logEntry())

    writelog(
        expandLog('scores.log'),
        {
            'version': VERSION,
            'score_fixed': -1,  #score_fixed,
            'score_tuned': score_tuned,
            'score_training_time': score_training_time,
            'hostname': socket.gethostname(),
            'timestamp': TIMESTAMP,
            'revision': REV,
        })

    progress.status("done")
    progress.pop()
Example #10
def main():
  warnings.simplefilter('ignore', tunerwarnings.NewProgramCrash)
  warnings.simplefilter('ignore', tunerwarnings.SmallInputProgramCrash)
  warnings.simplefilter('ignore', tunerwarnings.TargetNotMet)
  warnings.simplefilter('ignore', tunerwarnings.NanAccuracy)

  #Parse input options
  from optparse import OptionParser
  parser = OptionParser(usage="usage: pbbenchmark [options]")
  parser.add_option("--learning", action="store_true", dest="learning", default=False, help="enable heuristics learning")
  parser.add_option("--heuristics", type="string", help="name of the file containing the set of heuristics to use. Automatically enables --learning", default=None)

  (options, args) = parser.parse_args()

  if options.heuristics:
    options.learning = True

  if options.learning:
    print "Learning of heuristics is ACTIVE"
    if options.heuristics:
      print "Using heuristics file: "+ str(options.heuristics)
    else:
      print "Using only heuristics in the database"

  progress.push()
  progress.status("compiling benchmarks")

  pbutil.chdirToPetabricksRoot()
  pbutil.compilePetabricks()

  global REV
  try:
    REV=pbutil.getRevision()
  except:
    pass

  r, lines = pbutil.loadAndCompileBenchmarks("./scripts/pbbenchmark.tests", searchterms=sys.argv[1:], learning=options.learning, heuristicSetFileName=options.heuristics)

  if filter(lambda x: x.rv!=0, r):
    print "compile failed"
    sys.exit(1)

  print 
  print "All scores are relative performance to a baseline system."
  print "Higher is better."
  print

  baselines = dict()
  for line in csv.reader(open("./testdata/configs/baselines.csv")):
    if len(line)>=3:
      baselines[line[0]] = line[1:]

  benchmarks=[]
  for benchmark, cfg, n, accTarg in lines:
    try:
      baseline = baselines[benchmark]
    except KeyError:
      baseline = (1.0, 1.0)
    benchmarks.append(Benchmark(benchmark, cfg, n, accTarg, baseline[0], baseline[1]))

    progress.remainingTicks(len(benchmarks))

  #print LONGBAR
  #print "Fixed (no autotuning) scores:"
  #print SHORTBAR
  #for b in benchmarks:
  #  progress.status("running fixed "+fmtCfg(b.cfg))
  #  b.runFixed()
  #  b.printFixed()
  #score_fixed = geomean(map(Benchmark.scoreFixed, benchmarks))

  #print SHORTBAR
  #print "Fixed Score (pbbenchmark v%s): %.2f" % (VERSION, geomean(map(Benchmark.scoreFixed, benchmarks)))
  #print LONGBAR
  #print



  print LONGBAR
  print "Tuned scores:"
  print SHORTBAR
  for b in benchmarks:
    progress.status("running tuned "+fmtCfg(b.cfg))
    progress.status("autotuning")
    b.autotune()
    b.runTuned()
    b.printTuned()
    progress.tick()
  
  score_tuned = geomean(map(Benchmark.scoreTuned, benchmarks))
  score_training_time = geomean(map(Benchmark.scoreTrainingTime, benchmarks))

  print SHORTBAR
  print "Tuned Score (pbbenchmark v%s): %.2f" % (VERSION, score_tuned)
  print "Training Time Score (pbbenchmark v%s): %.2f" % (VERSION, score_training_time)
  print LONGBAR
  print

  if DEBUG:
    print LONGBAR
    print "Debug:"
    print SHORTBAR
    for b in benchmarks:
      b.printDebug()
    print LONGBAR
    print

  fd = open("./testdata/configs/baselines.csv.latest", "w")
  for b in benchmarks:
    print >>fd, "%s, %f, %f" % (b.benchmark, b.tuned_perf['average'], b.tuning_time)
  fd.close()

  if not os.path.isdir(LOGDIR):
    os.mkdir(LOGDIR)
  
  for b in benchmarks:
    writelog(expandLog(b.cfg), b.logEntry())
    
  writelog(expandLog('scores.log'), {
      'version'             : VERSION,
      'score_fixed'         : -1,#score_fixed,
      'score_tuned'         : score_tuned,
      'score_training_time' : score_training_time,
      'hostname'            : socket.gethostname(),
      'timestamp'           : TIMESTAMP,
      'revision'            : REV,
    })
  
  progress.status("done")
  progress.pop()
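
expandLog() and writelog(), used at the end of both main() variants, are also not shown in these excerpts. The stand-ins below are purely hypothetical placeholders sketching one plausible shape (a path under LOGDIR plus one appended record per call); the real helpers may behave differently.

# Hypothetical stand-ins for the logging helpers used above; names,
# paths, and record format are assumptions, not the real implementation.
import os, json

LOGDIR = "./logs"   # assumed location; the scripts define their own LOGDIR

def expandLog(name):
  return os.path.join(LOGDIR, name)

def writelog(path, entry):
  f = open(path, "a")
  json.dump(entry, f)
  f.write("\n")
  f.close()
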
Example #11
            #move completed jobs to jobs_done list
            newdone = filter(lambda x: x.pid is None, jobs_running)
            jobs_running = filter(lambda x: x.pid is not None, jobs_running)
            jobs_done.extend(newdone)
            jobs_done.sort()
            updatestatus(True)
    except KeyboardInterrupt:
        for j in jobs_running:
            j.kill()
            j.addmsg("INTERRUPTED")
        jobs_done.extend(jobs_running)
        jobs_done.sort()
        updatestatus()
        raise
    updatestatus()
    progress.pop()
    return jobs_done


def getscriptpath():
    try:
        import configtool
        m = re.search('''from ['"](.*)['"]''', str(configtool))
        return os.path.dirname(m.group(1))
    except:
        return os.path.abspath(os.path.dirname(sys.argv[0]))


def chdirToPetabricksRoot():
    old = os.getcwd()
    new = getscriptpath()
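
getscriptpath() works because the repr of an imported module in CPython 2 embeds its file location, e.g. <module 'configtool' from '/path/to/configtool.pyc'>; the regex pulls out the quoted path, and its dirname is the directory the scripts live in. A small illustration (the path is made up):

# Quick check of the parsing trick used by getscriptpath().
import re, os
s = "<module 'configtool' from '/opt/petabricks/scripts/configtool.pyc'>"
m = re.search('''from ['"](.*)['"]''', s)
print os.path.dirname(m.group(1))   # -> /opt/petabricks/scripts
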
Example #12
def autotuneInner(benchmark):
  progress.push()
  config.benchmark = benchmark
  candidate, tester = init(benchmark)
  try:
    pop = Population(candidate, tester, None)
    
    if not pop.isVariableAccuracy() and config.accuracy_target:
      logging.info("clearing accuracy_target")
      config.accuracy_target = None

    stats = storagedirs.openCsvStats("roundstats", 
        ("round",
         "input_size",
         "cumulative_sec",
         "incremental_sec",
         "testing_sec",
         "inputgen_sec")+pop.statsHeader())
    timers.total.start()
    config.end_time = time.time() + config.max_time
    try:
      progress.remaining(config.max_input_size*(1+config.final_rounds))
      while pop.inputSize() < config.max_input_size:
        progress.status("autotuning %s: input %d of %d" % (config.benchmark, pop.inputSize(), config.max_input_size))
        pop.generation()
        stats.writerow((pop.roundNumber,
                        pop.inputSize(),
                        timers.total.total(),
                        timers.total.lap(),
                        timers.testing.lap(),
                        timers.inputgen.lap())+pop.stats())
        pop.nextInputSize()
        progress.remaining(config.max_input_size - pop.inputSize() + config.max_input_size*config.final_rounds)
      for z in xrange(config.final_rounds):
        pop.generation()
        stats.writerow((pop.roundNumber,
                        pop.inputSize(),
                        timers.total.total(),
                        timers.total.lap(),
                        timers.testing.lap(),
                        timers.inputgen.lap())+pop.stats())
        progress.remaining((config.final_rounds - z)*config.max_input_size)
    except TrainingTimeout:
      pass
    timers.total.stop()

    #check to make sure we did something:
    if pop.firstRound:
      warnings.warn(tunerwarnings.AlwaysCrashes())
      
    logging.info("TODO: using acc target: "+str(config.accuracy_target))
    return pop.best
  finally:
    if pop.best and config.output_cfg:
      print pop.best.cfgfile(), "=>", config.output_cfg
      shutil.copyfile(pop.best.cfgfile(), config.output_cfg)
    at = storagedirs.getactivetimers()
    if len(at):
      storagedirs.openCsvStats("timers", at.keys()).writerow(at.values())
    if tester:
      tester.cleanup()
    progress.pop()
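
autotuneInner() measures remaining work in input-size units: max_input_size * (1 + final_rounds) before tuning starts, max_input_size - inputSize() + max_input_size * final_rounds while input sizes are still doubling, and (final_rounds - z) * max_input_size during the final rounds. A small numeric illustration with made-up config values:

# Illustrative check of the remaining-work bookkeeping in autotuneInner();
# max_input_size and final_rounds are made up here, not real config values.
max_input_size, final_rounds = 4096, 2
print max_input_size * (1 + final_rounds)                          # 12288 units before tuning starts
inputSize = 256                                                    # hypothetical current input size
print max_input_size - inputSize + max_input_size * final_rounds   # 12032 units still to go
print (final_rounds - 0) * max_input_size                          # 8192 units entering the first final round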