Example #1
def autotuneCutoffBinarySearch(tx, tunable, n, min=0, max=-1):
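  # Tune a single cutoff tunable: search [min, max] for the best value with
  # optimizeParam, report the value and its measured impact, and store the
  # winner in the configuration file.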
  progress.push()
  progress.status("* optimize " + nameof(tx) + " tunable " + nameof(tunable))
  val, impact = optimizeParam(tx, n, nameof(tunable), min, max, best=(-1, goodtimelimit()))
  print "* optimize " + nameof(tx) + " tunable " + nameof(tunable) + " = %d "%val + "(impact=%.2f)"%impact
  setConfigVal(tunable, val)
  progress.pop()
Example #2
def determineInputSizes():
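    # Ask pbutil to infer an input size that roughly matches inputSizeTarget
    # within the INFERINPUTSIZES_SEC time budget and record it in options.n.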
    progress.status("finding reasonable input size for training... (%d sec) " %
                    INFERINPUTSIZES_SEC)
    options.n = pbutil.inferGoodInputSizes(pbutil.benchmarkToBin(app),
                                           [inputSizeTarget],
                                           INFERINPUTSIZES_SEC)[0]
    print "* finding reasonable input size for training... %d" % options.n
Example #3
def autotuneAlgchoice(tx, site, ctx, n, cutoffs):
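    # Run the external autotuner for one algorithmic-choice site: build the
    # command line, stream the child's stdout, and translate BEGIN ITERATION,
    # SLOT and TRY lines into progress updates.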
    progress.push()
    cmd = mkcmd([
        "--transform=" + nameof(ctx), "--autotune",
        "--autotune-transform=" + nameof(tx),
        "--autotune-site=%d" % site,
        "--max=%d" % n,
        "--max-sec=%d" % goodtimelimit()
    ])
    for x in cutoffs:
        cmd.append("--autotune-tunable=" + nameof(x))
    if options.debug or options.justprint:
        print ' '.join(cmd)
    if options.justprint:
        progress.pop()
        return True
    runsCur = 1
    runsLast = 1
    inputCur = 1
    #progress formula assumes linear runtime of program
    calcprog = lambda: 1.0 - (math.log(inputCur, 2) + min(
        1.0, runsCur / float(runsLast))) / (math.log(n, 2) + 1)
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=substderr)
    pfx = "tuning " + nameof(tx) + ":%d - " % site
    if site == -1:
        pfx = "tuning %d cutoffs in %s - " % (len(cutoffs), nameof(tx))
    str = ""
    progress.status(pfx)
    while True:
        line = p.stdout.readline()
        if line == "":
            break
        #BEGIN ITERATION .... / 4096
        m = iterRe.match(line)
        if m:
            inputCur = int(m.group(1))
            runsLast = runsCur
            runsCur = 0
            progress.remaining(calcprog())
        #SLOT[0] KEEP .... = 1.25
        m = slotRe.match(line)
        if m:
            if m.group(1) == "0":
                str = m.group(3)
                progress.status(pfx + str)
        #  * TRY ...
        m = runRe.match(line)
        if m:
            runsCur += 1
            progress.remaining(calcprog())
    progress.pop()
    if p.wait() == 0:
        print "* " + pfx + str
        return True
    else:
        print 'FAILURE OF TUNING STEP:', ' '.join(cmd)
        return False
Example #4
def optimizeParam(ctx,
                  n,
                  param,
                  start=0,
                  stop=-1,
                  branchfactor=7,
                  best=(-1, maxint),
                  worst=(-1, -maxint)):
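    # Branching search over integer values of `param` in [start, stop]: time
    # `branchfactor` evenly spaced candidates, then recurse on the interval
    # around the fastest one while the relative improvement exceeds 5%.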
    def timeat(x, thresh):
        old = getConfigVal(param)
        setConfigVal(param, x)
        t = timingRun(ctx, n, thresh)
        setConfigVal(param, old)
        return t

    if stop < 0:
        stop = n
    step = (stop - start) / float(branchfactor - 1)
    progress.status(
        "optimizing %s in %s, searching [%d,%d], impact=%.2f" %
        (param, nameof(ctx), start, stop, max(0,
                                              (worst[1] - best[1]) / best[1])))
    if step >= 1:
        xs = map(lambda i: start + int(round(step * i)), xrange(branchfactor))
        ys = []
        for i in xrange(len(xs)):
            progress.remaining(
                math.log(stop - start, branchfactor / 2.0) * branchfactor - i)
            x = xs[i]
            if x == best[0]:
                y = best[1]  # cached value
            else:
                try:
                    y = timeat(x, best[1] + 1)
                except pbutil.TimingRunTimeout, e:
                    y = maxint
            if y < best[1]:
                best = (x, y)
            ys.append(y)
        minTime, minX = reduce(min,
                               map(lambda i: (ys[i], xs[i]), xrange(len(xs))))
        maxTime, maxX = reduce(max,
                               map(lambda i: (ys[i], xs[i]), xrange(len(xs))))
        improvement = (maxTime - minTime) / maxTime
        newStart = max(int(round(minX - step)), start)
        newStop = min(int(round(minX + step)), stop)
        best = (minX, minTime)
        if worst[1] < maxTime:
            worst = (maxX, maxTime)
        #print minX, start, stop, improvement
        if improvement > 0.05:
            return optimizeParam(ctx, n, param, newStart, newStop,
                                 branchfactor, best, worst)
    # Assumed completion: the snippet as extracted ends at the recursion, but
    # callers unpack (value, impact), so return the best value and its relative
    # impact using the same formula reported in the status line above.
    return best[0], max(0, (worst[1] - best[1]) / best[1])
Example #5
def runTimingTest(tx):
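  # Time one run at the current input size and report the speedup relative to
  # the previous entry in `results`.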
  progress.push()
  progress.remaining(1)
  progress.status("running timing test")
  t=timingRun(tx, options.n)
  progress.remaining(0)
  if len(results)>0:
    speedup=results[-1]/t
    print "* timing test... %.4f (%.2fx speedup)"%(t, speedup)
  else:
    print "* initial timing test... %.4lf s"%t
  results.append(t)
  progress.pop()
Example #6
def runTimingTest(tx):
    progress.push()
    progress.remaining(1)
    progress.status("running timing test")
    t = timingRun(tx, options.n)
    progress.remaining(0)
    if len(results) > 0:
        speedup = results[-1] / t
        print "* timing test... %.4f (%.2fx speedup)" % (t, speedup)
    else:
        print "* initial timing test... %.4lf s" % t
    results.append(t)
    progress.pop()
Example #7
def autotuneCutoffBinarySearch(tx, tunable, n, min=0, max=-1):
    progress.push()
    progress.status("* optimize " + nameof(tx) + " tunable " + nameof(tunable))
    val, impact = optimizeParam(tx,
                                n,
                                nameof(tunable),
                                min,
                                max,
                                best=(-1, goodtimelimit()))
    print "* optimize " + nameof(tx) + " tunable " + nameof(
        tunable) + " = %d " % val + "(impact=%.2f)" % impact
    setConfigVal(tunable, val)
    progress.pop()
Example #8
def autotuneAlgchoice(tx, site, ctx, n, cutoffs):
  progress.push()
  cmd=mkcmd(["--transform="+nameof(ctx), "--autotune", "--autotune-transform="+nameof(tx),  "--autotune-site=%d"%site,
             "--max=%d"%n, "--max-sec=%d"%goodtimelimit()])
  for x in cutoffs:
    cmd.append("--autotune-tunable="+nameof(x))
  if options.debug or options.justprint:
    print ' '.join(cmd)
  if options.justprint:
    progress.pop()
    return True
  runsCur=1
  runsLast=1
  inputCur=1
  #progress formula assumes linear runtime of program
  calcprog=lambda: 1.0 - (math.log(inputCur,2) + min(1.0,runsCur/float(runsLast)))/(math.log(n,2)+1)
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=substderr)
  pfx="tuning "+nameof(tx)+":%d - "%site
  if site == -1:
    pfx="tuning %d cutoffs in %s - " % (len(cutoffs), nameof(tx))
  str=""
  progress.status(pfx)
  while True:
    line=p.stdout.readline()
    if line == "":
      break
    #BEGIN ITERATION .... / 4096
    m=iterRe.match(line)
    if m:
      inputCur=int(m.group(1))
      runsLast=runsCur
      runsCur=0
      progress.remaining(calcprog())
    #SLOT[0] KEEP .... = 1.25
    m=slotRe.match(line)
    if m:
      if m.group(1)=="0":
        str=m.group(3)
        progress.status(pfx+str)
    #  * TRY ... 
    m=runRe.match(line)
    if m:
      runsCur+=1
      progress.remaining(calcprog())
  progress.pop()
  if p.wait()==0:
    print "* "+pfx+str
    return True
  else:
    print 'FAILURE OF TUNING STEP:', ' '.join(cmd)
    return False
Example #9
def optimizeParam(ctx, n, param, start=0, stop=-1, branchfactor=7, best=(-1, maxint), worst=(-1, -maxint)):
  def timeat(x, thresh):
    old=getConfigVal(param)
    setConfigVal(param, x)
    t=timingRun(ctx, n, thresh)
    setConfigVal(param, old)
    return t
  if stop<0:
    stop=n
  step=(stop-start)/float(branchfactor-1)
  progress.status("optimizing %s in %s, searching [%d,%d], impact=%.2f" %(param,nameof(ctx),start,stop,max(0,(worst[1]-best[1])/best[1])))
  if step>=1:
    xs=map(lambda i: start+int(round(step*i)), xrange(branchfactor))
    ys=[]
    for i in xrange(len(xs)):
      progress.remaining(math.log(stop-start, branchfactor/2.0)*branchfactor - i)
      x=xs[i]
      if x==best[0]:
        y=best[1] # cached value
      else:
        try:
          y=timeat(x, best[1]+1)
        except pbutil.TimingRunTimeout, e:
          y=maxint
      if y<best[1]:
        best=(x,y)
      ys.append(y)
    minTime, minX = reduce(min, map(lambda i: (ys[i], xs[i]), xrange(len(xs))))
    maxTime, maxX = reduce(max, map(lambda i: (ys[i], xs[i]), xrange(len(xs))))
    improvement=(maxTime-minTime)/maxTime
    newStart = max(int(round(minX-step)), start)
    newStop = min(int(round(minX+step)), stop)
    best=(minX, minTime)
    if worst[1]<maxTime:
      worst=(maxX, maxTime)
    #print minX, start, stop, improvement
    if improvement > 0.05:
      return optimizeParam(ctx, n, param, newStart, newStop, branchfactor, best, worst)
  # Assumed completion: the snippet as extracted ends at the recursion, but
  # callers unpack (value, impact), so return the best value and its relative
  # impact using the same formula reported in the status line above.
  return best[0], max(0, (worst[1]-best[1])/best[1])
Example #10
def main():
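  # pbbenchmark driver: compile the benchmarks, run every benchmark fixed and
  # tuned, score the results against testdata/configs/baselines.csv, and write
  # the scores to the log directory.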
  warnings.simplefilter('ignore', tunerwarnings.NewProgramCrash)
  warnings.simplefilter('ignore', tunerwarnings.TargetNotMet)
  warnings.simplefilter('ignore', tunerwarnings.NanAccuracy)

  progress.push()
  progress.status("compiling benchmarks")

  pbutil.chdirToPetabricksRoot()
  pbutil.compilePetabricks()

  r, lines = pbutil.loadAndCompileBenchmarks("./scripts/pbbenchmark.tests")

  if filter(lambda x: x.rv!=0, r):
    print "compile failed"
    sys.exit(1)

  print 
  print "All scores are relative performance to a baseline system."
  print "Higher is better."
  print

  baselines = dict()
  for line in csv.reader(open("./testdata/configs/baselines.csv")):
    if len(line)>=3:
      baselines[line[0]] = line[1:]

  benchmarks=[]
  for benchmark, cfg, n, accTarg in lines:
    try:
      baseline = baselines[benchmark]
    except KeyError:
      baseline = (1.0, 1.0)
    benchmarks.append(Benchmark(benchmark, cfg, n, accTarg, baseline[0], baseline[1]))


  print LONGBAR
  print "Fixed (no autotuning) scores:"
  print SHORTBAR
  progress.remainingTicks(len(benchmarks)+3)
  progress.tick()
  for b in benchmarks:
    progress.status("running fixed "+fmtCfg(b.cfg))
    b.runFixed()
    b.printFixed()
  progress.tick()
  score_fixed = geomean(map(Benchmark.scoreFixed, benchmarks))

  print SHORTBAR
  print "Fixed Score (pbbenchmark v%s): %.2f" % (VERSION, geomean(map(Benchmark.scoreFixed, benchmarks)))
  print LONGBAR
  print



  print LONGBAR
  print "Tuned scores:"
  print SHORTBAR
  for b in benchmarks:
    progress.status("running tuned "+fmtCfg(b.cfg))
    progress.status("autotuning")
    b.autotune()
    b.runTuned()
    b.printTuned()
    progress.tick()
  
  score_tuned = geomean(map(Benchmark.scoreTuned, benchmarks))
  score_training_time = geomean(map(Benchmark.scoreTrainingTime, benchmarks))

  print SHORTBAR
  print "Tuned Score (pbbenchmark v%s): %.2f" % (VERSION, score_tuned)
  print "Training Time Score (pbbenchmark v%s): %.2f" % (VERSION, score_training_time)
  print LONGBAR
  print

  if DEBUG:
    print LONGBAR
    print "Debug:"
    print SHORTBAR
    for b in benchmarks:
      b.printDebug()
    print LONGBAR
    print

  fd = open("./testdata/configs/baselines.csv.latest", "w")
  for b in benchmarks:
    print >>fd, "%s, %f, %f" % (b.benchmark, b.tuned_perf['average'], b.tuning_time)
  fd.close()

  if not os.path.isdir(LOGDIR):
    os.mkdir(LOGDIR)
  
  for b in benchmarks:
    writelog(expandLog(b.cfg), b.logEntry())
    
  writelog(expandLog('scores.log'), {
      'version'             : VERSION,
      'score_fixed'         : score_fixed,
      'score_tuned'         : score_tuned,
      'score_training_time' : score_training_time,
      'hostname'            : socket.gethostname(),
      'timestamp'           : TIMESTAMP,
    })
  
  progress.tick()
  progress.status("done")
  progress.pop()
Example #11
      s += "%d failed, "%failed
    s += "%d running, "%len(jobs_running)
    s += "%d pending"%len(jobs_pending)
    return s
  def updatestatus(fast=False):
    progress.remaining(2*len(jobs_pending)+len(jobs_running))
    if not fast:
      for j in jobs_done[maxprinted[0]:]:
        if j.id==maxprinted[0]:
          print j.getmsg()
          maxprinted[0]+=1
        else:
          break

  progress.push()
  progress.status(mkstatus)
  updatestatus()

  try:
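    # Scheduling loop: keep at most NCPU forked jobs running, wait on their
    # pipes with select(), and print each finished job's message in id order.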
    while len(jobs_pending)>0 or len(jobs_running)>0:
      #spawn new jobs
      while len(jobs_pending)>0 and len(jobs_running)<NCPU:
        jobs_running.append(jobs_pending.pop(0).forkrun())
      updatestatus()
        
      #wait for an event
      rj, wj, xj = select.select(jobs_running, [], jobs_running)

      #handle pending data
      for j in rj:
        j.handleevent()
Example #12
def main():
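    # Build InterfaceHalf objects from traceroute adjacencies: map each address
    # to its AS and Org, pair the two directed halves of every point-to-point
    # link, then run the inference algorithm and write its updates.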
    parser = ArgumentParser()
    parser.add_argument('-a', '--adjacencies', help='Adjacencies derived from traceroutes')
    parser.add_argument('-b', '--ip2as', help='BGP prefixes')
    parser.add_argument('-c', '--addresses', help='List of addresses')
    parser.add_argument('-f', '--factor', type=float, default=0, help='Factor used in the paper')
    parser.add_argument('-i', '--interfaces', dest='interfaces', help='Interface information')
    parser.add_argument('-o', '--as2org', help='AS2ORG mappings')
    parser.add_argument('-v', dest='verbose', action='count', default=0, help='Increase verbosity for each v')
    parser.add_argument('-w', '--output', type=FileType('w'), default='-', help='Output filename')
    parser.add_argument('--addresses-exit', dest='addresses_exit', type=FileType('w'), help='Extract addresses from traces and exit.')
    parser.add_argument('--potaroo', action='store_true', help='Include AS identifiers and names from http://bgp.potaroo.net/cidr/autnums.html')
    parser.add_argument('--trace-exit', type=FileType('w'), help='Extract adjacencies and addresses from the traceroutes and exit')
    providers_group = parser.add_mutually_exclusive_group()
    providers_group.add_argument('-r', '--rel-graph', help='CAIDA relationship graph')
    providers_group.add_argument('-p', '--asn-providers', help='List of ISP ASes')
    providers_group.add_argument('-q', '--org-providers', help='List of ISP ORGs')
    parser.add_argument('-I', '--iterations', type=int, default=100)
    args = parser.parse_args()

    log.setLevel(max((3 - args.verbose) * 10, 10))

    ip2as = RoutingTable.ip2as(args.ip2as)
    as2org = AS2Org(args.as2org, include_potaroo=False)

    adjacencies = read_adjacencies(args.adjacencies)
    neighbors = defaultdict(list)
    for x, y in adjacencies:
        neighbors[(x, True)].append(y)
        neighbors[(y, False)].append(x)
    status('Extracting addresses from adjacencies')
    unique_interfaces = {u for u, _ in adjacencies} | {v for _, v in adjacencies}
    finish_status('Found {:,d}'.format(len(unique_interfaces)))
    status('Converting addresses to ipnums')
    addresses = {struct.unpack("!L", socket.inet_aton(addr.strip()))[0] for addr in unique_interfaces}
    finish_status()
    log.info('Mapping IP addresses to ASes.')
    asns = {}
    for address in unique_interfaces:
        asn = ip2as[address]
        if asn != -2:
            asns[address] = asn
    if as2org:
        log.info('Mapping ASes to Orgs.')
        orgs = {address: as2org[asn] for address, asn in asns.items()}
    else:
        orgs = asns
    log.info('Determining other sides for each address (assuming point-to-point).')
    othersides = {address: determine_otherside(address, addresses) for address in asns}
    log.info('Creating interface halves.')
    halves_dict = {
        (address, direction): InterfaceHalf(address, asns[address], orgs[address], direction, othersides[address])
        for (address, direction) in neighbors if address in asns
        }
    for (address, direction), half in halves_dict.items():
        half.set_otherhalf(halves_dict.get((address, not direction)))
        half.set_otherside(halves_dict.get((half.otherside_address, not direction)))
        half.set_neighbors([halves_dict[(neighbor, not direction)] for neighbor in neighbors[(address, direction)] if
                            neighbor in asns])
    allhalves = list(halves_dict.values())
    if args.asn_providers:
        with File2(args.asn_providers) as f:
            providers = {int(asn.strip()) for asn in f}
    elif args.org_providers:
        with File2(args.org_providers) as f:
            providers = {asn.strip() for asn in f}
    elif args.rel_graph:
        rels = pd.read_csv(args.rel_graph, sep='|', comment='#', names=['AS1', 'AS2', 'Rel'], usecols=[0, 1, 2])
        providers = set(rels[rels.Rel == -1].AS1.unique())
    else:
        providers = None
    updates = algorithm(allhalves, factor=args.factor, providers=providers, iterations=args.iterations)
    updates.write(args.output)
Example #13
    def add(self, iter, pars, trn_loss, val_loss, tst_loss):
        """
        Adds an iteration.
        :param iter: iteration number
        :param pars: parameters used in this iteration
        :param trn_loss: training set loss
        :param val_loss: validation set loss
        :param tst_loss: test set loss
        """
        iter = int(iter)
        trn_loss = float(trn_loss)
        val_loss = float(val_loss)
        tst_loss = float(tst_loss)

        # keep track of best results so far
        if val_loss < self.best_val_loss - self.min_improvement:
            self.best_iter = iter
            self.best_val_loss = val_loss
            self.best_tst_loss = tst_loss
            if isinstance(pars, gp.garray):
                self.best_pars = gp.garray(pars, copy=True)
            else:
                self.best_pars = np.copy(pars)
            self.last_val_improvement = iter

        # termination criteria
        mmi = None
        if self.max_missed_val_improvements is not None:
            if self.iteration_gain != 0:
                mmi = max(self.max_missed_val_improvements, (self.iteration_gain - 1) * self.last_val_improvement)
            else:
                mmi = self.max_missed_val_improvements
            if iter - self.last_val_improvement > mmi:
                self.termination_reason = 'no_improvement'
                self.should_terminate = True
        if self.desired_loss is not None and val_loss <= self.desired_loss:
            self.termination_reason = 'desired_loss_reached'
            self.should_terminate = True
        if self.min_iters is not None and iter < self.min_iters:
            self.termination_reason = ''
            self.should_terminate = False
        if self.max_iters is not None and iter >= self.max_iters:
            self.termination_reason = 'max_iters_reached'
            self.should_terminate = True
        if not (np.isfinite(trn_loss) and np.isfinite(val_loss)):
            self.termination_reason = 'nan_or_inf_loss'
            self.should_terminate = True

        # store current losses
        self.history = np.hstack((self.history, [[iter],
                                                 [trn_loss],
                                                 [val_loss],
                                                 [tst_loss]]))

        # display progress
        if self.show_progress:
            caption = "training: %9.5f  validation: %9.5f (best: %9.5f)  " \
                      "test: %9.5f   " % \
                      (trn_loss, val_loss, self.best_val_loss, tst_loss)
            if mmi is not None:
                caption += "    (patience for %d non-improving iterations) " % \
                           (mmi - (iter - self.last_val_improvement))
            progress.status(iter, caption=caption)
        if time() > self._last_auto_plot_time + self.auto_plot_interval:
            try:
                plt.figure()
                self.plot()
                plt.title("training in progress")
                plt.savefig(join(self.state_dir, "loss.pdf"))
                plt.close()
                self._last_auto_plot_time = time()
            except:
                pass

        # termination by user
        key = get_key()
        if key == "q":
            print
            print "Termination by user."
            self.termination_reason = 'user'
            self.should_terminate = True
        elif key == "d":
            print
            print "Learning rate decrease by user."
            self.termination_reason = 'user_learning_rate_decrease'
            self.should_terminate = True
Example #14
def autotuneInner(benchmark):
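  # One autotuning session for a benchmark: evolve a Population over growing
  # input sizes up to config.max_input_size, run config.final_rounds extra
  # generations, and copy the best candidate's config to config.output_cfg.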
  progress.push()
  config.benchmark = benchmark
  candidate, tester = init(benchmark)
  try:
    pop = Population(candidate, tester, None)
    
    if not pop.isVariableAccuracy() and config.accuracy_target:
      logging.info("clearing accuracy_target")
      config.accuracy_target = None

    stats = storagedirs.openCsvStats("roundstats", 
        ("round",
         "input_size",
         "cumulative_sec",
         "incremental_sec",
         "testing_sec",
         "inputgen_sec")+pop.statsHeader())
    timers.total.start()
    config.end_time = time.time() + config.max_time
    try:
      progress.remaining(config.max_input_size*(1+config.final_rounds))
      while pop.inputSize() < config.max_input_size:
        progress.status("autotuning %s: input %d of %d" % (config.benchmark, pop.inputSize(), config.max_input_size))
        pop.generation()
        stats.writerow((pop.roundNumber,
                        pop.inputSize(),
                        timers.total.total(),
                        timers.total.lap(),
                        timers.testing.lap(),
                        timers.inputgen.lap())+pop.stats())
        pop.nextInputSize()
        progress.remaining(config.max_input_size - pop.inputSize() + config.max_input_size*config.final_rounds)
      for z in xrange(config.final_rounds):
        pop.generation()
        stats.writerow((pop.roundNumber,
                        pop.inputSize(),
                        timers.total.total(),
                        timers.total.lap(),
                        timers.testing.lap(),
                        timers.inputgen.lap())+pop.stats())
        progress.remaining((config.final_rounds - z)*config.max_input_size)
    except TrainingTimeout:
      pass
    timers.total.stop()

    #check to make sure we did something:
    if pop.firstRound:
      warnings.warn(tunerwarnings.AlwaysCrashes())
      
    logging.info("TODO: using acc target: "+str(config.accuracy_target))
    return pop.best
  finally:
    if pop.best and config.output_cfg:
      print pop.best.cfgfile(),"=>" , config.output_cfg
      shutil.copyfile(pop.best.cfgfile(), config.output_cfg)
    at = storagedirs.getactivetimers()
    if len(at):
      storagedirs.openCsvStats("timers", at.keys()).writerow(at.values())
    if tester:
      tester.cleanup()
    progress.pop()
Example #15
def main(argv):
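    # Autotuner entry point: parse options, compile the benchmark, walk its
    # call tree to build a queue of TuneTasks, then run the tasks while
    # reporting weighted progress and per-task timing statistics.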
    t1 = time.time()

    global app
    global cfg
    global ignore_list
    global defaultArgs
    global substderr
    global options

    config_tool_path = os.path.split(argv[0])[0] + "/configtool.py"
    fast = False

    parser = optparse.OptionParser(usage="usage: %prog [options] BENCHMARK")
    parser.add_option("--min", type="int", dest="min", default=1)
    parser.add_option("-n",
                      "--random",
                      "--max",
                      type="int",
                      dest="n",
                      default=-1)
    parser.add_option("--offset", type="int", dest="offset", default=0)
    parser.add_option("--max-sec", type="float", dest="maxsec", default=0)
    parser.add_option("-d",
                      "--debug",
                      action="store_true",
                      dest="debug",
                      default=False)
    parser.add_option("-f",
                      "--fast",
                      action="store_true",
                      dest="fast",
                      default=False)
    parser.add_option("--threads",
                      type="int",
                      dest="threads",
                      default=pbutil.cpuCount())
    parser.add_option("-c", "--config", dest="config", default=None)
    parser.add_option("--noisolation",
                      action="store_true",
                      dest="noisolation",
                      default=False)
    parser.add_option("--print",
                      action="store_true",
                      dest="justprint",
                      default=False)
    parser.add_option("--time",
                      action="store_true",
                      dest="time",
                      default=False)
    parser.add_option("--acctrials",
                      type="int",
                      dest="acctrials",
                      default=None)
    parser.add_option("--accimprovetries",
                      type="int",
                      dest="accimprovetries",
                      default=None)
    parser.add_option("--trials", type="int", dest="trials", default=None)
    parser.add_option("--trials-sec",
                      type="float",
                      dest="trialssec",
                      default=None)
    parser.add_option("--trials-max",
                      type="int",
                      dest="trialsmax",
                      default=None)
    parser.add_option("--transform", dest="transform", default=None)
    options, args = parser.parse_args()

    if len(args) != 1:
        parser.error("expected benchmark name as arg")

    cfg = options.config
    app = args[0]

    pbutil.chdirToPetabricksRoot()
    pbutil.compilePetabricks()
    app = pbutil.normalizeBenchmarkName(app)
    pbutil.compileBenchmarks([app])

    if options.debug:
        substderr = sys.__stderr__

    if cfg is None:
        cfg = pbutil.benchmarkToCfg(app)

    defaultArgs = [
        '--config=' + cfg,
        '--threads=%d' % options.threads,
        '--offset=%d' % options.offset,
        '--min=%d' % options.min
    ]

    if options.noisolation:
        defaultArgs.append("--noisolation")

    if options.acctrials is not None:
        defaultArgs.append("--acctrials=%d" % options.acctrials)
    if options.trials is not None:
        defaultArgs.append("--trials=%d" % options.trials)
    if options.trialssec is not None:
        defaultArgs.append("--trials-sec=%f" % options.trialssec)
    if options.trialsmax is not None:
        defaultArgs.append("--trials-max=%d" % options.trialsmax)
    if options.accimprovetries is not None:
        defaultArgs.append("--accimprovetries=%d" % options.accimprovetries)

    getIgnoreList()

    try:
        infoxml = parse(pbutil.benchmarkToInfo(app))
    except:
        print "Cannot parse:", pbutil.benchmarkToInfo(app)
        sys.exit(-1)

#print "Reseting config entries"
#reset()

#build index of transforms
    for t in infoxml.getElementsByTagName("transform"):
        transforms[nameof(t)] = t
        if t.getAttribute("templateChoice") == "0":
            transforms[t.getAttribute("templateName")] = t

    if options.transform is None:
        maintx = transforms[mainname()]
    else:
        maintx = transforms[options.transform]

    print "Call tree:"
    walkCallTree(maintx, fnup=printTx)
    print
    print "Autotuning:"

    progress.status("building work queue")

    if options.n <= 0:
        tasks.append(TuneTask("determineInputSizes", determineInputSizes))

    if options.time:
        tasks.append(TuneTask("runTimingTest", lambda: runTimingTest(maintx)))

    #build list of tasks
    if not options.fast:
        walkCallTree(
            maintx, lambda tx, depth, loops: enqueueAutotuneCmds(
                tx, maintx, 1, depth, loops))
    walkCallTree(
        maintx, lambda tx, depth, loops: enqueueAutotuneCmds(
            tx, maintx, 2, depth, loops))

    if options.time:
        tasks.append(TuneTask("runTimingTest", lambda: runTimingTest(maintx)))

    progress.status("autotuning")

    while len(tasks) > 0:
        w1 = remainingTaskWeight()
        task = tasks.pop(0)
        w2 = remainingTaskWeight()
        progress.remaining(w1, w2)
        task.run()
    progress.clear()

    t2 = time.time()
    sec = t2 - t1

    print "autotuning took %.2f sec" % (t2 - t1)
    for k, v in taskStats.items():
        print "  %.2f sec in %s" % (v.sec, k)
        sec -= v.sec
    print "  %.2f sec in unknown" % sec

    names = taskStats.keys()
    weights = map(lambda x: x.sec / float(max(x.count, 1)), taskStats.values())
    scale = len(weights) / sum(weights)
    print "Suggested weights:"
    print "taskStats = {" + ", ".join(
        map(lambda i: "'%s':TaskStats(%.2f)" %
            (names[i], scale * weights[i]), xrange(len(names)))) + "}"
Example #16
        s += "%d running, " % len(jobs_running)
        s += "%d pending" % len(jobs_pending)
        return s

    def updatestatus(fast=False):
        progress.remaining(2 * len(jobs_pending) + len(jobs_running))
        if not fast:
            for j in jobs_done[maxprinted[0]:]:
                if j.id == maxprinted[0]:
                    print j.getmsg()
                    maxprinted[0] += 1
                else:
                    break

    progress.push()
    progress.status(mkstatus)
    updatestatus()

    try:
        while len(jobs_pending) > 0 or len(jobs_running) > 0:
            #spawn new jobs
            while len(jobs_pending) > 0 and len(jobs_running) < nParallelJobs:
                jobs_running.append(jobs_pending.pop(0).forkrun())
            updatestatus()

            #wait for an event
            rj, wj, xj = select.select(jobs_running, [], jobs_running)

            #handle pending data
            for j in rj:
                j.handleevent()
Example #17
def main():
  warnings.simplefilter('ignore', tunerwarnings.NewProgramCrash)
  warnings.simplefilter('ignore', tunerwarnings.SmallInputProgramCrash)
  warnings.simplefilter('ignore', tunerwarnings.TargetNotMet)
  warnings.simplefilter('ignore', tunerwarnings.NanAccuracy)

  #Parse input options
  from optparse import OptionParser
  parser = OptionParser(usage="usage: pbbenchmark [options]")
  parser.add_option("--learning", action="store_true", dest="learning", default=False, help="enable heuristics learning")
  parser.add_option("--heuristics", type="string", help="name of the file containing the set of heuristics to use. Automatically enables --learning", default=None)

  (options, args) = parser.parse_args()

  if options.heuristics:
    options.learning = True

  if options.learning:
    print "Learning of heuristics is ACTIVE"
    if options.heuristics:
      print "Using heuristics file: "+ str(options.heuristics)
    else:
      print "Using only heuristics in the database"

  progress.push()
  progress.status("compiling benchmarks")

  pbutil.chdirToPetabricksRoot()
  pbutil.compilePetabricks()

  global REV
  try:
    REV=pbutil.getRevision()
  except:
    pass

  r, lines = pbutil.loadAndCompileBenchmarks("./scripts/pbbenchmark.tests", searchterms=sys.argv[1:], learning=options.learning, heuristicSetFileName=options.heuristics)

  if filter(lambda x: x.rv!=0, r):
    print "compile failed"
    sys.exit(1)

  print 
  print "All scores are relative performance to a baseline system."
  print "Higher is better."
  print

  baselines = dict()
  for line in csv.reader(open("./testdata/configs/baselines.csv")):
    if len(line)>=3:
      baselines[line[0]] = line[1:]

  benchmarks=[]
  for benchmark, cfg, n, accTarg in lines:
    try:
      baseline = baselines[benchmark]
    except KeyError:
      baseline = (1.0, 1.0)
    benchmarks.append(Benchmark(benchmark, cfg, n, accTarg, baseline[0], baseline[1]))

    progress.remainingTicks(len(benchmarks))

   #print LONGBAR
   #print "Fixed (no autotuning) scores:"
   #print SHORTBAR
   #for b in benchmarks:
   #  progress.status("running fixed "+fmtCfg(b.cfg))
   #  b.runFixed()
   #  b.printFixed()
   #score_fixed = geomean(map(Benchmark.scoreFixed, benchmarks))

   #print SHORTBAR
   #print "Fixed Score (pbbenchmark v%s): %.2f" % (VERSION, geomean(map(Benchmark.scoreFixed, benchmarks)))
   #print LONGBAR
   #print



  print LONGBAR
  print "Tuned scores:"
  print SHORTBAR
  for b in benchmarks:
    progress.status("running tuned "+fmtCfg(b.cfg))
    progress.status("autotuning")
    b.autotune()
    b.runTuned()
    b.printTuned()
    progress.tick()
  
  score_tuned = geomean(map(Benchmark.scoreTuned, benchmarks))
  score_training_time = geomean(map(Benchmark.scoreTrainingTime, benchmarks))

  print SHORTBAR
  print "Tuned Score (pbbenchmark v%s): %.2f" % (VERSION, score_tuned)
  print "Training Time Score (pbbenchmark v%s): %.2f" % (VERSION, score_training_time)
  print LONGBAR
  print

  if DEBUG:
    print LONGBAR
    print "Debug:"
    print SHORTBAR
    for b in benchmarks:
      b.printDebug()
    print LONGBAR
    print

  fd = open("./testdata/configs/baselines.csv.latest", "w")
  for b in benchmarks:
    print >>fd, "%s, %f, %f" % (b.benchmark, b.tuned_perf['average'], b.tuning_time)
  fd.close()

  if not os.path.isdir(LOGDIR):
    os.mkdir(LOGDIR)
  
  for b in benchmarks:
    writelog(expandLog(b.cfg), b.logEntry())
    
  writelog(expandLog('scores.log'), {
      'version'             : VERSION,
      'score_fixed'         : -1,#score_fixed,
      'score_tuned'         : score_tuned,
      'score_training_time' : score_training_time,
      'hostname'            : socket.gethostname(),
      'timestamp'           : TIMESTAMP,
      'revision'            : REV,
    })
  
  progress.status("done")
  progress.pop()
Example #18
def main():
    warnings.simplefilter('ignore', tunerwarnings.NewProgramCrash)
    warnings.simplefilter('ignore', tunerwarnings.SmallInputProgramCrash)
    warnings.simplefilter('ignore', tunerwarnings.TargetNotMet)
    warnings.simplefilter('ignore', tunerwarnings.NanAccuracy)

    #Parse input options
    from optparse import OptionParser
    parser = OptionParser(usage="usage: pbbenchmark [options]")
    parser.add_option("--learning",
                      action="store_true",
                      dest="learning",
                      default=False,
                      help="enable heuristics learning")
    parser.add_option(
        "--heuristics",
        type="string",
        help=
        "name of the file containing the set of heuristics to use. Automatically enables --learning",
        default=None)

    (options, args) = parser.parse_args()

    if options.heuristics:
        options.learning = True

    if options.learning:
        print "Learning of heuristics is ACTIVE"
        if options.heuristics:
            print "Using heuristics file: " + str(options.heuristics)
        else:
            print "Using only heuristics in the database"

    progress.push()
    progress.status("compiling benchmarks")

    pbutil.chdirToPetabricksRoot()
    pbutil.compilePetabricks()

    global REV
    try:
        REV = pbutil.getRevision()
    except:
        pass

    r, lines = pbutil.loadAndCompileBenchmarks(
        "./scripts/pbbenchmark.tests",
        searchterms=sys.argv[1:],
        learning=options.learning,
        heuristicSetFileName=options.heuristics)

    if filter(lambda x: x.rv != 0, r):
        print "compile failed"
        sys.exit(1)

    print
    print "All scores are relative performance to a baseline system."
    print "Higher is better."
    print

    baselines = dict()
    for line in csv.reader(open("./testdata/configs/baselines.csv")):
        if len(line) >= 3:
            baselines[line[0]] = line[1:]

    benchmarks = []
    for benchmark, cfg, n, accTarg in lines:
        try:
            baseline = baselines[benchmark]
        except KeyError:
            baseline = (1.0, 1.0)
        benchmarks.append(
            Benchmark(benchmark, cfg, n, accTarg, baseline[0], baseline[1]))

        progress.remainingTicks(len(benchmarks))

    #print LONGBAR
    #print "Fixed (no autotuning) scores:"
    #print SHORTBAR
    #for b in benchmarks:
    #  progress.status("running fixed "+fmtCfg(b.cfg))
    #  b.runFixed()
    #  b.printFixed()
    #score_fixed = geomean(map(Benchmark.scoreFixed, benchmarks))

    #print SHORTBAR
    #print "Fixed Score (pbbenchmark v%s): %.2f" % (VERSION, geomean(map(Benchmark.scoreFixed, benchmarks)))
    #print LONGBAR
    #print

    print LONGBAR
    print "Tuned scores:"
    print SHORTBAR
    for b in benchmarks:
        progress.status("running tuned " + fmtCfg(b.cfg))
        progress.status("autotuning")
        b.autotune()
        b.runTuned()
        b.printTuned()
        progress.tick()

    score_tuned = geomean(map(Benchmark.scoreTuned, benchmarks))
    score_training_time = geomean(map(Benchmark.scoreTrainingTime, benchmarks))

    print SHORTBAR
    print "Tuned Score (pbbenchmark v%s): %.2f" % (VERSION, score_tuned)
    print "Training Time Score (pbbenchmark v%s): %.2f" % (VERSION,
                                                           score_training_time)
    print LONGBAR
    print

    if DEBUG:
        print LONGBAR
        print "Debug:"
        print SHORTBAR
        for b in benchmarks:
            b.printDebug()
        print LONGBAR
        print

    fd = open("./testdata/configs/baselines.csv.latest", "w")
    for b in benchmarks:
        print >> fd, "%s, %f, %f" % (b.benchmark, b.tuned_perf['average'],
                                     b.tuning_time)
    fd.close()

    if not os.path.isdir(LOGDIR):
        os.mkdir(LOGDIR)

    for b in benchmarks:
        writelog(expandLog(b.cfg), b.logEntry())

    writelog(
        expandLog('scores.log'),
        {
            'version': VERSION,
            'score_fixed': -1,  #score_fixed,
            'score_tuned': score_tuned,
            'score_training_time': score_training_time,
            'hostname': socket.gethostname(),
            'timestamp': TIMESTAMP,
            'revision': REV,
        })

    progress.status("done")
    progress.pop()
Example #19
def determineInputSizes():
  progress.status("finding reasonable input size for training... (%d sec) " % INFERINPUTSIZES_SEC)
  options.n=pbutil.inferGoodInputSizes( pbutil.benchmarkToBin(app)
                                      , [inputSizeTarget]
                                      , INFERINPUTSIZES_SEC)[0]
  print "* finding reasonable input size for training... %d" % options.n 
Example #20
     neighbors[(y, False)].append(x)
 if args.interfaces:
     df = pd.read_csv(args.interfaces, index_col='Address')
     # Assumed intent: one per-address dict per column (the original tuple
     # indexing df['ASN', 'Org', 'Otherside'] with to_dict('records') would
     # not yield three address-keyed dicts).
     asns, orgs, othersides = (df['ASN'].to_dict(), df['Org'].to_dict(),
                               df['Otherside'].to_dict())
 else:
     addresses = {
         struct.unpack("!L", socket.inet_aton(addr.strip()))[0]
         for addr in addresses
     }
     ip2as = create_routing_table(args.bgp,
                                  args.ixp_prefixes,
                                  args.ixp_asns,
                                  bgp_compression=args.bgp_compression)
     as2org = AS2Org(args.as2org, include_potaroo=args.potaroo)
     status('Extracting addresses from adjacencies')
     unique_interfaces = {u
                          for u, _ in adjacencies
                          } | {v
                               for _, v in adjacencies}
     finish_status('Found {:,d}'.format(len(unique_interfaces)))
     log.info('Mapping IP addresses to ASes.')
     asns = {}
     for address in unique_interfaces:
         asn = ip2as[address]
         if asn != -1:
             asns[address] = asn
     if as2org:
         log.info('Mapping ASes to Orgs.')
         orgs = {
             address: as2org.get(asn, asn)
Example #21
def main(argv):
  t1=time.time()

  global app
  global cfg 
  global ignore_list
  global defaultArgs
  global substderr
  global options 

  config_tool_path = os.path.split(argv[0])[0] + "/configtool.py"
  fast = False

  parser = optparse.OptionParser(usage="usage: %prog [options] BENCHMARK")
  parser.add_option("--min", type="int", dest="min", default=1)
  parser.add_option("-n", "--random", "--max", type="int", dest="n", default=-1)
  parser.add_option("--offset", type="int", dest="offset", default=0)
  parser.add_option("--max-sec", type="float", dest="maxsec", default=0)
  parser.add_option("-d", "--debug",  action="store_true", dest="debug", default=False)
  parser.add_option("-f", "--fast",  action="store_true", dest="fast", default=False)
  parser.add_option("--threads",      type="int", dest="threads", default=pbutil.cpuCount())
  parser.add_option("-c", "--config", dest="config", default=None)
  parser.add_option("--noisolation", action="store_true", dest="noisolation", default=False)
  parser.add_option("--print", action="store_true", dest="justprint", default=False)
  parser.add_option("--time", action="store_true", dest="time", default=False)
  parser.add_option("--acctrials", type="int", dest="acctrials", default=None)
  parser.add_option("--accimprovetries", type="int", dest="accimprovetries", default=None)
  parser.add_option("--trials", type="int", dest="trials", default=None)
  parser.add_option("--trials-sec", type="float", dest="trialssec", default=None)
  parser.add_option("--trials-max", type="int", dest="trialsmax", default=None)
  parser.add_option("--transform", dest="transform", default=None)
  options,args = parser.parse_args()

  if len(args) != 1:
    parser.error("expected benchmark name as arg")

  cfg=options.config
  app=args[0]

  pbutil.chdirToPetabricksRoot()
  pbutil.compilePetabricks()
  app = pbutil.normalizeBenchmarkName(app)
  pbutil.compileBenchmarks([app])
  
  if options.debug:
    substderr = sys.__stderr__

  if cfg is None:
    cfg = pbutil.benchmarkToCfg(app)

  defaultArgs = ['--config='+cfg, '--threads=%d'%options.threads, '--offset=%d'%options.offset, '--min=%d'%options.min]

  if options.noisolation:
    defaultArgs.append("--noisolation")

  if options.acctrials is not None:
    defaultArgs.append("--acctrials=%d"%options.acctrials)
  if options.trials is not None:
    defaultArgs.append("--trials=%d"%options.trials)
  if options.trialssec is not None:
    defaultArgs.append("--trials-sec=%f"%options.trialssec)
  if options.trialsmax is not None:
    defaultArgs.append("--trials-max=%d"%options.trialsmax)
  if options.accimprovetries is not None:
    defaultArgs.append("--accimprovetries=%d"%options.accimprovetries)

  getIgnoreList()

  try:
    infoxml = parse(pbutil.benchmarkToInfo(app))
  except:
    print "Cannot parse:", pbutil.benchmarkToInfo(app)
    sys.exit(-1)

 #print "Reseting config entries"
 #reset()

  #build index of transforms
  for t in infoxml.getElementsByTagName("transform"):
    transforms[nameof(t)]=t
    if t.getAttribute("templateChoice")=="0":
      transforms[t.getAttribute("templateName")] = t

  if options.transform is None:
    maintx = transforms[mainname()]
  else:
    maintx = transforms[options.transform]
  
  print "Call tree:"
  walkCallTree(maintx, fnup=printTx)
  print
  print "Autotuning:"

  progress.status("building work queue")
 
  if options.n <= 0:
    tasks.append(TuneTask("determineInputSizes", determineInputSizes))
    
  if options.time:
    tasks.append(TuneTask("runTimingTest", lambda:runTimingTest(maintx)))

  #build list of tasks
  if not options.fast:
    walkCallTree(maintx, lambda tx, depth, loops: enqueueAutotuneCmds(tx, maintx, 1, depth, loops))
  walkCallTree(maintx, lambda tx, depth, loops: enqueueAutotuneCmds(tx, maintx, 2, depth, loops))
  
  if options.time:
    tasks.append(TuneTask("runTimingTest", lambda:runTimingTest(maintx)))

  progress.status("autotuning")

  while len(tasks)>0:
    w1=remainingTaskWeight()
    task=tasks.pop(0)
    w2=remainingTaskWeight()
    progress.remaining(w1, w2)
    task.run()
  progress.clear()

  t2=time.time()
  sec=t2-t1

  

  print "autotuning took %.2f sec"%(t2-t1)
  for k,v in taskStats.items():
    print "  %.2f sec in %s"%(v.sec, k)
    sec -= v.sec
  print "  %.2f sec in unknown"%sec
  
  names=taskStats.keys()
  weights=map(lambda x: x.sec/float(max(x.count, 1)), taskStats.values())
  scale=len(weights)/sum(weights)
  print "Suggested weights:"
  print "taskStats = {" + ", ".join(map(lambda i: "'%s':TaskStats(%.2f)"%(names[i], scale*weights[i]), xrange(len(names)))) + "}"
Example #22
    def add(self, iter, pars, trn_loss, val_loss, tst_loss):
        """
        Adds an iteration.
        :param iter: iteration number
        :param pars: parameters used in this iteration
        :param trn_loss: training set loss
        :param val_loss: validation set loss
        :param tst_loss: test set loss
        """
        iter = int(iter)
        trn_loss = float(trn_loss)
        val_loss = float(val_loss)
        tst_loss = float(tst_loss)

        # keep track of best results so far
        if val_loss < self.best_val_loss - self.min_improvement:
            self.best_iter = iter
            self.best_val_loss = val_loss
            self.best_tst_loss = tst_loss
            if isinstance(pars, gp.garray):
                self.best_pars = gp.garray(pars, copy=True)
            else:
                self.best_pars = np.copy(pars)
            self.last_val_improvement = iter

        # termination criteria
        mmi = None
        if self.max_missed_val_improvements is not None:
            if self.iteration_gain != 0:
                mmi = max(self.max_missed_val_improvements,
                          (self.iteration_gain - 1) *
                          self.last_val_improvement)
            else:
                mmi = self.max_missed_val_improvements
            if iter - self.last_val_improvement > mmi:
                self.termination_reason = 'no_improvement'
                self.should_terminate = True
        if self.desired_loss is not None and val_loss <= self.desired_loss:
            self.termination_reason = 'desired_loss_reached'
            self.should_terminate = True
        if self.min_iters is not None and iter < self.min_iters:
            self.termination_reason = ''
            self.should_terminate = False
        if self.max_iters is not None and iter >= self.max_iters:
            self.termination_reason = 'max_iters_reached'
            self.should_terminate = True
        if not (np.isfinite(trn_loss) and np.isfinite(val_loss)):
            self.termination_reason = 'nan_or_inf_loss'
            self.should_terminate = True

        # store current losses
        self.history = np.hstack((self.history, [[iter], [trn_loss],
                                                 [val_loss], [tst_loss]]))

        # display progress
        if self.show_progress:
            caption = "training: %9.5f  validation: %9.5f (best: %9.5f)  " \
                      "test: %9.5f   " % \
                      (trn_loss, val_loss, self.best_val_loss, tst_loss)
            if mmi is not None:
                caption += "    (patience for %d non-improving iterations) " % \
                           (mmi - (iter - self.last_val_improvement))
            progress.status(iter, caption=caption)
        if time() > self._last_auto_plot_time + self.auto_plot_interval:
            try:
                plt.figure()
                self.plot()
                plt.title("training in progress")
                plt.savefig(join(self.state_dir, "loss.pdf"))
                plt.close()
                self._last_auto_plot_time = time()
            except:
                pass

        # termination by user
        key = get_key()
        if key == "q":
            print
            print "Termination by user."
            self.termination_reason = 'user'
            self.should_terminate = True
        elif key == "d":
            print
            print "Learning rate decrease by user."
            self.termination_reason = 'user_learning_rate_decrease'
            self.should_terminate = True