Example #1
0
def main():
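  # Silence autotuner warning categories that are expected and harmless while benchmarking.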
  warnings.simplefilter('ignore', tunerwarnings.NewProgramCrash)
  warnings.simplefilter('ignore', tunerwarnings.TargetNotMet)
  warnings.simplefilter('ignore', tunerwarnings.NanAccuracy)

  progress.push()
  progress.status("compiling benchmarks")

  pbutil.chdirToPetabricksRoot()
  pbutil.compilePetabricks()

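  # Compile every benchmark in the manifest; r holds the per-benchmark compile results,
  # lines holds (benchmark, cfg, n, accuracy_target) entries.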
  r, lines = pbutil.loadAndCompileBenchmarks("./scripts/pbbenchmark.tests")

  if filter(lambda x: x.rv!=0, r):
    print "compile failed"
    sys.exit(1)

  print 
  print "All scores are relative performance to a baseline system."
  print "Higher is better."
  print

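  # Baseline numbers keyed by benchmark name; the two columns appear to be the
  # baseline average runtime and tuning time (cf. baselines.csv.latest below).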
  baselines = dict()
  for line in csv.reader(open("./testdata/configs/baselines.csv")):
    if len(line)>=3:
      baselines[line[0]] = line[1:]

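  # Benchmarks without an entry in baselines.csv fall back to a neutral (1.0, 1.0) baseline.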
  benchmarks=[]
  for benchmark, cfg, n, accTarg in lines:
    try:
      baseline = baselines[benchmark]
    except KeyError:
      baseline = (1.0, 1.0)
    benchmarks.append(Benchmark(benchmark, cfg, n, accTarg, baseline[0], baseline[1]))


  print LONGBAR
  print "Fixed (no autotuning) scores:"
  print SHORTBAR
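  # One progress tick per tuned benchmark plus three coarse ticks: before the fixed runs, after them, and at the end.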
  progress.remainingTicks(len(benchmarks)+3)
  progress.tick()
  for b in benchmarks:
    progress.status("running fixed "+fmtCfg(b.cfg))
    b.runFixed()
    b.printFixed()
  progress.tick()
  score_fixed = geomean(map(Benchmark.scoreFixed, benchmarks))

  print SHORTBAR
  print "Fixed Score (pbbenchmark v%s): %.2f" % (VERSION, geomean(map(Benchmark.scoreFixed, benchmarks)))
  print LONGBAR
  print



  print LONGBAR
  print "Tuned scores:"
  print SHORTBAR
  for b in benchmarks:
    progress.status("running tuned "+fmtCfg(b.cfg))
    progress.status("autotuning")
    b.autotune()
    b.runTuned()
    b.printTuned()
    progress.tick()
  
  score_tuned = geomean(map(Benchmark.scoreTuned, benchmarks))
  score_training_time = geomean(map(Benchmark.scoreTrainingTime, benchmarks))

  print SHORTBAR
  print "Tuned Score (pbbenchmark v%s): %.2f" % (VERSION, score_tuned)
  print "Training Time Score (pbbenchmark v%s): %.2f" % (VERSION, score_training_time)
  print LONGBAR
  print

  if DEBUG:
    print LONGBAR
    print "Debug:"
    print SHORTBAR
    for b in benchmarks:
      b.printDebug()
    print LONGBAR
    print

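  # Dump the freshly tuned numbers in baselines.csv format so they can be compared
  # against, or promoted to, the checked-in baselines.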
  fd = open("./testdata/configs/baselines.csv.latest", "w")
  for b in benchmarks:
    print >>fd, "%s, %f, %f" % (b.benchmark, b.tuned_perf['average'], b.tuning_time)
  fd.close()

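  # Make sure the log directory exists, then record a per-benchmark entry and the aggregate scores.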
  if not os.path.isdir(LOGDIR):
    os.mkdir(LOGDIR)
  
  for b in benchmarks:
    writelog(expandLog(b.cfg), b.logEntry())
    
  writelog(expandLog('scores.log'), {
      'version'             : VERSION,
      'score_fixed'         : score_fixed,
      'score_tuned'         : score_tuned,
      'score_training_time' : score_training_time,
      'hostname'            : socket.gethostname(),
      'timestamp'           : TIMESTAMP,
    })
  
  progress.tick()
  progress.status("done")
  progress.pop()
Example #2
0
from optparse import OptionParser
parser = OptionParser(usage="usage: smoketest.py [options]")
parser.add_option("--learning", action="store_true", dest="learning", default=False, help="enable heuristics learning")
parser.add_option("--heuristics",            type="string", help="name of the file containing the set of heuristics to use. Automatically enables --learning", default=None)

(options, args) = parser.parse_args()

if options.heuristics:
  options.learning = True

if options.learning:
  print "Learning of heuristics is ACTIVE"
  if options.heuristics:
    print "Using heuristics file: "+ str(options.heuristics)
  else:
    print "Using only heuristics in the database"

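# Compile and run every benchmark in the smoke-test manifest, timing the whole pass.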
t1=time.time()
results,b=pbutil.loadAndCompileBenchmarks("./scripts/smoketest.tests", args, testBenchmark, postfn=checkBenchmark, learning=options.learning, heuristicSetFileName=options.heuristics, noLearningList=check_exclude)
t2=time.time()


passed=len(filter(lambda x: x.rv==0, results))
total=len(results)

print "%d of %d tests passed (took %.2fs)"%(passed,total,(t2-t1))

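# Exit status is the failure count, capped at 124 presumably to stay clear of exit
# codes with special meanings (126, 127, and 128+signal in most shells).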
sys.exit(min(total-passed, 124))

Example #3
0
def main():
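    # Silence autotuner warning categories that are expected during benchmarking.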
    warnings.simplefilter('ignore', tunerwarnings.NewProgramCrash)
    warnings.simplefilter('ignore', tunerwarnings.SmallInputProgramCrash)
    warnings.simplefilter('ignore', tunerwarnings.TargetNotMet)
    warnings.simplefilter('ignore', tunerwarnings.NanAccuracy)

    #Parse input options
    from optparse import OptionParser
    parser = OptionParser(usage="usage: pbbenchmark [options]")
    parser.add_option("--learning",
                      action="store_true",
                      dest="learning",
                      default=False,
                      help="enable heuristics learning")
    parser.add_option(
        "--heuristics",
        type="string",
        help=
        "name of the file containing the set of heuristics to use. Automatically enables --learning",
        default=None)

    (options, args) = parser.parse_args()

    if options.heuristics:
        options.learning = True

    if options.learning:
        print "Learning of heuristics is ACTIVE"
        if options.heuristics:
            print "Using heuristics file: " + str(options.heuristics)
        else:
            print "Using only heuristics in the database"

    progress.push()
    progress.status("compiling benchmarks")

    pbutil.chdirToPetabricksRoot()
    pbutil.compilePetabricks()

    global REV
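    # Revision lookup is best-effort; if it fails, REV keeps its module-level default.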
    try:
        REV = pbutil.getRevision()
    except:
        pass

    r, lines = pbutil.loadAndCompileBenchmarks(
        "./scripts/pbbenchmark.tests",
        searchterms=sys.argv[1:],
        learning=options.learning,
        heuristicSetFileName=options.heuristics)

    if filter(lambda x: x.rv != 0, r):
        print "compile failed"
        sys.exit(1)

    print
    print "All scores are relative performance to a baseline system."
    print "Higher is better."
    print

    baselines = dict()
    for line in csv.reader(open("./testdata/configs/baselines.csv")):
        if len(line) >= 3:
            baselines[line[0]] = line[1:]

    benchmarks = []
    for benchmark, cfg, n, accTarg in lines:
        try:
            baseline = baselines[benchmark]
        except KeyError:
            baseline = (1.0, 1.0)
        benchmarks.append(
            Benchmark(benchmark, cfg, n, accTarg, baseline[0], baseline[1]))

    progress.remainingTicks(len(benchmarks))

    #print LONGBAR
    #print "Fixed (no autotuning) scores:"
    #print SHORTBAR
    #for b in benchmarks:
    #  progress.status("running fixed "+fmtCfg(b.cfg))
    #  b.runFixed()
    #  b.printFixed()
    #score_fixed = geomean(map(Benchmark.scoreFixed, benchmarks))

    #print SHORTBAR
    #print "Fixed Score (pbbenchmark v%s): %.2f" % (VERSION, geomean(map(Benchmark.scoreFixed, benchmarks)))
    #print LONGBAR
    #print

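    # The fixed (untuned) pass is currently disabled; only tuned scores are produced
    # and score_fixed is logged as -1.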
    print LONGBAR
    print "Tuned scores:"
    print SHORTBAR
    for b in benchmarks:
        progress.status("running tuned " + fmtCfg(b.cfg))
        progress.status("autotuning")
        b.autotune()
        b.runTuned()
        b.printTuned()
        progress.tick()

    score_tuned = geomean(map(Benchmark.scoreTuned, benchmarks))
    score_training_time = geomean(map(Benchmark.scoreTrainingTime, benchmarks))

    print SHORTBAR
    print "Tuned Score (pbbenchmark v%s): %.2f" % (VERSION, score_tuned)
    print "Training Time Score (pbbenchmark v%s): %.2f" % (VERSION,
                                                           score_training_time)
    print LONGBAR
    print

    if DEBUG:
        print LONGBAR
        print "Debug:"
        print SHORTBAR
        for b in benchmarks:
            b.printDebug()
        print LONGBAR
        print

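    # Dump the tuned numbers in baselines.csv format for comparison with the checked-in baselines.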
    fd = open("./testdata/configs/baselines.csv.latest", "w")
    for b in benchmarks:
        print >> fd, "%s, %f, %f" % (b.benchmark, b.tuned_perf['average'],
                                     b.tuning_time)
    fd.close()

    if not os.path.isdir(LOGDIR):
        os.mkdir(LOGDIR)

    for b in benchmarks:
        writelog(expandLog(b.cfg), b.logEntry())

    writelog(
        expandLog('scores.log'),
        {
            'version': VERSION,
            'score_fixed': -1,  #score_fixed,
            'score_tuned': score_tuned,
            'score_training_time': score_training_time,
            'hostname': socket.gethostname(),
            'timestamp': TIMESTAMP,
            'revision': REV,
        })

    progress.status("done")
    progress.pop()
Example #4
0
(options, args) = parser.parse_args()

if options.heuristics:
    options.learning = True

if options.learning:
    print "Learning of heuristics is ACTIVE"
    if options.heuristics:
        print "Using heuristics file: " + str(options.heuristics)
    else:
        print "Using only heuristics in the database"

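# Compile and run the smoke tests, timing the whole pass.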
t1 = time.time()
results, b = pbutil.loadAndCompileBenchmarks(
    "./scripts/smoketest.tests",
    args,
    testBenchmark,
    postfn=checkBenchmark,
    learning=options.learning,
    heuristicSetFileName=options.heuristics,
    noLearningList=check_exclude)
t2 = time.time()

passed = len(filter(lambda x: x.rv == 0, results))
total = len(results)

print "%d of %d tests passed (took %.2fs)" % (passed, total, (t2 - t1))

sys.exit(min(total - passed, 124))
Example #5
0
def main():
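  # Ignore autotuner warning categories that are expected while benchmarking.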
  warnings.simplefilter('ignore', tunerwarnings.NewProgramCrash)
  warnings.simplefilter('ignore', tunerwarnings.SmallInputProgramCrash)
  warnings.simplefilter('ignore', tunerwarnings.TargetNotMet)
  warnings.simplefilter('ignore', tunerwarnings.NanAccuracy)

  #Parse input options
  from optparse import OptionParser
  parser = OptionParser(usage="usage: pbbenchmark [options]")
  parser.add_option("--learning", action="store_true", dest="learning", default=False, help="enable heuristics learning")
  parser.add_option("--heuristics", type="string", help="name of the file containing the set of heuristics to use. Automatically enables --learning", default=None)

  (options, args) = parser.parse_args()

  if options.heuristics:
    options.learning = True

  if options.learning:
    print "Learning of heuristics is ACTIVE"
    if options.heuristics:
      print "Using heuristics file: "+ str(options.heuristics)
    else:
      print "Using only heuristics in the database"

  progress.push()
  progress.status("compiling benchmarks")

  pbutil.chdirToPetabricksRoot()
  pbutil.compilePetabricks()

  global REV
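  # Revision lookup is best-effort; on failure REV keeps its module-level default.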
  try:
    REV=pbutil.getRevision()
  except:
    pass

  r, lines = pbutil.loadAndCompileBenchmarks("./scripts/pbbenchmark.tests", searchterms=sys.argv[1:], learning=options.learning, heuristicSetFileName=options.heuristics)

  if filter(lambda x: x.rv!=0, r):
    print "compile failed"
    sys.exit(1)

  print 
  print "All scores are relative performance to a baseline system."
  print "Higher is better."
  print

  baselines = dict()
  for line in csv.reader(open("./testdata/configs/baselines.csv")):
    if len(line)>=3:
      baselines[line[0]] = line[1:]

  benchmarks=[]
  for benchmark, cfg, n, accTarg in lines:
    try:
      baseline = baselines[benchmark]
    except KeyError:
      baseline = (1.0, 1.0)
    benchmarks.append(Benchmark(benchmark, cfg, n, accTarg, baseline[0], baseline[1]))

  progress.remainingTicks(len(benchmarks))

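  # The fixed (untuned) pass is currently disabled; score_fixed is logged as -1 below.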
  #print LONGBAR
  #print "Fixed (no autotuning) scores:"
  #print SHORTBAR
  #for b in benchmarks:
  #  progress.status("running fixed "+fmtCfg(b.cfg))
  #  b.runFixed()
  #  b.printFixed()
  #score_fixed = geomean(map(Benchmark.scoreFixed, benchmarks))

  #print SHORTBAR
  #print "Fixed Score (pbbenchmark v%s): %.2f" % (VERSION, geomean(map(Benchmark.scoreFixed, benchmarks)))
  #print LONGBAR
  #print



  print LONGBAR
  print "Tuned scores:"
  print SHORTBAR
  for b in benchmarks:
    progress.status("running tuned "+fmtCfg(b.cfg))
    progress.status("autotuning")
    b.autotune()
    b.runTuned()
    b.printTuned()
    progress.tick()
  
  score_tuned = geomean(map(Benchmark.scoreTuned, benchmarks))
  score_training_time = geomean(map(Benchmark.scoreTrainingTime, benchmarks))

  print SHORTBAR
  print "Tuned Score (pbbenchmark v%s): %.2f" % (VERSION, score_tuned)
  print "Training Time Score (pbbenchmark v%s): %.2f" % (VERSION, score_training_time)
  print LONGBAR
  print

  if DEBUG:
    print LONGBAR
    print "Debug:"
    print SHORTBAR
    for b in benchmarks:
      b.printDebug()
    print LONGBAR
    print

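  # Save the tuned results in baselines.csv format so they can replace the checked-in baselines if desired.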
  fd = open("./testdata/configs/baselines.csv.latest", "w")
  for b in benchmarks:
    print >>fd, "%s, %f, %f" % (b.benchmark, b.tuned_perf['average'], b.tuning_time)
  fd.close()

  if not os.path.isdir(LOGDIR):
    os.mkdir(LOGDIR)
  
  for b in benchmarks:
    writelog(expandLog(b.cfg), b.logEntry())
    
  writelog(expandLog('scores.log'), {
      'version'             : VERSION,
      'score_fixed'         : -1,#score_fixed,
      'score_tuned'         : score_tuned,
      'score_training_time' : score_training_time,
      'hostname'            : socket.gethostname(),
      'timestamp'           : TIMESTAMP,
      'revision'            : REV,
    })
  
  progress.status("done")
  progress.pop()
Example #6
0
      return False

    if diffFiles(outfile, outfile+".latest"):
      time.sleep(0.1) #try letting the filesystem settle down
      if diffFiles(outfile, outfile+".latest"):
        print "run FAILED (wrong output)"
        return False
    
    print "run PASSED"
    return True

  return test()


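# A 'nocheck' argument disables output verification and is stripped from the test arguments.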
if 'nocheck' in sys.argv[1:]:
  sys.argv[1:] = filter(lambda x: x!='nocheck', sys.argv[1:])
  CHECK = False

t1=time.time()
results,b=pbutil.loadAndCompileBenchmarks("./scripts/smoketest.tests", sys.argv[1:], testBenchmark, postfn=checkBenchmark)
t2=time.time()


passed=len(filter(lambda x: x.rv==0, results))
total=len(results)

print "%d of %d tests passed (took %.2fs)"%(passed,total,(t2-t1))

sys.exit(min(total-passed, 124))