Example #1
def init(benchmark, acf=createChoiceSiteMutators, taf=createTunableMutators):
  if config.debug:
    logging.basicConfig(level=logging.DEBUG)
    config.pause_on_crash = True
  if not config.threads:
    config.threads = pbutil.cpuCount()
  for k in filter(len, config.abort_on.split(',')):
    warnings.simplefilter('error', getattr(tunerwarnings,k))
  infoxml = TrainingInfo(pbutil.benchmarkToInfo(benchmark))
  if not config.main:
    config.main = mainname([pbutil.benchmarkToBin(benchmark)])
  tester = CandidateTester(benchmark, config.min_input_size)
  if config.seed is None:
    cfg = defaultConfigFile(pbutil.benchmarkToBin(tester.app))
  else:
    cfg = configtool.ConfigFile(config.seed)
  candidate = Candidate(cfg, infoxml.transform(config.main))
  addMutators(candidate, infoxml.globalsec(), acf, taf)
  addMutators(candidate, infoxml.transform(config.main), acf, taf)
  candidate.addMutator(mutators.MultiMutator(2))
  if not config.delete_output_dir:
    storagedirs.cur.dumpConfig()
    storagedirs.cur.dumpGitStatus()
    storagedirs.cur.saveFile(pbutil.benchmarkToInfo(benchmark))
    storagedirs.cur.saveFile(pbutil.benchmarkToBin(benchmark))
  return candidate, tester
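The init() above promotes every warning class named in config.abort_on to a hard error via warnings.simplefilter. A minimal standalone sketch of that pattern, assuming a hypothetical SlowTestWarning class and an abort_on string standing in for tunerwarnings and config.abort_on:

import warnings

class SlowTestWarning(UserWarning):
    """Hypothetical stand-in for a class from tunerwarnings."""

abort_on = "SlowTestWarning"  # stand-in for config.abort_on

for name in filter(len, abort_on.split(',')):
    # looking the class up by name mirrors getattr(tunerwarnings, k) in init()
    warnings.simplefilter('error', globals()[name])

try:
    warnings.warn("candidate ran slowly", SlowTestWarning)
except SlowTestWarning as e:
    print("promoted to error: %s" % e)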
Example #2
def determineInputSizes():
    progress.status("finding reasonable input size for training... (%d sec) " %
                    INFERINPUTSIZES_SEC)
    options.n = pbutil.inferGoodInputSizes(pbutil.benchmarkToBin(app),
                                           [inputSizeTarget],
                                           INFERINPUTSIZES_SEC)[0]
    print "* finding reasonable input size for training... %d" % options.n
Example #3
 def run(self, cfg):
   return pbutil.executeTimingRun(pbutil.benchmarkToBin(self.benchmark),
                                  int(self.n),
                                  ['--trials=%d'%TRAILS,
                                   '--config='+cfg,
                                   '--accuracy'],
                                  None,
                                  ['timing', 'accuracy'])
Example #4
def timingRun(ctx, n, limit=None):
  if limit >= maxint:
    limit=None
  if limit is not None:
    limit=int(math.ceil(limit))
  args=["--transform="+nameof(ctx)]
  args.extend(defaultArgs)
  return pbutil.executeTimingRun(pbutil.benchmarkToBin(app), n, args, limit)['average']
Example #5
def mkcmd(args):
  t=[pbutil.benchmarkToBin(app)]
  t.extend(defaultArgs)
  if type(args) is type([]):
    t.extend(args)
  else:
    t.append(args)
  return t
Example #6
def mkcmd(args):
    t = [pbutil.benchmarkToBin(app)]
    t.extend(defaultArgs)
    if type(args) is type([]):
        t.extend(args)
    else:
        t.append(args)
    return t
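Both mkcmd variants above use type(args) is type([]) to accept either a single extra argument or a list of them. A standalone sketch of the same idiom spelled with isinstance; the binary path and default arguments are hypothetical stand-ins for pbutil.benchmarkToBin(app) and defaultArgs:

def mkcmd(args, binary="./benchmark_bin", default_args=("--time",)):
    # Start from the binary and its default arguments, then append either
    # one extra argument or a whole list of them.
    cmd = [binary]
    cmd.extend(default_args)
    if isinstance(args, (list, tuple)):
        cmd.extend(args)
    else:
        cmd.append(args)
    return cmd

print(mkcmd("--trials=5"))                      # single argument
print(mkcmd(["--trials=5", "--config=a.cfg"]))  # list of arguments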
Example #7
def main(benchmark, n, filename):
    if os.path.isdir(filename):
        filename = os.path.join(filename, 'stats/candidatelog.csv')
    f = open(filename)
    infoxml = TrainingInfo(pbutil.benchmarkToInfo(benchmark))
    main = mainname([pbutil.benchmarkToBin(benchmark)])
    infoxml = infoxml.transform(main)
    binpath = pbutil.benchmarkToBin(benchmark)
    tester = CandidateTester(benchmark, n)
    root = os.path.dirname(filename)

    def findconfig(c):
        if c[0] == '/':
            c = c[1:]
        if os.path.isfile(os.path.join(root, c)):
            return os.path.join(root, c)
        if os.path.isfile(os.path.join(root, '..', c)):
            return os.path.join(root, '..', c)
        return None

    rows = list(csv.DictReader(f))
    for i, row in enumerate(rows):
        if options.onlyrounds \
            and i+1<len(rows) \
            and row.has_key('round_number') \
            and rows[i+1]['round_number']==row['round_number']:
            continue
        config = findconfig(row['config_path'])
        row['tests'] = int(row['tests_complete']) + int(
            row['tests_timeout']) + int(row['tests_crashed'])
        candidate = Candidate(ConfigFile(config), infoxml)
        while candidate.numTests(n) < options.trials:
            try:
                tester.testN(candidate, options.trials, options.timeout)
            except candidatetester.CrashException, e:
                print >> sys.stderr, e
        try:
            row['minperf'] = candidate.metrics[0][n].min()
            row['perf_on_%d' % n], row['perf_on_%d_ci' %
                                       n] = candidate.metrics[0][n].interval(
                                           options.confidence)
            row['invperf'] = 1.0 / row['perf_on_%d' % n]
        except Exception, e:
            row['minperf'] = -1
            row['perf_on_%d' % n] = -1
            print >> sys.stderr, e
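The loop above relies on csv.DictReader yielding each row as a dict keyed by the CSV header, so derived columns can simply be assigned. A self-contained sketch of that pattern that computes the same 'tests' column and writes the augmented rows back out; the file names are hypothetical and error handling is omitted:

import csv

with open('candidatelog.csv') as f:
    rows = list(csv.DictReader(f))

for row in rows:
    # same derivation as row['tests'] above: complete + timeout + crashed
    row['tests'] = (int(row['tests_complete']) +
                    int(row['tests_timeout']) +
                    int(row['tests_crashed']))

fieldnames = list(rows[0].keys()) if rows else []
with open('candidatelog.augmented.csv', 'w') as f:
    writer = csv.DictWriter(f, fieldnames=fieldnames)
    writer.writeheader()
    writer.writerows(rows)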
Example #8
def timingRun(ctx, n, limit=None):
    if limit >= maxint:
        limit = None
    if limit is not None:
        limit = int(math.ceil(limit))
    args = ["--transform=" + nameof(ctx)]
    args.extend(defaultArgs)
    return pbutil.executeTimingRun(pbutil.benchmarkToBin(app), n, args,
                                   limit)['average']
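The first two lines of timingRun normalize the optional limit: anything at or above maxint means "no limit", and finite values are rounded up to whole seconds. A tiny standalone sketch using sys.maxsize (the Python 3 counterpart of the maxint, presumably sys.maxint, referenced above):

import math
import sys

def normalize_limit(limit):
    # None or an "effectively infinite" value means no limit at all
    if limit is not None and limit >= sys.maxsize:
        return None
    if limit is not None:
        return int(math.ceil(limit))   # round partial seconds up
    return None

print(normalize_limit(None))         # None  (no limit)
print(normalize_limit(2.3))          # 3     (rounded up)
print(normalize_limit(sys.maxsize))  # None  (treated as unlimited)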
Example #9
def init(benchmark,
         tester_lambda=None,
         pop_lambda=None,
         hlconfig_lambda=None,
         config_lambda=None):
    if config.debug:
        logging.basicConfig(level=logging.DEBUG)
        config.pause_on_crash = True
    if not config.threads:
        config.threads = pbutil.cpuCount()
    for k in filter(len, config.abort_on.split(',')):
        warnings.simplefilter('error', getattr(tunerwarnings, k))
    if hlconfig_lambda is not None:
        hlconfig = hlconfig_lambda()
    else:
        infoxml = TrainingInfo(pbutil.benchmarkToInfo(benchmark))
        hlconfig = HighLevelConfig(infoxml)
    if not config.main:
        if tester_lambda is None and pop_lambda is None and hlconfig_lambda is None:
            config.main = mainname([pbutil.benchmarkToBin(benchmark)])
    if tester_lambda is not None:
        tester = tester_lambda(benchmark, config.min_input_size)
    else:
        tester = CandidateTester(benchmark, config.min_input_size)
    if config_lambda is not None:
        cfg = config_lambda()
    else:
        if config.seed is None:
            cfg = defaultConfigFile(pbutil.benchmarkToBin(tester.app))
        else:
            cfg = configtool.ConfigFile(config.seed)
    candidate = Candidate(cfg)
    if hlconfig_lambda is None:
        if not config.delete_output_dir:
            storagedirs.cur.dumpConfig()
            storagedirs.cur.dumpGitStatus()
            storagedirs.cur.saveFile(pbutil.benchmarkToInfo(benchmark))
            storagedirs.cur.saveFile(pbutil.benchmarkToBin(benchmark))
        if not infoxml.transform(
                config.main).isVariableAccuracy() and config.accuracy_target:
            logging.info("clearing accuracy_target")
            config.accuracy_target = None
    return candidate, tester, hlconfig
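This init() variant takes optional factory callables (tester_lambda, config_lambda, ...) and only falls back to the default constructors when they are None. A minimal sketch of that injection pattern; DefaultTester and DefaultConfig are hypothetical stand-ins for CandidateTester and the config objects:

class DefaultTester(object):
    def __init__(self, benchmark, min_input_size):
        self.benchmark = benchmark
        self.min_input_size = min_input_size

class DefaultConfig(object):
    pass

def init(benchmark, min_input_size, tester_factory=None, config_factory=None):
    # use the injected factory when given, otherwise build the default object
    tester = (tester_factory(benchmark, min_input_size)
              if tester_factory is not None
              else DefaultTester(benchmark, min_input_size))
    cfg = config_factory() if config_factory is not None else DefaultConfig()
    return tester, cfg

tester, cfg = init('multiply', 64)                                       # defaults
tester, cfg = init('multiply', 64,
                   tester_factory=lambda b, n: DefaultTester(b, n * 2))  # injected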
Example #10
def main(benchmark, n, filename):
  if os.path.isdir(filename):
    filename=os.path.join(filename, 'stats/candidatelog.csv')
  f = open(filename)
  infoxml = TrainingInfo(pbutil.benchmarkToInfo(benchmark))
  main = mainname([pbutil.benchmarkToBin(benchmark)])
  infoxml = infoxml.transform(main)
  binpath=pbutil.benchmarkToBin(benchmark)
  tester = CandidateTester(benchmark, n)
  root = os.path.dirname(filename)
  def findconfig(c):
    if c[0]=='/':
      c=c[1:]
    if os.path.isfile(os.path.join(root, c)):
      return os.path.join(root, c)
    if os.path.isfile(os.path.join(root, '..', c)):
      return os.path.join(root, '..', c)
    return None
  rows = list(csv.DictReader(f))
  for i, row in enumerate(rows):
    if options.onlyrounds \
        and i+1<len(rows) \
        and row.has_key('round_number') \
        and rows[i+1]['round_number']==row['round_number']:
      continue
    config = findconfig(row['config_path'])
    row['tests'] = int(row['tests_complete'])+int(row['tests_timeout'])+int(row['tests_crashed'])
    candidate = Candidate(ConfigFile(config), infoxml)
    while candidate.numTests(n)<options.trials:
      try:
        tester.testN(candidate, options.trials, options.timeout)
      except candidatetester.CrashException, e:
        print >>sys.stderr, e
    try:
      row['minperf'] = candidate.metrics[0][n].min()
      row['perf_on_%d'%n], row['perf_on_%d_ci'%n] = candidate.metrics[0][n].interval(options.confidence)
      row['invperf']=1.0/row['perf_on_%d'%n]
    except Exception,e:
      row['minperf'] = -1
      row['perf_on_%d'%n] = -1
      print >>sys.stderr, e
Example #11
def testBenchmark(b):
  name=b[0]
  bin=pbutil.benchmarkToBin(name)
  cfg=pbutil.benchmarkToCfg(name)

  if not os.path.isfile(bin):
    return False
  
  #build cmd
  hash=name
  iofiles=[]
  for x in b[1:]:
    iofiles.append(resolveInputPath(x))
    hash+=" "+os.path.basename(x)
  outfile="./testdata/.output/"+re.sub("[ /.]",'_',hash)
  iofiles.append(outfile+".latest")

  try:
    cmd=[bin, '--fixedrandom', '--config=%s.cfg'%outfile, '--reset']
    if run(cmd) != 0:
      print "ERROR: reset config failed"
      return False
  except OSError:
    print "ERROR: program not runnable"
    return False

  if os.path.isfile("%s.cfg.default"%outfile):
    shutil.copy("%s.cfg.default"%outfile, "%s.cfg"%outfile)

  try:
    infoxml=parse(pbutil.benchmarkToInfo(name))
  except:
    print "invalid *.info file"
    return False

  def test():
    cmd=[bin, '--fixedrandom', '--config=%s.cfg'%outfile]
    cmd.extend(iofiles)
    rv = run(cmd)
    if rv != 0:
      print "run FAILED (status=%d, cmd=%s)"%(rv, ' '.join(cmd))
      return False

    if diffFiles(outfile, outfile+".latest"):
      time.sleep(0.1) #try letting the filesystem settle down
      if diffFiles(outfile, outfile+".latest"):
        print "run FAILED (wrong output)"
        return False
    
    print "run PASSED"
    return True

  return test()
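The run() helper called by testBenchmark is not shown in these excerpts. A plausible minimal stand-in based on the standard library (an assumption, not the project's actual implementation): it returns the exit status and lets OSError propagate, which matches how testBenchmark reports "program not runnable" separately from a failing run:

import subprocess

def run(cmd):
    """Run a command list and return its exit status (0 means success).
    OSError from a missing or non-executable binary propagates to the caller."""
    return subprocess.call(cmd)

print(run(['/bin/echo', 'hello']))   # on a typical Unix system: prints "hello", then 0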
Example #12
def runCfg(benchmark, cfg, n, args=['--trials=5']):
  fd, tmp = tempfile.mkstemp('.cfg')
  try:
    os.close(fd)
    cfg.save(tmp)
    perf, acc = pbutil.executeTimingRun(pbutil.benchmarkToBin(benchmark),
                                        int(n),
                                        args+['--config='+tmp, '--accuracy'],
                                        None,
                                        ['timing', 'accuracy'])
    return perf['average'], acc['average']
  finally:
    os.unlink(tmp)
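runCfg writes the candidate configuration to a named temporary file, passes its path via --config=, and removes the file in a finally block. A self-contained sketch of that mkstemp/close/unlink pattern; the save and consume steps are stand-ins for cfg.save() and pbutil.executeTimingRun():

import os
import tempfile

def with_temp_config(contents, consumer):
    fd, tmp = tempfile.mkstemp('.cfg')
    try:
        os.close(fd)                   # only the path is needed, not the open fd
        with open(tmp, 'w') as f:      # stand-in for cfg.save(tmp)
            f.write(contents)
        return consumer(tmp)           # stand-in for the '--config='+tmp run
    finally:
        os.unlink(tmp)                 # always delete the temporary file

print(with_temp_config("threads = 4\n", lambda path: open(path).read()))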
Example #13
def init(benchmark, tester_lambda=None, pop_lambda=None, hlconfig_lambda=None, config_lambda=None):
  if config.debug:
    logging.basicConfig(level=logging.DEBUG)
    config.pause_on_crash = True
  if not config.threads:
    config.threads = pbutil.cpuCount()
  for k in filter(len, config.abort_on.split(',')):
    warnings.simplefilter('error', getattr(tunerwarnings,k))
  if hlconfig_lambda is not None:
    hlconfig = hlconfig_lambda()
  else:
    infoxml = TrainingInfo(pbutil.benchmarkToInfo(benchmark))
    hlconfig = HighLevelConfig(infoxml)
  if not config.main:
    if tester_lambda is None and pop_lambda is None and hlconfig_lambda is None:
      config.main = mainname([pbutil.benchmarkToBin(benchmark)])
  if tester_lambda is not None:
    tester = tester_lambda(benchmark, config.min_input_size)
  else:
    tester = CandidateTester(benchmark, config.min_input_size)
  if config_lambda is not None:
    cfg = config_lambda()
  else:
    if config.seed is None:
      cfg = defaultConfigFile(pbutil.benchmarkToBin(tester.app))
    else:
      cfg = configtool.ConfigFile(config.seed)
  candidate = Candidate(cfg)
  if hlconfig_lambda is None:
    if not config.delete_output_dir:
      storagedirs.cur.dumpConfig()
      storagedirs.cur.dumpGitStatus()
      storagedirs.cur.saveFile(pbutil.benchmarkToInfo(benchmark))
      storagedirs.cur.saveFile(pbutil.benchmarkToBin(benchmark))
    if not infoxml.transform(config.main).isVariableAccuracy() and config.accuracy_target:
      logging.info("clearing accuracy_target")
      config.accuracy_target = None
  return candidate, tester, hlconfig
Example #14
 def __init__(self, app, n, args=[]):
   self.app = app
   self.bin = pbutil.benchmarkToBin(app)
   self.n = n + config.offset
   self.cmd = [
       self.bin,
       "--time",
       "--accuracy",
       "--threads=%d"%config.threads,
     ]
   self.cmd.extend(args)
   self.args=args
   self.inputs=[]
   self.testCount = 0
   self.timeoutCount = 0
   self.crashCount = 0
   self.wasTimeout = True 
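The constructor above uses a mutable default argument (args=[]). It happens to be harmless here because the default list is only read, but the conventional args=None idiom avoids any sharing between instances; a minimal sketch:

class Tester(object):
    def __init__(self, app, n, args=None):
        if args is None:
            args = []                  # fresh list for every instance
        self.app = app
        self.n = n
        self.args = list(args)         # defensive copy of caller-supplied args

t1 = Tester('multiply', 64)
t2 = Tester('multiply', 64, ['--accuracy'])
print(t1.args)                         # []
print(t2.args)                         # ['--accuracy']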
Example #15
def testBenchmark(b):
    name = b[0]
    bin = pbutil.benchmarkToBin(name)
    cfg = pbutil.benchmarkToCfg(name)

    if not os.path.isfile(bin):
        return False

    #build cmd
    hash = name
    iofiles = []
    for x in b[1:]:
        iofiles.append(resolveInputPath(x))
        hash += " " + os.path.basename(x)
    outfile = "./testdata/.output/" + re.sub("[ /.]", '_', hash)
    iofiles.append(outfile + ".latest")

    try:
        cmd = [bin, '--fixedrandom', '--config=%s.cfg' % outfile, '--reset']
        if run(cmd) != 0:
            print "ERROR: reset config failed"
            return False
    except OSError:
        print "ERROR: program not runnable"
        return False

    if os.path.isfile("%s.cfg.default" % outfile):
        shutil.copy("%s.cfg.default" % outfile, "%s.cfg" % outfile)

    try:
        infoxml = parse(pbutil.benchmarkToInfo(name))
    except:
        print "invalid *.info file"
        return False

    def test():
        if isFloatingPoint() and os.path.exists(outfile + ".float"):
            ext = ".float"
            print "FLOAT"
        else:
            ext = ""

        #run cpu config
        cmd = [bin, '--fixedrandom', '--config=%s.cfg' % outfile]
        cmd.extend(iofiles)
        t1 = time.time()
        rv = run(cmd)
        t2 = time.time()
        if rv != 0:
            print "run FAILED (status=%d, cmd=%s)" % (rv, ' '.join(cmd))
            return False

        if diffFiles(outfile + ext, outfile + ".latest"):
            time.sleep(0.1)  #try letting the filesystem settle down
            if diffFiles(outfile + ext, outfile + ".latest"):
                print "run FAILED (wrong output)"
                return False

        print "run PASSED (took %.2fs)" % (t2 - t1)

        if (not haveOpenCL()) or (not os.path.exists(outfile + ".gpucfg")):
            return True

        #run gpu config
        cmd = [bin, '--fixedrandom', '--config=%s.gpucfg' % outfile]
        cmd.extend(iofiles)
        t1 = time.time()
        rv = run(cmd)
        t2 = time.time()
        if rv != 0:
            print "gpu FAILED (status=%d, cmd=%s)" % (rv, ' '.join(cmd))
            return False

        if diffFiles(outfile + ext, outfile + ".latest"):
            time.sleep(0.1)  #try letting the filesystem settle down
            if diffFiles(outfile + ext, outfile + ".latest"):
                print "gpu FAILED (wrong output)"
                return False

        print "gpu PASSED (took %.2fs)" % (t2 - t1)
        return True

    return test()
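diffFiles() is likewise not shown in these excerpts. A minimal stand-in using filecmp (an assumption, not the project's code), combined with the retry-after-a-short-sleep pattern test() uses to let the filesystem settle:

import filecmp
import time

def diff_files(a, b):
    """Return True if the two files differ (the opposite sense of filecmp.cmp)."""
    return not filecmp.cmp(a, b, shallow=False)

def outputs_match(expected, actual, settle_seconds=0.1):
    if not diff_files(expected, actual):
        return True
    time.sleep(settle_seconds)         # let the filesystem settle, then retry once
    return not diff_files(expected, actual)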
Example #16
    print "****    2) If BENCHMARK is given, compile BENCHMARK"
    print "****    3) If args are given, run BENCHMARK"
    sys.exit(1)
  else:
    benchmark=sys.argv[1]
    if os.path.isfile(benchmark) or os.path.isfile(benchmark+".pbcc"):
      benchmark=os.path.abspath(benchmark)

pbutil.chdirToPetabricksRoot();
pbutil.compilePetabricks();

if benchmark is not None:
  benchmark=pbutil.normalizeBenchmarkName(relpath(benchmark))

cmd=[]
if gdb:
  cmd.extend(["/usr/bin/gdb", "--args"])

if len(sys.argv)==2:
  cmd.extend(["./src/pbc", pbutil.benchmarkToSrc(benchmark)])
  print " ".join(cmd)
  os.execv(cmd[0], cmd)
elif len(sys.argv)>2:
  pbutil.compileBenchmarks([benchmark])
  cmd.extend([pbutil.benchmarkToBin(benchmark)])
  cmd.extend(sys.argv[2:])
  print " ".join(cmd)
  os.execv(cmd[0], cmd)
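os.execv replaces the current process image, so nothing after a successful call ever runs, and its first argument must be a filesystem path to the executable (argv[0] is passed again inside the argument list). A small self-contained sketch of the print-then-exec pattern used above:

import os
import sys

cmd = ['/bin/echo', 'compiled and running benchmark']
print(" ".join(cmd))   # echo the command line first, as the script above does
sys.stdout.flush()     # flush buffers; they are lost once the image is replaced
os.execv(cmd[0], cmd)  # never returns on success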


Example #17
    return cmpobj(a,b)

  def cleanup(self):
    if config.cleanup_inputs:
      storagedirs.clearInputs();
      self.inputs=[]

if __name__ == "__main__":
  print "TESTING CANDIDATETESTER"
  pbutil.chdirToPetabricksRoot();
  pbutil.compilePetabricks();
  benchmark=pbutil.normalizeBenchmarkName('multiply')
  pbutil.compileBenchmarks([benchmark])
  tester = CandidateTester(benchmark, 768)
  try:
    candidate = Candidate(defaultConfigFile(pbutil.benchmarkToBin(tester.app)))
    candidate2 = Candidate(defaultConfigFile(pbutil.benchmarkToBin(tester.app)))
    candidate2.config['MatrixMultiplyTransposed_0_lvl1_rule']=1
    tester.test(candidate)
    tester.test(candidate)
    tester.test(candidate)
    tester.test(candidate2)
    tester.test(candidate2)
    tester.test(candidate2)
    print candidate.metrics[0]
    print candidate2.metrics[0]
    print str(candidate.metrics[0][768])
    print str(candidate2.metrics[0][768])
    c=tester.comparer(0, .95, 25)
    print c(candidate, candidate2)
    print candidate.metrics[0][768].sameChance(candidate2.metrics[0][768])
Example #18
def determineInputSizes():
  progress.status("finding reasonable input size for training... (%d sec) " % INFERINPUTSIZES_SEC)
  options.n=pbutil.inferGoodInputSizes( pbutil.benchmarkToBin(app)
                                      , [inputSizeTarget]
                                      , INFERINPUTSIZES_SEC)[0]
  print "* finding reasonable input size for training... %d" % options.n 
Example #19
        print "**** USAGE: ", sys.argv[0], "[BENCHMARK] [args]"
        print "****    1) Compile pbc"
        print "****    2) If BENCHMARK is given, compile BENCHMARK"
        print "****    3) If args are given, run BENCHMARK"
        sys.exit(1)
    else:
        benchmark = sys.argv[1]
        if os.path.isfile(benchmark) or os.path.isfile(benchmark + ".pbcc"):
            benchmark = os.path.abspath(benchmark)

pbutil.chdirToPetabricksRoot()
pbutil.compilePetabricks()

if benchmark is not None:
    benchmark = pbutil.normalizeBenchmarkName(relpath(benchmark))

cmd = []
if gdb:
    cmd.extend(["/usr/bin/gdb", "--args"])

if len(sys.argv) == 2:
    cmd.extend(["./src/pbc", pbutil.benchmarkToSrc(benchmark)])
    print " ".join(cmd)
    os.execv(cmd[0], cmd)
elif len(sys.argv) > 2:
    pbutil.compileBenchmarks([benchmark])
    cmd.extend([pbutil.benchmarkToBin(benchmark)])
    cmd.extend(sys.argv[2:])
    print " ".join(cmd)
    os.execv(cmd[0], cmd)
Example #20
    return compare

  def cleanup(self):
    if config.cleanup_inputs:
      storagedirs.clearInputs();
      self.inputs=[]

if __name__ == "__main__":
  print "TESTING CANDIDATETESTER"
  pbutil.chdirToPetabricksRoot();
  pbutil.compilePetabricks();
  benchmark=pbutil.normalizeBenchmarkName('multiply')
  pbutil.compileBenchmarks([benchmark])
  tester = CandidateTester(benchmark, 768)
  try:
    candidate = Candidate(defaultConfigFile(pbutil.benchmarkToBin(tester.app)))
    candidate2 = Candidate(defaultConfigFile(pbutil.benchmarkToBin(tester.app)))
    candidate2.config['MatrixMultiplyTransposed_0_lvl1_rule']=1
    tester.test(candidate)
    tester.test(candidate)
    tester.test(candidate)
    tester.test(candidate2)
    tester.test(candidate2)
    tester.test(candidate2)
    print candidate.metrics[0]
    print candidate2.metrics[0]
    print str(candidate.metrics[0][768])
    print str(candidate2.metrics[0][768])
    c=tester.comparer(0, .95, 25)
    print c(candidate, candidate2)
    print candidate.metrics[0][768].sameChance(candidate2.metrics[0][768])
Example #21
 def run(self, cfg):
     return pbutil.executeTimingRun(
         pbutil.benchmarkToBin(self.benchmark), int(self.n),
         ['--trials=%d' % TRAILS, '--config=' + cfg, '--accuracy'], None,
         ['timing', 'accuracy'])