Code example #1
  def __init__(self, cfg, mlog=list()):
    # current configuration plus a snapshot of the configuration we started from
    self.config         = ConfigFile(cfg)
    self.startconfig    = ConfigFile(deepcopy(self.config))
    # one ResultsDB per metric named in the global config
    self.metrics        = [ResultsDB(x) for x in config.metrics]
    # unique id and a per-candidate output directory
    self.cid            = Candidate.nextCandidateId
    self.outputdir      = storagedirs.candidate(self.cid)
    # copy the mutation log so candidates never share the same list
    self.mutationlog    = list(mlog)
    Candidate.nextCandidateId += 1
Code example #2
  def test(self, count):
    '''test each member of the pop count times'''
    if self.best is not None:
      print '------------------------------------------------'
      print 'Best schedule:'
      print self.hl_cfg.str(HalideConfigAccessor(ConfigFile(self.best.cfgfile())))
      print '------------------------------------------------'

    self.failed = set()
    # queue every member count times, then run the tests in random order
    tests = []
    for z in xrange(count):
      tests.extend(self.members)
    random.shuffle(tests)
    for m in tests:
      sgatuner.check_timeout()
      if m not in self.failed and m.numTests(self.inputSize()) < config.max_trials:
        try:
          self.testers[-1].test(m)
        except candidatetester.CrashException, e:
          # a crashing member is warned about and dropped from the population
          if m.numTotalTests() == 0:
            warnings.warn(InitialProgramCrash(e))
          else:
            warnings.warn(ExistingProgramCrash(e))
          self.failed.add(m)
          self.members.remove(m)
Code example #3
def main(benchmark, n, filename):
    if os.path.isdir(filename):
        filename = os.path.join(filename, 'stats/candidatelog.csv')
    f = open(filename)
    infoxml = TrainingInfo(pbutil.benchmarkToInfo(benchmark))
    main = mainname([pbutil.benchmarkToBin(benchmark)])
    infoxml = infoxml.transform(main)
    binpath = pbutil.benchmarkToBin(benchmark)
    tester = CandidateTester(benchmark, n)
    root = os.path.dirname(filename)

    def findconfig(c):
        if c[0] == '/':
            c = c[1:]
        if os.path.isfile(os.path.join(root, c)):
            return os.path.join(root, c)
        if os.path.isfile(os.path.join(root, '..', c)):
            return os.path.join(root, '..', c)
        return None

    rows = list(csv.DictReader(f))
    for i, row in enumerate(rows):
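        # when options.onlyrounds is set, skip every row except the last one logged for each round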
        if options.onlyrounds \
            and i+1<len(rows) \
            and row.has_key('round_number') \
            and rows[i+1]['round_number']==row['round_number']:
            continue
        config = findconfig(row['config_path'])
        row['tests'] = int(row['tests_complete']) + int(
            row['tests_timeout']) + int(row['tests_crashed'])
        candidate = Candidate(ConfigFile(config), infoxml)
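        # run additional trials until options.trials results are collected; crashes are reported on stderr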
        while candidate.numTests(n) < options.trials:
            try:
                tester.testN(candidate, options.trials, options.timeout)
            except candidatetester.CrashException, e:
                print >> sys.stderr, e
        try:
            row['minperf'] = candidate.metrics[0][n].min()
            row['perf_on_%d' % n], row['perf_on_%d_ci' %
                                       n] = candidate.metrics[0][n].interval(
                                           options.confidence)
            row['invperf'] = 1.0 / row['perf_on_%d' % n]
        except Exception, e:
            row['minperf'] = -1
            row['perf_on_%d' % n] = -1
            print >> sys.stderr, e
Code example #4
 def test(self, candidate, limit=None):
     try:
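         # note: the caller-supplied limit is unconditionally overridden by the TIMELIMIT value defined elsewhere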
         limit = TIMELIMIT
         
         self.testCount += 1
         cfgfile = candidate.cfgfile()
         testNumber = candidate.numTests(self.n)
         if testNumber>=config.max_trials:
             warnings.warn(tunerwarnings.TooManyTrials(testNumber+1))
 #    cmd = list(self.cmd)
 #    cmd.append("--config="+cfgfile)
 #    cmd.extend(timers.inputgen.wrap(lambda:self.getInputArg(testNumber)))
 #    if limit is not None:
 #      cmd.append("--max-sec=%f"%limit)
 #    cmd.extend(getMemoryLimitArgs())
         cfg = HalideConfigAccessor(ConfigFile(cfgfile))
         try:
             #results = timers.testing.wrap(lambda: runCommand(self.app, cfg, self.hl_cfg, limit))
             #for i,result in enumerate(results):
             #    if result is not None:
             #        v=result['average']
             #        if numpy.isnan(v) or numpy.isinf(v):
             #            warnings.warn(tunerwarnings.NanAccuracy())
             #            raise pbutil.TimingRunFailed(None)
             #        candidate.metrics[i][self.n].add(v)
             #return True
             T = runCommand(self.app, cfg, self.hl_cfg, limit, self.test_func, self.func_d, self.func, self.scope)
             #print 'succeeded'
             for timeval in T:
                 candidate.metrics[config.timing_metric_idx][self.n].add(timeval)
             # success: mirror the timeout path below, which returns False
             return True
         except TimingRunTimeout:
             #assert limit is not None
             #warnings.warn(tunerwarnings.ProgramTimeout(candidate, self.n, limit))
             candidate.metrics[config.timing_metric_idx][self.n].addTimeout(limit)
             self.timeoutCount += 1
             return False
         except TimingRunFailed, e:
             self.crashCount += 1
             raise CrashException(testNumber, self.n, candidate, self.app)
     except:
         traceback.print_exc()
         raise
Code example #5
 def __setitem__(self, k, v):
     # unknown keys are registered through add(); known keys update in place
     if k not in self.values:
         self.add(k, v)
     else:
         ConfigFile.__setitem__(self, k, v)
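The following is a minimal, self-contained sketch of the same add-or-update pattern, written in the same Python 2 style as the code above. ToyConfig and TrackedConfig are made-up stand-ins for ConfigFile and are not part of the original code base.

class ToyConfig(dict):
  '''Stand-in for ConfigFile: add() registers a brand-new tunable.'''
  def add(self, k, v):
    print 'registering new key %r' % (k,)
    dict.__setitem__(self, k, v)

class TrackedConfig(ToyConfig):
  def __setitem__(self, k, v):
    # unknown keys go through add(); known keys fall back to the base class
    if k not in self:
      self.add(k, v)
    else:
      ToyConfig.__setitem__(self, k, v)

c = TrackedConfig()
c['unroll'] = 2   # first assignment is routed through add()
c['unroll'] = 4   # later assignments are plain updates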
Code example #6
class Candidate:
  '''A candidate algorithm in the population'''

  nextCandidateId = 0

  def __init__(self, cfg, mlog=list()):
    # current configuration plus a snapshot of the configuration we started from
    self.config         = ConfigFile(cfg)
    self.startconfig    = ConfigFile(deepcopy(self.config))
    # one ResultsDB per metric named in the global config
    self.metrics        = [ResultsDB(x) for x in config.metrics]
    # unique id and a per-candidate output directory
    self.cid            = Candidate.nextCandidateId
    self.outputdir      = storagedirs.candidate(self.cid)
    # copy the mutation log so candidates never share the same list
    self.mutationlog    = list(mlog)
    Candidate.nextCandidateId += 1

  def discardResults(self, n):
    for m in self.metrics:
      for k in m.keys():
        m[k].discard(n)

  def __str__(self):
    return "Candidate%d"%self.cid

  def clone(self):
    return Candidate(deepcopy(self.config), self.mutationlog)
  
  def reset_mutation_log(self):
    self.mutationlog = list()

  def log_mutation(self, m):
    self.mutationlog.append(m)

  def clearResultsAbove(self, val):
    for i in xrange(len(self.metrics)):
      for n in self.metrics[i].keys():
        if n>=val:
          self.metrics[i][n] = Results()

  def clearResults(self):
    for i in xrange(len(self.metrics)):
      for n in self.metrics[i].keys():
        self.metrics[i][n] = Results()

  def reasonableLimit(self, n):
    if self.numTests(n)>0:
      return self.metrics[config.timing_metric_idx][n].reasonableLimit()
    else:
      return None

  def resultsStr(self, n, baseline=None):
    s=['trials: %2d'%self.numTests(n)]
    t=str
    if config.print_raw:
      t=repr
    for i, m in enumerate(self.metrics):
      s.append("%s: %s" % (config.metrics[i], t(m[n])))
    return ', '.join(s)

  def resultsTable(self, n):
    l = [('candidate', str(self.cid)),
         ('trials', str(self.numTests(n)))]
    for i, m in enumerate(self.metrics):
      l.append((config.metrics[i], str(m[n])))
    l.append(('mutation_log', ''.join(map(lambda x: x.short, self.mutationlog)[-10:])))
    return l

  def numTests(self, n):
    return len(self.metrics[config.timing_metric_idx][n])

  def numTimeouts(self, n):
    return self.metrics[config.timing_metric_idx][n].numTimeouts()

  def isAllTimeout(self, n):
    return self.numTests(n) <= self.numTimeouts(n)
  
  def numTotalTests(self):
    return self.metrics[config.timing_metric_idx].totalTests()

  def performance(self, n):
    if len(self.metrics[config.timing_metric_idx][n]) == 0:
      return (2**31)
    return self.metrics[config.timing_metric_idx][n].mean()

  def accuracy(self, n):
    if len(self.metrics[config.accuracy_metric_idx][n]) == 0:
      return -(2**31)
    return self.metrics[config.accuracy_metric_idx][n].mean()

  def hasAccuracy(self, n, target):
    if len(self.metrics[config.accuracy_metric_idx][n]) == 0:
      return False
    if target is None:
      return True
    return self.metrics[config.accuracy_metric_idx][n].mean() >= target

  def cfgfile(self):
    cf=os.path.join(self.outputdir,'config')
    self.config.save(cf)
    return cf

  def rmfiles(self):
    for f in ('config', 'stats', 'stats_raw'):
      f=os.path.join(self.outputdir,f)
      if os.path.isfile(f):
        os.unlink(f)
    os.rmdir(self.outputdir)
  
  def timingResults(self, n=None):
    if n is None:
      n=max(self.metrics[config.timing_metric_idx].keys())
    return self.metrics[config.timing_metric_idx][n]

  def accuracyResults(self, n=None):
    if n is None:
      n=max(self.metrics[config.accuracy_metric_idx].keys())
    return self.metrics[config.accuracy_metric_idx][n]

  def writestats(self, n, filename=None):
    if filename is None:
      filename=os.path.join(self.outputdir,'stats')
    first=not os.path.isfile(filename)
    s=open(filename, 'a')
    if first:
      s.write("#input, ")
      for m in config.metrics:
        s.write("%s_mean, %s_stddev, %s_stderr, %s_ci, "%(m, m, m, m))
      s.write("\n")
    s.write("%6d, "%n)
    for m in self.metrics:
      try:
        avg,ci = m[n].interval(config.display_confidence)
        sd = math.sqrt(m[n].variance())
        se = math.sqrt(m[n].meanVariance())
      except OverflowError:
        if numpy.isinf(m[n].variance()):
          sd = numpy.inf
          se = numpy.inf
        else:
          raise
      except AssertionError:
        avg = -1
        ci = -1
        se = -1
        sd = -1

      s.write("%.8f, %.8f, %.8f, %.8f, "%(avg,sd,se,ci))
    s.write("\n")
    s.close()
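As a rough, self-contained illustration of the four statistics writestats() records per metric (mean, standard deviation, standard error, and a confidence-interval half-width), here is a sketch with made-up sample timings. The 1.96 factor assumes a normal approximation at 95% confidence, which may differ from what the interval() call in the code above actually computes; none of these numbers come from the tuner.

import math

# made-up timing samples, for illustration only
samples = [0.51, 0.49, 0.55, 0.47, 0.53]
n = len(samples)
mean = sum(samples) / n
variance = sum((x - mean) ** 2 for x in samples) / (n - 1)
sd = math.sqrt(variance)      # stddev column
se = sd / math.sqrt(n)        # stderr column (stddev of the mean)
ci = 1.96 * se                # ~95% half-width, normal approximation
print "%.8f, %.8f, %.8f, %.8f" % (mean, sd, se, ci)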
Code example #7
 def __setitem__(self, k, v):
     # unknown keys are registered through add(); known keys update in place
     if k not in self.values:
         self.add(k, v)
     else:
         ConfigFile.__setitem__(self, k, v)