Example #1
def autotune(benchmark,
             returnBest=None,
             tester_lambda=None,
             pop_lambda=None,
             hlconfig_lambda=None,
             config_lambda=None):
    return storagedirs.callWithLogDir(
        lambda: autotuneInner(benchmark, returnBest, tester_lambda, pop_lambda,
                              hlconfig_lambda, config_lambda),
        config.output_dir, config.delete_output_dir)
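All of these wrappers share one pattern: the real entry point (autotuneInner here) is deferred behind a zero-argument lambda so that storagedirs.callWithLogDir can create a log directory first, run the deferred call inside it, and clean up afterwards. A minimal sketch of that contract, assuming (not taken from this excerpt) that the helper creates a unique directory under output_dir and optionally deletes it when done:

import os
import shutil
import tempfile

def call_with_log_dir(fn, output_dir, delete_when_done):
    # Hypothetical stand-in for storagedirs.callWithLogDir; the real helper
    # likely also publishes the directory path for the callee to log into.
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    log_dir = tempfile.mkdtemp(prefix='tunerout-', dir=output_dir)
    try:
        return fn()                # run the deferred tuner call
    finally:
        if delete_when_done:       # mirrors config.delete_output_dir
            shutil.rmtree(log_dir, ignore_errors=True)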
Example #2
def regression_check(benchmark):
  tunerconfig.applypatch(tunerconfig.patch_regression)
  storagedirs.callWithLogDir(lambda: autotuneInner(benchmark),
                             config.output_dir,
                             config.delete_output_dir)
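regression_check differs from plain autotuning only in that it first applies tunerconfig.patch_regression. A plausible reading, assumed rather than confirmed by this excerpt, is that a patch is a mapping of configuration overrides written onto a config module before tuning starts:

def applypatch(patch, target):
    # Sketch of an applypatch-style helper (assumed semantics): overwrite
    # attributes on a config module or object from a dict of overrides.
    for name, value in patch.items():
        setattr(target, name, value)

# Hypothetical patch with made-up keys; the real patch_regression
# presumably shrinks the search so regression runs finish quickly.
patch_regression = {'rounds': 4, 'delete_output_dir': True}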
Example #3
def onlinelearn(benchmark):
  storagedirs.callWithLogDir(lambda: onlinelearnInner(benchmark),
                             config.output_dir,
                             config.delete_output_dir)
Example #4
    headers = ['time', 'minperf', 'perf_on_%d' % n, 'perf_on_%d_ci' % n,
               'tests', 'candidates', 'input_size', 'invperf', 'tests_timeout']
    print '#', ','.join(headers)
    t = csv.DictWriter(sys.stdout, headers, extrasaction='ignore')
    t.writerows(rows)


if __name__ == "__main__":
    # Imports assumed at module level in the full source file:
    import csv, os, sys, warnings
    import pbutil, storagedirs, tunerwarnings
    from optparse import OptionParser
    parser = OptionParser(
        usage="usage: graphgen.py [options] benchmark candidatelog.csv")
    parser.add_option('--trials', type='int', default=10)
    parser.add_option('--confidence', type='float', default=.95)
    parser.add_option('--timeout', type='float', default=5.0)
    parser.add_option('--onlyrounds', type='int', default=1)
    parser.add_option('-n', type='int', default=1024)

    warnings.simplefilter('ignore', tunerwarnings.TooManyTrials)

    (options, args) = parser.parse_args()
    if len(args) != 2:
        parser.print_usage()
        sys.exit(1)
    benchmark = args[0]
    config = os.path.abspath(args[1])
    pbutil.chdirToPetabricksRoot()
    #pbutil.compilePetabricks();
    benchmark = pbutil.normalizeBenchmarkName(benchmark)
    #pbutil.compileBenchmarks([benchmark])
    storagedirs.callWithLogDir(lambda: main(benchmark, options.n, config),
                               '/tmp', True)
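Unlike the tuner entry points above, this script logs under /tmp and always deletes the log directory on completion (the True flag). A typical invocation, with a hypothetical benchmark name, looks like:

python graphgen.py --trials=20 --timeout=10 matrixmultiply candidatelog.csv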
Example #5
def autotune(benchmark):
  return storagedirs.callWithLogDir(lambda: autotuneInner(benchmark),
                                    config.output_dir,
                                    config.delete_output_dir)
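Taken together, these wrappers give callers a uniform entry point; driving the tuner from Python then reduces to a single call (benchmark name hypothetical):

best = autotune('sort')   # 'sort' stands in for a real PetaBricks benchmark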