Example #1
def recompile():
    pbutil.chdirToPetabricksRoot()
    config.benchmark = pbutil.normalizeBenchmarkName(config.benchmark)
    config.output_cfg = pbutil.benchmarkToCfg(config.benchmark)
    if config.recompile:
        pbutil.compilePetabricks()
        pbutil.compileBenchmarks([config.benchmark])
Example #2
def recompile():
  pbutil.chdirToPetabricksRoot()
  config.benchmark = pbutil.normalizeBenchmarkName(config.benchmark)
  config.output_cfg = pbutil.benchmarkToCfg(config.benchmark) 
  if config.recompile:
    pbutil.compilePetabricks()
    pbutil.compileBenchmarks([config.benchmark])
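
Both variants of recompile() above drive shared module-level state rather than taking arguments. A minimal sketch of how a caller might use them, assuming the surrounding script's config module is importable and recompile() is in scope; the 'multiply' benchmark name is purely illustrative:

import config

config.benchmark = 'multiply'   # hypothetical benchmark name
config.recompile = True         # rebuild the PetaBricks compiler and the benchmark binary
recompile()
# config.output_cfg now holds the path of the benchmark's .cfg file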
Example #3
def main(argv):
  t1=time.time()

  global app
  global cfg 
  global ignore_list
  global defaultArgs
  global substderr
  global options 

  config_tool_path = os.path.split(argv[0])[0] + "/configtool.py"
  fast = False

  parser = optparse.OptionParser(usage="usage: %prog [options] BENCHMARK")
  parser.add_option("--min", type="int", dest="min", default=1)
  parser.add_option("-n", "--random", "--max", type="int", dest="n", default=-1)
  parser.add_option("--offset", type="int", dest="offset", default=0)
  parser.add_option("--max-sec", type="float", dest="maxsec", default=0)
  parser.add_option("-d", "--debug",  action="store_true", dest="debug", default=False)
  parser.add_option("-f", "--fast",  action="store_true", dest="fast", default=False)
  parser.add_option("--threads",      type="int", dest="threads", default=pbutil.cpuCount())
  parser.add_option("-c", "--config", dest="config", default=None)
  parser.add_option("--noisolation", action="store_true", dest="noisolation", default=False)
  parser.add_option("--print", action="store_true", dest="justprint", default=False)
  parser.add_option("--time", action="store_true", dest="time", default=False)
  parser.add_option("--acctrials", type="int", dest="acctrials", default=None)
  parser.add_option("--accimprovetries", type="int", dest="accimprovetries", default=None)
  parser.add_option("--trials", type="int", dest="trials", default=None)
  parser.add_option("--trials-sec", type="float", dest="trialssec", default=None)
  parser.add_option("--trials-max", type="int", dest="trialsmax", default=None)
  parser.add_option("--transform", dest="transform", default=None)
  options,args = parser.parse_args()

  if len(args) != 1:
    parser.error("expected benchmark name as arg")

  cfg=options.config
  app=args[0]

  pbutil.chdirToPetabricksRoot()
  pbutil.compilePetabricks()
  app = pbutil.normalizeBenchmarkName(app)
  pbutil.compileBenchmarks([app])
  
  if options.debug:
    substderr = sys.__stderr__

  if cfg is None:
    cfg = pbutil.benchmarkToCfg(app)

  defaultArgs = ['--config='+cfg, '--threads=%d'%options.threads, '--offset=%d'%options.offset, '--min=%d'%options.min]

  if options.noisolation:
    defaultArgs.append("--noisolation")

  if options.acctrials is not None:
    defaultArgs.append("--acctrials=%d"%options.acctrials)
  if options.trials is not None:
    defaultArgs.append("--trials=%d"%options.trials)
  if options.trialssec is not None:
    defaultArgs.append("--trials-sec=%f"%options.trialssec)
  if options.trialsmax is not None:
    defaultArgs.append("--trials-max=%d"%options.trialsmax)
  if options.accimprovetries is not None:
    defaultArgs.append("--accimprovetries=%d"%options.accimprovetries)

  getIgnoreList()

  try:
    infoxml = parse(pbutil.benchmarkToInfo(app))
  except:
    print "Cannot parse:", pbutil.benchmarkToInfo(app)
    sys.exit(-1)

 #print "Reseting config entries"
 #reset()

  #build index of transforms
  for t in infoxml.getElementsByTagName("transform"):
    transforms[nameof(t)]=t
    if t.getAttribute("templateChoice")=="0":
      transforms[t.getAttribute("templateName")] = t

  if options.transform is None:
    maintx = transforms[mainname()]
  else:
    maintx = transforms[options.transform]
  
  print "Call tree:"
  walkCallTree(maintx, fnup=printTx)
  print
  print "Autotuning:"

  progress.status("building work queue")
 
  if options.n <= 0:
    tasks.append(TuneTask("determineInputSizes", determineInputSizes))
    
  if options.time:
    tasks.append(TuneTask("runTimingTest", lambda:runTimingTest(maintx)))

  #build list of tasks
  if not options.fast:
    walkCallTree(maintx, lambda tx, depth, loops: enqueueAutotuneCmds(tx, maintx, 1, depth, loops))
  walkCallTree(maintx, lambda tx, depth, loops: enqueueAutotuneCmds(tx, maintx, 2, depth, loops))
  
  if options.time:
    tasks.append(TuneTask("runTimingTest", lambda:runTimingTest(maintx)))

  progress.status("autotuning")

  while len(tasks)>0:
    w1=remainingTaskWeight()
    task=tasks.pop(0)
    w2=remainingTaskWeight()
    progress.remaining(w1, w2)
    task.run()
  progress.clear()

  t2=time.time()
  sec=t2-t1

  

  print "autotuning took %.2f sec"%(t2-t1)
  for k,v in taskStats.items():
    print "  %.2f sec in %s"%(v.sec, k)
    sec -= v.sec
  print "  %.2f sec in unknown"%sec
  
  names=taskStats.keys()
  weights=map(lambda x: x.sec/float(max(x.count, 1)), taskStats.values())
  scale=len(weights)/sum(weights)
  print "Suggested weights:"
  print "taskStats = {" + ", ".join(map(lambda i: "'%s':TaskStats(%.2f)"%(names[i], scale*weights[i]), xrange(len(names)))) + "}"
Example #4
def main():
  warnings.simplefilter('ignore', tunerwarnings.NewProgramCrash)
  warnings.simplefilter('ignore', tunerwarnings.TargetNotMet)
  warnings.simplefilter('ignore', tunerwarnings.NanAccuracy)

  progress.push()
  progress.status("compiling benchmarks")

  pbutil.chdirToPetabricksRoot()
  pbutil.compilePetabricks()

  r, lines = pbutil.loadAndCompileBenchmarks("./scripts/pbbenchmark.tests")

  if filter(lambda x: x.rv!=0, r):
    print "compile failed"
    sys.exit(1)

  print 
  print "All scores are relative performance to a baseline system."
  print "Higher is better."
  print

  baselines = dict()
  for line in csv.reader(open("./testdata/configs/baselines.csv")):
    if len(line)>=3:
      baselines[line[0]] = line[1:]

  benchmarks=[]
  for benchmark, cfg, n, accTarg in lines:
    try:
      baseline = baselines[benchmark]
    except KeyError:
      baseline = (1.0, 1.0)
    benchmarks.append(Benchmark(benchmark, cfg, n, accTarg, baseline[0], baseline[1]))


  print LONGBAR
  print "Fixed (no autotuning) scores:"
  print SHORTBAR
  progress.remainingTicks(len(benchmarks)+3)
  progress.tick()
  for b in benchmarks:
    progress.status("running fixed "+fmtCfg(b.cfg))
    b.runFixed()
    b.printFixed()
  progress.tick()
  score_fixed = geomean(map(Benchmark.scoreFixed, benchmarks))

  print SHORTBAR
  print "Fixed Score (pbbenchmark v%s): %.2f" % (VERSION, geomean(map(Benchmark.scoreFixed, benchmarks)))
  print LONGBAR
  print



  print LONGBAR
  print "Tuned scores:"
  print SHORTBAR
  for b in benchmarks:
    progress.status("running tuned "+fmtCfg(b.cfg))
    progress.status("autotuning")
    b.autotune()
    b.runTuned()
    b.printTuned()
    progress.tick()
  
  score_tuned = geomean(map(Benchmark.scoreTuned, benchmarks))
  score_training_time = geomean(map(Benchmark.scoreTrainingTime, benchmarks))

  print SHORTBAR
  print "Tuned Score (pbbenchmark v%s): %.2f" % (VERSION, score_tuned)
  print "Training Time Score (pbbenchmark v%s): %.2f" % (VERSION, score_training_time)
  print LONGBAR
  print

  if DEBUG:
    print LONGBAR
    print "Debug:"
    print SHORTBAR
    for b in benchmarks:
      b.printDebug()
    print LONGBAR
    print

  fd = open("./testdata/configs/baselines.csv.latest", "w")
  for b in benchmarks:
    print >>fd, "%s, %f, %f" % (b.benchmark, b.tuned_perf['average'], b.tuning_time)
  fd.close()

  if not os.path.isdir(LOGDIR):
    os.mkdir(LOGDIR)
  
  for b in benchmarks:
    writelog(expandLog(b.cfg), b.logEntry())
    
  writelog(expandLog('scores.log'), {
      'version'             : VERSION,
      'score_fixed'         : score_fixed,
      'score_tuned'         : score_tuned,
      'score_training_time' : score_training_time,
      'hostname'            : socket.gethostname(),
      'timestamp'           : TIMESTAMP,
    })
  
  progress.tick()
  progress.status("done")
  progress.pop()
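
The per-benchmark scores above are combined with a geometric mean. geomean() is defined elsewhere in pbbenchmark; a minimal version consistent with how it is called here would be:

from functools import reduce
import operator

def geomean(values):
  values = list(values)
  return reduce(operator.mul, values, 1.0) ** (1.0 / len(values))

print(geomean([1.0, 2.0, 4.0]))  # 2.0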
Example #5
def main():
    warnings.simplefilter('ignore', tunerwarnings.NewProgramCrash)
    warnings.simplefilter('ignore', tunerwarnings.SmallInputProgramCrash)
    warnings.simplefilter('ignore', tunerwarnings.TargetNotMet)
    warnings.simplefilter('ignore', tunerwarnings.NanAccuracy)

    #Parse input options
    from optparse import OptionParser
    parser = OptionParser(usage="usage: pbbenchmark [options]")
    parser.add_option("--learning",
                      action="store_true",
                      dest="learning",
                      default=False,
                      help="enable heuristics learning")
    parser.add_option(
        "--heuristics",
        type="string",
        help=
        "name of the file containing the set of heuristics to use. Automatically enables --learning",
        default=None)

    (options, args) = parser.parse_args()

    if options.heuristics:
        options.learning = True

    if options.learning:
        print "Learning of heuristics is ACTIVE"
        if options.heuristics:
            print "Using heuristics file: " + str(options.heuristics)
        else:
            print "Using only heuristics in the database"

    progress.push()
    progress.status("compiling benchmarks")

    pbutil.chdirToPetabricksRoot()
    pbutil.compilePetabricks()

    global REV
    try:
        REV = pbutil.getRevision()
    except:
        pass

    r, lines = pbutil.loadAndCompileBenchmarks(
        "./scripts/pbbenchmark.tests",
        searchterms=sys.argv[1:],
        learning=options.learning,
        heuristicSetFileName=options.heuristics)

    if filter(lambda x: x.rv != 0, r):
        print "compile failed"
        sys.exit(1)

    print
    print "All scores are relative performance to a baseline system."
    print "Higher is better."
    print

    baselines = dict()
    for line in csv.reader(open("./testdata/configs/baselines.csv")):
        if len(line) >= 3:
            baselines[line[0]] = line[1:]

    benchmarks = []
    for benchmark, cfg, n, accTarg in lines:
        try:
            baseline = baselines[benchmark]
        except KeyError:
            baseline = (1.0, 1.0)
        benchmarks.append(
            Benchmark(benchmark, cfg, n, accTarg, baseline[0], baseline[1]))

    progress.remainingTicks(len(benchmarks))

    #print LONGBAR
    #print "Fixed (no autotuning) scores:"
    #print SHORTBAR
    #for b in benchmarks:
    #  progress.status("running fixed "+fmtCfg(b.cfg))
    #  b.runFixed()
    #  b.printFixed()
    #score_fixed = geomean(map(Benchmark.scoreFixed, benchmarks))

    #print SHORTBAR
    #print "Fixed Score (pbbenchmark v%s): %.2f" % (VERSION, geomean(map(Benchmark.scoreFixed, benchmarks)))
    #print LONGBAR
    #print

    print LONGBAR
    print "Tuned scores:"
    print SHORTBAR
    for b in benchmarks:
        progress.status("running tuned " + fmtCfg(b.cfg))
        progress.status("autotuning")
        b.autotune()
        b.runTuned()
        b.printTuned()
        progress.tick()

    score_tuned = geomean(map(Benchmark.scoreTuned, benchmarks))
    score_training_time = geomean(map(Benchmark.scoreTrainingTime, benchmarks))

    print SHORTBAR
    print "Tuned Score (pbbenchmark v%s): %.2f" % (VERSION, score_tuned)
    print "Training Time Score (pbbenchmark v%s): %.2f" % (VERSION,
                                                           score_training_time)
    print LONGBAR
    print

    if DEBUG:
        print LONGBAR
        print "Debug:"
        print SHORTBAR
        for b in benchmarks:
            b.printDebug()
        print LONGBAR
        print

    fd = open("./testdata/configs/baselines.csv.latest", "w")
    for b in benchmarks:
        print >> fd, "%s, %f, %f" % (b.benchmark, b.tuned_perf['average'],
                                     b.tuning_time)
    fd.close()

    if not os.path.isdir(LOGDIR):
        os.mkdir(LOGDIR)

    for b in benchmarks:
        writelog(expandLog(b.cfg), b.logEntry())

    writelog(
        expandLog('scores.log'),
        {
            'version': VERSION,
            'score_fixed': -1,  #score_fixed,
            'score_tuned': score_tuned,
            'score_training_time': score_training_time,
            'hostname': socket.gethostname(),
            'timestamp': TIMESTAMP,
            'revision': REV,
        })

    progress.status("done")
    progress.pop()
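
Each run appends one record per benchmark plus a summary record via writelog() and expandLog(), which come from the surrounding script. A rough sketch of plausible implementations, assuming CSV-style log files under LOGDIR with a single header row; the file naming and field order are assumptions, not the script's actual behaviour:

import csv
import os

LOGDIR = "./logs"  # assumed value; the real script defines LOGDIR elsewhere

def expandLog(name):
  # map a config path or log name to a log file inside LOGDIR (assumed scheme)
  return os.path.join(LOGDIR, os.path.basename(name) + ".log")

def writelog(path, entry):
  # append one dict as a CSV row, writing a header if the file is new
  is_new = not os.path.exists(path)
  with open(path, "a") as fd:
    writer = csv.DictWriter(fd, sorted(entry.keys()))
    if is_new:
      writer.writeheader()
    writer.writerow(entry)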
Example #6
          self.test(a)
        else:
          break
      warnings.warn(ComparisonFailed(self.n, a, b))
      return 0
    return compare

  def cleanup(self):
    if config.cleanup_inputs:
      storagedirs.clearInputs()
      self.inputs=[]

if __name__ == "__main__":
  print "TESTING CANDIDATETESTER"
  pbutil.chdirToPetabricksRoot()
  pbutil.compilePetabricks()
  benchmark=pbutil.normalizeBenchmarkName('multiply')
  pbutil.compileBenchmarks([benchmark])
  tester = CandidateTester(benchmark, 768)
  try:
    candidate = Candidate(defaultConfigFile(pbutil.benchmarkToBin(tester.app)))
    candidate2 = Candidate(defaultConfigFile(pbutil.benchmarkToBin(tester.app)))
    candidate2.config['MatrixMultiplyTransposed_0_lvl1_rule']=1
    tester.test(candidate)
    tester.test(candidate)
    tester.test(candidate)
    tester.test(candidate2)
    tester.test(candidate2)
    tester.test(candidate2)
    print candidate.metrics[0]
    print candidate2.metrics[0]
  finally:
    # assumed completion: the snippet is cut off before the try handler; release generated inputs
    tester.cleanup()
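
The harness above times each candidate three times and then compares their collected metrics; candidate2 differs from candidate only in the single MatrixMultiplyTransposed_0_lvl1_rule config flag. A simplified stand-in for that compare-by-repeated-timing idea (Candidate, CandidateTester and metrics[] in the real code carry much more information than this):

import time

def time_once(fn):
  # run fn once and return its wall-clock time in seconds
  t0 = time.time()
  fn()
  return time.time() - t0

def median_time(fn, trials=3):
  # take the median of several timings to damp measurement noise
  samples = sorted(time_once(fn) for _ in range(trials))
  return samples[len(samples) // 2]

# usage (hypothetical runner): median_time(lambda: run_benchmark(candidate2_config))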
Example #7
def main():
  warnings.simplefilter('ignore', tunerwarnings.NewProgramCrash)
  warnings.simplefilter('ignore', tunerwarnings.SmallInputProgramCrash)
  warnings.simplefilter('ignore', tunerwarnings.TargetNotMet)
  warnings.simplefilter('ignore', tunerwarnings.NanAccuracy)

  #Parse input options
  from optparse import OptionParser
  parser = OptionParser(usage="usage: pbbenchmark [options]")
  parser.add_option("--learning", action="store_true", dest="learning", default=False, help="enable heuristics learning")
  parser.add_option("--heuristics", type="string", help="name of the file containing the set of heuristics to use. Automatically enables --learning", default=None)

  (options, args) = parser.parse_args()

  if options.heuristics:
    options.learning = True

  if options.learning:
    print "Learning of heuristics is ACTIVE"
    if options.heuristics:
      print "Using heuristics file: "+ str(options.heuristics)
    else:
      print "Using only heuristics in the database"

  progress.push()
  progress.status("compiling benchmarks")

  pbutil.chdirToPetabricksRoot()
  pbutil.compilePetabricks()

  global REV
  try:
    REV=pbutil.getRevision()
  except:
    pass

  r, lines = pbutil.loadAndCompileBenchmarks("./scripts/pbbenchmark.tests", searchterms=sys.argv[1:], learning=options.learning, heuristicSetFileName=options.heuristics)

  if filter(lambda x: x.rv!=0, r):
    print "compile failed"
    sys.exit(1)

  print 
  print "All scores are relative performance to a baseline system."
  print "Higher is better."
  print

  baselines = dict()
  for line in csv.reader(open("./testdata/configs/baselines.csv")):
    if len(line)>=3:
      baselines[line[0]] = line[1:]

  benchmarks=[]
  for benchmark, cfg, n, accTarg in lines:
    try:
      baseline = baselines[benchmark]
    except KeyError:
      baseline = (1.0, 1.0)
    benchmarks.append(Benchmark(benchmark, cfg, n, accTarg, baseline[0], baseline[1]))

  progress.remainingTicks(len(benchmarks))

  #print LONGBAR
  #print "Fixed (no autotuning) scores:"
  #print SHORTBAR
  #for b in benchmarks:
  #  progress.status("running fixed "+fmtCfg(b.cfg))
  #  b.runFixed()
  #  b.printFixed()
  #score_fixed = geomean(map(Benchmark.scoreFixed, benchmarks))

  #print SHORTBAR
  #print "Fixed Score (pbbenchmark v%s): %.2f" % (VERSION, geomean(map(Benchmark.scoreFixed, benchmarks)))
  #print LONGBAR
  #print



  print LONGBAR
  print "Tuned scores:"
  print SHORTBAR
  for b in benchmarks:
    progress.status("running tuned "+fmtCfg(b.cfg))
    progress.status("autotuning")
    b.autotune()
    b.runTuned()
    b.printTuned()
    progress.tick()
  
  score_tuned = geomean(map(Benchmark.scoreTuned, benchmarks))
  score_training_time = geomean(map(Benchmark.scoreTrainingTime, benchmarks))

  print SHORTBAR
  print "Tuned Score (pbbenchmark v%s): %.2f" % (VERSION, score_tuned)
  print "Training Time Score (pbbenchmark v%s): %.2f" % (VERSION, score_training_time)
  print LONGBAR
  print

  if DEBUG:
    print LONGBAR
    print "Debug:"
    print SHORTBAR
    for b in benchmarks:
      b.printDebug()
    print LONGBAR
    print

  fd = open("./testdata/configs/baselines.csv.latest", "w")
  for b in benchmarks:
    print >>fd, "%s, %f, %f" % (b.benchmark, b.tuned_perf['average'], b.tuning_time)
  fd.close()

  if not os.path.isdir(LOGDIR):
    os.mkdir(LOGDIR)
  
  for b in benchmarks:
    writelog(expandLog(b.cfg), b.logEntry())
    
  writelog(expandLog('scores.log'), {
      'version'             : VERSION,
      'score_fixed'         : -1,#score_fixed,
      'score_tuned'         : score_tuned,
      'score_training_time' : score_training_time,
      'hostname'            : socket.gethostname(),
      'timestamp'           : TIMESTAMP,
      'revision'            : REV,
    })
  
  progress.status("done")
  progress.pop()
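
In the option handling above, passing --heuristics implies --learning. That implication is easy to verify in isolation; the snippet below reproduces just those two flags with optparse (a standalone check, not the pbbenchmark parser itself):

from optparse import OptionParser

parser = OptionParser(usage="usage: pbbenchmark [options]")
parser.add_option("--learning", action="store_true", dest="learning", default=False)
parser.add_option("--heuristics", type="string", default=None)

options, args = parser.parse_args(["--heuristics", "hs.json"])  # hs.json is a made-up file name
if options.heuristics:
  options.learning = True
assert options.learning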
Example #8
    '''
    cmpobj = self.comparer(config.timing_metric_idx,
                           config.confidence_pct,
                           config.max_trials,
                           limit)
    return cmpobj(a,b)

  def cleanup(self):
    if config.cleanup_inputs:
      storagedirs.clearInputs()
      self.inputs=[]

if __name__ == "__main__":
  print "TESTING CANDIDATETESTER"
  pbutil.chdirToPetabricksRoot()
  pbutil.compilePetabricks()
  benchmark=pbutil.normalizeBenchmarkName('multiply')
  pbutil.compileBenchmarks([benchmark])
  tester = CandidateTester(benchmark, 768)
  try:
    candidate = Candidate(defaultConfigFile(pbutil.benchmarkToBin(tester.app)))
    candidate2 = Candidate(defaultConfigFile(pbutil.benchmarkToBin(tester.app)))
    candidate2.config['MatrixMultiplyTransposed_0_lvl1_rule']=1
    tester.test(candidate)
    tester.test(candidate)
    tester.test(candidate)
    tester.test(candidate2)
    tester.test(candidate2)
    tester.test(candidate2)
    print candidate.metrics[0]
    print candidate2.metrics[0]
  finally:
    # assumed completion: the snippet is cut off before the try handler; release generated inputs
    tester.cleanup()
Example #9
def main(argv):
    t1 = time.time()

    global app
    global cfg
    global ignore_list
    global defaultArgs
    global substderr
    global options

    config_tool_path = os.path.split(argv[0])[0] + "/configtool.py"
    fast = False

    parser = optparse.OptionParser(usage="usage: %prog [options] BENCHMARK")
    parser.add_option("--min", type="int", dest="min", default=1)
    parser.add_option("-n",
                      "--random",
                      "--max",
                      type="int",
                      dest="n",
                      default=-1)
    parser.add_option("--offset", type="int", dest="offset", default=0)
    parser.add_option("--max-sec", type="float", dest="maxsec", default=0)
    parser.add_option("-d",
                      "--debug",
                      action="store_true",
                      dest="debug",
                      default=False)
    parser.add_option("-f",
                      "--fast",
                      action="store_true",
                      dest="fast",
                      default=False)
    parser.add_option("--threads",
                      type="int",
                      dest="threads",
                      default=pbutil.cpuCount())
    parser.add_option("-c", "--config", dest="config", default=None)
    parser.add_option("--noisolation",
                      action="store_true",
                      dest="noisolation",
                      default=False)
    parser.add_option("--print",
                      action="store_true",
                      dest="justprint",
                      default=False)
    parser.add_option("--time",
                      action="store_true",
                      dest="time",
                      default=False)
    parser.add_option("--acctrials",
                      type="int",
                      dest="acctrials",
                      default=None)
    parser.add_option("--accimprovetries",
                      type="int",
                      dest="accimprovetries",
                      default=None)
    parser.add_option("--trials", type="int", dest="trials", default=None)
    parser.add_option("--trials-sec",
                      type="float",
                      dest="trialssec",
                      default=None)
    parser.add_option("--trials-max",
                      type="int",
                      dest="trialsmax",
                      default=None)
    parser.add_option("--transform", dest="transform", default=None)
    options, args = parser.parse_args()

    if len(args) != 1:
        parser.error("expected benchmark name as arg")

    cfg = options.config
    app = args[0]

    pbutil.chdirToPetabricksRoot()
    pbutil.compilePetabricks()
    app = pbutil.normalizeBenchmarkName(app)
    pbutil.compileBenchmarks([app])

    if options.debug:
        substderr = sys.__stderr__

    if cfg is None:
        cfg = pbutil.benchmarkToCfg(app)

    defaultArgs = [
        '--config=' + cfg,
        '--threads=%d' % options.threads,
        '--offset=%d' % options.offset,
        '--min=%d' % options.min
    ]

    if options.noisolation:
        defaultArgs.append("--noisolation")

    if options.acctrials is not None:
        defaultArgs.append("--acctrials=%d" % options.acctrials)
    if options.trials is not None:
        defaultArgs.append("--trials=%d" % options.trials)
    if options.trialssec is not None:
        defaultArgs.append("--trials-sec=%f" % options.trialssec)
    if options.trialsmax is not None:
        defaultArgs.append("--trials-max=%d" % options.trialsmax)
    if options.accimprovetries is not None:
        defaultArgs.append("--accimprovetries=%d" % options.accimprovetries)

    getIgnoreList()

    try:
        infoxml = parse(pbutil.benchmarkToInfo(app))
    except:
        print "Cannot parse:", pbutil.benchmarkToInfo(app)
        sys.exit(-1)

#print "Reseting config entries"
#reset()

#build index of transforms
    for t in infoxml.getElementsByTagName("transform"):
        transforms[nameof(t)] = t
        if t.getAttribute("templateChoice") == "0":
            transforms[t.getAttribute("templateName")] = t

    if options.transform is None:
        maintx = transforms[mainname()]
    else:
        maintx = transforms[options.transform]

    print "Call tree:"
    walkCallTree(maintx, fnup=printTx)
    print
    print "Autotuning:"

    progress.status("building work queue")

    if options.n <= 0:
        tasks.append(TuneTask("determineInputSizes", determineInputSizes))

    if options.time:
        tasks.append(TuneTask("runTimingTest", lambda: runTimingTest(maintx)))

    #build list of tasks
    if not options.fast:
        walkCallTree(
            maintx, lambda tx, depth, loops: enqueueAutotuneCmds(
                tx, maintx, 1, depth, loops))
    walkCallTree(
        maintx, lambda tx, depth, loops: enqueueAutotuneCmds(
            tx, maintx, 2, depth, loops))

    if options.time:
        tasks.append(TuneTask("runTimingTest", lambda: runTimingTest(maintx)))

    progress.status("autotuning")

    while len(tasks) > 0:
        w1 = remainingTaskWeight()
        task = tasks.pop(0)
        w2 = remainingTaskWeight()
        progress.remaining(w1, w2)
        task.run()
    progress.clear()

    t2 = time.time()
    sec = t2 - t1

    print "autotuning took %.2f sec" % (t2 - t1)
    for k, v in taskStats.items():
        print "  %.2f sec in %s" % (v.sec, k)
        sec -= v.sec
    print "  %.2f sec in unknown" % sec

    names = taskStats.keys()
    weights = map(lambda x: x.sec / float(max(x.count, 1)), taskStats.values())
    scale = len(weights) / sum(weights)
    print "Suggested weights:"
    print "taskStats = {" + ", ".join(
        map(lambda i: "'%s':TaskStats(%.2f)" %
            (names[i], scale * weights[i]), xrange(len(names)))) + "}"