def launcher(bm, harnessArgs, extraVmOpts):
    # Run a single Scala DaCapo benchmark on the current VM, forwarding the
    # harness arguments and any extra VM options.
    return sanitycheck.getScalaDacapo(bm, harnessArgs).test(mx_graal_core.get_vm(), extraVmOpts=extraVmOpts)
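# Sketch (assumption, not part of the original file): a direct invocation of the
# launcher above. 'scalac' stands in for any Scala DaCapo benchmark name, and both
# option lists are placeholders chosen for illustration.
def _example_scaladacapo_run():
    # 5 harness iterations of the 'scalac' benchmark, with one extra VM option
    # appended to the command line.
    return launcher('scalac', ['-n', '5'], ['-Xmx2g'])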
def bench(args):
    """run benchmarks and parse their output for results

    Results are JSON formatted: {group : {benchmark : score}}."""
    resultFile = None
    if '-resultfile' in args:
        index = args.index('-resultfile')
        if index + 1 < len(args):
            resultFile = args[index + 1]
            # remove the flag and its value from the argument list
            del args[index]
            del args[index]
        else:
            mx.abort('-resultfile must be followed by a file name')
    resultFileCSV = None
    if '-resultfilecsv' in args:
        index = args.index('-resultfilecsv')
        if index + 1 < len(args):
            resultFileCSV = args[index + 1]
            # remove the flag and its value from the argument list
            del args[index]
            del args[index]
        else:
            mx.abort('-resultfilecsv must be followed by a file name')
    vm = mx_graal_core.get_vm()
    if len(args) == 0:
        args = ['all']
    # remaining '-' arguments are passed through to the VM
    vmArgs = [arg for arg in args if arg.startswith('-')]

    def benchmarks_in_group(group):
        # select 'group:benchmark' arguments and strip the group prefix
        prefix = group + ':'
        return [a[len(prefix):] for a in args if a.startswith(prefix)]

    results = {}
    benchmarks = []
    # DaCapo
    if 'dacapo' in args or 'all' in args:
        benchmarks += sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
    else:
        dacapos = benchmarks_in_group('dacapo')
        for dacapo in dacapos:
            if dacapo not in sanitycheck.dacapoSanityWarmup.keys():
                mx.abort('Unknown DaCapo: ' + dacapo)
            iterations = sanitycheck.dacapoSanityWarmup[dacapo][sanitycheck.SanityCheckLevel.Benchmark]
            if iterations > 0:
                benchmarks += [sanitycheck.getDacapo(dacapo, ['-n', str(iterations)])]
    # Scala DaCapo
    if 'scaladacapo' in args or 'all' in args:
        benchmarks += sanitycheck.getScalaDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
    else:
        scaladacapos = benchmarks_in_group('scaladacapo')
        for scaladacapo in scaladacapos:
            if scaladacapo not in sanitycheck.dacapoScalaSanityWarmup.keys():
                mx.abort('Unknown Scala DaCapo: ' + scaladacapo)
            iterations = sanitycheck.dacapoScalaSanityWarmup[scaladacapo][sanitycheck.SanityCheckLevel.Benchmark]
            if iterations > 0:
                benchmarks += [sanitycheck.getScalaDacapo(scaladacapo, ['-n', str(iterations)])]
    # Bootstrap
    if 'bootstrap' in args or 'all' in args:
        benchmarks += sanitycheck.getBootstraps()
    # SPECjvm2008
    if 'specjvm2008' in args or 'all' in args:
        benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120'])]
    else:
        specjvms = benchmarks_in_group('specjvm2008')
        for specjvm in specjvms:
            benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120', specjvm])]
    # SPECjbb
    if 'specjbb2005' in args or 'all' in args:
        benchmarks += [sanitycheck.getSPECjbb2005()]
    if 'specjbb2013' in args:  # or 'all' in args -- currently not in the default set
        benchmarks += [sanitycheck.getSPECjbb2013()]
    # Compile-the-world
    if 'ctw-full' in args:
        benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.Full))
    if 'ctw-noinline' in args:
        benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.NoInline))
    # let registered extensions add their own benchmarks
    for f in extraBenchmarks:
        f(args, vm, benchmarks)

    # run the selected benchmarks and merge their per-group results
    for test in benchmarks:
        for (groupName, res) in test.bench(vm, extraVmOpts=vmArgs).items():
            group = results.setdefault(groupName, {})
            group.update(res)
    mx.log(json.dumps(results))
    if resultFile:
        with open(resultFile, 'w') as f:
            f.write(json.dumps(results))
    if resultFileCSV:
        with open(resultFileCSV, 'w') as f:
            for group, scores in results.items():
                f.write('%s;\n' % (str(group)))
                for benchmark, score in sorted(scores.items()):
                    f.write('%s; %s;\n' % (str(benchmark), str(score)))
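# Sketch (assumption, not part of the original file): how 'bench' could be exposed as
# an mx command. mx.update_commands is the usual registration hook; '_suite' is a
# placeholder for this suite's module-level suite object, whose actual name may differ.
#
# mx.update_commands(_suite, {
#     'bench': [bench, '[-resultfile file] [-resultfilecsv file] [group|group:benchmark]...'],
# })
#
# With such a registration, 'mx bench dacapo:fop -resultfile results.json' would run a
# single DaCapo benchmark and write the JSON results to the given file.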