    def archive_prefix(self):
        return self.prefix

    def getResults(self):
        return mx.ArchivableProject.walk(self.output_dir())


class SulongDocsProject(ArchiveProject):  # pylint: disable=too-many-ancestors
    doc_files = (glob.glob(join(_suite.dir, 'LICENSE')) +
                 glob.glob(join(_suite.dir, '*.md')))

    def getResults(self):
        return [join(_suite.dir, f) for f in self.doc_files]


mx_benchmark.add_bm_suite(mx_sulong_benchmarks.SulongBenchmarkSuite())

_toolchains = {}


def _get_toolchain(toolchain_name):
    if toolchain_name not in _toolchains:
        mx.abort("Toolchain '{}' does not exist! Known toolchains: {}".format(
            toolchain_name, ", ".join(_toolchains.keys())))
    return _toolchains[toolchain_name]


def _get_toolchain_tool(name_tool):
    # split only on the first comma so the argument has exactly the form "<toolchain>,<tool>"
    name, tool = name_tool.split(",", 1)
    return _get_toolchain(name).get_toolchain_tool(tool)
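# Hypothetical usage sketch (the toolchain class and its registration below are
# assumptions for illustration only; real toolchains are registered elsewhere):
#
#     class _DemoToolchain(object):
#         def get_toolchain_tool(self, tool):
#             return '/path/to/' + tool
#
#     _toolchains['demo'] = _DemoToolchain()
#     assert _get_toolchain_tool('demo,CC') == '/path/to/CC'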
class JMHRunnerTruffleBenchmarkSuite(mx_benchmark.JMHRunnerBenchmarkSuite):

    def name(self):
        return "truffle"

    def group(self):
        return "Graal"

    def subgroup(self):
        return "truffle"

    def extraVmArgs(self):
        return ['-XX:-UseJVMCIClassLoader'] + super(JMHRunnerTruffleBenchmarkSuite, self).extraVmArgs()


mx_benchmark.add_bm_suite(JMHRunnerTruffleBenchmarkSuite())
#mx_benchmark.add_java_vm(mx_benchmark.DefaultJavaVm("server", "default"), priority=3)


def javadoc(args, vm=None):
    """build the Javadoc for all API packages"""
    extraArgs = mx_sdk.build_oracle_compliant_javadoc_args(_suite, 'GraalVM', 'Truffle')
    mx.javadoc(['--unified', '--exclude-packages',
                'com.oracle.truffle.tck,com.oracle.truffle.tck.impl'] + extraArgs + args)
    javadoc_dir = os.sep.join([_suite.dir, 'javadoc'])
    checkLinks(javadoc_dir)
class JMHRunnerToolsBenchmarkSuite(mx_benchmark.JMHRunnerBenchmarkSuite):

    def name(self):
        return "tools"

    def group(self):
        return "Graal"

    def subgroup(self):
        return "tools"

    def extraVmArgs(self):
        return ['-XX:-UseJVMCIClassLoader'] + super(JMHRunnerToolsBenchmarkSuite, self).extraVmArgs()


mx_benchmark.add_bm_suite(JMHRunnerToolsBenchmarkSuite())


def javadoc(args):
    """build the Javadoc for all packages"""
    if not args:
        projectNames = []
        for p in mx.projects(True, True):
            projectNames.append(p.name)
        mx.javadoc(['--unified', '--projects', ','.join(projectNames)], includeDeps=False)
    else:
        mx.javadoc(['--unified'] + args)
    javadocDir = os.sep.join([_suite.dir, 'javadoc'])
    index = os.sep.join([javadocDir, 'index.html'])
    if exists(index):
                'metric.better': 'higher',
                'extra.metric.human': str(used_samples)
            }]
        else:
            sys.stderr.write(out.data)

            # TODO CS 24-Jun-16, how can we fail the wider suite?
            return [{
                'benchmark': benchmark,
                'metric.name': 'throughput',
                'metric.value': 0,
                'metric.unit': 'op/s',
                'metric.better': 'higher',
                'extra.error': 'failed'
            }]


mx_benchmark.add_bm_suite(AllocationBenchmarkSuite())
mx_benchmark.add_bm_suite(MinHeapBenchmarkSuite())
mx_benchmark.add_bm_suite(TimeBenchmarkSuite())
mx_benchmark.add_bm_suite(ClassicBenchmarkSuite())
mx_benchmark.add_bm_suite(ChunkyBenchmarkSuite())
mx_benchmark.add_bm_suite(PSDBenchmarkSuite())
mx_benchmark.add_bm_suite(ImageDemoBenchmarkSuite())
mx_benchmark.add_bm_suite(AsciidoctorBenchmarkSuite())
mx_benchmark.add_bm_suite(OptcarrotBenchmarkSuite())
mx_benchmark.add_bm_suite(SyntheticBenchmarkSuite())
mx_benchmark.add_bm_suite(MicroBenchmarkSuite())
mx_benchmark.add_bm_suite(SavinaBenchmarkSuite())
mx_benchmark.add_bm_suite(ServerBenchmarkSuite())
        # we do not want warmup results for timing benchmarks
        return [result for result in results if result["metric.name"] != "warmup"]

    def run(self, benchmarks, bmSuiteArgs):
        results = super(DaCapoTimingBenchmarkMixin, self).run(benchmarks, bmSuiteArgs)
        return self.removeWarmup(results)


class DaCapoTimingBenchmarkSuite(DaCapoTimingBenchmarkMixin, DaCapoBenchmarkSuite):  # pylint: disable=too-many-ancestors
    """DaCapo 9.12 (Bach) benchmark suite implementation."""

    def benchSuiteName(self):
        return "dacapo"


mx_benchmark.add_bm_suite(DaCapoTimingBenchmarkSuite())


class ScalaDaCapoTimingBenchmarkSuite(DaCapoTimingBenchmarkMixin, ScalaDaCapoBenchmarkSuite):  # pylint: disable=too-many-ancestors
    """Scala DaCapo benchmark suite implementation."""

    def benchSuiteName(self):
        return "scala-dacapo"


mx_benchmark.add_bm_suite(ScalaDaCapoTimingBenchmarkSuite())


class JMHRunnerGraalCoreBenchmarkSuite(mx_benchmark.JMHRunnerBenchmarkSuite):  # pylint: disable=too-many-ancestors

    def alternative_suite(self):
"Update Python inlined files: %s" % import_version) answer = raw_input("Should we push python-import (y/N)? ") if answer and answer in "Yy": _suite.vc.git_command( _suite.dir, ["push", "origin", "python-import:python-import"]) _suite.vc.update(_suite.dir, rev=tip) _suite.vc.git_command(_suite.dir, ["merge", "python-import"]) # ---------------------------------------------------------------------------------------------------------------------- # # add the defined python benchmark suites # # ---------------------------------------------------------------------------------------------------------------------- for py_bench_suite in PythonBenchmarkSuite.get_benchmark_suites(): mx_benchmark.add_bm_suite(py_bench_suite) # ---------------------------------------------------------------------------------------------------------------------- # # register as a GraalVM language # # ---------------------------------------------------------------------------------------------------------------------- mx_sdk.register_graalvm_component( mx_sdk.GraalVmLanguage( suite=_suite, name='Graal.Python', short_name='pyn', dir_name='python', license_files=['LICENSE_GRAALPYTHON'], third_party_license_files=['3rd_party_licenses_graalpython.txt'], truffle_jars=[
    def name(self):
        return 'micro'

    def benchmarks(self):
        out = mx.OutputCapture()
        jt(['where', 'repos', 'all-ruby-benchmarks'], out=out)
        all_ruby_benchmarks = out.data.strip()
        benchmarks = []
        for root, dirs, files in os.walk(os.path.join(all_ruby_benchmarks, 'micro')):
            for name in files:
                if name.endswith('.rb'):
                    benchmark_file = os.path.join(root, name)[len(all_ruby_benchmarks)+1:]
                    out = mx.OutputCapture()
                    if jt(['benchmark', 'list', benchmark_file], out=out):
                        benchmarks.extend([benchmark_file + ':' + b.strip() for b in out.data.split('\n') if len(b.strip()) > 0])
                    else:
                        sys.stderr.write(out.data)
        return benchmarks

    def time(self):
        return micro_benchmark_time


mx_benchmark.add_bm_suite(AllocationBenchmarkSuite())
mx_benchmark.add_bm_suite(MinHeapBenchmarkSuite())
mx_benchmark.add_bm_suite(TimeBenchmarkSuite())
mx_benchmark.add_bm_suite(ClassicBenchmarkSuite())
mx_benchmark.add_bm_suite(ChunkyBenchmarkSuite())
mx_benchmark.add_bm_suite(PSDBenchmarkSuite())
mx_benchmark.add_bm_suite(SyntheticBenchmarkSuite())
mx_benchmark.add_bm_suite(MicroBenchmarkSuite())
        return {'kind': 'fixed-iterations', 'iterations': iterations}

    def name(self):
        return 'ruby-warmup'

    def directory(self):
        return None

    def benchmarkList(self, bmSuiteArgs):
        benchmarks = warmup_benchmarks[:]
        if os.getenv('HOST_VM') != "jruby":
            benchmarks.extend(blog6_benchmarks)
        return benchmarks


mx_benchmark.add_bm_suite(BuildStatsBenchmarkSuite())
mx_benchmark.add_bm_suite(AllocationBenchmarkSuite())
mx_benchmark.add_bm_suite(InstructionsBenchmarkSuite())
mx_benchmark.add_bm_suite(MinHeapBenchmarkSuite())
mx_benchmark.add_bm_suite(MaxRssBenchmarkSuite())
mx_benchmark.add_bm_suite(TimeBenchmarkSuite())
mx_benchmark.add_bm_suite(ClassicBenchmarkSuite())
mx_benchmark.add_bm_suite(ChunkyBenchmarkSuite())
mx_benchmark.add_bm_suite(PSDBenchmarkSuite())
mx_benchmark.add_bm_suite(ImageDemoBenchmarkSuite())
mx_benchmark.add_bm_suite(AsciidoctorBenchmarkSuite())
mx_benchmark.add_bm_suite(OptcarrotBenchmarkSuite())
mx_benchmark.add_bm_suite(SyntheticBenchmarkSuite())
mx_benchmark.add_bm_suite(MicroBenchmarkSuite())
mx_benchmark.add_bm_suite(SavinaBenchmarkSuite())
mx_benchmark.add_bm_suite(ServerBenchmarkSuite())
    for executed_bench in executed_benchmarks:
        if executed_bench in excluded:
            mx.abort("The test " + executed_bench + " is in excluded list: " + str(excluded))


def checkIncluded(command, included):
    check(command, included, set(benchmark_list) - set(included))


def checkExcluded(command, excluded):
    check(command, set(benchmark_list) - set(excluded), excluded)


mx_benchmark.add_bm_suite(TestBenchBenchmarkSuite())

checkIncluded("benchSuite:a", ["a"])
checkIncluded("benchSuite:*[a,X-Y,123]", ["a", "X-Y", "123"])
checkIncluded("benchSuite:*[a , X-Y , 123]", ["a", "X-Y", "123"])  # space allowed around comma
checkIncluded("benchSuite:r[[ah].*]", ["a", "hello-world"])
checkIncluded("benchSuite:r[b]", ["b"])  # does not contain bbb, since we use fullmatch
checkIncluded("benchSuite:r[.*, .*]", ["meta, tests"])  # comma and space are interpreted correctly

checkExcluded("benchSuite:*", [])
checkExcluded("benchSuite:~a", ["a"])
checkExcluded("benchSuite:~a,b", ["a", "b"])
checkExcluded("benchSuite:~a , b", ["a", "b"])  # space allowed around comma
checkExcluded("benchSuite:~[a,b]", ["a", "b"])
            'metric.better': 'lower',
            'extra.metric.human': data['human']
        }]


class TimeBenchmarkSuite(MetricsBenchmarkSuite):

    def name(self):
        return 'time'

    def runBenchmark(self, benchmark, bmSuiteArgs):
        out = mx.OutputCapture()
        options = []
        jt(['metrics', 'time', '--json'] + metrics_benchmarks[benchmark] + bmSuiteArgs, out=out)
        data = json.loads(out.data)
        return [{
            'benchmark': benchmark,
            'extra.metric.region': r,
            'metric.name': 'time',
            'metric.value': t,
            'metric.unit': 's',
            'metric.better': 'lower',
            'extra.metric.human': data['human']
        } for r, t in data.items() if r != 'human']


mx_benchmark.add_bm_suite(AllocationBenchmarkSuite())
mx_benchmark.add_bm_suite(MinHeapBenchmarkSuite())
mx_benchmark.add_bm_suite(TimeBenchmarkSuite())
f"\n" f"step <- 1L\n" f"timestamps <- vector('double', {MAX_BENCH_ITERATIONS})\n" f"cur_seconds <- get_cur_seconds()\n" f"target_time <- {bench_args.warmup + bench_args.measure}\n" f"start_time <- cur_seconds\n" f"\n" f"while (cur_seconds - start_time < target_time) {{\n" f" if (!benchmark_func(benchmark_func_args)) {{\n" f" cat('ERROR: Wrong result\\n')\n" f" return (0)\n" f" }}\n" f" timestamps[[step]] <- cur_seconds\n" f" step <- step + 1L\n" f" cur_seconds <- get_cur_seconds()\n" f"}}\n" f"\n" f"end_time <- get_cur_seconds()\n" f"\n" # Output of the benchmark f"cat('benchmark results:', '\\n')\n" f"cat(start_time, end_time, step - 1, '\\n')\n" f"cat(timestamps[1:length(timestamps) < step], sep='\\n')\n" ) def altrep_benchmark(args): return mx_benchmark.benchmark(["altrep"] + args) mx_benchmark.add_bm_suite(AltrepBenchmarkSuite())
                'metric.value': ips,
                'metric.unit': 'op/s',
                'metric.better': 'higher',
                'extra.metric.human': str(used_samples)
            }]
        else:
            sys.stderr.write(out.data)

            # TODO CS 24-Jun-16, how can we fail the wider suite?
            return [{
                'benchmark': benchmark,
                'metric.name': 'throughput',
                'metric.value': 0,
                'metric.unit': 'op/s',
                'metric.better': 'higher',
                'extra.error': 'failed'
            }]


mx_benchmark.add_bm_suite(AllocationBenchmarkSuite())
mx_benchmark.add_bm_suite(MinHeapBenchmarkSuite())
mx_benchmark.add_bm_suite(TimeBenchmarkSuite())
mx_benchmark.add_bm_suite(ClassicBenchmarkSuite())
mx_benchmark.add_bm_suite(ChunkyBenchmarkSuite())
mx_benchmark.add_bm_suite(PSDBenchmarkSuite())
mx_benchmark.add_bm_suite(ImageDemoBenchmarkSuite())
mx_benchmark.add_bm_suite(AsciidoctorBenchmarkSuite())
mx_benchmark.add_bm_suite(OptcarrotBenchmarkSuite())
mx_benchmark.add_bm_suite(SyntheticBenchmarkSuite())
mx_benchmark.add_bm_suite(MicroBenchmarkSuite())
mx_benchmark.add_bm_suite(ServerBenchmarkSuite())
class MicroBenchmarkSuite(AllBenchmarksBenchmarkSuite):

    def name(self):
        return 'micro'

    def benchmarks(self):
        out = mx.OutputCapture()
        jt(['where', 'repos', 'all-ruby-benchmarks'], out=out)
        all_ruby_benchmarks = out.data.strip()
        benchmarks = []
        for root, dirs, files in os.walk(os.path.join(all_ruby_benchmarks, 'micro')):
            for name in files:
                if name.endswith('.rb'):
                    benchmark_file = os.path.join(root, name)[len(all_ruby_benchmarks)+1:]
                    out = mx.OutputCapture()
                    jt(['benchmark', 'list', benchmark_file], out=out)
                    benchmarks.extend([benchmark_file + ':' + b.strip() for b in out.data.split('\n') if len(b.strip()) > 0])
        return benchmarks

    def time(self):
        return micro_benchmark_time


mx_benchmark.add_bm_suite(AllocationBenchmarkSuite())
mx_benchmark.add_bm_suite(MinHeapBenchmarkSuite())
mx_benchmark.add_bm_suite(TimeBenchmarkSuite())
mx_benchmark.add_bm_suite(ClassicBenchmarkSuite())
mx_benchmark.add_bm_suite(ChunkyBenchmarkSuite())
mx_benchmark.add_bm_suite(PSDBenchmarkSuite())
mx_benchmark.add_bm_suite(SyntheticBenchmarkSuite())
mx_benchmark.add_bm_suite(MicroBenchmarkSuite())
            mx_benchmark.StdOutRule(
                r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) completed warmup [0-9]+ in (?P<time>[0-9]+) msec =====",  # pylint: disable=line-too-long
                {
                    "benchmark": ("<benchmark>", str),
                    "metric.name": "warmup",
                    "metric.value": ("<time>", int),
                    "metric.unit": "ms",
                    "metric.type": "numeric",
                    "metric.score-function": "id",
                    "metric.better": "lower",
                    "metric.iteration": ("$iteration", int)
                }
            )
        ]


mx_benchmark.add_bm_suite(DaCapoBenchmarkSuite())


_allSpecJVM2008Benchs = [
    'startup.helloworld',
    'startup.compiler.compiler',
    # 'startup.compiler.sunflow', # disabled until timeout problem in jdk8 is resolved
    'startup.compress',
    'startup.crypto.aes',
    'startup.crypto.rsa',
    'startup.crypto.signverify',
    'startup.mpegaudio',
    'startup.scimark.fft',
    'startup.scimark.lu',
    'startup.scimark.monte_carlo',
    'startup.scimark.sor',
    'startup.scimark.sparse',
class JMHRunnerToolsBenchmarkSuite(mx_benchmark.JMHRunnerBenchmarkSuite):

    def name(self):
        return "tools"

    def group(self):
        return "Graal"

    def subgroup(self):
        return "tools"

    def extraVmArgs(self):
        return ['-XX:-UseJVMCIClassLoader'] + super(JMHRunnerToolsBenchmarkSuite, self).extraVmArgs()


mx_benchmark.add_bm_suite(JMHRunnerToolsBenchmarkSuite())


def javadoc(args):
    """build the Javadoc for all packages"""
    if not args:
        projectNames = []
        for p in mx.projects(True, True):
            projectNames.append(p.name)
        mx.javadoc(['--unified', '--projects', ','.join(projectNames)], includeDeps=False)
    else:
        mx.javadoc(['--unified'] + args)
    javadocDir = os.sep.join([_suite.dir, 'javadoc'])
    index = os.sep.join([javadocDir, 'index.html'])
    if exists(index):
        indexContent = open(index, 'r').read()
    def archive_prefix(self):
        return self.prefix

    def getResults(self):
        return mx.ArchivableProject.walk(self.output_dir())


class SulongDocsProject(ArchiveProject):
    doc_files = (glob.glob(join(_suite.dir, 'LICENSE')) +
                 glob.glob(join(_suite.dir, '*.md')))

    def getResults(self):
        return [join(_suite.dir, f) for f in self.doc_files]


mx_benchmark.add_bm_suite(mx_sulong_benchmarks.SulongBenchmarkSuite())

mx_sdk.register_graalvm_component(mx_sdk.GraalVmLanguage(
    suite=_suite,
    name='Sulong',
    short_name='slg',
    dir_name='llvm',
    license_files=[],
    third_party_license_files=[],
    truffle_jars=['sulong:SULONG'],
    support_distributions=[
        'sulong:SULONG_LIBS',
        'sulong:SULONG_GRAALVM_DOCS',
    ],
    launcher_configs=[
        mx_sdk.LanguageLauncherConfig(
class TimeBenchmarkSuite(MetricsBenchmarkSuite):

    def name(self):
        return 'time'

    def runBenchmark(self, benchmark, bmSuiteArgs):
        out = mx.OutputCapture()
        options = []
        jt(['metrics', 'time', '--json'] + metrics_benchmarks[benchmark] + bmSuiteArgs, out=out)
        data = json.loads(out.data)
        return [{
            'benchmark': benchmark,
            'extra.metric.region': r,
            'metric.name': 'time',
            'metric.value': t,
            'metric.unit': 's',
            'metric.better': 'lower',
            'extra.metric.human': data['human']
        } for r, t in data.items() if r != 'human']


mx_benchmark.add_bm_suite(AllocationBenchmarkSuite())
mx_benchmark.add_bm_suite(MinHeapBenchmarkSuite())
mx_benchmark.add_bm_suite(TimeBenchmarkSuite())
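# A minimal sketch, for illustration only, of how a `jt metrics time --json`
# payload (the field names below are assumed, not taken from the tool) turns
# into datapoints in TimeBenchmarkSuite.runBenchmark above: every region except
# the 'human' summary becomes one result entry with metric.name 'time'.
import json

_demo_payload = json.loads('{"total": 12.3, "startup": 0.8, "human": "12.3s total"}')
_demo_results = [{
    'benchmark': 'demo',
    'extra.metric.region': r,
    'metric.name': 'time',
    'metric.value': t,
    'metric.unit': 's',
    'metric.better': 'lower',
    'extra.metric.human': _demo_payload['human'],
} for r, t in _demo_payload.items() if r != 'human']
assert len(_demo_results) == 2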
        with _open_for_reading(ll_tmp_path) as ll_tmp_f, _open_for_writing(ll_path) as ll_f:
            ll_f.writelines((l.replace(tmp_path, in_file) for l in ll_tmp_f))
    finally:
        if tmp_dir:
            shutil.rmtree(tmp_dir)


_env_flags = []
if 'CPPFLAGS' in os.environ:
    _env_flags = os.environ['CPPFLAGS'].split(' ')


# Legacy bm suite
mx_benchmark.add_bm_suite(mx_sulong_benchmarks.SulongBenchmarkSuite(False))

# Polybench bm suite
mx_benchmark.add_bm_suite(mx_sulong_benchmarks.SulongBenchmarkSuite(True))

# LLVM unit tests suite
mx_benchmark.add_bm_suite(mx_sulong_benchmarks.LLVMUnitTestsSuite())

_toolchains = {}


def _get_toolchain(toolchain_name):
    if toolchain_name not in _toolchains:
        mx.abort("Toolchain '{}' does not exist! Known toolchains: {}".format(
            toolchain_name, ", ".join(_toolchains.keys())))
    return _toolchains[toolchain_name]
    def successPatterns(self):
        return []

    def isWasmBenchmarkVm(self, bmSuiteArgs):
        parser = argparse.ArgumentParser()
        parser.add_argument("--jvm-config")
        jvm_config = parser.parse_known_args(bmSuiteArgs)[0].jvm_config
        return jvm_config in ("node", "native")

    def rules(self, out, benchmarks, bmSuiteArgs):
        if self.isWasmBenchmarkVm(bmSuiteArgs):
            return []
        return [WasmJMHJsonRule(mx_benchmark.JMHBenchmarkSuiteBase.jmh_result_file, self.benchSuiteName(bmSuiteArgs))]


add_bm_suite(WasmBenchmarkSuite())

_suite = mx.suite("wasm")

MEMORY_PROFILER_CLASS_NAME = "org.graalvm.wasm.benchmark.MemoryFootprintBenchmarkRunner"
MEMORY_WARMUP_ITERATIONS = 10
BENCHMARKCASES_DISTRIBUTION = "WASM_BENCHMARKCASES"


class MemoryBenchmarkSuite(mx_benchmark.JavaBenchmarkSuite, mx_benchmark.AveragingBenchmarkMixin):
    """
    Example suite used for testing and as a subclassing template.
    """
    ('trace-cache-executed', [], 130),
]:
    if mx.suite('js-benchmarks', fatalIfMissing=False):
        import mx_js_benchmarks
        mx_js_benchmarks.add_vm(GraalJsVm(config_name, options), _suite, priority)
    mx_benchmark.js_vm_registry.add_vm(GraalJsVm(config_name, options), _suite, priority)


class JMHDistGraalJsBenchmarkSuite(JMHDistBenchmarkSuite):

    def name(self):
        return "js-interop-jmh"

    def group(self):
        return "Graal"

    def subgroup(self):
        return "graal-js"


add_bm_suite(JMHDistGraalJsBenchmarkSuite())

mx_sdk_vm.register_vm_config('ce', ['cmp', 'icu4j', 'js', 'nfi', 'rgx', 'sdk', 'svm', 'svmnfi', 'tfl', 'tflm'], _suite)
mx_sdk_vm.register_vm_config('ee', ['cmp', 'cmpee', 'icu4j', 'js', 'nfi', 'rgx', 'sdk', 'svm', 'svmee', 'svmeegc', 'svmnfi', 'tfl', 'tflm'], _suite)
"id", "metric.better": "lower", "metric.iteration": ("$iteration", int) }) ] def postprocessRunArgs(self, benchname, runArgs): parser = argparse.ArgumentParser(add_help=False) parser.add_argument("-n", default=None) args, remaining = parser.parse_known_args(runArgs) result = ['-c', 'WallTimeCallback'] + remaining if args.n: if args.n.isdigit(): result = ["-n", args.n] + result else: iterations = scala_dacapo_warmup_iterations[benchname][ "late-warmup"] result = ["-n", str(iterations)] + result return result def run(self, benchmarks, bmSuiteArgs): results = super(ScalaDaCapoWarmupBenchmarkSuite, self).run(benchmarks, bmSuiteArgs) self.warmupResults(results, scala_dacapo_warmup_iterations, 'walltime') # walltime entries are not accepted by the bench server return [e for e in results if e["metric.name"] != "walltime"] mx_benchmark.add_bm_suite(ScalaDaCapoWarmupBenchmarkSuite())
        return [
            mx_benchmark.StdOutRule(r'^\[(?P<benchmark>\S+?):[0-9]+\][ ]+\[total\]:[ ]+(?P<time>[0-9,.]+?) ms', {
                "bench-suite": self.name(),
                "benchmark": ("<benchmark>", str),
                "metric.name": "time",
                "metric.type": "numeric",
                "metric.unit": "ms",
                "metric.value": ("<time>", NativeImageTimeToInt()),
                "metric.score-function": "id",
                "metric.better": "lower",
                "metric.iteration": 0,
            })
        ]


mx_benchmark.add_bm_suite(NativeImageBuildBenchmarkSuite(name='native-image', benchmarks={'js': ['--language:js']}, registry=_native_image_vm_registry))
mx_benchmark.add_bm_suite(NativeImageBuildBenchmarkSuite(name='gu', benchmarks={'js': ['js'], 'libpolyglot': ['libpolyglot']}, registry=_gu_vm_registry))


def register_graalvm_vms():
    graalvm_hostvm_name = mx_vm.graalvm_dist_name().lower().replace('_', '-')
    for config_name, java_args, launcher_args, priority in mx_sdk.graalvm_hostvm_configs:
        mx_benchmark.java_vm_registry.add_vm(GraalVm(graalvm_hostvm_name, config_name, java_args, launcher_args), _suite, priority)
    if mx_vm.has_component('svm', fatalIfMissing=False):
        _native_image_vm_registry.add_vm(NativeImageBuildVm(graalvm_hostvm_name, 'default', [], []), _suite, 10)
        _gu_vm_registry.add_vm(GuVm(graalvm_hostvm_name, 'default', [], []), _suite, 10)

    # We support only EE and CE configurations for native-image benchmarks
    for short_name, config_suffix in [('niee', 'ee'), ('ni', 'ce')]:
        if any(component.short_name == short_name for component in mx_vm.registered_graalvm_components(stage1=False)):
            mx_benchmark.add_java_vm(NativeImageVM('native-image', 'default-' + config_suffix, None, None, 0, False), _suite, 10)
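# Hypothetical sample line (made up for illustration) showing which fields the
# '[total]' StdOutRule defined in the rules() method above extracts from
# native-image build output.
import re

_demo_line = "[helloworld:12345]    [total]:  33,243.53 ms"
_demo_match = re.search(r'^\[(?P<benchmark>\S+?):[0-9]+\][ ]+\[total\]:[ ]+(?P<time>[0-9,.]+?) ms', _demo_line)
assert _demo_match.group("benchmark") == "helloworld"
assert _demo_match.group("time") == "33,243.53"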
            mx_benchmark.StdOutRule(
                r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) completed warmup [0-9]+ in (?P<time>[0-9]+) msec =====",  # pylint: disable=line-too-long
                {
                    "benchmark": ("<benchmark>", str),
                    "metric.name": "warmup",
                    "metric.value": ("<time>", int),
                    "metric.unit": "ms",
                    "metric.type": "numeric",
                    "metric.score-function": "id",
                    "metric.better": "lower",
                    "metric.iteration": ("$iteration", int)
                })
        ]


mx_benchmark.add_bm_suite(DaCapoBenchmarkSuite())


_allSpecJVM2008Benchs = [
    'startup.helloworld',
    'startup.compiler.compiler',
    # 'startup.compiler.sunflow', # disabled until timeout problem in jdk8 is resolved
    'startup.compress',
    'startup.crypto.aes',
    'startup.crypto.rsa',
    'startup.crypto.signverify',
    'startup.mpegaudio',
    'startup.scimark.fft',
    'startup.scimark.lu',
    'startup.scimark.monte_carlo',
    'startup.scimark.sor',
    'startup.scimark.sparse',
    def classpath_repr(self, resolve=True):
        return None

    def get_dependencies(self, group):
        deps = []
        for f in os.listdir(group):
            f_path = mx.join(group, f)
            if os.path.isfile(f_path) and f.endswith('.jar'):
                deps.append(RenaissanceNativeImageBenchmarkSuite.RenaissanceDependency(os.path.basename(f), f_path))
        return deps

    def collect_group_dependencies(self, group, scala_version):
        if group == 'harness':
            if scala_version == 12:
                unpacked_renaissance = RenaissanceNativeImageBenchmarkSuite.renaissance_unpacked(self.suite)
                path = mx.join(unpacked_renaissance, 'renaissance-harness')
            else:
                path = RenaissanceNativeImageBenchmarkSuite.harness_path(self.suite)
        else:
            unpacked_renaissance = RenaissanceNativeImageBenchmarkSuite.renaissance_unpacked(self.suite)
            path = mx.join(unpacked_renaissance, 'benchmarks', group)
        return self.get_dependencies(path)


mx_benchmark.add_bm_suite(RenaissanceNativeImageBenchmarkSuite())
def _register_bench_suites(namespace):
    for py_bench_suite in PythonBenchmarkSuite.get_benchmark_suites(BENCHMARKS):
        mx_benchmark.add_bm_suite(py_bench_suite)
    def daCapoIterations(self):
        return _daCapoIterations

    def flakySuccessPatterns(self):
        return [
            re.compile(
                r"^javax.ejb.FinderException: Cannot find account for",
                re.MULTILINE),
            re.compile(
                r"^java.lang.Exception: TradeDirect:Login failure for user:"******"""DaCapo 9.12 (Bach) benchmark suite implementation."""

    def benchSuiteName(self):
        return "dacapo"


mx_benchmark.add_bm_suite(DaCapoTimingBenchmarkSuite())


class DaCapoMoveProfilingBenchmarkSuite(DaCapoMoveProfilingBenchmarkMixin, DaCapoBenchmarkSuite):  # pylint: disable=too-many-ancestors
    """DaCapo 9.12 (Bach) benchmark suite implementation."""
def name(self): return "wasm" def group(self): return "Graal" def benchSuiteName(self, bmSuiteArgs): return next(arg for arg in bmSuiteArgs if arg.endswith("BenchmarkSuite")) def subgroup(self): return "wasm" def successPatterns(self): return [] def isWasmBenchmarkVm(self, bmSuiteArgs): jvm_config = bmSuiteArgs[bmSuiteArgs.index("--jvm-config") + 1] return jvm_config == "node" or jvm_config == "native" def rules(self, out, benchmarks, bmSuiteArgs): if self.isWasmBenchmarkVm(bmSuiteArgs): return [] return [ WasmJMHJsonRule(mx_benchmark.JMHBenchmarkSuiteBase.jmh_result_file, self.benchSuiteName(bmSuiteArgs)) ] add_bm_suite(WasmBenchmarkSuite())
    def daCapoIterations(self):
        return _daCapoIterations

    def flakySuccessPatterns(self):
        return [
            re.compile(
                r"^javax.ejb.FinderException: Cannot find account for",
                re.MULTILINE),
            re.compile(
                r"^java.lang.Exception: TradeDirect:Login failure for user:"******"""DaCapo 9.12 (Bach) benchmark suite implementation."""

    def benchSuiteName(self):
        return "dacapo"


mx_benchmark.add_bm_suite(DaCapoTimingBenchmarkSuite())

_daCapoScalaConfig = {
    "actors" : 10,
    "apparat" : 5,
class ZipPyBenchmarkSuite(BaseZippyBenchmarkSuite):

    def name(self):
        return "zippy-normal"

    def getPath(self):
        return pathBench

    def benchmarksType(self):
        return "normal"

    def benchmarksIterations(self):
        return pythonBenchmarks


mx_benchmark.add_bm_suite(ZipPyBenchmarkSuite())


class ZipPyMicroBenchmarkSuite(BaseZippyBenchmarkSuite):

    def name(self):
        return "zippy-micro"

    def getPath(self):
        return pathMicro

    def benchmarksType(self):
        return "micro"

    def benchmarksIterations(self):
        return pythonMicroBenchmarks
        if group == 'harness':
            if scala_version == 12:
                unpacked_renaissance = RenaissanceNativeImageBenchmarkSuite.renaissance_unpacked(self.suite)
                path = mx.join(unpacked_renaissance, 'renaissance-harness')
            else:
                path = RenaissanceNativeImageBenchmarkSuite.harness_path(self.suite)
        else:
            unpacked_renaissance = RenaissanceNativeImageBenchmarkSuite.renaissance_unpacked(self.suite)
            path = mx.join(unpacked_renaissance, 'benchmarks', group)
        return self.get_dependencies(path)


mx_benchmark.add_bm_suite(RenaissanceNativeImageBenchmarkSuite())


class BaseDaCapoNativeImageBenchmarkSuite():
    '''The `SetBuildInfo` method in the DaCapo sources reads from a file nested in the DaCapo jar.
    This is not supported with native image, hence it returns `unknown` for the code version.'''

    def suite_title(self):
        return 'DaCapo unknown'

    @staticmethod
    def collect_dependencies(path):
        deps = []
        for f in list_jars(path):
            deps.append(mx.join(path, f))
        return deps
class JMHRunnerTruffleBenchmarkSuite(mx_benchmark.JMHRunnerBenchmarkSuite):

    def name(self):
        return "truffle"

    def group(self):
        return "Graal"

    def subgroup(self):
        return "truffle"

    def extraVmArgs(self):
        return ['-XX:-UseJVMCIClassLoader'] + super(JMHRunnerTruffleBenchmarkSuite, self).extraVmArgs()


mx_benchmark.add_bm_suite(JMHRunnerTruffleBenchmarkSuite())
#mx_benchmark.add_java_vm(mx_benchmark.DefaultJavaVm("server", "default"), priority=3)


def javadoc(args, vm=None):
    """build the Javadoc for all API packages"""
    mx.javadoc(['--unified', '--exclude-packages',
                'com.oracle.truffle.tck,com.oracle.truffle.tck.impl'] + args)
    javadoc_dir = os.sep.join([_suite.dir, 'javadoc'])
    checkLinks(javadoc_dir)


def checkLinks(javadocDir):
    href = re.compile('(?<=href=").*?(?=")')
    filesToCheck = {}
    for root, _, files in os.walk(javadocDir):
        for f in files:
            if f.endswith('.html'):
                html = os.path.join(root, f)