Example #1
def graal_wasm_gate_runner(args, tasks):
    with Task("BuildAll", tasks, tags=[GraalWasmDefaultTags.buildall]) as t:
        if t:
            mx.build(["--all"])
    with Task("UnitTests", tasks, tags=[GraalWasmDefaultTags.wasmtest]) as t:
        if t:
            unittest(["-Dwasmtest.watToWasmExecutable=" + os.path.join(wabt_dir, wat2wasm_binary()), "WasmTestSuite"])
    with Task("ConstantsPolicyUnitTests", tasks, tags=[GraalWasmDefaultTags.wasmconstantspolicytest]) as t:
        if t:
            unittest(["-Dwasmtest.watToWasmExecutable=" + os.path.join(wabt_dir, wat2wasm_binary()),
                      "-Dwasmtest.storeConstantsPolicy=LARGE_ONLY", "WasmTestSuite"])
    with Task("ExtraUnitTests", tasks, tags=[GraalWasmDefaultTags.wasmextratest]) as t:
        if t:
            unittest(["CSuite"])
            unittest(["WatSuite"])
    with Task("ConstantsPolicyExtraUnitTests", tasks, tags=[GraalWasmDefaultTags.wasmconstantspolicyextratest]) as t:
        if t:
            unittest(["-Dwasmtest.storeConstantsPolicy=LARGE_ONLY", "CSuite"])
            unittest(["-Dwasmtest.storeConstantsPolicy=LARGE_ONLY", "WatSuite"])
    # This is a gate used to test that all the benchmarks return the correct results. It does not upload anything,
    # and does not run on a dedicated machine.
    with Task("BenchTest", tasks, tags=[GraalWasmDefaultTags.wasmbenchtest]) as t:
        if t:
            for b in microbenchmarks:
                exitcode = mx_benchmark.benchmark([
                        "wasm:WASM_BENCHMARKCASES", "--",
                        "--jvm", "server", "--jvm-config", "graal-core",
                        "-Dwasmbench.benchmarkName=" + b, "-Dwasmtest.keepTempFiles=true", "--",
                        "CMicroBenchmarkSuite", "-wi", "1", "-i", "1"])
                if exitcode != 0:
                    mx.abort("Errors during benchmark tests, aborting.")
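
Not shown in this snippet is how the runner is hooked into the gate machinery. A minimal sketch of the usual mx registration, assuming the enclosing extension module defines the suite object (the suite name 'wasm' and the module-level placement are assumptions, not taken from the example):

import mx
import mx_gate

# Assumed registration so that running the gate with the tags above picks up
# these tasks; in the real module this sits at module level next to the runner.
_suite = mx.suite('wasm')
mx_gate.add_gate_runner(_suite, graal_wasm_gate_runner)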
Example #2
def graal_wasm_gate_runner(args, tasks):
    with Task("BuildAll", tasks, tags=[GraalWasmDefaultTags.buildall]) as t:
        if t:
            mx.build(["--all"])
    with Task("UnitTests", tasks, tags=[GraalWasmDefaultTags.wasmtest]) as t:
        if t:
            unittest([
                "-Dwasmtest.watToWasmExecutable=" +
                os.path.join(wabt_dir, "wat2wasm"), "WasmTestSuite"
            ])
    with Task("ExtraUnitTests",
              tasks,
              tags=[GraalWasmDefaultTags.wasmextratest]) as t:
        if t:
            unittest(["CSuite"])
            unittest(["WatSuite"])
    with Task("BenchTest", tasks,
              tags=[GraalWasmDefaultTags.wasmbenchtest]) as t:
        if t:
            for b in microbenchmarks:
                exitcode = mx_benchmark.benchmark([
                    "wasm:WASM_BENCHMARKCASES", "--", "--jvm", "server",
                    "--jvm-config", "graal-core",
                    "-Dwasmbench.benchmarkName=" + b,
                    "-Dwasmtest.keepTempFiles=true", "--",
                    "CMicroBenchmarkSuite", "-wi", "1", "-i", "1"
                ])
                if exitcode != 0:
                    mx.abort("Errors during benchmark tests, aborting.")
Example #3
def bench_shortcut(benchSuite, args):
    benchname = "*"
    if not args:
        vm_py_args = []
    elif args[0] == "--":
        vm_py_args = args  # VM or Python options
    else:
        benchname = args[0]
        vm_py_args = args[1:]

    return mx_benchmark.benchmark([benchSuite + ":" + benchname] + vm_py_args)
Example #4
def createBenchmarkShortcut(benchSuite, args):
    if not args:
        benchname = "*"
        remaining_args = []
    elif args[0] == "--":
        # not a benchmark name
        benchname = "*"
        remaining_args = args
    else:
        benchname = args[0]
        remaining_args = args[1:]
    return mx_benchmark.benchmark([benchSuite + ":" + benchname] + remaining_args)
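
The branches above only decide how the argument list is split. A few hypothetical calls make the mapping concrete (these invocations are illustrations, not part of the original):

# With no arguments, every benchmark in the suite is selected:
#   createBenchmarkShortcut("dacapo", [])
#   -> mx_benchmark.benchmark(["dacapo:*"])
# A leading "--" marks the arguments as VM/suite options rather than a benchmark name:
#   createBenchmarkShortcut("dacapo", ["--", "-Xmx4g"])
#   -> mx_benchmark.benchmark(["dacapo:*", "--", "-Xmx4g"])
# Otherwise the first token selects the benchmark and the rest is passed through:
#   createBenchmarkShortcut("dacapo", ["fop", "--", "-Xmx4g"])
#   -> mx_benchmark.benchmark(["dacapo:fop", "--", "-Xmx4g"])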
Example #5
def createBenchmarkShortcut(benchSuite, args):
    if not args:
        benchname = "*"
        remaining_args = []
    elif args[0] == "--":
        # not a benchmark name
        benchname = "*"
        remaining_args = args
    else:
        benchname = args[0]
        remaining_args = args[1:]
    return mx_benchmark.benchmark([benchSuite + ":" + benchname] + remaining_args)
# Short-hand commands used to quickly run common benchmarks.
mx.update_commands(mx.suite('graal-core'), {
    'dacapo': [
      lambda args: createBenchmarkShortcut("dacapo", args),
      '[<benchmarks>|*] [-- [VM options] [-- [DaCapo options]]]'
    ],
    'scaladacapo': [
      lambda args: createBenchmarkShortcut("scala-dacapo", args),
      '[<benchmarks>|*] [-- [VM options] [-- [Scala DaCapo options]]]'
    ],
    'specjvm2008': [
      lambda args: createBenchmarkShortcut("specjvm2008", args),
      '[<benchmarks>|*] [-- [VM options] [-- [SPECjvm2008 options]]]'
    ],
    'specjbb2005': [
      lambda args: mx_benchmark.benchmark(["specjbb2005"] + args),
      '[-- [VM options] [-- [SPECjbb2005 options]]]'
    ],
    'specjbb2013': [
      lambda args: mx_benchmark.benchmark(["specjbb2013"] + args),
      '[-- [VM options] [-- [SPECjbb2013 options]]]'
    ],
    'specjbb2015': [
      lambda args: mx_benchmark.benchmark(["specjbb2015"] + args),
      '[-- [VM options] [-- [SPECjbb2015 options]]]'
    ],
})
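
Because the table is plain data, a shortcut for another suite is just one more entry of the same shape. A hypothetical addition, shown only to illustrate the pattern (the 'renaissance' suite name and help text are placeholders, not part of the original):

# Hypothetical extra entry; the suite name and option text are made up.
mx.update_commands(mx.suite('graal-core'), {
    'renaissance': [
      lambda args: createBenchmarkShortcut("renaissance", args),
      '[<benchmarks>|*] [-- [VM options] [-- [Renaissance options]]]'
    ],
})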


Example #7
# Short-hand commands used to quickly run common benchmarks.
mx.update_commands(_suite, {
    'dacapo': [
      lambda args: createBenchmarkShortcut("dacapo", args),
      '[<benchmarks>|*] [-- [VM options] [-- [DaCapo options]]]'
    ],
    'scaladacapo': [
      lambda args: createBenchmarkShortcut("scala-dacapo", args),
      '[<benchmarks>|*] [-- [VM options] [-- [Scala DaCapo options]]]'
    ],
    'specjvm2008': [
      lambda args: createBenchmarkShortcut("specjvm2008", args),
      '[<benchmarks>|*] [-- [VM options] [-- [SPECjvm2008 options]]]'
    ],
    'specjbb2005': [
      lambda args: mx_benchmark.benchmark(["specjbb2005"] + args),
      '[-- [VM options] [-- [SPECjbb2005 options]]]'
    ],
    'specjbb2013': [
      lambda args: mx_benchmark.benchmark(["specjbb2013"] + args),
      '[-- [VM options] [-- [SPECjbb2013 options]]]'
    ],
    'specjbb2015': [
      lambda args: mx_benchmark.benchmark(["specjbb2015"] + args),
      '[-- [VM options] [-- [SPECjbb2015 options]]]'
    ],
})


Example #8
def altrep_benchmark(args):
    return mx_benchmark.benchmark(["altrep"] + args)
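
The snippet ends with the wrapper itself; presumably it is exposed as an mx command in the same way as the SPECjbb entries in the earlier examples. A minimal sketch under that assumption (_suite and the help string are placeholders):

# Assumed registration mirroring the earlier examples; not part of this snippet.
mx.update_commands(_suite, {
    'altrep': [altrep_benchmark, '[-- [VM options] [-- [altrep options]]]'],
})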