Example #1
def compiler_gate_runner(suites, unit_test_runs, bootstrap_tests, tasks, extraVMarguments=None):

    # Build server-hosted-jvmci now so we can run the unit tests
    with Task('BuildHotSpotGraalHosted: product', tasks, tags=[GraalTags.test, GraalTags.fulltest]) as t:
        if t: buildvms(['--vms', 'server', '--builds', 'product'])

    # Run unit tests on server-hosted-jvmci
    with VM('server', 'product'):
        for r in unit_test_runs:
            r.run(suites, tasks, extraVMarguments)

    # Run microbench on server-hosted-jvmci (only for testing the JMH setup)
    with VM('server', 'product'):
        for r in [MicrobenchRun('Microbench', ['TestJMH'], tags=[GraalTags.fulltest])]:
            r.run(tasks, extraVMarguments)

    # Run ctw against rt.jar on server-hosted-jvmci
    with VM('server', 'product'):
        with Task('CTW:hosted-product', tasks, tags=[GraalTags.fulltest]) as t:
            if t: ctw(['--ctwopts', '-Inline +ExitVMOnException', '-esa', '-G:+CompileTheWorldMultiThreaded', '-G:-InlineDuringParsing', '-G:-CompileTheWorldVerbose', '-XX:ReservedCodeCacheSize=400m'], _noneAsEmptyList(extraVMarguments))

    # Build the jvmci VMs so we can run the other tests
    with Task('BuildHotSpotGraalJVMCI: fastdebug', tasks, tags=[GraalTags.bootstrap, GraalTags.fulltest]) as t:
        if t: buildvms(['--vms', 'jvmci', '--builds', 'fastdebug'])
    with Task('BuildHotSpotGraalJVMCI: product', tasks, tags=[GraalTags.fulltest]) as t:
        if t: buildvms(['--vms', 'jvmci', '--builds', 'product'])

    # bootstrap tests
    for b in bootstrap_tests:
        b.run(tasks, extraVMarguments)

    # run dacapo sanitychecks
    for vmbuild in ['fastdebug', 'product']:
        for test in sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel=vmbuild, extraVmArguments=extraVMarguments) \
                + sanitycheck.getScalaDacapos(level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel=vmbuild, extraVmArguments=extraVMarguments):
            with Task(str(test) + ':' + vmbuild, tasks, tags=[GraalTags.fulltest]) as t:
                if t and not test.test('jvmci'):
                    t.abort(test.name + ' Failed')

    # ensure -Xbatch still works
    with VM('jvmci', 'product'):
        with Task('DaCapo_pmd:BatchMode:product', tasks, tags=[GraalTags.fulltest]) as t:
            if t: dacapo(_noneAsEmptyList(extraVMarguments) + ['-Xbatch', 'pmd'])

    # ensure benchmark counters still work
    with VM('jvmci', 'product'):
        with Task('DaCapo_pmd:BenchmarkCounters:product', tasks, tags=[GraalTags.fulltest]) as t:
            if t: dacapo(_noneAsEmptyList(extraVMarguments) + ['-G:+LIRProfileMoves', '-G:+GenericDynamicCounters', '-XX:JVMCICounterSize=10', 'pmd'])

    # ensure -Xcomp still works
    with VM('jvmci', 'product'):
        with Task('XCompMode:product', tasks, tags=[GraalTags.fulltest]) as t:
            if t: run_vm(_noneAsEmptyList(extraVMarguments) + ['-Xcomp', '-version'])
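Every example in this listing funnels extraVMarguments through _noneAsEmptyList before concatenating it with other argument lists. The helper itself is not shown here; a minimal sketch consistent with that usage (behaviour inferred from the name and the call sites, not copied from the original module) would be:

def _noneAsEmptyList(a):
    # Treat a missing argument list as an empty one so callers can concatenate safely.
    if a is None:
        return []
    return a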
Example #3
def compiler_simple_gate_runner(suites,
                                unit_test_runs,
                                bootstrap_tests,
                                tasks,
                                extraVMarguments=None):

    # Run unit tests in hosted mode
    with JVMCIMode('hosted'):
        for r in unit_test_runs:
            r.run(suites, tasks, extraVMarguments)

    # Run microbench in hosted mode (only for testing the JMH setup)
    with JVMCIMode('hosted'):
        for r in [MicrobenchRun('Microbench', ['TestJMH'])]:
            r.run(tasks, extraVMarguments)

    # bootstrap tests
    for b in bootstrap_tests:
        b.run(tasks, extraVMarguments)

    # ensure -Xcomp still works
    with JVMCIMode('jit'):
        with Task('XCompMode:product', tasks) as t:
            if t:
                run_vm(
                    _noneAsEmptyList(extraVMarguments) +
                    ['-Xcomp', '-version'])
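The simple gate runner above switches between 'hosted' and 'jit' via the JVMCIMode context manager. The real mx class also reconfigures the VM; the sketch below only illustrates the save-and-restore pattern the examples rely on, and the module-level current attribute is an assumption of this illustration, not taken from the original source:

class JVMCIMode(object):
    # Illustrative sketch only: temporarily switch a global JVMCI mode
    # ('hosted', 'jit' or 'disabled') and restore the previous mode on exit.
    current = 'hosted'  # assumed default for this sketch

    def __init__(self, jvmciMode):
        self.jvmciMode = jvmciMode

    def __enter__(self):
        self.previous = JVMCIMode.current
        JVMCIMode.current = self.jvmciMode
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        JVMCIMode.current = self.previous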
Example #4
def ctw(args, extraVMarguments=None):
    """run CompileTheWorld"""

    defaultCtwopts = '-Inline'

    parser = ArgumentParser(prog='mx ctw')
    parser.add_argument('--ctwopts', action='store', help='space separated JVMCI options used for CTW compilations (default: --ctwopts="' + defaultCtwopts + '")', default=defaultCtwopts, metavar='<options>')
    parser.add_argument('--cp', '--jar', action='store', help='jar or class path denoting classes to compile', metavar='<path>')

    args, vmargs = parser.parse_known_args(args)

    if args.ctwopts:
        # Replace spaces with '#' since -G: options cannot contain spaces
        vmargs.append('-G:CompileTheWorldConfig=' + re.sub(r'\s+', '#', args.ctwopts))

    if args.cp:
        cp = os.path.abspath(args.cp)
    else:
        cp = join(_jdk.home, 'lib', 'modules', 'bootmodules.jimage')
        vmargs.append('-G:CompileTheWorldExcludeMethodFilter=sun.awt.X11.*.*')

    # suppress menubar and dock when running on Mac; exclude x11 classes as they may cause vm crashes (on Solaris)
    vmargs = ['-Djava.awt.headless=true'] + vmargs

    if _vm.jvmciMode == 'disabled':
        vmargs += ['-XX:+CompileTheWorld', '-Xbootclasspath/p:' + cp]
    else:
        if _vm.jvmciMode == 'jit':
            vmargs += ['-XX:+BootstrapJVMCI']
        vmargs += ['-G:CompileTheWorldClasspath=' + cp, 'com.oracle.graal.hotspot.CompileTheWorld']

    run_vm(vmargs + _noneAsEmptyList(extraVMarguments))
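The option handling above relies on ArgumentParser.parse_known_args, which returns the options it recognises plus the leftover arguments that ctw then forwards to the VM. A small standalone illustration of that split (argument values chosen for this example only):

from argparse import ArgumentParser

parser = ArgumentParser(prog='demo')
parser.add_argument('--ctwopts', default='-Inline')

known, rest = parser.parse_known_args(
    ['--ctwopts', '-Inline +ExitVMOnException', '-esa', '-XX:ReservedCodeCacheSize=300m'])
print(known.ctwopts)  # -Inline +ExitVMOnException
print(rest)           # ['-esa', '-XX:ReservedCodeCacheSize=300m'], forwarded as VM arguments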
Example #5
def compiler_gate_runner(suites,
                         unit_test_runs,
                         bootstrap_tests,
                         tasks,
                         extraVMarguments=None):

    # Run unit tests in hosted mode
    with JVMCIMode('hosted'):
        for r in unit_test_runs:
            r.run(suites, tasks, extraVMarguments)

    # Run ctw against rt.jar on server-hosted-jvmci
    with JVMCIMode('hosted'):
        with Task('CTW:hosted', tasks) as t:
            if t:
                ctw([
                    '--ctwopts', '-Inline +ExitVMOnException', '-esa',
                    '-G:+CompileTheWorldMultiThreaded',
                    '-G:-InlineDuringParsing', '-G:-CompileTheWorldVerbose',
                    '-XX:ReservedCodeCacheSize=300m'
                ], _noneAsEmptyList(extraVMarguments))

    # bootstrap tests
    for b in bootstrap_tests:
        b.run(tasks, extraVMarguments)

    # run dacapo sanitychecks
    for test in sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel='release', extraVmArguments=extraVMarguments) \
            + sanitycheck.getScalaDacapos(level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel='release', extraVmArguments=extraVMarguments):
        with Task(str(test) + ':' + 'release', tasks) as t:
            if t and not test.test('jvmci'):
                t.abort(test.name + ' Failed')

    # ensure -Xbatch still works
    with JVMCIMode('jit'):
        with Task('DaCapo_pmd:BatchMode', tasks) as t:
            if t:
                dacapo(_noneAsEmptyList(extraVMarguments) + ['-Xbatch', 'pmd'])

    # ensure -Xcomp still works
    with JVMCIMode('jit'):
        with Task('XCompMode:product', tasks) as t:
            if t:
                run_vm(
                    _noneAsEmptyList(extraVMarguments) +
                    ['-Xcomp', '-version'])
Example #6
 def run(self, suites, tasks, extraVMarguments=None):
     for suite in suites:
         with Task(self.name + ': hosted-release ' + suite, tasks) as t:
             if t:
                 unittest([
                     '--suite', suite, '--enable-timing', '--verbose',
                     '--fail-fast'
                 ] + self.args + _noneAsEmptyList(extraVMarguments))
Example #7
 def run(self, tasks, extraVMarguments=None):
     with JVMCIMode('jit'):
         with Task(self.name, tasks, tags=self.tags) as t:
             if t:
                 if self.suppress:
                     out = mx.DuplicateSuppressingStream(self.suppress).write
                 else:
                     out = None
                 run_vm(self.args + _noneAsEmptyList(extraVMarguments) + ['-XX:-TieredCompilation', '-XX:+BootstrapJVMCI', '-version'], out=out)
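When a bootstrap run carries a suppress list, its output goes through mx.DuplicateSuppressingStream(self.suppress).write. That class is not part of this listing and its exact semantics are not reproduced here; the toy writer below (hypothetical name, behaviour inferred from the call site) only shows the general shape of such an output filter:

import sys

class DuplicateSuppressingWriter(object):
    # Toy stand-in, not mx.DuplicateSuppressingStream: drop repeated lines
    # that contain one of the given substrings, echo everything else.
    def __init__(self, suppress):
        self.suppress = suppress
        self.seen = set()

    def write(self, line):
        if line in self.seen and any(s in line for s in self.suppress):
            return
        self.seen.add(line)
        sys.stdout.write(line)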
Example #8
 def run(self, tasks, extraVMarguments=None):
     with VM('jvmci', self.vmbuild):
         with Task(self.name + ':' + self.vmbuild, tasks) as t:
             if t:
                 if self.suppress:
                     out = mx.DuplicateSuppressingStream(self.suppress).write
                 else:
                     out = None
                 run_vm(self.args + _noneAsEmptyList(extraVMarguments) + ['-XX:-TieredCompilation', '-XX:+BootstrapJVMCI', '-version'], out=out)
Example #9
def compiler_gate_runner(suites, unit_test_runs, bootstrap_tests, tasks, extraVMarguments=None):

    # Run unit tests in hosted mode
    with JVMCIMode('hosted'):
        for r in unit_test_runs:
            r.run(suites, tasks, extraVMarguments)

    # Run microbench in hosted mode (only for testing the JMH setup)
    with JVMCIMode('hosted'):
        for r in [MicrobenchRun('Microbench', ['TestJMH'])]:
            r.run(tasks, extraVMarguments)

    # Run ctw against rt.jar on server-hosted-jvmci
    with JVMCIMode('hosted'):
        with Task('CTW:hosted', tasks) as t:
            if t: ctw(['--ctwopts', '-Inline +ExitVMOnException', '-esa', '-G:+CompileTheWorldMultiThreaded', '-G:-InlineDuringParsing', '-G:-CompileTheWorldVerbose', '-XX:ReservedCodeCacheSize=300m'], _noneAsEmptyList(extraVMarguments))

    # bootstrap tests
    for b in bootstrap_tests:
        b.run(tasks, extraVMarguments)

    # run dacapo sanitychecks
    for test in sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel='release', extraVmArguments=extraVMarguments) \
            + sanitycheck.getScalaDacapos(level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel='release', extraVmArguments=extraVMarguments):
        with Task(str(test) + ':' + 'release', tasks) as t:
            if t and not test.test('jvmci'):
                t.abort(test.name + ' Failed')

    # ensure -Xbatch still works
    with JVMCIMode('jit'):
        with Task('DaCapo_pmd:BatchMode', tasks) as t:
            if t: dacapo(_noneAsEmptyList(extraVMarguments) + ['-Xbatch', 'pmd'])

    # ensure benchmark counters still work
    with JVMCIMode('jit'):
        with Task('DaCapo_pmd:BenchmarkCounters:product', tasks) as t:
            if t: dacapo(_noneAsEmptyList(extraVMarguments) + ['-G:+LIRProfileMoves', '-G:+GenericDynamicCounters', '-XX:JVMCICounterSize=10', 'pmd'])

    # ensure -Xcomp still works
    with JVMCIMode('jit'):
        with Task('XCompMode:product', tasks) as t:
            if t: run_vm(_noneAsEmptyList(extraVMarguments) + ['-Xcomp', '-version'])
Example #10
 def run(self, suites, tasks, extraVMarguments=None):
     for suite in suites:
         with Task(self.name + ": hosted-product " + suite, tasks, tags=self.tags) as t:
             if mx_gate.Task.verbose:
                 extra_args = ["--verbose", "--enable-timing"]
             else:
                 extra_args = []
             if t:
                 unittest(
                     ["--suite", suite, "--fail-fast"] + extra_args + self.args + _noneAsEmptyList(extraVMarguments)
                 )
Example #11
 def run(self, suites, tasks, extraVMarguments=None):
     for suite in suites:
         with Task(self.name + ': hosted-product ' + suite,
                   tasks,
                   tags=self.tags) as t:
             if mx_gate.Task.verbose:
                 extra_args = ['--verbose', '--enable-timing']
             else:
                 extra_args = []
             if t:
                 unittest(['--suite', suite, '--fail-fast'] + extra_args +
                          self.args + _noneAsEmptyList(extraVMarguments))
Example #12
 def run(self, tasks, extraVMarguments=None):
     with VM("jvmci", self.vmbuild):
         with Task(self.name + ":" + self.vmbuild, tasks, tags=self.tags) as t:
             if t:
                 if self.suppress:
                     out = mx.DuplicateSuppressingStream(self.suppress).write
                 else:
                     out = None
                 run_vm(
                     self.args
                     + _noneAsEmptyList(extraVMarguments)
                     + ["-XX:-TieredCompilation", "-XX:+BootstrapJVMCI", "-version"],
                     out=out,
                 )
Example #13
 def run(self, tasks, extraVMarguments=None):
     with VM('jvmci', self.vmbuild):
         with Task(self.name + ':' + self.vmbuild, tasks) as t:
             if t:
                 if self.suppress:
                     out = mx.DuplicateSuppressingStream(
                         self.suppress).write
                 else:
                     out = None
                 run_vm(self.args + _noneAsEmptyList(extraVMarguments) + [
                     '-XX:-TieredCompilation', '-XX:+BootstrapJVMCI',
                     '-version'
                 ],
                        out=out)
Example #14
def compiler_simple_gate_runner(suites, unit_test_runs, bootstrap_tests, tasks, extraVMarguments=None):

    # Run unit tests in hosted mode
    with JVMCIMode('hosted'):
        for r in unit_test_runs:
            r.run(suites, tasks, extraVMarguments)

    # Run microbench in hosted mode (only for testing the JMH setup)
    with JVMCIMode('hosted'):
        for r in [MicrobenchRun('Microbench', ['TestJMH'])]:
            r.run(tasks, extraVMarguments)

    # bootstrap tests
    for b in bootstrap_tests:
        b.run(tasks, extraVMarguments)

    # ensure -Xcomp still works
    with JVMCIMode('jit'):
        with Task('XCompMode:product', tasks) as t:
            if t: run_vm(_noneAsEmptyList(extraVMarguments) + ['-Xcomp', '-version'])
Example #15
def ctw(args, extraVMarguments=None):
    """run CompileTheWorld"""

    defaultCtwopts = "-Inline"

    parser = ArgumentParser(prog="mx ctw")
    parser.add_argument(
        "--ctwopts",
        action="store",
        help='space separated JVMCI options used for CTW compilations (default: --ctwopts="' + defaultCtwopts + '")',
        default=defaultCtwopts,
        metavar="<options>",
    )
    parser.add_argument(
        "--cp", "--jar", action="store", help="jar or class path denoting classes to compile", metavar="<path>"
    )

    args, vmargs = parser.parse_known_args(args)

    if args.ctwopts:
        # Replace spaces with '#' since -G: options cannot contain spaces
        vmargs.append("-G:CompileTheWorldConfig=" + re.sub(r"\s+", "#", args.ctwopts))

    if args.cp:
        cp = os.path.abspath(args.cp)
    else:
        cp = join(_jdk.home, "lib", "modules", "bootmodules.jimage")
        vmargs.append("-G:CompileTheWorldExcludeMethodFilter=sun.awt.X11.*.*")

    # suppress menubar and dock when running on Mac; exclude x11 classes as they may cause vm crashes (on Solaris)
    vmargs = ["-Djava.awt.headless=true"] + vmargs

    if _vm.jvmciMode == "disabled":
        vmargs += ["-XX:+CompileTheWorld", "-Xbootclasspath/p:" + cp]
    else:
        if _vm.jvmciMode == "jit":
            vmargs += ["-XX:+BootstrapJVMCI"]
        vmargs += ["-G:CompileTheWorldClasspath=" + cp, "com.oracle.graal.hotspot.CompileTheWorld"]

    run_vm(vmargs + _noneAsEmptyList(extraVMarguments))
Example #16
 def run(self, suites, tasks, extraVMarguments=None):
     for suite in suites:
         with Task(self.name + ': hosted-product ' + suite, tasks) as t:
             if t: unittest(['--suite', suite, '--fail-fast'] + self.args + _noneAsEmptyList(extraVMarguments))
Example #17
    def bench(self, vm, cwd=None, extraVmOpts=None, vmbuild=None):
        """
        Run this program as a benchmark.
        Copied from sanitycheck.Test to extend benchmarking for non-JVMs.
        """
        if vm in self.ignoredVMs:
            return {}
        if cwd is None:
            cwd = self.defaultCwd
        parser = OutputParser()

        for successRE in self.successREs:
            parser.addMatcher(ValuesMatcher(successRE, {'passed' : '1'}))
        for failureRE in self.failureREs:
            parser.addMatcher(ValuesMatcher(failureRE, {'failed' : '1'}))
        for scoreMatcher in self.scoreMatchers:
            parser.addMatcher(scoreMatcher)

        if self.benchmarkCompilationRate:
            if vm == 'graal':
                bps = re.compile(r"ParsedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
                ibps = re.compile(r"InlinedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
                parser.addMatcher(ValuesMatcher(bps, {'group' : 'ParsedBytecodesPerSecond', 'name' : self.name, 'score' : '<rate>'}))
                parser.addMatcher(ValuesMatcher(ibps, {'group' : 'InlinedBytecodesPerSecond', 'name' : self.name, 'score' : '<rate>'}))
            else:
                ibps = re.compile(r"(?P<compiler>[\w]+) compilation speed: +(?P<rate>[0-9]+) bytes/s {standard")
                parser.addMatcher(ValuesMatcher(ibps, {'group' : 'InlinedBytecodesPerSecond', 'name' : '<compiler>:' + self.name, 'score' : '<rate>'}))

        startDelim = 'START: ' + self.name
        endDelim = 'END: ' + self.name

        outputfile = os.environ.get('BENCH_OUTPUT', None)
        if outputfile:
            # Used only to debug output parsing
            with open(outputfile) as fp:
                output = fp.read()
                start = output.find(startDelim)
                end = output.find(endDelim, start)
                if start == -1 and end == -1:
                    return {}
                output = output[start + len(startDelim + os.linesep): end]
                mx.log(startDelim)
                mx.log(output)
                mx.log(endDelim)
        else:
            tee = Tee()
            mx.log(startDelim)
            # zippy
            result = -1
            if vm == 'cpython2':
                result = mx.run(['python'] + self.cmd[-2:], out=tee.eat)
            elif vm == 'cpython':
                result = mx.run(['python3'] + self.cmd[-2:], out=tee.eat)
            elif vm == 'jython':
                result = mx_graal.vm(['-jar', mx.library('JYTHON').path] + self.cmd[-2:], vm = 'original', out=tee.eat)
            elif vm == 'pypy':
                result = mx.run(['pypy'] + self.cmd[-2:], out=tee.eat)
            elif vm == 'pypy3':
                result = mx.run(['pypy3'] + self.cmd[-2:], out=tee.eat)
            else:
                result = mx_graal.vm(self.vmOpts + _noneAsEmptyList(extraVmOpts) + self.cmd, vm, nonZeroIsFatal=False, out=tee.eat, err=subprocess.STDOUT, cwd=cwd, vmbuild=vmbuild)

            if result != 0:
                mx.abort("Benchmark failed (non-zero retcode)")
            # wait for subprocess to finish
            time.sleep(.5)
            mx.log(endDelim)
            output = tee.output.getvalue()

        groups = {}
        passed = False
        for valueMap in parser.parse(output):
            assert (valueMap.has_key('name') and valueMap.has_key('score') and valueMap.has_key('group')) or valueMap.has_key('passed') or valueMap.has_key('failed'), valueMap
            if valueMap.get('failed') == '1':
                mx.abort("Benchmark failed")
            if valueMap.get('passed') == '1':
                passed = True
            groupName = valueMap.get('group')
            if groupName:
                group = groups.setdefault(groupName, {})
                name = valueMap.get('name')
                score = valueMap.get('score')
                if name and score:
                    group[name] = score

        if not passed:
            mx.abort("Benchmark failed (not passed)")

        return groups
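The else branch of bench() assumes a Tee helper whose eat method is passed to mx.run/mx_graal.vm as the output callback and whose captured text is later read with tee.output.getvalue(). The class itself is not included in this listing; a minimal self-contained sketch matching that usage (an assumption, not the original mx code) is:

import sys
try:
    from cStringIO import StringIO  # Python 2, matching the era of this code
except ImportError:
    from io import StringIO         # Python 3 fallback

class Tee(object):
    # Capture output in memory while echoing it, mirroring how bench() uses
    # tee.eat as an output callback and tee.output.getvalue() afterwards.
    def __init__(self):
        self.output = StringIO()

    def eat(self, line):
        self.output.write(line)
        sys.stdout.write(line)  # the real helper would log via mx; stdout keeps the sketch standalone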
Example #18
 def run(self, tasks, extraVMarguments=None):
     with Task(self.name + ': hosted-product ', tasks, tags=self.tags) as t:
         if t: microbench(_noneAsEmptyList(extraVMarguments) + ['--'] + self.args)
Example #19
 def run(self, tasks, extraVMarguments=None):
     with Task(self.name + ': hosted-product ', tasks) as t:
         if t:
             microbench(
                 _noneAsEmptyList(extraVMarguments) + ['--'] + self.args)
Example #20
def compiler_gate_runner(suites, unit_test_runs, bootstrap_tests, tasks, extraVMarguments=None):

    # Run unit tests in hosted mode
    with JVMCIMode("hosted"):
        for r in unit_test_runs:
            r.run(suites, tasks, extraVMarguments)

    # Run microbench in hosted mode (only for testing the JMH setup)
    with JVMCIMode("hosted"):
        for r in [MicrobenchRun("Microbench", ["TestJMH"], tags=[GraalTags.fulltest])]:
            r.run(tasks, extraVMarguments)

    # Run ctw against rt.jar on server-hosted-jvmci
    with JVMCIMode("hosted"):
        with Task("CTW:hosted", tasks, tags=[GraalTags.fulltest]) as t:
            if t:
                ctw(
                    [
                        "--ctwopts",
                        "-Inline +ExitVMOnException",
                        "-esa",
                        "-G:+CompileTheWorldMultiThreaded",
                        "-G:-InlineDuringParsing",
                        "-G:-CompileTheWorldVerbose",
                        "-XX:ReservedCodeCacheSize=300m",
                    ],
                    _noneAsEmptyList(extraVMarguments),
                )

    # bootstrap tests
    for b in bootstrap_tests:
        b.run(tasks, extraVMarguments)

    # run dacapo sanitychecks
    for test in sanitycheck.getDacapos(
        level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel="release", extraVmArguments=extraVMarguments
    ) + sanitycheck.getScalaDacapos(
        level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel="release", extraVmArguments=extraVMarguments
    ):
        with Task(str(test) + ":" + "release", tasks, tags=[GraalTags.fulltest]) as t:
            if t and not test.test("jvmci"):
                t.abort(test.name + " Failed")

    # ensure -Xbatch still works
    with JVMCIMode("jit"):
        with Task("DaCapo_pmd:BatchMode", tasks, tags=[GraalTags.fulltest]) as t:
            if t:
                dacapo(_noneAsEmptyList(extraVMarguments) + ["-Xbatch", "pmd"])

    # ensure benchmark counters still work
    with JVMCIMode("jit"):
        with Task("DaCapo_pmd:BenchmarkCounters:product", tasks, tags=[GraalTags.fulltest]) as t:
            if t:
                dacapo(
                    _noneAsEmptyList(extraVMarguments)
                    + ["-G:+LIRProfileMoves", "-G:+GenericDynamicCounters", "-XX:JVMCICounterSize=10", "pmd"]
                )

    # ensure -Xcomp still works
    with JVMCIMode("jit"):
        with Task("XCompMode:product", tasks, tags=[GraalTags.fulltest]) as t:
            if t:
                run_vm(_noneAsEmptyList(extraVMarguments) + ["-Xcomp", "-version"])
Example #21
 def run(self, suites, tasks, extraVMarguments=None):
     for suite in suites:
         with Task(self.name + ': hosted-release ' + suite, tasks) as t:
             if t: unittest(['--suite', suite, '--enable-timing', '--verbose', '--fail-fast'] + self.args + _noneAsEmptyList(extraVMarguments))
Example #22
    def bench(self, vm, cwd=None, extraVmOpts=None, vmbuild=None):
        """
        Run this program as a benchmark.
        Copied from sanitycheck.Test to extend benchmarking for non-JVMs.
        """
        if vm in self.ignoredVMs:
            return {}
        if cwd is None:
            cwd = self.defaultCwd
        parser = OutputParser()

        for successRE in self.successREs:
            parser.addMatcher(ValuesMatcher(successRE, {'passed': '1'}))
        for failureRE in self.failureREs:
            parser.addMatcher(ValuesMatcher(failureRE, {'failed': '1'}))
        for scoreMatcher in self.scoreMatchers:
            parser.addMatcher(scoreMatcher)

        if self.benchmarkCompilationRate:
            if vm == 'graal':
                bps = re.compile(
                    r"ParsedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
                ibps = re.compile(
                    r"InlinedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
                parser.addMatcher(
                    ValuesMatcher(
                        bps, {
                            'group': 'ParsedBytecodesPerSecond',
                            'name': self.name,
                            'score': '<rate>'
                        }))
                parser.addMatcher(
                    ValuesMatcher(
                        ibps, {
                            'group': 'InlinedBytecodesPerSecond',
                            'name': self.name,
                            'score': '<rate>'
                        }))
            else:
                ibps = re.compile(
                    r"(?P<compiler>[\w]+) compilation speed: +(?P<rate>[0-9]+) bytes/s {standard"
                )
                parser.addMatcher(
                    ValuesMatcher(
                        ibps, {
                            'group': 'InlinedBytecodesPerSecond',
                            'name': '<compiler>:' + self.name,
                            'score': '<rate>'
                        }))

        startDelim = 'START: ' + self.name
        endDelim = 'END: ' + self.name

        outputfile = os.environ.get('BENCH_OUTPUT', None)
        if outputfile:
            # Used only to debug output parsing
            with open(outputfile) as fp:
                output = fp.read()
                start = output.find(startDelim)
                end = output.find(endDelim, start)
                if start == -1 and end == -1:
                    return {}
                output = output[start + len(startDelim + os.linesep):end]
                mx.log(startDelim)
                mx.log(output)
                mx.log(endDelim)
        else:
            tee = Tee()
            mx.log(startDelim)
            # zippy
            result = -1
            if vm == 'cpython2':
                result = mx.run(['python'] + self.cmd[-2:], out=tee.eat)
            elif vm == 'cpython':
                result = mx.run(['python3'] + self.cmd[-2:], out=tee.eat)
            elif vm == 'jython':
                result = mx_graal.vm(
                    ['-jar', mx.library('JYTHON').path] + self.cmd[-2:],
                    vm='original',
                    out=tee.eat)
            elif vm == 'pypy':
                result = mx.run(['pypy'] + self.cmd[-2:], out=tee.eat)
            elif vm == 'pypy3':
                result = mx.run(['pypy3'] + self.cmd[-2:], out=tee.eat)
            else:
                result = mx_graal.vm(self.vmOpts +
                                     _noneAsEmptyList(extraVmOpts) + self.cmd,
                                     vm,
                                     nonZeroIsFatal=False,
                                     out=tee.eat,
                                     err=subprocess.STDOUT,
                                     cwd=cwd,
                                     vmbuild=vmbuild)

            if result != 0:
                mx.abort("Benchmark failed (non-zero retcode)")
            # wait for subprocess to finish
            time.sleep(.5)
            mx.log(endDelim)
            output = tee.output.getvalue()

        groups = {}
        passed = False
        for valueMap in parser.parse(output):
            assert (valueMap.has_key('name') and valueMap.has_key('score')
                    and valueMap.has_key('group')) or valueMap.has_key(
                        'passed') or valueMap.has_key('failed'), valueMap
            if valueMap.get('failed') == '1':
                mx.abort("Benchmark failed")
            if valueMap.get('passed') == '1':
                passed = True
            groupName = valueMap.get('group')
            if groupName:
                group = groups.setdefault(groupName, {})
                name = valueMap.get('name')
                score = valueMap.get('score')
                if name and score:
                    group[name] = score

        if not passed:
            mx.abort("Benchmark failed (not passed)")

        return groups
Example #23
 def run(self, tasks, extraVMarguments=None):
     with Task(self.name + ': hosted-product ', tasks, tags=self.tags) as t:
         if t: mx_microbench.get_microbenchmark_executor().microbench(_noneAsEmptyList(extraVMarguments) + ['--', '-foe', 'true'] + self.args)
Example #24
 def run(self, suites, tasks, extraVMarguments=None):
     for suite in suites:
         with Task(self.name + ': hosted-product ' + suite, tasks) as t:
             if t:
                 unittest(['--suite', suite, '--fail-fast'] + self.args +
                          _noneAsEmptyList(extraVMarguments))