Example #1
def getSPECjbb2013(benchArgs=[]):

    specjbb2013 = mx.get_env('SPECJBB2013')
    if specjbb2013 is None or not exists(join(specjbb2013, 'specjbb2013.jar')):
        mx.abort(
            'Please set the SPECJBB2013 environment variable to a SPECjbb2013 directory'
        )

    jops = re.compile(
        r"^RUN RESULT: hbIR \(max attempted\) = [0-9]+, hbIR \(settled\) = [0-9]+, max-jOPS = (?P<max>[0-9]+), critical-jOPS = (?P<critical>[0-9]+)$",
        re.MULTILINE)
    # no error/failure regex is defined here; the Test call below passes an empty failure list
    success = re.compile(r"org.spec.jbb.controller: Run finished",
                         re.MULTILINE)
    matcherMax = ValuesMatcher(jops, {
        'group': 'SPECjbb2013',
        'name': 'max',
        'score': '<max>'
    })
    matcherCritical = ValuesMatcher(jops, {
        'group': 'SPECjbb2013',
        'name': 'critical',
        'score': '<critical>'
    })
    return Test("SPECjbb2013",
                ['-jar', 'specjbb2013.jar', '-m', 'composite'] + benchArgs,
                [success], [], [matcherCritical, matcherMax],
                vmOpts=[
                    '-Xmx6g', '-Xms6g', '-Xmn3g', '-XX:+UseParallelOldGC',
                    '-XX:-UseAdaptiveSizePolicy', '-XX:-UseBiasedLocking',
                    '-XX:-UseCompressedOops'
                ],
                defaultCwd=specjbb2013)
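
The named groups in the jops pattern above can be exercised with the standard re module alone. This is a minimal sketch; the RUN RESULT line is a fabricated illustration of the format SPECjbb2013 prints, not actual benchmark output.

import re

jops = re.compile(
    r"^RUN RESULT: hbIR \(max attempted\) = [0-9]+, hbIR \(settled\) = [0-9]+, "
    r"max-jOPS = (?P<max>[0-9]+), critical-jOPS = (?P<critical>[0-9]+)$",
    re.MULTILINE)

# Made-up sample line, for illustration only.
sample = ("RUN RESULT: hbIR (max attempted) = 1000, hbIR (settled) = 800, "
          "max-jOPS = 750, critical-jOPS = 300")
m = jops.search(sample)
if m:
    print(m.group('max'))       # '750'  -> feeds the '<max>' placeholder
    print(m.group('critical'))  # '300'  -> feeds the '<critical>' placeholder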
Example #2
def getBootstraps():
    time = re.compile(r"Bootstrapping Graal\.+ in (?P<time>[0-9]+) ms")
    scoreMatcher = ValuesMatcher(time, {
        'group': 'Bootstrap',
        'name': 'BootstrapTime',
        'score': '<time>'
    })
    scoreMatcherBig = ValuesMatcher(time, {
        'group': 'Bootstrap-bigHeap',
        'name': 'BootstrapTime',
        'score': '<time>'
    })

    tests = []
    tests.append(
        Test("Bootstrap", ['-version'],
             successREs=[time],
             scoreMatchers=[scoreMatcher],
             ignoredVMs=['client', 'server'],
             benchmarkCompilationRate=False))
    tests.append(
        Test("Bootstrap-bigHeap", ['-version'],
             successREs=[time],
             scoreMatchers=[scoreMatcherBig],
             vmOpts=['-Xms2g'],
             ignoredVMs=['client', 'server'],
             benchmarkCompilationRate=False))
    return tests
Example #3
    def test(self, vm, cwd=None, extraVmOpts=None, vmbuild=None):
        """
        Run this program as a sanity test.
        """
        if vm in self.ignoredVMs:
            return True
        if cwd is None:
            cwd = self.defaultCwd
        parser = OutputParser()
        jvmError = re.compile(
            r"(?P<jvmerror>([A-Z]:|/).*[/\\]hs_err_pid[0-9]+\.log)")
        parser.addMatcher(ValuesMatcher(jvmError, {'jvmError': '<jvmerror>'}))

        for successRE in self.successREs:
            parser.addMatcher(ValuesMatcher(successRE, {'passed': '1'}))
        for failureRE in self.failureREs:
            parser.addMatcher(ValuesMatcher(failureRE, {'failed': '1'}))

        tee = Tee()
        retcode = mx_graal_core.run_vm(
            self.vmOpts + _noneAsEmptyList(extraVmOpts) + self.cmd,
            vm,
            nonZeroIsFatal=False,
            out=tee.eat,
            err=subprocess.STDOUT,
            cwd=cwd,
            vmbuild=vmbuild)
        output = tee.output.getvalue()
        valueMaps = parser.parse(output)

        if len(valueMaps) == 0:
            return False

        record = {}
        for valueMap in valueMaps:
            for key, value in valueMap.items():
                if key in record and record[key] != value:
                    mx.abort(
                        'Inconsistent values returned by test matchers: ' +
                        str(valueMaps))
                record[key] = value

        jvmErrorFile = record.get('jvmError')
        if jvmErrorFile:
            mx.log('/!\\JVM Error : dumping error log...')
            with open(jvmErrorFile, 'rb') as fp:
                mx.log(fp.read())
            os.unlink(jvmErrorFile)
            return False

        if record.get('failed') == '1':
            return False

        return retcode == 0 and record.get('passed') == '1'
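
The jvmError pattern in test() recognizes hs_err_pid crash-log paths in both Unix and Windows form. A quick standalone check (the log fragments below are made up):

import re

jvmError = re.compile(
    r"(?P<jvmerror>([A-Z]:|/).*[/\\]hs_err_pid[0-9]+\.log)")

# Fabricated fragments of JVM crash output; real paths depend on the VM's cwd.
for line in ("# /tmp/build/hs_err_pid12345.log",
             r"# C:\work\hs_err_pid4242.log"):
    m = jvmError.search(line)
    if m:
        print(m.group('jvmerror'))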
Example #4
def getDacapo(name, n, dacapoArgs=[]):
    dacapo = mx.get_env('DACAPO_CP')
    if dacapo is None:
        l = mx.library('DACAPO', False)
        if l is not None:
            dacapo = l.get_path(True)
        else:
            mx.abort(
                'DaCapo 9.12 jar file must be specified with DACAPO_CP environment variable or as DACAPO library'
            )

    if not isfile(dacapo) or not dacapo.endswith('.jar'):
        mx.abort(
            'Specified DaCapo jar file does not exist or is not a jar file: ' +
            dacapo)

    dacapoSuccess = re.compile(
        r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====$",
        re.MULTILINE)
    dacapoFail = re.compile(
        r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) FAILED (warmup|) =====$",
        re.MULTILINE)
    dacapoTime = re.compile(
        r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec ====="
    )
    dacapoTime1 = re.compile(
        r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) completed warmup 1 in (?P<time>[0-9]+) msec ====="
    )

    dacapoMatcher = ValuesMatcher(dacapoTime, {
        'group': 'DaCapo',
        'name': '<benchmark>',
        'score': '<time>'
    })
    dacapoMatcher1 = ValuesMatcher(dacapoTime1, {
        'group': 'DaCapo-1stRun',
        'name': '<benchmark>',
        'score': '<time>'
    })

    return Test("DaCapo-" + name, [
        '-jar',
        dacapo,
        name,
        '-n',
        str(n),
    ] + dacapoArgs, [dacapoSuccess], [dacapoFail],
                [dacapoMatcher, dacapoMatcher1],
                ['-Xms2g', '-XX:+' + gc, '-XX:-UseCompressedOops'])
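
The dacapoMatcher above keys off the PASSED line that DaCapo prints per run. A minimal sketch of that extraction with plain re, using a fabricated line in the same format:

import re

dacapoTime = re.compile(
    r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====")

# Made-up line in the format a passing DaCapo run prints.
sample = "===== DaCapo 9.12 avrora PASSED in 4321 msec ====="
m = dacapoTime.search(sample)
if m:
    print(m.group('benchmark'), m.group('time'))  # avrora 4321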
Example #5
def getDacapo(name, dacapoArgs=None, extraVmArguments=None):
    dacapo = mx.get_env('DACAPO_CP')
    if dacapo is None:
        l = mx.library('DACAPO', False)
        if l is not None:
            dacapo = l.get_path(True)
        else:
            mx.abort(
                'DaCapo 9.12 jar file must be specified with DACAPO_CP environment variable or as DACAPO library'
            )

    if not isfile(dacapo) or not dacapo.endswith('.jar'):
        mx.abort(
            'Specified DaCapo jar file does not exist or is not a jar file: ' +
            dacapo)

    dacapoSuccess = re.compile(
        r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====",
        re.MULTILINE)
    dacapoFail = re.compile(
        r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) FAILED (warmup|) =====",
        re.MULTILINE)
    dacapoTime = re.compile(
        r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec ====="
    )
    dacapoTime1 = re.compile(
        r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) completed warmup 1 in (?P<time>[0-9]+) msec ====="
    )

    dacapoMatcher = ValuesMatcher(dacapoTime, {
        'group': 'DaCapo',
        'name': '<benchmark>',
        'score': '<time>'
    })
    dacapoMatcher1 = ValuesMatcher(dacapoTime1, {
        'group': 'DaCapo-1stRun',
        'name': '<benchmark>',
        'score': '<time>'
    })

    # Use ipv4 stack for dacapos; tomcat+solaris+ipv6_interface fails (see also: JDK-8072384)
    return Test("DaCapo-" + name,
                ['-jar', mx._cygpathU2W(dacapo), name] +
                _noneAsEmptyList(dacapoArgs), [dacapoSuccess], [dacapoFail],
                [dacapoMatcher, dacapoMatcher1], [
                    '-Xms2g', '-XX:+' + gc, '-XX:-UseCompressedOops',
                    "-Djava.net.preferIPv4Stack=true", '-G:+ExitVMOnException'
                ] + _noneAsEmptyList(extraVmArguments))
Example #6
def getSPECjbb2005(benchArgs=[]):

    specjbb2005 = mx.get_env('SPECJBB2005')
    if specjbb2005 is None or not exists(join(specjbb2005, 'jbb.jar')):
        mx.abort(
            'Please set the SPECJBB2005 environment variable to a SPECjbb2005 directory'
        )

    score = re.compile(r"^Valid run, Score is  (?P<score>[0-9]+)$",
                       re.MULTILINE)
    error = re.compile(r"VALIDATION ERROR")
    success = re.compile(r"^Valid run, Score is  [0-9]+$", re.MULTILINE)
    matcher = ValuesMatcher(score, {
        'group': 'SPECjbb2005',
        'name': 'score',
        'score': '<score>'
    })
    classpath = ['jbb.jar', 'check.jar']
    return Test("SPECjbb2005",
                ['spec.jbb.JBBmain', '-propfile', 'SPECjbb.props'] + benchArgs,
                [success], [error], [matcher],
                vmOpts=[
                    '-Xms3g', '-XX:+' + gc, '-XX:-UseCompressedOops', '-cp',
                    os.pathsep.join(classpath)
                ],
                defaultCwd=specjbb2005)
Example #7
def getCTW(vm, mode):
    time = re.compile(
        r"CompileTheWorld : Done \([0-9]+ classes, [0-9]+ methods, (?P<time>[0-9]+) ms\)"
    )
    scoreMatcher = ValuesMatcher(time, {
        'group': 'CompileTheWorld',
        'name': 'CompileTime',
        'score': '<time>'
    })

    jre = os.environ.get('JAVA_HOME')
    if exists(join(jre, 'jre')):
        jre = join(jre, 'jre')
    rtjar = join(jre, 'lib', 'rt.jar')

    args = ['-XX:+CompileTheWorld', '-Xbootclasspath/p:' + rtjar]
    if vm == 'jvmci':
        args += ['-XX:+BootstrapGraal']
    if mode >= CTWMode.NoInline:
        if not mx_graal_core.isJVMCIEnabled(vm):
            args.append('-XX:-Inline')
        else:
            args.append('-G:CompileTheWorldConfig=-Inline')

    return Test("CompileTheWorld",
                args,
                successREs=[time],
                scoreMatchers=[scoreMatcher],
                benchmarkCompilationRate=False)
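
CTWMode is referenced but not defined in this snippet. A minimal, hypothetical stand-in that preserves the ordering the mode >= CTWMode.NoInline comparison relies on could look like this; the real definition lives elsewhere in the suite:

# Hypothetical stand-in for the CTWMode constants used above (assumption, not
# the suite's actual definition). Only the relative ordering matters for the
# `mode >= CTWMode.NoInline` / `mode >= CTWMode.NoComplex` checks.
class CTWMode:
    Full, NoInline, NoComplex = range(3)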
Example #8
def getSPECjvm2008(benchArgs=None):

    specjvm2008 = mx.get_env('SPECJVM2008')
    if specjvm2008 is None or not exists(join(specjvm2008, 'SPECjvm2008.jar')):
        mx.abort(
            'Please set the SPECJVM2008 environment variable to a SPECjvm2008 directory'
        )

    score = re.compile(
        r"^(Score on|Noncompliant) (?P<benchmark>[a-zA-Z0-9\._]+)( result)?: (?P<score>[0-9]+((,|\.)[0-9]+)?)( SPECjvm2008 Base)? ops/m$",
        re.MULTILINE)
    error = re.compile(r"^Errors in benchmark: ", re.MULTILINE)
    # The ' ops/m' at the end of the success string is important: it's how you can tell valid and invalid runs apart
    success = re.compile(
        r"^(Noncompliant c|C)omposite result: [0-9]+((,|\.)[0-9]+)?( SPECjvm2008 (Base|Peak))? ops/m$",
        re.MULTILINE)
    matcher = ValuesMatcher(score, {
        'group': 'SPECjvm2008',
        'name': '<benchmark>',
        'score': '<score>'
    })

    return Test("SPECjvm2008",
                ['-jar', 'SPECjvm2008.jar'] + _noneAsEmptyList(benchArgs),
                [success], [error], [matcher],
                vmOpts=['-Xms3g', '-XX:+' + gc, '-XX:-UseCompressedOops'],
                defaultCwd=specjvm2008)
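
The score pattern above accepts both compliant and noncompliant per-benchmark lines, and the trailing ' ops/m' is what distinguishes a valid score. A small sketch against fabricated lines in that format:

import re

score = re.compile(
    r"^(Score on|Noncompliant) (?P<benchmark>[a-zA-Z0-9\._]+)( result)?: "
    r"(?P<score>[0-9]+((,|\.)[0-9]+)?)( SPECjvm2008 Base)? ops/m$",
    re.MULTILINE)

# Made-up report lines; note the trailing ' ops/m' on both.
output = ("Score on compress: 593.04 ops/m\n"
          "Noncompliant crypto.aes result: 101.3 ops/m")
for m in score.finditer(output):
    print(m.group('benchmark'), m.group('score'))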
Example #9
def getScalaDacapo(name, dacapoArgs=[]):
    dacapo = mx.get_env('DACAPO_SCALA_CP')
    if dacapo is None:
        l = mx.library('DACAPO_SCALA', False)
        if l is not None:
            dacapo = l.get_path(True)
        else:
            mx.abort(
                'Scala DaCapo 0.1.0 jar file must be specified with DACAPO_SCALA_CP environment variable or as DACAPO_SCALA library'
            )

    if not isfile(dacapo) or not dacapo.endswith('.jar'):
        mx.abort(
            'Specified Scala DaCapo jar file does not exist or is not a jar file: '
            + dacapo)

    dacapoSuccess = re.compile(
        r"^===== DaCapo 0\.1\.0(-SNAPSHOT)? ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====$",
        re.MULTILINE)
    dacapoFail = re.compile(
        r"^===== DaCapo 0\.1\.0(-SNAPSHOT)? ([a-zA-Z0-9_]+) FAILED (warmup|) =====$",
        re.MULTILINE)
    dacapoTime = re.compile(
        r"===== DaCapo 0\.1\.0(-SNAPSHOT)? (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec ====="
    )

    dacapoMatcher = ValuesMatcher(dacapoTime, {
        'group': "Scala-DaCapo",
        'name': '<benchmark>',
        'score': '<time>'
    })

    return Test("Scala-DaCapo-" + name, ['-jar', dacapo, name] + dacapoArgs,
                [dacapoSuccess], [dacapoFail], [dacapoMatcher],
                ['-Xms2g', '-XX:+' + gc, '-XX:-UseCompressedOops'])
Example #10
File: pymarks.py  Project: wdv4758h/ZipPy
def getSuccessErrorMatcher():
    score = re.compile(
        r"^(?P<benchmark>[a-zA-Z0-9\.\-]+): (?P<score>[0-9]+(\.[0-9]+)?$)",
        re.MULTILINE)
    error = re.compile(r"Exception")
    success = score  #re.compile(r"^Score \(version \d\): (?:[0-9]+(?:\.[0-9]+)?)", re.MULTILINE)
    matcher = ValuesMatcher(score, {
        'group': 'Python',
        'name': '<benchmark>',
        'score': '<score>'
    })
    return success, error, matcher
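
A quick check of the score pattern above against a fabricated line of the form the Python benchmarks print (benchmark name, colon, numeric score):

import re

score = re.compile(
    r"^(?P<benchmark>[a-zA-Z0-9\.\-]+): (?P<score>[0-9]+(\.[0-9]+)?$)",
    re.MULTILINE)

# Made-up benchmark output line.
m = score.search("binarytrees3t: 12.345")
if m:
    print(m.group('benchmark'), m.group('score'))  # binarytrees3t 12.345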
Example #11
def getCTW(vm, mode):
    time = re.compile(
        r"CompileTheWorld : Done \([0-9]+ classes, [0-9]+ methods, (?P<time>[0-9]+) ms\)"
    )
    scoreMatcher = ValuesMatcher(time, {
        'group': 'CompileTheWorld',
        'name': 'CompileTime',
        'score': '<time>'
    })

    jre = os.environ.get('JAVA_HOME')
    if exists(join(jre, 'jre')):
        jre = join(jre, 'jre')
    rtjar = join(jre, 'lib', 'rt.jar')

    args = ['-XX:+CompileTheWorld', '-Xbootclasspath/p:' + rtjar]
    if commands.isGraalEnabled(vm):
        args += ['-XX:+BootstrapGraal', '-G:-Debug']
    if mode >= CTWMode.NoInline:
        if not commands.isGraalEnabled(vm):
            args.append('-XX:-Inline')
        else:
            args.append('-G:-Inline')
    if mode >= CTWMode.NoComplex:
        if commands.isGraalEnabled(vm):
            args += [
                '-G:-OptLoopTransform', '-G:-OptTailDuplication',
                '-G:-FullUnroll', '-G:-MemoryAwareScheduling',
                '-G:-NewMemoryAwareScheduling', '-G:-PartialEscapeAnalysis'
            ]

    return Test("CompileTheWorld",
                args,
                successREs=[time],
                scoreMatchers=[scoreMatcher],
                benchmarkCompilationRate=False)
Example #12
    def bench(self, vm, cwd=None, opts=[], vmbuild=None):
        """
        Run this program as a benchmark.
        """
        if vm in self.ignoredVMs:
            return {}
        if cwd is None:
            cwd = self.defaultCwd
        parser = OutputParser()

        for successRE in self.successREs:
            parser.addMatcher(ValuesMatcher(successRE, {'passed': '1'}))
        for failureRE in self.failureREs:
            parser.addMatcher(ValuesMatcher(failureRE, {'failed': '1'}))
        for scoreMatcher in self.scoreMatchers:
            parser.addMatcher(scoreMatcher)

        if self.benchmarkCompilationRate:
            if vm == 'graal':
                bps = re.compile(
                    r"ParsedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
                ibps = re.compile(
                    r"InlinedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
                parser.addMatcher(
                    ValuesMatcher(
                        bps, {
                            'group': 'ParsedBytecodesPerSecond',
                            'name': self.name,
                            'score': '<rate>'
                        }))
                parser.addMatcher(
                    ValuesMatcher(
                        ibps, {
                            'group': 'InlinedBytecodesPerSecond',
                            'name': self.name,
                            'score': '<rate>'
                        }))
            else:
                ibps = re.compile(
                    r"(?P<compiler>[\w]+) compilation speed: +(?P<rate>[0-9]+) bytes/s {standard"
                )
                parser.addMatcher(
                    ValuesMatcher(
                        ibps, {
                            'group': 'InlinedBytecodesPerSecond',
                            'name': '<compiler>:' + self.name,
                            'score': '<rate>'
                        }))

        startDelim = 'START: ' + self.name
        endDelim = 'END: ' + self.name

        outputfile = os.environ.get('BENCH_OUTPUT', None)
        if outputfile:
            # Used only to debug output parsing
            with open(outputfile) as fp:
                output = fp.read()
                start = output.find(startDelim)
                end = output.find(endDelim, start)
                if start == -1 and end == -1:
                    return {}
                output = output[start + len(startDelim + os.linesep):end]
                mx.log(startDelim)
                mx.log(output)
                mx.log(endDelim)
        else:
            tee = Tee()
            mx.log(startDelim)
            if commands.vm(self.vmOpts + opts + self.cmd,
                           vm,
                           nonZeroIsFatal=False,
                           out=tee.eat,
                           err=subprocess.STDOUT,
                           cwd=cwd,
                           vmbuild=vmbuild) != 0:
                mx.abort("Benchmark failed (non-zero retcode)")
            mx.log(endDelim)
            output = tee.output.getvalue()

        groups = {}
        passed = False
        for valueMap in parser.parse(output):
            assert (('name' in valueMap and 'score' in valueMap
                     and 'group' in valueMap) or 'passed' in valueMap
                    or 'failed' in valueMap), valueMap
            if valueMap.get('failed') == '1':
                mx.abort("Benchmark failed")
            if valueMap.get('passed') == '1':
                passed = True
            groupName = valueMap.get('group')
            if groupName:
                group = groups.setdefault(groupName, {})
                name = valueMap.get('name')
                score = valueMap.get('score')
                if name and score:
                    group[name] = score

        if not passed:
            mx.abort("Benchmark failed (not passed)")

        return groups
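
The BENCH_OUTPUT debug path in bench() slices the captured text between the START/END delimiters before parsing. That slicing can be exercised on its own; the test name and output below are fabricated:

import os

name = 'DaCapo-avrora'  # hypothetical test name
startDelim = 'START: ' + name
endDelim = 'END: ' + name

output = (startDelim + os.linesep +
          "===== DaCapo 9.12 avrora PASSED in 4321 msec =====" + os.linesep +
          endDelim)

start = output.find(startDelim)
end = output.find(endDelim, start)
if start != -1 and end != -1:
    # Same slice bench() performs before handing the text to the parser.
    section = output[start + len(startDelim + os.linesep):end]
    print(section)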
Example #13
File: pymarks.py  Project: wdv4758h/ZipPy
    def bench(self, vm, cwd=None, extraVmOpts=None, vmbuild=None):
        """
        Run this program as a benchmark.
        Copied from sanitycheck.Test to extend benchmarking for non-JVMs.
        """
        if vm in self.ignoredVMs:
            return {}
        if cwd is None:
            cwd = self.defaultCwd
        parser = OutputParser()

        for successRE in self.successREs:
            parser.addMatcher(ValuesMatcher(successRE, {'passed': '1'}))
        for failureRE in self.failureREs:
            parser.addMatcher(ValuesMatcher(failureRE, {'failed': '1'}))
        for scoreMatcher in self.scoreMatchers:
            parser.addMatcher(scoreMatcher)

        if self.benchmarkCompilationRate:
            if vm == 'graal':
                bps = re.compile(
                    r"ParsedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
                ibps = re.compile(
                    r"InlinedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
                parser.addMatcher(
                    ValuesMatcher(
                        bps, {
                            'group': 'ParsedBytecodesPerSecond',
                            'name': self.name,
                            'score': '<rate>'
                        }))
                parser.addMatcher(
                    ValuesMatcher(
                        ibps, {
                            'group': 'InlinedBytecodesPerSecond',
                            'name': self.name,
                            'score': '<rate>'
                        }))
            else:
                ibps = re.compile(
                    r"(?P<compiler>[\w]+) compilation speed: +(?P<rate>[0-9]+) bytes/s {standard"
                )
                parser.addMatcher(
                    ValuesMatcher(
                        ibps, {
                            'group': 'InlinedBytecodesPerSecond',
                            'name': '<compiler>:' + self.name,
                            'score': '<rate>'
                        }))

        startDelim = 'START: ' + self.name
        endDelim = 'END: ' + self.name

        outputfile = os.environ.get('BENCH_OUTPUT', None)
        if outputfile:
            # Used only to debug output parsing
            with open(outputfile) as fp:
                output = fp.read()
                start = output.find(startDelim)
                end = output.find(endDelim, start)
                if start == -1 and end == -1:
                    return {}
                output = output[start + len(startDelim + os.linesep):end]
                mx.log(startDelim)
                mx.log(output)
                mx.log(endDelim)
        else:
            tee = Tee()
            mx.log(startDelim)
            # zippy
            result = -1
            if vm == 'cpython2':
                result = mx.run(['python'] + self.cmd[-2:], out=tee.eat)
            elif vm == 'cpython':
                result = mx.run(['python3'] + self.cmd[-2:], out=tee.eat)
            elif vm == 'jython':
                result = mx_graal.vm(
                    ['-jar', mx.library('JYTHON').path] + self.cmd[-2:],
                    vm='original',
                    out=tee.eat)
            elif vm == 'pypy':
                result = mx.run(['pypy'] + self.cmd[-2:], out=tee.eat)
            elif vm == 'pypy3':
                result = mx.run(['pypy3'] + self.cmd[-2:], out=tee.eat)
            else:
                result = mx_graal.vm(self.vmOpts +
                                     _noneAsEmptyList(extraVmOpts) + self.cmd,
                                     vm,
                                     nonZeroIsFatal=False,
                                     out=tee.eat,
                                     err=subprocess.STDOUT,
                                     cwd=cwd,
                                     vmbuild=vmbuild)

            if result != 0:
                mx.abort("Benchmark failed (non-zero retcode)")
            # wait for subprocess to finish
            time.sleep(.5)
            mx.log(endDelim)
            output = tee.output.getvalue()

        groups = {}
        passed = False
        for valueMap in parser.parse(output):
            assert (('name' in valueMap and 'score' in valueMap
                     and 'group' in valueMap) or 'passed' in valueMap
                    or 'failed' in valueMap), valueMap
            if valueMap.get('failed') == '1':
                mx.abort("Benchmark failed")
            if valueMap.get('passed') == '1':
                passed = True
            groupName = valueMap.get('group')
            if groupName:
                group = groups.setdefault(groupName, {})
                name = valueMap.get('name')
                score = valueMap.get('score')
                if name and score:
                    group[name] = score

        if not passed:
            mx.abort("Benchmark failed (not passed)")

        return groups