Example #1
File: mx_sulong.py  Project: bjfish/sulong
def travis2(args=None):
    tasks = []
    with Task('BuildHotSpotGraalServer: product', tasks) as t:
        if t: buildvms(['-c', '--vms', 'server', '--builds', 'product'])
    with VM('server', 'product'):
        with Task('TestGCC', tasks) as t:
            if t: runGCCTestCases()
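Every example on this page follows the same pattern: each gate step is wrapped in a Task context manager that registers itself in the shared tasks list, and commands that need a particular VM flavour and build level run inside a VM context. Neither Task nor VM is defined in these snippets; the following is a minimal sketch of how such context managers could behave, assuming that Task yields itself when the step should run and None when it is skipped, and that VM merely switches a default target for the duration of its block. It is not the actual mx implementation.

import time
from contextlib import contextmanager

class Task(object):
    # Hypothetical stand-in for mx's Task: it registers itself in the shared list
    # and returns None from __enter__ when the step is skipped, so "if t:" guards
    # the body of every step in the examples.
    def __init__(self, title, tasks, skip=False):
        self.title = title
        self.skip = skip
        self.start = None
        self.duration = None
        tasks.append(self)

    def __enter__(self):
        if self.skip:
            return None
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self.start is not None:
            self.duration = time.time() - self.start
        return False  # never swallow exceptions from the step body

    def abort(self, message):
        # Matches how the examples call t.abort(...): stop the gate with an error.
        raise SystemExit(self.title + ': ' + message)

_current_vm = ('server', 'product')

@contextmanager
def VM(name, build):
    # Hypothetical stand-in for mx's VM context: temporarily selects which VM
    # flavour and build level subsequent commands should target.
    global _current_vm
    previous, _current_vm = _current_vm, (name, build)
    try:
        yield
    finally:
        _current_vm = previous

With definitions along these lines, the snippets above run unchanged: skipped steps simply leave t as None, and nested VM blocks restore the previous target when they exit.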
Example #2
File: mx_sulong.py  Project: bjfish/sulong
def executeGate():
    """executes the TruffleLLVM gate tasks"""
    tasks = []
    with Task('BuildHotSpotGraalServer: product', tasks) as t:
        if t: buildvms(['-c', '--vms', 'server', '--builds', 'product'])
    with VM('server', 'product'):
        with Task('Findbugs', tasks) as t:
            if t: mx_findbugs.findbugs([])
    with VM('server', 'product'):
        with Task('TestBenchmarks', tasks) as t:
            if t: runBenchmarkTestCases()
    with VM('server', 'product'):
        with Task('TestTypes', tasks) as t:
            if t: runTypeTestCases()
    with VM('server', 'product'):
        with Task('TestSulong', tasks) as t:
            if t: runTruffleTestCases()
    with VM('server', 'product'):
        with Task('TestGCC', tasks) as t:
            if t: runGCCTestCases()
    with VM('server', 'product'):
        with Task('TestLLVM', tasks) as t:
            if t: runLLVMTestCases()
    with VM('server', 'product'):
        with Task('TestNWCC', tasks) as t:
            if t: runNWCCTestCases()
Example #3
def travisJRuby(args=None):
    tasks = []
    with Task('BuildHotSpotGraalServer: product', tasks) as t:
        if t: buildvms(['-c', '--vms', 'server', '--builds', 'product'])
    with VM('server', 'product'):
        with Task('TestJRuby', tasks) as t:
            if t: runTestJRuby()
Example #4
def executeGate():
    """executes the TruffleLLVM gate tasks"""
    tasks = []
    with Task('BuildHotSpotGraalServer: product', tasks) as t:
        if t: buildvms(['-c', '--vms', 'server', '--builds', 'product'])
    with VM('server', 'product'):
        with Task('Findbugs', tasks) as t:
            if t: mx_findbugs.findbugs([])
    with VM('server', 'product'):
        with Task('TestBenchmarks', tasks) as t:
            if t: runBenchmarkTestCases()
    with VM('server', 'product'):
        with Task('TestTypes', tasks) as t:
            if t: runTypeTestCases()
    with VM('server', 'product'):
        with Task('TestSulong', tasks) as t:
            if t: runTruffleTestCases()
    with VM('server', 'product'):
        with Task('TestGCC', tasks) as t:
            if t: runGCCTestCases()
    with VM('server', 'product'):
        with Task('TestLLVM', tasks) as t:
            if t: runLLVMTestCases()
    with VM('server', 'product'):
        with Task('TestNWCC', tasks) as t:
            if t: runNWCCTestCases()
Example #5
def travis2(args=None):
    tasks = []
    with Task('BuildHotSpotGraalServer: product', tasks) as t:
        if t: buildvms(['-c', '--vms', 'server', '--builds', 'product'])
    with VM('server', 'product'):
        with Task('TestGCC', tasks) as t:
            if t: runGCCTestCases()
Example #6
def compiler_gate_runner(suites, unit_test_runs, bootstrap_tests, tasks, extraVMarguments=None):

    # Build server-hosted-jvmci now so we can run the unit tests
    with Task('BuildHotSpotGraalHosted: product', tasks) as t:
        if t: buildvms(['--vms', 'server', '--builds', 'product'])

    with VM('server', 'product'):
        # Run unit tests on server-hosted-jvmci
        for r in unit_test_runs:
            r.run(suites, tasks, extraVMarguments)

    # Run microbench on server-hosted-jvmci (only for testing the JMH setup)
    with VM('server', 'product'):
        for r in [MicrobenchRun('Microbench', ['TestJMH'])]:
            r.run(tasks, extraVMarguments)

    # Run ctw against rt.jar on server-hosted-jvmci
    with VM('server', 'product'):
        with Task('CTW:hosted-product', tasks) as t:
            if t: ctw(['--ctwopts', '-Inline +ExitVMOnException', '-esa', '-G:+CompileTheWorldMultiThreaded', '-G:-InlineDuringParsing', '-G:-CompileTheWorldVerbose', '-XX:ReservedCodeCacheSize=400m'], _noneAsEmptyList(extraVMarguments))

    # Build the jvmci VMs so we can run the other tests
    with Task('BuildHotSpotGraalOthers: fastdebug,product', tasks) as t:
        if t: buildvms(['--vms', 'jvmci', '--builds', 'fastdebug,product'])

    # bootstrap tests
    for b in bootstrap_tests:
        b.run(tasks, extraVMarguments)

    # run dacapo sanitychecks
    for vmbuild in ['fastdebug', 'product']:
        for test in sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel=vmbuild, extraVmArguments=extraVMarguments) \
                + sanitycheck.getScalaDacapos(level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel=vmbuild, extraVmArguments=extraVMarguments):
            with Task(str(test) + ':' + vmbuild, tasks) as t:
                if t and not test.test('jvmci'):
                    t.abort(test.name + ' Failed')

    # ensure -Xbatch still works
    with VM('jvmci', 'product'):
        with Task('DaCapo_pmd:BatchMode:product', tasks) as t:
            if t: dacapo(_noneAsEmptyList(extraVMarguments) + ['-Xbatch', 'pmd'])

    # ensure benchmark counters still work
    with VM('jvmci', 'product'):
        with Task('DaCapo_pmd:BenchmarkCounters:product', tasks) as t:
            if t: dacapo(_noneAsEmptyList(extraVMarguments) + ['-G:+LIRProfileMoves', '-G:+GenericDynamicCounters', '-XX:JVMCICounterSize=10', 'pmd'])

    # ensure -Xcomp still works
    with VM('jvmci', 'product'):
        with Task('XCompMode:product', tasks) as t:
            if t: run_vm(_noneAsEmptyList(extraVMarguments) + ['-Xcomp', '-version'])
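The compiler gate examples default extraVMarguments to None yet concatenate it with plain argument lists through _noneAsEmptyList. That helper is not shown on this page; a plausible definition, inferred purely from how it is called, would be:

def _noneAsEmptyList(value):
    # Inferred behaviour: treat a missing optional argument list as an empty list,
    # so _noneAsEmptyList(extraVMarguments) + ['-Xcomp', '-version'] always works.
    return [] if value is None else value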
Example #7
def compiler_simple_gate_runner(suites, unit_test_runs, bootstrap_tests, tasks, extraVMarguments=None):
    # Run unit tests on server-hosted-jvmci
    with VM('jvmci', 'fastdebug'):
        # Build
        with Task('BuildHotSpotJVMCI: fastdebug', tasks) as t:
            if t: buildvms(['--vms', 'jvmci', '--builds', 'fastdebug'])

        for r in unit_test_runs:
            r.run(suites, tasks, extraVMarguments)

        # Run microbench (only for testing the JMH setup)
        for r in [MicrobenchRun('Microbench', ['TestJMH'])]:
            r.run(tasks, extraVMarguments)

        # bootstrap tests
        for b in bootstrap_tests:
            b.run(tasks, extraVMarguments)
Example #8
def travis1(args=None):
    tasks = []
    with Task('BuildJavaWithEcj', tasks) as t:
        if t:
            if mx.get_env('JDT'):
                mx.command_function('build')(['-p', '--no-native', '--warning-as-error'])
                gate_clean([], tasks, name='CleanAfterEcjBuild')
            else:
                mx._warn_or_abort('JDT environment variable not set. Cannot execute BuildJavaWithEcj task.', args.strict_mode)
    with Task('BuildJavaWithJavac', tasks) as t:
        if t: mx.command_function('build')(['-p', '--warning-as-error', '--no-native', '--force-javac'])
    with Task('BuildHotSpotGraalServer: product', tasks) as t:
        if t: buildvms(['-c', '--vms', 'server', '--builds', 'product'])
    with VM('server', 'product'):
        with Task('Findbugs', tasks) as t:
            if t and mx_findbugs.findbugs([]) != 0:
                t.abort('FindBugs warnings were found')
    with VM('server', 'product'):
        with Task('TestBenchmarks', tasks) as t:
            if t: runBenchmarkTestCases()
    with VM('server', 'product'):
        with Task('TestPolyglot', tasks) as t:
            if t: runPolyglotTestCases()
    with VM('server', 'product'):
        with Task('TestInterop', tasks) as t:
            if t: runInteropTestCases()
    with VM('server', 'product'):
        with Task('TestAsm', tasks) as t:
            if t: runAsmTestCases()
    with VM('server', 'product'):
        with Task('TestTypes', tasks) as t:
            if t: runTypeTestCases()
    with VM('server', 'product'):
        with Task('TestSulong', tasks) as t:
            if t: runTruffleTestCases()
    with VM('server', 'product'):
        with Task('TestLLVM', tasks) as t:
            if t: runLLVMTestCases()
    with VM('server', 'product'):
        with Task('TestNWCC', tasks) as t:
            if t: runNWCCTestCases()
    with VM('server', 'product'):
        with Task('TestGCCSuiteCompile', tasks) as t:
            if t: runCompileTestCases()
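The long run of near-identical with VM(...) / with Task(...) blocks in travis1 and executeGate could also be written data-driven. The sketch below is only an illustration of that refactoring; it assumes the same Task and VM context managers and the runXxxTestCases helpers referenced in the examples, and the runSulongTestTasks name is hypothetical.

def runSulongTestTasks(tasks):
    # Hypothetical refactoring: drive the repeated per-suite blocks from a table
    # of (task name, test function) pairs instead of spelling each block out.
    test_runs = [
        ('TestBenchmarks', runBenchmarkTestCases),
        ('TestTypes', runTypeTestCases),
        ('TestSulong', runTruffleTestCases),
        ('TestLLVM', runLLVMTestCases),
        ('TestNWCC', runNWCCTestCases),
    ]
    for name, run in test_runs:
        with VM('server', 'product'):
            with Task(name, tasks) as t:
                if t: run()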
Example #9
def compiler_simple_gate_runner(suites,
                                unit_test_runs,
                                bootstrap_tests,
                                tasks,
                                extraVMarguments=None):
    # Run unit tests on server-hosted-jvmci
    with VM('jvmci', 'fastdebug'):
        # Build
        with Task('BuildHotSpotJVMCI: fastdebug', tasks) as t:
            if t: buildvms(['--vms', 'jvmci', '--builds', 'fastdebug'])

        for r in unit_test_runs:
            r.run(suites, tasks, extraVMarguments)

        # Run microbench (only for testing the JMH setup)
        for r in [MicrobenchRun('Microbench', ['TestJMH'])]:
            r.run(tasks, extraVMarguments)

        # bootstrap tests
        for b in bootstrap_tests:
            b.run(tasks, extraVMarguments)
Example #10
File: mx_sulong.py  Project: bjfish/sulong
def travis1(args=None):
    tasks = []
    with Task('BuildHotSpotGraalServer: product', tasks) as t:
        if t: buildvms(['-c', '--vms', 'server', '--builds', 'product'])
    with VM('server', 'product'):
        with Task('Findbugs', tasks) as t:
            if t: mx_findbugs.findbugs([])
    with VM('server', 'product'):
        with Task('TestBenchmarks', tasks) as t:
            if t: runBenchmarkTestCases()
    with VM('server', 'product'):
        with Task('TestTypes', tasks) as t:
            if t: runTypeTestCases()
    with VM('server', 'product'):
        with Task('TestSulong', tasks) as t:
            if t: runTruffleTestCases()
    with VM('server', 'product'):
        with Task('TestLLVM', tasks) as t:
            if t: runLLVMTestCases()
    with VM('server', 'product'):
        with Task('TestNWCC', tasks) as t:
            if t: runNWCCTestCases()
Example #11
def executeGate():
    """executes the TruffleLLVM gate tasks"""
    tasks = []
    with Task('BuildHotSpotGraalServer: product', tasks) as t:
        if t: buildvms(['-c', '--vms', 'server', '--builds', 'product'])
    with VM('server', 'product'):
        with Task('Findbugs', tasks) as t:
            if t and mx_findbugs.findbugs([]) != 0:
                t.abort('FindBugs warnings were found')
    with VM('server', 'product'):
        with Task('TestBenchmarks', tasks) as t:
            if t: runBenchmarkTestCases()
    with VM('server', 'product'):
        with Task('TestTypes', tasks) as t:
            if t: runTypeTestCases()
    with VM('server', 'product'):
        with Task('TestPolyglot', tasks) as t:
            if t: runPolyglotTestCases()
    with VM('server', 'product'):
        with Task('TestInterop', tasks) as t:
            if t: runInteropTestCases()
    with VM('server', 'product'):
        with Task('TestAsm', tasks) as t:
            if t: runAsmTestCases()
    with VM('server', 'product'):
        with Task('TestSulong', tasks) as t:
            if t: runTruffleTestCases()
    with VM('server', 'product'):
        with Task('TestGCC', tasks) as t:
            if t: runGCCTestCases()
    with VM('server', 'product'):
        with Task('TestLLVM', tasks) as t:
            if t: runLLVMTestCases()
    with VM('server', 'product'):
        with Task('TestNWCC', tasks) as t:
            if t: runNWCCTestCases()
    with VM('server', 'product'):
        with Task('TestGCCSuiteCompile', tasks) as t:
            if t: runCompileTestCases()
Example #12
def travis1(args=None):
    tasks = []
    with Task('BuildHotSpotGraalServer: product', tasks) as t:
        if t: buildvms(['-c', '--vms', 'server', '--builds', 'product'])
    with VM('server', 'product'):
        with Task('Findbugs', tasks) as t:
            if t: mx_findbugs.findbugs([])
    with VM('server', 'product'):
        with Task('TestBenchmarks', tasks) as t:
            if t: runBenchmarkTestCases()
    with VM('server', 'product'):
        with Task('TestTypes', tasks) as t:
            if t: runTypeTestCases()
    with VM('server', 'product'):
        with Task('TestSulong', tasks) as t:
            if t: runTruffleTestCases()
    with VM('server', 'product'):
        with Task('TestLLVM', tasks) as t:
            if t: runLLVMTestCases()
    with VM('server', 'product'):
        with Task('TestNWCC', tasks) as t:
            if t: runNWCCTestCases()
Example #13
def compiler_gate_runner(suites, unit_test_runs, bootstrap_tests, tasks, extraVMarguments=None):

    # Build server-hosted-jvmci now so we can run the unit tests
    with Task('BuildHotSpotGraalHosted: product', tasks, tags=[GraalTags.test, GraalTags.fulltest]) as t:
        if t: buildvms(['--vms', 'server', '--builds', 'product'])

    with VM('server', 'product'):
        # Run unit tests on server-hosted-jvmci
        for r in unit_test_runs:
            r.run(suites, tasks, extraVMarguments)

    # Run microbench on server-hosted-jvmci (only for testing the JMH setup)
    with VM('server', 'product'):
        for r in [MicrobenchRun('Microbench', ['TestJMH'], tags=[GraalTags.fulltest])]:
            r.run(tasks, extraVMarguments)

    # Run ctw against rt.jar on server-hosted-jvmci
    with VM('server', 'product'):
        with Task('CTW:hosted-product', tasks, tags=[GraalTags.fulltest]) as t:
            if t: ctw(['--ctwopts', '-Inline +ExitVMOnException', '-esa', '-G:+CompileTheWorldMultiThreaded', '-G:-InlineDuringParsing', '-G:-CompileTheWorldVerbose', '-XX:ReservedCodeCacheSize=400m'], _noneAsEmptyList(extraVMarguments))

    # Build the jvmci VMs so we can run the other tests
    with Task('BuildHotSpotGraalJVMCI: fastdebug', tasks, tags=[GraalTags.bootstrap, GraalTags.fulltest]) as t:
        if t: buildvms(['--vms', 'jvmci', '--builds', 'fastdebug'])
    with Task('BuildHotSpotGraalJVMCI: product', tasks, tags=[GraalTags.fulltest]) as t:
        if t: buildvms(['--vms', 'jvmci', '--builds', 'product'])

    # bootstrap tests
    for b in bootstrap_tests:
        b.run(tasks, extraVMarguments)

    # run dacapo sanitychecks
    for vmbuild in ['fastdebug', 'product']:
        for test in sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel=vmbuild, extraVmArguments=extraVMarguments) \
                + sanitycheck.getScalaDacapos(level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel=vmbuild, extraVmArguments=extraVMarguments):
            with Task(str(test) + ':' + vmbuild, tasks, tags=[GraalTags.fulltest]) as t:
                if t and not test.test('jvmci'):
                    t.abort(test.name + ' Failed')

    # ensure -Xbatch still works
    with VM('jvmci', 'product'):
        with Task('DaCapo_pmd:BatchMode:product', tasks, tags=[GraalTags.fulltest]) as t:
            if t: dacapo(_noneAsEmptyList(extraVMarguments) + ['-Xbatch', 'pmd'])

    # ensure benchmark counters still work
    with VM('jvmci', 'product'):
        with Task('DaCapo_pmd:BenchmarkCounters:product', tasks, tags=[GraalTags.fulltest]) as t:
            if t: dacapo(_noneAsEmptyList(extraVMarguments) + ['-G:+LIRProfileMoves', '-G:+GenericDynamicCounters', '-XX:JVMCICounterSize=10', 'pmd'])

    # ensure -Xcomp still works
    with VM('jvmci', 'product'):
        with Task('XCompMode:product', tasks, tags=[GraalTags.fulltest]) as t:
            if t: run_vm(_noneAsEmptyList(extraVMarguments) + ['-Xcomp', '-version'])
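Examples #13 and #14 additionally tag each Task (for example tags=[GraalTags.fulltest]), which suggests that a gate run can select a subset of steps by tag. How mx actually performs that filtering is not visible in these snippets; building on the hypothetical Task sketch after Example #1, tag-based selection could look roughly like this:

class GraalTags:
    # Hypothetical tag constants mirroring the names used in Examples #13 and #14.
    test = 'test'
    fulltest = 'fulltest'
    bootstrap = 'bootstrap'

_requested_tags = None  # e.g. set(['test']) to run only the basic gate; None runs everything

class TaggedTask(Task):
    # Hypothetical extension of the earlier Task sketch: a step is skipped unless
    # no filter is active or one of its tags was requested.
    def __init__(self, title, tasks, tags=None):
        selected = (_requested_tags is None
                    or tags is None
                    or bool(set(tags) & _requested_tags))
        Task.__init__(self, title, tasks, skip=not selected)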
Example #14
def compiler_gate_runner(suites, unit_test_runs, bootstrap_tests, tasks, extraVMarguments=None):

    # Build server-hosted-jvmci now so we can run the unit tests
    with Task("BuildHotSpotGraalHosted: product", tasks, tags=[GraalTags.test, GraalTags.fulltest]) as t:
        if t:
            buildvms(["--vms", "server", "--builds", "product"])

    with VM("server", "product"):
        # Run unit tests on server-hosted-jvmci
        for r in unit_test_runs:
            r.run(suites, tasks, extraVMarguments)

    # Run microbench on server-hosted-jvmci (only for testing the JMH setup)
    with VM("server", "product"):
        for r in [MicrobenchRun("Microbench", ["TestJMH"], tags=[GraalTags.fulltest])]:
            r.run(tasks, extraVMarguments)

    # Run ctw against rt.jar on server-hosted-jvmci
    with VM("server", "product"):
        with Task("CTW:hosted-product", tasks, tags=[GraalTags.fulltest]) as t:
            if t:
                ctw(
                    [
                        "--ctwopts",
                        "-Inline +ExitVMOnException",
                        "-esa",
                        "-G:+CompileTheWorldMultiThreaded",
                        "-G:-InlineDuringParsing",
                        "-G:-CompileTheWorldVerbose",
                        "-XX:ReservedCodeCacheSize=400m",
                    ],
                    _noneAsEmptyList(extraVMarguments),
                )

    # Build the jvmci VMs so we can run the other tests
    with Task("BuildHotSpotGraalJVMCI: fastdebug", tasks, tags=[GraalTags.bootstrap, GraalTags.fulltest]) as t:
        if t:
            buildvms(["--vms", "jvmci", "--builds", "fastdebug"])
    with Task("BuildHotSpotGraalJVMCI: product", tasks, tags=[GraalTags.fulltest]) as t:
        if t:
            buildvms(["--vms", "jvmci", "--builds", "product"])

    # bootstrap tests
    for b in bootstrap_tests:
        b.run(tasks, extraVMarguments)

    # run dacapo sanitychecks
    for vmbuild in ["fastdebug", "product"]:
        for test in sanitycheck.getDacapos(
            level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel=vmbuild, extraVmArguments=extraVMarguments
        ) + sanitycheck.getScalaDacapos(
            level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel=vmbuild, extraVmArguments=extraVMarguments
        ):
            with Task(str(test) + ":" + vmbuild, tasks, tags=[GraalTags.fulltest]) as t:
                if t and not test.test("jvmci"):
                    t.abort(test.name + " Failed")

    # ensure -Xbatch still works
    with VM("jvmci", "product"):
        with Task("DaCapo_pmd:BatchMode:product", tasks, tags=[GraalTags.fulltest]) as t:
            if t:
                dacapo(_noneAsEmptyList(extraVMarguments) + ["-Xbatch", "pmd"])

    # ensure benchmark counters still work
    with VM("jvmci", "product"):
        with Task("DaCapo_pmd:BenchmarkCounters:product", tasks, tags=[GraalTags.fulltest]) as t:
            if t:
                dacapo(
                    _noneAsEmptyList(extraVMarguments)
                    + ["-G:+LIRProfileMoves", "-G:+GenericDynamicCounters", "-XX:JVMCICounterSize=10", "pmd"]
                )

    # ensure -Xcomp still works
    with VM("jvmci", "product"):
        with Task("XCompMode:product", tasks, tags=[GraalTags.fulltest]) as t:
            if t:
                run_vm(_noneAsEmptyList(extraVMarguments) + ["-Xcomp", "-version"])