Code example #1
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a runtime checking transformation test case."""
        with use_custom_builtins(
                os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            expected_output = remove_comment_lines(testcase.output)
            try:
                ir = build_ir_for_single_file(testcase.input)
            except CompileError as e:
                actual = e.messages
            else:
                actual = []
                for fn in ir:
                    if (fn.name == TOP_LEVEL_NAME
                            and not testcase.name.endswith('_toplevel')):
                        continue
                    insert_uninit_checks(fn)
                    insert_exception_handling(fn)
                    insert_ref_count_opcodes(fn)
                    actual.extend(format_func(fn))
                    if testcase.name.endswith('_freq'):
                        common = frequently_executed_blocks(fn.blocks[0])
                        actual.append('hot blocks: %s' %
                                      sorted(b.label for b in common))

            assert_test_output(testcase, actual, 'Invalid source code output',
                               expected_output)
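(Note: each run_case method in these examples is a hook of mypy's data-driven test framework. Below is a minimal sketch of how such a method is typically attached to a suite class so that the framework calls it once per case; it assumes the MypycDataSuite base class and the assert_test_output helper live in mypyc/test/testutil.py, and 'example.test' plus TestExampleSuite are hypothetical names used only for illustration.)

from mypy.test.data import DataDrivenTestCase
from mypyc.test.testutil import MypycDataSuite, assert_test_output


class TestExampleSuite(MypycDataSuite):
    # Hypothetical .test data file; the framework generates one test item
    # per [case ...] block found in it and calls run_case for each.
    files = ['example.test']
    base_path = 'tmp'  # run each case inside the temporary test directory

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        # testcase.input and testcase.output hold the case's source and
        # expected-output sections; a real suite would transform the input
        # (e.g. build IR) instead of echoing it back.
        actual = list(testcase.input)
        assert_test_output(testcase, actual, 'Invalid output', testcase.output)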
Code example #2
File: test_refcount.py  Project: wimax-grapl/mypy
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a runtime checking transformation test case."""
        options = infer_ir_build_options_from_test_name(testcase.name)
        if options is None:
            # Skipped test case
            return
        with use_custom_builtins(
                os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            expected_output = remove_comment_lines(testcase.output)
            expected_output = replace_native_int(expected_output)
            expected_output = replace_word_size(expected_output)
            try:
                ir = build_ir_for_single_file(testcase.input, options)
            except CompileError as e:
                actual = e.messages
            else:
                actual = []
                for fn in ir:
                    if (fn.name == TOP_LEVEL_NAME
                            and not testcase.name.endswith('_toplevel')):
                        continue
                    insert_uninit_checks(fn)
                    insert_ref_count_opcodes(fn)
                    actual.extend(format_func(fn))

            assert_test_output(testcase, actual, 'Invalid source code output',
                               expected_output)
Code example #3
File: test_analysis.py  Project: llchan/mypy
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a data-flow analysis test case."""

        with use_custom_builtins(
                os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            # replace native_int with platform specific ints
            int_format_str = 'int32' if IS_32_BIT_PLATFORM else 'int64'
            testcase.output = [
                s.replace('native_int', int_format_str)
                for s in testcase.output
            ]
            try:
                ir = build_ir_for_single_file(testcase.input)
            except CompileError as e:
                actual = e.messages
            else:
                actual = []
                for fn in ir:
                    if (fn.name == TOP_LEVEL_NAME
                            and not testcase.name.endswith('_toplevel')):
                        continue
                    exceptions.insert_exception_handling(fn)
                    actual.extend(format_func(fn))
                    cfg = analysis.get_cfg(fn.blocks)

                    args = set(reg for reg, i in fn.env.indexes.items()
                               if i < len(fn.args))

                    name = testcase.name
                    if name.endswith('_MaybeDefined'):
                        # Forward, maybe
                        analysis_result = analysis.analyze_maybe_defined_regs(
                            fn.blocks, cfg, args)
                    elif name.endswith('_Liveness'):
                        # Backward, maybe
                        analysis_result = analysis.analyze_live_regs(
                            fn.blocks, cfg)
                    elif name.endswith('_MustDefined'):
                        # Forward, must
                        analysis_result = analysis.analyze_must_defined_regs(
                            fn.blocks, cfg, args, regs=fn.env.regs())
                    elif name.endswith('_BorrowedArgument'):
                        # Forward, must
                        analysis_result = analysis.analyze_borrowed_arguments(
                            fn.blocks, cfg, args)
                    else:
                        assert False, 'No recognized _AnalysisName suffix in test case'

                    for key in sorted(analysis_result.before.keys(),
                                      key=lambda x: (x[0].label, x[1])):
                        pre = ', '.join(
                            sorted(reg.name
                                   for reg in analysis_result.before[key]))
                        post = ', '.join(
                            sorted(reg.name
                                   for reg in analysis_result.after[key]))
                        actual.append('%-8s %-23s %s' %
                                      ((key[0].label, key[1]), '{%s}' % pre,
                                       '{%s}' % post))
            assert_test_output(testcase, actual, 'Invalid source code output')
Code example #4
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a runtime checking transformation test case."""
        # Kind of hacky. Not sure if we need more structure here.
        options = CompilerOptions(strip_asserts='StripAssert' in testcase.name)
        with use_custom_builtins(
                os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            expected_output = remove_comment_lines(testcase.output)
            expected_output = replace_native_int(expected_output)
            expected_output = replace_word_size(expected_output)
            name = testcase.name
            # If this is specific to some bit width, always pass if platform doesn't match.
            if name.endswith('_64bit') and IS_32_BIT_PLATFORM:
                return
            if name.endswith('_32bit') and not IS_32_BIT_PLATFORM:
                return
            try:
                ir = build_ir_for_single_file(testcase.input, options)
            except CompileError as e:
                actual = e.messages
            else:
                actual = []
                for fn in ir:
                    if (fn.name == TOP_LEVEL_NAME
                            and not name.endswith('_toplevel')):
                        continue
                    actual.extend(format_func(fn))

            assert_test_output(testcase, actual, 'Invalid source code output',
                               expected_output)
Code example #5
File: test_emitmodule.py  Project: mrwright/mypyc
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        with use_custom_builtins(
                os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            # Build the program.
            text = '\n'.join(testcase.input)

            options = Options()
            options.use_builtins_fixtures = True
            options.show_traceback = True
            options.strict_optional = True
            options.python_version = (3, 6)
            options.export_types = True
            source = build.BuildSource('prog.py', 'prog', text)

            try:
                ctext = emitmodule.compile_modules_to_c(
                    sources=[source],
                    module_names=['prog'],
                    options=options,
                    alt_lib_path=test_temp_dir)
                out = ctext.splitlines()
            except CompileError as e:
                out = e.messages

            # Verify output.
            assert_test_output(testcase, out, 'Invalid output')
Code example #6
File: test_irbuild.py  Project: nbaskers20/mypy
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a runtime checking transformation test case."""
        # Kind of hacky. Not sure if we need more structure here.
        options = CompilerOptions(strip_asserts='StripAssert' in testcase.name)
        with use_custom_builtins(
                os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            expected_output = remove_comment_lines(testcase.output)
            # replace native_int with platform specific ints
            int_format_str = 'int32' if IS_32_BIT_PLATFORM else 'int64'
            expected_output = [
                s.replace('native_int', int_format_str)
                for s in expected_output
            ]
            try:
                ir = build_ir_for_single_file(testcase.input, options)
            except CompileError as e:
                actual = e.messages
            else:
                actual = []
                for fn in ir:
                    if (fn.name == TOP_LEVEL_NAME
                            and not testcase.name.endswith('_toplevel')):
                        continue
                    actual.extend(format_func(fn))

            assert_test_output(testcase, actual, 'Invalid source code output',
                               expected_output)
Code example #7
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a data-flow analysis test case."""

        with use_custom_builtins(
                os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            testcase.output = replace_native_int(testcase.output)
            try:
                ir = build_ir_for_single_file(testcase.input)
            except CompileError as e:
                actual = e.messages
            else:
                actual = []
                for fn in ir:
                    if (fn.name == TOP_LEVEL_NAME
                            and not testcase.name.endswith('_toplevel')):
                        continue
                    exceptions.insert_exception_handling(fn)
                    actual.extend(format_func(fn))
                    cfg = dataflow.get_cfg(fn.blocks)
                    args = set(fn.arg_regs)  # type: Set[Value]
                    name = testcase.name
                    if name.endswith('_MaybeDefined'):
                        # Forward, maybe
                        analysis_result = dataflow.analyze_maybe_defined_regs(
                            fn.blocks, cfg, args)
                    elif name.endswith('_Liveness'):
                        # Backward, maybe
                        analysis_result = dataflow.analyze_live_regs(
                            fn.blocks, cfg)
                    elif name.endswith('_MustDefined'):
                        # Forward, must
                        analysis_result = dataflow.analyze_must_defined_regs(
                            fn.blocks,
                            cfg,
                            args,
                            regs=all_values(fn.arg_regs, fn.blocks))
                    elif name.endswith('_BorrowedArgument'):
                        # Forward, must
                        analysis_result = dataflow.analyze_borrowed_arguments(
                            fn.blocks, cfg, args)
                    else:
                        assert False, 'No recognized _AnalysisName suffix in test case'

                    names = generate_names_for_ir(fn.arg_regs, fn.blocks)

                    for key in sorted(analysis_result.before.keys(),
                                      key=lambda x: (x[0].label, x[1])):
                        pre = ', '.join(
                            sorted(names[reg]
                                   for reg in analysis_result.before[key]))
                        post = ', '.join(
                            sorted(names[reg]
                                   for reg in analysis_result.after[key]))
                        actual.append('%-8s %-23s %s' %
                                      ((key[0].label, key[1]), '{%s}' % pre,
                                       '{%s}' % post))
            assert_test_output(testcase, actual, 'Invalid source code output')
Code example #8
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        # Parse options from test case description (arguments must not have spaces)
        text = '\n'.join(testcase.input)
        m = re.search(r'# *cmd: *(.*)', text)
        assert m is not None, 'Test case missing "# cmd: <files>" section'
        args = m.group(1).split()

        # Write main program to run (not compiled)
        program = '_%s.py' % testcase.name
        program_path = os.path.join(test_temp_dir, program)
        with open(program_path, 'w') as f:
            f.write(text)

        out = b''
        try:
            # Compile program
            cmd = subprocess.run(
                [sys.executable,
                 os.path.join(base_path, 'scripts', 'mypyc')] + args,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                cwd='tmp')
            if 'ErrorOutput' in testcase.name:
                out += cmd.stdout

            if cmd.returncode == 0:
                # Run main program
                out += subprocess.check_output([python3_path, program],
                                               cwd='tmp')
        finally:
            suffix = 'pyd' if sys.platform == 'win32' else 'so'
            so_paths = glob.glob('tmp/**/*.{}'.format(suffix), recursive=True)
            for path in so_paths:
                os.remove(path)

        # Strip out 'tmp/' from error message paths in the testcase output,
        # due to a mismatch between this test and mypy's test suite.
        expected = [x.replace('tmp/', '') for x in testcase.output]

        # Verify output
        actual = normalize_error_messages(out.decode().splitlines())
        assert_test_output(testcase,
                           actual,
                           'Invalid output',
                           expected=expected)
Code example #9
File: test_genops.py  Project: sinancepel/mypyc
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a runtime checking transformation test case."""
        with use_custom_builtins(
                os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            expected_output = remove_comment_lines(testcase.output)

            try:
                ir = build_ir_for_single_file(testcase.input)
            except CompileError as e:
                actual = e.messages
            else:
                actual = []
                for fn in ir:
                    if (fn.name == TOP_LEVEL_NAME
                            and not testcase.name.endswith('_toplevel')):
                        continue
                    actual.extend(format_func(fn))

            assert_test_output(testcase, actual, 'Invalid source code output',
                               expected_output)
Code example #10
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a reference count opcode insertion transform test case."""

        with use_custom_builtins(
                os.path.join(test_data_prefix, ICODE_GEN_BUILTINS), testcase):
            try:
                ir = build_ir_for_single_file(testcase.input)
            except CompileError as e:
                actual = e.messages
            else:
                # Expect one function + module top level function.
                assert len(
                    ir
                ) == 2, "Only 1 function definition expected per test case"
                fn = ir[0]
                insert_exception_handling(fn)
                insert_ref_count_opcodes(fn)
                actual = format_func(fn)
                actual = actual[actual.index('L0:'):]

            assert_test_output(testcase, actual, 'Invalid source code output')
Code example #11
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a runtime checking transformation test case."""
        with use_custom_builtins(
                os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            expected_output = remove_comment_lines(testcase.output)

            program_text = '\n'.join(testcase.input)

            options = Options()
            options.use_builtins_fixtures = True
            options.show_traceback = True
            options.strict_optional = True
            options.python_version = (3, 6)
            options.export_types = True

            source = build.BuildSource('main', '__main__', program_text)
            try:
                # Construct the input as a single file.
                # Parse and type check the input program.
                result = build.build(sources=[source],
                                     options=options,
                                     alt_lib_path=test_temp_dir)
            except CompileError as e:
                actual = e.messages
            else:
                if result.errors:
                    actual = result.errors
                else:
                    modules = genops.build_ir([result.files['__main__']],
                                              result.types)
                    module = modules[0][1]
                    actual = []
                    for fn in module.functions:
                        if is_empty_module_top_level(fn):
                            # Skip trivial module top levels that only return.
                            continue
                        actual.extend(format_func(fn))

            assert_test_output(testcase, actual, 'Invalid source code output',
                               expected_output)
Code example #12
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a runtime checking transformation test case."""
        options = infer_ir_build_options_from_test_name(testcase.name)
        if options is None:
            # Skipped test case
            return
        with use_custom_builtins(
                os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            try:
                ir = build_ir_for_single_file2(testcase.input, options)
            except CompileError as e:
                actual = e.messages
            else:
                actual = []
                for cl in ir.classes:
                    if cl.name.startswith('_'):
                        continue
                    actual.append('{}: [{}]'.format(
                        cl.name,
                        ', '.join(sorted(cl._always_initialized_attrs))))

            assert_test_output(testcase, actual, 'Invalid test output',
                               testcase.output)
Code example #13
File: test_run.py  Project: rheehot/mypy
    def run_case_step(self, testcase: DataDrivenTestCase,
                      incremental_step: int) -> None:
        bench = testcase.config.getoption(
            '--bench', False) and 'Benchmark' in testcase.name

        options = Options()
        options.use_builtins_fixtures = True
        options.show_traceback = True
        options.strict_optional = True
        # N.B: We try to (and ought to!) run with the current
        # version of python, since we are going to link and run
        # against the current version of python.
        # But a lot of the tests use type annotations so we can't say it is 3.5.
        options.python_version = max(sys.version_info[:2], (3, 6))
        options.export_types = True
        options.preserve_asts = True
        options.incremental = False

        # Avoid checking modules/packages named 'unchecked', to provide a way
        # to test interacting with code we don't have types for.
        options.per_module_options['unchecked.*'] = {'follow_imports': 'error'}

        source = build.BuildSource('native.py', 'native', None)
        sources = [source]
        module_names = ['native']
        module_paths = ['native.py']

        # Hard code another module name to compile in the same compilation unit.
        to_delete = []
        for fn, text in testcase.files:
            fn = os.path.relpath(fn, test_temp_dir)

            if os.path.basename(fn).startswith('other') and fn.endswith('.py'):
                name = os.path.basename(fn).split('.')[0]
                module_names.append(name)
                sources.append(build.BuildSource(fn, name, None))
                to_delete.append(fn)
                module_paths.append(fn)

                shutil.copyfile(
                    fn,
                    os.path.join(os.path.dirname(fn),
                                 name + '_interpreted.py'))

        for source in sources:
            options.per_module_options.setdefault(source.module,
                                                  {})['mypyc'] = True

        separate = (self.get_separate('\n'.join(
            testcase.input), incremental_step) if self.separate else False)

        groups = construct_groups(sources, separate, len(module_names) > 1)

        try:
            result = emitmodule.parse_and_typecheck(sources=sources,
                                                    options=options,
                                                    alt_lib_path='.')
            errors = Errors()
            compiler_options = CompilerOptions(multi_file=self.multi_file,
                                               separate=self.separate)
            ir, cfiles = emitmodule.compile_modules_to_c(
                result,
                compiler_options=compiler_options,
                errors=errors,
                groups=groups,
            )
            if errors.num_errors:
                errors.flush_errors()
                assert False, "Compile error"
        except CompileError as e:
            for line in e.messages:
                print(line)
            assert False, 'Compile error'

        # Check that serialization works on this IR
        check_serialization_roundtrip(ir)

        setup_file = os.path.abspath(os.path.join(WORKDIR, 'setup.py'))
        # We pass the C file information to the build script via setup.py unfortunately
        with open(setup_file, 'w', encoding='utf-8') as f:
            f.write(
                setup_format.format(module_paths, separate, cfiles,
                                    self.multi_file))

        if not run_setup(setup_file, ['build_ext', '--inplace']):
            if testcase.config.getoption('--mypyc-showc'):
                show_c(cfiles)
            assert False, "Compilation failed"

        # Assert that an output file got created
        suffix = 'pyd' if sys.platform == 'win32' else 'so'
        assert glob.glob('native.*.{}'.format(suffix))

        driver_path = 'driver.py'
        env = os.environ.copy()
        env['MYPYC_RUN_BENCH'] = '1' if bench else '0'

        # XXX: This is an ugly hack.
        if 'MYPYC_RUN_GDB' in os.environ:
            if platform.system() == 'Darwin':
                subprocess.check_call(
                    ['lldb', '--', sys.executable, driver_path], env=env)
                assert False, (
                    "Test can't pass in lldb mode. (And remember to pass -s to "
                    "pytest)")
            elif platform.system() == 'Linux':
                subprocess.check_call(
                    ['gdb', '--args', sys.executable, driver_path], env=env)
                assert False, (
                    "Test can't pass in gdb mode. (And remember to pass -s to "
                    "pytest)")
            else:
                assert False, 'Unsupported OS'

        proc = subprocess.Popen([sys.executable, driver_path],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                env=env)
        output = proc.communicate()[0].decode('utf8')
        outlines = output.splitlines()

        if testcase.config.getoption('--mypyc-showc'):
            show_c(cfiles)
        if proc.returncode != 0:
            print()
            print('*** Exit status: %d' % proc.returncode)

        # Verify output.
        if bench:
            print('Test output:')
            print(output)
        else:
            if incremental_step == 1:
                msg = 'Invalid output'
                expected = testcase.output
            else:
                msg = 'Invalid output (step {})'.format(incremental_step)
                expected = testcase.output2.get(incremental_step, [])

            assert_test_output(testcase, outlines, msg, expected)

        if incremental_step > 1 and options.incremental:
            suffix = '' if incremental_step == 2 else str(incremental_step - 1)
            expected_rechecked = testcase.expected_rechecked_modules.get(
                incremental_step - 1)
            if expected_rechecked is not None:
                assert_module_equivalence('rechecked' + suffix,
                                          expected_rechecked,
                                          result.manager.rechecked_modules)
            expected_stale = testcase.expected_stale_modules.get(
                incremental_step - 1)
            if expected_stale is not None:
                assert_module_equivalence('stale' + suffix, expected_stale,
                                          result.manager.stale_modules)

        assert proc.returncode == 0
Code example #14
File: test_run.py  Project: vin-turnier/mypy
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        bench = testcase.config.getoption('--bench', False) and 'Benchmark' in testcase.name

        # setup.py wants to be run from the root directory of the package, which we accommodate
        # by chdiring into tmp/
        with use_custom_builtins(os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase), (
                chdir_manager('tmp')):
            text = '\n'.join(testcase.input)

            options = Options()
            options.use_builtins_fixtures = True
            options.show_traceback = True
            options.strict_optional = True
            # N.B: We try to (and ought to!) run with the current
            # version of python, since we are going to link and run
            # against the current version of python.
            # But a lot of the tests use type annotations so we can't say it is 3.5.
            options.python_version = max(sys.version_info[:2], (3, 6))
            options.export_types = True
            options.preserve_asts = True

            # Avoid checking modules/packages named 'unchecked', to provide a way
            # to test interacting with code we don't have types for.
            options.per_module_options['unchecked.*'] = {'follow_imports': 'error'}

            workdir = 'build'
            os.mkdir(workdir)

            source_path = 'native.py'
            with open(source_path, 'w', encoding='utf-8') as f:
                f.write(text)
            with open('interpreted.py', 'w', encoding='utf-8') as f:
                f.write(text)

            shutil.copyfile(TESTUTIL_PATH, 'testutil.py')

            source = build.BuildSource(source_path, 'native', text)
            sources = [source]
            module_names = ['native']
            module_paths = [os.path.abspath('native.py')]

            # Hard code another module name to compile in the same compilation unit.
            to_delete = []
            for fn, text in testcase.files:
                fn = os.path.relpath(fn, test_temp_dir)

                if os.path.basename(fn).startswith('other'):
                    name = os.path.basename(fn).split('.')[0]
                    module_names.append(name)
                    sources.append(build.BuildSource(fn, name, text))
                    to_delete.append(fn)
                    module_paths.append(os.path.abspath(fn))

                    shutil.copyfile(fn,
                                    os.path.join(os.path.dirname(fn), name + '_interpreted.py'))

            for source in sources:
                options.per_module_options.setdefault(source.module, {})['mypyc'] = True

            if len(module_names) == 1:
                lib_name = None  # type: Optional[str]
            else:
                lib_name = shared_lib_name([source.module for source in sources])

            try:
                result = emitmodule.parse_and_typecheck(
                    sources=sources,
                    options=options,
                    alt_lib_path='.')
                errors = Errors()
                compiler_options = CompilerOptions(multi_file=self.multi_file)
                cfiles = emitmodule.compile_modules_to_c(
                    result,
                    module_names=module_names,
                    shared_lib_name=lib_name,
                    compiler_options=compiler_options,
                    errors=errors,
                )
                if errors.num_errors:
                    errors.flush_errors()
                    assert False, "Compile error"
            except CompileError as e:
                for line in e.messages:
                    print(line)
                assert False, 'Compile error'

            for cfile, ctext in cfiles:
                with open(os.path.join(workdir, cfile), 'w', encoding='utf-8') as f:
                    f.write(ctext)

            setup_file = os.path.abspath(os.path.join(workdir, 'setup.py'))
            with open(setup_file, 'w') as f:
                f.write(setup_format.format(module_paths))

            if not run_setup(setup_file, ['build_ext', '--inplace']):
                if testcase.config.getoption('--mypyc-showc'):
                    show_c(cfiles)
                assert False, "Compilation failed"

            # Assert that an output file got created
            suffix = 'pyd' if sys.platform == 'win32' else 'so'
            assert glob.glob('native.*.{}'.format(suffix))

            for p in to_delete:
                os.remove(p)

            driver_path = 'driver.py'
            env = os.environ.copy()
            env['MYPYC_RUN_BENCH'] = '1' if bench else '0'

            # XXX: This is an ugly hack.
            if 'MYPYC_RUN_GDB' in os.environ:
                if platform.system() == 'Darwin':
                    subprocess.check_call(['lldb', '--', sys.executable, driver_path], env=env)
                    assert False, ("Test can't pass in lldb mode. (And remember to pass -s to "
                                   "pytest)")
                elif platform.system() == 'Linux':
                    subprocess.check_call(['gdb', '--args', sys.executable, driver_path], env=env)
                    assert False, ("Test can't pass in gdb mode. (And remember to pass -s to "
                                   "pytest)")
                else:
                    assert False, 'Unsupported OS'

            proc = subprocess.Popen([sys.executable, driver_path], stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT, env=env)
            output = proc.communicate()[0].decode('utf8')
            outlines = output.splitlines()

            if testcase.config.getoption('--mypyc-showc'):
                show_c(cfiles)
            if proc.returncode != 0:
                print()
                print('*** Exit status: %d' % proc.returncode)

            # Verify output.
            if bench:
                print('Test output:')
                print(output)
            else:
                assert_test_output(testcase, outlines, 'Invalid output')

            assert proc.returncode == 0
Code example #15
File: test_run.py  Project: 97littleleaf11/mypy
    def run_case_step(self, testcase: DataDrivenTestCase, incremental_step: int) -> None:
        bench = testcase.config.getoption('--bench', False) and 'Benchmark' in testcase.name

        options = Options()
        options.use_builtins_fixtures = True
        options.show_traceback = True
        options.strict_optional = True
        options.python_version = sys.version_info[:2]
        options.export_types = True
        options.preserve_asts = True
        options.incremental = self.separate

        # Avoid checking modules/packages named 'unchecked', to provide a way
        # to test interacting with code we don't have types for.
        options.per_module_options['unchecked.*'] = {'follow_imports': 'error'}

        source = build.BuildSource('native.py', 'native', None)
        sources = [source]
        module_names = ['native']
        module_paths = ['native.py']

        # Hard code another module name to compile in the same compilation unit.
        to_delete = []
        for fn, text in testcase.files:
            fn = os.path.relpath(fn, test_temp_dir)

            if os.path.basename(fn).startswith('other') and fn.endswith('.py'):
                name = fn.split('.')[0].replace(os.sep, '.')
                module_names.append(name)
                sources.append(build.BuildSource(fn, name, None))
                to_delete.append(fn)
                module_paths.append(fn)

                shutil.copyfile(fn,
                                os.path.join(os.path.dirname(fn), name + '_interpreted.py'))

        for source in sources:
            options.per_module_options.setdefault(source.module, {})['mypyc'] = True

        separate = (self.get_separate('\n'.join(testcase.input), incremental_step) if self.separate
                    else False)

        groups = construct_groups(sources, separate, len(module_names) > 1)

        try:
            compiler_options = CompilerOptions(multi_file=self.multi_file, separate=self.separate)
            result = emitmodule.parse_and_typecheck(
                sources=sources,
                options=options,
                compiler_options=compiler_options,
                groups=groups,
                alt_lib_path='.')
            errors = Errors()
            ir, cfiles = emitmodule.compile_modules_to_c(
                result,
                compiler_options=compiler_options,
                errors=errors,
                groups=groups,
            )
            if errors.num_errors:
                errors.flush_errors()
                assert False, "Compile error"
        except CompileError as e:
            for line in e.messages:
                print(fix_native_line_number(line, testcase.file, testcase.line))
            assert False, 'Compile error'

        # Check that serialization works on this IR. (Only on the first
        # step because the returned ir only includes updated code.)
        if incremental_step == 1:
            check_serialization_roundtrip(ir)

        opt_level = int(os.environ.get('MYPYC_OPT_LEVEL', 0))
        debug_level = int(os.environ.get('MYPYC_DEBUG_LEVEL', 0))

        setup_file = os.path.abspath(os.path.join(WORKDIR, 'setup.py'))
        # We pass the C file information to the build script via setup.py unfortunately
        with open(setup_file, 'w', encoding='utf-8') as f:
            f.write(setup_format.format(module_paths,
                                        separate,
                                        cfiles,
                                        self.multi_file,
                                        opt_level,
                                        debug_level))

        if not run_setup(setup_file, ['build_ext', '--inplace']):
            if testcase.config.getoption('--mypyc-showc'):
                show_c(cfiles)
            assert False, "Compilation failed"

        # Assert that an output file got created
        suffix = 'pyd' if sys.platform == 'win32' else 'so'
        assert glob.glob(f'native.*.{suffix}') or glob.glob(f'native.{suffix}')

        driver_path = 'driver.py'
        if not os.path.isfile(driver_path):
            # No driver.py provided by test case. Use the default one
            # (mypyc/test-data/driver/driver.py) that calls each
            # function named test_*.
            default_driver = os.path.join(
                os.path.dirname(__file__), '..', 'test-data', 'driver', 'driver.py')
            shutil.copy(default_driver, driver_path)
        env = os.environ.copy()
        env['MYPYC_RUN_BENCH'] = '1' if bench else '0'

        # XXX: This is an ugly hack.
        if 'MYPYC_RUN_GDB' in os.environ:
            if platform.system() == 'Darwin':
                subprocess.check_call(['lldb', '--', sys.executable, driver_path], env=env)
                assert False, ("Test can't pass in lldb mode. (And remember to pass -s to "
                               "pytest)")
            elif platform.system() == 'Linux':
                subprocess.check_call(['gdb', '--args', sys.executable, driver_path], env=env)
                assert False, ("Test can't pass in gdb mode. (And remember to pass -s to "
                               "pytest)")
            else:
                assert False, 'Unsupported OS'

        proc = subprocess.Popen([sys.executable, driver_path], stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, env=env)
        output = proc.communicate()[0].decode('utf8')
        outlines = output.splitlines()

        if testcase.config.getoption('--mypyc-showc'):
            show_c(cfiles)
        if proc.returncode != 0:
            print()
            print('*** Exit status: %d' % proc.returncode)

        # Verify output.
        if bench:
            print('Test output:')
            print(output)
        else:
            if incremental_step == 1:
                msg = 'Invalid output'
                expected = testcase.output
            else:
                msg = f'Invalid output (step {incremental_step})'
                expected = testcase.output2.get(incremental_step, [])

            if not expected:
                # Tweak some line numbers, but only if the expected output is empty,
                # as tweaked output might not match expected output.
                outlines = [fix_native_line_number(line, testcase.file, testcase.line)
                            for line in outlines]
            assert_test_output(testcase, outlines, msg, expected)

        if incremental_step > 1 and options.incremental:
            suffix = '' if incremental_step == 2 else str(incremental_step - 1)
            expected_rechecked = testcase.expected_rechecked_modules.get(incremental_step - 1)
            if expected_rechecked is not None:
                assert_module_equivalence(
                    'rechecked' + suffix,
                    expected_rechecked, result.manager.rechecked_modules)
            expected_stale = testcase.expected_stale_modules.get(incremental_step - 1)
            if expected_stale is not None:
                assert_module_equivalence(
                    'stale' + suffix,
                    expected_stale, result.manager.stale_modules)

        assert proc.returncode == 0
Code example #16
File: test_run.py  Project: mrwright/mypyc
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        bench = testcase.config.getoption(
            '--bench', False) and 'Benchmark' in testcase.name

        with use_custom_builtins(
                os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            text = '\n'.join(testcase.input)

            options = Options()
            options.use_builtins_fixtures = True
            options.show_traceback = True
            options.strict_optional = True
            options.python_version = (3, 6)
            options.export_types = True

            os.mkdir('tmp/py')
            source_path = 'tmp/py/native.py'
            with open(source_path, 'w') as f:
                f.write(text)
            with open('tmp/interpreted.py', 'w') as f:
                f.write(text)

            source = build.BuildSource(source_path, 'native', text)
            sources = [source]
            module_names = ['native']

            # Hard code another module name to compile in the same compilation unit.
            to_delete = []
            for fn, text in testcase.files:
                if os.path.basename(fn).startswith('other'):
                    name = os.path.basename(fn).split('.')[0]
                    module_names.append(name)
                    sources.append(build.BuildSource(fn, name, text))
                    to_delete.append(fn)

            try:
                ctext = emitmodule.compile_modules_to_c(
                    sources=sources,
                    module_names=module_names,
                    options=options,
                    alt_lib_path=test_temp_dir)
            except CompileError as e:
                for line in e.messages:
                    print(line)
                assert False, 'Compile error'

            # If compiling more than one native module, compile a shared
            # library that contains all the modules. Also generate shims that
            # just call into the shared lib.
            use_shared_lib = len(module_names) > 1

            if use_shared_lib:
                common_path = os.path.join(test_temp_dir, '__shared_stuff.c')
                with open(common_path, 'w') as f:
                    f.write(ctext)
                try:
                    shared_lib = buildc.build_shared_lib_for_modules(
                        common_path, module_names)
                except buildc.BuildError as err:
                    show_c_error(common_path, err.output)
                    raise

            for mod in module_names:
                cpath = os.path.join(test_temp_dir, '%s.c' % mod)
                with open(cpath, 'w') as f:
                    f.write(ctext)

                try:
                    if use_shared_lib:
                        native_lib_path = buildc.build_c_extension_shim(
                            mod, shared_lib)
                    else:
                        native_lib_path = buildc.build_c_extension(
                            cpath, mod, preserve_setup=True)
                except buildc.BuildError as err:
                    show_c_error(cpath, err.output)
                    raise

            # # TODO: is the location of the shared lib good?
            # shared_lib = buildc.build_shared_lib_for_modules(cpath)

            for p in to_delete:
                os.remove(p)

            driver_path = os.path.join(test_temp_dir, 'driver.py')
            env = os.environ.copy()
            path = [
                os.path.dirname(native_lib_path),
                os.path.join(PREFIX, 'extensions')
            ]
            env['PYTHONPATH'] = ':'.join(path)
            env['MYPYC_RUN_BENCH'] = '1' if bench else '0'
            env['LD_LIBRARY_PATH'] = os.getcwd()

            proc = subprocess.Popen(['python', driver_path],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT,
                                    env=env)
            output, _ = proc.communicate()
            output = output.decode('utf8')
            outlines = output.splitlines()

            heading('Generated C')
            with open(cpath) as f:
                print(f.read().rstrip())
            heading('End C')
            if proc.returncode != 0:
                print()
                print('*** Exit status: %d' % proc.returncode)

            # Verify output.
            if bench:
                print('Test output:')
                print(output)
            else:
                assert_test_output(testcase, outlines, 'Invalid output')

            assert proc.returncode == 0
Code example #17
File: test_run.py  Project: miknoj/mypyc
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        bench = testcase.config.getoption('--bench', False) and 'Benchmark' in testcase.name

        # setup.py wants to be run from the root directory of the package, which we accommodate
        # by chdiring into tmp/
        with use_custom_builtins(os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase), (
                chdir_manager('tmp')):
            text = '\n'.join(testcase.input)

            options = Options()
            options.use_builtins_fixtures = True
            options.show_traceback = True
            options.strict_optional = True
            options.python_version = (3, 6)
            options.export_types = True

            workdir = 'build'
            os.mkdir(workdir)

            source_path = 'native.py'
            with open(source_path, 'w') as f:
                f.write(text)
            with open('interpreted.py', 'w') as f:
                f.write(text)

            source = build.BuildSource(source_path, 'native', text)
            sources = [source]
            module_names = ['native']
            module_paths = [os.path.abspath('native.py')]

            # Hard code another module name to compile in the same compilation unit.
            to_delete = []
            for fn, text in testcase.files:
                fn = os.path.relpath(fn, test_temp_dir)

                if os.path.basename(fn).startswith('other'):
                    name = os.path.basename(fn).split('.')[0]
                    module_names.append(name)
                    sources.append(build.BuildSource(fn, name, text))
                    to_delete.append(fn)
                    module_paths.append(os.path.abspath(fn))

            for source in sources:
                options.per_module_options.setdefault(source.module, {})['mypyc'] = True

            try:
                result = emitmodule.parse_and_typecheck(
                    sources=sources,
                    options=options,
                    alt_lib_path='.')
                ctext = emitmodule.compile_modules_to_c(
                    result,
                    module_names=module_names,
                    use_shared_lib=len(module_names) > 1)
            except CompileError as e:
                for line in e.messages:
                    print(line)
                assert False, 'Compile error'

            cpath = os.path.abspath(os.path.join(workdir, '__native.c'))
            with open(cpath, 'w') as f:
                f.write(ctext)

            setup_file = os.path.abspath(os.path.join(workdir, 'setup.py'))
            with open(setup_file, 'w') as f:
                f.write(setup_format.format(module_paths))

            run_setup(setup_file, ['build_ext', '--inplace'])
            # Oh argh run_setup doesn't propagate failure. For now we'll just assert
            # that the file is there.
            if not glob.glob('native.*.so'):
                show_c(ctext)
                assert False, "Compilation failed"

            for p in to_delete:
                os.remove(p)

            driver_path = 'driver.py'
            env = os.environ.copy()
            env['MYPYC_RUN_BENCH'] = '1' if bench else '0'

            # XXX: This is an ugly hack.
            if 'MYPYC_RUN_GDB' in os.environ:
                if platform.system() == 'Darwin':
                    subprocess.check_call(['lldb', '--', 'python', driver_path], env=env)
                    assert False, ("Test can't pass in lldb mode. (And remember to pass -s to "
                                   "pytest)")
                elif platform.system() == 'Linux':
                    subprocess.check_call(['gdb', '--args', 'python', driver_path], env=env)
                    assert False, ("Test can't pass in gdb mode. (And remember to pass -s to "
                                   "pytest)")
                else:
                    assert False, 'Unsupported OS'

            proc = subprocess.Popen(['python', driver_path], stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT, env=env)
            output, _ = proc.communicate()
            output = output.decode('utf8')
            outlines = output.splitlines()

            show_c(ctext)
            if proc.returncode != 0:
                print()
                print('*** Exit status: %d' % proc.returncode)

            # Verify output.
            if bench:
                print('Test output:')
                print(output)
            else:
                assert_test_output(testcase, outlines, 'Invalid output')

            assert proc.returncode == 0
Code example #18
File: test_analysis.py  Project: mrwright/mypyc
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a data-flow analysis test case."""

        with use_custom_builtins(os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            program_text = '\n'.join(testcase.input)

            options = Options()
            options.use_builtins_fixtures = True
            options.show_traceback = True
            options.python_version = (3, 6)
            options.export_types = True

            source = build.BuildSource('main', '__main__', program_text)
            try:
                # Construct the input as a single file.
                # Parse and type check the input program.
                result = build.build(sources=[source],
                                     options=options,
                                     alt_lib_path=test_temp_dir)
            except CompileError as e:
                actual = e.messages
            else:
                if result.errors:
                    actual = result.errors
                else:
                    modules = genops.build_ir([result.files['__main__']], result.types)
                    module = modules[0][1]
                    assert len(module.functions) == 2, (
                        "Only 1 function definition expected per test case")
                    fn = module.functions[0]
                    actual = format_func(fn)
                    actual = actual[actual.index('L0:'):]
                    cfg = analysis.get_cfg(fn.blocks)

                    args = set(reg for reg, i in fn.env.indexes.items() if i < len(fn.args))

                    name = testcase.name
                    if name.endswith('_MaybeDefined'):
                        # Forward, maybe
                        analysis_result = analysis.analyze_maybe_defined_regs(fn.blocks, cfg, args)
                    elif name.endswith('_Liveness'):
                        # Backward, maybe
                        analysis_result = analysis.analyze_live_regs(fn.blocks, cfg)
                    elif name.endswith('_MustDefined'):
                        # Forward, must
                        analysis_result = analysis.analyze_must_defined_regs(
                            fn.blocks, cfg, args,
                            regs=fn.env.regs())
                    elif name.endswith('_BorrowedArgument'):
                        # Forward, must
                        analysis_result = analysis.analyze_borrowed_arguments(fn.blocks, cfg, args)
                    else:
                        assert False, 'No recognized _AnalysisName suffix in test case'

                    actual.append('')
                    for key in sorted(analysis_result.before.keys(),
                                      key=lambda x: (x[0].label, x[1])):
                        pre = ', '.join(sorted(reg.name
                                               for reg in analysis_result.before[key]))
                        post = ', '.join(sorted(reg.name
                                                for reg in analysis_result.after[key]))
                        actual.append('%-8s %-23s %s' % ((key[0].label, key[1]),
                                                         '{%s}' % pre, '{%s}' % post))
            assert_test_output(testcase, actual, 'Invalid source code output')