예제 #1
0
def infer_ir_build_options_from_test_name(name: str) -> Optional[CompilerOptions]:
    """Look for magic substrings in test case name to set compiler options.

    Return None if the test case should be skipped (always pass).

    Supported naming conventions:

      *_64bit*:
          Run test case only on 64-bit platforms
      *_32bit*:
          Run test case only on 32-bit platforms
      *_python3_8* (or for any Python version):
          Use Python 3.8+ C API features (default: lowest supported version)
      *StripAssert*:
          Don't generate code for assert statements
    """
    # If this is specific to some bit width, always pass if platform doesn't match.
    if '_64bit' in name and IS_32_BIT_PLATFORM:
        return None
    if '_32bit' in name and not IS_32_BIT_PLATFORM:
        return None
    options = CompilerOptions(strip_asserts='StripAssert' in name,
                              capi_version=(3, 5))
    # A suffix like _python3_8 is used to set the target C API version.
    m = re.search(r'_python([3-9]+)_([0-9]+)(_|\b)', name)
    if m:
        options.capi_version = (int(m.group(1)), int(m.group(2)))
    elif '_py' in name or '_Python' in name:
        # Catch misspelled version suffixes that the regex didn't match.
        assert False, 'Invalid _py* suffix (should be _pythonX_Y): {}'.format(name)
    return options
예제 #2
0
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a runtime checking transformation test case."""
        # NOTE: the docstring above was previously placed after the first
        # statements, where it was a no-op string expression (PEP 257
        # requires the docstring to be the first statement).
        # Kind of hacky. Not sure if we need more structure here.
        options = CompilerOptions(strip_asserts='StripAssert' in testcase.name)
        with use_custom_builtins(
                os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            expected_output = remove_comment_lines(testcase.output)
            # Replace native_int with the platform-specific int type so the
            # expected output is portable across 32/64-bit platforms.
            int_format_str = 'int32' if IS_32_BIT_PLATFORM else 'int64'
            expected_output = [
                s.replace('native_int', int_format_str)
                for s in expected_output
            ]
            try:
                ir = build_ir_for_single_file(testcase.input, options)
            except CompileError as e:
                # Compile errors become the actual output to compare.
                actual = e.messages
            else:
                actual = []
                for fn in ir:
                    # Skip the module top level unless the test opts in
                    # with a _toplevel suffix.
                    if (fn.name == TOP_LEVEL_NAME
                            and not testcase.name.endswith('_toplevel')):
                        continue
                    actual.extend(format_func(fn))

            assert_test_output(testcase, actual, 'Invalid source code output',
                               expected_output)
예제 #3
0
파일: build.py 프로젝트: zomglings/mypy
def generate_c(sources: List[BuildSource], options: Options,
               shared_lib_name: Optional[str],
               compiler_options: Optional[CompilerOptions] = None
               ) -> Tuple[List[Tuple[str, str]], str]:
    """Drive the actual core compilation step.

    Returns the C source code and (for debugging) the pretty printed IR.
    """
    compiler_options = compiler_options or CompilerOptions()
    module_names = [source.module for source in sources]

    # First parse and type check; any mypy error aborts the build.
    start = time.time()
    try:
        result = emitmodule.parse_and_typecheck(sources, options)
    except CompileError as e:
        for line in e.messages:
            print(line)
        fail('Typechecking failure')

    checked = time.time()
    if compiler_options.verbose:
        print("Parsed and typechecked in {:.3f}s".format(checked - start))

    # Now compile the checked modules down to C, collecting the
    # pretty-printed IR into ops as a debugging aid.
    ops = []  # type: List[str]
    ctext = emitmodule.compile_modules_to_c(result, module_names, shared_lib_name,
                                            compiler_options=compiler_options, ops=ops)

    finished = time.time()
    if compiler_options.verbose:
        print("Compiled to C in {:.3f}s".format(finished - checked))

    return ctext, '\n'.join(ops)
예제 #4
0
def build_ir_for_single_file(
        input_lines: List[str],
        compiler_options: Optional[CompilerOptions] = None) -> List[FuncIR]:
    """Compile a single-module test program to IR and return its functions.

    Raises CompileError if the mypy build step reports any errors.
    """
    program_text = '\n'.join(input_lines)

    compiler_options = compiler_options or CompilerOptions()
    options = Options()
    options.show_traceback = True
    options.use_builtins_fixtures = True
    options.strict_optional = True
    options.python_version = (3, 6)
    options.export_types = True
    options.preserve_asts = True
    # Only the test module itself is compiled with mypyc.
    options.per_module_options['__main__'] = {'mypyc': True}

    source = build.BuildSource('main', '__main__', program_text)
    # Construct input as a single file.
    # Parse and type check the input program.
    result = build.build(sources=[source],
                         options=options,
                         alt_lib_path=test_temp_dir)
    if result.errors:
        raise CompileError(result.errors)
    _, modules, errors = genops.build_ir([result.files['__main__']],
                                         result.graph, result.types,
                                         compiler_options)
    assert errors == 0

    # Only one module (__main__) was built; return its function IR.
    module = modules[0][1]
    return module.functions
예제 #5
0
def build_ir_for_single_file(input_lines: List[str],
                             compiler_options: Optional[CompilerOptions] = None) -> List[FuncIR]:
    """Compile a single-module test program to IR and return its functions.

    Raises CompileError if the mypy build step reports errors; errors
    during IR construction fail the test via pytest.fail.
    """
    program_text = '\n'.join(input_lines)

    compiler_options = compiler_options or CompilerOptions()
    options = Options()
    options.show_traceback = True
    options.use_builtins_fixtures = True
    options.strict_optional = True
    options.python_version = (3, 6)
    options.export_types = True
    options.preserve_asts = True
    # Only the test module itself is compiled with mypyc.
    options.per_module_options['__main__'] = {'mypyc': True}

    source = build.BuildSource('main', '__main__', program_text)
    # Construct input as a single file.
    # Parse and type check the input program.
    result = build.build(sources=[source],
                         options=options,
                         alt_lib_path=test_temp_dir)
    if result.errors:
        raise CompileError(result.errors)

    errors = Errors()
    modules = build_ir(
        [result.files['__main__']], result.graph, result.types,
        Mapper({'__main__': None}),
        compiler_options, errors)
    if errors.num_errors:
        errors.flush_errors()
        pytest.fail('Errors while building IR')

    # Only one module (__main__) was built; return its function IR.
    module = list(modules.values())[0]
    return module.functions
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a runtime checking transformation test case."""
        # NOTE: the docstring above was previously placed after the first
        # statements, where it was a no-op string expression (PEP 257
        # requires the docstring to be the first statement).
        # Kind of hacky. Not sure if we need more structure here.
        options = CompilerOptions(strip_asserts='StripAssert' in testcase.name)
        with use_custom_builtins(
                os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            expected_output = remove_comment_lines(testcase.output)
            expected_output = replace_native_int(expected_output)
            expected_output = replace_word_size(expected_output)
            name = testcase.name
            # If this is specific to some bit width, always pass if platform doesn't match.
            if name.endswith('_64bit') and IS_32_BIT_PLATFORM:
                return
            if name.endswith('_32bit') and not IS_32_BIT_PLATFORM:
                return
            try:
                ir = build_ir_for_single_file(testcase.input, options)
            except CompileError as e:
                # Compile errors become the actual output to compare.
                actual = e.messages
            else:
                actual = []
                for fn in ir:
                    # Skip the module top level unless the test opts in
                    # with a _toplevel suffix.
                    if (fn.name == TOP_LEVEL_NAME
                            and not name.endswith('_toplevel')):
                        continue
                    actual.extend(format_func(fn))

            assert_test_output(testcase, actual, 'Invalid source code output',
                               expected_output)
예제 #7
0
파일: testutil.py 프로젝트: vemel/mypy
def build_ir_for_single_file(
        input_lines: List[str],
        compiler_options: Optional[CompilerOptions] = None) -> List[FuncIR]:
    """Compile a single-module test program to IR and return its functions.

    Raises CompileError if either the mypy build step or the IR build
    step reports errors.
    """
    program_text = '\n'.join(input_lines)

    # By default generate IR compatible with the earliest supported Python C API.
    # If a test needs more recent API features, this should be overridden.
    compiler_options = compiler_options or CompilerOptions(capi_version=(3, 5))
    options = Options()
    options.show_traceback = True
    options.use_builtins_fixtures = True
    options.strict_optional = True
    options.python_version = (3, 6)
    options.export_types = True
    options.preserve_asts = True
    # Only the test module itself is compiled with mypyc.
    options.per_module_options['__main__'] = {'mypyc': True}

    source = build.BuildSource('main', '__main__', program_text)
    # Construct input as a single file.
    # Parse and type check the input program.
    result = build.build(sources=[source],
                         options=options,
                         alt_lib_path=test_temp_dir)
    if result.errors:
        raise CompileError(result.errors)

    errors = Errors()
    modules = build_ir([result.files['__main__']], result.graph, result.types,
                       Mapper({'__main__': None}), compiler_options, errors)
    if errors.num_errors:
        raise CompileError(errors.new_messages())

    # Only one module (__main__) was built; return its function IR.
    module = list(modules.values())[0]
    return module.functions
예제 #8
0
파일: build.py 프로젝트: wade1990/mypy
def generate_c(
    sources: List[BuildSource],
    options: Options,
    groups: emitmodule.Groups,
    compiler_options: Optional[CompilerOptions] = None
) -> Tuple[List[List[Tuple[str, str]]], str]:
    """Drive the actual core compilation step.

    The groups argument describes how modules are assigned to C
    extension modules. See the comments on the Groups type in
    mypyc.emitmodule for details.

    Returns the C source code and (for debugging) the pretty printed IR.
    """
    compiler_options = compiler_options or CompilerOptions()

    # Parse and type check first; any mypy error aborts the build.
    t0 = time.time()
    try:
        result = emitmodule.parse_and_typecheck(sources, options)
    except CompileError as e:
        for line in e.messages:
            print(line)
        fail('Typechecking failure')

    t1 = time.time()
    if compiler_options.verbose:
        print("Parsed and typechecked in {:.3f}s".format(t1 - t0))

    # NOTE: a previous version also collected the module names of every
    # group here, but the result was never used, so that dead code has
    # been removed.

    errors = Errors()

    ops = []  # type: List[str]
    ctext = emitmodule.compile_modules_to_c(result,
                                            compiler_options=compiler_options,
                                            errors=errors,
                                            ops=ops,
                                            groups=groups)
    # Codegen reports problems through the Errors object; abort with a
    # non-zero exit status if any were produced.
    if errors.num_errors:
        errors.flush_errors()
        sys.exit(1)

    t2 = time.time()
    if compiler_options.verbose:
        print("Compiled to C in {:.3f}s".format(t2 - t1))

    return ctext, '\n'.join(ops)
예제 #9
0
파일: build.py 프로젝트: wade1990/mypy
def mypycify(paths: List[str],
             mypy_options: Optional[List[str]] = None,
             *,
             verbose: bool = False,
             opt_level: str = '3',
             strip_asserts: bool = False,
             multi_file: bool = False,
             separate: Union[bool, List[Tuple[List[str],
                                              Optional[str]]]] = False,
             skip_cgen_input: Optional[Any] = None) -> List[Extension]:
    """Main entry point to building using mypyc.

    This produces a list of Extension objects that should be passed as the
    ext_modules parameter to setup.

    Arguments:
        paths: A list of file paths to build. It may contain globs.
        mypy_options: Optionally, a list of command line flags to pass to mypy.
                      (This can also contain additional files, for compatibility reasons.)
        verbose: Should mypyc be more verbose. Defaults to false.

        opt_level: The optimization level, as a string. Defaults to '3' (meaning '-O3').
        strip_asserts: Should asserts be stripped from the generated code.

        multi_file: Should each Python module be compiled into its own C source file.
                    This can reduce compile time and memory requirements at the likely
                    cost of runtime performance of compiled code. Defaults to false.
        separate: Should compiled modules be placed in separate extension modules.
                  If False, all modules are placed in a single shared library.
                  If True, every module is placed in its own library.
                  Otherwise separate should be a list of
                  (file name list, optional shared library name) pairs specifying
                  groups of files that should be placed in the same shared library
                  (while each remaining module will be placed in its own library).

                  Each group can be compiled independently, which can
                  speed up compilation, but calls between groups can
                  be slower than calls within a group and can't be
                  inlined.
    """

    setup_mypycify_vars()
    compiler_options = CompilerOptions(strip_asserts=strip_asserts,
                                       multi_file=multi_file,
                                       verbose=verbose)

    # Create a compiler object so we can make decisions based on what
    # compiler is being used. typeshed is missing some attributes on the
    # compiler object so we give it type Any
    compiler = ccompiler.new_compiler()  # type: Any
    sysconfig.customize_compiler(compiler)

    # Expand globs so callers can pass patterns like 'pkg/*.py'.
    expanded_paths = []
    for path in paths:
        expanded_paths.extend(glob.glob(path))

    build_dir = 'build'  # TODO: can this be overridden??
    try:
        os.mkdir(build_dir)
    except FileExistsError:
        pass

    sources, options = get_mypy_config(expanded_paths, mypy_options)
    # We generate a shared lib if there are multiple modules or if any
    # of the modules are in package. (Because I didn't want to fuss
    # around with making the single module code handle packages.)
    use_shared_lib = len(sources) > 1 or any('.' in x.module for x in sources)

    groups = construct_groups(sources, separate, use_shared_lib)

    # We let the test harness just pass in the c file contents instead
    # so that it can do a corner-cutting version without full stubs.
    if not skip_cgen_input:
        group_cfiles, ops_text = generate_c(sources,
                                            options,
                                            groups,
                                            compiler_options=compiler_options)
        # TODO: unique names?
        with open(os.path.join(build_dir, 'ops.txt'), 'w') as f:
            f.write(ops_text)
    else:
        group_cfiles = skip_cgen_input

    # Write out the generated C and collect the files for each group
    group_cfilenames = []  # type: List[Tuple[List[str], List[str]]]
    for cfiles in group_cfiles:
        cfilenames = []
        for cfile, ctext in cfiles:
            cfile = os.path.join(build_dir, cfile)
            write_file(cfile, ctext)
            # Only .c files are compiled; headers are tracked as deps below.
            if os.path.splitext(cfile)[1] == '.c':
                cfilenames.append(cfile)

        deps = [
            os.path.join(build_dir, dep) for dep in get_header_deps(cfiles)
        ]
        group_cfilenames.append((cfilenames, deps))

    cflags = []  # type: List[str]
    if compiler.compiler_type == 'unix':
        cflags += [
            '-O{}'.format(opt_level),
            '-Werror',
            '-Wno-unused-function',
            '-Wno-unused-label',
            '-Wno-unreachable-code',
            '-Wno-unused-variable',
            '-Wno-trigraphs',
            '-Wno-unused-command-line-argument',
            '-Wno-unknown-warning-option',
        ]
        if 'gcc' in compiler.compiler[0]:
            # This flag is needed for gcc but does not exist on clang.
            cflags += ['-Wno-unused-but-set-variable']
    elif compiler.compiler_type == 'msvc':
        # Map optimization level 3 down to 2 for MSVC.
        if opt_level == '3':
            opt_level = '2'
        cflags += [
            '/O{}'.format(opt_level),
            '/wd4102',  # unreferenced label
            '/wd4101',  # unreferenced local variable
            '/wd4146',  # negating unsigned int
        ]
        if multi_file:
            # Disable whole program optimization in multi-file mode so
            # that we actually get the compilation speed and memory
            # use wins that multi-file mode is intended for.
            cflags += [
                '/GL-',
                '/wd9025',  # warning about overriding /GL
            ]

    # Copy the runtime library in
    shared_cfilenames = []
    for name in ['CPy.c', 'getargs.c']:
        rt_file = os.path.join(build_dir, name)
        with open(os.path.join(include_dir(), name), encoding='utf-8') as f:
            write_file(rt_file, f.read())
        shared_cfilenames.append(rt_file)

    extensions = []
    for (group_sources, lib_name), (cfilenames,
                                    deps) in zip(groups, group_cfilenames):
        if use_shared_lib:
            assert lib_name
            extensions.extend(
                build_using_shared_lib(group_sources, lib_name,
                                       cfilenames + shared_cfilenames, deps,
                                       build_dir, cflags))
        else:
            extensions.extend(
                build_single_module(group_sources,
                                    cfilenames + shared_cfilenames, cflags))

    return extensions
예제 #10
0
파일: test_run.py 프로젝트: rheehot/mypy
    def run_case_step(self, testcase: DataDrivenTestCase,
                      incremental_step: int) -> None:
        """Compile and run one step of a run-style test case.

        Compiles the test modules to C, builds them into extension
        modules via setup.py, runs driver.py in a subprocess, and
        compares its output against the expected test output.
        """
        bench = testcase.config.getoption(
            '--bench', False) and 'Benchmark' in testcase.name

        options = Options()
        options.use_builtins_fixtures = True
        options.show_traceback = True
        options.strict_optional = True
        # N.B: We try to (and ought to!) run with the current
        # version of python, since we are going to link and run
        # against the current version of python.
        # But a lot of the tests use type annotations so we can't say it is 3.5.
        options.python_version = max(sys.version_info[:2], (3, 6))
        options.export_types = True
        options.preserve_asts = True
        options.incremental = False

        # Avoid checking modules/packages named 'unchecked', to provide a way
        # to test interacting with code we don't have types for.
        options.per_module_options['unchecked.*'] = {'follow_imports': 'error'}

        source = build.BuildSource('native.py', 'native', None)
        sources = [source]
        module_names = ['native']
        module_paths = ['native.py']

        # Hard code another module name to compile in the same compilation unit.
        to_delete = []
        for fn, text in testcase.files:
            fn = os.path.relpath(fn, test_temp_dir)

            if os.path.basename(fn).startswith('other') and fn.endswith('.py'):
                name = os.path.basename(fn).split('.')[0]
                module_names.append(name)
                sources.append(build.BuildSource(fn, name, None))
                to_delete.append(fn)
                module_paths.append(fn)

                # Keep an uncompiled copy so the same module can also be
                # imported in interpreted form.
                shutil.copyfile(
                    fn,
                    os.path.join(os.path.dirname(fn),
                                 name + '_interpreted.py'))

        for source in sources:
            options.per_module_options.setdefault(source.module,
                                                  {})['mypyc'] = True

        separate = (self.get_separate('\n'.join(
            testcase.input), incremental_step) if self.separate else False)

        groups = construct_groups(sources, separate, len(module_names) > 1)

        try:
            result = emitmodule.parse_and_typecheck(sources=sources,
                                                    options=options,
                                                    alt_lib_path='.')
            errors = Errors()
            compiler_options = CompilerOptions(multi_file=self.multi_file,
                                               separate=self.separate)
            ir, cfiles = emitmodule.compile_modules_to_c(
                result,
                compiler_options=compiler_options,
                errors=errors,
                groups=groups,
            )
            if errors.num_errors:
                errors.flush_errors()
                assert False, "Compile error"
        except CompileError as e:
            for line in e.messages:
                print(line)
            assert False, 'Compile error'

        # Check that serialization works on this IR
        check_serialization_roundtrip(ir)

        setup_file = os.path.abspath(os.path.join(WORKDIR, 'setup.py'))
        # We pass the C file information to the build script via setup.py unfortunately
        with open(setup_file, 'w', encoding='utf-8') as f:
            f.write(
                setup_format.format(module_paths, separate, cfiles,
                                    self.multi_file))

        if not run_setup(setup_file, ['build_ext', '--inplace']):
            if testcase.config.getoption('--mypyc-showc'):
                show_c(cfiles)
            assert False, "Compilation failed"

        # Assert that an output file got created
        suffix = 'pyd' if sys.platform == 'win32' else 'so'
        assert glob.glob('native.*.{}'.format(suffix))

        driver_path = 'driver.py'
        env = os.environ.copy()
        env['MYPYC_RUN_BENCH'] = '1' if bench else '0'

        # XXX: This is an ugly hack.
        if 'MYPYC_RUN_GDB' in os.environ:
            if platform.system() == 'Darwin':
                subprocess.check_call(
                    ['lldb', '--', sys.executable, driver_path], env=env)
                assert False, (
                    "Test can't pass in lldb mode. (And remember to pass -s to "
                    "pytest)")
            elif platform.system() == 'Linux':
                subprocess.check_call(
                    ['gdb', '--args', sys.executable, driver_path], env=env)
                assert False, (
                    "Test can't pass in gdb mode. (And remember to pass -s to "
                    "pytest)")
            else:
                assert False, 'Unsupported OS'

        proc = subprocess.Popen([sys.executable, driver_path],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                env=env)
        output = proc.communicate()[0].decode('utf8')
        outlines = output.splitlines()

        if testcase.config.getoption('--mypyc-showc'):
            show_c(cfiles)
        if proc.returncode != 0:
            print()
            print('*** Exit status: %d' % proc.returncode)

        # Verify output.
        if bench:
            print('Test output:')
            print(output)
        else:
            if incremental_step == 1:
                msg = 'Invalid output'
                expected = testcase.output
            else:
                msg = 'Invalid output (step {})'.format(incremental_step)
                expected = testcase.output2.get(incremental_step, [])

            assert_test_output(testcase, outlines, msg, expected)

        if incremental_step > 1 and options.incremental:
            suffix = '' if incremental_step == 2 else str(incremental_step - 1)
            expected_rechecked = testcase.expected_rechecked_modules.get(
                incremental_step - 1)
            if expected_rechecked is not None:
                assert_module_equivalence('rechecked' + suffix,
                                          expected_rechecked,
                                          result.manager.rechecked_modules)
            expected_stale = testcase.expected_stale_modules.get(
                incremental_step - 1)
            if expected_stale is not None:
                assert_module_equivalence('stale' + suffix, expected_stale,
                                          result.manager.stale_modules)

        assert proc.returncode == 0
예제 #11
0
파일: build.py 프로젝트: zomglings/mypy
def mypycify(paths: List[str],
             mypy_options: Optional[List[str]] = None,
             opt_level: str = '3',
             multi_file: bool = False,
             skip_cgen: bool = False,
             verbose: bool = False,
             strip_asserts: bool = False) -> List[Extension]:
    """Main entry point to building using mypyc.

    This produces a list of Extension objects that should be passed as the
    ext_modules parameter to setup.

    Arguments:
      * paths: A list of file paths to build. It may contain globs.
      * mypy_options: Optionally, a list of command line flags to pass to mypy.
                      (This can also contain additional files, for compatibility reasons.)
      * opt_level: The optimization level, as a string. Defaults to '3' (meaning '-O3').
    """

    setup_mypycify_vars()
    compiler_options = CompilerOptions(strip_asserts=strip_asserts,
                                       multi_file=multi_file, verbose=verbose)

    # Create a compiler object so we can make decisions based on what
    # compiler is being used. typeshed is missing some attributes on the
    # compiler object so we give it type Any
    compiler = ccompiler.new_compiler()  # type: Any
    sysconfig.customize_compiler(compiler)

    # Expand globs so callers can pass patterns like 'pkg/*.py'.
    expanded_paths = []
    for path in paths:
        expanded_paths.extend(glob.glob(path))

    build_dir = 'build'  # TODO: can this be overridden??
    try:
        os.mkdir(build_dir)
    except FileExistsError:
        pass

    sources, options = get_mypy_config(expanded_paths, mypy_options)
    # We generate a shared lib if there are multiple modules or if any
    # of the modules are in package. (Because I didn't want to fuss
    # around with making the single module code handle packages.)
    use_shared_lib = len(sources) > 1 or any('.' in x.module for x in sources)

    lib_name = shared_lib_name([source.module for source in sources]) if use_shared_lib else None

    # We let the test harness make us skip doing the full compilation
    # so that it can do a corner-cutting version without full stubs.
    # TODO: Be able to do this based on file mtimes?
    if not skip_cgen:
        cfiles, ops_text = generate_c(sources, options, lib_name,
                                      compiler_options=compiler_options)
        # TODO: unique names?
        with open(os.path.join(build_dir, 'ops.txt'), 'w') as f:
            f.write(ops_text)
        cfilenames = []
        for cfile, ctext in cfiles:
            cfile = os.path.join(build_dir, cfile)
            write_file(cfile, ctext)
            # Only .c files are handed to the C compiler.
            if os.path.splitext(cfile)[1] == '.c':
                cfilenames.append(cfile)
    else:
        # Reuse whatever C files a previous run left in the build dir.
        cfilenames = glob.glob(os.path.join(build_dir, '*.c'))

    cflags = []  # type: List[str]
    if compiler.compiler_type == 'unix':
        cflags += [
            '-O{}'.format(opt_level), '-Werror', '-Wno-unused-function', '-Wno-unused-label',
            '-Wno-unreachable-code', '-Wno-unused-variable', '-Wno-trigraphs',
            '-Wno-unused-command-line-argument', '-Wno-unknown-warning-option',
        ]
        if 'gcc' in compiler.compiler[0]:
            # This flag is needed for gcc but does not exist on clang.
            cflags += ['-Wno-unused-but-set-variable']
    elif compiler.compiler_type == 'msvc':
        # Map optimization level 3 down to 2 for MSVC.
        if opt_level == '3':
            opt_level = '2'
        cflags += [
            '/O{}'.format(opt_level),
            '/wd4102',  # unreferenced label
            '/wd4101',  # unreferenced local variable
            '/wd4146',  # negating unsigned int
        ]
        if multi_file:
            # Disable whole program optimization in multi-file mode so
            # that we actually get the compilation speed and memory
            # use wins that multi-file mode is intended for.
            cflags += [
                '/GL-',
                '/wd9025',  # warning about overriding /GL
            ]

    # Copy the runtime library in
    for name in ['CPy.c', 'getargs.c']:
        rt_file = os.path.join(build_dir, name)
        with open(os.path.join(include_dir(), name), encoding='utf-8') as f:
            write_file(rt_file, f.read())
        cfilenames.append(rt_file)

    if use_shared_lib:
        assert lib_name
        extensions = build_using_shared_lib(sources, lib_name, cfilenames, build_dir, cflags)
    else:
        extensions = build_single_module(sources, cfilenames, cflags)

    return extensions
예제 #12
0
def mypycify(
        paths: List[str],
        *,
        only_compile_paths: Optional[Iterable[str]] = None,
        verbose: bool = False,
        opt_level: str = '3',
        strip_asserts: bool = False,
        multi_file: bool = False,
        separate: Union[bool, List[Tuple[List[str], Optional[str]]]] = False,
        skip_cgen_input: Optional[Any] = None,
        target_dir: Optional[str] = None,
        include_runtime_files: Optional[bool] = None) -> List['Extension']:
    """Main entry point to building using mypyc.

    This produces a list of Extension objects that should be passed as the
    ext_modules parameter to setup.

    Arguments:
        paths: A list of file paths to build. It may also contain mypy options.
        only_compile_paths: If not None, an iterable of paths that are to be
                            the only modules compiled, even if other modules
                            appear in the mypy command line given to paths.
                            (These modules must still be passed to paths.)

        verbose: Should mypyc be more verbose. Defaults to false.

        opt_level: The optimization level, as a string. Defaults to '3' (meaning '-O3').
        strip_asserts: Should asserts be stripped from the generated code.

        multi_file: Should each Python module be compiled into its own C source file.
                    This can reduce compile time and memory requirements at the likely
                    cost of runtime performance of compiled code. Defaults to false.
        separate: Should compiled modules be placed in separate extension modules.
                  If False, all modules are placed in a single shared library.
                  If True, every module is placed in its own library.
                  Otherwise separate should be a list of
                  (file name list, optional shared library name) pairs specifying
                  groups of files that should be placed in the same shared library
                  (while all other modules will each be placed in their own library).

                  Each group can be compiled independently, which can
                  speed up compilation, but calls between groups can
                  be slower than calls within a group and can't be
                  inlined.
        skip_cgen_input: Internal/testing hook forwarded to mypyc_build.
                         NOTE(review): presumably pre-computed C-generation input
                         used to skip code generation -- confirm against mypyc_build.
        target_dir: The directory to write C output files. Defaults to 'build'.
        include_runtime_files: If not None, whether the mypyc runtime library
                               should be directly #include'd instead of linked
                               separately in order to reduce compiler invocations.
                               Defaults to False in multi_file mode, True otherwise.
    """

    # Figure out our configuration
    compiler_options = CompilerOptions(
        strip_asserts=strip_asserts,
        multi_file=multi_file,
        verbose=verbose,
        separate=separate is not False,
        target_dir=target_dir,
        include_runtime_files=include_runtime_files,
    )

    # Generate all the actual important C code
    groups, group_cfilenames = mypyc_build(
        paths,
        only_compile_paths=only_compile_paths,
        compiler_options=compiler_options,
        separate=separate,
        skip_cgen_input=skip_cgen_input,
    )

    # Mess around with setuptools and actually get the thing built
    setup_mypycify_vars()

    # Create a compiler object so we can make decisions based on what
    # compiler is being used. typeshed is missing some attributes on the
    # compiler object so we give it type Any
    compiler = ccompiler.new_compiler()  # type: Any
    sysconfig.customize_compiler(compiler)

    build_dir = compiler_options.target_dir

    cflags = []  # type: List[str]
    if compiler.compiler_type == 'unix':
        # Warnings are suppressed (rather than fixed in the generated C)
        # because the C code generator intentionally emits constructs such
        # as unused labels and unreachable code.
        cflags += [
            '-O{}'.format(opt_level),
            '-Werror',
            '-Wno-unused-function',
            '-Wno-unused-label',
            '-Wno-unreachable-code',
            '-Wno-unused-variable',
            '-Wno-unused-command-line-argument',
            '-Wno-unknown-warning-option',
        ]
        if 'gcc' in compiler.compiler[0]:
            # This flag is needed for gcc but does not exist on clang.
            cflags += ['-Wno-unused-but-set-variable']
    elif compiler.compiler_type == 'msvc':
        # MSVC has no /O3; map the default '3' down to /O2.
        if opt_level == '3':
            opt_level = '2'
        cflags += [
            '/O{}'.format(opt_level),
            '/wd4102',  # unreferenced label
            '/wd4101',  # unreferenced local variable
            '/wd4146',  # negating unsigned int
        ]
        if multi_file:
            # Disable whole program optimization in multi-file mode so
            # that we actually get the compilation speed and memory
            # use wins that multi-file mode is intended for.
            cflags += [
                '/GL-',
                '/wd9025',  # warning about overriding /GL
            ]

    # If configured to (defaults to yes in multi-file mode), copy the
    # runtime library in. Otherwise it just gets #included to save on
    # compiler invocations.
    shared_cfilenames = []
    if not compiler_options.include_runtime_files:
        for name in ['CPy.c', 'getargs.c']:
            rt_file = os.path.join(build_dir, name)
            with open(os.path.join(include_dir(), name),
                      encoding='utf-8') as f:
                write_file(rt_file, f.read())
            shared_cfilenames.append(rt_file)

    # Build one Extension (or group of Extensions) per group; groups with a
    # lib_name share a single shared library, others get standalone modules.
    extensions = []
    for (group_sources, lib_name), (cfilenames,
                                    deps) in zip(groups, group_cfilenames):
        if lib_name:
            extensions.extend(
                build_using_shared_lib(group_sources, lib_name,
                                       cfilenames + shared_cfilenames, deps,
                                       build_dir, cflags))
        else:
            extensions.extend(
                build_single_module(group_sources,
                                    cfilenames + shared_cfilenames, cflags))

    return extensions
예제 #13
0
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Compile a run test case and check the driver's output.

        The test case input is written out as native.py (plus any modules
        whose file name starts with 'other'), compiled to C with mypyc,
        built in place via setup.py, and then driver.py is executed in a
        subprocess. The subprocess output is compared against the expected
        test output (or just printed for benchmark cases).
        """
        bench = testcase.config.getoption('--bench', False) and 'Benchmark' in testcase.name

        # setup.py wants to be run from the root directory of the package, which we accommodate
        # by chdiring into tmp/
        with use_custom_builtins(os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase), (
                chdir_manager('tmp')):
            text = '\n'.join(testcase.input)

            options = Options()
            options.use_builtins_fixtures = True
            options.show_traceback = True
            options.strict_optional = True
            # N.B: We try to (and ought to!) run with the current
            # version of python, since we are going to link and run
            # against the current version of python.
            # But a lot of the tests use type annotations so we can't say it is 3.5.
            options.python_version = max(sys.version_info[:2], (3, 6))
            options.export_types = True
            options.preserve_asts = True

            # Avoid checking modules/packages named 'unchecked', to provide a way
            # to test interacting with code we don't have types for.
            options.per_module_options['unchecked.*'] = {'follow_imports': 'error'}

            workdir = 'build'
            os.mkdir(workdir)

            # The same source is compiled (native.py) and also run
            # interpreted (interpreted.py) so the driver can compare them.
            source_path = 'native.py'
            with open(source_path, 'w', encoding='utf-8') as f:
                f.write(text)
            with open('interpreted.py', 'w', encoding='utf-8') as f:
                f.write(text)

            shutil.copyfile(TESTUTIL_PATH, 'testutil.py')

            source = build.BuildSource(source_path, 'native', text)
            sources = [source]
            module_names = ['native']
            module_paths = [os.path.abspath('native.py')]

            # Hard code another module name to compile in the same compilation unit.
            to_delete = []
            for fn, text in testcase.files:
                fn = os.path.relpath(fn, test_temp_dir)

                if os.path.basename(fn).startswith('other'):
                    name = os.path.basename(fn).split('.')[0]
                    module_names.append(name)
                    sources.append(build.BuildSource(fn, name, text))
                    to_delete.append(fn)
                    module_paths.append(os.path.abspath(fn))

                    shutil.copyfile(fn,
                                    os.path.join(os.path.dirname(fn), name + '_interpreted.py'))

            for source in sources:
                options.per_module_options.setdefault(source.module, {})['mypyc'] = True

            # A shared library is only needed when compiling more than one module.
            if len(module_names) == 1:
                lib_name = None  # type: Optional[str]
            else:
                lib_name = shared_lib_name([source.module for source in sources])

            try:
                result = emitmodule.parse_and_typecheck(
                    sources=sources,
                    options=options,
                    alt_lib_path='.')
                errors = Errors()
                compiler_options = CompilerOptions(multi_file=self.multi_file)
                cfiles = emitmodule.compile_modules_to_c(
                    result,
                    module_names=module_names,
                    shared_lib_name=lib_name,
                    compiler_options=compiler_options,
                    errors=errors,
                )
                if errors.num_errors:
                    errors.flush_errors()
                    assert False, "Compile error"
            except CompileError as e:
                for line in e.messages:
                    print(line)
                assert False, 'Compile error'

            for cfile, ctext in cfiles:
                with open(os.path.join(workdir, cfile), 'w', encoding='utf-8') as f:
                    f.write(ctext)

            setup_file = os.path.abspath(os.path.join(workdir, 'setup.py'))
            # Be explicit about the encoding so that writing setup.py doesn't
            # depend on the platform's locale default (matters on Windows);
            # also matches every other text-file open in this module.
            with open(setup_file, 'w', encoding='utf-8') as f:
                f.write(setup_format.format(module_paths))

            if not run_setup(setup_file, ['build_ext', '--inplace']):
                if testcase.config.getoption('--mypyc-showc'):
                    show_c(cfiles)
                assert False, "Compilation failed"

            # Assert that an output file got created
            suffix = 'pyd' if sys.platform == 'win32' else 'so'
            assert glob.glob('native.*.{}'.format(suffix))

            for p in to_delete:
                os.remove(p)

            driver_path = 'driver.py'
            env = os.environ.copy()
            env['MYPYC_RUN_BENCH'] = '1' if bench else '0'

            # XXX: This is an ugly hack.
            if 'MYPYC_RUN_GDB' in os.environ:
                if platform.system() == 'Darwin':
                    subprocess.check_call(['lldb', '--', sys.executable, driver_path], env=env)
                    assert False, ("Test can't pass in lldb mode. (And remember to pass -s to "
                                   "pytest)")
                elif platform.system() == 'Linux':
                    subprocess.check_call(['gdb', '--args', sys.executable, driver_path], env=env)
                    assert False, ("Test can't pass in gdb mode. (And remember to pass -s to "
                                   "pytest)")
                else:
                    assert False, 'Unsupported OS'

            proc = subprocess.Popen([sys.executable, driver_path], stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT, env=env)
            output = proc.communicate()[0].decode('utf8')
            outlines = output.splitlines()

            if testcase.config.getoption('--mypyc-showc'):
                show_c(cfiles)
            if proc.returncode != 0:
                print()
                print('*** Exit status: %d' % proc.returncode)

            # Verify output.
            if bench:
                print('Test output:')
                print(output)
            else:
                assert_test_output(testcase, outlines, 'Invalid output')

            assert proc.returncode == 0
예제 #14
0
    def run_case_step(self, testcase: DataDrivenTestCase, incremental_step: int) -> None:
        """Run one step of a (possibly incremental) run test case.

        Compiles the test sources with mypyc for the given step, builds
        them with setup.py, runs driver.py in a subprocess and checks the
        output against the expected output for this step. For incremental
        runs, also checks which modules were rechecked/stale.
        """
        bench = testcase.config.getoption('--bench', False) and 'Benchmark' in testcase.name

        options = Options()
        options.use_builtins_fixtures = True
        options.show_traceback = True
        options.strict_optional = True
        options.python_version = sys.version_info[:2]
        options.export_types = True
        options.preserve_asts = True
        # Incremental (cached) builds are only needed when running separate steps.
        options.incremental = self.separate

        # Avoid checking modules/packages named 'unchecked', to provide a way
        # to test interacting with code we don't have types for.
        options.per_module_options['unchecked.*'] = {'follow_imports': 'error'}

        source = build.BuildSource('native.py', 'native', None)
        sources = [source]
        module_names = ['native']
        module_paths = ['native.py']

        # Hard code another module name to compile in the same compilation unit.
        to_delete = []  # type: List[str]
        for fn, text in testcase.files:
            fn = os.path.relpath(fn, test_temp_dir)

            if os.path.basename(fn).startswith('other') and fn.endswith('.py'):
                name = fn.split('.')[0].replace(os.sep, '.')
                module_names.append(name)
                sources.append(build.BuildSource(fn, name, None))
                to_delete.append(fn)
                module_paths.append(fn)

                shutil.copyfile(fn,
                                os.path.join(os.path.dirname(fn), name + '_interpreted.py'))

        for source in sources:
            options.per_module_options.setdefault(source.module, {})['mypyc'] = True

        separate = (self.get_separate('\n'.join(testcase.input), incremental_step) if self.separate
                    else False)

        groups = construct_groups(sources, separate, len(module_names) > 1)

        try:
            compiler_options = CompilerOptions(multi_file=self.multi_file, separate=self.separate)
            result = emitmodule.parse_and_typecheck(
                sources=sources,
                options=options,
                compiler_options=compiler_options,
                groups=groups,
                alt_lib_path='.')
            errors = Errors()
            ir, cfiles = emitmodule.compile_modules_to_c(
                result,
                compiler_options=compiler_options,
                errors=errors,
                groups=groups,
            )
            if errors.num_errors:
                errors.flush_errors()
                assert False, "Compile error"
        except CompileError as e:
            for line in e.messages:
                print(fix_native_line_number(line, testcase.file, testcase.line))
            assert False, 'Compile error'

        # Check that serialization works on this IR. (Only on the first
        # step because the returned ir only includes updated code.)
        if incremental_step == 1:
            check_serialization_roundtrip(ir)

        # Optimization and debug levels can be overridden via the environment.
        opt_level = int(os.environ.get('MYPYC_OPT_LEVEL', 0))
        debug_level = int(os.environ.get('MYPYC_DEBUG_LEVEL', 0))

        setup_file = os.path.abspath(os.path.join(WORKDIR, 'setup.py'))
        # We pass the C file information to the build script via setup.py unfortunately
        with open(setup_file, 'w', encoding='utf-8') as f:
            f.write(setup_format.format(module_paths,
                                        separate,
                                        cfiles,
                                        self.multi_file,
                                        opt_level,
                                        debug_level))

        if not run_setup(setup_file, ['build_ext', '--inplace']):
            if testcase.config.getoption('--mypyc-showc'):
                show_c(cfiles)
            assert False, "Compilation failed"

        # Assert that an output file got created
        # (extension module suffix differs between Windows and POSIX).
        suffix = 'pyd' if sys.platform == 'win32' else 'so'
        assert glob.glob(f'native.*.{suffix}') or glob.glob(f'native.{suffix}')

        driver_path = 'driver.py'
        if not os.path.isfile(driver_path):
            # No driver.py provided by test case. Use the default one
            # (mypyc/test-data/driver/driver.py) that calls each
            # function named test_*.
            default_driver = os.path.join(
                os.path.dirname(__file__), '..', 'test-data', 'driver', 'driver.py')
            shutil.copy(default_driver, driver_path)
        env = os.environ.copy()
        env['MYPYC_RUN_BENCH'] = '1' if bench else '0'

        # XXX: This is an ugly hack.
        if 'MYPYC_RUN_GDB' in os.environ:
            if platform.system() == 'Darwin':
                subprocess.check_call(['lldb', '--', sys.executable, driver_path], env=env)
                assert False, ("Test can't pass in lldb mode. (And remember to pass -s to "
                               "pytest)")
            elif platform.system() == 'Linux':
                subprocess.check_call(['gdb', '--args', sys.executable, driver_path], env=env)
                assert False, ("Test can't pass in gdb mode. (And remember to pass -s to "
                               "pytest)")
            else:
                assert False, 'Unsupported OS'

        proc = subprocess.Popen([sys.executable, driver_path], stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, env=env)
        output = proc.communicate()[0].decode('utf8')
        outlines = output.splitlines()

        if testcase.config.getoption('--mypyc-showc'):
            show_c(cfiles)
        if proc.returncode != 0:
            print()
            print('*** Exit status: %d' % proc.returncode)

        # Verify output.
        if bench:
            print('Test output:')
            print(output)
        else:
            if incremental_step == 1:
                msg = 'Invalid output'
                expected = testcase.output
            else:
                msg = f'Invalid output (step {incremental_step})'
                expected = testcase.output2.get(incremental_step, [])

            if not expected:
                # Tweak some line numbers, but only if the expected output is empty,
                # as tweaked output might not match expected output.
                outlines = [fix_native_line_number(line, testcase.file, testcase.line)
                            for line in outlines]
            assert_test_output(testcase, outlines, msg, expected)

        if incremental_step > 1 and options.incremental:
            suffix = '' if incremental_step == 2 else str(incremental_step - 1)
            expected_rechecked = testcase.expected_rechecked_modules.get(incremental_step - 1)
            if expected_rechecked is not None:
                assert_module_equivalence(
                    'rechecked' + suffix,
                    expected_rechecked, result.manager.rechecked_modules)
            expected_stale = testcase.expected_stale_modules.get(incremental_step - 1)
            if expected_stale is not None:
                assert_module_equivalence(
                    'stale' + suffix,
                    expected_stale, result.manager.stale_modules)

        assert proc.returncode == 0