def build_ir_for_single_file(
        input_lines: List[str],
        compiler_options: Optional[CompilerOptions] = None) -> List[FuncIR]:
    """Type check a program given as lines and return IR for its functions."""
    program_text = '\n'.join(input_lines)

    # Target the earliest supported Python C API by default; a test that
    # needs newer API features passes its own CompilerOptions.
    if compiler_options is None:
        compiler_options = CompilerOptions(capi_version=(3, 5))

    options = Options()
    options.show_traceback = True
    options.use_builtins_fixtures = True
    options.strict_optional = True
    options.python_version = (3, 6)
    options.export_types = True
    options.preserve_asts = True
    options.per_module_options['__main__'] = {'mypyc': True}

    # The whole program is presented as a single __main__ module.
    source = build.BuildSource('main', '__main__', program_text)

    # Parse and type check the input program.
    result = build.build(sources=[source],
                         options=options,
                         alt_lib_path=test_temp_dir)
    if result.errors:
        raise CompileError(result.errors)

    errors = Errors()
    modules = build_ir(
        [result.files['__main__']], result.graph, result.types,
        Mapper({'__main__': None}),
        compiler_options, errors)
    if errors.num_errors:
        raise CompileError(errors.new_messages())

    # Exactly one module was built; return its functions.
    module = next(iter(modules.values()))
    return module.functions
def read_program(path: str, pyversion: Tuple[int, int]) -> str:
    """Read the source file at *path*, honoring its declared encoding.

    Raises CompileError with a mypy-style message if the file cannot be
    read or decoded.
    """
    try:
        return read_with_python_encoding(path, pyversion)
    except IOError as ioerr:
        raise CompileError(
            ["mypy: can't read file '{}': {}".format(path, ioerr.strerror)])
    except UnicodeDecodeError as decodeerr:
        raise CompileError(
            ["mypy: can't decode file '{}': {}".format(path, str(decodeerr))])
def run_test(self, testcase):
    """Semantically analyze a test case and compare its TypeInfo dump."""
    try:
        # Analyze the test case input program.
        program_text = '\n'.join(testcase.input)
        result = build.build(target=build.SEMANTIC_ANALYSIS,
                             sources=[BuildSource('main', None, program_text)],
                             flags=[build.TEST_BUILTINS],
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            raise CompileError(a)
        # Gather every TypeInfo defined in the top-level modules.
        typeinfos = TypeInfoMap()
        for module_file in result.files.values():
            for sym in module_file.names.values():
                if isinstance(sym.node, TypeInfo):
                    typeinfos[sym.fullname] = sym.node
        # The actual output is the symbol table rendered as a string.
        a = str(typeinfos).split('\n')
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(
            testcase.file, testcase.line))
def test_transform(testcase):
    """Perform an identity transform test case."""
    try:
        program_text = '\n'.join(testcase.input)
        result = build.build(target=build.SEMANTIC_ANALYSIS,
                             sources=[BuildSource('main', None, program_text)],
                             pyversion=testfile_pyversion(testcase.file),
                             flags=[build.TEST_BUILTINS],
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            raise CompileError(a)
        # Append string representations of the transformed source files to
        # the actual output, skipping the builtins module and files whose
        # path carries a special marker.
        # TODO the test is not reliable
        for fnam in sorted(result.files.keys()):
            f = result.files[fnam]
            if f.path.endswith((os.sep + 'builtins.py', 'typing.py', 'abc.py')):
                continue
            base = os.path.basename(f.path)
            if base.startswith('_'):
                continue
            if os.path.splitext(base)[0].endswith('_'):
                continue
            t = TestTransformVisitor()
            f = t.node(f)
            a += str(f).split('\n')
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(
            testcase.file, testcase.line))
def build_ir_for_single_file(
        input_lines: List[str],
        compiler_options: Optional[CompilerOptions] = None) -> List[FuncIR]:
    """Type check a program given as lines and return IR for its functions."""
    program_text = '\n'.join(input_lines)
    if compiler_options is None:
        compiler_options = CompilerOptions()

    options = Options()
    options.show_traceback = True
    options.use_builtins_fixtures = True
    options.strict_optional = True
    options.python_version = (3, 6)
    options.export_types = True
    options.preserve_asts = True
    options.per_module_options['__main__'] = {'mypyc': True}

    # The whole program is fed in as a single __main__ module.
    source = build.BuildSource('main', '__main__', program_text)

    # Parse and type check the input program.
    result = build.build(sources=[source],
                         options=options,
                         alt_lib_path=test_temp_dir)
    if result.errors:
        raise CompileError(result.errors)

    _, modules, errors = genops.build_ir([result.files['__main__']],
                                         result.graph,
                                         result.types,
                                         compiler_options)
    assert errors == 0
    # build_ir yields (name, module) pairs; there is exactly one module here.
    _, module = modules[0]
    return module.functions
def build_ir_for_single_file(input_lines: List[str],
                             compiler_options: Optional[CompilerOptions] = None
                             ) -> List[FuncIR]:
    """Type check a program given as lines and return IR for its functions."""
    program_text = '\n'.join(input_lines)
    if compiler_options is None:
        compiler_options = CompilerOptions()

    options = Options()
    options.show_traceback = True
    options.use_builtins_fixtures = True
    options.strict_optional = True
    options.python_version = (3, 6)
    options.export_types = True
    options.preserve_asts = True
    options.per_module_options['__main__'] = {'mypyc': True}

    # The whole program is fed in as a single __main__ module.
    source = build.BuildSource('main', '__main__', program_text)

    # Parse and type check the input program.
    result = build.build(sources=[source],
                         options=options,
                         alt_lib_path=test_temp_dir)
    if result.errors:
        raise CompileError(result.errors)

    errors = Errors()
    modules = build_ir(
        [result.files['__main__']], result.graph, result.types,
        Mapper({'__main__': None}),
        compiler_options, errors)
    if errors.num_errors:
        errors.flush_errors()
        pytest.fail('Errors while building IR')

    # Exactly one module was built; return its functions.
    module = next(iter(modules.values()))
    return module.functions
def compile_modules_to_c(sources: List[BuildSource],
                         module_names: List[str],
                         options: Options,
                         alt_lib_path: Optional[str] = None) -> str:
    """Compile Python module(s) to C that can be used from Python C extension modules."""
    assert options.strict_optional, 'strict_optional must be turned on'

    # Parse and type check everything first.
    result = build(sources=sources, options=options, alt_lib_path=alt_lib_path)
    if result.errors:
        raise CompileError(result.errors)

    # Generate basic IR, with missing exception and refcount handling.
    file_nodes = [result.files[name] for name in module_names]
    ir_modules = genops.build_ir(file_nodes, result.types)

    # Insert exception handling into every function.
    for _, ir_module in ir_modules:
        for fn in ir_module.functions:
            insert_exception_handling(fn)

    # Then insert refcount handling.
    for _, ir_module in ir_modules:
        for fn in ir_module.functions:
            insert_ref_count_opcodes(fn)

    # Emit C code for all the modules.
    source_paths = {name: result.files[name].path for name in module_names}
    return ModuleGenerator(ir_modules, source_paths).generate_c_for_modules()
def get_site_packages_dirs(python_executable: Optional[str]) -> Tuple[List[str], List[str]]:
    """Find package directories for given python.

    This runs a subprocess call, which generates a list of the egg directories,
    and the site package directories. To avoid repeatedly calling a subprocess
    (which can be slow!) we lru_cache the results.
    """
    if python_executable is None:
        return [], []
    if python_executable == sys.executable:
        # The running interpreter: query its package dirs in-process.
        site_packages = pyinfo.getsitepackages()
    else:
        # Another interpreter: ask it via a subprocess running pyinfo.
        try:
            output = subprocess.check_output(
                [python_executable, pyinfo.__file__, 'getsitepackages'],
                stderr=subprocess.PIPE).decode()
        except OSError as err:
            reason = os.strerror(err.errno)
            raise CompileError(
                [f"mypy: Invalid python executable '{python_executable}': {reason}"]
            ) from err
        # The subprocess prints a Python literal; parse it safely.
        site_packages = ast.literal_eval(output)
    return expand_site_packages(site_packages)
def lookup_program(module: str, lib_path: List[str]) -> str:
    """Return the source path of *module* (.py, not .pyi) on *lib_path*.

    Raises CompileError if the module cannot be found.
    """
    path = find_module(module, lib_path)
    if not path:
        raise CompileError(["mypy: can't find module '{}'".format(module)])
    return path
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Semantically analyze a test case and compare its TypeInfo dump."""
    try:
        # Analyze the test case input program.
        program_text = '\n'.join(testcase.input)
        result = build.build(sources=[BuildSource('main', None, program_text)],
                             options=get_semanal_options(),
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            raise CompileError(a)
        # Gather every TypeInfo defined in the top-level modules.
        typeinfos = TypeInfoMap()
        for module_file in result.files.values():
            for sym in module_file.names.values():
                if isinstance(sym.node, TypeInfo):
                    assert sym.fullname is not None
                    typeinfos[sym.fullname] = sym.node
        # The actual output is the symbol table rendered as a string.
        a = str(typeinfos).split('\n')
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(
            testcase.file, testcase.line))
def parse_and_typecheck(sources: List[BuildSource], options: Options,
                        alt_lib_path: Optional[str] = None) -> BuildResult:
    """Run mypy's parse/type-check phases and return the build result.

    Raises CompileError if the build reports any errors.
    """
    assert options.strict_optional, 'strict_optional must be turned on'
    build_result = build(sources=sources, options=options,
                         alt_lib_path=alt_lib_path)
    if build_result.errors:
        raise CompileError(build_result.errors)
    return build_result
def read_program(path: str) -> bytes:
    """Return the raw bytes of the file at *path*.

    Raises CompileError with a mypy-style message on I/O failure.
    """
    try:
        with open(path, 'rb') as source_file:
            return source_file.read()
    except IOError as ioerr:
        raise CompileError([
            "mypy: can't read file '{}': {}".format(path, ioerr.strerror)])
def read_program(path: str) -> str:
    """Return the text of the file at *path*.

    Raises CompileError with a mypy-style message on I/O failure.
    """
    try:
        # Use a context manager so the file is closed even if read() raises;
        # the previous open()/close() pair leaked the handle on a read error.
        with open(path) as f:
            text = f.read()
    except IOError as ioerr:
        raise CompileError([
            "mypy: can't read file '{}': {}".format(path, ioerr.strerror)])
    return text
def compile_module_to_c(sources: List[BuildSource], module_name: str,
                        options: Options, alt_lib_path: str) -> str:
    """Compile a Python module to source for a Python C extension module."""
    assert options.strict_optional, 'strict_optional must be turned on'

    # Parse and type check the module first.
    result = build(sources=sources, options=options, alt_lib_path=alt_lib_path)
    if result.errors:
        raise CompileError(result.errors)

    # Lower to IR and add reference-count opcodes to every function.
    module = genops.build_ir(result.files[module_name], result.types)
    for fn in module.functions:
        insert_ref_count_opcodes(fn)

    # Emit C for the module.
    return ModuleGenerator(module_name, module).generate_c_module()
def parse_and_typecheck(sources: List[BuildSource], options: Options,
                        compiler_options: CompilerOptions,
                        groups: Groups,
                        fscache: Optional[FileSystemCache] = None,
                        alt_lib_path: Optional[str] = None) -> BuildResult:
    """Run mypy's parse/type-check phases with the mypyc plugin installed.

    Raises CompileError if the build reports any errors.
    """
    assert options.strict_optional, 'strict_optional must be turned on'
    plugin = MypycPlugin(options, compiler_options, groups)
    build_result = build(
        sources=sources,
        options=options,
        alt_lib_path=alt_lib_path,
        fscache=fscache,
        extra_plugins=[plugin])
    if build_result.errors:
        raise CompileError(build_result.errors)
    return build_result
def build_ir_for_single_file(input_lines: List[str]) -> List[FuncIR]:
    """Type check a program given as lines and return IR for its functions."""
    program_text = '\n'.join(input_lines)

    options = Options()
    options.show_traceback = True
    options.use_builtins_fixtures = True
    options.strict_optional = True

    # The whole program is fed in as a single __main__ module.
    source = build.BuildSource('main', '__main__', program_text)

    # Parse and type check the input program.
    result = build.build(sources=[source],
                         options=options,
                         alt_lib_path=test_temp_dir)
    if result.errors:
        raise CompileError(result.errors)

    return genops.build_ir(result.files['__main__'], result.types).functions
def test_transform(testcase: DataDrivenTestCase) -> None:
    """Perform an identity transform test case."""
    try:
        src = '\n'.join(testcase.input)
        # parse_options presumably reads per-test flags embedded in the
        # source text — TODO confirm against its definition.
        options = parse_options(src, testcase, 1)
        options.use_builtins_fixtures = True
        options.semantic_analysis_only = True
        options.enable_incomplete_features = True
        options.show_traceback = True
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=options,
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            # Build errors become the actual output compared below.
            raise CompileError(a)
        # Include string representations of the source files in the actual
        # output.
        for fnam in sorted(result.files.keys()):
            f = result.files[fnam]
            # Omit the builtins module and files with a special marker in the
            # path.
            # TODO the test is not reliable
            if (not f.path.endswith((os.sep + 'builtins.pyi',
                                     'typing_extensions.pyi',
                                     'typing.pyi',
                                     'abc.pyi',
                                     'sys.pyi'))
                    and not os.path.basename(f.path).startswith('_')
                    and not os.path.splitext(
                        os.path.basename(f.path))[0].endswith('_')):
                t = TypeAssertTransformVisitor()
                t.test_only = True
                # Transform the file and append its string form to the output.
                f = t.mypyfile(f)
                a += str(f).split('\n')
    except CompileError as e:
        a = e.messages
    if testcase.normalize_output:
        a = normalize_error_messages(a)
    assert_string_arrays_equal(
        testcase.output, a,
        f'Invalid semantic analyzer output ({testcase.file}, line {testcase.line})')
def test_semanal(testcase: DataDrivenTestCase) -> None:
    """Perform a semantic analysis test case.

    The testcase argument contains a description of the test case
    (inputs and output).
    """
    try:
        src = '\n'.join(testcase.input)
        options = get_semanal_options(src, testcase)
        # The target Python version is derived from the test file name.
        options.python_version = testfile_pyversion(testcase.file)
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=options,
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            # Build errors become the actual output compared below.
            raise CompileError(a)
        # Include string representations of the source files in the actual
        # output.
        for fnam in sorted(result.files.keys()):
            f = result.files[fnam]
            # Omit the builtins module and files with a special marker in the
            # path.
            # TODO the test is not reliable
            if (not f.path.endswith((os.sep + 'builtins.pyi',
                                     'typing.pyi',
                                     'mypy_extensions.pyi',
                                     'typing_extensions.pyi',
                                     'abc.pyi',
                                     'collections.pyi',
                                     'sys.pyi'))
                    and not os.path.basename(f.path).startswith('_')
                    and not os.path.splitext(
                        os.path.basename(f.path))[0].endswith('_')):
                a += str(f).split('\n')
    except CompileError as e:
        a = e.messages
    if testcase.normalize_output:
        a = normalize_error_messages(a)
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(testcase.file,
                                                                testcase.line))
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Semantically analyze a test case and compare its symbol-table dump."""
    try:
        # Analyze the test case input program.
        program_text = '\n'.join(testcase.input)
        result = build.build(sources=[BuildSource('main', None, program_text)],
                             options=get_semanal_options(),
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            raise CompileError(a)
        # The actual output is each module's symbol table as a string,
        # skipping the library modules.
        for name in sorted(result.files.keys()):
            if name in ('builtins', 'typing', 'abc'):
                continue
            a.append('{}:'.format(name))
            for line in str(result.files[name].names).split('\n'):
                a.append('  ' + line)
    except CompileError as e:
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(
            testcase.file, testcase.line))