def test_bad_toml_config() -> None:
    """mypy must abort with a clear ValueError when the TOML plugin config
    contains a non-boolean value for a boolean setting."""
    config_path = 'tests/mypy/configs/pyproject-plugin-bad-param.toml'
    module_path = 'tests/mypy/modules/success.py'
    # A per-configuration cache dir dramatically speeds up subsequent runs
    # and prevents cache-invalidation-related bugs in the tests.
    cache_dir = '.mypy_cache/test-pyproject-plugin-bad-param'
    command = [
        module_path,
        '--config-file', config_path,
        '--cache-dir', cache_dir,
        '--show-error-codes',
    ]
    # Echo the exact invocation to make failures easier to debug.
    print(f"\nExecuting: mypy {' '.join(command)}")
    with pytest.raises(ValueError) as e:
        mypy_api.run(command)
    assert str(e.value) == 'Configuration value must be a boolean for key: init_forbid_extra'
def pyls_lint(config, workspace, document, is_saved):
    """Lint *document* with mypy and return a list of LSP diagnostics.

    In live mode the (possibly unsaved) buffer is written to a temp file and
    shadowed over the real path via --shadow-file; otherwise mypy runs only
    on saved documents. mypy's own stderr is surfaced as one error diagnostic.
    """
    settings = config.plugin_settings('pyls_mypy')
    live_mode = settings.get('live_mode', True)
    python = settings.get('python', None)
    config_file = settings.get('config_file', None)
    args = [
        '--incremental',
        '--show-column-numbers',
        '--follow-imports', 'error',
        '--namespace-packages',
        '--explicit-package-bases',
    ]
    if settings.get('strict', False):
        args.append('--strict')
    if python:
        args.extend(["--python-executable", python])
    if config_file:
        args.extend(["--config-file", config_file])
    if live_mode:
        with tempfile.NamedTemporaryFile('w', prefix="pyls-mypy-") as tmp_file:
            # Bug fix: write through the already-open handle instead of
            # re-opening the file by name — the second open is redundant and
            # fails on Windows, where an open NamedTemporaryFile cannot be
            # opened again by path. flush() so mypy sees the full contents.
            tmp_file.write(document.source)
            tmp_file.flush()
            args.extend(
                ['--shadow-file', document.path, tmp_file.name, document.path])
            report, errors, _ = mypy_api.run(args)
    elif is_saved:
        args.append(document.path)
        report, errors, _ = mypy_api.run(args)
    else:
        return []
    diagnostics = []
    if errors:
        # Surface mypy's stderr as a single top-of-file error diagnostic.
        diagnostics.append({
            'source': 'mypy',
            'message': str(errors),
            'severity': 1,
            'range': {
                "start": 0,
                "end": 0
            },
        })
    for line in report.splitlines():
        diag = parse_line(line, document)
        if diag:
            diagnostics.append(diag)
    return diagnostics
def main():
    """Type-check the project, then run the karma browser test suite.

    Returns 1 if type checking fails, otherwise the number of failed karma
    trials (0 means overall success).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--runs', default=1, type=int,
                        help='Number of trials to run')
    parser.add_argument('--reporters', nargs='+',
                        help='Enables specified reporters in karma')
    args = parser.parse_args()

    # Do static type checking on the project first.
    type_check_result = mypy_api.run(['streamer/'])
    if type_check_result[2] != 0:  # index 2 is mypy's exit status
        print('The type checker found the following errors: ')
        print(type_check_result[0])  # index 0 is mypy's stdout report
        return 1

    # Install test dependencies.
    subprocess.check_call(['npm', 'install'])

    # Fetch streams used in tests.
    if not os.path.exists(TEST_DIR):
        os.mkdir(TEST_DIR)
        fetch_cloud_assets()

    # Start up flask server on a thread.
    # Daemon is set to True so that this thread automatically gets
    # killed when exiting main. Flask does not have any clean alternatives
    # to be killed.
    threading.Thread(target=app.run, daemon=True).start()

    fails = 0
    trials = args.runs
    print('Running', trials, 'trials')

    for i in range(trials):
        # Start up karma.
        karma_args = [
            'node_modules/karma/bin/karma',
            'start',
            'tests/karma.conf.js',
            # DRM currently is not compatible with headless, so it's run in Chrome.
            # Linux: If you want to run tests as "headless", wrap it with "xvfb-run -a".
            '--browsers', 'Chrome',
            '--single-run',
        ]
        if args.reporters:
            converted_string = ','.join(args.reporters)
            karma_args += [
                '--reporters',
                converted_string,
            ]
        # If the exit code was not 0, the tests in karma failed or crashed.
        if subprocess.call(karma_args) != 0:
            fails += 1

    print('\n\nNumber of failures:', fails, '\nNumber of trials:', trials)
    print('\nSuccess rate:', 100 * (trials - fails) / trials, '%')

    cleanup()
    return fails
def test_mypy_results(config_filename, python_filename, output_filename):
    """Run mypy on a test module with a given config and compare the result
    against an expected-output file.

    When *output_filename* is None the module is expected to type-check
    cleanly (exit code 0); otherwise mypy must fail (exit code 1) and its
    stdout must match the stored output. With GENERATE set, the stored
    output is rewritten instead of compared.
    """
    full_config_filename = f'tests/mypy/configs/{config_filename}'
    full_filename = f'tests/mypy/modules/{python_filename}'
    full_output_filename = None if output_filename is None else f'tests/mypy/outputs/{output_filename}'

    expected_out = ''
    expected_err = ''
    expected_returncode = 0 if output_filename is None else 1
    if full_output_filename is not None:
        with open(full_output_filename, 'r') as f:
            expected_out = f.read()

    # Specifying a different cache dir for each configuration dramatically
    # speeds up subsequent execution. It also prevents
    # cache-invalidation-related bugs in the tests.
    # Bug fix: the original sliced off exactly 4 characters
    # (config_filename[:-4]), which mangles extensions of other lengths
    # such as '.toml'; strip whatever extension is present instead.
    cache_dir = f'.mypy_cache/test-{config_filename.rsplit(".", 1)[0]}'
    actual_result = api.run(
        [full_filename, '--config-file', full_config_filename, '--cache-dir', cache_dir, '--show-error-codes']
    )
    actual_out, actual_err, actual_returncode = actual_result
    # Need to strip filenames due to differences in formatting by OS
    actual_out = '\n'.join(
        ['.py:'.join(line.split('.py:')[1:]) for line in actual_out.split('\n')]).strip()

    if GENERATE and output_filename is not None:
        with open(full_output_filename, 'w') as f:
            f.write(actual_out)
    else:
        assert actual_out == expected_out, actual_out
        assert actual_err == expected_err
        assert actual_returncode == expected_returncode
def test_fail(path):
    """mypy must reject *path*; every reported error must match an inline
    '# E:' marker on the corresponding source line, and vice versa."""
    stdout, stderr, exitcode = api.run([path])
    assert exitcode != 0

    with open(path) as fin:
        lines = fin.readlines()

    # Accumulate all error text per line number; defaultdict("") lets
    # multiple errors for the same line concatenate.
    errors = defaultdict(lambda: "")
    error_lines = stdout.rstrip("\n").split("\n")
    # The final stdout line must be mypy's summary line.
    assert re.match(
        r"Found \d+ errors? in \d+ files? \(checked \d+ source files?\)",
        error_lines[-1].strip(),
    )
    for error_line in error_lines[:-1]:
        error_line = error_line.strip()
        if not error_line:
            continue
        # Error lines look like 'file.py:LINENO: error: ...'.
        lineno = int(error_line.split(":")[1])
        errors[lineno] += error_line

    for i, line in enumerate(lines):
        lineno = i + 1
        # Skip lines with neither a marker nor a reported error.
        if " E:" not in line and lineno not in errors:
            continue

        target_line = lines[lineno - 1]
        if "# E:" in target_line:
            marker = target_line.split("# E:")[-1].strip()
            assert lineno in errors, f'Extra error "{marker}"'
            assert marker in errors[lineno]
        else:
            # mypy reported an error on a line without a '# E:' marker.
            pytest.fail(f"Error {repr(errors[lineno])} not found")
def mypy(argv: List[str], strict: bool = False) -> None:
    """
    Invoke mypy with our preferred options.

    Strict Mode enables additional checks that are currently failing (that
    we plan on integrating once they pass)
    """
    args = [
        '--ignore-missing-imports',    # Don't complain about 3rd party libs with no stubs
        '--disallow-untyped-calls',    # Strict Mode. All function calls must have a return type.
        '--warn-redundant-casts',
        '--disallow-incomplete-defs',  # All parameters must have type definitions.
        '--check-untyped-defs'         # Typecheck on all methods, not just typed ones.
    ]
    if strict:
        args.append('--disallow-untyped-defs')  # All methods must be typed.
    # Default to checking the whole project when no paths were supplied.
    targets = argv or ['.']
    args.extend(targets)
    from mypy import api
    stdout_report, stderr_report, exit_status = api.run(args)
    if stdout_report:
        print(stdout_report)
    if stderr_report:
        sys.stderr.write(stderr_report)
    print('Exit status: {code} ({english})'.format(
        code=exit_status, english='Failure' if exit_status else 'Success'))
    if exit_status:
        issue_count = len(stdout_report.strip().split('\n'))
        print(f'{issue_count} issues')
    sys.exit(exit_status)
def mypy():
    """Run mypy over the project and build a CheckMessage per reported issue.

    Returns a list of CheckMessage objects — empty when mypy is clean.
    (Bug fix: the original returned [] on the clean path but fell off the
    end returning None otherwise; it now always returns a list.)
    """
    print("Performing mypy checks...\n")

    # By default run mypy against the whole database everytime checks
    # are performed. If performance is an issue then `app_configs`
    # can be inspected and the scope of the mypy check can be restricted
    results = api.run([settings.BASE_DIR])
    error_messages = results[0]

    if not error_messages:
        return []

    # Raw string: '\d' and '\w' are invalid escapes in a plain string.
    pattern = re.compile(r"^(.+\d+): (\w+): (.+)")
    messages = []
    for message in error_messages.rstrip().split("\n"):
        parsed = re.match(pattern, message)
        if not parsed:
            continue

        location = parsed.group(1)
        mypy_level = parsed.group(2)
        message = parsed.group(3)

        # Map mypy's severity keywords onto check levels.
        level = DEBUG
        if mypy_level == "note":
            level = INFO
        elif mypy_level == "warning":
            level = WARNING
        elif mypy_level == "error":
            level = ERROR
        else:
            print(f"Unrecognized mypy level: {mypy_level}")

        check_message = CheckMessage(level, message, obj=MyPyErrorLocation(location))
        print(check_message)  # keep the original console output
        messages.append(check_message)

    return messages
def test_types():
    """Type-check all project modules with mypy and fail on any error."""
    targets = [
        "apriori.py",
        "armpy.py",
        "datasetreader.py",
        "fptree.py",
        "generaterules.py",
        "index.py",
        "item.py",
        "test_apriori.py",
        "test_fptree.py",
        "test_index.py",
    ]
    stdout_report, stderr_report, exit_status = api.run(targets)
    if stdout_report:
        print('\nType checking report:\n')
        print(stdout_report)
    if stderr_report:
        print('\nError report:\n')
        print(stderr_report)
    print('\nExit status:', exit_status)
    assert exit_status == 0
def typecheck(line, cell):
    """Run the code in cell through mypy. Does NOT execute code in IPython!

    Any parameters that would normally be passed to the mypy cli can be passed
    on the first line, with the exception of the -c flag we use to pass the
    code from the cell we want to execute

     i.e.

    %%typecheck --ignore-missing-imports
    ...
    ...
    ...

    mypy stdout and stderr will print prior to output of cell. If there are
    no conflicts, nothing will be printed by mypy.
    """
    from mypy import api

    # Dead code removed: the original imported get_ipython (never used, as
    # the cell is intentionally not executed) and set an unused local
    # return_value = True.
    mypy_result = api.run(line.split() + ['-c', cell])

    if mypy_result[0]:  # print mypy stdout
        print("MyPy errors:")
        print(mypy_result[0])

    if mypy_result[1]:  # print mypy stderr
        print("\nMyPy stderr:")
        print(mypy_result[1])
def test_mypy_results(config_filename: str, python_filename: str, output_filename: str) -> None:
    """Run mypy on a test module with a given config and compare against the
    stored expected output.

    When *output_filename* is None the module must type-check cleanly;
    otherwise mypy must exit 1 and its (path-stripped) stdout must equal the
    stored output file. A missing output file is auto-created from the
    actual output and the test fails loudly so the new file gets reviewed.
    """
    full_config_filename = f'tests/mypy/configs/{config_filename}'
    full_filename = f'tests/mypy/modules/{python_filename}'
    output_path = None if output_filename is None else Path(f'tests/mypy/outputs/{output_filename}')

    # Specifying a different cache dir for each configuration dramatically speeds up subsequent execution
    # It also prevents cache-invalidation-related bugs in the tests
    cache_dir = f'.mypy_cache/test-{os.path.splitext(config_filename)[0]}'
    command = [full_filename, '--config-file', full_config_filename, '--cache-dir', cache_dir, '--show-error-codes']
    print(f"\nExecuting: mypy {' '.join(command)}")  # makes it easier to debug as necessary
    actual_result = mypy_api.run(command)
    actual_out, actual_err, actual_returncode = actual_result
    # Need to strip filenames due to differences in formatting by OS
    actual_out = '\n'.join(['.py:'.join(line.split('.py:')[1:]) for line in actual_out.split('\n') if line]).strip()
    # Collapse blank lines so the comparison is whitespace-insensitive.
    actual_out = re.sub(r'\n\s*\n', r'\n', actual_out)

    if actual_out:
        print('{0}\n{1:^100}\n{0}\n{2}\n{0}'.format('=' * 100, 'mypy output', actual_out))

    assert actual_err == ''
    expected_returncode = 0 if output_filename is None else 1
    assert actual_returncode == expected_returncode

    if output_path and not output_path.exists():
        # First run for this case: record the output, then fail so the
        # newly generated file is reviewed before being trusted.
        output_path.write_text(actual_out)
        raise RuntimeError(f'wrote actual output to {output_path} since file did not exist')

    expected_out = Path(output_path).read_text() if output_path else ''
    assert actual_out == expected_out, actual_out
def run_mypy() -> None:
    """Clears the cache and run mypy before running any of the typing tests.

    The mypy results are cached in `OUTPUT_MYPY` for further use.

    The cache refresh can be skipped using

    NUMPY_TYPING_TEST_CLEAR_CACHE=0 pytest numpy/typing/tests
    """
    # Bug fix: environment values are strings, and bool("0") is True, so the
    # documented `=0` opt-out never worked. Interpret common "false"
    # spellings explicitly instead.
    clear_cache = os.environ.get(
        "NUMPY_TYPING_TEST_CLEAR_CACHE", "1").lower() not in ("0", "", "false")
    if os.path.isdir(CACHE_DIR) and clear_cache:
        shutil.rmtree(CACHE_DIR)

    for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR):
        # Run mypy
        stdout, stderr, exit_code = api.run([
            "--config-file",
            MYPY_INI,
            "--cache-dir",
            CACHE_DIR,
            directory,
        ])
        if stderr:
            pytest.fail(f"Unexpected mypy standard error\n\n{stderr}")
        elif exit_code not in {0, 1}:
            # 0 = clean, 1 = type errors found; anything else is a crash.
            pytest.fail(f"Unexpected mypy exit code: {exit_code}\n\n{stdout}")
        stdout = stdout.replace('*', '')

        # Parse the output, grouping mypy's lines by source file.
        iterator = itertools.groupby(stdout.split("\n"), key=_key_func)
        OUTPUT_MYPY.update((k, list(v)) for k, v in iterator if k)
def test_reveal(path):
    """Check mypy's 'Revealed type' notes against the '# E:' markers in *path*."""
    __tracebackhide__ = True

    stdout, stderr, exitcode = api.run([
        "--config-file",
        MYPY_INI,
        "--cache-dir",
        CACHE_DIR,
        path,
    ])

    # Strip '*' so mypy's formatting quirks don't break marker matching.
    with open(path) as fin:
        source_lines = fin.read().replace('*', '').split("\n")

    for raw_line in stdout.replace('*', '').split("\n"):
        note = raw_line.strip()
        if not note:
            continue

        match = re.match(
            r"^.+\.py:(?P<lineno>\d+): note: .+$",
            note,
        )
        if match is None:
            raise ValueError(f"Unexpected reveal line format: {note}")

        # Convert 1-based mypy line numbers to 0-based list indices.
        lineno = int(match.group('lineno')) - 1
        assert "Revealed type is" in note

        marker = source_lines[lineno].split("# E:")[-1].strip()
        _test_reveal(path, marker, note, lineno)
def typecheck(line, cell):
    """Run the following cell though mypy.

    Any parameters that would normally be passed to the mypy CLI can be passed
    on the first line, with the exception of the -c flag we use to pass the
    code from the cell we want to execute. For example:

    ```
    %%typecheck --ignore-missing-imports
    ...
    ...
    ...
    ```

    mypy stdout and stderr will print prior to output of cell. If there are
    no conflicts, nothing will be printed by mypy.
    """
    from IPython import get_ipython
    from mypy import api

    # A leading newline keeps mypy's reported line numbers aligned with the
    # line numbers shown in the jupyter cell.
    source = '\n' + cell

    result = api.run(['-c', source] + line.split())
    stdout_report, stderr_report = result[0], result[1]
    if stdout_report:
        print(stdout_report)
    if stderr_report:
        print(stderr_report)

    get_ipython().run_cell(source)
def typecheck(line, cell):
    """
    Run the following cell though mypy.

    Any parameters that would normally be passed to the mypy cli can be passed
    on the first line, with the exception of the -c flag we use to pass the
    code from the cell we want to execute

     i.e.

    %%typecheck --ignore-missing-imports
    ...
    ...
    ...

    mypy stdout and stderr will print prior to output of cell. If there are
    no conflicts, nothing will be printed by mypy.
    """
    from IPython import get_ipython
    from mypy import api

    stdout_report, stderr_report, _ = api.run(line.split() + ['-c', cell])

    if stdout_report:
        # print mypy stdout
        print(stdout_report)
    if stderr_report:
        # print mypy stderr
        print(stderr_report)

    # Execute the cell normally after the type report.
    get_ipython().run_cell(cell)
def test_reveal(path):
    """Each mypy 'Revealed type' note must contain the '# E:' marker from the
    corresponding source line of *path*."""
    stdout, stderr, exitcode = api.run([
        "--config-file", MYPY_INI,
        "--cache-dir", CACHE_DIR,
        path,
    ])

    with open(path) as fin:
        source_lines = fin.readlines()

    for raw_line in stdout.split("\n"):
        note = raw_line.strip()
        if not note:
            continue

        match = re.match(
            r"^.+\.py:(?P<lineno>\d+): note: .+$",
            note,
        )
        if match is None:
            raise ValueError(f"Unexpected reveal line format: {note}")

        lineno = int(match.group('lineno'))
        assert "Revealed type is" in note

        # Compare against the 1-based source line the note points at.
        marker = source_lines[lineno - 1].split("# E:")[-1].strip()
        assert marker in note
def test_python_evaluation(testcase: DataDrivenTestCase, cache_dir: str) -> None:
    """Runs Mypy in a subprocess.

    If this passes without errors, executes the script again with a given
    Python version.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    # TODO: Enable strict optional for these tests
    mypy_cmdline = [
        '--show-traceback',
        '--no-site-packages',
        '--no-strict-optional',
        '--no-silence-site-packages',
    ]
    if testcase.name.lower().endswith('_newsemanal'):
        mypy_cmdline.append('--new-semantic-analyzer')
    py2 = testcase.name.lower().endswith('python2')
    if py2:
        mypy_cmdline.append('--py2')
        interpreter = try_find_python2_interpreter()
        if interpreter is None:
            # Skip, can't find a Python 2 interpreter.
            pytest.skip()
            # placate the type checker
            return
    else:
        interpreter = python3_path
        mypy_cmdline.append('--python-version={}'.format('.'.join(
            map(str, PYTHON3_VERSION))))

    # Write the program to a file.
    program = '_' + testcase.name + '.py'
    program_path = os.path.join(test_temp_dir, program)
    mypy_cmdline.append(program_path)
    with open(program_path, 'w', encoding='utf8') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    mypy_cmdline.append('--cache-dir={}'.format(cache_dir))
    output = []
    # Type check the program.
    out, err, returncode = api.run(mypy_cmdline)
    # split lines, remove newlines, and remove directory of test case
    for line in (out + err).splitlines():
        if line.startswith(test_temp_dir + os.sep):
            output.append(line[len(test_temp_dir + os.sep):].rstrip("\r\n"))
        else:
            output.append(line.rstrip("\r\n"))
    if returncode == 0:
        # Execute the program.
        returncode, interp_out = run_command([interpreter, program])
        output.extend(interp_out)
    # Remove temp file.
    os.remove(program_path)
    # Reduce machine-specific typeshed paths to bare filenames so expected
    # output stays portable across environments.
    for i, line in enumerate(output):
        if os.path.sep + 'typeshed' + os.path.sep in line:
            output[i] = line.split(os.path.sep)[-1]
    assert_string_arrays_equal(
        adapt_output(testcase), output,
        'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
def mypy(strict: bool = False) -> None:
    """Invoke mypy on the paths given after the subcommand (default: the
    whole project) and exit the process with mypy's exit status.

    Strict mode additionally requires complete parameter annotations.
    """
    args = [
        '--ignore-missing-imports',  # Don't complain about 3rd party libs with no stubs
        '--disallow-untyped-calls',  # Strict Mode. All function calls must have a return type.
    ]
    if strict:
        args.append('--disallow-incomplete-defs')  # All parameters must have type definitions.
    args.extend(sys.argv[2:] or ['.'])  # Invoke on the entire project by default.
    from mypy import api
    result = api.run(args)
    if result[0]:
        print(result[0])  # stdout
    if result[1]:
        sys.stderr.write(result[1])  # stderr
    print('Exit status: {code} ({english})'.format(
        code=result[2], english='Failure' if result[2] else 'Success'))
    if result[2]:
        # Bug fix: strip the trailing newline before splitting so the issue
        # count is not inflated by an empty final element (mirrors the
        # counting done by the argv-taking variant of this helper).
        n = len(result[0].strip().split('\n'))
        print(f'{n} issues')
    sys.exit(result[2])
def pyls_lint(config, workspace, document, is_saved):
    """Lint *document* with mypy from the workspace root and return LSP diagnostics.

    Live mode type-checks the in-memory buffer via --command; otherwise mypy
    runs only on saved documents.
    """
    settings = config.plugin_settings('pyls_mypy')
    live_mode = settings.get('live_mode', True)
    if live_mode:
        args = [
            '--incremental',
            '--show-column-numbers',
            '--follow-imports', 'silent',
            '--command', document.source,
        ]
    elif is_saved:
        args = [
            '--incremental',
            '--show-column-numbers',
            '--follow-imports', 'silent',
            document.path,
        ]
    else:
        return []

    if settings.get('strict', False):
        args.append('--strict')

    # Capture the cwd before entering the try block so the finally clause
    # can never reference it unbound; a leftover debug print(config) was
    # also removed here.
    old_wd = os.getcwd()
    try:
        # mypy resolves relative config/imports from the project root.
        os.chdir(config._root_path)
        report, errors, _ = mypy_api.run(args)
    finally:
        os.chdir(old_wd)

    diagnostics = []
    for line in report.splitlines():
        diag = parse_line(line, document)
        if diag:
            diagnostics.append(diag)

    return diagnostics
def run(filename, use_plugin=True, incremental=False, working_dir=None):
    """Type-check *filename* under --strict, using either the SQLAlchemy
    plugin config or a plain mypy config."""
    if working_dir:
        base_dir = working_dir
    else:
        base_dir = os.path.join(os.path.dirname(__file__), "files")
    path = os.path.join(base_dir, filename)

    config_name = (
        "sqla_mypy_config.cfg" if use_plugin else "plain_mypy_config.cfg"
    )
    args = [
        "--strict",
        "--raise-exceptions",
        "--cache-dir",
        cachedir,
        "--config-file",
        os.path.join(cachedir, config_name),
        path,
    ]
    return api.run(args)
def test_success(path, py2_arg):
    """Files expected to be clean must exit 0 with mypy's success summary."""
    stdout, stderr, exitcode = api.run([path] + py2_arg)
    assert exitcode == 0, stdout
    summary = stdout.strip()
    assert re.match(r'Success: no issues found in \d+ source files?', summary)
def run_mypy(filepath):
    """
    Runs mypy on the file using mypy api and returns the generated output.

    Parameters
    ----------
    filepath : pathlib.Path
        The path of the file to run mypy on.

    Returns
    -------
    normal_report : str
        Output on stdout
    error_report : str
        Output on stderr
    exit_status : int
        The exit status
    """
    # Need to use tempdir since mypy complains that:
    # "site-packages is in PYTHONPATH. Please change directory so it is not."
    filepath = str(filepath)
    with tempfile.TemporaryDirectory() as tempdir:
        dest_filename = os.path.basename(filepath)
        dest = shutil.copyfile(filepath, os.path.join(tempdir, dest_filename))
        # Bug fix: mypy's flag is '--show-error-codes' (plural); the singular
        # '--show-error-code' is rejected by mypy's CLI.
        normal_report, error_report, exit_status = mypy_api.run(
            [dest, '--show-error-codes'])
    return normal_report, error_report, exit_status
def run(cmd):
    """Type-check the snippet *cmd* via mypy --command and assert it is clean."""
    report = api.run(["--command", cmd, "--follow-imports=skip"])
    normal_report, error_report, exit_status = report
    # Show only the final statement of the snippet for readability.
    print(cmd.split(";")[-1])
    print(" ", normal_report)
    print(" ", error_report)
    assert exit_status == 0
def run_mypy() -> None:
    """Clears the cache and run mypy before running any of the typing tests.

    The mypy results are cached in `OUTPUT_MYPY` for further use.
    """
    if os.path.isdir(CACHE_DIR):
        shutil.rmtree(CACHE_DIR)

    for directory in (REVEAL_DIR, PASS_DIR, FAIL_DIR):
        # Run mypy over one test directory at a time.
        stdout, stderr, _ = api.run([
            "--show-absolute-path",
            "--config-file", MYPY_INI,
            "--cache-dir", CACHE_DIR,
            directory,
        ])
        assert not stderr, directory

        # Group mypy's per-file output lines by filename for later lookup.
        cleaned = stdout.replace("*", "")
        grouped = itertools.groupby(cleaned.split("\n"), key=_key_func)
        OUTPUT_MYPY.update((key, list(group)) for key, group in grouped if key)
def test_fail(path, py2_arg):
    """mypy must reject *path*; every reported error must match the inline
    '# E:' marker on the corresponding source line, and vice versa."""
    stdout, stderr, exitcode = api.run([path] + py2_arg)
    assert exitcode != 0

    with open(path) as fin:
        lines = fin.readlines()

    # Map reported line number -> full error line.
    errors = {}
    for error_line in stdout.split("\n"):
        error_line = error_line.strip()
        if not error_line:
            continue
        # NOTE(review): assumes every non-empty stdout line is of the form
        # 'file:lineno: ...'; a summary line without a second colon-separated
        # field would raise here — confirm this mypy invocation emits none.
        lineno = int(error_line.split(":")[1])
        errors[lineno] = error_line

    for i, line in enumerate(lines):
        lineno = i + 1
        # Skip lines with neither an expected marker nor a reported error.
        if " E:" not in line and lineno not in errors:
            continue

        target_line = lines[lineno - 1]
        if "# E:" in target_line:
            marker = target_line.split("# E:")[-1].strip()
            assert lineno in errors, f'Extra error "{marker}"'
            assert marker in errors[lineno]
        else:
            # mypy reported an error on a line without a '# E:' marker.
            pytest.fail(f'Error {repr(errors[lineno])} not found')
def run_mypy(args: List[str]) -> None:
    """Invoke mypy on *args*; fail the test (without a pytest traceback) on
    any non-zero exit, echoing mypy's output first."""
    __tracebackhide__ = True
    extra_flags = ['--show-traceback', '--no-site-packages']
    outval, errval, status = api.run(args + extra_flags)
    if status == 0:
        return
    sys.stdout.write(outval)
    sys.stderr.write(errval)
    pytest.fail(msg="Sample check failed", pytrace=False)
def analyze(python_files: list) -> str:
    """Run mypy on *python_files* and print one Code Climate issue JSON
    (NUL-terminated, as the engine spec requires) per reported problem.

    mypy's stderr, if any, is echoed afterwards.
    """
    mypy_output = api.run(python_files)
    if mypy_output[0]:
        successful_results = mypy_output[0]
        split_issues_on_newline = successful_results.split('\n')
        # Drop mypy's trailing summary/empty line.
        del split_issues_on_newline[-1]
        for issue in split_issues_on_newline:
            line_number_match = re.search(line_number_regex, issue)
            # Default to line 1 when the issue carries no line number.
            if line_number_match is not None:
                line_number = line_number_match.group(1)
            else:
                line_number = 1

            file_name = re.search(file_name_regex, issue).group(1)

            # Bug fix: the original `if x is not None / elif x is None / else`
            # chain made the warning branch unreachable; try each severity's
            # pattern in turn instead.
            description_match = re.search(issue_description_error_regex, issue)
            if description_match is None:
                description_match = re.search(issue_description_note_regex, issue)
            if description_match is None:
                description_match = re.search(issue_description_warning_regex, issue)
            issue_description = description_match.group(1)

            codeclimate_json = dict()
            codeclimate_json['type'] = 'issue'
            codeclimate_json['check_name'] = 'Static Type Check'
            codeclimate_json['categories'] = ['Style']
            codeclimate_json['description'] = issue_description
            location = dict()
            location['path'] = file_name
            location['positions'] = {
                'begin': {
                    'line': int(line_number),
                    'column': 0,
                },
                'end': {
                    'line': int(line_number),
                    'column': 0,
                },
            }
            codeclimate_json['location'] = location
            codeclimate_json['severity'] = 'info'
            codeclimate_json_string = json.dumps(codeclimate_json, indent=4)
            # Code Climate engines terminate each issue document with NUL.
            print(codeclimate_json_string + "\0")
    if mypy_output[1]:
        unsuccessful_result = mypy_output[1]
        print(unsuccessful_result, file=sys.stdout)
def test_python_evaluation(testcase: DataDrivenTestCase, cache_dir: str) -> None:
    """Runs Mypy in a subprocess.

    If this passes without errors, executes the script again with a given
    Python version.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    # TODO: Enable strict optional for these tests
    mypy_cmdline = [
        '--show-traceback',
        '--no-site-packages',
        '--no-strict-optional',
        '--no-silence-site-packages',
    ]
    if testcase.name.lower().endswith('_newsemanal'):
        mypy_cmdline.append('--new-semantic-analyzer')
    py2 = testcase.name.lower().endswith('python2')
    if py2:
        mypy_cmdline.append('--py2')
        interpreter = try_find_python2_interpreter()
        if interpreter is None:
            # Skip, can't find a Python 2 interpreter.
            pytest.skip()
            # placate the type checker
            return
    else:
        interpreter = python3_path
        mypy_cmdline.append('--python-version={}'.format('.'.join(map(str, PYTHON3_VERSION))))

    # Write the program to a file.
    program = '_' + testcase.name + '.py'
    program_path = os.path.join(test_temp_dir, program)
    mypy_cmdline.append(program_path)
    with open(program_path, 'w', encoding='utf8') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    mypy_cmdline.append('--cache-dir={}'.format(cache_dir))
    output = []
    # Type check the program.
    out, err, returncode = api.run(mypy_cmdline)
    # split lines, remove newlines, and remove directory of test case
    for line in (out + err).splitlines():
        if line.startswith(test_temp_dir + os.sep):
            output.append(line[len(test_temp_dir + os.sep):].rstrip("\r\n"))
        else:
            output.append(line.rstrip("\r\n"))
    if returncode == 0:
        # Execute the program.
        returncode, interp_out = run_command([interpreter, program])
        output.extend(interp_out)
    # Remove temp file.
    os.remove(program_path)
    # Reduce machine-specific typeshed paths to bare filenames so expected
    # output stays portable across environments.
    for i, line in enumerate(output):
        if os.path.sep + 'typeshed' + os.path.sep in line:
            output[i] = line.split(os.path.sep)[-1]
    assert_string_arrays_equal(adapt_output(testcase), output,
                               'Invalid output ({}, line {})'.format(
                                   testcase.file, testcase.line))
def type_check_program():
    """Run mypy over ../sudoku_solver and print its reports and exit status."""
    stdout_report, stderr_report, exit_status = api.run(["../sudoku_solver"])
    if stdout_report:
        print('\nType checking report:\n')
        print(stdout_report)
    if stderr_report:
        print('\nError report:\n')
        print(stderr_report)
    print('\nExit status:', exit_status)
def do_static_test(source: str, expected: str) -> None:
    """Type-check *source* with mypy and compare its (path-normalized)
    output against *expected*; the check must report errors (exit 1)."""
    with tempfile.NamedTemporaryFile(suffix=".py") as tmp:
        tmp.write(source.encode())
        tmp.flush()  # make the full contents visible to mypy
        stdout, stderr, retcode = mypy_api.run(["--no-incremental", tmp.name])
        assert stderr == ""
        assert retcode == 1
        # Replace the temp-file path so expectations are path-independent.
        messages = stdout.replace(tmp.name, "##FILE##")
        assert messages == expected
def test_typing_bad():
    """check_typing_bad.py must produce a mypy failure (exit status 1)."""
    candidate = "check_typing_bad.py"
    # When the script is not present in the current directory, follow the
    # path specified by TYPING_TEST_DIRS.
    if os.path.exists(candidate):
        script = candidate
    else:
        script = os.path.join(TYPING_TEST_DIRS, candidate)

    _, msg, status_code = mypy_api.run([script])
    assert status_code == 1, msg
def run_mypy(args: List[str]) -> None:
    """Run mypy with traceback/site-package flags appended; on a non-zero
    exit, echo mypy's output and fail the sample check without a traceback."""
    __tracebackhide__ = True
    extra_flags = [
        '--show-traceback',
        '--no-site-packages',
        '--no-silence-site-packages',
    ]
    outval, errval, status = api.run(args + extra_flags)
    if status == 0:
        return
    sys.stdout.write(outval)
    sys.stderr.write(errval)
    pytest.fail(msg="Sample check failed", pytrace=False)
def _run_mypy(args):
    """Run mypy with the project config file and standard flags prepended to *args*."""
    base_args = [
        '--config-file', str(config),
        # mypy will complain about how older versions (< 6.x) of pytest
        # don't have type annotations. We're not concerned with that.
        '--ignore-missing-imports',
    ]
    return run(base_args + list(args))
def mypy_run(args):
    """Runs mypy with given arguments and shows the result.

    Yields (line, is_error) pairs: stdout lines first (False), then stderr
    lines (True). On any exception the traceback is printed and the
    generator ends without yielding.
    """
    logger.log_cmd(["mypy"] + args)
    try:
        stdout, stderr, exit_code = run(args)
    except BaseException:
        traceback.print_exc()
        return
    for out_line in stdout.splitlines():
        yield out_line, False
    for err_line in stderr.splitlines():
        yield err_line, True
def run (sourcePath):
    """Run static type validation over *sourcePath*, logging every output line.

    NOTE(review): sourcePath is passed to api.run directly (not wrapped in a
    list) and the result is iterated line-by-line — presumably this targets
    the bundled legacy mypy fork's API rather than modern mypy.api.run,
    which takes a list and returns a 3-tuple; confirm before modernizing.
    Re-raises any failure after logging the traceback.
    """
    try:
        utils.log (True, 'Performing static type validation on application: {}\n', sourcePath)
        for line in api.run (sourcePath):
            utils.log (True, line)
        utils.log (True, '\n')
    except Exception as exception:
        # Mirror the traceback to both the console and the log, then re-raise.
        print (traceback.format_exc ())
        utils.log (False, traceback.format_exc ())
        raise exception
def test_python_evaluation(testcase: DataDrivenTestCase) -> None:
    """Runs Mypy in a subprocess.

    If this passes without errors, executes the script again with a given
    Python version.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    mypy_cmdline = ['--show-traceback']
    py2 = testcase.name.lower().endswith('python2')
    if py2:
        mypy_cmdline.append('--py2')
        interpreter = try_find_python2_interpreter()
        if interpreter is None:
            # Skip, can't find a Python 2 interpreter.
            pytest.skip()
            # placate the type checker
            return
    else:
        interpreter = python3_path

    # Write the program to a file.
    program = '_' + testcase.name + '.py'
    program_path = os.path.join(test_temp_dir, program)
    mypy_cmdline.append(program_path)
    with open(program_path, 'w') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    output = []
    # Type check the program.
    out, err, returncode = api.run(mypy_cmdline)
    # split lines, remove newlines, and remove directory of test case
    for line in (out + err).splitlines():
        if line.startswith(test_temp_dir + os.sep):
            output.append(line[len(test_temp_dir + os.sep):].rstrip("\r\n"))
        else:
            output.append(line.rstrip("\r\n"))
    if returncode == 0:
        # Execute the program.
        returncode, interp_out = run([interpreter, program])
        output.extend(interp_out)
    # Remove temp file.
    os.remove(program_path)
    assert_string_arrays_equal(adapt_output(testcase), output,
                               'Invalid output ({}, line {})'.format(
                                   testcase.file, testcase.line))
def run(path, code=None, params=None, **meta):
    """Check code with mypy.

    :return list: List of errors.
    """
    args = [path, '--follow-imports=skip', '--show-column-numbers']
    stdout, stderr, status = api.run(args)
    messages = []
    for line in stdout.split('\n'):
        # Bug fix: the original called line.strip() and discarded the result,
        # so whitespace-only lines slipped through the emptiness check.
        line = line.strip()
        if not line:
            continue
        message = _MyPyMessage(line)
        if message.valid:
            if message.message_type == 'note':
                # Attach the note to the preceding message on the same line.
                # Bug fix: guard against a note arriving first, which would
                # have raised IndexError on messages[-1].
                if messages and messages[-1].line_num == message.line_num:
                    messages[-1].add_note(message.text)
            else:
                messages.append(message)
    return [m.to_result() for m in messages]
# Type-check script that favors the bundled mypy fork over any installed one.
import sys

mypyPath = 'D:/activ_tosh/geatec/transcrypt/qquick/Transcrypt/transcrypt/modules/org/transcrypt/type_check/mypy-master-0.4.7'
sys.path.insert (0, mypyPath)  # Prepend, to favor it over CPython's mypy installation

from mypy import api

result = api.run(sys.argv[1:])

if result[0]:
    # Bug fix: the original printed '/n' literally; the intended escape is '\n'.
    print('\nType checking report:\n')
    print(result[0])  # stdout

if result[1]:
    print('\nError report:\n')
    print(result[1])  # stderr

print ('\nExit status:', result[2])
# Minimal type-check script: forward the command-line arguments to mypy.
import sys

from mypy import api

# Bug fix: mypy's api.run expects a list of CLI arguments; joining argv into
# one space-separated string passes a single bogus argument and breaks any
# path containing spaces.
result = api.run (sys.argv [1:])

if result [0]:
    print ('\nType checking report:\n')
    print (result [0])  # stdout

if result [1]:
    print ('\nError report:\n')
    print (result [1])  # stderr
def mypy_test(self):
    """Test types with mypy."""
    from mypy import api  # pylint: disable=import-error
    stdout, stderr, exit_status = api.run(["-c", mypy_script])
    # Include mypy's full output in the failure message for easy debugging.
    self.assertEqual(exit_status, 0, msg=stdout + stderr)