def test_python_cmdline(testcase: DataDrivenTestCase) -> None:
    """Run mypy (as a subprocess) on a cmdline test case and check its output.

    Writes the test input to a temporary program file, runs ``python -m mypy``
    on it, and compares either the produced output files or the captured
    stdout/stderr against the test case's expected data.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    # Write the program to a file.
    program = '_program.py'
    program_path = os.path.join(test_temp_dir, program)
    # FIX: write with an explicit utf8 encoding.  The captured output below is
    # decoded as utf8, so writing with the platform-default encoding could
    # mangle non-ASCII test input on some systems (the newer variant of this
    # runner already does this).
    with open(program_path, 'w', encoding='utf8') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    args = parse_args(testcase.input[0])
    args.append('--show-traceback')
    args.append('--no-site-packages')
    # Type check the program.
    fixed = [python3_path, '-m', 'mypy']
    env = os.environ.copy()
    env['PYTHONPATH'] = PREFIX
    process = subprocess.Popen(fixed + args,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               cwd=test_temp_dir,
                               env=env)
    # FIX: communicate() drains the pipe and reaps the child in one documented,
    # deadlock-free step (instead of stdout.read() followed by wait()).
    outb, _ = process.communicate()
    result = process.returncode
    # Split output into lines.
    out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
    # Remove temp file.
    os.remove(program_path)
    # Compare actual output to expected.
    if testcase.output_files:
        for path, expected_content in testcase.output_files:
            if not os.path.exists(path):
                raise AssertionError(
                    'Expected file {} was not produced by test case'.format(path))
            with open(path, 'r') as output_file:
                actual_output_content = output_file.read().splitlines()
            normalized_output = normalize_file_output(
                actual_output_content, os.path.abspath(test_temp_dir))
            if testcase.native_sep and os.path.sep == '\\':
                normalized_output = [fix_cobertura_filename(line)
                                     for line in normalized_output]
            normalized_output = normalize_error_messages(normalized_output)
            assert_string_arrays_equal(
                expected_content.splitlines(), normalized_output,
                'Output file {} did not match its expected output'.format(path))
    else:
        out = normalize_error_messages(out)
        # Exit status 1 is implied by any output; only surface the return code
        # when it disagrees with that expectation.
        obvious_result = 1 if out else 0
        if obvious_result != result:
            out.append('== Return code: {}'.format(result))
        assert_string_arrays_equal(
            testcase.output, out,
            'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
def test_python_cmdline(testcase: DataDrivenTestCase) -> None:
    """Run the in-tree ``scripts/mypy`` on a cmdline test case and check output.

    Writes the test input to a temporary program file, invokes mypy in a
    subprocess, and compares either the produced output files or the captured
    stdout/stderr against the test case's expected data.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    # Write the program to a file.
    program = '_program.py'
    program_path = os.path.join(test_temp_dir, program)
    # FIX: write with an explicit utf8 encoding; the subprocess output is
    # decoded as utf8 below, so the default locale encoding could mangle
    # non-ASCII test input on some platforms.
    with open(program_path, 'w', encoding='utf8') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    args = parse_args(testcase.input[0])
    args.append('--show-traceback')
    # Type check the program.
    fixed = [python3_path, os.path.join(testcase.old_cwd, 'scripts', 'mypy')]
    process = subprocess.Popen(fixed + args,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               cwd=test_temp_dir)
    # FIX: communicate() drains the pipe and reaps the child in one documented,
    # deadlock-free step (instead of stdout.read() followed by wait()).
    outb, _ = process.communicate()
    result = process.returncode
    # Split output into lines.
    out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
    # Remove temp file.
    os.remove(program_path)
    # Compare actual output to expected.
    if testcase.output_files:
        for path, expected_content in testcase.output_files:
            if not os.path.exists(path):
                raise AssertionError(
                    'Expected file {} was not produced by test case'.format(path))
            with open(path, 'r') as output_file:
                actual_output_content = output_file.read().splitlines()
            normalized_output = normalize_file_output(
                actual_output_content, os.path.abspath(test_temp_dir))
            if testcase.native_sep and os.path.sep == '\\':
                normalized_output = [fix_cobertura_filename(line)
                                     for line in normalized_output]
            normalized_output = normalize_error_messages(normalized_output)
            assert_string_arrays_equal(
                expected_content.splitlines(), normalized_output,
                'Output file {} did not match its expected output'.format(path))
    else:
        out = normalize_error_messages(out)
        # Any output implies exit status 1; only surface the return code when
        # it disagrees with that expectation.
        obvious_result = 1 if out else 0
        if obvious_result != result:
            out.append('== Return code: {}'.format(result))
        assert_string_arrays_equal(
            testcase.output, out,
            'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
def test_python_cmdline(testcase: DataDrivenTestCase, step: int) -> None:
    """Run one step of a (possibly multi-step) mypy cmdline test case.

    step: 1-based incremental step number; step 1 is compared against
    ``testcase.output``, later steps against ``testcase.output2[step]``.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    # Write the program to a file.
    program = '_program.py'
    program_path = os.path.join(test_temp_dir, program)
    with open(program_path, 'w', encoding='utf8') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    args = parse_args(testcase.input[0])
    args.append('--show-traceback')
    args.append('--no-site-packages')
    # Suppress the summary line unless the case explicitly opts in, so
    # expected output stays minimal.
    if '--error-summary' not in args:
        args.append('--no-error-summary')
    # Type check the program.
    fixed = [python3_path, '-m', 'mypy']
    env = os.environ.copy()
    env['PYTHONPATH'] = PREFIX
    process = subprocess.Popen(fixed + args,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               cwd=test_temp_dir,
                               env=env)
    outb, errb = process.communicate()
    result = process.returncode
    # Split output into lines.
    out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
    err = [s.rstrip('\n\r') for s in str(errb, 'utf8').splitlines()]
    if "PYCHARM_HOSTED" in os.environ:
        for pos, line in enumerate(err):
            if line.startswith('pydev debugger: '):
                # Delete the attaching debugger message itself, plus the extra newline added.
                del err[pos:pos + 2]
                break
    # Remove temp file.
    os.remove(program_path)
    # Compare actual output to expected.
    if testcase.output_files:
        # Ignore stdout, but we insist on empty stderr and zero status.
        if err or result:
            raise AssertionError(
                'Expected zero status and empty stderr%s, got %d and\n%s' %
                (' on step %d' % step if testcase.output2 else '',
                 result, '\n'.join(err + out)))
        check_test_output_files(testcase, step)
    else:
        if testcase.normalize_output:
            out = normalize_error_messages(err + out)
        # Any output implies exit status 1; only report the return code when
        # it disagrees with that expectation.
        obvious_result = 1 if out else 0
        if obvious_result != result:
            out.append('== Return code: {}'.format(result))
        expected_out = testcase.output if step == 1 else testcase.output2[step]
        # Strip "tmp/" out of the test so that # E: works...
        expected_out = [s.replace("tmp" + os.sep, "") for s in expected_out]
        assert_string_arrays_equal(
            expected_out, out,
            'Invalid output ({}, line {}){}'.format(
                testcase.file, testcase.line,
                ' on step %d' % step if testcase.output2 else ''))
def test_python_evaluation(testcase: DataDrivenTestCase) -> None:
    """Run mypy on an evaluation test case and compare output with expectations.

    Writes the test input to a temporary program file, runs the in-tree
    ``scripts/mypy`` on it, and compares either produced output files or the
    captured stdout/stderr against the expected data.
    """
    # Write the program to a file.
    program = '_program.py'
    program_path = os.path.join(test_temp_dir, program)
    # FIX: write with an explicit utf8 encoding; output is decoded as utf8
    # below, so the default locale encoding could mangle non-ASCII input.
    with open(program_path, 'w', encoding='utf8') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    args = parse_args(testcase.input[0])
    args.append('--show-traceback')
    # Type check the program.
    fixed = [python3_path, os.path.join(testcase.old_cwd, 'scripts', 'mypy')]
    process = subprocess.Popen(fixed + args,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               cwd=test_temp_dir)
    # FIX: the original read stdout but never waited on the child, leaving a
    # zombie process behind.  communicate() drains the pipe and reaps the
    # child in one step.
    outb, _ = process.communicate()
    # Split output into lines.
    out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
    # Remove temp file.
    os.remove(program_path)
    # Compare actual output to expected.
    if testcase.output_files:
        for path, expected_content in testcase.output_files:
            if not os.path.exists(path):
                raise AssertionFailure(
                    'Expected file {} was not produced by test case'.format(path))
            with open(path, 'r') as output_file:
                actual_output_content = output_file.read().splitlines()
            normalized_output = normalize_file_output(
                actual_output_content, os.path.abspath(test_temp_dir))
            if testcase.native_sep and os.path.sep == '\\':
                normalized_output = [fix_cobertura_filename(line)
                                     for line in normalized_output]
            normalized_output = normalize_error_messages(normalized_output)
            assert_string_arrays_equal(
                expected_content.splitlines(), normalized_output,
                'Output file {} did not match its expected output'.format(path))
    else:
        out = normalize_error_messages(out)
        assert_string_arrays_equal(
            testcase.output, out,
            'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
def run_case_once(self, testcase: DataDrivenTestCase, incremental: int = 0) -> None:
    """Type check one test case and compare errors with the expected output.

    incremental: 0 = plain run; 1 = first run of an incremental pair
    (writes the program to disk, expects no output); 2 = second run
    (applies *.py.next file updates before rebuilding).
    """
    find_module_clear_caches()
    program_text = '\n'.join(testcase.input)
    module_name, program_name, program_text = self.parse_module(program_text)
    options = self.parse_options(program_text)
    options.use_builtins_fixtures = True
    options.python_version = testcase_pyversion(testcase.file, testcase.name)

    output = testcase.output
    if incremental:
        options.incremental = True
        if incremental == 1:
            # In run 1, copy program text to program file.
            output = []
            with open(program_name, 'w') as f:
                f.write(program_text)
        elif incremental == 2:
            # In run 2, copy *.py.next files to *.py files.
            for dn, dirs, files in os.walk(os.curdir):
                for file in files:
                    if file.endswith('.py.next'):
                        full = os.path.join(dn, file)
                        target = full[:-5]  # strip the '.next' suffix
                        shutil.copy(full, target)
        # Always set to none so we're forced to reread program_name
        program_text = None

    source = BuildSource(program_name, module_name, program_text)
    try:
        res = build.build(sources=[source],
                          options=options,
                          alt_lib_path=test_temp_dir)
        a = res.errors
    except CompileError as e:
        # Blocking compile error: compare its messages instead.
        res = None
        a = e.messages
    a = normalize_error_messages(a)

    if output != a and self.update_data:
        # Test-update mode: rewrite the expected output in the test file.
        update_testcase_output(testcase, a)
    assert_string_arrays_equal(
        output, a,
        'Invalid type checker output ({}, line {})'.format(
            testcase.file, testcase.line))

    if incremental and res:
        self.verify_cache(module_name, program_name, a, res.manager)
        if testcase.expected_stale_modules is not None and incremental == 2:
            # __main__ is excluded because it is rewritten on every run.
            assert_string_arrays_equal(
                list(sorted(testcase.expected_stale_modules)),
                list(sorted(res.manager.stale_modules.difference({"__main__"}))),
                'Set of stale modules does not match expected set')
def run_test_once(self, testcase: DataDrivenTestCase, incremental: int = 0) -> None:
    """Type check one test case and compare errors with the expected output.

    incremental: 0 = plain run; 1 = first run of an incremental pair
    (writes the program to disk, expects no output); 2 = second run
    (applies *.py.next file updates before rebuilding).
    """
    find_module_clear_caches()
    program_text = '\n'.join(testcase.input)
    module_name, program_name, program_text = self.parse_module(program_text)
    options = self.parse_options(program_text)
    options.use_builtins_fixtures = True
    options.python_version = testcase_pyversion(testcase.file, testcase.name)

    output = testcase.output
    if incremental:
        options.incremental = True
        if incremental == 1:
            # In run 1, copy program text to program file.
            output = []
            with open(program_name, 'w') as f:
                f.write(program_text)
            # Set to None so the build reads the file just written.
            program_text = None
        elif incremental == 2:
            # In run 2, copy *.py.next files to *.py files.
            for dn, dirs, files in os.walk(os.curdir):
                for file in files:
                    if file.endswith('.py.next'):
                        full = os.path.join(dn, file)
                        target = full[:-5]  # strip the '.next' suffix
                        shutil.copy(full, target)

    source = BuildSource(program_name, module_name, program_text)
    try:
        res = build.build(sources=[source],
                          options=options,
                          alt_lib_path=test_temp_dir)
        a = res.errors
    except CompileError as e:
        # Blocking compile error: compare its messages instead.
        res = None
        a = e.messages
    a = normalize_error_messages(a)

    if output != a and mypy.myunit.UPDATE_TESTCASES:
        # Test-update mode: rewrite the expected output in the test file.
        update_testcase_output(testcase, a, mypy.myunit.APPEND_TESTCASES)
    assert_string_arrays_equal(
        output, a,
        'Invalid type checker output ({}, line {})'.format(
            testcase.file, testcase.line))

    if incremental and res:
        self.verify_cache(module_name, program_name, a, res.manager)
        if testcase.expected_stale_modules is not None and incremental == 2:
            # __main__ is excluded because it is rewritten on every run.
            assert_string_arrays_equal(
                list(sorted(testcase.expected_stale_modules)),
                list(sorted(res.manager.stale_modules.difference({"__main__"}))),
                'Set of stale modules does not match expected set')
def run_case(self, testcase: DataDrivenTestCase) -> None: name = testcase.name # We use the test case name to decide which data structures to dump. # Dumping everything would result in very verbose test cases. if name.endswith('_symtable'): kind = SYMTABLE elif name.endswith('_typeinfo'): kind = TYPEINFO elif name.endswith('_types'): kind = TYPES else: kind = AST main_src = '\n'.join(testcase.input) result = self.build(main_src) assert result is not None, 'cases where CompileError occurred should not be run' result.manager.fscache.flush() fine_grained_manager = FineGrainedBuildManager(result) a = [] if result.errors: a.extend(result.errors) target_path = os.path.join(test_temp_dir, 'target.py') shutil.copy(os.path.join(test_temp_dir, 'target.py.next'), target_path) a.extend(self.dump(fine_grained_manager, kind)) old_subexpr = get_subexpressions(result.manager.modules['target']) a.append('==>') new_file, new_types = self.build_increment(fine_grained_manager, 'target', target_path) a.extend(self.dump(fine_grained_manager, kind)) for expr in old_subexpr: if isinstance(expr, TypeVarExpr): # These are merged so we can't perform the check. continue # Verify that old AST nodes are removed from the expression type map. assert expr not in new_types if testcase.normalize_output: a = normalize_error_messages(a) assert_string_arrays_equal( testcase.output, a, 'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
def run_case(self, testcase: DataDrivenTestCase) -> None: name = testcase.name # We use the test case name to decide which data structures to dump. # Dumping everything would result in very verbose test cases. if name.endswith('_symtable'): kind = SYMTABLE elif name.endswith('_typeinfo'): kind = TYPEINFO elif name.endswith('_types'): kind = TYPES else: kind = AST main_src = '\n'.join(testcase.input) result = self.build(main_src) assert result is not None, 'cases where CompileError occurred should not be run' result.manager.fscache.flush() fine_grained_manager = FineGrainedBuildManager(result) a = [] if result.errors: a.extend(result.errors) target_path = os.path.join(test_temp_dir, 'target.py') shutil.copy(os.path.join(test_temp_dir, 'target.py.next'), target_path) a.extend(self.dump(fine_grained_manager, kind)) old_subexpr = get_subexpressions(result.manager.modules['target']) a.append('==>') new_file, new_types = self.build_increment(fine_grained_manager, 'target', target_path) a.extend(self.dump(fine_grained_manager, kind)) for expr in old_subexpr: if isinstance(expr, TypeVarExpr): # These are merged so we can't perform the check. continue # Verify that old AST nodes are removed from the expression type map. assert expr not in new_types if testcase.normalize_output: a = normalize_error_messages(a) assert_string_arrays_equal( testcase.output, a, 'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
def run_test_once(self, testcase: DataDrivenTestCase, incremental: int = 0) -> None:
    """Type check one test case (flags-based build API) and compare errors.

    incremental: 0 = plain run; 1 = first run of an incremental pair
    (writes the program to disk, expects no output); 2 = second run
    (applies *.py.next file updates before rebuilding).
    """
    find_module_clear_caches()
    pyversion = testcase_pyversion(testcase.file, testcase.name)
    program_text = '\n'.join(testcase.input)
    module_name, program_name, program_text = self.parse_options(program_text)
    flags = self.parse_flags(program_text)

    output = testcase.output
    if incremental:
        flags.append(build.INCREMENTAL)
        if incremental == 1:
            # In run 1, copy program text to program file.
            output = []
            with open(program_name, 'w') as f:
                f.write(program_text)
            # Set to None so the build reads the file just written.
            program_text = None
        elif incremental == 2:
            # In run 2, copy *.py.next files to *.py files.
            for dn, dirs, files in os.walk(os.curdir):
                for file in files:
                    if file.endswith('.py.next'):
                        full = os.path.join(dn, file)
                        target = full[:-5]  # strip the '.next' suffix
                        shutil.copy(full, target)

    source = BuildSource(program_name, module_name, program_text)
    try:
        res = build.build(target=build.TYPE_CHECK,
                          sources=[source],
                          pyversion=pyversion,
                          flags=flags + [build.TEST_BUILTINS],
                          alt_lib_path=test_temp_dir)
        a = res.errors
    except CompileError as e:
        # Blocking compile error: compare its messages instead.
        res = None
        a = e.messages
    a = normalize_error_messages(a)

    if output != a and mypy.myunit.UPDATE_TESTCASES:
        # Test-update mode: rewrite the expected output in the test file.
        update_testcase_output(testcase, a, mypy.myunit.APPEND_TESTCASES)
    assert_string_arrays_equal(
        output, a,
        'Invalid type checker output ({}, line {})'.format(
            testcase.file, testcase.line))

    if incremental and res:
        self.verify_cache(module_name, program_name, a, res.manager)
def test_daemon(testcase: DataDrivenTestCase) -> None:
    """Execute each '$ command' step of a daemon script and compare output.

    Each step parsed from the test input is a command line followed by its
    expected output lines.  '{python}' in a command is substituted with the
    running interpreter, and a non-zero exit status is surfaced as an extra
    '== Return code' line.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    for step_num, script_step in enumerate(parse_script(testcase.input), start=1):
        command = script_step[0]
        expected_lines = script_step[1:]
        assert command.startswith('$')
        command = command[1:].strip().replace('{python}', sys.executable)
        status, captured = run_cmd(command)
        actual_lines = normalize_error_messages(captured.splitlines())
        if status:
            actual_lines.append('== Return code: %d' % status)
        assert_string_arrays_equal(
            expected_lines, actual_lines,
            "Command %d (%s) did not give expected output" % (step_num, command))
def run_test_once(self, testcase: DataDrivenTestCase, incremental: int = 0) -> None:
    """Type check one test case and compare errors with the expected output.

    incremental: 0 = plain run; 1 = first run of an incremental pair
    (writes the program to disk, expects no output); 2 = second run
    (applies *.py.next file updates before rebuilding).
    """
    find_module_clear_caches()
    program_text = '\n'.join(testcase.input)
    module_name, program_name, program_text = self.parse_module(program_text)
    options = self.parse_options(program_text)
    options.use_builtins_fixtures = True
    options.python_version = testcase_pyversion(testcase.file, testcase.name)

    output = testcase.output
    if incremental:
        options.incremental = True
        if incremental == 1:
            # In run 1, copy program text to program file.
            output = []
            with open(program_name, 'w') as f:
                f.write(program_text)
            # Set to None so the build reads the file just written.
            program_text = None
        elif incremental == 2:
            # In run 2, copy *.py.next files to *.py files.
            for dn, dirs, files in os.walk(os.curdir):
                for file in files:
                    if file.endswith('.py.next'):
                        full = os.path.join(dn, file)
                        target = full[:-5]  # strip the '.next' suffix
                        shutil.copy(full, target)

    source = BuildSource(program_name, module_name, program_text)
    try:
        res = build.build(sources=[source],
                          options=options,
                          alt_lib_path=test_temp_dir)
        a = res.errors
    except CompileError as e:
        # Blocking compile error: compare its messages instead.
        res = None
        a = e.messages
    a = normalize_error_messages(a)

    if output != a and mypy.myunit.UPDATE_TESTCASES:
        # Test-update mode: rewrite the expected output in the test file.
        update_testcase_output(testcase, a, mypy.myunit.APPEND_TESTCASES)
    assert_string_arrays_equal(
        output, a,
        'Invalid type checker output ({}, line {})'.format(
            testcase.file, testcase.line))

    if incremental and res:
        self.verify_cache(module_name, program_name, a, res.manager)
        if testcase.expected_stale_modules is not None and incremental == 2:
            # __main__ is excluded because it is rewritten on every run.
            assert_string_arrays_equal(
                list(sorted(testcase.expected_stale_modules)),
                list(sorted(res.manager.stale_modules.difference({"__main__"}))),
                'Set of stale modules does not match expected set')
def test_semanal_error(testcase: DataDrivenTestCase) -> None:
    """Perform a test case."""
    program_src = '\n'.join(testcase.input)
    try:
        build_result = build.build(sources=[BuildSource('main', None, program_src)],
                                   options=get_semanal_options(),
                                   alt_lib_path=test_temp_dir)
        messages = build_result.errors
        assert messages, 'No errors reported in {}, line {}'.format(testcase.file,
                                                                    testcase.line)
    except CompileError as e:
        # Verify that there was a compile error and that the error messages
        # are equivalent.
        messages = e.messages
    assert_string_arrays_equal(
        testcase.output, normalize_error_messages(messages),
        'Invalid compiler output ({}, line {})'.format(testcase.file, testcase.line))
def test_semanal_error(testcase: DataDrivenTestCase) -> None:
    """Perform a test case.

    The case is expected to produce semantic-analysis errors, either as
    non-empty build errors or as a CompileError; the (normalized) messages
    are compared against the expected output.
    """
    try:
        src = '\n'.join(testcase.input)
        res = build.build(sources=[BuildSource('main', None, src)],
                          options=get_semanal_options(),
                          alt_lib_path=test_temp_dir)
        a = res.errors
        # The build succeeded: it must still have reported errors.
        assert a, 'No errors reported in {}, line {}'.format(testcase.file, testcase.line)
    except CompileError as e:
        # Verify that there was a compile error and that the error messages
        # are equivalent.
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, normalize_error_messages(a),
        'Invalid compiler output ({}, line {})'.format(testcase.file, testcase.line))
def run_case(self, testcase: DataDrivenTestCase) -> None:
    """Run a mypyc command-line test case.

    Compiles the test program via the mypyc script and, on success, runs it;
    the combined (and for ErrorOutput cases, compile-time) output is compared
    against the expected output.
    """
    # Parse options from test case description (arguments must not have spaces)
    text = '\n'.join(testcase.input)
    m = re.search(r'# *cmd: *(.*)', text)
    assert m is not None, 'Test case missing "# cmd: <files>" section'
    args = m.group(1).split()

    # Write main program to run (not compiled)
    program = '_%s.py' % testcase.name
    program_path = os.path.join(test_temp_dir, program)
    with open(program_path, 'w') as f:
        f.write(text)

    out = b''
    try:
        # Compile program
        cmd = subprocess.run(
            [sys.executable, os.path.join(base_path, 'scripts', 'mypyc')] + args,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd='tmp')
        if 'ErrorOutput' in testcase.name:
            # Compile-error cases compare the compiler's own output.
            out += cmd.stdout

        if cmd.returncode == 0:
            # Run main program
            out += subprocess.check_output([python3_path, program], cwd='tmp')
    finally:
        # Clean up compiled extension modules so later cases start fresh.
        suffix = 'pyd' if sys.platform == 'win32' else 'so'
        so_paths = glob.glob('tmp/**/*.{}'.format(suffix), recursive=True)
        for path in so_paths:
            os.remove(path)

    # Strip out 'tmp/' from error message paths in the testcase output,
    # due to a mismatch between this test and mypy's test suite.
    expected = [x.replace('tmp/', '') for x in testcase.output]

    # Verify output
    actual = normalize_error_messages(out.decode().splitlines())
    assert_test_output(testcase, actual, 'Invalid output', expected=expected)
def test_semanal_error(testcase) -> None:
    """Perform a test case.

    The case is expected to produce semantic-analysis errors, either as
    non-empty build errors or as a CompileError; the (normalized) messages
    are compared against the expected output.
    """
    try:
        src = '\n'.join(testcase.input)
        res = build.build(target=build.SEMANTIC_ANALYSIS,
                          sources=[BuildSource('main', None, src)],
                          flags=[build.TEST_BUILTINS],
                          alt_lib_path=test_temp_dir)
        a = res.errors
        # The build succeeded: it must still have reported errors.
        assert a, 'No errors reported in {}, line {}'.format(testcase.file, testcase.line)
    except CompileError as e:
        # Verify that there was a compile error and that the error messages
        # are equivalent.
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, normalize_error_messages(a),
        'Invalid compiler output ({}, line {})'.format(testcase.file, testcase.line))
def test_semanal_error(testcase) -> None:
    """Perform a test case.

    The case is expected to produce semantic-analysis errors, either as
    non-empty build errors or as a CompileError; the (normalized) messages
    are compared against the expected output.
    """
    try:
        src = '\n'.join(testcase.input)
        res = build.build(target=build.SEMANTIC_ANALYSIS,
                          sources=[BuildSource('main', None, src)],
                          flags=[build.TEST_BUILTINS],
                          alt_lib_path=test_temp_dir)
        a = res.errors
        # The build succeeded: it must still have reported errors.
        assert a, 'No errors reported in {}, line {}'.format(testcase.file, testcase.line)
    except CompileError as e:
        # Verify that there was a compile error and that the error messages
        # are equivalent.
        a = e.messages
    assert_string_arrays_equal(
        testcase.output, normalize_error_messages(a),
        'Invalid compiler output ({}, line {})'.format(testcase.file, testcase.line))
def test_semanal(testcase: DataDrivenTestCase) -> None:
    """Perform a semantic analysis test case.

    The testcase argument contains a description of the test case
    (inputs and output).  The dumped string form of each analyzed source
    file (minus fixtures) is appended to any errors and compared against
    the expected output.
    """
    try:
        src = '\n'.join(testcase.input)
        options = get_semanal_options()
        options.python_version = testfile_pyversion(testcase.file)
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=options,
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            # Errors are handled in the shared comparison path below.
            raise CompileError(a)
        # Include string representations of the source files in the actual
        # output.
        for fnam in sorted(result.files.keys()):
            f = result.files[fnam]
            # Omit the builtins module and files with a special marker in the
            # path.
            # TODO the test is not reliable
            if (not f.path.endswith((os.sep + 'builtins.pyi',
                                     'typing.pyi',
                                     'mypy_extensions.pyi',
                                     'typing_extensions.pyi',
                                     'abc.pyi',
                                     'collections.pyi',
                                     'sys.pyi'))
                    and not os.path.basename(f.path).startswith('_')
                    and not os.path.splitext(
                        os.path.basename(f.path))[0].endswith('_')):
                a += str(f).split('\n')
    except CompileError as e:
        a = e.messages
    if testcase.normalize_output:
        a = normalize_error_messages(a)
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(testcase.file,
                                                                testcase.line))
def test_transform(testcase: DataDrivenTestCase) -> None:
    """Perform an identity transform test case.

    Runs semantic analysis only, applies TypeAssertTransformVisitor to each
    non-fixture file, and compares the dumped trees (plus any errors)
    against the expected output.
    """
    try:
        src = '\n'.join(testcase.input)
        options = parse_options(src, testcase, 1)
        options.use_builtins_fixtures = True
        options.semantic_analysis_only = True
        options.enable_incomplete_features = True
        options.show_traceback = True
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=options,
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            # Errors are handled in the shared comparison path below.
            raise CompileError(a)
        # Include string representations of the source files in the actual
        # output.
        for fnam in sorted(result.files.keys()):
            f = result.files[fnam]
            # Omit the builtins module and files with a special marker in the
            # path.
            # TODO the test is not reliable
            if (not f.path.endswith((os.sep + 'builtins.pyi',
                                     'typing_extensions.pyi',
                                     'typing.pyi',
                                     'abc.pyi',
                                     'sys.pyi'))
                    and not os.path.basename(f.path).startswith('_')
                    and not os.path.splitext(
                        os.path.basename(f.path))[0].endswith('_')):
                t = TypeAssertTransformVisitor()
                t.test_only = True
                f = t.mypyfile(f)
                a += str(f).split('\n')
    except CompileError as e:
        a = e.messages
    if testcase.normalize_output:
        a = normalize_error_messages(a)
    assert_string_arrays_equal(
        testcase.output, a,
        f'Invalid semantic analyzer output ({testcase.file}, line {testcase.line})')
def test_semanal(testcase: DataDrivenTestCase) -> None:
    """Perform a semantic analysis test case.

    The testcase argument contains a description of the test case
    (inputs and output).  The dumped string form of each analyzed source
    file (minus fixtures) is appended to any errors and compared against
    the expected output.
    """
    try:
        src = '\n'.join(testcase.input)
        options = get_semanal_options(src, testcase)
        options.python_version = testfile_pyversion(testcase.file)
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=options,
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            # Errors are handled in the shared comparison path below.
            raise CompileError(a)
        # Include string representations of the source files in the actual
        # output.
        for fnam in sorted(result.files.keys()):
            f = result.files[fnam]
            # Omit the builtins module and files with a special marker in the
            # path.
            # TODO the test is not reliable
            if (not f.path.endswith((os.sep + 'builtins.pyi',
                                     'typing.pyi',
                                     'mypy_extensions.pyi',
                                     'typing_extensions.pyi',
                                     'abc.pyi',
                                     'collections.pyi',
                                     'sys.pyi'))
                    and not os.path.basename(f.path).startswith('_')
                    and not os.path.splitext(
                        os.path.basename(f.path))[0].endswith('_')):
                a += str(f).split('\n')
    except CompileError as e:
        a = e.messages
    if testcase.normalize_output:
        a = normalize_error_messages(a)
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(testcase.file,
                                                                testcase.line))
def test_transform(testcase: DataDrivenTestCase) -> None:
    """Perform an identity transform test case.

    Runs semantic analysis only, applies TypeAssertTransformVisitor to each
    non-fixture file, and compares the dumped trees (plus any errors)
    against the expected output.
    """
    try:
        src = '\n'.join(testcase.input)
        options = Options()
        options.use_builtins_fixtures = True
        options.semantic_analysis_only = True
        options.show_traceback = True
        options.python_version = testfile_pyversion(testcase.file)
        result = build.build(sources=[BuildSource('main', None, src)],
                             options=options,
                             alt_lib_path=test_temp_dir)
        a = result.errors
        if a:
            # Errors are handled in the shared comparison path below.
            raise CompileError(a)
        # Include string representations of the source files in the actual
        # output.
        for fnam in sorted(result.files.keys()):
            f = result.files[fnam]
            # Omit the builtins module and files with a special marker in the
            # path.
            # TODO the test is not reliable
            if (not f.path.endswith((os.sep + 'builtins.pyi',
                                     'typing.pyi',
                                     'abc.pyi'))
                    and not os.path.basename(f.path).startswith('_')
                    and not os.path.splitext(
                        os.path.basename(f.path))[0].endswith('_')):
                t = TypeAssertTransformVisitor()
                f = t.mypyfile(f)
                a += str(f).split('\n')
    except CompileError as e:
        a = e.messages
    if testcase.normalize_output:
        a = normalize_error_messages(a)
    assert_string_arrays_equal(
        testcase.output, a,
        'Invalid semantic analyzer output ({}, line {})'.format(testcase.file,
                                                                testcase.line))
def test_python_cmdline(testcase: DataDrivenTestCase) -> None:
    """Run mypy (as a subprocess) on a cmdline test case and check its output.

    Writes the test input to a temporary program file, runs ``python -m mypy``
    on it, and compares either the produced output files or the captured
    stdout/stderr against the test case's expected data.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    # Write the program to a file.
    program = '_program.py'
    program_path = os.path.join(test_temp_dir, program)
    with open(program_path, 'w') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    args = parse_args(testcase.input[0])
    args.append('--show-traceback')
    args.append('--no-site-packages')
    # Type check the program.
    fixed = [python3_path, '-m', 'mypy']
    env = os.environ.copy()
    env['PYTHONPATH'] = PREFIX
    process = subprocess.Popen(fixed + args,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               cwd=test_temp_dir,
                               env=env)
    outb, errb = process.communicate()
    result = process.returncode
    # Split output into lines.
    out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
    err = [s.rstrip('\n\r') for s in str(errb, 'utf8').splitlines()]
    if "PYCHARM_HOSTED" in os.environ:
        for pos, line in enumerate(err):
            if line.startswith('pydev debugger: '):
                # Delete the attaching debugger message itself, plus the extra newline added.
                del err[pos:pos + 2]
                break
    # Remove temp file.
    os.remove(program_path)
    # Compare actual output to expected.
    if testcase.output_files:
        # Ignore stdout, but we insist on empty stderr and zero status.
        if err or result:
            raise AssertionError(
                'Expected zero status and empty stderr, got %d and\n%s' %
                (result, '\n'.join(err + out)))
        for path, expected_content in testcase.output_files:
            if not os.path.exists(path):
                raise AssertionError(
                    'Expected file {} was not produced by test case'.format(path))
            with open(path, 'r') as output_file:
                actual_output_content = output_file.read().splitlines()
            normalized_output = normalize_file_output(
                actual_output_content, os.path.abspath(test_temp_dir))
            # We always normalize things like timestamp, but only handle operating-system
            # specific things if requested.
            if testcase.normalize_output:
                if testcase.suite.native_sep and os.path.sep == '\\':
                    normalized_output = [fix_cobertura_filename(line)
                                         for line in normalized_output]
                normalized_output = normalize_error_messages(normalized_output)
            assert_string_arrays_equal(
                expected_content.splitlines(), normalized_output,
                'Output file {} did not match its expected output'.format(path))
    else:
        if testcase.normalize_output:
            out = normalize_error_messages(err + out)
        # Any output implies exit status 1; only surface the return code when
        # it disagrees with that expectation.
        obvious_result = 1 if out else 0
        if obvious_result != result:
            out.append('== Return code: {}'.format(result))
        assert_string_arrays_equal(
            testcase.output, out,
            'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
def run_case_once(self, testcase: DataDrivenTestCase, incremental_step: int = 0) -> None:
    """Type check one test case, optionally as one step of an incremental series.

    incremental_step: 0 = plain (non-incremental) run; 1 = first run, which
    writes the modules to disk; >1 = later runs, which first apply the
    *.<step> file updates and [delete] sections for that step.
    """
    find_module_clear_caches()
    original_program_text = '\n'.join(testcase.input)
    module_data = self.parse_module(original_program_text, incremental_step)

    if incremental_step:
        if incremental_step == 1:
            # In run 1, copy program text to program file.
            for module_name, program_path, program_text in module_data:
                if module_name == '__main__':
                    with open(program_path, 'w') as f:
                        f.write(program_text)
                    break
        elif incremental_step > 1:
            # In runs 2+, copy *.[num] files to * files.
            for dn, dirs, files in os.walk(os.curdir):
                for file in files:
                    if file.endswith('.' + str(incremental_step)):
                        full = os.path.join(dn, file)
                        target = full[:-2]  # strip the '.<num>' suffix
                        # Use retries to work around potential flakiness on Windows (AppVeyor).
                        retry_on_error(lambda: shutil.copy(full, target))

                        # In some systems, mtime has a resolution of 1 second which can cause
                        # annoying-to-debug issues when a file has the same size after a
                        # change. We manually set the mtime to circumvent this.
                        new_time = os.stat(target).st_mtime + 1
                        os.utime(target, times=(new_time, new_time))
            # Delete files scheduled to be deleted in [delete <path>.num] sections.
            for path in testcase.deleted_paths.get(incremental_step, set()):
                # Use retries to work around potential flakiness on Windows (AppVeyor).
                retry_on_error(lambda: os.remove(path))

    # Parse options after moving files (in case mypy.ini is being moved).
    options = parse_options(original_program_text, testcase, incremental_step)
    options.use_builtins_fixtures = True
    options.show_traceback = True
    if 'optional' in testcase.file:
        options.strict_optional = True
    if incremental_step:
        options.incremental = True
    else:
        options.cache_dir = os.devnull  # Don't waste time writing cache

    sources = []
    for module_name, program_path, program_text in module_data:
        # Always set to none so we're forced to reread the module in incremental mode
        sources.append(BuildSource(program_path, module_name,
                                   None if incremental_step else program_text))
    res = None
    try:
        res = build.build(sources=sources,
                          options=options,
                          alt_lib_path=test_temp_dir)
        a = res.errors
    except CompileError as e:
        a = e.messages
    a = normalize_error_messages(a)

    # Make sure error messages match
    if incremental_step == 0:
        # Not incremental
        msg = 'Unexpected type checker output ({}, line {})'
        output = testcase.output
    elif incremental_step == 1:
        msg = 'Unexpected type checker output in incremental, run 1 ({}, line {})'
        output = testcase.output
    elif incremental_step > 1:
        msg = ('Unexpected type checker output in incremental, run {}'.format(
            incremental_step) + ' ({}, line {})')
        output = testcase.output2.get(incremental_step, [])
    else:
        raise AssertionError()

    if output != a and self.update_data:
        # Test-update mode: rewrite the expected output in the test file.
        update_testcase_output(testcase, a)
    assert_string_arrays_equal(output, a, msg.format(testcase.file, testcase.line))

    if incremental_step and res:
        if options.follow_imports == 'normal' and testcase.output is None:
            self.verify_cache(module_data, a, res.manager)
        if incremental_step > 1:
            # Step 2 expectations use the unsuffixed section names.
            suffix = '' if incremental_step == 2 else str(incremental_step - 1)
            self.check_module_equivalence(
                'rechecked' + suffix,
                testcase.expected_rechecked_modules.get(incremental_step - 1),
                res.manager.rechecked_modules)
            self.check_module_equivalence(
                'stale' + suffix,
                testcase.expected_stale_modules.get(incremental_step - 1),
                res.manager.stale_modules)
def run_case_once(self, testcase: DataDrivenTestCase, incremental=0) -> None:
    """Type check a testcase program once.

    incremental == 0 is a plain check; incremental == 1 writes the
    program files for the first run; incremental == 2 copies ``*.py.next``
    files over their ``*.py`` counterparts before re-checking.  Normalized
    error output is compared against the expected output.
    """
    find_module_clear_caches()
    original_program_text = '\n'.join(testcase.input)
    module_data = self.parse_module(original_program_text, incremental)

    options = self.parse_options(original_program_text, testcase)
    options.use_builtins_fixtures = True
    options.show_traceback = True
    if 'optional' in testcase.file:
        options.strict_optional = True
    if incremental:
        options.incremental = True

    if incremental == 1:
        # In run 1, copy program text to program file.
        for module_name, program_path, program_text in module_data:
            if module_name == '__main__':
                with open(program_path, 'w') as f:
                    f.write(program_text)
                break
    elif incremental == 2:
        # In run 2, copy *.py.next files to *.py files.
        for dn, dirs, files in os.walk(os.curdir):
            for file in files:
                if file.endswith('.py.next'):
                    full = os.path.join(dn, file)
                    # Strip the '.next' suffix (5 characters).
                    target = full[:-5]
                    shutil.copy(full, target)

                    # In some systems, mtime has a resolution of 1 second which can cause
                    # annoying-to-debug issues when a file has the same size after a
                    # change. We manually set the mtime to circumvent this.
                    new_time = os.stat(target).st_mtime + 1
                    os.utime(target, times=(new_time, new_time))

    sources = []
    for module_name, program_path, program_text in module_data:
        # Always set to none so we're forced to reread the module in incremental mode
        sources.append(
            BuildSource(program_path, module_name,
                        None if incremental else program_text))
    res = None
    try:
        res = build.build(sources=sources,
                          options=options,
                          alt_lib_path=test_temp_dir)
        a = res.errors
    except CompileError as e:
        # A compile error still yields messages we can compare against.
        a = e.messages
    a = normalize_error_messages(a)

    # Make sure error messages match
    if incremental == 0:
        msg = 'Invalid type checker output ({}, line {})'
        output = testcase.output
    elif incremental == 1:
        msg = 'Invalid type checker output in incremental, run 1 ({}, line {})'
        output = testcase.output
    elif incremental == 2:
        msg = 'Invalid type checker output in incremental, run 2 ({}, line {})'
        output = testcase.output2
    else:
        raise AssertionError()

    if output != a and self.update_data:
        update_testcase_output(testcase, a)
    assert_string_arrays_equal(output, a, msg.format(testcase.file, testcase.line))

    if incremental and res:
        if not options.silent_imports and testcase.output is None:
            self.verify_cache(module_data, a, res.manager)
        if incremental == 2:
            self.check_module_equivalence(
                'rechecked',
                testcase.expected_rechecked_modules,
                res.manager.rechecked_modules)
            self.check_module_equivalence('stale', testcase.expected_stale_modules,
                                          res.manager.stale_modules)
def test_python_cmdline(testcase: DataDrivenTestCase, step: int) -> None:
    """Run mypy as a subprocess on the testcase program and compare output.

    Writes the testcase input to a temp program file, invokes
    ``python -m mypy`` with the flags parsed from the first input line,
    and compares either the produced output files (when the testcase
    declares ``output_files``) or the combined stderr+stdout against the
    expected output for this ``step`` of a multi-step testcase.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    # Write the program to a file.
    program = '_program.py'
    program_path = os.path.join(test_temp_dir, program)
    with open(program_path, 'w', encoding='utf8') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    args = parse_args(testcase.input[0])
    args.append('--show-traceback')
    args.append('--no-site-packages')
    # Type check the program.
    fixed = [python3_path, '-m', 'mypy']
    env = os.environ.copy()
    env['PYTHONPATH'] = PREFIX
    process = subprocess.Popen(fixed + args,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               cwd=test_temp_dir,
                               env=env)
    outb, errb = process.communicate()
    result = process.returncode
    # Split output into lines.
    out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
    err = [s.rstrip('\n\r') for s in str(errb, 'utf8').splitlines()]
    if "PYCHARM_HOSTED" in os.environ:
        for pos, line in enumerate(err):
            if line.startswith('pydev debugger: '):
                # Delete the attaching debugger message itself, plus the extra newline added.
                del err[pos:pos + 2]
                break
    # Remove temp file.
    os.remove(program_path)
    # Compare actual output to expected.
    if testcase.output_files:
        # Ignore stdout, but we insist on empty stderr and zero status.
        if err or result:
            raise AssertionError(
                'Expected zero status and empty stderr%s, got %d and\n%s' %
                (' on step %d' % step if testcase.output2 else '',
                 result, '\n'.join(err + out)))
        for path, expected_content in testcase.output_files:
            if not os.path.exists(path):
                raise AssertionError(
                    'Expected file {} was not produced by test case{}'.format(
                        path, ' on step %d' % step if testcase.output2 else ''))
            with open(path, 'r', encoding='utf8') as output_file:
                actual_output_content = output_file.read().splitlines()
            normalized_output = normalize_file_output(actual_output_content,
                                                      os.path.abspath(test_temp_dir))
            # We always normalize things like timestamp, but only handle operating-system
            # specific things if requested.
            if testcase.normalize_output:
                if testcase.suite.native_sep and os.path.sep == '\\':
                    normalized_output = [fix_cobertura_filename(line)
                                         for line in normalized_output]
                normalized_output = normalize_error_messages(normalized_output)
            assert_string_arrays_equal(expected_content.splitlines(), normalized_output,
                                       'Output file {} did not match its expected output{}'.format(
                                           path, ' on step %d' % step if testcase.output2 else ''))
    else:
        if testcase.normalize_output:
            out = normalize_error_messages(err + out)
        # mypy's convention: nonzero exit when there are errors; surface
        # any mismatch between output presence and exit status.
        obvious_result = 1 if out else 0
        if obvious_result != result:
            out.append('== Return code: {}'.format(result))
        expected_out = testcase.output if step == 1 else testcase.output2[step]
        assert_string_arrays_equal(expected_out, out,
                                   'Invalid output ({}, line {}){}'.format(
                                       testcase.file, testcase.line,
                                       ' on step %d' % step if testcase.output2 else ''))
def run_case_once(self, testcase: DataDrivenTestCase,
                  operations: List[FileOperation] = [],
                  incremental_step: int = 0) -> None:
    """Type check a testcase program once.

    Steps 0/1 write the program files; steps 2+ replay the given file
    ``operations`` (updates/deletes) before re-checking.  Temporarily
    prepends the test plugin directory to ``sys.path`` for the build, then
    compares normalized errors, processed targets, rechecked/stale module
    sets, and (optionally) output files against the testcase expectations.

    NOTE(review): ``operations=[]`` is a mutable default; it is only
    iterated here, never mutated, so sharing is harmless.
    """
    original_program_text = '\n'.join(testcase.input)
    module_data = self.parse_module(original_program_text, incremental_step)

    # Unload already loaded plugins, they may be updated.
    for file, _ in testcase.files:
        module = module_from_path(file)
        if module.endswith('_plugin') and module in sys.modules:
            del sys.modules[module]
    if incremental_step == 0 or incremental_step == 1:
        # In run 1, copy program text to program file.
        for module_name, program_path, program_text in module_data:
            if module_name == '__main__':
                with open(program_path, 'w', encoding='utf8') as f:
                    f.write(program_text)
                break
    elif incremental_step > 1:
        # In runs 2+, copy *.[num] files to * files.
        for op in operations:
            if isinstance(op, UpdateFile):
                # Modify/create file
                copy_and_fudge_mtime(op.source_path, op.target_path)
            else:
                # Delete file
                # Use retries to work around potential flakiness on Windows (AppVeyor).
                path = op.path
                retry_on_error(lambda: os.remove(path))

    # Parse options after moving files (in case mypy.ini is being moved).
    options = parse_options(original_program_text, testcase, incremental_step)
    options.use_builtins_fixtures = True
    options.show_traceback = True

    # Enable some options automatically based on test file name.
    if 'optional' in testcase.file:
        options.strict_optional = True
    if 'columns' in testcase.file:
        options.show_column_numbers = True
    if 'errorcodes' in testcase.file:
        options.show_error_codes = True

    if incremental_step and options.incremental:
        # Don't overwrite # flags: --no-incremental in incremental test cases
        options.incremental = True
    else:
        options.incremental = False
        # Don't waste time writing cache unless we are specifically looking for it
        if not testcase.writescache:
            options.cache_dir = os.devnull

    sources = []
    for module_name, program_path, program_text in module_data:
        # Always set to none so we're forced to reread the module in incremental mode
        sources.append(
            BuildSource(program_path, module_name,
                        None if incremental_step else program_text))

    plugin_dir = os.path.join(test_data_prefix, 'plugins')
    sys.path.insert(0, plugin_dir)

    res = None
    try:
        res = build.build(sources=sources,
                          options=options,
                          alt_lib_path=test_temp_dir)
        a = res.errors
    except CompileError as e:
        a = e.messages
    finally:
        # Restore sys.path even if the build raised.
        assert sys.path[0] == plugin_dir
        del sys.path[0]

    if testcase.normalize_output:
        a = normalize_error_messages(a)

    # Make sure error messages match
    if incremental_step == 0:
        # Not incremental
        msg = 'Unexpected type checker output ({}, line {})'
        output = testcase.output
    elif incremental_step == 1:
        msg = 'Unexpected type checker output in incremental, run 1 ({}, line {})'
        output = testcase.output
    elif incremental_step > 1:
        msg = ('Unexpected type checker output in incremental, run {}'.
               format(incremental_step) + ' ({}, line {})')
        output = testcase.output2.get(incremental_step, [])
    else:
        raise AssertionError()

    if output != a and testcase.config.getoption('--update-data', False):
        update_testcase_output(testcase, a)
    assert_string_arrays_equal(output, a, msg.format(testcase.file, testcase.line))

    if res:
        if options.cache_dir != os.devnull:
            self.verify_cache(module_data, res.errors, res.manager, res.graph)

        name = 'targets'
        if incremental_step:
            name += str(incremental_step + 1)
        expected = testcase.expected_fine_grained_targets.get(incremental_step + 1)
        actual = res.manager.processed_targets
        # Skip the initial builtin cycle.
        actual = [t for t in actual
                  if not any(t.startswith(mod)
                             for mod in core_modules + ['mypy_extensions'])]
        if expected is not None:
            assert_target_equivalence(name, expected, actual)
        if incremental_step > 1:
            # Sections are named 'rechecked'/'stale' for step 2, with a
            # numeric suffix for later steps.
            suffix = '' if incremental_step == 2 else str(incremental_step - 1)
            expected_rechecked = testcase.expected_rechecked_modules.get(incremental_step - 1)
            if expected_rechecked is not None:
                assert_module_equivalence(
                    'rechecked' + suffix,
                    expected_rechecked, res.manager.rechecked_modules)
            expected_stale = testcase.expected_stale_modules.get(incremental_step - 1)
            if expected_stale is not None:
                assert_module_equivalence(
                    'stale' + suffix,
                    expected_stale, res.manager.stale_modules)
    if testcase.output_files:
        check_test_output_files(testcase, incremental_step, strip_prefix='tmp/')
def run_case_once(self, testcase: DataDrivenTestCase, incremental_step: int) -> None:
    """Run one step of a dmypy (daemon) incremental testcase.

    Step 1 writes the program files and starts the server (with
    ``--experimental`` when the testcase file is a fine-grained one);
    later steps apply the scheduled ``*.N`` copies and deletions and
    re-check through the same server instance, which survives between
    steps on ``self.server``.
    """
    assert incremental_step >= 1
    build.find_module_clear_caches()
    original_program_text = '\n'.join(testcase.input)

    if incremental_step > 1:
        # In runs 2+, copy *.[num] files to * files.
        for dn, dirs, files in os.walk(os.curdir):
            for file in files:
                if file.endswith('.' + str(incremental_step)):
                    full = os.path.join(dn, file)
                    # Strip the two-character '.N' suffix to get the real name.
                    target = full[:-2]
                    # Use retries to work around potential flakiness on Windows (AppVeyor).
                    retry_on_error(lambda: shutil.copy(full, target))

                    # In some systems, mtime has a resolution of 1 second which can cause
                    # annoying-to-debug issues when a file has the same size after a
                    # change. We manually set the mtime to circumvent this.
                    new_time = os.stat(target).st_mtime + 1
                    os.utime(target, times=(new_time, new_time))
        # Delete files scheduled to be deleted in [delete <path>.num] sections.
        for path in testcase.deleted_paths.get(incremental_step, set()):
            # Use retries to work around potential flakiness on Windows (AppVeyor).
            retry_on_error(lambda: os.remove(path))

    module_data = self.parse_module(original_program_text, incremental_step)

    if incremental_step == 1:
        # In run 1, copy program text to program file.
        for module_name, program_path, program_text in module_data:
            if module_name == '__main__' and program_text is not None:
                with open(program_path, 'w') as f:
                    f.write(program_text)
                break

    # Parse options after moving files (in case mypy.ini is being moved).
    options = self.parse_options(original_program_text, testcase, incremental_step)
    if incremental_step == 1:
        server_options = []  # type: List[str]
        if 'fine-grained' in testcase.file:
            server_options.append('--experimental')
            options.fine_grained_incremental = True
        self.server = dmypy_server.Server(server_options)  # TODO: Fix ugly API
        self.server.options = options

    assert self.server is not None  # Set in step 1 and survives into next steps
    sources = []
    for module_name, program_path, program_text in module_data:
        # Always set to none so we're forced to reread the module in incremental mode
        sources.append(build.BuildSource(program_path, module_name, None))
    response = self.server.check(sources, alt_lib_path=test_temp_dir)
    a = (response['out'] or response['err']).splitlines()
    a = normalize_error_messages(a)

    # Make sure error messages match
    if incremental_step == 1:
        msg = 'Unexpected type checker output in incremental, run 1 ({}, line {})'
        output = testcase.output
    elif incremental_step > 1:
        msg = ('Unexpected type checker output in incremental, run {}'.
               format(incremental_step) + ' ({}, line {})')
        output = testcase.output2.get(incremental_step, [])
    else:
        raise AssertionError()

    if output != a and self.update_data:
        update_testcase_output(testcase, a)
    assert_string_arrays_equal(output, a, msg.format(testcase.file, testcase.line))

    manager = self.server.last_manager
    if manager is not None:
        if options.follow_imports == 'normal' and testcase.output is None:
            self.verify_cache(module_data, a, manager)
        if incremental_step > 1:
            # Sections are named 'rechecked'/'stale' for step 2, with a
            # numeric suffix for later steps.
            suffix = '' if incremental_step == 2 else str(incremental_step - 1)
            self.check_module_equivalence(
                'rechecked' + suffix,
                testcase.expected_rechecked_modules.get(incremental_step - 1),
                manager.rechecked_modules)
            self.check_module_equivalence(
                'stale' + suffix,
                testcase.expected_stale_modules.get(incremental_step - 1),
                manager.stale_modules)
def run_case_once(self, testcase: DataDrivenTestCase, incremental: int = 0) -> None:
    """Type check a testcase program once.

    incremental == 0 is a plain check; incremental == 1 writes the
    program files for the first run; incremental == 2 copies ``*.next``
    files over their base files before re-checking.  Normalized error
    output is compared against the expected output.
    """
    find_module_clear_caches()
    original_program_text = '\n'.join(testcase.input)
    module_data = self.parse_module(original_program_text, incremental)

    if incremental:
        if incremental == 1:
            # In run 1, copy program text to program file.
            for module_name, program_path, program_text in module_data:
                if module_name == '__main__':
                    with open(program_path, 'w') as f:
                        f.write(program_text)
                    break
        elif incremental == 2:
            # In run 2, copy *.next files to * files.
            for dn, dirs, files in os.walk(os.curdir):
                for file in files:
                    if file.endswith('.next'):
                        full = os.path.join(dn, file)
                        # Strip the '.next' suffix (5 characters).
                        target = full[:-5]
                        shutil.copy(full, target)

                        # In some systems, mtime has a resolution of 1 second which can cause
                        # annoying-to-debug issues when a file has the same size after a
                        # change. We manually set the mtime to circumvent this.
                        new_time = os.stat(target).st_mtime + 1
                        os.utime(target, times=(new_time, new_time))

    # Parse options after moving files (in case mypy.ini is being moved).
    options = self.parse_options(original_program_text, testcase)
    options.use_builtins_fixtures = True
    options.show_traceback = True
    if 'optional' in testcase.file:
        options.strict_optional = True
    if incremental:
        options.incremental = True
    if os.path.split(testcase.file)[1] in fast_parser_files:
        options.fast_parser = True

    sources = []
    for module_name, program_path, program_text in module_data:
        # Always set to none so we're forced to reread the module in incremental mode
        sources.append(BuildSource(program_path, module_name,
                                   None if incremental else program_text))
    res = None
    try:
        res = build.build(sources=sources,
                          options=options,
                          alt_lib_path=test_temp_dir)
        a = res.errors
    except CompileError as e:
        # A compile error still yields messages we can compare against.
        a = e.messages
    a = normalize_error_messages(a)

    # Make sure error messages match
    if incremental == 0:
        msg = 'Invalid type checker output ({}, line {})'
        output = testcase.output
    elif incremental == 1:
        msg = 'Invalid type checker output in incremental, run 1 ({}, line {})'
        output = testcase.output
    elif incremental == 2:
        msg = 'Invalid type checker output in incremental, run 2 ({}, line {})'
        output = testcase.output2
    else:
        raise AssertionError()

    if output != a and self.update_data:
        update_testcase_output(testcase, a)
    assert_string_arrays_equal(output, a, msg.format(testcase.file, testcase.line))

    if incremental and res:
        if options.follow_imports == 'normal' and testcase.output is None:
            self.verify_cache(module_data, a, res.manager)
        if incremental == 2:
            self.check_module_equivalence(
                'rechecked',
                testcase.expected_rechecked_modules,
                res.manager.rechecked_modules)
            self.check_module_equivalence(
                'stale', testcase.expected_stale_modules,
                res.manager.stale_modules)
def run_case_once(self, testcase: DataDrivenTestCase, incremental_step: int) -> None:
    """Run one step of a dmypy (daemon) incremental testcase.

    Step 1 writes the program files and starts the server; later steps
    apply the scheduled ``*.N`` copies and deletions and re-check through
    the same server instance, which survives between steps on
    ``self.server``.
    """
    assert incremental_step >= 1
    build.find_module_clear_caches()
    original_program_text = '\n'.join(testcase.input)
    module_data = self.parse_module(original_program_text, incremental_step)

    if incremental_step == 1:
        # In run 1, copy program text to program file.
        for module_name, program_path, program_text in module_data:
            if module_name == '__main__':
                with open(program_path, 'w') as f:
                    f.write(program_text)
                break
    elif incremental_step > 1:
        # In runs 2+, copy *.[num] files to * files.
        for dn, dirs, files in os.walk(os.curdir):
            for file in files:
                if file.endswith('.' + str(incremental_step)):
                    full = os.path.join(dn, file)
                    # Strip the two-character '.N' suffix to get the real name.
                    target = full[:-2]
                    # Use retries to work around potential flakiness on Windows (AppVeyor).
                    retry_on_error(lambda: shutil.copy(full, target))

                    # In some systems, mtime has a resolution of 1 second which can cause
                    # annoying-to-debug issues when a file has the same size after a
                    # change. We manually set the mtime to circumvent this.
                    new_time = os.stat(target).st_mtime + 1
                    os.utime(target, times=(new_time, new_time))
        # Delete files scheduled to be deleted in [delete <path>.num] sections.
        for path in testcase.deleted_paths.get(incremental_step, set()):
            # Use retries to work around potential flakiness on Windows (AppVeyor).
            retry_on_error(lambda: os.remove(path))

    # Parse options after moving files (in case mypy.ini is being moved).
    options = self.parse_options(original_program_text, testcase, incremental_step)
    if incremental_step == 1:
        self.server = dmypy_server.Server([])  # TODO: Fix ugly API
        self.server.options = options

    assert self.server is not None  # Set in step 1 and survives into next steps
    sources = []
    for module_name, program_path, program_text in module_data:
        # Always set to none so we're forced to reread the module in incremental mode
        sources.append(build.BuildSource(program_path, module_name, None))
    response = self.server.check(sources, alt_lib_path=test_temp_dir)
    a = (response['out'] or response['err']).splitlines()
    a = normalize_error_messages(a)

    # Make sure error messages match
    if incremental_step == 1:
        msg = 'Unexpected type checker output in incremental, run 1 ({}, line {})'
        output = testcase.output
    elif incremental_step > 1:
        msg = ('Unexpected type checker output in incremental, run {}'.format(
            incremental_step) + ' ({}, line {})')
        output = testcase.output2.get(incremental_step, [])
    else:
        raise AssertionError()

    if output != a and self.update_data:
        update_testcase_output(testcase, a)
    assert_string_arrays_equal(output, a, msg.format(testcase.file, testcase.line))

    manager = self.server.last_manager
    if manager is not None:
        if options.follow_imports == 'normal' and testcase.output is None:
            self.verify_cache(module_data, a, manager)
        if incremental_step > 1:
            # Sections are named 'rechecked'/'stale' for step 2, with a
            # numeric suffix for later steps.
            suffix = '' if incremental_step == 2 else str(incremental_step - 1)
            self.check_module_equivalence(
                'rechecked' + suffix,
                testcase.expected_rechecked_modules.get(incremental_step - 1),
                manager.rechecked_modules)
            self.check_module_equivalence(
                'stale' + suffix,
                testcase.expected_stale_modules.get(incremental_step - 1),
                manager.stale_modules)
def renormalize_error_messages(messages): messages = [x for x in messages if not x.endswith(' defined here')] return normalize_error_messages(messages)
def run_case_once(self, testcase: DataDrivenTestCase, incremental=0) -> None:
    """Type check a testcase program once.

    incremental == 0 is a plain check; incremental == 1 writes the
    program files (and expects no output on the first run); incremental
    == 2 copies ``*.py.next`` files over their ``*.py`` counterparts
    before re-checking.  Normalized error output is compared against the
    expected output, and on run 2 the set of stale modules is checked.
    """
    find_module_clear_caches()
    original_program_text = '\n'.join(testcase.input)
    module_data = self.parse_module(original_program_text, incremental)

    options = self.parse_options(original_program_text)
    options.use_builtins_fixtures = True
    options.python_version = testcase_pyversion(testcase.file, testcase.name)
    set_show_tb(True)  # Show traceback on crash.

    output = testcase.output

    if incremental:
        options.incremental = True
        if incremental == 1:
            # In run 1, copy program text to program file.
            output = []
            for module_name, program_path, program_text in module_data:
                if module_name == '__main__':
                    with open(program_path, 'w') as f:
                        f.write(program_text)
                    break
        elif incremental == 2:
            # In run 2, copy *.py.next files to *.py files.
            for dn, dirs, files in os.walk(os.curdir):
                for file in files:
                    if file.endswith('.py.next'):
                        full = os.path.join(dn, file)
                        # Strip the '.next' suffix (5 characters).
                        target = full[:-5]
                        shutil.copy(full, target)

                        # In some systems, mtime has a resolution of 1 second which can cause
                        # annoying-to-debug issues when a file has the same size after a
                        # change. We manually set the mtime to circumvent this.
                        new_time = os.stat(target).st_mtime + 1
                        os.utime(target, times=(new_time, new_time))

    sources = []
    for module_name, program_path, program_text in module_data:
        # Always set to none so we're forced to reread the module in incremental mode
        program_text = None if incremental else program_text
        sources.append(BuildSource(program_path, module_name, program_text))
    try:
        res = build.build(sources=sources,
                          options=options,
                          alt_lib_path=test_temp_dir)
        a = res.errors
    except CompileError as e:
        # A compile error still yields messages we can compare against.
        res = None
        a = e.messages
    a = normalize_error_messages(a)

    if output != a and self.update_data:
        update_testcase_output(testcase, a)

    assert_string_arrays_equal(
        output, a,
        'Invalid type checker output ({}, line {})'.format(
            testcase.file, testcase.line))

    if incremental and res:
        if not options.silent_imports:
            self.verify_cache(module_data, a, res.manager)
        if testcase.expected_stale_modules is not None and incremental == 2:
            # '__main__' is always rewritten in run 1, so exclude it.
            assert_string_arrays_equal(
                list(sorted(testcase.expected_stale_modules)),
                list(sorted(res.manager.stale_modules.difference({"__main__"}))),
                'Set of stale modules does not match expected set')
def run_case_once(self, testcase: DataDrivenTestCase,
                  operations: List[FileOperation] = [],
                  incremental_step: int = 0) -> None:
    """Type check a testcase program once.

    Steps 0/1 write the program files; steps 2+ replay the given file
    ``operations`` (updates/deletes) before re-checking.  Normalized
    errors are compared against the expected output, then cache contents
    and rechecked/stale module sets are verified for incremental steps.

    NOTE(review): ``operations=[]`` is a mutable default; it is only
    iterated here, never mutated, so sharing is harmless.
    """
    original_program_text = '\n'.join(testcase.input)
    module_data = self.parse_module(original_program_text, incremental_step)

    if incremental_step == 0 or incremental_step == 1:
        # In run 1, copy program text to program file.
        for module_name, program_path, program_text in module_data:
            if module_name == '__main__':
                with open(program_path, 'w') as f:
                    f.write(program_text)
                break
    elif incremental_step > 1:
        # In runs 2+, copy *.[num] files to * files.
        for op in operations:
            if isinstance(op, UpdateFile):
                # Modify/create file
                copy_and_fudge_mtime(op.source_path, op.target_path)
            else:
                # Delete file
                # Use retries to work around potential flakiness on Windows (AppVeyor).
                path = op.path
                retry_on_error(lambda: os.remove(path))

    # Parse options after moving files (in case mypy.ini is being moved).
    options = parse_options(original_program_text, testcase, incremental_step)
    options.use_builtins_fixtures = True
    options.show_traceback = True
    if 'optional' in testcase.file:
        options.strict_optional = True
    if incremental_step and options.incremental:
        # Don't overwrite # flags: --no-incremental in incremental test cases
        options.incremental = True
    else:
        options.incremental = False
        # Don't waste time writing cache unless we are specifically looking for it
        if 'writescache' not in testcase.name.lower():
            options.cache_dir = os.devnull

    sources = []
    for module_name, program_path, program_text in module_data:
        # Always set to none so we're forced to reread the module in incremental mode
        sources.append(
            BuildSource(program_path, module_name,
                        None if incremental_step else program_text))

    res = None
    try:
        res = build.build(sources=sources,
                          options=options,
                          alt_lib_path=test_temp_dir)
        a = res.errors
    except CompileError as e:
        # A compile error still yields messages we can compare against.
        a = e.messages
    a = normalize_error_messages(a)

    # Make sure error messages match
    if incremental_step == 0:
        # Not incremental
        msg = 'Unexpected type checker output ({}, line {})'
        output = testcase.output
    elif incremental_step == 1:
        msg = 'Unexpected type checker output in incremental, run 1 ({}, line {})'
        output = testcase.output
    elif incremental_step > 1:
        msg = ('Unexpected type checker output in incremental, run {}'.
               format(incremental_step) + ' ({}, line {})')
        output = testcase.output2.get(incremental_step, [])
    else:
        raise AssertionError()

    if output != a and testcase.config.getoption('--update-data', False):
        update_testcase_output(testcase, a)
    assert_string_arrays_equal(output, a, msg.format(testcase.file, testcase.line))

    if res:
        if options.cache_dir != os.devnull:
            self.verify_cache(module_data, res.errors, res.manager, res.graph)
        if incremental_step > 1:
            # Sections are named 'rechecked'/'stale' for step 2, with a
            # numeric suffix for later steps.
            suffix = '' if incremental_step == 2 else str(incremental_step - 1)
            assert_module_equivalence(
                'rechecked' + suffix,
                testcase.expected_rechecked_modules.get(incremental_step - 1),
                res.manager.rechecked_modules)
            assert_module_equivalence(
                'stale' + suffix,
                testcase.expected_stale_modules.get(incremental_step - 1),
                res.manager.stale_modules)
def run_case_once(self, testcase: DataDrivenTestCase, incremental_step: int = 0) -> None:
    """Type check a testcase program once.

    Steps 0/1 write the program files; steps 2+ copy the scheduled
    ``*.N`` files over their base files and apply the deletions from
    ``[delete <path>.N]`` sections before re-checking.  Normalized errors
    are compared against the expected output, the cache is verified, and
    rechecked/stale module sets are checked for later steps.
    """
    original_program_text = '\n'.join(testcase.input)
    module_data = self.parse_module(original_program_text, incremental_step)

    if incremental_step == 0 or incremental_step == 1:
        # In run 1, copy program text to program file.
        for module_name, program_path, program_text in module_data:
            if module_name == '__main__':
                with open(program_path, 'w') as f:
                    f.write(program_text)
                break
    elif incremental_step > 1:
        # In runs 2+, copy *.[num] files to * files.
        for dn, dirs, files in os.walk(os.curdir):
            for file in files:
                if file.endswith('.' + str(incremental_step)):
                    full = os.path.join(dn, file)
                    # Strip the two-character '.N' suffix to get the real name.
                    target = full[:-2]
                    copy_and_fudge_mtime(full, target)
        # Delete files scheduled to be deleted in [delete <path>.num] sections.
        for path in testcase.deleted_paths.get(incremental_step, set()):
            # Use retries to work around potential flakiness on Windows (AppVeyor).
            retry_on_error(lambda: os.remove(path))

    # Parse options after moving files (in case mypy.ini is being moved).
    options = parse_options(original_program_text, testcase, incremental_step)
    options.use_builtins_fixtures = True
    options.show_traceback = True
    if 'optional' in testcase.file:
        options.strict_optional = True
    if incremental_step:
        options.incremental = True

    sources = []
    for module_name, program_path, program_text in module_data:
        # Always set to none so we're forced to reread the module in incremental mode
        sources.append(BuildSource(program_path, module_name,
                                   None if incremental_step else program_text))
    res = None
    try:
        res = build.build(sources=sources,
                          options=options,
                          alt_lib_path=test_temp_dir)
        a = res.errors
    except CompileError as e:
        # A compile error still yields messages we can compare against.
        a = e.messages
    a = normalize_error_messages(a)

    # Make sure error messages match
    if incremental_step == 0:
        # Not incremental
        msg = 'Unexpected type checker output ({}, line {})'
        output = testcase.output
    elif incremental_step == 1:
        msg = 'Unexpected type checker output in incremental, run 1 ({}, line {})'
        output = testcase.output
    elif incremental_step > 1:
        msg = ('Unexpected type checker output in incremental, run {}'.format(
            incremental_step) + ' ({}, line {})')
        output = testcase.output2.get(incremental_step, [])
    else:
        raise AssertionError()

    if output != a and self.update_data:
        update_testcase_output(testcase, a)
    assert_string_arrays_equal(output, a, msg.format(testcase.file, testcase.line))

    if res:
        self.verify_cache(module_data, res.errors, res.manager, res.graph)
        if incremental_step > 1:
            # Sections are named 'rechecked'/'stale' for step 2, with a
            # numeric suffix for later steps.
            suffix = '' if incremental_step == 2 else str(incremental_step - 1)
            assert_module_equivalence(
                'rechecked' + suffix,
                testcase.expected_rechecked_modules.get(incremental_step - 1),
                res.manager.rechecked_modules)
            assert_module_equivalence(
                'stale' + suffix,
                testcase.expected_stale_modules.get(incremental_step - 1),
                res.manager.stale_modules)
def run_case_once(self, testcase: DataDrivenTestCase,
                  operations: List[FileOperation] = [],
                  incremental_step: int = 0) -> None:
    """Type check a testcase program once.

    Steps 0/1 write the program files; steps 2+ replay the given file
    ``operations`` (updates/deletes) before re-checking.  Temporarily
    prepends the test plugin directory to ``sys.path`` for the build,
    then compares normalized errors, cache contents, and the
    rechecked/stale module sets against the testcase expectations.

    NOTE(review): ``operations=[]`` is a mutable default; it is only
    iterated here, never mutated, so sharing is harmless.
    """
    original_program_text = '\n'.join(testcase.input)
    module_data = self.parse_module(original_program_text, incremental_step)

    # Unload already loaded plugins, they may be updated.
    for file, _ in testcase.files:
        module = module_from_path(file)
        if module.endswith('_plugin') and module in sys.modules:
            del sys.modules[module]
    if incremental_step == 0 or incremental_step == 1:
        # In run 1, copy program text to program file.
        for module_name, program_path, program_text in module_data:
            if module_name == '__main__':
                with open(program_path, 'w', encoding='utf8') as f:
                    f.write(program_text)
                break
    elif incremental_step > 1:
        # In runs 2+, copy *.[num] files to * files.
        for op in operations:
            if isinstance(op, UpdateFile):
                # Modify/create file
                copy_and_fudge_mtime(op.source_path, op.target_path)
            else:
                # Delete file
                # Use retries to work around potential flakiness on Windows (AppVeyor).
                path = op.path
                retry_on_error(lambda: os.remove(path))

    # Parse options after moving files (in case mypy.ini is being moved).
    options = parse_options(original_program_text, testcase, incremental_step)
    options.use_builtins_fixtures = True
    options.show_traceback = True
    if 'optional' in testcase.file:
        options.strict_optional = True
    if 'newsemanal' in testcase.file:
        options.new_semantic_analyzer = True
    if incremental_step and options.incremental:
        # Don't overwrite # flags: --no-incremental in incremental test cases
        options.incremental = True
    else:
        options.incremental = False
        # Don't waste time writing cache unless we are specifically looking for it
        if not testcase.writescache:
            options.cache_dir = os.devnull

    sources = []
    for module_name, program_path, program_text in module_data:
        # Always set to none so we're forced to reread the module in incremental mode
        sources.append(BuildSource(program_path, module_name,
                                   None if incremental_step else program_text))

    plugin_dir = os.path.join(test_data_prefix, 'plugins')
    sys.path.insert(0, plugin_dir)

    res = None
    try:
        res = build.build(sources=sources,
                          options=options,
                          alt_lib_path=test_temp_dir)
        a = res.errors
    except CompileError as e:
        a = e.messages
    finally:
        # Restore sys.path even if the build raised.
        assert sys.path[0] == plugin_dir
        del sys.path[0]

    if testcase.normalize_output:
        a = normalize_error_messages(a)

    # Make sure error messages match
    if incremental_step == 0:
        # Not incremental
        msg = 'Unexpected type checker output ({}, line {})'
        output = testcase.output
    elif incremental_step == 1:
        msg = 'Unexpected type checker output in incremental, run 1 ({}, line {})'
        output = testcase.output
    elif incremental_step > 1:
        msg = ('Unexpected type checker output in incremental, run {}'.format(
            incremental_step) + ' ({}, line {})')
        output = testcase.output2.get(incremental_step, [])
    else:
        raise AssertionError()

    if output != a and testcase.config.getoption('--update-data', False):
        update_testcase_output(testcase, a)
    assert_string_arrays_equal(output, a, msg.format(testcase.file, testcase.line))

    if res:
        if options.cache_dir != os.devnull:
            self.verify_cache(module_data, res.errors, res.manager, res.graph)
        if incremental_step > 1:
            # Sections are named 'rechecked'/'stale' for step 2, with a
            # numeric suffix for later steps.
            suffix = '' if incremental_step == 2 else str(incremental_step - 1)
            assert_module_equivalence(
                'rechecked' + suffix,
                testcase.expected_rechecked_modules.get(incremental_step - 1),
                res.manager.rechecked_modules)
            assert_module_equivalence(
                'stale' + suffix,
                testcase.expected_stale_modules.get(incremental_step - 1),
                res.manager.stale_modules)