def test_parse_error(testcase: DataDrivenTestCase) -> None:
    """Compile a deliberately-broken program and compare the errors reported.

    The test case input is the program source; the expected output is the
    list of compiler error messages.
    """
    try:
        # Compile temporary file. Encode as utf-8, not ascii: test inputs may
        # contain non-ASCII characters, and 'ascii' raised UnicodeEncodeError
        # here before the parser could report anything.
        parse(bytes('\n'.join(testcase.input), 'utf-8'), INPUT_FILE_NAME)
        raise AssertionFailure('No errors reported')
    except CompileError as e:
        # Verify that there was a compile error and that the error messages
        # are equivalent.
        assert_string_arrays_equal(
            testcase.output, e.messages,
            'Invalid compiler output ({}, line {})'.format(testcase.file,
                                                           testcase.line))
def test_parse_error(testcase: DataDrivenTestCase) -> None:
    """Compile a broken program and check the reported error messages.

    The expected messages come from the test case's output section.
    """
    # The test file contains non-ASCII characters, hence utf-8.
    source = bytes('\n'.join(testcase.input), 'utf-8')
    try:
        # Compile temporary file.
        parse(source, INPUT_FILE_NAME, None, Options())
    except CompileError as e:
        # A compile error occurred, as expected: the messages must match.
        assert_string_arrays_equal(
            testcase.output, e.messages,
            'Invalid compiler output ({}, line {})'.format(
                testcase.file, testcase.line))
    else:
        raise AssertionFailure('No errors reported')
def test_python_cmdline(testcase: DataDrivenTestCase) -> None: assert testcase.old_cwd is not None, "test was not properly set up" # Write the program to a file. program = '_program.py' program_path = os.path.join(test_temp_dir, program) with open(program_path, 'w') as file: for s in testcase.input: file.write('{}\n'.format(s)) args = parse_args(testcase.input[0]) args.append('--show-traceback') # Type check the program. fixed = [python3_path, os.path.join(testcase.old_cwd, 'scripts', 'mypy')] process = subprocess.Popen(fixed + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=test_temp_dir) outb = process.stdout.read() # Split output into lines. out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()] result = process.wait() # Remove temp file. os.remove(program_path) # Compare actual output to expected. if testcase.output_files: for path, expected_content in testcase.output_files: if not os.path.exists(path): raise AssertionFailure( 'Expected file {} was not produced by test case'.format( path)) with open(path, 'r') as output_file: actual_output_content = output_file.read().splitlines() normalized_output = normalize_file_output( actual_output_content, os.path.abspath(test_temp_dir)) if testcase.native_sep and os.path.sep == '\\': normalized_output = [ fix_cobertura_filename(line) for line in normalized_output ] normalized_output = normalize_error_messages(normalized_output) assert_string_arrays_equal( expected_content.splitlines(), normalized_output, 'Output file {} did not match its expected output'.format( path)) else: out = normalize_error_messages(out) obvious_result = 1 if out else 0 if obvious_result != result: out.append('== Return code: {}'.format(result)) assert_string_arrays_equal( testcase.output, out, 'Invalid output ({}, line {})'.format(testcase.file, testcase.line))
def verify_cache(self, module_name: str, program_name: str, a: List[str],
                 manager: build.BuildManager) -> None:
    """Verify cache metadata is absent exactly for the modules with errors.

    NOTE: When A imports B and there's an error in B, the cache
    data for B is invalidated, but the cache data for A remains.
    However build.process_graphs() will ignore A's cache data.
    """
    # Modules named in the error output must lack cache files; all others
    # must have them.
    expected_missing = self.find_error_paths(a)
    modules = self.find_module_files()
    modules[module_name] = program_name
    actually_missing = self.find_missing_cache_files(modules, manager)
    if actually_missing != expected_missing:
        raise AssertionFailure("cache data discrepancy %s != %s" %
                               (actually_missing, expected_missing))
def verify_cache(self, module_data: List[Tuple[str, str, str]],
                 a: List[str], manager: build.BuildManager) -> None:
    """Verify cache metadata is absent for (at least) the modules with errors.

    NOTE: When A imports B and there's an error in B, the cache
    data for B is invalidated, but the cache data for A remains.
    However build.process_graphs() will ignore A's cache data.

    Also note that when A imports B, and there's an error in A
    _due to a valid change in B_, the cache data for B will be
    invalidated and updated, but the old cache data for A will
    remain unchanged. As before, build.process_graphs() will
    ignore A's (old) cache data.
    """
    error_paths = self.find_error_paths(a)
    modules = self.find_module_files()
    # Register every module from the test data (source text is irrelevant
    # here, only name -> path).
    for name, path, _text in module_data:
        modules[name] = path
    missing_paths = self.find_missing_cache_files(modules, manager)
    # Every module missing cache data must be one of the error modules
    # (but not every error module necessarily lost its cache — see NOTEs).
    if not missing_paths.issubset(error_paths):
        raise AssertionFailure("cache data discrepancy %s != %s" %
                               (missing_paths, error_paths))
def test_python_evaluation(testcase: DataDrivenTestCase) -> None:
    """Run mypy as a subprocess on a test program and compare its output.

    Mirrors test_python_cmdline: the program is written to a temp file, mypy
    is invoked on it, and either expected report files or expected terminal
    output are checked.
    """
    # Consistent with test_python_cmdline: fail clearly instead of crashing
    # on os.path.join(None, ...) below.
    assert testcase.old_cwd is not None, "test was not properly set up"
    # Write the program to a file.
    program = '_program.py'
    program_path = os.path.join(test_temp_dir, program)
    with open(program_path, 'w') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    args = parse_args(testcase.input[0])
    args.append('--show-traceback')
    # Type check the program.
    fixed = [python3_path,
             os.path.join(testcase.old_cwd, 'scripts', 'mypy')]
    process = subprocess.Popen(fixed + args,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               cwd=test_temp_dir)
    outb = process.stdout.read()
    # Reap the child process; the original never waited, leaving a zombie
    # per test case and discarding the exit status.
    process.wait()
    # Split output into lines.
    out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
    # Remove temp file.
    os.remove(program_path)
    # Compare actual output to expected.
    if testcase.output_files:
        for path, expected_content in testcase.output_files:
            if not os.path.exists(path):
                raise AssertionFailure(
                    'Expected file {} was not produced by test case'.format(
                        path))
            with open(path, 'r') as output_file:
                actual_output_content = output_file.read().splitlines()
            # Strip machine-specific absolute paths from the report.
            # (Local name also fixes the 'noramlized' typo.)
            normalized_output = normalize_file_output(
                actual_output_content, os.path.abspath(test_temp_dir))
            assert_string_arrays_equal(
                expected_content.splitlines(), normalized_output,
                'Output file {} did not match its expected output'.format(
                    path))
    else:
        assert_string_arrays_equal(
            testcase.output, out,
            'Invalid output ({}, line {})'.format(testcase.file,
                                                  testcase.line))
def assert_string_arrays_equal(expected: List[str], actual: List[str],
                               msg: str) -> None:
    """Assert that two string arrays are equal.

    Display any differences in a human-readable form.

    On mismatch, writes both arrays to stderr side by side (identical
    leading/trailing runs elided as '...'), flags differing lines with
    '(diff)', optionally shows a character-alignment hint for the first
    long differing line, and finally raises AssertionFailure(msg).
    """
    actual = clean_up(actual)
    if actual != expected:
        # Lengths of the common prefix/suffix, which are elided from display.
        num_skip_start = num_skipped_prefix_lines(expected, actual)
        num_skip_end = num_skipped_suffix_lines(expected, actual)
        sys.stderr.write('Expected:\n')
        # If we omit some lines at the beginning, indicate it by displaying
        # a line with '...'.
        if num_skip_start > 0:
            sys.stderr.write(' ...\n')
        # Keep track of the first different line.
        first_diff = -1
        # Display only this many first characters of identical lines.
        width = 75
        for i in range(num_skip_start, len(expected) - num_skip_end):
            if i >= len(actual) or expected[i] != actual[i]:
                if first_diff < 0:
                    first_diff = i
                sys.stderr.write(' {:<45} (diff)'.format(expected[i]))
            else:
                # Identical line: show a (possibly truncated) preview.
                e = expected[i]
                sys.stderr.write(' ' + e[:width])
                if len(e) > width:
                    sys.stderr.write('...')
            sys.stderr.write('\n')
        if num_skip_end > 0:
            sys.stderr.write(' ...\n')
        sys.stderr.write('Actual:\n')
        if num_skip_start > 0:
            sys.stderr.write(' ...\n')
        # Same rendering for the actual array.
        for j in range(num_skip_start, len(actual) - num_skip_end):
            if j >= len(expected) or expected[j] != actual[j]:
                sys.stderr.write(' {:<45} (diff)'.format(actual[j]))
            else:
                a = actual[j]
                sys.stderr.write(' ' + a[:width])
                if len(a) > width:
                    sys.stderr.write('...')
            sys.stderr.write('\n')
        if actual == []:
            sys.stderr.write(' (empty)\n')
        if num_skip_end > 0:
            sys.stderr.write(' ...\n')
        sys.stderr.write('\n')
        # For long differing lines, print an alignment aid pinpointing where
        # the two strings diverge.
        if first_diff >= 0 and first_diff < len(actual) and (
                len(expected[first_diff]) >= MIN_LINE_LENGTH_FOR_ALIGNMENT
                or len(actual[first_diff]) >= MIN_LINE_LENGTH_FOR_ALIGNMENT):
            # Display message that helps visualize the differences between
            # two long lines.
            show_align_message(expected[first_diff], actual[first_diff])
        raise AssertionFailure(msg)