def test_nonexistant_file(self):
    self.section.append(Setting("language", "bullshit"))
    with self.assertRaises(KeyError):
        LanguageDefinition.from_section(self.section)

    self.section.append(Setting("language_family", "bullshit"))
    with self.assertRaises(FileNotFoundError):
        LanguageDefinition.from_section(self.section)
def test_external_coalang(self):
    with TemporaryDirectory() as directory:
        coalang_file = os.path.join(directory, 'random_language.coalang')
        with open(coalang_file, 'w') as file:
            file.write('extensions = .lol, .ROFL')

        uut = LanguageDefinition("random_language", coalang_dir=directory)
        self.assertIn("extensions", uut)
        self.assertEqual(list(uut["extensions"]), [".lol", ".ROFL"])
def run(self, filename, file, language: str, coalang_dir: str = None):
    """
    Finds out all the positions of strings and comments in a file.
    The Bear searches for valid comments and strings and yields their
    ranges as SourceRange objects in HiddenResults.

    :param language:    The programming language of the source code.
    :param coalang_dir: External directory for coalang file.
    :return:
        One HiddenResult containing a dictionary with keys being 'strings'
        or 'comments' and values being a tuple of SourceRanges pointing to
        the strings and a tuple of SourceRanges pointing to all comments
        respectively. The ranges do include string quotes or the comment
        starting separator but not anything before (e.g. when using
        ``u"string"``, the ``u`` will not be in the source range).
    """
    try:
        lang_dict = LanguageDefinition(language, coalang_dir=coalang_dir)
    except FileNotFoundError:
        content = ('coalang specification for ' + language +
                   ' not found.')
        yield HiddenResult(self, content)
        return

    string_delimiters = dict(lang_dict['string_delimiters'])
    multiline_string_delimiters = dict(
        lang_dict['multiline_string_delimiters'])
    multiline_comment_delimiters = dict(
        lang_dict['multiline_comment_delimiters'])
    comment_delimiter = dict(lang_dict['comment_delimiter'])
    string_ranges = comment_ranges = ()

    try:
        string_ranges, comment_ranges = self.find_annotation_ranges(
            file,
            filename,
            string_delimiters,
            multiline_string_delimiters,
            comment_delimiter,
            multiline_comment_delimiters)
    except NoCloseError as e:
        yield Result(self, str(e), severity=RESULT_SEVERITY.MAJOR,
                     affected_code=(e.code,))

    content = {'strings': string_ranges, 'comments': comment_ranges}
    yield HiddenResult(self, content)
def run(self, filename, file, language: str, coalang_dir: str = None):
    """
    Finds out all the positions of strings and comments in a file.
    The Bear searches for valid comments and strings and yields their
    ranges as SourceRange objects in HiddenResults.

    :param language:    Language whose annotations are to be searched.
    :param coalang_dir: External directory for coalang file.
    :return:
        HiddenResults containing a dictionary with keys as 'strings' or
        'comments' and values as a tuple of SourceRanges of strings and a
        tuple of SourceRanges of comments respectively.
    """
    try:
        lang_dict = LanguageDefinition(language, coalang_dir=coalang_dir)
    except FileNotFoundError:
        content = ("coalang specification for " + language +
                   " not found.")
        yield HiddenResult(self, content)
        return

    string_delimiters = dict(lang_dict["string_delimiters"])
    multiline_string_delimiters = dict(
        lang_dict["multiline_string_delimiters"])
    multiline_comment_delimiters = dict(
        lang_dict["multiline_comment_delimiters"])
    comment_delimiter = dict(lang_dict["comment_delimiter"])
    string_ranges = comment_ranges = ()

    try:
        string_ranges, comment_ranges = self.find_annotation_ranges(
            file,
            filename,
            string_delimiters,
            multiline_string_delimiters,
            comment_delimiter,
            multiline_comment_delimiters)
    except NoCloseError as e:
        yield Result(self, str(e), severity=RESULT_SEVERITY.MAJOR,
                     affected_code=(e.code, ))

    content = {"strings": string_ranges, "comments": comment_ranges}
    yield HiddenResult(self, content)
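# A minimal sketch of how a dependent bear could consume the HiddenResult
# yielded by the two run() variants above. The consumer class name is
# hypothetical; BEAR_DEPS and the dependency_results[AnnotationBear.name]
# lookup mirror the pattern used by IndentationBear.run() further below.
from bears.general.AnnotationBear import AnnotationBear
from coalib.bears.LocalBear import LocalBear


class ExampleConsumerBear(LocalBear):
    BEAR_DEPS = {AnnotationBear}

    def run(self, filename, file, dependency_results: dict):
        # AnnotationBear yields one HiddenResult per file; its ``contents``
        # is the {'strings': ..., 'comments': ...} dictionary built above
        # (or a plain error string when the coalang file was not found).
        annotation_dict = dependency_results[AnnotationBear.name][0].contents
        if not isinstance(annotation_dict, dict):
            return
        for comment_range in annotation_dict['comments']:
            # e.g. skip comment regions while analysing the file
            pass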
def run(self, filename, file, language: str, language_family: str):
    """
    Finds out all the positions of strings and comments in a file.
    The Bear searches for valid comments and strings and yields their
    ranges as SourceRange objects in HiddenResults.

    :param language:        Language whose annotations are to be searched.
    :param language_family: Language family whose annotations are to be
                            searched.
    :return:
        HiddenResults containing a dictionary with keys as 'strings' or
        'comments' and values as a tuple of SourceRanges of strings and a
        tuple of SourceRanges of comments respectively.
    """
    lang_dict = LanguageDefinition(language, language_family)

    # Strings
    # TODO: treat single-line and multiline strings differently
    strings = dict(lang_dict["string_delimiters"])
    strings.update(lang_dict["multiline_string_delimiters"])
    strings_found = self.find_with_start_end(filename, file, strings)

    # Multiline comments
    comments_found = self.find_with_start_end(
        filename, file, dict(lang_dict["multiline_comment_delimiters"]))
    # Single-line comments
    comments_found.update(self.find_singleline_comments(
        filename, file, list(lang_dict["comment_delimiter"])))

    matches_found = strings_found | comments_found

    # Remove nested annotations, e.g. a string delimiter inside a comment.
    unnested_annotations = set(filter(
        lambda arg: not starts_within_ranges(arg, matches_found),
        matches_found))

    # Yield different results for strings and comments
    strings_found = tuple(filter(lambda arg: arg in unnested_annotations,
                                 strings_found))
    comments_found = tuple(filter(lambda arg: arg in unnested_annotations,
                                  comments_found))
    yield HiddenResult(self, {'comments': comments_found,
                              'strings': strings_found})
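# Illustrative-only sketch of the "remove nested" filtering in the run()
# variant above, using plain (start, end) offset tuples instead of
# SourceRange objects. The helper below only mirrors the idea behind
# starts_within_ranges; it is not the real implementation.
def _starts_within_ranges(candidate, all_ranges):
    start = candidate[0]
    return any(other is not candidate and other[0] < start <= other[1]
               for other in all_ranges)


matches_found = {(0, 10), (3, 5), (20, 25)}   # (3, 5) starts inside (0, 10)
unnested = {match for match in matches_found
            if not _starts_within_ranges(match, matches_found)}
assert unnested == {(0, 10), (20, 25)}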
def apply(self, result, original_file_dict, file_diff_dict, language: str,
          coalang_dir: str = None, no_orig: bool = False):
    """
    Add ignore comment
    """
    lang_settings_dict = LanguageDefinition(language,
                                            coalang_dir=coalang_dir)
    comment_delimiter = lang_settings_dict['comment_delimiter']
    ignore_comment = (str(comment_delimiter) + ' Ignore ' +
                      result.origin + '\n')

    source_range = next(filter(lambda sr: exists(sr.file),
                               result.affected_code))
    filename = source_range.file
    ignore_diff = Diff(original_file_dict[filename])
    ignore_diff.change_line(
        source_range.start.line,
        original_file_dict[filename][source_range.start.line - 1],
        original_file_dict[filename][source_range.start.line - 1].rstrip() +
        ' ' + ignore_comment)

    if filename in file_diff_dict:
        ignore_diff = file_diff_dict[filename] + ignore_diff
    else:
        if not no_orig and isfile(filename):
            shutil.copy2(filename, filename + '.orig')

    file_diff_dict[filename] = ignore_diff

    new_filename = ignore_diff.rename if ignore_diff.rename else filename
    with open(new_filename, mode='w', encoding='utf-8') as file:
        file.writelines(ignore_diff.modified)

    return file_diff_dict
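# A small, self-contained check of the ignore-comment construction used in
# apply() above, assuming a Python source file ('#' comment delimiter) and a
# result originating from a bear named 'PEP8Bear' (both are illustrative).
comment_delimiter = '#'
origin = 'PEP8Bear'
ignore_comment = str(comment_delimiter) + ' Ignore ' + origin + '\n'

affected_line = 'result = 1+1\n'
patched_line = affected_line.rstrip() + ' ' + ignore_comment
assert patched_line == 'result = 1+1 # Ignore PEP8Bear\n'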
def test_key_contains(self):
    uut = LanguageDefinition.from_section(self.section)
    self.assertIn('extensions', uut)
    self.assertNotIn('randomstuff', uut)
def run(self,
        filename,
        file,
        dependency_results: dict,
        language: str,
        use_spaces: bool = True,
        indent_size: int = SpacingHelper.DEFAULT_TAB_WIDTH,
        coalang_dir: str = None):
    """
    It is a generic indent bear, which looks for a start and end indent
    specifier, example: ``{ : }`` where "{" is the start indent specifier
    and "}" is the end indent specifier. If the end-specifier is not given,
    this bear looks for unindents within the code to correctly figure out
    indentation.

    It also figures out hanging indents and absolute indentation of
    function params or list elements.

    It does not, however, support indents based on keywords yet. For
    example::

        if(something)
        does not get indented

    undergoes no change.

    WARNING: The IndentationBear is experimental right now; you can report
    any issues found to https://github.com/coala/coala-bears

    :param filename:
        Name of the file that needs to be checked.
    :param file:
        File that needs to be checked in the form of a list of strings.
    :param dependency_results:
        Results given by the AnnotationBear.
    :param language:
        Language to be used for indentation.
    :param use_spaces:
        Insert spaces instead of tabs for indentation.
    :param indent_size:
        Number of spaces per indentation level.
    :param coalang_dir:
        Full path of the external directory containing the coalang file
        for the language.
    """
    lang_settings_dict = LanguageDefinition(language,
                                            coalang_dir=coalang_dir)
    annotation_dict = dependency_results[AnnotationBear.name][0].contents

    # Strings containing ':' sometimes cannot be converted to a dict
    # correctly, so the ':' indent type is special-cased here.
    if ':' in dict(lang_settings_dict['indent_types']).keys():
        indent_types = dict(lang_settings_dict['indent_types'])
        indent_types[':'] = ''
    else:
        indent_types = dict(lang_settings_dict['indent_types'])

    encapsulators = (dict(lang_settings_dict['encapsulators'])
                     if 'encapsulators' in lang_settings_dict else {})

    encaps_pos = []
    for encapsulator in encapsulators:
        encaps_pos += self.get_specified_block_range(
            file, filename,
            encapsulator, encapsulators[encapsulator],
            annotation_dict)
    encaps_pos = tuple(sorted(encaps_pos, key=lambda x: x.start.line))

    comments = dict(lang_settings_dict['comment_delimiter'])
    comments.update(
        dict(lang_settings_dict['multiline_comment_delimiters']))

    try:
        indent_levels = self.get_indent_levels(
            file, filename,
            indent_types, annotation_dict, encaps_pos, comments)
    # This happens only in case of unmatched indents or
    # ExpectedIndentError.
    except (UnmatchedIndentError, ExpectedIndentError) as e:
        yield Result(self, str(e), severity=RESULT_SEVERITY.MAJOR)
        return

    absolute_indent_levels = self.get_absolute_indent_of_range(
        file, filename,
        encaps_pos, annotation_dict)

    insert = ' ' * indent_size if use_spaces else '\t'

    no_indent_file = self._get_no_indent_file(file)
    new_file = self._get_basic_indent_file(no_indent_file,
                                           indent_levels,
                                           insert)
    new_file = self._get_absolute_indent_file(new_file,
                                              absolute_indent_levels,
                                              indent_levels,
                                              insert)

    if new_file != list(file):
        wholediff = Diff.from_string_arrays(file, new_file)
        for diff in wholediff.split_diff():
            yield Result(
                self,
                'The indentation could be changed to improve readability.',
                severity=RESULT_SEVERITY.INFO,
                affected_code=(diff.range(filename), ),
                diffs={filename: diff})
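# Minimal sketch of the diff machinery used in the last block of run()
# above, applied to a hand-written before/after pair. The C snippet and the
# claim that the bear would propose exactly this indentation are
# illustrative; only the Diff usage mirrors the code above.
from coalib.results.Diff import Diff

before = ['int main() {\n', 'return 0;\n', '}\n']
after = ['int main() {\n', '    return 0;\n', '}\n']

wholediff = Diff.from_string_arrays(before, after)
for diff in wholediff.split_diff():
    # Each split diff becomes one INFO Result with
    # affected_code=(diff.range(filename), ) and diffs={filename: diff}.
    print(diff.range('main.c'))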
def test_loading(self):
    uut = LanguageDefinition.from_section(self.section)
    self.assertEqual(list(uut["extensions"]),
                     [".c", ".cpp", ".h", ".hpp"])