def _rst_checker(self, parsed_file):
    """Yield ``(line_number, 'D001', 'Line too long')`` for long RST lines.

    A line is exempt when it falls inside a directive, has no whitespace
    to split on, contains a URL, or belongs to a docutils node type that
    legitimately holds long content (targets, literal blocks).  Title and
    section lines are also exempt when ``self._allow_long_titles`` is set.
    """
    lines = list(parsed_file.lines_iter())
    doc = parsed_file.document
    nodes_lines, first_line = self._extract_node_lines(doc)
    directives = self._extract_directives(lines)

    def find_containing_nodes(num):
        # Content before the first recorded node line is attributed to
        # the document's first node (if there is one).
        if num < first_line and nodes_lines:
            return [nodes_lines[0][0]]
        # All nodes whose recorded [line_min, line_max] span covers num.
        contained_in = [
            (n, (line_min, line_max))
            for (n, (line_min, line_max)) in nodes_lines
            if line_min <= num <= line_max
        ]
        # Keep only the nodes with the tightest span: they are the most
        # specific nodes containing this line.
        smallest_span = None
        best_nodes = []
        for (n, (line_min, line_max)) in contained_in:
            span = line_max - line_min
            if smallest_span is None or span < smallest_span:
                smallest_span = span
                best_nodes = [n]
            elif span == smallest_span:
                best_nodes.append(n)
        return best_nodes

    def any_types(nodes, types):
        # Generator expression: no throwaway list, short-circuits early.
        return any(isinstance(n, types) for n in nodes)

    # Node types whose content may legitimately exceed the line limit.
    skip_types = (
        docutils_nodes.target,
        docutils_nodes.literal_block,
    )
    title_types = (
        docutils_nodes.title,
        docutils_nodes.subtitle,
        docutils_nodes.section,
    )
    for i, line in enumerate(lines):
        if len(line) <= self._max_line_length:
            continue
        # Directive bodies (e.g. code samples) are exempt.
        if any(start <= i <= end for (start, end) in directives):
            continue
        stripped = line.lstrip()
        if ' ' not in stripped:
            # No room to split even if we could.
            continue
        if utils.contains_url(stripped):
            continue
        nodes = find_containing_nodes(i + 1)
        if any_types(nodes, skip_types):
            continue
        if self._allow_long_titles and any_types(nodes, title_types):
            continue
        yield (i + 1, 'D001', 'Line too long')
def _txt_checker(self, parsed_file):
    """Yield a D001 violation for every over-long line of a text file.

    Lines containing a URL are exempt, since URLs cannot be wrapped.
    """
    limit = self._max_line_length
    line_no = 0
    for text in parsed_file.lines_iter():
        line_no += 1
        if len(text) <= limit:
            continue
        if utils.contains_url(text):
            continue
        yield (line_no, "D001", "Line too long")
def _txt_checker(self, parsed_file):
    """Emit a D001 error tuple for each line exceeding the length limit.

    URL-bearing lines are skipped because they cannot be split.
    """
    for index, text in enumerate(parsed_file.lines_iter(), start=1):
        over_limit = len(text) > self._max_line_length
        if over_limit and not utils.contains_url(text):
            yield (index, 'D001', 'Line too long')