def context_lines(self, aList, i, n=2):
    '''Return a list containing the n lines of surrounding context of aList[i].'''
    result = []
    aList1 = aList[max(0, i-n):i]
    aList2 = aList[i+1:i+n+1]
    result.extend([' %4s %r\n' % (i + 1 - len(aList1) + j, g.truncate(s, 60))
        for j, s in enumerate(aList1)])
    result.append('* %4s %r\n' % (i + 1, g.truncate(aList[i], 60)))
    result.extend([' %4s %r\n' % (i + 2 + j, g.truncate(s, 60))
        for j, s in enumerate(aList2)])
    return result
def context_lines(self, aList, i, n=2):
    """Return a list containing the n lines of surrounding context of aList[i]."""
    result = []
    aList1 = aList[max(0, i - n) : i]
    aList2 = aList[i + 1 : i + n + 1]
    result.extend([' %4s %r\n' % (i + 1 - len(aList1) + j, g.truncate(s, 60))
        for j, s in enumerate(aList1)])
    result.append('* %4s %r\n' % (i + 1, g.truncate(aList[i], 60)))
    result.extend([' %4s %r\n' % (i + 2 + j, g.truncate(s, 60))
        for j, s in enumerate(aList2)])
    return result
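# Hedged usage sketch (not from the original source). Assuming g.truncate(s, 60)
# merely shortens over-long strings, calling context_lines on a class instance
# with aList = ['a', 'b', 'c', 'd', 'e'] and i = 2 returns roughly:
#
#   ["    1 'a'\n", "    2 'b'\n", "*    3 'c'\n", "    4 'd'\n", "    5 'e'\n"]
#
# i.e. one-based line numbers with '*' marking aList[i], and up to n lines of
# context on each side (clipped at the ends of the list).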
def clean_headline(self, s):
    '''Return a cleaned up headline s.'''
    s = s.strip()
    # Don't clean a headline twice.
    if s.endswith('>>') and s.startswith('<<'):
        return s
    elif 1:
        # Imo, showing the whole line is better than truncating it.
        # However, the lines must have a reasonable length.
        return g.truncate(s, 100)
    else:
        i = s.find('(')
        if i > -1:
            s = s[:i]
        return g.truncate(s, 100)
def addAbbrevHelper(self, s, tag=''):
    '''Enter the abbreviation 's' into the self.abbrevs dict.'''
    trace = False and not g.unitTesting
    if not s.strip():
        return
    try:
        d = self.abbrevs
        data = s.split('=')
        # Do *not* strip ws so the user can specify ws.
        name = data[0].replace('\\t', '\t').replace('\\n', '\n')
        val = '='.join(data[1:])
        if val.endswith('\n'):
            val = val[:-1]
        val = self.n_regex.sub('\n', val).replace('\\\\n', '\\n')
        old, tag = d.get(name, (None, None),)
        if old and old != val and not g.unitTesting:
            g.es_print('redefining abbreviation', name,
                '\nfrom', repr(old), 'to', repr(val))
        if trace:
            val1 = val if val.find('\n') == -1 else g.splitLines(val)[0] + '...'
            g.trace('%12s %r' % (name, g.truncate(val1, 80)))
        d[name] = val, tag
    except ValueError:
        g.es_print('bad abbreviation: %s' % s)
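# Hedged, self-contained sketch (not Leo's code): the name/value parsing that
# addAbbrevHelper applies to one 'name=value' definition line, in isolation.
# The n_regex below is an assumption standing in for self.n_regex.
import re

def parse_abbrev_line(s):
    """Split an abbreviation definition into (name, value), as above."""
    n_regex = re.compile(r'\\n')  # assumed: matches a literal backslash + n
    data = s.split('=')
    # Do *not* strip ws so the user can specify ws.
    name = data[0].replace('\\t', '\t').replace('\\n', '\n')
    val = '='.join(data[1:])
    if val.endswith('\n'):
        val = val[:-1]
    val = n_regex.sub('\n', val).replace('\\\\n', '\\n')
    return name, val

# Example: the expansion may contain escaped newlines.
print(parse_abbrev_line('sig=Regards,\\nEdward'))  # ('sig', 'Regards,\nEdward')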
def scan_nonsentinel_lines(self, lines, n, root):
    '''
    Scan a list of lines containing sentinels, looking for the node and
    offset within the node of the n'th (one-based) line.

    Only non-sentinel lines increment the global line count, but @+node
    sentinels reset the offset within the node.

    Return gnx, h, offset:
    gnx:    the gnx of the #@+node
    h:      the headline of the #@+node
    offset: the offset of line n within the node.
    '''
    trace = False and not g.unitTesting
    trace_lines = True
    delim1, delim2 = self.get_delims(root)
    count, gnx, h, offset = 0, root.gnx, root.h, 0
    stack = [(gnx, h, offset),]
    if trace: g.trace('=====', delim1, delim2, n)
    for s in lines:
        is_sentinel = self.is_sentinel(delim1, delim2, s)
        # if trace and trace_lines: g.trace('%5s %s' % (is_sentinel, s.rstrip()))
        if is_sentinel:
            s2 = s.strip()[len(delim1):]
            if s2.startswith('@+node'):
                # Invisible, but resets the offset.
                offset = 0
                gnx, h = self.get_script_node_info(s, delim2)
                if trace: g.trace('@+node: %30s %5s %s' % (gnx, count + 1, g.truncate(h, 50)))
            elif s2.startswith('@+others') or s2.startswith('@+<<'):
                stack.append((gnx, h, offset),)
                # @others is visible in the outline, but *not* in the file.
                offset += 1
            elif s2.startswith('@-others') or s2.startswith('@-<<'):
                gnx, h, offset = stack.pop()
                # @-others is invisible.
                offset += 1
            elif s2.startswith('@@'):
                # Directives are visible in the outline, but *not* in the file.
                offset += 1
            else:
                # All other sentinels are invisible to the user.
                offset += 1
        else:
            # Non-sentinel lines are visible both in the outline and the file.
            count += 1
            offset += 1
        if trace and trace_lines: g.trace(count, offset, s.rstrip())
        if count == n:
            # Count is the real, one-based count.
            break
    else:
        gnx, h, offset = None, None, -1
    if trace: g.trace('----- gnx:', gnx, 'h:', h, 'offset:', offset)
    return gnx, h, offset
def clean_headline(self, s, p=None):
    '''Return a cleaned up headline s.'''
    s = s.strip()
    # Don't clean a headline twice.
    if s.endswith('>>') and s.startswith('<<'):
        return s
    for ch in '{(=':
        if s.endswith(ch):
            s = s[:-1].strip()
    return g.truncate(s, 100)
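# Hedged example (not from the source): the loop above strips one trailing
# '{', '(' or '=' so that, e.g., 'my_function (' becomes 'my_function' and
# 'class Ham {' becomes 'class Ham' before truncation to 100 characters.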
def scan_nonsentinel_lines(self, lines, n, root):
    '''
    Scan a list of lines containing sentinels, looking for the node and
    offset within the node of the n'th (one-based) line.

    Only non-sentinel lines increment the global line count, but @+node
    sentinels reset the offset within the node.

    Return gnx, h, offset:
    gnx:    the gnx of the #@+node
    h:      the headline of the #@+node
    offset: the offset of line n within the node.
    '''
    trace = False and not g.unitTesting
    trace_lines = True
    delim1, delim2 = self.get_delims(root)
    count, gnx, h, offset = 0, root.gnx, root.h, 0
    stack = [(gnx, h, offset),]
    if trace: g.trace('=====', delim1, delim2, n)
    for s in lines:
        is_sentinel = self.is_sentinel(delim1, delim2, s)
        # if trace and trace_lines: g.trace('%5s %s' % (is_sentinel, s.rstrip()))
        if is_sentinel:
            s2 = s.strip()[len(delim1):]
            if s2.startswith('@+node'):
                # Invisible, but resets the offset.
                offset = 0
                gnx, h = self.get_script_node_info(s, delim2)
                if trace: g.trace('@+node: %30s %5s %s' % (gnx, count+1, g.truncate(h, 50)))
            elif s2.startswith('@+others') or s2.startswith('@+<<'):
                stack.append((gnx, h, offset),)
                # @others is visible in the outline, but *not* in the file.
                offset += 1
            elif s2.startswith('@-others') or s2.startswith('@-<<'):
                gnx, h, offset = stack.pop()
                # @-others is invisible.
                offset += 1
            elif s2.startswith('@@'):
                # Directives are visible in the outline, but *not* in the file.
                offset += 1
            else:
                # All other sentinels are invisible to the user.
                offset += 1
        else:
            # Non-sentinel lines are visible both in the outline and the file.
            count += 1
            offset += 1
        if trace and trace_lines: g.trace(count, offset, s.rstrip())
        if count == n:
            # Count is the real, one-based count.
            break
    else:
        gnx, h, offset = None, None, -1
    if trace: g.trace('----- gnx:', gnx, 'h:', h, 'offset:', offset)
    return gnx, h, offset
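# Hedged worked example (file lines invented for illustration, delim1 == '#'):
# only non-sentinel lines advance the global count, while an @+node sentinel
# resets the per-node offset and every other sentinel merely bumps it.
#
#   #@+node:<gnx>: *4* spam        sentinel: offset = 0
#   def spam():                    count 1, offset 1
#   #@+others                      sentinel: offset 2
#       pass                       count 2, offset 3
#
# With n == 2, scan_nonsentinel_lines would stop at 'pass' and return the gnx
# and headline of the 'spam' node together with offset 3.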
def find_long_lines(event):
    """Report long lines in the log, with clickable links."""
    c = event and event.get('c')
    if not c:
        return
    #@+others # helper functions
    #@+node:ekr.20190609135639.1: *4* function: get_root
    def get_root(p):
        """Return the nearest ancestor @<file> node of p, or None."""
        for parent in p.self_and_parents():
            if parent.anyAtFileNodeName():
                return parent
        return None
    #@+node:ekr.20190608084751.2: *4* function: in_no_pylint
    def in_nopylint(p):
        """Return True if p is controlled by @nopylint."""
        for parent in p.self_and_parents():
            if '@nopylint' in parent.h:
                return True
        return False
    #@-others
    log = c.frame.log
    max_line = c.config.getInt('max-find-long-lines-length') or 110
    count, files, ignore = 0, [], []
    for p in c.all_unique_positions():
        if in_nopylint(p):
            continue
        root = get_root(p)
        if not root:
            continue
        if root.v not in files:
            files.append(root.v)
        for i, s in enumerate(g.splitLines(p.b)):
            if len(s) > max_line:
                if not root:
                    if p.v not in ignore:
                        ignore.append(p.v)
                        g.es_print('no root', p.h)
                else:
                    count += 1
                    short_s = g.truncate(s, 30)
                    g.es('')
                    g.es_print(root.h)
                    g.es_print(p.h)
                    print(short_s)
                    unl = p.get_UNL()
                    log.put(short_s.strip() + '\n', nodeLink=f"{unl}::{i + 1}")  # Local line.
                break
    g.es_print(
        f"found {count} long line{g.plural(count)} "
        f"longer than {max_line} characters in "
        f"{len(files)} file{g.plural(len(files))}")
def find_long_lines(event):
    '''Report long lines in the log, with clickable links.'''
    c = event.get('c')
    if not c:
        return
    #@+others # helper functions
    #@+node:ekr.20190609135639.1: *4* function: get_root
    def get_root(p):
        '''Return the nearest ancestor @<file> node of p, or None.'''
        for parent in p.self_and_parents():
            if parent.anyAtFileNodeName():
                return parent
        return None
    #@+node:ekr.20190608084751.2: *4* function: in_no_pylint
    def in_nopylint(p):
        '''Return True if p is controlled by @nopylint.'''
        for parent in p.self_and_parents():
            if '@nopylint' in parent.h:
                return True
        return False
    #@-others
    max_line = c.config.getInt('max-find-long-lines-length') or 110
    count, files, ignore = 0, [], []
    for p in c.all_unique_positions():
        if in_nopylint(p):
            continue
        root = get_root(p)
        if not root:
            continue
        if root.v not in files:
            files.append(root.v)
        for i, s in enumerate(g.splitLines(p.b)):
            if len(s) > max_line:
                if not root:
                    if p.v not in ignore:
                        ignore.append(p.v)
                        g.es_print('no root', p.h)
                else:
                    count += 1
                    short_s = g.truncate(s, 30)
                    g.es('')
                    g.es_print(root.h)
                    g.es_print(p.h)
                    print(short_s)
                    g.es_clickable_link(c, p, line_number=i, message=short_s)
                break
    g.es_print(
        'found %s long line%s longer than %s characters in %s file%s' % (
            count, g.plural(count), max_line, len(files), g.plural(len(files))))
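# Hedged usage note (assumption, not shown in this listing): a function with
# this (event) signature is normally registered as a Leo minibuffer command,
# for example:
#
#   @g.command('find-long-lines')
#   def find_long_lines(event):
#       ...
#
# The threshold comes from the @int max-find-long-lines-length setting,
# defaulting to 110 as coded above.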
def scan_sentinel_lines(self, lines, n, root):
    '''
    Scan a list of lines containing sentinels, looking for the node and
    offset within the node of the n'th (one-based) line.

    Return gnx, h, offset:
    gnx:    the gnx of the #@+node
    h:      the headline of the #@+node
    offset: the offset of line n within the node.
    '''
    trace = False and not g.unitTesting
    trace_lines = False
    delim1, delim2 = self.get_delims(root)
    gnx, h, offset = root.gnx, root.h, 0
    stack = [(gnx, h, offset),]
    if trace: g.trace('=====', delim1, delim2, n)
    for i, s in enumerate(lines):
        if trace and trace_lines: g.trace(s.rstrip())
        if self.is_sentinel(delim1, delim2, s):
            s2 = s.strip()[len(delim1):]
            if s2.startswith('@+node'):
                offset = 0
                gnx, h = self.get_script_node_info(s, delim2)
                if trace: g.trace('node: %30s %5s %s' % (gnx, i + 1, g.truncate(h, 50)))
            elif s2.startswith('@+others') or s2.startswith('@+<<'):
                stack.append((gnx, h, offset),)
                offset += 1
            elif s2.startswith('@-others') or s2.startswith('@-<<'):
                gnx, h, offset = stack.pop()
                offset += 1
            else:
                offset += 1
        else:
            offset += 1
        if trace and trace_lines: g.trace(i + 1, offset, s.rstrip())
        if i + 1 == n:
            # Bug fix 2017/04/01: n is one based.
            break
    else:
        gnx, h, offset = None, None, -1
    if trace: g.trace('----- gnx', gnx, 'h', h, 'offset', offset)
    return gnx, h, offset
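# Note (added for clarity): unlike scan_nonsentinel_lines above, this variant
# counts *every* line, sentinel or not, against n; only the offset bookkeeping
# distinguishes @+node (reset to 0) from the other sentinel kinds.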
def clean_headline(self, s, p=None):
    '''Return a cleaned up headline s.'''
    s = s.strip()
    # Don't clean a headline twice.
    if s.endswith('>>') and s.startswith('<<'):
        return s
    # Try to match patterns.
    for group_n, pattern in self.function_patterns:
        m = pattern.match(s)
        if m:
            # g.trace('group %s: %s' % (group_n, m.group(group_n)))
            return m.group(group_n)
    # Final cleanups, if nothing matches.
    for ch in '{(=':
        if s.endswith(ch):
            s = s[:-1].strip()
    s = s.replace('  ', ' ')
    s = s.replace(' (', '(')
    return g.truncate(s, 100)
def clean_headline(self, s, p=None):
    '''Return a cleaned up headline s.'''
    s = s.strip()
    # Don't clean a headline twice.
    if s.endswith('>>') and s.startswith('<<'):
        return s
    # Try to match patterns.
    for group_n, pattern in self.function_patterns:
        m = pattern.match(s)
        if m:
            # g.trace('group %s: %s' % (group_n, m.group(group_n)))
            return m.group(group_n)
    # Final cleanups, if nothing matches.
    for ch in '{(=':
        if s.endswith(ch):
            s = s[:-1].strip()
    s = s.replace('  ', ' ')
    s = s.replace(' (', '(')
    return g.truncate(s, 100)
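# Hedged sketch (pattern invented for illustration; not Leo's actual table):
# self.function_patterns pairs a capture-group index with a compiled regex,
# and clean_headline returns just that group when a pattern matches.
import re

function_patterns = [
    # (group number, pattern): keep only the name from a C-like definition.
    (1, re.compile(r'\s*[\w\s\*]*?(\w+)\s*\(.*\)\s*\{?\s*$')),
]

s = 'static void do_stuff(int n) {'
for group_n, pattern in function_patterns:
    m = pattern.match(s)
    if m:
        print(m.group(group_n))  # -> do_stuff
        break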
def clean_headline(self, s, p=None, trace=False):
    '''Return a cleaned up headline s.'''
    # pylint: disable=arguments-differ
    s = s.strip()
    # Don't clean a headline twice.
    if s.endswith('>>') and s.startswith('<<'):
        return s
    for ch in '{(=':
        if s.endswith(ch):
            s = s[:-1].strip()
    # First regex cleanup. Use \1.
    for pattern in self.clean_regex_list1:
        m = pattern.match(s)
        if m:
            s = m.group(1)
            if trace: g.trace('match 1', pattern)
            break
    # Second regex cleanup. Use \1 + \2.
    for pattern in self.clean_regex_list2:
        m = pattern.match(s)
        if m:
            s = m.group(1) + m.group(2)
            if trace: g.trace('match 2', pattern)
            break
    # Third regex cleanup. Use \1 + ' ' + \2.
    for pattern in self.clean_regex_list3:
        m = pattern.match(s)
        if m:
            s = m.group(1) + ' ' + m.group(2)
            if trace: g.trace('match 3', pattern)
            break
    # Fourth cleanup. Use \1 + ' ' + \2 again.
    for pattern in self.clean_regex_list4:
        m = pattern.match(s)
        if m:
            s = m.group(1) + ' ' + m.group(2)
            if trace: g.trace('match 4', pattern)
            break
    # Final whitespace cleanups.
    s = s.replace('  ', ' ')
    s = s.replace(' (', '(')
    return g.truncate(s, 100)
def clean_headline(self, s, p=None, trace=False):
    '''Return a cleaned up headline s.'''
    # pylint: disable=arguments-differ
    s = s.strip()
    # Don't clean a headline twice.
    if s.endswith('>>') and s.startswith('<<'):
        return s
    for ch in '{(=':
        if s.endswith(ch):
            s = s[:-1].strip()
    # First regex cleanup. Use \1.
    for pattern in self.clean_regex_list1:
        m = pattern.match(s)
        if m:
            s = m.group(1)
            if trace: g.trace('match 1', pattern)
            break
    # Second regex cleanup. Use \1 + \2.
    for pattern in self.clean_regex_list2:
        m = pattern.match(s)
        if m:
            s = m.group(1) + m.group(2)
            if trace: g.trace('match 2', pattern)
            break
    # Third regex cleanup. Use \1 + ' ' + \2.
    for pattern in self.clean_regex_list3:
        m = pattern.match(s)
        if m:
            s = m.group(1) + ' ' + m.group(2)
            if trace: g.trace('match 3', pattern)
            break
    # Fourth cleanup. Use \1 + ' ' + \2 again.
    for pattern in self.clean_regex_list4:
        m = pattern.match(s)
        if m:
            s = m.group(1) + ' ' + m.group(2)
            if trace: g.trace('match 4', pattern)
            break
    # Final whitespace cleanups.
    s = s.replace('  ', ' ')
    s = s.replace(' (', '(')
    return g.truncate(s, 100)
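# Hedged sketch (regex invented for illustration; the real clean_regex_list*
# tables are defined elsewhere in the class): a clean_regex_list3-style pattern
# keeps two groups and rejoins them with a single space.
import re

clean_regex_list3 = [
    # Drop a 'template <...>' clause between return type and function name.
    re.compile(r'(.*?)\s*template\s*<[^>]*>\s*(.*)'),
]

s = 'int template <class T> find(T x)'
for pattern in clean_regex_list3:
    m = pattern.match(s)
    if m:
        s = m.group(1) + ' ' + m.group(2)
        break
print(s)  # -> int find(T x)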
def scan_sentinel_lines(self, lines, n, root):
    '''
    Scan a list of lines containing sentinels, looking for the node and
    offset within the node of the n'th (one-based) line.

    Return gnx, h, offset:
    gnx:    the gnx of the #@+node
    h:      the headline of the #@+node
    offset: the offset of line n within the node.
    '''
    trace = False and not g.unitTesting
    trace_lines = False
    delim1, delim2 = self.get_delims(root)
    gnx, h, offset = root.gnx, root.h, 0
    stack = [(gnx, h, offset),]
    if trace: g.trace('=====', delim1, delim2, n)
    for i, s in enumerate(lines):
        if trace and trace_lines: g.trace(s.rstrip())
        if self.is_sentinel(delim1, delim2, s):
            s2 = s.strip()[len(delim1):]
            if s2.startswith('@+node'):
                offset = 0
                gnx, h = self.get_script_node_info(s, delim2)
                if trace: g.trace('node: %30s %5s %s' % (gnx, i+1, g.truncate(h, 50)))
            elif s2.startswith('@+others') or s2.startswith('@+<<'):
                stack.append((gnx, h, offset),)
                offset += 1
            elif s2.startswith('@-others') or s2.startswith('@-<<'):
                gnx, h, offset = stack.pop()
                offset += 1
            else:
                offset += 1
        else:
            offset += 1
        if trace and trace_lines: g.trace(i+1, offset, s.rstrip())
        if i+1 == n:
            # Bug fix 2017/04/01: n is one based.
            break
    else:
        gnx, h, offset = None, None, -1
    if trace: g.trace('----- gnx', gnx, 'h', h, 'offset', offset)
    return gnx, h, offset
def addAbbrevHelper(self, s, tag=''):
    '''Enter the abbreviation 's' into the self.abbrevs dict.'''
    trace = False and not g.unitTesting
    if not s.strip():
        return
    try:
        d = self.abbrevs
        data = s.split('=')
        # Do *not* strip ws so the user can specify ws.
        name = data[0].replace('\\t', '\t').replace('\\n', '\n')
        val = '='.join(data[1:])
        if val.endswith('\n'):
            val = val[:-1]
        val = self.n_regex.sub('\n', val).replace('\\\\n', '\\n')
        old, tag = d.get(name, (None, None),)
        if old and old != val and not g.unitTesting:
            g.es_print('redefining abbreviation', name,
                '\nfrom', repr(old), 'to', repr(val))
        if trace:
            val1 = val if val.find('\n') == -1 else g.splitLines(val)[0] + '...'
            g.trace('%12s %r' % (name, g.truncate(val1, 80)))
        d[name] = val, tag
    except ValueError:
        g.es_print('bad abbreviation: %s' % s)