Code example #1
 def diff_file(self, fn):
     '''Create an outline describing the git diffs for fn.'''
     c = self.c
     s1 = self.get_file_from_rev(self.rev1, fn)
     s2 = self.get_file_from_rev(self.rev2, fn)
     lines1 = g.splitLines(s1)
     lines2 = g.splitLines(s2)
     diff_list = list(difflib.unified_diff(
         lines1,
         lines2,
         self.rev1 or 'uncommitted',
         self.rev2 or 'uncommitted',
     ))
     diff_list.insert(0, '@language patch\n')
     self.file_node = self.create_file_node(diff_list, fn)
     if c.looksLikeDerivedFile(fn):
         c1 = self.make_at_file_outline(fn, s1, self.rev1)
         c2 = self.make_at_file_outline(fn, s2, self.rev2)
     else:
         root = self.find_file(fn)
         if root:
             c1 = self.make_at_clean_outline(fn, root, s1, self.rev1)
             c2 = self.make_at_clean_outline(fn, root, s2, self.rev2)
         else:
             g.es_print('No outline for', fn)
             c1 = c2 = None
     if c1 and c2:
         self.make_diff_outlines(fn, c1, c2)
         self.file_node.b = '%s\n@language %s\n' % (
             self.file_node.b.rstrip(), c2.target_language)
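Most of the diff examples in this gallery share one pattern: split both file bodies into lines that keep their line endings, then hand the two lists to difflib.unified_diff. A minimal standalone sketch of that pattern, assuming g.splitLines(s) behaves like s.splitlines(True):

import difflib

def unified_diff_text(s1, s2, label1='rev1', label2='rev2'):
    '''Return a unified diff of two file bodies as a single string (illustration only).'''
    lines1 = s1.splitlines(True)  # Assumed equivalent of g.splitLines(s1).
    lines2 = s2.splitlines(True)
    return ''.join(difflib.unified_diff(lines1, lines2, label1, label2))

print(unified_diff_text('a\nb\n', 'a\nc\n', 'HEAD', 'uncommitted'))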
Code example #2
 def compare(self, d1, d2, p1, p2, root):
     '''Compare dicts d1 and d2.'''
     for h in sorted(d1.keys()):
         p1, p2 = d1.get(h), d2.get(h)
         if h in d2:
             lines1, lines2 = g.splitLines(p1.b), g.splitLines(p2.b)
             aList = list(difflib.unified_diff(lines1, lines2, 'vr1', 'vr2'))
             if aList:
                 p = root.insertAsLastChild()
                 p.h = h
                 p.b = ''.join(aList)
                 p1.clone().moveToLastChildOf(p)
                 p2.clone().moveToLastChildOf(p)
         elif p1.b.strip():
             # Only in p1 tree, and not an organizer node.
             p = root.insertAsLastChild()
             p.h = h + '(%s only)' % p1.h
             p1.clone().moveToLastChildOf(p)
     for h in sorted(d2.keys()):
         p2 = d2.get(h)
         if h not in d1 and p2.b.strip():
             # Only in p2 tree, and not an organizer node.
             p = root.insertAsLastChild()
             p.h = h + '(%s only)' % p2.h
             p2.clone().moveToLastChildOf(p)
     return root
Code example #3
 def diff_two_branches(self, branch1, branch2, fn, directory=None):
     '''Create an outline describing the git diffs for fn.'''
     c = self.c
     if not self.set_directory(directory):
         return
     self.root = p = c.lastTopLevel().insertAfter()
     p.h = 'git-diff-branches %s %s' % (branch1, branch2)
     s1 = self.get_file_from_branch(branch1, fn)
     s2 = self.get_file_from_branch(branch2, fn)
     lines1 = g.splitLines(s1)
     lines2 = g.splitLines(s2)
     diff_list = list(difflib.unified_diff(
         lines1,
         lines2,
          branch1,
         branch2,
     ))
     diff_list.insert(0, '@ignore\n@nosearch\n@language patch\n')
     self.file_node = self.create_file_node(diff_list, fn)
     if c.looksLikeDerivedFile(fn):
         c1 = self.make_at_file_outline(fn, s1, branch1)
         c2 = self.make_at_file_outline(fn, s2, branch2)
     else:
         root = self.find_file(fn)
         if root:
             c1 = self.make_at_clean_outline(fn, root, s1, branch1)
             c2 = self.make_at_clean_outline(fn, root, s2, branch2)
         else:
             c1 = c2 = None
     if c1 and c2:
         self.make_diff_outlines(c1, c2, fn)
         self.file_node.b = '%s\n@language %s\n' % (
             self.file_node.b.rstrip(), c2.target_language)
     self.finish()
Code example #4
 def undent_coffeescript_body(self, s):
     '''Return the undented body of s.'''
     trace = False and not g.unitTesting and self.root.h.endswith('1.coffee')
     lines = g.splitLines(s)
     if trace:
         g.trace('='*20)
         self.print_lines(lines)
     # Undent all leading whitespace or comment lines.
     leading_lines = []
     for line in lines:
         if self.is_ws_line(line):
              # Tricky: stripping a blank line deletes it.
             leading_lines.append(line if line.isspace() else line.lstrip())
         else:
             break
     i = len(leading_lines)
     # Don't unindent the def/class line! It prevents later undents.
     tail = self.undent_body_lines(lines[i:], ignoreComments=True)
     # Remove all blank lines from leading lines.
     if 0:
         for i, line in enumerate(leading_lines):
             if not line.isspace():
                 leading_lines = leading_lines[i:]
                 break
     result = ''.join(leading_lines) + tail
     if trace:
         g.trace('-'*20)
         self.print_lines(g.splitLines(result))
     return result
Code example #5
File: leoShadow.py Project: davy39/leo-editor
    def check_the_final_output(self, new_private_lines, new_public_lines, sentinel_lines, marker):
        """
        Check that we produced a valid output.

        Input:
            new_private_lines:  the new lines *with* sentinels.
            new_public_lines:   the new lines *without* sentinels.
            sentinel_lines:     the sentinel lines that must be preserved.

        Checks:
            1. new_private_lines, with sentinels removed, must equal new_public_lines.
            2. the sentinel lines of new_private_lines must match sentinel_lines.
        """
        new_public_lines2, new_sentinel_lines2 = self.separate_sentinels(
            new_private_lines, marker)

        lines1 = new_public_lines
        lines2 = new_public_lines2
        sents1 = sentinel_lines
        sents2 = new_sentinel_lines2
        if 1: # Ignore trailing ws:
            s1 = ''.join(lines1).rstrip()
            s2 = ''.join(lines2).rstrip()
            lines1 = g.splitLines(s1)
            lines2 = g.splitLines(s2)
        if 1: # Ignore trailing ws on every line.
            lines1 = [z.rstrip() for z in lines1]
            lines2 = [z.rstrip() for z in lines2]
            sents1 = [z.rstrip() for z in sents1]
            sents2 = [z.rstrip() for z in sents2]
        lines_ok = lines1 == lines2
        sents_ok = sents1 == sents2

        if g.unitTesting:
            # The unit test will report the error.
            return lines_ok and sents_ok

        if not lines_ok:
            if 1:
                g.trace()
                d = difflib.Differ()
                # g.trace('Different!!',d)
                aList = list(d.compare(new_public_lines2,new_public_lines))
                pprint.pprint(aList)
            else:
                self.show_error(
                    lines1 = new_public_lines2,
                    lines2 = new_public_lines,
                    message = "Error in updating public file!",
                    lines1_message = "new public lines (derived from new private lines)",
                    lines2_message = "new public lines")

        if not sents_ok:
            self.show_error(
                lines1 = sentinel_lines,
                lines2 = new_sentinel_lines2,
                message = "Sentinals not preserved!",
                lines1_message = "old sentinels",
                lines2_message = "new sentinels")

        return lines_ok and sents_ok
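For reference, a tiny standalone illustration of the difflib.Differ comparison used in the error branch above (the sample lines are invented):

import difflib
import pprint

d = difflib.Differ()
aList = list(d.compare(['a\n', 'b\n'], ['a\n', 'c\n']))
pprint.pprint(aList)  # ['  a\n', '- b\n', '+ c\n']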
Code example #6
 def toAtEdit(self, p):
     '''Convert p from @auto to @edit.'''
     c = self.c
     w = c.frame.body.wrapper
     p.h = '@edit' + p.h[5:]
     # Compute the position of the present line within the *selected* node c.p
     ins = w.getInsertPoint()
     row, col = g.convertPythonIndexToRowCol(c.p.b, ins)
     # Ignore directive lines.
     directives = [z for z in g.splitLines(c.p.b)[:row] if g.isDirective(z)]
     row -= len(directives)
     row = max(0, row)
     # Count preceding lines from p to c.p, again ignoring directives.
     for p2 in p.self_and_subtree():
         if p2 == c.p:
             break
         lines = [z for z in g.splitLines(p2.b) if not g.isDirective(z)]
         row += len(lines)
     # Reload the file into a single node.
     c.selectPosition(p, enableRedrawFlag=False)
     c.refreshFromDisk()
     # Restore the line in the proper node.
     ins = g.convertRowColToPythonIndex(p.b, row+1, 0)
     w.setInsertPoint(ins)
     p.setDirty()
     c.setChanged()
     c.redraw()
     c.bodyWantsFocus()
Code example #7
 def diff_file(self, fn, directory=None, rev1='HEAD', rev2=''):
     '''
     Create an outline describing the git diffs for fn.
     '''
     if not self.set_directory(directory):
         return
     c = self.c
     s1 = self.get_file_from_rev(rev1, fn)
     s2 = self.get_file_from_rev(rev2, fn)
     lines1 = g.splitLines(s1)
     lines2 = g.splitLines(s2)
     diff_list = list(difflib.unified_diff(
         lines1,
         lines2,
         rev1 or 'uncommitted',
         rev2 or 'uncommitted',
     ))
     diff_list.insert(0, '@ignore\n@nosearch\n@language patch\n')
     self.file_node = self.create_file_node(diff_list, fn)
     if c.looksLikeDerivedFile(fn):
         c1 = self.make_at_file_outline(fn, s1, rev1)
         c2 = self.make_at_file_outline(fn, s2, rev2)
     else:
         root = self.find_file(fn)
         if root:
             c1 = self.make_at_clean_outline(fn, root, s1, rev1)
             c2 = self.make_at_clean_outline(fn, root, s2, rev2)
         else:
             # This warning is silly.
             # g.es_print('No outline for', fn)
             c1 = c2 = None
     if c1 and c2:
         self.make_diff_outlines(c1, c2, fn, rev1, rev2)
         self.file_node.b = '%s\n@language %s\n' % (
             self.file_node.b.rstrip(), c2.target_language)
Code example #8
File: leoShadow.py Project: jurov/leo-editor
 def propagate_changes(self, old_public_file, old_private_file):
     '''
     Propagate the changes from the public file (without_sentinels)
     to the private file (with_sentinels)
     '''
     trace, verbose = False and not g.unitTesting, False
     import leo.core.leoAtFile as leoAtFile
     x = self ; at = self.c.atFileCommands
     at.errors = 0
     if trace: g.trace('*** header scanned: encoding:',at.encoding)
     self.encoding = at.encoding
     s = at.readFileToUnicode(old_private_file)
         # Sets at.encoding and inits at.readLines.
     old_private_lines = g.splitLines(s)
     s = at.readFileToUnicode(old_public_file)
     if at.encoding != self.encoding:
         g.trace('can not happen: encoding mismatch: %s %s' % (
             at.encoding,self.encoding))
         at.encoding = self.encoding
     old_public_lines = g.splitLines(s)
     if 0:
         g.trace('\nprivate lines...%s' % old_private_file)
         for s in old_private_lines:
             g.trace(type(s),g.isUnicode(s),repr(s))
         g.trace('\npublic lines...%s' % old_public_file)
         for s in old_public_lines:
             g.trace(type(s),g.isUnicode(s),repr(s))
     marker = x.markerFromFileLines(old_private_lines,old_private_file)
     if trace and verbose:
         g.trace(
             'marker',marker,
             '\npublic_file',old_public_file,
             '\npublic lines...\n%s' %(
                 g.listToString(old_public_lines,toRepr=True)),
             '\nprivate_file',old_private_file,
             '\nprivate lines...\n%s\n' %(
                 g.listToString(old_private_lines,toRepr=True)))
     new_private_lines = x.propagate_changed_lines(
         old_public_lines,old_private_lines,marker)
     # Important bug fix: Never create the private file here!
     fn = old_private_file
     exists = g.os_path_exists(fn)
     different = new_private_lines != old_private_lines
     copy = exists and different
     if trace: g.trace('\nexists',exists,fn,'different',different,'errors',x.errors,at.errors)
     # 2010/01/07: check at.errors also.
     if copy and x.errors == 0 and at.errors == 0:
         s = ''.join(new_private_lines)
         ok = x.replaceFileWithString(fn,s)
         if trace: g.trace('ok',ok,'writing private file',fn,g.callers())
     return copy
Code example #9
 def check(self, unused_s, parent):
     '''True if perfect import checks pass.'''
     if g.app.suppressImportChecks:
         g.app.suppressImportChecks = False
         return True
     c = self.c
     sfn = g.shortFileName(self.root.h)
     s1 = g.toUnicode(self.file_s, self.encoding)
     s2 = self.trial_write()
     lines1, lines2 = g.splitLines(s1), g.splitLines(s2)
     if 0: # An excellent trace for debugging.
         g.trace(c.shortFileName())
         g.printObj(lines1, tag='lines1')
         g.printObj(lines2, tag='lines2')
     if self.strict:
         # Ignore blank lines only.
         # Adding nodes may add blank lines.
         lines1 = self.strip_blank_lines(lines1)
         lines2 = self.strip_blank_lines(lines2)
     else:
         # Ignore blank lines and leading whitespace.
         # Importing may regularize whitespace, and that's good.
         lines1 = self.strip_all(lines1)
         lines2 = self.strip_all(lines2)
     # Forgive trailing whitespace problems in the last line.
     # This is not the same as clean_last_lines.
     if lines1 and lines2 and lines1 != lines2:
         lines1[-1] = lines1[-1].rstrip()+'\n'
         lines2[-1] = lines2[-1].rstrip()+'\n'
     # self.trace_lines(lines1, lines2, parent)
     ok = lines1 == lines2
     if not ok and not self.strict:
         # Issue an error only if something *other than* lws is amiss.
         lines1, lines2 = self.strip_lws(lines1), self.strip_lws(lines2)
         ok = lines1 == lines2
         if ok and not g.unitTesting:
             print('warning: leading whitespace changed in:', self.root.h)
     if not ok:
         self.show_failure(lines1, lines2, sfn)
         # self.trace_lines(lines1, lines2, parent)
     # Ensure that the unit tests fail when they should.
     # Unit tests do not generate errors unless the mismatch line does not match.
     if g.app.unitTesting:
         d = g.app.unitTestDict
         d['result'] = ok
         if not ok:
             d['fail'] = g.callers()
             # Used in a unit test.
             c.importCommands.errors += 1
     return ok
Code example #10
def find_bound_paragraph(c):
    '''
    Return the lines of a paragraph to be reformatted.
    This is a convenience method for the reformat-paragraph command.
    '''
    head, ins, tail = c.frame.body.getInsertLines()
    head_lines = g.splitLines(head)
    tail_lines = g.splitLines(tail)
    result = []
    insert_lines = g.splitLines(ins)
    para_lines = insert_lines + tail_lines
    # If the present line doesn't start a paragraph,
    # scan backward, adding trailing lines of head to ins.
    if insert_lines and not startsParagraph(insert_lines[0]):
        n = 0 # number of moved lines.
        for i, s in enumerate(reversed(head_lines)):
            if ends_paragraph(s) or single_line_paragraph(s):
                break
            elif startsParagraph(s):
                n += 1
                break
            else: n += 1
        if n > 0:
            para_lines = head_lines[-n:] + para_lines
            head_lines = head_lines[: -n]
    ended, started = False, False
    for i, s in enumerate(para_lines):
        if started:
            if ends_paragraph(s) or startsParagraph(s):
                ended = True
                break
            else:
                result.append(s)
        elif s.strip():
            result.append(s)
            started = True
            if ends_paragraph(s) or single_line_paragraph(s):
                i += 1
                ended = True
                break
        else:
            head_lines.append(s)
    if started:
        head = g.joinLines(head_lines)
        tail_lines = para_lines[i:] if ended else []
        tail = g.joinLines(tail_lines)
        return head, result, tail # string, list, string
    else:
        return None, None, None
Code example #11
def should_beautify(p):
    '''
    Return True if @beautify is in effect for node p.
    Ambiguous @beautify/@nobeautify nodes have no effect.
    '''
    for p2 in p.self_and_parents():
        d = g.get_directives_dict(p2)
        if 'killbeautify' in d:
            return False
        elif 'beautify' in d and 'nobeautify' in d:
            if p == p2:
                # honor whichever comes first.
                for line in g.splitLines(p2.b):
                    if line.startswith('@beautify'):
                        return True
                    elif line.startswith('@nobeautify'):
                        return False
                g.trace('can not happen', p2.h)
                return False
            else:
                # The ambiguous node has no effect.
                # Look up the tree.
                pass
        elif 'beautify' in d:
            return True
        elif 'nobeautify' in d:
            # This message would quickly become annoying.
            # self.skip_message('@nobeautify',p)
            return False
    # The default is to beautify.
    return True
Code example #12
 def readFile(self, fileName, root):
     '''
     Read the file from the cache if possible.
     Return (s,ok,key)
     '''
     trace = (False or g.app.debug) and not g.unitTesting
     showHits, showLines, verbose = False, False, True
     sfn = g.shortFileName(fileName)
     if not g.enableDB:
         if trace and verbose: g.trace('g.enableDB is False', sfn)
         return '', False, None
     s = g.readFileIntoEncodedString(fileName, silent=True)
     if s is None:
         if trace: g.trace('empty file contents', sfn)
         return s, False, None
     assert not g.isUnicode(s)
     if trace and showLines:
         for i, line in enumerate(g.splitLines(s)):
             print('%3d %s' % (i, repr(line)))
     # There will be a bug if s is not already an encoded string.
     key = self.fileKey(root.h, s, requireEncodedString=True)
     ok = self.db and key in self.db
     if ok:
         if trace and showHits: g.trace('cache hit', key[-6:], sfn)
         # Delete the previous tree, regardless of the @<file> type.
         while root.hasChildren():
             root.firstChild().doDelete()
         # Recreate the file from the cache.
         aList = self.db.get(key)
         self.createOutlineFromCacheList(root.v, aList, fileName=fileName)
     elif trace:
         g.trace('cache miss', key[-6:], sfn)
     return s, ok, key
Code example #13
    def parse_script_string(self, script_string, delim):
        '''
        script_string is a single string representing a list of script strings
        separated by lines that start with delim.

        Return a list of strings.
        '''
        aList = []
        lines = []
        for s in g.splitLines(script_string):
            if s.startswith(delim):
                if lines:
                    aList.append(''.join(lines))
                lines = []
            elif s.isspace() or s.strip().startswith('#'):
                # Ignore comment or blank lines.
                # This allows the user to comment out entire sections.
                pass
            else:
                lines.append(s)
                # lines.append(s.replace('\\', '\\\\'))
                # Experimental: allow escapes.
        if lines:
            aList.append(''.join(lines))
        return aList
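A standalone usage sketch of the method above, with an invented delimiter and script text, and str.splitlines(True) standing in for g.splitLines:

def parse_script_string(script_string, delim):
    '''Same logic as above, outside the class, for illustration only.'''
    aList, lines = [], []
    for s in script_string.splitlines(True):
        if s.startswith(delim):
            if lines:
                aList.append(''.join(lines))
            lines = []
        elif s.isspace() or s.strip().startswith('#'):
            pass  # Ignore blank and comment lines.
        else:
            lines.append(s)
    if lines:
        aList.append(''.join(lines))
    return aList

scripts = parse_script_string('---\nprint(1)\n# skipped\n---\nprint(2)\n', '---')
assert scripts == ['print(1)\n', 'print(2)\n']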
Code example #14
File: format-code.py Project: Armagedoom/leo-editor
    def scanForOptionDocParts (self,p,s):

        '''Return a dictionary containing all options from @rst-options doc parts in p.
        Multiple @rst-options doc parts are allowed: this code aggregates all options.
        '''

        d = {} ; n = 0 ; lines = g.splitLines(s)
        while n < len(lines):
            line = lines[n] ; n += 1
            if line.startswith('@'):
                i = g.skip_ws(line,1)
                for kind in ('@rst-options','@rst-option'):
                    if g.match_word(line,i,kind):
                        # Allow options on the same line.
                        line = line[i+len(kind):]
                        d.update(self.scanOption(p,line))
                        # Add options until the end of the doc part.
                        while n < len(lines):
                            line = lines[n] ; n += 1 ; found = False
                            for stop in ('@c','@code', '@'):
                                if g.match_word(line,0,stop):
                                    found = True ; break
                            if found:
                                break
                            else:
                                d.update(self.scanOption(p,line))
                        break
        return d
Code example #15
File: gotoCommands.py Project: ATikhonov2/leo-editor
 def find_file_line(self, n, p=None):
     """
     Place the cursor on the n'th line (one-based) of an external file.
     Return (p, offset, found) for unit testing.
     """
     c = self.c
     if n < 0:
         return None, -1, False
     p = p or c.p
     root, fileName = self.find_root(p)
     if root:
         # Step 1: Get the lines of external files *with* sentinels,
          # even if the actual external file contains no sentinels.
         sentinels = root.isAtFileNode()
         s = self.get_external_file_with_sentinels(root)
         lines = g.splitLines(s)
         # Step 2: scan the lines for line n.
         if sentinels:
             # All sentinels count as real lines.
             gnx, h, offset = self.scan_sentinel_lines(lines, n, root)
         else:
              # Not all sentinels count as real lines.
             gnx, h, offset = self.scan_nonsentinel_lines(lines, n, root)
         p, found = self.find_gnx(root, gnx, h)
         if gnx and found:
             self.success(lines, n, offset, p)
             return p, offset, True
         self.fail(lines, n, root)
         return None, -1, False
     return self.find_script_line(n, p)
Code example #16
File: otl.py Project: jochen-l/leo-editor
 def gen_lines(self, s, parent):
     '''Node generator for otl (vim-outline) mode.'''
     self.inject_lines_ivar(parent)
     # We may as well do this first.  See note below.
     self.add_line(parent, '@others\n')
     self.parents = [parent]
     for line in g.splitLines(s):
         m = self.otl_body_pattern.match(line)
         if m:
             p = self.parents[-1]
             self.add_line(p, m.group(1))
         else:
             m = self.otl_pattern.match(line)
             if m:
                 # Cut back the stack, then allocate a new node.
                 level = 1 + len(m.group(1))
                 self.parents = self.parents[:level]
                 self.find_parent(
                     level = level,
                     h = m.group(2).strip())
             else:
                 self.error('Bad otl line: %r' % line)
     note = (
         'Note: This node\'s body text is ignored when writing this file.\n\n' +
         'The @others directive is not required.\n'
     )
     self.add_line(parent, note)
Code example #17
File: leo_rst.py Project: tbpassin/leo-editor
 def gen_lines(self, s, parent):
     '''Node generator for reStructuredText importer.'''
     if not s or s.isspace():
         return
     self.inject_lines_ivar(parent)
     # We may as well do this first.  See note below.
     self.stack = [parent]
     skip = 0
     lines = g.splitLines(s)
     for i, line in enumerate(lines):
         if skip > 0:
             skip -= 1
         elif self.is_lookahead_overline(i, lines):
             level = self.ch_level(line[0])
             self.make_node(level, lines[i + 1])
             skip = 2
         elif self.is_lookahead_underline(i, lines):
             level = self.ch_level(lines[i + 1][0])
             self.make_node(level, line)
             skip = 1
         elif i == 0:
             p = self.make_dummy_node('!Dummy chapter')
             self.add_line(p, line)
         else:
             p = self.stack[-1]
             self.add_line(p, line)
Code example #18
 def gen_lines(self, s, parent):
     '''
     Non-recursively parse all lines of s into parent, creating descendant
     nodes as needed.
     '''
     tail_p = None
     prev_state = self.state_class()
     target = Target(parent, prev_state)
     stack = [target, target]
     self.inject_lines_ivar(parent)
     lines = g.splitLines(s)
     self.skip = 0
     for i, line in enumerate(lines):
         new_state = self.scan_line(line, prev_state)
         top = stack[-1]
         if self.skip > 0:
             self.skip -= 1
         elif self.is_ws_line(line):
             p = tail_p or top.p
             self.add_line(p, line)
         elif self.starts_block(i, lines, new_state, prev_state):
             tail_p = None
             self.start_new_block(i, lines, new_state, prev_state, stack)
         elif self.ends_block(line, new_state, prev_state, stack):
             tail_p = self.end_block(line, new_state, stack)
         else:
             p = tail_p or top.p
             self.add_line(p, line)
         prev_state = new_state
Code example #19
File: demo.py Project: SegundoBob/leo-editor
    def parse_script_string (self, script_string, delim):
        '''
        script_string is a single string representing a list of script strings
        separated by lines that start with delim.

        Return a list of strings.
        '''
        aList = []
        lines = []
        for s in g.splitLines(script_string):
            if s.startswith(delim):
                if lines:
                    aList.append(''.join(lines))
                lines = []
            elif s.isspace() or s.strip().startswith('#'):
                # Ignore comment or blank lines.
                # This allows the user to comment out entire sections.
                pass
            else:
                lines.append(s)
                # lines.append(s.replace('\\', '\\\\'))
                    # Experimental: allow escapes.
        if lines:
            aList.append(''.join(lines))
        # g.trace('===== delim', delim) ; g.printList(aList)
        return aList
Code example #20
 def update_image(self, s, keywords):
     '''Update an image in the vr pane.'''
     pc = self
     if not s.strip():
         return
     lines = g.splitLines(s) or []
     fn = lines and lines[0].strip()
     if not fn:
         return
     w = pc.ensure_text_widget()
     ok, path = pc.get_fn(fn, '@image')
     if not ok:
         w.setPlainText('@image: file not found: %s' % (path))
         return
     path = path.replace('\\', '/')
     template = '''\
 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
 <head></head>
 <body bgcolor="#fffbdc">
 <img src="%s">
 </body>
 </html>
 ''' % (path)
     # Only works in Python 3.x.
     template = g.adjustTripleString(template, pc.c.tab_width).strip() # Sensitive to leading blank lines.
     # template = g.toUnicode(template)
     pc.show()
     w.setReadOnly(False)
     w.setHtml(template)
     w.setReadOnly(True)
Code example #21
 def check_all(self, log_flag, pyflakes_errors_only, roots):
     """Run pyflakes on all files in paths."""
     try:
         from pyflakes import api, reporter
     except Exception:  # ModuleNotFoundError
         return True  # Pretend all is fine.
     total_errors = 0
     for i, root in enumerate(roots):
         fn = self.finalize(root)
         sfn = g.shortFileName(fn)
         # #1306: nopyflakes
         if any([
                 z.strip().startswith('@nopyflakes')
                 for z in g.splitLines(root.b)
         ]):
             continue
         # Report the file name.
         s = g.readFileIntoEncodedString(fn)
         if s and s.strip():
             if not pyflakes_errors_only:
                 g.es(f"Pyflakes: {sfn}")
             # Send all output to the log pane.
             r = reporter.Reporter(
                 errorStream=self.LogStream(i, roots),
                 warningStream=self.LogStream(i, roots),
             )
             errors = api.check(s, sfn, r)
             total_errors += errors
     return total_errors
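A minimal sketch of driving pyflakes through its api and reporter modules, as check_all does above; the source string and file name here are invented:

import sys
try:
    from pyflakes import api, reporter
except Exception:
    api = reporter = None  # Pretend all is fine, as check_all does.

if api:
    r = reporter.Reporter(warningStream=sys.stderr, errorStream=sys.stderr)
    errors = api.check('import os\n', 'example.py', r)
    # pyflakes reports that 'os' is imported but unused; errors is the warning count.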
Code example #22
File: linescanner.py Project: XCaminhante/leo-editor
 def run(self, s, parent, parse_body=False):
     """The common top-level code for all scanners."""
     c = self.c
      # Fix #449: Cloned @auto nodes duplicate section references.
     if parent.isCloned() and parent.hasChildren():
         return None
     self.root = root = parent.copy()
     self.file_s = s
     # Init the error/status info.
     self.errors = 0
     self.parse_body = parse_body
     # Check for intermixed blanks and tabs.
     self.tab_width = c.getTabWidth(p=root)
     lines = g.splitLines(s)
     ws_ok = self.check_blanks_and_tabs(lines)  # Only issues warnings.
     # Regularize leading whitespace
     if not ws_ok:
         lines = self.regularize_whitespace(lines)
     # Generate the nodes, including directives and section references.
     # Completely generate all nodes.
     self.generate_nodes(lines, parent)
     # Check the generated nodes.
     # Return True if the result is equivalent to the original file.
     if parse_body:
         ok = self.errors == 0  # Work around problems with directives.
     else:
         ok = self.errors == 0 and self.check(s, parent)
     # Insert an @ignore directive if there were any serious problems.
     if not ok:
         self.insert_ignore_directive(parent)
     # Importers should never dirty the outline.
     for p in root.self_and_subtree():
         p.clearDirty()
     # #1451: Do not change the outline's change status.
     return ok  # For unit tests.
Code example #23
def reformatSelection(self, event=None, undoType='Reformat Paragraph'):
    """
    Reformat the selected text, as in reformat-paragraph, but without
    expanding the selection past the selected lines.
    """
    c, undoType = self, 'reformat-selection'
    p, u, w = c.p, c.undoer, c.frame.body.wrapper
    if g.app.batchMode:
        c.notValidInBatchMode(undoType)
        return
    bunch = u.beforeChangeBody(p)
    oldSel, oldYview, original, pageWidth, tabWidth = rp_get_args(c)
    head, middle, tail = c.frame.body.getSelectionLines()
    lines = g.splitLines(middle)
    if not lines:
        return
    indents, leading_ws = rp_get_leading_ws(c, lines, tabWidth)
    result = rp_wrap_all_lines(c, indents, leading_ws, lines, pageWidth)
    s = head + result + tail
    if s == original:
        return
    #
    # Update the text and the selection.
    w.setAllText(s)  # Destroys coloring.
    i = len(head)
    j = max(i, len(head) + len(result) - 1)
    j = min(j, len(s))
    w.setSelectionRange(i, j, insert=j)
    #
    # Finish.
    p.v.b = s  # p.b would cause a redraw.
    u.afterChangeBody(p, undoType, bunch)
    w.setXScrollPosition(0)  # Never scroll horizontally.
Code example #24
File: leoCache.py Project: hanseychen/leo-editor
 def readFile(self, fileName, root):
     '''
     Read the file from the cache if possible.
     Return (s,ok,key)
     '''
     trace = (False or g.app.debug) and not g.unitTesting
     showHits, showLines, verbose = False, False, True
     sfn = g.shortFileName(fileName)
     if not g.enableDB:
         if trace and verbose: g.trace('g.enableDB is False', sfn)
         return '', False, None
     s = g.readFileIntoEncodedString(fileName, silent=True)
     if s is None:
         if trace: g.trace('empty file contents', sfn)
         return s, False, None
     assert not g.isUnicode(s)
     if trace and showLines:
         for i, line in enumerate(g.splitLines(s)):
             print('%3d %s' % (i, repr(line)))
     # There will be a bug if s is not already an encoded string.
     key = self.fileKey(root.h, s, requireEncodedString=True)
     ok = self.db and key in self.db
     if ok:
         if trace and showHits: g.trace('cache hit', key[-6:], sfn)
         # Delete the previous tree, regardless of the @<file> type.
         while root.hasChildren():
             root.firstChild().doDelete()
         # Recreate the file from the cache.
         aList = self.db.get(key)
         self.createOutlineFromCacheList(root.v, aList, fileName=fileName)
     elif trace:
         g.trace('cache miss', key[-6:], sfn)
     return s, ok, key
Code example #25
 def setUp(self):
     super().setUp()
     c = self.c
     self.x = c.convertCommands.PythonToTypescript(c)
     self.assertTrue(hasattr(self.x, 'convert'))
     root = self.root_p
     # Delete all children
     root.deleteAllChildren()
     # Read leo.core.leoNodes into contents.
     unittest_dir = os.path.dirname(__file__)
     core_dir = os.path.abspath(
         os.path.join(unittest_dir, '..', '..', 'core'))
     path = os.path.join(core_dir, 'leoNodes.py')
     with open(path) as f:
         contents = f.read()
     # Set the gnx of the @file nodes in the contents to root.gnx.
     # This is necessary because of a check in fast_at.scan_lines.
     pat = re.compile(r'^\s*#@\+node:([^:]+): \* @file leoNodes\.py$')
     line3 = g.splitLines(contents)[2]
     m = pat.match(line3)
     assert m, "Can not replace gnx"
     contents = contents.replace(m.group(1), root.gnx)
     # Replace c's outline with leoNodes.py.
     gnx2vnode = {}
     ok = c.atFileCommands.fast_read_into_root(c, contents, gnx2vnode, path,
                                               root)
     self.assertTrue(ok)
     root.h = 'leoNodes.py'
     self.p = root
     c.selectPosition(self.p)
Code example #26
 def check_blanks_and_tabs(self, lines):
      '''Check for intermixed blanks & tabs.'''
     # Do a quick check for mixed leading tabs/blanks.
     fn = g.shortFileName(self.root.h)
     w = self.tab_width
     blanks = tabs = 0
     for s in g.splitLines(lines):
         lws = self.get_str_lws(s)
         blanks += lws.count(' ')
         tabs += lws.count('\t')
     # Make sure whitespace matches @tabwidth directive.
     if w < 0:
         ok = tabs == 0
         message = 'tabs found with @tabwidth %s in %s' % (w, fn)
     elif w > 0:
         ok = blanks == 0
         message = 'blanks found with @tabwidth %s in %s' % (w, fn)
     if ok:
         ok = blanks == 0 or tabs == 0
         message = 'intermixed blanks and tabs in: %s' % (fn)
     if not ok:
         if g.unitTesting:
             self.report(message)
         else:
             g.es(message)
     return ok
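A standalone sketch of the same leading-whitespace counting, with a plain helper standing in for self.get_str_lws (the sample text is invented):

def get_str_lws(s):
    '''Return the leading whitespace of line s.'''
    return s[: len(s) - len(s.lstrip())]

blanks = tabs = 0
for s in 'def f():\n\tx = 1\n    y = 2\n'.splitlines(True):
    lws = get_str_lws(s)
    blanks += lws.count(' ')
    tabs += lws.count('\t')
assert (blanks, tabs) == (4, 1)  # Intermixed leading blanks and tabs.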
Code example #27
File: leoCheck.py Project: TheKezzyBoy/leo-editor
 def scan(self, fn, s):
     lines = g.splitLines(s)
     self.tot_lines += len(lines)
     for i, s in enumerate(lines):
         m = re.search(self.r_all, s)
         if m and not s.startswith('@'):
             self.match(fn, i, m, s)
Code example #28
 def regularizeWhitespace(self, s):
     '''Regularize leading whitespace in s:
     Convert tabs to blanks or vice versa depending on the @tabwidth in effect.
     This is only called for strict languages.'''
     changed = False
     lines = g.splitLines(s)
     result = []
     tab_width = self.tab_width
     if tab_width < 0:  # Convert tabs to blanks.
         for line in lines:
             i, w = g.skip_leading_ws_with_indent(line, 0, tab_width)
             s = g.computeLeadingWhitespace(
                 w, -abs(tab_width)) + line[i:]  # Use negative width.
             if s != line: changed = True
             result.append(s)
     elif tab_width > 0:  # Convert blanks to tabs.
         for line in lines:
             s = g.optimizeLeadingWhitespace(
                 line, abs(tab_width))  # Use positive width.
             if s != line: changed = True
             result.append(s)
     if changed:
         action = 'tabs converted to blanks' if self.tab_width < 0 else 'blanks converted to tabs'
         message = 'inconsistent leading whitespace. %s' % action
         self.report(message)
     return ''.join(result)
Code example #29
File: abbrevCommands.py Project: maarli/leo-editor
    def init_tree_abbrev_helper(self, d, tree_s):

        trace = False and not g.unitTesting
        c = self.c
        old_p = c.p.copy()
        p = c.pasteOutline(s=tree_s,
                           redrawFlag=False,
                           undoFlag=False,
                           tempOutline=True)
        if not p: return g.trace('no pasted node')
        for s in g.splitLines(p.b):
            if s.strip() and not s.startswith('#'):
                abbrev_name = s.strip()
                for child in p.children():
                    if child.h.strip() == abbrev_name:
                        # g.trace('calling c.selectPosition', child.h)
                        c.selectPosition(child)
                        abbrev_s = c.fileCommands.putLeoOutline()
                        if trace: g.trace('define', abbrev_name, len(abbrev_s))
                        d[abbrev_name] = abbrev_s
                        break
                else:
                    g.trace('no definition for %s' % abbrev_name)
        p.doDelete(newNode=old_p)
        c.selectPosition(old_p)
Code example #30
 def v2_gen_lines(self, s, parent):
     '''Node generator for otl (vim-outline) mode.'''
     self.inject_lines_ivar(parent)
     # We may as well do this first.  See warning below.
     self.add_line(parent, '@others\n')
     self.parents = [parent]
     for line in g.splitLines(s):
         m = self.otl_body_pattern.match(line)
         if m:
             p = self.parents[-1]
             self.add_line(p, m.group(1))
         else:
             m = self.otl_pattern.match(line)
             if m:
                 # Cut back the stack, then allocate a new node.
                 level = 1 + len(m.group(1))
                 self.parents = self.parents[:level]
                 self.find_parent(
                     level = level,
                     h = m.group(2).strip())
             else:
                 self.error('Bad otl line: %r' % line)
     # This warning *is* correct.
     warning = '\nWarning: this node is ignored when writing this file.\n\n'
     self.add_line(self.root, warning)
Code example #31
File: qt_commands.py Project: SegundoBob/leo-editor
def showFonts(self, event=None):
    '''Open a tab in the log pane showing a font picker.'''
    c = self.c; p = c.p

    picker = QtWidgets.QFontDialog()
    if p.h.startswith('@font'):
        (name, family, weight, slant, size) = leoConfig.parseFont(p.b)
    else:
        name, family, weight, slant, size = None, None, False, False, 12
    try:
        font = QtGui.QFont()
        if family: font.setFamily(family)
        font.setBold(weight)
        font.setItalic(slant)
        font.setPointSize(size)
        picker.setCurrentFont(font)
    except ValueError:
        pass
    if not picker.exec_():
        g.es("No font selected")
    else:
        font = picker.selectedFont()
        udata = c.undoer.beforeChangeNodeContents(p)
        comments = [x for x in g.splitLines(p.b) if x.strip().startswith('#')]

        defs = [
            '\n' if comments else '',
            '%s_family = %s\n'%(name, font.family()),
            '%s_weight = %s\n'%(name, 'bold' if font.bold() else 'normal'),
            '%s_slant = %s\n'%(name, 'italic' if font.italic() else 'roman'),
            '%s_size = %s\n'%(name, font.pointSizeF())
        ]

        p.b = g.u('').join(comments + defs)
        c.undoer.afterChangeNodeContents(p, 'change-font', udata)
Code example #32
File: format-code.py Project: AG4GitHub/leo
    def writeBody(self, p):

        trace = False
        self.p = p.copy()  # for traces.
        if not p.b.strip():
            return  # No need to write any more newlines.

        showDocsAsParagraphs = self.getOption('show_doc_parts_as_paragraphs')
        lines = g.splitLines(p.b)
        parts = self.split_parts(lines, showDocsAsParagraphs)
        result = []
        for kind, lines in parts:
            if trace: g.trace(kind, len(lines), p.h)
            if kind == '@rst-option':  # Also handles '@rst-options'
                pass  # The prepass has already handled the options.
            elif kind == '@rst-markup':
                lines.extend('\n')
                result.extend(lines)
            elif kind == '@doc':
                if showDocsAsParagraphs:
                    result.extend(lines)
                    result.append('\n')
                else:
                    result.extend(self.write_code_block(lines))
            elif kind == 'code':
                result.extend(self.write_code_block(lines))
            else:
                g.trace('Can not happen', kind)

        # Write the lines with exactly two trailing newlines.
        s = ''.join(result).rstrip() + '\n\n'
        self.write(s)
Code example #33
 def gen_lines(self, s, parent):
     '''Node generator for markdown importer.'''
     trace = False and g.unitTesting
     if not s or s.isspace():
         return
     self.inject_lines_ivar(parent)
     # We may as well do this first.  See warning below.
     self.add_line(parent, '@others\n')
     self.stack = [parent]
     skip = 0
     lines = g.splitLines(s)
     for i, line in enumerate(lines):
         if trace: g.trace('%2s %r' % (i + 1, line))
         if skip > 0:
             skip -= 1
         elif self.is_lookahead_overline(i, lines):
             level = self.ch_level(line[0])
             self.make_node(level, lines[i + 1])
             skip = 2
         elif self.is_lookahead_underline(i, lines):
             level = self.ch_level(lines[i + 1][0])
             self.make_node(level, line)
             skip = 1
         elif i == 0:
             p = self.make_dummy_node('!Dummy chapter')
             self.add_line(p, line)
         else:
             p = self.stack[-1]
             self.add_line(p, line)
     warning = '\nWarning: this node is ignored when writing this file.\n\n'
     self.add_line(parent, warning)
Code example #34
File: ipynb.py Project: SegundoBob/leo-editor
 def put_source(self, p, type_):
     '''Put the 'source' key for p.'''
     lines = [z for z in g.splitLines(p.b) if not g.isDirective(z)]
     # skip blank lines.
     i = 0
     while i < len(lines) and not lines[i].strip():
         i += 1
     lines = lines[i:]
     # skip trailing lines:
     i = len(lines)-1
     while i > 0 and not lines[i].strip():
         i -= 1
     lines = lines[:i+1]
     has_header = any([self.header_re.search(z) for z in lines])
     if lines and lines[-1].endswith('\n'):
         s_last = lines.pop()
         lines.append(s_last.rstrip())
     s = ''.join(lines)
     # Auto add headlines.
     if type_ == 'markdown' and not has_header:
         if 1: # Just put the headline.
             heading = p.h.strip()+'\n'
         else:
             # Not needed now that the import code sets headlines.
             n = min(6, self.level(p))
             heading = '<h%(level)s>%(headline)s</h%(level)s>\n' % {
                 'level': n,
                 'headline': p.h,
             }
         s = heading + s
         # Not completely accurate, but good for now.
     self.put_list('source', s or '# no code!')
Code example #35
File: leoCheck.py Project: SPRIME01/leo-editor
 def run(self, files):
     """Process all files"""
     self.files = files
     t1 = time.clock()
     for fn in files:
         s, e = g.readFileIntoString(fn)
         if s:
             self.tot_s += len(s)
             g.trace("%8s %s" % ("{:,}".format(len(s)), g.shortFileName(fn)))
             # Print len(s), with commas.
             # Fast, accurate:
             # 1.9 sec for parsing.
             # 2.5 sec for Null AstFullTraverer traversal.
             # 2.7 sec to generate all strings.
             # 3.8 sec to generate all reports.
             s1 = g.toEncodedString(s)
             self.tot_lines += len(g.splitLines(s))
             # Adds less than 0.1 sec.
             node = ast.parse(s1, filename="before", mode="exec")
             ShowDataTraverser(self, fn).visit(node)
             # elif 0: # Too slow, too clumsy: 3.3 sec for tokenizing
             # readlines = g.ReadLinesClass(s).next
             # for token5tuple in tokenize.generate_tokens(readlines):
             # pass
             # else: # Inaccurate. 2.2 sec to generate all reports.
             # self.scan(fn, s)
         else:
             g.trace("skipped", g.shortFileName(fn))
     t2 = time.clock()
      # Get the time exclusive of print time.
     self.show_results()
     g.trace("done: %4.1f sec." % (t2 - t1))
Code example #36
File: format-code.py Project: Armagedoom/leo-editor
    def writeBody (self,p):

        trace = False
        self.p = p.copy() # for traces.
        if not p.b.strip():
            return # No need to write any more newlines.

        showDocsAsParagraphs = self.getOption('show_doc_parts_as_paragraphs')
        lines = g.splitLines(p.b)
        parts = self.split_parts(lines,showDocsAsParagraphs)
        result = []
        for kind,lines in parts:
            if trace: g.trace(kind,len(lines),p.h)
            if kind == '@rst-option': # Also handles '@rst-options'
                pass # The prepass has already handled the options.
            elif kind == '@rst-markup':
                lines.extend('\n')
                result.extend(lines)
            elif kind == '@doc':
                if showDocsAsParagraphs:
                    result.extend(lines)
                    result.append('\n')
                else:
                    result.extend(self.write_code_block(lines))
            elif kind == 'code':
                result.extend(self.write_code_block(lines))
            else:
                g.trace('Can not happen',kind)

        # Write the lines with exactly two trailing newlines.
        s = ''.join(result).rstrip() + '\n\n'
        self.write(s)
Code example #37
 def find_file_line(self, n, p=None):
     '''
     Place the cursor on the n'th line (one-based) of an external file.
     Return (p, offset, found) for unit testing.
     '''
     c = self.c
     if n < 0:
         return
     p = p or c.p
     root, fileName = self.find_root(p)
     if root:
         # Step 1: Get the lines of external files *with* sentinels,
          # even if the actual external file contains no sentinels.
         sentinels = root.isAtFileNode()
         s = self.get_external_file_with_sentinels(root)
         lines = g.splitLines(s)
         # Step 2: scan the lines for line n.
         if sentinels:
             # All sentinels count as real lines.
             gnx, h, offset = self.scan_sentinel_lines(lines, n, root)
         else:
              # Not all sentinels count as real lines.
             gnx, h, offset = self.scan_nonsentinel_lines(lines, n, root)
         p, found = self.find_gnx(root, gnx, h)
         if gnx and found:
             self.success(lines, n, offset, p)
             return p, offset, True
         else:
             self.fail(lines, n, root)
             return None, -1, False
     else:
         return self.find_script_line(n, p)
Code example #38
 def create_uas(self, at_uas, root):
     '''Recreate uA's from the @ua nodes in the @uas tree.'''
     trace = False and not g.unitTesting
     # Create an *inner* gnx dict.
     # Keys are gnx's, values are positions *within* root's tree.
     d = {}
     for p in root.self_and_subtree():
         d[p.v.gnx] = p.copy()
     # Recreate the uA's for the gnx's given by each @ua node.
     for at_ua in at_uas.children():
         h, b = at_ua.h, at_ua.b
         gnx = h[4:].strip()
         if b and gnx and g.match_word(h, 0, '@ua'):
             p = d.get(gnx)
             if p:
                 # Handle all recent variants of the node.
                 lines = g.splitLines(b)
                 if b.startswith('unl:') and len(lines) == 2:
                     # pylint: disable=unbalanced-tuple-unpacking
                     unl, ua = lines
                 else:
                     unl, ua = None, b
                 if ua.startswith('ua:'):
                     ua = ua[3:]
                 if ua:
                     ua = self.unpickle(ua)
                     if trace: g.trace('set', p.h, ua)
                     p.v.u = ua
                 else:
                     g.trace('Can not unpickle uA in', p.h, repr(unl),
                             type(ua), ua[:40])
             elif trace:
                 g.trace('no match for gnx:', repr(gnx))
         elif trace:
             g.trace('unexpected child of @uas node', at_ua)
Code example #39
def comment_leo_lines(p):
    '''Replace lines with Leonine syntax with special comments.'''
    # Choose the comment string so it appears nowhere in s.
    s0 = p.b
    n = 5
    while s0.find('#' + ('!' * n)) > -1:
        n += 1
    comment = '#' + ('!' * n)
    # Create a dict of directives.
    d = {}
    for z in g.globalDirectiveList:
        d[z] = True
    # Convert all Leonine lines to special comments.
    i, lines, result = 0, g.splitLines(s0), []
    while i < len(lines):
        progress = i
        s = lines[i]
        # Comment out any containing a section reference.
        j = s.find('<<')
        k = s.find('>>') if j > -1 else -1
        if -1 < j < k:
            result.append(comment + s)
            # Generate a properly-indented pass line.
            j2 = g.skip_ws(s, 0)
            result.append('%spass\n' % (' ' * j2))
        elif s.lstrip().startswith('@'):
            # Comment out all other Leonine constructs.
            if starts_doc_part(s):
                # Comment the entire doc part, until @c or @code.
                result.append(comment + s)
                i += 1
                while i < len(lines):
                    s = lines[i]
                    result.append(comment + s)
                    i += 1
                    if ends_doc_part(s):
                        break
            else:
                j = g.skip_ws(s, 0)
                assert s[j] == '@'
                j += 1
                k = g.skip_id(s, j, chars='-')
                if k > j:
                    word = s[j: k]
                    if word == 'others':
                        # Remember the original @others line.
                        result.append(comment + s)
                        # Generate a properly-indented pass line.
                        result.append('%spass\n' % (' ' * (j - 1)))
                    else:
                        # Comment only Leo directives, not decorators.
                        result.append(comment + s if word in d else s)
                else:
                    result.append(s)
        else:
            # A plain line.
            result.append(s)
        if i == progress:
            i += 1
    return comment, ''.join(result)
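A standalone sketch of the comment-string selection step above: grow the '#!!!...' marker until it occurs nowhere in the body (the sample body is invented):

s0 = 'x = 1  # body text that happens to contain #!!!!!\n'
n = 5
while s0.find('#' + ('!' * n)) > -1:
    n += 1
comment = '#' + ('!' * n)
assert comment == '#!!!!!!' and comment not in s0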
Code example #40
File: qt_commands.py Project: wrapperband/leo-editor
def showFonts(self, event=None):
    """Open a tab in the log pane showing a font picker."""
    c = self.c
    p = c.p

    picker = QtWidgets.QFontDialog()
    if p.h.startswith('@font'):
        (name, family, weight, slant, size) = leoConfig.parseFont(p.b)
    else:
        name, family, weight, slant, size = None, None, False, False, 12
    try:
        font = QtGui.QFont()
        if family: font.setFamily(family)
        font.setBold(weight)
        font.setItalic(slant)
        font.setPointSize(size)
        picker.setCurrentFont(font)
    except ValueError:
        pass
    if not picker.exec_():
        g.es("No font selected")
    else:
        font = picker.selectedFont()
        udata = c.undoer.beforeChangeNodeContents(p)
        comments = [x for x in g.splitLines(p.b) if x.strip().startswith('#')]
        defs = [
            '\n' if comments else '',
            '%s_family = %s\n' % (name, font.family()),
            '%s_weight = %s\n' % (name, 'bold' if font.bold() else 'normal'),
            '%s_slant = %s\n' % (name, 'italic' if font.italic() else 'roman'),
            '%s_size = %s\n' % (name, font.pointSizeF())
        ]
        p.b = ''.join(comments + defs)
        c.undoer.afterChangeNodeContents(p, 'change-font', udata)
Code example #41
def should_beautify(p):
    '''
    Return True if @beautify is in effect for node p.
    Ambiguous @beautify/@nobeautify nodes have no effect.
    '''
    for p2 in p.self_and_parents(copy=False):
        d = g.get_directives_dict(p2)
        if 'killbeautify' in d:
            return False
        elif 'beautify' in d and 'nobeautify' in d:
            if p == p2:
                # honor whichever comes first.
                for line in g.splitLines(p2.b):
                    if line.startswith('@beautify'):
                        return True
                    elif line.startswith('@nobeautify'):
                        return False
                g.trace('can not happen', p2.h)
                return False
            else:
                # The ambiguous node has no effect.
                # Look up the tree.
                pass
        elif 'beautify' in d:
            return True
        elif 'nobeautify' in d:
            # This message would quickly become annoying.
            # self.skip_message('@nobeautify',p)
            return False
    # The default is to beautify.
    return True
Code example #42
 def gen_lines(self, s, parent):
     '''
     Non-recursively parse all lines of s into parent, creating descendant
     nodes as needed.
     '''
     tail_p = None
     prev_state = self.state_class()
     target = Target(parent, prev_state)
     stack = [target, target]
     self.inject_lines_ivar(parent)
     lines = g.splitLines(s)
     self.skip = 0
     for i, line in enumerate(lines):
         new_state = self.scan_line(line, prev_state)
         top = stack[-1]
         if self.skip > 0:
             self.skip -= 1
         elif self.is_ws_line(line):
             p = tail_p or top.p
             self.add_line(p, line)
         elif self.starts_block(i, lines, new_state, prev_state):
             tail_p = None
             self.start_new_block(i, lines, new_state, prev_state, stack)
         elif self.ends_block(line, new_state, prev_state, stack):
             tail_p = self.end_block(line, new_state, stack)
         else:
             p = tail_p or top.p
             self.add_line(p, line)
         prev_state = new_state
Code example #43
File: bibtex.py Project: tbpassin/leo-editor
def readBibTexFileIntoTree(c, fn, p):
    '''Import a BibTeX file into a @bibtex tree.'''
    root = p.copy()
    g.es('reading:', fn)
    s = g.readFileIntoEncodedString(fn)
        # Read the encoded bytes for g.getEncodingAt()
    if not s or not s.strip():
        return
    encoding = g.getEncodingAt(p, s)
    s = g.toUnicode(s, encoding=encoding)
    aList, entries, strings = [], [], []
        # aList is a list of tuples (h,b).
    s = '\n' + ''.join([z.lstrip() for z in g.splitLines(s)])
    for line in s.split('\n@')[1:]:
        kind, rest = line[: 6], line[7:].strip()
        if kind == 'string':
            strings.append(rest[: -1] + '\n')
        else:
            i = min(line.find(','), line.find('\n'))
            h = '@' + line[: i]
            h = h.replace('{', ' ').replace('(', ' ').replace('\n', '')
            b = line[i + 1:].rstrip().lstrip('\n')[: -1]
            entries.append((h, b),)
    if strings:
        h, b = '@string', ''.join(strings)
        aList.append((h, b),)
    aList.extend(entries)
    for h, b in aList:
        p = root.insertAsLastChild()
        p.b, p.h = b, h
    root.expand()
    c.redraw()
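A small standalone sketch of the normalize-and-split step above, using an invented two-entry BibTeX string:

s = '@string{jgr = {J. Geophys. Res.}}\n@article{key1,\n  title = {A Title},\n}\n'
s = '\n' + ''.join(z.lstrip() for z in s.splitlines(True))
parts = s.split('\n@')[1:]
assert parts[0].startswith('string') and parts[1].startswith('article')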
Code example #44
 def check_blanks_and_tabs(self, lines):
      '''Check for intermixed blanks & tabs.'''
     # Do a quick check for mixed leading tabs/blanks.
     fn = g.shortFileName(self.root.h)
     w = self.tab_width
     blanks = tabs = 0
     for s in g.splitLines(lines):
         lws = self.get_str_lws(s)
         blanks += lws.count(' ')
         tabs += lws.count('\t')
     # Make sure whitespace matches @tabwidth directive.
     if w < 0:
         ok = tabs == 0
         message = 'tabs found with @tabwidth %s in %s' % (w, fn)
     elif w > 0:
         ok = blanks == 0
         message = 'blanks found with @tabwidth %s in %s' % (w, fn)
     if ok:
         ok = blanks == 0 or tabs == 0
         message = 'intermixed blanks and tabs in: %s' % (fn)
     if not ok:
         if g.unitTesting:
             self.report(message)
         else:
             g.es(message)
     return ok
Code example #45
File: viewrendered.py Project: jurov/leo-editor
    def update_image (self,s,keywords):
        '''Update an image in the vr pane.'''
        pc = self
        if not s.strip():
            return
        lines = g.splitLines(s) or []
        fn = lines and lines[0].strip()
        if not fn:
            return
        w = pc.ensure_text_widget()
        ok,path = pc.get_fn(fn,'@image')
        if not ok:
            w.setPlainText('@image: file not found: %s' % (fn))
            return
        path = fn.replace('\\','/')
        template = '''\
    <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
     "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
    <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
    <head></head>
    <body bgcolor="#fffbdc">
    <img src="%s">
    </body>
    </html>
    ''' % (path)

        # Only works in Python 3.x.
        template = g.adjustTripleString(template,pc.c.tab_width).strip() # Sensitive to leading blank lines.
        # template = g.toUnicode(template)
        pc.show()
        w.setReadOnly(False)
        w.setHtml(template)
        w.setReadOnly(True)
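g.adjustTripleString is Leo-specific; textwrap.dedent gives a similar "strip the common indentation" effect in plain Python. A small sketch of building the same kind of image page (image_html and the sample path are invented for illustration):

import textwrap

def image_html(path):
    """Return a minimal HTML page that displays the image at path."""
    path = path.replace('\\', '/')
    template = textwrap.dedent('''\
        <html>
        <body bgcolor="#fffbdc">
        <img src="%s">
        </body>
        </html>
        ''') % path
    return template.strip()

print(image_html(r'C:\pictures\demo.png'))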
Code example #46
    def readFile (self,fileName,root):

        trace = False and not g.unitTesting
        verbose = True
        c = self.c

        if not g.enableDB:
            if trace: g.trace('g.enableDB is False')
            return '',False,None

        s,e = g.readFileIntoString(fileName,raw=True,silent=True)
        if s is None:
            if trace: g.trace('empty file contents',fileName)
            return s,False,None
        assert not g.isUnicode(s)

        if trace and verbose:
            for i,line in enumerate(g.splitLines(s)):
                print('%3d %s' % (i,repr(line)))

        # There will be a bug if s is not already an encoded string.
        key = self.fileKey(root.h,s,requireEncodedString=True)
        ok = self.db and key in self.db
        if trace: g.trace('in cache',ok,fileName,key)
        if ok:
            # Delete the previous tree, regardless of the @<file> type.
            while root.hasChildren():
                root.firstChild().doDelete()
            # Recreate the file from the cache.
            aList = self.db.get(key)
            self.createOutlineFromCacheList(root.v,aList,fileName=fileName)

        return s,ok,key
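The caching idea is to key the cache on the raw, still-encoded file bytes, so any change to the file invalidates the entry. A toy standalone sketch (FileCache, file_key and the hashing scheme are stand-ins, not Leo's cacher):

import hashlib

class FileCache:
    """A toy content-addressed cache keyed on a file's name and raw bytes."""

    def __init__(self):
        self.db = {}

    def file_key(self, name, raw_bytes):
        # The key changes whenever the file's bytes change.
        assert isinstance(raw_bytes, bytes)
        return hashlib.md5(name.encode('utf-8') + raw_bytes).hexdigest()

    def read(self, name, raw_bytes):
        """Return (cached_value, hit) for the given file contents."""
        key = self.file_key(name, raw_bytes)
        if key in self.db:
            return self.db[key], True
        return None, False

    def write(self, name, raw_bytes, value):
        self.db[self.file_key(name, raw_bytes)] = value

cache = FileCache()
cache.write('spam.py', b'print(1)\n', ['outline data'])
print(cache.read('spam.py', b'print(1)\n'))   # (['outline data'], True)
print(cache.read('spam.py', b'print(2)\n'))   # (None, False)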
Code example #47
 def gen_lines(self, s, parent):
     '''Node generator for markdown importer.'''
     if not s or s.isspace():
         return
     self.inject_lines_ivar(parent)
     # We may as well do this first.  See warning below.
     self.stack = [parent]
     in_code = False
     lines = g.splitLines(s)
     skip = 0
     for i, line in enumerate(lines):
         top = self.stack[-1]
         level, name = self.is_hash(line)
         if skip > 0:
             skip -= 1
         elif not in_code and self.lookahead_underline(i, lines):
             level = 1 if lines[i+1].startswith('=') else 2
             self.make_node(level, line)
             skip = 1
         elif not in_code and name:
             self.make_node(level, name)
         elif i == 0:
             self.make_decls_node(line)
         elif in_code:
             if line.startswith("```"):
                 in_code = False
             self.add_line(top, line)
         elif line.startswith("```"):
             in_code = True
             self.add_line(top, line)
         else:
             self.add_line(top, line)
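Fenced code blocks and setext underlines ('===' / '---') are what make this scan stateful. A compact standalone version of the same loop that only collects (level, headline) pairs (outline_markdown and both regexes are invented approximations):

import re

ATX = re.compile(r'^(#{1,6})\s+(.+?)\s*#*\s*$')

def outline_markdown(s):
    """Return a list of (level, headline) pairs, skipping fenced code."""
    result, in_code, skip = [], False, False
    lines = s.splitlines(True)
    for i, line in enumerate(lines):
        if skip:
            skip = False
            continue
        if line.startswith('```'):
            in_code = not in_code
            continue
        if in_code:
            continue
        m = ATX.match(line)
        if m:
            result.append((len(m.group(1)), m.group(2)))
        elif (i + 1 < len(lines) and line.strip()
              and re.match(r'^(=+|-+)\s*$', lines[i + 1])):
            # Setext header: the underline character decides the level, as above.
            result.append((1 if lines[i + 1].startswith('=') else 2, line.strip()))
            skip = True
    return result

print(outline_markdown('Intro\n=====\n\n## Usage\n\n```\n# not a header\n```\n'))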
Code example #48
File: ipynb.py Project: vansdev/leo-editor
 def put_source(self, p, type_):
     '''Put the 'source' key for p.'''
     lines = [z for z in g.splitLines(p.b) if not g.isDirective(z)]
     # skip blank lines.
     i = 0
     while i < len(lines) and not lines[i].strip():
         i += 1
     lines = lines[i:]
     # skip trailing lines:
     i = len(lines) - 1
     while i > 0 and not lines[i].strip():
         i -= 1
     lines = lines[:i + 1]
     has_header = any([self.header_re.search(z) for z in lines])
     if lines and lines[-1].endswith('\n'):
         s_last = lines.pop()
         lines.append(s_last.rstrip())
     s = ''.join(lines)
     # Auto add headlines.
     if type_ == 'markdown' and not has_header:
         if 1:  # Just put the headline.
             heading = p.h.strip() + '\n'
         else:
             # Not needed now that the import code sets headlines.
             n = min(6, self.level(p))
             heading = '<h%(level)s>%(headline)s</h%(level)s>\n' % {
                 'level': n,
                 'headline': p.h,
             }
         s = heading + s
         # Not completely accurate, but good for now.
     self.put_list('source', s or '# no code!')
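Trimming blank lines from both ends of a line list is a small reusable step; here is the two while-loop pattern above as a standalone helper (strip_blank_edges is an invented name):

def strip_blank_edges(lines):
    """Return lines without leading or trailing whitespace-only lines."""
    start, end = 0, len(lines)
    while start < end and not lines[start].strip():
        start += 1
    while end > start and not lines[end - 1].strip():
        end -= 1
    return lines[start:end]

print(strip_blank_edges(['\n', '  \n', 'code\n', '\n']))  # ['code\n']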
Code example #49
File: markdown.py Project: gunnarahlberg/leo-editor
 def v2_gen_lines(self, s, parent):
     '''Node generator for markdown importer.'''
     trace = False and g.unitTesting
     if not s or s.isspace():
         return
     self.inject_lines_ivar(parent)
     # We may as well do this first.  See warning below.
     self.add_line(parent, '@others\n')
     self.stack = [parent]
     in_code = False
     for i, line in enumerate(g.splitLines(s)):
         kind, level, name = self.starts_section(line)
         top = self.stack[-1]
         if trace:
             g.trace('%2s kind: %4r, level: %4r name: %10r %r' %
                     (i + 1, kind, level, name, line))
         if i == 0 and not kind:
             self.make_decls_node(line)
         elif in_code:
             if line.startswith("```"):
                 in_code = False
             self.add_line(top, line)
         elif self.is_underline(line):
             self.do_underline(line)
         elif kind:
             self.make_node(kind, level, line, name)
         else:
             if line.startswith("```"):
                 in_code = True
             self.add_line(top, line)
     warning = '\nWarning: this node is ignored when writing this file.\n\n'
     self.add_line(parent, warning)
Code example #50
 def hide(self, tag, kwargs, force=False):
     '''Hide all wikiview tags. Now done in the colorizer.'''
     trace = False and not g.unitTesting
     trace_parts = True
     trace_pats = False
     c = self.c
     if not (self.active or force) or kwargs['c'] != c:
         return
     w = c.frame.body.widget
     cursor = w.textCursor()
     s = w.toPlainText()
     if trace:
         g.trace('=====', g.callers())
         g.printList(g.splitLines(s))
     for urlpat in self.urlpats:
         if trace and trace_pats: g.trace(repr(urlpat))
         for m in urlpat.finditer(s):
             if trace: g.trace('FOUND', urlpat.pattern, m.start(0), repr(m.group(0)))
             for group_n, group in enumerate(m.groups()):
                 if group is None:
                     continue
                 if trace and trace_parts: g.trace(
                         m.start(group_n+1),
                         m.end(group_n+1),
                         repr(m.group(group_n+1)))
                 cursor.setPosition(m.start(group_n+1))
                 cursor.setPosition(m.end(group_n+1), cursor.KeepAnchor)
                 cfmt = cursor.charFormat()
                 cfmt.setFontPointSize(self.pts)
                 cfmt.setFontLetterSpacing(self.pct)
                 # cfmt._is_hidden = True  # gets lost
                 cursor.setCharFormat(cfmt)
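Aside from the Qt cursor calls, the interesting part is iterating every capture group of every match and reading its start/end offsets. That part is plain re and can be shown on its own (the URL pattern below is only an example, not one of self.urlpats):

import re

# An illustrative pattern with two groups: the scheme and the rest of the URL.
urlpat = re.compile(r'\b(https?)://(\S+)')

s = 'See https://leoeditor.com and http://example.org/page for details.\n'
for m in urlpat.finditer(s):
    for group_n in range(1, len(m.groups()) + 1):
        if m.group(group_n) is None:
            continue  # Optional groups can be absent.
        # These spans are what the text cursor selects above.
        print(group_n, m.start(group_n), m.end(group_n), repr(m.group(group_n)))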
Code example #51
File: javascript.py Project: pmills/leo-editor
 def putFunction(self, s, sigStart, codeEnd, start, parent):
     '''Create a node of parent for a function definition.'''
     trace = False and not g.unitTesting
     verbose = True
     # Enter a new function: save the old function info.
     oldStartSigIndent = self.startSigIndent
     if self.sigId:
         headline = self.sigId
     else:
         ### g.trace('Can not happen: no sigId')
         ### headline = 'unknown function'
         headline = s[sigStart:start]
     body1, body2 = self.computeBody(s, start, sigStart, codeEnd)
     body = body1 + body2
     parent = self.adjustParent(parent, headline)
     if trace:
         # pylint: disable=maybe-no-member
         g.trace('parent', parent and parent.h)
         if verbose:
             # g.trace('**body1...\n',body1)
             g.trace(self.atAutoSeparateNonDefNodes)
             g.trace('**body...\n%s' % body)
     # 2010/11/04: Fix wishlist bug 670744.
     if self.atAutoSeparateNonDefNodes:
         if body1.strip():
             if trace: g.trace('head', body1)
             line1 = g.splitLines(body1.lstrip())[0]
             line1 = line1.strip() or 'non-def code'
             self.createFunctionNode(line1, body1, parent)
             body = body2
     self.lastParent = self.createFunctionNode(headline, body, parent)
     # Exit the function: restore the function info.
     self.startSigIndent = oldStartSigIndent
Code example #52
 def update_cell_body(self, cell, meta, p):
     '''Create a new body text, depending on kind.'''
     
     def clean(lines):
         lines = [z for z in lines if not g.isDirective(z)]
         s = ''.join(lines).strip() + '\n'
         return g.splitLines(s)
         
     kind = self.cell_type(p)
     lines = g.splitLines(p.b)
     level = p.level() - self.root.level()
     if kind == 'markdown':
         # Remove all header markup lines.
         lines = [z for z in lines if
             not self.pat1.match(z) and not self.pat2.match(z)]
         lines = clean(lines)
         # Insert a new header markup line.
         if level > 0:
             lines.insert(0, '%s %s\n' % ('#'*level, self.clean_headline(p.h)))
     else:
         # Remember the level for the importer.
         meta ['leo_level'] = level
         lines = clean(lines)
         # Remove leading whitespace lines inserted during import.
     cell ['source'] = lines
Code example #53
File: leoTips.py Project: XCaminhante/leo-editor
 def get_tips(data):
     """get_tips - get tips from GitHub issues
     :param dict data: GitHub API issues list
     :return: list of Tips
     """
     tips = []
     for issue in data:
         body, n, title = issue['body'], issue['number'], issue['title']
         lines = g.splitLines(body)
         for i, s in enumerate(lines):
             if s.strip().lower().startswith('tags:'):
                 lines = lines[:i] + lines[i + 1:]
                 text = ''.join(lines).strip()
                 s = s.strip().rstrip('.').strip()
                 s = s[len('tags:'):].strip()
                 tags = [z.strip() for z in s.split(',')]
                 break
         else:
             tags = []
             text = body.strip()
         tips.append(
             UserTip(
                 n=n,
                 tags=tags,
                 text=text.strip(),
                 title=title.strip(),
             ))
     return tips
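The tags handling can be exercised by itself. A standalone sketch (extract_tags is an invented name) that pulls a 'Tags: a, b.' line out of an issue body and returns the remaining text plus the tag list:

def extract_tags(body):
    """Return (text, tags) where a 'tags:' line, if any, has been removed."""
    lines = body.splitlines(True)  # Roughly g.splitLines.
    for i, s in enumerate(lines):
        if s.strip().lower().startswith('tags:'):
            text = ''.join(lines[:i] + lines[i + 1:]).strip()
            s = s.strip().rstrip('.').strip()
            tags = [z.strip() for z in s[len('tags:'):].split(',')]
            return text, tags
    return body.strip(), []

print(extract_tags('Use Alt-X to run any command.\nTags: commands, minibuffer.\n'))
# -> ('Use Alt-X to run any command.', ['commands', 'minibuffer'])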
Code example #54
 def create_uas(self,at_uas,root):
     '''Recreate uA's from the @ua nodes in the @uas tree.'''
     trace = False and not g.unitTesting
     # Create an *inner* gnx dict.
     # Keys are gnx's, values are positions *within* root's tree.
     d = {}
     for p in root.self_and_subtree():
         d[p.v.gnx] = p.copy()
     # Recreate the uA's for the gnx's given by each @ua node.
     for at_ua in at_uas.children():
         h,b = at_ua.h,at_ua.b
         gnx = h[4:].strip()
         if b and gnx and g.match_word(h,0,'@ua'):
             p = d.get(gnx)
             if p:
                 # Handle all recent variants of the node.
                 lines = g.splitLines(b)
                 if b.startswith('unl:') and len(lines) == 2:
                     # pylint: disable=unbalanced-tuple-unpacking
                     unl,ua = lines
                 else:
                     unl,ua = None,b
                 if ua.startswith('ua:'):
                     ua = ua[3:]
                 if ua:
                     ua = self.unpickle(ua)
                     if trace: g.trace('set',p.h,ua)
                     p.v.u = ua
                 else:
                     g.trace('Can not unpickle uA in',p.h,type(ua),ua[:40])
             elif trace:
                 g.trace('no match for gnx:',repr(gnx),'unl:',unl)
         elif trace:
             g.trace('unexpected child of @uas node',at_ua)
Code example #55
File: format-code.py Project: AG4GitHub/leo
    def scanForOptionDocParts(self, p, s):
        '''Return a dictionary containing all options from @rst-options doc parts in p.
        Multiple @rst-options doc parts are allowed: this code aggregates all options.
        '''

        d = {}
        n = 0
        lines = g.splitLines(s)
        while n < len(lines):
            line = lines[n]
            n += 1
            if line.startswith('@'):
                i = g.skip_ws(line, 1)
                for kind in ('@rst-options', '@rst-option'):
                    if g.match_word(line, i, kind):
                        # Allow options on the same line.
                        line = line[i + len(kind):]
                        d.update(self.scanOption(p, line))
                        # Add options until the end of the doc part.
                        while n < len(lines):
                            line = lines[n]
                            n += 1
                            found = False
                            for stop in ('@c', '@code', '@'):
                                if g.match_word(line, 0, stop):
                                    found = True
                                    break
                            if found:
                                break
                            else:
                                d.update(self.scanOption(p, line))
                        break
        return d
Code example #56
File: xml.py Project: chiamingyen/kmol2016
    def skipCommentToken(self,s,i):

        '''Return comment lines with all leading/trailing whitespace removed.'''
        j = self.skipComment(s,i)
        lines = g.splitLines(s[i:j])
        lines = [z.strip() for z in lines]
        return j,'\n'.join(lines)
Code example #57
def flattenOutlineToNode(self, event=None):
    '''
    Append the body text of all descendants of the selected node to the
    body text of the selected node.
    '''
    c, root, u = self, self.p, self.undoer
    if not root.hasChildren():
        return
    language = g.getLanguageAtPosition(c, root)
    if language:
        single, start, end = g.set_delims_from_language(language)
    else:
        single, start, end = '#', None, None
    bunch = u.beforeChangeNodeContents(root)
    aList = []
    for p in root.subtree():
        if single:
            aList.append('\n\n===== %s %s\n\n' % (single, p.h))
        else:
            aList.append('\n\n===== %s %s %s\n\n' % (start, p.h, end))
        if p.b.strip():
            lines = g.splitLines(p.b)
            aList.extend(lines)
    root.b = root.b.rstrip() + '\n' + ''.join(aList).rstrip() + '\n'
    u.afterChangeNodeContents(root, 'flatten-outline-to-node', bunch)
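Only the headline separators depend on the language's comment delimiters. A standalone sketch of that formatting step, using a small hand-made delimiter table in place of g.set_delims_from_language:

# A tiny sample of per-language comment delimiters: (single, start, end).
DELIMS = {
    'python': ('#', None, None),
    'c': ('//', '/*', '*/'),
    'html': (None, '<!--', '-->'),
}

def section_header(language, headline):
    """Return a flatten-outline style separator line for one headline."""
    single, start, end = DELIMS.get(language, ('#', None, None))
    if single:
        return '\n\n===== %s %s\n\n' % (single, headline)
    return '\n\n===== %s %s %s\n\n' % (start, headline, end)

print(repr(section_header('python', 'My node')))
print(repr(section_header('html', 'My node')))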
Code example #58
File: otl.py Project: satishgoda/leo-editor
 def gen_lines(self, s, parent):
     '''Node generator for otl (vim-outline) mode.'''
     self.inject_lines_ivar(parent)
     # We may as well do this first.  See warning below.
     self.add_line(parent, '@others\n')
     self.parents = [parent]
     for line in g.splitLines(s):
         m = self.otl_body_pattern.match(line)
         if m:
             p = self.parents[-1]
             self.add_line(p, m.group(1))
         else:
             m = self.otl_pattern.match(line)
             if m:
                 # Cut back the stack, then allocate a new node.
                 level = 1 + len(m.group(1))
                 self.parents = self.parents[:level]
                 self.find_parent(
                     level = level,
                     h = m.group(2).strip())
             else:
                 self.error('Bad otl line: %r' % line)
     # This warning *is* correct.
     warning = '\nWarning: this node is ignored when writing this file.\n\n'
     self.add_line(self.root, warning)
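An .otl file encodes outline depth with leading tabs and marks body lines with a ': ' prefix. A standalone parse of that shape (parse_otl and both regexes are rough stand-ins for otl_pattern and otl_body_pattern, whose exact definitions are not shown here):

import re

# Assumed shapes: headlines are tab-indented text, body lines start with ': '.
otl_node_pat = re.compile(r'^(\t*)(.+)$')
otl_body_pat = re.compile(r'^\s*: (.*)$')

def parse_otl(s):
    """Yield (level, kind, text) for each line of vim-outliner text."""
    for line in s.splitlines():
        m = otl_body_pat.match(line)
        if m:
            yield None, 'body', m.group(1)
            continue
        m = otl_node_pat.match(line)
        if m:
            yield 1 + len(m.group(1)), 'head', m.group(2).strip()
        else:
            yield None, 'error', line

for item in parse_otl('Top\n\tChild\n\t: body text\n'):
    print(item)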
Code example #59
 def addAbbrevHelper(self, s, tag=''):
     '''Enter the abbreviation 's' into the self.abbrevs dict.'''
     trace = False and not g.unitTesting
     if not s.strip():
         return
     try:
         d = self.abbrevs
         data = s.split('=')
         # Do *not* strip ws so the user can specify ws.
         name = data[0].replace('\\t', '\t').replace('\\n', '\n')
         val = '='.join(data[1:])
         if val.endswith('\n'): val = val[:-1]
         val = self.n_regex.sub('\n', val).replace('\\\\n', '\\n')
         old, tag = d.get(
             name,
             (None, None),
         )
         if old and old != val and not g.unitTesting:
             g.es_print('redefining abbreviation', name, '\nfrom',
                        repr(old), 'to', repr(val))
         if trace:
             val1 = val if val.find(
                 '\n') == -1 else g.splitLines(val)[0] + '...'
             g.trace('%12s %r' % (name, g.truncate(val1, 80)))
         d[name] = val, tag
     except ValueError:
         g.es_print('bad abbreviation: %s' % s)
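The crux is splitting on the first '=' while keeping any later '=' characters inside the value, and honoring escaped tabs and newlines in the name. A minimal standalone version (parse_abbrev is an invented name; the n_regex substitution on the value is left out):

def parse_abbrev(s):
    """Return (name, value) for a 'name=value' abbreviation line."""
    data = s.split('=')
    if len(data) < 2:
        raise ValueError('bad abbreviation: %r' % s)
    # Do *not* strip whitespace: the user may want it in the name.
    name = data[0].replace('\\t', '\t').replace('\\n', '\n')
    val = '='.join(data[1:])  # Re-join: only the first '=' separates name and value.
    if val.endswith('\n'):
        val = val[:-1]
    return name, val

print(parse_abbrev('sig=Best regards,\\nLeo\n'))  # ('sig', 'Best regards,\\nLeo')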