Example #1
    def get_tokens_unprocessed(self, text):
        offset = 0
        if re.search(r'^----\s*$', text, re.MULTILINE):
            py, _, text = text.partition('----')

            lexer = PythonLexer(**self.options)
            for i, token, value in lexer.get_tokens_unprocessed(py):
                yield i, token, value

            # Offsets index into the original string: skip past the Python
            # part and the '----' separator itself.
            offset = len(py)
            yield offset, Text, '----'
            offset += len('----')

        lexer = HtmlDjangoLexer(**self.options)
        for i, token, value in lexer.get_tokens_unprocessed(text):
            yield offset + i, token, value
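Example #1 splits a two-part document at a '----' separator line: the part
above is lexed as Python, the part below as Django-flavoured HTML, and the
second lexer's offsets are shifted so they index into the original string.
A minimal self-contained sketch of a full lexer built around this method is
below; the class name and aliases are assumptions, not from the original.

import re

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexer import Lexer
from pygments.lexers import HtmlDjangoLexer, PythonLexer
from pygments.token import Text


class PythonDjangoSplitLexer(Lexer):
    # Hypothetical wrapper class; only get_tokens_unprocessed mirrors the
    # example above.
    name = 'Python + Django split document'
    aliases = ['python-django-split']

    def get_tokens_unprocessed(self, text):
        offset = 0
        if re.search(r'^----\s*$', text, re.MULTILINE):
            py, _, text = text.partition('----')
            for i, token, value in PythonLexer(**self.options).get_tokens_unprocessed(py):
                yield i, token, value
            offset = len(py)               # position of the separator
            yield offset, Text, '----'
            offset += len('----')          # the HTML part starts after it
        for i, token, value in HtmlDjangoLexer(**self.options).get_tokens_unprocessed(text):
            yield offset + i, token, value


print(highlight('x = 1\n----\n<p>{{ x }}</p>\n',
                PythonDjangoSplitLexer(), TerminalFormatter()))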
Example #2
 def get_tokens_unprocessed(self, text):
     for index, token, value in \
             PythonLexer.get_tokens_unprocessed(self, text):
         if token is Name and value in self.EXTRA_KEYWORDS:
             yield index, Keyword.Pseudo, value
         else:
             yield index, token, value
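Examples #2 and #8 show the standard Pygments idiom for extending
PythonLexer: subclass it, re-run the parent tokenizer, and re-tag plain
Name tokens that appear in a known set. A runnable sketch under assumed
names (the class and the contents of EXTRA_KEYWORDS are illustrative):

from pygments.lexers import PythonLexer
from pygments.token import Keyword, Name


class NumPyishLexer(PythonLexer):
    # Illustrative subclass and keyword set; only the method body
    # mirrors the example above.
    EXTRA_KEYWORDS = {'array', 'zeros', 'ones'}

    def get_tokens_unprocessed(self, text):
        for index, token, value in \
                PythonLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword.Pseudo, value   # re-tag known names
            else:
                yield index, token, value


for tok in NumPyishLexer().get_tokens_unprocessed('zeros(3)'):
    print(tok)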
Example #3
    def get_tokens_unprocessed(self, text):
        pylexer = PythonLexer(**self.options)
        tblexer = PythonTracebackLexer(**self.options)

        # print '\nTEXT > \n', text, '\n TEXT'
        for line in text.splitlines():
            lstrip = line.lstrip()
            if lstrip.startswith('Out'):
                # Left-align 'Out' prompts so output_prompt can match them.
                line = lstrip + '\n'
            else:
                # splitlines() dropped the newline; put it back.
                line = line + '\n'
            input_prompt = self.input_prompt.match(line)
            output_prompt = self.output_prompt.match(line)

            if input_prompt is not None:
                yield (0, Generic.Prompt, input_prompt.group())
                code = line[input_prompt.end():]
                for item in pylexer.get_tokens_unprocessed(code):
                    yield item
            elif output_prompt is not None:
                # Use the 'error' token for output.  We should probably make
                # our own token, but error is typically in a bright color like
                # red, so it works fine for our output prompts.
                yield (0, Generic.Error, output_prompt.group())
                index = output_prompt.end()
                yield index, Generic.Output, line[index:]
            else:
                yield 0, Generic.Output, line
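The console-style examples above and below depend on class attributes that
are not shown: self.input_prompt and self.output_prompt must be compiled
regular expressions whose match() anchors at the start of a line. Plausible
stand-ins for an IPython-style session (the exact patterns in the source
projects may differ):

import re

# Assumed patterns; the originals are defined elsewhere in their classes.
input_prompt = re.compile(r'In \[[0-9]+\]: ')
output_prompt = re.compile(r'Out\[[0-9]+\]: ')

line = 'In [1]: x = 1\n'
m = input_prompt.match(line)
print(repr(m.group()), repr(line[m.end():]))   # 'In [1]: ' 'x = 1\n'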
Example #4
    def get_tokens_unprocessed(self, text):
        offset = 0
        if re.search(r'^----\s*$', text, re.MULTILINE):
            py, _, text = text.partition('----')

            lexer = PythonLexer(**self.options)
            for i, token, value in lexer.get_tokens_unprocessed(py):
                yield i, token, value

            # Offsets index into the original string: skip past the Python
            # part and the '----' separator itself.
            offset = len(py)
            yield offset, Text, '----'
            offset += len('----')

        lexer = HtmlDjangoLexer(**self.options)
        for i, token, value in lexer.get_tokens_unprocessed(text):
            yield offset + i, token, value
Example #5
    def get_tokens_unprocessed(self, text):
        pylexer = PythonLexer(**self.options)
        tblexer = PythonTracebackLexer(**self.options)

        # print '\nTEXT > \n', text, '\n TEXT'
        for line in text.splitlines():
            lstrip = line.lstrip()
            if lstrip.startswith('Out'):
                # Left-align 'Out' prompts so output_prompt can match them.
                line = lstrip + '\n'
            else:
                # splitlines() dropped the newline; put it back.
                line = line + '\n'
            input_prompt = self.input_prompt.match(line)
            output_prompt = self.output_prompt.match(line)

            if input_prompt is not None:
                yield (0, Generic.Prompt, input_prompt.group())
                code = line[input_prompt.end():]
                for item in pylexer.get_tokens_unprocessed(code):
                    yield item
            elif output_prompt is not None:
                # Use the 'error' token for output.  We should probably make
                # our own token, but error is typically in a bright color like
                # red, so it works fine for our output prompts.
                yield (0, Generic.Error, output_prompt.group())
                index = output_prompt.end()
                yield index, Generic.Output, line[index:]
            else:
                yield 0, Generic.Output, line
Example #6
 def get_tokens_unprocessed(self, text):
     for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
         if token is Name and value in self.lpy_modules:
             # Colourize previously detected modules
             yield index, Keyword, value
         else:
             yield index, token, value
Example #7
 def get_tokens_unprocessed(self, text):
     for index, token, value in PythonLexer.get_tokens_unprocessed(
             self, text):
         if token is Name and value in self._extra_commands:
             yield index, Name.Builtin, value
         else:
             yield index, token, value
Example #8
 def get_tokens_unprocessed(self, text):
     for index, token, value in \
             PythonLexer.get_tokens_unprocessed(self, text):
         if token is Name and value in self.EXTRA_KEYWORDS:
             yield index, Keyword.Pseudo, value
         else:
             yield index, token, value
Example #9
 def get_tokens_unprocessed(self, text):
     for index, token, value in PythonLexer.get_tokens_unprocessed(
             self, text):
         if token is Name and value in self.lpy_modules:
             # Colourize previously detected modules
             yield index, Keyword, value
         else:
             yield index, token, value
Example #10
    def get_tokens_unprocessed(self, text):
        pylexer = PythonLexer(**self.options)
        tblexer = PythonTracebackLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            input_prompt = self.input_prompt.match(line)
            continue_prompt = self.continue_prompt.match(line.rstrip())
            output_prompt = self.output_prompt.match(line)
            if line.startswith("#"):
                insertions.append((len(curcode), [(0, Comment, line)]))
            elif input_prompt is not None:
                insertions.append(
                    (len(curcode), [(0, Other, input_prompt.group())]))
                curcode += line[input_prompt.end():]
            elif continue_prompt is not None:
                insertions.append(
                    (len(curcode), [(0, Other, continue_prompt.group())]))
                curcode += line[continue_prompt.end():]
            elif output_prompt is not None:
                # Use the 'error' token for output.  We should probably make
                # our own token, but error is typicaly in a bright color like
                # red, so it works fine for our output prompts.
                insertions.append((len(curcode),
                                   [(0, Other,  # could also be Generic.Error
                                     output_prompt.group())]))
                curcode += line[output_prompt.end():]
            else:
                if curcode:
                    for item in do_insertions(
                            insertions,
                            pylexer.get_tokens_unprocessed(curcode)):
                        yield item
                    # Reset the buffer once the accumulated code is flushed.
                    curcode = ''
                    insertions = []
                yield match.start(), Generic.Output, line
        if curcode:
            for item in do_insertions(insertions,
                                      pylexer.get_tokens_unprocessed(curcode)):
                yield item
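Unlike the line-by-line variant above, Examples #10 through #13 buffer
consecutive input lines into curcode and record the prompts as insertions,
then hand both to do_insertions from pygments.lexer, which splices the
pre-tokenised prompt fragments back into the token stream produced for the
buffered code at the recorded positions. The module-level line_re and the
prompt attributes are not shown; plausible stand-ins (assumptions) follow,
with a small demonstration of do_insertions itself:

import re

from pygments.lexer import do_insertions
from pygments.lexers import PythonLexer
from pygments.token import Generic

line_re = re.compile('.*?\n')          # iterates lines, keeping the '\n'

# Assumed prompt patterns for a plain console session; the originals are
# class attributes defined elsewhere:
input_prompt = re.compile(r'>>> ')
continue_prompt = re.compile(r'\.\.\. ')
output_prompt = re.compile(r'Out: ')

# do_insertions splices pre-tokenised fragments (here a single prompt)
# into the token stream produced for the buffered code:
code = 'x = 1\n'
insertions = [(0, [(0, Generic.Prompt, '>>> ')])]
for index, token, value in do_insertions(
        insertions, PythonLexer().get_tokens_unprocessed(code)):
    print(index, token, value)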
Example #11
    def get_tokens_unprocessed(self, text):
        pylexer = PythonLexer(**self.options)
        tblexer = PythonTracebackLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            input_prompt = self.input_prompt.match(line)
            continue_prompt = self.continue_prompt.match(line.rstrip())
            output_prompt = self.output_prompt.match(line)
            if line.startswith("#"):
                insertions.append((len(curcode),
                                   [(0, Comment, line)]))
            elif line.startswith("<warning>"):
                insertions.append((len(curcode),
                                   [(0, Generic.Error, line[9:])]))
            elif input_prompt is not None:
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, input_prompt.group())]))
                curcode += line[input_prompt.end():]
            elif continue_prompt is not None:
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, continue_prompt.group())]))
                curcode += line[continue_prompt.end():]
            elif output_prompt is not None:
                # Use the 'error' token for output.  We should probably make
                # our own token, but error is typically in a bright color like
                # red, so it works fine for our output prompts.
                insertions.append((len(curcode),
                                   [(0, Generic.Error, output_prompt.group())]))
                curcode += line[output_prompt.end():]
            else:
                if curcode:
                    for item in do_insertions(insertions,
                                              pylexer.get_tokens_unprocessed(curcode)):
                        yield item
                    # Reset the buffer once the accumulated code is flushed.
                    curcode = ''
                    insertions = []
                yield match.start(), Generic.Output, line
        if curcode:
            for item in do_insertions(insertions,
                                      pylexer.get_tokens_unprocessed(curcode)):
                yield item
Example #12
    def get_tokens_unprocessed(self, text):
        pylexer = PythonLexer(**self.options)
        tblexer = PythonTracebackLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            input_prompt = self.input_prompt.match(line)
            continue_prompt = self.continue_prompt.match(line.rstrip())
            output_prompt = self.output_prompt.match(line)
            if line.startswith("#"):
                insertions.append((len(curcode), [(0, Comment, line)]))
            elif input_prompt is not None:
                insertions.append((len(curcode), [(0, Generic.Prompt,
                                                   input_prompt.group())]))
                curcode += line[input_prompt.end():]
            elif continue_prompt is not None:
                insertions.append((len(curcode), [(0, Generic.Prompt,
                                                   continue_prompt.group())]))
                curcode += line[continue_prompt.end():]
            elif output_prompt is not None:
                insertions.append((len(curcode), [(0, Generic.Output,
                                                   output_prompt.group())]))
                curcode += line[output_prompt.end():]
            else:
                if curcode:
                    for item in do_insertions(
                            insertions,
                            pylexer.get_tokens_unprocessed(curcode)):
                        yield item
                    # Reset the buffer once the accumulated code is flushed.
                    curcode = ''
                    insertions = []
                yield match.start(), Generic.Output, line
        if curcode:
            for item in do_insertions(insertions,
                                      pylexer.get_tokens_unprocessed(curcode)):
                yield item
Example #13
    def get_tokens_unprocessed(self, text):
        pylexer = PythonLexer(**self.options)
        tblexer = PythonTracebackLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            input_prompt = self.input_prompt.match(line)
            continue_prompt = self.continue_prompt.match(line.rstrip())
            output_prompt = self.output_prompt.match(line)
            if line.startswith("#"):
                insertions.append((len(curcode),
                                   [(0, Comment, line)]))
            elif input_prompt is not None:
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, input_prompt.group())]))
                curcode += line[input_prompt.end():]
            elif continue_prompt is not None:
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, continue_prompt.group())]))
                curcode += line[continue_prompt.end():]
            elif output_prompt is not None:
                insertions.append((len(curcode),
                                   [(0, Generic.Output, output_prompt.group())]))
                curcode += line[output_prompt.end():]
            else:
                if curcode:
                    for item in do_insertions(insertions,
                                              pylexer.get_tokens_unprocessed(curcode)):
                        yield item
                    # Reset the buffer once the accumulated code is flushed.
                    curcode = ''
                    insertions = []
                yield match.start(), Generic.Output, line
        if curcode:
            for item in do_insertions(insertions,
                                      pylexer.get_tokens_unprocessed(curcode)):
                yield item
Example #14
    def get_tokens_unprocessed(self, text):
        for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):

            # Drop '###'-prefixed tokens from the output stream entirely.
            if value.startswith("###"):
                continue

            if token == Token.Error and value == "$":
                yield index, Token.Keyword, value

            elif token in [Name, Operator.Word] and value in KEYWORDS:
                yield index, Token.Keyword, value

            elif token in Name and value in PROPERTIES:
                yield index, Name.Attribute, value

            else:
                yield index, token, value
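Two details make Example #14 (repeated verbatim as Example #15) work: the
bare continue silently drops any token whose text starts with '###',
removing it from the output entirely, and a lone '$' is not legal Python,
so PythonLexer emits it as Token.Error, which the first branch then re-tags
as a keyword. KEYWORDS and PROPERTIES are module-level sets not shown here.
A quick check of the '$' behaviour (illustrative only):

from pygments.lexers import PythonLexer
from pygments.token import Token

# '$' is a lex error for plain Python, which is what the
# `token == Token.Error and value == "$"` test relies on.
for index, token, value in PythonLexer().get_tokens_unprocessed('$x = 1'):
    print(index, token, value)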
Example #15
    def get_tokens_unprocessed(self, text):
        for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):

            # Drop '###'-prefixed tokens from the output stream entirely.
            if value.startswith("###"):
                continue

            if token == Token.Error and value == "$":
                yield index, Token.Keyword, value

            elif token in [Name, Operator.Word] and value in KEYWORDS:
                yield index, Token.Keyword, value

            elif token in Name and value in PROPERTIES:
                yield index, Name.Attribute, value

            else:
                yield index, token, value
Example #16
 def get_tokens_unprocessed(self, text):
     pylexer = PythonLexer(**self.options)
     for pos, type_, value in pylexer.get_tokens_unprocessed(text):
         if type_ == Token.Error and value == '$':
             type_ = Comment.Preproc
         yield pos, type_, value
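Example #16 (and its verbatim repeat, Example #18) achieves the same '$'
remapping by wrapping a separate PythonLexer instance rather than
subclassing, turning the error token into Comment.Preproc so template
placeholders render unobtrusively. An equivalent, self-contained subclass
formulation (names are illustrative):

from pygments.lexers import PythonLexer
from pygments.token import Comment, Token


class DollarTemplateLexer(PythonLexer):
    # Illustrative subclass; the original composes a PythonLexer instead.
    def get_tokens_unprocessed(self, text):
        for pos, type_, value in PythonLexer.get_tokens_unprocessed(self, text):
            if type_ == Token.Error and value == '$':
                type_ = Comment.Preproc   # render '$' like a preproc mark
            yield pos, type_, value


for tok in DollarTemplateLexer().get_tokens_unprocessed('x = $val'):
    print(tok)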
Example #17
 def get_tokens_unprocessed(self, text):
     for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
         if token is Name and value in self._extra_commands:
             yield index, Name.Builtin, value
         else:
             yield index, token, value
Example #18
 def get_tokens_unprocessed(self, text):
     pylexer = PythonLexer(**self.options)
     for pos, type_, value in pylexer.get_tokens_unprocessed(text):
         if type_ == Token.Error and value == '$':
             type_ = Comment.Preproc
         yield pos, type_, value