def get_tokens_unprocessed(self, text):
    """Lex *text* as JavaScript, promoting extra keywords.

    Any ``Name.Other`` token whose text appears in
    ``self.EXTRA_KEYWORDS`` is re-emitted as a ``Keyword`` token;
    every other token passes through unchanged.
    """
    for pos, tok, val in JavascriptLexer.get_tokens_unprocessed(self, text):
        # Rebind the token type instead of branching on the yield.
        if tok is Name.Other and val in self.EXTRA_KEYWORDS:
            tok = Keyword
        yield pos, tok, val
def get_tokens_unprocessed(self, text):
    """Lex *text* as JavaScript, rewriting stray ``#`` and ``@`` tokens.

    The base lexer reports ``#`` and ``@`` as ``Error``; this wrapper
    re-tags ``#`` as ``Name.Tag`` and ``@`` as ``Keyword``.  All other
    tokens pass through unchanged.
    """
    # Table-driven remapping of the two special error characters.
    remap = {"#": Name.Tag, "@": Keyword}
    for pos, tok, val in JavascriptLexer.get_tokens_unprocessed(self, text):
        if tok is Error and val in remap:
            yield pos, remap[val], val
        else:
            yield pos, tok, val
def get_tokens_unprocessed(self, text):
    """Lex *text* as JavaScript, reinterpreting ``#`` and ``@``.

    The underlying JavaScript lexer yields ``#`` and ``@`` as ``Error``
    tokens; those are re-emitted here as ``Name.Tag`` and ``Keyword``
    respectively.  Everything else is forwarded untouched.
    """
    for pos, tok, val in JavascriptLexer.get_tokens_unprocessed(self, text):
        # Guard-style: handle the special characters, then fall through.
        if tok is Error and (val == '#' or val == '@'):
            yield pos, (Name.Tag if val == '#' else Keyword), val
            continue
        yield pos, tok, val
def get_tokens_unprocessed(self, text):
    """Lex a console-style JavaScript session.

    State machine over the base lexer's tokens: a ``Generic.Prompt``
    token starts an example line; once that line's trailing newline is
    seen, subsequent tokens are re-tagged ``Generic.Output`` until the
    next prompt.  Outside output, any value listed in
    ``self.EXCEPTIONS`` is re-tagged ``Name.Exception``.
    """
    in_example = False
    in_output = False
    for pos, tok, val in JavascriptLexer.get_tokens_unprocessed(self, text):
        # NOTE: branch order matters — prompt detection must win, and
        # the end-of-example newline must be consumed before output
        # re-tagging begins on the following token.
        if tok is Generic.Prompt:
            in_example, in_output = True, False
        elif in_example and val.endswith("\n"):
            in_example, in_output = False, True
        elif in_output:
            tok = Generic.Output
        elif val in self.EXCEPTIONS:
            tok = Name.Exception
        yield pos, tok, val
def get_tokens_unprocessed(self, text):
    """Munge tokens for IDL: promote reserved and pseudo keywords.

    ``Name.Other`` tokens found in ``self.RESERVED_KEYWORDS`` are
    emitted as ``Keyword.Reserved``; those found in
    ``self.PSEUDO_KEYWORDS`` as ``Keyword.Pseudo`` (reserved takes
    precedence when a value appears in both).  All other tokens pass
    through unchanged.

    Two dead ``if … : pass`` blocks holding commented-out Python-2
    debug prints (probing 'UCS' and '@' values) were removed — they
    had no effect on the token stream.
    """
    for index, token, value in JavascriptLexer.get_tokens_unprocessed(self, text):
        if token is Name.Other:
            # Check reserved first so it wins over pseudo, matching the
            # original elif ordering.
            if value in self.RESERVED_KEYWORDS:
                token = Keyword.Reserved
            elif value in self.PSEUDO_KEYWORDS:
                token = Keyword.Pseudo
        yield index, token, value
def get_tokens_unprocessed(self, text):
    """Lex *text* as JavaScript with this lexer's extra keywords.

    Tokens typed ``Name.Other`` whose value is listed in
    ``self.EXTRA_KEYWORDS`` are yielded as ``Keyword``; all remaining
    tokens are yielded exactly as the base lexer produced them.
    """
    extras = self.EXTRA_KEYWORDS
    for offset, kind, text_piece in JavascriptLexer.get_tokens_unprocessed(self, text):
        if kind is Name.Other and text_piece in extras:
            yield offset, Keyword, text_piece
        else:
            yield offset, kind, text_piece
def get_tokens_unprocessed(self, text, stack=('root',)):
    """Lex an embedded JavaScript string literal.

    Strips the surrounding quote characters, turns the two-character
    escape ``\\n`` into a real newline, and delegates the rest to the
    JavaScript lexer.
    """
    inner = text[1:-1].replace('\\n', '\n')
    return JavascriptLexer.get_tokens_unprocessed(self, inner, stack)
def get_tokens_unprocessed(self, text, stack=('root',)):
    """Tokenize a quoted JavaScript fragment.

    Drops the first and last characters (the enclosing quotes),
    replaces literal ``\\n`` escapes with newline characters, and hands
    the result to the JavaScript lexer with the given state *stack*.
    """
    unquoted = text[1:-1]
    unescaped = unquoted.replace('\\n', '\n')
    return JavascriptLexer.get_tokens_unprocessed(self, unescaped, stack)