def get_tokens_unprocessed(self, text):
    # 'in' is either a keyword or an operator.
    # If the token two tokens after 'in' is ')', 'in' is a keyword:
    #   objectloop(a in b)
    # Otherwise, it is an operator:
    #   objectloop(a in b && true)
    objectloop_queue = []
    objectloop_token_count = -1
    previous_token = None
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if previous_token is Name.Variable and value == 'in':
            objectloop_queue = [[index, token, value]]
            objectloop_token_count = 2
        elif objectloop_token_count > 0:
            if token not in Comment and token not in Text:
                objectloop_token_count -= 1
            objectloop_queue.append((index, token, value))
        else:
            if objectloop_token_count == 0:
                if objectloop_queue[-1][2] == ')':
                    objectloop_queue[0][1] = Keyword
                while objectloop_queue:
                    yield objectloop_queue.pop(0)
                objectloop_token_count = -1
            yield index, token, value
        if token not in Comment and token not in Text:
            previous_token = token
    while objectloop_queue:
        yield objectloop_queue.pop(0)
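# ---------------------------------------------------------------
# Usage sketch (not from the original source). This override matches
# Pygments' Inform6Lexer, so the two-token lookahead can be exercised
# directly; the Inform 6 sample below is illustrative.
from pygments.lexers import Inform6Lexer

sample = "[ Main a b; objectloop(a in b) print a; ];"
for index, token, value in Inform6Lexer().get_tokens_unprocessed(sample):
    # Here 'in' should be emitted as Keyword, because the token two
    # tokens after it is ')'.
    print(index, token, repr(value))
# ---------------------------------------------------------------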
def get_tokens_unprocessed(self, text):
    stack = ['root']
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
        if token is Name.Variable:
            if value in self.builtin_function:
                yield index, Name.Builtin, value
                continue
            if value in self.special_forms:
                yield index, Keyword, value
                continue
            if value in self.macros:
                yield index, Name.Builtin, value
                continue
            if value in self.lambda_list_keywords:
                yield index, Keyword, value
                continue
            if value in self.declarations:
                yield index, Keyword, value
                continue
            if value in self.builtin_types:
                yield index, Keyword.Type, value
                continue
            if value in self.builtin_classes:
                yield index, Name.Class, value
                continue
        yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(
            self, text):
        if token is String.Symbol and '_q_' in value:
            yield index, String.Other, value
        else:
            yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        # Convention: mark all upper case names as constants
        if token is Name:
            if value.isupper():
                token = Name.Constant
        yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name.Function and value in ('init', 'del'):
            token = Keyword.Pseudo
        yield index, token, value
def get_tokens_unprocessed(self, text):
    # Cut at the beginning of the interface and information tab stuff.
    substrings = text.partition('@#$#@#$#@')
    text = substrings[0]
    stack = ['root']
    for item in RegexLexer.get_tokens_unprocessed(self, text, stack):
        yield item
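# ---------------------------------------------------------------
# Standalone sketch (not from the original source): str.partition
# returns a (before, sep, after) triple, so keeping element 0 drops
# everything from the first '@#$#@#$#@' separator onward -- in NetLogo
# files that separator marks the start of the interface/info sections.
code, _sep, _rest = "to setup end@#$#@#$#@interface data".partition('@#$#@#$#@')
print(repr(code))  # 'to setup end'
# ---------------------------------------------------------------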
def get_tokens_unprocessed(self, text):
    # For AMS files: catch every identifier declaration and highlight
    # each reference to it in the rest of the file. In other words,
    # every user-defined identifier is recognized.
    m = id_re.findall(text)
    # m now holds all matches from the big identifiers list:
    #   m = [first_match, second_match, ...]
    # where each nth_match = (id_type, id_name, id_operator),
    # e.g. ('Parameter', 'OD', ';').
    id_nametype = {}
    for i in m:
        id_nametype[i[1]] = i[0]
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        # If the item is one of the declared (detected) identifiers,
        # attach the appropriate token via the switcher.
        if value in id_nametype:
            yield index, switcher(id_nametype[value]), value
        # If the item is not detected as a function argument but is in
        # the identifier-type list, attach a Keyword.Declaration token.
        elif token is not Name.Argument and value in id_nametype.values():
            yield index, Keyword.Declaration, value
        else:
            yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name:
            if self.stdlibhighlighting and value in self.stdlib_types:
                token = Keyword.Type
            elif self.c99highlighting and value in self.c99_types:
                token = Keyword.Type
        yield index, token, value
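# ---------------------------------------------------------------
# Usage sketch (not from the original source). This override matches
# Pygments' C-family lexers, whose 'stdlibhighlighting' and
# 'c99highlighting' options gate the retagging of plain Name tokens
# as Keyword.Type.
from pygments.lexers import CLexer

lexer = CLexer(stdlibhighlighting=True, c99highlighting=False)
for index, token, value in lexer.get_tokens_unprocessed('size_t n; uint8_t b;'):
    print(index, token, repr(value))
# Expect 'size_t' (a stdlib type) retagged to Keyword.Type, while
# 'uint8_t' (a C99 type) stays a plain Name with c99highlighting off.
# ---------------------------------------------------------------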
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name:
            if value in builtins.BUILTIN_FUNCTIONS:
                token = Name.Builtin
            elif value in builtins.BUILTIN_OPERATIONS:
                token = Name.Builtin.Pseudo
        yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
        # Convention: mark all upper case names as constants
        if token is Name:
            if value.isupper():
                token = Name.Constant
        yield index, token, value
def get_tokens_unprocessed(self, text):
    stack = ['root']
    for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text, stack):
        if token is Name and value in self.EXTRA_KEYWORDS:
            yield index, Name.Builtin, value
        else:
            yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(
            self, text):
        if token is Name:
            if self.stdlibhighlighting and value in self.stdlib_types:
                token = Keyword.Type
            elif self.c99highlighting and value in self.c99_types:
                token = Keyword.Type
        yield index, token, value
def get_tokens_unprocessed(self, text, stack=('root',)):
    ma = MathematicaAnnotations()
    annotations = (ma.builtins, ma.unicode, ma.lexical_scope)
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        result = (index, token, value)
        for func in annotations:
            result = func(*result)
        yield result
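# ---------------------------------------------------------------
# Standalone sketch (not from the original source) of the annotation
# pipeline above: each annotation takes and returns an
# (index, token, value) triple, so any number of them can be chained
# over every token. The two annotations here are illustrative.
def strip_ws(index, token, value):
    return index, token, value.strip()

def upper_value(index, token, value):
    return index, token, value.upper()

result = (0, 'name', '  pi  ')
for func in (strip_ws, upper_value):
    result = func(*result)
print(result)  # (0, 'name', 'PI')
# ---------------------------------------------------------------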
def get_tokens_unprocessed(self, text):
    from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME
    for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name and value in ASYFUNCNAME:
            token = Name.Function
        elif token is Name and value in ASYVARNAME:
            token = Name.Variable
        yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(
            self, text):
        if token is Name:
            if value in builtins.BUILTIN_FUNCTIONS:
                token = Name.Builtin
            elif value in builtins.BUILTIN_OPERATIONS:
                token = Name.Builtin.Pseudo
        yield index, token, value
def get_tokens_unprocessed(self, text):
    from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \
        COCOA_PROTOCOLS, COCOA_PRIMITIVES
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name or token is Name.Class:
            if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \
                    or value in COCOA_PRIMITIVES:
                token = Name.Builtin.Pseudo
        yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if token == Generic:
            value = "\\(" + value + "\\)"
            yield (index, token, value)
        elif token == XMath:
            yield (index, Generic, value)
        else:
            yield (index, token, value)
def get_tokens_unprocessed(self, text):
    for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name:
            if self.smhighlighting:
                if value in self.SM_TYPES:
                    token = Keyword.Type
                elif value in self._functions:
                    token = Name.Builtin
        yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        # check for reserved words and pervasives
        if token is Name:
            if value in self.reserved_words:
                token = Keyword.Reserved
            elif value in self.pervasives:
                token = Keyword.Pervasive
        # return result
        yield index, token, value
def get_tokens_unprocessed(self, text): stack = ["root"] if self.startinline: stack.append("php") for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack): if token is Name.Other: if value in self._functions: yield index, Name.Builtin, value continue yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(
            self, text):
        if token == Generic:
            value = "\\(" + value + "\\)"
            yield (index, token, value)
        elif token == XMath:
            yield (index, Generic, value)
        else:
            yield (index, token, value)
def get_tokens_unprocessed(self, text):
    stack = ['root']
    if self.startinline:
        stack.append('php')
    for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text, stack):
        if token is Name.Other:
            if value in self._functions:
                yield index, Name.Builtin, value
                continue
        yield index, token, value
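# ---------------------------------------------------------------
# Usage sketch (not from the original source). This override matches
# Pygments' PhpLexer; with startinline=True the lexer starts in PHP
# mode without requiring a leading '<?php' tag, and builtin function
# names are retagged from Name.Other to Name.Builtin.
from pygments.lexers import PhpLexer

lexer = PhpLexer(startinline=True)
for index, token, value in lexer.get_tokens_unprocessed('echo strlen("x");'):
    print(index, token, repr(value))
# 'strlen' should come out as Name.Builtin because it is in the
# lexer's _functions set (built from the PHP builtin modules).
# ---------------------------------------------------------------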
def get_tokens_unprocessed(self, text):
    stack = ['root']
    if self.startinline:
        stack.append('php')
    for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text, stack):
        if token is Name.Other:
            if value in self._functions:
                yield index, Name.Function, value
                continue
        yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(
            self, text):
        if token is Name:
            if '.' in value:
                a, b = value.split('.')
                yield index, Name, a
                yield index + len(a), Punctuation, '.'
                yield index + len(a) + 1, Name, b
                continue
        yield index, token, value
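# ---------------------------------------------------------------
# Standalone sketch (not from the original source) of the offset
# arithmetic above: the sub-tokens of a split dotted name must keep
# absolute positions, so the second part starts at
# index + len(first_part) + 1, where the '+ 1' skips the dot.
def split_dotted(index, value):
    a, b = value.split('.')
    return [(index, a), (index + len(a), '.'), (index + len(a) + 1, b)]

print(split_dotted(10, 'math.pi'))
# [(10, 'math'), (14, '.'), (15, 'pi')]
# ---------------------------------------------------------------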
def get_tokens_unprocessed(self, text):
    tokens = RegexLexer.get_tokens_unprocessed(self, text)
    effects, fg, bg = set(), None, None
    for index, token, value in tokens:
        if token is Token.ANSI.Escape:
            effects, fg, bg = self.apply_esc(value, effects, fg, bg)
            continue
        yield index, get_token(effects, fg, bg), value
def get_tokens_unprocessed(self, text):
    stack = ['root']
    if self.requiredelimiters:
        stack.append('delimiters')
    for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text, stack):
        if (token is Name.Other and value.lower() in self._builtins or
                token is Name.Other.Member and
                value.lower().rstrip('=') in self._members):
            yield index, Name.Builtin, value
            continue
        yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if token == Generic:
            yield (index, token, value)
        elif token == XMath:
            yield (index, Punctuation, u"\\")
            yield (index, Punctuation, u"(")
            yield (index, Generic, value[2:-2])
            yield (index, Punctuation, u"\\")
            yield (index, Punctuation, u")")
        else:
            yield (index, token, value)
def get_tokens_unprocessed(self, text):
    from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \
        COCOA_PROTOCOLS, COCOA_PRIMITIVES
    for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name or token is Name.Class:
            if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \
                    or value in COCOA_PRIMITIVES:
                token = Name.Builtin.Pseudo
        yield index, token, value
def get_tokens_unprocessed(self, text):
    stack = ['root']
    for index, token, value in RegexLexer.get_tokens_unprocessed(
            self, text, stack):
        if token is Name.Variable:
            if value in self.special_forms:
                yield index, Keyword, value
                continue
            if value in self.builtin_types:
                yield index, Keyword.Type, value
                continue
        yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(
            self, text):
        if token == Generic:
            yield (index, token, value)
        elif token == XMath:
            yield (index, Punctuation, u"\\")
            yield (index, Punctuation, u"(")
            yield (index, Generic, value[2:-2])
            yield (index, Punctuation, u"\\")
            yield (index, Punctuation, u")")
        else:
            yield (index, token, value)
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name:
            if value in self._functions:
                yield index, Name.Builtin, value
                continue
            elif "." in value:
                a, b = value.split(".")
                yield index, Name, a
                yield index + len(a), Punctuation, u"."
                yield index + len(a) + 1, Name, b
                continue
        yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name:
            if value in self._functions:
                yield index, Name.Builtin, value
                continue
            elif '.' in value:
                a, b = value.split('.')
                yield index, Name, a
                yield index + len(a), Punctuation, u'.'
                yield index + len(a) + 1, Name, b
                continue
        yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name:
            if value in self._functions:
                yield index, Name.Function, value
                continue
            elif '.' in value:
                a, b = value.split('.')
                yield index, Name, a
                yield index + len(a), Punctuation, u'.'
                yield index + len(a) + 1, Name, b
                continue
        yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name:
            if self.stdlibhighlighting and value in self.stdlib_types:
                token = Keyword.Type
            elif self.c99highlighting and value in self.c99_types:
                token = Keyword.Type
            elif self.platformhighlighting and value in self.linux_types:
                token = Keyword.Type
            elif re.match(r'\b([tseufpi]_\w+)\b', value):
                token = Keyword.Type  # libccc typedef
            elif re.match(r'\b([A-Z])\b', value):
                token = Keyword.Type  # libccc generic type macro
        yield index, token, value
def get_tokens_unprocessed(self, text):
    # TODO: builtins are only subsequent tokens on lines
    #       and 'keywords' only happen at the beginning except
    #       for :au ones
    for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name.Other:
            if self.is_in(value, self._cmd):
                yield index, Keyword, value
            elif self.is_in(value, self._opt) or \
                    self.is_in(value, self._aut):
                yield index, Name.Builtin, value
            else:
                yield index, Text, value
        else:
            yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name:
            lowercase_value = value.lower()
            if lowercase_value in self.builtins:
                yield index, Name.Builtin, value
                continue
            if lowercase_value in self.keywords:
                yield index, Keyword, value
                continue
            if lowercase_value in self.functions:
                yield index, Name.Builtin, value
                continue
            if lowercase_value in self.operators:
                yield index, Operator, value
                continue
        yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if token is Name:
            if value in self.KEYWORD:
                yield index, Keyword, value
            elif value in self.KEYWORD_OPERATOR:
                yield index, Operator.Word, value
            elif value in self.BUILTIN:
                yield index, Keyword, value
            elif value in self.BUILTIN_DECLARATION:
                yield index, Keyword.Declaration, value
            elif value in self.BUILTIN_NAMESPACE:
                yield index, Keyword.Namespace, value
            elif value in self.CONSTANT:
                yield index, Name.Constant, value
            elif value in self.PSEUDO_VAR:
                yield index, Name.Builtin.Pseudo, value
            else:
                yield index, token, value
        else:
            yield index, token, value
def get_stack_traces(self, text):
    val = None
    for index, token, value in RegexLexer.get_tokens_unprocessed(
            self, text):
        if token is Error:
            # str() takes a single argument; wrap the triple in a tuple.
            raise Exception('Lexing error: %s' % str((index, token, value)))
        if token is StackTraceBegin:
            assert val is None
            val = value
        elif token is StackTraceLine:
            if not val:
                raise Exception(
                    'Got StackTraceLine without StackTraceBegin: %s'
                    % str((index, token, value)))
            val += value
        elif token is StackTraceEnd:
            if not val:
                raise Exception(
                    'Got StackTraceEnd without StackTraceBegin: %s'
                    % str((index, token, value)))
            val += value
            yield val
            val = None
def get_tokens_unprocessed(self, text, *args):
    # Have a copy of the entire text to be used by `language_callback`.
    self.text = text
    for x in RegexLexer.get_tokens_unprocessed(self, text, *args):
        yield x
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(
            self, text):
        value = isar_decode(value)
        yield index, token, value
def get_tokens_unprocessed(self, text, stack=('root',)):
    """Reset the content-type state."""
    self.content_type = None
    return RegexLexer.get_tokens_unprocessed(self, text, stack)
def get_tokens_unprocessed(self, text, stack=('root', )):
    """Reset the content-type state."""
    self.content_type = None
    return RegexLexer.get_tokens_unprocessed(self, text, stack)
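# ---------------------------------------------------------------
# Usage sketch (not from the original source), assuming this is
# Pygments' HttpLexer: content_type is filled in while the
# Content-Type header is lexed (so the message body can be handed to
# a nested lexer), and resetting it here keeps one instance reusable
# across multiple inputs.
from pygments.lexers import HttpLexer

request = ('POST / HTTP/1.1\r\n'
           'Content-Type: application/json\r\n'
           '\r\n'
           '{"a": 1}\r\n')
for index, token, value in HttpLexer().get_tokens_unprocessed(request):
    print(index, token, repr(value))
# ---------------------------------------------------------------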
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if token is String.Symbol and '_q_' in value:
            yield index, String.Other, value
        else:
            yield index, token, value
def get_tokens_unprocessed(self, text, stack=('root', )):
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        if token is MToken.SYMBOL and value in mma.SYSTEM_SYMBOLS:
            yield index, MToken.BUILTIN, value
        else:
            yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(
            self, text):
        yield index, token, value
def get_tokens_unprocessed(self, text):
    for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
        value = isar_decode(value)
        yield index, token, value