Example 1
    def __init__(self, **options):
        from pygments.lexers._vimbuiltins import command, option, auto
        self._cmd = command
        self._opt = option
        self._aut = auto

        RegexLexer.__init__(self, **options)
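Most of the snippets below share one pattern: a RegexLexer subclass defers importing its builtin-name tables until __init__ (so the large generated modules load only when the lexer is actually instantiated), then remaps tokens in get_tokens_unprocessed. A minimal self-contained sketch of that pattern; the lexer name, rules, and builtin set here are invented for illustration:

from pygments.lexer import RegexLexer
from pygments.token import Name, Text

class DemoLexer(RegexLexer):
    # Hypothetical lexer: words become Name tokens, whitespace becomes Text.
    name = 'Demo'
    tokens = {
        'root': [
            (r'\w+', Name),
            (r'\s+', Text),
        ],
    }

    def __init__(self, **options):
        # The real lexers above import pygments.lexers._*_builtins here;
        # a small literal set stands in for those generated tables.
        self._builtins = {'print', 'len'}
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self._builtins:
                token = Name.Builtin
            yield index, token, value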
Example 2
 def __init__(self, **options):
     level = options.get('i6t', '+i6t-not-inline')
     if level not in self._all_tokens:
         self._tokens = self.__class__.process_tokendef(level)
     else:
         self._tokens = self._all_tokens[level]
     RegexLexer.__init__(self, **options)
Example 3
    def __init__(self, **options):
        self.smhighlighting = get_bool_opt(options,
                                           'sourcemod', True)

        self._functions = set()
        if self.smhighlighting:
            from pygments.lexers._sourcemodbuiltins import FUNCTIONS
            self._functions.update(FUNCTIONS)
        RegexLexer.__init__(self, **options)
Example 4
    def __init__(self, **options):
        level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic')
        if level not in self._all_tokens:
            # compile the regexes now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]

        RegexLexer.__init__(self, **options)
Example 5
    def __init__(self, **options):
        self.func_name_highlighting = get_bool_opt(
            options, 'func_name_highlighting', True)
        self.disabled_modules = get_list_opt(options, 'disabled_modules', [])

        self._functions = set()
        if self.func_name_highlighting:
            from pygments.lexers._lua_builtins import MODULES
            for mod, func in MODULES.items():
                if mod not in self.disabled_modules:
                    self._functions.update(func)
        RegexLexer.__init__(self, **options)
Example 6
 def __init__(self, **options):
     from pygments.lexers._clbuiltins import BUILTIN_FUNCTIONS, \
         SPECIAL_FORMS, MACROS, LAMBDA_LIST_KEYWORDS, DECLARATIONS, \
         BUILTIN_TYPES, BUILTIN_CLASSES
     self.builtin_function = BUILTIN_FUNCTIONS
     self.special_forms = SPECIAL_FORMS
     self.macros = MACROS
     self.lambda_list_keywords = LAMBDA_LIST_KEYWORDS
     self.declarations = DECLARATIONS
     self.builtin_types = BUILTIN_TYPES
     self.builtin_classes = BUILTIN_CLASSES
     RegexLexer.__init__(self, **options)
Example 7
    def __init__(self, **options):
        self.builtinshighlighting = get_bool_opt(options, "builtinshighlighting", True)
        self.requiredelimiters = get_bool_opt(options, "requiredelimiters", False)

        self._builtins = set()
        self._members = set()
        if self.builtinshighlighting:
            from pygments.lexers._lasso_builtins import BUILTINS, MEMBERS

            for key, value in iteritems(BUILTINS):
                self._builtins.update(value)
            for key, value in iteritems(MEMBERS):
                self._members.update(value)
        RegexLexer.__init__(self, **options)
Example 8
 def get_tokens_unprocessed(self, text):
     # cut at the beginning of the interface and information tab stuff
     substrings = text.partition('@#$#@#$#@')
     text = substrings[0]
     stack = ['root']
     for item in RegexLexer.get_tokens_unprocessed(self, text, stack):
         yield item
Example 9
 def get_tokens_unprocessed(self, text):
     # cut at the beginning of the interface and information tab stuff
     substrings = text.partition('@#$#@#$#@')
     text = substrings[0]
     stack = ['root']
     for item in RegexLexer.get_tokens_unprocessed(self, text, stack):
         yield item
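The partition call in the two snippets above keeps only the text before the first occurrence of the separator; @#$#@#$#@ is the delimiter NetLogo places between the code, interface, and info sections of a model file. The stdlib behavior, for reference:

# str.partition splits at the first occurrence of the separator and
# always returns a 3-tuple: (before, separator, after).
text = 'to setup end@#$#@#$#@interface stuff@#$#@#$#@info stuff'
before, sep, after = text.partition('@#$#@#$#@')
print(before)  # 'to setup end' -- only this part reaches the lexer
print(after)   # 'interface stuff@#$#@#$#@info stuff'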
Example 10
 def get_tokens_unprocessed(self, text):
     for index, token, value in RegexLexer.get_tokens_unprocessed(
             self, text):
         if token is String.Symbol and '_q_' in value:
             yield index, String.Other, value
         else:
             yield index, token, value
Example 11
 def get_tokens_unprocessed(self, text):
     stack = ['root']
     for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
         if token is Name.Variable:
             if value in self.builtin_function:
                 yield index, Name.Builtin, value
                 continue
             if value in self.special_forms:
                 yield index, Keyword, value
                 continue
             if value in self.macros:
                 yield index, Name.Builtin, value
                 continue
             if value in self.lambda_list_keywords:
                 yield index, Keyword, value
                 continue
             if value in self.declarations:
                 yield index, Keyword, value
                 continue
             if value in self.builtin_types:
                 yield index, Keyword.Type, value
                 continue
             if value in self.builtin_classes:
                 yield index, Name.Class, value
                 continue
         yield index, token, value
Example 12
 def get_tokens_unprocessed(self, text):
     # 'in' is either a keyword or an operator.
     # If the token two tokens after 'in' is ')', 'in' is a keyword:
     #   objectloop(a in b)
     # Otherwise, it is an operator:
     #   objectloop(a in b && true)
     objectloop_queue = []
     objectloop_token_count = -1
     previous_token = None
     for index, token, value in RegexLexer.get_tokens_unprocessed(self,
                                                                  text):
         if previous_token is Name.Variable and value == 'in':
             objectloop_queue = [[index, token, value]]
             objectloop_token_count = 2
         elif objectloop_token_count > 0:
             if token not in Comment and token not in Text:
                 objectloop_token_count -= 1
             objectloop_queue.append((index, token, value))
         else:
             if objectloop_token_count == 0:
                 if objectloop_queue[-1][2] == ')':
                     objectloop_queue[0][1] = Keyword
                 while objectloop_queue:
                     yield objectloop_queue.pop(0)
                 objectloop_token_count = -1
             yield index, token, value
         if token not in Comment and token not in Text:
             previous_token = token
     while objectloop_queue:
         yield objectloop_queue.pop(0)
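The queue in Example 12 implements a bounded lookahead over a generator: once 'in' is seen after a variable, tokens are buffered until two more significant tokens arrive, and only then is the buffered 'in' retagged. The same buffering idea in isolation, simplified (plain (kind, value) pairs instead of pygments triples, and no comment/whitespace handling):

def rewrite_in(tokens):
    # Buffer everything after 'in'; if the second token that follows
    # is ')', retag the buffered 'in' as a keyword.
    queue, countdown = [], -1
    for kind, value in tokens:
        if value == 'in':
            queue, countdown = [['OP', 'in']], 2
        elif countdown > 0:
            countdown -= 1
            queue.append([kind, value])
            if countdown == 0:
                if queue[-1][1] == ')':
                    queue[0][0] = 'KEYWORD'
                for item in queue:
                    yield tuple(item)
                queue, countdown = [], -1
        else:
            yield kind, value
    for item in queue:
        yield tuple(item)

print(list(rewrite_in([('NAME', 'a'), ('OP', 'in'), ('NAME', 'b'), ('OP', ')')])))
# [('NAME', 'a'), ('KEYWORD', 'in'), ('NAME', 'b'), ('OP', ')')]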
Example 13
 def get_tokens_unprocessed(self, text):
     for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
         # Convention: mark all upper case names as constants
         if token is Name:
             if value.isupper():
                 token = Name.Constant
         yield index, token, value
Example 14
    def get_tokens_unprocessed(self, text):
        # For AMS files: catch every identifier declaration and highlight each
        # reference to it in the rest of the file, so every user-defined
        # identifier is recognized.
        # id_re.findall(text) yields one tuple per declaration:
        # (id_type, id_name, id_operator), e.g. ('Parameter', 'OD', ';').
        matches = id_re.findall(text)
        id_nametype = {id_name: id_type for id_type, id_name, _ in matches}

        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            # A declared identifier: attach the token chosen by the switcher.
            if value in id_nametype:
                yield index, switcher(id_nametype[value]), value
            # Not a function argument, but one of the identifier type names:
            # mark it as a declaration keyword ('kd').
            elif token is not Name.Argument and value in id_nametype.values():
                yield index, Keyword.Declaration, value
            else:
                yield index, token, value
Example 15
 def get_tokens_unprocessed(self, text):
     for index, token, value in (
         RegexLexer.get_tokens_unprocessed(self, text)
     ):
         if token is Name.Function and value in ('init', 'del'):
             token = Keyword.Pseudo
         yield index, token, value
Example 16
 def get_tokens_unprocessed(self, text):
     stack = ['root']
     for index, token, value in RegexLexer.get_tokens_unprocessed(
             self, text, stack):
         if token is Name.Variable:
             if value in self.builtin_function:
                 yield index, Name.Builtin, value
                 continue
             if value in self.special_forms:
                 yield index, Keyword, value
                 continue
             if value in self.macros:
                 yield index, Name.Builtin, value
                 continue
             if value in self.lambda_list_keywords:
                 yield index, Keyword, value
                 continue
             if value in self.declarations:
                 yield index, Keyword, value
                 continue
             if value in self.builtin_types:
                 yield index, Keyword.Type, value
                 continue
             if value in self.builtin_classes:
                 yield index, Name.Class, value
                 continue
         yield index, token, value
Example 17
    def __init__(self, **options):
        self.func_name_highlighting = get_bool_opt(options,
                                                   'func_name_highlighting',
                                                   True)
        self.disabled_modules = get_list_opt(options, 'disabled_modules', [])

        self._functions = set()
        if self.func_name_highlighting:
            try:
                from pygments.lexers._luabuiltins import MODULES
            except ImportError:  # pygments 2.x
                from pygments.lexers._lua_builtins import MODULES
            for mod, func in MODULES.items():
                if mod not in self.disabled_modules:
                    self._functions.update(func)
        RegexLexer.__init__(self, **options)
Example 18
 def get_tokens_unprocessed(self, text):
     stack = ['root']
     for index, token, value in \
             RegexLexer.get_tokens_unprocessed(self, text, stack):
         if token is Name and value in self.EXTRA_KEYWORDS:
             yield index, Name.Builtin, value
         else:
             yield index, token, value
Example 19
 def get_tokens_unprocessed(self, text):
     for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
         if token is Name:
             if value in builtins.BUILTIN_FUNCTIONS:
                 token = Name.Builtin
             elif value in builtins.BUILTIN_OPERATIONS:
                 token = Name.Builtin.Pseudo
         yield index, token, value
Example 20
 def get_tokens_unprocessed(self, text):
     for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
         if token is Name:
             if self.stdlibhighlighting and value in self.stdlib_types:
                 token = Keyword.Type
             elif self.c99highlighting and value in self.c99_types:
                 token = Keyword.Type
         yield index, token, value
Example 21
 def get_tokens_unprocessed(self, text):
     stack = ['root']
     for index, token, value in \
             RegexLexer.get_tokens_unprocessed(self, text, stack):
         if token is Name and value in self.EXTRA_KEYWORDS:
             yield index, Name.Builtin, value
         else:
             yield index, token, value
Example 22
 def get_tokens_unprocessed(self, text):
     for index, token, value in \
         RegexLexer.get_tokens_unprocessed(self, text):
         # Convention: mark all upper case names as constants
         if token is Name:
             if value.isupper():
                 token = Name.Constant
         yield index, token, value
Example 23
    def __init__(self, **options):
        self.funcnamehighlighting = get_bool_opt(options, "funcnamehighlighting", True)
        self.disabledmodules = get_list_opt(options, "disabledmodules", ["unknown"])
        self.startinline = get_bool_opt(options, "startinline", False)

        # private option argument for the lexer itself
        if "_startinline" in options:
            self.startinline = options.pop("_startinline")

        # collect activated functions in a set
        self._functions = set()
        if self.funcnamehighlighting:
            from pygments.lexers._phpbuiltins import MODULES

            for key, value in MODULES.items():
                if key not in self.disabledmodules:
                    self._functions.update(value)
        RegexLexer.__init__(self, **options)
Example 24
 def get_tokens_unprocessed(self, text):
     for index, token, value in RegexLexer.get_tokens_unprocessed(
             self, text):
         if token is Name:
             if self.stdlibhighlighting and value in self.stdlib_types:
                 token = Keyword.Type
             elif self.c99highlighting and value in self.c99_types:
                 token = Keyword.Type
         yield index, token, value
Example 25
    def get_tokens_unprocessed(self, text, stack=('root', )):
        ma = MathematicaAnnotations()
        annotations = (ma.builtins, ma.unicode, ma.lexical_scope)
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            result = (index, token, value)
            for func in annotations:
                result = func(*result)

            yield result
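In Examples 25 and 26, each annotation is a function from an (index, token, value) triple to a possibly rewritten triple, so the three passes compose by simple chaining via result = func(*result). MathematicaAnnotations belongs to the surrounding project; this stand-in only illustrates the expected shape of one annotation:

from pygments.token import Name

SYSTEM_SYMBOLS = {'Plot', 'Map', 'Table'}  # invented stand-in for the builtin list

def builtins(index, token, value):
    # Retag plain names that match known system symbols; pass
    # everything else through unchanged.
    if token is Name and value in SYSTEM_SYMBOLS:
        return index, Name.Builtin, value
    return index, token, value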
Example 26
    def get_tokens_unprocessed(self, text, stack=('root', )):
        ma = MathematicaAnnotations()
        annotations = (ma.builtins, ma.unicode, ma.lexical_scope)
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            result = (index, token, value)
            for func in annotations:
                result = func(*result)

            yield result
Example 27
 def get_tokens_unprocessed(self, text):
     from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME
     for index, token, value in \
             RegexLexer.get_tokens_unprocessed(self, text):
         if token is Name and value in ASYFUNCNAME:
             token = Name.Function
         elif token is Name and value in ASYVARNAME:
             token = Name.Variable
         yield index, token, value
Example 28
 def get_tokens_unprocessed(self, text):
     from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME
     for index, token, value in \
             RegexLexer.get_tokens_unprocessed(self, text):
         if token is Name and value in ASYFUNCNAME:
             token = Name.Function
         elif token is Name and value in ASYVARNAME:
             token = Name.Variable
         yield index, token, value
Example 29
 def get_tokens_unprocessed(self, text):
     for index, token, value in RegexLexer.get_tokens_unprocessed(
             self, text):
         if token is Name:
             if value in builtins.BUILTIN_FUNCTIONS:
                 token = Name.Builtin
             elif value in builtins.BUILTIN_OPERATIONS:
                 token = Name.Builtin.Pseudo
         yield index, token, value
Example 30
    def get_tokens_unprocessed(self, text):
        from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, COCOA_PROTOCOLS, COCOA_PRIMITIVES

        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name or token is Name.Class:
                if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS or value in COCOA_PRIMITIVES:
                    token = Name.Builtin.Pseudo

            yield index, token, value
Example 31
 def get_tokens_unprocessed(self, text):
     for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
         if token == Generic:
             value = "\\(" + value + "\\)"
             yield (index, token, value)
         elif token == XMath:
             yield (index, Generic, value)
         else:
             yield (index, token, value)
Example 32
 def get_tokens_unprocessed(self, text):
     stack = ["root"]
     if self.startinline:
         stack.append("php")
     for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
         if token is Name.Other:
             if value in self._functions:
                 yield index, Name.Builtin, value
                 continue
         yield index, token, value
Example 33
    def __init__(self, **options):
        self.funcnamehighlighting = get_bool_opt(options,
                                                 'funcnamehighlighting', True)
        self.disabledmodules = get_list_opt(options, 'disabledmodules',
                                            ['unknown'])
        self.startinline = get_bool_opt(options, 'startinline', False)

        # private option argument for the lexer itself
        if '_startinline' in options:
            self.startinline = options.pop('_startinline')

        # collect activated functions in a set
        self._functions = set()
        if self.funcnamehighlighting:
            from pygments.lexers._php_builtins import MODULES
            for key, value in MODULES.items():
                if key not in self.disabledmodules:
                    self._functions.update(value)
        RegexLexer.__init__(self, **options)
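The startinline flag in Examples 23, 33, and 35 makes the PHP lexer start inside PHP code instead of HTML, which matters for snippets that lack an opening <?php tag. Typical usage with the stock PhpLexer:

from pygments import highlight
from pygments.lexers import PhpLexer
from pygments.formatters import TerminalFormatter

code = '$greeting = "hello"; echo $greeting;'
# Without startinline=True the snippet would be treated as HTML
# until an opening <?php tag is found.
print(highlight(code, PhpLexer(startinline=True), TerminalFormatter()))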
Example 34
 def get_tokens_unprocessed(self, text):
     for index, token, value in \
             RegexLexer.get_tokens_unprocessed(self, text):
         if token is Name:
             if self.smhighlighting:
                 if value in self.SM_TYPES:
                     token = Keyword.Type
                 elif value in self._functions:
                     token = Name.Builtin
         yield index, token, value
Example 35
File: php.py Project: axil/blog
    def __init__(self, **options):
        self.funcnamehighlighting = get_bool_opt(
            options, 'funcnamehighlighting', True)
        self.disabledmodules = get_list_opt(
            options, 'disabledmodules', ['unknown'])
        self.startinline = get_bool_opt(options, 'startinline', False)

        # private option argument for the lexer itself
        if '_startinline' in options:
            self.startinline = options.pop('_startinline')

        # collect activated functions in a set
        self._functions = set()
        if self.funcnamehighlighting:
            from pygments.lexers._php_builtins import MODULES
            for key, value in iteritems(MODULES):
                if key not in self.disabledmodules:
                    self._functions.update(value)
        RegexLexer.__init__(self, **options)
Example 36
 def get_tokens_unprocessed(self, text):
     for index, token, value in \
             RegexLexer.get_tokens_unprocessed(self, text):
         if token is Name:
             if self.smhighlighting:
                 if value in self.SM_TYPES:
                     token = Keyword.Type
                 elif value in self._functions:
                     token = Name.Builtin
         yield index, token, value
Example 37
 def get_tokens_unprocessed(self, text):
     for index, token, value in RegexLexer.get_tokens_unprocessed(
             self, text):
         if token == Generic:
             value = "\\(" + value + "\\)"
             yield (index, token, value)
         elif token == XMath:
             yield (index, Generic, value)
         else:
             yield (index, token, value)
Example 38
 def get_tokens_unprocessed(self, text):
     for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
         # check for reserved words and pervasives
         if token is Name:
             if value in self.reserved_words:
                 token = Keyword.Reserved
             elif value in self.pervasives:
                 token = Keyword.Pervasive
         # return result
         yield index, token, value
Example 39
 def get_tokens_unprocessed(self, text):
     for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
         # check for reserved words and pervasives
         if token is Name:
             if value in self.reserved_words:
                 token = Keyword.Reserved
             elif value in self.pervasives:
                 token = Keyword.Pervasive
         # return result
         yield index, token, value
Example 40
    def get_tokens_unprocessed(self, text):
        tokens = RegexLexer.get_tokens_unprocessed(self, text)

        effects, fg, bg = set(), None, None

        for index, token, value in tokens:
            if token is Token.ANSI.Escape:
                effects, fg, bg = self.apply_esc(value, effects, fg, bg)
                continue

            yield index, get_token(effects, fg, bg), value
Example 41
File: hush.py Project: gahag/hush
 def get_tokens_unprocessed(self, text):
     for index, token, value in RegexLexer.get_tokens_unprocessed(
             self, text):
         if token is Name:
             if '.' in value:
                 a, b = value.split('.')
                 yield index, Name, a
                 yield index + len(a), Punctuation, '.'
                 yield index + len(a) + 1, Name, b
                 continue
         yield index, token, value
Example 42
 def get_tokens_unprocessed(self, text):
     stack = ['root']
     if self.startinline:
         stack.append('php')
     for index, token, value in \
         RegexLexer.get_tokens_unprocessed(self, text, stack):
         if token is Name.Other:
             if value in self._functions:
                 yield index, Name.Function, value
                 continue
         yield index, token, value
Example 43
 def get_tokens_unprocessed(self, text):
     stack = ['root']
     if self.startinline:
         stack.append('php')
     for index, token, value in \
             RegexLexer.get_tokens_unprocessed(self, text, stack):
         if token is Name.Other:
             if value in self._functions:
                 yield index, Name.Builtin, value
                 continue
         yield index, token, value
Example 44
 def __init__(self, **options):
     self.reserved_words = set()
     self.pervasives = set()
     # ISO Modula-2
     if get_bool_opt(options, 'iso', False):
         self.reserved_words.update(self.iso_reserved_words)
         self.pervasives.update(self.iso_pervasives)
     # Objective Modula-2
     elif get_bool_opt(options, 'objm2', False):
         self.reserved_words.update(self.objm2_reserved_words)
         self.pervasives.update(self.objm2_pervasives)
     # PIM Modula-2 (DEFAULT)
     else:
         self.reserved_words.update(self.pim_reserved_words)
         self.pervasives.update(self.pim_pervasives)
     # GNU extensions
     if get_bool_opt(options, 'gm2ext', False):
         self.reserved_words.update(self.gnu_reserved_words)
         self.pervasives.update(self.gnu_pervasives)
     # initialise
     RegexLexer.__init__(self, **options)
Example 45
 def get_tokens_unprocessed(self, text):
     stack = ['root']
     if self.requiredelimiters:
         stack.append('delimiters')
     for index, token, value in \
             RegexLexer.get_tokens_unprocessed(self, text, stack):
         if (token is Name.Other and value.lower() in self._builtins or
                 token is Name.Other.Member and
                 value.lower().rstrip('=') in self._members):
             yield index, Name.Builtin, value
             continue
         yield index, token, value
Example 46
 def __init__(self, **options):
     self.reserved_words = set()
     self.pervasives = set()
     # ISO Modula-2
     if get_bool_opt(options, 'iso', False):
         self.reserved_words.update(self.iso_reserved_words)
         self.pervasives.update(self.iso_pervasives)
     # Objective Modula-2
     elif get_bool_opt(options, 'objm2', False):
         self.reserved_words.update(self.objm2_reserved_words)
         self.pervasives.update(self.objm2_pervasives)
     # PIM Modula-2 (DEFAULT)
     else:
         self.reserved_words.update(self.pim_reserved_words)
         self.pervasives.update(self.pim_pervasives)
     # GNU extensions
     if get_bool_opt(options, 'gm2ext', False):
         self.reserved_words.update(self.gnu_reserved_words)
         self.pervasives.update(self.gnu_pervasives)
     # initialise
     RegexLexer.__init__(self, **options)
Example 47
 def get_tokens_unprocessed(self, text):
     stack = ['root']
     if self.requiredelimiters:
         stack.append('delimiters')
     for index, token, value in \
             RegexLexer.get_tokens_unprocessed(self, text, stack):
         if (token is Name.Other and value.lower() in self._builtins or
                 token is Name.Other.Member and
                 value.lower().rstrip('=') in self._members):
             yield index, Name.Builtin, value
             continue
         yield index, token, value
Example 48
 def get_tokens_unprocessed(self, text):
     for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
         if token == Generic:
             yield (index, token, value)
         elif token == XMath:
             yield (index, Punctuation, u"\\")
             yield (index, Punctuation, u"(")
             yield (index, Generic, value[2:-2])
             yield (index, Punctuation, u"\\")
             yield (index, Punctuation, u")")
         else:
             yield (index, token, value)
Example 49
    def get_tokens_unprocessed(self, text):
        from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \
            COCOA_PROTOCOLS, COCOA_PRIMITIVES

        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name or token is Name.Class:
                if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \
                   or value in COCOA_PRIMITIVES:
                    token = Name.Builtin.Pseudo

            yield index, token, value
Example 50
 def get_tokens_unprocessed(self, text):
     stack = ['root']
     for index, token, value in RegexLexer.get_tokens_unprocessed(
             self, text, stack):
         if token is Name.Variable:
             if value in self.special_forms:
                 yield index, Keyword, value
                 continue
             if value in self.builtin_types:
                 yield index, Keyword.Type, value
                 continue
         yield index, token, value
Example 51
 def get_tokens_unprocessed(self, text):
     for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
         if token is Name:
             if value in self._functions:
                 yield index, Name.Builtin, value
                 continue
             elif "." in value:
                 a, b = value.split(".")
                 yield index, Name, a
                 yield index + len(a), Punctuation, u"."
                 yield index + len(a) + 1, Name, b
                 continue
         yield index, token, value
Example 52
 def get_tokens_unprocessed(self, text):
     for index, token, value in RegexLexer.get_tokens_unprocessed(
             self, text):
         if token == Generic:
             yield (index, token, value)
         elif token == XMath:
             yield (index, Punctuation, u"\\")
             yield (index, Punctuation, u"(")
             yield (index, Generic, value[2:-2])
             yield (index, Punctuation, u"\\")
             yield (index, Punctuation, u")")
         else:
             yield (index, token, value)
Example 53
 def get_tokens_unprocessed(self, text):
     for index, token, value in \
         RegexLexer.get_tokens_unprocessed(self, text):
         if token is Name:
             if value in self._functions:
                 yield index, Name.Function, value
                 continue
             elif '.' in value:
                 a, b = value.split('.')
                 yield index, Name, a
                 yield index + len(a), Punctuation, u'.'
                 yield index + len(a) + 1, Name, b
                 continue
         yield index, token, value
Example 54
 def get_tokens_unprocessed(self, text):
     for index, token, value in \
         RegexLexer.get_tokens_unprocessed(self, text):
         if token is Name:
             if value in self._functions:
                 yield index, Name.Builtin, value
                 continue
             elif '.' in value:
                 a, b = value.split('.')
                 yield index, Name, a
                 yield index + len(a), Punctuation, u'.'
                 yield index + len(a) + 1, Name, b
                 continue
         yield index, token, value
Example 55
 def get_tokens_unprocessed(self, text):
     for index, token, value in \
             RegexLexer.get_tokens_unprocessed(self, text):
         if token is Name:
             if self.stdlibhighlighting and value in self.stdlib_types:
                 token = Keyword.Type
             elif self.c99highlighting and value in self.c99_types:
                 token = Keyword.Type
             elif self.platformhighlighting and value in self.linux_types:
                 token = Keyword.Type
             elif re.match(r'\b([tseufpi]_\w+)\b', value):
                 token = Keyword.Type # libccc typedef
             elif re.match(r'\b([A-Z])\b', value):
                 token = Keyword.Type # libccc generic type macro
         yield index, token, value
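The last two branches in Example 55 encode naming conventions as regexes: a [tseufpi]_ prefix marks a libccc-style typedef, and a lone capital letter marks a generic type macro. Checking the two patterns directly:

import re

for name in ('t_u32', 's_point', 'T', 'AB', 'value'):
    is_typedef = bool(re.match(r'\b([tseufpi]_\w+)\b', name))
    is_generic = bool(re.match(r'\b([A-Z])\b', name))
    print(name, is_typedef, is_generic)
# t_u32 True False
# s_point True False
# T False True
# AB False False
# value False False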
Example 56
 def get_tokens_unprocessed(self, text):
     # TODO: builtins are only subsequent tokens on lines
     #       and 'keywords' only happen at the beginning except
     #       for :au ones
     for index, token, value in \
         RegexLexer.get_tokens_unprocessed(self, text):
         if token is Name.Other:
             if self.is_in(value, self._cmd):
                 yield index, Keyword, value
             elif self.is_in(value, self._opt) or \
                  self.is_in(value, self._aut):
                 yield index, Name.Builtin, value
             else:
                 yield index, Text, value
         else:
             yield index, token, value
Example 57
 def get_tokens_unprocessed(self, text):
     for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
         if token is Name:
             lowercase_value = value.lower()
             if lowercase_value in self.builtins:
                 yield index, Name.Builtin, value
                 continue
             if lowercase_value in self.keywords:
                 yield index, Keyword, value
                 continue
             if lowercase_value in self.functions:
                 yield index, Name.Builtin, value
                 continue
             if lowercase_value in self.operators:
                 yield index, Operator, value
                 continue
         yield index, token, value
Example 58
 def get_tokens_unprocessed(self, text):
     for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
         if token is Name:
             if value in self.KEYWORD:
                 yield index, Keyword, value
             elif value in self.KEYWORD_OPERATOR:
                 yield index, Operator.Word, value
             elif value in self.BUILTIN:
                 yield index, Keyword, value
             elif value in self.BUILTIN_DECLARATION:
                 yield index, Keyword.Declaration, value
             elif value in self.BUILTIN_NAMESPACE:
                 yield index, Keyword.Namespace, value
             elif value in self.CONSTANT:
                 yield index, Name.Constant, value
             elif value in self.PSEUDO_VAR:
                 yield index, Name.Builtin.Pseudo, value
             else:
                 yield index, token, value
         else:
             yield index, token, value
Example 59
    def colored(self):
        """Implement switch colored."""
        if self.root.ids.colors.active:
            if self.results[0][0].isdigit():
                result_index: int = int(self.results[self.marker - 1][0])
                RegexLexer.tokens = {
                    "root":
                    [(words({
                        escape_markup(f"{self.results[self.marker - 1][1]}")
                    }), Generic.Heading),
                     (words({
                         escape_markup(
                             f"{self.results[self.marker - 1][result_index]}")
                     }), Generic.Inserted)]
                }
            else:
                RegexLexer.tokens = {"root": [(r"^.+$", Generic.Error)]}
            self.root.ids.s_color.text = self.GREEN_COLORED_STR
        else:
            RegexLexer.tokens = {"root": []}
            self.root.ids.s_color.text = self.RED_NOT_COLORED_STR

        delattr(RegexLexer, "_tokens")
        self.root.ids.view.lexer = RegexLexer()
Example 60
 def get_stack_traces(self, text):
     val = None
     for index, token, value in RegexLexer.get_tokens_unprocessed(
             self, text):
         if token is Error:
             raise Exception('Lexing error: %s' %
                             str((index, token, value)))
         if token is StackTraceBegin:
             assert val is None
             val = value
         elif token is StackTraceLine:
             if not val:
                 raise Exception(
                     'Got StackTraceLine without StackTraceBegin: %s' %
                     str((index, token, value)))
             val += value
         elif token is StackTraceEnd:
             if not val:
                 raise Exception(
                     'Got StackTraceEnd without StackTraceBegin: %s' %
                     str((index, token, value)))
             val += value
             yield val
             val = None
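All of the subclasses above plug into the normal Pygments entry points unchanged. A typical way to exercise one of them, here with the stock LuaLexer and the options read by the Lua __init__ in Examples 5 and 17:

from pygments import highlight
from pygments.lexers import LuaLexer
from pygments.formatters import HtmlFormatter

code = 'print(string.upper("hi"))'
# disabled_modules suppresses builtin highlighting for the named modules;
# func_name_highlighting=False would disable it entirely.
lexer = LuaLexer(disabled_modules=['string'])
print(highlight(code, lexer, HtmlFormatter()))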