Example #1
    def document_lexers(self):
        from pipenv.patched.notpip._vendor.pygments.lexers._mapping import LEXERS
        out = []
        modules = {}
        moduledocstrings = {}
        for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
            module = data[0]
            mod = __import__(module, None, None, [classname])
            self.filenames.add(mod.__file__)
            cls = getattr(mod, classname)
            if not cls.__doc__:
                print("Warning: %s does not have a docstring." % classname)
            docstring = cls.__doc__
            if isinstance(docstring, bytes):
                docstring = docstring.decode('utf8')
            modules.setdefault(module, []).append(
                (classname, ', '.join(data[2]) or 'None',
                 ', '.join(data[3]).replace('*', '\\*').replace('_', '\\_')
                 or 'None', ', '.join(data[4]) or 'None', docstring))
            if module not in moduledocstrings:
                moddoc = mod.__doc__
                if isinstance(moddoc, bytes):
                    moddoc = moddoc.decode('utf8')
                moduledocstrings[module] = moddoc

        for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
            if moduledocstrings[module] is None:
                raise Exception("Missing docstring for %s" % (module, ))
            heading = moduledocstrings[module].splitlines()[4].strip().rstrip(
                '.')
            out.append(MODULEDOC % (module, heading, '-' * len(heading)))
            for data in lexers:
                out.append(LEXERDOC % data)

        return ''.join(out)
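
The method above relies on the layout of the LEXERS mapping, where each value is a tuple of (module name, lexer name, aliases, filename patterns, mimetypes), indexed here as data[0]..data[4]. A minimal sketch of inspecting one entry directly, assuming the standalone pygments package rather than the pipenv-vendored copy:

from pygments.lexers._mapping import LEXERS

# Each value is (module, name, aliases, filename patterns, mimetypes).
module, name, aliases, filenames, mimetypes = LEXERS['PythonLexer']
print(module)     # e.g. 'pygments.lexers.python'
print(aliases)    # e.g. ('python', 'py', ...)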
Example #2
def get_all_lexers():
    """Return a generator of tuples in the form ``(name, aliases,
    filenames, mimetypes)`` of all known lexers.
    """
    for item in LEXERS.values():
        yield item[1:]
    for lexer in find_plugin_lexers():
        yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
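
A short usage sketch for get_all_lexers(), assuming the standalone pygments package:

from pygments.lexers import get_all_lexers

# Each yielded item is (name, aliases, filename patterns, mimetypes).
for name, aliases, filenames, mimetypes in get_all_lexers():
    if 'python' in aliases:
        print(name, aliases, filenames)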
Example #3
def __getattr__(self, name):
    info = LEXERS.get(name)
    if info:
        _load_lexers(info[0])
        cls = _lexer_cache[info[1]]
        setattr(self, name, cls)
        return cls
    if name in COMPAT:
        return getattr(self, COMPAT[name])
    raise AttributeError(name)
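
This __getattr__ appears in the lazy-loading shim that pygments.lexers installs as its module namespace: a lexer class is imported on first attribute access, cached with setattr, and legacy names are redirected through COMPAT. An ordinary import exercises it, e.g. (assuming the standalone pygments package):

# The first attribute access goes through __getattr__, which loads the
# implementing module and caches PythonLexer on the namespace.
from pygments.lexers import PythonLexer

lexer = PythonLexer()
print(lexer.name)   # 'Python'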
Example #4
def get_lexer_for_mimetype(_mime, **options):
    """Get a lexer for a mimetype.

    Raises ClassNotFound if not found.
    """
    for modname, name, _, _, mimetypes in LEXERS.values():
        if _mime in mimetypes:
            if name not in _lexer_cache:
                _load_lexers(modname)
            return _lexer_cache[name](**options)
    for cls in find_plugin_lexers():
        if _mime in cls.mimetypes:
            return cls(**options)
    raise ClassNotFound('no lexer for mimetype %r found' % _mime)
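
Usage sketch, assuming the standalone pygments package; 'text/x-python' is a registered mimetype of the Python lexer, and an unknown mimetype raises ClassNotFound:

from pygments.lexers import get_lexer_for_mimetype
from pygments.util import ClassNotFound

lexer = get_lexer_for_mimetype('text/x-python')
print(type(lexer).__name__)   # 'PythonLexer'

try:
    get_lexer_for_mimetype('application/x-no-such-type')   # hypothetical mimetype
except ClassNotFound as exc:
    print(exc)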
Example #5
def find_lexer_class(name):
    """Lookup a lexer class by name.

    Return None if not found.
    """
    if name in _lexer_cache:
        return _lexer_cache[name]
    # lookup builtin lexers
    for module_name, lname, aliases, _, _ in LEXERS.values():
        if name == lname:
            _load_lexers(module_name)
            return _lexer_cache[name]
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if cls.name == name:
            return cls
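
Unlike the alias-based helpers, find_lexer_class() matches the human-readable name attribute and returns the class itself, or None instead of raising. A small sketch, assuming the standalone pygments package:

from pygments.lexers import find_lexer_class

cls = find_lexer_class('Python')            # matches the lexer's `name` attribute
print(cls)                                  # the Python lexer class
print(find_lexer_class('NoSuchLanguage'))   # None rather than ClassNotFound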
Example #6
def get_lexer_by_name(_alias, **options):
    """Get a lexer by an alias.

    Raises ClassNotFound if not found.
    """
    if not _alias:
        raise ClassNotFound('no lexer for alias %r found' % _alias)

    # lookup builtin lexers
    for module_name, name, aliases, _, _ in LEXERS.values():
        if _alias.lower() in aliases:
            if name not in _lexer_cache:
                _load_lexers(module_name)
            return _lexer_cache[name](**options)
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if _alias.lower() in cls.aliases:
            return cls(**options)
    raise ClassNotFound('no lexer for alias %r found' % _alias)
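
The alias lookup is case-insensitive and the result is already instantiated, so keyword arguments are passed straight to the lexer constructor. A sketch, assuming the standalone pygments package (stripall is a standard lexer option):

from pygments.lexers import get_lexer_by_name

lexer = get_lexer_by_name('py', stripall=True)   # 'py' is an alias of the Python lexer
tokens = list(lexer.get_tokens('print("hi")\n'))
print(tokens[0])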
Example #7
def find_lexer_class_by_name(_alias):
    """Lookup a lexer class by alias.

    Like `get_lexer_by_name`, but does not instantiate the class.

    .. versionadded:: 2.2
    """
    if not _alias:
        raise ClassNotFound('no lexer for alias %r found' % _alias)
    # lookup builtin lexers
    for module_name, name, aliases, _, _ in LEXERS.values():
        if _alias.lower() in aliases:
            if name not in _lexer_cache:
                _load_lexers(module_name)
            return _lexer_cache[name]
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if _alias.lower() in cls.aliases:
            return cls
    raise ClassNotFound('no lexer for alias %r found' % _alias)
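
Same lookup as get_lexer_by_name(), but the class is returned uninstantiated, which is handy when the lexer options are not known yet. A sketch, assuming the standalone pygments package:

from pygments.lexers import find_lexer_class_by_name

cls = find_lexer_class_by_name('js')   # 'js' is an alias of the JavaScript lexer
print(cls.__name__)
lexer = cls(stripnl=False)             # instantiate later with whatever options apply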
Example #8
def find_lexer_class_for_filename(_fn, code=None):
    """Get a lexer for a filename.

    If multiple lexers match the filename pattern, use ``analyse_text()`` to
    figure out which one is more appropriate.

    Returns None if not found.
    """
    matches = []
    fn = basename(_fn)
    for modname, name, _, filenames, _ in LEXERS.values():
        for filename in filenames:
            if _fn_matches(fn, filename):
                if name not in _lexer_cache:
                    _load_lexers(modname)
                matches.append((_lexer_cache[name], filename))
    for cls in find_plugin_lexers():
        for filename in cls.filenames:
            if _fn_matches(fn, filename):
                matches.append((cls, filename))

    if isinstance(code, bytes):
        # decode it, since all analyse_text functions expect unicode
        code = guess_decode(code)

    def get_rating(info):
        cls, filename = info
        # explicit patterns get a bonus
        bonus = '*' not in filename and 0.5 or 0
        # The class _always_ defines analyse_text because it's included in
        # the Lexer class.  The default implementation returns None which
        # gets turned into 0.0.  Run scripts/detect_missing_analyse_text.py
        # to find lexers which need it overridden.
        if code:
            return cls.analyse_text(code) + bonus, cls.__name__
        return cls.priority + bonus, cls.__name__

    if matches:
        matches.sort(key=get_rating)
        # print "Possible lexers, after sort:", matches
        return matches[-1][0]
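
Filename patterns overlap (more than one lexer registers '*.h', for instance), which is what the optional code argument is for: it feeds analyse_text() so the best-scoring class wins. A sketch, assuming the standalone pygments package:

from pygments.lexers import find_lexer_class_for_filename

print(find_lexer_class_for_filename('setup.py'))      # the Python lexer class
print(find_lexer_class_for_filename('unknown.zzz'))   # None

# Passing the contents lets analyse_text() choose between lexers
# that claim the same pattern.
source = '#include <stdio.h>\nint main(void) { return 0; }\n'
print(find_lexer_class_for_filename('main.h', code=source))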