def test_functions(self):
    """Exercise pygments.token helpers: is_token_subtype and
    string_to_tokentype.

    Fix: ``assert_`` and ``failIf`` are deprecated unittest aliases
    (removed in Python 3.12); use assertTrue/assertFalse/assertIs.
    """
    # String is a subtype of itself and of its parent Literal.
    self.assertTrue(token.is_token_subtype(token.String, token.String))
    self.assertTrue(token.is_token_subtype(token.String, token.Literal))
    # The relation is not symmetric.
    self.assertFalse(token.is_token_subtype(token.Literal, token.String))
    # string_to_tokentype passes token types through unchanged, maps the
    # empty string to the root Token, and resolves short names.
    self.assertIs(token.string_to_tokentype(token.String), token.String)
    self.assertIs(token.string_to_tokentype(''), token.Token)
    self.assertIs(token.string_to_tokentype('String'), token.String)
def test_functions(self):
    """Exercise pygments.token helpers: is_token_subtype and
    string_to_tokentype.

    Fix: identity checks written as ``assertTrue(x is y)`` hide the
    compared values on failure; ``assertIs`` reports both operands.
    """
    self.assertTrue(token.is_token_subtype(token.String, token.String))
    self.assertTrue(token.is_token_subtype(token.String, token.Literal))
    # Subtype relation is not symmetric.
    self.assertFalse(token.is_token_subtype(token.Literal, token.String))
    self.assertIs(token.string_to_tokentype(token.String), token.String)
    self.assertIs(token.string_to_tokentype(''), token.Token)
    self.assertIs(token.string_to_tokentype('String'), token.String)
def test_functions(self):
    """Check token subtype relations and string-to-token conversion."""
    # Positive and negative subtype cases, table-driven.
    for child, parent in ((token.String, token.String),
                          (token.String, token.Literal)):
        self.assertTrue(token.is_token_subtype(child, parent))
    self.assertFalse(token.is_token_subtype(token.Literal, token.String))
    # string_to_tokentype: pass-through, empty string, short name.
    for spec, expected in ((token.String, token.String),
                           ("", token.Token),
                           ("String", token.String)):
        self.assertTrue(token.string_to_tokentype(spec) is expected)
def test_functions():
    """Verify pygments.token helpers: is_token_subtype and string_to_tokentype."""
    # Subtype relation holds reflexively and up the hierarchy, but is
    # not symmetric.
    positive_pairs = [(token.String, token.String),
                      (token.String, token.Literal)]
    assert all(token.is_token_subtype(a, b) for a, b in positive_pairs)
    assert not token.is_token_subtype(token.Literal, token.String)
    # Conversion: token types pass through, '' maps to the root Token,
    # and bare names resolve beneath Token.
    for spec, expected in [(token.String, token.String),
                           ('', token.Token),
                           ('String', token.String)]:
        assert token.string_to_tokentype(spec) is expected
def parse_pygments_style(token_name, style_object, style_dict):
    """Parse token type and style string.

    :param token_name: str name of Pygments token. Example: "Token.String"
    :param style_object: pygments.style.Style instance to use as base
    :param style_dict: dict of token names and their styles, customized to
        this cli
    :return: tuple of (token type, style string)
    """
    token_type = string_to_tokentype(token_name)
    try:
        # The configured value may itself be a token name ("Token.Name"),
        # in which case we inherit that token's style from the base style.
        other_token_type = string_to_tokentype(style_dict[token_name])
        return token_type, style_object.styles[other_token_type]
    except AttributeError:
        # string_to_tokentype raises AttributeError on non-token strings
        # (lowercase segments such as "bold #ffffff"): treat the value as
        # a literal style string.  (Unused ``as err`` binding removed.)
        return token_type, style_dict[token_name]
class CLIStyle(Style):
    """Pygments style merging the base style, the CLI's default
    extensions, and user overrides (later updates take precedence)."""
    styles = {}
    styles.update(style.styles)
    styles.update(default_style_extensions)
    # C404: dict comprehension instead of dict([...]) over a pair list.
    custom_styles = {string_to_tokentype(x): y for x, y in cli_style.items()}
    styles.update(custom_styles)
def style_factory(name, cli_style):
    """Build a PygmentsStyle from a named base style plus user overrides.

    :param name: name of a built-in Pygments style; unknown names fall
        back to 'native'.
    :param cli_style: dict mapping token names to either another token
        name (style inherited from the base) or a literal style string.
    """
    try:
        style = pygments.styles.get_style_by_name(name)
    except ClassNotFound:
        style = pygments.styles.get_style_by_name('native')

    custom_styles = {}
    for token in cli_style:
        try:
            # Value may name another token; inherit its style from the base.
            custom_styles[string_to_tokentype(token)] = style.styles[
                string_to_tokentype(cli_style[token])]
        except AttributeError:
            # Not a token name: use the value as a literal style string.
            # (Unused ``as err`` binding removed.)
            custom_styles[string_to_tokentype(token)] = cli_style[token]

    return PygmentsStyle.from_defaults(style_dict=custom_styles,
                                       pygments_style_cls=style)
def lectura(archivos):
    """Scan each Python file for imported module names and queue the
    corresponding .py files for reading.

    :param archivos: worklist of [path, breadcrumb-list] entries.  The
        list is intentionally extended (via ``agregar``) while being
        iterated, so newly discovered files are processed too.
    """
    for archivo in archivos:
        # archivo[0] is the file path; archivo[1] is the chain of files
        # followed to reach it.
        try:
            code = readFile(archivo[0])
            tokens = list(PythonLexer().get_tokens(code))
            for item in tokens:
                if item[0] == string_to_tokentype('Token.Name.Namespace'):
                    # Map a dotted module name to a relative file path.
                    cadena = item[1].replace(".", "/") + ".py"
                    ruta = archivo[1].copy()
                    ruta.append(archivo[0])
                    agregar([cadena, ruta], archivos)
        except Exception:
            # Best effort: the file most likely does not exist (standard
            # library / external module) — skip it.  A bare ``except:``
            # would also swallow KeyboardInterrupt/SystemExit.
            pass
def style_factory(name, cli_style):
    """Get a named style for the CLI.

    Parameters
    ----------
    name: `str`
        Name of style class.
    cli_style: `dict`
        Mapping of token names to style strings overriding the base style.

    Returns
    -------
    pygments.style.BaseStyle
    """
    # Docstring typos fixed: "Paramters" -> "Parameters",
    # "pymgents" -> "pygments".
    try:
        style = pygments.styles.get_style_by_name(name)
    except ClassNotFound:
        # Unknown style name: fall back to the built-in 'native' style.
        style = pygments.styles.get_style_by_name('native')

    styles = {}
    styles.update(style.styles)
    styles.update(default_style_extensions)
    custom_styles = {string_to_tokentype(x): y for x, y in cli_style.items()}
    styles.update(custom_styles)

    return style_from_dict(styles)
def __init__(self, **options):
    """Configure the filter.

    Reads the 'names' list option into a set and resolves the optional
    'tokentype' option, defaulting to Name.Function.
    """
    Filter.__init__(self, **options)
    self.names = set(get_list_opt(options, 'names', []))
    requested = options.get('tokentype')
    # Falsy (missing/empty) token type falls back to Name.Function.
    self.tokentype = string_to_tokentype(requested) if requested else Name.Function
def get_fmt(self, token):
    """Return the text format for *token*.

    Accepts either a pygments token type or a token-name string; unknown
    tokens get a copy of the base format tagged with the token name.
    """
    # Fix: use isinstance instead of exact-type comparison
    # (``type(x) != type(y)``).  All pygments token types are instances
    # of the same _TokenType class, for which type(Token.Generic) is a
    # handle that needs no extra import.
    if not isinstance(token, type(Token.Generic)):
        token = string_to_tokentype(token)
    fmt = self.styles.get(token, None)
    if fmt is None:
        # Unknown token: clone the base format and tag it with the token
        # name so it can be identified later.
        fmt = self.base_fmt()
        fmt.setProperty(fmt.UserProperty, str(token))
    return fmt
def _action_PygmentsProxy(self, action, regex=False):
    """Search-and-replace over the lexed script, optionally restricted to
    a pygments token type and to specific match occurrences.

    NOTE(review): Python 2 code (print statements, xrange); logic left
    byte-identical, comments only added.
    """
    # Pull the action parameters out of the nested action dict.
    search = self.__getNestedDict(action, 'search')
    replace = self.__getNestedDict(action, 'replace')
    count = self.__getNestedDict(action, 'count')
    occurrence = self.__getNestedDict(action, 'occurrence')
    if not occurrence == None:
        # Normalize a scalar occurrence to a one-element tuple.
        if not isinstance(occurrence,list) and not isinstance(occurrence,tuple):
            print "is not listtuple", type(occurrence), occurrence, isinstance(occurrence,list)
            occurrence = (int(occurrence),) # create tuple
    tokentype = self.__getNestedDict(action, 'tokentype')
    # Validation would be r"^\w+((\.\w+)*)$"
    if tokentype == None:
        tokentype = () # x in tokentype cannot be done with tokentype = None
    else:
        tokentype = string_to_tokentype(tokentype)
    print search, replace,tokentype,occurrence
    # First pass: collect every lexed item whose text matches *search*
    # (substring or regex) and whose token type is acceptable.
    temp_results = []
    for i in self.LexedScript:
        doit = False
        if i[0] in tokentype:
            # pygments subtype membership: i's token is tokentype or a child.
            doit = True
        elif tokentype == ():#tokentype was None/not specified, see upper lines
            doit = True
        else:
            doit = False
        if doit == True:#tokentype is valid
            doit = False
            if regex:
                if re.search(search, i[1], re.DOTALL) is not None:
                    print search,"ist (regex) in",i[1]
                    temp_results.append(i)
            else:
                if search in i[1]:#substring found
                    print search,"ist in",i[1]
                    temp_results.append(i)
    # Second pass: apply the replacement, either to every hit or only to
    # the requested occurrence indexes.
    if regex:
        replacer = self.__regexProxy
    else:
        replacer = self.__replaceProxy
    if occurrence == None:
        for occ in temp_results:
            occ[1] = replacer(occ[1], search, replace, count)
    else:
        for occ in occurrence:
            if occ in xrange( -len(temp_results) , len(temp_results) ):# this allows negative indexes
                temp_results[occ][1] = replacer(temp_results[occ][1], search, replace, count)
def load_json_styles():
    """Load a color theme from theme.json and return it converted to hex
    token styles (via ``toHex``)."""
    import json
    # Use a context manager so the file is closed even if json.load raises
    # (the original open/close pair leaked the handle on error).
    with open("theme.json", 'r') as theme_file:
        jColorTheme = json.load(theme_file)
    jColorThemeTuple = {string_to_tokentype(k): tuple(v)
                        for k, v in jColorTheme.items()}
    # Keep this lookup: it validates that the theme defines an entry for
    # the root Token (raises KeyError otherwise), even though the value
    # itself is unused here.
    background_color_tuple = jColorThemeTuple[Token]
    ## output
    return toHex(jColorThemeTuple)
def style_factory_output(name, cli_style):
    """Create a Pygments Style class for output, merging a named base
    style with user overrides.

    :param name: built-in Pygments style name; falls back to 'native'.
    :param cli_style: dict mapping token names to either another token
        name (style inherited) or a literal style string.
    """
    # Bug fix: ``get_style_by_name(name).styles`` is a *class attribute*
    # of the registered pygments style class.  Updating it in place
    # mutated shared global state, so every later caller (even with a
    # different cli_style) saw the previous overrides.  Copy it first.
    try:
        style = dict(pygments.styles.get_style_by_name(name).styles)
    except ClassNotFound:
        style = dict(pygments.styles.get_style_by_name('native').styles)

    for token in cli_style:
        try:
            # Value may name another token whose style we inherit.
            style[string_to_tokentype(token)] = style[
                string_to_tokentype(cli_style[token])]
        except AttributeError:
            # Not a token name: literal style string.
            style[string_to_tokentype(token)] = cli_style[token]

    class OutputStyle(pygments.style.Style):
        default_style = ""
        styles = style

    return OutputStyle
def style_factory(name, cli_style):
    """Create a PygmentsStyle from a named base style plus user token
    overrides; unknown style names fall back to 'native'.

    :param name: built-in Pygments style name.
    :param cli_style: dict of token-name -> style-string overrides.
    """
    try:
        style = pygments.styles.get_style_by_name(name)
    except ClassNotFound:
        style = pygments.styles.get_style_by_name('native')
    # C404: dict comprehension instead of dict([...]) over a pair list.
    custom_styles = {string_to_tokentype(x): y for x, y in cli_style.items()}
    return PygmentsStyle.from_defaults(style_dict=custom_styles,
                                       pygments_style_cls=style)
def style_factory(name, cli_style):
    """Resolve *name* to a built-in Pygments style (falling back to
    'native' when unknown) and merge in the default extensions plus the
    user's token overrides, highest precedence last."""
    try:
        base = pygments.styles.get_style_by_name(name)
    except ClassNotFound:
        base = pygments.styles.get_style_by_name('native')

    merged = {}
    merged.update(base.styles)
    merged.update(default_style_extensions)
    merged.update({string_to_tokentype(x): y for x, y in cli_style.items()})
    return style_from_dict(merged)
def style_factory(name, cli_style):
    """Build a style dict from a named base style, the default style
    extensions, and user overrides, returning a prompt-toolkit style.

    :param name: built-in Pygments style name; falls back to 'native'.
    :param cli_style: dict of token-name -> style-string overrides.
    """
    try:
        style = pygments.styles.get_style_by_name(name)
    except ClassNotFound:
        style = pygments.styles.get_style_by_name('native')

    styles = {}
    styles.update(style.styles)
    styles.update(default_style_extensions)
    # C404: dict comprehension instead of dict([...]) over a pair list.
    custom_styles = {string_to_tokentype(x): y for x, y in cli_style.items()}
    styles.update(custom_styles)

    return style_from_dict(styles)
def read_style(configdata):
    """Parse an INI style definition into a {token_type: style} mapping.

    Sections whose names begin lowercase (plus the ignored default
    section) are skipped; every remaining section name is interpreted as
    a pygments token name.
    """
    config = ConfigParser(interpolation=ExtendedInterpolation(),
                          default_section="IGNORED_DEFAULT")
    config.read_string(configdata)

    token_styles = {}
    for section_name, section in config.items():
        # Skip sections that aren't going to be allowed as token names.
        if section_name == "IGNORED_DEFAULT" or section_name[0].islower():
            continue
        token_styles[string_to_tokentype(section_name)] = _read_style_section(section)
    return token_styles
def read(self, stream):
    """Read a style definition from *stream* and build a Style.

    Section names that are lowercase-initial (or the ignored default
    section) are not valid token names and are skipped.
    """
    config = self._create_configparser()
    config.read_file(stream)
    token_styles = {
        string_to_tokentype(name): _read_style_section(section)
        for name, section in config.items()
        if name != 'IGNORED_DEFAULT' and not name[0].islower()
    }
    # TODO: handle background, highlight colors
    # TODO: save palette?
    # TODO: have a style name?
    return create_style(None, token_styles)
def style_factory(name, cli_style):
    """Create a Pygments Style class based on the user's preferences.

    :param str name: The name of a built-in Pygments style.
    :param dict cli_style: The user's token-type style preferences.
    """
    try:
        style = pygments.styles.get_style_by_name(name)
    except ClassNotFound:
        style = pygments.styles.get_style_by_name('native')

    style_tokens = {}
    style_tokens.update(style.styles)
    custom_styles = {string_to_tokentype(x): y for x, y in cli_style.items()}
    style_tokens.update(custom_styles)

    class CliStyle(pygments.style.Style):
        # Bug fix: pygments reads ``default_style`` (singular); the
        # original ``default_styles`` attribute was silently ignored and
        # the class fell back to the inherited default.
        default_style = ''
        styles = style_tokens

    return CliStyle
def substitute(self, sub):
    """
    Transform a stack of substitution tokens into a string, recursively
    substituting sub-tokens.

    NOTE(review): Python 2 code (``xrange``, list-returning ``map``);
    left byte-identical, comments only added.
    """
    def subarg(arg):
        # Render one substitution argument: recurse on Sub, join
        # iterables element-wise, fall back to str() otherwise.
        if isinstance(arg, Sub):
            return ' '.join(self.substitute(arg))
        elif hasattr(arg, '__iter__'):
            return ' '.join([' '.join(self.substitute(s)) for s in arg])
        return str(arg)
    if isinstance(sub, Sub):
        if sub.id not in self.format.subst:
            # Unknown substitution id: emit a visible placeholder.
            return "??{}??".format(sub.id)
        args = [subarg(x) for x in sub.args]
        strout = self.format.subst[sub.id]
        # Replace positional placeholders ^1..^N with the rendered args.
        for x in xrange(len(args)):
            strout = strout.replace('^' + str(x + 1), args[x])
        return [strout]
    elif isinstance(sub, Tok):
        if sub.is_whitespace():
            return [' ']
        value = utils.latex_escape(sub.value)
        subbed = False
        if sub.value in self.format.format:
            opt = self.format.format[sub.value]
            # Apply the format rule when unconditional, or when the
            # token's type falls under one of the rule's 'if' token types
            # (pygments subtype membership via ``in``).
            if not opt['if'] or any(sub.type in string_to_tokentype(tt) for tt in opt['if']):
                value = opt['to']
                subbed = True
        # Candidate names from the token type, most specific first —
        # appears to strip the leading 'Token' prefix and the dots
        # (e.g. 'Token.Name.Class' -> 'NameClass'); TODO confirm.
        tok_types = map(lambda x: str(x).replace('.', '')[5:], reversed(sub.type.split()))
        found = [y for y in tok_types if y in self.format.subst]
        if found and not subbed:
            # Re-dispatch through the most specific matching substitution.
            result = ' '.join(self.substitute(Sub(found[0], value)))
            return [result]
        return [value]
    return [str(sub)]
def substitute(self, sub):
    """
    Transform a stack of substitution tokens into a string, recursively
    substituting sub-tokens.

    NOTE(review): Python 2 code (``xrange``, list-returning ``map``);
    left byte-identical, comments only added.
    """
    def subarg(arg):
        # Render one argument: recurse on Sub, join iterables, str() else.
        if isinstance(arg, Sub):
            return ' '.join(self.substitute(arg))
        elif hasattr(arg, '__iter__'):
            return ' '.join([' '.join(self.substitute(s)) for s in arg])
        return str(arg)
    if isinstance(sub, Sub):
        if sub.id not in self.format.subst:
            # Unknown substitution id: visible placeholder.
            return "??{}??".format(sub.id)
        args = [subarg(x) for x in sub.args]
        strout = self.format.subst[sub.id]
        # Replace positional placeholders ^1..^N with rendered args.
        for x in xrange(len(args)):
            strout = strout.replace('^' + str(x+1), args[x])
        return [strout]
    elif isinstance(sub, Tok):
        if sub.is_whitespace():
            return [' ']
        value = utils.latex_escape(sub.value)
        subbed = False
        if sub.value in self.format.format:
            opt = self.format.format[sub.value]
            # Apply when unconditional or when the token type falls under
            # one of the rule's 'if' token types (pygments ``in``).
            if not opt['if'] or any(sub.type in string_to_tokentype(tt) for tt in opt['if']):
                value = opt['to']
                subbed = True
        # Candidate names from the token type, most specific first —
        # appears to strip the 'Token' prefix and dots; TODO confirm.
        tok_types = map(lambda x: str(x).replace('.', '')[5:], reversed(sub.type.split()))
        found = [y for y in tok_types if y in self.format.subst]
        if found and not subbed:
            result = ' '.join(self.substitute(Sub(found[0], value)))
            return [result]
        return [value]
    return [str(sub)]
'Get the next item in the iterable without advancing our position.' if (not self.item): try: self.item = next(self.iterator) except StopIteration: return None return self.item def __next__(self): 'Get the next item in the iterable and advance our position.' item = self.peek() self.item = None return item declaration = string_to_tokentype('Declaration') other = string_to_tokentype('Other') punctuation = string_to_tokentype('Punctuation') Double = r'"(\\\\\\\\|\\\\"|[^"])*"' Single = r"'(\\\\\\\\|\\\\'|[^'])*'" BUILTIN = regex.compile( r'(Array|Boolean|Date|Error|Function|Math|netscape|Number|Object|Packages|RegExp|String|Promise|Proxy|sun|decodeURI|decodeURIComponent|encodeURI|encodeURIComponent|Error|eval|isFinite|isNaN|isSafeInteger|parseFloat|parseInt|document|this|window)\b' ) CONSTANT = regex.compile(r'(true|false|null|NaN|Infinity|undefined)\b') RESERVED = regex.compile( r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|extends|final|float|goto|implements|import|int|interface|long|native|package|private|protected|public|short|static|super|synchronized|throws|transient|volatile)\b' ) DECLARATION = regex.compile(r'(var|let|with|function)\b') KEYWORD = regex.compile( r'(for|in|while|do|break|return|continue|switch|case|default|if|else|throw|try|catch|finally|new|delete|typeof|instanceof|void|yield|this|of)\b'
from typing import List import click from pygments import highlight, lexers, formatters from pygments.formatters.terminal import TERMINAL_COLORS from pygments.token import string_to_tokentype from .api import API from .lp import LongPoll from .signal import SignalsList, Signal from .reaction import ReactionsList, Reaction from .annotypes import Annotype from . import current TERMINAL_COLORS[string_to_tokentype("String")] = ("gray", "_") TERMINAL_COLORS[string_to_tokentype("Token.Literal.Number")] = ("yellow", "_") TERMINAL_COLORS[string_to_tokentype("Token.Keyword.Constant")] = ("red", "_") TERMINAL_COLORS[string_to_tokentype("Token.Name.Tag")] = ("cyan", "_") @dataclass class Bot(Annotype): """ Основной менеджер событий LongPoll, сигналов, API запросов и в целом работы бота """ token: str """ Токен пользователя/группы
def finished(self):
    """Final test-run report: print captured output and highlighted
    tracebacks for failures, then a summary line; exit non-zero if
    anything failed.

    NOTE(review): Python 2 code (print statements, ``iteritems``);
    left byte-identical, comments only added.  Indentation reconstructed
    from syntax — verify against the original file.
    """
    from pygments.lexers import (PythonTracebackLexer, PythonLexer, DiffLexer)
    if ANSI_COLORS_SUPPORT:
        from pygments.console import colorize
        from pygments import highlight
        if self.style in ('light', 'dark'):
            # 16-color terminal formatter; apply the user's colorscheme
            # overrides (keys are capitalized into pygments token names).
            from pygments.formatters import TerminalFormatter
            formatter = TerminalFormatter(bg=self.style)
            if self.colorscheme is not None:
                from pygments.token import string_to_tokentype
                for token, value in self.colorscheme.iteritems():
                    token = string_to_tokentype(token.capitalize())
                    formatter.colorscheme[token] = (value, value)
        else:
            from pygments.formatters import Terminal256Formatter
            formatter = Terminal256Formatter(style=self.style)
    else:
        # ANSI color codes seem not to be supported, make colorize()
        # and highlight() no-ops.
        formatter = None
        def colorize(_format, text):
            return text
        def highlight(text, _lexer, _formatter):
            return text
    if self.counter:
        self.progress.finish()
    print
    width, _ = utils.get_terminal_size()
    def show(result):
        # Print one result's name, docstring, and captured stdout/stderr
        # with an arrow gutter.
        print colorize('bold', result.test_name)
        if result.test.__doc__:
            print inspect.getdoc(result.test)
        print colorize('faint', '─' * width)
        for line in result.stdout:
            print colorize('bold', '→'),
            print line
        for line in result.stderr:
            print colorize('red', '→'),
            print line
    if self.verbose:
        # Verbose mode also shows passing tests that produced output.
        for result in self.passes:
            if result.stdout or result.stderr:
                show(result)
                print
    for result in self.failures:
        show(result)
        # result.traceback seems to be in UTF-8 on my system (eg. for
        # literal unicode strings) but I guess this depends on the source
        # file encoding. Tell Pygments to guess: try UTF-8 and then latin1.
        print highlight(result.traceback,
                        PythonTracebackLexer(encoding='guess'),
                        formatter)
        assertion = result.assertion
        if assertion is not None:
            print highlight(assertion,
                            PythonLexer(encoding='guess'),
                            formatter)
        equality_diff = result.equality_diff
        if equality_diff is not None:
            print highlight(equality_diff,
                            DiffLexer(encoding='guess'),
                            formatter)
        result.debug()
    # Summary line; color the failure count red when non-zero.
    if self.failures:
        failed = colorize('red', str(len(self.failures)))
    else:
        failed = len(self.failures)
    print 'Failures: %s/%s (%s assertions, %.3f seconds)' % (
        failed, self.counter, statistics.assertions, self.total_time)
    if self.failures:
        raise SystemExit(1)
import io import re import typing from pygments.lexers import get_lexer_by_name from pygments.styles import get_style_by_name from pygments.lexer import Lexer, RegexLexer from pygments.style import Style from pygments.token import string_to_tokentype from functools import lru_cache NoEscape = string_to_tokentype("NoEscape") @lru_cache(maxsize=None) def allow_math_escape(base_lexer: typing.Type[RegexLexer], noescape): class ExtLexer(RegexLexer): name = base_lexer.name tokens = { **base_lexer.tokens, 'root': [(noescape, NoEscape), *base_lexer.tokens['root']] } return ExtLexer() _xs = 'abcdefghijklmnopqrstuvwxyz' _N = len(_xs) def _i2c(i: int): while i: yield _xs[i % _N]
def indexing(code, colleccion, filepath):
    """Walk a lexed JavaScript token stream and insert one MongoDB
    document per declaration (var/let/const, functions, classes,
    imports/exports, arrow functions).

    NOTE(review): relies on fixed positional offsets into the token list
    (e.g. ``cont + 2`` for a name, ``cont + 6`` for a value), so it
    assumes a particular whitespace-token layout from the lexer — left
    byte-identical, comments only added/translated.  Indentation
    reconstructed from syntax — verify against the original file.
    """
    cont = 0
    # Iterate over the token stream to classify each declaration.
    for i in code:
        # VARIABLE AND FUNCTION DECLARATIONS (var/let/const/function).
        if i[0] == string_to_tokentype('Token.Keyword.Declaration'):
            if i[1] == 'var':
                document = {
                    "file": filepath,
                    "type": "var",
                    "name": str(code[cont + 2][1]),
                    "value": str(code[cont + 6][1])
                }
                colleccion.insert_one(document)
            elif i[1] == 'let':
                document = {
                    "file": filepath,
                    "type": "let",
                    "name": str(code[cont + 2][1]),
                    "value": str(code[cont + 6][1])
                }
                colleccion.insert_one(document)
            elif i[1] == 'const':
                document = {
                    "file": filepath,
                    "type": "const",
                    "name": code[cont + 2][1],
                    "value": code[cont + 6][1]
                }
                colleccion.insert_one(document)
            elif i[1] == 'function':
                # Check whether the function has a name, to store it.
                if code[cont + 1][1] != '(':
                    valor = code[cont + 2][1]
                    contador_aux = 2
                    params = []
                    # Collect the parameter names up to the closing paren.
                    while valor != ")":
                        if (contador_aux > 3 and contador_aux % 2 == 0):
                            params.append(valor)
                        contador_aux += 1
                        valor = code[cont + contador_aux][1]
                    document = {
                        "file": filepath,
                        "type": "function",
                        "name": str(code[cont + 2][1]),
                        "params": str(params)
                    }
                    colleccion.insert_one(document)
                # Anonymous functions, or ones declared as NAME: function().
                else:
                    '''if code[cont - 2][1] == ":": print("Name", code[cont - 3][1]) else: print("No tiene nombre.")'''
                    valor = code[cont + 1][1]
                    contador_aux = 1
                    params = []
                    # Collect the parameter names up to the closing paren.
                    while valor != ")":
                        if (contador_aux > 1 and contador_aux % 2 == 0):
                            params.append(valor)
                        contador_aux += 1
                        valor = code[cont + contador_aux][1]
                    document = {
                        "file": filepath,
                        "type": "function",
                        "name": "No Name",
                        "params": str(params)
                    }
                    colleccion.insert_one(document)
        # CLASSES and MODULES (reserved keywords).
        elif i[0] == string_to_tokentype('Token.Keyword.Reserved'):
            if i[1] == 'class':
                # Check for a constructor to capture its parameters.
                if code[cont + 6][1] == 'constructor':
                    add = 8
                    valortemp = code[cont + add][1]
                    params = []
                    while valortemp != ')':
                        if (add % 2 == 0):
                            params.append(valortemp)
                        valortemp = code[cont + add][1]
                        add += 1
                    document = {
                        "file": filepath,
                        "type": "class",
                        "name": str(code[cont + 2][1]),
                        "constructor": "true",
                        "parameters": str(params)
                    }
                    colleccion.insert_one(document)
                # Subclass form: "class Name extends Parent { constructor".
                elif code[cont + 10][1] == 'constructor':
                    add = 12
                    valortemp = code[cont + add][1]
                    params2 = []
                    while valortemp != ')':
                        valortemp = code[cont + add][1]
                        if (add % 2 == 0):
                            params2.append(valortemp)
                        add += 1
                    document = {
                        "file": filepath,
                        "type": "class child",
                        "name": str(code[cont + 2][1]),
                        "parent": str(code[cont + 4][1]),
                        "constructor": "true",
                        "parameters": str(params2)
                    }
                    colleccion.insert_one(document)
            elif i[1] == 'export':
                document = {
                    "file": filepath,
                    "type": "export",
                    "name": str(code[cont + 4][1])
                }
                colleccion.insert_one(document)
            elif i[1] == 'import':
                document = {
                    "file": filepath,
                    "type": "import",
                    "name": str(code[cont + 4][1])
                }
                colleccion.insert_one(document)
        # ARROW FUNCTIONS "() => {}" — name is not checked; parameters are
        # scanned backwards from the '=>' token.
        elif i[0] == string_to_tokentype('Token.Punctuation'):
            if i[1] == '=>':
                valor = code[cont][1]
                contador_aux2 = 1
                params = []
                while valor != "(":
                    if (contador_aux2 > 1 and contador_aux2 % 2 != 0):
                        params.append(valor)
                    contador_aux2 += 1
                    valor = code[cont - contador_aux2][1]
                document = {
                    "file": filepath,
                    "type": "arrow function",
                    "name": str(code[cont + 2][1]),
                    "params": str(params)
                }
                colleccion.insert_one(document)
        cont += 1