def get_tokens_unprocessed(self, text):
    """Re-tag '#' and '@' tokens the JavaScript lexer flags as errors.

    '#' becomes Name.Tag and '@' becomes Keyword; every other token is
    passed through unchanged.
    """
    for pos, tok_type, val in JavascriptLexer.get_tokens_unprocessed(self, text):
        if tok_type is Error and val in ("#", "@"):
            yield pos, (Name.Tag if val == "#" else Keyword), val
        else:
            yield pos, tok_type, val
def get_tokens_unprocessed(self, text):
    """Post-process JavaScript tokens for doctest-style sessions.

    After a Generic.Prompt token, the rest of that line is the example;
    following lines are re-tagged Generic.Output until the next prompt.
    Identifiers listed in EXCEPTIONS become Name.Exception.
    """
    in_example = False
    in_output = False
    for tok in JavascriptLexer.get_tokens_unprocessed(self, text):
        pos, tok_type, val = tok
        if tok_type is Generic.Prompt:
            in_example, in_output = True, False
        elif in_example and val.endswith(u"\n"):
            # End of the example line: everything after it is output.
            in_example, in_output = False, True
        elif in_output:
            tok = pos, Generic.Output, val
        elif val in self.EXCEPTIONS:
            tok = pos, Name.Exception, val
        yield tok
def get_tokens_unprocessed(self, text):
    """Munge JavaScript tokens for IDL: promote identifiers that are IDL
    reserved or pseudo keywords to the corresponding Keyword token types.

    NOTE(review): '@' annotations still surface as error tokens; extending
    the lexer to recognise them remains a TODO.  The original carried dead
    debug branches (`if value.find('UCS') != -1: pass` and the same for
    '@') that ran two substring searches per token and did nothing; they
    are removed here.
    """
    for index, token, value in JavascriptLexer.get_tokens_unprocessed(self, text):
        if token is Name.Other and value in self.RESERVED_KEYWORDS:
            yield index, Keyword.Reserved, value
        elif token is Name.Other and value in self.PSEUDO_KEYWORDS:
            yield index, Keyword.Pseudo, value
        else:
            yield index, token, value
class FridaCompleter(Completer):
    """Tab-completion provider that queries the attached Frida session for
    the properties reachable from the expression left of the cursor."""

    def __init__(self, repl):
        self._repl = repl
        self._lexer = JavascriptLexer()

    def get_completions(self, document, complete_event):
        prefix = document.text_before_cursor
        # Drop the trailing newline token pygments always appends.
        tokens = list(self._lexer.get_tokens(prefix))[:-1]

        word_tokens = 0
        before_dot = ''
        after_dot = ''
        encountered_dot = False
        # Walk backwards, collecting the dotted expression under the cursor.
        for tok_type, tok_value in reversed(tokens):
            if tok_type == Token.Name.Other:
                before_dot = tok_value + before_dot
                word_tokens += 1
            elif tok_type == Token.Punctuation and tok_value == '.':
                before_dot = '.' + before_dot
                if not encountered_dot:
                    # The text gathered so far is the partial name being typed.
                    encountered_dot = True
                    after_dot = before_dot[1:]
                    before_dot = ''
                word_tokens += 1
            else:
                break

        try:
            if encountered_dot:
                # Ask the target for own + prototype property names.
                keys = self._repl._evaluate("Object.keys(" + before_dot + ").concat(Object.getOwnPropertyNames(Object.getPrototypeOf(" + before_dot + ")))")[1]
                for key in keys:
                    if key.startswith(after_dot):
                        yield Completion(key, -len(after_dot))
            else:
                for key in self._repl._evaluate("Object.keys(this)")[1]:
                    # Hide underscore-private names unless explicitly typed.
                    if key.startswith(before_dot) and not (key.startswith('_') and before_dot == ''):
                        yield Completion(key, -len(before_dot))
        except Exception:
            # Completion is best-effort; never let it crash the REPL.
            pass
def get_tokens_unprocessed(self, text):
    """Promote identifiers listed in EXTRA_KEYWORDS to Keyword tokens;
    everything else is forwarded unchanged."""
    extra = self.EXTRA_KEYWORDS
    for pos, tok_type, val in JavascriptLexer.get_tokens_unprocessed(self, text):
        if tok_type is Name.Other and val in extra:
            yield pos, Keyword, val
        else:
            yield pos, tok_type, val
from pygments.token import Token
from pygments.token import String, string_to_tokentype

# MongoDB connection (comment translated from Spanish)
client = MongoClient('database:27017')
db = client['crawler']

# Start of the crawler
print("********** JAVASCRIPT SOURCE CODE CRAWLER **********")

# Change this value to the name of the initial source-code file
archivoyes = "require"

# Lexing of the base file
filepath = './src/' + archivoyes + '.js'
# NOTE(review): file handle is never closed; a 'with open(...)' block
# would be safer — flagged only, code left unchanged.
fp = open(filepath).read()

# CODE INSERTION: materialise the pygments token stream into a list
lexer = JavascriptLexer()
tokens = lex(fp, lexer)
code = []
for x in tokens:
    code.append(x)


def indexing(code, colleccion, filepath):
    # Index declarations found in `code` into the given collection.
    cont = 0
    # iterate over the code to see what is a variable
    for i in code:
        # Variable and function declarations introduced with 'function'
        if i[0] == string_to_tokentype('Token.Keyword.Declaration'):
            if i[1] == 'var':
                document = {
                    "file": filepath,
                    # NOTE(review): fragment truncated here in the visible source.
def show_js(self, raw):
    """Pretty-print *raw* JavaScript source to the terminal with ANSI colours.

    Imports are local so pygments is only required when this is called.
    The original used the Python 2 print *statement*, which is a syntax
    error on Python 3; the call form below is valid on both.
    """
    from pygments.lexers import JavascriptLexer
    from pygments.formatters import TerminalFormatter
    from pygments import highlight
    print(highlight(raw, JavascriptLexer(), TerminalFormatter()))
# remove the fake <? php if snip: match = '<span class="cp"><?php</span>' point = html.find(match) html = html[:point] + html[point + len(match):] print >> out, html elif name.startswith(hideBlaze): path = parseBlaze(name, hideBlaze) print >> out, '<pre class="hide">%s</pre>' % xmlEncode( trail[path].snapShot()) trail[path].hide() elif name.startswith("> "): html = highlight(htmlDecode(name[5:]), JavascriptLexer(), HTMLformat) cutPoint = html.find("<span") print >> out, html[:cutPoint] + prompt + html[cutPoint:] elif name.startswith(snapBlaze): # make snapshots in directory snap/snapName snapName = parseBlaze(name, snapBlaze) snapPath = os.path.join(snapDir, snapName) os.mkdir(snapPath) for item in trail.blazes.values(): if item.visible: fileName = item.fileName f = open(os.path.join(snapPath, item.outFile), "w") f.write(item.snapShot()) f.close()
def __init__(self, repl):
    """Keep a handle on the REPL and build the JavaScript lexer used to
    tokenise completion prefixes."""
    self._lexer = JavascriptLexer()
    self._repl = repl
# Menu wiring for a Tkinter-based editor (fragment of a larger setup script).
options_menu.add_command(label=lang['options'][5], command=textbox.scrap_page)
options_menu.add_command(label=lang['options'][6], command=textbox.highlight_all)
style_menu.add_command(label=lang['style'][0], command=lambda: textbox.tagger('bold'))
style_menu.add_command(label=lang['style'][1], command=lambda: textbox.tagger('italic'))
style_menu.add_command(label=lang['style'][2], command=lambda: textbox.tagger('underline'))
# One entry per supported syntax; each swaps the textbox's pygments lexer.
syntax_menu.add_command(label='Python 3', command=lambda: textbox.set_lexer(Python3Lexer()))
syntax_menu.add_command(label='C/C++', command=lambda: textbox.set_lexer(CppLexer()))
syntax_menu.add_command(label='C#', command=lambda: textbox.set_lexer(CSharpLexer()))
syntax_menu.add_command(label='Java', command=lambda: textbox.set_lexer(JavaLexer()))
syntax_menu.add_command(label='Rust', command=lambda: textbox.set_lexer(RustLexer()))
syntax_menu.add_command(label='Go', command=lambda: textbox.set_lexer(GoLexer()))
syntax_menu.add_command(label='HTML', command=lambda: textbox.set_lexer(HtmlLexer()))
syntax_menu.add_command(label='CSS', command=lambda: textbox.set_lexer(CssLexer()))
syntax_menu.add_command(label='Javascript', command=lambda: textbox.set_lexer(JavascriptLexer()))
syntax_menu.add_command(label='PHP', command=lambda: textbox.set_lexer(PhpLexer()))
syntax_menu.add_command(label='SQL', command=lambda: textbox.set_lexer(SqlLexer()))
syntax_menu.add_command(label='Batch', command=lambda: textbox.set_lexer(BatchLexer()))
syntax_menu.add_command(label='Bash', command=lambda: textbox.set_lexer(BashLexer()))
syntax_menu.add_command(label='Markdown', command=lambda: textbox.set_lexer(MarkdownLexer()))
# `font_name=font_name` / `size=size` bind each loop value as a default
# argument, avoiding the late-binding-closure pitfall.
for font_name in settings["fonts"]:
    font_menu.add_command(label=font_name, command=lambda font_name=font_name: textbox.change_font(font_name, 0))
for size in range(settings["min_font_size"], settings["max_font_size"] + settings["font_size_interval"], settings["font_size_interval"]):
    font_size_menu.add_command(label=size, command=lambda size=size: textbox.change_font(size, 1))
# NOTE(review): loop body truncated in the visible source.
for song in os.listdir('sound'):
def get_tokens_unprocessed(self, text, stack=('root',)):
    """Lex a quoted JavaScript snippet: strip the surrounding quote
    characters, turn literal '\\n' escapes into real newlines, then
    delegate to the plain JavaScript lexer."""
    unquoted = text[1:-1].replace('\\n', '\n')
    return JavascriptLexer.get_tokens_unprocessed(self, unquoted, stack)
def test_correct_output():
    """Highlighted markup must equal OUTPUT once concrete colour values
    are masked out of the fgcolor attributes."""
    markup = highlight(INPUT, JavascriptLexer(), PangoMarkupFormatter())
    normalized = re.sub('<span fgcolor="#[^"]{6}">', '<span fgcolor="#">', markup)
    assert normalized == OUTPUT
def get_tokens_unprocessed(self, text, stack=('root', )):
    """Delegate to JavascriptLexer after removing the enclosing quote
    characters and decoding escaped '\\n' sequences into newlines."""
    inner = text[1:-1]
    inner = inner.replace('\\n', '\n')
    lexed = JavascriptLexer.get_tokens_unprocessed(self, inner, stack)
    return lexed
def __highlight(self, data):
    """Return *data* rendered as colourised, 4-space-indented JSON,
    with trailing whitespace stripped for terminal display."""
    pretty = json.dumps(data, indent=4)
    colored = highlight(pretty, formatter=TerminalFormatter(), lexer=JavascriptLexer())
    return colored.rstrip()
class FridaCompleter(Completer):
    """Tab-completion provider that evaluates JavaScript in the attached
    Frida session to enumerate reachable property names."""

    def __init__(self, repl):
        self._repl = repl
        self._lexer = JavascriptLexer()

    def get_completions(self, document, complete_event):
        prefix = document.text_before_cursor

        # A '%'-prefixed word containing no whitespace is a REPL magic command.
        magic = len(prefix) > 0 and prefix[0] == '%' and not any(map(lambda c: c.isspace(), prefix))

        # Drop the trailing newline token pygments always appends.
        tokens = list(self._lexer.get_tokens(prefix))[:-1]

        # 0.toString() is invalid syntax,
        # but pygments doesn't seem to know that
        for i in range(len(tokens) - 1):
            if tokens[i][0] == Token.Literal.Number.Integer \
                    and tokens[i + 1][0] == Token.Punctuation and tokens[i + 1][1] == '.':
                # Merge "<int>." into a single float-ish token.
                tokens[i] = (Token.Literal.Number.Float, tokens[i][1] + tokens[i + 1][1])
                del tokens[i + 1]

        before_dot = ''
        after_dot = ''
        encountered_dot = False
        # Walk backwards, reconstructing the dotted expression at the cursor.
        for t in tokens[::-1]:
            if t[0] in Token.Name.subtypes:
                before_dot = t[1] + before_dot
            elif t[0] == Token.Punctuation and t[1] == '.':
                before_dot = '.' + before_dot
                if not encountered_dot:
                    # Text gathered so far is the partial name being typed.
                    encountered_dot = True
                    after_dot = before_dot[1:]
                    before_dot = ''
            else:
                if encountered_dot:
                    # The value/contents of the string, number or array doesn't matter,
                    # so we just use the simplest value with that type
                    if t[0] in Token.Literal.String.subtypes:
                        before_dot = '""' + before_dot
                    elif t[0] in Token.Literal.Number.subtypes:
                        before_dot = '0.0' + before_dot
                    elif t[0] == Token.Punctuation and t[1] == ']':
                        before_dot = '[]' + before_dot
                break
        try:
            if encountered_dot:
                # Collect own + prototype property names of the receiver;
                # the wrapping try/catch makes failures yield an empty list.
                for key in self._get_keys("""try { (function (o) { "use strict"; var k = Object.getOwnPropertyNames(o); if (o !== null && o !== undefined) { var p; if (typeof o !== 'object') p = o.__proto__; else p = Object.getPrototypeOf(o); if (p !== null && p !== undefined) k = k.concat(Object.getOwnPropertyNames(p)); } return k; })(""" + before_dot + """); } catch (e) { []; }"""):
                    if self._pattern_matches(after_dot, key):
                        yield Completion(key, -len(after_dot))
            else:
                if magic:
                    keys = self._repl._magic_command_args.keys()
                else:
                    keys = self._get_keys("Object.getOwnPropertyNames(this)")
                for key in keys:
                    # Hide underscore-private names unless explicitly typed.
                    if not self._pattern_matches(before_dot, key) or (key.startswith('_') and before_dot == ''):
                        continue
                    yield Completion(key, -len(before_dot))
        except frida.InvalidOperationError:
            # Session went away mid-completion; silently give up.
            pass
        except Exception as e:
            self._repl._print(e)

    def _get_keys(self, code):
        # Evaluate `code` in the session and keep only syntactically valid,
        # de-duplicated identifiers, sorted for stable ordering.
        return sorted(
            filter(self._is_valid_name,
                   set(self._repl._evaluate(code)[1])))

    def _is_valid_name(self, name):
        # A valid identifier lexes to exactly one Name token (plus the
        # implicit trailing newline token).
        tokens = list(self._lexer.get_tokens(name))
        return len(tokens) == 2 and tokens[0][0] in Token.Name.subtypes

    def _pattern_matches(self, pattern, text):
        # Case-insensitive substring match.
        # NOTE(review): '!= None' should be 'is not None' (PEP 8); behavior
        # is unchanged, flagged only.
        return re.search(re.escape(pattern), text, re.IGNORECASE) != None
def main():
    """Command-line entry point: compile a CoffeeScript file to JavaScript,
    or serve the current directory over HTTP with on-the-fly compilation.

    Fixes over the original: the version-probe file handle is now closed
    (it leaked), the probe bytes are decoded so the str regex works on
    both Python 2 and 3, and the argparse help string uses '%(default)s'
    (the original's optparse-style '%default' makes argparse's help
    formatter raise when rendering '--help' for the serve subcommand).
    """
    import argparse
    # Read just enough of the bundled compiler to extract its version string.
    with open(P('coffee-script.js'), 'rb') as f:
        ver = re.search(r'CoffeeScript Compiler v(.+)',
                        f.read(500).decode('utf-8', 'replace')).group(1)
    epilog = 'Copyright Kovid Goyal 2012'
    parser = argparse.ArgumentParser(description='''
Serve up files under the current directory via HTTP, automatically compiling
.coffee files to javascript. Can also be used as a simple coffeescript
compiler. ''', epilog=epilog)
    parser.add_argument('--version', action='version',
                        version='Using coffeescript compiler version: ' + ver)
    subparsers = parser.add_subparsers(help='Compile or serve', dest='which',
                                       title='Compile or Serve',
                                       description='Compile or serve')
    cc = subparsers.add_parser('compile', help='Compile coffeescript',
                               epilog=epilog)
    cs = subparsers.add_parser(
        'serve', help='Serve files under the current '
        'directory, automatically compiling .coffee files to javascript',
        epilog=epilog)
    cc.add_argument('src', type=argparse.FileType('rb'),
                    metavar='path/to/script.coffee',
                    help='The coffee script to compile. Use  - for stdin')
    cc.add_argument('--highlight', default=False, action='store_true',
                    help='Syntax highlight the output (requires Pygments)')
    cs.add_argument('--port', type=int, default=8000,
                    help='The port on which to serve. Default: %(default)s')
    cs.add_argument(
        '--host', default='0.0.0.0',
        help='The IP address on which to listen. Default is to listen on all'
        ' IPv4 addresses (0.0.0.0)')
    args = parser.parse_args()
    if args.which == 'compile':
        ans, errors = compile_coffeescript(args.src.read(),
                                           filename=args.src.name)
        for line in errors:
            print(line, file=sys.stderr)
        if ans:
            if args.highlight:
                from pygments.lexers import JavascriptLexer
                from pygments.formatters import TerminalFormatter
                from pygments import highlight
                print(highlight(ans, JavascriptLexer(), TerminalFormatter()))
            else:
                print(ans.encode(sys.stdout.encoding or 'utf-8'))
    else:
        serve(port=args.port, host=args.host)