def print_packets(path: list, nodes: dict) -> None:
    """Render each node along *path* and print the final packet assignment
    as syntax-highlighted Python source.

    :param path: sequence of edges; each edge's ``dst`` indexes into *nodes*.
    :param nodes: mapping from edge destination to a node object exposing
        ``render()`` and ``name``.
    """
    # Render the intermediate nodes first.  Their output is not printed, but
    # render() is still invoked in case rendering advances mutable node
    # state.  (The original code also lexed each intermediate line into a
    # `tokens` list that was never used; that dead work has been removed.)
    for e in path[:-1]:
        node = nodes[e.dst]
        node.render()
    # p = self.fuzz_node.render()
    # Highlight and print only the final node's packet assignment.
    node = nodes[path[-1].dst]
    p = node.render()
    line = '{} = {}'.format(node.name.replace('-', '_'), repr(p))
    print(pygments.highlight(line, Python3Lexer(),
                             Terminal256Formatter(style='rrt')))
def __init__(
    self,
    *,
    example_split_symbol: str,
    project_level: bool,
    file_split_symbol: str = None,
    filepath_split_symbol: str = "",
    num_projects_to_load: int = None,
    random_seed: int = None,
):
    """Initialise writer state and the underlying git-data preprocessor."""
    self._project_level = project_level
    # No explicit limit means "load every project".
    if num_projects_to_load is None:
        self._num_projects_to_load = float("+inf")
    else:
        self._num_projects_to_load = num_projects_to_load
    self._random_seed = random_seed
    self._content_sets = []
    self._write_counter = 0
    # The preprocessor receives the same split-symbol configuration.
    self._preprocessor = GitDataPreprocessor(
        example_split_symbol=example_split_symbol,
        project_level=project_level,
        file_split_symbol=file_split_symbol,
        filepath_split_symbol=filepath_split_symbol,
    )
    self._py_lexer = Python3Lexer()
def __init__(self, theme, stream, hook_loader: HookLoader):
    """Bind the output stream, colour theme and hook loader."""
    self.stream = stream
    self.hook_loader = hook_loader
    self.left_offset = None
    # Pygments machinery for colourised code / traceback output.
    # (The attribute name `terminal_formater` is kept as-is because external
    # callers reference it.)
    self.terminal_formater = Terminal256Formatter(style=theme)
    self.python_lexer = Python3Lexer()
    self.python_traceback_lexer = Python3TracebackLexer()
def __init__(self, background_shade=0.9, **kwargs):
    """Console widget with Python highlighting on a grey-scale background.

    :param background_shade: how black the background should be, in [0, 1];
        higher means darker.
    :param kwargs: forwarded to the CodeInput base class.  (The original
        accepted these but silently discarded them.)
    """
    # BUGFIX: forward **kwargs so standard CodeInput/TextInput constructor
    # options keep working instead of being swallowed.
    super(SimpleConsoleComponent, self).__init__(**kwargs)
    # Setting the Python3Lexer for syntax highlighting.
    self.lexer = Python3Lexer()
    # The shade indicates how black the background should be, but in kivy a
    # *lesser* channel value means blacker, so invert it.
    self.background_shade = background_shade
    background_color_value = 1 - self.background_shade
    self.background_color = [
        background_color_value,
        background_color_value,
        background_color_value,
        1,
    ]
    # Work around the slight alpha-channel animation that occurs when a
    # TextInput gets focus by using the normal background picture for the
    # "animated" (active) state as well.
    self.background_active = self.background_normal
    self.border = (3, 3, 3, 3)
    # Reduce the default font size and switch to "Inconsolata".
    # NOTE: Inconsolata is not part of the default kivy distribution and was
    # downloaded at 'http://www.levien.com/type/myfonts/inconsolata.html'.
    # For information on adding custom fonts visit
    # 'http://cheparev.com/kivy-connecting-font/'
    self.font_name = "Inconsolata"
    self.font_size = 13
def __init__(
    self,
    *,
    example_split_symbol: str,
    project_level: bool,
    inference_mode: bool = False,
    file_split_symbol: str = None,
    filepath_split_symbol: str = "",
    old_style: bool = False,
):
    """Configure the split symbols used when preprocessing git data.

    :raises AssertionError: when a supplied split "symbol" is not exactly
        one character, or when a project-level dataset is requested without
        a file split symbol.
    """
    assert (
        len(example_split_symbol) == 1
    ), f"You must provide a symbol, not a string. {example_split_symbol} were provided"
    # NOTE(review): the declared default filepath_split_symbol is "" which
    # cannot pass this length check, so callers apparently always pass it
    # explicitly — confirm before relying on the default.
    assert (
        len(filepath_split_symbol) == 1
    ), f"You must provide a symbol, not a string. {filepath_split_symbol} were provided"
    # BUGFIX: file_split_symbol defaults to None; the original called
    # len(file_split_symbol) unconditionally, crashing with TypeError for
    # non-project-level datasets before the branch below could run.
    if file_split_symbol is not None:
        assert (
            len(file_split_symbol) == 1
        ), f"You must provide a symbol, not a string. {file_split_symbol} were provided"
    if project_level:
        assert file_split_symbol is not None, "If you want project level dataset you must provide file split symbol"
        self._file_split_symbol = file_split_symbol + "\n"
    else:
        self._file_split_symbol = ""
    self._example_split_symbol = example_split_symbol
    self._project_level = project_level
    self._path_content_split_symbol = filepath_split_symbol
    self._old_style = old_style
    self._inference_mode = inference_mode
    self._py_lexer = Python3Lexer()
def pydoc(self, job, url):
    """Serve syntax-highlighted source for the module named in *url*."""
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    from pygments.lexers.python import Python3Lexer

    modname = url.path().lstrip("/")
    query = QUrlQuery(url)
    # An optional ?hl_lines=start-end query selects lines to emphasise.
    extras = {}
    if query.hasQueryItem("hl_lines"):
        first, last = query.queryItemValue("hl_lines").split("-")
        extras["hl_lines"] = list(range(int(first), int(last) + 1))
    module = importlib.import_module(modname)
    source_path = inspect.getsourcefile(module)
    formatter = HtmlFormatter(
        title="Module %s" % modname,
        full=True,
        lineanchors="line",
        **extras,
    )
    with open(source_path) as src:
        code = highlight(src.read(), Python3Lexer(), formatter)
    # Hand the rendered HTML back through a Qt buffer owned by self.
    buffer = QBuffer(self)
    buffer.setData(code.encode("utf-8"))
    job.reply(b"text/html", buffer)
def __init__(self, name, main_display, tabsize, multiline_window=1500, number_of_windows=1):
    """Open file *name* and set up lexing/editing state for the display.

    The file handle is kept open on ``self.file``; the first line is read
    only to guess a lexer, after which the handle is rewound to the start.
    """
    self.name = name
    self.file = f = open(name)
    try:
        # Guess from filename + first line; a TypeError can occur here, in
        # which case fall back to looking the lexer up by file extension.
        lexer = guess_lexer_for_filename(name, f.readline())
    except TypeError:
        try:
            lexer = get_lexer_by_name(os.path.splitext(name)[1][1:])
        except pygments.util.ClassNotFound:
            lexer = TextLexer()
    except pygments.util.ClassNotFound:
        lexer = TextLexer()
    # Always prefer the Python 3 lexer when any Python lexer was guessed.
    lexer = Python3Lexer() if isinstance(lexer, PythonLexer) else lexer
    lexer.add_filter(NonEmptyFilter())
    lexer.add_filter('tokenmerge')
    # Rewind: the first line was consumed by the lexer guess above.
    f.seek(0)
    self.lines = []
    self.focus = 0
    self.clipboard = None
    self.clipboard_pos = None
    self.lexer = lexer
    self.w_pos = {}
    self.all_tokens = None
    # Extracts the edit text from a line widget.
    self._etext = lambda w: w.edit_text
    self.multiline_window = multiline_window
    self.number_of_windows = number_of_windows
    self.main_display = main_display
    # Keyword arguments shared by every line widget created later.
    self.line_kwargs = dict(caption="", allow_tab=True, lexer=lexer,
                            wrap='clip', main_display=main_display,
                            smart_home=True, tabsize=tabsize)
def createLexers(self):
    """Build the file-extension -> pygments lexer lookup table.

    Returns a dict keyed by file extension (or by name fragment for the
    make/cmake entries) mapping to a ready lexer instance.
    """
    lex = {}
    lex['.c'] = CFamilyLexer()
    lex['.h'] = CFamilyLexer()
    lex['.cpp'] = CppLexer()
    lex['.hpp'] = CppLexer()
    lex['.css'] = CssLexer()
    lex['.sass'] = SassLexer()
    lex['.yaml'] = YamlLexer()
    lex['.yml'] = YamlLexer()
    lex['.json'] = JsonLexer()
    lex['.cs'] = CSharpLexer()
    lex['.fs'] = FSharpLexer()
    lex['.e'] = EiffelLexer()
    lex['.erl'] = ErlangLexer()
    lex['.hrl'] = ErlangLexer()
    lex['.es'] = ErlangLexer()
    lex['.f03'] = FortranLexer()
    lex['.f90'] = FortranLexer()
    lex['.F03'] = FortranLexer()
    lex['.F90'] = FortranLexer()
    lex['.go'] = GoLexer()
    lex['.hs'] = HaskellLexer()
    lex['.v'] = VerilogLexer()
    lex['.vhdl'] = VhdlLexer()
    lex['.vhd'] = VhdlLexer()
    lex['.html'] = HtmlLexer()
    lex['.htm'] = HtmlLexer()
    lex['.xhtml'] = HtmlLexer()
    lex['.xml'] = XmlLexer()
    lex['.js'] = JavascriptLexer()
    # BUGFIX: TypeScript sources use the '.ts' extension; the original bound
    # the TypeScript lexer to '.tex' (TeX documents) by mistake.
    lex['.ts'] = TypeScriptLexer()
    lex['.coffee'] = CoffeeScriptLexer()
    lex['.java'] = JavaLexer()
    lex['.scala'] = ScalaLexer()
    lex['.kt'] = KotlinLexer()
    lex['.ktm'] = KotlinLexer()
    lex['.kts'] = KotlinLexer()
    lex['.lisp'] = CommonLispLexer()
    lex['make'] = MakefileLexer()
    lex['Make'] = MakefileLexer()
    lex['CMake'] = CMakeLexer()
    lex['cmake'] = CMakeLexer()
    lex['.m'] = MatlabLexer()
    lex['.mat'] = MatlabLexer()
    lex['.dpr'] = DelphiLexer()
    lex['.perl'] = PerlLexer()
    lex['.php'] = PhpLexer()
    lex['.pr'] = PrologLexer()
    lex['.py'] = Python3Lexer()
    lex['.rb'] = RubyLexer()
    lex['.sh'] = BashLexer()
    lex['.sql'] = MySqlLexer()
    lex['.mysql'] = MySqlLexer()
    lex['.tcl'] = TclLexer()
    lex['.awk'] = AwkLexer()
    return lex
def theme_list(ctx: click.Context) -> None:
    """List all available themes with a short code snippet."""
    console = ctx.obj["console"]
    for theme in themes:
        console.print(
            f"[bold red underline]Theme {theme}: [/bold red underline]\n")
        # Render the preview snippet in this theme's colours.
        preview = highlight(
            THEME_PREVIEW,
            Python3Lexer(),
            Terminal256Formatter(style=get_style_by_name(theme)),
        )
        print(preview)
def __init__(self, collection_file, env_file, autosave=False, style='fruity'):
    """Wire up the requestor and the lexers/formatter used for output."""
    self.r = Requestor(collection_file, env_file)
    self.autosave = autosave
    # 256-colour terminal output in the chosen pygments style.
    self.formatter = Terminal256Formatter(style=style)
    # One lexer per payload type we pretty-print.
    self.http_lexer = HttpLexer()
    self.json_lexer = JsonLexer()
    self.python_lexer = Python3Lexer()
def print_poc(target: Target, path: list, receive_data_after_each_request,
              receive_data_after_fuzz) -> None:
    """Print a syntax-highlighted proof-of-concept exploit script.

    :param target: the fuzzing target the exploit is generated for.
    :param path: the request path that triggered the crash.
    """
    # (Removed an unused `tokens = []` local left over from earlier code.)
    exploit_code = get_exploit_code(target, path,
                                    receive_data_after_each_request,
                                    receive_data_after_fuzz)
    print(
        pygments.highlight(exploit_code, Python3Lexer(),
                           Terminal256Formatter(style='rrt')))
def showPyScript(self):
    """Output to Python script for Jupyter notebook."""
    # Assemble the script: a version/project header plus the solved body.
    header = (
        f"# Generate by Pyslvs v{_major}.{_minor}.{_build} ({_label})\n"
        f"# Project \"{self.DatabaseWidget.file_name.baseName()}\"\n"
    )
    body = slvs_process_script(
        tuple(vpoint.expr for vpoint in self.EntitiesPoint.data()),
        tuple((b, d) for b, d, a in self.InputsWidget.inputPairs()),
    )
    dlg = ScriptDialog(
        header + body,
        Python3Lexer(),
        "Python script",
        ["Python3 Script (*.py)"],
        self,
    )
    dlg.show()
    dlg.exec_()
def _ensure_lexer(self):
    """Lazily resolve and cache the lexer for this buffer."""
    if self.lexer is not None:
        return
    try:
        lexer = pygments_cache.get_lexer_for_filename(self.name)
    except pygments.util.ClassNotFound:
        lexer = TextLexer()
    # Always prefer the Python 3 lexer when any Python lexer was found.
    if isinstance(lexer, PythonLexer):
        lexer = Python3Lexer()
    lexer.add_filter(NonEmptyFilter())
    lexer.add_filter('tokenmerge')
    # Cache on both the instance and the shared line-widget kwargs.
    self.lexer = self.line_kwargs['lexer'] = lexer
def get_tokens_unprocessed(self, text):
    """Re-tag known extra names as namespaces, functions or classes."""
    # Priority order mirrors the original if/elif chain: vars win over
    # callables, callables over classes.
    retag = (
        (self.extra_vars, Name.Namespace),
        (self.extra_callables, Name.Function),
        (self.extra_classes, Name.Class),
    )
    for index, token, value in Python3Lexer.get_tokens_unprocessed(self, text):
        if token is Name:
            for names, new_token in retag:
                if value in names:
                    token = new_token
                    break
        yield index, token, value
def detail(category, module_name):
    """Render the detail page for one example module, or 404."""
    qualified = f'{category}.{module_name}'
    # Reject unknown modules and private names.
    if qualified not in examples.__all__ or module_name.startswith('_'):
        abort(404)
    module = importlib.import_module(f'examples.{category}.{module_name}')
    source = inspect.getsource(module)
    formatter = HtmlFormatter()
    example = {
        'title': module_name,
        'scenes': get_scene_details(category, module),
        'code': highlight(source, Python3Lexer(), formatter),
        'filename': f'{module_name}.py',
        'category': category,
    }
    return render_template('detail.html', example=example,
                           style=formatter.get_style_defs('.highlight'))
def theme_list() -> None:
    """List all available themes with a short code snippet."""
    for theme in themes:
        CONSOLE.print(f"[bold red underline]Theme {theme}: [/bold red underline]\n")
        # Look the style up by name, falling back to the local scheme dict.
        try:
            style = get_style_by_name(theme)
        except ClassNotFound:
            style = scheme_dict[theme]
        # BUGFIX: the preview used to be printed from a `finally` block, so
        # if the scheme_dict fallback raised KeyError the print ran with
        # `style` unbound and masked the real error with a NameError.
        print(
            highlight(
                THEME_PREVIEW, Python3Lexer(), Terminal256Formatter(style=style)
            )
        )
def export_data(self, api_calls: List[ApiCall], return_raw=False):
    """Generate a locust load-test script covering *api_calls*.

    :param api_calls: calls to turn into sequential locust tasks.
    :param return_raw: when True return the plain formatted source instead
        of pygments-highlighted HTML.
    """
    # Boilerplate emitted before the generated task methods.
    file_header = """
# python3 -m pip install locust - See https://docs.locust.io/en/stable/installation.html
# Running the tests with web ui
# locust -f this_file.py
# Or to run it without web ui for 20(secs) with 10 users and 2 users to spawn every second
# locust -f locust_test.py --headless --users 10 --hatch-rate 2 --run-time 20s
from locust import HttpUser, SequentialTaskSet, constant, constant_pacing, between, task
import json
import random
import string
import uuid


def random_uuid():
    return str(uuid.uuid4())


def random_str(length=10, with_punctuation=False):
    selection = string.ascii_letters + string.digits
    selection = selection + string.punctuation if with_punctuation else selection
    return ''.join(random.choice(selection) for i in range(length))


def random_int(min=0, max=100):
    return random.randint(min, max)


class ApiTestSteps(SequentialTaskSet):
"""
    # Boilerplate emitted after the tasks: the user class that drives them.
    file_footer = """
class ApiUser(HttpUser):
    tasks = [ApiTestSteps]
    host = "localhost"
    # See https://docs.locust.io/en/stable/api.html#locust.wait_time
    wait_time = constant(1)  # wait between requests. Other options between(5, 15) or constant_pacing(1)
"""
    # One task per call, numbered so locust preserves the sequence order.
    output = [
        self.__export_api_call(idx, api_call)
        for idx, api_call in enumerate(api_calls)
    ]
    unformatted_code = file_header + "\n".join(output) + file_footer
    formatted_code, _ = format_python_code(unformatted_code)
    return (
        formatted_code
        if return_raw
        else highlight(formatted_code, Python3Lexer(), HtmlFormatter())
    )
def print_code(code):
    """Display *code* as syntax-highlighted HTML in the notebook."""
    formatter = HtmlFormatter(cssclass='pygments')
    # Highlighted markup plus its stylesheet, stitched into one document.
    html_code = highlight(code, Python3Lexer(), formatter)
    css = formatter.get_style_defs('.pygments')
    template = """<style>
{}
</style>
{}
"""
    html = template.format(css, html_code)
    return display(HTML(html))
def highlight2(self, event=None):
    """Highlight the syntax of the whole current buffer."""
    text_widget = self.get_current()
    # Clear previously applied highlight tags before re-lexing.
    self.remove_tags2(1)
    # (Removed unused `row` and `content` locals from the original.)
    text_widget.mark_set("range_start", "1" + ".0")
    data = text_widget.get("1.0", "end")
    # Walk the lexed tokens and tag each span with its token type, advancing
    # the range marks as we go.
    for token, content in lex(data, Python3Lexer()):
        text_widget.mark_set("range_end",
                             "range_start + %dc" % len(content))
        text_widget.tag_add(str(token), "range_start", "range_end")
        text_widget.mark_set("range_start", "range_end")
    self.tag_conf()
def python_file_to_json(req, file_name: str):
    """Return a file's contents: highlighted HTML for browsers, else JSON."""
    try:
        with open(file_name, "r") as file:
            contents = file.read()
    except IOError as e:
        # Best effort: ship the error text instead of the file body.
        contents = "Error: {}".format(e.strerror)
    if req.accept_mimetypes.accept_html:
        page = highlight(contents, Python3Lexer(),
                         HtmlFormatter(full=True, linenos='table'))
        return Response(page, content_type="text/html")
    payload = json.dumps(contents, sort_keys=True, indent=4,
                         separators=(',', ': '))
    return Response(payload, content_type="application/json")
def export_data(self, api_calls: List[ApiCall]):
    """Generate a python-requests script replaying *api_calls*, returned as
    pygments-highlighted HTML."""
    # Imports emitted at the top of the generated script.
    file_header = """
# Install the Python Requests library
import requests
import json
import random
import string
import uuid
"""
    # One code section per exported call.
    output = [self.__export_api_call(api_call) for api_call in api_calls]
    unformatted_code = file_header + "\n".join(output)
    formatted_code, _ = format_python_code(unformatted_code)
    return highlight(formatted_code, Python3Lexer(), HtmlFormatter())
def __show_py_script(self):
    """Output to Python script for Jupyter notebook."""
    # Build the version/project header plus the solved expression body, then
    # hand it to a syntax-highlighted save dialog.
    dlg = ScriptDialog(
        f"# Generate by Pyslvs {__version__}\n"
        f"# Project \"{self.database_widget.file_name.baseName()}\"\n" +
        slvs_process_script(
            tuple(vpoint.expr() for vpoint in self.entities_point.data()),
            tuple((b, d) for b, d, a in self.inputs_widget.input_pairs())
        ),
        Python3Lexer(),
        "Python script",
        ["Python3 Script (*.py)"],
        self
    )
    dlg.show()
    # Block until the dialog closes, then release the Qt object.
    dlg.exec()
    dlg.deleteLater()
def format(self, record):
    """Format *record*, attaching a highlighted source-code excerpt.

    Reads the logging call's source file, selects a window of lines around
    ``record.lineno``, colourises it with pygments, and stores it (plus
    request info) on the record for the base formatter to interpolate.
    """
    # We want to show some code lines while logging so the context is easy
    # to see.  Build a list of all numbered lines, marking the active one.
    lines = []
    with open(record.pathname) as src:
        for index, line in enumerate(src.readlines(), start=1):
            if index == record.lineno:
                lines.append('{:4d}***: {}'.format(index, line))
            else:
                lines.append('{:7d}: {}'.format(index, line))
    # Select +/-5 lines around the current line, clamped to file bounds.
    # (Indexing below is 0-based, record.lineno is 1-based.)
    start = (record.lineno - 1) - 5
    end = (record.lineno - 1) + 5
    if record.lineno == len(lines):
        end = record.lineno - 1
    if end > len(lines) - 1:
        end = len(lines) - 1
    if record.lineno - 1 == 0:
        start = 0
    if start < 0:
        start = 0
    code = ''.join(lines[start:end + 1])  # lines[start:length]
    # Colorize the code.
    import pygments
    from pygments.lexers.python import Python3Lexer
    from pygments.formatters import TerminalTrueColorFormatter
    code = pygments.highlight(
        code,
        Python3Lexer(),
        # TerminalTrueColorFormatter(style='monokai')  # use for terminal
        TerminalTrueColorFormatter()  # use for jupyter notebook
    )
    # Add new attributes to the record which are used later by the format
    # string; we also want the requested url and its method when exposed.
    if exposed_request is not None:
        record.absolute_path = exposed_request.build_absolute_uri()
        record.method = exposed_request.method
    else:
        record.absolute_path = "NONE_NO_REQUEST_ABS_PATH "
        record.method = "NONE_NO_REQUEST_METHOD"
    record.codelines = code
    record.topline = "--------------------------------------------------------------------------------------------------------------"
    record.botline = "--------------------------------------------------------------------------------------------------------------"
    return super(VerFormatter, self).format(record)
def highlight(self, event=None):
    """Does syntax highlighting."""
    data = self.get("1.0", "end-1c")
    # Nothing to do on an empty buffer.
    if data == '':
        return
    self.mark_set("range_start", "1.0")
    # Run the lexer and tag each token span in order, advancing the marks.
    for token, content in lex(data, Python3Lexer()):
        self.mark_set("range_end", "range_start + {}c".format(len(content)))
        self.tag_add(str(token), "range_start", "range_end")
        self.mark_set("range_start", "range_end")
def highlight(self, event=None):
    """Highlight the syntax of the current line"""
    # Only highlight when the active tab holds a Python file.
    current = self.file_list[self.nb.index('current')]
    if current is not None and current.endswith('.py'):
        text_widget = self.get_current()
        # Row (1-based line number) the insert cursor is on.
        row = text_widget.index('insert').split('.')[0]
        self.remove_tags(row)
        content = text_widget.get("1.0", 'end')
        lines = content.split("\n")
        # Re-lex just the current line, tagging each token span in turn.
        text_widget.mark_set("range_start", row + ".0")
        data = text_widget.get(row + ".0", row + "." + str(len(lines[int(row) - 1])))
        for token, content in lex(data, Python3Lexer()):
            text_widget.mark_set("range_end", "range_start + %dc" % len(content))
            text_widget.tag_add(str(token), "range_start", "range_end")
            text_widget.mark_set("range_start", "range_end")
        self.tag_conf()
def show_rules():
    """Serve the rule definitions: highlighted HTML for browsers, else JSON."""
    if request.accept_mimetypes.accept_html:
        with open("rules/rules.py", "r") as file:
            source = file.read()
        page = highlight(source, Python3Lexer(),
                         HtmlFormatter(full=True, linenos='table'))
        return Response(page, content_type="text/html")
    # JSON view: each rule's name plus its source stripped to the body
    # after the first colon.
    rules_txt = []
    for d in rules:
        rule_src = inspect.getsource(d["rule"]).strip().split(':', 1)[1].strip()
        rules_txt.append({"name": d["name"], "rule": rule_src})
    data = json.dumps(rules_txt, sort_keys=True, indent=4,
                      separators=(',', ': '))
    return Response(data, content_type="application/json")
def __init__(
    self, code: str, font_size=48, image_pad=12, scale=0.15, use_line_numbers=True
):
    """Prepare a rendered, syntax-highlighted image of *code*.

    :param code: source text; split into individual lines for rendering.
    :param font_size: point size used when rendering each line.
    :param image_pad: padding applied around the rendered image.
    :param scale: scale factor applied when sizing the result.
    :param use_line_numbers: whether rendered lines carry line numbers.
    """
    self.use_line_numbers = use_line_numbers
    self.font_size = font_size
    self.image_pad = image_pad
    self.scale = scale
    self.lexer = Python3Lexer()
    self.style = DraculaStyle
    self.code_lines = code.split("\n")
    self.image_paths = []
    self.group = Group()
    self._render_lines()
    self._adjust_size()
    # NOTE(review): this is assigned *after* _render_lines()/_adjust_size()
    # run; if either method reads _max_line_width it would see it unset —
    # confirm the intended ordering.
    self._max_line_width = 0
def execute_files(file_dir):
    """Run every uploaded file in *file_dir* against its test cases.

    Returns a list of (filename, highlighted source, highlighted outputs)
    tuples, one per file in the upload directory.
    """
    results = []
    upload_dir = os.path.join(app.config['UPLOAD_FOLDER'], file_dir)
    for file in os.listdir(upload_dir):
        filepath = os.path.join(app.config['UPLOAD_FOLDER'], file_dir, file)
        # Highlighted copy of the submitted source.
        with open(filepath, "r") as py:
            code = Markup(highlight(py.read(), Python3Lexer(), HtmlFormatter()))
        # One highlighted output per registered test case.
        outputs = [
            Markup(highlight(run_file(filepath, test), BashLexer(),
                             HtmlFormatter()))
            for test in TEST_CASES[file]
        ]
        results.append((file, code, outputs))
    return results
def export_data(self, api_calls: List[ApiCall], return_raw=False):
    """Generate a locust (pre-1.0 API) load-test script for *api_calls*.

    :param api_calls: calls to turn into sequenced locust tasks.
    :param return_raw: when True return the plain formatted source instead
        of pygments-highlighted HTML.
    """
    # Boilerplate emitted before the generated task methods.
    file_header = """
# python3 -m pip install locustio - See https://docs.locust.io/en/stable/installation.html
# Running the tests with web ui
# locust -f this_file.py
# Or to run it without web ui for 20(secs) with 10 users and 2 users to spawn every second
# locust -f locust_test.py --no-web -c 10 -r 2 --run-time 20s
from locust import HttpLocust, TaskSet, TaskSequence, seq_task
import json
import random
import string
import uuid


def random_uuid():
    return str(uuid.uuid4())


def random_str(length=10, with_punctuation=False):
    selection = string.ascii_letters + string.digits
    selection = selection + string.punctuation if with_punctuation else selection
    return ''.join(random.choice(selection) for i in range(length))


def random_int(min=0, max=100):
    return random.randint(min, max)


class ApiTestSteps(TaskSequence):
"""
    # Boilerplate emitted after the tasks: the locust user class.
    file_footer = """
class ApiUser(HttpLocust):
    task_set = ApiTestSteps
    host = "localhost"
"""
    # One task per call, numbered so locust preserves the sequence order.
    output = [
        self.__export_api_call(idx, api_call)
        for idx, api_call in enumerate(api_calls)
    ]
    unformatted_code = file_header + "\n".join(output) + file_footer
    formatted_code, _ = format_python_code(unformatted_code)
    return (formatted_code if return_raw else highlight(
        formatted_code, Python3Lexer(), HtmlFormatter()))
def __init__(self, exception):
    """Show *exception*'s formatted traceback in a read-only text widget."""
    self._exception = exception
    super().__init__()
    # Turn the traceback into one HTML-highlighted string.
    frames = traceback.extract_tb(exception.__traceback__)
    summary = traceback.StackSummary.from_list(frames)
    code = "".join(summary.format())
    text = pygments.highlight(code, Python3Lexer(), HtmlFormatter(full=True))
    # Widgets
    self._widget = QtWidgets.QTextEdit()
    self._widget.setReadOnly(True)
    self._widget.setFont(
        QtGui.QFontDatabase.systemFont(QtGui.QFontDatabase.FixedFont)
    )
    self._widget.setHtml(text)
def get_tokens_unprocessed(self, text):
    """Promote names listed in EXTRA_KEYWORDS to Keyword tokens."""
    base_tokens = Python3Lexer.get_tokens_unprocessed(self, text)
    for index, token, value in base_tokens:
        promote = token is Name and value in self.EXTRA_KEYWORDS
        yield index, (Keyword if promote else token), value