def pygmentify_html(text, **kwargs):
    """Syntax-highlight the body of every ``<pre>`` block in *text*.

    The ``lang="..."`` attribute on the ``<pre>`` tag selects the lexer;
    unknown or missing languages fall back to plain text.  Returns *text*
    with each ``<pre>...</pre>`` span replaced by Pygments output.
    """
    from functools import reduce  # builtin reduce was removed in Python 3
    text = smart_text(text)
    lang = default_lang = 'text'
    # A tuple of every lexer alias Pygments knows, used to validate lang=.
    try:
        lexers_iter = LEXERS.itervalues()   # Python 2
    except AttributeError:
        lexers_iter = LEXERS.values()       # Python 3
    lexer_names = reduce(lambda a, b: a + b[2], lexers_iter, ())
    # custom formatter
    formatter = ListHtmlFormatter(encoding='utf-8', **kwargs)
    subs = []
    pre_re = re.compile(r'(<pre[^>]*>)(.*?)(</pre>)', re.DOTALL | re.UNICODE)
    br_re = re.compile(r'<br[^>]*?>', re.UNICODE)
    p_re = re.compile(r'<\/?p[^>]*>', re.UNICODE)
    lang_re = re.compile(r'lang=["\'](.+?)["\']', re.DOTALL | re.UNICODE)
    for pre_match in pre_re.findall(text):
        work_area = pre_match[1]
        work_area = br_re.sub('\n', work_area)
        match = lang_re.search(pre_match[0])
        if match:
            lang = match.group(1).strip()
            if lang not in lexer_names:
                lang = default_lang
        lexer = get_lexer_by_name(lang, stripall=True)
        # Undo the HTML escaping applied by the rich-text editor before
        # re-highlighting.  (The extracted source had these entities
        # decoded into no-op replacements; restored here.)
        work_area = (work_area.replace(u'&nbsp;', u' ')
                              .replace(u'&amp;', u'&')
                              .replace(u'&lt;', u'<')
                              .replace(u'&gt;', u'>')
                              .replace(u'&quot;', u'"')
                              .replace(u'&#39;', u"'"))
        work_area = p_re.sub('', work_area)
        work_area = highlight(work_area, lexer, formatter)
        subs.append([u''.join(pre_match), smart_text(work_area)])
    for sub in subs:
        text = text.replace(sub[0], sub[1], 1)
    return text
def pygmentify_html(text, **kwargs):
    """Highlight every ``<pre lang="...">`` block in *text* with Pygments.

    Identical in contract to the sibling implementation above: the lang
    attribute picks the lexer, unknown languages degrade to plain text.
    """
    from functools import reduce  # builtin reduce was removed in Python 3
    text = smart_text(text)
    lang = default_lang = 'text'
    # a tuple of known lexer names
    try:
        lexers_iter = LEXERS.itervalues()   # Python 2
    except AttributeError:
        lexers_iter = LEXERS.values()       # Python 3
    lexer_names = reduce(lambda a, b: a + b[2], lexers_iter, ())
    # custom formatter
    formatter = ListHtmlFormatter(encoding='utf-8', **kwargs)
    subs = []
    pre_re = re.compile(r'(<pre[^>]*>)(.*?)(</pre>)', re.DOTALL | re.UNICODE)
    br_re = re.compile(r'<br[^>]*?>', re.UNICODE)
    p_re = re.compile(r'<\/?p[^>]*>', re.UNICODE)
    lang_re = re.compile(r'lang=["\'](.+?)["\']', re.DOTALL | re.UNICODE)
    for pre_match in pre_re.findall(text):
        work_area = pre_match[1]
        work_area = br_re.sub('\n', work_area)
        match = lang_re.search(pre_match[0])
        if match:
            lang = match.group(1).strip()
            if lang not in lexer_names:
                lang = default_lang
        lexer = get_lexer_by_name(lang, stripall=True)
        # Restore the entity-unescape chain (it was garbled into no-op
        # replacements when the source was extracted).
        work_area = work_area.replace(u'&nbsp;', u' ').replace(
            u'&amp;', u'&').replace(u'&lt;', u'<').replace(u'&gt;', u'>').replace(
            u'&quot;', u'"').replace(u'&#39;', u"'")
        work_area = p_re.sub('', work_area)
        work_area = highlight(work_area, lexer, formatter)
        subs.append([u''.join(pre_match), smart_text(work_area)])
    for sub in subs:
        text = text.replace(sub[0], sub[1], 1)
    return text
def generate_lexer_docs(): from pygments.lexers import LEXERS out = [] modules = {} moduledocstrings = {} for classname, data in sorted(LEXERS.iteritems(), key=lambda x: x[0]): module = data[0] mod = __import__(module, None, None, [classname]) cls = getattr(mod, classname) if not cls.__doc__: print "Warning: %s does not have a docstring." % classname modules.setdefault(module, []).append( ( classname, cls.__doc__, ", ".join(data[2]) or "None", ", ".join(data[3]).replace("*", "\\*").replace("_", "\\") or "None", ", ".join(data[4]) or "None", ) ) if module not in moduledocstrings: moduledocstrings[module] = mod.__doc__ for module, lexers in sorted(modules.iteritems(), key=lambda x: x[0]): heading = moduledocstrings[module].splitlines()[4].strip().rstrip(".") out.append("\n" + heading + "\n" + "-" * len(heading) + "\n") for data in lexers: out.append(LEXERDOC % data) return "".join(out).decode("utf-8")
def code_highlight(content):
    """Highlight all ``<pre>`` blocks in *content* with Pygments.

    When highlighting is enabled, returns the rewritten HTML (prefixed with
    the appropriate stylesheet ``<link>``) marked safe for templates;
    otherwise returns *content* untouched.
    """
    css = ''
    if not CODE_HIGHLIGHT:
        return content
    soup = BeautifulSoup(content)
    code_blocks = soup.findAll(u'pre')
    if code_blocks:
        # Pick the stylesheet matching the line-number rendering mode.
        if CODE_HIGHLIGHT_LINENOS:
            css = u'<link href="%scms_content/css/code_highlight_table.css" rel="stylesheet" type="text/css" />' % settings.MEDIA_URL
        else:
            css = u'<link href="%scms_content/css/code_highlight_div.css" rel="stylesheet" type="text/css" />' % settings.MEDIA_URL
    for code in code_blocks:
        if code.has_key(u'class'):
            lang = code[u'class']
            if lang not in reduce(lambda a, b: a + b[2], LEXERS.itervalues(), ()):
                lang = CODE_HIGHLIGHT_DEFAULT
            lexer = get_lexer_by_name(lang, stripall=True, encoding=u'utf-8')
        else:
            try:
                lexer = guess_lexer(code.string)
            except ValueError:
                # highlight() needs a lexer *instance*; the original
                # assigned the PythonLexer class itself.
                lexer = PythonLexer()
        format = HtmlFormatter(cssclass=CODE_HIGHLIGHT_CSS, linenos=CODE_HIGHLIGHT_LINENOS)
        code.replaceWith(highlight(code.string, lexer, format))
    # Un-escape ampersands re-escaped by BeautifulSoup serialisation.
    return mark_safe(css + force_unicode(soup).replace('&amp;', '&'))
def generate_lexer_docs(): from pygments.lexers import LEXERS out = [] modules = {} moduledocstrings = {} for classname, data in sorted(LEXERS.iteritems(), key=lambda x: x[0]): module = data[0] mod = __import__(module, None, None, [classname]) cls = getattr(mod, classname) if not cls.__doc__: print "Warning: %s does not have a docstring." % classname modules.setdefault(module, []).append(( classname, cls.__doc__, ', '.join(data[2]) or 'None', ', '.join(data[3]).replace('*', '\\*') or 'None', ', '.join(data[4]) or 'None')) if module not in moduledocstrings: moduledocstrings[module] = mod.__doc__ for module, lexers in sorted(modules.iteritems(), key=lambda x: x[0]): heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.') out.append('\n' + heading + '\n' + '-'*len(heading) + '\n') for data in lexers: out.append(LEXERDOC % data) return ''.join(out).decode('utf-8')
def pygmentify_html(text, **kwargs):
    """Highlight the contents of every ``<pre lang="...">`` block in *text*.

    Extra keyword arguments are forwarded to ``HtmlFormatter`` (they
    override the utf-8 encoding default).
    """
    text = smart_unicode(text)
    lang = default_lang = 'text'
    # a tuple of known lexer names
    lexer_names = reduce(lambda a, b: a + b[2], LEXERS.itervalues(), ())
    # custom formatter
    defaults = {'encoding': 'utf-8'}
    defaults.update(kwargs)
    formatter = HtmlFormatter(**defaults)
    subs = []
    pre_re = re.compile(r'(<pre[^>]*>)(.*?)(</pre>)', re.DOTALL | re.UNICODE)
    br_re = re.compile(r'<br[^>]*?>', re.UNICODE)
    lang_re = re.compile(r'lang="(.+?)"', re.DOTALL | re.UNICODE)
    for pre_match in pre_re.findall(text):
        work_area = pre_match[1]
        work_area = br_re.sub('\n', work_area)
        match = lang_re.search(pre_match[0])
        if match:
            lang = match.group(1).strip()
            if lang not in lexer_names:
                lang = default_lang
        lexer = get_lexer_by_name(lang, stripall=True)
        # Undo the HTML escaping applied by the editor.  (The extracted
        # source had these entities decoded into no-op replacements.)
        work_area = (work_area.replace(u'&nbsp;', u' ')
                              .replace(u'&amp;', u'&')
                              .replace(u'&lt;', u'<')
                              .replace(u'&gt;', u'>')
                              .replace(u'&quot;', u'"')
                              .replace(u'&#39;', u"'"))
        work_area = highlight(work_area, lexer, formatter)
        subs.append([u''.join(pre_match), smart_unicode(work_area)])
    for sub in subs:
        text = text.replace(sub[0], sub[1], 1)
    return text
def create(request):
    """Create a published snippet from a JSON POST.

    Expects a ``data`` POST field containing a JSON object with the snippet
    attributes; returns a plain-text status code in the response body.
    """
    user = _auth(request)
    if not user:
        return HttpResponse('NOT_AUTHORIZED')
    data = json.loads(request.POST.get('data', '{}'))
    data['status'] = 'published'
    form = SnippetForm(data)
    if not form.is_valid():
        return HttpResponse('VALIDATION')
    # Default the lexer up front: the original left `lexer` unbound when
    # guess_lexer() succeeded but matched no known alias, so building the
    # Snippet raised NameError (masked as 'ERROR' by the broad except).
    lexer = u'text'
    try:
        lexer_obj = guess_lexer(data['body'])
        for lex in LEXERS.itervalues():
            if lexer_obj.name == lex[1]:
                lexer = lex[2][0].lower()
                break
    except ClassNotFound:
        pass  # keep the plain-text default
    try:
        snippet = Snippet(
            author=user,
            title=data['title'],
            description=data['description'],
            body=data['body'],
            tags=data['tags'],
            lexer=lexer,
            via=data['via'],
            privacy=data['privacy'],
            status=data['status'],
        )
        snippet.save()
        return HttpResponse('SUCCESS')
    except Exception:
        # Best-effort API response; narrowed from a bare except so
        # KeyboardInterrupt/SystemExit are not swallowed.
        return HttpResponse('ERROR')
def generate_lexer_docs(): from pygments.lexers import LEXERS out = [] modules = {} moduledocstrings = {} for classname, data in sorted(LEXERS.iteritems(), key=lambda x: x[0]): module = data[0] mod = __import__(module, None, None, [classname]) cls = getattr(mod, classname) if not cls.__doc__: print "Warning: %s does not have a docstring." % classname modules.setdefault(module, []).append( (classname, cls.__doc__, ', '.join(data[2]) or 'None', ', '.join(data[3]).replace('*', '\\*').replace('_', '\\') or 'None', ', '.join(data[4]) or 'None')) if module not in moduledocstrings: moduledocstrings[module] = mod.__doc__ for module, lexers in sorted(modules.iteritems(), key=lambda x: x[0]): heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.') out.append('\n' + heading + '\n' + '-' * len(heading) + '\n') for data in lexers: out.append(LEXERDOC % data) return ''.join(out).decode('utf-8')
def _process(request, id=None):
    """Create or update a snippet.

    With *id* this edits (or deletes) an existing snippet; without it a new
    one is created.  Renders the process form on GET or validation failure,
    redirects to the profile page on success.
    """
    if id is not None:  # Update
        snippet = get_object_or_404(Snippet, pk=id)
        form = SnippetForm(instance=snippet)
        # Allow staff OR the author.  The original condition
        # (`is_staff or id mismatch`) denied access to every staff member.
        if not request.user.is_staff and request.user.id != snippet.author_id:
            request.session['flash'] = ['Access denied', 'error']
            return HttpResponseRedirect('/accounts/profile/')
        if 'delete' in request.POST:
            snippet.delete()
            request.session['flash'] = ['#%s deleted successfuly' % id, 'sucess']
            return HttpResponseRedirect('/accounts/profile/')
    else:  # Create
        snippet = None
        form = SnippetForm()
    if request.method == 'POST':
        form = SnippetForm(request.POST)  # Bind form to the POST data
        if not form.is_valid():
            # redirect to form with errors
            return render_to_response('django_snippify/process.html',
                {'form': form},
                context_instance=build_context(request))
        formData = form.save(commit=False)
        formData.pk = id
        if 'preview' in request.POST:
            data = {}
            data['title'] = formData.title
            data['preview_body'] = highlight(formData.body,
                get_lexer_by_name(formData.lexer),
                HtmlFormatter(cssclass='source'))
            data['lines'] = range(1, formData.body.count('\n') + 2)
            data['form'] = form
            data['snippet'] = formData
            # Fixed template path typo ('djnago_snippify' -> 'django_snippify'),
            # which raised TemplateDoesNotExist on every preview.
            return render_to_response('django_snippify/process.html', data,
                context_instance=build_context(request))
        else:  # save
            formData.author = request.user
            if not formData.lexer:
                try:
                    lexer = guess_lexer(formData.body)
                    for lex in LEXERS.itervalues():
                        if lexer.name == lex[1]:
                            formData.lexer = lex[2][0].lower()
                except ClassNotFound:
                    formData.lexer = 'text'
            formData.save()
            if snippet is not None and snippet.body != formData.body:
                # Body changed: archive the previous body as a new version.
                try:
                    last_version = SnippetVersion.objects.order_by('-version').filter(snippet=snippet).all()[0]
                    new_version = SnippetVersion(snippet=snippet,
                        version=last_version.version + 1,
                        body=snippet.body)
                    new_version.save()
                except IndexError:
                    # No versions recorded yet - start at 1.  (Narrowed from
                    # a bare except that hid real database errors.)
                    create_version = SnippetVersion(snippet=snippet,
                        version=1, body=snippet.body)
                    create_version.save()
            request.session['flash'] = ['#%s %s successfuly' % (formData.pk,
                'update' if id is not None else 'created'), 'sucess']
            return HttpResponseRedirect('/accounts/profile/')
    else:
        return render_to_response('django_snippify/process.html',
            {'form': form, 'snippet': snippet},
            context_instance=build_context(request))
def update(request, id=None):
    """Update (or delete/preview) an existing snippet owned by the requester.

    Only the author may edit; anyone else is bounced back to the profile
    page with an 'Access denied' flash message.
    """
    snippet = get_object_or_404(Snippet, pk=id)
    if request.user.id == snippet.author_id:
        if request.method == 'POST':  # If the form has been submitted...
            form = SnippetForm(request.POST)  # A form bound to the POST data
            if form.is_valid():
                formData = form.save(commit=False)
                formData.pk = snippet.pk
                if 'delete' in request.POST:
                    snippet.delete()
                    request.session['flash'] = ['#' + str(formData.pk) + ' deleted successfuly', 'sucess']
                    return HttpResponseRedirect('/accounts/profile/')
                if 'preview' in request.POST:
                    data = {}
                    data['title'] = formData.title
                    data['preview_body'] = highlight(formData.body,
                        get_lexer_by_name(formData.lexer),
                        HtmlFormatter(cssclass='source'))
                    data['lines'] = range(1, formData.body.count('\n') + 2)
                    data['form'] = form
                    data['snippet'] = snippet
                    return render_to_response('snippets/process.html', data,
                        context_instance=build_context(request))
                else:  # save
                    formData.author = request.user
                    if not formData.lexer:
                        try:
                            lexer = guess_lexer(formData.body)
                            for lex in LEXERS.itervalues():
                                if lexer.name == lex[1]:
                                    formData.lexer = lex[2][0].lower()
                        except ClassNotFound:
                            formData.lexer = 'text'
                    formData.save()
                    if snippet.body != formData.body:
                        # Body changed: archive the previous body.
                        try:
                            last_version = SnippetVersion.objects.order_by('-version').filter(snippet=snippet).all()[0]
                            new_version = SnippetVersion(snippet=snippet,
                                version=last_version.version + 1,
                                body=snippet.body)
                            new_version.save()
                        except IndexError:
                            # First version.  (Narrowed from a bare except
                            # that also swallowed database errors.)
                            create_version = SnippetVersion(snippet=snippet,
                                version=1, body=snippet.body)
                            create_version.save()
                    request.session['flash'] = ['#' + str(formData.pk) + ' updated successfuly', 'sucess']
                    return HttpResponseRedirect('/accounts/profile/')  # Redirect after POST
            else:
                return render_to_response('snippets/process.html',
                    {'form': form},
                    context_instance=build_context(request))
        else:
            form = SnippetForm(instance=snippet)
            return render_to_response('snippets/process.html',
                {'form': form, 'snippet': snippet},
                context_instance=build_context(request))
    else:
        request.session['flash'] = ['Access denied', 'error']
        return HttpResponseRedirect('/accounts/profile/')  # Redirect after POST
def create(request):
    """Create a snippet and notify followers who opted into e-mail alerts."""
    data = {}
    if request.method == 'POST':
        data['form'] = SnippetForm(request.POST)  # A form bound to the POST data
        if data['form'].is_valid():
            formData = data['form'].save(commit=False)
            formData.author = request.user
            if not formData.lexer:
                # Infer the lexer alias from the body when none was chosen.
                try:
                    lexer = guess_lexer(formData.body)
                    for lex in LEXERS.itervalues():
                        if lexer.name == lex[1]:
                            formData.lexer = lex[2][0].lower()
                except ClassNotFound:
                    formData.lexer = u'text'
            if 'preview' in request.POST:
                data['title'] = formData.title
                data['preview_body'] = highlight(formData.body,
                    get_lexer_by_name(formData.lexer),
                    HtmlFormatter(cssclass='source'))
                data['lines'] = range(1, formData.body.count('\n') + 2)
                return render_to_response('snippets/process.html', data,
                    context_instance=build_context(request))
            else:  # save - notify followers of this user
                # Normalise line endings WITHOUT str()-coercing: str() on a
                # non-ASCII unicode body raised UnicodeEncodeError.
                formData.body = formData.body.replace("\r\n", "\n")
                formData.save()
                try:
                    followers = UserFollow.objects.select_related().filter(followed_user=request.user).all()
                except Exception:
                    # Notifications are best-effort; never block the save.
                    followers = None
                if followers:
                    # this is very inefficient - find some other way
                    for follower in followers:
                        profile = follower.user.get_profile()
                        if profile.followed_user_created:
                            # User wants to receive a notification
                            queue = EmailQueue(
                                mail_to=follower.user.email,
                                mail_subject="Followed user created a new snippet",
                                mail_body=render_to_string(
                                    'emails/followed_user_created.txt', {
                                        'user': follower.user,
                                        'username': request.user.username,
                                        'SITE': request.META['HTTP_HOST']})
                            )
                            queue.save()
                request.session['flash'] = ['#' + str(formData.pk) + ' created successfuly', 'success']
                return HttpResponseRedirect('/accounts/profile/')  # Redirect after POST
        else:
            return render_to_response('snippets/process.html', data,
                context_instance=build_context(request))
    else:
        data['form'] = SnippetForm()  # An unbound form
    return render_to_response('snippets/process.html', data,
        context_instance=build_context(request))
def get_pygments_lexer(name):
    """Return the Pygments lexer class for *name* (case-insensitive).

    'ipython2'/'ipython3' map to IPython's own lexers; every other name is
    looked up among the standard Pygments aliases.  Unknown names fall
    back to the plain-text lexer with a warning.
    """
    name = name.lower()
    if name == 'ipython2':
        from IPython.lib.lexers import IPythonLexer
        return IPythonLexer
    if name == 'ipython3':
        from IPython.lib.lexers import IPython3Lexer
        return IPython3Lexer
    for _module, cls_name, lexer_aliases, _patterns, _mimes in LEXERS.values():
        if name in lexer_aliases:
            return find_lexer_class(cls_name)
    warn("No lexer found for language %r. Treating as plain text." % name)
    from pygments.lexers.special import TextLexer
    return TextLexer
def code_highlighter(content):
    """Pygments-highlight every ``<pre class="...">`` block in *content*.

    The tag's class attribute names the lexer; blocks with no recognised
    class are left untouched.  Returns the rewritten markup as unicode.
    """
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    from pygments.lexers import LEXERS, get_lexer_by_name
    from BeautifulSoup import BeautifulSoup
    # Gather every alias Pygments knows about into one tuple.
    known_aliases = ()
    for entry in LEXERS.itervalues():
        known_aliases += entry[2]
    html_formatter = HtmlFormatter(cssclass='codehighlight')
    soup = BeautifulSoup(content)
    for pre_tag in soup.findAll('pre'):
        alias = pre_tag.get('class')
        if alias and alias in known_aliases:
            chosen = get_lexer_by_name(alias, stripnl=True, encoding='UTF-8')
            pre_tag.replaceWith(highlight(pre_tag.renderContents(), chosen, html_formatter))
    return force_unicode(soup)
def pygmentify_text(text):
    """Replace each top-level ``<pre>`` in *text* with highlighted markup.

    The ``lang`` attribute picks the lexer (falling back to plain text);
    inline line numbers are emitted.  Returns the result without the
    wrapping ``<div>`` that fragment parsing adds.
    """
    fallback = 'text'
    # Every alias pygments recognises.
    known_aliases = reduce(lambda acc, item: acc + item[2], LEXERS.itervalues(), ())
    inline_formatter = HtmlFormatter(encoding='utf-8', linenos='inline')
    root = html.fragment_fromstring(text, create_parents='div')
    for pre_node in root.findall('pre'):
        if not pre_node.text:
            continue
        alias = pre_node.attrib.get('lang', fallback)
        if alias not in known_aliases:
            alias = fallback
        chosen = get_lexer_by_name(alias, stripall=True)
        rendered = html.fragment_fromstring(highlight(pre_node.text, chosen, inline_formatter))
        root.replace(pre_node, rendered)
    # Strip the enclosing <div></div> added above.
    return html.tostring(root, encoding=unicode, method='xml')[5:-6]
import posixpath

import pygments.lexers.web

# Lexer instances already resolved, keyed by alias.
pygments_lexer_cache = {}

# File extension -> lexer alias; pre-seeded with mappings pygments lacks.
file_ext_to_lexer_alias_cache = {
    '.pycon': 'pycon',
    '.rbcon': 'rbcon',
    '.Rd': 'latex',
    '.svg': 'xml',
    '.jinja': 'jinja'
}

# Add all pygments standard mappings.
for module_name, name, alias_or_aliases, file_extensions, mime_types in list(
        PYGMENTS_LEXERS.values()):
    try:
        alias = alias_or_aliases[0]
    except IndexError:
        # Some entries carry a bare alias rather than a sequence.
        alias = alias_or_aliases
    for ext in file_extensions:
        ext = ext.lstrip("*")
        file_ext_to_lexer_alias_cache[ext] = alias


class SyntaxHighlightMarkdownFilter(DexyFilter):
    """
    Surrounds code with highlighting instructions for Markdown
    """
    aliases = ['pyg4md']
import dexy.exceptions
import posixpath
import pygments.lexers.web

# Lexer instances already resolved, keyed by alias.
pygments_lexer_cache = {}

# File extension -> lexer alias; pre-seeded with mappings pygments lacks.
file_ext_to_lexer_alias_cache = {
    '.pycon': 'pycon',
    '.rbcon': 'rbcon',
    '.Rd': 'latex',
    '.svg': 'xml',
    '.jinja': 'jinja'
}

# Add all pygments standard mappings.
for module_name, name, aliases, file_extensions, _ in PYGMENTS_LEXERS.itervalues():
    alias = aliases[0]
    for ext in file_extensions:
        ext = ext.lstrip("*")
        file_ext_to_lexer_alias_cache[ext] = alias


class SyntaxHighlightRstFilter(DexyFilter):
    """
    Surrounds code with highlighting instructions for ReST
    """
    aliases = ['pyg4rst']
    _settings = {
        'n': ("Number of chars to indent.", 2),
        'data-type': 'sectioned'
    }
@register_for('*.png', '*.jpg', '*.jpeg', '*.gif') def render_images(ctx, blob_obj): w, h = utils.calc_thumb_size(blob_obj.data, (640, 480)) url = ctx.url_for('view_obj', rev=blob_obj.commit.name, path=blob_obj.root_path) raw_url = url + '?raw=1' body = '<a href="%s"><img src="%s" width="%d" height="%s"></a>' % \ (raw_url, raw_url, w, h) return Document(title=blob_obj.name, body=body) formatter = HtmlFormatter(noclasses=True, linenos=True) @register_for(*[p for l in LEXERS.values() for p in l[3]]) def render_sourcecode(ctx, blob_obj): try: data = blob_obj.data.decode('utf-8') except UnicodeDecodeError: data = blob_obj.data try: lexer = guess_lexer_for_filename(blob_obj.name, data) except ValueError: # no lexer found - use the text one instead of an exception lexer = TextLexer() return Document(title=blob_obj.name, description=lexer.name,
def init():
    """Create entries: map each lexer's short name to its file patterns."""
    for lexer_key, lexer_data in LEXERS.items():
        # e.g. "PythonLexer" -> key "python", value the glob patterns.
        short_name = lexer_key.replace("Lexer", "").lower()
        KNOWN_PYGMENTS[short_name] = list(lexer_data[3])
def load_languages(cls):
    """Repopulate the Language table from every lexer pygments ships."""
    from pygments.lexers import LEXERS
    names = [entry[1] for entry in LEXERS.itervalues()]
    cls.objects.all().delete()  # purge all languages
    for lang_name in names:
        Language(name=lang_name).save()  # add language
import dexy.exceptions
import pygments.lexers.web
from pygments.lexers import LEXERS as PYGMENTS_LEXERS

# Lexer instances already resolved, keyed by alias.
pygments_lexer_cache = {}

# File extension -> lexer alias; pre-seeded with mappings pygments lacks.
file_ext_to_lexer_alias_cache = {
    '.pycon': 'pycon',
    '.rbcon': 'rbcon',
    '.Rd': 'latex',
    '.svg': 'xml',
    '.jinja': 'jinja'
}

# Add all pygments standard mappings.
for module_name, name, aliases, file_extensions, _ in PYGMENTS_LEXERS.itervalues():
    alias = aliases[0]
    for ext in file_extensions:
        ext = ext.lstrip("*")
        file_ext_to_lexer_alias_cache[ext] = alias


class SyntaxHighlightRstFilter(DexyFilter):
    """
    Surrounds code with highlighting instructions for ReST
    """
    aliases = ['pyg4rst']
    _settings = {
        'n': ("Number of chars to indent.", 2),
        'output-data-type': 'sectioned'
    }
def _lexer_names():
    """Return sorted ``(primary_alias, display_name)`` pairs for all lexers."""
    pairs = [(info[2][0], info[1]) for info in LEXERS.itervalues()]
    pairs.sort()
    return tuple(pairs)
#from django import template
from pygments.lexers import LEXERS, get_lexer_by_name
from pygments import highlight
from pygments.formatters import HtmlFormatter
#from django.template.defaultfilters import stringfilter
from google.appengine.ext import webapp

register = webapp.template.create_template_register()
#register = template.Library()

# Every alias pygments recognises, as one flat tuple.
_lexer_names = reduce(lambda acc, entry: acc + entry[2], LEXERS.itervalues(), ())
_formatter = HtmlFormatter(cssclass='highlight')


#@register.filter
#@stringfilter
def pygments(value, lexer_name):
    """Highlight *value* when *lexer_name* is a known pygments alias."""
    if lexer_name and lexer_name in _lexer_names:
        chosen = get_lexer_by_name(lexer_name, stripnl=True, encoding='UTF-8')
        return highlight(value, chosen, _formatter)
    return value

register.filter(pygments)


def shortor(value):
    """Truncate *value* to its first five CRLF-separated lines."""
    rows = value.split('\r\n')
    if len(rows) > 5:
        return ('\r\n').join(rows[:5])
    return value

register.filter(shortor)
def process(request, pk=None):
    """
    Create/Update snippet.

    With *pk* this edits (or deletes) an existing snippet - only the author
    or staff may do so.  Without it a new snippet is created.  Successful
    saves archive the previous body as a SnippetVersion and redirect to the
    profile page.
    """
    if pk is not None:  # Update
        snippet = get_object_or_404(Snippet, pk=pk)
        form = SnippetUpdateForm(instance=snippet)
        # Only the author or a staff member may touch an existing snippet.
        if request.user != snippet.author and not request.user.is_staff:
            request.session['flash'] = ['Access denied', 'error']
            return HttpResponseRedirect('/accounts/profile/')
        if 'delete' in request.POST:
            snippet.delete()
            request.session['flash'] = ['#%s deleted successfuly' % pk, 'sucess']
            return HttpResponseRedirect('/accounts/profile/')
    else:  # Create
        snippet = None
        form = SnippetCreateForm()
    if request.method == 'POST':
        if snippet is not None:
            form = SnippetUpdateForm(request.POST, instance=snippet)
        else:
            form = SnippetCreateForm(request.POST)
        if not form.is_valid():
            # redirect to form with errors
            return render_to_response('snippets/process.html', {
                'form': form
            }, context_instance=build_context(request))
        formData = form.save(commit=False)
        if snippet is None:
            formData.author = request.user
        if not formData.lexer:
            # Infer the lexer alias from the body when none was chosen.
            try:
                lexer = guess_lexer(formData.body)
                for lex in LEXERS.itervalues():
                    if lexer.name == lex[1]:
                        formData.lexer = lex[2][0].lower()
            except ClassNotFound:
                formData.lexer = 'text'
        formData.save()
        form.save_m2m()  # Save tags
        if snippet is not None and snippet.body != formData.body:
            # The body has changed - create a new version
            try:
                last_version = SnippetVersion.objects.order_by('-version').\
                    filter(snippet=snippet).all()[0]
                version = SnippetVersion(snippet=snippet,
                    version=last_version.version + 1, body=snippet.body)
            except IndexError:
                # No versions yet - start at 1.  (Narrowed from a bare
                # except, which also hid genuine database errors.)
                version = SnippetVersion(snippet=snippet, version=1,
                    body=snippet.body)
            version.save()
        request.session['flash'] = ['#%s %s successfuly' % (formData.pk,
            'update' if pk is not None else 'created'), 'sucess']
        return HttpResponseRedirect('/accounts/profile/')
    else:
        return render_to_response('snippets/process.html', {
            'form': form,
            'snippet': snippet
        }, context_instance=build_context(request))