def on_changed_body(target, value, oldvalue, initiator):
    """SQLAlchemy 'set' listener: render the Markdown body to sanitized HTML.

    Stores the full rendering in ``target.body_html`` and a paragraphs-only
    rendering in ``target.body_abstract``.

    Args:
        target: model instance whose body changed.
        value: new Markdown source text.
        oldvalue: previous value (unused).
        initiator: SQLAlchemy event initiator (unused).
    """
    allowed_tags = ['a', 'abbr', 'b', 'blockquote', 'code', 'em', 'i', 'li',
                    'ol', 'ul', 'pre', 'strong', 'h1', 'h2', 'h3', 'p']
    # Render the Markdown once; the original rendered it twice with
    # identical arguments, doubling the work for no benefit.
    rendered = markdown(value, output_format='html')
    target.body_html = bleach.linkify(
        bleach.clean(rendered, tags=allowed_tags, strip=True))
    # The abstract keeps only <p> tags, stripping all other markup.
    target.body_abstract = bleach.linkify(
        bleach.clean(rendered, tags=['p'], strip=True))
def test_email_link():
    """Email addresses are linkified only when parse_email is enabled."""
    # Without parse_email, the address passes through untouched.
    eq_('a [email protected] mailto',
        linkify('a [email protected] mailto'))
    # With parse_email, a mailto: anchor is produced.
    eq_('a <a href="mailto:[email protected]" rel="nofollow">[email protected]</a> mailto',
        linkify('a [email protected] mailto', parse_email=True))
    # An existing anchor around the address is preserved.
    eq_('email to <a href="*****@*****.**" rel="nofollow">[email protected]</a>',
        linkify('email to <a href="*****@*****.**">[email protected]</a>',
                parse_email=True))
def contains_spam_url_patterns(text):
    """Return True when any configured spam URL pattern occurs in a link
    (text URL or existing anchor) found in *text*."""
    assert settings.SPAM_URL_PATTERNS, "Don't use it without some patterns"
    html = bleach.clean(text)
    problems = []
    # One alternation regex covering every literal pattern.
    regex = re.compile(
        r"|".join(re.escape(pattern) for pattern in settings.SPAM_URL_PATTERNS))

    def scrutinize_link(attrs, new, **kwargs):
        href = attrs[(None, "href")]
        # Leave untouched
        if href.startswith("mailto:") or href.startswith("tel:"):
            return
        # Bail if it's not a HTTP URL, such as ssh:// or ftp://
        if not (href.startswith("http:") or href.startswith("https:")):
            return
        matches = regex.findall(href)
        if matches:
            problems.append(matches)

    # linkify walks every link, invoking the callback for each href.
    bleach.linkify(html, callbacks=[scrutinize_link])
    return bool(problems)
def test_simple_link():
    """Bare http/https URLs embedded in text become nofollow anchors."""
    for url in ('http://example.com', 'https://example.com'):
        expected = 'a <a href="{0}" rel="nofollow">{0}</a> link'.format(url)
        eq_(expected, linkify('a {0} link'.format(url)))
def test_link_in_html():
    """URLs inside existing inline markup are linkified in place."""
    eq_('<i><a href="http://yy.com" rel="nofollow">http://yy.com</a></i>',
        linkify('<i>http://yy.com</i>'))
    eq_('<em><strong><a href="http://xx.com" rel="nofollow">http://xx.com'
        '</a></strong></em>',
        linkify('<em><strong>http://xx.com</strong></em>'))
def html_tags_edit(self):
    """Turn #hashtags and @mentions in ``self.content`` into search links,
    then sanitize and linkify the result for safe template rendering.

    Returns:
        A ``mark_safe`` string on success, or the raw content on failure.
    """
    text = self.content
    attrs = {
        '*': ['class'],
        'a': ['href', 'rel'],
        'img': ['alt', 'src'],
    }
    try:
        final_text = ""
        # Matches #word or @word (the comma in the class is harmless but
        # preserved for compatibility: ','-prefixed words also match).
        pat = re.compile(r'[#,@](\w+)')
        pos = 0
        for hashtag in pat.finditer(text):  # fixed typo: was 'hasgtag'
            search_query = "'" + "/search?search=" + urllib.quote(hashtag.group()) + "'"
            final_text += (text[pos:hashtag.span()[0]] + "<a href="
                           + search_query + ">" + hashtag.group() + "</a>")
            pos = hashtag.span()[1]
        final_text += text[pos:]
        # Note: with no hashtags, final_text == text, so the original's
        # "if final_text == ''" branch was redundant — both branches ran
        # the identical clean/linkify pipeline. One path suffices.
        final_text = bleach.clean(final_text, tags=['img', 'a'],
                                  attributes=attrs, strip=True)
        final_text = bleach.linkify(final_text)
        return mark_safe(final_text)
    except Exception:
        # Narrowed from a bare 'except:' which also swallowed
        # SystemExit/KeyboardInterrupt. On failure, fall back to raw text.
        return text
def test_elements_inside_links():
    """Child elements inside an <a> survive the nofollow rewrite."""
    # Attribute order may vary between serializers, so accept either form.
    in_(('<a href="#" rel="nofollow">hello<br></a>',
         '<a rel="nofollow" href="#">hello<br></a>'),
        linkify('<a href="#">hello<br></a>'))
    in_(('<a href="#" rel="nofollow"><strong>bold</strong> hello<br></a>',
         '<a rel="nofollow" href="#"><strong>bold</strong> hello<br></a>'),
        linkify('<a href="#"><strong>bold</strong> hello<br></a>'))
def test_ignore_bad_protocols():
    """Text fused onto a scheme must not be treated as part of the URL."""
    assert linkify('foohttp://bar') == 'foohttp://bar'
    # The domain portion is still recognized and linkified on its own.
    assert (linkify('fohttp://exampl.com')
            == 'fohttp://<a href="http://exampl.com" rel="nofollow">exampl.com</a>')
def test_rel_already_there(self):
    """Make sure rel attribute is updated not replaced"""
    linked = 'Click <a href="http://example.com" rel="tooltip">here</a>.'
    link_good = ('Click <a href="http://example.com" rel="tooltip nofollow">'
                 'here</a>.')
    assert linkify(linked) == link_good
    # Running linkify again must be a no-op.
    assert linkify(link_good) == link_good
def process_formdata(self, valuelist):
    """Sanitize submitted TinyMCE HTML after the parent class parses it."""
    super(TinyMceField, self).process_formdata(valuelist)
    # Sanitize data against the field's configured whitelist.
    self.data = bleach.clean(self.data,
                             tags=self.sanitize_tags,
                             attributes=self.sanitize_attributes)
    if self.linkify:
        if self.nofollow:
            # Default callbacks add rel="nofollow" to generated anchors.
            self.data = bleach.linkify(self.data)
        else:
            # No callbacks: linkify without adding rel attributes.
            self.data = bleach.linkify(self.data, callbacks=[])
def test_idempotent():
    """Make sure that applying the filter twice doesn't change anything."""
    dirty = u'<span>invalid & </span> < extra http://link.com<em>'
    cleaned_once = bleach.clean(dirty)
    eq_(cleaned_once, bleach.clean(cleaned_once))
    linked_once = bleach.linkify(dirty)
    eq_(linked_once, bleach.linkify(linked_once))
def test_trailing_slash():
    """URLs ending in '/' keep the slash in both href and link text."""
    for url in ('http://example.com/',
                'http://example.com/foo/',
                'http://example.com/foo/bar/'):
        eq_('<a href="{0}" rel="nofollow">{0}</a>'.format(url), linkify(url))
def test_link_in_html():
    """URLs nested inside inline markup are linkified in place."""
    assert (linkify('<i>http://yy.com</i>')
            == '<i><a href="http://yy.com" rel="nofollow">http://yy.com</a></i>')
    assert (linkify('<em><strong>http://xx.com</strong></em>')
            == '<em><strong><a href="http://xx.com" rel="nofollow">'
               'http://xx.com</a></strong></em>')
def test_email_link_escaping():
    """Quoted local parts survive linkification; note the href quoting
    flips between single and double quotes depending on which characters
    appear inside the address."""
    eq_('''<a href='mailto:"james"@example.com' rel="nofollow">'''
        '''"james"@example.com</a>''',
        linkify('"james"@example.com', parse_email=True))
    eq_('''<a href="mailto:"j'ames"@example.com" rel="nofollow">'''
        '''"j'ames"@example.com</a>''',
        linkify('"j\'ames"@example.com', parse_email=True))
    eq_('''<a href='mailto:"ja>mes"@example.com' rel="nofollow">'''
        '''"ja>mes"@example.com</a>''',
        linkify('"ja>mes"@example.com', parse_email=True))
def remove_links(html):
    """Remove all the links from the given html."""
    # First pass: turn plain-text URLs into real <a> elements so they are
    # caught by the second pass too.
    linkified = bleach.linkify(unicode(html))
    # Second pass: a callback that blanks each link's text, turning every
    # anchor (e.g. <a href="...">...</a>) into an empty <a></a>.
    emptied = bleach.linkify(linkified,
                             callbacks=[lambda attrs, new: {"_text": ""}])
    # Finally, strip out the now-empty anchors.
    return emptied.replace("<a></a>", "")
def test_elements_inside_links():
    """Child elements inside an <a> survive the nofollow rewrite."""
    assert (linkify('<a href="#">hello<br></a>')
            == '<a href="#" rel="nofollow">hello<br></a>')
    assert (linkify('<a href="#"><strong>bold</strong> hello<br></a>')
            == '<a href="#" rel="nofollow"><strong>bold</strong> hello<br></a>')
def test_link_query():
    """Query strings are preserved; schemeless domains get http:// added."""
    eq_('<a href="http://xx.com/?test=win" rel="nofollow">'
        'http://xx.com/?test=win</a>',
        linkify('http://xx.com/?test=win'))
    eq_('<a href="http://xx.com/?test=win" rel="nofollow">'
        'xx.com/?test=win</a>',
        linkify('xx.com/?test=win'))
    eq_('<a href="http://xx.com?test=win" rel="nofollow">'
        'xx.com?test=win</a>',
        linkify('xx.com?test=win'))
def test_linkify_ignore_more_path():
    """Ignored closing parentheses if first in the path"""
    cases = (
        ('(foo http://bar.com/)',
         '(foo <a href="http://bar.com/" rel="nofollow">http://bar.com/</a>)'),
        ('(foo http://bar.com)',
         '(foo <a href="http://bar.com" rel="nofollow">http://bar.com</a>)'),
    )
    for source, expected in cases:
        eq_(expected, linkify(source))
def test_rel_already_there():
    """Make sure rel attribute is updated not replaced"""
    linked = 'Click <a href="http://example.com" rel="tooltip">here</a>.'
    # Either attribute ordering is acceptable output.
    link_good = (
        'Click <a href="http://example.com" rel="tooltip nofollow">here</a>.',
        'Click <a rel="tooltip nofollow" href="http://example.com">here</a>.',
    )
    in_(link_good, bleach.linkify(linked))
    # Re-linkifying a correct string must not change it.
    in_(link_good, bleach.linkify(link_good[0]))
def render_view(self, field, **kwargs):
    """Render field data as linkified mail links with an envelope icon.

    FieldList fields produce one line per entry; scalar fields produce a
    single inline link. Returns an empty string when nothing linkifies.
    """
    links = u''
    if isinstance(field, wtforms.fields.FieldList):
        # Accumulate one "<link> <icon><br>" line per list entry.
        for entry in field.entries:
            link = bleach.linkify(entry.data, parse_email=True)
            if link:
                links += u' {} <i class="fa fa-envelope"></i><br>'.format(link)
    else:
        link = bleach.linkify(field.object_data, parse_email=True)
        if link:
            links = u'{} <i class="fa fa-envelope"></i>'.format(link)
    return links
def test_only_text_is_linkified(self):
    """Strings pass through; non-string inputs raise TypeError."""
    assert linkify('text') == 'text'
    for bad_input in (int, None):
        with pytest.raises(TypeError):
            linkify(bad_input)
def test_bleach(self):
    "Testing html cleaning"
    eq = self.assertEqual
    inp = '''http://www.psu.edu'''
    exp = '''<a href="http://www.psu.edu" rel="nofollow">http://www.psu.edu</a>'''
    got = bleach.linkify(inp)
    # NOTE(review): the assertion below was disabled in the original, so
    # this test only exercises linkify without verifying its output —
    # and the second 'exp' does not match the second 'inp' either.
    #eq(got, exp)
    inp = '''http://%s/u/123''' % settings.SITE_DOMAIN
    exp = '''<a href="http://www.psu.edu" rel="nofollow">http://www.psu.edu</a>'''
    got = bleach.linkify(inp)
def on_changed_body(target, value, oldvalue, initiator):
    """Render the post body — and its slug, truncated at the optional
    <!--more--> marker — to sanitized, linkified HTML."""
    tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code', 'em', 'i',
            'li', 'ol', 'pre', 'strong', 'ul', 'h1', 'h2', 'h3', 'p',
            'div', 'img']
    attrs = {
        '*': ['class'],
        'a': ['href', 'rel'],
        'img': ['src', 'alt'],
    }
    # Everything before the <!--more--> marker becomes the slug; without a
    # marker the whole body is used.
    marker = value.find("<!--more-->")
    body_slug_tpl = value if marker == -1 else value[:marker]
    target.body_html = bleach.linkify(
        bleach.clean(markdown(value, output_format='html'),
                     tags, attrs, strip=True))
    target.body_slug = bleach.linkify(
        bleach.clean(markdown(body_slug_tpl, output_format='html'),
                     tags, attrs, strip=True))
def test_link_query():
    """Query strings are preserved; schemeless domains get http:// added."""
    cases = (
        ('http://xx.com/?test=win',
         '<a href="http://xx.com/?test=win" rel="nofollow">http://xx.com/?test=win</a>'),
        ('xx.com/?test=win',
         '<a href="http://xx.com/?test=win" rel="nofollow">xx.com/?test=win</a>'),
        ('xx.com?test=win',
         '<a href="http://xx.com?test=win" rel="nofollow">xx.com?test=win</a>'),
    )
    for source, expected in cases:
        assert linkify(source) == expected
def test_trailing_slash():
    """URLs ending in '/' keep the slash in both href and link text."""
    for url in ('http://examp.com/',
                'http://example.com/foo/',
                'http://example.com/foo/bar/'):
        assert linkify(url) == '<a href="{0}" rel="nofollow">{0}</a>'.format(url)
def test_simple_link():
    """URLs in prose become nofollow anchors; bare domains gain http://."""
    assert (linkify('a http://example.com link')
            == 'a <a href="http://example.com" rel="nofollow">http://example.com</a> link')
    assert (linkify('a https://example.com link')
            == 'a <a href="https://example.com" rel="nofollow">https://example.com</a> link')
    # A schemeless domain is linked with an http:// scheme added.
    assert (linkify('a example.com link')
            == 'a <a href="http://example.com" rel="nofollow">example.com</a> link')
def test_trailing_slash():
    """Trailing slashes survive linkification (either attribute order)."""
    for url in ('http://examp.com/',
                'http://example.com/foo/',
                'http://example.com/foo/bar/'):
        in_(('<a href="{0}" rel="nofollow">{0}</a>'.format(url),
             '<a rel="nofollow" href="{0}">{0}</a>'.format(url)),
            linkify(url))
def test_simple_link():
    """URLs in prose become nofollow anchors (either attribute order)."""
    def both_orders(href, text):
        # Expected pair covering both possible attribute serializations.
        return ('a <a href="{0}" rel="nofollow">{1}</a> link'.format(href, text),
                'a <a rel="nofollow" href="{0}">{1}</a> link'.format(href, text))

    in_(both_orders('http://example.com', 'http://example.com'),
        linkify('a http://example.com link'))
    in_(both_orders('https://example.com', 'https://example.com'),
        linkify('a https://example.com link'))
    # Schemeless domain: href gains http:// while the text stays bare.
    in_(both_orders('http://example.com', 'example.com'),
        linkify('a example.com link'))
def test_skip_pre():
    """Skip linkification in <pre> tags."""
    simple = 'http://xx.com <pre>http://xx.com</pre>'
    linked = ('<a href="http://xx.com" rel="nofollow">http://xx.com</a> '
              '<pre>http://xx.com</pre>')
    all_linked = ('<a href="http://xx.com" rel="nofollow">http://xx.com</a> '
                  '<pre><a href="http://xx.com" rel="nofollow">http://xx.com'
                  '</a></pre>')
    eq_(linked, linkify(simple, skip_pre=True))
    eq_(all_linked, linkify(simple))
    # Existing anchors inside <pre> still receive rel="nofollow",
    # regardless of skip_pre.
    already_linked = '<pre><a href="http://xx.com">xx</a></pre>'
    nofollowed = '<pre><a href="http://xx.com" rel="nofollow">xx</a></pre>'
    eq_(nofollowed, linkify(already_linked))
    eq_(nofollowed, linkify(already_linked, skip_pre=True))
def has_links(html):
    """Return True if links (text or markup) are found in the given html."""
    class LinkFound(Exception):
        # Raised by the callback the moment bleach encounters any link,
        # so we can stop early instead of processing the whole document.
        pass

    def raise_on_link(attrs, new):
        raise LinkFound

    try:
        bleach.linkify(html, callbacks=[raise_on_link])
    except LinkFound:
        return True
    return False
def on_changed_body(target, value, oldvalue, initiator):
    """Render the Markdown body to sanitized, linkified HTML."""
    allowed_tags = ['a', 'abbr', 'acronym', 'b', 'code', 'em', 'i', 'strong']
    rendered = markdown(value, output_format='html')
    cleaned = bleach.clean(rendered, tags=allowed_tags, strip=True)
    target.body_html = bleach.linkify(cleaned)
def linkfy_post(self):
    """Escape the caption, then turn any URLs in it into anchors.

    (Name kept as-is — 'linkfy' typo is part of the public interface.)
    """
    escaped = escape(self.caption)
    return bleach.linkify(escaped)
def trustedcommonmark(value):
    """Returns HTML given some commonmark Markdown. Also allows real HTML,
    so do not use this with untrusted input."""
    # Linkify first so bare URLs/emails become anchors before rendering.
    linkified = bleach.linkify(value, parse_email=True)
    return parse_commonmark(linkified)
def test_russian_simple():
    """Cyrillic text round-trips unchanged through both filters."""
    text = 'Домашняя'
    assert clean(text) == text
    assert linkify(text) == text
def on_change_body(target, value, oldvalue, initiator):
    """Render the Markdown body to sanitized, linkified HTML."""
    allowed_tags = ["a", "abbr", "acronym", "b", "code", "em", "i"]
    rendered = markdown(value, output_format="html")
    cleaned = bleach.clean(rendered, tags=allowed_tags, strip=True)
    target.body_html = bleach.linkify(cleaned)
def on_body_change(target, value, oldvalue, initiator):
    """Render Markdown to HTML, strip disallowed tags, and linkify URLs."""
    allowed_tags = ['a', 'ul', 'strong', 'p', 'h1', 'h2', 'h3']
    rendered = markdown(value, output_format='html')
    target.html_body = bleach.linkify(
        bleach.clean(rendered, tags=allowed_tags, strip=True))
def test_japanese_safe_simple():
    """Japanese text round-trips unchanged through both filters."""
    text = 'ヘルプとチュートリアル'
    assert clean(text) == text
    assert linkify(text) == text
def markup(context, text, bodhi=True):
    """
    Return HTML from a markdown string.

    Args:
        context (mako.runtime.Context): Unused.
        text (str): Markdown text to be converted to HTML.
        bodhi (bool): Enable or disable Bodhi markup extensions.
    Returns:
        str: HTML representation of the markdown text.
    """
    # determine the major component of the bleach version installed.
    # this is similar to the approach that Pagure uses to determine the bleach version
    # https://pagure.io/pagure/pull-request/2269#request_diff
    bleach_major_v = int(bleach.__version__.split('.')[0])

    # the only difference in the bleach API that we use between v1 and v2 is
    # the formatting of the attributes parameter. Bleach 1 only allowed you
    # to specify attributes to be whitelisted for all whitelisted tags.
    # Bleach 2 requires you to specify the list of attributes whitelisted for
    # specific tags.
    if bleach_major_v >= 2:
        markdown_attrs = {
            "img": ["src", "alt", "title"],
            "a": ["href", "alt", "title"],
            "div": ["class"],
        }
    else:
        markdown_attrs = ["src", "href", "alt", "title", "class"]

    markdown_tags = [
        "h1", "h2", "h3", "h4", "h5", "h6",
        "b", "i", "strong", "em", "tt",
        "p", "br",
        "span", "div", "blockquote", "code", "hr", "pre",
        "ul", "ol", "li", "dd", "dt",
        "img",
        "a",
    ]

    extensions = [
        'markdown.extensions.fenced_code',
    ]
    # Fixed: was 'if bodhi == True:' — explicit comparison to True (PEP8
    # E712) would skip the extension for truthy non-True values; plain
    # truthiness is the intended check for a boolean flag.
    if bodhi:
        extensions.append(ffmarkdown.BodhiExtension())

    markdown_text = markdown.markdown(text, extensions=extensions)

    # previously, we linkified text in ffmarkdown.py, but this was causing issues like #1721
    # so now we use the bleach linkifier to do this for us.
    markdown_text = bleach.linkify(markdown_text, parse_email=True)

    # previously, we used the Safe Mode in python-markdown to strip all HTML
    # tags. Safe Mode is deprecated, so we now use Bleach to sanitize all HTML
    # tags after running it through the markdown parser
    return bleach.clean(markdown_text, tags=markdown_tags,
                        attributes=markdown_attrs)
def linkify(value):
    """Linkify bare URLs in *value*, leaving <pre>/<code> content and
    email addresses untouched."""
    return bleach.linkify(value,
                          parse_email=False,
                          skip_tags=['pre', 'code'])
def snap_details(snap_name):
    """
    A view to display the snap details page for specific snaps.

    This queries the snapcraft API (api.snapcraft.io) and passes
    some of the data through to the snap-details.html template,
    with appropriate sanitation.
    """
    today = datetime.datetime.utcnow().date()
    week_ago = today - relativedelta.relativedelta(weeks=1)

    details_response = _get_from_cache(
        snap_details_url.format(snap_name=snap_name),
        headers=details_query_headers)
    details = details_response.json()

    if details_response.status_code >= 400:
        message = ('Failed to get snap details for {snap_name}'.format(
            **locals()))
        if details_response.status_code == 404:
            message = 'Snap not found: {snap_name}'.format(**locals())
        flask.abort(details_response.status_code, message)

    metrics_query_json = [{
        "metric_name": "installed_base_by_country_percent",
        "snap_id": details['snap_id'],
        "start": week_ago.strftime('%Y-%m-%d'),
        "end": today.strftime('%Y-%m-%d')
    }]
    metrics_response = _get_from_cache(
        snap_metrics_url.format(snap_name=snap_name),
        headers=metrics_query_headers,
        json=metrics_query_json)
    geodata = metrics_response.json()[0]['series']

    # Normalise geodata from API: average the non-null daily percentages
    # per country, or None when no data exists.
    users_by_country = {}
    for country_percentages in geodata:
        country_code = country_percentages['name']
        percentages = []
        for daily_percent in country_percentages['values']:
            if daily_percent is not None:
                percentages.append(daily_percent)
        if len(percentages) > 0:
            users_by_country[country_code] = (sum(percentages) /
                                              len(percentages))
        else:
            users_by_country[country_code] = None

    # Build up country info for every country
    country_data = {}
    for country in pycountry.countries:
        country_data[country.numeric] = {
            'name': country.name,
            'code': country.alpha_2,
            'percentage_of_users': users_by_country.get(country.alpha_2)
        }

    description = details['description'].strip()
    paragraphs = re.compile(r'[\n\r]{2,}').split(description)
    formatted_paragraphs = []

    # Sanitise paragraphs
    def external(attrs, new=False):
        # Tag links leaving snapcraft.io with the external-link class.
        url_parts = urllib.parse.urlparse(attrs[(None, "href")])
        if url_parts.netloc and url_parts.netloc != 'snapcraft.io':
            if (None, "class") not in attrs:
                attrs[(None, "class")] = "p-link--external"
            elif "p-link--external" not in attrs[(None, "class")]:
                attrs[(None, "class")] += " p-link--external"
        return attrs

    # BUGFIX: the original did `callbacks = bleach.linkifier.DEFAULT_CALLBACKS`
    # then `callbacks.append(external)` INSIDE the paragraph loop. That
    # aliases (not copies) the module-level DEFAULT_CALLBACKS list, so
    # `external` was appended to it once per paragraph on every request,
    # growing it without bound and leaking into all other bleach.linkify
    # users. Build a fresh callback list once instead.
    callbacks = list(bleach.linkifier.DEFAULT_CALLBACKS) + [external]
    for paragraph in paragraphs:
        paragraph = bleach.clean(paragraph, tags=[])
        paragraph = bleach.linkify(paragraph, callbacks=callbacks)
        formatted_paragraphs.append(paragraph)

    context = {
        # Data direct from details API
        'snap_title': details['title'],
        'package_name': details['package_name'],
        'icon_url': details['icon_url'],
        'version': details['version'],
        'revision': details['revision'],
        'license': details['license'],
        'publisher': details['publisher'],
        'screenshot_urls': details['screenshot_urls'],
        'prices': details['prices'],
        'support_url': details.get('support_url'),
        'summary': details['summary'],
        'description_paragraphs': formatted_paragraphs,

        # Transformed API data
        'filesize': humanize.naturalsize(details['binary_filesize']),
        'last_updated': (
            humanize.naturaldate(parser.parse(details.get('last_updated')))),

        # Data from metrics API
        'countries': country_data,

        # Context info
        'details_api_error': details_response.old_data_from_error,
        'metrics_api_error': metrics_response.old_data_from_error,
        'is_linux': 'Linux' in flask.request.headers['User-Agent']
    }

    return flask.render_template('snap-details.html', **context)
def linkify(input):
    """Turn plain-text URLs in *input* into HTML anchors.

    NOTE(review): the parameter name shadows the builtin ``input``; kept
    unchanged for backward compatibility with keyword-argument callers.
    """
    result = bleach.linkify(input)
    return result
def linkfy_post(self):
    """Escape the post text, then turn any URLs in it into anchors.

    (Name kept as-is — 'linkfy' typo is part of the public interface.)
    """
    escaped = escape(self.post)
    return bleach.linkify(escaped)
def linkify_with_outgoing(text):
    """Wrapper around bleach.linkify: uses get_outgoing_url."""
    # Bounce every link through the outgoing-URL redirector, then add
    # rel="nofollow" via the stock callback.
    return bleach.linkify(
        six.text_type(text),
        callbacks=[linkify_bounce_url_callback, bleach.callbacks.nofollow])
def linkify(text):
    """Turn plain-text URLs in *text* into HTML anchors."""
    linked = bleach.linkify(text)
    return linked
def linkify(text):
    """Strip all HTML from *text*, then turn bare URLs into anchors."""
    stripped = bleach.clean(text, tags=[], attributes={}, styles=[],
                            strip=True)
    return bleach.linkify(stripped)
def on_changed_content(target, value, oldvalue, initiator):
    """Render Markdown (with the 'extra' extension) and linkify the HTML."""
    rendered = markdown(value, output_format='html',
                        extensions=['markdown.extensions.extra'])
    target.content_html = bleach.linkify(rendered)
def clean_html_tags(data):
    """Sanitize *data* against the XSS-safe tag whitelist, then linkify
    bare URLs, leaving <pre> blocks alone."""
    cleaned = bleach.clean(data, generally_xss_safe)
    return bleach.linkify(cleaned, skip_pre=True)
def on_changed_body(target, value, oldvalue, initiator):
    """Strip everything but anchors from the body and linkify bare URLs."""
    allowed_tags = ['a']
    cleaned = bleach.clean(value, tags=allowed_tags, strip=True)
    target.body_html = bleach.linkify(cleaned)
def proposal_view(space, proposal):
    """Display a proposal page; handle comment posting/editing/deletion.

    GET renders the proposal with its comment tree. POST handles two
    forms (dispatched on the 'form.id' field): 'newcomment' creates or
    edits a comment and queues notification emails; 'delcomment' deletes
    one. Both POST paths redirect with HTTP 303.
    """
    # Canonical-URL guard: permanent redirect if the proposal belongs to
    # a different space than the one in the URL.
    if proposal.proposal_space != space:
        return redirect(proposal.url_for(), code=301)
    # Top-level comments, most-voted first (votes sorted in Python after
    # fetching by creation time).
    comments = sorted(
        Comment.query.filter_by(commentspace=proposal.comments,
                                parent=None).order_by('created_at').all(),
        key=lambda c: c.votes.count, reverse=True)
    commentform = CommentForm(model=Comment)
    delcommentform = DeleteCommentForm()
    if request.method == 'POST':
        if request.form.get('form.id') == 'newcomment' and commentform.validate() and 'new-comment' in g.permissions:
            # Emails to send after commit: dicts of send_mail kwargs plus
            # a 'template' key that is popped before sending.
            send_mail_info = []
            if commentform.comment_edit_id.data:
                # Edit path: update an existing comment in place.
                comment = Comment.query.get(int(commentform.comment_edit_id.data))
                if comment:
                    if 'edit-comment' in comment.permissions(g.user, g.permissions):
                        comment.message = commentform.message.data
                        comment.edited_at = datetime.utcnow()
                        flash(_("Your comment has been edited"), 'info')
                    else:
                        flash(_("You can only edit your own comments"), 'info')
                else:
                    flash(_("No such comment"), 'error')
            else:
                # Create path: new comment, possibly a reply to a parent.
                comment = Comment(user=g.user, commentspace=proposal.comments,
                                  message=commentform.message.data)
                if commentform.parent_id.data:
                    parent = Comment.query.get(int(commentform.parent_id.data))
                    if parent.user.email:
                        if parent.user == proposal.user:  # check if parent comment & proposal owner are same
                            if not g.user == parent.user:  # check if parent comment is by proposal owner
                                send_mail_info.append({
                                    'to': proposal.user.email or proposal.email,
                                    'subject': u"{space} Funnel: {proposal}".format(
                                        space=space.title, proposal=proposal.title),
                                    'template': 'proposal_comment_reply_email.md'})
                        else:  # send mail to parent comment owner & proposal owner
                            if not parent.user == g.user:
                                send_mail_info.append({
                                    'to': parent.user.email,
                                    'subject': u"{space} Funnel: {proposal}".format(
                                        space=space.title, proposal=proposal.title),
                                    'template': 'proposal_comment_to_proposer_email.md'})
                            if not proposal.user == g.user:
                                send_mail_info.append({
                                    'to': proposal.user.email or proposal.email,
                                    'subject': u"{space} Funnel: {proposal}".format(
                                        space=space.title, proposal=proposal.title),
                                    'template': 'proposal_comment_email.md'})
                    # Only attach the parent if it belongs to this
                    # proposal's comment space.
                    if parent and parent.commentspace == proposal.comments:
                        comment.parent = parent
                else:  # for top level comment
                    if not proposal.user == g.user:
                        send_mail_info.append({
                            'to': proposal.user.email or proposal.email,
                            'subject': u"{space} Funnel: {proposal}".format(
                                space=space.title, proposal=proposal.title),
                            'template': 'proposal_comment_email.md'})
                proposal.comments.count += 1
                comment.votes.vote(g.user)  # Vote for your own comment
                db.session.add(comment)
                flash(_("Your comment has been posted"), 'info')
            db.session.commit()
            to_redirect = comment.url_for(proposal=proposal, _external=True)
            for item in send_mail_info:
                email_body = render_template(item.pop('template'),
                                             proposal=proposal,
                                             comment=comment,
                                             link=to_redirect)
                if item.get('to'):
                    # Sender is set to None to prevent revealing email.
                    send_mail(sender=None, body=email_body, **item)
            # Redirect despite this being the same page because HTTP 303 is
            # required to not break the browser Back button
            return redirect(to_redirect, code=303)
        elif request.form.get('form.id') == 'delcomment' and delcommentform.validate():
            comment = Comment.query.get(int(delcommentform.comment_id.data))
            if comment:
                if 'delete-comment' in comment.permissions(g.user, g.permissions):
                    comment.delete()
                    proposal.comments.count -= 1
                    db.session.commit()
                    flash(_("Your comment was deleted"), 'info')
                else:
                    flash(_("You did not post that comment"), 'error')
            else:
                flash(_("No such comment"), 'error')
            return redirect(proposal.url_for(), code=303)
    # Escape each stored link line, then linkify, and mark safe for the
    # template; blank lines are skipped.
    links = [
        Markup(linkify(unicode(escape(l))))
        for l in proposal.links.replace('\r\n', '\n').split('\n') if l]
    # Status form is only shown once a proposal leaves draft state.
    if proposal.status != PROPOSALSTATUS.DRAFT:
        statusform = ProposalStatusForm(status=proposal.status)
    else:
        statusform = None
    # NOTE(review): synchronous external fetch on every page view —
    # presumably proposal.blog_post is a trusted JSON URL; verify.
    blogpost = requests.get(
        proposal.blog_post).json() if proposal.blog_post else None
    return render_template('proposal.html', space=space, proposal=proposal,
                           comments=comments, commentform=commentform,
                           delcommentform=delcommentform,
                           breadcrumbs=[(space.url_for(), space.title)],
                           blogpost=blogpost,
                           votes_groups=proposal.votes_by_group(),
                           PROPOSALSTATUS=PROPOSALSTATUS, links=links,
                           statusform=statusform)
def filter_linkify(text):
    """Escape *text*, linkify any URLs, and mark the result template-safe."""
    escaped = escape(text)
    return mark_safe(bleach.linkify(escaped))
def test_mixed_linkify():
    """A URL surrounded by Cyrillic and Japanese text is linkified;
    either attribute order is acceptable."""
    result = linkify('Домашняя http://example.com ヘルプとチュートリアル')
    assert result in (
        'Домашняя <a href="http://example.com" rel="nofollow">http://example.com</a> ヘルプとチュートリアル',
        'Домашняя <a rel="nofollow" href="http://example.com">http://example.com</a> ヘルプとチュートリアル',
    )
def test_japanese_safe_simple():
    """Japanese text round-trips unchanged through both filters."""
    text = 'ヘルプとチュートリアル'
    eq_(text, clean(text))
    eq_(text, linkify(text))
def tweet_markup(text):
    """Escape markup, parse URIs into links."""
    cleaned = bleach.clean(text, strip=True)
    return bleach.linkify(cleaned)
def check(test, expected_output):
    """Assert that linkifying *test* yields one of *expected_output*."""
    actual = linkify(test)
    in_(expected_output, actual)
def scrubemail_filter(data, css_junk=''):
    """Clean and linkify *data*, then obfuscate email addresses (ROT13)
    for safe display."""
    linked = bleach.linkify(bleach.clean(data))
    return Markup(scrubemail(unicode(linked), rot13=True, css_junk=css_junk))
def test_mixed_linkify():
    """A URL surrounded by Cyrillic and Japanese text is linkified;
    either attribute order is acceptable."""
    in_(('Домашняя <a href="http://example.com" rel="nofollow">'
         'http://example.com</a> ヘルプとチュートリアル',
         'Домашняя <a rel="nofollow" href="http://example.com">'
         'http://example.com</a> ヘルプとチュートリアル'),
        linkify('Домашняя http://example.com ヘルプとチュートリアル'))
def untrustedcommonmark(value):
    """Returns HTML given some commonmark Markdown. Cleans actual HTML from
    input using bleach, suitable for use with untrusted input."""
    cleaned = bleach.clean(value)
    linkified = bleach.linkify(cleaned, parse_email=True)
    return parse_commonmark(linkified)
def test_russian_simple():
    """Cyrillic text round-trips unchanged through both filters."""
    text = 'Домашняя'
    eq_(text, clean(text))
    eq_(text, linkify(text))
def on_changed_recipe_steps(target, value, oldvalue, iterator):
    """Render recipe-steps Markdown to sanitized, linkified HTML.

    Does nothing for empty/falsy values, leaving the target untouched.
    Note: 'allowed_tags' is resolved from the enclosing module scope.
    """
    if not value:
        return
    cleaned = bleach.clean(markdown(value, output_format='html'),
                           tags=allowed_tags, strip=True)
    target.recipe_steps_html = bleach.linkify(cleaned)
def markdown_filter(text):
    """Render *text* as Markdown, clean it against the configured tag
    whitelist, and linkify bare URLs."""
    rendered = markdown2.markdown(text, extras=settings.MARKDOWN_FILTER_EXTRAS)
    cleaned = bleach.clean(rendered,
                           tags=settings.MARKDOWN_FILTER_WHITELIST_TAGS)
    return bleach.linkify(cleaned)