def creole(value):
    """Template filter: render Creole markup in *value* as safe HTML.

    If creoleparser is missing, raise a template error under DEBUG;
    otherwise degrade gracefully by returning the raw text.
    """
    try:
        from creoleparser import text2html
    except ImportError:
        if settings.DEBUG:
            raise template.TemplateSyntaxError(
                "Error in {% creole %} filter: The Python creoleparser library isn't installed.")
        # Production fallback: show the unrendered source rather than crash.
        return force_unicode(value)
    else:
        rendered = text2html(force_unicode(smart_str(value)))
        return mark_safe(force_unicode(rendered))
def get_toc(self):
    """Render a table of contents for this content."""
    source = self.get_content()

    # Collect anything that looks like a Creole heading ("== Title ==").
    found = []
    for line in source.splitlines():
        line = line.strip()
        m = re.match(r'(=+)(.*?)=*$', line)
        if m:
            level = len(m.group(1))
            title = m.group(2).strip()
            found.append((level, remove_links(title)))

    # No headings means no TOC at all.
    if not found:
        return ""

    # Normalize so the shallowest heading has depth 1, e.g.
    #   == foo / === bar  ->  [(1, 'foo'), (2, 'bar')]
    shallowest = min(level for (level, title) in found)
    found = [(level + 1 - shallowest, title) for (level, title) in found]

    # Also force the *leading* run of headings down to start at 1, e.g.
    #   === foo / == bar / === baz  ->  [(1, 'foo'), (1, 'bar'), (2, 'baz')]
    first_level, _ = found[0]
    if first_level != 1:
        shift = first_level - 1
        for i, (level, title) in enumerate(found):
            if level == 1:
                break
            found[i] = (level - shift, title)

    # FIXME:
    #   = foo / === bar  should become  [(1, 'foo'), (2, 'bar')]

    # Emit the headings as a nested Creole bulleted list of anchor links.
    bullet_lines = []
    for level, title in found:
        anchor = "#%s" % (creole_slugify(title))
        bullet_lines.append("%s [[%s|%s]]" % ("#" * level, anchor, title))
    return text2html("\n".join(bullet_lines))
def blog(request, post_id=None):
    """Blog views: the five newest posts (no id) or a single post detail."""
    context = RequestContext(request)

    if post_id is None:
        # Index page: latest five posts, each rendered from Creole to HTML.
        recent = Post.objects.all().order_by('-created_time')[:5]
        context['posts_data'] = [
            {'post': entry, 'parsed': text2html(entry.content)}
            for entry in recent
        ]
        return render_to_response('blog/main.html', context)

    # Detail page for one post.
    try:
        post_obj = Post.objects.get(id=post_id)
    except Post.DoesNotExist:
        return HttpResponse('Post Not Found!')
    context['post'] = post_obj
    context['parsed'] = text2html(post_obj.content)
    return render_to_response('blog/post.html', context)
def html_offer_text(self):
    """
    Return the HTML version of this offer's offer_text

    Cached to save frequent conversion.
    """
    key = self.html_cache_key()
    cached = cache.get(key)
    if not cached:
        # Cache miss: convert now and store for a day.
        cached = text2html(self.offer_text())
        cache.set(key, cached, 24 * 3600)  # expires on self.save() above
    return mark_safe(cached)
def html_offer_text(self):
    """
    Return the HTML version of this offer's offer_text

    Cached to save frequent conversion.
    """
    cache_key = self.html_cache_key()
    rendered = cache.get(cache_key)
    if rendered:
        return mark_safe(rendered)
    # Not cached yet — render and keep it for 24 hours.
    rendered = text2html(self.offer_text())
    cache.set(cache_key, rendered, 24 * 3600)  # expires on self.save() above
    return mark_safe(rendered)
def wiki(request, wiki_id=None):
    """Wiki views: the full wiki list (no id) or one wiki with its history."""
    context = RequestContext(request)

    if wiki_id is None:
        context['wikis'] = Wiki.objects.all()
        return render_to_response('backstage/wiki.html', context)

    try:
        wiki_obj = Wiki.objects.get(id=wiki_id)
    except Wiki.DoesNotExist:
        return HttpResponse('Wiki Not Found!')

    context['wiki'] = wiki_obj
    context['parsed'] = text2html(wiki_obj.document)
    context['revisions'] = WikiRevisionHistory.objects.filter(
        wiki=wiki_obj).order_by('-created_time')
    return render_to_response('backstage/wiki_detail.html', context)
def get_trac_wiki_book_article(bibtex_key):
    """Fetch the newest trac wiki page named *bibtex_key*, rendered as HTML.

    Returns a plain error string when the database is unreachable; otherwise
    a ``literal`` containing the rendered page (or a not-found message) plus
    an edit-link footer.
    """
    try:
        postgres_string = config["sqlalchemy.url"] + "trac"
        engine = create_engine(postgres_string)
        connection = engine.connect()
    except Exception:
        return "Could not connect to database. Does the database exist here?"

    # SECURITY FIX: the original interpolated bibtex_key straight into the SQL
    # string ("... name = '%s' ..." % bibtex_key), which allowed SQL injection.
    # Bind it as a DBAPI parameter (pyformat style for the postgres driver).
    result = connection.execute(
        "select * from wiki where name = %(name)s order by time desc",
        {"name": bibtex_key})
    if result.rowcount > 0:
        row = result.fetchone()
        wiki_text = text2html(row['text']).decode("utf-8")
    else:
        wiki_text = "Could not find an entry for this book. Please check the trac wiki at <a href=\"http://trac.cidles.eu/wiki\">http://trac.cidles.eu/wiki</a>"
    connection.close()

    # Footer pointing editors at the corresponding trac wiki page.
    wiki_text = wiki_text + u"<p><small>Edit this page at <a href=\"http://trac.cidles.eu/wiki/%s\">http://trac.cidles.eu/wiki/%s</a></small></p>" % (bibtex_key, bibtex_key)
    return literal(wiki_text)
def wiki(request, wiki_id=None):
    """List all wikis when no id is given; otherwise show one wiki's detail."""
    context = RequestContext(request)

    if wiki_id is None:
        # Listing page.
        all_wikis = Wiki.objects.all()
        context['wikis'] = all_wikis
        return render_to_response('backstage/wiki.html', context)

    # Detail page: the document rendered to HTML plus its revision history.
    try:
        target = Wiki.objects.get(id=wiki_id)
    except Wiki.DoesNotExist:
        return HttpResponse('Wiki Not Found!')

    history = WikiRevisionHistory.objects.filter(wiki=target)
    context['wiki'] = target
    context['parsed'] = text2html(target.document)
    context['revisions'] = history.order_by('-created_time')
    return render_to_response('backstage/wiki_detail.html', context)
def wiki(text):
    """Convert Creole wiki markup to HTML."""
    html = text2html(text)
    return html
def render(self, text, **kwargs):
    """Render Creole *text* to HTML; extra keyword arguments are ignored."""
    convert = creoleparser.text2html
    return convert(text)
def render(tiddler, environ):
    """Render a tiddler's Creole text to HTML; *environ* is unused."""
    # XXX this pays no attention to path and it should when
    # creating links.
    source = tiddler.text
    return text2html(source)
def creole(value):
    """Template filter: render Creole markup as HTML."""
    converter = creoleparser.text2html
    return converter(value)
# NOTE(review): Python 2 handler that builds an HTML page for a Wikipedia
# query (DuckDuckGo abstract link + creole-rendered article + link list),
# falling back to a random page on DisambiguationError. Left byte-identical:
# the control flow is too entangled to restyle safely. Known issues to
# confirm/fix separately:
#   - mutable default argument `methods=['POST', 'GET']` (shared across calls);
#   - the local `re` shadows the `re` module;
#   - `except wikipedia.DisambiguationError, UnicodeEncodeError:` is py2
#     syntax that BINDS the exception to the name UnicodeEncodeError — it does
#     not catch UnicodeEncodeError;
#   - `refrence`, `url`, `summ` keep only the last loop value / are unused;
#   - the first `try` branch builds `wiki` but has no `return`, so a
#     successful lookup presumably returns None — TODO confirm against caller.
def wsearch(query, methods=['POST', 'GET']): try: print query head = str(query) style = '''<head> <link rel="stylesheet" type="text/css" href="wiki.css"> </head>''' title = "<title>{0}</title>".format(str(head)) result = Q(query) uri = result.abstract.url page = wikipedia.page(title=query) content = text2html(page.content) for elem in page.links: refrence = "<a href=http://localhost:5000/{0}><center><br>{0}</center></body></a>".format(elem) summary = "<body><center>{0}</center></body>".format(content) us = UrlShortener() url = UrlShortener.shorten(us, page.url) home = str("/") re = "<br><center><a href={0}>Home</a></center>".format(home) body = "<br><center><a href={0}>Random</a></center>".format(str('{success:')) link = "<br><center><a href={0}>WikiPage</a></center>".format(uri) wiki = str(style + title + "\n" + "<br>"+ summary + "\n" + body + "\n" + link + re + refrence + "<center>" + "</center>") except wikipedia.DisambiguationError: print "\n" try: head = wikipedia.random(pages=1) style = '''<head> <link rel="stylesheet" type="text/css" href="wiki.css"> </head>''' title = "<title>{0}</title>".format(str(head)) page = wikipedia.page(title=head) content = text2html(page.content) result = duckduckgo.query(head) uri = result.abstract.url summ = wikipedia.summary(str(head)) summary = "<body><center><div class='page'>{0}</div><center><body>".format(content) for elem in page.links: refrence = "<a href=http://localhost:5000/{0}><center><br>{0}</center></body></a>".format(elem) us = UrlShortener() url = UrlShortener.shorten(us, page.url) home = str("/") re = "<br><a href={0}>Home</a>".format(home) body = "<br><a href={0}>Random</a>".format(str('{success:')) link = "<br><a href={0}>WikiPage</a>".format(uri) wiki = str(style + title + "\n" + summary + "\n" + body + "\n" + link + re + refrence) return wiki except wikipedia.DisambiguationError, UnicodeEncodeError: head = wikipedia.random(pages=1) style = '''<head> <link rel="stylesheet" type="text/css" href="wiki.css"> 
</head>''' title = "<title>{0}</title>".format(str(head)) page = wikipedia.page(title=head) content = text2html(page.content) result = duckduckgo.query(head) uri = result.abstract.url summ = wikipedia.summary(str(head)) summary = "<body><center><div class='page'>{0}</div><center><body>".format(content) for elem in page.links: refrence = "<a href=http://localhost:5000/" + elem + "><center><br>{0}</center></body></a>".format(elem) us = UrlShortener() url = UrlShortener.shorten(us, page.url) home = str("/") re = "<br><a href={0}>Home</a>".format(home) body = "<br><a href={0}>Random</a>".format(str('{success:')) link = "<br><a href={0}>WikiPage</a>".format(uri) wiki = str(style + title + "\n" + summary + "\n" + body + "\n" + link + refrence)
def render_text(self):
    """Return this object's Creole text rendered as safe XHTML."""
    rendered = text2html(self.text, method='xhtml')
    return mark_safe(rendered)
def render(self, text, **kwargs):
    """Render Creole *text* to HTML, decoding bytes output on Python 3."""
    html = creoleparser.text2html(text)
    # Some creoleparser versions return bytes; normalize to str on py3.
    if PY3K and isinstance(html, bytes):
        return html.decode('utf-8')
    return html
# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # ------------------------------------------------------------------- import sys import creoleparser COMMAND_BREAK = "------wriaki-creole-break------" Acc = "" while 1: L = sys.stdin.readline() if L.strip() == COMMAND_BREAK: H = creoleparser.text2html(Acc.decode('utf-8')) print H print COMMAND_BREAK sys.stdout.flush() Acc = "" elif L == "": break else: Acc += L
def render_content(self):
    """Return this object's Creole content rendered as safe XHTML."""
    html = text2html(self.content, method='xhtml')
    return mark_safe(html)
def wiki2html(text):
    """Convert Creole wiki markup to XHTML (creoleparser imported lazily)."""
    from creoleparser import text2html as convert
    return convert(text, method="xhtml")