import time
from datetime import datetime

# Defined elsewhere in the app: Articles, Pages, Categories, texts, counters,
# comments, ctx, notfound, UTC_0 (a zero-offset tzinfo), _get_wiki,
# _get_wikipages, _get_full_wikipage.

def _get_rss():

    def _rss_datetime(ts):
        # Format a Unix timestamp as an RFC 822 date, e.g.
        # _rss_datetime(1356969600) -> 'Mon, 31 Dec 2012 16:00:00 GMT'.
        # Note: %a and %b are locale-dependent.
        dt = datetime.fromtimestamp(ts, UTC_0)
        return dt.strftime('%a, %d %b %Y %H:%M:%S GMT')

    def _safe_str(s):
        # Encode unicode as UTF-8 so all pieces can be joined into one str.
        if isinstance(s, str):
            return s
        if isinstance(s, unicode):
            return s.encode('utf-8')
        return str(s)

    limit = 20
    name = u'廖雪峰的官方网站'
    description = u''
    copyright = 'copyright 2013'  # currently unused
    domain = ctx.request.host
    articles = Articles.select(
        'where publish_time<? order by publish_time desc limit ?',
        time.time(), limit)
    # Use the newest article's publish time as the feed's build date,
    # falling back to now when there are no articles yet.
    rss_time = articles and articles[0].publish_time or time.time()
    L = [
        '<?xml version="1.0"?>\n<rss version="2.0"><channel><title><![CDATA[',
        name,
        ']]></title><link>http://', domain,
        '/</link><description><![CDATA[', description,
        ']]></description><lastBuildDate>', _rss_datetime(rss_time),
        '</lastBuildDate><generator>BrighterPage</generator><ttl>3600</ttl>']
    for a in articles:
        url = 'http://%s/article/%s' % (domain, a._id)
        L.append('<item><title><![CDATA[')
        L.append(a.name)
        L.append(']]></title><link>')
        L.append(url)
        L.append('</link><guid>')
        L.append(url)
        L.append('</guid><author><![CDATA[')
        L.append(a.user_name)
        L.append(']]></author><pubDate>')
        L.append(_rss_datetime(a.publish_time))
        L.append('</pubDate><description><![CDATA[')
        L.append(texts.md2html(texts.get(a.content_id)))
        L.append(']]></description></item>')
    L.append('</channel></rss>')
    return ''.join(map(_safe_str, L))
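# A hardening sketch, not part of the app above: a CDATA section breaks if
# the wrapped text itself contains ']]>'. User-supplied fields (article
# names, rendered content) could be passed through a helper like this before
# being appended (_cdata_escape is a hypothetical name):
def _cdata_escape(s):
    # ']]' closes the current CDATA section, '<![CDATA[' reopens a new one,
    # and '>' continues as ordinary character data.
    return s.replace(u']]>', u']]]]><![CDATA[>')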
def web_get_article(aid):
    article = Articles.get_by_id(aid)
    if article is None or article.draft:
        raise notfound()
    # Count this read and render the stored Markdown for display.
    article.reads = counters.incr(aid)
    article.content = texts.md2html(texts.get(article.content_id))
    category = Categories.get_by_id(article.category_id)
    return dict(article=article, category=category,
                comments=comments.get_comments(aid))
def web_wikipage(wid, pid):
    page = _get_full_wikipage(pid)
    if page.wiki_id != wid:
        raise notfound()
    wiki = _get_wiki(wid)
    tree = _get_wikipages(wiki)
    content = texts.md2html(texts.get(page.content_id))
    page.reads = counters.incr(pid)
    return dict(wiki=wiki, page=page, tree=tree, name=page.name,
                content=content, comments=comments.get_comments(pid))
def get_page(alias):
    page = Pages.select_one('where alias=?', alias)
    if page is None:
        raise notfound()
    page.content = texts.md2html(texts.get(page.content_id))
    return dict(page=page)
def web_wiki(wid):
    wiki = _get_wiki(wid)
    tree = _get_wikipages(wiki)
    content = texts.md2html(texts.get(wiki.content_id))
    wiki.reads = counters.incr(wid)
    return dict(wiki=wiki, page=None, tree=tree, name=wiki.name,
                content=content, comments=comments.get_comments(wid))
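# The handlers above all repeat the same "fetch stored text, render it as
# Markdown" step. A small helper (hypothetical, not in the original module)
# would keep that logic in one place:
def _render_text(content_id):
    # texts.get returns the raw Markdown for a text record; texts.md2html
    # converts it to HTML.
    return texts.md2html(texts.get(content_id))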