def get_page_content(pageid): ia = Articles.select().group_by(Articles.iamap); content = Articles.select().where(Articles.pageid == pageid ).limit(1) t0 = time() value = content[0] value.webpage = Markup(value.webpage) t1 = time() print 'Query from Articles takes %f' %(t1-t0) return render_template('page.html', content = value , ia = ia)
def api_delete_category(cid):
    """Delete category *cid*, refusing when any article still references it.

    Raises APIValueError when the category does not exist or is non-empty.
    On success, deletes the row, invalidates the categories cache, and
    returns dict(result=True).
    """
    cat = Categories.get_by_id(cid)
    if cat is None:
        raise APIValueError('_id', 'category not found.')
    # Truthiness instead of len(...) > 0 — same result for the list this
    # select API returns, without the explicit length call.
    if Articles.select('where category_id=?', cat._id):
        raise APIValueError('_id', 'cannot delete non-empty categories.')
    cat.delete()
    _clear_categories_cache()
    return dict(result=True)
def homepage():
    """Build the front-page context: the 10 newest published articles
    plus a category-id -> name resolver and per-article read counts."""
    categories = _get_categories()
    names_by_id = {c._id: c.name for c in categories}
    fn_get_category_name = lambda cid: names_by_id.get(cid, u'ERROR')
    articles = Articles.select(
        'where publish_time<? order by publish_time desc limit ?',
        time.time(), 10)
    # Fetch all read counters in one batch and attach them to the rows.
    reads = counters.counts(a._id for a in articles)
    for article, count in zip(articles, reads):
        article.reads = count
    return dict(articles=articles, fn_get_category_name=fn_get_category_name)
def api_delete_category(cid):
    """Remove category *cid* after verifying it exists and holds no articles."""
    # NOTE(review): this is a byte-identical duplicate of an earlier
    # api_delete_category in this file; the later def silently wins at
    # import time — confirm which copy is intended.
    cat = Categories.get_by_id(cid)
    if cat is None:
        raise APIValueError('_id', 'category not found.')
    in_use = Articles.select('where category_id=?', cat._id)
    if len(in_use) > 0:
        raise APIValueError('_id', 'cannot delete non-empty categories.')
    cat.delete()
    _clear_categories_cache()
    return dict(result=True)
def homepage():
    """Assemble the homepage context (10 newest published articles)."""
    # NOTE(review): duplicate definition of homepage() — an identical copy
    # appears earlier in this file; the later def shadows it.
    categories = _get_categories()
    cat_dict = dict((c._id, c.name) for c in categories)
    fn_get_category_name = lambda cid: cat_dict.get(cid, u'ERROR')
    sql = 'where publish_time<? order by publish_time desc limit ?'
    articles = Articles.select(sql, time.time(), 10)
    read_counts = counters.counts((a._id for a in articles))
    for art, n_reads in zip(articles, read_counts):
        art.reads = n_reads
    return dict(articles=articles, fn_get_category_name=fn_get_category_name)
def _get_rss():
    """Render the 50 newest published articles as an RSS 2.0 XML byte string.

    Fixes over the original: the article body was rendered twice per
    article (the first loop's ``a.content`` was never used); the double
    assignment ``articles = articles = ...`` and the unused ``limit`` /
    ``copyright`` locals are removed; ``rss_time`` uses an explicit
    conditional instead of the fragile ``and/or`` idiom.
    """

    def _rss_datetime(ts):
        # RFC 822 date in UTC, as the RSS 2.0 spec requires for dates.
        dt = datetime.fromtimestamp(ts, UTC_0)
        return dt.strftime('%a, %d %b %Y %H:%M:%S GMT')

    def _safe_str(s):
        # Normalize every fragment to a UTF-8 byte string (Python 2).
        if isinstance(s, str):
            return s
        if isinstance(s, unicode):
            return s.encode('utf-8')
        return str(s)

    name = u'廖雪峰的官方网站'
    description = u''
    domain = ctx.request.host
    articles = Articles.select('where publish_time<? order by publish_time desc limit ?', time.time(), 50)
    # Render each article body exactly once and reuse it below.
    for a in articles:
        a.content = texts.md2html(texts.get(a.content_id))
    rss_time = articles[0].publish_time if articles else time.time()
    L = [
        '<?xml version="1.0"?>\n<rss version="2.0"><channel><title><![CDATA[',
        name,
        ']]></title><link>http://',
        domain,
        '/</link><description><![CDATA[',
        description,
        ']]></description><lastBuildDate>',
        _rss_datetime(rss_time),
        '</lastBuildDate><generator>BrighterPage</generator><ttl>3600</ttl>'
    ]
    for a in articles:
        url = 'http://%s/article/%s' % (domain, a._id)
        L.append('<item><title><![CDATA[')
        L.append(a.name)
        L.append(']]></title><link>')
        L.append(url)
        L.append('</link><guid>')
        L.append(url)
        L.append('</guid><author><![CDATA[')
        L.append(a.user_name)
        L.append(']]></author><pubDate>')
        L.append(_rss_datetime(a.publish_time))
        L.append('</pubDate><description><![CDATA[')
        L.append(a.content)
        L.append(']]></description></item>')
    L.append(r'</channel></rss>')
    return ''.join(map(_safe_str, L))
def _get_rss():
    """Build an RSS 2.0 feed (50 newest published articles) as UTF-8 bytes.

    NOTE(review): this duplicates an identical _get_rss earlier in the
    file; the later def wins — confirm which copy is intended.
    Fixes: markdown rendering ran twice per article (the pre-loop result
    was discarded); removed the ``articles = articles = ...`` double
    assignment and the dead ``limit`` / ``copyright`` locals; replaced
    the ``and/or`` rss_time idiom with an explicit conditional.
    """

    def _rss_datetime(ts):
        # RSS dates are RFC 822-formatted in UTC.
        dt = datetime.fromtimestamp(ts, UTC_0)
        return dt.strftime('%a, %d %b %Y %H:%M:%S GMT')

    def _safe_str(s):
        # Coerce each fragment to a UTF-8 encoded byte string (Python 2).
        if isinstance(s, str):
            return s
        if isinstance(s, unicode):
            return s.encode('utf-8')
        return str(s)

    name = u'廖雪峰的官方网站'
    description = u''
    domain = ctx.request.host
    articles = Articles.select(
        'where publish_time<? order by publish_time desc limit ?',
        time.time(), 50)
    # Render each body once; the item loop reuses a.content.
    for a in articles:
        a.content = texts.md2html(texts.get(a.content_id))
    rss_time = articles[0].publish_time if articles else time.time()
    L = [
        '<?xml version="1.0"?>\n<rss version="2.0"><channel><title><![CDATA[',
        name,
        ']]></title><link>http://',
        domain,
        '/</link><description><![CDATA[',
        description,
        ']]></description><lastBuildDate>',
        _rss_datetime(rss_time),
        '</lastBuildDate><generator>BrighterPage</generator><ttl>3600</ttl>'
    ]
    for a in articles:
        url = 'http://%s/article/%s' % (domain, a._id)
        L.append('<item><title><![CDATA[')
        L.append(a.name)
        L.append(']]></title><link>')
        L.append(url)
        L.append('</link><guid>')
        L.append(url)
        L.append('</guid><author><![CDATA[')
        L.append(a.user_name)
        L.append(']]></author><pubDate>')
        L.append(_rss_datetime(a.publish_time))
        L.append('</pubDate><description><![CDATA[')
        L.append(a.content)
        L.append(']]></description></item>')
    L.append(r'</channel></rss>')
    return ''.join(map(_safe_str, L))
def get_ia_content(iamap):
    """Render maps.html with every article row matching *iamap*."""
    # One representative row per iamap group, for the index/sidebar.
    grouped = Articles.select().group_by(Articles.iamap)
    rows = Articles.select().where(Articles.iamap == iamap)
    return render_template('maps.html', content=rows, ia=grouped)