def topic(request, topic=''):
    """Return the topic record as JSON with its matching articles attached.

    Responds with a 404 when the topic does not exist; otherwise the
    article list (bounded by the request's start/end/limit params) is
    embedded under the 'articles' key of the topic dict.
    """
    record = db.get_topic(topic)
    if record is None:
        return _404_response()
    start, end, limit = _parse_params(request)
    record['articles'] = db.list_articles_by_topic(
        topic, start=start, end=end, limit=limit)
    return json_response(record)
def topic(request):
    """Render the topic page: articles matching one or more query topics,
    plus related-topic suggestions.

    Query params:
        topic -- required; one or more topic names joined by
                 common.TOPIC_SEPARATOR. Redirects to '/' when missing.
        limit -- optional maximum article count; 'all' means unlimited (0);
                 missing or unparsable values fall back to 100.
    """
    topics_str = _get(request, 'topic')
    if not topics_str:
        return HttpResponseRedirect('/')

    limit = _parse_limit(_get(request, 'limit'))

    topics = topics_str.split(common.TOPIC_SEPARATOR)
    # Each topic carries an 'rm_q' query string that drops just that topic,
    # so the template can render a "remove this topic" link.
    topic_dicts = [dict(topic=t, rm_q='') for t in topics]
    if len(topics) == 1:
        articles = db.list_articles_by_topic(topics[0], limit=limit)
    else:
        articles = db.list_articles_by_topics(topics, limit=limit)
    for topic_dict in topic_dicts:
        tmp_topics = topics[:]
        tmp_topics.remove(topic_dict['topic'])
        topic_dict['rm_q'] = _join_topic_str(tmp_topics, common.TOPIC_SEPARATOR)

    def _process_row(a):
        # 'created' appears to be an ISO-style timestamp string whose first
        # 10 characters are the YYYY-MM-DD date — TODO confirm against db.
        a['created_date'] = a['created'][:10]
        a['url'] = urllib.unquote(a['url'])
        a['source_url'] = urllib.unquote(a['source_url'])
        return a

    articles = [_process_row(a) for a in articles]
    # Keep the limit only when the result set filled it exactly (there may
    # be more rows to page through); otherwise signal 0 to the template.
    limit = limit if len(articles) == limit else 0

    related_topics = db.get_related_topics(topics, limit=10)
    for item in related_topics:
        # 'q' is the query string that adds this related topic to the
        # currently selected one(s).
        item['q'] = topics_str + common.TOPIC_SEPARATOR + item['title']

    highlight_pattern = _join_topic_str(topics, common.DISPLAY_SEPARATOR)
    return render_to_response('topic.html',
                              dict(topics=topic_dicts,
                                   topic=topics_str,
                                   highlight_pattern=highlight_pattern,
                                   articles=articles,
                                   limit=limit,
                                   related_topics=related_topics))


def _parse_limit(raw):
    """Normalize the 'limit' query param.

    None -> 100 (default), 'all' -> 0 (meaning unlimited), any other value
    is parsed as an int with a fallback of 100 on bad input.
    """
    if raw is None:
        return 100
    if raw == 'all':
        return 0
    try:
        return int(raw)
    except (TypeError, ValueError):
        return 100
import common
import database as db

logger = common.get_logger()


def _extract_source_url(url):
    """Return the scheme://host portion of *url*.

    Fixes the original slicing bug: when the URL had no path component
    (no '/' after the '//'), str.find returned -1 and the slice url[:-1]
    silently dropped the last character of the host.
    """
    scheme_end = url.find('//')
    if scheme_end == -1:
        # No protocol separator at all; leave the URL untouched.
        return url
    path_start = url.find('/', scheme_end + 2)
    # No path after the host: the whole URL is already scheme://host.
    return url if path_start == -1 else url[:path_start]


# Backfill the source -> source_url mapping from every stored article.
topics = db.list_topics()
for t in topics:
    articles = db.list_articles_by_topic(t)
    for a in articles:
        source_url = _extract_source_url(a['url'])
        # Single-argument print with a pre-formatted string keeps the
        # Python 2 output ("source source_url") and is also valid Python 3.
        print('%s %s' % (a['source'], source_url))
        db.update_source(a['source'], source_url)