def test_summary():
    """Check summary(): pass-through, ellipsis truncation, long-word dropping."""
    from clld.util import summary

    # text consisting of unique words
    text = "This is a long text, which we want to summarize."
    head = text[:20]

    # Input already within the limit comes back unchanged.
    assert summary(head) == head
    # Anything truncated gets an ellipsis appended.
    assert summary(text, len(text) - 1).endswith('...')

    # Words that cannot fit are dropped whole rather than cut mid-word.
    for maxlen, expected in [(10, 'One ...'), (2, '...')]:
        assert summary('One verylongword', maxlen) == expected
def atom_feed(request, feed_url):
    """
    Proxy feeds so they can be accessed via XHR requests.

    We also convert RSS to ATOM so that the javascript Feed component can
    read them.
    """
    try:
        # (connect, read) timeouts keep the XHR proxy from hanging on
        # slow upstream feeds.
        res = requests.get(feed_url, timeout=(3.05, 1))
    except Timeout:
        res = None

    title = None
    entries = []
    if res is not None and res.status_code == 200:
        parsed = feedparser.parse(res.content.strip())
        # Feeds are not guaranteed to carry a title attribute.
        title = getattr(parsed.feed, 'title', None)
        entries = [
            dict(
                title=entry.title,
                link=entry.link,
                updated=datetime.fromtimestamp(
                    mktime(entry.published_parsed)).isoformat(),
                summary=summary(entry.description))
            for entry in parsed.entries]

    ctx = {'url': feed_url, 'title': title, 'entries': entries}
    response = render_to_response('atom_feed.mako', ctx, request=request)
    response.content_type = 'application/atom+xml'
    return response
def atom_feed(request, feed_url):
    """
    Proxy feeds so they can be accessed via XHR requests.

    We also convert RSS to ATOM so that the javascript Feed component can
    read them.

    :param request: the current request, passed through to the renderer.
    :param feed_url: URL of the upstream RSS/ATOM feed to proxy.
    :return: a response rendering ``atom_feed.mako`` with content type \
    ``application/atom+xml``.
    """
    # NOTE(review): this re-defines an atom_feed from earlier in the file
    # and shadows it — confirm whether one of the two is dead code.
    ctx = {'url': feed_url, 'title': None, 'entries': []}
    try:
        # (connect, read) timeouts keep the proxy from hanging on slow feeds.
        res = requests.get(ctx['url'], timeout=(3.05, 1))
    except Timeout:
        res = None
    if res and res.status_code == 200:
        d = feedparser.parse(res.content.strip())
        # Fix: feeds are not guaranteed to carry a title; fall back to None
        # instead of raising AttributeError (matches the earlier variant).
        ctx['title'] = getattr(d.feed, 'title', None)
        for e in d.entries:
            ctx['entries'].append(
                dict(title=e.title,
                     link=e.link,
                     updated=datetime.fromtimestamp(mktime(
                         e.published_parsed)).isoformat(),
                     summary=summary(e.description)))
    response = render_to_response('atom_feed.mako', ctx, request=request)
    response.content_type = 'application/atom+xml'
    return response