def recent_feed():
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url,
                    url=request.url_root)
    threads = Thread.query.order_by(Thread.last_updated.desc()) \
        .filter(Thread.user == None).all()
    for thread in threads:
        url = url_for('thread',
                      display_hash=thread.display_hash,
                      title=thread.slug())
        feed.add(thread.title, '', content_type='html',
                 author=thread.display_name,
                 url=url,
                 updated=thread.last_updated,
                 published=thread.date_created)
    return feed.get_response()
def test_atom_add_one(self):
    a = AtomFeed(title='test_title', id=1)
    f = FeedEntry(title='test_title', id=1,
                  updated=datetime.datetime.now())
    assert len(a.entries) == 0
    a.add(f)
    assert len(a.entries) == 1
def feed(site):
    if site == 'hackernews':
        title = 'Hacker News Digest'
        news_list = models.HackerNews.query.order_by('submit_time desc').all()
    else:
        title = 'Startup News Digest'
        news_list = models.StartupNews.query.order_by('submit_time desc').all()
    feed = AtomFeed(title,
                    updated=models.LastUpdated.get(site),
                    feed_url=request.url,
                    url=urljoin(request.url_root, url_for(site)),
                    author={
                        'name': 'polyrabbit',
                        'uri': 'https://github.com/polyrabbit/'})
    for news in news_list:
        # Prepend a floated thumbnail when the story has an image; an empty
        # summary yields empty content because `and` short-circuits.
        feed.add(news.title,
                 content=news.summary and (
                     ('<img src="%s" style="width: 220px; float: left" />'
                      % news.image.url if news.img_id else '') + news.summary),
                 author={
                     'name': news.author,
                     'uri': news.author_link
                 } if news.author_link else (),
                 url=news.url,
                 updated=news.submit_time)
    return feed.get_response()
def atomfeed():
    t = request.args.get('t')
    if not t:
        t = 'url'
    r = get_last(t)
    if not (r.status_code == 200 and len(r.response) > 0):
        return {}
    j = json.loads(r.response[0])
    if len(j) < 1:
        return {}
    # Title/subtitle are French: "GCU-Squad's (not necessarily) SFW links" /
    # "Addresses seen on the #[email protected] channel".
    feed = AtomFeed('Les liens (pas forcement) SFW de GCU-Squad!',
                    feed_url=request.url,
                    url=request.url_root,
                    subtitle='Adresses vues sur le canal #[email protected]')
    for item in j:
        post = item['_source']
        for url in post['urls']:
            datefmt = '%Y-%m-%dT%H:%M:%S.%f'
            pubdate = datetime.datetime.strptime(post['fulldate'], datefmt)
            update = pubdate
            if post['tags']:
                line = '{0} #{1}#'.format(post['line'], ','.join(post['tags']))
            else:
                line = post['line']
            feed.add(title=line, title_type='text', url=url,
                     author=post['nick'], published=pubdate, updated=update)
    return feed.get_response()
def feed():
    blogging_engine = _get_blogging_engine(current_app)
    storage = blogging_engine.storage
    config = blogging_engine.config
    count = config.get("BLOGGING_FEED_LIMIT")
    posts = storage.get_posts(count=count, offset=None, recent=True,
                              user_id=None, tag=None, include_draft=False)
    feed = AtomFeed(
        '%s - All Articles' % config.get("BLOGGING_SITENAME",
                                         "Flask-Blogging"),
        feed_url=request.url, url=request.url_root, generator=None)
    if len(posts):
        feed_posts_fetched.send(blogging_engine.app, engine=blogging_engine,
                                posts=posts)
        for post in posts:
            blogging_engine.process_post(post, render=True)
            feed.add(post["title"], str(post["rendered_text"]),
                     content_type='html',
                     author=post["user_name"],
                     url=config.get("BLOGGING_SITEURL", "") + post["url"],
                     updated=post["last_modified_date"],
                     published=post["post_date"])
        feed_posts_processed.send(blogging_engine.app, engine=blogging_engine,
                                  feed=feed)
    response = feed.get_response()
    response.headers["Content-Type"] = "application/xml"
    return response
def test_feedparser_authors():
    for test in tests:
        if test.skip:
            continue
        feed = AtomFeed(title="Test Feed", feed_url="http://testfeed.com")
        entry = FeedEntry(
            title="Test Entry",
            url="http://testfeed.com/testentry",
            updated=datetime.utcnow(),
            content="Test Entry",
            author=test.namestring,
        )
        feed.entries.append(entry)
        rss = feed.to_string()
        parsed = feedparser.parse(rss)
        assert parsed.entries is not None
        assert len(parsed.entries) == 1
        assert len(parsed.entries[0].authors) == len(test.expected)
def last_24_hr():
    posts = get_recent_posts(filter_recent=True)
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url, url=request.url_root)
    for post in posts:
        add_post_to_feed(feed, post)
    return feed.get_response()
def feed_folder(folder):
    if folder.split('/')[0] != 'admin':
        folder = Folder.query.filter(guid=folder).one()
        if folder:
            posts = Post.query.filter(folder_id=folder.id,
                                      status='published',
                                      type='post') \
                .order_by('created').limit(20, 0, array=True)
            feed = AtomFeed(
                g.options['name'] + ' • ' + folder.name,
                subtitle=folder.seo_content,
                feed_url=request.url_root + 'feed/',
                url=request.url_root,
                generator=None
            )
            for post in posts:
                feed.add(
                    post.title, post.content,
                    content_type='html',
                    author=post.user.nicename,
                    url=request.url_root + post.guid,
                    updated=post.modified,
                    published=post.created
                )
            response = feed.get_response()
            response.headers["Content-Type"] = 'application/xml'
            return response
        else:
            return is_404()
    else:
        return is_admin_404()
def feed():
    feed = AtomFeed(
        g.options['name'],
        subtitle=g.options['slogan'],
        feed_url=request.url_root + 'feed/',
        url=request.url_root,
        generator=None
    )
    posts = Post.query.filter(status='published', type='post') \
        .order_by('created').limit(20, 0, array=True)
    for post in posts:
        feed.add(
            post.title, post.content,
            content_type='html',
            author=post.user.nicename,
            url=request.url_root + post.guid,
            updated=post.modified,
            published=post.created
        )
    response = feed.get_response()
    response.headers["Content-Type"] = 'application/xml'
    return response
def _feeds(query=None, title='Recent Articles'):
    posts = db.find("posts.files", query or {}) \
        .sort([("uploadDate", -1)]).limit(PAGE_SIZE)
    feed = AtomFeed(title, feed_url=request.url, url=request.url_root)
    from monblog.common.encodings import force_bytes
    for post in posts:
        author = conf.BLOG_SETTINGS.get("AUTHOR", "")
        url = urljoin(request.url_root,
                      url_for("get_post", post_id=str(post["_id"])))
        text = force_bytes(
            db.fs.get(objectid.ObjectId(post["_id"])).read(),
            "ascii", errors="ignore")
        feed.add(
            post["metadata"].get("title"),
            markdown(text),
            id=url,
            content_type='html',
            url=url,
            updated=post["uploadDate"],
            published=post["uploadDate"],
            author=post["metadata"].get("author", author))
    return feed.get_response()
def atom_feed(request):
    """Generate the Atom feed with the newest images."""
    user = User.query.filter_by(
        username=request.matchdict['user']).first()
    if not user or not user.has_privilege(u'active'):
        return render_404(request)

    cursor = MediaEntry.query.filter_by(
        uploader=user.id,
        state=u'processed').\
        order_by(MediaEntry.created.desc()).\
        limit(ATOM_DEFAULT_NR_OF_UPDATED_ITEMS)

    # The Atom feed id is a tag URI (see http://en.wikipedia.org/wiki/Tag_URI).
    atomlinks = [{
        'href': request.urlgen(
            'mediagoblin.user_pages.user_home',
            qualified=True, user=request.matchdict['user']),
        'rel': 'alternate',
        'type': 'text/html'}]

    if mg_globals.app_config["push_urls"]:
        for push_url in mg_globals.app_config["push_urls"]:
            atomlinks.append({
                'rel': 'hub',
                'href': push_url})

    feed = AtomFeed(
        "MediaGoblin: Feed for user '%s'" % request.matchdict['user'],
        feed_url=request.url,
        id='tag:{host},{year}:gallery.user-{user}'.format(
            host=request.host,
            year=datetime.datetime.today().strftime('%Y'),
            user=request.matchdict['user']),
        links=atomlinks)

    for entry in cursor:
        feed.add(
            entry.get('title'),
            entry.description_html,
            id=entry.url_for_self(request.urlgen, qualified=True),
            content_type='html',
            author={
                'name': entry.get_uploader.username,
                'uri': request.urlgen(
                    'mediagoblin.user_pages.user_home',
                    qualified=True,
                    user=entry.get_uploader.username)},
            updated=entry.get('created'),
            links=[{
                'href': entry.url_for_self(request.urlgen, qualified=True),
                'rel': 'alternate',
                'type': 'text/html'}])
    return feed.get_response()
def get(self):
    feed = AtomFeed(
        'Recently Added Watches - Wanna Buy A Watch',
        feed_url=request.url,
        url=request.url_root
    )
    # Prepare query
    watches = session.query(Watch)
    # TODO: .limit(15) and date_added
    watches = watches.order_by(Watch.title).all()
    for watch in watches:
        feed.add(
            watch.title,
            unicode(watch.long_description),
            content_type='html',
            author='wbaw',
            url=BASE_URL + '/' + watch.page_url + '?src=wbawsearch',
            # TODO: Add updated and published
            updated=datetime.now(),
            published=datetime.now()
        )
    return feed.get_response()
def generate_feed(artifact):
    feed = AtomFeed(
        title=title or 'Feed',
        subtitle=unicode(subtitle or ''),
        subtitle_type='html' if hasattr(subtitle, '__html__') else 'text',
        feed_url=feed_url,
        url=embed_url,
        id=get_id(ctx.env.project.id + 'lektor')
    )
    for item in items:
        feed.add(
            get_item_title(item, item_title_field),
            get_item_body(item, item_body_field),
            xml_base=url_to(item, external=True),
            url=url_to(item, external=True),
            content_type='html',
            id=get_id(u'%slektor/%s' % (
                ctx.env.project.id,
                item['_path'].encode('utf-8'),
            )),
            author=get_item_author(item, item_author_field),
            updated=get_item_updated(item, item_date_field))
    with artifact.open('wb') as f:
        f.write(feed.to_string().encode('utf-8') + '\n')
def do_atom(path=None):
    feed = AtomFeed('Changes' + (' - %s' % path if path != '/' else ''),
                    feed_url=request.url, url=request.url_root)
    history = []
    if path != '/':
        for entry in get_history(path):
            entry.insert(1, path)
            history.append(entry)
            if len(history) == ATOM_LIMIT:
                break
    else:
        for path in get_documents_list():
            for entry in get_history(path):
                entry.insert(1, path)
                history.append(entry)
        history = sorted(history, key=lambda x: x[0],
                         reverse=True)[:ATOM_LIMIT]
    for date, path, rev, author, desc in history:
        feed.add(path,
                 desc if desc != '-' else 'No summary available',
                 url=url_for('index', path=path, do='compare', to=rev),
                 author=author,
                 updated=date)
    return feed.get_response()
def atom(ctx):
    feed = AtomFeed(ctx.odb.name,
                    feed_url=ctx.url_for("atom"),
                    url=ctx.url_for("root"),
                    subtitle=ctx.odb.description)
    pattern = ctx.app.recent_doc_pattern
    for added_date, root_path in utils.recent_files(ctx, count=10,
                                                    pattern=pattern):
        blob_obj = ctx.odb.head.tree[root_path]
        assert isinstance(blob_obj, BlobObject)
        current_blob_obj = ctx.odb.head.tree[blob_obj.abs_name]
        doc = render_blob(ctx, current_blob_obj)
        url = ("http://" + ctx.request.host
               + ctx.url_for("view_obj", rev="HEAD", path=blob_obj.root_path))
        feed.add(
            doc.title,
            doc.body,
            title_type="html",
            content_type="html",
            author=doc.author_name,
            url=url,
            updated=doc.last_modified,
            published=added_date,
        )
    return feed.get_response()
def recent_feed():
    # Create an Atom feed with the latest items; the number of items is
    # given by the setting ENDPOINT_RESULTS_PER_PAGE.
    # Based on: http://flask.pocoo.org/snippets/10/
    feed = AtomFeed('Recent Items',
                    feed_url=request.url, url=request.url_root,
                    author={'name': 'David Castillo',
                            'email': '*****@*****.**',
                            'uri': 'http://github.com/davcs86'},
                    generator=('self generated', '', '0.1'))
    # Query the items
    items = db_session.query(Item).order_by(Item.created_date.desc()) \
        .limit(app.config['ENDPOINT_RESULTS_PER_PAGE']).all()
    # Add them to the feed
    for item in items:
        feed.add(item.name, unicode(item.description),
                 content_type='html',
                 author=item.author.nickname,
                 url=make_external(url_for('item_detail', item_id=item.id)),
                 categories=[{'name': g.name} for g in item.categories],
                 updated=item.updated_date,
                 links=[{'href': g.locate(),
                         'rel': 'enclosure',
                         'type': g.mimetype} for g in item.picture.all()],
                 published=item.created_date)
    return feed.get_response()
def itemsATOM():
    '''Returns an Atom feed of all items'''
    feed = AtomFeed(title="Imperial Catalog",
                    subtitle="A catalog of Galactic Empire items",
                    feed_url="http://localhost:8000/feed",
                    url="http://localhost:8000",
                    author="Sean Fallmann")
    categories = getAllCategories()
    for c in categories:
        for i in c.items:
            feed.add(
                categories=[{'term': c.name}],  # Atom <category term="..."/>
                title=i.name,
                id=i.id,
                content=i.description,
                content_type="html",
                author=i.user.name,
                url="http://localhost:8000/%s/%s" % (c.name, i.name),
                updated=datetime.datetime.utcnow(),
            )
    # Serialize to a proper Atom response instead of returning the feed object.
    return feed.get_response()
def announcement_feed():
    def bjdate(d):
        # Shift to UTC+8 (Beijing) and format as e.g. 2016年01月02日.
        from datetime import timedelta
        return (d + timedelta(hours=8)).strftime('%Y年%m月%d日')

    type_ = request.args.get('type', '')
    typecn = type_to_cn(type_)
    exchange = request.args.get('exchange', '')
    cond = {}
    feedtitle = '邮币卡公告聚合'  # "Aggregated stamp/coin/card announcements"
    if type_:
        cond['type_'] = type_
        feedtitle += ' - {}'.format(typecn)
    if exchange:
        cond['exchange'] = exchange
        feedtitle += ' - {}'.format(exchange)
    feed = AtomFeed(feedtitle, feed_url=request.url, url=request.url_root)
    announcements = list(
        Announcement.query(cond, sort=[('updated_at', -1)], limit=20))
    for a in announcements:
        feed.add('{} {}'.format(bjdate(a.published_at), a.title.strip()),
                 '更多内容请点击标题连接',  # "Click the title link for details"
                 content_type='text',
                 author=a.exchange,
                 url=a.url,
                 updated=a.updated_at,
                 published=a.published_at)
    return feed.get_response()
def make_rss(data):
    """Make RSS data from provided data."""
    # Create RSS feed object
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url,
                    url=request.url_root)
    # Make feed list from data
    i = 1
    current_time = datetime.now()
    for item in data:
        if item['link'] == '' or item['link'] is None:
            continue
        if item['title'] == '':
            item['title'] = 'Unknown Title'
        try:
            feed.add(item['title'],
                     unicode(item['description']),
                     content_type='html',
                     url=item['link'],
                     updated=current_time)
        except Exception, e:
            print (i, e, item['title'], unicode(item['description']),
                   item['link'], current_time)
        i += 1
    return feed.get_response()
def _topic_feed(request, title, query, order_by):
    # Non-moderators cannot see deleted posts, so we filter them out first.
    # For moderators we mark the posts up as deleted so that they can be
    # kept apart from non-deleted ones.
    if not request.user or not request.user.is_moderator:
        query = query.filter_by(is_deleted=False)
    query = query.order_by(_topic_order[order_by])
    query = query.options(eagerload('author'), eagerload('question'))
    query = query.limit(max(0, min(50, request.args.get('num', 10, type=int))))
    feed = AtomFeed(u'%s — %s' % (title, settings.WEBSITE_TITLE),
                    subtitle=settings.WEBSITE_TAGLINE,
                    feed_url=request.url, url=request.url_root)
    for topic in query.all():
        title = topic.title
        if topic.is_deleted:
            title += u' ' + _(u'(deleted)')
        feed.add(title, topic.question.rendered_text,
                 content_type='html',
                 author=topic.author.display_name,
                 url=url_for(topic, _external=True),
                 id=topic.guid,
                 updated=topic.last_change,
                 published=topic.date)
    return feed.get_response()
def feed():
    posts = get_recent_posts(filter_recent=False)
    feed = AtomFeed('All Articles',
                    feed_url=request.url, url=request.url_root)
    for post in posts:
        add_post_to_feed(feed, post)
    return feed.get_response()
def feed(self, feed_title, title, content, url, published=None, summary=None,
         enclosure=None, media_thumbnail=None):
    feed = AtomFeed(feed_title, feed_url=WP_FEED_URL)
    tz = pytz.timezone(faker.timezone())
    published = published or faker.date_time(tzinfo=tz)
    kwargs = {
        'content_type': 'html',
        'author': faker.name(),
        'url': url,
        'updated': faker.date_time_between(start_date=published, tzinfo=tz),
        'published': published,
    }
    if summary:
        kwargs['summary'] = summary
    if enclosure:
        kwargs['links'] = [{
            'type': enclosure['type'],
            'href': enclosure['url'],
            'rel': 'enclosure',
            'length': faker.pyint(),
        }]
    feed.add(title, content, **kwargs)
    out = feed.to_string()
    if media_thumbnail:
        # AtomFeed has no Media RSS support, so inject the namespace and the
        # <media:thumbnail> element into the serialized XML by hand.
        el = '<media:thumbnail url="{0}" />'.format(media_thumbnail)
        out = out.replace(
            '<feed',
            '<feed xmlns:media="http://search.yahoo.com/mrss/"')
        out = out.replace('</entry>', '{0}</entry>'.format(el))
    return out
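# A hedged usage sketch for the helper above: feedparser understands the
# injected Media RSS namespace and exposes the thumbnail as `media_thumbnail`
# on the parsed entry. `WPFeedFactory` stands in for whatever class actually
# defines feed() here, and the argument values are invented for illustration.
import feedparser

xml = WPFeedFactory().feed(
    feed_title='Demo feed',
    title='A post',
    content='<p>Hello</p>',
    url='http://example.com/a-post',
    media_thumbnail='http://example.com/thumb.jpg')
entry = feedparser.parse(xml).entries[0]
assert entry.media_thumbnail[0]['url'] == 'http://example.com/thumb.jpg'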
def atom_feed():
    config = current_app.config
    feed = AtomFeed(
        config['SITE_TITLE'],
        feed_url=request.url,
        url=request.url_root,
    )
    results, results_page = g.gitpages.index(
        1,
        ref=current_app.default_ref,
        statuses=g.allowed_statuses,
    )
    for page in results:
        doc = page.doc()
        utc_date = page.info.date.astimezone(pytz.utc)
        feed.add(
            doc['title'],
            doc['body'],
            content_type='html',
            url=urljoin(request.url_root, page.to_url()),
            updated=utc_date,
            published=utc_date,
        )
    return feed.get_response()
def recent_feed():
    feed = AtomFeed(_('Last datasets'),
                    feed_url=request.url, url=request.url_root)
    datasets = (Dataset.objects.visible().order_by('-created_at')
                .limit(current_site.feed_size))
    for dataset in datasets:
        author = None
        if dataset.organization:
            author = {
                'name': dataset.organization.name,
                'uri': url_for('organizations.show',
                               org=dataset.organization.id, _external=True),
            }
        elif dataset.owner:
            author = {
                'name': dataset.owner.fullname,
                'uri': url_for('users.show',
                               user=dataset.owner.id, _external=True),
            }
        feed.add(dataset.title,
                 render_template('dataset/feed_item.html', dataset=dataset),
                 content_type='html',
                 author=author,
                 url=url_for('datasets.show',
                             dataset=dataset.id, _external=True),
                 updated=dataset.last_modified,
                 published=dataset.created_at)
    return feed.get_response()
def feed(tag):
    if tag and not TagCloud.objects(tag=tag, count__gt=0).first():
        return abort(404)
    title = 'late.am'
    if tag:
        title = '%s - Posts about %s' % (title, tag)
    feed = AtomFeed(
        title=title,
        feed_url=url_for('feed', _external=True),
        url=url_for('index', _external=True),
        author={'name': 'Dan Crosta', 'email': '*****@*****.**'},
        icon=staticurl('mug.png', _external=True),
        generator=('plog', 'https://github.com/dcrosta/plog', '0.1'),
    )
    posts = Post.objects(published=True).order_by('-pubdate')
    if tag:
        # filter() returns a new queryset, so reassign rather than discard it.
        posts = posts.filter(tags=tag)
    for post in posts[:20]:
        feed.add(
            title=post.title,
            content=domarkdown(post.blurb + '\n' + post.body),
            content_type='html',
            author={'name': 'Dan Crosta', 'email': '*****@*****.**'},
            url=url_for('post', slug=post.slug, _external=True),
            id=url_for('permalink', post_id=post.pk, _external=True),
            published=post.pubdate,
            updated=post.updated)
    response = make_response(unicode(feed))
    response.headers['Content-Type'] = 'application/atom+xml; charset=UTF-8'
    return response
def recent_feed():
    log.info("Generating RSS Feed")
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url, url=request.url_root)
    log.debug("Querying for all blog posts")
    posts = models.Post.query \
        .filter(~models.Post.tags.any(models.Tag.name.in_(['home']))) \
        .order_by(models.Post.updated.desc()) \
        .limit(15) \
        .all()
    base_url = configuration.BASE_URL + '/blog/post/'
    counter = 1
    log.debug("Printing posts in RSS Feed:")
    for post in posts:
        url = base_url + str(counter)
        counter += 1
        log.debug("\t\t" + post.title)
        feed.add(post.title, md.markdown(post.body),
                 content_type='html',
                 author=post.author.first_name + " " + post.author.last_name,
                 url=make_external(url),
                 updated=post.updated,
                 published=post.created)
    return feed.get_response()
def recent_feed():
    feed = AtomFeed(_('Last reuses'),
                    feed_url=request.url, url=request.url_root)
    reuses = Reuse.objects.visible().order_by('-created_at').limit(15)
    for reuse in reuses:
        author = None
        if reuse.organization:
            author = {
                'name': reuse.organization.name,
                'uri': url_for('organizations.show',
                               org=reuse.organization.id, _external=True),
            }
        elif reuse.owner:
            author = {
                'name': reuse.owner.fullname,
                'uri': url_for('users.show',
                               user=reuse.owner.id, _external=True),
            }
        feed.add(reuse.title,
                 render_template('reuse/feed_item.html', reuse=reuse),
                 content_type='html',
                 author=author,
                 url=url_for('reuses.show', reuse=reuse.id, _external=True),
                 updated=reuse.created_at,
                 published=reuse.created_at)
    return feed.get_response()
def rss():
    feed = AtomFeed('Hacker News TLDR',
                    feed_url=request.url,
                    url=request.url_root)
    stories = _get_stories()
    for story in stories:
        if not story.get(BODY, {}).get(SENTENCES):
            body = 'Unable to generate summary'
        else:
            body = '<ul>{}</ul>'.format(
                '\n'.join(
                    '<li>{}</li>'.format(sentence)
                    for sentence in story[BODY][SENTENCES]
                )
            )
        body += '<br/><a href="{}">HN Comments</a>'.format(
            'https://news.ycombinator.com/item?id={}'.format(
                story[HACKER_NEWS_ID]
            )
        )
        feed.add(story[TITLE], body,
                 content_type='html',
                 updated=datetime.strptime(
                     story[DATE_FOUND], '%Y-%m-%d %H:%M:%S.%f'),
                 url=urljoin(request.url_root, story[URL]))
    return feed.get_response()
def get(self, name):
    setting = current_app.config.get('SITE', {
        'title': '',
        'description': ''
    })
    title = setting['title']
    description = setting['description']
    feed = AtomFeed(
        '%s·%s' % (name, title),
        feed_url=request.url,
        url=request.url_root,
        subtitle=description)
    topics = Topic.query.filter_by(tags__name=name).limit(10)
    for topic in topics:
        # Both branches currently serve the raw content; no markdown
        # conversion happens here.
        if topic.content_type == Topic.CONTENT_TYPE_MARKDOWN:
            content = topic.content
        else:
            content = topic.content
        feed.add(topic.title, content,
                 content_type='html',
                 author=topic.author.username,
                 url=urljoin(request.url_root,
                             url_for('topic.topic', topicId=topic.id)),
                 updated=topic.updated_at,
                 published=topic.created_at)
    return feed.get_response()
def recent_atom():
    app.logger.debug(request.url_root)
    feed = AtomFeed('Recent Events',
                    feed_url=request.url, url=request.url_root)
    events = session.query(Event).all()
    for e in events:
        feed.add(id=e.id, title=e.title, content_type='html',
                 updated=e.created)
    return feed.get_response()
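# A minimal, self-contained sketch of the pattern the snippets above share,
# assuming Flask with werkzeug.contrib.atom (or the `feedwerk` backport on
# Werkzeug >= 1.0). `Article` and the 'article_detail' endpoint are
# hypothetical placeholders, not part of any snippet above.
from flask import Flask, request, url_for
from werkzeug.contrib.atom import AtomFeed  # or: from feedwerk.atom import AtomFeed

app = Flask(__name__)

@app.route('/feed.atom')
def minimal_feed():
    feed = AtomFeed('Example Articles',
                    feed_url=request.url,      # URL of the feed itself
                    url=request.url_root)      # URL of the site it describes
    for article in Article.query.order_by(Article.created.desc()).limit(15):
        feed.add(article.title,
                 article.body_html,
                 content_type='html',
                 author=article.author_name,
                 url=url_for('article_detail', article_id=article.id,
                             _external=True),
                 updated=article.updated,      # `updated` is required
                 published=article.created)
    # get_response() serializes the entries into a Werkzeug response with the
    # application/atom+xml mimetype, ready to return from a Flask view.
    return feed.get_response()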