def recent_feed():
    feed = AtomFeed(_('Last datasets'),
                    feed_url=request.url, url=request.url_root)
    datasets = (Dataset.objects.visible().order_by('-created_at')
                .limit(current_site.feed_size))
    for dataset in datasets:
        author = None
        if dataset.organization:
            author = {
                'name': dataset.organization.name,
                'uri': url_for('organizations.show',
                               org=dataset.organization.id, _external=True),
            }
        elif dataset.owner:
            author = {
                'name': dataset.owner.fullname,
                'uri': url_for('users.show',
                               user=dataset.owner.id, _external=True),
            }
        feed.add(dataset.title,
                 render_template('dataset/feed_item.html', dataset=dataset),
                 content_type='html',
                 author=author,
                 url=url_for('datasets.show',
                             dataset=dataset.id, _external=True),
                 updated=dataset.last_modified,
                 published=dataset.created_at)
    return feed.get_response()
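# Note: the Flask view snippets in this collection omit their module-level
# imports. A minimal sketch of what they typically assume is listed below;
# the exact import paths are an assumption (AtomFeed shipped in
# werkzeug.contrib.atom only before Werkzeug 1.0 and lives in separate
# packages afterwards), not something the snippets themselves state.
#
# from flask import request, url_for, render_template
# from werkzeug.contrib.atom import AtomFeed, FeedEntry  # Werkzeug < 1.0
# import datetime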
def itemsATOM():
    '''Returns an Atom feed of all items.'''
    feed = AtomFeed(title="Imperial Catalog",
                    subtitle="A catalog of Galactic Empire items",
                    feed_url="http://localhost:8000/feed",
                    url="http://localhost:8000",
                    author="Sean Fallmann")
    categories = getAllCategories()
    for c in categories:
        for i in c.items:
            feed.add(
                category=c.name,
                title=i.name,
                id=i.id,
                content=i.description,
                content_type="html",
                author=i.user.name,
                url="http://localhost:8000/%s/%s" % (c.name, i.name),
                updated=datetime.datetime.utcnow(),
            )
    return feed
def test_atom_add_one(self):
    a = AtomFeed(title='test_title', id=1)
    f = FeedEntry(title='test_title', id=1,
                  updated=datetime.datetime.now())
    assert len(a.entries) == 0
    a.add(f)
    assert len(a.entries) == 1
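# A minimal, self-contained sketch of the same API outside a test harness,
# assuming the pre-1.0 werkzeug.contrib.atom implementation of AtomFeed and
# FeedEntry; the URLs and field values below are illustrative only.
import datetime
from werkzeug.contrib.atom import AtomFeed, FeedEntry

feed = AtomFeed(title='Example feed', url='http://example.com/',
                feed_url='http://example.com/feed.atom')
feed.add(FeedEntry(title='Example entry',
                   content='Hello, Atom!', content_type='text',
                   url='http://example.com/posts/1',
                   updated=datetime.datetime.utcnow()))
xml = feed.to_string()  # serialize the feed to an Atom XML document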
def activity_feed():
    feed = AtomFeed('Site activity',
                    feed_url=request.url, url=request.url_root)
    activities = Activity.objects.order_by('-created_at').limit(
        current_site.feed_size)
    for activity in activities:
        feed.add('Activity', 'Description')
    # datasets = Dataset.objects.visible().order_by('-date').limit(15)
    # for dataset in datasets:
    #     author = None
    #     if dataset.organization:
    #         author = {
    #             'name': dataset.organization.name,
    #             'uri': url_for('organizations.show',
    #                            org=dataset.organization, _external=True),
    #         }
    #     elif dataset.owner:
    #         author = {
    #             'name': dataset.owner.fullname,
    #             'uri': url_for('users.show',
    #                            user=dataset.owner, _external=True),
    #         }
    #     feed.add(dataset.title, dataset.description,
    #              content_type='html',
    #              author=author,
    #              url=url_for('datasets.show', dataset=dataset, _external=True),
    #              updated=dataset.last_modified,
    #              published=dataset.created_at)
    return feed.get_response()
def feeds_guild(sort=None, guildname=None):
    if not guildname:
        abort(404)
    guild = get_guild(guildname)
    page = int(request.args.get("page", 1))
    t = request.args.get('t')
    ids = guild.idlist(sort=sort, page=page, t=t)
    posts = get_posts(ids, sort=sort)
    feed = AtomFeed(
        title=f'{sort.capitalize()} posts from +{guild.name} on ruqqus',
        feed_url=request.url,
        url=request.url_root)
    for post in posts:
        feed.add(post.title,
                 post.body_html,
                 content_type='html',
                 author=post.author.username,
                 url=full_link(post.permalink),
                 updated=datetime.fromtimestamp(post.created_utc),
                 published=datetime.fromtimestamp(post.created_utc),
                 links=[{'href': post.url}])
    return feed.get_response()
def feed_latest_posts():
    feed_url = request.url
    url_root = request.url_root.strip("/")
    if "SITE_URL" in app.config:
        url_root = app.config["SITE_URL"]
        feed_url = "%s%s" % (url_root, request.path)
    feed = AtomFeed("Recent posts", feed_url=feed_url, url=url_root)
    posts = Post.query.order_by(desc(Post.date)).\
        filter(Post.status == PostStatus.PUBLISH).\
        filter(Post.type == PostType.POST)
    for post in posts:
        content = post.content
        if post.markdown:
            content = markdown.markdown(content)
        if post.author:
            author_name = post.author.nicename
        else:
            author_name = "Empty"
        feed.add(post.title, unicode(content),
                 content_type='html',
                 author=author_name,
                 url="%s/%s" % (url_root, post.name),
                 updated=post.date,
                 published=post.modified)
    return feed.get_response()
def recent_feed():
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url, url=request.url_root)
    threads = Thread.query.order_by(Thread.last_updated.desc()).filter(
        Thread.user == None).all()
    for thread in threads:
        url = url_for('thread',
                      display_hash=thread.display_hash, title=thread.slug())
        feed.add(thread.title, '',
                 content_type='html',
                 author=thread.display_name,
                 url=url,
                 updated=thread.last_updated,
                 published=thread.date_created)
    return feed.get_response()
def atomfeed():
    t = request.args.get('t')
    if not t:
        t = 'url'
    r = get_last(t)
    if not (r.status_code == 200 and len(r.response) > 0):
        return {}
    j = json.loads(r.response[0])
    if len(j) < 1:
        return {}
    feed = AtomFeed('Les liens (pas forcement) SFW de GCU-Squad!',
                    feed_url=request.url,
                    url=request.url_root,
                    subtitle='Adresses vues sur le canal #[email protected]')
    for item in j:
        post = item['_source']
        for url in post['urls']:
            datefmt = '%Y-%m-%dT%H:%M:%S.%f'
            pubdate = datetime.datetime.strptime(post['fulldate'], datefmt)
            update = pubdate
            if post['tags']:
                line = '{0} #{1}#'.format(post['line'], ','.join(post['tags']))
            else:
                line = post['line']
            feed.add(title=line,
                     title_type='text',
                     url=url,
                     author=post['nick'],
                     published=pubdate,
                     updated=update)
    return feed.get_response()
def recent_atom():
    app.logger.debug(request.url_root)
    feed = AtomFeed('Recent Events',
                    feed_url=request.url, url=request.url_root)
    events = session.query(Event).all()
    for e in events:
        feed.add(id=e.id,
                 title=e.title,
                 content_type='html',
                 updated=e.created)
    return feed.get_response()
def feed(): """Returns 50 latest plays""" feed = AtomFeed('Home Runs Only', feed_url=request.url, url=request.url_root) plays = Play.query.order_by(Play.at.desc()).limit(50) for play in plays: feed.add(play.catchy_journalist_title(), 'Home run hit by {batter} off of {pitcher} on {date}'.format( batter=play.batter.get_full_name(), pitcher=play.pitcher.get_full_name(), date=play.at.strftime('%B %d, %Y'), ), content_type='html', author=play.pitcher.get_full_name(), url=urlparse.urljoin( request.url_root, url_for('view_play', content_id=play.content_id)), updated=play.at, published=play.at) return feed.get_response()
def feed(site):
    if site == 'hackernews':
        title = 'Hacker News Digest'
        news_list = models.HackerNews.query.order_by('submit_time desc').all()
    else:
        title = 'Startup News Digest'
        news_list = models.StartupNews.query.order_by('submit_time desc').all()
    feed = AtomFeed(title,
                    updated=models.LastUpdated.get(site),
                    feed_url=request.url,
                    url=urljoin(request.url_root, url_for(site)),
                    author={
                        'name': 'polyrabbit',
                        'uri': 'https://github.com/polyrabbit/'
                    })
    for news in news_list:
        feed.add(
            news.title,
            content=news.summary and
            ('<img src="%s" style="width: 220px; float: left" />' % news.image.url
             if news.img_id else '') + news.summary,
            author={
                'name': news.author,
                'uri': news.author_link
            } if news.author_link else (),
            url=news.url,
            updated=news.submit_time,
        )
    return feed.get_response()
def _topic_feed(request, title, query, order_by):
    # Non-moderators cannot see deleted posts, so we filter them out first.
    # For moderators we mark the posts up as deleted so that
    # they can be kept apart from non-deleted ones.
    if not request.user or not request.user.is_moderator:
        query = query.filter_by(is_deleted=False)
    query = query.order_by(_topic_order[order_by])
    query = query.options(eagerload('author'), eagerload('question'))
    query = query.limit(max(0, min(50, request.args.get('num', 10, type=int))))
    feed = AtomFeed(u'%s — %s' % (title, settings.WEBSITE_TITLE),
                    subtitle=settings.WEBSITE_TAGLINE,
                    feed_url=request.url,
                    url=request.url_root)
    for topic in query.all():
        title = topic.title
        if topic.is_deleted:
            title += u' ' + _(u'(deleted)')
        feed.add(title, topic.question.rendered_text,
                 content_type='html',
                 author=topic.author.display_name,
                 url=url_for(topic, _external=True),
                 id=topic.guid,
                 updated=topic.last_change,
                 published=topic.date)
    return feed.get_response()
def getsingle():
    connect('c')
    storedata = {}
    name1 = request.args.get('name1')
    type = request.args.get('type')
    if " " in name1:
        name1 = name1.replace(" ", "")
    if type == 'json':
        for t in Crime.objects:
            if t.id == name1:
                storedata[t.id] = t.data
        return jsonify({'alldata': storedata}), 200
    if type == 'xml':
        for t in Crime.objects:
            if t.id == name1:
                nowtime = datetime.datetime.fromtimestamp(time.time())
                xml = dicttoxml.dicttoxml(t.data[0], custom_root='content')
                dom = parseString(xml).toprettyxml()
                dom = re.sub(r'<\?xml version="[0-9]*\.[0-9]*" \?>\n', '', dom)
                feed = AtomFeed(
                    id='https://mlab.com/databases/my-database/collections/crime/' + t.id,
                    title=t.id,
                    updated=nowtime,
                    author="z5129269")
                storedata[t.id] = feed.to_string() + dom
        return jsonify({'alldata': storedata}), 200
def create_atom_feed(name, data, query, size, start, url, to_atom):
    if query == '*':
        title_query = 'All'
    else:
        title_query = query
    title = '{name}: Atom Feed for query: "{title_query}"'.format(
        name=name, title_query=title_query)
    author = 'COS'
    links = [{
        'href': '{url}?page=1'.format(url=url),
        'rel': 'first'
    }, {
        'href': '{url}?page={page}'.format(url=url, page=(start / size) + 2),
        'rel': 'next'
    }, {
        'href': '{url}?page={page}'.format(url=url, page=(start / size)),
        'rel': 'previous'
    }]
    links = links[1:-1] if (start / size) == 0 else links
    feed = AtomFeed(title=title, feed_url=url, author=author, links=links)
    for doc in data:
        feed.add(**to_atom(doc))
    return feed
def do_atom(path=None):
    feed = AtomFeed('Changes' + (' - %s' % path if path != '/' else ''),
                    feed_url=request.url, url=request.url_root)
    history = []
    if path != '/':
        for entry in get_history(path):
            entry.insert(1, path)
            history.append(entry)
            if len(history) == ATOM_LIMIT:
                break
    else:
        for path in get_documents_list():
            for entry in get_history(path):
                entry.insert(1, path)
                history.append(entry)
        history = sorted(history, key=lambda x: x[0], reverse=True)[:ATOM_LIMIT]
    for date, path, rev, author, desc in history:
        feed.add(path,
                 desc if desc != '-' else 'No summary available',
                 url=url_for('index', path=path, do='compare', to=rev),
                 author=author,
                 updated=date)
    return feed.get_response()
def last_24_hr():
    posts = get_recent_posts(filter_recent=True)
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url, url=request.url_root)
    for post in posts:
        add_post_to_feed(feed, post)
    return feed.get_response()
def generate_feed(artifact):
    feed = AtomFeed(
        title=title or 'Feed',
        subtitle=unicode(subtitle or ''),
        subtitle_type=hasattr(subtitle, '__html__') and 'html' or 'text',
        feed_url=feed_url,
        url=embed_url,
        id=get_id(ctx.env.project.id + 'lektor'))
    for item in items:
        feed.add(
            get_item_title(item, item_title_field),
            get_item_body(item, item_body_field),
            xml_base=url_to(item, external=True),
            url=url_to(item, external=True),
            content_type='html',
            id=get_id(u'%slektor/%s' % (
                ctx.env.project.id,
                item['_path'].encode('utf-8'),
            )),
            author=get_item_author(item, item_author_field),
            updated=get_item_updated(item, item_date_field))
    with artifact.open('wb') as f:
        f.write(feed.to_string().encode('utf-8') + '\n')
def feed_view():
    data = [
        g.redis.hgetall("{}:{}".format(GLOBAL['ProcessName'], imgId))
        for imgId in list(g.redis.smembers(picKey))
    ]
    data = [
        i for i in sorted(data,
                          key=lambda k: (k.get('ctime', 0), k.get('imgUrl', 0)),
                          reverse=True)
    ][:15]
    feed = AtomFeed(g.site["site_RssTitle"],
                    subtitle='Cherry Blossoms',
                    feed_url=request.url,
                    url=request.url_root,
                    icon=url_for('static', filename='images/favicon.ico',
                                 _external=True),
                    author=__author__)
    for img in data:
        title = timestamp_datetime(float(img['ctime']))
        content = u'<img src="{}">'.format(img['imgUrl'])
        feed.add(title, content,
                 content_type='html',
                 id=img['imgId'],
                 url=img['imgUrl'],
                 author=__author__,
                 updated=datetime.datetime.fromtimestamp(float(img['ctime'])),
                 published=datetime.datetime.fromtimestamp(float(img['ctime'])))
    return feed.get_response()
def get(self):
    feed = AtomFeed(
        'Recently Added Watches - Wanna Buy A Watch',
        feed_url=request.url,
        url=request.url_root
    )
    # Prepare query
    watches = session.query(Watch)
    # TODO: .limit(15) and date_added
    watches = watches.order_by(Watch.title).all()
    for watch in watches:
        feed.add(
            watch.title,
            unicode(watch.long_description),
            content_type='html',
            author='wbaw',
            url=BASE_URL + '/' + watch.page_url + '?src=wbawsearch',
            # TODO: Add updated and published
            updated=datetime.now(),
            published=datetime.now()
        )
    return feed.get_response()
def feed():
    blogging_engine = _get_blogging_engine(current_app)
    storage = blogging_engine.storage
    config = blogging_engine.config
    count = config.get("BLOGGING_FEED_LIMIT")
    posts = storage.get_posts(count=count, offset=None, recent=True,
                              user_id=None, tag=None, include_draft=False)
    feed = AtomFeed(
        '%s - All Articles' % config.get("BLOGGING_SITENAME",
                                         "Flask-Blogging"),
        feed_url=request.url, url=request.url_root, generator=None)
    if len(posts):
        feed_posts_fetched.send(blogging_engine.app, engine=blogging_engine,
                                posts=posts)
    for post in posts:
        blogging_engine.process_post(post, render=True)
        feed.add(post["title"], str(post["rendered_text"]),
                 content_type='html',
                 author=post["user_name"],
                 url=config.get("BLOGGING_SITEURL", "") + post["url"],
                 updated=post["last_modified_date"],
                 published=post["post_date"])
    feed_posts_processed.send(blogging_engine.app, engine=blogging_engine,
                              feed=feed)
    response = feed.get_response()
    response.headers["Content-Type"] = "application/xml"
    return response
def feed():
    feed = AtomFeed(g.options['name'],
                    subtitle=g.options['slogan'],
                    feed_url=request.url_root + 'feed/',
                    url=request.url_root,
                    generator=None)
    posts = Post.query.filter(
        status='published', type='post').order_by('created').limit(
            20, 0, array=True)
    for post in posts:
        feed.add(post.title, post.content,
                 content_type='html',
                 author=post.user.nicename,
                 url=request.url_root + post.guid,
                 updated=post.modified,
                 published=post.created)
    response = feed.get_response()
    response.headers["Content-Type"] = 'application/xml'
    return response
def _feeds(query=None, title='Recent Articles'):
    posts = db.find("posts.files", query or {}).\
        sort([("uploadDate", -1)]).limit(PAGE_SIZE)
    feed = AtomFeed(title, feed_url=request.url, url=request.url_root)
    from monblog.common.encodings import force_bytes
    for post in posts:
        author = conf.BLOG_SETTINGS.get("AUTHOR", "")
        url = urljoin(request.url_root,
                      url_for("get_post", post_id=str(post["_id"])))
        text = force_bytes(
            db.fs.get(objectid.ObjectId(post["_id"])).read(),
            "ascii", errors="ignore")
        feed.add(
            post["metadata"].get("title"),
            markdown(text),
            id=url,
            content_type='html',
            url=url,
            updated=post["uploadDate"],
            published=post["uploadDate"],
            author=post["metadata"].get("author", author))
    return feed.get_response()
def recentAtom():
    feed = AtomFeed(
        title="Recent Items",
        feed_url=request.url,
        url=request.host_url,
        subtitle="The most recently created catalog items.",
    )
    for item in db_session.query(Item) \
                          .order_by(Item.updated_on.desc()) \
                          .limit(10):
        categories = [{
            'term': tag.name.lower(),
            'label': tag.name
        } for tag in item.tags]
        feed.add(title=item.name,
                 url=url_for('viewItem', item_name=item.name, item_id=item.id),
                 updated=item.updated_on,
                 published=item.created_on,
                 content_type='text',
                 content=unicode(item.description),
                 categories=categories,
                 author={
                     'name': "Random dude",
                     'email': '*****@*****.**'
                 })  # Replace with user once auth is in place
    return feed.get_response()
def atom_feed(request):
    """Generates the Atom feed with the newest images."""
    user = User.query.filter_by(
        username=request.matchdict['user']).first()
    if not user or not user.has_privilege(u'active'):
        return render_404(request)
    cursor = MediaEntry.query.filter_by(
        uploader=user.id,
        state=u'processed').\
        order_by(MediaEntry.created.desc()).\
        limit(ATOM_DEFAULT_NR_OF_UPDATED_ITEMS)
    # The ATOM feed id is a tag URI (see http://en.wikipedia.org/wiki/Tag_URI)
    atomlinks = [{
        'href': request.urlgen(
            'mediagoblin.user_pages.user_home',
            qualified=True, user=request.matchdict['user']),
        'rel': 'alternate',
        'type': 'text/html'
    }]
    if mg_globals.app_config["push_urls"]:
        for push_url in mg_globals.app_config["push_urls"]:
            atomlinks.append({
                'rel': 'hub',
                'href': push_url})
    feed = AtomFeed(
        "MediaGoblin: Feed for user '%s'" % request.matchdict['user'],
        feed_url=request.url,
        id='tag:{host},{year}:gallery.user-{user}'.format(
            host=request.host,
            year=datetime.datetime.today().strftime('%Y'),
            user=request.matchdict['user']),
        links=atomlinks)
    for entry in cursor:
        feed.add(
            entry.get('title'),
            entry.description_html,
            id=entry.url_for_self(request.urlgen, qualified=True),
            content_type='html',
            author={
                'name': entry.get_uploader.username,
                'uri': request.urlgen(
                    'mediagoblin.user_pages.user_home',
                    qualified=True,
                    user=entry.get_uploader.username)},
            updated=entry.get('created'),
            links=[{
                'href': entry.url_for_self(
                    request.urlgen, qualified=True),
                'rel': 'alternate',
                'type': 'text/html'}])
    return feed.get_response()
def get(self, name):
    setting = current_app.config.get('SITE', {
        'title': '',
        'description': ''
    })
    title = setting['title']
    description = setting['description']
    feed = AtomFeed(
        '%s·%s' % (name, title),
        feed_url=request.url,
        url=request.url_root,
        subtitle=description)
    topics = Topic.query.filter_by(tags__name=name).limit(10)
    for topic in topics:
        if topic.content_type == Topic.CONTENT_TYPE_MARKDOWN:
            content = topic.content
        else:
            content = topic.content
        feed.add(topic.title, content,
                 content_type='html',
                 author=topic.author.username,
                 url=urljoin(
                     request.url_root,
                     url_for('topic.topic', topicId=topic.id)),
                 updated=topic.updated_at,
                 published=topic.created_at)
    return feed.get_response()
def recent_episodes():
    feed = AtomFeed(
        title='Arrowverse.info - Recent Episodes',
        feed_url=request.url,
        url=request.url_root,
        logo=url_for('static', filename='favicon.png', _external=True),
        icon=url_for('static', filename='favicon.png', _external=True),
    )
    hide_shows_list = request.args.getlist('hide_show')
    newest_first_episode_list = get_full_series_episode_list(
        excluded_series=hide_shows_list)[::-1]
    for episode in newest_first_episode_list[:15]:
        title = '{series} - {episode_id} - {episode_name}'.format(**episode)
        content = '{series} {episode_id} {episode_name} will air on {air_date}'.format(
            **episode)
        show_dict = app.config['SHOW_DICT_WITH_NAMES'][episode['series']]
        data_source = f"{show_dict['root']}{show_dict['url']}"
        feed.add(
            title=title,
            content=content,
            content_type='text',
            url=data_source,
            author=show_dict['root'],
            updated=datetime.now(),
        )
    return feed.get_response()
def recent_feed():
    feed = AtomFeed('Recent Items',
                    feed_url=request.url,
                    url=request.url_root,
                    subtitle="Most recent items.")
    items = session.query(Item).order_by(asc(Item.dateAdded))
    for item in items:
        categories = []
        cat = {'term': item.category.name, 'label': 'none'}
        categories.append(cat)
        author = {'name': item.user.name, 'email': item.user.email}
        item_id = request.url_root + 'catalog/items/' + str(item.id)
        entry = FeedEntry(item.name, item.description,
                          content_type='html',
                          author=author,
                          categories=categories,
                          added=item.dateAdded,
                          id=item_id,
                          published=item.dateAdded,
                          updated=item.lastUpdated)
        feed.add(entry)
    return feed.get_response()
def feed_folder(folder):
    if folder.split('/')[0] != 'admin':
        folder = Folder.query.filter(guid=folder).one()
        if folder:
            posts = Post.query.filter(folder_id=folder.id,
                                      status='published',
                                      type='post').order_by('created').limit(
                                          20, 0, array=True)
            feed = AtomFeed(
                g.options['name'] + ' • ' + folder.name,
                subtitle=folder.seo_content,
                feed_url=request.url_root + 'feed/',
                url=request.url_root,
                generator=None
            )
            for post in posts:
                feed.add(
                    post.title,
                    post.content,
                    content_type='html',
                    author=post.user.nicename,
                    url=request.url_root + post.guid,
                    updated=post.modified,
                    published=post.created
                )
            response = feed.get_response()
            response.headers["Content-Type"] = 'application/xml'
            return response
        else:
            return is_404()
    else:
        return is_admin_404()
def test_feedparser_authors():
    for test in tests:
        if test.skip:
            continue
        feed = AtomFeed(title="Test Feed", feed_url="http://testfeed.com")
        entry = FeedEntry(
            title="Test Entry",
            url="http://testfeed.com/testentry",
            updated=datetime.utcnow(),
            content="Test Entry",
            author=test.namestring,
        )
        feed.entries.append(entry)
        rss = feed.to_string()
        parsed = feedparser.parse(rss)
        assert parsed.entries is not None
        assert len(parsed.entries) == 1
        assert len(parsed.entries[0].authors) == len(test.expected)
def make_rss(data):
    """Make an Atom feed from the provided data."""
    # Create the feed object
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url, url=request.url_root)
    # Build the feed entries from data
    i = 1
    current_time = datetime.now()
    for item in data:
        if item['link'] == '' or item['link'] is None:
            continue
        if item['title'] == '':
            item['title'] = 'Unknown Title'
        try:
            feed.add(item['title'], unicode(item['description']),
                     content_type='html',
                     url=item['link'],
                     updated=current_time)
        except Exception as e:
            print(i, e, item['title'], unicode(item['description']),
                  item['link'], current_time)
        i += 1
    # Return the assembled feed so callers can serialize it
    return feed
def recent_feed():
    # Create an Atom feed with the latest items.
    # The number of items is given by the setting ENDPOINT_RESULTS_PER_PAGE.
    # Based on: http://flask.pocoo.org/snippets/10/
    feed = AtomFeed('Recent Items',
                    feed_url=request.url,
                    url=request.url_root,
                    author={'name': 'David Castillo',
                            'email': '*****@*****.**',
                            'uri': 'http://github.com/davcs86'},
                    generator=('self generated', '', '0.1'))
    # Query the items
    items = db_session.query(Item).order_by(Item.created_date.desc()) \
        .limit(app.config['ENDPOINT_RESULTS_PER_PAGE']).all()
    # Add them to the feed
    for item in items:
        feed.add(item.name, unicode(item.description),
                 content_type='html',
                 author=item.author.nickname,
                 url=make_external(url_for('item_detail', item_id=item.id)),
                 categories=[{'name': g.name} for g in item.categories],
                 updated=item.updated_date,
                 links=[{'href': g.locate(), 'rel': 'enclosure',
                         'type': g.mimetype} for g in item.picture.all()],
                 published=item.created_date)
    return feed.get_response()
def feed():
    posts = get_recent_posts(filter_recent=False)
    feed = AtomFeed('All Articles',
                    feed_url=request.url, url=request.url_root)
    for post in posts:
        add_post_to_feed(feed, post)
    return feed.get_response()
def announcement_feed():
    def bjdate(d):
        from datetime import timedelta
        return (d + timedelta(hours=8)).strftime('%Y年%m月%d日')

    type_ = request.args.get('type', '')
    typecn = type_to_cn(type_)
    exchange = request.args.get('exchange', '')
    cond = {}
    feedtitle = '邮币卡公告聚合'
    if type_:
        cond['type_'] = type_
        feedtitle += ' - {}'.format(typecn)
    if exchange:
        cond['exchange'] = exchange
        feedtitle += ' - {}'.format(exchange)
    feed = AtomFeed(feedtitle, feed_url=request.url, url=request.url_root)
    announcements = list(
        Announcement.query(cond, sort=[('updated_at', -1)], limit=20))
    for a in announcements:
        feed.add('{} {}'.format(bjdate(a.published_at), a.title.strip()),
                 '更多内容请点击标题连接',
                 content_type='text',
                 author=a.exchange,
                 url=a.url,
                 updated=a.updated_at,
                 published=a.published_at)
    return feed.get_response()
def feeds(sort=None):
    cutoff = int(time.time()) - (60 * 60 * 24)  # 1 day
    posts = db.query(Submission).filter(Submission.created_utc >= cutoff,
                                        Submission.is_banned == False,
                                        Submission.is_deleted == False,
                                        Submission.stickied == False)
    if sort == "hot":
        posts = posts.order_by(text("submissions.rank_hot desc"))
    elif sort == "fiery":
        posts = posts.order_by(text("submissions.rank_fiery desc"))
    elif sort == "top":
        posts = posts.order_by(text("submissions.score desc"))
    feed = AtomFeed(title=f'Top 5 {sort} Posts from ruqqus',
                    feed_url=request.url,
                    url=request.url_root)
    posts = posts.limit(5).all()
    for post in posts:
        feed.add(post.title,
                 post.body_html,
                 content_type='html',
                 author=post.author.username,
                 url=f"https://ruqqus.com{post.permalink}",
                 updated=datetime.fromtimestamp(post.created_utc),
                 published=datetime.fromtimestamp(post.created_utc))
    return feed.get_response()
def feeds_public(sort=None):
    page = int(request.args.get("page", 1))
    t = request.args.get('t')
    posts = frontlist(sort=sort,
                      page=page,
                      t=t,
                      v=None,
                      hide_offensive=False,
                      ids_only=False)
    feed = AtomFeed(title=f'Top 5 {sort} Posts from ruqqus',
                    feed_url=request.url,
                    url=request.url_root)
    for post in posts:
        feed.add(post.title,
                 post.body_html,
                 content_type='html',
                 author=post.author.username,
                 url=full_link(post.permalink),
                 updated=datetime.fromtimestamp(post.created_utc),
                 published=datetime.fromtimestamp(post.created_utc),
                 links=[{'href': post.url}])
    return feed.get_response()
def build_feed():
    feed = AtomFeed(current_app.config['SITE_NAME'],
                    feed_url=current_app.config['DOMAIN'] + 'rss.xml',
                    url=current_app.config['DOMAIN'],
                    subtitle=current_app.config['SUBTITLE'],
                    author=current_app.config['AUTHOR'],
                    updated=datetime.datetime.now())
    entries = Entry.get_all_published()
    for _entry in entries:
        time = datetime.datetime.strptime(_entry['date'], '%Y-%m-%d %H:%M:%S')
        feed.add(unicode(_entry['title']),
                 unicode(markdown(_entry['content'])),
                 content_type='html',
                 author=current_app.config['AUTHOR'],
                 published=time,
                 updated=time,
                 id=current_app.config['DOMAIN'] + _entry['slug'] + '/',
                 url=current_app.config['DOMAIN'] + 'posts/' + _entry['slug'] + '/')
    with codecs.open(BASE_DIR + '/rss.xml', 'w', 'utf-8-sig') as f:
        f.write(feed.to_string())
def atom_feed():
    config = current_app.config
    feed = AtomFeed(
        config['SITE_TITLE'],
        feed_url=request.url,
        url=request.url_root,
    )
    results, results_page = g.gitpages.index(
        1,
        ref=current_app.default_ref,
        statuses=g.allowed_statuses,
    )
    for page in results:
        doc = page.doc()
        utc_date = page.info.date.astimezone(pytz.utc)
        feed.add(
            doc['title'],
            doc['body'],
            content_type='html',
            url=urljoin(request.url_root, page.to_url()),
            updated=utc_date,
            published=utc_date,
        )
    return feed.get_response()
def feed(name):
    if current_app.config.get('PRIVATE_WIKI') and current_user.is_anonymous:
        return current_app.login_manager.unauthorized()
    cname = to_canonical(name)
    wiki_name = current_app.config['SITE_TITLE']
    start = 0
    length = int(request.args.get('length', 20))
    the_feed = AtomFeed(
        title="{} - Recent changes for page '{}'".format(wiki_name, cname),
        url=url_for('wiki.page', name=cname, _external=True),
        id="{}_pagefeed_{}".format(to_canonical(wiki_name), cname),
        feed_url=url_for('wiki.feed', name=cname, _external=True),
        generator=("Realms wiki", 'https://github.com/scragg0x/realms-wiki',
                   __version__)
    )
    page = g.current_wiki.get_page(cname)
    items = list(itertools.islice(page.history, start, start + length))  # type: list[dict]
    for item in items:
        the_feed.add(
            title="Commit '{}'".format(item['sha']),
            content=item['message'],
            url=url_for('wiki.commit', name=name, sha=item['sha'],
                        _external=True),
            id="{}/{}".format(item['sha'], cname),
            author=item['author'],
            updated=datetime.fromtimestamp(item['time'])
        )
    response = make_response((
        the_feed.to_string(),
        {'Content-type': 'application/atom+xml; charset=utf-8'}))
    response.add_etag()
    return response.make_conditional(request)
def recent_feed():
    feed = AtomFeed(_('Last reuses'),
                    feed_url=request.url, url=request.url_root)
    reuses = Reuse.objects.visible().order_by('-created_at').limit(15)
    for reuse in reuses:
        author = None
        if reuse.organization:
            author = {
                'name': reuse.organization.name,
                'uri': url_for('organizations.show',
                               org=reuse.organization.id, _external=True),
            }
        elif reuse.owner:
            author = {
                'name': reuse.owner.fullname,
                'uri': url_for('users.show',
                               user=reuse.owner.id, _external=True),
            }
        feed.add(reuse.title,
                 render_template('reuse/feed_item.html', reuse=reuse),
                 content_type='html',
                 author=author,
                 url=url_for('reuses.show', reuse=reuse.id, _external=True),
                 updated=reuse.created_at,
                 published=reuse.created_at)
    return feed.get_response()
def feed(self, feed_title, title, content, url, published=None, summary=None,
         enclosure=None, media_thumbnail=None):
    feed = AtomFeed(feed_title, feed_url=WP_FEED_URL)
    tz = pytz.timezone(faker.timezone())
    published = published or faker.date_time(tzinfo=tz)
    kwargs = {
        'content_type': 'html',
        'author': faker.name(),
        'url': url,
        'updated': faker.date_time_between(start_date=published, tzinfo=tz),
        'published': published
    }
    if summary:
        kwargs['summary'] = summary
    if enclosure:
        kwargs['links'] = [{
            'type': enclosure['type'],
            'href': enclosure['url'],
            'rel': 'enclosure',
            'length': faker.pyint(),
        }]
    feed.add(title, content, **kwargs)
    out = feed.to_string()
    if media_thumbnail:
        el = '<media:thumbnail url="{0}" />'.format(media_thumbnail)
        out = out.replace(
            '<feed',
            '<feed xmlns:media="http://search.yahoo.com/mrss/"')
        out = out.replace('</entry>', '{0}</entry>'.format(el))
    return out
def feed(tag):
    if tag and not TagCloud.objects(tag=tag, count__gt=0).first():
        return abort(404)
    title = 'late.am'
    if tag:
        title = '%s - Posts about %s' % (title, tag)
    feed = AtomFeed(
        title=title,
        feed_url=url_for('feed', _external=True),
        url=url_for('index', _external=True),
        author={'name': 'Dan Crosta', 'email': '*****@*****.**'},
        icon=staticurl('mug.png', _external=True),
        generator=('plog', 'https://github.com/dcrosta/plog', '0.1'),
    )
    posts = Post.objects(published=True).order_by('-pubdate')
    if tag:
        # filter() returns a new queryset, so reassign to apply the tag filter
        posts = posts.filter(tags=tag)
    for post in posts[:20]:
        feed.add(
            title=post.title,
            content=domarkdown(post.blurb + '\n' + post.body),
            content_type='html',
            author={'name': 'Dan Crosta', 'email': '*****@*****.**'},
            url=url_for('post', slug=post.slug, _external=True),
            id=url_for('permalink', post_id=post.pk, _external=True),
            published=post.pubdate,
            updated=post.updated)
    response = make_response(unicode(feed))
    response.headers['Content-Type'] = 'application/atom+xml; charset=UTF-8'
    return response
def recent_feed():
    log.info("Generating RSS Feed")
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url, url=request.url_root)
    log.debug("Querying for all blog posts")
    posts = models.Post.query \
        .filter(~models.Post.tags.any(models.Tag.name.in_(['home']))) \
        .order_by(models.Post.updated.desc()) \
        .limit(15) \
        .all()
    base_url = configuration.BASE_URL + '/blog/post/'
    counter = 1
    log.debug("Printing posts in RSS Feed:")
    for post in posts:
        url = base_url + str(counter)
        counter += 1
        log.debug("\t\t" + post.title)
        feed.add(post.title, md.markdown(post.body),
                 content_type='html',
                 author=post.author.first_name + " " + post.author.last_name,
                 url=make_external(url),
                 updated=post.updated,
                 published=post.created)
    return feed.get_response()
def rss():
    feed = AtomFeed('Hacker News TLDR',
                    feed_url=request.url, url=request.url_root)
    stories = _get_stories()
    for story in stories:
        if not story.get(BODY, {}).get(SENTENCES):
            body = 'Unable to generate summary'
        else:
            body = '<ul>{}</ul>'.format(
                '\n'.join(
                    "<li>{}</li>".format(sentence)
                    for sentence in story[BODY][SENTENCES]
                )
            )
        body += "<br/><a href={}>HN Comments</a>".format(
            'https://news.ycombinator.com/item?id={}'.format(
                story[HACKER_NEWS_ID]
            )
        )
        feed.add(story[TITLE], body,
                 content_type='html',
                 updated=datetime.strptime(
                     story[DATE_FOUND], '%Y-%m-%d %H:%M:%S.%f'),
                 url=urljoin(request.url_root, story[URL]),
                 )
    return feed.get_response()
def feed():
    latest_feed = AtomFeed("{0} - Posts".format(app.config['SETTINGS_TITLE']),
                           feed_url=request.url,
                           url=request.url_root)
    roles = computed_user_roles()
    latest_posts = Post.query \
        .filter_by(status='published') \
        .join(Category) \
        .filter(Category.roles.any(Role.id.in_(roles))) \
        .join(PostRating) \
        .order_by(desc(Post.creation_date)) \
        .limit(15) \
        .all()
    for post in latest_posts:
        author = ''
        if post.user:
            author = post.user.username
        updated = post.edit_date if post.edit_date else post.creation_date
        latest_feed.add(post.title, unicode(post.content),
                        content_type='html',
                        author=author,
                        url=url_for('posts.view',
                                    category=post.category.url,
                                    uuid=post.uuid,
                                    slug=post.slug,
                                    _external=True),
                        updated=updated,
                        published=post.creation_date)
    return latest_feed.get_response()
def atom(ctx):
    feed = AtomFeed(ctx.odb.name,
                    feed_url=ctx.url_for("atom"),
                    url=ctx.url_for("root"),
                    subtitle=ctx.odb.description)
    pattern = ctx.app.recent_doc_pattern
    for added_date, root_path in utils.recent_files(ctx, count=10,
                                                    pattern=pattern):
        blob_obj = ctx.odb.head.tree[root_path]
        assert isinstance(blob_obj, BlobObject)
        current_blob_obj = ctx.odb.head.tree[blob_obj.abs_name]
        doc = render_blob(ctx, current_blob_obj)
        url = "http://" + ctx.request.host + ctx.url_for(
            "view_obj", rev="HEAD", path=blob_obj.root_path)
        feed.add(
            doc.title,
            doc.body,
            title_type="html",
            content_type="html",
            author=doc.author_name,
            url=url,
            updated=doc.last_modified,
            published=added_date,
        )
    return feed.get_response()
def atom_feed(subreddit_name=None, sort_type="hot"):
    """ """
    feed = AtomFeed(title=f"upvote.pub > {subreddit_name}",
                    feed_url=request.url,
                    url=request.url_root)
    # Pseudo frontpage subreddit --> None
    if subreddit_name == 'frontpage':
        subreddit_name = None
    subreddit = Subreddit.query.filter_by(name=subreddit_name).first()
    trending = True if request.args.get('trending') else False
    thread_paginator = process_thread_paginator(trending=trending,
                                                subreddit=subreddit,
                                                sort_type=sort_type)
    for thread in thread_paginator.items:
        thread_url = url_for('threads.thread_permalink',
                             subreddit_name=thread.subreddit.name,
                             thread_id=thread.id,
                             title=slugify(thread.publication.pub_title),
                             _external=True)
        feed.add(thread.publication.pub_title,
                 thread.publication.pub_abstract,
                 content_type='html',
                 author=thread.publication.pub_authors,
                 url=thread_url,
                 updated=thread.created_on,
                 published=thread.updated_on)
    return feed.get_response()
def recent_feed():
    feed_title = OctBlogSettings['blog_meta']['name']
    feed = AtomFeed(feed_title, feed_url=request.url, url=request.url_root)
    # data = {}
    # data['allow_donate'] = OctBlogSettings['donation']['allow_donate']
    # data['donation_msg'] = OctBlogSettings['donation']['donation_msg']
    # data['display_wechat'] = OctBlogSettings['wechat']['display_wechat']
    # data['wechat_msg'] = OctBlogSettings['wechat']['wechat_msg']
    # data['display_copyright'] = OctBlogSettings['copyright']['display_copyright']
    # data['copyright_msg'] = OctBlogSettings['copyright']['copyright_msg']
    # post_footer = get_post_footer(**data)
    posts = models.Post.objects.filter(post_type='post', is_draft=False)[:15]
    only_abstract_in_feed = OctBlogSettings['only_abstract_in_feed']
    content = 'abstract' if only_abstract_in_feed else 'content_html'
    for post in posts:
        feed.add(
            post.title,
            getattr(post, content),
            content_type='html',
            author=post.author.username,
            url=post.get_absolute_url(),
            updated=post.update_time,
            published=post.pub_time)
    return feed.get_response()
def recent_feed():
    feed_title = OctBlogSettings['blog_meta']['name']
    feed = AtomFeed(feed_title,
                    feed_url=request.url,
                    url=request.url_root,
                    icon=url_for('static', filename='img/favicon-32x32.png',
                                 _external=True),
                    logo=url_for('static', filename='img/favicon-96x96.png',
                                 _external=True))
    data = {}
    data['allow_donate'] = OctBlogSettings['donation']['allow_donate']
    data['donation_msg'] = OctBlogSettings['donation']['donation_msg']
    data['display_wechat'] = OctBlogSettings['wechat']['display_wechat']
    data['wechat_msg'] = OctBlogSettings['wechat']['wechat_msg']
    data['display_copyright'] = OctBlogSettings['copyright']['display_copyright']
    data['copyright_msg'] = OctBlogSettings['copyright']['copyright_msg']
    post_footer = get_post_footer(**data)
    posts = models.Post.objects.filter(post_type='post', is_draft=False)[:15]
    for post in posts:
        feed.add(post.title,
                 unicode(post.content_html + post_footer),
                 content_type='html',
                 author=post.author.username,
                 url=make_external(post.get_absolute_url()),
                 updated=post.update_time,
                 published=post.pub_time)
    return feed.get_response()
def build_artifact(self, artifact):
    ctx = get_ctx()
    feed_source = self.source
    page = feed_source.parent
    feed = AtomFeed(title=page.record_label + u' — Pallets Project',
                    feed_url=url_to(feed_source, external=True),
                    url=url_to('/blog', external=True),
                    id=get_id(ctx.env.project.id))
    for item in page.children.order_by('-pub_date').limit(10):
        item_author = item['author']
        feed.add(
            item['title'],
            text_type(item['body']),
            xml_base=url_to(item, external=True),
            url=url_to(item, external=True),
            content_type='html',
            id=get_id(u'%s/%s' % (ctx.env.project.id,
                                  item['_path'].encode('utf-8'))),
            author=item_author,
            updated=datetime(*item['pub_date'].timetuple()[:3]))
    with artifact.open('wb') as f:
        f.write(feed.to_string().encode('utf-8'))