def get(self): feed = AtomFeed( 'Recently Added Watches - Wanna Buy A Watch', feed_url=request.url, url=request.url_root ) # Prepare query watches = session.query(Watch) # TODO: .limit(15) and date_added watches = watches.order_by(Watch.title).all() for watch in watches: feed.add( watch.title, unicode(watch.long_description), content_type='html', author='wbaw', url=BASE_URL + '/' + watch.page_url + '?src=wbawsearch', # TODO: Add updated and published updated=datetime.now(), published=datetime.now() ) return feed.get_response()
def feed():
    """Atom feed of the 20 most recent published posts, served as XML."""
    feed = AtomFeed(
        g.options['name'],
        subtitle=g.options['slogan'],
        feed_url=request.url_root+'feed/',
        url=request.url_root,
        generator=None
    )
    # NOTE(review): filter/limit signature looks like a custom ORM (not stock
    # SQLAlchemy) — confirm against the project's Post model.
    posts = Post.query.filter(status='published', type='post').order_by('created').limit(20, 0, array=True)
    for post in posts:
        feed.add(
            post.title,
            post.content,
            content_type='html',
            author=post.user.nicename,
            url=request.url_root+post.guid,
            updated=post.modified,
            published=post.created
        )
    response = feed.get_response()
    # Serve as generic XML rather than application/atom+xml.
    response.headers["Content-Type"] = 'application/xml'
    return response
def atom_feed():
    """Atom feed of the first index page of site pages, dates normalized to UTC."""
    config = current_app.config
    feed = AtomFeed(
        config['SITE_TITLE'],
        feed_url=request.url,
        url=request.url_root,
    )
    results, results_page = g.gitpages.index(
        1,
        ref=current_app.default_ref,
        statuses=g.allowed_statuses,
    )
    for page in results:
        doc = page.doc()
        # Normalize the page timestamp to UTC for feed consumers.
        utc_date = page.info.date.astimezone(pytz.utc)
        feed.add(
            doc['title'],
            doc['body'],
            content_type='html',
            url=urljoin(request.url_root, page.to_url()),
            updated=utc_date,
            published=utc_date,
        )
    return feed.get_response()
def make_rss(data):
    """Make an Atom feed from provided data.

    `data` is an iterable of dicts with 'link', 'title' and 'description'
    keys. Items without a link are skipped; an empty title is replaced with
    a placeholder. Returns the populated AtomFeed.
    """
    # Create RSS feed object
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url,
                    url=request.url_root)
    # Make feed list from data
    i = 1
    current_time = datetime.now()
    for item in data:
        # Entries without a usable link cannot appear in the feed.
        if item['link'] == '' or item['link'] is None:
            continue
        if item['title'] == '':
            item['title'] = 'Unknown Title'  # fixed typo ("Unknow Title")
        try:
            feed.add(item['title'],
                     unicode(item['description']),
                     content_type='html',
                     url=item['link'],
                     updated=current_time)
        except Exception as e:  # BUG FIX: `except Exception, e` is py2-only syntax
            print((i, e, item['title'], unicode(item['description']),
                   item['link'], current_time))
        i += 1
    # BUG FIX: the original built the feed but never returned it.
    return feed
def atomfeed():
    """Atom feed of recent links from the #gcu IRC channel, filtered by ?t=."""
    t = request.args.get('t')
    if not t:
        t = 'url'
    r = get_last(t)
    # Bail out with an empty payload when the backing search failed or is empty.
    if not (r.status_code == 200 and len(r.response) > 0):
        return {}
    j = json.loads(r.response[0])
    if len(j) < 1:
        return {}
    feed = AtomFeed('Les liens (pas forcement) SFW de GCU-Squad!',
                    feed_url=request.url,
                    url=request.url_root,
                    subtitle='Adresses vues sur le canal #[email protected]')
    for item in j:
        post = item['_source']
        # One feed entry per URL mentioned in the IRC line.
        for url in post['urls']:
            datefmt = '%Y-%m-%dT%H:%M:%S.%f'
            pubdate = datetime.datetime.strptime(post['fulldate'], datefmt)
            update = pubdate
            # Append "#tag1,tag2#" to the title when the post carries tags.
            if post['tags']:
                line = '{0} #{1}#'.format(post['line'], ','.join(post['tags']))
            else:
                line = post['line']
            feed.add(title=line,
                     title_type='text',
                     url=url,
                     author=post['nick'],
                     published=pubdate,
                     updated=update)
    return feed.get_response()
def itemsATOM():
    '''
    Returns an Atom feed of all items, one entry per item in every category.
    '''
    feed = AtomFeed(title="Imperial Catalog",
                    subtitle="A catalog of Galactice Empire items",
                    feed_url="http://localhost:8000/feed",
                    url="http://localhost:8000",
                    author="Sean Fallmann")
    categories = getAllCategories()
    for c in categories:
        for i in c.items:
            feed.add(
                category=c.name,
                title=i.name,
                id=i.id,
                content=i.description,
                content_type="html",
                author=i.user.name,
                url="http://localhost:8000/%s/%s" % (c.name, i.name),
                updated=datetime.datetime.utcnow(),  # NOTE(review): feed time, not item time
            )
    # NOTE(review): returns the AtomFeed object itself rather than
    # feed.get_response(); as a Flask view this would need conversion —
    # confirm the caller's expectation.
    return feed
def feed():
    """Atom feed of all blog articles, limited by BLOGGING_FEED_LIMIT config."""
    blogging_engine = _get_blogging_engine(current_app)
    storage = blogging_engine.storage
    config = blogging_engine.config
    count = config.get("BLOGGING_FEED_LIMIT")
    posts = storage.get_posts(count=count, offset=None, recent=True,
                              user_id=None, tag=None, include_draft=False)
    feed = AtomFeed(
        '%s - All Articles' % config.get("BLOGGING_SITENAME",
                                         "Flask-Blogging"),
        feed_url=request.url,
        url=request.url_root,
        generator=None)
    if len(posts):
        # Notify subscribers before and after posts are rendered into the feed.
        feed_posts_fetched.send(blogging_engine.app, engine=blogging_engine,
                                posts=posts)
        for post in posts:
            # Render markdown/text into post["rendered_text"].
            blogging_engine.process_post(post, render=True)
            feed.add(post["title"], str(post["rendered_text"]),
                     content_type='html',
                     author=post["user_name"],
                     url=config.get("BLOGGING_SITEURL", "")+post["url"],
                     updated=post["last_modified_date"],
                     published=post["post_date"])
        feed_posts_processed.send(blogging_engine.app, engine=blogging_engine,
                                  feed=feed)
    response = feed.get_response()
    response.headers["Content-Type"] = "application/xml"
    return response
def _feeds(query=None, title='Recent Articles'):
    """Atom feed of the newest posts matching *query*, bodies read from GridFS.

    :param query: optional Mongo filter dict (defaults to all posts)
    :param title: feed title
    """
    posts = db.find("posts.files", query or {}).\
        sort([("uploadDate", -1)]).limit(PAGE_SIZE)
    feed = AtomFeed(title, feed_url=request.url, url=request.url_root)
    from monblog.common.encodings import force_bytes
    for post in posts:
        author = conf.BLOG_SETTINGS.get("AUTHOR", "")
        url = urljoin(request.url_root,
                      url_for("get_post", post_id=str(post["_id"])))
        # Fetch the post body from GridFS, dropping non-ASCII bytes.
        text = force_bytes(
            db.fs.get(objectid.ObjectId(post["_id"])).read(),
            "ascii", errors="ignore")
        feed.add(
            post["metadata"].get("title"),
            markdown(text),
            id=url,
            content_type='html',
            url=url,
            updated=post["uploadDate"],
            published=post["uploadDate"],
            author=post["metadata"].get("author", author))
    return feed.get_response()
def atom(ctx):
    """Atom feed of the 10 most recently added documents in the object DB.

    :param ctx: request context carrying the odb, app config and URL helpers
    """
    feed = AtomFeed(ctx.odb.name,
                    feed_url=ctx.url_for("atom"),
                    url=ctx.url_for("root"),
                    subtitle=ctx.odb.description)
    pattern = ctx.app.recent_doc_pattern
    for added_date, root_path in utils.recent_files(ctx, count=10,
                                                    pattern=pattern):
        blob_obj = ctx.odb.head.tree[root_path]
        assert isinstance(blob_obj, BlobObject)
        # Re-resolve through abs_name — presumably to follow links/aliases
        # to the canonical blob; confirm against the odb API.
        current_blob_obj = ctx.odb.head.tree[blob_obj.abs_name]
        doc = render_blob(ctx, current_blob_obj)
        url = "http://" + ctx.request.host + ctx.url_for(
            "view_obj", rev="HEAD", path=blob_obj.root_path)
        feed.add(
            doc.title,
            doc.body,
            title_type="html",
            content_type="html",
            author=doc.author_name,
            url=url,
            updated=doc.last_modified,
            published=added_date,
        )
    return feed.get_response()
def rss():
    """Atom feed of summarized Hacker News stories with links to HN comments."""
    feed = AtomFeed('Hacker News TLDR',
                    feed_url=request.url,
                    url=request.url_root)
    stories = _get_stories()
    for story in stories:
        # Fall back to a placeholder when no summary sentences were produced.
        if not story.get(BODY, {}).get(SENTENCES):
            body = 'Unable to generate summary'
        else:
            # Render each summary sentence as an HTML list item.
            body = '<ul>{}</ul>'.format(
                '\n'.join(
                    "<li>{}</li>".format(
                        sentence
                    ) for sentence in story[BODY][SENTENCES]
                )
            )
        # Always append a link to the HN discussion thread.
        body += "<br/><a href={}>HN Comments</a>".format(
            'https://news.ycombinator.com/item?id={}'.format(
                story[HACKER_NEWS_ID]
            )
        )
        feed.add(story[TITLE],
                 body,
                 content_type='html',
                 updated=datetime.strptime(
                     story[DATE_FOUND], '%Y-%m-%d %H:%M:%S.%f'),
                 url=urljoin(request.url_root, story[URL]),
                 )
    return feed.get_response()
def feed(tag):
    """Atom feed of the 20 latest published posts, optionally filtered by tag.

    Aborts with 404 when *tag* is given but no post uses it.
    """
    if tag and not TagCloud.objects(tag=tag, count__gt=0).first():
        return abort(404)
    title = 'late.am'
    if tag:
        title = '%s - Posts about %s' % (title, tag)
    feed = AtomFeed(
        title=title,
        feed_url=url_for('feed', _external=True),
        url=url_for('index', _external=True),
        author={'name': 'Dan Crosta', 'email': '*****@*****.**'},
        icon=staticurl('mug.png', _external=True),
        generator=('plog', 'https://github.com/dcrosta/plog', '0.1'),
    )
    posts = Post.objects(published=True).order_by('-pubdate')
    if tag:
        # BUG FIX: MongoEngine QuerySet.filter() returns a NEW queryset;
        # the original discarded the result, so the tag filter never applied.
        posts = posts.filter(tags=tag)
    for post in posts[:20]:
        feed.add(
            title=post.title,
            content=domarkdown(post.blurb + '\n' + post.body),
            content_type='html',
            author={'name': 'Dan Crosta', 'email': '*****@*****.**'},
            url=url_for('post', slug=post.slug, _external=True),
            id=url_for('permalink', post_id=post.pk, _external=True),
            published=post.pubdate,
            updated=post.updated)
    response = make_response(unicode(feed))
    response.headers['Content-Type'] = 'application/atom+xml; charset=UTF-8'
    return response
def recent_feed():
    """Atom feed of the latest visible datasets (site-configured page size)."""
    feed = AtomFeed(_('Last datasets'),
                    feed_url=request.url,
                    url=request.url_root)
    datasets = (Dataset.objects.visible().order_by('-created_at')
                .limit(current_site.feed_size))
    for dataset in datasets:
        # Attribute the entry to the owning organization, else the owner user.
        author = None
        if dataset.organization:
            author = {
                'name': dataset.organization.name,
                'uri': url_for('organizations.show',
                               org=dataset.organization.id,
                               _external=True),
            }
        elif dataset.owner:
            author = {
                'name': dataset.owner.fullname,
                'uri': url_for('users.show',
                               user=dataset.owner.id,
                               _external=True),
            }
        feed.add(dataset.title,
                 render_template('dataset/feed_item.html', dataset=dataset),
                 content_type='html',
                 author=author,
                 url=url_for('datasets.show',
                             dataset=dataset.id,
                             _external=True),
                 updated=dataset.last_modified,
                 published=dataset.created_at)
    return feed.get_response()
def recent_feed():
    """Atom feed of the 15 most recently updated posts (excluding 'home')."""
    log.info("Generating RSS Feed")
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url,
                    url=request.url_root)
    log.debug("Querying for all blog posts")
    posts = models.Post.query \
        .filter(~models.Post.tags.any(models.Tag.name.in_(['home']))) \
        .order_by(models.Post.updated.desc()) \
        .limit(15) \
        .all()
    base_url = configuration.BASE_URL + '/blog/post/'
    counter = 1
    log.debug("Printing posts in RSS Feed:")
    for post in posts:
        # NOTE(review): entry URLs are built from a 1..N loop counter, not the
        # post's id/slug — links will shift as new posts appear. Confirm intent.
        url = base_url + str(counter)
        counter += 1
        log.debug("\t\t" + post.title)
        feed.add(post.title,
                 md.markdown(post.body),
                 content_type='html',
                 author=post.author.first_name + " " + post.author.last_name,
                 url=make_external(url),
                 updated=post.updated,
                 published=post.created)
    return feed.get_response()
def test_atom_add_one(self):
    """Adding a single FeedEntry grows the feed's entry list from 0 to 1."""
    atom = AtomFeed(title='test_title', id=1)
    entry = FeedEntry(
        title='test_title',
        id=1,
        updated=datetime.datetime.now())
    assert len(atom.entries) == 0
    atom.add(entry)
    assert len(atom.entries) == 1
def announcement_feed():
    """Atom feed of the 20 most recently updated exchange announcements.

    Optional query args `type` and `exchange` narrow the result; both are
    reflected in the feed title.
    """
    def bjdate(d):
        # Shift to Beijing time (UTC+8) and format as a Chinese date string.
        from datetime import timedelta
        return (d + timedelta(hours=8)).strftime('%Y年%m月%d日')
    type_ = request.args.get('type', '')
    typecn = type_to_cn(type_)
    exchange = request.args.get('exchange', '')
    cond = {}
    feedtitle = '邮币卡公告聚合'
    if type_:
        cond['type_'] = type_
        feedtitle += ' - {}'.format(typecn)
    if exchange:
        cond['exchange'] = exchange
        feedtitle += ' - {}'.format(exchange)
    feed = AtomFeed(feedtitle, feed_url=request.url, url=request.url_root)
    announcements = list(
        Announcement.query(cond, sort=[('updated_at', -1)], limit=20))
    for a in announcements:
        # Entry title carries the (Beijing) publish date; body is a stub that
        # points readers at the linked announcement.
        feed.add('{} {}'.format(bjdate(a.published_at), a.title.strip()),
                 '更多内容请点击标题连接',
                 content_type='text',
                 author=a.exchange,
                 url=a.url,
                 updated=a.updated_at,
                 published=a.published_at)
    return feed.get_response()
def recent_feed():
    """Atom feed of anonymous threads, most recently updated first."""
    atom = AtomFeed('Recent Articles',
                    feed_url=request.url,
                    url=request.url_root)
    anonymous_threads = (Thread.query
                         .order_by(Thread.last_updated.desc())
                         .filter(Thread.user == None)
                         .all())
    for item in anonymous_threads:
        item_url = url_for('thread',
                           display_hash=item.display_hash,
                           title=item.slug())
        atom.add(item.title,
                 '',
                 content_type='html',
                 author=item.display_name,
                 url=item_url,
                 updated=item.last_updated,
                 published=item.date_created)
    return atom.get_response()
def do_atom(path=None):
    """Atom feed of recent changes — one document, or all when path == '/'.

    :param path: document path; '/' means aggregate every document's history
    """
    feed = AtomFeed('Changes' + (' - %s' % path if path != '/' else ''),
                    feed_url=request.url, url=request.url_root)
    history = []
    if path != '/':
        # Single document: history is already newest-first, stop at the limit.
        for entry in get_history(path):
            entry.insert(1, path)
            history.append(entry)
            if len(history) == ATOM_LIMIT:
                break
    else:
        # All documents: collect every history, then keep the newest entries.
        for path in get_documents_list():
            for entry in get_history(path):
                entry.insert(1, path)
                history.append(entry)
        history = sorted(history, key=lambda x: x[0],
                         reverse=True)[:ATOM_LIMIT]
    for date, path, rev, author, desc in history:
        feed.add(path,
                 desc if desc != '-' else 'No summary available',
                 url=url_for('index', path=path, do='compare', to=rev),
                 author=author,
                 updated=date)
    return feed.get_response()
def blog_feed():
    """Atom feed of published gallery posts, items stitched into HTML bodies."""
    posts = Gallery.get_list(published=True, to_json=True)
    feed_url = "{}/blog/feed.atom".format(g.app_base_link)
    feed = AtomFeed('KateHeddleston.com Blog Posts',
                    feed_url=feed_url,
                    url='{}/blog'.format(g.app_base_link))
    for post in posts:
        # Build one HTML fragment per gallery item: optional title, image,
        # caption, then the item body.
        post_html = []
        for item in post.get('items'):
            item_text = ""
            if item.get('title'):
                item_text += u'<h2>{}</h2><br>'.format(item.get('title'))
            if item.get('image_name'):
                img_src = u'{}/{}'.format(post.get('base_url'),
                                          item.get('image_name'))
                item_text += u"<img src='{}' />".format(img_src)
                if item.get('image_caption'):
                    item_text += u"<div>{}</div>".format(
                        item.get('image_caption'))
                item_text += '<br>'
            item_text += item.get('body')
            post_html.append(item_text)
        # Join the fragments into paragraphs.
        text = '</p><p>'.join(post_html)
        text = '<p>' + text + '</p>'
        post_url = "{}/blog/{}".format(g.app_base_link, post.get('uuid'))
        published_at = datetime.datetime.strptime(post['published_at_raw'],
                                                  '%Y-%m-%dT%H:%M:%S')
        feed.add(post.get('name'),
                 unicode(text),  # Python 2 unicode(); use str() on py3
                 content_type='html',
                 author=post.get('author'),
                 url=post_url,
                 updated=published_at,
                 published=published_at)
    return feed.get_response()
def recent_atom():
    """Atom feed listing every Event (title only; entries carry no body)."""
    app.logger.debug(request.url_root)
    atom = AtomFeed('Recent Events',
                    feed_url=request.url,
                    url=request.url_root)
    for event in session.query(Event).all():
        atom.add(id=event.id,
                 title=event.title,
                 content_type='html',
                 updated=event.created)
    return atom.get_response()
def get(self, name):
    """Atom feed of up to 10 topics tagged *name*.

    :param name: tag name; also prefixes the feed title
    """
    setting = current_app.config.get('SITE', {
        'title': '',
        'description': ''
    })
    title = setting['title']
    description = setting['description']
    feed = AtomFeed(
        '%s·%s' % (name, title),
        feed_url=request.url,
        url=request.url_root,
        subtitle=description)
    topics = Topic.query.filter_by(tags__name=name).limit(10)
    for topic in topics:
        # NOTE(review): both branches are identical — the markdown branch
        # presumably should render topic.content to HTML before feeding it
        # out as content_type='html'. Confirm intent.
        if topic.content_type == Topic.CONTENT_TYPE_MARKDOWN:
            content = topic.content
        else:
            content = topic.content
        feed.add(topic.title,
                 content,
                 content_type='html',
                 author=topic.author.username,
                 url=urljoin(
                     request.url_root,
                     url_for(
                         'topic.topic',
                         topicId=topic.id)),
                 updated=topic.updated_at,
                 published=topic.created_at)
    return feed.get_response()
def publictag_feed(tagkey):
    """
    rss/atom representation of the Read-only overview of the bookmarks
    in the userkey/tag of this PublicTag. 404s for unknown tag keys.

    :param tagkey: public key identifying the (user, tag) pair
    """
    try:
        this_tag = PublicTag.get(PublicTag.tagkey == tagkey)
        bookmarks = Bookmark.select().where(
            Bookmark.userkey == this_tag.userkey,
            Bookmark.tags.contains(this_tag.tag),
            Bookmark.status == Bookmark.VISIBLE
        )
        feed = AtomFeed(this_tag.tag,
                        feed_url=request.url,
                        url=make_external(url_for('publictag_page',
                                                  tagkey=tagkey)))
        for bookmark in bookmarks:
            # Fall back to the creation date for never-modified bookmarks.
            updated_date = bookmark.modified_date
            if not bookmark.modified_date:
                updated_date = bookmark.created_date
            # Untitled bookmarks get their URL as the displayed title.
            bookmarktitle = '{} (no title)'.format(bookmark.url)
            if bookmark.title:
                bookmarktitle = bookmark.title
            feed.add(
                bookmarktitle,
                content_type='html',
                author='digimarks',
                url=bookmark.url,
                updated=updated_date,
                published=bookmark.created_date
            )
        return feed.get_response()
    except PublicTag.DoesNotExist:
        abort(404)
def get_feed(feed_id):
    """Serve a fused Atom feed assembled from the JSON spec named *feed_id*.

    Aborts with 404 when no spec file exists for the (sanitized) id.
    """
    feed_id = secure_filename(feed_id)
    feed_config_filepath = os.path.join(APP_CONFIG_FEEDS, feed_id+".json")
    if not os.path.isfile(feed_config_filepath):
        # BUG FIX: `print feed_config_filepath` was Python 2-only statement
        # syntax; the function form works on both interpreters.
        print(feed_config_filepath)
        abort(404)
    feed = feedops.FusedFeed.load_from_spec_file(feed_config_filepath)
    feed.fetch()
    feed_uri = request.url_root
    if len(feed.sources) == 1:
        # if there is only 1 source in a fusedfeed
        # just give the feed's html alternate
        # TODO: instead, we should generate our own HTML representation
        feed_uri = feed.sources[0].html_uri
    output = AtomFeed(feed.name, feed_url=request.url, author="FeedFuser",
                      links=[{"href": feed_uri,
                              "rel": "alternate",
                              "type": "text/html"}])
    for entry in feed.entries:
        # Entries without a title fall back to their link.
        title = entry.title
        if not entry.title:
            title = entry.link
        feed_item = FeedEntry(id=entry.guid, title=title,
                              updated=entry.update_date,
                              author=entry.author,
                              published=entry.pub_date,
                              links=[{"href": entry.link,
                                      "rel": "alternate",
                                      "type": "text/html"}])
        if entry.summary:
            # NOTE: unicode() is Python 2; switch to str() when porting to py3.
            feed_item.summary = unicode(entry.summary)
            feed_item.summary_type = "text" if entry.summary_type == "text/plain" else "html"
        if entry.content:
            feed_item.content = unicode(entry.content)
            feed_item.content_type = "text" if entry.content_type == "text/plain" else "html"
        output.add(feed_item)
    return output.get_response()
def _topic_feed(request, title, query, order_by):
    """Atom feed of topics from *query*, ordered and capped via ?num= (max 50).

    :param request: request carrying the user (moderator check) and args
    :param title: feed title prefix
    :param query: base topic query to refine
    :param order_by: key into _topic_order selecting the sort column
    """
    # non moderators cannot see deleted posts, so we filter them out first
    # for moderators we mark the posts up as deleted so that
    # they can be kept apart from non-deleted ones.
    if not request.user or not request.user.is_moderator:
        query = query.filter_by(is_deleted=False)
    query = query.order_by(_topic_order[order_by])
    query = query.options(eagerload('author'), eagerload('question'))
    # Clamp ?num= into [0, 50], defaulting to 10.
    query = query.limit(max(0, min(50, request.args.get('num', 10,
                                                        type=int))))
    feed = AtomFeed(u'%s — %s' % (title, settings.WEBSITE_TITLE),
                    subtitle=settings.WEBSITE_TAGLINE,
                    feed_url=request.url,
                    url=request.url_root)
    for topic in query.all():
        title = topic.title
        if topic.is_deleted:
            # Moderators see deleted topics explicitly marked.
            title += u' ' + _(u'(deleted)')
        feed.add(title, topic.question.rendered_text,
                 content_type='html',
                 author=topic.author.display_name,
                 url=url_for(topic, _external=True),
                 id=topic.guid,
                 updated=topic.last_change,
                 published=topic.date)
    return feed.get_response()
def generate_feed(artifact):
    """Build an Atom feed and write it into *artifact* as UTF-8 XML.

    NOTE(review): many names used here (title, subtitle, items, feed_url,
    embed_url, ctx, get_id, url_to, get_item_*, item_*_field) are not defined
    in this block — presumably closed over from an enclosing Lektor build
    function; confirm against the full file.
    """
    feed = AtomFeed(
        title=title or 'Feed',
        subtitle=unicode(subtitle or ''),
        # Markup-aware subtitles are emitted as html, plain strings as text.
        subtitle_type=hasattr(subtitle, '__html__') and 'html' or 'text',
        feed_url=feed_url,
        url=embed_url,
        id=get_id(ctx.env.project.id + 'lektor')
    )
    for item in items:
        feed.add(
            get_item_title(item, item_title_field),
            get_item_body(item, item_body_field),
            xml_base=url_to(item, external=True),
            url=url_to(item, external=True),
            content_type='html',
            id=get_id(u'%slektor/%s' % (
                ctx.env.project.id,
                item['_path'].encode('utf-8'),
            )),
            author=get_item_author(item, item_author_field),
            updated=get_item_updated(item, item_date_field))
    with artifact.open('wb') as f:
        f.write(feed.to_string().encode('utf-8') + '\n')
def recent_feed():
    # Create an atom feed with the latest items
    # the number of items is given by the setting ENDPOINT_RESULTS_PER_PAGE
    # Based on: http://flask.pocoo.org/snippets/10/
    """Atom feed of the latest items, including categories and picture links."""
    feed = AtomFeed('Recent Items', feed_url=request.url,
                    url=request.url_root,
                    author={'name': 'David Castillo',
                            'email': '*****@*****.**',
                            'uri': 'http://github.com/davcs86'},
                    generator=('self generated', '', '0.1'))
    # query the items
    items = db_session.query(Item).order_by(Item.created_date.desc()) \
        .limit(app.config['ENDPOINT_RESULTS_PER_PAGE']).all()
    # add them to the feed
    for item in items:
        feed.add(item.name,
                 unicode(item.description),  # Python 2 unicode()
                 content_type='html',
                 author=item.author.nickname,
                 url=make_external(url_for('item_detail', item_id=item.id)),
                 categories=[{'name': g.name} for g in item.categories],
                 updated=item.updated_date,
                 # Pictures are attached as enclosure links.
                 links=[{'href': g.locate(),
                         'rel': 'enclosure',
                         'type': g.mimetype} for g in item.picture.all()],
                 published=item.created_date)
    return feed.get_response()
def feed_folder(folder):
    """Atom feed (XML) of the 20 newest published posts inside *folder*.

    :param folder: folder guid; paths under 'admin' are rejected with the
        admin 404, unknown folders with the regular 404
    """
    # Guard: never serve feeds for admin-scoped paths.
    if folder.split('/')[0] != 'admin':
        folder = Folder.query.filter(guid=folder).one()
        if folder:
            posts = Post.query.filter(folder_id=folder.id,
                                      status='published',
                                      type='post').order_by('created').limit(20, 0, array=True)
            feed = AtomFeed(
                g.options['name']+' • ' + folder.name,
                subtitle=folder.seo_content,
                feed_url=request.url_root+'feed/',
                url=request.url_root,
                generator=None
            )
            for post in posts:
                feed.add(
                    post.title,
                    post.content,
                    content_type='html',
                    author=post.user.nicename,
                    url=request.url_root+post.guid,
                    updated=post.modified,
                    published=post.created
                )
            response = feed.get_response()
            # Serve as generic XML rather than application/atom+xml.
            response.headers["Content-Type"] = 'application/xml'
            return response
        else:
            return is_404()
    else:
        return is_admin_404()
def feed(self, feed_title, title, content, url, published=None, summary=None,
         enclosure=None, media_thumbnail=None):
    """Build a single-entry Atom feed string for tests, with faked metadata.

    :param feed_title: feed-level title
    :param title: entry title
    :param content: entry HTML content
    :param url: entry URL
    :param published: publication datetime (random faker value if omitted)
    :param summary: optional entry summary
    :param enclosure: optional dict with 'type' and 'url' for an enclosure link
    :param media_thumbnail: optional thumbnail URL injected as media RSS markup
    :returns: the serialized feed as a string
    """
    feed = AtomFeed(feed_title, feed_url=WP_FEED_URL)
    tz = pytz.timezone(faker.timezone())
    published = published or faker.date_time(tzinfo=tz)
    kwargs = {
        'content_type': 'html',
        'author': faker.name(),
        'url': url,
        # `updated` is always on/after `published`.
        'updated': faker.date_time_between(start_date=published, tzinfo=tz),
        'published': published
    }
    if summary:
        kwargs['summary'] = summary
    if enclosure:
        kwargs['links'] = [{
            'type': enclosure['type'],
            'href': enclosure['url'],
            'rel': 'enclosure',
            'length': faker.pyint(),
        }]
    feed.add(title, content, **kwargs)
    out = feed.to_string()
    if media_thumbnail:
        # Splice media RSS markup into the serialized XML by hand: declare the
        # namespace on <feed> and append the thumbnail inside the entry.
        el = '<media:thumbnail url="{0}" />'.format(media_thumbnail)
        out = out.replace(
            '<feed',
            '<feed xmlns:media="http://search.yahoo.com/mrss/"')
        out = out.replace('</entry>', '{0}</entry>'.format(el))
    return out
def recent_feed():
    """Atom feed of the 15 most recently created visible reuses."""
    feed = AtomFeed(_('Last reuses'), feed_url=request.url,
                    url=request.url_root)
    reuses = Reuse.objects.visible().order_by('-created_at').limit(15)
    for reuse in reuses:
        # Attribute the entry to the owning organization, else the owner user.
        author = None
        if reuse.organization:
            author = {
                'name': reuse.organization.name,
                'uri': url_for('organizations.show',
                               org=reuse.organization.id, _external=True),
            }
        elif reuse.owner:
            author = {
                'name': reuse.owner.fullname,
                'uri': url_for('users.show',
                               user=reuse.owner.id, _external=True),
            }
        feed.add(reuse.title,
                 render_template('reuse/feed_item.html', reuse=reuse),
                 content_type='html',
                 author=author,
                 url=url_for('reuses.show', reuse=reuse.id, _external=True),
                 updated=reuse.created_at,
                 published=reuse.created_at)
    return feed.get_response()
def feed(site):
    """Atom feed of digested news for *site* ('hackernews' or startup news)."""
    if site == 'hackernews':
        title = 'Hacker News Digest'
        news_list = models.HackerNews.query.order_by('submit_time desc').all()
    else:
        title = 'Startup News Digest'
        news_list = models.StartupNews.query.order_by('submit_time desc').all()
    feed = AtomFeed(title,
                    updated=models.LastUpdated.get(site),
                    feed_url=request.url,
                    url=urljoin(request.url_root, url_for(site)),
                    author={
                        'name': 'polyrabbit',
                        'uri': 'https://github.com/polyrabbit/'}
                    )
    for news in news_list:
        # Prefix the summary with a floated image when the story has one.
        feed.add(news.title,
                 content=news.summary and
                 ('<img src="%s" style="width: 220px; float: left" />' %
                  news.image.url if news.img_id else '') + news.summary,
                 author={
                     'name': news.author,
                     'uri': news.author_link
                 } if news.author_link else (),
                 url=news.url,
                 updated=news.submit_time,)
    return feed.get_response()
def atom_feed(request):
    """
    generates the atom feed with the newest images

    404s when the user in the URL does not exist or is not active.
    """
    user = User.query.filter_by(
        username=request.matchdict['user']).first()
    if not user or not user.has_privilege(u'active'):
        return render_404(request)
    cursor = MediaEntry.query.filter_by(
        uploader=user.id,
        state=u'processed').\
        order_by(MediaEntry.created.desc()).\
        limit(ATOM_DEFAULT_NR_OF_UPDATED_ITEMS)
    """
    ATOM feed id is a tag URI (see http://en.wikipedia.org/wiki/Tag_URI)
    """
    # Link back to the user's gallery page as the feed's HTML alternate.
    atomlinks = [{
        'href': request.urlgen(
            'mediagoblin.user_pages.user_home',
            qualified=True, user=request.matchdict['user']),
        'rel': 'alternate',
        'type': 'text/html'
    }]
    # Advertise PubSubHubbub hubs when configured.
    if mg_globals.app_config["push_urls"]:
        for push_url in mg_globals.app_config["push_urls"]:
            atomlinks.append({
                'rel': 'hub',
                'href': push_url})
    feed = AtomFeed(
        "MediaGoblin: Feed for user '%s'" % request.matchdict['user'],
        feed_url=request.url,
        id='tag:{host},{year}:gallery.user-{user}'.format(
            host=request.host,
            year=datetime.datetime.today().strftime('%Y'),
            user=request.matchdict['user']),
        links=atomlinks)
    for entry in cursor:
        feed.add(entry.get('title'),
                 entry.description_html,
                 id=entry.url_for_self(request.urlgen, qualified=True),
                 content_type='html',
                 author={
                     'name': entry.get_uploader.username,
                     'uri': request.urlgen(
                         'mediagoblin.user_pages.user_home',
                         qualified=True,
                         user=entry.get_uploader.username)},
                 updated=entry.get('created'),
                 links=[{
                     'href': entry.url_for_self(
                         request.urlgen, qualified=True),
                     'rel': 'alternate',
                     'type': 'text/html'}])
    return feed.get_response()
def collection_list_feed():
    """Atom feed of the 10 most recently added journals in the collection.

    Each entry renders the journal's latest issue articles grouped by section
    (section resolved in the session language).
    """
    language = session.get('lang', get_locale())
    collection = controllers.get_current_collection()
    title = 'SciELO - %s - %s' % (collection.name,
                                  _('Últimos periódicos inseridos na coleção'))
    subtitle = _('10 últimos periódicos inseridos na coleção %s' %
                 collection.name)
    feed = AtomFeed(title,
                    subtitle=subtitle,
                    feed_url=request.url,
                    url=request.url_root)
    journals = controllers.get_journals_paginated(
        title_query='', page=1, order_by='-created', per_page=10)
    if not journals.items:
        # Emit a single placeholder entry rather than an empty feed.
        feed.add('Nenhum periódico encontrado',
                 url=request.url,
                 updated=datetime.now())
    for journal in journals.items:
        issues = controllers.get_issues_by_jid(journal.jid, is_public=True)
        last_issue = issues[0] if issues else None
        articles = []
        if last_issue:
            articles = controllers.get_articles_by_iid(last_issue.iid,
                                                       is_public=True)
        # Group the issue's articles by their (localized) section name.
        result_dict = OrderedDict()
        for article in articles:
            section = article.get_section_by_lang(language[:2])
            result_dict.setdefault(section, [])
            result_dict[section].append(article)
        context = {
            'journal': journal,
            'articles': result_dict,
            'language': language,
            'last_issue': last_issue
        }
        feed.add(journal.title,
                 render_template("collection/list_feed_content.html",
                                 **context),
                 content_type='html',
                 author=journal.publisher_name,
                 url=url_external('main.journal_detail',
                                  url_seg=journal.url_segment),
                 updated=journal.updated,
                 published=journal.created)
    return feed.get_response()
def feed():
    """Atom feed of the 10 newest blog posts (titles include any subtitle)."""
    feed = AtomFeed('Recent Posts',
                    feed_url=request.url,
                    url=request.url_root)
    posts = models.blog.posts[:10]
    # Compose "title : subtitle" when the post defines a subtitle.
    title = lambda p: '%s : %s' % (p.title, p.subtitle) if hasattr(
        p, 'subtitle') else p.title
    for post in posts:
        feed.add(title(post),
                 unicode(post.html),  # Python 2 unicode(); use str() on py3
                 content_type='html',
                 author='Ang Gao',
                 url=post.url(_external=True),
                 updated=post.date,
                 published=post.date)
    return feed.get_response()
def feed() -> Response:
    """Atom feed of the 15 newest posts, bodies rendered from Markdown."""
    atom = AtomFeed(
        'Recent Article',
        feed_url=request.url,
        url=request.url_root)
    recent_posts = Post.query.order_by(Post.date.desc()).limit(15)
    for entry in recent_posts:
        atom.add(
            entry.title,
            str(markdown(entry.content)),
            content_type='html',
            author=entry.author or 'Unnamed',
            url=urljoin(request.url_root, entry.url),
            updated=entry.last_modified,
            published=entry.date)
    return atom.get_response()
def anna_feedi(nimi):
    """Atom feed of one user's history entries.

    :param nimi: username; also used as the entry author
    """
    user_history = Historia.select().where(
        Historia.kayttaja == Kayttaja.get(nimi=nimi))
    atom = AtomFeed('Lnchrio',
                    feed_url=request.url,
                    url=request.url_root)
    for record in user_history:
        entry_url = request.url_root + 'historia/' + str(record.id)
        atom.add(record.otsikko,
                 published=record.aika,
                 id=record.id,
                 url=entry_url,
                 author=nimi,
                 updated=record.aika)
    return atom.get_response()
def rss():
    """Atom feed of the 15 most recently created articles."""
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url,
                    url=request.url_root)
    articles = Article.query.order_by(Article.create_timestramp.desc()) \
        .limit(15).all()
    for article in articles:
        feed.add(article.title,
                 unicode(article.markdown_html),  # Python 2 unicode()
                 content_type='html',
                 author=User.query.get(article.author_id).nickname,
                 url=make_external(url_for('main.article', id=article.id)),
                 updated=article.last_edit_timestramp)
    return feed.get_response()
def feed() -> Response:
    """Atom feed of the 15 most recent non-draft posts."""
    atom = AtomFeed(g.site["name"], feed_url=request.url, url=request.url_root)
    recent_posts = (Post.query
                    .filter_by(is_draft=False)
                    .order_by(Post.date.desc())
                    .limit(15))
    for entry in recent_posts:
        atom.add(
            entry.title,
            str(markdown(entry.content)),
            content_type="html",
            author=entry.author or "Unnamed",
            url=urljoin(request.url_root, entry.url),
            updated=entry.last_modified,
            published=entry.date,
        )
    return atom.get_response()
def index_feed():
    """Atom feed of the 20 most recently created datasets."""
    feed = AtomFeed('wanted opendata.by', feed_url=request.url,
                    url=request.url_root)
    for dataset in db.session.query(DataSet).order_by(
            DataSet.create.desc()).limit(20).all():
        feed.add(dataset.name, markdown(dataset.description),
                 content_type='html',
                 url=url_for('.dataset', id=dataset.id),
                 published=dataset.create,
                 updated=dataset.create)
    # NOTE(review): rollback on a read-only path — presumably to release the
    # session's transaction; confirm it isn't masking a write elsewhere.
    db.session.rollback()
    return feed.get_response()
def feed():
    # Subscription feed (original comment: 订阅)
    """Atom feed of the 10 latest blog articles from the API backend."""
    data = g.api.blog_get_all(limit=10).get("data")
    feed = AtomFeed(g.api.get_sys_config().get("data").get("site_feedname"),
                    feed_url=request.url,
                    url=request.url_root,
                    subtitle="From the latest article in {}".format(
                        request.url))
    for article in data:
        # Prefer update_time, fall back to create_time; keep the date part only.
        updated = article['update_time'][:10] if article['update_time'] else article['create_time'][:10]
        feed.add(article['title'],
                 unicode(article['content']),  # Python 2 unicode()
                 content_type='html',
                 author=article['author'],
                 id=article['id'],
                 url=url_for(".blogShow", bid=article['id'],
                             utm_source='feed', _external=True),
                 updated=datetime.datetime.strptime(updated, "%Y-%m-%d"),
                 published=datetime.datetime.strptime(
                     article['create_time'][:10], "%Y-%m-%d"))
    return feed.get_response()
def recent_feed():
    """Atom feed of recently added movies."""
    movies = db_helper.get_recent_movies()
    feed = AtomFeed('Recent Movies',
                    feed_url=request.url,
                    url=request.url_root)
    for movie in movies:
        movie_url = 'director/{director_id}/movie/''{movie_id}'\
            .format(director_id=movie.director_id, movie_id=movie.id)
        rendered_text = movie.description
        # NOTE(review): `trailer` is not a standard AtomFeed.add() argument —
        # confirm the feed library actually emits it.
        feed.add(movie.name,
                 unicode(rendered_text),  # Python 2 unicode()
                 content_type='html',
                 trailer=movie.trailer,
                 url=make_external(movie_url),
                 updated=movie.last_update)
    return feed.get_response()
def recent_feed():
    """Atom feed of the first page of posts (PER_PAGE newest)."""
    feed = AtomFeed(app.config['BLOG_TITLE'] + '::Recent Articles',
                    feed_url=request.url,
                    url=request.url_root)
    posts = postClass.get_posts(int(app.config['PER_PAGE']), 0)
    for post in posts['data']:
        # Prefer the preview text; fall back to the full body.
        post_entry = post['preview'] if post['preview'] else post['body']
        feed.add(post['title'],
                 (post_entry),
                 content_type='html',
                 author=post['author'],
                 url=make_external(
                     url_for('single_post', permalink=post['permalink'])),
                 updated=post['date'])
    return feed.get_response()
def render_page(): feed = AtomFeed('Blender Cloud - Latest updates', feed_url=ensure_schema(request.url), url=ensure_schema(request.url_root)) # Get latest blog posts api = system_util.pillar_api() latest_posts = Node.all( { 'where': { 'node_type': 'post', 'properties.status': 'published' }, 'embedded': { 'user': 1 }, 'sort': '-_created', 'max_results': '15' }, api=api) newest = None # Populate the feed for post in latest_posts._items: author = post.user.fullname or post.user.username updated = post._updated if post._updated else post._created url = ensure_schema( urllib.parse.urljoin(request.host_url, url_for_node(node=post))) content = post.properties.content[:500] content = '<p>{0}... <a href="{1}">Read more</a></p>'.format( content, url) if newest is None: newest = updated else: newest = max(newest, updated) feed.add(post.name, str(content), content_type='html', author=author, url=url, updated=updated, published=post._created) resp = feed.get_response() if newest is not None: resp.headers['Last-Modified'] = newest.strftime( current_app.config['RFC1123_DATE_FORMAT']) return resp
def create_feed():
    """Create an AtomFeed instance with latest documents.

    Returns the AtomFeed object itself (not a response) so callers can
    serialize it as they wish.
    """
    feed = AtomFeed(FEED_TITLE,
                    feed_url=request.url,
                    url=request.url_root)
    newest_documents = Document.query.order_by(Document.timestamp.desc()) \
        .limit(FEED_NUM_DOCUMENTS).all()
    for doc in newest_documents:
        feed.add(doc.title or u'(no title given)',
                 make_feed_body(doc),
                 content_type='html',
                 url=url_for('doc', doc_id=doc.id),
                 updated=doc.get_date())
    return feed
def recent_feed():
    """Atom feed of the 15 most recently published snippets."""
    feed = AtomFeed(u'Recent Flask Snippets',
                    subtitle=u'Recent additions to the Flask snippet archive',
                    feed_url=request.url,
                    url=request.url_root)
    snippets = Snippet.query.order_by(Snippet.pub_date.desc()).limit(15)
    for snippet in snippets:
        feed.add(snippet.title,
                 unicode(snippet.rendered_body),  # Python 2 unicode()
                 content_type='html',
                 author=snippet.author.name,
                 url=urljoin(request.url_root, snippet.url),
                 updated=snippet.pub_date)
    return feed.get_response()
def recent_feed():
    """Atom feed of the 15 newest posts from MongoDB, newest first."""
    atom = AtomFeed('Recent Articles',
                    feed_url=request.url,
                    url=request.url_root)
    recent = db.posts.find().sort("created_at", -1).limit(15)
    for doc in recent:
        atom.add(doc['title'],
                 doc['html'],
                 content_type='html',
                 author="mikey",
                 url=request.url_root + str(doc['_id']),
                 updated=doc['created_at'],
                 published=doc['created_at'])
    return atom.get_response()
def atom_feed():
    """Atom feed of the first 10 posts (announce text as entry body)."""
    feed = AtomFeed("My Blog",
                    feed_url=request.url,
                    url=request.host_url,
                    subtitle="My example blog")
    for post in Post.query.limit(10).all():
        feed.add(post.title,
                 post.announce,
                 content_type='html',
                 id=post.id,
                 updated=post.created,  # NOTE(review): uses created for both dates
                 published=post.created,
                 url=url_for('posts.post_detail', pk=post.id))
    return feed.get_response()
def Feed():
    """Atom feed of all news items, newest first."""
    feed = AtomFeed('Últimas Noticías do Diário do Nordeste',
                    feed_url=request.url,
                    url=request.url_root)
    noticias = Noticia.select().order_by(Noticia.Data.desc())
    for noticia in noticias:
        feed.add(noticia.Titulo,
                 noticia.Corpo,
                 id=noticia.id,
                 content_type='html',
                 author=noticia.Autor,
                 url=noticia.Link,
                 updated=noticia.Data)
    return feed.get_response()
def feeds():
    """Atom feed of all posts, bodies rendered from Markdown, newest first."""
    feeds = AtomFeed(title="Peach Blog's Feeds",
                     feed_url=request.url,
                     url=request.url_root)
    posts = Post.query.order_by(Post.create_at.desc()).all()
    for post in posts:
        feeds.add(post.title,
                  markdown.html(post.content),
                  content_type='html',
                  author='Leetao',
                  url=get_abs_url(post.title),
                  updated=post.last_update,
                  # NOTE(review): published uses last_update, not create_at
                  published=post.last_update)
    return feeds.get_response()
def atomfeed():
    """Atom feed of all Mempool blog posts, excerpts with a read-more link."""
    feed = AtomFeed('Mempool | Satoshi Nakamoto Institute',
                    feed_url=request.url,
                    url=request.url_root)
    articles = BlogPost.query.order_by(desc(BlogPost.date)).all()
    for article in articles:
        articleurl = url_for('blogpost', slug=article.slug, _external=True)
        # Entry body is the excerpt plus a link to the full article.
        content = article.excerpt + "<br><br><a href='"+articleurl+"'>Read more...</a>"
        feed.add(article.title,
                 unicode(content),  # Python 2 unicode()
                 content_type='html',
                 author=article.author[0].first + ' ' + article.author[0].last,
                 url=articleurl,
                 updated=article.date,
                 published=article.date)
    return feed.get_response()
def blog_feed():
    """Atom feed of the 10 most recent published blog posts."""
    feed = AtomFeed('My recent blog postings',
                    feed_url=DOMAIN + url_for('blog_feed'),
                    url=DOMAIN)
    for entry in page_list(blogs, 'published', 10, 'date', True):
        meta = entry.meta
        feed.add(meta['title'],
                 content_type='html',
                 url=DOMAIN + url_for('blog_detail', path=entry.path),
                 author=meta['author'],
                 updated=meta['lastmod'],
                 published=meta['date'],
                 summary=meta['excerpt'])
    return feed.get_response()
def atom_feed():
    """Atom feed of the five most recent notes."""
    feed = AtomFeed('albertyw.com',
                    feed_url=request.url, url=request.url_root)
    recent = list(note_util.get_notes())[:5]
    for note in recent:
        relative = url_for('handlers.note', slug=note.slug)
        absolute = urljoin(request.url_root, relative)
        feed.add(note.title,
                 note.note,
                 content_type='html',
                 author='Albert Wang',
                 url=absolute,
                 updated=note.time)
    return feed.get_response()
async def _feed(request):
    """Build the site-wide Atom feed string from the 10 newest posts."""
    feed = AtomFeed(title=SITE_TITLE,
                    updated=datetime.now(),
                    feed_url=request.url,
                    url=request.host)
    recent = (await Post.get_all())[:10]
    for entry in recent:
        feed.add(
            entry.title,
            entry.html_content,
            content_type='html',
            summary=entry.excerpt,
            summary_type='html',
            author=AUTHOR,
            url=entry.url,
            id=entry.id,
            updated=entry.created_at,
            published=entry.created_at,
        )
    return feed.to_string()
def topic_feed(request, id, slug=None):
    """A feed for the answers to a question.

    Validates the topic id, locale and slug (404 / redirect / 403 as
    appropriate), then emits an Atom feed containing the question entry
    followed by one entry per visible reply.
    """
    topic = Topic.query.eagerposts().get(id)

    # if the topic id does not exist or the topic is from a different
    # language, we abort with 404 early
    if topic is None or topic.locale != request.view_lang:
        raise NotFound()

    # make sure the slug is okay, otherwise redirect to the real one
    # to ensure URLs are unique.
    if slug is None or topic.slug != slug:
        return redirect(url_for(topic, action='feed'))

    # deleted posts cannot be seen by people without privilegs
    if topic.is_deleted and not (request.user and request.user.is_moderator):
        raise Forbidden()

    feed = AtomFeed(u'%s — %s' % (topic.title, settings.WEBSITE_TITLE),
                    subtitle=settings.WEBSITE_TAGLINE,
                    feed_url=request.url, url=request.url_root)

    feed.add(topic.title, topic.question.rendered_text, content_type='html',
             author=topic.question.author.display_name,
             url=url_for(topic, _external=True), id=topic.guid,
             updated=topic.question.updated, published=topic.question.created)

    for reply in topic.replies:
        # deleted replies stay visible to moderators only
        if reply.is_deleted and not (request.user and
                                     request.user.is_moderator):
            continue
        title = _(u'Answer by %s') % reply.author.display_name
        if reply.is_deleted:
            title += u' ' + _('(deleted)')
        # BUG FIX: previously passed `created=reply.created`, which the
        # Atom feed entry does not recognise (silently dropped); the
        # question entry above correctly uses `published=`.
        feed.add(title, reply.rendered_text, content_type='html',
                 author=reply.author.display_name,
                 url=url_for(reply, _external=True), id=reply.guid,
                 updated=reply.updated, published=reply.created)
    return feed.get_response()
def comments_feed(id):
    """Atom feed of all comments on one post; aborts 404 if it is missing."""
    post = Post.query.get(id)
    if post is None:
        abort(404)
    feed = AtomFeed(u'Comments for post “%s”' % post.title,
                    feed_url=request.url, url=request.url_root)
    for item in post.comments:
        heading = item.title or u'Untitled Comment'
        feed.add(heading,
                 unicode(item.rendered_text),
                 content_type='html',
                 author=item.author.name,
                 url=request.url,
                 updated=item.pub_date)
    return feed.get_response()
def feed():
    """Atom feed of the 20 most recently updated forum topics."""
    feed = AtomFeed('forum.poehali.net',
                    feed_url=request.url,
                    url=request.url_root,
                    icon=url_for('favicon'))
    recent = (db.session.query(Topic)
              .order_by(Topic.updated.desc())
              .limit(20)
              .all())
    for item in recent:
        feed.add(item.title,
                 item.body,
                 content_type='html',
                 url=item.url,
                 published=item.published,
                 updated=item.updated)
    # Read-only request: discard any pending session state.
    db.session.rollback()
    return feed.get_response()
def recent_feed():
    """Atom feed of the 15 most recently published articles."""
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url, url=request.url_root)
    recent = (Article.query.order_by(Article.pub_date.desc())
              .limit(15).all())
    for item in recent:
        feed.add(item.title,
                 unicode(item.rendered_text),
                 content_type='html',
                 author=item.author.name,
                 url=make_external(item.url),
                 updated=item.last_update,
                 published=item.published)
    return feed.get_response()
def recent_feed():
    """Atom feed of the five newest articles."""
    feed = AtomFeed('Artigos Recentes',
                    feed_url=request.url, url=request.url_root)
    recentes = Artigo.query.order_by(Artigo.data.desc()).limit(5).all()
    for item in recentes:
        feed.add(item.titulo,
                 misaka.html(item.texto),
                 content_type='html',
                 author=item.author.nome,
                 url=make_external(url_for('read_artigo', id=item.id)),
                 updated=item.data)
    return feed.get_response()
def show_entry(name):
    """Return an Atom feed for the single area matching *name*.

    Matching is case-insensitive and ignores spaces.  Returns the Atom
    response with 200 on a match, or a JSON body with 404 when no area
    matches.
    """
    connect(host='mongodb://*****:*****@ds255539.mlab.com:55539/9321test')
    feed = AtomFeed(title='Single Collection', feed_url=request.url)
    # Hoisted out of the loop: the requested name only needs to be
    # normalised once, not once per document.
    target = name.lower().replace(' ', '')
    for a in Area.objects:
        if a.name.lower().replace(' ', '') != target:
            continue
        data = json.loads(a.to_json())
        data2 = xmlify(data, wrap="all", indent=" ")
        entry = FeedEntry(title=a.name, url=request.url,
                          updated=datetime.datetime.utcnow(),
                          author={'name': 'admin'},
                          content_type="application/xml",
                          content=data2)
        feed.add(entry)
        response = make_response(feed.to_string())
        response.mimetype = "application/atom+xml"
        return response, 200  # ATOM
    return jsonify(LGA_name=False), 404
def recent_feed():
    """Atom feed listing every entry, each with a placeholder link."""
    feed = AtomFeed('Luk Posting',
                    feed_url=request.url,
                    url=request.url_root,
                    author=request.url_root)
    for item in entries:
        feed.add(
            item.title,
            item.body,
            content_type='html',
            # Placeholder target until per-entry URLs are wired up.
            url='http://google.pl',
            updated=datetime.now(),
            published=datetime.now())
    return feed.get_response()
def feed():
    """Atom feed of BrightonPy meetings, oldest first in source order reversed."""
    feed = AtomFeed('BrightonPy Events',
                    feed_url=request.url, url=request.url_root)
    for meeting in reversed(get_meetings()):
        # A little hack: stamp the entry a week before the meeting date.
        stamp = meeting['datetime'] - timedelta(weeks=1)
        link = urljoin(request.url_root,
                       url_for('meeting', date=meeting['path']))
        feed.add(meeting['title'],
                 unicode(meeting['content']),
                 author=meeting['speaker'],
                 url=link,
                 updated=stamp,
                 published=stamp)
    return feed.get_response()
def feed():
    """Atom feed of the 10 most recent blog posts."""
    feed = AtomFeed('KoMo', feed_url=request.url, url=request.url_root)
    posts = blog.posts[:10]

    def entry_title(p):
        # Append the subtitle when the post has one (PEP8 E731: a def,
        # not a lambda bound to a name).
        if hasattr(p, 'subtitle'):
            return '%s: %s' % (p.title, p.subtitle)
        return p.title

    for post in posts:
        # Posts carry only a date; promote it to midnight of that day
        # once, instead of computing the same expression twice per entry.
        stamp = datetime.combine(post.date, datetime.min.time())
        feed.add(entry_title(post),
                 unicode(post.html),
                 content_type='html',
                 author='Konstantin Monakhov',
                 url=post.url(_external=True),
                 updated=stamp,
                 published=stamp)
    return feed.get_response()