def atom_feed(request):
    """
    Generates the atom feed with the newest images for a user.

    Returns a 404 page when the user does not exist or lacks the
    'active' privilege; otherwise an Atom feed of the user's newest
    processed media entries.
    """
    user = User.query.filter_by(
        username = request.matchdict['user']).first()
    if not user or not user.has_privilege(u'active'):
        return render_404(request)

    # Newest processed entries only, capped at the configured feed size.
    cursor = MediaEntry.query.filter_by(
        uploader = user.id,
        state = u'processed').\
        order_by(MediaEntry.created.desc()).\
        limit(ATOM_DEFAULT_NR_OF_UPDATED_ITEMS)

    """ ATOM feed id is a tag URI (see http://en.wikipedia.org/wiki/Tag_URI) """
    # Alternate link back to the user's HTML gallery page.
    atomlinks = [{
        'href': request.urlgen(
            'mediagoblin.user_pages.user_home',
            qualified=True,
            user=request.matchdict['user']),
        'rel': 'alternate',
        'type': 'text/html'
    }]

    # Advertise any configured PubSubHubbub hub URLs.
    if mg_globals.app_config["push_urls"]:
        for push_url in mg_globals.app_config["push_urls"]:
            atomlinks.append({
                'rel': 'hub',
                'href': push_url})

    feed = AtomFeed(
        "MediaGoblin: Feed for user '%s'" % request.matchdict['user'],
        feed_url=request.url,
        id='tag:{host},{year}:gallery.user-{user}'.format(
            host=request.host,
            year=datetime.datetime.today().strftime('%Y'),
            user=request.matchdict['user']),
        links=atomlinks)

    for entry in cursor:
        feed.add(
            entry.get('title'),
            entry.description_html,
            id=entry.url_for_self(request.urlgen, qualified=True),
            content_type='html',
            author={
                'name': entry.get_uploader.username,
                'uri': request.urlgen(
                    'mediagoblin.user_pages.user_home',
                    qualified=True,
                    user=entry.get_uploader.username)},
            updated=entry.get('created'),
            links=[{
                'href': entry.url_for_self(
                    request.urlgen,
                    qualified=True),
                'rel': 'alternate',
                'type': 'text/html'}])
    return feed.get_response()
def get(self, name):
    """Return an Atom feed of the latest topics tagged *name*."""
    setting = current_app.config.get('SITE', {
        'title': '',
        'description': ''
    })
    title = setting['title']
    description = setting['description']
    feed = AtomFeed(
        '%s·%s' % (name, title),
        feed_url=request.url,
        url=request.url_root,
        subtitle=description)
    topics = Topic.query.filter_by(tags__name=name).limit(10)
    for topic in topics:
        # NOTE(review): the original code branched on
        # Topic.CONTENT_TYPE_MARKDOWN but both branches assigned the raw
        # content, so markdown was never rendered.  The dead branch is
        # collapsed here; TODO: render markdown content to HTML.
        content = topic.content
        feed.add(
            topic.title,
            content,
            content_type='html',
            author=topic.author.username,
            url=urljoin(
                request.url_root,
                url_for('topic.topic', topicId=topic.id)),
            updated=topic.updated_at,
            published=topic.created_at)
    return feed.get_response()
def publictag_feed(tagkey):
    """
    rss/atom representation of the Read-only overview of the bookmarks
    in the userkey/tag of this PublicTag.

    Aborts with 404 when no PublicTag exists for *tagkey*.
    """
    # Only the PublicTag lookup raises DoesNotExist; keep the try body
    # minimal so unrelated errors in the feed build are not masked as 404s.
    try:
        this_tag = PublicTag.get(PublicTag.tagkey == tagkey)
    except PublicTag.DoesNotExist:
        abort(404)

    bookmarks = Bookmark.select().where(
        Bookmark.userkey == this_tag.userkey,
        Bookmark.tags.contains(this_tag.tag),
        Bookmark.status == Bookmark.VISIBLE
    )
    feed = AtomFeed(this_tag.tag,
                    feed_url=request.url,
                    url=make_external(url_for('publictag_page',
                                              tagkey=tagkey)))
    for bookmark in bookmarks:
        # Prefer the modification date; fall back to creation date.
        updated_date = bookmark.modified_date
        if not bookmark.modified_date:
            updated_date = bookmark.created_date
        # Bookmarks without a title get a placeholder built from the URL.
        bookmarktitle = '{} (no title)'.format(bookmark.url)
        if bookmark.title:
            bookmarktitle = bookmark.title
        feed.add(
            bookmarktitle,
            content_type='html',
            author='digimarks',
            url=bookmark.url,
            updated=updated_date,
            published=bookmark.created_date
        )
    return feed.get_response()
def recent_feed():
    """Atom feed of the most recently created visible datasets."""
    feed = AtomFeed(_('Last datasets'), feed_url=request.url,
                    url=request.url_root)
    datasets = (Dataset.objects.visible().order_by('-created_at')
                .limit(current_site.feed_size))
    for dataset in datasets:
        # Prefer the owning organization as the author, falling back to
        # the individual owner; ownerless datasets get no author.
        if dataset.organization:
            author = {
                'name': dataset.organization.name,
                'uri': url_for('organizations.show',
                               org=dataset.organization.id, _external=True),
            }
        elif dataset.owner:
            author = {
                'name': dataset.owner.fullname,
                'uri': url_for('users.show',
                               user=dataset.owner.id, _external=True),
            }
        else:
            author = None
        feed.add(dataset.title,
                 render_template('dataset/feed_item.html', dataset=dataset),
                 content_type='html',
                 author=author,
                 url=url_for('datasets.show',
                             dataset=dataset.id, _external=True),
                 updated=dataset.last_modified,
                 published=dataset.created_at)
    return feed.get_response()
def recent_feed():
    """Atom feed of the 15 most recently updated posts (excluding 'home')."""
    log.info("Generating RSS Feed")
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url,
                    url=request.url_root)
    log.debug("Querying for all blog posts")
    posts = (models.Post.query
             .filter(~models.Post.tags.any(models.Tag.name.in_(['home'])))
             .order_by(models.Post.updated.desc())
             .limit(15)
             .all())
    base_url = configuration.BASE_URL + '/blog/post/'
    log.debug("Printing posts in RSS Feed:")
    # NOTE(review): entry URLs use a running 1-based counter rather than
    # the post id — confirm that is intentional.
    for counter, post in enumerate(posts, start=1):
        url = base_url + str(counter)
        log.debug("\t\t" + post.title)
        feed.add(post.title, md.markdown(post.body),
                 content_type='html',
                 author=post.author.first_name + " " + post.author.last_name,
                 url=make_external(url),
                 updated=post.updated,
                 published=post.created)
    return feed.get_response()
def rss():
    """Atom feed of summarized Hacker News stories."""
    feed = AtomFeed('Hacker News TLDR',
                    feed_url=request.url,
                    url=request.url_root)
    for story in _get_stories():
        sentences = story.get(BODY, {}).get(SENTENCES)
        if sentences:
            # Render the summary sentences as a bullet list.
            items = '\n'.join(
                "<li>{}</li>".format(sentence) for sentence in sentences)
            body = '<ul>{}</ul>'.format(items)
        else:
            body = 'Unable to generate summary'
        comments_url = 'https://news.ycombinator.com/item?id={}'.format(
            story[HACKER_NEWS_ID])
        body += "<br/><a href={}>HN Comments</a>".format(comments_url)
        feed.add(story[TITLE],
                 body,
                 content_type='html',
                 updated=datetime.strptime(story[DATE_FOUND],
                                           '%Y-%m-%d %H:%M:%S.%f'),
                 url=urljoin(request.url_root, story[URL]),
                 )
    return feed.get_response()
def feed(site):
    """Atom feed of digests for either Hacker News or Startup News."""
    if site == 'hackernews':
        title = 'Hacker News Digest'
        news_list = models.HackerNews.query.order_by('submit_time desc').all()
    else:
        title = 'Startup News Digest'
        news_list = models.StartupNews.query.order_by('submit_time desc').all()
    feed = AtomFeed(title,
                    updated=models.LastUpdated.get(site),
                    feed_url=request.url,
                    url=urljoin(request.url_root, url_for(site)),
                    author={
                        'name': 'polyrabbit',
                        'uri': 'https://github.com/polyrabbit/'}
                    )
    for news in news_list:
        # Prepend a floated thumbnail when an image exists; entries
        # without a summary keep the falsy summary value unchanged.
        if news.summary:
            img_html = ('<img src="%s" style="width: 220px; float: left" />'
                        % news.image.url) if news.img_id else ''
            content = img_html + news.summary
        else:
            content = news.summary
        author = ({'name': news.author, 'uri': news.author_link}
                  if news.author_link else ())
        feed.add(news.title,
                 content=content,
                 author=author,
                 url=news.url,
                 updated=news.submit_time,)
    return feed.get_response()
def announcement_feed():
    """Aggregated Atom feed of exchange announcements, optionally
    filtered by 'type' and 'exchange' querystring arguments."""
    def bjdate(d):
        # Shift to Beijing time (UTC+8) and format as a Chinese date.
        from datetime import timedelta
        return (d + timedelta(hours=8)).strftime('%Y年%m月%d日')

    type_ = request.args.get('type', '')
    typecn = type_to_cn(type_)
    exchange = request.args.get('exchange', '')

    # Build the query condition and feed title from the active filters.
    cond = {}
    feedtitle = '邮币卡公告聚合'
    if type_:
        cond['type_'] = type_
        feedtitle += ' - {}'.format(typecn)
    if exchange:
        cond['exchange'] = exchange
        feedtitle += ' - {}'.format(exchange)

    feed = AtomFeed(feedtitle, feed_url=request.url, url=request.url_root)
    for ann in list(Announcement.query(cond,
                                       sort=[('updated_at', -1)],
                                       limit=20)):
        entry_title = '{} {}'.format(bjdate(ann.published_at),
                                     ann.title.strip())
        feed.add(entry_title,
                 '更多内容请点击标题连接',
                 content_type='text',
                 author=ann.exchange,
                 url=ann.url,
                 updated=ann.updated_at,
                 published=ann.published_at)
    return feed.get_response()
def blog_feed():
    """Atom feed of published blog posts, rendered from their items."""
    posts = Gallery.get_list(published=True, to_json=True)
    feed_url = "{}/blog/feed.atom".format(g.app_base_link)
    feed = AtomFeed('KateHeddleston.com Blog Posts',
                    feed_url=feed_url,
                    url='{}/blog'.format(g.app_base_link))
    for post in posts:
        post_html = []
        for item in post.get('items'):
            # Assemble each item's HTML: optional heading, optional image
            # with caption, then the body text.
            item_text = ""
            if item.get('title'):
                item_text += u'<h2>{}</h2><br>'.format(item.get('title'))
            if item.get('image_name'):
                img_src = u'{}/{}'.format(post.get('base_url'),
                                          item.get('image_name'))
                item_text += u"<img src='{}' />".format(img_src)
                if item.get('image_caption'):
                    item_text += u"<div>{}</div>".format(
                        item.get('image_caption'))
                item_text += '<br>'
            item_text += item.get('body')
            post_html.append(item_text)
        text = '<p>' + '</p><p>'.join(post_html) + '</p>'
        post_url = "{}/blog/{}".format(g.app_base_link, post.get('uuid'))
        published_at = datetime.datetime.strptime(post['published_at_raw'],
                                                  '%Y-%m-%dT%H:%M:%S')
        feed.add(post.get('name'), unicode(text),
                 content_type='html',
                 author=post.get('author'),
                 url=post_url,
                 updated=published_at,
                 published=published_at)
    return feed.get_response()
def feed():
    """Atom feed of all recent articles for the Flask-Blogging site."""
    blogging_engine = _get_blogging_engine(current_app)
    storage = blogging_engine.storage
    config = blogging_engine.config
    count = config.get("BLOGGING_FEED_LIMIT")
    posts = storage.get_posts(count=count, offset=None, recent=True,
                              user_id=None, tag=None, include_draft=False)
    sitename = config.get("BLOGGING_SITENAME", "Flask-Blogging")
    feed = AtomFeed('%s - All Articles' % sitename,
                    feed_url=request.url,
                    url=request.url_root,
                    generator=None)
    if len(posts):
        # Fire the fetched signal, render each post into the feed, then
        # fire the processed signal with the finished feed.
        feed_posts_fetched.send(blogging_engine.app,
                                engine=blogging_engine,
                                posts=posts)
        for post in posts:
            blogging_engine.process_post(post, render=True)
            feed.add(post["title"], str(post["rendered_text"]),
                     content_type='html',
                     author=post["user_name"],
                     url=config.get("BLOGGING_SITEURL", "") + post["url"],
                     updated=post["last_modified_date"],
                     published=post["post_date"])
        feed_posts_processed.send(blogging_engine.app,
                                  engine=blogging_engine,
                                  feed=feed)
    response = feed.get_response()
    response.headers["Content-Type"] = "application/xml"
    return response
def _topic_feed(request, title, query, order_by):
    """Build an Atom feed response for a topic query.

    Non-moderators never see deleted posts (filtered out); moderators
    see them with a '(deleted)' marker in the entry title.
    """
    if not request.user or not request.user.is_moderator:
        query = query.filter_by(is_deleted=False)
    # Clamp the requested item count to the range [0, 50], default 10.
    num = max(0, min(50, request.args.get('num', 10, type=int)))
    query = (query.order_by(_topic_order[order_by])
                  .options(eagerload('author'), eagerload('question'))
                  .limit(num))
    feed = AtomFeed(u'%s — %s' % (title, settings.WEBSITE_TITLE),
                    subtitle=settings.WEBSITE_TAGLINE,
                    feed_url=request.url,
                    url=request.url_root)
    for topic in query.all():
        entry_title = topic.title
        if topic.is_deleted:
            entry_title += u' ' + _(u'(deleted)')
        feed.add(entry_title, topic.question.rendered_text,
                 content_type='html',
                 author=topic.author.display_name,
                 url=url_for(topic, _external=True),
                 id=topic.guid,
                 updated=topic.last_change,
                 published=topic.date)
    return feed.get_response()
def feed():
    """Atom feed of the 20 newest published posts."""
    feed = AtomFeed(
        g.options['name'],
        subtitle=g.options['slogan'],
        feed_url=request.url_root + 'feed/',
        url=request.url_root,
        generator=None
    )
    published_posts = Post.query.filter(
        status='published',
        type='post').order_by('created').limit(20, 0, array=True)
    for post in published_posts:
        feed.add(
            post.title,
            post.content,
            content_type='html',
            author=post.user.nicename,
            url=request.url_root + post.guid,
            updated=post.modified,
            published=post.created
        )
    response = feed.get_response()
    # Serve with an explicit XML content type.
    response.headers["Content-Type"] = 'application/xml'
    return response
def last_24_hr():
    """Atom feed of recent posts (recency filter enabled)."""
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url,
                    url=request.url_root)
    for post in get_recent_posts(filter_recent=True):
        add_post_to_feed(feed, post)
    return feed.get_response()
def do_atom(path=None):
    """Atom feed of recent changes — for one document *path*, or across
    all documents when *path* is the root '/'."""
    title_suffix = ' - %s' % path if path != '/' else ''
    feed = AtomFeed('Changes' + title_suffix,
                    feed_url=request.url,
                    url=request.url_root)
    history = []
    if path != '/':
        # Single document: take its history up to the feed limit.
        for entry in get_history(path):
            entry.insert(1, path)
            history.append(entry)
            if len(history) == ATOM_LIMIT:
                break
    else:
        # All documents: gather every history entry, then keep only the
        # newest ATOM_LIMIT entries overall.
        for doc_path in get_documents_list():
            for entry in get_history(doc_path):
                entry.insert(1, doc_path)
                history.append(entry)
        history = sorted(history, key=lambda x: x[0],
                         reverse=True)[:ATOM_LIMIT]
    for date, entry_path, rev, author, desc in history:
        feed.add(entry_path,
                 desc if desc != '-' else 'No summary available',
                 url=url_for('index', path=entry_path, do='compare', to=rev),
                 author=author,
                 updated=date)
    return feed.get_response()
def atom_feed():
    """Atom feed built from the first index page of the site."""
    config = current_app.config
    feed = AtomFeed(config['SITE_TITLE'],
                    feed_url=request.url,
                    url=request.url_root)
    results, results_page = g.gitpages.index(
        1,
        ref=current_app.default_ref,
        statuses=g.allowed_statuses,
    )
    for page in results:
        document = page.doc()
        # Normalize the page date to UTC for feed timestamps.
        published_at = page.info.date.astimezone(pytz.utc)
        feed.add(document['title'],
                 document['body'],
                 content_type='html',
                 url=urljoin(request.url_root, page.to_url()),
                 updated=published_at,
                 published=published_at)
    return feed.get_response()
def recent_feed():
    """Atom feed of anonymous threads, most recently updated first."""
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url,
                    url=request.url_root)
    threads = (Thread.query
               .order_by(Thread.last_updated.desc())
               .filter(Thread.user == None)
               .all())
    for thread in threads:
        thread_url = url_for('thread',
                             display_hash=thread.display_hash,
                             title=thread.slug())
        feed.add(thread.title, '',
                 content_type='html',
                 author=thread.display_name,
                 url=thread_url,
                 updated=thread.last_updated,
                 published=thread.date_created)
    return feed.get_response()
def recent_feed():
    """Atom feed with the latest items.

    The item count comes from ENDPOINT_RESULTS_PER_PAGE.
    Based on: http://flask.pocoo.org/snippets/10/
    """
    feed = AtomFeed('Recent Items',
                    feed_url=request.url,
                    url=request.url_root,
                    author={'name': 'David Castillo',
                            'email': '*****@*****.**',
                            'uri': 'http://github.com/davcs86'},
                    generator=('self generated', '', '0.1'))
    # Newest items first, capped at the configured page size.
    recent_items = (db_session.query(Item)
                    .order_by(Item.created_date.desc())
                    .limit(app.config['ENDPOINT_RESULTS_PER_PAGE'])
                    .all())
    for item in recent_items:
        feed.add(item.name, unicode(item.description),
                 content_type='html',
                 author=item.author.nickname,
                 url=make_external(url_for('item_detail', item_id=item.id)),
                 categories=[{'name': cat.name} for cat in item.categories],
                 updated=item.updated_date,
                 # Expose each picture as an enclosure link.
                 links=[{'href': pic.locate(),
                         'rel': 'enclosure',
                         'type': pic.mimetype}
                        for pic in item.picture.all()],
                 published=item.created_date)
    return feed.get_response()
def feed_folder(folder):
    """Atom feed of the 20 newest published posts in a folder (by guid).

    Admin paths get the admin 404; unknown folders the regular 404.
    """
    if folder.split('/')[0] == 'admin':
        return is_admin_404()
    folder = Folder.query.filter(guid=folder).one()
    if not folder:
        return is_404()
    posts = Post.query.filter(
        folder_id=folder.id,
        status='published',
        type='post').order_by('created').limit(20, 0, array=True)
    feed = AtomFeed(
        g.options['name'] + ' • ' + folder.name,
        subtitle=folder.seo_content,
        feed_url=request.url_root + 'feed/',
        url=request.url_root,
        generator=None
    )
    for post in posts:
        feed.add(
            post.title,
            post.content,
            content_type='html',
            author=post.user.nicename,
            url=request.url_root + post.guid,
            updated=post.modified,
            published=post.created
        )
    response = feed.get_response()
    response.headers["Content-Type"] = 'application/xml'
    return response
def rss(channel):
    """Atom feed of the 50 most recent URLs posted to *channel*."""
    urls = list(url_db.filter({'channel': channel}).run(g.rdb_conn))[:50]
    feed = AtomFeed('Recent URLs', feed_url=request.url, url=request.url_root)
    for url in urls:
        # Images are embedded inline; anything else gets a description.
        if url['type'].lower() == 'image':
            c = "<img src='" + url['url'] + "'></img>"
        else:
            c = "This URL is a {0}".format(url['type'])
        # BUG FIX: the old code checked `if title:` and then
        # `if title == '':` inside it — an unreachable combination — so a
        # present-but-empty title stayed empty.  Treat any falsy title
        # (absent, None, or '') as missing.
        title = url.get('title', 'Missing Title')
        if not title:
            title = 'Missing Title'
        feed.add(title=title,
                 content=c,
                 content_type="xhtml",
                 author=url['user'],
                 url=url['url'],
                 updated=url['timestamp'],
                 published=url['timestamp'])
    data = feed.get_response()
    response = make_response(data)
    response.headers['Content-Type'] = 'application/atom+xml'
    return response
def _feeds(query=None, title='Recent Articles'):
    """Atom feed of the newest posts matching *query* (all posts by default)."""
    posts = db.find("posts.files", query or {}).\
        sort([("uploadDate", -1)]).limit(PAGE_SIZE)
    feed = AtomFeed(title, feed_url=request.url, url=request.url_root)
    from monblog.common.encodings import force_bytes
    for post in posts:
        default_author = conf.BLOG_SETTINGS.get("AUTHOR", "")
        post_url = urljoin(request.url_root,
                           url_for("get_post", post_id=str(post["_id"])))
        # Read the raw body from GridFS, dropping non-ascii bytes.
        raw = db.fs.get(objectid.ObjectId(post["_id"])).read()
        text = force_bytes(raw, "ascii", errors="ignore")
        feed.add(
            post["metadata"].get("title"),
            markdown(text),
            id=post_url,
            content_type='html',
            url=post_url,
            updated=post["uploadDate"],
            published=post["uploadDate"],
            author=post["metadata"].get("author", default_author))
    return feed.get_response()
def feed():
    """Atom feed of every post (no recency filter)."""
    feed = AtomFeed('All Articles',
                    feed_url=request.url,
                    url=request.url_root)
    for post in get_recent_posts(filter_recent=False):
        add_post_to_feed(feed, post)
    return feed.get_response()
def recent_items_feed():
    """Atom feed of the newest catalog items."""
    feed = AtomFeed('New Items', feed_url=request.url, url=request.url_root)
    items = catalogDb.get_latest_items(CUT_OFF_DATE, NO_LATEST_ITEMS)
    for item in items:
        category = get_category_name(item.category_id)
        item_url = create_external_url(
            url_for('category_item',
                    category_name=format_name_for_url(category),
                    item_name=format_name_for_url(item.name),
                    item_id=item.id))
        # NOTE(review): description[100:] drops the first 100 characters;
        # a preview would usually be description[:100] — confirm intent.
        text = (" New item in category " + category +
                " Price Range" + str(item.pricerange) +
                " Item Description " + item.description[100:] +
                "... Read more here " + item_url)
        feed.add(item.name, unicode(text),
                 content_type='html',
                 author="Sams Catalog App",
                 url=item_url,
                 updated=item.lastupdated or item.created,
                 published=item.created)
    return feed.get_response()
def feed_story(story_id):
    """Atom feed of the non-draft chapters of one story, newest first.

    Aborts 404 when the story does not exist or is unpublished.
    """
    story = (Story.select_published()
             .filter(lambda x: x.id == story_id)
             .prefetch(Story.chapters)
             .first())
    if not story:
        abort(404)
    feed = AtomFeed(
        title=story.title,
        feed_url=request.url,
        url=request.url_root
    )
    chapters = [c for c in story.chapters if not c.draft]
    # Sanity check: every non-draft chapter must carry a publish date.
    for c in chapters:
        assert c.first_published_at is not None, \
            'database is inconsistent: story {} has non-draft and non-published chapter {}'.format(story.id, c.order)
    chapters.sort(key=lambda x: (x.first_published_at, x.order), reverse=True)
    for chapter in chapters:
        feed.add(
            chapter.autotitle,
            chapter.text_preview,
            content_type='text',
            url=url_for('chapter.view', story_id=story.id,
                        chapter_order=chapter.order, _external=True),
            updated=chapter.updated,
            published=chapter.date,
        )
    return feed.get_response()
def feed_chapters():
    """Atom feed of newly published chapters across published stories."""
    feed = AtomFeed(
        title='Обновления глав — {}'.format(sitename()),
        subtitle='Новые главы рассказов',
        feed_url=request.url,
        url=request.url_root
    )
    count = current_app.config['RSS'].get('chapters', 20)
    chapters = (select(c for c in Chapter if not c.draft and c.story_published)
                .order_by(Chapter.first_published_at.desc(),
                          Chapter.order.desc())
                .prefetch(Chapter.story)[:count])
    for chapter in chapters:
        story = chapter.story
        # Credit the first listed author of the parent story.
        author = story.authors[0]
        feed.add(
            '{} : {}'.format(chapter.autotitle, story.title),
            chapter.text_preview,
            content_type='text',
            author=author.username,
            url=url_for('chapter.view', story_id=story.id,
                        chapter_order=chapter.order, _external=True),
            updated=chapter.updated,
            published=chapter.date,
        )
    return feed.get_response()
def atomfeed():
    """Atom feed of links recently seen on the IRC channel."""
    t = request.args.get('t') or 'url'
    r = get_last(t)
    # Bail out with an empty payload when the backend has nothing usable.
    if not (r.status_code == 200 and len(r.response) > 0):
        return {}
    j = json.loads(r.response[0])
    if len(j) < 1:
        return {}
    feed = AtomFeed('Les liens (pas forcement) SFW de GCU-Squad!',
                    feed_url=request.url,
                    url=request.url_root,
                    subtitle='Adresses vues sur le canal #[email protected]')
    datefmt = '%Y-%m-%dT%H:%M:%S.%f'
    for item in j:
        post = item['_source']
        for url in post['urls']:
            pubdate = datetime.datetime.strptime(post['fulldate'], datefmt)
            # Append the tag list to the line when tags are present.
            if post['tags']:
                line = '{0} #{1}#'.format(post['line'],
                                          ','.join(post['tags']))
            else:
                line = post['line']
            feed.add(title=line,
                     title_type='text',
                     url=url,
                     author=post['nick'],
                     published=pubdate,
                     updated=pubdate)
    return feed.get_response()
def atom(ctx):
    """Atom feed of the 10 most recently added documents in the repository."""
    feed = AtomFeed(ctx.odb.name,
                    feed_url=ctx.url_for("atom"),
                    url=ctx.url_for("root"),
                    subtitle=ctx.odb.description)
    pattern = ctx.app.recent_doc_pattern
    for added_date, root_path in utils.recent_files(ctx, count=10, pattern=pattern):
        blob_obj = ctx.odb.head.tree[root_path]
        assert isinstance(blob_obj, BlobObject)
        # Re-resolve via abs_name to get the current blob for this path.
        current_blob_obj = ctx.odb.head.tree[blob_obj.abs_name]
        doc = render_blob(ctx, current_blob_obj)
        # NOTE(review): scheme is hard-coded to http — confirm behaviour
        # for https-served hosts.
        url = "http://" + ctx.request.host + ctx.url_for("view_obj", rev="HEAD", path=blob_obj.root_path)
        feed.add(
            doc.title,
            doc.body,
            title_type="html",
            content_type="html",
            author=doc.author_name,
            url=url,
            updated=doc.last_modified,
            published=added_date,
        )
    return feed.get_response()
def get_feed(feed_id):
    """Build and return a fused Atom feed identified by *feed_id*.

    The id is sanitized and mapped to a JSON spec file under
    APP_CONFIG_FEEDS; a missing spec yields a 404.
    """
    feed_id = secure_filename(feed_id)
    feed_config_filepath = os.path.join(APP_CONFIG_FEEDS, feed_id+".json")
    if not os.path.isfile(feed_config_filepath):
        print feed_config_filepath
        abort(404)
    feed = feedops.FusedFeed.load_from_spec_file(feed_config_filepath)
    feed.fetch()
    feed_uri = request.url_root
    if len(feed.sources) == 1:
        # if there is only 1 source in a fusedfeed
        # just give the feed's html alternate
        # TODO: instead, we should generate our own HTML representation
        feed_uri = feed.sources[0].html_uri
    output = AtomFeed(feed.name,
                      feed_url=request.url,
                      author="FeedFuser",
                      links=[{"href": feed_uri,
                              "rel": "alternate",
                              "type": "text/html"}])
    for entry in feed.entries:
        # Entries without a title fall back to their link.
        title = entry.title
        if not entry.title:
            title = entry.link
        feed_item = FeedEntry(id=entry.guid,
                              title=title,
                              updated=entry.update_date,
                              author=entry.author,
                              published=entry.pub_date,
                              links=[{"href": entry.link,
                                      "rel": "alternate",
                                      "type": "text/html"}])
        # Map source MIME types onto Atom's text/html type flags.
        if entry.summary:
            feed_item.summary = unicode(entry.summary)
            feed_item.summary_type = "text" if entry.summary_type == "text/plain" else "html"
        if entry.content:
            feed_item.content = unicode(entry.content)
            feed_item.content_type = "text" if entry.content_type == "text/plain" else "html"
        output.add(feed_item)
    return output.get_response()
def get(self):
    """Atom feed of watches, ordered alphabetically by title."""
    feed = AtomFeed(
        'Recently Added Watches - Wanna Buy A Watch',
        feed_url=request.url,
        url=request.url_root
    )
    # Prepare query
    # TODO: .limit(15) and date_added
    watches = session.query(Watch).order_by(Watch.title).all()
    for watch in watches:
        feed.add(
            watch.title,
            unicode(watch.long_description),
            content_type='html',
            author='wbaw',
            url=BASE_URL + '/' + watch.page_url + '?src=wbawsearch',
            # TODO: Add updated and published — currently stamped "now".
            updated=datetime.now(),
            published=datetime.now()
        )
    return feed.get_response()
def recent_feed():
    """Atom feed of the 15 most recently created visible reuses."""
    feed = AtomFeed(_('Last reuses'), feed_url=request.url,
                    url=request.url_root)
    reuses = Reuse.objects.visible().order_by('-created_at').limit(15)
    for reuse in reuses:
        # Organization wins over individual owner; ownerless reuses get
        # no author at all.
        if reuse.organization:
            author = {
                'name': reuse.organization.name,
                'uri': url_for('organizations.show',
                               org=reuse.organization.id, _external=True),
            }
        elif reuse.owner:
            author = {
                'name': reuse.owner.fullname,
                'uri': url_for('users.show',
                               user=reuse.owner.id, _external=True),
            }
        else:
            author = None
        feed.add(reuse.title,
                 render_template('reuse/feed_item.html', reuse=reuse),
                 content_type='html',
                 author=author,
                 url=url_for('reuses.show', reuse=reuse.id, _external=True),
                 updated=reuse.created_at,
                 published=reuse.created_at)
    return feed.get_response()
def recent_atom():
    """Atom feed listing every event."""
    app.logger.debug(request.url_root)
    feed = AtomFeed('Recent Events',
                    feed_url=request.url,
                    url=request.url_root)
    for event in session.query(Event).all():
        feed.add(id=event.id,
                 title=event.title,
                 content_type='html',
                 updated=event.created)
    return feed.get_response()
def recent_feed():
    """Atom feed of recently added movies."""
    movies = db_helper.get_recent_movies()
    feed = AtomFeed('Recent Movies',
                    feed_url=request.url,
                    url=request.url_root)
    for movie in movies:
        movie_url = 'director/{director_id}/movie/{movie_id}'.format(
            director_id=movie.director_id, movie_id=movie.id)
        feed.add(movie.name, unicode(movie.description),
                 content_type='html',
                 trailer=movie.trailer,
                 url=make_external(movie_url),
                 updated=movie.last_update)
    return feed.get_response()
def opensearch_search():
    """
    OpenSearch search endpoint

    See http://www.opensearch.org/

    Searches things by querystring parameter ``q`` (with paging via
    ``start``) and returns the matches as an Atom feed.
    """
    q = request.args.get('q')
    start = request.args.get('start')
    num = 25
    # Full-text search across several fields; title matches weigh 3x.
    results = elastic.search(
        'thing',
        query={'title^3,short_description,description,makers_string': q},
        start=start,
        num=num)
    id_list = [result[0] for result in results]
    things = Thing.objects.filter(id__in=id_list)
    feed = AtomFeed("Search results for '%s'" % (q, ),
                    feed_url=request.url,
                    url=request.url_root)
    for thing in things:
        # TODO: only send the last Upload per mimetype?
        links = [
            _create_link_dict(upload, request.host) for upload in thing.files
        ]
        authors = [maker.maker.format_name().strip() for maker in thing.makers]
        # http://werkzeug.pocoo.org/docs/0.11/contrib/atom/#werkzeug.contrib.atom.FeedEntry
        feed.add(
            FeedEntry(title=thing.title or "N/A",
                      summary=thing.short_description,
                      content=thing.description,
                      author=authors,
                      url="http://%s%s" % (
                          request.host,
                          url_for('thing.detail', id=thing.id),
                      ),
                      links=links,
                      updated=thing.created_at,
                      published=thing.created_at))
    return feed.get_response()
def atom_feed():
    """Returns ATOM view of the 10 latest projects."""
    feed = AtomFeed('Recent Projects',
                    feed_url=request.url,
                    url=request.url_root)
    recent = (session.query(University)
              .order_by(University.last_updated.desc())
              .limit(10))
    for uni in recent:
        feed.add(
            uni.university_name,
            unicode(uni.description),
            content_type='html',
            author=uni.user.name,
            url=make_external(url_for('catalogItem', item_id=uni.id)),
            updated=uni.last_updated,
            published=uni.last_updated)
    return feed.get_response()
def recent_feed():
    """Atom feed of the 15 newest public posts."""
    # Typo fix: the feed title previously read "Latest Bog Posts".
    feed = AtomFeed('Latest Blog Posts',
                    feed_url=request.url,
                    url=request.url_root,
                    author=request.url_root)
    posts = models.Post.query.filter(
        models.Post.status == models.Post.STATUS_PUBLIC).order_by(
        models.Post.created_timestamp.desc()).limit(15).all()
    for post in posts:
        feed.add(post.title, post.body,
                 content_type='html',
                 url=urljoin(request.url_root,
                             url_for("posts.detail", slug=post.slug)),
                 updated=post.modified_timestamp,
                 published=post.created_timestamp)
    return feed.get_response()
def scrape(url):
    """Re-publish a scraped feed with .torrent links for matching types."""
    source = feedparser.parse(fetch(url))
    out = AtomFeed(source.feed.title,
                   feed_url=request.url,
                   url=request.url_root)
    for entry in source.entries:
        for link in entry.links:
            # Skip links whose MIME type is not configured for torrents.
            if link.type not in app.config['CONTENT_TYPES']:
                continue
            torrent_url = "%s.torrent" % link.url
            out.add(entry.title,
                    summary=entry.summary,
                    url=torrent_url,
                    xml_base='',
                    updated=datetime.fromtimestamp(
                        mktime(entry.updated_parsed)),
                    published=datetime.fromtimestamp(
                        mktime(entry.published_parsed)))
    return out.get_response()
def feed():
    """Atom feed of the 10 most recent referrals.

    (The previous docstring, "Register a new user.", described a
    different endpoint and was wrong.)
    """
    db_session = current_app.config["db_session"]
    rss_feed = AtomFeed('Recent Articles',
                        feed_url=request.url,
                        url=request.url_root)
    posts = db_session.query(Referral).order_by(Referral.timestamp.desc())
    posts = posts.limit(10).all()
    for post in posts:
        # Resolve the posting user for the author field (one query per post).
        user = db_session.query(User).filter(User.id_ == post.user_id).one()
        rss_feed.add(post.title,
                     post.description,
                     content_type='html',
                     author=user.username,
                     url=os.path.join(request.url_root, "view", str(post.id_)),
                     updated=post.timestamp,
                     )
    return rss_feed.get_response()
def dataset_feed(id):
    """Atom feed of the 20 newest votes/comments on a wanted dataset."""
    dataset = db.session.query(DataSet).filter(DataSet.id == id).first()
    if not dataset:
        abort(404)
    feed = AtomFeed('wanted opendata.by: {}'.format(dataset.name),
                    feed_url=request.url,
                    url=request.url_root)
    votes = (db.session.query(Vote)
             .order_by(Vote.create.desc())
             .filter(Vote.dataset_id == id)
             .limit(20)
             .all())
    for vote in votes:
        feed.add(dataset.name, markdown(vote.comment),
                 content_type='html',
                 url=url_for('.dataset', id=id),
                 published=vote.create,
                 updated=vote.create)
    # Read-only view: discard any pending session state.
    db.session.rollback()
    return feed.get_response()
def get(self):
    """Atom feed of the first 10 blogs."""
    feed = AtomFeed(_("honmaple's Blog"),
                    feed_url=request.url,
                    url=request.url_root,
                    subtitle='I like solitude, yearning for freedom')
    blogs = Blog.query.limit(10)
    for blog in blogs:
        # BUG FIX: the conditional was inverted — when updated_at was set
        # it reported created_at, and when updated_at was None it passed
        # None.  Use updated_at when present, else fall back to created_at.
        updated = (blog.updated_at if blog.updated_at is not None
                   else blog.created_at)
        feed.add(blog.title,
                 escape(safe_markdown(blog.content)),
                 content_type='html',
                 author=blog.author.username,
                 url=urljoin(request.url_root,
                             url_for('blog.blog', blogId=blog.id)),
                 updated=updated,
                 published=blog.created_at)
    return feed.get_response()
def atom_feed():
    """Atom feed of every site post."""
    feed = AtomFeed(meta.site_title, feed_url=meta.site_url + "/feed")
    for post in Posts.get_posts():
        feed.add(title=post.title,
                 title_type="text",
                 subtitle=meta.site_subtitle,
                 content=post.content,
                 content_type="html",
                 summary=post.description,
                 summary_type="text",
                 url=meta.site_url + "/" + post.permalink,
                 author=meta.site_author,
                 # Posts carry a single date, used for both fields.
                 published=post.date,
                 updated=post.date)
    return feed.get_response()
def atom_feed():
    """Atom feed for the blog at blog.lucas-hild.de."""
    base = "https://blog.lucas-hild.de"
    feed = AtomFeed("Lucas Blog", feed_url=base + "/feed")
    for post in posts.get_posts():
        feed.add(title=post["title"],
                 title_type="text",
                 subtitle="Softwareentwicklung",
                 content=post["content"],
                 content_type="html",
                 summary=post["description"],
                 summary_type="text",
                 url=base + "/" + post["permalink"],
                 author="Lucas Hild",
                 # Posts carry one timestamp, used for both fields.
                 published=post["date_time"],
                 updated=post["date_time"])
    return feed.get_response()
def index_atom():
    ''' feed for the frontpage '''
    feed = AtomFeed(SITENAME, feed_url=request.url, url=request.url_root)
    cutoff = datetime.datetime.utcnow() - datetime.timedelta(
        hours=HOURS_TO_LIVE_FRONTPAGE)
    # Top 30 links by upvotes that saw activity inside the window.
    links = Link.objects(last_activity__gt=cutoff).order_by('-upvotes')[:30]
    for link in links:
        feed.add(link.titel,
                 content_type='html',
                 author=SITENAME,
                 url=make_external('comments/%s' % str(link.id)),
                 updated=link.created_at)
    return feed.get_response()
def feed():
    """Atom feed of up to 15 blog articles."""
    feed = AtomFeed('HoMaple的个人博客',
                    feed_url=request.url,
                    url=request.url_root,
                    subtitle='I like solitude, yearning for freedom')
    for article in Articles.query.limit(15).all():
        # Prefer the update timestamp; fall back to the publish time.
        last_changed = (article.updated if article.updated is not None
                        else article.publish)
        feed.add(article.title,
                 escape(safe_markdown(article.content)),
                 content_type='html',
                 author=article.author,
                 url=make_external(url_for('blog.view', id=article.id)),
                 updated=last_changed,
                 published=article.publish)
    return feed.get_response()
def recent_feed():
    """Atom feed of all articles, most recently updated first."""
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url,
                    url=request.url_root)
    articles = Article.query.order_by(Article.updated.desc()).all()
    for article in articles:
        # NOTE(review): the entry url is the numeric id rather than an
        # external link; the make_external(article.url) variant was
        # deliberately commented out upstream — confirm which is wanted.
        feed.add(
            str(article.title),
            str(article.content),
            content_type='html',
            author=str(article.username),
            url=article.id,
            summary=article.summary,
            updated=article.updated,
            published=article.published)
    return feed.get_response()
def feed():
    """Atom feed of the 20 newest blog entries."""
    feed = AtomFeed("Recent Blog Entries",
                    feed_url=request.url,
                    url=request.url_root)
    # Iterate the last 20 posts of the list in reverse order.
    for post in posts.get_posts()[:-21:-1]:
        feed.add(post["title"],
                 post["content"],
                 content_type="html",
                 author=post["author"],
                 url=urljoin(request.url,
                             url_for('blog.post', **post["url_values"])),
                 # Fall back to the original date when never updated.
                 updated=post["updated"] or post["date"],
                 published=post["date"],
                 summary=markdown_blog(post["teaser"]),
                 summary_type="html")
    return feed.get_response()
def atomfeed():
    """Atom feed of Mempool blog posts, newest first."""
    feed = AtomFeed('Mempool | Satoshi Nakamoto Institute',
                    feed_url=request.url,
                    url=request.url_root)
    articles = BlogPost.query.order_by(desc(BlogPost.added)).all()
    for article in articles:
        articleurl = url_for('blogpost', slug=article.slug, _external=True)
        # Excerpt plus a "read more" link back to the full post.
        content = (article.excerpt + "<br><br><a href='" + articleurl +
                   "'>Read more...</a>")
        author = article.author[0]
        feed.add(article.title,
                 unicode(content),
                 content_type='html',
                 author=author.first + ' ' + author.last,
                 url=articleurl,
                 updated=article.added,
                 published=article.date)
    app.logger.info(str(request.remote_addr) + ', atomfeed')
    return feed.get_response()
def all_feed():
    """Atom feed of the 15 newest published articles."""
    feed = AtomFeed("WUVT: Recent Articles",
                    feed_url=request.url,
                    url=request.url_root)
    articles = (Article.query.filter_by(published=True)
                .order_by(desc(Article.datetime)).limit(15).all())
    for article in articles:
        # Use the full content when available, else the summary.
        body = article.html_content or article.html_summary
        feed.add(article.title,
                 unicode(body),
                 content_type='html',
                 author=article.author.name,
                 url=make_external(url_for('article', slug=article.slug)),
                 updated=article.datetime,
                 published=article.datetime)
    return feed.get_response()
def jam_feed(user):
    """Atom feed of the two latest 'jam' entries for *user* (404 if unknown)."""
    feed = AtomFeed('jams', feed_url=request.url, url=request.url_root)
    db = get_db()
    if not db.query(User.name).filter(User.name == user).all():
        abort(404)
    jams = (db.query(Entry.title, Entry.text, Entry.name, Entry.tag,
                     Entry.timestamp)
            .filter(Entry.name == user)
            .filter(Entry.tag == "jam")
            .order_by(Entry.id.desc())
            .limit(2)
            .all())
    for jam in jams:
        feed.add(jam.title,
                 content_type='html',
                 url=jam.text,
                 author=jam.name,
                 updated=jam.timestamp)
    return feed.get_response()
def feed_page():
    """Atom feed of ten posts ordered by publish date (ascending)."""
    with db.session_context() as session:
        # NOTE(review): ascending order yields the *oldest* ten posts —
        # confirm that is intended for a "Recent Posts" feed.
        posts = session.query(db.Post).order_by(db.Post.published).limit(10)
        feed = AtomFeed(title="Recent Posts",
                        feed_url=request.url,
                        url=request.url_root)
        for post in posts:
            # Fall back to the publish time when never modified.
            updated = (post.published if post.last_modified is None
                       else post.last_modified)
            feed.add(post.title,
                     post.html,
                     content_type="html",
                     url="{0}/post?id={1}".format(request.url_root,
                                                  post.rowid),
                     updated=updated,
                     published=post.published)
        return feed.get_response()
def feed():
    """Atom feed assembled from the on-disk posts directory."""
    atom_feed = AtomFeed('airtrack\'s Blog', feed_url=request.url)
    for post in get_posts_list(posts_dir):
        # Load the rendered body for this post from disk.
        body = get_post_content(posts_dir, post['post_time'], post['caption'])
        atom_feed.add(post['caption'],
                      body,
                      content_type='html',
                      author='airtrack',
                      url=post['href'],
                      updated=post['date_time'])
    return atom_feed.get_response()
def feed_stories():
    """Atom feed of the newest published stories (count from RSS config)."""
    feed = AtomFeed(
        title='Новые рассказы — {}'.format(sitename()),
        subtitle='Новые фанфики',
        feed_url=request.url,
        url=request.url_root,
    )
    max_stories = current_app.config['RSS'].get('stories', 20)
    recent = Story.select_published().order_by(
        Story.first_published_at.desc(), Story.id.desc()
    )[:max_stories]
    for story in recent:
        first_author = story.authors[0]
        # Entries are plain text, so markup is stripped from the summary.
        feed.add(
            story.title,
            Markup(story.summary).striptags(),
            content_type='text',
            author=first_author.username,
            url=url_for('story.view', pk=story.id, _external=True),
            updated=story.updated,
            published=story.first_published_at or story.date,
        )
    return feed.get_response()
def new_atom():
    """Atom feed of newly added comics.

    Fix: ``comics[0].published`` raised IndexError when the query returned
    no rows; ``updated`` now falls back to None for an empty list, letting
    AtomFeed derive the timestamp itself.
    """
    icon_url = "http://i.imgur.com/LJ0ru93.png"
    comics = Comic.query.order_by(Comic.published.desc()).limit(RSS_FEED_COUNT).all()
    atom_items = [comic_to_atom_item(comic) for comic in comics]
    atom = AtomFeed(
        "레진코믹스 새로운 만화",
        # Newest comic's timestamp, or None when there are no comics yet.
        updated=comics[0].published if comics else None,
        subtitle="레진코믹스에 새롭게 추가된 만화의 정보입니다.",
        subtitle_type='text',
        icon=icon_url,
        logo=icon_url,
        feed_url=request.url,
        url=URL_GENRE,
        entries=atom_items
    )
    return atom.get_response()
def catalogAtom():
    """Atom feed of the 10 most recently added catalog items.

    Fix: removed the unused local ``item_root = 'Item'`` left over from an
    earlier revision (dead code).
    """
    feed = AtomFeed('Catalog', feed_url=request.url)
    for item in getLatestXItems(10):
        feed.add(item.title,
                 item.description,
                 content_type='text',
                 author=item.user.name,
                 url=url_for('showItemDetails',
                             category_name=item.category.name,
                             item_title=item.title),
                 updated=item.date_added)
    return feed.get_response()
def main():
    """Fetch crypto prices and write (or print) an Atom feed of them."""
    listing = requests.get(CRYPTO_LIST).json()
    crypto_data = {sym.lower(): info for sym, info in listing['Data'].items()}
    feed = AtomFeed('Crypto Prices',
                    feed_url='/',
                    url='/',
                    subtitle='Crypto updates for given symbols')
    for symbol in args.symbols.split(','):
        price, url = get_price(crypto_data, symbol)
        title = RESPONSE_TEMPLATE.format(symbol=symbol, price=price,
                                         target_currency=TARGET_CURRENCY)
        now = datetime.utcnow()
        # Fresh UUID per entry: readers see every run as brand-new items.
        feed.add(title, "",
                 content_type='html',
                 author='crypto-price',
                 url=url,
                 id=str(uuid.uuid4()),
                 published=now,
                 updated=now)
    body = feed.get_response().response[0].decode("utf-8")
    if args.file:
        with open(args.file, 'w') as f:
            f.write(body)
    else:
        print(body)
def atom_feed(cls, uri):
    """
    Returns atom feed for articles published under a particular category.
    """
    # Exactly-one unpacking: raises ValueError (→ 404) when nothing matches.
    try:
        category, = cls.search([
            ('unique_name', '=', uri),
        ], limit=1)
    except ValueError:
        abort(404)
    feed = AtomFeed(
        "Articles by Category %s" % category.unique_name,
        feed_url=request.url,
        url=request.host_url,
    )
    # Each article serializes itself straight into feed.add keyword args.
    for article in category.published_articles:
        feed.add(**article.serialize(purpose='atom'))
    return feed.get_response()
def recent_feed():
    """Atom feed of recent blog articles.

    Fix: removed debugging leftovers — a ``print`` statement, a discarded
    ``dir()`` call, and the unused ``data = get_time(article.date)`` local.

    NOTE(review): every entry is still stamped with the constant
    ``time(1, 1, 1, tzinfo=GMT1())`` instead of the article's own date —
    that looks wrong but is preserved here; confirm what ``get_time``
    returns before switching ``updated`` to it.
    """
    feed = AtomFeed('Recent Articles', feed_url=request.url, url=request.url_root)
    for article in Blog.rss_blog(g.db):
        feed.add(article.title,
                 unicode(article.content_html),
                 content_type='html',
                 author="22too",
                 url=make_external("http://www.22too.com/blog/" + str(article.id)),
                 updated=time(1, 1, 1, tzinfo=GMT1()))
    return feed.get_response()
def get(self):
    """Render the site-wide Atom feed of the first ten articles."""
    site_title = SITE['title']
    site_subtitle = SITE['subtitle']
    feed = AtomFeed('%s' % (site_title),
                    feed_url=request.url,
                    url=request.url_root,
                    subtitle=site_subtitle)
    for article in Article.query.limit(10):
        article_url = urljoin(request.url_root,
                              url_for('blog.article', pk=article.id))
        feed.add(article.title,
                 article.to_html(),
                 content_type='html',
                 author=article.user.username,
                 url=article_url,
                 updated=article.updated_at,
                 published=article.created_at)
    return feed.get_response()
def feeds():
    """Atom feed of blog posts, newest first.

    Fix: the old "Sort post by created date" comment was not backed by any
    ordering clause — posts are now actually ordered by ``posted``
    descending.

    NOTE(review): ``post.posted`` (a timestamp) is passed as the entry
    *content*; that looks like a bug, but the Blog model's body field is
    not visible here, so the behavior is preserved — confirm against the
    model and swap in the real content field.
    """
    feed = AtomFeed(title='Latest Posts from My Blog',
                    feed_url=request.url,
                    url=request.url_root)
    # Sort posts by created date, newest first.
    blogs = Blog.query.order_by(Blog.posted.desc()).all()
    for post in blogs:
        feed.add(post.title,
                 post.posted,
                 content_type='html',
                 id=post.id,
                 author=post.blogger.username,
                 published=post.posted,
                 updated=post.posted)
    return feed.get_response()
def recent_feed():
    """Atom feed of the 12 newest items in the sports catalog."""
    feed = AtomFeed('Latest Items from Sports Catalog App',
                    feed_url=request.url,
                    url=request.url_root)
    recent_items = (session.query(CategoryItem)
                    .order_by(desc(CategoryItem.date_created))
                    .limit(12)
                    .all())
    for item in recent_items:
        item_url = make_external(url_for('showItemDetails',
                                         category_title=item.category.title,
                                         item_title=item.title))
        feed.add(item.title,
                 unicode(item.description),
                 content_type='html',
                 author=item.user.name,
                 url=item_url,
                 updated=item.date_created)
    return feed.get_response()
def atomfeed():
    """Atom feed of all blog posts, newest first, with localized timestamps."""
    feed = AtomFeed('Mempool | Satoshi Nakamoto Institute',
                    feed_url=request.url,
                    url=request.url_root)
    for article in BlogPost.query.order_by(desc(BlogPost.added)).all():
        articleurl = url_for('blogpost', slug=article.slug, _external=True)
        page = pages.get(article.slug)
        # NOTE(review): the page HTML is escaped even though the entry is
        # declared content_type='html' — presumably deliberate; confirm the
        # feed renders as intended.
        feed.add(article.title,
                 escape(page.html),
                 content_type='html',
                 author=article.author[0].first + ' ' + article.author[0].last,
                 url=articleurl,
                 updated=date_to_localized_datetime(article.added),
                 published=date_to_localized_datetime(article.date))
    app.logger.info(str(request.remote_addr) + ', atomfeed')
    return feed.get_response()
def recent_feed():
    """Atom feed of the 10 newest dated pages.

    Fix: ``updated`` was read from ``page.meta['date']`` — ``page`` being
    the loop variable leaked from the filtering list comprehension — so
    every entry got the date of whichever page happened to come last in
    ``pages``. It now uses the current ``post``'s own date.
    """
    feed = AtomFeed('Recent Articles',
                    feed_url=request.url,
                    url=request.url_root)
    # Only pages carrying a 'date' meta field belong in the feed.
    posts = [page for page in pages if 'date' in page.meta]
    sorted_posts = sorted(posts, reverse=True,
                          key=lambda page: page.meta['date'])[:10]
    for post in sorted_posts:
        feed.add(post.meta['title'],
                 unicode(post.body[:500] + '\n\n....'),
                 content_type='html',
                 author='Jordan Moeser',
                 url=make_external(post.path),
                 updated=post.meta['date'])
    return feed.get_response(), 200, {
        'Content-Type': 'application/atom+xml; charset=utf-8'
    }