def feed():
    """Serve the site-wide Atom feed of recently published posts."""
    def to_entry_dict(post):
        # Drop the raw markup; expose rendered HTML and an absolute URL.
        entry = post.to_dict()
        del entry['raw_content']
        entry['content'] = get_parser(post.format).parse_whole(post.raw_content)
        entry['url'] = site['root_url'] + make_abs_url(post.unique_key)
        return entry

    published_posts = islice(storage.get_posts(include_draft=False),
                             0, current_app.config['FEED_COUNT'])
    atom = AtomFeed(title=site['title'],
                    subtitle=site['subtitle'],
                    url=site['root_url'] + request.script_root,
                    feed_url=site['root_url'] + url_for('.feed'),
                    author=site.get('author'))
    for entry in map(to_entry_dict, published_posts):
        tz = timezone_from_str(site['timezone'])
        atom.add(title=entry['title'],
                 content=entry['content'],
                 url=entry['url'],
                 id=entry['unique_key'],
                 published=entry['created'].replace(tzinfo=tz),
                 updated=entry['updated'].replace(tzinfo=tz),
                 author=entry['author'])
    response = make_response(atom.to_string())
    response.content_type = 'application/atom+xml; charset=utf-8'
    return response
def feed(self, feed_title, title, content, url, published=None, summary=None,
         enclosure=None, media_thumbnail=None):
    """Build a single-entry Atom feed string with faked metadata.

    Optionally appends an enclosure link, and splices a ``media:thumbnail``
    element into the serialized XML by hand (AtomFeed has no native support
    for the Media RSS namespace).
    """
    atom = AtomFeed(feed_title, feed_url=WP_FEED_URL)
    zone = pytz.timezone(faker.timezone())
    published = published or faker.date_time(tzinfo=zone)
    entry_kwargs = {
        'content_type': 'html',
        'author': faker.name(),
        'url': url,
        'updated': faker.date_time_between(start_date=published, tzinfo=zone),
        'published': published,
    }
    if summary:
        entry_kwargs['summary'] = summary
    if enclosure:
        entry_kwargs['links'] = [{
            'type': enclosure['type'],
            'href': enclosure['url'],
            'rel': 'enclosure',
            'length': faker.pyint(),
        }]
    atom.add(title, content, **entry_kwargs)
    out = atom.to_string()
    if media_thumbnail:
        thumb = '<media:thumbnail url="{0}" />'.format(media_thumbnail)
        out = out.replace('<feed', '<feed xmlns:media="http://search.yahoo.com/mrss/"')
        out = out.replace('</entry>', '{0}</entry>'.format(thumb))
    return out
def serialize_category_atom(category, url, user, event_filter):
    """Export the events in a category to Atom

    :param category: The category to export
    :param url: The URL of the feed
    :param user: The user who needs to be able to access the events
    :param event_filter: A SQLalchemy criterion to restrict which events
                         will be returned.  Usually something involving
                         the start/end date of the event.
    """
    query = (Event.query
             .filter(Event.category_chain_overlaps(category.id),
                     ~Event.is_deleted,
                     event_filter)
             .options(load_only('id', 'category_id', 'start_dt', 'title',
                                'description', 'protection_mode', 'access_key'),
                      subqueryload('acl_entries'))
             .order_by(Event.start_dt))
    feed = AtomFeed(feed_url=url,
                    title='Indico Feed [{}]'.format(category.title))
    for event in query:
        # Skip events the requesting user is not allowed to see.
        if not event.can_access(user):
            continue
        feed.add(title=event.title,
                 summary=unicode(event.description),  # get rid of RichMarkup
                 url=event.external_url,
                 updated=event.start_dt)
    return BytesIO(feed.to_string().encode('utf-8'))
def build_feed():
    """Render every published entry into an Atom feed and write rss.xml."""
    cfg = current_app.config
    feed = AtomFeed(cfg['SITE_NAME'],
                    feed_url=cfg['DOMAIN'] + 'rss.xml',
                    url=cfg['DOMAIN'],
                    subtitle=cfg['SUBTITLE'],
                    author=cfg['AUTHOR'],
                    updated=datetime.datetime.now())
    for item in Entry.get_all_published():
        # Entry dates are stored as 'YYYY-MM-DD HH:MM:SS' strings.
        stamp = datetime.datetime.strptime(item['date'], '%Y-%m-%d %H:%M:%S')
        feed.add(unicode(item['title']),
                 unicode(markdown(item['content'])),
                 content_type='html',
                 author=cfg['AUTHOR'],
                 published=stamp,
                 updated=stamp,
                 id=cfg['DOMAIN'] + item['slug'] + '/',
                 url=cfg['DOMAIN'] + 'posts/' + item['slug'] + '/')
    # utf-8-sig writes a BOM so feed readers detect the encoding.
    with codecs.open(BASE_DIR + '/rss.xml', 'w', 'utf-8-sig') as f:
        f.write(feed.to_string())
def build_feed():
    """Render every published entry into an Atom feed under ./ghpages."""
    feed = AtomFeed(SITE_NAME,
                    feed_url=DOMAIN + 'rss.xml',
                    url=DOMAIN,
                    subtitle=SUBTITLE,
                    author=AUTHOR,
                    updated=datetime.datetime.now())
    for item in Entry.get_all_published():
        # Entry dates are stored as 'YYYY-MM-DD HH:MM:SS' strings.
        stamp = datetime.datetime.strptime(item['date'], '%Y-%m-%d %H:%M:%S')
        feed.add(unicode(item['title']),
                 unicode(markdown(item['content'])),
                 content_type='html',
                 author=AUTHOR,
                 published=stamp,
                 updated=stamp,
                 id=DOMAIN + item['slug'] + '/',
                 url=DOMAIN + 'posts/' + item['slug'] + '/')
    # utf-8-sig writes a BOM so feed readers detect the encoding.
    with codecs.open('./ghpages/rss.xml', 'w', 'utf-8-sig') as f:
        f.write(feed.to_string())
def build_artifact(self, artifact):
    """Write the ten newest blog posts as an Atom feed artifact."""
    ctx = get_ctx()
    feed_source = self.source
    page = feed_source.parent
    feed = AtomFeed(title=page.record_label + u' — Pallets Project',
                    feed_url=url_to(feed_source, external=True),
                    url=url_to('/blog', external=True),
                    id=get_id(ctx.env.project.id))
    for post in page.children.order_by('-pub_date').limit(10):
        feed.add(post['title'],
                 text_type(post['body']),
                 xml_base=url_to(post, external=True),
                 url=url_to(post, external=True),
                 content_type='html',
                 id=get_id(u'%s/%s' % (ctx.env.project.id,
                                       post['_path'].encode('utf-8'))),
                 author=post['author'],
                 # pub_date is truncated to day resolution for the feed
                 updated=datetime(*post['pub_date'].timetuple()[:3]))
    with artifact.open('wb') as f:
        f.write(feed.to_string().encode('utf-8'))
def getsingle():
    """Return one Crime document selected by id, as JSON or as Atom+XML.

    Query params:
        name1: the document id (spaces are stripped before matching)
        type:  'json' or 'xml' output format

    Returns a (json-response, 200) tuple; implicitly returns None for an
    unknown ``type`` value (preserved from the original behavior).
    """
    connect('c')
    storedata = {}
    name1 = request.args.get('name1')
    # Renamed from `type`, which shadowed the builtin.
    fmt = request.args.get('type')
    # replace() is a no-op when there are no spaces; no guard needed.
    name1 = name1.replace(" ", "")
    if fmt == 'json':
        for t in Crime.objects:
            if t.id == name1:
                storedata[t.id] = t.data
        return jsonify({'alldata': storedata}), 200
    if fmt == 'xml':
        for t in Crime.objects:
            if t.id == name1:
                nowtime = datetime.datetime.fromtimestamp(time.time())
                xml = dicttoxml.dicttoxml(t.data[0], custom_root='content')
                dom = parseString(xml).toprettyxml()
                # Strip the XML declaration (raw string: '\?' is a regex
                # escape, not a Python one) so the fragment can be appended
                # after the Atom envelope.
                dom = re.sub(r'<\?xml version="[0-9]*\.[0-9]*" \?>\n', '', dom)
                feed = AtomFeed(
                    id='https://mlab.com/databases/my-database/collections/crime/' + t.id,
                    title=t.id,
                    updated=nowtime,
                    author="z5129269")
                storedata[t.id] = feed.to_string() + dom
        return jsonify({'alldata': storedata}), 200
def compute_atom_feed(request):
    """Assemble the site Atom feed from the most recent RSS-enabled pages."""
    feed = AtomFeed(
        title=catonmat_title,
        subtitle=catonmat_subtitle,
        feed_url='http://www.catonmat.net/feed',
        url='http://www.catonmat.net',
        author=peteris,
        icon='http://www.catonmat.net/favicon.ico',
        generator=('catonmat blog', 'http://www.catonmat.net', 'v1.0'))
    # TODO: logo='http://www.catonmat.net/)
    recent_pages = (session.query(Page)
                    .join(Rss)
                    .order_by(Rss.publish_date.desc())
                    .limit(config.rss_items)
                    .all())
    for page in recent_pages:
        feed.add(title=page.title,
                 content=page.parsed_content,
                 content_type='html',
                 author=peteris,
                 url='http://www.catonmat.net' + page.request_path,
                 id=page.page_id,
                 updated=page.last_update,
                 published=page.rss_page.publish_date)
    return feed.to_string()
def generate_feed(artifact):
    """Build the Atom feed from the captured items and write it to *artifact*.

    Relies on names from the enclosing scope (title, subtitle, items,
    feed_url, embed_url, the item_*_field selectors and ctx).
    """
    feed = AtomFeed(
        title=title or 'Feed',
        subtitle=unicode(subtitle or ''),
        # markup-aware subtitles serialize as HTML, plain strings as text
        subtitle_type='html' if hasattr(subtitle, '__html__') else 'text',
        feed_url=feed_url,
        url=embed_url,
        id=get_id(ctx.env.project.id + 'lektor'))
    for item in items:
        feed.add(
            get_item_title(item, item_title_field),
            get_item_body(item, item_body_field),
            xml_base=url_to(item, external=True),
            url=url_to(item, external=True),
            content_type='html',
            id=get_id(u'%slektor/%s' % (ctx.env.project.id,
                                        item['_path'].encode('utf-8'))),
            author=get_item_author(item, item_author_field),
            updated=get_item_updated(item, item_date_field))
    with artifact.open('wb') as f:
        f.write(feed.to_string().encode('utf-8') + '\n')
def build_atom(self, entries):
    """Write the Atom feed for one language's blog entries to disk."""
    lang = entries[0].language
    feed = AtomFeed("OpenRailwayMap ({})".format(lang),
                    feed_url="https://blog.openrailwaymap.org/{}.atom".format(lang),
                    title_type="text",
                    author="OpenRailwayMap developers")
    for entry in entries:
        dest_path = "https://blog.openrailwaymap.org/{}/{}".format(
            lang, entry.destination_path)
        # Fall back to the publication date when no modification date is set.
        updated = entry.moddate
        if updated is None or updated == "":
            updated = entry.pubdate
        with open(self.get_source_filename(entry.language, entry.id),
                  "r") as entry_src:
            entry_text = entry_src.read()
        feed.add(entry.title, entry_text,
                 content_type="html",
                 author=entry.authors,
                 url=dest_path,
                 updated=updated,
                 published=entry.pubdate)
    with open("{}/{}.atom".format(self.output_directory, lang),
              "w") as feedfile:
        feedfile.write(feed.to_string())
def test_feedparser_authors():
    """Round-trip each author test case through AtomFeed and feedparser."""
    for case in tests:
        if case.skip:
            continue
        feed = AtomFeed(title="Test Feed", feed_url="http://testfeed.com")
        feed.entries.append(FeedEntry(
            title="Test Entry",
            url="http://testfeed.com/testentry",
            updated=datetime.utcnow(),
            content="Test Entry",
            author=case.namestring))
        parsed = feedparser.parse(feed.to_string())
        assert parsed.entries is not None
        assert len(parsed.entries) == 1
        assert len(parsed.entries[0].authors) == len(case.expected)
def feed(name):
    """Atom feed of the most recent commits touching a single wiki page."""
    if current_app.config.get('PRIVATE_WIKI') and current_user.is_anonymous:
        return current_app.login_manager.unauthorized()
    cname = to_canonical(name)
    wiki_name = current_app.config['SITE_TITLE']
    start = 0
    length = int(request.args.get('length', 20))
    the_feed = AtomFeed(
        title="{} - Recent changes for page '{}'".format(wiki_name, cname),
        url=url_for('wiki.page', name=cname, _external=True),
        id="{}_pagefeed_{}".format(to_canonical(wiki_name), cname),
        feed_url=url_for('wiki.feed', name=cname, _external=True),
        generator=("Realms wiki", 'https://github.com/scragg0x/realms-wiki',
                   __version__))
    page = g.current_wiki.get_page(cname)
    # page.history yields commit dicts, newest first; take one window.
    for commit in itertools.islice(page.history, start, start + length):
        the_feed.add(
            title="Commit '{}'".format(commit['sha']),
            content=commit['message'],
            url=url_for('wiki.commit', name=name, sha=commit['sha'],
                        _external=True),
            id="{}/{}".format(commit['sha'], cname),
            author=commit['author'],
            updated=datetime.fromtimestamp(commit['time']))
    response = make_response((the_feed.to_string(),
                              {'Content-type': 'application/atom+xml; charset=utf-8'}))
    response.add_etag()
    return response.make_conditional(request)
def build_feed():
    """Render every published entry into an Atom feed and write rss.xml."""
    cfg = current_app.config
    feed = AtomFeed(cfg['SITE_NAME'],
                    feed_url=cfg['DOMAIN'] + 'rss.xml',
                    url=cfg['DOMAIN'],
                    subtitle=cfg['SUBTITLE'],
                    author=cfg['AUTHOR'],
                    updated=datetime.datetime.now())
    for item in Entry.get_all_published():
        # Entry dates are stored as 'YYYY-MM-DD HH:MM:SS' strings.
        stamp = datetime.datetime.strptime(item['date'], '%Y-%m-%d %H:%M:%S')
        feed.add(unicode(item['title']),
                 unicode(markdown(item['content'])),
                 content_type='html',
                 author=cfg['AUTHOR'],
                 published=stamp,
                 updated=stamp,
                 id=cfg['DOMAIN'] + item['slug'] + '/',
                 url=cfg['DOMAIN'] + 'posts/' + item['slug'] + '/')
    # utf-8-sig writes a BOM so feed readers detect the encoding.
    with codecs.open(BASE_DIR + '/rss.xml', 'w', 'utf-8-sig') as f:
        f.write(feed.to_string())
def feed(name):
    """Atom feed of the most recent commits touching a single wiki page."""
    if current_app.config.get('PRIVATE_WIKI') and current_user.is_anonymous:
        return current_app.login_manager.unauthorized()
    cname = to_canonical(name)
    wiki_name = current_app.config['SITE_TITLE']
    start = 0
    length = int(request.args.get('length', 20))
    the_feed = AtomFeed(
        title="{} - Recent changes for page '{}'".format(wiki_name, cname),
        url=url_for('wiki.page', name=cname, _external=True),
        id="{}_pagefeed_{}".format(to_canonical(wiki_name), cname),
        feed_url=url_for('wiki.feed', name=cname, _external=True),
        generator=("Realms wiki", 'https://github.com/scragg0x/realms-wiki',
                   __version__))
    page = g.current_wiki.get_page(cname)
    # page.history yields commit dicts, newest first; take one window.
    for commit in itertools.islice(page.history, start, start + length):
        the_feed.add(
            title="Commit '{}'".format(commit['sha']),
            content=commit['message'],
            url=url_for('wiki.commit', name=name, sha=commit['sha'],
                        _external=True),
            id="{}/{}".format(commit['sha'], cname),
            author=commit['author'],
            updated=datetime.fromtimestamp(commit['time']))
    response = make_response((the_feed.to_string(),
                              {'Content-type': 'application/atom+xml; charset=utf-8'}))
    response.add_etag()
    return response.make_conditional(request)
def feed(self, feed_title, title, content, url, published=None, summary=None,
         enclosure=None, media_thumbnail=None):
    """Build a single-entry Atom feed string with faked metadata.

    Optionally appends an enclosure link, and splices a ``media:thumbnail``
    element into the serialized XML by hand (AtomFeed has no native support
    for the Media RSS namespace).
    """
    atom = AtomFeed(feed_title, feed_url=WP_FEED_URL)
    zone = pytz.timezone(faker.timezone())
    published = published or faker.date_time(tzinfo=zone)
    entry_kwargs = {
        'content_type': 'html',
        'author': faker.name(),
        'url': url,
        'updated': faker.date_time_between(start_date=published, tzinfo=zone),
        'published': published,
    }
    if summary:
        entry_kwargs['summary'] = summary
    if enclosure:
        entry_kwargs['links'] = [{
            'type': enclosure['type'],
            'href': enclosure['url'],
            'rel': 'enclosure',
            'length': faker.pyint(),
        }]
    atom.add(title, content, **entry_kwargs)
    out = atom.to_string()
    if media_thumbnail:
        thumb = '<media:thumbnail url="{0}" />'.format(media_thumbnail)
        out = out.replace('<feed', '<feed xmlns:media="http://search.yahoo.com/mrss/"')
        out = out.replace('</entry>', '{0}</entry>'.format(thumb))
    return out
def build_artifact(self, artifact):
    """Write the ten top-ranked blog posts as an Atom feed artifact."""
    ctx = get_ctx()
    feed_source = self.source
    page = feed_source.parent
    feed = AtomFeed(
        title=page.record_label + u' — Pallets Project',
        feed_url=url_to(feed_source, external=True),
        url=url_to('/blog', external=True),
        id=get_id(ctx.env.project.id))
    ranked = page.children.order_by('-pub_date', '-pub_order', 'title')
    for post in ranked.limit(10):
        feed.add(
            post['title'],
            text_type(post['body']),
            xml_base=url_to(post, external=True),
            url=url_to(post, external=True),
            content_type='html',
            id=get_id(u'%s/%s' % (ctx.env.project.id,
                                  post['_path'].encode('utf-8'))),
            author=post['author'],
            # pub_date is truncated to day resolution for the feed
            updated=datetime(*post['pub_date'].timetuple()[:3]))
    with artifact.open('wb') as f:
        f.write(feed.to_string().encode('utf-8'))
def build_artifact(self, artifact):
    # Build an Atom feed for the configured blog source and write it
    # into *artifact* as UTF-8 bytes.
    ctx = get_ctx()
    feed_source = self.source
    blog = feed_source.parent
    summary = get(blog, feed_source.blog_summary_field) or ''
    # Markup-aware summaries serialize as HTML, plain strings as text.
    subtitle_type = ('html' if hasattr(summary, '__html__') else 'text')
    blog_author = unicode(get(blog, feed_source.blog_author_field) or '')
    generator = ('Lektor Atom Plugin',
                 'https://github.com/ajdavis/lektor-atom',
                 pkg_resources.get_distribution('lektor-atom').version)
    # Prefer the configured base URL as a stable feed id; fall back to
    # the project id when no base URL is set.
    project_id = ctx.env.load_config().base_url
    if not project_id:
        project_id = ctx.env.project.id
    feed = AtomFeed(
        title=feed_source.feed_name,
        subtitle=unicode(summary),
        subtitle_type=subtitle_type,
        author=blog_author,
        feed_url=url_to(feed_source, external=True),
        url=url_to(blog, external=True),
        id=get_id(project_id),
        generator=generator)
    if feed_source.items:
        # "feed_source.items" is a string like "site.query('/blog')".
        expr = Expression(ctx.env, feed_source.items)
        items = expr.evaluate(ctx.pad)
    else:
        items = blog.children
    if feed_source.item_model:
        items = items.filter(F._model == feed_source.item_model)
    # Newest items first, capped at the configured limit.
    order_by = '-' + feed_source.item_date_field
    items = items.order_by(order_by).limit(int(feed_source.limit))
    for item in items:
        # Per-item author falls back to the blog-level author.
        item_author_field = feed_source.item_author_field
        item_author = get(item, item_author_field) or blog_author
        feed.add(
            get_item_title(item, feed_source.item_title_field),
            get_item_body(item, feed_source.item_body_field),
            xml_base=url_to(item, external=True),
            url=url_to(item, external=True),
            content_type='html',
            id=get_id(u'%s/%s' % (
                project_id, item['_path'].encode('utf-8'))),
            author=item_author,
            updated=get_item_updated(item, feed_source.item_date_field))
    with artifact.open('wb') as f:
        f.write(feed.to_string().encode('utf-8'))
def atom_feed():
    """Serve the site Atom feed built from the global post list."""
    feed = AtomFeed(current_app.config.get('SITE_NAME', "My Site"),
                    feed_url=request.url,
                    url=request.host_url,
                    subtitle=current_app.config.get('SITE_SUBTITLE', None))
    for post in posts:
        feed.add(_generate_entry(post))
    resp = make_response(feed.to_string())
    resp.mimetype = "application/atom+xml"
    return resp
def latest_posts():
    """Atom feed of the newest published posts."""
    feed = AtomFeed('Libcoffee.net', feed_url=request.url,
                    url=request.url_root)
    recent = Post.objects_published().order('-created_at')[:POSTS_PER_PAGE]
    for post in recent:
        feed.add(post.title,
                 unicode(post.__html__()),
                 content_type='html',
                 author=post.author.nickname(),
                 url=post.absolute_url(external=True),
                 updated=post.updated_at,
                 published=post.created_at)
    return Response(feed.to_string(), mimetype='application/atom+xml')
def blog_feed():
    """Atom feed of every blog post, newest first."""
    feed = AtomFeed('Recent Articles', feed_url=request.url,
                    url=request.url_root)
    for entry in Blog.query.order_by(Blog.timestamp.desc()).all():
        feed.add(entry.title,
                 unicode(entry.data),
                 content_type='html',
                 url=entry.get_url(),
                 updated=entry.timestamp)
    return Response(feed.to_string(), mimetype='text/xml')
def build_artifact(self, artifact):
    # Build an Atom feed for the configured blog source and write it
    # into *artifact* as UTF-8 bytes.
    ctx = get_ctx()
    feed_source = self.source
    blog = feed_source.parent
    summary = get(blog, feed_source.blog_summary_field) or ''
    # Markup-aware summaries serialize as HTML, plain strings as text.
    subtitle_type = ('html' if hasattr(summary, '__html__') else 'text')
    blog_author = unicode(get(blog, feed_source.blog_author_field) or '')
    generator = ('Lektor Atom Plugin',
                 'https://github.com/ajdavis/lektor-atom',
                 pkg_resources.get_distribution('lektor-atom').version)
    # Prefer the configured base URL as a stable feed id; fall back to
    # the project id when no base URL is set.
    project_id = ctx.env.load_config().base_url
    if not project_id:
        project_id = ctx.env.project.id
    feed = AtomFeed(title=feed_source.feed_name,
                    subtitle=unicode(summary),
                    subtitle_type=subtitle_type,
                    author=blog_author,
                    feed_url=url_to(feed_source, external=True),
                    url=url_to(blog, external=True),
                    id=get_id(project_id),
                    generator=generator)
    if feed_source.items:
        # "feed_source.items" is a string like "site.query('/blog')".
        expr = Expression(ctx.env, feed_source.items)
        items = expr.evaluate(ctx.pad)
    else:
        items = blog.children
    if feed_source.item_model:
        items = items.filter(F._model == feed_source.item_model)
    # Newest items first, capped at the configured limit.
    order_by = '-' + feed_source.item_date_field
    items = items.order_by(order_by).limit(int(feed_source.limit))
    for item in items:
        # Per-item author falls back to the blog-level author.
        item_author_field = feed_source.item_author_field
        item_author = get(item, item_author_field) or blog_author
        feed.add(get_item_title(item, feed_source.item_title_field),
                 get_item_body(item, feed_source.item_body_field),
                 xml_base=url_to(item, external=True),
                 url=url_to(item, external=True),
                 content_type='html',
                 id=get_id(u'%s/%s' % (project_id,
                                       item['_path'].encode('utf-8'))),
                 author=item_author,
                 updated=get_item_updated(item, feed_source.item_date_field))
    with artifact.open('wb') as f:
        f.write(feed.to_string().encode('utf-8'))
def blog_feed():
    """Atom feed of every blog post, newest first."""
    feed = AtomFeed('Recent Articles', feed_url=request.url,
                    url=request.url_root)
    for entry in Blog.query.order_by(Blog.timestamp.desc()).all():
        feed.add(entry.title,
                 unicode(entry.data),
                 content_type='html',
                 url=entry.get_url(),
                 updated=entry.timestamp)
    return Response(feed.to_string(), mimetype='text/xml')
def render_feed(ctx, posts):
    """Render up to the ten newest posts as an Atom feed string.

    Fix: entry ``url``/``id`` were previously built on top of the feed's
    own URL (producing ".../feed.xml<post-path>"); they are now rooted at
    the site root path instead.
    """
    root = ctx.get('rootpath')
    feed = AtomFeed("My Blog",
                    feed_url=root + 'feed.xml',
                    url=root,
                    subtitle="My example blog for a feed test.")
    for post in posts[0:10]:
        entry_url = root + post.url_path
        feed.add(post.get('title'),
                 post.html,
                 content_type='html',
                 author=post.get('author', 'None'),
                 url=entry_url,
                 id=entry_url,
                 updated=post.get('date'),
                 published=post.get('date'))
    return feed.to_string()
def download_atom():
    """Send the current user's bookmarks as a downloadable Atom file."""
    feed = AtomFeed(title='Mes bookmarks', feed_url='xxx',
                    url='http://www.markme.com')
    owner_query = {'user._id': ObjectId(current_user.get_id())}
    for bookmark in mongo.db.bookmarks.find(owner_query):
        feed.add(id=bookmark['_id'],
                 title=bookmark['title'],
                 content=bookmark['description'],
                 content_type='text',
                 updated=bookmark['published'],
                 links=[dict(href=bookmark['url']),
                        dict(via=bookmark['referrer'])],
                 categories=[dict(term=tag, label=tag)
                             for tag in bookmark['tags']],
                 author=dict(name=current_user.nickname,
                             nickname=current_user.nickname,
                             email=current_user.email))
    # NOTE(review): StringIO over encoded bytes only works on Python 2;
    # BytesIO would be required on Python 3 — confirm target runtime.
    return send_file(StringIO(feed.to_string().encode('utf-8')),
                     attachment_filename='bookmarks.xml',
                     as_attachment=True,
                     mimetype='application/atom+xml')
def show_collection():
    """List every Area collection as an Atom feed of XML payloads."""
    connect(host='mongodb://*****:*****@ds255539.mlab.com:55539/9321test')
    feed = AtomFeed(title='All Available Collections', feed_url=request.url)
    for area in Area.objects:
        # Re-serialize the document to XML via its JSON form.
        payload = xmlify(json.loads(area.to_json()), wrap="all", indent=" ")
        entry_url = 'http://127.0.0.1:5000' + url_for('show_entry',
                                                      name=area.name)
        feed.add(FeedEntry(title=area.name,
                           url=entry_url,
                           updated=datetime.datetime.utcnow(),
                           author={'name': 'admin'},
                           content_type="application/xml",
                           content=payload))
    response = make_response(feed.to_string())
    response.mimetype = "application/atom+xml"
    return response, 200
def atom_feed():
    """Aggregate the configured feeds into a single Atom feed response."""
    feeds = current_app.config['FEEDS']
    feed = AtomFeed('The City of Myles',
                    feed_url=request.url,
                    url=request.url_root)
    entries = get_feed_entries(feeds)[:20]
    for entry in entries:
        feed.add(entry.title,
                 entry.description,
                 # Fixed: keyword was misspelled "context_type", so the
                 # HTML content type was silently ignored by FeedEntry.
                 content_type='html',
                 author=entry.publisher,
                 url=entry.link,
                 published=entry.published,
                 updated=entry.updated)
    return Response(feed.to_string(), mimetype="application/xml")
async def _feed(request):
    """Build an Atom feed of the ten newest posts."""
    feed = AtomFeed(title=SITE_TITLE,
                    updated=datetime.now(),
                    feed_url=request.url,
                    url=request.host)
    latest = (await Post.get_all())[:10]
    for post in latest:
        feed.add(post.title,
                 post.html_content,
                 content_type='html',
                 summary=post.excerpt,
                 summary_type='html',
                 author=AUTHOR,
                 url=post.url,
                 id=post.id,
                 updated=post.created_at,
                 published=post.created_at)
    return feed.to_string()
def write_tag_feed(builder, tag):
    """Write an Atom feed for the ten newest entries carrying *tag*."""
    blog_author = builder.config.root_get('author')
    url = builder.config.root_get('canonical_url') or 'http://localhost/'
    feed = AtomFeed(u'Recent Blog Posts',
                    subtitle=u'Recent blog posts',
                    feed_url=urljoin(url, builder.link_to('blog_feed')),
                    url=url)
    for entry in get_tagged_entries(builder, tag)[:10]:
        feed.add(entry.title,
                 unicode(entry.render_contents()),
                 content_type='html',
                 author=blog_author,
                 url=urljoin(url, entry.slug),
                 updated=entry.pub_date)
    with builder.open_link_file('tagfeed', tag=tag.name) as f:
        f.write(feed.to_string().encode('utf-8') + '\n')
def show_entry(name):
    """Return one Area collection, matched by whitespace-insensitive name."""
    connect(host='mongodb://*****:*****@ds255539.mlab.com:55539/9321test')
    feed = AtomFeed(title='Single Collection', feed_url=request.url)
    wanted = name.lower().replace(' ', '')
    for area in Area.objects:
        if area.name.lower().replace(' ', '') != wanted:
            continue
        #print(xmlify(a.offenses,wrap="all", indent=" "))
        # Re-serialize the document to XML via its JSON form.
        payload = xmlify(json.loads(area.to_json()), wrap="all", indent=" ")
        feed.add(FeedEntry(title=area.name,
                           url=request.url,
                           updated=datetime.datetime.utcnow(),
                           author={'name': 'admin'},
                           content_type="application/xml",
                           content=payload))
        response = make_response(feed.to_string())
        response.mimetype = "application/atom+xml"
        return response, 200  # ATOM
    return jsonify(LGA_name=False), 404
def test_feed(self):
    """The /recent.atom route must serve the same feed add_to_feed builds."""
    category_key = self.categories.add("category")
    tag_keys = self.tags.add(["a new tag", "a new new tag"])
    self.posts.add("about", "body text", category_key, tag_keys)
    response = self.client.get(path='/recent.atom')
    expected = AtomFeed('Recent Articles',
                        feed_url='http://localhost/recent.atom',
                        url=request.url_root)
    expected = self.posts.add_to_feed(expected, request.url)
    return self.assertEqual(expected.to_string(),
                            response.data.decode('utf8'))
def write_feed(builder):
    """Write the site-wide Atom feed for the ten newest blog entries."""
    blog_author = builder.config.root_get('author')
    url = builder.config.root_get('canonical_url') or 'http://localhost/'
    feed = AtomFeed(builder.config.get('feed.name') or u'Recent Blog Posts',
                    subtitle=(builder.config.get('feed.subtitle')
                              or u'Recent blog posts'),
                    feed_url=urljoin(url, builder.link_to('blog_feed')),
                    url=url)
    for entry in get_all_entries(builder)[:10]:
        feed.add(entry.title,
                 unicode(entry.render_contents()),
                 content_type='html',
                 author=blog_author,
                 url=urljoin(url, entry.slug),
                 updated=entry.pub_date)
    with builder.open_link_file('blog_feed') as f:
        f.write(feed.to_string().encode('utf-8') + '\n')
def write_feed(builder):
    """Write the site-wide Atom feed for the ten newest blog entries."""
    blog_author = builder.config.root_get('author')
    url = builder.config.root_get('canonical_url') or 'http://localhost/'
    feed = AtomFeed(builder.config.get('feed.name') or u'Recent Blog Posts',
                    subtitle=(builder.config.get('feed.subtitle')
                              or u'Recent blog posts'),
                    feed_url=urljoin(url, builder.link_to('blog_feed')),
                    url=url)
    for entry in get_all_entries(builder)[:10]:
        feed.add(entry.title,
                 six.text_type(entry.render_contents()),
                 content_type='html',
                 author=blog_author,
                 url=urljoin(url, entry.slug),
                 updated=entry.pub_date)
    with builder.open_link_file('blog_feed') as f:
        f.write(feed.to_string() + '\n')
def write_feed(builder):
    """Write the site-wide Atom feed for the ten newest blog entries."""
    blog_author = builder.config.root_get("author")
    url = builder.config.root_get("canonical_url") or "http://localhost/"
    feed = AtomFeed(builder.config.get("feed.name") or u"Recent Blog Posts",
                    subtitle=(builder.config.get("feed.subtitle")
                              or u"Recent blog posts"),
                    feed_url=urljoin(url, builder.link_to("blog_feed")),
                    url=url)
    for entry in get_all_entries(builder)[:10]:
        feed.add(entry.title,
                 unicode(entry.render_contents()),
                 content_type="html",
                 author=blog_author,
                 url=urljoin(url, entry.slug),
                 updated=entry.pub_date)
    with builder.open_link_file("blog_feed") as f:
        f.write(feed.to_string().encode("utf-8") + "\n")
def hello():
    """Serve every stored article as an Atom feed, ordered by update time."""
    feed = AtomFeed('Recent Articles', feed_url=request.url,
                    url=request.url_root)
    rows = db.conn.execute(select([db.articles]).order_by("updated"))
    for row in rows:
        feed.add(title=row.title,
                 content="",
                 content_type="html",
                 # NOTE(review): author is set to the article title — looks
                 # like it should be an author column; confirm with schema.
                 author=row.title,
                 url=row.url,
                 updated=row.updated)
    return feed.to_string()
def atom_feed():
    """Serve the Atom feed, one FeedEntry per post in the global list."""
    feed = AtomFeed(current_app.config.get('SITE_NAME', "My Site"),
                    feed_url=request.url,
                    url=request.host_url,
                    subtitle=current_app.config.get('SITE_SUBTITLE', None))
    for post in posts:
        author = {'name': current_app.config.get('AUTHOR_NAME'),
                  'email': current_app.config.get('AUTHOR_EMAIL')}
        feed.add(FeedEntry(post.title,
                           url=post.external_url,
                           updated=post.pub_date,
                           content=post.body,
                           summary=post.config.get('summary', None),
                           author=author))
    resp = make_response(feed.to_string())
    resp.mimetype = "application/atom+xml"
    return resp
def test_render_home_with_blog(self, rmock, client):
    '''It should render the home page with the latest blog article'''
    post_url = faker.uri()
    feed = AtomFeed('Some blog', feed_url=WP_ATOM_URL)
    feed.add('Some post', '<div>Some content</div>',
             content_type='html',
             author=faker.name(),
             url=post_url,
             updated=faker.date_time(),
             published=faker.date_time())
    # Serve the fabricated feed from the mocked WordPress endpoint.
    rmock.get(WP_ATOM_URL, text=feed.to_string(),
              headers={'Content-Type': 'application/atom+xml'})
    response = client.get(url_for('site.home'))
    assert200(response)
    page = response.data.decode('utf8')
    assert 'Some post' in page
    assert post_url in page
def hello():
    """Serve every stored article as an Atom feed, ordered by update time."""
    feed = AtomFeed('Recent Articles', feed_url=request.url,
                    url=request.url_root)
    rows = db.conn.execute(select([db.articles]).order_by("updated"))
    for row in rows:
        feed.add(title=row.title,
                 content="",
                 content_type="html",
                 # NOTE(review): author is set to the article title — looks
                 # like it should be an author column; confirm with schema.
                 author=row.title,
                 url=row.url,
                 updated=row.updated)
    return feed.to_string()
def _execute(self, fossils):
    """Serialize fossil search results into an Atom feed string."""
    results = fossils['results']
    # A single result arrives bare; normalize to a list.
    if not isinstance(results, list):
        results = [results]
    feed = AtomFeed(title='Indico Feed', feed_url=fossils['url'])
    for fossil in results:
        feed.add(
            title=to_unicode(fossil['title']) or None,
            summary=to_unicode(fossil['description']) or None,
            url=fossil['url'],
            # ugh, but that's better than creationDate
            updated=_deserialize_date(fossil['startDate']))
    return feed.to_string()
def atom(self, feed_title, feed_url, feed_subtitle, site_url, author,
         *args, **kwargs):
    """Render the selected pages and return them as an Atom feed string."""
    atom = AtomFeed(title=feed_title, subtitle=feed_subtitle,
                    feed_url=feed_url, url=site_url)
    for page in self.pages(*args, **kwargs):
        page.render()
        stamp = dateparse(page.date)
        atom.add(title=page.title,
                 content=page.content,
                 content_type='html',
                 author=author,
                 url=site_url + page.url,
                 published=stamp,
                 updated=stamp,
                 xml_base=None)
    return atom.to_string()
def render_feed(ctx, posts):
    """Render up to the ten newest posts as an Atom feed string.

    Fix: entry ``url``/``id`` were previously built on top of the feed's
    own URL (producing ".../feed.xml<post-path>"); they are now rooted at
    the site root path instead.
    """
    root = ctx.get('rootpath')
    feed = AtomFeed("My Blog",
                    feed_url=root + 'feed.xml',
                    url=root,
                    subtitle="My example blog for a feed test.")
    for post in posts[0:10]:
        entry_url = root + post.url_path
        feed.add(post.get('title'),
                 post.html,
                 content_type='html',
                 author=post.get('author', 'None'),
                 url=entry_url,
                 id=entry_url,
                 updated=post.get('date'),
                 published=post.get('date'))
    return feed.to_string()
def test_render_home_with_blog(self):
    '''It should render the home page with the latest blog article'''
    post_url = faker.uri()
    feed = AtomFeed('Some blog', feed_url=WP_ATOM_URL)
    feed.add('Some post', '<div>Some content</div>',
             content_type='html',
             author=faker.name(),
             url=post_url,
             updated=faker.date_time(),
             published=faker.date_time())
    # Serve the fabricated feed from the mocked WordPress endpoint.
    httpretty.register_uri(httpretty.GET, WP_ATOM_URL,
                           body=feed.to_string(),
                           content_type='application/atom+xml')
    response = self.get(url_for('site.home'))
    self.assert200(response)
    page = response.data.decode('utf8')
    self.assertIn('Some post', page)
    self.assertIn(post_url, page)
def feed():
    """Serve the site Atom feed of the latest 12 posts."""
    atom = AtomFeed(title=app.config["SiteTitle"],
                    subtitle=app.config["SiteSubTitle"],
                    icon=url_for("favicon", _external=True),
                    url=request.url_root)
    for post in get_latest_posts(12).get("posts", []):
        atom.add(title=post.title,
                 content=post.safe_html,
                 content_type='html',
                 author=post.author.nickname,
                 url=url_for("post", postid=post.id, _external=True),
                 updated=post.updated_date,
                 published=post.post_date)
    return Response(atom.to_string(), mimetype='application/xml')
def feed():
    """Serve the site Atom feed of the latest 12 posts."""
    atom = AtomFeed(title=app.config["SiteTitle"],
                    subtitle=app.config["SiteSubTitle"],
                    icon=url_for("favicon", _external=True),
                    url=request.url_root)
    for post in get_latest_posts(12).get("posts", []):
        atom.add(title=post.title,
                 content=post.safe_html,
                 content_type='html',
                 author=post.author.nickname,
                 url=url_for("post", postid=post.id, _external=True),
                 updated=post.updated_date,
                 published=post.post_date)
    return Response(atom.to_string(), mimetype='application/xml')
def test_render_home_with_blog(self):
    '''It should render the home page with the latest blog article'''
    post_url = faker.uri()
    feed = AtomFeed('Some blog', feed_url=WP_ATOM_URL)
    feed.add('Some post', '<div>Some content</div>',
             content_type='html',
             author=faker.name(),
             url=post_url,
             updated=faker.date_time(),
             published=faker.date_time())
    # Serve the fabricated feed from the mocked WordPress endpoint.
    httpretty.register_uri(httpretty.GET, WP_ATOM_URL,
                           body=feed.to_string(),
                           content_type='application/atom+xml')
    response = self.get(url_for('site.home'))
    self.assert200(response)
    page = response.data.decode('utf8')
    self.assertIn('Some post', page)
    self.assertIn(post_url, page)
def write_tag_feed(builder, tag):
    """Write an Atom feed of the ten newest entries tagged *tag*."""
    blog_author = builder.config.root_get('author')
    url = builder.config.root_get('canonical_url') or 'http://localhost/'
    feed = AtomFeed(builder.config.get('feed.name') or u'Recent Blog Posts',
                    subtitle=(builder.config.get('feed.subtitle')
                              or u'Recent blog posts'),
                    feed_url=urljoin(url, builder.link_to('blog_feed')),
                    url=url)
    entries = get_tagged_entries(builder, tag)
    # Newest first; entries without a pub_date ('' key) sort last.
    entries.sort(key=lambda e: (e.pub_date or ''), reverse=True)
    for entry in entries[:10]:
        feed.add(entry.title,
                 unicode(entry.render_contents()),
                 content_type='html',
                 author=blog_author,
                 url=urljoin(url, entry.slug),
                 updated=entry.pub_date)
    with builder.open_link_file('tagfeed', tag=tag.name) as f:
        f.write(feed.to_string().encode('utf-8') + '\n')
def atom(self, feed_title, feed_url, feed_subtitle, site_url, author,
         *args, **kwargs):
    """Render the selected pages and return them as an Atom feed string."""
    atom = AtomFeed(title=feed_title, subtitle=feed_subtitle,
                    feed_url=feed_url, url=site_url)
    for page in self.pages(*args, **kwargs):
        page.render()
        stamp = dateparse(page.date)
        atom.add(title=page.title,
                 content=page.content,
                 content_type='html',
                 author=author,
                 url=site_url + page.url,
                 published=stamp,
                 updated=stamp,
                 xml_base=None)
    return atom.to_string()
def feed():
    """Atom feed of the twelve newest posts."""
    atom = AtomFeed(title=BLOG_TITLE,
                    icon=r"http://www.rdoge.cc/static/favicon.ico",
                    url=request.url_root)
    for post in Post.objects[:12]:
        atom.add(title=post.title,
                 content=post.content,
                 # Fixed: keyword was misspelled "content_tyep", so the
                 # HTML content type was silently ignored by FeedEntry.
                 content_type="html",
                 author=post.author.nickname,
                 url=url_for("main.posts", title=post.title, _external=True),
                 published=post.created_at,
                 updated=post.created_at)
    return Response(atom.to_string(), mimetype="application/xml")
class FeedBuilder(BlogBuilder):
    """Transform blog metadata and posts into an Atom feed."""

    def __init__(self, metadata):
        self.metadata = metadata
        self._feed = AtomFeed(**metadata)

    def add(self, posts):
        """Add blog posts to the feed."""
        for post in posts:
            entry = FeedEntry(
                summary=post.summary,
                title=post.title,
                title_type='html',
                url=post.url,
                updated=post.date,
            )
            self._feed.add(entry)

    def _generate_output(self):
        # Serialized Atom XML for the accumulated entries.
        return self._feed.to_string()
class FeedBuilder(BlogBuilder):
    """Transform blog metadata and posts into an Atom feed."""

    def __init__(self, metadata):
        self.metadata = metadata
        self._feed = AtomFeed(**metadata)

    def add(self, posts):
        """Add blog posts to the feed."""
        for post in posts:
            self._feed.add(self._make_entry(post))

    @staticmethod
    def _make_entry(post):
        # One Atom entry per post; the post body travels in the summary.
        return FeedEntry(
            summary=post.summary,
            title=post.title,
            title_type='html',
            url=post.url,
            updated=post.date,
        )

    def _generate_output(self):
        return self._feed.to_string()
def _feed():
    """Build and return the site-wide Atom feed as an XML string."""
    feed = AtomFeed(title=current_app.config.get('SITE_TITLE'),
                    updated=datetime.now(),
                    feed_url=request.url,
                    url=request.host)
    for post in Post.get_all():
        feed.add(
            post.title,
            post.html_content,
            content_type='html',
            summary=post.excerpt,
            summary_type='html',
            author=current_app.config.get('AUTHOR'),
            url=post.url,
            id=post.id,
            # Posts are never edited in place: created_at doubles as the
            # updated timestamp.
            updated=post.created_at,
            published=post.created_at,
        )
    return feed.to_string()
def feed():
    """Serve the ten newest flame links as an Atom feed."""
    # TODO Pull this metadata elsewhere
    atomfeed = AtomFeed(
        title="Flame Links",
        subtitle="Subtitle",
        feed_url=flask.url_for('feed', _external=True),
        url=flask.url_for('home', _external=True),
        author='Kristian Glass',
    )
    # Icky, really should do this in the db...
    for link in _flamelinks()[:10]:
        atomfeed.add(
            title=link.title,
            # No separate body is stored; the title doubles as the content.
            content=link.title,
            content_type='text',
            url=flask.url_for('out', flamelink_id=link.id, _external=True),
            updated=link.pinboard_time,
        )
    return flask.Response(atomfeed.to_string(), mimetype='application/atom+xml')
def feed():
    """Build an Atom feed response covering every blog post."""
    site_name = app.config.get('SITE_NAME')
    description = app.config.get('SITE_DESCRIPTION') or 'Recent Blog Posts'
    site_url = app.config.get('URL')
    feed = AtomFeed(title=site_name, subtitle=description,
                    feed_url=url_for('all_posts'), url=site_url)
    for post in posts:
        published = post.meta.get('published')
        post_url = url_for('post',
                           year=published.year,
                           month=published.month,
                           day=published.day,
                           path=post.path)
        # Fall back to the publish date when no explicit update date exists;
        # dates are stored as ``date`` objects, so combine with midnight.
        last_changed = post.meta.get('updated') or published
        feed.add(post.meta.get('title'),
                 unicode(post.html),
                 content_type='html',
                 author=post.meta.get('author', app.config.get('DEFAULT_AUTHOR')),
                 url=post_url,
                 updated=datetime.combine(last_changed, time()))
    return make_response(feed.to_string().encode('utf-8') + '\n')
async def _feed(request):
    """Build an Atom feed string from the ten most recent online posts."""
    feed = AtomFeed(title=SITE_TITLE, updated=datetime.now(),
                    feed_url=request.url, url=request.host)
    recent = await Post.sync_filter(status=Post.STATUS_ONLINE,
                                    orderings=['-id'], limit=10)
    for post in recent:
        feed.add(  # type: ignore
            post.title,
            post.html_content_for_rss,
            content_type='html',
            summary=post.excerpt,
            summary_type='html',
            author=OWNER,
            url=post.canonical_url,
            id=post.id,
            updated=post.created_at,
            published=post.created_at)
    return feed.to_string()  # type: ignore
def build_feed():
    """Render every published entry into ``./ghpages/rss.xml`` as Atom."""
    feed = AtomFeed(SITE_NAME,
                    feed_url=DOMAIN + 'rss.xml',
                    url=DOMAIN,
                    subtitle=SUBTITLE,
                    author=AUTHOR,
                    updated=datetime.datetime.now())
    for item in Entry.get_all_published():
        # Entry dates are stored as text; parse to datetime for the feed.
        stamp = datetime.datetime.strptime(item['date'], '%Y-%m-%d %H:%M:%S')
        feed.add(unicode(item['title']),
                 unicode(markdown(item['content'])),
                 content_type='html',
                 author=AUTHOR,
                 published=stamp,
                 updated=stamp,
                 id=DOMAIN + item['slug'] + '/',
                 url=DOMAIN + 'posts/' + item['slug'] + '/')
    # utf-8-sig writes a BOM; some readers rely on it, so keep the codec.
    with codecs.open('./ghpages/rss.xml', 'w', 'utf-8-sig') as f:
        f.write(feed.to_string())
def download_atom():
    """Export the logged-in user's bookmarks as a downloadable Atom file."""
    feed = AtomFeed(title='Mes bookmarks', feed_url='xxx',
                    url='http://www.markme.com')
    cursor = mongo.db.bookmarks.find(
        {'user._id': ObjectId(current_user.get_id())})
    for bookmark in cursor:
        feed.add(
            id=bookmark['_id'],
            title=bookmark['title'],
            content=bookmark['description'],
            content_type='text',
            updated=bookmark['published'],
            # Primary link plus the page the bookmark was discovered through.
            links=[dict(href=bookmark['url']),
                   dict(via=bookmark['referrer'])],
            categories=[dict(term=tag, label=tag)
                        for tag in bookmark['tags']],
            author=dict(name=current_user.nickname,
                        nickname=current_user.nickname,
                        email=current_user.email))
    return send_file(StringIO(feed.to_string().encode('utf-8')),
                     attachment_filename='bookmarks.xml',
                     as_attachment=True,
                     mimetype='application/atom+xml')
def generate_atom_feeds(app):
    """Generate Atom feeds for the blog and, optionally, each archive page.

    Writes one ``atom.xml`` per feed under the builder output directory.
    Implemented as a generator (see the trailing ``yield``) so it can be
    hooked into Sphinx's 'html-collect-pages' event; it never actually
    yields a page.
    """
    if not ablog.builder_support(app):
        return
    blog = Blog(app)
    url = blog.blog_baseurl
    if not url:
        # BUG FIX: was ``raise StopIteration``.  Inside a generator that is
        # converted to RuntimeError since Python 3.7 (PEP 479); ``return``
        # is the correct way to finish the generator early.
        return
    try:
        from werkzeug.contrib.atom import AtomFeed
    except ImportError:
        app.warn("werkzeug is not found, continue without atom feeds support.")
        return
    feed_path = os.path.join(app.builder.outdir, blog.blog_path, 'atom.xml')
    # Each feed: (posts, pagename for to_html, output path, title, feed URL).
    feeds = [(blog.posts,
              blog.blog_path,
              feed_path,
              blog.blog_title,
              os_path_join(url, blog.blog_path, 'atom.xml'))]
    if blog.blog_feed_archives:
        for header, catalog in [
                (_('Posts by'), blog.author),
                (_('Posts from'), blog.location),
                (_('Posts in'), blog.language),
                (_('Posts in'), blog.category),
                (_('Posted in'), blog.archive),
                (_('Posts tagged'), blog.tags),
        ]:
            for coll in catalog:
                # skip collections containing only drafts
                if not len(coll):
                    continue
                folder = os.path.join(app.builder.outdir, coll.path)
                if not os.path.isdir(folder):
                    os.makedirs(folder)
                feeds.append(
                    (coll,
                     coll.path,
                     os.path.join(folder, 'atom.xml'),
                     blog.blog_title + u' - ' + header + u' ' + text_type(coll),
                     os_path_join(url, coll.path, 'atom.xml')))
    # Config options
    feed_length = blog.blog_feed_length
    feed_fulltext = blog.blog_feed_fulltext
    for feed_posts, pagename, feed_path, feed_title, feed_url in feeds:
        feed = AtomFeed(feed_title,
                        title_type='text',
                        url=url,
                        feed_url=feed_url,
                        subtitle=blog.blog_feed_subtitle,
                        generator=('ABlog', 'http://ablog.readthedocs.org',
                                   ablog.__version__))
        for i, post in enumerate(feed_posts):
            # Truncate the feed once the configured length is reached.
            if feed_length and i == feed_length:
                break
            post_url = os_path_join(
                url, app.builder.get_target_uri(post.docname))
            if post.section:
                post_url += '#' + post.section
            if blog.blog_feed_titles:
                content = None
            else:
                content = post.to_html(pagename, fulltext=feed_fulltext)
            feed.add(post.title,
                     content=content,
                     title_type='text',
                     content_type='html',
                     author=', '.join(a.name for a in post.author),
                     url=post_url,
                     id=post_url,
                     updated=post.update,
                     published=post.date)
        with open(feed_path, 'w') as out:
            feed_str = feed.to_string()
            try:
                # Python 2: to_string() yields unicode that must be encoded.
                out.write(feed_str.encode('utf-8'))
            except TypeError:
                # Python 3: the text-mode file wants str, not bytes.
                out.write(feed_str)
    if 0:
        # this is to make the function a generator
        # and make work for Sphinx 'html-collect-pages'
        yield
def generate_archive_pages(app):
    """Generate archive pages for all posts, categories, tags, authors, and
    drafts.

    A Sphinx 'html-collect-pages' generator: yields ``(pagename, context,
    template)`` triples for redirect stubs, catalog/collection archive pages
    and the drafts page, then — if a base URL is configured — writes Atom
    ``atom.xml`` files to disk as a side effect.
    """
    blog = Blog(app)
    # Redirect stubs: one page per legacy URL pointing at the new post.
    for post in blog.posts:
        for redirect in post.redirect:
            yield (redirect, {'redirect': post.docname, 'post': post}, 'redirect.html')
    # Feeds are only advertised in page contexts when a base URL exists.
    atom_feed = bool(blog.blog_baseurl)
    for title, header, catalog in [
            (_('Authors'), _('Posts by'), blog.author),
            (_('Locations'), _('Posts from'), blog.location),
            (_('Languages'), _('Posts in'), blog.language),
            (_('Categories'), _('Posts in'), blog.category),
            (_('All posts'), _('Posted in'), blog.archive),
            (_('Tags'), _('Posts tagged'), blog.tags),]:
        # Skip empty catalogs entirely.
        if not len(catalog):
            continue
        # Top-level archive page for the whole catalog.
        context = {
            'parents': [],
            'title': title,
            'header': header,
            'catalog': catalog,
            'summary': True,
            'atom_feed': atom_feed,
            'feed_path': blog.blog_path,
            'archive_feed': False,
        }
        yield (catalog.docname, context, 'archive.html')
        # One archive page per non-empty collection within the catalog.
        for collection in catalog:
            if not len(collection):
                continue
            context = {
                'parents': [],
                'title': u'{} {}'.format(header, collection),
                'header': header,
                'catalog': [collection],
                'summary': True,
                'atom_feed': atom_feed,
                # Per-collection feeds only when archive feeds are enabled.
                'feed_path': collection.path if blog.blog_feed_archives else blog.blog_path,
                'archive_feed': atom_feed and blog.blog_feed_archives
            }
            yield (collection.docname, context, 'archive.html')
    # Drafts page (never gets a feed).
    context = {
        'parents': [],
        'title': _('Drafts'),
        'catalog': [blog.drafts],
        'summary': True,
    }
    yield (blog.drafts.docname, context, 'archive.html')
    url = blog.blog_baseurl
    if not url:
        return
    from werkzeug.contrib.atom import AtomFeed
    feed_path = os.path.join(app.builder.outdir, blog.blog_path, 'atom.xml')
    # Each feed: (posts, output path, feed title, public feed URL).
    # NOTE(review): os.path.join is used to build URLs here; on Windows this
    # would produce backslashes — other variants of this function use a
    # dedicated os_path_join helper for URLs. Confirm intent.
    feeds = [(blog.posts, feed_path, blog.blog_title,
              os.path.join(url, blog.blog_path, 'atom.xml'))]
    if blog.blog_feed_archives:
        for header, catalog in [
                (_('Posts by'), blog.author),
                (_('Posts from'), blog.location),
                (_('Posts in'), blog.language),
                (_('Posts in'), blog.category),
                (_('Posted in'), blog.archive),
                (_('Posts tagged'), blog.tags),]:
            for coll in catalog:
                # skip collections containing only drafts
                if not len(coll):
                    continue
                folder = os.path.join(app.builder.outdir, coll.path)
                if not os.path.isdir(folder):
                    os.makedirs(folder)
                feeds.append((coll, os.path.join(folder, 'atom.xml'),
                              blog.blog_title + ' - ' + header + ' ' + str(coll),
                              os.path.join(url, coll.path, 'atom.xml')))
    for feed_posts, feed_path, feed_title, feed_url in feeds:
        feed = AtomFeed(feed_title,
                        title_type='text',
                        url=url,
                        feed_url=feed_url,
                        subtitle=blog.blog_feed_subtitle,
                        generator=('ABlog', 'http://blog.readthedocs.org',
                                   ablog.__version__))
        for post in feed_posts:
            post_url = os.path.join(
                url, app.builder.get_target_uri(post.docname))
            if post.section:
                post_url += '#' + post.section
            feed.add(post.title,
                     content=post.to_html(blog.blog_path,
                                          fulltext=blog.blog_feed_fulltext),
                     title_type='text',
                     content_type='html',
                     author=', '.join(a.name for a in post.author),
                     url=post_url,
                     id=post_url,
                     updated=post.update,
                     published=post.date)
        with open(feed_path, 'w') as out:
            feed_str = feed.to_string()
            try:
                # Python 2: to_string() yields unicode that must be encoded.
                out.write(feed_str.encode('utf-8'))
            except TypeError:
                # Python 3: the text-mode file wants str, not bytes.
                out.write(feed_str)
def filter_entry():
    """Query endpoint returning Atom feeds of LGA crime statistics.

    Two query shapes are supported, parsed positionally from the
    '+'-separated request URL:

    * ``lgaName`` (query 1): compare two LGAs; every matching Area document
      is serialized whole.
    * ``year`` (query 2): one LGA and one year (2012-2016); only that
      year's incident counts and rates are serialized.

    Returns 200 with an Atom body, 400 for an invalid year, 404 when
    nothing matches.
    """
    connect(host='mongodb://*****:*****@ds255539.mlab.com:55539/9321test')
    raw_str = str(request.url)
    raw_str2 = raw_str.split('+')

    def _norm(s):
        # LGA names compare case- and whitespace-insensitively.
        return s.lower().replace(' ', '')

    def _entry_for(area, entry_url, payload):
        # Wrap an XML-ified payload in a feed entry for *area*.
        data2 = xmlify(payload, wrap="all", indent=" ")
        return FeedEntry(title=area.name, url=entry_url,
                         updated=datetime.datetime.utcnow(),
                         author={'name': 'admin'},
                         content_type="application/xml", content=data2)

    def _atom_response(feed):
        response = make_response(feed.to_string())
        response.mimetype = "application/atom+xml"
        return response, 200

    if raw_str2[4] == 'lgaName':  # query-type-one
        name1 = raw_str2[2]
        name2 = raw_str2[6]
        feed = AtomFeed(title='Query 1 Search Results', feed_url=request.url)
        url1 = 'http://127.0.0.1:5000' + url_for('show_entry', name=name1)
        url2 = 'http://127.0.0.1:5000' + url_for('show_entry', name=name2)
        # Entries for name1 first, then name2 (preserves response ordering).
        for name, entry_url in ((name1, url1), (name2, url2)):
            for a in Area.objects:
                if _norm(a.name) == _norm(name):
                    data = json.loads(a.to_json())
                    feed.add(_entry_for(a, entry_url, data))
        return _atom_response(feed)
    elif raw_str2[4] == 'year':  # query-type-two
        name1 = raw_str2[2]
        year1 = raw_str2[6]
        feed = AtomFeed(title='Query 2 Search Results', feed_url=request.url)
        url1 = 'http://127.0.0.1:5000' + url_for('show_entry', name=name1)
        for a in Area.objects:
            if _norm(a.name) == _norm(name1):
                # The original had one near-identical copy-pasted branch per
                # year; collapsed here via getattr on year-suffixed fields.
                if year1 not in ('2012', '2013', '2014', '2015', '2016'):
                    return jsonify(Input_Year=False), 400
                fakedb = defaultdict(list)
                for stas in a.offenses:
                    fakedb[stas.id].append({
                        'offence_group': stas.offence_group,
                        'offence_type': stas.offence_type,
                        'incidents_' + year1: getattr(stas, 'incidents_' + year1),
                        'rate_' + year1: getattr(stas, 'rate_' + year1),
                    })
                j_fakedb = json.dumps(fakedb, indent=4)
                feed.add(_entry_for(a, url1, j_fakedb))
                return _atom_response(feed)
    return jsonify(Input=False), 404
def generate_atom_feeds(app):
    """Generate Atom feeds for the blog and, optionally, each archive page.

    Writes one ``atom.xml`` per feed under the builder output directory.
    Implemented as a generator (see the trailing ``yield``) so it can be
    hooked into Sphinx's 'html-collect-pages' event; it never actually
    yields a page.
    """
    blog = Blog(app)
    url = blog.blog_baseurl
    if not url:
        # BUG FIX: was ``raise StopIteration``.  Inside a generator that is
        # converted to RuntimeError since Python 3.7 (PEP 479); ``return``
        # is the correct way to finish the generator early.
        return
    from werkzeug.contrib.atom import AtomFeed
    feed_path = os.path.join(app.builder.outdir, blog.blog_path, 'atom.xml')
    # Each feed: (posts, output path, feed title, public feed URL).
    feeds = [(blog.posts,
              feed_path,
              blog.blog_title,
              os_path_join(url, blog.blog_path, 'atom.xml'))]
    if blog.blog_feed_archives:
        for header, catalog in [
                (_('Posts by'), blog.author),
                (_('Posts from'), blog.location),
                (_('Posts in'), blog.language),
                (_('Posts in'), blog.category),
                (_('Posted in'), blog.archive),
                (_('Posts tagged'), blog.tags),
        ]:
            for coll in catalog:
                # skip collections containing only drafts
                if not len(coll):
                    continue
                folder = os.path.join(app.builder.outdir, coll.path)
                if not os.path.isdir(folder):
                    os.makedirs(folder)
                feeds.append(
                    (coll,
                     os.path.join(folder, 'atom.xml'),
                     blog.blog_title + u' - ' + header + u' ' + text_type(coll),
                     os_path_join(url, coll.path, 'atom.xml')))
    # Config options
    feed_length = blog.blog_feed_length
    feed_fulltext = blog.blog_feed_fulltext
    for feed_posts, feed_path, feed_title, feed_url in feeds:
        feed = AtomFeed(feed_title,
                        title_type='text',
                        url=url,
                        feed_url=feed_url,
                        subtitle=blog.blog_feed_subtitle,
                        generator=('ABlog', 'http://ablog.readthedocs.org',
                                   ablog.__version__))
        for i, post in enumerate(feed_posts):
            # Truncate the feed once the configured length is reached.
            if feed_length and i == feed_length:
                break
            post_url = os_path_join(
                url, app.builder.get_target_uri(post.docname))
            if post.section:
                post_url += '#' + post.section
            if blog.blog_feed_titles:
                content = None
            else:
                content = post.to_html(blog.blog_path, fulltext=feed_fulltext)
            feed.add(post.title,
                     content=content,
                     title_type='text',
                     content_type='html',
                     author=', '.join(a.name for a in post.author),
                     url=post_url,
                     id=post_url,
                     updated=post.update,
                     published=post.date)
        with open(feed_path, 'w') as out:
            feed_str = feed.to_string()
            try:
                # Python 2: to_string() yields unicode that must be encoded.
                out.write(feed_str.encode('utf-8'))
            except TypeError:
                # Python 3: the text-mode file wants str, not bytes.
                out.write(feed_str)
    if 0:
        # this is to make the function a generator
        # and make work for Sphinx 'html-collect-pages'
        yield
def _download_lga_offenses(name):
    """Download the BOCSAR spreadsheet for *name*, parse rows 7-68 into
    ``Offense`` objects and return them as a list.

    Extracted from ``add_entry``, where this logic was duplicated verbatim
    in both the postcode and the name branch.
    """
    dld_url = 'http://www.bocsar.nsw.gov.au/Documents/RCS-Annual/' + name + 'lga.xlsx'
    r = requests.get(dld_url)
    excel_url = name + 'lga.xlsx'
    with open(excel_url, 'wb') as f:
        f.write(r.content)
    data = xlrd.open_workbook(excel_url).sheets()[0]
    offenses = []
    backup = ''
    # Rows 7..68 hold the offense table; ids are assigned sequentially.
    for offense_id, row in enumerate(range(7, 69), start=1):
        offence_group = str(data.cell(row, 0).value)
        # Merged cells leave blanks; carry the last non-empty group forward.
        if offence_group != '':
            backup = offence_group
        if offence_group == '':
            offence_group = backup
        # Columns 1..14: offence_type, incidents/rate for 2012-2016,
        # trend_24m, trend_60m, lga_rank — in Offense's positional order.
        cols = [str(data.cell(row, c).value) for c in range(1, 15)]
        offenses.append(Offense(offense_id, offence_group, *cols))
    return offenses


def add_entry():
    """POST endpoint: import one LGA (by name) or several (by postcode).

    Returns 201 with an Atom body on success, 200 when the named LGA is
    already present, 404 on bad input.
    """
    connect(host='mongodb://*****:*****@ds255539.mlab.com:55539/9321test')
    parser = reqparse.RequestParser()
    parser.add_argument('name', type=str, help='LGA name input error')
    parser.add_argument('postcode', type=int, help='Postcode input error')
    args = parser.parse_args()
    postcode = args.get("postcode")  # check postcode to find the LGA name
    ############################# postcode ######################################
    if postcode:
        # Map the postcode to every LGA name it covers.
        name_list = [k for k, v in postdic.items() if postcode in v]
        if name_list == []:  # postcode not present in postdic
            return jsonify(Input_postcode=False), 404
        feed = AtomFeed(title='Multi-collections POST complete',
                        feed_url=request.url)
        # Drop names already imported.  BUG FIX: the original removed items
        # from name_list while iterating it, which skips elements; iterate a
        # copy instead.
        for a in Area.objects:
            for n in list(name_list):
                if a.name.lower().replace(' ', '') == n.lower().replace(' ', ''):
                    name_list.remove(n)
        for name in name_list:
            url = 'http://127.0.0.1:5000' + url_for('show_entry', name=name)
            Area(name, _download_lga_offenses(name)).save()
            feed.add(FeedEntry(title=name, url=url,
                               updated=datetime.datetime.utcnow(),
                               author={'name': 'admin'}))
        response = make_response(feed.to_string())
        response.mimetype = "application/atom+xml"
        # BUG FIX: the original built the response here but fell off the end
        # of the function, returning None.  201 matches the name branch.
        return response, 201
    ############################# name ######################################
    else:  # if postcode not given, check the name field
        name = args.get("name").lower().replace(' ', '')
        if not name:
            return jsonify(Input=False), 404
        url = 'http://127.0.0.1:5000' + url_for('show_entry', name=name)
        # If the LGA has already been imported, report it instead of
        # re-downloading.
        for a in Area.objects:
            if a.name.lower().replace(' ', '') == name:
                feed = AtomFeed(title='Already existed', feed_url=url)
                feed.add(FeedEntry(title=name, url=url,
                                   updated=datetime.datetime.utcnow(),
                                   author={'name': 'admin'}))
                response = make_response(feed.to_string())
                response.mimetype = "application/atom+xml"
                return response, 200
        Area(name, _download_lga_offenses(name)).save()
        # (typo fix: feed title read 'Sucess POST Activity')
        feed = AtomFeed(title='Success POST Activity', feed_url=url)
        feed.add(FeedEntry(title=name, url=url,
                           updated=datetime.datetime.utcnow(),
                           author={'name': 'admin'}))
        response = make_response(feed.to_string())
        response.mimetype = "application/atom+xml"
        return response, 201