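# A minimal, self-contained sketch of the API every snippet below assumes:
# AtomFeed/FeedEntry from the deprecated werkzeug.contrib.atom module
# (removed in Werkzeug 1.0). The titles and URLs here are placeholders.
from datetime import datetime

from werkzeug.contrib.atom import AtomFeed, FeedEntry

# An entry needs a title, an id, and an updated timestamp; id falls back
# to url when only a url is given.
entry = FeedEntry(title='Hello', id='http://example.com/hello',
                  updated=datetime.utcnow())
feed = AtomFeed('Example Feed', feed_url='http://example.com/feed.atom')
feed.add(entry)
print(feed.to_string())  # serialized Atom XML document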
def get_feed(feed_id):
    feed_id = secure_filename(feed_id)
    feed_config_filepath = os.path.join(APP_CONFIG_FEEDS, feed_id + ".json")
    if not os.path.isfile(feed_config_filepath):
        print(feed_config_filepath)
        abort(404)
    feed = feedops.FusedFeed.load_from_spec_file(feed_config_filepath)
    feed.fetch()
    feed_uri = request.url_root
    if len(feed.sources) == 1:
        # if there is only 1 source in a fusedfeed, just give the feed's
        # html alternate
        # TODO: instead, we should generate our own HTML representation
        feed_uri = feed.sources[0].html_uri
    output = AtomFeed(feed.name, feed_url=request.url, author="FeedFuser",
                      links=[{"href": feed_uri,
                              "rel": "alternate",
                              "type": "text/html"}])
    for entry in feed.entries:
        title = entry.title or entry.link
        feed_item = FeedEntry(id=entry.guid, title=title,
                              updated=entry.update_date,
                              author=entry.author,
                              published=entry.pub_date,
                              links=[{"href": entry.link,
                                      "rel": "alternate",
                                      "type": "text/html"}])
        if entry.summary:
            feed_item.summary = unicode(entry.summary)
            feed_item.summary_type = (
                "text" if entry.summary_type == "text/plain" else "html")
        if entry.content:
            feed_item.content = unicode(entry.content)
            feed_item.content_type = (
                "text" if entry.content_type == "text/plain" else "html")
        output.add(feed_item)
    return output.get_response()
def rss_feed(district):
    # build the feed entry for a single district document
    entry = FeedEntry(
        id=addr + district,
        title=district + '_detail',
        updated=datetime.now(),
        author='Zhuowen Deng',
    )
    return entry.to_string()
def get_collection():
    connect('statistics')
    ids = []
    for t in Offence.objects:
        ids.append(t.id)
    entry = FeedEntry(id=ids, title='all collections',
                      updated=datetime.now(), author='Steve')
    return entry.to_string()
def content_feed(region):
    # the detail of the region
    L = []
    for t in Offence.objects(id=region):
        for e in t.data:
            L.append([e.offence, e.type,
                      e.data1, e.data2, e.data3, e.data4, e.data5,
                      e.data6, e.data7, e.data8, e.data9, e.data10,
                      e.data11, e.data12, e.data13])
    entry = FeedEntry(id=addr + region,
                      title=region + '_detail',
                      updated=datetime.now(),
                      author='Zhuowen Deng',
                      content=L)
    return entry.to_string()
def recent_feed():
    feed = AtomFeed('Recent Items', feed_url=request.url,
                    url=request.url_root, subtitle="Most recent items.")
    items = session.query(Item).order_by(asc(Item.dateAdded))
    for item in items:
        categories = [{'term': item.category.name, 'label': 'none'}]
        author = {'name': item.user.name, 'email': item.user.email}
        item_id = request.url_root + 'catalog/items/' + str(item.id)
        entry = FeedEntry(item.name, item.description,
                          content_type='html',
                          author=author,
                          categories=categories,
                          id=item_id,
                          published=item.dateAdded,
                          updated=item.lastUpdated)
        feed.add(entry)
    return feed.get_response()
def _generate_entry(article):
    # FeedEntry's id defaults to url, so passing url alone satisfies the
    # required-id check
    return FeedEntry(article.title,
                     url=article.link,
                     updated=article.last_modified,
                     published=article.created_at,
                     content=article.body_html,
                     summary=article.summary or '')
def test_atom_add_one(self):
    a = AtomFeed(title='test_title', id=1)
    f = FeedEntry(title='test_title', id=1,
                  updated=datetime.datetime.now())
    assert len(a.entries) == 0
    a.add(f)
    assert len(a.entries) == 1
def make_feed_entry(self):
    return FeedEntry(self.title, render(self.content),
                     content_type='html',
                     author=self.author or u'好规划网理财师',
                     url=self.url,
                     updated=self.update_time,
                     published=self.publish_time)
def jsontoXML(doc, d):
    xjson = doc.to_json()
    xjson = json.loads(xjson)
    xml = dicttoxml(xjson, custom_root='data', attr_type=False)
    xml = xml.decode('utf-8')
    # drop the 39-character XML declaration dicttoxml prepends
    # ('<?xml version="1.0" encoding="UTF-8" ?>')
    xml = xml[39:]
    entry = FeedEntry(id="http://127.0.0.1:5000/lga/" + d,
                      content_type="xhtml",
                      content=xml,
                      author="Chieh An Liang",
                      title=d.upper() + ' Recorded Crime Statistics',
                      updated=datetime.now())
    return entry
def add(self, posts):
    """Add blog posts to the feed."""
    for post in posts:
        self._feed.add(FeedEntry(
            summary=post.summary,
            title=post.title,
            title_type='html',
            url=post.url,
            updated=post.date,
        ))
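# Hypothetical usage sketch for the add() helper above -- it only reads
# post.summary, post.title, post.url and post.date, so any object exposing
# those attributes will do. Post and BlogFeed below are illustrative
# stand-ins, not names from the source.
from collections import namedtuple
from datetime import datetime

from werkzeug.contrib.atom import AtomFeed

Post = namedtuple('Post', 'summary title url date')

class BlogFeed(object):
    """Illustrative stand-in for the class the add() method above is on."""
    def __init__(self):
        self._feed = AtomFeed('Blog', feed_url='http://example.com/feed.atom')

BlogFeed.add = add  # attach the helper defined above as a method

blog_feed = BlogFeed()
blog_feed.add([Post('<p>First post.</p>', 'Hello world',
                    'http://example.com/hello', datetime.utcnow())])
print(blog_feed._feed.to_string())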
def bookatom(book_id):
    book = Book.query.get(book_id)
    entry = FeedEntry(
        book.title,
        author=book.author.as_dict(),
        id=url_for('book_routes.bookpage', book_id=book.book_id,
                   _external=True),
        updated=book.last_updated,
        published=book.published,
        links=[
            {'type': "image/jpeg",
             'rel': "http://opds-spec.org/image",
             'href': book.cover_large},
            {'type': "image/jpeg",
             'rel': "http://opds-spec.org/image/thumbnail",
             'href': book.cover_thumb},
            {'type': "application/epub+zip",
             'rel': "http://opds-spec.org/acquisition",
             'href': book.epub_url},
            {'type': "application/atom+xml;type=entry;profile=opds-catalog",
             'rel': "alternate",
             'href': url_for('book_routes.bookpage', book_id=book.book_id,
                             _external=True)},
        ])
    response = make_response('<?xml version="1.0" encoding="UTF-8"?>\n' +
                             ''.join(entry.generate()))
    response.headers['Content-Type'] = 'application/atom+xml'
    return response
def content_col(region, year):
    # the column info of one region in one year
    L, index = [], 0
    output = []
    for t in Offence.objects(id=region):
        for e in t.data:
            L.append([e.offence, e.type,
                      e.data1, e.data2, e.data3, e.data4, e.data5,
                      e.data6, e.data7, e.data8, e.data9, e.data10,
                      e.data11, e.data12, e.data13])
    for e in L[0]:
        if year in e:
            index = L[0].index(e)
    for line in L:
        output.append([line[0], line[1], line[index], line[index + 1]])
    entry = FeedEntry(id=addr + region,
                      title=region + '_detail',
                      updated=datetime.now(),
                      author='Zhuowen Deng',
                      content=output)
    return entry.to_string()
def test_feed_entry_to_str(self):
    updated_time = datetime.datetime.now()
    expected_feed_entry_str = """
    <entry>
        <title type="text">test_title</title>
        <id>1</id>
        <updated>%s</updated>
    </entry>
    """ % format_iso8601(updated_time)
    f = FeedEntry(title="test_title", id=1, updated=updated_time)
    assert str(f).strip().replace(" ", "") == \
        expected_feed_entry_str.strip().replace(" ", "")
def episode_to_atom_item(episode):
    banner = URL_EPISODE_BANNER_FORMAT % (episode.episodeId,)
    des_message = '<img src="%s"></img><p>%s</p>' % (banner,
                                                     episode.artistComment)
    return FeedEntry(
        title=episode.title,
        title_type='text',
        summary=des_message,
        summary_type='html',
        url=URL_EPISODE_FORMAT % (episode.episodeId,),
        published=episode.published,
        updated=episode.published,
        author=Comic.query.get(episode.comicId).artistDisplayName
    )
def event_to_entry(event):
    """Convert an event to an atom feed entry."""
    r = renderer(event)
    entry = FeedEntry(title=event.account.name + ' ' + r.__html__(),
                      title_type='html',
                      summary=event.message,
                      summary_type='html',
                      url=None,
                      author=event.account.full_name,
                      id='urn:datahub:entry:%s' % event.id,
                      updated=event.time,
                      published=event.time)
    return entry
def show_collection():
    connect(host='mongodb://*****:*****@ds255539.mlab.com:55539/9321test')
    feed = AtomFeed(title='All Available Collections', feed_url=request.url)
    for a in Area.objects:
        data = json.loads(a.to_json())
        data2 = xmlify(data, wrap="all", indent=" ")
        url = 'http://127.0.0.1:5000' + url_for('show_entry', name=a.name)
        entry = FeedEntry(title=a.name, url=url,
                          updated=datetime.datetime.utcnow(),
                          author={'name': 'admin'},
                          content_type="application/xml",
                          content=data2)
        feed.add(entry)
    response = make_response(feed.to_string())
    response.mimetype = "application/atom+xml"
    return response, 200
def comic_to_atom_item(comic):
    banner = URL_COMIC_BANNER_FORMAT % (comic.comicId, comic.banners)
    des_message = '<img src="%s"></img><p>[%s]</p><h3>"%s"</h3><p>%s</p>' % \
        (banner, comic.genre, comic.comment, comic.synopsis)
    return FeedEntry(
        title="%s - %s" % (comic.title, comic.artistDisplayName),
        title_type='text',
        summary=des_message,
        summary_type='html',
        url=URL_COMIC_FORMAT % (comic.comicId,),
        published=comic.published,
        # fall back to the publish date when updated predates the epoch
        updated=comic.updated
        if (comic.updated - datetime(1970, 1, 1)).total_seconds() > 0
        else comic.published,
        author=comic.artistDisplayName
    )
def show_entry(name):
    connect(host='mongodb://*****:*****@ds255539.mlab.com:55539/9321test')
    feed = AtomFeed(title='Single Collection', feed_url=request.url)
    for a in Area.objects:
        if a.name.lower().replace(' ', '') == name.lower().replace(' ', ''):
            data = json.loads(a.to_json())
            data2 = xmlify(data, wrap="all", indent=" ")
            entry = FeedEntry(title=a.name, url=request.url,
                              updated=datetime.datetime.utcnow(),
                              author={'name': 'admin'},
                              content_type="application/xml",
                              content=data2)
            feed.add(entry)
            response = make_response(feed.to_string())
            response.mimetype = "application/atom+xml"
            return response, 200  # ATOM
    return jsonify(LGA_name=False), 404
def opensearch_search():
    """OpenSearch search endpoint.

    See http://www.opensearch.org/
    """
    q = request.args.get('q')
    start = request.args.get('start')
    num = 25
    results = elastic.search(
        'thing',
        query={'title^3,short_description,description,makers_string': q},
        start=start, num=num)
    id_list = [result[0] for result in results]
    things = Thing.objects.filter(id__in=id_list)
    feed = AtomFeed("Search results for '%s'" % (q,),
                    feed_url=request.url, url=request.url_root)
    for thing in things:
        # TODO: only send the last Upload per mimetype?
        links = [_create_link_dict(upload, request.host)
                 for upload in thing.files]
        authors = [maker.maker.format_name().strip()
                   for maker in thing.makers]
        # http://werkzeug.pocoo.org/docs/0.11/contrib/atom/#werkzeug.contrib.atom.FeedEntry
        feed.add(FeedEntry(title=thing.title or "N/A",
                           summary=thing.short_description,
                           content=thing.description,
                           author=authors,
                           url="http://%s%s" % (
                               request.host,
                               url_for('thing.detail', id=thing.id),
                           ),
                           links=links,
                           updated=thing.created_at,
                           published=thing.created_at))
    return feed.get_response()
def get_feed_item(post: dict) -> FeedEntry:
    """Return a FeedEntry object for adding to an AtomFeed item from the
    werkzeug.contrib.atom module.

    Args:
        post (dict): post dict from the parser

    Returns:
        FeedEntry: item for the atom feed
    """
    return FeedEntry(
        id=post["guid"]["rendered"],
        title=post["title"]["rendered"],
        content=remove_macro_tags(post["content"]["rendered"]),
        summary=remove_macro_tags(post["excerpt"]["rendered"]),
        url=post["link"],
        updated=parse_datetime(post["modified_gmt"]),
        published=parse_datetime(post["date_gmt"]),
    )
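# Sketch of the post dict shape get_feed_item() consumes: the keys mirror
# the WordPress-REST-style fields the function reads; the values below are
# made up, and remove_macro_tags/parse_datetime come from the surrounding
# module.
sample_post = {
    "guid": {"rendered": "http://example.com/?p=42"},
    "title": {"rendered": "Hello world"},
    "content": {"rendered": "<p>Body HTML</p>"},
    "excerpt": {"rendered": "<p>Short summary</p>"},
    "link": "http://example.com/hello-world/",
    "modified_gmt": "2019-01-02T03:04:05",
    "date_gmt": "2019-01-01T00:00:00",
}
entry = get_feed_item(sample_post)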
def get_lga(item):
    connect('lga')
    if request.args.get('tag') not in ["0", "1"]:
        return jsonify(Error="Please enter a valid file format: "
                             "0 for ATOM, 1 for JSON"), 404
    found = 0
    for i in Lga.objects:
        if i.id == item:
            x = i
            found = 1
            break
    if not found:
        return jsonify(message="No record found"), 404
    xjson = x.to_json()
    xjson = json.loads(xjson)
    if request.args.get('tag') == '1':
        xjson = json.dumps(xjson)
        return xjson, 200
    if request.args.get('tag') == '0':
        xml = dicttoxml(xjson, custom_root='data', attr_type=False)
        xml = xml.decode('utf-8')
        # strip the XML declaration dicttoxml prepends
        xml = xml[39:]
        entry = [FeedEntry(id="http://127.0.0.1:5000/lga/" + item,
                           content_type="xhtml",
                           content=xml,
                           author="Chieh An Liang",
                           title=item + ' Recorded Crime Statistics',
                           updated=datetime.now())]
        feed = AtomFeed(title='NSW Recorded Crime Statistics',
                        id="http://127.0.0.1:5000/lga/" + item,
                        author="Chieh An Liang",
                        entries=entry)
        return feed.get_response(), 200
def atom(tag=None):
    """General purpose atom feed."""
    title = current_app.config['TITLE']
    if tag is not None:
        tags = tag.split('/')
        for _tag in tags:
            if _tag not in current_app.blohg.content.tags:
                abort(404)
        title += u' » %s' % ' + '.join(tags)
        posts = current_app.blohg.content.get_by_tag(tags)
    else:
        posts = current_app.blohg.content.get_all(True)
    feed = AtomFeed(title=title,
                    subtitle=current_app.config['TAGLINE'],
                    url=url_for('views.home', _external=True),
                    id=url_for('views.atom', tag=tag),
                    feed_url=url_for('views.atom', tag=tag, _external=True),
                    author=current_app.config['AUTHOR'],
                    generator=('blohg', None, None))
    posts_per_atom_feed = \
        current_app.config.get('POSTS_PER_ATOM_FEED',
                               current_app.config.get('POSTS_PER_PAGE'))
    for post in posts[:int(posts_per_atom_feed)]:
        feed.add(FeedEntry(title=post.title,
                           content=post.full_raw_html,
                           summary=post.abstract_raw_html,
                           id=url_for('views.content', slug=post.slug),
                           url=url_for('views.content', slug=post.slug,
                                       _external=True),
                           author=dict(name=post.author_name,
                                       email=post.author_email),
                           published=post.datetime,
                           updated=post.datetime))
    # AtomFeed is a valid WSGI application, so it can be returned directly
    return feed
authors = [
    dict(name='Author 1', email='*****@*****.**',
         uri='http://test.com/author1'),
    dict(name='Author 2', email='*****@*****.**',
         uri='http://test.com/author2'),
]
author = ('By Scott Bixby in New York and David Crouch '
          '([email protected]) in Gothenburg')
feed = AtomFeed(title='Test Feed', feed_url='http://testfeed.com')
entry = FeedEntry(title='Test Entry',
                  url='http://testfeed.com/testentry',
                  updated=datetime.utcnow(),
                  content='Test Entry',
                  author=author)
feed.entries.append(entry)
rss = feed.to_string()
pprint(rss)
print()

parsed = feedparser.parse(rss)
pprint(parsed.feed)
print()
pprint(parsed.entries[0])
pprint(parsed.entries[0].author_detail)
print()
def gen_feed(account):
    source = retrieve(SEARCH_URL.format(account.name))
    url = extract_element(source, ACCOUNT_BASE_XPATH + "//p/a/@href")
    current_app.logger.info(inspect.stack()[0][3] + "\t\t\t\t" + url)
    while not url:
        requests_cache.clear()
        source = retrieve(SEARCH_URL.format(account.name))
        url = extract_element(source, ACCOUNT_BASE_XPATH + "//p/a/@href")
        current_app.logger.info(inspect.stack()[0][3] + "\t\t\t\t" + url)
    atom = AtomFeed(account.text,
                    feed_url=url_for("main.feed", name=account.name,
                                     _external=1),
                    author=account.auth)
    articles = get_articles(url)
    for item in articles:
        article = Article(
            item["title"].replace('&quot;', '"'),
            url_for("main.a2link", url=item["cover"], _external=1),
            item["digest"].replace('&quot;', '"'),
            item["content"].replace('&quot;', '"'),
            item["read_num"], item["post_date"], account)
        atom.add(FeedEntry(article.title, article.content,
                           url=article.cover, updated=datetime.now()))
        try:
            db.session.merge(article)
            db.session.commit()
        except IntegrityError:
            db.session.rollback()
    with open(os.getcwd() + "/app/static/feeds/" + account.name + ".xml",
              'w') as temp:
        temp.write(atom.to_string().encode("utf-8"))
    feed = Feed(atom.feed_url, account)
    try:
        db.session.merge(feed)
        db.session.commit()
    except IntegrityError:
        db.session.rollback()
    return feed
def test_parse_atom(session, feed):
    parser = RssParser()
    parser.feed = feed
    authors = [
        dict(name="John Doe", email="*****@*****.**",
             uri="http://test.com/testauthor"),
        dict(name="Jane Smith", email="*****@*****.**",
             uri="http://test.com/testauthor2"),
    ]
    atomfeed = AtomFeed(title="Test Feed", feed_url="http://testfeed.com")
    entry = FeedEntry(
        title="Jane Smith",
        url="http://testfeed.com/testentry",
        id="1234",
        updated=datetime.utcnow(),
        content="Test Entry",
        author=authors,
    )
    atomfeed.entries.append(entry)
    parser.data = atomfeed.to_string()
    parser.parse()
    session.commit()
    parsed_authors = parser.authors
    assert len(parser.entries) == 1
    assert len(parsed_authors) == 2
    assert parser.len_new_entries == 1
    assert parser.len_updated_entries == 0
    assert next((e for e in parser.entries if e.guid == "1234"), None)
    for a in authors:
        r = next((r for r in parsed_authors if r.name == a["name"]), None)
        assert r is not None
        assert r.name == a["name"]
        assert r.email == a["email"]
        assert r.url == a["uri"]

    entry2 = FeedEntry(
        title="New Entry",
        url="http://testfeed.com/newentry",
        id="98765",
        updated=datetime.utcnow(),
        content="New Entry",
        author=authors,
    )
    atomfeed.entries.append(entry2)

    # Parse feed again, should have 1 new entry and no updated entries
    parser = RssParser()
    parser.feed = feed
    parser.data = atomfeed.to_string()
    parser.parse()
    parsed_authors = parser.authors
    assert len(parser.entries) == 1
    assert len(parsed_authors) == 2
    assert parser.len_new_entries == 1
    assert parser.len_updated_entries == 0
    assert next((e for e in parser.entries if e.guid == "98765"), None)

    # Update entry content and parse feed again, should have 1 updated
    # entry and no new entries
    entry.content = "Updated Test Entry"
    parser = RssParser()
    parser.feed = feed
    parser.data = atomfeed.to_string()
    parser.parse()
    parsed_authors = parser.authors
    assert len(parser.entries) == 1
    assert len(parsed_authors) == 0
    assert parser.len_new_entries == 0
    assert parser.len_updated_entries == 1
    assert next((e for e in parser.entries if e.guid == "1234"), None)
def filter():
    url = request.url
    params = url.replace('http://127.0.0.1:5000/lga/filter?', '')
    params = params.lower()
    params = params.split()
    feed = AtomFeed(title='NSW Recorded Crime Statistics',
                    id="http://127.0.0.1:5000/lga/filter",
                    author="Chieh An Liang")
    connect('lga')
    for i in range(len(params)):
        # two query case
        if params[i] in ['and', 'or']:
            separator = params[i]
            term1 = params[:i]
            term2 = params[i + 1:]
            if term1[1] != 'eq' or term1[0] != 'lganame' or term2[1] != 'eq':
                return jsonify(Error="Filter mode not supported"), 404
            lgaValue1 = ''.join(term1[2:])
            if separator == 'or':
                lgaValue2 = ''.join(term2[2:])
                print('lgaValue2: ', lgaValue2)
                if term2[0] != 'lganame':
                    return jsonify(Error="Filter mode not supported"), 404
                x1 = Lga.objects(district=lgaValue1)
                x2 = Lga.objects(district=lgaValue2)
                if not (x1 or x2):
                    return jsonify(Error="Lga Names not found"), 404
                elif not x1:
                    feed.add(jsontoXML(x2, lgaValue2))
                elif not x2:
                    feed.add(jsontoXML(x1, lgaValue1))
                else:
                    feed.add(jsontoXML(x1, lgaValue1))
                    feed.add(jsontoXML(x2, lgaValue2))
                return feed.get_response(), 200
            else:
                print(term1)
                print(term2)
                try:
                    yearFind = term2[2]
                except IndexError:
                    return jsonify(Error="Please input year value"), 404
                try:
                    int(yearFind)
                except ValueError:
                    return jsonify(Error="Year should be 4 digits value"), 404
                if len(yearFind) != 4:
                    return jsonify(Error="Year should be 4 digits value"), 404
                if term2[0] != 'year':
                    return jsonify(Error="Filter mode not supported"), 404
                x1 = Lga.objects(district=lgaValue1)
                if not x1:
                    return jsonify(Error="lgaName not Found"), 404
                x1 = json.loads(x1.to_json())
                qResult = []
                # walk offencegroups -> offencetypes -> years to collect
                # the values for the requested year
                for dict_item in x1:
                    for k, v in dict_item.items():
                        if k == "offencegroups":
                            for k1, v1 in v.items():
                                for k2, v2 in v1.items():
                                    if k2 == "offencetypes":
                                        for k3, v3 in v2.items():
                                            for k4, v4 in v3.items():
                                                if k4 == "years":
                                                    for year, vfinal in v4.items():
                                                        if year == yearFind:
                                                            qResult.append(vfinal)
                qr = dicttoxml(qResult, attr_type=False)
                xml = qr.decode('utf-8')
                # strip the XML declaration and outer wrapper elements
                xml = xml[63:-7]
                feed.add(FeedEntry(
                    id="http://127.0.0.1:5000/lga/" + lgaValue1,
                    content_type="xhtml",
                    content=xml,
                    author="Chieh An Liang",
                    title=lgaValue1.upper() +
                          ' Recorded Crime Statistics ' + yearFind,
                    updated=datetime.now()))
                return feed.get_response(), 200
    # single query case
    lgaValue = ''.join(params[2:])
    if params[1] != 'eq' or params[0] != 'lganame':
        return jsonify(Error="Filter mode not supported"), 404
    # check if value found
    x = Lga.objects(district=lgaValue)
    if not x:
        return jsonify(Error="Lga Names not found"), 404
    feed.add(jsontoXML(x, lgaValue))
    return feed.get_response(), 200
def test_feed_entry_no_args(self):
    with pytest.raises(ValueError):
        FeedEntry()
def add_entry():
    connect(host='mongodb://*****:*****@ds255539.mlab.com:55539/9321test')
    parser = reqparse.RequestParser()
    parser.add_argument('name', type=str, help='LGA name input error')
    parser.add_argument('postcode', type=int, help='Postcode input error')
    args = parser.parse_args()
    postcode = args.get("postcode")  # check postcode to find the LGA name

    # ------------------------------ postcode ------------------------------
    if postcode:
        name_list = []
        for k, v in postdic.items():
            for s in v:
                if s == postcode:
                    name_list.append(k)  # regions found
        # check whether the input postcode is in the postdic
        if name_list == []:
            return jsonify(Input_postcode=False), 404
        feed = AtomFeed(title='Multi-collections POST complete',
                        feed_url=request.url)
        for a in Area.objects:
            for n in name_list:
                if a.name.lower().replace(' ', '') == \
                        n.lower().replace(' ', ''):
                    name_list.remove(n)
        for n in name_list:
            name = n
            url = 'http://127.0.0.1:5000' + url_for('show_entry', name=name)
            # download from the internet -- check 400 - if 2 not in 1 --
            # not up-to-date
            dld_url = ('http://www.bocsar.nsw.gov.au/Documents/RCS-Annual/'
                       + name + 'lga.xlsx')
            r = requests.get(dld_url)
            with open(name + 'lga.xlsx', 'wb') as f:
                f.write(r.content)
            excel_url = name + 'lga.xlsx'
            data = xlrd.open_workbook(excel_url).sheets()[0]
            id = 0  # initialisation id for embedded document
            p = []  # used to collect Offense()
            for row in range(7, 69):  # for further update
                id = id + 1
                offence_group = str(data.cell(row, 0).value)
                if offence_group != '':
                    backup = offence_group
                if offence_group == '':
                    offence_group = backup
                offence_type = str(data.cell(row, 1).value)
                incidents_2012 = str(data.cell(row, 2).value)
                rate_2012 = str(data.cell(row, 3).value)
                incidents_2013 = str(data.cell(row, 4).value)
                rate_2013 = str(data.cell(row, 5).value)
                incidents_2014 = str(data.cell(row, 6).value)
                rate_2014 = str(data.cell(row, 7).value)
                incidents_2015 = str(data.cell(row, 8).value)
                rate_2015 = str(data.cell(row, 9).value)
                incidents_2016 = str(data.cell(row, 10).value)
                rate_2016 = str(data.cell(row, 11).value)
                trend_24m = str(data.cell(row, 12).value)
                trend_60m = str(data.cell(row, 13).value)
                lga_rank = str(data.cell(row, 14).value)
                p.append(Offense(id, offence_group, offence_type,
                                 incidents_2012, rate_2012,
                                 incidents_2013, rate_2013,
                                 incidents_2014, rate_2014,
                                 incidents_2015, rate_2015,
                                 incidents_2016, rate_2016,
                                 trend_24m, trend_60m, lga_rank))
            t = Area(name, p)
            t.save()
            entry = FeedEntry(title=name, url=url,
                              updated=datetime.datetime.utcnow(),
                              author={'name': 'admin'})
            feed.add(entry)
        response = make_response(feed.to_string())
        response.mimetype = "application/atom+xml"

    # ------------------------------ name ------------------------------
    else:  # if postcode not given, check the name field
        name = args.get("name")
        if not name:
            return jsonify(Input=False), 404
        name = name.lower().replace(' ', '')
        url = 'http://127.0.0.1:5000' + url_for('show_entry', name=name)
        # if the LGA or postcode has already been imported before
        for a in Area.objects:
            if a.name.lower().replace(' ', '') == name:
                feed = AtomFeed(title='Already existed', feed_url=url)
                entry = FeedEntry(title=name, url=url,
                                  updated=datetime.datetime.utcnow(),
                                  author={'name': 'admin'})
                feed.add(entry)
                response = make_response(feed.to_string())
                response.mimetype = "application/atom+xml"
                return response, 200
        # download from the internet -- check 400 - if 2 not in 1 --
        # not up-to-date
        dld_url = ('http://www.bocsar.nsw.gov.au/Documents/RCS-Annual/'
                   + name + 'lga.xlsx')
        r = requests.get(dld_url)
        with open(name + 'lga.xlsx', 'wb') as f:
            f.write(r.content)
        excel_url = name + 'lga.xlsx'
        data = xlrd.open_workbook(excel_url).sheets()[0]
        id = 0  # initialisation id for embedded document
        p = []  # used to collect Offense()
        for row in range(7, 69):  # for further update
            id = id + 1
            offence_group = str(data.cell(row, 0).value)
            if offence_group != '':
                backup = offence_group
            if offence_group == '':
                offence_group = backup
            offence_type = str(data.cell(row, 1).value)
            incidents_2012 = str(data.cell(row, 2).value)
            rate_2012 = str(data.cell(row, 3).value)
            incidents_2013 = str(data.cell(row, 4).value)
            rate_2013 = str(data.cell(row, 5).value)
            incidents_2014 = str(data.cell(row, 6).value)
            rate_2014 = str(data.cell(row, 7).value)
            incidents_2015 = str(data.cell(row, 8).value)
            rate_2015 = str(data.cell(row, 9).value)
            incidents_2016 = str(data.cell(row, 10).value)
            rate_2016 = str(data.cell(row, 11).value)
            trend_24m = str(data.cell(row, 12).value)
            trend_60m = str(data.cell(row, 13).value)
            lga_rank = str(data.cell(row, 14).value)
            p.append(Offense(id, offence_group, offence_type,
                             incidents_2012, rate_2012,
                             incidents_2013, rate_2013,
                             incidents_2014, rate_2014,
                             incidents_2015, rate_2015,
                             incidents_2016, rate_2016,
                             trend_24m, trend_60m, lga_rank))
        t = Area(name, p)
        t.save()
        feed = AtomFeed(title='Success POST Activity', feed_url=url)
        entry = FeedEntry(title=name, url=url,
                          updated=datetime.datetime.utcnow(),
                          author={'name': 'admin'})
        feed.add(entry)
        response = make_response(feed.to_string())
        response.mimetype = "application/atom+xml"
    return response, 201
def filter_entry():
    connect(host='mongodb://*****:*****@ds255539.mlab.com:55539/9321test')
    raw_str = str(request.url)
    raw_str2 = raw_str.split('+')
    if raw_str2[4] == 'lgaName':  # query-type-one
        name1 = raw_str2[2]
        name2 = raw_str2[6]
        feed = AtomFeed(title='Query 1 Search Results', feed_url=request.url)
        url1 = 'http://127.0.0.1:5000' + url_for('show_entry', name=name1)
        url2 = 'http://127.0.0.1:5000' + url_for('show_entry', name=name2)
        for a in Area.objects:  # search for name 1
            if a.name.lower().replace(' ', '') == \
                    name1.lower().replace(' ', ''):
                data = json.loads(a.to_json())
                data2 = xmlify(data, wrap="all", indent=" ")
                entry = FeedEntry(title=a.name, url=url1,
                                  updated=datetime.datetime.utcnow(),
                                  author={'name': 'admin'},
                                  content_type="application/xml",
                                  content=data2)
                feed.add(entry)
        for a in Area.objects:  # search for name 2
            if a.name.lower().replace(' ', '') == \
                    name2.lower().replace(' ', ''):
                data = json.loads(a.to_json())
                data2 = xmlify(data, wrap="all", indent=" ")
                entry = FeedEntry(title=a.name, url=url2,
                                  updated=datetime.datetime.utcnow(),
                                  author={'name': 'admin'},
                                  content_type="application/xml",
                                  content=data2)
                feed.add(entry)
        response = make_response(feed.to_string())
        response.mimetype = "application/atom+xml"
        return response, 200
    elif raw_str2[4] == 'year':  # query-type-two
        name1 = raw_str2[2]
        year1 = raw_str2[6]
        feed = AtomFeed(title='Query 2 Search Results', feed_url=request.url)
        url1 = 'http://127.0.0.1:5000' + url_for('show_entry', name=name1)
        for a in Area.objects:  # search for name
            if a.name.lower().replace(' ', '') == \
                    name1.lower().replace(' ', ''):
                if year1 not in ('2012', '2013', '2014', '2015', '2016'):
                    return jsonify(Input_Year=False), 400
                # pick the incidents/rate columns for the requested year
                fakedb = defaultdict(list)
                for stas in a.offenses:
                    fakedb[stas.id].append({
                        'offence_group': stas.offence_group,
                        'offence_type': stas.offence_type,
                        'incidents_' + year1:
                            getattr(stas, 'incidents_' + year1),
                        'rate_' + year1: getattr(stas, 'rate_' + year1),
                    })
                j_fakedb = json.dumps(fakedb, indent=4)
                data2 = xmlify(j_fakedb, wrap="all", indent=" ")
                entry = FeedEntry(title=a.name, url=url1,
                                  updated=datetime.datetime.utcnow(),
                                  author={'name': 'admin'},
                                  content_type="application/xml",
                                  content=data2)
                feed.add(entry)
                response = make_response(feed.to_string())
                response.mimetype = "application/atom+xml"
                return response, 200
    return jsonify(Input=False), 404
def test_feed_entry_no_updated(self):
    with pytest.raises(ValueError):
        FeedEntry(title='test_title', id=1)
def test_feed_entry_no_id(self):
    with pytest.raises(ValueError):
        FeedEntry(title="test_title")