def collection_get(cid='', format="json", include_history=False):
    """Return one collection, as JSON (default) or CSV.

    With ?include_items=0/false, returns the bare collection doc (CSV is
    rejected with 405 in that case).  Otherwise returns the collection with
    its items; responds 210 instead of 200 while any item is still updating.
    """
    # BUG FIX: the original log string had no {cid} placeholder, so
    # .format(cid=cid) was a no-op and the cid never appeared in the log.
    logger.info(u"in collection_get for '{cid}'".format(cid=cid))

    # if not include items, then just return the collection straight from couch
    if request.args.get("include_items") in ["0", "false", "False"]:
        coll = collection.get_collection_doc(cid)
        if not coll:
            abort_custom(404, "collection not found")

        # except if format is csv. can't do that.
        if format == "csv":
            abort_custom(405, "csv method not supported for not include_items")
        else:
            response_code = 200
            resp = make_response(
                json.dumps(coll, sort_keys=True, indent=4),
                response_code)
    else:
        include_history = (request.args.get("include_history", 0)
                           in ["1", "true", "True"])
        (coll_with_items, something_currently_updating) = \
            collection.get_collection_with_items_for_client(
                cid, myrefsets, myredis, mydao, include_history)

        # return success if all reporting is complete for all items
        if something_currently_updating:
            response_code = 210  # update is not complete yet
        else:
            response_code = 200

        if format == "csv":
            # remove scopus before exporting to csv, so don't add magic keep-scopus keys to clean method
            clean_items = [item_module.clean_for_export(item)
                           for item in coll_with_items["items"]]
            csv = collection.make_csv_stream(clean_items)
            resp = make_response(csv, response_code)
            resp.mimetype = "text/csv;charset=UTF-8"
            resp.headers.add(
                "Content-Disposition",
                "attachment; filename=impactstory-{cid}.csv".format(cid=cid))
            resp.headers.add("Content-Encoding", "UTF-8")
        else:
            # admins may supply a key that keeps otherwise-cleaned fields
            secret_key = os.getenv("API_ADMIN_KEY")
            if request.args.get("api_admin_key"):
                supplied_key = request.args.get("api_admin_key", "")
            else:
                supplied_key = request.args.get("key", "")

            clean_if_necessary_items = [
                item_module.clean_for_export(item, supplied_key, secret_key)
                for item in coll_with_items["items"]]
            coll_with_items["items"] = clean_if_necessary_items

            resp = make_response(
                json.dumps(coll_with_items, sort_keys=True, indent=4),
                response_code)
    return resp
def products_get(tiids_string, format="json",
                 most_recent_metric_date=None,
                 most_recent_diff_metric_date=None):
    """Return the requested products as JSON (default) or a CSV stream.

    Responds 210 instead of 200 while any of the requested tiids is still
    being updated.
    """
    requested_tiids = tiids_string.split(",")

    # CSV export wants the extra cleaning pass applied up front
    wants_csv = (format == "csv")
    items_by_tiid = cleaned_items(requested_tiids, myredis, wants_csv,
                                  most_recent_metric_date,
                                  most_recent_diff_metric_date)

    status = 200 if collection.is_all_done(requested_tiids, myredis) else 210

    if wants_csv:
        csv_stream = collection.make_csv_stream(items_by_tiid.values())
        resp = make_response(csv_stream, status)
        resp.mimetype = "text/csv;charset=UTF-8"
        resp.headers.add("Content-Encoding", "UTF-8")
    else:
        payload = json.dumps({"products": items_by_tiid},
                             sort_keys=True, indent=4)
        resp = make_response(payload, status)
    return resp
def collection_get(cid='', format="json", include_history=False):
    """Return one collection, as JSON (default) or CSV.

    With ?include_items=0/false, returns the bare collection doc straight
    from couch (CSV is rejected with 405 in that case).  Otherwise returns
    the collection with its items; responds 210 while items are updating.
    """
    # BUG FIX: the original log string had no {cid} placeholder, so
    # .format(cid=cid) did nothing and the cid was never logged.
    logger.info(u"in collection_get for '{cid}'".format(cid=cid))

    # if not include items, then just return the collection straight from couch
    if request.args.get("include_items") in ["0", "false", "False"]:
        coll = collection.get_collection_doc(cid)
        if not coll:
            abort_custom(404, "collection not found")

        # except if format is csv. can't do that.
        if format == "csv":
            abort_custom(405, "csv method not supported for not include_items")
        else:
            response_code = 200
            resp = make_response(
                json.dumps(coll, sort_keys=True, indent=4), response_code)
    else:
        include_history = (request.args.get("include_history", 0)
                           in ["1", "true", "True"])
        (coll_with_items, something_currently_updating) = \
            collection.get_collection_with_items_for_client(
                cid, myrefsets, myredis, mydao, include_history)

        # return success if all reporting is complete for all items
        if something_currently_updating:
            response_code = 210  # update is not complete yet
        else:
            response_code = 200

        if format == "csv":
            # remove scopus before exporting to csv, so don't add magic keep-scopus keys to clean method
            clean_items = [item_module.clean_for_export(item)
                           for item in coll_with_items["items"]]
            csv = collection.make_csv_stream(clean_items)
            resp = make_response(csv, response_code)
            resp.mimetype = "text/csv;charset=UTF-8"
            resp.headers.add(
                "Content-Disposition",
                "attachment; filename=impactstory-{cid}.csv".format(cid=cid))
            resp.headers.add("Content-Encoding", "UTF-8")
        else:
            # admins may supply a key that keeps otherwise-cleaned fields
            secret_key = os.getenv("API_ADMIN_KEY")
            if request.args.get("api_admin_key"):
                supplied_key = request.args.get("api_admin_key", "")
            else:
                supplied_key = request.args.get("key", "")

            clean_if_necessary_items = [
                item_module.clean_for_export(item, supplied_key, secret_key)
                for item in coll_with_items["items"]]
            coll_with_items["items"] = clean_if_necessary_items

            resp = make_response(
                json.dumps(coll_with_items, sort_keys=True, indent=4),
                response_code)
    return resp
def collection_get(cid='', format="json", include_history=False):
    """Return one collection, as JSON (default) or CSV.

    404s when the collection doc is missing or its tiids can't be resolved;
    responds 210 instead of 200 while any of its items is still updating.
    """
    coll = mydao.get(cid)
    if not coll:
        abort(404)

    # if not include items, then just return the collection straight from couch
    wants_bare_doc = request.args.get("include_items") in ["0", "false", "False"]
    if wants_bare_doc:
        # except if format is csv. can't do that.
        if format == "csv":
            abort(405)  # method not supported
        else:
            response_code = 200
            resp = make_response(
                json.dumps(coll, sort_keys=True, indent=4), response_code)
            resp.mimetype = "application/json"
    else:
        try:
            include_history = (request.args.get("include_history", 0)
                               in ["1", "true", "True"])
            (coll_with_items, something_currently_updating) = \
                collection.get_collection_with_items_for_client(
                    cid, myrefsets, myredis, mydao, include_history)
        except (LookupError, AttributeError):
            logger.error("couldn't get tiids for GET collection '{cid}'".format(cid=cid))
            abort(404)  # not found

        # return success if all reporting is complete for all items
        response_code = 210 if something_currently_updating else 200

        if format == "csv":
            # remove scopus before exporting to csv, so don't add magic keep-scopus keys to clean method
            exportable = [item_module.clean_for_export(item)
                          for item in coll_with_items["items"]]
            csv_stream = collection.make_csv_stream(exportable)
            resp = make_response(csv_stream, response_code)
            resp.mimetype = "text/csv;charset=UTF-8"
            resp.headers.add(
                "Content-Disposition",
                "attachment; filename=impactstory-{cid}.csv".format(cid=cid))
            resp.headers.add("Content-Encoding", "UTF-8")
        else:
            api_key = request.args.get("key", None)
            coll_with_items["items"] = [
                item_module.clean_for_export(item, api_key, os.getenv("API_KEY"))
                for item in coll_with_items["items"]]
            resp = make_response(
                json.dumps(coll_with_items, sort_keys=True, indent=4),
                response_code)
            resp.mimetype = "application/json"
    return resp
def products_get(tiids_string, format="json"):
    """Return the requested products as JSON (default) or a CSV stream.

    Responds 210 instead of 200 while any requested item is still updating.
    """
    requested_tiids = tiids_string.split(",")
    items_by_tiid = cleaned_items(requested_tiids, myredis)

    still_updating = collection.is_something_currently_updating(
        items_by_tiid, myredis)
    status = 210 if still_updating else 200  # 210: update is not complete yet

    if format == "csv":
        csv_stream = collection.make_csv_stream(items_by_tiid.values())
        resp = make_response(csv_stream, status)
        resp.mimetype = "text/csv;charset=UTF-8"
        resp.headers.add("Content-Encoding", "UTF-8")
    else:
        payload = json.dumps({"products": items_by_tiid},
                             sort_keys=True, indent=4)
        resp = make_response(payload, status)
    return resp
def products_get(tiids_string, format="json",
                 most_recent_metric_date=None,
                 most_recent_diff_metric_date=None):
    """Return the requested products as JSON (default) or a CSV stream.

    Responds 210 instead of 200 while any requested tiid is still updating.
    """
    tiid_list = tiids_string.split(",")

    # the CSV export path wants the extra cleaning pass applied up front
    csv_requested = (format == "csv")
    products = cleaned_items(tiid_list, myredis, csv_requested,
                             most_recent_metric_date,
                             most_recent_diff_metric_date)

    if collection.is_all_done(tiid_list, myredis):
        status = 200
    else:
        status = 210  # update is not complete yet

    if csv_requested:
        resp = make_response(collection.make_csv_stream(products.values()),
                             status)
        resp.mimetype = "text/csv;charset=UTF-8"
        resp.headers.add("Content-Encoding", "UTF-8")
    else:
        body = json.dumps({"products": products}, sort_keys=True, indent=4)
        resp = make_response(body, status)
    return resp
def products_get(tiids_string, format="json"):
    """Return the requested products as JSON (default) or a CSV stream.

    Responds 210 instead of 200 while any item is still being updated.
    """
    tiid_list = tiids_string.split(",")
    products = cleaned_items(tiid_list, myredis)

    if collection.is_something_currently_updating(products, myredis):
        status = 210  # update is not complete yet
    else:
        status = 200

    if format == "csv":
        resp = make_response(collection.make_csv_stream(products.values()),
                             status)
        resp.mimetype = "text/csv;charset=UTF-8"
        resp.headers.add("Content-Encoding", "UTF-8")
    else:
        resp = make_response(
            json.dumps({"products": products}, sort_keys=True, indent=4),
            status)
    return resp
def test_make_csv_stream(self):
    # Run the CSV export over the canned API items fixture and compare
    # against the known-good serialization (header row plus one row per
    # item, metrics columns sorted by provider:metric name).
    actual = collection.make_csv_stream(API_ITEMS_JSON)
    expected = 'tiid,title,doi,dryad:most_downloaded_file,dryad:package_views,dryad:total_downloads,mendeley:groups,mendeley:readers,plosalm:crossref,plosalm:html_views,plosalm:pdf_views,plosalm:pmc_abstract,plosalm:pmc_figure,plosalm:pmc_full-text,plosalm:pmc_pdf,plosalm:pmc_supp-data,plosalm:pmc_unique-ip,plosalm:pubmed_central,plosalm:scopus,wikipedia:mentions\r\nf2b45fcab1da11e19199c8bcc8937e3f,"Tumor-Immune Interaction, Surgical Treatment, and Cancer Recurrence in a Mathematical Model of Melanoma",10.1371/journal.pcbi.1000362,,,,1,13,7,2075,484,29,13,232,113,0,251,2,11,\r\nc1eba010b1da11e19199c8bcc8937e3f,"Data from: Comparison of quantitative and molecular genetic variation of native vs. invasive populations of purple loosestrife (Lythrum salicaria L., Lythraceae)",10.5061/dryad.1295,70,537,114,,,,,,,,,,,,,,\r\nc202754cb1da11e19199c8bcc8937e3f,Adventures in Semantic Publishing: Exemplar Semantic Enhancements of a Research Article,10.1371/journal.pcbi.1000361,,,,4,52,13,11521,1097,70,39,624,149,6,580,12,19,1\r\nf2dc3f36b1da11e19199c8bcc8937e3f,Design Principles for Riboswitch Function,10.1371/journal.pcbi.1000363,,,,4,57,16,3361,1112,37,54,434,285,41,495,9,19,\r\n'
    assert_equals(actual, expected)