Example #1
def add_items_to_collection(cid=""):
    """
    Adds new items to a collection.
    """

    abort_if_fails_collection_edit_auth(request)

    try:
        if "tiids" in request.json:
            collection_object = collection.add_items_to_collection_object(
                cid=cid, tiids=request.json["tiids"], alias_tuples=None)
        else:
            # to be deprecated
            collection_object = collection.add_items_to_collection(
                cid=cid, aliases=request.json["aliases"], myredis=myredis)
    except (AttributeError, TypeError) as e:
        # we got missing or improperly formatted data.
        logger.error(
            u"PUT /collection/{id}/items threw an error: '{error_str}'. input: {json}."
            .format(id=cid, error_str=e, json=request.json))
        abort_custom(500, "Error adding items to collection")

    (coll_doc, is_updating) = collection.get_collection_with_items_for_client(
        cid, myrefsets, myredis, mydao, include_history=False)

    resp = make_response(json.dumps(coll_doc, sort_keys=True, indent=4), 200)

    return resp
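
For orientation, here is a minimal usage sketch (not part of the original example) of how a view like this could be registered and exercised with Flask's test client. The PUT /collection/<cid>/items path is taken from the log message above; the app object, the route registration, and working module globals (logger, myredis, mydao, myrefsets), plus a passing abort_if_fails_collection_edit_auth check, are all assumptions.

import json
from flask import Flask

app = Flask(__name__)  # hypothetical app; the real wiring lives elsewhere in the codebase
app.add_url_rule("/collection/<cid>/items", "add_items_to_collection",
                 add_items_to_collection, methods=["PUT"])

with app.test_client() as client:
    # Preferred payload is a list of tiids; the "aliases" form above is marked deprecated.
    resp = client.put("/collection/examplecid/items",
                      data=json.dumps({"tiids": ["tiid1", "tiid2"]}),
                      content_type="application/json")
    print(resp.status_code)  # 200 with the updated collection doc, assuming auth and globals are set up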
Example #2
def collection_get(cid='', format="json", include_history=False):
    logger.info(u"in collection_get with cid '{cid}'".format(cid=cid))

    # if items are not requested, just return the collection straight from couch
    if (request.args.get("include_items") in ["0", "false", "False"]):
        coll = collection.get_collection_doc(cid)
        if not coll:
            abort_custom(404, "collection not found")

        # except if format is csv.  can't do that.
        if format == "csv":
            abort_custom(405, "csv method not supported for not include_items")
        else:
            response_code = 200
            resp = make_response(json.dumps(coll, sort_keys=True, indent=4),
                                 response_code)
    else:
        include_history = (request.args.get("include_history", 0)
                           in ["1", "true", "True"])
        (coll_with_items, something_currently_updating
         ) = collection.get_collection_with_items_for_client(
             cid, myrefsets, myredis, mydao, include_history)

        # return success if all reporting is complete for all items
        if something_currently_updating:
            response_code = 210  # update is not complete yet
        else:
            response_code = 200

        if format == "csv":
            # remove scopus before exporting to csv, so don't add magic keep-scopus keys to clean method
            clean_items = [
                item_module.clean_for_export(item)
                for item in coll_with_items["items"]
            ]
            csv = collection.make_csv_stream(clean_items)
            resp = make_response(csv, response_code)
            resp.mimetype = "text/csv;charset=UTF-8"
            resp.headers.add(
                "Content-Disposition",
                "attachment; filename=impactstory-{cid}.csv".format(cid=cid))
            resp.headers.add("Content-Encoding", "UTF-8")
        else:

            secret_key = os.getenv("API_ADMIN_KEY")
            if request.args.get("api_admin_key"):
                supplied_key = request.args.get("api_admin_key", "")
            else:
                supplied_key = request.args.get("key", "")

            clean_if_necessary_items = [
                item_module.clean_for_export(item, supplied_key, secret_key)
                for item in coll_with_items["items"]
            ]

            coll_with_items["items"] = clean_if_necessary_items
            resp = make_response(
                json.dumps(coll_with_items, sort_keys=True, indent=4),
                response_code)
    return resp
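
A similar sketch for the read path (again, not from the original source): the /collection/<cid> route is an assumption based on the log and error messages in these examples, and format defaults to "json" here, so only query parameters are shown.

from flask import Flask

app = Flask(__name__)  # hypothetical wiring for illustration only
app.add_url_rule("/collection/<cid>", "collection_get", collection_get, methods=["GET"])

with app.test_client() as client:
    # Bare collection doc straight from couch; csv is rejected with 405 on this path.
    resp = client.get("/collection/examplecid?include_items=0")

    # Items included, with per-metric history; 210 means some items are still updating.
    resp = client.get("/collection/examplecid?include_history=1")
    print(resp.status_code)  # 200 or 210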
Example #3
def add_items_to_collection(cid=""):
    """
    Adds new items to a collection.
    """

    abort_if_fails_collection_edit_auth(request)

    try:
        if "tiids" in request.json:
            collection_object = collection.add_items_to_collection_object(
                    cid=cid, 
                    tiids=request.json["tiids"], 
                    alias_tuples=None)
        else:
            # to be deprecated
            collection_object = collection.add_items_to_collection(
                cid=cid, 
                aliases=request.json["aliases"], 
                myredis=myredis)
    except (AttributeError, TypeError) as e:
        # we got missing or improperly formatted data.
        logger.error(u"PUT /collection/{id}/items threw an error: '{error_str}'. input: {json}.".format(
                id=cid,
                error_str=e,
                json=request.json))
        abort_custom(500, "Error adding items to collection")

    (coll_doc, is_updating) = collection.get_collection_with_items_for_client(cid, myrefsets, myredis, mydao, include_history=False)

    resp = make_response(json.dumps(coll_doc, sort_keys=True, indent=4), 200)

    return resp
Example #4
    def test_get_collection_with_items_for_client_include_history(self):
        self.create_test_collection()

        (returned_doc, still_updating) = collection.get_collection_with_items_for_client("testcollectionid", None, self.r, self.d, include_history=True)
        assert_equals(still_updating, False)
        print returned_doc
        print json.dumps(returned_doc, sort_keys=True, indent=4)
        expected = {'created': '2012-08-23T14:40:16.888800', 'items': [{'created': '2012-08-23T14:40:16.888800', 'currently_updating': False, 'metrics': {u'mendeley:discipline': {'provenance_url': u'http://www.mendeley.com/research/origin-planetary-nebulae/', 'values': {'raw': u'[{"name": "Astronomy / Astrophysics / Space Science", "value": 100, "id": 2}]', 'raw_history': {'2013-06-22T23:03:15.852461': u'[{"name": "Astronomy / Astrophysics / Space Science", "value": 100, "id": 2}]'}}, 'static_meta': {'provider_url': 'http://www.mendeley.com/', 'icon': 'http://www.mendeley.com/favicon.ico', 'display_name': 'discipline, top 3 percentages', 'description': 'Percent of readers by discipline, for top three disciplines (csv, api only)', 'provider': 'Mendeley'}}, u'mendeley:readers': {'provenance_url': u'http://www.mendeley.com/research/origin-planetary-nebulae/', 'values': {'raw': u'9', 'raw_history': {'2013-07-24T18:04:41.035841': u'9'}}, 'static_meta': {'provider_url': 'http://www.mendeley.com/', 'icon': 'http://www.mendeley.com/favicon.ico', 'display_name': 'readers', 'description': 'The number of readers who have added the article to their libraries', 'provider': 'Mendeley'}}}, 'last_modified': '2012-08-23T14:40:16.888800', 'biblio': {'genre': 'article'}, '_id': u'iaw9rzldigp4xc7p20bycnkg', 'type': 'item', 'aliases': {u'pmid': [u'16023720']}}, {'created': '2012-08-23T14:40:16.888800', 'currently_updating': False, 'metrics': {u'topsy:tweets': {'provenance_url': u'http://topsydrilldown', 'values': {'raw': u'22', 'raw_history': {'2013-11-22T20:41:03.178277': u'22'}}, 'static_meta': {'provider_url': 'http://www.topsy.com/', 'icon': 'http://twitter.com/phoenix/favicon.ico', 'display_name': 'tweets', 'description': 'Number of times the item has been tweeted', 'provider': 'Topsy'}}}, 'last_modified': '2012-08-23T14:40:16.888800', 'biblio': {'genre': 'article'}, '_id': u'itsq6fgx8ogi9ixysbipmtxx', 'type': 'item', 'aliases': {u'pmid': [u'16413797']}}], 'title': u'mycollection', 'alias_tiids': {u'iaw9rzldigp4xc7p20bycnkg': u'iaw9rzldigp4xc7p20bycnkg', u'itsq6fgx8ogi9ixysbipmtxx': u'itsq6fgx8ogi9ixysbipmtxx'}, 'last_modified': '2012-08-23T14:40:16.888800', '_id': u'testcollectionid', 'type': 'collection'}
        #print json.dumps(expected, sort_keys=True, indent=4)

        assert_equals(returned_doc, expected)
Example #5
    def test_get_collection_with_items_for_client_include_history(self):
        self.create_test_collection()

        (returned_doc, still_updating) = collection.get_collection_with_items_for_client("testcollectionid", None, self.r, self.d, include_history=True)
        assert_equals(still_updating, False)
        print returned_doc
        print json.dumps(returned_doc, sort_keys=True, indent=4)
        expected = {'created': '2012-08-23T14:40:16.888800', 'items': [{'created': '2012-08-23T14:40:16.888800', 'currently_updating': False, 'metrics': {u'mendeley:discipline': {'provenance_url': u'http://www.mendeley.com/research/origin-planetary-nebulae/', 'values': {'raw': u'[{"name": "Astronomy / Astrophysics / Space Science", "value": 100, "id": 2}]', 'raw_history': {'2013-06-22T23:03:15.852461': u'[{"name": "Astronomy / Astrophysics / Space Science", "value": 100, "id": 2}]'}}, 'static_meta': {'provider_url': 'http://www.mendeley.com/', 'icon': 'http://www.mendeley.com/favicon.ico', 'display_name': 'discipline, top 3 percentages', 'description': 'Percent of readers by discipline, for top three disciplines (csv, api only)', 'provider': 'Mendeley'}}, u'mendeley:readers': {'provenance_url': u'http://www.mendeley.com/research/origin-planetary-nebulae/', 'values': {'raw': u'9', 'raw_history': {'2013-07-24T18:04:41.035841': u'9'}}, 'static_meta': {'provider_url': 'http://www.mendeley.com/', 'icon': 'http://www.mendeley.com/favicon.ico', 'display_name': 'readers', 'description': 'The number of readers who have added the article to their libraries', 'provider': 'Mendeley'}}}, 'last_modified': '2012-08-23T14:40:16.888800', 'biblio': {'genre': 'article'}, '_id': u'iaw9rzldigp4xc7p20bycnkg', 'type': 'item', 'aliases': {u'pmid': [u'16023720']}}, {'created': '2012-08-23T14:40:16.888800', 'currently_updating': False, 'metrics': {u'topsy:tweets': {'provenance_url': u'http://topsydrilldown', 'values': {'raw': u'22', 'raw_history': {'2013-11-22T20:41:03.178277': u'22'}}, 'static_meta': {'provider_url': 'http://www.topsy.com/', 'icon': 'http://twitter.com/phoenix/favicon.ico', 'display_name': 'tweets', 'description': 'Number of times the item has been tweeted', 'provider': 'Topsy'}}}, 'last_modified': '2012-08-23T14:40:16.888800', 'biblio': {'genre': 'article'}, '_id': u'itsq6fgx8ogi9ixysbipmtxx', 'type': 'item', 'aliases': {u'pmid': [u'16413797']}}], 'title': u'mycollection', 'alias_tiids': {u'iaw9rzldigp4xc7p20bycnkg': u'iaw9rzldigp4xc7p20bycnkg', u'itsq6fgx8ogi9ixysbipmtxx': u'itsq6fgx8ogi9ixysbipmtxx'}, 'last_modified': '2012-08-23T14:40:16.888800', '_id': u'testcollectionid', 'type': 'collection'}
        #print json.dumps(expected, sort_keys=True, indent=4)

        assert_equals(returned_doc, expected)
Example #6
def collection_get(cid='', format="json", include_history=False):
    logger.info(u"in collection_get with cid '{cid}'".format(cid=cid))

    # if items are not requested, just return the collection straight from couch
    if (request.args.get("include_items") in ["0", "false", "False"]):
        coll = collection.get_collection_doc(cid)
        if not coll:
            abort_custom(404, "collection not found")

        # except if format is csv.  can't do that.
        if format == "csv":
            abort_custom(405, "csv method not supported for not include_items")
        else:
            response_code = 200
            resp = make_response(json.dumps(coll, sort_keys=True, indent=4),
                                 response_code)
    else:
        include_history = (request.args.get("include_history", 0) in ["1", "true", "True"])
        (coll_with_items, something_currently_updating) = collection.get_collection_with_items_for_client(cid, myrefsets, myredis, mydao, include_history)

        # return success if all reporting is complete for all items    
        if something_currently_updating:
            response_code = 210 # update is not complete yet
        else:
            response_code = 200

        if format == "csv":
            # remove scopus before exporting to csv, so don't add magic keep-scopus keys to clean method
            clean_items = [item_module.clean_for_export(item) for item in coll_with_items["items"]]
            csv = collection.make_csv_stream(clean_items)
            resp = make_response(csv, response_code)
            resp.mimetype = "text/csv;charset=UTF-8"
            resp.headers.add("Content-Disposition",
                             "attachment; filename=impactstory-{cid}.csv".format(
                                cid=cid))
            resp.headers.add("Content-Encoding",
                             "UTF-8")
        else:

            secret_key = os.getenv("API_ADMIN_KEY") 
            if request.args.get("api_admin_key"):
                supplied_key = request.args.get("api_admin_key", "")
            else:
                supplied_key = request.args.get("key", "")

            clean_if_necessary_items = [item_module.clean_for_export(item, supplied_key, secret_key)
                for item in coll_with_items["items"]]

            coll_with_items["items"] = clean_if_necessary_items
            resp = make_response(json.dumps(coll_with_items, sort_keys=True, indent=4),
                                 response_code)
    return resp
Example #7
def collection_get(cid='', format="json", include_history=False):
    coll = mydao.get(cid)
    if not coll:
        abort(404)

    # if items are not requested, just return the collection straight from couch
    if (request.args.get("include_items") in ["0", "false", "False"]):
        # except if format is csv.  can't do that.
        if format == "csv":
            abort(405)  # method not supported
        else:
            response_code = 200
            resp = make_response(json.dumps(coll, sort_keys=True, indent=4),
                                 response_code)
            resp.mimetype = "application/json"
    else:
        try:
            include_history = (request.args.get("include_history", 0) in ["1", "true", "True"])
            (coll_with_items, something_currently_updating) = collection.get_collection_with_items_for_client(cid, myrefsets, myredis, mydao, include_history)
        except (LookupError, AttributeError):  
            logger.error("couldn't get tiids for GET collection '{cid}'".format(cid=cid))
            abort(404)  # not found

        # return success if all reporting is complete for all items    
        if something_currently_updating:
            response_code = 210 # update is not complete yet
        else:
            response_code = 200

        if format == "csv":
            # remove scopus before exporting to csv, so don't add magic keep-scopus keys to clean method
            clean_items = [item_module.clean_for_export(item) for item in coll_with_items["items"]]
            csv = collection.make_csv_stream(clean_items)
            resp = make_response(csv, response_code)
            resp.mimetype = "text/csv;charset=UTF-8"
            resp.headers.add("Content-Disposition",
                             "attachment; filename=impactstory-{cid}.csv".format(
                                cid=cid))
            resp.headers.add("Content-Encoding",
                             "UTF-8")
        else:
            api_key = request.args.get("key", None)
            clean_if_necessary_items = [item_module.clean_for_export(item, api_key, os.getenv("API_KEY")) 
                for item in coll_with_items["items"]]
            coll_with_items["items"] = clean_if_necessary_items
            resp = make_response(json.dumps(coll_with_items, sort_keys=True, indent=4),
                                 response_code)
            resp.mimetype = "application/json"
    return resp
Example #8
    def test_get_collection_with_items_for_client(self):
        test_collection = {"_id": "testcollectionid", "title": "mycollection", "type": "collection",
                           "alias_tiids": {
                               "pmid:16023720": "iaw9rzldigp4xc7p20bycnkg",
                               "pmid:16413797": "itsq6fgx8ogi9ixysbipmtxx"}}
        self.d.db.save(test_collection)
        test_items = [
            {"_id": "iaw9rzldigp4xc7p20bycnkg", "type": "item", "biblio": {}, "aliases": [("pmid", "16023720")]},
            {"_id": "itsq6fgx8ogi9ixysbipmtxx", "type": "item", "biblio": {}, "aliases": [("pmid", "16413797")]}
        ]
        for item_doc in test_items:
            self.d.db.save(item_doc)
        response = collection.get_collection_with_items_for_client("testcollectionid", None, self.r, self.d)
        expected = "heather"
        assert_equals(response[1], False)
        assert_equals(response[0].keys(), ['items', '_rev', '_id', 'type', 'title'])
        assert_equals(sorted(response[0]["items"][0].keys()), sorted(['is_registered', '_rev', 'currently_updating', 'metrics', 'biblio', '_id', 'type', 'aliases']))
Example #9
def remove_items_from_collection(cid=""):
    """
    Deletes items from a collection
    """
    abort_if_fails_collection_edit_auth(request)

    try:
        collection_object = collection.remove_items_from_collection(
            cid=cid,
            tiids_to_delete=request.json["tiids"],
            myredis=myredis,
            mydao=mydao)
    except (AttributeError, TypeError, KeyError) as e:
        # we got missing or improperly formatted data.
        logger.error(
            u"DELETE /collection/{id}/items threw an error: '{error_str}'. input: {json}."
            .format(id=cid, error_str=e, json=request.json))
        abort_custom(500, "Error deleting items from collection")

    (coll_doc, is_updating) = collection.get_collection_with_items_for_client(
        cid, myrefsets, myredis, mydao, include_history=False)
    resp = make_response(json.dumps(coll_doc, sort_keys=True, indent=4), 200)
    return resp
Example #10
def remove_items_from_collection(cid=""):
    """
    Deletes items from a collection
    """
    abort_if_fails_collection_edit_auth(request)

    try:
        collection_object = collection.remove_items_from_collection(
            cid=cid, 
            tiids_to_delete=request.json["tiids"], 
            myredis=myredis, 
            mydao=mydao)
    except (AttributeError, TypeError, KeyError) as e:
        # we got missing or improperly formatted data.
        logger.error(u"DELETE /collection/{id}/items threw an error: '{error_str}'. input: {json}.".format(
                id=cid,
                error_str=e,
                json=request.json))
        abort_custom(500, "Error deleting items from collection")

    (coll_doc, is_updating) = collection.get_collection_with_items_for_client(cid, myrefsets, myredis, mydao, include_history=False)
    resp = make_response(json.dumps(coll_doc, sort_keys=True, indent=4), 200)
    return resp
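
Finally, a matching sketch for the delete path (assumptions as above: hypothetical app wiring, a passing edit-auth check, and configured module globals). The DELETE /collection/<cid>/items path comes from the handler's log message.

import json
from flask import Flask

app = Flask(__name__)  # hypothetical app for illustration
app.add_url_rule("/collection/<cid>/items", "remove_items_from_collection",
                 remove_items_from_collection, methods=["DELETE"])

with app.test_client() as client:
    resp = client.delete("/collection/examplecid/items",
                         data=json.dumps({"tiids": ["tiid1"]}),
                         content_type="application/json")
    print(resp.status_code)  # 200 with the updated collection doc as JSON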