def queryAltmetric(pmid):
    """Return the Altmetric journal-percentile score for a publication.

    Parameters
    ----------
    pmid : str or int
        PubMed identifier of the publication to look up.

    Returns
    -------
    The ``context.journal.pct`` value (percentage attention relative to
    the journal) on success, or a negative sentinel:
    -1 when Altmetric has no record for the PMID,
    -2 when the response carries no 'context' section,
    -3 when the Altmetric API raised an HTTP error.
    """
    a = Altmetric()
    try:
        resp = a.pmid(pmid)
        if resp is None:
            logger.debug("PMID %s. Not found" % pmid)
            return -1
        # Guard-clause style: the early return above makes the old
        # `else:` nesting unnecessary.
        if 'context' in resp:
            # Percentage attention for this journal
            metric = resp['context']['journal']['pct']
            logger.debug("PMID %s. Metric %s" % (pmid, metric))
            return metric
        logger.debug("PMID %s. Percentage attention not found" % pmid)
        return -2
    except AltmetricHTTPException as e:
        if e.status_code == 403:
            logger.error(
                "You aren't authorized for this call: {}".format(pmid))
        elif e.status_code == 420:
            logger.error(
                'You are being rate limited, currently {}'.format(pmid))
        elif e.status_code == 502:
            logger.error(
                'The API version you are using is currently down for maintenance.'
            )
        elif e.status_code == 404:
            logger.error('Invalid API function')
            logger.error(e.msg)
        # FIX: logger.warn() is a deprecated alias; warning() is the
        # supported logging API.
        logger.warning("PMID %s. Exception %s" % (pmid, e.msg))
        return -3
def update(self):
    """Fetch fresh Altmetric data for ``self.paper`` and store it on this
    instance, then persist with ``self.save()``.

    Lookup order: DOI first, then arXiv id ('arx'), then PubMed id
    ('pmi') — the first non-empty response wins. If no id resolves, the
    instance is left untouched and nothing is saved.

    Raises
    ------
    AltmetricHTTPException
        Propagated unchanged from the Altmetric client.
    """
    rsp = None
    a = Altmetric(settings.ALTMETRIC_API_KEY)
    ids = self.paper.get_ids()

    # Fetch altmetric data based on the paper's supported ids.
    try:
        if 'doi' in ids:
            rsp = a.doi(ids.get('doi', ''))
        if not rsp and 'arx' in ids:
            rsp = a.arxiv(ids.get('arx'))
        if not rsp and 'pmi' in ids:
            rsp = a.pmid(ids.get('pmi'))
    except AltmetricHTTPException:
        # Let callers decide how to handle API failures.
        raise

    # Parse json response.
    if rsp:
        self.score = rsp.get('score')
        self.altmetric_id = rsp.get('altmetric_id')
        self.altmetric_jid = rsp.get('altmetric_jid', '')

        # FIX: hoist the history dict and default it to {} — the old
        # rsp.get('history').get(...) chain raised AttributeError
        # whenever the response carried no 'history' section.
        history = rsp.get('history') or {}
        self.score_1d = history.get('1d')
        self.score_2d = history.get('2d')
        self.score_3d = history.get('3d')
        self.score_4d = history.get('4d')
        self.score_5d = history.get('5d')
        self.score_6d = history.get('6d')
        self.score_1w = history.get('1w')
        self.score_1m = history.get('1m')
        self.score_3m = history.get('3m')
        self.score_6m = history.get('6m')
        self.score_1y = history.get('1y')

        self.cited_by_posts_count = rsp.get('cited_by_posts_count', 0)
        self.cited_by_delicious_count = rsp.get('cited_by_delicious_count', 0)
        # FIX: key was misspelled 'cited_by_fbwalss_count', so the
        # Facebook-walls counter always fell back to 0.
        self.cited_by_fbwalls_count = rsp.get('cited_by_fbwalls_count', 0)
        self.cited_by_feeds_count = rsp.get('cited_by_feeds_count', 0)
        self.cited_by_forum_count = rsp.get('cited_by_forum_count', 0)
        self.cited_by_gplus_count = rsp.get('cited_by_gplus_count', 0)
        self.cited_by_linkedin_count = rsp.get('cited_by_linkedin_count', 0)
        self.cited_by_msm_count = rsp.get('cited_by_msm_count', 0)
        self.cited_by_peer_review_sites_count = rsp.get(
            'cited_by_peer_review_sites_count', 0)
        self.cited_by_pinners_count = rsp.get('cited_by_pinners_count', 0)
        self.cited_by_policies_count = rsp.get('cited_by_policies_count', 0)
        self.cited_by_qs_count = rsp.get('cited_by_qs_count', 0)
        self.cited_by_rdts_count = rsp.get('cited_by_rdts_count', 0)
        self.cited_by_rh_count = rsp.get('cited_by_rh_count', 0)
        self.cited_by_tweeters_count = rsp.get('cited_by_tweeters_count', 0)
        self.cited_by_videos_count = rsp.get('cited_by_videos_count', 0)
        self.cited_by_weibo_count = rsp.get('cited_by_weibo_count', 0)
        self.cited_by_wikipedia_count = rsp.get('cited_by_wikipedia_count', 0)

        readers = rsp.get('readers')
        if isinstance(readers, dict):
            self.readers_citeulike = readers.get('citeulike')
            self.readers_mendeley = readers.get('mendeley')

        # FIX: guard 'images' the same way 'readers' is guarded — the
        # old rsp.get('images').get('small') crashed on responses with
        # no images, and re.findall crashed when 'small' was absent.
        images = rsp.get('images')
        if isinstance(images, dict) and images.get('small'):
            self.image = images.get('small')
            match = re.findall(r'types=(P?[\w]+)', self.image)
            if match:
                self.type = match[0]

        # Persist only when we actually received data.
        self.save()
# Write the CSV header for the metrics collected below.
outputFile.write('pmid, score, numReaders, cited (altmetric), scopus\n')
am_metadata = Altmetric()
c = 0  # input-line counter; NOTE(review): never incremented in this visible fragment — presumably done further down (truncated here)
for line in setLinks:
    if c > numLines2Skip:
        # Input lines are comma-separated; the PMID sits in the third column.
        # Splitting on '\n' first strips the trailing newline.
        parsedData = line.split('\n')[0].split(',')
        pmid = parsedData[2]
        # Skip PMIDs already processed in a previous run.
        if not (pmid in listpmid_alreadyDone):
            paperMetadata = am_metadata.pmid(pmid)
            # NOTE(review): `is not None` would be the idiomatic check here.
            if paperMetadata != None:
                print('Evaluating pmid ' + pmid)
                score = paperMetadata['score']
                numReaders = paperMetadata['readers_count']
                # Aggregate every per-source 'cited_by_*' counter into one total.
                cited_by = 0
                for key in paperMetadata.keys():
                    if 'cited_by_' in key:
                        cited_by += paperMetadata[key]
                # Scopus subject list is optional in the Altmetric response.
                # NOTE(review): fragment appears truncated — the values gathered
                # above are presumably written to outputFile below this point.
                if 'scopus_subjects' in paperMetadata.keys():
                    scopus_subjects = paperMetadata['scopus_subjects']
# Tail of the preceding (Paperbuzz) collection loop — csv_writer, pmid and
# doi come from an enclosing scope that is not visible in this chunk.
now = datetime.now()
try:
    resp = paperbuzz.search(doi)
    err = None
except Exception as e:
    # Best-effort collection: record the error in the CSV row instead of
    # aborting the whole run.
    resp = None
    err = e
row = [pmid, doi, json.dumps(resp), str(err), str(now)]
csv_writer.writerow(row)

# Query Altmetric API
logging.info('Collecting Altmetric.com metrics for {}'.format(query))
with open(str(output_dir / query / "altmetric.csv"), "w") as f:
    csv_writer = csv.writer(f)
    csv_writer.writerow(METRICS_COLUMNS)
    # One row per paper; tqdm only adds a progress bar.
    for pmid, doi in tqdm(zip(input_df.pmid, input_df.doi), total=len(input_df)):
        now = datetime.now()
        try:
            resp = altmetric.pmid(str(pmid))
            err = None
        except Exception as e:
            # Same best-effort pattern: a failed lookup becomes a row
            # with resp serialised as 'null' and the error message kept.
            resp = None
            err = e
        row = [pmid, doi, json.dumps(resp), str(err), str(now)]
        csv_writer.writerow(row)