def semantic_scholar_alias(NAME):
    """
    Given an author name, look up that author's papers via author_to_urls,
    match the author among each paper's co-authors by first or last name,
    and return the Semantic Scholar aliases recorded for the matched author
    (or None if no match is found).
    """
    aliases = None
    dois, coauthors, titles, visit_urls = author_to_urls(NAME)
    for d in dois:
        paper = sch.paper(d, timeout=32)
        if "authors" in paper.keys():
            all_coauthors = paper["authors"]
            for co_name in all_coauthors:
                key = co_name["name"]
                if (NAME.split(" ")[0] in key.split(" ")[0]
                        or key.split(" ")[0] in NAME.split(" ")[0]
                        or NAME.split(" ")[-1] in key.split(" ")[-1]):
                    author = sch.author(co_name["authorId"], timeout=32)
                    if "aliases" in author.keys():
                        aliases = author["aliases"]
    return aliases

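# The snippets in this file assume a module-level `sch` handle onto Semantic
# Scholar. A minimal setup sketch, assuming the legacy `semanticscholar`
# package API that exposes module-level paper()/author() calls accepting a
# `timeout` argument (newer releases use a SemanticScholar class with
# get_paper()/get_author() instead); `author_to_urls` is an external helper
# not shown here:
import semanticscholar as sch

# Hypothetical usage of semantic_scholar_alias (name chosen for illustration):
# aliases = semantic_scholar_alias("Alan Turing")
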
def fetch_semantic_author(ssID, fillpapers=True):
    '''Queries Semantic Scholar for a given author.
    Also fills all papers if fillpapers is True (a bit slower).'''
    author = sch.author(ssID, timeout=2)
    if fillpapers:
        filledpapers = fetch_semantic_papers(author)
        author['filledpapers'] = filledpapers
    return author

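# `fetch_semantic_papers` is not defined in this excerpt. A minimal sketch of
# what such a helper could look like under the same legacy API, expanding each
# paper stub attached to the author record into a full paper record (this is
# an assumed implementation, not the original one):
def fetch_semantic_papers(author, timeout=2):
    filled = []
    for stub in author.get('papers', []):
        # each stub carries a 'paperId'; fetch the full record for it
        filled.append(sch.paper(stub['paperId'], timeout=timeout))
    return filled
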
def get_all_papers():
    """Determine the ids of all relevant research papers."""
    neumann = 143993045
    valenzuela = 143990000
    li = 144000000
    grant = 144100000
    kemper = 144122431
    authorIds = [neumann, valenzuela, li, grant, kemper]
    paperIds = []
    for authorId in authorIds:
        author = ss.author(authorId)
        papers = []
        if author != {}:
            papers = author['papers']
        for paper in papers:
            paperId = paper['paperId']
            paperIds.append(paperId)
    return paperIds

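# Usage sketch for get_all_papers(): `ss` above is presumably the same Semantic
# Scholar handle imported as `sch` elsewhere in this file. Co-authored papers
# can be appended once per listed author, so a caller may want to de-duplicate
# (assumed usage, not part of the original snippet):
unique_paper_ids = sorted(set(get_all_papers()))
print(len(unique_paper_ids), "unique paper ids")
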
def papercollector(id, year):
    author = sch.author(id)
    papers = author["papers"]
    collectedpapers = []
    for p in papers:
        if p["year"] == year:
            a = sch.paper(p["paperId"])["abstract"]
            try:
                lan = detect(a)  # language code, e.g. 'en' (detect is assumed to be langdetect.detect)
                if lan == 'en':
                    p["abstract"] = a
                    collectedpapers.append(p)
            except TypeError:
                # no usable abstract; keep the paper without attaching one
                collectedpapers.append(p)
    print(collectedpapers)
    return collectedpapers

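# `detect` above is assumed to come from the langdetect package, whose detect()
# raises TypeError when given a None abstract, which is what the except branch
# appears to rely on. Illustrative call (Turing's id from the tests below; the
# year is arbitrary):
from langdetect import detect  # assumed source of detect()

collected = papercollector(2262347, 1950)
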
def test_author():
    data = sch.author(2262347)
    assert data['name'] == 'Alan M. Turing'

def test_author(self):
    data = sch.author(2262347, timeout=5)
    self.assertEqual(data['name'], 'A. Turing')

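# Both tests above hit the live API and expect slightly different canonical
# names, likely reflecting different library or data versions. A sketch of an
# offline variant using unittest.mock, assuming the legacy module-level
# `semanticscholar.author` is the function being patched:
import unittest
from unittest.mock import patch

import semanticscholar as sch


class TestAuthorOffline(unittest.TestCase):
    @patch('semanticscholar.author', return_value={'name': 'A. Turing'})
    def test_author_offline(self, mock_author):
        data = sch.author(2262347)
        self.assertEqual(data['name'], 'A. Turing')
        mock_author.assert_called_once_with(2262347)
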
def loop(_id, _papers):
    author = sch.author(_id, timeout=2)
    for paper in author['papers']:
        _papers.append(paper['paperId'])
    # de-duplicate and sort; return the result so the caller can use it
    return sorted(list(dict.fromkeys(_papers)))

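# Usage sketch for loop(): collect paper ids for several author ids into one
# shared list (the ids reuse examples from earlier in this file):
all_ids = []
for author_id in [2262347, 144122431]:
    all_ids = loop(author_id, all_ids)
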
Variable declarations:
    count: ID for every paper
    numbers_of_papers: the number of papers the user wants to view
    MAX: number of documents the author has written
    author: the selected author in the Semantic Scholar database
    name: author name
    file: the CSV file
    CSVstring: string to write to the CSV file
    auth_name_file: name of the output file, composed of the ID and the author name
    team: group of people who worked on the same document
"""
print("Enter the ID of the author you want to analyze")
print("example: Turing 2262347")
iD_auth = input()
author = sch.author(iD_auth)
try:
    MAX = int(len(author['papers']))
except KeyError:
    print("Invalid author id: " + iD_auth)
    print("The program will now exit...")
    time.sleep(2)
    exit()
print("How many scientific papers do you want to analyze?")
print("Total papers for this author: " + str(MAX))
numbers_of_papers = int(input())
if numbers_of_papers < 0 or numbers_of_papers > MAX:
    print("Invalid number; all papers will be analyzed")
    numbers_of_papers = MAX
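
# The docstring above declares CSV-related variables (file, CSVstring,
# auth_name_file, team) that this excerpt never reaches. A minimal sketch of
# how that CSV step might look, assuming one row per analyzed paper with its
# co-author "team" (a hypothetical continuation, not the original code):
import csv

auth_name_file = str(iD_auth) + "_" + author.get('name', 'unknown').replace(" ", "_") + ".csv"
with open(auth_name_file, "w", newline="", encoding="utf-8") as file:
    writer = csv.writer(file)
    writer.writerow(["paperId", "title", "year", "team"])
    for p in author['papers'][:numbers_of_papers]:
        full = sch.paper(p['paperId'])
        team = "; ".join(a['name'] for a in full.get('authors', []))
        writer.writerow([p['paperId'], p.get('title', ''), p.get('year', ''), team])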