def saveEntryOutBib(a, m=None):
    """Strip the unwanted fields from a bibtex entry and append
    the result to the output file.

    Parameters:
        a: the bibtex entry
        m: the ID (bibtex key) of the entry,
            if it is not the default one
    """
    parsed = bibtexparser.bparser.BibTexParser(common_strings=True).parse(a)
    entry = parsed.entries[0]
    # drop the fields that must not appear in the exported file
    for field in self.unwantedFields:
        try:
            del entry[field]
        except KeyError:
            pass
    # if a custom key was requested, force it into the entry
    if m is not None:
        m = m.strip()
        if m != entry["ID"].strip():
            entry["ID"] = m
    db.entries = [entry]
    content = pbWriter.write(db)
    try:
        with open(outFileName, "a") as outf:
            outf.write(content)
            pBLogger.info(exstr.entryInserted % m)
    except IOError:
        pBLogger.exception(exstr.errorWrite % outFileName)
        return False
def initUI(self):
    """Build the dialog layout: a caption, a read-only
    `QPlainTextEdit` filled with the log file content,
    and the close/clear buttons.
    """
    self.setWindowTitle(self.title)
    layout = QVBoxLayout()
    layout.setSpacing(1)
    layout.addWidget(PBLabel(dwstr.logFileRead % pbConfig.params["logFileName"]))
    # read the log file; fall back to an error message on failure
    try:
        with open(pbConfig.params["logFileName"]) as logfile:
            content = logfile.read()
    except IOError:
        content = dwstr.clearLogFailRead
        pBLogger.exception(content)
    self.textEdit = QPlainTextEdit(content)
    self.textEdit.setReadOnly(True)
    layout.addWidget(self.textEdit)
    # close button (default action)
    self.closeButton = QPushButton(dwstr.close, self)
    self.closeButton.setAutoDefault(True)
    self.closeButton.clicked.connect(self.close)
    layout.addWidget(self.closeButton)
    # button to empty the log file
    self.clearButton = QPushButton(dwstr.clearLogTitle, self)
    self.clearButton.clicked.connect(self.clearLog)
    layout.addWidget(self.clearButton)
    self.setGeometry(100, 100, 800, 800)
    self.setLayout(layout)
def removeUnusedBibtexs(existingBibsDict):
    """Scan the bibtex entries of the existing .bib file
    and drop the ones that never appear inside \cite commands.
    """
    kept = {}
    notFound = []
    for key in existingBibsDict:
        if key in self.allCitations:
            kept[key] = existingBibsDict[key]
        else:
            notFound.append(key)
    # write entries sorted case-insensitively by their ID
    sortedIds = sorted((e["ID"] for e in kept.values()), key=lambda s: s.lower())
    db.entries = [kept[k] for k in sortedIds]
    bibf = pbWriter.write(db)
    try:
        with open(outFileName, "w") as outf:
            outf.write(exstr.byPhysbiblio + bibf)
            pBLogger.info(exstr.entriesRemoved % notFound)
    except IOError:
        pBLogger.exception(exstr.errorWrite % outFileName)
def getGenericInfo(self, string, fields, rows=pbConfig.params["maxExternalAPIResults"]):
    """Use the unofficial python client for the ADS API to obtain
    a list of results from a given search string.

    Parameters:
        string: the search string
        fields: a list with the names of the required fields
        rows: the number of rows to obtain

    Output:
        a list of ads objects with the obtained entries,
        or an empty list on errors
    """
    ads.config.token = pbConfig.params["ADSToken"]
    try:
        self.q = ads.SearchQuery(q=string, fl=fields, rows=rows)
        # materialize the lazy query here so API errors surface now
        # (renamed from ambiguous single-letter `l`)
        results = list(self.q)
    except ads.exceptions.APIResponseError:
        # invalid or missing ADS token
        pBLogger.exception(self.unauthorized)
    except Exception:
        pBLogger.exception(self.genericFetchError, exc_info=True)
    else:
        pBLogger.info(self.getLimitInfo())
        return results
    return []
def getSeries(url):
    """Fetch the given URL and return the list of JSON hits.

    Parameters:
        url: the page to download

    Output:
        the list of records, or an empty list on errors
    """
    raw = self.http.get(url, timeout=self.timeout).content.decode("utf-8")
    try:
        return json.loads(raw)["hits"]["hits"]
    except ValueError:
        # not valid JSON: probably an empty response
        pBLogger.warning(isstr.emptyResponse)
        return []
    except Exception:
        pBLogger.exception(isstr.errorReadPage)
        return []
def backupCopy(self, fileName):
    """Creates a backup copy of the given file.

    Parameters:
        fileName: the name of the file to be backed up

    Output:
        True on success, False otherwise
    """
    if not os.path.isfile(fileName):
        return False
    try:
        shutil.copy2(fileName, fileName + self.backupExtension)
    except IOError:
        pBLogger.exception(exstr.cannotWriteBackup)
        return False
    return True
def restoreBackupCopy(self, fileName):
    """Restores the backup copy of the given file, if any.

    Parameters:
        fileName: the name of the file to be restored

    Output:
        True on success, False otherwise
    """
    backupName = fileName + self.backupExtension
    if not os.path.isfile(backupName):
        return False
    try:
        shutil.copy2(backupName, fileName)
    except IOError:
        pBLogger.exception(exstr.cannotRestoreBackup)
        return False
    return True
def rmBackupCopy(self, fileName):
    """Deletes the backup copy of the given file, if any.

    Parameters:
        fileName: the name of the file of which
            the backup should be deleted

    Output:
        True on success or if no backup exists, False otherwise
    """
    backupName = fileName + self.backupExtension
    if not os.path.isfile(backupName):
        # nothing to delete: report success
        return True
    try:
        os.remove(backupName)
    except IOError:
        pBLogger.exception(exstr.cannotRemoveBackup)
        return False
    return True
def loadInterfaces(self):
    """Import the subclasses that interface with the main websites
    where bibtex info can be searched, and store them in the
    `self.webSearch` dictionary.
    The module names are listed in `self.interfaces`.
    """
    if self.loaded:
        return
    for modName in self.interfaces:
        try:
            module = __import__(
                "physbiblio.webimport." + modName,
                globals(),
                locals(),
                ["WebSearch"],
            )
            self.webSearch[modName] = getattr(module, "WebSearch")()
        except Exception:
            pBLogger.exception(self.errorImportMethod % modName)
    self.loaded = True
def prepareSelected(self):
    """Fill the dictionary `self.selectedElements` according
    to the previous selection: every element of the current
    `self.dataList` starts unselected, then the identifiers
    stored in `self.previous` are marked as selected again.
    """
    self.layoutAboutToBeChanged.emit()
    self.selectedElements = {}
    try:
        for bib in self.dataList:
            self.selectedElements[self.getIdentifier(bib)] = False
    except AttributeError:
        # self.dataList not defined yet
        pBLogger.exception(ccstr.dataListNotDef)
    for prevK in self.previous:
        try:
            # idiomatic truthiness test instead of `== False` (E712)
            if not self.selectedElements[prevK]:
                self.selectedElements[prevK] = True
        except (KeyError, IndexError):
            # previously selected key no longer in the current list
            pBLogger.exception(ccstr.invalidIdentif % (prevK))
    self.layoutChanged.emit()
def exportRows(self, fileName, rows):
    """Export the given entries into a .bib file.

    Parameters:
        fileName: the name of the output bibtex file
        rows: the list of entries to be exported
    """
    self.backupCopy(fileName)
    if rows == []:
        pBLogger.info(exstr.noElement)
    else:
        try:
            with codecs.open(fileName, "w", "utf-8") as outf:
                for entry in rows:
                    outf.write(entry["bibtex"] + "\n")
        except Exception:
            # writing failed: put the original file back
            pBLogger.exception(exstr.errorExport, traceback)
            self.restoreBackupCopy(fileName)
    self.rmBackupCopy(fileName)
def retrieveUrlFirst(self, string):
    """Retrieves the first (only) result from the content
    of the given web page.

    Parameters:
        string: the search string (the DOI)

    Output:
        returns the bibtex string
    """
    url = self.createUrl(string)
    pBLogger.info(self.searchInfo % (string, url))
    page = self.textFromUrl(url, self.headers)
    # the DOI resolver signals a miss through the page title
    if "<title>Error: DOI Not Found</title>" in page:
        return ""
    try:
        return parse_accents_str(page[:])
    except Exception:
        pBLogger.exception(self.genericError)
    return ""
def retrieveUrlFirst(self, string):
    """Retrieves the first (only) result from the content
    of the given web page.

    Parameters:
        string: the search string (the ISBN)

    Output:
        returns the bibtex string
    """
    self.urlArgs["isbn"] = string
    url = self.createUrl()
    pBLogger.info(self.searchInfo % (string, url))
    page = self.textFromUrl(url)
    # the service answers "Not found" when the ISBN is unknown
    if "Not found" in page:
        return ""
    try:
        return parse_accents_str(page[:])
    except Exception:
        pBLogger.exception(self.genericError)
    return ""
def getBibtexs(self, bibcodes):
    """Obtain a string containing the bibtex entries
    for all the requested bibcodes.

    Parameter:
        bibcodes: a single bibcode (string containing the ADS
            identifier of a given entry) or a list of bibcodes

    Output:
        a string with all the bibtex entries,
        or an empty string on errors
    """
    ads.config.token = pbConfig.params["ADSToken"]
    try:
        self.q = ads.ExportQuery(bibcodes=bibcodes, format="bibtex")
        exported = self.q.execute()
    except ads.exceptions.APIResponseError:
        # invalid or missing ADS token
        pBLogger.exception(self.unauthorized)
    except Exception:
        pBLogger.exception(self.genericExportError, exc_info=True)
    else:
        pBLogger.info(self.getLimitInfo())
        return exported
    return ""
def updateExportedBib(self, fileName, overwrite=False):
    """Reads a bibtex file and updates the entries that it contains,
    for example if the entry has been published.

    Parameters:
        fileName: the name of the considered bibtex file
        overwrite (boolean, default False): if True, the previous
            version of the file is replaced
            and no backup copy is created

    Output:
        True if successful, False if errors occurred
    """
    self.backupCopy(fileName)
    content = ""
    try:
        with open(fileName) as inf:
            content += inf.read()
    except IOError:
        pBLogger.exception(exstr.cannotWrite)
        return False
    try:
        parsed = bibtexparser.bparser.BibTexParser(common_strings=True).parse(content)
    except IndexError:
        pBLogger.exception(exstr.errorLoading)
        return False
    db = bibtexparser.bibdatabase.BibDatabase()
    db.entries = []
    for record in parsed.entries:
        key = record["ID"]
        stored = pBDB.bibs.getByBibkey(key, saveQuery=False)
        if len(stored) > 0:
            # prefer the (possibly updated) version from the database
            db.entries.append(stored[0]["bibtexDict"])
        else:
            db.entries.append(record)
    newText = pbWriter.write(db).strip()
    try:
        with codecs.open(fileName, "w", "utf-8") as outf:
            outf.write(newText)
    except Exception:
        # writing failed: restore the previous version
        pBLogger.exception(exstr.errorExport)
        self.restoreBackupCopy(fileName)
        return False
    if overwrite:
        self.rmBackupCopy(fileName)
    return True
def exportForTexFile(
    self,
    texFileName,
    outFileName,
    overwrite=False,
    autosave=True,
    updateExisting=False,
    removeUnused=False,
    reorder=False,
    newOperation=True,
):
    """Reads a .tex file looking for the \cite{} commands,
    collects the bibtex entries cited in the text and stores
    them in a bibtex file.
    The entries are taken from the database first,
    or from INSPIRE-HEP if possible.
    The downloaded entries are saved in the database.

    Parameters:
        texFileName: the name (or a list of names)
            of the considered .tex file(s)
        outFileName: the name of the output file,
            where the required entries will be added
        overwrite (boolean, default False): if True, the previous
            version of the file is replaced and no backup copy
            is created
        autosave (boolean, default True): if True, the changes
            to the database are automatically saved
        updateExisting (boolean, default False): if True,
            remove duplicates and update entries
            that have been changed in the DB
        removeUnused (boolean, default False): if True,
            remove bibtex entries that are no more cited
            in the tex files
        reorder (boolean, default False): if True, reorder
            (not update!) the bibtex entries in the bib files
            before adding the new ones
        newOperation (boolean, default True): reset the
            self.existingBibsList and read file .bib content.
            Time consuming! better to just keep it updated
            when using multiple texs...

    Output:
        True if successful, False if errors occurred
    """
    db = bibtexparser.bibdatabase.BibDatabase()

    def printOutput(
        reqBibkeys, miss, retr, nFound, unexp, nKeys, warn, totalCites, full=False
    ):
        """Print information on the process"""
        pBLogger.info(exstr.resume)
        if totalCites is not None:
            pBLogger.info(exstr.keysFound % totalCites)
        pBLogger.info(exstr.newKeysFound % len(reqBibkeys))
        j = ", "
        if full:
            pBLogger.info(j.join(reqBibkeys))
        if len(miss) > 0:
            pBLogger.info(exstr.missingEntries % len(miss))
            if full:
                pBLogger.info(j.join(miss))
        if len(retr) > 0:
            pBLogger.info(exstr.retrievedEntries % len(retr))
            pBLogger.info(j.join(retr))
        if len(nFound) > 0:
            pBLogger.info(exstr.entriesNotFound % len(nFound))
            pBLogger.info(j.join(nFound))
        if len(unexp) > 0:
            pBLogger.info(exstr.unexpectedForEntries % len(unexp))
            pBLogger.info(j.join(unexp))
        if len(nKeys.keys()) > 0:
            pBLogger.info(
                exstr.nonMatchingEntries % len(nKeys.keys())
                + "\n".join(["'%s' => '%s'" % (k, n) for k, n in nKeys.items()])
            )
        pBLogger.info(exstr.totalWarnings % warn)

    def saveEntryOutBib(a, m=None):
        """Remove unwanted fields and add the bibtex entry
        to the output file

        Parameters:
            a: the bibtex entry
            m: the ID (bibtex key) of the entry,
                if it is not the default one
        """
        entry = (
            bibtexparser.bparser.BibTexParser(common_strings=True)
            .parse(a)
            .entries[0]
        )
        # drop the fields that must not appear in the exported file
        for u in self.unwantedFields:
            try:
                del entry[u]
            except KeyError:
                pass
        # if a custom key was requested, force it into the entry
        if m is not None:
            m = m.strip()
            if m != entry["ID"].strip():
                entry["ID"] = m
        db.entries = [entry]
        bibf = pbWriter.write(db)
        try:
            with open(outFileName, "a") as o:
                o.write(bibf)
                pBLogger.info(exstr.entryInserted % m)
        except IOError:
            pBLogger.exception(exstr.errorWrite % outFileName)
            return False

    def removeUnusedBibtexs(existingBibsDict):
        """Functions that reads the list of bibtex entries
        in the existing .bib file and removes
        the ones that are not inside \cite commands
        """
        newDict = {}
        notFound = []
        for k, v in existingBibsDict.items():
            if k in self.allCitations:
                newDict[k] = existingBibsDict[k]
            else:
                notFound.append(k)
        # rewrite the output file with the kept entries only,
        # sorted case-insensitively by their ID
        db.entries = [
            newDict[k]
            for k in sorted(
                [e["ID"] for e in newDict.values()], key=lambda s: s.lower()
            )
        ]
        bibf = pbWriter.write(db)
        try:
            with open(outFileName, "w") as o:
                o.write(exstr.byPhysbiblio + bibf)
                pBLogger.info(exstr.entriesRemoved % notFound)
        except IOError:
            pBLogger.exception(exstr.errorWrite % outFileName)

    self.exportForTexFlag = True
    pBLogger.info(exstr.startEFTF)
    pBLogger.info(exstr.readFrom % texFileName)
    pBLogger.info(exstr.saveTo % outFileName)
    if autosave:
        pBLogger.info(exstr.autoSave)
    # accumulators for the final report
    missing = []
    newKeys = {}
    notFound = []
    requiredBibkeys = []
    retrieved = []
    unexpected = []
    warnings = 0
    totalCites = 0
    # if overwrite, reset the output file
    if overwrite:
        updateExisting = False
        removeUnused = False
        reorder = False
        try:
            with open(outFileName, "w") as o:
                o.write(exstr.byPhysbiblio)
        except IOError:
            pBLogger.exception(exstr.cannotWrite)
            return False
    # read previous content of output file, if any
    try:
        with open(outFileName, "r") as f:
            existingBibText = f.readlines()
    except IOError:
        pBLogger.error(exstr.cannotRead % outFileName)
        try:
            open(outFileName, "w").close()
        except IOError:
            pBLogger.exception(exstr.cannotCreate % outFileName)
            return False
        existingBibText = ""
    # this is time consuming if there are many entries.
    # Do not load it every time for multiple texs!
    if newOperation:
        self.allCitations = set([])
        if existingBibText != "":
            self.existingBibsList = pBDB.bibs.parseAllBibtexs(
                existingBibText, verbose=False
            )
        else:
            self.existingBibsList = []
    # work with dictionary, so that if there are repeated entries
    # (entries with same ID) they are automatically discarded
    existingBibsDict = CaseInsensitiveDict()
    for e in self.existingBibsList:
        existingBibsDict[e["ID"]] = e
    # if requested, do some cleaning
    if updateExisting or reorder:
        # update entry from DB if existing
        if updateExisting:
            for k, v in existingBibsDict.items():
                e = pBDB.bibs.getByBibtex(k, saveQuery=False)
                if len(e) > 0 and e[0]["bibtexDict"] != v:
                    existingBibsDict[k] = e[0]["bibtexDict"]
                    if existingBibsDict[k]["ID"].lower() != k.lower():
                        existingBibsDict[k]["ID"] = k
        # write new (updated) bib content
        # (so also repeated entries are removed)
        db.entries = [
            existingBibsDict[k]
            for k in sorted(
                [e["ID"] for e in existingBibsDict.values()],
                key=lambda s: s.lower(),
            )
        ]
        bibf = pbWriter.write(db)
        try:
            with open(outFileName, "w") as o:
                o.write(exstr.byPhysbiblio + bibf)
                pBLogger.info(exstr.outputUpdated)
        except IOError:
            pBLogger.exception(exstr.errorWrite % outFileName)
    # if there is a list of tex files, run this function
    # for each of them...no updateExisting and removeUnused!
    if isinstance(texFileName, list):
        if len(texFileName) == 0:
            return False
        elif len(texFileName) == 1:
            texFileName = texFileName[0]
        else:
            for t in texFileName:
                req, m, ret, nF, un, nK, w, cits = self.exportForTexFile(
                    t,
                    outFileName,
                    overwrite=False,
                    autosave=autosave,
                    updateExisting=False,
                    removeUnused=False,
                    reorder=False,
                    newOperation=False,
                )
                # merge the partial results into the accumulators
                requiredBibkeys += req
                missing += m
                retrieved += ret
                notFound += nF
                unexpected += un
                for k, v in nK.items():
                    newKeys[k] = v
                warnings += w
            pBLogger.info(exstr.doneAllTexs)
            if removeUnused:
                removeUnusedBibtexs(existingBibsDict)
            printOutput(
                requiredBibkeys,
                missing,
                retrieved,
                notFound,
                unexpected,
                newKeys,
                warnings,
                len(self.allCitations),
                full=True,
            )
            return (
                requiredBibkeys,
                missing,
                retrieved,
                notFound,
                unexpected,
                newKeys,
                warnings,
                len(self.allCitations),
            )
    # read the texFile
    keyscont = ""
    try:
        with open(texFileName) as r:
            keyscont += r.read()
    except IOError:
        pBLogger.exception(exstr.errorNoFile % texFileName)
        return False
    # extract \cite* commands
    matchKeys = "([0-9A-Za-z_\-':\+\.\&]+)"
    cite = re.compile(
        "\\\\(cite|citep|citet)\{([\n ]*" + matchKeys + "[,]?[\n ]*)*\}",
        re.MULTILINE,
    )  # find \cite{...}
    citeKeys = re.compile(
        matchKeys, re.MULTILINE
    )  # find the keys inside \cite{...}
    citaz = [m for m in cite.finditer(keyscont) if m != ""]
    pBLogger.info(exstr.citeFound % len(citaz))
    # extract required keys from \cite* commands
    for c in citaz:
        try:
            for e in [l.group(1) for l in citeKeys.finditer(c.group())]:
                e = e.strip()
                if e == "" or e in ["cite", "citep", "citet"]:
                    continue
                self.allCitations.add(e)
                if e not in requiredBibkeys:
                    try:
                        # just to check if already present
                        tmp = existingBibsDict[e]
                    except KeyError:
                        requiredBibkeys.append(e)
        except (IndexError, AttributeError, TypeError):
            pBLogger.warning(exstr.errorCitation % c.group())
    # NOTE(review): `a` looks unused below — confirm before removing
    a = []
    pBLogger.info(
        exstr.newKeysTotal % (len(requiredBibkeys), len(self.allCitations))
    )
    # if True, remove unused bibtex entries
    if removeUnused:
        removeUnusedBibtexs(existingBibsDict)
    # check what is missing in the database and insert/import
    # what is needed:
    for m in requiredBibkeys:
        if m.strip() == "":
            continue
        entry = pBDB.bibs.getByBibtex(m)
        entryMissing = len(entry) == 0
        if not self.exportForTexFlag:
            # if flag set, stop execution and
            # go to the end skipping everything
            continue
        elif not entryMissing:
            # if already in the database, just insert it as it is
            bibtex = entry[0]["bibtex"]
            bibtexDict = entry[0]["bibtexDict"]
        else:
            # if no entry is found, mark it as missing
            missing.append(m)
            # if not present, try INSPIRE import
            pBLogger.info(exstr.keyMissing % m)
            newWeb = pBDB.bibs.loadAndInsert(m, returnBibtex=True)
            newCheck = pBDB.bibs.getByBibtex(m, saveQuery=False)
            # if the import worked, insert the entry
            if len(newCheck) > 0:
                # if key is not matching,
                # just replace it in the exported bib and print a message
                if m.strip().lower() != newCheck[0]["bibkey"].lower():
                    warnings += 1
                    newKeys[m] = newCheck[0]["bibkey"]
                if newCheck[0]["bibkey"] not in retrieved:
                    retrieved.append(newCheck[0]["bibkey"])
                    pBDB.catBib.insert(
                        pbConfig.params["defaultCategories"], newCheck[0]["bibkey"]
                    )
                bibtex = newCheck[0]["bibtex"]
                bibtexDict = newCheck[0]["bibtexDict"]
            else:
                # if nothing found, add a warning for the end
                warnings += 1
                notFound.append(m)
                continue
            pBLogger.info("\n")
        # save in output file
        try:
            bibtexDict["ID"] = m
            self.existingBibsList.append(bibtexDict)
            saveEntryOutBib(bibtex, m)
        except:
            unexpected.append(m)
            pBLogger.exception(exstr.unexpectedEntry % m)
    if autosave:
        pBDB.commit()
    printOutput(
        requiredBibkeys,
        missing,
        retrieved,
        notFound,
        unexpected,
        newKeys,
        warnings,
        len(self.allCitations),
    )
    return (
        requiredBibkeys,
        missing,
        retrieved,
        notFound,
        unexpected,
        newKeys,
        warnings,
        len(self.allCitations),
    )
def arxivRetriever(self, string, searchType="all", additionalArgs=None, fullDict=False):
    """Reads the feed content got from arxiv into a dictionary,
    used to return a bibtex.

    Parameters:
        string: the search string
        searchType: the search method in arxiv API (default 'all').
            The possible values are:
            ti-> Title
            au -> Author
            abs -> Abstract
            co -> Comment
            jr -> Journal Reference
            cat -> Subject Category
            rn -> Report Number
            id -> Id (use id_list instead)
            all -> All of the above
        additionalArgs: a dictionary of additional arguments
            that can be passed to self.urlArgs (default None)
        fullDict (logical): return the bibtex dictionary in addition
            to the bibtex text (default False)

    Output:
        the bibtex text
        (optional, depending on fullDict): the bibtex Dictionary
    """
    if additionalArgs:
        for k, v in additionalArgs.items():
            self.urlArgs[k] = v
    self.urlArgs["search_query"] = searchType + ":" + string
    url = self.createUrl()
    pBLogger.info(self.searchInfo % (searchType, string, url))
    text = parse_accents_str(self.textFromUrl(url))
    try:
        data = feedparser.parse(text)
        db = BibDatabase()
        db.entries = []
        dictionaries = []
        # build one bibtex dictionary per feed entry
        for entry in data["entries"]:
            dictionary = {}
            # the arxiv id is the last part of the entry link
            idArx = (entry["id"].replace("http://arxiv.org/abs/", "").replace(
                "https://arxiv.org/abs/", ""))
            # drop the version suffix (e.g. "1234.5678v2" -> "1234.5678")
            pos = idArx.find("v")
            if pos >= 0:
                idArx = idArx[0:pos]
            dictionary["ENTRYTYPE"] = "article"
            dictionary["ID"] = idArx
            dictionary["archiveprefix"] = "arXiv"
            dictionary["title"] = entry["title"]
            dictionary["arxiv"] = idArx
            # the DOI is not always present in the feed
            try:
                dictionary["doi"] = entry["arxiv_doi"]
            except KeyError as e:
                pBLogger.debug("KeyError: %s" % e)
            dictionary["abstract"] = entry["summary"].replace("\n", " ")
            dictionary["authors"] = " and ".join(
                [au["name"] for au in entry["authors"]])
            dictionary["primaryclass"] = entry["arxiv_primary_category"][
                "term"]
            year = self.getYear(dictionary["arxiv"])
            if year is not None:
                dictionary["year"] = year
            db.entries.append(dictionary)
            dictionaries.append(dictionary)
        if fullDict:
            # prefer the entry whose arxiv id matches the search string,
            # falling back to the first one
            dictionary = dictionaries[0]
            for d in dictionaries:
                if string in d["arxiv"]:
                    dictionary = d
            return pbWriter.write(db), dictionary
        else:
            return pbWriter.write(db)
    except Exception:  # intercept all other possible errors
        pBLogger.exception(self.genericError)
        if fullDict:
            return "", {}
        else:
            return ""
def plotStats(
    self,
    paper=False,
    author=False,
    show=False,
    save=False,
    path=".",
    markPapers=False,
    pickVal=6,
):
    """Plot the collected information, using matplotlib.pyplot.

    Parameters:
        paper (boolean, default False): plot statistics
            for the last analyzed paper
        author (boolean, default False): plot statistics
            for the last analyzed author
        show (boolean, default False): True to show the plots
            in a separate window (with matplotlib.pyplot.show())
        save (boolean, default False): True to save
            the plots into files.
        path (string): where to save the plots
        markPapers (boolean, default False): True to draw
            a vertical lines at the dates
            corresponding to a paper appearing
        pickVal (float, default 6): the picker tolerance

    Output:
        False if paper==False and author==False,
        the matplotlib.pyplot figure containing
            the citation plot if paper==True,
        a list of matplotlib.pyplot figures containing
            the various plots if author==True
    """
    if paper and self.paperPlotInfo is not None:
        # single-paper mode: one citation-count plot
        if len(self.paperPlotInfo["citList"][0]) > 0:
            pBLogger.info(isstr.plotPaper % self.paperPlotInfo["id"])
            fig, ax = plt.subplots()
            plt.plot(
                self.paperPlotInfo["citList"][0],
                self.paperPlotInfo["citList"][1],
                picker=True,
                pickradius=pickVal,
            )
            fig.autofmt_xdate()
            if save:
                pdf = PdfPages(
                    osp.join(path, self.paperPlotInfo["id"] + ".pdf"))
                pdf.savefig()
                pdf.close()
            if show:
                plt.show()
            plt.close()
            return fig
    elif author and self.authorPlotInfo is not None:
        pBLogger.info(isstr.plotAuthor % self.authorPlotInfo["name"])
        # year range for the histograms, padded by 2 years;
        # fall back to paper dates only if citation dates are missing
        try:
            ymin = min(
                int(self.authorPlotInfo["allLi"][0][0].strftime("%Y")) - 2,
                int(self.authorPlotInfo["paLi"][0][0].strftime("%Y")) - 2,
            )
            ymax = max(
                int(self.authorPlotInfo["allLi"][0][-1].strftime("%Y")) + 2,
                int(self.authorPlotInfo["paLi"][0][-1].strftime("%Y")) + 2,
            )
        except:
            try:
                ymin = int(
                    self.authorPlotInfo["paLi"][0][0].strftime("%Y")) - 2
                ymax = int(
                    self.authorPlotInfo["paLi"][0][-1].strftime("%Y")) + 2
            except:
                pBLogger.warning(isstr.noPublications)
                return False
        figs = []
        # cumulative number of papers in time
        if len(self.authorPlotInfo["paLi"][0]) > 0:
            fig, ax = plt.subplots()
            plt.title(isstr.paperNumber)
            plt.plot(
                self.authorPlotInfo["paLi"][0],
                self.authorPlotInfo["paLi"][1],
                picker=True,
                pickradius=pickVal,
            )
            fig.autofmt_xdate()
            if save:
                pdf = PdfPages(
                    osp.join(path, self.authorPlotInfo["name"] + "_papers.pdf"))
                pdf.savefig()
                pdf.close()
            if show:
                plt.show()
            plt.close()
            figs.append(fig)
        # histogram of papers per year
        if len(self.authorPlotInfo["paLi"][0]) > 0:
            fig, ax = plt.subplots()
            plt.title(isstr.paperYear)
            ax.hist(
                [
                    int(q.strftime("%Y"))
                    for q in self.authorPlotInfo["paLi"][0]
                ],
                bins=range(ymin, ymax),
                picker=True,
            )
            ax.get_xaxis().get_major_formatter().set_useOffset(False)
            plt.xlim([ymin, ymax])
            if save:
                pdf = PdfPages(
                    osp.join(
                        path,
                        self.authorPlotInfo["name"] + "_yearPapers.pdf"))
                pdf.savefig()
                pdf.close()
            if show:
                plt.show()
            plt.close()
            figs.append(fig)
        # cumulative number of citations in time
        if len(self.authorPlotInfo["allLi"][0]) > 0:
            fig, ax = plt.subplots()
            plt.title(isstr.totalCitations)
            plt.plot(
                self.authorPlotInfo["allLi"][0],
                self.authorPlotInfo["allLi"][1],
                picker=True,
                pickradius=pickVal,
            )
            fig.autofmt_xdate()
            if save:
                pdf = PdfPages(
                    osp.join(path, self.authorPlotInfo["name"] + "_allCit.pdf"))
                pdf.savefig()
                pdf.close()
            if show:
                plt.show()
            plt.close()
            figs.append(fig)
        # histogram of citations per year
        if len(self.authorPlotInfo["allLi"][0]) > 0:
            fig, ax = plt.subplots()
            plt.title(isstr.citationsYear)
            ax.hist(
                [
                    int(q.strftime("%Y"))
                    for q in self.authorPlotInfo["allLi"][0]
                ],
                bins=range(ymin, ymax),
                picker=True,
            )
            ax.get_xaxis().get_major_formatter().set_useOffset(False)
            plt.xlim([ymin, ymax])
            if save:
                pdf = PdfPages(
                    osp.join(path, self.authorPlotInfo["name"] + "_yearCit.pdf"))
                pdf.savefig()
                pdf.close()
            if show:
                plt.show()
            plt.close()
            figs.append(fig)
        # mean citations per paper in time,
        # optionally marking the dates when papers appeared
        if len(self.authorPlotInfo["meanLi"][0]) > 0:
            fig, ax = plt.subplots()
            plt.title(isstr.meanCitations)
            plt.plot(
                self.authorPlotInfo["meanLi"][0],
                self.authorPlotInfo["meanLi"][1],
                picker=True,
                pickradius=pickVal,
            )
            fig.autofmt_xdate()
            if markPapers:
                for q in self.authorPlotInfo["paLi"][0]:
                    plt.axvline(
                        datetime.datetime(
                            int(q.strftime("%Y")),
                            int(q.strftime("%m")),
                            int(q.strftime("%d")),
                        ),
                        color="k",
                        ls="--",
                    )
            if save:
                pdf = PdfPages(
                    osp.join(path, self.authorPlotInfo["name"] + "_meanCit.pdf"))
                pdf.savefig()
                pdf.close()
            if show:
                plt.show()
            plt.close()
            figs.append(fig)
        # citation history of each single paper, all in one figure
        if len(self.authorPlotInfo["aI"].keys()) > 0:
            fig, ax = plt.subplots()
            plt.title(isstr.citationsPaper)
            for i, p in enumerate(self.authorPlotInfo["aI"].keys()):
                try:
                    plt.plot(
                        self.authorPlotInfo["aI"][p]["citingPapersList"]
                        [0],
                        self.authorPlotInfo["aI"][p]["citingPapersList"]
                        [1],
                    )
                except:
                    pBLogger.exception(isstr.errorPlotting)
            fig.autofmt_xdate()
            if save:
                pdf = PdfPages(
                    osp.join(path, self.authorPlotInfo["name"] + "_paperCit.pdf"))
                pdf.savefig()
                pdf.close()
            if show:
                plt.show()
            plt.close()
            figs.append(fig)
        return figs
    else:
        # neither paper nor author info available: nothing to plot
        pBLogger.info(isstr.noPlot)
        return False