def pybtex_to_bibtex_string(
    entry: Entry,
    bibtex_key: str,
    delimiters: tuple[str, str] = ("{", "}"),
    indent: str = " ",
    sort: bool = False,
    unicode: bool = True,
) -> str:
    """String representation of BibTeX entry.

    Person fields come first, then regular fields (optionally sorted by
    name).  With ``unicode=False`` values are passed through
    ``unicode_to_latex`` — except titles, whose "protective" braces
    (e.g. "{Krylov}") must not be escaped.
    """
    open_delim, close_delim = delimiters
    lines = []

    assert entry.persons is not None
    for role, persons in entry.persons.items():
        joined = " and ".join(_get_person_str(person) for person in persons)
        if not unicode:
            joined = unicode_to_latex(joined)
        lines.append(f"{role.lower()} = {open_delim}{joined}{close_delim}")

    assert entry.fields is not None
    field_names = sorted(entry.fields.keys()) if sort else entry.fields.keys()
    for name in field_names:
        value = entry.fields[name]
        # Keys are always emitted lowercase.
        name = name.lower()
        if name == "month":
            # Months use bare three-letter abbreviations, no delimiters.
            month_string = translate_month(value)
            if month_string:
                lines.append(f"{name} = {month_string}")
            continue
        try:
            value = value.replace("\N{REPLACEMENT CHARACTER}", "?")
        except AttributeError:
            # Non-string value (e.g. an int); leave it as-is.
            pass
        # Skip title; otherwise the "protective" braces get escaped as well.
        if not unicode and name != "title":
            value = unicode_to_latex(value)
        if value is not None:
            lines.append(f"{name} = {open_delim}{value}{close_delim}")

    # Every content line ends with a comma before the closing brace.
    header = f"@{entry.type}{{{bibtex_key},\n{indent}"
    body = indent.join(line + ",\n" for line in lines)
    return header + body + "}"
def JournalVolumePage(article):
    '''Give journal, volume, page as string (coping with arXiv or in press)'''
    journal = unicode_to_latex(article.bibstem[0])
    # arXiv entries carry everything in the page field.
    if journal == 'arXiv':
        return article.page[0]
    # No volume yet means the paper is still in press.
    if article.volume is None:
        return journal + ', in press'
    return journal + ', ' + str(article.volume) + ', ' + article.page[0]
def WriteArticleListing(fileout, article):
    '''Writes latex entry for paper in my preferred format'''
    authors = article.author
    n_authors = len(authors)
    # Title in LaTeX quotation marks, as an item of an enumerated list.
    fileout.write('\\item ``' + unicode_to_latex(article.title[0]) + '\'\', ')
    if n_authors == 1:
        # Solo paper.
        fileout.write(AuthorNameAbbreviation(authors[0]) + ', ')
    elif n_authors <= 4:
        # Short author list: "A, B \& C, ".
        for idx in range(n_authors - 1):
            fileout.write(AuthorNameAbbreviation(authors[idx]))
            if idx < n_authors - 2:
                fileout.write(', ')
        fileout.write(' \\& ' + AuthorNameAbbreviation(authors[n_authors - 1]) + ', ')
    else:
        # Long author list: first four names, then "et al."; mention the
        # list maker explicitly when they are not among the first four.
        listmaker_shown = False
        for idx in range(4):
            fileout.write(AuthorNameAbbreviation(authors[idx]) + ', ')
            if SurnameofListmaker(authors[idx]):
                listmaker_shown = True
        if listmaker_shown:
            fileout.write('et al., ')
        else:
            fileout.write('et al. (including ' + AuthorName + '), ')
    fileout.write(article.year + ', ' + JournalVolumePage(article) + '.')
    # Citation count, when present and non-zero.
    if (article.citation_count is None) or (article.citation_count == 0):
        fileout.write('\n\n')
    else:
        fileout.write(' \\textit{(Citations to date '
                      + str(article.citation_count) + ')}\n\n')
def get_details(cls, aa, norm, tex=False):
    """Collect report metadata for array *aa* under normalisation *norm*.

    Returns a pair of dicts: the metadata values and their display names.
    With ``tex=True`` every value is passed through ``unicode_to_latex``.
    """
    timestamp = datetime.now().replace(microsecond=0)
    detail_values = {
        'program': 'proMAD',
        'version': config.version,
        'url': config.url,
        'date': timestamp.date().isoformat(),
        'time': timestamp.time().isoformat(),
        'array_name': aa.array_data['name'],
        'array_type': aa.array_data['array_type'],
        'array_id': aa.array_data['id'],
        'norm_name': cls.norm_names[norm],
        'norm': norm,
        'norm_description': cls.norm_descriptions[norm],
        'unit': cls.norm_unit[norm],
    }
    if tex:
        detail_values = {key: unicode_to_latex(value)
                         for key, value in detail_values.items()}
    detail_names = {
        'date': 'Date',
        'time': 'Time',
        'program': 'Program',
        'version': 'Version',
        'url': 'URL',
        'array_name': 'Array Name',
        'array_type': 'Array Type',
        'array_id': 'Array ID',
        'norm_name': 'Method',
        'norm': 'Method key',
        'norm_description': 'Method description',
        'unit': 'Unit',
    }
    return detail_values, detail_names
def replace_key(
    key: str,
    data,
    bib_entry: str,
    replacements: List[Tuple[str, str]],
    works: crossref.restful.Works,
) -> str:
    """Rewrite a BibTeX entry body while keeping its citation key intact.

    The body is LaTeX-escaped (non-ASCII only), the given text replacements
    are applied, and a missing ``pages`` field is filled in from Crossref
    on a best-effort basis.
    """
    entry_type = bib_entry.split("{")[0]
    body = bib_entry.split(",", maxsplit=1)[1]
    # Only modify the body — never the key itself.
    # Replace non-ascii characters by their LaTeX equivalent.
    body = unicode_to_latex(body, non_ascii_only=True)
    substitutions = list(replacements)
    with contextlib.suppress(Exception):
        # Prefer the journal abbreviation from Crossref over hard-coded ones.
        substitutions.append(journal_from_crossref(data, works))
    for old, new in substitutions:
        body = body.replace(old, new)
    result = entry_type + "{" + key + "," + body
    if "pages = {" not in result:
        # Best effort: add the page numbers from Crossref if available.
        with contextlib.suppress(Exception):
            pages = pages_from_crossref(data, works)
            result_lines = result.split("\n")
            result_lines.insert(2, f"\tpages = {{{pages}}},")
            result = "\n".join(result_lines)
    return result
def fragments_for_entry(entry):
    "Generate LaTeX for a specific entry."
    heading = "\\subsection{Uit: %s}" % unicode_to_latex(entry['title'])
    # One \haiku command per three-line haiku.
    haiku_commands = [
        f"\\haiku{{{first}}}{{{second}}}{{{third}}}\\\\"
        for first, second, third in entry['haikus']
    ]
    return [heading] + haiku_commands
def text_for_auteur(auteur, entries):
    "Generate LaTeX for a specific auteur."
    fragments = [f"\\section{{{unicode_to_latex(auteur)}}}"]
    for entry in entries:
        fragments += fragments_for_entry(entry)
    return '\n\n'.join(fragments)
def generateCanvas(self):
    """Draw the radar-chart canvas: spokes, grid rings and axis titles."""
    # 1. Draw the straight spokes, one per angle in self.t.
    for a in self.t:
        self.c.stroke(
            path.path(
                path.moveto(0, 0),
                path.lineto(self.s * self.r * np.cos(a),
                            self.s * self.r * np.sin(a))
            ),
            [style.linestyle.solid, color.gray(0.9)]
        )
    # 2. Build the grid — number of lines = gridlines - 1 (plus outer line).
    for r in np.linspace(0, self.r, self.g + 1):
        clr = [style.linestyle.solid, color.gray(0.9)]
        if r < self.r:
            clr = [
                # Dotted line
                style.linestyle(
                    style.linecap.round,
                    style.dash([0, 2])
                ),
                # Dark grey colour
                color.gray(0.3)
            ]
        self.c.stroke(
            path.path(
                path.moveto(0, self.s * r),
                *[path.lineto(self.s * r * np.cos(a),
                              self.s * r * np.sin(a)) for a in self.t],
                path.closepath()
            ),
            # Pass the style as a function of r (outer ring is solid).
            clr
        )
    # 3. Add the titles — taken from the index of the pandas Series.
    titels = self.layers[0]['data'].index
    for t, a in zip(titels[:self.n], self.t):
        # Determine the horizontal alignment from the sign of cos(a).
        halign = [text.halign.boxcenter, text.halign.flushcenter]
        if int(np.cos(a) * 10) < 0:
            halign = [text.halign.boxright, text.halign.flushright]
        elif int(np.cos(a) * 10) > 0:
            halign = [text.halign.boxleft, text.halign.flushleft]
        # Add the text to the context; labels near the bottom of the chart
        # get nudged sideways instead of outward.
        extra_r = 1
        extra_x = 0
        valign = text.valign.middle
        if np.sin(a) < -0.5:
            extra_r = 0
            valign = text.valign.bottom
            extra_x = -0.6 if np.cos(a) < 0 else 0.6
        self.c.text(
            self.s * (self.r + extra_r) * np.cos(a) + extra_x,
            self.s * (self.r + extra_r) * np.sin(a),
            # Transcode the text to LaTeX code.
            unicode_to_latex(t),
            # Specify how the text box is laid out.
            [text.parbox(5), *halign, valign, color.gray(0.3)]
        )
def fix_utf8_field(entry, field, args):
    """Normalize one field of *entry* according to the CLI options.

    If *field* names the option selected via ``args.utf8``, its LaTeX
    escapes are decoded to plain text; if it names ``args.latex``, the
    value is LaTeX-escaped instead.  Other fields pass through unchanged.

    Returns the (mutated) entry dict.
    """
    if field not in entry:
        return entry
    value = entry[field]
    # Bug fix: compare strings with `==`, not `is` — identity comparison on
    # strings depends on interning and silently skips the conversion.
    if field == args.utf8:
        value = LatexNodes2Text().latex_to_text(value)
    elif field == args.latex:
        value = unicode_to_latex(value)
    entry[field] = value
    return entry
def AuthorNameAbbreviation(author):
    '''Give author name in LaTeX with format: Surname, FirstInitial.'''
    fullname = unicode_to_latex(author)
    comma_at = fullname.find(',')
    if comma_at < 0:
        # No comma found: nothing to abbreviate.
        return fullname
    if fullname[:comma_at] == Surname:
        # The list maker is printed with their canonical name.
        return AuthorName
    # Keep "Surname, X" (comma, space, first initial) and append a period.
    return fullname[:comma_at + 3] + '.'
def clean_unicode(text):
    """LaTeX-encode *text* character by character, re-bracing multi-char
    escapes so each encoded character ends up in a single brace group."""
    pieces = []
    for char in text:
        encoded = latexencode.unicode_to_latex(char)
        # Multi-character escapes that are not already fully enclosed in
        # one brace pair get flattened and re-wrapped.
        if len(encoded) > 1 and not (
                encoded[0] == constants.L_BRACE
                and encoded[-1] == constants.R_BRACE):
            tokens = encoded.replace(constants.R_BRACE, "").split(constants.L_BRACE)
            encoded = enclose_braces(" ".join(tokens))
        pieces.append(encoded)
    return "".join(pieces)
def latex_maketitle(self, html_baseurl):
    """Build the LaTeX \\maketitle replacement for the first page.

    Returns raw LaTeX that typesets the title block (logo, title,
    subtitle, author, date) plus the CC-BY-4.0 copyright notice and a
    link to *html_baseurl*.
    """
    # - see this: https://tex.stackexchange.com/questions/409677/edit-1st-page-only
    # - ALSO ADDED THE SUPER IMPORTANT \makeatletter according to
    #   https://groups.google.com/d/msg/sphinx-users/S_ip2b-lrRs/62zkfWcODwAJ
    return r'''
\makeatletter
\pagestyle{empty}
\thispagestyle{empty}
\noindent\rule{\textwidth}{1pt}\par
\begingroup % for PDF information dictionary
\def\endgraf{ }\def\and{\& }%
\pdfstringdefDisableCommands{\def\\{, }}% overwrite hyperref setup
\hypersetup{pdfauthor={\@author}, pdftitle={\@title}}%
\endgroup
\begin{flushright}
\sphinxlogo
\py@HeaderFamily
{\Huge \@title }\par
''' + r"{\itshape\large %s}\par" % unicode_to_latex(
        self.subtitle) + \
        r'''
\vspace{25pt}
{\Large
\begin{tabular}[t]{c}
\@author
\end{tabular}}\par
\vspace{25pt}
\@date \par
\py@authoraddress \par
\end{flushright}
\@thanks
\setcounter{footnote}{0}
\let\thanks\relax\let\maketitle\relax
%\gdef\@thanks{}\gdef\@author{}\gdef\@title{}
\vfill
\noindent
Copyright \copyright\ \the\year\ by \@author.
\vskip 10pt
\noindent
\@title\ is available under the Creative Commons Attribution 4.0
International License, granting you the right to copy, redistribute,
modify, and sell it, so long as you attribute the original to \@author\
and identify any changes that you have made. Full terms of the license
are available at:
\vskip 10pt
\noindent
\url{http://creativecommons.org/licenses/by/4.0/}
\vskip 10pt
\noindent
The complete book can be found online for free at:
\vskip 10pt''' + (r'''
\noindent
\url{%s}''' % html_baseurl)
def write_bibtex(bibtex_entries):
    """Serialize the entries to a BibTeX string via bibtexparser.

    Bookkeeping fields are dropped, list values are joined with " and ",
    and every value is LaTeX-escaped.  Mutates the entry dicts in place.
    """
    database = bibtexparser.bibdatabase.BibDatabase()
    for entry in bibtex_entries:
        # Remove fields that do not belong in the exported file.
        for junk in ('created_time', 'file', 'abstract'):
            entry.pop(junk, None)
        for field in entry:
            if isinstance(entry[field], list):
                entry[field] = ' and '.join(entry[field])
            entry[field] = unicode_to_latex(entry[field])
    database.entries = bibtex_entries
    writer = BibTexWriter()
    writer.contents = ['comments', 'entries']
    writer.indent = ' '
    writer.order_entries_by = ('ENTRYTYPE', 'author', 'year')
    return bibtexparser.dumps(database, writer)
def uniCode2Latex(text: str, withConvert: bool = False) -> str:
    '''
    converts unicode text to latex and
    fixes UTF-8 chars for latex in a certain range:
        ₀:$_0$ ... ₉:$_9$

    see https://github.com/phfaist/pylatexenc/issues/72

    Args:
        text(str): the string to fix
        withConvert(bool): if unicode to latex libary conversion should be used

    Return:
        str: latex presentation of UTF-8 char
    '''
    # Map the subscript digits U+2080..U+2089 to $_0$..$_9$ in one pass.
    subscript_map = {code: f"$_{code - 0x2080}$" for code in range(0x2080, 0x208A)}
    text = text.translate(subscript_map)
    if withConvert:
        text = unicode_to_latex(text)
    return text
def generate_pdf(): df, drive = get_briefs() # Make data latex safe cols = ["Title"] + list(df.columns[6:11]) for col in cols: df[col] = df[col].apply(lambda x: unicode_to_latex(x)) # Generate category list categories = [ n for n in pd.unique(df[df.columns[6:12]].values.ravel("K")) if isinstance(n, str) and n != "nan" ] briefs_by_category = {} for category in sorted(categories): briefs = df[(df[df.columns[6:12]] == category).any(axis=1)] briefs_by_category[category] = [{ "index": index, "title": brief["Title"] } for index, brief in briefs.iterrows()] # Generate flat list briefs_sorted = [] for index, brief in df.sort_values("Title").iterrows(): file_path = os.path.join(cache_path, brief["file_id"] + ".pdf").replace("\\", "/") briefs_sorted.append({ "index": index, "title": brief["Title"], "path": file_path, "id": brief["file_id"] }) briefs = sorted(briefs_sorted, key=lambda item: int(item["index"])) latex_string = latex_jinja_env.get_template("template.tex") with open(os.path.join(path, "indexed_briefs.tex"), "w") as f: f.write( latex_string.render(briefs_by_category=briefs_by_category, briefs_sorted=briefs_sorted, briefs=briefs)) os.system(r"pdflatex -interaction=nonstopmode " + os.path.join(path, "indexed_briefs.tex")) for file in os.listdir(path): if file.startswith("indexed_briefs") and not (file.endswith(".pdf") or file.endswith(".tex")): os.remove(file) # Get list of all files in Google (F**k Google) Drive files = get_pdf(drive_=drive) if files: drive.files().delete(fileId=files['id']).execute() # Upload new file file_metadata = { 'name': f'Indexed Briefs ({str(date.today())})', 'description': 'GlckOayFQgdIdOqRBOL8', "parents": ['1PSgntCxfM-2YidrIjS8hzfzdzoDGv0ze'] } media = http.MediaFileUpload('indexed_briefs.pdf', mimetype='application/pdf') drive.files().create(body=file_metadata, media_body=media).execute()
def df_to_table(df, caption=None, label=None, centering=True, floating='h', fn_out=None, remove_table_number=False, max_string_length=1000, multicolumn=False, add_phantom=False, phantom_length=4, phantom_column_position=0, **kwargs): # TODO pandas 1.0.0 also adds label and caption assert isinstance(df, pd.DataFrame) if add_phantom: # TODO add phantom on multiple columns phantom_column = f'\\phantom{{{"x" * phantom_length}}}' if isinstance(df.columns, pd.MultiIndex): phantom_column = (phantom_column, ) * df.columns.nlevels columns = list(df.columns)[:phantom_column_position] + [ phantom_column ] + list(df.columns)[phantom_column_position:] df[phantom_column] = '' df = df[columns] s = r'\begin{table' if multicolumn: s += '*' s += '}[' + floating + ']\n' if centering: s += r'\centering' + '\n' kwargs.setdefault('escape', True) with pd.option_context("max_colwidth", max_string_length): s += df.to_latex(**kwargs) if caption is not None: s += r'\caption' s += '' if not remove_table_number else '*' s += '{' if kwargs['escape']: s += latexencode.unicode_to_latex(caption) else: s += caption s += '}\n' if label: s += r'\label{' + label + '}\n' s += r'\end{table' if multicolumn: s += '*' s += r'}' if fn_out: with open(fn_out, 'w+') as wf: wf.write(s) else: return s
def exp_latex(cls, aa, file, norm='hist_raw', additional_info=None):
    """
    Export results as tex file.

    Parameters
    ----------
    aa: ArrayAnalyse instant
    norm: str
        evaluation strategy selection (see ArrayAnalyse.evaluate)
    file:
        can be a path to a file (a string), a path-like object,
        or a file-like object (string based)
    additional_info: List[Dict]
        list with dictionaries containing name and value key
    """
    overview = []
    reference = []
    data = aa.evaluate(norm=norm, double_spot=True)
    # Split evaluated spots into reference spots and regular spots.
    for entry in data:
        pos = aa.get_position_string(entry['position'])
        if not isinstance(pos, str):
            # Multiple positions: render as a comma-separated list.
            pos = ", ".join(pos)
        if 'Reference' in entry['info'][0]:
            reference.append(
                dict(name=unicode_to_latex(entry['info'][0]),
                     gene_id=entry['info'][1],
                     position=pos,
                     value=f"{entry['value']:.4g}",
                     sort=entry['value']))
        else:
            overview.append(
                dict(name=unicode_to_latex(entry['info'][0]),
                     gene_id=entry['info'][1],
                     position=pos,
                     value=f"{entry['value']:.4g}",
                     sort=entry['value']))
    # Top 15 non-reference spots by value; full table sorted by name.
    best = sorted(overview, key=lambda s: s['sort'], reverse=True)[:15]
    overview = sorted(overview + reference, key=lambda s: s['name'].lower())
    info_dict = dict(overview=overview, best=best)
    info_dict['dv'], info_dict['dn'] = cls.get_details(aa, norm, tex=True)
    col_num = sum(aa.array_data['net_layout_x'])
    row_num = sum(aa.array_data['net_layout_y'])
    if additional_info:
        # LaTeX-escape the user-supplied name/value pairs before templating.
        for entry in additional_info:
            entry['name'] = unicode_to_latex(entry['name'])
            entry['value'] = unicode_to_latex(entry['value'])
        info_dict['additional_info'] = additional_info
    # Axis annotations: letter labels for rows, numbers for columns, each
    # paired with its fractional cell-center position.
    info_dict['ai'] = dict(row=[(get_column_letter(row_num - n),
                                 f'{n/row_num + 0.5/row_num:.3f}')
                                for n in range(row_num)],
                           col=[(str(n + 1),
                                 f'{n/col_num + 0.5/col_num:.3f}')
                                for n in range(col_num)])
    template = cls.tex_env.get_template('short_report.tex')
    content = template.render(**info_dict)
    if isinstance(file, os.PathLike) or isinstance(file, str):
        file = Path(file)
        file.write_text(content)
        # Ship the logo and alignment figure next to the tex file.
        shutil.copy(config.template_folder / 'logo.png',
                    file.parent / 'logo.png')
        aa.figure_alignment(file=file.parent / 'figure_alignment.jpg')
    elif isinstance(file, (io.TextIOBase, io.TextIOWrapper)):
        # String-based file-like object: rewind and write in place.
        file.seek(0)
        file.write(content)
def SurnameofListmaker(author):
    '''Boolean. True if author is the one we're making the list for.'''
    return unicode_to_latex(author).startswith(Surname)
def fix(string):
    """Return *string* with its characters converted to LaTeX escapes."""
    return unicode_to_latex(string)
def text_to_latex(text):
    """Convert *text* to its LaTeX representation."""
    return unicode_to_latex(text)
print(f"{pkg['name']} not in bib file") cite_cmd = "" if pkg['name'] in bib_map: cite_cmd = r",\newline ".join([ f"\\citet{{{cite_key}}}" for cite_key in bib_map[pkg['name']]['cite'] ]) # Parse list of maintainers to make line wrapping better: maintainer_block = re.sub(' [<(].*?[>)]', '', pkg["maintainer"]) maintainers = [x.split(',') for x in maintainer_block.split(' and ')] maintainers = [ x.strip() for x in sum(maintainers, []) if len(x.strip()) > 0 ] maintainers = [unicode_to_latex(x).replace(" ", "~") for x in maintainers] name = re.sub('_', r'\_', pkg["name"]) pypi_name = re.sub('_', r'\_', pkg["pypi_name"]) lines.append( row.format(url=pkg["repo_url"], name=name, pypi_name=pypi_name, maintainer=r",\newline ".join(maintainers), cite_command=cite_cmd)) print("\n".join(lines)) with open(this_path / "../generated/affiliated-table.tex", "w") as f1: f1.writelines(lines)
print("@misc{%s," % paper["@id"]) elif paper["@type"] == "Patent": print("@misc{%s," % paper["@id"]) first = True for key, value in paper.items(): if key.startswith( "@") or key == "grants" or key == "open" or key == "license": pass else: if not first: sys.stdout.write(",\n") else: first = False if key == "author": sys.stdout.write( "author=\"%s\"" % " and ".join([unicode_to_latex(v) for v in value])) elif key == "editor": sys.stdout.write("editor=\"%s\"" % " and ".join(value)) elif key == "title": sys.stdout.write("title={{%s}}" % fix(value)) else: sys.stdout.write("%s=\"%s\"" % (key, fix(str(value)))) print("") print("}") print("") #out.write("\\bibliographystyle{ieeetr}\n") #out.write("\\bibliography{../publications}\n") #out.write("\\end{document}\n")
articles = list(ads.SearchQuery( q='(orcid:'+ ORCID +')'\ 'database:astronomy',rows=nArticlesMax, fl=['id', 'first_author','author', 'author_norm', 'year', 'title','citation_count', 'volume','bibstem', 'page','identifier','bibcode'], sort='date')) if (len(articles) == nArticlesMax): print('WARNING: Too many articles to parse') # Remove articles I don't really want in there for i, article in enumerate(articles): Journal = unicode_to_latex(article.bibstem[0]) if (Journal == 'EAS') | (Journal == 'EPJWC'): del articles[i] # Count citations Ncites = 0 NcitesFirstAuthor = 0 print('bibcodes are:') for article in articles: print(article.title[0], article.bibcode) if article.citation_count is not None: Ncites += article.citation_count if article.first_author[0:len(Surname)] == Surname: NcitesFirstAuthor += article.citation_count print()
def unicode_to_latex(cls, text):
    """LaTeX-escape *text* via the module-level ``unicode_to_latex``.

    Note: the bare call below resolves to the module-level function, not
    this method (method bodies do not see class scope), so it does not
    recurse.  The previous docstring ("A wrapper for calendar.month_name")
    was a copy-paste leftover.
    """
    return unicode_to_latex(text)
unvetted = authors[ ~(np.isin(authors[email_col], list(unq_emails)) | np.isin(authors['Email Address'], list(unq_emails)) | np.isin(authors[email_col], vetted_emails)) ] unvetted[name_col, email_col].pprint(max_width=1000) if len(unvetted) > 0: raise RuntimeError("Unvetted authors in the authorship form results!") all_authors = [] for row in authors: # Author name name = row['Your name (as it will appear in the journal)'] if "\\" not in name: name = unicode_to_latex(name) name = name.replace(" ", "~") if len(row['Your ORCID (if you have one)']) > 0: orcid = f"[{row['Your ORCID (if you have one)'].strip()}]" else: orcid = '' author = rf"\author{orcid}{{{name}}}" # Affiliation if ( len(row['Institutional affiliation']) > 0 and row['Institutional affiliation'] != 'None' and row['Institutional affiliation'] != 'Unaffiliated' ): affils = row['Institutional affiliation'].split(";")