Example #1
    def test_build_with_body_and_packages(self):
        doc = Document('With options', doc_type='standalone', options=['12pt', 'Spam'], egg=42)
        doc.add_package('tikz')
        sec = doc.new_section('Section', label='Section')
        sec.add_text('Hey')
        assert doc.build(False, False, False) == cleandoc(r'''\documentclass[12pt, Spam, egg=42]{standalone}
            \usepackage[utf8]{inputenc}
            \usepackage[top=2.5cm, bottom=2.5cm, left=2.5cm, right=2.5cm]{geometry}
            \usepackage{tikz}
            \begin{document}
            \begin{section}{Section}
            \label{section:Section}
            Hey
            \end{section}
            \end{document}''')
Example #2
    def write(self, output_path):
        if not os.path.exists(output_path):
            os.makedirs(output_path)

        doc = Document(filename='table4',
                       filepath=output_path,
                       doc_type='article',
                       options=('12pt', ))
        doc.add_to_preamble(r"\usepgfplotslibrary{fillbetween}")
        doc.add_to_preamble(r'\usepgfplotslibrary{colorbrewer}')
        doc.add_to_preamble(r'\pgfplotsset{compat=1.15, colormap/Blues}')

        sec = doc.new_section('All graphs')

        self.write_CSLS(sec, output_path)
        self.write_vocabulary_cutoff(sec, output_path)
        self.write_stochastic(sec, output_path)

        doc.build(save_to_disk=True, compile_to_pdf=False, show_pdf=False)
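
The `write` methods shown in Examples #2, #3 and #6 to #8 are excerpts from a larger results-writing class; their imports and the enclosing class are not part of the snippets. A minimal sketch of the context they appear to assume is given below (the class name is a hypothetical placeholder, and `LatexTable` is assumed to alias python2latex's `Table`):

import os

import numpy as np
from python2latex import Document, bold, Table as LatexTable  # assumed imports; the snippets use bold() and LatexTable


class ResultsWriter:  # hypothetical name, not taken from the original project
    CAPTION = '...'        # caption text used by the table examples (not shown in the snippets)
    ORIGINAL_RESULTS = {}  # reference numbers reproduced in the tables (not shown in the snippets)

    def __init__(self, experiments):
        # Maps an experiment name to an object exposing aggregate_runs()
        self.experiments = experiments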
Example #3
File: table.py  Project: ngarneau/vecmap
    def write(self, output_path):
        if not os.path.exists(output_path):
            os.makedirs(output_path)

        doc = Document(filename='grid_search_experiments',
                       filepath=output_path,
                       doc_type='article',
                       options=('12pt', ))
        doc.add_to_preamble(r"\usepgfplotslibrary{fillbetween}")
        doc.add_to_preamble(r'\usepgfplotslibrary{colorbrewer}')
        doc.add_to_preamble(
            r'\pgfplotsset{compat=1.15, colormap/Blues, every axis/.append style={label style={font=\footnotesize}, tick label style={font=\footnotesize}}}'
        )

        sec = doc.new_section('All graphs')

        self.write_CSLS(sec, output_path)
        self.write_vocabulary_cutoff(sec, output_path)
        self.write_stochastic(sec, output_path)

        doc.build(save_to_disk=True, compile_to_pdf=False, show_pdf=False)
Example #4
from python2latex import Document, Plot, Color
import numpy as np

# Create the document
filepath = './examples/more complex plot example/'
filename = 'more_complex_plot_example'
doc = Document(filename, doc_type='article', filepath=filepath)
sec = doc.new_section('More complex plot')
sec.add_text(
    'This section shows how to make a more complex plot integrated directly into a tex file.'
)

# Create the data
X = np.linspace(0, 2 * np.pi, 100)
Y1 = np.sin(X)
Y2 = np.cos(X)

# Create a plot
plot = sec.new(Plot(plot_name=filename, plot_path=filepath))
plot.caption = 'More complex plot'

nice_blue = Color(.07, .22, .29, color_name='nice_blue')
nice_orange = Color(.85, .33, .28, color_name='nice_orange')

plot.add_plot(X, Y1, nice_blue, 'dashed', legend='sine')  # Add colors and legend to the plot
plot.add_plot(X, Y2, nice_orange, line_width='3pt', legend='cosine')
plot.legend_position = 'south east'  # Place the legend where you want

# Add a label to each axis
plot.x_label = 'Radians'
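
The listing is truncated at this point; presumably the original example continues by labelling the y axis and building the document as in the other examples. A plausible continuation (the label text is a guess) would be:

plot.y_label = 'Amplitude'  # guessed label; the original text is not shown

tex = doc.build()  # builds the .tex file and compiles it to pdf
print(tex)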
Example #5
from python2latex import Document

doc = Document(filename='simple_document_example', filepath='./examples/simple document example', doc_type='article', options=('12pt',))
doc.set_margins(top='3cm', bottom='3cm', margins='2cm')
sec = doc.new_section('Spam and Egg', label='spam_egg')
sec.add_text('The Monty Python slays the Spam and eats the Egg.')

tex = doc.build() # Builds the tex file and compiles it to pdf
print(tex) # Prints the tex string that generated the pdf
Example #6
    def write(self, output_path):
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        experiment = self.experiments['Reproduced Results']
        metrics = experiment.aggregate_runs()

        doc = Document(filename='table1',
                       filepath=output_path,
                       doc_type='article',
                       options=('12pt', ))
        sec = doc.new_section('Table 1')
        col, row = 17, 4
        table = sec.new(
            LatexTable(shape=(row, col),
                       alignment=['l'] + ['c'] * 16,
                       float_format='.1f',
                       label='original_results'))
        table.caption = self.CAPTION
        table.label_pos = 'bottom'

        # Main header
        table[0, 1:5].multicell(bold('EN-DE'), h_align='c')
        table[0, 5:9].multicell(bold('EN-ES'), h_align='c')
        table[0, 9:13].multicell(bold('EN-FI'), h_align='c')
        table[0, 13:17].multicell(bold('EN-IT'), h_align='c')
        table[0, 1:5].add_rule(trim_left=True, trim_right='.3em')
        table[0, 5:9].add_rule(trim_left='.3em', trim_right='.3em')
        table[0, 9:13].add_rule(trim_left='.3em', trim_right='.3em')
        table[0, 13:17].add_rule(trim_left='.3em', trim_right=True)

        # Sub header
        table[1, 1:17] = (['best', 'avg', 's', 't'] * 4)
        table[1, 0:17].add_rule(trim_left=True, trim_right=True)

        table[2, 0] = 'Original'
        table[2, 1] = self.ORIGINAL_RESULTS['de']['best']
        table[2, 2] = self.ORIGINAL_RESULTS['de']['avg']
        table[2, 3] = self.ORIGINAL_RESULTS['de']['successful']
        table[2, 4] = self.ORIGINAL_RESULTS['de']['time']
        table[2, 5] = self.ORIGINAL_RESULTS['es']['best']
        table[2, 6] = self.ORIGINAL_RESULTS['es']['avg']
        table[2, 7] = self.ORIGINAL_RESULTS['es']['successful']
        table[2, 8] = self.ORIGINAL_RESULTS['es']['time']
        table[2, 9] = self.ORIGINAL_RESULTS['fi']['best']
        table[2, 10] = self.ORIGINAL_RESULTS['fi']['avg']
        table[2, 11] = self.ORIGINAL_RESULTS['fi']['successful']
        table[2, 12] = self.ORIGINAL_RESULTS['fi']['time']
        table[2, 13] = self.ORIGINAL_RESULTS['it']['best']
        table[2, 14] = self.ORIGINAL_RESULTS['it']['avg']
        table[2, 15] = self.ORIGINAL_RESULTS['it']['successful']
        table[2, 16] = self.ORIGINAL_RESULTS['it']['time']

        table[3, 0] = bold('Reproduced')
        table[3, 1] = np.max(metrics['accuracies']['de'])
        table[3, 2] = np.average(metrics['accuracies']['de'])
        table[3, 3] = np.sum(np.array(metrics['accuracies']['de']) > 1.0) / len(
            metrics['accuracies']['de'])
        table[3, 4] = np.average(metrics['times']['de'])
        table[3, 5] = np.max(metrics['accuracies']['es'])
        table[3, 6] = np.average(metrics['accuracies']['es'])
        table[3, 7] = np.sum(np.array(metrics['accuracies']['es']) > 1.0) / len(
            metrics['accuracies']['es'])
        table[3, 8] = np.average(metrics['times']['es'])
        table[3, 9] = np.max(metrics['accuracies']['fi'])
        table[3, 10] = np.average(metrics['accuracies']['fi'])
        table[3, 11] = np.sum(np.array(metrics['accuracies']['fi']) > 1.0) / len(
            metrics['accuracies']['fi'])
        table[3, 12] = np.average(metrics['times']['fi'])
        table[3, 13] = np.max(metrics['accuracies']['it'])
        table[3, 14] = np.average(metrics['accuracies']['it'])
        table[3, 15] = np.sum(np.array(metrics['accuracies']['it']) > 1.0) / len(
            metrics['accuracies']['it'])
        table[3, 16] = np.average(metrics['times']['it'])

        tex = doc.build(save_to_disk=True,
                        compile_to_pdf=False,
                        show_pdf=False)
Example #7
    def write(self, output_path):
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        experiment = self.experiments['Other Languages']
        metrics = experiment.aggregate_runs()

        random_experiment = self.experiments[
            'Other Languages Unsup. Init (Random)']
        random_metrics = random_experiment.aggregate_runs()

        random_cutoff_experiment = self.experiments[
            'Other Languages Unsup. Init (Random Cutoff)']
        random_cutoff_metrics = random_cutoff_experiment.aggregate_runs()

        stochastic_experiment = self.experiments['Other Languages Stochastic']
        stochastic_metrics = stochastic_experiment.aggregate_runs()

        csls_experiment = self.experiments['Other Languages CSLS']
        csls_metrics = csls_experiment.aggregate_runs()

        bidirectional_experiment = self.experiments[
            'Other Languages Bidrectional']
        bidirectional_metrics = bidirectional_experiment.aggregate_runs()

        reweighting_experiment = self.experiments[
            'Other Languages Re-weighting']
        reweighting_metrics = reweighting_experiment.aggregate_runs()

        doc = Document(filename='table3',
                       filepath=output_path,
                       doc_type='article',
                       options=('12pt', ))
        sec = doc.new_section('Table 3')

        col, row = 17, 9
        table = sec.new(
            LatexTable(shape=(row, col),
                       alignment=['l'] + ['c'] * 16,
                       float_format='.1f',
                       label='other_languages_results'))
        table.caption = self.CAPTION
        table.label_pos = 'bottom'

        # Main header
        table[0, 1:5].multicell(bold('EN-ET'), h_align='c')
        table[0, 5:9].multicell(bold('EN-FA'), h_align='c')
        table[0, 9:13].multicell(bold('EN-LV'), h_align='c')
        table[0, 13:17].multicell(bold('EN-VI'), h_align='c')
        table[0, 1:5].add_rule(trim_left=True, trim_right='.3em')
        table[0, 5:9].add_rule(trim_left='.3em', trim_right='.3em')
        table[0, 9:13].add_rule(trim_left='.3em', trim_right='.3em')
        table[0, 13:17].add_rule(trim_left='.3em', trim_right=True)

        # Sub header
        table[1, 1:17] = (['best', 'avg', 's', 't'] * 4)
        table[1, 0:17].add_rule(trim_left=True, trim_right=True)

        table[2, 0] = bold('Vecmap')
        table[2, 1] = np.max(metrics['accuracies']['et'])
        table[2, 2] = np.average(metrics['accuracies']['et'])
        table[2, 3] = np.sum(np.array(metrics['accuracies']['et']) > 1.0) / len(
            metrics['accuracies']['et'])
        table[2, 4] = np.average(metrics['times']['et'])
        table[2, 5] = np.max(metrics['accuracies']['fa'])
        table[2, 6] = np.average(metrics['accuracies']['fa'])
        table[2, 7] = np.sum(np.array(metrics['accuracies']['fa']) > 1.0) / len(
            metrics['accuracies']['fa'])
        table[2, 8] = np.average(metrics['times']['fa'])
        table[2, 9] = np.max(metrics['accuracies']['lv'])
        table[2, 10] = np.average(metrics['accuracies']['lv'])
        table[2, 11] = np.sum(np.array(metrics['accuracies']['lv']) > 1.0) / len(
            metrics['accuracies']['lv'])
        table[2, 12] = np.average(metrics['times']['lv'])
        table[2, 13] = np.max(metrics['accuracies']['vi'])
        table[2, 14] = np.average(metrics['accuracies']['vi'])
        table[2, 15] = np.sum(np.array(metrics['accuracies']['vi']) > 1.0) / len(
            metrics['accuracies']['vi'])
        table[2, 16] = np.average(metrics['times']['vi'])

        table[3, 0] = bold('- Unsupervised (Random)')
        table[3, 1] = np.max(random_metrics['accuracies']['et'])
        table[3, 2] = np.average(random_metrics['accuracies']['et'])
        table[3, 3] = np.sum(
            np.array(random_metrics['accuracies']['et']) > 1.0) / len(
                metrics['accuracies']['et'])
        table[3, 4] = np.average(random_metrics['times']['et'])
        table[3, 5] = np.max(random_metrics['accuracies']['fa'])
        table[3, 6] = np.average(random_metrics['accuracies']['fa'])
        table[3, 7] = np.sum(
            np.array(random_metrics['accuracies']['fa']) > 1.0) / len(
                metrics['accuracies']['fa'])
        table[3, 8] = np.average(random_metrics['times']['fa'])
        table[3, 9] = np.max(random_metrics['accuracies']['lv'])
        table[3, 10] = np.average(random_metrics['accuracies']['lv'])
        table[3, 11] = np.sum(
            np.array(random_metrics['accuracies']['lv']) > 1.0) / len(
                metrics['accuracies']['lv'])
        table[3, 12] = np.average(random_metrics['times']['lv'])
        table[3, 13] = np.max(random_metrics['accuracies']['vi'])
        table[3, 14] = np.average(random_metrics['accuracies']['vi'])
        table[3, 15] = np.sum(
            np.array(random_metrics['accuracies']['vi']) > 1.0) / len(
                metrics['accuracies']['vi'])
        table[3, 16] = np.average(random_metrics['times']['vi'])

        table[4, 0] = bold('- Unsupervised (Random Cutoff)')
        table[4, 1] = np.max(random_cutoff_metrics['accuracies']['et'])
        table[4, 2] = np.average(random_cutoff_metrics['accuracies']['et'])
        table[4, 3] = np.sum(
            np.array(random_cutoff_metrics['accuracies']['et']) > 1.0) / len(
                metrics['accuracies']['et'])
        table[4, 4] = np.average(random_cutoff_metrics['times']['et'])
        table[4, 5] = np.max(random_cutoff_metrics['accuracies']['fa'])
        table[4, 6] = np.average(random_cutoff_metrics['accuracies']['fa'])
        table[4, 7] = np.sum(
            np.array(random_cutoff_metrics['accuracies']['fa']) > 1.0) / len(
                metrics['accuracies']['fa'])
        table[4, 8] = np.average(random_cutoff_metrics['times']['fa'])
        table[4, 9] = np.max(random_cutoff_metrics['accuracies']['lv'])
        table[4, 10] = np.average(random_cutoff_metrics['accuracies']['lv'])
        table[4, 11] = np.sum(
            np.array(random_cutoff_metrics['accuracies']['lv']) > 1.0) / len(
                metrics['accuracies']['lv'])
        table[4, 12] = np.average(random_cutoff_metrics['times']['lv'])
        table[4, 13] = np.max(random_cutoff_metrics['accuracies']['vi'])
        table[4, 14] = np.average(random_cutoff_metrics['accuracies']['vi'])
        table[4, 15] = np.sum(
            np.array(random_cutoff_metrics['accuracies']['vi']) > 1.0) / len(
                metrics['accuracies']['vi'])
        table[4, 16] = np.average(random_cutoff_metrics['times']['vi'])

        table[5, 0] = bold('- Stochastic')
        table[5, 1] = np.max(stochastic_metrics['accuracies']['et'])
        table[5, 2] = np.average(stochastic_metrics['accuracies']['et'])
        table[5, 3] = np.sum(
            np.array(stochastic_metrics['accuracies']['et']) > 1.0) / len(
                metrics['accuracies']['et'])
        table[5, 4] = np.average(stochastic_metrics['times']['et'])
        table[5, 5] = np.max(stochastic_metrics['accuracies']['fa'])
        table[5, 6] = np.average(stochastic_metrics['accuracies']['fa'])
        table[5, 7] = np.sum(
            np.array(stochastic_metrics['accuracies']['fa']) > 1.0) / len(
                metrics['accuracies']['fa'])
        table[5, 8] = np.average(stochastic_metrics['times']['fa'])
        table[5, 9] = np.max(stochastic_metrics['accuracies']['lv'])
        table[5, 10] = np.average(stochastic_metrics['accuracies']['lv'])
        table[5, 11] = np.sum(
            np.array(stochastic_metrics['accuracies']['lv']) > 1.0) / len(
                metrics['accuracies']['lv'])
        table[5, 12] = np.average(stochastic_metrics['times']['lv'])
        table[5, 13] = np.max(stochastic_metrics['accuracies']['vi'])
        table[5, 14] = np.average(stochastic_metrics['accuracies']['vi'])
        table[5, 15] = np.sum(
            np.array(stochastic_metrics['accuracies']['vi']) > 1.0) / len(
                metrics['accuracies']['vi'])
        table[5, 16] = np.average(stochastic_metrics['times']['vi'])

        table[6, 0] = bold('- CSLS')
        table[6, 1] = np.max(csls_metrics['accuracies']['et'])
        table[6, 2] = np.average(csls_metrics['accuracies']['et'])
        table[6, 3] = np.sum(
            np.array(csls_metrics['accuracies']['et']) > 1.0) / len(
                metrics['accuracies']['et'])
        table[6, 4] = np.average(csls_metrics['times']['et'])
        table[6, 5] = np.max(csls_metrics['accuracies']['fa'])
        table[6, 6] = np.average(csls_metrics['accuracies']['fa'])
        table[6, 7] = np.sum(
            np.array(csls_metrics['accuracies']['fa']) > 1.0) / len(
                metrics['accuracies']['fa'])
        table[6, 8] = np.average(csls_metrics['times']['fa'])
        table[6, 9] = np.max(csls_metrics['accuracies']['lv'])
        table[6, 10] = np.average(csls_metrics['accuracies']['lv'])
        table[6, 11] = np.sum(
            np.array(csls_metrics['accuracies']['lv']) > 1.0) / len(
                metrics['accuracies']['lv'])
        table[6, 12] = np.average(csls_metrics['times']['lv'])
        table[6, 13] = np.max(csls_metrics['accuracies']['vi'])
        table[6, 14] = np.average(csls_metrics['accuracies']['vi'])
        table[6, 15] = np.sum(
            np.array(csls_metrics['accuracies']['vi']) > 1.0) / len(
                metrics['accuracies']['vi'])
        table[6, 16] = np.average(csls_metrics['times']['vi'])

        table[7, 0] = bold('- Bidirectional')
        table[7, 1] = np.max(bidirectional_metrics['accuracies']['et'])
        table[7, 2] = np.average(bidirectional_metrics['accuracies']['et'])
        table[7, 3] = np.sum(
            np.array(bidirectional_metrics['accuracies']['et']) > 1.0) / len(
                metrics['accuracies']['et'])
        table[7, 4] = np.average(bidirectional_metrics['times']['et'])
        table[7, 5] = np.max(bidirectional_metrics['accuracies']['fa'])
        table[7, 6] = np.average(bidirectional_metrics['accuracies']['fa'])
        table[7, 7] = np.sum(
            np.array(bidirectional_metrics['accuracies']['fa']) > 1.0) / len(
                metrics['accuracies']['fa'])
        table[7, 8] = np.average(bidirectional_metrics['times']['fa'])
        table[7, 9] = np.max(bidirectional_metrics['accuracies']['lv'])
        table[7, 10] = np.average(bidirectional_metrics['accuracies']['lv'])
        table[7, 11] = np.sum(
            np.array(bidirectional_metrics['accuracies']['lv']) > 1.0) / len(
                metrics['accuracies']['lv'])
        table[7, 12] = np.average(bidirectional_metrics['times']['lv'])
        table[7, 13] = np.max(bidirectional_metrics['accuracies']['vi'])
        table[7, 14] = np.average(bidirectional_metrics['accuracies']['vi'])
        table[7, 15] = np.sum(
            np.array(bidirectional_metrics['accuracies']['vi']) > 1.0) / len(
                metrics['accuracies']['vi'])
        table[7, 16] = np.average(bidirectional_metrics['times']['vi'])

        table[8, 0] = bold('- Reweighting')
        table[8, 1] = np.max(reweighting_metrics['accuracies']['et'])
        table[8, 2] = np.average(reweighting_metrics['accuracies']['et'])
        table[8, 3] = np.sum(
            np.array(reweighting_metrics['accuracies']['et']) > 1.0) / len(
                metrics['accuracies']['et'])
        table[8, 4] = np.average(reweighting_metrics['times']['et'])
        table[8, 5] = np.max(reweighting_metrics['accuracies']['fa'])
        table[8, 6] = np.average(reweighting_metrics['accuracies']['fa'])
        table[8, 7] = np.sum(
            np.array(reweighting_metrics['accuracies']['fa']) > 1.0) / len(
                metrics['accuracies']['fa'])
        table[8, 8] = np.average(reweighting_metrics['times']['fa'])
        table[8, 9] = np.max(reweighting_metrics['accuracies']['lv'])
        table[8, 10] = np.average(reweighting_metrics['accuracies']['lv'])
        table[8, 11] = np.sum(
            np.array(reweighting_metrics['accuracies']['lv']) > 1.0) / len(
                metrics['accuracies']['lv'])
        table[8, 12] = np.average(reweighting_metrics['times']['lv'])
        table[8, 13] = np.max(reweighting_metrics['accuracies']['vi'])
        table[8, 14] = np.average(reweighting_metrics['accuracies']['vi'])
        table[8, 15] = np.sum(
            np.array(reweighting_metrics['accuracies']['vi']) > 1.0) / len(
                metrics['accuracies']['vi'])
        table[8, 16] = np.average(reweighting_metrics['times']['vi'])

        tex = doc.build(save_to_disk=True,
                        compile_to_pdf=False,
                        show_pdf=False)
Example #8
    def write(self, output_path):
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        doc = Document(filename='table2',
                       filepath=output_path,
                       doc_type='article',
                       options=('12pt', ))
        sec = doc.new_section('Table 2')
        col, row = 17, 17
        table = sec.new(
            LatexTable(shape=(row, col),
                       alignment=['l'] + ['c'] * 16,
                       float_format='.1f',
                       label='ablation_study'))
        table.caption = self.CAPTION
        table.label_pos = 'bottom'

        # Main header
        table[0, 1:5].multicell(bold('EN-DE'), h_align='c')
        table[0, 5:9].multicell(bold('EN-ES'), h_align='c')
        table[0, 9:13].multicell(bold('EN-FI'), h_align='c')
        table[0, 13:17].multicell(bold('EN-IT'), h_align='c')
        table[0, 1:5].add_rule(trim_left=True, trim_right='.3em')
        table[0, 5:9].add_rule(trim_left='.3em', trim_right='.3em')
        table[0, 9:13].add_rule(trim_left='.3em', trim_right='.3em')
        table[0, 13:17].add_rule(trim_left='.3em', trim_right=True)

        # Sub header
        table[1, 1:17] = (['best', 'avg', 's', 't'] * 4)
        table[1, 0:17].add_rule(trim_left=True, trim_right=True)

        ### Full system metrics
        table[2, 0] = 'Full System'
        table = self.write_original_row(table, 2,
                                        self.ORIGINAL_RESULTS['Full System'])
        experiment = self.experiments['Full System']
        metrics = experiment.aggregate_runs()
        table[3, 0] = bold('Reproduced')
        table = self.write_new_row(table, 3, metrics)
        table[3, 0:17].add_rule(trim_left=True, trim_right=True)

        ### Unsup. Init
        table[4, 0] = '- Unsup. Init.'
        table = self.write_original_row(table, 4,
                                        self.ORIGINAL_RESULTS['Unsup. Init'])
        experiment = self.experiments['Unsup. Init (Random)']
        metrics = experiment.aggregate_runs()
        table[5, 0] = bold('Rand.')
        table = self.write_new_row(table, 5, metrics)
        experiment = self.experiments['Unsup. Init (Random Cutoff)']
        metrics = experiment.aggregate_runs()
        table[6, 0] = bold('Rand. Cut.')
        table = self.write_new_row(table, 6, metrics)
        table[6, 0:17].add_rule(trim_left=True, trim_right=True)

        ### Stochastic
        table[7, 0] = '- Stochastic'
        table = self.write_original_row(table, 7,
                                        self.ORIGINAL_RESULTS['Stochastic'])
        experiment = self.experiments['Stochastic']
        metrics = experiment.aggregate_runs()
        table[8, 0] = bold('Reproduced')
        table = self.write_new_row(table, 8, metrics)
        table[8, 0:17].add_rule(trim_left=True, trim_right=True)

        ### Cutoff
        table[9, 0] = '- Cutoff (k=100k)'
        table = self.write_original_row(
            table, 9, self.ORIGINAL_RESULTS['Cutoff (k=100k)'])
        # experiment = self.experiments['Cutoff (k=100k)']
        # metrics = experiment.aggregate_runs()
        table[10, 0] = bold('Reproduced')
        # table = self.write_new_row(table, 10, metrics)
        table[10, 1:] = ['-'] * 16
        table[10, 0:17].add_rule(trim_left=True, trim_right=True)

        ### CSLS
        table[11, 0] = '- CSLS'
        table = self.write_original_row(table, 11,
                                        self.ORIGINAL_RESULTS['CSLS'])
        experiment = self.experiments['CSLS']
        metrics = experiment.aggregate_runs()
        table[12, 0] = bold('Reproduced')
        table = self.write_new_row(table, 12, metrics)
        table[12, 0:17].add_rule(trim_left=True, trim_right=True)

        ### Bidrectional
        table[13, 0] = '- Bidrectional'
        table = self.write_original_row(table, 13,
                                        self.ORIGINAL_RESULTS['Bidrectional'])
        experiment = self.experiments['Bidrectional']
        metrics = experiment.aggregate_runs()
        table[14, 0] = bold('Reproduced')
        table = self.write_new_row(table, 14, metrics)
        table[14, 0:17].add_rule(trim_left=True, trim_right=True)

        ### Re-weighting
        table[15, 0] = '- Re-weighting'
        table = self.write_original_row(table, 15,
                                        self.ORIGINAL_RESULTS['Re-weighting'])
        experiment = self.experiments['Re-weighting']
        metrics = experiment.aggregate_runs()
        table[16, 0] = bold('Reproduced')
        table = self.write_new_row(table, 16, metrics)

        tex = doc.build(save_to_disk=True,
                        compile_to_pdf=False,
                        show_pdf=False)
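
Example #8 relies on two helpers, write_original_row and write_new_row, that the snippet does not show. Assuming write_new_row fills a row with the same best/avg/s/t block per language pair that Examples #6 and #7 spell out column by column, a minimal sketch could be:

    def write_new_row(self, table, row, metrics, langs=('de', 'es', 'fi', 'it')):
        # Hypothetical reconstruction mirroring the unrolled rows of Examples #6 and #7:
        # each language pair occupies four consecutive columns (best, avg, s, t).
        for i, lang in enumerate(langs):
            accuracies = np.array(metrics['accuracies'][lang])
            table[row, 4 * i + 1] = np.max(accuracies)
            table[row, 4 * i + 2] = np.average(accuracies)
            table[row, 4 * i + 3] = np.sum(accuracies > 1.0) / len(accuracies)
            table[row, 4 * i + 4] = np.average(metrics['times'][lang])
        return table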
Example #9
from python2latex import Document, Table, italic
import numpy as np

doc = Document(filename='more_complex_table_from_numpy_array_example',
               filepath='examples/table examples',
               doc_type='article',
               options=('12pt', ))

sec = doc.new_section('Testing tables from numpy array')
sec.add_text("This section tests tables from numpy array.")

col, row = 6, 4
data = np.random.rand(row, col)

table = sec.new(
    Table(shape=(row + 2, col + 1), alignment='c', float_format='.2f'))
# Set a caption if desired
table.caption = 'Table from numpy array'
table.caption_space = '10pt'  # Space between table and caption.

# Set entries with slices
table[2:, 1:] = data
# Overwrite data if needed, whatever the object type
table[2:, 1] = [i * 1000 for i in range(row)]

# Change format of cells easily
table[2:, 1].format_spec = '.0e'  # Exponential format

# Apply custom functions on the cell content for flexibility
table[2, 1].apply_command(lambda value: f'${value}$')
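
The snippet stops before the document is produced; presumably it ends the same way as the other standalone examples:

tex = doc.build()  # builds the .tex file and compiles it to pdf
print(tex)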
Example #10
from python2latex import Document, TexEnvironment

doc = Document(filename='unsupported_env_example',
               doc_type='article',
               filepath='examples/unsupported env example',
               options=('12pt', ))

sec = doc.new_section('Unsupported env')
sec.add_text("This section shows how to create unsupported env if needed.")

sec.add_package('amsmath')  # Add needed packages in any TexEnvironment, at any level
align = sec.new(TexEnvironment('align', label='align_label'))
align.add_text(r"""e^{i\pi} &= \cos \pi + i \sin \pi\\
         &= -1""")  # Use raw strings to alleviate tex writing

tex = doc.build()
print(tex)
Example #11
    filepath = './examples/plot examples/predefined palettes comparison/'
    filename = 'PREDEFINED_PALETTES_comparison'
    doc = Document(filename, doc_type='article', filepath=filepath)

    # Insert title
    center = doc.new(TexEnvironment('center'))
    center += r"\huge \bf Predefined color maps and palettes"

    doc += """\\noindent
    python2latex provides three color maps natively. They are defined in the JCh axes of the CIECAM02 color model, which is linear with respect to human color perception. Moreover, three ``dynamic'' palettes have been defined, one for each color map. They are dynamic in that the range of colors used to produce the palette changes with the number of colors needed. This allows for a good distribution of hues and brightness for any number of colors.

    All three color maps have been designed to be colorblind friendly for all types of colorblindness. To this end, each color map is strictly increasing or decreasing in lightness, which helps to distinguish hues that may look similar to a colorblind person. This also has the advantage that the palettes remain usable in grayscale.
    """

    # First section
    sec = doc.new_section(r'The \texttt{holi} color map')
    sec += """
    The ``holi'' color map was designed to provide a set of easily distinguishable hues for any needed number of colors. It is optimized for palettes of 5 or 6 colors, but other numbers of colors also generate very good palettes. It is colorblind friendly for all types of colorblindness for up to 5 colors, but can still be acceptable for more. The name ``holi'' comes from the Hindu festival of colors. This is the default color map of python2latex.

    Below is a graph of the color map in the J-h axes of the CIECAM02 color model, followed by the colors generated according to the number of colors needed.
    """
    plot_palette(doc, 'holi')
    doc += r'\clearpage'

    sec = doc.new_section(r'The \texttt{aube} color map')
    sec += """
    The ``aube'' color map was designed to cover blue and red hues, setting aside green. It is best suited for palettes of one to five colors, but can be acceptable for more. It is perceptually linear in hue and in brightness. These properties make it colorblind friendly for any type of colorblindness. The name ``aube'' is the French word for dawn.

    Below is a graph of the color map in the J-h axes of the CIECAM02 color model, followed by the colors generated according to the number of colors needed.
    """
    plot_palette(doc, 'aube')