Code Example #1
File: layout.py Project: Kankelborg-Group/ESIS
def figure() -> pylatex.Figure:
    result = pylatex.Figure(position='!ht')
    result._star_latex_name = True
    result.add_image(
        filename=str(pdf()),
        width=None,
    )
    result.add_caption(pylatex.NoEscape(
        r"""
\roy{\ESIS\ optical layout. 
Dashed lines indicate the positions of unpopulated channels. 
The blue lines represent the path of \OV\ through the system.} The \ESIS\ instrument is a pseudo-Gregorian design.
The secondary mirror is replaced by a segmented array of concave diffraction gratings.
The field stop at prime focus defines instrument spatial/spectral \FOV.
\CCDs\ are arrayed around the primary mirror, each associated with a particular grating.
Eight grating positions appear in this schematic; only six fit within the volume of the rocket payload.
\NumChannelsWords\ channels are populated for the first flight."""
    ))
    result.append(kgpy.latex.Label('fig:layout'))
    return result
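
For context, a minimal sketch of how a figure factory like this might be consumed; pdf() and the kgpy.latex helpers are specific to the ESIS project, so the sketch below uses a plain pylatex label and a hypothetical image path instead.

import pylatex

def simple_figure(image_path: str) -> pylatex.Figure:
    # Starred figure ('figure*'), spanning both columns as in the example above.
    result = pylatex.Figure(position='!ht')
    result._star_latex_name = True
    result.add_image(image_path, width=pylatex.NoEscape(r'\textwidth'))
    result.add_caption('Optical layout.')
    result.append(pylatex.Label(pylatex.Marker('layout', prefix='fig')))
    return result

doc = pylatex.Document()
doc.append(simple_figure('layout.pdf'))  # hypothetical image file
doc.generate_tex('layout_example')       # writes layout_example.tex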
Code Example #2
def figure() -> pylatex.Figure:
    result = pylatex.Figure()
    result.add_image(str(pdf()), width=None)
    result.add_caption(
        pylatex.NoEscape(r"""
(Top) Measured reflectance for several multilayer coated witness samples 
\roy{at an incidence angle of \gratingWitnessMeasurementIncidenceAngle\ on \testGratingDate.
The white regions indicate wavelengths that intercept the detector and the gray regions indicate wavelengths that
miss the detector.
Note the suppression of second order relative to the first order and the consistency of the coatings between each 
channel.
The Channel \gratingWitnessMissingChannel\ grating measurement is missing due to issues in the measurement apparatus.
(Bottom) Comparison of the efficiency of the three main \ESIS\ optical components: primary mirror, grating and filter.
The primary mirror efficiency is based on measurements of a \Si\ witness sample taken on \primaryMeasurementDate\ at an 
angle of incidence of \primaryWitnessMeasurementIncidenceAngle. 
The grating efficiency is from a measurement of the Channel \testGratingChannelIndex\ grating taken on \testGratingDate\
at an angle of incidence of \gratingMeasurementIncidenceAngle.
The filter efficiency is a theoretical model that includes the filter mesh, \filterThickness\ of \filterMaterial\ and
\filterOxideThickness\ of \filterMaterial\ oxide.}"""))
    result.append(kgpy.latex.Label('fig:componentEfficiencyVsWavelength'))
    return result
Code Example #3
File: output.py Project: yongwangCPH/PyHDX
    def test_subfigure(self):
        fig = plt.figure()
        plt.plot([2, 3, 42, 1])

        file_path = self._save_fig(fig)

        with self.doc.create(pyl.Figure(position='h!')) as kittens:
            w = str(0.25)
            for i in range(8):
                with self.doc.create(
                        pyl.SubFigure(
                            position='b',
                            width=pyl.NoEscape(w +
                                               r'\linewidth'))) as left_kitten:

                    left_kitten.add_image(file_path,
                                          width=pyl.NoEscape(r'\linewidth'))
                    left_kitten.add_caption(f'Kitten on the {i}')
                if i % 4 == 3:
                    self.doc.append('\n')
            kittens.add_caption("Two kittens")
Code Example #4
    def make_subfigure(self, fig_funcs, layout=(5, 4), close=True):
        #todo figure out how to iterate properly
        n = np.prod(layout)
        chunks = grouper(n, fig_funcs)
        w = str(1/layout[1])
        pbar = tqdm(total=len(fig_funcs))
        for chunk in chunks:
            with self.doc.create(pyl.Figure(position='ht')) as tex_fig:
                for i, fig_func in enumerate(chunk):
                    if fig_func is None:
                        continue
                    with self.doc.create(pyl.SubFigure(position='b', width=pyl.NoEscape(w + r'\linewidth'))) as subfig:
                        fig = fig_func()
                        file_path = self._save_fig(fig, bbox_inches='tight') # todo access these kwargs
                        if close:
                            plt.close(fig)
                        subfig.add_image(file_path, width=pyl.NoEscape(r'\linewidth'))
                    if i % layout[1] == layout[1] - 1:
                        self.doc.append('\n')
                    pbar.update(1)

            self.doc.append(pyl.NewPage())
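
The grouper helper used above is not defined in this excerpt. A common definition, assumed here from the itertools recipes, pads the final chunk with None, which is why the loop skips None entries:

from itertools import zip_longest

def grouper(n, iterable, fillvalue=None):
    # grouper(3, 'ABCDEFG') --> ('A','B','C') ('D','E','F') ('G', None, None)
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)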
Code Example #5
def figure() -> pylatex.Figure:
    result = pylatex.Figure(position='htb!')
    result._star_latex_name = True
    result.append(
        kgpy.latex.aas.Gridline(
            [kgpy.latex.aas.Fig(pdf(), kgpy.latex.textwidth, '(a)')]))

    result.append(
        kgpy.latex.aas.Gridline([
            kgpy.latex.aas.LeftFig(schematic_primary.pdf(),
                                   kgpy.latex.columnwidth, '(b)'),
            kgpy.latex.aas.RightFig(schematic_grating.pdf(),
                                    kgpy.latex.columnwidth, '(c)'),
        ]))

    result.add_caption(
        pylatex.NoEscape(
            r"""(a) Schematic diagram of a single channel of the \ESIS\ optical system.
(b) Clear aperture of the primary mirror, size of the central obscuration, and the footprint of the beam for each 
channel.
(c) Clear aperture of Channel 1's diffraction grating."""))
    result.append(kgpy.latex.Label('fig:schematic'))
    return result
Code Example #6
    def prepare_document(self, directory, subdirs=False, file_types='.png'):
        """
        compile any pdf, jpg and png files
        in directory into a latex pdf document
        :return:
        """
        if subdirs not in [True, False]:
            raise errors.InputError(
                'subdirs argument should be either True or False')

        if isinstance(file_types, str):
            file_types = [file_types]

        if subdirs:
            files = self.search_recursive(directory)

        else:
            files = self.search()

        if not files:
            raise errors.InputError(
                '''Cannot locate pdf, jpg or png files in "{}". Please 
                give the full path to where your model selection results are
                 plotted. 
                '''.format(directory))
        doc = pylatex.Document(self.filename, documentclass='article')

        for k, v in list(files.items()):
            assert isinstance(v, list)
            if v is not None:
                with doc.create(
                        pylatex.Section(os.path.join(*Path(k).parts[-1:]))):
                    doc.append(k)
                    doc.append(pylatex.NoEscape(r'\\*'))
                    if len(v) == 1:
                        with doc.create(
                                pylatex.Figure(position='htbp!',
                                               width=pylatex.NoEscape(
                                                   r'0.3\linewidth'))) as fig:

                            fig.add_image(v[0])
                            fig.add_caption(
                                os.path.join(*Path(v[0]).parts[-2:]))
                            # fig.add_label(v[0])
                    else:
                        with doc.create(
                                pylatex.Figure(
                                    position='htbp!',
                                    width=pylatex.NoEscape(r'\linewidth'),
                                )) as fig:
                            for i in range(len(v)):
                                with doc.create(
                                        pylatex.SubFigure(
                                            width=pylatex.NoEscape(
                                                r'0.3\linewidth'))) as sub:
                                    sub.add_image(v[i])
                                    # sub.add_caption('')
                                if i % 3 == 0:
                                    doc.append(pylatex.NoEscape(r'\break'))
                                    # sub.add_label(i)
                            fig.add_caption(
                                os.path.join(*Path(v[i]).parts[-3:-1]))
                    doc.append(pylatex.NoEscape(r'\hfill'))

        doc.generate_pdf()
        LOG.info('PDF generated at "{}"'.format(doc.default_filepath))
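
A hypothetical call of this method; the enclosing class, its constructor, and the search()/search_recursive() helpers are not shown in the excerpt, so the class name and path below are placeholders only:

# Hypothetical usage, assuming the enclosing class takes the report filename
# in its constructor and knows how to locate the plotted results.
renderer = ModelSelectionReport(filename='model_selection_report')
renderer.prepare_document('/path/to/results', subdirs=True, file_types=['.png', '.pdf'])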
Code Example #7
    def _add_fits(self,
                  doc,
                  fit_log,
                  fit_name,
                  operator_set,
                  ratio,
                  gap=False,
                  const=False):
        with doc.create(pylatex.Center()) as centered:
            if gap and const and util.ERR_PREC <= 4:
                cols = "X[c] X[c] X[4,c] X[4,c] X[4,c] X[4,c] X[2,c] X[2,c] X[2,c] X[c]"
            elif (gap or const) and util.ERR_PREC <= 4:
                cols = "X[c] X[c] X[4,c] X[4,c] X[4,c] X[2,c] X[2,c] X[2,c] X[c]"
            else:
                cols = "X[c] X[c] X[4,c] X[4,c] X[2,c] X[2,c] X[2,c] X[c]"
            with centered.create(pylatex.LongTabu(
                    cols, to=r"\linewidth")) as fit_table:
                if ratio:
                    energy_header = r"$a_t \Delta E_{\rm fit}$"
                else:
                    energy_header = r"$a_t E_{\rm fit}$"
                header_row = [
                    pylatex.NoEscape(r"$t_{\rm min}$"),
                    pylatex.NoEscape(r"$t_{\rm max}$"),
                    pylatex.NoEscape(energy_header),
                    "A",
                    pylatex.NoEscape(r"$\chi^2 / \text{dof}$"),
                    pylatex.NoEscape(r"$p$-value"),
                    pylatex.NoEscape(r"$t_{\rm exc}$"),
                    pylatex.NoEscape(r"$\sigma_{\rm cut}$"),
                ]
                if const and util.ERR_PREC <= 4:
                    header_row.insert(4, pylatex.NoEscape(r"const"))
                if gap and util.ERR_PREC <= 4:
                    header_row.insert(4, pylatex.NoEscape(r"$a \Delta$"))

                fit_table.add_row(header_row, mapper=[pylatex.utils.bold])
                fit_table.add_hline()
                fit_table.end_table_header()
                for fit_info, fit_result in fit_log.fits.items():
                    value_row = [
                        fit_info.tmin,
                        fit_info.tmax,
                        fit_result.energy,
                        fit_result.amplitude,
                        round(fit_result.chisq, 2),
                        round(fit_result.quality, 2),
                        fit_info.exclude_times,
                        round(fit_info.noise_cutoff, 1),
                    ]
                    if const and util.ERR_PREC <= 4:
                        value_row.insert(4, fit_result.const)
                    if gap and util.ERR_PREC <= 4:
                        value_row.insert(4, fit_result.gap)
                    fit_table.add_row(value_row)

        if self.fit_plots:
            for fit_info1, fit_info2 in zip(*[iter(fit_log.fits.keys())] * 2):
                fit_plot1 = self.fit_plotfile(operator_set,
                                              fit_name,
                                              fit_info1,
                                              extension=util.PlotExtension.pdf)
                fit_plot2 = self.fit_plotfile(operator_set,
                                              fit_name,
                                              fit_info2,
                                              extension=util.PlotExtension.pdf)

                with doc.create(pylatex.Figure(position='H')) as fig:
                    with doc.create(
                            pylatex.SubFigure(position='b',
                                              width=pylatex.NoEscape(
                                                  r'0.5\linewidth'))) as fig1:
                        caption = f"{fit_info1.model.short_name}, $t_{{\\rm fit}} = {fit_info1.tmin}, {fit_info1.tmax}$"
                        if fit_info1.ratio:
                            caption += " Ratio"
                        if fit_info1.exclude_times:
                            caption += f" $t_{{\\rm exc}}$ = {fit_info1.exclude_times}"
                        if fit_info1.noise_cutoff:
                            caption += f" $\sigma_{{\\rm cut}} = {fit_info1.noise_cutoff}$"
                        util.add_image(fig1,
                                       self.results_dir,
                                       fit_plot1,
                                       width="1.0",
                                       caption=caption)
                    with doc.create(
                            pylatex.SubFigure(position='b',
                                              width=pylatex.NoEscape(
                                                  r'0.5\linewidth'))) as fig2:
                        caption = f"{fit_info2.model.short_name}, $t_{{\\rm fit}} = {fit_info2.tmin}, {fit_info2.tmax}$"
                        if fit_info2.ratio:
                            caption += " Ratio"
                        if fit_info2.exclude_times:
                            caption += f" $t_{{\\rm exc}}$ = {fit_info2.exclude_times}"
                        if fit_info2.noise_cutoff:
                            caption += f" $\sigma_{{\\rm cut}} = {fit_info2.noise_cutoff}$"
                        util.add_image(fig2,
                                       self.results_dir,
                                       fit_plot2,
                                       width="1.0",
                                       caption=caption)

            if len(fit_log.fits) % 2 != 0:
                fit_info = list(fit_log.fits.keys())[-1]
                fit_plot = self.fit_plotfile(operator_set,
                                             fit_name,
                                             fit_info,
                                             extension=util.PlotExtension.pdf)
                with doc.create(pylatex.Figure(position='H')) as fig:
                    caption = f"{fit_info.model.short_name}, $t_{{\\rm fit}} = {fit_info.tmin}, {fit_info.tmax}$"
                    if fit_info.ratio:
                        caption += " Ratio"
                    if fit_info.exclude_times:
                        caption += f" $t_{{\\rm exc}}$ = {fit_info.exclude_times}"
                    if fit_info.noise_cutoff:
                        caption += f" $\sigma_{{\\rm cut}} = {fit_info.noise_cutoff}$"
                    util.add_image(fig,
                                   self.results_dir,
                                   fit_plot,
                                   width="0.5",
                                   caption=caption)

        doc.append(pylatex.NoEscape(r"\newpage"))
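
A note on the pairing idiom used here and in the next example: zip(*[iter(seq)] * 2) walks a sequence two items at a time, dropping a trailing odd element, which is why the leftover fit is handled separately afterwards. A standalone sketch:

fits = ['fit_a', 'fit_b', 'fit_c', 'fit_d', 'fit_e']
for first, second in zip(*[iter(fits)] * 2):
    # yields ('fit_a', 'fit_b'), then ('fit_c', 'fit_d')
    print(first, second)
# 'fit_e' is left over; the code above detects that case with len(fit_log.fits) % 2 != 0.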
Code Example #8
    def _add_tmins(self, doc, fit_infos, fit_name, operator_set, ratio):
        for fit_info1, fit_info2 in zip(*[iter(fit_infos)] * 2):
            fit_plot1 = self.tmin_fit_plotfile(
                operator_set,
                fit_name,
                fit_info1,
                extension=util.PlotExtension.pdf)
            fit_plot2 = self.tmin_fit_plotfile(
                operator_set,
                fit_name,
                fit_info2,
                extension=util.PlotExtension.pdf)

            with doc.create(pylatex.Figure(position='H')) as fig:
                with doc.create(
                        pylatex.SubFigure(
                            position='b',
                            width=pylatex.NoEscape(r'0.5\linewidth'))) as fig1:
                    caption = f"{fit_info1.model.short_name}, $t_{{\\rm max}} = {fit_info1.tmax}$"
                    if fit_info1.ratio:
                        caption += " Ratio"
                    if fit_info1.exclude_times:
                        caption += f" $t_{{\\rm exc}}$ = {fit_info1.exclude_times}"
                    util.add_image(fig1,
                                   self.results_dir,
                                   fit_plot1,
                                   width="1.0",
                                   caption=caption)
                with doc.create(
                        pylatex.SubFigure(
                            position='b',
                            width=pylatex.NoEscape(r'0.5\linewidth'))) as fig2:
                    caption = f"{fit_info2.model.short_name}, $t_{{\\rm max}} = {fit_info2.tmax}$"
                    if fit_info2.ratio:
                        caption += " Ratio"
                    if fit_info2.exclude_times:
                        caption += f" $t_{{\\rm exc}}$ = {fit_info2.exclude_times}"
                    util.add_image(fig2,
                                   self.results_dir,
                                   fit_plot2,
                                   width="1.0",
                                   caption=caption)

        if len(fit_infos) % 2 != 0:
            fit_info = list(fit_infos)[-1]
            fit_plot = self.tmin_fit_plotfile(operator_set,
                                              fit_name,
                                              fit_info,
                                              extension=util.PlotExtension.pdf)
            with doc.create(pylatex.Figure(position='H')) as fig:
                caption = f"{fit_info.model.short_name}, $t_{{\\rm max}} = {fit_info.tmax}$"
                if fit_info.ratio:
                    caption += " Ratio"
                if fit_info.exclude_times:
                    caption += f" $t_{{\\rm exc}}$ = {fit_info.exclude_times}"
                util.add_image(fig,
                               self.results_dir,
                               fit_plot,
                               width="0.5",
                               caption=caption)

        doc.append(pylatex.NoEscape(r"\newpage"))
Code Example #9
def _add_data(doc: pl.Document, ds: Dataset, nr: NonRedundantization,
              meth: MLMethod):
    name = f'{ds.name}_{nr.name}_{meth.name}'
    directory = ds.name

    aVp_graph = f'{name}.jpg'
    angle_dist_graph = f'{name}_angledistribution.jpg'
    error_dist_graph = f'{name}_errordistribution.jpg'
    sqerror_graph = f'{name}_sqerror_vs_actual.jpg'
    stats_csv_all = f'{name}_stats_all.csv'
    stats_csv_out = f'{name}_stats_out.csv'

    actualVpred_file = os.path.join(directory, aVp_graph)
    ang_dist_file = os.path.join(directory, angle_dist_graph)
    error_dist_file = os.path.join(directory, error_dist_graph)
    sqerror_file = os.path.join(directory, sqerror_graph)

    df_all = pd.read_csv(os.path.join(directory, stats_csv_all))
    df_out = pd.read_csv(os.path.join(directory, stats_csv_out))

    with doc.create(pl.Section(f'Method: {ds.name}, {nr.name}, {meth.name}')):
        with doc.create(pl.Subsection('Summary of method:')):
            doc.append(f'Dataset: {ds.name}')
            doc.append(f'\nNon-redundantization: {nr.name}')
            doc.append(f'\nType of machine learning used: {meth.name}')

    with doc.create(pl.Subsection('Summary of the data:')):
        with doc.create(pl.Figure(position='!htbp')) as actualVpred:
            actualVpred.add_image(actualVpred_file, width='300px')
            actualVpred.add_caption(
                'Graph showing the predicted packing angle against the actual packing angle, when using the above-specified methods of non-redundantization and machine learning.'
            )

        with doc.create(pl.Table(position='!htbp')) as table:
            table.add_caption('Summary of results for all data')
            table.append(pl.Command('centering'))
            table.append(pl.NoEscape(df_all.to_latex(escape=False)))

        with doc.create(pl.Table(position='!htbp')) as table:
            table.add_caption('Summary of results for outliers.')
            table.append(pl.Command('centering'))
            table.append(pl.NoEscape(df_out.to_latex(escape=False)))

        with doc.create(pl.Figure(position='!htbp')) as graphs:
            with doc.create(
                    pl.SubFigure(position='!htbp',
                                 width=pl.NoEscape(
                                     r'0.30\linewidth'))) as ang_dist_graph:
                ang_dist_graph.add_image(ang_dist_file,
                                         width=pl.NoEscape(r'\linewidth'))
                ang_dist_graph.add_caption(
                    'Frequency distribution of the packing angle.')
            with doc.create(
                    pl.SubFigure(position='!htbp',
                                 width=pl.NoEscape(
                                     r'0.33\linewidth'))) as error_dist_graph:
                error_dist_graph.add_image(error_dist_file,
                                           width=pl.NoEscape(r'\linewidth'))
                error_dist_graph.add_caption(
                    'Distribution of errors calculated as the difference between the predicted and actual interface angle.'
                )
            with doc.create(
                    pl.SubFigure(position='!htbp',
                                 width=pl.NoEscape(
                                     r'0.33\linewidth'))) as sqerror_graph:
                sqerror_graph.add_image(sqerror_file,
                                        width=pl.NoEscape(r'\linewidth'))
                sqerror_graph.add_caption(
                    'Squared error in predicted packing angle against actual packing angle.'
                )
            graphs.add_caption('Graphs for further metrics.')
Code Example #10
File: workflow.py Project: granrothge/dgsres
def fit_all_in_one(config):
    """fit all grid points, and compose PDF reports
    
    A PDF includes fitted resolution function plot, a table of fitting parameters,
    a grid plot of interpolated resolution functions,
    and plots of comparison between the simulated and fitted resolution functions.
    """
    import pylatex, dill
    Ei = config.Ei
    Erange = (-0.3 * Ei, .95 * Ei)
    width = r'1\textwidth'
    for sl in config.slices:
        doc = _wph.initReportDoc("%s-fit-report" % sl.name)  # report document
        qaxis = sl.grid.qaxis
        Eaxis = sl.grid.Eaxis
        # info
        _wph.slice_info_section(sl, doc)

        # fit
        with doc.create(pylatex.Section('Fit resolution functions on grid')):
            # path to saved result
            path = '%s-fit_all_grid_points.dill' % sl.name
            if os.path.exists(path):
                qE2fitter, nofit = dill.load(open(path, 'rb'))
            else:
                qE2fitter, nofit = fit_all_grid_points(sl,
                                                       config,
                                                       use_cache=True)
                dill.dump((qE2fitter, nofit), open(path, 'wb'), recurse=True)
            # plot
            with doc.create(pylatex.Figure(position='htbp')) as plot:
                plt.figure()
                plot_resfits_on_grid(qE2fitter, sl, config, figsize=(10, 10))
                plot.add_plot(width=pylatex.NoEscape(width))
                plot.add_caption('Fitted resolution functions for %s' %
                                 sl.name)
                plt.close()
            doc.append(pylatex.utils.NoEscape(r"\clearpage"))
        # save
        pklfile = '%s-fit_results.pkl' % sl.name
        save_fits_as_pickle(qE2fitter, pklfile)
        import pickle as pkl
        qE2fitres = pkl.load(open(pklfile, 'rb'))

        # parameters
        with doc.create(pylatex.Subsection('Fitted parameters')):
            s = format_parameter_table(qE2fitres)
            doc.append(_wph.verbatim(s))

        # interpolated model
        with doc.create(pylatex.Subsection('Interpolated model')):
            imodel = get_interped_resolution_model(sl)
            qs = (qaxis.ticks() + qaxis.step / 2.)[:-1]
            Es = (Eaxis.ticks() + Eaxis.step / 2.)[:-1]
            dqgrid, dEgrid = list(qE2fitter.values())[0].qEgrids
            # plot
            with doc.create(pylatex.Figure(position='htbp')) as plot:
                plt.figure()
                plot_interpolated_resolution_on_grid(imodel,
                                                     qs,
                                                     Es,
                                                     dqgrid,
                                                     dEgrid,
                                                     figsize=(10, 10))
                plot.add_plot(width=pylatex.NoEscape(width))
                plot.add_caption('Interpolated resolution functions for %s' %
                                 sl.name)
                plt.close()
            doc.append(pylatex.utils.NoEscape(r"\clearpage"))

        # one by one comparison plots
        with doc.create(
                pylatex.Section('Comparing fits to mcvine simulations')):
            for qE, fitter in qE2fitter.items():
                with doc.create(pylatex.Figure(position='htbp')) as plot:
                    plt.figure()
                    plot_compare_fit_to_data(fitter)
                    plot.add_plot(width=pylatex.NoEscape(width))
                    plot.add_caption('Resolution at q=%s, E=%s' % qE)
                    plt.close()
                doc.append(
                    pylatex.utils.NoEscape(r"\clearpage")
                )  # otherwise latex complain about "too many floats"

        # save PDF
        doc.generate_pdf(clean_tex=False)
        continue
    return
Code Example #11
File: workflow.py Project: granrothge/dgsres
def simulate_all_in_one(config):
    """simulate all grid points, and compose PDF reports

    A PDF includes basic info, a plot of dynamical range, and a plot of simulated resolution functions
    on a grid.
    """
    import pylatex
    Ei = config.Ei
    Erange = (-0.3 * Ei, .95 * Ei)
    for sl in config.slices:
        doc = _wph.initReportDoc("%s-sim-report" % sl.name)  # report document
        # info
        _wph.slice_info_section(sl, doc)

        qaxis = sl.grid.qaxis
        Eaxis = sl.grid.Eaxis

        # dyn range plot
        # larger q range for a broader view
        ratio = 1.
        expanded_qaxis = sx.axis(
            min=qaxis.min - (qaxis.max - qaxis.min) * ratio / 2,
            max=qaxis.max + (qaxis.max - qaxis.min) * ratio / 2,
            step=qaxis.step).ticks()
        width = r'1\textwidth'
        with doc.create(pylatex.Section('Dynamical range')):
            with doc.create(pylatex.Figure(position='htbp')) as plot:
                plt.figure()
                plotDynRange(sl.hkl0,
                             sl.hkl_projection,
                             qaxis=expanded_qaxis,
                             Erange=Erange,
                             config=config)
                plot.add_plot(width=pylatex.NoEscape(width))
                plot.add_caption('Dynamical range for slice %s' % sl.name)
                plt.close()

        # simulate
        with doc.create(
                pylatex.Section('Simulated resolution functions on a grid')):
            outputs, failed = simulate_all_grid_points(
                slice=sl,
                config=config,
                Nrounds_beam=config.sim_Nrounds_beam,
                overwrite=False)

            if failed:
                # this seems unecessary as what is missing is clear in the plot
                """
                doc.append("Failed to calculate resolution functions for the following (Q,E) pairs:")
                with doc.create(pylatex.Itemize()) as itemize:
                    for f in failed:
                        itemize.add_item(str(f))
                """
                pass
            # plot
            with doc.create(pylatex.Figure(position='htbp')) as plot:
                plt.figure()
                plot_resolution_on_grid(sl, config, figsize=(10, 10))
                plot.add_plot(width=pylatex.NoEscape(width))
                plot.add_caption('Simulated resolution functions for %s' %
                                 sl.name)
                plt.close()
        # save pdf
        doc.generate_pdf(clean_tex=False)
        continue
    return
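
Both workflow examples above rely on pylatex's Figure.add_plot, which saves the currently active matplotlib figure and embeds it in the document. A self-contained sketch of that pattern (the output filename is arbitrary):

import matplotlib
matplotlib.use('Agg')  # headless backend; no display needed
import matplotlib.pyplot as plt
import pylatex

doc = pylatex.Document()
with doc.create(pylatex.Figure(position='htbp')) as plot:
    plt.figure()
    plt.plot([0, 1, 4, 9])
    plot.add_plot(width=pylatex.NoEscape(r'1\textwidth'))  # captures the active figure
    plot.add_caption('Toy curve embedded via add_plot')
    plt.close()
doc.generate_tex('plot_example')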
Code Example #12
File: gpcreport.py Project: codeaudit/autogpc
    def makeInteractionFigure(self, ker, cum, n_terms):
        """
        Create figure for interaction analysis, which includes a subfigure of
        the latest additive component and a subfigure of posterior of the overall
        compositional kernel up to now.

        :param ker: latest additive component to be plotted
        :type ker: GPCKernel
        :param cum: overall compositional kernel up to and including `ker`
        :type cum: GPCKernel
        :param n_terms: number of additive terms considered in `cum` so far
        """
        assert isinstance(ker, GPCKernel), 'Kernel must be of type GPCKernel'
        assert isinstance(cum, GPCKernel), 'Kernel must be of type GPCKernel'

        doc = self.doc
        kerDims = ker.getActiveDims()
        cumDims = cum.getActiveDims()

        img1Name = 'additive{0}ker'.format(n_terms)
        img1Format = '.eps' if len(kerDims) != 3 else '.png'
        img1Filename = img1Name + img1Format
        ker.draw(os.path.join(self.path, img1Name), active_dims_only=True)

        if n_terms == 1 or len(cumDims) > 3:
            # Only present current additive component
            caption_str = r"Trained classifier on " + dims2text(
                kerDims, ker.data) + "."
            with doc.create(pl.Figure(position='htbp!')) as fig:
                fig.add_image(img1Filename,
                              width=ut.NoEscape(r'0.7\textwidth'))
                fig.add_caption(ut.NoEscape(caption_str))
                self.fignum += 1

        else:
            # Present both current component and cumulative kernel
            img2Name = 'additive{0}cum'.format(n_terms)
            img2Format = '.eps' if len(cumDims) != 3 else '.png'
            img2Filename = img2Name + img2Format
            cum.draw(os.path.join(self.path, img2Name), active_dims_only=True)
            caption1_str = r"Current additive component involving " + dims2text(
                kerDims, ker.data) + "."
            caption2_str = r"Previous and current components combined, involving " + dims2text(
                cumDims, cum.data) + "."
            caption_str = r"Trained classifier on " + dims2text(
                cumDims, cum.data) + "."

            with doc.create(pl.Figure(position='htbp!')) as fig:
                with doc.create(
                        pl.SubFigure(
                            position='b',
                            width=ut.NoEscape(r'0.47\textwidth'))) as subfig1:
                    subfig1.add_image(img1Filename,
                                      width=ut.NoEscape(r'\textwidth'))
                    subfig1.add_caption(ut.NoEscape(caption1_str))
                doc.append(ut.NoEscape(r'\hfill'))
                with doc.create(
                        pl.SubFigure(
                            position='b',
                            width=ut.NoEscape(r'0.47\textwidth'))) as subfig2:
                    subfig2.add_image(img2Filename,
                                      width=ut.NoEscape(r'\textwidth'))
                    subfig2.add_caption(ut.NoEscape(caption2_str))
                fig.add_caption(ut.NoEscape(caption_str))
                self.fignum += 1
Code Example #13
File: gpcreport.py Project: codeaudit/autogpc
    def describeOneVariable(self, ker):
        """
        Generate a subsection describing a particular variable.

        :param ker: one-dimensional kernel describing the variable of interest
        :type ker: GPCKernel
        """
        assert isinstance(ker, GPCKernel), 'Argument must be of type GPCKernel'
        assert len(
            ker.getActiveDims()) == 1, 'The kernel must be one-dimensional'

        dim = ker.getActiveDims()[0]
        data = ker.data
        ds = data.getDataShape()

        xmu, xsd, xmin, xmax = ds['x_mu'][dim], ds['x_sd'][dim], ds['x_min'][
            dim], ds['x_max'][dim]
        error = ker.error()
        mon = ker.monotonicity()
        per = ker.period()

        doc = self.doc
        with doc.create(
                pl.Subsection(ut.NoEscape(dims2text([dim], data, cap=True)))):
            # Routine description
            s = dims2text([dim], data, cap=True) + " has " \
              + "mean value {0:.2f} and standard deviation {1:.2f}. ".format(xmu, xsd) \
              + "Its observed minimum and maximum are {0:.2f} and {1:.2f} respectively. ".format(xmin, xmax) \
              + "A GP classifier trained on this variable alone can achieve " \
              + r"a cross-validated classification error of {0:.2f}\%. ".format(error * 100)

            # Significance
            s += "\n\n"
            e0 = float(self.constker.error())
            if error / e0 < 0.25:
                s += "Compared with the null model (baseline), this variable "
                s += "contains strong evidence whether the sample belongs to "
                s += "class `{0}'. ".format(data.YLabel[1])
            elif error / e0 < 0.8:
                s += "Compared with the null model (baseline), this variable "
                s += "contains some evidence of class label assignment. "
            elif error / e0 < 1.0:
                s += "This variable provides little evidence of class label "
                s += r"assignment given a baseline error rate of {0:.2f}\%. ".format(
                    e0 * 100)
            else:
                s += "The classification performance (in terms of error rate) "
                s += "based on this variable alone is even worse than "
                s += r'that of the na{\"i}ve baseline classifier. '

            # Monotonicity and periodicity - only do this for significant factors
            if error / e0 < 0.8:
                if mon != 0:
                    corr_str = "positive" if mon > 0 else "negative"
                    s += "There is a " + corr_str + " correlation between the value "
                    s += "of this variable and the likelihood of the sample being "
                    s += "classified as positive. "
                elif per != 0:
                    s += "The class assignment is approximately periodic with "
                    s += dims2text([dim], data) + ". "
                    s += "The period is about {0:.2f}. ".format(per)
                else:
                    s += "No significant monotonicity or periodicity "
                    s += "is associated with this variable. "

            s += "The GP posterior trained on this variable is plotted in Figure {0}. ".format(
                self.fignum)
            doc.append(ut.NoEscape(s))

            # Plotting
            imgName = 'var{0}'.format(dim)
            imgFormat = '.eps'
            imgFilename = imgName + imgFormat
            ker.draw(os.path.join(self.path, imgName), active_dims_only=True)
            caption_str = r"Trained classifier on " + dims2text([dim],
                                                                ker.data) + "."
            with doc.create(pl.Figure(position='htbp!')) as fig:
                fig.add_image(imgFilename, width=ut.NoEscape(r'0.7\textwidth'))
                fig.add_caption(ut.NoEscape(caption_str))
                self.fignum += 1
Code Example #14
def _place_figure(file_path, width=r"\textwidth", doc=None):
    with doc.create(pyl.Figure(position="H")) as tex_fig:
        tex_fig.add_image(str(file_path), width=pyl.NoEscape(width))
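
_place_figure defaults doc to None but dereferences it immediately, so a document must always be passed in. A minimal usage sketch with a hypothetical image path; note that position="H" requires the LaTeX float package at compile time.

import pylatex as pyl

doc = pyl.Document()
doc.packages.append(pyl.Package('float'))  # needed for the 'H' float placement
_place_figure('plots/spectrum.png', width=r'0.8\textwidth', doc=doc)  # hypothetical path
doc.generate_tex('figure_page')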
Code Example #15
def create_overleaf_files(overleaf):
    files = []

    articles = get_project_articles(FIGSHARE_PROJECT_ID)
    #print(articles)
    for article in articles:
        #print(article['title'])
        newfiles = get_files_of_article(article['id'])
        for i, f in enumerate(newfiles):
            newfiles[i]['article_id'] = article['id']
            newfiles[i]['article_name'] = article['title']
        files += newfiles

    fdf = pd.DataFrame(files)
    #print("fdf",fdf)

    fdf = fdf.sort_values(by=['article_id', 'article_name', 'name'])
    fdfo = fdf[['article_id', 'article_name', 'name']]
    fdfo = fdfo.merge(overleaf[['article_id', 'name', 'overleaf']],
                      on=['article_id', 'name'],
                      how='outer')

    #print("fdfo", fdfo)

    fdfo = fdfo.where(pd.notnull(fdfo), None)

    for_download = overleaf.merge(fdf[['article_id', 'name', 'download_url']],
                                  on=['article_id', 'name'])

    #print("for_download",for_download)

    # create individual files
    for row in for_download.iterrows():
        if len(row[1]['overleaf']) > 0:
            download_url = row[1]['download_url']
            file = raw_issue_request('GET', download_url, binary=True)
            if '.pkl' in row[1]['name']:
                with open(
                        '/mnt/labbook/output/untracked/tmp_overleaf-{}/{}'.
                        format(head, row[1]['name']), 'wb') as f:
                    f.write(file)
                df = pd.read_pickle(
                    '/mnt/labbook/output/untracked/tmp_overleaf-{}/{}'.format(
                        head, row[1]['name']))
                df.to_latex(
                    '/mnt/labbook/output/untracked/overleaf-{}/figshare/{}.tex'
                    .format(head, row[1]['overleaf']))
                repo.git.add('figshare/{}.tex'.format(row[1]['overleaf']))
            else:
                extension = row[1]['name'].split('.')[-1]
                with open(
                        '/mnt/labbook/output/untracked/overleaf-{}/figshare/{}.{}'
                        .format(head, row[1]['overleaf'],
                                extension), 'wb') as f:
                    f.write(file)
                    repo.git.add('figshare/{}.{}'.format(
                        row[1]['overleaf'], extension))

    # create bibliography file
    adf = pd.DataFrame(articles)
    #print(adf)
    bib_data = BibliographyData()

    for row in for_download.iterrows():

        if len(row[1]['overleaf']) > 0:
            idx = adf[adf['id'] == row[1]['article_id']].index[0]
            bib_data.add_entry(key=row[1]['overleaf'],
                               entry=Entry('article', [
                                   ('title', adf.at[idx, 'title']),
                                   ('journal', "figshare"),
                                   ('doi', adf.at[idx, 'doi']),
                               ]))

    bib_data.to_file(
        '/mnt/labbook/output/untracked/overleaf-{}/figures_tables.bib'.format(
            head))
    repo.git.add('figures_tables.bib')

    # write supplementary tex

    geometry_options = {"tmargin": "1cm", "lmargin": "1cm"}
    doc = ltx.Document(geometry_options=geometry_options)
    doc.preamble.append(ltx.Package('biblatex', options=['sorting=none']))
    doc.preamble.append(
        ltx.Command('addbibresource',
                    arguments=[ltx.NoEscape("figures_tables.bib")]))
    doc.preamble.append(ltx.Package('booktabs'))
    doc.preamble.append(ltx.Package('longtable'))

    with doc.create(ltx.Subsection('images and tables supplementary file')):
        for row in for_download.iterrows():
            if len(row[1]['overleaf']) > 0:
                idx = adf[adf['id'] == row[1]['article_id']].index[0]
                #print("The name is...",row[1]['name'])
                if '.pkl' in row[1]['name']:
                    #print("I should be including something here")
                    with doc.create(ltx.Table(position='hbt')) as table_holder:
                        table_holder.append(
                            ltx.Command('input',
                                        arguments=[
                                            ltx.NoEscape(
                                                "figshare/{}.tex".format(
                                                    row[1]['overleaf']))
                                        ]))
                        if row[1]['caption'] is not None:
                            table_holder.add_caption(row[1]['caption'])
                            with open(
                                    "/mnt/labbook/output/untracked/overleaf-{}/figshare/{}_caption.tex"
                                    .format(head, row[1]['overleaf']),
                                    "w") as text_file:
                                text_file.write(row[1]['caption'])
                        else:
                            table_holder.add_caption(adf.at[idx, 'title'])
                            with open(
                                    "/mnt/labbook/output/untracked/overleaf-{}/figshare/{}_caption.tex"
                                    .format(head, row[1]['overleaf']),
                                    "w") as text_file:
                                text_file.write(adf.at[idx, 'title'])
                        repo.git.add('figshare/{}_caption.tex'.format(
                            row[1]['overleaf']))
                        table_holder.append(
                            ltx.Command(
                                'cite',
                                arguments=[ltx.NoEscape(row[1]['overleaf'])]))

                else:
                    with doc.create(
                            ltx.Figure(position='hbt')) as image_holder:
                        image_holder.add_image('figshare/{}'.format(
                            row[1]['overleaf']))
                        #print("THE CAPTION IS:", row[1]['caption'])
                        if row[1]['caption'] is not None:
                            image_holder.add_caption(row[1]['caption'])
                            with open(
                                    "/mnt/labbook/output/untracked/overleaf-{}/figshare/{}_caption.tex"
                                    .format(head, row[1]['overleaf']),
                                    "w") as text_file:
                                text_file.write(
                                    ltx.utils.escape_latex(row[1]['caption']))
                        else:
                            image_holder.add_caption(
                                ltx.utils.escape_latex(adf.at[idx, 'title']))
                            with open(
                                    "/mnt/labbook/output/untracked/overleaf-{}/figshare/{}_caption.tex"
                                    .format(head, row[1]['overleaf']),
                                    "w") as text_file:
                                text_file.write(
                                    ltx.utils.escape_latex(adf.at[idx,
                                                                  'title']))
                        repo.git.add('figshare/{}_caption.tex'.format(
                            row[1]['overleaf']))
                        image_holder.append(
                            ltx.Command(
                                'cite',
                                arguments=[ltx.NoEscape(row[1]['overleaf'])]))

    doc.append(ltx.Command('printbibliography'))

    doc.generate_tex(
        '/mnt/labbook/output/untracked/overleaf-{}/supplementary'.format(head))
    repo.git.add('supplementary.tex')
Code Example #16
File: util.py Project: andrewhanlon/sigmond_scripts
def add_correlator(doc, task_handler, correlator, name, obs_handler):

    operator_src = operator_info.operator.Operator(correlator.getSource())
    subtractvev = task_handler.subtractvev and operator_src.channel.vev
    if correlator.isSinkSourceSame():
        left_pdf_file = task_handler.correlator_plotfile(
            correlator, name, extension=PlotExtension.pdf)
        right_pdf_file = task_handler.energy_plotfile(
            operator_src, name, extension=PlotExtension.pdf)

    else:
        left_pdf_file = task_handler.correlator_plotfile(
            correlator,
            name,
            complex_arg=sigmond.ComplexArg.RealPart,
            extension=PlotExtension.pdf)
        right_pdf_file = task_handler.correlator_plotfile(
            correlator,
            name,
            complex_arg=sigmond.ComplexArg.ImaginaryPart,
            extension=PlotExtension.pdf)

    if correlator.isSinkSourceSame():
        left_estimates = sigmond.getCorrelatorEstimates(
            obs_handler, correlator, task_handler.hermitian, subtractvev,
            sigmond.ComplexArg.RealPart, task_handler.sampling_mode)
        right_estimates = sigmond.getEffectiveEnergy(
            obs_handler, correlator, task_handler.hermitian, subtractvev,
            sigmond.ComplexArg.RealPart, task_handler.sampling_mode,
            task_handler.plot_info.timestep,
            task_handler.plot_info.eff_energy_type.value, 0.)
    else:
        left_estimates = sigmond.getCorrelatorEstimates(
            obs_handler, correlator, task_handler.hermitian, subtractvev,
            sigmond.ComplexArg.RealPart, task_handler.sampling_mode)
        right_estimates = sigmond.getCorrelatorEstimates(
            obs_handler, correlator, task_handler.hermitian, subtractvev,
            sigmond.ComplexArg.ImaginaryPart, task_handler.sampling_mode)

    if correlator.isSinkSourceSame():
        doc.append(pylatex.NoEscape(rf"Score: {score(left_estimates)}"))

    with doc.create(pylatex.Figure(position='H')):
        with doc.create(
                pylatex.SubFigure(
                    position='b',
                    width=pylatex.NoEscape(r'0.5\linewidth'))) as left_fig:
            add_image(left_fig,
                      task_handler.results_dir,
                      left_pdf_file,
                      width="1.0")
        with doc.create(
                pylatex.SubFigure(
                    position='b',
                    width=pylatex.NoEscape(r'0.5\linewidth'))) as right_fig:
            add_image(right_fig,
                      task_handler.results_dir,
                      right_pdf_file,
                      width="1.0")

    if correlator.isSinkSourceSame():
        header_row = [
            pylatex.NoEscape(r"$t$"),
            pylatex.NoEscape(r"$C(t)$"),
            pylatex.NoEscape(r"$\delta C(t)$"),
        ]
        if task_handler.plot_info.eff_energy_type.value < 2:
            header_row.extend([
                pylatex.NoEscape(
                    rf"$a_t E_{{\rm eff}} (t + {task_handler.plot_info.timestep}/2)$"
                ),
                pylatex.NoEscape(
                    rf"$\delta a_t E_{{\rm eff}} (t + {task_handler.plot_info.timestep}/2)$"
                ),
            ])
        else:
            header_row.extend([
                pylatex.NoEscape(r"$a_t E_{\rm eff} (t)$"),
                pylatex.NoEscape(r"$\delta a_t E_{\rm eff} (t)$"),
            ])

    else:
        header_row = [
            pylatex.NoEscape(r"$t$"),
            pylatex.NoEscape(r"$Re C(t)$"),
            pylatex.NoEscape(r"$\delta Re C(t)$"),
            pylatex.NoEscape(r"$Im C(t)$"),
            pylatex.NoEscape(r"$\delta Im C(t)$")
        ]

    with doc.create(pylatex.Center()) as centered:
        with centered.create(
                pylatex.LongTabu("X[c] X[2,c] X[2,c] X[2,c] X[2,c]",
                                 to=r"\linewidth")) as data_table:
            data_table.add_row(header_row, mapper=[pylatex.utils.bold])
            data_table.add_hline()
            data_table.end_table_header()
            for t in sorted(left_estimates.keys()):
                left_value = left_estimates[t].getFullEstimate()
                left_error = left_estimates[t].getSymmetricError()
                left_est = nice_value(left_value, left_error)
                left_rel_error = round(left_estimates[t].getRelativeError(), 4)
                t_right = t
                if correlator.isSinkSourceSame(
                ) and task_handler.plot_info.eff_energy_type.value < 2:
                    t_right = t + 0.5 * task_handler.plot_info.timestep

                if t_right in right_estimates:
                    right_value = right_estimates[t_right].getFullEstimate()
                    right_error = right_estimates[t_right].getSymmetricError()
                    right_est = nice_value(right_value, right_error)
                    right_rel_error = round(
                        right_estimates[t_right].getRelativeError(), 4)
                else:
                    right_est = ""
                    right_rel_error = ""

                row = [
                    int(t), left_est, left_rel_error, right_est,
                    right_rel_error
                ]
                data_table.add_row(row)

    doc.append(pylatex.NoEscape(r"\newpage"))
Code Example #17
File: analyzer.py Project: moshes7/odeval
    def perfromance_report(self, save_pdf=False, pdf_name='analyzer_report'):
        """
        Generate performance report. By default, images of confusion matrix and performance metrics will be saved.
        If save_pdf is True, a pdf will also be saved.
        Output will be saved in self.output_dir

        Parameters
        ----------
        save_pdf : bool, optional
            If True, pdf report will be saved.
        pdf_name : str, optional
            PDF report file name, used only if save_pdf is True.

        Returns
        -------
        None.
        """

        # save confusion matrix
        if self.bbox_match_method == 'iou':
            title_str = 'Match Pred Method = IOU\nScore_th = {} | IOU_th = {}'.format(
                self.score_th, self.iou_th)
        elif self.bbox_match_method == 'pred_bbox_center':
            title_str = 'Match Pred Method = Centers\nScore_th = {}'.format(
                self.score_th)

        image_path_cm = os.path.join(self.output_dir,
                                     'total confusion matrix.png')
        h_ax, h_fig = ConfusionMatrix.plot_confusion_matrix(
            self.cm,
            display_labels=self.class_names,
            add_miss_detection_col=self.add_miss_detection_col,
            add_false_detection_row=self.add_false_detection_row,
            title_str=title_str,
            display=False,
            save_fig_name=image_path_cm,
        )

        # save performance metrics

        # text files
        self.metrics_tables['global'].to_csv(
            os.path.join(self.output_dir, 'global metrics.csv'))
        self.metrics_tables['class'].to_csv(
            os.path.join(self.output_dir, 'class metrics.csv'))

        # images
        image_path_class_metrics = os.path.join(self.output_dir,
                                                'class metrics.png')
        image_path_global_metrics = os.path.join(self.output_dir,
                                                 'global metrics.png')
        dfi.export(self.metrics_tables['class'], image_path_class_metrics)
        dfi.export(self.metrics_tables['global'], image_path_global_metrics)

        # save pdf report
        if save_pdf:

            doc = pylatex.Document(geometry_options={'margin': '0.2in'})

            doc.preamble.append(pylatex.Command('title', 'Analyzer Report'))
            doc.preamble.append(
                pylatex.Command('date', pylatex.NoEscape(r'\today')))
            doc.append(pylatex.NoEscape(r'\maketitle'))

            # plot confusion matrix
            with doc.create(pylatex.Section('Confusion Matrix')):
                with doc.create(pylatex.Figure(position='h!')) as fig:

                    fig.add_image(image_path_cm, placement='center')

            # global metrics
            with doc.create(pylatex.Section('Performance Metrics')):
                # with doc.create(pylatex.Section('Global Metrics')):
                with doc.create(pylatex.Figure(position='h!')) as fig:
                    fig.add_image(image_path_global_metrics,
                                  width=200,
                                  placement='center')

            # class metrics
            # with doc.create(pylatex.Section('Class Metrics')):
                with doc.create(pylatex.Figure(position='h!')) as fig:
                    fig.add_image(image_path_class_metrics, placement='center')

            # save report
            output_full_name = '{}'.format(pdf_name)
            report_file = os.path.abspath(
                os.path.join(self.output_dir, output_full_name))
            doc.generate_pdf(report_file,
                             clean_tex=True,
                             compiler='pdflatex',
                             silent=True)

        pass
Code Example #18
def _add_data(doc: pl.Document, dataset):
    name = f'{dataset}_NR2_GBReg'
    directory = dataset

    aVp_graph = f'{name}.jpg'
    angle_dist_graph = f'{name}_angledistribution.jpg'
    error_dist_graph = f'{name}_errordistribution.jpg'
    sqerror_graph = f'{name}_sqerror_vs_actual.jpg'
    stats_csv_all = f'{name}_stats_all.csv'
    stats_csv_out = f'{name}_stats_out.csv'

    actualVpred_file = os.path.join(directory, aVp_graph)
    ang_dist_file = os.path.join(directory, angle_dist_graph)
    error_dist_file = os.path.join(directory, error_dist_graph)
    sqerror_file = os.path.join(directory, sqerror_graph)

    df_all = pd.read_csv(os.path.join(directory, stats_csv_all))
    df_out = pd.read_csv(os.path.join(directory, stats_csv_out))

    with doc.create(pl.Section('Results')):
        with doc.create(pl.Subsection('Summary of method:')):
            doc.append('Trained on PreAF2 dataset.')
            doc.append('\n')
            doc.append(f'Dataset tested: {dataset}')
            doc.append('\n')
            doc.append(f'GBR parameters: {gbr_params}.')
            doc.append('\n')

    with doc.create(pl.Subsection('Summary of the data:')):
        with doc.create(pl.Figure(position='!htbp')) as actualVpred:
            actualVpred.add_image(actualVpred_file, width='300px')
            actualVpred.add_caption(
                'Graph showing the predicted packing angle against the actual packing angle.'
            )

        with doc.create(pl.Table(position='!htbp')) as table:
            table.add_caption('Summary of results for all data')
            table.append(pl.Command('centering'))
            table.append(pl.NoEscape(df_all.to_latex(escape=False)))

        with doc.create(pl.Table(position='!htbp')) as table:
            table.add_caption('Summary of results for outliers.')
            table.append(pl.Command('centering'))
            table.append(pl.NoEscape(df_out.to_latex(escape=False)))

        with doc.create(pl.Figure(position='!htbp')) as graphs:
            with doc.create(
                    pl.SubFigure(position='!htbp',
                                 width=pl.NoEscape(
                                     r'0.30\linewidth'))) as ang_dist_graph:
                ang_dist_graph.add_image(ang_dist_file,
                                         width=pl.NoEscape(r'\linewidth'))
                ang_dist_graph.add_caption(
                    'Frequency distribution of the packing angle.')
            with doc.create(
                    pl.SubFigure(position='!htbp',
                                 width=pl.NoEscape(
                                     r'0.33\linewidth'))) as error_dist_graph:
                error_dist_graph.add_image(error_dist_file,
                                           width=pl.NoEscape(r'\linewidth'))
                error_dist_graph.add_caption(
                    'Distribution of errors calculated as the difference '
                    'between the predicted and actual interface angle.')
            with doc.create(
                    pl.SubFigure(position='!htbp',
                                 width=pl.NoEscape(
                                     r'0.33\linewidth'))) as sqerror_graph:
                sqerror_graph.add_image(sqerror_file,
                                        width=pl.NoEscape(r'\linewidth'))
                sqerror_graph.add_caption(
                    'Squared error in predicted packing angle against actual packing angle.'
                )
            graphs.add_caption('Graphs for further metrics.')
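
The Table blocks in this and the earlier _add_data example embed a pandas DataFrame through to_latex. A small self-contained sketch of that pattern with toy data:

import pandas as pd
import pylatex as pl

df = pd.DataFrame({'RMSE': [1.23], 'Pearson r': [0.87]})  # toy stats table
doc = pl.Document()
with doc.create(pl.Table(position='!htbp')) as table:
    table.add_caption('Summary of results (toy data)')
    table.append(pl.Command('centering'))
    table.append(pl.NoEscape(df.to_latex(escape=False)))
doc.generate_tex('stats_table_example')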