Example #1
def filterstats(input_fn, output_dir, topn=None,
                maxeerates=[0.25, 0.5, 0.75, 1, 1.25, 1.5], maxns=None):

    if not os.path.isdir(output_dir):
        raise ValueError("directory {} does not exist".format(output_dir))

    minlen_fn = os.path.join(output_dir, "filterstats_minlen.txt")
    trunclen_fn = os.path.join(output_dir, "filterstats_trunclen.txt")
    plot_fn = os.path.join(output_dir, "filterstats_plot.png")

    minlen, trunclen = _stats(
        input_fn=input_fn,
        topn=topn,
        maxeerates=maxeerates,
        maxns=maxns)

    minlen.to_csv(minlen_fn, sep="\t", float_format="%.3f", index=False)
    trunclen.to_csv(trunclen_fn, sep="\t", float_format="%.3f", index=False)

    # custom rc. "svg.fonttype": "none" corrects the conversion of text in PDF
    # and SVG files
    rc = {
        "xtick.labelsize": 8,
        "ytick.labelsize": 8,
        "axes.labelsize": 10,
        "legend.fontsize": 10,
        "svg.fonttype": "none"}

    with plt.rc_context(rc=rc):
        _plot(minlen, trunclen, plot_fn)
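The "svg.fonttype": "none" entry in the rc dict above is worth isolating. A minimal standalone sketch (independent of micca; the filename is a placeholder) of what it does when a figure is written to SVG: labels are kept as real text elements instead of being converted to paths, so they stay editable and searchable.

import matplotlib.pyplot as plt

with plt.rc_context({"svg.fonttype": "none"}):
    fig, ax = plt.subplots()
    ax.set_xlabel("Depth")
    ax.set_ylabel("#OTUs")
    fig.savefig("filterstats_plot.svg")  # text stays as <text> elements, not paths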
Example #2
def _fig_size_cntx(fig, fig_size_inches, tight_layout):
    """Resize a figure in a context

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        The figure to resize
    fig_size_inches : tuple
        The (width, height) to use in the context. If None, the size
        is not changed
    tight_layout : boolean
        When True, tight layout is used.
    """
    orig_size = fig.get_size_inches()
    orig_layout = fig.get_tight_layout()
    if fig_size_inches is not None:
        fig.set_size_inches(*fig_size_inches)
    fig.set_tight_layout(tight_layout)
    if tight_layout:
        rc_params = {'savefig.bbox': 'tight'}
    else:
        rc_params = {'savefig.bbox': 'standard'}
    try:
        with plt.rc_context(rc_params):
            yield fig
    finally:
        fig.set_size_inches(*orig_size)
        fig.set_tight_layout(orig_layout)
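A hedged usage sketch of _fig_size_cntx: its yield-based body implies it is meant to be wrapped with contextlib.contextmanager in the original module, so the wrapper is applied explicitly here as an assumption.

from contextlib import contextmanager

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])

resize = contextmanager(_fig_size_cntx)  # assumption: the original def carries this decorator
with resize(fig, fig_size_inches=(4, 3), tight_layout=True) as small_fig:
    small_fig.savefig("resized.png")  # written at 4x3 inches with a tight bbox
# on exit the original size and tight_layout setting are restored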
Example #3
def stats(input_fn, output_dir, topn=None):

    if not os.path.isdir(output_dir):
        raise ValueError("directory {} does not exist".format(output_dir))

    len_dist_fn = os.path.join(output_dir, "stats_lendist.txt")
    qual_dist_fn = os.path.join(output_dir, "stats_qualdist.txt")
    qual_summ_fn = os.path.join(output_dir, "stats_qualsumm.txt")

    len_dist_plot_fn = os.path.join(output_dir, "stats_lendist_plot.png")
    qual_dist_plot_fn = os.path.join(output_dir, "stats_qualdist_plot.png")
    qual_summ_plot_fn = os.path.join(output_dir, "stats_qualsumm_plot.png")

    len_dist, qual_dist, qual_summ = _stats(input_fn=input_fn, topn=topn)

    len_dist.to_csv(len_dist_fn, sep="\t", float_format="%.3f", index=False)
    qual_dist.to_csv(qual_dist_fn, sep="\t", float_format="%.3f", index=False)
    qual_summ.to_csv(qual_summ_fn, sep="\t", float_format="%.3f", index=False)

    # custom rc. "svg.fonttype: none" corrects the conversion of text in PDF
    # and SVG files
    rc = {
        "xtick.labelsize": 8,
        "ytick.labelsize": 8,
        "axes.labelsize": 10,
        "legend.fontsize": 10,
        "svg.fonttype": "none"}

    with plt.rc_context(rc=rc):
        _plot_len_dist(len_dist, len_dist_plot_fn)
        _plot_qual_dist(qual_dist, qual_dist_plot_fn)
        _plot_qual_summ(qual_summ, qual_summ_plot_fn)
Example #4
    def minorticksubplot(xminor, yminor, i):
        rc = {'xtick.minor.visible': xminor,
              'ytick.minor.visible': yminor}
        with plt.rc_context(rc=rc):
            ax = fig.add_subplot(2, 2, i)

        assert (len(ax.xaxis.get_minor_ticks()) > 0) == xminor
        assert (len(ax.yaxis.get_minor_ticks()) > 0) == yminor
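A standalone, hedged sketch of what this test relies on: minor-tick visibility is read from rc when the Axes is created, so an Axes built inside the context keeps (or lacks) its minor ticks even after the context exits.

import matplotlib.pyplot as plt

fig = plt.figure()
with plt.rc_context({'xtick.minor.visible': True, 'ytick.minor.visible': False}):
    ax = fig.add_subplot(1, 1, 1)

print(len(ax.xaxis.get_minor_ticks()) > 0)  # expected: True
print(len(ax.yaxis.get_minor_ticks()) > 0)  # expected: False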
Example #5
def plot_alpha(metadata, category, hue):
    import seaborn as sns
    with plt.rc_context(dict(sns.axes_style("darkgrid"),
                             **sns.plotting_context("notebook", font_scale=2))):
        width = len(metadata[category].unique())
        plt.figure(figsize=(width*4, 8))
        sns.boxplot(x=category, y='Alpha diversity',
                    data=metadata.sort_values(category), hue=hue, palette='cubehelix')
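A hedged sketch of the dict-merging idiom used above: seaborn's axes_style() and plotting_context() both return plain rc-style dicts, so they can be combined and handed to plt.rc_context as a temporary theme.

import matplotlib.pyplot as plt
import seaborn as sns

theme = dict(sns.axes_style("darkgrid"),
             **sns.plotting_context("notebook", font_scale=2))
with plt.rc_context(theme):
    fig, ax = plt.subplots(figsize=(8, 4))
    ax.set_title("styled only inside this block")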
Example #6
    def test_get_color_cycle(self):

        if mpl_ge_150:
            colors = [(1., 0., 0.), (0, 1., 0.)]
            prop_cycle = plt.cycler(color=colors)
            with plt.rc_context({"axes.prop_cycle": prop_cycle}):
                result = utils.get_color_cycle()
                assert result == colors
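A small hedged sketch of the same idea outside the test suite: a prop_cycle installed through rc_context applies to Axes created while the context is active.

import matplotlib.pyplot as plt

colors = [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
with plt.rc_context({"axes.prop_cycle": plt.cycler(color=colors)}):
    fig, ax = plt.subplots()
    ax.plot([0, 1])  # first cycle color: red
    ax.plot([1, 0])  # second cycle color: green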
Example #7
    def view(self, test=False):
        """Displays the graph"""

        if test:
            self._attr["style"] = True
            AttrConf.MPL_STYLE["interactive"] = False

        if self._attr["concat"]:
            if self._attr["style"]:
                with plt.rc_context(AttrConf.MPL_STYLE):
                    self._plot_concat()
            else:
                self._plot_concat()
        else:
            if self._attr["style"]:
                with plt.rc_context(AttrConf.MPL_STYLE):
                    self._plot(self._attr["permute"])
            else:
                self._plot(self._attr["permute"])
Example #8
def create_icon_axes(fig, ax_position, lw_bars, lw_grid, lw_border, rgrid):
    """
    Create a polar axes containing the matplotlib radar plot.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        The figure to draw into.
    ax_position : (float, float, float, float)
        The position of the created Axes in figure coordinates as
        (x, y, width, height).
    lw_bars : float
        The linewidth of the bars.
    lw_grid : float
        The linewidth of the grid.
    lw_border : float
        The linewidth of the Axes border.
    rgrid : array-like
        Positions of the radial grid.

    Returns
    -------
    ax : matplotlib.axes.Axes
        The created Axes.
    """
    with plt.rc_context({'axes.edgecolor': MPL_BLUE,
                         'axes.linewidth': lw_border}):
        ax = fig.add_axes(ax_position, projection='polar')
        ax.set_axisbelow(True)

        N = 7
        arc = 2. * np.pi
        theta = np.arange(0.0, arc, arc / N)
        radii = np.array([2, 6, 8, 7, 4, 5, 8])
        width = np.pi / 4 * np.array([0.4, 0.4, 0.6, 0.8, 0.2, 0.5, 0.3])
        bars = ax.bar(theta, radii, width=width, bottom=0.0, align='edge',
                      edgecolor='0.3', lw=lw_bars)
        for r, bar in zip(radii, bars):
            color = *cm.jet(r / 10.)[:3], 0.6  # color from jet with alpha=0.6
            bar.set_facecolor(color)

        ax.tick_params(labelbottom=False, labeltop=False,
                       labelleft=False, labelright=False)

        ax.grid(lw=lw_grid, color='0.9')
        ax.set_rmax(9)
        ax.set_yticks(rgrid)

        # the actual visible background - extends a bit beyond the axis
        ax.add_patch(Rectangle((0, 0), arc, 9.58,
                               facecolor='white', zorder=0,
                               clip_on=False, in_layout=False))
        return ax
Example #9
    def prepare(self):
        sns.set_style('ticks')
        sns.set_context('paper')
        with plt.rc_context(plot_params):
            self.fig = plt.figure(figsize=(7, 7))
            gs = plt.GridSpec(3, 2)
            self.ax = {
                'ispectrum': self.fig.add_subplot(gs[2, :]),
                'scatter': self.fig.add_subplot(gs[:2, :]),
            }
            # self.ax['violin'] = self.fig.add_subplot(gs[1:, -1])
        self.gs = gs
Example #10
    def view(self, test=False):
        """Displays the graph"""

        if test:
            self._attr["style"] = True
            AttrConf.MPL_STYLE["interactive"] = False

        permute = self._attr["permute"] and not self._attr["concat"]
        if self._attr["style"]:
            with plt.rc_context(AttrConf.MPL_STYLE):
                self._resolve(permute, self._attr["concat"])
        else:
            self._resolve(permute, self._attr["concat"])
Example #11
def stats(input_fn, output_dir, step=100, replace=False, seed=0):

    if not os.path.isdir(output_dir):
        raise ValueError("directory {} does not exist".format(output_dir))

    sample_summ_fn = os.path.join(output_dir, "tablestats_samplesumm.txt")
    otu_summ_fn = os.path.join(output_dir, "tablestats_otusumm.txt")
    rarecurve_fn = os.path.join(output_dir, "tablestats_rarecurve.txt")
    rarecurve_plot_fn = os.path.join(output_dir, "tablestats_rarecurve_plot.png")

    table = micca.table.read(input_fn)

    # sample summary
    sample_summ = pd.DataFrame({
        "Depth": table.sum(),
        "NOTU": (table > 0).sum(),
        "NSingle": (table == 1).sum()},
        columns=["Depth", "NOTU", "NSingle"])
    sample_summ.index.name = "Sample"
    sample_summ.sort_values(by="Depth", inplace=True)
    sample_summ.to_csv(sample_summ_fn, sep='\t')

    # OTU summary
    otu_summ = pd.DataFrame({
        "N": table.sum(axis=1),
        "NSample": (table > 0).sum(axis=1)},
        columns=["N", "NSample"])
    otu_summ.index.name = "OTU"
    otu_summ.sort_values(by="N", inplace=True, ascending=False)
    otu_summ.to_csv(otu_summ_fn, sep='\t')

    # rarefaction curves
    rarecurve = micca.table.rarecurve(table, step=step, replace=replace,
                                      seed=seed)
    rarecurve.to_csv(rarecurve_fn, sep='\t', float_format="%.0f", na_rep="NA")

    # custom rc. "svg.fonttype: none" corrects the conversion of text in PDF
    # and SVG files
    rc = {
        "xtick.labelsize": 8,
        "ytick.labelsize": 8,
        "axes.labelsize": 10,
        "legend.fontsize": 10,
        "svg.fonttype": "none"}

    with plt.rc_context(rc=rc):
        fig = plt.figure(figsize=(10, 6))
        plt.plot(rarecurve.index, rarecurve.to_numpy(), color="k")
        plt.xlabel("Depth")
        plt.ylabel("#OTUs")
        fig.savefig(rarecurve_plot_fn, dpi=300, bbox_inches='tight', format="png")
Example #12
    def prepare(self):
        sns.set_style('ticks')
        sns.set_context('paper')
        with plt.rc_context(plot_params):
            self.fig = plt.figure(figsize=(7, 7))
            gs = plt.GridSpec(3, 12)
            self.ax = {
                'spectrum': self.fig.add_subplot(gs[:2, 4:]),
                'ISI': self.fig.add_subplot(gs[0, :4]),
                'cycle': self.fig.add_subplot(gs[1, :4]),
                'vs_freq': self.fig.add_subplot(gs[2, :4]),
            }
            self.ax['cycle_ampl'] = self.ax['cycle'].twinx()
            self.ax['circ'] = self.fig.add_subplot(gs[2, 4:8])
            self.ax['contrast'] = self.fig.add_subplot(gs[2, 8:])
Example #13
    def prepare(self):
        sns.set_style('ticks')
        sns.set_context('paper')
        with plt.rc_context(plot_params):
            self.fig = plt.figure(figsize=(7, 7))
            gs = plt.GridSpec(3, 2)
            self.ax = {
                'scatter': self.fig.add_subplot(gs[-1, :]),
                # 'spectrum': self.fig.add_subplot(gs[1:, :-1]),
                'ISI': self.fig.add_subplot(gs[1, 1]),
                'EOD': self.fig.add_subplot(gs[1, 0]),

            }
            self.ax['scatter_base'] =  self.fig.add_subplot(gs[0, :])
            self.ax['EOD_ampl'] = self.ax['EOD'].twinx()
        self.gs = gs
Example #14
def plot_dendrogram(clustering, size=10):
    link = clustering['linkage']
    labels = clustering['labels']
    link_function, colors = get_dendrogram_color_fun(link, clustering['reorder_vec'],
                                                     labels)
    
    # set figure properties
    figsize = (size, size*.6)
    with sns.axes_style('white'):
        fig = plt.figure(figsize=figsize)
        # **********************************
        # plot dendrogram
        # **********************************
        with plt.rc_context({'lines.linewidth': size*.125}):
            dendrogram(link,  link_color_func=link_function,
                       orientation='top')
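A self-contained, hedged sketch of the trick above: scipy's dendrogram has no linewidth argument, and its line collections appear to pick up the 'lines.linewidth' rc value at call time, which is why the value is scoped with rc_context around the call.

import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import linkage, dendrogram

link = linkage(np.random.rand(12, 3), method="ward")
fig, ax = plt.subplots(figsize=(6, 3))
with plt.rc_context({'lines.linewidth': 0.5}):
    dendrogram(link, ax=ax, orientation='top')  # thin dendrogram lines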
Example #15
def plot_stacked_bar(df):
    import seaborn as sns
    with plt.rc_context(dict(sns.axes_style("darkgrid"),
                         **sns.plotting_context("notebook", font_scale=1.8))):
        f, ax = plt.subplots(1, figsize=(10, 10))
        x = list(range(len(df.columns)))
        bottom = np.array([0] * len(df.columns))
        cat_percents = []
        for id_ in df.index:
            color = '#' + ''.join(np.random.choice(list('ABCDEF123456789'), 6))
            ax.bar(x, df.loc[id_], color=color, bottom=bottom, align='center')
            bottom = df.loc[id_] + bottom
            cat_percents.append(''.join(["[{0:.2f}] ".format(x) for x in df.loc[id_].tolist()]))

        legend_labels = [' '.join(e) for e in zip(cat_percents, df.index.tolist())]

        ax.set_xticks(x)
        ax.set_xticklabels(df.columns.tolist())
        ax.set_ylim([0, 1])
        ax.legend(legend_labels, loc='center left', bbox_to_anchor=(1, 0.5))
Example #16
    def render(self):
        """
        Actually render the figure.

        :returns: A :mod:`matplotlib` figure object.
        """
        # Use custom matplotlib context
        with plt.rc_context(rc=custom_mpl.custom_rc(rc=self.custom_mpl_rc)):
            # Create figure if necessary
            figure, axes = self._render_grid()

            # Render depending on animation type
            if self.animation["type"] is False:
                self._render_no_animation(axes)
            elif self.animation["type"] == "gif":
                self._render_gif_animation(figure, axes)
            elif self.animation["type"] == "animation":
                # TODO
                return None
            else:
                return None
            # Use tight_layout to optimize layout, use custom padding
            figure.tight_layout(pad=1)  # TODO: Messes up animations
        return figure
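custom_mpl.custom_rc above is project code that is not shown; a hedged, hypothetical stand-in for the pattern (merging user overrides into a base rc dict before entering rc_context) might look like this.

import matplotlib.pyplot as plt

def custom_rc(rc=None):
    """Hypothetical stand-in: merge user overrides into a base rc dict."""
    base = {"axes.grid": True, "legend.frameon": False}
    base.update(rc or {})
    return base

with plt.rc_context(rc=custom_rc(rc={"axes.grid": False})):
    figure, axes = plt.subplots(2, 2)
    figure.tight_layout(pad=1)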
Example #17
def newton_calcula(request, form):
    plt.rcParams.update(plt.rcParamsDefault)
    plt.close('all')

    try:

        valores = form.cleaned_data  # funcion y valor inicial
        context = {'form': form}
        starting = valores['ini']
        fucn = valores['f']

        x, y, z, t = sympy.symbols('x y z t')
        fx = sympy.sympify(fucn)
        dfdx = sympy.diff(fx, x)

        e = .00001
        x0 = starting
        iterations = 0
        delta = 1
        b = 1
        iteraciones_permitidas = 60

        while e < delta:

            r = x0 - fx.subs(x, x0) / dfdx.subs(x, x0)
            delta = abs((r - x0) / r)
            iterations += 1
            x0 = r
            if iterations > iteraciones_permitidas:
                b = 0
                break

    except Exception:
        print_exc()
        return errors_view(request)

    else:

        fxx = sympy.lambdify(x, fx, "numpy")
        nuevo = estiliza_string(fucn)

        t = np.arange(r - 25, r + 25, .5)
        s = []

        for n in t:
            s.append(float(fxx(n)))

        # rc_context only takes effect as a context manager, and these colors
        # are read when the Axes is created, so wrap the subplots() call in it.
        with plt.rc_context({'axes.edgecolor': 'black', 'xtick.color': 'black', 'ytick.color': 'black'}):
            # plt.style.use("dark_background")
            fig, ax = plt.subplots()

        # plt.axvline(0, color='black')
        ax.axhline(0, color='gray')

        ax.plot(t, s, label=f'f(x) = {nuevo}', color='#40E0D0')
        ax.grid(color="gray")

        if b == 1:  # si se encontro corte antes de 50 iteraciones
            plt.plot(r, fxx(r), marker='o', markersize=5, color="red", label=f"Corte con Eje x = {r:.4f}")
            ax.set(xlabel='x', ylabel='f(x)', title=f"Raíz calculada después de {iterations} iteraciones")

        else:
            ax.hlines(0, 0, 0, color='r', label='No Se Encontró Corte con Eje X')
            ax.set(xlabel='x', ylabel='f(x)',
                   title=f"No se logro encontrar raíz después de {iteraciones_permitidas} iteraciones")

        plt.legend(loc='best')

        buf = BytesIO()
        fig.savefig(buf, format='jpg', quality=90, dpi=160, facecolor="#f3f2f1", edgecolor='#f3f2f1')
        buf.seek(0)
        uri = 'data:image/jpeg;base64,' + parse.quote(b64encode(buf.read()))

        context['image'] = uri

        return render(request, "newton_calculado.html", context)
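A framework-free, hedged sketch of the figure-to-data-URI step at the end of the view above (PNG here, so the declared MIME type and the saved format agree):

from base64 import b64encode
from io import BytesIO
from urllib import parse

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])

buf = BytesIO()
fig.savefig(buf, format='png', dpi=160)
buf.seek(0)
uri = 'data:image/png;base64,' + parse.quote(b64encode(buf.read()))
# uri can be dropped into an <img src="..."> tag in a template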
Example #18
def visualise(X, y, Xq, fq_exp, fq_cov, noise = 1.0, n_draws = 10):

    FONTSIZE = 25
    FONTNAME = 'Sans Serif'
    TICKSIZE = 24

    mycmap = cm.get_cmap(name = 'gist_rainbow', lut = None)

    rcparams = {
        'backend': 'pdf',
        'axes.labelsize': TICKSIZE,
        'text.fontsize': FONTSIZE,
        'legend.fontsize': FONTSIZE,
        'xtick.labelsize': TICKSIZE,
        'ytick.labelsize': TICKSIZE,
        'text.usetex': True,
        'axes.color_cycle': [mycmap(k) for k in np.linspace(0, 1, n_draws)]
    }

    # NOTE: rc_context returns a context manager; called bare like this, without
    # a "with" block, the settings above are never actually applied.
    plt.rc_context(rcparams)

    fq_var = fq_cov.diagonal()
    yq_draws = gp.draws(n_draws, fq_exp, fq_cov)

    yq_var = fq_var + noise**2
    yq_ub = fq_exp + 2 * yq_var
    yq_lb = fq_exp - 2 * yq_var

    if y.shape[0] == 0:
        title = 'prior'
    else:
        title = 'posterior'

    # Plot
    fig1 = plt.figure(figsize = (8.0, 6.0))
    fig2 = plt.figure(figsize = (8.0, 6.0))
    ax1 = fig1.add_subplot(111)
    ax2 = fig2.add_subplot(111)

    ax1.plot(Xq, fq_exp, 'k-', linewidth = 2)
    ax1.fill_between(Xq[:, 0], yq_ub, yq_lb, facecolor = (0.9, 0.9, 0.9), edgecolor = (0.5, 0.5, 0.5))
    ax1.plot(X[:, 0], y, 'c.')
    ax1.set_ylim((-3.0, 3.0))
    ax1.set_xlabel('$x$', fontsize = FONTSIZE)
    ax1.set_ylabel('$f(x)$', fontsize = FONTSIZE)
    ax1.set_title(title, fontsize = FONTSIZE)

    ax2.plot(Xq, fq_exp, 'k-', linewidth = 2)
    ax2.fill_between(Xq[:, 0], yq_ub, yq_lb, facecolor = (0.9, 0.9, 0.9), edgecolor = (0.5, 0.5, 0.5))
    ax2.plot(X[:, 0], y, 'c.')
    for i in range(n_draws):
        ax2.plot(Xq[:, 0], yq_draws[i], '--')
    ax2.set_ylim((-3.0, 3.0))
    ax2.set_xlabel('$x$', fontsize = FONTSIZE)
    ax2.set_ylabel('$f(x)$', fontsize = FONTSIZE)
    ax2.set_title(title, fontsize = FONTSIZE)

    fig1.tight_layout()
    fig2.tight_layout()

    if y.shape[0] == 0:
        fig1.savefig('bayesian_modeling/prior.eps')
        fig2.savefig('bayesian_modeling/prior_draws.eps')
    else:
        fig1.savefig('bayesian_modeling/posterior%d.eps' % y.shape[0])
        fig2.savefig('bayesian_modeling/posterior_draws%d.eps' % y.shape[0])
Example #19
    return np.sin(k / (N + 1) * np.pi)**2


t1 = np.linspace(-0.5, end + 1.5, 1001)
y1 = y(t1)

t2 = np.linspace(1, min(22, end), min(22, end))
y2 = y(t2)

rc = {
    "xtick.direction": "inout",
    "ytick.direction": "inout",
    "xtick.major.size": 6,
    "ytick.major.size": 6
}
with plt.rc_context(rc):
    fig, ax = plt.subplots(figsize=[8, 4])
    ax.grid(b=True, which='major', color='#666666', linestyle='-')
    ax.grid(b=True, which='minor', color='#bbbbbb', linestyle='-')
    ax.set_xticks(list(range(1, end + 3, 2)))
    ax.set_yticks([0.2, 0.4, 0.6, 0.8, 1.0])
    ax.plot(t1, y1, color='gray', linestyle='--')
    ax.scatter(t2, y2, zorder=3, color=(0, 0.4470, 0.7410))

    for i in range(1, min(22, end) + 1):
        ax.vlines(i, 0, y2[i - 1], color='gray', lw=1, linestyles='dotted')

    plt.xlabel('$k$')
    plt.ylabel('Weighting Factor $w_k$')

    arrowed_spines(fig, ax)
Example #20
                                (DLtimestamplist[i], fstamp, tstamp)].value)
            A0max.append(np.amax(bp))

        A0 = A0amp * max(A0max)
        tcount = 0
        for tstamp in time[::tstep]:
            #for tstamp in time[0:3]:
            tcount = tcount + 1
            bp = np.transpose(f['BeamPower/%s/%s/%s/bp' %
                                (DLtimestamplist[i], fstamp, tstamp)].value)
            #bp to be dB
            bp_dB = 10 * np.log10(bp / A0)
            #print(np.amax(bp_dB))
            r, theta = np.meshgrid(cvec, thetavec)
            #-- Plot... ------------------------------------------------
            with plt.rc_context({'ytick.color': whitecolor}):
                fig, ax = plt.subplots(subplot_kw=dict(projection='polar'),
                                       figsize=(8, 8))
                levels = np.linspace(dBmin, dBmax, levelnum)
                cs = ax.contourf(theta,
                                 r,
                                 bp_dB,
                                 cmap=colormap,
                                 levels=levels,
                                 extend='both')
                # plot decoration
                ax.set_xticklabels(['E', '', 'N', '', 'W', '', 'S', ''])
                ax.set_yticks([2, 3, 4, 5])
                ax.annotate('[km/s]', (1.00, 0.701), xycoords='axes fraction')
                ax.set_title('f: %4.2f [Hz] Time: %s' % (freq, tstamp), y=1.08)
                cbar = fig.colorbar(cs,
Example #21
#%% Plot
sns.set(font_scale=1.4, style='ticks')

rc_params = {
    'font.sans-serif': "Arial",  # just an example
    'svg.fonttype': 'none',
}

fig, ax = plt.subplots()
g1 = sns.swarmplot(data=data,
                   x='compound_group',
                   y='novelty_score',
                   ax=ax,
                   size=10)
g1 = sns.swarmplot(data=data,
                   x='compound_group',
                   y='novelty_score',
                   hue='correct',
                   ax=ax,
                   size=10)

ax.get_legend().remove()

g1.axes.set_xlabel('')
g1.axes.set_ylabel('novelty score')

with plt.rc_context(rc_params):
    plt.savefig('novelty_scores.pdf')
    plt.savefig('novelty_scores.svg')
Example #22
def test_skewt_with_grid_enabled():
    """Test using SkewT when gridlines are already enabled (#271)."""
    with plt.rc_context(rc={'axes.grid': True}):
        # Also tests when we don't pass in Figure
        SkewT()
Example #23
plot_keys = [
    ('RotSpeed', i_gspd, 'Generator Speed [rpm]', 1),
    ('BldPitch1', i_pit, 'Blade Pitch [deg]', 1),
    ('GenPwr', i_pow, 'Generator Power [MW]', 1e-3),
    ('RootMyb1', i_flp, 'Flapwise [MNm]', 1e-3),
    #             ('RootMEdg1', 'edge', 'Edgewise [MNm]', 1e-3),
    ('TwrBsMyt', i_tbfa, 'Tower Base Fore-Aft Moment [MNm]', 1e-3),
    ('TipDxb1', i_tipf, 'Flapwise Blade Tip Deflection [m]', 1)
]
alpha = 0.8
bd_maxwsp = 20.1  # cutoff for BeamDyn frequencies

# make figure
pltprms = {'font.size': 10, 'axes.labelsize': 10}
with plt.rc_context(pltprms):
    fig, axs = plt.subplots(2, 3, num=3, figsize=(9, 4), clear=True)

# plot stuff
for i, (fastname, h2name) in enumerate(model_keys):
    # make paths
    h2_path = steady_dir + f'{h2name}_Steady_stats.csv'
    fast_path = steady_dir + f'IEA15MW_torque_steady_{fastname}_stats.yaml'
    # load data
    h2_df = read_steady(h2_path)
    fast_df = read_steady(fast_path)
    for j, (fast_key, h2_chan, label, scl) in enumerate(plot_keys):
        ax = axs[j // 3, j % 3]
        # update fast_keys and scaling for beamdyn
        fst_scl, h2scl = scl, scl
        if 'GenPwr' in fast_key:
Example #24
def boxplot(
    data,
    column=None,
    by=None,
    ax=None,
    fontsize=None,
    rot=0,
    grid=True,
    figsize=None,
    layout=None,
    return_type=None,
    **kwds,
):

    import matplotlib.pyplot as plt

    # validate return_type:
    if return_type not in BoxPlot._valid_return_types:
        raise ValueError("return_type must be {'axes', 'dict', 'both'}")

    if isinstance(data, pd.Series):
        data = data.to_frame("x")
        column = "x"

    def _get_colors():
        #  num_colors=3 is required as method maybe_color_bp takes the colors
        #  in positions 0 and 2.
        #  if colors not provided, use same defaults as DataFrame.plot.box
        result = get_standard_colors(num_colors=3)
        result = np.take(result, [0, 0, 2])
        result = np.append(result, "k")

        colors = kwds.pop("color", None)
        if colors:
            if is_dict_like(colors):
                # replace colors in result array with user-specified colors
                # taken from the colors dict parameter
                # "boxes" value placed in position 0, "whiskers" in 1, etc.
                valid_keys = ["boxes", "whiskers", "medians", "caps"]
                key_to_index = dict(zip(valid_keys, range(4)))
                for key, value in colors.items():
                    if key in valid_keys:
                        result[key_to_index[key]] = value
                    else:
                        raise ValueError(
                            f"color dict contains invalid key '{key}'. "
                            f"The key must be either {valid_keys}"
                        )
            else:
                result.fill(colors)

        return result

    def maybe_color_bp(bp, **kwds):
        # GH 30346, when users specifying those arguments explicitly, our defaults
        # for these four kwargs should be overridden; if not, use Pandas settings
        if not kwds.get("boxprops"):
            setp(bp["boxes"], color=colors[0], alpha=1)
        if not kwds.get("whiskerprops"):
            setp(bp["whiskers"], color=colors[1], alpha=1)
        if not kwds.get("medianprops"):
            setp(bp["medians"], color=colors[2], alpha=1)
        if not kwds.get("capprops"):
            setp(bp["caps"], color=colors[3], alpha=1)

    def plot_group(keys, values, ax: Axes, **kwds):
        # GH 45465: xlabel/ylabel need to be popped out before plotting happens
        xlabel, ylabel = kwds.pop("xlabel", None), kwds.pop("ylabel", None)
        if xlabel:
            ax.set_xlabel(pprint_thing(xlabel))
        if ylabel:
            ax.set_ylabel(pprint_thing(ylabel))

        keys = [pprint_thing(x) for x in keys]
        values = [np.asarray(remove_na_arraylike(v), dtype=object) for v in values]
        bp = ax.boxplot(values, **kwds)
        if fontsize is not None:
            ax.tick_params(axis="both", labelsize=fontsize)

        # GH 45465: x/y are flipped when "vert" changes
        is_vertical = kwds.get("vert", True)
        ticks = ax.get_xticks() if is_vertical else ax.get_yticks()
        if len(ticks) != len(keys):
            i, remainder = divmod(len(ticks), len(keys))
            assert remainder == 0, remainder
            keys *= i
        if is_vertical:
            ax.set_xticklabels(keys, rotation=rot)
        else:
            ax.set_yticklabels(keys, rotation=rot)
        maybe_color_bp(bp, **kwds)

        # Return axes in multiplot case, maybe revisit later # 985
        if return_type == "dict":
            return bp
        elif return_type == "both":
            return BoxPlot.BP(ax=ax, lines=bp)
        else:
            return ax

    colors = _get_colors()
    if column is None:
        columns = None
    else:
        if isinstance(column, (list, tuple)):
            columns = column
        else:
            columns = [column]

    if by is not None:
        # Prefer array return type for 2-D plots to match the subplot layout
        # https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580
        result = _grouped_plot_by_column(
            plot_group,
            data,
            columns=columns,
            by=by,
            grid=grid,
            figsize=figsize,
            ax=ax,
            layout=layout,
            return_type=return_type,
            **kwds,
        )
    else:
        if return_type is None:
            return_type = "axes"
        if layout is not None:
            raise ValueError("The 'layout' keyword is not supported when 'by' is None")

        if ax is None:
            rc = {"figure.figsize": figsize} if figsize is not None else {}
            with plt.rc_context(rc):
                ax = plt.gca()
        data = data._get_numeric_data()
        naxes = len(data.columns)
        if naxes == 0:
            raise ValueError(
                "boxplot method requires numerical columns, nothing to plot."
            )
        if columns is None:
            columns = data.columns
        else:
            data = data[columns]

        result = plot_group(columns, data.values.T, ax, **kwds)
        ax.grid(grid)

    return result
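A hedged sketch, outside pandas, of the figsize handling above: "figure.figsize" is scoped with rc_context just before plt.gca() so that the implicitly created figure picks up the requested size.

import matplotlib.pyplot as plt

figsize = (10, 4)
rc = {"figure.figsize": figsize} if figsize is not None else {}
with plt.rc_context(rc):
    ax = plt.gca()  # creates the figure, sized from the scoped rc value

print(ax.figure.get_size_inches())  # expected: [10.  4.]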
Example #25
def boxplot(data, column=None, by=None, ax=None, fontsize=None,
            rot=0, grid=True, figsize=None, layout=None, return_type=None,
            **kwds):

    # validate return_type:
    if return_type not in BoxPlot._valid_return_types:
        raise ValueError("return_type must be {'axes', 'dict', 'both'}")

    if isinstance(data, ABCSeries):
        data = data.to_frame('x')
        column = 'x'

    def _get_colors():
        #  num_colors=3 is required as method maybe_color_bp takes the colors
        #  in positions 0 and 2.
        return _get_standard_colors(color=kwds.get('color'), num_colors=3)

    def maybe_color_bp(bp):
        if 'color' not in kwds:
            from matplotlib.artist import setp
            setp(bp['boxes'], color=colors[0], alpha=1)
            setp(bp['whiskers'], color=colors[0], alpha=1)
            setp(bp['medians'], color=colors[2], alpha=1)

    def plot_group(keys, values, ax):
        keys = [pprint_thing(x) for x in keys]
        values = [np.asarray(remove_na_arraylike(v)) for v in values]
        bp = ax.boxplot(values, **kwds)
        if fontsize is not None:
            ax.tick_params(axis='both', labelsize=fontsize)
        if kwds.get('vert', 1):
            ax.set_xticklabels(keys, rotation=rot)
        else:
            ax.set_yticklabels(keys, rotation=rot)
        maybe_color_bp(bp)

        # Return axes in multiplot case, maybe revisit later # 985
        if return_type == 'dict':
            return bp
        elif return_type == 'both':
            return BoxPlot.BP(ax=ax, lines=bp)
        else:
            return ax

    colors = _get_colors()
    if column is None:
        columns = None
    else:
        if isinstance(column, (list, tuple)):
            columns = column
        else:
            columns = [column]

    if by is not None:
        # Prefer array return type for 2-D plots to match the subplot layout
        # https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580
        result = _grouped_plot_by_column(plot_group, data, columns=columns,
                                         by=by, grid=grid, figsize=figsize,
                                         ax=ax, layout=layout,
                                         return_type=return_type)
    else:
        if return_type is None:
            return_type = 'axes'
        if layout is not None:
            raise ValueError("The 'layout' keyword is not supported when "
                             "'by' is None")

        if ax is None:
            rc = {'figure.figsize': figsize} if figsize is not None else {}
            with plt.rc_context(rc):
                ax = plt.gca()
        data = data._get_numeric_data()
        if columns is None:
            columns = data.columns
        else:
            data = data[columns]

        result = plot_group(columns, data.values.T, ax)
        ax.grid(grid)

    return result
Example #26
import matplotlib
matplotlib.use('svg')
import matplotlib.pyplot as plt
import pandas as pd

import constants as c
import plotting

if __name__ == '__main__':
    samples = pd.read_pickle(
        os.path.join(c.Paths.output, 'ex_2_11', 'samples.pkl'))

    results = pd.read_pickle(
        os.path.join(c.Paths.output, 'ex_2_11', 'results.pkl'))

    with plt.rc_context(plotting.rc()):
        fig, ax = plt.subplots(1)
        samples.plot(ax=ax)
        ax.legend(title='Actions', bbox_to_anchor=(1, 1), loc='upper left')
        ax.grid(alpha=0.25)
        ax.set_xlabel('$t$')
        ax.set_ylabel('Action Values')
        ax.set_title('True Action Values on 10-Armed Bandit')
        # plt.tight_layout()
        fig.savefig(os.path.join(c.Paths.output, 'ex_2_11',
                                 'action_values.png'),
                    bbox_inches='tight')

        fig, ax = plt.subplots(1)
        results.plot(ax=ax)
        ax.set_xscale('log', basex=2)
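plotting.rc() above comes from a project-local module that is not shown here; a hypothetical stand-in (contents are an assumption, not the original) would just return a plain rc dict for plt.rc_context:

def rc():
    """Hypothetical stand-in for the project's plotting.rc() helper."""
    return {
        "figure.figsize": (8, 4.5),
        "axes.grid": True,
        "legend.frameon": False,
        "savefig.dpi": 150,
    }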
Example #27
import numpy as np
import matplotlib.pyplot as plt

np.random.seed(5)

fig = plt.figure(figsize=(6, 3))
fig.subplots_adjust(bottom=0.15, wspace=0.3, left=0.09, right=0.95)

x = np.linspace(2000, 2008, 9)
y = np.random.randn(9) + 50000

with plt.rc_context(rc={'axes.formatter.offset_threshold' : 2}):
    ax1 = fig.add_subplot(1, 2, 1)
    ax1.plot(x, y)
    ax1.set_title('classic')

ax2 = fig.add_subplot(1, 2, 2)
ax2.plot(x, y)
ax2.set_title('v2.0')
Example #28
    def _i_mtv(self, data, wcs, title, isMask):
        """Internal routine to display an Image or Mask on a DS9 display"""

        title = str(title) if title else ""
        dataArr = data.getArray()

        if isMask:
            maskPlanes = data.getMaskPlaneDict()
            nMaskPlanes = max(maskPlanes.values()) + 1

            planes = {}  # build inverse dictionary
            for key in maskPlanes:
                planes[maskPlanes[key]] = key

            planeList = range(nMaskPlanes)

            maskArr = np.zeros_like(dataArr, dtype=np.int32)

            colorNames = ['black']
            colorGenerator = self.display.maskColorGenerator(omitBW=True)
            for p in planeList:
                color = self.display.getMaskPlaneColor(
                    planes[p]) if p in planes else None

                if not color:  # none was specified
                    color = next(colorGenerator)
                elif color.lower() == afwDisplay.IGNORE:
                    color = 'black'  # we'll set alpha = 0 anyway

                colorNames.append(color)
            #
            # Convert those colours to RGBA so we can have per-mask-plane transparency
            # and build a colour map
            #
            # Pixels equal to 0 don't get set (as no bits are set), so leave them transparent
            # and start our colours at [1] -- hence "i + 1" below
            #
            colors = mpColors.to_rgba_array(colorNames)
            alphaChannel = 3  # the alpha channel; the A in RGBA
            colors[0][alphaChannel] = 0.0  # it's black anyway
            for i, p in enumerate(planeList):
                if colorNames[i + 1] == 'black':
                    alpha = 0.0
                else:
                    alpha = 1 - self._getMaskTransparency(planes[p] if p in
                                                          planes else None)

                colors[i + 1][alphaChannel] = alpha

            cmap = mpColors.ListedColormap(colors)
            norm = mpColors.NoNorm()
        else:
            cmap = self._image_colormap
            norm = self._normalize

        ax = self._figure.gca()
        bbox = data.getBBox()
        extent = (bbox.getBeginX() - 0.5, bbox.getEndX() - 0.5,
                  bbox.getBeginY() - 0.5, bbox.getEndY() - 0.5)

        with pyplot.rc_context(dict(interactive=False)):
            if isMask:
                for i, p in reversed(list(enumerate(planeList))):
                    if colors[i +
                              1][alphaChannel] == 0:  # colors[0] is reserved
                        continue

                    bitIsSet = (dataArr & (1 << p)) != 0
                    if bitIsSet.sum() == 0:
                        continue

                    maskArr[
                        bitIsSet] = i + 1  # + 1 as we set colorNames[0] to black

                    if not self._fastMaskDisplay:  # we draw each bitplane separately
                        ax.imshow(maskArr,
                                  origin='lower',
                                  interpolation='nearest',
                                  extent=extent,
                                  cmap=cmap,
                                  norm=norm)
                        maskArr[:] = 0

                if self._fastMaskDisplay:  # we only draw the lowest bitplane
                    ax.imshow(maskArr,
                              origin='lower',
                              interpolation='nearest',
                              extent=extent,
                              cmap=cmap,
                              norm=norm)
            else:
                mappable = ax.imshow(dataArr,
                                     origin='lower',
                                     interpolation='nearest',
                                     extent=extent,
                                     cmap=cmap,
                                     norm=norm)
                self._mappable = mappable

        self._figure.canvas.draw_idle()
Example #29
def plot_return_value_stability(
        ts: pd.Series,
        return_period,
        return_period_size: typing.Union[str, pd.Timedelta] = "1Y",
        thresholds=None,
        r: typing.Union[str, pd.Timedelta] = "24H",
        extremes_type: str = "high",
        distributions: typing.List[typing.Union[str,
                                                scipy.stats.rv_continuous]] = [
                                                    "genpareto",
                                                    "expon",
                                                ],
        alpha: typing.Optional[float] = None,
        n_samples: int = 100,
        figsize: tuple = (8, 5),
) -> tuple:  # pragma: no cover
    """
    Plot return value stability plot for given threshold values.

    The return value stability plot shows return values for given return period
    for given thresholds.
    The purpose of this plot is to investigate stability and sensitivity of the
    Generalized Pareto Distribution model to the threshold value.
    Threshold value selection should still be guided by the mean residual life plot
    and the parameter stability plot. This plot should be used as an additional check.

    Parameters
    ----------
    ts : pandas.Series
        Time series of the signal.
    return_period : number
        Return period.
        Given as a multiple of `return_period_size`.
    return_period_size : str or pandas.Timedelta, optional
        Size of return period (default='1Y').
        If set to '30D', then a return period of 12
        would be roughly equivalent to a 1 year return period (360 days).
    thresholds : array-like, optional
        An array of thresholds for which the mean residual life plot is plotted.
        If None (default), plots mean residual life for 100 equally-spaced thresholds
        between 90th (10th if extremes_type='low') percentile
        and 10th largest (smallest if extremes_type='low') value in the series.
    r : str or pandas.Timedelta, optional
        Duration of window used to decluster the exceedances.
        By default r='24H' (24 hours).
    extremes_type : str, optional
        high (default) - extreme high values
        low - extreme low values
    distributions : list, optional
        List of distributions for which the return value curves are plotted.
        By default these are "genpareto" and "expon".
        A distribution must be either a name of a distribution from scipy.stats
        or a subclass of scipy.stats.rv_continuous.
        See https://docs.scipy.org/doc/scipy/reference/stats.html
    alpha : float, optional
        Confidence interval width in the range (0, 1).
        If None (default), then confidence interval is not shown.
    n_samples : int, optional
        Number of bootstrap samples used to estimate
        confidence interval bounds (default=100).
        Ignored if `alpha` is None.
    figsize : tuple, optional
        Figure size in inches in format (width, height).
        By default it is (8, 5).

    Returns
    -------
    figure : matplotlib.figure.Figure
        Figure object.
    axes : matplotlib.axes._axes.Axes
        Axes object.

    """
    # Get default thresholds
    if thresholds is None:
        thresholds = get_default_thresholds(
            ts=ts,
            extremes_type=extremes_type,
            num=100,
        )

    # Instantiate model
    model = EVA(data=ts)

    # Calculate return values for each threshold and distribution
    return_values: typing.Dict[str, typing.List[float]] = {}
    ci_lower: typing.Dict[str, typing.List[float]] = {}
    ci_upper: typing.Dict[str, typing.List[float]] = {}
    for distribution in distributions:
        for threshold in thresholds:
            model.get_extremes(
                method="POT",
                extremes_type=extremes_type,
                threshold=threshold,
                r=r,
            )
            model.fit_model(
                model="MLE",
                distribution=distribution,
            )
            rv, cil, ciu = model.get_return_value(
                return_period=return_period,
                return_period_size=return_period_size,
                alpha=alpha,
                n_samples=n_samples,
            )
            try:
                return_values[distribution].append(rv)
                ci_lower[distribution].append(cil)
                ci_upper[distribution].append(ciu)
            except KeyError:
                return_values[distribution] = [rv]
                ci_lower[distribution] = [cil]
                ci_upper[distribution] = [ciu]

    with plt.rc_context(rc=pyextremes_rc):
        # Create figure and axes
        fig, ax = plt.subplots(figsize=figsize, dpi=96)
        ax.grid(False)

        # Plot central estimate of return values
        for i, distribution in enumerate(distributions):
            color = pyextremes_rc["axes.prop_cycle"].by_key()["color"][i]
            ax.plot(
                thresholds,
                return_values[distribution],
                color=color,
                lw=2,
                ls="-",
                label=distribution,
                zorder=(i + 3) * 5,
            )

            # Plot confidence bounds
            if alpha is not None:
                for ci in [ci_lower[distribution], ci_upper[distribution]]:
                    ax.plot(
                        thresholds,
                        ci,
                        color=color,
                        lw=1,
                        ls="--",
                        zorder=(i + 2) * 5,
                    )
                ax.fill_between(
                    thresholds,
                    ci_lower[distribution],
                    ci_upper[distribution],
                    facecolor=color,
                    edgecolor="None",
                    alpha=0.25,
                    zorder=(i + 1) * 5,
                )

        # Plot legend
        ax.legend(
            frameon=True,
            framealpha=0.9,
        )

        # Label axes
        ax.set_xlabel("Threshold")
        ax.set_ylabel("Return value")

        return fig, ax
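The color lookup pyextremes_rc["axes.prop_cycle"].by_key()["color"][i] above pulls the i-th color out of a cycler stored in an rc-style dict. A minimal hedged sketch of that lookup, using matplotlib's default cycle in place of the pyextremes style:

import matplotlib.pyplot as plt

rc = {"axes.prop_cycle": plt.rcParams["axes.prop_cycle"]}
colors = rc["axes.prop_cycle"].by_key()["color"]
print(colors[0])  # first cycle color, '#1f77b4' with the default style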
Example #30
plt.figure()
plt.scatter(sepal_length,
            sepal_width,
            s=100 * petal_width,
            c=df["colors"],
            alpha=0.2)
plt.xlabel("longueur des sépales [cm]")
plt.ylabel("largeur des sépales [cm]")

# Create a legend from an empty scatter plot
for i, v in enumerate(variety.unique()):
    plt.scatter([], [], c="C{}".format(i), alpha=0.2, label=v)
plt.legend()

# Change the font size for this figure only
with plt.rc_context({"font.size": 5}):
    # Define a grid of subplots
    fig, ax = plt.subplots(len(labels),
                           len(labels),
                           sharex="col",
                           sharey="row",
                           figsize=(1.5 * len(labels), 1.5 * len(labels)))

    for l1, d1 in labels.items():
        i1 = list(labels.keys()).index(l1)
        for l2, d2 in labels.items():
            i2 = list(labels.keys()).index(l2)
            for v in variety.unique():
                sc = (variety == v)
                if l1 == l2:
                    ax[i1, i2].hist(d1[sc], alpha=0.5, bins=10, density=True)
Example #31
def plot_dendrogram(loading, clustering, title=None,
                    break_lines=True, drop_list=None, double_drop_list=None,
                    absolute_loading=False,  size=4.6,  dpi=300, 
                    filename=None):
    """ Plots HCA results as dendrogram with loadings underneath
    
    Args:
        loading: pandas df, a results EFA loading matrix
        clustering: pandas df, a results HCA clustering
        title (optional): str, title to plot
        break_lines: whether to separate EFA heatmap based on clusters, default=True
        drop_list (optional): list of cluster indices to drop the cluster label
        double_drop_list (optional): list of cluster indices to drop the cluster label twice
        absolute_loading: whether to plot the absolute loading value, default False
        filename (optional): if set, where to save the plot
        
    """


    c = loading.shape[1]
    # extract cluster vars
    link = clustering['linkage']
    DVs = clustering['clustered_df'].columns
    ordered_loading = loading.loc[DVs]
    if absolute_loading:
        ordered_loading = abs(ordered_loading)
    # get cluster sizes
    labels=clustering['labels']
    cluster_sizes = [np.sum(labels==(i+1)) for i in range(max(labels))]
    link_function, colors = get_dendrogram_color_fun(link, clustering['reorder_vec'],
                                                     labels)
    
    # set figure properties
    figsize = (size, size*.6)
    # set up axes' size 
    heatmap_height = ordered_loading.shape[1]*.035
    heat_size = [.1, heatmap_height]
    dendro_size=[np.sum(heat_size), .3]
    # set up plot axes
    dendro_size = [.15,dendro_size[0], .78, dendro_size[1]]
    heatmap_size = [.15,heat_size[0],.78,heat_size[1]]
    cbar_size = [.935,heat_size[0],.015,heat_size[1]]
    ordered_loading = ordered_loading.T

    with sns.axes_style('white'):
        fig = plt.figure(figsize=figsize)
        ax1 = fig.add_axes(dendro_size) 
        # **********************************
        # plot dendrogram
        # **********************************
        with plt.rc_context({'lines.linewidth': size*.125}):
            dendrogram(link, ax=ax1, link_color_func=link_function,
                       orientation='top')
        # change axis properties
        ax1.tick_params(axis='x', which='major', labelsize=14,
                        labelbottom=False)
        ax1.get_yaxis().set_visible(False)
        ax1.spines['top'].set_visible(False)
        ax1.spines['right'].set_visible(False)
        ax1.spines['bottom'].set_visible(False)
        ax1.spines['left'].set_visible(False)
        # **********************************
        # plot loadings as heatmap below
         # **********************************
        ax2 = fig.add_axes(heatmap_size)
        cbar_ax = fig.add_axes(cbar_size)
        max_val = np.max(abs(loading.values))
        # bring to closest .25
        max_val = ceil(max_val*4)/4
        sns.heatmap(ordered_loading, ax=ax2, 
                    cbar=True, cbar_ax=cbar_ax,
                    yticklabels=True,
                    xticklabels=True,
                    vmax =  max_val, vmin = -max_val,
                    cbar_kws={'orientation': 'vertical',
                              'ticks': [-max_val, 0, max_val]},
                    cmap=sns.diverging_palette(220,15,n=100,as_cmap=True))
        ax2.set_yticklabels(ax2.get_yticklabels(), rotation=0)
        ax2.tick_params(axis='y', labelsize=size*heat_size[1]*30/c, pad=size/4, length=0)            
        # format cbar axis
        cbar_ax.set_yticklabels([format_num(-max_val), 0, format_num(max_val)])
        cbar_ax.tick_params(labelsize=size*heat_size[1]*25/c, length=0, pad=size/2)
        cbar_ax.set_ylabel('Factor Loading', rotation=-90, 
                       fontsize=size*heat_size[1]*30/c, labelpad=size*2)
        # add lines to heatmap to distinguish clusters
        if break_lines == True:
            xlim = ax2.get_xlim(); 
            ylim = ax2.get_ylim()
            step = xlim[1]/len(labels)
            cluster_breaks = [i*step for i in np.cumsum(cluster_sizes)]
            ax2.vlines(cluster_breaks[:-1], ylim[0], ylim[1], linestyles='dashed',
                       linewidth=size*.1, colors=[.5,.5,.5], zorder=10)
        # **********************************
        # plot cluster names
        # **********************************
        beginnings = np.hstack([[0],np.cumsum(cluster_sizes)[:-1]])
        centers = beginnings+np.array(cluster_sizes)//2+.5
        offset = .07
        if 'cluster_names' in clustering.keys():
            ax2.tick_params(axis='x', reset=True, top=False, bottom=False, width=size/8, length=0)
            names = [transform_name(i) for i in clustering['cluster_names']]
            ax2.set_xticks(centers)
            ax2.set_xticklabels(names, rotation=0, ha='center', 
                                fontsize=heatmap_size[2]*size*1)
            ticks = ax2.xaxis.get_ticklines()[::2]
            for i, label in enumerate(ax2.get_xticklabels()):
                if label.get_text() != '':
                    ax2.hlines(c+offset,beginnings[i]+.5,beginnings[i]+cluster_sizes[i]-.5, 
                               clip_on=False, color=colors[i], linewidth=size/5)
                    label.set_color(colors[i])
                    ticks[i].set_color(colors[i])
                    y_drop = .005
                    line_drop = .3
                    if drop_list and i in drop_list:
                        y_drop = .05
                        line_drop = 1.6
                    if double_drop_list and i in double_drop_list:
                        y_drop = .1
                        line_drop = 2.9
                    label.set_y(-(y_drop/heatmap_height+heatmap_height/c*offset))
                    ax2.vlines(beginnings[i]+cluster_sizes[i]/2, 
                               c+offset, c+offset+line_drop,
                               clip_on=False, color=colors[i], 
                               linewidth=size/7.5)

        # add title
        if title:
            ax1.set_title(title, fontsize=size*2, y=1.05)
            
    if filename is not None:
        save_figure(fig, filename,
                    {'bbox_inches': 'tight', 'dpi': dpi})
        plt.close()
    else:
        return fig
Example #32
def boxplot(
    data,
    column=None,
    by=None,
    ax=None,
    fontsize=None,
    rot=0,
    grid=True,
    figsize=None,
    layout=None,
    return_type=None,
    **kwds,
):

    import matplotlib.pyplot as plt

    # validate return_type:
    if return_type not in BoxPlot._valid_return_types:
        raise ValueError("return_type must be {'axes', 'dict', 'both'}")

    if isinstance(data, ABCSeries):
        data = data.to_frame("x")
        column = "x"

    def _get_colors():
        #  num_colors=3 is required as method maybe_color_bp takes the colors
        #  in positions 0 and 2.
        #  if colors not provided, use same defaults as DataFrame.plot.box
        result = _get_standard_colors(num_colors=3)
        result = np.take(result, [0, 0, 2])
        result = np.append(result, "k")

        colors = kwds.pop("color", None)
        if colors:
            if is_dict_like(colors):
                # replace colors in result array with user-specified colors
                # taken from the colors dict parameter
                # "boxes" value placed in position 0, "whiskers" in 1, etc.
                valid_keys = ["boxes", "whiskers", "medians", "caps"]
                key_to_index = dict(zip(valid_keys, range(4)))
                for key, value in colors.items():
                    if key in valid_keys:
                        result[key_to_index[key]] = value
                    else:
                        raise ValueError(
                            f"font dict contains invalid key '{key}'. "
                            f"The key must be either {valid_keys}")
            else:
                result.fill(colors)

        return result

    def maybe_color_bp(bp):
        setp(bp["boxes"], color=colors[0], alpha=1)
        setp(bp["whiskers"], color=colors[1], alpha=1)
        setp(bp["medians"], color=colors[2], alpha=1)
        setp(bp["caps"], color=colors[3], alpha=1)

    def plot_group(keys, values, ax):
        keys = [pprint_thing(x) for x in keys]
        values = [np.asarray(remove_na_arraylike(v)) for v in values]
        bp = ax.boxplot(values, **kwds)
        if fontsize is not None:
            ax.tick_params(axis="both", labelsize=fontsize)
        if kwds.get("vert", 1):
            ax.set_xticklabels(keys, rotation=rot)
        else:
            ax.set_yticklabels(keys, rotation=rot)
        maybe_color_bp(bp)

        # Return axes in multiplot case, maybe revisit later # 985
        if return_type == "dict":
            return bp
        elif return_type == "both":
            return BoxPlot.BP(ax=ax, lines=bp)
        else:
            return ax

    colors = _get_colors()
    if column is None:
        columns = None
    else:
        if isinstance(column, (list, tuple)):
            columns = column
        else:
            columns = [column]

    if by is not None:
        # Prefer array return type for 2-D plots to match the subplot layout
        # https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580
        result = _grouped_plot_by_column(
            plot_group,
            data,
            columns=columns,
            by=by,
            grid=grid,
            figsize=figsize,
            ax=ax,
            layout=layout,
            return_type=return_type,
        )
    else:
        if return_type is None:
            return_type = "axes"
        if layout is not None:
            raise ValueError(
                "The 'layout' keyword is not supported when 'by' is None")

        if ax is None:
            rc = {"figure.figsize": figsize} if figsize is not None else {}
            with plt.rc_context(rc):
                ax = plt.gca()
        data = data._get_numeric_data()
        if columns is None:
            columns = data.columns
        else:
            data = data[columns]

        result = plot_group(columns, data.values.T, ax)
        ax.grid(grid)

    return result
Example #33
def plot_fluxnet_comparison_one_site(driver, science_test_data_dir,
                                     compare_data_dict, result_dir, plot_dir,
                                     plots_to_make, context, style, var_names,
                                     months, obs_dir, subdir):

    if check_site_files(obs_dir, subdir):
        # get CSV file from site directory to get lat/lng for site
        lat, lng = get_fluxnet_lat_lon(obs_dir, subdir)
        print(lat, lng)

        # loop over data to compare
        data = {}
        for key, items in compare_data_dict.items():

            if key == "ecflux":
                try:
                    # load Ameriflux data
                    data[key] = read_fluxnet_obs(subdir,
                                                 science_test_data_dir,
                                                 items)
                except OSError:
                    warnings.warn(
                        "this %s site does not have data" % subdir)

            elif key == "VIC.4.2.d":
                try:
                    # load VIC 4.2 simulations
                    data[key] = read_vic_42_output(lat, lng,
                                                   science_test_data_dir,
                                                   items)

                except OSError:
                    warnings.warn(
                        "this site has a lat/lng precision issue")

            else:
                try:
                    # load VIC 5 simulations
                    data[key] = read_vic_5_output(lat, lng,
                                                  result_dir,
                                                  items)
                except OSError:
                    warnings.warn(
                        "this site has a lat/lng precision issue")

        # make figures

        # plot preferences
        fs = 15
        dpi = 150

        if 'annual_mean_diurnal_cycle' in plots_to_make:

            # make annual mean diurnal cycle plots
            with plt.rc_context(dict(sns.axes_style(style),
                                     **sns.plotting_context(context))):
                f, axarr = plt.subplots(4, 1, figsize=(8, 8), sharex=True)

                for i, (vic_var, variable_name) in enumerate(
                        var_names.items()):

                    # calculate annual mean diurnal cycle for each
                    # DataFrame
                    annual_mean = {}
                    for key, df in data.items():
                        annual_mean[key] = pd.DataFrame(
                            df[vic_var].groupby(df.index.hour).mean())

                    df = pd.DataFrame(
                        {key: d[vic_var] for key, d in annual_mean.items()
                         if vic_var in d})

                    for key, series in df.iteritems():
                        series.plot(
                            linewidth=compare_data_dict[key]['linewidth'],
                            ax=axarr[i],
                            color=compare_data_dict[key]['color'],
                            linestyle=compare_data_dict[key]['linestyle'],
                            zorder=compare_data_dict[key]['zorder'])

                    axarr[i].legend(loc='upper left')
                    axarr[i].set_ylabel(
                        '%s ($W/{m^2}$)' % variable_name,
                        size=fs)
                    axarr[i].set_xlabel('Time of Day (Hour)', size=fs)
                    axarr[i].set_xlim([0, 24])
                    axarr[i].xaxis.set_ticks(np.arange(0, 24, 3))

                # save plot
                plotname = '%s_%s.png' % (lat, lng)
                os.makedirs(os.path.join(plot_dir, 'annual_mean'),
                            exist_ok=True)
                savepath = os.path.join(plot_dir, 'annual_mean', plotname)
                plt.savefig(savepath, bbox_inches='tight', dpi=dpi)

                plt.clf()
                plt.close()

        if 'monthly_mean_diurnal_cycle' in plots_to_make:

            # make monthly mean diurnal cycle plots
            with plt.rc_context(dict(sns.axes_style(style),
                                     **sns.plotting_context(context))):
                f, axarr = plt.subplots(4, 12, figsize=(35, 7),
                                        sharex=True,
                                        sharey=True)

                for i, (vic_var, variable_name) in enumerate(
                        var_names.items()):

                    # calculate monthly mean diurnal cycle
                    monthly_mean = {}
                    for (key, df) in data.items():
                        monthly_mean[key] = pd.DataFrame(
                            df[vic_var].groupby([df.index.month,
                                                 df.index.hour]).mean())

                    df = pd.DataFrame(
                        {key: d[vic_var] for key, d in monthly_mean.items()
                         if vic_var in d})

                    for j, month in enumerate(months):

                        for key, series in df.iteritems():
                            series[j + 1].plot(
                                linewidth=compare_data_dict[key]['linewidth'],
                                ax=axarr[i, j],
                                color=compare_data_dict[key]['color'],
                                linestyle=compare_data_dict[key]['linestyle'],
                                zorder=compare_data_dict[key]['zorder'])

                        axarr[i, j].set_ylabel(
                            '%s \n ($W/{m^2}$)' % variable_name,
                            size=fs)
                        axarr[i, j].set_xlabel('', size=fs)
                        axarr[i, j].set_xlim([0, 24])
                        axarr[i, j].xaxis.set_ticks(np.arange(0, 24, 3))
                        if i == 0:
                            axarr[i, j].set_title(month, size=fs)

                # add legend
                axarr[0, -1].legend(loc='center left',
                                    bbox_to_anchor=(1, 0.5))

                # add common x label
                f.text(0.5, 0.04, 'Time of Day (Hour)', ha='center',
                       size=fs)

                # save plot
                plotname = '%s_%s.png' % (lat, lng)
                os.makedirs(os.path.join(plot_dir, 'monthly_mean'),
                            exist_ok=True)
                savepath = os.path.join(plot_dir,
                                        'monthly_mean', plotname)
                plt.savefig(savepath, bbox_inches='tight', dpi=dpi)

                plt.clf()
                plt.close()
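
The plotting blocks above build their rc settings by merging `sns.axes_style(style)` with `sns.plotting_context(context)` before entering `plt.rc_context`. A small sketch of that merge, assuming seaborn is installed (the style and context names are only examples):

import matplotlib.pyplot as plt
import seaborn as sns

style_rc = sns.axes_style("whitegrid")        # axis/grid appearance keys
context_rc = sns.plotting_context("talk")     # font- and line-scaling keys
with plt.rc_context(dict(style_rc, **context_rc)):
    fig, axarr = plt.subplots(2, 1, figsize=(6, 4), sharex=True)
    for ax in axarr:
        ax.plot([0, 6, 12, 18, 24], [0, 3, 8, 3, 0])   # styled only inside this block
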
Ejemplo n.º 34
0
def fancy_show_stars(
        image,
        stars,
        ref_stars=None,
        target=None,
        size=15,
        pixel_scale=None,
        contrast=0.05,
        aperture=None,
        marker_color=np.array([131, 220, 255]) / 255,
        proper_motion=False,
        n_stars=None,
        flip=False,
        view="all",
        zoom=True,
        options={},
):
    """
    Plot stack image and detected stars

    Parameters
    ----------
    size: float (optional)
        pyplot figure (size, size)
    image: int (optional)
        index of image to plot in light files. Default is None, which shows the stack image
    contrast: float
        contrast within [0, 1] (zscale is applied here)
    marker_color: [r, g, b]
    proper_motion: bool
        whether to display proper motion on the image
    n_stars: int
        max number of stars to show
    flip: bool
        flip image
    view: 'all', 'reference'
        - ``reference`` : only highlight target and comparison stars
        - ``all`` : all stars are shown
    zoom: bool
        whether to include a zoom view
    options: dict
        style options:
            - to do

    Examples
    --------

    .. code-block:: python3

        from specphot.observations import Photometry

        phot = Photometry("your_path")
        phot.plot_stars(view="reference")

    .. image:: /user_guide/gallery/plot_stars.png
       :align: center
    """
    _options = {
        "aperture_color": "seagreen",
        "aperture_ls": "--"
    }
    _options.update(options)

    marker_size = 9

    if isinstance(image, str):
        image = fits.getdata(image)

    image_size = np.array(np.shape(image))[::-1]

    fig = plt.figure(figsize=(size, size))

    if flip:
        image = utils.z_scale(image, c=contrast)[::-1, ::-1]
        stars = np.array(image_size) - stars
    else:
        image = utils.z_scale(image, c=contrast)

    ax = fig.add_subplot(111)
    ax.imshow(image, cmap="Greys_r")
    plt.title("Stack image", loc="left")

    size_factor = size / 15
    fontsize = min(size_factor, 1) * 15
    label_yoffset = min(size_factor, 1) * 30

    if view == "all":

        for i, coord in enumerate(stars):
            circle = mpatches.Circle(coord, marker_size, fill=None, ec=marker_color, )
            ax = plt.gca()
            ax.add_artist(circle)
            plt.annotate(str(i),
                         xy=[coord[0], coord[1] + marker_size+1],
                         color=marker_color,
                         ha='center', fontsize=12, va='top')

    if ref_stars is not None:
        circle = mpatches.Circle(stars[target, :], marker_size, fill=None, ec=marker_color, label="target")
        ax = plt.gca()
        ax.add_artist(circle)
        plt.annotate(target, xy=[stars[target][0], stars[target][1] + marker_size+1],
                     color=marker_color, ha='center', fontsize=12, va='top')

        plt.imshow(image, cmap="Greys_r")

        for i in ref_stars:
            circle = mpatches.Circle(stars[i, :], marker_size, fill=None, ec="yellow", label="comparison")
            ax.add_artist(circle)
            plt.annotate(str(i), xy=[stars[i][0], stars[i][1] + marker_size+1], color="yellow",
                         fontsize=12,
                         ha='center',
                         va='top')

        other_stars = np.arange(len(stars))

        other_stars = np.setdiff1d(other_stars, target)
        other_stars = np.setdiff1d(other_stars, ref_stars)

        for i in other_stars:
            circle = mpatches.Circle(stars[i, :], marker_size, fill=None, ec=marker_color, label="comparison",
                                     alpha=0.4)
            ax.add_artist(circle)

    plt.tight_layout()

    if pixel_scale is not None:
        ob = AnchoredHScaleBar(size=60 / pixel_scale, label="1'", loc=4, frameon=False, extent=0,
                               pad=0.6, sep=4, linekw=dict(color="white", linewidth=0.8))
        ax.add_artist(ob)

    if target is not None and zoom:
        with plt.rc_context({
            'axes.edgecolor': "white",
            'xtick.color': "white",
            'ytick.color': "white"
        }):
            x, y = stars[target]
            rect = patches.Rectangle(
                (x - 80, y - 80),
                160, 160, linewidth=1,
                edgecolor='white',
                facecolor='none',
                alpha=0.3)

            ax.add_patch(rect)
            axins = zoomed_inset_axes(ax, 2.5, loc=1)
            axins.imshow(image, cmap="Greys_r", origin="upper")
            if aperture is not None:
                ap = aperture / 2
                aperture = patches.Circle(
                    (x, y),
                    ap, linewidth=1,
                    ls=_options["aperture_ls"],
                    edgecolor=_options["aperture_color"],
                    facecolor='none',
                    alpha=1)
                axins.add_patch(aperture)
            axins.set_xlim([x - 80, x + 80])
            axins.set_ylim([y + 80, y - 80])

            if pixel_scale is not None:
                obin = AnchoredHScaleBar(size=15 / pixel_scale, label="15\"", loc=4,
                                         frameon=False, extent=0, pad=0.6, sep=4,
                                         linekw=dict(color="white", linewidth=0.8))
                axins.add_artist(obin)

    return fig
Ejemplo n.º 35
0
    exp._set_labels_and_prepare_data(label_assoc, labels, sample_assoc)
    exp._set_data_and_labels_for_examples(logger.get_value('examples'))
    labels = exp.labels  # Get subset of labels without examples
    learner = exp._get_new_learner()
    learner.dico = logger.get_last_value('dictionary')
    data = exp.data
    # Compute coefficients from data
    print("Re-computing internal coefficients...")
    internal = learner.reconstruct_internal_multi(exp.modalities,
                                                  data,
                                                  exp.iter_test)
    # Compute mutual information for each pair of coef and label
    n_labels = len(label_assoc[0])
    by_label = organize_by_values(labels, nb_values=n_labels)
    mutual_information_matrices.append(np.array([
        mutual_information_by_label(internal[:, i], by_label)
        for i in range(K)
        ]).T)
    # Each matrix is of dim (K x n_labels)
    # Plot result to disk
    with plt.rc_context(rc=DEFAULT_PLOT_PARAMS):
        plt.interactive(False)
        fig = plt.figure()
        sound_labels = label_assoc[exp.modalities.index("sound")]
        plot_mutual_info(mutual_information_matrices[-1], sound_labels)
        for ext in ['svg', 'pdf', 'eps']:
            path = os.path.join(DEFAULT_FIG_DEST,
                                'mutual_info_{}.{}'.format(exp_idx, ext))
            fig.savefig(path, transparent=True)
            print('Written: {}.'.format(path))
Ejemplo n.º 36
0
def _do_plot(self):
    self._do_compute()
    with plt.rc_context({'axes.edgecolor': 'gray'}):
        self._plot_spikes_in_feature_space_multi(
            self._spike_waveforms_list, self._opts)
Ejemplo n.º 37
0
def draw_termite_plot(
    values_mat,
    col_labels,
    row_labels,
    *,
    highlight_cols=None,
    highlight_colors=None,
    save=False,
    rc_params=None,
):
    """
    Make a "termite" plot, typically used for assessing topic models with a tabular
    layout that promotes comparison of terms both within and across topics.

    Args:
        values_mat (:class:`np.ndarray` or matrix): matrix of values with shape
            (# row labels, # col labels) used to size the dots on the grid
        col_labels (seq[str]): labels used to identify x-axis ticks on the grid
        row_labels(seq[str]): labels used to identify y-axis ticks on the grid
        highlight_cols (int or seq[int], optional): indices for columns
            to visually highlight in the plot with contrasting colors
        highlight_colors (tuple of 2-tuples): each 2-tuple corresponds to a pair
            of (light/dark) matplotlib-friendly colors used to highlight a single
            column; if not specified (default), a good set of 6 pairs are used
        save (str, optional): give the full /path/to/fname on disk to save figure
        rc_params (dict, optional): allow passing parameters to rc_context in matplotlib.pyplot,
            details in https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.rc_context.html

    Returns:
        :obj:`matplotlib.axes.Axes.axis`: Axis on which termite plot is plotted.

    Raises:
        ValueError: if more columns are selected for highlighting than colors
            or if any of the inputs' dimensions don't match

    References:
        Chuang, Jason, Christopher D. Manning, and Jeffrey Heer. "Termite:
        Visualization techniques for assessing textual topic models."
        Proceedings of the International Working Conference on Advanced
        Visual Interfaces. ACM, 2012.

    See Also:
        :meth:`TopicModel.termite_plot() <textacy.tm.topic_model.TopicModel.termite_plot>`
    """
    try:
        plt
    except NameError:
        raise ImportError(
            "`matplotlib` is not installed, so `textacy.viz` won't work; "
            "install it individually via `$ pip install matplotlib`, or "
            "along with textacy via `pip install textacy[viz]`.")
    n_rows, n_cols = values_mat.shape
    max_val = np.max(values_mat)

    if n_rows != len(row_labels):
        msg = "values_mat and row_labels dimensions don't match: {} vs. {}".format(
            n_rows, len(row_labels))
        raise ValueError(msg)
    if n_cols != len(col_labels):
        msg = "values_mat and col_labels dimensions don't match: {} vs. {}".format(
            n_cols, len(col_labels))
        raise ValueError(msg)

    if highlight_colors is None:
        highlight_colors = COLOR_PAIRS
    if highlight_cols is not None:
        if isinstance(highlight_cols, int):
            highlight_cols = (highlight_cols, )
        elif len(highlight_cols) > len(highlight_colors):
            msg = "no more than {} columns may be highlighted at once".format(
                len(highlight_colors))
            raise ValueError(msg)
        highlight_colors = {
            hc: COLOR_PAIRS[i]
            for i, hc in enumerate(highlight_cols)
        }

    _rc_params = RC_PARAMS.copy()
    if rc_params:
        _rc_params.update(rc_params)

    with plt.rc_context(_rc_params):
        fig, ax = plt.subplots(figsize=(pow(n_cols, 0.8), pow(n_rows, 0.66)))

        _ = ax.set_yticks(range(n_rows))
        yticklabels = ax.set_yticklabels(row_labels, fontsize=14, color="gray")
        if highlight_cols is not None:
            for i, ticklabel in enumerate(yticklabels):
                max_tick_val = max(values_mat[i, hc] for hc in range(n_cols))
                for hc in highlight_cols:
                    if max_tick_val > 0 and values_mat[i, hc] == max_tick_val:
                        ticklabel.set_color(highlight_colors[hc][1])

        ax.get_xaxis().set_ticks_position("top")
        _ = ax.set_xticks(range(n_cols))
        xticklabels = ax.set_xticklabels(col_labels,
                                         fontsize=14,
                                         color="gray",
                                         rotation=-60,
                                         ha="right")

        # Create offset transform by 5 points in x direction
        dx = 10 / 72
        dy = 0
        offset = matplotlib.transforms.ScaledTranslation(
            dx, dy, fig.dpi_scale_trans)
        for label in ax.xaxis.get_majorticklabels():
            label.set_transform(label.get_transform() + offset)

        if highlight_cols is not None:
            gridlines = ax.get_xgridlines()
            for i, ticklabel in enumerate(xticklabels):
                if i in highlight_cols:
                    ticklabel.set_color(highlight_colors[i][1])
                    gridlines[i].set_color(highlight_colors[i][0])
                    gridlines[i].set_alpha(0.5)

        for col_ind in range(n_cols):
            if highlight_cols is not None and col_ind in highlight_cols:
                ax.scatter(
                    [col_ind for _ in range(n_rows)],
                    [i for i in range(n_rows)],
                    s=600 * (values_mat[:, col_ind] / max_val),
                    alpha=0.5,
                    linewidth=1,
                    color=highlight_colors[col_ind][0],
                    edgecolor=highlight_colors[col_ind][1],
                )
            else:
                ax.scatter(
                    [col_ind for _ in range(n_rows)],
                    [i for i in range(n_rows)],
                    s=600 * (values_mat[:, col_ind] / max_val),
                    alpha=0.5,
                    linewidth=1,
                    color="lightgray",
                    edgecolor="gray",
                )

        _ = ax.set_xlim(left=-1, right=n_cols)
        _ = ax.set_ylim(bottom=-1, top=n_rows)

        ax.invert_yaxis()  # otherwise, values/labels go from bottom to top

    if save:
        fig.savefig(save, bbox_inches="tight", dpi=100)

    return ax
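
A hedged usage sketch of `draw_termite_plot` above with a small random matrix; the term/topic names and weights are invented purely for illustration, and the module-level `COLOR_PAIRS`/`RC_PARAMS` constants from the original source are assumed to be present:

import numpy as np

rng = np.random.default_rng(0)
values = rng.random((8, 4))                        # 8 terms x 4 topics (hypothetical)
terms = ["term_{}".format(i) for i in range(8)]
topics = ["topic_{}".format(j) for j in range(4)]

ax = draw_termite_plot(
    values,
    topics,                                        # col_labels
    terms,                                         # row_labels
    highlight_cols=[1],                            # draw one topic in a contrasting color pair
)
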
Ejemplo n.º 38
0
def plot_mean_residual_life(
        ts: pd.Series,
        thresholds=None,
        extremes_type: str = "high",
        alpha: float = 0.95,
        figsize: tuple = (8, 5),
) -> tuple:  # pragma: no cover
    """
    Plot mean residual life for given threshold values.

    The mean residual life plot should be approximately linear above a threshold
    for which the Generalized Pareto Distribution model is valid.
    The strategy is to select the smallest (largest for extremes_type='low')
    threshold value immediately above (below for extremes_type='low')
    which the plot is approximately linear.

    Parameters
    ----------
    ts : pandas.Series
        Time series of the signal.
    thresholds : array-like, optional
        An array of thresholds for which the mean residual life plot is plotted.
        If None (default), plots mean residual life for 100 equally-spaced thresholds
        between 90th (10th if extremes_type='low') percentile
        and 10th largest (smallest if extremes_type='low') value in the series.
    extremes_type : str, optional
        high (default) - extreme high values
        low - extreme low values
    alpha : float, optional
        Confidence interval width in the range (0, 1), by default it is 0.95.
        If None, then confidence interval is not shown.
    figsize : tuple, optional
        Figure size in inches in format (width, height).
        By default it is (8, 5).

    Returns
    -------
    figure : matplotlib.figure.Figure
        Figure object.
    axes : matplotlib.axes._axes.Axes
        Axes object.

    """
    # Get default thresholds
    if thresholds is None:
        thresholds = get_default_thresholds(
            ts=ts,
            extremes_type=extremes_type,
            num=100,
        )

    # Calculate mean residual life for each threshold
    mean_residual_lives, mrl_confidence = [], []
    for threshold in thresholds:
        if extremes_type == "high":
            exceedances = ts.loc[ts > threshold] - threshold
        elif extremes_type == "low":
            exceedances = ts.loc[ts < threshold] - threshold
        else:
            raise ValueError(
                f"invalid value in '{extremes_type}' for the 'extremes_type' argument"
            )

        mean_residual_lives.append(exceedances.mean())
        if alpha is not None:
            mrl_confidence.append(
                scipy.stats.norm.interval(
                    alpha=alpha,
                    loc=exceedances.mean(),
                    scale=exceedances.std(ddof=1) / np.sqrt(len(exceedances)),
                ))

    with plt.rc_context(rc=pyextremes_rc):
        # Create figure and axes
        fig, ax = plt.subplots(figsize=figsize, dpi=96)
        ax.grid(False)

        # Plotting central estimates of mean residual life
        ax.plot(
            thresholds,
            mean_residual_lives,
            color="#F85C50",
            lw=2,
            ls="-",
            zorder=15,
        )

        # Plot confidence intervals
        if alpha is not None:
            for ci in np.transpose(mrl_confidence):
                ax.plot(thresholds,
                        ci,
                        color="#5199FF",
                        lw=1,
                        ls="--",
                        zorder=10)
            ax.fill_between(
                thresholds,
                *np.transpose(mrl_confidence),
                facecolor="#5199FF",
                edgecolor="None",
                alpha=0.25,
                zorder=5,
            )

        # Label axes
        ax.set_xlabel("Threshold")
        ax.set_ylabel("Mean excess")

        return fig, ax
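
A hedged usage sketch of the function above on a synthetic series; explicit thresholds are passed so the sketch does not depend on `get_default_thresholds`, and the module-level `pyextremes_rc` style dict from the original package is assumed to be importable:

import numpy as np
import pandas as pd

rng = np.random.default_rng(42)
ts = pd.Series(
    rng.gumbel(loc=10.0, scale=2.0, size=5000),          # synthetic heavy-tailed signal
    index=pd.date_range("2000-01-01", periods=5000, freq="h"),
)
thresholds = np.linspace(ts.quantile(0.90), ts.nlargest(10).iloc[-1], 100)
fig, ax = plot_mean_residual_life(ts, thresholds=thresholds, alpha=0.95)
fig.savefig("mean_residual_life.png", dpi=96)
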
Ejemplo n.º 39
0
fig, ax = plt.subplots()
dots = np.arange(10) / 100. + .03
x, y = np.meshgrid(dots, dots)
data = [x.ravel(), y.ravel()]
ax.scatter(*data, c=data[1])

################################################################################
# Sometimes choosing evenly-distributed ticks results in strange tick numbers.
# If you'd like Matplotlib to keep ticks located at round numbers, you can
# change this behavior with the following rcParams value:

print(plt.rcParams['axes.autolimit_mode'])

# Now change this value and see the results
with plt.rc_context({'axes.autolimit_mode': 'round_numbers'}):
    fig, ax = plt.subplots()
    ax.scatter(*data, c=data[1])

################################################################################
# You can also alter the margins of the axes around the data with
# ``axes.xmargin`` and ``axes.ymargin``:

with plt.rc_context({'axes.autolimit_mode': 'round_numbers',
                     'axes.xmargin': .8,
                     'axes.ymargin': .8}):
    fig, ax = plt.subplots()
    ax.scatter(*data, c=data[1])

plt.show()
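
As an aside, the same margin change can be made per-axes without touching rcParams; `Axes.margins` is the direct counterpart of ``axes.xmargin``/``axes.ymargin`` (a small self-contained sketch):

import numpy as np
import matplotlib.pyplot as plt

dots = np.arange(10) / 100. + .03
x, y = np.meshgrid(dots, dots)
fig, ax = plt.subplots()
ax.scatter(x.ravel(), y.ravel(), c=y.ravel())
ax.margins(x=0.8, y=0.8)       # per-axes equivalent of the rcParams used above
plt.show()
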
Ejemplo n.º 40
0
        ax = fig.add_subplot(1,2,1).autoscale(tight=True)
#         sys.exit()
        ax=plt.subplot2grid((1,40), (0, 0), colspan=10, axisbg='white')

#         fig.subplots_adjust(wspace=0, hspace=0)

        ax1.set_title('Roary matrix\n(%d gene clusters)'%roary.shape[0])

        if options.labels:
#             changed font size to 3 for large trees.
            fsize = 3
#             fsize = 12 - 0.1*roary.shape[1]
#             if fsize < 7:
#                 fsize = 7
            with plt.rc_context({'font.size': fsize}):
                Phylo.draw(t, axes=ax,
                           show_confidence=False,
#                            label_func=lambda x: str(x)[:10],
                           xticks=([],), yticks=([],),
                           ylabel=('',), xlabel=('',),
                           xlim=(-mdist*0.1,mdist+mdist*0.45-mdist*roary.shape[1]*0.001),
                           axis=('on',),
                           title=('Tree\n(%d strains)'%roary.shape[1],), 
                           do_show=False,
                          )
        else:
            Phylo.draw(t, axes=ax, 
                       show_confidence=False,
                       label_func=lambda x: None,
                       xticks=([],), yticks=([],),
Ejemplo n.º 41
0
            aim -= int(line.replace('up ', ''))

        yield x, y, aim


# plt.figure()
# plt.plot(DEPTH)
# x, y, a = map(np.asarray, zip(*plot_posv2()))
# dx, dy = map(np.asarray, zip(*plot_pos()))
# # plt.plot(x, y)
# plt.plot(dx, dy)
# plt.show()

n_frames = len(INPUT.splitlines())

with plt.rc_context({'axes.edgecolor': '#ffff66', 'xtick.color': '#ffff66', 'ytick.color': '#ffff66'}):
    pos = plot_posv2()
    fig, ax = plt.subplots(facecolor="#0f0f23")
    ax.set_facecolor("#0f0f23")
    ax.tick_params(color="#ffff66", which='both')
    ax.xaxis.label.set_color("#ffff66")
    ax.yaxis.label.set_color("#ffff66")

    xdata, ydata = [], []
    depthx, depthy = [], []
    ln, = plt.plot([], [], color="#ffff66")
    ln2, = plt.plot([], [], color="#ff4c4c")
    arrow = plt.arrow(0, 0, 1, 0, color="#ffff66")
    ax.set_xlim(0, 2007)
    ax.set_ylim(0, 668080)
    ax.invert_yaxis()
Ejemplo n.º 42
0
def newton_multi(request):
    x, y, z, w = sympy.symbols('x y z w')

    valores = request.POST
    n = len(valores)

    plt.rcParams.update(plt.rcParamsDefault)
    plt.close('all')

    if n == 5:
        iteraciones = 10

        resul = {'titulos': ['n', 'Vector Solución', 'f₁(x, y), f₂(x, y)'], 'filas': []}
        f1 = sympy.sympify(valores['f1'])
        x0 = float(valores['x0'])

        f2 = sympy.sympify(valores['f2'])
        y0 = float(valores['y0'])

        # partial derivatives
        f1x = sympy.diff(f1, x)
        f1y = sympy.diff(f1, y)

        f2x = sympy.diff(f2, x)
        f2y = sympy.diff(f2, y)

        # vector of the initial functions
        v = sympy.Matrix([[f1], [f2]])

        # inverse of the Jacobian
        j_inv = (sympy.Matrix([[f1x, f1y],
                               [f2x, f2y]])) ** -1

        # lambdify the matrices
        jaco = sympy.lambdify([x, y], j_inv, 'numpy')
        fxfy = sympy.lambdify([x, y], v, 'numpy')

        solucion = np.array([[x0], [y0]])

        for n in range(1, iteraciones + 1):
            # Format the values
            sol = fxfy(solucion[0][0], solucion[1][0])
            xs = f'{solucion[0][0]:.6f}'
            ys = f'{solucion[1][0]:.6f}'
            fxn = f'{float(sol[0]):.6f}'
            fyn = f'{float(sol[1]):.6f}'

            resul['filas'].append([n, xs + ' | ' + ys, fxn + ' | ' + fyn])

            j = jaco(solucion[0][0], solucion[1][0]).dot(fxfy(solucion[0][0], solucion[1][0]))
            solucion = solucion - j

        context = {'context': resul}
        context["jaco"] = sympy.latex(sympy.Matrix([[f1x, f1y],
                                                    [f2x, f2y]])).replace("\left[", ' ').strip("\\right]").replace(
            "matrix", "bmatrix")
        # plotting
        plt.rc_context({'axes.edgecolor': 'w', 'xtick.color': 'w', 'ytick.color': 'w'})
        # plt.style.use("dark_background")

        xs = solucion[0][0]  # x solucion
        ys = solucion[1][0]  # y solucion

        titulo = '\n' + estiliza_string(valores['f1']) + ', ' + estiliza_string(valores['f2']) + '\n'

        plt.rc_context({'axes.edgecolor': 'gray', 'xtick.color': 'gray', 'ytick.color': 'gray'})
        p = plot3d(f1, f2, (x, xs - 3, xs + 3), (y, ys - 3, ys + 3), title=titulo, nb_of_points_x=35, nb_of_points_y=35,
                   xlabel='X', ylabel='Y')
        buf = BytesIO()
        p._backend.fig.savefig(buf, format='jpg', quality=90, bbox_inches='tight', facecolor="#f3f2f1", dpi=150,
                               transparent=True)
        buf.seek(0)
        uri = 'data:image/png;base64,' + parse.quote(b64encode(buf.read()))
        context['image'] = uri
        p._backend.close()


    elif n == 7:
        iteraciones = 15

        resul = {'titulos': ['n', 'Vector Solución', 'f₁ | f₂ | f₃'], 'filas': []}
        f1 = sympy.sympify(valores['f1'])
        x0 = float(valores['x0'])

        f2 = sympy.sympify(valores['f2'])
        y0 = float(valores['y0'])

        f3 = sympy.sympify(valores['f3'])
        z0 = float(valores['z0'])

        # partial derivatives
        f1x = sympy.diff(f1, x)
        f1y = sympy.diff(f1, y)
        f1z = sympy.diff(f1, z)

        f2x = sympy.diff(f2, x)
        f2y = sympy.diff(f2, y)
        f2z = sympy.diff(f2, z)

        f3x = sympy.diff(f3, x)
        f3y = sympy.diff(f3, y)
        f3z = sympy.diff(f3, z)

        # vector of the initial functions
        v = sympy.Matrix([[f1], [f2], [f3]])

        # inverse of the Jacobian

        j_inv = (sympy.Matrix([[f1x, f1y, f1z],
                               [f2x, f2y, f2z],
                               [f3x, f3y, f3z]])) ** -1

        jaco = sympy.lambdify([x, y, z], j_inv, "numpy")
        fxfyfz = sympy.lambdify([x, y, z], v, "numpy")

        solucion = np.array([[x0], [y0], [z0]])

        for n in range(1, iteraciones + 1):
            # Format the values
            sol = fxfyfz(solucion[0][0], solucion[1][0], solucion[2][0])
            xs = f'{solucion[0][0]:.4f}'
            ys = f'{solucion[1][0]:.4f}'
            zs = f'{solucion[2][0]:.4f}'

            fxn = f'{float(sol[0]):.4f}'
            fyn = f'{float(sol[1]):.4f}'
            fzn = f'{float(sol[2]):.4f}'

            resul['filas'].append([n, xs + ' | ' + ys + ' | ' + zs, fxn + ' | ' + fyn + ' | ' + fzn])

            j = jaco(solucion[0][0], solucion[1][0], solucion[2][0]).dot(
                fxfyfz(solucion[0][0], solucion[1][0], solucion[2][0]))
            solucion -= j

        context = {'context': resul}
        context["jaco"] = sympy.latex(sympy.Matrix([[f1x, f1y, f1z],
                                                    [f2x, f2y, f2z],
                                                    [f3x, f3y, f3z]])).replace("\left[", ' ').strip("\\right]").replace(
            "matrix", "bmatrix")

    context["inv"] = sympy.latex(j_inv).replace("\left[", ' ').strip("\\right]").replace("matrix", "bmatrix")
    context["mat"] = sympy.latex(v).replace("\left[", ' ').strip("\\right]").replace("matrix", "bmatrix")
    return render(request, "newton_calculado_multi.html", context)
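
The view above interleaves the numeric work with Django plumbing; the iteration it implements is the multivariate Newton step x_{k+1} = x_k - J^{-1}(x_k) F(x_k). A stripped-down sketch of that step for a 2x2 system, reusing the same variable names (the example functions are arbitrary, not taken from the view):

import numpy as np
import sympy

x, y = sympy.symbols('x y')
f1 = x**2 + y**2 - 4                   # arbitrary example system
f2 = x*y - 1

v = sympy.Matrix([[f1], [f2]])         # F(x, y)
j_inv = sympy.Matrix([[sympy.diff(f1, x), sympy.diff(f1, y)],
                      [sympy.diff(f2, x), sympy.diff(f2, y)]]) ** -1

fxfy = sympy.lambdify([x, y], v, 'numpy')
jaco = sympy.lambdify([x, y], j_inv, 'numpy')

solucion = np.array([[2.0], [0.5]])    # initial guess
for _ in range(10):
    j = jaco(solucion[0][0], solucion[1][0]).dot(fxfy(solucion[0][0], solucion[1][0]))
    solucion = solucion - j
print(solucion)                        # converges to a root of (f1, f2) near (1.93, 0.52)
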
Ejemplo n.º 43
0
def monitor(pid, logfile=None, plot=None, duration=None, interval=None,
            include_children=False):

    # We import psutil here so that the module can be imported even if psutil
    # is not present (for example if accessing the version)
    import psutil

    pr = psutil.Process(pid)

    # Record start time
    start_time = time.time()

    if logfile:
        f = open(logfile, 'w')
        f.write("# {0:12s} {1:12s} {2:12s} {3:12s}\n".format(
            'Elapsed time'.center(12),
            'CPU (%)'.center(12),
            'Real (MB)'.center(12),
            'Virtual (MB)'.center(12))
        )

    log = {}
    log['times'] = []
    log['cpu'] = []
    log['mem_real'] = []
    log['mem_virtual'] = []

    try:

        # Start main event loop
        while True:

            # Find current time
            current_time = time.time()

            try:
                pr_status = pr.status()
            except TypeError:  # psutil < 2.0
                pr_status = pr.status
            except psutil.NoSuchProcess:  # pragma: no cover
                break

            # Check if process status indicates we should exit
            if pr_status in [psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD]:
                print("Process finished ({0:.2f} seconds)"
                      .format(current_time - start_time))
                break

            # Check if we have reached the maximum time
            if duration is not None and current_time - start_time > duration:
                break

            # Get current CPU and memory
            try:
                current_cpu = get_percent(pr)
                current_mem = get_memory(pr)
            except Exception:
                break
            current_mem_real = current_mem.rss / 1024. ** 2
            current_mem_virtual = current_mem.vms / 1024. ** 2

            # Get information for children
            if include_children:
                for child in all_children(pr):
                    try:
                        current_cpu += get_percent(child)
                        current_mem = get_memory(child)
                    except Exception:
                        continue
                    current_mem_real += current_mem.rss / 1024. ** 2
                    current_mem_virtual += current_mem.vms / 1024. ** 2

            if logfile:
                f.write("{0:12.3f} {1:12.3f} {2:12.3f} {3:12.3f}\n".format(
                    current_time - start_time,
                    current_cpu,
                    current_mem_real,
                    current_mem_virtual))
                f.flush()

            if interval is not None:
                time.sleep(interval)

            # If plotting, record the values
            if plot:
                log['times'].append(current_time - start_time)
                log['cpu'].append(current_cpu)
                log['mem_real'].append(current_mem_real)
                log['mem_virtual'].append(current_mem_virtual)

    except KeyboardInterrupt:  # pragma: no cover
        pass

    if logfile:
        f.close()

    if plot:

        # Use non-interactive backend, to enable operation on headless machines
        import matplotlib.pyplot as plt
        with plt.rc_context({'backend': 'Agg'}):

            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)

            ax.plot(log['times'], log['cpu'], '-', lw=1, color='r')

            ax.set_ylabel('CPU (%)', color='r')
            ax.set_xlabel('time (s)')
            ax.set_ylim(0., max(log['cpu']) * 1.2)

            ax2 = ax.twinx()

            ax2.plot(log['times'], log['mem_real'], '-', lw=1, color='b')
            ax2.set_ylim(0., max(log['mem_real']) * 1.2)

            ax2.set_ylabel('Real Memory (MB)', color='b')

            ax.grid()

            fig.savefig(plot)
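
A hedged usage sketch of `monitor` above, recording a short-lived child process; the helpers `get_percent`, `get_memory` and `all_children` referenced inside the function are assumed to be available in the same module, and the file names are illustrative:

import subprocess
import sys

# Launch a dummy workload, then log its CPU/memory usage and save a plot.
proc = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(10)"])
monitor(proc.pid, logfile="activity.log", plot="activity.png",
        duration=8, interval=0.5, include_children=False)
proc.wait()
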
Ejemplo n.º 44
0
def plot_ruiplot(zvals,
                 i,
                 inputs,
                 labels,
                 outputs,
                 width=25,
                 ax=None,
                 styles=RUI_STYLES):
    ## mds    print('zvals = ',zvals)
    ## mds    print(' i    =  ',i)
    ## mds    print('  inputs = ', inputs)
    ## mds    print('  labels = ', labels)
    ## mds    print('  outputs = ', outputs)
    x_bins = np.round(zvals[i - width:i + width] - 0.05, 2)
    ## mds    print('x_bins = ',x_bins)
    y_kernel = inputs.squeeze()[i - width:i + width] * 2500
    ## mds    print('y_kernel = ', y_kernel)
    y_target = labels.squeeze()[i - width:i + width]
    ## mds    print('y_target = ', y_target)
    y_predicted = outputs.squeeze()[i - width:i + width]
    ## mds    print('y_predicted = ', y_predicted)

    with plt.rc_context(mystyle):
        if ax is None:
            fig, ax = plt.subplots(figsize=(12, 7))

        ax.xaxis.set_major_formatter(FormatStrFormatter("%.2f"))
        ax.set_xlim(zvals[i - width] - 0.05, zvals[i + width] - 0.05)
        ax.set_xlabel("z values [mm]")
        ax.bar(x_bins,
               y_kernel,
               width=0.1,
               **styles["kernel"],
               label="Kernel Density")
        ax.legend(loc="upper left")

        ax.set_ylim(0, max(y_kernel) * 1.2)

        ax.set_ylabel("Kernel Density", color=get_color(styles["kernel"]))

        ax_prob = ax.twinx()
        p1 = ax_prob.bar(x_bins,
                         y_target,
                         width=0.1,
                         **styles["target"],
                         label="Target")
        p2 = ax_prob.bar(x_bins,
                         y_predicted,
                         width=0.1,
                         **styles["predicted"],
                         label="Predicted")

        #ax_prob.set_ylim(0, max(0.8, 1.2 * max(y_predicted)))
        ax_prob.set_ylim(0, max(1.5,
                                1.2 * max(max(y_predicted), max(y_target))))
        ax_prob.set_ylabel("Probability", color=get_color(styles["predicted"]))
        if np.any(np.isnan(labels)):
            grey_y = np.isnan(y_target) * 0.2
            ax_prob.bar(x_bins,
                        grey_y,
                        width=0.1,
                        **styles["masked"],
                        label="Masked")

        ax_prob.legend(loc="upper right")

    return ax, ax_prob, y_kernel
Ejemplo n.º 45
0
        'c': costs[low_:high_, 0].reshape(-1),
        'brake': costs[low_:high_, -1].reshape(-1),
        'lane': costs[low_:high_, 3].reshape(-1),
    }
    
    episodes.append(new_episode)

discounted_costs = np.array([[discounted_sum(x['c'],.95),discounted_sum(x['brake'],.95),discounted_sum(x['lane'],.95)]  for x in episodes])
data = dd.io.load('car_policy_improvement.h5')
DQN = [-39.61397106365249, 7.703194041056963, 115.62071639160499]
LSPI = pd.read_csv('lspi_results.csv')
plt.rc('text', usetex=True)


lines, fill_betweens= [], []
plt.rc_context({'axes.edgecolor':'k'})
fig, ax1 = plt.subplots()
ax1.grid(alpha=.35)
max_iterations = 27
iterations = range(len(data['g_eval'][0][:max_iterations]))
colors = color_gen()
constraint_names = ['Braking', 'Center of Lane']
constraint_upper_bound = [5.8, 85.]#[1.5, 5.]
locations = ['lower left', 'lower center', 'lower right']
fontsize = 16
legend_fontsize = 16
legend_title_fontsize = 16
major_tick_mark_size = 14


def derandomize(data, constraints, min_iteration):
Ejemplo n.º 46
0
def clustermap(df, Zx=None, Zy=None, aw=3, ah=3, lw=1, vmin=None, vmax=None, cmap=plt.cm.Blues,
               origin='lower', dendrogram_pos='top', ylabel_pos='left',
               cohort_s=None, cohort_colors=None, #cohort_labels=None,
               fontsize=10, clabel='', cfontsize=10, label_colors=None, colorbar_orientation='vertical',
               method='average', metric='euclidean', optimal_ordering=False, value_labels=False,
               rotation=-45, ha='left', va='top', tri=False, rasterized=False,
               dl=1, dr=1, dt=0.2, lh=0.1, ls=0.01,
               db=1.5, dd=0.4, ds=0.03, ch=1, cw=0.175, dc=0.1, dtc=0):

    if cohort_s is not None:
        if isinstance(cohort_s, pd.Series):
            cohort_s = [cohort_s]
            # cohort_labels = [cohort_labels]
        n = len(cohort_s)
        if cohort_colors is None:
            cohort_colors = []
            for k in range(n):
                nc = len(np.unique(cohort_s[k]))
                cohort_colors.append({i:j for i,j in zip(np.unique(cohort_s[k]), plt.cm.get_cmap('Spectral_r', nc)(np.arange(nc)))})
    else:
        n = 0

    if Zx is None:
        Zy = hierarchy.linkage(df,   method=method, metric=metric, optimal_ordering=optimal_ordering)
        Zx = hierarchy.linkage(df.T, method=method, metric=metric, optimal_ordering=optimal_ordering)
    elif Zy is None:
        Zy = Zx

    fw = dl+aw+dr
    fh = db+ah+ds+dd+dt+n*(lh+ls)
    fig = plt.figure(figsize=(fw,fh))
    if dendrogram_pos=='top':
        ax = fig.add_axes([dl/fw, db/fh, aw/fw, ah/fh])
        lax = []
        for k in range(n):
            lax.append(
                fig.add_axes([dl/fw, (db+ah+(k+1)*ls+k*lh)/fh, aw/fw, lh/fh], sharex=ax)
            )
        dax = fig.add_axes([dl/fw,         (db+ah+n*(ls+lh)+ds)/fh, aw/fw, dd/fh])
        cax = fig.add_axes([(dl+aw+dc)/fw, (db+ah-ch-dtc)/fh, cw/fw, ch/fh])
        axes = [ax, *lax, dax, cax]
    else:
        dax = fig.add_axes([dl/fw, db/fh, aw/fw, dd/fh])
        ax =  fig.add_axes([dl/fw, (db+dd+ds)/fh, aw/fw, ah/fh])
        cax = fig.add_axes([(dl+aw+dc)/fw, (db+dd+ds)/fh, cw/fw, ch/fh])
        axes = [ax, dax, cax]

    if Zx is not None:
        with plt.rc_context({'lines.linewidth': lw}):
            z = hierarchy.dendrogram(Zx, ax=dax,  orientation='top', link_color_func=lambda k: 'k')
        ix = df.columns[hierarchy.leaves_list(Zx)]
        iy = df.index[hierarchy.leaves_list(Zy)]
    else:
        ix = df.columns
    dax.axis('off')

    if dendrogram_pos=='bottom':
        dax.invert_yaxis()

    df = df.loc[iy, ix].copy()
    if tri:
        if dendrogram_pos=='top':
            df.values[np.triu_indices(df.shape[0])] = np.NaN
        elif dendrogram_pos=='bottom':
            df.values[np.tril_indices(df.shape[0])] = np.NaN


    if value_labels:
        irange = np.arange(df.shape[0])
        jrange = np.arange(df.shape[1])
        for i in irange:
            for j in jrange:
                if not np.isnan(df.values[j,i]):
                    ax.text(i, j, f'{df.values[j,i]:.2f}', ha='center', va='center')

    h = ax.imshow(df, origin=origin, cmap=cmap, vmin=vmin, vmax=vmax, rasterized=rasterized, aspect='auto')
    ax.set_xticks(np.arange(df.shape[1]))
    ax.set_yticks(np.arange(df.shape[0]))
    ax.set_xticklabels(ix, rotation=rotation, fontsize=fontsize, ha=ha, va=va)
    ax.set_yticklabels(iy, fontsize=fontsize)

    # plot cohort labels
    for k in range(n):
        cohort_index_s = cohort_s[k].map({j:i for i,j in enumerate(cohort_s[k].unique())})
        cmap2 = colors.ListedColormap([cohort_colors[k][j] for j in cohort_s[k].unique()], 'indexed')
        lax[k].imshow(cohort_index_s[ix].values.reshape(1,-1), aspect='auto', origin='lower', cmap=cmap2)
        # if cluster_labels is not None:
        if ylabel_pos == 'left':
            lax[k].set_ylabel(cohort_s[k].name, fontsize=10, rotation=0, va='center', ha='right')
        elif ylabel_pos == 'right':
            lax[k].yaxis.set_label_position(ylabel_pos)
            lax[k].set_ylabel(cohort_s[k].name, fontsize=10, rotation=0, va='center', ha='left')
        for i in lax[k].spines:
            lax[k].spines[i].set_visible(False)
        lax[k].set_xticks([])
        lax[k].set_yticks([])

    if dendrogram_pos=='bottom':
        ax.yaxis.tick_right()
    # else:
    #     ax.xaxis.tick_top()

    if label_colors is not None:  # plot label dots at bottom
        s = 1.015
        # xlim = ax.get_xlim()
        # b = xlim[1] - s*np.diff(xlim)
        # ax.set_xlim(xlim)
        # ax.scatter([b]*df.shape[1], np.arange(df.shape[1]), s=48, c=label_colors[hierarchy.leaves_list(Zx)], clip_on=False)
        # ax.tick_params(axis='y', pad=12)

        # s = 1.02
        # ylim = ax.get_ylim()
        # b = ylim[1] - s*np.diff(ylim)
        # ax.set_ylim(ylim)
        # ax.scatter(np.arange(df.shape[1]), [b]*df.shape[1], s=36, c=label_colors[hierarchy.leaves_list(Zx)], clip_on=False)
        # ax.tick_params(axis='x', pad=12)

    cbar = plt.colorbar(h, cax=cax, orientation=colorbar_orientation)
    cax.locator_params(nbins=4)

    cbar.set_label(clabel, fontsize=cfontsize+2)
    cax.tick_params(labelsize=cfontsize)

    for i in ['left', 'top', 'right', 'bottom']:
        ax.spines[i].set_visible(False)
    ax.tick_params(length=0)

    plt.sca(ax)
    return axes
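
A hedged usage sketch of `clustermap` above on a small random correlation matrix; `scipy.cluster.hierarchy`, `matplotlib` and the other module-level imports of the original source are assumed to be in place:

import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
raw = pd.DataFrame(rng.normal(size=(50, 8)),
                   columns=["feature_{}".format(i) for i in range(8)])
corr = raw.corr()                                  # symmetric 8x8 matrix to cluster
axes = clustermap(corr, clabel="Pearson r", vmin=-1, vmax=1)
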
Ejemplo n.º 47
0
})

# %% [markdown]
# > Use pcolormesh to create maps of wind speed, U and V at 500, 6000 and 15000 geopotential meters
#
# xarray `FacetGrid` can easily create a figure with plots at different levels, but not for different variables.

# %%
levs = [500, 6000, 15000]

# colorbar overlays the plots if autolayout is used
# still can't see the colorbar label
# could set `add_colorbar` to False and add it manually in its own ax
# or pass `cbar_ax`...
# many options here: http://xarray.pydata.org/en/stable/generated/xarray.DataArray.plot.pcolormesh.html
with plt.rc_context({"figure.autolayout": False}):
    ds.uh.sel(hgt=levs).plot.pcolormesh(row="hgt",
                                        x="x",
                                        y="y",
                                        size=2.9,
                                        aspect=1.3,
                                        cbar_kwargs=dict(shrink=0.5,
                                                         fraction=0.2))
    # note: passing axes is not supported with faceted plots

# %%
# This is a hacky way to do this; there are probably easier ways, maybe with `mpl_toolkits.axes_grid1`
width_ratios = []
for _ in range(3):
    width_ratios.extend(
        [1, 0.05, 0.11,
Ejemplo n.º 48
0
def plot_td_waveform_resp(
    params1, params2, ant=True, outPath='td-detector-res.pdf', resi=True, norm=True, xmin=None, xmax=None):
    """
    Generate a time-domain plot of the GW detector response

    This function will produce a two column, two row plot of two GW detector
    responses based on a numerical relativity waveform. In each column the LHO
    and LLO detector responses are plotted for the two cases. The individual
    polarizations of the strain are also plotted as background.

    This figure is designed to be plotted to compare polarizations, so the
    titles of the columns display the polarizations; however, there is no
    restriction on the two parameter objects.

    Parameters
    ----------
    params1: object
        The parameters of the waveform to plot in the left column. The fields of
        this object correspond to the kwargs of the
        `pycbc.waveform.get_td_waveform()` method and the positional arguments
        of `pycbc.detector.Detector.antenna_pattern()`. For the latter, the fields
        should be supplied as `params.ra`, `.dec`, `.polarization` and
        `.geocentric_end_time`.
    params2: object
        The parameters of the waveform to plot in the right column. The fields
        of this object correspond to the kwargs of the
        `pycbc.waveform.get_td_waveform()` method and the positional arguments
        of `pycbc.detector.Detector.antenna_pattern()`. For the latter, the fields
        should be supplied as `params.ra`, `.dec`, `.polarization` and
        `.geocentric_end_time`.
    ant: boolean, optional
        If True, plot Fp*hp and Fc*hc in the background. If False, plot hp and
        hc in the background.
    outPath: string, optional
        Image file path to which the plot can be written.
    resi: boolean, optional
        Plot the residuals of the two waveforms.
    norm: boolean, optional
        If True, express the residuals as a percentage of the first waveform's
        response (as done in the plotting code below).
    xmin: float, optional
        Minimum x-axis limit.
    xmax: float, optional
        Maximum x-axis limit.
    """

    # Custom configuration
    with plt.rc_context(dict({},**{
        'legend.fontsize':10,
        'axes.labelsize':11,
        'font.family':'serif',
        'font.size':11,
        'xtick.labelsize':11,
        'ytick.labelsize':11,
        'figure.figsize':(16,10),
        'savefig.dpi':80,
        'figure.subplot.bottom': 0.06,
        'figure.subplot.left': 0.06,
        'figure.subplot.right': 0.975,
        'figure.subplot.top': 0.975,
        'axes.unicode_minus': False
    })):

        # Prepare figure
        fig       = plt.figure()
        gs_strain = plt.GridSpec(10, 2, hspace=0, wspace=0)

        # Add axes
        ax1H1 = fig.add_subplot(gs_strain[0:5,0:2])
        ax1L1 = fig.add_subplot(gs_strain[5:10,0:2],sharex=ax1H1,sharey=ax1H1)

        # Generate data
        hp1, hc1, ps1 = get_td_waveform_resp(params1)
        hp2, hc2, ps2 = get_td_waveform_resp(params2)

        colors1 = [tableau20[0],tableau20[1]]
        colors2 = [tableau20[6],tableau20[7]]

        if resi:

            # Plot H1 response
            hResp1H1 = -1*(ps1['H1'].f_plus*hp1+ps1['H1'].f_cross*hc1)
            hResp2H1 = -1*(ps2['H1'].f_plus*hp2+ps2['H1'].f_cross*hc2)
            t0       = np.max((hp1.sample_times[0],hp2.sample_times[0]))
            t1       = np.min((hp1.sample_times[-1],hp2.sample_times[-1]))
            t        = np.linspace(t0,t1,len(hp1))
            hResp1H1 = IUS(hp1.sample_times,hResp1H1,k=5)
            hResp2H1 = IUS(hp2.sample_times,hResp2H1,k=5)
            hRespH1  = hResp2H1(t)-hResp1H1(t)
            hRespH1  /= hResp1H1(t)/100.0 if norm else 1
            ax1H1.plot(
                t,
                hRespH1 if not norm else np.abs(hRespH1),
                color=colors1[0],
                lw=1.5)

            # Plot L1 response
            hResp1L1 = -1*(ps1['L1'].f_plus*hp1+ps1['L1'].f_cross*hc1)
            hResp2L1 = -1*(ps2['L1'].f_plus*hp2+ps2['L1'].f_cross*hc2)
            t0       = np.max((hp1.sample_times[0],hp2.sample_times[0]))
            t1       = np.min((hp1.sample_times[-1],hp2.sample_times[-1]))
            t        = np.linspace(t0,t1,len(hp1))
            hResp1L1 = IUS(hp1.sample_times,hResp1L1,k=5)
            hResp2L1 = IUS(hp2.sample_times,hResp2L1,k=5)
            hRespL1  = hResp2L1(t)-hResp1L1(t)
            hResp1L1 = hResp1L1(t)
            mask     = hResp1L1 == 0
            hResp1L1[mask] = 1
            hRespL1  /= hResp1L1/100.0 if norm else 1
            ax1L1.plot(
                t,
                hRespL1 if not norm else np.abs(hRespL1),
                color=colors1[0],
                lw=1.5)

            # Prepare H1 gridlines
            ax1H1.grid(alpha=0.3)
            ax1H1.xaxis.set_minor_locator(AutoMinorLocator(2))
            ax1H1.grid(False)

            # Prepare L1 gridlines
            ax1L1.grid(alpha=0.3)
            ax1L1.xaxis.set_minor_locator(AutoMinorLocator(2))
            ax1L1.grid(False)

            # Prepare strain range
            ymax = np.max((np.mean(np.abs(hRespH1)),np.mean(np.abs(hRespL1))))*2 if norm else np.max((np.abs(hRespH1).max(),np.abs(hRespL1).max()))

            ymax *= 1.25
            ax1H1.set_ylim(0 if norm else -ymax,ymax)

        else:

            # Plot left and right columns
            columns = (
                (ax1H1,ax1L1,hp1,hc1,ps1,params1,colors1),
                (ax1H1,ax1L1,hp2,hc2,ps2,params2,colors2))

            for axH1,axL1,hp,hc,ps,params,colors in columns:

                # Plot h+
                axH1.plot(
                    hp.get_sample_times(),
                    ps['H1'].f_plus*hp if ant else hp,
                    color=colors[1],
                    ls='-',
                    label='$F_+h_+$' if ant else '$h_+$')

                # Plot hx
                axH1.plot(
                    hc.get_sample_times(),
                    ps['H1'].f_cross*hc if ant else hc,
                    color=colors[1],
                    ls='--',dashes=(2,2),
                    label='$F_{\\times}h_{\\times}$' if ant else '$h_{\\times}$')

                # Plot H1 response
                axH1.plot(
                    hc.get_sample_times(),
                    -1*(ps['H1'].f_plus*hp+ps['H1'].f_cross*hc),
                    color=colors[0],
                    label='$\\psi={0:.0f}^{{\circ}}$'.format((180.0/np.pi)*params.polarization),
                    lw=1.5)

                # Plot h+
                axL1.plot(
                    hp.get_sample_times(),
                    ps['L1'].f_plus*hp if ant else hp,
                    color=colors[1],
                    ls='-',
                    label='$F_+h_+$' if ant else '$h_+$')

                # Plot hx
                axL1.plot(
                    hc.get_sample_times(),
                    ps['L1'].f_cross*hc if ant else hc,
                    color=colors[1],
                    ls='--',dashes=(2,2),
                    label='$F_{\\times}h_{\\times}$' if ant else '$h_{\\times}$')

                # Plot L1 response
                axL1.plot(
                    hc.get_sample_times(),
                    ps['L1'].f_plus*hp+ps['L1'].f_cross*hc,
                    color=colors[0],
                    label='$\\psi={0:.0f}^{{\circ}}$'.format((180.0/np.pi)*params.polarization),
                    lw=1.5)

                # Prepare H1 gridlines
                axH1.grid(alpha=0.3)
                axH1.xaxis.set_minor_locator(AutoMinorLocator(2))
                axH1.grid(False)

                # Prepare L1 gridlines
                axL1.grid(alpha=0.3)
                axL1.xaxis.set_minor_locator(AutoMinorLocator(2))
                axL1.grid(False)

            # Prepare strain range
            if ant:
                ymax = np.max((
                    np.max(np.abs(ps1['H1'].f_plus*hp1.data)),
                    np.max(np.abs(ps1['H1'].f_cross*hc1.data)),
                    np.max(np.abs(ps1['L1'].f_plus*hp1.data)),
                    np.max(np.abs(ps1['L1'].f_cross*hc1.data)),
                    np.max(np.abs(ps2['H1'].f_plus*hp2.data)),
                    np.max(np.abs(ps2['H1'].f_cross*hc2.data)),
                    np.max(np.abs(ps2['L1'].f_plus*hp2.data)),
                    np.max(np.abs(ps2['L1'].f_cross*hc2.data))))
            else:
                ymax = np.max((
                    np.max(np.abs(hp1.data)),
                    np.max(np.abs(hc1.data)),
                    np.max(np.abs(hp2.data)),
                    np.max(np.abs(hc2.data))))

            ymax *= 1.25
            ax1H1.set_ylim(-ymax,ymax)

        # Prepare time range
        # xmin = (hp1.get_sample_times()[0]) if xmin == None else xmin
        # xmax = (hp1.get_sample_times()[-1])/5 if xmax == None else xmax
        xmin = -0.18; xmax = 0.18
        ax1H1.set_xlim(xmin,xmax)

        # Condition yticks
        tickTol = ax1H1.get_ylim()[1]
        ticks   = ax1H1.get_yticks()
        ticks   = [e for e in ticks if (np.abs(e)-(4./5.)*tickTol) < 0]
        ax1H1.set_yticks(ticks)
        ax1L1.set_yticks(ticks)

        # Prepare Strain label
        yH1      = ax1H1.get_yaxis()
        offset   = yH1.major.formatter \
            .format_data_short(ax1H1.get_yticks()[-1]).split('e')
        exponent = '0' if len(offset)==1 else offset[1].rstrip()
        ax1H1.set_ylabel(
            "Residual (%)" if norm else "Strain $\mathregular{{(10^{{{0:s}}})}}$".format(exponent),x=0,y=0)

        # Prepare Time label
        ax1L1.set_xlabel('Time (s)',x=0.5,y=0)

        # Prepare polarization labels
        titleStr = (
            '$\\iota_{{1}}={0:.2f}^{{\circ}}$\t$\\iota_{{2}}={1:.2f}^{{\circ}}$\n'
            '$\\psi_{{1}}={2:.2f}^{{\circ}}$\t$\\psi_{{2}}={3:.2f}^{{\circ}}$\n'
            '$\\phi_{{1}}={4:.2f}^{{\circ}}$\t\t$\\phi_{{2}}={5:.2f}^{{\circ}}$')
        iota1 = (180.0/np.pi)*params1.inclination
        iota2  = (180.0/np.pi)*params2.inclination
        psi1 = (180.0/np.pi)*params1.polarization
        psi2  = (180.0/np.pi)*params2.polarization
        phi1 = (180.0/np.pi)*params1.coa_phase
        phi2  = (180.0/np.pi)*params2.coa_phase

        ax1H1Pos = ax1H1.get_position()
        fig.text(
            ax1H1Pos.x1-0.01,
            ax1H1Pos.y1-0.01,
            titleStr.format(iota1,iota2,psi1,psi2,phi1,phi2),
            va="top",
            ha="right")

        # Remove offset text from yaxes
        for ax in (ax1H1,ax1L1):
            ax.get_yaxis().get_offset_text().set_visible(False)

        # Remove ticks from shared axes
        plt.setp(ax1H1.get_xticklabels(), visible=False)

        # Dump plot
        fig.savefig(outPath)

        # Kill figure
        plt.show()
        plt.close(fig)
Ejemplo n.º 49
0
def main():

    """Test Options"""
    FILENAME = 'scott_reef_analysis.py'
    T_SEED = sea.io.parse('-tseed', 250)
    Q_SEED = sea.io.parse('-qseed', 500)
    UNIQUE = sea.io.parse('-unique', False)
    U_SEED = sea.io.parse('-useed', 100)
    N_TRAIN = sea.io.parse('-ntrain', 200)
    N_QUERY = sea.io.parse('-nquery', 100000)
    NOTRAIN = sea.io.parse('-skiptrain', False)
    MODEL_ONLY = sea.io.parse('-model-only', False)
    LONG_SCALE_ONLY = sea.io.parse('-long-scale', False)
    BATCH_START = sea.io.parse('-batch-start', 'on')

    MISSION_LENGTH = sea.io.parse('-mission-length', 0)
    METHOD = sea.io.parse('-method', 'LMDE')
    GREEDY = sea.io.parse('-greedy', False)
    N_TRIALS = sea.io.parse('-ntrials', 200)
    START_POINT1 = sea.io.parse('-start', 375000.0, arg = 1)
    START_POINT2 = sea.io.parse('-start', 8440000.0, arg = 2)
    H_STEPS = sea.io.parse('-hsteps', 30)
    HORIZON = sea.io.parse('-horizon', 5000.0)
    CHAOS = sea.io.parse('-chaos', False)
    M_STEP = sea.io.parse('-mstep', 1)
    N_DRAWS = sea.io.parse('-ndraws', 500)
    DEPTH_PENALTY = sea.io.parse('-depth-penalty', False)
    SKIP_FEATURE_PLOT = sea.io.parse('-skip-feature-plot', False)
    TWO_COLORBAR = sea.io.parse('-two-colorbar', False)
    FIXED_TYPE = sea.io.parse('-fixed-type', '')

    FONTSIZE = 50
    FONTNAME = 'Sans Serif'
    TICKSIZE = 24
    SAVE_TRIALS = 25

    """Model Options"""
    SAVE_RESULTS = True

    approxmethod = 'laplace'
    multimethod = 'OVA'
    fusemethod = 'EXCLUSION'
    responsename = 'probit'
    batchstart = True if (BATCH_START == 'on') else False
    batchlearn = False
    walltime = 3600.0
    train = not NOTRAIN
    white_fn = pre.standardise

    n_train = N_TRAIN
    n_query = N_QUERY

    """Visualisation Options"""
    mycmap = cm.get_cmap(name = 'jet', lut = None)
    vis_fix_range = True
    vis_x_min = 360000
    vis_x_max = 390000
    vis_y_min = 8430000
    vis_y_max = 8450000
    vis_range = (vis_x_min, vis_x_max, vis_y_min, vis_y_max)
    colorcenter_analysis = 'mean'
    colorcenter_lde = colorcenter_analysis
    y_names_all = [ 'None',
                    'Under-Exposed', 
                    'Under-Exposed',
                    'Barron Sand 1',
                    'Low Density Coral 1',
                    'Sand Biota 1',
                    'Low Density Coral 2',
                    'Dense Coral 1',
                    'Dense Coral 2',
                    'Dense Coral 3',
                    'Sand Biota 2',
                    'Low Density Coral 3',
                    'Low Density Coral 4',
                    'Patch 1',
                    'Patch 2',
                    'Patch 3',
                    'Barron Sand 2',
                    'Sand Biota 3',
                    'Over-Exposed',
                    'Barron Sand 3',
                    'Under-Exposed',
                    'Under-Exposed',
                    'Sand Biota 4',
                    'Misc',
                    'Under-Exposed']

    assert len(y_names_all) == 25
               
    rcparams = {
        'backend': 'pdf',
        'axes.labelsize': TICKSIZE,
        'text.fontsize': FONTSIZE,
        'legend.fontsize': FONTSIZE,
        'xtick.labelsize': TICKSIZE,
        'ytick.labelsize': TICKSIZE,
        'text.usetex': True,
        'figure.figsize': sea.vis.fig_size(350.0)
    }

    # Note: a bare plt.rc_context(...) call has no lasting effect unless it is
    # used as a context manager, so apply the settings to rcParams directly.
    plt.rcParams.update(rcparams)

    map_kwargs = {'marker': 'x', 's': 5}

    """Initialise Result Logging"""
    if SAVE_RESULTS:
        home_directory = "../../../Results/scott-reef/"
        save_directory = "t%d_q%d_ts%d_qs%d_method_%s%s%s_start%.1f%.1f_"\
        "hsteps%d_horizon%.1f/" % (N_TRAIN, N_QUERY, T_SEED, Q_SEED, 
                METHOD, '_GREEDY' if GREEDY else '', 
                ('_FTYPE_%s' % FIXED_TYPE) if FIXED_TYPE else '',
                START_POINT1, START_POINT2, H_STEPS, HORIZON)
        full_directory = gp.classifier.utils.create_directories(
            save_directory, 
            home_directory = home_directory, append_time = True)
        textfilename = '%slog.txt' % full_directory

    """Logging Options"""
    logging.basicConfig(level = logging.DEBUG,
                        format =    '%(asctime)s %(name)-12s '\
                                    '%(levelname)-8s %(message)s',
                        datefmt = '%m-%d %H:%M',
                        filename = textfilename,
                        filemode = 'a')
    gp.classifier.set_multiclass_logging_level(logging.DEBUG)

    # Define a Handler which writes INFO messages or higher to the sys.stderr
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)

    # Set a format which is simpler for console use
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')

    # Tell the handler to use this format
    console.setFormatter(formatter)

    # Add the handler to the root logger
    logging.getLogger().addHandler(console)

    """Process Options"""
    test_options  = {   'T_SEED': T_SEED,
                        'Q_SEED': Q_SEED,
                        'UNIQUE': UNIQUE,
                        'U_SEED': U_SEED,
                        'N_TRAIN': N_TRAIN,
                        'N_QUERY': N_QUERY,
                        'NOTRAIN': NOTRAIN,
                        'MODEL_ONLY': MODEL_ONLY,
                        'LONG_SCALE_ONLY': LONG_SCALE_ONLY,
                        'BATCH_START': BATCH_START,
                        'MISSION_LENGTH': MISSION_LENGTH,
                        'METHOD': METHOD,
                        'GREEDY': GREEDY,
                        'N_TRIALS': N_TRIALS,
                        'START_POINT1': START_POINT1,
                        'START_POINT2': START_POINT2,
                        'H_STEPS': H_STEPS,
                        'HORIZON': HORIZON,
                        'CHAOS': CHAOS,
                        'M_STEP': M_STEP,
                        'N_DRAWS': N_DRAWS,
                        'DEPTH_PENALTY': DEPTH_PENALTY,
                        'SKIP_FEATURE_PLOT': SKIP_FEATURE_PLOT,
                        'TWO_COLORBAR': TWO_COLORBAR,
                        'FIXED_TYPE': FIXED_TYPE,}

    model_options = {   'approxmethod': approxmethod,
                        'multimethod': multimethod,
                        'fusemethod': fusemethod,
                        'responsename': responsename,
                        'batchstart': batchstart,
                        'batchlearn': batchlearn,
                        'walltime': walltime,
                        'train': train}

    logging.info(sys.argv)
    logging.info(test_options)
    logging.info(model_options)
    
    """File Locations"""
    directory_data = '../../../Data/'
    filename_training_data = 'training_data_unmerged.npz'
    filename_query_points = 'query_points.npz'
    filename_truth = directory_data + 'truthmodel_t800_q100000_ts250_qs500.npz'
    filename_start = directory_data + 'finalmodel_t200_q100000_ts250_qs500'\
        '_method_LMDE_start377500_8440000_hsteps30_horizon5000.npz'

    """Sample Training Data and Query Points"""
    if LONG_SCALE_ONLY:
        i_features = [0, 3, 4]
        feature_names = [   'Bathymetry (Depth)', 
                            'Aspect (Long Scale)',
                            'Rugosity (Long Scale)']
        kerneldef = sea.model.kerneldef3
    else:
        i_features = [0, 1, 2, 3, 4]
        feature_names = [   'Bathymetry (Depth)', 
                            'Aspect (Short Scale)',
                            'Rugosity (Short Scale)',
                            'Aspect (Long Scale)',
                            'Rugosity (Long Scale)']
        kerneldef = sea.model.kerneldef5

    X, F, y, Xq, Fq, i_train, i_query = \
        sea.io.sample(*sea.io.load(directory_data, 
            filename_training_data, filename_query_points), 
            n_train = n_train, n_query = n_query,
            t_seed = T_SEED, q_seed = Q_SEED, 
            features = i_features, unique_labels = UNIQUE, unique_seed = U_SEED)

    start_indices = np.random.choice(np.arange(Xq.shape[0]), 
                            size = 2500, replace = False)

    yq_truth = sea.io.load_ground_truth(filename_truth, 
        assert_query_seed = Q_SEED)

    y_unique = np.unique(y)
    assert y_unique.shape[0] == 17
    logging.info('There are %d unique labels' % y_unique.shape[0])

    y_names = [y_names_all[i] for i in y_unique.astype(int)]
    logging.info('Habitat Labels: {0}'.format(y_names))

    """Whiten the feature space"""
    logging.info('Applying whitening on training and query features...')
    feature_fn = sea.feature.compose(Xq, Fq, white_fn)
    Fw, white_params = white_fn(F)
    Fqw = white_fn(Fq, params = white_params)

    k_features = F.shape[1]


    logging.info('Whitening Parameters:')
    logging.info(white_params)

    if not SKIP_FEATURE_PLOT:

        """Visualise Sampled Training Locations"""
        fig = plt.figure(figsize = (19.2, 10.8))
        plt.scatter(
            X[:, 0], X[:, 1], 
            marker = 'x', c = y, 
            vmin = y_unique[0], vmax = y_unique[-1], 
            cmap = mycmap)
        sea.vis.describe_plot(title = '(a) Training Labels', 
            xlabel = 'x [Eastings (km)]', ylabel = 'y [Northings (km)]', 
            clabel = 'Habitat Labels', cticks = y_unique, cticklabels = y_names,
            vis_range = vis_range, aspect_equal = True, 
            fontsize = FONTSIZE, fontname = FONTNAME, ticksize = TICKSIZE, axis_scale = 1e3)
        if TWO_COLORBAR:
            plt.scatter(
                X[:, 0], X[:, 1], 
                marker = 'x', c = y, 
                vmin = y_unique[0], vmax = y_unique[-1], 
                cmap = mycmap)
            sea.vis.describe_plot(title = '(a) Training Labels', 
                xlabel = 'x [Eastings (km)]', ylabel = 'y [Northings (km)]', 
                clabel = 'Habitat Labels', cticks = y_unique, cticklabels = y_unique,
                vis_range = vis_range, aspect_equal = True, 
                fontsize = FONTSIZE, fontname = FONTNAME, ticksize = TICKSIZE, axis_scale = 1e3)
        fig.tight_layout()

        """Visualise Features at Sampled Query Locations"""
        letters = ['b', 'c', 'd', 'e', 'f']
        feature_labels = ['Depth', 'Aspect', 'Rugosity', 'Aspect', 'Rugosity']
        feature_units = [
            r'$\mathrm{m}$',
            r'$\mathrm{m}$/$\mathrm{m}$',
            r'$\mathrm{m}^{2}$/$\mathrm{m}^{2}$',
            r'$\mathrm{m}$/$\mathrm{m}$',
            r'$\mathrm{m}^{2}$/$\mathrm{m}^{2}$']
        for k in range(k_features):
            fig = plt.figure(figsize = (19.2, 10.8))
            sea.vis.scatter(
                Xq[:, 0], Xq[:, 1], 
                c = Fq[:, k], colorcenter = 'mean', cmap = mycmap, **map_kwargs)
            sea.vis.describe_plot(
                title = '(%s) Feature: %s' % (letters[k], feature_names[k]), 
                xlabel = 'x [Eastings (km)]', ylabel = 'y [Northings (km)]', 
                clabel = '%s (%s)' % (feature_labels[k], feature_units[k]),
                vis_range = vis_range, aspect_equal = True, 
                fontsize = FONTSIZE, fontname = FONTNAME, ticksize = TICKSIZE, axis_scale = 1e3)
            if TWO_COLORBAR:
                sea.vis.scatter(
                    Xq[:, 0], Xq[:, 1], 
                    c = Fqw[:, k], colorcenter = 'mean', cmap = mycmap, **map_kwargs)
                sea.vis.describe_plot(
                    title = '(%s) Feature: %s' % (letters[k], feature_names[k]),
                    xlabel = 'x [Eastings (km)]', ylabel = 'y [Northings (km)]', 
                    clabel = 'Whitened %s' % feature_labels[k],
                    vis_range = vis_range, aspect_equal = True, 
                    fontsize = FONTSIZE, fontname = FONTNAME, ticksize = TICKSIZE, axis_scale = 1e3)
            fig.tight_layout()
            logging.info('Plotted feature map for: %s' % feature_names[k])

    """Classifier Training"""
    logging.info('===Begin Classifier Training===')
    logging.info('Number of training points: %d' % n_train)

    optimiser_config = gp.OptConfig()
    optimiser_config.sigma = gp.auto_range(kerneldef)
    optimiser_config.walltime = walltime

    # User can choose to batch start each binary classifier with different
    # initial hyperparameters for faster training
    if batchstart:
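        # One row of initial hyperparameters per one-vs-all binary classifier
        # (17 rows, matching the 17 unique habitat labels asserted earlier);
        # the number of columns follows the kernel definition in use.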

        if LONG_SCALE_ONLY:
            initial_hyperparams = \
                                    [    [138.04629034049745, 1.3026163180402612, 2.0609539037086777, 13.662777662608923], \
                                         [22.688174688988205, 0.85939961064783144, 3.1849018302605145, 16.259612437640932], \
                                         [4.9644266771973573, 12.863027059688255, 9.6883729945945429, 11.298947254002099], \
                                         [2.8722041026032517, 1.7551920752264887, 3.2957930714865951, 2.0261411106598501], \
                                         [1.95985751313864, 1.7015679970507851, 3.8989285686128072, 3.0227232782849662], \
                                         [4.5387717477104914, 6.1923651207093151, 2.6865876299919011, 1.2053732599769598], \
                                         [1.4406689190161075, 0.84772066855527117, 1.5917366549768182, 2.2383935939629236], \
                                         [1.5227503435926373, 2.3834299628669449, 1.8308476182313158, 1.3574417717717639], \
                                         [2.6346222397798864, 1.5157083797833799, 3.584415559552045, 3.6937042394472979], \
                                         [24.187203476683973, 2.4970022673408536, 2.7272769326695716, 5.14139220925684], \
                                         [3.1383721252068657, 3.7387409500864055, 5.4078774438507038, 2.6037751359723482], \
                                         [8.4478301876452306, 5.0320403406718492, 2.8834212079291985, 2.9415227772920427], \
                                         [3.1426495531487646, 4.0212439378901861, 0.60134852594003851, 1.7306126149977454], \
                                         [15.635374455133983, 3.2520409675251392, 0.48800515908613024, 6.194364208177519], \
                                         [79.02112152577908, 1.6997190910449294, 4.7100928164230398, 18.54561945000215], \
                                         [2.4147686613391968, 4.77983081183657, 5.6427304713913555, 3.6184518253194233], \
                                         [5.0053135736819581, 7.5224457127374018, 10.501213860336557, 14.976120894135667] ]
        else:
            # 200 training points
            initial_hyperparams = \
                                    [    [158.04660629989138, 1.2683731889725351, 66.115010215389162, 49.467620758110257, 2.1003587537731203, 148.19413243571267], \
                                         [65.998503029331715, 0.96182325466796015, 76.537506649529078, 8.7072874430795792, 2.7122005803599105, 31.811834175256053], \
                                         [7.2059309204166064, 248.71972897462479, 640.00239325817472, 168.88943619928102, 90.693076836996767, 262.04071654280534], \
                                         [6.0223358705627561, 5.7983854605769629, 7.151322371121573, 98.235863530785863, 108.97929055450719, 3.4049522081768151], \
                                         [2.8661028631425438, 202.71822125606951, 71.427958520531689, 67.841056622412466, 396.4966008975731, 8.7928316190417863], \
                                         [4.2274366302519937, 2.6787248415957619, 155.52642217518203, 12.902809348832989, 78.595731986539263, 91.408564485980548], \
                                         [1.9655515448696013, 1.0078459070165113, 47.264505099891736, 90.096628012215518, 39.449679781547545, 12.422258217851045], \
                                         [3.1222954891045123, 2.4207646445523401, 238.89962477023275, 110.82548792960249, 96.535214824647056, 13.310640119621617], \
                                         [4.9184407605671376, 2.2582428037913012, 220.5347539837725, 98.366730030395019, 51.082771850913375, 29.11755699767917], \
                                         [46.140309605544743, 3.4122114646182515, 66.780957403339869, 56.618087864345419, 3.4707008796589727, 8.5854627686410119], \
                                         [4.3654723196710217, 4.8059179997026096, 190.44003623250873, 122.8813398402322, 95.879994772087869, 3.3765033232957848], \
                                         [4.4788217098193668, 10.934823441369865, 123.33728936692876, 382.04572230065276, 227.51098327130356, 79.977071466413577], \
                                         [4.2294957554111283, 1.3726514772357896, 208.19938841666934, 174.7937853783985, 10.037269529873239, 127.83511529606474], \
                                         [27.075374606311115, 1.814069988416549, 7.7469247716502938, 170.97327917010006, 40.037475847074973, 56.043993677737269], \
                                         [83.400751642781657, 1.6747947434483126, 17.596540717504126, 112.41698370483411, 4.6137167168846629, 42.078275936624557], \
                                         [3.0901973040826474, 12.319455336644344, 70.719245134201159, 140.9221718578234, 224.77657336458043, 4.57995836705937], \
                                         [7.0310415618402358, 211.04475504456954, 202.37890056818438, 261.95210773212585, 156.94082106951919, 511.27885698486267] ]

            # 17 training points (note: this reassignment overrides the
            # 200-point hyperparameters above)
            initial_hyperparams = \
                                    [    [195.98487106074634, 6.7545874303906324, 329.16664672945632, 31.504964572830737, 1.9724700463442919, 21.940669950105065], \
                                         [95.829408268109617, 1.7949030403189434, 35.784549991952147, 1.8075392976819478, 10.31753783934046, 27.645539145569355], \
                                         [2.4679367048563332, 275.24358705136979, 636.92711361596116, 168.68139572408469, 87.432840499884747, 258.97633592547965], \
                                         [2.3420253385025367, 22.62310001101493, 20.298926088403892, 212.56013373337302, 209.02779685614902, 15.565081046326867], \
                                         [2.9748867112646384, 200.28885691135244, 1.1710790148367796, 52.732688292649328, 350.37993600980144, 6.3245982727950123], \
                                         [4.3591136780785931, 16.744013045345845, 111.84371029836002, 2.6271052576644083, 102.14193905175958, 143.95163894287077], \
                                         [2.6009968687635978, 7.8504445946888541, 279.91377309872604, 571.13535376074287, 51.648328357923504, 57.43636247986246], \
                                         [2.4669769583966881, 31.401111683458542, 1270.673818203853, 337.26773045092648, 368.74473748301619, 75.987016939102659], \
                                         [3.192504460520428, 4.0107134770438648, 266.75067564837178, 100.4706686565055, 2.2156505927398489, 46.563261471015231], \
                                         [2.7471255292767269, 5.9252925255696667, 66.230363001377597, 29.189047727296355, 4.7635337732844629, 9.4122327461445128], \
                                         [3.7276325071593375, 14.779658802248324, 197.5161462630272, 262.24241780595895, 101.47737618311446, 2.008726905058027], \
                                         [56.268937406820683, 2.4305349403533811, 199.24414072357104, 2050.7935600821183, 254.31421742040177, 419.97964986565347], \
                                         [2.1187879592937864, 7.151426051051188, 673.33446347673043, 367.65295120249033, 18.135417989990003, 55.041101285874831], \
                                         [94.324177544164343, 9.5192423889915716, 2.4718983405006063, 126.99001654949174, 28.768268229786504, 130.52935833526666], \
                                         [2.1644252127344439, 3.7891115623661094, 25.035857452933165, 150.28115119268051, 6.1879295261351688, 63.966209357387271], \
                                         [6.6898343574716925, 1.8055110508489738, 44.732688365789286, 12.947694755847623, 71.881458838567767, 7.7704768954169774], \
                                         [2.4864586001908822, 214.54661462714421, 223.8076780527073, 248.81280629025022, 153.15645218324957, 515.19782558493819] ]

        batch_config = gp.batch_start(optimiser_config, initial_hyperparams)
        logging.info('Using Batch Start Configuration')

    else:
        batch_config = optimiser_config
    
    # Obtain the response function
    responsefunction = gp.classifier.responses.get(responsename)

    # Train the classifier!
    logging.info('Learning...')
    if batchlearn:
        previous_history = np.load(filename_start)
        learned_classifier = list(previous_history['learned_classifier'])
        white_params = previous_history['white_params']
        batch_config = \
            gp.classifier.batch_start(optimiser_config, learned_classifier)
        Fqw = white_fn(Fq, white_params)
    else:
        learned_classifier = gp.classifier.learn(Fw, y, 
            kerneldef, responsefunction, batch_config, 
            multimethod = multimethod, approxmethod = approxmethod, 
            train = train, ftol = 1e-10)

    # Print the learnt kernel with its hyperparameters
    print_function = gp.describer(kerneldef)
    gp.classifier.utils.print_learned_kernels(print_function, 
        learned_classifier, y_unique)

    # Print the matrix of learned classifier hyperparameters
    logging.info('Matrix of learned hyperparameters')
    gp.classifier.utils.print_hyperparam_matrix(learned_classifier)

    """Classifier Prediction"""

    yq_pred, yq_mie, yq_lde = sea.model.predictions(learned_classifier, Fqw,
        fusemethod = fusemethod)
    yq_esd = gp.classifier.equivalent_standard_deviation(yq_lde)
    miss_ratio = sea.model.miss_ratio(yq_pred, yq_truth)
    yq_lde_mean = yq_lde.mean()
    yq_mie_mean = yq_mie.mean()
    yq_pred_hist, _ = np.histogram(yq_pred, bins = np.arange(23), density = True)
    logging.info('Miss Ratio: {0:.2f}%'.format(100 * miss_ratio))
    logging.info('Average Marginalised Linearised Model Differential Entropy: '\
        '{0:.2f}'.format(yq_lde_mean))
    logging.info('Average Marginalised Information Entropy: '\
        '{0:.2f}'.format(yq_mie_mean))

    """Visualise Query Prediction and Entropy"""
    fig = plt.figure(figsize = (19.2, 10.8))
    sea.vis.scatter(
        Xq[:, 0], Xq[:, 1], 
        c = yq_truth, vmin = y_unique[0], vmax = y_unique[-1], cmap = mycmap,
        **map_kwargs)
    sea.vis.describe_plot(title = 'Ground Truth Benthic Habitat Map', 
        xlabel = 'x [Eastings (km)]', ylabel = 'y [Northings (km)]', 
        clabel = 'Habitat Labels', cticks = y_unique, cticklabels = y_names,
        vis_range = vis_range, aspect_equal = True, 
        fontsize = FONTSIZE, fontname = FONTNAME, ticksize = TICKSIZE, 
        axis_scale = 1e3)
    fig.tight_layout()

    fig = plt.figure(figsize = (19.2, 10.8))
    sea.vis.scatter(
        Xq[:, 0], Xq[:, 1], 
        c = yq_pred, vmin = y_unique[0], vmax = y_unique[-1], cmap = mycmap,
        **map_kwargs)
    sea.vis.describe_plot(
        title = 'Prediction Map [Miss Ratio: {0:.2f}\%]'.format(100 * miss_ratio), 
        xlabel = 'x [Eastings (km)]', ylabel = 'y [Northings (km)]', 
        clabel = 'Habitat Labels', cticks = y_unique, cticklabels = y_names,
        vis_range = vis_range, aspect_equal = True, 
        fontsize = FONTSIZE, fontname = FONTNAME, ticksize = TICKSIZE, 
        axis_scale = 1e3)
    fig.tight_layout()

    fig = plt.figure(figsize = (19.2, 10.8))
    sea.vis.scatter(
        Xq[:, 0], Xq[:, 1], 
        c = yq_mie, cmap = cm.coolwarm, colorcenter = 'none', 
        **map_kwargs)
    sea.vis.describe_plot(title = 'Prediction Information Entropy', 
        xlabel = 'x [Eastings (km)]', ylabel = 'y [Northings (km)]', 
        clabel = 'Information Entropy',
        vis_range = vis_range, aspect_equal = True, 
        fontsize = FONTSIZE, fontname = FONTNAME, ticksize = TICKSIZE, 
        axis_scale = 1e3)
    fig.tight_layout()

    fig = plt.figure(figsize = (19.2, 10.8))
    sea.vis.scatter(
        Xq[:, 0], Xq[:, 1], 
        c = np.log(yq_mie), cmap = cm.coolwarm, colorcenter = 'none', 
        **map_kwargs)
    sea.vis.describe_plot(title = 'Log Prediction Information Entropy', 
        xlabel = 'x [Eastings (km)]', ylabel = 'y [Northings (km)]', 
        clabel = 'Information Entropy',
        vis_range = vis_range, aspect_equal = True, 
        fontsize = FONTSIZE, fontname = FONTNAME, ticksize = TICKSIZE, 
        axis_scale = 1e3)
    fig.tight_layout()

    fig = plt.figure(figsize = (19.2, 10.8))
    sea.vis.scatter(
        Xq[:, 0], Xq[:, 1], 
        c = yq_lde, cmap = cm.coolwarm, colorcenter = colorcenter_lde, 
        **map_kwargs)
    sea.vis.describe_plot(title = 'Linearised Model Differential Entropy', 
        xlabel = 'x [Eastings (km)]', ylabel = 'y [Northings (km)]', 
        clabel = 'Differential Entropy',
        vis_range = vis_range, aspect_equal = True, 
        fontsize = FONTSIZE, fontname = FONTNAME, ticksize = TICKSIZE, 
        axis_scale = 1e3)
    fig.tight_layout()

    fig = plt.figure(figsize = (19.2, 10.8))
    sea.vis.scatter(
        Xq[:, 0], Xq[:, 1], 
        c = yq_esd, cmap = cm.coolwarm, colorcenter = colorcenter_analysis, 
        **map_kwargs)
    sea.vis.describe_plot(title = 'Equivalent Standard Deviation', 
        xlabel = 'x [Eastings (km)]', ylabel = 'y [Northings (km)]', 
        clabel = 'Standard Deviation',
        vis_range = vis_range, aspect_equal = True, 
        fontsize = FONTSIZE, fontname = FONTNAME, ticksize = TICKSIZE, 
        axis_scale = 1e3)
    fig.tight_layout()
    
    # fig = plt.figure(figsize = (8.0, 6.0))
    # plt.bar()

    """Save Results"""

    if SAVE_RESULTS:
        gp.classifier.utils.save_all_figures(full_directory, 
            axis_equal = True, extension = 'eps', rcparams = rcparams)
        shutil.copy2('./%s' % FILENAME , full_directory)
        np.savez('%sinitialmodel.npz' % full_directory, 
                learned_classifier = learned_classifier,
                t_seed = T_SEED, q_seed = Q_SEED,
                n_train = n_train, n_query = n_query,
                i_train = i_train, i_query = i_query,
                yq_pred = yq_pred, yq_mie = yq_mie, yq_lde = yq_lde,
                white_params = white_params)
    if MODEL_ONLY:
        plt.show()
        return

    """Informative Seafloor Exploration: Setup"""
    xq_now = feature_fn.closest_locations(np.array([[START_POINT1, START_POINT2]]))
    horizon = HORIZON
    h_steps = H_STEPS

    if GREEDY or (METHOD == 'RANDOM') or (METHOD == 'FIXED'):
        # Greedy, random and fixed strategies plan only a single step ahead
        horizon /= h_steps
        h_steps = 1

    theta_bound = np.deg2rad(40)
    theta_bounds = theta_bound * np.ones(h_steps)
    theta_stack_low  = -theta_bounds
    theta_stack_high = +theta_bounds

    xtol_rel = 1e-2
    ftol_rel = 1e-3
    ctol = 1e-10

    theta_stack_init = np.deg2rad(0) * np.ones(h_steps)
    theta_stack_init[0] = np.deg2rad(180)
    theta_stack_low[0] = np.deg2rad(0)
    theta_stack_high[0] = np.deg2rad(360)
    r = horizon/h_steps
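    # r: travel distance covered by each step of the planned path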
    choice_walltime = 1500.0

    k_step = 1
    m_step = 1

    bound = 100

    assert k_step == 1

    """Informative Seafloor Exploration: Initialisation"""
    # The observed data till now
    X_now = X.copy()
    y_now = y.copy()

    # Observe the current location
    i_observe = sea.feature.closest_indices(xq_now, Xq)
    yq_now = yq_truth[i_observe]

    # Add the observed data to the training set
    X_now = np.concatenate((X_now, xq_now[[-1]]), axis = 0)
    y_now = np.append(y_now, yq_now)

    # Add the new location to the array of travelled coordinates
    xq1_nows = xq_now[:, 0]
    xq2_nows = xq_now[:, 1]
    yq_nows = yq_now.copy()

    # Plot the current situation
    fig1 = plt.figure(figsize = (19.2, 10.8))
    fig2 = plt.figure(figsize = (19.2, 10.8))
    fig3 = plt.figure(figsize = (19.2, 10.8))
    fig4 = plt.figure(figsize = (19.2, 10.8))

    # Start exploring
    i_trials = 0
    n_trials = N_TRIALS
    miss_ratio_array = np.nan * np.ones(n_trials)
    yq_mie_mean_array = np.nan * np.ones(n_trials)
    yq_lde_mean_array = np.nan * np.ones(n_trials)
    entropy_opt_array = np.nan * np.ones(n_trials)
    yq_esd_mean_array = np.nan * np.ones(n_trials)

    if METHOD == 'FIXED':
        if FIXED_TYPE == 'curves':
            turns = np.random.normal(loc = 0, scale = np.deg2rad(30), size = n_trials)
        elif FIXED_TYPE == 'lines':
            turns = np.zeros(n_trials)
            n_turns = 5
            turns[(np.arange(n_turns) * n_trials / n_turns).astype(int)] = np.random.normal(loc = 0, scale = np.deg2rad(30), size = n_turns)
        elif FIXED_TYPE == 'spiral':
            if MISSION_LENGTH > 0:
                turns = np.array([np.linspace(np.deg2rad(30), np.deg2rad(0), num = MISSION_LENGTH) for i in np.arange(n_trials/MISSION_LENGTH)]).flatten()
            else:
                turns = np.linspace(np.deg2rad(30), np.deg2rad(0), num = n_trials)
        else:
            raise TypeError('No such FIXED type as %s' % FIXED_TYPE)

    while i_trials < n_trials:

        if MISSION_LENGTH > 0:
            if ((i_trials + 1) % MISSION_LENGTH == 0):

                if METHOD in ['LMDE', 'MCPIE', 'AMPIE']:
                    acquisition_name = METHOD

                    xq_now = sea.explore.compute_new_starting_location(start_indices, Xq, Fqw, 
                        learned_classifier, acquisition = acquisition_name)

                else:
                    
                    xq_now = Xq[np.random.choice(start_indices, size = 1, replace = False)]


        if METHOD == 'FIXED':
            theta_stack_init[0] += turns[i_trials]
            theta_stack_init[0] = np.mod(theta_stack_init[0], 2 * np.pi)

        # Propose a path
        if m_step <= k_step:
            if METHOD == 'RANDOM':
                xq_path, theta_stack_opt, entropy_opt = \
                    sea.explore.random_path(theta_stack_init, r, xq_now[-1], 
                        learned_classifier, feature_fn, white_params, 
                        bound = bound, 
                        chaos = CHAOS)
            elif METHOD == 'FIXED':
                xq_path, theta_stack_opt, entropy_opt = \
                    sea.explore.fixed_path(theta_stack_init, r, xq_now[-1], 
                        learned_classifier, feature_fn, white_params,
                        bound = bound, 
                        current_step = i_trials, 
                        turns = turns)
            else:
                xq_path, theta_stack_opt, entropy_opt = \
                    sea.explore.optimal_path(theta_stack_init, r, xq_now[-1],
                        learned_classifier, feature_fn, white_params,
                        objective = METHOD,
                        turn_limit = theta_bound,
                        bound = bound,
                        theta_stack_low = theta_stack_low,
                        theta_stack_high = theta_stack_high,
                        walltime = choice_walltime,
                        xtol_rel = xtol_rel,
                        ftol_rel = ftol_rel,
                        ctol = ctol,
                        globalopt = False,
                        n_draws = N_DRAWS,
                        depth_penalty = DEPTH_PENALTY)
            logging.info('Optimal Joint Entropy: %.5f' % entropy_opt)

            m_step = M_STEP
            logging.info('Taking %d steps' % m_step)
        else:
            m_step -= 1
            theta_stack_opt = theta_stack_init.copy()
            xq_path = sea.explore.forward_path_model(theta_stack_init, 
                r, xq_now[-1])
            logging.info('%d steps left' % m_step)

        # Path steps into the proposed path
        xq_now = xq_path[:k_step]

        # Initialise the next path angles
        theta_stack_init = sea.explore.shift_path(theta_stack_opt, 
            k_step = k_step, theta_bounds = theta_bounds)
        np.clip(theta_stack_init, 
            theta_stack_low + 1e-4, theta_stack_high - 1e-4, 
            out = theta_stack_init)

        # Observe the current location
        i_observe = sea.feature.closest_indices(xq_now, Xq)
        yq_now = yq_truth[i_observe]

        # Add the observed data to the training set
        X_now = np.concatenate((X_now, xq_now), axis = 0)
        y_now = np.append(y_now, yq_now)

        # Add the new location to the array of travelled coordinates
        xq1_nows = np.append(xq1_nows, xq_now[:, 0])
        xq2_nows = np.append(xq2_nows, xq_now[:, 1])
        yq_nows = np.append(yq_nows, yq_now)

        # Update that into the model
        Fw_now, white_params = feature_fn(X_now)
        logging.info('Learning Classifier...')
        batch_config = \
            gp.classifier.batch_start(optimiser_config, learned_classifier)
        try:
            learned_classifier = gp.classifier.learn(Fw_now, y_now, 
                kerneldef, responsefunction, batch_config, 
                multimethod = multimethod, approxmethod = approxmethod,
                train = True, ftol = 1e-6)
        except Exception as e:
            logging.warning('Training failed: {0}'.format(e))
            try:
                learned_classifier = gp.classifier.learn(Fw_now, y_now, 
                    kerneldef, responsefunction, batch_config, 
                    multimethod = multimethod, approxmethod = approxmethod,
                    train = False, ftol = 1e-6)
            except Exception as e:
                # Fall back to the previously learned classifier if even the
                # no-training update fails
                logging.warning('Learning also failed: {0}'.format(e))
        logging.info('Finished Learning')

        # This is the finite horizon optimal route
        fqw_path = feature_fn(xq_path, white_params)
        xq1_path = xq_path[:, 0][k_step:]
        xq2_path = xq_path[:, 1][k_step:]
        yq_path = gp.classifier.classify(gp.classifier.predict(fqw_path, 
            learned_classifier), y_unique)[k_step:]

        """ Computing Analysis Maps """
        Fqw = white_fn(Fq, white_params)

        yq_pred, yq_mie, yq_lde = \
            sea.model.predictions(learned_classifier, Fqw, 
                fusemethod = fusemethod)
        yq_esd = gp.classifier.equivalent_standard_deviation(yq_lde)
        miss_ratio = sea.model.miss_ratio(yq_pred, yq_truth)
        yq_mie_mean = yq_mie.mean()
        yq_lde_mean = yq_lde.mean()
        yq_esd_mean = yq_esd.mean()
        logging.info('Miss Ratio: {0:.2f}%'.format(100 * miss_ratio))
        logging.info('Average Marginalised Linearised Model Differential Entropy: '\
            '{0:.2f}'.format(yq_lde_mean))
        logging.info('Average Marginalised Information Entropy: '\
            '{0:.2f}'.format(yq_mie_mean))


        """ Save history """
        miss_ratio_array[i_trials] = miss_ratio
        yq_mie_mean_array[i_trials] = yq_mie_mean
        yq_lde_mean_array[i_trials] = yq_lde_mean
        yq_esd_mean_array[i_trials] = yq_esd_mean
        entropy_opt_array[i_trials] = entropy_opt

        # Find the bounds of the entropy predictions
        vmin1 = yq_lde.min()
        vmax1 = yq_lde.max()
        vmin2 = yq_mie.min()
        vmax2 = yq_mie.max()
        vmin3 = yq_esd.min()
        vmax3 = yq_esd.max()

        logging.info('Plotting...')

        # Save EPS copies around mission boundaries, during the first ten or
        # so trials, and every SAVE_TRIALS-th trial (PNGs are always saved).
        # The MISSION_LENGTH > 0 guard avoids a modulo-by-zero with the
        # default MISSION_LENGTH = 0.
        if ((MISSION_LENGTH > 0
                and (((i_trials + 1) % MISSION_LENGTH == 0)
                     or ((i_trials + 2) % MISSION_LENGTH == 0)))
                or (i_trials <= 10)
                or (((i_trials + 1) % SAVE_TRIALS) == 0)):
            SAVE_EPS = True
        else:
            SAVE_EPS = False

        """ Linearised Entropy Map """

        # Prepare Figure 1
        plt.figure(fig1.number)
        plt.clf()
        sea.vis.scatter(
            Xq[:, 0], Xq[:, 1], 
            c = yq_lde, cmap = cm.coolwarm, colorcenter = colorcenter_lde,
            **map_kwargs)
        sea.vis.describe_plot(title = 'Linearised Model Differential Entropy', 
            xlabel = 'x [Eastings (km)]', ylabel = 'y [Northings (km)]', 
            clabel = 'Differential Entropy',
            vis_range = vis_range, aspect_equal = True, 
            fontsize = FONTSIZE, fontname = FONTNAME, ticksize = TICKSIZE, 
            axis_scale = 1e3)

        # Plot the path on top
        sea.vis.scatter(xq1_nows, xq2_nows, c = yq_nows, s = 60, 
            facecolors = 'none', 
            vmin = y_unique[0], vmax = y_unique[-1], 
            cmap = mycmap)
        if MISSION_LENGTH == 0:
            sea.vis.plot(xq1_nows, xq2_nows, c = 'k', linewidth = 2)
        else:
            sea.vis.plot(xq1_nows, xq2_nows, c = 'k', linestyle = '--', linewidth = 1)
            xq1_nows_split = sea.vis.split_array(xq1_nows, MISSION_LENGTH)
            xq2_nows_split = sea.vis.split_array(xq2_nows, MISSION_LENGTH)
            [sea.vis.plot(xq1_nows_split[i], xq2_nows_split[i], c = 'k', linewidth = 2) for i in range(xq1_nows_split.shape[0])]
        sea.vis.scatter(xq_now[:, 0], xq_now[:, 1], c = yq_now, s = 120, 
            vmin = y_unique[0], vmax = y_unique[-1], 
            cmap = mycmap)

        # Plot the horizon
        gp.classifier.utils.plot_circle(xq_now[-1], horizon, c = 'k', 
            linewidth = 2, marker = '.')

        plt.gca().arrow(xq_now[-1][0], xq_now[-1][1] + r, 0, -r/4, 
            head_width = r/4, head_length = r/4, fc = 'k', ec = 'k')

        # Save the plot
        fig1.tight_layout()
        plt.gca().set_aspect('equal', adjustable = 'box')
        plt.savefig('%slde%d.png' 
            % (full_directory, i_trials + 1))
        if SAVE_EPS:
            plt.savefig('%slde%d.eps' 
                % (full_directory, i_trials + 1))

        # Plot the proposed path
        sea.vis.scatter(xq1_path, xq2_path, c = yq_path, 
            s = 60, marker = 'D', 
            vmin = y_unique[0], vmax = y_unique[-1], cmap = mycmap)
        sea.vis.plot(xq1_path, xq2_path, c = 'k', linewidth = 2)

        # Save the plot
        fig1.tight_layout()
        plt.gca().set_aspect('equal', adjustable = 'box')
        plt.savefig('%slde_propose%d.png' 
            % (full_directory, i_trials + 1))
        if SAVE_EPS:
            plt.savefig('%slde_propose%d.eps' 
                % (full_directory, i_trials + 1))

        """ True Entropy Map """

        # Prepare Figure 3
        plt.figure(fig2.number)
        plt.clf()
        sea.vis.scatter(
            Xq[:, 0], Xq[:, 1], 
            c = yq_mie, cmap = cm.coolwarm, colorcenter = colorcenter_analysis,
            **map_kwargs)
        sea.vis.describe_plot(title = 'Prediction Information Entropy', 
            xlabel = 'x [Eastings (km)]', ylabel = 'y [Northings (km)]', 
            clabel = 'Information Entropy',
            vis_range = vis_range, aspect_equal = True, 
            fontsize = FONTSIZE, fontname = FONTNAME, ticksize = TICKSIZE, 
            axis_scale = 1e3)

        # Plot the path on top
        sea.vis.scatter(xq1_nows, xq2_nows, c = yq_nows, s = 60, 
            facecolors = 'none', 
            vmin = y_unique[0], vmax = y_unique[-1], 
            cmap = mycmap)
        if MISSION_LENGTH == 0:
            sea.vis.plot(xq1_nows, xq2_nows, c = 'k', linewidth = 2)
        else:
            sea.vis.plot(xq1_nows, xq2_nows, c = 'k', linestyle = '--', linewidth = 1)
            xq1_nows_split = sea.vis.split_array(xq1_nows, MISSION_LENGTH)
            xq2_nows_split = sea.vis.split_array(xq2_nows, MISSION_LENGTH)
            [sea.vis.plot(xq1_nows_split[i], xq2_nows_split[i], c = 'k', linewidth = 2) for i in range(xq1_nows_split.shape[0])]
        sea.vis.scatter(xq_now[:, 0], xq_now[:, 1], c = yq_now, s = 120, 
            vmin = y_unique[0], vmax = y_unique[-1], 
            cmap = mycmap)

        # Plot the horizon
        gp.classifier.utils.plot_circle(xq_now[-1], horizon, c = 'k', 
            linewidth = 2, marker = '.')

        plt.gca().arrow(xq_now[-1][0], xq_now[-1][1] + r, 0, -r/4, 
            head_width = r/4, head_length = r/4, fc = 'k', ec = 'k')

        # Save the plot
        fig2.tight_layout()
        plt.gca().set_aspect('equal', adjustable = 'box')
        plt.savefig('%smie%d.png' 
            % (full_directory, i_trials + 1))
        if SAVE_EPS:
            plt.savefig('%smie%d.eps' 
                % (full_directory, i_trials + 1))

        # Plot the proposed path
        sea.vis.scatter(xq1_path, xq2_path, c = yq_path, 
            s = 60, marker = 'D', 
            vmin = y_unique[0], vmax = y_unique[-1], cmap = mycmap)
        sea.vis.plot(xq1_path, xq2_path, c = 'k', linewidth = 2)

        # Save the plot
        fig2.tight_layout()
        plt.gca().set_aspect('equal', adjustable = 'box')
        plt.savefig('%smie_propose%d.png' 
            % (full_directory, i_trials + 1))
        if SAVE_EPS:
            plt.savefig('%smie_propose%d.eps' 
                % (full_directory, i_trials + 1))

        """ Class Prediction Map """

        # Prepare Figure 4
        plt.figure(fig3.number)
        plt.clf()
        sea.vis.scatter(
            Xq[:, 0], Xq[:, 1], 
            c = yq_pred, vmin = y_unique[0], vmax = y_unique[-1], cmap = mycmap,
            **map_kwargs)
        sea.vis.describe_plot(
            title = 'Prediction Map [Miss Ratio: {0:.2f}\%]'.format(
                100 * miss_ratio), 
            xlabel = 'x [Eastings (km)]', ylabel = 'y [Northings (km)]', 
            clabel = 'Habitat Labels', cticks = y_unique, cticklabels = y_names,
            vis_range = vis_range, aspect_equal = True, 
            fontsize = FONTSIZE, fontname = FONTNAME, ticksize = TICKSIZE, 
            axis_scale = 1e3)

        # Plot the path on top
        sea.vis.scatter(xq1_nows, xq2_nows, c = yq_nows, s = 60, 
            facecolors = 'none', 
            vmin = y_unique[0], vmax = y_unique[-1], 
            cmap = mycmap)
        if MISSION_LENGTH == 0:
            sea.vis.plot(xq1_nows, xq2_nows, c = 'k', linewidth = 2)
        else:
            sea.vis.plot(xq1_nows, xq2_nows, c = 'k', linestyle = '--', linewidth = 1)
            xq1_nows_split = sea.vis.split_array(xq1_nows, MISSION_LENGTH)
            xq2_nows_split = sea.vis.split_array(xq2_nows, MISSION_LENGTH)
            [sea.vis.plot(xq1_nows_split[i], xq2_nows_split[i], c = 'k', linewidth = 2) for i in range(xq1_nows_split.shape[0])]
        sea.vis.scatter(xq_now[:, 0], xq_now[:, 1], c = yq_now, s = 120, 
            vmin = y_unique[0], vmax = y_unique[-1], 
            cmap = mycmap)

        # Plot the horizon
        gp.classifier.utils.plot_circle(xq_now[-1], horizon, c = 'k', 
            linewidth = 2, marker = '.')

        plt.gca().arrow(xq_now[-1][0], xq_now[-1][1] + r, 0, -r/4, 
            head_width = r/4, head_length = r/4, fc = 'k', ec = 'k')

        # Save the plot
        fig3.tight_layout()
        plt.gca().set_aspect('equal', adjustable = 'box')
        plt.savefig('%spred%d.png' 
            % (full_directory, i_trials + 1))
        if SAVE_EPS:
            plt.savefig('%spred%d.eps' 
                % (full_directory, i_trials + 1))


        # Plot the proposed path
        sea.vis.scatter(xq1_path, xq2_path, c = yq_path, 
            s = 60, marker = 'D', 
            vmin = y_unique[0], vmax = y_unique[-1], cmap = mycmap)
        sea.vis.plot(xq1_path, xq2_path, c = 'k', linewidth = 2)

        # Save the plot
        fig3.tight_layout()
        plt.gca().set_aspect('equal', adjustable = 'box')
        plt.savefig('%spred_propose%d.png' 
            % (full_directory, i_trials + 1))
        if SAVE_EPS:
            plt.savefig('%spred_propose%d.eps' 
                % (full_directory, i_trials + 1))

        # Prepare Figure 5
        plt.figure(fig4.number)
        plt.clf()
        fontsize = 24
        ticksize = 14

        steps_array = np.arange(i_trials + 1) + 1
        ax = plt.subplot(4, 1, 1)
        plt.plot(steps_array, 100 * miss_ratio_array[:(i_trials + 1)])
        plt.title('Percentage of Prediction Misses', fontsize = fontsize)
        plt.ylabel('Misses (\%)', fontsize = fontsize)
        ax.set_xticklabels( () )

        ax = plt.subplot(4, 1, 2)
        plt.plot(steps_array, yq_lde_mean_array[:(i_trials + 1)])
        plt.title('Average Marginalised Differential Entropy', 
            fontsize = fontsize)
        plt.ylabel('Entropy (nats)', fontsize = fontsize)
        ax.set_xticklabels( () )

        ax = plt.subplot(4, 1, 3)
        plt.plot(steps_array, yq_mie_mean_array[:(i_trials + 1)])
        plt.title('Average Marginalised Information Entropy', 
            fontsize = fontsize)
        plt.ylabel('Entropy (nats)', fontsize = fontsize)
        ax.set_xticklabels( () )

        ax = plt.subplot(4, 1, 4)
        plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
        plt.plot(steps_array, entropy_opt_array[:(i_trials + 1)])
        plt.title('Entropy Metric of Proposed Path', fontsize = fontsize)
        plt.ylabel('Entropy (nats)', fontsize = fontsize)

        plt.xlabel('Steps', fontsize = fontsize)
        for tick in plt.gca().xaxis.get_major_ticks():
            tick.label.set_fontsize(ticksize) 
        for tick in plt.gca().yaxis.get_major_ticks():
            tick.label.set_fontsize(ticksize) 

        # Save the plot
        fig4.tight_layout()
        plt.savefig('%shistory%d.png' 
            % (full_directory, i_trials + 1))
        logging.info('Plotted and Saved Iteration')
    
        # Move on to the next step
        i_trials += 1

        np.savez('%shistory%d.npz' % (full_directory, i_trials), 
            learned_classifier = learned_classifier,
            miss_ratio_array = miss_ratio_array,
            yq_lde_mean_array = yq_lde_mean_array,
            yq_mie_mean_array = yq_mie_mean_array,
            entropy_opt_array = entropy_opt_array,
            yq_esd_mean_array = yq_esd_mean_array,
            t_seed = T_SEED, q_seed = Q_SEED,
            n_train = n_train, n_query = n_query,
            i_train = i_train, i_query = i_query,
            yq_lde = yq_lde,
            yq_mie = yq_mie,
            yq_pred = yq_pred,
            white_params = white_params,
            X_now = X_now,
            Fw_now = Fw_now,
            y_now = y_now,
            xq1_path = xq1_path,
            xq2_path = xq2_path,
            fqw_path = fqw_path,
            yq_path = yq_path,
            xq1_nows = xq1_nows,
            xq2_nows = xq2_nows,
            yq_nows = yq_nows,
            vis_range = vis_range,
            colorcenter_analysis = colorcenter_analysis,
            colorcenter_lde = colorcenter_lde,
            y_unique = y_unique,
            mycmap = mycmap,
            i_trials = i_trials,
            theta_stack_opt = theta_stack_opt,
            theta_stack_init = theta_stack_init,
            xq_path = xq_path,
            xq_now = xq_now,
            yq_now = yq_now,
            i_observe = i_observe,
            horizon = horizon,
            h_steps = h_steps,
            r = r,
            y_names = y_names,
            FONTSIZE = FONTSIZE,
            FONTNAME = FONTNAME,
            TICKSIZE = TICKSIZE,
            SAVE_TRIALS = SAVE_TRIALS)

        logging.info('White Params: {0}'.format(white_params))

    np.savez('%shistory.npz' % full_directory, 
                learned_classifier = learned_classifier,
                miss_ratio_array = miss_ratio_array,
                yq_lde_mean_array = yq_lde_mean_array,
                yq_mie_mean_array = yq_mie_mean_array,
                entropy_opt_array = entropy_opt_array,
                yq_esd_mean_array = yq_esd_mean_array,
                t_seed = T_SEED, q_seed = Q_SEED,
                n_train = n_train, n_query = n_query,
                i_train = i_train, i_query = i_query,
                yq_lde = yq_lde,
                yq_mie = yq_mie,
                yq_pred = yq_pred,
                white_params = white_params,
                X_now = X_now,
                Fw_now = Fw_now,
                y_now = y_now,
                xq1_path = xq1_path,
                xq2_path = xq2_path,
                fqw_path = fqw_path,
                yq_path = yq_path,
                xq1_nows = xq1_nows,
                xq2_nows = xq2_nows,
                yq_nows = yq_nows,
                vis_range = vis_range,
                colorcenter_analysis = colorcenter_analysis,
                colorcenter_lde = colorcenter_lde,
                y_unique = y_unique,
                mycmap = mycmap,
                i_trials = i_trials,
                theta_stack_opt = theta_stack_opt,
                theta_stack_init = theta_stack_init,
                xq_path = xq_path,
                xq_now = xq_now,
                yq_now = yq_now,
                i_observe = i_observe,
                horizon = horizon,
                h_steps = h_steps,
                r = r,
                y_names = y_names,
                FONTSIZE = FONTSIZE,
                FONTNAME = FONTNAME,
                TICKSIZE = TICKSIZE,
                SAVE_TRIALS = SAVE_TRIALS)

    plt.show()
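
# Hedged addition (not present in the original listing): a standard entry
# point so the script runs main() when executed directly.
if __name__ == '__main__':
    main()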
Ejemplo n.º 50
0
        ax1.set_yticks([])
        ax1.set_xticks([])
        ax1.axis("off")

        ax = fig.add_subplot(1, 2, 1)
        # ('axisbg' was renamed to 'facecolor' in newer matplotlib)
        ax = plt.subplot2grid((1, 40), (0, 0), colspan=10, facecolor="white")

        fig.subplots_adjust(wspace=0, hspace=0)

        ax1.set_title("Roary matrix\n(%d gene clusters)" % roary.shape[0])

        if options.labels:
            fsize = 12 - 0.1 * roary.shape[1]
            if fsize < 7:
                fsize = 7
            with plt.rc_context({"font.size": fsize}):
                Phylo.draw(
                    t,
                    axes=ax,
                    show_confidence=False,
                    label_func=lambda x: str(x)[:10],
                    xticks=([],),
                    yticks=([],),
                    ylabel=("",),
                    xlabel=("",),
                    xlim=(-mdist * 0.1, mdist + mdist * 0.45 - mdist * roary.shape[1] * 0.001),
                    axis=("off",),
                    title=("Tree\n(%d strains)" % roary.shape[1],),
                    do_show=False,
                )
        else:
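            # Truncated in this listing; hedged placeholder so the fragment
            # parses (the original presumably draws the tree here without
            # per-strain labels).
            pass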
Ejemplo n.º 51
0
def draw_termite_plot(values_mat, col_labels, row_labels,
                      highlight_cols=None, highlight_colors=None,
                      save=False):
    """
    Make a "termite" plot, typically used for assessing topic models with a tabular
    layout that promotes comparison of terms both within and across topics.

    Args:
        values_mat (``np.ndarray`` or matrix): matrix of values with shape
            (# row labels, # col labels) used to size the dots on the grid
        col_labels (seq[str]): labels used to identify x-axis ticks on the grid
        row_labels(seq[str]): labels used to identify y-axis ticks on the grid
        highlight_cols (int or seq[int], optional): indices for columns
            to visually highlight in the plot with contrasting colors
        highlight_colors (tuple of 2-tuples): each 2-tuple corresponds to a pair
            of (light/dark) matplotlib-friendly colors used to highlight a single
            column; if not specified (default), a good set of 6 pairs are used
        save (str, optional): give the full /path/to/fname on disk to save figure

    Returns:
        ``matplotlib.axes.Axes.axis``: axis on which termite plot is plotted

    Raises:
        ValueError: if more columns are selected for highlighting than colors
            or if any of the inputs' dimensions don't match

    References:
        .. Chuang, Jason, Christopher D. Manning, and Jeffrey Heer. "Termite:
            Visualization techniques for assessing textual topic models."
            Proceedings of the International Working Conference on Advanced
            Visual Interfaces. ACM, 2012.

    .. seealso:: :func:`TopicModel.termite_plot <textacy.tm.TopicModel.termite_plot>`
    """
    try:
        plt
    except NameError:
        raise ImportError(
            'matplotlib is not installed, so textacy.viz won\'t work; '
            'install it individually, or along with textacy via '
            '`pip install textacy[viz]`')
    n_rows, n_cols = values_mat.shape
    max_val = np.max(values_mat)

    if n_rows != len(row_labels):
        msg = "values_mat and row_labels dimensions don't match: {} vs. {}".format(
            n_rows, len(row_labels))
        raise ValueError(msg)
    if n_cols != len(col_labels):
        msg = "values_mat and col_labels dimensions don't match: {} vs. {}".format(
            n_cols, len(col_labels))
        raise ValueError(msg)

    if highlight_colors is None:
        highlight_colors = COLOR_PAIRS
    if highlight_cols is not None:
        if isinstance(highlight_cols, int):
            highlight_cols = (highlight_cols,)
        elif len(highlight_cols) > len(highlight_colors):
            msg = 'no more than {} columns may be highlighted at once'.format(
                len(highlight_colors))
            raise ValueError(msg)
        highlight_colors = {hc: COLOR_PAIRS[i]
                            for i, hc in enumerate(highlight_cols)}

    with plt.rc_context(RC_PARAMS):
        fig, ax = plt.subplots(figsize=(pow(n_cols, 0.8), pow(n_rows, 0.66)))

        _ = ax.set_yticks(range(n_rows))
        yticklabels = ax.set_yticklabels(row_labels,
                                         fontsize=14, color='gray')
        if highlight_cols is not None:
            for i, ticklabel in enumerate(yticklabels):
                max_tick_val = max(values_mat[i, hc] for hc in highlight_cols)
                for hc in highlight_cols:
                    if max_tick_val > 0 and values_mat[i, hc] == max_tick_val:
                        ticklabel.set_color(highlight_colors[hc][1])

        ax.get_xaxis().set_ticks_position('top')
        _ = ax.set_xticks(range(n_cols))
        xticklabels = ax.set_xticklabels(col_labels,
                                         fontsize=14, color='gray',
                                         rotation=30, ha='left')
        if highlight_cols is not None:
            gridlines = ax.get_xgridlines()
            for i, ticklabel in enumerate(xticklabels):
                if i in highlight_cols:
                    ticklabel.set_color(highlight_colors[i][1])
                    gridlines[i].set_color(highlight_colors[i][0])
                    gridlines[i].set_alpha(0.5)

        for col_ind in range(n_cols):
            if highlight_cols is not None and col_ind in highlight_cols:
                ax.scatter([col_ind for _ in range(n_rows)],
                           [i for i in range(n_rows)],
                           s=600 * (values_mat[:, col_ind] / max_val),
                           alpha=0.5, linewidth=1,
                           color=highlight_colors[col_ind][0],
                           edgecolor=highlight_colors[col_ind][1])
            else:
                ax.scatter([col_ind for _ in range(n_rows)],
                           [i for i in range(n_rows)],
                           s=600 * (values_mat[:, col_ind] / max_val),
                           alpha=0.5, linewidth=1,
                           color='lightgray', edgecolor='gray')

        # Axis limits and orientation are set once, outside the per-column loop
        _ = ax.set_xlim(left=-1, right=n_cols)
        _ = ax.set_ylim(bottom=-1, top=n_rows)

        ax.invert_yaxis()  # otherwise, values/labels go from bottom to top

    if save:
        fig.savefig(save, bbox_inches='tight', dpi=100)

    return ax
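
# Hedged usage sketch, not part of the original textacy source: exercise
# draw_termite_plot() with random data. RC_PARAMS and COLOR_PAIRS are
# module-level constants in textacy's viz module; the values below are
# minimal stand-ins so the sketch is self-contained.
import numpy as np
import matplotlib.pyplot as plt

RC_PARAMS = {'axes.grid': True, 'grid.linestyle': ':'}
COLOR_PAIRS = [('#9ecae1', '#3182bd'), ('#a1d99b', '#31a354')]

values = np.random.rand(12, 5)                  # (# row labels, # col labels)
row_labels = ['term_{}'.format(i) for i in range(12)]
col_labels = ['topic_{}'.format(j) for j in range(5)]
ax = draw_termite_plot(values, col_labels, row_labels, highlight_cols=1)
plt.show()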
Ejemplo n.º 52
0
def create_icon_axes(fig, ax_position, lw_bars, lw_grid, lw_border, rgrid):
    """
    Create a polar axes containing the Matplotlib radar plot.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        The figure to draw into.
    ax_position : (float, float, float, float)
        The position of the created Axes in figure coordinates as
        (x, y, width, height).
    lw_bars : float
        The linewidth of the bars.
    lw_grid : float
        The linewidth of the grid.
    lw_border : float
        The linewidth of the Axes border.
    rgrid : array-like
        Positions of the radial grid.

    Returns
    -------
    ax : matplotlib.axes.Axes
        The created Axes.
    """
    with plt.rc_context({
            'axes.edgecolor': MPL_BLUE,
            'axes.linewidth': lw_border
    }):
        ax = fig.add_axes(ax_position, projection='polar')
        ax.set_axisbelow(True)

        N = 7
        arc = 2. * np.pi
        theta = np.arange(0.0, arc, arc / N)
        radii = np.array([2, 6, 8, 7, 4, 5, 8])
        width = np.pi / 4 * np.array([0.4, 0.4, 0.6, 0.8, 0.2, 0.5, 0.3])
        bars = ax.bar(theta,
                      radii,
                      width=width,
                      bottom=0.0,
                      align='edge',
                      edgecolor='0.3',
                      lw=lw_bars)
        for r, bar in zip(radii, bars):
            color = *cm.jet(r / 10.)[:3], 0.6  # color from jet with alpha=0.6
            bar.set_facecolor(color)

        ax.tick_params(labelbottom=False,
                       labeltop=False,
                       labelleft=False,
                       labelright=False)

        ax.grid(lw=lw_grid, color='0.9')
        ax.set_rmax(9)
        ax.set_yticks(rgrid)

        # The actual visible background - extends a bit beyond the axis
        ax.add_patch(
            Rectangle((0, 0),
                      arc,
                      9.58,
                      facecolor='white',
                      zorder=0,
                      clip_on=False,
                      in_layout=False))
        return ax
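
# Hedged usage sketch (not from the original logo example): place the radar
# icon on a small figure. np, cm and Rectangle are assumed to be imported as
# in the original script; MPL_BLUE is a module-level colour constant there,
# so a plain hex stand-in is defined here.
import matplotlib.pyplot as plt

MPL_BLUE = '#11557c'
fig = plt.figure(figsize=(3, 3))
create_icon_axes(fig, (0.05, 0.05, 0.9, 0.9),
                 lw_bars=0.7, lw_grid=0.5, lw_border=1.0,
                 rgrid=[1, 3, 5, 7])
plt.show()
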
def plot_data(directory, *args, ncolors = 1, descript = '', label_font_size = 24, ncol = 4):

    L = 0.0
    colors = cm.rainbow(np.linspace(0 + L, 1 - L, num = ncolors))

    fontsize = 64
    axis_tick_font_size = 30
    
    params = {
        'backend': 'ps',
        # 'axes.labelsize': 10,
        # 'text.fontsize': 10,
        # 'legend.fontsize': 10,
        # 'xtick.labelsize': 8,
        # 'ytick.labelsize': 8,
        'text.usetex': True,
        'figure.figsize': fig_size(350.0)
    }

    # rc_context() only applies while used as a context manager, so update
    # rcParams directly instead.
    plt.rcParams.update(params)

    fig = plt.figure(figsize = (20, 15))
    ax1 = fig.add_subplot(211)
    # ax2 = fig.add_subplot(312)
    ax3 = fig.add_subplot(212)

    mission_starts = np.array([40, 80, 120, 160, 199]) * (5000.0/30.0)
    [ax1.axvline(x, color = 'k', linestyle = '--') for x in mission_starts]
    [ax3.axvline(x, color = 'k', linestyle = '--') for x in mission_starts]

    percentages = np.arange(0, 100, 10)
    [ax1.axhline(y, color = 'k', linestyle = '--') for y in percentages]
    entropies = np.arange(1.0, 2.8, 0.2)
    [ax3.axhline(y, color = 'k', linestyle = '--') for y in entropies]

    for arg in args:

        miss_ratio_array, yq_lde_mean_array, yq_mie_mean_array, info = arg

        iterations = np.arange(miss_ratio_array.shape[0]) + 1
        
        color = colors[info['index']]
        label = info['label']
        steps = info['steps']
        if 'linestyle' in info:
            linestyle = info['linestyle']
        else:
            linestyle = 'solid'

        iterations_plt = np.append(0, iterations[:steps]) * (5000.0/30.0)
        miss_ratio_plt = np.append(99.39, 100 * miss_ratio_array[:steps])
        yq_lde_plt = np.append(-0.20, yq_lde_mean_array[:steps])
        yq_mie_plt = np.append(2.56, yq_mie_mean_array[:steps])

        ax1.plot(iterations_plt, miss_ratio_plt, c = color, label = label, linewidth = 2.0, linestyle = linestyle)
        ax1.set_ylim((0, 100))
        # ax2.plot(iterations_plt, yq_lde_plt, c = color, label = label, linewidth = 2.0, linestyle = linestyle)
        ax3.plot(iterations_plt, yq_mie_plt, c = color, label = label, linewidth = 2.0, linestyle = linestyle)

    ax1.legend(bbox_to_anchor = (0., 0.0, 1., .05), loc = 3,
           ncol = ncol, borderaxespad = 0., fontsize = label_font_size)

    ax1.set_title('Percentage of Map Prediction Misses', fontsize = fontsize)
    ax1.set_ylabel('Misses (\%)', fontsize = fontsize)
    ax1.set_xticklabels( () )

    # ax2.set_title('Average Marginalised L. Model Differential Entropy', fontsize = fontsize)
    # ax2.set_ylabel('Entropy (nats)', fontsize = fontsize)
    # ax2.set_xticklabels( () )

    ax3.set_title('Avg. Marg. Prediction Information Entropy', fontsize = fontsize)
    ax3.set_ylabel('Entropy (nats)', fontsize = fontsize)
    ax3.get_xaxis().get_major_formatter().set_useOffset(False)
    ax3.set_xlabel('Distance Traveled (km)', fontsize = fontsize)

    ax1.tick_params(axis='y', labelsize=axis_tick_font_size)
    # ax2.tick_params(axis='y', labelsize=axis_tick_font_size)
    ax3.tick_params(axis='both', labelsize=axis_tick_font_size)

    # ticks = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x/1e3))
    # ax3.xaxis.set_major_formatter(ticks)
    # ax3.yaxis.set_major_formatter(ticks)

    # Save the plot
    fig.tight_layout()
    fig.savefig('%sserial_compare_%s.eps' % (directory, descript))
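If the `params` above are meant to apply only while this one figure is drawn, the same settings can be scoped with a `with plt.rc_context(...)` block instead of mutating `plt.rcParams` for the whole process. A minimal sketch with illustrative values and an illustrative output filename:

import matplotlib.pyplot as plt

params = {'figure.figsize': (10.0, 7.5), 'axes.labelsize': 30, 'legend.fontsize': 24}

with plt.rc_context(params):  # the settings apply only inside this block
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4], label='demo')
    ax.set_xlabel('Distance Traveled (km)')
    ax.legend()
    fig.savefig('scoped_settings.png')
# here plt.rcParams is back to its previous state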
Ejemplo n.º 54
0
def decorator(*args, **kwargs):
    with plt.rc_context():
        return func(*args, **kwargs)
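A complete version of the decorator sketched above might look like the following; the outer wrapper (here called `rc_isolated`) does not appear in the excerpt, so its name and the example usage are assumptions:

import functools
import matplotlib.pyplot as plt

def rc_isolated(func):  # hypothetical name for the outer wrapper
    @functools.wraps(func)
    def decorator(*args, **kwargs):
        # rc_context() with no arguments snapshots rcParams and restores
        # them on exit, so changes made inside func do not leak out.
        with plt.rc_context():
            return func(*args, **kwargs)
    return decorator

@rc_isolated
def quick_plot(values):
    plt.rcParams['font.size'] = 14  # discarded once quick_plot returns
    plt.plot(values)

quick_plot([3, 1, 4, 1, 5])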
def main():

    FONTSIZE = 50
    FONTNAME = 'Sans Serif'
    TICKSIZE = 24

    rcparams = {
        'backend': 'pdf',
        'axes.labelsize': TICKSIZE,
        'font.size': FONTSIZE,
        'legend.fontsize': FONTSIZE,
        'xtick.labelsize': TICKSIZE,
        'ytick.labelsize': TICKSIZE,
        'text.usetex': True,
    }

    # plt.rc_context is a context manager; a bare call is a no-op, so apply directly:
    plt.rcParams.update(rcparams)

    """
    Demonstration Options
    """
    logging.basicConfig(level = logging.DEBUG)

    # If using parallel functionality, you must call this to set the appropriate
    # logging level
    gp.classifier.set_multiclass_logging_level(logging.DEBUG)

    np.random.seed(100)
    # Feature Generation Parameters and Demonstration Options
    SAVE_OUTPUTS = True # Set to False to avoid writing output files during a demo.
    SHOW_RAW_BINARY = True
    test_range_min = -2.5
    test_range_max = +2.5
    test_ranges = (test_range_min, test_range_max)
    n_train = 500
    n_query = 1000
    n_dims  = 2   # <- Must be 2 for vis
    n_cores = None # number of cores for multi-class (None -> default: c-1)
    walltime = 300.0
    approxmethod = 'laplace' # 'laplace' or 'pls'
    multimethod = 'OVA' # 'AVA' or 'OVA', ignored for binary problem
    fusemethod = 'EXCLUSION' # 'MODE' or 'EXCLUSION', ignored for binary
    responsename = 'probit' # 'probit' or 'logistic'
    batch_start = False
    entropy_threshold = None

    n_draws = 6
    n_draws_est = 2500
    rows_subplot = 2
    cols_subplot = 3

    assert rows_subplot * cols_subplot >= n_draws

    # Decision boundaries
    db1 = lambda x1, x2: (((x1 - 1)**2 + x2**2/4) * 
            (0.9*(x1 + 1)**2 + x2**2/2) < 1.6) & \
            ((x1 + x2) < 1.5)
    db2 = lambda x1, x2: (((x1 - 1)**2 + x2**2/4) * 
            (0.9*(x1 + 1)**2 + x2**2/2) > 0.3)
    db3 = lambda x1, x2: ((x1 + x2) < 2) & ((x1 + x2) > -2.2)
    db4 = lambda x1, x2: ((x1 - 0.75)**2 + (x2 + 0.8)**2 > 0.3**2)
    db5 = lambda x1, x2: ((x1/2)**2 + x2**2 > 0.3)
    db6 = lambda x1, x2: (((x1)/8)**2 + (x2 + 1.5)**2 > 0.2**2)
    db7 = lambda x1, x2: (((x1)/8)**2 + ((x2 - 1.4)/1.25)**2 > 0.2**2)
    db4a = lambda x1, x2: ((x1 - 1.25)**2 + (x2 - 1.25)**2 > 0.5**2) & ((x1 - 0.75)**2 + (x2 + 1.2)**2 > 0.6**2) & ((x1 + 0.75)**2 + (x2 + 1.2)**2 > 0.3**2) & ((x1 + 1.3)**2 + (x2 - 1.3)**2 > 0.4**2)
    db5a = lambda x1, x2: ((x1/2)**2 + x2**2 > 0.3) & (x1 > 0)
    db5b = lambda x1, x2: ((x1/2)**2 + x2**2 > 0.3) & (x1 < 0) & ((x1 + 0.75)**2 + (x2 - 1.2)**2 > 0.6**2)
    db1a = lambda x1, x2: (((x1 - 1)**2 + x2**2/4) * 
            (0.9*(x1 + 1)**2 + x2**2/2) < 1.6) & \
            ((x1 + x2) < 1.6) | ((x1 + 0.75)**2 + (x2 + 1.2)**2 < 0.6**2)
    db1b = lambda x1, x2: (((x1 - 1)**2 + x2**2/4) * 
            (0.9*(x1 + 1)**2 + x2**2/2) < 1.6) & ((x1/2)**2 + (x2)**2 > 0.4**2) & \
            ((x1 + x2) < 1.5) | ((x1 + 0.75)**2 + (x2 - 1.5)**2 < 0.4**2) | ((x1 + x2) > 2.1) & (x1 < 1.8) & (x2 < 1.8) # | (((x1 + 0.25)/4)**2 + (x2 + 1.5)**2 < 0.32**2) # & (((x1 + 0.25)/4)**2 + (x2 + 1.5)**2 > 0.18**2)
    db1c = lambda x1, x2: (((x1 - 1)**2 + x2**2/4) * 
            (0.9*(x1 + 1)**2 + x2**2/2) < 1.6) & ((x1/2)**2 + (x2)**2 > 0.4**2) & \
            ((x1 + x2) < 1.5) | ((x1 + 0.75)**2 + (x2 - 1.5)**2 < 0.4**2) | ((x1 + x2) > 2.1) & (x1 < 1.8) & (x2 < 1.8) | (((x1 + 0.25)/4)**2 + (x2 + 1.75)**2 < 0.32**2) & (((x1 + 0.25)/4)**2 + (x2 + 1.75)**2 > 0.18**2)
    db8 = lambda x1, x2: (np.sin(2*x1 + 3*x2) > 0) | (((x1 - 1)**2 + x2**2/4) * 
            (0.9*(x1 + 1)**2 + x2**2/2) < 1.4) & \
            ((x1 + x2) < 1.5) | (x1 < -1.9) | (x1 > +1.9) | (x2 < -1.9) | (x2 > +1.9) | ((x1 + 0.75)**2 + (x2 - 1.5)**2 < 0.3**2)
    # db9 = lambda x1, x2: ((x1)**2 + (x2)**2 < 0.3**2) | ((x1)**2 + (x2)**2 > 0.5**2) |
    decision_boundary  = [db5b, db1c, db4a] # db1b # [db5b, db1c, db4a] # [db5b, db1c, db4a, db8, db6, db7]

    """
    Data Generation
    """

    X = np.random.uniform(test_range_min + 0.5, test_range_max - 0.5, 
        size = (n_train, n_dims))
    x1 = X[:, 0]
    x2 = X[:, 1]
    
    Xw, whitenparams = pre.whiten(X)

    n_train = X.shape[0]
    logging.info('Training Points: %d' % n_train)

    # Training Labels
    y = gp.classifier.utils.make_decision(X, decision_boundary)
    y_unique = np.unique(y)
    assert y_unique.dtype == int

    if y_unique.shape[0] == 2:
        mycmap = cm.bone
        mycmap2 = cm.BrBG
    else:
        mycmap = cm.gist_rainbow
        mycmap2 = cm.gist_rainbow
    """
    Classifier Training
    """

    # Training
    fig = plt.figure()
    gp.classifier.utils.visualise_decision_boundary(plt.gca(),
        test_range_min, test_range_max, decision_boundary)
    
    plt.scatter(x1, x2, c = y, marker = 'x', cmap = mycmap)
    plt.title('Training Labels')
    plt.xlabel('$x_{1}$')
    plt.ylabel('$x_{2}$')
    cbar = plt.colorbar()
    cbar.set_ticks(y_unique)
    cbar.set_ticklabels(y_unique)
    plt.xlim((test_range_min, test_range_max))
    plt.ylim((test_range_min, test_range_max))
    plt.gca().patch.set_facecolor('gray')
    print('Plotted Training Set')

    plt.show()

    # Training
    print('===Begin Classifier Training===')
    optimiser_config = gp.OptConfig()
    optimiser_config.sigma = gp.auto_range(kerneldef)
    optimiser_config.walltime = walltime

    # User can choose to batch start each binary classifier with different
    # initial hyperparameters for faster training
    if batch_start:
        if y_unique.shape[0] == 2:
            initial_hyperparams = [100, 0.1, 0.1]
        elif multimethod == 'OVA':
            initial_hyperparams = [  [356.468, 0.762, 0.530], \
                                     [356.556, 0.836, 0.763], \
                                     [472.006, 1.648, 1.550], \
                                     [239.720, 1.307, 0.721] ]
        elif multimethod == 'AVA':
            initial_hyperparams = [ [14.9670, 0.547, 0.402],  \
                                    [251.979, 1.583, 1.318], \
                                    [420.376, 1.452, 0.750], \
                                    [780.641, 1.397, 1.682], \
                                    [490.353, 2.299, 1.526], \
                                    [73.999, 1.584, 0.954]]
        else:
            raise ValueError
        batch_config = gp.batch_start(optimiser_config, initial_hyperparams)
    else:
        batch_config = optimiser_config

    # Obtain the response function
    responsefunction = gp.classifier.responses.get(responsename)

    # Train the classifier!
    learned_classifier = gp.classifier.learn(Xw, y, kerneldef,
        responsefunction, batch_config, 
        multimethod = multimethod, approxmethod = approxmethod,
        train = True, ftol = 1e-6, processes = n_cores)

    # Print learned kernels
    print_function = gp.describer(kerneldef)
    gp.classifier.utils.print_learned_kernels(print_function, 
                                            learned_classifier, y_unique)

    # Print the matrix of learned classifier hyperparameters
    logging.info('Matrix of learned hyperparameters')
    gp.classifier.utils.print_hyperparam_matrix(learned_classifier)


    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
                        THE GAP BETWEEN ANALYSIS AND PLOTS
    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""







    """
    Classifier Prediction Results (Plots)
    """

    logging.info('Plotting... please wait')

    Xq = gp.classifier.utils.query_map(test_ranges, n_points = 250)
    Xqw = pre.whiten(Xq, whitenparams)
    yq_truth = gp.classifier.utils.make_decision(Xq, decision_boundary)

    fig = plt.figure(figsize = (19.2, 10.8))
    ax1 = fig.add_subplot(231)
    ax2 = fig.add_subplot(232)
    ax3 = fig.add_subplot(233)
    ax4 = fig.add_subplot(234)
    ax5 = fig.add_subplot(235)
    ax6 = fig.add_subplot(236)

    fontsize = 20
    axis_tick_font_size = 14

    """
    Plot: Ground Truth
    """

    # Training
    gp.classifier.utils.visualise_map(ax1, yq_truth, test_ranges, cmap = mycmap)
    ax1.set_title('Ground Truth', fontsize = fontsize)
    ax1.set_xlabel('$x_{1}$', fontsize = fontsize)
    ax1.set_ylabel('$x_{2}$', fontsize = fontsize)
    cbar = plt.colorbar()
    cbar.set_ticks(y_unique)
    cbar.set_ticklabels(y_unique)
    gp.classifier.utils.visualise_decision_boundary(ax1,
        test_range_min, test_range_max, decision_boundary)
    logging.info('Plotted Prediction Labels')
    plt.gca().set_aspect('equal', adjustable = 'box')
    plt.gca().tick_params(axis='both', labelsize=axis_tick_font_size)

    """
    Plot: Training Set
    """

    # Training
    gp.classifier.utils.visualise_decision_boundary(ax2,
        test_range_min, test_range_max, decision_boundary)
    plt.scatter(x1, x2, c = y, marker = 'x', cmap = mycmap)
    ax2.set_title('Training Labels', fontsize = fontsize)
    ax2.set_xlabel('$x_{1}$', fontsize = fontsize)
    ax2.set_ylabel('$x_{2}$', fontsize = fontsize)
    cbar = plt.colorbar()
    cbar.set_ticks(y_unique)
    cbar.set_ticklabels(y_unique)
    ax2.set_xlim((test_range_min, test_range_max))
    ax2.set_ylim((test_range_min, test_range_max))
    plt.gca().patch.set_facecolor('gray')
    logging.info('Plotted Training Set')
    plt.gca().set_aspect('equal', adjustable = 'box')
    plt.gca().tick_params(axis='both', labelsize=axis_tick_font_size)
        
    """
    Plot: Query Computations
    """

    # Compute Linearised and True Entropy for plotting
    logging.info('Plot: Caching Predictor...')
    predictor = gp.classifier.query(learned_classifier, Xqw)

    logging.info('Plot: Computing Expectance...')
    exp = gp.classifier.expectance(learned_classifier, predictor)

    logging.info('Plot: Computing Variance...')
    var = gp.classifier.variance(learned_classifier, predictor)

    logging.info('Plot: Computing Linearised Entropy...')
    start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
    yq_lmde = gp.classifier.linearised_model_differential_entropy(
        exp, var, learned_classifier)
    time_lmde = time.perf_counter() - start_time

    logging.info('Plot: Computing Equivalent Standard Deviation')
    eqsd = gp.classifier.equivalent_standard_deviation(yq_lmde)

    logging.info('Plot: Computing Prediction Probabilities...')
    yq_prob = gp.classifier.predict_from_latent(exp, var, learned_classifier, 
        fusemethod = fusemethod)

    logging.info('Plot: Computing True Entropy...')
    start_time = time.perf_counter()
    yq_pie = gp.classifier.entropy(yq_prob)
    time_pie = time.perf_counter() - start_time

    n_draws_low = 25
    n_draws_med = 250
    n_draws_high = 2500

    logging.info('Plot: Computing MCPIE with %d samples' % n_draws_low)
    start_time = time.perf_counter()
    yq_mcpie_low = gp.classifier.monte_carlo_prediction_information_entropy(exp, var, learned_classifier, n_draws = n_draws_low)
    time_mcpie_low = time.perf_counter() - start_time

    logging.info('Plot: Computing MCPIE with %d samples' % n_draws_med)
    start_time = time.perf_counter()
    yq_mcpie_med = gp.classifier.monte_carlo_prediction_information_entropy(exp, var, learned_classifier, n_draws = n_draws_med)
    time_mcpie_med = time.perf_counter() - start_time

    logging.info('Plot: Computing MCPIE with %d samples' % n_draws_high)
    start_time = time.perf_counter()
    yq_mcpie_high = gp.classifier.monte_carlo_prediction_information_entropy(exp, var, learned_classifier, n_draws = n_draws_high)
    time_mcpie_high = time.perf_counter() - start_time

    logging.info('Plot: Computing Class Predictions')
    yq_pred = gp.classifier.classify(yq_prob, y_unique)

    mistake_ratio = (yq_truth - yq_pred).nonzero()[0].shape[0] / yq_truth.shape[0]

    timing = {  'time_lmde': time_lmde,
                'time_pie': time_pie,
                'time_mcpie_low': time_mcpie_low,
                'time_mcpie_med': time_mcpie_med,
                'time_mcpie_high': time_mcpie_high}
    logging.info(timing)

    # """
    # THIS SECTION IS EXTRA FOR COLLECTING TIME COMPLEXITY DATA
    # PLEASE COMMENT OUT UNDER NORMAL CIRCUMSTANCES
    # """

    # # Compute Linearised and True Entropy for plotting
    # Xq = np.random.rand(1000, 2)
    # Xqw = pre.whiten(Xq, whitenparams)
    # logging.info('Plot: Caching Predictor...')
    # predictor = gp.classifier.query(learned_classifier, Xqw)

    # logging.info('Plot: Computing Expectance...')
    # exp = gp.classifier.expectance(learned_classifier, predictor)

    # logging.info('Plot: Computing Variance...')
    # cov = gp.classifier.covariance(learned_classifier, predictor)

    # logging.info('Plot: Computing Linearised Entropy...')
    # start_time = time.clock()
    # yq_lmde = gp.classifier.linearised_model_differential_entropy(
    #     exp, cov, learned_classifier)
    # time_lmde = time.clock() - start_time

    # logging.info('Plot: Computing Equivalent Standard Deviation')
    # eqsd = gp.classifier.equivalent_standard_deviation(yq_lmde)

    # logging.info('Plot: Computing Prediction Probabilities...')
    # yq_prob = gp.classifier.predict_from_latent(exp, cov, learned_classifier, 
    #     fusemethod = fusemethod)

    # logging.info('Plot: Computing True Entropy...')
    # start_time = time.clock()
    # yq_pie = gp.classifier.entropy(yq_prob)
    # time_pie = time.clock() - start_time

    # n_draws_low = 25
    # n_draws_med = 250
    # n_draws_high = 2500

    # logging.info('Plot: Computing MCPIE with %d samples' % n_draws_low)
    # start_time = time.clock()
    # yq_mcpie_low = gp.classifier.monte_carlo_prediction_information_entropy(exp, cov, learned_classifier, n_draws = n_draws_low)
    # time_mcpie_low = time.clock() - start_time

    # logging.info('Plot: Computing MCPIE with %d samples' % n_draws_med)
    # start_time = time.clock()
    # yq_mcpie_med = gp.classifier.monte_carlo_prediction_information_entropy(exp, cov, learned_classifier, n_draws = n_draws_med)
    # time_mcpie_med = time.clock() - start_time

    # logging.info('Plot: Computing MCPIE with %d samples' % n_draws_high)
    # start_time = time.clock()
    # yq_mcpie_high = gp.classifier.monte_carlo_prediction_information_entropy(exp, cov, learned_classifier, n_draws = n_draws_high)
    # time_mcpie_high = time.clock() - start_time

    # logging.info('Plot: Computing Class Predicitons')
    # yq_pred = gp.classifier.classify(yq_prob, y_unique)

    # timing = {  'time_lmde': time_lmde,
    #             'time_pie': time_pie,
    #             'time_mcpie_low': time_mcpie_low,
    #             'time_mcpie_med': time_mcpie_med,
    #             'time_mcpie_high': time_mcpie_high}
    # logging.info(timing)
    # print(yq_mcpie_high, yq_lmde)
    # return

    # """
    # Plot: Prediction Labels
    # """

    # Query (Prediction Map)
    gp.classifier.utils.visualise_map(ax3, yq_pred, test_ranges, 
        boundaries = True, cmap = mycmap)
    ax3.set_title('Prediction [Miss Ratio: %.1f %s]' % (100 * mistake_ratio, r'\%'), fontsize = fontsize)
    ax3.set_xlabel('$x_{1}$', fontsize = fontsize)
    ax3.set_ylabel('$x_{2}$', fontsize = fontsize)
    cbar = plt.colorbar()
    cbar.set_ticks(y_unique)
    cbar.set_ticklabels(y_unique)
    logging.info('Plotted Prediction Labels')
    plt.gca().set_aspect('equal', adjustable = 'box')
    plt.gca().tick_params(axis='both', labelsize=axis_tick_font_size)
        
    """
    Plot: Prediction Information Entropy onto Training Set
    """

    gp.classifier.utils.visualise_map(ax4, yq_pie, test_ranges, 
        threshold = entropy_threshold, cmap = cm.coolwarm)
    ax4.set_title('Prediction Information Entropy', fontsize = fontsize)
    ax4.set_xlabel('$x_{1}$', fontsize = fontsize)
    ax4.set_ylabel('$x_{2}$', fontsize = fontsize)
    plt.colorbar()
    ax4.scatter(x1, x2, c = y, marker = 'x', cmap = mycmap)
    ax4.set_xlim((test_range_min, test_range_max))
    ax4.set_ylim((test_range_min, test_range_max))
    logging.info('Plotted Prediction Information Entropy on Training Set')
    plt.gca().set_aspect('equal', adjustable = 'box')
    plt.gca().tick_params(axis='both', labelsize=axis_tick_font_size)
        
    """
    Plot: Monte Carlo Prediction Information Entropy onto Training Set
    """

    gp.classifier.utils.visualise_map(ax5, yq_mcpie_high, test_ranges, 
        threshold = entropy_threshold, cmap = cm.coolwarm)
    ax5.set_title('M.C. Prediction Information Entropy', fontsize = fontsize, x = 0.45)
    ax5.set_xlabel('$x_{1}$', fontsize = fontsize)
    ax5.set_ylabel('$x_{2}$', fontsize = fontsize)
    plt.colorbar()
    ax5.scatter(x1, x2, c = y, marker = 'x', cmap = mycmap)
    ax5.set_xlim((test_range_min, test_range_max))
    ax5.set_ylim((test_range_min, test_range_max))
    logging.info('Plotted Monte Carlo Prediction Information Entropy on Training Set')
    plt.gca().set_aspect('equal', adjustable = 'box')
    plt.gca().tick_params(axis='both', labelsize=axis_tick_font_size)
        
    """
    Plot: Linearised Model Differential Entropy onto Training Set
    """

    yq_lmde_min = yq_lmde.min()
    yq_lmde_max = yq_lmde.max()
    gp.classifier.utils.visualise_map(ax6, yq_lmde, test_ranges, 
        threshold = entropy_threshold, cmap = cm.coolwarm, 
        vmin = -yq_lmde_max, vmax = yq_lmde_max)
    ax6.set_title('L. Model Differential Entropy', fontsize = fontsize)
    ax6.set_xlabel('$x_{1}$', fontsize = fontsize)
    ax6.set_ylabel('$x_{2}$', fontsize = fontsize)
    plt.colorbar()
    ax6.scatter(x1, x2, c = y, marker = 'x', cmap = mycmap)
    ax6.set_xlim((test_range_min, test_range_max))
    ax6.set_ylim((test_range_min, test_range_max))
    logging.info('Plotted Linearised Model Differential Entropy on Training Set')
    plt.gca().set_aspect('equal', adjustable = 'box')
    plt.gca().tick_params(axis='both', labelsize=axis_tick_font_size)

    # plt.show()
    fig.tight_layout()
    sea.vis.savefig(fig, './mcpie_lmde_comparison/mcpie_lmde_comparison.eps')

    fig = plt.figure(figsize = (19.2, 10.8/2))
    ax1 = fig.add_subplot(131)
    ax2 = fig.add_subplot(132)
    ax3 = fig.add_subplot(133)

    gp.classifier.utils.visualise_map(ax1, yq_mcpie_low, test_ranges, 
        threshold = entropy_threshold, cmap = cm.coolwarm)
    ax1.set_title('MCPIE with %d Samples' % n_draws_low, fontsize = fontsize)
    ax1.set_xlabel('$x_{1}$', fontsize = fontsize)
    ax1.set_ylabel('$x_{2}$', fontsize = fontsize)
    plt.colorbar()
    ax1.scatter(x1, x2, c = y, marker = 'x', cmap = mycmap)
    ax1.set_xlim((test_range_min, test_range_max))
    ax1.set_ylim((test_range_min, test_range_max))
    logging.info('Plotted MCPIE with %d Samples on Training Set' % n_draws_low)
    plt.gca().set_aspect('equal', adjustable = 'box')
    plt.gca().tick_params(axis='both', labelsize=axis_tick_font_size)

    gp.classifier.utils.visualise_map(ax2, yq_mcpie_med, test_ranges, 
        threshold = entropy_threshold, cmap = cm.coolwarm)
    ax2.set_title('MCPIE with %d Samples' % n_draws_med, fontsize = fontsize)
    ax2.set_xlabel('$x_{1}$', fontsize = fontsize)
    ax2.set_ylabel('$x_{2}$', fontsize = fontsize)
    plt.colorbar()
    ax2.scatter(x1, x2, c = y, marker = 'x', cmap = mycmap)
    ax2.set_xlim((test_range_min, test_range_max))
    ax2.set_ylim((test_range_min, test_range_max))
    logging.info('Plotted MCPIE with %d Samples on Training Set' % n_draws_med)
    plt.gca().set_aspect('equal', adjustable = 'box')
    plt.gca().tick_params(axis='both', labelsize=axis_tick_font_size)

    gp.classifier.utils.visualise_map(ax3, yq_mcpie_high, test_ranges, 
        threshold = entropy_threshold, cmap = cm.coolwarm)
    ax3.set_title('MCPIE with %d Samples' % n_draws_high, fontsize = fontsize)
    ax3.set_xlabel('$x_{1}$', fontsize = fontsize)
    ax3.set_ylabel('$x_{2}$', fontsize = fontsize)
    plt.colorbar()
    ax3.scatter(x1, x2, c = y, marker = 'x', cmap = mycmap)
    ax3.set_xlim((test_range_min, test_range_max))
    ax3.set_ylim((test_range_min, test_range_max))
    logging.info('Plotted MCPIE with %d Samples on Training Set' % n_draws_high)
    plt.gca().set_aspect('equal', adjustable = 'box')
    plt.gca().tick_params(axis='both', labelsize=axis_tick_font_size)

    # plt.show()
    fig.tight_layout()
    sea.vis.savefig(fig, './mcpie_lmde_comparison/mcpie_accuracy.eps')

    logging.info(timing)
    plt.show()
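The repeated `start_time = time.perf_counter(); ...; elapsed = time.perf_counter() - start_time` pattern feeding the timing dictionary above can be condensed into a small context manager. A sketch, assuming only wall-clock timings are needed; the helper name and the placeholder workload are illustrative:

import time
from contextlib import contextmanager

@contextmanager
def timed(label, timings):
    # Record the wall-clock duration of the enclosed block under `label`.
    start = time.perf_counter()
    try:
        yield
    finally:
        timings[label] = time.perf_counter() - start

timings = {}
with timed('time_pie', timings):
    sum(i * i for i in range(100000))  # placeholder for the entropy computation
print(timings)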
Ejemplo n.º 56
0
# Prepare Plot
threshold = 1.7
fig = plt.figure(figsize=(25, 5))
plt.title("Gold Tree", fontsize=26)
plt.yticks([])
plt.tick_params(labelsize=16)
n = 18
mh = 2
color_list = ['c', 'c', 'c', 'c', 'g', 'k', 'r', 'r', 'r', 'r', 'm', 'm', 'm', 'm', 'k', 'k']
p = 30
orientation = 'top'
no_labels = False
print("Plotting")
plt.tight_layout()
with plt.rc_context({'lines.linewidth': 2.0}):
    _plot_dendrogram(icoord, dcoord, ivl, p, n, mh, orientation,
                     no_labels, color_list,
                     leaf_font_size=26.,
                     leaf_rotation=None,
                     contraction_marks=None,
                     ax=None,
                     above_threshold_color="k")

plt.savefig(save_dir + "goldtree_sentences" + str(int(threshold * 100)) + ".png")

# Rabinovych tree
# Coordinates for the tree
#
icoord = [[5.0, 5.0, 15.0, 15.0], [10.0, 10.0, 25.0, 25.0], [17.5, 17.5, 35.0, 35.0], [45.0, 45.0, 55.0, 55.0],
          [50.0, 50.0, 65.0, 65.0], [26.25, 26.25, 57.5, 57.5], [75.0, 75.0, 85.0, 85.0],
Ejemplo n.º 57
0
def plot_snotel_comparison_one_site(
        driver, science_test_data_dir,
        compare_data_dict,
        result_dir, plot_dir,
        plots_to_make,
        plot_variables, context, style, filename):
    
    print(plots_to_make)
    
    # get lat/lng from filename
    file_split = re.split('_', filename)
    lng = file_split[3].split('.txt')[0]
    lat = file_split[2]
    print('Plotting {} {}'.format(lat, lng))

    # loop over data to compare
    data = {}
    for key, items in compare_data_dict.items():

        # read in data
        if key == "snotel":
            data[key] = read_snotel_swe_obs(filename,
                                            science_test_data_dir,
                                            items)

        elif key == "VIC.4.2.d":
            data[key] = read_vic_42_output(lat, lng,
                                           science_test_data_dir,
                                           items)

        else:
            data[key] = read_vic_5_output(lat, lng,
                                          result_dir,
                                          items)

    # loop over variables to plot
    for plot_variable, units in plot_variables.items():

        if 'water_year' in plots_to_make:

            with plt.rc_context(dict(sns.axes_style(style),
                                     **sns.plotting_context(context))):
                fig, ax = plt.subplots(figsize=(10, 10))

                df = pd.DataFrame({key: d[plot_variable] for key, d in
                                   data.items() if plot_variable in d})

                for key, series in df.items():  # .iteritems() was removed in pandas 2.0
                    series.plot(
                        use_index=True,
                        linewidth=compare_data_dict[key]['linewidth'],
                        ax=ax,
                        color=compare_data_dict[key]['color'],
                        linestyle=compare_data_dict[key]
                        ['linestyle'],
                        zorder=compare_data_dict[key]['zorder'])

                ax.legend(loc='upper left')
                ax.set_ylabel("%s [%s]" % (plot_variable, units))

                # save figure
                os.makedirs(os.path.join(plot_dir, plot_variable),
                            exist_ok=True)
                plotname = '%s_%s.png' % (lat, lng)
                savepath = os.path.join(plot_dir, plot_variable, plotname)
                plt.savefig(savepath, bbox_inches='tight')
                print(savepath)
                plt.clf()
                plt.close()
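The `dict(sns.axes_style(style), **sns.plotting_context(context))` pattern above merges a seaborn axes style and a plotting context into one rc dictionary that `plt.rc_context` applies temporarily. A minimal standalone sketch; the style name, context name, data, and output filename are illustrative:

import matplotlib.pyplot as plt
import seaborn as sns

style, context = 'whitegrid', 'talk'
rc = dict(sns.axes_style(style), **sns.plotting_context(context))

with plt.rc_context(rc):  # seaborn look applies only to this figure
    fig, ax = plt.subplots(figsize=(10, 10))
    ax.plot(range(12), [v ** 0.5 for v in range(12)], label='swe')
    ax.legend(loc='upper left')
    fig.savefig('styled.png', bbox_inches='tight')
    plt.close(fig)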
Ejemplo n.º 58
0
def styled_fig_ax(size='wide',
                  font_size=10.0,
                  zero_lines=True,
                  y_axis_grid=True,
                  seaborn=False,
                  subplots_rows=None,
                  subplots_columns=None,
                  subplots_kwargs=None,
                  x_formatter=None,
                  y_formatter=None,
                  other_rc_params=None):
    """ Context manager for a styled axis.

        :param size: accepts 'wide' (top/bottom of slide), 'tall' (left side of slide),
                    'tallest' (left side of slide, lots of text), 'quarter', 'square',
                    or 'custom' (figure size taken from other_rc_params['figure.figsize'])
        :param font_size: the main font size; some sizes are set relative to this; overridden on tallest.
        :param zero_lines: strong lines at x=0 and y=0
        :param other_rc_params: any custom rcParams to include
        :param subplots_rows: the number of rows in the subplot (default=None)
        :param subplots_columns: the number of columns in the subplot (default=None)
        :param subplots_kwargs: dict of kwargs for subplots call (e.g., sharex=True) (default={})
        :param x_formatter: Formatter to use for major ticks on x axis
        :param y_formatter: Formatter to use for major ticks on y axis
    """
    subplots_kwargs = {} if subplots_kwargs is None else subplots_kwargs
    other_rc_params = {} if other_rc_params is None else other_rc_params

    sizes = {
        'quarter': (6.8, 3.7),
        'wide': (9.75, 3.6),
        'tall': (6., 7.5),
        'tallest': (6, 8.5),
        'square': (6., 6.),
        'custom': other_rc_params.get('figure.figsize', None),
    }
    figure_size = sizes[size]

    original_params = plt.rcParams.copy()

    if size == 'tallest':
        plt.rcParams['savefig.pad_inches'] = 0.0

        if font_size > 6.0:
            font_size = 6.0

    # set globally here since this seems ignored by rc_context.....
    # apologies for side effects.
    plt.rcParams['axes.titlesize'] = 1.25 * font_size
    plt.rcParams['figure.dpi'] = 196
    plt.rcParams['savefig.dpi'] = 196
    plt.rcParams['legend.frameon'] = False
    plt.rcParams['legend.fontsize'] = 0.8 * font_size

    rc_params = {
        'figure.figsize': figure_size,

        # font
        'font.family': 'Verdana',
        'font.size': font_size,
        'axes.labelsize': font_size,
        'xtick.labelsize': 0.8 * font_size,
        'ytick.labelsize': 0.8 * font_size,

        # remove extras
        'xtick.major.size': 0,  # major tick size in points
        'xtick.minor.size': 0,  # minor tick size in points
        'ytick.major.size': 0,  # major tick size in points
        'ytick.minor.size': 0,  # minor tick size in points

        # colors
        'axes.prop_cycle': cycler('color', get_palette(dict=False, hex=True)),

        # grid
        'axes.facecolor': 'white',
        'axes.edgecolor': '.8',
        'axes.grid': False,
        'grid.linestyle': '-',
        'grid.linewidth': 0.25,
        'grid.color': '#a3a3a3',
        'axes.linewidth': 0.0,
    }

    rc_params.update(other_rc_params)

    def _adjust_figure_inplace(fig, ax):
        # set just x axis grid lines
        ax.grid(True)
        ax.xaxis.grid(False)

        if not y_axis_grid:
            ax.yaxis.grid(False)

        ax.set_axisbelow(True)

        if zero_lines:
            ax.axvline(x=0, c='k', linestyle='-', lw=0.7, alpha=0.5)
            ax.axhline(y=0, c='k', linestyle='-', lw=0.7, alpha=0.5)

        if x_formatter:
            ax.xaxis.set_major_formatter(x_formatter)
        if y_formatter:
            ax.yaxis.set_major_formatter(y_formatter)

        fig.tight_layout()

    if seaborn:
        with sns.axes_style(rc=rc_params):
            sns.set_palette(get_palette())
            yield
            fig = plt.gcf()
            fig.set_size_inches(figure_size)

            # hack for joint plots (looks like main plot is always first)
            ax = plt.gcf().get_axes()[0]

            _adjust_figure_inplace(fig, ax)
    else:
        with plt.rc_context(rc_params):
            if xor((subplots_rows is not None),
                   (subplots_columns is not None)):
                raise ValueError(
                    "Must pass both subplots_rows and subplots_columns or neither."
                )

            if subplots_rows is not None:
                fig, axes = plt.subplots(subplots_rows,
                                         subplots_columns,
                                         figsize=figure_size,
                                         **subplots_kwargs)

                for ax in axes:
                    _adjust_figure_inplace(fig, ax)

                yield axes
            else:
                fig, ax = plt.subplots(figsize=figure_size)

                _adjust_figure_inplace(fig, ax)
                yield ax

        # reset after context manager is closed
        for k, v in original_params.items():
            plt.rcParams[k] = v
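Because `styled_fig_ax` yields and its docstring calls it a context manager, it is presumably wrapped with `contextlib.contextmanager` somewhere above this excerpt. Under that assumption, a caller would use it roughly as follows; the data and the output filename are placeholders:

import matplotlib.pyplot as plt

with styled_fig_ax(size='wide', font_size=10.0) as ax:
    ax.plot([1, 2, 3], [2.0, 3.5, 1.5], label='revenue')
    ax.legend()
plt.savefig('styled_wide.png')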
Ejemplo n.º 59
0
def Dendrogram(table,
               distance_metric="correlation",
               linkage_method="complete",
               axis=1,
               lw=1.,
               n_clust=None,
               optimal_row_ordering=True,
               optimal_col_ordering=True,
               ax=None):
    """Function that plots a dendrogram on axis 0 (rows), or axis 1
    (columns) of a :class:`pandas.DataFrame`.

    :param table: Data matrix used to calculate dendrograms.
    :type table: :class:`pandas.DataFrame`
    :param distance_metric: Distance metric used to determine distance between
        two vectors. The distance function can be either of 'braycurtis',
        'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice',
        'euclidean', 'hamming', 'jaccard', 'jensenshannon', 'kulsinski',
        'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 
        'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule',
        defaults to 'correlation'.
    :type distance_metric: str, optional
    :param linkage_method: Methods for calculating the distance between the
        newly formed cluster u and each v. Possible methods are, 'single',
        'complete', 'average', 'weighted', and 'centroid', defaults to
        'complete'.
    :type linkage_method: str, optional
    :param axis: Axis of table used for plotting dendrogram (0 = rows,
        1 = columns), defaults to 1.
    :type axis: int, optional
    :param lw: Width of the lines (in points) defining the dendrogram, defaults
        to 1.
    :type lw: float, optional
    :param n_clust: Number of clusters in which the dendrogram should be cut.
        Note: Dendrogram cutting makes use of scipy.cluster.hierarchy.cut_tree
        function, defaults to None.
    :type n_clust: int, optional
    :param optimal_row_ordering: If True, the rows will be ordered optimally
        with regard to the cluster separation. Be careful: this can take a long
        time, depending on the number of rows, defaults to True.
    :type optimal_row_ordering: bool, optional
    :param optimal_col_ordering: If True, the columns will be ordered optimally 
        with regard to the cluster separation. Be careful: this can take a long
        time, depending on the number of columns, defaults to True.
    :type optimal_col_ordering: bool, optional
    :param ax: Axes on which to plot the dendrogram, defaults to None.
    :type ax: :class:`matplotlib.axes._subplots.AxesSubplot`

    :return: Tuple containing the following entries:
        1. Resulting dictionary from scipy.cluster.hierarchy.dendrogram
        function.
        2. linkage matrix, that is returned from
        scipy.cluster.hierarchy.linkage function
        3. dictionary containing the row_ids, (in case of axis = 0), or
        column_ids (in case of axis = 1) that are assigned to different
        clusters. Only makes sense if n_clust is not None.
    :rtype: dict
    """
    ax = ax if ax is not None else plt.gca()

    ids = None
    dendrogram_dict = None
    linkage_matrix = None
    cluster_dict = None
    color_threshold = 0
    if (axis == 0):
        ids = list(table.index)
        distance_matrix = pdist(table, metric=distance_metric)
        linkage_matrix = linkage(distance_matrix,
                                 metric=distance_metric,
                                 method=linkage_method,
                                 optimal_ordering=optimal_row_ordering)
        if n_clust is not None:
            cluster_dict = {}
            color_threshold = linkage_matrix[-1 * (n_clust - 1), 2]
            grouping = cut_tree(linkage_matrix, n_clusters=n_clust)
            for i in range(len(grouping)):
                group = grouping[i, 0]
                if group not in cluster_dict:
                    cluster_dict[group] = [ids[i]]
                else:
                    cluster_dict[group] += [ids[i]]
        with plt.rc_context({'lines.linewidth': lw}):
            dendrogram_dict = dendrogram(linkage_matrix,
                                         orientation="left",
                                         color_threshold=color_threshold)
    elif (axis == 1):
        ids = table.columns
        distance_matrix = pdist(table.T, metric=distance_metric)
        linkage_matrix = linkage(distance_matrix,
                                 metric=distance_metric,
                                 method=linkage_method,
                                 optimal_ordering=optimal_col_ordering)
        if n_clust is not None:
            cluster_dict = {}
            color_threshold = linkage_matrix[-1 * (n_clust - 1), 2]
            grouping = cut_tree(linkage_matrix, n_clusters=n_clust)
            for i in range(len(grouping)):
                group = grouping[i, 0]
                if group not in cluster_dict:
                    cluster_dict[group] = [ids[i]]
                else:
                    cluster_dict[group] += [ids[i]]

        with plt.rc_context({'lines.linewidth': lw}):
            dendrogram_dict = dendrogram(linkage_matrix,
                                         color_threshold=color_threshold)

    ax.spines["left"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.spines["top"].set_visible(False)

    plt.xticks([], [])
    plt.yticks([], [])

    return dendrogram_dict, linkage_matrix, cluster_dict
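A short usage sketch for `Dendrogram`, clustering the columns of a small random table; the data and filename are placeholders, and the scipy/pandas imports used inside the function are assumed to be in scope:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

table = pd.DataFrame(np.random.rand(20, 6),
                     columns=['sample_%d' % i for i in range(6)])

fig, ax = plt.subplots(figsize=(8, 3))
dend, linkage_matrix, clusters = Dendrogram(table,
                                            distance_metric='euclidean',
                                            linkage_method='average',
                                            axis=1,
                                            n_clust=2,
                                            ax=ax)
print(clusters)  # column ids grouped into 2 clusters
fig.savefig('column_dendrogram.png')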
# Set some colors
aocText = "#CCCCCC"
aocBackground = "#0F0F23"
aocGold = "#FFFF66"
aocSilver = "#9999CC"

# Set background color
fig = plt.figure()
fig.patch.set_facecolor(aocBackground)

# Do some of the styling
with plt.rc_context({
        "axes.edgecolor": aocText,
        "xtick.color": aocText,
        "ytick.color": aocText,
        "figure.facecolor": aocBackground,
        "axes.facecolor": aocBackground
}):

    # These variables will get set to some values via scraping the info from the leaderboard in a future update
    days = [1, 2, 3, 4, 5, 6]
    goldStars = [114000, 96000, 79000, 63000, 55000, 48000]
    silverStars = [7600, 2600, 2400, 8000, 1100, 1950]

    leaderboard = leaderboardUtils.getLeaderboard()
    #print( leaderboard)
    days = []
    for item in leaderboard:
        days.append(int(item[0]))