Example #1
def simulate_increasing(data_size, margin=0.3, max_iter=100, learning_rate=0.1,
                        steps=5, start=None, end=None):
    """Simulate learning an increasing training data set.

    Generates a non-separable data set, trains on an increasing training
    set, then tests and plots the results.

    start: Initial (first step) training data set size.
    end: Final (last step) training data set size.
    """
    data = generate_sphere_data(data_size, margin=margin)
    train_data, test_data = split_list(data, 0.75)

    # Initialize start/end sizes if not given.
    start = len(train_data)/steps if start is None else start
    end = len(train_data) if end is None else end

    w_colors = ['b', 'c', 'm', 'y', 'k']  # w vector (line) graph color.
    w_gs = []  # w plot graphs.
    sizes = []  # Training data set sizes.
    success = []  # Success rates according to training data set sizes.
    for i in xrange(steps):
        # Increase training data size according to iteration.
        size = start + i*end/steps
        current_train_data = train_data[:size]

        w = train(current_train_data, max_iter=max_iter, r=learning_rate)
        error = test(test_data, w)

        status(current_train_data, test_data, error)
        print

        # Record size-success statistics.
        sizes.append(size)
        success.append(100 - error)

        # Plot decision boundary.
        w_color = w_colors[i] if i < len(w_colors) else w_colors[-1]
        figure(0)
        g, = plot_w(current_train_data, w, color=w_color)
        w_gs.append(g)

    figure(0).suptitle('Test data size: %d\nMaximum iterations: %d' % (len(test_data), max_iter))
    plot_w_legend(w_gs, sizes)
    plot_data(data)

    figure(1).suptitle('Success rate according to training set size.')
    plot_success_per_size(sizes, success)

    show()
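A minimal usage sketch for the function above; it assumes the helpers it calls (generate_sphere_data, split_list, train, test, status, plot_w, plot_w_legend, plot_data, plot_success_per_size, figure, show) are already defined in the surrounding module, and the argument values are purely illustrative:

# Illustrative call only: helper functions come from the surrounding module,
# and every argument value here is arbitrary.
simulate_increasing(data_size=400,      # 300 training / 100 test points after the 0.75 split
                    margin=0.3,
                    max_iter=100,
                    learning_rate=0.1,
                    steps=5)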
Example #2
def plot_gsea_heatmap_(gsea_results, sel_genesets, factor, out):
    print(gsea_results)
    # Whether to plot absolute values (files ending in "_T.nc").
    use_abs = gsea_results.endswith("_T.nc")
    gsea = xr.open_dataset(gsea_results).load()
    gsea['gene_set'] = (xr.apply_ufunc(np.char.decode,
                                       gsea['gene_set']).astype('object'))
    gsea['mri_feature'] = np.arange(1,
                                    gsea['mri_feature'].shape[0] + 1,
                                    dtype='i2')

    geneset_annot = (pd.read_table(
        sel_genesets, sep='\t', quotechar='"',
        comment='#').set_index('gene_set').to_xarray())

    factor_idx = factor - 1
    with plot.figure(figsize=(7.0, 3.5)) as fig:
        plot_gsea_heatmap(gsea, geneset_annot, factor_idx, fig, use_abs)
        fig.savefig(out, format="svg")
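A minimal sketch of the byte-string decode step used above, run on a throwaway in-memory dataset (the gene-set names are placeholders, not values from a real results file):

import numpy as np
import xarray as xr

# Hypothetical dataset whose labels are stored as bytes, as in NetCDF output.
ds = xr.Dataset({'gene_set': ('gs', np.array([b'SET_A', b'SET_B']))})
# Decode bytes -> str across the whole array, then store as Python objects.
ds['gene_set'] = xr.apply_ufunc(np.char.decode, ds['gene_set']).astype('object')
print(ds['gene_set'].values)  # ['SET_A' 'SET_B']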
Umax = 0.25

r = np.linspace(-20.0, 20.0, 1000)
cs = 1.0
U = -cs / abs(r)
U[U < Umin] = np.NaN

laser = -r / 30.0
Ubent = U + laser

lr = r[-1] - r[0]
lU = Umax - Umin

r0 = abs(cs / Xe_Z0_Ip)

fig = plot.figure()

ax1, ax1b = plot.get_axis_two_scales(fig, scale_y=cst.Eh_to_eV, ax2_ylabel="Energy [eV]")

# Ip
ax1.plot([-r0, r0], [-Xe_Z0_Ip, -Xe_Z0_Ip], "-m")

# Threshold
ax1.axhline(0.0, color="k", ls="-", alpha=0.5)

# U(r)
ax1.plot(r, U, "--k", alpha=0.5, label="Unperturbed ion")

# Laser
ax1.plot(r, laser, "-r", label="Laser")
        V_v_new.append(s['V_v'])
        # Error in the vapour-phase volume root.
        errorV2 = s['P'] - (p['R']*s['T'])/(s['V_v'] - s['b']) + s['a']/s['V_v']**2
        # Error in the liquid-phase volume root.
        errorL2 = s['P'] - (p['R']*s['T'])/(s['V_l'] - s['b']) + s['a']/s['V_l']**2

        errorVnew.append(errorV2)
        errorLnew.append(errorL2)

    # Plot Volume root results on data
    plot.figure(1)
    plot.plot(V_l_old, p['P'], 'b*', label='Old liquid root')
    plot.plot(V_v_old, p['P'],  'r*', label='Old vapour root')
    plot.plot(V_l_new, p['P'], 'bx', label='New liquid root')
    plot.plot(V_v_new, p['P'],  'rx', label='New vapour root')
    plot.ylabel("Pressure / Pa")
    plot.title("Volume root differences on data P, "
               "T for {}".format(p['name'][0]))
    plot.legend()
    plot.show()
#
    plot.figure(2)
    plot.plot(range(len(V_l_old)),
              numpy.array(V_l_old) - numpy.array(V_l_new),
              'b*', label=r'$\Delta V_l$')
    plot.plot(range(len(V_v_old)),
              numpy.array(V_v_old) - numpy.array(V_v_new),
              'r*', label=r'$\Delta V_v$')

r = np.linspace(-20.0, 20.0, 1000)
lr = r[-1] - r[0]
r01 = 0.0
r02 = -8.2
r03 = 0.0
r04 = +8.2
cs = 1.0
U1 = -cs/abs(r-r01)
U2 = -cs/abs(r-r02) - cs/abs(r-r03) - cs/abs(r-r04)
U1[U1<Umin] = np.NaN
U2[U2<Umin] = np.NaN
r0 = abs(cs/Xe_Z0_Ip)

fig = plot.figure(figsize = (10.0, 7.0))

ax1, ax1b = plot.get_axis_two_scales(fig,
                                     scale_y = cst.Eh_to_eV,
                                     ax2_ylabel = 'Energy [eV]',
                                     subplot = 121)
ax2, ax2b = plot.get_axis_two_scales(fig,
                                     scale_y = cst.Eh_to_eV,
                                     ax2_ylabel = 'Energy [eV]',
                                     sharex = ax1, sharey = ax1,
                                     subplot = 122)
ax1b.get_yaxis().set_visible(False)
ax2.get_yaxis().set_visible(False)

# ******************************************************
ax1.plot(r, U1)
Example #6
    ax.text(0.8*d, 0.6*d, 'z')

    # Corners
    g = d / 8.0 # Gap
    ax.text(-g, 0,      '(i,j,k+1)',    verticalalignment = 'center', horizontalalignment = 'right') # 0
    ax.text(s+g,0,      '(i+1,j,k+1)',  verticalalignment = 'center', horizontalalignment = 'left')  # 1
    ax.text(-g, s,      '(i,j+1,k+1)',  verticalalignment = 'center', horizontalalignment = 'right') # 2
    ax.text(s+g,s,      '(i+1,j+1,k+1)',verticalalignment = 'center', horizontalalignment = 'left')  # 3
    ax.text(d-g, d,      '(i,j,k)',     verticalalignment = 'center', horizontalalignment = 'right') # 4
    ax.text(d+s+g,d,    '(i+1,j,k)',    verticalalignment = 'center', horizontalalignment = 'left')  # 5
    ax.text(d-g, s+d,   '(i,j+1,k)',    verticalalignment = 'center', horizontalalignment = 'right') # 6
    ax.text(s+d+g, s+d, '(i+1,j+1,k)',  verticalalignment = 'center', horizontalalignment = 'left')  # 7

# ******************************************************************************************
# Yee cell (FDTD)
fig_yee = plot.figure()
ax = fig_yee.add_subplot(1,1,1, aspect = 'equal', xticks=[], yticks=[], frameon=False)

plot_cell(ax)

# H arrows              x           y           dx      dy
ax.add_patch(plt.Arrow((s+d)/2.0,   d/2.0,      0.0,    d,      width = d/2.0, edgecolor='none', facecolor = 'blue'))
ax.add_patch(plt.Arrow(d/2.0,       (s+d)/2.0,  d,      0.0,    width = d/2.0, edgecolor='none', facecolor = 'blue'))
ax.add_patch(plt.Arrow(d+s/2.0,     d+s/2.0,    -d/3.0, -d/3.0, width = d/2.0, edgecolor='none', facecolor = 'blue'))
# H text
ax.text(d/2.0,       (s+d)/2.0,     '$H_{x(i,j+1/2,k+1/2)}$', color = 'blue', verticalalignment = 'top',    horizontalalignment = 'center')
ax.text((s+d)/2.0,   d/2.0,         '$H_{y(i+1/2,j,k+1/2)}$', color = 'blue', verticalalignment = 'top',    horizontalalignment = 'center')
ax.text(d+s/2.0,   d+s/2.0,         '$H_{z(i+1/2,j+1/2,k)}$', color = 'blue', verticalalignment = 'bottom', horizontalalignment = 'center')

# E arrows              x           y           dx      dy
ax.add_patch(plt.Arrow(d+s/2.0,     d,          d,      0.0,    width = d/2.0, edgecolor='none', facecolor = 'red'))
Example #7
# Calculate the cross wavelet transform (XWT). The XWT finds regions in time-
# frequency space where the time series show high common power. Torrence and
# Compo (1998) state that the percent point function -- PPF (inverse of the
# cumulative distribution function) of a chi-square distribution at 95%
# confidence and two degrees of freedom is Z2(95%)=3.999. However, calculating
# the PPF using chi2.ppf gives Z2(95%)=5.991. To ensure similar significance
# intervals as in Grinsted et al. (2004), one has to use confidence of 86.46%.
xwt = wavelet.xwt(t1, s1, t2, s2, significance_level=0.8646, normalize=True)

# Calculate the wavelet coherence (WTC). The WTC finds regions in time-
# frequency space where the two time series co-vary, but do not necessarily
# have high power.
wct = wavelet.wct(t1, s1, t2, s2, significance_level=0.8646, normalize=True)
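As a quick sanity check of the numbers quoted in the comment above (not part of the sample itself), scipy's chi-square distribution reproduces both figures:

from scipy.stats import chi2

# Percent point function (inverse CDF) of a chi-square distribution with
# two degrees of freedom.
print(chi2.ppf(0.95, 2))    # ~5.991
print(chi2.ppf(0.8646, 2))  # ~3.999, matching Z2(95%) in Grinsted et al. (2004)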
# Do the plotting!
pylab.close('all')

fig = wavplot.figure(ap=dict(
    left=0.07, bottom=0.06, right=0.95, top=0.95, wspace=0.05, hspace=0.10))
ax = fig.add_subplot(2, 1, 1)
fig, ax = wavplot.cwt(t1, s1, cwt1, sig1, fig=fig, ax=ax, extend='both')
bx = fig.add_subplot(2, 1, 2, sharex=ax)
fig, bx = wavplot.cwt(t2, s2, cwt2, sig2, fig=fig, ax=bx, extend='both')
ax.set_xlim([t2.min(), t1.max()])
if save:
    fig.savefig('sample_ao-bmi_cwt.png')

fig = wavplot.figure(fp=dict())
ax = fig.add_subplot(1, 1, 1)
fig, ax = wavplot.xwt(*xwt, fig=fig, ax=ax, extend='both')
ax.set_xlim([xwt[1].min(), xwt[1].max()])
if save:
    fig.savefig('sample_ao-bmi_xwt.png')
    
arrow_Vb.Print()
arrow_Ub.Print()
arrow_ImpE_Kinf1.Print()
arrow_ImpE_Kinf2.Print()
arrow_ImpE_K_thresh.Print()
arrow_ImpE_U_thresh.Print()
arrow_DeltaU.Print()

# ******************************************************************************
# Plots
# ******************************************************************************

if options.twofigures:
    figs = []
    if plot_V:
        fig1 = plot.figure()
        figs.append(fig1)
        plt.subplots_adjust(hspace=0.0)
    if plot_U:
        fig2 = plot.figure()
        figs.append(fig2)
        plt.subplots_adjust(hspace=0.0)
    figs_N = 1
    fig1_y = 1
    fig2_y = 1
else:
    fig1 = plot.figure()
    fig2 = fig1
    figs = [fig1]
    figs_N = 2
    fig1_y = 1