def plot_virtual_ase_slices(model, changes, atlas, new_figure=True, n_slices=25,
                            xlims=None,
                            fitter=None, denom_cutoff=5):

    denoms, virtualslices, fitter = get_virtual_ase_slices(
        model, changes, atlas, n_slices,
        fitter=fitter, denom_cutoff=denom_cutoff
    )
    if new_figure:
        mpl.figure()
    virtualslices = np.ma.masked_where(np.isnan(virtualslices), virtualslices)
    cmap = mpl.cm.RdBu.copy()  # work on a copy rather than mutating the registered colormap
    cmap.set_bad((0.5, 0.5, 0.5))
    cmap.set_over((0.5, 0.5, 0.5))
    cmap.set_under((0.5, 0.5, 0.5))
    xs = np.linspace(atlas.x.min(), atlas.x.max(), n_slices, endpoint=True)
    mpl.pcolormesh(xs, [0,1], virtualslices, cmap=cmap, vmin=-1, vmax=1)
    ax = mpl.gca()
    #ax.set_aspect(1)
    ax.hlines([0, 1], xs[0], xs[-1])
    ax.vlines([xs[0], xs[-1]], 0, 1)
    if xlims:
        ax.set_xlim(xlims)
    ax.set_ylim(-0.1, 1.1)
    ax.set_xticks([])
    ax.set_yticks([])
    return denoms, virtualslices
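The function above leans on a general pcolormesh idiom: mask the invalid cells and give the colormap an explicit "bad" colour so gaps render grey instead of blank. A minimal self-contained sketch of that idiom with synthetic data (plain numpy/matplotlib; the atlas/model objects above are not needed):

import numpy as np
import matplotlib.pyplot as plt

data = np.random.uniform(-1, 1, size=(1, 24))    # one synthetic row of slice values
data[0, 5:8] = np.nan                            # pretend a few slices had too few reads

masked = np.ma.masked_invalid(data)              # hide NaNs from the colour mapping
cmap = plt.cm.RdBu.copy()
cmap.set_bad((0.5, 0.5, 0.5))                    # masked cells drawn in grey

edges = np.linspace(0.0, 1.0, masked.shape[1] + 1)   # cell edges along x
plt.pcolormesh(edges, [0, 1], masked, cmap=cmap, vmin=-1, vmax=1)
plt.gca().set_yticks([])
plt.show()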
Example #2
def show_plot(X, y, n_neighbors=10, h=0.2):
    # Create color maps
    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF','#FFAAAA', '#AAFFAA', '#AAAAFF','#FFAAAA', '#AAFFAA', '#AAAAFF','#AAAAFF'])
    cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000',])

    for weights in ['uniform', 'distance']:
        # we create an instance of Neighbours Classifier and fit the data.
        clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, weights=weights)
        clf.fit(X, y)

        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))
        Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        plt.figure()
        plt.pcolormesh(xx, yy, Z, cmap=cmap_light)

        # Plot also the training points
        plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
        plt.xlim(xx.min(), xx.max())
        plt.ylim(yy.min(), yy.max())
        plt.title("3-Class classification (k = %i, weights = '%s')"
                  % (n_neighbors, weights))

    plt.show()
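The comments above spell out the usual decision-boundary recipe: build a dense mesh over the feature range, predict a class for every mesh point, and colour the plane with pcolormesh. The same recipe boiled down to a self-contained sketch on synthetic blobs (scikit-learn assumed installed; the data and parameters are illustrative, not taken from the snippet above):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors
from sklearn.datasets import make_blobs

X, y = make_blobs(n_samples=150, centers=3, random_state=0)
clf = neighbors.KNeighborsClassifier(n_neighbors=10).fit(X, y)

h = 0.2  # mesh step
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

# one prediction per mesh point, reshaped back onto the grid
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
plt.pcolormesh(xx, yy, Z, cmap=cmap_light, shading='auto')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold, edgecolors='k')
plt.show()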
Example #3
def plotSpectogramF0Segments(x, fs, w, N, H, f0, segments):
    """
    Code for plotting the f0 contour on top of the spectrogram
    """
    # frequency range to plot
    maxplotfreq = 1000.0    
    fontSize = 16

    fig = plt.figure()
    ax = fig.add_subplot(111)

    mX, pX = stft.stftAnal(x, fs, w, N, H)                      #using same params as used for analysis
    mX = np.transpose(mX[:,:int(N*(maxplotfreq/fs))+1])
    
    timeStamps = np.arange(mX.shape[1])*H/float(fs)                             
    binFreqs = np.arange(mX.shape[0])*fs/float(N)
    
    plt.pcolormesh(timeStamps, binFreqs, mX)
    plt.plot(timeStamps, f0, color = 'k', linewidth=5)

    for ii in range(segments.shape[0]):
        plt.plot(timeStamps[segments[ii,0]:segments[ii,1]], f0[segments[ii,0]:segments[ii,1]], color = '#A9E2F3', linewidth=1.5)        
    
    plt.autoscale(tight=True)
    plt.ylabel('Frequency (Hz)', fontsize = fontSize)
    plt.xlabel('Time (s)', fontsize = fontSize)
    plt.legend(('f0','segments'))
    
    xLim = ax.get_xlim()
    yLim = ax.get_ylim()
    ax.set_aspect((xLim[1]-xLim[0])/(2.0*(yLim[1]-yLim[0])))    
    plt.autoscale(tight=True) 
    plt.show()
Example #4
def plot_2d(x, y, mean, variance, ei, slice_at, v1_name, v2_name):
    h_fig = pplt.figure(figsize=(20, 8), dpi=100)
    pplt.subplot(131)
    h_mean = pplt.pcolormesh(x, y,
                             mean.reshape(x.shape[0], y.shape[0]))
    pplt.colorbar(h_mean)
    slice_at_list = np.squeeze(np.asarray(slice_at)).tolist()
    slice_at_string = str(["%.2f" % member for member in slice_at_list])
    pplt.xlabel(r'$' + v1_name + '$')
    pplt.ylabel(r'$' + v2_name + '$')
    pplt.title(r'Mean, slice along $( ' + v1_name + ',' + v2_name + ')$ at ' +
               slice_at_string)

    pplt.subplot(132)
    h_var = pplt.pcolormesh(x, y, 2*np.sqrt(variance.reshape(x.shape[0],
                                                   y.shape[0])))
    pplt.colorbar(h_var)
    pplt.xlabel(r'$' + v1_name + '$')
    pplt.ylabel(r'$' + v2_name + '$')
    pplt.title(r'2*Stdev, slice along $( ' + v1_name + ',' + v2_name + ')$' )

    pplt.subplot(133)
    h_ei = pplt.pcolormesh(x, y, ei.reshape(x.shape[0], y.shape[0]))
    pplt.colorbar(h_ei)
    pplt.xlabel(r'$' + v1_name + '$')
    pplt.ylabel(r'$' + v2_name + '$')
    pplt.title(r'EI, slice along $( ' + v1_name + ',' + v2_name + ')$')

    pplt.draw()
    return (h_fig, h_mean, h_var, h_ei)
Example #5
def plt_data():
    t = [[0,1], [1,0], [1, 1], [0, 0]]
    t2 = [1, 1, 1, 0]
    X = np.array(t)
    Y = np.array(t2)

    h = .02  # step size in the mesh

    logreg = linear_model.LogisticRegression(C=1e5)

    # we create an instance of LogisticRegression and fit the data.
    logreg.fit(X, Y)
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure(1, figsize=(4, 3))
    plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')

    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())

    plt.show()
Example #6
def imshow_active_cells(grid, values, var_name=None, var_units=None,
                grid_units=(None, None), symmetric_cbar=False,
                cmap='pink'):
    """
    .. deprecated:: 0.6
    Use :meth:`imshow_active_cell_grid`, above, instead.
    """
    data = values.view()
    data.shape = (grid.shape[0]-2, grid.shape[1]-2)

    y = np.arange(data.shape[0]) - grid.dx * .5
    x = np.arange(data.shape[1]) - grid.dx * .5

    if symmetric_cbar:
        (var_min, var_max) = (data.min(), data.max())
        limit = max(abs(var_min), abs(var_max))
        limits = (-limit, limit)
    else:
        limits = (None, None)

    plt.pcolormesh(x, y, data, vmin=limits[0], vmax=limits[1], cmap=cmap)

    plt.gca().set_aspect(1.)
    plt.autoscale(tight=True)

    plt.colorbar()

    plt.xlabel('X (%s)' % grid_units[1])
    plt.ylabel('Y (%s)' % grid_units[0])

    if var_name is not None:
        plt.title('%s (%s)' % (var_name, var_units))

    plt.show()
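The symmetric_cbar branch above centres the colour limits on zero so the diverging colormap's midpoint means "no change". The same idea in isolation, on synthetic data:

import numpy as np
import matplotlib.pyplot as plt

field = np.random.randn(20, 30)                  # synthetic signed field
limit = max(abs(field.min()), abs(field.max()))  # symmetric limits around zero
plt.pcolormesh(field, cmap='RdBu_r', vmin=-limit, vmax=limit)
plt.colorbar()
plt.show()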
Example #7
def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options):
    """Makes a pseudocolor plot.
    
    xs: sequence of x values
    ys: sequence of y values
    zs: 2-D array of z values with shape (len(ys), len(xs))
    pcolor: boolean, whether to make a pseudocolor plot
    contour: boolean, whether to make a contour plot
    options: keyword args passed to pyplot.pcolor and/or pyplot.contour
    """
    Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)

    X, Y = np.meshgrid(xs, ys)
    Z = zs

    x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
    axes = pyplot.gca()
    axes.xaxis.set_major_formatter(x_formatter)

    if pcolor:
        pyplot.pcolormesh(X, Y, Z, **options)

    if contour:
        cs = pyplot.contour(X, Y, Z, **options)
        pyplot.clabel(cs, inline=1, fontsize=10)
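A hypothetical call to Pcolor, assuming the surrounding module (this looks like thinkplot, which supplies Underride) has been imported; the grid and values below are made up for illustration:

import numpy as np

xs = np.linspace(0.0, 1.0, 50)
ys = np.linspace(0.0, 2.0, 60)
# zs must match the meshgrid shape, i.e. (len(ys), len(xs))
zs = np.sin(np.pi * ys)[:, None] * np.cos(np.pi * xs)[None, :]

Pcolor(xs, ys, zs)   # pseudocolor plot with the module's default Blues colormap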
Example #8
def fit_and_plot(cand, spd):
    data = cand.profile
    n = len(data)
    rms = np.std(data[(n // 2):])
    xs = np.linspace(0.0, 1.0, n, endpoint=False)
    G = gauss._compute_data(cand)
    print("    Reduced chi-squared: %f" % (G.get_chisqr(data) / G.get_dof(n)))
    print("    Baseline rms: %f" % rms)
    print("    %s" % G.components[0])

    fig1 = plt.figure(figsize=(10,10))
    plt.subplots_adjust(wspace=0, hspace=0)

    # upper
    ax1 = plt.subplot2grid((3,1), (0,0), rowspan=2, colspan=1)
    ax1.plot(xs, data/rms, color="black", label="data")
    ax1.plot(xs, G.components[0].make_gaussian(n), color="red", label="best fit")

    # lower
    ax2 = plt.subplot2grid((3,1), (2,0), sharex=ax1)
    ax2.plot(xs, data/rms - G.components[0].make_gaussian(n), color="black", label="residuals")
    ax2.set_xlabel("Fraction of pulse window")

    plt.figure()
    plt.pcolormesh(xs, spd.waterfall_freq_axis(), spd.data_zerodm_dedisp, cmap='Greys')
    plt.xlabel("Fraction of pulse window")
    plt.ylabel("Frequency (MHz)")
    plt.xlim(0, 1)
    plt.ylim(spd.min_freq, spd.max_freq)

    plt.show()
Example #9
def prettyPicture(clf, X_test, y_test):
    x_min = 0.0;
    x_max = 1.0
    y_min = 0.0;
    y_max = 1.0

    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    h = .01  # step size in the mesh
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())

    plt.pcolormesh(xx, yy, Z)

    # Plot also the test points
    grade_sig = [X_test[ii][0] for ii in range(0, len(X_test)) if y_test[ii] == 0]
    bumpy_sig = [X_test[ii][1] for ii in range(0, len(X_test)) if y_test[ii] == 0]
    grade_bkg = [X_test[ii][0] for ii in range(0, len(X_test)) if y_test[ii] == 1]
    bumpy_bkg = [X_test[ii][1] for ii in range(0, len(X_test)) if y_test[ii] == 1]

    plt.scatter(grade_sig, bumpy_sig, color="b", label="fast")
    plt.scatter(grade_bkg, bumpy_bkg, color="r", label="slow")
    plt.legend()
    plt.xlabel("bumpiness")
    plt.ylabel("grade")

    plt.savefig("test.png")
def plotmapdBtime():
    pcolormesh(yoko, time*1e6, dB(S11c), vmin=-65, vmax=-30)
    title("Reflection (dB) vs flux \n and time (1 us pulse) at 4.46 GHz")
    xlabel("Flux (V)")
    ylabel("Time (us)")
    #ylim(0, 1.5)
    colorbar()
Example #11
    def test_unimodality_of_GEV(self):
        x0 = 1500

        mu = 1000
        data = np.array([x0])

        ksi = np.arange(-2, 2, 0.01)
        sigma = np.arange(10, 8000, 10)

        n_ksi = len(ksi)
        n_sigma = len(sigma)

        z = np.zeros((n_ksi, n_sigma))

        for i, the_ksi in enumerate(ksi):
            for j, the_sigma in enumerate(sigma):
                z[i, j] = gevfit.objective_function_stationary_high([the_sigma, mu, the_ksi], data)


        sigma, ksi = np.meshgrid(sigma, ksi)
        z = np.ma.masked_where(z == gevfit.BIG_NUM, z)
        z = np.ma.masked_where(z > 9, z)

        plt.figure()
        plt.pcolormesh(ksi, sigma, z)
        plt.colorbar()
        plt.xlabel('$\\xi$')
        plt.ylabel('$\\sigma$')
        plt.title('$\\mu = %.1f, x = %.1f$' % (mu, x0))

        plt.show()


        pass
def train_plot(features, labels):
    y0, y1 = features[:, 2].min() * .9, features[:, 2].max() * 1.1
    x0, x1 = features[:, 0].min() * .9, features[:, 0].max() * 1.1
    X = np.linspace(x0, x1, 100)
    Y = np.linspace(y0, y1, 100)
    X, Y = np.meshgrid(X, Y)

    model = fit_model(1, features[:, (0, 2)], np.array(labels))
    C = predict(
        np.vstack([X.ravel(), Y.ravel()]).T, model).reshape(X.shape)
    if COLOUR_FIGURE:
        cmap = ListedColormap([(1., .6, .6), (.6, 1., .6), (.6, .6, 1.)])
    else:
        cmap = ListedColormap([(1., 1., 1.), (.2, .2, .2), (.6, .6, .6)])
    plt.xlim(x0, x1)
    plt.ylim(y0, y1)
    plt.xlabel(feature_names[0])
    plt.ylabel(feature_names[2])
    plt.pcolormesh(X, Y, C, cmap=cmap)
    if COLOUR_FIGURE:
        cmap = ListedColormap([(1., .0, .0), (.0, 1., .0), (.0, .0, 1.)])
        plt.scatter(features[:, 0], features[:, 2], c=labels, cmap=cmap)
    else:
        for lab, ma in zip(range(3), "Do^"):
            plt.plot(features[labels == lab, 0], features[
                     labels == lab, 2], ma, c=(1., 1., 1.))
def plotmaptime():
    pcolormesh(yoko, time*1e6, absolute(Magcom))
    title("Reflection vs flux \n and time (1 us pulse) at 4.46 GHz")
    xlabel("Flux (V)")
    ylabel("Time (us)")
    #ylim(0, 1.5)
    colorbar()
Example #14
def write_potential(N=2.5, pphw=20, amplitude=1.0, sigmax=1e-1, sigmay=1e-1,
                    L=100., W=1.0, x_R0=0.05, y_R0=0.4, loop_type='Bell',
                    init_phase=0.0, shape='RAP', plot=True,
                    plot_dimensions=False, direction='right',
                    boundary_only=False, with_boundary=False, boundary_phase=0.0,
                    theta=0.0, smearing=False, verbose=True, linearized=False):

    p = Potential(N=N, pphw=pphw, amplitude=amplitude, sigmax=sigmax,
                  sigmay=sigmay, x_R0=x_R0, y_R0=y_R0, init_phase=init_phase,
                  shape=shape, L=L, W=W, loop_type=loop_type,
                  direction=direction, boundary_only=boundary_only,
                  with_boundary=with_boundary, theta=theta,
                  verbose=verbose, linearized=linearized)

    if not boundary_only:
        imag, imag_vector = p.imag, p.imag_vector
        real, real_vector = p.real, p.real_vector
    X, Y = p.X, p.Y

    if not boundary_only:
        if plot:
            import matplotlib.pyplot as plt
            if plot_dimensions:
                plt.figure(figsize=(L, W))
            plt.pcolormesh(X, Y, imag, cmap='RdBu_r')
            plt.savefig("imag.png")
            plt.pcolormesh(X, Y, real, cmap='RdBu_r')
            plt.savefig("real.png")

        np.savetxt("potential_imag.dat", zip(xrange(len(imag_vector)), imag_vector),
                   fmt=["%i", "%.12f"])
        np.savetxt("potential_real.dat", zip(xrange(len(real_vector)), real_vector),
                   fmt=["%i", "%.12f"])
        if shape != 'science':
            np.savez("potential_imag_xy.npz", X=X, Y=Y, P=imag_vector,
                     X_nodes=p.xnodes, Y_nodes=p.ynodes,
                     sigmax=sigmax, sigmay=sigmay)

    if shape == 'RAP':
        xi_lower, xi_upper = p.WG.get_boundary(theta=theta, smearing=smearing,
                                               boundary_phase=boundary_phase)
        # set last element to 0 (xi_lower) or W (xi_upper)
        print "WARNING: end of boundary not set zero!"
        # xi_lower[-1] = 0.0
        # xi_upper[-1] = W
        np.savetxt("upper.boundary", zip(xrange(p.nx), xi_upper))
        np.savetxt("lower.boundary", zip(xrange(p.nx), xi_lower))
        eps, delta = p.WG.get_cycle_parameters()
        np.savetxt("boundary.eps_delta", zip(eps, delta))
    if shape == 'RAP_TQD':
        eps_prime, delta_prime, theta_prime = p.WG.get_quantum_driving_parameters()
        xi_lower, xi_upper = p.WG.get_boundary(eps=eps_prime, delta=delta_prime,
                                               theta=theta_prime,
                                               smearing=smearing)
        # set last element to 0 (xi_lower) or W (xi_upper)
        xi_lower[-1] = 0.0
        xi_upper[-1] = W
        np.savetxt("upper.boundary", zip(xrange(p.nx), xi_upper))
        np.savetxt("lower.boundary", zip(xrange(p.nx), xi_lower))
        np.savetxt("boundary.eps_delta_theta", zip(eps_prime, delta_prime, theta_prime))
    def visualize(self, output_file, width=2, show_charts=False):
        X = self.X

        # Create a grid of points
        x_min, x_max = min(X[:, 0] - width), max(X[:, 0] + width)
        y_min, y_max = min(X[:, 1] - width), max(X[:, 1] + width)
        xx,yy = np.meshgrid(np.arange(x_min, x_max, .05), np.arange(y_min,
            y_max, .05))

        # Flatten the grid so the values match spec for self.predict
        xx_flat = xx.flatten()
        yy_flat = yy.flatten()
        X_topredict = np.vstack((xx_flat,yy_flat)).T

        # Get the class predictions
        Y_hat = self.predict(X_topredict)
        Y_hat = Y_hat.reshape((xx.shape[0], xx.shape[1]))
        
        cMap = c.ListedColormap(['r','b','g'])

        # Visualize them.
        plt.figure()
        plt.pcolormesh(xx,yy,Y_hat, cmap=cMap)
        plt.scatter(X[:, 0], X[:, 1], c=self.C, cmap=cMap)
        plt.savefig(output_file)
        if show_charts:
            plt.show()
Example #16
def plot_2d_rabi_vs_powers(RABI_FOLDER):
    '''

    Args:
        RABI_FOLDER: folder with the rabi data to use

    Returns:
        rabi_data: rabi contrast APD output 2 / APD output 1 versus tau
        taus: tau values in the rabi sweep
        powers: power values used in the 2D sweep, in dBm

    '''

    rabi_data = []
    taus = []
    powers = []
    for f in sorted(glob.glob('{:s}/data_subscripts/*'.format(RABI_FOLDER))):
        data = Script.load_data(f)
        if data is None or 'tau' not in data: # not a Rabi folder (e.g., find_nv instead)
            continue
        cnts = np.transpose(data['counts'])
        cnts1 = cnts[1]
        cnts0 = cnts[0]
        single_rabi_data = cnts1/cnts0
        rabi_data.append(single_rabi_data)
        taus = data['tau']
        power = float(f.split('_')[-1])
        powers.append(power)
    rabi_data = np.array(rabi_data)[np.argsort(powers)]
    powers = np.sort(np.array(powers))
    plt.pcolormesh(taus, powers, rabi_data)
    plt.xlabel('tau values (ns)')
    plt.ylabel('input power (dBm)')

    return rabi_data, taus, powers
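The pattern above, accumulating one sweep per row and then reordering the rows by the swept parameter before pcolormesh, reduced to a synthetic sketch:

import numpy as np
import matplotlib.pyplot as plt

taus = np.linspace(0, 500, 100)                  # ns
powers = np.array([-10.0, -4.0, -7.0, -1.0])     # dBm, deliberately unsorted
rows = [1 + 0.5 * np.cos(2 * np.pi * taus / (100 - 10 * p)) for p in powers]

data = np.array(rows)[np.argsort(powers)]        # reorder rows to match sorted powers
powers = np.sort(powers)

plt.pcolormesh(taus, powers, data, shading='auto')
plt.xlabel('tau values (ns)')
plt.ylabel('input power (dBm)')
plt.show()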
def plot_eta(pT_lower_cut):

	properties_reco = [parse_file("/home/aashish/pythia_reco.dat", pT_lower_cut=pT_lower_cut), parse_file("/home/aashish/herwig_reco.dat", pT_lower_cut=pT_lower_cut), parse_file("/home/aashish/sherpa_reco.dat", pT_lower_cut=pT_lower_cut)]
	properties_truth = [parse_file("/home/aashish/pythia_truth.dat", pT_lower_cut=pT_lower_cut), parse_file("/home/aashish/herwig_truth.dat", pT_lower_cut=pT_lower_cut), parse_file("/home/aashish/sherpa_truth.dat", pT_lower_cut=pT_lower_cut)]
	labels = ["pythia", "herwig", "sherpa"]

	for prop_reco, prop_truth, label in zip(properties_reco, properties_truth, labels):
	
		x = prop_truth['hardest_eta']
		y = prop_reco['hardest_eta']

		H, xedges, yedges = np.histogram2d(x, y, bins=200, density=True)

		H = np.rot90(H)
		H = np.flipud(H)

		Hmasked = np.ma.masked_where(H == 0, H) # Mask pixels with a value of zero

		plt.pcolormesh(xedges,yedges, Hmasked)

		cbar = plt.colorbar()
		cbar.ax.set_ylabel('Counts')

		plt.xlim(0, 3)
		plt.ylim(0, 3)

		plt.xlabel(r'Truth $\eta$', fontsize=50, labelpad=75)
		plt.ylabel(r'Reco $\eta$', fontsize=50, labelpad=75)

		plt.gcf().set_size_inches(30, 30, forward=1)
		plt.gcf().set_snap(True)

		plt.savefig("plots/With MC/2D/eta_" + label + ".pdf")

		plt.clf()
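The function above is the common histogram2d-to-pcolormesh pattern: bin two variables, mask the empty bins, and draw the bin edges with pcolormesh. A standalone sketch of just that pattern on synthetic correlated data:

import numpy as np
import matplotlib.pyplot as plt

x = np.random.randn(10000)
y = x + 0.3 * np.random.randn(10000)               # roughly correlated with x

H, xedges, yedges = np.histogram2d(x, y, bins=100, density=True)
H = np.rot90(H)                                    # orient so x runs along the horizontal axis
H = np.flipud(H)
Hmasked = np.ma.masked_where(H == 0, H)            # leave empty bins unfilled

plt.pcolormesh(xedges, yedges, Hmasked)
plt.colorbar().ax.set_ylabel('density')
plt.show()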
Example #18
def plot_polcomp_dynspec(tims, frqs, jones):
    """Plot dynamic power spectra of each polarization component."""
    #fig, (ax0, ax1) = plt.subplots(nrows=2)
    p_ch = numpy.abs(jones[:,:,0,0].squeeze())**2+numpy.abs(jones[:,:,0,1].squeeze())**2
    q_ch = numpy.abs(jones[:,:,1,1].squeeze())**2+numpy.abs(jones[:,:,1,0].squeeze())**2
    ftims=matplotlib.dates.date2num(tims)
    dynspecunit = 'flux arb.'
    # In dB
    dBunit = False
    if dBunit:
        p_ch = 10*numpy.log10(p_ch)
        q_ch = 10*numpy.log10(q_ch)
        dynspecunit += ' dB'
    dynspecunit += ' unit'
    plt.figure()
    plt.subplot(211)
    plt.pcolormesh(numpy.asarray(tims), frqs, p_ch)
    plt.title('p-channel')
    #plt.clim(0, 1.0)
    plt.colorbar().set_label(dynspecunit)
    plt.subplot(212)
    plt.pcolormesh(numpy.asarray(tims), frqs, q_ch)
    plt.title('q-channel')
    #plt.clim(0, 1.0)
    plt.xlabel('Time')
    plt.ylabel('Frequency')
    plt.colorbar().set_label(dynspecunit)
    plt.show()
Example #19
def demo():
    from time import time
    import matplotlib.pyplot as plt

    #create qx and qy evenly spaces
    qx = np.linspace(-.02, .02, 128)
    qy = np.linspace(-.02, .02, 128)
    qx, qy = np.meshgrid(qx, qy)

    #saved shape of qx
    r_shape = qx.shape
    #reshape for calculation; resize as float32
    qx = qx.flatten()
    qy = qy.flatten()

    #int main
    pars = EllipsoidParameters(.027, 60, 180, .297e-6, 5.773e-06, 4.9, 0, 90)

    t = time()
    result = GpuEllipse(qx, qy)
    result.x = result.ellipsoid_fit(qx, qy, pars, b_n=35, t_n=35, a_n=1, p_n=1, sigma=3, b_w=.1, t_w=.1, a_w=.1, p_w=.1)
    result.x = np.reshape(result.x, r_shape)
    tt = time()
    print("Time taken: %f" % (tt - t))

    plt.pcolormesh(result.x)
    plt.show()
Example #20
def test_complete():
    fig = plt.figure('Figure with a label?', figsize=(10, 6))

    plt.suptitle('Can you fit any more in a figure?')

    # make some arbitrary data
    x, y = np.arange(8), np.arange(10)
    data = u = v = np.linspace(0, 10, 80).reshape(10, 8)
    v = np.sin(v * -0.6)

    plt.subplot(3, 3, 1)
    plt.plot(list(range(10)))

    plt.subplot(3, 3, 2)
    plt.contourf(data, hatches=['//', 'ooo'])
    plt.colorbar()

    plt.subplot(3, 3, 3)
    plt.pcolormesh(data)

    plt.subplot(3, 3, 4)
    plt.imshow(data)

    plt.subplot(3, 3, 5)
    plt.pcolor(data)

    plt.subplot(3, 3, 6)
    plt.streamplot(x, y, u, v)

    plt.subplot(3, 3, 7)
    plt.quiver(x, y, u, v)

    plt.subplot(3, 3, 8)
    plt.scatter(x, x**2, label='$x^2$')
    plt.legend(loc='upper left')

    plt.subplot(3, 3, 9)
    plt.errorbar(x, x * -0.5, xerr=0.2, yerr=0.4)

    ###### plotting is done, now test its pickle-ability #########

    # Uncomment to debug any unpicklable objects. This is slow (~200 seconds).
#    recursive_pickle(fig)

    result_fh = BytesIO()
    pickle.dump(fig, result_fh, pickle.HIGHEST_PROTOCOL)

    plt.close('all')

    # make doubly sure that there are no figures left
    assert_equal(plt._pylab_helpers.Gcf.figs, {})

    # wind back the fh and load in the figure
    result_fh.seek(0)
    fig = pickle.load(result_fh)

    # make sure there is now a figure manager
    assert_not_equal(plt._pylab_helpers.Gcf.figs, {})

    assert_equal(fig.get_label(), 'Figure with a label?')
def plot_q_qhat(q, t):
    # Plot Potential Vorticity
    plt.clf()
    plt.subplot(2,1,1)
    plt.pcolormesh(xx/1e3,yy/1e3,q)
    plt.colorbar()
    plt.axis([-Lx/2e3, Lx/2e3, -Ly/2e3, Ly/2e3])
    name = "PV at t = %5.2f" % (t/(3600.0*24.0))
    plt.title(name)

    # compute power spectrum and shift ffts
    qe = np.vstack((q0,-np.flipud(q)))
    qhat = np.absolute(fftn(qe))
    kx = fftshift((parms.ikx/parms.ikx[0,1]).real)
    ky = fftshift((parms.iky/parms.iky[1,0]).real)
    qhat = fftshift(qhat)

    Sx, Sy = int(parms.Nx/2), parms.Ny
    Sk = 1.5

    # Plot power spectrum
    plt.subplot(2,1,2)
    #plt.pcolor(kx[Sy:Sy+20,Sx:Sx+20],ky[Sy:Sy+20,Sx:Sx+20],qhat[Sy:Sy+20,Sx:Sx+20])
    plt.pcolor(kx[Sy:int(Sk*Sy),Sx:int(Sk*Sx)],ky[Sy:int(Sk*Sy),Sx:int(Sk*Sx)],
               qhat[Sy:int(Sk*Sy),Sx:int(Sk*Sx)])
    plt.axis([0, 10, 0, 10])
    plt.colorbar()
    name = "PS at t = %5.2f" % (t/(3600.0*24.0))
    plt.title(name)

    plt.draw()
Example #22
    def test_tri_polar(self):
        # load data
        cubes = iris.load(tests.get_data_path(['NetCDF', 'ORCA2', 'votemper.nc']))
        cube = cubes[0]
        # The netCDF file has different data types for the points and
        # bounds of 'depth'. This wasn't previously supported, so we
        # emulate that old behaviour.
        cube.coord('depth').bounds = cube.coord('depth').bounds.astype(np.float32)

        # define a latitude trajectory (put coords in a different order to the cube, just to be awkward)
        latitudes = range(-90, 90, 2)
        longitudes = [-90]*len(latitudes)
        sample_points = [('longitude', longitudes), ('latitude', latitudes)]

        # extract
        sampled_cube = iris.analysis.trajectory.interpolate(cube, sample_points)
        self.assertCML(sampled_cube, ('trajectory', 'tri_polar_latitude_slice.cml'))

        # turn it upside down for the visualisation
        plot_cube = sampled_cube[0]
        plot_cube = plot_cube[::-1, :]

        plt.clf()
        plt.pcolormesh(plot_cube.data, vmin=cube.data.min(), vmax=cube.data.max())
        plt.colorbar()
        self.check_graphic()

        # Try to request linear interpolation.
        # Not allowed, as we have multi-dimensional coords.
        self.assertRaises(iris.exceptions.CoordinateMultiDimError, iris.analysis.trajectory.interpolate, cube, sample_points, method="linear")

        # Try to request unknown interpolation.
        self.assertRaises(ValueError, iris.analysis.trajectory.interpolate, cube, sample_points, method="linekar")
Example #23
def plot_spectrogram(raw_data, nfft, fs, channel_bottom, print_frequency_graph):
    data_shape = raw_data.shape

    print("Generating spectrogram...")
    plt_num = 1
    plt.clf()
    plt.figure(1)

    channel_data = []
    for i in range(0, data_shape[1]):
        plt.subplot(8, 2, plt_num)

        f, t, Sxx = signal.spectrogram(x=raw_data[:, i], nfft=nfft, fs=fs, noverlap=127, nperseg=128,
                                       scaling='density')  # returns PSD power per Hz
        plt.pcolormesh(t, f, Sxx)

        plt.xlabel('Time (sec)')
        plt.ylabel('Frequency (Hz)')
        plt.title('Channel %s' % (i + channel_bottom))
        plt_num += 1
        channel_data.append([f, t, Sxx])
        print("\tChannel %d spectrogram generated" % i)
    if print_frequency_graph:
        plt.show()
    return channel_data
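For reference, the core of the per-channel loop above reduced to one synthetic signal, using the same scipy.signal.spectrogram call and pcolormesh:

import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

fs = 1000.0
t = np.arange(0, 5, 1 / fs)
x = np.sin(2 * np.pi * (50 + 20 * t) * t)       # synthetic chirp-like signal

f, tt, Sxx = signal.spectrogram(x, fs=fs, nperseg=128, noverlap=127, scaling='density')
plt.pcolormesh(tt, f, Sxx, shading='auto')
plt.xlabel('Time (sec)')
plt.ylabel('Frequency (Hz)')
plt.show()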
Example #24
def Picture(clf, X_test, y_test):
    x_min = 200.0
    x_max = 1000.0
    y_min = 600.0
    y_max = 2500.0

    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    h = 1  # step size in the mesh
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())

    plt.pcolormesh(xx, yy, Z, cmap=pl.cm.seismic)

    # Plot also the test points
    x1 = [X_test[ii][0] for ii in range(0, len(X_test)) if y_test[ii] == 0]
    y1 = [X_test[ii][1] for ii in range(0, len(X_test)) if y_test[ii] == 0]
    x2 = [X_test[ii][0] for ii in range(0, len(X_test)) if y_test[ii] == 1]
    y2 = [X_test[ii][1] for ii in range(0, len(X_test)) if y_test[ii] == 1]
    x3 = [X_test[ii][0] for ii in range(0, len(X_test)) if y_test[ii] == 2]
    y3 = [X_test[ii][1] for ii in range(0, len(X_test)) if y_test[ii] == 2]

    plt.scatter(x1, y1, color="b", label="class1")
    plt.scatter(x2, y2, color="r", label="class2")
    plt.scatter(x3, y3, color="g", label="class3")
    plt.legend()
    plt.xlabel("x")
    plt.ylabel("y")

    plt.savefig("testrf.png")
def train_Quasi_linear_SVM():
	from sklearn import svm
	clf = svm.SVC(kernel=get_KernelMatrix)
	X_train = np.r_[X1,X2]
	Y_train = np.r_[Y1,Y2]
	plt.scatter(X[:,0],X[:,1],c='g')
	clf.fit(X_train, Y_train)

	y_pred = clf.predict(X_test)

	#scatter(X_test, y_pred)

	clf = svm.SVC(kernel=get_KernelMatrix)
	clf.fit(X, y)
	clf.predict(X_test1)

	# Plot the decision boundary. For that, we will assign a color to each
	# point in the mesh [x_min, x_max]x[y_min, y_max].
	h = 0.05
	x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
	y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
	xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
	Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

	# Put the result into a color plot
	Z = Z.reshape(xx.shape)
	plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

	# Plot also the training points
	plt.scatter(X_train[:, 0], X_train[:, 1], c=Y_train)
	plt.title('2-Class classification using Support Vector Machine with quasi-linear kernel')
	plt.axis('tight')
	plt.legend([Y_train[0], Y_train[-1]], ['negative sample', 'positive sample'])
	plt.show()
Example #26
def plot_basins(f, Df, roots, xmin, xmax, ymin, ymax, numpoints=100, iters=15, colormap='brg'):
    '''Plot the basins of attraction of f.
    
    INPUTS:
    f       - A function handle. Should represent a function 
            from C to C.
    Df      - A function handle. Should be the derivative of f.
    roots   - An array of the zeros of f.
    xmin, xmax, ymin, ymax - Scalars that define the domain 
            for the plot.
    numpoints - A scalar that determines the resolution of 
            the plot. Defaults to 100.
    iters   - Number of times to iterate Newton's method. 
            Defaults to 15.
    colormap - A colormap to use in the plot. Defaults to 'brg'.    
    '''
    xreal = np.linspace(xmin, xmax, numpoints)
    ximag = np.linspace(ymin, ymax, numpoints)
    Xreal, Ximag = np.meshgrid(xreal, ximag)
    xold = Xreal+1j*Ximag
    n = 0
    while n <= iters:
        xnew = xold - f(xold)/Df(xold)
        xold = xnew
        n += 1 

    converged_to = np.zeros(xnew.shape, dtype=int)   # index of the nearest root for each pixel
    for i in range(xnew.shape[0]):
        for j in range(xnew.shape[1]):
            root = np.abs(roots-xnew[i,j]).argmin()
            converged_to[i,j] = root

    plt.pcolormesh(Xreal, Ximag, converged_to, cmap=colormap, shading='auto')
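A worked call for the function above, assuming it and matplotlib.pyplot are in scope: the basins of z**3 - 1, whose three roots are the cube roots of unity (window, resolution, and iteration count are illustrative):

import numpy as np
import matplotlib.pyplot as plt

f = lambda z: z**3 - 1
Df = lambda z: 3 * z**2
roots = np.array([1.0, -0.5 + 0.5j * np.sqrt(3), -0.5 - 0.5j * np.sqrt(3)])

plot_basins(f, Df, roots, xmin=-1.5, xmax=1.5, ymin=-1.5, ymax=1.5,
            numpoints=400, iters=15, colormap='brg')
plt.show()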
Example #27
def plotter(filename, xmin=0, xmax=300, Nx=2000,
            ymin=0, ymax=1, Ny=2000, sigma_x=3, sigma_y=0.01):
    root = '/home/cyneo/Work/Scans/Processed Data/Extracted CSV/'
    file1 = os.path.abspath(root + filename + '.csv')
    nx = linspace(xmin, xmax, Nx)
    ny = linspace(ymin, ymax, Ny)
    x, y = meshgrid(nx, ny)
    mastermesh = None
    with open(file1, 'r', encoding='utf8') as filein:
        file_reader = csv.reader(filein)
        next(file_reader)

        for word, frequency, inhubness, outhubness in file_reader:
            # want to feed the values into the center points
            if mastermesh is None:
                mastermesh = dgaussian(x, y, float(frequency),
                                       float(outhubness), sigma_x, sigma_y)
            else:
                mastermesh += dgaussian(x, y, float(frequency),
                                        float(outhubness), sigma_x, sigma_y)

    # log-compress the accumulated density (vectorised; keeps the meshgrid x, y intact)
    mastermesh = np.log(mastermesh + 1)
    plt.pcolormesh(x, y, mastermesh)
    plt.show()
    outfile = os.path.abspath(root + filename + ' Array')
    np.save(outfile, mastermesh)
Example #28
def test_pcolormesh_global_with_wrap3():
    nx, ny = 33, 17
    xbnds = np.linspace(-1.875, 358.125, nx, endpoint=True)
    ybnds = np.linspace(91.25, -91.25, ny, endpoint=True)
    xbnds, ybnds = np.meshgrid(xbnds, ybnds)

    data = np.exp(np.sin(np.deg2rad(xbnds)) + np.cos(np.deg2rad(ybnds)))

    # this step is not necessary, but makes the plot even harder to do (i.e.
    # it really puts cartopy through its paces)
    ybnds = np.append(ybnds, ybnds[:, 1:2], axis=1)
    xbnds = np.append(xbnds, xbnds[:, 1:2] + 360, axis=1)
    data = np.ma.concatenate([data, data[:, 0:1]], axis=1)

    data = data[:-1, :-1]
    data = np.ma.masked_greater(data, 2.6)

    ax = plt.subplot(211, projection=ccrs.PlateCarree(-45))
    c = plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
    assert c._wrapped_collection_fix is not None, \
        'No pcolormesh wrapping was done when it should have been.'

    ax.coastlines()
    ax.set_global()  # make sure everything is visible

    ax = plt.subplot(212, projection=ccrs.PlateCarree(-1.87499952))
    plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
    ax.coastlines()
    ax.set_global()  # make sure everything is visible
Example #29
def pcolorRandom():
    "Makes a pcolormesh plot of randomly generated data pts."
    # make up some randomly distributed data
    npts = 100
    x = uniform(-3, 3, npts)
    y = uniform(-3, 3, npts)
    z = x * N.exp(-x ** 2 - y ** 2)

    # define grid.
    xi = N.arange(-3.1, 3.1, 0.05)
    yi = N.arange(-3.1, 3.1, 0.05)

    # grid the data.
    zi = griddata(x, y, z, xi, yi)

    # contour the gridded data, plotting dots at the randomly spaced data points.
    plt.pcolormesh(xi, yi, zi)	
    #CS = plt.contour(xi,yi,zi,15,linewidths=0.5,colors='k')
    #CS = plt.contourf(xi,yi,zi,15,cmap=plt.cm.jet)
    plt.colorbar() # draw colorbar

    # plot data points.
    plt.scatter(x, y, marker='o', c='b', s=5)
    plt.xlim(-3, 3)
    plt.ylim(-3, 3)
    plt.title('griddata test (%d points)' % npts)
    plt.show()
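The griddata used above is the old matplotlib.mlab.griddata, which no longer exists in current Matplotlib; a roughly equivalent sketch (an assumption about the intended behaviour, not a verified drop-in) using scipy.interpolate.griddata:

import numpy as np
import matplotlib.pyplot as plt
from numpy.random import uniform
from scipy.interpolate import griddata

npts = 100
x = uniform(-3, 3, npts)
y = uniform(-3, 3, npts)
z = x * np.exp(-x ** 2 - y ** 2)

# interpolate the scattered samples onto a regular grid
xi = np.arange(-3.1, 3.1, 0.05)
yi = np.arange(-3.1, 3.1, 0.05)
Xi, Yi = np.meshgrid(xi, yi)
zi = griddata((x, y), z, (Xi, Yi), method='linear')   # NaN outside the convex hull

plt.pcolormesh(xi, yi, np.ma.masked_invalid(zi), shading='auto')
plt.colorbar()
plt.scatter(x, y, marker='o', c='b', s=5)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.show()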
Example #30
def iris_test(): # example code for LogisticRegression
    iris = datasets.load_iris()
    X = iris.data[:,:2] # first two features
    Y = iris.target
    h=.02 # step size in the mesh
    logreg = linear_model.LogisticRegression()
    logreg.fit(X,Y)

    x_min, x_max = X[:,0].min() - .5, X[:,0].max() + .5
    y_min, y_max = X[:,1].min() - .5, X[:,1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])

    Z = Z.reshape(xx.shape)
    plt.figure(1, figsize=(4,3))
    plt.pcolormesh(xx,yy,Z,cmap=plt.cm.Paired)

    plt.scatter(X[:,0],X[:,1],c=Y,edgecolors='k',cmap=plt.cm.Paired)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.show()
    x = np.array([5., 8.])
    iteration = 0
    verbose = 1
    grad = egrad(f)
    H_f = jacobian(grad)
    color = 'black'

    if n == 2:  # if dimension = 2 we could plot function
        x_line = np.arange(-10, 10, 0.05)
        y_line = np.arange(-10, 10, 0.05)
        x_grid, y_grid = np.meshgrid(x_line, y_line, sparse=True)
        function_values = f([x_grid, y_grid])

        plt.pcolormesh(x_line,
                       y_line,
                       function_values,
                       cmap=cm.get_cmap('inferno_r'),
                       alpha=0.8)

    while not stop_condition(grad(x), epsilon) and iteration < 4:
        iteration += 1

        if verbose == 1:
            print('-' * 40)
            print(f'Iteration: {iteration}')

        for i in range(n):
            print(f'Fixing coordinates except: {i}')

            e = np.zeros(n)
            e[i] = 1
Example #32
    """
    Do measurements by invoking test_cores() for every desired measurement (TLB type and level).
    Plot the result using matplotlib.
    """
    numpy.set_printoptions(threshold=numpy.inf, linewidth=100000, precision=1)  # print full arrays without wrapping
    l2size=128

#    test_cores('-D',3,l2size,'dtlb_load_misses.miss_causes_a_walk', cpu_uarch + '-stlb-sets.pdf', 2)
    l1dtlb = test_cores('-D',3,16,'dtlb_load_misses.stlb_hit', cpu_uarch + '-dtlb-sets.pdf', 1)
    l1itlb = test_cores('-C',3,16,'itlb_misses.stlb_hit', cpu_uarch + '-itlb-sets.pdf', 1)

    f, axarr = plt.subplots(nrows=1, ncols=2)
    f.tight_layout()
    matplotlib.rc('font', size=5)
    plt.subplot(1,2,1, adjustable='box', aspect=0.8)
    pcm = plt.pcolormesh(l1dtlb)
    plt.xticks([0,4,8,12,16])
    plt.yticks([0,4,8,12,16])
    plt.gca().invert_yaxis()
    plt.xlabel('TLB set')
    plt.ylabel('TLB set')
    plt.title('L1 dtlb')

    plt.subplot(1,2,2, adjustable='box', aspect=0.8)
    pcm = plt.pcolormesh(l1itlb)
    plt.gca().invert_yaxis()
    plt.xticks([0,4,8])
    plt.yticks([0,4,8])
    plt.xlabel('TLB set')
    plt.ylabel('TLB set')
    plt.title('L1 itlb')
    f = open(out_filename, 'wb')
    pickle.dump([
        results, mult_inf_range, ntemp_range, total_analysis_rmse,
        total_forecast_rmse, total_analysis_sprd, total_forecast_sprd
    ], f)
    f.close()

if PlotTheExperiment:

    f = open(out_filename, 'rb')
    [
        results, mult_inf_range, ntemp_range, total_analysis_rmse,
        total_forecast_rmse, total_analysis_sprd, total_forecast_sprd
    ] = pickle.load(f)
    f.close()

    import matplotlib.pyplot as plt

    plt.pcolormesh(ntemp_range, mult_inf_range, total_analysis_rmse)
    plt.colorbar()
    plt.title('Analysis Rmse')
    plt.xlabel('Tempering Iterations')
    plt.ylabel('Multiplicative Inflation')
    plt.show()

    plt.plot(total_analysis_sprd[:, 0], total_analysis_rmse[:, 0])
    plt.plot(total_analysis_sprd[:, 1], total_analysis_rmse[:, 1])
    plt.plot(total_analysis_sprd[:, -1], total_analysis_rmse[:, -1])

    plt.show()
Example #34
    a2 = nonzero(h0[a1] > 4000)
    b2 = nonzero(pType[a1][a2] == 2)
    c2 = nonzero(clutF[a1][a2][b2] > 165)
    zKum = ma.array(zKu, mask=zKu < 0)
    zKam = ma.array(zKa, mask=zKa < 0)
    for j1, j2 in zip(a1[0][a2][b2][c2], a1[1][a2][b2][c2]):
        if hIce[j1, j2] > 0:
            zKuL.append(zKum[j1, j2 + 22, :])
    n0 += len(c2[0])
    print(n0)
    print(h0.mean())
    if h0.mean() > 4000 and cmax > 5:
        plt.figure()
        plt.subplot(211)
        plt.pcolormesh(zKum[:, 24, ::-1].T, cmap='jet', vmax=45)
        plt.xlim(i1, i2)
        plt.ylim(0, 120)
        plt.subplot(212)
        plt.pcolormesh(zKam[:, 12, ::-1].T, cmap='jet', vmax=35)  #
        plt.xlim(i1, i2)
        plt.ylim(0, 120)
        plt.savefig('crossSect%2.2i.png' % ifig)  # it also saves some plots
        plt.close('all')
    ifig += 1
    print(fname)
    plt.show()

zKuL = array(zKuL)
zKuL[zKuL < 0] = 0
import xarray as xr
Example #35
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])

for shrinkage in [None, .2]:
    # we create an instance of NearestCentroid and fit the data.
    clf = NearestCentroid(shrink_threshold=shrinkage)
    clf.fit(X, y)
    y_pred = clf.predict(X)
    print(shrinkage, np.mean(y == y_pred))
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    plt.title("3-Class classification (shrink_threshold=%r)" % shrinkage)
    plt.axis('tight')

plt.show()
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', family='serif')

R1, theta1, phi1, Q1 = parse_QSL_Rbinfile(filename1)
Q_grid1 = np.sign(Q1) * np.log(abs(Q1))
Q_grid1[np.isinf(Q_grid1)] = np.nan
R2, theta2, phi2, Q2 = parse_QSL_Rbinfile(filename2)
Q_grid2 = np.sign(Q2) * np.log(abs(Q2))
Q_grid2[np.isinf(Q_grid2)] = np.nan

plt.figure(figsize=(10, 10))
plt.subplot(2, 1, 1)
plt.pcolormesh(phi1 * 180.0 / np.pi,
               theta1 * 180.0 / np.pi,
               Q_grid1,
               cmap='RdBu_r',
               vmin=-10,
               vmax=10)
plt.title(r"$R=" + "{:.2f}".format(R1) + r"$ Mm", fontsize=20)
plt.xlabel(r'$\phi$ [$^{\circ}$]', fontsize=20)
plt.ylabel(r'$\theta$ [$^{\circ}$]', fontsize=20)
plt.tick_params(axis='both',
                which='major',
                labelsize=18,
                direction="in",
                bottom=True,
                top=True,
                left=True,
                right=True)

plt.subplot(2, 1, 2)
Example #37
zKuL=array(zKuL)
zXL=array(zXL)
zKaL=array(zKaL)
dL=array(dL)
import pickle
pickle.dump([zKuL,zKaL,zXL,dL],open('iphex0612.pklz','wb'))
hint=h1
x1=100
x2=400
x1=230
x2=250
for i in range(8):
    plt.figure()
    x1=100+i*60
    x2=100+i*60+60
    plt.subplot(311)
    plt.pcolormesh(dL,hint,zKuL.T,vmin=0,vmax=50,cmap="jet")
    plt.xlim(x1,x2)
    plt.colorbar()
    plt.ylim(0,10)
    plt.subplot(312)
    plt.pcolormesh(dL,hint,(zKaL).T,vmin=0,vmax=40,cmap="jet")
    plt.ylim(0,10)
    plt.xlim(x1,x2)
    plt.colorbar()
    plt.subplot(313)
    plt.pcolormesh(dL,hint,zXL.T,vmin=0,vmax=50,cmap="jet")
    plt.ylim(0,10)
    plt.xlim(x1,x2)
    plt.colorbar()
Example #38
# draw visualization of parameter effects

plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
    # evaluate decision function in a grid
    Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    # visualize decision function for these parameters
    plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
    plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
              size='medium')

    # visualize parameter's effect on decision function
    plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
    plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
    plt.xticks(())
    plt.yticks(())
    plt.axis('tight')

# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))

# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
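The comment above is cut off, but the intent is clear: draw the (C, gamma) grid of validation scores as a heatmap. A generic sketch of that step with stand-in values (in the snippet above the scores come from the grid search and C_range / gamma_range from its parameter grid):

import numpy as np
import matplotlib.pyplot as plt

C_range = np.logspace(-2, 4, 7)          # stand-ins for the grid-search ranges
gamma_range = np.logspace(-5, 1, 7)
scores = np.random.rand(len(C_range), len(gamma_range))

plt.figure(figsize=(6, 5))
plt.pcolormesh(scores, cmap=plt.cm.hot)
plt.colorbar(label='validation accuracy')
plt.xticks(np.arange(len(gamma_range)) + 0.5, ['%.0e' % g for g in gamma_range], rotation=45)
plt.yticks(np.arange(len(C_range)) + 0.5, ['%.0e' % c for c in C_range])
plt.xlabel('gamma')
plt.ylabel('C')
plt.show()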
Example #39
def create(path: str,
           name: str,
           header: dict,
           traces: List[Trace],
           old_style: bool = False) -> Figure:
    rcParams.update({'font.size': 9})
    log.info('Making PID plot...')
    fig = plt.figure('Response plot: Log number: ' + header['logNum'] +
                     '          ' + path,
                     figsize=(16, 8))
    # gridspec devides window into 24 horizontal, 3*10 vertical fields
    gs1 = GridSpec(24,
                   3 * 10,
                   wspace=0.6,
                   hspace=0.7,
                   left=0.04,
                   right=1.,
                   bottom=0.05,
                   top=0.97)

    for i, trace in enumerate(traces):
        ax0 = plt.subplot(gs1[0:6, i * 10:i * 10 + 9])
        plt.title(trace.name)
        plt.plot(trace.time, trace.gyro, label=trace.name + ' gyro')
        plt.plot(trace.time, trace.input, label=trace.name + ' loop input')
        plt.ylabel('degrees/second')
        ax0.get_yaxis().set_label_coords(-0.1, 0.5)
        plt.grid()
        tracelim = np.max([np.abs(trace.gyro), np.abs(trace.input)])
        plt.ylim([-tracelim * 1.1, tracelim * 1.1])
        plt.legend(loc=1)
        plt.setp(ax0.get_xticklabels(), visible=False)

        ax1 = plt.subplot(gs1[6:8, i * 10:i * 10 + 9], sharex=ax0)
        plt.hlines(header['tpa_percent'],
                   trace.time[0],
                   trace.time[-1],
                   label='tpa',
                   colors='red',
                   alpha=0.5)
        plt.fill_between(trace.time,
                         0.,
                         trace.throttle,
                         label='throttle',
                         color='grey',
                         alpha=0.2)
        plt.ylabel('throttle %')
        ax1.get_yaxis().set_label_coords(-0.1, 0.5)
        plt.grid()
        plt.xlim([trace.time[0], trace.time[-1]])
        plt.ylim([0, 100])
        plt.legend(loc=1)
        plt.xlabel('log time in s')

        if old_style:
            # response vs. time in color plot
            plt.setp(ax1.get_xticklabels(), visible=False)
            ax2 = plt.subplot(gs1[9:16, i * 10:i * 10 + 9], sharex=ax0)
            plt.pcolormesh(trace.avr_t,
                           trace.time_resp,
                           np.transpose(trace.spec_sm),
                           vmin=0,
                           vmax=2.)
            plt.ylabel('response time in s')
            ax2.get_yaxis().set_label_coords(-0.1, 0.5)
            plt.xlabel('log time in s')
            plt.xlim([trace.avr_t[0], trace.avr_t[-1]])
        else:
            # response vs throttle plot. more useful.
            ax2 = plt.subplot(gs1[9:16, i * 10:i * 10 + 9])
            plt.title(trace.name + ' response', y=0.88, color='w')
            plt.pcolormesh(trace.thr_response['throt_scale'],
                           trace.time_resp,
                           trace.thr_response['hist2d_norm'],
                           vmin=0.,
                           vmax=2.)
            plt.ylabel('response time in s')
            ax2.get_yaxis().set_label_coords(-0.1, 0.5)
            plt.xlabel('throttle in %')
            plt.xlim([0., 100.])

        cmap = plt.cm.get_cmap('Blues')
        cmap._init()
        alphas = np.abs(np.linspace(0., 0.5, cmap.N, dtype=np.float64))
        cmap._lut[:-3, -1] = alphas
        ax3 = plt.subplot(gs1[17:, i * 10:i * 10 + 9])
        plt.contourf(*trace.resp_low[2],
                     cmap=cmap,
                     linestyles=None,
                     antialiased=True,
                     levels=np.linspace(0, 1, 20, dtype=np.float64))
        plt.plot(trace.time_resp,
                 trace.resp_low[0],
                 label=trace.name + ' step response ' + '(<' +
                 str(int(Trace.threshold)) + ') ' + ' PID ' +
                 header[trace.name + 'PID'])

        if trace.high_mask.sum() > 0:
            cmap = plt.cm.get_cmap('Oranges')
            cmap._init()
            alphas = np.abs(np.linspace(0., 0.5, cmap.N, dtype=np.float64))
            cmap._lut[:-3, -1] = alphas
            plt.contourf(*trace.resp_high[2],
                         cmap=cmap,
                         linestyles=None,
                         antialiased=True,
                         levels=np.linspace(0, 1, 20, dtype=np.float64))
            plt.plot(trace.time_resp,
                     trace.resp_high[0],
                     label=trace.name + ' step response ' + '(>' +
                     str(int(Trace.threshold)) + ') ' + ' PID ' +
                     header[trace.name + 'PID'])
        plt.xlim([-0.001, 0.501])

        plt.legend(loc=1)
        plt.ylim([0., 2])
        plt.ylabel('strength')
        ax3.get_yaxis().set_label_coords(-0.1, 0.5)
        plt.xlabel('response time in s')

        plt.grid()

    meanfreq = 1. / (traces[0].time[1] - traces[0].time[0])
    ax4 = plt.subplot(gs1[12, -1])
    t = BANNER + " | Betaflight: Version " + header['version'] + ' | Craftname: ' + header[
        'craftName'] + \
        ' | meanFreq: ' + str(int(meanfreq)) + ' | rcRate/Expo: ' + header['rcRate'] + '/' + header[
            'rcExpo'] + '\nrcYawRate/Expo: ' + header['rcYawRate'] + '/' \
        + header['rcYawExpo'] + ' | deadBand: ' + header['deadBand'] + ' | yawDeadBand: ' + \
        header['yawDeadBand'] \
        + ' | Throttle min/tpa/max: ' + header['minThrottle'] + '/' + header['tpa_breakpoint'] + '/' + \
        header['maxThrottle'] \
        + ' | dynThrPID: ' + header['dynThrottle'] + '| D-TermSP: ' + header[
            'dTermSetPoint'] + '| vbatComp: ' + header['vbatComp']

    plt.text(0,
             0,
             t,
             ha='left',
             va='center',
             rotation=90,
             color='grey',
             alpha=0.5,
             fontsize=TEXTSIZE)
    ax4.axis('off')
    log.info('Saving as image...')
    plt.savefig(path[:-13] + name + '_' + str(header['logNum']) +
                '_response.png')
    return fig
Example #40
def _imshow_grid_values(grid, values, plot_name=None, var_name=None,
                        var_units=None, grid_units=(None, None),
                        symmetric_cbar=False, cmap='pink', limits=None,
                        colorbar_label = None,
                        allow_colorbar=True, vmin=None, vmax=None,
                        norm=None, shrink=1., color_for_closed='black',
                        color_for_background=None, show_elements=False,
                        output=None):

    gridtypes = inspect.getmro(grid.__class__)

    cmap = plt.get_cmap(cmap)
    if color_for_closed is not None:
        cmap.set_bad(color=color_for_closed)
    else:
        cmap.set_bad(alpha=0.)

    if isinstance(grid, RasterModelGrid):
        if values.ndim != 2:
            raise ValueError('values must have ndim == 2')

        y = np.arange(values.shape[0] + 1) * grid.dy - grid.dy * .5
        x = np.arange(values.shape[1] + 1) * grid.dx - grid.dx * .5

        kwds = dict(cmap=cmap)
        (kwds['vmin'], kwds['vmax']) = (values.min(), values.max())
        if (limits is None) and ((vmin is None) and (vmax is None)):
            if symmetric_cbar:
                (var_min, var_max) = (values.min(), values.max())
                limit = max(abs(var_min), abs(var_max))
                (kwds['vmin'], kwds['vmax']) = (- limit, limit)
        elif limits is not None:
            (kwds['vmin'], kwds['vmax']) = (limits[0], limits[1])
        else:
            if vmin is not None:
                kwds['vmin'] = vmin
            if vmax is not None:
                kwds['vmax'] = vmax

        if np.isclose(grid.dx, grid.dy):
            if values.size == grid.number_of_nodes:
                myimage = plt.imshow(
                    values.reshape(grid.shape), origin='lower',
                    extent=(x[0], x[-1], y[0], y[-1]), **kwds)
            else:  # this is a cell grid, and has been reshaped already...
                myimage = plt.imshow(values, origin='lower',
                                     extent=(x[0], x[-1], y[0], y[-1]), **kwds)
        myimage = plt.pcolormesh(x, y, values, **kwds)

        plt.gca().set_aspect(1.)
        plt.autoscale(tight=True)

        if allow_colorbar:
            cb = plt.colorbar(norm=norm, shrink=shrink)
            if colorbar_label:
                cb.set_label(colorbar_label)
    elif VoronoiDelaunayGrid in gridtypes:
        # This is still very much ad-hoc, and needs prettifying.
        # We should save the modifications needed to plot color all the way
        # to the diagram edge *into* the grid, for faster plotting.
        # (see http://stackoverflow.com/questions/20515554/...
        # colorize-voronoi-diagram)
        # (This technique is not implemented yet)
        from scipy.spatial import voronoi_plot_2d
        import matplotlib.colors as colors
        import matplotlib.cm as cmx
        cm = plt.get_cmap(cmap)

        if (limits is None) and ((vmin is None) and (vmax is None)):
            # only want to work with NOT CLOSED nodes
            open_nodes = grid.status_at_node != 4
            if symmetric_cbar:
                (var_min, var_max) = (values.flat[
                    open_nodes].min(), values.flat[open_nodes].max())
                limit = max(abs(var_min), abs(var_max))
                (vmin, vmax) = (- limit, limit)
            else:
                (vmin, vmax) = (values.flat[
                    open_nodes].min(), values.flat[open_nodes].max())
        elif limits is not None:
            (vmin, vmax) = (limits[0], limits[1])
        else:
            open_nodes = grid.status_at_node != 4
            if vmin is None:
                vmin = values.flat[open_nodes].min()
            if vmax is None:
                vmax = values.flat[open_nodes].max()

        cNorm = colors.Normalize(vmin, vmax)
        scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
        colorVal = scalarMap.to_rgba(values)

        if show_elements:
            myimage = voronoi_plot_2d(grid.vor, show_vertices=False,
                                      show_points=False)

        # show_points to be supported in scipy0.18, but harmless for now
        mycolors = (i for i in colorVal)
        for order in grid.vor.point_region:
            region = grid.vor.regions[order]
            colortouse = next(mycolors)
            if -1 not in region:
                polygon = [grid.vor.vertices[i] for i in region]
                plt.fill(*zip(*polygon), color=colortouse)

        plt.gca().set_aspect(1.)
        # plt.autoscale(tight=True)
        # Tempting though it is to move the boundary outboard of the outermost
        # nodes (e.g., to the outermost corners), this is a bad idea, as the
        # outermost cells tend to have highly elongated shapes which make the
        # plot look stupid
        plt.xlim((np.min(grid.node_x), np.max(grid.node_x)))
        plt.ylim((np.min(grid.node_y), np.max(grid.node_y)))

        scalarMap.set_array(values)
        if allow_colorbar:
            cb = plt.colorbar(scalarMap, shrink=shrink)

    if grid_units[1] is None and grid_units[0] is None:
        grid_units = grid.axis_units
        if grid_units[1] == '-' and grid_units[0] == '-':
            plt.xlabel('X')
            plt.ylabel('Y')
        else:
            plt.xlabel('X (%s)' % grid_units[1])
            plt.ylabel('Y (%s)' % grid_units[0])
    else:
        plt.xlabel('X (%s)' % grid_units[1])
        plt.ylabel('Y (%s)' % grid_units[0])

    if plot_name is not None:
        plt.title('%s' % (plot_name))

    if var_name is not None or var_units is not None:
        if var_name is not None:
            assert type(var_name) is str
            if var_units is not None:
                assert type(var_units) is str
                colorbar_label = var_name + ' (' + var_units + ')'
            else:
                colorbar_label = var_name
        else:
            assert type(var_units) is str
            colorbar_label = '(' + var_units + ')'
        assert type(colorbar_label) is str
        assert allow_colorbar
        cb.set_label(colorbar_label)

    if color_for_background is not None:
        plt.gca().set_facecolor(color_for_background)

    if output is not None:
        if type(output) is str:
            plt.savefig(output)
            plt.clf()
        elif output:
            plt.show()
def keo_sp_calibration(fit_path,masterdark_fname,solve_pars_fname,save_fname=None,area_rad=3, med_size=15,lat_deg=55.9305361,lon_deg=48.7444861,hei_m=91.):
    if save_fname is None:
        save_fname=solve_pars_fname.split('/')[-1][0:-11]+'.spcal'

    hdulist = fits.open(masterdark_fname,ignore_missing_end=True)
    masterdark=hdulist[0].data
    hdulist.close()

    err, az0,alt0,a,b,c,d=get_solve_pars(solve_pars_fname)
    M_s1c = get_scale_and_orientation_info(c,d)

    spath="./.temp/"
    if not os.path.exists(spath):
        os.makedirs(spath)
    fit_filenames=[fit_path+'/'+fn for fn in next(os.walk(fit_path))[2]]

    R_median=np.zeros(len(fit_filenames))
    R_std=np.zeros(len(fit_filenames))
    
    fig=plt.figure(figsize=(12.8,7.2))
    fig.set_size_inches(12.8, 7.2)

#     ax=plt.axes(position=[0.035000000000000003+dx/2, 0.061764705882352888, 0.50624999999999998, 0.89999999999999991])
    ax=plt.axes(position=[0.035000000000000003+dx/2, 0.26, 0.50624999999999998, 0.7])
    plt.axis('off')
    ax1=plt.axes(position=[0.58205882352941185+dx, 0.71052941176470596-0.15, 0.35,0.4])
    plt.grid()
    ax2=plt.axes(position=[0.58205882352941185+dx, 0.061764705882352888, 0.35, 0.4])
    plt.grid()

    ax3=plt.axes(position=[0.035000000000000003+dx/2,0.04, 0.5/4, 0.7/4])
    plt.grid(b=True)
    ax4=plt.axes(position=[0.035000000000000003+dx/2+0.19,0.04, 0.5/4, 0.7/4])
    plt.grid(b=True)
    ax5=plt.axes(position=[0.035000000000000003+dx/2+0.38,0.04, 0.5/4, 0.7/4])
    plt.grid(b=True)

    R_day=0

#     vrange=1000
#     vshift=2000

    alt_min=44*np.pi/180
    alt_max=62*np.pi/180

#     area_rad=3
#     med_size=31

    XX,YY=np.meshgrid(range(2*area_rad+1),range(2*area_rad+1))

    fid=open(save_fname,'w')
    fid.write("# Camera calibration coefficients [Rayleighs per ADC unit] for each fit file:\n")

    for i in range(len(fit_filenames)):
#     for i in range(5):
#     for i in range(109,120):
        sys.stdout.write('\r')
        sys.stdout.write("Processing frame "+str(i+1)+"/"+str(len(fit_filenames)))
        sys.stdout.flush()
        fit_fname=fit_filenames[i]
        hdulist = fits.open(fit_fname,ignore_missing_end=True)
        img=hdulist[0].data.astype('float')
        img0=np.copy(img)
        img=img-masterdark.astype('float')
        img_medfilt=img - ss.medfilt(img,kernel_size=med_size)
#         np.save('img.npy',img)
        hdulist.close()

        med_img=np.median(img)
        max_img0=np.max(img0)
#         print(max_img0)

        keo_site=EarthLocation(lat=lat_deg*u.deg, lon=lon_deg*u.deg, height=hei_m*u.m)
        date_obs=keo_get_date_obs(fit_fname)

        png_prefix=spath+"frame_keo_"+str(date_obs.year)[2::]+"{0:0>2}".format(date_obs.month)+"{0:0>2}".format(date_obs.day)+"_"

        BS_coord=SkyCoord(RA, DEC, frame='icrs', unit='rad');
        altaz=BS_coord.transform_to(AltAz(obstime=date_obs, location=keo_site,temperature=20*u.deg_C,pressure=1013*u.hPa,
                                               relative_humidity=0.5,obswl=630.0*u.nm))
        AZ=altaz.az.rad
        ALT=altaz.alt.rad

        X,Y = arc_hor2pix(AZ,ALT,az0,alt0,c,d)

        # Catalog filtration
        filt_mask=np.zeros(len(NUM),dtype=bool)
        for j in range(len(NUM)):
            if ALT[j]>=alt_min and ALT[j]<=alt_max and X[j]>=10 and Y[j]>=10 and X[j]<=img.shape[1]-10 and Y[j]<=img.shape[0]-10:
                filt_mask[j]=True
        NUM_filt=NUM[filt_mask]
        BS_ID_filt=BS_ID[filt_mask]
        RA_filt=RA[filt_mask]
        DEC_filt=DEC[filt_mask]
        MAG_filt=MAG[filt_mask]
        FLUX_filt=FLUX[filt_mask]
        SP_type_filt=[SP_type[j] for j in range(len(SP_type)) if filt_mask[j]==True]
        ALT_filt=ALT[filt_mask]
        AZ_filt=AZ[filt_mask]
        X_filt=X[filt_mask]
        Y_filt=Y[filt_mask]

        star_pixels=np.zeros(len(NUM_filt),dtype=int)
        star_adc=np.zeros(len(NUM_filt))

        AREA=np.zeros((2*area_rad+1,2*area_rad+1,len(NUM_filt)))
        AREA0=np.zeros((2*area_rad+1,2*area_rad+1,len(NUM_filt)))

        for j in range(len(NUM_filt)):
            sum_temp=0
            num=0
            AREA[:,:,j]=np.copy(img_medfilt[int(Y_filt[j])-area_rad:int(Y_filt[j])+area_rad+1, int(X_filt[j])-area_rad:int(X_filt[j])+area_rad+1])
            AREA0[:,:,j]=np.copy(img0[int(Y_filt[j])-area_rad:int(Y_filt[j])+area_rad+1, int(X_filt[j])-area_rad:int(X_filt[j])+area_rad+1])

            arg_max=np.argmax(AREA[:,:,j])
#             print("max=", np.max(AREA[:,:,j]))
            x0=XX.flat[arg_max]
            y0=YY.flat[arg_max]

            AREA[:,:,j]=np.copy(img_medfilt[int(Y_filt[j])-area_rad+y0-area_rad:int(Y_filt[j])+y0+1, int(X_filt[j])-area_rad+x0-area_rad:int(X_filt[j])+x0+1])
            AREA0[:,:,j]=np.copy(img0[int(Y_filt[j])-area_rad+y0-area_rad:int(Y_filt[j])+y0+1, int(X_filt[j])-area_rad+x0-area_rad:int(X_filt[j])+x0+1])

            Rast=np.sqrt((XX-area_rad)**2+(YY-area_rad)**2)

#             print(Rast)

            area=np.copy(AREA[:,:,j])

            for k in range((2*area_rad+1)**2):
                if Rast.flat[k]<=area_rad:
                    num+=1
                    sum_temp+=AREA[:,:,j].flat[k]
            star_pixels[j]=num
            star_adc[j]=sum_temp
#             print(star_pixels[j],star_adc[j])
#             print(AREA[:,:,j])
#             print(" ")


        # Catalog filtration 2
        filt_mask=np.zeros(len(NUM_filt),dtype=bool)
        for j in range(len(NUM_filt)):
            if np.max(AREA0[:,:,j])<0.9*max_img0:
                if np.max(AREA[:,:,j])>1000:
                    filt_mask[j]=True

        AREA_filt2=np.copy(AREA[:,:,filt_mask])
        AREA0_filt2=np.copy(AREA0[:,:,filt_mask])

        NUM_filt2=NUM_filt[filt_mask]
        BS_ID_filt2=BS_ID_filt[filt_mask]
        RA_filt2=RA_filt[filt_mask]
        DEC_filt2=DEC_filt[filt_mask]
        MAG_filt2=MAG_filt[filt_mask]
        FLUX_filt2=FLUX_filt[filt_mask]
        SP_type_filt2=[SP_type_filt[j] for j in range(len(SP_type_filt)) if filt_mask[j]==True]
        X_filt2=X_filt[filt_mask]
        Y_filt2=Y_filt[filt_mask]
        ALT_filt2=ALT_filt[filt_mask]
        AZ_filt2=AZ_filt[filt_mask]
        star_pixels_filt2=star_pixels[filt_mask]
        star_adc_filt2=star_adc[filt_mask]

        R=np.zeros(len(NUM_filt2))
        for j in range(len(NUM_filt2)):
            sol_angle=tan_get_pixel_solid_angle(M_s1c,np.pi/2-ALT_filt2[j])
            br=get_brightness_in_Rayleighs(20,sol_angle,1,FLUX_filt2[j])
            sa=star_adc_filt2[j]
            R[j]=br/sa
#             print('br[R]=', br, "; sa[ADC.u]=",sa,"; R=",R[j])

        x_bord=np.arange(2*area_rad+1,dtype=float)
        y1_bord=np.sqrt(area_rad**2-(x_bord-area_rad)**2)+area_rad+0.5
        y2_bord=-np.sqrt(area_rad**2-(x_bord-area_rad)**2)+area_rad+0.5

        if len(NUM_filt2)>0:

            sort_ord=np.argsort(star_pixels_filt2)

            plt.sca(ax3)
            idd=0
            area1=AREA_filt2[:,:,sort_ord[idd]]
            area1_max=np.max(AREA0_filt2[:,:,sort_ord[idd]])
            area1_id=BS_ID_filt2[sort_ord[idd]]
            plt.pcolormesh(area1, cmap="seismic",vmin=-1000, vmax=1000)

            plt.plot(x_bord+0.5,y1_bord,'k-',lw=2)
            plt.plot(x_bord+0.5,y2_bord,'k-',lw=2)

            plt.ylim(area_rad*2+1,0)
            plt.xlim(0,area_rad*2+1)
            plt.title(str(area1_id),loc='left',fontsize='smaller')
            plt.title(str(int(star_adc_filt2[sort_ord[idd]])),loc='right',fontsize='smaller')
            plt.title(str(int(R[sort_ord[idd]]*1000)/1000),fontsize='smaller')
            plt.ylabel(str(int(R[sort_ord[idd]]*star_adc_filt2[sort_ord[idd]])))

            if len(NUM_filt2)>1:
                plt.sca(ax4)
                idd=1
                area2=AREA_filt2[:,:,sort_ord[idd]]
                area2_max=np.max(AREA0_filt2[:,:,sort_ord[idd]])
                area2_id=BS_ID_filt2[sort_ord[idd]]
                plt.pcolormesh(area2, cmap="seismic",vmin=-1000, vmax=1000)

                plt.plot(x_bord+0.5,y1_bord,'k-',lw=2)
                plt.plot(x_bord+0.5,y2_bord,'k-',lw=2)

                plt.ylim(area_rad*2+1,0)
                plt.xlim(0,area_rad*2+1)
                plt.title(str(area2_id),loc='left',fontsize='smaller')
                plt.title(str(int(star_adc_filt2[sort_ord[idd]])),loc='right',fontsize='smaller')
                plt.title(str(int(R[sort_ord[idd]]*1000)/1000),fontsize='smaller')
                plt.ylabel(str(int(R[sort_ord[idd]]*star_adc_filt2[sort_ord[idd]])))
            if len(NUM_filt2)>2:
                plt.sca(ax5)
                idd=2
                area3=AREA_filt2[:,:,sort_ord[idd]]
                area3_max=np.max(AREA0_filt2[:,:,sort_ord[idd]])
                area3_id=BS_ID_filt2[sort_ord[idd]]
                plt.pcolormesh(area3, cmap="seismic",vmin=-1000, vmax=1000)

                plt.plot(x_bord+0.5,y1_bord,'k-',lw=2)
                plt.plot(x_bord+0.5,y2_bord,'k-',lw=2)

                plt.ylim(area_rad*2+1,0)
                plt.xlim(0,area_rad*2+1)
                plt.title(str(area3_id),loc='left',fontsize='smaller')
                plt.title(str(int(star_adc_filt2[sort_ord[idd]])),loc='right',fontsize='smaller')
                plt.title(str(int(R[sort_ord[idd]]*1000)/1000),fontsize='smaller')
                plt.ylabel(str(int(R[sort_ord[idd]]*star_adc_filt2[sort_ord[idd]])))



        # print(len(R),R)
        if len(R)>0:
            R_median[i]=np.median(R)
            temp=(R-R_median[i])**2
            R_std[i]=np.sqrt(np.median(temp))
        #     print(R_median[i])

        plt.sca(ax1)
        plt.plot([0, 50000], [R_median[i], R_median[i]], c='r', lw=2)
        plt.scatter(R*star_adc_filt2,R,s=np.pi*(star_adc_filt2/12000*7)**2)
#         print(R[0]*star_adc_filt2[0])
#         print(R[0]*star_adc_filt2[0]*star_pixels_filt2[0])

        for j in range(len(X_filt2)):
            plt.text(R[j]*star_adc_filt2[j],R[j],str(BS_ID_filt2[j])+"_"+str(star_pixels_filt2[j]))
        plt.ylabel('Calibration coef. [R/ADCu]')
        plt.xlabel('Star summed brightness [R]')
        plt.title(date_obs, loc='left')
        plt.title(R_median[i], loc='right')
        ax1.set_xlim((0, 30000))
        ax1.set_ylim((0, 1))
        plt.grid(b=True)

        plt.sca(ax2)
        plt.xlim([0,len(fit_filenames)-1])
        plt.ylim([0,1])
        plt.plot(i,R_median[i],"r.")
        plt.grid(b=True)
        plt.ylabel('Calibration coef. [R/ADCu]')
        plt.xlabel('frame number')
        if i==len(fit_filenames)-1:
            R_day=np.median(R_median[np.where(R_median>0)])
            plt.plot([0, 5000], [R_day, R_day], c='b', lw=2)
            plt.title(R_day, loc='right')

        plt.sca(ax)
        plt.pcolormesh(img, cmap="gray", vmin=np.median(img)-300, vmax=np.median(img)+300)
        plt.axis('equal')
        plt.plot(X,Y,marker="o", lw=0.,mew=mew,mec="b", mfc='none',ms=ms)
        plt.plot(X_filt,Y_filt,marker="o", lw=0.,mew=mew,mec="g", mfc='none',ms=ms)
        plt.plot(X_filt2,Y_filt2,marker="o", lw=0.,mew=mew,mec="r", mfc='none',ms=ms)
        for j in range(len(X_filt2)):
            plt.text(X_filt2[j],Y_filt2[j],str(BS_ID_filt2[j])+"_" + "{0:4.2f}".format(R[j]), color='w')
        ax.set_xlim((0,img.shape[1]))
        ax.set_ylim((img.shape[0],0))
        plt.title(fit_fname.split('/')[-1] + " " + "{0:0>2}".format(date_obs.hour) + ":" + "{0:0>2}".format(date_obs.minute) + ":" + "{0:0>2}".format(date_obs.second))
    #     plt.show()
        plt.axis('off')
        png_fname=png_prefix+"{0:0>4}".format(i+1)+".png"
    #   print(png_fname)
        plt.savefig(png_fname)
        ax.clear()
        ax1.clear()
        ax3.clear()
        ax4.clear()
        ax5.clear()

        fid.write(fit_fname.split('/')[-1] + " " + str(R_median[i])+ " " + str(R_std[i]) + "\n")

    plt.close()
    sys.stdout.write('\n')
    sys.stdout.flush()
    fid.close()

    return png_prefix, len(fit_filenames), R_median
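A hypothetical call, for orientation only; the directory and file names below are placeholders rather than paths from the original code, and they assume the helper functions used inside keo_sp_calibration are importable in the same module:

png_prefix, n_frames, R_median = keo_sp_calibration(
    fit_path='./data/keo_night01',                     # directory with the night's FIT frames (placeholder)
    masterdark_fname='./data/masterdark.fit',          # placeholder
    solve_pars_fname='./data/keo_night01.solvepars',   # placeholder
    area_rad=3, med_size=15)
print('nightly median calibration [R/ADCu]:', np.median(R_median[R_median > 0]))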
Example #42
0
lost = 0
nothingness = 0
rewards = []
fig = plt.figure(figsize=(20, 20))
for ep in range(TOT_EPISODES):
    anim = []
    win = 0
    tot_reward = 0
    mondo = World(WORLD_DIM, bilbo=bilbo, obstacle=False, random_spawn=True)
    #do deep Q-stuff
    game_ended = False
    epoch = 0
    current_state = bilbo.get_state()
    env = mondo.create_env(d)

    anim.append((plt.pcolormesh(env, cmap='CMRmap'), ))
    while not game_ended and epoch < MAX_EPOCH:
        # the nearer it gets to the dragon, the more random the movement
        epoch += 1
        mondo.move_dragon()
        action = bilbo.get_action(0, possible_moves)
        bilbo.move(inverse_possible_moves[action])()
        new_state = bilbo.get_state()
        reward = bilbo.reward(current_state, new_state)
        if reward not in [-2, 1]:
            tot_reward += reward
        game_ended = bilbo.game_ended()
        current_state = new_state

        if reward == TREASURE_REWARD:
            win += 1
Example #43
0
def visualize_feats(sound, features='fbank', win_size_ms = 20, \
    win_shift_ms = 10,num_filters=40,num_mfcc=40, samplerate=None,\
        save_pic=False, name4pic=None):
    '''Visualize feature extraction depending on set parameters
    
    Parameters
    ----------
    sound : str or numpy.ndarray
        If str, wavfile (must be compatible with scipy.io.wavfile). Otherwise 
        the samples of the sound data. Note: in the latter case, `samplerate`
        must be declared.
    features : str
        Either 'mfcc' or 'fbank' features. MFCC: mel frequency cepstral
        coefficients; FBANK: mel-log filterbank energies (default 'fbank')
    win_size_ms : int or float
        Window length in milliseconds for Fourier transform to be applied
        (default 20)
    win_shift_ms : int or float 
        Window overlap in milliseconds; default set at 50% window size 
        (default 10)
    num_filters : int
        Number of mel-filters to be used when applying mel-scale. For 
        'fbank' features, 20-128 are common, with 40 being very common.
        (default 40)
    num_mfcc : int
        Number of mel frequency cepstral coefficients. First coefficient
        pertains to loudness; 2-13 frequencies relevant for speech; 13-40
        for acoustic environment analysis or non-linguistic information.
        Note: it is not possible to choose only 2-13 or 13-40; if `num_mfcc`
        is set to 40, all 40 coefficients will be included.
        (default 40). 
    samplerate : int, optional
        The sample rate of the sound data or the desired sample rate of
        the wavfile to be loaded. (default None)
    '''
    data, sr = load_sound(sound, samplerate=samplerate)
    win_samples = int(win_size_ms * sr // 1000)
    if 'fbank' in features:
        feats = logfbank(data,
                         samplerate=sr,
                         winlen=win_size_ms * 0.001,
                         winstep=win_shift_ms * 0.001,
                         nfilt=num_filters,
                         nfft=win_samples)
        axis_feature_label = 'Mel Filters'
    elif 'mfcc' in features:
        feats = mfcc(data,
                     samplerate=sr,
                     winlen=win_size_ms * 0.001,
                     winstep=win_shift_ms * 0.001,
                     nfilt=num_filters,
                     numcep=num_mfcc,
                     nfft=win_samples)
        axis_feature_label = 'Mel Freq Cepstral Coefficients'
    else:
        raise ValueError("features must be 'fbank' or 'mfcc', got {!r}".format(features))
    feats = feats.T
    plt.clf()
    plt.pcolormesh(feats)
    plt.xlabel('Frames (each {} ms)'.format(win_size_ms))
    plt.ylabel('Num {}'.format(axis_feature_label))
    plt.title('{} Features'.format(features.upper()))
    if save_pic:
        outputname = name4pic or 'visualize{}feats'.format(features.upper())
        plt.savefig('{}.png'.format(outputname))
    else:
        plt.show()
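A hypothetical usage sketch; the wav file name is a placeholder, and it assumes load_sound, logfbank and mfcc are available in scope exactly as visualize_feats above expects:

# plot 13 MFCCs with a 25 ms window and a 10 ms hop, and save the figure
visualize_feats('speech_sample.wav', features='mfcc', num_mfcc=13,
                win_size_ms=25, win_shift_ms=10,
                save_pic=True, name4pic='speech_sample_mfcc')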
Example #44
0
def fitting(Z,
            n,
            remain3D=False,
            remain2D=False,
            barchart=False,
            interferogram=False,
            removepiston=True):
    """
	------------------------------------------------
	fitting(Z,n)

	Fitting an aberration to several orthonormal Zernike
	polynomials.

	Return: n-th Zernike coefficients for a fitting surface aberration
			Zernike coefficients barchart
			Remaining aberration
			Fiting surface plot
	Input:
	Z: A surface or aberration matrix measure from inteferometer
	   or something else.

	n: How many order of Zernike Polynomials you want to fit

	reamin(default==Flase): show the surface after remove fitting
	aberrations.

	removepiston: if remove piston, default = True
	------------------------------------------------
	"""

    fitlist = []
    l = len(Z)
    x2 = __np__.linspace(-1, 1, l)
    y2 = __np__.linspace(-1, 1, l)
    [X2, Y2] = __np__.meshgrid(x2, y2)
    r = __np__.sqrt(X2**2 + Y2**2)
    u = __np__.arctan2(Y2, X2)
    for i in range(n):
        C = [0] * i + [1] + [0] * (37 - i - 1)
        ZF = __zernikepolar__(C, r, u)
        ZF[r > 1] = 0   # keep the basis polynomial on the unit disk only
        a = sum(sum(Z * ZF)) * 2 * 2 / l / l / __np__.pi   # discrete inner product divided by pi
        fitlist.append(round(a, 3))

    l1 = len(fitlist)
    fitlist = fitlist + [0] * (37 - l1)
    Z_new = Z - __zernikepolar__(fitlist, r, u)
    Z_new[r > 1] = 0   # mask the residual outside the unit disk

    #plot bar chart of zernike
    if barchart == True:
        fitlist1 = fitlist[0:n]
        index = __np__.arange(n)
        fig = __plt__.figure(figsize=(9, 6), dpi=80)
        xticklist = []
        width = 0.6
        for i in index:
            xticklist.append('Z' + str(i + 1))
        barfigure = __plt__.bar(index,
                                fitlist1,
                                width,
                                color='#2E9AFE',
                                edgecolor='#2E9AFE')
        __plt__.xticks(index + width / 2, xticklist)
        __plt__.xlabel('Zernike Polynomials', fontsize=18)
        __plt__.ylabel('Coefficient', fontsize=18)
        __plt__.title('Fitting Zernike Polynomials Coefficient', fontsize=18)

        __plt__.show()
    else:
        pass

    if remain3D == True:

        fig = __plt__.figure(figsize=(12, 8), dpi=80)
        ax = fig.add_subplot(111, projection='3d')  # fig.gca(projection=...) is no longer accepted by newer Matplotlib
        surf = ax.plot_surface(X2,
                               Y2,
                               Z_new,
                               rstride=1,
                               cstride=1,
                               cmap=__cm__.RdYlGn,
                               linewidth=0,
                               antialiased=False,
                               alpha=0.6)
        v = max(abs(Z.max()), abs(Z.min()))
        ax.set_zlim(-v, v)
        ax.zaxis.set_major_locator(__LinearLocator__(10))
        ax.zaxis.set_major_formatter(__FormatStrFormatter__('%.02f'))
        cset = ax.contourf(X2,
                           Y2,
                           Z_new,
                           zdir='z',
                           offset=-v,
                           cmap=__cm__.RdYlGn)
        fig.colorbar(surf, shrink=1, aspect=30)
        __plt__.title('Remaining Aberration', fontsize=18)
        p2v = round(__tools__.peak2valley(Z_new), 5)
        rms1 = round(__tools__.rms(Z_new), 5)
        label_new = "P-V: " + str(p2v) + "\n" + "RMS: " + str(rms1)
        ax.text2D(0.02, 0.1, label_new, transform=ax.transAxes)
        __plt__.show()
    else:
        pass

    if remain2D == True:
        fig = __plt__.figure(figsize=(9, 6), dpi=80)
        ax = fig.gca()
        im = __plt__.pcolormesh(X2, Y2, Z_new, cmap=__cm__.RdYlGn)
        __plt__.colorbar()
        __plt__.title('Remaining Aberration', fontsize=18)
        ax.set_aspect('equal', 'datalim')
        __plt__.show()
    else:
        pass

    if interferogram == True:
        zernike_coefficient = Coefficient(fitlist)
        __interferometer__.twyman_green(zernike_coefficient)
    else:
        pass
    if removepiston == True:
        fitlist[0] = 0
    else:
        pass
    C = Coefficient(fitlist)  #output zernike Coefficient class
    __tools__.zernikeprint(fitlist)
    return fitlist, C
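A minimal numerical sanity check of the discrete inner product used in fitting() above, written with plain numpy; the Noll-normalized defocus term sqrt(3)*(2*r**2 - 1) is an assumption about the basis returned by __zernikepolar__, not something taken from this file:

import numpy as np

l = 200
x = np.linspace(-1, 1, l)
X, Y = np.meshgrid(x, x)
r = np.sqrt(X**2 + Y**2)

Z4 = np.sqrt(3.0) * (2.0 * r**2 - 1.0) * (r <= 1.0)   # Noll-normalized defocus on the unit disk
Z = 0.5 * Z4                                          # synthetic aberration with coefficient 0.5

# same estimate as in fitting(): sum(Z * ZF) * dx * dy / pi
a = np.sum(Z * Z4) * (2.0 / l) * (2.0 / l) / np.pi
print(round(a, 3))                                    # close to 0.5, the known coefficient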
Example #45
0
    def visualize_pred(self, image_in, tfrecord_file, predictions_list):

        # Pipeline of dataset and iterator
        dataset = tf.data.TFRecordDataset([tfrecord_file])
        dataset = dataset.map(parse)
        iterator = dataset.make_one_shot_iterator()
        next_image_data = iterator.get_next()

        num_of_stixels = len(predictions_list)
        print('Number of stixels to be processed: {}'.format(num_of_stixels))

        # Go through the TFRecord and reconstruct the images + predictions

        # Init new image
        new_im = Image.new('RGB', (self.image_width, self.H))
        grid = np.zeros((self.grid_y_width, self.grid_x_width))
        x_offset = 0
        first_time = True
        fig, ax = plt.subplots()

        # Go through all the stixels in the tfrecord file
        #for i in range(num_of_stixels):
        for i in range(num_of_stixels):
            image_data = self.sess.run(next_image_data)
            image = image_data[0]['image']
            im = Image.fromarray(np.uint8(image))
            frame_id = image_data[1]
            prediction = predictions_list[i]['classes']
            prediction_softmax = predictions_list[i]['probabilities']

            #####################################################################################
            ## Collect all image predictions into a new array to be filtered by CRF++/CRFSuite ##
            #####################################################################################

            #label = image_data[1]
            #frame_id = image_data[2]
            #frame_name = image_data[3]

            # Collect and visualize stixels
            new_im.paste(im, (frame_id * 5, 0))
            x_offset += 5
            if self.debug_image:
                plt.plot(int(params.image_width / 2) + 5 * (frame_id), prediction * self.prediction_to_pixels, marker='o', markersize=4, color="red")
            # visualize probabilities
            grid[:,frame_id] = prediction_softmax
            plt.draw()

        # Use CRF to find the best path
        from code.crf import viterbi
        best_path = viterbi(grid.T, N, T, W_trans)

        # Plot the CRF border line
        best_path_points = []
        for index, path in enumerate(best_path):
            best_path_points.append([int(params.image_width / 2) + index * 5, path * 5 + self.y_spacing])
        plt.plot(np.array(best_path_points)[:,0], np.array(best_path_points)[:,1], color="blue", linewidth=self.plot_border_width)

        # If labeles exist and not in debug mode, plot the labels
        annotation_in = image_in.replace('.jpg', '.csv')
        if os.path.exists(annotation_in):
            del_y = self.image_size[1] - self.H
            #del_y = self.image_size[1] - 370
            # Init from the CSV file
            with open(annotation_in) as csv_file:
                csv_reader = csv.reader(csv_file, delimiter=',')
                label_coords = []
                for index, row in enumerate(csv_reader):
                    new_tuple = tuple(row)
                    x_coord = int(new_tuple[0])
                    y_coord = max(int(new_tuple[1]) - del_y, 0)
                    plt.plot(x_coord, y_coord, marker='o', markersize=5, color="red")
                    label_coords.append([x_coord, y_coord])

            # Compute the prediction accuracy


            # If not in debug mode, display the labels
            if not self.debug_image:
                plt.plot(np.array(label_coords)[:, 0], np.array(label_coords)[:, 1], color="red", linewidth=1.0)



        if self.debug_image:
            #In debug mode plot the softmax probabilities
            grid = np.ma.masked_array(grid, grid < .0001)
            plt.pcolormesh(self.X, self.Y, grid, norm=colors.LogNorm(), alpha = 0.75)
        plt.imshow(new_im, cmap='gray', alpha=1.0, interpolation='none')

        name = ' {} N{}_T{}_Tr{}.jpg'.format(self.model_name, N, T, W_trans)
        if self.debug_image:
            name = ' {} N{}_T{}_Tr{} debug.jpg'.format(self.model_name, N, T, W_trans)
            print('replacing name to indicate debug !!!!!!')
            print(name)

        image_out_name = os.path.basename(image_in)
        image_out_name = image_out_name.replace('.jpg', name)
        image_out_name = os.path.basename(image_out_name)
        image_file_name = os.path.join(self.out_folder, image_out_name)
        plt.savefig(image_file_name, format='jpg')
        print('saving fig to ', image_file_name)

        if self.show_images:
            plt.show()
        plt.close()
Example #46
0
Quasi_linear_kernel = partial(get_Quasi_linear_Kernel.get_KernelMatrix,RMat=RMat)

clf = svm.SVC(kernel=Quasi_linear_kernel)
clf.fit(X, Y)


#========================== plot data face ================================
plot_step = 0.01
x_min = X[:, 0].min() 
x_max = X[:, 0].max() 
y_min = X[:, 1].min() 
y_max = X[:, 1].max() 
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step))
Z = myTree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx,yy,Z, cmap=plt.cm.Paired)
plt.imshow(Z, interpolation='nearest', cmap=plt.cm.PuOr_r)
plt.contour(xx, yy, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
        levels=[-.5, 0, .5],linewidths=2)

Z_svm = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z_svm = Z_svm.reshape(xx.shape)
contours = plt.contourf(xx,yy,Z_svm, cmap=plt.cm.Paired)

# another way to pass the kernel matrix
K_train = Quasi_linear_kernel(X,X)
K_test = Quasi_linear_kernel(np.c_[xx.ravel(), yy.ravel()],X)
clf = svm.SVC(kernel='precomputed' , C=100)
clf.fit(K_train, Y )
Z = clf.predict(K_test)
Z = Z.reshape(xx.shape)
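The precomputed-kernel predictions Z computed just above are never drawn; a plausible follow-up, reusing only arrays already defined in this snippet, would be:

plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.5)   # decision regions from the precomputed kernel
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.show()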
Example #47
0
    #databaru = (np.max(data_np))
    #print(databaru)

    #data = np.array(data_int, dtype='b')
    #f, t, Sxx = signal.spectrogram(yf, RATE, nperseg=64)
    f, t, Sxx = signal.spectrogram(data_np,
                                   RATE,
                                   nperseg=64,
                                   nfft=256,
                                   noverlap=60)
    #f, t, Sxx = signal.spectrogram(yf, RATE, noverlap=250)
    #f, t, Sxx = signal.spectrogram(data_np, fs=CHUNK)
    dBS = 10 * np.log10(Sxx)  #convert db
    #plt.pcolormesh(t, f, dBS)
    plt.pcolormesh(t, f, dBS, cmap='nipy_spectral')
    #plt.pcolormesh(t, f, dBS)
    #plt.show()
    # (any other Matplotlib colormap name could be passed as cmap here)
    widths = np.arange(1, 50)
    cwtmatr = signal.cwt(data_np, signal.ricker, widths)
    #scales = mlpy.wavelet.autoscales(N=len(data),dt=1,dj=0.05,wf='morlet',p=omega0)
    #spec = mlpy.wavelet.cwt(data[:,1],dt=1,scales=scales,wf='morlet',p=omega0)
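    # Hedged addition, not in the original snippet: the ricker scaleogram computed
    # above is never displayed; one way to show it with pcolormesh, reusing
    # data_np, RATE and widths from this function, would be:
    plt.pcolormesh(np.arange(cwtmatr.shape[1]) / float(RATE), widths,
                   np.abs(cwtmatr), cmap='viridis')
    plt.ylabel('wavelet width')
    plt.xlabel('time [s]')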
Example #48
0
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages 
import matplotlib

x = np.arange(0.01, 10, 0.05)
y = np.arange(0.01, 10, 0.05)
X, Y = np.meshgrid(x, y)
m = np.array([1.0, 2.0])

Z = X + Y - m[0] - m[1] - X * np.log(X/m[0]) - Y * np.log(Y/m[1])

fig = plt.figure()
ax = plt.axes()
plt.pcolormesh(X, Y, Z, cmap='magma')
pp = plt.colorbar(orientation="vertical")
plt.rcParams['font.size'] = 16
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True

cont=plt.contour(X,Y,Z,8,vmin=-1,vmax=1, colors=['black'])
cont.clabel(fmt='%1.1f', fontsize=16)
plt.plot(1.0,2.0,marker='.',markersize=16)
plt.xlabel(r'$x_1$', fontsize=16)
plt.ylabel(r'$x_2$', fontsize=16)
plt.tick_params(which='major', labelsize=16)
plt.gca().set_aspect('equal')

ppdf = PdfPages('information_entropy.pdf')
ppdf.savefig(fig)   # write the current figure into the PDF (assumed intent; the snippet ends here)
ppdf.close()
Example #49
0
interp.data[~I_ann] = np.nan

a = np.reshape(interp.qx_data, (sans.nq, sans.nq))
b = np.reshape(interp.qy_data, (sans.nq, sans.nq))
c = np.reshape(interp.data, (sans.nq, sans.nq))
# vmax = np.nanmax(c)
# vmin = np.nanmin(c)
vmax = np.nanmax(sans.expData.data)
vmin = np.nanmin(sans.expData.data)
print(vmax)

# d = np.reshape(I_ann, (50,50))
# ax = plt.subplot()
plt.figure(figsize=[5, 5], dpi=500)
# plt.plot(np.array([0, 0.06]), [0, 0.06])
plt.pcolormesh(a, b, c, cmap=parula, norm=mcolors.LogNorm(vmin=2, vmax=vmax))
# plt.colorbar(shrink=0.7, ticks=[20, 25, 30, 35])
plt.xlim(-0.055, 0.055)
plt.ylim(-0.055, 0.055)
plt.xticks([-0.05, -0.025, 0, 0.025, 0.05], [-50, -25, 0, 25, 50])
plt.yticks([-0.05, -0.025, 0, 0.025, 0.05], [-50, -25, 0, 25, 50])
fontsize = '16'

plt.xlabel('q ' + r'/ $\bf{10^{-3} \: \: \AA^{-1}}$',
           fontweight='bold',
           fontsize=fontsize)
plt.ylabel('q ' + r'/ $\bf{10^{-3} \: \: \AA^{-1}}$',
           fontweight='bold',
           fontsize=fontsize)
plt.rc('figure', frameon=False)
plt.rc('axes.spines', top=False)
Example #50
0
def parseFQ(inf):
    print 'reading ' + inf + '...'
    if inf[-3:] == '.gz':
        print 'detected gzip suffix...'
        f = gzip.open(inf, 'r')
    else:
        f = open(inf, 'r')

    IS_SAM = False
    if inf[-4:] == '.sam':
        print 'detected sam input...'
        IS_SAM = True

    rRead = 0
    actual_readlen = 0
    qDict = {}
    while True:

        if IS_SAM:
            data4 = f.readline()
            if not len(data4):
                break
            try:
                data4 = data4.split('\t')[10]
            except IndexError:
                break
            # need to add some input checking here? Yup, probably.
        else:
            data1 = f.readline()
            data2 = f.readline()
            data3 = f.readline()
            data4 = f.readline()
            if not all([data1, data2, data3, data4]):
                break

        if actual_readlen == 0:
            if inf[-3:] != '.gz' and not IS_SAM:
                totalSize = os.path.getsize(inf)
                entrySize = sum([len(n) for n in [data1, data2, data3, data4]])
                print 'estimated number of reads in file:', int(
                    float(totalSize) / entrySize)
            actual_readlen = len(data4) - 1
            print 'assuming read length is uniform...'
            print 'detected read length (from first read found):', actual_readlen
            priorQ = np.zeros([actual_readlen, RQ])
            totalQ = [None] + [
                np.zeros([RQ, RQ]) for n in xrange(actual_readlen - 1)
            ]

        # sanity-check readlengths
        if len(data4) - 1 != actual_readlen:
            print 'skipping read with unexpected length...'
            continue

        for i in range(len(data4) - 1):
            q = ord(data4[i]) - offQ
            qDict[q] = True
            if i == 0:
                priorQ[i][q] += 1
            else:
                totalQ[i][prevQ, q] += 1
                priorQ[i][q] += 1
            prevQ = q

        rRead += 1
        if rRead % PRINT_EVERY == 0:
            print rRead
        if MAX_READS > 0 and rRead >= MAX_READS:
            break
    f.close()

    # some sanity checking again...
    QRANGE = [min(qDict.keys()), max(qDict.keys())]
    if QRANGE[0] < 0:
        print '\nError: Read in Q-scores below 0\n'
        exit(1)
    if QRANGE[1] > RQ:
        print '\nError: Read in Q-scores above specified maximum:', QRANGE[
            1], '>', RQ, '\n'
        exit(1)

    print 'computing probabilities...'
    probQ = [None] + [[[0. for m in xrange(RQ)] for n in xrange(RQ)]
                      for p in xrange(actual_readlen - 1)]
    for p in xrange(1, actual_readlen):
        for i in xrange(RQ):
            rowSum = float(np.sum(totalQ[p][i, :])) + PROB_SMOOTH * RQ
            if rowSum <= 0.:
                continue
            for j in xrange(RQ):
                probQ[p][i][j] = (totalQ[p][i][j] + PROB_SMOOTH) / rowSum

    initQ = [[0. for m in xrange(RQ)] for n in xrange(actual_readlen)]
    for i in xrange(actual_readlen):
        rowSum = float(np.sum(priorQ[i, :])) + INIT_SMOOTH * RQ
        if rowSum <= 0.:
            continue
        for j in xrange(RQ):
            initQ[i][j] = (priorQ[i][j] + INIT_SMOOTH) / rowSum

    if PLOT_STUFF:
        mpl.rcParams.update({
            'font.size': 14,
            'font.weight': 'bold',
            'lines.linewidth': 3
        })

        mpl.figure(1)
        Z = np.array(initQ).T
        X, Y = np.meshgrid(range(0, len(Z[0]) + 1), range(0, len(Z) + 1))
        mpl.pcolormesh(X, Y, Z, vmin=0., vmax=0.25)
        mpl.axis([0, len(Z[0]), 0, len(Z)])
        mpl.yticks(range(0, len(Z), 10), range(0, len(Z), 10))
        mpl.xticks(range(0, len(Z[0]), 10), range(0, len(Z[0]), 10))
        mpl.xlabel('Read Position')
        mpl.ylabel('Quality Score')
        mpl.title('Q-Score Prior Probabilities')
        mpl.colorbar()

        mpl.show()

        VMIN_LOG = [-4, 0]
        minVal = 10**VMIN_LOG[0]
        qLabels = [
            str(n) for n in range(QRANGE[0], QRANGE[1] + 1) if n % 5 == 0
        ]
        print qLabels
        qTicksx = [int(n) + 0.5 for n in qLabels]
        qTicksy = [(RQ - int(n)) - 0.5 for n in qLabels]

        for p in xrange(1, actual_readlen, 10):
            currentDat = np.array(probQ[p])
            for i in xrange(len(currentDat)):
                for j in xrange(len(currentDat[i])):
                    currentDat[i][j] = max(minVal, currentDat[i][j])

            # matrix indices:		pcolormesh plotting:	plot labels and axes:
            #
            #      y				   ^					   ^
            #	   -->				 x |					 y |
            #  x |					    -->					    -->
            #    v 					    y					    x
            #
            # to plot a MxN matrix 'Z' with rowNames and colNames we need to:
            #
            # pcolormesh(X,Y,Z[::-1,:])		# invert x-axis
            # # swap x/y axis parameters and labels, remember x is still inverted:
            # xlim([yMin,yMax])
            # ylim([M-xMax,M-xMin])
            # xticks()
            #

            mpl.figure(p + 1)
            Z = np.log10(currentDat)
            X, Y = np.meshgrid(range(0, len(Z[0]) + 1), range(0, len(Z) + 1))
            mpl.pcolormesh(X,
                           Y,
                           Z[::-1, :],
                           vmin=VMIN_LOG[0],
                           vmax=VMIN_LOG[1],
                           cmap='jet')
            mpl.xlim([QRANGE[0], QRANGE[1] + 1])
            mpl.ylim([RQ - QRANGE[1] - 1, RQ - QRANGE[0]])
            mpl.yticks(qTicksy, qLabels)
            mpl.xticks(qTicksx, qLabels)
            mpl.xlabel('\n' + r'$Q_{i+1}$')
            mpl.ylabel(r'$Q_i$')
            mpl.title('Q-Score Transition Frequencies [Read Pos:' + str(p) +
                      ']')
            cb = mpl.colorbar()
            cb.set_ticks([-4, -3, -2, -1, 0])
            cb.set_ticklabels([
                r'$10^{-4}$', r'$10^{-3}$', r'$10^{-2}$', r'$10^{-1}$',
                r'$10^{0}$'
            ])

        #mpl.tight_layout()
        mpl.show()

    print 'estimating average error rate via simulation...'
    Qscores = range(RQ)
    #print (len(initQ), len(initQ[0]))
    #print (len(probQ), len(probQ[1]), len(probQ[1][0]))

    initDistByPos = [
        DiscreteDistribution(initQ[i], Qscores) for i in xrange(len(initQ))
    ]
    probDistByPosByPrevQ = [None]
    for i in xrange(1, len(initQ)):
        probDistByPosByPrevQ.append([])
        for j in xrange(len(initQ[0])):
            if np.sum(
                    probQ[i][j]
            ) <= 0.:  # if we don't have sufficient data for a transition, use the previous qscore
                probDistByPosByPrevQ[-1].append(
                    DiscreteDistribution([1], [Qscores[j]],
                                         degenerateVal=Qscores[j]))
            else:
                probDistByPosByPrevQ[-1].append(
                    DiscreteDistribution(probQ[i][j], Qscores))

    countDict = {}
    for q in Qscores:
        countDict[q] = 0
    for samp in xrange(1, N_SAMP + 1):
        if samp % PRINT_EVERY == 0:
            print samp
        myQ = initDistByPos[0].sample()
        countDict[myQ] += 1
        for i in xrange(1, len(initQ)):
            myQ = probDistByPosByPrevQ[i][myQ].sample()
            countDict[myQ] += 1

    totBases = float(sum(countDict.values()))
    avgError = 0.
    for k in sorted(countDict.keys()):
        eVal = 10.**(-k / 10.)
        #print k, eVal, countDict[k]
        avgError += eVal * (countDict[k] / totBases)
    print 'AVG ERROR RATE:', avgError

    return (initQ, probQ, avgError)
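For reference, the average error estimated at the end of parseFQ is just the frequency-weighted mean of the Phred relation e = 10**(-Q/10); a tiny self-contained illustration with made-up numbers:

import numpy as np

q = np.array([20, 30, 40])            # example Phred quality scores
freq = np.array([0.2, 0.5, 0.3])      # example relative frequencies (sum to 1)
avg_error = np.sum(10.0 ** (-q / 10.0) * freq)
print(avg_error)                      # 0.2*1e-2 + 0.5*1e-3 + 0.3*1e-4 = 0.00253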
Example #51
0
    x1, x2 = np.meshgrid(t1, t2)  # generate the grid of sample points
    x_test = np.stack((x1.flat, x2.flat), axis=1)  # test points

    # # Not meaningful, only here to pad out the other two feature dimensions
    # x3 = np.ones(x1.size) * np.average(x[:, 2])
    # x4 = np.ones(x1.size) * np.average(x[:, 3])
    # x_test = np.stack((x1.flat, x2.flat, x3, x4), axis=1)  # test points

    mpl.rcParams['font.sans-serif'] = [u'simHei']
    mpl.rcParams['axes.unicode_minus'] = False
    cm_light = mpl.colors.ListedColormap(['#77E0A0', '#FF8080', '#A0A0FF'])
    cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
    y_hat = lr.predict(x_test)  # predicted values
    y_hat = y_hat.reshape(x1.shape)  # reshape to match the input grid
    plt.figure(facecolor='w')
    plt.pcolormesh(x1, x2, y_hat, cmap=cm_light)  # show the predicted regions
    plt.scatter(x[:, 0], x[:, 1], c=y, edgecolors='k', s=50,
                cmap=cm_dark)  # show the samples
    plt.xlabel('Sepal length', fontsize=14)
    plt.ylabel('Sepal width', fontsize=14)
    plt.xlim(x1_min, x1_max)
    plt.ylim(x2_min, x2_max)
    plt.grid()
    patchs = [
        mpatches.Patch(color='#77E0A0', label='Iris-setosa'),
        mpatches.Patch(color='#FF8080', label='Iris-versicolor'),
        mpatches.Patch(color='#A0A0FF', label='Iris-virginica')
    ]
    plt.legend(handles=patchs, fancybox=True, framealpha=0.8)
    plt.title('Iris logistic regression classification (standardized)', fontsize=17)
    plt.show()
Example #52
0
svm2_grid_hat = svm2_grid_hat.reshape(x1.shape)

svm3_grid_hat = svm3.predict(grid_show)
svm3_grid_hat = svm3_grid_hat.reshape(x1.shape)

svm4_grid_hat = svm4.predict(grid_show)
svm4_grid_hat = svm4_grid_hat.reshape(x1.shape)

cm_light = mpl.colors.ListedColormap(['#00ffcc', '#ffa0a0', '#a0a0ff'])
cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
plt.figure(facecolor='w', figsize=(14, 7))

## linear-svm
plt.subplot(221)
## decision-region plot
plt.pcolormesh(x1, x2, svm1_grid_hat, cmap=cm_light)
## all sample points
plt.scatter(x[0], x[1], c=y, edgecolors='k', s=50, cmap=cm_dark)  # samples
## test data set
plt.scatter(x_test[0], x_test[1], s=120, facecolors='none', zorder=10)  # circle the test samples
## label list
plt.xlabel(iris_feature[0], fontsize=13)
plt.ylabel(iris_feature[1], fontsize=13)
plt.xlim(x1_min, x1_max)
plt.ylim(x2_min, x2_max)
plt.title('Iris data: linear-SVM classification', fontsize=16)
plt.grid(b=True, ls=':')
plt.tight_layout(pad=1.5)

##rbf-svm
plt.subplot(222)
Example #53
0
    if bin_gap is not None:
        min_e, max_e = np.min(energy_grid), np.max(energy_grid)
        num_bins = int(((max_e - min_e) / bin_gap)**(1. / 2.)) + 1  # sqrt, since the bin edges below grow as i**2
        bins = [min_e + i**2. * bin_gap for i in np.arange(num_bins)]
    else:
        bins = np.linspace(np.min(energy_grid) - 0.1,
                           np.max(energy_grid),
                           20,
                           endpoint=True)

    if suffix is not None:
        savepath = '%s/energy_%s.png' % (saveto, suffix)
    else:
        savepath = '%s/energy.png' % (saveto)

    plt.pcolormesh(dx, dy, energy_grid)
    CS = plt.contour(dx, dy, energy_grid, bins, colors='white')
    plt.clabel(CS, inline=1, fontsize=10, colors='white')

    plt.savefig(savepath, format='png')
    plt.clf()


def plot_data((gen_data, real_data, axis), saveto, suffix=None):
    if suffix is not None:
        savepath = '%s/comparison_%s.png' % (saveto, suffix)
    else:
        savepath = '%s/comparison.png' % (saveto)

    plt.plot(gen_data[:, 0],
             gen_data[:, 1],
Example #54
0
        mc_local_endpoints['x'].append(output_track_x)
        mc_local_endpoints['y'].append(output_track_y)
        mc_local_endpoints['z'].append(output_track_z)

# print the number of events processed
print(" ----- processed events (pixelhit):" + str(PixelHit.GetEntries()) + " empty_mc_branch:" + str(empty_mc_branch))

print("Plotting...")

# plot pixel collected charge versus MC particle endpoint coordinates
if print_all or print_og or print_2d:
    mc_hit_histogram = plt.figure(figsize=(11, 7))
    H2, xedges, yedges, binnmmber = stats.binned_statistic_2d(mc_local_endpoints['x'], mc_local_endpoints['y'], values=pixel_hit['signal'], statistic='mean', bins=[100, 100])
    XX, YY = np.meshgrid(xedges, yedges)
    Hm = ma.masked_where(np.isnan(H2), H2)
    plt.pcolormesh(XX, YY, Hm, cmap="inferno")
    plt.ylabel("pixel y [mm]")
    plt.xlabel("pixel x [mm]")
    cbar = plt.colorbar(pad=.015, aspect=20)
    cbar.set_label("hit signal")
    if save_pdf:  mc_hit_histogram.savefig(path.join(outDir , "GlobalHeatMap.pdf"), bbox_inches='tight')

if print_all or print_og or print_2d:
    mc_hit_histogram_z = plt.figure(figsize=(11, 7))
    H2, xedges, yedges, binnmmber = stats.binned_statistic_2d(mc_local_endpoints['x'], mc_local_endpoints['z'],
                                                              values=pixel_hit['signal'], statistic='mean', bins=[100, 100])
    XX, YY = np.meshgrid(xedges, yedges)
    Hm = ma.masked_where(np.isnan(H2), H2)
    plt.pcolormesh(XX, YY, Hm.T, cmap="inferno")
    plt.ylabel("pixel z [mm]")
    plt.xlabel("pixel x [mm]")
Example #55
0
X = np.array([x, x])

y0 = np.zeros(len(x))
y = np.abs(z)
Y = np.array([y0, y])

Z = np.array([z, z])
C = np.angle(Z)

# Display the plot information
plt.rcParams.update({'mathtext.default': 'regular'})  # enable LaTeX-like math syntax
plt.title(r"$y = e^{-i2 \pi x}  e^{-x^{2}}$")  # CHANGE HERE
plt.xlabel("x")
plt.ylabel("Modulus of y")

# Display the modulus
plt.plot(x, y, "k")

# Display the argument
plt.pcolormesh(X,
               Y,
               C,
               shading="gouraud",
               cmap=plt.cm.hsv,
               vmin=-np.pi,
               vmax=np.pi)
plt.colorbar().set_label("Argument of y")

plt.show()
Example #56
0
        else:
            title = 'Linear kernel, C=%.1f' % param[1]

        clf.fit(x, y)
        y_hat = clf.predict(x)
        print('Accuracy:', accuracy_score(y, y_hat))

        # plotting
        print(title)
        print('Number of support vectors:', clf.n_support_)
        print('Support vector coefficients:', clf.dual_coef_)
        print('Support vectors:', clf.support_)
        plt.subplot(3, 4, i + 1)
        grid_hat = clf.predict(grid_test)  # predicted class values
        grid_hat = grid_hat.reshape(x1.shape)  # reshape to match the input grid
        plt.pcolormesh(x1, x2, grid_hat, cmap=cm_light, alpha=0.8)
        plt.scatter(x[0], x[1], c=y, edgecolors='k', s=40,
                    cmap=cm_dark)  # show the samples
        plt.scatter(x.loc[clf.support_, 0],
                    x.loc[clf.support_, 1],
                    edgecolors='k',
                    facecolors='none',
                    s=100,
                    marker='o')  # support vectors
        z = clf.decision_function(grid_test)
        # print 'z = \n', z
        print('clf.decision_function(x) = ', clf.decision_function(x))
        print('clf.predict(x) = ', clf.predict(x))
        z = z.reshape(x1.shape)
        plt.contour(x1,
                    x2,
Example #57
0
plt.subplot(2, 2, 1)
plt.plot(t, x)
plt.axis([0, x.size / float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time')
plt.title('audio: x')

# plot the spectrogram in the defined frequency range
plt.subplot(2, 2, 2)
numFrames = int(mX[:, 0].size)
frmTime = H * np.arange(numFrames) / float(fs)
bins = (Freq > rango[0]) & (Freq < rango[1])

Sxx = np.transpose(mX[:, bins])

plt.pcolormesh(frmTime, Freq[bins], Sxx)
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.autoscale(tight=True)

# plot the average spectrum of the whole audio file
mX = np.mean(mX, axis=0)
plt.subplot(2, 2, 3)
plt.plot(Freq[bins], mX[bins])

plt.xlabel('Frequency [Hz]')
plt.ylabel('Magnitude in dB')

##########################################
# BLOCK 2
'''
Example #58
0
def main(inputFile1='../../sounds/ocean.wav', inputFile2='../../sounds/speech-male.wav', window1='hamming',  window2='hamming', 
	M1=1024, M2=1024, N1=1024, N2=1024, H1=256, smoothf = .5, balancef = 0.2):
	"""
	Function to perform a morph between two sounds
	inputFile1: name of input sound file to be used as source
	inputFile2: name of input sound file to be used as filter
	window1 and window2: windows for both files
	M1 and M2: window sizes for both files
	N1 and N2: fft sizes for both sounds
	H1: hop size for sound 1 (the one for sound 2 is computed automatically)
	smoothf: smoothing factor to be applied to the magnitude spectrum of sound 2 before morphing
	balancef: balance factor between both sounds, 0 is sound 1 and 1 is sound 2
	"""

	# read input sounds
	(fs, x1) = UF.wavread(inputFile1)
	(fs, x2) = UF.wavread(inputFile2)

	# compute analysis windows
	w1 = get_window(window1, M1)
	w2 = get_window(window2, M2)

	# perform morphing
	y = STFTT.stftMorph(x1, x2, fs, w1, N1, w2, N2, H1, smoothf, balancef)

	# compute the magnitude and phase spectrogram of input sound (for plotting)
	mX1, pX1 = STFT.stftAnal(x1, fs, w1, N1, H1)
	
	# compute the magnitude and phase spectrogram of output sound (for plotting)
	mY, pY = STFT.stftAnal(y, fs, w1, N1, H1)
	
	# write output sound
	outputFile = 'output_sounds/' + os.path.basename(inputFile1)[:-4] + '_stftMorph.wav'
	UF.wavwrite(y, fs, outputFile)

	# create figure to plot
	plt.figure(figsize=(12, 9))

	# frequency range to plot
	maxplotfreq = 10000.0

	# plot sound 1
	plt.subplot(4,1,1)
	plt.plot(np.arange(x1.size)/float(fs), x1)
	plt.axis([0, x1.size/float(fs), min(x1), max(x1)])
	plt.ylabel('amplitude')
	plt.xlabel('time (sec)')
	plt.title('input sound: x')

	# plot magnitude spectrogram of sound 1
	plt.subplot(4,1,2)
	numFrames = int(mX1[:,0].size)
	frmTime = H1*np.arange(numFrames)/float(fs)                             
	binFreq = fs*np.arange(N1*maxplotfreq/fs)/N1  
	plt.pcolormesh(frmTime, binFreq, np.transpose(mX1[:,:N1*maxplotfreq/fs+1]))
	plt.xlabel('time (sec)')
	plt.ylabel('frequency (Hz)')
	plt.title('magnitude spectrogram of x')
	plt.autoscale(tight=True)

	# plot magnitude spectrogram of morphed sound 
	plt.subplot(4,1,3)
	numFrames = int(mY[:,0].size)
	frmTime = H1*np.arange(numFrames)/float(fs)                             
	binFreq = fs*np.arange(N1*maxplotfreq/fs)/N1 
	plt.pcolormesh(frmTime, binFreq, np.transpose(mY[:,:N1*maxplotfreq/fs+1]))
	plt.xlabel('time (sec)')
	plt.ylabel('frequency (Hz)')
	plt.title('magnitude spectrogram of y')
	plt.autoscale(tight=True)

	# plot the morphed sound
	plt.subplot(4,1,4)
	plt.plot(np.arange(y.size)/float(fs), y)
	plt.axis([0, y.size/float(fs), min(y), max(y)])
	plt.ylabel('amplitude')
	plt.xlabel('time (sec)')
	plt.title('output sound: y')

	plt.tight_layout()
	plt.show(block=False)
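A hypothetical call of the morph above, relying on the default sms-tools sound files already written into the signature; balancef=0.5 weights the two spectra equally:

main(inputFile1='../../sounds/ocean.wav',
     inputFile2='../../sounds/speech-male.wav',
     smoothf=0.5, balancef=0.5)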
Example #59
0
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np

# generate a dataset with 500 samples and 5 classes
X, y = make_blobs(n_samples=500, centers=5, random_state=8)
# split the dataset into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=8)

# use Gaussian naive Bayes
gnb = GaussianNB()
gnb.fit(X_train, y_train)

print('\nResults:')
print('Training set score: {:.3f}'.format(gnb.score(X_train, y_train)))
print('Test set score: {:.3f}'.format(gnb.score(X_test, y_test)))

# limit the range of the x and y axes
x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
# use a different background color for each class
xx, yy = np.meshgrid(np.arange(x_min, x_max, .02),
                     np.arange(y_min, y_max, .02))
z = gnb.predict(np.c_[(xx.ravel(), yy.ravel())]).reshape(xx.shape)
plt.pcolormesh(xx, yy, z, cmap=plt.cm.Spectral)
# show the training and test sets as scatter points
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=plt.cm.cool, edgecolors='k')
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=plt.cm.cool, marker='*', edgecolors='k')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title('Classifier GaussianNB')
plt.show()
Example #60
0

sampler = MetropolisSampler(D=2, sigma=np.array([[1, 0], [0, 1]]), p=p)
samples = sampler.sample(np.array([1, 0]), N=5000, stride=10)
fig = plt.figure(figsize=(12, 5))

print(f"sample mean : {np.mean(samples, axis=0)}")
print(f"sample covariance : {np.cov(samples, rowvar=False)}")

# two dimensional histogram
H, xx, yy = np.histogram2d(samples[:, 0], samples[:, 1], bins=25, density=True)
H = H.T
XX, YY = np.meshgrid(xx, yy)

# contour plot of the density function
xx_f = np.linspace(-1.5, 1.5, 100)
yy_f = np.linspace(-1.5, 1.5, 101)
XX_f, YY_f = np.meshgrid(xx_f, yy_f)
Z_f = np.exp(
    -0.5 *
    (10 * XX_f * XX_f - 12 * XX_f * YY_f + 10 * YY_f * YY_f)) / (2 * np.pi) * 8

plt.subplot(121)
plt.plot(samples[:, 0], samples[:, 1], ',')
plt.contour(XX_f, YY_f, Z_f)
plt.colorbar()
plt.subplot(122)
plt.pcolormesh(XX, YY, H)
plt.colorbar()
plt.contour(XX_f, YY_f, Z_f)
fig.savefig("2D_normal_sampling.png")