Example #1
def plot_quad_mesh(pts, quads):
    """Plot a quadrilateral mesh."""

    def plot_quad(pts):
        """Plot one quad."""
        plot(np.r_[pts[:,0], pts[0,0]],
             np.r_[pts[:,1], pts[0,1]],
             lw=1.5, color="black")

    hold(True)
    for k,q in enumerate(quads):
        plot_quad(pts[q])

    if len(quads) < 400:
        for k,q in enumerate(quads):
            center = pts[q].sum(axis=0) / 4.0
            text(center[0], center[1], "%d" % k, color="black",
                 fontsize=10,
                 horizontalalignment='center', verticalalignment='center')

    if len(pts) < 200:
        for k,p in enumerate(pts):
            text(p[0], p[1], "%d" % k, color="black",
                 fontsize=10,
                 bbox=dict(boxstyle = "round", fc = "white"),
                 horizontalalignment='center', verticalalignment='center')
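A minimal usage sketch for plot_quad_mesh (not from the original project): it assumes numpy as np and pylab's plot, text and hold are available at module level, as in the snippet above, which also implies a Matplotlib version old enough to still provide pylab.hold.

import numpy as np
from pylab import show

# two unit quads sharing an edge; each row of `quads` indexes four points in `pts`
pts = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.], [2., 0.], [2., 1.]])
quads = np.array([[0, 1, 2, 3], [1, 4, 5, 2]])

plot_quad_mesh(pts, quads)
show()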
Example #2
File: exactV.py Project: pism/pism
def plot_xc(t_years):
    """Plot the location of the calving front."""
    x = x_c(t_years * secpera) / 1000.0   # convert to km
    _, _, y_min, y_max = axis()

    hold(True)
    plot([x, x], [y_min, y_max], '--g')
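pylab.hold was removed in Matplotlib 3.0 (overplotting has been the default since 2.0), so a hedged modern sketch of the same calving-front marker, assuming x_c and secpera are defined as in the original exactV.py module, could be:

import matplotlib.pyplot as plt

def plot_xc_modern(t_years):
    """Mark the calving front location on the current axes."""
    x = x_c(t_years * secpera) / 1000.0   # convert to km
    plt.axvline(x, linestyle='--', color='g')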
Example #3
def plot_bpm_file(pool):
    bpm    = pool.descriptors['tempotap_bpm']['values'][0]
    intervals = pool.descriptors['tempotap_intervals']['values'][0]
    bpm_periods = [60./interval for interval in intervals]
    ticks  = pool.descriptors['tempotap_ticks']['values'][0]
    rubato_start = pool.descriptors['tempotap_rubato_start']['values'][0]
    rubato_stop = pool.descriptors['tempotap_rubato_stop']['values'][0]
    print 'bpm', bpm
    print 'ticks', ticks
    print 'rubato_start', rubato_start
    print 'rubato_stop', rubato_stop
    print 'intervals', intervals
    import pylab
    pylab.plot(ticks,[bpm_periods[0]] + bpm_periods,'r+-')
    pylab.hold(True)
    pylab.plot([ticks[0],ticks[-1]],[bpm]*2,'g-')
    pylab.plot(rubato_start,[bpm]*len(rubato_start),'b+')
    pylab.plot(rubato_stop,[bpm]*len(rubato_stop),'b|')
    # ground truth
    if 'gt_ticks' in pool.descriptors.keys():
        gt_ticks  = pool.descriptors['gt_ticks']['values'][0]
        if len(gt_ticks) > 1:
            gt_bpm_periods = [60./(gt_ticks[i] - gt_ticks[i-1]) for i in range(1,len(gt_ticks))]
            p1 = pylab.plot(gt_ticks,[gt_bpm_periods[0]] + gt_bpm_periods,'rx:')
            p2 = pylab.plot(gt_ticks,[gt_bpm_periods[0]] + gt_bpm_periods,'rx:')
            #pylab.legend((p1[0],p2[0]),('Men','Women'))
    pylab.hold(False)
    pylab.show()
Example #4
def makeBoxPlot(outputDir, distances):

	plt = figure()
	ax = axes()
	hold(True)
	bp = boxplot(distances)
	savefig(outputDir + "/boxplot.png")
Example #5
def _test_graph():
    i = 10000
    x = np.linspace(0,3.7*pi,i)
    y = (0.3*np.sin(x) + np.sin(1.3 * x) + 0.9 * np.sin(4.2 * x) + 0.06 *
    np.random.randn(i))
    y *= -1
    x = range(i)

    _max, _min = peakdetect(y,x,750, 0.30)
    xm = [p[0] for p in _max]
    ym = [p[1] for p in _max]
    xn = [p[0] for p in _min]
    yn = [p[1] for p in _min]

    plot = pylab.plot(x,y)
    pylab.hold(True)
    pylab.plot(xm, ym, 'r+')
    pylab.plot(xn, yn, 'g+')

    _max, _min = peak_det_bad.peakdetect(y, 0.7, x)
    xm = [p[0] for p in _max]
    ym = [p[1] for p in _max]
    xn = [p[0] for p in _min]
    yn = [p[1] for p in _min]
    pylab.plot(xm, ym, 'y*')
    pylab.plot(xn, yn, 'k*')
    pylab.show()
Example #6
def plot_spectrum():
    #get the data...    
    a_0=struct.unpack('>1024l',fpga.read('even',1024*4,0))
    a_1=struct.unpack('>1024l',fpga.read('odd',1024*4,0))

    interleave_a=[]

    for i in range(1024):
        interleave_a.append(a_0[i])
        interleave_a.append(a_1[i])

    pylab.figure(num=1,figsize=(10,10))
    pylab.ioff()
    pylab.plot(interleave_a)
    #pylab.semilogy(interleave_a)
    pylab.title('Integration number %i.'%prev_integration)
    pylab.ylabel('Power (arbitrary units)')
    pylab.grid()
    pylab.xlabel('Channel')
    pylab.xlim(0,2048)
    pylab.ioff()

    pylab.hold(False)
    pylab.show()
    pylab.draw()
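The interleaving loop above can also be written with NumPy; a small sketch using stand-in arrays instead of the FPGA reads:

import numpy as np

a_0 = np.arange(0, 8, 2)   # stand-in for the 'even' samples
a_1 = np.arange(1, 8, 2)   # stand-in for the 'odd' samples
interleave_a = np.column_stack((a_0, a_1)).ravel()   # [0, 1, 2, 3, 4, 5, 6, 7]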
Example #7
def PlotTorques(robot, traj0, traj1, dt=0.001, taumin=[], taumax=[],
                figstart=0):
    from pylab import figure, clf, hold, gca, plot, axis, title, xlabel, ylabel
    colorcycle = ['r', 'g', 'b', 'm', 'c', 'y', 'k']
    colorcycle = colorcycle[0:traj0.dimension]
    Tmax = max(traj0.duration, traj1.duration)
    tvect0, tauvect0 = ComputeTorques(traj0, robot, dt)
    tvect1, tauvect1 = ComputeTorques(traj1, robot, dt)
    figure(figstart)
    clf()
    hold('on')
    ax = gca()
    ax.set_color_cycle(colorcycle)
    plot(tvect0, tauvect0, '--', linewidth=2)
    ax.set_color_cycle(colorcycle)
    plot(tvect1, tauvect1, linewidth=2)
    ax.set_color_cycle(colorcycle)
    for a in taumax:
        plot([0, Tmax], [a, a], '-.')
    ax.set_color_cycle(colorcycle)
    for a in taumin:
        plot([0, Tmax], [a, a], '-.')
    if len(taumax) > 0:
        axis([0, Tmax, 1.2 * min(taumin), 1.2 * max(taumax)])
    title('Joint torques', fontsize=20)
    xlabel('Time (s)', fontsize=18)
    ylabel('Joint torques (Nm)', fontsize=18)
Example #8
def demo():
    from pylab import hold, linspace, subplot, plot, legend, show
    hold(True)
    #y = [9,6,1,3,8,4,2]
    #y = [9,11,13,3,-2,0,2]
    y = [9, 11, 2, 3, 8, 0]
    #y = [9,9,1,3,8,2,2]
    x = linspace(0, 1, len(y))
    t = linspace(x[0], x[-1], 400)
    subplot(211)
    plot(t, bspline(y, t, clamp=False), '-.y',
         label="unclamped bspline")  # bspline
    # bspline
    plot(t, bspline(y, t, clamp=True), '-y', label="clamped bspline")
    plot(sorted(x), y, ':oy', label="control points")
    legend()
    #left, right = _derivs(t, bspline(y, t, clamp=False))
    #print(left, (y[1] - y[0]) / (x[1] - x[0]))

    subplot(212)
    xt, yt = pbs(x, y, t, clamp=False)
    plot(xt, yt, '-.b', label="unclamped pbs")  # pbs
    xt, yt = pbs(x, y, t, clamp=True)
    plot(xt, yt, '-b', label="clamped pbs")  # pbs
    #xt,yt = pbs(x,y,t,clamp=True, parametric=True)
    # plot(xt,yt,'-g') # pbs
    plot(sorted(x), y, ':ob', label="control points")
    legend()
    show()
Example #9
def _smooth_demo():
    from numpy import linspace, sin, ones
    from pylab import subplot, plot, hold, axis, legend, title, show, randn

    t = linspace(-4, 4, 100)
    x = sin(t)
    xn = x + randn(len(t)) * 0.1
    y = smooth(x)

    ws = 31

    subplot(211)
    plot(ones(ws))

    windows = ["flat", "hanning", "hamming", "bartlett", "blackman"]

    hold(True)
    for w in windows[1:]:
        eval("plot(" + w + "(ws) )")

    axis([0, 30, 0, 1.1])

    legend(windows)
    title("The smoothing windows")
    subplot(212)
    plot(x)
    plot(xn)
    for w in windows:
        plot(smooth(xn, 10, w))
    l = ["original signal", "signal with noise"]
    l.extend(windows)

    legend(l)
    title("Smoothing a noisy signal")
    show()
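The eval() loop in the demo above relies on the window functions being in the module namespace; a hedged alternative, assuming the same windows are taken from NumPy, looks them up by name instead:

import numpy as np
from pylab import plot

ws = 31
for w in ["hanning", "hamming", "bartlett", "blackman"]:
    plot(getattr(np, w)(ws))   # equivalent to eval("plot(" + w + "(ws) )") without eval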
Example #10
 def plotResults(self, titlestr="", ylimits=[0.5,1.05], plotfunc = pl.semilogx, ylimitsB=[0,101],
                  legend_loc=3, show=True ):
     pl.figure(num=None, figsize=(15,5))
     xvals = range(1, (1+len(self.removed)) )
     #Two subplots. On the left is the test accuracy vs. iteration
     pl.subplot(1,2,1)
     plotfunc(xvals, self.test_acc_list, "b", label="Test Accuracy")    
     pl.hold(True)
     plotfunc(xvals, self.getRollingAvgTestAcc(window_size=10), "r", label="Test Acc (rolling avg)")
     plotfunc(xvals, self.getRollingAvgTrainAcc(window_size=10), "g--", label="Train Acc (rolling avg)")
     pl.ylim(ylimits)
     if titlestr == "":
         pl.title("Iterative Feature Removal")
     else:
         pl.title(titlestr)
     pl.ylabel("Test Accuracy")
     pl.xlabel("Iteration")
     pl.legend(loc=legend_loc) #3=lower left
     pl.hold(False)
     
     #second subplot. On the right is the number of features removed per iteration
     pl.subplot(1,2,2)
     Ns = [ len(lst) for lst in self.removed ]
     pl.semilogx(xvals, Ns, "bo", label="#Features per Iteration")
     pl.xlabel("Iteration")
     pl.ylabel("Number of Features Selected")
     pl.title("Number of Features Removed per Iteration")
     pl.ylim(ylimitsB)
     
     pl.subplots_adjust(left=0.05, bottom=0.15, right=0.95, top=0.90, wspace=0.20, hspace=0.20)
     if show: pl.show()
Example #11
def plotRes_varyingTrees( data_dict, dataset_name, max_correct=3000 , show=True):
    '''
    Plots the results of a varyingNumTrees() experiment, using a dictionary
    structure to hold the data. See the loadRes_varyingTrees() comments on the
    dictionary layout.
    '''
    xvals = data_dict['NumTrees']
    
    #prox forest trials
    pf_avg = data_dict['PF'].mean(axis=0)
    pf_std = data_dict['PF'].std(axis=0)
    pf_95_conf = 1.96 * pf_std / math.sqrt(data_dict['PF'].shape[0])

    #kdt forest trials
    kdt_avg = data_dict['KDT'].mean(axis=0)
    kdt_std = data_dict['KDT'].std(axis=0)
    kdt_95_conf = 1.96 * kdt_std / math.sqrt(data_dict['KDT'].shape[0])
    
    #plot average results of each, bounded by lower and upper bounds of 95% conf intv
    pl.hold(True)
    pl.errorbar(xvals, pf_avg/max_correct, yerr=pf_95_conf/max_correct, fmt='-r', 
                label="PF")
    pl.errorbar(xvals, kdt_avg/max_correct, yerr=kdt_95_conf/max_correct, fmt='-.b',
                label="KDT")
    pl.ylim([0,1.05])
    pl.title(dataset_name)
    pl.xlabel("Number of Trees in Forest")
    pl.ylabel("Percent Correct")
    pl.legend(loc='lower right')
    if show: pl.show()
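The error bars above use the usual normal-approximation half-width, 1.96 * std / sqrt(n); a small sketch of that formula on stand-in data (10 trials by 5 forest sizes):

import math
import numpy as np

trials = np.random.rand(10, 5)        # stand-in for data_dict['PF']
pf_avg = trials.mean(axis=0)
pf_95_conf = 1.96 * trials.std(axis=0) / math.sqrt(trials.shape[0])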
Example #12
def test_radial_profiles():
    arr = random_periodic_upsample(128, 16, seed=0)
    mask = np.zeros(arr.shape, dtype=np.bool_)
    arr_x = vcalc.cderivative(arr, 'X_DIR')
    arr_y = vcalc.cderivative(arr, 'Y_DIR')
    arr_div = np.sqrt(arr_x**2 + arr_y**2)
    surf = _cp.TopoSurface(arr)
    rprofs = radial_profiles(surf, threshold=25, expand_regions=1, other_arr=arr_div, mask=mask)
    arr[mask] = 2 * arr.max()
    pl.imshow(arr, interpolation='nearest')
    pl.figure()
    pl.imshow(arr_div)
    pl.figure()
    pl.hold(True)
    linreg_xy = ([], [])
    for minmax, (rprof, region) in rprofs.items():
        # minmax_flux = arr_div[minmax]
        pts, fluxes, avg_fluxes, avg_fluxes_errs, avg_dists, avg_dists_errs = \
                zip(*rprof)
        linreg_xy[0].extend(fluxes)
        linreg_xy[1].extend(avg_fluxes)
        # fluxes = np.abs(np.array(fluxes) - minmax_flux)
        # avg_fluxes = np.abs(np.array(avg_fluxes) - minmax_flux)
        # pl.plot(avg_dists, avg_fluxes, 'd-')
        pl.plot(avg_dists, avg_fluxes, 'd-')
    pl.grid()
    slope, intercept, rval, pval, stderr = stats.linregress(*linreg_xy)
    print
    print "slope: %f" % slope
    print "intercept: %f" % intercept
    print "rval: %f" % rval
    print "pval: %f" % pval
    print "stderr: %f" % stderr
    import pdb; pdb.set_trace()
Example #13
def plot_the_overview(samples, i, j,  output_image_file):

    pylab.hold(True)
    pylab.scatter(samples[:,i], samples[:,j])
    pylab.draw()
    pylab.savefig(output_image_file, dpi=150)
    pylab.close()
Example #14
def Wave2DShow(ufield, ds, vel=None, vmin=None, vmax=None):
    r"""
    Show a 2D pressure field at some instant of time.
    The velocity field is shown as the background; it must have the same
    dimensions as ufield.

    * ufield    : 2d pressure field at an instant of time
    * ds        : space discretization
    * vel       : 2d background velocity field
    * vmin/vmax : vmin/vmax of imshow
    """
    # max indices in space (ufield is a 2d [nz][nx] pressure field)
    maxk = np.shape(ufield)[0]
    maxi = np.shape(ufield)[1]

    print "vmin : ", vmin, "vmax : ", vmax
    # space axis starting at 0 in x and z (using y coz' plotting)
    # extents of the picture,
    xmin, xmax = 0, ds*maxi
    ymin, ymax = 0, ds*maxk
    extent= xmin, xmax, ymax, ymin
    py.hold(True)
    if vel is not None:
        py.imshow(vel, interpolation='bilinear', cmap=cm.jet, extent=extent,  origin='upper', aspect='auto')

    py.imshow(ufield, interpolation='bilinear', cmap=cm.Greys_r, alpha=0.8, extent=extent, origin='upper', aspect='auto', vmin=vmin, vmax=vmax)
    py.hold(False)
    # optional cmap=cm.jet, aspect='auto' adjusts aspect to match the previous plot
    py.show()
Example #15
def pinwheel_overlay(pinwheels, contours=None, style='wo',linewidth=1,mmap=None):
   """
   Plots the pinwheel locations and optionally the real and imaginary
   pinwheel contours. Designed to be overlayed over an OR map.
   """
   fig = plt.figure(frameon=False)
   fig.patch.set_alpha(0.0)
   ax = plt.subplot(111, aspect='equal', frameon=True)
   ax.patch.set_alpha(0.0)
   plt.hold(True)
    
   plt.imshow(mmap,cmap='hsv',extent=(0, 1.0, 0, 1.0))
   (recontours, imcontours) = contours if contours else ([],[])
   for recontour in recontours:
      plt.plot(recontour[:,0], recontour[:,1],'k',linewidth=linewidth)
   for imcontour in imcontours:
      plt.plot(imcontour[:,0], imcontour[:,1],'w', linewidth=linewidth)

   Xs, Ys = zip(*pinwheels)
   plt.plot(np.array(Xs), np.array(Ys), style)

   plt.xlim((0.0,1.0));         plt.ylim((0.0,1.0))
   ax.xaxis.set_ticks([]);      ax.yaxis.set_ticks([])
   ax.xaxis.set_ticklabels([]); ax.yaxis.set_ticklabels([])
   return fig
Example #16
def plot_bars(pos_count, title='', max_pathway_length=8, legend_loc='upper right'):
    n_labels = len(pos_count)
    ind = np.arange(max_pathway_length)
    width = 0.2
    
    fig = pylab.figure()
    pylab.hold(True)
    ax = fig.add_subplot(111)

    colors = {'No known regulation':'grey', 'Activated':'green', 'Inhibited':'red', 'Mixed regulation':'blue'}
    plot_order = ['Inhibited', 'Mixed regulation', 'Activated', 'No known regulation']    
    
    i = 0
    for label in plot_order:
        curr_vals = pos_count[label][1:max_pathway_length+1]
        if (sum(curr_vals) < 20):
            n_labels -= 1
            continue
        ax.bar(ind + i * width, tuple([j * 1.0 /sum(curr_vals) for j in curr_vals]), width, color=colors[label], label=('%s (%d)' % (label, sum(curr_vals))))
        i += 1
    
    ax.set_ylabel('Fraction of reactions per type')
    ax.set_xlabel('Position in pathway')
    
    ax.set_xticks(ind+ width * n_labels/2)
    ax.set_xticklabels( ind + 1 )
    
    legendfont = matplotlib.font_manager.FontProperties(size=11)
    pylab.legend(loc=legend_loc, prop=legendfont)
    pylab.title(title)

    pylab.hold(False)
    
    return fig
Example #17
def genSensitivityResults():
    ''' Generates sensitivity results regarding number of tree levels and how
        increasing the number of estimation parameters affects convergence.
        Didn't have space to include these figures in the conference paper.
    '''
 
    shahx_a,shahy_a = genConvergencePlots("shah", numLevels=4)
    shahx_b,shahy_b = genConvergencePlots("shah", numLevels=10)
    shahx_c,shahy_c = genConvergencePlots("shah", numLevels=25)
    shahx_d,shahy_d = genConvergencePlots("shah", numLevels=50)
    
    compare_fig = pl.figure('Convergence of different metrics')
    pl.hold(True)
    shaha = pl.plot(shahx_a,shahy_a[:,0],'k-',label="# Shah Levels = 2",linewidth=3)
    shahb = pl.plot(shahx_b,shahy_b[:,0],'k-',label="# Shah Levels = 4",linewidth=1)
    shahc = pl.plot(shahx_c,shahy_c[:,0],'b--',label="# Shah Levels = 10",linewidth=3)
    shahd = pl.plot(shahx_d,shahy_d[:,0],'b--',label="# Shah Levels = 50",linewidth=1)    
    pl.hold(False)
    pl.xlabel("Number of A/B Comparisons used in training")
    pl.ylabel("Prediction accuracy")
    pl.title("Comparison of various metrics")
    pl.ylim((.5,1.0))
    pl.xlim((0,shahx_a[-1]))
    pl.legend(loc=4)
    # Uncomment if you want interactive plotting
    #pl.show()
    pl.savefig(plot_path+"sensitivity_convergence_comparison.pdf")
Example #18
def plot_arm_speed(axis, startTime=-1):
    rootName = 'siemensSensors'
    f = netcdf.netcdf_file(rootName+'Data.nc', 'r')
    data1 = f.variables[rootName+'.data.'+'carouselSpeedSetpoint'].data[startSample:]
    data2 = f.variables[rootName+'.data.'+'carouselSpeedSmoothed'].data[startSample:]
    ts_trigger = f.variables[rootName+'.data.ts_trigger'].data[startSample:]*1.0e-9

    # Load the actual arm speed from the arm gyro
    rootName = 'armboneLisaSensors'
    fiile = netcdf.netcdf_file(rootName+'Data.nc', 'r')

    rawdata4 = fiile.variables['armboneLisaSensors.GyroState.gr'].data[startSample:]
    ts_trigger4 = fiile.variables['armboneLisaSensors.GyroState.ts_trigger'].data[startSample:]*1.0e-9
    #fullscale = 2000 # deg/sec
    #data4 = -1.0 * rawdata4 / (2**15) * fullscale * pi/180 - 0.0202 # Rad/s
    data4 = rawdata4

    if startTime == -1:
        startTime = ts_trigger[0]

    times = ts_trigger-startTime
    times4 = ts_trigger4-startTime

    pylab.hold(True)

    plot(times, data2, '.-', label='On Motor Side of Belt')
    plot(times4, data4,'.-',  label='From Gyro on Arm')
    plot(times, data1, '.-', label='Setpoint (Echoed)')
    ylabel('Arm rotation speed [Rad/s]')
    xlabel('Time [s]')
    #legend(['Setpoint (Echoed)', 'Setpoint (Sent)', 'On Motor Side of Belt', 'From Gyro on Arm'])
    title('Plot of Signals Related to Arm Speed')
    return startTime
Example #19
def plot_coord_mapping(mapper,sheet,style='b-'):
    """
    Plot a coordinate mapping for a sheet.

    Given a CoordinateMapperFn (as for a CFProjection) and a sheet
    of the projection, plot a grid showing where the sheet's units
    are mapped.
    """

    from pylab import plot,hold,ishold

    xs = sheet.sheet_rows()
    ys = sheet.sheet_cols()

    hold_on = ishold()
    if not hold_on:
        plot()
    hold(True)

    for y in ys:
        pts = [mapper(x,y) for x in xs]
        plot([u for u,v in pts],
             [v for u,v in pts],
             style)

    for x in xs:
        pts = [mapper(x,y) for y in ys]
        plot([u for u,v in pts],
             [v for u,v in pts],
             style)

    hold(hold_on)
Example #20
def pyData2C( delta = 1e-2 ):
    #Load all the simulated trajectories
    file_name = os.path.join(RESULTS_DIR, 'OU_Xs.N=10.npy')
    trajectoryBank = load(file_name)
    
    #Select an arbitrary trajectory: (here the 2nd)
    figure(); hold(True);
    n_thin = int(delta / dt); print n_thin, delta 
    idx = 3;
    ts, Xs = trajectoryBank[:,0], trajectoryBank[:,idx]
    #Select sampling rate:    
    #Generate sampled data, by sub-sampling the fine trajectory:    
    ts_thin = ts[::n_thin]; Xs_thin =  Xs[::n_thin]
    
    file_name = os.path.join(RESULTS_DIR, 'Xdata.txt',);
#    outfile = open(file_name, 'w')
#    outfile.print(N);
#    outfile.print(delta);
#    for X in Xs_thin:
#        outfile.print(X)
#        
#    outfile.close()
    
    savetxt(file_name, Xs_thin, "%f");
    
    print 'Saved to ', file_name
    print 'N, delta,', len(Xs_thin), delta
Example #21
def run_demo(with_plots=True):
    """
    An example on how to simulate a model using the DAE simulator. The result 
    can be compared with that of sim_rlc.py which has solved the same problem 
    using dymola. Also writes information to a file.
    """

    curr_dir = os.path.dirname(os.path.abspath(__file__))

    model_name = "RLC_Circuit"
    mofile = curr_dir + "/files/RLC_Circuit.mo"

    jmu_name = compile_jmu(model_name, mofile)
    model = JMUModel(jmu_name)
    init_res = model.initialize()

    (E_dae, A_dae, B_dae, F_dae, g_dae, state_names, input_names, algebraic_names, dx0, x0, u0, w0, t0) = linearize_dae(
        init_res.model
    )

    (A_ode, B_ode, g_ode, H_ode, M_ode, q_ode) = linear_dae_to_ode(E_dae, A_dae, B_dae, F_dae, g_dae)

    res1 = model.simulate()

    jmu_name = compile_jmu("RLC_Circuit_Linearized", mofile)
    lin_model = JMUModel(jmu_name)
    res2 = lin_model.simulate()

    c_v_1 = res1["capacitor.v"]
    i_p_i_1 = res1["inductor.p.i"]
    i_p1_i_1 = res1["inductor1.p.i"]
    t_1 = res1["time"]

    c_v_2 = res2["x[1]"]
    i_p_i_2 = res2["x[2]"]
    i_p1_i_2 = res2["x[3]"]
    t_2 = res2["time"]

    assert N.abs(res1.final("capacitor.v") - res2.final("x[1]")) < 1e-3

    if with_plots:
        p.figure(1)
        p.hold(True)
        p.subplot(311)
        p.plot(t_1, c_v_1)
        p.plot(t_2, c_v_2, "g")
        p.ylabel("c.v")
        p.legend(("original model", "linearized ODE"))
        p.grid()
        p.subplot(312)
        p.plot(t_1, i_p_i_1)
        p.plot(t_2, i_p_i_2, "g")
        p.ylabel("i.p.i")
        p.grid()
        p.subplot(313)
        p.plot(t_1, i_p1_i_1)
        p.plot(t_2, i_p1_i_2, "g")
        p.ylabel("i.p1.i")
        p.grid()
        p.show()
Example #22
def estimateHarness( delta = 1e-1,
                     alpha = .0,
                     num_samples=10,
                     Tf_sample = Tf ):
    #Load all the simulated trajectories
    file_name = os.path.join(RESULTS_DIR,
                              'OU_Xs.a=%.3f_N=%d.npy'%(alpha,
                                                       num_samples));
    trajectoryBank = load(file_name)
    
    #Select an arbitrary trajectory: (here the 2nd)
    figure(); hold(True);
    n_thin = int(delta / dt); print n_thin
    N_sample = int(Tf_sample / dt) 
#    for idx in xrange(1,10):
#    for idx in [2]: #xrange(3,4):
    for idx in xrange(1,num_samples+1):
        ts, Xs = trajectoryBank[:N_sample,0], trajectoryBank[:N_sample,idx]
    
        #Select sampling rate:    
        #Generate sampled data, by sub-sampling the fine trajectory:    
        ts_thin = ts[::n_thin];
        Xs_thin = Xs[::n_thin];
        
        #Obtain estimator
#        est_params = estimateParams(Xs_thin, delta)
#        print 'est original: %.4f,%.4f, %.4f'%(est_params[0],est_params[1],est_params[2])
        est_params = estimateParamsBeta(Xs_thin, delta, alpha)
        print 'est reduced: %.4f,%.4f, %.4f'%(est_params[0],est_params[1],est_params[2])
        plot(ts_thin, Xs_thin);
         
    print 'true param values:', [mu, beta, sigma]
Example #23
def BetavsT_AP_P(folder,keys):
  AP = Analysis.AnalyseFile()
  P = Analysis.AnalyseFile()
  I = 300e-6

  for f in folder:
    a=Analysis.AnalyseFile(f)
    fit= a.curve_fit(quad,'Current','Voltage',bounds=lambda x,y:x,result=True,header='Fit',asrow=True)
   
    if f['iterator'] == 7:
      AP.add_column(fit,str(f['iterator']))
      AP['Sample Temp'] = f['Sample Temp']
      Spc = ((-0.01411*f['Sample Temp'])-0.11185)*1e-6  
      T_d = -fit[0]*(I*I)/Spc
      AP['DeltaTemp'] = T_d
    elif f['iterator'] == 6:
      P.add_column(fit,str(f['iterator']))
      P['Sample Temp'] = f['Sample Temp']
      Spc = ((-0.01411*f['Sample Temp'])-0.11185)*1e-6  
      T_d = -fit[0]*(I*I)/Spc
      P['DeltaTemp'] = T_d
  
  
    
  plt.hold(True)
  plt.title(r'$\beta$ coef of NLIV vs Temp')
  plt.xlabel('Temperature (K)')
  plt.ylabel(r'$\beta$ (V/A$^2$)')
  plt.plot(f['IVtemp'],P['DeltaTemp']-AP['DeltaTemp'],'ok')
Example #24
def NormDeltaRvT(folder,keys):
  if folder[0]['IVtemp']<250 and folder[0]['IVtemp']>5:
    APiterator = [5,10]
    AP = Analysis.AnalyseFile()
    P = Analysis.AnalyseFile()
    tsum = 0.0
    for f in folder:
      if f['iterator'] in APiterator:
        AP.add_column(f.column('Voltage'),str(f['iterator']))
      else:
        P.add_column(f.column('Voltage'),str(f['iterator']))
      tsum = tsum + f['Sample Temp']
      
    AP.apply(func,0,replace=False,header='Mean NLV')
    AP.add_column(f.Current,column_header = 'Current')
    P.apply(func,0,replace=False,header='Mean NLV')
    P.add_column(f.Current,column_header = 'Current')
    
    APfit= AP.curve_fit(quad,'Current','Mean NLV',bounds=lambda x,y:x,result=True,header='Fit',asrow=True)
    Pfit = P.curve_fit(quad,'Current','Mean NLV',bounds=lambda x,y:x,result=True,header='Fit',asrow=True)
    
    DeltaR = Pfit[2] - APfit[2]
    ErrDeltaR = numpy.sqrt((Pfit[3]**2)+(APfit[3]**2))
    Spinsig.append(DeltaR/Res_Cu(tsum/10))
    Spinsig_error.append(ErrDeltaR)
    
    Temp.append(tsum/10)
    
    plt.hold(True)
    plt.title('$\Delta$R$_s$ vs T from linear coef of\nNLIV fit for '+f['Sample ID'],verticalalignment='bottom')
    plt.xlabel('Temperature (K)')
    plt.ylabel(r'$\Delta$R$_s$/$\rho$')
    plt.errorbar(f['IVtemp'],1e3*DeltaR,1e3*ErrDeltaR,ecolor='k',marker='o',mfc='r', mec='k')
    #plt.plot(f['IVtemp'],ErrDeltaR,'ok')
    return Temp, Spinsig
Example #25
def anim_slices(dir="bckreact_b5_n01_E1_M14_t4d3",out=[2,3,4,5,6,7,8,9],end=[1.,1.,1.],N=None,var='d',log=True,fig=1,over=False, rmax=10.0):
#def anim_slices(dir="backreaction_b5_3d3yr",out=[3,6],end=[1.,1.,1.],N=None,var='d',log=True,fig=1,over=False, rmax=15.0, mp4="dens_maps.mp4"):
    # reading time intervals
    tseq = read_times(dir=dir+'/')

    pylab.close(fig)
    figure = matplotlib.pyplot.figure(num=fig)
    #a = matplotlib.pyplot.gca()
    a = None
    
    ims = []
    for i in out:
        print 'output #',i, '  age:',tseq[i-1]
        connect(dir=dir,out=i,var=[var])
        load_slices()
        [map,img, jet] = show_slice(var=var,log=False,rmax=rmax,ax=a,time=tseq[i-1])
        ann = pylab.annotate(tseq[i-1]+' yrs', xy=(.8, .9),  xycoords='axes fraction', horizontalalignment='center', verticalalignment='center', color="white") 
        ims.append((img,ann,))
        a = jet.ax
        #ims.append((img,ann,jet,))
        # artificially expand the longer steps
        #if eval(tseq[i-1])>(5.e2-1.0):
            #ims.append((img,ann))
        #    if eval(tseq[i-1])>1.5e3-1.0:  ims.append((img,ann))
        fname = '_tmp%03d.png'%i
        print 'Saving frame', fname
        figure.savefig(basedir+dir+'/'+fname)
        pylab.hold(False)

    matplotlib.pyplot.xlabel('r, pc')
    matplotlib.pyplot.ylabel('r, pc')

    im_ani = anim.ArtistAnimation(figure, ims, interval=200, repeat_delay=500, blit=True)
Example #26
def DRIVplot(folder,keys):
  T = 281
  APiterator = [5,10]
  AP = Analysis.AnalyseFile()
  P = Analysis.AnalyseFile()
  if folder[0]['IVtemp'] == T:
    scale = 1e6
    plt.hold(True)
    plt.title('NLIV in P and AP at ' + str(T) + 'K')
    plt.xlabel('Current ($\mu$A)')
    plt.ylabel('V$_{NL}$ ($\mu$V)')
    for f in folder:
      if f['iterator'] in APiterator:
        AP.add_column(f.Voltage,str(f['iterator']))
      else:
        P.add_column(f.Voltage,str(f['iterator']))        
    AP.apply(func,0,replace=False,header='Mean NLVoltage')
    P.apply(func,0,replace=False,header='Mean NLVoltage')    
    
    I = numpy.arange(-295e-6,295e-6,1e-6)
    
    ap = interpolate.interp1d(f.column('Current'),AP.column('Mean NLV'))    
    p = interpolate.interp1d(f.column('Current'),P.column('Mean NLV')) 
    
    print P
    plt.title(' ',verticalalignment='bottom')
    plt.xlabel('Current ($\mu$A)')
    #plt.ylabel('V$_{NL}$/|I| (V/A)')
    plt.ylabel('$\Delta$V$_{NL}$/|I| (mV/A)') 
    plt.plot(f.column('Current')*scale,1e3*(P.column('Mean NLV')-AP.column('Mean NLV'))/abs(f.column('Current')),label =''+str(T)+ ' K')
    #plt.plot(f.column('Current')*scale,1e3*(P.column('Mean NLV'))/abs(f.column('Current')),label ='P at '+str(T)+ ' K')
    #plt.plot(f.column('Current')*scale,1e3*(AP.column('Mean NLV'))/abs(f.column('Current')),label ='AP at '+str(T)+ ' K')        
    plt.legend(loc='upper left')
  else:
    return 1  
Example #27
	def plot_groups_at_time_point(self,t, feat1, feat2):


		markers = ['ro', 'go', 'bo', 'yo', 'ko', 'mo', 'co']

		cp_list = self.cell_tracker.list_of_cell_profiles_per_timestamp[t].list_of_cell_profiles



		fig = pylab.figure( facecolor='white')

		counter = -1

		for group_name in self.groups.keys():

			counter +=1
			gr = self.groups[group_name][t]

			feat1_vals = []
			feat2_vals = []
			
			for idx in gr:
				feat1_vals.append(cp_list[idx].dict_of_features[feat1])
				feat2_vals.append(cp_list[idx].dict_of_features[feat2])

			pylab.plot(feat1_vals, feat2_vals, markers[counter], label = group_name)

			pylab.hold(True)

		fig.canvas.set_window_title("Time point %s" % t)

		pylab.legend(loc="best")
		pylab.xlabel(feat1)
		pylab.ylabel(feat2)
		pylab.grid()
Example #28
	def plot_dist_to_nucleus_hists(self):

		fig = pylab.figure(figsize=(7,5), facecolor='white')
		fig.canvas.set_window_title("Distance to nucleus histograms")

		num_bins = len(self.cell_tracker.list_of_cell_profiles_per_timestamp[0].list_of_cell_profiles[0].dict_of_features["border_to_nucleus_dist_hist"])
		color_idx = np.linspace(0, 1, num_bins )
	
		bins = np.arange(0,1, 1/float(num_bins))


		# histogram of cell sizes
		for colorshift, i in zip(color_idx, xrange(len(self.cell_tracker.list_of_cell_profiles_per_timestamp))):
			ts = self.cell_tracker.list_of_cell_profiles_per_timestamp[i].time_stamp
			hists = self.cell_tracker.list_of_cell_profiles_per_timestamp[i].list_of_cell_profiles[0].dict_of_features["border_to_nucleus_dist_hist"]
			for idx in  xrange(1,len(self.cell_tracker.list_of_cell_profiles_per_timestamp[i].list_of_cell_profiles)):
				cp_item = self.cell_tracker.list_of_cell_profiles_per_timestamp[i].list_of_cell_profiles[idx]
				hist = cp_item.dict_of_features["border_to_nucleus_dist_hist"]
				hist = np.array(hist) / float(np.sum(hist))
				hists = np.vstack((hists, hist))

			hist = hists.mean(0)
			pylab.plot(bins, hist , linewidth=3.0, color=pylab.cm.winter(colorshift), label="t = "+str(ts))
			pylab.hold(True)
		
		pylab.xlabel("")
		pylab.ylabel("")
		fig.canvas.set_window_title("Distance to nucleus histograms")
	
		pylab.legend()
Example #29
def plotTrackingHigh(trackResults, settings):
  fig = pylab.figure()
  fig.clf()
  if (settings.plotTrackingNumPts > len(trackResults[0].I_P)):
    x_pts = [i*0.001 for i in range(len(trackResults[0].I_P))]
  else:
    x_pts = [i*0.001 for i in range(settings.plotTrackingNumPts)]
  colors = [(0,0,0),\
            (0,0,1),\
            (0,1,0),\
            (0,1,1),\
            (1,0,0),\
            (1,0,1),\
            (1,1,0),\
            (0,0,0.5),\
            (0,0.5,0),\
            (0,0.5,0.5),\
            (0.5,0,0),\
            (0.5,0,0.5),\
            (0.5,0.5,0),\
            (0.5,0.5,0.5)]
  pylab.title("Prompt correlation magnitude of each channel")
  pylab.xlabel("Time")
  pylab.hold(True)

  for channelNr in range(len(trackResults)):
    pylab.plot(x_pts,\
             np.sqrt(np.square(trackResults[channelNr].I_P[0:len(x_pts)])\
               + np.square(trackResults[channelNr].Q_P[0:len(x_pts)])),\
             color=colors[channelNr], label=("PRN %2d" % (trackResults[channelNr].PRN)))
  pylab.legend()
  pylab.hold(False)

  return fig
Example #30
    def plot(isDraw=True):
        # return
        pylab.subplot(3, 1, 1)
        pylab.title("State")

        pylab.plot([x[0, 0] for x in kal.xhist], "k-", label="Estimated state")
        pylab.hold(True)
        pylab.plot([t[0, 0] for t in states[1:]], "r-", label="True state")
        # pylab.legend(loc="lower right")

        pylab.hold(False)
        pylab.subplot(3, 1, 2)
        pylab.plot([t[0, 0] for t in measures], label="Measurement")
        pylab.hold(False)
        pylab.legend()

        pylab.hold(False)
        pylab.subplot(3, 1, 3)
        pylab.plot([tmpx[0, 0] - t[0, 0] for tmpx, t in zip(kal.xhist, states[1:])], label="Estimation error")
        pylab.hold(False)
        pylab.legend()

        if isDraw:
            pylab.draw()
        else:
            pylab.show()
Example #31
 def plot_profile(self,
                  axis,
                  index,
                  n_bins=100,
                  range_plot=None,
                  true_value=None,
                  estimated_value=None,
                  figure=None,
                  cmap=None,
                  color_line=None,
                  title="Profile"):
     # FIXME: implementation is 2-D only, extend to 3-D
     if axis != 0 and axis != 1:
         raise ("axis: 0 or 1")
     if cmap is None:
         cdict = {
             'red': ((0.0, 1.0, 1.0), (1.0, 0.7, 0.7)),
             'green': ((0.0, 1.0, 1.0), (1.0, 0.0, 0.0)),
             'blue': ((0.0, 1.0, 1.0), (1.0, 0.0, 0.0))
         }
         cmap = pylab.matplotlib.colors.LinearSegmentedColormap(
             'my_colormap', cdict, 256)
     shape = self._tracer.shape()
     n_points = shape[1 - axis]
     M = np.zeros((n_points, n_bins))
     if range_plot is None:
         range_plot = (self._tracer.min(), self._tracer.max())
     for i in range(n_points):
         if axis == 0:
             M[i, :] = self._tracer.histogram((index, i),
                                              n_bins=100,
                                              range_histogram=range_plot)[0]
         else:
             M[i, :] = self._tracer.histogram((i, index),
                                              n_bins=100,
                                              range_histogram=range_plot)[0]
     if axis == 0:
         p = self._tracer.get_samples()[:, index, :]
         if true_value is not None:
             p_t = true_value[index, :]
         if estimated_value is not None:
             p_e = estimated_value[index, :]
     else:
         p = self._tracer.get_samples()[:, :, index]
         if true_value is not None:
             p_t = true_value[:, index]
         if estimated_value is not None:
             p_e = estimated_value[:, index]
     p_m = np.mean(p, 0)
     if color_line is None:
         color_line = hsv_to_rgb(0.2, 0.0, 0.3)
     if not figure:
         pass  # figure = pylab.figure()
     else:
         pylab.figure(figure.number)
     pylab.hold(1)
     pylab.plot((n_bins / (range_plot[1] - range_plot[0])) * p_m,
                color=color_line,
                linewidth=1,
                linestyle='dashed')
     if true_value is not None:
         pylab.plot((n_bins / (range_plot[1] - range_plot[0])) * p_t,
                    color=color_line,
                    linewidth=1,
                    linestyle='solid')
     if estimated_value is not None:
         pylab.plot((n_bins / (range_plot[1] - range_plot[0])) * p_e,
                    color=color_line,
                    linewidth=1,
                    linestyle='dotted')
     pylab.imshow(M.transpose(), cmap=cmap, origin="lower", aspect="auto")
     pylab.title(title)
     pylab.draw()
Example #32
def Wave2DAnim(snapshots,
               ds,
               dt,
               vel,
               filename='wave2danim',
               norm=True,
               vmin=None,
               vmax=None,
               anim="avi",
               fps=15):
    r"""
    Create an animation file from a matrix resulting from a simulation of a 2d wave field.
    Creates many intermediate files to achieve that, uses ImageMagick.
    Z is downward.

    * snapshots : is a 3d matrix [time][nz][nx] - pressure field
    * ds        : space increment (equal in the x and z axes)
    * dt        : time increment between simulation steps
    * vel       : 2d background velocity field
    * filename  : file name for the animation file
    * anim      : animation type (gif or avi)
    * fps       : frames per second
    * norm      : scale the values using the overall max and min (vmax/vmin)
    * vmin      : global minimum of snapshots
    * vmax      : global maximum of snapshots
    """
    py.ion()
    #max index time and max index space
    maxt = np.shape(snapshots)[0]
    maxk = np.shape(snapshots)[1]
    maxi = np.shape(snapshots)[2]
    if norm:
        # get the maximum and minimum values of the last 5%
        # snapshots to not blow the scale during the animation
        snaptmp = snapshots[-int(0.05 * maxt):]
        vmax = snaptmp.max()
        vmin = snaptmp.min()

        print "vmin : ", vmin, "vmax : ", vmax
    # space axis starting at 0 in x and z (using y coz' plotting)
    # extents of the picture,
    xmin, xmax = 0, ds * maxi
    ymin, ymax = 0, ds * maxk
    extent = xmin, xmax, ymax, ymin
    # font position
    width = xmax - xmin
    height = ymax - ymin
    posx = 0.8 * width + xmin
    posz = 0.8 * height + ymin
    # not working?
    # verticalalignment='top',
    # horizontalalignment='right'
    _ClearTempImages(filename, "png")  # clear any previous existing
    for t in xrange(maxt):
        py.hold(True)
        py.imshow(vel,
                  interpolation='bilinear',
                  cmap=cm.jet,
                  extent=extent,
                  origin='upper',
                  aspect='auto')
        py.imshow(snapshots[t],
                  interpolation='bilinear',
                  cmap=cm.Greys_r,
                  alpha=0.8,
                  extent=extent,
                  origin='upper',
                  aspect='auto',
                  vmin=vmin,
                  vmax=vmax)
        # optional cmap=cm.jet, aspect='auto' adjusts aspect to match the previous plot
        py.show()
        # draw time
        py.text(posx,
                posz,
                "{0:1.5f}".format(t * dt),
                alpha=0.8,
                style='italic',
                color='b')
        # since its just math objects would be perfect
        # will be something like Wave1DAnim001.png
        py.savefig(filename + "{0:03d}".format(t) + '.png', dpi=150)
        sys.stderr.write("\r progressing .. %.1f%%" %
                         (100.0 * float(t) / maxt))
        sys.stderr.flush()
        py.clf()
    sys.stdout.write(" done! \n")
    py.hold(False)
    py.ioff()
    py.close()
    if (anim == "gif"):
        AnimFromPng(filename, fps=fps)
    else:
        AnimFromPng(filename, False, fps)
    _ClearTempImages(filename, "png")
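AnimFromPng above is project code (not shown). A hedged alternative for stitching the numbered PNG frames into an animation, assuming the imageio package (v2 API) is available:

import glob
import imageio

frames = [imageio.imread(f) for f in sorted(glob.glob('wave2danim*.png'))]
imageio.mimsave('wave2danim.gif', frames, fps=15)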
Example #33
#REFERENCE
#listofalltimes[0] = times for density 25
#listofalltimes[1] = times for density 30
#listofalltimes[2] = times for density 35
#listofalltimes[3] = times for density 40
#listofalltimes[4] = times for density 45
#listofalltimes[5] = times for density 50

#listofalltimes[0][0] = times for density 25 with u = 2
#listofalltimes[0][1] =  times for density 25 with u = 3
#listofalltimes[0][2] = times for density 25 with u = 4

fig = figure()
ax = axes()
hold(True)


def boxsettings(bp):
    color = [(0.9769448735268946, 0.6468696110452877, 0.2151452804329661),
             (0.37645505989354233, 0.6875228836084111, 0.30496111115768654),
             (0.6151274326753975, 0.4961389476149738, 0.15244053646953548)]
    i = 0
    for box in bp['boxes']:
        box.set(color='#000000', linewidth=1)
        box.set(facecolor=color[i])
        i = i + 1

    for whisker in bp['whiskers']:
        whisker.set(color='#000000', linewidth=1, ls='-')
Example #34
a_sine += (a_random - 0.5) * 1.0

# Loudia's solution # --------------------------------- #
window = loudia.Window(frameSize, loudia.Window.HAMMING)
fft = loudia.FFT(fftSize)
peaks = loudia.PeakDetectionComplex(5, 4)
peaksinterp = loudia.PeakInterpolationComplex()
trajs = loudia.PeakTracking(5, 4, 3)


r_sine_windowed = window.process(a_sine)
r_sine_mag = fft.process(r_sine_windowed)
r_sine_peakpos, r_sine_peakmag, r_sine_peakphase = peaks.process(r_sine_mag)
r_sine_peakipos, r_sine_peakimag, r_sine_peakphasei = peaksinterp.process(r_sine_mag, r_sine_peakpos, r_sine_peakmag, r_sine_peakphase)
r_sine_trajpos, r_sine_trajmag = trajs.process(r_sine_mag, r_sine_peakipos, r_sine_peakimag)
# -------------------------------------------------------- #

print r_sine_mag
print r_sine_peakpos
print r_sine_peakmag
print r_sine_trajpos, r_sine_trajmag

import pylab
pylab.hold(True)
pylab.plot(abs(r_sine_mag[0,:]))
pylab.hold(True)
pylab.scatter(r_sine_peakpos[0,:], r_sine_peakmag[0,:])

pylab.show()
Example #35
numCondsToPlot = 3
S_top = np.log10(plotDepthResults(top, what, numCondsToPlot) / 1e-6) * 20.0
S_bottom = np.log10(
    plotDepthResults(bottom, what, numCondsToPlot) / 1e-6) * 20.0

m = [0, -4.0, -8.0, -12.0]
top_mu = S_top[:5, :].mean(axis=0)
top_err = S_top[:5, :].std(axis=0) / (len(top)**0.5)

bottom_mu = S_bottom[-5:, :].mean(axis=0)
bottom_err = S_bottom[-5:, :].std(axis=0) / (len(bottom)**0.5)

pl.close('all')
pl.figure()
pl.errorbar(m, top_mu, yerr=top_err, linewidth=3)
pl.hold(True)
pl.errorbar(m, bottom_mu, yerr=bottom_err, color='r', linewidth=3)
pl.plot(m, S_top.T, '--', linewidth=2, color=[0.5, 0.5, 0.5])
pl.plot(m, S_bottom.T, '--', linewidth=2, color=[0.5, 0.5, 0.5])
pl.xlabel('Modulation Depth (dB re: 100%)', fontsize=20)
pl.ylabel('EFR magnitude (dB re: 1uV)', fontsize=20)
pl.xlim((-12.1, 0.1))
pl.show()

allsubj = top + bottom
S_all = np.log10(plotDepthResults(allsubj, what, numCondsToPlot) / 1e-6) * 20
N_all = np.log10(plotDepthResults(allsubj, 'N', numCondsToPlot) / 1e-6) * 20
correction = 24.1
all_mu = S_all.mean(axis=0) - correction
all_err = S_all.std(axis=0) / (len(allsubj)**0.5)
all_mu_n = N_all.mean() - correction
Example #36
def plot_frame(features_array, ground_truth, sampleRate, hop_size, acf,
               periods, phases, bin2hz, minlag, maxlag, mcomb):
    import numpy
    from pylab import figure, subplot, imshow, plot, axis, hold, clf, show

    # norm features, acf and peaks
    pfeatures = numpy.array(features_array).transpose()
    maxis = pfeatures.max(axis=1)
    for i in range(len(maxis)):
        pfeatures[i] /= maxis[i]
    normed_acf = []
    for i in range(len(acf)):
        normed_acf.append(acf[i] / max(acf[i]))
    acf = normed_acf

    # begin plot
    figure()
    clf()

    subplot(311)
    hold(True)
    for i in range(len(pfeatures)):
        plot(pfeatures[i] + i)
    hold(False)
    axis('tight')

    subplot(312)
    hold(True)
    for i in range(len(mcomb)):
        plot(mcomb[i] / mcomb[i].max() + i)
        plot([mcomb[i].argmax()] * 2, [i, i + 1])
        plot([periods[i]] * 2, [i, i + 1])
    # plot the ground truth
    if ground_truth != None:
        plot([bpmtolag(ground_truth, sampleRate, hop_size)] * 2,
             [0., len(acf)], 'r-')
    # plot the bpm estimate
    for i in range(len(periods)):
        plot([periods[i]] * 2, [i, i + 1], 'g-')
    hold(False)
    axis('tight')

    subplot(313)
    hold(True)
    for i in range(len(pfeatures)):
        periodnum = 4
        phout = [0. for j in range(len(pfeatures[i]) / periodnum)]
        if periods[i] == 0: continue
        for j in range(len(phout)):
            for a in range(periodnum):
                phout[j] += pfeatures[i][a * periods[i] + j]
        plot(phout / max(phout) + i)
        phase = phases[i]
        if phase >= periods[i]:
            while phase >= periods[i]:
                phase -= periods[i]
        while phase < len(pfeatures[i]):
            plot([phase] * 2, [i, i + 1], 'r-')
            phase += periods[i]
    hold(False)
    axis([0., len(pfeatures[i]), 0., len(pfeatures)])

    show()
Example #37
def peakdetect_fft(y_axis, x_axis, pad_len=5):
    """
    Performs an FFT on the data and zero-pads the result to increase the
    time-domain resolution after the inverse FFT, then sends the data to the
    'peakdetect' function for peak detection.

    Omitting the x_axis is forbidden, as the resulting x_axis values would be
    meaningless if a peak were returned at an index such as 50.234.

    Will find at least one peak fewer than the 'peakdetect_zero_crossing'
    function, but should give a more precise value for each peak since the
    resolution has been increased. Some peaks are lost in an attempt to
    minimize spectral leakage by calculating the fft between two zero
    crossings spanning a whole number of signal periods.

    The biggest time eater in this function is the ifft, followed by the
    'peakdetect' function, which takes only half the time of the ifft.
    Speed improvements could include checking whether 2**n points can be
    used for the fft and ifft, or swapping 'peakdetect' for
    'peakdetect_zero_crossing', which is maybe 10 times faster than
    'peakdetect'. The advantage of 'peakdetect' is that it loses one peak
    fewer. It should also be noted that the time used by the ifft can vary
    greatly depending on the input.

    keyword arguments:
    y_axis -- A list containing the signal over which to find peaks
    x_axis -- An x-axis whose values correspond to the y_axis list and is
        used in the return to specify the position of the peaks.
    pad_len -- (optional) By how many times the time resolution should be
        increased, e.g. 1 doubles the resolution. The amount is rounded up
        to the nearest 2 ** n amount (default: 5)

    return -- two lists [max_peaks, min_peaks] containing the positive and
        negative peaks respectively. Each cell of the lists contains a tuple
        of: (position, peak_value)
        To get the average peak value, do np.mean(max_peaks, 0)[1] on the
        results; to unpack one of the lists into x, y coordinates, do:
        x, y = zip(*max_peaks)
    """
    # check input data
    x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)
    zero_indices = zero_crossings(y_axis, window=11)
    # select a whole number of periods
    last_indice = -1 - (1 - len(zero_indices) & 1)
    # Calculate the fft between the first and last zero crossing.
    # This step could be skipped if the beginning and the end of the signal
    # are discardable, as any errors induced by not using whole periods
    # should mainly manifest at the beginning and the end of the signal, but
    # not in the rest of it.
    fft_data = fft(y_axis[zero_indices[0]:zero_indices[last_indice]])
    padd = lambda x, c: x[:len(x) // 2] + [0] * c + x[len(x) // 2:]
    n = lambda x: int(log(x) / log(2)) + 1
    # pads to 2**n samples
    fft_padded = padd(list(fft_data),
                      2**n(len(fft_data) * pad_len) - len(fft_data))

    # There is amplitude decrease directly proportional to the sample increase
    sf = len(fft_padded) / float(len(fft_data))
    # There might be a leakage giving the result an imaginary component
    # Return only the real component
    y_axis_ifft = ifft(fft_padded).real * sf  #(pad_len + 1)
    x_axis_ifft = np.linspace(x_axis[zero_indices[0]],
                              x_axis[zero_indices[last_indice]],
                              len(y_axis_ifft))
    # get the peaks to the interpolated waveform
    max_peaks, min_peaks = peakdetect(y_axis_ifft,
                                      x_axis_ifft,
                                      500,
                                      delta=abs(np.diff(y_axis).max() * 2))
    #max_peaks, min_peaks = peakdetect_zero_crossing(y_axis_ifft, x_axis_ifft)

    # store one 20th of a period as waveform data
    data_len = int(np.diff(zero_indices).mean()) / 10
    data_len += 1 - data_len & 1

    fitted_wave = []
    for peaks in [max_peaks, min_peaks]:
        peak_fit_tmp = []
        index = 0
        for peak in peaks:
            index = np.where(x_axis_ifft[index:] == peak[0])[0][0] + index
            x_fit_lim = x_axis_ifft[index - data_len // 2:index +
                                    data_len // 2 + 1]
            y_fit_lim = y_axis_ifft[index - data_len // 2:index +
                                    data_len // 2 + 1]

            peak_fit_tmp.append([x_fit_lim, y_fit_lim])
        fitted_wave.append(peak_fit_tmp)

    #pylab.plot(range(len(fft_data)), fft_data)
    #pylab.show()

    pylab.plot(x_axis, y_axis)
    pylab.hold(True)
    pylab.plot(x_axis_ifft, y_axis_ifft)
    #for max_p in max_peaks:
    #    pylab.plot(max_p[0], max_p[1], 'xr')
    pylab.show()
    return [max_peaks, min_peaks]
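A minimal NumPy-only sketch of the zero-padding idea the docstring describes (no project code assumed): inserting zeros in the middle of the spectrum interpolates the signal onto a finer time grid, rescaled by the length ratio just like the sf factor above.

import numpy as np

x = np.linspace(0, 2 * np.pi, 64, endpoint=False)
y = np.sin(3 * x)
spec = np.fft.fft(y)
half = len(y) // 2
# insert zeros in the middle of the spectrum: 64 samples -> 256 samples
spec_padded = np.concatenate([spec[:half], np.zeros(3 * len(y)), spec[half:]])
# rescale by the length ratio, as peakdetect_fft does with sf
y_fine = np.fft.ifft(spec_padded).real * (len(spec_padded) / float(len(y)))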
Example #38
def plotDepthResults(subjlist,
                     what,
                     numCondsToPlot,
                     summary=False,
                     max=False,
                     ch_sel=30):

    # froot = '/home/hari/Documents/PythonCodes/research/DepthResults/'
    froot = '/home/hari/Documents/DepthResults/'

    nsubjs = len(subjlist)
    condlist = [[1, 7], [2, 8], [3, 9], [4, 10]]
    condstemlist = ['_0dB', '_m4dB', '_m8dB', '_m12dB']
    nconds = len(condlist)
    whatever_all = np.zeros((nsubjs, nconds))

    for k, subj in enumerate(subjlist):
        fpath = froot + subj + '/'

        whatever = np.zeros(len(condlist))

        for condind, cond in enumerate(condlist):
            condstem = condstemlist[condind]
            load_name = fpath + subj + condstem + '.mat'
            dat = io.loadmat(load_name)
            f = dat['f']

            f_ind = np.argmin(abs(f - 100))

            if (what == 'cplv'):
                summary = False
                whatever[condind] = dat[what][f_ind]

            else:

                if (summary):
                    if (max):
                        whatever[condind] = np.max(dat[what][:, f_ind])
                    else:
                        f_pca = (np.logical_and(f > 80, f < 115)).squeeze()
                        C = np.cov(dat[what][:, f_pca])
                        lambdas, wts = linalg.eigh(C)
                        w = wts[:, -1] / (wts[:, -1]).sum()
                        whatever[condind] = np.dot(w, dat[what])[f_ind]
                else:
                    whatever[condind] = dat[what][ch_sel, f_ind]

        whatever_all[k, :] = whatever
        pl.plot(20 * np.log10(whatever[0:numCondsToPlot] / 1e-6),
                'o-',
                linewidth=2)
        pl.ylabel(what + ' (dB re: 1 sq.micro.V)', fontsize=20)
        pl.xlabel('Modulation Depth', fontsize=20)
        pl.hold(True)
        ax = pl.gca()
        for tick in ax.xaxis.get_major_ticks():
            tick.label1.set_fontsize(20)
        for tick in ax.yaxis.get_major_ticks():
            tick.label1.set_fontsize(20)

    pl.show()
    pl.legend(subjlist)
    return whatever_all