def scoring(truth, predicted_in, legit):
    breaks = detect_breaks(legit)
    for i in range(len(breaks) - 1):
        seg = slice(breaks[i] + 1, breaks[i + 1])
        if len(predicted_in[seg]) > 50:
            predicted_in[seg] = savgol_filter(predicted_in[seg], 31, 2)
        elif len(predicted_in[seg]) > 10:
            predicted_in[seg] = savgol_filter(predicted_in[seg], 5, 2)
    # plt.plot(legit/30.0, truth*15, label="truth")
    # plt.plot(legit/30.0, predicted_in, label="predicted")
    # plt.plot(legit/30.0, np.zeros(len(legit))+0.75)
    # plt.ylim([0,20])
    # plt.legend()
    # plt.show()

    pred_locs = np.where(predicted_in>0.75)[0]
    predicted = np.zeros(len(predicted_in))
    predicted[pred_locs] = 1
    for loc in pred_locs:
        predicted[loc-15:loc+15] = 1
    true_correct = sum(np.logical_and(predicted,truth==1))
    true = sum(truth)
    detected = sum(predicted)
    if detected == 0:
        precision = np.nan
    else:
        precision = true_correct / float(detected)
    if true == 0:
        recall = np.nan
    else:
        recall = true_correct / true

    return np.array([precision, recall, true/30.0])
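# A minimal, self-contained sketch of the scoring logic above (hypothetical
# toy arrays, no Savitzky-Golay pre-smoothing): each detection above the 0.75
# threshold is dilated by +/-15 samples before precision/recall are computed.
import numpy as np

truth = np.zeros(100)
truth[40:45] = 1
predicted_in = np.zeros(100)
predicted_in[42] = 0.9                      # one detection above threshold

predicted = np.zeros(len(predicted_in))
for loc in np.where(predicted_in > 0.75)[0]:
    predicted[max(loc - 15, 0):loc + 15] = 1  # dilate, guarding the left edge

true_correct = np.logical_and(predicted, truth == 1).sum()
precision = true_correct / predicted.sum() if predicted.sum() else np.nan
recall = true_correct / truth.sum() if truth.sum() else np.nan
print(precision, recall)                    # ~0.17 and 1.0 for this toy case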
Example #2
def smooth(x,y):
    buf = y.copy()
    buf = savgol_filter(buf, 137, 1, mode='interp')
    buf = savgol_filter(buf, 137, 1, mode='interp')
    #y = savgol_filter(y, 31, 1, mode='interp')
    #y[900:y.shape[0]] = y[900]
    #ind = np.linspace(0,19,20,dtype=np.int)
    #ind = np.append(ind,np.linspace(1003,1023,20,dtype=np.int))
    #slope, intercept, r_value, p_value, std_err = stats.linregress(x[ind],y[ind])
    #print((slope, intercept))
    #slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
    #y = y - (-slope*x+intercept)
    #slope = (buf[1023] - buf[200]) / (x[1023] -x[200])
    #intercept = buf[200] - slope*x[200]
    #res = buf - (slope*x  + intercept)

    ind1 = np.linspace(300, 319, 20, dtype=int)
    ind2 = np.linspace(1003, 1023, 10, dtype=int)
    slope = (np.mean(buf[ind2]) - np.mean(buf[ind1])) / (np.mean(x[ind2]) - np.mean(x[ind1]))
    intercept = np.mean(buf[ind1]) - slope*np.mean(x[ind1])
    #print((np.mean(y[ind1]),np.mean(x[ind1])))
    #print((slope, intercept))
    l = slope*x + intercept
    buf = np.subtract(buf,l)
    #slope = -(np.mean(y[ind2]) - np.mean(y[ind1])) / (np.mean(x[ind2]) - np.mean(x[ind1]))
    #intercept = np.mean(y[ind1]) - slope*np.mean(x[ind1])
    #l = slope*x + intercept
    #y = l # np.subtract(y,l)
    #y = y - np.min(y)
    buf = buf*gauss(x,1,600,500,0)
    #s = interpolate.InterpolatedUnivariateSpline(np.linspace(200,900,len(x)),x)
    #return s(np.linspace(200,900,len(x)))
    return buf
Example #3
    def createWG( self ):
        if ( self.wpd is None ):
            print ("No WebPlotDigitizer object given!")
            return 1

        wg = self.wpd.get("waveguide")
        if ( wg is None ):
            print ("Error when reading the waveguide data!")
            return
        x = np.linspace( np.min(wg.x), np.max(wg.x), len(wg.x) )
        interpolator = interp.interp1d( wg.x, wg.y )
        y = interpolator(x)

        # Smooth data
        wLen = int( len(y)/2 )
        if ( wLen%2 == 0 ):
            wLen += 1
        smoothedY = sig.savgol_filter( y, wLen, 3 )
        self.smoothedWG = wpd.WpdDataset()
        self.smoothedWG.x = x
        self.smoothedWG.y = smoothedY

        deriv = sig.savgol_filter(y, wLen, 3, deriv=1, delta=x[1]-x[0] )
        self.wgSlope = wpd.WpdDataset()
        self.wgSlope.x = x
        self.wgSlope.y = deriv
        self.angles = None
        self.angleX = None
def savgol(time, y):
    """
    Calculates velocity and acceleration using savgol filter from scipy. Since
    the filter assumes that the whole signal is known, I perform differentiation
    on the final two points after savgol is applied.
    """
    y_d = np.zeros(time.shape)
    y_dd = np.zeros(time.shape)
    window = poly_window

    for i in range(window * sigma, time.shape[0]):
        y_history = y[i - window * sigma + 1:i + 1]
        filtered_values = signal.savgol_filter(
            y_history, window_length=window, polyorder=degree)
        y_d[i] = (filtered_values[-1] - filtered_values[-2]) / time_step

    for i in range(window * sigma, time.shape[0]):
        y_history = y_d[i - window * sigma + 1:i + 1]
        filtered_values = signal.savgol_filter(
            y_history, window_length=window, polyorder=degree)
        y_dd[i] = (filtered_values[-1] - filtered_values[-2]) / time_step

    y_d = y_d / encoder_resolution
    y_dd = y_dd / encoder_resolution

    return y_d, y_dd
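# A self-contained sketch of the causal trailing-window scheme used by
# savgol() above. The module-level constants (poly_window, sigma, degree,
# time_step, encoder_resolution) are not defined in the snippet, so the
# values below are assumptions for illustration only.
import numpy as np
from scipy import signal

poly_window, sigma, degree = 11, 2, 3       # assumed stand-ins
time_step, encoder_resolution = 0.01, 1.0   # assumed stand-ins

time = np.arange(0, 2, time_step)
y = np.sin(2 * np.pi * time)                # synthetic encoder signal

y_d = np.zeros(time.shape)
window = poly_window
for i in range(window * sigma, time.shape[0]):
    y_history = y[i - window * sigma + 1:i + 1]    # trailing buffer only
    filtered = signal.savgol_filter(y_history, window_length=window,
                                    polyorder=degree)
    y_d[i] = (filtered[-1] - filtered[-2]) / time_step  # last-two-point diff

y_d /= encoder_resolution
# After the start-up region, y_d tracks the true derivative 2*pi*cos(2*pi*t).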
    def doTask(self):
        self.initialize()
        N = self.pts.shape[0]
        
        angles = []
        for i in range(N-1):
            pos = self.pts[i,:]
            nextpos = self.pts[i+1,:]
            angle = self.get_angle(np.ravel(pos), np.ravel(nextpos))
            angles.append(angle)

        for i in range(len(angles)-2):
            angles[i] = 0.5 * angles[i] + 0.35 * angles[i+1] + 0.15 * angles[i+2]
        # smooth once after the blending pass (the window must be an odd integer)
        angles = savgol_filter(angles, self.factor * (N // 12) + 1, 2)

        for i in range(N-1):
            self.cut()
            pos = self.pts[i,:]
            nextpos = self.pts[i+1,:]
            frame = self.get_frame_next(np.ravel(pos), np.ravel(nextpos), offset=0.004, angle = angles[i])
            self.nextpos.publish(Pose(frame.position, frame.orientation))

            if not self.simulate:
                self.psm1.move_cartesian_frame(frame)
            else:
                print("[ClosedLoopCut] Simulated Move to", frame)
                time.sleep(1)

            curpt = np.ravel(np.array(self.psm1.get_current_cartesian_position().position))
            self.pts[i,:] = curpt
            self.pts[i+1,:2] = savgol_filter(self.pts[:,:2], 5, 2, axis=0)[i+1,:]
            self.traj.append(self.psm1.get_current_cartesian_position())
def smoothSavgol(Data, N=11, order=2):

	y = savgol_filter(Data[0], N, order)
	# three more passes; see http://pubs.acs.org/doi/pdf/10.1021/ac50064a018
	for i in range(3):
		y = savgol_filter(y, N, order)

	return np.array([y, Data[1]])
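# The repeated passes in smoothSavgol follow the cited ACS note: re-applying
# the same Savitzky-Golay filter suppresses more noise at the cost of a
# little extra signal attenuation. A quick hedged comparison on synthetic data:
import numpy as np
from scipy.signal import savgol_filter

rng = np.random.default_rng(0)
x = np.linspace(0, 1, 500)
clean = np.sin(2 * np.pi * x)
noisy = clean + rng.normal(0, 0.2, x.size)

once = savgol_filter(noisy, 11, 2)
multi = noisy.copy()
for _ in range(4):                          # four total passes, as above
    multi = savgol_filter(multi, 11, 2)

print(np.std(once - clean), np.std(multi - clean))  # compare residual noise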
Example #7
def smooth(data):
    # smooth turning noise and distance in the hexbug
    nparray = np.array(data)
    x, y = nparray.T
    resampledx = savgol_filter(x,window_length=31,polyorder=3)
    resampledy = savgol_filter(y,window_length=31,polyorder=3)
    smoothed = np.column_stack((resampledx, resampledy)).tolist()
    return smoothed
def test_sg_filter_basic():
    # Some basic test cases for savgol_filter().
    x = np.array([1.0, 2.0, 1.0])
    y = savgol_filter(x, 3, 1, mode='constant')
    assert_allclose(y, [1.0, 4.0 / 3, 1.0])

    y = savgol_filter(x, 3, 1, mode='mirror')
    assert_allclose(y, [5.0 / 3, 4.0 / 3, 5.0 / 3])
Example #9
def mktelluric(fs=None):
    iraf.cd('work')
    if fs is None:
        fs = glob('sci_com.fits')
    if len(fs) == 0:
        print("WARNING: No flux-calibrated spectra to make a telluric correction.")
        iraf.cd('..')
        return
        
    if not os.path.exists('tel'):
        os.mkdir('tel')
        
    # for each file
    f = fs[0]
    # if it is a standard star combined file
    if isstdstar(f):
        # read in the spectrum and calculate the wavelengths of the pixels
        hdu = pyfits.open(f)
        spec = hdu[0].data.copy()
        hdr = hdu[0].header.copy()
        hdu.close()
        waves = fitshdr_to_wave(hdr)
        
        template_spectrum = signal.savgol_filter(spec, 21, 3)
        noise = np.abs(spec - template_spectrum)
        noise = ndimage.filters.gaussian_filter1d(noise, 100.0)
        not_telluric = np.ones(spec.shape, dtype=bool)
        # For each telluric region
        for wavereg in telluricWaves:
            in_telluric_region = np.logical_and(waves >= wavereg[0],
                                                waves <= wavereg[1])
            not_telluric = np.logical_and(not_telluric,
                                             np.logical_not(in_telluric_region))
        
        # Smooth the spectrum so that the spline doesn't go as crazy
        # Use the Savitzky-Golay filter to preserve the edges of the
        # absorption features (both atmospheric and intrinsic to the star)
        sgspec = signal.savgol_filter(spec, 31, 3)
        #Get the number of data points to set the smoothing criteria for the 
        # spline
        m = not_telluric.sum()
        intpr = interpolate.splrep(waves[not_telluric], sgspec[not_telluric], 
                                   w = 1/noise[not_telluric], k=2,  s=10 * m)

        # Replace the telluric regions with the smoothed function
        smoothedspec = interpolate.splev(waves, intpr)
        smoothedspec[not_telluric] = spec[not_telluric]
        # Divide the original and the telluric corrected spectra to
        # get the correction factor
        correction = spec / smoothedspec

        # Save the correction
        dout = np.ones((2, len(waves)))
        dout[0] = waves
        dout[1] = correction
        np.savetxt('tel/telcor.dat', dout.transpose())
            
    iraf.cd('..')
Example #10
def RichardsonSilberberg(data, tau, time=None):
    D = data.view(np.ndarray)
    rn = tau * np.diff(D) + D[:-2, :]
    rn = spSignal.savgol_filter(rn, 11, 4)
    # rn = SavitzyGolay(rn, kernel = 11, order = 4) # old
    if time is not None:
        vn = rn - tau * spSignal.savgol_filter(np.diff(D), 11, 4)
        return (rn, vn)
    else:
        return rn
Example #11
def test_sg_filter_2d():
    x = np.array([[1.0, 2.0, 1.0],
                  [2.0, 4.0, 2.0]])
    expected = np.array([[1.0, 4.0 / 3, 1.0],
                         [2.0, 8.0 / 3, 2.0]])
    y = savgol_filter(x, 3, 1, mode='constant')
    assert_allclose(y, expected)

    y = savgol_filter(x.T, 3, 1, mode='constant', axis=0)
    assert_allclose(y, expected.T)
Example #12
def fit_semiconductor(t, data, sav_n=11, sav_deg=4, mode='sav', tr=0.4):
    from scipy.signal import savgol_filter
    from scipy.ndimage import gaussian_filter1d
    from scipy.optimize import leastsq
    ger =   data[..., -1].sum(2).squeeze()
    plt.subplot(121)
    plt.title('Germanium sum')
    plt.plot(t, ger[:,  0])
    plt.plot(t, ger[:,  1])
    if mode =='sav':
        plt.plot(t, savgol_filter(ger[:, 0], sav_n, sav_deg, 0))
        plt.plot(t, savgol_filter(ger[:, 1], sav_n, sav_deg, 0))
    plt.xlim(-1, 3)
    plt.subplot(122)
    plt.title('First derivative')
    if mode == 'sav':
        derv0 = savgol_filter(ger[:, 0], sav_n, sav_deg, 1)
        derv1 = savgol_filter(ger[:, 1], sav_n, sav_deg, 1)
    elif mode == 'gauss':
        derv0 =  gaussian_filter1d(ger[:, 0], sav_n, order=1)
        derv1 =  gaussian_filter1d(ger[:, 1], sav_n, order=1)
    plt.plot(t , derv0)
    plt.plot(t , derv1)
    plt.xlim(-.8, .8)
    plt.ylim(0, 700)
    plt.minorticks_on()
    plt.grid(1)

    def gaussian(p, ch, res=True):

        i, j = dv.fi(t, -tr), dv.fi(t, tr)
        w = p[0]
        A = p[1]
        x0 = p[2]
        fit = A*np.exp(-(t[i:j]-x0)**2/(2*w**2))
        if res:
            return fit-ch[i:j]
        else:
            return fit


    x0 = leastsq(gaussian, [.2, max(derv0), 0], args=(derv0,))
    plt.plot(t[dv.fi(t, -tr):dv.fi(t, tr)], gaussian(x0[0], 0, 0), '--k', )
    plt.text(0.05, 0.9, 'x$_0$ = %.2f\nFWHM = %.2f\nA = %.1f\n'%(x0[0][2],2.35*x0[0][0], x0[0][1]),
             transform=plt.gca().transAxes, va='top')

    x0 = leastsq(gaussian, [.2, max(derv1), 0], args=(derv1,))
    plt.plot(t[dv.fi(t, -tr):dv.fi(t, tr)], gaussian(x0[0], 1, 0), '--b', )

    plt.xlim(-.8, .8)
    plt.minorticks_on()
    plt.grid(0)
    plt.tight_layout()
    plt.text(0.5, 0.9, 'x$_0$ = %.2f\nFWHM = %.2f\nA = %.1f\n'%(x0[0][2],2.35*x0[0][0], x0[0][1]),
             transform=plt.gca().transAxes, va='top')
Example #13
def sg_filter(s1, winsize1=15, winsize2=11):
    s1m = ni.median_filter(s1, 11)
    #s1m = s1

    #winsize1 = 15
    #winsize2 = 11

    f1 = savgol_filter(s1m, winsize1, 3)

    f1_std = np.nanstd(s1-f1)

    if 0: # calculate weight
        f1_mask = np.abs(s1-f1) > 2.*f1_std
        f1_mask2 = ni.binary_opening(f1_mask, iterations=int(winsize2*0.2))
        f1_mask3 = ni.binary_closing(f1_mask2, iterations=int(winsize2*0.2))
        f1_mask4 = ni.binary_dilation(f1_mask3, iterations=int(winsize2))

        weight = ni.gaussian_filter(f1_mask4.astype("d"), winsize2)
    else:
        fd2 = savgol_filter(s1m, winsize1, 3, deriv=2)
        fd2_std = np.std(fd2)
        f1_mask = np.abs(fd2) > 2.*fd2_std

        f1_mask = f1_mask | (s1m < s1m.max()*0.4)

        f1_mask4 = ni.binary_dilation(f1_mask, iterations=int(winsize2))
        #f1_mask4[:300] = True
        #f1_mask4[-300:] = True
        weight = ni.gaussian_filter(f1_mask4.astype("d"), winsize2*.5)

    # find a region where deviation is significant

    if np.any(weight):
        weight/=weight.max()
        f2 = savgol_filter(s1m, winsize2, 5)
        f12 = f1*(1.-weight) + f2*weight
    else:
        f12 = f1
        weight = np.zeros(f12.shape)


    if 0:
        ax1.cla()
        ax2.cla()
        ax1.plot(f12)
        ax2.plot(s1 - f1, color="0.5")
        ax2.plot(s1 - f12)
        ax2.plot(weight * f1_std*2)

        ax2.set_ylim(-0.02, 0.02)

    return f12, f1_std
Example #14
def calculate_velocity(series):
    """
    Given the pen samples ndarray in series, calculate velocity for x and y
    fields, updating the associated '*_velocity' fields
    with the calculated result.
    :param series: markwrite sample ndarray.
    :return:
    """

    # Calculate velocity using 1st deriv scipy.signal.savgol_filter,
    # as it does not phase shift the data and does a good job of
    # smoothing out noise with minimal signal distortion.
    # See http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.savgol_filter.html#scipy.signal.savgol_filter
    if len(series)==1:
        # nothing to do
        return

    dx = series['x_filtered'][1:]-series['x_filtered'][0:-1]
    dy = series['y_filtered'][1:]-series['y_filtered'][0:-1]
    xy_velocity = np.hypot(dx, dy)

    wlength = 0
    polyo = 0
    if len(series)> window_length:
        wlength = window_length
        polyo = polyorder
    elif len(series) > 10:
        wlength = 5
        polyo = 3

    if wlength > 0:
        series['x_velocity'] = savgol_filter(series['x'], wlength, polyo,
                                         deriv=1, delta=1.0)
        series['y_velocity'] = savgol_filter(series['y'], wlength, polyo,
                                         deriv=1, delta=1.0)
        #series['xy_velocity'][1:] = savgol_filter(xy_velocity, wlength, polyo)
        #series['xy_velocity'][0] = series['xy_velocity'][1]
        #series['xy_acceleration'] = savgol_filter(series['xy_velocity'],
        #                                           wlength, polyo,
        #                                           deriv=1, delta=1.0)

    else:
        series['x_velocity'][1:] = dx
        series['y_velocity'][1:] = dy
        series['x_velocity'][0] = series['x_velocity'][1]
        series['y_velocity'][0] = series['y_velocity'][1]

    series['xy_velocity'][1:] = xy_velocity
    series['xy_velocity'][0] = series['xy_velocity'][1]

    series['xy_acceleration'][1:] = series['xy_velocity'][1:]-series['xy_velocity'][:-1]
    series['xy_acceleration'][0] = series['xy_acceleration'][1]
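# The comment above is the key point: savgol_filter(..., deriv=1, delta=dt)
# smooths and differentiates in one step without phase-shifting the result.
# A minimal sketch with a synthetic noisy path and a hypothetical interval dt:
import numpy as np
from scipy.signal import savgol_filter

dt = 1.0
x = np.cumsum(np.random.default_rng(1).normal(0, 1, 200))   # noisy x path

# derivative computed inside the filter (smoothed, zero phase shift)
vx = savgol_filter(x, window_length=15, polyorder=2, deriv=1, delta=dt)

# two-step alternative: finite differences first, then smooth
vx_alt = savgol_filter(np.gradient(x, dt), window_length=15, polyorder=2)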
def smoothSkeletons(skeleton, length_resampling = 131, smooth_win = 11, pol_degree = 3):
    xx = savgol_filter(skeleton[:,0], smooth_win, pol_degree)
    yy = savgol_filter(skeleton[:,1], smooth_win, pol_degree)
    
    ii = np.arange(xx.size)
    ii_new = np.linspace(0, xx.size-1, length_resampling)
    
    fx = interp1d(ii, xx)
    fy = interp1d(ii, yy)
    
    xx_new = fx(ii_new)
    yy_new = fy(ii_new)
    
    skel_new = np.vstack((xx_new, yy_new)).T
    return skel_new
Example #16
def test_sg_filter_trivial():
    """ Test some trivial edge cases for savgol_filter()."""
    x = np.array([1.0])
    y = savgol_filter(x, 1, 0)
    assert_equal(y, [1.0])

    # Input is a single value.  With a window length of 3 and polyorder 1,
    # the value in y is from the straight-line fit of (-1,0), (0,3) and
    # (1, 0) at 0. This is just the average of the three values, hence 1.0.
    x = np.array([3.0])
    y = savgol_filter(x, 3, 1, mode='constant')
    assert_equal(y, [1.0])

    x = np.array([3.0])
    y = savgol_filter(x, 3, 1, mode='nearest')
    assert_equal(y, [3.0])
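# The comment's arithmetic can be checked directly: with mode='constant'
# (cval=0) the padded window around the single sample 3.0 is (-1, 0), (0, 3),
# (1, 0), and the least-squares line through those points evaluated at 0 is
# just their mean:
import numpy as np

slope, intercept = np.polyfit([-1.0, 0.0, 1.0], [0.0, 3.0, 0.0], 1)
print(intercept)    # 1.0, matching the savgol_filter output above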
Example #17
def preprocess(num, subj, subj_dir, subj_warp_dir, force_warp=False):
    bold_path = 'BOLD/task001_run00%i/bold_dico_bold7Tp1_to_subjbold7Tp1.nii.gz' % (num+1)
    bold_path = os.path.join(DATA_DIR, subj, bold_path)
    template_path = os.path.join(DATA_DIR, 'templates', 'grpbold7Tp1', 'brain.nii.gz')
    warp_path = os.path.join(DATA_DIR, subj, 'templates', 'bold7Tp1', 'in_grpbold7Tp1', 'subj2tmpl_warp.nii.gz')

    output_path = os.path.join(subj_warp_dir, 'run00%i.nii.gz' % num)

    if force_warp or not os.path.exists(output_path):
        print('Warping image #%i...' % num)
        subprocess.call(['fsl5.0-applywarp', '-i', bold_path, '-o', output_path, '-r', template_path, '-w', warp_path, '-d', 'float'])
    else:
        print('Reusing cached warp image #%i' % num)

    print('Loading image #%i...' % num)
    bold = load(output_path)

    masker = NiftiMasker(load(MASK_FILE))
    # masker = NiftiMasker(load(MASK_FILE), detrend=True, smoothing_fwhm=4.0,
    #                      high_pass=0.01, t_r=2.0, standardize=True)
    masker.fit()
    print('Removing confounds from image #%i...' % num)
    data = masker.transform(bold, confounds(num, subj))
    print('Detrending image #%i...' % num)
    filtered = np.float32(savgol_filter(data, 61, 5, axis=0))
    img = masker.inverse_transform(data - filtered)
    print('Smoothing image #%i...' % num)
    img = image.smooth_img(img, 4.0)
    print('Saving image #%i...' % num)
    save(img, os.path.join(subj_dir, 'run00%i.nii.gz' % num))
    print('Finished with image #%i' % num)
Example #18
    def evaluate_pfmce(self):
        print("Evaluating performance....")
        if not self.mdp:
            print("No MDP found")
            self.history = []
            return
        action_classifier = pickle.load(open('../data/action_classifier.pkl'))
        history = np.array(self.history)
        history = history[25:-6:5, :]  # downsampling, cut the first half second
        history = savgol_filter(history, 31, 3, axis=0)  # smoothing
        actions = []
        states = []
        for signal in history:
            emg_l = signal[0:8] / EMG_WEIGHT
            emg_u = signal[18:26] / EMG_WEIGHT
            emg = np.hstack((emg_l, emg_u))
            actions.append(int(action_classifier.predict(emg)[0]))
            states.append(int(self.classifier.predict(signal)[0]))
        print(actions)
        print(states)
        result = evaluate(actions, states, self.mdp)
        print(result)
        score = 100 * math.exp((result - self.baseline) / 200)
        print("Performance score =", int(score))
        np.savetxt('user_data/' + self.user_id, history, delimiter=',')
        self.history = []
Example #19
def eche_split_current_by_sweep_and_generate_intended_voltage(tech,sim_lib_dict,echem_t,echem_i,smooth_paramd=None,time_delta=0.5):
    if tech.startswith('CV'):
        sim_basis_v=echem_t*np.nan
        sweep_currents=[]
        profd=sim_lib_dict['echem_profile']
        segment_index_arr=[]
        for count,(t0,t1,v0,v1) in enumerate(zip(profd['sweep_endpoint_times_s'],profd['sweep_endpoint_times_s'][1:],profd['sweep_endpoint_potentials_vrhe'],profd['sweep_endpoint_potentials_vrhe'][1:])):
            inds=np.where((echem_t>=t0) & (echem_t<=t1))[0]
            fr=(echem_t[inds]-t0)/(t1-t0)
            sim_basis_v[inds]=v0+fr*(v1-v0)
            sweep_currents+=[echem_i[inds]]
            segment_index_arr+=[count]*len(inds)#TODO. nothing here makes this be the correct total length because echem_t may be longer (go beyond bounds of the profile)
        for nanind in np.where(np.isnan(sim_basis_v))[0]:
            goodinds=np.where(np.logical_not(np.isnan(sim_basis_v)))[0]
            replace_ind=goodinds[np.argmin((echem_t[goodinds]-echem_t[nanind])**2)]
            if np.abs(echem_t[replace_ind]-echem_t[nanind])<time_delta:
                sim_basis_v[nanind]=sim_basis_v[replace_ind]
        if smooth_paramd is None or smooth_paramd['SGfilter_order']==0 or smooth_paramd['SGfilter_nptsoneside']<3:
            return sweep_currents,sim_basis_v,np.hstack(sweep_currents),np.array(segment_index_arr)
        smooth_sweep_currents=[signal.savgol_filter(arr, 2*smooth_paramd['SGfilter_nptsoneside']+1, smooth_paramd['SGfilter_order']) for arr in sweep_currents]
        echem_i_smooth=np.hstack(smooth_sweep_currents)
        echem_i_smooth[:int(smooth_paramd['SGfilter_nptsoneside']*1.5)]=echem_i[:int(smooth_paramd['SGfilter_nptsoneside']*1.5)]#replace first 0.75 window with raw data due to initial transient
        return sweep_currents,sim_basis_v,echem_i_smooth,np.array(segment_index_arr)
    else:
        print('technique not supported for ecms analysis')
def smoothCurve(curve, window = 5, pol_degree = 3):
    '''smooth curves using the savgol_filter'''
    
    if curve.shape[0] < window:
        #nothing to do here return an empty array
        return np.full_like(curve, np.nan)

    #consider the case of one (widths) or two dimensions (skeletons, contours)
    if curve.ndim == 1:
        smoothed_curve = savgol_filter(curve, window, pol_degree)
    else:
        smoothed_curve = np.zeros_like(curve)
        for nn in range(curve.shape[1]):   # smooth each column independently
            smoothed_curve[:, nn] = savgol_filter(curve[:, nn], window, pol_degree)

    return smoothed_curve
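# Usage sketch for smoothCurve above (assumes numpy and savgol_filter are
# imported as in the snippet); the two-column case smooths x and y
# independently, and a curve shorter than the window comes back as NaNs.
import numpy as np

theta = np.linspace(0, np.pi, 60)
skeleton = np.column_stack((np.cos(theta), np.sin(theta)))
skeleton += np.random.default_rng(2).normal(0, 0.01, skeleton.shape)

smoothed = smoothCurve(skeleton, window=5, pol_degree=3)
too_short = smoothCurve(skeleton[:3])       # 3 < window -> array of NaN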
Example #21
def general_filtering(Y, method, kwargs):
    """Wrapper function to contain all the possible smoothing functions in
    order to be easy and quick usable for other parts of this package.
    """

#    if method == 'order_filter':
#        Ys = signal.order_filter(Y, **kwargs)
#    elif method == 'medfilt':
#        Ys = signal.medfilt(Y, **kwargs)
#    elif method == 'wiener':
#        Ys = signal.wiener(Y, **kwargs)
#    elif method == 'lfilter':
#        Ys = signal.lfilter(Y, **kwargs)
#    elif method == 'filtfilt':
#        Ys = signal.filtfilt(Y, **kwargs)
    if method == 'savgol_filter':
        Ys = signal.savgol_filter(Y, **kwargs)
    elif method == 'savitzky_golay':
        Ys = savitzky_golay_matrix(Y, **kwargs)
    elif method == 'weighted_MA':
        Ys = smooth_weighted_MA_matrix(Y, **kwargs)
    elif method == 'fft_passband':
        Ys = fft_passband_filter(Y, **kwargs)
    elif method == 'reweighting':
        Ys = general_reweighting(Y, **kwargs)
    ## DISCRETE TS
    elif method == 'collapse':
        Ys = collapser(Y, **kwargs)
    elif method == 'substitution':
        Ys = substitution(Y, **kwargs)
    else:
        raise ValueError("Unknown smoothing method: %s" % method)

    return Ys
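# Because every branch just unpacks kwargs, callers hand filter parameters
# over as a dict; a hedged sketch of what the 'savgol_filter' path reduces to
# (parameter names are scipy's own):
import numpy as np
from scipy import signal

Y = np.random.default_rng(3).normal(size=200).cumsum()
kwargs = {'window_length': 21, 'polyorder': 3}
Ys = signal.savgol_filter(Y, **kwargs)   # general_filtering(Y, 'savgol_filter', kwargs)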
Example #22
def get_fringe_frequency(series, multiplier=2.0):
    """Predict scattering fringe frequency from the derivative of a timeseries

    Parameters
    ----------
    series : `~gwpy.timeseries.TimeSeries`
        timeseries record of relative motion

    multiplier : `float`
        harmonic number of fringe frequency

    Returns
    -------
    fringef : `~gwpy.timeseries.TimeSeries`
        timeseries record of fringe frequency

    See Also
    --------
    scipy.signal.savgol_filter
        for an implementation of the Savitzky-Golay filter
    """
    velocity = type(series)(savgol_filter(series.value, 5, 2, deriv=1))
    velocity.__array_finalize__(series)
    fringef = numpy.abs(multiplier * 2. / 1.064 * velocity *
                        velocity.sample_rate.value)
    fringef.override_unit('Hz')
    return fringef
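# The returned quantity is f = |multiplier * (2 / 1.064) * v * fs|, i.e. the
# scattering fringe frequency for a 1.064 um laser. A plain-numpy sketch of
# the same arithmetic (no gwpy TimeSeries; the sample rate is a hypothetical
# stand-in for series.sample_rate.value):
import numpy as np
from scipy.signal import savgol_filter

fs = 256.0                                   # assumed sample rate in Hz
t = np.arange(0, 4, 1 / fs)
motion = 5.0 * np.sin(2 * np.pi * 0.3 * t)   # relative motion in microns

velocity = savgol_filter(motion, 5, 2, deriv=1)      # microns per sample
fringef = np.abs(2.0 * 2.0 / 1.064 * velocity * fs)  # multiplier = 2.0, in Hz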
Example #23
    def estimate_error(self, idx):
        """
        Calculate errors for estimate, average, and differential
        """
        e = ie = de = 0  # fall-back values in case the estimate below fails
        try:
            t = range(0, self.T_COEF) # the time frame in the past
            t_plus = range(self.T_COEF + 1, self.T_COEF *2) # the time frame in the future

            val = int(idx)
            self.offset_history.append(val)
            while len(self.offset_history) > self.NUM_SAMPLES:
                self.offset_history.pop(0)
            smoothed_values = sig.savgol_filter(self.offset_history, self.T_COEF, 2)

            # Estimated
            e = int(smoothed_values[-1]) # get latest
            if e > self.CAMERA_WIDTH / 2: e = (self.CAMERA_WIDTH / 2) - 1 
            elif e < -self.CAMERA_WIDTH / 2: e = -self.CAMERA_WIDTH / 2

            # Projected
            f = np.polyfit(t, smoothed_values[-self.T_COEF:], deg=self.REGRESSION_DEG)
            vals = np.polyval(f, t_plus)
            de = vals[-1] # differential error
            ie = int(np.mean(vals)) # integral error
            if ie > self.CAMERA_WIDTH / 2 - 1: ie = (self.CAMERA_WIDTH / 2) -1
            elif ie < -self.CAMERA_WIDTH / 2: ie = -self.CAMERA_WIDTH / 2
        except Exception as error:
            self.pretty_print("ROW", "Error: %s" % str(error))
        return e, ie, de
Example #24
def build_features(ride, normalized=False, version=1):
  if version == 3:
    ride = savgol_filter(np.array(ride).T, 7, 2).T

  distances = np.array(euclidian_distances(ride))
  #ride_length = distances.sum()
  #ride_speed = ride_length / len(ride)
  distances_no_stops = distances[distances > 1.5]
  #stops_ratio = len(distances[distances < 1.5]) / (len(distances) + 1.0)
  ride_length_no_stops = distances_no_stops.sum()
  ride_speed_no_stops = ride_length_no_stops / (len(distances_no_stops) + 1)
  features = [
      #ride_length,
      #ride_speed,
      ride_length_no_stops,
      #stops_ratio,
      euclidian_distance(ride[0], ride[-1]),
  ]
  if version == 1:
    features.append(ride_speed_no_stops)

  features.extend(get_ride_histograms(distances, normalized=normalized, version=version))

  g_forces = get_g_forces(ride, distances=distances)
  if version == 1:
    h_g_forces = np.histogram(g_forces, bins=range(0, 600, 50))[0]
  else:
    h_g_forces = np.histogram(g_forces, bins=range(0, 600, 10))[0]
  features.extend(h_g_forces)

  return np.array(features)
Example #25
    def get_saved_current(self):
        try:
            fo = pickle.load(open(self.fileSave, "rb"))
            i = fo['II_HI']
            t_array_i = fo['time']*1E-6
            delta_t = np.diff(t_array_i)[0]
        except FileNotFoundError:
            raise
            
        rt_plot = df.RelativeTimePlot(t_array_i+self.t_delay_from_dataset, i, max_points=1E9)
        rt_plot.plot()

        plt.show()

        zero_time = rt_plot.zero_time

        i = i[int(zero_time/delta_t):int((zero_time + 110E-6)/delta_t)] - i[int(zero_time/delta_t)]

        t_array_i = t_array_i[int(zero_time/delta_t):int((zero_time + 110E-6)/delta_t)] - zero_time
        
        ### Smooth out the signal using a Savitzky-Golay filter
        from scipy.signal import savgol_filter
        ii_savgol = savgol_filter(i, 9, 5)
        i = ii_savgol

        data = {}
        data['I'] = i
        data['time'] = t_array_i
        
        pickle.dump(data, open(self.fileSaveI, 'wb'))
    def filter(self, mz_array, intensity_array):
        from scipy.signal import savgol_filter
        smoothed = savgol_filter(
            intensity_array, window_length=self.window_length,
            polyorder=self.polyorder, deriv=self.deriv).clip(0)
        mask = smoothed > 0
        return mz_array[mask], smoothed[mask]
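# The clip(0)-plus-mask step above drops points the smoother pushed to or
# below zero while keeping the m/z and intensity arrays aligned; a hedged
# standalone version of the same logic:
import numpy as np
from scipy.signal import savgol_filter

mz = np.linspace(100, 110, 201)
intensity = np.exp(-0.5 * ((mz - 105) / 0.2) ** 2)   # one synthetic peak
intensity += np.random.default_rng(4).normal(0, 0.02, mz.size)

smoothed = savgol_filter(intensity, window_length=11, polyorder=3,
                         deriv=0).clip(0)
mask = smoothed > 0
mz_f, intensity_f = mz[mask], smoothed[mask]         # arrays stay aligned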
Example #27
def get_distance_acc_words(ride, step=5):
  ride = np.array(ride)
  ride1 = savgol_filter(ride.T, 7, 2).T
  ride0 = np.roll(ride1, step, axis=0)[step:]
  ride1 = ride1[step:]

  distance_vectors = ride1 - ride0
  acc_vectors = np.vstack((distance_vectors, [0,0])) - \
      np.vstack(([0,0], distance_vectors))
  acc_vectors = acc_vectors[1:-1]
  distance_vectors = distance_vectors[:-1]

  distances = np.linalg.norm(distance_vectors, axis=1)
  acc_projection = (distance_vectors[:,0] * acc_vectors[:,0] + \
      distance_vectors[:,1] * acc_vectors[:,1]) / np.maximum(distances, 0.01)
  acc = np.linalg.norm(acc_vectors, axis=1)
  acc_rejection = np.sqrt(np.maximum(acc**2 - acc_projection**2,0))

  DIST_TH = np.array([0.5, 3, 8, 12, 22, 30]) * step
  PROJ_TH = [-8, -4, -1, -0.1, 0.1, 1, 3, 5]
  REJ_TH = [0.1, 0.8, 3, 6, 10]

  features = np.vstack((
      np.digitize(distances, DIST_TH),
      np.digitize(acc_projection, PROJ_TH),
      np.digitize(acc_rejection, REJ_TH)
  )).T
  features = ' '.join(['%s_%s_%s' % (f[0], f[1], f[2]) for f in features])
  return features
Example #28
def get_acc4acc_words(ride, step=5, version=1):
  ride = np.array(ride)
  ride1 = savgol_filter(ride.T, 7, 2).T
  ride0 = np.roll(ride1, step, axis=0)[step:]
  ride1 = ride1[step:]

  distance_vectors = ride1 - ride0
  acc_vectors = distance_vectors[1:] - distance_vectors[:-1]
  acc4acc_vectors = acc_vectors[1:] - acc_vectors[:-1]
  acc_vectors = acc_vectors[:-1]

  acc = np.linalg.norm(acc_vectors, axis=1)
  acc4acc = np.linalg.norm(acc4acc_vectors, axis=1)
  ACC_TH = [0.1, 0.3, 0.7, 1.1, 1.6, 2.3, 3.5, 5, 6.5, 9]
  ACC4ACC_TH = [0.1, 0.3, 0.7, 1.2, 2, 2.8]

  if version == 1:
    features = np.vstack((
        np.digitize(acc, ACC_TH),
        np.digitize(acc4acc, ACC4ACC_TH),
    )).T
    features = ' '.join(['%s_%s' % (f[0], f[1]) for f in features])

  else:
    features = ' '.join(['a%s' % f for f in np.digitize(acc, ACC_TH)])

  return features
Example #29
def helper_sgdidv(w):
	'''Perform Savitzky-Golay smoothing and get 1st derivative'''
	w['XX'] = signal.savgol_filter(
			w['XX'], int(w['sgdidv_samples']), int(w['sgdidv_order']), deriv=1, delta=w['ystep'] / 0.0129064037)
	w['cbar_quantity'] = 'dI/dV'
	# w['cbar_unit'] = r'$\mu$Siemens'  # alternative label; overridden below
	w['cbar_unit'] = r'$\mathrm{e}^2/\mathrm{h}$'
Example #30
	def calcAccelereation(self, data):
		smoothed = savgol_filter(data, 11, 2)
		# smoothed = data
		smoothed = np.diff(smoothed) / self.dt  # scale the differences by the timestep
		return smoothed
Example #31
def find_bg(signal):
    freq, ampl = np.histogram(signal, 500)
    freq_f = savgol_filter(freq, 31, 1)
    # print 'binsize = {}'.format(np.diff(ampl)[0])
    # plt.plot(ampl[:-1],freq_f)
    return ampl[np.argmax(freq_f)]
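# find_bg estimates the background as the mode of a smoothed amplitude
# histogram, which works because baseline samples dominate the counts (note
# that the parameter name `signal` would shadow scipy.signal if that module
# were imported under the same name). A quick hedged check on synthetic data:
import numpy as np
from scipy.signal import savgol_filter

rng = np.random.default_rng(5)
trace = rng.normal(2.0, 0.05, 20000)        # baseline near 2.0
trace[::100] += rng.uniform(1, 5, 200)      # sparse positive spikes

freq, ampl = np.histogram(trace, 500)
freq_f = savgol_filter(freq, 31, 1)
print(ampl[np.argmax(freq_f)])              # ~2.0, the recovered baseline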
colors = cm.rainbow(np.linspace(0, 1, x_div * y_div * len(directory_meas)))
color_tmp = 0
#raise Exception
for index_chip in range(len(directory_meas)):
    for this_area_x in range(x_div):
        for this_area_y in range(y_div):
            index_fit = np.where(
                QE[index_chip, 1:-1, this_area_y, this_area_x] != 0)
            xx = np.linspace(wavelengths[index_fit[0][1]],
                             wavelengths[index_fit[0][-1]], 1000)
            # interpolate + smooth
            itp = interp1d(wavelengths[1:-1],
                           QE[index_chip, 1:-1, this_area_y, this_area_x],
                           kind='linear')
            window_size, poly_order = 101, 2
            yy_sg = savgol_filter(itp(xx), window_size, poly_order)
            plt.plot(wavelengths[1:-1],
                     100.0 * (QE[index_chip, 1:-1, this_area_y, this_area_x]),
                     'o--',
                     color=colors[color_tmp],
                     label='Chip: ' + str(sensor_name_list[index_chip]) +
                     ', X: ' + str(frame_x_divisions[this_area_x]) + ', Y: ' +
                     str(frame_y_divisions[this_area_y]))
            color_tmp = color_tmp + 1
            plt.plot(xx,
                     100.0 * yy_sg,
                     'k',
                     label='Fit for chip: ' +
                     str(sensor_name_list[index_chip]) + ', X: ' +
                     str(frame_x_divisions[this_area_x]) + ', Y: ' +
                     str(frame_y_divisions[this_area_y]))
                if plot_phase_vec_during_processing:
                    fig, ax = plt.subplots(nrows=1, ncols=1)
                    fig.suptitle('downsampled')
                    ax.plot(time_vec__avg, j_di_phase__avg)
                    ax.set_xlabel(r'time [ns]')
                    ax.set_ylabel(r'$J_{di}$ phase [rad.]')
                    plt.show()

                # apply smoothing filter
                window_size__temp = np.min([window_size, len(I_di__avg)])
                if (window_size__temp % 2) == 0:
                    window_size__temp -= 1
                poly_order__temp = np.min(
                    [window_size__temp - 1, polynomial_order])
                I_di__avg = savgol_filter(
                    I_di__avg, window_size__temp,
                    poly_order__temp)
                j_di_phase__avg = savgol_filter(
                    j_di_phase__avg, window_size__temp,
                    poly_order__temp)
                time_vec__avg = savgol_filter(
                    time_vec__avg,
                    np.min([window_size__temp,
                            len(time_vec__avg)]),
                    poly_order__temp)

                if plot_phase_vec_during_processing:
                    fig, ax = plt.subplots(nrows=1, ncols=1)
                    fig.suptitle('smoothed')
                    ax.plot(time_vec__avg, j_di_phase__avg)
                    ax.set_xlabel(r'time [ns]')
Example #34
                if slice_index == 0:
                    plt.plot(vertical_slice / np.max(row_max_value),
                             detector_row_number,
                             color=colours[colour_index],
                             label="Vertical rows %i-%i" %
                             (np.min(detector_row_number),
                              np.max(detector_row_number)))
                else:
                    plt.plot(vertical_slice / np.max(row_max_value),
                             detector_row_number,
                             color=colours[colour_index])

    row_numbers = np.arange(np.min(row_number), np.max(row_number))
    row_max_values = np.interp(row_numbers, row_number, row_max_value)

    smooth = savgol_filter(row_max_values[row_max_values > DETECTOR_CUTOFF], 9,
                           5)
    smooth_rows = row_numbers[row_max_values > DETECTOR_CUTOFF]
    #        plt.scatter(row_numbers, row_max_values)
    plt.plot(smooth / np.max(row_max_value),
             smooth_rows,
             linewidth=5,
             color="k",
             label="Instrument sensitivity")
    plt.legend(loc="upper right")

    if SAVE_FIGS:
        plt.savefig(
            channel +
            "_MCC_line_scan_vertical_columns_on_detector_where_sun_is_seen.png"
        )
Example #35
def smoothsvgfilter(df, a, b):
    svgfilter = lambda x: savgol_filter(x, a, b)
    df_smooth = df.apply(svgfilter)
    return df_smooth
Example #36
                rhoy.append(float(bob[l][1]))
        f.close()

        #Omit first 5 unphysical values
        rhox = rhox[5:]
        rhoy = rhoy[5:]

        #Set upstream density to unity
        omp = conf.cfl / conf.c_omp  #Plasma frequency
        qe = (omp**2. * conf.gamma) / ((conf.ppc * .5) *
                                       (1. + abs(conf.me / conf.mi)))
        for conv in range(len(rhoy)):
            rhoy[conv] = rhoy[conv] / (conf.ppc * qe)

        #Apply Savitzky-Golay filter to data
        rhoy_f = savgol_filter(rhoy, 51, 2)  #21,2 ... 51,1
        CR = np.amax(
            rhoy_f)  #Compression ratio set as maximum of filtered density

        xs_index = find_nearest(rhoy_f, CR / 2)  #Midpoint of shock found

        #Data for lap appended
        x_shock.append(rhox[xs_index])
        t_laps.append(i * conf.interval)

        print("Frame {} appended".format(i))

    #First 15 frames omitted - remove unphysical data points
    ###FIX FOR LARGE SCALE SIMULATIONS
    x_shock = x_shock[15:]
    t_laps = t_laps[15:]
Example #37
      if (i+1) % 100 == 0:
          log = open(output_file + "_log.txt", "a")
          log.write(time.strftime("%H:%M:%S", time.gmtime(time.time() - start_time)) + "\n")
          log.write('%d iterations' % (i+1) + "\n")
          log.write('loss: %.3f' % np.mean(train_res_loss[-100:]) + "\n")
          log.write('recon_error: %.3f' % np.mean(train_res_recon_error[-100:])+ "\n")
          log.write('perplexity: %.3f' % np.mean(train_res_perplexity[-100:])+ "\n\n")
          log.close()


      if (i+1) % 1000 == 0:
          torch.save(vae, output_file + ".pt")

          embeddings_list.append(pd.DataFrame(embeddings).astype("float"))
          
          train_res_recon_error_smooth = savgol_filter(train_res_recon_error[100:], 201, 7)
          train_res_perplexity_smooth = savgol_filter(train_res_perplexity[100:], 201 , 7)

          f = plt.figure(figsize=(16,8))
          ax = f.add_subplot(1,2,1)
          ax.plot(train_res_recon_error_smooth)
          ax.set_yscale('log')
          ax.set_title('Smoothed NMSE.')
          ax.set_xlabel('iteration')

          ax = f.add_subplot(1,2,2)
          ax.plot(train_res_perplexity_smooth)
          ax.set_title('Smoothed Average codebook usage (perplexity).')
          ax.set_xlabel('iteration')

          f.savefig(output_file + "_loss.png")
Example #38
def wind():
    database_name = 'DataLogger'
    database_table = 'OURWEATHERTable'
    database_user_name = 'datalogger'
    database_password = '******'
    hostname = 'localhost'
    ax_dict = {}
    time_now = datetime.strftime(datetime.now(), '%H:%M, %A')
    db_connection = mdb.connect(hostname, database_user_name,
                                database_password, database_name)
    cursor = db_connection.cursor()

    query = 'SELECT Date, WS, GS FROM WindSevenDay ORDER BY Date ASC'

    try:
        cursor.execute(query)
        result = cursor.fetchall()
    except Exception as e:
        print(f"the error is {e}")

    time = []
    ws = []
    gs = []

    for record in result:
        time.append(record[0])
        ws.append(record[1])
        gs.append(record[2])

#   wss = ws
    wss = signal.savgol_filter(ws, 35, 5)
    #   gss = gs
    gss = signal.savgol_filter(gs, 35, 5)
    fds = [dates.date2num(d) for d in time]
    x = None
    y = None
    label1 = "Wind Speed"
    label2 = "Gust"
    title = "Wind with Gust"
    xlabel = "Date"
    ylabel = "MPH"

    query_max_min = 'SELECT Date, Max, Min FROM TemperatureMaxMin WHERE Date = CURDATE()'
    try:
        cursor.execute(query_max_min)
        result_max_min = cursor.fetchall()
    except Exception as e:
        print(f"the error is {e}")
    # print(result_max_min)   # if returns empty need way to still print

    query_current_condition = 'SELECT id, Current_Wind_Speed, Current_Wind_Direction, Outdoor_Temperature FROM OURWEATHERTable ORDER BY id DESC LIMIT 1'
    try:
        cursor.execute(query_current_condition)
        result_current_condition = cursor.fetchall()
    except Exception as e:
        print(f"The error is {e}")

    compass = {
        0.0: 'North',
        22.5: 'North',
        45: 'Northeast',
        67.5: 'East',
        90: 'East',
        112.5: 'East',
        135: 'Southeast',
        157.5: 'South',
        180: 'South',
        202.5: 'South',
        225: 'Southwest',
        247.5: 'West',
        270: 'West',
        292.5: 'West',
        315: 'Northwest',
        337.5: 'North',
        360: 'North',
    }

    # fig, ax = pyplot.subplots(figsize=(17.0, 8.0), facecolor='green')
    fig = pyplot.figure(num="My Figure", facecolor='green')
    #   gs = fig.add_gridspec(1, 4)

    ax_dict = {
        'fig': fig,
        'x1': fds,
        'y1': wss,
        'label1': label1,
        'x2': fds,
        'y2': gss,
        'label2': label2,
        'title': title,
        'xlabel': xlabel,
        'ylabel': ylabel,
    }

    ax = make_ax(ax_dict)

    try:
        pyplot.figtext(
            0.15,
            0.85,
            f"{time_now}\nTemperature now: {result_current_condition[0][3]*9/5+32:.0f}  \nHigh: {result_max_min[0][1]:.1f} \nLow: {result_max_min[0][2]:.1f} \nWind is {result_current_condition[0][1]*0.6214:.0f} MPH from the {compass[result_current_condition[0][2]]}",
            fontsize=20,
            horizontalalignment='left',
            verticalalignment='top')
    except IndexError:
        print(f"The error is {sys.exc_info()[0]} : {sys.exc_info()[1]}.")


#    pyplot.figtext(0.75, 0.85, f"This week: \nMax: {max(temperature):.1f} \nMin: {min(temperature):.1f} \nAve: {mean(temperature):.1f}", fontsize=15, horizontalalignment='left', verticalalignment='top')
#   if y[-1] > 80:
#      print(y[-1])
#      pyplot.figtext(0.75, 0.4, f"The Heat Index is: {y[-1]:.1f}", fontsize=15)

    pyplot.savefig('/var/www/html/TempHeatIndexSevenDayGraph.png')
    mng = pyplot.get_current_fig_manager()
    #    print(mng)
    # ok    mng.resize(*mng.window.maxsize())  # max with window outline
    # no    mng.frame.Maximize(True)
    # no    mng.window.state('zoomed')
    # no   mng.window.showMaximized()
    mng.full_screen_toggle()  # full screen no outline

    pyplot.show(block=False)
    pyplot.pause(15)
    pyplot.close(fig="My Figure")

    cursor.close()
    db_connection.close()
    gc.collect()
Example #39
    # Read the data from a csv file. Columns separated by \t.
    # The first line of the file contains the scanned wavelengths
    tmpdata = np.loadtxt(os.path.join(prjdir, 'marzipan.csv'), delimiter='\t')
    wl = tmpdata[0]  #Wavelength
    spectrum = tmpdata[1:]

    # Get dataset dimension
    n, p = spectrum.shape

    final_spectrum = []
    con = sqlite.connect('samples.db')
    cur = con.cursor()
    # Do the same things for all the spectrum
    for row in range(n):
        almost_good = spectrum[row, :]
        good_f = sps.savgol_filter(almost_good.tolist(), 51, 3)

        h = max(good_f) - min(good_f)
        l = max(wl) - min(wl)

        #Calibration values
        CAL_V = [0.0008 / 1.5 * h, 0.0001 / 1.5 * h, 20 / 1300 * l]

        #Get derivative function
        derivative = np.diff(good_f)

        #print ("%d %d" % (len(wl[:-1]), len(derivative)))

        #get the x data for the local minima
        minima = better_local(derivative, good_f, wl, CAL_V, "minima")
Example #40
def backtrack1(Bounds,Base):
    All_statfn= list()
    for i in range(0,len(Bounds)):
        print(i)
        base2 = Base + str(Bounds[i][1]) + "/" 
        files = natsorted(os.listdir(base2))
        stat = list()
        back_track = int(Bounds[i][0][4]*100)
        x = int(Bounds[i][0][0])
        y = int(Bounds[i][0][1])
        h = int(Bounds[i][0][2]/2)
        w = int(Bounds[i][0][3]/2)

        img0 = cv2.imread(base2 + str(back_track) +".jpg")[y-h:y+h,x-w:x+w]
        img0 = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY)

        for idx in range(np.min((26750,2*back_track)),90,-5):
            img1 = cv2.imread(base2 + str(idx) +".jpg")[y-h:y+h,x-w:x+w]
            img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)

            stat.append(measure.compare_ssim(img0,img1,multichannel=True,win_size=3))


        for idx in range(0,len(stat)-35):
            stat[idx] = np.max((stat[idx:idx+35]))


        All_statfn.append(stat)    
        
        
    Times = {}
    count = 0
    for stat in All_statfn:
        Times[Bounds[count][1]] = 999

        count+=1

    count = 0
    for stat in All_statfn:
        stat = reject_outliers(stat)
        if np.max((stat)) > 0.5 and np.min((stat))<0.55:
            stat = savgol_filter(stat,21,1)

            nstat = (list(reversed(stat)) -min(stat))/(max(stat)-min(stat))
            Found = check_continual(nstat,150)
            if Found:
                if (np.where(np.array(nstat)>=0.4))[0][0] != 0:
                    Times[Bounds[count][1]] = np.min((Times[Bounds[count][1]],np.min(((np.where(np.array(nstat)>=0.5)[0][0])*5/30,Times[Bounds[count][1]]))))

        count+=1
        
    return Times,All_statfn
Example #41
def ha(sf,
       sfn,
       mX,
       pX,
       params,
       verbose=[],
       onlySelected=False,
       hc=-2,
       div=8,
       L=30,
       fs=44100,
       gt=[]):
    """
    Applies harmonic analysis to onset candidates and returns the selected ones
    
    sf: spectral flux (filtered). used for candidate selection
    sfn: raw spectral flux. will be used for chord segmentation later
    mX, pX: freq. transform. mX is used, pX is for synthesis and observation purposes
    verbose: onset candids in this list will enable verbose mode.
    onlySelected: if True, only the candidates in verbose list will be processed.
    
    params: parameters used in freq. transform
    
    """

    M, N, H, B = params

    idx = candidSelection(sf, t=0.025, hw=25)
    idx = np.concatenate((np.zeros(1), idx, np.array([sf.shape[0]])))
    idx_orig = idx.copy()
    mask = np.ones(idx.shape)
    mask[0] = 0
    mask[-1] = 0
    errors = np.zeros(mX.shape[0])
    scores = np.zeros(idx.shape)
    freqs = []

    tFlag = False
    vFlag = False  # flag to enable prints and plots

    rms = np.sum(mX, axis=1)
    rms = rms - np.mean(rms)
    rms = rms / np.max(rms)
    rms = savgol_filter(rms, 3, 1)

    rms_t = -0.1

    # sending every onset candidate to harmonic analysis
    for i in range(len(idx) - 2, 0, -1):

        if onlySelected:
            if idx[i] not in verbose:
                continue

        b = int((idx[i] - (10240 / H)) if (idx[i] > (idx[i - 1] + (10240 / H))) else idx[i - 1])
        e = int((idx[i] + (10240 / H)) if (idx[i] < (idx[i + 1] - (10240 / H))) else idx[i + 1])

        if np.mean(rms[int(idx[i]):int(idx[i]) + 50]) < rms_t:
            continue

        onst = int(idx[i] - b)
        pmX = np.copy(mX[b:e])

        if idx[i] in verbose:
            print("\nOnset candidate:")
            print("onset frame: %d" % idx[i])
            print("sf onset number: %d" % i)
            vFlag = True
            y = MRStftSynth(pmX, pX[b:e], M, H, B)
            print("synthesized sound")
            ipd.display(ipd.Audio(data=y, rate=fs))

        if vFlag:
            print("STFT around candidate")
            plt.pcolormesh(np.arange(pmX.shape[0]), np.arange(pmX.shape[1]),
                           np.transpose(pmX))
            plt.show()

            print("filtered spectral flux")
            plt.plot(sf[b:e])
            plt.show()
            print("raw spectral flux")
            plt.plot(sfn[b:e])
            plt.show()

        allErrors, allf0s, pmXv = f0detection(pmX, pX[b:e], sfn[b:e], -100, 10,
                                              onst, vFlag, hc, div, params, fs,
                                              tFlag)

        aL = np.min((e - idx[i] / 2, L))
        segments = getSegments(allf0s, allErrors, onst, pmX, vFlag)
        scores[i], freq, segmentScores = harmonicScore(segments, aL, vFlag,
                                                       tFlag)
        freqs.append(freq)

        # prevent rejected candidates from creating a boundary for adjacent onsets
        if scores[i] < 1:
            idx[i] = sf.shape[0]

        if vFlag:
            print("Score for this onset: %d" % scores[i])

        if tFlag and scores[i] < 1:
            pred_time = np.abs(idx[i] * (H / fs))
            closest_gt_ind = np.argmin(np.abs(gt - pred_time))
            if np.abs(gt[closest_gt_ind] - pred_time) < 0.05:
                if scores[i] > 1:
                    tp.append(idx[i])
                if scores[i] < 1:
                    fn.append(idx[i])

                    print("STFT around onset")
                    plt.pcolormesh(np.arange(pmX.shape[0]),
                                   np.arange(pmX.shape[1]), np.transpose(pmX))
                    plt.show()

                    y = MRStftSynth(pmXv, pX, M, H, B)
                    ipd.display(ipd.Audio(data=y, rate=fs))

                    plt.pcolormesh(np.arange(pmXv.shape[0]),
                                   np.arange(pmXv.shape[1]),
                                   np.transpose(pmXv))
                    plt.show()

        vFlag = False
        tFlag = False

    avg = np.mean(scores)
    mask[scores < 1] = 0
    result = idx_orig[mask == 1]
    return idx_orig[1:-1], result, freqs, scores[1:-1]
Example #42
    def gaussian_fit(self, N=1, guess=None, plot=False, Nsvf=251):
        """Performs a Gaussian fit of the spectrum based on an initial guess


        Parameters
        ----------

        Nsvf : int
            Length of the Savitzky-Golay filter window (odd integer)


        """
        from scipy.signal import savgol_filter
        from scipy.interpolate import UnivariateSpline
        x = self.axis.data
        y = self.data

        if guess is None:

            raise Exception("Guess is required at this time")
            # FIXME: create a reasonable guess
            guess = [1.0, 11000.0, 300.0, 0.2, 11800, 400, 0.2, 12500, 300]

            #
            # Find local maxima and guess their parameters
            #

            # Fit with a given number of Gaussian functions

            if not self._splines_initialized:
                self._set_splines()

            # get first derivative and smooth it
            der = self._spline_r.derivative()
            y1 = der(x)
            y1sm = savgol_filter(y1, Nsvf, polyorder=3)

            # get second derivative and smooth it
            y1sm_spl_der = UnivariateSpline(x, y1sm, s=0).derivative()(x)
            y2sm = savgol_filter(y1sm_spl_der, Nsvf, polyorder=3)

            # find positions of optima by looking for zeros of y1sm

            # isolate maxima by looking at the value of y2sm

            #plt.plot(x, der(x))
            #plt.plot(x, y1sm)
            plt.plot(x, y2sm)
            plt.show()

        def funcf(x, *p):
            return _n_gaussians(x, N, *p)

        # minimize, leastsq,
        from scipy.optimize import curve_fit
        popt, pcov = curve_fit(funcf, x, y, p0=guess)

        if plot:

            plt.plot(x, y)
            plt.plot(x, _n_gaussians(x, N, *popt))
            for i in range(N):
                a = popt[3 * i]
                print(i, a)
                b = popt[3 * i + 1]
                c = popt[3 * i + 2]
                y = _gaussian(x, a, b, c)
                plt.plot(x, y, '-r')
            plt.show()

        # FIXME: Create a readable report

        return popt, pcov
Example #43
for mean_trace, label in zip(data_mean, labels):
    plt.plot(f_sup, mean_trace, label=label)

plt.legend()
plt.show()

num_peaks=10

data_mean=np.mean(data,axis=2)
data_mean[0]=data_ace_temp
 
# model MG
data_MG_sparse=remove_est_florescence(f_sup,data[1])
data_MG_mean=np.mean(data_MG_sparse,axis=1)
data_MG_mean_smooth = sci.savgol_filter(data_MG_mean, window_length = 11, polyorder = 5)

[comp_rangeM, comp_beta_gaussM, comp_beta_lorM, comp_beta_gen_lorM, comp_beta_cosM, comp_MSEM, comp_biasM]=model_spectrum_ste(f_sup,data_MG_mean_smooth ,num_peaks)

vecM=[comp_rangeM, comp_beta_gaussM, comp_beta_lorM, comp_beta_gen_lorM, comp_beta_cosM, comp_MSEM, comp_biasM]

recap_spectrum(f_sup,data_MG_mean_smooth,num_peaks,*vecM)

plt.plot(f_sup,data_MG_mean_smooth)

#------------------------------------------------------------------------------

# # model ACE 

data_ACE_sparse=remove_est_florescence(f_sup,data_mean[0])
# set up subplots

local_max = []
lmax = [34, 236, 440, 660]
local_min = []
lmin = [179, 384, 531, 694]
int_v_exposure_lmax0 = []
int_v_exposure_lmax1 = []
int_v_exposure_lmax2 = []
int_v_exposure_lmax3 = []
f0, ax0 = plt.subplots(1, 3, figsize=(20, 5))
for counter, fn in enumerate(file_list):
    SD = pd.read_csv(Data_Dir + '/' + str(fn), header=0, sep=',')
    theta = (np.array(SD['Columns']) * slope) + intercept
    intensity = (np.array(SD['Sample Intensity']) - np.array(SD['Nitrogen Intensity']))
    intensity_filtered = savgol_filter(intensity, window_length=151, polyorder=2, deriv=0)
    ax0[0].semilogy(theta, intensity, label=str(fn).replace('.', '_').split('_')[2])
    ax0[1].semilogy(theta, intensity_filtered, label=str(fn).replace('.', '_').split('_')[2])
    local_max.append(argrelextrema(intensity_filtered, np.greater, order=10))
    local_min.append(argrelextrema(intensity_filtered, np.less, order=10))
    #print(local_min)
    #print([theta[element] for element in local_min])
    int_v_exposure_lmax0.append(intensity_filtered[lmax[0]])
    int_v_exposure_lmax1.append(intensity_filtered[lmax[1]])
    int_v_exposure_lmax2.append(intensity_filtered[lmax[2]])
    int_v_exposure_lmax3.append(intensity_filtered[lmax[3]])
    ax0[0].plot([theta[element] for element in lmax], [intensity[element] for element in lmax], color='black', marker='X', ls='')
    #ax0[1].plot([theta[element] for element in lmax], [intensity_filtered[element] for element in lmax], color='black', marker='X', ls='')
ax0[0].legend(loc=1)
ax0[0].set_xlabel("\u03b8", fontsize=12)
ax0[0].set_ylabel('Intensity', fontsize=12, labelpad=10)
Example #45
            f = int(f)
            g0 = signal.savgol_coeffs(f, k, deriv=0)
            g1 = signal.savgol_coeffs(f, k, deriv=1) * -1
            Half_win1 = ((f + 1) / 2) - 1
            Half_win1 = int(Half_win1)
            sg0 = np.zeros(int(n - Half_win1 - 1), dtype=float)
            sg1 = np.zeros(int(n - Half_win1 - 1), dtype=float)
            for ii in range(int((f + 1) / 2), int(n - (f + 1) / 2 + 1)):
                sg0[ii - 1] = np.dot(
                    g0, noise_dIdV[ii - 1 - Half_win1:ii - 1 + Half_win1 + 1])
                sg1[ii - 1] = np.dot(
                    g1, noise_dIdV[ii - 1 - Half_win1:ii - 1 + Half_win1 + 1])

            sg1 = sg1 / dV_new

            sg0_new = signal.savgol_filter(noise_dIdV, f, k, deriv=0)
            sg1_new = signal.savgol_filter(noise_dIdV, f, k, deriv=1)
            sg2_new = signal.savgol_filter(noise_dIdV, f, k, deriv=2)
            sg1_new = sg1_new / dV_new

            SG_param_add[bb] = (sum(
                (idl_IV_d2[0:len(sg1), 1] - sg1)**2 / len(sg1)))
            bb = bb + 1

        SG_param_new = np.zeros((SG_param.shape[0], SG_param.shape[1] + 1))
        SG_param_new[:, :-1] = SG_param
        SG_param_new[:, -1] = SG_param_add

        id_min_SG_param = np.where(SG_param_new[:, 4] == min(SG_param_new[:, 4]))
        k1_min = SG_param_new[id_min_SG_param, 0]
Example #46
    def filterfun(self, y):
        self.set()
        window, order = self.dataset.Window, self.dataset.Order
        window += (window % 2 == 0)  # must be odd
        return savgol_filter(y, window, order)
def findSpikesandDwells(CombinedFilesNames):

    ZSensor_filename = CombinedFilesNames[
        0]  #filename will end up being ZSensor0.txt.csv if the original csv file is named ZSensor0.txt
    Defl_filename = CombinedFilesNames[1]
    NotePath = CombinedFilesNames[2]
    print(ZSensor_filename)  # makes it easier to track which results go with which file
    # print(Defl_filename)
    # print(NotePath)

    OpenedFile = open(NotePath, "r")
    for line in OpenedFile:
        fields = line.split(";")
        for i in fields:
            if "K=" in i:
                CleanSpringConstant = float(i.replace("K=", ""))
                springConstant = CleanSpringConstant * 1E3  #converts spring constant from N/m to pN/nm

            if "NearestForcePull=" in i:
                SimplifiedRampNumber = i.replace("NearestForcePull=", "")
                SimplifiedRampNumber = SimplifiedRampNumber[-7:]

            if "DefVOffset=" in i:
                CleanDefVOffset = float(i.replace("DefVOffset=", ""))

            if "SamplingRate_Hz=" in i:
                SampleRate = float(i.replace("SamplingRate_Hz=", ""))

            if "Invols=" in i:
                CleanInvols = float(i.replace("Invols=", ""))

            if "Force_N=" in i:
                Force = float(i.replace("Force_N=", ""))

            if "Iteration=" in i and "Script" not in i:
                Iteration = str(int(i.replace("Iteration=", "")))

            if "ZLVDTSens=" in i:
                Zsensitivity = float(i.replace("ZLVDTSens=", ""))

            if "ZLVDTOffset" in i:
                ZSenseOffset = float(i.replace("ZLVDTOffset=", ""))

    CombinedValues = []
    CombinedValues.append(CleanSpringConstant)
    CombinedValues.append(CleanDefVOffset)
    CombinedValues.append(CleanInvols)
    CombinedValues.append(SampleRate)
    CombinedValues.append(Force)

    print(CombinedValues)  # a single list with the spring constant, DefVOffset, Invols, Sample rate (Hz), and Force (N) directly from the parameters file

    colnames_ZSensor = [
        'time_s', 'position_Z_nm'
    ]  #the two columns in the csv are time and Z sensor position
    data_ZSensor = pandas.read_csv(
        ZSensor_filename, names=colnames_ZSensor
    )  #read in the data from the csv with the assigned names

    time_s = data_ZSensor.time_s.tolist()  # time data as a list
    position_Z_nm = data_ZSensor.position_Z_nm.tolist()  # Z sensor position as a list

    colnames_Defl = [
        'time_ms_defl', 'Deflection_nm', 'Force_pN'
    ]  # the three columns in the csv are time, deflection, and force
    data_Defl = pandas.read_csv(
        Defl_filename, names=colnames_Defl
    )  #read in the data from the csv with the assigned names

    time_ms_defl = data_Defl.time_ms_defl.tolist()  # deflection time data as a list
    Defl_nm = data_Defl.Deflection_nm.tolist()  # deflection data as a list
    Force_pN = data_Defl.Force_pN.tolist()

    combined_Z_Defl = zip(position_Z_nm, Defl_nm)

    Extension = []
    for x, y in combined_Z_Defl:
        ext = x - y
        Extension.append(ext)

    timevsextension = zip(time_s, Extension)

    with open(ZSensor_filename + '_extension.csv', "w", newline='') as NewFile:
        writer = csv.writer(NewFile)
        writer.writerows(timevsextension)

    #These values allow small differences in sample rate to be dealt with
    DividedSampleRate = SampleRate / 1000
    RoundedSampleRate = round(DividedSampleRate, 0)

    # The extension list is sometimes shorter than the time list (reason unknown);
    # correct for that by padding the extension list with its last value
    if len(time_s) > len(Extension):
        print(len(time_s), len(Extension))
        copyValue = Extension[-1]
        while len(Extension) < len(time_s):
            Extension.append(copyValue)
        print("there was a difference in length of time and extension that was corrected for")
        testStop = input(
            'press enter to continue '
        )  # so you know when there was a mismatch and can check it wasn't too large

    if RoundedSampleRate == 1:
        time = numpy.asarray(time_s)
        xx = numpy.linspace(time.min(), time.max(), 2000)
        interpolated_time_extension = interp1d(time_s,
                                               Extension,
                                               kind='linear')
        window_size, poly_order = 9, 2
        Smooth_Extension = savgol_filter(interpolated_time_extension(xx),
                                         window_size, poly_order)

    elif RoundedSampleRate == 10:
        time = numpy.asarray(time_s)
        xx = numpy.linspace(time.min(), time.max(), 3000)
        interpolated_time_extension = interp1d(time_s,
                                               Extension,
                                               kind='linear')
        window_size, poly_order = 15, 2
        Smooth_Extension = savgol_filter(interpolated_time_extension(xx),
                                         window_size, poly_order)

    elif RoundedSampleRate == 50:
        print('time length is:', len(time_s), 'Extension length is:', len(Extension))
        time = numpy.asarray(time_s)
        xx = numpy.linspace(time.min(), time.max(), 5000)
        interpolated_time_extension = interp1d(time_s,
                                               Extension,
                                               kind='linear')
        window_size, poly_order = 31, 2
        Smooth_Extension = savgol_filter(interpolated_time_extension(xx),
                                         window_size, poly_order)

    else:
        print("sample rate not 1000, 10000, or 50000 Hz; leaving extension unsmoothed")
        time = numpy.asarray(time_s)
        Smooth_Extension = Extension
        # xx = numpy.linspace(time.min(), time.max(), 2000)
        xx = time_s


################################################################################################################################
    if RoundedSampleRate == 1:
        time = numpy.asarray(time_s)
        xx = numpy.linspace(time.min(), time.max(), 2000)
        interpolated_time_position_Z_nm = interp1d(time_s,
                                                   position_Z_nm,
                                                   kind='linear')
        window_size, poly_order = 9, 2
        Smooth_position_Z_nm = savgol_filter(
            interpolated_time_position_Z_nm(xx), window_size, poly_order)

    elif RoundedSampleRate == 10:
        time = numpy.asarray(time_s)
        xx = numpy.linspace(time.min(), time.max(), 3000)
        interpolated_time_position_Z_nm = interp1d(time_s,
                                                   position_Z_nm,
                                                   kind='linear')
        window_size, poly_order = 15, 2
        Smooth_position_Z_nm = savgol_filter(
            interpolated_time_position_Z_nm(xx), window_size, poly_order)

    elif RoundedSampleRate == 50:
        time = numpy.asarray(time_s)
        xx = numpy.linspace(time.min(), time.max(), 5000)
        interpolated_time_position_Z_nm = interp1d(time_s,
                                                   position_Z_nm,
                                                   kind='linear')
        window_size, poly_order = 31, 2
        Smooth_position_Z_nm = savgol_filter(
            interpolated_time_position_Z_nm(xx), window_size, poly_order)

    else:
        print("sample rate not 1000, 10000, or 50000 Hz; leaving Z position unsmoothed")
        Smooth_position_Z_nm = position_Z_nm
        # xx = numpy.linspace(time.min(), time.max(), 3000)
        xx = numpy.asarray(time_s)

    time_vs_smoothExtension = zip(xx, Smooth_Extension)

    time_vs_smoothZposition = zip(xx, Smooth_position_Z_nm)

    with open(ZSensor_filename + '_SmoothExtension.csv', "w", newline='') as NewFile:
        writer = csv.writer(NewFile)
        writer.writerows(time_vs_smoothExtension)

    time_vs_Force = zip(time_s, Force_pN)
    with open(ZSensor_filename + '_Force.csv', "w", newline='') as NewFile:
        writer = csv.writer(NewFile)
        writer.writerows(time_vs_Force)

    medianZ = median(position_Z_nm)
    mZMinus = medianZ - 1
    mZPlus = medianZ + 1

    medianForce = median(Force_pN)

    medianExtension = median(Extension)
    mExtensionMinus = medianExtension - 10
    mExtensionPlus = medianExtension + 10

    print(len(time_s), len(Force_pN))
    print(len(time_ms_defl), len(Force_pN))

    if len(time_ms_defl) > len(Force_pN):
        Force_pN.append(Force_pN[-1])

    if len(time_ms_defl) == len(Force_pN):
        plt.close()
        plt.plot(time_ms_defl, Force_pN)
        plt.title(Iteration + '_' + SimplifiedRampNumber + '_' + str(Force))
        plt.savefig(Iteration + '_' + SimplifiedRampNumber + '_' + str(Force) +
                    'F_full.png')

    # plt.close()
    # plt.plot(time_s, position_Z_nm)
    # plt.plot(xx, Smooth_position_Z_nm, color = 'red')
    # plt.savefig(SimplifiedClampNumber + SimplifiedRampNumber + 'Z_full.png')

    # plt.close()
    # plt.plot(time_s, position_Z_nm)
    # plt.plot(xx, Smooth_position_Z_nm, color = 'red')
    # plt.ylim(mZMinus, mZPlus)
    # plt.savefig(SimplifiedClampNumber + SimplifiedRampNumber + 'Z_zoom.png')

    if len(time_s) == len(Extension):
        plt.close()
        plt.plot(time_s, Extension)
        plt.plot(xx, Smooth_Extension, color='red')
        plt.title(Iteration + '_' + SimplifiedRampNumber + '_' + str(Force))
        plt.savefig(Iteration + '_' + SimplifiedRampNumber + '_' + str(Force) +
                    'E_full.png')

    if len(time_s) == len(Extension):
        plt.close()
        plt.plot(time_s, Extension)
        plt.plot(xx, Smooth_Extension, color='red')
        plt.ylim(mExtensionMinus, mExtensionPlus)
        plt.title(Iteration + '_' + SimplifiedRampNumber + '_' + str(Force))
        plt.savefig(Iteration + '_' + SimplifiedRampNumber + '_' + str(Force) +
                    'E_zoom.png')
        plt.close()

    return
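The three sample-rate branches above repeat one interpolate-then-smooth pattern; a minimal consolidation sketch (the parameter table is an assumption read off the branches, not part of the original script):

from scipy.interpolate import interp1d
from scipy.signal import savgol_filter
import numpy

def smooth_trace(time_s, values, rounded_rate):
    # (number of grid points, savgol window) per rounded kHz rate, as above
    params = {1: (2000, 9), 10: (3000, 15), 50: (5000, 31)}
    if rounded_rate not in params:
        return numpy.asarray(time_s), values     # leave the trace alone
    n_pts, window = params[rounded_rate]
    t = numpy.asarray(time_s)
    xx = numpy.linspace(t.min(), t.max(), n_pts)
    resampled = interp1d(t, values, kind='linear')(xx)
    return xx, savgol_filter(resampled, window, 2)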
Example #48
	args=numpy.where(twotheta_deg<=max(tt))
	twotheta_deg,yobs=twotheta_deg[args],yobs[args]
	yh=f(twotheta_deg)

	plt.clf()
	plt.plot(twotheta_deg,yobs)
	plt.plot(twotheta_deg,yh)

	step,argscut=numpy.gradient(twotheta_deg)[0],[]
	for i,valuei in enumerate(yh):
		if twotheta_deg[i]>min(twotheta_deg)+2 and valuei>min(yh[i-int(2/step):i+int(2/step)])*2:
			argscut.append(numpy.arange(i-int(1/2/step),i+int(1/2/step)))
	twotheta_deg,yobs,yh=numpy.delete(twotheta_deg,argscut),numpy.delete(yobs,argscut),numpy.delete(yh,argscut)

	yh = signal.savgol_filter(yh, 101, 1)
	emission = 'CuKa1'
	Lfsq=	1/(2*numpy.sin(numpy.radians(twotheta_deg/2))*numpy.sin(numpy.radians(twotheta_deg)))*\
			fsquared(2*numpy.sin(numpy.radians(twotheta_deg/2))/xu.utilities_noconf.wavelength(emission),[xu.materials.atom.Atom('C',1)],xu.utilities_noconf.energy(emission))
	params=lmfit.Parameters()
	params.add('Cyh',1)
	def minfunc(params):
		prm=params.valuesdict()
		return (Lfsq/Lfsq[-1]-(yobs-prm['Cyh']*yh)/(yobs-prm['Cyh']*yh)[-1])[numpy.where(twotheta_deg>90)]
	result=lmfit.minimize(minfunc,params)
	prm=result.params.valuesdict()
	yobs-=prm['Cyh']*yh

	plt.plot(twotheta_deg,yobs)
	plt.plot(twotheta_deg,Lfsq/Lfsq[-1]*yobs[-1])
	plt.plot(twotheta_deg,prm['Cyh']*yh)
	plt.text(min(twotheta_deg),min(prm['Cyh']*yh),round(prm['Cyh'],2))
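The lmfit pattern above (a single scale parameter fitted through a residual of normalized curves) in its minimal form, with made-up data and model arrays:

import numpy as np
import lmfit

x = np.linspace(0, 10, 200)
model = np.exp(-x / 3.0)                       # made-up background shape
data = 0.7 * model + 0.01 * np.random.randn(x.size)

params = lmfit.Parameters()
params.add('scale', value=1.0)

def residual(params):
    return data - params['scale'].value * model

result = lmfit.minimize(residual, params)      # least-squares by default
print(result.params.valuesdict())              # 'scale' comes out near 0.7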
Example #49
    pmCtr = np.array([pm[ctrZidx[nX], nX] for nX in range(dimX)
                      ])  #get concentration along the centerline
    tCtr = np.array(
        [csdict['temp'][-1, ctrZidx[nX], nX] for nX in range(dimX)])

    xmax, ymax = np.nanargmax(ctrZidx), np.nanmax(ctrZidx)  # get location of maximum centerline height
    centerline = ma.masked_where(
        plume.lvltall[ctrZidx] == 0, plume.lvltall[ctrZidx]
    )  #make sure centerline is only calculated inside the plume
    centerline.mask[:int(1000 / plume.dx)] = True
    # smoothCenterline = savgol_filter(centerline, 51, 3)             # smooth centerline height (window size 31, polynomial order 3)

    filter_window = max(int(plume.read_tag('W', [Case]) * 10 + 1), 51)
    smoothCenterline = savgol_filter(
        centerline, filter_window,
        3)  # smooth centerline height (adaptive odd window, polynomial order 3)

    #calculate concentration changes along the centerline
    dPMdX = pmCtr[1:] - pmCtr[0:-1]
    # smoothPM = savgol_filter(dPMdX, 101, 3) # window size 101, polynomial order 3
    smoothPM = savgol_filter(dPMdX, filter_window,
                             3)  # adaptive odd window, polynomial order 3

    # stablePMmask = [True if abs(smoothPM[nX])< np.nanmax(smoothPM)*0.05 and nX > np.nanargmax(smoothPM) else False for nX in range(dimX-1) ]
    stablePMmask = [True if abs(smoothPM[nX])< np.nanmax(smoothPM)*0.1 and \
                            abs(smoothCenterline[nX+1]-smoothCenterline[nX]) < 5 and \
                            nX > np.nanargmax(centerline[~centerline.mask][:-50]) and\
                            nX > np.nanargmax(smoothPM) and\
                            nX > np.nanargmax(centerline) +10 and\
                            centerline[nX] < plume.lvltall[-1]-200 and \
Example #50
def detectAftershocks(st,
                      template,
                      trig,
                      min_time=20.,
                      threshold=0.75,
                      newsamprate=20.,
                      before=30.,
                      after=200.,
                      smoothwin=201,
                      smoothorder=3):
    """
    Loops through all triggers in trigger_times, finds the first arrival times 
    of all traces, and compares these to those of the known event. If the arrival 
    times are within min_time_diff for at least min_stations number of stations, 
    an aftershock is detected.
    INPUT
    st - obspy stream object with seismic data
    template - obspy stream of known event
    trig - list of coicidence_trigger objects
    min_time (float) - optional; minimum time between aftershocks to count as 
        separate events (sec)
    threshold (float) - optional; cross-correlation threshold for use in 
        templateXcorrRA
    newsamprate (float) - optional; sampling rate to resample template to. Must
        match the sampling rate of the stream object the template is being 
        compared to.
    before (float) - seconds before trigger time to look for aftershocks in
    after (float) - seconds after trigger time to look for aftershocks in
    smoothwin (int) - window length in samples for Savgol smoothing of envelopes
    smoothorder (int) - polynomial order for Savgol smoothing
    OUTPUT
    aftershocks (list of UTCDateTimes) - times of detected aftershocks
    discard (list): list of discarded triggers
    st_after (list): list of obspy streams of extfacted aftershock triggers
    """

    # Create empty lists to store aftershock times
    aftershocks = []
    st_after = []
    discard = []

    # Process template
    try:
        tproc = template.copy()
        for tr in tproc:
            tr.data = filte.envelope(tr.data)

        # Process signal slice around each triggering time
        for t in range(0, len(trig)):
            print('Processing trigger %i of %i...' % (t + 1, len(trig)))
            print(trig[t]['time'])

            # Take slice of signal around trigger time
            temp = st.copy().trim(trig[t]['time'] - before,
                                  trig[t]['time'] + after)

            # Resample signal
            temp.resample(newsamprate)

            # Process trigger
            stproc = temp.copy()
            for tr in stproc:
                tr.data = spsignal.savgol_filter(filte.envelope(tr.data),
                                                 smoothwin, smoothorder)

            # Check if only stations present in tproc are present in stproc
            tproc_stations = [tr.id for tr in tproc]
            for tr in stproc:
                if tr.id not in tproc_stations:
                    stproc.remove(tr)

            # Check if tproc is not longer than stproc
            if len(tproc[0]) > len(stproc[0]):
                max_index = len(stproc[0])
                for s in range(len(stproc)):
                    stproc[s].data = stproc[s].data[:max_index]
                for s in range(len(tproc)):
                    tproc[s].data = tproc[s].data[:max_index]

            # Cross correlate
            times = []
            xcorFunc, xcorLags, ccs, times = sigproc.templateXcorrRA(
                stproc, tproc, threshold=threshold)

            # Check if event meets threshold
            if len(times) == 0:
                discard.append(trig[t])
            else:
                # Take highest value returned and adjust for before time to get
                # approximate event time
                indx = np.argmax(xcorFunc)
                aftershocks.append(stproc[0].stats.starttime + xcorLags[indx] +
                                   before)

        # If any times in aftershocks are within min_time of each other, keep only the first
        if len(aftershocks) > 1:
            aftershocks.sort()  # Sort times from earliest to latest
            new_aftershocks = [aftershocks[0]]
            for a in range(1, len(aftershocks)):
                if aftershocks[a] - aftershocks[a - 1] > min_time:
                    new_aftershocks.append(aftershocks[a])

            aftershocks = new_aftershocks

        for af in aftershocks:
            st_after.append(st.copy().trim(af - before, af + after))

        print('%i aftershock(s) found.' % len(aftershocks))
    except Exception:
        print(
            'Error occurred while finding aftershocks. Returning all trigger times.'
        )
        traceback.print_exc()
        aftershocks = [trig[t]['time'] for t in range(len(trig))]

    print('')  # Print blank line

    return (aftershocks, discard, st_after)
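A runnable toy of the preprocessing step the detector relies on: smooth the envelope of a trace before correlating. (Synthetic data; `filte.envelope` above is project code, and a plain analytic-signal envelope is assumed to match it.)

import numpy as np
from scipy.signal import savgol_filter, hilbert

fs, smoothwin, smoothorder = 20.0, 201, 3
rng = np.random.default_rng(0)
t = np.arange(0, 300, 1 / fs)
trace = 0.1 * rng.standard_normal(t.size)
trace[int(100 * fs):int(110 * fs)] += np.sin(2 * np.pi * 2.0 * t[:int(10 * fs)])

env = np.abs(hilbert(trace))                    # envelope of the trace
smooth_env = savgol_filter(env, smoothwin, smoothorder)
print('peak near t = %.1f s' % t[np.argmax(smooth_env)])  # within 100-110 s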
Example #51
def plot_GFP_timecourse_7seq(evoked_dict, ch_type='eeg', filter=True):
    """
    Function to plot the mean and the sem of the GFP for all the sequences across the participants.
    :param evoked_dict: dictionary of evoked data, one entry per sequence
    :param ch_type: channel type to plot ('eeg', 'grad' or 'mag')
    :param filter: if True, smooth the GFP curves with a Savitzky-Golay filter
    :return: matplotlib figure
    """

    evoked_dict_copy = copy.deepcopy(evoked_dict)

    # Additional parameters
    units = dict(eeg='uV', grad='fT/cm', mag='fT')
    ch_colors = dict(eeg='green', grad='red', mag='blue')

    # Create group average GFP per sequence
    allseq_mean = []
    allseq_ub = []
    allseq_lb = []
    for nseq in range(7):
        cond = list(evoked_dict_copy.keys())[nseq]
        data = copy.deepcopy(evoked_dict)
        data = data[cond]
        gfp_cond, times = GFP_funcs.gfp_evoked(data)
        mean = np.mean(gfp_cond[ch_type], axis=0)
        ub = mean + sem(gfp_cond[ch_type], axis=0)
        lb = mean - sem(gfp_cond[ch_type], axis=0)
        if filter:
            mean = savgol_filter(mean, 11, 3)
            ub = savgol_filter(ub, 11, 3)
            lb = savgol_filter(lb, 11, 3)
        allseq_mean.append(mean)
        allseq_ub.append(ub)
        allseq_lb.append(lb)
    times = times * 1000

    if times[-1] > 3000:
        datatype = 'fullseq'
        figsize = (9, 9)
    else:
        datatype = 'items'
        figsize = (3, 9)

    # Create figure
    fig, ax = plt.subplots(7,
                           1,
                           figsize=figsize,
                           sharex=False,
                           sharey=True,
                           constrained_layout=True)
    fig.suptitle(ch_type, fontsize=12)

    # Plot
    for nseq in range(7):
        seqname, seqtxtXY, violation_positions = epoching_funcs.get_seqInfo(
            nseq + 1)
        mean = allseq_mean[nseq]
        ub = allseq_ub[nseq]
        lb = allseq_lb[nseq]
        ax[nseq].set_title(seqname, loc='left', weight='bold', fontsize=12)
        ax[nseq].fill_between(times, ub, lb, color='black', alpha=.2)
        ax[nseq].plot(times,
                      mean,
                      color=ch_colors[ch_type],
                      linewidth=1.5,
                      label=seqname)
        ax[nseq].axvline(0, linestyle='-', color='black', linewidth=2)
        ax[nseq].set_xlim([min(times), max(times)])
        # Add vertical lines
        for xx in range(16):
            ax[nseq].axvline(250 * xx,
                             linestyle='--',
                             color='black',
                             linewidth=1)
        # Remove spines
        for key in ('top', 'right', 'bottom'):
            ax[nseq].spines[key].set(visible=False)
        ax[nseq].set_ylabel('GFP (' + units[ch_type] + ')')
        ax[nseq].set_xticks([], [])
        fmt = ticker.ScalarFormatter(useMathText=True)
        fmt.set_powerlimits((0, 0))
        ax[nseq].get_yaxis().set_major_formatter(fmt)
        if datatype == 'items':
            ax[nseq].get_yaxis().get_offset_text().set_position(
                (-0.22, 0))  # move 'x10^x', does not work with y
        elif datatype == 'fullseq':
            ax[nseq].get_yaxis().get_offset_text().set_position(
                (-0.07, 0))  # move 'x10^x', does not work with y
    ax[nseq].set_xlabel('Time (ms)')
    if datatype == 'items':
        ax[nseq].set_xticks(range(0, 800, 200), [])
    elif datatype == 'fullseq':
        ax[nseq].set_xticks(range(-500, 4500, 500), [])
        # Add "xY" using the same yval for all
        ylim = ax[nseq].get_ylim()
        yval = ylim[1] - ylim[1] * 0.1
        for nseq in range(7):
            seqname, seqtxtXY, violation_positions = epoching_funcs.get_seqInfo(
                nseq + 1)
            for xx in range(16):
                ax[nseq].text(250 * (xx + 1) - 125,
                              yval,
                              seqtxtXY[xx],
                              horizontalalignment='center',
                              fontsize=12)

    return fig
# Load reference data (to get total sugar values)
ref_url = 'https://raw.githubusercontent.com/sr2322/mytests/main/finals_ref.csv'
ref_data = pd.read_csv(ref_url, sep=',')
SampleTSraw = ref_data.values[:, 1]
SampleTSes = []
for s in SampleTSraw:
    test_s = str(s)
    if test_s[0].isnumeric():
        SampleTSes.append(float(s))
    else:
        SampleTSes.append(float(0.00))
TS_max = max(SampleTSes)

# Apply correction
Xmsc = snv(Xmsc)  # SNV correction (snv is a project helper defined elsewhere)
Xmsc = savgol_filter(Xmsc, 35, polyorder=3, deriv=0)

linregcoeffs_msc = []
Xs_msc = pd.DataFrame(data=Xmsc, index=SampleTSes, columns=wl_str)

for wl_str_i in wl_str:
    Column_Xs_msc = Xs_msc[wl_str_i].values
    slope, intercept, r_value, p_value, std_err = stats.linregress(
        SampleTSes, Column_Xs_msc)
    linregcoeffs_msc.append(round(r_value, 2))
#linregcoeffs_msc = np.array(linregcoeffs_msc)
print(max(linregcoeffs_msc))
print(min(linregcoeffs_msc))

## Plot original and corrected spectra
plt.figure(figsize=(24, 27))
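`snv` above is project code that is not shown here; assuming it is the usual Standard Normal Variate correction, a minimal sketch (not the author's implementation) would be:

import numpy as np

def snv_sketch(X):
    # Centre and scale each spectrum (row) by its own mean and std
    X = np.asarray(X, dtype=float)
    return (X - X.mean(axis=1, keepdims=True)) / X.std(axis=1, keepdims=True)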
Example #53
    def fillDataStructure2(self):
        lookupY = self._vhLookUp
        for strand in self._scaffold.strand5p().generator3pStrand():
            vh = strand.virtualHelix()
            coord = vh.coord()
            vhNum = vh._number

            ycoord = lookupY[vhNum]
            stapleStrands = vh.stapleStrandSet()
            low, high = strand.idxs()

            if strand.sequence():
                #print "Strand has sequence\n"
                sequence = strand.sequence()
                compS = strand.getComplementStrands()
                numericSequence = self.convertSeqtoNum(sequence)
                fiveToThree = strand.isDrawn5to3()
                #print "Start set to %s; End set to %s\n" %(start,end)

            if fiveToThree == 0:
                numericSequence = numericSequence[::-1]
            bcount = 0
            for x in range(low, high + 1):
                subn = strand.insertionLengthBetweenIdxs(x, x)
                if (stapleStrands.hasStrandAt(x, x) and subn != -1):
                    self._z.append(numericSequence[bcount])
                    self._y.append(ycoord)
                    self._x.append(x)
                    bcount += 1
                elif subn == -1:
                    self._z.append('nan')
                    self._y.append(ycoord)
                    self._x.append(x)
                else:
                    self._z.append(None)
                    self._y.append(ycoord)
                    self._x.append(x)

        xmax = max(self._x)
        xmin = min(self._x)
        ymax = max(self._y)
        self._map = self.initDataStructure2(xmax, ymax)
        for n in range(len(self._x)):
            self._map[self._y[n]][self._x[n]] = self._z[n]

        for y in range(len(self._map)):
            noneSt = -1
            addEnd = 0
            for x in range(len(self._map[y])):
                if self._map[y][x] is not None and noneSt == -1:
                    noneSt = x
                if self._map[y][x] == 'nan':
                    del (self._map[y][x])
                    if addEnd % 2 == 1:
                        self._map[y].append(None)
                    else:
                        self._map[y].insert(0, None)
                    addEnd += 1

        for p in range(len(self._map)):
            mRow = signal.savgol_filter(self._map[p], self._smooth, 2).tolist()
            #Sgolay filter will have values greater than 1 or less than 0. This shouldn't
            #be possible, so fix it here.
            mRow = [x if np.isnan(x) or x < 1 else 1 for x in mRow]
            mRow = [x if np.isnan(x) or x > 0 else 0 for x in mRow]
            self._map[p] = mRow
Example #54
else:
    lower, upper = 10, -10
ypos_ref, xpos_ref = funcs.detpixel_trace(band,
                                          d2cMaps,
                                          sliceID=ref_slice,
                                          alpha_pos=ref_alpha)
sci_fm_data_ref = mrs_transmission_img[ypos_ref, xpos_ref][lower:upper]

# create a finer grid
step = 0.2
fine_grid = np.arange(lower, 1023 - abs(upper) + step, step)
if band == '2C':
    fine_grid = fine_grid[:-2]
sci_fm_data_ref_fine = interp1d(lower + np.arange(len(sci_fm_data_ref)),
                                sci_fm_data_ref)(fine_grid)
if band in ['1B', '2C']:
    sci_fm_data_ref_fine = savgol_filter(sci_fm_data_ref_fine, 201, 2)

pix_offsets = []
offsets = np.arange(1, 200)
wider_offsets = np.arange(-200, 200)
if band == '2C':
    offsets = np.arange(1, 300)
    wider_offsets = np.arange(-200, 300)
plot = True
for islice in range(1, nslices + 1):
    # for islice in [1]:
    if islice == 10:
        pix_offset = 0.
        pix_offsets.append(round(pix_offset, 2))
        continue
    ypos, xpos = funcs.detpixel_trace(band,
Example #55
def make_bao_plot(fname):
    """Does the work of making the BAO figure."""

    # Now make the figure.
    fig, ax = plt.subplots(1, 2, figsize=(8, 4))

    jj = 0
    for iz, zz in enumerate(zlist):
        # Read the data from file.
        aa = 1.0 / (1.0 + zz)
        a0 = alphad['%0.1f' % zz]
        b1 = b1lstd['%0.1f' % zz]
        b2 = b2lstd['%0.1f' % zz]

        #PS
        pkd = np.loadtxt(dpath + "HI_bias_{:06.4f}.txt".format(aa))[1:, :]
        bb = pkd[1:6, 2].mean()
        #redshift space
        pks = np.loadtxt(dpath + "HI_pks_1d_{:06.4f}.txt".format(aa))[1:, :]
        pks = ius(pks[:, 0], pks[:, 1])(pkd[:, 0])

        # Now read linear theory and put it on the same grid -- currently
        # not accounting for finite bin width.
        lin = np.loadtxt("../../data/pklin_{:6.4f}.txt".format(aa))
        lin = ius(lin[:, 0], lin[:, 1])(pkd[:, 0])

        ps = pkd[:, 2]**2 * pkd[:, 3]
        ss = savgol_filter(ps, winsize, polyorder=2)
        rat = ps / ss
        ss = savgol_filter(pks, winsize, polyorder=2)
        rats = pks / ss
        ss = savgol_filter(lin, winsize, polyorder=2)
        ratlin = lin / ss

        ax[0].plot(pkd[:,0],rat+0.2*(jj),'C%d-'%iz,\
                    label="z={:.1f}".format(zz))
        ax[0].plot(pkd[:, 0], rats + 0.2 * (jj), 'C%d--' % iz, alpha=0.5, lw=2)
        ax[0].plot(pkd[:, 0], ratlin + 0.2 * (jj), ':', color='gray', lw=1.5)
        ax[0].axhline(1 + 0.2 * (jj), color='gray', lw=0.5, alpha=0.5)

        #xi

        #ilpk, ilpklin, ilpkza = get_ilpk(zz)
        ilpk = get_ilpk(zz)
        ilpks = get_ilpk(zz, mode='red')

        kk = np.logspace(-4, 2, 10000)
        xif = P2xi(kk)
        rr, xi = xif(ilpk(kk))
        rr, xis = xif(ilpks(kk))
        mask = (rr > 1) & (rr < 150)
        rr, xi, xis = rr[mask], xi[mask], xis[mask]
        #read theory
        xiz = np.loadtxt("../../theory/zeld_{:6.4f}.xir".format(aa)).T
        rrz = xiz[0]
        #xilin = xiz[1]*(1+b1)**2
        xilin = xiz[1] * (bb)**2
        ff = (0.31 / (0.31 + 0.69 * aa**3))**0.55
        ffb = ff / (1 + b1)
        #kaiser = (1+b1)**2*(1 + 2*ffb/3 + ffb**2/5)
        kaiser = (bb)**2 * (1 + 2 * ffb / 3 + ffb**2 / 5)
        xilins = xiz[1] * kaiser
        #interpolate theory on data
        xilinrr = ius(rrz, xilin)(rr)
        xilinsrr = ius(rrz, xilins)(rr)

        off = 10
        ax[1].plot(rr,
                   jj * off + rr**2 * xi,
                   'C%d' % iz,
                   label="z={:.1f}".format(zz))
        ax[1].plot(rr, jj * off + rr**2 * xis, 'C%d--' % iz, lw=2, alpha=0.5)
        ax[1].plot(rrz, jj * off + xilin, ':', color='gray', lw=1.5)
        ax[1].plot(rrz, jj * off + xilins, '-', color='gray', lw=0.1)

        jj = jj + 1


#        ax[1].plot(rr, 0.2*jj + rr**2*xi/xilinrr, 'C%d'%iz, label="z={:.1f}".format(zz))
#        ax[1].plot(rr, 0.2*jj + rr**2*xis/xilinsrr, 'C%d--'%iz, lw=2, alpha=0.7)

#ZA
#        xiza = xiz[2] + b1*xiz[3] + b2*xiz[4] + b1**2*xiz[5]+\
#                   b1*b2*xiz[6] + b2**2*xiz[7] + a0*xiz[8]
#        xizas = xiz[2+9] + b1*xiz[3+9] + b2*xiz[4+9] + b1**2*xiz[5+9]+\
#                   b1*b2*xiz[6+9] + b2**2*xiz[7+9] + a0*xiz[8+9]
#        ax[1].plot(rrz, jj*10+ xiza, 'C%d--'%iz, lw=2, alpha=0.5)
#        ax[1].plot(rrz, jj*10+ xizas, 'k:', lw=2, alpha=0.5)

    # Tidy up the plot.
    ax[0].legend(ncol=2, framealpha=0.5, prop=fontmanage)
    ax[0].set_xlim(0.045, 0.4)
    ax[0].set_ylim(0.75, 1.5)
    #ax[1].legend(ncol=2,framealpha=0.5,prop=fontmanage)
    ax[1].set_xlim(55, 120)
    ax[1].set_ylim(0., 37)
    #ax[1].set_ylim(0.75, 5)
    for ii in range(ax.size):
        ax[ii].set_xscale('linear')
        ax[ii].set_yscale('linear')
    # Put on some more labels.
    ax[0].set_xlabel(r'$k\quad [h\,{\rm Mpc}^{-1}]$', fontdict=font)
    ax[1].set_xlabel(r'$r\quad [{\rm Mpc}/h]$', fontdict=font)
    ax[0].set_ylabel(r'$P(k)/P_{\rm nw}(k)$+offset', fontdict=font)
    ax[1].set_ylabel(r'$r^2 \xi(r)$+offset', fontdict=font)
    for axis in ax.flatten():
        for tick in axis.xaxis.get_major_ticks():
            tick.label.set_fontproperties(fontmanage)
        for tick in axis.yaxis.get_major_ticks():
            tick.label.set_fontproperties(fontmanage)

    # and finish up.
    plt.tight_layout()
    plt.savefig(fname)
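savgol_filter acts above as a broadband ("no-wiggle") estimate so that the plotted ratios oscillate around unity; a self-contained illustration of the trick (synthetic spectrum; the odd window value is an assumption):

import numpy as np
from scipy.signal import savgol_filter

k = np.linspace(0.02, 0.4, 400)
pk = k ** -1.5 * (1 + 0.05 * np.sin(k / 0.06))   # power law plus mock wiggles

winsize = 101                                    # assumed odd window length
smooth = savgol_filter(pk, winsize, polyorder=2) # broadband piece
ratio = pk / smooth                              # oscillatory part, near unity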
Example #56
def find_bg(trace):
    # Estimate the background level as the mode of the smoothed amplitude histogram
    counts, edges = np.histogram(trace, 50)
    counts_f = savgol_filter(counts, 11, 3)
    return edges[np.argmax(counts_f)]
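A quick check of the histogram-mode background estimate on synthetic data (values are illustrative assumptions):

import numpy as np

rng = np.random.default_rng(1)
trace = 2.0 + 0.05 * rng.standard_normal(5000)  # flat baseline at 2.0
trace[rng.integers(0, 5000, 50)] += 10.0        # sparse positive spikes
print(find_bg(trace))                           # prints roughly 2.0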
Example #57
def plot_hpd(
    x,
    y,
    credible_interval=0.94,
    color="C1",
    circular=False,
    smooth=True,
    smooth_kwargs=None,
    fill_kwargs=None,
    plot_kwargs=None,
    ax=None,
):
    """
    Plot hpd intervals for regression data.

    Parameters
    ----------
    x : array-like
        Values to plot
    y : array-like
        Values from which to compute the hpd
    credible_interval : float, optional
        Credible interval to plot. Defaults to 0.94.
    color : str
        Color used for the limits of the HPD interval and fill. Should be a valid matplotlib color
    circular : bool, optional
        Whether to compute the hpd taking into account `x` is a circular variable
        (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
    smooth : boolean
        If True the result will be smoothed by first computing a linear interpolation of the data
        over a regular grid and then applying the Savitzky-Golay filter to the interpolated data.
        Defaults to True.
    smooth_kwargs : dict, optional
        Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for
        details
    fill_kwargs : dict
        Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).
    plot_kwargs : dict
        Keywords passed to HPD limits
    ax : matplotlib axes

    Returns
    -------
    ax : matplotlib axes
    """
    if plot_kwargs is None:
        plot_kwargs = {}
    plot_kwargs.setdefault("color", color)

    if fill_kwargs is None:
        fill_kwargs = {}
    fill_kwargs.setdefault("color", color)
    fill_kwargs.setdefault("alpha", 0.5)

    if ax is None:
        ax = gca()

    hpd_ = hpd(y, credible_interval=credible_interval, circular=circular)

    if smooth:
        if smooth_kwargs is None:
            smooth_kwargs = {}
        smooth_kwargs.setdefault("window_length", 55)
        smooth_kwargs.setdefault("polyorder", 2)
        x_data = np.linspace(x.min(), x.max(), 200)
        hpd_interp = griddata(x, hpd_, x_data)
        y_data = savgol_filter(hpd_interp, axis=0, **smooth_kwargs)
    else:
        idx = np.argsort(x)
        x_data = x[idx]
        y_data = hpd_[idx]

    ax.plot(x_data, y_data, **plot_kwargs)
    ax.fill_between(x_data, y_data[:, 0], y_data[:, 1], **fill_kwargs)

    return ax
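A hypothetical usage sketch of plot_hpd, assuming the shapes implied by the docstring (posterior-style draws of y stacked along the first axis) and that the module-level helpers (hpd, griddata, gca) are imported as in the original module:

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 10, 100)
y_draws = np.array([2.0 * x + np.random.randn(x.size) for _ in range(500)])
ax = plot_hpd(x, y_draws, credible_interval=0.9, color="C0")
plt.show()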
Example #58
mzi = []

#Load paths for lab books
NO = '/Users/ben/Dropbox/Radical Spectroscopy/REMPI/NO/'
NP = '/Users/ben/Dropbox/Radical Spectroscopy/REMPI/NP/'
NT = '/Users/ben/Dropbox/Radical Spectroscopy/REMPI/NT/'
NU = '/Users/ben/Dropbox/Radical Spectroscopy/REMPI/NU/'
NX = '/Users/ben/Dropbox/Radical Spectroscopy/REMPI/NX/'
NY = '/Users/ben/Dropbox/Radical Spectroscopy/REMPI/NY/'
#print('load lab book paths')
#print('-- NO\n-- NP\n-- NT\n-- NU\n-- NX\n-- NY')

#x,y = np.loadtxt(NU+'nu071b.dat',usecols=(2,3),unpack=True)
x, y = np.loadtxt('nu033ta.dat', unpack=True)
y *= -10
y = savgol_filter(y, 5, 2)

# First, baseline subtraction
subr = np.logical_and(x > 85, x < 90)
yr = y[subr]
ym = np.mean(yr)
y -= ym

#Second, select threshold for peaks.
xc = 0
for i in range(0, np.size(x)):
    if x[i] <= xc: continue
    if y[i] <= th:
        for j in range(i, np.size(x)):
            if y[j] >= th:
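The threshold scan above is cut off; a compact alternative for the same goal, using scipy's peak finder on the smoothed trace (an assumption about the intent — finding peaks above th — not the original loop):

from scipy.signal import find_peaks

peaks, _ = find_peaks(y, height=th)   # indices where y exceeds the threshold
print(x[peaks])                       # peak positions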
Example #59
def savgol_smooth(data, n=7):
    return savgol_filter(data, n, 3, mode='nearest')
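mode='nearest' pads with the edge samples instead of the default edge-polynomial refit (mode='interp'); a quick call with illustrative values:

import numpy as np

data = np.array([0., 1., 4., 9., 16., 25., 36., 49., 64.])
print(savgol_smooth(data))   # 7-sample window, cubic fit, edge-padded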
Example #60
                    local_max.append((i, pts[i]))
    return local_min, local_max


symbol = 'AAPL'
df = web.DataReader(symbol, 'yahoo', '2019-01-01', '2019-04-01')
series = df['Close']
series.index = np.arange(series.shape[0])

month_diff = series.shape[0] // 30
if month_diff == 0:
    month_diff = 1

smooth = int(2 * month_diff + 3)

pts = savgol_filter(series, smooth, 3)

local_min, local_max = local_min_max(pts)

local_min_slope, local_min_int = regression_ceof(local_min)
local_max_slope, local_max_int = regression_ceof(local_max)
support = (local_min_slope * np.array(series.index)) + local_min_int
resistance = (local_max_slope * np.array(series.index)) + local_max_int

plt.title(symbol)
plt.xlabel('Days')
plt.ylabel('Prices')
plt.plot(series, label=symbol)
plt.plot(support, label='Support', c='r')
plt.plot(resistance, label='Resistance', c='g')
plt.legend()
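The definitions of local_min_max and regression_ceof are truncated above; a minimal sketch of what such helpers typically look like (an assumption, not the original code — note also that the 'yahoo' DataReader source may no longer be available, so treat the download as illustrative):

import numpy as np

def local_min_max_sketch(pts):
    # Collect (index, value) pairs at simple local extrema of the series
    local_min, local_max = [], []
    for i in range(1, len(pts) - 1):
        if pts[i] < pts[i - 1] and pts[i] < pts[i + 1]:
            local_min.append((i, pts[i]))
        elif pts[i] > pts[i - 1] and pts[i] > pts[i + 1]:
            local_max.append((i, pts[i]))
    return local_min, local_max

def regression_ceof_sketch(pts):
    # Least-squares line through the extrema: returns (slope, intercept)
    x = np.array([p[0] for p in pts])
    y = np.array([p[1] for p in pts])
    slope, intercept = np.polyfit(x, y, 1)
    return slope, intercept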