Example #1
def FineTunning(Signal1,Initial_Locations,Final_Locations,Range1,L_bin):
    """
    Select the start and end point of transcribed regions.
    Parameters
        Signal1: read density of every bp in the region (1D array)
        Initial_Locations: initial start point of each region
        Final_Locations: initial end point of each region
        Range1: the number of bp from which to select a potential new start/end point
        L_bin: bin size
    Return
        Initial_Locations1: the new region start points
        Final_Locations1: the new region end points
    """
    Initial_Locations1 = np.zeros(len(Initial_Locations))
    Final_Locations1 = np.zeros(len(Final_Locations))
    for k in range(len(Initial_Locations)):
        #R=np.floor(0.1*(Final_Locations[k]-Initial_Locations[k]))
        # refine the start point: first wavelet peak inside the search window
        Signal = Signal1[Initial_Locations[k] - Range1 // 2:Initial_Locations[k] + Range1 // 2]
        val = HaarWavelet(np.sqrt(L_bin), L_bin, Signal, Range1)
        ind1 = detect_peaks(val, mpd=1, threshold=0)
        Initial_Locations1[k] = ind1[0] + Initial_Locations[k] - Range1 // 2
        # refine the end point: recompute the wavelet response for the end window
        Signal = Signal1[Final_Locations[k] - Range1 // 2:Final_Locations[k] + Range1 // 2]
        val = HaarWavelet(np.sqrt(L_bin), L_bin, Signal, Range1)
        ind1 = detect_peaks(val, mpd=1, threshold=0, valley=True)
        Final_Locations1[k] = ind1[0] + Final_Locations[k] - Range1 // 2
    return Initial_Locations1, Final_Locations1
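The same snap-to-edge pattern can be exercised without HaarWavelet (defined elsewhere in this project); a minimal sketch, with a plain finite difference standing in for the wavelet response and detect_peaks taken from the BMC notebook cited throughout these examples:

import numpy as np
from detect_peaks import detect_peaks

signal = np.r_[np.zeros(100), np.ones(200), np.zeros(100)]
start_guess, rng = 95, 40
# edge response inside the search window around the provisional boundary
window = np.diff(signal[start_guess - rng // 2:start_guess + rng // 2])
new_start = detect_peaks(window, mpd=1)[0] + start_guess - rng // 2
print(new_start)  # snaps from the guess (95) to the true rising edge at 99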
Example #2
	def plot_cm(self, minVal, maxVal, y_axmin=0, y_axmax=255, array=None, smooth=False, peaks=False, mind=1, ind=1, canny=False):
		#Create Plot of Contour Mean Intensities.
		if self.all_cm is None or self.filtered_contours is None:
			self.big_cm(canny=canny, minVal=minVal, maxVal=maxVal, array=array)

		c_m = self.all_cm

		if peaks and not smooth:

			data = c_m[:,ind,1]
			window = signal.general_gaussian(4, p=0.5, sig=100)
			filtered = signal.fftconvolve(window, data)
			filtered = (np.average(data)/np.average(filtered))*filtered
		 		
			detect_peaks(filtered, show=True, mpd = mind, y_axmin=y_axmin, y_axmax=y_axmax)
			#Source: http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb

		if smooth:
			xnew=np.linspace(0,len(c_m[:,:,1]),len(c_m[:,:,1]))
			y1=c_m[:,ind,1]#for now must be specified
			f=interp1d(xnew,y1,kind='cubic')(xnew)
			plt.plot(f)
			plt.ylabel('Mean Intensity')
			plt.show()

		if not smooth and not peaks:
			plt.plot(c_m[:,:,1]) 
			plt.ylabel('Mean Intensity')
			plt.show()
def get_Ampl(ecg):
    # # uncomment this if peak values needed
    # # instead of avg values
    # # peak amplitude absolute value
    # __cur_peak_ampl = max(abs(ecg['ecg_val']))
    # # print(__cur_peak_ampl)
    """
    avg peak amplitude - calc average of peaks

    code for peak detect taken from:
    <http://nbviewer.jupyter.org/github/
    demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb>

    adjusted specifically for Hexoskin data
    """
    peak_indices = detect_peaks(np.array(ecg['ecg_val']), mph=1450, mpd=100)
    trough_indices = detect_peaks(np.array(ecg['ecg_val']),
                                  mph=-1320,
                                  mpd=100,
                                  valley=True)
    # # uncomment to visualize
    # plt.plot(ecg['hexoskin_timestamps'], ecg['ecg_val'])
    # plt.plot([ecg['hexoskin_timestamps'][i] for i in peak_indices],
    #          [1400] * len(peak_indices), 'bo')
    # plt.plot([ecg['hexoskin_timestamps'][i] for i in trough_indices],
    #          [1410] * len(trough_indices), 'go')
    # plt.show()

    __peak_avg_ampl = abs(
        (sum([ecg['ecg_val'][i] for i in peak_indices]) / len(peak_indices)))
    __trough_avg_ampl = abs(
        (sum([ecg['ecg_val'][i]
              for i in trough_indices]) / len(trough_indices)))
    __mean_ampl = (__peak_avg_ampl + __trough_avg_ampl) / 2
    return __mean_ampl
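A usage sketch for get_Ampl with a synthetic trace in Hexoskin ADC units (the 1450 / -1320 thresholds above are device-specific, so the fake signal is centred to satisfy them):

import numpy as np
import pandas as pd

t = np.arange(0, 10, 0.004)                      # ~256 Hz, 10 s
vals = 1385 + 100 * np.sin(2 * np.pi * 1.2 * t)  # fake 72 bpm "ECG"
ecg = pd.DataFrame({'hexoskin_timestamps': t, 'ecg_val': vals})
print(get_Ampl(ecg))  # ~1385: mean of the average |peak| and |trough| amplitudes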
    def DHA_detect(self, prev_ampl):
        # avg peak amplitude - calc average of peaks
        # <http://nbviewer.jupyter.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb>
        peak_indices = detect_peaks(np.array(self.ecg['ecg_val']),
                                    mph=0,
                                    mpd=100)
        trough_indices = detect_peaks(np.array(self.ecg['ecg_val']),
                                      mph=0.2,
                                      mpd=100,
                                      valley=True)
        # # uncomment to visualize
        # plt.plot(self.ecg['hexoskin_timestamps'], self.ecg['ecg_val'])
        # plt.plot([self.ecg['hexoskin_timestamps'][i] for i in
        #          peak_indices], [0] * len(peak_indices), 'bo')
        # plt.plot([self.ecg['hexoskin_timestamps'][i] for i in
        #          trough_indices], [-0.1] * len(trough_indices), 'go')
        # plt.show()

        self.mean = (self.ecg['ecg_val'].sum()) / len(self.ecg)
        __peak_avg_ampl = abs(
            (sum([self.ecg['ecg_val'][i]
                  for i in peak_indices]) / len(peak_indices)) - self.mean)
        __trough_avg_ampl = abs(
            (sum([self.ecg['ecg_val'][i]
                  for i in trough_indices]) / len(trough_indices)) + self.mean)
        __cur_ampl = (__peak_avg_ampl + __trough_avg_ampl) / 2

        # Note that __cur_ampl is in mV, as it is needed
        # for further processing, but we use self.mean_ampl
        # since it is easier to calculate for a window in general
        if self.mean_ampl > prev_ampl:
            return __cur_ampl, True
        else:
            return __cur_ampl, False
Example #5
def Proprio_Analyse():
    exercise = "prop"
    global maximus
    gx = savgol_filter(hexdata[:, 3], 61, 3)
    gy = savgol_filter(hexdata[:, 4], 21, 5)
    gz = savgol_filter(hexdata[:, 5], 21, 5)
    ff = savgol_filter(filtreflex, 101, 3)
    Min_Mvt = detect_peaks(ff, mph=-30, mpd=150, edge='rising', valley=True)
    Max_GyrX = detect_peaks(gx, mph=20, mpd=200, edge='rising')
    me = mean(gx[Max_GyrX])
    Max_GyrX = detect_peaks(gx, mph=me - 20, mpd=70, edge='rising')
    Min_GyrX = detect_peaks(gx, mph=20, mpd=100, edge='rising', valley=True)
    me = mean(gx[Min_GyrX])
    Min_GyrX = detect_peaks(gx,
                            mph=-me - 20,
                            mpd=100,
                            edge='rising',
                            valley=True)
    maximus = np.zeros([len(Min_GyrX), 11])
    #for i in range(0,len(Min_Mvt)-1):

    for i in range(len(Min_GyrX)):
        maximus[i][0] = searchmin_Avant(Min_GyrX[i], exercise, Min_Mvt)
        maximus[i][1] = Min_GyrX[i]
        maximus[i][2] = searchmin_Apres(Min_GyrX[i], Max_GyrX)
        maximus[i][3] = searchmin_Apres(Min_GyrX[i], Min_Mvt)
        maximus[i][4] = maximus[i][3] - maximus[i][0]
        maximus[i][5] = variationsignal(int(maximus[i][0]), int(maximus[i][3]),
                                        int(maximus[i][1]), 'flex', filtreflex,
                                        'prop')
    filtreMvtsProprio()

    return maximus
Example #6
    def GetPeaks(self, windowsize, peak_or_valley=False):

        if max(abs(np.array(
                self.activation_list))) == 0:  # no peaks if no response
            self.peaks = np.zeros(len(self.stimulus_category))

        else:
            normalized_activation_list = self.activation_list / max(
                abs(np.array(self.activation_list))
            )  # normalize the curve to max at 1; the threshold for a peak is then above 0.5
            smoothed_activation_list = smooth(normalized_activation_list,
                                              windowsize)

            peak_locations = np.zeros(
                len(self.stimulus_category)
            )  # output is an array where peaks are indicated by ones, index corresponds to wavelength

            peak_indices = detect_peaks.detect_peaks(smoothed_activation_list,
                                                     mph=0.5,
                                                     edge=None,
                                                     valley=peak_or_valley)
            for index in peak_indices:
                peak_locations[index] = 1

            peak_indices_negative = detect_peaks.detect_peaks(
                smoothed_activation_list * -1.,
                mph=0.5,
                edge=None,
                valley=peak_or_valley)  #look for peaks in inhibition
            for index in peak_indices_negative:
                peak_locations[index] = 1
            self.peaks = peak_locations
    def FindPeaks(self, filtered_signal_2):
        """
        searches for the peak positions in the smoothed noise-less signal
        takes: smoothed noise-less signal
        returns: list of peak indices
        """
        #rmsn = 1.9#rmsval#1.5
        indices = np.where(self.peakless < 0)[0]
        # indices where the wave is below the upper limit
        peakless_cut = np.zeros(len(self.peakless))
        # create an np array of zeros of equivalent size
        peakless_cut[indices] = self.peakless[indices]
        # fill only those indices where the wave is below the upper limit -> peakless signal



        if args.regr:
            if args.regr == 1:
                rms = calcregr(self.V, self.savepath, self.iterT)
            if args.regr == 2:
                rms = self.high_pass
        else:
            rmsn = calcrmsn(self.V)
            rms = np.std(self.peakless) * rmsn
        #print rms*2
        #print rms
        # show the detection plot only at the highest plot level
        pulse_pos = detect_peaks(filtered_signal_2, rms, mpd=25, show=(args.plot == 3))
 
        return pulse_pos
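The rms value above acts as a noise-floor gate: the minimum peak height handed to detect_peaks is a multiple of the baseline standard deviation. A self-contained sketch of that idea (all numbers made up):

import numpy as np
from detect_peaks import detect_peaks

trace = np.random.randn(2000) * 0.1       # baseline noise
trace[250::250] += 1.0                    # injected pulses every 250 samples
rms = 5 * np.std(trace[:200])             # ~5-sigma noise floor
print(detect_peaks(trace, rms, mpd=25))   # ~ [250, 500, ..., 1750]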
Example #8
def extentionAnalyse():
    exercise = "extention"
    maxx = detect_peaks(filtreflex,
                        mph=-50,
                        mpd=20,
                        edge='rising',
                        valley=True)
    minn = detect_peaks(filtreflex, mph=40, mpd=30, edge='rising')
    k = 0
    maxxx = []
    minnn = []
    # Minimal filtering of the detected mins and maxes
    for i in range(0, len(maxx) - k):
        if flex[maxx[i]] < 70:
            maxxx.append(maxx[i])
    for i in range(0, len(minn)):
        if flex[minn[i]] > -20:
            minnn.append(minn[i])
    minlen = len(minnn)
    mouvements = []
    size = 0
    maximus = np.zeros([len(maxxx), 11])
    maximus = getMaximus()
    maximus = maximus[maximus[:, 6] < 150]
    maximus = maximus[maximus[:, 6] > 10]
    maximus = maximus[maximus[:, 3] - maximus[:, 0] > 15]
    maximus = maximus[maximus[:, 3] - maximus[:, 0] < 150]
    notes = getNotes()
    Notes = pd.DataFrame(notes, columns=names)

    return
Example #9
def findPeaks(a,n):
    mpd = 80
    mph =4
    edge='rising'
    
    a_rows = np.shape(a)[0]
    ax,ay,az,am = splitVec(a)
    
    a_p = np.zeros((a_rows, 4 * n))
    a_np = np.zeros((a_rows, 4 * n))
    for i in range (a_rows):
        fx_peaks = detect_peaks(ax[i,:],edge=edge, show=False)
        fy_peaks = detect_peaks(ay[i,:],edge=edge, show=False)
        fz_peaks = detect_peaks(az[i,:],edge=edge, show=False)
        fm_peaks = detect_peaks(am[i,:],edge=edge, show=False)
        
        a_p[i,:n] = np.sort(ax[i,fx_peaks])[-1*n:]
        a_p[i,n:2*n] = np.sort(ay[i,fy_peaks])[-1*n:]
        a_p[i,2*n:3*n] = np.sort(az[i,fz_peaks])[-1*n:]
        a_p[i,3*n:] = np.sort(am[i,fm_peaks])[-1*n:]
        
        a_np[i,:n] = len(fx_peaks)
        a_np[i,n:2*n] = len(fy_peaks)
        a_np[i,2*n:3*n] = len(fz_peaks)
        a_np[i,3*n:] = len(fm_peaks)

    return a_p, a_np
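A quick check of the "n largest peak values" idiom used per axis above (a sketch for a single row; note that np.sort(...)[-n:] silently yields fewer values when fewer than n peaks exist):

import numpy as np
from detect_peaks import detect_peaks

row = np.sin(np.linspace(0, 8 * np.pi, 400)) * np.linspace(1, 2, 400)
pk = detect_peaks(row, edge='rising', show=False)
print(np.sort(row[pk])[-3:])  # the three highest of the four peak amplitudes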
    def optimize_peak_finding(self, y):
        '''Make sure we get a proper order of peaks and valleys. Determine the zoom size where the first 20 or so peaks will show up, and the proper minimum ADC distance between peaks (mpd for detect_peaks).
        Could pass the gain and make sure the mpd is not bigger than the gain, or calculate the gain on the first two peaks...'''
        # To do: if there is no peak in ~2*mpd, we should reduce the mpd
        # Look at smoothing https://pythonhosted.org/scikits.datasmooth/regularsmooth.html
        NonzeroIndeces = y.nonzero()
        xmin = np.amin(NonzeroIndeces)
        xmax = np.amax(NonzeroIndeces)
        width = xmax - xmin
        xminzoom = xmin - 0.02 * width
        xmaxzoom = xmax
        p = detect_peaks(y, edge='falling', mpd=self._MinPeakADCDist)
        v = detect_peaks(y,
                         edge='falling',
                         mpd=self._MinPeakADCDist,
                         valley=True)
        Np = p.size
        Nv = v.size
        if (p.size > 10 and v.size > 10): print(p[:10], v[:10])
        # Check whether there are two consecutive peaks before any valley, then increase mpd by 2:
        NTrial = 0
        while True:
            NBad = 0
            for i in np.arange(0,
                               min(p.size, v.size, 20) -
                               1):  # loop up to 20 peaks or smallest number
                if p[0] > v[0] and p[i] < v[i]:
                    print(
                        "Wrong! Peak is before valley! i=%i p[i]=%i v[i]=%i" %
                        (i, p[i], v[i]))
                    NBad += 1
                if p[i] > v[i + 1]:
                    NBad += 1
                    print(
                        "Wrong! Two consecutive peaks with no valleys in between!"
                    )
                if y[p[i]] < y[p[i + 1]]:
                    print(
                        "Wrong! This peak height is smaller than next peak height! i=%i y[p[i]]=%i y[p[i+1]]=%i"
                        % (i, y[p[i]], y[p[i + 1]]))
                    NBad += 1
            if NBad > 1 and NTrial < 10:  # One error is ok, especially in the hump region. If two errors, then rerun:
                self._MinPeakADCDist += 2
                print("Optimize: Going again NBad=%i, mpd=%i" %
                      (NBad, self._MinPeakADCDist))
                NTrial += 1
                return self.optimize_peak_finding(y)
            else:
                break

        mpd = self._MinPeakADCDist
        if (p.size > 20):
            xmaxzoom = p[20] + 1  # the location of the 20th peak
        else:
            xmaxzoom = p[p.size - 1] + 1  # or as high as we can go
        print(
            'Optimized parameters: _MinZoomADC=%i _MaxZoomADC=%i _MinPeakADCDist=%i NPeaks=%i'
            % (xminzoom, xmaxzoom, mpd, self.NPeaks))
        return xminzoom, xmaxzoom, mpd
Example #11
    def search_peaks(self):
        '''this function will detect the peaks and return:
        peaks, peaks2: stream 1 and stream 2 peaks list
        (referenced to each stream array) and which will be used to shift streams
        gr_plt_data, vn_plt_data: stream 1 and 2 plot data already sliced
        delta_time: time shift between 2 streams, calculated with first peaks
        '''
        peaks_not_detected = True
        user_cancelled = False

        while (peaks_not_detected and not user_cancelled):
            print()
            print()
            print('input GRAPHTEC peak threshold')
            graphtec_mph = tksd.askfloat('Input', 'Enter GRAPHTEC peak threshold', minvalue = 0.0001, maxvalue = 5.0)
            if graphtec_mph:

                print()
                print()
                print('input VectorNav peak threshold')
                #get the threshold
                vn_mph = tksd.askfloat('Input', 'Enter VectorNav peak threshold', minvalue = 0.0001, maxvalue = 50.0)
                if vn_mph:
                    peaks = detect_peaks(self.graphtec['CH8_diff'].values, mph=graphtec_mph)          

                    peaks2 = detect_peaks(self.vn_clean['Acceleration.Z_diff'].values, mph=vn_mph)
                    if len(peaks) == 0:
                        print('Graphtec peaks not detected - choose a lower threshold')
                    if len(peaks2) == 0:
                        print('VectorNav peaks not detected - choose a lower threshold')
                    if (len(peaks)>0) and (len(peaks2)>0):
                        print('peaks found')
                        peaks_not_detected = False
                else:
                    user_cancelled = True
            else:
                user_cancelled = True
        if not user_cancelled:
            
            print('Graphtec initial timestamp: {}'.format(self.graphtec['wrong_dt'].iloc[0]))
            print('Graphtec peak detected at: {}'.format(self.graphtec['wrong_dt'].iloc[peaks[0]]))
            print('VectorNav initial timestamp: {}'.format(self.vn_clean['time'].iloc[0]))
            print('VectorNav peak detected at: {}'.format(self.vn_clean['time'].iloc[peaks2[0]]))
            print()

            dt_window = 10 # plot plus or minus seconds around found peak
            
            delta_time = self.graphtec['wrong_dt'].iloc[peaks[0]]-self.vn_clean['time'].iloc[peaks2[0]]
            print()
            gr_plt_data = self.graphtec[(self.graphtec['wrong_dt'] > (self.graphtec['wrong_dt'].iloc[peaks[0]]-pd.Timedelta(seconds=dt_window))) &
                  (self.graphtec['wrong_dt'] < (self.graphtec['wrong_dt'].iloc[peaks[0]]+pd.Timedelta(seconds=dt_window)))]

            vn_plt_data = self.vn_clean[(self.vn_clean['time'] > (self.vn_clean['time'].iloc[peaks2[0]]-pd.Timedelta(seconds=dt_window))) &
                  (self.vn_clean['time'] < (self.vn_clean['time'].iloc[peaks2[0]]+pd.Timedelta(seconds=dt_window)))]

            return peaks, peaks2, gr_plt_data, vn_plt_data, delta_time
        else:
            return [np.array(None), np.array(None), np.array(None), np.array(None), np.array(None)]
Example #12
def ExtremaLocation (Serias,FDR_Point):
    ind1 = detect_peaks(Serias,mpd=150,threshold=0.0001)
    ind2 = detect_peaks(Serias,mpd=150,threshold=0.0001,valley=True)
    ind=np.sort(np.append(ind1,ind2))   
    a = np.stack((np.abs(Serias[ind] - Serias[ind + 1]),
                  np.abs(Serias[ind] - Serias[ind - 1])))
    MaxChange=a.max(0)
    IndExt=np.argsort(MaxChange)
    Loc=ind[IndExt[-int(np.floor(FDR_Point*len(ind))):]]
    return Loc
Example #13
def Thickness_CalliperAlg(signal_matrices, EZ=False):
    skip_chn = 1
    skip_time_stamp = 1
    MAP_SIZE = (96, 520)
    if EZ:
        skip_chn = 2
        skip_time_stamp = 5
        MAP_SIZE = (int(96 / skip_chn), int(520 / skip_time_stamp))
    distance = np.zeros(MAP_SIZE)
    START_DELAY = 6601
    TOTAL_CHN, TOTAL_ROUND, SIGNAL_LENGTH = signal_matrices.shape
    thickness_map = np.zeros(MAP_SIZE) + timeFlight
    for chn in range(0, TOTAL_CHN, skip_chn):
        for rd in range(0, TOTAL_ROUND, skip_time_stamp):
            signal = signal_matrices[trLayout[chn], rd, :]
            #signal = signal_matrices[chn, rd, :]
            norm_signal = signal / np.max(np.absolute(signal))
            # USE Numpy.argmax instead of for loop to save time
            trigger = np.argmax(norm_signal > 0.594)
            if (trigger < 20) or (trigger > 1900):
                trigger = 20
            else:
                pass
                #main_reflection = norm_signal[trigger - 20 : trigger + 280]
            # map-space indices; do not clobber the chn/rd loop variables
            chn_m = int(chn / skip_chn)
            rd_m = int(rd / skip_time_stamp)
            distance[chn_m, rd_m] = (START_DELAY + trigger) * 740.0 / 15000000
            main_reflection = norm_signal[trigger - 20:trigger + 280]
            abs_conv_result = np.absolute(np.convolve(main_reflection, s))
            peaks_locs = detect_peaks(abs_conv_result,
                                      mph=None,
                                      mpd=20,
                                      show=False)
            peaks_value = abs_conv_result[peaks_locs]
            envelopObject = interpolate.interp1d(peaks_locs,
                                                 peaks_value,
                                                 kind='quadratic')
            xnew = np.linspace(peaks_locs[0],
                               peaks_locs[-1],
                               num=peaks_locs[-1] - peaks_locs[0] + 1)
            envelop = envelopObject(xnew)
            filtered_peaks_locs = detect_peaks(envelop,
                                               mph=None,
                                               mpd=20,
                                               show=False)
            peak_diff = np.diff(filtered_peaks_locs)
            if not coating:
                if len(peak_diff) > 2:
                    thickness_point = np.median(peak_diff)
                    if thickness_point < timeFlight * 1.2:
                        thickness_map[chn_m, rd_m] = thickness_point
                else:
                    thickness_map[chn_m, rd_m] = timeFlight
    print("Done")
    return distance, thickness_map
Example #14
    def get_peaks(self, ts):
        """
        * Return number of peaks found in each of the three time
        * series (x_axis ,y_axis and z_axis) contained in ts 
        """

        peaks_x = len(detect_peaks(ts.x))
        peaks_y = len(detect_peaks(ts.y))
        peaks_z = len(detect_peaks(ts.z))

        return peaks_x, peaks_y, peaks_z
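A usage sketch mirroring the method body, assuming ts is any object exposing x, y and z 1-D arrays (SimpleNamespace stands in for the project's time-series type):

import numpy as np
from types import SimpleNamespace
from detect_peaks import detect_peaks

ts = SimpleNamespace(x=np.random.randn(500),
                     y=np.random.randn(500),
                     z=np.random.randn(500))
print(len(detect_peaks(ts.x)), len(detect_peaks(ts.y)), len(detect_peaks(ts.z)))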
Example #15
def main():
    parser = argparse.ArgumentParser(
        description='Density Based [k-means] Bootstrap method demo')
    parser.add_argument('-d',
                        '--dataset',
                        default='dataset.txt',
                        help='Dataset to analyze')
    parser.add_argument('-k',
                        '--kmeans_maxiter',
                        default=10,
                        help='k-means max iterations')
    parser.add_argument('-b',
                        '--dbb_maxiter',
                        default=5,
                        help='DBB max iterations')
    parser.add_argument('-s', '--show', default=5, help='Produces plots')
    args = parser.parse_args(sys.argv[1:])

    with open(args.dataset, 'r') as dataset:
        points = [list(map(float, row.strip().split()[:2])) for row in dataset]

    # Maybe we shall wrap with `np.asarray`
    xs, ys = zip(*points)
    hx = compute_density(xs)
    hy = compute_density(ys)

    px = detect_peaks(hx[1])
    py = detect_peaks(hy[1])

    centroids = [(hx[0][x], hy[0][y]) for x in px for y in py]

    # Compute pic borders once for all
    picbound = (int(min(xs) * 0.99), int(max(xs) * 1.01), int(min(ys) * 0.99),
                int(max(ys) * 1.01))

    # Top bar
    plot_cool_figure(xs, ys, hx, hy, centroids, px, py, args.dataset, picbound)

    j = 0
    for i in range(int(args.dbb_maxiter)):
        for (clusters, centroids,
             cstats) in kmeans(points,
                               centroids=centroids,
                               max_iter=int(args.kmeans_maxiter),
                               sbs=True):
            log.info('DBB iter: %d. K-Means iter: %d', i, j)
            j += 1

        ellipses = find_ellipses(centroids, clusters)
        plot_density_ellipses(xs, ys, ellipses, args.dataset, i, picbound)
        merges = find_merges(ellipses)
        if not merges:
            break
        centroids = merge(cstats, merges)
def cal_loss(yp, yt):
    total_loss = 0
    for i in range(len(yt)):
        cur_y = yt[i]
        peak = detect_peaks.detect_peaks(cur_y, mpd=150, show=False)
        valley = detect_peaks.detect_peaks(cur_y,
                                           valley=True,
                                           mpd=150,
                                           show=False)
        total_loss += ((yt[i][peak] - yp[i][peak])**2).sum() + (
            (yt[i][valley] - yp[i][valley])**2).sum()
    total_loss = total_loss / len(yt)
    return total_loss
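A tiny check of the peak/valley-weighted loss above (a sketch; 1000 samples so mpd=150 cleanly separates the extrema of a two-period sine):

import numpy as np

t = np.linspace(0, 4 * np.pi, 1000)
yt = np.array([np.sin(t)])
yp = np.array([0.9 * np.sin(t)])
print(cal_loss(yp, yt))  # ~0.04: only the 2 peaks and 2 valleys contribute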
	def peak_detect(self, 
		threshold=1.2,
		high_limit=700, 
		lower_limit = 300,
		peak_length = 0.002 # in seconds, TBD in
		):

		fft_peaks = []
		for row in range(self.udp_server.num_fft_chan):
		    fft_peaks.append(False)

		led_data = 0
		fft_index = 0   
		for fft_buffer in self.eq_data:
			# Auto thresholding for the peaks
			FFTThreshold = np.mean(fft_buffer) * threshold
			if FFTThreshold > high_limit:
				FFTThreshold = high_limit	
			if FFTThreshold < lower_limit:
				FFTThreshold = lower_limit	

			indexes = detect_peaks(fft_buffer, mph = FFTThreshold, mpd = peak_length/self.delay)
			
			if len(indexes):
				if indexes[0]==1: 
					fft_peaks[fft_index] = True	

			fft_index += 1

		self.fft_peaks = fft_peaks
Example #18
def process_profile_PS3(Amplit, SamplingFreq, TimeStart, FilterFreq, Downsample):
    High = 10e6
    Low = 2.5e6

    Time = TimeStart + 1e3 * (np.arange(0, Amplit.size, 1) / SamplingFreq)
    #plt.plot(Time,Amplit)
    Amplit = butter_bandpass_filter(Amplit,Low, High, SamplingFreq, order=1)
    #plt.plot(Time,Amplit)

    # Trimm the vectors around centre
    #TimeAround = np.int(2e3/SamplingFreq)
    #IndexCentre = np.where(Amplit == np.max(Amplit))
    #Time = Time[IndexCentre-TimeAround:IndexCentre+TimeAround]
    #Amplit = Amplit[IndexCentre-TimeAround:IndexCentre+TimeAround]

    mpd = int(2e-6 * SamplingFreq)
    indexes = detect_peaks(Amplit, mpd=mpd)

    #IntegralAround = np.int(1.13e-6 * SamplingFreq)
    #IntegralAround =  np.int(100e-9*SamplingFreq)  Integrals of 200ns
    #indexes = indexes[10:len(indexes)-10]

    #plt.plot(Time[indexes],Amplit[indexes],'.r')
    #plt.show()

    Amplit_p = Amplit[indexes]
    Time_p = Time[indexes]

    #Averaging_Window = 5
    #Amplit_p = np.convolve(Amplit_p, np.ones((Averaging_Window,)) / Averaging_Window, mode='valid')
    #Time_p = np.convolve(Time_p, np.ones((Averaging_Window,)) / Averaging_Window, mode='valid')

    return [Time_p, Amplit_p]
Example #19
def ExtremLocation_Random (Serias,FDR_Point,Min_Dist,Min_Val):
    """
    finds extremum points from the Haar wavelet coefficients, selecting points randomly
        Serias: HW coefficients
        FDR_Point: the fraction of the extremum points selected
        Min_Dist: the minimum distance in bins between extremum points
        Min_Val: minimum value for an extremum point
    Return
        1D array of extremum points whose size is the FDR_Point fraction
        of the total detected points
    """
    ind1 = detect_peaks(Serias, mpd=Min_Dist, threshold=Min_Val)
    ind2 = detect_peaks(Serias, mpd=Min_Dist, threshold=Min_Val, valley=True)
    ind = np.sort(np.append(ind1, ind2))
    # the original snippet referenced IndExt without defining it; a random
    # permutation implements the "select points randomly" described above
    IndExt = np.random.permutation(len(ind))
    Loc = ind[IndExt[-int(np.floor(FDR_Point * len(ind))):]]
    return Loc
Example #20
def process_profile_PS2(Amplit, SamplingFreq, TimeStart, FilterFreq, Downsample):
    # Bunch By Bunch integrals with 4 Bunches on PS
    High = 15e6

    Time = TimeStart + 1e3 * (np.arange(0, Amplit.size, 1) / SamplingFreq)
    Amplit = butter_lowpass_filter(Amplit, High, SamplingFreq, order=1)


    mpd = int(1e-6 * SamplingFreq)
    indexes = detect_peaks(Amplit, mpd=mpd)

    #IntegralAround = np.int(1.13e-6 * SamplingFreq)
    IntegralAround = int(300e-9 * SamplingFreq)  # integrals of 300 ns
    indexes = indexes[10:len(indexes)-10]

    #plt.plot(Time,Amplit)
    #plt.plot(Time[indexes-IntegralAround],Amplit[indexes-IntegralAround],'.b')
    #plt.plot(Time[indexes+IntegralAround],Amplit[indexes+IntegralAround],'.r')
    #plt.show()

    Amplit_p=[]
    Baseline = []
    for i in indexes:
        Amplit_p.append(np.sum(Amplit[i-IntegralAround:i+IntegralAround]))
        Baseline.append(np.sum(Amplit[i - (mpd // 2):i - (mpd // 2) + IntegralAround]))

    Amplit_p = np.asarray(Amplit_p) - np.asarray(Baseline)
    #Amplit_p = np.asarray(Amplit_p)
    Time_p = Time[indexes]

    #Averaging_Window = 5
    #Amplit_p = np.convolve(Amplit_p, np.ones((Averaging_Window,)) / Averaging_Window, mode='valid')
    #Time_p = np.convolve(Time_p, np.ones((Averaging_Window,)) / Averaging_Window, mode='valid')

    return [Time_p, Amplit_p]
Example #21
    def add_local_peaks_log(self, ordered_list):

        self.add_log('-----------------------------')
        self.add_log('Local Peaks List')

        # Get ordered count list
        count_list = []
        for item in ordered_list:
            count_list.append(item['count'])

        mean = statistics.mean(count_list)
        stdev = statistics.stdev(count_list)
        self.add_log('Mean=' + str(mean) + ', SD=' + str(stdev))

        peaks_list = detect_peaks(count_list, mph=(mean + stdev), mpd=10)
        for i in range(len(peaks_list)):
            peak_index = peaks_list[i]
            chat_count = ordered_list[peak_index]['count']
            timestamp = ordered_list[peak_index]['timestamp']
            console_index = self.add_log(
                str(i + 1) + '. Count=' + str(chat_count) + " timestamp=" +
                timestamp)

            timestamp_split = timestamp.split(':')
            hour = timestamp_split[0] + 'h'
            minute = timestamp_split[1] + 'm'
            second = timestamp_split[2] + 's'
            self.console_link[str(
                console_index
            )] = 'https://www.twitch.tv/videos/' + self.current_video_id + '?t=' + hour + minute + second
def GetSortedPeak(frq, comb_sig, T, N, f_s):
    max_peak_height = 0.1 * np.nanmax(comb_sig)
    threshold = 0.05 * np.nanmax(comb_sig)
    #Get indices of peak
    peak = detect_peaks(comb_sig,
                        edge='rising',
                        mph=max_peak_height,
                        mpd=2,
                        threshold=threshold)

    m = []
    mm = []
    for i in peak:
        m.append(comb_sig[i])
        mm.append(frq[i])

    mmm = np.argsort(m)
    n = []
    nn = []
    for i in mmm:
        n.append(m[i])
        nn.append(mm[i])

    n = n[::-1]
    nn = nn[::-1]

    return n, nn
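For reference, the m/mm/mmm bookkeeping above can be vectorised; a sketch that should match the loop version up to the ordering of ties:

import numpy as np

def sorted_peaks(frq, comb_sig, peak):
    order = np.argsort(comb_sig[peak])[::-1]       # peak heights, descending
    return list(comb_sig[peak][order]), list(frq[peak][order])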
def compute_corners_using_curvature_and_speed(
        stroke,
        smoothed_pen_speeds,
        curvatures,
        curvature_threshold=CURVATURE_THRESHOLD,
        speed_threshold_2=SPEED_THRESHOLD_2):
    """
    param stroke : a Stroke object with N x,y,t data points
    param smoothed_pen_speeds : an array of the smoothed pen speeds at each point on a stroke.
    param curvatures : an array of curvatures
    param curvature_threshold : in degrees per pixel. The minimum threshold for the curvature of
        a point for the point to be considered a segmentation point.
    param speed_threshold_2 : a percentage (between 0 and 1). The threshold determines the
        maximum percentage of the average pen speed allowed for a point to be considered a
        segmentation point.


    return : a list of all segmentation points
    """
    #TODO: your part 5b code here
    avg_speed = sum(smoothed_pen_speeds) / len(smoothed_pen_speeds)
    s_threshold = speed_threshold_2 * avg_speed
    c_threshold = math.radians(curvature_threshold)
    curvatures = [abs(i) for i in curvatures]
    peaks = detect_peaks(curvatures, mph=c_threshold)
    new_peaks = [i for i in peaks if smoothed_pen_speeds[i] <= s_threshold]
    return new_peaks
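A worked check of the curvature-and-speed gate (a sketch with made-up arrays; the stroke argument is unused by the body, so None is passed):

curv = [0.0, 1.2, 0.0, 0.0, 1.1, 0.0]    # rad/px: two curvature spikes
speed = [9.0, 1.0, 9.0, 9.0, 8.0, 9.0]   # only the first spike is slow
print(compute_corners_using_curvature_and_speed(
    None, speed, curv, curvature_threshold=45, speed_threshold_2=0.25))
# -> [1]: index 4 is rejected because the pen was still moving fast there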
Example #24
def GetSortedPeak(X, Y):
    #SubFunction for FrequencyDomainInformation
    max_peak_height = 0.1 * np.nanmax(Y)
    threshold = 0.05 * np.nanmax(Y)
    #Get indices of peak
    peak = detect_peaks(Y,
                        edge='rising',
                        mph=max_peak_height,
                        mpd=2,
                        threshold=threshold)

    m = []
    mm = []
    for i in peak:
        m.append(Y[i])
        mm.append(X[i])

    mmm = np.argsort(m)
    n = []
    nn = []
    for i in mmm:
        n.append(m[i])
        nn.append(mm[i])

    n = n[::-1]
    nn = nn[::-1]

    return n, nn
Example #25
def find_lane_centers(laneIMG_binary, left_peak_previous, right_peak_previous):
    # find peaks as the starting points of the lanes (left and right)
    vector_sum_of_lane_marks = np.sum(laneIMG_binary, axis=0)
    #    peaks, _ = find_peaks(vector_sum_of_lane_marks, distance=peaks_distance)
    #    peaks = peakutils.indexes(vector_sum_of_lane_marks, min_dist=peaks_distance)
    peaks = detect_peaks(vector_sum_of_lane_marks, mpd=peaks_distance)

    if (peaks.shape[0] == 1):
        # only one line is visible
        current_peak = peaks[0]
        # if the current peak is closer to the previous left line center,
        # the right line is considered missing
        if (np.abs(current_peak - left_peak_previous) <=
                np.abs(current_peak - right_peak_previous)):
            lane_center_right = False
            lane_center_left = current_peak
            # left line remains, right line is missing
        else:
            lane_center_left = False
            lane_center_right = current_peak
            # right line remains, left line is missing
    # no peak is detected
    elif (peaks.shape[0] == 0):
        lane_center_left, lane_center_right = False, False
    else:
        # we only use the first two peaks as the starting points of the lanes
        peaks = peaks[:2]
        lane_center_left = peaks[0]
        lane_center_right = peaks[1]
    return lane_center_left, lane_center_right
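A quick check of the column-sum peak picking with a synthetic binary mask (peaks_distance is a module global in the snippet, so detect_peaks is called directly here):

import numpy as np
from detect_peaks import detect_peaks

laneIMG_binary = np.zeros((20, 120), dtype=np.uint8)
laneIMG_binary[:, 30] = 1   # left lane mark
laneIMG_binary[:, 90] = 1   # right lane mark
print(detect_peaks(laneIMG_binary.sum(axis=0), mpd=40))  # -> [30 90]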
Example #26
def peak_amplitudes(orbit_dict, orbit, nskip, show, save):
    """ Finds peak amplitude as a function of inverse field.
    Inputs:
        orbit_dict: dict containing each separate orbit
        orbit: (string) name of orbit you're looking at (a key in orbit_dict)
        nskip: number of points to skip at beginning of signal 
        show: (Boolean) plotting option for detect_peaks()
        save: if detect_peaks() was successful, set save=True and run again
    Returns: DataFrame containing peak amplitude vs. inverse field if save=True
    """
    peaks = abs(orbit_dict['Osc'].Freq)
    peak_ind = detect_peaks(peaks, show=show)
    peak_fields = np.array([orbit_dict['Osc'].InvField[i] for i in peak_ind])
    peak_amps = np.array([peaks[i] for i in peak_ind])
    if not save:
        print('Happy with the peaks detected?')
        print('If so, set save=True and run again.\n')
        return None
    print('The below peaks have been added to the dataset under orbit ' +
          orbit + '.\n')
    df_peaks = pd.DataFrame({'Amp': peak_amps, 'InvField': peak_fields})
    plt.plot(peak_fields, 1e-3 * peak_amps, 'o')
    plt.xlabel(r'Inverse Field (T${}^{-1}$)')
    plt.ylabel(r'Amplitude (kHz)')
    plt.title(orbit + ' orbit amplitudes')
    plt.show()
    return df_peaks
Example #27
def max_finding(Acc, rhos, thetas, minheight = 85, lookahead = 35):
    idmax = np.argmax(Acc)
    rho_max_index, theta_max_index = np.unravel_index(idmax, Acc.shape)
    peak_indices = dp.detect_peaks(Acc[:, theta_max_index], mph = minheight, mpd = lookahead)
    s_max = np.max(rhos[peak_indices])
    theta_max = thetas[theta_max_index]
    return theta_max, s_max, theta_max_index, peak_indices
Example #28
def main():

    x = np.sin(
        2 * np.pi * 5 * np.linspace(0, 1, 200)) + np.random.randn(200) / 5
    # set minimum peak height = 0 and minimum peak distance = 20
    ind = detect_peaks(x, mph=0, mpd=20, show=True)
    print(ind)
Example #29
def get_peaks_data(t1, frames_second, vel, mph=10):
    # this function should return the number of change of directions, event duration & frequency of change.
    # get indices
    col = vel.columns
    values = vel[col].dropna().values
    data = np.asanyarray([i[0] for i in values])
    peakind = detect_peaks(data, mph=mph)
    number_peaks = len(peakind)

    # this is done in order to consider changes of direction that consist
    # of one stop event followed by swimming with a slow velocity
    if number_peaks == 1:
        number_peaks = 2

    # get frames for the particle contained in vel.
    particle = vel.columns.values
    t_i = t1[t1['particle'] == particle[0]]

    min_value = t_i['frame.1'].iloc[0]
    max_value = t_i['frame.1'].iloc[-1]
    time = (max_value - min_value) / frames_second

    # frequency of change
    change_dir = number_peaks / 2
    change_dir = np.floor(change_dir)

    frequency = change_dir / time

    return change_dir, time, frequency
Example #30
        def _findLine(theta_deg):
            theta = np.deg2rad(theta_deg)
            peakIndex = detect_peaks(accumulator[:, theta_deg], mpd=1, threshold=2, show=False)
            peakRhos = peakIndex*self.gridSize-self.maxRange
            if debug: print("theta(deg): ", theta_deg, "\t rhos(m)", peakRhos)

            return theta, peakRhos
def read_psync_and_correct_ult(filename, ult_data):
    (Fs, sync_data_orig) = io_wav.read(filename)
    sync_data = sync_data_orig.copy()

    # clip
    sync_threshold = np.max(sync_data) * 0.6
    for s in range(len(sync_data)):
        if sync_data[s] > sync_threshold:
            sync_data[s] = sync_threshold

    # find peaks
    peakind1 = detect_peaks(sync_data, mph=0.9*sync_threshold, mpd=10, threshold=0, edge='rising')
    
    # this is a known bug: there are three pulses, after which there is a 200-300 ms silence,
    # and the pulses continue again
    if (np.abs( (peakind1[3] - peakind1[2]) - (peakind1[2] - peakind1[1]) ) / Fs) > 0.2:
        bug_log = 'first 3 pulses omitted from sync and ultrasound data: ' + \
            str(peakind1[0] / Fs) + 's, ' + str(peakind1[1] / Fs) + 's, ' + str(peakind1[2] / Fs) + 's'
        print(bug_log)
        
        peakind1 = peakind1[3:]
        ult_data = ult_data[3:]
    
    for i in range(1, len(peakind1) - 2):
        # if there is a significant difference between peak distances, raise error
        if np.abs( (peakind1[i + 2] - peakind1[i + 1]) - (peakind1[i + 1] - peakind1[i]) ) > 1:
            bug_log = 'pulse locations: ' + str(peakind1[i]) + ', ' + str(peakind1[i + 1]) + ', ' +  str(peakind1[i + 2])
            print(bug_log)
            bug_log = 'distances: ' + str(peakind1[i + 1] - peakind1[i]) + ', ' + str(peakind1[i + 2] - peakind1[i + 1])
            print(bug_log)
            
            raise ValueError('pulse sync data contains wrong pulses, check it manually!')
    
    return ([p for p in peakind1], ult_data)
Example #32
def setup_peak():
    # 1500 data points, sampling rate 50hz
    # each point is 1/30 hz
    # 3-7 hz is around 90-210 datapts in
    l = read_csv('fft_data.csv')
    for i in l:
        i[1] = i[1].replace(';', ',')
    eval_list = [[dt, literal_eval(data)] for [dt, data] in l]
    top_peaks = []
    raw_peaks = []
    for i, group in enumerate(
            eval_list[:-1]):  # last freq set is cut off, kill it
        top_peaks.append([group[0]])
        raw_peaks.append([group[0]])
        for dim in group[1]:
            all_peaks = []
            mph = 10
            while len(all_peaks) < 3:
                all_peaks = detect_peaks(
                    dim[10:len(dim) // 2], mph=mph, mpd=10)
                mph -= 1
            index_fix = np.array(all_peaks) + 10
            sorted_peaks = sorted(
                index_fix,
                key=lambda index: -dim[index])  # negate to sort from the top down
            raw_peaks[i].append(sorted_peaks)
            top = sorted_peaks[:3]
            top_peaks[i].append(top)

    write_csv('raw_peaks.csv', raw_peaks)
    write_csv('peaks_top3.csv', top_peaks)
Example #33
    def do_test(self, **kwargs):
        x = np.random.randn(1000)
        x[600:801] = np.nan

        peaks1 = PyFindPeaksEx.find_peaks(x, **kwargs)
        peaks2 = detect_peaks.detect_peaks(x, **kwargs)

        self.assertTrue(np.array_equal(peaks1, peaks2))
Example #34
def make_division():
    name_del = []
    distinguish_path = chose_distinguish_entry.get()  # get the path from chose_distinguish_entry
    distinguish_path = distinguish_path.replace('\\', '/')  # normalise the separators
    if distinguish_path[-1] != '/':  # make sure the path ends with '/'
        distinguish_path += '/'
    distinguish_path_yanghe = distinguish_path + '氧合'  # "oxygenated" folder path
    distinguish_path_tuoyang = distinguish_path + '脱氧'  # "deoxygenated" folder path
    distinguish_path_del = distinguish_path + '删除'  # "delete" folder path
    distinguish_path = distinguish_path.decode()
    distinguish_path_yanghe = distinguish_path_yanghe.decode()  # decode to unicode (Python 2)
    distinguish_path_tuoyang = distinguish_path_tuoyang.decode()
    distinguish_path_del = distinguish_path_del.decode()

    if not os.path.exists(distinguish_path_yanghe):  # create the "oxygenated" folder if missing
        os.makedirs(distinguish_path_yanghe)
    if not os.path.exists(distinguish_path_tuoyang):  # same for "deoxygenated"
        os.makedirs(distinguish_path_tuoyang)
    if not os.path.exists(distinguish_path_del):  # and for "delete"
        os.makedirs(distinguish_path_del)

    all_file_path = get_list_from_path.get_all_file_path(distinguish_path)  # paths of all files
    for path in all_file_path:
        file = open(path)
        s_line = []
        x_axis = []
        s_peaks = []
        for line in file:  # read line by line
            (x, y) = line.split()
            s_line.append(float(y))  # assemble one curve
            x_axis.append(float(x))  # build the x coordinates
        file.close()
        peaks = [x_axis[ind] for ind in detect_peaks(s_line, show=False)]  # detect_peaks returns indices
        s_peaks = [peak for peak in peaks if 1624.00 < peak < 1643.00]  # keep peaks in this range
        if max([s_line[ind] for ind in detect_peaks(s_line, show=False)[19:-20:1]]) > 250:
            if s_peaks:
                shutil.move(path, distinguish_path_yanghe)
            else:
                shutil.move(path, distinguish_path_tuoyang)
        else:
            shutil.move(path, distinguish_path_del)
            name_del.append(os.path.basename(path))
    if name_del:
        tkMessageBox.showinfo('The following files were moved to "delete"', 'files with max peak below 250: %s' % name_del)
Example #35
	def ppeaks(self, minVal, maxVal, ind=1, mph=1, mpd=1, mph2=-95, mpd2=1, thresh=0, 
		array=None, fps=40, calc_widths=False, minWidth=4, maxWidth=50, avg_fluor=True, 
		show=False, show_vl=False, deltax=25, y_axmin=0, y_axmax=2.2, stddev=None,lam=100, p=.001, niter=6, normed=False):
		''' Peak Detection of Contour Mean Intensities.
		http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
		mph : detect peaks that are greater than minimum peak height.
    	mph2: same, just for valleys (and negative!)
    	mpd : detect peaks that are at least separated by minimum peak distance.
    	mpd2 : same, but valleys
    	threshold : detect peaks (valleys) that are greater (smaller) than `threshold`
        			in relation to their immediate neighbors.

		Seems to work: (see iPython for other population params.)
		for i in range(length):
    		q.ppeaks(ind=i, mph=85, mpd=15,thresh=0.15, minVal=70, maxVal=175)
    	Normed divides each bit of data by its baseline.
		'''
		if self.all_cm is None or self.filtered_contours is None:
			self.big_cm(canny=True, array=array, minVal=minVal, maxVal=maxVal)
		if stddev is None:
			c_m = self.all_cm
			data = c_m[:,ind,1]
		else: #look at nonblinky cells (stddev will be low, coded earlier.)
			if self.stdd is None:
				self.create_std(stddev=stddev, avg_fluor=avg_fluor)
			ar = self.stdd
			data = ar[ind,:]

		window = signal.general_gaussian(4, p=0.5, sig=100)
		filtered = signal.fftconvolve(window, data)
		filtered = (np.average(data)/np.average(filtered))*filtered
		filtered = filtered[4:-5] #truncates bad boundaries from fftconvolve
		original_filtered = np.copy(filtered)

		bline=self.baseline_als(filtered, lam=lam, p=p, niter=niter)
		if normed:
			filtered=filtered/bline
		peaks = detect_peaks(x=filtered, mpd=mpd, mph=mph, threshold=thresh, edge='rising', 
									show=True, y_axmin=0, y_axmax=y_axmax)

		#collect the normed traces of all cells. 
		self.normed_trace.append(filtered)

		y_peaks = []
		for ind in peaks:
			y_peaks.append(filtered[ind])
				
		if calc_widths==True:
			self.p_width(peaks=peaks, bline=bline, filtered=filtered, minWidth=minWidth, maxWidth=maxWidth, cell_ind=ind, show=show, data=data, deltax=deltax, normed=normed)
		
		frames = np.arange(0,len(filtered))
		if show_vl==True:
			plt.plot(frames, original_filtered, 'm', frames, bline,'b--') #filtered-->original_filtered
			plt.show()
Example #36
def find_peaks(signal, v_mph, v_mpd, v_cutMin, v_cutMax):
    #signal[-100:100] = 0.0
    
    
    foierSignal = filterFourier(signal, v_cutMin, v_cutMax)
    diffFoierSignal = diff(foierSignal,1)
    
    dev_flag = True
    if dev_flag:
      import matplotlib.pyplot as plt
      from random import randint
      
      plt.figure()
      plt.plot(np.linspace(100, len(signal)-100, num=len(signal.real[100:-100])), 40*signal.real[100:-100], '-r')
      plt.plot(np.linspace(100, len(diffFoierSignal)-100, num=len(diffFoierSignal.real[100:-100])), -diffFoierSignal.real[100:-100], '-g')
      plt.savefig('test/%sdifStress.png'%( randint(0,99) ), dpi=150)

    from detect_peaks import detect_peaks
    ind = detect_peaks(-diffFoierSignal.real[150:-150], mph=v_mph, mpd=v_mpd, show=False)
    
    vmax = []
    vmin = []
    id_vmax = []
    id_vmin = []
    i = 0
    d_find = 50
    while i < len(ind):
        s = signal
        
        if (len(ind) != 0 ):
          if ( np.fabs(len(s)-ind[i]) > d_find and ind[i] > d_find ):
            vmax.append(max( s[ ind[i] - d_find : ind[i] + d_find] ) )     
            id_vmax.append([k for k, j in enumerate(s[ind[i]-d_find:ind[i]+d_find]) if j == vmax[i]][0] + ind[i] - d_find)  
            
          else:
            vmax.append(max( s[ ind[i] - d_find : ind[i]] ) )
            id_vmax.append([k for k, j in enumerate(s[ind[i]-d_find:ind[i]]) if j == vmax[i]][0] + ind[i]-d_find)
            
            
          if ( np.fabs(len(s)-ind[i]) > d_find and ind[i] > d_find ):
            vmin.append(min( s[ ind[i] - d_find : ind[i] + d_find] ) )
            id_vmin.append([k for k, j in enumerate(s[ind[i] - d_find : ind[i] + d_find]) if j == vmin[i]][0]+ind[i]-d_find)
            
          else:
            vmin.append(min( s[ ind[i] : ind[i] + d_find] ) )
            id_vmin.append([k for k, j in enumerate(s[ ind[i] : ind[i] + d_find ] ) if j == vmin[i]][0]+ind[i]-d_find)
          
        i += 1
    return [id_vmax,vmax,id_vmin,vmin]
    
Example #37
 def test_octave_findpeaks_equal_matlab_findpeaks_minpeakheight_2(self):
     """ Check that Octave findpeaks mimics well the original MatLab findpeaks, with minpeakheight filter. """
     # Find peaks on this vector.
     vector = [
         0.000000000000001, 3.651411362475055, 4.347239816515587,
         3.229238311887470, 2.057044119108341, 4.289416174922050,
         4.623656294357088, 16.991500296151141, 23.710596923344340,
         5.194447742667983, 5.392090702263596
     ]
     # 'MinPeakHeight', 22
     loc = detect_peaks(vector, mph=22)
     print(loc)
     self.assertEqual(
         loc.tolist(),
         [9-1])
Example #38
 def test_octave_findpeaks_equal_matlab_findpeaks_minpeakheight_3(self):
     """ Check that Octave findpeaks mimics well the original MatLab findpeaks, with minpeakheight filter. """
     # Find peaks on this vector.
     vector = [
         0.000000000000002, 4.304968393969253, 2.524429995956715,
         1.362350996472030, 8.651011827706597, 5.355103964053995,
         4.166135802128525, 7.111434648523146, 41.368426443580518,
         13.753049599045664, 11.652130301046128
     ]
     # 'MinPeakHeight', 22
     loc = detect_peaks(vector, mph=22, mpd=None)
     print(loc)
     self.assertEqual(
         loc.tolist(),
         [9-1])
Example #39
 def test_octave_findpeaks_equal_matlab_findpeaks_minpeakheight_1(self):
     """ Check that Octave findpeaks mimics well the original MatLab findpeaks, with minpeakheight filter. """
     # Find peaks on this vector.
     vector = [
         0.000000000000002, 8.065338269152255, 0.345981261752651,
         3.773585143328164, 8.902504869392125, 10.153129735333088,
         9.310914486231075, 52.420530313341835, 21.453422488606648,
         11.328972030373752, 1.811055956166194
     ]
     # 'MinPeakHeight', 22
     loc = detect_peaks(vector, mph=22)
     print(loc)
     self.assertEqual(
         loc.tolist(),
         [8-1])
Example #40
def do_test(x, **kwargs):
    if x is None:
        x = np.random.randn(100)
        x[60:81] = np.nan
        x[-1] = 20
        
    peaks1 = PyFindPeaksEx.find_peaks(x, **kwargs)
    peaks2 = detect_peaks.detect_peaks(x, **kwargs)

    print(peaks1)
    print(peaks2)
    if not np.array_equal(peaks1, peaks2):
        with open(r'data.txt', 'w', encoding='utf-8') as fp:
            for n in x.tolist():
                fp.write('{0}\n'.format(n))
    assert(np.array_equal(peaks1, peaks2))
def covFeatures(x, fs):
    import numpy
    from scipy import signal  # see http://stackoverflow.com/questions/4688715/find-time-shift-between-two-similar-waveforms
    feats = numpy.zeros(3)
    c = numpy.correlate(x, x, "same")
    # lag values matching MATLAB's [c, lags] = xcorr(x): for mode="same",
    # zero lag sits at the centre index len(c) // 2
    lags = numpy.arange(len(c)) - len(c) // 2
    
    # MATLAB's xcorr returns the lag indices as a second output
    # ([c, lags] = xcorr(x), http://www.mathworks.com/help/signal/ref/xcorr.html);
    # numpy.correlate returns only the correlation, so the lag vector is
    # built by hand above. From the MATLAB demo for this project:
    # autocorrelation can be powerful for frequency estimation, and is
    # especially effective for estimating low-pitch fundamental frequencies.
        
    minprom = 0.0005
    mindist_xunits = 0.3
    minpkdist = numpy.floor(mindist_xunits/(1/fs))

    from detect_peaks import detect_peaks
    locs = detect_peaks(c,threshold=minprom,mpd=minpkdist,show=True)
    pks = c[locs]
    #currently this finds zero peaks because minprom is too large. I left it because the filter is most likely wrong right now, resulting in wrong peak heights.

    tc = (1 / fs) * lags
    tcl = numpy.abs(tc)[locs]
    
    # Feature 0 - peak height at 0
    if tcl.size:
        feats[0] = pks[(len(pks) + 1) // 2]

    # Features 1 and 2 - position and height of first peak
    if tcl.size >= 3:
        feats[1] = tcl[(len(pks) + 1) // 2 + 1]
        feats[2] = pks[(len(pks) + 1) // 2 + 1]
    return feats
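A sanity check for the hand-built lag vector used above (sketch):

import numpy as np

x = np.random.randn(256)
c = np.correlate(x, x, "same")
lags = np.arange(len(c)) - len(c) // 2
assert lags[np.argmax(c)] == 0  # autocorrelation always peaks at zero lag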
Example #42
def extract_peaks(dataset,plotFlag=False):

  nTimeSeries = 2
  
  # load time-series (TS)
  # **************************************************************************
  if (type(dataset) == str):
    if (dataset.endswith(".txt")):
      fulldata = np.loadtxt(dataset)
    else:
      fulldata = np.load(dataset)
  elif isinstance(dataset, np.ndarray):
    fulldata = dataset

  # Builds a subset by taking only rows firstSample .. lastSample from base dataset
  #if (lastSample != None):
  #  data = fulldata[firstSample:lastSample,0:nTimeSeries]
  #else:
  #  data = fulldata[firstSample:,0:nTimeSeries]

  data = fulldata[:,0:nTimeSeries+1]
  #plot(data[:,0], data[:,1])
  
  nSamples = data.shape[0]
  
  # Find peaks.
  peak_data = np.zeros((nSamples, nTimeSeries+1))
  peak_data[:,0] = data[:,0]
  width = np.array([50, 75, 100, 150])
  for i in range(1,nTimeSeries+1):
    indexes = detect_peaks(data[:,i], mph=600, mpd=200, edge="rising")
    #print indexes
    #indexes = find_peaks_cwt(data[:,i], width, noise_perc=0.1)
    #plot(data[indexes,0], data[indexes,i], "ro")
    for j in indexes:
      peak_data[j,i] = 1.0
  
  #print peak_data

  #plot(peak_data[:,0], peak_data[:,1])
  
  #show()
  
  return peak_data
Example #43
 def test_octave_findpeaks_equal_matlab_findpeaks_minpeakheight_minpeakdistance(self):
     """ Check that Octave findpeaks mimics well the original MatLab findpeaks, with minpeakheight and minpeakdistance filter. """
     # Find peaks on this vector.
     vector = [
         0.199196234460946, 0.150971091401259, 0.066830193587158, -0.007815333052105, -0.044616654524390, -0.055795361348227, -0.076137152400651, -0.118170367279712, -0.163440493736020, -0.190516609994619, -0.176483713717207, -0.126265512667095,
         -0.085683530051180, -0.070626701579825, -0.056650272247038, -0.018164912522573, 0.042641790158567, 0.084300842806316, 0.091380642181674, 0.086612641403415, 0.076804338682254, 0.065114059315175, 0.061730123648466, 0.062054559470569,
         0.037808369894233, -0.007903466706924, -0.022105492056923, 0.022875099403569, 0.100256509561853, 0.161610966145234, 0.188078783724511, 0.179791428716887, 0.127483188979423, 0.037101235419981, -0.061551863605861, -0.134872789642774,
         -0.170882136762535, -0.180232519836007, -0.193873842670550, -0.220596208762850, -0.217710728542538, -0.154566709841264, -0.052288376793704, 0.024309953763214, 0.036995233638215, 0.027385387267975, 0.034756425571608, 0.044538621477845,
         0.048179094187324, 0.062762787751685, 0.093756722731978, 0.128746079656537, 0.140220257694886, 0.107177963642096, 0.064168137422344, 0.049034449543362, 0.043561872239351, 0.037112836659310, 0.049484512152412, 0.075511915362878,
         0.082621740035262, 0.059833540054286, 0.025160333364946, -0.011362411779154, -0.059885473889260, -0.116916348401991, -0.160033412094328, -0.186277401172449, -0.227970985597943, -0.293012110994312, -0.316846014874940, -0.235793951154457,
         -0.071213154358508, 0.087635348114046, 0.166528547043995, 0.156622093806762, 0.114536824444267, 0.098795472321648, 0.106794539180316, 0.123935062619566, 0.138240918685253, 0.120041711787775, 0.065711290699853, -0.020477124669418,
         -0.121124845572754, -0.163652703975820, -0.088146112206319, 0.062253992836015, 0.185115302006708, 0.251310089224804, 0.275507327595166, 0.240646546675415, 0.144130827133559, 0.028378284476590, -0.050543164088393, -0.082379193202235,
         -0.108933261445066, -0.149993661967355, -0.188079227296676, -0.184552832746794
     ]
     # 'MinPeakHeight', 0.05, 'MinPeakDistance', 10, 'MinPeakWidth', 0
     loc = detect_peaks(vector, mph=0.05, mpd=10)
     print(loc)
     self.assertEqual(
         loc.tolist(),
         [19-1, 31-1, 53-1, 75-1, 91-1])
def spectralPeaksFeatures(x,fs):
    mindist_xunits = 0.3
    
    import numpy
    feats = numpy.zeros(12,float)
    
    N = 4096
    minpkdist = numpy.floor(mindist_xunits/(1/fs))
    
    import scipy
    from scipy import signal
    window = scipy.signal.get_window('boxcar',len(x))
    f,p = scipy.signal.welch(x,fs,window,noverlap=None,nfft=N)

    from detect_peaks import detect_peaks
    locs = detect_peaks(p,mpd=minpkdist,show=True)
    pks = p[locs]
    #Matlab only detects 20 peaks. No option in this code.
    
    if pks is not None and len(pks) > 0:
        mx = min(6, len(pks))

        # keep the mx highest peaks (by height, descending)...
        idx = numpy.argsort(pks)[::-1][:mx]
        locs = locs[idx]
        pks = pks[idx]

        # ...then re-order the kept peaks by ascending frequency
        order = numpy.argsort(locs)
        locs = locs[order]
        pks = pks[order]

    fpk = f[locs]

    feats[0:len(pks)] = fpk
    feats[6:(6 + len(pks))] = pks
    return feats
Example #45
def find_peaks(spectrum_slice):
    raw_x = spectrum_slice.index
    raw_y = spectrum_slice.values

    s = UnivariateSpline(raw_x, raw_y, s=1)
    # resample
    sx = np.linspace(raw_x.min(), raw_x.max(), num=len(raw_x)*5)
    sy = s(sx)

    # peak_indices = detect_peaks(sy, edge='rising',
                                # kpsh=True, threshold=2, show=True)
    peak_indices = detect_peaks(sy, mph=1100, kpsh=True)
    # peak_indices = peakutils.peak.indexes(sy, min_dist=10)
    peaks_x = [sx[i] for i in peak_indices]
    peaks_y = [sy[i] for i in peak_indices]

    # plt.plot(raw_x, raw_y, '--')
    # plt.plot(sx, sy, '-')
    # plt.scatter(peaks_x, peaks_y, s=60, marker='+', color='red')
    # plt.show()

    return list(zip(peaks_x, peaks_y))
Example #46
def fix_xlsx(path, walk_speed):
    wb = load_workbook(filename=path)
    sheet1 = wb.active
    take_abs_val(sheet1, 2)  # Taking the abs_val ignores zeroing and low level noise errors!
    hr = sheet1.max_row

    # generate list of all power values over this particular 5sec interval excel sheet
    pwr_list = []
    for i in range(4, hr+1):
        pwr_list.append(sheet1.cell(row=i, column=2).value)

    # Find the single maximum power peak for the full 5sec interval
    max_peak = round(max(pwr_list), 4)

    # Detect the peaks that are over 80% of the max peak and are spaced a minimum 90% of the walk frequency apart.
    peaks = detect_peaks(pwr_list, mph=(0.8*max_peak), mpd=(0.9*gen_guess_frequency(walk_speed)), edge='rising', show=False)

    # Copy only the data that falls between the first and the last peaks to column 3 of the Excel sheet.
    for i in range(4, hr+1):
        if peaks[0] <= (i-4) <= peaks[-1]:
            sheet1.cell(row=i, column=3).value = sheet1.cell(row=i, column=2).value
        else:
            continue

    # Now calculate the average of this truncated column 3 (column C) to get the true Average Power for this 5sec interval; the result is written to column D
    truncated_pwr_list = []
    for i in range(4, hr+1):
        if sheet1.cell(row=i, column=3).value is not None:
            truncated_pwr_list.append(sheet1.cell(row=i, column=3).value)
        else:
            continue
    avg_pwr = round(statistics.mean(truncated_pwr_list), 2)
    sheet1['D1'].value = "Avg. Power"
    sheet1['D2'].value = "(W)"
    # sheet1['D4'].value = "=ROUND(AVERAGE(B4:B15050),2)"
    sheet1['D4'].value = avg_pwr
    wb.save(filename=path)
    return avg_pwr
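gen_guess_frequency is not defined in this snippet. A hedged sketch of what such a helper could look like, assuming walk_speed is in m/s, a nominal stride length, and a known sample rate of the power data (all three values are assumptions, not from the original):

def gen_guess_frequency(walk_speed, stride_m=0.75, sample_rate_hz=100):
    # Hypothetical helper: steps/s ~= speed / stride length, so the expected
    # spacing between step peaks is sample_rate / step_frequency samples.
    steps_per_s = walk_speed / stride_m
    return sample_rate_hz / steps_per_s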
Exemplo n.º 47
0
    file1_BW = "Temperature_CG_" + label_BW + "_" + str(time)
    file1 = "Temperature_CG_" + label_BW + "_" + str(time)
    #

    T_B = lagrangian_stats.read_Scalar(file0_B, zn, xn, yn)
    T_BW = lagrangian_stats.read_Scalar(file0_BW, zn, xn, yn)

    FT_B = []  # np.zeros((xn/1,yn))
    FT_BW = []  # np.zeros((xn/1,yn))

    #
    for k in range(1):
        Npks = []
        for j in range(0, len(Ylist), 10):
            Tp = T_B[1, j, :] - np.mean(T_B[1, j, :])
            pks = detect_peaks.detect_peaks(Tp, valley=True, mph=0.03)
            Npks.append(len(pks))

        #   plt.plot(Xlist,Tp)
        #   plt.scatter(Xlist[pks],Tp[pks])
        #   plt.savefig('./plot/'+label_B+'/'+file1_B+'_z'+str(Zlist[k])+'_'+str(j)+'_sec.eps',bbox_inches='tight')
        #   print       './plot/'+label_B+'/'+file1_B+'_z'+str(Zlist[k])+'_'+str(j)+'_sec.eps'
        #   plt.close()

        Npks = np.asarray(Npks)

        print "Cells mean size", np.mean(8000.0 / Npks)
        print "Cells std size", np.std(8000.0 / Npks)
        print "Cells max size", np.max(8000.0 / Npks)
        print "Cells min size", np.min(8000.0 / Npks)
Exemplo n.º 48
0
 def __peaks(self):
     self.peaks = [self.y_axis[i] for i in detect_peaks(self.y_axis, show=False)]
Exemplo n.º 49
0
 def __locs(self):
     self.locs = [self.x_axis[i] for i in detect_peaks(self.y_axis, show=False)]
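Both private methods above run detect_peaks over the same y_axis; a sketch that computes the indices once and fills both attributes (same results, one detection pass):

 def __peaks_and_locs(self):
     # Sketch: one detect_peaks pass feeding both attributes.
     idx = detect_peaks(self.y_axis, show=False)
     self.peaks = [self.y_axis[i] for i in idx]
     self.locs = [self.x_axis[i] for i in idx]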
Exemplo n.º 50
0
    def __init__(self, wavelets, event_times, threshold, gain):
        """
        Initialize a new class instance
        Parameters
        ----------
        wavelets : 'list'
            list of nx2 arrays of [time, v] for each wavelet
        event_times : 'array like'
            1D array of start time for each wavelet
        threshold : 'float'
            threshold in dB used to extract wavelets
        gain : 'float'
            pre-gain

        Attributes
        ----------
        self.wavelets : 'list'
            list of nx2 arrays of [time, v] for each wavelet
        self.event_times : 'array like'
            1D array of start time for each wavelet
        self.threshold : 'float'
            threshold in dB used to extract wavelets
        self.gain : 'float'
            pre-gain
        self.counts : 'array like'
            number of peaks (counts) within each wavelet
        self.amplitudes : 'array like'
            amplitude in dB of each wavelet
        self.rise_ts : 'array like'
            time to peak height (rise time) of wavelets
        self.durations : 'array like'
            duration of wavelet
        self.energies : 'array like'
            MARSE energy of wavelets
        self.data : 'pandas.DataFrame'
            data frame containing the wavelet features
        """
        self.wavelets = wavelets
        self.event_times = event_times
        self.gain = gain
        self.threshold = threshold

        counts = []
        amplitudes = []
        rise_ts = []
        durations = []
        energies = []

        for wavelet in wavelets:
            peak_pos = detect_peaks(wavelet[:, 1], mph=get_Vt(threshold,
                                    gain=gain))

            counts.append(len(peak_pos))
            amplitudes.append(get_dB(np.max(wavelet[:, 1]), gain=gain))
            rise_ts.append(wavelet[np.argmax(wavelet[:, 1]), 0] -
                           wavelet[peak_pos[0], 0])
            durations.append(wavelet[peak_pos[-1], 0] -
                             wavelet[peak_pos[0], 0])
            MARSE = wavelet[peak_pos[0]:, 1]
            energies.append(np.sum(MARSE[MARSE > 0]))

        self.counts = np.asarray(counts)
        self.amplitudes = np.asarray(amplitudes)
        self.rise_ts = np.asarray(rise_ts)
        self.durations = np.asarray(durations)
        self.energies = np.asarray(energies)

        self.data = pd.DataFrame({
            'event_times': self.event_times,
            'counts': self.counts,
            'amplitudes': self.amplitudes,
            'rise_ts': self.rise_ts,
            'durations': self.durations,
            'energies': self.energies})
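A minimal usage sketch for the class above with one synthetic wavelet. The class name WaveletFeatures is hypothetical (the snippet does not show it), and get_Vt/get_dB are assumed to be in scope as they are in the constructor; the threshold/gain values are placeholders:

import numpy as np

# Synthetic decaying burst: time/voltage pairs, as the docstring describes.
t = np.arange(0, 1e-3, 1e-6)
v = np.exp(-t / 2e-4) * np.sin(2 * np.pi * 150e3 * t)
wavelets = [np.column_stack((t, v))]
event_times = np.array([0.0])

features = WaveletFeatures(wavelets, event_times, threshold=40.0, gain=20.0)  # hypothetical name and placeholder values
print(features.data[['counts', 'amplitudes', 'durations']])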
Exemplo n.º 51
0
    def find_kids_vna(self, path, plot_chan=False, plot_sweep=False):
        bb_freqs = np.linspace(-255.0e6, 255.0e6, 1000)
        lo_freqs, Is, Qs = self.open_stored(path)
        channels = np.arange(np.shape(Is)[1])
        chan_freqs = np.zeros((len(channels), len(lo_freqs)))
        #bb_freqs, freq_step = np.linspace(-200.0e6, 200.0e6, 500, retstep = True)
        rf_freqs = np.sort(bb_freqs + 750.0e6)
        mags = np.zeros((len(channels), len(lo_freqs)))
        scaled_mags = np.zeros((len(channels), len(lo_freqs)))
        chan_peaks = []
        peak_idx = []
        for chan in channels:
            mags[chan] = 20*np.log10(np.sqrt(Is[:, chan]**2 + Qs[:, chan]**2))
            diff = 0. - np.mean(mags[chan])
            scaled = diff + mags[chan]
            chan_freqs[chan] = np.sort(lo_freqs + bb_freqs[chan])
            #plt.plot(chan_freqs[chan]/1.0e9, scaled_mags[chan])
            x = chan_freqs[chan]
            chan_data = np.abs(scaled) - np.max(scaled)
            scaled_mags[chan] = chan_data
            #thresh = np.mean(ydata) + np.std(ydata)
            thresh = np.max(scaled) - 0.7*np.max(scaled)
            upper_thresh = thresh + 0.5*np.std(chan_data)
            window = signal.hanning(3)
            chan_data_smoothed = signal.convolve(chan_data, window, mode='same')
            peaks = detect_peaks(chan_data, mph=thresh, mpd=np.round(50.0e3/2.5e3))
            if len(peaks) > 0:
                for peak in peaks:
                    if not (thresh + 0.5 < chan_data[peak]):
                        peaks = np.delete(peaks, np.where(peaks == peak)[0])
                if len(peaks) > 3:
                    for peak in peaks:
                        peaks = np.delete(peaks, np.where(chan_data[peaks] == np.min(chan_data[peaks]))[0])
                #if len(peaks) == 3:
                #	for peak in peaks:
                #		if not chan_data[peak] > upper_thresh:
                #			peaks = np.delete(peaks,np.where(chan_data[peaks] == np.min(chan_data[peaks]))[0])
            if len(peaks) > 0:
                for peak in peaks:
                    peak_idx.append(peak + 205*chan)

            if plot_chan and len(peaks) > 0:
                plt.plot(x/1.0e9, chan_data, c='g')
                plt.scatter(x[peaks]/1.0e9, chan_data[peaks], c='r')
                #plt.scatter(x/1.0e9, chan_data, c='g')
                plt.axhline(thresh, c='b', linestyle='--')
                plt.title('Chan = ' + str(chan))
                plt.ylabel('Mag (dB)')
                plt.xlabel('Freq (GHz)')
                plt.ion()
                plt.show()
                input()  # pause between channels (raw_input in the original Python 2)
                plt.clf()
        peak_idx = np.hstack(peak_idx)
        print('Located', len(peak_idx), 'detectors')
        if plot_sweep:
            scaled_mags = np.hstack(scaled_mags)
            chan_freqs = np.hstack(chan_freqs)
            plt.figure(figsize=(18, 10))
            plt.plot(chan_freqs/1.0e9, scaled_mags, c='g')
            #[plt.axvline(chan_freqs[205*chan]/1.0e9) for chan in channels]
            plt.scatter(chan_freqs[peak_idx]/1.0e9, scaled_mags[peak_idx], c='r')
            plt.axhline(thresh, c='b', linestyle='--')  # note: thresh here is from the last channel
            plt.ylabel('Mag (dB)')
            plt.xlabel('Freq (GHz)')
            plt.show()
        return peak_idx, chan_freqs, scaled_mags
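A possible call site, assuming the method lives on a sweep-analysis object and that path points at a stored VNA sweep (both are assumptions; the owning class is not shown here):

# Hypothetical usage; `sweep` is an instance of the class defining find_kids_vna.
peak_idx, chan_freqs, scaled_mags = sweep.find_kids_vna('./vna_sweeps/run1', plot_sweep=True)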
Exemplo n.º 52
0
'''
surf = ax.plot_surface(Y2, X2, Z2, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
ax.set_xlabel('Time')
ax.set_ylabel('MFCC')

plt.show()
'''


#getting peaks from spec, this will give us an array with all the time slices we need to get MFCCs for
spec_peaks_array = []
print(len(fbank_feat[1,:]))
print(len(fbank_feat[:,1]))
for n in range(0, logSizeX):
    ind = detect_peaks(fbank_feat[:, n], mph=0.8, mpd=10)
    spec_peaks_array = np.concatenate((ind, spec_peaks_array), axis=0)
print(spec_peaks_array)
print(len(spec_peaks_array))

#spec_peaks_array is a list of the time coordinates of the peaks for the call. These are the locations we need to get the MFCCs for

##get rid of duplications in spec_peaks array

spec_peaks = list(set(spec_peaks_array))

print(spec_peaks)
print(len(spec_peaks))
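One caveat on the dedup step above: list(set(...)) discards ordering, so spec_peaks comes back unsorted. Since spec_peaks_array is already a NumPy array here, np.unique would dedupe and sort in one call; a sketch:

spec_peaks = np.unique(spec_peaks_array).astype(int)  # sorted, duplicate-free time indices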
Exemplo n.º 53
0
def count_steps2(x, samp_rate):
    x1 = su.filter(x, samp_rate, 2)
    peaks = detect_peaks(x1, mph=1)
    return len(peaks)
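su.filter is the snippet's own (unshown) utility, assumed here to be a low-pass around the step band; a hedged smoke test of count_steps2 on a synthetic 2-steps-per-second signal:

import numpy as np

samp_rate = 50
t = np.arange(0, 10, 1.0 / samp_rate)
x = 2.0 * np.sin(2 * np.pi * 2 * t) + 0.2 * np.random.randn(len(t))  # ~2 steps/s plus noise
print(count_steps2(x, samp_rate))  # expect roughly 20 steps over 10 s, if su.filter behaves as assumed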
Exemplo n.º 54
0
    def histograms(self, roi, n, verbose):
        """
        Calculate horizontal and vertical histograms for a specified region of interest.

        Args:
            roi (matrix): region of interest cut from page.
            n (int): Consider every n pixel while searching for peaks.
            verbose (bool): verbose mode.
        """

        # CALCULATE HISTOGRAMS ###############################################

        height, width = roi.shape
        horizontal = []
        vertical = []

        horizontal_hist = cv2.cvtColor(roi, cv2.COLOR_GRAY2RGB)
        vertical_hist = cv2.cvtColor(roi, cv2.COLOR_GRAY2RGB)

        for i in range(height):
            value = width - cv2.countNonZero(roi[i, :])
            if value == width:
                value = width - 1
            vertical.append(value)
            vertical_hist[i][value] = [0, 0, 255]
            if i == 0:
                before = (value, i)
            else:
                cv2.line(vertical_hist, before, (value, i), (0, 0, 255), 2)
                before = (value, i)

        for i in range(width):
            value = height - cv2.countNonZero(roi[:, i])
            if value == height:
                value = height - 1
            horizontal.append(value)
            horizontal_hist[value][i] = [255, 0, 0]
            if i == 0:
                before = (i, value)
            else:
                cv2.line(horizontal_hist, before, (i, value), (255, 0, 0), 2)
                before = (i, value)

        # HISTOGRAM PEAKS AND VALLEYS #######################################

        horizontal_less = horizontal[0::n]
        vertical_less = vertical[0::n]

        # VALLEYS ###########################################################

        horizontal_valleys = detect_peaks(
            horizontal_less, mph=None, mpd=10, threshold=0, edge='falling', kpsh=False, valley=True, show=False, ax=None)
        for i in range(len(horizontal_valleys)):
            cv2.line(horizontal_hist, (horizontal_valleys[i] * n, horizontal[horizontal_valleys[
                     i] * n]), (horizontal_valleys[i] * n, horizontal[horizontal_valleys[i] * n]), (255, 255, 0), 20)

        vertical_valleys = detect_peaks(vertical_less, mph=None, mpd=10, threshold=0,
                                        edge='falling', kpsh=False, valley=True, show=False, ax=None)
        for i in range(len(vertical_valleys)):
            cv2.line(vertical_hist, (vertical[vertical_valleys[i] * n], vertical_valleys[i] * n),
                     (vertical[vertical_valleys[i] * n], vertical_valleys[i] * n), (255, 255, 0), 20)

        # PEAKS #############################################################

        horizontal_peaks = detect_peaks(horizontal_less, mph=None, mpd=10, threshold=0,
                                        edge='falling', kpsh=False, valley=False, show=False, ax=None)
        for i in range(len(horizontal_peaks)):
            cv2.line(horizontal_hist, (horizontal_peaks[i] * n, horizontal[horizontal_peaks[
                     i] * n]), (horizontal_peaks[i] * n, horizontal[horizontal_peaks[i] * n]), (0, 255, 0), 20)

        vertical_peaks = detect_peaks(vertical_less, mph=None, mpd=10, threshold=0,
                                      edge='falling', kpsh=False, valley=False, show=False, ax=None)
        for i in range(len(vertical_peaks)):
            cv2.line(vertical_hist, (vertical[vertical_peaks[i] * n], vertical_peaks[i] * n),
                     (vertical[vertical_peaks[i] * n], vertical_peaks[i] * n), (0, 255, 0), 20)

        # SHOW HISTOGRAMS IN VERBOSE MODE ##################################

        self.features["HORIZONTAL_PEAKS"] = str(len(horizontal_peaks))
        self.features["HORIZONTAL_VALLEYS"] = str(len(horizontal_valleys))
        self.features["VERTICAL_PEAKS"] = str(len(vertical_peaks))
        self.features["VERTICAL_VALLEYS"] = str(len(vertical_valleys))

        if verbose:
            cv2.imshow('horizontal', horizontal_hist)
            cv2.imshow('vertical', vertical_hist)
            cv2.waitKey()
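A minimal sketch of driving the method with a synthetic region of interest; `obj` is a hypothetical instance whose class initialises self.features as a dict, as the method assumes:

import numpy as np

# White page (255) with three black text-like bands (0 = ink).
roi = np.full((300, 200), 255, dtype=np.uint8)
roi[40:60, 20:180] = 0
roi[140:160, 20:180] = 0
roi[240:260, 20:180] = 0

obj.histograms(roi, n=5, verbose=False)
print(obj.features['HORIZONTAL_PEAKS'], obj.features['VERTICAL_PEAKS'])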
Exemplo n.º 55
0
"""
Calculate forward, sideward, angular and translational speed 
"""
Vf = np.multiply( (X[1:]-X[:-1]), np.cos(Rad[:-1]) ) + np.multiply( Y[1:]-Y[:-1], np.sin(Rad[:-1])) 
Vf = Vf * fps
Vs = np.multiply( -(X[1:]-X[:-1]), np.sin(Rad[:-1]) ) + np.multiply( Y[1:]-Y[:-1], np.cos(Rad[:-1]))
Vs = Vs * fps
AngleSav = savitzky_golay(Angle, 51., 3.) # window size 51, polynomial order 3
Vr = fps*(np.diff(AngleSav))
Vt = np.sqrt(np.multiply(Vf,Vf) + np.multiply(Vs,Vs))
aVt = np.abs(np.diff(Vt))

"""
Clear jumps
"""
peaks = detect_peaks(aVt, mph=10, mpd=5)
#peaks = peakutils.peak.indexes(aVt, thres=0.3, min_dist=2)

Vf_tmp = Vf
peaks_tmp = np.hstack((np.zeros(1), peaks, np.ones(1)*Vf.size)).astype(int)
numjumps = peaks_tmp.size - 1
print("#Jump events:", numjumps)
for i in range(numjumps):
    aux = Vf_tmp[peaks_tmp[i]:peaks_tmp[i+1]]
    aux = aux[~np.isnan(aux)]  # drop NaNs (the MATLAB-style aux[isnan(aux)] = [] does not work in numpy)
    env = 10
    #print(i, aux.size, peaks_tmp[i+1])
    if np.mean(aux) < 0 and aux.size > 30:
        Vf[peaks_tmp[i]:peaks_tmp[i+1]] = -Vf[peaks_tmp[i]:peaks_tmp[i+1]]
        Vs[peaks_tmp[i]:peaks_tmp[i+1]] = -Vs[peaks_tmp[i]:peaks_tmp[i+1]]
        Vr[peaks_tmp[i-1]:peaks_tmp[i]] = -Vr[peaks_tmp[i-1]:peaks_tmp[i]]
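The jump-clearing above keys off peaks in |dVt|; a standalone sketch of just that detection step on a synthetic speed trace with a single jump:

import numpy as np
from detect_peaks import detect_peaks

Vt_demo = np.concatenate([np.full(100, 5.0), np.full(100, 40.0)])  # one abrupt speed jump
aVt_demo = np.abs(np.diff(Vt_demo))
print(detect_peaks(aVt_demo, mph=10, mpd=5))  # expect a single index at the jump, [99]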
Exemplo n.º 56
0
def main():

    x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
    # set minimum peak height = 0 and minimum peak distance = 20
    ind = detect_peaks(x, mph=0, mpd=20, show=True)
    print(ind)
Exemplo n.º 57
0
def findRealPeaks(A_1, tf, f, f_valid, sens_FindPeaks, sens_TFOrigin, sens_SumUpPeaks, maxamp, problvl):

    nt = A_1.shape[0]
    nf = A_1.shape[1]

    factor = 1.5

    peakamp = [0]*nt
    peakloc = [0]*nt
    collectivepeaks = [[] for _ in range(nt)]
    ispeakfromTF = [[] for _ in range(nt)]
    gap = [[] for _ in range(nt)]
    numrealpeaks = [0]*nt
    peakloc_ind = [False]*nf

    def TFpeakCheck(peakAmp, TFAmp):
        return ((peakAmp/(factor*(sens_TFOrigin/50)*TFAmp)) < problvl) & (TFAmp > -2*(sens_TFOrigin/50)+4)

    for i in range(nt):
        peakloci = detect_peaks.detect_peaks(A_1[i][f_valid], mph=0.15*numpy.square((sens_FindPeaks/50)-2)*maxamp, mpd=3)
        peakampi = A_1[i][f_valid][peakloci]
        peakloc[i] = f[f_valid][peakloci]
        peakamp[i] = peakampi
        collectivepeaks[i] = []

        if len(peakloci) == 0:
            ispeakfromTF[i] = [numpy.nan]
            gap[i] = []
        else:
            numrealpeaks[i] = 1
            collectivepeaks[i] = [[peakloci[0]]]
            # the original inlines this check everywhere; TFpeakCheck is the same expression
            ispeakfromTF[i] = [[TFpeakCheck(peakampi[0], tf[peakloci[0]])]]
            gap[i] = []
            if len(peakloci) == 1:
                peakloc_ind[peakloci[0]] = True
            else:
                for j in range(len(peakloci)-1):
                    freesp = findFreeSpace(A_1[i], f, f[[peakloci[j], peakloci[j+1]]], problvl)
                    gap[i].append(freesp)
                    if (f[peakloci[j+1]] - f[collectivepeaks[i][-1][-1]] > -10*(sens_SumUpPeaks/50+20)) & (gap[i][j][0] != 0):
                        numrealpeaks[i] = numrealpeaks[i] + 1
                        collectivepeaks[i].append([peakloci[j+1]])
                        # note: the original always re-checks peak 0 here (possibly intended: peak j+1)
                        ispeakfromTF[i].append([TFpeakCheck(peakampi[0], tf[peakloci[0]])])
                    else:
                        collectivepeaks[i][-1].append(peakloci[j+1])
                        ispeakfromTF[i][-1].append(TFpeakCheck(peakampi[0], tf[peakloci[0]]))
                    peakloc_ind[peakloci[j]] = True

    realpeakloc = []
    # a fresh list per row; [[]]*nt would alias one shared list and mix rows together
    isrealpeakfromTF = [[] for _ in range(nt)]
    for i in range(nt):
        realpeakloc.append([0]*numrealpeaks[i])
        if numrealpeaks[i] > 0:
            for j in range(numrealpeaks[i]):
                realpeakloc[i][j] = numpy.mean(f[f_valid][collectivepeaks[i][j]])
                if len(ispeakfromTF[i]) > 0:
                    if numpy.mean(ispeakfromTF[i][j]) <= .5:
                        isrealpeakfromTF[i].append(0)
                    else:
                        isrealpeakfromTF[i].append(1)
                else:
                    isrealpeakfromTF[i].append(.95)
        else:
            isrealpeakfromTF[i] = [.95]

    meannumrealpeaks = numpy.mean(numpy.array(numrealpeaks)[numpy.where(numpy.array(numrealpeaks) > 0)])

    return peakloc_ind, collectivepeaks, numrealpeaks, meannumrealpeaks, realpeakloc, isrealpeakfromTF
Exemplo n.º 58
0
    bin_means = np.histogram(x, bins, weights=y)[0]
    errors = np.sqrt(bin_means+1)

    X = 0.5*(bins[:-1] + bins[1:])
    Y = bin_means
    Err = errors

    totY = totY + Y
    totErr = np.sqrt(totErr**2 + Err**2)

    # pl.errorbar(X,Y,Err)

    data['binnedX'] = X
    data['binnedY'] = Y
    data['binnedErr'] = Err



S = InterpolatedUnivariateSpline(X,Y)
x = np.linspace(minFreq,maxFreq,1000000)
y = S(x)


print(detect_peaks(y, mpd=200000, show=False))

start = time.perf_counter()  # time.clock was removed in Python 3.8
for i in range(100):
    ind = detect_peaks(y, mpd=200000, show=False)

print((time.perf_counter() - start)/100)
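The same 100-run average can also be measured with timeit, which handles the timer choice itself; a sketch:

import timeit

avg_s = timeit.timeit(lambda: detect_peaks(y, mpd=200000, show=False), number=100) / 100
print(avg_s)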
Exemplo n.º 59
0
        if GetSerial() == 255:
            CurTempVoltage = (GetSerial() * 256) + GetSerial()

            CurPulse = (GetSerial() * 256) + GetSerial()

            if len(data) > list_length:
                data.pop(0)

            data.append(CurPulse)

            # apply calibration fit (round to 1 DP)
            CurTemp = round(((0.0476 * CurTempVoltage) - 3.5764), 1)

            if len(data) > (list_length - 1):
                data_processed = np.asarray(data)
                peaks = detect_peaks(data_processed, show=False, mpd=10)

                if len(peaks) > 4:
                    distances = np.diff(peaks)
                    # keep plausible beat spacings only (the original also called an
                    # undefined strip() on the filtered list)
                    filt_dist = [d for d in distances if 20 < d < 60]
                    mean = np.mean(filt_dist)
                    newpulse = 60 / (mean * 0.025)

            if 40 < newpulse < 140:
                pulse = newpulse

            print(str(filt_dist) + " ; " + str(pulse))

            ToWrite = str(CurTemp) + ',' + str(int(pulse)) + ',' + strftime("%H:%M:%S", gmtime()) + '.' + str(datetime.now().microsecond / 10000)
                
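The pulse conversion above assumes one sample every 0.025 s (40 Hz), i.e. bpm = 60 / (mean peak spacing x 0.025). A standalone check of that arithmetic:

import numpy as np

SAMPLE_PERIOD_S = 0.025  # 40 Hz, as the snippet assumes
peaks_demo = np.array([10, 50, 90, 130, 170])  # a beat every 40 samples = every second
mean_spacing = np.mean(np.diff(peaks_demo))
print(60 / (mean_spacing * SAMPLE_PERIOD_S))  # 60.0 bpm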
Exemplo n.º 60
0
def generate_wav_sign_change_bits(wavefile):
    samplewidth = wavefile.getsampwidth()
    nchannels = wavefile.getnchannels()
    rate = wavefile.getframerate()
    previous = 0
    max = 0
    rev = 0
    det = 0
    while True:
        if rev > 0:
            wavefile.setpos(wavefile.tell()-rev)
        frames = bytearray(wavefile.readframes(4000))
        if not frames:
            break

        # Extract most significant bytes from left-most audio channel
        if nchannels > 1:
            del frames[samplewidth::4]    # Delete the right stereo channel    
            del frames[samplewidth::3]
            msdata = np.array(struct.unpack('h'*(len(frames)//2), frames)) / 1000
            minp = 15
            rate = 1
            mph = 1
            indexes = np.array(detect_peaks(msdata, mph=mph, mpd=minp))
            indexll = np.array(detect_peaks(msdata * -1, mph=mph, mpd=minp))
        else:
            msdata = (128 - np.array(struct.unpack('B'*(len(frames)), frames))) / 10.0
            minp = 2 if rate == 8000 else 15
            mph = 1.0
            rate = 5 if rate == 8000 else 1
            #print (45/rate, 25/rate, 35/rate)
            indexes = np.array(detect_peaks(msdata, mph=mph, mpd=minp))
            indexll = np.array(detect_peaks(msdata * -1, mph=mph, mpd=minp))
        #msbytes = bytearray(frames[samplewidth-1::samplewidth*nchannels])
            #print("index=",indexes[-1], len(msdata))
        #if rev > 0:
            #print ("rev=", rev)
        if len(indexes) > 2:  # indexes[-3] below needs at least three peaks
            rev = (len(msdata) - indexes[-3]) + 10//rate
            b = indexes[1:len(indexes)] - indexes[0:len(indexes)-1]
            bits = []  # renamed from `str`, which shadowed the builtin
            ch = np.zeros((len(b)))
            chs = ["" for x in range(len(b))]
            det = det + 1
            for i in range(1,len(b)-1):
                v = b[i]
                s = ''
                l = np.argmin(msdata[indexes[i-1]:indexes[i]]) + indexes[i-1]
                r = np.argmin(msdata[indexes[i]:indexes[i+1]]) + indexes[i]             
                a = np.sum(msdata[l:r] - msdata[l])
                #print (indexes[i-1:i+2], l,r,a)
                h = np.max(msdata[indexes[i]-1:indexes[i+1]])-np.min(msdata[indexes[i]:indexes[i+1]])
                ch[i] = indexes[i]
                if v >= 15/rate and v <= 50/rate:
                       #print (msdata[indexes[i]:indexes[i+1]], min(msdata[indexes[i]:indexes[i+1]]))
                    #if min(msdata[indexes[i]:indexes[i+1]]) < 0:
                    if v < 28/rate and r-l < 30:
                        s = '0'
                    elif v <= 35/rate:
                        # the amplitude-based tests on `a` in the original were
                        # immediately overwritten; only the width test survives
                        s = '1' if r - l > 30 else '0'
                    elif v > 35/rate:
                        s = '1'
                    else:
                        s = '*'
                        #det = det + 1
                        #print (v, a/v, h, s)
                    #if det > 0:
                    #    print (v, r-l, a/v, h, s)
                else:
                    s = '#'
                bits.append(s)
                chs[i] = s
            if ''.join(bits) != '0' * len(bits):
                #print (str)
                #detect_peaks(msdata, mph=2000, mpd=15, show=True)
                #print (b)
                i = 0
                for s in bits:
                    # Emit a stream of sign-change bits
                    #print ("%d>%c "%(b[i],s), end='')
                    i = i + 1
                    yield s
                if det > 1000:
                    #print ("rev=", rev)
                    np.array(detect_peaks(msdata, mph=mph, mpd=minp, edge='both', show=True, cut=(rev-b[-2]), check=ch, strs=chs))
                #det = det + 1
            else:
                yield '-'
            #rev = 0 if rev > 90 else rev
            #print("rev=", rev, wavefile.tell(), wavefile.tell()-rev+1200, len(msdata), indexes[-2])
            #print(msdata[0:rev])
            #print(msdata[len(msdata)-rev:len(msdata)-1])
        else:
            rev = 0
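A hedged usage sketch for the generator above; the filename is illustrative, and the extra show/cut/check/strs keywords it passes to detect_peaks imply a locally modified copy of that module:

import wave

wf = wave.open('capture.wav', 'rb')  # hypothetical recording
bits = []
for bit in generate_wav_sign_change_bits(wf):
    bits.append(bit)
    if len(bits) >= 64:  # stop after the first 64 sign-change bits
        break
print(''.join(bits))
wf.close()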