Example #1
    def FindProminencePeaks(self, window, peak):
        prominences1 = peak_prominences(self.df1[window[0]:window[1], 1], peak[0])[0]
        prominences2 = peak_prominences(self.df2[window[0]:window[1], 1], peak[2])[0]
        prominences3 = peak_prominences(self.df3[window[0]:window[1], 1], peak[4])[0]
        prominences = [prominences1, prominences2, prominences3]

        return prominences
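Example #2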
def peak_finder(x_hat):
    peaks, _ = find_peaks(x_hat)
    prominences = peak_prominences(x_hat, peaks)[0]

    # keep only peaks at least as prominent as the third-largest prominence
    threshold = np.sort(prominences)[::-1][2]
    peaks, _ = find_peaks(x_hat, prominence=(threshold, None))
    prominences = peak_prominences(x_hat, peaks)[0]
    contour_heights = x_hat[peaks] - prominences
    plt.vlines(x=peaks, ymin=contour_heights, ymax=x_hat[peaks])
    return peaks
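A minimal way to exercise peak_finder is a sketch like the following, assuming the imports below; the synthetic signal is illustrative only:

import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import find_peaks, peak_prominences

# synthetic signal with peaks of varying prominence
t = np.linspace(0, 6 * np.pi, 500)
x_hat = np.sin(t) + 0.6 * np.sin(3.1 * t)

peaks = peak_finder(x_hat)  # keeps only peaks at least as prominent as the third largest
plt.plot(x_hat)
plt.show()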
Example #3
def get_features(df):
    """Get features from sensor data
    For each sensor, peak, prominence and periodogram features are computed.
        
    Parameters:
        df: pd.DataFrame
        Dataframe with 10 columns, corresponding to data from sensors
    Returns:
        features: list
        List with features
    """
    features = []
    # zeros_crossings
    features.extend(librosa.zero_crossings(df.values, axis=0).sum(axis=0))

    # find_peaks
    features.extend(df.apply(find_peaks, axis=0).iloc[0, :].apply(len).values)

    # peak_widths_max
    λ0 = lambda x: np.max(peak_widths(x, find_peaks(x)[0])[0]) if len(find_peaks(x)[0]) != 0 else 0
    features.extend(df.apply(λ0).values)

    # peak_widths_mean
    λ01 = lambda x: np.mean(peak_widths(x, find_peaks(x)[0])[0]) if len(find_peaks(x)[0]) != 0 else 0
    features.extend(df.apply(λ01).values)

    # peak_prominences_max
    λ1 = lambda x: np.max(peak_prominences(x, find_peaks(x)[0])[0]) if len(find_peaks(x)[0]) != 0 else 0
    features.extend(df.apply(λ1).values)

    # peak_prominences_mean
    λ11 = lambda x: np.mean(peak_prominences(x, find_peaks(x)[0])[0]) if len(find_peaks(x)[0]) != 0 else 0
    features.extend(df.apply(λ11).values)

    # periodogram_max
    λ2 = lambda x: np.max(periodogram(x[~x.isna()], 100)[1]) if not x.isna().all() else 0
    features.extend(np.sqrt(df.apply(λ2).values))  # An estimate of the RMS

    # periodogram_mean
    λ3 = lambda x: np.mean(periodogram(x[~x.isna()], 100)[1]) if not x.isna().all() else 0
    features.extend(df.apply(λ3).values)

    return features
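A quick smoke test for get_features, as a sketch: it assumes librosa and the scipy.signal functions used above (find_peaks, peak_widths, peak_prominences, periodogram) are imported, and that df.apply(find_peaks) expands the (peaks, properties) tuple row-wise as in the pandas version this was written against. The random data is purely illustrative:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(500, 10)))  # 10 sensor channels, 500 samples each
features = get_features(df)
print(len(features))  # 8 feature groups x 10 sensors = 80 values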
Example #4
def detect_peaks(score_arr, x_dist=6, thresh=0.15, width=1):
    """Find peaks from total scores of sample groups."""
    # Group data by sample group
    grouped = score_arr.groupby(score_arr.loc[:, 'group'])

    # Create DF for peak data
    all_peaks = pd.DataFrame(columns=['group', 'peak', 'prominence', 'sign'])

    # For each sample group:
    for grp, data in grouped:
        # Drop empty values
        total = data.value.dropna()

        # POSITIVE peaks
        peaks, _ = find_peaks(total, distance=x_dist, height=thresh, width=width)  # , threshold=thresh)
        # Find prominence of found peaks
        prom = peak_prominences(total, peaks)[0]
        peaks = total.index[peaks].get_level_values(1).values
        peak_dict = {'group': grp, 'peak': peaks, 'prominence': prom, 'sign': 'pos'}

        # NEGATIVE peaks
        # neg_total = total * -1
        # npeaks, _ = find_peaks(neg_total, distance=4, height=thresh,
        #                           width=2)#, threshold=thresh)
        # nprom = peak_prominences(neg_total, npeaks)[0]
        # npeaks = neg_total.index[npeaks].get_level_values(1).values
        # npeak_dict = {'group': grp, 'peak': npeaks, 'prominence': nprom * -1,
        #               'sign': 'neg'}

        # Add groups data to full peak data
        # DataFrame.append was removed in pandas 2.0; use pd.concat instead
        all_peaks = pd.concat([all_peaks, pd.DataFrame(peak_dict)], ignore_index=True)
        # all_peaks = all_peaks.append(pd.DataFrame(npeak_dict), ignore_index=True)
    return all_peaks
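Example #5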
def callPeaks(x_data):
    '''
    detect peaks in data and call them based on height relative to the primary peak

    Args
        x_data (list or numpy.array) of floats

    Returns
        list of peak indices

    '''
    called_peaks = []

    # find all peaks
    peaks, _ = find_peaks(x_data)

    # find height of each peak where baseline is neighboring trough
    counter_heights = peak_prominences(x_data, peaks)[0]

    # find maximum peak, indicates primary growth phase
    max_height = np.max(counter_heights)
    for pp, cc in zip(peaks, counter_heights):
        # only call a peak if its height is at least 10% of maximum peak height
        if cc > 0.1 * max_height:
            called_peaks.append(pp)

    return called_peaks
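A small sanity check for callPeaks, assuming numpy and scipy.signal's find_peaks/peak_prominences are imported; the signal is made up for illustration:

import numpy as np
from scscipy import signal  # or: from scipy.signal import find_peaks, peak_prominences
from scipy.signal import find_peaks, peak_prominences

t = np.linspace(0, 10, 200)
x_data = np.exp(-(t - 5) ** 2) + 0.03 * np.sin(8 * t)  # one dominant bump plus a faint ripple
print(callPeaks(x_data))  # ripple peaks below 10% of the tallest peak's height are not called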
Example #6
def get_peaks(time_series):
    # obtain peak values
    peak_indices = find_peaks(time_series['Amplitude'],
                              height=[5, 200],
                              distance=300)[0]  #get indices of peaks
    prominence = peak_prominences(time_series['Amplitude'],
                                  peak_indices,
                                  wlen=None)[0]  # get prominences of peaks

    #combine peaks with measured values into dataframe
    peaks_df = pd.DataFrame(
        columns=['Indices', 'Amplitude', 'Prominence', 'Label'])
    peaks_df['Indices'] = peak_indices
    peaks_df['Amplitude'] = peaks_df.apply(
        lambda row: time_series['Amplitude'][row.Indices], axis=1)
    peaks_df['Prominence'] = prominence

    #get labels for each value based on domain knowledge
    labels = [''] * len(time_series['Amplitude'])
    q1 = np.quantile(time_series['Amplitude'][peak_indices], 0.25)
    q3 = np.quantile(time_series['Amplitude'][peak_indices], 0.75)
    for item in peak_indices:
        if time_series['Amplitude'][item] <= q1:
            labels = fill_labels(labels, item, 'Bend')
        elif time_series['Amplitude'][item] <= q3:
            labels = fill_labels(labels, item, 'Notch')
        else:
            labels = fill_labels(labels, item, 'End')

    # add label to peaks dataframe
    peaks_df['Label'] = peaks_df.apply(lambda row: labels[row.Indices], axis=1)

    return peaks_df
Example #7
    def __init__(self, original, sigma, row=None):
        '''
        Initialisation function of the class. The following attributes are always created for every instance of the class:

        - a blurred version of the sample
        - the peak positions within the sample
        - the prominences of said peaks
        - the mean prominences of the peaks along one row
        - the total mean across all of the rows of the sample, by taking the mean of the means
        
            INPUTS: 
            original - greyscale sample, should be a np array
            sigma - the sigma value of Gaussian Blur (standard deviation)
            row - the row of the sample which is tested for lines. This assumes the sample is uniform, and may need to change.
        '''

        self.original = original
        self.sigma = sigma
        self.row = row
        self.gauss_blur = ndimage.gaussian_filter(self.original, self.sigma)
        # The following loops the finding of peaks over every row in the sample, to gain more
        # accurate results, using list comprehensions. E.g. self.peak_positions, previously a
        # flat list of one row's peak positions, is now a list of lists (e.g.
        # [[1, 2, 3], [2, 3, 4], [3, 4, 5]]) of the peak positions of every row.
        self.peak_positions = [find_peaks(row)[0] for row in self.gauss_blur]
        self.prominences = [
            peak_prominences(row, self.peak_positions[i])[0]
            for i, row in enumerate(self.gauss_blur)
        ]
        self.mean_prominences = [
            np.mean(prominence) for prominence in self.prominences
        ]
        self.total_mean = np.mean(self.mean_prominences)
Example #8
def to_dataframe(result, meta, directory, line_num="average", save=False):
    peaks, _ = find_peaks(-result, distance=50)
    SS = peak_prominences(-result, peaks, wlen=70)
    CD50 = peak_widths(-result, peaks, rel_height=0.5, prominence_data=SS)
    CD90 = peak_widths(-result, peaks, rel_height=0.9, prominence_data=SS)
    time_frame = meta['Timepoint'][-1] / 600 * 100

    max_contract = []
    max_relax = []
    for i in range(len(peaks)):
        max_contract.append(np.max(np.diff(-result[SS[1][i]:peaks[i]])) * time_frame)
        max_relax.append(np.min(np.diff(-result[peaks[i]:SS[2][i]])) * time_frame)

    df = pd.DataFrame(result[SS[1]], columns = ['Basal length'])
    df['Peak length in um'] = result[peaks]
    df['Time to peak in msec'] = peaks*time_frame - SS[1]*time_frame
    df['CD90 in msec'] = CD90[0] * time_frame
    df['CD50 in msec'] = CD50[0] * time_frame
    df['Max contractile in um/msec'] = max_contract
    df['Max relaxation in um/msec'] = max_relax
    int_peak = np.zeros(len(peaks), 'int')
    int_peak[1:] = np.diff(peaks)
    df['Peak intervals in ms'] = int_peak *time_frame
    beat_rate = np.zeros(len(peaks))
    beat_rate[1:] = 6000/((np.diff(peaks)*time_frame)/2)
    df['Beating rate in beat/min'] = beat_rate

    if save:
        try:
            df.to_csv(directory+'/'+'line'+line_num+'_'+'{}.csv'.format(meta["Name"]))
        except FileNotFoundError:
            df.to_csv('line'+line_num+'_'+'{}.csv'.format(meta["Name"]))

    return df
Example #9
def get_periods(mjd, mag, err, fltr, objname='', N=5, pmin=0.2, bands=['u','g','r','i','z','Y','VR']):
    
    # The filter information here uses indices determined from the order they
    # appear in bands. To run psearch we want to reassign these indices to remove
    # any unused bands. For example, if only 'g', 'r' and 'z' are used, indices
    # should be 0,1,2 and not 1,2,4.
    cnt  = Counter(fltr)
    
    mult = np.where(np.array(list(cnt.values()))>1)[0]
    sel  = np.in1d(fltr, mult)
    
    fltinds = list(set(fltr))
    replace = {fltinds[i]:i for i in range(len(fltinds))}
    newinds = np.array([replace.get(n,n) for n in fltr],dtype=np.float64)
    fltrnms = (np.array(bands))[list(set(fltr[sel]))]
    
    dphi = 0.02
    plist, psiarray, thresh = \
            psearch_py3.psearch_py( mjd[sel], mag[sel], err[sel], 
                                   newinds[sel], fltrnms, pmin, dphi )
    
    psi = psiarray.sum(0)
    
    pkinds = find_peaks(psi,distance=len(plist)/2000)[0]
    prom   = peak_prominences(psi,pkinds)[0]
    inds0  = pkinds[np.argsort(-prom)[:10*N]]
    inds   = inds0[np.argsort(-psi[inds0])[:N]]
    
    plot_periodogram(plist,psi,inds,objname)
    
    return plist[inds]
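Example #10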
def EMGFeatures(raw_signal, fs=128):
    # Statistical Features
    [_, std, maxv, minv] = StatFeature(raw_signal)

    # Power Spectrum
    w = np.hamming(len(raw_signal))
    w, psd = periodogram(raw_signal, window=w, detrend=False)
    _, _, _, maxHFD, _, _ = findLFHF(psd, w)

    # Time Series
    kurt = kurtosis(raw_signal)
    sk = skew(raw_signal)

    # Peak Features
    [peaks, _] = find_peaks(raw_signal)
    pprom = peak_prominences(raw_signal, peaks)[0]
    contour_heights = raw_signal[peaks] - pprom
    pwid = peak_widths(raw_signal, peaks, rel_height=0.4)[0]
    [ppmean, ppstd, _, ppmin] = StatFeature(pprom)
    [pwmean, pwstd, pwmax, pwmin] = StatFeature(pwid)

    return np.array([
        std, maxv, minv, maxHFD, kurt, sk, ppmean, ppstd, ppmin, pwmean, pwstd,
        pwmax, pwmin
    ])
Example #11
def calculate(x):
    peaks, _ = find_peaks(x, distance=2)
    prominences = peak_prominences(x, peaks)[0]

    peaks = np.extract(prominences > x.mean() * 0.005, peaks)
    prominences = np.extract(prominences > x.mean() * 0.005, prominences)
    return peaks, prominences
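Because the prominence cut scales with the mean signal level, this suits signals with a positive baseline; a minimal sketch, assuming numpy and the scipy.signal imports:

import numpy as np
from scipy.signal import find_peaks, peak_prominences

x = np.abs(np.sin(np.linspace(0, 20, 400))) + 5.0  # positive baseline
peaks, prominences = calculate(x)  # keeps peaks more prominent than 0.5% of the mean
print(peaks, prominences)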
Example #12
def accurate_peak_positions(peak_positions,
                            line_profile,
                            low_prominence=TARGET_PROMINENCE,
                            high_prominence=numpy.inf,
                            centroid_calculation=True):
    """
    Post-processing method after peaks have been calculated using the 'all_peaks' method. The peaks are filtered
    based on their prominence. Additionally, peak positions can be corrected by applying a centroid correction
    based on the line profile.

    Parameters
    ----------
    peak_positions: Detected peak positions of the 'all_peaks' method.
    line_profile: Original line profile used to detect all peaks. This array will be further
    analyzed to better determine the peak positions.
    low_prominence: Lower prominence bound for detecting a peak.
    high_prominence: Higher prominence bound for detecting a peak.
    centroid_calculation: Use centroid calculation to better determine the peak position regardless of the number of
    measurements / illumination angles used.

    Returns
    -------
    NumPy array with the positions of all detected peaks.
    """
    n_roi = normalize(line_profile)
    peak_prominence = numpy.array(peak_prominences(n_roi, peak_positions)[0])
    selected_peaks = peak_positions[(peak_prominence > low_prominence)
                                    & (peak_prominence < high_prominence)]

    if centroid_calculation:
        return centroid_correction(n_roi, selected_peaks, low_prominence,
                                   high_prominence)

    return selected_peaks
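The prominence-band filtering at the heart of this function can be sketched standalone; the bounds here are illustrative, and the module's normalize and centroid_correction helpers are skipped:

import numpy as np
from scipy.signal import find_peaks, peak_prominences

profile = np.sin(np.linspace(0, 4 * np.pi, 360)) ** 2
peak_positions, _ = find_peaks(profile)
prom = peak_prominences(profile, peak_positions)[0]

low_prominence, high_prominence = 0.1, np.inf  # illustrative bounds
selected = peak_positions[(prom > low_prominence) & (prom < high_prominence)]
print(selected)

Example #13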
    def on_click(event):
        print(event.x, event.y, event.xdata, event.ydata)
        params = {
            'mag': mag,
            'blend': 0.,
            'u0': u0,
            't0': t0,
            'tE': tE_from_xvt(x=event.xdata, vt=event.ydata, mass=mass),
            'delta_u': delta_u_from_x(x=event.xdata, mass=mass),
            'theta': theta * np.pi / 180.
        }
        fig, axs = plt.subplots(2, 1, sharex=True)

        cnopa = microlens_simple(time_range, params.values())
        cpara = microlens(time_range, params.values())

        #prominences
        peaks, _ = find_peaks(mag - cpara)
        prominences = peak_prominences(mag - cpara, peaks)[0]
        print(prominences)
        contour_heights = cpara[peaks] + prominences
        axs[0].vlines(x=time_range[peaks],
                      ymin=contour_heights,
                      ymax=cpara[peaks])

        axs[0].plot(time_range, cpara)
        axs[0].plot(time_range, cnopa)
        axs[0].invert_yaxis()
        axs[1].plot(time_range, cpara - cnopa)
        axs[1].invert_yaxis()
        fig.suptitle(r'$t_E = $' + str(event.ydata) + r', $\delta_u = $' +
                     str(event.xdata))
        plt.show()
Example #14
def fundamentals(pt, side, condit):
    # slice through our SG and find the LOWEST peak for each time
    fSG = SGs[pt][condit][side]
    fund_freq = np.zeros_like(fSG["T"])
    for tt, time in enumerate(fSG["T"]):
        peaks, _ = sig.find_peaks(10 * np.log10(fSG["SG"][8:50, tt]),
                                  height=-35)
        if peaks.size > 0:
            proms = sig.peak_prominences(10 * np.log10(fSG["SG"][8:50, tt]),
                                         peaks)
            most_prom_peak = np.argmax(proms[0])
            fund_freq[tt] = fSG["F"][peaks[most_prom_peak]]
    # plt.figure()
    # plt.plot(10*np.log10(fSG['SG'][8:50,6500]))

    timeseries = dbo.load_BR_dict(Ephys[pt][condit]["Filename"], sec_offset=0)
    end_time = timeseries[side].shape[0] / 422

    sos_lpf = sig.butter(10, 10, output="sos", fs=422)
    filt_ts = sig.sosfilt(sos_lpf, timeseries[side])
    filt_ts = sig.decimate(filt_ts, 40)
    fig, ax1 = plt.subplots()

    ax1.plot(np.linspace(0, end_time, filt_ts.shape[0]), filt_ts)

    gauss_fund_freq = ndimage.gaussian_filter1d(fund_freq, 10)
    ax2 = ax1.twinx()
    ax2.plot(fSG["T"], fund_freq, color="green", alpha=0.2)
    ax2.plot(fSG["T"], gauss_fund_freq, color="blue")
Example #15
def calc_ratio(result_array):
     dataNew=result_array[1:-1]
     n = len(dataNew)
     base = round(n/2)
     index1 = 1
     diff = 0
     neg_array = 0
     while(index1<n):
         diff=dataNew[base]-dataNew[index1]
         neg_array = np.append(neg_array, diff)
         index1=index1+1
     end = len(neg_array)
     base = neg_array[end-1]
     peaks, _ = find_peaks(neg_array-base, threshold=1, width=(1,10))
     prominences = peak_prominences(neg_array-base, peaks)[0]
     print("prominences", prominences)
     plt.plot(neg_array-base)
     plt.plot(peaks, neg_array[peaks]-base, 'x')
     plt.savefig('peaks.png')
     index2 = 0
     points_array = 0
     while(index2<len(peaks)):
         point =  peaks[index2]
         points_array = np.append(points_array, (neg_array[point]-base))
         index2=index2+1
     points_array.sort()
     n = len(points_array)
     print(points_array)
     if n == 1:
        peak_ratio = 0
     else:
        peak_ratio = points_array[n-2] / points_array[n-1]
     return peak_ratio
Example #16
    def _calculate(self, x):
        peaks, _ = find_peaks(x, distance=10)
        prominences = peak_prominences(x, peaks)[0]

        peaks = np.extract(prominences > x.mean() * 0.001, peaks)
        prominences = np.extract(prominences > x.mean() * 0.001, prominences)
        return peaks, prominences
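Example #17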
def process_book(para_to_sent_dir, density_dir, output_dir, book_id):

    # Read densities file
    with open(os.path.join(density_dir, book_id + '.pkl'), 'rb') as f:
        densities = pickle.load(f)

    # Read para_to_sent file
    with open(os.path.join(para_to_sent_dir, book_id + '.pkl'), 'rb') as f:
        para_to_sent = pickle.load(f)

    # Get valid sentence numbers (that come at ends of paragraphs)
    valid_sent_nums = list(para_to_sent.values())

    # Corresponding densities
    valid_densities = [densities[x] for x in sorted(valid_sent_nums[:-1])]

    # Get peak indices and prominences
    peaks, _ = signal.find_peaks([-x for x in valid_densities])
    prominences = signal.peak_prominences([-x for x in valid_densities],
                                          peaks)[0]

    # Get sentence numbers corresponding to peak indices
    peak_sent_nums = [valid_sent_nums[idx] for idx in peaks]
    peak_sent_proms = prominences

    with open(os.path.join(output_dir, book_id + '.pkl'), 'wb') as f:
        pickle.dump([list(peak_sent_nums), list(peak_sent_proms)], f)

    print(book_id, ' success!')
    return book_id, 'Success'
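Example #18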
def peak_feature_extracter(signal, window_size, stride):
    feature = []
    for start in range(0, len(signal), stride):
        if len(signal) < start + window_size:
            break
        ts_range = signal[start:start + window_size]
        # calculate peak feature
        peaks, _ = find_peaks(ts_range, threshold=0.05)
        if len(peaks) == 0:
            num_of_peaks = 0.
            min_width_of_peak = 0.
            max_width_of_peak = 0.
            mean_width_of_peak = 0.
            std_width_of_peak = 0.

            min_height_of_peak = 0.
            max_height_of_peak = 0.
            mean_height_of_peak = 0.
            std_height_of_peak = 0.

            min_prominence = 0.
            max_prominence = 0.
            mean_prominence = 0.
            std_prominence = 0.
        else:
            widths = peak_widths(ts_range, peaks, rel_height=0.5)
            num_of_peaks = len(peaks) / len(ts_range)
            min_width_of_peak = np.min(widths[0]) / 100
            max_width_of_peak = np.max(widths[0]) / 100
            mean_width_of_peak = np.mean(widths[0]) / 100
            #std_width_of_peak = np.std(widths[0])/100

            min_height_of_peak = np.min(widths[1])
            max_height_of_peak = np.max(widths[1])
            mean_height_of_peak = np.mean(widths[1])
            #std_height_of_peak = np.std(widths[1])

            prominences = peak_prominences(ts_range, peaks)[0]
            min_prominence = np.min(prominences)
            max_prominence = np.max(prominences)
            mean_prominence = np.mean(prominences)
            #std_prominence = np.std(prominences)

        feature.append(
            np.asarray([
                num_of_peaks,
                min_width_of_peak,
                max_width_of_peak,
                mean_width_of_peak,
                #std_width_of_peak,
                min_height_of_peak,
                max_height_of_peak,
                mean_height_of_peak,
                #std_height_of_peak,
                min_prominence,
                max_prominence,
                mean_prominence
                #std_prominence
            ]))
    return feature
Example #19
def get_key_frames(video):
    """
    Reads through the frame differences of a video and gets the standard deviation of the
    peaks' prominences. If a prominence is more than 3 std, then it is a hold estimate.
    """

    x, y = get_frame_difference(video)
    y = [1 - n for n in y]

    x = np.array(x)
    y = np.array(y)

    diff = list(zip(y, x))

    peaks, _ = signal.find_peaks(y)
    first = peaks[0]
    y = y[first:]

    peaks, _ = signal.find_peaks(y)

    prominences = signal.peak_prominences(y, peaks)[0]
    std = np.std(prominences)
    key_prom = []
    for p in prominences:
        if p > 3 * std:
            key_prom.append(p)

    min_p = np.min(key_prom) - 0.0000001

    key_peaks, _ = signal.find_peaks(y, prominence=min_p)

    frames = [peak + first for peak in key_peaks]
    return frames
Example #20
def peak_prom(data, peaks):
    prom = np.zeros(len(peaks))
    for i in range(len(peaks)):
        try:
            prom[i] = sig.peak_prominences(data, [int(peaks[i])])[0][0]
        except Exception:
            prom[i] = 0
    return prom
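Because each prominence is computed in its own try/except, an invalid peak index degrades to 0 instead of raising; for example, as a sketch assuming numpy as np and scipy.signal as sig:

import numpy as np
import scipy.signal as sig

data = np.sin(np.linspace(0, 4 * np.pi, 100))
print(peak_prom(data, [12, 62, 10000]))  # the out-of-range index yields 0.0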
Example #21
def get_no_peaks(signal, n):
    all_peaks = ss.find_peaks(signal)[0]
    proms = ss.peak_prominences(signal, all_peaks)[0]

    # Taking the n most prominent peaks
    index = np.argsort(proms)
    all_peaks = all_peaks[index]

    return all_peaks[-n:]
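For instance, assuming scipy.signal as ss and numpy as np, picking the three most prominent peaks of a two-tone signal:

import numpy as np
import scipy.signal as ss

t = np.linspace(0, 1, 1000)
x = np.sin(2 * np.pi * 5 * t) + 0.3 * np.sin(2 * np.pi * 40 * t)
print(get_no_peaks(x, 3))  # indices of the 3 most prominent peaks, least prominent first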
Example #22
def peaksFunction(x, y):
    index = np.where((x > 1.9) & (x < 2.5))[0]
    signal = y[index[0:-1]]
    peak, _ = find_peaks(signal, prominence=(5, None))
    prominences = peak_prominences(signal, peak)[0]
    # contour_heights = signal[peak] - prominences
    a = np.amax(prominences)
    return a
Example #23
def peaks_cwt_1(data_mean):
    peaks = sci.find_peaks_cwt(data_mean, np.arange(0.001, 10))
    prominences = sci.peak_prominences(data_mean, peaks, wlen=5)[0]
    results_eighty = sci.peak_widths(data_mean, peaks, rel_height=0.8)
    peak_width = results_eighty[0]
    l_ips = np.floor(results_eighty[2]).astype(int)  # left interpolated positions, as indices
    r_ips = np.ceil(results_eighty[3]).astype(int)   # right interpolated positions, as indices
    return [peaks, prominences, l_ips, r_ips, peak_width]
Example #24
def looop(stream, soundAnalysis):
    global CHUNK, avgThreshold, RATE, peckStatus, promThreshold, freqRange, peckCount, weightPerFeed, weightFeedCount

    data = np.frombuffer(stream.read(CHUNK, exception_on_overflow=False),
                         dtype=np.int16)
    peak = np.average(np.abs(data)) * 2
    #if peak is high calculate fft
    # print(peak)
    if (peak > avgThreshold[1]):
        #calculate fft
        #fftx, ffty in fftval
        fftVal = getFFT(data, RATE)
        #chop unnecessary frequency information
        fftVal = Chopfft(fftVal)
        #find the index of the peaks in the fftsignal
        # peakIndex = signal.find_peaks_cwt(fftVal[1], np.arange(0.1,1))
        peakIndex, _ = signal.find_peaks(fftVal[1])

        #find the highest peak
        highestPeakIndex = np.argmax(fftVal[1])
        highestPeak = [
            fftVal[0][highestPeakIndex], fftVal[1][highestPeakIndex]
        ]  #[frequency, yvalue or amplitude]
        # print("Highest peak at frequency:", highestPeak[0])  #print the frequency where the highest peak occurs

        #finding the most prominent peak (optional, but more flexible)
        peakProminance = signal.peak_prominences(
            fftVal[1], peakIndex
        )[0]  #returns the prominence of each given peak, so 'peakIndex' and 'peakProminance' have the same shape
        PeakPIndex = np.argmax(
            peakProminance)  #index of the maximum element in the list
        indexOfHigestProminance = peakIndex[PeakPIndex]
        higestProminancePeak = [
            fftVal[0][indexOfHigestProminance],
            fftVal[1][indexOfHigestProminance]
        ]
        # print("Highest peak prominence at frequency:", higestProminancePeak[0])

        # print("Highest prominence value of peak:", higestProminancePeak[1])

        #check if the prominence of the highest peak is greater than the threshold
        if (higestProminancePeak[1] > promThreshold[1]):
            #if yes try to increment the peckCount
            if incORnot() == 1:
                peckCount += 1
                weightFeedCount = peckCount * weightPerFeed
                soundAnalysis[0] += 1
                soundAnalysis[1] = soundAnalysis[0] * weightPerFeed
                print("Peck Count: ", peckCount)

        elif (higestProminancePeak[1] < promThreshold[0]):
            resetPeckStatus()

    elif (peak < avgThreshold[0]):
        resetPeckStatus()
Example #25
def Nanostick_Peak_Analysis(num, loud=False):

    # Locate the spectral peaks and their FWHM
    # Compute the mode Q-factor from this data: Q = \lambda / \Delta\lambda
    # R. Sheehan 2 - 7 - 2019

    FUNC_NAME = ".Nanostick_Spectrum()"  # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME

    try:
        DATA_HOME = 'c:/users/robert/Research/CAPPA/Data/COSMICC/Nanosticks/June_2019/Passive_Results/'

        if os.path.isdir(DATA_HOME):
            os.chdir(DATA_HOME)
            print(os.getcwd())

            filename = 'd1_3%(v1)d_.dat' % {"v1": num}

            if glob.glob(filename):
                print(filename, "exists")

                data = np.loadtxt(filename, unpack=True)

                if data is not None:
                    from scipy.signal import find_peaks, peak_prominences, peak_widths

                    peaks, heights = find_peaks(-1.0 * data[1], height=5)
                    prominences = peak_prominences(
                        -1.0 * data[1], peaks)[0]  # compute peak prominences
                    widths = peak_widths(
                        -1.0 * data[1], peaks,
                        rel_height=0.5)[0]  # compute peak FWHM

                    #print(widths)

                    for i in range(0, len(peaks), 1):
                        if prominences[i] > 2:
                            Q = 500.0 * data[0][peaks[i]] / widths[i]
                            print("Peak:", data[0][peaks[i]],
                                  heights['peak_heights'][i], prominences[i],
                                  widths[i] / 500.0, Q)

                    del data
                else:
                    raise Exception
            else:
                raise Exception
        else:
            raise EnvironmentError
    except EnvironmentError:
        print(ERR_STATEMENT)
        print('Cannot find', DATA_HOME)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
Example #26
def get_signal_peaks_and_prominences(data):
    """ Get the signal peaks and peak prominences.

        :param data array: One-dimensional array.

        :return peaks array: The peaks of our signal.
        :return prominences array: The prominences of the peaks.
    """
    peaks, _ = sig.find_peaks(data)
    prominences = sig.peak_prominences(data, peaks)[0]

    return peaks, prominences
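A minimal call, assuming scipy.signal is imported as sig:

import numpy as np
import scipy.signal as sig

data = np.sin(np.linspace(0, 6 * np.pi, 300))
peaks, prominences = get_signal_peaks_and_prominences(data)
print(peaks, prominences)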
Example #27
def id_cand(f2ndir=f2ndir):

    npy_fils = [i for i in os.listdir(f2ndir) if i.endswith('fits.npy')]  #[1:]

    for fil in npy_fils:

        #Load npy file
        ar = np.load(f2ndir + '/' + fil)

        #Sub-band npy array
        sub_factor = 32
        ar_sb = np.nanmean(ar.reshape(-1, sub_factor, ar.shape[1]), axis=1)

        #Integrate to get absolute-valued and normalized timeseries and calculate 'snr'
        ar_ts = np.abs(ar_sb.sum(0) / np.max(ar_sb.sum(0)))
        fig = plt.figure(figsize=(20, 10))
        ax1 = fig.add_subplot(121)
        #plt.plot(ar_ts)

        #Smooth timeseries with Savitzky Golay filter
        ts_sg = ss.savgol_filter(ar_ts, 115, 9)[100:-100]
        ts_sg_snr = 10 * np.log10(np.max(ts_sg) / np.min(ts_sg))
        print('SNR: ', ts_sg_snr)
        print('Pulse File: ', fil)

        #Signal search for peaks, and normalized peak prominence
        ar_pks = ss.find_peaks(ts_sg)
        #print('Peaks: ', ar_pks[0])
        ar_pk_prom = ss.peak_prominences(ts_sg, ar_pks[0])[0]
        norm_pk_prom = ar_pk_prom / np.max(ar_pk_prom)
        peak_prom_snr = 10 * np.log10(
            np.max(norm_pk_prom) / np.min(norm_pk_prom))
        print('Prominence SNR: ', peak_prom_snr)
        plt.title('Time Series | Peak Prominence SNR: ' + str(peak_prom_snr))
        plt.plot(ts_sg)
        #print('Peak Prominences: ', norm_pk_prom)

        #Plot dynamic spectrum
        if ts_sg_snr:
            #ar_corr = ss.correlate(ar_sb, ar_sb, mode = 'full')
            ax2 = fig.add_subplot(122)
            plt.title('Dynamic Spectrum | Candidate Detected! | SNR: ' +
                      str(ts_sg_snr))
            plt.imshow(ar_sb, aspect='auto')
            plt.gca().invert_yaxis()
            plt.savefig(fil + '_' + str(ts_sg_snr) + '.png')

            #plt.show()
        else:
            ax2 = fig.add_subplot(122)
            plt.title('Dynamic Spectrum | No Candidate Found | SNR: ' +
                      str(ts_sg_snr))
            plt.imshow(ar_sb, aspect='auto')
Example #28
def filter_single_field(single_field, resolution):
    edge_sobel = filters.sobel(single_field)
    model = gaussian_kde(np.ravel(edge_sobel))
    x = np.linspace(0, 1.0, resolution)
    y = model.pdf(x)
    peaks, _ = find_peaks(y)
    prom, left_bases, right_bases = peak_prominences(y, peaks)
    cutoff = x[right_bases[-2]]
    loc = np.where(edge_sobel > cutoff)
    filtered_image = np.zeros_like(edge_sobel)
    filtered_image[loc] = 1
    labeled_image = measure.label(filtered_image)
    return filtered_image,labeled_image
Example #29
    def max_prominence(x: np.ndarray):
        try:
            peaks, _ = find_peaks(x)

            if len(peaks) > 1:
                prominence, _, _ = peak_prominences(x, peaks)
                out = np.max(prominence)
            else:
                out = 0
        except ValueError:
            out = np.nan

        return out
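A quick check, assuming find_peaks and peak_prominences are imported from scipy.signal:

import numpy as np
from scipy.signal import find_peaks, peak_prominences

print(max_prominence(np.sin(np.linspace(0, 6 * np.pi, 300))))  # ~2.0 for a 3-period sine
print(max_prominence(np.array([1.0, 2.0, 3.0])))  # 0: fewer than two peaks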
Example #30
def new_peak_prominences(distances):
    """
    Adapted calculation of prominence of peaks, based on the original scipy code
    
    Args:
        distances: dissimilarity scores
    Returns:
        prominence scores
    """
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        all_peak_prom = peak_prominences(distances, range(len(distances)))
    return all_peak_prom
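Asking for a prominence at every sample means scipy would normally warn about the non-peak positions; the suppressed warnings leave non-peaks with prominence 0. A small example, assuming peak_prominences is imported from scipy.signal:

import numpy as np
from scipy.signal import peak_prominences

distances = np.array([0.1, 0.5, 0.2, 0.8, 0.3])
print(new_peak_prominences(distances)[0])  # non-zero only at the local maxima (indices 1 and 3)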