def set_maximums_and_minimums(self):
        x, y, z = zip(*self.plot)
        self.x = numpy.array(x)
        self.y = numpy.array(y)

        self.base = peakutils.baseline(self.y, 2)
        self.indices = peakutils.indexes(numpy.subtract(self.y,self.base), thres=0.3, min_dist=10)

        self.polar_side_maximums = [self.plot[i] for i in self.indices]

        # invert the plot (reflect it about its maximum) so minima appear as peaks
        self.inverted_y_plot = self.y.max() - self.y

        self.min_base = peakutils.baseline(self.inverted_y_plot, 2)
        self.min_indices = peakutils.indexes(numpy.subtract(self.inverted_y_plot,self.min_base), thres=0.3, min_dist=10)

        self.polar_side_minimums = [self.plot[i] for i in self.min_indices]
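A note on the inversion trick above: peakutils.indexes only detects local maxima, so minima are found by flipping the signal first. A minimal self-contained sketch of the same idea (synthetic data and illustrative thresholds, not the class above):

import numpy as np
import peakutils

y = np.sin(np.linspace(0, 6 * np.pi, 300))                # synthetic signal
max_idx = peakutils.indexes(y, thres=0.3, min_dist=10)    # maxima
min_idx = peakutils.indexes(-y, thres=0.3, min_dist=10)   # minima = maxima of the negated signal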
Example #2
def get_peaks(bundle, ionname, mindist=1):
    """Calculates location of peaks and troughs in g(r)s.
    :Arguments:
        *bundle*
            bundle of sims
        *ionname*
            name of ion of interest
        *mindist*
            minimum distance between peaks and troughs; default = 1
    :Returns:
        *m*
            midpoints of bins
        *density*
            density histogram values
        *peaks*
            indexes of peak locations
        *mins*
            indexes of minimum locations
    """
    frames = []
    for s in bundle:
        for iondata in s['coordination/' + ionname.upper() + '/'].data:
            frames.append(s['coordination/' + ionname.upper() + '/'].data[iondata])
    m, density = coordination.gee(frames, binnumber=200)
    x = int(round(mindist / (m[1] - m[0])))
    peaks = peakutils.indexes(density, thres=.1, min_dist=x)
    mins = peakutils.indexes(-density, thres=.1, min_dist=x)
    return m, density, peaks, mins
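The min_dist argument above is derived by converting a physical distance into a number of bins via the bin width m[1] - m[0]. A quick illustration of that arithmetic with assumed numbers:

# assumed: 200 bins spanning 0-10 distance units (the span is an assumption for illustration)
bin_width = 10 / 200            # = 0.05, standing in for m[1] - m[0]
x = int(round(1 / bin_width))   # mindist = 1 -> 20 bins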
Example #3
File: algo2.py Project: edawine/fatools
def find_raw_peaks(data, params, offset, expected_peak_number=0):
    """
    params.min_dist
    params.norm_thres
    params.min_rfu
    params.max_peak_number
    """
    #print("expected:", expected_peak_number)
    # cut and pad data to overcome peaks at the end of array
    obs_data = np.append(data[offset:], [0,0,0])
    if False: #expected_peak_number:
        min_dist = params.min_dist
        indices = []
        norm_threshold = params.norm_thres
        expected_peak_number = expected_peak_number * 1.8
        while len(indices) <= expected_peak_number and norm_threshold > 1e-7:
            indices = indexes( obs_data, norm_threshold, min_dist)
            print(len(indices), norm_threshold)
            norm_threshold *= 0.5
    elif False:
        indices = indexes( obs_data, params.norm_thres, params.min_dist)

    indices = indexes( obs_data, 1e-7, params.min_dist)
    cverr(5, '## indices: %s' % str(indices))
    cverr(3, '## raw indices: %d' % len(indices))

    if len(indices) == 0:
        return []

    # normalize indices
    if offset > 0:
        indices = indices + offset

    # filter peaks by minimum rfu, and by maximum peak number after sorting by rfu
    peaks = [Peak(int(i), int(data[i])) for i in indices
             if data[i] >= params.min_rfu and params.min_rtime < i]
    #peaks = sorted( peaks, key = lambda x: x.rfu )[:params.max_peak_number * 2]

    #import pprint; pprint.pprint(peaks)
    #print('======')

    if expected_peak_number:
        peaks.sort( key = lambda x: x.rfu, reverse = True )
        peaks = peaks[: round(expected_peak_number * 2)]
        peaks.sort( key = lambda x: x.rtime )

    cverr(3, '## peak above min rfu: %d' % len(peaks))

    return peaks
Example #4
File: wave_curve.py Project: OpenWIM/pywim
from typing import List

def select_curve_by_threshold(
    signal_data: pd.Series,
    threshold: float, delta_x: int
) -> List[pd.Series]:
    """
    Extract one curve segment around each detected axle peak, widening the
    window outwards from the peak until the signal drops below the threshold.
    """
    Δx = delta_x

    indexes = peakutils.indexes(signal_data, thres=0.5, min_dist=30)
    curves = []

    for ind_axle in indexes:
        i_start = ind_axle - Δx
        i_end = ind_axle + Δx

        p_start = Δx
        while i_start >= p_start:
            i_start -= 1
            if signal_data.iloc[i_start] <= threshold:
                break

        p_end = signal_data.size - Δx
        while i_end <= p_end:
            i_end += 1
            if signal_data.iloc[i_end] <= threshold:
                break
        curves.append(signal_data.iloc[i_start - Δx:i_end + Δx])

    return curves
def find_activity_intervals(C, Npeaks=5, tB=-3, tA=10, thres=0.3):
    # TODO: document
    import peakutils
    K, T = np.shape(C)
    L = []
    for i in range(K):
        if np.sum(np.abs(np.diff(C[i, :]))) == 0:
            L.append([])
            print('empty component at: ' + str(i))
            continue
        indexes = peakutils.indexes(C[i, :], thres=thres)
        srt_ind = indexes[np.argsort(C[i, indexes])][::-1]
        srt_ind = srt_ind[:Npeaks]
        L.append(srt_ind)

    LOC = []
    for i in range(K):
        if len(L[i]) > 0:
            interval = np.kron(L[i], np.ones(int(np.round(tA - tB)), dtype=int)) + \
                np.kron(np.ones(len(L[i]), dtype=int), np.arange(tB, tA))
            interval[interval < 0] = 0
            interval[interval > T - 1] = T - 1
            LOC.append(np.array(list(set(interval))))
        else:
            LOC.append(None)

    return LOC
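The np.kron construction above expands each detected peak into a window of frame offsets in one vectorized step. A small standalone illustration of what it produces:

import numpy as np

peaks = np.array([10, 50])
tB, tA = -3, 10
# repeat each peak (tA - tB) times, then add the offsets tB..tA-1 to every copy
interval = np.kron(peaks, np.ones(tA - tB, dtype=int)) + \
    np.kron(np.ones(len(peaks), dtype=int), np.arange(tB, tA))
# -> [7, 8, ..., 19, 47, 48, ..., 59]: one window per peak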
Example #6
def peak_finder(spectrum, energy):
    '''
    PEAK_FINDER will search for peaks within a certain range determined by the
    Energy given. It takes a Spectra file and an Energy value as input. The
    energy range to look in is given by the Full-Width-Half-Maximum (FWHM).
    If more than one peak is found in the given range, the peak with the
    highest amount of counts will be used.
    '''
    e0 = spectrum.energy_cal[0]
    eslope = spectrum.energy_cal[1]
    energy_axis = e0 + eslope*spectrum.channel

    peak_energy = []
    # rough estimate of fwhm.
    fwhm = 0.05*energy**0.5
    fwhm_range = 1

    # peak search area
    start_region = np.flatnonzero(energy_axis > energy - fwhm_range * fwhm)[0]
    end_region = np.flatnonzero(energy_axis > energy + fwhm_range * fwhm)[0]
    y = spectrum.data[start_region:end_region]
    indexes = peakutils.indexes(y, thres=0.5, min_dist=4)
    tallest_peak = []
    if indexes.size == 0:
        peak_energy.append(int((end_region - start_region) / 2) + start_region)
    else:
        for i in range(indexes.size):
            spot = spectrum.data[indexes[i]+start_region]
            tallest_peak.append(spot)
        indexes = indexes[np.argmax(tallest_peak)]
        peak_energy.append(int(indexes+start_region))
    peak_energy = float(energy_axis[peak_energy[0]])
    return(peak_energy)
Example #7
def detect_start_end_times(pattern_wav, recording_wav, sr, overlap):
    """Find matches for the start/end pattern within the recorded audio"""

    # Compute the STFT of the recordings
    specgram1 = numpy.array(stft.spectrogram(pattern_wav, overlap=overlap))
    specgram2 = numpy.array(stft.spectrogram(recording_wav, overlap=overlap))

    # Restrict the spectrum to the frequency band occupied by the start/end pattern
    pattern = abs(specgram1[7:16,:])
    recording = abs(specgram2[7:16,:])

    # Search for matches of the pattern in the input recording and return a confidence score
    # for each time position of the input recording
    confidence = match_template(recording, pattern)

    # Search for peaks in the confidence score, and choose the two highest peaks
    # Minimum distance between consecutive peaks is set to 1 second
    peaks = peakutils.indexes(confidence[0], thres=0, min_dist=seconds_to_samples(1, overlap, sr))
    peaks = sorted(peaks, key=lambda p: -confidence[0,p])[:2]

    #TODO: throw errors instead of printing, if necessary
    if len(peaks) < 1:
        print("Could not detect a starting beep!")
    elif len(peaks) < 2:
        print("Could only detect one starting beep!")
    else:
        start, end = sorted(peaks)
        print("Initial beep detected at " + "%.3f" % samples_to_seconds(start, overlap, sr) + " seconds.")
        print("Final beep detected at " + "%.3f" % samples_to_seconds(end, overlap, sr) + " seconds.")
    # note: start and end are only bound when both beeps were found
    return samples_to_seconds(start, overlap, sr), samples_to_seconds(end, overlap, sr)
def generate_standard_curve(file, ladder):

	#Finds peaks in ladder lane and calculates STD curve.
	#This can then be used to calculate the molecular weight of samples


	x = pandas.read_csv(file, delim_whitespace=True)
	x = x.iloc[1:]
	np_array = np.array(x["Y"])
	base = peakutils.baseline(np_array,2) #baseline is computed but not subtracted before peak calling
	indexes = peakutils.indexes(np_array,thres=0.1, min_dist=25) #Adjust these if peak calling fails for ladder lane!


	#plt.plot(np_array)
	#plt.show()

	print(indexes)

	if len(indexes) == len(ladder): #Proceed only if we found the expected number of peaks

		max_STD =  len(np_array)
		rf = indexes/len(np_array)
		ladder = np.array(ladder)
		ladder = np.log(ladder) #gets logs for log(molecular weights) vs distance linear regression.
		
		slope, intercept, r_value, pvalue, std_err = stats.linregress(rf,ladder) #Do some linear regression to get line statistics

		return [slope, intercept, r_value, max_STD]

	else:

		return "Error"
Example #9
File: kernel.py Project: nicktgr15/sac
def checkerboard_matrix_filtering(similarity_matrix, kernel_width, kernel_type="default", thresh=0.25):

    """
    Moving the checkerboard matrix over the main diagonal of the similarity matrix one sample at a time.

    :param kernel_type:
    :param thresh:
    :param similarity_matrix:
    :param kernel_width: the size of one quarter of the checkerboard matrix
    :return: peaks and convolution values
    """

    checkerboard_matrix = get_checkerboard_matrix(kernel_width, kernel_type)

    # The values calculated in this step start at the 'kernel_width' position and end
    # at length - kernel_width
    d = []
    for i in range(0, similarity_matrix.shape[0] - 2 * kernel_width):
        base = similarity_matrix[i:i + kernel_width * 2, i:i + kernel_width * 2]
        d.append(np.sum(np.multiply(base, checkerboard_matrix)))

    # The missing values from 0 to kernel_width are calculated here
    top_left_d = []
    for i in range(0, kernel_width):
        base = similarity_matrix[0:i + kernel_width, 0:i + kernel_width]
        top_left_d.append(np.sum(np.multiply(base, checkerboard_matrix[kernel_width - i:, kernel_width - i:])))

    # The missing kernel_width values at the bottom right are set to 0
    convolution_values = top_left_d + d + [0 for i in range(0, kernel_width)]

    # peaks = find_peaks_cwt(convolution_values, np.arange(1, peak_range))
    peaks = peakutils.indexes(convolution_values, thres=thresh)

    peaks = [0] + list(peaks) + [len(convolution_values)-1]
    return peaks, convolution_values
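The snippet depends on a project-specific get_checkerboard_matrix helper that is not shown. A plausible minimal construction (an assumption for illustration, not necessarily the project's implementation) blows a 2x2 sign pattern up to the kernel size with np.kron:

import numpy as np

def get_checkerboard_matrix(kernel_width, kernel_type="default"):
    # hypothetical stand-in: a 2*kernel_width x 2*kernel_width checkerboard
    # with +1 on the diagonal quadrants and -1 on the off-diagonal ones
    return np.kron(np.array([[1.0, -1.0], [-1.0, 1.0]]),
                   np.ones((kernel_width, kernel_width)))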
def find_activity_intervals(C, Npeaks=5, tB=-5, tA=25, thres=0.3):

    import peakutils

    K, T = np.shape(C)
    L = []
    for i in range(K):
        indexes = peakutils.indexes(C[i, :], thres=thres)
        srt_ind = indexes[np.argsort(C[i, indexes])][::-1]
        srt_ind = srt_ind[:Npeaks]
        L.append(srt_ind)

    LOC = []
    for i in range(K):
        if len(L[i]) > 0:
            interval = np.kron(L[i], np.ones(tA - tB, dtype=int)) + np.kron(
                np.ones(len(L[i]), dtype=int), np.arange(tB, tA)
            )
            interval[interval < 0] = 0
            interval[interval > T - 1] = T - 1
            LOC.append(np.array(list(set(interval))))
        else:
            LOC.append(None)

    return LOC
def get_peaks(file, thres_num, minimum, max_STD):

	#This function finds the peaks in the data given to it.
	#Only used on sample lanes, not the ladder lane.
	#Variables thres_num and minimum can be adjusted if necessary.

	x= pandas.read_csv(file, delim_whitespace=True)

	x = x.iloc[1:] #First row is bad so remove

	np_array = np.array(x["Y"])

	plt.plot(np_array)
	plt.show()

	base = peakutils.baseline(np_array,2) #compute baseline (note: it is not actually subtracted before peak calling)

	indexes = peakutils.indexes(np_array,thres=thres_num, min_dist=minimum) # find peaks


	#print indexes

	rf = indexes/max_STD

	return rf
Example #12
def get_simple_vals(eda_array):
    """Takes in an array of eda values, returns a list containing one value for the mean, one for the sum, and one for the peak frequency."""
    if len(eda_array)==0:
        return [0,0,0]
    eda_array = np.array(eda_array)
    x = np.linspace(1, len(eda_array), num=len(eda_array))
    y = eda_array
    # smooth the signal with a Gaussian filter
    y = filters.gaussian_filter(y, 30)
    indexes = peakutils.indexes(y, thres=np.mean(y), min_dist=10)
   
    
    # print("indexes......")
    # print("length:" + str(len(indexes)))
    # print(indexes)
    # #print(x[indexes])
    # #print(y[indexes])

    if len(indexes) == 0:
        return [0,0,0]
    timestamp = datetime.datetime.now()
    print(timestamp)
    mean_ppa = sum(y)/len(y)
    sum_ppa = sum(y)
    freq = len(indexes)

    the_features = [mean_ppa, sum_ppa, freq]

    return the_features
Example #13
def detect_weel_times(LSsn, decimation = 8):
    """
    Return the passby times of the train axes
    
    param:
    ------
    LSsn: Signal
        LS signal
    decimation: int pow of 2
        decimate and filter the signal
    
    return:
    ------
    - tPeaks: np.array
        passby times of the train axes
    """
    s = LSsn.decimate(decimation)
    y = np.array(s)
    t = s.times()
    # define the minimal possible time between two axles
    maxSpeed = 200/3.6
    axleDistance = 1.5
    dt = axleDistance/maxSpeed
    # minimal distance in frames
    min_dist =  int(np.round(dt * s.fs))
    # use the peakutils module to detect maxima
    indexes = peakutils.indexes(y, thres = 0.05, min_dist = min_dist)
    tPeaks = peakutils.interpolate(t, y, ind=indexes)
    print('Minimal time interval between maxima is: ', dt, ', which is equivalent to ', min_dist, ' samples')
    return tPeaks
Example #14
    def test_octave_findpeaks_equal_matlab_findpeaks_minpeakheight_minpeakdistance(self):
        """ Check that Octave findpeaks mimics well the original MatLab findpeaks, with minpeakheight and minpeakdistance filter. """
        # Find peaks on this vector.
        vector = [
            0.199196234460946, 0.150971091401259, 0.066830193587158, -0.007815333052105, -0.044616654524390, -0.055795361348227, -0.076137152400651, -0.118170367279712, -0.163440493736020, -0.190516609994619, -0.176483713717207, -0.126265512667095,
            -0.085683530051180, -0.070626701579825, -0.056650272247038, -0.018164912522573, 0.042641790158567, 0.084300842806316, 0.091380642181674, 0.086612641403415, 0.076804338682254, 0.065114059315175, 0.061730123648466, 0.062054559470569,
            0.037808369894233, -0.007903466706924, -0.022105492056923, 0.022875099403569, 0.100256509561853, 0.161610966145234, 0.188078783724511, 0.179791428716887, 0.127483188979423, 0.037101235419981, -0.061551863605861, -0.134872789642774,
            -0.170882136762535, -0.180232519836007, -0.193873842670550, -0.220596208762850, -0.217710728542538, -0.154566709841264, -0.052288376793704, 0.024309953763214, 0.036995233638215, 0.027385387267975, 0.034756425571608, 0.044538621477845,
            0.048179094187324, 0.062762787751685, 0.093756722731978, 0.128746079656537, 0.140220257694886, 0.107177963642096, 0.064168137422344, 0.049034449543362, 0.043561872239351, 0.037112836659310, 0.049484512152412, 0.075511915362878,
            0.082621740035262, 0.059833540054286, 0.025160333364946, -0.011362411779154, -0.059885473889260, -0.116916348401991, -0.160033412094328, -0.186277401172449, -0.227970985597943, -0.293012110994312, -0.316846014874940, -0.235793951154457,
            -0.071213154358508, 0.087635348114046, 0.166528547043995, 0.156622093806762, 0.114536824444267, 0.098795472321648, 0.106794539180316, 0.123935062619566, 0.138240918685253, 0.120041711787775, 0.065711290699853, -0.020477124669418,
            -0.121124845572754, -0.163652703975820, -0.088146112206319, 0.062253992836015, 0.185115302006708, 0.251310089224804, 0.275507327595166, 0.240646546675415, 0.144130827133559, 0.028378284476590, -0.050543164088393, -0.082379193202235,
            -0.108933261445066, -0.149993661967355, -0.188079227296676, -0.184552832746794
        ]
        # Set the negative values to 0
        # so they don't interfere in the peak search,
        # as Octave findpeaks doesn't ignore negative values by default
        # (in fact it can search for local minima with the DoubleSided mode).
        # v = np.array(vector)
        # for i in range(0, len(v)):
        #     if v[i] < 0:
        #         v[i] = 0

        # 'MinPeakHeight', 0.05, 'MinPeakDistance', 10, 'MinPeakWidth', 0
        loc = peakutils.indexes(np.array(vector), thres=0.15, min_dist=10)
        print(loc)
        self.assertEqual(
            loc.tolist(),
            [19-1, 31-1, 53-1, 75-1, 91-1])
def get_frequences(rr,time_axis):
    # -- Normalize non-normal peaks
    rr_peak_index = peakutils.indexes(rr,thres=0.1, min_dist=5) # get non-normal peaks
    time_nn, rr_nn = [], []

    # find non-normal's neighbours
    for i in range(len(rr)):
        if i in rr_peak_index:
            # try-except for boundary instances
            try:
                time_nn.append(time_axis[i-1])
                rr_nn.append(rr[i-1])
            except:
                pass
            try:
                time_nn.append(time_axis[i+1])
                rr_nn.append(rr[i+1])
            except:
                pass

    # interpolate non-normal peaks
    interpolate_nn = interpolate.interp1d(time_nn,rr_nn)
    nn_rr = interpolate_nn([element for i,element in enumerate(time_axis) if i in rr_peak_index])
    for i in range(len(nn_rr)):
        rr[rr_peak_index[i]] = nn_rr[i]

    # -- Prepare RR
    time_grid = np.arange(np.min(time_axis),np.max(time_axis),0.25) # time grid with freq step 1/INTERPOLATION_FREQ

    # interpolate data
    interpolate_rr = interpolate.interp1d(time_axis, rr, kind='cubic')
    interpolated_rr = interpolate_rr(time_grid)

    # remove linear trend along axis from data
    detrend_rr = interpolated_rr - np.mean(interpolated_rr)

    # use Tukey window for smoothing curves
    # window = signal.tukey(len(detrend_rr),alpha=0.25)
    import tukey
    window = tukey.tukey(len(detrend_rr),alpha=0.25)
    detrend_rr = (window*detrend_rr)/1000

    # -- Obtain spectrum
    spectr = (np.absolute(np.fft.fft(detrend_rr,2048))) # zero padding to 2 ^ 11
    spectr = spectr[0:len(spectr)//2] # use only the positive half

    freqs = np.linspace(start=0, stop=INTERPOLATION_FREQ/2, num=len(spectr), endpoint=True) # frequency axis

    # plot
    # plt.plot(freqs[:len(spectr)/4],spectr[:len(spectr)/4])
    # plt.show()

    # -- Return TP, LF, HF and LF/HF
    LF = get_spectrum_power(spectr, freqs, LF_MIN, LF_MAX)
    HF = get_spectrum_power(spectr, freqs, HF_MIN, HF_MAX)
    TP = get_spectrum_power(spectr, freqs, 0., HF_MAX)

    return(TP,LF,HF,LF/HF)
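get_spectrum_power is not defined in this snippet; a minimal sketch of what such a band-power helper usually looks like (an assumption, not the original implementation):

import numpy as np

def get_spectrum_power(spectr, freqs, f_min, f_max):
    # hypothetical helper: integrate the spectrum over the band [f_min, f_max)
    band = (freqs >= f_min) & (freqs < f_max)
    return np.trapz(spectr[band], freqs[band])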
Example #16
def calibration_check(spectrum):
    '''
    calibration_check will search for certain peaks that are expected to occur
    in every measured spectra. The energies it searches for are based on peaks
    that occur in background radiation. Once these peaks are found, it
    compares the energy of that peak to the actual energy the peak should be
    at. This check is based on expected detector resolution and if the energy
    deviates too far from the expected value (beyond half a FWHM), then
    calibration_check sends a message indicating a fix is needed. Only Spectra
    are taken as input.
    '''
    E0 = spectrum.energy_cal[0]
    Eslope = spectrum.energy_cal[1]
    energy_axis = E0 + Eslope*spectrum.channel

    peak_channel = []
    found_energy = []
    offsets = []
    skip = 0
    fix = 0
    for energy in energy_list:
        # rough estimate of fwhm.
        fwhm = 0.05*energy**0.5
        energy_range = 0.015*energy

        # peak gross area

        start_region = np.flatnonzero(energy_axis > energy - energy_range)[0]

        end_region = np.flatnonzero(energy_axis > energy + energy_range)[0]
        y = spectrum.data[start_region:end_region]
        indexes = peakutils.indexes(y, thres=0.5, min_dist=4)
        tallest_peak = []
        if indexes.size == 0:
            print('peak not found')
            offsets.append(np.nan)
            skip += 1
        else:
            for i in range(indexes.size):
                spot = spectrum.data[indexes[i]+start_region]
                tallest_peak.append(spot)
            indexes = indexes[np.argmax(tallest_peak)]
            peak_channel.append(int(indexes+start_region))
            found_energy.append(energy)
            difference = abs((energy -
                              float(energy_axis[int(indexes+start_region)])))
            offsets.append(difference)
            if difference > 0.5*fwhm:
                fix += 1
    if skip > 4:
        message = 'error'
    elif fix >= 4:
        message = 'fix'
    else:
        message = 'fine'
    offsets.append(message)
    return(peak_channel, found_energy, message, offsets)
    def find_peak_indexes(self):
        indexes = peakutils.indexes(self.intensity, thres=self.threshold,
                                    min_dist=self.min_distance)

        self.centroid_indexes = indexes[
            np.where(self.intensity[indexes] > self.min_val)]

        self.centroids = self.wavelength[self.centroid_indexes]
        self.amplitudes = self.intensity[self.centroid_indexes]
        self.sigmas = np.ones(len(self.wavelength[self.centroid_indexes]))
Example #18
    def process_frames(self, data):
        data = data[0]
        # filter to smooth noise
        data = savgol_filter(data, 51, 3)
        PeakIndex = pe.indexes(data, thres=self.parameters['thresh'],
                               min_dist=self.parameters['min_distance'])

        PeakIndexOut = np.zeros(data.shape)
        PeakIndexOut[PeakIndex] = 1
        return PeakIndexOut
def peak_finder_pro(measurement):
    E0 = measurement.energy_cal[0]
    Eslope = measurement.energy_cal[1]
    energy_axis = E0 + Eslope*measurement.channel
    increment = 25
    counts = measurement.data
    peaks_found = []
    plop = []
    start = 0
    end = start + increment
    count = 0
    for energy in energy_axis:
        count += 1
        if end >= len(measurement.channel):
            break
        else:
            count_total = sum(counts[start:end])
            average_range = count_total / (len(counts[start:end]))
            average_ends = (counts[start] + counts[end]) / 2
            threshold = 1.1 * average_ends
            if average_range > threshold:
                counts_range = counts[start:end]
                middle = int((start + end) / 2)
                average_fwhm = 0.05 * energy_axis[middle]**0.5
                average_fwhm_dist = int(average_fwhm / Eslope)
                indexes = p.indexes(np.log(counts_range),
                                    thres=0.1, min_dist=average_fwhm_dist)
                for index in indexes:
                    plop.append(index+start)
                    uncertainty = average_range**0.5
                    significance = ((counts[start+index]-average_range) /
                                    uncertainty)
                    if significance > 4:
                        peaks_found.append(index + start)
                    else:
                        pass
                start += 25
                end = start + increment
            else:
                start += 1
                end = start + increment
    return peaks_found
    # NOTE: the debug output below is unreachable because it follows the return
    print(peaks_found)
    print(count)
    print(len(peaks_found))
    plt.figure()
    plt.title('Filtered Peaks')
    plt.plot(measurement.channel, np.log(counts))
    plt.plot(measurement.channel[peaks_found],
             np.log(counts[peaks_found]), 'ro')
    plt.figure()
    plt.title('All Peaks')
    plt.plot(measurement.channel, np.log(counts))
    plt.plot(measurement.channel[plop], np.log(counts[plop]), 'bo')
    plt.show()
def compute_segments_from_predictions(predictions, beat_times):
    """
    Computes the segment times from a prediction curve and the beat times
    using peak picking.
    """
    predictions = np.squeeze(predictions)
    predictions = post_processing(predictions)
    peak_loc = peakutils.indexes(predictions, min_dist=8, thres=0.05)
    segment_times = beat_times[peak_loc]

    return segment_times
Example #21
File: process.py Project: idreyn/robin
def echo_start_index(sample):
	MIN_DIST = 500
	THRESHOLD = 0.2
	indices = peakutils.indexes(
		sample,
		min_dist=MIN_DIST,
		thres=THRESHOLD
	)
	[p0,p1] = indices[0:2]
	constrained = moving_average(np.abs(sample[p0:p1]), 101)
	return np.where(constrained == min(constrained))[0][0]
Example #22
def do_plot1d_peakutil():
    plot.cla()
    y=specdata[1]
    plot.plot(x,y)
    plot.xlim([startfreq*1e-6,stopfreq*1e-6])
    plot.title('Spectrum',fontsize=30)
    plot.ylabel("Amplitude(dB)",fontsize=30)
    plot.xlabel("Frequency(MHz)",fontsize=30)
    indexes=peakutils.indexes(y+80,thres=0.3,min_dist=100)
    pplot(x,y,indexes)
    plot.show(False)
    plot.pause(0.001)
Example #23
File: ENNet.py Project: etteerr/NeuronNet
 def _calculateSpikeEventtimes(data, dt):
     """
     returns all spike events for a trace
     :param data:
     :return:
     """
     #import pylab as pl
     if len(data) < 1:
         return []
     idxs = pu.indexes(np.array(data), thres=0.1, min_dist=100)
     timeEvents = idxs * dt
     return timeEvents
    def find_peak_and_frequency(self, power_max, xf_max):
        """
        Peak detection with the PeakUtils library
        :param power_max: ndarray - maximum power spectrum
        :param xf_max: ndarray - frequency parts in an equally interval
        :return: peak: ndarray - a pair of power and frequency
        """

        indexes = peakutils.indexes(power_max, thres=0.9, min_dist=30)
        peak = np.hstack((xf_max[indexes], power_max[indexes]))

        return peak
Example #25
    def test_peaks(self):
        y = load('noise')[:, 1]
        filtered = scipy.signal.savgol_filter(y, 21, 1)
        n_peaks = 8

        idx = peakutils.indexes(filtered, thres=0.08, min_dist=50)

        # NOTE: range(idx.size, 1) is empty, so these assertions never execute
        for p in range(idx.size, 1):
            self.assertGreater(idx[p], 0)
            self.assertLess(idx[p], idx.size - 1)
            self.assertGreater(idx[p], idx[p - 1])

        self.assertEqual(idx.size, n_peaks)
Example #26
def fast_find_peaks(ys, xs, **kw):
    try:
        from peakutils import indexes
    except ImportError:
        from pyface.message_dialog import warning
        warning(None, 'PeakUtils required to identify and label peaks.\n\n'
                      'Please install PeakUtils. From commandline use "pip install peakutils"')
        return [], []

    ys, xs = asarray(ys), asarray(xs)
    indexes = indexes(ys, **kw)
    peaks_x = interpolate(xs, ys, ind=indexes)
    return peaks_x, ys[indexes]
Example #27
def interpolate(x, y, ind=None, width=10, func=None):
    """
    modified from peakutils to handle edge peaks

    Tries to enhance the resolution of the peak detection by using
    Gaussian fitting, centroid computation or an arbitrary function on the
    neighborhood of each previously detected peak index.

    Parameters
    ----------
    x : ndarray
        Data on the x dimension.
    y : ndarray
        Data on the y dimension.
    ind : ndarray
        Indexes of the previously detected peaks. If None, indexes() will be
        called with the default parameters.
    width : int
        Number of points (before and after) each peak index to pass to *func*
        in order to increase the resolution in *x*.
    func : function(x,y)
        Function that will be called to detect a unique peak in the x,y data.

    Returns
    -------
    ndarray :
        Array with the adjusted peak positions (in *x*)
    """

    out = []
    try:
        if func is None:
            from peakutils import gaussian_fit
            func = gaussian_fit

        if ind is None:
            from peakutils import indexes
            ind = indexes(y)

        for slice_ in (slice(max(0, i - width), min(i + width, y.shape[0])) for i in ind):
            try:
                fit = func(x[slice_], y[slice_])
                out.append(fit)
            except Exception:
                pass
    except ImportError:
        from pyface.message_dialog import warning
        warning(None, 'PeakUtils required to identify and label peaks.\n\n'
                      'Please install PeakUtils. From commandline use "pip install peakutils"')

    return array(out)
Example #28
 def find_peaks(self, data):
     """
     Description: find index of peaks from data sequence
     Input: a data sequence to calculate
     Output: index list of peaks
     """
     '''
     ======================== *** Not fine-tuned *** ========================
     ======================== *** Need to find suitable method *** ========================
     '''
     threshold = 0.3
     min_dist = 2
     indexes = peakutils.indexes(np.asarray(data), threshold, min_dist)
     return indexes
Example #29
    def _track_points(self, search_indices, direction):
        """
        The actual point tracking routine.
        Should only be run via the start_tracking function.
        Calls itself recursively and walks along one branch in both directions,
        starting from given start_coords and detects peaks or dips in a given span.
        For the next call, the search_indices are shifted depending on the distance
        between the last found points.
        Recursion stops as soon as the boundary of the dataset is reached in both directions.
        Detected points are added to the hidden _points_x and _points_y arrays.
        """
        
        # Test if still in data
        if (search_indices[0]<0 or search_indices[0]>=len(self.xdata) or
            search_indices[1]-self.span/2<0 or search_indices[1]+self.span/2>=len(self.ydata)):
            print "Reached boundary of dataset"
            return
        
        search_data = self._sig * self.data[int(search_indices[0]), int(search_indices[1]-self.span/2) : int(search_indices[1]+self.span/2)]

        indexes_found = peakutils.indexes(search_data, thres=self._thres, min_dist=self._min_dist)

        # add found peaks to arrays, and repeat recursively with shifted search_indices
        if indexes_found.size>=1:
            y_new = search_indices[1] - int(self.span/2) + indexes_found[0]
            self._points_y = np.append(self._points_y,y_new)
            self._points_x = np.append(self._points_x,search_indices[0])
            
            # If more than one peak found, print it and take the first one
            if indexes_found.size>1:
                print "Found more than one peak in trace " + str(search_indices[0])
                print "First found peak added"
                
        # If no peak found, print it and continue with next current trace        
        else:
            print "No peak found in trace " + str(search_indices[0])
            # Add distance between two last found peaks to shift
            if len(self._points_y) >= 2:
                y_new = self._points_y[-1] + (self._points_y[-1] - self._points_y[-2])
            else:
                y_new = search_indices[1]
             
        # shift the search interval (if not the first point) and search in the shifted interval
        if direction==0:
            self._track_points([search_indices[0]+1,search_indices[1]], direction=1)
            self._track_points([search_indices[0]-1,search_indices[1]], direction=-1)
        else:
            search_indices[1] = search_indices[1] + (y_new - search_indices[1])
            self._track_points([search_indices[0]+direction,search_indices[1]], direction=direction)
Example #30
 def test_octave_findpeaks_equal_matlab_findpeaks_minpeakheight_3(self):
     """ Check that Octave findpeaks mimics well the original MatLab findpeaks, with minpeakheight filter. """
     # Find peaks on this vector.
     vector = [
         0.000000000000002, 4.304968393969253, 2.524429995956715,
         1.362350996472030, 8.651011827706597, 5.355103964053995,
         4.166135802128525, 7.111434648523146, 41.368426443580518,
         13.753049599045664, 11.652130301046128
     ]
     # 'MinPeakHeight', 22
     loc = peakutils.indexes(vector)
     print(loc)
     self.assertEqual(
         loc.tolist(),
         [9-1])
            else:
                br_phase_at_peak = np.nan
                br_rate_at_peak = np.nan

            data_loaded['br_phase_at_peak'].append(br_phase_at_peak)
            data_loaded['br_rate_at_peak'].append(br_rate_at_peak)

            ## Perform the peak_detection (Vertical)
            thresholdV = 3 * np.std(
                clipV[(ib - 100):ib]
            )  #Set the threshold as standard deviation movement 100ms before stim
            prior_meanV = np.mean(
                clipV[(ib - 100):ib])  #Mean position 100ms before stim

            indices = peakutils.indexes(clipV, thres=thresholdV, min_dist=10)
            if len(indices) == 0 or max(indices) < ib:
                max_deflectionV = np.nan
                max_deflection_timeV = np.nan
            else:
                first_peakV = indices[np.where(indices > ib)[0][0]]
                max_deflectionV = clipV[first_peakV] - prior_meanV
                max_deflection_timeV = first_peakV - ib

            data_loaded['max_deflection_V'].append(max_deflectionV)
            data_loaded['max_deflection_timeV'].append(max_deflection_timeV)

    #%%
    df = pd.DataFrame(data=data_loaded)
    df['stim_power'] = df['stim_duration'] * df['stim_current'] * 2.1 / 1000
    df = df.round({'stim_power': 1, 'stim_current': 0})
Example #32
 def getFFT(self, signal, fs):
     x, y = self.calcFFT(signal, fs)
     indexes = peakutils.indexes(y, thres=0.2, min_dist=100)
     print("Os picos detectados são: ")
     print(indexes)
     return indexes
Example #33
File: __init__.py Project: singerng/vivid2
smoothed_saturations = smooth(
    saturations, SMOOTHING_LEN * SAMPLING_FREQ * 2 // STFT_SEG_LEN)

freq_perc = np.exp(
    np.linspace(np.log(LOW_FREQUENCY), np.log(HIGH_FREQUENCY), freqs.size))
volumes_perc = interp1d(log_freqs[1:],
                        volumes[1:, :],
                        fill_value='extrapolate',
                        kind='quadratic',
                        axis=0)(np.log(freq_perc))
normalized_volumes_perc = zscore(volumes_perc, axis=1)
saturations_perc = normalized_volumes_perc.sum(axis=0)
smoothed_saturations_perc = smooth(
    saturations_perc, SMOOTHING_LEN * SAMPLING_FREQ * 2 // STFT_SEG_LEN)

indexes = peakutils.indexes(smooth(
    np.gradient(smoothed_saturations_perc),
    SMOOTHING_LEN * 10 * SAMPLING_FREQ * 2 // STFT_SEG_LEN),
                            min_dist=100)

# plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=100)
# plt.title('STFT Magnitude')
# plt.ylabel('Frequency [Hz]')
# plt.xlabel('Time [sec]')
# plt.show()

# backend.play_sound(file)

# while True:
#     print(backend.get_position())
#     time.sleep(.1)
Example #34
File: sig_mov.py Project: crcali/BrainBot
def signal_movement(signal, speed_rate, bins_size):
    '''
    use the number of peaks as an indicator of speed
    '''
    speed = len(pku.indexes(signal, speed_rate, bins_size / 20))
    return speed
Example #35
cols = ['x', 'y']
for fname in os.listdir(path_dir):
    print(fname)
    file_path = (os.path.join(path_dir, fname))
    df = pd.read_csv(file_path,
                     sep='\t',
                     header=4,
                     engine='python',
                     names=cols)
    df.sort_values(by='x', ascending=True, inplace=True)
    df.drop_duplicates(inplace=True)
    df['y_invert'] = df['y'].mean() - df.y
    #    df.plot('x','y_invert')
    base = peakutils.baseline(df.y_invert, 2)
    indexes = peakutils.indexes(df.y_invert - base,
                                thres=0.00001,
                                min_dist=200)
    #    print(indexes)
    #    pplot(df.x,df.y_invert, indexes)
    for i in indexes:
        if i[(i < 100) & (i > 5)]:
            peak_x = i
            x2, y2 = df.x, df.y_invert
            skewness = stats.skew(y2)
            tck = interpolate.splrep(
                x2, y2, s=.00000001
            )  # s =m-sqrt(2m) where m= #datapts and s is smoothness factor
            x_ = np.arange(np.min(x2), np.max(x2), 0.003)
            y_ = interpolate.splev(x_, tck, der=0)

            HM = (np.max(y_) - np.min(y_)) / 2
Example #36
import numpy as np
import peakutils
import matplotlib.pyplot as plt

t, T7, T8 = np.genfromtxt('dynamisch200.txt', unpack=True)

indexesmax = peakutils.indexes(T7, thres=0.02 / max(T7), min_dist=50)
indexesmin = peakutils.indexes(-T7, thres=0.02 / max(-T7), min_dist=50)

plt.plot(t, T7, 'b-', label='Measured values')
plt.plot(t[indexesmax], T7[indexesmax], 'rx')
plt.plot(t[indexesmin], T7[indexesmin], 'rx')
#plt.show()

print(indexesmax)
print(indexesmin)
Example #37
zero = np.ones(400) * -0.5

one_t = np.arange(400) / Fs
one = np.ones(400) * 0.5

preamble_t = np.arange(800 * 4) / Fs
preamble = np.concatenate((zero, zero, zero, one, one, one, zero, one))

bit_t = np.arange(800) / Fs
bit = np.concatenate((one, zero))

mask_t = np.arange(800 * 44) / Fs
mask_l = 800 * 44

scan = np.convolve(data, preamble[::-1], mode='same') / 800
ind = peakutils.indexes(scan, thres=0.75, min_dist=2000)
ind = ind + 2 * 800

data_grid = []

for start in ind:
    sample = data[start:start + mask_l]
    deriv = np.diff(sample)
    plt.plot(mask_t[1:] + time[start], deriv)
    maxes = peakutils.indexes(deriv, thres=0.75, min_dist=250)
    mins = peakutils.indexes(-deriv, thres=0.75, min_dist=250)
    maxes = list(maxes)
    mins = list(mins)
    bits = str()
    t = 300
    while (t < mask_l):
Example #38
 def __pt_indexes(self, pt_signal):
     return pu.indexes(pt_signal,
                       thres=IDX_THRESHOLD,
                       min_dist=self.window_size)
Example #39
def cwesr_fit(x,
              y,
              filenum=0,
              gauss=False,
              gamp=dgamp,
              gwidth=dgwidth,
              gctr=2870,
              d_gsplit=20,
              min_width=4,
              max_width=15,
              max_counts=3e5,
              max_ctr=2875,
              max_splitting=200):

    dlen = len(y)
    b, a = signal.butter(1, 0.4, btype='lowpass')
    yfilt = signal.filtfilt(b, a, y)
    indexes = peakutils.indexes(np.max(yfilt) - yfilt, thres=0.45, min_dist=2)
    sm1 = []
    sm2 = []
    mintab = np.transpose([x[indexes], y[indexes]])

    for i in range(0, len(mintab)):
        if mintab[i][0] < 2873:
            sm1.append(mintab[i])
        else:
            sm2.append(mintab[i])
    sm1s = sorted(sm1, key=lambda x: x[1])
    sm2s = sorted(sm2, key=lambda x: x[1])

    dmax = np.max(y)
    dmin = np.min(y)
    amp = dmax - dmin

    ystart = np.mean(np.append(y[0:3], y[-3:]))

    if len(sm1s) >= 1 and len(sm2s) >= 1:
        fc1 = sm1s[0][0]
        fc2 = sm2s[0][0]
        gsplit = np.abs(fc2 - fc1)
        # gctr = (fc2+fc1)/2
        gamp = ystart - sm1s[0][1]
    elif len(sm1s) == 0 and len(sm2s) == 1:
        gsplit = 0
        gamp = (ystart - sm2s[0][1]) / 2
        gctr = sm2s[0][0]
    # elif len(sm1s) == 0 and len(sm2s) >= 2:
    # 	fc1 = sm2s[0][0]
    # 	fc2 = sm2s[1][0]
    # 	gsplit = np.abs(fc2-fc1)
    # 	gamp = (ystart-sm2s[0][1])
    # 	gctr = (fc2+fc1)/2
    elif len(sm1s) == 1 and len(sm2s) == 0:
        gsplit = 0
        gamp = (ystart - sm1s[0][1]) / 2
        gctr = sm1s[0][0]
    # elif len(sm1s) >= 2 and len(sm2s) == 0:
    # 	fc1 = sm1s[0][0]
    # 	fc2 = sm1s[1][0]
    # 	gsplit = np.abs(fc2-fc1)
    # 	gamp = (ystart-sm1s[0][1])
    # 	gctr = (fc2+fc1)/2
    else:
        gsplit = d_gsplit
        gamp = ystart - dmin

    lbounds2 = [
        0, 2865, -max_splitting, amp / 8, min_width, amp / 8, min_width
    ]
    ubounds2 = [
        max_counts, max_ctr, max_splitting, 4 * amp, max_width, 4 * amp,
        max_width
    ]
    guess = [ystart, gctr, gsplit, gamp, gwidth, gamp, gwidth]

    try:
        if (gauss):
            popt, pcov = curve_fit(fit_gaussian,
                                   x,
                                   y,
                                   p0=guess,
                                   bounds=(lbounds2, ubounds2))
        else:
            popt, pcov = curve_fit(fit_lorentzian,
                                   x,
                                   y,
                                   p0=guess,
                                   bounds=(lbounds2, ubounds2))
    except Exception:
        popt = [0, 0, 1e3, 1, 1, 1, 1]
        pcov = np.zeros((7, 7))
        print('fit fail on file ' + str(filenum))

    if (gauss):
        fit = fit_gaussian(x, *popt)
        fitg = fit_gaussian(x, *guess)
    else:
        fit = fit_lorentzian(x, *popt)
        fitg = fit_lorentzian(x, *guess)

    return popt, pcov, fit, fitg
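fit_lorentzian and fit_gaussian are not shown in this snippet. Judging from the 7-element guess vector [ystart, gctr, gsplit, gamp, gwidth, gamp, gwidth], a plausible two-dip Lorentzian model (an assumption, not the original code) would be:

import numpy as np

def fit_lorentzian(x, y0, ctr, split, a1, w1, a2, w2):
    # hypothetical model: baseline y0 minus two Lorentzian dips centered
    # symmetrically around ctr and separated by split
    f1, f2 = ctr - split / 2.0, ctr + split / 2.0
    return (y0
            - a1 * w1**2 / ((np.asarray(x) - f1)**2 + w1**2)
            - a2 * w2**2 / ((np.asarray(x) - f2)**2 + w2**2))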
Example #40
ROI = img[:, :, 2]
#ROI = cv2.equalizeHist(ROI)
#print('plotting')
avg = -ROI.mean(axis=0)
avg = avg + 200
baseline_vals = peakutils.baseline(avg)

_, ax = plt.subplots(2, 1)

ax[0].plot(avg)
ax[0].plot(baseline_vals)
ax[0].set_ylabel('8-bit Value')
ax[0].grid(True)

cleanDat = avg - baseline_vals
indices = peakutils.indexes(cleanDat, thres=0.5, min_dist=30)
peaks = cleanDat[indices]

ax[1].plot(indices, peaks, 'x')
ax[1].plot(cleanDat)
ax[1].set_xlabel('pixel Index')
ax[1].set_ylabel('8-bit Value')
ax[1].grid(True)

#  print('finished plotting')
plt.savefig(imagePath + '/raw_data.jpg')
plt.close()
#  print('found {} peaks'.format(len(indices)))

if len(indices) == 2:
    print('matched')
def ajustar_erf(datos, graficar=False, debug=True):
    '''
	Given a dict of the form {dacVal:counts}, fits an erf to the 1SPE falling edge and returns the fit parameters.
	It does so with the following routine:
	1) Smooths the data with the Savitzky-Golay method and differentiates the result.
	On the derivative it applies the findpeaks function to locate the maximum (and therefore
	the inflection point of the original data). It also computes an approximate width of this falling edge.
	2) Using the maximum and the width found above, it crops the original (unsmoothed) data
	and fits an error function to it.
	FYI: the inflection point of erfunc is reached at x=c
	'''
    x_data = list(datos.keys())
    y_data = list(datos.values())

    #plt.semilogy(x_data,y_data)
    #plt.show()

    #Before starting step 1, trim the trailing data points to avoid zeros
    #The reason is that the peak counter works on a log scale (to make the peak search easier)
    x_data = [i for i in x_data if y_data[x_data.index(i)] > 0]
    y_data = [i for i in y_data if i > 0]
    if len(y_data) < 20:
        print("Unicamente hay linea de base")
        return False

    #Begin step 1
    y = savitzky_golay(-np.log(np.asarray(y_data)),
                       window_size=7,
                       order=2,
                       deriv=1)
    #Search for peaks in the smoothed data
    indices = peakutils.indexes(y, min_dist=len(y_data) // 25, thres=0.4)
    #Filter the peaks found
    indicesf = []
    for l in indices:
        if y[l] > 0.2 * max(y):
            indicesf.append(l)

    #We will also need the DAC10 value of the baseline. To get it, find where y drops below half its maximum
    bajada_base = y_data.index(max(y_data))
    subida_base = y_data.index(max(y_data))
    thres = 0.2 * max(y_data)
    try:
        while y_data[subida_base] > thres:
            subida_base += 1
        while y_data[bajada_base] > thres:
            bajada_base -= 1
    except:
        print("Error al buscar ancho linea de base")
        if debug:
            plt.semilogy(x_data, y_data)
            plt.show()
        return False
    indice10_linea_de_base = (x_data[subida_base] + x_data[bajada_base]) / 2

    #Search for the width of the 1SPE falling edge. The reason the width is sought around the first peak
    #is that the first one corresponds to the falling edge of the noise rectangle

    ##Find the inflection points adjacent to the peak, corresponding to the bend at the plateau
    indicesl = peakutils.indexes(-y, min_dist=len(y_data) // 25, thres=0.1)
    try:
        for j in range(0, len(indicesl)):
            if indicesl[j] < indicesf[1] and indicesl[j + 1] > indicesf[1]:
                cotasup = indicesl[j + 1]
                cotainf = indicesl[j]
                break
    except:
        print("Error al buscar ancho linea de base")
        if debug:
            plt.semilogy(x_data, y_data)
            plt.show()
            plt.plot(x_data, -y)
            plt.plot([x_data[j] for j in indicesl],
                     [-y_data[j] for j in indicesl])
            plt.show()
        return False

    #Once the widths are found, widen them a bit more
    cotainf = cotainf - 4
    cotasup = cotasup + 4
    #We found some cases where the plateau between the noise and 1SPE was very small, which made the
    #width detection wrong. We therefore impose the following limits:
    if cotainf <= indicesf[0] + 3:
        cotainf = indicesf[0] + 3

    if graficar:
        trazas = []
        trazas.append(
            go.Scatter(x=x_data,
                       y=y_data,
                       line=dict(color=('rgb(204, 0, 0)')),
                       name='Counts'))
        trazas.append(
            go.Scatter(x=[x_data[j] for j in indicesf],
                       y=[y_data[j] for j in indicesf],
                       mode='markers',
                       marker=dict(size=8,
                                   color='rgb(200,100,0)',
                                   symbol='cross'),
                       name='Peaks'))
        trazas.append(
            go.Scatter(x=[x_data[j] for j in [bajada_base, subida_base]],
                       y=[y_data[j] for j in [bajada_base, subida_base]],
                       mode='markers',
                       marker=dict(size=8,
                                   color='rgb(250,250,250)',
                                   symbol='cross'),
                       name='Baseline extremes'))
        trazas.append(
            go.Scatter(x=[x_data[cotainf], x_data[cotasup]],
                       y=[y_data[cotainf], y_data[cotasup]],
                       mode='markers',
                       marker=dict(size=8,
                                   color='rgb(0,0,255)',
                                   symbol='cross'),
                       name='Fit limits'))

    #Crop the data based on what was found above. We only care about the 1SPE point, not the rest of the staircase
    x_data = x_data[cotainf:cotasup + 1]
    y_data = y_data[cotainf:cotasup + 1]

    #Initialize fit values following the suggestion found at https://mathematica.stackexchange.com/a/42166
    p0 = [
        -(max(y_data) - min(y_data)) / 2, 2 / len(x_data),
        np.mean(x_data), (max(y_data) - min(y_data)) / 2
    ]

    #Fit an erf. The with block makes the fit raise an error if it has trouble converging
    with warnings.catch_warnings():
        warnings.simplefilter("error", OptimizeWarning)
        try:
            params, extras = curve_fit(erfunc,
                                       x_data,
                                       y_data,
                                       p0,
                                       ftol=1.49012e-14,
                                       xtol=1.49012e-14,
                                       maxfev=10**8)
            #Is the fit good? Compute r^2
            residuals = y_data - erfunc(x_data, *params)
            ss_res = np.sum(residuals**2)
            r_squared = 1 - ss_res / np.sum((y_data - np.mean(y_data))**2)
            if r_squared < 0.95:
                print("Fiteo malo")
                print(r_squared)
                if debug:
                    plt.semilogy(x_data, y_data)
                    plt.semilogy(x_data, erfunc(x_data, *params))
                    plt.show()
                return False
        except OptimizeWarning:
            print("Error en fiteo")
            return False

    if graficar:
        y_adj = []
        for x in x_data:
            y_adj.append(erfunc(x, *params))
        trazas.append(
            go.Scatter(x=x_data,
                       y=y_adj,
                       line=dict(color=('rgb(0, 250, 0)')),
                       name='erf fit'))
        return params, extras, trazas, indice10_linea_de_base
    else:
        return params, extras, indice10_linea_de_base
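erfunc is not defined in this snippet, but the p0 initialization above matches the four-parameter form suggested in the linked stackexchange answer, so a plausible (assumed) definition is:

import numpy as np
import scipy.special

def erfunc(x, a, b, c, d):
    # hypothetical model: amplitude a, slope b, center c (the inflection point), offset d
    return a * scipy.special.erf(b * (np.asarray(x) - c)) + d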
    dataY = signal.detrend(dataY)
    Y = np.fft.fft(np.asarray(dataY)) / n  # fft computing and normalization
    Y = np.fft.fftshift(Y)
    '''PLOTS'''
    fig = plt.figure(figsize=(12, 8))
    ax = fig.gca()
    plt.plot(frq, abs(Y))  # plotting the spectrum
    tic = Fs / 2
    ax.set_xlim([-tic, tic])
    #plt.xticks(np.arange(-tic,tic,1),fontsize=30)
    plt.yticks(fontsize=22)
    plt.xlabel('Frequency (Hz)', fontsize=34)
    plt.ylabel('Abs(FFT)', fontsize=34)
    plt.tight_layout()

    indexes = peakutils.indexes(abs(Y), thres=0.4)
    [
        plt.plot(frq[indexes[i]],
                 abs(Y)[indexes[i]], 'ro') for i in range(len(indexes))
    ]

    fig.savefig(savepath + '\\FFT.png', dpi=500)
    fig.savefig(savepath + '\\FFT.svg', format='svg', dpi=1200)

    with open(savepath + '\\FFT_freq.txt', 'w') as f:
        f.write('Frequency (Hz)\tAbs(FFT)\n')
        for i in indexes:
            f.write('%.2f\t%.6f\n' % (frq[i], abs(Y)[i]))

    with open(savepath + '\\FFT.txt', 'w') as f:
        f.write('Frequency (Hz)\tAbs(FFT)\n')
Example #43
def get_mean_peak(fid_filename, make_plot=False):
    if not os.path.isfile(fid_filename):
        return None

    pd_data = pd.read_csv(fid_filename)
    # data = milk_data.iloc[:, 10]

    data = pd_data['fidelity'].tolist()

    # print((len(data)-1)/20000*1000)
    cb = np.array(data)
    # print(cb)
    cb_2 = 1 - cb

    indices = peakutils.indexes(cb, thres=thres_up / max(cb), min_dist=1)

    indices_2 = peakutils.indexes(cb_2,
                                  thres=thres_down / max(cb_2),
                                  min_dist=1)
    # print(indices_2)
    ind = {}

    HIGH = []
    LOW = []

    for i in indices:
        ind[i] = 'max'
        HIGH.append(i)

    for i in indices_2:
        ind[i] = 'min'
        LOW.append(i)

    keys = sorted(ind.keys())

    PL_width = []
    PL_height = []
    PL_hw = []

    # print(LOW)
    # print(HIGH)
    # print(ind)
    # print(keys)

    for i in range(0, len(LOW) - 1):
        # print(LOW[i], LOW[i+1])

        MAX = 0
        MAX_H = data[LOW[i]]

        for m in keys:
            if (m > LOW[i] and m < LOW[i + 1] and ind[m] == 'max'):
                if (data[m] > MAX_H):
                    # print("\t", m)
                    MAX = m
                    MAX_H = data[m]
                    # if(data[MAX])
                # MAX = max(m, MAX)
            if (m > LOW[i + 1]):
                break

        if MAX > 0:
            w = abs(LOW[i] - LOW[i + 1]) / 20000 * 1000
            # print("\t\t", LOW[i], LOW[i+1], data[LOW[i]], data[LOW[i+1]])
            h = MAX_H

            PL_width.append(w)
            PL_height.append(h)
            PL_hw.append(h / w)

            # print(MAX)

        #     # print(ind[LOW[i]])
        #     print(ind[m], end='')
        # print()
        # print(indices_2[i], indices_2[i+1])
        # for j in range(1, len(HIGH) - 1):

    # exit(1)
    # for i in range(1, len(keys) - 1):
    #     if ind[keys[i - 1]] == 'min' and ind[keys[i]] == 'max' and ind[keys[i + 1]] == 'min':
    #         w = abs(keys[i - 1] - keys[i + 1])
    #         h = data[keys[i]]

    #         PL_width.append(w)
    #         PL_height.append(h)
    #         PL_hw.append(h / w)

    # print(keys[i], w)

    # print(np.mean(PL_width))
    # print(PL_width)

    if make_plot:
        trace = go.Scatter(x=[j for j in range(len(data))],
                           y=data,
                           mode='lines',
                           name='Original Plot')

        trace2 = go.Scatter(
            x=indices,
            y=[data[j] for j in indices],
            mode='markers',
            marker=dict(
                size=8,
                color='rgb(255,0,0)',
                # symbol='cross'
            ),
            name='Detected Peaks')

        trace3 = go.Scatter(x=indices_2,
                            y=[data[j] for j in indices_2],
                            mode='markers',
                            marker=dict(size=8,
                                        color='rgb(0,255,0)',
                                        symbol='cross'),
                            name='Detected Peaks')

        data = [trace, trace2, trace3]

        plotly.offline.plot(data, filename=fid_filename + '.html')

    return {
        'width': np.mean(PL_width),
        'height': np.mean(PL_height),
        'rel': 1 / np.mean(PL_hw)
    }
Example #44
 def test_list_peaks(self):
     out = peakutils.indexes([1, 2, 1, 3, 5, 7, 4, 1], thres=0, min_dist=1)
     expected = numpy.array([1, 5])
     assert_array_almost_equal(out, expected)
Example #45
def get_bouts_in_file(file_path, hparams, loaded_p=None):
    # path of the wav_file
    # h_params from the rosa spectrogram plus the parameters:
    #     'read_wav_fun': load_couple, # function for loading the wav_like_stream (has to returns fs, ndarray)
    #     'min_segment': 30, # Minimum length of supra_threshold to consider a 'syllable'
    #     'min_silence': 200, # Minimum distance between groups of syllables to consider separate bouts
    #     'bout_lim': 200, # same as min_distance !!! Clean that out!
    #     'min_bout': 250, # min bout duration
    #     'peak_thresh_rms': 2.5, # threshold (rms) for peak acceptance,
    #     'thresh_rms': 1 # threshold for detection of syllables

    # Decide and see if it CAN load the power

    s_f, wav_i = hparams['read_wav_fun'](file_path, mmap=False)
    #logger.debug('s_f {}'.format(s_f))

    # Get the bouts. If loaded_p is None, it will compute it
    the_bouts, the_p, all_p, all_syl = get_the_bouts(wav_i,
                                                     hparams,
                                                     loaded_p=loaded_p)

    if the_bouts.size > 0:
        step_ms = hparams['frame_shift_ms']
        pk_dist = hparams['min_segment']
        bout_pd = pd.DataFrame(the_bouts * step_ms,
                               columns=['start_ms', 'end_ms'])
        bout_pd['start_sample'] = bout_pd['start_ms'] * (s_f // 1000)
        bout_pd['end_sample'] = bout_pd['end_ms'] * (s_f // 1000)

        bout_pd['p_step'] = the_p
        # the extrema over the file
        bout_pd['rms_p'] = st.rms(all_p)
        bout_pd['peak_p'] = bout_pd['p_step'].apply(np.max)
        # check whether the peak power is larger than hparams['peak_thresh_rms'] times the rms through the file
        bout_pd['bout_check'] = bout_pd.apply(lambda row: \
                                              (row['peak_p'] > hparams['peak_thresh_rms'] * row['rms_p']),
                                              axis=1)
        bout_pd['file'] = file_path
        bout_pd['len_ms'] = bout_pd.apply(
            lambda r: r['end_ms'] - r['start_ms'], axis=1)

        syl_pd = pd.DataFrame(all_syl * step_ms,
                              columns=['start_ms', 'end_ms'])
        bout_pd['syl_in'] = bout_pd.apply(lambda r: \
                                          syl_pd[(syl_pd['start_ms'] >= r['start_ms']) & \
                                                 (syl_pd['start_ms'] <= r['end_ms'])].values,
                                          axis=1)
        bout_pd['n_syl'] = bout_pd['syl_in'].apply(len)
        # get all the peaks larger than the threshold(peak_thresh_rms * rms)
        bout_pd['peaks_p'] = bout_pd.apply(lambda r: peakutils.indexes(
            r['p_step'],
            thres=hparams['peak_thresh_rms'] * r['rms_p'] / r['p_step'].max(),
            min_dist=pk_dist // step_ms),
                                           axis=1)
        bout_pd['n_peaks'] = bout_pd['peaks_p'].apply(len)
        bout_pd['l_p_ratio'] = bout_pd.apply(
            lambda r: np.nan
            if r['n_peaks'] == 0 else r['len_ms'] / (r['n_peaks']),
            axis=1)

    else:
        bout_pd = pd.DataFrame()
    return bout_pd, wav_i, all_p
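A minimal sketch of how get_bouts_in_file might be called; the hparams keys mirror the parameter comments above, and load_wav is a hypothetical loader that satisfies the 'read_wav_fun' contract of returning (fs, ndarray):

import numpy as np
from scipy.io import wavfile

def load_wav(path, mmap=False):
    # hypothetical loader: returns (sampling rate, samples) as 'read_wav_fun' expects
    fs, samples = wavfile.read(path, mmap=mmap)
    return fs, samples

hparams = {
    'read_wav_fun': load_wav,
    'frame_shift_ms': 1,      # step of the power trace in ms (assumed value)
    'min_segment': 30,        # minimum supra-threshold run for a 'syllable' (ms)
    'min_silence': 200,       # minimum gap separating bouts (ms)
    'bout_lim': 200,          # duplicate of min_silence (see comment above)
    'min_bout': 250,          # minimum bout duration (ms)
    'peak_thresh_rms': 2.5,   # peak acceptance threshold, in units of rms
    'thresh_rms': 1,          # syllable detection threshold, in units of rms
}

# bout_pd, wav_i, all_p = get_bouts_in_file('song.wav', hparams)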
Example #46
def get_peak_ratio(file_name,
                   lambda_1,
                   lambda_2,
                   file_name_2=None,
                   acq_time_1=None,
                   acq_time_2=None):

    if (file_name_2 is not None):
        wavelenght_1, intensity_1 = np.genfromtxt(file_name, unpack=True)
        intensity_1 = intensity_1 / acq_time_1

        peakind = peakutils.indexes(intensity_1, thres=0.1, min_dist=0.5)
        x_fit_1 = np.zeros(21)
        y_fit_1 = np.zeros(21)

        for i in range(0, 21):
            x_fit_1[i] = wavelenght_1[peakind[np.abs(wavelenght_1[peakind] -
                                                     lambda_1).argmin()] + i -
                                      10]
            y_fit_1[i] = intensity_1[peakind[np.abs(wavelenght_1[peakind] -
                                                    lambda_1).argmin()] + i -
                                     10]
        I1, x1, s1 = peakutils.peak.gaussian_fit(x_fit_1,
                                                 y_fit_1,
                                                 center_only=False)

        wavelenght_2, intensity_2 = np.genfromtxt(file_name_2, unpack=True)
        intensity_2 = intensity_2 / acq_time_2

        peakind = peakutils.indexes(intensity_2, thres=0.1, min_dist=0.5)
        x_fit_2 = np.zeros(21)
        y_fit_2 = np.zeros(21)

        for i in range(0, 21):
            x_fit_2[i] = wavelenght_2[peakind[np.abs(wavelenght_2[peakind] -
                                                     lambda_2).argmin()] + i -
                                      10]
            y_fit_2[i] = intensity_2[peakind[np.abs(wavelenght_2[peakind] -
                                                    lambda_2).argmin()] + i -
                                     10]
        I2, x2, s2 = peakutils.peak.gaussian_fit(x_fit_2,
                                                 y_fit_2,
                                                 center_only=False)

    else:
        wavelenght, intensity = np.genfromtxt(file_name, unpack=True)

        peakind = peakutils.indexes(intensity, thres=0.1, min_dist=0.5)

        x_fit = np.zeros(21)
        y_fit = np.zeros(21)

        for i in range(0, 21):
            x_fit[i] = wavelenght[peakind[np.abs(wavelenght[peakind] -
                                                 lambda_1).argmin()] + i - 10]
            y_fit[i] = intensity[peakind[np.abs(wavelenght[peakind] -
                                                lambda_1).argmin()] + i - 10]
        I1, x1, s1 = peakutils.peak.gaussian_fit(x_fit,
                                                 y_fit,
                                                 center_only=False)

        for i in range(0, 21):
            x_fit[i] = wavelenght[peakind[np.abs(wavelenght[peakind] -
                                                 lambda_2).argmin()] + i - 10]
            y_fit[i] = intensity[peakind[np.abs(wavelenght[peakind] -
                                                lambda_2).argmin()] + i - 10]
        I2, x2, s2 = peakutils.peak.gaussian_fit(x_fit,
                                                 y_fit,
                                                 center_only=False)

    ratio = I1 / I2
    ratio_err = ratio * (s1 / I1 + s2 / I2)

    return ratio
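The ±10-sample window plus gaussian_fit used above can also be expressed with peakutils.interpolate, which refines each detected index with a local Gaussian fit; a small sketch with a synthetic spectrum standing in for the np.genfromtxt data:

import numpy as np
import peakutils

# synthetic spectrum with one line near 656.3 (placeholder for the real file data)
wavelength = np.linspace(300, 800, 2000)
intensity = np.exp(-0.5 * ((wavelength - 656.3) / 0.5) ** 2)

peakind = peakutils.indexes(intensity, thres=0.1, min_dist=10)
# refine each peak position with a Gaussian fit over +/-width samples
refined = peakutils.interpolate(wavelength, intensity, ind=peakind, width=10)
print(refined)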
Example #47
def detect_peaks(fourier_df,
                 signal_df,
                 true_df=None,
                 t0=None,
                 t1=None,
                 min_thresh=1900,
                 max_thresh=500000,
                 min_dist=13,
                 key_len=60,
                 back_prop=.3,
                 figsz=(18, 10),
                 to_add=None,
                 signal=None,
                 save_dir=None):
    '''
        Detect peaks in the fourier_df

            fourier_df : Dataframe containing the transformed fourier signal

            t0 : The start time to analyze

            t1 : The end time to analyze

            min_thresh : The minimum threshold to include as a key press

            max_thresh : The maximum threshold value to include as a key press

            min_dist : The minimum distance allowed between two peaks

            key_len : The fixed length of a key press in milliseconds

            back_prop : The key-press window starts at peak_time - back_prop * key_len
                and ends at peak_time + (1 - back_prop) * key_len

            figsz : The size of the plot

            to_add : If not None, a list of indices to add as peaks

    '''
    if t0 and t1:
        print('time range: %.2fs - %.2fs' % (t0, t1))

    # key stroke length, converted from milliseconds to seconds
    key_len_in_sec = key_len / 1000.

    # Copy over dataframes
    sfourier_df = fourier_df.copy()
    ssignal_df = signal_df.copy()

    # Restrict to the requested time period
    if t0:
        sfourier_df = sfourier_df[sfourier_df.index >= t0]
        ssignal_df = ssignal_df[ssignal_df.index >= t0]
    if t1:
        sfourier_df = sfourier_df[sfourier_df.index <= t1]
        ssignal_df = ssignal_df[ssignal_df.index <= t1]

    if signal:
        signal.emit()

    # Finds the index of the closest value in an array
    def find_nearest(array, value):
        idx = (np.abs(array - value)).argmin()
        return idx

    # Plot with peaks detected
    fig = plt.figure(figsize=figsz)
    ax = plt.subplot(131)
    ax2 = plt.subplot(132)
    ax3 = plt.subplot(133)

    # The fourier signal
    sig = sfourier_df['signal'].values

    # Get indexes within thresholds
    indexes = peakutils.indexes(sig,
                                min_dist=min_dist,
                                thres=min_thresh / float(sig.max()))
    indexes = indexes[(sig[indexes] <= max_thresh)]

    if signal:
        signal.emit()

    # Add on additional indices if specified
    if to_add and len(to_add) > 0:
        vals = sfourier_df.index.values
        indexes = np.append(indexes,
                            [find_nearest(vals, item) for item in to_add])

    # Add on column to indicate if time point is a peak
    sfourier_df['is_peak'] = False
    sfourier_df.loc[sfourier_df.index[indexes], 'is_peak'] = True

    if signal:
        signal.emit()

    # Create dataframe of peaks
    peaks = sfourier_df[sfourier_df['is_peak']].copy()
    del peaks['is_peak']
    peaks.index.name = 'peak time'
    peaks = peaks.reset_index()

    peaks['start time'] = peaks['peak time'] - back_prop * key_len_in_sec
    peaks['end time'] = peaks['peak time'] + (1 - back_prop) * key_len_in_sec

    print('Number of Keys detected:', len(indexes))
    if signal:
        signal.emit()

    if true_df is not None:
        diff = determine_difference(true_df, peaks)
        fourier_df_ma = fourier_df['signal'].rolling(20,
                                                     center=True,
                                                     min_periods=1).max()

    # Plot the entire signal with peaks
    sfourier_df['signal'].plot(ax=ax)
    if len(indexes) > 0:
        sfourier_df['signal'].iloc[indexes].plot(style='*', ax=ax, title='all')
    ax.axhline(y=min_thresh, linewidth=1, c='r')

    if true_df is not None:

        true_peaks = true_df['peak time'] + diff
        if t0:
            true_peaks = true_peaks[true_peaks >= t0]
        if t1:
            true_peaks = true_peaks[true_peaks <= t1]

        vals = fourier_df.index.values
        inds = np.array(
            [find_nearest(vals, item) for item in true_peaks.values])

        plt_vals = fourier_df_ma.loc[vals[inds]]
        ax.scatter(true_peaks.values, plt_vals, marker='*', c='r', zorder=100)
        ax2.scatter(true_peaks.values, plt_vals, marker='*', c='r', zorder=100)

    # Plot the entire signal zoomed in  on the threshold
    sfourier_df['signal'].plot(ax=ax2)
    if len(indexes) > 0:
        sfourier_df['signal'].iloc[indexes].plot(style='*',
                                                 ax=ax2,
                                                 title='zoomed')
    ax2.axhline(y=min_thresh, linewidth=1, c='r')

    # Zoom the y-axis in around the threshold
    mx = np.max([min_thresh * 1.1, sfourier_df['signal'].max() * .15])
    ax2.set_ylim((0, mx))

    if signal:
        signal.emit()

    # Plot a shortened time period
    if not t0:
        t0 = 0.0
    t1 = t0 + 10.

    if t0:
        fourier_df = fourier_df[fourier_df.index >= t0]
        signal_df = signal_df[signal_df.index >= t0]
    if t1:
        fourier_df = fourier_df[fourier_df.index <= t1]
        signal_df = signal_df[signal_df.index <= t1]

    if signal:
        signal.emit()

    # Get signal during shortened period
    sig = fourier_df['signal'].values

    # Get indexes within thresholds
    indexes = peakutils.indexes(sig,
                                min_dist=min_dist,
                                thres=min_thresh / float(sig.max()))
    indexes = indexes[(sig[indexes] <= max_thresh)]

    # Plot first 10 seconds of clip
    fourier_df['signal'].plot(ax=ax3)
    if len(indexes) > 0:
        fourier_df['signal'].iloc[indexes].plot(style='*',
                                                ax=ax3,
                                                title='zoomed beginning')
    ax3.axhline(y=min_thresh, linewidth=1, c='r')
    ax3.set_ylim((0, mx))

    if true_df is not None:
        true_peaks = true_df['peak time'] + diff
        if t0:
            true_peaks = true_peaks[true_peaks >= t0]
        if t1:
            true_peaks = true_peaks[true_peaks <= t1]

        vals = fourier_df.index.values
        inds = np.array(
            [find_nearest(vals, item) for item in true_peaks.values])

        plt_vals = fourier_df_ma.loc[vals[inds]]
        ax3.scatter(true_peaks.values, plt_vals, marker='*', c='r', zorder=100)

    if signal:
        signal.emit()

    if save_dir:

        fname = os.path.join(save_dir, 'peaks.png')
        fig.savefig(fname)

        fname = os.path.join(save_dir, 'FigureObject.peaks.pickle')
        pickle.dump(fig, open(fname, 'wb'))

    return sfourier_df, ssignal_df, peaks
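peakutils.indexes treats thres as a fraction of the signal range, so detect_peaks rescales its absolute cutoff by sig.max() (exact when the signal minimum is zero); on peakutils versions that provide thres_abs, the same intent can be written directly. A small sketch with synthetic data:

import numpy as np
import peakutils

sig = np.abs(np.random.randn(10000)) * 1000  # synthetic stand-in for the fourier signal
min_thresh = 1900

# fraction-of-max rescaling, as in detect_peaks above
idx_scaled = peakutils.indexes(sig, thres=min_thresh / float(sig.max()), min_dist=13)

# absolute threshold, assuming a peakutils version with thres_abs support
idx_abs = peakutils.indexes(sig, thres=min_thresh, min_dist=13, thres_abs=True)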
Example #48
            distance_btwn_presses[-1] = nTRs - 1
        song_bounds.append([distance_btwn_presses]) 
        # replace zeros with ones where presses occurred and store vector in song specific button press time holder
        subj_press_vector[distance_btwn_presses] = 1
        songPressTimes = np.concatenate([songPressTimes,subj_press_vector],axis=0)
    bounds.append(song_bounds)
    allPressTimes.append(np.reshape(songPressTimes,(len(subjs),int(nTRs))))

all_songs_indexes = []

for i in range(len(durs)):
    combined = np.zeros((durs[i],1))
    for t in np.arange(0,len(combined)):
        combined[t] = sum([min(abs(x[0]-(t+1)))<=3 for x in bounds[i]])
    combined = combined.reshape((durs[i]))
    indexes = peakutils.indexes(combined,thres=0.5, min_dist=5)
    all_songs_indexes.append([indexes])

# compute subject specific bound average across songs
subj_bounds = np.zeros((len(subjs),len(durs)))

for s in range(len(subjs)):
    for i in range(len(durs)):
        subj_bounds[s,i] = len(bounds[i][s][0])

subj_bounds_mean = np.mean(subj_bounds,axis=1)
mean_of_mean_bounds = np.mean(subj_bounds_mean)
std_of_mean_bounds = np.std(subj_bounds_mean)

# plot peaks for each song
for i in range(len(all_songs_indexes)):
Example #49
def find_peak(amplitude):

    peak_indicies = pu.indexes(amplitude, thres=.2)

    return max(peak_indicies)
Example #50
def main():

    # declare an object of the class from your support library (provided)
    signal = sig.signalMeu()
    # declare a variable with the sampling frequency, which is 44100
    freqDeAmostragem = 44100
    # you imported the sounddevice library as, for example, sd. then
    # the following parameters must be set:

    sd.default.samplerate = freqDeAmostragem  # sampling rate
    sd.default.channels = 2  # you may have to change this depending on your sound card
    duration = 5  # time in seconds over which the acoustic signal picked up by the mic is acquired

    # print to the screen saying that the capture will start in n seconds, and then
    # use a time.sleep for the wait
    for i in range(6):
        print('A captação começará em ', i, ' segundos.         \r')
        time.sleep(1)

    # print informing that the recording has started
    print("A gravação foi inicializada.")

    # declare a variable "duracao" with the recording duration in seconds. just a few seconds ...
    start = time.time()
    # compute the number of samples "numAmostras" to be acquired (number of acquisitions)
    numAmostras = duration * freqDeAmostragem
    audio = sd.rec(int(numAmostras), freqDeAmostragem, channels=1)
    sd.wait()
    end = time.time()
    print("...     FIM")
    dt = end - start
    print("Durou: ", dt, " segundos")

    # inspect your "audio" variable. it may be a vector with 1 or 2 columns, a list ..
    print("Audio recebido: ", audio)

    # store a variable with only the part that matters (the data)
    audio_graf = []
    for i in range(len(audio)):
        audio_graf.append(audio[i][0])
    # use linspace to create the time vector: one instant corresponding to each sample!
    t = np.linspace(0.0, dt, len(audio_graf))

    # plot of the audio vs. time graph!

    # Compute and display the Fourier transform of the audio signal; the outputs are the amplitudes and frequencies
    xf, yf = signal.calcFFT(audio_graf, freqDeAmostragem)
    # xf, yf = signal.calcFFT(y, fs)
    fig, axs = plt.subplots(2, 1)
    axs[0].plot(t, audio_graf)
    axs[0].set_xlabel('time')
    axs[0].set_ylabel('Tone Recived')
    axs[0].grid(True)

    # this function analyzes the fourier transform and finds the peaks
    # you must learn how to use it. the sensitivity can be adjusted, i.e., what counts as a peak?
    # you must also avoid identifying two nearby peaks, since small variations in the signal
    # frequency can produce more than one peak when there is really only 1.
    # probpeaks = []
    # print(len(probpeaks))
    # for i in range(len(yf)):
    #     if yf[i]<1800 and yf[i]>600:
    #         probpeaks.append(yf[i])
    # probpeaks = np.array(probpeaks)
    # print(probpeaks)
    index = peakutils.indexes(yf, thres=0.2, min_dist=30)
    pplot(xf, yf, index)
    axs[1].set_ylabel('Fourier/Peaks')
    axs[1].grid(True)

    # for x,y in index:

    #     label = "{:.2f}".format(y)

    #     plt.annotate(label, # this is the text
    #                 (x,y), # this is the point to label
    #                 textcoords="offset points", # how to position the text
    #                 xytext=(0,10), # distance from text to points (x,y)
    #                 ha='center') # horizontal alignment can be left, right or center

    fig.tight_layout()
    plt.show()
    # print(index)
    value = xf[index]
    print(value)

    if value[0] <= 710:
        if value[1] <= 1250:
            print("1")
        elif value[1] <= 1390:
            print("2")
        elif value[1] <= 1490:
            print("3")
        else:
            print("A")
    elif value[0] <= 790:
        if value[1] <= 1250:
            print("4")
        elif value[1] <= 1390:
            print("5")
        elif value[1] <= 1490:
            print("6")
        else:
            print("B")
    elif value[0] <= 890:
        if value[1] <= 1250:
            print("7")
        elif value[1] <= 1390:
            print("8")
        elif value[1] <= 1490:
            print("9")
        else:
            print("C")
    else:
        if value[1] <= 1250:
            print("X")
        elif value[1] <= 1390:
            print("0")
        elif value[1] <= 1490:
            print("#")
        else:
            print("D")
Example #51
    def estimate_cycle_lenght(self, buff):
        autoCorr = self.autocorr(buff)
        autoCorr = self.smooth(autoCorr, 1)
        # normalize
        autoCorr = autoCorr / np.max(autoCorr)

        detectedPeaksNum = 0
        threshold = 0.9
        indexes = []

        lteuDetected = False
        ontime = 0
        dc = 0
        cycleLength = 0

        while detectedPeaksNum < 3:
            indexes = peakutils.indexes(autoCorr, thres=threshold, min_dist=40)
            detectedPeaksNum = len(indexes)
            threshold = threshold - 0.1
            if threshold < 0.3:
                # no pattern discovered
                # print("---No LTE-U detected")
                return [lteuDetected, cycleLength, ontime, dc]

        diff = np.diff(indexes, axis=0)
        avg = np.mean(diff)
        ravg = self.my_round(avg, 10)
        cycleLength = np.uint(ravg * self.samplingInterval)
        lteuDetected = True

        # check different DC to find max corr
        singleSampleNum = cycleLength / self.samplingInterval
        # 4 cycles
        sampleNum = 4 * singleSampleNum
        lteuCycle = np.zeros(int(sampleNum), dtype=float)
        maxCorr = 0.0

        samples = buff[1000:].astype(float)

        for i in range(int(cycleLength + 1)):
            lteuCycle.fill(-0.5)

            ss = 0
            ee = int(ss + i / self.samplingInterval)
            lteuCycle[ss:ee] = 0.5

            ss = int(1 * cycleLength / self.samplingInterval)
            ee = int(ss + i / self.samplingInterval)
            lteuCycle[ss:ee] = 0.5

            ss = int(2 * cycleLength / self.samplingInterval)
            ee = int(ss + i / self.samplingInterval)
            lteuCycle[ss:ee] = 0.5

            ss = int(3 * cycleLength / self.samplingInterval)
            ee = int(ss + i / self.samplingInterval)
            lteuCycle[ss:ee] = 0.5

            corr = np.correlate(lteuCycle, samples, mode='same')
            myMaxCorr = np.max(corr)

            if myMaxCorr > maxCorr:
                maxCorr = myMaxCorr
                ontime = i
                dc = ontime / cycleLength

        if ontime <= 3:
            # print("---No LTE-U detected")
            lteuDetected = False
            cycleLength = 0
            ontime = 0
            dc = 0
            return [lteuDetected, cycleLength, ontime, dc]

        if dc <= 0.05:
            # print("---No LTE-U detected")
            lteuDetected = False
            cycleLength = 0
            ontime = 0
            dc = 0
            return [lteuDetected, cycleLength, ontime, dc]

        lteuCycle.fill(-0.5)
        ss = 0
        ee = int(ss + ontime / self.samplingInterval)
        lteuCycle[ss:ee] = 0.5

        ss = int(1 * cycleLength / self.samplingInterval)
        ee = int(ss + ontime / self.samplingInterval)
        lteuCycle[ss:ee] = 0.5

        ss = int(2 * cycleLength / self.samplingInterval)
        ee = int(ss + ontime / self.samplingInterval)
        lteuCycle[ss:ee] = 0.5

        ss = int(3 * cycleLength / self.samplingInterval)
        ee = int(ss + ontime / self.samplingInterval)
        lteuCycle[ss:ee] = 0.5

        maxPossibleCorr = np.correlate(lteuCycle, lteuCycle, mode='same')
        maxPossibleCorr = np.max(maxPossibleCorr)

        maxCorr = np.correlate(lteuCycle, samples, mode='same')
        maxCorr = np.max(maxCorr)

        if maxCorr <= 0.5 * maxPossibleCorr:
            # print("---No LTE-U detected")
            lteuDetected = False
            cycleLength = 0
            ontime = 0
            dc = 0
            return [lteuDetected, cycleLength, ontime, dc]

        # print("---Detected LTE-U Cycle Length [ms]: ", cycleLength)
        # print("---Detected on-time: ", ontime)
        # print("---Detected DC: ", dc)

        return [lteuDetected, cycleLength, ontime, dc]
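estimate_cycle_lenght derives the cycle length from the spacing of autocorrelation peaks; a minimal standalone sketch of that idea, with a made-up square wave and sample spacing:

import numpy as np
import peakutils

sampling_interval = 0.1  # ms per sample (assumed)
t = np.arange(20000)
sig = (np.sin(2 * np.pi * t / 400) > 0).astype(float)  # on/off cycle of 400 samples

# autocorrelation, normalized so the largest peak is 1
centered = sig - sig.mean()
ac = np.correlate(centered, centered, mode='full')[len(sig) - 1:]
ac = ac / ac.max()

idx = peakutils.indexes(ac, thres=0.5, min_dist=40)
period_samples = np.mean(np.diff(idx))      # average spacing between peaks
print(period_samples * sampling_interval)   # ~40 ms cycle length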
Example #52
### LVP ###
in_lvp = []
minlvp, maxlvp = split(lvp, 20)
#lvp_thres =0.5 * (np.max(lvp)- np.min(lvp))+np.min(lvp)
lvp_thres = 0.5 * (
    maxlvp - minlvp) + minlvp  #Conservative min threshold for peak height
print(lvp_thres)
for i in range(3600):
    #peakutils detects peaks using derivatives, so peaks at the ends of the signal cannot be identified. Hence the
    #signal is divided into overlapping windows: 0-1001, 1000-2001, 2000-3001, etc. The signal is divided and checked
    #piecewise because there are fluctuations, and a single global min and max cannot set accurate thresholds for peak detection.
    if i != 0:
        lvp_sm = lvp[1000 * i - 1:1000 * (i + 1) + 1]
        #Peaks are found using two criteria: 1. higher than half of the min-max range within the segment (relative
        #threshold), 2. higher than the absolute threshold. Only peaks fulfilling both criteria are valid peaks
        in_lvp_sm = peakutils.indexes(lvp_sm, thres=0.5, min_dist=200)
        in_lvp_abs = peakutils.indexes(lvp_sm,
                                       thres=lvp_thres,
                                       min_dist=200,
                                       thres_abs=True)
        in_lvp_sm = list(set(in_lvp_sm) & set(in_lvp_abs))
        in_lvp_sm.sort(
        )  #set operations can mess up the order, so sorting is required
        in_lvp_sm_adj = [element + i * 1000 - 1 for element in in_lvp_sm
                         ]  # Setting the absolute peak locations
    else:
        lvp_sm = lvp[1000 * i:1000 * (i + 1) + 1]
        in_lvp_sm = peakutils.indexes(lvp_sm, thres=0.5, min_dist=200)
        in_lvp_abs = peakutils.indexes(lvp_sm,
                                       thres=lvp_thres,
                                       min_dist=200,
                                       thres_abs=True)
Example #53
def main():

    params = get_params()

    files = glob.glob('%s/*_results.pickle' % directory)

    templates = [
        os.path.basename(filename).replace('_results.pickle', '')
        for filename in files
    ]
    data = []
    for template in templates:
        print('template', template)

        parameters = pickle.load(
            open(os.path.join(directory, '%s_parameters.pickle' % template)))
        results = pickle.load(
            open(os.path.join(directory, '%s_results.pickle' % template)))
        gap = parameters['undulator_gap_encoder_position']
        if gap > 23:
            continue
        #if abs(gap-8.3) > 0.1:
        #continue
        diode = results['calibrated_diode']['observations']
        diode = np.array(diode)

        diode_chronos = diode[:, 0]
        diode_current = diode[:, 1]

        actuator = results['actuator']['observations']
        actuator = np.array(actuator)

        actuator_chronos = actuator[:, 0]
        actuator_position = actuator[:, 1]

        fast_shutter = results['fast_shutter']['observations']
        fast_shutter = np.array(fast_shutter)

        fast_shutter_chronos = fast_shutter[:, 0]
        fast_shutter_state = fast_shutter[:, 1]

        start_end_indices = peakutils.indexes(
            np.abs(np.gradient(fast_shutter_state)))

        start_chronos, end_chronos = fast_shutter_chronos[start_end_indices]

        #dark_current = np.vstack([diode_current[diode_chronos < start_chronos - fast_shutter_chronos_uncertainty], diode_current[diode_chronos > end_chronos + fast_shutter_chronos_uncertainty]]).mean()
        print(diode_current.shape)
        print(diode_chronos.shape)
        dark_current = diode_current[diode_chronos < start_chronos -
                                     fast_shutter_chronos_uncertainty].mean()
        diode_current -= dark_current

        actuator_scan_indices = np.logical_and(
            actuator_chronos >
            start_chronos + fast_shutter_chronos_uncertainty * 5,
            actuator_chronos <
            end_chronos - fast_shutter_chronos_uncertainty * 5)
        actuator_scan_chronos = actuator_chronos[actuator_scan_indices]
        actuator_scan_position = actuator_position[actuator_scan_indices]

        position_chronos_fit = np.polyfit(actuator_scan_chronos,
                                          actuator_scan_position, 1)

        position_linear_predictor = np.poly1d(position_chronos_fit)

        diode_scan_indices = np.logical_and(
            diode_chronos >
            start_chronos + fast_shutter_chronos_uncertainty * 5,
            diode_chronos < end_chronos - fast_shutter_chronos_uncertainty * 5)
        diode_scan_chronos = diode_chronos[diode_scan_indices]
        diode_scan_current = diode_current[diode_scan_indices]

        thetas = position_linear_predictor(diode_scan_chronos)
        energies = get_energy_from_theta(thetas, units_energy=eV)

        flux = get_flux(diode_scan_current, energies, params)

        if energies[0] > energies[-1]:
            energies = energies[::-1]
            flux = flux[::-1]

        filtered_flux = medfilt(flux, 5)
        peaks = peakutils.indexes(filtered_flux, min_dist=55, thres=0.012)
        #peaks = peakutils.indexes(flux, min_dist=1, thres=0.02)

        harmonics = np.arange(1, 21)

        theoretic_harmonic_energies = undulator_peak_energy(gap,
                                                            harmonics,
                                                            detune=False)
        print('theory')
        print(theoretic_harmonic_energies)
        print('detected peaks')
        print(energies[peaks][::-1])

        print('distance_matrix')
        thr = [(t, 0) for t in theoretic_harmonic_energies]
        ep = [(e, 0) for e in energies[peaks][::-1]]
        fluxes = flux[peaks][::-1]

        print('theory')
        print(thr)
        print('detected peaks')
        print(ep)
        dm = distance_matrix(thr, ep)
        print(dm.shape)
        print(np.arange(1, 21))
        print(dm.argmin(axis=1))
        print(dm.min(axis=1))
        minimums = dm.argmin(axis=0)
        print(minimums)
        print(dm.min(axis=1))

        matches = np.where(dm < 210)

        print('ep with criteria')
        ep2 = energies[peaks][::-1]
        ep_matched = ep2[matches[1]]
        print('harmonics with criteria')
        thr_matched = theoretic_harmonic_energies[matches[0]]
        fluxes_matched = fluxes[matches[1]]

        #peak_half_width = 45.

        #from scipy.ndimage import center_of_mass
        #for l, e in enumerate(ep_matched):
        #print 'starting peak position refinement, l, e', l, e
        #shift = peak_half_width
        #k = 0
        #while shift >= 5:
        #k+=1
        #indices = np.logical_and(energies<e+peak_half_width, energies>e-peak_half_width)
        #less_then = np.logical_and(energies<e, energies>e-peak_half_width)
        #more_then = np.logical_and(energies>e, energies<e-peak_half_width)
        #print 'sum(indices) initial', sum(indices)
        #if sum(less_then) > sum(more_then):
        #diff = sum(less_then) - sum(more_then)
        #print 'diff', diff
        #valid = np.where(indices == True)[0]
        #print 'valid', valid
        #indices[valid[:diff]] = False
        #elif sum(less_then) < sum(more_then):
        #diff = -sum(less_then) + sum(more_then)
        #print 'diff', diff
        #valid = np.where(indices == True)[0]
        #print 'valid', valid
        #indices[valid[-diff:]] = False

        #print 'sum(indices)', sum(indices)
        #xp = energies[indices]
        #print 'energies', xp
        #fp = flux[indices]
        #print 'fluxes', fp
        #x = np.linspace(xp[0], xp[-1], 101)
        #f = np.interp(x, xp, fp)
        #com = center_of_mass(f)
        #print 'com', com
        #new_e = x[int(round(com[0]))]
        #print new_e
        #shift = e - new_e
        #print 'k, shift', k, shift
        #e = new_e
        #print 'new_e', new_e
        #ep_matched[l] = new_e
        #print

        matched = np.array(
            list(zip(matches[0] + 1, thr_matched, ep_matched,
                     np.abs(thr_matched - ep_matched), fluxes_matched)))
        print(matched)

        #pylab.vlines(undulator_peak_energy(gap, np.arange(1, 21), detune=True), 0, 1.1*flux.max(), color='cyan', label='theoretic harmonic peak positions')
        #pylab.vlines(theoretic_harmonic_energies, 0, 1.1*flux.max(), color='magenta', label='theoretic harmonic positions')
        pylab.figure()
        for k, thr, ep, diff, flx in matched:
            data.append([gap, int(k), ep, flx])
            pylab.annotate('%d' % k,
                           xy=(ep, flx),
                           xytext=(ep + 150, 1.1 * flx),
                           arrowprops=dict(arrowstyle='->',
                                           connectionstyle="arc3"))

        pylab.plot(energies, flux, label='flux')
        #pylab.plot(energies, filtered_flux, label='filtered_flux')
        #pylab.plot(energies[peaks], flux[peaks], 'rx', mew=2, label='peaks')
        pylab.plot(matched[:, 2],
                   matched[:, -1],
                   'rx',
                   mew=2,
                   label='harmonics')
        pylab.xlabel('energy [eV]')
        pylab.ylabel('flux [ph/s]')
        pylab.legend()
        pylab.title('Energy scan, %s mm, undulator U24 Proxima 2A, SOLEIL' %
                    template.replace('_', ' '))

    f = open('data_2017-09-06.pickle', 'wb')
    pickle.dump(np.array(data), f)
    f.close()

    pylab.show()
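The matching step above pairs detected peak energies with theoretical harmonic energies through a distance matrix and a 210 eV cutoff; a compact standalone sketch of the same pairing with invented energies:

import numpy as np
from scipy.spatial import distance_matrix

theory = np.array([5200., 10400., 15600.])    # theoretical harmonic energies (eV), made up
detected = np.array([5150., 10550., 14100.])  # detected peak energies (eV), made up

# distance_matrix expects 2-D points, hence the padded zero coordinate
dm = distance_matrix([(t, 0) for t in theory], [(e, 0) for e in detected])
rows, cols = np.where(dm < 210)               # accept pairs closer than 210 eV
for h, d in zip(rows, cols):
    print('harmonic', h + 1, ':', theory[h], 'eV matched to', detected[d], 'eV')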
Example #54
def main():

    #declare an object of the class from your support library (provided)
    #declare a variable with the sampling frequency, which is 44100
    signal = signalMeu()
    freqDeAmostragem = 44100

    #you imported the sounddevice library as, for example, sd. then
    # the following parameters must be set:
    
    sd.default.samplerate = freqDeAmostragem  #sampling rate
    sd.default.channels = 1  #you may have to change this depending on your sound card
    duration = 2 #time in seconds over which the acoustic signal captured by the mic is acquired


    # print to the screen saying that the capture will start in n seconds, and then
    n = 1
    print("A captaçao comecara em {} segundos ".format(n))
    time.sleep(n)
    #use a time.sleep for the wait
    #print informing that the recording has started
    print("A gravaçao foi inicializada")

    #declare a variable "duracao" with the recording duration in seconds. just a few seconds ...
    numAmostras = freqDeAmostragem * duration
    #compute the number of samples "numAmostras" to be acquired (number of acquisitions)
   
    audio = sd.rec(int(numAmostras), freqDeAmostragem, channels=1)
    sd.wait()
    print("...     FIM")
    
    
    #inspect your "audio" variable. it may be a vector with 1 or 2 columns, a list ...
    #store a variable with only the part that matters (the data)
    dados = []
    for e in audio[:,0]:
        dados.append(e)
        
    # use linspace to create the time vector: one instant corresponding to each sample!

    t = np.linspace(0,duration,int(numAmostras))

    #plot of the audio vs. time graph (time on the x-axis)
    plt.plot(t, dados)
    plt.grid()
    plt.title('audio vs tempo')
    plt.show()
   
    
    ## Compute and display the Fourier transform of the audio signal; the outputs are the amplitudes and frequencies

    xf, yf = signal.calcFFT(dados, freqDeAmostragem)
    plt.figure("F(y)")
    plt.plot(xf,yf)
    plt.grid()
    plt.title('Fourier audio')
    plt.show()
    

    #this function analyzes the fourier transform and finds the peaks
    #you must learn how to use it. the sensitivity can be adjusted, i.e., what counts as a peak?
    #you must also avoid identifying two nearby peaks, since small variations in the signal
    #frequency can produce more than one peak when there is really only 1.

    #index = peakutils.indexes(,,)
    #print the peaks found!
    index = peakutils.indexes(yf, thres=0.1, min_dist=50)
    picos = [xf[i] for i in index if xf[i] > 600 and xf[i] < 1800]
    maxi = max(picos, key=int)
    minn = min(picos, key=int)
    delta = 10
    print("Picos : {}  ".format(picos))

    tabela_DTMF = {"1":[1209, 697], "2":[1336, 697], "3":[1477, 697], "A":[1633, 697],
         "4":[1209, 770], "5":[1336, 770], "6":[1477, 770], "B":[1633, 770],
         "7":[1209, 852], "8":[1336, 852], "9":[1477, 852], "C":[1633, 852],
         "X":[1209, 941], "0":[1336, 941], "#":[1477, 941], "D":[1633, 941]}

    if 1209 + delta >= maxi >= 1209 - delta :
        if 697 + delta >= minn >= 697 - delta:     
            print("A tecla encontrada foi a : 1  ")  

    if 1336 + delta >= maxi >= 1336 - delta:
        if 697 + delta >= minn >= 697 - delta:   
            print("A tecla encontrada foi a : 2  ")  

    if 1477 + delta >=  maxi >= 1477 - delta:
        if 697 + delta >= minn >= 697 - delta :   
            print("A tecla encontrada foi a : 3  ")  


    if 1633 + delta >= maxi >= 1633 - delta:
        if 697 + delta >= minn >= 697 - delta :   
            print("A tecla encontrada foi a : A  ")  

    if 1209 + delta >= maxi >= 1209 - delta :
        if 770 + delta >= minn >= 770 - delta :   
            print("A tecla encontrada foi a : 4  ")  

    if 1336 + delta >= maxi >= 1336 - delta :
        if 770 + delta >= minn >= 770 - delta:   
            print("A tecla encontrada foi a : 5  ")    

    if 1477 + delta >= maxi >= 1477 - delta: 
        if 770 + delta >= minn >= 770 - delta:   
            print("A tecla encontrada foi a : 6  ")    

    if 1633 + delta >= maxi >= 1633 - delta:
        if 770 + delta >= minn >= 770 - delta :   
            print("A tecla encontrada foi a : B  ")    

    if 1209 + delta >= maxi >= 1209 - delta:
        if 852 + delta >= minn >= 852 - delta :   
            print("A tecla encontrada foi a : 7  ")

    if 1336 + delta >= maxi >= 1336 - delta:
        if 852 + delta >= minn >= 852 - delta :   
            print("A tecla encontrada foi a : 8  ")

    if 1477 + delta >= maxi >= 1477 - delta :
        if 852 + delta >= minn >= 852 - delta :   
            print("A tecla encontrada foi a : 9  ")    
    if 1633 + delta >= maxi >= 1633 - delta :
        if 852 + delta >= minn >= 852 - delta :   
            print("A tecla encontrada foi a : C  ")    

    
    if 1209 + delta  >= maxi >= 1209 - delta :
        if 941 + delta >= minn >= 941 - delta  :   
            print("A tecla encontrada foi a : X  ")  
    if 1336 + delta  >= maxi >= 1336 - delta:
        if 941 + delta >= minn >= 941 - delta :   
            print("A tecla encontrada foi a : 0  ")        
    if 1477 + delta  >= maxi >= 1477 - delta:
        if 941 + delta >= minn >= 941 - delta  :   
            print("A tecla encontrada foi a : #  ")    
    if 1633 + delta >= maxi >= 1633 - delta:
        if 941 + delta >= minn >= 941 - delta  :   
            print("A tecla encontrada foi a : D  ")    
Example #55
""" peak detector with threshold """

import numpy as np
import matplotlib.dates as mdates
import pandas as pd
from datetime import datetime
import peakutils
from peakutils.plot import plot as pplot
from matplotlib import pyplot

headers = ['ts', 'x', 'y', 'z', 'm']
df = pd.read_csv('bammo_accel.csv', names=headers)

df['ts'] = df['ts'].map(
    lambda x: datetime.strptime(str(x), '%Y-%m-%d %H:%M:%S.%f'))

x = df['ts']
y = df['m']
indexes = peakutils.indexes(y, thres=0.75, min_dist=30)
xpeak = str(x[indexes]).split()[1] + " " + str(x[indexes]).split()[2]
ypeak = str(y[indexes]).split()[1]

pyplot.figure(figsize=(10, 6))
pplot(x, y, indexes)
pyplot.title("Peak at " + xpeak + " " + ypeak + " Gs " + "max=" + str(y.max()))
pyplot.show()
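The string splitting above parses the printed Series to recover the first peak's timestamp and magnitude; indexing the Series directly is more robust. A sketch assuming the same df, x, y, and indexes as above:

# take the first detected peak directly instead of parsing str(Series)
first = indexes[0]
xpeak = x.iloc[first]  # pandas Timestamp of the peak
ypeak = y.iloc[first]  # magnitude in Gs
print("Peak at", xpeak, ypeak, "Gs", "max=", y.max())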
Example #56
def gaussianFits(fileName, fobs, outLoc, SN):

    data = loadData(fileName, fobs, outLoc)
    x = data.wave[1349:]  #could also write as hAregion[:,0]
    y = data.subSpec[1349:]
    width = 3.0  #know this from experience
    amphB = 0
    ampOIII = 0
    ampOI = 0
    amphA = 0
    ampNII = 0
    ampNII0 = 0
    ampSIIa = 0
    ampSIIb = 0
    xGuessSIIb = 6735

    #may need to make noise smarter soon - this is a test!!
    #NEED TO MAKE NOISE MUCH SMARTER
    noiseX = x[1450:1460]
    noiseY = y[1450:1460]

    noise = np.abs(3 * integrate.simps(noiseY, noiseX))

    #trying to fix the problem with the gaussian fits using peak utils indexes
    #automating guessing heights
    peaks = indexes(
        y, thres=0.3,
        min_dist=4.0)  #these values are somewhat arbitrary right now
    numPeaks1 = 0
    peak1 = [0]
    peakIndices1 = [0]
    numPeaks2 = 0
    peak2 = [0]
    peakIndices2 = [0]
    peakX = x[peaks]
    peakY = y[peaks]
    #buffer = 0
    # drop detected peaks with negative amplitude; np.delete returns a copy,
    # so boolean masking is the reliable way to filter both arrays
    positive = peakY > 0
    peakX = peakX[positive]
    peakY = peakY[positive]
    print(peakX)

    hBisClear = False
    OIIIisClear = False
    OIisClear = False
    hAisClear = False
    NIIisClear = False
    SIIisClear = False
    tooNoisy = False

    Halpha = 0
    NII = 0
    Hbeta = 0
    OIII = 0
    SII = 0
    OI = 0
    doublePeak = 0
    hBnum = 0
    hBsens = []

    for i in range(len(peakX)):
        """if peakX[i] > 6550 and peakX[i] < 6780: #H-alpha, NII, and SII
            numPeaks2 = numPeaks2 + 1
            peak2.append(peakX[i])
            peakIndices2.append(i)"""
        if peakX[i] > 6557 and peakX[i] < 6567:
            numPeaks2 = numPeaks2 + 1
            peak2.append(peakX[i])
            peakIndices2.append(i)
            hAisClear = True
            amphA = peakY[i]
        if peakX[i] > 6577 and peakX[i] < 6587:
            numPeaks2 = numPeaks2 + 1
            peak2.append(peakX[i])
            peakIndices2.append(i)
            NIIisClear = True
            ampNII = peakY[i]
        if peakX[i] > 6710 and peakX[i] < 6740:
            doublePeak = doublePeak + 1
            numPeaks2 = numPeaks2 + 1
            peak2.append(peakX[i])
            peakIndices2.append(i)
            SIIisClear = True
            """if doublePeak==1:
                ampSIIa = peakY[i]
            elif doublePeak==2:
                ampSIIb = peakY[i]
                if np.abs(peakX[i] - xGuessSIIb) >= 2:
                    xGuessSIIb = peakX[i]"""
        if peakX[i] > 6713 and peakX[i] < 6722:
            ampSIIa = peakY[i]
            print('SIIa (' + str(peakX[i]) + ', ' + str(peakY[i]) + ')')
        if peakX[i] > 6727 and peakX[i] < 6733:
            ampSIIb = peakY[i]
            print('SIIb (' + str(peakX[i]) + ', ' + str(peakY[i]) + ')')
        if peakX[i] > 4850 and peakX[i] < 4870:  #H-beta
            numPeaks1 = numPeaks1 + 1
            hBnum = hBnum + 1
            peak1.append(peakX[i])
            peakIndices1.append(i)
            hBisClear = True
            if hBnum >= 1:
                hBsens.append(peakY[i])
                amphB = np.amax(hBsens)
            #amphB = peakY[i]
        if peakX[i] > 5000 and peakX[i] < 5015:  #OIII
            numPeaks1 = numPeaks1 + 1
            peak1.append(peakX[i])
            peakIndices1.append(i)
            OIIIisClear = True
            ampOIII = peakY[i]
        if peakX[i] > 6295 and peakX[i] < 6305:  #OI
            numPeaks1 = numPeaks1 + 1
            peak1.append(peakX[i])
            peakIndices1.append(i)
            OIisClear = True
            ampOI = peakY[i]
    peak2.remove(0)
    peak1.remove(0)
    peakIndices2.remove(0)
    peakIndices1.remove(0)
    print("numPeaks2 = " + str(numPeaks2))
    print("numPeaks1 = " + str(numPeaks1))

    print(peak2)
    print(peak1)

    #note: OI is usually so small, indexes probably won't pick it up
    tooNoisy = False
    """if numPeaks2==5:
        ampNII0 = peakY[peakIndices2[0]]
        amphA = peakY[peakIndices2[1]]
        ampNII = peakY[peakIndices2[2]]
        ampSIIa = peakY[peakIndices2[3]]
        ampSIIb = peakY[peakIndices2[4]]
    elif numPeaks2==4:
        ampNII0 = noise #placeholder because you need to do a triple gaussian fit 
        amphA = peakY[peakIndices2[0]]
        ampNII = peakY[peakIndices2[1]]
        ampSIIa = peakY[peakIndices2[2]]
        ampSIIb = peakY[peakIndices2[3]]
    if numPeaks2 < 3 or numPeaks2 > 9:
        tooNoisy = True
        print("too few peaks detected, check this spectra by hand")
    elif numPeaks2 > 25:
        tooNoisy = True
        print("too many peaks detected, check this spectra by hand")"""
    """if numPeaks1==2:
        amphB = peakY[peakIndices1[0]]
        ampOIII = peakY[peakIndices1[1]]
        ampOI = noise #this is just a place holder for now, means that .indexes couldn't find a peak at OI
    elif numPeaks1==3: #note! need to take care of uncertainties 
        amphB = peakY[peakIndices1[0]]
        ampOIII = peakY[peakIndices1[1]]
        ampOI = peakY[peakIndices1[2]]"""
    if numPeaks1 < 1:  #NOT LESS THAN TWO (gets rid of cases where H-b is visible but not O3, which happens a lot)
        #tooNoisy = True
        print("too few peaks detected, check this spectra by hand")
    #if hAisClear==False: tooNoisy = True
    #if SN < 13: tooNoisy = True
    #if hBisClear==False and OIIIisClear==False: tooNoisy = True

    if tooNoisy == False:
        guesshB = [amphB, 4865, width]
        guessOIII = [ampOIII, 5007, width]
        guessOI = [ampOI, 6300, width]
        guesshA = [amphA, 6563, width]
        guessNII = [ampNII, 6583, width]
        guessSIIa = [ampSIIa, 6716, width]  #6716
        guessSIIb = [ampSIIb, 6731, width]  #6731

        #guesshB = [4.35, 4864, 1]
        #guessOIII = [15, 5007, 3]
        #guessOI = [5, 6300, 3]
        #guesshA = [10, 6563, 3]
        #guessNII = [5, 6583, 3]
        #ampNII0 = 4
        #width = 3
        #guessSIIa = [3.1, 6716, 3] #6716
        #guessSIIb = [2.1, 6735, 1]
        #4.4 and 3.6

        #using the least-squares function to optimize the parameters for the gaussian fit (the params for the func() function)

        #singular-peak gaussian fits
        optimhB, flag = sp.leastsq(errfunc, guesshB, args=(x, y))
        optimOIII, flag = sp.leastsq(errfunc, guessOIII, args=(x, y))
        optimOI, flag = sp.leastsq(errfunc, guessOI, args=(x, y))

        #multi-peak gaussian fits
        guessSII = [
            guessSIIa[0], guessSIIa[1], guessSIIa[2], guessSIIb[0],
            guessSIIb[1], guessSIIb[2]
        ]
        #guessSII = [35, 6716, 3, 30, 6736, 3]
        optimSII, flag = sp.leastsq(doubleErrFunc, guessSII, args=(x, y))
        ySII = double_gaussian(x, optimSII[0], optimSII[1], optimSII[2],
                               optimSII[3], optimSII[4], optimSII[5])

        #fitting H-alpha and NII together as a multi-peak gaussian; uncomment the double-gaussian version below when done
        guesshANII = [
            ampNII0, 6548, width, guesshA[0], guesshA[1], guesshA[2],
            guessNII[0], guessNII[1], guessNII[2]
        ]
        guesshANII = [
            4.3, 6551, 1.0, guesshA[0], guesshA[1], guesshA[2], guessNII[0],
            guessNII[1], guessNII[2]
        ]

        optimhANII, flag = sp.leastsq(tripleErrFunc, guesshANII, args=(x, y))
        yhANII = triple_gaussian(x, optimhANII[0], optimhANII[1],
                                 optimhANII[2], optimhANII[3], optimhANII[4],
                                 optimhANII[5], optimhANII[6], optimhANII[7],
                                 optimhANII[8])
        """guesshANII2 = [guesshA[0], guesshA[1], guesshA[2], guessNII[0], guessNII[1], guessNII[2]]
        guesshANII2 = [38.9, 6565, 7, 13.7, 6586, 3]
        optimhANII, flag = sp.leastsq(doubleErrFunc, guesshANII2, args=(x, y))
        yhANII2 = double_gaussian(x, optimhANII[0], optimhANII[1], optimhANII[2], optimhANII[3], optimhANII[4], optimhANII[5])"""

        optimhA, flag = sp.leastsq(errfunc, guesshA, args=(x, y))
        optimNII, flag = sp.leastsq(errfunc, guessNII, args=(x, y))

        #now, calculating y-values and extracting each individual peak from multi-peak fit
        yNII0 = func(x, optimhANII[0], optimhANII[1], optimhANII[2])
        yhA = func(x, optimhANII[3], optimhANII[4], optimhANII[5])
        yNII = func(x, optimhANII[6], optimhANII[7], optimhANII[8])

        #yhA = func(x, optimhANII[0], optimhANII[1], optimhANII[2])
        #yNII = func(x, optimhANII[3], optimhANII[4], optimhANII[5])

        ySIIa = func(x, optimSII[0], optimSII[1], optimSII[2])
        ySIIb = func(x, optimSII[3], optimSII[4], optimSII[5])

        #calculating y values for a gaussian fit using the new input parameters optimized above
        yhB = func(x, optimhB[0], optimhB[1], optimhB[2])
        yOIII = func(x, optimOIII[0], optimOIII[1], optimOIII[2])
        yOI = func(x, optimOI[0], optimOI[1], optimOI[2])
        """#comment below out after this run
        yhA = func(x, optimhA[0], optimhA[1], optimhA[2])
        yNII = func(x, optimNII[0], optimNII[1], optimNII[2])"""

        Hbeta = integrate.simps(yhB, x)
        OIII = integrate.simps(yOIII, x)
        OI = integrate.simps(yOI, x)
        Halpha = integrate.simps(yhA, x)
        NII = integrate.simps(yNII, x)
        #SII = integrate.simps(ySIIa, x) + integrate.simps(ySIIb, x) #not sure which of these two it is (are they different?)
        SII = integrate.simps(ySII, x)

        Halpha = integrate.simps(y[1705:1725], x[1705:1725])
        NII = integrate.simps(y[1725:1742], x[1725:1742])
        #SII = integrate.simps(y[3208:3238], x[3208:3238])

        #temporary measure!

        if Hbeta < 0: hBisClear = False
        if OIII < 0: OIIIisClear = False
        if OI < 0: OIisClear = False
        if Halpha < 0: hAisClear = False
        if NII < 0: NIIisClear = False
        if SII < 0: SIIisClear = False

        f = plt.figure(2, figsize=(16, 5))
        plt.clf()
        plt.xlim(4700, 7000)
        plt.grid()
        plt.tight_layout()
        plt.title('Gaussian Fit')

        plt.grid(c='k', linestyle='-', linewidth=1.0, alpha=0.25)
        plt.plot(x, y, c='k', linewidth=0.5)

        plt.plot(x, yhB, c='c', linewidth=0.75)
        plt.plot(x, yOIII, c='y', linewidth=0.75)
        plt.plot(x, yOI, c='m', linewidth=0.75)
        plt.plot(x, yhANII, c='b', lw=0.75)
        #plt.plot(x, yhANII2, c='b', lw=0.75)
        plt.plot(x, yhA, c='g', lw=0.75)
        plt.plot(x, yNII0, c='y', lw=0.75)
        plt.plot(x, yNII, c='g', lw=0.75)
        plt.plot(x, ySII, c='b', lw=0.75)
        plt.plot(x, ySIIa, c='r', lw=0.75)
        plt.plot(x, ySIIb, c='r', lw=0.75)

        f.savefig(outLoc + 'gaussian_fit.png', bbox_inches='tight')
        f.show()
        input()

    elif tooNoisy == True:
        print(
            "spectrum is too noisy to analyze automatically, please check by hand"
        )

    elineFluxes = namedtuple(
        'EmLines',
        'hAlpha NII hBeta OIII SII OI noise hAisClear NIIisClear hBisClear OIIIisClear SIIisClear OIisClear tooNoisy'
    )
    emLines = elineFluxes(Halpha, NII, Hbeta, OIII, SII, OI, noise, hAisClear,
                          NIIisClear, hBisClear, OIIIisClear, SIIisClear,
                          OIisClear, tooNoisy)
    return emLines  #potential problem here! maybe it has to return something no matter what!
Example #57
File: therm.py  Project: xfaxca/pygaero
def peak_find(tseries_df,
              temp,
              ion_names,
              peak_threshold=0.05,
              min_dist=50,
              smth=False):
    """
    This function takes a pandas DataFrame containing desorption time series, along with a time series of Figaero
        heater temperatures. For each series (i.e., column in the DataFrame), the maximum value is found. For this,
        peakutils package is used. To ensure that a global maximum is found, parameters [peak_threshold] and [min_dist]
        may need to be optimized. However, the default values of 0.05 and 50 have been tested on desorption time series
        from several experiments, and no problems have been detected as of yet. Smaller min_dist may be needed to
        capture secondary TMax (TMax2) values.

    :param tseries_df: (DataFrame) pandas DataFrame with time series organized as the columns. Index should be
    :param temp: (float/int) Figaero desorption temperatures as recorded by EyeOn data, or other temperature logger.
    :param ion_names: (string) String names of ions to correspond to the time series in tseries_df (same order)
    :param peak_threshold: (float) Normalized peak threshold from 0. - 1.0.  Should be careful when using this in
            this DataFrame oriented function.  If some peaks are not found on some time series, it will not return
            the correct size dataframe and will throw an error because of passed-implied shape exception. Default value
            of 0.05 (or 5%) has been tested with figaero data from multiple experiments with no errors detected.
    :param min_dist: (int) The minimum distance of points between two peaks that will be identified. This is sensitive
            to the index resolution (e.g., time or temperature) of the input data in [tseries_df].
    :param smth: (bool) If true, time series are smoothed before finding the max values in the time series. NOT
            RECOMMENDED if the time series have already been smoothed before.
    :return: df_tmax: a pandas DataFrame of 5 columns with Tmax1, MaxSig1, Tmax2, MaxSig2, DubFlag (double peak flag)
    """
    # Check types of passed arguments to prevent errors during subsequent processing
    _check.check_dfs(values=[tseries_df])
    _check.check_num_equal(
        val1=tseries_df.shape[0],
        val2=len(temp))  # Number of datapoints must be equal
    _check.check_string(values=ion_names)
    _check.check_threshold(values=[peak_threshold],
                           thresh=1.0000001,
                           how='under')
    _check.check_threshold(values=[peak_threshold], thresh=0.0, how='over')
    _check.check_int(values=[min_dist])
    _check.check_bool(values=[smth])

    for i in range(0, len(tseries_df.columns)):
        # Process each ion's time series sequentially
        ion_tseries = tseries_df.values[:, i]

        if smth:
            ion_tseries = smooth(ion_tseries, window='hamming', window_len=11)

        # Find indices of peaks using peakutils.indexes (see code in libs/site-packages/peakutils).
        max_indices = peakutils.indexes(ion_tseries,
                                        thres=peak_threshold,
                                        min_dist=min_dist)
        # 2. The first two most prominent peaks are used to capture the major thermal behavior of the thermograms.
        dub_flag = 0
        npeaks = len(max_indices)
        # print('# of peaks for %s:' % tseries_df.columns[i], npeaks)                    # Debug line/optional output
        if npeaks == 0:
            # Optional output
            # print('No peaks above threshold found for ion %s. Assigning NaN to Tmax values.' % tseries_df.columns[i])
            if i == 0:
                TMax1 = []
                TMax2 = []
                MaxSig1 = []
                MaxSig2 = []
                DubFlag = []
                # give nan values to first ion since there are no peaks detected
            TMax1.append(np.nan)
            MaxSig1.append(np.nan)
            TMax2.append(np.nan)
            MaxSig2.append(np.nan)
            DubFlag.append(-1)
            # print('TMax1 for 0 peak ion now = ', TMax1[i])
        else:
            for j in max_indices:
                # print('j in max indices at count %.0f:' % dub_flag, j)        # debug line
                if i == 0:
                    if dub_flag == 0:
                        # Create Tmax/Tseries on very first pass (First element in max_indices for first ion)
                        TMax1 = [np.nan]
                        TMax2 = [np.nan]
                        MaxSig1 = [np.nan]
                        MaxSig2 = [np.nan]
                        DubFlag = [np.nan]
                        # Assign very first value
                        TMax1[i] = temp[j]
                        MaxSig1[i] = ion_tseries[j]
                        # print('first Tmax/SigMax assigned for ion', ion_names[i])
                        # print('Tmax =', TMax1, 'and MaxSig1 =', MaxSig1, '\n')
                        if npeaks == 1:
                            TMax2[i] = np.nan
                            MaxSig2[i] = np.nan
                            DubFlag[i] = 0
                    elif dub_flag == 1:
                        TMax2[i] = temp[j]
                        MaxSig2[i] = ion_tseries[j]
                        DubFlag[i] = 1
                    else:
                        pass
                    dub_flag += 1
                else:
                    if dub_flag == 0:
                        TMax1.append(temp[j])
                        MaxSig1.append(ion_tseries[j])
                        if npeaks == 1:
                            TMax2.append(np.nan)
                            MaxSig2.append(np.nan)
                            DubFlag.append(0)
                    elif dub_flag == 1:
                        TMax2.append(temp[j])
                        MaxSig2.append(ion_tseries[j])
                        DubFlag.append(1)
                    else:
                        pass
                    dub_flag += 1
    df_tmax = pd.DataFrame(data={
        'TMax1': TMax1,
        'MaxSig1': MaxSig1,
        'TMax2': TMax2,
        'MaxSig2': MaxSig2,
        'DubFlag': DubFlag
    },
                           index=tseries_df.columns.values)
    df_tmax.index.name = "Molecule"

    return df_tmax
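A minimal usage sketch for peak_find with a synthetic two-ion thermogram; the temperature ramp and signal shapes are invented, and the surrounding module context (peak_find and its _check helpers) is assumed to be importable:

import numpy as np
import pandas as pd

# synthetic desorption series for two ions over a 25-200 C ramp (made up)
temp = np.linspace(25, 200, 500)
ion_a = np.exp(-0.5 * ((temp - 80) / 10) ** 2)             # single peak near 80 C
ion_b = (np.exp(-0.5 * ((temp - 70) / 8) ** 2)
         + 0.6 * np.exp(-0.5 * ((temp - 140) / 12) ** 2))  # double peak
tseries_df = pd.DataFrame({'ionA': ion_a, 'ionB': ion_b})

df_tmax = peak_find(tseries_df, temp, ion_names=['ionA', 'ionB'],
                    peak_threshold=0.05, min_dist=50)
print(df_tmax[['TMax1', 'TMax2', 'DubFlag']])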
Example #58
    temp = []
    # for all channel of sensor
    for channel in n_channel_data_near_leak:
        denoised_signal = dwt_smoothing(x=channel, wavelet=dwt_wavelet, level=dwt_smooth_level)
        temp.append(denoised_signal)
    n_channel_data_near_leak = np.array(temp)


# PEAK DETECTION AND ROI -----------------------------------------------------------------------------------------------

time_start = time.time()

# detect peaks by segment, to avoid effects from super peaks
peak_ch0, peak_ch1, peak_ch2, peak_ch3 = [], [], [], []
for seg, count in zip(n_channel_split, [0, 1]):
    peak_ch0.append([(x + (count*2500000)) for x in peakutils.indexes(seg[0], thres=0.5, min_dist=1500)])
    peak_ch1.append([(x + (count*2500000)) for x in peakutils.indexes(seg[1], thres=0.6, min_dist=5000)])
    peak_ch2.append([(x + (count*2500000)) for x in peakutils.indexes(seg[2], thres=0.6, min_dist=5000)])
    peak_ch3.append([(x + (count*2500000)) for x in peakutils.indexes(seg[3], thres=0.5, min_dist=1500)])

# convert list of list into single list
peak_ch0 = [i for sublist in peak_ch0 for i in sublist]
peak_ch1 = [i for sublist in peak_ch1 for i in sublist]
peak_ch2 = [i for sublist in peak_ch2 for i in sublist]
peak_ch3 = [i for sublist in peak_ch3 for i in sublist]
peak_list = [peak_ch0, peak_ch1, peak_ch2, peak_ch3]

# USING peakutils
# peak_list = []
# time_start = time.time()
# # channel 0
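The per-channel lists above follow one pattern: detect peaks segment by segment, then shift each segment's indices back to global positions; a small sketch generalizing it (segment length and thresholds match the values assumed above):

import numpy as np
import peakutils

def segment_peaks(sig, seg_len=2500000, thres=0.5, min_dist=1500):
    # detect peaks per segment and re-add the segment offset
    peaks = []
    for count, start in enumerate(range(0, len(sig), seg_len)):
        idx = peakutils.indexes(sig[start:start + seg_len], thres=thres, min_dist=min_dist)
        peaks.extend(i + count * seg_len for i in idx)
    return np.array(peaks)

# e.g. peak_ch0 = segment_peaks(n_channel_data_near_leak[0])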
Example #59
def test_near_peaks2(self):
    out = peakutils.indexes(self.near, thres=0, min_dist=1)
    expected = numpy.array([1, 3, 5, 7, 9])
    assert_array_almost_equal(out, expected)
Example #60
plt.show()

# What is the standard deviation of the noise
stdnoise = np.std(betterinoise)

#betteri_rect = np.absolute(betteri)
#plt.plot(s,betteri_rect)
#plt.show()

smoothi = np.abs(signal.hilbert(betteri))  # amplitude envelope of the analytic signal
plt.figure(figsize=(10, 6))  #zoom in
plt.plot(s, smoothi, label='smoothened')

#Find the peaks using the peakutils library functions
indexes = peakutils.indexes(betteri,
                            thres=6 * stdnoise,
                            min_dist=100,
                            thres_abs=True)  #set 6*STD of the noise as an ABSOLUTE min

# Find peaks(max).
peak_indexes = signal.argrelextrema(betteri, np.greater)
peak_indexes = peak_indexes[0]

# Find valleys(min).
valley_indexes = signal.argrelextrema(betteri, np.less)
valley_indexes = valley_indexes[0]

#the plot with detected peaks
plt.figure(figsize=(10, 6))  #zoom in
pplot(s, betteri, indexes)
plt.title('Peak Identification')