Example No. 1
def find_extrema(band, start_idx, end_idx, thresh):

    moda = []
    # find positive maxima
    pos = start_idx + find_peaks(band[start_idx:end_idx+1], height=thresh)[0]
    for i in pos:
        moda.append((i, band[i]))
    # find negative minima
    neg = start_idx + find_peaks(-band[start_idx:end_idx+1], height=thresh)[0]
    for i in neg:
        moda.append((i, band[i]))

    moda.sort()
    return moda
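
A minimal usage sketch for the function above; the signal, index range, and threshold are made up for illustration, and it assumes numpy and scipy.signal.find_peaks are available:

import numpy as np
from scipy.signal import find_peaks

band = np.sin(np.linspace(0, 6 * np.pi, 600))          # toy oscillating signal
extrema = find_extrema(band, start_idx=50, end_idx=550, thresh=0.5)
for idx, value in extrema[:5]:
    print(idx, round(value, 3))                         # absolute index and signal value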
Example No. 2
def findPeaksOneCase(binHeightL,indexToBinCenterL,binWidth,peakWidth,widthRelHeight,requiredProminence,leftPeakLimit,rightPeakLimit):
    '''Make one call to find_peaks. Peaks must be wider than peakWidth
    (which is given in units of score), more prominent than
    requiredProminence, and fall between leftPeakLimit and
    rightPeakLimit. Returns a list of
    (peakHeight, peakPos, leftExtremeOfPeakPos, rightExtremeOfPeakPos)
    tuples, one for each peak that meets the criteria. All position
    values are returned in units of score.
    '''
    peakWidthInBins = peakWidth / binWidth
    # we always measure width widthRelHeight down from peak toward base
    peakIndL, propertiesD = find_peaks(binHeightL, width = peakWidthInBins, rel_height = widthRelHeight, prominence = requiredProminence)

    # keep only peaks that fall between leftPeakLimit and rightPeakLimit
    peakPosInScoreUnitsL = []
    for i in range(len(peakIndL)):
        peakInd = peakIndL[i]
        peakHeight = binHeightL[peakInd]
        peakPos = indexToBinCenterL[peakInd]
        if leftPeakLimit < peakPos <= rightPeakLimit:
            # peak falls between the specified limits
            leftExtremeOfPeakPosInd = int(round(propertiesD["left_ips"][i]))
            leftExtremeOfPeakPos = indexToBinCenterL[leftExtremeOfPeakPosInd]
            rightExtremeOfPeakPosInd = int(round(propertiesD["right_ips"][i]))
            rightExtremeOfPeakPos = indexToBinCenterL[rightExtremeOfPeakPosInd]
            peakPosInScoreUnitsL.append((peakHeight,peakPos,leftExtremeOfPeakPos,rightExtremeOfPeakPos))
    return peakPosInScoreUnitsL
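
A hypothetical usage sketch: binning synthetic scores into a histogram and asking for peaks between assumed limits. All parameter values here are illustrative, not from the original project, and numpy/scipy imports are shown explicitly:

import numpy as np
from scipy.signal import find_peaks

scores = np.random.default_rng(0).normal(5.0, 0.5, 10000)
binHeightL, edges = np.histogram(scores, bins=100)
indexToBinCenterL = 0.5 * (edges[:-1] + edges[1:])
binWidth = edges[1] - edges[0]

peakL = findPeaksOneCase(binHeightL, indexToBinCenterL, binWidth,
                         peakWidth=0.2, widthRelHeight=0.9,
                         requiredProminence=50,
                         leftPeakLimit=0.0, rightPeakLimit=10.0)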
Example No. 3
def smooth_and_dering(outskyflux):
    outskyflux[np.isnan(outskyflux)] = 0.
    smthd_outflux = medfilt(outskyflux, 3)
    peak_inds, peak_props = find_peaks(outskyflux, height=(1500, 100000), width=(0, 20))
    heights = peak_props['peak_heights']
    ringing_factor = 1 + (heights // 1000)
    ring_lefts = (peak_inds - ringing_factor * (peak_inds - peak_props['left_bases'])).astype(int)
    peak_lefts = (peak_props['left_bases']).astype(int)
    ring_rights = (peak_inds + ringing_factor * (peak_props['right_bases'] - peak_inds)).astype(int)
    peak_rights = (peak_props['right_bases']).astype(int)

    # start from the original flux and replace only the ringing regions flanking each bright peak
    corrected = outskyflux.copy()
    for rleft, pleft in zip(ring_lefts, peak_lefts):
        corrected[rleft:pleft] = smthd_outflux[rleft:pleft]
    for rright, pright in zip(ring_rights, peak_rights):
        corrected[pright:rright] = smthd_outflux[pright:rright]
    return corrected
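
A hypothetical usage sketch with a flat synthetic sky spectrum and a single bright emission line; the values are made up, and the snippet assumes numpy plus scipy.signal's medfilt and find_peaks are imported:

import numpy as np
from scipy.signal import medfilt, find_peaks

flux = np.full(500, 100.0)
flux += 5000.0 * np.exp(-0.5 * ((np.arange(500) - 250) / 2.0) ** 2)  # narrow line, height ~5000
flux[100] = np.nan                 # NaNs are zeroed inside the function
cleaned = smooth_and_dering(flux)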
Example No. 4
def find_anchors(pos, min_count=3, min_dis=20000, wlen=800000, res=10000):

    from collections import Counter
    from scipy.signal import find_peaks, peak_widths

    min_dis = max(min_dis//res, 1)
    wlen = min(wlen//res, 20)

    count = Counter(pos)
    refidx = range(min(count)-1, max(count)+2) # extend 1 bin
    signal = np.r_[[count[i] for i in refidx]]
    summits = find_peaks(signal, height=min_count, distance=min_dis)[0]
    sorted_summits = [(signal[i],i) for i in summits]
    sorted_summits.sort(reverse=True) # sort by peak count
    
    peaks = set()
    records = {}
    for _, i in sorted_summits:
        tmp = peak_widths(signal, [i], rel_height=1, wlen=wlen)[2:4]
        li, ri = int(np.round(tmp[0][0])), int(np.round(tmp[1][0]))
        lb = refidx[li]
        rb = refidx[ri]
        if not len(peaks):
            peaks.add((refidx[i], lb, rb))
            for b in range(lb, rb+1):
                records[b] = (refidx[i], lb, rb)
        else:
            for b in range(lb, rb+1):
                if b in records:
                    # merge anchors
                    m_lb = min(lb, records[b][1])
                    m_rb = max(rb, records[b][2])
                    summit = records[b][0] # always the highest summit
                    peaks.remove(records[b])
                    break
            else: # loop terminates normally
                m_lb, m_rb, summit = lb, rb, refidx[i]
            peaks.add((summit, m_lb, m_rb))
            for b in range(m_lb, m_rb+1):
                records[b] = (summit, m_lb, m_rb)
    
    return peaks
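
A minimal usage sketch, assuming pos already holds bin indices (i.e., genomic positions integer-divided by res, which is how the function appears to treat them) and that numpy is imported as np at module level:

import numpy as np

pos = [20] * 5 + [50] * 4 + [51] * 3     # two clusters of binned positions (illustrative)
anchors = find_anchors(pos, min_count=3, min_dis=20000, wlen=800000, res=10000)
print(anchors)                           # set of (summit_bin, left_bound, right_bound) tuples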
Example No. 5
    def __init__(self, wm, fm, f_x, coefs, all_wms, bounds=None,edge_line_distance=0.,initiate=True, fibname=''):
        self.bounds = bounds
        self.coefs = np.asarray(coefs,dtype=np.float64)

        p_x = np.arange(len(f_x)).astype(np.float64)
        self.p_x = p_x
        xspectra = fifth_order_poly(p_x,*coefs)

        deviation = edge_line_distance
        good_waves = ((wm>(xspectra[0]-deviation))&(wm<(xspectra[-1]+deviation)))
        wm,fm = wm[good_waves],fm[good_waves]
        good_waves = ((all_wms>(xspectra[0]-deviation))&(all_wms<(xspectra[-1]+deviation)))
        all_wms = all_wms[good_waves]
        del good_waves

        fydat = f_x - signal.medfilt(f_x, 171)
        fyreal = (f_x - f_x.min()) / 10.0

        peaks,properties = signal.find_peaks(fydat)  # find peaks
        fxpeak = xspectra[peaks]  # peaks in wavelength
        fxrpeak = p_x[peaks]  # peaks in pixels
        fypeak = fydat[peaks]  # peaks heights (for noise)
        fyrpeak = fyreal[peaks]  # peak heights
        noise = np.std(np.sort(fydat)[: (fydat.size //2)])  # noise level
        significant_peaks = fypeak > noise
        peaks = peaks[significant_peaks]
        fxpeak = fxpeak[significant_peaks]  # significant peaks in wavelength
        fxrpeak = fxrpeak[significant_peaks]  # significant peaks in pixels
        fypeak = fyrpeak[significant_peaks]  # significant peaks height

        line_matches = {'lines': [], 'peaks_p': [], 'peaks_w': [], 'peaks_h': []}
        for j in range(wm.size):
            nearest = np.argmin(np.abs(wm[j] - fxpeak))  # index of the peak closest in wavelength
            line_matches['lines'].append(wm[j])  # line positions
            line_matches['peaks_p'].append(fxrpeak[nearest])  # closest peak (in pixels)
            line_matches['peaks_w'].append(fxpeak[nearest])  # closest peak (in wavelength)
            line_matches['peaks_h'].append(fypeak[nearest])  # closest peak (height)

        yspectra = fyreal

        self.lastind = 0

        self.j = 0
        self.px = p_x
        self.all_wms = all_wms
        self.wm = wm
        self.fm = fm

        self.xspectra = xspectra
        self.yspectra = yspectra
        self.peaks = peaks
        self.peaks_w = fxpeak
        self.peaks_p = fxrpeak
        self.peaks_h = fypeak
        self.line_matches = line_matches
        self.mindist_el, = np.where(self.peaks_w == self.line_matches['peaks_w'][self.j])
        self.fibname = fibname

        self.last = {'j':[],'lines':[],'peaks_h':[],'peaks_w':[],'peaks_p':[],'vlines':[],'wm':[],'fm':[]}
        if initiate:
            self.initiate_browser()
Example No. 6
def run_automated_calibration(coarse_comp, complinelistdict, last_obs=None, print_itters = True, only_use_peaks = True):
    precision = 1e-4
    convergence_criteria = 1.0e-5 # change in correlation value from iteration to iteration
    waves, fluxes = generate_synthetic_spectra(complinelistdict, compnames=['HgAr', 'NeAr','Xe'],precision=precision,\
                                               maxheight=10000.,minwave=3400,maxwave=7500)

    ## Make sure the information is in astropy table format
    coarse_comp = Table(coarse_comp)
    # for fib in ['r209','r711','r808']:
    #     if fib in coarse_comp.colnames:
    #         coarse_comp.remove_column(fib)
    ## Define loop params
    counter = 0

    ## Initiate arrays/dicts for later appending inside loop (for keeping in scope)
    all_coefs = {}
    all_flags = {}

    ## Loop over fiber names (strings e.g. 'r101')
    ##hack!
    cam = coarse_comp.colnames[0][0]

    if cam =='b':
        numerics = np.asarray([(16 * (9 - int(fiber[1]))) + int(fiber[2:]) for fiber in coarse_comp.colnames])
    else:
        numerics = np.asarray([(16 * int(fiber[1])) + int(fiber[2:]) for fiber in coarse_comp.colnames])

    sort_order = np.argsort(numerics)
    fibernames = np.array(coarse_comp.colnames)[sort_order]

    if cam == 'r' and int(fibernames[0][1]) > 3:
        fibernames = fibernames[::-1]
    elif cam =='b' and int(fibernames[0][1]) < 6:
        fibernames = fibernames[::-1]

    burnin = 6
    fibernames = np.concatenate([fibernames[:burnin], fibernames[1:burnin - 1][::-1], fibernames])

    for counter,fiber_identifier in enumerate(fibernames):#['r101','r408','r409','r608','r816']:
        #print("\n\n", fiber_identifier)

        ## Get the spectra (column with fiber name as column name)
        comp_spec = np.asarray(coarse_comp[fiber_identifier])

        if only_use_peaks:
            ## Find just the peaks in the calibration spectrum
            c_peak_inds, c_peak_props = find_peaks(comp_spec, height=(400, None), width=(0.1, 20), \
                                                   threshold=(None, None),
                                                   prominence=(200, None), wlen=101)

            ## create pixel array for mapping to wavelength
            c_peak_inds = np.asarray(c_peak_inds)
            # randoms = np.random.randint(low=0,high=len(comp_spec),size=120)
            randoms = np.arange(len(comp_spec))[::2]
            pix1 = np.concatenate((c_peak_inds,c_peak_inds-1,c_peak_inds+1,c_peak_inds-2,c_peak_inds+2,randoms))
            pix1 = np.unique(np.sort(pix1))
            pix1 = pix1[pix1<len(comp_spec)]
            comp_spec = comp_spec[pix1]
            #comp_spec[pix1<400] *= 100.
        else:
            pix1 = np.arange(len(comp_spec))

        abest, bbest, cbest, corrbest = 0., 0., 0., 0.
        alow, ahigh = 3000, 8000
        awidth, bwidth, cwidth = 12, 0.005, 5.0e-6

        if last_obs is None or fiber_identifier not in last_obs.keys():
            if counter == 0:
                avals = (alow, ahigh+1, 1)
                bvals = (1.0,1.01,0.1)
                cvals = (0, 1.0, 2)
                if print_itters:
                    print("\nItter 1 results, (fixing c to 0.):")
                abest, trashb, trashc, corrbest = fit_using_crosscorr(pixels=pix1, raw_spec=comp_spec,
                                                                      comp_highres_fluxes=fluxes, \
                                                                      avals=avals, bvals=bvals, cvals=cvals, \
                                                                      calib_wave_start=waves[0],
                                                                      flux_wave_precision=precision,\
                                                                      print_itters=print_itters)


                avals = (abest-40, abest+41, 0.5)
                bvals = (0.96,1.04,0.005)
                cvals = (-1.0e-5, 1.0e-5, 1.0e-6)
                if print_itters:
                    print("\nItter 1 results, (now fitting a, b, and c):")
                abest, bbest, cbest, corrbest = fit_using_crosscorr(pixels=pix1, raw_spec=comp_spec,
                                                                      comp_highres_fluxes=fluxes, \
                                                                      avals=avals, bvals=bvals, cvals=cvals, \
                                                                      calib_wave_start=waves[0],
                                                                      flux_wave_precision=precision,\
                                                                      print_itters=print_itters)

            elif counter < 3:
                compare_fiber = fibernames[counter - 1]
                [trasha, bbest, cbest, trash1, trash2, trash3] = all_coefs[compare_fiber]
                if (bbest < 0.96) or (bbest>1.04):
                    bbest = 1.0
                    cbest = 0.
                astep,bstep,cstep = 0.5, 0.04, 8.0e-6
                avals = (alow,   ahigh+astep,  astep)
                bvals = (bbest , bbest+bstep , bstep)
                cvals = (cbest , cbest+cstep , cstep)
                if print_itters:
                    print("\nItter 1 results, (fixing b and c to past vals):")
                abest, trashb, trashc, corrbest = fit_using_crosscorr(pixels=pix1, raw_spec=comp_spec,
                                                                    comp_highres_fluxes=fluxes, \
                                                                    avals=avals, bvals=bvals, cvals=cvals, \
                                                                    calib_wave_start=waves[0],
                                                                    flux_wave_precision=precision,\
                                                                      print_itters=print_itters)
            else:
                compare_fiber1 = fibernames[counter - 1]
                [a1, b1, c1, trash1, trash2, trash3] = all_coefs[compare_fiber1]
                compare_fiber2 = fibernames[counter - 2]
                [a2, b2, c2, trash1, trash2, trash3] = all_coefs[compare_fiber2]
                compare_fiber3 = fibernames[counter - 3]
                [a3, b3, c3, trash1, trash2, trash3] = all_coefs[compare_fiber3]
                abest = np.median([a1, a2, a3])
                bbest = np.median([b1, b2, b3])
                cbest = np.median([c1, c2, c3])

                if (bbest < 0.96) or (bbest>1.04):
                    bbest = 1.0
                    cbest = 0.
                astep,bstep,cstep = 0.5, 0.04, 8.0e-6
                aw = 30
                avals = (abest-aw,   abest+aw+astep,  astep)
                bvals = (bbest , bbest+bstep , bstep)
                cvals = (cbest , cbest+cstep , cstep)
                if print_itters:
                    print("\nItter 1 results, (fixing b and c to past vals):")
                abest, trashb, trashc, corrbest = fit_using_crosscorr(pixels=pix1, raw_spec=comp_spec,
                                                                    comp_highres_fluxes=fluxes, \
                                                                    avals=avals, bvals=bvals, cvals=cvals, \
                                                                    calib_wave_start=waves[0],
                                                                    flux_wave_precision=precision,\
                                                                      print_itters=print_itters)

                #awidth, bwidth, cwidth = 10,\
                #                         max([  min([  bwidth,np.max(bbest)-np.min(bbest)  ]),  1.0e-3    ]),\
                #                         max([  min([  cwidth,np.max(cbest)-np.min(cbest)  ]),  1.0e-7   ])

        else:
            [abest, bbest, cbest, trash1, trash2, trash3] = last_obs[fiber_identifier]
            awidth, bwidth, cwidth = awidth/2., bwidth/2., cwidth/2.
            if print_itters:
                print("\nItter 1 results:")
                print("--> Using previous obs value of:   a={:.2f}, b={:.5f}, c={:.2e}".format(abest, bbest, cbest))

        astep,bstep,cstep = 0.5, bwidth/10., cwidth/10.

        dcorr = 1.
        for itter in range(100):
            if print_itters:
                print("\nItter {:d} results:".format(itter+2))
            last_corrbest = corrbest
            incremental_res_div = 4.
            astep, bstep, cstep = astep/incremental_res_div, bstep/incremental_res_div, cstep/incremental_res_div
            awidth,bwidth,cwidth = awidth/incremental_res_div,bwidth/incremental_res_div,cwidth/incremental_res_div
            avals = ( abest-awidth, abest+awidth+astep, astep )
            bvals = ( bbest-bwidth, bbest+bwidth+bstep, bstep )
            cvals = ( cbest-cwidth, cbest+cwidth+cstep, cstep )
            abest_itt, bbest_itt, cbest_itt, corrbest = fit_using_crosscorr(pixels=pix1, raw_spec=comp_spec, comp_highres_fluxes=fluxes, \
                                                                avals=avals, bvals=bvals, cvals=cvals, \
                                                                calib_wave_start=waves[0], flux_wave_precision=precision,\
                                                                      print_itters=print_itters)
            if corrbest > last_corrbest:
                abest,bbest,cbest = abest_itt, bbest_itt, cbest_itt

            dcorr = np.abs(corrbest-last_corrbest)
            if dcorr < convergence_criteria:
                break

        print("\n\n", fiber_identifier)
        print("--> Results:   a={:.2f}, b={:.5f}, c={:.2e}".format(abest, bbest, cbest))

        all_coefs[fiber_identifier] = [abest, bbest, cbest, 0., 0., 0.]
        all_flags[fiber_identifier] = corrbest

    return Table(all_coefs)
Example No. 7
def find_peaks(xn, min_prominence=30, min_spacing=2):
    # thin wrapper around scipy.signal.find_peaks (note: this shadows the scipy name)
    peaks, props = signal.find_peaks(xn,
                                     prominence=min_prominence,
                                     distance=min_spacing)
    return peaks
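
A small, hypothetical check of this wrapper on synthetic data; it assumes scipy.signal is imported as signal, as the wrapper requires:

import numpy as np
from scipy import signal

xn = 50 * np.sin(np.linspace(0, 20 * np.pi, 2000))   # crests well above the default prominence of 30
print(find_peaks(xn))                                 # indices of the sine crests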
plt.plot(t, phi, label='Numerical Result') #plotting numerical result
plt.plot(t_test, phi_test, label='Analytical Result') #plotting test case
plt.title('Numerical vs. Analytical Pendulum Motion')
plt.xlabel('Time (s)')
plt.ylabel('Pendulum Angle (radians)')
plt.grid(True)
plt.legend()
#%% Find pendulum frequency as a function of initial angle
from scipy.signal import find_peaks #we will use this to find peaks
phi0s = np.linspace(0.01, 3, 25) #a range of initial angles
freqs = [] #list to contain frequencies
tmax = 30 #maximum time for integration

for phi0 in phi0s: #loop through every initial angle
    t, phi = integrate(phi0) #integrate the diff. eq. for each angle
    peaks = find_peaks(phi) #find the peaks of the solution curve
    period = t[peaks[0][1]] - t[peaks[0][0]] #subtract adjacent peaks to find period
    freq = 1/period #calculate frequency from period
    freqs.append(freq) #add frequency to list
    
plt.figure(2) #plot frequency vs. initial angle
plt.plot(phi0s, freqs)
plt.title('Frequency of Pendulum vs. Initial Angle')
plt.xlabel('Initial angle (radians)')
plt.ylabel(r'Frequency $(s^{-1})$')
#%% Damped pendulum motion

dt = 0.001 #step size
t0 = 0 #initial condition for t
phi0 = 0.1 #initial condition for the solution curve f(x)
dfdt0 = 0 #initial condition for the derivative of phi
Example No. 9
    def collect_peaks(self, voiced_list, windowed_data, window_length):
        import numpy as np
        x = float(self.N.text())
        N = int(x)

        all_peaks = []
        peak_frames = []
        for i, frame in enumerate(windowed_data):

            if i in (voiced_list):
                dft_frame = np.fft.fft(frame, N)
                ceps_frame = np.real(
                    np.fft.ifft(np.log10(np.abs(np.fft.fft(frame,
                                                           N))))).reshape(
                                                               (N, 1)).ravel()
                total = []
                total = np.array(total)

                ones = np.ones((1, 15))
                zeros = np.zeros((1, (1024 - (2 * 15))))
                lif = np.concatenate((ones, zeros, ones), axis=1).ravel()
                ceps_frame = ceps_frame * lif
                dft_frame = np.fft.fft(ceps_frame, N)
                dft_frame = abs(dft_frame)

                peaks, _ = find_peaks(dft_frame[:int(len(dft_frame) / 2)],
                                      height=0)
                all_peaks.append(peaks)
                """zeros = np.zeros((1,1024)).ravel()
                for peak in list(peaks):
                    zeros[peak]=10000
                total = zeros + dft_frame
                peak_frames.append(total)"""

        smooth_peaks = self.makeSmooth(all_peaks, window_length)
        #print("smooth_peaks",smooth_peaks)

        peak_frames = []
        k = 0
        for i, frame in enumerate(windowed_data):

            if i in (voiced_list):
                #print("frame:",i)
                dft_frame = np.fft.fft(frame, N)
                ceps_frame = np.real(
                    np.fft.ifft(np.log10(np.abs(np.fft.fft(frame,
                                                           N))))).reshape(
                                                               (N, 1)).ravel()
                total = []
                total = np.array(total)

                ones = np.ones((1, 15))
                zeros = np.zeros((1, (1024 - (2 * 15))))
                lif = np.concatenate((ones, zeros, ones), axis=1).ravel()
                ceps_frame = ceps_frame * lif
                dft_frame = np.fft.fft(ceps_frame, N)
                dft_frame = abs(dft_frame)

                peaks = smooth_peaks[k]
                #print("Peaks:", peaks)
                k = k + 1

                zeros = np.zeros((1, 1024)).ravel()
                for peak in list(peaks):
                    zeros[peak] = 1000000
                total = zeros + dft_frame
                peak_frames.append(total)

        return peak_frames
    theta = find_tilt_y(x_acc, y_acc, z_acc)

    figs, axs = plt.subplots(3, 1)
    axs[0].plot(time, y_acc)
    axs[0].set_title('18 inch pendulum')
    axs[0].set_ylabel('y acc (in/s^2)')

    axs[1].plot(time, x_acc)
    axs[1].set_ylabel('x acc (in/s^2)')

    axs[2].plot(time, theta)
    axs[2].set_ylabel('theta (degrees)')
    axs[2].set_xlabel('time (s)')

    return plt.show()


#script
x_acc_filt = sig.medfilt(x_acc)
x_pks, _ = sig.find_peaks(x_acc_filt)

new_time = time[x_pks]
period = []

for x in range(len(new_time) - 1):
    y = new_time[x + 1] - new_time[x]
    period += [y]
period_18 = np.average(period)
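
An equivalent, more compact way to compute the same average period with numpy (assuming the time and x_pks arrays above):

period_18_check = np.diff(time[x_pks]).mean()   # should match period_18 from the loop above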

#plots()
Example No. 11
    def analyze_gsr(self, threshold):
        # create list to store analysis result
        gsr_result = []
        gsr_result.append([])  # for neutral
        gsr_result.append([])  # for pleasant
        gsr_result.append([])  # for unpleasant

        # If this is not the initial user, apply an adaptive threshold & scale the signal by it
        if self.user_order != 1:
            threshold = self.find_adaptiveT()

            # Increase signal of user2 using adaptive threshold
            for i in range(len(self.videoGSR)):
                for j in range(len(self.videoGSR[i])):
                    for m in range(len(self.videoGSR[i][j])):
                        self.videoGSR[i][j][
                            m] = self.videoGSR[i][j][m] * threshold
            threshold = emotional_analysis.initialT / threshold
        else:
            emotional_analysis.initialT = threshold

        # Analyze
        for i in range(len(self.videoGSR)):
            if i == 0: print("**************** Neutral ****************\n")
            elif i == 1: print("**************** Pleasant ****************\n")
            else: print("**************** Unpleasant ****************\n")
            for j in range(len(self.videoGSR[i])):
                _peaks, _ = find_peaks(self.videoGSR[i][j], height=threshold)

                # Find Max Peak
                if len(_peaks) != 0:  # When there is at least one peak
                    peak_value = []  # list for peak's y value

                    for peak_index in _peaks:
                        peak_value.append(
                            self.videoGSR[i][j][peak_index])  # peak's y value

                    max_peak = np.max(peak_value)
                    max_peak_index = _peaks[peak_value.index(max_peak)]
                    print("max_peak :", max_peak)
                    print("max_peak_index :", max_peak_index)

                    # Exclude peaks that do not affect the result, for better accuracy
                    if max_peak_index > 9900 and max_peak_index < 12000 and (
                            max_peak >= 0.036 and max_peak < 0.037) or (
                                max_peak < 0.35 and max_peak > 0.3):
                        _peaks = []

                if (len(_peaks) >= 1):
                    print("Emotion : pleasant or unpleasant")
                    gsr_result[i].append(1)
                else:
                    print("Emotion : neutral")
                    gsr_result[i].append(0)
                plt.ylim(-1, 1)
                plt.axis(option='auto')
                plt.plot(self.videoGSR[i][j])
                plt.show()
                print("\n")

        self.gsr_result = gsr_result
        print("Neutral: ", gsr_result[0].count(0))
        print("Pleasant:", gsr_result[1].count(1))
        print("Unpleasant:", gsr_result[2].count(1))
            directory = 'wrspice_data/constant_drive/from_saeed/4jj/'
            # directory = 'wrspice_data/fitting_data'
            # dend_cnst_drv_Llft08.00pH_Lrgt22.00pH_Ide78.00uA_Idrv30.00uA_Ldi0077.50nH_taudi0775ms_dt00.1ps.dat
            I_drive = I_drive_list[ii]
            file_name = 'dend_cnst_drv_Llft{:05.2f}pH_Lrgt{:05.2f}pH_Ide{:05.2f}uA_Idrv{:05.2f}uA_Ldi0077.50nH_taudi0775ms_dt00.1ps.dat'.format(
                L_left_list[pp], L_right_list[pp], I_de_list[qq], I_drive)
            data_dict = read_wr_data(directory + '/' + file_name)

            # find peaks for each jj
            time_vec = data_dict['time']
            j_di = data_dict['v(4)']  # j_di = data_dict['v(5)']

            # j_di_peaks, _ = find_peaks(j_di, height = jph[pp][qq][ii])
            j_di_peaks, _ = find_peaks(j_di,
                                       height=min_peak_height,
                                       distance=min_peak_distance)

            initial_ind = (np.abs(time_vec - 2.0e-9)).argmin()
            final_ind = j_di_peaks[-1] * 2 * downsample_factor
            # if len(j_di_peaks) > 1:
            #     final_ind = j_di_peaks[-1]*2*downsample_factor
            # else:
            #     final_ind = initial_ind+downsample_factor*window_size
            time_vec = time_vec[initial_ind:final_ind]
            j_di = j_di[initial_ind:final_ind]

            # find fluxon generation rate
            I_di = data_dict['i(L4)']
            I_di = I_di[initial_ind:final_ind]
Example No. 13
    def split(self, plot=False):
        '''
        split returns a dataframe of the original array which excludes all arrays belonging to ELMs.
        It also returns a one-hot array with 0 for all intra-ELM indices and 1 for all ELM indices (the ones that
        were excluded from the dataframe).
        '''

        if hasattr(self, 'elmdf'):
            return self.elmdf

        pm = self.arr[1].T
        stop = self.stop_height  # default 1500
        pm_norm = (pm[:, stop:] - np.amin(pm[:, stop:])) / (
            np.amax(pm[:, stop:]) - np.amin(pm[:, stop:]))
        sums = np.sum(pm_norm, axis=1)

        peaks, props = find_peaks(sums,
                                  distance=10,
                                  prominence=(0.5, None),
                                  width=(10, 50),
                                  rel_height=0.95)
        l_elms = props['left_ips']
        r_elms = props['right_ips']
        r_elms = np.asarray([*map(np.ceil, r_elms)]).astype(int)
        l_elms = np.asarray([*map(np.floor, l_elms)]).astype(int)
        l_elms_time = self.arr[0][l_elms]
        r_elms_time = self.arr[0][r_elms]

        if plot:
            fig, ax = plt.subplots(1, 1)
            self.heatmap2d(ax=ax)
            for t in l_elms_time:
                ax.axvline(t, ymin=stop / self.arr[1].shape[0], c='red')
            for t in r_elms_time:
                ax.axvline(t, ymin=stop / self.arr[1].shape[0], c='green')
            plt.show()

        # make dict with keys, values, times
        elm_cycles = {}
        elms = list(zip(l_elms_time, r_elms_time))
        for elm_no, _ in enumerate(elms[:-1]):
            if elms[elm_no + 1][0] - elms[elm_no][
                    1] <= self.min_elm_window:  # default min_elm_window = 50
                continue

            start_ielm = index_match(self.arr[0], elms[elm_no][1])
            stop_ielm = index_match(self.arr[0], elms[elm_no + 1][0])

            for ielm_time in self.arr[0][start_ielm:stop_ielm]:
                ielm_index = np.argwhere(self.arr[0] == ielm_time)[0][0]
                elm_cycles[(elm_no, ielm_index, ielm_time,
                            self.arr[0][stop_ielm] -
                            ielm_time)] = self.arr[1].T[ielm_index]
                # elm_cycles[(elm_no, ielm_index, ielm_time, (ielm_time - self.arr[0][start_ielm]) /
                #             (self.arr[0][stop_ielm] - self.arr[0][start_ielm]))] = self.arr[1].T[ielm_index]
        index = pd.MultiIndex.from_tuples(
            elm_cycles.keys(),
            names=['ELM_No', 'Index', 'Time (ms)', 'T - ELM (ms)'])
        # index = pd.MultiIndex.from_tuples(elm_cycles.keys(), names=['ELM_No', 'Index', 'Time (ms)', '% ELM'])
        self.elmdf = pd.DataFrame(elm_cycles.values(), index=index)

        return self.elmdf
Example No. 14
def threshold_protocol(f, fi, Vm_trail, sampling_rate, trace_unit,
                       protocol_unit):
    global Threshold_voltage
    fig = plt.figure(figsize=(16, 5))
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    thresh_state = 0
    iter_num = 0
    trace_num = 0
    for v in enumerate(Vm_trail):
        iter_num += 1
        trace_number = v[0]
        trace = v[1][0]
        time = v[1][1]
        v = np.copy(trace)
        t = np.copy(time)
        if thresh_state == 0:
            Vm = str(np.around(np.mean(v[0:299]), decimals=2))
            del_Vm = str(
                np.around(
                    (np.mean(v[len(v) - 300:len(v)]) - np.mean(v[0:299])),
                    decimals=2))
            v_lowercut = v
            t = time
            v_lowercut[v_lowercut < -50] = -50
            v_smooth = butter_bandpass_filter(v_lowercut,
                                              1,
                                              500,
                                              sampling_rate,
                                              order=1)
            peaks = signal.find_peaks(x=v_smooth,
                                      height=None,
                                      threshold=None,
                                      distance=None,
                                      prominence=5,
                                      width=None,
                                      wlen=None,
                                      rel_height=0.5,
                                      plateau_size=None)
            v_cut = butter_bandpass_filter(v_smooth,
                                           50,
                                           500,
                                           sampling_rate,
                                           order=1)
            v_peaks = v_smooth
            t_peaks = t
            thr_peak = 3
            if (trace[peaks[0]] > 0).any():  # at least one peak rises above 0 mV
                thresh_state = 1
                dv = np.diff(v_smooth)
                dt = np.diff(t)
                dv_dt = dv / dt
                dv_dt_max = np.max(dv / dt)
                v_dt_max = np.where(dv_dt == dv_dt_max)[0] - 20
                t_dt_max = np.where(dv_dt == dv_dt_max)[0] - 20
                ax1.scatter(time[peaks[0] - 10],
                            trace[peaks[0] - 10],
                            color='r',
                            label='spike')
                ax1.plot(t, v, alpha=0.5, label='smoothened')
                ax1.plot(time,
                         trace,
                         alpha=0.5,
                         label=f'raw trace no. {iter_num}')
                ax1.scatter(time[t_dt_max],
                            trace[v_dt_max],
                            label="threshold",
                            color='k')
                Threshold_voltage = "firing threshold = " + str(
                    np.around(trace[v_dt_max][0], decimals=2))
                trace_num = iter_num
                plt.figtext(0.10,
                            0.0,
                            Threshold_voltage + "mV",
                            fontsize=12,
                            va="top",
                            ha="left")
                plt.figtext(0.10,
                            -0.05,
                            "membrane voltage = " + Vm + "mV",
                            fontsize=12,
                            va="top",
                            ha="left")
                plt.figtext(0.10,
                            -0.10,
                            "membrane voltage difference = " + del_Vm + "mV",
                            fontsize=12,
                            va="top",
                            ha="left")
        P_traces = protocols[0]
        iter_num_p = 0
        Threshold_injection = "NA"
        for p in P_traces:
            iter_num_p += 1
            if iter_num_p == 1:
                for i in p:
                    t_ = len(i) / sampling_rate
                    t = np.linspace(0, float(t_), len(i))
                    ax2.plot(i, color='g')

            elif iter_num_p == trace_num:
                c_inj = []
                for i in p:
                    t_ = len(i) / sampling_rate
                    t = np.linspace(0, float(t_), len(i))
                    ax2.plot(i, color='k')
                    c_inj.append(i)
                Threshold_injection = "Injected current at threshold =  " + str(
                    np.max(c_inj))

            elif iter_num_p == len(P_traces):
                for i in p:
                    t_ = len(i) / sampling_rate
                    t = np.linspace(0, float(t_), len(i))
                    ax2.plot(i, color='r')
            First_inj = mpatches.Patch(color='green', label='First injection')
            Thres_inj = mpatches.Patch(color='black',
                                       label='Threshold injection')
            Last_inj = mpatches.Patch(color='red', label='Final injection')
            ax2.legend(handles=[First_inj, Thres_inj, Last_inj])

    plt.figtext(0.55,
                0.0,
                Threshold_injection + "pA",
                fontsize=12,
                va="top",
                ha="left")
    ax1.set_title('Recording')
    ax1.set_ylabel(trace_unit)
    ax1.set_xlabel('time(s)')
    ax1.legend()
    ax2.set_title('Protocol trace')
    ax2.set_ylabel(protocol_unit)
    ax2.set_xlabel('time(s)')
    #        ax2.legend()
    plt.suptitle(f'Protocol type: {protocol_type} - {protocol_used}',
                 fontsize=15)
    plt.figtext(0.10,
                -0.15,
                f"sampling rate = {sampling_rate}",
                fontsize=12,
                va="top",
                ha="left")
    plt.figtext(0.10,
                -0.20,
                f"total recording time = {total_time}",
                fontsize=12,
                va="top",
                ha="left")
    outfile = str(outdir) + "/" + str(f.stem) + f" {protocol_used}_{fi}.png"
    plt.savefig(outfile, bbox_inches='tight')
    print("-----> Saved to %s" % outfile)
    fig = plt.close()
Example No. 15
def visualize_om_prod_overlap(prod_df,
                              om_df,
                              prod_col_dict,
                              om_col_dict,
                              prod_fldr,
                              e_cumu,
                              be_cumu,
                              samp_freq="H",
                              pshift=0.0,
                              baselineflag=True):
    """
    Creates Plotly figures of performance data overlaid with coinciding O&M tickets.
    A separate figure for each site in the production data frame (prod_df) is generated.


    Parameters
    ----------
    prod_df: DataFrame
        A data frame corresponding to the performance data after (ideally) having been
        processed by the perf_om_NA_qc and overlappingDFs functions. This data
        frame needs to contain the columns specified in prod_col_dict.

    om_df: DataFrame
        A data frame corresponding to the O&M data after (ideally) having been processed
        by the perf_om_NA_qc and overlappingDFs functions. This data frame needs
        to contain the columns specified in om_col_dict.

    prod_col_dict: dict of {str : str}
        A dictionary that contains the column names relevant for the production data

        - **siteid** (*string*), should be assigned to associated site-ID column name in
          prod_df
        - **timestamp** (*string*), should be assigned to associated time-stamp column name in
          prod_df
        - **energyprod** (*string*), should be assigned to associated production column name in
          prod_df
        - **irradiance** (*string*), should be assigned to associated irradiance column name in
          prod_df. Data should be in [W/m^2].

    om_col_dict: dict of {str : str}
        A dictionary that contains the column names relevant for the O&M data

        - **siteid** (*string*), should be assigned to column name for user's site-ID
        - **datestart** (*string*), should be assigned to column name for user's
          O&M event start-date
        - **dateend** (*string*), should be assigned to column name for user's O&M event end-date
        - **workID** (*string*), should be assigned to column name for user's O&M unique event ID
        - **worktype** (*string*), should be assigned to column name for user's
          O&M ticket type (corrective, predictive, etc)
        - **asset** (*string*), should be assigned to column name for affected asset in user's
          O&M ticket

    prod_fldr: str
        Path to directory where plots should be saved.

    e_cumu: bool
        Boolean flag that specifies whether the production (energy output)
        data is input as cumulative information ("True") or on a per time-step basis ("False").

    be_cumu: bool
        Boolean that specifies whether the baseline production data is input as cumulative
        information ("True") or on a per time-step basis ("False").

    samp_freq: str
        Specifies how the performance data should be resampled.
        String value is any frequency that is valid for pandas.DataFrame.resample().
        For example, a value of 'D' will resample on a daily basis, and a
        value of 'H' will resample on an hourly basis.

    pshift: float
        Value that specifies how many hours the performance data
        should be shifted by to help align performance data with O&M data.
        Mostly necessary when resampling frequencies are larger than an hour

    baselineflag: bool
        Boolean that specifies whether or not to display the baseline (i.e.,
        expected production profile) as calculated with the irradiance data
        using the baseline production data. A value of 'True' will display the
        baseline production profile on the generated Plotly figures, and a value of
        'False' will not.

    Returns
    -------
    list
        List of Plotly figure handles generated by function for each site within prod_df.

    """

    # assigning dictionary items to local variables for cleaner code
    om_site = om_col_dict["siteid"]
    om_date_s = om_col_dict["datestart"]
    om_date_e = om_col_dict["dateend"]
    om_wo_id = om_col_dict["workID"]
    om_wtype = om_col_dict["worktype"]
    om_asset = om_col_dict["asset"]

    prod_site = prod_col_dict["siteid"]
    prod_ts = prod_col_dict["timestamp"]
    prod_ener = prod_col_dict["energyprod"]
    prod_baseline = prod_col_dict[
        "baseline"]  # if none is provided, using iec_calc() is recommended

    # creating local dataframes to not modify originals
    prod_df = prod_df.copy()
    om_df = om_df.copy()

    # Setting multi-indices for ease of plotting
    prod_df.set_index([prod_site, prod_ts], inplace=True)
    prod_df.sort_index(inplace=True)
    om_df.set_index([om_site, om_date_s], inplace=True)
    om_df.sort_index(inplace=True)

    figs = []
    for i in prod_df.index.get_level_values(0).unique():
        # Resampling the performance data to obtain daily energy production
        # (different between cumulative and non-cumulative energy output)

        if e_cumu:
            # energy data is cumulative over time, so take difference between
            # largest and smallest value on any given day
            tstep = np.diff(prod_df.loc[i].index)[2] / np.timedelta64(1, "s")
            if samp_freq == "H" and tstep >= 3599:  # 3599 to consider roundoff error
                enrg_site = prod_df.loc[i, prod_ener].diff()
            elif samp_freq == "T" and tstep >= 59.9:
                enrg_site = prod_df.loc[i, prod_ener].diff()
            else:
                enrg_site = (prod_df.loc[i, prod_ener].resample(
                    samp_freq, label="left").max() -
                             prod_df.loc[i, prod_ener].resample(
                                 samp_freq, label="left").min())
        else:
            # energy data is given on a per TIMESTEP basis, therefore...
            enrg_site = (prod_df.loc[i,
                                     prod_ener].resample(samp_freq,
                                                         label="left").sum())

        # Resampling baselineE and assigning to separate variable to not resample entire data frame.
        if be_cumu:  # baseline energy cumulative over time
            baseline_site = (prod_df.loc[i, prod_baseline].resample(
                samp_freq, label="left").max() -
                             prod_df.loc[i, prod_baseline].resample(
                                 samp_freq, label="left").min())
        else:  # baseline energy is on a per time-step basis
            baseline_site = (prod_df.loc[i, prod_baseline].resample(
                samp_freq, label="left").sum())

        # shifting time for prod_df and baseline
        baseline_site.index += pd.Timedelta(hours=pshift)
        enrg_site.index += pd.Timedelta(hours=pshift)

        # finding where energy dips:  first by location/index (by integer, not
        # index location => use iloc), and then by converting to dates using original index
        edips_all = find_peaks(enrg_site * -1)[0]
        edips_all_dates = enrg_site.index[edips_all]

        # Finding the corresponding closest dip to each OM om_date_s for
        # each ticket:  first by location/index, and then by converting those
        # indices to dates
        edips_nearom_indices = [
            np.argmin(abs(edips_all_dates - xx)) for xx in om_df.loc[i].index
        ]
        edips_nearom_dates = edips_all_dates[edips_nearom_indices]

        # Adding the nearest performance-dip-date to the OM data frame
        om_df.loc[i, "corr_perfDip"] = edips_nearom_dates

        # Taking largest value of daily output (perf-data) to create a [fictitious]
        # plot-value/column for OM-data => "_h" implies hover text
        om_df.loc[i, "perfval_plotcol"] = enrg_site.max()
        om_start_h = 0.75  # To place StartDate points in visible region
        om_end_h = 0.5  # To place EndDate points below the StartDate points
        om_reg_h = 1.05  # To make the om-region slightly higher than the perf data

        # Correction for om-region if baseline for production data is plotted
        if baselineflag:
            om_reg_hcorr = baseline_site.max() / enrg_site.max()
        else:
            om_reg_hcorr = 1.0

        # initializing plotly-figure
        fig = plotly.graph_objects.Figure(
            layout_yaxis_range=[-5,
                                enrg_site.max() * om_reg_h * om_reg_hcorr])

        # plotting all Perf data for i-th site (captured in enrg_site)
        if samp_freq == "D":
            perf_name = "Daily Energy"
            baseline_name = "Daily Baseline"
        elif samp_freq == "H":
            perf_name = "Hourly Energy"
            baseline_name = "Hourly Baseline"
        else:
            # fall back to generic labels for other resampling frequencies
            perf_name = "Energy"
            baseline_name = "Baseline"
        fig.add_trace(
            plotly.graph_objects.Scatter(x=enrg_site.index,
                                         y=enrg_site.values,
                                         name=perf_name))
        if baselineflag:
            fig.add_trace(
                plotly.graph_objects.Scatter(x=baseline_site.index,
                                             y=baseline_site.values,
                                             name=baseline_name))

        # For loop to add shaded regions for each ticket, where left side of the region corresponds
        # to the EventStart (index of om data in this case), and right side of region corresponds
        # to the EventEnd.  These two dates make the edges of the region, x below.
        for j in range(len(om_df.loc[i])):
            fig.add_trace(
                dict(
                    type="scatter",
                    x=[om_df.loc[i].index[j], om_df.loc[i, om_date_e][j]],
                    y=om_df.loc[i, "perfval_plotcol"].values[0:2] * om_reg_h *
                    om_reg_hcorr,
                    mode="markers+lines",
                    line=dict(width=0),
                    marker=dict(size=[0, 0]),
                    fill="tozeroy",
                    fillcolor="rgba(190,0,0,.15)",
                    hoverinfo="none",
                    showlegend=False,
                    name="OM Ticket",
                ))

        # Adding EventStart Points with hover-text
        fig.add_trace(
            plotly.graph_objects.Scatter(
                x=om_df.loc[i].index,
                y=om_df.loc[i, "perfval_plotcol"].values * om_start_h,
                mode="markers",
                hovertemplate="Start: " + "%{x} <br>" + "WO#: " +
                om_df.loc[i, om_wo_id].astype(str) + "<br>" + "Type: " +
                om_df.loc[i, om_wtype].astype(str) + "<br>" + "Asset: " +
                om_df.loc[i, om_asset].fillna("Asset_NA").astype(str) +
                "<br>" + "Nearest Prod Dip:  " +
                om_df.loc[i, "corr_perfDip"].dt.strftime("%b %d, %Y"),
                name="OM_start",
            ))

        # Adding EventEnd Points with hover-text
        fig.add_trace(
            plotly.graph_objects.Scatter(
                x=om_df.loc[i, om_date_e],
                y=om_df.loc[i, "perfval_plotcol"].values * om_end_h,
                mode="markers",
                hovertemplate="End: " + "%{x} <br>" + "WO#: " +
                om_df.loc[i, om_wo_id].astype(str) + "<br>" + "Type: " +
                om_df.loc[i, om_wtype].astype(str) + "<br>" + "Asset: " +
                om_df.loc[i, om_asset].fillna("Asset_NA").astype(str),
                name="OM_end",
            ))

        # Setting y-axes and title
        fig.update_yaxes(title_text="Energy Delivered (kWh)")
        fig.update_layout(title_text="Site: " + i)

        # appending fig object to figs list
        figs.append(fig)
        # Saving Figure
        fig.write_html(prod_fldr + "/" + i + ".html")

    # Resetting index before completion of function since DFs are mutable
    prod_df.reset_index(inplace=True)
    om_df.reset_index(inplace=True)

    return figs
Example No. 16
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import find_peaks

x = (np.sin(2 * np.pi *
            (2**np.linspace(2, 10, 1000)) * np.arange(1000) / 48000) +
     np.random.normal(0, 1, 1000) * 0.15)

print(x)
peaks, _ = find_peaks(x, distance=20)
peaks2, _ = find_peaks(x, prominence=1)  # BEST!
peaks3, peak3_props = find_peaks(x, height=0, prominence=1)
peaks4, _ = find_peaks(
    x, threshold=0.4
)  # Required vertical distance to its direct neighbouring samples, pretty useless

print(peak3_props["peak_heights"])
print(peak3_props["prominences"])
print(peak3_props["left_bases"])

left_bases = peak3_props["left_bases"]
bottom_values = x[left_bases]
print("peaks", x[peaks3])
print("bottom val", bottom_values)
peak_heights = x[peaks3] - bottom_values
print("peak height", peak_heights, " mean ", peak_heights.mean())

plt.subplot(2, 2, 1)
plt.plot(peaks, x[peaks], "xr")
plt.plot(x)
plt.legend(["distance"])
Example No. 17
def MUSIC(inputs, options, log=logging):
    """MUSIC

    implementation of Multiple SIgnal Classification (MUSIC) algorithm as described in [3].

    Inputs:
    in:
        in.array:               String containing array name: 'eigenmike', 'dicit', 'dummy', 'benchmark2'
        in.y:                   Data matrix
        in.fs:                  Sampling frequency [Hz]
        in.timestamps:          Vector of timestamps at which DoA estimates must be PROVIDED
        in.time:                6xT matrix of system clock times
        in.array.rotation:      Rotation matrix describing array orientation in 3D for each timestamp
        in.array.mic:           Matrix describing microphone positions for each timestamp
    opts:                     Settings structure generated by init()

    Outputs:
    out:
        out.source:             N x 1 struct array, one element for each N estimated sources. In this function: N = 1
                                (single-source)
        out.source(src_idx).azimuth:      Tx1 vector of azimuth estimates, where T is the number of timestamps
                                          in in.timestamps
        out.source(src_idx).elevation:    Tx1 vector of elevation estimates
        out.source(src_idx).time:         Tx1 vector of system time values of estimates (must be identical to in.time!)
        out.source(src_idx).timestamps:   Tx1 vector of timestamps of estimates (must be identical to in.timestamps!)

    References:
        [1]    J. Benesty, C. Jingdong, and I. Cohen, Design of Circular Differential Microphone Arrays.
               Springer, 2015.
        [2]    I. Cohen, J. Benesty, and S. Gannot, Speech Processing in Modern Communication, vol. 3. Berlin,
               Heidelberg: Springer Science & Business Media, 2009.
        [3]    H. L. Van Trees, Detection, Estimation, and Modulation Theory, Optimum Array Processing. John Wiley
               & Sons, 2004.
    """
    az = np.linspace(-np.pi, np.pi, 73)  # Resolution of azimuth: 5 dg
    el = np.linspace(0, np.pi, 19)  # Resolution of elevation: 10 dg

    if inputs.array_name == 'dicit':
        subarray = np.array([6, 7, 9])
        ref_mic = 1
    elif inputs.array_name == 'benchmark2':
        subarray = np.arange(12).astype(int)
        ref_mic = 1
    elif inputs.array_name == 'eigenmike':
        subarray = np.arange(32).astype(int)
        ref_mic = 1
    elif inputs.array_name == 'dummy':
        subarray = np.arange(4).astype(int)
        ref_mic = 1
    else:
        log.error('Array type {} does not exist'.format(inputs.array_name))
        sys.exit(1)

    # MUSIC
    numMic = subarray.shape[0]

    fftPoint = 1024
    frame_duration = 0.03
    frames_per_block = 100  # Number of frames per block
    block_step = 10

    frame_length = int(frame_duration * inputs.fs)

    # -> OptiTracker sampling rate

    # Unique timestamps:
    opti_timestamps, unique_idx = np.unique(inputs.timestamps,
                                            return_index=True)
    opti_rotation = inputs.array.rotation[:, unique_idx]
    opti_mics = inputs.array.mic[:, unique_idx]

    duration = inputs.y.shape[0]
    inputs.y = np.asfortranarray(inputs.y)

    # -> STFT
    X = np.stack([
        librosa.stft(inputs.y[:, ch],
                     n_fft=fftPoint,
                     hop_length=frame_length // 4,
                     win_length=fftPoint,
                     window='hamming',
                     pad_mode='reflect') for ch in subarray
    ],
                 axis=1)
    X = X.transpose(2, 0, 1)
    frame_timestamp = librosa.samples_to_time(np.arange(
        0, duration, frame_length // 4),
                                              sr=inputs.fs)
    fft_freq = librosa.fft_frequencies(sr=inputs.fs, n_fft=fftPoint)

    nframe = frame_timestamp.shape[0]

    # Check that the frame times and Optitracker times intersect:
    opti_timestamps = opti_timestamps[opti_timestamps < frame_timestamp[-1]]

    # -> MUSIC
    # Make blocks out of frames:
    frame_srt = np.arange(0, nframe - 1, block_step)
    frame_end = np.arange(frames_per_block, nframe, block_step)
    frame_end = np.pad(frame_end, (0, frame_srt.shape[0] - frame_end.shape[0]),
                       'constant',
                       constant_values=nframe - 1)
    nblocks = frame_srt.shape[0]

    block_timestamps = np.mean(
        [frame_timestamp[frame_srt], frame_timestamp[frame_end]], axis=0)
    # Bandlimit signals to avoid spatial aliasing / low freq effects:
    # NOTE: This is crucial for the DICIT array, the other arrays can be
    # evaluated for fullband signals.
    valid_freq_idx = (800 < fft_freq) * (fft_freq < 1400)
    valid_freq = fft_freq[valid_freq_idx]
    valid_X = X[:, valid_freq_idx]

    _power = np.zeros([fftPoint // 2 - 1, az.shape[0], el.shape[0], nblocks])
    for block_idx in range(nblocks):
        for freq_idx in range(valid_freq.shape[0]):  # fftPoint/2-1
            # Block of FFT frames:
            # print(freq_idx, block_idx)
            data_block = np.squeeze(
                valid_X[frame_srt[block_idx]:frame_end[block_idx],
                        freq_idx, :])
            if data_block.shape[0] > 1:  # ensure svd does not result in empty U
                # Find nearest OptiTrac sample:
                _diff = block_timestamps[block_idx] - opti_timestamps
                closest_opti_idx = np.argmin(_diff)
                # Autocorrelation
                Rxx = np.dot(data_block.T, np.conj(data_block))

                # Rxx = U * S * U^H, see [3] eq. (9.32)
                U, _, _ = np.linalg.svd(Rxx)

                # [3] eq. (9.37), using spectral sparsity assumption:
                # Signal subspace is 1 dimensional if 1 source is active, hence D = 1
                Un = U[:, 1:]

                for a_idx in range(az.shape[0]):
                    for e_idx in range(el.shape[0]):
                        az_idx = az[a_idx]
                        el_idx = el[e_idx]
                        # [2] eq (8.35) modified s.th. az = 0 and el = pi/2 result in eta = [0 1 0],
                        #  i.e., pointing along y-axis:
                        eta = np.array([
                            -np.sin(el_idx) * np.sin(az_idx),
                            np.sin(el_idx) * np.cos(az_idx),
                            np.cos(el_idx)
                        ])[:, None]
                        rot_eta = np.dot(
                            np.squeeze(opti_rotation[:, closest_opti_idx, :]),
                            eta)

                        # [2] eq (8.36) - TDoA:
                        tau = 1 / options.c * np.dot(
                            rot_eta.T,
                            np.squeeze(opti_mics[:, closest_opti_idx,
                                                 subarray]) -
                            np.repeat(
                                opti_mics[:,
                                          closest_opti_idx:closest_opti_idx +
                                          1, subarray[ref_mic]],
                                numMic,
                                axis=1))

                        # [2] eq (8.34) - Steering vector:
                        SV = np.exp(1j * 2 * np.pi * valid_freq[freq_idx] *
                                    tau).T

                        # [3] eq. (9.44):
                        _power[freq_idx, a_idx, e_idx,
                               block_idx] = 1. / np.sum(
                                   np.abs(
                                       np.linalg.multi_dot(
                                           [SV.conj().T, Un,
                                            Un.conj().T, SV])))
    # Sum spectra over all frequencies:
    _spectrum = _power.sum(0).transpose(2, 0, 1)

    # -> Find DOA
    # NOTE: Single-source assumption
    azimuth = np.full([nblocks], np.nan)
    elevation = np.full([nblocks], np.nan)

    for block_idx in range(nblocks):
        if _spectrum.shape[2] == 1:
            # find_peaks branch for a single elevation: pick the strongest spectral peak
            spec_1d = _spectrum[block_idx, :, 0]
            locs, _ = find_peaks(spec_1d)
            if len(locs) > 0:
                azimuth[block_idx] = az[locs[np.argmax(spec_1d[locs])]]
        else:
            mesh_spec = np.squeeze(_spectrum[block_idx, :, :])

            # Extract regional maxima:
            lm = maximum_filter(mesh_spec, 1)
            # Global maximum:
            loc = np.argmax(lm)
            loc_az, loc_el = np.unravel_index(loc, mesh_spec.shape)
            azimuth[block_idx] = az[loc_az]
            elevation[block_idx] = el[loc_el]

    # -> Interpolate MUSIC estimates to the required (OptiTrack) timestamps
    # Use left=np.NaN to be compatible with the MATLAB code.
    interp_azimuth = np.interp(inputs.timestamps,
                               block_timestamps,
                               azimuth,
                               left=np.NaN)
    interp_elevation = np.interp(inputs.timestamps,
                                 block_timestamps,
                                 elevation,
                                 left=np.NaN)

    # Output 1 - interpolated
    N_sources = 1
    out = Namespace()
    out.source = list()
    for _ in range(N_sources):
        noNaN = ~np.isnan(interp_azimuth)
        interp_azimuth[noNaN] = wrapToPi(interp_azimuth[noNaN])
        noNaN = ~np.isnan(interp_elevation)
        interp_elevation[noNaN] = wrapToPi(interp_elevation[noNaN])
        results = dict(
            year=inputs.time.dt.year,
            month=inputs.time.dt.month,
            day=inputs.time.dt.day,
            hour=inputs.time.dt.hour,
            minute=inputs.time.dt.minute,
            second=inputs.time.dt.second + inputs.time.dt.microsecond / 1e6,
            timestamps=inputs.timestamps,
            azimuth=interp_azimuth,
            elevation=interp_elevation,
        )
        out.source.append(results)
    return out
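A minimal self-contained sketch of the MUSIC pseudospectrum scan used above, on a hypothetical 8-element uniform linear array with a single narrowband source; the geometry, noise level and angle grid are illustrative assumptions, not part of the pipeline above.

import numpy as np
from scipy.signal import find_peaks

# Hypothetical 8-element uniform linear array, half-wavelength spacing
M, d_over_lambda = 8, 0.5
true_az = np.deg2rad(40.0)
m = np.arange(M)

def steering(az):
    # plane-wave steering vector for the ULA (broadside convention)
    return np.exp(1j * 2 * np.pi * d_over_lambda * m * np.sin(az))

# Simulate snapshots of one source plus noise and form the sample covariance
rng = np.random.default_rng(0)
snapshots = 200
s = rng.standard_normal(snapshots) + 1j * rng.standard_normal(snapshots)
X = np.outer(steering(true_az), s)
X += 0.1 * (rng.standard_normal(X.shape) + 1j * rng.standard_normal(X.shape))
Rxx = X @ X.conj().T / snapshots

# Noise subspace: drop the first eigenvector (one active source, D = 1)
U, _, _ = np.linalg.svd(Rxx)
Un = U[:, 1:]

# MUSIC pseudospectrum over an azimuth grid, then peak picking
az_grid = np.deg2rad(np.linspace(-90, 90, 361))
spec = np.array([1.0 / np.abs(steering(a).conj() @ Un @ Un.conj().T @ steering(a))
                 for a in az_grid])
peaks, _ = find_peaks(spec)
best = peaks[np.argmax(spec[peaks])]
print("estimated azimuth [deg]:", np.rad2deg(az_grid[best]))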
Ejemplo n.º 18
0
	def __init__(self, src=None, ax=None, alt_views=None, interpolate=True, norm=True, crop=None,
				 fix_rotations=False):
		"""crop: only take first x seconds of clip"""

		if src is None:
			Tk().withdraw()  # we don't want a full GUI, so keep the root window from appearing
			src = askopenfilename(initialdir=DataSources.c3d_data)  # Load correct file
			self.name = src.split("\\")[-1][:-4]

		else:
			self.name = src.split("\\")[-1][:-4]

		print("LOADING", src)
		if not os.path.isfile(src):
			src = os.path.join(DataSources.c3d_data, src)  # If relative reference, append to directory

		with open(src, 'rb') as handle:
			reader = c3d.Reader(handle)
			self._reader = reader

			# Extract meta data from file
			n_markers = reader.header.point_count  # Work out the total number of markers
			n_frames = reader.header.last_frame - reader.header.first_frame + 1

			freq = reader.header.frame_rate

			labels = str(reader.groups["POINT"].params["LABELS"].bytes)[
					 2:]  # Extract text of all labels, separated by many spaces
			# Convert separating spaces into commas
			for spaces in [25 - i for i in range(15)]:
				labels = labels.replace(" " * spaces, ", ")
			marker_labels = labels.split(", ")[:n_markers]

			data = []
			for i, points, analog in reader.read_frames():
				frame_data = []
				# Analog gives force plate data (I think)

				for n, point in enumerate(points):
					x, y, z, *_ = point  # Points gives x, y, z, error, n_cameras
					frame_data.append([x, y, z])

				data.append(frame_data)

			def interpolate_zero_vals(data):
				"""Gievn a time series of (x,y,z) points, identifies any ranges of (0,0,0) values, and replaces these
				with interpolated values either side of the zero range """
				is_zero = lambda arr: np.linalg.norm(arr) == 0
				for n, pos in enumerate(data):
					if n == 0: continue
					if n + 2 >= len(data): continue

					if is_zero(pos):
						# if 0 found, search ahead to find the range for which this goes on, and then begin interpolation

						n_start = n
						n_finish = n + 1

						while n_finish + 2 <= len(data) and is_zero(data[n_finish]):
							n_finish += 1

						# Note: n_start = index of first zero value
						# n_finish = index of first NON ZERO value

						# now the start and finish of the zero range is found
						start = Vector(*data[n_start - 1])
						finish = Vector(*data[n_finish])

						# Perform interpolation
						for i in range(n_finish - n_start):
							data[n_start + i] = (start + (finish - start) * (i + 1) / (n_finish - n_start + 1))

						data = interpolate_zero_vals(data)  # Run recursively to restart search for zeros
						break  # end this search within this run of the function

				return data

			# Split up into individual marker time series, interpolate each individually, and then combine together
			# for processing
			if interpolate: data = list(zip(*[interpolate_zero_vals(list(marker_data)) for marker_data in zip(*data)]))

			# REMOVE ROTATIONAL MODES OF PAWS. IDENTIFY SIGNIFICANT TROUGHS, SET TO (0, 0, 0)
			data = np.array(data)

			if fix_rotations:
				for idx in [n for n, i in enumerate(marker_labels) if "paw" in i]:
					vert_data = data[:, idx, 2]
					troughs, properties = signal.find_peaks(-vert_data, prominence=5, rel_height=.1, width=(None, 5))
					# print(signal.peak_widths(-vert_data, troughs, rel_height=0.1)[0])

					window = 10  # n frames to look before for start of trough
					for t in troughs:
						# go backwards to find where paw starts rotating (change in sign of grad)
						prev_cusps = np.where(np.diff(vert_data[t - window:t]) > 0)[0]  # cusps before trough
						if len(prev_cusps) == 0: continue  # skip ones where previous cannot be found
						rot_start = t - window + 1 + prev_cusps[-1]  # idx of start of trough

						next_higher = np.where(vert_data[rot_start + 1:] >= vert_data[rot_start])[
							0]  # next time vert is at a higher value
						if len(next_higher) == 0: continue
						rot_end = rot_start + 1 + next_higher[0]

						# replace trough with linear interpolation
						x = np.arange(rot_end - rot_start)
						m = (vert_data[rot_end] - vert_data[rot_start]) / (rot_end - rot_start)

						vert_data[rot_start: rot_end] = vert_data[rot_start] + m * x

			# if idx == 23:
			# plt.plot(vert_data)
			# plt.scatter(troughs, vert_data[troughs])
			# plt.show()

			if crop:
				data = data[:int(freq * crop)]

			super().__init__(ax, data, freq=freq, marker_labels=marker_labels, norm=norm)

			# self.n_frames = i

			self.get_bounds()

			### FIXES FOR MOCAP DATA THAT IS MISSING MARKERS:
			if "set_1" in src:
				self.create_averaged_marker("front top", "left front upper",
											"right front upper")  # form new joint at the front top of the system
				self.create_averaged_marker("right front ankle", "right front knee",
											"right front paw")  # right front ankle missing from data, for now just add as avg

			if "set_2" in src:
				self.create_averaged_marker("front top", "left front upper",
											"right front upper")  # form new joint at the front top of the system
				self.create_averaged_marker("rear top", "right rear upper",
											"left rear upper")  # form new joint at the front top of the system
				self.create_averaged_marker("left rear ankle", "left rear knee",
											"left rear paw")  # right front ankle missing from data, for now just add as avg
Ejemplo n.º 19
0
    def analyze_hr(self, threshold_hr, distance):
        # create list to store analysis result
        hr_result = []
        hr_result.append([])  # for pleasant
        hr_result.append([])  # for neutral
        hr_result.append([])  # for unpleasant

        for i in range(len(self.videoHR)):
            if i == 0: print("**************** Neutral ****************\n")
            elif i == 1: print("**************** Pleasant ****************\n")
            else: print("**************** Unpleasant ****************\n")
            for j in range(len(self.videoHR[i])):
                error = 0  # there is error -> 1 / not error -> 0

                # Peak detection for compute heart rate
                _peaks, _ = find_peaks(self.videoHR[i][j],
                                       height=threshold_hr,
                                       distance=distance)

                peak_time = []
                for t in range(1, len(_peaks)):
                    peak_time.append(60 / ((_peaks[t] - _peaks[t - 1]) / 2000))
                    # If the time between the two peaks is greater than 0.05,
                    # it's highly likely that the peak wasn't found properly.
                    # Therefore, the threshold should be lowered.
                    if peak_time[t - 1] > 0.05: error += 1  # if

                if error != 0:  # If there are problems -> set threshold to -0.03 & run peak detection again
                    _peaks, _ = find_peaks(self.videoHR[i][j],
                                           height=-0.03,
                                           distance=distance)

                    # Compute heart rate using distance between peaks
                    peak_time = [
                        60 / ((_peaks[t] - _peaks[t - 1]) / 2000)
                        for t in range(1, len(_peaks))
                    ]

                # Find Max Peak
                if len(_peaks) != 0:  # When there is more than one peak
                    peak_value = []  # list for peak's y value

                    for peak_index in _peaks:
                        peak_value.append(
                            self.videoHR[i][j][peak_index])  # peak's y value

                    max_peak = np.max(peak_value)
                    max_peak_index = _peaks[peak_value.index(max_peak)]
                    print("max_peak :", max_peak)
                    print("max_peak_index :", max_peak_index)

                if (len(_peaks) >= 1):  # there is peak
                    plt.plot(_peaks, peak_value, "or")
                plt.plot(self.videoHR[i][j])
                plt.show()  # original data

                self.videoHR[i][j] = [
                    60 / ((_peaks[t] - _peaks[t - 1]) / 2000)
                    for t in range(1, len(_peaks))
                ]

                peak_sec = [
                    p / 2000 for p in _peaks if p / 2000 <= 3
                ]
                peak_befo = peak_sec.index(
                    np.max(peak_sec))  # peak index before 3s
                peak_after = peak_befo + 1  # peak index after 3s

                # Compute average using heart rate of 3 seconds before event
                avg_heart = np.mean(self.videoHR[i][j][:peak_befo])

                # Compute deviation of 6 seconds after event using heart rate average
                self.videoHR[i][j] = self.videoHR[i][j] - avg_heart

                plt.axis('auto')
                plt.plot(self.videoHR[i][j])  # calculated heart rate
                plt.show()
                print("\n")
Y = np.loadtxt("data/martix_mu1_vs_a_gamma_IC4.txt")

# -------------------------------------- Result visualization -----------------------------------

plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.weight'] = 'bold'
plt.rcParams['font.sans-serif'] = ['Tahoma']
plt.rcParams.update({'font.size': 22})

fig = plt.figure(2)
# color1 = 'tab:red'
color1 = 'k'
size_of_fig = 8
fig.set_size_inches(size_of_fig + 6, size_of_fig + 2)
for i in range(5):
    peaks = sign.find_peaks(Y[:, i])
    for j in peaks[0]:
        Y[j, i] = (Y[j - 1, i] + Y[j + 1, i]) / 2.
# for i in np.arange(0,100, 1):

# tests = [0, 1, 5, 10, 20]
tests = [0, 24, 48, 72, 96]
for i in [0, 1, 2, 3, 4]:
    plt.plot(a_s, Y[:, i], color=color1, linewidth=4)
    # if i ==0:
    # plt.text(np.max(a_s) - 3 * (a_s[-1] - a_s[-2]), np.max(Y[:, i])+0.0008,
    #              '$\gamma={0}$'.format(round(A_s[tests[i]], 8)))

    plt.text(
        np.max(a_s) - 5. * (a_s[-1] - a_s[-2]), np.max(Y[:, i]),
        r'$\gamma={0}$'.format(round(gF_s[tests[i]], 8)))
Ejemplo n.º 21
0
def plot_peaks(df, abase, distance=120, height=""):
    y = df[abase]
    peaks, _ = find_peaks(df[abase], distance=distance)
    plt.plot(df[abase].index, df[abase])
    plt.plot(peaks, y[peaks], "x")
    plt.show()
Ejemplo n.º 22
0
import numpy as np
from intervals import Interval
from matplotlib import rcParams
import matplotlib.pyplot as plt
import math as m
from PIL import Image
from scipy.optimize import curve_fit
import matplotlib.patches as mpatches
from scipy.signal import find_peaks


a = np.loadtxt('Q3b.csv', delimiter=",", skiprows=1)

x=a[:,0]
y=a[:,1]

peaks, _ = find_peaks(y, distance=100)


fig, ax = plt.subplots()

rcParams['font.family']='sans-serif'
rcParams["legend.fancybox"] = False
rcParams['font.sans-serif']=['Helvetica']

#ax.set_xlim(0, 0.7)
ax.set_ylim(0, 260)
ax.plot(x, y,color='black', linewidth=1.0, label='data')
ax.plot(x[peaks], y[peaks], "x")
ax.set_xlabel("Distance(pixels)",size=14)
ax.set_ylabel("Intensity(a.u.)",size=14)
Ejemplo n.º 23
0
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

df = pd.read_csv('ecg1.csv')
ecg = df['ECG (500 Hz)']

# normalize
ecg = ecg - np.min(ecg)
ecg = ecg / np.max(ecg)
# plt.plot(ecg)
# plt.show()

# Find R-wave peaks
# threshold = 0.3
# aux = ecg > threshold
# aux = ecg*aux
# plt.plot(aux)
# plt.show()
# ind_peaks, _ = signal.find_peaks(aux)
ind_peaks, _ = signal.find_peaks(ecg, height=0.3)
print(ind_peaks)
plt.plot(ecg)
plt.plot(ind_peaks, ecg[ind_peaks], 'ro')
plt.show()
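A short follow-up sketch: converting the detected R-peak indices into instantaneous and mean heart rate, assuming the 500 Hz sampling rate named in the column header and at least two detected beats.

fs = 500                                  # Hz, per the 'ECG (500 Hz)' column
rr_sec = np.diff(ind_peaks) / fs          # R-R intervals in seconds
bpm = 60.0 / rr_sec                       # instantaneous heart rate per interval
print('mean HR: %.1f bpm' % bpm.mean())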
Ejemplo n.º 24
0
i = 1
while i < len(
        time
):  # update position, velocity, and acceleration using previous values and time step
    thetaNext, wNext, accNext = update_system(acc[i - 1], theta[i - 1],
                                              w[i - 1], time[i - 1], time[i])
    theta.append(thetaNext)
    w.append(wNext)
    acc.append(accNext)
    i += 1

short_theta = np.array(
    theta[::20]
)  #finds peaks, filters out so only every 20th point is considered
short_time = time[::20]
theta_pks, _ = sig.find_peaks(short_theta)
acc_pks, _ = sig.find_peaks(acc)

time_of_pks = short_time[
    theta_pks]  #makes x and y values so the peaks can be graphed
short_theta_pks = short_theta[theta_pks]

period = []
avg_period = []
average = 0

i = 1
while i < len(time_of_pks):  #filters out peaks that are too close together
    if abs(time_of_pks[i - 1] - time_of_pks[i]) > .75:
        period.append(time_of_pks[i])
    i = i + 1
def find_features(X):

    ############################# FEATURE DANIEL #####################################

    # Analyse the peaks in the signal
    peaks = []
    peaks_index = []
    peaks_heights = []
    max_height = np.zeros((len(X), 1))
    mean_height = np.zeros((len(X), 1))
    var_height = np.zeros((len(X), 1))
    for ecg in range(len(X)):
        peaks.append(find_peaks(X[ecg, :], height=0.7,
                                distance=10))  #find peaks
        peaks_index.append(peaks[ecg][0])
        peaks_heights.append(peaks[ecg][1]['peak_heights'])
        if peaks_heights[ecg].size == 0:
            peaks_heights[ecg] = [0]
        else:
            peaks_heights[ecg] = peaks_heights[ecg]
        max_height[ecg, 0] = np.max(
            peaks_heights[ecg])  #FEATURE: Max height of peak
        mean_height[ecg, 0] = np.mean(
            peaks_heights[ecg])  #FEATURE: Mean height of peak
        var_height[ecg, 0] = np.var(
            peaks_heights[ecg])  #FEATURE: Variance in peak height

    peaks_int_max = np.zeros((len(peaks_index), 1))
    peaks_int_min = np.zeros((len(peaks_index), 1))
    peaks_int_med = np.zeros((len(peaks_index), 1))
    peaks_int_mean = np.zeros((len(peaks_index), 1))
    peaks_int = []
    for ecg in range(len(peaks_index)):
        A = []
        for peak in range(len(peaks_index[ecg]) - 1):
            A.append(peaks_index[ecg][peak + 1] - peaks_index[ecg][peak])
        peaks_int.append(np.array(A))
        if peaks_int[ecg].size == 0:
            peaks_int[ecg] = [0]
        else:
            peaks_int[ecg] = peaks_int[ecg]
        peaks_int_max[ecg, 0] = np.max(
            peaks_int[ecg])  # FEATURE: maximum time difference between peaks
        peaks_int_min[ecg, 0] = np.min(
            peaks_int[ecg])  # FEATURE: minimum time difference between peaks
        peaks_int_med[ecg, 0] = np.median(
            peaks_int[ecg])  # FEATURE: median time difference between peaks
        peaks_int_mean[ecg, 0] = np.mean(
            peaks_int[ecg])  #FEATURE: mean time difference between peaks

    # Find the signals with 'thick' peaks - peaks that last long
    peaks = []
    peaks_index_thick = []
    peaks_heights_thick = []
    peaks_thick = np.zeros((len(peaks_index), 1))
    for ecg in range(len(X)):
        peaks.append(find_peaks(X[ecg, :], height=0.3,
                                width=20))  #find thick peaks
        peaks_index_thick.append(peaks[ecg][0])
        peaks_heights_thick.append(peaks[ecg][1]['peak_heights'])
        if peaks_heights_thick[ecg].size == 0:
            peaks_heights_thick[ecg] = [0]
            peaks_index_thick[ecg] = [0]
        else:
            peaks_heights_thick[ecg] = peaks_heights_thick[ecg]
            peaks_index_thick[ecg] = peaks_index_thick[ecg]
        if peaks_heights_thick[ecg][0] == 0:
            peaks_thick[ecg] = 0
        else:
            peaks_thick[ecg] = [
                len(peaks_index_thick[ecg])
            ]  #FEATURE: number of peaks thicker than 20 and above 0.3 in height

    #FEATURE: Total length of heartbeat
    X_time = np.zeros((len(X), 1))

    for column in range(len(X)):
        row = 20
        while row < 185:
            if X[column, row] == X[column, row + 1] and X[
                    column, row + 1] == X[column, row + 2] and X[column,
                                                                 row + 2] == 0:
                X_time[column] = row
                row = 200
            else:
                row += 1
        if X_time[column] == 0:
            X_time[column] = 188

    ############################# FEATURE TRISTAN #####################################

    X_deriv = np.gradient(X, axis=1)

    # Parameters of the feature extraction
    signal_over_threshold = 0.7
    signal_under_threshold = 0.2
    last_samples = 125
    first_samples = 25
    local_maximum_threshold = 0.95
    local_maximum_distance = 20
    last_values_deriv = 75

    # 1. Overall value of signal over "signal_over_threshold"
    overall_value_signal_over = np.asarray(
        np.transpose(np.sum(np.where(X > signal_over_threshold, X, 0),
                            axis=1)))
    overall_value_signal_over = np.expand_dims(overall_value_signal_over,
                                               axis=1)

    # 2. Overall value of signal under "signal_under_threshold"
    overall_value_signal_under = np.transpose(
        np.sum(np.where(X < signal_under_threshold, X, 0), axis=1))
    overall_value_signal_under = np.expand_dims(overall_value_signal_under,
                                                axis=1)

    # 3. Overall value after 125 samples
    overall_value_after = np.transpose(np.sum(X[:, last_samples:], axis=1))
    overall_value_after = np.expand_dims(overall_value_after, axis=1)

    # Local maximum statistic
    peaks = []
    peaks_index = []
    peaks_index_average = []
    peaks_heights = []
    peaks_number = []
    for ecg in range(len(X)):
        peaks.append(
            find_peaks(X[ecg, first_samples:],
                       height=local_maximum_threshold,
                       distance=local_maximum_distance))  #find peaks
        peaks_index.append(peaks[ecg][0])
        peaks_index_average.append(np.mean(peaks[ecg][0]))
        peaks_heights.append(peaks[ecg][1]['peak_heights'])
        peaks_number.append(len(peaks[ecg][0]))

    # 4. The number of local maximums
    # Peaks number

    # 5. The average index of the local maximums
    # Peaks index average

    # 6. The height of the first local minimum
    height_first_local_min = np.transpose(np.amin(X[:, :first_samples],
                                                  axis=1))
    height_first_local_min = np.expand_dims(height_first_local_min, axis=1)

    # 7. The minimum value of the derivative
    min_value_deriv = np.transpose(np.amin(X_deriv, axis=1))
    min_value_deriv = np.expand_dims(min_value_deriv, axis=1)

    # 8. The variance of the last values of the derivative
    variance_last_values = np.transpose(
        np.var(X_deriv[:, last_values_deriv:], axis=1))
    variance_last_values = np.expand_dims(variance_last_values, axis=1)

    dataset1 = np.hstack(
        (max_height, mean_height, var_height, peaks_int_max, peaks_int_min,
         peaks_int_med, peaks_int_mean, peaks_thick, X_time))

    dataset2 = np.hstack(
        (overall_value_signal_over, overall_value_signal_under,
         overall_value_after, height_first_local_min, min_value_deriv,
         variance_last_values))

    return np.hstack((dataset1, dataset2))
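A minimal usage sketch for find_features, assuming each row of X is a 188-sample heartbeat segment normalized to [0, 1] and zero-padded at the tail, which is what the peak thresholds and the length-of-heartbeat loop above expect; the demo signal is synthetic.

import numpy as np

# Two hypothetical normalized heartbeat segments: 188 samples, zero-padded tails
t = np.arange(188)
beat = 0.2 + 0.8 * (np.exp(-((t - 20) / 4.0) ** 2) + np.exp(-((t - 95) / 4.0) ** 2))
beat[150:] = 0.0                       # the trailing zeros the length feature looks for
X_demo = np.vstack([beat, np.roll(beat, 10)])

features = find_features(X_demo)
print(features.shape)   # (2, 15): 9 peak/interval features + 6 threshold/derivative features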
Ejemplo n.º 26
0
def harmonicNoiseRatio(signal,
                       freq,
                       freq_lower=50,
                       freq_higher=450,
                       filter_autocorrelation=False,
                       epsilon=1e-9):
    r"""Computes Harmonic-Noise-Ratio  (HNR) using autocorrelation approximation.
    First, it computes the autocorrelation of ``signal`` in Fourier space.
    Then, local maxima in the autocorrelation are found, the HNR is computed for each,
    and the maximum HNR with the corresponding fundamental frequency is returned.

    .. math::
        R_{xx} = \frac{1}{N} \sum_{k=l}^{N-1} x[k]x[k-l]

    .. math::
        HNR = \frac{R_{xx}[T_0]}{R_{xx}[0]-R_{xx}[T_0]}

    :param signal: audio signal
    :type signal: numpy.ndarray
    :param freq: sampling rate/frequency, e.g. 44100
    :type freq: int
    :param freq_lower: lower frequency cut-off, defaults to 50
    :type freq_lower: int, optional
    :param freq_higher: higher frequency cut-off, defaults to 450
    :type freq_higher: int, optional
    :return: HNR [dB], F0 [Hz] (estimated from the autocorrelation peak)
    :rtype: tuple(float, float)
    """
    # Create timestamps for signal ``s``
    time = np.arange(0, len(signal) / freq, 1 / freq)

    # rFFT from Signal
    fft = np.fft.rfft(signal)

    # Corresponding frequencies
    freqs = np.fft.rfftfreq(len(signal), 1 / freq)

    # Autocorrelation using fft
    R = np.fft.irfft(fft.conj() * fft)

    # Remove higher harmonics for peak detection
    if filter_autocorrelation:
        R_fft = np.fft.fft(R)
        R_freq = np.fft.fftfreq(R.size, d=1 / freq)
        R_fft[abs(R_freq) > freq_higher] = 0
        Rp = np.fft.ifft(R_fft).real
    else:
        Rp = R.copy()

    # Find peaks in autocorrelation
    p = find_peaks(Rp, width=20)[0]
    # Remove first peak artifacts
    p = p[1:] if len(p) and p[0] < 10 else p
    # Remove unnatural peaks (minima and frequencies above freq_higher)
    p = [i for i in p if 1 / time[i] < freq_higher and R[i] > 0]

    # Compute HNR for peaks that are higher than minimum frequency (freq_lower)
    hnr = [
        10 * np.log10(R[pi] / (R[0] - R[pi])) for pi in p
        if 1 / time[pi] > freq_lower
    ]

    return np.max(hnr), 1 / time[p[np.argmax(hnr)]]
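A small usage sketch for harmonicNoiseRatio on a synthetic voiced signal, assuming the function's scipy/numpy imports are available in the module; the 16 kHz rate, 150 Hz fundamental and noise level are illustrative only.

import numpy as np

fs = 16000
t = np.arange(0, 0.5, 1 / fs)                         # half a second of signal
f0 = 150.0                                            # hypothetical fundamental
voiced = np.sin(2 * np.pi * f0 * t) + 0.3 * np.sin(2 * np.pi * 2 * f0 * t)
noisy = voiced + 0.05 * np.random.default_rng(0).standard_normal(t.size)

hnr_db, f0_est = harmonicNoiseRatio(noisy, fs)
print('HNR: %.1f dB, F0 estimate: %.1f Hz' % (hnr_db, f0_est))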
Ejemplo n.º 27
0
add_style(markersize=12, fontsize=16)
#sav.saveplot(os.path.join(folder, 'Velocity.pdf'))

#%% Revisiting Velocity

# I'll be trying three things: 

#%% 1. How constant is the period along a single measurement

widths = []
for k, (d, duty) in enumerate(zip(data, duty_cycle)):
    dt = d[1,0]
    read_data = d[:,1]
    
    # Calculate and store widths (periods)
    peaks = find_peaks(np.diff(read_data), prominence=1, height=2)[0]
    peaks = peaks.astype(float) * dt
    w = np.diff(peaks)
    widths.append(w)

    # Remove outliers    
    mean = np.mean(w)
    std = np.std(w)
    w = w[w < mean + 4*std]
    
    # Create, plot and format histogram
    f, ax = plt.subplots()
    ax.hist(w) #8 bins
    ax.legend({'Count = {}'.format(len(w))})
    ax.set_title('{}/{}: duty={}%'.format(k+1, len(data), duty))
    

# %%
def power_law(x, a, b):
    return a * x**(-1 / b)


def exponential(x, a, b):
    return a * np.exp(-x / b)


# %%
fig, ax = plt.subplots(3, 2)
ax = ax.reshape(6, )
for i, (r, g6, d) in enumerate(zip(r_all, G6, d_all)):
    peaks, props = signal.find_peaks(g6, width=10)
    r_peaks = r[peaks].real
    g6_peaks = g6[peaks].real
    popt_power, pcov_power = optimize.curve_fit(power_law,
                                                r_peaks,
                                                g6_peaks,
                                                p0=[g6_peaks.max(), 4])
    popt_exp, pcov_exp = optimize.curve_fit(exponential,
                                            r_peaks,
                                            g6_peaks,
                                            p0=[g6_peaks.max(), 20])

    power_fit = power_law(r, *popt_power)
    exp_fit = exponential(r, *popt_exp)
    ax[i].plot(r, g6, label='data')
    ax[i].plot(r, power_fit, label='power fit')
Ejemplo n.º 29
0
for filename in filenames:
    bead_size = 5  #Using smallest size, since even 10mm can make 5mm changes

    t, x, y, z, u, vx, vy, vz, uvx, uvy, uvz = read_file("../Data/" + filename)

    x = smooth_data(x, 10)  # Smoothing data to make peak finding better

    #Can be used to plot if wanted
    #    t = t[10000:30000]
    #    x = x[10000:30000]
    #    plt.figure()
    #    plt.plot(t,x)

    indices, properties = find_peaks(
        x, prominence=1, distance=30, width=5
    )  # Finds peaks in x, prominence main setting, distance and width settings reduce false peaks
    #    plt.plot(t[indices], x[indices] , 'ro')

    count = 0
    for i in range(len(indices)):  # For each peak
        try:
            if (np.absolute(x[indices[i]] - x[indices[i + 1]]) > bead_size
                ):  # maybe consider checking 2 points away for overall trends
                count += 1
#                plt.plot(t[indices[i]],x[indices[i]], 'gs')
        except IndexError:  # Can't be bothered to figure out indexing
            pass
    print(filename)
    print("Total cycles:", len(indices))
    print("Transitions:", count)
Ejemplo n.º 30
0
            soc_wi = np.copy(social_whisk_index).astype(int)
            sol_wi = np.random.permutation(solo_whisk_index)
            sol_wi = sol_wi[0:len(social_whisk_index)].astype(int)
        elif solo_whisk_index.shape[0] < social_whisk_index.shape[0]:
            sol_wi = np.copy(solo_whisk_index).astype(int)
            soc_wi = np.random.permutation(social_whisk_index)
            soc_wi = soc_wi[0:len(solo_whisk_index)].astype(int)

        GTM_frames_solo = l_mouse_frames[sol_wi, :, :]
        GTM_frames_social = l_mouse_frames[soc_wi, :, :]
        # GTM_social = np.mean(GTM_frames_social, axis=0)
        # GTM_solo = np.mean(GTM_frames_solo, axis=0)
        # GTM_social[~l_mouse_mask] = -100
        # GTM_solo[~l_mouse_mask] = -100

        whisk_events = sio.find_peaks(roi_gradient_signal, height=threshold)[0]
        social_duration = (end_interaction - start_interaction) / 28.815
        social_whisk_rate = 60 * np.intersect1d(
            whisk_events, interaction_time).size / social_duration

        solo_duration = (roi_gradient_signal.size - np.arange(
            start_first_translation, end_second_translation).size) / 28.815
        solo_whisk_rate = 60 * np.intersect1d(whisk_events,
                                              solo_time).size / solo_duration

        g2.create_dataset('SIGMA', data=SIGMA)
        g2.create_dataset('social_whisk_rate', data=social_whisk_rate)
        g2.create_dataset('solo_whisk_rate', data=solo_whisk_rate)
        g2.create_dataset('mask', data=l_mouse_mask)
        g2.create_dataset('roi_gradient_signal', data=roi_gradient_signal)
        g2.create_dataset('threshold', data=threshold)
Ejemplo n.º 31
0
    highres_flux += get_me_highres(coef,dat,highres)
    coef = bcoefs2.data[fib]
    dat = bspec2.data[fib]
    highres_flux += get_me_highres(coef,dat,highres)

highres_flux /= (2*len(fibers))
# plt.figure(); plt.plot(highres,highres_flux); plt.show()
from linebrowser import LineBrowser

iraf_vac = air_to_vacuum(iraf_air)
salt_vac = air_to_vacuum(salt_air)
# mock_coefs = (highres[0],highres[1]-highres[0],0.,0.,0.,0.)
# browser = LineBrowser(salt_vac, np.zeros(len(salt_vac)), highres_flux, mock_coefs, iraf_vac)
# browser.plot()
peaks, properties = signal.find_peaks(highres_flux, height=(2000, None), width=(1, 100000), \
                                                   threshold=(None, None),
                                                   prominence=(1000, None), wlen=10000000)  # find peaks
fxpeak = highres[peaks]  # peaks in wavelength
fypeak = highres_flux[peaks]  # peaks heights (for noise)
# noise = np.std(np.sort(highres_flux)[: (highres_flux.size // 2)])  # noise level
# significant_peaks = fypeak > noise
# peaks = peaks[significant_peaks]
# fxpeak = fxpeak[significant_peaks]  # significant peaks in wavelength
# fypeak = fypeak[significant_peaks]  # significant peaks height

pw,ph = fxpeak,fypeak

overlap = 0.2
to_delete = []
listw, listh = pw.tolist(), ph.tolist()
for ii, (w, h) in enumerate(zip(pw, ph)):
Ejemplo n.º 32
0
def calc_sinogram(x, y, hist_bins=300, quant_size=0, drop_90=False, plot=False):
    import numpy as np
    import pandas as pd
    import warnings
    from skimage.transform import radon  # , rescale, iradon, iradon_sart, hough_line
    from scipy import signal

    max_num=max(max(x),max(y))
    if quant_size:
        bins=np.arange(-(max_num/quant_size+1),(max_num/quant_size+1))*quant_size+quant_size*0.5
    if not quant_size or len(bins)>hist_bins:
        bins=np.linspace(-max_num, max_num, hist_bins, endpoint=True)
    H, xedges, yedges = np.histogram2d(x, y, bins=[bins]*2)  # in order to estimate the angle, we need the space to be square
    H = H[::-1].T
    if 0:  # maybe to get more resolution
        a=np.vstack([H,H])
        H=np.hstack(([a,a]))
    theta = np.linspace(0., 180., max(H.shape), endpoint=False)
    if 1:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            sinogram = radon(H, theta=theta, circle=True)
        # sinogram=pd.DataFrame(sinogram, columns=np.linspace(0,180,sinogram.shape[0], endpoint=True)).stack().idxmax()[1]
        sinogram = pd.DataFrame(sinogram, columns=np.linspace(-90, 90, sinogram.shape[0], endpoint=False)).rename_axis('angle', axis=1).rename_axis('offset', axis=0)  # angle is from the horizon, you will get from -90 to 90
    else:  # hough gives very noisy output
        out, angles, d = hough_line(H, theta=theta)
        sinogram = pd.DataFrame(out, columns=angles, index=d).rename_axis('angle', axis=1).rename_axis('offset', axis=0)  # angle is from the horizon, you will get from -90 to 90

    if drop_90:
        sinogram[-90] = 0
    if 0:
        # this will tell us the angle. we can also use idxmax on the sinogram,
        # but the best is to find the angle that has the highest std,
        # because max is by a single line and std is by all lines
        # we cannot take the angle sum, because they all sum to big numbers
        radon_estimated_angle = sinogram.std().idxmax()
    else:
        # max_peaking = signal.find_peaks(sinogram.std(), distance=sinogram.shape[1]//100)[0]
        max_peaking = signal.find_peaks(sinogram.std(), distance=4)[0].tolist()  # we want to find peaks so all values next to the peak will be dropped
        max_peaking += [0, sinogram.shape[0]-1]  # also adding the edges because they are not counted as peaks
        max_peaking=np.array(max_peaking)
        # print(max_peaking)
        max_peaking=sinogram.std().iloc[max_peaking]
        # max_peaking=max_peaking[~max_peaking.index.isin([-90, 0, -45, 90, 45])]  # we multiply the pattern to the left right up and down so obviously we tend to get those angles
        radon_estimated_angle=max_peaking.idxmax()
    slop=np.tan(np.deg2rad(radon_estimated_angle))
    if np.isnan(radon_estimated_angle):  # you will get this if all quantized values are the same
        radon_estimated_angle=0  # ignore this - putting angle to rotate the data so it will take the original option and not all the other duplications
    sinogram_dict=dict(image=H,
                       sinogram=sinogram,
                       angle_by_std=radon_estimated_angle,
                       angle_by_idxmax=sinogram.stack().idxmax()[1],
                       overall_sinogram_std=sinogram.std().std(),
                       slop=slop,
                       y_avg=y.mean(),
                       x_avg=x.mean(),
                       actual_bin_size=len(bins))
    if plot:
        plot_sinogram(sinogram_dict['image'], sinogram_dict['sinogram'], sinogram_dict['angle_by_std'], slop=sinogram_dict['slop'])

    if 0:
        empty_sinogram = sinogram.copy().astype(int)*0
        empty_sinogram.loc[sinogram.index.to_series().median(), radon_estimated_angle]=1
        empty_sinogram=empty_sinogram.fillna(0)
        line_image=iradon(empty_sinogram.values, circle=True)# , interpolation='linear')
        line_image.max()
        line_image.min()
        line_image.mean()
        abc=pd.DataFrame(line_image)
        # line_image=(line_image > line_image.mean()) * 255
        # line_image = iradon_sart(sinogram.values, theta=theta)
        plt.close()
        plt.imshow(line_image, cmap=plt.cm.Greys_r)
        fig=abc.figure(kind='heatmap', colorscale='Greys', title='radon inverse - real world')
        fig=empty_sinogram.figure(kind='heatmap', colorscale='Greys', title='radon inverse - real world')
        py.offline.plot(fig)

    # print('sinogram %g' % radon_estimated_angle)  # trying to get all max numbers. you cannot do sum because all angles summed to the same values
    # this will tell us how much the image is just lines or just noise
    # below 3 is low correlation. and you have up to 3.5 to some cases that are at the middle
    # print('overall sinogram std %g' % sinogram.std().std())
    return sinogram_dict
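A minimal usage sketch for calc_sinogram, assuming scattered (x, y) points lying roughly along a line; the printed angle_by_std should recover the line's orientation up to the sign/offset convention of the radon transform used inside.

import numpy as np

rng = np.random.default_rng(0)
r = rng.uniform(-50, 50, 2000)                       # positions along a hypothetical line
angle = np.deg2rad(30.0)
x = r * np.cos(angle) + rng.normal(0, 1.0, r.size)
y = r * np.sin(angle) + rng.normal(0, 1.0, r.size)

result = calc_sinogram(x, y, hist_bins=200)
print('estimated angle from horizon:', result['angle_by_std'])
print('slope:', result['slop'])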
Ejemplo n.º 33
0
	def handle(self, *args, **options):
		# vars
		experiment_name = options['expt']
		series_name = options['series']

		if experiment_name!='' and series_name!='':
			experiment = Experiment.objects.get(name=experiment_name)
			series = experiment.series.get(name=series_name)

			# cell_instance = series.cell_instances.get(t=48, cell__pk=4)
			# cell_instance = series.cell_instances.get(t=49, cell__pk=9)
			# cell_instance = series.cell_instances.get(pk=198)

			# load mask image
			# 1. for each cell mask, load mask image
			outlines = {}
			colours = ['red','green','blue','purple']
			for i, cell_mask in enumerate(cell_instance.masks.filter(channel__name__contains='zunique')):
				mask_img = cell_mask.load()

				# get edge
				edge_img = edge_image(mask_img)

				# get list of points
				points_r, points_c = np.where(edge_img)
				points = [list(lm) for lm in list(zip(points_r, points_c))]

				sorted_points = roll_edge_v1(points)

				# plot distances in order
				cell_centre = np.array([cell_mask.r, cell_mask.c])
				distances = np.array([np.linalg.norm(cell_centre - np.array(p)) for p in sorted_points])
				argmin = np.argmin(distances)
				distances = np.roll(distances, -argmin)
				distances = gf(distances, sigma=2)
				# plt.plot(distances)
				# plt.scatter(cell_mask.c, cell_mask.r)

				# find peaks in distance array
				peaks = find_peaks(distances, np.array([9]))
				# plt.scatter(peaks, [distances[peak] for peak in peaks])

				# roll back to find true peak positions
				true_peaks = np.array(peaks) + argmin
				true_peaks[true_peaks>=len(sorted_points)] -= len(sorted_points)

				# find end point of protrusions
				protrusion_end_points = [sorted_points[peak] for peak in true_peaks]

				for protrusion_end_point in protrusion_end_points:
					print('new protrusion for cell mask {} for cell instance {}'.format(cell_mask.pk, cell_instance.pk))
					relative_end_point = cell_centre - np.array(protrusion_end_point)
					print(cell_centre, protrusion_end_point)
					print('length from centre: {} microns'.format(np.linalg.norm(relative_end_point * series.scaling())))
					print('orientation: {} degrees'.format(180 / math.pi * math.atan2(relative_end_point[0], relative_end_point[1])))

				# plt.scatter([sorted_points[peak][1] for peak in true_peaks], [sorted_points[peak][0] for peak in true_peaks])

				# plot outlines to check
				plt.plot([point[1] for point in sorted_points], [point[0] for point in sorted_points], label='radius: 2')
				# plt.scatter(points_c, points_r, label=cell_mask.channel.name, color=colours[i])

			# plt.legend()
			# plt.axis('equal')
			plt.show()

		else:
			print('Please enter an experiment')
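A self-contained sketch of the protrusion idea above using only scipy: edge points of a blob are ordered around the centroid (an angular sort stands in for roll_edge_v1, which is an assumption), their distance profile is smoothed, and peaks in that profile mark protrusion tips.

import numpy as np
from scipy.ndimage import gaussian_filter1d
from scipy.signal import find_peaks

# Hypothetical star-shaped blob: radius modulated by angle gives four protrusions
theta = np.linspace(0, 2 * np.pi, 720, endpoint=False)
radius = 20 + 6 * np.cos(4 * theta)
edge_r = 100 + radius * np.sin(theta)
edge_c = 100 + radius * np.cos(theta)

centre = np.array([edge_r.mean(), edge_c.mean()])
# order edge points by angle around the centroid (stand-in for roll_edge_v1)
angles = np.arctan2(edge_r - centre[0], edge_c - centre[1])
order = np.argsort(angles)
distances = np.hypot(edge_r[order] - centre[0], edge_c[order] - centre[1])

# shift so the minimum sits at index 0, smooth, then pick peaks
argmin = np.argmin(distances)
profile = gaussian_filter1d(np.roll(distances, -argmin), sigma=2)
peaks, _ = find_peaks(profile, prominence=3)
print('protrusions found:', len(peaks))          # expect 4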
Ejemplo n.º 34
0
            img_piece = img_piece[0:ini_H, :]
            #img_piece = cv2.medianBlur(img_piece,5)
            #long =img_piece
            long = numpy.append(img_piece, img_piece, axis=1)
            long = numpy.append(long, img_piece, axis=1)
            B_line = numpy.sum(long, axis=0) / long.shape[0]
            B_line = signal.medfilt(B_line, 7)
            B_line = numpy.convolve(B_line,
                                    numpy.ones((200, )) / 200,
                                    mode='valid')
            B_line = B_line / max(B_line)
            len_b = len(B_line)
            border1 = numpy.zeros(len_b) + 1 - 0.5 * (1 - min(B_line))
            border2 = numpy.ones(len_b) * 1.1
            peaks, _ = find_peaks(B_line,
                                  height=(border1, border2),
                                  distance=30,
                                  prominence=0.05)
            start = peaks[1]
            # to search border
            for k in range(ini_W):
                this = B_line[start + k]
                if this <= border1[start + k]:
                    right = start + k + 100
                    break
            for k in range(ini_W):
                this = B_line[start - k]
                if this <= border1[start - k]:
                    left = start - k + 100
                    break

            if Display_sig_flag == True:
Ejemplo n.º 35
0
    # f[:bound_low] = 0
    # f[bound_high:-bound_high] =0
    # f[-bound_low:] = 0
    # plt.subplot(152),plt.imshow(np.log(1+np.abs(f)),"gray"),plt.title("fft2")
    f = butter_bandpass_filter(f, low, high, fps)
    # plt.show()
    fft_maximus = []

    for j in range(f.shape[0]):
        if low <= frequencies[j] <= high:
            fftMap = abs(f[j])
            fft_maximus.append(fftMap.max())
        else:
            fft_maximus.append(0)

    peaks, properties = signal.find_peaks(fft_maximus)
    max_peak = -1
    max_freq = 0

    for peak in peaks:
        if fft_maximus[peak] > max_freq:
            max_freq = fft_maximus[peak]
            max_peak = peak

    print(frequencies[max_peak] * 60)
    iff = fftpack.ifft(f, axis=0)
    iff = np.abs(iff)
    iff *= 100
    kd[i] += iff
    # print(find_heart_rate(f,frequencies,low,high))
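A compact, self-contained version of the frequency-domain rate estimate above, assuming a 30 fps intensity trace and a 0.8-3 Hz heart-rate band; masking the FFT bins stands in for the bandpass filter, which is an assumption.

import numpy as np
from scipy.signal import find_peaks

fps = 30.0
t = np.arange(0, 20, 1 / fps)                   # 20 s of samples
trace = np.sin(2 * np.pi * 1.2 * t) + 0.3 * np.random.randn(t.size)   # ~72 bpm pulse

spectrum = np.abs(np.fft.rfft(trace))
freqs = np.fft.rfftfreq(t.size, d=1 / fps)

low, high = 0.8, 3.0                            # plausible heart-rate band in Hz
band = np.where((freqs >= low) & (freqs <= high), spectrum, 0.0)

peaks, _ = find_peaks(band)
best = peaks[np.argmax(band[peaks])]
print('estimated rate: %.1f bpm' % (freqs[best] * 60))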
Ejemplo n.º 36
0
	def find_protrusions(self):
		# load mask image and find edge
		mask_img = self.load()
		edge_img = edge_image(mask_img)

		# get list of points that lie on the edge: points_rc
		points_r, points_c = np.where(edge_img)
		points_rc = [list(lm) for lm in list(zip(points_r, points_c))]

		# sort points using a fixed radius
		count, max_count, sorted_points = roll_edge_v1(points_rc)

		if count<max_count:
			# get cell centre and calculate distances of edge points from this point
			cell_centre = np.array([self.r, self.c])
			distances = np.array([np.linalg.norm(cell_centre - np.array(p)) for p in sorted_points])

			# smooth to aid peak finding and shift the points to leave the smallest distance at zero
			argmin = np.argmin(distances)
			distances = np.roll(distances, -argmin)
			distances = gf(distances, sigma=2)

			# find peaks
			peaks = find_peaks(distances, np.array([9]))

			# shift peaks back to their original positions
			true_peaks = np.array(peaks) + argmin
			true_peaks[true_peaks>=len(sorted_points)] -= len(sorted_points) # rotate

			# find end points
			protrusion_end_points = [sorted_points[peak] for peak in true_peaks]

			# create new protrusion for each end point
			for protrusion_end_point in protrusion_end_points:
				relative_end_point = cell_centre - np.array(protrusion_end_point)

				# parameters
				r = relative_end_point[0]
				c = relative_end_point[1]
				length_from_centre = np.linalg.norm(relative_end_point * self.series.scaling()) # in microns
				length_from_mean = length_from_centre - np.mean(distances)
				orientation_from_horizontal = math.atan2(relative_end_point[0], relative_end_point[1])

				# print(self.cell_instance.pk, self.pk, r, c, length_from_centre, orientation_from_horizontal)

				protrusion, protrusion_created = self.protrusions.get_or_create(experiment=self.experiment,
																																				series=self.series,
																																				cell=self.cell,
																																				cell_instance=self.cell_instance,
																																				channel=self.channel,
																																				region=self.region,
																																				region_instance=self.region_instance,
																																				r=r,
																																				c=c)
				if protrusion_created:
					protrusion.length = length_from_centre
					protrusion.length_from_mean = length_from_mean
					protrusion.orientation = orientation_from_horizontal
					protrusion.save()

			return 'success', len(protrusion_end_points)
		else:
			return 'success', 0
    # zero timestamps and convert from ms to seconds
    timestamps_zero_sec = (timestamps_raw - timestamps_raw.iloc[0, 0]) / 1000

    #%%

    prom_multiplier = 2.91
    width_min = 5
    width_max = 100
    distance_min = 10
    wlen_limit = 160

    # ================================= pDCoCG PEAK DETECTION =================================

    threshold_prom = prom_multiplier * (data_column.mad())
    peak_ind, properties = find_peaks(data_column,
                                      prominence=threshold_prom,
                                      width=(width_min, width_max),
                                      distance=distance_min)
    peak_raw_times = timestamps_raw.iloc[peak_ind].values
    peak_times = timestamps_zero_sec.iloc[peak_ind].values
    peak_times_norm = peak_times / float(timestamps_zero_sec.iloc[-1])
    peak_yval = data_column[peak_ind]
    peak_prom = peak_prominences(data_column, peak_ind, wlen=wlen_limit)[0]
    peak_prom_norm = peak_prom / max(peak_prom)
    contour_min = peak_yval - peak_prom
    hwidths = peak_widths(data_column,
                          peak_ind,
                          rel_height=0.5,
                          wlen=wlen_limit)
    # hwidths outputs 4 arrays: (1) the widths for each peak in samples (2) width heights (3&4) interpolated positions
    # of left and right intersection points of a horizontal line at the respective evaluation height
    hwidths_yval = (hwidths[1])
Ejemplo n.º 38
0
    def handle_user_agreement(self, xp, hierarchy):
        """
        For CN only.
        The CN client is bugged: the User Agreement and Privacy Policy may pop up again even if you have already agreed to them.
        This method scrolls to the bottom and clicks AGREE.

        Returns:
            bool: If handled.
        """

        if server.server == 'cn':
            area_wait_results = self.get_for_any_ele([
                XPS('//*[@text="sdk协议"]', xp, hierarchy),
                XPS('//*[@content-desc="sdk协议"]', xp, hierarchy)
            ])
            if area_wait_results is False:
                return False
            agree_wait_results = self.get_for_any_ele([
                XPS('//*[@text="同意"]', xp, hierarchy),
                XPS('//*[@content-desc="同意"]', xp, hierarchy)
            ])
            start_padding_results = self.get_for_any_ele([
                XPS('//*[@text="隐私政策"]', xp, hierarchy),
                XPS('//*[@content-desc="隐私政策"]', xp, hierarchy),
                XPS('//*[@text="用户协议"]', xp, hierarchy),
                XPS('//*[@content-desc="用户协议"]', xp, hierarchy)
            ])
            start_margin_results = self.get_for_any_ele([
                XPS('//*[@text="请滑动阅读协议内容"]', xp, hierarchy),
                XPS('//*[@content-desc="请滑动阅读协议内容"]', xp, hierarchy)
            ])

            test_image_original = self.device.image
            image_handle_crop = crop(
                test_image_original,
                (start_padding_results[2], 0, start_margin_results[2], 720))
            # Image.fromarray(image_handle_crop).show()
            sims = color_similarity_2d(image_handle_crop,
                                       color=(182, 189, 202))
            points = np.sum(sims >= 255)
            if points == 0:
                return False
            sims_height = np.mean(sims, axis=1)
            # pyplot.plot(sims_height, color='r')
            # pyplot.show()
            peaks, __ = find_peaks(sims_height, height=225)
            if len(peaks) == 2:
                peaks = (peaks[0] + peaks[1]) / 2
            start_pos = [
                (start_padding_results[2] + start_margin_results[2]) / 2,
                float(peaks)
            ]
            end_pos = [
                (start_padding_results[2] + start_margin_results[2]) / 2,
                area_wait_results[3]
            ]
            logger.info("user agreement position find result: " +
                        ', '.join('%.2f' % _ for _ in start_pos))
            logger.info("user agreement area expect:          " +
                        'x:963-973, y:259-279')

            self.device.drag(start_pos,
                             end_pos,
                             segments=2,
                             shake=(0, 25),
                             point_random=(0, 0, 0, 0),
                             shake_random=(0, -5, 0, 5))
            AGREE = Button(area=agree_wait_results,
                           color=(),
                           button=agree_wait_results,
                           name='AGREE')
            self.device.click(AGREE)
            return True
Ejemplo n.º 39
0
    def block_extent_routine(self, args):

        self._block_pos_temp = np.array([0, 0])
        self._block_found_temp = False

        original_block_angle = np.arctan2(
            (args['target'][1] - self.current_position(grid=False)[1]),
            (-args['target'][0] +
             self.current_position(grid=False)[0])) - np.pi / 4
        #print(original_block_angle)

        sweep_angles = np.linspace(original_block_angle - 0.4,
                                   original_block_angle + 0.4, 20)

        trigger_value = 0.2

        if self._angle_index < len(sweep_angles) - 1:
            if sweep_angles[self._angle_index] > np.pi:
                desired_angle = sweep_angles[self._angle_index] - np.pi * 2
            elif sweep_angles[self._angle_index] < -np.pi:
                desired_angle = sweep_angles[self._angle_index] + np.pi * 2
            else:
                desired_angle = sweep_angles[self._angle_index]

            if self._angle_index == 0:
                self.set_heading(desired_angle, 5)
            else:
                self.set_heading(desired_angle, 15)

            current_orientation = np.array([
                -np.cos(self.current_angle()), 0, -np.sin(self.current_angle())
            ])
            desired_orientation = np.array(
                [-np.cos(desired_angle), 0, -np.sin(desired_angle)])
            cross_start = -np.cross(current_orientation,
                                    desired_orientation)[1]
            if np.abs(cross_start) < 0.01:
                #print(self._angle_index)
                self._angle_index = self._angle_index + 1
                self._dist_array.append(self.robot_data[self.robot_id][5])
        else:

            # rising_mask = ((np.array(self._dist_array[:-1]) < trigger_value) & (np.array(self._dist_array[1:]) > trigger_value))
            # falling_mask = ((np.array(self._dist_array[:-1]) > trigger_value) & (np.array(self._dist_array[1:]) < trigger_value))
            # print(np.flatnonzero(rising_mask) +1)
            # print(np.flatnonzero(falling_mask) +1)
            #print(np.array(self._dist_array)*-1)
            peaks, properties = find_peaks(np.array(self._dist_array) * -1,
                                           height=(-0.35, 0),
                                           width=3)
            #print(self._dist_array[peaks[0]])

            if len(peaks) == 0:
                print('no block found')
                self._angle_index = 0
                self._dist_array = []
                self.set_state('idle')
                self._block_found_temp = False
                args['env'].update_block((args['target'], 0, True))

            else:
                self._block_found_temp = True

                sweep1 = sweep_angles[int(properties["left_ips"][0])]
                sweep2 = sweep_angles[int(properties["right_ips"][0])]

                exact_angle = (sweep1 + sweep2) / 2

                if exact_angle + np.pi / 4 > np.pi:
                    exact_angle = exact_angle + np.pi / 4 - np.pi * 2
                elif sweep_angles[peaks[0]] + np.pi / 4 < -np.pi:
                    exact_angle = exact_angle + np.pi / 4 + np.pi * 2
                else:
                    exact_angle = exact_angle + np.pi / 4

                cross = self.set_heading(exact_angle, 5, info=True)
                #print(exact_position)
                if np.abs(cross) < 0.04:
                    exact_position = convert_to_grid_coords(
                        transform_local_coords([
                            self.robot_data[self.robot_id][1],
                            self.robot_data[self.robot_id][2], exact_angle
                        ], [0, self._dist_array[peaks[0]]]))
                    self._angle_index = 0
                    self._dist_array = []
                    self._block_pos_temp = np.array(
                        [int(exact_position[0]),
                         int(exact_position[1])])
                    self.set_state('go_to_target',
                                   target=self._block_pos_temp,
                                   early_stop=2,
                                   grip=0,
                                   block=True,
                                   empty=False,
                                   look_at=True,
                                   speed=0.6)
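A small sketch of the extent logic above on synthetic data, assuming a distance sweep with a single dip: find_peaks runs on the negated sweep and the interpolated left_ips/right_ips indices are mapped back onto the sweep angles to bracket the object.

import numpy as np
from scipy.signal import find_peaks

# Hypothetical sweep: 20 headings, distance dips where an object sits
sweep_angles = np.linspace(-0.4, 0.4, 20)
dist = np.full(20, 0.5)
dist[8:13] = [0.4, 0.25, 0.2, 0.25, 0.4]        # the block lowers the reading

peaks, properties = find_peaks(-dist, height=(-0.35, 0), width=3)
if len(peaks):
    left = sweep_angles[int(properties["left_ips"][0])]
    right = sweep_angles[int(properties["right_ips"][0])]
    centre = 0.5 * (left + right)
    print('block spans %.3f .. %.3f rad, centre %.3f rad' % (left, right, centre))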
Ejemplo n.º 40
0
import numpy as np
from scipy.optimize import curve_fit
from scipy.signal import find_peaks
import matplotlib.pyplot as plt


def e(x, a, b, c):
    return a * np.exp(b * x) + c


t, U = np.genfromtxt('data.txt', unpack=True)
t *= 1e3

maxs, properties = find_peaks(U, prominence=1, distance=100)
mins, properties = find_peaks(-U, prominence=1, distance=100)

x = np.linspace(0, plt.xlim()[1])

parameters_max, pcov_max = curve_fit(e, t[maxs], U[maxs])
print(parameters_max, np.sqrt(np.diag(pcov_max)), sep='\n')

parameters_min, pcov_min = curve_fit(e, t[mins], U[mins])
print(parameters_min, np.sqrt(np.diag(pcov_min)), sep='\n')

plt.plot(t, U, 'k-', label='Gedämpfte Schwingung')

plt.plot(x, e(x, *parameters_max), label='Obere Einhüllende')
plt.plot(t[maxs], U[maxs], 'rx', label='Maxima')

plt.plot(x, e(x, *parameters_min), label='Untere Einhüllende')
plt.plot(t[mins], U[mins], 'bx', label='Minima')
Ejemplo n.º 41
0
    def __find_peaks(self):

        self.__peaks = [None] * 500
        self.__filter_pedometer()
        self.__peaks = sig.find_peaks(self.__data_buffer)[0]
        return
Ejemplo n.º 42
0
def find_points(
        sig,
        fs,
        metadata,
        bias,
        gain,
        **kwargs):
    """
        Search for characteristic points
    :param sig: input signal (multichannel)
    :param fs: sampling rate
    :param metadata: initial segmentation containing qrs_start, qrs_end,
    qrs_center
    :param bias: baseline (isoline) level
    :param gain: gain
    :return: None (the result is the modified values in metadata)
    """

    qrs_duration_max = int(fs*kwargs.get(
        "qrs_duration_max",
        config.WAVES["qrs_duration_max"]
    ))

    pma_detection_on = kwargs.get(
        "pma_detection_on",
        config.PACEMAKER["detection"]
    )

    pma_duration_max = fs*kwargs.get(
        "pma_duration_max",
        config.PACEMAKER["spike_duration_max"]
    )

    pma_scale = 1   # scale level for searching pacemaker artifacts
    r_scale = 2     # scale level for searching the R wave
    p_scale = 4     # scale level for searching the P wave
    t_scale = 4     # scale level for searching the T wave
    f_scale = 4     # scale level for detecting flutter

    t_window_fraction = 0.6
    p_window_fraction = 1.0 - t_window_fraction

    num_scales = max(r_scale, p_scale, t_scale)
    num_cycles = len(metadata)
    pilot_chan = 1 if sig.shape[1] > 1 else 0

    for chan, x in signal_channels(sig):

        approx, detail = ddwt(x-bias[chan], num_scales=num_scales)

        # QRS boundaries are not determined here; we rely on metadata

        # very rough noise estimate
        noise = np.std(detail[1]) * 0.7
        #print(noise)

        if chan == pilot_chan:
            fibpos = find_peaks(detail[f_scale], height=noise/2)[0]
        else:
            fibpos = []

        for ncycle, qrs in enumerate(metadata):

            prev_r = int(metadata[ncycle - 1]["qrs_center"] * fs) \
                if ncycle else 0
            next_r = int(metadata[ncycle + 1]["qrs_center"] * fs) \
                if ncycle < num_cycles - 1 else len(x)
            cur_r = int(qrs["qrs_center"] * fs)

            # baseline estimate
            iso = np.percentile(approx[r_scale][prev_r:next_r], 15)
            qrs["isolevel"][chan] = iso / gain[chan]

            # Search for the Q, R, S waves
            # narrow window for searching the R wave only
            this_qrs = [int(qrs["qrs_start"] * fs), int(qrs["qrs_end"] * fs)]

            tight_bounds = [
                max(0, this_qrs[0]),
                min(len(x) - 1, this_qrs[1])
            ]

            # wider window for searching the remaining waves
            prev_qrs =\
                int(metadata[ncycle - 1]["qrs_end"] * fs) if ncycle else 0

            next_qrs =\
                int(metadata[ncycle + 1]["qrs_start"] * fs) \
                    if ncycle < num_cycles - 1 else len(x)

            loose_bounds = [
                int((tight_bounds[0] + prev_qrs)/2),
                int((tight_bounds[1] + next_qrs)/2)
            ]

            #if ncycle==34 and chan==0:
            #    fig, axarr = plt.subplots(2, 1, sharex="col")
            #    fleft = int(metadata[ncycle-1]["qrs_end"]*fs)
            #    fright = int(qrs["qrs_start"]*fs)
            #    #x1 = tight_bounds[0]
            #    #x2 = tight_bounds[1]
            #    x1 = fleft
            #    x2 = fright
            #    xval = np.arange(x1, x2)
            #    axarr[0].plot(xval, approx[r_scale][x1:x2])
            #    axarr[0].grid()
            #    acf, v = detect_periodic(detail[f_scale][x1:x2])
            #    xv = x1 + np.arange(0, len(acf))
            #    axarr[1].plot(xv, acf)
            #    #axarr[1].plot(xval, approx[f_scale][x1:x2])
            #    #axarr[1].plot(xval, detail[f_scale][x1:x2], "m")
            #    axarr[1].grid()
            #    print("Look at the plots")
            #    plt.show(block=False)
            #print(ncycle, chan)

            if pma_detection_on and fs > 260:
                pma_modes = find_extrema(
                    detail[pma_scale], loose_bounds[0], loose_bounds[1],
                    noise / 2
                )

                pma = pma_search(pma_modes, tight_bounds[0], tight_bounds[1],
                           max_dur=pma_duration_max)

                if pma:
                    qrs["pma"][chan].append(pma)

            # all derivative peaks within the wide window
            modes = find_extrema(
                detail[r_scale], loose_bounds[0], loose_bounds[1], noise/2
            )

            qrssearch(modes, tight_bounds, approx[r_scale],
                      qrs, chan, iso, qrs_duration_max)

            # P-wave search
            # search window
            wlen = (cur_r - prev_r) * p_window_fraction

            p_search_lb = int(prev_r + wlen)
            if ncycle:
                prev_t = metadata[ncycle-1]["t_end"][chan]
                if prev_t is None:
                    prev_t = metadata[ncycle - 1]["t_pos"][chan]
                if prev_t is not None:
                    p_search_lb = max(p_search_lb,  prev_t)

            pwindow = [
                p_search_lb,
                cur_r
            ]

            modas_subset = find_extrema(
                detail[p_scale], pwindow[0], pwindow[1], noise/2
            )

            # the last mode before R is not counted because it belongs to the QRS
            pleft, pcenter, pright = ptsearch(
                modas_subset[:-1],
                approx[r_scale+1],
                bias=iso,
                limits=pwindow,
                height=noise/2
            )

            qrs["p_pos"][chan] = pcenter
            qrs["p_start"][chan] = pleft
            qrs["p_end"][chan] = pright

            # refine the baseline level over the PQ interval
            if pright is not None:
                pq_end = qrs["q_pos"][chan]
                if pq_end is None:
                    pq_end = qrs["r_start"][chan]
                if pq_end is None:
                    pq_end = qrs["r_pos"][chan]
                if pq_end is not None and pq_end - pright > 1:
                    iso = np.median(approx[r_scale][pright:pq_end])
                    qrs["isolevel"][chan] = iso / gain[chan]

            # T-wave search
            # search window
            wlen = (next_r - cur_r) * t_window_fraction
            twindow = [
                cur_r,
                int(cur_r + wlen)
            ]

            modas_subset = find_extrema(
                detail[p_scale], twindow[0], twindow[1], noise / 4
            )

            # the first mode to the right of R is ignored because it belongs to the QRS complex
            tleft, tcenter, tright = ptsearch(
                modas_subset[1:],
                approx[r_scale+1],
                bias=iso,
                limits=twindow,
                height=noise
            )

            qrs["t_pos"][chan] = tcenter
            qrs["t_start"][chan] = tleft
            qrs["t_end"][chan] = tright

            # search for F (flutter) waves in the gaps between QRS complexes
            if chan == pilot_chan and ncycle:
                #fleft = int(metadata[ncycle-1]["qrs_end"]*fs)
                #fright = int(qrs["qrs_start"]*fs)

                fleft = get_cycle_end(metadata[ncycle-1], chan, fs)
                fright = get_cycle_start(qrs, chan, fs)

                # take the interval between QRS complexes (possibly capturing
                # the P and T waves) and look for periodicity within it
                rest_range = [
                    int(metadata[ncycle-1]["qrs_end"]*fs),
                    int(qrs["qrs_start"]*fs)
                ]

                # guard against excessively short pauses
                # TODO: figure out why this happens
                if rest_range[1] - rest_range[0] > 8:
                    pk = detect_periodic(detail[f_scale][
                                         rest_range[0]:rest_range[1]])[1]
                else:
                    pk = 0

                qrs["flutter"][chan] = pk

def reshape_4DSTEM_FlyBack(data, scan_x, plot_sum=False):
    """
    Reshapes the lazily imported stack of dimensions (xxxxxx | 256, 256) to the
    correct scan-pattern shape (x, y | 256, 256).
    It uses the over-exposed fly-back frames to identify the start of the lines
    within the first ~10 lines of frames, checks line-length consistency, and
    finds the number of frames to skip at the beginning (this number is printed
    out as string output).

    Parameters
    ----------
    data : hyperspy lazily imported mib file with dimensions of: framenumbers | 256, 256
    scan_x : expected scan-line length in frames, used as the minimum distance
                between detected fly-back peaks
    plot_sum : (default: False) Set to True to plot the intensity profile of the
                first ~10 lines - to check for peak-finding correctness

    Returns
    -------
    data_reshaped : reshaped data (x, y | 256, 256)
    optional: plots the sum intensity vs frames
    """
    data_crop = data.inav[0:int(10 * np.sqrt(data.axes_manager[0].size))]  # crop the first ~10 lines
    data_crop_t = data_crop.T
    data_crop_t_sum = data_crop_t.sum()
    intensity_array = data_crop_t_sum.data #summing over patterns
    intensity_array = intensity_array.compute() #out of lazy
    #Checking for local maxima to be more than _factor_ times the neighbouring elements
    #This factor is currently not robust to all datasets!
    #factor = np.int(max(intensity_array) / np.mean(intensity_array))
    #print(factor)
    #local_max = (np.r_[True, intensity_array[1:] > factor* intensity_array[:-1]] 
    #        & np.r_[intensity_array[:-1] > factor* intensity_array[1:], True])
    peaks = find_peaks(intensity_array, distance=scan_x)
    if plot_sum:
        import matplotlib.pyplot as plt
        
        fig1 = plt.figure()
        ax1 = fig1.add_subplot(121)
        ax2 = fig1.add_subplot(122)
        
        ax1.plot(intensity_array, 'k')
        ax2.plot(peaks[0],np.ones(len(peaks[0])),'*')
        
        ax1.set_title('sum intensity of first ~10 lines of frame')
        ax2.set_title('peaks detected')
    
    #peaks = np.ravel(np.where(local_max))
    lines = np.ediff1d(peaks[0]) #Diff between consecutive elements of the array
    line_len = lines[lines.size-1] # Assuming the last element to be the line length
    check = np.ravel(np.where(lines == line_len)) #Checking line lengths
    
    line_confirm = np.ediff1d(check) == 1
    if not np.all(line_confirm):  # in case there is a False in there, take the index of the last False
        start_ind = np.where(~line_confirm)[0][-1] + 2
        skip_ind = peaks[0][start_ind]
    else:  # in case they are all True, take the index of the first True
        skip_ind = peaks[0][check[0]]  # number of frames to skip at the beginning
       
      
    n_lines = int((data.data.shape[0] - skip_ind) // line_len)  # number of complete lines
    data_skip = data.inav[skip_ind:skip_ind + (n_lines * line_len)] #with the skipped frames removed
    
    data_skip.data = data_skip.data.reshape(n_lines, line_len, 256, 256)
    data_skip.axes_manager._axes.insert(0, data_skip.axes_manager[0].copy())
    data_skip.get_dimensions_from_data() #reshaped
    
    print('Number of frames skipped at the beginning: ', skip_ind)
    data_skip = data_skip.inav[1:]
    return data_skip
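
# Hedged usage sketch for reshape_4DSTEM_FlyBack. The file name and the scan
# width are illustrative placeholders; the sketch assumes the .mib stack can be
# imported lazily with hyperspy (hs.load with lazy=True) so that it arrives as
# a (framenumbers | 256, 256) signal, as the docstring above expects.
import hyperspy.api as hs

stack = hs.load("example_scan.mib", lazy=True)   # hypothetical file name
reshaped = reshape_4DSTEM_FlyBack(stack, scan_x=256, plot_sum=True)
print(reshaped.axes_manager)                     # expect (x, y | 256, 256)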