def wrapped_medfilt(x, y, ks=3, axis='y', **kwargs):
    """Use scipy.signal.medfilt to filter either the x or y data. Also loop the
    filter around to prevent edge effects.

    If the data forms a closed loop the medfilt should account for this,
    otherwise there will be artifacts introduced in points near (less than
    ks-1 / 2) the edge of the data. This version of medfilt accounts for that
    by prepending the last ks data points and appending the first data points,
    running medfilt, and then removing the pre/appended points.
    
    Args:
        ks: an odd number that represents the width of the filter. See medfilt
            for more detail.
        axis: either 'x' or 'y'. Indicates which axis medfilt should be called
            on.
    """
    from scipy.signal import medfilt
    _verify_axis(axis)
    x = np.concatenate((x[-ks:], x, x[:ks]))
    y = np.concatenate((y[-ks:], y, y[:ks]))
    if axis == 'x':
        x = medfilt(x, ks)
    elif axis == 'y': 
        y = medfilt(y, ks)
    return x[ks:-ks], y[ks:-ks]
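A minimal usage sketch (with numpy imported as np and a stub for the `_verify_axis` helper, which is not shown above): filter a noisy closed curve so the endpoints see wrap-around context.

import numpy as np

def _verify_axis(axis):
    # hypothetical stand-in for the helper the snippet assumes
    if axis not in ('x', 'y'):
        raise ValueError("axis must be 'x' or 'y'")

theta = np.linspace(0, 2 * np.pi, 100, endpoint=False)
x = np.cos(theta)
y = np.sin(theta) + 0.05 * np.random.randn(theta.size)
xf, yf = wrapped_medfilt(x, y, ks=5, axis='y')
print(xf.shape, yf.shape)  # (100,) (100,) -- padding removed again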
Example #2
	def median_filter(self,size, extend=False):
		"""
		Do standard median filtering. Give size in Angstroms; it will be translated to the nearest odd number of pixels.
		"""
		#check if wavelength calibration is linear:
		if (self.l[1]-self.l[0])==(self.l[-1]-self.l[-2]):
			linear=True
		else:
			linear=False
			l=self.l
			self.linearize()

		step=self.l[1]-self.l[0]
		add_dim=int(np.ceil(size/step // 2 * 2 + 1))  # odd pixel count corresponding to size

		if extend==True:
			f=np.insert(self.f,0,np.ones(add_dim)*self.f[0])
			f=np.append(f,np.ones(add_dim)*self.f[-1])
			fe=np.insert(self.fe,0,np.ones(add_dim)*self.fe[0])
			fe=np.append(fe,np.ones(add_dim)*self.fe[-1])
			self.f=signal.medfilt(f,add_dim)[add_dim:-add_dim]
			self.fe=signal.medfilt(fe,add_dim,)[add_dim:-add_dim]
			self.fe=self.fe/np.sqrt(add_dim)

		else:
			self.f=signal.medfilt(self.f,add_dim)
			self.fe=signal.medfilt(self.fe,add_dim)
			self.fe=self.fe/np.sqrt(add_dim)
		
		if linear==False:
			self.interpolate(l)
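The `add_dim` line above converts a width in Angstroms to an odd pixel count, as scipy.signal.medfilt requires; a standalone sketch of that rounding rule (the numbers are illustrative):

import numpy as np

size = 5.0    # filter width in Angstroms (illustrative)
step = 0.4    # wavelength step per pixel (illustrative)
kernel = int(np.ceil(size / step // 2 * 2 + 1))
print(kernel)  # 13 -- always odd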
Example #3
def segment_digits(img):

    # binarize in case the image is grayscale
    if not img.dtype == 'bool':
        img = img > 0

    min_size = 32
    medfilt_k = 5

    img0 = morphology.remove_small_objects(img, min_size=min_size)

    sum1 = np.sum(img0, 1)
    sum1 = signal.medfilt(sum1, medfilt_k)  # smooth the accumulated profile
    bp1 = sum1 > 0

    # Get the y coordinates of the start and end points of the digits (assuming a single line of digits)
    idx_top = [i for i in range(len(bp1)) if bp1[i]>0]
    idx_bottom = [len(bp1)-i-1 for i in range(len(bp1)) if bp1[len(bp1)-i-1]>0]
    if len(idx_top) > 0 and len(idx_bottom) > 0:
        bp1[idx_top[0]:idx_bottom[0]+1] = True

    sum0 = np.sum(img0, 0)
    sum0 = signal.medfilt(sum0, medfilt_k)
    bp0 = sum0 > 0

    # Get the x coordinates of the start and end points of the digits
    idx_01_transition = [i for i in range(1, len(bp0)) if bp0[i-1]==False and bp0[i]==True]
    idx_10_transition = [i for i in range(len(bp0)-1) if bp0[i]==True and bp0[i+1]==False]
    bb=[]
    if len(idx_01_transition)==len(idx_10_transition):
        for i in range(len(idx_01_transition)):
            bb.append([idx_01_transition[i], idx_top[0], idx_10_transition[i], idx_bottom[0]])

    return bb
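A hedged usage sketch on a synthetic image with two digit-like blobs (positions are arbitrary; assumes scikit-image's morphology and scipy's signal are imported as in the snippet):

import numpy as np
from scipy import signal
from skimage import morphology

img = np.zeros((40, 60), dtype=bool)
img[10:30, 5:15] = True        # first blob
img[10:30, 25:35] = True       # second blob
print(segment_digits(img))     # one [x0, y0, x1, y1] box per blob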
Example #4
    def interpolate(self):
        pitch = np.zeros((self.nframes))
        pitch[:] = self.samp_values
        pitch2 = medfilt(self.samp_values, self.SMOOTH_FACTOR)

        # This part of the original code was somewhat confusing and caused
        # some problems with the extrapolated points before the first
        # voiced frame and after the last voiced frame. So, I made some
        # small modifications in order to make it work better.
        edges = self.edges_finder(pitch)
        first_sample = pitch[0]
        last_sample = pitch[-1]

        if len(np.nonzero(pitch2)[0]) < 2:
            pitch[pitch == 0] = self.PTCH_TYP
        else:
            nz_pitch = pitch2[pitch2 > 0]
            pitch2 = scipy_interp.pchip(np.nonzero(pitch2)[0],
                                        nz_pitch)(range(self.nframes))
            pitch[pitch == 0] = pitch2[pitch == 0]
        if self.SMOOTH > 0:
            pitch = medfilt(pitch, self.SMOOTH_FACTOR)
        try:
            if first_sample == 0:
                pitch[:edges[0]-1] = pitch[edges[0]]
            if last_sample == 0:
                pitch[edges[-1]+1:] = pitch[edges[-1]]
        except IndexError:
            pass
        self.samp_interp = pitch
Example #5
def FindSlices(label):
    '''
    Locate likely isolated slices in each axis
    '''
    
    # Integral over volume
    ii = float(label.ravel().sum())
    
    # Project onto each axis
    Px = np.sum(np.sum(label,axis=2), axis=1) / ii
    Py = np.sum(np.sum(label,axis=2), axis=0) / ii
    Pz = np.sum(np.sum(label,axis=1), axis=0) / ii
    
    # Subtract median filtered baseline estimate (k = 3)
    # Retains only spikes
    Dx = Px - medfilt(Px)
    Dy = Py - medfilt(Py)
    Dz = Pz - medfilt(Pz)
    
    # Locate spikes in residual
    Dmin = 0.01
    Sx = np.where(Dx > Dmin)
    Sy = np.where(Dy > Dmin)
    Sz = np.where(Dz > Dmin)
    
    return Sx, Sy, Sz
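A minimal sketch of how FindSlices might behave, using a synthetic volume with one bright slice (shape and amplitude are made up; assumes medfilt is imported from scipy.signal):

import numpy as np
from scipy.signal import medfilt

label = np.ones((32, 32, 32))
label[10, :, :] += 50.0        # one anomalously bright slice along x
Sx, Sy, Sz = FindSlices(label)
print(Sx)                      # (array([10]),) -- the spiked slice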
Example #6
    def get_spectrogram(self, sig):
        sig = sig.astype(np.double)

        # DC
        if self.remove_dc:
            sig = self.dc_filter(sig)

        # median filter
        if self.medfilt_t > 0:
            sig = ss.medfilt(sig, self.medfilt_t)

        # preemph spectral tilt
        if self.pre_emph > 0:
            sig = self.pre_emphasis(sig)

        # gain normalize:
        sig = sig / np.abs(sig).max()

        # fft
        frames = self.stft(sig)

        # power spectrum
        frames = np.abs(frames)**2 / self.nfft

        # median filter 2d
        if self.medfilt_s[0] > 0 and self.medfilt_s[1] > 0:
            frames = ss.medfilt(frames, kernel_size=self.medfilt_s)

        return frames
Example #7
 def ReadFiles(self):
     """Read the data from txt files"""
     try:
         import matplotlib.pyplot as plt
         self.names = tkFileDialog.askopenfilenames(title='Choose acceleration files')
         self.num = 0
         for name in self.names:
             self.data = genfromtxt(name, delimiter=' ')
             self.lista.append(self.data)
             noisy_x = self.lista[self.num].transpose()[0]
             noisy_y = self.lista[self.num].transpose()[1]
             noisy_z = self.lista[self.num].transpose()[2]
             self.noisy_x.append(noisy_x)
             self.noisy_y.append(noisy_y)
             self.noisy_z.append(noisy_z)
             self.num = self.num + 1
             n = 3  # order of the median filter
             x_set = medfilt(noisy_x,n)
             y_set = medfilt(noisy_y,n)
             z_set = medfilt(noisy_z,n)
             self.x_set.append(x_set)
             self.y_set.append(y_set)
             self.z_set.append(z_set)
             self.numSamples,m = self.lista[0].shape
             
     except:
         showerror("Error in the files ")
Example #8
 def calc_derived_particle_info(self):
     if self.species == 'e':
         self.charge = -1.0
     else:
         self.charge = 1.0
     self.t = self.ptl.t * self.pic_info.dtwci / self.pic_info.dtwpe
     self.nt, = self.t.shape
     self.px = self.ptl.x / self.smime  # convert lengths from de to di units
     self.py = self.ptl.y / self.smime
     self.pz = self.ptl.z / self.smime
     self.adjust_px()
     self.adjust_py()
     self.gama = np.sqrt(self.ptl.ux**2 + self.ptl.uy**2 + self.ptl.uz**2 +
                         1.0)
     self.mint = 0
     self.maxt = np.max(self.t)
     self.jdote_x = self.ptl.ux * self.ptl.ex * self.charge / self.gama
     self.jdote_y = self.ptl.uy * self.ptl.ey * self.charge / self.gama
     self.jdote_z = self.ptl.uz * self.ptl.ez * self.charge / self.gama
     self.dt = np.zeros(self.nt)
     self.dt[0:self.nt - 1] = np.diff(self.t)
     self.jdote_x_cum = np.cumsum(self.jdote_x) * self.dt
     self.jdote_y_cum = np.cumsum(self.jdote_y) * self.dt
     self.jdote_z_cum = np.cumsum(self.jdote_z) * self.dt
     self.jdote_tot_cum = self.jdote_x_cum + self.jdote_y_cum + self.jdote_z_cum
     kernel = 9
     self.ex = signal.medfilt(self.ptl.ex, kernel_size=(kernel))
     self.ey = signal.medfilt(self.ptl.ey, kernel_size=(kernel))
     self.ez = signal.medfilt(self.ptl.ez, kernel_size=(kernel))
     self.xmin_b = np.min(self.pxb)
     self.xmax_b = np.max(self.pxb)
Example #9
 def __init__(self,surfaceList,dt,subslice=None,filter_kernel_size=0,stereo=True):
     '''
     Arguments:
         *surfaceList*: numpy array of Surfaces (N).
         *dt*: float, timestep
         *subslice*: numpy index tuples, created with np.s_
     '''
     
     if subslice is None:
         if stereo:
             FourD=np.array([[s.data['Ux'],s.data['Uy'],s.data['Uz']] for s in surfaceList])
         else:
             FourD=np.array([[s.data['Ux'],s.data['Uy']] for s in surfaceList])
     else:
         if stereo:            
             FourD=np.array([[s.data['Ux'][subslice],s.data['Uy'][subslice],s.data['Uz'][subslice]] for s in surfaceList])
         else:
             FourD=np.array([[s.data['Ux'][subslice],s.data['Uy'][subslice]] for s in surfaceList])
         
     if filter_kernel_size>0:
         print('start smoothing')
         for s in FourD:
             s[0]=sp.medfilt(s[0],kernel_size=filter_kernel_size)
             s[1]=sp.medfilt(s[1],kernel_size=filter_kernel_size)
             if stereo:
                 s[2]=sp.medfilt(s[2],kernel_size=filter_kernel_size)
         print('stop')
     inputShape=FourD.shape
     vecs = FourD.reshape((inputShape[0],np.prod(inputShape[1:]))).T
     
     super(DMDPiv,self).__init__(vecs,dt)
     self.inputShape=inputShape
Example #10
 def removeCR(self, fitsFile, sigmaLevel=7):
     # use a median filter to smooth the region of interest first
     # if self.rootName == 'ibxy02jpq':
     #     import ipdb;ipdb.set_trace()
     im = fitsFile['sci', 1].data[5:5 + self.arraySize, 5:5 +
                                  self.arraySize] / self.flat * self.dqMask
     imROI = im[self.ROI[2]:self.ROI[3], self.ROI[0]:self.ROI[1]]
     # use a 7 pixel sized median filter to remove hot pixels
     err = fitsFile['err', 1].data[5:5 + self.arraySize, 5:5 +
                                   self.arraySize] / self.flat * self.dqMask
     errROI = err[self.ROI[2]:self.ROI[3], self.ROI[0]:self.ROI[1]]
     diff1 = np.abs(imROI - medfilt(imROI, [1, 7]))/errROI
     diff2 = np.abs(imROI - medfilt(imROI, [7, 1]))/errROI
     diff = np.minimum(diff1, diff2)
     yCR, xCR = np.where(diff > sigmaLevel)
     self.dqMask[self.ROI[2]+yCR, self.ROI[0]+xCR] = np.nan
     dqROI = self.dqMask[self.ROI[2]:self.ROI[3], self.ROI[0]:self.ROI[1]]
     self.scanRate = np.nanmean((imROI*dqROI)[:, 20:-20], axis=1) /\
                       np.median(np.nanmean((imROI*dqROI)[:, 20:-20], axis=1))
     self.scanDQIndex = np.where(np.abs(self.scanRate - 1) > 0.04)[0]  # find scan rate anomaly
     # for scanDQi in self.scanDQIndex:
     #     self.dqMask[self.ROI[2]+scanDQi-5:self.ROI[2]+scanDQi+6,
     #                 self.ROI[0]:self.ROI[1]] = np.nan
     print("file:{0}  {1}/{2} cosmic ray found".format(self.rootName, len(yCR), imROI.size))
     print("file:{0} {1} lines large scanrate".format(self.rootName, len(self.scanDQIndex)))
Example #11
    def compute_weight(pov, energy, params):
        if params.weight_type != 'binary_both':
            if params.weight_type == 'pov':
                weight = pov
            elif params.weight_type == 'energy':
                weight = energy
            elif params.weight_type == 'pov_energy_mult':
                if params.weight_norm_mult is True:
                    pov = (pov - np.min(pov)) / (np.max(pov) - np.min(pov))
                    energy = (energy - np.min(energy)) / (np.max(energy) - np.min(energy))
                weight = pov * energy
            elif params.weight_type == 'pov_energy_add':
                alpha = params.weight_alpha
                weight = alpha*pov + (1-alpha)*energy

            if params.weight_norm is True:
                weight = (weight - np.min(weight)) / (np.max(weight) - np.min(weight))

            if params.weight_binary is True:  # threshold it to binary
                weight = np.where(weight > params.weight_th, 1, 0)
                if params.weight_med_filt:
                    weight = signal.medfilt(weight, params.weight_med_filt)

        else:  # binary decision based on both
            pov = (pov - np.min(pov)) / (np.max(pov) - np.min(pov))
            energy = (energy - np.min(energy)) / (np.max(energy) - np.min(energy))
            weight = np.where((pov > params.weight_th) & (energy > params.weight_th), 1, 0)
            if params.weight_med_filt:
                weight = signal.medfilt(weight, params.weight_med_filt)

        return weight
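`params` is just an attribute bag, so a call could look like this (SimpleNamespace and the field values are stand-ins, and compute_weight is treated as a standalone function):

import numpy as np
from types import SimpleNamespace
from scipy import signal

params = SimpleNamespace(weight_type='pov_energy_add', weight_alpha=0.5,
                         weight_norm=True, weight_binary=True,
                         weight_th=0.5, weight_med_filt=5)
pov = np.random.rand(100)
energy = np.random.rand(100)
w = compute_weight(pov, energy, params)
print(w[:10])  # binary weights, median-smoothed with a width-5 kernel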
Example #12
 def apply_median_filter(self, window_size=3):
     if self.mean_signal is None:
         self.x = signal.medfilt(self.x, window_size)
         self.y = signal.medfilt(self.y, window_size)
         self.z = signal.medfilt(self.z, window_size)
     else:
         self.mean_signal = signal.medfilt(self.mean_signal, window_size)
Example #13
    def redshift_estimate(self,early_type_wave,early_type_flux,wave,Flux_science,gal_prior=None):
        '''
        estimate redshift for object
        '''
        #manage redshift prior
        self.gal_prior = gal_prior

        #continuum subtract
        Flux_sc = Flux_science - signal.medfilt(Flux_science,171)
        early_type_flux_sc = early_type_flux - signal.medfilt(early_type_flux,171)

        #handle single redshift prior flag
        if self.est_pre_z == '1':
            if self.gal_prior:
                self.pre_z_est = self.gal_prior
            else:
                nospec = input('You said you are either using a spectroscopic or photometric redshift prior. '\
                                        'You need to specify a prior value! Either enter a number now or type (q) to exit')
                if nospec == 'q':
                    sys.exit()
                elif not nospec:
                    sys.exit()
                else:
                    self.gal_prior = float(nospec)
                    self.pre_z_est = self.gal_prior

        #handle user prior flag
        if self.est_pre_z == '2':
            print('Take a look at the plotted galaxy spectrum and note, approximately, at what wavelength you see the '+self.uline_n+' line. '\
                    'Then close the plot and enter that wavelength in angstroms.')
            plt.plot(wave,Flux_science)
            plt.xlim(self.lower_w,self.upper_w)
            plt.show()
            line_init = input(self.uline_n+' approx. wavelength (A): ')
            self.pre_z_est = float(line_init)/self.uline - 1

        #handle no prior flag
        if self.est_pre_z == '3':
            self.pre_z_est = None

        redshift_est,cor,ztest,corr_val = self._cross_cor(self.pre_z_est,self.z_prior_width,early_type_wave,early_type_flux_sc,wave,Flux_sc)

        self.qualityval = 0
        self.first_pass = True
        self._GUI_display(redshift_est,ztest,corr_val,wave,Flux_science)
        #self.line_est = Estimateline(self.pspec,ax)
        #plt.show()
        try:
            self.pre_lam_est = self.line_est.lam
            self.pre_z_est = self.pre_lam_est/3950.0 - 1.0
            self.first_pass = False
            redshift_est,cor,ztest,corr_val = self._cross_cor(self.pre_z_est,self.z_prior_width,early_type_wave,early_type_flux_sc,wave,Flux_sc)
            print('redshift est:', redshift_est)
            self._GUI_display(redshift_est,ztest,corr_val,wave,Flux_science)
            total_new_shift = self.spectra2.dx_tot
            redshift_est = (self.uline*(1+redshift_est) - total_new_shift)/self.uline - 1
        except AttributeError:
            pass
        return redshift_est,cor,ztest,corr_val,self.qualityval
Example #14
def medfilt1D(img, medfilt_radius):
    " 1D median filter using specified radius "
    if medfilt_radius <= 1:
        return img
    if img.ndim == 1:
        return sig.medfilt(img, medfilt_radius)
    else:
        lines = [sig.medfilt(line, medfilt_radius) for line in img]
        return np.asarray(lines)
Example #15
    def __calc_gm(self, gseq, t_win=5):
        tmp = np.array([np.mean(x) for x in gseq])
        gmean = -1 * medfilt(tmp.real, t_win) + medfilt(tmp.imag, t_win) * 1j
        pca = PCA(n_components=1)
        gmean = pca.fit_transform(
            np.vstack((gmean.imag, gmean.real)).T).flatten()

        return gmean, pca
Example #16
def detrend_medfilt(d, K=8):
    # mirror-pad both axes by K samples to suppress edge effects
    d = np.concatenate([d[K-1::-1], d, d[:-K-1:-1]], axis=0)
    d = np.concatenate([d[:, K-1::-1], d, d[:, :-K-1:-1]], axis=1)
    # subtract a 2D median-filtered baseline
    d_sm = medfilt(d, 2*K+1)
    d_rs = d - d_sm
    d_sq = np.abs(d_rs)**2
    # estimate local noise; .456 puts the median on the same scale as the average
    sig = np.sqrt(medfilt(d_sq, 2*K+1) / .456)
    # significance of each sample, with the padding stripped again
    f = d_rs / sig
    return f[K:-K, K:-K]
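A quick way to exercise this might be a small 2-D array with one injected spike (sizes and the spike amplitude are illustrative; assumes medfilt is imported from scipy.signal):

import numpy as np
from scipy.signal import medfilt

d = np.random.randn(64, 64)
d[32, 32] += 25.0              # inject an RFI-like outlier
f = detrend_medfilt(d, K=8)
print(f.shape)                 # (64, 64): same shape, padding stripped
print(np.unravel_index(np.abs(f).argmax(), f.shape))  # the spike stands out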
Example #17
    def analyzeActualWindow(self,window,numSamples):
        """ function [gravity body] = AnalyzeActualWindow(window,numSamples)
        %
        % AnalyzeActualWindow separates the gravity and body acceleration features
        % contained in the window of real-time acceleration data, by first reducing
        % the noise on the raw data with a median filter and then discriminating
        % between the features with a low-pass IIR filter."""

        #REDUCE THE NOISE ON THE SIGNALS BY MEDIAN FILTERING
        n = 3  #order of the median filter
        x_axis = medfilt(window[:,0],n)
        y_axis = medfilt(window[:,1],n)
        z_axis = medfilt(window[:,2],n)

        #APPLY IIR FILTER TO GET THE GRAVITY COMPONENTS
        #IIR filter parameters (all frequencies are in Hz)
        Fs = 32;            # sampling frequency
        Fpass = 0.25;       # passband frequency
        Fstop = 2;          # stopband frequency
        Apass = 0.001;      # passband ripple (dB)
        Astop = 100;        # stopband attenuation (dB)
        match = 'pass';     # band to match exactly
        #Create the IIR filter


        # iirdesign arguments
        Wip = (Fpass)/(Fs/2)
        Wis = (Fstop+1e-6)/(Fs/2)
        Rp = Apass             # passband ripple
        As = Astop            # stopband attenuation

        # The iirdesign takes passband, stopband, passband ripple, 
        # and stop attenuation. 
        bb, ab = ifd.iirdesign(Wip, Wis, Rp, As, ftype='cheby1')

        #Gravity components
        g1 = lfilter(bb,ab,x_axis)
        g2 = lfilter(bb,ab,y_axis)
        g3 = lfilter(bb,ab,z_axis)


        #COMPUTE THE BODY-ACCELERATION COMPONENTS BY SUBTRACTION
        gravity = zeros((numSamples,3));
        body = zeros((numSamples,3));

        i=0
        while(i < numSamples):
            gravity[i,0] = g1[i];
            gravity[i,1] = g2[i];
            gravity[i,2] = g3[i];
            body[i,0] = x_axis[i] - g1[i];
            body[i,1] = y_axis[i] - g2[i];
            body[i,2] = z_axis[i] - g3[i];
            i = i + 1

        return gravity, body
Example #18
def RS_Kalman(lat, lng, l_size=5, g_size=5):
    Lat=signal.medfilt(lat,l_size)
    Lng=signal.medfilt(lng,g_size)
    drewgps(Lat,Lng)
    Lat=KalmanByGroup(Lat)
    Lng=KalmanByGroup(Lng)
    drewgps(Lat,Lng)
    gps_data = [[Lat[i],Lng[i]] for i in range(len(Lat))]
    return gps_data
Example #19
    def normalize(self, data):
        if self.verbose: print('Normalizing data')
        # Parameters
        filterWidth = 151
        scale = 30
##        nearZero = np.std(data) * 0.05
        nearZero = 10
        numCols = data.shape[1]
        maxValue = 400

        newData = np.zeros(data.shape)
        for i in range(numCols):
            d = data[:,i].copy()
            
            # Find envelope
            if self.verbose: print " - {}: Finding envelope".format(i)
            upperPeaks = np.zeros(d.shape)
            lowerPeaks = np.zeros(d.shape)
            currentUpper = 0.0
            currentLower = 0.0
            for j in range(1, len(d)-1):
                if (d[j-1] < d[j] > d[j+1]) and d[j] > 0:
                    currentUpper = d[j]
                if (d[j-1] > d[j] < d[j+1]) and d[j] < 0:
                    currentLower = d[j]

                upperPeaks[j] = currentUpper
                lowerPeaks[j] = currentLower

            # Decimate peak list
            dec = 10
            decUpperPeaks = upperPeaks[np.arange(1,len(upperPeaks), dec)]
            decLowerPeaks = lowerPeaks[np.arange(1,len(lowerPeaks), dec)]
        
            # Filter shortened peak list
            if self.verbose: print " - {}: Median filter".format(i)
            decUpperPeaks = sig.medfilt(decUpperPeaks, filterWidth)
            decLowerPeaks = sig.medfilt(decLowerPeaks, filterWidth)

            # Un-decimate peak list
            for x in range(len(upperPeaks)):
                upperPeaks[x] = decUpperPeaks[x//dec]
                lowerPeaks[x] = decLowerPeaks[x//dec]

            # Normalize using width of envelope
            width = (upperPeaks - lowerPeaks) / 2.0
            width[width < nearZero] = np.inf
            d = (d / width) * scale

            # Limit peaks
            d[d > maxValue] = maxValue
            d[d < -maxValue] = -maxValue

            newData[:,i] = d
            
        return newData
Example #20
def plot_example(missed, acknowledged):
    sensor_miss = import_sensorfile(missed)
    sensor_ack = import_sensorfile(acknowledged)

    # Window data
    mag_miss = window_data(process_input(sensor_miss))
    mag_ack = window_data(process_input(sensor_ack))

    # Filter setup
    kernel = 15

    # apply filter
    mag_miss_filter = sci.medfilt(mag_miss, kernel)
    mag_ack_filter = sci.medfilt(mag_ack, kernel)

    # calibrate data
    mag_miss_cal = mf.calibrate_median(mag_miss)
    mag_miss_cal_filter = mf.calibrate_median(mag_miss_filter)

    mag_ack_cal = mf.calibrate_median(mag_ack)
    mag_ack_cal_filter = mf.calibrate_median(mag_ack_filter)

    # PLOT
    sns.set_style("white")
    current_palette = sns.color_palette('muted')
    sns.set_palette(current_palette)

    plt.figure(0)

    # Plot RAW missed and acknowledged reminders
    ax1 = plt.subplot2grid((2, 1), (0, 0))
    plt.ylim([-1.5, 1.5])
    plt.ylabel('Acceleration (g)')
    plt.plot(mag_miss_cal, label='Recording 1')
    plt.legend(loc='lower left')

    ax2 = plt.subplot2grid((2, 1), (1, 0))
    # Plot Missed Reminder RAW
    plt.ylim([-1.5, 1.5])
    plt.ylabel('Acceleration (g)')
    plt.xlabel('t (ms)')
    plt.plot(mag_ack_cal, linestyle='-', label='Recording 2')
    plt.legend(loc='lower left')

    # CALC AND SAVE STATS
    stats_one = sp.calc_stats_for_data_stream_as_dictionary(mag_miss_cal)
    stats_two = sp.calc_stats_for_data_stream_as_dictionary(mag_ack_cal)

    data = [stats_one, stats_two]
    write_to_csv(data, 'example_waves')

    plt.show()
Example #21
def filter_acceleration(x, y, z):
    x = medfilt(np.array(x))
    y = medfilt(np.array(y))
    z = medfilt(np.array(z))

    x = butter_bandpass_filter(x, 0, 20, 50, order=3)
    y = butter_bandpass_filter(y, 0, 20, 50, order=3)
    z = butter_bandpass_filter(z, 0, 20, 50, order=3)

    return x, y, z
Example #22
def filter_gyroscope(x, y, z):
    x = medfilt(np.array(x))
    y = medfilt(np.array(y))
    z = medfilt(np.array(z))

    x = butter_bandpass_filter(x, 0.3, 20, 50, order=3)
    y = butter_bandpass_filter(y, 0.3, 20, 50, order=3)
    z = butter_bandpass_filter(z, 0.3, 20, 50, order=3)

    return x, y, z
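`butter_bandpass_filter` is not shown in these examples. A plausible sketch, offered as an assumption rather than the original helper (note the 0 Hz low cut in Example #21 degenerates to a plain low-pass):

import numpy as np
from scipy.signal import butter, lfilter

def butter_bandpass_filter(data, lowcut, highcut, fs, order=3):
    # hypothetical implementation, not from the original source
    nyq = 0.5 * fs
    if lowcut <= 0:
        b, a = butter(order, highcut / nyq, btype='lowpass')
    else:
        b, a = butter(order, [lowcut / nyq, highcut / nyq], btype='bandpass')
    return lfilter(b, a, data)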
Example #23
def process_cell(img):

    # binarize in case the image is grayscale
    if not img.dtype == 'bool':
        img = img > 0  # binarize

    # compute masks to clean long vertical lines
    h_k = 0.8
    sum0 = np.sum(img, 0)  # collapse the matrix to one row holding each column's sum
    thr0 = sum0 < h_k * img.shape[0]
    thr0 = thr0.reshape(len(thr0), 1)  # reshape to a column vector

    # compute masks to clean long horizontal lines
    w_k = 0.5
    sum1 = np.sum(img, 1)
    thr1 = sum1 < w_k * img.shape[1]
    thr1 = thr1.reshape(len(thr1), 1)

    mask = thr0.transpose() * thr1  # build the final mask for the cell
    mask_lines = mask.copy()

    elem = morphology.square(5)
    mask = morphology.binary_erosion(mask, elem)  # remove noise

    img1 = np.bitwise_and(mask, img)  # filtered image

    # segmentation of the digit block
    kerw = 5  # Kernel width
    thr_k = 0.8

    # compute a mask marking the horizontal start and end of the digit region
    sum0 = np.sum(img1, 0)
    sum0 = signal.medfilt(sum0, kerw)
    thr0 = sum0 > thr_k * np.median(sum0)
    thr0 = np.bitwise_and(thr0.cumsum() > 0, np.flipud(np.flipud(thr0).cumsum() > 0))
    thr0 = thr0.reshape(len(thr0), 1)

    # compute a mask marking the vertical start and end of the digit region
    sum1 = np.sum(img1, 1)
    sum1 = signal.medfilt(sum1, kerw)
    thr1 = sum1 > thr_k * np.median(sum1)
    thr1 = np.bitwise_and(thr1.cumsum() > 0, np.flipud(np.flipud(thr1).cumsum() > 0))
    thr1 = thr1.reshape(len(thr1), 1)

    # final mask for character start and end (bounding box of digit region)
    mask = thr0.transpose() * thr1
    mask = morphology.binary_dilation(mask, morphology.square(2))


    img = np.bitwise_and(mask_lines.astype(img.dtype), img)  # apply mask to remove lines
    img = morphology.binary_dilation(img, morphology.disk(1))  # dilate to reconnect digits broken by the previous mask
    img = morphology.binary_erosion(img, morphology.disk(1))  # erode back to the 'original' shape with the edges joined

    return np.bitwise_and(mask, img)
Example #24
    def two_column_data(self, z, smooth):
        self.wave, self.flux = self.spectrum

        filterSize = int(len(self.wave)/self.nw) * smooth * 2 + 1
        preFiltered = medfilt(self.flux, kernel_size=filterSize)
        wave, flux = self.readSpectrumFile.two_col_input_spectrum(self.wave, preFiltered, z)
        binnedwave, binnedflux, minindex, maxindex = self.preProcess.log_wavelength(wave, flux)
        newflux, continuum = self.preProcess.continuum_removal(binnedwave, binnedflux, self.numSplinePoints, minindex, maxindex)
        meanzero = self.preProcess.mean_zero(binnedwave, newflux, minindex, maxindex)
        apodized = self.preProcess.apodize(binnedwave, meanzero, minindex, maxindex)


        #filterSize = smooth * 2 + 1
        medianFiltered = medfilt(apodized, kernel_size=1)  # kernel_size=1 disables filtering; originally filterSize


        # from scipy.interpolate import interp1d
        #
        # plt.plot(self.flux)
        #
        # spline = interp1d(binnedwave[minindex:maxindex], binnedflux[minindex:maxindex], kind='cubic')
        # waveSpline = np.linspace(binnedwave[minindex],binnedwave[maxindex-1],num=self.numSplinePoints)
        # print spline
        # print '###'
        # print spline(binnedwave[minindex:maxindex])
        # plt.figure('1')
        # plt.plot(waveSpline, spline(waveSpline), '--', label='spline')
        #
        # print wave
        # print binnedwave
        # print binnedflux
        # print len(binnedwave)
        # plt.plot(wave,flux)
        # plt.figure('2')
        # plt.plot(binnedwave, binnedflux, label='binned')
        # plt.plot(binnedwave, newflux, label='continuumSubtract1')
        # plt.plot(binnedwave, continuum, label='polyfit1')
        # print len(binnedwave)
        # print (min(binnedwave), max(binnedwave))
        # print len(newflux)
        #
        # #newflux2, poly2 = self.preProcess.continuum_removal(binnedwave, binnedflux, 6, minindex, maxindex)
        # #plt.plot(binnedwave, newflux2, label='continuumSubtract2')
        # #plt.plot(binnedwave, poly2, label='polyfit2')
        # plt.plot(binnedwave, apodized, label='taper')
        # plt.legend()
        # plt.figure('filtered')
        # plt.plot(medianFiltered)
        # plt.figure('3')
        # plt.plot(medfilt(apodized,kernel_size=3))
        # plt.show()

        return binnedwave, medianFiltered, minindex, maxindex
Example #25
 def get_median_bandpass(self, medlen=21, plot=False):
     self.median_bandpass_avg = medfilt(self.bandpass_avg, medlen)
     self.median_bandpass_std = medfilt(self.bandpass_std, medlen)
     if plot:
         plotxy(self.median_bandpass_avg, self.freqs,
                labx="Frequency (MHz)")
         plotxy(self.median_bandpass_avg+self.median_bandpass_std,
                self.freqs, color="red")
         plotxy(self.median_bandpass_avg-self.median_bandpass_std,
                self.freqs, color="red")
         closeplot()
     return self.median_bandpass_avg
Example #26
def HighPassFilter(t, px, py, moco_kernel, central_fix):
    """
    Slow drift correction by robust high-pass filtering

    Effectively forces long-term average pupil fixation to be centrally
    fixated in video space.

    Arguments
    ----
    t : 1D float array
        Video soft timestamps in seconds
    px : 1D float array
        Video space pupil center x
    py : 1D float array
        Video space pupil center y
    moco_kernel : integer
        Temporal kernel width in samples [31]
    central_fix : float tuple
        (x,y) coordinate in video space of central fixation

    Returns
    ----
    px_filt : 1D float array
        Drift corrected video space pupil center x
    py_filt : 1D float array
        Drift corrected video space pupil center y
    """

    # Force odd-valued kernel width
    moco_kernel = utils._forceodd(moco_kernel)

    print("  Highpass filtering with %d sample kernel" % moco_kernel)

    # Infill NaN regions
    # Replace NaNs with unreasonable but finite value for median filtering
    nan_idx = np.isnan(px)
    px[nan_idx] = -1e9
    py[nan_idx] = -1e9

    # Moving median filter to estimate baseline
    px_bline = medfilt(px, moco_kernel)
    py_bline = medfilt(py, moco_kernel)

    # Restore NaNs to vectors
    px_bline[nan_idx] = np.nan
    py_bline[nan_idx] = np.nan

    # Subtract baseline and add central fixation offset
    px_filt = px - px_bline + central_fix[0]
    py_filt = py - py_bline + central_fix[1]

    return px_filt, py_filt, px_bline, py_bline
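A synthetic check of the drift correction (the `utils._forceodd` helper is not shown, so a stub stands in; the drift rates and fixation point are illustrative):

import numpy as np
from scipy.signal import medfilt

class utils:
    # hypothetical stand-in for the unshown helper module
    @staticmethod
    def _forceodd(k):
        return k if k % 2 == 1 else k + 1

t = np.arange(0, 10, 0.01)
px = 100 + 5 * t + np.random.randn(t.size)   # slow drift plus noise
py = 120 - 3 * t + np.random.randn(t.size)
px_f, py_f, _, _ = HighPassFilter(t, px, py, 31, (100.0, 120.0))
print(px_f.mean(), py_f.mean())  # both hover near the central fixation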
Example #27
    def __calc_lm(self, lseq, t_win=5):
        for x in range(len(lseq)):
            if(len(lseq[x]) == 0):
                lseq[x] = [np.nan]
        ltmp = np.array([np.mean(x) for x in lseq])
        ltmp = medfilt(ltmp.real, t_win) + medfilt(ltmp.imag, t_win) * 1j
        lmean = np.abs(ltmp)
        idx = ~np.isnan(ltmp)
        size = np.mean([len(x) for x in lseq])
        length = np.sum(idx)
        lstd = nanmean([np.std(x) for x in lseq], axis=0)[0, 0]

        return lmean, size, length, lstd
Example #28
 def test_none(self):
     # Ticket #1124. Ensure this does not segfault.
     try:
         signal.medfilt(None)
     except:
         pass
     # Expand on this test to avoid a regression with possible contiguous
     # numpy arrays that have odd strides. The stride value below gets
     # us into wrong memory if used (but it does not need to be used)
     dummy = np.arange(10, dtype=np.float64)
     a = dummy[5:6]
     a.strides = 16
     assert_(signal.medfilt(a, 1) == 5.)
Example #29
 def test_none(self):
     """Ticket #1124. Ensure this does not segfault."""
     try:
         signal.medfilt(None)
     except:
         pass
     # Expand on this test to avoid a regression with possible contiguous
     # numpy arrays that have odd strides. The stride value below gets
     # us into wrong memory if used (but it does not need to be used)
     a = np.lib.stride_tricks.as_strided(np.ones(1), shape=(1,), strides=(2**31,))
     # a may be contiguous (and is certainly for numpy <1.8) because it
     # has only one element
     signal.medfilt(a)
Example #30
def clear_other():
    # get some linear data
    x = np.linspace(0, 1, 101)

    # add some noisy signal
    x[3::10] = 1.5

    p.plot(x)
    p.plot(signal.medfilt(x, 3))
    p.plot(signal.medfilt(x, 5))

    p.legend(['original signal', 'length 3', 'length 5'])
    p.show()
Example #31
    for h in [7, 5, 4, 3, 2, 1]:  # remove exposures
        del img_hud[h]
    # remove median
    img_data = img_hud[1].data
    img_hud[1].data = img_data - np.median(img_data)
    # store to array
    sky_sub_imgs[:, :, i_s] = np.array(img_hud[1].data)
    i_s += 1
    # save
    img_hud.writeto(new_filename, overwrite=True)
    img_hud.close()

# median combine and save sky image
if running_sky_median:
    print('Creating running sky median - can take a while')
    sky_median = medfilt(sky_sub_imgs, kernel_size=(1, 1, median_window))
    # replace frames at the beginning and at the end of the running median sky
    n_f = len(image_filename)
    i_beg_replace = (median_window - 1) // 2
    for i_b in range(0, i_beg_replace):
        sky_median[:, :, i_b] = sky_median[:, :, i_beg_replace]
    i_end_replace = n_f - i_beg_replace - 1
    for i_b in range(i_end_replace + 1, n_f):
        sky_median[:, :, i_b] = sky_median[:, :, i_end_replace]
else:
    sky_image = np.median(sky_sub_imgs, axis=2)
    flat_hud[0].data = sky_image
    flat_hud.writeto('syk_image.fits', overwrite=True)
    flat_hud.close()

# subtract sky image and median from image
Example #32
def callback(data):
    """
    This callback runs each time a LIDAR scan is obtained from
    the /scan topic in ROS. Returns a topic /detection. If no
    object is found, /detection = [0,0]. If an object is found,
    /detection = [angle,distance] to median of scan.
    """

    # Initialize parameters
    rate = rospy.Rate(10)
    scan_dist_thresh = 0.1  # Distance threshold to split obj into 2 obj.
    plot_data = True

    # Set max/min angle and increment
    scan_min = data.angle_min
    scan_max = data.angle_max
    scan_inc = data.angle_increment

    # Build angle array
    x = np.arange(scan_min, scan_max, scan_inc)

    # Pre-compute trig functions of angles
    xsin = np.sin(x)
    xcos = np.cos(x)

    # Apply a median filter to the range scans (kernel size 1 is a pass-through)
    y = sg.medfilt(data.ranges, 1)

    # Calculate the difference between consecutive range values
    y_diff1 = np.power(np.diff(y), 2)

    # Convert range and bearing measurement to cartesian coordinates
    x_coord = y * xsin
    y_coord = y * xcos

    # Compute difference between consecutive values in cartesian coordinates
    x_diff = np.power(np.diff(x_coord), 2)
    y_diff = np.power(np.diff(y_coord), 2)

    # Compute physical distance between measurements
    dist = np.power(x_diff + y_diff, 0.5)

    # Segment the LIDAR scan based on physical distance between measurements
    x2 = np.array(
        np.split(x,
                 np.argwhere(dist > scan_dist_thresh).flatten()[1:]))
    y2 = np.array(
        np.split(y,
                 np.argwhere(dist > scan_dist_thresh).flatten()[1:]))
    dist2 = np.array(
        np.split(dist,
                 np.argwhere(dist > scan_dist_thresh).flatten()[1:]))
    x_coord2 = np.array(
        np.split(x_coord,
                 np.argwhere(dist > scan_dist_thresh).flatten()[1:]))
    y_coord2 = np.array(
        np.split(y_coord,
                 np.argwhere(dist > scan_dist_thresh).flatten()[1:]))

    rate = rospy.Rate(10.0)
    # Loop through each segmented object
    for i in range(len(x2)):

        # Check if there are at least 4 points in an object (reduces noise)
        xlen = len(x2[i]) - 0
        if xlen > 4:

            # Calculate distance of this object
            dist2_sum = np.sum(dist2[i][1:xlen - 1])

            detectX = 0.0
            detectY = 0.0

            # Check if this object is too small
            if dist2_sum > 0.25 and dist2_sum < 3:
                ang = np.median(x2[i])
                dis = np.median(y2[i])
                mn = min(y2[i][1:xlen])
                mx = max(y2[i][1:xlen])

                print "Ang"
                print ang
                print "Dis"
                print dis

                roboX = rospy.get_param("/currentRobotX")
                roboY = rospy.get_param("/currentRobotY")
                arenaPnt1 = rospy.get_param("/arenaPnt1")
                arenaPnt2 = rospy.get_param("/arenaPnt2")
                deadZone1 = rospy.get_param("/deadZone1")
                deadZone2 = rospy.get_param("/deadZone2")
                roboR = rospy.get_param("/currentRobotR")

                print "RoboX"
                print roboX
                print "RoboY"
                print roboY
                print "RoboR"
                print roboR

                detectX = roboX + (dis * math.cos(ang + roboR))
                detectY = roboY - (dis * math.sin(ang + roboR))

                print "DetectX"
                print detectX
                print "DetectY"
                print detectY

                if ang > -1.4 and ang < 1.4 and detectX > arenaPnt1[
                        0] and detectX < arenaPnt2[0] and detectY < arenaPnt1[
                            1] and detectY > arenaPnt2[1] and not (
                                detectX > deadZone1[0] and detectX <
                                deadZone2[0] and detectY < deadZone1[1]
                                and detectY > deadZone2[1]):
                    bearing = np.array([ang, dis], dtype=np.float32)
                else:
                    print "Bad box"

                rate.sleep()

    # Check if bearing exists. Store [0,0] if no object was found
    if 'bearing' not in locals():
        bearing = np.array([0, 0], dtype=np.float32)

    # Publish bearing to ROS on topic /detection
    pub = rospy.Publisher("/detection", numpy_msg(Floats), queue_size=1)
    pub.publish(bearing)

    # If we want to plot the LIDAR scan, open the plot environment
    if plot_data:
        plt.figure(1)
        for i in range(len(x2)):
            xlen = len(x2[i]) - 0
            if xlen > 4:
                dist2_sum = np.sum(dist2[i][1:xlen - 1])
                if dist2_sum < 0.25:
                    if plot_data:
                        plt.plot(x2[i][1:xlen],
                                 y2[i][1:xlen],
                                 'k-',
                                 linewidth=2.0)
                else:
                    if dist2_sum > 3:
                        if plot_data:
                            plt.plot(x2[i][1:xlen],
                                     y2[i][1:xlen],
                                     'b-',
                                     linewidth=2.0)
                    else:
                        ang = np.median(x2[i])  # - (math.pi / 2.0)
                        dis = np.median(y2[i])

                        roboX = rospy.get_param("/currentRobotX")
                        roboY = rospy.get_param("/currentRobotY")
                        arenaPnt1 = rospy.get_param("/arenaPnt1")
                        arenaPnt2 = rospy.get_param("/arenaPnt2")
                        deadZone1 = rospy.get_param("/deadZone1")
                        deadZone2 = rospy.get_param("/deadZone2")
                        roboR = rospy.get_param("/currentRobotR")

                        detectX = roboX + (dis * math.cos(ang + roboR))
                        detectY = roboY - (dis * math.sin(ang + roboR))

                        if ang > -1.4 and ang < 1.4 and detectX > arenaPnt1[
                                0] and detectX < arenaPnt2[
                                    0] and detectY < arenaPnt1[
                                        1] and detectY > arenaPnt2[1] and not (
                                            detectX > deadZone1[0]
                                            and detectX < deadZone2[0]
                                            and detectY < deadZone1[1]
                                            and detectY > deadZone2[1]):
                            if plot_data:
                                plt.plot(x2[i][1:xlen],
                                         y2[i][1:xlen],
                                         'g-',
                                         linewidth=2.0)
                        else:
                            if plot_data:
                                plt.plot(x2[i][1:xlen],
                                         y2[i][1:xlen],
                                         'r-',
                                         linewidth=2.0)
        plt.ylim([0, 20])
        plt.xlim([-5, 5])
        plt.gca().invert_xaxis()
        plt.xlabel('Left of robot [m] ')
        plt.ylabel('Front of robot [m]')
        plt.title('Laser Scan')
        imgdata = StringIO.StringIO()
        plt.savefig(imgdata, format='png')
        imgdata.seek(0)
        img_array = np.asarray(bytearray(imgdata.read()), dtype=np.uint8)
        im = cv2.imdecode(img_array, 1)
        bridge = CvBridge()
        image_output = rospy.Publisher("/output/keyevent_image",
                                       Image,
                                       queue_size=1)
        image_output.publish(bridge.cv2_to_imgmsg(im, "bgr8"))
        plt.close()
    pass
Example #33
def MedianFilter(image):
    image = signal.medfilt(image, (5, 5))
    return image
Example #34
import rasterio
from scipy.signal import medfilt

path = "tests/data/RGB.byte.tif"
output = "/tmp/filtered.tif"

with rasterio.open(path) as src:
    array = src.read()
    profile = src.profile

# apply a 5x5 median filter to each band
filtered = medfilt(array, (1, 5, 5)).astype('uint8')

# Write to tif, using the same profile as the source
with rasterio.open(output, 'w', **profile) as dst:
    dst.write(filtered)
Example #35
        i = i + 1
    w111 = np.concatenate((w1, w11), axis=0)
    w1 = w111[100:600]
    x111 = np.concatenate((x1, x11), axis=0)
    x1 = x111[100:600]
    y111 = np.concatenate((y1, y11), axis=0)
    y1 = y111[100:600]
    z111 = np.concatenate((z1, z11), axis=0)
    z1 = z111[100:600]

    col1 = w1
    col2 = x1
    col3 = y1
    col4 = z1

    col1 = signal.medfilt(col1, 7)
    col2 = signal.medfilt(col2, 7)
    col3 = signal.medfilt(col3, 7)
    col4 = signal.medfilt(col4, 7)

    def butter_lowpass(cutOff, fs, order=1):
        nyq = 0.5 * fs
        normalCutoff = cutOff / nyq
        b, a = butter(order, normalCutoff, btype='low', analog=False)
        return b, a

    def butter_lowpass_filter(data, cutOff, fs, order=4):
        b, a = butter_lowpass(cutOff, fs, order=order)
        y = lfilter(b, a, data)
        return y
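Continuing the fragment, the smoothed columns could then be low-pass filtered like so (the cutoff and sampling rate are assumed values):

    col1 = butter_lowpass_filter(col1, cutOff=5.0, fs=50.0)
    col2 = butter_lowpass_filter(col2, cutOff=5.0, fs=50.0)
    col3 = butter_lowpass_filter(col3, cutOff=5.0, fs=50.0)
    col4 = butter_lowpass_filter(col4, cutOff=5.0, fs=50.0)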
Example #36
i = 1

histogram_overlay, all_x, all_y = histogram_lane_detection(img,
                                                           steps,
                                                           search_window,
                                                           h_window,
                                                           peak_threshold=2000,
                                                           frame_debug=True)

cv2.imshow('OVERLAY', cv2.cvtColor(histogram_overlay, cv2.COLOR_RGB2BGR))

start = masked_img.shape[0] - (i * pixels_per_step)
end = start - pixels_per_step
histogram = np.sum(masked_img[end:start, :], axis=0)
histogram_smooth = signal.medfilt(histogram, h_window)
peaks = np.array(signal.find_peaks_cwt(histogram_smooth, np.arange(1, 10)))
print(peaks)
peaks = peaks[np.nonzero(histogram_smooth[peaks] > peak_threshold)]
print(peaks)
print(histogram_smooth)
print(histogram_smooth / max(histogram_smooth))
histogram_overlay = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
COLOR_BOX_BORDER = np.array([100, 160, 255], np.uint8)
COLOR_HIST_PLOT = np.array([225, 150, 50], np.uint8)
COLOR_POSITIVE_DETECTION = np.array([0, 150, 0], np.uint8)
COLOR_THRESHOLD = np.array([50, 225, 225], np.uint8)
COLOR_GRID = np.array([100, 150, 150], np.uint8)
print(histogram_smooth.shape)
print(start, end)
histogram_overlay[start - 2:start, :, :] = COLOR_BOX_BORDER
Example #37
#Making the CSV file:

#visititems() recursively gets every item (i.e. group, dataset, etc.) and its address in the HDF file
#and passes it to a specified callable function
f.visititems(DatasetCheck)
with open('ScriptOutput.csv', 'w') as h:
    for row in csvTemplate:
        rowstring = []
        for item in row:
            rowstring.append(str(item))
        h.write(",".join(rowstring) + "\n")

#Plotting the Image:
imgHt = np.array(f['AwakeEventData']['XMPP-STREAK']['StreakImage'].get(
    'streakImageHeight'))[0]
imgWdth = np.array(f['AwakeEventData']['XMPP-STREAK']['StreakImage'].get(
    'streakImageWidth'))[0]
img = np.array(
    f['AwakeEventData']['XMPP-STREAK']['StreakImage'].get('streakImageData'))
img = medfilt(np.reshape(img, (imgHt, imgWdth)))
plt.imshow(img)
plt.savefig('XMPP-STREAK-Image.png')

#Creating the Datetime objects:
timestamp = round(1541962108935000000 / (10**(9)))
date_str = datetime.datetime.fromtimestamp(timestamp)
datetime_obj_UTC = datetime.datetime.strptime(str(date_str),
                                              "%Y-%m-%d %H:%M:%S")
datetime_obj_CERN = datetime_obj_UTC.replace(tzinfo=pytz.utc).astimezone(
    pytz.timezone('Europe/Zurich'))
Example #38
def run_epd(lcfile, epdparams=None, lccols=None, lccolnames=None, addcols=None,
            framecol='rstfc', sigclip=5.0, smooth=21, minndet=200,
            num_aps=3, outdir=None, outext='.epdlc'):
    '''
    Runs EPD on the given lcfile

    Args:
        lcfile (str): Path to lcfile.
        epdparams (dict or OrderedDict): keys are columns,
            values are either None or a callable that operates on a lightcurve
        lccols (list): Columns to read from the lc
        lccolnames (list): Names of columns read from the lc
        addcols (DataFrame): Additional external parameters
        framecol (str): Column to merge lc and addcols on

        sigclip (float): Sigma-clipping to be done on lc before fitting
        smooth (int or False): Smoothing parameter to apply to lc
        minndet (int): Minimum number of observations in lc
        num_aps (int): Number of apertures
        outext (str): Extension of output file.

    Returns:
        outfile if successful, None otherwise.
    '''
    # Defaults
    if epdparams is None:
        epdparams = DEFAULT_EPDPARAMS

    # Read LC
    lc = read_lc(lcfile, lccols, lccolnames)
    # Join with additional columns
    if addcols is not None:
        lc = pd.merge(lc, addcols, how='left', on=framecol)
    # Generate additional columns required for EPD
    lc = generate_newcols(lc, epdparams)
    epdcols = list(epdparams.keys())

    # Only use rows where all params are finite
    finite_params = np.ones(len(lc), dtype=bool)
    for col in epdcols:
        finite_params &= np.isfinite(lc[col])

    # Run for each aperture
    epcols = []
    for ap in range(1, num_aps+1):
        magcol = 'rm%d' % ap
        finiteind = np.isfinite(lc[magcol]) & finite_params
        if np.sum(finiteind) < minndet:
            continue

        mag_median = np.nanmedian(lc[magcol])
        mag_stdev = np.nanstd(lc[magcol])

        # Sigma clip
        if sigclip:
            excludeind = abs(lc[magcol] - mag_median) < (sigclip * mag_stdev)
            finalind = finiteind & excludeind
        else:
            finalind = finiteind

        # Smoothing
        if smooth:
            mags = medfilt(lc[magcol][finalind], smooth)
        else:
            mags = lc[magcol][finalind]

        ### Perform fitting
        # Construct the matrix from the EPD columns (and a constant term)
        epdmatrix = np.c_[lc[epdcols], np.ones(len(lc))]
        # Use least squares fitting to only chosen rows
        coeffs, residuals, rank, singulars = lstsq(epdmatrix[finalind],
                                                   mags)
        # Now compute full EPD mags
        lc.loc[:,'ep%d' % ap] = (lc[magcol] - np.dot(coeffs, epdmatrix.T)
                                 + mag_median)
        epcols.append('ep%d' % ap)

    # Save lcfile if at least one epdcolumn was generated
    lcid = os.path.splitext(os.path.basename(lcfile))[0]
    if len(epcols) == 0:
        print('%sZ: EPD failed for %s.' % (datetime.utcnow().isoformat(),
                                           lcid + '.grcollectilc'))
        return None
    else:
        if outdir is None:
            outdir = os.path.dirname(lcfile)
        outfile = os.path.join(outdir, lcid + outext)

        # Proper file output by re-reading lines from input file
        inf = open(lcfile, 'rb')
        inflines = inf.readlines()
        inf.close()

        outf = open(outfile, 'wb')
        for line, row in zip(inflines, lc.iterrows()):
            epmags = [('%.6f' % row[1][col]) for col in epcols]
            inline = line.decode('utf-8').rstrip('\n')
            outline = ' '.join([inline] + epmags) + '\n'
            outf.write(outline.encode('utf-8'))
        outf.close()

        print('%sZ: EPD OK for %s.' % (datetime.utcnow().isoformat(),
                                       lcid + '.grcollectilc'))
        return outfile
Example #39
def median_filter(shared_array, i, **kwargs):
    shared_array[i] = signal.medfilt(shared_array[i], kwargs['kernel_size'])
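The row index signature suggests use with a multiprocessing shared array; a plain-numpy sketch of the same call pattern (the array here is an ordinary ndarray standing in for the shared one):

import numpy as np
from scipy import signal

rows = np.random.rand(4, 101)          # stand-in for the shared array
for i in range(rows.shape[0]):
    median_filter(rows, i, kernel_size=5)
print(rows.shape)                      # each row median-filtered in place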
Example #40
NP = dims[0]

an_conv = np.copy(an)
start = int(sys.argv[4])
end = int(sys.argv[5])

B_start = start
B_end = end
NB = B_end - B_start

if (NB):
    B_v = an[start:end + 1] * np.cos(an[-1])
    B_h = an[start:end + 1] * np.sin(an[-1])

for i in range(0, B_start):
    an_conv[i] = flt.medfilt(an[i], 11)
    if (sigma):
        an_conv[i] = filters.gaussian_filter(an_conv[i], sigma)

if (NB):
    for i in range(B_start, B_end + 1):
        if (sigma):
            B_v[i - B_start] = filters.gaussian_filter(B_v[i - B_start], sigma)
            B_h[i - B_start] = filters.gaussian_filter(B_h[i - B_start], sigma)
        an_conv[i] = np.sqrt(B_v[i - B_start]**2.0 + B_h[i - B_start]**2.0)

if (NB):
    an_conv[-1] = np.cos(an_conv[-1])
    an_conv[-1] = filters.gaussian_filter(an_conv[-1], sigma)
    an_conv[-1] = np.clip(an_conv[-1], -0.99, 0.99)
    an_conv[-1] = np.arccos(an_conv[-1])
Example #41
    k = (win - 1) // 2
    m = y[i + 1:((i + 1) + k)]
    n = y[(i-k):i]
    if i < k:  # left boundaries
        zero1 = np.zeros(k-i, dtype=int)
        a = np.concatenate((zero1, y[:i+1], m), axis=None)
        q[i] = np.median(a)
    elif (i+((win-1)//2)) > (len(x)-1):  # right boundaries
        p = k-((len(x)-1)-i)
        zero2 = np.zeros(p, dtype=int)
        b = np.concatenate((n, y[i:], zero2), axis=None)
        q[i] = np.median(b)
    else:  # middle data
        c = y[i-((win-1)//2):(i+((win+1)//2))]
        q[i] = np.median(c)

plt.plot(x, q, linewidth=1, label='my median', color='green')

# built-in scipy.signal.medfilt() function
z = signal.medfilt(y, win)

plt.plot(x, z, linewidth=0.8, color='orangered', label='medfilt')
plt.legend()

plt.subplot(212)
plt.autoscale(enable=True, axis='x', tight=True)
plt.plot(x, z-q)
plt.title('Error', loc='center')

plt.show()
Example #42
def clean_scan_using_variability(dynamical_spectrum, length, bandwidth,
                                 good_mask=None, freqsplat=None,
                                 noise_threshold=5., debug=True, nofilt=False,
                                 outfile="out", label="",
                                 smoothing_window=0.05,
                                 debug_file_format='pdf',
                                 info_string="Empty info string"):
    """Clean a spectroscopic scan using the difference of channel variability.

    From the dynamical spectrum, i.e. the list of spectra obtained in each
    sample of a scan, we calculate the rms variability of each frequency
    channel. This forms a sort of rms spectrum. We calculate the baseline of
    this spectrum, and all channels whose rms is above noise_threshold
    times the reference median absolute deviation
    (:func:`srttools.fit.ref_mad`), calculated
    with a minimum window of 20 samples, are cut and assigned an interpolated
    value between the closest valid points.
    The baseline is calculated with
    :func:`srttools.fit.baseline_als`, using a lambda value depending on
    the number of channels, with a formula that has been shown to work in a few
    standard cases but might be modified in the future.

    Parameters
    ----------
    dynamical_spectrum : 2-d array
        Array of shape MxN, with M spectra of N elements each.
    length : float
        Duration in seconds of the scan (assumed to have constant sample time)
    bandwidth : float
        Bandwidth in MHz

    Other parameters
    ----------------
    good_mask : boolean array
        this mask specifies channels that should never be discarded as
        RFI, for example because they contain spectral lines
    freqsplat : str
        List of frequencies to be merged into one. See
        :func:`srttools.scan.interpret_frequency_range`
    noise_threshold : float
        The threshold, in sigmas, over which a given channel is
        considered noisy
    debug : bool
        Print out debugging information
    nofilt : bool
        Do not filter noisy channels (set noise_threshold to 1e32)
    outfile : str
        Root file name for the diagnostics plots (outfile_label.png)
    label : str
        Label to append to the filename (outfile_label.png)
    smoothing_window : float
        Width of smoothing window, in fraction of spectral length

    Returns
    -------
    results : object
        The attributes of this object are:

        lc : array-like
            The cleaned light curve
        freqmin : float
            Minimum frequency in MHz, referred to local oscillator
        freqmax : float
            Maximum frequency in MHz, referred to local oscillator

    See Also
    --------
    srttools.fit.baseline_als
    srttools.fit.ref_mad
    """
    try:
        bandwidth_unit = bandwidth.unit
        bandwidth = bandwidth.value
    except AttributeError:
        bandwidth_unit = u.MHz

    if len(dynamical_spectrum.shape) == 1:
        if not debug or not HAS_MPL:
            return None

        lc = dynamical_spectrum
        times = length * np.arange(lc.size) / lc.size

        # Now, PLOT IT ALL --------------------------------
        # Prepare subplots
        fig = plt.figure("{}_{}".format(outfile, label), figsize=(15, 15))
        plt.plot(times, lc)
        plt.xlabel('Time')
        plt.ylabel('Counts')
        plt.gca().text(0.05, 0.95, info_string, horizontalalignment='left',
                       verticalalignment='top',
                       transform=plt.gca().transAxes, fontsize=20)
        plt.savefig("{}_{}.{}".format(outfile, label, debug_file_format))
        plt.close(fig)
        return None

    dynspec_len, nbin = dynamical_spectrum.shape

    # Calculate first light curve

    times = length * np.arange(dynspec_len) / dynspec_len
    lc = np.sum(dynamical_spectrum, axis=1)
    if len(lc) > 10:
        lc = baseline_als(times, lc)
    else:
        lc -= np.median(lc)
    lcbins = np.arange(len(lc))

    # Calculate spectral variability curve

    meanspec = np.sum(dynamical_spectrum, axis=0) / dynspec_len
    spectral_var = \
        np.sqrt(np.sum((dynamical_spectrum - meanspec) ** 2,
                       axis=0) / dynspec_len) / meanspec

    df = bandwidth / len(meanspec)
    allbins = np.arange(len(meanspec)) * df

    # Mask frequencies -- avoid those excluded from splat

    freqmask = np.ones(len(meanspec), dtype=bool)
    freqmin, freqmax, binmin, binmax = \
        interpret_frequency_range(freqsplat, bandwidth, nbin)
    freqmask[0:binmin] = False
    freqmask[binmax:] = False

    # Calculate the variability image

    varimg = np.sqrt((dynamical_spectrum - meanspec) ** 2) / meanspec

    # Set up corrected spectral var

    mod_spectral_var = spectral_var.copy()
    mod_spectral_var[0:binmin] = spectral_var[binmin]
    mod_spectral_var[binmax:] = spectral_var[binmax]

    # Some statistical information on spectral var

    # median_spectral_var = np.median(mod_spectral_var[freqmask])
    stdref = ref_mad(mod_spectral_var[freqmask], 20)

    # Calculate baseline of spectral var ---------------
    # Empirical formula, with no physical meaning

    smoothing_window_int = int(nbin * smoothing_window) // 2 * 2 + 1
    smoothing_window_int = np.max([smoothing_window_int, 11])
    baseline = medfilt(mod_spectral_var[binmin:binmax],
                       smoothing_window_int)

    baseline = \
        np.concatenate((np.zeros(binmin) + baseline[0],
                        baseline,
                        np.zeros(nbin - binmax) + baseline[-1]
                        ))

    # Set threshold

    if nofilt:
        wholemask = freqmask
    else:
        threshold = baseline + noise_threshold * stdref
        mask = spectral_var < threshold
        threshold = baseline - noise_threshold * stdref
        mask = mask & (spectral_var > threshold)

        wholemask = freqmask & mask

    if good_mask is None:
        good_mask = np.zeros_like(freqmask, dtype=bool)
    wholemask[good_mask] = 1

    # Calculate frequency-masked lc
    lc_masked = np.sum(dynamical_spectrum[:, freqmask], axis=1)
    # lc_masked = baseline_als(times, lc_masked, outlier_purging=False)
    if len(lc_masked) > 10:
        lc_masked = baseline_als(times, lc_masked, outlier_purging=False)
    else:
        lc_masked -= np.median(lc_masked)

    bad_intervals = contiguous_regions(np.logical_not(wholemask))

    # Calculate cleaned dynamical spectrum

    cleaned_dynamical_spectrum = \
        _clean_dyn_spec(dynamical_spectrum, bad_intervals)

    cleaned_meanspec = \
        np.sum(cleaned_dynamical_spectrum,
               axis=0) / len(cleaned_dynamical_spectrum)
    cleaned_varimg = \
        np.sqrt((cleaned_dynamical_spectrum - cleaned_meanspec) ** 2 /
                cleaned_meanspec ** 2)
    cleaned_spectral_var = \
        np.sqrt(np.sum((cleaned_dynamical_spectrum - cleaned_meanspec) ** 2,
                       axis=0) / dynspec_len) / cleaned_meanspec

    mean_varimg = np.mean(cleaned_varimg[:, freqmask])
    std_varimg = np.std(cleaned_varimg[:, freqmask])

    lc_corr = np.sum(cleaned_dynamical_spectrum[:, freqmask], axis=1)
    if len(lc_corr) > 10:
        lc_corr = baseline_als(times, lc_corr, outlier_purging=False)
    else:
        lc_corr -= np.median(lc_corr)

    results = type('test', (), {})()  # create empty object
    results.lc = lc_corr
    results.freqmin = freqmin * u.MHz
    results.freqmax = freqmax * u.MHz
    results.mask = wholemask

    if not debug or not HAS_MPL:
        return results

    # Now, PLOT IT ALL --------------------------------
    # Prepare subplots
    fig = plt.figure("{}_{}".format(outfile, label), figsize=(15, 15))

    if len(lc_corr) < 10:
        for i in dynamical_spectrum:
            plt.plot(allbins[1:], i[1:])

        plt.plot(allbins[1:], meanspec[1:])
        plt.xlabel('Time')
        plt.ylabel('Counts')
        ax = plt.gca()
        ax.text(0.05, 0.95, info_string, horizontalalignment='left',
                verticalalignment='top',
                transform=ax.transAxes, fontsize=20)
        plt.savefig("{}_{}.{}".format(outfile, label, debug_file_format))
        plt.close(fig)
        return results

    gs = GridSpec(4, 3, hspace=0, wspace=0,
                  height_ratios=(1.5, 1.5, 1.5, 1.5),
                  width_ratios=(3, 0., 1.2))
    ax_meanspec = plt.subplot(gs[0, 0])
    ax_dynspec = plt.subplot(gs[1, 0], sharex=ax_meanspec)
    ax_cleanspec = plt.subplot(gs[2, 0], sharex=ax_meanspec)
    ax_lc = plt.subplot(gs[1, 2], sharey=ax_dynspec)
    ax_cleanlc = plt.subplot(gs[2, 2], sharey=ax_dynspec, sharex=ax_lc)
    ax_var = plt.subplot(gs[3, 0], sharex=ax_meanspec)
    ax_text = plt.subplot(gs[0, 2])

    ax_meanspec.set_ylabel('Counts')
    ax_dynspec.set_ylabel('Sample')
    ax_cleanspec.set_ylabel('Sample')
    ax_var.set_ylabel('r.m.s.')
    ax_var.set_xlabel('Frequency from LO ({})'.format(bandwidth_unit))
    ax_cleanlc.set_xlabel('Counts')

    # Plot mean spectrum

    ax_meanspec.plot(allbins[1:], meanspec[1:], label="Unfiltered")
    # ax_meanspec.plot(allbins[1:], meanspec[1:], label="Whitelist applied")
    ax_meanspec.plot(allbins[wholemask], meanspec[wholemask],
                     label="Final mask")
    ax_meanspec.set_ylim([np.min(cleaned_meanspec),
                          np.max(cleaned_meanspec)])

    try:
        cmap = plt.get_cmap("magma")
    except Exception:
        cmap = plt.get_cmap("gnuplot2")
    ax_dynspec.imshow(varimg, origin="lower", aspect='auto',
                      cmap=cmap,
                      vmin=mean_varimg - 5 * std_varimg,
                      vmax=mean_varimg + 5 * std_varimg,
                      extent=(0, bandwidth,
                              0, varimg.shape[0]), interpolation='none')

    ax_cleanspec.imshow(cleaned_varimg, origin="lower", aspect='auto',
                        cmap=cmap,
                        vmin=mean_varimg - 5 * std_varimg,
                        vmax=mean_varimg + 5 * std_varimg,
                        extent=(0, bandwidth,
                                0, varimg.shape[0]), interpolation='none')

    # Plot variability

    ax_var.plot(allbins[1:], spectral_var[1:], label="Spectral rms")
    ax_var.plot(allbins[mask], spectral_var[mask])
    ax_var.plot(allbins, cleaned_spectral_var,
                zorder=10, color="k")
    ax_var.plot(allbins[1:], baseline[1:])
    ax_var.plot(allbins[1:],
                baseline[1:] + noise_threshold * stdref, color='r', lw=2)
    ax_var.plot(allbins[1:],
                baseline[1:] - noise_threshold * stdref, color='r', lw=2)
    minb = np.min(baseline[1:]) - 2 * noise_threshold * stdref
    maxb = np.max(baseline[1:]) + 2 * noise_threshold * stdref
    ax_var.set_ylim([minb, maxb])

    # Plot light curves

    ax_lc.plot(lc, lcbins, color="grey")
    ax_lc.plot(lc_masked, lcbins, color="b")
    ax_cleanlc.plot(lc_masked, lcbins, color="grey")
    ax_cleanlc.plot(lc_corr, lcbins, color="k")
    dlc = max(lc_corr) - min(lc_corr)
    ax_lc.set_xlim([np.min(lc_corr) - dlc / 10, max(lc_corr) + dlc / 10])

    # Indicate bad intervals

    for b in bad_intervals:
        maxsp = np.max(meanspec)
        ax_meanspec.plot(b * df, [maxsp] * 2, color='k', lw=2)
        middleimg = varimg.shape[0] / 2
        ax_dynspec.plot(b * df, [middleimg] * 2, color='k', lw=2)
        maxsp = np.max(spectral_var)
        ax_var.plot(b * df, [maxsp] * 2, color='k', lw=2)

    # Indicate freqmin and freqmax
    ax_var.set_xlim([0, allbins[-1]])

    ax_dynspec.axvline(freqmin)
    ax_dynspec.axvline(freqmax)
    ax_cleanspec.axvline(freqmin)
    ax_cleanspec.axvline(freqmax)
    ax_var.axvline(freqmin)
    ax_var.axvline(freqmax)
    ax_meanspec.axvline(freqmin)
    ax_meanspec.axvline(freqmax)

    ax_text.text(0.05, 0.95, info_string, horizontalalignment='left',
                 verticalalignment='top',
                 transform=ax_text.transAxes, fontsize=20)
    ax_text.axis("off")

    fig.tight_layout()

    plt.savefig(
        "{}_{}.{}".format(outfile, label, debug_file_format))
    plt.close(fig)
    return results
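# A minimal standalone sketch of the channel-masking step above (median-filter
# baseline on the spectral variability plus a robust-scatter threshold). The
# toy spectrum, the MAD-based scatter estimate and the threshold value are
# assumptions, not the original srttools pipeline.
import numpy as np
from scipy.signal import medfilt

rng = np.random.default_rng(0)
spectral_var = 1.0 + 0.05 * rng.standard_normal(1024)
spectral_var[300:310] += 1.5                              # a few noisy channels

nbin = spectral_var.size
smoothing_window = 0.05
ks = max(int(nbin * smoothing_window) // 2 * 2 + 1, 11)   # force odd, >= 11
baseline = medfilt(spectral_var, ks)

mad = np.median(np.abs(spectral_var - np.median(spectral_var)))
noise_threshold = 5.0
good = np.abs(spectral_var - baseline) < noise_threshold * 1.4826 * mad
print(good.sum(), "of", nbin, "channels kept")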
Example #43
0
def median_filter(signal, size=5):

    med_signal = medfilt(signal.flatten(), size)
    return med_signal
#------OPEN THE SIGNAL-------
mat_struct = sio.loadmat('/home/luciasucunza/git_proyecto_ecg/Filtros/TP4_ecg.mat')

ecg_one_lead  = mat_struct['ecg_lead']
ecg_one_lead  = ecg_one_lead.flatten()
cant_muestras = len(ecg_one_lead)

fs       = 1000 
nyq_frec = fs / 2
t        = np.arange(cant_muestras) / fs

#------BASELINE ESTIMATION VIA MOVING MEDIAN-------
to_todo = time.time()

baseline = sig.medfilt(ecg_one_lead, kernel_size=int(np.around(fs * 0.5 * 0.2) * 2 + 1))
baseline = sig.medfilt(baseline, kernel_size=int(np.around(fs * 0.5 * 0.6) * 2 + 1))
baseline = sig.medfilt(baseline, kernel_size=int(np.around(fs * 0.5 * (1 / 50)) * 2 + 1))

tf_todo = time.time()

#------Decimation parameters-------
to_500 = time.time() 

f_B         = 7

fs_old_1    = fs
fs_new_1    = 500
nyq_frec_1  = fs_old_1/2

fs_old_3    = fs_new_1
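# A minimal sketch of the cascaded moving-median baseline removal used above,
# on a synthetic drifting signal; the kernel formulas follow the script's
# fs*0.5*0.2 and fs*0.5*0.6 choices, while the test signal is an assumption.
import numpy as np
import scipy.signal as sig

fs = 1000
t = np.arange(5 * fs) / fs
ecg_like = np.sin(2 * np.pi * 1.2 * t) + 0.5 * np.sin(2 * np.pi * 0.05 * t)

k1 = int(np.around(fs * 0.5 * 0.2) * 2 + 1)   # ~200 ms window, forced odd
k2 = int(np.around(fs * 0.5 * 0.6) * 2 + 1)   # ~600 ms window, forced odd
baseline_est = sig.medfilt(sig.medfilt(ecg_like, k1), k2)
detrended = ecg_like - baseline_est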
Example #45
0
def find_slits_corners_aps_1id(
    img,
    method='quadrant+',
    medfilt2_kernel_size=3,
    medfilt_kernel_size=23,
):
    """
    Automatically locate the slit box location by its four corners.

    NOTE:
    The four slits that form a binding box is the current setup at aps_1id,
    which reduce the illuminated region on the detector. Since the slits are
    stationary, they can serve as a reference to check detector drifting
    during the scan. Technically, the four slits should be used to find
    the transformation matrix (not necessarily affine) to correct the image.
    However, since we are dealing with 2D images with very little distortion,
    affine transformation matrices were used for approximation. Therefore
    the "four corners" are used instead of all four slits.

    Parameters
    ----------
    img : np.ndarray
        2D images
    method : str,  ['simple', 'quadrant', 'quadrant+'], optional
        method for auto detecting slit corners
            - simple    :: assume a rectangle slit box, fast but less accurate
                           (1 pixel precision)
            - quadrant  :: subdivide the image into four quadrants, then use
                           an explicit method to find the corner
                           (1 pixel precision)
            - quadrant+ :: similar to quadrant, but use curve_fit (gauss1d) to
                           find the corner
                           (0.1 pixel precision)
    medfilt2_kernel_size : int, optional
        2D median filter kernel size for noise reduction
    medfilt_kernel_size : int, optional
        1D median filter kernel size for noise reduction

    Returns
    -------
    tuple
        autodetected slit corners (counter-clockwise order)
        (upperLeft, lowerLeft, lowerRight, upperRight)
    """
    img = medfilt2d(
        np.log(img.astype(np.float64)),
        kernel_size=medfilt2_kernel_size,
    )
    rows, cols = img.shape

    # simple method is simple, therefore it stands out
    if method.lower() == 'simple':
        # assuming a rectangle type slit box
        col_std = medfilt(np.std(img, axis=0), kernel_size=medfilt_kernel_size)
        row_std = medfilt(np.std(img, axis=1), kernel_size=medfilt_kernel_size)
        # NOTE: in the tiff img
        #  x is col index, y is the row index  ==> key point here !!!
        #  img slicing is done with img[row_idx, col_idx]
        #  ==> so the image idx and corner position are FLIPPED!
        _left = np.argmax(np.gradient(col_std))
        _right = np.argmin(np.gradient(col_std))
        _top = np.argmax(np.gradient(row_std))
        _bottom = np.argmin(np.gradient(row_std))

        cnrs = np.array([
            [_left, _top],
            [_left, _bottom],
            [_right, _bottom],
            [_right, _top],
        ])
    else:
        # predefine all quadrants
        # Here let's assume that the four corners of the slit box are in the
        # four quadrants defined by the center of the image, i.e.
        #  upper left quadrant:  img[0     :cnt[1], 0     :cnt[0]] => quadrant origin = (0,      0     )
        #  lower left quadrant:  img[cnt[1]:      , 0     :cnt[0]] => quadrant origin = (0,      cnt[1])
        #  lower right quadrant: img[cnt[1]:      , cnt[0]:      ] => quadrant origin = (cnt[0], cnt[1])
        #  upper right quadrant: img[0     :cnt[1], cnt[0]:      ] => quadrant origin = (cnt[0], 0     )
        # center of image that defines FOUR quadrants
        cnt = [int(cols / 2), int(rows / 2)]
        Quadrant = namedtuple('Quadrant', 'img col_func, row_func')
        quadrants = [
            Quadrant(img=img[0:cnt[1], 0:cnt[0]],
                     col_func=np.argmax,
                     row_func=np.argmax),  # upper left,  1st quadrant
            # lower left,  2nd quadrant
            Quadrant(img=img[cnt[1]:, 0:cnt[0]],
                     col_func=np.argmax,
                     row_func=np.argmin),
            # lower right, 3rd quadrant
            Quadrant(img=img[cnt[1]:, cnt[0]:],
                     col_func=np.argmin,
                     row_func=np.argmin),
            # upper right, 4th quadrant
            Quadrant(img=img[0:cnt[1], cnt[0]:],
                     col_func=np.argmin,
                     row_func=np.argmax),
        ]
        # the origin in each quadrants ==> easier to set it here
        quadrantorigins = np.array([
            [0, 0],  # upper left,  1st quadrant
            [0, cnt[1]],  # lower left,  2nd quadrant
            # lower right, 3rd quadrant
            [cnt[0], cnt[1]],
            [cnt[0], 0],  # upper right, 4th quadrant
        ])
        # init four corners
        cnrs = np.zeros((4, 2))
        if method.lower() == 'quadrant':
            # the standard quadrant method
            for i, q in enumerate(quadrants):
                cnrs[i, :] = np.array([
                    q.col_func(
                        np.gradient(
                            medfilt(np.std(q.img, axis=0),
                                    kernel_size=medfilt_kernel_size))
                    ),  # x is col_idx
                    q.row_func(
                        np.gradient(
                            medfilt(np.std(q.img, axis=1),
                                    kernel_size=medfilt_kernel_size))),
                    # y is row_idx
                ])
            # add the origin offset back
            cnrs = cnrs + quadrantorigins
        elif method.lower() == 'quadrant+':
            # use Gaussian curve fitting to achieve subpixel precision
            # TODO:
            # improve the curve fitting with Lorentz and Voigt fitting function
            for i, q in enumerate(quadrants):
                # -- find x subpixel position
                cnr_x_guess = q.col_func(
                    np.gradient(
                        medfilt(np.std(q.img, axis=0),
                                kernel_size=medfilt_kernel_size)))
                # isolate the strongest peak to fit
                tmpx = np.arange(cnr_x_guess - 10, cnr_x_guess + 11)
                tmpy = np.gradient(np.std(q.img, axis=0))[tmpx]
                # tmpy[0] is the value from the highest/lowest pixel
                # tmpx[0] is basically cnr_x_guess
                # 5.0 is the guessed std,
                coeff, _ = curve_fit(
                    gauss1d,
                    tmpx,
                    tmpy,
                    p0=[tmpy[0], tmpx[0], 5.0],
                    maxfev=int(1e6),
                )
                cnrs[i, 0] = coeff[1]  # x position
                # -- find y subpixel position
                cnr_y_guess = q.row_func(
                    np.gradient(
                        medfilt(np.std(q.img, axis=1),
                                kernel_size=medfilt_kernel_size)))
                # isolate the peak (x, y here is only associated with the peak)
                tmpx = np.arange(cnr_y_guess - 10, cnr_y_guess + 11)
                tmpy = np.gradient(np.std(q.img, axis=1))[tmpx]
                coeff, _ = curve_fit(
                    gauss1d,
                    tmpx,
                    tmpy,
                    p0=[tmpy[0], tmpx[0], 5.0],
                    maxfev=int(1e6),
                )
                cnrs[i, 1] = coeff[1]  # y position
            # add the quadrant shift back
            cnrs = cnrs + quadrantorigins

        else:
            raise NotImplementedError(
                "Available methods are: simple, quadrant, quadrant+")

    # return the slit corner detected
    return cnrs
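# A hedged usage sketch for the corner finder above: a synthetic "slit box"
# image (bright rectangle on a dark background) run through the fast
# 'simple' method. It assumes the function and its module imports (numpy,
# scipy.signal's medfilt/medfilt2d, etc.) are available; the box position
# and noise level are made up.
import numpy as np

rng = np.random.default_rng(1)
img = np.full((200, 300), 10.0)
img[40:160, 60:240] = 1000.0                  # illuminated region
img += rng.uniform(0.0, 1.0, img.shape)       # keep np.log(img) well defined

corners = find_slits_corners_aps_1id(img, method='simple')
print(corners)   # roughly [[60, 40], [60, 159], [239, 159], [239, 40]]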
Example #46
0
def synthesize(
        fs,
        f0s,
        SPEC,
        NM=None,
        wavlen=None,
        ener_multT0=False,
        nm_cont=False,  # If False, force binary state of the noise mask (by thresholding at 0.5)
        nm_lowpasswinlen=9,
        hp_f0coef=0.5,  # factor of f0 for the cut-off of the high-pass filter (def. 0.5*f0)
        antipreechohwindur=0.001,  # [s] Used to damp the signal at the beginning AND at the end of it
        # The following options post-process the features after the
        # generation/transformation and thus before waveform synthesis
        pp_f0_rmsteps=False,  # Remove steps in the f0 curve (see sigproc.resampling.f0s_rmsteps(.))
        pp_f0_smooth=None,  # Smooth the f0 curve using median and FIR filters of given window duration [s]
        pp_atten1stharminsilences=None,  # Typical value is -25
        verbose=1):

    winnbper = 4  # Number of periods in a synthesis windows. It still contains only one single pulse, but leaves space for the VTF to decay without being cut abruptly.

    # Copy the inputs to avoid modifying them
    f0s = f0s.copy()
    SPEC = SPEC.copy()
    if NM is not None: NM = NM.copy()
    else: NM = np.zeros(SPEC.shape)

    # Check the size of the inputs
    if f0s.shape[0] != SPEC.shape[0]:
        raise ValueError(
            'F0 size {} and spectrogram size {} do not match'.format(
                f0s.shape[0], SPEC.shape[0]))  # pragma: no cover
    if NM is not None:
        if SPEC.shape != NM.shape:
            raise ValueError(
                'spectrogram size {} and NM size {} do not match.'.format(
                    SPEC.shape, NM.shape))  # pragma: no cover

    if wavlen is None: wavlen = int(np.round(f0s[-1, 0] * fs))
    dftlen = (SPEC.shape[1] - 1) * 2
    shift = np.median(np.diff(f0s[:, 0]))
    if verbose > 0:
        print(
            'PML Synthesis (dur={}s, fs={}Hz, f0 in [{:.0f},{:.0f}]Hz, shift={}s, dftlen={})'
            .format(wavlen / float(fs), fs, np.min(f0s[:, 1]),
                    np.max(f0s[:, 1]), shift, dftlen))

    # Prepare the features

    # Enforce continuous f0
    f0s[:, 1] = np.interp(f0s[:, 0], f0s[f0s[:, 1] > 0, 0], f0s[f0s[:, 1] > 0,
                                                                1])
    # If asked, removes steps in the f0 curve
    if pp_f0_rmsteps:
        f0s = sp.f0s_rmsteps(f0s)
    # If asked, smooth the f0 curve using median and FIR filters
    if pp_f0_smooth is not None:
        print('    Smoothing f0 curve using {}[s] window'.format(pp_f0_smooth))
        import scipy.signal as sig
        lf0 = np.log(f0s[:, 1])
        bcoefslen = int(0.5 * pp_f0_smooth / shift) * 2 + 1
        lf0 = sig.medfilt(lf0, bcoefslen)
        bcoefs = np.hamming(bcoefslen)
        bcoefs = bcoefs / sum(bcoefs)
        lf0 = sig.filtfilt(bcoefs, [1], lf0)
        f0s[:, 1] = np.exp(lf0)

    winlenmax = getwinlen(np.min(f0s[:, 1]), fs, winnbper)
    if winlenmax > dftlen:
        warnings.warn(
            '\n\nWARNING: The maximum window length ({}) is bigger than the DFT length ({}). Please, increase the DFT length of your spectral features (the second dimension) or check if the f0 curve has extremely low values and try to clip them to higher values (at least higher than 50Hz). The f0 curve has been clipped to {}Hz.\n\n'
            .format(winlenmax, dftlen,
                    winnbper * fs / float(dftlen)))  # pragma: no cover
        f0s[:, 1] = np.clip(f0s[:, 1], winnbper * fs / float(dftlen - 2), 1e6)

    if NM is not None:
        # Remove noise below f0, as it is supposed to be already the case
        for n in range(NM.shape[0]):
            NM[n, :int((float(dftlen) / fs) * 2 * f0s[n, 1])] = 0.0

    if not nm_cont:
        print('    Forcing binary noise mask')
        NM[NM <= 0.5] = 0.0  # To be sure that voiced segments are not hoarse
        NM[NM > 0.5] = 1.0  # To be sure the noise segments are fully noisy

    # Generate the pulse positions [1](2) (i.e. the synthesis instants, the GCIs in voiced segments)
    ts = [0.0]
    while ts[-1] < float(wavlen) / fs:
        cf0 = np.interp(ts[-1], f0s[:, 0], f0s[:, 1])
        if cf0 < 50.0: cf0 = 50
        ts.append(ts[-1] + (1.0 / cf0))
    ts = np.array(ts)
    f0s = np.vstack((ts, np.interp(ts, f0s[:, 0], f0s[:, 1]))).T

    # Resample the features to the pulse positions

    # Spectral envelope uses the nearest, to avoid over-smoothing
    SPECR = np.zeros((f0s.shape[0], dftlen // 2 + 1))
    for n, t in enumerate(f0s[:, 0]):  # Nearest: Way better for plosives
        idx = int(np.round(t / shift))
        idx = np.clip(idx, 0, SPEC.shape[0] - 1)
        SPECR[n, :] = SPEC[idx, :]

    # Keep trace of the median energy [dB] over the whole signal
    ener = np.mean(SPECR, axis=1)
    idxacs = np.where(sp.mag2db(ener) > sp.mag2db(np.max(ener)) -
                      30)[0]  # Get approx active frames # TODO Param
    enermed = sp.mag2db(np.median(ener[idxacs]))  # Median energy [dB]
    ener = sp.mag2db(ener)

    # Resample the noise feature to the pulse positions
    # Smooth the frequency response of the mask in order to avoid Gibbs
    # (poor Gibbs nobody want to see him)
    nm_lowpasswin = np.hanning(nm_lowpasswinlen)
    nm_lowpasswin /= np.sum(nm_lowpasswin)
    NMR = np.zeros((f0s.shape[0], dftlen // 2 + 1))
    for n, t in enumerate(f0s[:, 0]):
        idx = int(np.round(t / shift))  # Nearest is better for plosives
        idx = np.clip(idx, 0, NM.shape[0] - 1)
        NMR[n, :] = NM[idx, :]
        if nm_lowpasswinlen > 1:
            NMR[n, :] = scipy.signal.filtfilt(nm_lowpasswin, [1.0], NMR[n, :])

    NMR = np.clip(NMR, 0.0, 1.0)

    # The complete waveform that we will fill with the pulses
    wav = np.zeros(wavlen)
    # Half window on the left of the synthesized segment to avoid pre-echo
    dampinhwin = np.hanning(
        1 +
        2 * int(np.round(antipreechohwindur * fs)))  # 1ms forced dampingwindow
    dampinhwin = dampinhwin[:(len(dampinhwin) - 1) // 2 + 1]

    for n, t in enumerate(f0s[:, 0]):
        f0 = f0s[n, 1]

        if verbose > 1:
            print "\rPM Synthesis (python) t={:4.3f}s f0={:3.3f}Hz               ".format(
                t, f0),

        # Window's length
        # TODO It should be ensured that the beginning and end of the
        #      noise is within the window. Nothing is doing this currently!
        winlen = getwinlen(f0, fs, winnbper)
        # TODO We also assume that the VTF's decay is shorter
        #      than winnbper-1 periods (dangerous with high pitched and tense voice).
        if winlen > dftlen:
            raise ValueError(
                'The window length ({}) is bigger than the DFT length ({}). Please, increase the dftlen of your spectral features or check if the f0 curve has extremely low values and try to clip them to higher values (at least higher than 50[Hz])'
                .format(winlen, dftlen))  # pragma: no cover

        # Set the rough position of the pulse in the window (the closest sample)
        # We keep a third of the window (1 period) on the left because the
        # pulse signal is minimum phase. And 2/3rd (remaining 2 periods)
        # on the right to let the VTF decay.
        pulseposinwin = int((1.0 / winnbper) * winlen)

        # The sample indices of the current pulse wrt. the final waveform
        winidx = int(round(fs * t)) + np.arange(winlen) - pulseposinwin

        # Build the pulse spectrum

        # Let start with a Dirac
        S = np.ones(dftlen // 2 + 1, dtype=np.complex64)

        # Add the delay to place the Dirac at the "GCI": exp(-j*2*pi*t_i)
        delay = -pulseposinwin - fs * (t - int(round(fs * t)) / float(fs))
        S *= np.exp((delay * 2j * np.pi / dftlen) * np.arange(dftlen // 2 + 1))

        # Add the spectral envelope
        # Both amplitude and phase
        E = SPECR[n, :]  # Take the amplitude from the given one
        if hp_f0coef is not None:
            # High-pass it to avoid any residual DC component.
            fcut = hp_f0coef * f0
            if pp_atten1stharminsilences is not None and \
                    ener[n] - enermed < pp_atten1stharminsilences:
                fcut = 1.5 * f0  # Try to cut between first and second harm
            HP = sp.butter2hspec(fcut, 4, fs, dftlen, high=True)
            E *= HP
            # Not necessarily good as it is non-causal, so make it causal...
            # ... together with the VTF response below.
        # Build the phase of the envelope from the amplitude
        E = sp.hspec2minphasehspec(E, replacezero=True)  # We spend 2 FFT here!
        S *= E  # Add it to the current pulse

        # Add energy correction wrt f0.
        # STRAIGHT and AHOCODER vocoders do it.
        # (why ? to equalize the energy when changing the pulse's duration ?)
        if ener_multT0:
            S *= np.sqrt(fs / f0)

        # Generate the segment of Gaussian noise
        # Use mid-points before/after pulse position
        if n > 0: leftbnd = int(np.round(fs * 0.5 * (f0s[n - 1, 0] + t)))
        else: leftbnd = int(np.round(fs * (t - 0.5 / f0s[n, 1])))  # int(0)
        if n < f0s.shape[0] - 1:
            rightbnd = int(np.round(fs * 0.5 * (t + f0s[n + 1, 0]))) - 1
        else:
            rightbnd = int(np.round(
                fs * (t + 0.5 / f0s[n, 1])))  #rightbnd=int(wavlen-1)
        gausswinlen = rightbnd - leftbnd  # The length of the noise segment
        gaussnoise4win = np.random.normal(size=(gausswinlen))  # The noise

        GN = np.fft.rfft(gaussnoise4win,
                         dftlen)  # Move the noise to freq domain
        # Normalize it by its energy (@Yannis, That's your answer at SSW9!)
        GN /= np.sqrt(np.mean(np.abs(GN)**2))
        # Place the noise within the pulse's window
        delay = (pulseposinwin - (leftbnd - winidx[0]))
        GN *= np.exp((delay * 2j * np.pi / dftlen) * np.arange(dftlen // 2 + 1))

        # Add it to the pulse spectrum, under the condition of the mask
        S *= GN**NMR[n, :]

        # That's it! the pulse spectrum is ready!

        # Move it to time domain
        deter = np.fft.irfft(S)[0:winlen]

        # Add half window on the left of the synthesized segment
        # to avoid any possible pre-echo
        deter[:leftbnd - winidx[0] - len(dampinhwin)] = 0.0
        deter[leftbnd - winidx[0] - len(dampinhwin):leftbnd -
              winidx[0]] *= dampinhwin

        # Add half window on the right
        # to avoid cutting the VTF response abruptly
        deter[-len(dampinhwin):] *= dampinhwin[::-1]

        # Write the synthesized segment in the final waveform
        if winidx[0] < 0 or winidx[-1] >= wavlen:
            # The window is partly outside of the waveform ...
            # ... thus copy only the existing part
            itouse = np.logical_and(winidx >= 0, winidx < wavlen)
            wav[winidx[itouse]] += deter[itouse]
        else:
            wav[winidx] += deter

    if verbose > 1:
        print('\r                                                               \r', end='')

    if verbose > 2:  # pragma: no cover
        import matplotlib.pyplot as plt
        plt.ion()
        _, axs = plt.subplots(3, 1, sharex=True, sharey=False)
        times = np.arange(len(wav)) / float(fs)
        axs[0].plot(times, wav, 'k')
        axs[0].set_ylabel('Waveform\nAmplitude')
        axs[0].grid()
        axs[1].plot(f0s[:, 0], f0s[:, 1], 'k')
        axs[1].set_ylabel('F0\nFrequency [Hz]')
        axs[1].grid()
        axs[2].imshow(sp.mag2db(SPEC).T,
                      origin='lower',
                      aspect='auto',
                      interpolation='none',
                      extent=(f0s[0, 0], f0s[-1, 0], 0, 0.5 * fs))
        axs[2].set_ylabel('Amp. Envelope\nFrequency [Hz]')

        from IPython.core.debugger import Pdb
        Pdb().set_trace()

    return wav
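# A small standalone check of the frequency-domain delay trick used in the
# synthesis loop above: multiplying the half-spectrum of a Dirac by
# exp(-2j*pi*delay*k/dftlen) shifts the pulse by `delay` samples
# (circularly). The numbers here are illustrative assumptions.
import numpy as np

dftlen = 64
delay = 10                                     # fractional delays also work
S = np.ones(dftlen // 2 + 1, dtype=np.complex64)        # Dirac at n=0
S = S * np.exp(-2j * np.pi * delay / dftlen * np.arange(dftlen // 2 + 1))
pulse = np.fft.irfft(S)
print(np.argmax(pulse))                        # -> 10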
    def process_image(self, index=None):
        if index is None:
            index = self.ui.file_listview.selectedIndexes()
        try:
            logger.info("Processing image {0}, {1}".format(index.row(), index.data()))
        except AttributeError:
            il = self.ui.file_listview.selectedIndexes()
            if len(il) > 0:
                index = self.ui.file_listview.selectedIndexes()[0]
            else:
                self.ui.image_size_label.setText("ERROR")
                self.ui.moment_1_label.setText("-.--")
                self.ui.moment_2_label.setText("-.--")
                return
        try:
            # Load selected image
            pic = np.load("{0}/data/{1}".format(QtCore.QDir.currentPath(), index.data()))
        except ValueError:
            self.ui.image_size_label.setText("ERROR")
            self.ui.moment_1_label.setText("-.--")
            self.ui.moment_2_label.setText("-.--")
            return
        self.ui.image_size_label.setText("{0} x {1}".format(pic.shape[1], pic.shape[0]))
        # logger.debug("Image size: {0} x {1}".format(pic.shape[0], pic.shape[1]))

        # Large ROI around spot, the spot should always be inside
        roi_t = self.ui.roi_top_spinbox.value()
        roi_h = self.ui.roi_height_spinbox.value()
        roi_l = self.ui.roi_left_spinbox.value()
        roi_w = self.ui.roi_width_spinbox.value()

        kernel = self.ui.medfilt_spinbox.value()
        if kernel % 2 == 0:
            kernel += 1

        pic_roi = np.double(pic[roi_t:roi_t + roi_h, roi_l:roi_l + roi_w])
        pic_proc = medfilt2d(pic_roi, kernel)
        # Background level from first 20 columns, one level for each row (the background structure is banded):
        bkg_level = pic_proc[:, 0:20].mean(1)
        self.ui.bkg_label.setText("{0:.1f}".format(bkg_level.mean()))
        pic_proc = np.maximum(0, pic_proc - 1.1 * bkg_level[:, np.newaxis])

        dx = 13e-6   # presumably the physical pixel size [m]
        dx = 1       # ...overridden: work in pixel units below
        x = dx * np.arange(pic_proc.shape[1])
        x0 = ((pic_proc * x[np.newaxis, :]).sum(1) / pic_proc.sum(1))
        x1 = (pic_proc * (x[np.newaxis, :] - x0[:, np.newaxis]) ** 2).sum(1) / pic_proc.sum(1)
        x0_good = x0[~np.isnan(x0)]
        x1_good = x1[~np.isnan(x1)]
        # xc = np.median(x0_good)
        # xs = np.median(x1_good)

        # Assume spot is in the central +-50 vertical pixels of the ROI. Get the index for the maximum value
        xc = pic_proc[pic_proc.shape[0]//2 - 50:pic_proc.shape[0]//2 + 50, :].sum(0).argmax()
        xs = 10
        logger.debug("x0: {0}, x1: {1}".format(x0_good.shape, x1_good.shape))
        logger.debug("xc: {0}, xs: {1}".format(xc, xs))
        # Cut a new ROI around the central pixel column xc, width xs * 10
        pic_roi2 = pic_roi[:, np.maximum(0, int(xc - xs * 5)):np.minimum(pic_roi.shape[1]-1, int(xc + xs * 5))]
        pic_proc2 = medfilt2d(np.double(pic_roi2), kernel)
        # Background level to cut in the reduced ROI:
        bkg_cut = self.ui.bkg_spinbox.value()
        pic_proc2 = np.maximum(0, pic_proc2 - bkg_cut)
        # Create mask around the signal spot by heavily median filtering in the vertical direction (mask_kern ~ 25)
        mask_kern = self.ui.mask_spinbox.value()
        if mask_kern % 2 == 0:
            mask_kern += 1
        mask = medfilt(np.maximum(0, pic_roi2 - bkg_cut), [mask_kern, kernel])
        pic_proc3 = pic_proc2 * (mask > 0)
        xt = dx * np.arange(pic_proc3.shape[1])
        xt0 = ((pic_proc3 * xt[np.newaxis, :]).sum(1) / pic_proc3.sum(1))
        xt1 = (pic_proc3 * (xt[np.newaxis, :] - xt0[:, np.newaxis]) ** 2).sum(1) / pic_proc3.sum(1)
        xt0_good = xt0[~np.isnan(xt0)]
        xt1_good = xt1[~np.isnan(xt1)]
        ind = ~np.isnan(xt1)
        charge = pic_proc3.sum()
        xt1_w = (xt1[ind] * pic_proc3[ind, :].sum(1)).sum() / charge
        self.ui.moment_1_label.setText("{0:.1f} pixels".format(xt1_w))
        self.ui.moment_2_label.setText("{0:.1f} pixels".format(xt1_good.mean()))
        self.ui.charge_label.setText("{0}".format(charge))
        if self.ui.processed_radiobutton.isChecked():
            self.ui.image_widget.setImage(pic_proc.transpose(), autoLevels=True)
        elif self.ui.tight_radiobutton.isChecked():
            try:
                self.ui.image_widget.setImage(pic_proc3.transpose(), autoLevels=True)
            except ValueError:
                logger.error("pic_proc3 shape {0}".format(pic_proc3.shape))
                self.ui.image_widget.setImage(pic_roi2.transpose(), autoLevels=True)
        else:
            self.ui.image_widget.setImage(pic_roi.transpose(), autoLevels=False)
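# A condensed sketch of the background/moment logic in process_image above:
# per-row background from the first 20 columns (the background is banded),
# then intensity-weighted first and second moments along x. The synthetic
# beam-spot image is an assumption.
import numpy as np
from scipy.signal import medfilt2d

rng = np.random.default_rng(2)
pic = 5.0 + rng.standard_normal((100, 120))
xg, yg = np.meshgrid(np.arange(120), np.arange(100))
pic += 100.0 * np.exp(-((xg - 60) ** 2 + (yg - 50) ** 2) / (2 * 8.0 ** 2))

proc = medfilt2d(pic, 5)
bkg = proc[:, 0:20].mean(1)                       # one level per row
proc = np.maximum(0, proc - 1.1 * bkg[:, np.newaxis])

x = np.arange(proc.shape[1])
w = proc.sum(1)
valid = w > 0                                     # skip empty rows
x0 = (proc[valid] * x).sum(1) / w[valid]          # centroid per row
x1 = (proc[valid] * (x - x0[:, np.newaxis]) ** 2).sum(1) / w[valid]
print(np.median(x0), np.median(np.sqrt(x1)))      # ~60 and ~8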
Example #48
0
def main(parser):

    # print(bcolors.HEADER + "*** Conduction Velocity Analyzer ***" + bcolors.ENDC) # on windows cmd colors don't work
    print('\n***************************************')
    print('***** Conduction Velocity Analyzer ****')
    print('***************************************')

    # parse arguments
    args = parser.parse_args()

    txt_path = args.input_filepath[0]

    # extract filename and directory path
    filename = os.path.basename(txt_path)
    folderpath = os.path.dirname(txt_path)

    # extract experiment parameters from filename and print info to console
    burst, param = extract_info(filename, _print=True)

    # create excel file
    wb, sheet, out_path, n_col_sheet_block = prepare_excel_wb(
        folderpath, filename, burst, param)

    # read OM tracks values
    values = np.loadtxt(txt_path, dtype=float, usecols=(0, 1, 2))
    param[pKeys.exp_duration] = values.shape[0] / param[pKeys.frame_rate]
    print("- Duration of record : {} ms".format(param[pKeys.exp_duration]))

    # split timing, roi1 (apex) and roi2 (base) values
    full_ms, full_apex, full_base = values[:, 0], values[:, 1], values[:, 2]
    ''' ===== [ START ANALISIS ] ===== - for each burst '''
    for (i_burst, b) in enumerate(burst):

        print('\n\n********* Analyzing Burst at {}Hz *********'.format(
            b[bKeys.freq_stim]))

        # extract ramp
        ms = full_ms[b[bKeys.start_ms]:b[bKeys.stop_ms]]
        apex = full_apex[b[bKeys.start_ms]:b[bKeys.stop_ms]]
        base = full_base[b[bKeys.start_ms]:b[bKeys.stop_ms]]
        # plot_signals(apex, base, title="Ramp at {}Hz".format(b[keys.freq_stim]))

        # detrend signals
        apex_flat = sign.detrend(apex, type='linear')
        base_flat = sign.detrend(base, type='linear')

        # plot original and flattened
        # plot_signals(apex, base, title="Original Tracks")
        # plot_signals(apex_flat, base_flat, title="Detrended Tracks")

        # if selected, apply median filter and plot results
        if param[pKeys.rank] != 0:
            apex_filt = sign.medfilt(apex_flat, kernel_size=param[pKeys.rank])
            base_filt = sign.medfilt(base_flat, kernel_size=param[pKeys.rank])
            # plot_signals(apex_filt, base_filt, title="Filtered Tracks (rank = {})".format(param[keys.rank]))

        # ENSURE TO USE THE RIGHT SIGNAL (filtered or only flattened)
        (apex_sgn, base_sgn) = (apex_filt, base_filt) \
            if param[pKeys.rank] != 0 else (apex_flat, base_flat)
        ''' ===== [ FIND PEAKS, PERIODS and SELECT AP TO ANALYZE ] ===== '''

        # find peaks - returns the (x, y) of each peak
        # -> pass the inverted signal (find_peaks looks for maxima)
        # require consecutive peaks to be more than 2/3 of a period apart
        # and keep only the x (time) coordinate: [0] -> take only the peak x
        a_peaks = sign.find_peaks(-apex_sgn,
                                  distance=(2 / 3) * b[bKeys.AP_duration])[0]
        b_peaks = sign.find_peaks(-base_sgn,
                                  distance=(2 / 3) * b[bKeys.AP_duration])[0]

        # plot the signals with peaks and period durations
        plot_signals_with_peaks([apex_sgn, base_sgn], [a_peaks, b_peaks],
                                ['Apex', 'Base'],
                                "Ramp at {}Hz".format(b[bKeys.freq_stim]),
                                _enum_peaks=True,
                                _plot_intervals=True,
                                frame_rate=param[pKeys.frame_rate])

        # estimate each AP duration from consecutive peak differences in the apex
        print("- Control of stimulation frequency in the apex signal... ")
        AP_periods = np.diff(a_peaks) / param[pKeys.frame_rate]
        freq_stim_estimated = 1 / np.mean(AP_periods / 1000)
        print("-- Stimulation Frequency obtained: {0:0.1f}Hz".format(
            freq_stim_estimated))

        # the user can select which potentials to use to estimate mean conduction velocity
        selection = input(
            "\n***AP selection to estimate mean conduction velocity.\n"
            "  Insert AP indexes separated by spaces. Example:\n"
            "  0 2 3 5 7 8 9 [<- then press 'Enter'].\n"
            "  Please, enter your selection here and then press 'Enter': \n")
        # extract selected indexes
        ap_selected_idx = selection.split(' ')
        print('- AP selected for Conduction Velocity Estimation: ')
        for l in ap_selected_idx:
            print(l, end='°, ')
        ''' ===== [ ANALYZE EACH ACTION POTENTIALS TO FIND DELAY APEX-BASE ] ===== '''
        # USE INTERPOLATION AND CROSS-CORRELATION TO ESTIMATE DELAY
        # from selected AP potentials
        # Idea: for each peak, take a window wide enough to isolate the action potential,
        # interpolate, estimate the delay from the cross-correlation peak, then average all delays

        cv_list = list()  # list of conduction velocity extracted for each AP

        # for each AP selected
        for (i_spike, spike_num) in enumerate(
                np.asarray(ap_selected_idx).astype(int)):

            # calculate extremes of selected action potential signal
            t1, t2 = int(b[bKeys.AP_duration] * spike_num), int(
                b[bKeys.AP_duration] * (spike_num + 1))
            ms_sel = ms[t1:t2]  # time
            base_sel = base_sgn[t1:t2]  # base
            apex_sel = apex_sgn[t1:t2]  # apex

            # interpolate the two signals with a 0.2 factor (as in LabView)
            dt = 0.2

            # compute the interpolation functions of the two traces
            f_apex = interpolate.interp1d(ms_sel, apex_sel)
            f_base = interpolate.interp1d(ms_sel, base_sel)

            # create the new time axis
            ms_res = np.arange(ms_sel[0], ms_sel[-1], dt)

            # resample signals using interpolation functions calculated above
            apex_res = f_apex(ms_res)
            base_res = f_base(ms_res)

            # estimate delay by cross-correlation max values
            delay_ms = lag_finder_in_ms(apex_res, base_res, 1000 / dt)

            # estimate and save conduction velocity
            cv = param[pKeys.ROI_distance_mm] / delay_ms
            cv_list.append(cv)

            # write spike_num and cv
            sheet.write(i_spike + 7, (i_burst * (n_col_sheet_block + 1)) + 1,
                        "{}".format(int(spike_num)))
            sheet.write(i_spike + 7, (i_burst * (n_col_sheet_block + 1)) + 2,
                        "{0:0.3f}".format(cv))

        # estimate mean and std. error
        avg = np.mean(np.asarray(cv_list))
        sem = stats.sem(np.asarray(cv_list))

        # write mean and std. error into excel file
        sheet.write(17, (i_burst * (n_col_sheet_block + 1)) + 2,
                    "{0:0.3f}".format(avg))
        sheet.write(18, (i_burst * (n_col_sheet_block + 1)) + 2,
                    "{0:0.3f}".format(sem))

        # print results and average
        print("*** RESULTS:")
        print("- Conduction velocities in m/s:")
        for cv in cv_list:
            print("-- {0:0.3f} m/s".format(cv))

        print("\n- Average Conduction velocity: ", end='')
        print("-- {0:0.3f} +- {1:0.3f} m/s".format(avg, sem))

    # save excel file
    wb.save(out_path)
    print('\nOutput saved in:')
    print(out_path)
    print(
        ' --------- Conduction Velocity Analyzer: Process finished. ---------\n'
    )
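# lag_finder_in_ms is not shown in this example; below is a minimal sketch of
# the cross-correlation delay estimate it presumably implements (an
# assumption): the argmax of the full cross-correlation gives the lag in
# samples, converted to ms with the resampled rate.
import numpy as np

def lag_in_ms(a, b, fs_hz):
    a = a - a.mean()
    b = b - b.mean()
    xcorr = np.correlate(b, a, mode='full')
    lag_samples = np.argmax(xcorr) - (len(a) - 1)
    return 1000.0 * lag_samples / fs_hz

fs_hz = 5000.0
t = np.arange(0, 1.0, 1.0 / fs_hz)
apex_ap = np.exp(-((t - 0.40) ** 2) / (2 * 0.01 ** 2))
base_ap = np.exp(-((t - 0.42) ** 2) / (2 * 0.01 ** 2))    # 20 ms later
print(lag_in_ms(apex_ap, base_ap, fs_hz))                 # ~20.0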
    local_file="env_data/msd/flow-2018.csv",
    url=
    "http://wdl.water.ca.gov/waterdatalibrary/docs/Hydstra/docs/B95820Q/2018/FLOW_15-MINUTE_DATA_DATA.CSV",
    skiprows=3,
    parse_dates=['time'],
    names=['time', 'flow_cfs', 'quality', 'notes'])

ax2.plot(msd_flow.time, msd_flow.flow_cfs, label='MSD Flow (cfs)')
ax.axis(xmin=times.min(), xmax=times.max())

msd_turb = cdec.cdec_dataset('MSD',
                             times.min(),
                             times.max(),
                             sensor=27,
                             cache_dir='env_data')
msd_turb['turb_lp'] = ('time', ), signal.medfilt(msd_turb.sensor0027.values, 5)

msd_tempF = cdec.cdec_dataset('MSD',
                              times.min(),
                              times.max(),
                              sensor=25,
                              cache_dir='env_data')

ax_turb = ax2.twinx()
ax_turb.plot(msd_turb.time,
             msd_turb.sensor0027,
             label='MSD Turb. (NTU)',
             color='orange',
             alpha=0.2,
             lw=0.4)
ax_turb.plot(msd_turb.time,
Example #50
0
                line_count += 1
            else:
                kls.append(row[kli])
                recs.append(row[reci])

                for n, idx in enumerate(zidx):
                    zs[n].append(row[idx])

                line_count += 1

    if kls == []:
        print('{} had no progress, skipping'.format(model_name))
        continue

    kls = medfilt(np.asarray(kls[100:], dtype=np.float32), 19)
    recs = medfilt(np.asarray(recs[100:], dtype=np.float32), 19)

    plt.figure(figsize=(10, 10))
    plt.subplot(221)

    data = load_uniform_pendulum()

    v = VAE(load_from=model_name, network=network)

    mus, logvars = v.encode(data[:4000])[:2]
    sigmas = np.mean(np.exp(0.5 * logvars), axis=0)

    bars(d, sigmas, plt_shape, type='sigma', title=model_name)
    plt.legend()
Example #51
0
def plot_output(x_test,
                y_test,
                model,
                median_filtering=False,
                kernel_size=31,
                x_lim=None):

    score = model.evaluate(x_test, y_test, verbose=2)
    predictions = model.predict(x_test)
    # print('Test score:', score[0])
    # print('Test accuracy:', score[1])

    ########### 2 class predictions #####################################################
    positive_predictions = predictions[:, 0][np.where(y_test[:, 0])]
    negative_predictions = predictions[:, 1][np.where(y_test[:, 1])]

    if median_filtering:
        predictions[:, 0] = medfilt(predictions[:, 0], kernel_size=kernel_size)
        predictions[:, 1] = 1 - predictions[:, 0]
        positive_predictions = predictions[:, 0][np.where(y_test[:, 0])]
        negative_predictions = predictions[:, 1][np.where(y_test[:, 1])]

    true_positive_rate = sum(np.round(positive_predictions)) / sum(y_test[:, 0])
    true_negative_rate = sum(np.round(negative_predictions)) / sum(y_test[:, 1])

    figs = []

    f = plt.figure(figsize=(12, 6))
    plt.plot(predictions[:, 0], 'g.', markersize=12, label='y_pred')
    plt.plot(y_test[:, 0], '--b', linewidth=1, markersize=2, label='y_test')

    plt.legend(loc=9, ncol=2)

    plt.ylim([-0.1, 1.4])
    plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])
    if x_lim:
        plt.xlim([x_lim[0], x_lim[1]])
    plt.ylabel('Classifier output')
    plt.xlabel('Signal window number')
    # if save_fig == 'publication_softmax':
    #     plt.tight_layout()
    #     plt.savefig('../../../TexFiles/Papers/ECML/Images/softmax_' + model_name + '.pdf')

    # if save_fig_individual:
    #     plt.savefig('Outputs/' + 'solo_softmax_' + model_name + '.pdf')

    # figs.append(f)
    print('True positive rate', true_positive_rate, 'True negative rate', true_negative_rate)

    #plt.savefig('Outputs/' + 'ClassOutput_' + model_name + '.pdf', transparent = True)
    #print 'saved as', 'ClassOutput_' + model_name + '.pdf'
    #plt.show()

    cnf_matrix = confusion_matrix(y_test[:, 1],
                                  np.round(predictions[:, 1]).astype(int))

    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))

    y_true = y_test[:, 0]
    y_score = predictions[:, 0]
    roc_score = roc_auc_score(y_true, y_score)
    fpr, tpr, thresholds = roc_curve(y_true, y_score)

    #plt.subplot(1,2,2)
    #plt.figure(figsize=(4,4))
    ax1.plot(fpr, tpr, '.-')
    ax1.plot([0, 1], [0, 1], 'k--')
    ax1.set_xlim([-0.01, 1.01])
    ax1.set_ylim([-0.01, 1.01])
    ax1.set_yticks([0.2, 0.4, 0.6, 0.8, 1.0])
    ax1.set_xlabel('False positive rate')
    ax1.set_ylabel('True positive rate')
    ax1.set_title('ROC, area = %.4f' % roc_score)

    y_test_pr = y_test[:, 0]

    precision = dict()
    recall = dict()
    average_precision = dict()
    for i in range(1):
        precision[i], recall[i], _ = precision_recall_curve(y_test_pr, y_score)
        average_precision[i] = average_precision_score(y_test_pr, y_score)

    # Plot Precision-Recall curve
    #plt.clf()
    ax2.plot(recall[0],
             precision[0],
             color='b',
             label='Precision-Recall curve')
    ax2.set_xlabel('Recall')
    ax2.set_ylabel('Precision')
    ax2.set_ylim([0.0, 1.05])
    ax2.set_xlim([0.0, 1.0])
    ax2.set_yticks([0.2, 0.4, 0.6, 0.8, 1.0])
    ax2.set_title('Precision-Recall, area = {0:0.3f}'.format(
        average_precision[0]))
    #plt.legend(loc="lower left")
    # if save_fig == 'publication':
    #     plt.tight_layout()
    #     plt.savefig('/Outputs/Papers/ECML/Images/metric_' + model_name + '.pdf')
    # plt.show()

    F1 = f1_score(np.round(predictions[:, 0]), y_test[:, 0], average='binary')
    print('F1 score', F1)
    perf_metrics = {
        "tpr": true_positive_rate,
        "tnr": true_negative_rate,
        "f1": F1,
        "roc": roc_score,
        "pr": average_precision[0],
        "conf_matrix": cnf_matrix
    }

    return predictions, perf_metrics
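# A small sketch of the prediction smoothing above: median-filter the
# positive-class score and rebuild the complementary class, which removes
# short isolated misclassified windows. The kernel size and the toy scores
# are assumptions.
import numpy as np
from scipy.signal import medfilt

preds = np.zeros((200, 2))
preds[:100, 0] = 0.9                      # first half "positive"
preds[60:62, 0] = 0.1                     # two glitched windows
preds[:, 0] = medfilt(preds[:, 0], kernel_size=5)
preds[:, 1] = 1 - preds[:, 0]
print(preds[59:63, 0])                    # glitch removed -> all 0.9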
Example #52
0
def load_signal(DS, winL, winR, do_preprocess):

    class_ID = [[] for i in range(len(DS))]
    beat = [[] for i in range(len(DS))]  # record, beat, lead
    R_poses = [np.array([]) for i in range(len(DS))]
    Original_R_poses = [np.array([]) for i in range(len(DS))]
    valid_R = [np.array([]) for i in range(len(DS))]
    my_db = mit_db()
    patients = []

    # Lists
    # beats = []
    # classes = []
    # valid_R = np.empty([])
    # R_poses = np.empty([])
    # Original_R_poses = np.empty([])

    size_RR_max = 20

    pathDB = '/home/mondejar/dataset/ECG/'
    DB_name = 'mitdb'
    fs = 360
    jump_lines = 1

    # Read files: signal (.csv )  annotations (.txt)
    fRecords = list()
    fAnnotations = list()

    lst = os.listdir(pathDB + DB_name + "/csv")
    lst.sort()
    for file in lst:
        if file.endswith(".csv"):
            if int(file[0:3]) in DS:
                fRecords.append(file)
        elif file.endswith(".txt"):
            if int(file[0:3]) in DS:
                fAnnotations.append(file)

    MITBIH_classes = [
        'N', 'L', 'R', 'e', 'j', 'A', 'a', 'J', 'S', 'V', 'E', 'F'
    ]  #, 'P', '/', 'f', 'u']
    AAMI_classes = []
    AAMI_classes.append(['N', 'L', 'R'])  # N
    AAMI_classes.append(['A', 'a', 'J', 'S', 'e', 'j'])  # SVEB
    AAMI_classes.append(['V', 'E'])  # VEB
    AAMI_classes.append(['F'])  # F
    #AAMI_classes.append(['P', '/', 'f', 'u'])              # Q

    RAW_signals = []
    r_index = 0

    #for r, a in zip(fRecords, fAnnotations):
    for r in range(0, len(fRecords)):

        print("Processing signal " + str(r) + " / " + str(len(fRecords)) +
              "...")

        # 1. Read signalR_poses
        filename = pathDB + DB_name + "/csv/" + fRecords[r]
        print(filename)
        f = open(filename, 'r')
        reader = csv.reader(f, delimiter=',')
        next(reader)  # skip first line!
        MLII_index = 1
        V1_index = 2
        if int(fRecords[r][0:3]) == 114:
            MLII_index = 2
            V1_index = 1

        MLII = []
        V1 = []
        for row in reader:
            MLII.append((int(row[MLII_index])))
            V1.append((int(row[V1_index])))
        f.close()

        RAW_signals.append(
            (MLII, V1)
        )  ## NOTE a copy must be created in order to preserve the original signal
        # display_signal(MLII)

        # 2. Read annotations
        filename = pathDB + DB_name + "/csv/" + fAnnotations[r]
        print(filename)
        f = open(filename, 'r')
        next(f)  # skip first line!

        annotations = []
        for line in f:
            annotations.append(line)
        f.close()
        # 3. Preprocessing signal!
        if do_preprocess:
            #scipy.signal
            # median_filter1D
            baseline = medfilt(MLII, 71)
            baseline = medfilt(baseline, 215)

            # Remove Baseline
            for i in range(0, len(MLII)):
                MLII[i] = MLII[i] - baseline[i]

            # TODO Remove High Freqs

            # median_filter1D
            baseline = medfilt(V1, 71)
            baseline = medfilt(baseline, 215)

            # Remove Baseline
            for i in range(0, len(V1)):
                V1[i] = V1[i] - baseline[i]

        # Extract the R-peaks from annotations
        for a in annotations:
            aS = a.split()

            pos = int(aS[1])
            originalPos = int(aS[1])
            classAnttd = aS[2]
            if pos > size_RR_max and pos < (len(MLII) - size_RR_max):
                index, value = max(enumerate(MLII[pos - size_RR_max:pos +
                                                  size_RR_max]),
                                   key=operator.itemgetter(1))
                pos = (pos - size_RR_max) + index

            peak_type = 0
            #pos = pos-1

            if classAnttd in MITBIH_classes:
                if (pos > winL and pos < (len(MLII) - winR)):
                    beat[r].append((MLII[pos - winL:pos + winR],
                                    V1[pos - winL:pos + winR]))
                    for i in range(0, len(AAMI_classes)):
                        if classAnttd in AAMI_classes[i]:
                            class_AAMI = i
                            break  #exit loop
                    #convert class
                    class_ID[r].append(class_AAMI)

                    valid_R[r] = np.append(valid_R[r], 1)
                else:
                    valid_R[r] = np.append(valid_R[r], 0)
            else:
                valid_R[r] = np.append(valid_R[r], 0)

            R_poses[r] = np.append(R_poses[r], pos)
            Original_R_poses[r] = np.append(Original_R_poses[r], originalPos)

        #R_poses[r] = R_poses[r][(valid_R[r] == 1)]
        #Original_R_poses[r] = Original_R_poses[r][(valid_R[r] == 1)]

    # Set the data into a bigger struct that keep all the records!
    my_db.filename = fRecords

    my_db.raw_signal = RAW_signals
    my_db.beat = beat  # record, beat, lead
    my_db.class_ID = class_ID
    my_db.valid_R = valid_R
    my_db.R_pos = R_poses
    my_db.orig_R_pos = Original_R_poses

    return my_db
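# The per-sample baseline-subtraction loops above can be vectorized; a hedged
# equivalent sketch with the same 71/215-sample kernels (the toy drifting
# lead is an assumption):
import numpy as np
from scipy.signal import medfilt

rng = np.random.default_rng(4)
mlii = rng.standard_normal(2000) + np.linspace(0, 5, 2000)   # drifting lead
baseline = medfilt(medfilt(mlii, 71), 215)
mlii_detrended = mlii - baseline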
Example #53
0
# In[13]:

# BF_imgs=np.zeros((masterdark.shape[0],masterdark.shape[1],len(base_frames_fullnames)))
BF_imgs = []
for i in range(len(base_frames_fullnames)):
    #     sys.stdout.write('\r')
    #     sys.stdout.write("Add base frame "+str(i+1)+"/"+str(len(base_frames_fullnames)))
    #     sys.stdout.flush()
    bf_fname = base_frames_fullnames[i]
    hdulist = fits.open(bf_fname, ignore_missing_end=True)
    #     BF_imgs[:,:,i]=hdulist[0].data.astype('float')
    BF_imgs.append(hdulist[0].data.astype('float'))
    hdulist.close()
    BF_imgs[-1] = (BF_imgs[-1] - masterdark.astype('float'))
    BF_imgs[-1] = ss.medfilt(BF_imgs[-1], kernel_size=avr_width1)

# In[14]:

# YPIX, XPIX = np.mgrid[1:BF_imgs[:,:,0].shape[0]+1, 1:BF_imgs[:,:,0].shape[1]+1]
YPIX, XPIX = np.mgrid[1:BF_imgs[0].shape[0] + 1, 1:BF_imgs[0].shape[1] + 1]

# In[15]:

err, az0, alt0, a, b, c, d = get_solve_pars(solve_pars_fname)

# In[16]:

#az_c, alt_c = arc_pix2hor(255,255,az0,alt0,a,b)
#print(az_c*180/np.pi,alt_c*180/np.pi)
# 292.814270937 83.6690150447
Example #54
0
def crop_video(opt, track, cropfile):

    cap = cv2.VideoCapture(
        os.path.join(opt.avi_dir, opt.reference, 'video.avi'))

    total_frames = cap.get(7)
    cap.set(1, track[0][0])  # CHANGE THIS !!!

    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    vOut = cv2.VideoWriter(cropfile + 't.avi', fourcc, cap.get(5), (224, 224))

    fw = cap.get(3)
    fh = cap.get(4)

    dets = [[], [], []]

    for det in track[1]:

        dets[0].append(
            ((det[3] - det[1]) * fw + (det[2] - det[0]) * fh) / 4)  # H+W / 4
        dets[1].append((det[1] + det[3]) * fw / 2)  # crop center x
        dets[2].append((det[0] + det[2]) * fh / 2)  # crop center y

    # Smooth detections
    dets[0] = signal.medfilt(dets[0], kernel_size=5)
    dets[1] = signal.medfilt(dets[1], kernel_size=5)
    dets[2] = signal.medfilt(dets[2], kernel_size=7)

    for det in zip(*dets):

        cs = opt.crop_scale

        bs = det[0]  # Detection box size
        bsi = int(bs * (1 + 2 * cs))  # Pad videos by this amount

        ret, frame = cap.read()

        frame = np.pad(frame, ((bsi, bsi), (bsi, bsi), (0, 0)),
                       'constant',
                       constant_values=(0, 0))
        my = det[2] + bsi  # BBox center Y
        mx = det[1] + bsi  # BBox center X

        face = frame[int(my - bs):int(my + bs * (1 + 2 * cs)),
                     int(mx - bs * (1 + cs)):int(mx + bs * (1 + cs))]

        vOut.write(cv2.resize(face, (224, 224)))

    audiotmp = os.path.join(opt.tmp_dir, opt.reference, 'audio.wav')
    audiostart = track[0][0] / cap.get(5)
    audioend = (track[0][-1] + 1) / cap.get(5)

    cap.release()
    vOut.release()

    # ========== CROP AUDIO FILE ==========

    command = (
        "ffmpeg -y -i %s -ac 1 -vn -acodec pcm_s16le -ar 16000 -ss %.3f -to %.3f %s -loglevel quiet"
        % (os.path.join(opt.avi_dir, opt.reference, 'video.avi'), audiostart,
           audioend, audiotmp))  # -async 1
    output = subprocess.call(command, shell=True, stdout=None)

    if output != 0:
        pdb.set_trace()

    sample_rate, audio = wavfile.read(audiotmp)

    # ========== COMBINE AUDIO AND VIDEO FILES ==========

    command = (
        "ffmpeg -y -i %st.avi -i %s -c:v copy -c:a copy %s.avi -loglevel quiet"
        % (cropfile, audiotmp, cropfile))  # -async 1
    output = subprocess.call(command, shell=True, stdout=None)

    if output != 0:
        pdb.set_trace()

    print('Written %s' % cropfile)

    os.remove(cropfile + 't.avi')

    return [track, dets]
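# A tiny sketch of the detection smoothing above: median-filtering the
# per-frame box-center track suppresses single-frame detector jitter before
# cropping. The toy track is an assumption.
import numpy as np
from scipy import signal

cx = np.array([100., 101, 99, 100, 160, 100, 101, 100, 99, 100])
print(signal.medfilt(cx, kernel_size=5))      # the 160 outlier is replaced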
Example #55
0
def smooth(flux):
    return medfilt(flux, kernel_size=7)
Example #56
0
                               usecols=(0, 1),
                               encoding=None,
                               skip_footer=data.size)
for entry in pte_entries:
    get_cachelines(entry[0], int(entry[1]))

print "\nSLAT cacheline candidates:"
pprint.pprint(set(cachelines))

# Read time values for each cachelines into a dictionary of {cacheline:[timings]}
cache_n_timing = {}
cache_list = []

for cacheline in range(ncacheline):
    sample = data[cacheline * nrounds:(cacheline + 1) * nrounds]
    sample = medfilt(sample)
    sample = savgol_filter(sample, nrounds - 1, 0)
    cache_list.append(sample)
    avg = sum(sample.tolist()) / nrounds
    cache_n_timing[cacheline] = avg

g_index = [s for s in range(ncacheline)]
g_cols = [r for r in range(nrounds)]
df = DataFrame(cache_list, index=g_index, columns=g_cols)

plt.pcolor(df)
plt.savefig("results/slatfilter.png")

sorted_cache_score = sorted(cache_n_timing.items(),
                            key=operator.itemgetter(1))[::-1]
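# A minimal sketch of the two-stage smoothing above: a median filter to kill
# single-round outliers, then a zeroth-order Savitzky-Golay filter (a moving
# average over an odd window) to flatten the rest. Window sizes and the toy
# timings are assumptions.
import numpy as np
from scipy.signal import medfilt, savgol_filter

rng = np.random.default_rng(3)
nrounds = 100
timings = 80.0 + rng.standard_normal(nrounds)
timings[[7, 42]] = 400.0                      # cache-miss style spikes

smooth = medfilt(timings)                     # default kernel_size is 3
smooth = savgol_filter(smooth, nrounds - 1, 0)
print(timings.mean(), smooth.mean())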
Example #57
0
def dissectData(data, onbodywindow=2, thrsd=0.1, thrrange=0.5, samplerate=50):
    ''' For a given datastream, identify on-body segments
    [and maybe pickle them].
    input: pandas dataframe of data with accelerometer in columns named x, y, z;
           window size in minutes

    returns: dict of on-body periods and dict of off-body segments '''

    tt = data.copy()
    #   tt= returndf.copy()[30000:-60000]
    tt = tt[['x', 'y', 'z']]
    # tt.columns = ['x','y','z']
    if tt.isnull().sum().sum() != 0:
        print("nulls in data - I can't handle that ")
    ar = tt[['x', 'y', 'z']].values
    smoothedx = medfilt(ar[:, 0], 3)
    smoothedy = medfilt(ar[:, 1], 3)
    smoothedz = medfilt(ar[:, 2], 3)

    #tt['sx','sy','sz'] =smoothed[:,0:2]
    tt['sx'] = smoothedx
    tt['sy'] = smoothedy
    tt['sz'] = smoothedz
    # tt.plot()
    #np.std(tt.z.values)
    #tt.describe()
    tt.drop(['x', 'y', 'z'], axis=1, inplace=True)
    #tt.plot()
    #samplerate=50

    offBodySegments, allsds = getOffBody(
        onbodywindow, tt, samplerate, thrsd,
        thrrange)  # x-minute window, 50 samples/second
    #plt.plot(np.asarray(list(offBodySegments.values())))
    #plt.show()
    #offBodySegments = offBodyPeriods.copy()
    offBodyar = np.asarray(list(offBodySegments.values()))
    # get periods of segments from ' off body' dict

    #only interested in > 10 minutes of wear time/offtime, so 5 consecutive zeros or
    # TODO 5 consecutive ones
    #offBodySegments = offBodyPeriods.copy()

    runsize = 5
    periodcounter = 0
    periods = {}  # generate dict of periods of on body - runs of 0's
    switch = np.sign(offBodyar[:runsize].sum())  # initial state: 0 if all zero, 1 if > 0
    if switch == 0:
        startrun = 0
    for i in np.arange(len(offBodyar) - runsize):
        runsum = offBodyar[i:i + runsize].sum()
        # at beginning , record beginning
        oldswitch = switch
        switch = np.sign(runsum)  # either 0 or > 0
        if oldswitch != switch:  # state transition
            if switch == 0:  # start of new period
                print('start new period ', i)
                startrun = i  # all elements must be zero
            else:  # end of period switch from 0 to 1 - on to off body
                endrun = i + runsize - 2  # the first runsize-1 values are actually 0
                print('end period ', endrun)
                periods[periodcounter] = [startrun, endrun]
                periodcounter += 1
        if (i + runsize == len(offBodyar) - 1) and (switch == 0):
            # end of array while still in a run of zeros
            print('end of array processing', i + runsize,
                  len(offBodyar) - 1, switch)
            endrun = i + runsize
            periods[periodcounter] = [startrun, endrun]

    ws = 2  # minutes
    samplesize = int(ws * samplerate * 60)
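    # samples per window = minutes * 60 s/min * samples/s, e.g. 2 * 60 * 50 = 6000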

    def picklePeriods(data, periods, samplesize, fn):
        import pickle
        import os
        epoch = {}
        #os.getcwd()
        for period in periods:
            length = periods[period][1] - periods[period][0] + 1
            length *= samplesize
            start = samplesize * periods[period][0]
            epoch[period] = data[start:start + length]
            pickle.dump(epoch[period], open(fn + '_' + str(period), 'wb'))

        return

    #fn='userid-date-device'
    #picklePeriods(data, periods, samplesize,fn)

    return periods, offBodySegments
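
# A compact vectorized alternative to the run-of-zeros scan in dissectData
# above; the helper name and the `flags` input are illustrative, not from
# the original code:
import numpy as np

def zero_runs(flags, minlen=5):
    """Return (start, end) index pairs of runs of zeros of length >= minlen."""
    iszero = np.concatenate(([0], (np.asarray(flags) == 0).astype(int), [0]))
    edges = np.flatnonzero(np.diff(iszero))     # boundaries of each zero run
    starts, ends = edges[::2], edges[1::2] - 1  # inclusive end indices
    keep = (ends - starts + 1) >= minlen
    return [(int(s), int(e)) for s, e in zip(starts[keep], ends[keep])]

# e.g. zero_runs([1, 0, 0, 0, 0, 0, 1, 0], minlen=5) -> [(1, 5)]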
Example #58
0
        for j in range(0, len(start_lable)):
            lens = 0.0
            distance = []
            print(start_lable[j])
            print(end_lable[j])
            print('segment length:', end_lable[j] - start_lable[j])
            for i in range(start_lable[j], end_lable[j]):
                x = data_x[i + 1] - data_x[i]
                y = data_y[i + 1] - data_y[i]
                z = data_z[i + 1] - data_z[i]
                # use math.sqrt() to take the square root
                lens = math.sqrt((x**2) + (y**2) + (z**2))
                # print('cumulative distance:', lens)
                print(lens)
                distance.append(lens)
            distance_1 = signal.medfilt(distance, 3)

            np.savetxt('/Users/lipengzhi/Desktop/duibi_modle/' + str(j) +
                       '.csv',
                       distance_1,
                       delimiter=',')
            middle = len(distance_1) // 2
            distance_fir = distance_1[:middle]
            distance_las = distance_1[middle:]
            print(distance_fir, distance_las)
            max_distance_fir = np.max(distance_fir)

            max_distance_las = np.max(distance_las)
            print('maximum of the first peak', max_distance_fir)
            print('maximum of the second peak', max_distance_las)
            print('difference between the two peaks', max_distance_fir - max_distance_las)
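
# Vectorized sketch of the per-sample distance computation in the loop above
# (data_x/y/z here are synthetic stand-ins for the original arrays):
import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
data_x, data_y, data_z = rng.random((3, 100))
steps = np.sqrt(np.diff(data_x)**2 + np.diff(data_y)**2 + np.diff(data_z)**2)
steps = signal.medfilt(steps, 3)               # same 3-point median as above
half = len(steps) // 2
print(steps[:half].max(), steps[half:].max())  # the two peak maxima compared above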
Example #59
0
def getPTdetail(vacc, sintheta, PTlog, smph=0.35, smpd=40):
    ''' get the type of postural transition (PT):
        0 - none: not a PT - don't know what it is!
        1 - sist (sit-to-stand)
        2 - stsi (stand-to-sit)
        3 - bumpy sist (double and/or twist)
        4 - bumpy stsi
        and add the duration.
        input: VERTICAL accelerometer data as an array; log of start/end of
        PTs (PTlog) in sample times;
        smph: minimum peak height for sintheta;
        smpd: minimum peak distance for sintheta - in samples '''
    minlength = 20  # samples = 0.4 s * 50 Hz - minimum duration of a PT
    from BMC.functions import detect_peaks
    Vv = getVertVelocity(vacc)
    sinfilt = medfilt(sintheta, 25)  # this may be too high
    # NB: the line below filters the raw sintheta again, so it replaces
    # (rather than chains with) the median-filtered result above
    sinfilt = movingAv(sintheta, 12)
    #plt.plot(Vv,label='Vv')
    PTdetail = {}
    # len(sintheta)
    for i, pt in PTlog.items():
        start, end, dur = pt
        #start,end,dur = PTlog[33] # testing
        if dur < minlength:
            continue
        showgr = False
        #plt.plot(sinfilt,label='sinfilt')
        # addGrid()
        ptsintheta = sinfilt[start:end]
        plt.plot(ptsintheta)
        # now we have one PT. get peaks for sintheta, then peaks for arb
        sinthpeaks = detect_peaks.detect_peaks(ptsintheta,
                                               mph=smph,
                                               mpd=smpd,
                                               edge='both',
                                               kpsh=True,
                                               show=showgr)

        # if 2, take the mean; else the median?
        # TODO - if there are multiple peaks x distance apart, raise the
        # threshold to separate them
        if len(sinthpeaks) == 0:
            print(i, 'no sin theta peaks found, rejecting PT. mph,mpd =', smph,
                  smpd)
            continue
        try:
            sinthpeak = np.mean(sinthpeaks)
            sinthy = ptsintheta[int(sinthpeak)]
        except ValueError:
            print('sinthpeaks: ', sinthpeaks, 'PTlog: ', i)  # sinthpeak may be unset here
            continue
        #plt.plot(ptvv)
        #addGrid()
        #get 1 s before and after the peak (50 samples at 50 Hz)
        sinpeak = int(start + sinthpeak)
        offset = sinpeak - 50  # sample no. of start of data

        ptvv = Vv[offset:sinpeak + 50]
        Vvpeaks = detect_peaks.detect_peaks(ptvv,
                                            mph=0.25,
                                            mpd=40,
                                            show=showgr)
        if len(Vvpeaks) == 0:
            print(i, 'no vertical velocity peaks found, rejecting PT. mph=0.25')
            continue

        Vvvalleys = detect_peaks.detect_peaks(ptvv,
                                              mph=None,
                                              mpd=20,
                                              valley=True,
                                              show=showgr)
        if len(Vvvalleys) == 0:
            print(i, ' no valleys found, rejecting PT')
            continue
        # we want the valley before the peak
        Vpeak = np.mean(Vvpeaks)  # assuming max of 2? may need the one closest to the sintheta peak
        # find the valley closest before the peak: valleys before the peak
        # have negative offsets, and taking reciprocals makes the offset
        # nearest zero the most negative, so argmin selects it
        Vvv = Vvvalleys - Vpeak  # float offsets of each valley from the peak
        idx = (Vvv**-1).argmin()
        Vvalley = Vvvalleys[idx]
        # how far apart are the peak and valley ... min 80, max 220 samples (by eye)
        #TODO reject if samples too far apart
        # note that the velocity zero can drift, so if the valley is above
        # zero, adjust it to -0.1 and apply the same adjustment to the peak
        if ptvv[Vvalley] >= 0:
            valley = -0.1
            diff = ptvv[Vvalley] - valley
            peak = ptvv[int(Vpeak)] - diff
        else:
            valley = ptvv[Vvalley]
            peak = ptvv[int(Vpeak)]

        ratio = abs(peak / valley)
        if ratio > 1.9:
            result = 1  #sist ratio arrived at empirically
            #TODO derive the ratio from the data
        else:
            result = 2  # stsi
        possintheta = [sinpeak, sinthy]
        pospeak = [Vpeak + offset, peak]
        posvalley = [Vvalley + offset, valley]

        #returnval  = [possinthteta,pospeak,posvalley]
        PTdetail[i] = [result, possintheta, pospeak, posvalley,
                       ratio]  # will add the duration later

    return PTdetail
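
# The peak/valley ratio rule from getPTdetail, isolated as a sketch (the 1.9
# threshold is the empirical value used above; the sample inputs are made up):
def classify_pt(peak, valley, threshold=1.9):
    """Return 1 for sist (sit-to-stand), 2 for stsi (stand-to-sit)."""
    return 1 if abs(peak / valley) > threshold else 2

# classify_pt(0.6, -0.25) -> 1 (sist); classify_pt(0.3, -0.25) -> 2 (stsi)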
Example #60
0
def getVectors(data, start=0, end=-1, gvalue=9.81):
    ''' from accelerometer data passed in as a dataframe, extract:
    1. gravity vector
    2. body vector
    3. vertical acceleration
    4. original vectors realigned with the lag induced in the calculated
       vectors by the butterworth filters
    5. realigned horizontal vectors

    input: dataframe of x, y, z; start pos; end pos
    '''
    #data=walk15.copy()
    from scipy.signal import medfilt

    #gvalue=9.82  # note g can change over time dependent on accelerometer
    #gvalue=9.74
    tt = data[start:end].copy()
    tt = tt[['x', 'y', 'z']]  # select from the slice, not the full dataframe
    tt.isnull().sum()  # sanity check only; the result is unused
    ar = tt[['x', 'y', 'z']].values
    sx = medfilt(ar[:, 0], 3)
    sy = medfilt(ar[:, 1], 3)
    sz = medfilt(ar[:, 2], 3)
    # now butterworth with recommended filter params :
    # filter order 2, cut off frequency 1.6Hz delay 0.141 seconds
    xg, yg, zg = lpFilter(sx, sy, sz, 1.6, 50, 2)
    # the filter delay is 0.141 s = 7 samples at 50 Hz: chop 7 off the end of
    # the filtered signals and 7 off the start of the raw ones
    xg = xg[:-7]
    yg = yg[:-7]
    zg = zg[:-7]
    sx = sx[7:]
    sy = sy[7:]
    sz = sz[7:]
    # do the same with the original data
    ar = ar[7:, :]
    #and adjust the dataframe
    newdf = data.iloc[7:, :].copy()
    # calculate inertial signals - i.e. body movements, by subtracting gravity
    xb = sx - xg
    yb = sy - yg
    zb = sz - zg
    arb = np.asarray([xb, yb, zb]).transpose()
    argr = np.asarray([xg, yg, zg]).transpose()
    ars = np.asarray([sx, sy, sz]).transpose()
    # normalise and take the dot product to get the vertical acceleration
    normg = (xg**2 + yg**2 + zg**2)**0.5  # |g| = sqrt(g.g)
    vacc = np.array([np.dot(a, b) for a, b in zip(argr, ars)])
    vacc = vacc / normg
    vacc = vacc - gvalue  # just to be explicit - body only
    # calculate in a different way using body acc only
    # this way introduces a variation in G that seems to destabilise the
    # signal slightly - it seems more valid to use the fixed gravity constant
    # calculated for the device
    #    vacc2 = np.array([np.dot(a,b) for a,b in zip(arb,argr)])
    #    vacc2 = vacc2/normg
    #    vacc2 = np.array([vacc2]).transpose()
    #    #vacc2.transpose().shape
    #   # argr.shape
    vacc2d = np.array([vacc]).transpose()
    Vvacc = vacc2d * argr  # vertical acceleration expressed as a vector
    Vhacc = arb - Vvacc

    return ar, arb, argr, ars, vacc, Vhacc, newdf
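
# Worked micro-example of the projection step in getVectors: vertical
# acceleration is the component of the measured vector along the low-pass
# gravity estimate, minus g (the numbers here are illustrative):
import numpy as np

g_est = np.array([0.0, 0.0, 9.81])   # gravity direction from the butterworth stage
meas = np.array([0.5, 0.0, 10.3])    # one median-filtered accelerometer sample
v_acc = np.dot(g_est, meas) / np.linalg.norm(g_est) - 9.81
print(round(v_acc, 3))               # -> 0.49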