Example #1
def crosscorr_phase_angle(sig1, sig2, x, max_length=10000):
    """Return the cross correlation phase angle between 2 signals

    Parameters
    ----------
    sig1 : array
        signal of length L
    sig2 : array
        another signal of length L
    x : array
        time axis for the signals sig1 and sig2
    max_length : int, optional
        Maximum allowed signal length; longer signals are resampled.
        Default is 10,000.
    """
    assert len(sig1) == len(sig2) == len(x), \
        "The signals don't have the same length."
    sig_length = len(sig1)
    # Resample if the signal is too long, since long signals slow down the correlation computation
    if sig_length > max_length:
        sig1, x = resample(sig1, max_length, x)
        sig2 = resample(sig2, max_length)
        sig_length = max_length
    corr = np.correlate(sig1, sig2, mode="same")
    xmean = sig_length/2
    return float(argmax(corr) - xmean)/sig_length*x[-1]  # *x[-1] to scale
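A minimal usage sketch (not from the original project), assuming numpy as np, argmax from numpy, and scipy.signal.resample in scope, as the function body expects:

import numpy as np
from numpy import argmax
from scipy.signal import resample

x = np.linspace(0, 1, 2000)                   # 1 s time axis
sig1 = np.sin(2 * np.pi * 5 * x)              # 5 Hz sine
sig2 = np.sin(2 * np.pi * 5 * x + np.pi / 4)  # same sine, phase-shifted
print(crosscorr_phase_angle(sig1, sig2, x))   # signed lag in units of x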
Example #2
def extract_data(i):
	global id_legend, _id, t, intra_trace, extra_trace, img_fname
	if i < n_datum and i>=0:
		_id = list(datum.keys())[i] # 'a0' or 's1' etc.
		img_fname = img_path+datum[_id][0]
		igor_trace = datum[_id][1]
		extra_id_0 = _id + '1_Ax1_Vm' # Axon
		extra_id_1 = _id + '1_Ch2_Vm' # Dendrite
		intra_id = _id + '1_Ch1_Vm'
		t = igor_trace[intra_id].axis[0] * 1e6
		t = t[::-1]
		intra_trace = igor_trace[intra_id].data * 1e3 # unit(mV)
		extra_trace = np.zeros((len(t),2))
		extra_trace[:,0] = igor_trace[extra_id_0].data * 1e6 # unit(uV) Axon (left)
		extra_trace[:,1] = igor_trace[extra_id_1].data * 1e6 # unit(uV) Dendrite (right)
		##################### down-sampling #####################
		N = 2 # down sampling 2 folds
		intra_trace,t_d = resample(intra_trace, intra_trace.shape[0]//N, t)
		extra_trace,t_d = resample(extra_trace, extra_trace.shape[0]//N, t)
		t = t_d
		print('estimated fs', 1/(t[2]-t[1]))
		###################### filtering process #####################
		fs = np.ceil(1/(t[2]-t[1]))
		print('sampling frequency %f' % fs)
		b, a = butter_bandpass(200,3000,fs,6)
		extra_trace = filtfilt(b, a, extra_trace.T, padlen=150, padtype="even")
		extra_trace = extra_trace.T
		###############################################################
		return _id, t, intra_trace, extra_trace, img_fname
	elif i < 0:
		print('index out of range, cannot be less than 0')
	elif i >= n_datum:
		print('index out of range, cannot be %d or greater' % n_datum)
Example #3
def stochasticModel(x, w, N, stocf):
    # x: input array sound, w: analysis window, N: FFT size,
    # stocf: decimation factor of mag spectrum for stochastic analysis
    # y: output sound

    hN = N // 2  # size of positive spectrum
    hM = w.size // 2  # half analysis window size
    pin = hM  # initialize sound pointer in middle of analysis window
    fftbuffer = np.zeros(N)  # initialize buffer for FFT
    yw = np.zeros(w.size)  # initialize output sound frame
    w = w / sum(w)  # normalize analysis window
    ws = hanning(w.size) * 2  # synthesis window

    # -----analysis-----
    xw = x[pin - hM : pin + hM] * w  # window the input sound
    X = fft(xw)  # compute FFT
    mX = 20 * np.log10(abs(X[:hN]))  # magnitude spectrum of positive frequencies
    mXenv = resample(np.maximum(-200, mX), int(mX.size * stocf))  # decimate the mag spectrum
    pX = np.angle(X[:hN])
    # -----synthesis-----
    mY = resample(mXenv, hN)  # interpolate to original size
    pY = 2 * np.pi * np.random.rand(hN)  # generate phase random values
    Y = np.zeros(N, dtype=complex)
    Y[:hN] = 10 ** (mY / 20) * np.exp(1j * pY)  # generate positive freq.
    Y[hN + 1 :] = 10 ** (mY[:0:-1] / 20) * np.exp(-1j * pY[:0:-1])  # generate negative freq.

    fftbuffer = np.real(ifft(Y))  # inverse FFT
    y = ws * fftbuffer * N / 2  # overlap-add

    return mX, pX, mY, pY, y
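A hedged usage sketch for this single-frame model (not from the original project); the function body assumes hanning, fft, ifft, resample and np are already in scope:

import numpy as np
from numpy import hanning
from numpy.fft import fft, ifft
from scipy.signal import resample

N = 512
w = hanning(N)                  # analysis window, same length as the FFT
x = np.random.randn(N)          # one frame of noise-like input
mX, pX, mY, pY, y = stochasticModel(x, w, N, stocf=0.5)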
Example #4
 def upsample_sig(self):
     
     # Check that the upsample value is more than 0
     # upsample the signal
     self.cur_signal_up = signal.resample(self.cur_signal_raw, self.cur_signal_raw.size*self.N_up)
     tmpr = signal.resample(np.real(self.cur_signal_raw), self.cur_signal_raw.size*self.N_up)
     
     # change signal to magnitude of signal according to user specs
     if self.sig_type == 'm':
         self.cur_signal_up = self.sig_mag(self.cur_signal_up)
     
     # if the user asked to filter, add observation to average
     if self.filter_on:
         self.filter_sig()
     
     # if this is our first CIR, save it for reference, and flip flag
     if self.is_first_obs:
         self.start_time = self.cur_time
         self.num_taps = self.cur_signal_raw.size
         self.up_sig_len = self.cur_signal_up.size
         self.ref_complex_cir = self.cur_signal_up.copy()
         
         self.__init_mats()
         
         self.is_first_obs = 0
Example #5
def main():
  X,Y = [],[]
  Y2 = []
  index = 0
  with open(sys.argv[1],'r') as fp:
    lines = [i.strip().split() for i in fp.readlines() if i]
  for line in lines:
    y = float(line[1])
    x = index
    index += 1
    Y.append(y)
    X.append(x)
    if x > 1500 and x < 1700:
      print(x, y, y - y*3*math.exp(-(x-1600)**2/1500))
      Y2.append(y-y*3*math.exp(-(x-1600)**2/1500))
    else:
      Y2.append(y)



  y = signal.resample(Y,100)
  y2 = signal.resample(Y2,100)

  y = [i/max(y) for i in y]
  y2 = [i/max(y2) for i in y2]

  with open("%s.resampled" %sys.argv[1], 'w') as fp:
    fp.write('x,y\n')
    fp.write('\n'.join(["%s,%s" % (x,y) for x,y in zip(range(len(y)),y)]))

  with open("%s.resampled.absorber" %sys.argv[1], 'w') as fp:
    fp.write('x,y\n')
    fp.write('\n'.join(["%s,%s" % (x,y) for x,y in zip(range(len(y2)),y2)]))
Example #6
def stochasticModel(x, H, stocf):
	# stochastic analysis/synthesis of a sound, one frame at a time
	# x: input array sound, H: hop size, 
	# stocf: decimation factor of mag spectrum for stochastic analysis
	# returns y: output sound
	N = H*2                                                  # FFT size
	w = hanning(N)                                           # analysis/synthesis window
	x = np.append(np.zeros(H),x)                             # add zeros at beginning to center first window at sample 0
	x = np.append(x,np.zeros(H))                             # add zeros at the end to analyze last sample
	pin = 0                                                  # initialize sound pointer at the beginning of the sound
	pend = x.size-N                                          # last sample to start a frame
	y = np.zeros(x.size)                                     # initialize output array
	while pin<=pend:              
	#-----analysis-----             
		xw = x[pin:pin+N]*w                                    # window the input sound
		X = fft(xw)                                            # compute FFT
		mX = 20 * np.log10(abs(X[:H]))                         # magnitude spectrum of positive frequencies
		mYst = resample(np.maximum(-200, mX), int(mX.size*stocf))   # decimate the mag spectrum
	#-----synthesis-----
		mY = resample(mYst, H)                                 # interpolate to original size
		pY = 2*np.pi*np.random.rand(H)                         # generate phase random values
		Y = np.zeros(N, dtype = complex)
		Y[:H] = 10**(mY/20) * np.exp(1j*pY)                    # generate positive freq.
		Y[H+1:] = 10**(mY[:0:-1]/20) * np.exp(-1j*pY[:0:-1])   # generate negative freq.
		fftbuffer = np.real(ifft(Y))                           # inverse FFT
		y[pin:pin+N] += w*fftbuffer                            # overlap-add
		pin += H  
	y = np.delete(y, range(H))                               # delete half of first window which was added 
	y = np.delete(y, range(y.size-H, y.size))                # delete half of last window which was added
	return y
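A hedged usage sketch (not from the original project); as above, hanning, fft, ifft, resample and np must be in scope for the function body:

import numpy as np
from numpy import hanning
from numpy.fft import fft, ifft
from scipy.signal import resample

fs = 44100
x = np.random.randn(fs)             # 1 s of noise as a stand-in sound
y = stochasticModel(x, H=256, stocf=0.2)
print(x.size, y.size)               # the output length matches the input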
Example #7
    def subsample_align_upsampling(self, sig_tx, sig_rx, n_up=32):
        """
        Returns an aligned version of sig_tx and sig_rx by cropping and subsample alignment
        Using upsampling
        """
        assert (sig_tx.shape[0] == sig_rx.shape[0])

        if sig_tx.shape[0] % 2 == 1:
            sig_tx = sig_tx[:-1]
            sig_rx = sig_rx[:-1]

        sig1_up = signal.resample(sig_tx, sig_tx.shape[0] * n_up)
        sig2_up = signal.resample(sig_rx, sig_rx.shape[0] * n_up)

        off_meas = self.lag_upsampling(sig2_up, sig1_up, n_up=1)
        off = int(abs(off_meas))

        if off_meas > 0:
            sig1_up = sig1_up[:-off]
            sig2_up = sig2_up[off:]
        elif off_meas < 0:
            sig1_up = sig1_up[off:]
            sig2_up = sig2_up[:-off]

        sig_tx = signal.resample(sig1_up, sig1_up.shape[0] // n_up).astype(np.complex64)
        sig_rx = signal.resample(sig2_up, sig2_up.shape[0] // n_up).astype(np.complex64)
        return sig_tx, sig_rx
Example #8
def data_parser(theta,kappa,tt,ch,tt_ch):

    theta_r = np.array([[resample(theta.values.squeeze()[i,950:1440],50)] for i in range(0,theta.shape[0])])
    theta_r = zscore(theta_r.squeeze(),axis=None)

    kappa_r = np.array([[resample(kappa.values.squeeze()[i,950:1440],50)] for i in range(0,kappa.shape[0])])
    kappa_r = zscore(kappa_r.squeeze(),axis=None)

    kappa_df = pd.DataFrame(kappa_r)
    theta_df = pd.DataFrame(theta_r)

    both_df = pd.concat([theta_df,kappa_df],axis=1)

    if tt_ch == 'tt':
        # trial type
        clean = np.nan_to_num(tt) !=0
        tt_c = tt[clean.squeeze()].values
    else:
        # choice
        clean = np.nan_to_num(ch) !=0
        tt_c = ch[clean.squeeze()].values

    # tt_c = tt[tt.values !=0|3].values
    both = both_df.values
    # both_c = both[clean.squeeze(),:]
    both_c = both[clean.squeeze(),:]

    # keeping one hot vector for now (incase we want it later)
#     labs = np.eye(3)[tt_c.astype(int)-1]
    # y[np.arange(3), a] = 1
#     labs = labs.squeeze()

    return both_c, tt_c, clean
Example #9
def rx_oversampled(frames, ref_frame, modulated_frame, x_preamble, data, rx_kernel, demapper, timeslots, fft_len, cp_len, cs_len):
    ref_frame_os = signal.resample(ref_frame, 2 * len(ref_frame))
    x_preamble_os = signal.resample(x_preamble, 2 * len(x_preamble))

    nyquist_frame_len = cp_len + 2 * fft_len + cs_len + cp_len + timeslots * fft_len + cs_len
    n_frames = np.shape(frames)[0]
    sync_frames = np.zeros((n_frames, nyquist_frame_len), dtype=complex)
    print('nyquist sampled frame len', nyquist_frame_len, 'with n_frames', n_frames)
    f_start = cp_len + 2 * fft_len + cs_len
    d_start = f_start + cp_len
    print('data start: ', d_start)
    for i, f in enumerate(frames[0:2]):
        tf = np.roll(f, 1)
        tf[0] = 0
        ff = signal.resample(tf, len(f) // 2)
        sframe = synchronize_time(ff, ref_frame_os, x_preamble_os, 2 * fft_len, 2 * cp_len)
        sframe = signal.resample(sframe, len(sframe) // 2)
        sframe = synchronize_freq_offsets(sframe, modulated_frame, x_preamble, fft_len, cp_len, samp_rate=3.125e6)
        print(len(sframe), len(ref_frame))
        rx_preamble = sframe[cp_len:cp_len + 2 * fft_len]
        avg_phase = calculate_avg_phase(rx_preamble, x_preamble)
        # m, c = calculate_avg_phase(rx_preamble, x_preamble)
        # avg_phase = calculate_avg_phase(sframe, ref_frame)
        # phase_eqs = m * np.arange(-cp_len, len(sframe) - cp_len) + c
        # sframe *= np.exp(-1j * phase_eqs)
        # sframe *= np.exp(-1j * avg_phase)
        sync_frames[i] = sframe
        rx_data_frame = sframe[d_start:d_start + fft_len * timeslots]
        # # rx_data_frame *= np.exp(-1j * avg_phase)
        #
        demodulate_frame(rx_data_frame, modulated_frame, rx_kernel, demapper, data, timeslots, fft_len)

    for i, f in enumerate(sync_frames[0:3]):
        rx_data_frame = f[d_start:d_start + fft_len * timeslots]
        demodulate_frame(rx_data_frame, modulated_frame, rx_kernel, demapper, data, timeslots, fft_len)
Example #10
    def filter(self):
        '''
        resamples time series
        :return:resampled time series with sampling frequency set to resamplerate
        '''
        # samplerate = self.time_series.attrs['samplerate']
        samplerate = float(self.time_series['samplerate'])


        time_axis_length = np.squeeze(self.time_series.coords['time'].shape)
        new_length = int(np.round(time_axis_length*self.resamplerate/samplerate))

        print(new_length)

        if self.time_axis_index<0:
            self.time_axis_index = self.time_series.get_axis_num('time')

        time_axis = self.time_series.coords[ self.time_series.dims[self.time_axis_index] ]

        try:
            time_axis_data = time_axis.data['time'] # time axis can be recarray with one of the arrays being time
        except (KeyError ,IndexError) as excp:
            # if we get here then most likely time axis is ndarray of floats
            time_axis_data = time_axis.data

        time_idx_array = np.arange(len(time_axis))


        if self.round_to_original_timepoints:
            filtered_array, new_time_idx_array = resample(self.time_series.data,
                                             new_length, t=time_idx_array,
                                             axis=self.time_axis_index, window=self.window)

            # print new_time_axis

            new_time_idx_array = np.rint(new_time_idx_array).astype(int)

            new_time_axis = time_axis[new_time_idx_array]

        else:
            filtered_array, new_time_axis = resample(self.time_series.data,
                                             new_length, t=time_axis_data,
                                             axis=self.time_axis_index, window=self.window)



        coords = []
        for i, dim_name in enumerate(self.time_series.dims):
            if i != self.time_axis_index:
                coords.append(self.time_series.coords[dim_name].copy())
            else:
                coords.append((dim_name,new_time_axis))


        filtered_time_series = xray.DataArray(filtered_array, coords=coords)
        # filtered_time_series.attrs['samplerate'] = self.resamplerate
        filtered_time_series['samplerate'] = self.resamplerate
        return TimeSeriesX(filtered_time_series)
Example #11
File: herschel.py Project: hsso/hssopy
 def resample(self, times=2):
     from scipy.signal import resample
     self.flux, self.freq = resample(self.flux, int(len(self.flux)/times),
                                     t=self.freq)
     if hasattr(self, "fluxcal"):
         self.fluxcal = resample(self.fluxcal,
                                     int(len(self.fluxcal)/times))
     if hasattr(self, "baseline"):
         self.baseline = resample(self.baseline,
                                     int(len(self.baseline)/times))
Example #12
 def lag_upsampling(self, sig_orig, sig_rec, n_up):
     if n_up != 1:
         sig_orig_up = signal.resample(sig_orig, sig_orig.shape[0] * n_up)
         sig_rec_up = signal.resample(sig_rec, sig_rec.shape[0] * n_up)
     else:
         sig_orig_up = sig_orig
         sig_rec_up = sig_rec
     l = self.lag(sig_orig_up, sig_rec_up)
     l_orig = float(l) / n_up
     return l_orig
Example #13
def RotateVector_45degree(newN,vec):
    from scipy import ndimage as ndi
    from scipy import signal
    N = vec.shape[0]
    newvec = numpy.zeros((newN,newN,3),float)
    for i in range(3):
        temp = signal.resample(signal.resample(vec[:,:,i],N*4,axis=0),N*4,axis=1) 
        temp = ndi.rotate(temp,45,mode='wrap')
        newvec[:,:,i] = signal.resample(signal.resample(temp,newN,axis=0),newN,axis=1) 
    return newvec
Example #14
File: sampling.py Project: makokal/pycss
def resample_via_fft(curve, samples):
    """ resample_via_fft(curve, samples)
    Resample the curve using scily signal processing utility via Fourier and zero padding
    """

    rx = resample(curve[0,:], samples)
    ry = resample(curve[1,:], samples)

    # rsig = resample(curve, samples, axis=0)

    return np.array([rx, ry])
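A minimal usage sketch (not from the original project): resample a closed 2-D curve, stored as a 2 x N array, down to 128 points:

import numpy as np
from scipy.signal import resample

theta = np.linspace(0, 2 * np.pi, 200, endpoint=False)
curve = np.array([np.cos(theta), np.sin(theta)])  # unit circle, shape (2, 200)
resampled = resample_via_fft(curve, 128)          # shape (2, 128)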
Example #15
def resample_with_gaussian_blur(input_array,sigma_for_gaussian,resampling_factor):
	sz=input_array.shape	
	
	gauss_temp=ndimage.gaussian_filter(input_array,sigma=sigma_for_gaussian)
	
	resam_temp=sg.resample(gauss_temp,axis=1,num=sz[1]//resampling_factor)
	
	resam_temp=sg.resample(resam_temp,axis=2,num=sz[2]//resampling_factor)
	
	
	return (resam_temp)	
Example #16
def fft_gauss_blur_img(img, scale, std_cut_off=5):

    old_img_size = img.shape[0]
    new_img_size = np.round( img.shape[0]*scale )

    std = guassianDownSampleSTD( old_img_size , new_img_size,  std_cut_off, old_img_size )

    sr = sig.resample( img, old_img_size, window = ('gaussian', std) )
    sr = sig.resample( sr, old_img_size, window = ('gaussian', std), axis=1)

    return sr
Example #17
def fft_resample_img(img, nPix, std_cut_off = None):
    #this is only for square images
    if std_cut_off is not None:
        oldSize = np.size(img,0)
        std = guassianDownSampleSTD( oldSize, nPix,  std_cut_off, oldSize )
        sr = sig.resample(img, nPix, window = ('gaussian', std))
        sr = sig.resample(sr, nPix, window = ('gaussian', std ), axis = 1)
    else:
        sr = sig.resample( img, nPix, window = ('boxcar') )
        sr = sig.resample( sr, nPix, window = ('boxcar'), axis = 1)

    return sr
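A hedged usage sketch for the boxcar path (not from the original project); the Gaussian path additionally needs the project's guassianDownSampleSTD helper:

import numpy as np
import scipy.signal as sig

img = np.random.rand(64, 64)
small = fft_resample_img(img, 32)   # FFT-resample a 64x64 image to 32x32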
Example #18
def stftMorph(x1, x2, fs, w1, N1, w2, N2, H1, smoothf, balancef):
	"""
	Morph of two sounds using the STFT
	x1, x2: input sounds, fs: sampling rate
	w1, w2: analysis windows, N1, N2: FFT sizes, H1: hop size
	smoothf: smoothing factor of sound 2, bigger than 0 up to a max of 1, where 1 is no smoothing,
	balancef: balance between the 2 sounds, from 0 to 1, where 0 is sound 1 and 1 is sound 2
	returns y: output sound
	"""
	
	if (N2/2*smoothf < 3):                           # raise exception if decimation factor too small
		raise ValueError("Smooth factor too small")
		
	if (smoothf > 1):                                # raise exception if decimation factor too big
		raise ValueError("Smooth factor above 1")
	
	if (balancef > 1 or balancef < 0):               # raise exception if balancef outside 0-1
		raise ValueError("Balance factor outside range")
	
	if (H1 <= 0):                                    # raise error if hop size 0 or negative
		raise ValueError("Hop size (H1) smaller or equal to 0")
			
	M1 = w1.size                                     # size of analysis window
	hM1_1 = int(math.floor((M1+1)/2))                # half analysis window size by rounding
	hM1_2 = int(math.floor(M1/2))                    # half analysis window size by floor
	L = int(x1.size/H1)	                             # number of frames for x1
	x1 = np.append(np.zeros(hM1_2),x1)               # add zeros at beginning to center first window at sample 0
	x1 = np.append(x1,np.zeros(hM1_1))               # add zeros at the end to analyze last sample
	pin1 = hM1_1                                     # initialize sound pointer in middle of analysis window       
	w1 = w1 / sum(w1)                                # normalize analysis window
	M2 = w2.size                                     # size of analysis window
	hM2_1 = int(math.floor((M2+1)/2))                # half analysis window size by rounding
	hM2_2 = int(math.floor(M2/2))                    # half analysis window size by floor
	H2 = int(x2.size/L)                              # hop size for second sound
	x2 = np.append(np.zeros(hM2_2),x2)               # add zeros at beginning to center first window at sample 0
	x2 = np.append(x2,np.zeros(hM2_1))               # add zeros at the end to analyze last sample
	pin2 = hM2_1                                     # initialize sound pointer in middle of analysis window  
	y = np.zeros(x1.size)                            # initialize output array
	for l in range(L):                                   
	#-----analysis-----  
		mX1, pX1 = DFT.dftAnal(x1[pin1-hM1_1:pin1+hM1_2], w1, N1)           # compute dft
		mX2, pX2 = DFT.dftAnal(x2[pin2-hM2_1:pin2+hM2_2], w2, N2)           # compute dft
	#-----transformation-----
		mX2smooth = resample(np.maximum(-200, mX2), int(mX2.size*smoothf))       # smooth spectrum of second sound
		mX2 = resample(mX2smooth, mX1.size)                                 # generate back the same size spectrum
		mY = balancef * mX2 + (1-balancef) * mX1                            # generate output spectrum
	#-----synthesis-----
		y[pin1-hM1_1:pin1+hM1_2] += H1*DFT.dftSynth(mY, pX1, M1)  # overlap-add to generate output sound
		pin1 += H1                                     # advance sound pointer
		pin2 += H2                                     # advance sound pointer
	y = np.delete(y, range(hM1_2))                   # delete half of first window which was added in stftAnal
	y = np.delete(y, range(y.size-hM1_1, y.size))    # delete half of last window which was added in stftAnal
	return y
Example #19
def resample(signal, prev_sample_rate, new_sample_rate):
    if prev_sample_rate == new_sample_rate:
        return signal

    rate_factor = new_sample_rate/float(prev_sample_rate)
    num_samples = int(len(signal.T)*rate_factor)
    if signal.ndim == 2:
        result = numpy.empty((len(signal), num_samples), dtype=signal.dtype)
        for si, s in enumerate(signal):
            result[si] = scisig.resample(s, num_samples)
        return result
    else:
        return scisig.resample(signal, num_samples)    
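A minimal usage sketch (not from the original project); the function expects numpy imported as numpy and scipy.signal as scisig at module level:

import numpy
import scipy.signal as scisig

mono = numpy.random.randn(44100)              # 1 s at 44.1 kHz
stereo = numpy.random.randn(2, 44100)         # 2-channel version
mono_16k = resample(mono, 44100, 16000)       # -> 16000 samples
stereo_16k = resample(stereo, 44100, 16000)   # resampled channel by channel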
Example #20
    def _forward_dataset_helper(self, ds):
        # local binding
        num = self.__num

        pos = None
        if self.__position_attr is not None:
            # we know something about sample position
            pos = ds.sa[self.__position_attr].value
            rsamples, pos = resample(ds.samples, self.__num, t=pos,
                                     window=self.__window_args)
        else:
            # we know nothing about samples position
            rsamples = resample(ds.samples, self.__num, t=None,
                                window=self.__window_args)
        # new dataset that reuses that feature and dataset attributes of the
        # source
        mds = Dataset(rsamples, fa=ds.fa, a=ds.a)

        # the tricky part is what to do with the samples attributes, since their
        # number has changed
        if self.__attr_strategy == 'remove':
            # nothing to be done
            pass
        elif self.__attr_strategy == 'sample':
            step = int(len(ds) / num)
            sa = dict([(k, ds.sa[k].value[0::step][:num]) for k in ds.sa])
            mds.sa.update(sa)
        elif self.__attr_strategy == 'resample':
            # resample the attributes themselves
            sa = {}
            for k in ds.sa:
                v = ds.sa[k].value
                if pos is None:
                    sa[k] = resample(v, self.__num, t=None,
                                     window=self.__window_args)
                else:
                    if k == self.__position_attr:
                        # position attr will be handled separately at the end
                        continue
                    sa[k] = resample(v, self.__num, t=pos,
                                     window=self.__window_args)[0]
            # inject them all
            mds.sa.update(sa)
        else:
            raise ValueError("Unkown attribute handling strategy '%s'."
                             % self.__attr_strategy)

        if pos is not None:
            # we got the new sample positions and can store them
            mds.sa[self.__position_attr] = pos
        return mds
Example #21
    def resample(self, sampling_times):

        [self.x, self.time] = signal.resample(
            self.x,
            len(sampling_times),
            sampling_times)
        [self.y, self.time] = signal.resample(
            self.y,
            len(sampling_times),
            sampling_times)
        [self.z, self.time] = signal.resample(
            self.z,
            len(sampling_times),
            sampling_times)
Example #22
def audio_loop(file, p, ratio, end, chunk_size, stop_proc):
    time.sleep(0.5)
    proc = psutil.Process(os.getpid())
    proc.nice(-5)
    time.sleep(0.02)
    print ('audio file is ' + str(file))
    while True:
        #chunk = 2048/2
        wf = wave.open(file, 'rb')
        time.sleep(0.03)
        data = wf.readframes(chunk_size.value)
        time.sleep(0.03)

        stream = p.open(
            format = p.get_format_from_width(wf.getsampwidth()), 
            channels = wf.getnchannels(),
            rate = wf.getframerate(),
            output = True,
            frames_per_buffer = chunk_size.value)
        
        while data != b'' and stop_proc.value == 0:
            #need to try locking here for multiprocessing
            array = numpy.frombuffer(data, dtype=numpy.int16)
            result = numpy.reshape(array, (array.size//2, 2))
            #split data into separate channels and resample
            final = numpy.ones((1024,2))
            reshapel = signal.resample(result[:, 0], 1024)

            final[:, 0] = reshapel
            reshaper = signal.resample(result[:, 1], 1024)
            final[:, 1] = reshaper
            out_data = final.flatten().astype(numpy.int16).tobytes()
            #data = signal.resample(array, chunk_size.value*ratio.value)
            #stream.write(data.astype(int).tostring())
            stream.write(out_data)
            round_data = (int)(chunk_size.value*ratio.value)
            if round_data % 2 != 0:
                round_data += 1
            data = wf.readframes(round_data)
        stream.stop_stream()
        stream.close()
        wf.close()
        p.terminate()
        
        if end or stop_proc.value == 1:
            break
    stream.stop_stream()
    stream.close()
    wf.close()
    p.terminate()
Example #23
    def resample(self, num, axis=0, window=None):

        """Resample timeseries data for a signal onto a different time
        base. This routine uses scipy.signal's resample function which
        creates an interpolation with the option of filtering by
        specifying the window keyword. See help(scipy.signal.resample)
        for details."""

        if self['time'] is not None:
            (self['signal'], self['time']) = resample(self['signal'], num, 
                                                      t=self['time'], axis=axis, 
                                                      window=window)
        else: 
            self['signal'] = resample(self['signal'], num, axis=axis, 
                                      window=window)
Example #24
def demodulate(sdr_samples, sample_rate, ignore=0):
    t = np.arange(len(sdr_samples)) / sample_rate
    demod = np.exp(-2j*pi*freq_offset*t)
    y = sdr_samples * demod

    threshold = 0.04
    y = y[abs(y) > threshold]

    from scipy import signal  #should be imported already?

    sig = angle(y[1:] * conj(y[:-1]))

    h = signal.firwin(256,5000.0,nyq=sample_rate/2.0)
    sigf = signal.fftconvolve(sig, h)
    if ignore > 0:
        sigf = sigf[ignore:-ignore]
    
    
    downsample = 24
    sigfd = sigf[::downsample]

    fs_down = fs
    sigfdd = signal.resample(sigfd, int(len(sigfd) * float(fs_down) / (sample_rate / float(downsample))))
    
    return sigfdd
Example #25
def resample(audio, target_sample_rate):
    
    """
    Resamples audio to a specified sample rate.
    
    This function should only be used for relatively short audio segments,
    say not longer than a second or so. It uses the `scipy.signal.resample`
    method to perform the resampling, which computes a length-M DFT and a
    length-N inverse DFT, where M and N are the input and output length,
    respectively. M and N may not be powers of two, and they may even be
    prime, which can make this function slow if M or N is too large.
    """
    
    
    if audio.sample_rate == target_sample_rate:
        # do not need to resample
        
        return audio
    
    else:
        # need to resample
        
        # We put this import here instead of at the top of this module
        # so the module can be used in Python environments that don't
        # include SciPy, as long as this function is not called.
        import scipy.signal as signal

        ratio = target_sample_rate / audio.sample_rate
        num_samples = int(round(len(audio.samples) * ratio))
        samples = signal.resample(audio.samples, num_samples)
        return Bunch(samples=samples, sample_rate=target_sample_rate)
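A hedged usage sketch (not from the original project), with types.SimpleNamespace standing in for whatever Bunch container the project actually uses:

import numpy as np
from types import SimpleNamespace as Bunch  # stand-in for the project's Bunch

clip = Bunch(samples=np.random.randn(22050), sample_rate=22050)
out = resample(clip, 32000)
print(out.sample_rate, len(out.samples))    # 32000 32000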
Example #26
def stochasticModelSynth(stocEnv, H, N):
	"""
	Stochastic synthesis of a sound
	stocEnv: stochastic envelope; H: hop size; N: fft size
	returns y: output sound
	"""

	if not(UF.isPower2(N)):                                 	# raise error if N not a power of two
		raise ValueError("N is not a power of two")
 
	hN = N//2+1                                            		# positive size of fft
	No2 = N//2							# half of N
	L = stocEnv[:,0].size                                    	# number of frames
	ysize = H*(L+3)                                         	# output sound size
	y = np.zeros(ysize)                                     	# initialize output array
	ws = 2*hanning(N)                                        	# synthesis window
	pout = 0                                                 	# output sound pointer
	for l in range(L):                    
		mY = resample(stocEnv[l,:], hN)                        # interpolate to original size
		pY = 2*np.pi*np.random.rand(hN)                        # generate phase random values
		Y = np.zeros(N, dtype = complex)                       # initialize synthesis spectrum
		Y[:hN] = 10**(mY/20) * np.exp(1j*pY)                   # generate positive freq.
		Y[hN:] = 10**(mY[-2:0:-1]/20) * np.exp(-1j*pY[-2:0:-1]) # generate negative freq.
		fftbuffer = np.real(ifft(Y))                           # inverse FFT
		y[pout:pout+N] += ws*fftbuffer                         # overlap-add
		pout += H  
	y = np.delete(y, range(No2))                              # delete half of first window
	y = np.delete(y, range(y.size-No2, y.size))               # delete half of the last window 
	return y
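A hedged usage sketch (not from the original project); it assumes the sms-tools utilFunctions module is importable as UF, with hanning, resample and ifft in scope as the function body expects:

import numpy as np
from numpy import hanning
from numpy.fft import ifft
from scipy.signal import resample
import utilFunctions as UF          # sms-tools helper assumed by the function

stocEnv = -60 * np.ones((100, 64))  # 100 frames of a flat -60 dB envelope
y = stochasticModelSynth(stocEnv, H=128, N=256)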
Example #27
def convolve_scalogram(ana, wf, sampling_rate,optimize_fft):
    n = wf.shape[0]
    sig  = ana.magnitude
    ana_sr=ana.sampling_rate.rescale('Hz').magnitude
    if optimize_fft:
        sig=sig-sig.mean() # Remove mean before padding
        nfft=int(2**np.ceil(np.log(sig.size)/np.log(2)))
        sig=np.r_[sig,np.zeros(nfft-sig.size)] # pad signal with 0 to a power of 2 length
        sig=resample(sig,int(sig.size*sampling_rate/ana_sr)) # resample in time domain 
        sigf=fftpack.fft(sig,n) # Compute fft with a power of 2 length        
    else:        
        sigf=fftpack.fft(sig)
        # subsampling in fft domain (attention factor)
        factor = (sampling_rate/ana.sampling_rate).simplified.magnitude
        x=(n-1)//2
        if np.mod(n,2)==0:
            sigf = np.concatenate([sigf[0:x+2],  sigf[-x:]])*factor
        else:
            sigf = np.concatenate([sigf[0:x+1],  sigf[-x:]])*factor
            
    # windowing ???
    #win = fftpack.ifftshift(np.hamming(n))
    #sigf *= win
    
    # Convolve (mult. in Fourier space)
    wt_tmp=fftpack.ifft(sigf[:,np.newaxis]*wf,axis=0)
    # and shift
    wt = fftpack.fftshift(wt_tmp,axes=[0])
    return wt
Example #28
def stochasticResidualAnal(x, N, H, sfreq, smag, sphase, fs, stocf):
	"""
	Subtract sinusoids from a sound and approximate the residual with an envelope
	x: input sound, N: fft size, H: hop-size
	sfreq, smag, sphase: sinusoidal frequencies, magnitudes and phases
	fs: sampling rate; stocf: stochastic factor, used in the approximation
	returns stocEnv: stochastic approximation of residual
	"""

	hN = N//2                                             # half of fft size
	x = np.append(np.zeros(hN),x)                         # add zeros at beginning to center first window at sample 0
	x = np.append(x,np.zeros(hN))                         # add zeros at the end to analyze last sample
	bh = blackmanharris(N)                                # synthesis window
	w = bh/ sum(bh)                                       # normalize synthesis window
	L = sfreq.shape[0]                                    # number of frames, this works if no sines
	pin = 0
	for l in range(L):
		xw = x[pin:pin+N] * w                               # window the input sound
		X = fft(fftshift(xw))                               # compute FFT
		Yh = UF_C.genSpecSines(N*sfreq[l,:]/fs, smag[l,:], sphase[l,:], N)   # generate spec sines
		Xr = X-Yh                                           # subtract sines from original spectrum
		mXr = 20*np.log10(abs(Xr[:hN]))                     # magnitude spectrum of residual
		mXrenv = resample(np.maximum(-200, mXr), int(mXr.size*stocf))  # decimate the mag spectrum
		if l == 0:                                          # if first frame
			stocEnv = np.array([mXrenv])
		else:                                               # rest of frames
			stocEnv = np.vstack((stocEnv, np.array([mXrenv])))
		pin += H                                            # advance sound pointer
	return stocEnv
Example #29
File: processes.py Project: tbekolay/phd
    def make_step(self, size_in, size_out, dt, rng):
        assert size_in[0] == 0
        assert size_out[0] == 1

        rate = 1. / dt

        orig_rate, orig = readwav(self.path)
        new_size = int(orig.size * (rate / orig_rate))
        wave = resample(orig, new_size)
        wave -= wave.mean()

        # Normalize wave to desired rms
        wave_rms = npext.rms(wave)
        wave *= (self.rms / wave_rms)

        if self.at_end == 'loop':

            def step_wavfileloop(t):
                idx = int(t * rate) % wave.size
                return wave[idx]
            return step_wavfileloop

        elif self.at_end == 'stop':

            def step_wavfilestop(t):
                idx = int(t * rate)
                if idx >= wave.size:
                    return 0.
                else:
                    return wave[idx]
            return step_wavfilestop
Example #30
def to_envelopes(path,num_bands,freq_lims,window_length=None,time_step=None):
    sr, proc = preproc(path,alpha=0.97)
    proc = proc/sqrt(mean(proc**2))*0.03
    bandLo = [ freq_lims[0]*exp(log(freq_lims[1]/freq_lims[0])/num_bands)**x for x in range(num_bands)]
    bandHi = [ freq_lims[0]*exp(log(freq_lims[1]/freq_lims[0])/num_bands)**(x+1) for x in range(num_bands)]
    if window_length is not None and time_step is not None:
        use_windows = True
        nperseg = int(window_length*sr)
        noverlap = int(time_step*sr)
        window = hanning(nperseg+2)[1:nperseg+1]
        step = nperseg - noverlap
        indices = arange(0, proc.shape[-1]-nperseg+1, step)
        num_frames = len(indices)
        envelopes = zeros((num_bands,num_frames))
    else:
        use_windows=False
        sr_env = 120
        t = len(proc)/sr
        numsamp = int(ceil(t * sr_env))
        envelopes = []
    for i in range(num_bands):
        b, a = butter(2,(bandLo[i]/(sr/2),bandHi[i]/(sr/2)), btype = 'bandpass')
        env = filtfilt(b,a,proc)
        env = abs(hilbert(env))
        if use_windows:
            window_sums = []
            for k,ind in enumerate(indices):
                seg = env[ind:ind+nperseg] * window
                window_sums.append(sum(seg))
            envelopes[i,:] = window_sums
        else:
            env = resample(env,numsamp)
            envelopes.append(env)
    return array(envelopes).T
Example #31
def ups(array):
    y = sg.resample(array, 600)
    y = pd.DataFrame(y)
    return (y)
Example #32
        #else:
        #	transforms[i] = [0,0,0]

        prev_gray = curr_gray

#plt.plot(np.arange(transforms.shape[0]) * 1/29.97,transforms[:,0])
#plt.plot(np.arange(transforms.shape[0]) * 1/29.97,transforms[:,1])
#plt.plot(np.arange(transforms.shape[0]) * 1/29.97,transforms[:,2])

transforms = np.genfromtxt('test_clips/GX016017.MP4' + "opticalflowH6.csv",
                           delimiter=',')

mysample = transforms[0:100, 2] * 59.94

N = 0
resampled = resample(mysample, 400)
sampleB = integrator.get_raw_data("z")[N:400 + N]

testcorr = np.correlate(sampleB, resampled, "full")
plt.plot(testcorr)
print(np.argmax(testcorr) - 400)

maxcorr = np.argmax(testcorr) - 400

#plt.plot(resampled)
#plt.plot(integrator.get_raw_data("z")[0:400])

plt.plot(np.arange(mysample.shape[0]) * interval - 3.1 / 60, mysample)

#plt.figure()
Example #33
          counter=counter+1
          
          

yClass=[]
for i in range(0,len(y)):
    temp=[0] * 11
    temp[y[i]]=1
    yClass.append(temp)

Xnew=[]
counter=0
for i in  X:
    
    f = signal.resample(i, 8000)
    counter=1+counter
    Xnew.append(f)
Xtest= np.array(Xnew)

ytest= np.array(yClass)


b = Xtest[:, :, newaxis]
Xtest=b

model = tf.keras.models.load_model('model1.h5') #Model name here.
pred= model.predict(Xtest,verbose=1)


#########
Example #34
def GetAPTs(filename,
            ti1=0,
            ti2=1,
            pval=100,
            pad=False,
            interpol=False,
            norm=False):

    print('Generating Pulse Train From File:')
    print(filename)
    print('\n')

    ## Spectral arrays.
    Specx = np.zeros(shape=2048, dtype=complex)
    Specy = np.zeros(shape=2048, dtype=complex)
    ndt = 2048

    # Unpack the data.
    Horder, xp, ixp, yp, iyp = np.genfromtxt(filename, unpack=True)

    # Get x and y spectral components.
    specx = xp + 1j * ixp
    specy = yp + 1j * iyp

    # Shift the spectra.
    Specx[:1024] = specx
    Specy[:1024] = specy

    for idx, energy in enumerate(Horder):
        if energy < 12.0:
            Specx[idx] = 0
            Specy[idx] = 0
        if energy > 45:
            Specx[idx] = 0
            Specy[idx] = 0

    if pad == True:

        # Try zero padding hard core before ifft.
        Specx = np.pad(Specx, (pval, pval), 'constant', constant_values=(0, 0))
        Specy = np.pad(Specy, (pval, pval), 'constant', constant_values=(0, 0))

        # Get the padded fields.
        Ex = np.fft.ifft(Specx)
        Ey = np.fft.ifft(Specy)

    else:
        # Get the fields.
        Ex = np.fft.ifft(Specx)
        Ey = np.fft.ifft(Specy)

    # Shift the fields to center them at t=0.
    fl = len(Ex) // 2
    Ex = np.hstack((Ex[-fl:], Ex[:-fl]))
    Ey = np.hstack((Ey[-fl:], Ey[:-fl]))

    # Now, get the real part of the fields.
    Ex = np.real(Ex)
    Ey = np.real(Ey)

    if interpol == True:
        # Try interpolating.
        Ex = signal.resample(Ex, len(Ex) * 3)
        Ey = signal.resample(Ey, len(Ey) * 3)

    # Create the time array.
    dw = Horder[2] - Horder[1]  # Frequency step size.
    wmax = (Horder[-1] + dw) * omega  # Max frequency.
    t = np.linspace(0, len(Ex), len(Ex)) * np.pi / wmax  # Time array.
    dt = t[2] - t[1]
    tmax = t[-1] + dt

    tplot = (t - tmax / 2) / t_period

    if norm == True:
        Ex = Ex / np.max(np.abs(Ex))
        Ey = Ey / np.max(np.abs(Ey))

    # for idx, stuff in enumerate(Ex):
    #     print(idx, tplot[idx], np.sqrt(stuff**2 + Ey[idx]**2))

    # Truncate arrays to a single cycle of the IR field.
    tind = ((tplot >= ti1) & (tplot <= ti2))

    tplot = tplot[tind]
    Ex = Ex[tind]
    Ey = Ey[tind]

    # # Try padding the arrays for aesthetics.
    # tpre = np.linspace(tplot[0], tplot[0]-0.25, 50)
    # tpost = np.linspace(tplot[-1], tplot[-1]+0.25, 50)
    # tplot = np.concatenate((tpre, tplot, tpost))
    # Ex = np.pad(Ex, (50, 50), 'constant', constant_values=(Ex[0], Ex[-1]))
    # Ey = np.pad(Ey, (50, 50), 'constant', constant_values=(Ey[0], Ey[-1]))

    return tplot, Ex, Ey
Example #35
def convert(files, ids, genders, ecg_channel_name, dataset_name, i_patient):
    """
    Extracts segments from the ECG recordings and saves them as .npy files
    The segments of every patient will be stored in a separate folder.

    args:
        files (list of str): list of paths to the .edf recordings
        ids (pd.Dataframe): internal patient ids
        genders (pd.Dataframe): gender of each patient
        ecg_channel_name (str) name of the channel with ECG-data in the .edf files
        dataset_name (str): name of dataset ('shhs' or 'mesa')
        i_patient (int): counter
    """
    for file in files:
        i_patient += 1

        # Load index of file in meta-data
        if dataset_name == 'shhs':
            meta_idx = int(
                np.argwhere(ids.values == int(file.split('-')[1][:-4])))
        elif dataset_name == 'mesa':
            meta_idx = int(
                np.argwhere(ids.values == int(file.split('-')[-1][:-4])))
        else:
            raise Exception('Invalid dataset name. Select "shhs" or "mesa".')

        # Get gender of patient from meta-data
        gender = GENDER_DICT[genders.values[meta_idx]]
        patient_id = str(ids.values[meta_idx]).zfill(6)

        # Create new folder for the patient
        patient_folder = os.path.join(
            target_folder, '{}-{}-{}'.format(dataset_name, patient_id, gender))
        os.makedirs(patient_folder, exist_ok=True)

        # Load .edf file
        with pyedflib.EdfReader(file) as f:
            n = f.signals_in_file
            signal_labels = f.getSignalLabels()

            # Select ECG channel
            for i_channel in range(n):
                if signal_labels[i_channel] == ecg_channel_name:
                    channel = i_channel

            # Load ECG channel
            signal = f.readSignal(channel)

            # Get sample frequency
            fs = f.getSampleFrequency(channel)

            # Set start 60 Minutes in the recording to avoid noise in the beginning
            start = 60 * 60 * fs
            # use only inner 5 hours where signal is cleaner
            stop = min(
                len(signal) - seg_len * fs - 60 * 60 * fs, 6 * 60 * 60 * fs)
            if stop < start:
                print("WARNING: stop < start for file {}".format(file))
                continue

            seg_indices = np.linspace(start,
                                      stop,
                                      num=config['num_segs_per_patient'],
                                      dtype=int)

            for i_segment, ind in enumerate(seg_indices):
                local_idx = str(i_segment).zfill(2)
                target_path = os.path.join(patient_folder,
                                           '{}.npy'.format(local_idx))

                segment = signal[ind:ind + seg_len * fs]

                if plot_output:
                    plt.title("{}/{}, time: {:.2f}h/{:.2f}h".format(
                        i_patient, n_files_total, ind / (fs * 3600),
                        len(signal) / (fs * 3600)))
                    plt.plot(segment)
                    plt.show()

                # Resample
                if fs != config['new_fs']:
                    if abs(fs - config['new_fs']) > 20:
                        print(
                            "Warning, this is a big resampling, fs: {}, source: {}, target: {}"
                            .format(fs, file, target_path))
                    num = int(seg_len * new_fs)
                    segment = resample(segment, num=num)

                # Save
                if save:
                    np.save(file=target_path, arr=segment)

            # Logging
            if i_patient % config['log_interval'] == 0:
                time_left = (((time.time() - t_start) / i_patient) *
                             (n_files_total - i_patient)) / 3600
                print("\nProcessed file {} of {}".format(
                    i_patient, n_files_total))
                print(target_path, fs, len(segment),
                      "{}/{}".format(i_patient, n_files_total),
                      "\nTime elapsed {:.2f}s".format(time.time() - t_start),
                      "Estimated time left: {:.2f} hours".format(time_left))

    return i_patient
Example #36
def preprocess(data, factor, low_perc=5, up_perc=80):
    '''
    Preprocess gcamp data based on https://github.com/lucastheis/c2s/blob/master/c2s/c2s.py#L166
    Main differences:
        - Simple linear regression to remove trends
        - Allows standardization to differ in lower and upper percentiles
        - Additional tools for managing camera and spike times

    :param data: dict containing
    'calcium' input gcamp (required) [n]
    'fps' original fps (required) [1]
    'cam_times' original camera times (optional) [n]
    'spike_times' spike times in seconds (optional) [m]
    :param factor: factor to upsample data. will produce upsampled data of size [factor * n]
    :param low_perc: lower percentile to normalize dff
    :param up_perc: upper percentile to normalize dff
    :return: dict with upsampled data and binned spikes (if spike_times provided)
    '''

    if 'cam_times' in data:
        assert data['calcium'].shape[0] == data['cam_times'].shape[
            0], "gcamp and camera times must be same length"

    data = deepcopy(data)

    x = np.arange(data['calcium'].shape[0])

    regr = linear_model.LinearRegression()
    regr.fit(x.reshape(-1, 1), data['calcium'])

    a = regr.coef_
    b = regr.intercept_

    data['calcium'] = data['calcium'] - (a * x + b)

    calcium_low_perc = np.percentile(data['calcium'], low_perc)
    calcium_high_perc = np.percentile(data['calcium'], up_perc)

    if calcium_high_perc - calcium_low_perc > 0.:
        data['calcium'] = (data['calcium'] - calcium_low_perc
                           ) / float(calcium_high_perc - calcium_low_perc)

    # normalize sampling rate
    if factor > 1:
        # number of samples after update of sampling rate
        num_samples = data['calcium'].shape[0] * factor
        # resample calcium signal
        data['calcium'] = resample(data['calcium'].ravel(), num_samples)
        data['fps'] = data['fps'] * factor
        if 'cam_times' in data:
            data['cam_times'] = time_adjustment(data['cam_times'], factor,
                                                data['fps'])
        else:
            data['cam_times'] = np.arange(
                0, data['calcium'].shape[0] / data['fps'], (1.0 / data['fps']))
            data['cam_times'] = data['cam_times'][np.arange(
                data['calcium'].shape[0])]
        if 'spike_times' in data:
            data['spikes_per_bin'] = bin_spikes(data['cam_times'],
                                                data['spike_times'])
    return data
Example #37
 def samples_at(self, rate):
     new_samples = np.frombuffer(self.bytes_chunks, dtype=np.float32)
     return signal.resample(new_samples, int(rate * self.duration))
Example #38
def my_resample(x, origin_fs, target_fs):
    from scipy.signal import resample
    origin_l = len(x)
    target_l = np.int32((1. * origin_l / origin_fs * target_fs))
    xx = resample(x, target_l)
    return xx
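A minimal usage sketch (not from the original project): downsample one second of audio from 48 kHz to 16 kHz:

import numpy as np

x = np.random.randn(48000)          # 1 s at 48 kHz
xx = my_resample(x, 48000, 16000)   # -> 16000 samples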
Example #39
File: waves.py Project: rcrowder/thorns
def resample(signal, fs, new_fs):
    """Resample `signal` from `fs` to `new_fs`."""
    new_signal = dsp.resample(signal, int(len(signal) * new_fs / fs))

    return new_signal
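A minimal usage sketch (not from the original project); the module appears to bind scipy.signal (or a DSP wrapper around it) as dsp:

import numpy as np
import scipy.signal as dsp   # assumption: dsp is scipy.signal

s = np.random.randn(1000)
s2 = resample(s, fs=1000, new_fs=500)   # -> 500 samples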
Example #40
 def __resize_matrix_resample(self, groupsrc, groupdst, M):
     from scipy.signal import resample
     x_resampled = resample(M, len(groupsrc), window='blk')
     xy_resampled = resample(
         x_resampled.transpose(), len(groupdst), window='blk').transpose()
     return np.minimum(np.maximum(xy_resampled, 0), 1)
Example #41
def main():
    # Args
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name",
                        default='models/XXL_lr000001_bs32.pth',
                        help="Classifier model path")
    parser.add_argument(
        "--classifier",
        default='XXL',
        help="Choose classifier architecture, C, S, XS, XL, XXL, XXXL")
    args = parser.parse_args()

    # Select training device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Load specified Classifier
    if args.classifier == 'XS':
        net = Classifier_XS()
    elif args.classifier == 'S':
        net = Classifier_S()
    elif args.classifier == 'XL':
        net = Classifier_XL()
    elif args.classifier == 'XXL':
        net = Classifier_XXL()
    elif args.classifier == 'XXXL':
        net = Classifier_XXXL()
    else:
        net = Classifier()
    net.to(device)

    # Load parameters from trained model
    net.load_state_dict(
        torch.load('../../STEAD_ANN/models/' + args.model_name + '.pth'))
    net.eval()

    # Load Belgica data
    f = scipy.io.loadmat(
        "../../Data_Belgica/mat_2018_08_19_00h28m05s_Parkwind_HDAS_2Dmap_StrainData_2D.mat"
    )

    # Read data
    data = f['Data_2D']

    fs = 10
    total = 0
    seis, ns = 0, 0

    # For every trace in the file
    for trace in data:
        # Resample
        resamp_trace = signal.resample(trace, 6000)

        # Normalize
        resamp_trace = resamp_trace / np.max(np.abs(resamp_trace))

        # Numpy to Torch
        resamp_trace = torch.from_numpy(resamp_trace).to(device).unsqueeze(0)

        # Prediction
        outputs = net(resamp_trace.float())
        predicted = torch.round(outputs.data).item()

        # Count traces
        total += 1

        if predicted:
            seis += 1

        else:
            ns += 1

    # Average 5km of measurements
    avg_data = np.mean(data[3500:4001, :], 0)

    # Filter average data
    avg_data_filtered1 = butter_bandpass_filter(avg_data, 0.5, 1, fs, order=5)
    avg_data_filtered2 = butter_bandpass_filter(avg_data,
                                                0.2,
                                                0.6,
                                                fs,
                                                order=5)
    avg_data_filtered3 = butter_bandpass_filter(avg_data,
                                                0.1,
                                                0.3,
                                                fs,
                                                order=5)

    # Resample
    avg_data = signal.resample(avg_data, 6000)
    avg_data_filtered1 = signal.resample(avg_data_filtered1, 6000)
    avg_data_filtered2 = signal.resample(avg_data_filtered2, 6000)
    avg_data_filtered3 = signal.resample(avg_data_filtered3, 6000)

    # Normalize
    avg_data = avg_data / np.max(np.abs(avg_data))
    avg_data_filtered1 = avg_data_filtered1 / np.max(
        np.abs(avg_data_filtered1))
    avg_data_filtered2 = avg_data_filtered2 / np.max(
        np.abs(avg_data_filtered2))
    avg_data_filtered3 = avg_data_filtered3 / np.max(
        np.abs(avg_data_filtered3))

    # Numpy to Torch
    avg_data = torch.from_numpy(avg_data).to(device).unsqueeze(0)
    avg_data_filtered1 = torch.from_numpy(avg_data_filtered1).to(
        device).unsqueeze(0)
    avg_data_filtered2 = torch.from_numpy(avg_data_filtered2).to(
        device).unsqueeze(0)
    avg_data_filtered3 = torch.from_numpy(avg_data_filtered3).to(
        device).unsqueeze(0)

    # Prediction
    output = net(avg_data.float())
    output_filtered1 = net(avg_data_filtered1.float())
    output_filtered2 = net(avg_data_filtered2.float())
    output_filtered3 = net(avg_data_filtered3.float())

    predicted = torch.round(output.data).item()
    predicted_filtered1 = torch.round(output_filtered1.data).item()
    predicted_filtered2 = torch.round(output_filtered2.data).item()
    predicted_filtered3 = torch.round(output_filtered3.data).item()

    # Results
    print(f'Inferencia Belgica:\n\n'
          f'Total traces: {total}\n'
          f'Total predicted seismic traces: {seis}\n'
          f'Total predicted noise traces: {ns}\n')

    print(f'Average predicted: {predicted}\n'
          f'Average filtered predicted 1: {predicted_filtered1}\n'
          f'Average filtered predicted 2: {predicted_filtered2}\n'
          f'Average filtered predicted 3: {predicted_filtered3}\n')
Example #42
data = pd.concat([train, test], sort=False)

sub = pd.read_csv('data/提交结果示例.csv')
y = train.groupby('fragment_id')['behavior_id'].min()

train['mod'] = (train.acc_x**2 + train.acc_y**2 + train.acc_z**2)**.5
train['modg'] = (train.acc_xg**2 + train.acc_yg**2 + train.acc_zg**2)**.5
test['mod'] = (test.acc_x**2 + test.acc_y**2 + test.acc_z**2)**.5
test['modg'] = (test.acc_xg**2 + test.acc_yg**2 + test.acc_zg**2)**.5

x = np.zeros((7292, 60, 8, 1))
t = np.zeros((7500, 60, 8, 1))
for i in tqdm(range(7292)):
    tmp = train[train.fragment_id == i][:60]
    x[i, :, :, 0] = resample(
        tmp.drop(['fragment_id', 'time_point', 'behavior_id'], axis=1), 60,
        np.array(tmp.time_point))[0]
for i in tqdm(range(7500)):
    tmp = test[test.fragment_id == i][:60]
    t[i, :, :, 0] = resample(tmp.drop(['fragment_id', 'time_point'], axis=1),
                             60, np.array(tmp.time_point))[0]

kfold = StratifiedKFold(5, shuffle=True)


def Net():
    emb_size = 6
    input = Input(shape=(60, 8, 1))

    input_1_1 = Lambda(lambda x: x[:, :, 1:, :])(input)
    input_1_2 = Lambda(lambda x: x[:, :, 0, :])(input)
Example #43
 def _resample_fs(cls, data, new_fs, old_fs):
     fs_ratio = new_fs / old_fs
     new_length = int(np.round(len(data) * fs_ratio))
     return signal.resample(data, new_length)
Example #44
def resample(data, fs, new_fs=44150):
    resampled = signal.resample(data, int(len(data)*new_fs/fs))
    return resampled
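A minimal usage sketch (not from the original project): bring a 22,050 Hz recording up to the 44,150 Hz default:

import numpy as np
from scipy import signal

x = np.random.randn(22050)   # 1 s at 22,050 Hz
y = resample(x, 22050)       # default new_fs=44150 -> len(y) == 44150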
Example #45
def proc_file(file):

    fobj = bu.hsDat(file, load=True)

    vperp = fobj.dat[:, 0]
    elec3 = fobj.dat[:, 1]

    try:
        phi_dg = fobj.attribs['phi_dg']
    except:
        phi_dg = 0.0

    inds = np.abs(full_freqs - fspin) < 200.0

    cut = int(0.1 * fsamp)
    zeros = np.zeros_like(elec3[:cut])
    voltages = [
        zeros, zeros, zeros, elec3[:cut], -1.0 * elec3[:cut], zeros, zeros,
        zeros
    ]
    efield = bu.trap_efield(voltages, only_x=True)[0]
    drive_amp, drive_phase = bu.get_sine_amp_phase(efield)

    elec3_fft = np.fft.rfft(elec3)
    true_fspin = np.average(full_freqs[inds], weights=np.abs(elec3_fft)[inds])


    carrier_amp, carrier_phase_mod = \
            bu.demod(vperp, true_fspin, fsamp, plot=plot_carrier_demod, \
                     filt=True, bandwidth=bandwidth,
                     notch_freqs=notch_freqs, notch_qs=notch_qs, \
                     tukey=True, tukey_alpha=5.0e-4, \
                     detrend=True, detrend_order=1, harmind=2.0)

    # b1, a1 = signal.butter(3, np.array(libration_filt_band)*2.0/fsamp, btype='bandpass')
    sos = signal.butter(3,
                        libration_filt_band,
                        btype='bandpass',
                        fs=fsamp,
                        output='sos')
    # carrier_phase_mod_filt = signal.filtfilt(b1, a1, carrier_phase_mod)
    carrier_phase_mod_filt = signal.sosfiltfilt(sos, carrier_phase_mod)

    if len(libration_filt_band):
        libration_inds = (full_freqs > libration_filt_band[0]) \
                                * (full_freqs < libration_filt_band[1])
    else:
        libration_inds = np.abs(full_freqs -
                                libration_guess) < 0.5 * libration_bandwidth

    phase_mod_fft = np.fft.rfft(carrier_phase_mod) * fac

    lib_fit_x = full_freqs[libration_inds]
    lib_fit_y = np.abs(phase_mod_fft[libration_inds])

    try:
        try:
            peaks = bu.find_fft_peaks(lib_fit_x,
                                      lib_fit_y,
                                      delta_fac=5.0,
                                      window=50)
            ind = np.argmax(peaks[:, 1])
        except:
            peaks = bu.find_fft_peaks(lib_fit_x,
                                      lib_fit_y,
                                      delta_fac=3.0,
                                      window=100)
            ind = np.argmax(peaks[:, 1])

        true_libration_freq = peaks[ind, 0]

    except:
        true_libration_freq = lib_fit_x[np.argmax(lib_fit_y)]

    libration_amp, libration_phase = \
            bu.demod(carrier_phase_mod, true_libration_freq, fsamp, \
                     plot=plot_libration_demod, filt=True, \
                     filt_band=libration_filt_band, \
                     bandwidth=libration_bandwidth, \
                     tukey=False, tukey_alpha=5.0e-4, \
                     detrend=False, detrend_order=1.0, harmind=1.0)

    libration_ds, time_vec_ds = \
            signal.resample(carrier_phase_mod_filt, t=time_vec, num=out_nsamp)
    libration_amp_ds, time_vec_ds = \
            signal.resample(libration_amp, t=time_vec, num=out_nsamp)

    libration_ds = libration_ds[out_cut:int(-1 * out_cut)]
    libration_amp_ds = libration_amp_ds[out_cut:int(-1 * out_cut)]
    time_vec_ds = time_vec_ds[out_cut:int(-1 * out_cut)]

    if plot_downsample:
        plt.plot(time_vec,
                 carrier_phase_mod_filt,
                 color='C0',
                 label='Original')
        plt.plot(time_vec_ds,
                 libration_ds,
                 color='C0',
                 ls='--',
                 label='Downsampled')
        plt.plot(time_vec, libration_amp, color='C1')  #, label='Original')
        plt.plot(time_vec_ds, libration_amp_ds, color='C1',
                 ls='--')  #, label='Downsampled')
        plt.legend()
        plt.show()

        input()

    return (time_vec_ds, libration_ds, libration_amp_ds, \
                true_libration_freq, phi_dg, drive_amp)
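The snippet above filters the demodulated phase with second-order sections; a minimal, self-contained sketch of the same zero-phase SOS bandpass pattern (sample rate, band edges, and test signal are invented for illustration):

import numpy as np
from scipy import signal

fs = 20000.0                                  # invented sample rate
t = np.arange(int(fs)) / fs
x = np.sin(2 * np.pi * 300.0 * t) + np.random.randn(t.size)

# SOS form is numerically safer than (b, a) polynomials for higher-order
# IIR filters, which is why the code above moved from filtfilt to sosfiltfilt.
sos = signal.butter(3, [200.0, 400.0], btype='bandpass', fs=fs, output='sos')
x_filt = signal.sosfiltfilt(sos, x)           # zero-phase: forward + backward pass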
Example #46
def main():
    # Args
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", default='Default_model', help="Classifier model path")
    parser.add_argument("--classifier", default='C', help="Choose classifier architecture, C, CBN")
    args = parser.parse_args()

    # Select training device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Load specified Classifier
    if args.classifier == 'CBN':
        net = ClassConvBN()
    elif args.classifier == 'C':
        net = ClassConv()
    else:
        net = ClassConv()
        print('Bad Classifier option, running classifier C')
    net.to(device)

    # Load parameters from trained model
    net.load_state_dict(torch.load('../../STEAD_CNN/models/' + args.model_name + '.pth'))
    net.eval()

    # Load Francia data file 1
    f = scipy.io.loadmat("../../Data_Francia/Earthquake_1p9_Var_BP_2p5_15Hz.mat")

    # Read data
    data = f["StrainFilt"]
    # time= f["Time"]
    # distance = f["Distance_fiber"]

    # Sampling frequency
    fs = 100
    total = 0
    tr_seismic, tr_noise = 0, 0
    fil_seismic, fil_noise = 0, 0

    # For every trace in the file
    for trace in data:
        if np.max(np.abs(trace)):
            # Filter
            fil_trace = butter_bandpass_filter(trace, 0.5, 1, fs, order=5)

            # Resample
            resamp_trace = signal.resample(trace, 6000)
            resamp_fil_trace = signal.resample(fil_trace, 6000)

            # Normalize
            resamp_trace = resamp_trace / np.max(np.abs(resamp_trace))
            resamp_fil_trace = resamp_fil_trace / np.max(np.abs(resamp_fil_trace))

            # Numpy to Torch
            resamp_trace = torch.from_numpy(resamp_trace).to(device).unsqueeze(0)
            resamp_fil_trace = torch.from_numpy(resamp_fil_trace).to(device).unsqueeze(0)

            # Prediction
            out_trace = net(resamp_trace.float())
            out_fil_trace = net(resamp_fil_trace.float())

            pred_trace = torch.round(out_trace.data).item()
            pred_fil_trace = torch.round(out_fil_trace.data).item()

            # Count traces
            total += 1

            if pred_trace:
                tr_seismic += 1
            else:
                tr_noise += 1

            if pred_fil_trace:
                fil_seismic += 1
            else:
                fil_noise += 1

    # Results
    print(f'Inferencia Francia:\n\n'
          f'Total traces: {total}\n'
          f'Predicted seismic: {tr_seismic}, predicted noise: {tr_noise}\n'
          f'Predicted fil_seismic: {fil_seismic}, predicted fil_noise: {fil_noise}\n')
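butter_bandpass_filter is called above but not defined in this snippet; a common SciPy implementation with the same (data, lowcut, highcut, fs, order) signature looks like the sketch below (an assumption, not necessarily the project's actual helper):

from scipy import signal

def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    # Design a Butterworth bandpass and apply it with zero phase shift.
    sos = signal.butter(order, [lowcut, highcut], btype='bandpass',
                        fs=fs, output='sos')
    return signal.sosfiltfilt(sos, data)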
Example #47
def feature_extractor(pair_of_attrs, pair_of_attr_names):
    attr1 = pair_of_attrs[0]
    # attr2 = pair_of_attrs[1]
    attr1_name = pair_of_attr_names[0]
    # attr2_name = pair_of_attr_names[1]
    attr1_df = pd.DataFrame({attr1_name: attr1})
    # attr2_df = pd.DataFrame({attr2_name: attr2})

    content_ratio1 = content_ratio(attr1)
    # content_ratio2 = content_ratio(attr2)

    # print(content_ratio1)
    # print(content_ratio2)

    # all attrs in table1
    tbl1_attrs = attr1_df.keys()
    # all attrs in table2
    # tbl2_attrs = attr2_df.keys()

    distributions1 = {}
    # distributions2 = {}
    for attr in tbl1_attrs:
        content_histogram_tbl1 = content_histogram(attr1_df, attr)
        distributions1[attr] = content_histogram_tbl1

    # for attr in tbl2_attrs:
    #     content_histogram_tbl2 = content_histogram(attr2_df, attr)
    #     distributions2[attr] = content_histogram_tbl2

    # table1
    attr1_histog = []
    for distr in distributions1:
        # print(distr)
        if distr == attr1_name:
            # distributions1[distr].sort_values(distr)
            for index, row in distributions1[distr].iterrows():
                # print(row)
                # for key in row.keys():
                attr1_histog.append(row[distr])

    # # table2
    # attr2_histog = []
    # for distr in distributions2:
    #     # print(distr)
    #     if distr == attr2_name:
    #         # distributions2[distr].sort_values(distr)
    #         for index, row in distributions2[distr].iterrows():
    #             attr2_histog.append(row[distr])

    attr1_histog.sort()
    # attr2_histog.sort()

    # print(attr1_histog)
    # print(attr2_histog)

    # resample using fft
    resample_dimension = 20
    attr1_histog_re = signal.resample(attr1_histog, resample_dimension)
    # attr2_histog_re = signal.resample(attr2_histog, resample_dimension)

    # print(attr1_histog_re)
    # # print(attr2_histog_re)

    # visualize_histograms_pair(attr1_name, attr1_histog_re, attr2_name, attr2_histog_re, resample_dimension)

    # print(_get_col_dtype(attr1_df[attr1_name]))
    # print(_get_col_dtype(attr2_df[attr2_name]))

    average_cell_len1 = average_cell_len(attr1)
    # average_cell_len2 = average_cell_len(attr2)

    percentage_of_num1, percentage_of_alphabetic1 = percentage_of_num_alphabetic(
        attr1)
    # percentage_of_num2, percentage_of_alphabetic2 = percentage_of_num_alphabetic(attr2)

    attr1_features = [content_ratio1]
    attr1_features.extend(attr1_histog_re)
    attr1_features.append(average_cell_len1)
    attr1_features.append(percentage_of_num1)
    attr1_features.append(percentage_of_alphabetic1)

    # attr2_features = [content_ratio2]
    # attr2_features.extend(attr2_histog_re)
    # attr2_features.append(average_cell_len2)
    # attr2_features.append(percentage_of_num2)
    # attr2_features.append(percentage_of_alphabetic2)

    attr1_features_dict = ['attr_histogram'] * resample_dimension
    attr1_features_dict = [
        'content_ratio', *attr1_features_dict, 'average_cell_len',
        'percentage_of_num', 'percentage_of_alphabetic'
    ]

    return attr1_features, attr1_features_dict
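signal.resample accepts the plain Python list built above and returns a NumPy array of the requested length; a standalone check with an invented histogram:

from scipy import signal

attr_histog = sorted([3, 1, 4, 1, 5, 9, 2, 6])     # invented counts
attr_histog_re = signal.resample(attr_histog, 20)  # FFT-based resampling
print(attr_histog_re.shape)                        # (20,)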
Example #48
import numpy as np
import matplotlib.pyplot as plot
import scipy.signal as sg
import wave
from scipy.io.wavfile import read

rate, signal = read("../Xe.wav")
maxIndex = len(signal)
plot.figure()
for frames in range(0, 20):

    signalArray = []
    signalTQ = []
    for k in range(6000 + frames * 256, 6300 + frames * 256):
        signalArray.append(signal[k])
    signalArray = sg.resample(signalArray, 300)
    for k in range(0, 149):  # this is K = 0..150
        sig = 0
        for y in range(0, 299 - k):
            sig = sig + signalArray[y] * signalArray[y + k]  # this is R(k)
        signalTQ.append(sig)

    plot.figure(1)
    plot.clf()
    # plot.subplot(111)
    plot.plot(signalTQ, color="purple")
    plot.grid(True)

    plot.figure(2)
    plot.clf()
    plot.subplot(311)
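The doubly nested loop above computes the autocorrelation R(k) in O(N*K) Python; np.correlate returns the same lags in one vectorized call (random data stands in for the wav frame):

import numpy as np

x = np.random.randn(300)

# Loop form, as in the example: R(k) = sum over y of x[y] * x[y + k]
R_loop = [sum(x[y] * x[y + k] for y in range(300 - k)) for k in range(150)]

# Vectorized form: index N-1 of the 'full' correlation is lag 0.
full = np.correlate(x, x, mode='full')
R_vec = full[x.size - 1:x.size - 1 + 150]
assert np.allclose(R_loop, R_vec)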
Example #49
    def get_data(self):  # Function to get force data from txt file
        filename = input(
            "Enter Force File Name with full Path if not in Working Directory (Ex: User/File.txt): "
        )
        print(" ")
        file1 = open(filename)  # set file name
        f1 = file1.readlines()  # read file
        DataBegin = 19  # Begin at line 19 to get force data
        for i in range(DataBegin, len(f1)):
            cnt = 0
            for x in range(len(f1[i])):
                if (f1[i][x] == '\t'):
                    if (cnt == 0):
                        self.time_abs.append(float(
                            f1[i][0:x]))  # Add to time list
                        temp = x + 1
                    if (cnt == 1):
                        self.forceX.append(float(
                            f1[i][temp:x]))  # Add to Attila X list
                        temp = x + 1
                    if (cnt == 2):
                        self.forceY.append(float(
                            f1[i][temp:x]))  # Add to Attila Y list
                        temp = x + 1
                    if (cnt == 3):
                        self.forceZ.append(float(
                            f1[i][temp:x]))  # Add to Attila Z list
                        temp = x + 1
                    if (cnt == 4):
                        temp = x + 1
                    if (cnt == 5):
                        temp = x + 1
                    if (cnt == 6):
                        if (f1[17][29] == 'x'):
                            self.forceX2.append(float(
                                f1[i][temp:x]))  # Add to Ryan X list
                        temp = x + 1
                    if (cnt == 7):
                        if (f1[17][34] == 'x'):
                            self.forceX2.append(float(
                                f1[i][temp:x]))  # Add to Ryan X list
                        if (f1[17][32] == 'y'):
                            self.forceY2.append(float(
                                f1[i][temp:x]))  # Add to Ryan Y list
                        temp = x + 1
                    if (cnt == 8):
                        if (f1[17][37] == 'y'):
                            self.forceY2.append(float(
                                f1[i][temp:x]))  # Add to Ryan Y list
                        if (f1[17][35] == 'z'):
                            self.forceZ2.append(float(
                                f1[i][temp:x]))  # Add to Ryan Z list
                        temp = x + 1
                    if (cnt == 9):
                        if (f1[17][40] == 'z'):
                            self.forceZ2.append(float(
                                f1[i][temp:x]))  # Add to Ryan Z list
                    cnt = cnt + 1

        #Get sampling rates to downsample data
        video_SR = input("Input Video Sampling Rate (Hz): ")
        data_SR = input("Input Data Sampling Rate (Hz): ")
        print(" ")
        #Set downsample trigger
        downsample = int(data_SR) / int(video_SR)
        up_samp = 0
        if downsample - int(downsample) != 0:
            up_samp = 1

        #Declare lists for graphing data
        self.graph = [0] * (self.total_frames)
        self.graph = range(len(self.graph))
        self.graphMax = [0] * (self.total_frames)
        self.graphMin = [0] * (self.total_frames)
        self.graphX = [0] * (self.total_frames)
        self.graphY = [0] * (self.total_frames)
        self.graphZ = [0] * (self.total_frames)
        self.graphX2 = [0] * (self.total_frames)
        self.graphY2 = [0] * (self.total_frames)
        self.graphZ2 = [0] * (self.total_frames)

        #Downsample data to video rate
        if up_samp == 0:
            cnt = 0
            for i in range(len(self.time_abs)):
                if cnt == downsample:
                    self.time_DS.append(self.time_abs[i])
                    self.forceX_DS.append(self.forceX[i])
                    self.forceY_DS.append(self.forceY[i])
                    self.forceZ_DS.append(self.forceZ[i])
                    self.forceX2_DS.append(self.forceX2[i])
                    self.forceY2_DS.append(self.forceY2[i])
                    self.forceZ2_DS.append(self.forceZ2[i])
                    cnt = 0
                cnt += 1

        #if resampling is needed
        if up_samp == 1:
            #find least common multiples and factors
            lcm = np.lcm(int(data_SR), int(video_SR))
            data_factor = int(lcm / int(data_SR))
            video_factor = int(lcm / int(video_SR))

            #resample data to a factor of data_factor
            f_time = np.linspace(0, max(self.time_abs),
                                 int(data_factor * len(self.time_abs)))
            f_X = signal.resample(self.forceX, data_factor * len(self.forceX))
            f_Y = signal.resample(self.forceY, data_factor * len(self.forceY))
            f_Z = signal.resample(self.forceZ, data_factor * len(self.forceZ))
            f_X2 = signal.resample(self.forceX2,
                                   data_factor * len(self.forceX2))
            f_Y2 = signal.resample(self.forceY2,
                                   data_factor * len(self.forceY2))
            f_Z2 = signal.resample(self.forceZ2,
                                   data_factor * len(self.forceZ2))

            #downsample data to video_factor
            cnt = 0
            for i in range(len(f_time)):
                if cnt == video_factor:
                    self.time_DS.append(f_time[i])
                    self.forceX_DS.append(f_X[i])
                    self.forceY_DS.append(f_Y[i])
                    self.forceZ_DS.append(f_Z[i])
                    self.forceX2_DS.append(f_X2[i])
                    self.forceY2_DS.append(f_Y2[i])
                    self.forceZ2_DS.append(f_Z2[i])
                    cnt = 0
                cnt += 1

        #create data lists to graph
        if self.contact_plate == "Attila":
            self.list_creator(self.forceZ_DS, self.graphZ, self.frame_number1)
        else:
            self.list_creator(self.forceZ2_DS, self.graphZ2,
                              self.frame_number1)

        count = 0
        for x in range(self.frame_number1, self.total_frames - 1):
            if count == len(self.time_DS):
                break
            self.graphX[self.frame_number1 +
                        count] = self.forceX_DS[self.frame_test + count]
            self.graphY[self.frame_number1 +
                        count] = self.forceY_DS[self.frame_test + count]
            self.graphX2[self.frame_number1 +
                         count] = self.forceX2_DS[self.frame_test + count]
            self.graphY2[self.frame_number1 +
                         count] = self.forceY2_DS[self.frame_test + count]
            if self.contact_plate == "Attila":
                self.graphZ2[self.frame_number1 +
                             count] = self.forceZ2_DS[self.frame_test + count]
            if self.contact_plate == "Ryan":
                self.graphZ[self.frame_number1 +
                            count] = self.forceZ_DS[self.frame_test + count]
            count += 1

        #Reverse Y data if inversed
        rev_Y = input("Reverse Y axis? (Y/N): ")
        if rev_Y == "Y":
            for x in range(len(self.graphY2)):
                self.graphY[x] = -1 * self.graphY[x]
                self.graphY2[x] = -1 * self.graphY2[x]

        #Reverse X data if inversed
        rev_X = input("Reverse X axis? (Y/N): ")
        print(" ")
        if rev_X == "Y":
            for x in range(len(self.graphX2)):
                self.graphX[x] = -1 * self.graphX[x]
                self.graphX2[x] = -1 * self.graphX2[x]

        #If trimming video, set data to trim points
        if self.trim == "Y":
            length = len(self.graph[self.trim_1:self.trim_2])
            self.graph = self.time_DS[0:length]
            self.graphX = self.graphX[self.trim_1:self.trim_2]
            self.graphY = self.graphY[self.trim_1:self.trim_2]
            self.graphZ = self.graphZ[self.trim_1:self.trim_2]
            self.graphX2 = self.graphX2[self.trim_1:self.trim_2]
            self.graphY2 = self.graphY2[self.trim_1:self.trim_2]
            self.graphZ2 = self.graphZ2[self.trim_1:self.trim_2]
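The upsample-then-decimate path above (lcm-derived factors, signal.resample, then taking every video_factor-th sample) can be done in one call with scipy.signal.resample_poly; a sketch with invented rates:

import numpy as np
from scipy import signal

data_sr, video_sr = 1000, 240            # invented sampling rates
x = np.random.randn(5000)                # stand-in for one force channel

# up = lcm/data_SR and down = lcm/video_SR, the same factors the code derives.
lcm = np.lcm(data_sr, video_sr)
y = signal.resample_poly(x, up=lcm // data_sr, down=lcm // video_sr)
print(len(x), '->', len(y))              # 5000 -> 1200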
Example #50
    @classmethod
    def resample(cls, frames, org_fs, dst_fs):
        return signal.resample(frames, int((frames.size * dst_fs) / org_fs))
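The output length int(frames.size * dst_fs / org_fs) preserves the clip's duration at the new rate; a quick standalone check with invented rates:

import numpy as np
from scipy import signal

frames = np.random.randn(16000)                   # one second at 16 kHz
out = signal.resample(frames, int(frames.size * 8000 / 16000))
print(out.size)                                   # 8000: one second at 8 kHz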
Example #51
def resample_signal(msignal, n_sample):
    return resample(msignal, n_sample)
Example #52
    def apply(self, data):
        axis = data.ndim - 1
        if data.shape[-1] > self.f:
            return resample(data, self.f, axis=axis)
        return data
Example #53
        globals()[VarName] = np.load(FilePath)

x_syn_peak, y_syn_peak = augment_train_set(xraw_test_peak[0:49, :],
                                           y_test_peak[0:49, :], 100)
x_syn_per, y_syn_per = augment_train_set(xraw_test_per[0:49, :],
                                         y_test_per[0:49, :], 100)
x_syn_osc, y_syn_osc = augment_train_set(xraw_test_osc[0:49, :],
                                         y_test_osc[0:49, :], 100)

FolderPath_output = os.path.abspath(os.path.join(os.getcwd(),
                                                 "./output_data/"))
np.save(FolderPath_output + "/x_syn_peak.npy", x_syn_peak)
np.save(FolderPath_output + "/x_syn_per.npy", x_syn_per)
np.save(FolderPath_output + "/x_syn_osc.npy", x_syn_osc)

x_syn_peak = signal.resample(x_syn_peak, 10000, axis=1)
x_syn_peak = preprocessing.scale(x_syn_peak)
n_syn_norm = len(x_syn_peak)

x_syn_per = signal.resample(x_syn_per, 10000, axis=1)
x_syn_per = preprocessing.scale(x_syn_per)

x_syn_osc = signal.resample(x_syn_osc, 10000, axis=1)
x_syn_osc = preprocessing.scale(x_syn_osc)
#=============================================================================#
########### produce data: reality-augmented simulation faulty data ############
#=============================================================================#
n_syn = 2000  # synthetic time series to be produced
Syn_norm = ['peak', 'per']
FolderPath1 = ['v110', 'v90', 'v70', 'v50', 'v20']
FolderPath2 = ['normal', 'WF']
Example #54
from numpy.random import normal
from numpy import log10, finfo, arange
import matplotlib.pyplot as plt
from scipy.fftpack import fft
from scipy.signal.windows import get_window
from scipy.signal import decimate, resample
eps = finfo("float").eps

def normalise(x):
    return (x - min(x)) / (max(x) - min(x))

sr = 44100
mean = 0
std = 1 
N = 2**14
noise = normal(mean, std, size=N)
noisedec = resample(decimate(noise, 2), N)  # decimate low-pass filters before downsampling by 2
window = get_window('blackmanharris', N, fftbins=False)

noisefft = abs(fft(noise * window))[:int(N/2)]
noisefftdec = abs(fft(noisedec * window))[:int(N/2)]
noisefft = 20*log10(normalise(noisefft) + eps)
noisefftdec = 20*log10(normalise(noisefftdec) + eps)

f = arange(int(N/2))/N*sr

fig, ax = plt.subplots(2, 1, figsize=(16,9))
ax[0].plot(f,noisefft)
ax[1].plot(f,noisefftdec)
plt.show()
Example #55
def interpolate_peaks(data,
                      peaks,
                      sample_rate,
                      desired_sample_rate=1000.0,
                      working_data={}):
    '''interpolate detected peak positions and surrounding data points

    Function that enables high-precision mode by taking the estimated peak position,
    then upsampling the peak position +/- 100ms to the specified sampling rate, subsequently
    estimating the peak position with higher accuracy.

    Parameters
    ----------
    data : 1d list or array
        list or array containing heart rate data

    peaks : 1d list or array
        list or array containing x-positions of peaks in signal

    sample_rate : int or float
        the sample rate of the signal (in Hz)

    desired_sample_rate : int or float
        the sample rate to which to upsample.
        Must be sample_rate < desired_sample_rate

    Returns
    -------
    working_data : dict
        working_data dictionary object containing all of heartpy's temp objects

    Examples
    --------
    Given the output of a normal analysis and the first five peak-peak intervals:

    >>> import heartpy as hp
    >>> data, _ = hp.load_exampledata(0)
    >>> wd, m = hp.process(data, 100.0)
    >>> wd['peaklist'][0:5]
    [63, 165, 264, 360, 460]

    Now, the resolution is at max 10ms as that's the distance between data points.
    We can use the high precision mode for example to approximate a more precise
    position, for example if we had recorded at 1000Hz:

    >>> wd = interpolate_peaks(data = data, peaks = wd['peaklist'],
    ... sample_rate = 100.0, desired_sample_rate = 1000.0, working_data = wd)
    >>> wd['peaklist'][0:5]
    [63.5, 165.4, 263.6, 360.4, 460.2]

    As you can see the accuracy of peak positions has increased.
    Note that you cannot magically upsample nothing into something. Be reasonable.
    '''
    assert desired_sample_rate > sample_rate, "desired sample rate is lower than actual sample rate \
this would result in downsampling which will hurt accuracy."

    num_samples = int(0.1 * sample_rate)
    ratio = sample_rate / desired_sample_rate
    interpolation_slices = [(x - num_samples, x + num_samples) for x in peaks]
    peaks = []

    for i in interpolation_slices:
        segment = data[i[0]:i[1]]
        resampled = resample(
            segment, int(len(segment) * (desired_sample_rate / sample_rate)))
        peakpos = np.argmax(resampled)
        peaks.append((i[0] + (peakpos * ratio)))

    working_data['peaklist'] = peaks

    return working_data
Example #56
def scopeplot(x, width=800, height=400, range_=None, cmap=None, plot=None):
    """
    Plot a signal using brightness to indicate density.

    Parameters
    ----------
    x : array_like, 1-D
        The signal to be plotted
    width, height : int, optional
        The width and height of the output image in pixels.  Default is
        800×400.
    range_ : float or 2-tuple of floats, optional
        The vertical range of the plot.  If a tuple, it is (xmin, xmax).  If
        a single number, the range is (-range, range).  If None, it autoscales.
    cmap : str or matplotlib.colors.LinearSegmentedColormap, optional
        A matplotlib colormap for the output image.
        Grayscale by default.
    plot : bool or str or None, optional
        If plot is None, the X image array is returned.
        if plot is True, the image is plotted directly.
        If plot is a string, it represents a filename to save the image to
        using matplotlib's `imsave`.

    Returns
    -------
    X : ndarray of shape (width, height)
        A 2D array of amplitude 0 to 1, representing the density of the signal
        at that point.

    """

    if cmap is None:
        cmap = 'gray'

    x = asarray(x)

    N = len(x)

    # Add zeros to end to reduce circular Gibbs effects
    MIN_PAD = 5  # TODO: what should this be?  Seems subjective.

    # Make input an optimal length for fast processing
    pad_amount = next_fast_len(N + MIN_PAD) - N

    x = pad(x, (0, pad_amount), 'constant')

    # Resample such that signal evenly divides into chunks of equal length
    new_size = int(round(_ceildiv(RS * N, width) * width / N * len(x)))
    print('new size: {}'.format(new_size))

    x = resample(x, new_size)

    if not range_:
        range_ = 1.1 * np.amax(np.abs(x))

    if np.size(range_) == 1:
        xmin, xmax = -range_, +range_
    elif np.size(range_) == 2:
        xmin, xmax = range_
    else:
        raise ValueError('range_ not understood')

    spp = _ceildiv(N * RS, width)  # samples per pixel
    norm = 1 / spp

    # Pad some zeros at beginning for overlap
    x = pad(x, (spp // 2, 0), 'constant')

    X = np.empty((width, height))

    if spp % 2:  # spp is odd
        chunksize = 2 * spp  # (even)
    else:  # spp is even
        chunksize = 2 * spp + 1  # (odd)
    print('spp: {}, chunk size: {}'.format(spp, chunksize))

    for n in range(0, width):
        chunk = x[n * spp:n * spp + chunksize]
        assert len(chunk)  # don't send empties
        try:
            h = _ihist(chunk, bins=height, range_=(xmin, xmax))
        except ValueError:
            print('argh', len(chunk))
        else:
            X[n] = h * norm

    assert np.amax(X) <= 1.001, np.amax(X)

    X = X**(0.4)  # TODO: SUBJECTIVE

    if isinstance(plot, str):
        plt.imsave(plot, X.T, cmap=cmap, origin='lower', format='png')
    elif plot:
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.imshow(X.T,
                  origin='lower',
                  aspect='auto',
                  cmap=cmap,
                  extent=(0, len(x), xmin, xmax),
                  interpolation='nearest')
    #              norm=LogNorm(vmin=0.01, vmax=300))
    else:
        return X
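scopeplot leans on module-level pieces not shown in this snippet (the oversampling constant RS and the helpers _ceildiv and _ihist), so the call below is purely illustrative of the intended interface:

import numpy as np

fs = 48000
t = np.arange(fs) / fs
x = np.sin(2 * np.pi * 440 * t) + 0.1 * np.random.randn(fs)

img = scopeplot(x, width=800, height=400)   # plot=None: returns the density image
scopeplot(x, plot=True)                     # plot=True: draws it with imshow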
Example #57
    def stftMorph(self, x1, x2, fs, w1, N1, w2, N2, H1, smoothf, balancef):
        """
        Morph of two sounds using the STFT
        x1, x2: input sounds, fs: sampling rate
        w1, w2: analysis windows, N1, N2: FFT sizes, H1: hop size
        smoothf: smooth factor of sound 2, bigger than 0 to max of 1, where 1 is no smoothing,
        balancef: balance between the 2 sounds, from 0 to 1, where 0 is sound 1 and 1 is sound 2
        returns y: output sound
        """
        if (N2 / 2 * smoothf <
                3):  # raise exception if decimation factor too small
            raise ValueError("Smooth factor too small")
        if (smoothf > 1):  # raise exception if decimation factor too big
            raise ValueError("Smooth factor above 1")
        if (balancef > 1
                or balancef < 0):  # raise exception if balancef outside 0-1
            raise ValueError("Balance factor outside range")
        if (H1 <= 0):  # raise error if hop size 0 or negative
            raise ValueError("Hop size (H1) smaller or equal to 0")

        M1 = w1.size  # size of analysis window
        hM1_1 = math.floor(
            (M1 + 1) // 2)  # half analysis window size by rounding
        hM1_2 = math.floor(M1 // 2)  # half analysis window size by floor
        L = x1.size // H1  # number of frames for x1
        x1 = np.append(
            np.zeros(hM1_2),
            x1)  # add zeros at beginning to center first window at sample 0
        x1 = np.append(
            x1, np.zeros(hM1_1))  # add zeros at the end to analyze last sample
        pin1 = hM1_1  # initialize sound pointer in middle of analysis window

        w1 = w1 / sum(w1)  # normalize analysis window
        M2 = w2.size  # size of analysis window
        hM2_1 = math.floor(
            (M2 + 1) / 2)  # half analysis window size by rounding
        hM2_2 = math.floor(M2 / 2)  # half analysis window size by floor
        H2 = x2.size // L  # hop size for second sound
        x2 = np.append(
            np.zeros(hM2_2),
            x2)  # add zeros at beginning to center first window at sample 0
        x2 = np.append(
            x2, np.zeros(hM2_1))  # add zeros at the end to analyze last sample
        pin2 = hM2_1  # initialize sound pointer in middle of analysis window
        y = np.zeros(x1.size)  # initialize output array
        for l in range(L):
            #-----analysis-----
            mX1, pX1 = DFT.dftAnal(x1[pin1 - hM1_1:pin1 + hM1_2], w1,
                                   N1)  # compute dft
            mX2, pX2 = DFT.dftAnal(x2[pin2 - hM2_1:pin2 + hM2_2], w2,
                                   N2)  # compute dft
            #-----transformation-----
            mX2smooth = resample(np.maximum(-200, mX2), int(
                mX2.size * smoothf))  # smooth spectrum of second sound
            mX2 = resample(mX2smooth,
                           mX1.size)  # generate back the same size spectrum
            mY = balancef * mX2 + (1 -
                                   balancef) * mX1  # generate output spectrum
            #-----synthesis-----
            y[pin1 - hM1_1:pin1 + hM1_2] += H1 * DFT.dftSynth(
                mY, pX1, M1)  # overlap-add to generate output sound
            pin1 += H1  # advance sound pointer
            pin2 += H2  # advance sound pointer
        y = np.delete(y, range(
            hM1_2))  # delete the zeros added at the beginning
        y = np.delete(
            y, range(y.size - hM1_1,
                     y.size))  # delete the zeros added at the end

        # write morphed sound data
        outputFullPath = outputPath + morphedFileName
        #path4Writing = getRightPath(outputFullPath)
        #sf.write(path4Writing, y, RATE)
        # write sound file in env
        commUtill.writeSoundFile(outputFullPath, y, RATE)
        return outputFullPath
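The smoothing step above (decimate the magnitude spectrum with resample, then interpolate it back) low-pass filters the spectral envelope; a self-contained illustration with invented data:

import numpy as np
from scipy.signal import resample

mX = 20 * np.log10(np.abs(np.fft.rfft(np.random.randn(1024))) + 1e-12)
smoothf = 0.2
mX_smooth = resample(np.maximum(-200, mX), int(mX.size * smoothf))
mX_back = resample(mX_smooth, mX.size)   # same length, smoothed envelope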
Example #58
def main():
    # Args
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", default='CBN_10epch', help="Classifier model path")
    parser.add_argument("--classifier", default='CBN', help="Choose classifier architecture, C, CBN")
    args = parser.parse_args()

    # Select training device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Load specified Classifier
    if args.classifier == 'CBN':
        net = ClassConvBN()
    elif args.classifier == 'C':
        net = ClassConv()
    else:
        net = ClassConv()
        print('Bad Classifier option, running classifier C')
    net.to(device)

    # Load parameters from trained model
    net.load_state_dict(torch.load('../../STEAD_CNN/models/' + args.model_name + '.pth'))
    net.eval()

    # Load Utah data file 1
    f = '../../Data_Utah/FORGE_78-32_iDASv3-P11_UTC190419001218.sgy'

    # Read data
    with segyio.open(f, ignore_geometry=True) as segy:
        segy.mmap()

        # Traces
        data = segyio.tools.collect(segy.trace[:])

        # Sampling frequency
        fs = segy.header[0][117]

    total = 0
    tr_seismic, tr_noise = 0, 0
    fil_seismic, fil_noise = 0, 0

    seis_traces = []
    seis_fil_traces = []

    noise_traces = []
    noise_fil_traces = []

    # For every trace in the file
    for idx, trace in enumerate(data):
        # Filter
        fil_trace = butter_bandpass_filter(trace, 0.5, 1, fs, order=5)

        # Resample
        resamp_trace = signal.resample(trace, 6000)
        resamp_fil_trace = signal.resample(fil_trace, 6000)

        # Normalize
        resamp_trace = resamp_trace / np.max(np.abs(resamp_trace))
        resamp_fil_trace = resamp_fil_trace / np.max(np.abs(resamp_fil_trace))

        # Numpy to Torch
        resamp_trace = torch.from_numpy(resamp_trace).to(device).unsqueeze(0)
        resamp_fil_trace = torch.from_numpy(resamp_fil_trace).to(device).unsqueeze(0)

        # Prediction
        out_trace = net(resamp_trace.float())
        out_fil_trace = net(resamp_fil_trace.float())

        pred_trace = torch.round(out_trace.data).item()
        pred_fil_trace = torch.round(out_fil_trace.data).item()

        # Count traces
        total += 1

        if pred_trace:
            tr_seismic += 1
            seis_traces.append(idx)
        else:
            tr_noise += 1
            noise_traces.append(idx)

        if pred_fil_trace:
            fil_seismic += 1
            seis_fil_traces.append(idx)
        else:
            fil_noise += 1
            noise_fil_traces.append(idx)

    seis_tr_id = np.random.choice(seis_traces, 1)
    seis_fil_tr_id = np.random.choice(seis_fil_traces, 1)

    noise_tr_id = np.random.choice(noise_traces, 1)
    noise_fil_tr_id = np.random.choice(noise_fil_traces, 1)

    plt.figure()
    plt.plot(data[seis_tr_id])
    plt.savefig('seis_trace1.png')

    plt.clf()
    plt.plot(data[seis_fil_tr_id])
    plt.savefig('seis_fil_trace1.png')

    plt.clf()
    plt.plot(data[noise_tr_id])
    plt.savefig('noise_trace1.png')

    plt.clf()
    plt.plot(data[noise_fil_tr_id])
    plt.savefig('noise_fil_trace1.png')

    # Results
    print(f'Inferencia Utah:\n\n'
          f'File: FORGE_78-32_iDASv3-P11_UTC190419001218.sgy\n'
          f'Total traces: {total}\n'
          f'Predicted seismic: {tr_seismic}, predicted noise: {tr_noise}\n'
          f'Predicted fil_seismic: {fil_seismic}, predicted fil_noise: {fil_noise}\n')
Example #59
File: LPC.py Project: dienvx1997bn/XLTN
import numpy as np
import matplotlib.pyplot as plot
import scipy.signal as sg
import wave
from scipy import signal
# from master.scikits.talkbox.linpred.levinson_lpc import levinson,lpc
from scikits.talkbox.linpred.levinson_lpc import lpc
from scipy.io.wavfile import read
rate, signal = read("../Xe.wav")
signalA = []
for i in range(6000, 6512):
    signalA.append(signal[i])
signalA = np.array(signalA)
p = 14
a, e, k = lpc(signalA, p, -1)
a = np.append(a, np.zeros(512-14))
data_freq = np.fft.fft(a, 512)
data_freq = sg.resample(data_freq,512)
magSpectrum = np.abs(data_freq)
# magDb = 1/magSpectrum
magDb = -np.log(magSpectrum)
signalArray = []
signalDK = []
for k in range(6000, 6900):
    signalArray.append(signal[k])
signalArray = sg.resample(signalArray, 900)
for k in range(0, 149):  # this is K = 0..150
    sig = 0
    for m in range(0, 299):
        sig = sig + np.abs(signalArray[m] - signalArray[m - k])  # this is D(k)
    signalDK.append(sig)
# plot.plot(signalDK)
plot.subplot(311)
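The -log|FFT(a)| curve above is the LPC spectral envelope, since the LPC synthesis filter is H(z) = 1/A(z); the same envelope can be read off scipy.signal.freqz (coefficients below are invented stand-ins for the lpc output):

import numpy as np
from scipy.signal import freqz

a = np.array([1.0, -1.3, 0.8, -0.1])        # hypothetical LPC coefficients
w, h = freqz([1.0], a, worN=512)            # H(e^jw) = 1 / A(e^jw)
env_db = 20 * np.log10(np.abs(h) + 1e-12)   # envelope in dB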
Example #60
def main():
    cfg.device = torch.device('cuda')
    torch.backends.cudnn.benchmark = False

    max_per_image = 100
    num_classes = 80 if cfg.dataset == 'coco' else 4
    dictionary = np.load(cfg.dictionary_file)

    colors = COCO_COLORS if cfg.dataset == 'coco' else DETRAC_COLORS
    names = COCO_NAMES if cfg.dataset == 'coco' else DETRAC_NAMES
    for j in range(len(names)):
        col_ = [c * 255 for c in colors[j]]
        colors[j] = tuple(col_)

    print('Creating model and recover from checkpoint ...')
    if 'hourglass' in cfg.arch:
        model = exkp(n=5,
                     nstack=2,
                     dims=[256, 256, 384, 384, 384, 512],
                     modules=[2, 2, 2, 2, 2, 4],
                     num_classes=num_classes)
    else:
        raise NotImplementedError

    model = load_demo_model(model, cfg.ckpt_dir)
    model = model.to(cfg.device)
    model.eval()

    # Loading COCO validation images
    annotation_file = '{}/annotations/instances_{}.json'.format(
        cfg.data_dir, cfg.data_type)
    coco = COCO(annotation_file)

    # Load all annotations
    cats = coco.loadCats(coco.getCatIds())
    nms = [cat['name'] for cat in cats]
    catIds = coco.getCatIds(catNms=nms)
    imgIds = coco.getImgIds(catIds=catIds)
    annIds = coco.getAnnIds(catIds=catIds)
    all_anns = coco.loadAnns(ids=annIds)

    for annotation in all_anns:
        if annotation['iscrowd'] == 1 or type(
                annotation['segmentation']) != list:
            continue

        img = coco.loadImgs(annotation['image_id'])[0]
        image_path = '%s/images/%s/%s' % (cfg.data_dir, cfg.data_type,
                                          img['file_name'])
        w_img = int(img['width'])
        h_img = int(img['height'])
        if w_img < 1 or h_img < 1:
            continue

        polygons = annotation['segmentation'][0]
        gt_bbox = annotation['bbox']
        gt_x1, gt_y1, gt_w, gt_h = gt_bbox
        contour = np.array(polygons).reshape((-1, 2))

        # Downsample the contour to fix number of vertices
        fixed_contour = resample(contour, num=cfg.num_vertices)

        # Indexing from the left-most vertex, argmin x-axis
        idx = np.argmin(fixed_contour[:, 0])
        indexed_shape = np.concatenate(
            (fixed_contour[idx:, :], fixed_contour[:idx, :]), axis=0)

        clockwise_flag = check_clockwise_polygon(indexed_shape)
        if not clockwise_flag:
            fixed_contour = np.flip(indexed_shape, axis=0)
        else:
            fixed_contour = indexed_shape.copy()

        fixed_contour[:, 0] = np.clip(fixed_contour[:, 0], gt_x1, gt_x1 + gt_w)
        fixed_contour[:, 1] = np.clip(fixed_contour[:, 1], gt_y1, gt_y1 + gt_h)

        contour_mean = np.mean(fixed_contour, axis=0)
        contour_std = np.std(fixed_contour, axis=0)
        # norm_shape = (fixed_contour - contour_mean) / np.sqrt(np.sum(contour_std ** 2.))

        # plot gt mean and std
        # image = cv2.imread(image_path)
        # # cv2.ellipse(image, center=(int(contour_mean[0]), int(contour_mean[1])),
        # #             axes=(int(contour_std[0]), int(contour_std[1])),
        # #             angle=0, startAngle=0, endAngle=360, color=(0, 255, 0),
        # #             thickness=2)
        # cv2.rectangle(image, pt1=(int(contour_mean[0] - contour_std[0] / 2.), int(contour_mean[1] - contour_std[1] / 2.)),
        #               pt2=(int(contour_mean[0] + contour_std[0] / 2.), int(contour_mean[1] + contour_std[1] / 2.)),
        #               color=(0, 255, 0), thickness=2)
        # cv2.polylines(image, [fixed_contour.astype(np.int32)], True, (0, 0, 255))
        # cv2.rectangle(image, pt1=(int(min(fixed_contour[:, 0])), int(min(fixed_contour[:, 1]))),
        #               pt2=(int(max(fixed_contour[:, 0])), int(max(fixed_contour[:, 1]))),
        #               color=(255, 0, 0), thickness=2)
        # cv2.imshow('GT segments', image)
        # if cv2.waitKey() & 0xFF == ord('q'):
        #     break

        image = cv2.imread(image_path, cv2.IMREAD_COLOR)
        original_image = image.copy()
        height, width = image.shape[0:2]
        padding = 127 if 'hourglass' in cfg.arch else 31
        imgs = {}
        for scale in cfg.test_scales:
            new_height = int(height * scale)
            new_width = int(width * scale)

            if cfg.img_size > 0:
                img_height, img_width = cfg.img_size, cfg.img_size
                center = np.array([new_width / 2., new_height / 2.],
                                  dtype=np.float32)
                scaled_size = max(height, width) * 1.0
                scaled_size = np.array([scaled_size, scaled_size],
                                       dtype=np.float32)
            else:
                img_height = (new_height | padding) + 1
                img_width = (new_width | padding) + 1
                center = np.array([new_width // 2, new_height // 2],
                                  dtype=np.float32)
                scaled_size = np.array([img_width, img_height],
                                       dtype=np.float32)

            img = cv2.resize(image, (new_width, new_height))
            trans_img = get_affine_transform(center, scaled_size, 0,
                                             [img_width, img_height])
            img = cv2.warpAffine(img, trans_img, (img_width, img_height))

            img = img.astype(np.float32) / 255.
            img -= np.array(
                COCO_MEAN if cfg.dataset == 'coco' else DETRAC_MEAN,
                dtype=np.float32)[None, None, :]
            img /= np.array(COCO_STD if cfg.dataset == 'coco' else DETRAC_STD,
                            dtype=np.float32)[None, None, :]
            img = img.transpose(
                2, 0, 1)[None, :, :, :]  # from [H, W, C] to [1, C, H, W]

            # if cfg.test_flip:
            #     img = np.concatenate((img, img[:, :, :, ::-1].copy()), axis=0)

            imgs[scale] = {
                'image': torch.from_numpy(img).float(),
                'center': np.array(center),
                'scale': np.array(scaled_size),
                'fmap_h': np.array(img_height // 4),
                'fmap_w': np.array(img_width // 4)
            }

        with torch.no_grad():
            segmentations = []
            predicted_codes = []
            start_time = time.time()
            for scale in imgs:
                imgs[scale]['image'] = imgs[scale]['image'].to(cfg.device)

                output = model(imgs[scale]['image'])[-1]
                segms, codes_ = ctsegm_scale_decode_debug(
                    *output,
                    torch.from_numpy(dictionary.astype(np.float32)).to(
                        cfg.device),
                    K=cfg.test_topk)
                segms = segms.detach().cpu().numpy().reshape(
                    1, -1, segms.shape[2])[0]
                codes_ = codes_.detach().cpu().numpy().reshape(
                    1, -1, codes_.shape[2])[0]

                top_preds = {}
                code_preds = {}
                for j in range(cfg.num_vertices):
                    segms[:, 2 * j:2 * j + 2] = transform_preds(
                        segms[:, 2 * j:2 * j + 2], imgs[scale]['center'],
                        imgs[scale]['scale'],
                        (imgs[scale]['fmap_w'], imgs[scale]['fmap_h']))
                segms[:, cfg.num_vertices * 2:cfg.num_vertices * 2 +
                      2] = transform_preds(
                          segms[:,
                                cfg.num_vertices * 2:cfg.num_vertices * 2 + 2],
                          imgs[scale]['center'], imgs[scale]['scale'],
                          (imgs[scale]['fmap_w'], imgs[scale]['fmap_h']))
                segms[:, cfg.num_vertices * 2 + 2:cfg.num_vertices * 2 +
                      4] = transform_preds(
                          segms[:, cfg.num_vertices * 2 +
                                2:cfg.num_vertices * 2 + 4],
                          imgs[scale]['center'], imgs[scale]['scale'],
                          (imgs[scale]['fmap_w'], imgs[scale]['fmap_h']))

                clses = segms[:, -1]
                for j in range(num_classes):
                    inds = (clses == j)
                    top_preds[j + 1] = segms[inds, :cfg.num_vertices * 2 +
                                             5].astype(np.float32)
                    top_preds[j + 1][:, :cfg.num_vertices * 2 + 4] /= scale
                    code_preds[j + 1] = codes_[inds, :]

                segmentations.append(top_preds)
                predicted_codes.append(code_preds)

            segms_and_scores = {
                j: np.concatenate([d[j] for d in segmentations], axis=0)
                for j in range(1, num_classes + 1)
            }  # a Dict label: segments
            codes_and_scores = {
                j: np.concatenate([d[j] for d in predicted_codes], axis=0)
                for j in range(1, num_classes + 1)
            }  # a Dict label: codes
            scores = np.hstack([
                segms_and_scores[j][:, cfg.num_vertices * 2 + 4]
                for j in range(1, num_classes + 1)
            ])

            if len(scores) > max_per_image:
                kth = len(scores) - max_per_image
                thresh = np.partition(scores, kth)[kth]
                for j in range(1, num_classes + 1):
                    keep_inds = (segms_and_scores[j][:, cfg.num_vertices * 2 +
                                                     4] >= thresh)
                    segms_and_scores[j] = segms_and_scores[j][keep_inds]
                    codes_and_scores[j] = codes_and_scores[j][keep_inds]

            # Use opencv functions to output a video
            output_image = original_image

            for lab in segms_and_scores:
                for idx in range(len(segms_and_scores[lab])):
                    res = segms_and_scores[lab][idx]
                    c_ = codes_and_scores[lab][idx]
                    # for res in segms_and_scores[lab]:
                    contour, bbox, score = res[:-5], res[-5:-1], res[-1]
                    bbox[0] = np.clip(bbox[0], 0, w_img)
                    bbox[1] = np.clip(bbox[1], 0, h_img)
                    bbox[2] = np.clip(bbox[2], 0, w_img)
                    bbox[3] = np.clip(bbox[3], 0, h_img)
                    if score > cfg.detect_thres:
                        text = names[lab]  # + ' %.2f' % score
                        # label_size = cv2.getTextSize(text, cv2.FONT_HERSHEY_COMPLEX, thickness=2, fontScale=0.5)
                        polygon = contour.reshape((-1, 2))

                        # use bb tools to draw predictions
                        color = random.choice(COLOR_WORLD)
                        bb.add(output_image, bbox[0], bbox[1], bbox[2],
                               bbox[3], text, color)
                        cv2.polylines(output_image, [polygon.astype(np.int32)],
                                      True,
                                      RGB_DICT[color],
                                      thickness=2)

                        # color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
                        # contour_mean = np.mean(polygon, axis=0)
                        # contour_std = np.std(polygon, axis=0)
                        # center_x, center_y = np.mean(polygon, axis=0).astype(np.int32)
                        # text_location = [bbox[0] + 1, bbox[1] + 1,
                        #                  bbox[1] + label_size[0][0] + 1,
                        #                  bbox[0] + label_size[0][1] + 1]
                        # cv2.rectangle(output_image, pt1=(int(bbox[0]), int(bbox[1])),
                        #               pt2=(int(bbox[2]), int(bbox[3])),
                        #               color=color, thickness=1)
                        # cv2.rectangle(output_image, pt1=(int(np.min(polygon[:, 0])), int(np.min(polygon[:, 1]))),
                        #               pt2=(int(np.max(polygon[:, 0])), int(np.max(polygon[:, 1]))),
                        #               color=(0, 255, 0), thickness=1)
                        # cv2.polylines(output_image, [polygon.astype(np.int32)], True, color, thickness=2)
                        # cv2.putText(output_image, text, org=(int(text_location[0]), int(text_location[3])),
                        #             fontFace=cv2.FONT_HERSHEY_COMPLEX, thickness=2, fontScale=0.5,
                        #             color=(255, 0, 0))
                        # cv2.putText(output_image, text, org=(int(bbox[0]), int(bbox[1])),
                        #             fontFace=cv2.FONT_HERSHEY_COMPLEX, thickness=1, fontScale=0.5,
                        #             color=color)

                        # show the histgram for predicted codes
                        fig = plt.figure()
                        plt.plot(np.arange(cfg.n_codes),
                                 c_.reshape((-1, )),
                                 color='green',
                                 marker='o',
                                 linestyle='dashed',
                                 linewidth=2,
                                 markersize=6)
                        plt.ylabel('Value of each coefficient')
                        plt.xlabel('All predicted {} coefficients'.format(
                            cfg.n_codes))
                        plt.title(
                            'Distribution of the predicted coefficients for {}'
                            .format(text))
                        plt.show()

            cv2.imshow('Results', output_image)
            if cv2.waitKey() & 0xFF == ord('q'):
                break
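The call resample(contour, num=cfg.num_vertices) above applies scipy.signal.resample along axis 0, turning an arbitrary-length polygon into a fixed number of vertices; a minimal check with an invented square:

import numpy as np
from scipy.signal import resample

contour = np.array([[0, 0], [4, 0], [4, 4], [0, 4]], dtype=float)
fixed = resample(contour, num=32)   # shape (32, 2), resampled along axis 0
print(fixed.shape)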