Example #1
def harris_ones(img, window_size, k=0.05):
    """Calculate the harris score based on a window function of diagonal ones.
    Args:
        img The image to use for corner detection.
        window_size Size of the window (NxN).
        k Weighting parameter during the final scoring (det vs. trace).
    Returns:
        Corner score image
    """
    # Gradients
    img = skiutil.img_as_float(img)
    imgy, imgx = np.gradient(img)

    imgxy = imgx * imgy
    imgxx = imgx ** 2
    imgyy = imgy ** 2

    # window function (matrix of ones)
    window = np.ones((window_size, window_size))

    # compute parts of harris matrix
    a11 = signal.correlate(imgxx, window, mode="same") / window_size
    a12 = signal.correlate(imgxy, window, mode="same") / window_size
    a21 = a12
    a22 = signal.correlate(imgyy, window, mode="same") / window_size

    # compute score per pixel
    det_a = a11 * a22 - a12 * a21
    trace_a = a11 + a22

    return det_a - k * trace_a ** 2
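# Minimal usage sketch (not from the original snippet); the imports below are
# assumptions about what the snippet itself relies on (np, signal, skiutil).
import numpy as np
import skimage.util as skiutil
from scipy import signal
from skimage import data

score = harris_ones(data.camera(), window_size=5)
corners = score > 0.01 * score.max()  # keep only the strong corner responses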
Example #2
def main():
    """Load image, apply sobel (to get x/y gradients), plot the results."""
    img = data.camera()

    sobel_y = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
    sobel_x = np.rot90(sobel_y)  # rotates counter-clockwise

    # apply x/y sobel filter to get x/y gradients
    img_sx = signal.correlate(img, sobel_x, mode="same")
    img_sy = signal.correlate(img, sobel_y, mode="same")

    # combine x/y gradients to gradient magnitude
    # scikit-image's implementation divides by sqrt(2), not sure why
    img_s = np.sqrt(img_sx ** 2 + img_sy ** 2) / np.sqrt(2)

    # create binarized image
    threshold = np.average(img_s)
    img_s_bin = np.zeros(img_s.shape)
    img_s_bin[img_s > threshold] = 1

    # generate ground truth (scikit-image method)
    ground_truth = skifilters.sobel(data.camera())

    # plot
    util.plot_images_grayscale(
        [img, img_sx, img_sy, img_s, img_s_bin, ground_truth],
        [
            "Image",
            "Sobel (x)",
            "Sobel (y)",
            "Sobel (magnitude)",
            "Sobel (magnitude, binarized)",
            "Sobel (Ground Truth)",
        ],
    )
Example #3
 def test_consistency_correlate_funcs(self):
     # Compare np.correlate, signal.correlate, signal.correlate2d
     a = np.arange(5)
     b = np.array([3.2, 1.4, 3])
     for mode in ["full", "valid", "same"]:
         assert_almost_equal(np.correlate(a, b, mode=mode), signal.correlate(a, b, mode=mode))
         assert_almost_equal(np.squeeze(signal.correlate2d([a], [b], mode=mode)), signal.correlate(a, b, mode=mode))
Example #4
    def _setup_rank1(self, mode):
        np.random.seed(9)
        a = np.random.randn(10).astype(self.dt)
        a += 1j * np.random.randn(10).astype(self.dt)
        b = np.random.randn(8).astype(self.dt)
        b += 1j * np.random.randn(8).astype(self.dt)

        y_r = (correlate(a.real, b.real, mode=mode) + correlate(a.imag, b.imag, mode=mode)).astype(self.dt)
        y_r += 1j * (-correlate(a.real, b.imag, mode=mode) + correlate(a.imag, b.real, mode=mode))
        return a, b, y_r
Example #5
File: TimeTP.py Project: keialk/TimeTP
def checkPathway(network, entry, keggDict, maxDelay, skip) :

	netlist_e = []	# set of entry for each path [ set(0,1,2), set(0,1,2), set(2,3,4) ... ]
	netlist_eg = []	# connected components for each path [ (0!g1->1!g2->2!g3), (0!g4->1!g5->2!g6), ... ]
	netlist_cc = []
	net_e = nx.DiGraph()
	net_eg = nx.DiGraph()

	for (a,b) in network.edges() :
		type = network[a][b]['type']
		e1 = entry[a]
		e2 = entry[b]
		for g1 in e1 :
			for g2 in e2 :
				if g1 not in keggDict or g2 not in keggDict :
					continue 
				if checkProfile(keggDict[g1], keggDict[g2], type, maxDelay) :
					net_e.add_edge(a, b)
					net_eg.add_edge(a+'!'+g1, b+'!'+g2)

	comps = nx.connected_components(net_eg.to_undirected())
	temp = []
	for c in comps :
		real_c = [x.split('!')[1] for x in c]
		real_ce = set(x.split('!')[0] for x in c)
		# Check if the same gene set w/ different entry name exists
		if real_c not in temp and ((not skip) or len(real_ce)>2):
			temp.append(real_c)
			netlist_e.append(set(x.split('!')[0] for x in c))
			netlist_eg.append(net_eg.subgraph(c))

	for eg in netlist_eg :
		cc_e = {}
		tempcc = 0
		for (a, b) in eg.edges() :
			e1 = a.split('!')[0]
			e2 = b.split('!')[0]
			g1 = a.split('!')[1]
			g2 = b.split('!')[1]
			t1 = np.array(keggDict[g1])
			t2 = np.array(keggDict[g2])
			if network[e1][e2]['type']==1 :
				tempcc = abs(max(correlate(t1, t2)))
			else :
				tempcc = abs(min(correlate(t1, t2)))
			if (e1, e2) not in cc_e :
				cc_e[(e1, e2)]=[]
			cc_e[(e1, e2)].append(tempcc)
		cc = 0
	for e, cclist in cc_e.items():
			cc+= sum(cclist)/float(len(cclist))
		netlist_cc.append(cc)			
 
	return netlist_e, netlist_eg, netlist_cc
Example #6
    def _setup_rank1(self, mode):
        a = np.random.randn(10).astype(self.dt)
        a += 1j * np.random.randn(10).astype(self.dt)
        b = np.random.randn(8).astype(self.dt)
        b += 1j * np.random.randn(8).astype(self.dt)

        y_r = (correlate(a.real, b.real, mode=mode, old_behavior=False) +
               correlate(a.imag, b.imag, mode=mode, old_behavior=False)).astype(self.dt)
        y_r += 1j * (-correlate(a.real, b.imag, mode=mode, old_behavior=False) +
                correlate(a.imag, b.real, mode=mode, old_behavior=False))
        return a, b, y_r
Example #7
    def test_rank3(self):
        a = np.random.randn(10, 8, 6).astype(self.dt)
        a += 1j * np.random.randn(10, 8, 6).astype(self.dt)
        b = np.random.randn(8, 6, 4).astype(self.dt)
        b += 1j * np.random.randn(8, 6, 4).astype(self.dt)

        y_r = (correlate(a.real, b.real) + correlate(a.imag, b.imag)).astype(self.dt)
        y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real))

        y = correlate(a, b, "full")
        assert_array_almost_equal(y, y_r, decimal=self.decimal - 1)
        self.assertTrue(y.dtype == self.dt)
Example #8
    def test_rank3_old(self):
        a = np.random.randn(10, 8, 6).astype(self.dt)
        a += 1j * np.random.randn(10, 8, 6).astype(self.dt)
        b = np.random.randn(8, 6, 4).astype(self.dt)
        b += 1j * np.random.randn(8, 6, 4).astype(self.dt)

        y_r = (correlate(a.real, b.real, old_behavior=False)
                + correlate(a.imag, b.imag, old_behavior=False)).astype(self.dt)
        y_r += 1j * (-correlate(a.real, b.imag, old_behavior=False) +
                correlate(a.imag, b.real, old_behavior=False))

        y = correlate(b, a.conj(), 'full')
        assert_array_almost_equal(y, y_r, decimal=4)
        self.failUnless(y.dtype == self.dt)
Example #9
    def test_xcorr_xcorrt(self):
        N = 1001
        data1 = np.sin(np.arange(N // 2 + 1) / 100.)
        data2 = np.e ** (-(np.arange(N) - 500) ** 2 / 100.) - np.e ** (-(np.arange(N) - 50) ** 2 / 100.) + 5 * np.e ** (-(np.arange(N) - 950) ** 2 / 100.)
        cor1 = correlate(data1 - np.mean(data1), data2 - np.mean(data2), 'full')
        cor2 = xcorrt(data2, data1, 750, window=N, ndat2d=N // 2 + 1)[::-1]
        cor3 = xcorrt(data1, data2, 750, window=N, ndat1d=N // 2 + 1) #@UnusedVariable
        cor3b = xcorrt(data1, data2, 750, window=0) #@UnusedVariable
        cor3c = xcorrt(data2, data1, 750, window=0)[::-1] #@UnusedVariable
        cor1 *= max(cor2) / max(cor1)

        cor4 = correlate(data2, data1, 'full')
        cor5 = xcorrt(data2, data1, 750, demean=False)
        cor5b = xcorrt(data2, data1, 750, shift_zero= -100, demean=False)
        cor5c = xcorrt(data2, data1, 750, shift_zero=100, demean=False)
        cor4 *= max(cor5) / max(cor4)

        cor7 = correlate(data1, data2, 'full')
        cor8 = xcorrt(data1, data2, 750, demean=False, normalize=False)
        cor10 = xcorrt(data1, data2, 750, oneside=True, demean=False, normalize=False) #@UnusedVariable

#        from pylab import plot, show, subplot, legend
#        subplot(411)
#        plot(data1)
#        plot(data2)
#        subplot(412)
#        plot(cor1, label='scipy.signal all demeaned and normalized')
#        plot(cor2, label='xcorrt ndat1 > ndat2 ndatxd')
#        plot(cor3, label='xcorrt ndat1 < ndat2 ndatxd')
#        plot(cor3b, label='xcorrt ndat1 > ndat2 window = 0')
#        plot(cor3c, label='xcorrt ndat1 < ndat2 window = 0')
#        legend()
#        subplot(413)
#        plot(cor4, label='scipy.signal all normalized')
#        plot(cor5, label='xcorrt')
#        plot(cor5b, label='xcorrt shifted -100')
#        plot(cor5c, label='xcorrt shifted 100')
#        legend()
#        subplot(414)
#        plot(cor7, label='scipy.signal')
#        plot(cor8, label='xcorrt')
#        plot(cor10, label='xcorrt oneside=True')
#        legend()
#        show()
        np.testing.assert_array_almost_equal(cor1, cor2)
        np.testing.assert_array_almost_equal(cor4, cor5)
        np.testing.assert_array_almost_equal(cor7, cor8)
        np.testing.assert_array_almost_equal(cor5[200:300], cor5b[100:200])
        np.testing.assert_array_almost_equal(cor5[200:300], cor5c[300:400])
Example #10
File: qam.py Project: viyer/ham_qam
def detect_sync(r, i, sync, sync_bits, fs=48000, baud=300, plot=False):
    Ns = fs/baud
    corr_r = abs(signal.correlate(r.ravel(), sync.real.ravel(),"same"))
    corr_i = abs(signal.correlate(i.ravel(), sync.imag.ravel(),"same"))
    corr_index = np.argmax(corr_r)-sync_bits/2*Ns

    if plot:
        print(np.max(np.abs(corr_r)))
        print(np.max(np.abs(corr_i)))
        fig = plt.figure(figsize = (16,4))
        plt.plot(corr_r)
        plt.plot(corr_i)
        plt.title("Correlation of signal with known prefix")

    return corr_index
Example #11
    def altPhase(self, debug=False):
        """
        Alternate version of lag detection using scipy's cross correlation function.
        """
        if debug or self._debug: print "altPhase..."
        # normalize arrays
        mod = self.model
        mod -= self.model.mean()
        mod /= mod.std()
        obs = self.observed
        obs -= self.observed.mean()
        obs /= obs.std()

        if debug or self._debug: print "...get cross correlation and find number of timesteps of shift..."
        xcorr = correlate(mod, obs)
        samples = np.arange(1 - self.length, self.length)
        time_shift = samples[xcorr.argmax()]

        # find number of minutes in time shift
        try: #Fix for Drifter's data
            step_sec = self.step.seconds
        except AttributeError:
            step_sec = self.step * 24.0 * 60.0 * 60.0 # converts matlabtime (in days) to seconds
        lag = time_shift * step_sec / 60

        if debug or self._debug: print "...altPhase done."

        return lag
Example #12
def maxccf(A, B):
    nsamples=len(A)
    xcorr = correlate(A, B)
    et = np.arange(1-nsamples, nsamples)
    recovered_time_shift = et[xcorr.argmax()]
    #print recovered_time_shift
    return  recovered_time_shift
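# Minimal usage sketch (not from the original source): recover a known shift
# with maxccf; the imports mirror what the snippet assumes.
import numpy as np
from scipy.signal import correlate

t = np.arange(500)
A = np.sin(2 * np.pi * t / 50.0)
B = np.roll(A, 7)      # delay A by 7 samples (seamless here: 50 divides 500)
print(maxccf(A, B))    # -7: with scipy's lag convention, B lagging A gives a negative shift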
Example #13
File: util.py Project: pborky/pynfsa
def leakage(length,periods,fnc=None,notitle=True):
    import numpy as np
    from scipy.signal import correlate
    from scipy.fftpack import fftfreq,fft
    n = int(1e7)
    x = np.sin(np.linspace(0,2*periods*np.pi,length))
    if callable(fnc):
        x *= fnc(length)
        name = fnc.__name__
    else: name = 'rectangular'
    f = fftfreq(int(n),1./length)
    dB = lambda x:10*np.log10(x/x.max())
    psd =  lambda x,n: np.abs(fft(correlate(x,x,'full'),n=int(n)) )
    positive = lambda x,f:x[f>0]
    integers = lambda x,f:x[(np.ceil(f)-f<1e-4)&(f>0)]
    def plt(ax):
        ax.plot (positive(f,f)-periods, positive(dB(psd(x,n)),f))
        ax.plot (integers(f,f)-periods, integers(dB(psd(x,n)),f),'ro')
    (fg,ax),dumy = fig( plt, show=False )
    if not notitle: ax.set_title('Spectral leakage (%s window, %d samples)'%(name,length))
    ax.set_xlabel('DFT bins')
    ax.set_ylabel('Relative Magnitude [dB]')
    ax.set_xlim((-periods,14-periods))
    ax.set_ylim((-70,1))
    ax.set_xticks(range(-periods,14-periods+1,1))
    fg.show()
Example #14
File: utilities.py Project: cvarin/PyOFTK
def intAC(eFieldSVEA):
	'''
	Compute the optical autocorrelation trace
	'''
	AC = signal.correlate(eFieldSVEA, eFieldSVEA, mode='same')	
	
	return AC
Example #15
def match_orders(orders, s_list_src, s_list_dst):
    """
    try to match orders of src and dst
    """
    center_indx = int(len(s_list_src) / 2)

    center_s = s_list_src[center_indx]

    from scipy.signal import correlate

    # TODO : it is not clear if this is a right algorithm

    # we have to clip the spectra so that the correlation is not
    # sensitive to bright lines in the target spectra.
    s_list_dst_clip = [np.clip(s, -10, 100) for s in s_list_dst]
    cor_list = [correlate(center_s, s, mode="same") for s in s_list_dst_clip]
    cor_max_list = [np.nanmax(cor) for cor in cor_list]

    center_indx_dst = np.nanargmax(cor_max_list)

    delta_indx = center_indx - center_indx_dst

    print(cor_max_list, delta_indx)

    orders_dst = np.arange(len(s_list_dst)) - center_indx_dst + orders[center_indx]

    return delta_indx, orders_dst
Example #16
def freq_from_autocorr(signal, fs):
    """
    Estimate frequency using autocorrelation

    Pros: Best method for finding the true fundamental of any repeating wave,
    even with strong harmonics or completely missing fundamental

    Cons: Not as accurate, doesn't find fundamental for inharmonic things like
    musical instruments, this implementation has trouble with finding the true
    peak
    """
    signal = asarray(signal) + 0.0

    # Calculate autocorrelation, and throw away the negative lags
    signal -= mean(signal)  # Remove DC offset
    corr = correlate(signal, signal, mode='full')
    corr = corr[len(corr)//2:]

    # Find the first valley in the autocorrelation
    d = diff(corr)
    start = find(d > 0)[0]

    # Find the next peak after the low point (other than 0 lag).  This bit is
    # not reliable for long signals, due to the desired peak occurring between
    # samples, and other peaks appearing higher.
    i_peak = argmax(corr[start:]) + start
    i_interp = parabolic(corr, i_peak)[0]

    return fs / i_interp
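# Self-contained sketch of the same idea (helper names here are illustrative):
# the snippet above depends on external `find` and `parabolic` helpers, so this
# variant uses plain numpy and skips the parabolic refinement.
import numpy as np
from scipy.signal import correlate

def freq_from_autocorr_simple(x, fs):
    """Estimate the fundamental of x from its first autocorrelation peak."""
    x = np.asarray(x, dtype=float)
    x -= x.mean()
    corr = correlate(x, x, mode='full')[len(x) - 1:]  # non-negative lags only
    d = np.diff(corr)
    start = np.flatnonzero(d > 0)[0]          # first valley after lag 0
    peak = np.argmax(corr[start:]) + start    # dominant peak past the valley
    return fs / peak

fs = 8000.0
t = np.arange(4096) / fs
print(freq_from_autocorr_simple(np.sin(2 * np.pi * 440.0 * t), fs))
# ~444 Hz: quantized to integer lags; parabolic interpolation would refine it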
Example #17
File: fbcorr.py Project: npinto/sthor
def fbcorr(arr_in, arr_fb, arr_out=None, stride=DEFAULT_STRIDE):
    """XXX: docstring"""

    # -- Temporary constraints
    # XXX: make fbcorr n-dimensional
    assert arr_in.ndim == 3
    assert arr_fb.ndim == 4

    # -- check arguments
    assert arr_in.dtype == arr_fb.dtype

    inh, inw, ind = arr_in.shape
    fbh, fbw, fbd, fbn = arr_fb.shape

    out_shape = (inh - fbh + 1), (inw - fbw + 1), fbn

    # -- Create output array if necessary
    if arr_out is None:
        arr_out = np.empty(out_shape, dtype=arr_in.dtype)

    assert arr_out.dtype == arr_in.dtype
    assert arr_out.shape == out_shape

    # -- Correlate !
    for di in range(fbn):
        filt = arr_fb[..., di]
        arr_out[..., di] = correlate(arr_in, filt, mode='valid')

    return arr_out
Example #18
def autoCorrLen(data):
	"""
	Calculates the autocorrelation length. Starting from 0 lag (each lag is 
	scaled by 1/(N-lag), N being length of data array), this finds where the 
	autocorrelation scaled by zero-lag autocorrelation drops to 0.01 first.
	
	parameters:
			data - 1-D array of numbers
	return:
			autoLen - autocorrelation length
			corr - autocorrelation
			
	note: later I want to instead implement a linear fit in log space to find
		  the exponential scale factor then use that to solve for the lag which	
		  the amplitude drops to 0.01 instead
		  i.e. autocorrLen = - scale ln(0.01) where scale is the -1/slope of the
		  fit
	
	"""
	arg = data - data.mean()
	
	# calculate the autocorrelation and lag array
	corr = sig.correlate(arg,arg)
	center = len(corr) // 2  # index of zero lag
	corr = corr[center:] / corr[center]  # restrict to non-negative lag, scaled by the zero-lag value
	where = np.where(corr < 0.01)  # find where the scaled autocorrelation drops below 0.01

	autoLen = float(where[0][0])  # first lag where the scaled autocorrelation < 0.01
	
	return [autoLen, corr]
Example #19
def align_adc_w_nrz(ADC, NRZ, OSR,
                    nrz_start=1e3, corr_len=250):
    '''Align and downsample ADC by NRZ.'''
    ADC = ADC.reshape(-1, OSR, order='C')
    r = int(nrz_start) + numpy.arange(0, corr_len, dtype='int').reshape(-1, 1)
    delay = numpy.zeros(OSR, dtype='int')
    delay_corr = numpy.zeros(OSR)
    delay_corr_full = numpy.zeros((len(r), OSR))
    for i in numpy.arange(0, OSR, dtype='int'):
        delay[i], delay_corr[i] = fun.find_corr_shift(NRZ[r, 0], ADC[r, i])
        delay_corr_full[:, i] = signal.correlate(
            NRZ[r, 0], ADC[r, i], mode='same').ravel()
    delay_corr_full = delay_corr_full.reshape((-1, 1))
    t = fun.range_len_central_zero(
        numpy.size(delay_corr_full)
        ).reshape(-1, 1) / OSR
#    figure(2)
#    clf()
#    plot (t,delay_corr_full[:,0])
    m = numpy.argmax(delay_corr)
    d = delay[m]
#    print d
    X = numpy.roll(ADC[:, m], d)
    X = X.reshape((1, numpy.size(X)))
    return X
Example #20
File: tools.py Project: ardoi/datajuicer
 def get_align_indices(self, wave):
     #wave = helpers.blur_image(wave.astype('float'),1)
     wave = sn.uniform_filter(wave.astype('float'), (3,3))
     indices = []
     w_base = wave.mean(axis=0)
     w_base_n = (w_base-w_base.min())/(w_base.max()-w_base.min())
     pad_left = n.ones(wave.shape[1] // 2) * w_base_n[0:10].mean()
     pad_right = n.ones(wave.shape[1] // 2) * w_base_n[-10:].mean()
     ww0=n.hstack((pad_left,w_base_n,pad_right))
     flatten = 3
     for i in range(wave.shape[0]):
         if 0:
             indices.append(0)
         else:
             ww = wave[max(0,i-flatten):min(wave.shape[0], i+flatten)]
             w_i = ww.mean(axis=0)
             w_i2 = helpers.smooth(wave[i])
             w_i = helpers.smooth(w_i)
             w_i_n = (w_i-w_i.min())/(w_i.max()-w_i.min())
             w_i_n2 = (w_i2-w_i2.min())/(w_i2.max()-w_i2.min())
             cc = ss.correlate(ww0, w_i_n, mode='valid')
             indices.append(cc.argmax()-wave.shape[1]/2.)
     #make a nice polynomial fit for the indices
     indices = n.array(indices).astype('int')
     return indices
Example #21
    def altPhase(self, debug=False):
        '''
        Alternate version of lag detection using scipy's cross correlation
        function.
        '''
        if debug or self._debug: print("altPhase...")
        # normalize arrays
        mod = self.model
        mod -= self.model.mean()
        mod /= mod.std()
        obs = self.observed
        obs -= self.observed.mean()
        obs /= obs.std()

        if debug or self._debug: print("...get cross correlation and find number of timesteps of shift...")
        xcorr = correlate(mod, obs)
        samples = np.arange(1 - self.length, self.length)
        time_shift = samples[xcorr.argmax()]

        # find number of minutes in time shift
        step_sec = self.step.seconds
        lag = time_shift * step_sec / 60

        if debug or self._debug: print("...altPhase done.")

        return lag
Example #22
def cross_correlate(profiles, template, period, resample_factor=100):
    """Template matching function. Cross-correlate profiles with a template,
    and determine the lag (in seconds).
    Input:
        profiles:          profiles in the channels
        template:          template to use for cross-correlation
        period:            pulse period (s)
        resample_factor:   factor to use for resampling the correlation functions
    """
    resolution = len(profiles[0])
    time_resolution = period/float(resolution)
    delay = np.zeros(np.shape(profiles)[0])
    for i in range(np.shape(profiles)[0]):
        correlation_coefficients = correlate(template, profiles[i])
        resampled_coefficients = resample(correlation_coefficients, resample_factor \
                                          * len(correlation_coefficients))
        lag = np.argmax(resampled_coefficients) / float(resample_factor)
        lag = np.mod(lag, resolution) + 1.0 # scipy correlate returns N + 1 points
        # Restrict the lag from shifting profiles across full period
        if lag > len(template)/2.:
            lag = len(template) - lag
        else:
            lag = -lag
        delay[i] = time_resolution * lag
    return delay
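# Minimal usage sketch (not from the original source), assuming the imports the
# snippet relies on: numpy as np plus scipy.signal's correlate and resample.
import numpy as np
from scipy.signal import correlate, resample

resolution = 128
phase = np.arange(resolution) / float(resolution)
template = np.exp(-((phase - 0.5) ** 2) / 0.001)        # Gaussian pulse
profiles = np.array([np.roll(template, 5), np.roll(template, -3)])
print(cross_correlate(profiles, template, period=1.0))  # per-channel delays in s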
Example #23
def corrfunc(x, y, t):
    ''' Calculate the cross-correlation function and time shifts for a
        pair of time series x, y
    '''

    # normalize input series
    x -= x.mean()
    y -= y.mean()
    x /= x.std()
    y /= y.std()

    # calculate cross-correlation function
    corr = signal.correlate(x,y)/float(len(x))

    # transform time axis in offset units
    lags = np.arange(corr.size) - (t.size - 1)
    tstep = (t[-1] - t[0])/float(t.size)
    offset = lags*tstep

    # time shift is found for the maximum of the correlation function
    shift = offset[np.argmax(corr)]

    # new time axis to plot shifted time series
    newt = t + shift

    # correct time intervals if shift bigger than half the interval
    if min(newt) > (max(t)/2):
         newt = newt - max(t)
         shift = shift - max(t)
    elif max(newt) < (min(t)/2):
         newt = newt + min(t)
         shift = shift + min(t)

    return corr, offset, newt, shift
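# Minimal usage sketch (not from the original source): recover the offset of
# two shifted series. corrfunc normalizes its inputs in place, hence .copy().
import numpy as np
from scipy import signal

t = np.linspace(0.0, 10.0, 1000)
x = np.sin(2 * np.pi * 0.5 * t)
y = np.sin(2 * np.pi * 0.5 * (t - 0.3))   # y lags x by 0.3 time units
corr, offset, newt, shift = corrfunc(x.copy(), y.copy(), t)
print(shift)                               # close to -0.3 under scipy's lag convention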
Example #24
def cross_correlate(SNR=10.0):
     wave,spec = read_spectrum()
     wave_resamp, spec_resamp = bin_spectrum(wave,spec)

     #With noise
     wave_resamp, noisy_spec = add_noise(wave_resamp, spec_resamp, SNR=SNR)    
     flat_spec_noise = flatten_spec(noisy_spec)
     wave_logspace_noise, flat_spec_log_noise = regrid_spectrum(wave_resamp, flat_spec_noise)

     #Without noise
     flat_spec_no_noise = flatten_spec(spec_resamp)
     wave_logspace_no_noise, flat_spec_log = regrid_spectrum(wave_resamp,flat_spec_no_noise)
    
     #Plot with noise and without noise, both flattened and regridded into logspace
     plt.ion()
     plt.figure(1)
     plt.clf()
     plt.plot(wave_logspace_noise,flat_spec_log_noise)
     plt.plot(wave_logspace_no_noise,flat_spec_log)
     
     #Cross correlate
     #(Need to check if this is the right order for np.correlate)
     cor = correlate(flat_spec_log_noise,flat_spec_log, mode='full' )
     plt.ion()
     plt.figure(2)
     plt.clf()
     plt.plot(cor)
     
     #Cross correlate with velocity on x-axis
     #delta_lnwave = np.log(wave_logspace_noise)
     #take median of difference and convert to meters from angstroms
     #diff = np.median((np.diff(delta_lnwave)))/(1e10)
     # set value speed light in m/s
     c = 2.99792458e8
     dv=(((wave_logspace_noise[1]-wave_logspace_noise[0])/(wave_logspace_noise[0]))*c)
     # recenter cor
     l = len(cor)
     ran = np.array(range(l))
     n_init = ((l-1)/2.0)
     n_fin = ran - n_init   
     
     #creates graph with peak centered at 0
     plt.ion()
     plt.figure(4)
     plt.clf()
     plt.plot(n_fin, cor)
     
     #error here, says operands could not be broadcast together with shapes (328,) (657,)   
     n_arr = np.array(n_fin)
     v = (dv * n_arr)/1000.
     plt.ion()
     plt.figure(3)
     plt.clf()
     plt.plot(v, cor, 'o')
     plt.plot(v,cor)
     plt.xlabel('Velocity km/s')
     plt.ylabel('Correlation Amplitude')
     
     return
     
Example #25
File: functions.py Project: mydmdm/project
def find_corr_shift (a,b):
    '''Return (d, peak) such that a = np.roll(b, d).'''
    t = range_len_central_zero(len(a))
    C = signal.correlate(a,b,mode='same')
    p = np.argmax(C)
    return (t[p],C[p])
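# Minimal usage sketch (not from the original project). range_len_central_zero
# lives elsewhere in that project; the stand-in below is an assumption about
# its behavior (a length-n lag axis centred on zero).
import numpy as np
from scipy import signal

def range_len_central_zero(n):
    return np.arange(n) - n // 2   # e.g. [-2, -1, 0, 1, 2] for n = 5

b = np.sin(np.linspace(0, 4 * np.pi, 64))
a = np.roll(b, 5)
d, peak = find_corr_shift(a, b)
print(d)   # 5, matching the docstring's a = np.roll(b, d)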
Example #26
def local_mean(img, size=3):
    """ Compute a image of the local average
    """
    structure_element = np.ones((size, size), dtype=img.dtype)
    l_mean = signal.correlate(img, structure_element, mode='same')
    l_mean /= size**2
    return l_mean
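# Quick check (not from the original source): correlating with a ones kernel
# and dividing by size**2 is a plain moving average, verified away from borders.
import numpy as np
from scipy import signal

img = np.arange(25, dtype=float).reshape(5, 5)
m = local_mean(img, size=3)
print(np.isclose(m[2, 2], img[1:4, 1:4].mean()))  # True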
Example #27
def is_periodic(aud_sample, SAMPLING_RATE = 8000):
    '''
    :param aud_sample: Numpy 1D array rep of audio sample
    :param SAMPLING_RATE: Used to focus on human speech freq range
    :return: True if periodic, False if aperiodic
    '''

    # TODO: Find a sensible threshold
    thresh = 1e-1

    # Use auto-correlation to find if there is enough periodicity in [50-400] Hz range
    values = signal.correlate(aud_sample, aud_sample, mode='full')
    values = values[values.size // 2:]  # keep non-negative lags so l_idx/r_idx below index true lags
    # print(values.max, values.shape)

    # [50-400 Hz] corresponds to [2.5-20] ms OR [20-160] samples for 8 KHz sampling rate
    l_idx = int(SAMPLING_RATE*2.5/1000)
    r_idx = int(SAMPLING_RATE*20/1000)

    subset_values = values[l_idx:r_idx]

    # print(subset_values.shape, np.argmax(subset_values), subset_values.max())

    if subset_values.max() < thresh:
        return False
    else:
        return True
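# Minimal usage sketch (not from the original source): a 200 Hz tone sampled at
# 8 kHz should register as periodic; very small white noise typically should not.
import numpy as np
from scipy import signal

t = np.arange(8000) / 8000.0
print(is_periodic(np.sin(2 * np.pi * 200.0 * t)))   # True
print(is_periodic(1e-3 * np.random.randn(8000)))    # usually False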
Example #28
def local_var(img, size=3):
    """ Compute a image of the local variance
    """
    structure_element = np.ones((size, size), dtype=img.dtype)
    l_var = signal.correlate(img**2, structure_element, mode='same')
    l_var /= size**2
    l_var -= local_mean(img, size=size)**2
    return l_var
Example #29
 def computeBias(self, fasta, chromDict, pwm):
     """compute bias track based on sequence and pwm"""
     self.slop(chromDict, up = pwm.up, down = pwm.down)
     sequence = seq.get_sequence(self, fasta)
     seqmat = seq.seq_to_mat(sequence, pwm.nucleotides)
     self.vals = signal.correlate(seqmat,np.log(pwm.mat),mode='valid')[0]
     self.start += pwm.up
     self.end -= pwm.down
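# Self-contained sketch of the scoring step above (not from the original
# project; the PWM here is made up): correlating a one-hot sequence matrix with
# a log-PWM in 'valid' mode gives the log-probability of every k-mer window.
import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
pwm = rng.random((4, 3))                    # 4 nucleotides x 3 positions
pwm /= pwm.sum(axis=0)                      # columns sum to 1
seq = "ACGTACG"
onehot = np.array([[c == n for c in seq] for n in "ACGT"], dtype=float)
scores = signal.correlate(onehot, np.log(pwm), mode='valid')[0]
print(scores)                               # one score per 3-mer window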
Example #30
File: deconvolve.py Project: preinh/RF
def acorrt(a, num):
    """
    Return the unnormalized auto-correlation of signal a.

    Sample 0 corresponds to zero lag time. The auto-correlation will consist of
    num samples.
    """
    return correlate(add_zeros(a, num, 'right'), a, 'valid')
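# Minimal sketch (not from the original module): add_zeros is defined elsewhere
# in that project, so the stand-in below is an assumption (pad with num zeros
# on the requested side). With it, acorrt(a, num)[k] equals sum(a[i] * a[i+k]).
import numpy as np
from scipy.signal import correlate

def add_zeros(a, num, side):
    pad = (0, num) if side == 'right' else (num, 0)
    return np.pad(a, pad)

a = np.array([1.0, 2.0, 3.0, 4.0])
print(acorrt(a, 3))   # [30. 20. 11.  4.] -> lags 0..3 with this stand-in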
Example #31
import numpy as np
from numpy.random import RandomState
from scipy.signal import correlate

# construct the data
image = RandomState(0).choice(np.arange(0, 4), size=25).reshape(5, 5)
# array([[0, 3, 1, 0, 3],
#        [3, 3, 3, 1, 3],
#        [1, 2, 0, 3, 2],
#        [0, 0, 0, 2, 1],
#        [2, 3, 3, 2, 0]])

filter = RandomState(0).choice(np.arange(3), size=9).reshape(-1, 3)
# array([[0, 1, 0],
#        [1, 1, 2],
#        [0, 2, 0]])

# cross-correlation
ccor = correlate(image, filter)
# array([[ 0,  0,  6,  2,  0,  6,  0],
#        [ 0, 12, 11, 10,  9,  9,  3],
#        [ 6, 11, 19,  9, 16, 11,  3],
#        [ 2,  8,  6, 11, 12, 10,  2],
#        [ 0,  5,  8, 10, 11,  5,  1],
#        [ 4,  8, 11, 10,  7,  3,  0],
#        [ 0,  2,  3,  3,  2,  0,  0]])

ccor_valid = correlate(image, filter, 'valid')
# array([[19,  9, 16],
#        [ 6, 11, 12],
#        [ 8, 10, 11]])

stride = 1
filter_size = 3
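# Follow-up sketch (an assumption about where the stride/filter_size lines were
# heading): the usual output-size formula for a 'valid' correlation.
out_size = (image.shape[0] - filter_size) // stride + 1
print(out_size, ccor_valid.shape)   # 3 (3, 3)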
Example #32
    image.imsave(os.path.join(PATH_TO_RELU, '{}.png'.format(i)),
                 conv1,
                 cmap=CMAP,
                 vmin=0,
                 vmax=255)

################################################################################
# PrimaryCaps
################################################################################
primary_caps_output = np.empty((6, 6, 256))
for i in range(primary_caps_weights.shape[3]):
    # Get the 9x9x256 kernel
    extracted_filter = primary_caps_weights[:, :, :, i]

    # Apply convolution
    conv2 = signal.correlate(conv1_output, extracted_filter, 'valid')
    conv2 = conv2 + primary_caps_bias[i]

    # Outputs 12x12, but we need 6x6 so we drop every other item
    conv2 = conv2[::2, ::2, :]
    conv2 = np.squeeze(conv2)

    # ReLU
    conv2 = np.maximum(0, conv2)
    primary_caps_output[:, :, i] = conv2

# The paper says that a PrimaryCaps layer is a 6x6 matrix of 8-dimensional
# vectors and that there should be 32 PrimaryCaps layers, meaning we have
# 6x6x32 = 1,152 vectors.
# We only really need a list of all the 8d vectors, hence we can reshape the
# matrix to: (1152, 8, 1)
Example #33
f = ut.freq_fr_time(t)  # frequency axis
tc = ut.corr_fr_time(t)  # correlation time axis
cd = prn.gold_seq(3, 5, no_periods=3)  # code
Ts = 10e-3  # Nyquist's symbol interval
tau = 0.8  # time acceleration factor
Tstr = Ts * tau  # transmitted symbol interval
td = 0.05  # initial delay of the sequence (time offset)

# Time Domain
a1 = gen.rcos_tr(t, Tstr, td + Tstr / 2, cd, Ts, 1.0)
a2 = gen.rcos_tr(t, Tstr, td + Tstr / 2, cd, Ts, 0.5)
a3 = gen.rcos_tr(t, Tstr, td + Tstr / 2, cd, Ts, 0.0)
c = gen.rect_tr(t, Tstr, 0, td, cd)

# Correlate processor
A1_c = signal.correlate(a1, a1, 'full')
A2_c = signal.correlate(a2, a2, 'full')
A3_c = signal.correlate(a3, a3, 'full')
C1_c = signal.correlate(c, a1, 'full')
C2_c = signal.correlate(c, a2, 'full')
C3_c = signal.correlate(c, a3, 'full')

CC_c = signal.correlate(c, c, 'full')

##################### Plots ###########################
plt.figure(1, figsize=(10, 15), dpi=300)

#  Time domain

ax1 = plt.subplot(311)
plt.plot(t, a1, '-r', label='$x_{rcos}(t), \\beta = 1.0$')
Example #34
def accumulated_correlation(signals,
                            dt,
                            coordinates,
                            x_lims,
                            y_lims,
                            z_lims,
                            v_p,
                            grid_size=10):
    """
  Estimate the location of an acoustic event from multiple microphone signals

  The Accumulated Correlation algorithm estimates the source location of a
  signal that arrives at different times at multiple sensors. It starts by
  calculating the cross-correlation for each pair of microphones signals. For
  each test grid point, the expected time delay is calculated for each
  microphone. Then for each unique signal pair the difference in the expected
  time delay is used as an index into the cross correlation vectors. The
  value in the cross correlation vector is added to a running sum for the
  current test grid point. Finally, the test grid point with the largest sum
  is taken as the most likely source location of the signal.

  Parameters
  ----------
  signals : numpy.ndarray
    An array of time-domain signals
  dt : scalar
    The amount of time between each signal sample
  coordinates: numpy.ndarray
    An array of microphone coordinates (2D array with dimensions N x 2)
  x_lims: numpy.array
    The x-axis limits of the search grid
  y_lims: numpy.array
    The y-axis limits of the search grid
  z_lims: numpy.array
    The z-axis limits of the search grid
  v_p: scalar
    The speed of sound in the cavity material
  grid_size: int, optional
    The number of vertical and horizontal test points (default 10)

  Returns
  -------
  c : list
    A three-element list containing the x, y, and z coordinates
  """
    # The cross-correlation takes O(n) time, where n is the length of the signals
    # These loops take O(N^2) time, where N is the number of signals
    # For constant N, then, increasing the signal size linearly increases the
    # running time
    #
    # - Calculate the lag matrix (skip auto correlations since they aren't used)
    lag_matrix = np.zeros(
        (len(signals), len(signals), len(signals[0]) * 2 - 1))
    for i, signal_i in enumerate(signals):
        for j, signal_j in enumerate(signals[i + 1:]):
            lag_matrix[i, j + i + 1] = sig.correlate(signal_i, signal_j)
            lag_matrix[j + i + 1, i] = lag_matrix[i, j + i + 1]

    # - Create a zero matrix the size of the test point grid (sum matrix)
    sums = np.zeros((grid_size, grid_size, grid_size))
    xs = np.linspace(x_lims[0], x_lims[1], num=grid_size)
    ys = np.linspace(y_lims[0], y_lims[1], num=grid_size)
    zs = np.linspace(z_lims[0], z_lims[1], num=grid_size)
    # The math in the inner loop takes O(1) time
    # The inner two loops take O(N^2) time
    # The outer two loops take O(M^2) time if we assume equal sized horizontal
    # and vertical grids with M rows and columns
    # Together this is O(M^2*N^2) time
    #
    # - For each test point...
    for a, x in enumerate(xs):
        for b, y in enumerate(ys):
            for c, z in enumerate(zs):
                # - For each pair of microphones...
                for i, signal_i in enumerate(signals):
                    for j, signal_j in enumerate(signals[i + 1:]):
                        # - Calculate the expected difference in TOA
                        xi = coordinates[i, 0]
                        yi = coordinates[i, 1]
                        zi = coordinates[i, 2]
                        dxi = xi - x
                        dyi = yi - y
                        dzi = zi - z
                        di = math.sqrt(dxi * dxi + dyi * dyi + dzi * dzi)
                        ti = di / v_p

                        # Note: j -> j+i+1 because of the loop optimization
                        xj = coordinates[j + i + 1, 0]
                        yj = coordinates[j + i + 1, 1]
                        zj = coordinates[j + i + 1, 2]
                        dxj = xj - x
                        dyj = yj - y
                        dzj = zj - z
                        dj = math.sqrt(dxj * dxj + dyj * dyj + dzj * dzj)
                        tj = dj / v_p

                        tij = tj - ti
                        n0 = len(signals[0])
                        k = int(round(n0 - tij / dt - 1))

                        # - Add the appropriate lag matrix value for the given TOA delta to the
                        #   sum matrix
                        sums[a, b, c] += lag_matrix[i, j + i + 1, k]
    # - Use the max sum matrix element to calculate the most likely source point
    max_indices = np.unravel_index([np.argmax(sums)], np.shape(sums))
    return [
        xs[max_indices[0][0]], ys[max_indices[1][0]], zs[max_indices[2][0]]
    ]
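# Minimal usage sketch (not from the original source; numbers are illustrative
# only): four sensors on a unit cube, delays synthesized from a chosen source.
# Assumes the snippet's own imports (numpy as np, scipy.signal as sig, math).
import numpy as np

v_p = 1000.0                                  # assumed wave speed, m/s
dt = 1.0 / 100000.0                           # 100 kHz sampling
coords = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
src = np.array([0.3, 0.6, 0.2])               # "true" source position
pulse = np.zeros(512)
pulse[100:104] = 1.0
delays = [int(round(np.linalg.norm(c - src) / v_p / dt)) for c in coords]
signals = np.array([np.roll(pulse, d) for d in delays])
loc = accumulated_correlation(signals, dt, coords,
                              [0.0, 1.0], [0.0, 1.0], [0.0, 1.0],
                              v_p, grid_size=5)
print(loc)                                    # expect a grid point near src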
Example #35
fa = np.arange(400000, 120000 - 1, -20000)
scales = np.array(float(c)) * fs / np.array(fa)

[cfs1, frequencies1] = pywt.cwt(datay1, scales, wavelet, dt)
[cfs2, frequencies2] = pywt.cwt(datay2, scales, wavelet, dt)
magnitude1 = (abs(cfs1))**2
magnitude2 = (abs(cfs2))**2

corr_show = []
for i in range(len(magnitude1)):
    mean1 = magnitude1[i].mean()
    magnitude1[i] = magnitude1[i] / mean1
    mean2 = magnitude2[i].mean()
    magnitude2[i] = magnitude2[i] / mean2
    temp = signal.correlate(magnitude1[i],
                            magnitude2[i],
                            mode='same',
                            method='fft')
    meanc = temp.mean()
    corr_show.append(temp / meanc)

##corr = np.array(corr)
##
##E=207 * pow(10,9) #203#207
##p=7.86 * 1000 #7.93#7.86
##o=0.27
##h=0.002
##
##param = E * h * h * pi * pi / 3.0 / p / (1.0 - o * o)
##c = pow(param * pow(freq,2),0.25)
##time = sgn * (100.0 - dd) * 2.0 / 100.0 / c * 1000.0 / 1.5
##
Example #36
            QuadDemod = QuadDemod * np.real(signal.hilbert(
                QuadDemod))  #RcvSignal[RcvStartingSample:-int(nBitSample)]

            InphaseEnvelop = butter_lowpass_filter(
                InphaseDemod, 3 * pow(10, 3), fs, 4) / InphaseDemod.size
            QuadEnvelop = butter_lowpass_filter(QuadDemod, 3 * pow(10, 3), fs,
                                                4) / QuadDemod.size
            '''
			print("o Envelope size")
			print("- In-phase: ", InphaseEnvelop.size)
			print("- Quadrature: ", QuadEnvelop.size)
			print("")
			'''

            # Correlation
            InphaseCorrelation = signal.correlate(
                InphaseEnvelop, SymbolSequence[(0, slice(None))], mode='valid')
            QuadCorrelation = signal.correlate(QuadEnvelop,
                                               SymbolSequence[(1,
                                                               slice(None))],
                                               mode='valid')
            Correlation = (InphaseCorrelation + QuadCorrelation)
            #Correlation = Correlation / Correlation.size

            tCorrelation = np.arange(RcvStartingSample, \
               (RcvStartingSample+Correlation.size) , 1) * Ts  * 1000 # in ms
            CorrelationPeak = np.argmax(Correlation)

            Estimation = (RcvStartingSample +
                          CorrelationPeak) * Ts * 340 / 2 - 0.05

            if abs(Estimation - Distance) > 0.2:
Example #37
def get_audio(plot_debug=False):
    """ Called by
        :param:     plot_debug, if True do a debug plots
        :return:    None
        Acquire Data under a trigger mode, then save the Data to the HD.
        Get Constants from Constants.py
        Variables will be extracted from the Variables_dict who is managed by Disk_IO.
        When debug==True plot acquired data as diagnostic.
    """
    recall_pickled_dict(
    )  # Load variables from the c.s.v. c.pickle_txt_fullpath
    SamplingFreq = v.SamplingFreq  # Sampling Frequency, Hz
    TrigLevel = v.TrigLevel  # Actual Trigger level as a %
    AveragesRequired = v.AveragesRequired  # Required Averages number
    Length = v.Length  # Pile length
    Speed = v.Speed  # P wave speed in concrete
    T_echo_0 = 2 * Length / Speed  # Return time of echo_0
    indx_echo_0 = round(T_echo_0 *
                        SamplingFreq)  # Index of the 1.st echo_0 sample
    #
    save_actual_pid()  # Save the PID to a file, for successive kill
    #
    """ Called at each new Acquisition, remove files at c.temp_data_path, if the dir was removed, recreate it.
    """
    create_and_clear(c.temp_data_path, delete_files=True)
    #
    TrigLevel_32 = (
        TrigLevel / 100
    ) * c.full_scale_32  # From % to fraction of unity, to fract. of c.full_scale_32
    pk_foot = 1 / 100  # Relative factor for the pulse pedestal
    FORMAT = pyaudio.paInt32  # Signed 32 bit (Little Endian byte order)
    CHANNELS = int(2)
    byte_width = 4  # Bytes in one signed 32 bit sample
    #
    print()
    print("=" * 50)
    print("   Started '" + __file__ + "' as a separate executable")
    print("=" * 50)
    if c.IS_RASPBERRY: print("Machine is Raspberry Pi")
    else: print("Machine is not a Raspberry Pi")
    print("Audio device is:", c.SOUND_DEVICE)
    """ New error handler: suppress all libasound.so error messages, by substitution of the error handler. 
    """
    ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int,
                                   c_char_p)

    def py_error_handler(filename, line, function, err, fmt):
        print('.',
              end="")  # Print a single '.' point instead of the error message

    #   #
    #
    c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)
    asound = cdll.LoadLibrary('libasound.so')
    #
    # Set error handler
    asound.snd_lib_error_set_handler(c_error_handler)
    #
    # Initialize PyAudio
    p = pyaudio.PyAudio()
    p.terminate()
    print()
    #
    #--------------------------- Error handler end ----------------------------------
    """ Creates an instance of the PyAudio class , and open a stream to write output frames
    """
    paudio = pyaudio.PyAudio()
    stream = paudio.open(rate=SamplingFreq,
                         channels=CHANNELS,
                         format=FORMAT,
                         input=True,
                         input_device_index=c.SOUND_DEVICE,
                         frames_per_buffer=c.N_FRAMES)
    #
    i = 1
    while i <= int(AveragesRequired):
        """ 
            Wait until the Trigger level is reached getting frames continuously,
            the format is long i.e. int32; the AD has 24 bit, 8 MSBits are zeros.
            The Trigger Channel is Ch1: Odd samples: 1, 3, 5,... => Ch1
            The variable inp_short_buffer cannot be predefined as a bytearray (that is mutable) 
            because stream.read() return an array of bytes (that is immutable).
        """
        inp_old_buffer = np.zeros(CHANNELS * c.N_FRAMES, dtype=np.int32)
        inp_big_buffer = np.zeros(CHANNELS * c.N_FRAMES * c.N_BLOCKS,
                                  dtype=np.int32)
        beg = 0
        end = CHANNELS * c.N_FRAMES
        os.system("sync")  # To empty write queue
        #
        # ====================== Trigger loop Begin ======================\
        #
        print("\n" + "=o" * 25 + "=")
        if plot_debug: time.sleep(2)  # For testing, wait to close the plot window
        else: time.sleep(1)
        print("Waiting for Trigger n.", i, "...")
        while True:
            inp_short_buffer = np.frombuffer(stream.read(c.N_FRAMES), np.int32)

            #   #
            """ From the array of bytes inp_short_buffer of int32: ch1, ch2, c1, ch2,... blocks 
                of interleaved 4 bytes get the tuple ch1_int32 discarding each successive 4 bytes 
                represented by xxxx (corresponding to ch2, i.e. get only ch1 values), for a total of c.n_frames.
            """
            if len(inp_short_buffer) != c.N_FRAMES * CHANNELS: continue
            ch1_int32 = unpack(
                'ixxxx' * c.N_FRAMES, inp_short_buffer
            )  # Get tuple (i32 ch1 only) from tuple of bytes
            ch1_arr_32 = np.array(ch1_int32)  # From tuple to np array
            pk1_32 = np.max(
                np.abs(ch1_arr_32))  # Get absolute peak value of ch1 as int32
            if pk1_32 < TrigLevel_32:  # No Trigger
                inp_old_buffer = inp_short_buffer  # No Trigger, save as a possible pre-trigger
                continue
            else:  # Get Trigger
                """ The following print is only for testing: must been leaved commented.
                    Represents values of the trigger frame.
                """
                # rms1_32     = np.std(ch1_arr_32)                      # Check for single or few noise spike on ch1
                # print("Trig. rms1 = {:3.1f}".format((rms1_32 / c.full_scale_32)*1000), "mV")
                # print("Trig. pk1  = {:3.1f}".format((pk1_32  / c.full_scale_32)*1000), "mV")

                inp_big_buffer[
                    beg:end] = inp_old_buffer  # Pre-Trigger block of data
                beg = end  # Prepare 2.nd block address
                end += CHANNELS * c.N_FRAMES  # Next step into inp_big_buffer
                inp_big_buffer[
                    beg:
                    end] = inp_short_buffer  # Insert 2.nd block of Trigger data
                break
            #   #
        #   #
        # END while True:
        # ======================= Trigger loop End =======================/
        #
        # ====================== Acquisition loop start ==================\
        j = 2  # Block index: 0 and 1 blocks inserted inside the Trigger loop
        while j < c.N_BLOCKS:
            beg = end
            end += CHANNELS * c.N_FRAMES
            inp_short_buffer = np.frombuffer(stream.read(c.N_FRAMES), np.int32)
            inp_big_buffer[beg:end] = inp_short_buffer  # Packing blocks
            j += 1
        #   #
        # ====================== Acquisition loop end ====================/
        #
        """ Unpacking bytes to integers (32 bit) in the tuple tpl_i32. 
            Then separating interleaved Channels, Ch1, Ch2, getting c.N_of_samples points for channel.
            Finally we have y1, y2 as np.arrays of float64, and scaled to +/-1 and compensated for Pisound Input Gain.
            To verify the correctness of the formatting string: calcsize('ii') returns 8
        """
        tpl_i32 = unpack(
            'ii' * c.N_OF_SAMPLES,
            inp_big_buffer)  # Packed bytes and interleaved: Ch1, Ch2
        y1_tpl_i32 = tpl_i32[0:c.N_OF_SAMPLES *
                             CHANNELS:2]  # Odd:  Ch1 as integer 32
        y2_tpl_i32 = tpl_i32[1:c.N_OF_SAMPLES * CHANNELS +
                             1:2]  # Even: Ch2 as integer 32
        y1 = np.array(
            y1_tpl_i32
        ) / c.full_scale_32  # Scaled to +/- 1 full scale (result is float64)
        y2 = np.array(y2_tpl_i32) / c.full_scale_32
        y1 /= PI_INP_GAIN  # Pisound board Input Gain compensation
        y2 /= PI_INP_GAIN
        #

        # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
        # PUT A PULSE ON Ch1 TO TEST GLITCH DETECTION
        #
        # noise_1 = np.random.rand(np.max(y1.shape)) / 500
        # noise_2 = np.random.rand(np.max(y1.shape)) / 500
        # y1      = noise_1
        # y2      = noise_2
        # y1[480] = 0.5
        #
        # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\

        # ========================== SATURATION CHECK BEGIN ==========================\
        #
        y1_Min = np.min(y1)  # Getting min / max
        y1_Max = np.max(y1)
        y2_Min = np.min(y2)
        y2_Max = np.max(y2)
        max_abs1 = max(abs(y1_Min), abs(y1_Max))  # ch1 absolute peak
        max_abs2 = max(abs(y2_Min), abs(y2_Max))  # ch2 absolute peak
        max_abs = max(max_abs1, max_abs2)  # Greater absolute maximum peak
        #
        if max_abs < c.satur_threshold:  # Saturation check
            is_Saturated = False
        else:
            is_Saturated = True
            doBeep(on_list=alarm_3_S[0], off_list=alarm_3_S[1])
            print(Saturation_msg, "max_abs =", max_abs)
        #   #
        # ========================== SATURATION CHECK END ============================/

        # ========================== GLITCH TESTING BEGIN ============================\
        #
        W = 1  # Range minimum width
        is_err1 = False
        is_err2 = False
        #----------- NEW TEST BASED ON THE DIFFERENTIATION OF THE GLITCH (DIRAC DELTA FUNCTION)
        y1_d = np.diff(y1)
        y2_d = np.diff(y2)
        indx1 = min(min(
            np.nonzero(np.abs(y1) == max_abs1)))  # Index of max(abs(y1))
        indx2 = min(min(
            np.nonzero(np.abs(y2) == max_abs2)))  # Index of max(abs(y2))
        if indx1 < (np.max(y1.shape) - 2) and (np.max(
                abs(y1_d[indx1 - 1] - max_abs1)) < max_abs1 / 50):
            is_err1 = True
        #   #
        if indx2 < (np.max(y2.shape) - 2) and (np.max(
                abs(y2_d[indx2 - 1] - max_abs2)) < max_abs2 / 50):
            is_err2 = True
        #   #
        #-------------------------------------------------------------------------------
        print("")
        if is_err1: print(Glitch_msg, "Ch1")
        if is_err2: print(Glitch_msg, "Ch2")
        if is_err1 or is_err2:  # Glitches counted as saturation:
            doBeep(on_list=alarm_1_S[0], off_list=alarm_1_S[1])
            is_Saturated = True  #   the acquisition will be repeated
        #   #
        # ============================ GLITCH TESTING END ============================/

        # ======================= CHANNELS SWAP TESTING BEGIN ========================\
        #
        """ Sometime the first acquisition can swap ch1 with ch2, here we detect that occurrence, and in the case, 
            we recovery by a swap between ch1 and ch2. 
            1) The autocorrelations of y1 and y2 are calculated.
            2) The max absolute peaks of the autocorrelations are calculated.
            3) Each one of the autocorrelation amplitudes are normalized using the absolute peaks of point (2).
            4) The normalized autocorrelations are rounded to 1 decimal to exclude little oscillations around 0.
            5) The differentiation of the sign changements of the normalized and rounded correlations is calculated.
            6) The differentiations of point (5) are cumulated to obtain the final scalar count.
            7) The ch1 Force signal is a well behaved and then bandlimited pulse with only two zero crossing 
              (excluding noise), by contrast the ch2 Accel signal being a derivative (accelerometer signal) 
              has surely more sign changements, consequently when the total sign changes of ch1 exceeds that of ch2 
              the channels are swapped. 
        """
        upper = int(3 * indx_echo_0)  # Upper index limit for checking
        corr_y1 = correlate(y1[0:upper], y1[0:upper], mode='same')
        corr_y2 = correlate(y2[0:upper], y2[0:upper], mode='same')
        pk_abs_corr_1 = np.max(np.abs(corr_y1))
        pk_abs_corr_2 = np.max(np.abs(corr_y2))
        corr_y1 = np.round(corr_y1 / pk_abs_corr_1, 1)
        corr_y2 = np.round(corr_y2 / pk_abs_corr_2, 1)
        nz_diff_corr_y1 = (np.diff(np.sign(corr_y1), n=1) != 0).sum()
        nz_diff_corr_y2 = (np.diff(np.sign(corr_y2), n=1) != 0).sum()
        #
        if nz_diff_corr_y1 < nz_diff_corr_y2:
            is_swapped = False
        else:
            is_swapped = True
        #   #
        if plot_debug:
            print("\n" + "Zero crossing comparison:", nz_diff_corr_y1, " <  ",
                  nz_diff_corr_y2, " ", not is_swapped)
        #   #
        print("            Good if:")
        print(" nz_diff_corr_y1 < nz_diff_corr_y2")
        print("-----------------------------------")
        print("       ", nz_diff_corr_y1, "       |       ", nz_diff_corr_y2)
        print("-----------------------------------")
        if is_swapped:
            print("\n" + "/" * 35)
            print("     ERROR: Swapped ch1, ch2")
            print("        CORRECTION DONE!")
            print("/" * 35 + "\n")
            y1, y2 = y2, y1  # Restore from swap error
        #   #
        # ======================== CHANNELS SWAP TESTING END =========================/

        if not is_Saturated:
            """ If is_Saturated returns to the external acquisition loop: while i <= int(AveragesRequired)
                Preparing good data for storage:
                Relocation of the start of the inputrms1_32 signal in y1, y2, to eliminate the pre-trigger 
                part that has a variable length. That part will be shortened as follows: 
                a)  Search the first index where the Pulse input Data array (y1) is bigger than pk * pk_foot. 
                    The index of that basement point will be named index_Trig.
                b)  The new starting index is obtained decreasing, as possible, index_Trig by a fixed amount, 
                    in order to assure the inclusion of sufficient initial part of the signal. 
                    The corresponding amount of time, guard_T, will be equal to the hammer 
                    pulse width t.HAMMER_DT, the number of samples will be N_guard. 
            """
            y1_shape = y1.shape  # Array y1 dimensions
            y2_shape = y2.shape  # Array y2 dimensions
            if not (np.max(y1_shape) == np.max(y2_shape)):  # Length comparison
                print(Length_msg)
                print("get_audio: y1_shape =", y1_shape)
                print("get_audio: y2_shape =", y2_shape)
                doBeep(on_list=bip_1_5_S[0], off_list=bip_1_5_S[1])
                return
            #   #
            # ===============================  Relocation Begin =========================\
            #
            pk1 = pk1_32 / c.full_scale_32  # Peak scaled to +/- 1
            y1_length = np.max(y1_shape)  # Length of y1 array
            indexes = np.array(np.nonzero(
                abs(y1) > pk1 *
                pk_foot))[0, :]  # Array of indexes, [0,:] => from 1xN to N
            print("len(indexes) =", len(indexes))
            index_Trig = np.amin(
                indexes)  # Lowest index i.e. index pedestal start
            dT = 1.0 / SamplingFreq  # Sampling time
            guard_T = HAMMER_DT  # Safety margin time, before Pulse pedestal
            N_guard = int(round(guard_T /
                                dT))  # Corresponding guard margin as samples
            if index_Trig > N_guard:  # Guard must begin after y1[0]
                new_start = index_Trig - N_guard  # Index (>=0) for the new starting point
                tmp_array = np.zeros(
                    y1_length)  # Temporary empty array for new y1
                tmp_array[0:-1 - new_start] = copy.copy(
                    y1[new_start:-1]
                )  # Fill y1 without the guard section of y1
                # leaves a tail of zeros
                y1 = copy.copy(tmp_array)  # y1 replaced by relocated data
                tmp_array = np.zeros(np.max(
                    y2.shape))  # Temporary empty array for new y2
                tmp_array[0:-1 - new_start] = copy.copy(
                    y2[new_start:-1]
                )  # Fill y2 without the guard section of y2
                y2 = copy.copy(tmp_array)  # y2 replaced
                tmp_length = np.max(y1.shape)  # Actual length of y1 array
                if tmp_length != y1_length:
                    print(
                        "\n" +
                        "ERROR in get_audio: Length of y1, y2 modified erroneously"
                    )
                #   #
            else:  # Leave the signal unaltered
                pass
            #   #
            #===============================  Relocation End ===========================/
            #
            pk1 = np.max(np.abs(y1))
            pk2 = np.max(np.abs(y2))
            rms1 = np.std(y1)
            rms2 = np.std(y2)
            print("\n" + "Saving data to Disc")
            print("rms1 = {:3.1f}".format(rms1 * 1000),
                  "mV;  pk1  = {:3.1f}".format(pk1 * 1000), "mV")
            print("rms2 = {:3.1f}".format(rms2 * 1000),
                  "mV;  pk2  = {:3.1f}".format(pk2 * 1000), "mV")
            """ Save data to disc
            """
            name_1 = "/y1_" + str('{:02d}'.format(i))
            name_2 = "/y2_" + str('{:02d}'.format(i))
            path_1 = c.temp_data_path + name_1
            path_2 = c.temp_data_path + name_2
            #
            if c.sav_mode == "npy":
                """ Saving in Numpy binary file, not compressed, add extension .npy 
                """
                np.save(path_1, y1)  # Uncompressed format
                np.save(path_2, y2)
            elif c.sav_mode == "npz":
                """ Saving in Numpy binary file, compressed, add extension .npz 
                """
                np.savez_compressed(path_1, y1=y1)  # Compressed format
                np.savez_compressed(path_2, y2=y2)
            #   #
            elif c.sav_mode == "mat":
                """ Saving in Matlab format V.5, compressed, add automatically the extension .mat
                    loadmat() will return the vector in a dictionary ex:
                    d    = scipy.io.loadmat("vector.mat)
                    vect = d["vect"]
                    vect = d1["y1"]
                    and with one more dimension added ("column"):
                    original vect.shape -> (n,), returned vect.shape -> (n, 1); 
                    to return to the original dimension:
                    vect_reshaped = vect[:,0]
                """
                scipy.io.savemat(path_1, {"y1": y1},
                                 do_compression=True,
                                 oned_as="column")
                scipy.io.savemat(path_2, {"y2": y2},
                                 do_compression=True,
                                 oned_as="column")
            #   #
            print("New acquired data:", name_1, name_2)
            print("=o" * 25 + "=")
            save_blows(c.blows_list_txt, name_1, name_2)
            os.system("sync")  # Delayed write causes a random delay
            time.sleep(0.2)  # Delays for x seconds
            if plot_debug:
                sel = int(15 * indx_echo_0)  # was 1.5
                mx1 = max(np.abs(y1[0:sel]))
                mx2 = max(np.abs(y2[0:sel]))
                mx = 1.1 * max(mx1, mx2)
                tp = np.arange(
                    c.N_OF_SAMPLES) * 1.0 / SamplingFreq  # Time vector
                fig = plt.figure(1)
                fig.set_size_inches(w=3, h=4)
                fig.subplots_adjust(hspace=0.50)
                plt.subplot(211)
                plt.plot(tp[0:sel], y1[0:sel])
                plt.title('get_audio Hammer')
                plt.xlabel('Time [s]')
                plt.ylabel('Force')
                plt.grid(True)
                plt.axis([tp[0], tp[sel], -mx, mx])
                #
                plt.subplot(212)
                plt.plot(tp[0:sel], y2[0:sel])
                plt.title('get_audio Accel')
                plt.xlabel('Time [s]')
                plt.ylabel('Accel')
                plt.grid(True)
                plt.axis([tp[0], tp[sel], -mx, mx])
                #
                plt.show()
                time.sleep(0.1)
            # END if plot_debug:
            i += 1
        # END if not is_Saturated):
    # END while i <= int(AveragesRequired):
    #
    stream.stop_stream()
    stream.close()  # Close inp pyaudio object
    paudio.terminate()
    #
    print("get_audio: closed input device, terminated")
Example #38
# calculate delta t
win_len = 5  # 5 second window
shift_fl, shift_fr, shift_lr = [], [], []
corr_max_fl, corr_max_fr, corr_max_lr = [], [], []
for i in range(len(mox_diff_f) // 10):  # update at 10 Hz
    if i < win_len * 10:
        continue
    else:
        front, left, right = [], [], []
        for idx in range((i - win_len * 10) * 10, i * 10):
            front.append(mox_diff_f[idx])
            left.append(mox_diff_l[idx])
            right.append(mox_diff_r[idx])
        time = np.arange(1 - win_len * 100,
                         win_len * 100) * 0.01  # -(N-1)~(N-1)
        corr_fl = signal.correlate(np.array(front), np.array(left))
        corr_fr = signal.correlate(np.array(front), np.array(right))
        corr_lr = signal.correlate(np.array(left), np.array(right))
        corr_max_fl.append(corr_fl.max())
        corr_max_fr.append(corr_fr.max())
        corr_max_lr.append(corr_lr.max())
        shift_fl.append(time[corr_fl.argmax()])
        shift_fr.append(time[corr_fr.argmax()])
        shift_lr.append(time[corr_lr.argmax()])

# save data
fd_out = h5py.File('./correlation.h5', 'w')
grp = fd_out.create_group("corr")
dset_max_fl = grp.create_dataset('corr_max_fl', (len(corr_max_fl), ),
                                 dtype='f')
dset_max_fr = grp.create_dataset('corr_max_fr', (len(corr_max_fr), ),
Example #39
0
    #in differences
    #VARMA(np.diff(x,axis=0),B,C)

    #Note:
    # * signal correlate applies same filter to all columns if kernel.shape[1]<K
    #   e.g. signal.correlate(x0,np.ones((3,1)),'valid')
    # * if kernel.shape[1]==K, then `valid` produces a single column
    #   -> possible to run signal.correlate K times with different filters,
    #      see the following example, which replicates VAR filter
    x0 = np.column_stack([np.arange(T), 2 * np.arange(T)])
    B[:, :, 0] = np.ones((P, K))
    B[:, :, 1] = np.ones((P, K))
    B[1, 1, 1] = 0
    xhat0 = VAR(x0, B)
    xcorr00 = signal.correlate(x0, B[:, :, 0])  #[:,0]
    xcorr01 = signal.correlate(x0, B[:, :, 1])
    print(
        np.all(
            signal.correlate(x0, B[:, :, 0], 'valid')[:-1, 0] == xhat0[P:, 0]))
    print(
        np.all(
            signal.correlate(x0, B[:, :, 1], 'valid')[:-1, 0] == xhat0[P:, 1]))

    #import error
    #from movstat import acovf, acf
    from statsmodels.tsa.stattools import acovf, acf
    aav = acovf(x[:, 0])
    print(aav[0] == np.var(x[:, 0]))
    aac = acf(x[:, 0])
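    # Hedged cross-check (illustrative; assumes acovf's default biased,
    # demeaned normalization): the same autocovariances via signal.correlate.
    xd = x[:, 0] - x[:, 0].mean()
    aav2 = signal.correlate(xd, xd, 'full')[len(xd) - 1:] / len(xd)
    print(np.allclose(aav, aav2))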
Example #40
0
spR = spR[:ds // 2 + 1]

# the amplitude spectra, presumably
ampL = np.array([np.sqrt(c.real**2 + c.imag**2) for c in spL])
ampR = np.array([np.sqrt(c.real**2 + c.imag**2) for c in spR])
nspL = spL / ampL
nspR = spR / ampR

sds, = spL.shape
ws = sds - 100

#C1 = nspL[:ws]np.conj(nspR[:sds])
#C2 = CCF(nspL[:sds],np.conj(nspR[:ws]))

#C = CCF(spL[:ws],spR[:sds])
C1 = sig.correlate(nspR[:sds], nspL[:ws], mode="valid")
C2 = sig.correlate(nspL[:sds], nspR[:ws], mode="valid")
csp1 = ifft(C1)
csp2 = ifft(C2)

#print "ds:",sds,", ws:",ws
#print "c1 max index:",C1.index(max(C1)),", max:",max(C1)
#print "c2 max index:",C2.index(max(C2)),", max:",max(C2)
print "csp1 max index:", np.argmax(csp1), ", max:", max(csp1)
print "csp2 max index:", np.argmax(csp2), ", max:", max(csp2)
#
#print "c.shape:",C.shape
#print "dL.shape:",dL.shape
#print "spL.shape:",spL.shape
plt.plot(csp1, "r")
plt.plot(csp2, "b")
Example #41
0
def jd_rot_fit2(velsfile1="Q1v.dat", velsfile2="Q4v.dat"):
    #
    #  this version uses r as the spatial variable, not x (LSCP length)
    #
    import numpy as np
    import matplotlib.pyplot as plt
    from scipy.signal import medfilt, correlate

    plt.rc('mathtext', default='regular')

    plt.ion()
    plt.clf()
    if velsfile1 is None:
        raise ValueError("Please give input file")
    r0 = 8.5
    theta0 = 220.0
    l1, vel1 = get_xy_data(velsfile1)
    l2, vel2 = get_xy_data(velsfile2)
    v1_temp = new_vLSR(l1, vel1)  # Apply the VLSR correction
    v2_temp = new_vLSR(l2, vel2)
    v1 = medfilt(v1_temp, kernel_size=5)
    v2 = medfilt(v2_temp,
                 kernel_size=5)  # Do the median filter that was done in McG07
    ndat1 = len(v1)
    ndat2 = len(v2)
    sinl1 = abs(np.sin(l1 * np.pi / 180))
    wt1 = np.array(sinl1)
    sinl2 = abs(np.sin(l2 * np.pi / 180))
    wt2 = np.array(sinl2)
    rot1 = np.array(abs(v1) + theta0 * sinl1)
    rg1 = np.array(r0 * sinl1)
    rot2 = np.array(abs(v2) + theta0 * sinl2)
    rg2 = np.array(r0 * sinl2)
    #
    # Create arrays with both datasets
    #    print "Creating array of length",ndat1+ndat2;
    rg = np.append(rg1, rg2)
    rot = np.append(rot1, rot2)
    wt = np.append(wt1, wt2)
    x = rg / r0
    y = rot / theta0
    weights = wt / np.mean(y)
    # Fit the array (currently unweighted so dominated by dense sampling at high R)
    pars = np.polyfit(x, y, 1)
    #    print pars
    yfit = np.polyval(pars, x)
    rotfit = yfit * theta0
    #
    # Create arrays of the differences from fit
    fit1 = np.polyval(pars, rg1 / r0)
    fit2 = np.polyval(pars, rg2 / r0)
    diff1 = rot1 - fit1 * theta0
    diff2 = rot2 - fit2 * theta0
    #
    #   diff1 and diff2 are the residual velocities after subtracting the best fits
    #
    #  now go back to longitude for equal step in x = linear dist along magic circle
    #  we'll just take the range x=3 to x=9.5 kpc (scaled by r0) defined by x01,x02
    #
    ###    x0=abs(l1*r0*np.pi/180.)
    x0 = sinl1 * r0
    #  the 1st quadrant data are in the opposite order now, flip both arrays
    diff0 = diff1
    x1 = x0[::-1]
    diff1 = diff0[::-1]
    ###    x2=abs((360.-l2)*r0*np.pi/180.)
    x2 = sinl2 * r0
    ###    x01=np.linspace(3.,9.5,1300)
    ###    x02=np.linspace(3.,9.5,1300)
    x01 = np.linspace(3., 7.65, 930)
    x02 = np.linspace(3., 7.65, 930)
    y01 = np.interp(x01, x1, diff1)
    y02 = np.interp(x02, x2, diff2)
    #
    #  note the new step size is Delta-x = 4650/930 pc = 5 pc
    #
    plt.plot(x1, diff1, 'r+', label="QI")
    plt.plot(x2, diff2, 'b+', label="QIV")
    plt.plot(x01, y01, 'r-', label="QI interp")
    plt.plot(x02, y02, 'b-', label="QIV interp")
    #    plt.plot(x01,y01,'ro',label="QI interp")
    #    plt.plot(x02,y02,'bo',label="QIV interp")
    plt.xlabel(r"$r_G \,(kpc)$")
    ###    plt.xlabel(r"$x \,(kpc)$")
    plt.ylabel(r"$\Delta \Theta \, (km\, s^{-1})$")
    #    plt.legend(loc=2)
    ###    boxx=[3.,3.,9.5,9.5,3.]
    boxx = [3., 3., 7.65, 7.65, 3.]
    boxy = [-14.5, +14.5, +14.5, -14.5, -14.5]
    plt.plot(boxx, boxy, 'k-')
    plt.show()
    #
    z = correlate(y01, y02) / (len(y01) * np.std(y01) * np.std(y02))
    z1 = correlate(y01, y01) / (len(y01) * np.var(y01))
    z2 = correlate(y02, y02) / (len(y01) * np.var(y02))
    # just for a test:
    #    z=jdccf_0pad(y01,y02)/(np.std(y01)*np.std(y02))
    #    z1=jdccf_0pad(y01,y01)/(np.var(y01))
    #    z2=jdccf_0pad(y02,y02)/(np.var(y02))
    # test done
    #    print len(z)
    zx1 = np.linspace(1, len(z), len(z))
    zx2 = (zx1 - (len(z) / 2.)) * .005
    plt.figure(2)
    plt.plot(zx2, z, 'g-', label="ccf")
    plt.plot(zx2, z1, 'r-', label="acf Q1")
    plt.plot(zx2, z2, 'b-', label="acf Q4")
    plt.legend(loc=1)
    ##    plt.xlabel(r"$R \,(kpc)$")
    ##    plt.ylabel(r"$\Delta\Theta (km\, s^{-1})$")
    ###    plt.xlabel(r"$\Delta x \, \, \, (kpc)$")
    plt.xlabel(r"$\Delta r_G \, (kpc)$")
    plt.ylabel("Normalised Correlation Coefficient")
    plt.minorticks_on()
    plt.vlines(0., -0.5, 1.5, linestyles='dotted', linewidth=1.2)
    #plt.hlines(0.,-3.,3.,linestyles='dotted',linewidth=1.2)
    plt.ylim(-0.5, 1.2)
    plt.xlim(-3, 3)
    ##    plt.xlim(-3.5,3.5)
    #
    #  the y axis is normalized to 1 for 100% correlation
    #  the x axis is in lag steps of 5 pc (0.005 kpc per step, see zx2)
    #

    #     keep this for later, in case plots of the raw residuals vs. R are needed
    #    plt.plot(rg1,diff1,'r+',label="QI")
    #    plt.plot(rg2,diff2,'b+',label="QIV")
    #    plt.xlabel(r"$R \,(kpc)$")
    #    plt.ylabel(r"$\Delta\Theta (km\, s^{-1})$")
    #    plt.xlim(3.0,8.0)
    #    plt.ylim(-15.,15.)
    #    print np.mean(diff1),np.mean(diff2)
    #    print np.median(diff1),np.median(diff2)
    #    print np.std(diff1),np.std(diff2)
    #    plt.legend(loc=2)
    #    plt.show()
    return z1, z2, z
Example #42
0
std2 = ss().fit_transform(c_d[0])

corrupted_pca1 = sklearnPCA(n_components=169)
corrupted = corrupted_pca1.fit_transform(std2)

neighbors = classifier.kneighbors(corrupted)
predictions = classifier.predict(corrupted)

##PART 2.2: OPTIMAL DATA ALIGNMENT##
##Response to 2.2##
#The scipy module's signal.correlate function was used to calculate the
#cross correlation of a corrupted univariate time series with its nearest
#neighbor predicted from the classifier. The point of maximal correlation was
#determined from this cross-correlation array and used as the centering point
#to optimally align the data.
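# A quick self-contained check of that bookkeeping (hedged sketch: toy values
# only, reusing the np/correlate imports this script already relies on):
ref = np.array([0., 1., 2., 3., 2., 1., 0.])
delayed = np.roll(ref, 2)              # delay ref by a known 2 samples
xc = correlate(delayed, ref)           # full mode: len(xc) == 2*len(ref) - 1
print(np.argmax(xc) - (len(ref) - 1))  # zero lag is index len(ref)-1 -> prints 2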

closest_n = neighbors[1][:, 0]
z = 0
alignment = np.zeros((30000, 2))
for i in range(len(closest_n)):
    b_sig = c_d[0][i]
    a_sig = d_c[0][closest_n[i]]
    xcorr = correlate(a_sig, b_sig)
    lag = abs(np.argmax(xcorr))
    delay = abs(lag - len(a_sig))  #Since signal.correlate uses full cross
    #correlation and len(xcorr) = 2*457 - 1
    end = delay + c_d[1][i]
    if end > 457:
        end = 457
    alignment[i] = [delay, end]
Example #43
0
    x_pad = np.pad(x, pad_width=(1, 0), mode='constant', constant_values=0)
    print((convolution_1d(x_pad, w)))

    x_pad = np.pad(x, pad_width=(0, 1), mode='constant', constant_values=0)
    print((convolution_1d(x_pad, w)))

    # scipy.signal.convolve() function
    conv = convolve(x, w, mode='valid')
    print(conv)
    conv_full = convolve(x, w, mode='full')  # every element of x contributes equally to the result.
    print(conv_full)
    conv_same = convolve(x, w, mode='same')  # returns an array the same size as x.
    print(conv_same)

    # scipy.signal.correlate() function
    cross_corr = correlate(x, w, mode='valid')
    print(cross_corr)
    cross_corr_full = correlate(x, w, mode='full')
    print(cross_corr_full)
    cross_corr_same = correlate(x, w, mode='same')
    print(cross_corr_same)

    # scipy.signal.convolve2d(), scipy.signal.correlate2d()
    # (4, 4) 2d ndarray
    x = np.array([[1, 2, 3, 0], [0, 1, 2, 3], [3, 0, 1, 2], [2, 3, 0, 1]])

    # (3, 3) 2d ndarray
    w = np.array([[2, 0, 1], [0, 1, 2], [1, 0, 2]])

    # cross-correlation of x and w (valid, full, same)
    corr_2d_valid = correlate2d(x, w, mode='valid')
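    # full / same variants, completing the pattern used for the 1-D case above
    # (variable names here are illustrative additions):
    corr_2d_full = correlate2d(x, w, mode='full')
    corr_2d_same = correlate2d(x, w, mode='same')
    print(corr_2d_valid, corr_2d_full, corr_2d_same, sep='\n')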
Example #44
0
File: digits.py Project: burbanom/digits
def get_acc_num(matrix, dictionary):
    """ We take sclices of the entire character matrix read from the digits.txt file and assign to them the corresponding number. """
    sizes = np.array([x[x != 0].size for x in indexer(matrix.T, 3)])
    acc = 9 * ['?']
    second_best_corrs = dict(
    )  # this dictionary contains the correlations and positions where a given 3x3 matrix had a good match
    for key, val in dictionary.items():
        # number of non-zero character matrix elements
        non_zero = val[val != 0].size

        # correlation calculation between the matrix slice and the 3x3 val matrix that is mapped to a given number
        # From scipy: https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.correlate.html
        # This approach is limited by the fact that the 3x3 matrix is likely to be matched in many places throughout the character file matrix.
        # This is circumvented, however by making sure that the size (non-zero), as well as the correlation provide a good match.
        corr = signal.correlate(matrix, val, 'valid')

        holder = np.zeros(len(corr[0]), dtype=np.int8)
        for i, x in enumerate((corr != non_zero)[0]):
            if x:
                holder[i] = corr[0][i]

        # We keep track of the digits that provide a good match, so that if we need to guess the identity
        # of a character we do not have to perform this loop again!
        second_best = np.where(holder == holder.max())
        second_best_corrs[key] = (holder.max(),
                                  (second_best[0] / 3).astype(int))

        overlaps, indices = np.where(corr == non_zero)

        if len(indices) > 0:
            indices = np.array(list(map(int, indices / 3)))
            for index in indices:
                if sizes[index] == non_zero:
                    acc[index] = key

    if '?' in acc:  # if a number has not been properly assigned, we can use the second_best_corrs to propose values
        possibilities = list()
        for index in [i for i, x in enumerate(acc) if x == '?']:
            closest_index = dict()
            for key, val in second_best_corrs.items():
                if index in val[1]:
                    closest_index[key] = val[0]

            for replace in sorted(closest_index,
                                  key=closest_index.get,
                                  reverse=True):
                acc[index] = replace
                dummy = acc.copy()
                dummy.append(' ')

                possibilities.append(dummy)

        possibilities = [
            checksum(sublist) for sublist in possibilities
            if '?' not in sublist
        ]
        possibilities = [item for sublist in possibilities for item in sublist]

        account = ''.join(possibilities)

    else:
        account = ''.join(checksum(acc))
    return account
Example #45
0
#corrects for wwvb offset
print(len(wwvbSignal))
#temp = wwvbSignal[0:4800:1]
wwvbSignal = wwvbSignal[5500::]
wwvbSignal = np.array(wwvbSignal)
#temp = np.array(temp)
#wwvbSignal = np.concatenate((wwvbSignal, temp))
print(len(wwvbSignal))

#One min wwvb
bitStream = ""
devList = []
for i in range(59):
    start = i * 44100
    end = (i * 44100) + 44100
    corrZero = signal.correlate(wwvbSignal[start:end], Zero, mode='full')
    corrOne = signal.correlate(wwvbSignal[start:end], One, mode='full')
    corrX = signal.correlate(wwvbSignal[start:end], X, mode='full')
    numZero = int(max(corrZero[44078:44123:1]) / 1000000000)
    numOne = int(max(corrOne[44078:44123:1]) / 1000000000)
    numX = int(max(corrX[44078:44123:1]) / 1000000000)

    if numOne > (numX * 2):  # we have one or zero
        if numZero > (numOne + (numX * 0.8)):
            bitStream = bitStream + '0'
        else:
            bitStream = bitStream + '1'
    else:
        bitStream = bitStream + 'X'
    devList.append(numZero)
    devList.append(numOne)
Example #46
0
#%% Load the sync signals only and compare the delay between the two
sync1, fs = sf.read(sync1_file)
sync2, fs = sf.read(sync2_file)

#%%
half_cycle = int(0.7 * 0.04 * fs)
plt.figure()
plt.plot(sync1[:half_cycle], label='device 1')
plt.plot(sync2[:half_cycle], label='device 2')
plt.legend()
#%%
samples = int((1 / 25) * fs * 0.7)
part_sync1 = sync1[:samples]
part_sync2 = sync2[:samples]

cc = signal.correlate(part_sync2, part_sync1, 'full')
peak = int(np.argmax(cc))
delay = peak - (cc.size - 1) // 2  # zero lag of a 'full' cross-correlation sits at index (size - 1) / 2

print(f'Device 2 is {delay} samples ahead of device 1')
#%%
plt.figure()
plt.plot(cc)
plt.plot(peak, cc[peak], '*')
plt.vlines(part_sync2.size, 0, np.max(cc))

#%% Now make the composite file which doesn't need to be delay adjusted - just < 1 sample difference.
multichannel_audio = np.zeros((sync1.size, 16))

all_audio = []
for i, each in enumerate(filenames):
Example #47
0
    def align_motion(self,
                     period=(-np.inf, np.inf),
                     side='left',
                     sd_thresh=10,
                     display=False):
        # Get data samples within period
        wheel = self.data['wheel']
        self.alignment.label = side
        self.alignment.to_mask = lambda ts: np.logical_and(
            ts >= period[0], ts <= period[1])
        camera_times = self.data['camera_times'][side]
        cam_mask = self.alignment.to_mask(camera_times)
        frame_numbers, = np.where(cam_mask)

        if frame_numbers.size == 0:
            raise ValueError('No frames during given period')

        # Motion Energy
        camera_path = self.video_paths[side]
        roi = (*[slice(*r) for r in self.roi[side]], 0)
        try:
            # TODO Add function arg to make grayscale
            self.alignment.frames = \
                vidio.get_video_frames_preload(camera_path, frame_numbers, mask=roi)
        except AssertionError:
            self.log.error('Failed to open video')
            return None, None, None
        self.alignment.df, stDev = video.motion_energy(self.alignment.frames,
                                                       2)
        self.alignment.period = period  # For plotting

        # Calculate rotary encoder velocity trace
        x = camera_times[cam_mask]
        Fs = 1000
        pos, t = wh.interpolate_position(wheel.timestamps,
                                         wheel.position,
                                         freq=Fs)
        v, _ = wh.velocity_smoothed(pos, Fs)
        interp_mask = self.alignment.to_mask(t)
        # Convert to normalized speed
        xs = np.unique([find_nearest(t[interp_mask], ts) for ts in x])
        vs = np.abs(v[interp_mask][xs])
        vs = (vs - np.min(vs)) / (np.max(vs) - np.min(vs))

        # FIXME This can be used as a goodness of fit measure
        USE_CV2 = False
        if USE_CV2:
            # convert from numpy format to openCV format
            dfCV = np.float32(self.alignment.df.reshape((-1, 1)))
            reCV = np.float32(vs.reshape((-1, 1)))

            # perform cross correlation
            resultCv = cv2.matchTemplate(dfCV, reCV, cv2.TM_CCORR_NORMED)

            # convert result back to numpy array
            xcorr = np.asarray(resultCv)
        else:
            xcorr = signal.correlate(self.alignment.df, vs)

        # Cross correlate wheel speed trace with the motion energy
        CORRECTION = 2
        self.alignment.c = max(xcorr)
        self.alignment.xcorr = np.argmax(xcorr)
        self.alignment.dt_i = self.alignment.xcorr - xs.size + CORRECTION
        self.log.info(
            f'{side} camera, adjusted by {self.alignment.dt_i} frames')

        if display:
            # Plot the motion energy
            fig, ax = plt.subplots(2, 1, sharex='all')
            y = np.pad(self.alignment.df, 1, 'edge')
            ax[0].plot(x, y, '-x', label='wheel motion energy')
            thresh = stDev > sd_thresh
            ax[0].vlines(x[np.array(
                np.pad(thresh, 1, 'constant', constant_values=False))],
                         0,
                         1,
                         linewidth=0.5,
                         linestyle=':',
                         label=f'>{sd_thresh} s.d. diff')
            ax[1].plot(t[interp_mask], np.abs(v[interp_mask]))

            # Plot other stuff
            dt = np.diff(camera_times[[0, np.abs(self.alignment.dt_i)]])
            fps = 1 / np.diff(camera_times).mean()
            ax[0].plot(t[interp_mask][xs] - dt,
                       vs,
                       'r-x',
                       label='velocity (shifted)')
            ax[0].set_title('normalized motion energy, %s camera, %.0f fps' %
                            (side, fps))
            ax[0].set_ylabel('rate of change (a.u.)')
            ax[0].legend()
            ax[1].set_ylabel('wheel speed (rad / s)')
            ax[1].set_xlabel('Time (s)')

            title = f'{self.ref}, from {period[0]:.1f}s - {period[1]:.1f}s'
            fig.suptitle(title, fontsize=16)
            fig.set_size_inches(19.2, 9.89)

        return self.alignment.dt_i, self.alignment.c, self.alignment.df
Example #48
0
rhs = read_export_tool_csv(right_watch)
lhs['n'] = (lhs['x'] ** 2 + lhs['y'] ** 2 + lhs['z'] ** 2) ** 0.5
rhs['n'] = (rhs['x'] ** 2 + rhs['y'] ** 2 + rhs['z'] ** 2) ** 0.5

# Synchronize signal********************
# Find shake area using protocol and by plotting the signal  #example: plt.plot(lft.loc[range(100,8000),'norm'])
lhs_shake_range = range(3000, 4000)
rhs_shake_range = range(2000, 3000)

# find offset
# time_offset = ta.find_offset(lhs.loc[lhs_shake_range,'n'].as_matrix(), rhs.loc[rhs_shake_range,'n'].as_matrix())


a = lhs.loc[lhs_shake_range, 'n'].to_numpy()  # .as_matrix() was removed in pandas 1.0
b = rhs.loc[rhs_shake_range, 'n'].to_numpy()
offset = np.argmax(signal.correlate(a, b))
shift_by = a.shape[0] - offset
time_offset = rhs.loc[rhs_shake_range[0] + shift_by, 'ts'] - lhs.loc[lhs_shake_range[0], 'ts']
# The link below says you need to subtract 1 from shift_by, but based on my plotting it looks like you don't need to
# link: http://stackoverflow.com/questions/4688715/find-time-shift-between-two-similar-waveforms

# Plot shift
st = 500
en = 550
plt.plot(a[range(st, en)])
# plt.plot(b[range(st, en)])

plt.plot(b[range(st + shift_by, en + shift_by)])


# Segment data into samples *****************************************
Example #49
0
# In[6]:

from scipy.signal import convolve

# In[7]:

convolve(x, w, mode='valid')

# In[8]:

from scipy.signal import correlate

# In[9]:

correlate(x, w, mode='valid')

# In[10]:

correlate(x, w, mode='full')

# In[11]:

correlate(x, w, mode='same')

# In[12]:

x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
w = np.array([[2, 0], [0, 0]])

# In[13]:
Example #50
0
Problem Title: Solve the Approximate Pattern Matching Problem
URL: https://stepik.org/lesson/9/step/4?course=Stepic-Interactive-Text-for-Week-2&unit=8224
Code Challenge:      Approximate Pattern Matching Problem: Find all approximate occurrences of a pattern in a string.
     Input: Strings Pattern and Text along with an integer d.
     Output: All starting positions where Pattern appears as a substring of Text with at most d mismatches.


'''

import sys
import utils
import numpy as np
from scipy import signal


def ApproximatePatternMatching(seq, pattern, d):
    'Returns positions in seq where pattern appears with a hamming distance <= d'
    ref_length = len(pattern)
    text_seq = utils._convert_to_matrices(seq)
    pattern_seq = utils._convert_to_matrices(pattern)
    matches = signal.correlate(text_seq, pattern_seq, mode='valid').flatten()
    return [i for i, x in enumerate(matches) if x >= ref_length - d]
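# --- Hedged, self-contained sketch of the same one-hot/correlate trick, in
# case utils._convert_to_matrices is unavailable (the encoding below is an
# assumption about what that helper likely does):
def _one_hot(s, alphabet='ACGT'):
    # (len(s), 4) matrix with a single 1 per row marking the base
    return np.array([[ch == a for a in alphabet] for ch in s], dtype=float)

# each 'valid'-mode output entry counts the exact base matches in one window
_counts = signal.correlate(_one_hot('ACGTACGTAC'), _one_hot('ACG'),
                           mode='valid').flatten()
print([i for i, c in enumerate(_counts) if c >= 3 - 1])  # -> [0, 4]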

if __name__ == '__main__':
    pattern = sys.stdin.readline()[:-1]
    seq = sys.stdin.readline()[:-1]
    d = int(sys.stdin.readline()[:-1])
    res = ApproximatePatternMatching(seq, pattern, d)
    print(' '.join(map(str, res)))

Example #51
0
def olf_bulb_10(Nmitral, H_in, W_in, P_odor_in, dam):
    #    Nmitral = 10 #number of mitral cells
    Ngranule = np.copy(Nmitral)  #number of granule cells     pg. 383 of Li/Hop
    Ndim = Nmitral + Ngranule  #total number of cells
    t_inh = 25
    # time when inhalation starts
    t_exh = 205
    #time when exhalation starts
    finalt = 395
    # end time of the cycle

    #y = zeros(ndim,1);

    Sx = 1.43  #Sx,Sx2,Sy,Sy2 are parameters for the activation functions
    Sx2 = 0.143
    Sy = 2.86  #These are given in Li/Hopfield pg 382, slightly diff in her thesis
    Sy2 = 0.286
    th = 1  #threshold for the activation function

    tau_exh = 33.3333
    #Exhale time constant, pg. 382 of Li/Hop
    exh_rate = 1 / tau_exh

    alpha = .15  #decay rate for the neurons
    #Li/Hop have it as 1/7 or .142 on pg 383

    P_odor0 = np.zeros(Nmitral)  #odor pattern, no odor

    H0 = H_in  #weight matrix: to mitral from granule
    W0 = W_in  #weights: to granule from mitral

    Ib = np.ones((Nmitral, 1)) * .243  #initial external input to mitral cells
    Ic = np.ones(
        (Ngranule, 1)) * .1  #initial input to granule cells, these values are
    #given on pg 382 of Li/Hop

    signalflag = 1  # 0 for linear output, 1 for activation function

    noise = np.zeros((Ndim, 1))  #noise in inputs
    noiselevel = .00143
    noisewidth = 7  #noise correlation time, given pg 383 Li/Hop as 9, but 7 in thesis

    lastnoise = np.zeros((Ndim, 1))  #initial time of last noise pulse

    #******************************************************************************

    #CALCULATE FIXED POINTS

    #Calculating equilibrium value with no input
    rest0 = np.zeros((Ndim, 1))

    restequi = fsolve(lambda x: equi(x,Ndim,Nmitral,Sx,Sx2,Sy,Sy2,th,alpha,\
                                     t_inh,H0,W0,P_odor0,Ib,Ic,dam),rest0) #about 20 ms to run this

    np.random.seed(seed=23)
    #init0 = restequi+np.random.rand(Ndim)*.00143 #initial conditions plus some noise
    #for no odor input
    init0 = restequi + np.random.rand(
        Ndim) * .00143  #initial conditions plus some noise
    #for no odor input
    np.random.seed()
    #Now calculate equilibrium value with odor input

    lastnoise = lastnoise + t_inh - noisewidth  #initialize lastnoise value
    #But what is it for? to have some
    #kind of correlation in the noise

    #find eigenvalues of A to see if input produces oscillating signal

    xequi = fsolve(lambda x: equi(x,Ndim,Nmitral,Sx,Sx2,Sy,Sy2,th,alpha,\
                                     t_inh,H0,W0,P_odor_in,Ib,Ic,dam),rest0)
    #equilibrium values with some input, about 20 ms to run

    #******************************************************************************

    #CALCULATE A AND DETERMINE EXISTENCE OF OSCILLATIONS

    diffgy = celldiff(xequi[Nmitral:], Sy, Sy2, th)
    diffgx = celldiff(xequi[0:Nmitral], Sx, Sx2, th)

    H1 = np.dot(H0, diffgy)
    W1 = np.dot(W0, diffgx)  #intermediate step in constructing A

    A = np.dot(H1, W1)  #Construct A

    dA, vA = lin.eig(A)  #about 20 ms to run this
    #Find eigenvalues of A

    diff = (1j) * (dA)**.5 - alpha  #criteria for a growing oscillation

    negsum = -(1j) * (dA)**.5 - alpha  #Same

    diff_re = np.real(diff)
    #Take the real part
    negsum_re = np.real(negsum)

    #do an argmax to return the eigenvalue that will cause the fastest growing oscillations
    #Then do a spectrograph to track the growth of the associated freq through time

    indices = np.where(
        diff_re > 0)  #Find the indices where the criteria is met
    indices2 = np.where(negsum_re > 0)

    #eigenvalues that could lead to growing oscillations
    #    candidates = np.append(np.real((dA[indices])**.5),np.real((dA[indices2])**.5))
    largest = np.argmax(diff_re)

    check = np.size(indices)
    check2 = np.size(indices2)

    if check == 0 and check2 == 0:
        #    print("No Odor Recognized")
        dominant_freq = 0
    else:
        dominant_freq = np.real((dA[largest])**.5) / (
            2 * np.pi)  #find frequency of the dominant mode
        #Divide by 2pi to get to cycles/ms
    #    print("Odor detected. Eigenvalues:",dA[indices],dA[indices2],\
    #          "\nEigenvectors:",vA[indices],vA[indices2],\
    #          "\nDominant Frequency:",dominant_freq)

    #*************************************************************************

    #SOLVE DIFFERENTIAL EQUATIONS TO GET INPUT AND OUTPUTS AS FN'S OF t

    #differential equation to solve
    teval = np.r_[0:finalt]

    #solve the differential equation
    sol = solve_ivp(lambda t,y: diffeq(t,y,Nmitral,Ngranule,Ndim,lastnoise,\
                    noise,noisewidth,noiselevel, t_inh,t_exh,exh_rate,alpha,Sy,\
                    Sy2,Sx,Sx2,th,H0,W0,P_odor_in,Ic,Ib,dam),\
                    [0,395],init0,t_eval = teval,method = 'RK45')
    t = sol.t
    y = sol.y
    y = np.transpose(y)
    yout = np.copy(y)

    #convert signal into output signal given by the activation fn
    if signalflag == 1:
        for i in np.arange(np.size(t)):
            yout[i, :Nmitral] = cellout(y[i, :Nmitral], Sx, Sx2, th)
            yout[i, Nmitral:] = cellout(y[i, Nmitral:], Sy, Sy2, th)

    #solve diffeq for P_odor = 0
    #first, reinitialize lastnoise & noise
    noise = np.zeros((Ndim, 1))
    lastnoise = np.zeros((Ndim, 1))
    lastnoise = lastnoise + t_inh - noisewidth

    sol0 = solve_ivp(lambda t,y: diffeq(t,y,Nmitral,Ngranule,Ndim,lastnoise,\
                    noise,noisewidth,noiselevel, t_inh,t_exh,exh_rate,alpha,Sy,\
                    Sy2,Sx,Sx2,th,H0,W0,P_odor0,Ic,Ib,dam),\
                    [0,395],init0,t_eval = teval,method = 'RK45')
    y0 = sol0.y
    y0 = np.transpose(y0)
    y0out = np.copy(y0)

    #convert signal into output signal given by the activation fn
    if signalflag == 1:
        for i in np.arange(np.size(t)):
            y0out[i, :Nmitral] = cellout(y0[i, :Nmitral], Sx, Sx2, th)
            y0out[i, Nmitral:] = cellout(y0[i, Nmitral:], Sy, Sy2, th)

    #*****************************************************************************

    #SIGNAL PROCESSING

    #Filtering the signal - O_mean: Lowpass filtered signal, under 15 Hz
    #S_h: Highpass filtered signal, over 15 Hz

    fs = 1 / (.001 * (t[1] - t[0]))  #sampling freq, converting from ms to sec

    f_c = 15 / fs  # Cutoff freq at 15 Hz, written as a ratio of fc to sample freq

    flter = np.sinc(2 * f_c *
                    (t -
                     (finalt - 1) / 2)) * np.blackman(finalt)  #creating the
    #windowed sinc filter
    #centered at the middle
    #of the time data
    flter = flter / np.sum(flter)  #normalize

    hpflter = -np.copy(flter)
    hpflter[int(
        (finalt - 1) / 2)] += 1  #convert the LP filter into a HP filter

    Sh = np.zeros(np.shape(yout))
    Sl = np.copy(Sh)
    Sl0 = np.copy(Sh)
    Sbp = np.copy(Sh)

    for i in np.arange(Ndim):
        Sh[:, i] = np.convolve(yout[:, i], hpflter, mode='same')
        Sl[:, i] = np.convolve(yout[:, i], flter, mode='same')
        Sl0[:, i] = np.convolve(y0out[:, i], flter, mode='same')

    #find the oscillation period Tosc (Tosc must be greater than 5 ms to exclude noise)
    Tosc0 = np.zeros(np.size(np.arange(5, 50)))
    for i in np.arange(5, 50):
        Sh_shifted = np.roll(Sh, i, axis=0)
        Tosc0[i - 5] = np.sum(
            np.diagonal(
                np.dot(np.transpose(Sh[:, :Nmitral]),
                       Sh_shifted[:, :Nmitral])))
        #That is, do the correlation matrix (time correlation), take the diagonal to
        #get the autocorrelations, and find the max
    Tosc = np.argmax(Tosc0)
    Tosc = Tosc + 5

    f_c2 = 1000 * (
        1.3 /
        Tosc) / fs  #Filter out components with frequencies higher than this
    #to get rid of noise effects in cross-correlation
    #times 1000 to get units right

    flter2 = np.sinc(2 * f_c2 * (t - (finalt - 1) / 2)) * np.blackman(finalt)
    flter2 = flter2 / np.sum(flter2)

    for i in np.arange(Ndim):
        Sbp[:, i] = np.convolve(Sh[:, i], flter2, mode='same')

    #CALCULATE THE DISTANCE MEASURES

    #calculate phase via cross-correlation with each cell
    phase = np.zeros(Nmitral)

    for i in np.arange(1, Nmitral):
        crosscor = signal.correlate(Sbp[:, 0], Sbp[:, i])
        tdiff = np.argmax(crosscor) - (finalt - 1)
        phase[i] = tdiff / Tosc * 2 * np.pi

    #Problem with the method below is that it will only give values from 0 to pi
    #for i in np.arange(1,Nmitral):
    #    phase[i]=np.arccos(np.dot(Sbp[:,0],Sbp[:,i])/(lin.norm(Sbp[:,0])*lin.norm(Sbp[:,i])))

    OsciAmp = np.zeros(Nmitral)
    Oosci = np.copy(OsciAmp) * 0j
    Omean = np.zeros(Nmitral)

    for i in np.arange(Nmitral):
        OsciAmp[i] = np.sqrt(
            np.sum(Sh[125:250, i]**2) / np.size(Sh[125:250, i]))
        Oosci[i] = OsciAmp[i] * np.exp(1j * phase[i])
        Omean[i] = np.average(Sl[:, i] - Sl0[:, i])

    Omean = np.maximum(Omean, 0)

    Ooscibar = np.sqrt(np.dot(
        Oosci,
        np.conjugate(Oosci))) / Nmitral  #can't just square b/c it's complex
    Omeanbar = np.sqrt(np.dot(Omean, Omean)) / Nmitral

    maxlam = np.max(np.abs(np.imag(np.sqrt(dA))))

    return yout, y0out, Sh, t, OsciAmp, Omean, Oosci, Omeanbar, Ooscibar, dominant_freq, maxlam
Example #52
0
    def matched_linear_filter(self):
        linear_filter_vals = []
        for i in self.filter_signal():
            corr = signal.correlate(i, np.ones(128), mode='same') / 128
            linear_filter_vals.append(corr)
        return linear_filter_vals
Example #53
0
def calc_cross_corr(value0, value1, Fs=7200, show=False, save_addr='../static/img/a.png', scot=False, time_scale=False,
                    test_ccor=False, plt_title="", frame_by_frame=False, max_shift=500):
    """ Function to plot ccor in time and calc peak point of it """
    # if test_ccor:
    #     value0 = np.roll(value0, 100)  # just to test the scot
    
    # *************************************************************************
    value0 = value0 / np.max(np.abs(value0))
    value0 = value0 - np.mean(value0)
    value1 = value1 / np.max(np.abs(value1))
    value1 = value1 - np.mean(value1)

    if scot:
        sqrt_abs_Sxx = np.sqrt(np.abs(np.fft.fft(scisig.correlate(value0, value0))))
        sqrt_abs_Syy = np.sqrt(np.abs(np.fft.fft(scisig.correlate(value1, value1))))
        xsi = 1 / sqrt_abs_Syy / sqrt_abs_Sxx
        Sxy = np.fft.fft(scisig.correlate(value0, value1))
        value0 = np.fft.ifft(Sxy * xsi)
        value0 = value0 / np.max(np.abs(value0))
    else:
        value0 = scisig.correlate(value0, value1)
        value0 = value0 / np.max(np.abs(value0))

    # --- interpolate the sharp edges
    mid = (len(value0) - 1) // 2
    # value0[mid - 10: mid + 11] = 0
    # for i in range(1, mid):
    #     if abs(value0[i] - value0[i - 1]) > 0.1:
    #         value0[i] = value0[i - 1]
    # for i in reversed(range(mid, len(value0))):
    #     if abs(value0[i] - value0[i - 1]) > 0.1:
    #         value0[i - 1] = value0[i]

    # ---- plot cross-correlation
    plt.figure(4, figsize=(16, 8))
    plt.clf()
    if time_scale:
        a = len(value0) / 2 / Fs
        plt.xlabel("time [sec]")
    else:
        a = len(value0) / 2         # to have sample output
        plt.xlabel("time [sample]")
    plt.plot(np.linspace(-a, a, len(value0)), np.abs(value0))
    plt.ylabel("normalized power")
    plt.title(plt_title + "_zoomed")
    plt.grid(True)
    # ---- CHANGE THESE ACCORDING TO DATA ----
    max_shift = int(max_shift * Fs)
    lims = [-max_shift, max_shift, 0, 1]
    plt.xlim([lims[0], lims[1]])
    plt.ylim([lims[2], lims[3]])
    # plt.show()
    plt.xticks(np.linspace(lims[0], lims[1], 21))
    plt.yticks(np.linspace(lims[2], lims[3], 11))
    # plt.show()

    # ---- CHANGE THESE ACCORDING TO DATA ----
    # lims = [0, 300, 0, 1]
    # plt.xlim([lims[0], lims[1]])
    # plt.ylim([lims[2], lims[3]])
    # plt.xticks(np.linspace(lims[0], lims[1], 21))
    # plt.yticks(np.linspace(lims[2], lims[3], 11))

    if show:
        plt.show()
    else:
        if not os.path.exists(os.path.dirname(save_addr)):
            os.mkdir(os.path.dirname(save_addr))
        plt.savefig(save_addr)

    # --- print cross corr peak point
    mid = (len(value0) - 1) // 2
    shift = np.argmax(np.abs(value0[mid - max_shift: mid + max_shift])) - max_shift      # only search for the max within ±max_shift samples
    # value = np.abs(value0[shift])
    # if time_scale:
    #     shift = shift / Fs
    speed_of_sound = 3000
    # print(shift)
    # print(Fs)
    # print(speed_of_sound)
    print("shift amount = {:4f} Sec, {} Samples, {:3f} Meter".format(shift / Fs, shift, shift / Fs * speed_of_sound))
    # print("shift amount = {} Meter".format(shift))
    # else:
    #     print("shift amount = {} Samples, with confedence = {:3f}%".format(shift, value * 100))
    #     shift = shift / Fs *
    #     print("shift amount = {} Meter".format(shift))
    return shift / Fs
Example #54
0
def xcorr(x, y, normed=True):
    correls = correlate(x, y)
    if normed:
        correls = correls / np.sqrt(np.dot(x, x) * np.dot(y, y))  # not in-place, so integer inputs work too
    return correls
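# Hedged usage sketch (toy array; assumes the same np/correlate imports the
# function above uses):
a = np.array([1., 2., 3.])
print(xcorr(a, a))  # normalized autocorrelation, peaking at exactly 1.0 at zero lag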
Example #55
0
"""
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.signal import convolve, correlate

# open the jpg file
img = Image.open('sample.jpg')
img_pixel = np.array(img)
print(img_pixel.shape)  # (height, width, color-depth)
# The position of the color channel depends on the machine-learning library.
# TensorFlow: channel-last; color-depth is the last axis of the 3-D array.
# Theano: channel-first; color-depth is the first axis (c, h, w).
# Keras: supports both conventions.

plt.imshow(img_pixel)
plt.show()

# RED channel values of the image
print(img_pixel[:, :, 0])

# (8, 8, 3) filter
filter = np.zeros((8, 8, 3))
filter[0, 0, :] = 255
# transformed = convolve(img_pixel, filter, mode='same')
transformed = correlate(img_pixel, filter, mode='same')
plt.imshow(transformed.astype(np.uint8))
plt.show()


Example #56
0
    def checkData(self, y1, y2):
        """ Called by ,fileLoad()
            :param y1: ch1 data array (Force)
            :param y2: ch2 data array (Accel)
            :return:    None
        """

        # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
        # PUT A PULSE ON Ch1 TO TEST GLITCH DETECTION
        #
        # noise_1 = np.random.rand(np.max(y1.shape)) / 500
        # noise_2 = np.random.rand(np.max(y1.shape)) / 500
        # y1      = noise_1
        # y2      = noise_2
        # y1[480] = 0.5
        #
        # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\

        # ========================== SATURATION CHECK BEGIN ==========================\
        #
        y1_Min      = np.min(y1)                                        # Getting min / max
        y1_Max      = np.max(y1)
        y2_Min      = np.min(y2)
        y2_Max      = np.max(y2)
        max_abs1    = max(abs(y1_Min), abs(y1_Max))                     # ch1 absolute peak
        max_abs2    = max(abs(y2_Min), abs(y2_Max))                     # ch2 absolute peak
        max_abs     = max(max_abs1, max_abs2)                           # Greater absolute maximum peak
        #
        if max_abs < c.satur_threshold:                                 # Saturation check
            is_Saturated = False
        else:
            is_Saturated = True
            doBeep(on_list  =  alarm_3_S[0],
                   off_list =  alarm_3_S[1])
            print(Saturation_msg, "max_abs =", max_abs)
        #   #
        # ========================== SATURATION CHECK END ============================/

        # ========================== GLITCH TESTING BEGIN ============================\
        #
        global plot_debug                                   # Access to the global plot_debug
        W = 1                                               # Lateral range width for each side around the peak sample
        is_err1     = False
        is_err2     = False
        #----------- NEW TEST BASED ON THE DIFFERENTIATION OF THE GLITCH (DIRAC DELTA FUNCTION)
        y1_d        = np.diff(y1)
        y2_d        = np.diff(y2)
        indx1       = min(min(np.nonzero(np.abs(y1) == max_abs1)))                  # Index of max(abs(y1))
        indx2       = min(min(np.nonzero(np.abs(y2) == max_abs2)))                  # Index of max(abs(y2))
        if indx1 < (np.max(y1.shape) - 2) and (np.max(abs(y1_d[indx1 - 1] - max_abs1)) < max_abs1 / 50):
            is_err1 = True
        #   #
        if indx2 < (np.max(y2.shape) - 2) and (np.max(abs(y2_d[indx2 - 1] - max_abs2)) < max_abs2 / 50):
            is_err2 = True
        #   #
        #-------------------------------------------------------------------------------
        if is_err1: print(Glitch_msg, "on Ch1")
        if is_err2: print(Glitch_msg, "on Ch2")
        if is_err1 or is_err2:
            doBeep(on_list  =  alarm_1_S[0], off_list = alarm_1_S[1])
        #   #
        # if is_err1 or is_err2 or plot_debug:
        #     mean1 = int(round(1000*mean_range1))
        #     max1  = int(round(1000*max_abs1 / 2))
        #     mean2 = int(round(1000*mean_range2))
        #     max2  = int(round(1000*max_abs2 / 2))
        #     print("==============================")
        #     print("  Glitches are absent if:")
        #     print("  mean_range > max_absX/2")
        #     print("==============================")
        #     print("  ch1:    {:03d}".format(mean1), "  {:03d}".format(max1), " [mV]")
        #     print("------------------------------")
        #     print("  ch2:    {:03d}".format(mean2), "  {:03d}".format(max2), "[mV]")
        #     print("------------------------------")
        #     print("  ch1 indexes:  ", range_indx1)
        #     print("  ch2 indexes:  ", range_indx2)
        #     print("------------------------------")
        # #   #
        # ============================ GLITCH TESTING END ============================/

        # ======================= CHANNELS SWAP TESTING BEGIN ========================\
        #
        """ Sometime the first acquisition can swap ch1 with ch2, here we detect that occurrence, and in the case, 
            we recovery by a swap between ch1 and ch2. 
            1) The autocorrelations of y1 and y2 are calculated.
            2) The max absolute peaks of the autocorrelations are calculated.
            3) Each one of the autocorrelation amplitudes are normalized using the absolute peaks of point (2).
            4) The normalized autocorrelations are rounded to 1 decimal to exclude little oscillations around 0.
            5) The differentiation of the sign changements of the normalized and rounded correlations is calculated.
            6) The differentiations of point (5) are cumulated to obtain the final scalar count.
            7) The ch1 Force signal is a well behaved and then bandlimited pulse with only two zero crossing 
              (excluding noise), by contrast the ch2 Accel signal being a derivative (accelerometer signal) 
              has surely more sign changements, consequently when the total sign changes of ch1 exceeds that of ch2 
              the channels are swapped. 
        """
        upper           = int(3 * self.indx_echo_0)                         # Upper index limit for checking
        corr_y1         = correlate(y1[0:upper], y1[0:upper], mode='same')
        corr_y2         = correlate(y2[0:upper], y2[0:upper], mode='same')
        pk_abs_corr_1   = np.max(np.abs(corr_y1))
        pk_abs_corr_2   = np.max(np.abs(corr_y2))
        corr_y1         = np.round(corr_y1 / pk_abs_corr_1, 1)
        corr_y2         = np.round(corr_y2 / pk_abs_corr_2, 1)
        nz_diff_corr_y1 = (np.diff(np.sign(corr_y1), n=1) != 0).sum()
        nz_diff_corr_y2 = (np.diff(np.sign(corr_y2), n=1) != 0).sum()
        #
        if nz_diff_corr_y1 < nz_diff_corr_y2:
            is_swapped = False
        else:
            is_swapped = True
        #   #
        if plot_debug:
            print("Actual loaded file n°", self.fileIndex)
            print("\n" + "Zero crossing comparison:", nz_diff_corr_y1, " <  ", nz_diff_corr_y2, " ", not is_swapped)
        #   #
        print("            Good if:")
        print(" nz_diff_corr_y1 < nz_diff_corr_y2")
        print("-----------------------------------")
        print("       ", nz_diff_corr_y1, "       |       ", nz_diff_corr_y2)
        print("-----------------------------------")
        if is_swapped:
            print("\n" + "/" * 35)
            print("     ERROR: Swapped ch1, ch2")
            print("        CORRECTION DONE!")
            print("/" * 35 + "\n")
            y1, y2 = y2, y1                                         #  Swap Channels
            self.y1_full = self.y1
            self.y2_full = self.y2
        #   #
        # ======================== CHANNELS SWAP TESTING END =========================/
        self.plotData()
Example #57
0
import numpy as np
import scipy.linalg as la
import scipy.signal as signal
N = 100  # num of samples
sRate = 25  # sampling rate

z = np.linspace(0, 2 * np.pi, num=N)
x = np.sin(2 * np.pi * z) + np.sin(1 * np.pi * z)

autocorrMtx = la.toeplitz(signal.correlate(x, x, mode='full'))

print(len(autocorrMtx[0]))
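# Hedged variant (added for illustration): the usual N x N autocorrelation
# matrix keeps only the non-negative lags, whereas the Toeplitz built above
# is (2N-1) x (2N-1):
r = signal.correlate(x, x, mode='full')[N - 1:]  # lags 0 .. N-1
autocorrMtxN = la.toeplitz(r)                    # illustrative name
print(autocorrMtxN.shape)                        # -> (100, 100)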
Example #58
0
File: correlate.py Project: shimniok/aprs
emphasize_1 = 1

K = 2 * np.pi / Fsr
template_0 = [0 for i in range(chunk_size)]
template_1 = [0 for i in range(chunk_size)]
for s in range(samp_per_bit):
    template_0.append(emphasize_0 * -np.cos(s * K * fzero))
    template_1.append(emphasize_1 * -np.cos(s * K * fone))

# correlate each chunk
correlate_0 = []
correlate_1 = []
c = 0
while c < len(data):
    chunk = data[c:c + chunk_size - 1]
    c0 = correlate(chunk, template_0, 'full').tolist()
    c1 = correlate(chunk, template_1, 'full').tolist()
    for i in range(chunk_size):
        c0[i] *= -c0[i]
        c1[i] *= c1[i]

    #plt.subplot(2,1,1)
    #plt.plot(chunk)
    #plt.subplot(2,1,2)
    #plt.plot(range(len(c0)), c0, 'r-', c1, 'b-')
    #plt.show()
    correlate_0 += c0[0:chunk_size]
    correlate_1 += c1[0:chunk_size]
    c += chunk_size

# low pass filter
Example #59
0
File: ploting.py Project: vimmoos/NN
def correlation(current, data, data_len, transformer):
    des = data[0][0]["desired"][:data_len]
    out = current[0][transformer][0][
        "output"][:data_len] if current is not None else des
    return s.correlate(out, des)
Example #60
0
def Recognition(input_img, input_filter, max_conv, min_conv):
    ### Forward Pass
    # Global variable initialization
    global convolved_nodes
    global temp
    global max_conv_t
    global min_conv_t

    # Convolution
    for i in range(0, input_filter.shape[0]):
        convolved_nodes[i] = signal.correlate(input_img,
                                              input_filter[i],
                                              mode="valid")
    # print("MAX MIN")
    # print(np.amax(convolved_nodes))
    # print(np.amin(convolved_nodes))
    # max_conv_t = np.amax(convolved_nodes)
    # if max_conv_t > max_conv:
    # 	max_conv = max_conv_t
    # else:
    # 	max_conv = max_conv

    # min_conv_t = np.amin(convolved_nodes)
    # if min_conv_t < min_conv:
    # 	min_conv = min_conv_t
    # else:
    # 	min_conv = min_conv
    # print("CONV NODES:")
    # print(convolved_nodes)

    # ReLU activation of the convolution nodes (relu_activation, despite the variable name)
    convolved_nodes_sigmoid = relu_activation(convolved_nodes)

    # print("CONV RELU:")
    # print(convolved_nodes_sigmoid)
    # print(convolved_nodes_sigmoid.shape)

    # Flattening of the ReLU-activated convolution layer
    convolved_nodes_sigmoid_flat = convolved_nodes_sigmoid.reshape(
        1, total_weights)
    # print("CONV FLAT:")
    # print(convolved_nodes_sigmoid_flat)

    # Fully connected layer
    output_nodes_flat = np.matmul(convolved_nodes_sigmoid_flat,
                                  convolved_nodes_to_output_nodes)
    # print("MAX MIN FC")
    # print(np.amax(output_nodes_flat))
    # print(np.amin(output_nodes_flat))
    # max_conv_t = np.amax(output_nodes_flat)
    # if max_conv_t > max_conv:
    # 	max_conv = max_conv_t
    # else:
    # 	max_conv = max_conv

    # min_conv_t = np.amin(output_nodes_flat)
    # if min_conv_t < min_conv:
    # 	min_conv = min_conv_t
    # else:
    # 	min_conv = min_conv
    print("OUTPUT NODES:")
    print(output_nodes_flat)

    # Softmax activation of output node
    output_nodes_flat_column = np.transpose(
        output_nodes_flat)  #.reshape(10,1)#for column comparison
    softmax_output, softmax_1_minus_max = softmax(output_nodes_flat_column)
    # print("MAX MIN")
    # print(np.amax(softmax_output))
    # print(np.amin(softmax_output))
    max_conv_t = np.amax(softmax_output)
    max_conv = max(max_conv, max_conv_t)

    min_conv_t = np.amin(softmax_output)
    min_conv = min(min_conv, min_conv_t)

    return output_nodes_flat_column, softmax_output, min_conv, max_conv