Example #1
def correlation(data1, data2=None):
    """
    Calculates the numerical correlation between two numpy.ndarray data.
    
    :Parameters:
        #. data1 (numpy.ndarray): the first numpy.ndarray. If multidimensional the correlation calculation is performed on the first dimension.
        #. data2 (None, numpy.ndarray): the second numpy.ndarray. If None the data1 autocorrelation is calculated.
    
    :Returns:
        #. correlation (numpy.ndarray): the result of the numerical correlation. 
    """
    # data1 must be a non-empty numpy.ndarray.
    assert isinstance(
        data1,
        np.ndarray), Logger.error("data1 must be a non-zero numpy.ndarray")
    # The length of data1 is stored in data1Length.
    data1Length = len(data1)
    assert data1Length > 0, Logger.error(
        "data1 must be a non-zero numpy.ndarray")
    # extendedLength = 2*len(data1)
    extendedLength = 2 * data1Length
    # The FCA algorithm:
    # 1) computation of the FFT of data1 zero-padded until extendedLength
    # The computation is done along the 0-axis
    FFTData1 = FFT(data1, extendedLength, 0)
    if data2 is None:
        # Autocorrelation case
        FFTData2 = FFTData1
    else:
        # 2) computation of the FFT of data2 zero-padded until extendedLength
        # The computation is done along the 0-axis
        assert isinstance(data2, np.ndarray), Logger.error(
            "if not None, data2 must be a numpy.ndarray")
        FFTData2 = FFT(data2, extendedLength, 0)
    # 3) Product between FFT(data1)* and FFT(data2)
    FFTData1 = np.conjugate(FFTData1) * FFTData2
    # 4) inverse FFT of the product
    # The computation is done along the 0-axis
    FFTData1 = iFFT(FFTData1, len(FFTData1), 0)
    # This refers to (1/(N-m))*Sab in the published algorithm.
    # This is the correlation function defined for positive indexes only.
    if len(FFTData1.shape) == 1:
        corr = FFTData1.real[:data1Length] / (data1Length -
                                              np.arange(data1Length))
    else:
        corr = np.add.reduce(FFTData1.real[:data1Length],
                             1) / (data1Length - np.arange(data1Length))
    return corr
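A minimal usage sketch, assuming the function above sits in a module where FFT and iFFT are bound to numpy.fft.fft and numpy.fft.ifft (which the argument order suggests) and Logger is any object exposing an error() method; the stub below is purely illustrative:

import numpy as np
from numpy.fft import fft as FFT, ifft as iFFT

class Logger:
    # Hypothetical stand-in for the package's logger; only error() is needed.
    @staticmethod
    def error(msg):
        return msg

rng = np.random.default_rng(0)
x = rng.standard_normal(1000)

acf = correlation(x)                        # autocorrelation of x
ccf = correlation(x, np.roll(x, 5))         # cross-correlation with a shifted copy

print(np.isclose(acf[0], np.mean(x * x)))   # lag 0 equals the mean square of x
print(np.argmax(ccf[:50]))                  # the 5-sample shift shows up as the peak lag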
Example #2
def split_operator(steps):
    # Strang splitting: a half-step in the potential (position space), a full
    # kinetic step in momentum space, then another half-step in the potential.
    # Relies on module-level globals: psi (wavefunction), dt (time step),
    # V (potential on the grid), n (grid size), dx (grid spacing), m (mass),
    # and FFT/IFFT aliases for the forward/inverse Fourier transform.
    global psi
    x_operator = np.exp(-0.5j * dt * V)
    k_operator = np.exp(-0.5j * dt * (np.fft.fftfreq(n) *
                                      (2 * np.pi / dx))**2 / m)
    for i in range(steps):
        psi = IFFT(FFT(psi * x_operator) * k_operator) * x_operator
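The snippet above relies on globals defined elsewhere. A self-contained sketch of the same Strang-split step, with hypothetical grid parameters and a harmonic potential chosen only for illustration, using numpy.fft directly:

import numpy as np

# Hypothetical grid and physical parameters (not from the original source).
n, dx, dt, m = 512, 0.1, 0.01, 1.0
x = (np.arange(n) - n // 2) * dx
V = 0.5 * x**2                                   # harmonic potential
psi = np.exp(-x**2).astype(complex)              # initial Gaussian wave packet
psi /= np.sqrt(np.sum(np.abs(psi)**2) * dx)      # normalize

k = 2 * np.pi * np.fft.fftfreq(n, d=dx)          # angular wavenumbers

def split_operator(psi, steps):
    # One Strang-split step per iteration: half V, full kinetic, half V (hbar = 1).
    x_op = np.exp(-0.5j * dt * V)
    k_op = np.exp(-0.5j * dt * k**2 / m)
    for _ in range(steps):
        psi = np.fft.ifft(np.fft.fft(psi * x_op) * k_op) * x_op
    return psi

psi = split_operator(psi, steps=100)
print(np.sum(np.abs(psi)**2) * dx)               # norm stays ~1 (evolution is unitary)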
Example #3
    def __init__(self, kernel: np.ndarray) -> None:
        self._KX, self._KY, *_ = kernel.shape
        self._padX, self._padY = self._KX // 2, self._KY // 2

        # wrestle this kernel into the proper representation
        self.kernel = FFT(
            self.invert_kernel(
                np.pad(kernel, [[self._padX] * 2, [self._padY] * 2],
                       mode='constant')))
Example #4
def __compute(lineout, nrolls=25):

    rolls = __generateRolls(lineout, nrolls)

    rollerFT = FFT(rolls, axis=1)
    ft_abs = np.copy(np.abs(rollerFT[1, :]))
    dargs = np.diff(np.unwrap(np.angle(rollerFT), axis=1), axis=1)
    avg = np.average(dargs, axis=1, weights=ft_abs[1:])
    toReturn = __chooseRoll(avg, nrolls)

    return toReturn
Example #5
def lineoutToLocation(lineout, nrolls, nsamples):
    roller = np.zeros((nrolls, nsamples), dtype=float)
    for r in range(nrolls):
        roller[r, :] = np.roll(np.copy(lineout),
                               r * len(lineout) // nrolls)
    rollerFT = FFT(roller, axis=1)
    ft_abs = np.copy(np.abs(rollerFT[1, :]))
    dargs = np.diff(np.unwrap(np.angle(rollerFT), axis=1), axis=1)
    avg = np.average(dargs, axis=1, weights=ft_abs[1:])
    fourierLocation = timsChoice(avg, nrolls)
    return fourierLocation
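Examples #4, #5, and #12 all estimate a displacement from the slope of the Fourier phase. A minimal sketch of the underlying shift theorem (rolling a signal by s samples multiplies its spectrum by exp(-2j*pi*k*s/N), i.e. adds a linear phase of slope -2*pi*s/N), using plain numpy and none of the project helpers such as timsChoice:

import numpy as np

rng = np.random.default_rng(0)
signal = rng.standard_normal(256)
shift = 37                                        # hypothetical shift to recover

rolled = np.roll(signal, shift)
# The two spectra differ only by a linear phase ramp; its slope encodes the shift.
phase = np.unwrap(np.angle(np.fft.fft(rolled) / np.fft.fft(signal)))
slope = np.mean(np.diff(phase))
print(round(-slope * len(signal) / (2 * np.pi)))  # 37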
Example #6
def load_image(filename):
    # imageDir is assumed to be defined at module level (path to the images).
    image = np.loadtxt(imageDir + filename)
    # Two feature channels per row: the FFT magnitude and the derivative of
    # the unwrapped FFT phase, both truncated to the first 128 frequency bins.
    FFT_PLACEHOLDER = np.zeros((image.shape[0], 128, 2))

    FFT_IMAGE = FFT(image, axis=1)

    FFT_PLACEHOLDER[:, :, 0] = np.abs(FFT_IMAGE)[:, :128]
    FFT_PLACEHOLDER[:, :, 1] = np.diff(np.unwrap(np.angle(FFT_IMAGE), axis=1),
                                       axis=1)[:, :128]

    return FFT_PLACEHOLDER
Example #7
    def convolve(self, im: np.ndarray) -> np.ndarray:
        _X, _Y = im.shape
        print(_X, _Y)

        divX, divY = _X // self._KX, _Y // self._KY
        diffX = (self._KX - _X + self._KX * divX) % self._KX
        diffY = (self._KY - _Y + self._KY * divY) % self._KY

        # padding on each side, i.e. left, right, top and bottom;
        # centered as well as possible
        right = diffX // 2
        left = diffX - right
        bottom = diffY // 2
        top = diffY - bottom

        # Pad both `im` (original image) and `self._convolved` (stores results)
        im = np.lib.pad(im, [[left, right], [top, bottom]], mode='constant')

        self._convolved = np.zeros(
            (left + self._padX + right + self._padX + _X,
             bottom + self._padY + top + self._padY + _Y))
        #self._convolved = np.zeros(im.shape)

        # Generator that parametrizes (partitions) the image into blocks
        subsets = ([[i * self._KX, (i + 1) * self._KX],
                    [j * self._KY, (j + 1) * self._KY]] for i in range(divX)
                   for j in range(divY))

        # transform each partition and OA on conv_image
        for t, s in subsets:
            # slice and pad each array subset
            subset = np.lib.pad(im[slice(*t), slice(*s)],
                                [[self._padX] * 2, [self._padY] * 2],
                                mode='constant')

            transf_subset = FFT(subset)

            # hadamard product and iFFT
            space = iFFT(transf_subset * self.kernel).real

            # overlap and add
            t[1] += 2 * self._padX
            s[1] += 2 * self._padY
            self._convolved[slice(*t), slice(*s)] += space

        # slice image edges off and get it back, convolved
        self._convolved = self._convolved[self._padX + left:self._padX + left +
                                          _X, self._padY + bottom:self._padY +
                                          bottom + _Y]

        print(*self._convolved.shape)
        return self._convolved
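This method (apparently from the same class whose __init__ appears in Example #3) performs overlap-add convolution: the image is partitioned into kernel-sized blocks, each block is convolved in the frequency domain, and the padded results are summed with overlapping edges. A minimal 1-D sketch of the identity it relies on, namely that multiplying zero-padded spectra is equivalent to linear convolution:

import numpy as np

signal = np.random.default_rng(1).standard_normal(64)
kernel = np.array([1.0, 4.0, 6.0, 4.0, 1.0]) / 16.0    # small smoothing kernel

# Zero-pad both to the full linear-convolution length, multiply the spectra,
# and transform back; this matches direct convolution.
L = len(signal) + len(kernel) - 1
via_fft = np.fft.ifft(np.fft.fft(signal, L) * np.fft.fft(kernel, L)).real
direct = np.convolve(signal, kernel, mode='full')

print(np.allclose(via_fft, direct))                    # True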
Example #8
def fourier_reshape(vec):
    # Spectral derivatives: multiply the spectrum by (1j*k)**n and transform
    # back; a small +.01j offset is added to the frequencies as regularization.
    vecFT = FFT(vec)
    freqs = FREQS(len(vec))
    dvecFT = 1j * (freqs + .01j) * vecFT
    ddvecFT = -1 * np.power(freqs + .01j, 2) * vecFT
    dddvecFT = -1j * np.power(freqs + .01j, 3) * vecFT
    dvec = np.real(IFFT(dvecFT)) / vec.shape[0]
    ddvec = np.real(IFFT(ddvecFT)) / vec.shape[0]
    dddvec = np.real(IFFT(dddvecFT)) / vec.shape[0]
    return dvec, ddvec, dddvec
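A self-contained sketch of the same spectral-differentiation idea with plain numpy.fft, assuming FFT/IFFT/FREQS above are wrappers around the usual transforms (the +.01j regularization and the trailing normalization are omitted here):

import numpy as np

n = 256
x = np.linspace(0.0, 2.0 * np.pi, n, endpoint=False)
f = np.sin(3.0 * x)

# Angular wavenumbers in numpy's FFT ordering.
k = 2.0 * np.pi * np.fft.fftfreq(n, d=x[1] - x[0])
fft_f = np.fft.fft(f)

df = np.real(np.fft.ifft(1j * k * fft_f))         # first derivative
ddf = np.real(np.fft.ifft(-(k ** 2) * fft_f))     # second derivative

print(np.allclose(df, 3.0 * np.cos(3.0 * x)))     # True
print(np.allclose(ddf, -9.0 * np.sin(3.0 * x)))   # True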
Example #9
def transform(lineout):

    lineoutFT = FFT(lineout)

    return np.abs(lineoutFT), np.unwrap(np.angle(lineoutFT))
Example #10
def fillimpulseresponses(printfiles=True, samplefiles=False):
    (s_collection_ft,n_collection_ft) = (nparray([0,0,0],dtype=complex),nparray([0,0,0],dtype=complex))
    filepath = '../data_fs/ave1/'
    filematch = filepath + 'C1--LowPulseHighRes-in-100-out1700-an2100--*.txt'
    filelist = glob.glob(filematch)


    print('filling impulse response files\n\tnum files = %i' % len(filelist))

    for i,f in enumerate(filelist):

        ## processing images 
        ## samplefiles = False
        m = re.search('(.+).txt$',f)
        if (i%10 == 0 and samplefiles):
            outname_spect = m.group(1) + '.spect.dat'
            outname_time = m.group(1) + '.time.dat'
            outname_simTOF = m.group(1) + '.simTOF.dat'

        fi = open(f, "r")
        for passline in range(6):
            headline = '# ' + fi.readline()
        (t,v) = fi.readline().split()
        v_vec=nparray(float(v),dtype=float)
        t_vec=nparray(float(t)*1.e9,dtype=float)
        for line in fi:
            (t,v) = line.split()
            v_vec = row_stack((v_vec,float(v)))
            t_vec = row_stack((t_vec,float(t)*1.e9))
        fi.close()
        #Get the mean time-step for sake of frequencies
        dt = mean(diff(t_vec,n=1,axis=0))
        #FFT the vector
        v_vec_ft = FFT(v_vec,axis=0)
        f = FREQ(v_vec_ft.shape[0],dt)
        m_extend = 10
        f_extend = FREQ(v_vec_ft.shape[0]*m_extend,dt)
        t_extend = arange(0,((t_vec[-1]-t_vec[0])+dt)*m_extend,dt)
        # deep copy for the noise estimation
        n_vec_ft = npcopy(v_vec_ft)
        # find indices where there is only noise in the power, and indices with predominantly signal
        # replace the signal elements in the noise vector with a random sampling from the noise portion
        chooseinds = nparray([i for i,nu in enumerate(f) if (npabs(nu)> 6.5 and npabs(nu)<(20))])
        replaceinds = nparray([i for i,nu in enumerate(f) if npabs(nu)< 6.5])
        values = choice(n_vec_ft[chooseinds,0],len(replaceinds))
        n_vec_ft[replaceinds,0] = values

        ## build noise vector and add to n_collection_ft
        # sort inds for f and use for interp to extend noise in fourier domain
        inds = argsort(f)
        n_vec_extend_ft_r = interp(f_extend,f[inds],npabs(n_vec_ft[inds,0]))
        n_vec_extend_ft_phi = choice(npangle(n_vec_ft[:,0]),f_extend.shape[0])
        n_vec_extend_ft = nprect(n_vec_extend_ft_r,n_vec_extend_ft_phi)
        n_vec_extend_ft.shape = (n_vec_extend_ft.shape[0],1)
        
        if n_collection_ft.shape[0] < n_vec_extend_ft.shape[0]:
            n_collection_ft = npcopy(n_vec_extend_ft)
           # s_collection_ft.shape = (s_collection_ft.shape[0],1)
        else:
            n_collection_ft = column_stack((n_collection_ft,n_vec_extend_ft))

        ## build signal vector and add to s_collection_ft
        noiseamp = nppower(mean(npabs(values)),int(2))
        sigamp = nppower(mean(nparray([i for i,nu in enumerate(f) if npabs(nu)< 1.0])),int(2))
        s_vec_ft = npcopy(v_vec_ft)
        s_vec_ft[:,0] *= Weiner(f,sigamp,noiseamp,cut = 5,p = 4) * fourier_delay(f,-40) ## Weiner filter and dial back by 40 ns

        if samplefiles:
            out = column_stack((f,npabs(v_vec_ft),npabs(n_vec_ft),npabs(s_vec_ft)))
            savetxt(outname_spect,out,fmt='%.4f')

        s_vec = real(IFFT(s_vec_ft,axis=0))
        s_vec_extend = zeros((f_extend.shape[0],1),dtype=float) 
        s_vec_extend[:s_vec.shape[0],0] = s_vec[:,0]
        s_vec_extend_ft = FFT(s_vec_extend,axis=0)

        if s_collection_ft.shape[0] < s_vec_extend_ft.shape[0]:
            s_collection_ft = npcopy(s_vec_extend_ft)
           # s_collection_ft.shape = (s_collection_ft.shape[0],1)
        else:
            s_collection_ft = column_stack((s_collection_ft,s_vec_extend_ft))

        # first sum all the Weiner filtered and fourier_delay() signals, then add the single noise vector back
    if printfiles:
        outpath = '../data_fs/extern/'
        filename = outpath + 'signal_collection_ft'
        npsave(filename,s_collection_ft)
        filename = outpath + 'noise_collection_ft'
        npsave(filename,n_collection_ft)
        filename = outpath + 'frequencies_collection'
        npsave(filename,f_extend)
        filename = outpath + 'times_collection'
        npsave(filename,t_extend)

    return (s_collection_ft,n_collection_ft,f_extend,t_extend)
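Weiner() and fourier_delay() above are project helpers that are not shown. A minimal sketch of what the two frequency-domain operations conventionally look like (a Wiener-style weight S/(S+N) built from signal and noise power estimates, and a delay applied as a linear phase), with made-up data purely for illustration:

import numpy as np

n, dt = 1024, 0.5                               # hypothetical sampling, in ns
t = np.arange(n) * dt
freq = np.fft.fftfreq(n, d=dt)

clean = np.exp(-t / 20.0) * np.sin(2 * np.pi * 0.2 * t)
noisy = clean + 0.1 * np.random.default_rng(2).standard_normal(n)
spectrum = np.fft.fft(noisy)

# Wiener-style weight from (here, assumed known) signal and noise power.
sig_power = np.abs(np.fft.fft(clean)) ** 2
noise_power = (0.1 ** 2) * n                    # flat white-noise power level
wiener = sig_power / (sig_power + noise_power)

# Fourier delay: shifting by t0 in time is a linear phase in frequency.
t0 = -40.0                                      # pull the signal 40 ns earlier
delay = np.exp(-2j * np.pi * freq * t0)

filtered = np.real(np.fft.ifft(spectrum * wiener * delay))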
Example #11
def split_operator(steps):
    global psi
    for i in range(steps):
        psi = IFFT(FFT(psi * x_operator) * k_operator) * x_operator
Example #12
                            except:
                                continue
                            if nsumevents == 0:
                                sumsignal = lineout
                                nsumevents += 1
                            else:
                                # running average of the accumulated lineouts
                                sumsignal *= float(nsumevents) / (nsumevents + 1)
                                sumsignal += lineout / (nsumevents + 1)
                                nsumevents += 1
                            R = np.row_stack((R, lineout))  # stacks from top to bottom

                            for r in range(nrolls):
                                #roller[r,:] = np.roll(np.copy(lineout) , (r*len(lineout))//nrolls);
                                roller[r, :] = np.roll(np.copy(lineout), r * len(lineout) // nrolls)

                            lineoutFT = FFT(np.roll(lineout, len(lineout) // 2))
                            rollerFT = FFT(roller, axis=1)
                            #ft_abs = np.copy(np.abs(lineoutFT));
                            #ft_arg = np.copy(np.angle(lineoutFT));
                            ft_abs = np.copy(np.abs(rollerFT[1, :]))
                            ft_arg = np.copy(np.angle(rollerFT[1, :]))
                            dargs = np.diff(np.unwrap(np.angle(rollerFT), axis=1), axis=1)

                            #lineoutback = np.real( IFFT(nprect( w_weights, np.angle(rollerFT[0,:]) )) );
                            #lineoutback = np.real( IFFT(nprect(w_weights,ft_arg)) );
                            lineoutback = np.real(IFFT(nprect(ft_abs, ft_arg)))
                            #lineoutback = np.real( IFFT(rollerFT[1,:]) );
                            R_back = np.row_stack((R_back, lineoutback))

                            avg = np.average(dargs[:, :nslopes], axis=1, weights=ft_abs[1:nslopes + 1])
                            avg.shape = (avg.shape[0], 1)
Example #13
    def OAconv2(self):
        """ A threaded version of the former algorithm """
        self.__rangePX_, self.__rangePY_ = self.array.shape

        diffX = (self.__rangeKX_ - self.__rangePX_ +
                 self.__rangeKX_ * (self.__rangePX_ // self.__rangeKX_)) % self.__rangeKX_
        diffY = (self.__rangeKY_ - self.__rangePY_ +
                 self.__rangeKY_ * (self.__rangePY_ // self.__rangeKY_)) % self.__rangeKY_

        # padding on each side, i.e. left, right, top and bottom;
        # centered as well as possible
        right = diffX // 2
        left = diffX - right
        bottom = diffY // 2
        top = diffY - bottom

        # pad the array
        self.array = np.lib.pad(self.array, ((left, right), (top, bottom)),
                                mode='constant',
                                constant_values=0)

        divX = int(self.array.shape[0] // self.__rangeKX_)
        divY = int(self.array.shape[1] // self.__rangeKY_)

        # a list of tuples to partition the array by
        subsets = [(i * self.__rangeKX_, (i + 1) * self.__rangeKX_,
                    j * self.__rangeKY_, (j + 1) * self.__rangeKY_)
                   for i in range(divX)
                   for j in range(divY)]

        # padding for individual blocks in the subsets list
        padX = self.__rangeKX_ // 2
        padY = self.__rangeKY_ // 2

        # Add. padding for __arr_ so it can store the results
        self.__arr_ = np.lib.pad(
            self.__arr_,
            ((left + padX + self.__offsetX_, right + padX + self.__offsetX_),
             (top + padY + self.__offsetY_, bottom + padY + self.__offsetY_)),
            mode='constant',
            constant_values=0)

        kernel = np.pad(self.kernel, [(padX, padX), (padY, padY)],
                        mode='constant',
                        constant_values=0)

        # thanks to http://stackoverflow.com/a/38384551/3928184!
        # Invert the kernel
        new_kernel = self.InvertKernel2(kernel)
        transf_kernel = FFT(new_kernel)

        # transform each partition and OA on conv_image
        for tup in tqdm(subsets):
            # slice and pad the array subset
            subset = self.array[tup[0]:tup[1], tup[2]:tup[3]]
            subset = np.lib.pad(subset, [(padX, padX), (padY, padY)],
                                mode='constant',
                                constant_values=0)

            transf_subset = FFT(subset)

            # multiply the two arrays entrywise

            space = iFFT(transf_subset * transf_kernel).real

            # overlap with indices and add them together
            self.__arr_[tup[0]:tup[1] + 2 * padX,\
                        tup[2]:tup[3] + 2 * padY] += space

        # crop image and get it back, convolved
        return self.__arr_[padX + left + self.__offsetX_:padX + left +
                           self.__offsetX_ + self.__rangeX_,
                           padY + bottom + self.__offsetY_:padY + bottom +
                           self.__offsetY_ + self.__rangeY_]