Example #1
def correlation_func(cor_win_1,
                     cor_win_2,
                     window_size,
                     correlation_method='circular'):
    '''Compute the cross-correlation of the two window stacks.
    By default a circular cross-correlation is used, which means no
    zero-padding is applied; the .real cuts off spurious imaginary parts
    that remain due to finite numerical accuracy.
    '''
    if correlation_method == 'linear':
        cor_win_1 = cor_win_1 - cor_win_1.mean(axis=(1, 2)).reshape(
            cor_win_1.shape[0], 1, 1)
        cor_win_2 = cor_win_2 - cor_win_2.mean(axis=(1, 2)).reshape(
            cor_win_1.shape[0], 1, 1)
        cor_win_1[cor_win_1 < 0] = 0
        cor_win_2[cor_win_2 < 0] = 0

        corr = fftshift(irfft2(
            np.conj(rfft2(cor_win_1, s=(2 * window_size, 2 * window_size))) *
            rfft2(cor_win_2, s=(2 * window_size, 2 * window_size))).real,
                        axes=(1, 2))
        corr = corr[:, window_size // 2:3 * window_size // 2,
                    window_size // 2:3 * window_size // 2]

    else:
        corr = fftshift(irfft2(np.conj(rfft2(cor_win_1)) *
                               rfft2(cor_win_2)).real,
                        axes=(1, 2))
    return corr
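
A minimal usage sketch for correlation_func above, assuming rfft2, irfft2 and fftshift come from numpy.fft (the random window stacks and the shift are made up for illustration):

import numpy as np
from numpy.fft import rfft2, irfft2, fftshift

rng = np.random.default_rng(0)
window_size = 32
win_a = rng.random((10, window_size, window_size))   # stack of 10 windows
win_b = np.roll(win_a, (2, 3), axis=(1, 2))          # frame B shifted by (2, 3) px

corr = correlation_func(win_a, win_b, window_size)   # circular by default
peak = np.unravel_index(corr[0].argmax(), corr[0].shape)
# the peak offset from the window centre (16, 16) encodes the displacement
print(peak)   # -> (18, 19)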
Example #2
def dnfsom_activity(n, m, stimulus, W, tau, T, alpha):
    p = 2 * n + 1
    dt = 35.0 / float(T)
    V = np.random.random((n, n)) * .01
    U = np.random.random((n, n)) * .01

    We = alpha * 1.50 * 960.0 / (n * n) * gaussian((p, p), (0.1, 0.1))
    Wi = alpha * 0.75 * 960.0 / (n * n) * gaussian((p, p), (1.0, 1.0))
    sigma_c = 2.10
    G = gaussian((n, n), (sigma_c, sigma_c))

    V_shape, We_shape, Wi_shape = np.array(V.shape), np.array(
        We.shape), np.array(Wi.shape)
    shape = np.array(best_fft_shape(V_shape + We_shape // 2))

    We_fft = rfft2(We[::-1, ::-1], shape)
    Wi_fft = rfft2(Wi[::-1, ::-1], shape)

    i0 = We.shape[0] // 2
    i1 = i0 + V_shape[0]
    j0 = We.shape[1] // 2
    j1 = j0 + V_shape[1]

    D = ((np.abs(W - stimulus)).sum(axis=-1)) / float(m * m)
    I = (1.0 - D.reshape(n, n)) * G * alpha

    for i in range(T):
        Z = rfft2(V, shape)
        Le = irfft2(Z * We_fft, shape).real[i0:i1, j0:j1]
        Li = irfft2(Z * Wi_fft, shape).real[i0:i1, j0:j1]
        U += (-U + (Le - Li) + I) * 1.0 / tau * dt
        V = np.maximum(U, 0)
    return V
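
Example #2 depends on helpers gaussian and best_fft_shape that are not shown. The core trick, a linear convolution of the field V with a flipped kernel via padded FFTs, can be sketched standalone (the gaussian helper below is an assumption about the missing one, and the plain padded shape stands in for best_fft_shape):

import numpy as np
from numpy.fft import rfft2, irfft2

def gaussian(shape, sigma):
    # assumed form: unit-peak Gaussian sampled on a [-1, 1]^2 grid
    y = np.linspace(-1.0, 1.0, shape[0])[:, None]
    x = np.linspace(-1.0, 1.0, shape[1])[None, :]
    return np.exp(-(x**2 / (2 * sigma[1]**2) + y**2 / (2 * sigma[0]**2)))

n = 32
V = np.random.random((n, n))
We = gaussian((2 * n + 1, 2 * n + 1), (0.1, 0.1))
shape = (V.shape[0] + We.shape[0] // 2, V.shape[1] + We.shape[1] // 2)
i0 = We.shape[0] // 2
# pre-flip the kernel so the spectral product is a convolution, then
# crop the valid n-by-n block, exactly as the loop in Example #2 does
Le = irfft2(rfft2(V, shape) * rfft2(We[::-1, ::-1], shape),
            shape)[i0:i0 + n, i0:i0 + n]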
Example #3
def rl_deconv_all(img_list, psf_list, iterations=10, lbd=0.2):
    """
    Spatially-Variant Richardson-lucy deconvolution with Total Variation regularization
    """
    min_value = []
    for img_idx, img in enumerate(img_list):
        img_list[img_idx] = np.pad(img_list[img_idx],
                                   np.max(psf_list[0].shape),
                                   mode='reflect')
        min_value.append(np.min(img))
        img_list[img_idx] = img_list[img_idx] - np.min(img)
    size = np.array(np.array(img_list[0].shape) +
                    np.array(psf_list[0].shape)) - 1
    fsize = [fftpack.helper.next_fast_len(int(d)) for d in size]
    fslice = tuple([slice(0, int(sz)) for sz in size])

    latent_estimate = img_list.copy()
    error_estimate = img_list.copy()

    psf_f = []
    psf_flipped_f = []
    for img_idx, img in enumerate(latent_estimate):
        psf_f.append(rfft2(psf_list[img_idx], fsize))
        _psf_flipped = np.flip(psf_list[img_idx], axis=0)
        _psf_flipped = np.flip(_psf_flipped, axis=1)
        psf_flipped_f.append(rfft2(_psf_flipped, fsize))

    for i in range(iterations):
        log.info('RL TV Iter {}/{}, lbd = {}'.format(i, iterations, lbd))
        regularization = np.ones(img_list[0].shape)

        for img_idx, img in enumerate(latent_estimate):
            estimate_convolved = irfft2(
                np.multiply(psf_f[img_idx],
                            rfft2(latent_estimate[img_idx],
                                  fsize)))[fslice].real
            estimate_convolved = _centered(estimate_convolved, img.shape)
            relative_blur = div0(img_list[img_idx], estimate_convolved)
            error_estimate[img_idx] = irfft2(
                np.multiply(psf_flipped_f[img_idx],
                            rfft2(relative_blur, fsize)), fsize)[fslice].real
            error_estimate[img_idx] = _centered(error_estimate[img_idx],
                                                img.shape)
            regularization += 1.0 - (lbd * divergence(
                latent_estimate[img_idx] /
                np.linalg.norm(latent_estimate[img_idx], ord=1)))
            latent_estimate[img_idx] = np.multiply(latent_estimate[img_idx],
                                                   error_estimate[img_idx])

        for img_idx, img in enumerate(img_list):
            latent_estimate[img_idx] = np.divide(
                latent_estimate[img_idx],
                regularization / float(len(img_list)))

    for img_idx, img in enumerate(latent_estimate):
        latent_estimate[img_idx] += min_value[img_idx]
        latent_estimate[img_idx] = unpad(latent_estimate[img_idx],
                                         np.max(psf_list[0].shape))

    return np.sum(latent_estimate, axis=0)
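
Example #3 calls several helpers that are not shown here (div0, divergence, unpad; _centered matches the helper defined in Example #15). Plausible minimal versions, offered as assumptions rather than the original code:

import numpy as np

def div0(a, b):
    # elementwise a / b, returning 0 where b == 0
    out = np.zeros_like(a, dtype=float)
    np.divide(a, b, out=out, where=(b != 0))
    return out

def unpad(arr, pad):
    # undo a scalar-width np.pad
    return arr[pad:-pad, pad:-pad]

def divergence(f, eps=1e-8):
    # div(grad f / |grad f|), the total-variation regularization term
    gy, gx = np.gradient(f)
    norm = np.sqrt(gx**2 + gy**2) + eps
    return np.gradient(gy / norm, axis=0) + np.gradient(gx / norm, axis=1)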
Example #4
def fft_correlate_images(image_a,
                         image_b,
                         correlation_method="circular",
                         normalized_correlation=True):
    """ FFT based cross correlation
    of two images with multiple views of np.stride_tricks()
    The 2D FFT should be applied to the last two axes (-2,-1) and the
    zero axis is the number of the interrogation window
    This should also work out of the box for rectangular windows.
    Parameters
    ----------
    image_a : 3d np.ndarray, first dimension is the number of windows,
        and two last dimensions are interrogation windows of the first image

    image_b : similar

    correlation_method : string
        one of the two methods implemented: 'circular' or 'linear'
        [default: 'circular'].

    normalized_correlation : bool
        decides whether a normalized correlation is done or not: True or False
        [default: True].
    """

    if normalized_correlation:
        # remove the effect of stronger laser or
        # longer exposure for frame B
        # image_a = match_histograms(image_a, image_b)

        # remove mean background, normalize to 0..1 range
        image_a = normalize_intensity(image_a)
        image_b = normalize_intensity(image_b)

    s1 = np.array(image_a.shape[-2:])
    s2 = np.array(image_b.shape[-2:])

    if correlation_method == "linear":
        # have to be normalized, mainly because of zero padding
        size = s1 + s2 - 1
        fsize = 2**np.ceil(np.log2(size)).astype(int)
        fslice = (slice(0, image_a.shape[0]),
                  slice((fsize[0] - s1[0]) // 2, (fsize[0] + s1[0]) // 2),
                  slice((fsize[1] - s1[1]) // 2, (fsize[1] + s1[1]) // 2))
        f2a = rfft2(image_a, fsize, axes=(-2, -1)).conj()
        f2b = rfft2(image_b, fsize, axes=(-2, -1))
        corr = fftshift(irfft2(f2a * f2b).real, axes=(-2, -1))[fslice]
    elif correlation_method == "circular":
        corr = fftshift(irfft2(rfft2(image_a).conj() * rfft2(image_b)).real,
                        axes=(-2, -1))
    else:
        raise ValueError("correlation method is not implemented")

    if normalized_correlation:
        corr = corr / (s2[0] * s2[1])  # for extended search area
        corr = np.clip(corr, 0, 1)
    return corr
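
normalize_intensity is not defined in this listing; a plausible per-window version (an assumption, not the library's exact code):

import numpy as np

def normalize_intensity(windows):
    # subtract each window's mean background and scale by its std so
    # that a perfect match gives a correlation peak near 1
    windows = windows - windows.mean(axis=(-2, -1), keepdims=True)
    std = windows.std(axis=(-2, -1), keepdims=True)
    std[std == 0] = 1.0          # guard against empty windows
    return windows / std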
Example #5
	def fromEBmodes(cls,fourier_E,fourier_B,angle=3.14*deg):

		"""
		This class method builds a shear map from its E and B mode components

		:param fourier_E: E mode of the shear map in fourier space
		:type fourier_E: numpy 2D array, must be of type np.complex128 and must have a shape that is appropriate for a real fourier transform, i.e. (N,N/2 + 1); N should be a power of 2

		:param fourier_B: B mode of the shear map in fourier space
		:type fourier_B: numpy 2D array, must be of type np.complex128 and must have a shape that is appropriate for a real fourier transform, i.e. (N,N/2 + 1); N should be a power of 2

		:param angle: Side angle of the real space map in degrees
		:type angle: float.

		:returns: the corresponding ShearMap instance

		:raises: AssertionErrors for inappropriate inputs

		"""

		assert fourier_E.dtype == np.complex128 and fourier_B.dtype == np.complex128
		assert fourier_E.shape[1] == fourier_E.shape[0]/2 + 1
		assert fourier_B.shape[1] == fourier_B.shape[0]/2 + 1
		assert fourier_E.shape == fourier_B.shape

		#Compute frequencies
		lx = rfftfreq(fourier_E.shape[0])
		ly = fftfreq(fourier_E.shape[0])

		#Safety check
		assert len(lx)==fourier_E.shape[1]
		assert len(ly)==fourier_E.shape[0]

		#Compute sines and cosines of rotation angles
		l_squared = lx[np.newaxis,:]**2 + ly[:,np.newaxis]**2
		l_squared[0,0] = 1.0

		sin_2_phi = 2.0 * lx[np.newaxis,:] * ly[:,np.newaxis] / l_squared
		cos_2_phi = (lx[np.newaxis,:]**2 - ly[:,np.newaxis]**2) / l_squared

		sin_2_phi[0,0] = 0.0
		cos_2_phi[0,0] = 0.0

		#Invert E/B modes and find the components of the shear
		ft_data1 = cos_2_phi * fourier_E - sin_2_phi * fourier_B
		ft_data2 = sin_2_phi * fourier_E + cos_2_phi * fourier_B

		#Invert Fourier transforms
		data1 = irfft2(ft_data1)
		data2 = irfft2(ft_data2)

		#Instantiate new shear map class
		new = cls(np.array([data1,data2]),angle)
		setattr(new,"fourier_E",fourier_E)
		setattr(new,"fourier_B",fourier_B)

		return new
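
The rotation used above is orthogonal away from the zero mode (cos_2_phi**2 + sin_2_phi**2 == 1), so the inverse map from shear components back to E/B modes is just its transpose; a standalone sketch under the same numpy.fft conventions:

import numpy as np
from numpy.fft import rfft2, rfftfreq, fftfreq

def shear_to_EB(data1, data2):
    # forward rotation: the inverse of fromEBmodes above
    N = data1.shape[0]
    lx = rfftfreq(N)
    ly = fftfreq(N)
    l_squared = lx[None, :]**2 + ly[:, None]**2
    l_squared[0, 0] = 1.0       # both sin and cos vanish at (0, 0) anyway
    sin_2_phi = 2.0 * lx[None, :] * ly[:, None] / l_squared
    cos_2_phi = (lx[None, :]**2 - ly[:, None]**2) / l_squared
    ft1, ft2 = rfft2(data1), rfft2(data2)
    fourier_E = cos_2_phi * ft1 + sin_2_phi * ft2
    fourier_B = -sin_2_phi * ft1 + cos_2_phi * ft2
    return fourier_E, fourier_B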
Example #6
    def fromEBmodes(cls, fourier_E, fourier_B, angle=3.14 * deg):
        """
		This class method builds a shear map from its E and B mode components

		:param fourier_E: E mode of the shear map in fourier space
		:type fourier_E: numpy 2D array, must be of type np.complex128 and must have a shape that is appropriate for a real fourier transform, i.e. (N,N/2 + 1); N should be a power of 2

		:param fourier_B: B mode of the shear map in fourier space
		:type fourier_B: numpy 2D array, must be of type np.complex128 and must have a shape that is appropriate for a real fourier transform, i.e. (N,N/2 + 1); N should be a power of 2

		:param angle: Side angle of the real space map in degrees
		:type angle: float.

		:returns: the corresponding ShearMap instance

		:raises: AssertionErrors for inappropriate inputs

		"""

        assert fourier_E.dtype == np.complex128 and fourier_B.dtype == np.complex128
        assert fourier_E.shape[1] == fourier_E.shape[0] / 2 + 1
        assert fourier_B.shape[1] == fourier_B.shape[0] / 2 + 1
        assert fourier_E.shape == fourier_B.shape

        #Compute frequencies
        lx = rfftfreq(fourier_E.shape[0])
        ly = fftfreq(fourier_E.shape[0])

        #Safety check
        assert len(lx) == fourier_E.shape[1]
        assert len(ly) == fourier_E.shape[0]

        #Compute sines and cosines of rotation angles
        l_squared = lx[np.newaxis, :]**2 + ly[:, np.newaxis]**2
        l_squared[0, 0] = 1.0

        sin_2_phi = 2.0 * lx[np.newaxis, :] * ly[:, np.newaxis] / l_squared
        cos_2_phi = (lx[np.newaxis, :]**2 - ly[:, np.newaxis]**2) / l_squared

        sin_2_phi[0, 0] = 0.0
        cos_2_phi[0, 0] = 0.0

        #Invert E/B modes and find the components of the shear
        ft_data1 = cos_2_phi * fourier_E - sin_2_phi * fourier_B
        ft_data2 = sin_2_phi * fourier_E + cos_2_phi * fourier_B

        #Invert Fourier transforms
        data1 = irfft2(ft_data1)
        data2 = irfft2(ft_data2)

        #Instantiate new shear map class
        new = cls(np.array([data1, data2]), angle)
        setattr(new, "fourier_E", fourier_E)
        setattr(new, "fourier_B", fourier_B)

        return new
Example #7
    def rhs(self, ω_hat):
        # aliasing
        ω_hat[np.where(self.K_sq > self.Nx * self.Ny / 9.)] = 0

        ω = irfft2(ifftshift(ω_hat, axes=0))
        ux = irfft2(ifftshift(1j * self.KY * ω_hat / self.K_sq,
                    axes=0))
        uy = irfft2(ifftshift(-1j * self.KX * ω_hat / self.K_sq,
                    axes=0))
        tmp = 1j * self.KX * fftshift(rfft2(ux * ω), axes=0) \
            + 1j * self.KY * fftshift(rfft2(uy * ω), axes=0)

        return ω_hat - tmp * self.dt
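
rhs assumes precomputed wavenumber grids on the class; one consistent way to build them for the axis-0-shifted rfft2 layout used above (an assumption about the setup code, including which axis is x):

import numpy as np
from numpy.fft import fftfreq, rfftfreq, fftshift

Nx = Ny = 64
kx = fftshift(fftfreq(Nx, d=1.0 / Nx))    # full axis, shifted like ω_hat
ky = rfftfreq(Ny, d=1.0 / Ny)             # half axis produced by rfft2
KX, KY = np.meshgrid(kx, ky, indexing='ij')   # shape (Nx, Ny // 2 + 1)
K_sq = KX**2 + KY**2
K_sq[K_sq == 0.0] = 1.0    # avoid dividing the mean mode by zero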
Example #8
    def output(self):
        """ """

        # One dimension
        if len(self._source.shape) == 1:
            source = self._actual_source
            # Use FFT convolution
            if self._fft:
                if not self._toric:
                    P = rfft(source,self._fft_shape[0])*self._fft_weights
                    R = irfft(P, self._fft_shape[0]).real
                    R = R[self._fft_indices]
                else:
                    P = rfft(source)*self._fft_weights
                    R = irfft(P,source.shape[0]).real

                # if self._toric:
                #     R  = ifft(fft(source)*self._fft_weights).real
                # else:
                #     n = source.shape[0]
                #     self._src_holder[n//2:n//2+n] = source
                #     R = ifft(fft(self._src_holder)*self._fft_weights)
                #     R = R.real[n//2:n//2+n]
            # Use regular convolution
            else:
                R = convolve1d(source, self._weights[::-1], self._toric)
            if self._src_rows is not None:
                R = R[self._src_rows]
            return R.reshape(self._target.shape)
        # Two dimensions
        else:
            source = self._actual_source
            # Use FFT convolution
            if self._fft:
                if not self._toric:
                    P = rfft2(source,self._fft_shape)*self._fft_weights
                    R = irfft2(P, self._fft_shape).real
                    R = R[self._fft_indices]
                else:
                    P = rfft2(source)*self._fft_weights
                    R = irfft2(P,source.shape).real

            # Use SVD convolution
            else:
                R = convolve2d(source, self._weights, self._USV, self._toric)
            if self._src_rows is not None and self._src_cols is not None:
                R = R[self._src_rows, self._src_cols]
        return R.reshape(self._target.shape)
Example #9
def _correlate2d_fft(data0, kernel0, output=None, mode="nearest", cval=0.0):
    """_correlate2d_fft does 2d correlation of 'data' with 'kernel', storing
    the result in 'output' using the FFT to perform the correlation.

    supported 'mode's include:
        'nearest'   elements beyond boundary come from nearest edge pixel.
        'wrap'      elements beyond boundary come from the opposite array edge.
        'reflect'   elements beyond boundary come from reflection on same array
                    edge.
        'constant'  elements beyond boundary are set to 'cval'
    """

    shape = data0.shape
    kshape = kernel0.shape
    oversized = (np.array(shape) + np.array(kshape))

    dy = kshape[0] // 2
    dx = kshape[1] // 2

    kernel = np.zeros(oversized, dtype=np.float64)
    # convolution <-> correlation
    kernel[:kshape[0], :kshape[1]] = kernel0[::-1, ::-1]
    data = iraf_frame.frame(data0, oversized, mode=mode, cval=cval)

    complex_result = (np.iscomplexobj(data) or np.iscomplexobj(kernel))

    Fdata = dft.fft2(data)
    del data

    Fkernel = dft.fft2(kernel)
    del kernel

    np.multiply(Fdata, Fkernel, Fdata)
    del Fkernel

    if complex_result:
        convolved = dft.ifft2(Fdata)
    else:
        convolved = dft.irfft2(Fdata, s=oversized)

    ks0, ks1 = kshape[0] - 1, kshape[1] - 1
    result = convolved[ks0:shape[0] + ks0, ks1:shape[1] + ks1]

    if output is not None:
        output._copyFrom(result)
    else:
        return result
Example #10
    def output(self):
        """ """

        # One dimension
        if len(self._source.shape) == 1:
            source = self._actual_source
            # Use FFT convolution
            if self._fft:
                if not self._toric:
                    P = rfft(source, self._fft_shape[0]) * self._fft_weights
                    R = irfft(P, self._fft_shape[0]).real
                    R = R[self._fft_indices]
                else:
                    P = rfft(source) * self._fft_weights
                    R = irfft(P, source.shape[0]).real

                # if self._toric:
                #     R  = ifft(fft(source)*self._fft_weights).real
                # else:
                #     n = source.shape[0]
                #     self._src_holder[n//2:n//2+n] = source
                #     R = ifft(fft(self._src_holder)*self._fft_weights)
                #     R = R.real[n//2:n//2+n]
            # Use regular convolution
            else:
                R = convolve1d(source, self._weights[::-1], self._toric)
            if self._src_rows is not None:
                R = R[self._src_rows]
            return R.reshape(self._target.shape)
        # Two dimensions
        else:
            source = self._actual_source
            # Use FFT convolution
            if self._fft:
                if not self._toric:
                    P = rfft2(source, self._fft_shape) * self._fft_weights
                    R = irfft2(P, self._fft_shape).real
                    R = R[self._fft_indices]
                else:
                    P = rfft2(source) * self._fft_weights
                    R = irfft2(P, source.shape).real

            # Use SVD convolution
            else:
                R = convolve2d(source, self._weights, self._USV, self._toric)
            if self._src_rows is not None and self._src_cols is not None:
                R = R[self._src_rows, self._src_cols]
        return R.reshape(self._target.shape)
Example #11
def _correlate2d_fft(data0, kernel0, output=None, mode="nearest", cval=0.0):
    """_correlate2d_fft does 2d correlation of 'data' with 'kernel', storing
    the result in 'output' using the FFT to perform the correlation.

    supported 'mode's include:
        'nearest'   elements beyond boundary come from nearest edge pixel.
        'wrap'      elements beyond boundary come from the opposite array edge.
        'reflect'   elements beyond boundary come from reflection on same array
                    edge.
        'constant'  elements beyond boundary are set to 'cval'
    """

    shape = data0.shape
    kshape = kernel0.shape
    oversized = (np.array(shape) + np.array(kshape))

    dy = kshape[0] // 2
    dx = kshape[1] // 2

    kernel = np.zeros(oversized, dtype=np.float64)
    # convolution <-> correlation
    kernel[:kshape[0], :kshape[1]] = kernel0[::-1,::-1]
    data = iraf_frame.frame(data0, oversized, mode=mode, cval=cval)

    complex_result = (np.iscomplexobj(data) or np.iscomplexobj(kernel))

    Fdata = dft.fft2(data)
    del data

    Fkernel = dft.fft2(kernel)
    del kernel

    np.multiply(Fdata, Fkernel, Fdata)
    del Fkernel

    if complex_result:
        convolved = dft.ifft2(Fdata)
    else:
        convolved = dft.irfft2(Fdata, s=oversized)

    ks0, ks1 = kshape[0] - 1, kshape[1] - 1
    result = convolved[ks0:shape[0] + ks0, ks1:shape[1] + ks1]

    if output is not None:
        output._copyFrom(result)
    else:
        return result
Example #12
 def getDisplacements2D(self, Z=None, window=False):
     """
     Use phase correlation to find the relative displacement between
     each time step
     """
     if Z is None:
         Z = self.getNbPixelsPerFrame()/self.getNbPixelsPerSlice()/2
     shape = np.asarray(self.get2DShape())
     if window:
         ham = np.hamming(shape[1])*np.atleast_2d(np.hamming(shape[0])).T
     else:
         ham = 1.0
     displs = np.zeros((self.getNbFrames(),2))
     a = rfft2(self.get2DSlice(T=0, Z=Z)*ham)
     for t in range(1,self.getNbFrames()):
         b = rfft2(self.get2DSlice(T=t, Z=Z)*ham)
         #calculate the normalized cross-power spectrum
         #R = numexpr.evaluate(
         #    'a*complex(real(b), -imag(b)/abs(a*complex(real(b), -imag(b))))'
         #    )
         R = a*b.conj()
         Ra = np.abs(a*b.conj())
         R[Ra>0] /= Ra[Ra>0]
         r = irfft2(R)
         #Get the periodic position of the peak
         l = r.argmax()
         displs[t] = np.unravel_index(l, r.shape)
         #prepare next step
         a = b
     return np.where(displs<shape/2, displs, displs-shape)
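
The phase-correlation core above can be exercised without the class machinery; a minimal sketch with a known displacement (pure numpy, made-up data):

import numpy as np
from numpy.fft import rfft2, irfft2

rng = np.random.default_rng(1)
frame0 = rng.random((64, 64))
frame1 = np.roll(frame0, (5, -3), axis=(0, 1))   # known shift of frame1

a, b = rfft2(frame0), rfft2(frame1)
R = a * b.conj()
Ra = np.abs(R)
R[Ra > 0] /= Ra[Ra > 0]        # keep only the phase
r = irfft2(R)
displ = np.array(np.unravel_index(r.argmax(), r.shape))
shape = np.array(r.shape)
displ = np.where(displ < shape // 2, displ, displ - shape)
print(displ)   # -> [-5  3], i.e. frame0 relative to frame1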
Example #13
def ifftn_mpi(fu, u):
    """ifft in three directions using mpi.
    Need to do ifft in reversed order of fft
    """
    if num_processes == 1:
        #u[:] = irfft(ifft(ifft(fu, axis=0), axis=2), axis=1)
        u[:] = irfftn(fu, axes=(0,2,1))
        return
    
    # Do first owned direction
    Uc_hat[:] = ifft(fu, axis=0)
    
    # Communicate all values
    comm.Alltoall([Uc_hat, MPI.DOUBLE_COMPLEX], [U_mpi, MPI.DOUBLE_COMPLEX])
    for i in range(num_processes): 
        Uc_hatT[:, :, i*Np:(i+1)*Np] = U_mpi[i]

    #for i in range(num_processes):
    #    if not i == rank:
    #        comm.Sendrecv_replace([Uc_send[i], MPI.DOUBLE_COMPLEX], i, 0, i, 0)   
    #    Uc_hatT[:, :, i*Np:(i+1)*Np] = Uc_send[i]
           
    # Do last two directions
    #u[:] = irfft(ifft(Uc_hatT, axis=2), axis=1)
    u[:] = irfft2(Uc_hatT, axes=(2,1))
Example #14
def convolve_dcr_image(flux_arr, x_loc, y_loc, bandpass=None, x_size=None, y_size=None, seed=None,
                       psf=None, pad_image=1.5, pixel_scale=None, kernel_radius=None,
                       oversample_image=1, photon_noise=False, sky_noise=0.0, verbose=True, **kwargs):
    """Wrapper to call fast_dft with multiple DCR planes."""
    x_size_use = int(x_size * pad_image)
    y_size_use = int(y_size * pad_image)
    oversample_image = int(oversample_image)
    pixel_scale_use = pixel_scale / oversample_image
    x0 = oversample_image * ((x_size_use - x_size) // 2)
    x1 = x0 + x_size * oversample_image
    y0 = oversample_image * ((y_size_use - y_size) // 2)
    y1 = y0 + y_size * oversample_image
    x_loc_use = x_loc * oversample_image + x0
    y_loc_use = y_loc * oversample_image + y0
    x_size_use *= oversample_image
    y_size_use *= oversample_image
    timing_model = -time.time()
    source_image = fast_dft(flux_arr, x_loc_use, y_loc_use, x_size=x_size_use, y_size=y_size_use,
                            kernel_radius=kernel_radius, **kwargs)
    timing_model += time.time()
    n_star = len(x_loc)
    if oversample_image > 1:
        bright_star = "bright "
    else:
        bright_star = ""
    if verbose:
        if n_star == 1:
            print("Time to model %i %sstar: [%0.3fs]"
                  % (n_star, bright_star, timing_model))
        else:
            print("Time to model %i %sstars: [%0.3fs | %0.5fs per star]"
                  % (n_star, bright_star, timing_model, timing_model / n_star))
    rand_gen = np.random
    if seed is not None:
        rand_gen.seed(seed - 1)
    # The images are purely real, so we can save time by using the real FFT,
    # which uses only half of the complex plane
    convol = np.zeros((y_size_use, x_size_use // 2 + 1), dtype='complex64')
    dcr_gen = dcr_generator(bandpass, pixel_scale=pixel_scale_use, **kwargs)
    timing_fft = -time.time()

    for _i, offset in enumerate(dcr_gen):
        source_image_use = source_image[_i]

        psf_image = psf.drawImage(scale=pixel_scale_use, method='fft', offset=offset,
                                  nx=x_size_use, ny=y_size_use, use_true_center=False)
        if photon_noise:
            base_noise = np.random.normal(scale=1.0, size=(y_size_use, x_size_use))
            base_noise *= np.sqrt(np.abs(source_image_use) / photons_per_adu)
            source_image_use += base_noise
        if sky_noise > 0:
            source_image_use += (rand_gen.normal(scale=sky_noise, size=(y_size_use, x_size_use))
                                 / np.sqrt(bandpass_nstep(bandpass)))
        convol += rfft2(source_image_use) * rfft2(psf_image.array)
    return_image = np.real(fftshift(irfft2(convol)))
    timing_fft += time.time()
    if verbose:
        print("FFT timing for %i DCR planes: [%0.3fs | %0.3fs per plane]"
              % (_i + 1, timing_fft, timing_fft / (_i + 1)))
    return return_image[y0:y1:oversample_image, x0:x1:oversample_image] * oversample_image**2
Example #15
def fft_convolve(in1, in2, times):
    def _centered(arr, newsize):
        # Return the center newsize portion of the array.
        currsize = np.array(arr.shape)
        startind = (currsize - newsize) // 2
        endind = startind + newsize
        myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
        return arr[tuple(myslice)]

    if times == 0:
        return in1.copy()


    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    shape = s1 + (s2 - 1) * times

    # Speed up FFT by padding to optimal size for FFTPACK
    fshape = [next_fast_len(int(d)) for d in shape]
    fslice = tuple([slice(0, int(sz)) for sz in shape])

    resfft = fast_power(rfft2(in2, fshape), times)
    resfft = resfft * rfft2(in1, fshape)
    ret = irfft2(resfft, fshape)[fslice].copy()
    ret = ret.real

    return _centered(ret, s1)
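
fast_power is not part of this listing; for a non-negative integer times it is plausibly spectrum ** times (each spectral multiplication stacks one self-convolution of the kernel in real space), e.g. via binary exponentiation — an assumed implementation:

import numpy as np

def fast_power(spectrum, times):
    # spectrum ** times via repeated squaring; equivalent to convolving
    # the kernel with itself (times - 1) times in real space
    result = np.ones_like(spectrum)
    base = spectrum
    while times > 0:
        if times & 1:
            result = result * base
        base = base * base
        times >>= 1
    return result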
Example #16
def correlate_windows(window_a, window_b, corr_method='fft', nfftx=0, nffty=0):
    """Compute correlation function between two interrogation windows.

    The correlation function can be computed by using the correlation
    theorem to speed up the computation.

    Parameters
    ----------
    window_a : 2d np.ndarray
        a two-dimensional array for the first interrogation window.

    window_b : 2d np.ndarray
        a two-dimensional array for the second interrogation window.

    corr_method   : string
        one of the two methods currently implemented: 'fft' or 'direct'.
        Default is 'fft', which is much faster.

    nfftx   : int
        the size of the 2D FFT in x-direction,
        [default: 2 x windows_a.shape[0] is recommended].

    nffty   : int
        the size of the 2D FFT in y-direction,
        [default: 2 x windows_a.shape[1] is recommended].


    Returns
    -------
    corr : 2d np.ndarray
        a two-dimensional array for the correlation function.

    Note that, to use 2**N-sized windows for a faster FFT, we adopt a
    slightly different convention for the size of the correlation map.
    The theory says it is M+N-1, and the 'direct' method returns that
    size, while the FFT-based method returns M+N, where M is the
    window_size and N is the search_area_size. This leads to an
    inconsistency in the output.
    """
    
    if corr_method == 'fft':
        window_b = np.conj(window_b[::-1, ::-1])
        if nfftx == 0:
            nfftx = nextpower2(window_b.shape[0] + window_a.shape[0])  
        if nffty == 0:
            nffty = nextpower2(window_b.shape[1] + window_a.shape[1]) 
        
        f2a = rfft2(normalize_intensity(window_a), s=(nfftx, nffty))
        f2b = rfft2(normalize_intensity(window_b), s=(nfftx, nffty))
        corr = irfft2(f2a * f2b).real
        corr = corr[:window_a.shape[0] + window_b.shape[0] - 1,
                    :window_b.shape[1] + window_a.shape[1] - 1]
        return corr
    elif corr_method == 'direct':
        return convolve2d(normalize_intensity(window_a),
                          normalize_intensity(window_b[::-1, ::-1]), 'full')

    else:
        raise ValueError('method is not implemented')
Example #17
    def two_sph_bessel_binave(self, ell1, ell2, binwidth_dlny1,
                              binwidth_dlny2):
        """
		Bin-averaging for 3D statistics: alpha_pow = D = 3
		Calculate F(y_1,y_2) = \int_0^\infty dx_1 / x_1 \int_0^\infty dx_2 / x_2 * f(x_1,x_2) * j_{\ell_1}(x_1y_1) * j_{\ell_2}(x_2y_2),
		where j_\ell is the spherical Bessel func of order ell.
		array y is set as y[:] = 1/x[::-1]
		"""
        D = 3
        s_d_lambda1 = (np.exp(D * binwidth_dlny1) - 1.) / D
        s_d_lambda2 = (np.exp(D * binwidth_dlny2) - 1.) / D
        g1 = g_l_smooth(ell1, self.z1, binwidth_dlny1, D) / s_d_lambda1
        g2 = g_l_smooth(ell2, self.z2, binwidth_dlny2, D) / s_d_lambda2

        mat = np.conj(
            (self.c_mn * (self.x20 * self.y20)**(-1j * self.eta_n) * g2).T *
            (self.x10 * self.y10)**(-1j * self.eta_m) * g1).T
        mat_right = mat[:, self.N2 // 2:]
        mat_adjust = np.vstack(
            (mat_right[self.N1 // 2:, :], mat_right[1:self.N1 // 2, :]))
        # print(mat_adjust[0][1])
        Fy1y2 = ((irfft2(mat_adjust) * np.pi / 16. / self.y2**(self.nu2)).T /
                 self.y1**(self.nu1)).T

        y1_out = self.y1[self.N_extrap_high:self.N1 - self.N_extrap_low]
        y2_out = self.y2[self.N_extrap_high:self.N2 - self.N_extrap_low]
        Fy1y2_out = Fy1y2[self.N_extrap_high:self.N1 - self.N_extrap_low,
                          self.N_extrap_high:self.N2 - self.N_extrap_low]
        return y1_out, y2_out, Fy1y2_out
Example #18
    def two_sph_bessel(self, ell1, ell2):
        """
		Calculate F(y_1,y_2) = \int_0^\infty dx_1 / x_1 \int_0^\infty dx_2 / x_2 * f(x_1,x_2) * j_{\ell_1}(x_1y_1) * j_{\ell_2}(x_2y_2),
		where j_\ell is the spherical Bessel func of order ell.
		array y is set as y[:] = 1/x[::-1]
		"""

        g1 = g_l(ell1, self.z1)
        g2 = g_l(ell2, self.z2)

        mat = np.conj(
            (self.c_mn * (self.x20 * self.y20)**(-1j * self.eta_n) * g2).T *
            (self.x10 * self.y10)**(-1j * self.eta_m) * g1).T
        mat_right = mat[:, self.N2 // 2:]
        mat_adjust = np.vstack(
            (mat_right[self.N1 // 2:, :], mat_right[1:self.N1 // 2, :]))
        # print(mat_adjust[0][1])
        Fy1y2 = ((irfft2(mat_adjust) * np.pi / 16. / self.y2**self.nu2).T /
                 self.y1**self.nu1).T
        # print(Fy1y2)
        y1_out = self.y1[self.N_extrap_high:self.N1 - self.N_extrap_low]
        y2_out = self.y2[self.N_extrap_high:self.N2 - self.N_extrap_low]
        Fy1y2_out = Fy1y2[self.N_extrap_high:self.N1 - self.N_extrap_low,
                          self.N_extrap_high:self.N2 - self.N_extrap_low]
        return y1_out, y2_out, Fy1y2_out
Example #19
File: vision.py Project: Python3pkg/Geist
def convolution(bin_template, bin_image, tollerance=0.5):
    expected = numpy.count_nonzero(bin_template)
    ih, iw = bin_image.shape
    th, tw = bin_template.shape

    # Pad image to even dimensions
    if ih % 2 or iw % 2:
        if ih % 2:
            ih += 1
        if iw % 2:
            iw += 1
        bin_image = pad_bin_image_to_shape(bin_image, (ih, iw))
    if expected == 0:
        return []

    # Calculate the convolution of the FFT's of the image & template
    convolution_freqs = rfft2(bin_image) * rfft2(bin_template[::-1, ::-1],
                                                 bin_image.shape)
    # Reverse the FFT to find the result image
    convolution_image = irfft2(convolution_freqs)
    # At this point, the maximum point in convolution_image should be the
    # bottom right (why?) of the area of greatest match

    # The areas in the result image within expected +- tollerance are where we
    # saw matches
    found_bitmap = ((convolution_image >
                     (expected - tollerance)) & (convolution_image <
                                                 (expected + tollerance)))

    match_points = numpy.transpose(numpy.nonzero(found_bitmap))  # bottom right

    # Find the top left point from the template (remember match_point is
    # inside the template, hence the -1)
    return [((fx - (tw - 1)), (fy - (th - 1))) for (fy, fx) in match_points]
Example #20
def _filter(filter, array):
    X = fft.rfft2(array)
    X = fft.fftshift(X)
    numpy.multiply(X, filter, out=X)
    X = fft.ifftshift(X)
    x = fft.irfft2(X, s=array.shape)
    return x
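
A usage sketch for _filter, assuming numpy is imported and fft is numpy.fft; the Gaussian low-pass below is hypothetical, built to match the fftshift-ed rfft2 layout (applying fftshift to the frequency vectors aligns them with the shifted spectrum):

import numpy
from numpy import fft

array = numpy.random.rand(64, 64)
ky = fft.fftshift(fft.fftfreq(array.shape[0]))[:, None]
kx = fft.fftshift(fft.rfftfreq(array.shape[1]))[None, :]
lowpass = numpy.exp(-(kx**2 + ky**2) / (2 * 0.05**2))
smoothed = _filter(lowpass, array)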
Example #21
	def fromConvPower(self,power_func,seed=0,**kwargs):

		"""
		This method uses a supplied power spectrum to generate correlated noise maps in real space via FFTs

		:param power_func: function that given a numpy array of l's returns a numpy array with the according Pl's (this is the input power spectrum); alternatively you can pass an array (l,Pl) and the power spectrum will be calculated with scipy's interpolation routines
		:type power_func: function with the above specifications, or numpy array (l,Pl) of shape (2,n) 

		:param seed: seed of the random generator 
		:type seed: int.

		:param kwargs: keyword arguments to be passed to power_func, or to the interpolate.interp1d routine

		:returns: ConvergenceMap instance of the same exact shape as the one used as blueprint

		"""
		assert self.label == "convergence"

		#Initialize random number generator
		np.random.seed(seed)

		#Generate a random Fourier realization and invert it
		ft_map = self._fourierMap(power_func,**kwargs)
		noise_map = irfft2(ft_map)

		return ConvergenceMap(noise_map,self.side_angle)
Example #22
def _filter(filter, array):
    X = fft.rfft2(array)
    X = fft.fftshift(X)
    numpy.multiply(X, filter, out=X)
    X = fft.ifftshift(X)
    x = fft.irfft2(X, s=array.shape)
    return x
Example #23
def _BandPassFilter(image, len_noise, len_object):
    """
    bandpass filter implementation.
    Source: http://physics-server.uoregon.edu/~raghu/particle_tracking.html
    """
    b = len_noise
    w = round(len_object)
    N = 2 * w + 1

    # Gaussian Convolution Kernel
    sm = numpy.arange(0, N, dtype=float)
    r = (sm - w) / (2 * b)
    gx = numpy.power(math.e, -r ** 2) / (2 * b * math.sqrt(math.pi))
    gx = numpy.reshape(gx, (gx.shape[0], 1))
    gy = gx.conj().transpose()

    # Boxcar average kernel, background
    bx = numpy.zeros((1, N), float) + 1 / N
    by = bx.conj().transpose()

    # Convolution with the matrix and kernels
    gxy = gx * gy
    bxy = bx * by
    kernel = fft.rfft2(gxy - bxy, image.shape)

    res = fft.irfft2(fft.rfft2(image) * kernel)
    arr_out = numpy.zeros((image.shape))
    arr_out[w:-w, w:-w] = res[2 * w:, 2 * w:]
    res = numpy.maximum(arr_out, 0)
    return res
Example #24
def ifftn_mpi(fu, u):
    # Inverse Fourier transform
    Uc_hat[:] = ifft(fu, axis=0)
    comm.Alltoall([Uc_hat, MPI.DOUBLE_COMPLEX], [U_mpi, MPI.DOUBLE_COMPLEX])
    Uc_hatT[:] = rollaxis(U_mpi, 1).reshape(Uc_hatT.shape)
    u[:] = irfft2(Uc_hatT, axes=(1, 2))
    return u
Example #25
def _BandPassFilter(image, len_noise, len_object):
    """
    bandpass filter implementation. 
    Source: http://physics-server.uoregon.edu/~raghu/particle_tracking.html
    """
    b = len_noise
    w = round(len_object)
    N = 2 * w + 1

    # Gaussian Convolution Kernel
    sm = numpy.arange(0, N, dtype=float)
    r = (sm - w) / (2 * b)
    gx = numpy.power(math.e, -r**2) / (2 * b * math.sqrt(math.pi))
    gx = numpy.reshape(gx, (gx.shape[0], 1))
    gy = gx.conj().transpose()

    # Boxcar average kernel, background
    bx = numpy.zeros((1, N), float) + 1 / N
    by = bx.conj().transpose()

    # Convolution with the matrix and kernels
    gxy = gx * gy
    bxy = bx * by
    kernel = fft.rfft2(gxy - bxy, image.shape)

    res = fft.irfft2(fft.rfft2(image) * kernel)
    arr_out = numpy.zeros((image.shape))
    arr_out[w:-w, w:-w] = res[2 * w:, 2 * w:]
    res = numpy.maximum(arr_out, 0)
    return res
Example #26
def dog(I, k0=1.0, k1=3.0, w=9, t=200.0, return_filt=False):
    """
    Convolve the image with a difference-of-Gaussians kernel,
    then apply a static threshold.

    args
    ----
        I           :   2D ndarray
        k0          :   float, positive kernel sigma
        k1          :   float, negative kernel sigma
        w           :   int, kernel size
        t           :   float, threshold
        return_filt :   bool, also return the filtered image

    returns
    -------
        if return_filt:
        (
            2D ndarray, the post-convolution image;
            2D ndarray, the thresholded binary image;
            2D ndarray, shape (n_spots, 2), the y and x 
                coordinates of each spot
        )
        else
            2D ndarray, shape (n_spots, 2), the y and x
                coordinates of each spot

    """
    # Generate the transfer function
    dog_tf = _dog_setup(*I.shape, k0, k1, w)

    # Perform the convolution
    return threshold_image(fftshift(irfft2(rfft2(I) * dog_tf, s=I.shape)),
                           t=t,
                           return_filt=return_filt)
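
_dog_setup is not shown; a plausible construction (an assumption) samples both Gaussians on a w-by-w grid, embeds the difference kernel in an image-sized frame and returns its rfft2:

import numpy as np
from numpy.fft import rfft2

def _dog_setup(H, W, k0, k1, w):
    # difference-of-Gaussians kernel as an H x W transfer function
    y, x = np.indices((w, w)) - (w - 1) / 2.0
    g0 = np.exp(-(x**2 + y**2) / (2 * k0**2))
    g1 = np.exp(-(x**2 + y**2) / (2 * k1**2))
    kernel = g0 / g0.sum() - g1 / g1.sum()
    frame = np.zeros((H, W))
    frame[:w, :w] = kernel
    return rfft2(frame)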
Example #27
def matches_exist(template, image, tolerance=1):
    # just taken from convolution def
    expected = numpy.count_nonzero(template)
    ih, iw = image.shape
    th, tw = template.shape

    # Pad image to even dimensions
    if ih % 2 or iw % 2:
        if ih % 2:
            ih += 1
        if iw % 2:
            iw += 1
        image = pad_bin_image_to_shape(image, (ih, iw))
    if expected == 0:
        return False

    # Calculate the convolution of the FFT's of the image & template
    convolution_freqs = rfft2(image) * rfft2(template[::-1, ::-1],
                                             image.shape)
    convolution_image = irfft2(convolution_freqs)
    found_bitmap = convolution_image > (expected - tolerance)
    return bool(found_bitmap.any())
Example #28
def centered_gauss(I, k=1.0, w=9, t=200.0, return_filt=False):
    """
    Convolve the image with a mean-subtracted Gaussian kernel, 
    then apply a static threshold.

    args
    ----
        I           :   2D ndarray (YX)
        k           :   float, kernel sigma
        w           :   int, kernel window size
        t           :   float, threshold
        return_filt :   bool, also return the filtered image
                        and boolean image

    returns
    -------
        if return_filt:
        (
            2D ndarray, the post-convolution image;
            2D ndarray, the thresholded binary image;
            2D ndarray, shape (n_spots, 2), the y and x 
                coordinates of each spot
        )
        else
            2D ndarray, shape (n_spots, 2), the y and x
                coordinates of each spot

    """
    # Compute the transfer function
    G_rft = _centered_gauss_setup(*I.shape, k, w)
    return threshold_image(fftshift(irfft2(rfft2(I) * G_rft, s=I.shape)),
                           t=t,
                           return_filt=return_filt)
Example #29
def log(I, k=1.0, w=11, t=200.0, return_filt=False):
    """
    Detect spots by Laplacian-of-Gaussian filtering.

    args
    ----
        I           :   2D ndarray
        k           :   float, kernel sigma
        w           :   int, kernel size 
        t           :   float, threshold
        return_filt :   bool, also return the filtered image

    returns
    -------
        if return_filt:
        (
            2D ndarray, the post-convolution image;
            2D ndarray, the thresholded binary image;
            2D ndarray, shape (n_spots, 2), the y and x 
                coordinates of each spot
        )
        else
            2D ndarray, shape (n_spots, 2), the y and x
                coordinates of each spot

    """
    # Generate the transfer function
    G_rft = _log_setup(*I.shape, k, w)

    # Perform the convolution
    return threshold_image(fftshift(irfft2(rfft2(I) * G_rft, s=I.shape)),
                           t=t,
                           return_filt=return_filt)
Example #30
def ifftn_mpi(fu, u):
    Uc_hat[:] = ifft(fu, axis=0)
    comm.Alltoall(MPI.IN_PLACE, [Uc_hat, MPI.DOUBLE_COMPLEX])
    Uc_hatT[:] = rollaxis(Uc_hat.reshape((num_processes, Np, Np, N // 2 + 1)),
                          1).reshape(Uc_hatT.shape)
    u[:] = irfft2(Uc_hatT, axes=(1, 2))
    return u
Example #31
 def getDisplacements2D(self, Z=None, window=False):
     """
     Use phase correlation to find the relative displacement between
     each time step
     """
     if Z is None:
         Z = self.getNbPixelsPerFrame() / self.getNbPixelsPerSlice() / 2
     shape = np.asarray(self.get2DShape())
     if window:
         ham = np.hamming(shape[0]) * np.atleast_2d(np.hamming(shape[1])).T
     else:
         ham = 1.0
     displs = np.zeros((self.getNbFrames(), 2))
     a = rfft2(self.get2DSlice(T=0, Z=Z) * ham)
     for t in range(1, self.getNbFrames()):
         b = rfft2(self.get2DSlice(T=t, Z=Z) * ham)
         #calculate the normalized cross-power spectrum
         #R = numexpr.evaluate(
         #    'a*complex(real(b), -imag(b)/abs(a*complex(real(b), -imag(b))))'
         #    )
         R = a * b.conj()
         Ra = np.abs(a * b.conj())
         R[Ra > 0] /= Ra[Ra > 0]
         r = irfft2(R)
         #Get the periodic position of the peak
         l = r.argmax()
         displs[t] = np.unravel_index(l, r.shape)
         #prepare next step
         a = b
     return np.where(displs < shape[::-1] / 2, displs, displs - shape[::-1])
Example #32
def linespectra(arr, freqs, sigma=4, channelWidth=20, kms=False, source_speed=0):  # nb sigma is given in px (can be fractional)
    """arr should be an array of shape (x,:,>pix,>pix)
freqs an array or list of nums of length x"""
    shifts = [int(round((freqs[-1] - freqs[i]) * 299792458 / (channelWidth * freqs[-1]))) for i in range(len(freqs))]
#    print shifts
    x = [[] for _ in range(arr.shape[0])]
    mid=arr.shape[2]/2.0-0.5

    gauss_mask=garray(arr.shape[-2:],sigma)
    s=[y*2 for y in gauss_mask.shape]
    ftg=rfft2(gauss_mask, s)

    for i in range(len(x)):
        for j in range(arr.shape[1]):
            convolved = irfft2(rfft2(arr[i, j, :, :], s) * ftg)
            x[i].append(convolved[s[0] // 2, s[1] // 2])

    padding=abs(max(shifts))
    padded = [0 for _ in range(arr.shape[1] + padding * 2 + 2)]
    for i in range(len(x[0])):
        for j in range(len(x)):
            try:
                padded[i + shifts[j] + padding] += x[j][i]
            except IndexError:
                print(j, i, len(x), len(x[j]))
    if kms: return [((i-150)*20/1000.0,x) for i,x in enumerate(padded)]
    else:   return [((i-150)*20,x)        for i,x in enumerate(padded)]
Example #33
def indicator_function(delta, N):
    # Create grid
    N = 2 * N
    L = 1 + 10 * delta / np.sqrt(12)
    dxy = 2 * L / N
    xy = np.linspace(-L + dxy / 2, L - dxy / 2, N)

    # Create mesh and radial direction
    X, Y = np.meshgrid(xy, xy)
    r = np.sqrt(X**2 + Y**2)

    # Unfiltered indicator function (normalized to integrate to unity)
    H = np.zeros((N, N))
    H[r < 1] = 1 / np.pi
    H = H / np.sum(H * dxy**2)

    # Gaussian filter (normalized to integrate to unity)
    G = 6 / (np.pi * delta**2) * np.exp(-6 * r**2 / delta**2)

    # Filtered indicator function doing convolution as multiplication in
    # spectral space
    I = fft.irfft2(fft.rfft2(H) * fft.rfft2(G) * dxy**2)

    # This is only really a function or r, so just return as a 1-D array
    # Notice the shift to the corners after doing the FFT
    r = xy[0:N // 2] + L
    I = I[0, 0:N // 2]

    return r, I
Example #34
File: vision.py Project: bibi-L/Geist
def convolution(bin_template, bin_image, tollerance=0.5):
    expected = numpy.count_nonzero(bin_template)
    ih, iw = bin_image.shape
    th, tw = bin_template.shape

    # Pad image to even dimensions
    if ih % 2 or iw % 2:
        if ih % 2:
            ih += 1
        if iw % 2:
            iw += 1
        bin_image = pad_bin_image_to_shape(bin_image, (ih, iw))
    if expected == 0:
        return []

    # Calculate the convolution of the FFT's of the image & template
    convolution_freqs = rfft2(bin_image) * rfft2(bin_template[::-1, ::-1],
                                                 bin_image.shape)
    # Reverse the FFT to find the result image
    convolution_image = irfft2(convolution_freqs)
    # At this point, the maximum point in convolution_image should be the
    # bottom right (why?) of the area of greatest match

    # The areas in the result image within expected +- tollerance are where we
    # saw matches
    found_bitmap = ((convolution_image > (expected - tollerance)) &
                    (convolution_image < (expected + tollerance)))

    match_points = numpy.transpose(numpy.nonzero(found_bitmap))  # bottom right

    # Find the top left point from the template (remember match_point is
    # inside the template, hence the -1)
    return [((fx - (tw - 1)), (fy - (th - 1))) for (fy, fx) in match_points]
Example #35
def ifftn_mpi(fu, u):
    Uc_hat[:] = ifft(fu, axis=0)
    comm.Alltoall([Uc_hat, MPI.DOUBLE_COMPLEX], [U_mpi, MPI.DOUBLE_COMPLEX])
    for i in range(num_processes):
        Uc_hatT[:, i*Np:(i+1)*Np] = U_mpi[i]
    u[:] = irfft2(Uc_hatT, axes=(1,2))
    return u
Example #36
def fft_correlate_strided_images(image_a, image_b):
    """FFT based cross correlation
    of two images with multiple views of np.stride_tricks()

    The 2D FFT should be applied to the last two axes (-2,-1) and the
    zero axis is the number of the interrogation window

    This should also work out of the box for rectangular windows.

    Parameters
    ----------
    image_a : 3d np.ndarray, first dimension is the number of windows,
        and two last dimensions are interrogation windows of the first image
    image_b : similar
    """
    s1 = np.array(image_a.shape[-2:])
    s2 = np.array(image_b.shape[-2:])
    size = s1 + s2 - 1
    fsize = 2**np.ceil(np.log2(size)).astype(int)
    fslice = tuple([slice(0, image_a.shape[0])] +
                   [slice(0, int(sz)) for sz in size])
    f2a = rfft2(image_a, fsize, axes=(-2, -1))
    f2b = rfft2(image_b[:, ::-1, ::-1], fsize, axes=(-2, -1))
    corr = irfft2(f2a * f2b, axes=(-2, -1)).real[fslice]
    return corr
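
The docstring mentions np.stride_tricks; one way to produce the expected (n_windows, H, W) stacks is numpy's sliding_window_view (this call pattern is an assumption, with made-up frames):

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

frame_a = np.random.rand(128, 128)
frame_b = np.roll(frame_a, (3, 1), axis=(0, 1))
win = 32
# non-overlapping 32x32 interrogation windows, stacked on axis 0
wins_a = sliding_window_view(frame_a, (win, win))[::win, ::win].reshape(-1, win, win)
wins_b = sliding_window_view(frame_b, (win, win))[::win, ::win].reshape(-1, win, win)
corr = fft_correlate_strided_images(wins_a, wins_b)   # shape (16, 63, 63)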
Example #37
def fft_convolve(in1, in2, times):
    def _centered(arr, newsize):
        # Return the center newsize portion of the array.
        currsize = np.array(arr.shape)
        startind = (currsize - newsize) // 2
        endind = startind + newsize
        myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
        return arr[tuple(myslice)]

    if times == 0:
        return in1.copy()

    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    shape = s1 + (s2 - 1) * times

    # Speed up FFT by padding to optimal size for FFTPACK
    fshape = [next_fast_len(int(d)) for d in shape]
    fslice = tuple([slice(0, int(sz)) for sz in shape])

    resfft = fast_power(rfft2(in2, fshape), times)
    resfft = resfft * rfft2(in1, fshape)
    ret = irfft2(resfft, fshape)[fslice].copy()
    ret = ret.real

    return _centered(ret, s1)
Example #38
 def update_nmda_high():
     high_nmda = np.array(high_excit_pop.s_NMDA).reshape(
         N_excit_high_1d, -1)
     fft_s_NMDA_2d = rfft2(high_nmda)
     fft_s_NMDA_total_2d = np.multiply(fft_high_kernel, fft_s_NMDA_2d)
     s_NMDA_tot_2d = irfft2(fft_s_NMDA_total_2d)
     high_excit_pop.s_NMDA_total_ = s_NMDA_tot_2d.reshape(-1)
Example #39
def correlate_windows(window_a, window_b, corr_method='fft', nfftx=None, nffty=None):
    """Compute correlation function between two interrogation windows.

    The correlation function can be computed by using the correlation
    theorem to speed up the computation.

    Parameters
    ----------
    window_a : 2d np.ndarray
        a two-dimensional array for the first interrogation window.

    window_b : 2d np.ndarray
        a two-dimensional array for the second interrogation window.

    corr_method   : string
        one of the two methods currently implemented: 'fft' or 'direct'.
        Default is 'fft', which is much faster.

    nfftx   : int
        the size of the 2D FFT in x-direction,
        [default: 2 x windows_a.shape[0] is recommended].

    nffty   : int
        the size of the 2D FFT in y-direction,
        [default: 2 x windows_a.shape[1] is recommended].


    Returns
    -------
    corr : 2d np.ndarray
        a two-dimensional array for the correlation function.

    Note that, to use 2**N-sized windows for a faster FFT, we adopt a
    slightly different convention for the size of the correlation map.
    The theory says it is M+N-1, and the 'direct' method returns that
    size, while the FFT-based method returns M+N, where M is the
    window_size and N is the search_size. This leads to an
    inconsistency in the output.
    """
    
    if corr_method == 'fft':
        window_b = np.conj(window_b[::-1, ::-1])
        if nfftx is None:
            nfftx = nextpower2(window_b.shape[0] + window_a.shape[0])  
        if nffty is None:
            nffty = nextpower2(window_b.shape[1] + window_a.shape[1]) 
        
        f2a = rfft2(normalize_intensity(window_a), s=(nfftx, nffty))
        f2b = rfft2(normalize_intensity(window_b), s=(nfftx, nffty))
        corr = irfft2(f2a * f2b).real
        corr = corr[:window_a.shape[0] + window_b.shape[0], 
                    :window_b.shape[1] + window_a.shape[1]]
        return corr
    elif corr_method == 'direct':
        return convolve2d(normalize_intensity(window_a),
                          normalize_intensity(window_b[::-1, ::-1]), 'full')
    else:
        raise ValueError('method is not implemented')
Example #40
 def getDispl2DImage(self, t0=0, t1=1, Z=0):
     ham = np.hamming(self.get2DShape()[1])*np.atleast_2d(np.hamming(self.get2DShape()[0])).T
     a = rfft2(self.get2DSlice(T=t0, Z=Z)*ham)
     b = rfft2(self.get2DSlice(T=t1, Z=Z)*ham)
     R = numexpr.evaluate(
         'a*complex(real(b), -imag(b))/abs(a*complex(real(b), -imag(b)))')
     return irfft2(R)
Example #41
File: registration.py Project: wj2/2p
def register_imgs(imgs,template):
    "save some time by only taking fft of template once"
    rfft2_template_conj = rfft2(template).conj()
    shifts = []
    for img in imgs:
        corr = irfft2(rfft2(img)*rfft2_template_conj)
        shifts.append(balanced_mod(np.unravel_index(corr.argmax(),corr.shape),corr.shape))
    return shifts
Example #42
 def getDispl2DImage(self, t0=0, t1=1, Z=0):
     ham = np.hamming(self.get2DShape()[0]) * np.atleast_2d(
         np.hamming(self.get2DShape()[1])).T
     a = rfft2(self.get2DSlice(T=t0, Z=Z) * ham)
     b = rfft2(self.get2DSlice(T=t1, Z=Z) * ham)
     R = numexpr.evaluate(
         'a*complex(real(b), -imag(b))/abs(a*complex(real(b), -imag(b)))')
     return irfft2(R)
Example #43
def ifftn_mpi(fu, u):
    # Inverse Fourier transform
    #    Uc_hat[:] = ifft(fu, axis=0)
    # comm.Alltoall([Uc_hat, MPI.DOUBLE_COMPLEX], [U_mpi, MPI.DOUBLE_COMPLEX])
    #   Uc_hatT[:] = rollaxis(U_mpi, 1).reshape(Uc_hatT.shape)
    # print(['fu ifftn shape: '],shape(fu))
    u[:] = irfft2(fu)
    return u
Example #44
def correlation_func(cor_win_1,
                     cor_win_2,
                     win_width,
                     win_height,
                     correlation_method='circular'):
    '''Compute the cross-correlation of the two window stacks.
    By default a circular cross-correlation is used, which means no
    zero-padding is applied; the .real cuts off spurious imaginary parts
    that remain due to finite numerical accuracy.

    Manuel: The function is modified to include different heights and widths
        for the linear cross-correlation; the circular one is not affected.
        Also, the mean is now subtracted for both circular and linear, and
        the correlation map is normalised.
    '''
    cor_win_1 = cor_win_1 - cor_win_1.mean(axis=(1, 2)).reshape(
        cor_win_1.shape[0], 1, 1)
    cor_win_2 = cor_win_2 - cor_win_2.mean(axis=(1, 2)).reshape(
        cor_win_1.shape[0], 1, 1)

    if correlation_method == 'linear':
        corr = fftshift(irfft2(
            np.conj(rfft2(cor_win_1, s=(2 * win_height, 2 * win_width))) *
            rfft2(cor_win_2, s=(2 * win_height, 2 * win_width))).real,
                        axes=(1, 2))
        corr = corr[:, win_height // 2:3 * win_height // 2,
                    win_width // 2:3 * win_width // 2]
    else:
        corr = fftshift(irfft2(np.conj(rfft2(cor_win_1)) *
                               rfft2(cor_win_2)).real,
                        axes=(1, 2))
    """
    Normalize the whole thing. For that we calculate the vector of length equal
    to the amount of correlation windows, that contains the following multiplication:
        
    simga_A*sigma_B*Window_Height*Window_Width
    
    This will give us a maximum of 1 for the correlation peak.
    """
    normalizer = (np.std(cor_win_2, axis=(1,2))*np.std(cor_win_1, axis=(1,2))\
                  *win_width*win_height)
    # normalize by expanding the dimensions of the normalizer, to be of shape (n_windows, 1, 1)
    corr_norm = corr / np.expand_dims(normalizer, axis=(1, 2))
    # eliminate the values below 0 to get final correlation map and return it
    corr_norm[corr_norm < 0] = 0
    return corr_norm
Example #45
def _upsample(arr, factor):
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("array argument must be square shape")
    arr_k = rfft2(arr)
    # zero-padding the spectrum via the s argument performs the upsampling
    upsampled = irfft2(arr_k, s=[arr.shape[0] * factor, arr.shape[1] * factor])
    return upsampled
Example #46
def subtract_outer(*args, **kwargs):
    p1 = rfft2(
        fftshift(args[0])
    )  #, threads=kwargs["fftthreads"], planner_effort="FFTW_ESTIMATE")
    p1s = subtract(p1, *args[1:])
    new_image = fftshift(
        irfft2(p1s)
    )  #, threads=kwargs["fftthreads"], planner_effort="FFTW_ESTIMATE"))
    return new_image
Example #47
def correlation(f, g):
    f_fft = rfft2(f)
    if f is g:
        g_fft = f_fft
    else:
        g_fft = rfft2(g)
    g_conj = np.conj(g_fft)
    prod = f_fft * g_conj
    return np.real(irfft2(prod))
Example #48
def dnf_response(n, Rn, stimulus, w, we, wi, time, dt):
    alpha, tau = 0.1, 1.0
    U = np.random.random((n, n)) * .01
    V = np.random.random((n, n)) * .01

    V_shape = np.array(V.shape)

    # Computes field input accordingly
    D = ((np.abs(w - stimulus)).sum(axis=-1)) / float(Rn * Rn)
    I = (1.0 - D.reshape(n, n)) * alpha

    for j in range(int(time / dt)):
        Z = rfft2(V * alpha)
        Le = irfft2(Z * we, V_shape).real
        Li = irfft2(Z * wi, V_shape).real
        U += (-U + (Le - Li) + I) * dt * tau
        V = np.maximum(U, 0.0)
    return V
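
dnf_response expects we and wi already transformed: they multiply rfft2(V * alpha) directly, so they must be rfft2 images of n-by-n interaction kernels. A plausible precomputation (an assumption, with made-up widths and gains):

import numpy as np
from numpy.fft import rfft2, ifftshift

def kernel_fft(n, sigma, amplitude):
    # periodic Gaussian interaction kernel, centred then ifftshift-ed
    x = np.linspace(-0.5, 0.5, n, endpoint=False)
    X, Y = np.meshgrid(x, x)
    K = amplitude * np.exp(-(X**2 + Y**2) / (2 * sigma**2))
    return rfft2(ifftshift(K))

n = 40
we = kernel_fft(n, 0.05, 1.50)   # narrow excitation
wi = kernel_fft(n, 0.20, 0.75)   # broad inhibition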
Example #49
def ifct(fu, u):
    """Inverse Cheb transform of x-direction, Fourier in y and z"""
    Uc_hat3[:] = ST.ifct(fu, Uc_hat3)
    comm.Alltoall([Uc_hat3, MPI.DOUBLE_COMPLEX], [U_mpi, MPI.DOUBLE_COMPLEX])
    n0 = U_mpi.shape[2]
    for i in range(num_processes):
        Uc_hatT[:, i*n0:(i+1)*n0] = U_mpi[i]
    u[:] = irfft2(Uc_hatT, axes=(1,2))
    return u
Example #50
File: __init__.py Project: xperroni/Skeye
def correlate(image, filter):
    r"""Performs a normalized cross-correlation between an image and a search
        template. For more details, see:

        http://en.wikipedia.org/wiki/Cross_correlation#Normalized_cross-correlation
    """
    si = rfft2(image - mean(image))
    sf = rfft2(filter - mean(filter), image.shape)
    return irfft2(si * conj(sf))
Example #51
def dnf_response( n, Rn, stimulus, w, we, wi, time, dt ):
		alpha, tau = 0.1, 1.0
		U  = np.random.random((n,n)) * .01
		V  = np.random.random((n,n)) * .01

		V_shape = np.array(V.shape)

		# Computes field input accordingly
		D = (( np.abs( w - stimulus )).sum(axis=-1))/float(Rn*Rn)
		I = ( 1.0 - D.reshape(n,n) ) * alpha

		for j in range( int(time/dt) ):
				Z = rfft2( V * alpha )
				Le = irfft2( Z * we, V_shape).real
				Li = irfft2( Z * wi, V_shape).real
				U += ( -U + ( Le - Li ) + I )* dt * tau
				V = np.maximum( U, 0.0 )
		return V
Example #52
def beam_convolve(arr, sigma):
    "convoles a 2D image with a gaussian profile with sigma in px"
    if len(arr.shape)!=2 or 3*sigma > max(arr.shape): raise ValueError ("arr is not 2d or beam is too wide")
    else: 
        shape=arr.shape
        gauss_mask=garray(shape,sigma)
        s=[y*2 for y in gauss_mask.shape]
        ftg=rfft2(gauss_mask, s)
        return irfft2(rfft2(arr,s)*ftg)
Example #53
File: common.py Project: ndaniyar/aphot
def conv(im, ker):
    ''' Convolves image im with kernel ker 
        Both image and kernel's dimensions should be even: ker.shape % 2 == 0
    '''
    sy, sx = array(ker.shape) // 2
    y0, x0 = array(im.shape) // 2
    big_ker = zeros(im.shape)
    big_ker[y0-sy:y0+sy,x0-sx:x0+sx] = ker
    return irfft2(rfft2(im)*rfft2(fftshift(big_ker)))
Example #54
def phaseCorrel(a, b):
    """phase correlation calculation"""
    print(a)
    print(b)
    R = rfft2(a) * np.conj(rfft2(b))
    R /= np.absolute(R)
    print(R)
    print(a.shape)
    return irfft2(R, a.shape)
Example #55
def ifst(fu, u, S):
    """Inverse Shen transform of x-direction, Fourier in y and z"""
    Uc_hat3[:] = S.ifst(fu, Uc_hat3)
    comm.Alltoall([Uc_hat3, MPI.DOUBLE_COMPLEX], [U_mpi, MPI.DOUBLE_COMPLEX])
    #n0 = U_mpi.shape[2]
    #for i in range(num_processes):
        #Uc_hatT[:, i*n0:(i+1)*n0] = U_mpi[i]
    Uc_hatT[:] = rollaxis(U_mpi, 1).reshape(Uc_hatT.shape)
    u[:] = irfft2(Uc_hatT, axes=(1,2))
    return u
Example #56
 def test_noise_sigma(self):
     """The sky noise should be normalized such that the standard deviation of the image == amplitude."""
     CoordsXY = self.coord
     noise_gen = _sky_noise_gen(CoordsXY, seed=self.seed, amplitude=self.amplitude,
                                n_step=self.n_step, verbose=False)
     noise_fft = next(noise_gen)
     for fft_single in noise_gen:
         noise_fft += fft_single
     noise_image = np.real(fftshift(irfft2(noise_fft)))
     dimension = np.sqrt(CoordsXY.xsize()*CoordsXY.ysize())
     self.assertLess(np.abs(np.std(noise_image) - self.amplitude), 1.0/dimension)
Example #57
def lineold(imcube, sigma, chanwidth=10):
    "produces a spectrum by convolving each slice of imcube with a gaussian of width sigma and returning the value of the central pixel for each slice"
    shape = imcube.shape
    bandwidth = shape[0] * chanwidth
    if len(shape) != 3:
        raise ValueError("imcube must be a cube")
    gauss_mask = garray(shape[1:], sigma)
    s = [y * 2 for y in gauss_mask.shape]
    ftg = rfft2(gauss_mask, s)

    return [(i * chanwidth - bandwidth / 2,
             irfft2(rfft2(imcube[i, :, :], s) * ftg)[s[0] // 2, s[1] // 2])
            for i in range(shape[0])]
Example #58
def upsample(arr, factor):
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("array argument must be square shape")
    arr_k = rfft2(arr)
    nkx, nky = arr_k.shape
    unkx = nkx*factor
    unky = (nky-1) * factor + 1
    upsample_fft = np.zeros((unkx, unky), dtype=np.complex128)
    upsample_fft[:nkx // 2 + 1, :nky] = arr_k[:nkx // 2 + 1, :]
    upsample_fft[-(nkx // 2 - 1):, :nky] = arr_k[-(nkx // 2 - 1):, :]
    upsampled = irfft2(upsample_fft)
    return upsampled
Example #59
def correlate_windows(window_a, window_b, corr_method="fft", nfftx=None, nffty=None):
    """Compute correlation function between two interrogation windows.
    
    The correlation function can be computed by using the correlation 
    theorem to speed up the computation.
    
    Parameters
    ----------
    window_a : 2d np.ndarray
        a two-dimensional array for the first interrogation window.

    window_b : 2d np.ndarray
        a two-dimensional array for the second interrogation window.
        
    corr_method   : string
        one of the two methods currently implemented: 'fft' or 'direct'.
        Default is 'fft', which is much faster.
        
    nfftx   : int
        the size of the 2D FFT in x-direction, 
        [default: 2 x windows_a.shape[0] is recommended].
        
    nffty   : int
        the size of the 2D FFT in y-direction, 
        [default: 2 x windows_a.shape[1] is recommended].
        
        
    Returns
    -------
    corr : 2d np.ndarray
        a two-dimensional array for the correlation function.
    
    """

    if corr_method == "fft":
        if nfftx is None:
            nfftx = 2 * window_a.shape[0]
        if nffty is None:
            nffty = 2 * window_a.shape[1]
        return fftshift(
            irfft2(
                rfft2(normalize_intensity(window_a), s=(nfftx, nffty))
                * np.conj(rfft2(normalize_intensity(window_b), s=(nfftx, nffty)))
            ).real,
            axes=(0, 1),
        )
    elif corr_method == "direct":
        return convolve(normalize_intensity(window_a), normalize_intensity(window_b[::-1, ::-1]), "full")
    else:
        raise ValueError("method is not implemented")