Example #1
    def test_not_last_axis_success(self):
        ar, ai = np.random.random((2, 16, 8, 32))
        a = ar + 1j*ai

        axes = (-2,)

        # Should not raise error
        fft.irfftn(a, axes=axes)
Example #2
    def test_not_last_axis_success(self):
        ar, ai = np.random.random((2, 16, 8, 32))
        a = ar + 1j * ai

        axes = (-2, )

        # Should not raise error
        fft.irfftn(a, axes=axes)
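The two tests above only assert that `irfftn` accepts an axes tuple that excludes the last dimension. A minimal sketch of the round trip behind that expectation, assuming plain NumPy and an illustrative shape:

import numpy as np

rng = np.random.default_rng(0)
x = rng.random((16, 8, 32))                 # real input

# Real FFT along axis -2 only: that axis shrinks to 8 // 2 + 1 = 5.
X = np.fft.rfftn(x, axes=(-2,))

# Inverse along the same axis; pass s to pin the original length.
y = np.fft.irfftn(X, s=(8,), axes=(-2,))

assert np.allclose(x, y)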
Example #3
def __compute_contrasts(im, sigma_g1s, sigma_g2s):
    """
    Compute the contrasts for measuring contrast enhancement. This computes c_σ_g₁ from [1] for each
    pixel across all of the frequencies, which is defined as:

        c_σ_g₁(x,y) = O(x,y) / S(x,y) for each σ_g1 (eq 22)
        O(x,y) = C(x,y) - S(x,y)                    (eq 19)
        C(x,y) = im ⊗ g₁(x,y)                       (eq 20)
        S(x,y) = im ⊗ g₂(x,y)                       (eq 21)
        σ_g₁ = 2 log(1/M²)/(1-M²) /𝜈² for 𝜈∈[0,π]   (eq 23)
            𝜈 = 72π/80, 69π/80, ..., 6π/80, 3π/80
        g(x,y) = exp(-1/(2σ²(x²+y²))) - a Gaussian with σ_g1 given above or σ_g₂=M*σ_g₁

    where ⊗ is a convolution. All of these are fairly easily expanded to any number of dimensions by
    using a higher-dimensional Gaussian kernel (actually a 1D Gaussian kernel is applied along each
    dimension).

    The g₁ and g₂ kernels are given for all σs along with the image.

    REFERENCES:
     1. Sen D and Pal S K, May 2011, "Automatic Exact Histogram Specification for Contrast
        Enhancement and Visual System Based Quantitative Evaluation", IEEE Transactions on Image
        Processing, 20(5):1211-1220.
    """
    from .util import EPS
    from scipy.ndimage import fourier_gaussian

    contrasts = empty(im.shape + (len(sigma_g1s), ))

    rfftn, irfftn, empty_aligned = __get_rfft()
    im = rfftn(im.astype(float, copy=False))
    temp = empty_aligned(im.shape, im.dtype)

    for i, (sigma_g1, sigma_g2) in enumerate(zip(sigma_g1s, sigma_g2s)):
        # Using real-space correlations  (also need to remove rfftn() above but keep the astype)
        # The real-space correlations produce lower values
        #center = im if g_1.size == 1 else ndi.gaussian_filter(im, sigma_g1, truncate=3)
        #surround = ndi.gaussian_filter(im, sigma_g2, output=temp, truncate=3)

        center = irfftn(
            fourier_gaussian(im, sigma_g1, im.shape[-1], output=temp))
        surround = irfftn(
            fourier_gaussian(im, sigma_g2, im.shape[-1], output=temp))
        surround[surround == 0] = EPS

        contrast = contrasts[..., i]
        subtract(center, surround, contrast)

        contrast /= surround
        #abs(contrast, contrast) # norm will automatically take the absolute value for us
    return contrasts
Example #4
    def test_energy(self):
        tol = 1e-10
        L = 2 + rand()  # domain length
        a = 3 + rand()  # amplitude of force
        E = 4 + rand()  # Young's Mod
        for res in [4, 8, 16]:
            area_per_pt = L / res
            x = np.arange(res) * area_per_pt
            force = a * np.cos(2 * np.pi / L * x)

            # theoretical FFT of force
            Fforce = np.zeros_like(x)
            Fforce[1] = Fforce[-1] = res / 2. * a

            # theoretical FFT of disp
            Fdisp = np.zeros_like(x)
            Fdisp[1] = Fdisp[-1] = res / 2. * a / E * L / (np.pi)

            # verify consistency
            hs = PeriodicFFTElasticHalfSpace(res, E, L)
            fforce = rfftn(force.T).T
            fdisp = hs.greens_function * fforce
            self.assertTrue(
                Tools.mean_err(fforce, Fforce, rfft=True) < tol,
                "fforce = \n{},\nFforce = \n{}".format(fforce.real, Fforce))
            self.assertTrue(
                Tools.mean_err(fdisp, Fdisp, rfft=True) < tol,
                "fdisp = \n{},\nFdisp = \n{}".format(fdisp.real, Fdisp))

            # Fourier energy
            E = .5 * np.dot(Fforce / area_per_pt, Fdisp) / res

            disp = hs.evaluate_disp(force)
            e = hs.evaluate_elastic_energy(force, disp)
            kdisp = hs.evaluate_k_disp(force)
            self.assertTrue(
                abs(disp - irfftn(kdisp.T).T).sum() < tol,
                ("disp   = {}\n"
                 "ikdisp = {}").format(disp,
                                       irfftn(kdisp.T).T))
            ee = hs.evaluate_elastic_energy_k_space(fforce, kdisp)
            self.assertTrue(
                abs(e - ee) < tol,
                "violate Parseval: e = {}, ee = {}, ee/e = {}".format(
                    e, ee, ee / e))

            self.assertTrue(
                abs(E - e) < tol,
                "theoretical E = {}, computed e = {}, diff(tol) = {}({})".
                format(E, e, E - e, tol))
Example #5
def getCovMatrix(IQdata, lags=100, hp=False):
	# 0: <I1I2> # 1: <Q1Q2> # 2: <I1Q2> # 3: <Q1I2> # 4: <Squeezing> Magnitude # 5: <Squeezing> Phase
	lags = int(lags)
	I1 = np.asarray(IQdata[0])
	Q1 = np.asarray(IQdata[1])
	I2 = np.asarray(IQdata[2])
	Q2 = np.asarray(IQdata[3])
	CovMat = np.zeros([10, lags * 2 + 1])
	start = len(I1) - lags - 1
	stop = len(I1) + lags
	sI1 = np.array(I1.shape)
	shape0 = sI1 * 2 - 1
	fshape = [_next_regular(int(d)) for d in shape0]  # padding to optimal size for FFTPACK
	fslice = tuple([slice(0, int(sz)) for sz in shape0])
	# Do FFTs and get Cov Matrix
	fftI1 = rfftn(I1, fshape)
	fftQ1 = rfftn(Q1, fshape)
	fftI2 = rfftn(I2, fshape)
	fftQ2 = rfftn(Q2, fshape)
	rfftI1 = rfftn(I1[::-1], fshape)  # there should be a simple relationship to fftI1
	rfftQ1 = rfftn(Q1[::-1], fshape)
	rfftI2 = rfftn(I2[::-1], fshape)
	rfftQ2 = rfftn(Q2[::-1], fshape)
	CovMat[0, :] = irfftn((fftI1 * rfftI2))[fslice].copy()[start:stop] / fshape
	CovMat[1, :] = irfftn((fftQ1 * rfftQ2))[fslice].copy()[start:stop] / fshape
	CovMat[2, :] = irfftn((fftI1 * rfftQ2))[fslice].copy()[start:stop] / fshape
	CovMat[3, :] = irfftn((fftQ1 * rfftI2))[fslice].copy()[start:stop] / fshape
	psi = (1j * (CovMat[2, :] + CovMat[3, :]) + (CovMat[0, :] - CovMat[1, :]))
	CovMat[4, :] = abs(psi)
	CovMat[5, :] = np.angle(psi)
	CovMat[6, :] = irfftn((fftI1 * rfftI1))[fslice].copy()[start:stop] / fshape
	CovMat[7, :] = irfftn((fftQ1 * rfftQ1))[fslice].copy()[start:stop] / fshape
	CovMat[8, :] = irfftn((fftI2 * rfftI2))[fslice].copy()[start:stop] / fshape
	CovMat[9, :] = irfftn((fftQ2 * rfftQ2))[fslice].copy()[start:stop] / fshape
	return CovMat
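getCovMatrix builds each covariance row by multiplying the FFT of one quadrature with the FFT of the reversed other, i.e. the usual FFT correlation identity. A minimal 1-D sketch of that identity, assuming plain NumPy and padding to the full correlation length:

import numpy as np
from numpy.fft import rfftn, irfftn

rng = np.random.default_rng(1)
a = rng.random(64)
b = rng.random(64)

# Pad so the circular product of the padded FFTs equals the linear correlation.
n = len(a) + len(b) - 1
corr_fft = irfftn(rfftn(a, (n,)) * rfftn(b[::-1], (n,)), (n,))

assert np.allclose(corr_fft, np.correlate(a, b, mode='full'))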
Example #6
def convolve_3d_same(cube, psf, compute_fourier=True):
    """
    Convolve a 3D cube with PSF & LSF.
    PSF can be the PSF data or its Fourier transform.
    If compute_fourier is True, compute the FFT of the PSF;
    if False, assume that the FFT is already given.

    This convolution has edge effects (and is slower when using numpy than pyfftw).

    cube: The cube we want to convolve
    psf: The Point Spread Function or its Fast Fourier Transform
    """

    size = np.array(np.shape(cube)[slice(0, 3)])

    #import ipdb; ipdb.set_trace()

    if compute_fourier:
        fft_psf = rfftn(psf, axes=[0, 1, 2])
    else:
        fft_psf = psf

    fft_img = rfftn(cube, axes=[0, 1, 2])

    # Convolution
    #fft_cube = np.real(fftshift(irfftn(fft_img * fft_psf, size=size, axes=[0, 1, 2]), axes=[0, 1, 2]))

    convolved_cube = np.real(
        fftshift(irfftn(fft_img * fft_psf, axes=[0, 1, 2]), axes=[0, 1, 2]))

    return convolved_cube, fft_psf, fft_img
Example #7
def ifftn_mpi(fu, u):
    """ifft in three directions using mpi.
    Need to do ifft in reversed order of fft
    """
    if num_processes == 1:
        #u[:] = irfft(ifft(ifft(fu, axis=0), axis=2), axis=1)
        u[:] = irfftn(fu, axes=(0,2,1))
        return
    
    # Do first owned direction
    Uc_hat[:] = ifft(fu, axis=0)
    
    # Communicate all values
    comm.Alltoall([Uc_hat, MPI.DOUBLE_COMPLEX], [U_mpi, MPI.DOUBLE_COMPLEX])
    for i in range(num_processes): 
        Uc_hatT[:, :, i*Np:(i+1)*Np] = U_mpi[i]

    #for i in range(num_processes):
    #    if not i == rank:
    #        comm.Sendrecv_replace([Uc_send[i], MPI.DOUBLE_COMPLEX], i, 0, i, 0)   
    #    Uc_hatT[:, :, i*Np:(i+1)*Np] = Uc_send[i]
           
    # Do last two directions
    #u[:] = irfft(ifft(Uc_hatT, axis=2), axis=1)
    u[:] = irfft2(Uc_hatT, axes=(2,1))
Example #8
    def compute(a, b):
        """
        Compute an optimal displacement between two ndarrays.

        Finds the displacement between two ndimensional arrays. Arrays must be
        of the same size. Algorithm uses a cross correlation, computed efficiently
        through an n-dimensional fft.

        Parameters
        ----------
        a : ndarray
            The first array

        b : ndarray
            The second array
        """
        from numpy.fft import rfftn, irfftn
        from numpy import unravel_index, argmax

        # compute real-valued cross-correlation in fourier domain
        s = a.shape
        f = rfftn(a)
        f *= rfftn(b).conjugate()
        c = abs(irfftn(f, s))

        # find location of maximum
        inds = unravel_index(argmax(c), s)

        # fix displacements that are greater than half the total size
        pairs = zip(inds, a.shape)
        # cast to basic python int for serialization
        adjusted = [int(d - n) if d > n // 2 else int(d) for (d, n) in pairs]

        return Displacement(adjusted)
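A quick way to see what `compute` returns is to feed it an array and a circularly shifted copy: the correlation peak sits at minus the applied roll, and the final adjustment maps indices past the half-way point to negative displacements. A self-contained sketch, assuming NumPy only:

import numpy as np
from numpy.fft import rfftn, irfftn

rng = np.random.default_rng(2)
a = rng.random((32, 32))
b = np.roll(a, shift=(5, -3), axis=(0, 1))   # known circular shift

s = a.shape
c = abs(irfftn(rfftn(a) * rfftn(b).conjugate(), s))
inds = np.unravel_index(np.argmax(c), s)

# Map indices larger than half the axis length back to negative shifts.
adjusted = [int(d - n) if d > n // 2 else int(d) for d, n in zip(inds, s)]
assert adjusted == [-5, 3]                   # the peak is at minus the roll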
Example #9
def fft_gaussian_filter(img, sigma):
    """FFT gaussian convolution.

    Parameters
    ----------
    img : ndarray
        Image to convolve with a gaussian kernel
    sigma : int or sequence
        The sigma(s) of the gaussian kernel in _real space_

    Returns
    -------
    filt_img : ndarray
        The filtered image
    """
    # This doesn't help agreement but it will make things faster
    # pull the shape
    s1 = np.array(img.shape)
    # s2 = np.array([int(s * 4) for s in _normalize_sequence(sigma, img.ndim)])
    shape = s1  # + s2 - 1
    # calculate a nice shape
    fshape = [next_fast_len(int(d)) for d in shape]
    # pad out with reflection
    pad_img = fft_pad(img, fshape, "reflect")
    # calculate the padding
    padding = tuple(_calc_pad(o, n) for o, n in zip(img.shape, pad_img.shape))
    # so that we can calculate the cropping, maybe this should be integrated
    # into `fft_pad` ...
    fslice = tuple(
        slice(s, -e) if e != 0 else slice(s, None) for s, e in padding)
    # fourier transfrom and apply the filter
    kimg = rfftn(pad_img, fshape)
    filt_kimg = fourier_gaussian(kimg, sigma, pad_img.shape[-1])
    # inverse FFT and return.
    return irfftn(filt_kimg, fshape)[fslice]
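The helpers `fft_pad`, `_calc_pad` and `next_fast_len` above come from the surrounding module, so the snippet is not self-contained. The core idea, filtering in Fourier space with scipy.ndimage.fourier_gaussian, can be sketched with stock SciPy calls; 'wrap' in the real-space reference matches the circular convolution implied by the FFT:

import numpy as np
from numpy.fft import rfftn, irfftn
from scipy.ndimage import fourier_gaussian, gaussian_filter

rng = np.random.default_rng(3)
img = rng.random((64, 64))
sigma = 2.0

# Filter in Fourier space; n is the pre-transform length of the real-FFT axis.
kimg = rfftn(img)
filt = irfftn(fourier_gaussian(kimg, sigma, n=img.shape[-1]), img.shape)

# Same operation in real space with periodic boundaries.
ref = gaussian_filter(img, sigma, mode='wrap')

print(np.abs(filt - ref).max())   # small; gaussian_filter truncates its kernel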
Example #10
 def c2f(self, x_h):
     'see class doc'
     assert x_h.shape == (self.n*2, self.n*2, self.n+1)
     x_h[:,:,[0,-1]] *= sqrt(2)
     x = irfftn(self.extend(x_h)) * self.n32**3
     x_h[:,:,[0,-1]] /= sqrt(2)
     return x
Example #11
    def get_V(self):
        """
        get_V - computes and stores the gravitational potential

        Parameters
        ----------
        None.

        Returns
        -------
        None.

        """
        if not self.gpu:
            self.rho[...] = conj_square(self.psi)
            self.fourier_grid[...] = fft.rfftn(self.rho)
            ft_inv_laplace(self.fourier_grid)
            self.fourier_grid *= 4 * np.pi * G
            self.V[...] = fft.irfftn(self.fourier_grid)
            self.V[...] += self.lam * self.rho**2
        else:
            self.g_conj_square(self.g_psi, self.g_rho)
            cufft.cufftExecD2Z(self.rho_plan, self.g_rho.ptr,
                               self.g_fourier.ptr)
            self.g_fourier /= self.psi.shape[0]**3
            self.g_pot_func(self.g_fourier,
                            np.float64(4 * np.pi * G / self.N),
                            np.int64(self.fourier_grid.shape[0]),
                            np.int64(self.fourier_grid.shape[1]),
                            np.int64(self.fourier_grid.shape[2]),
                            block=(8, 8, 8),
                            grid=tuple([(i + 7) / 8
                                        for i in self.psi_hat.shape]))
            cufft.cufftExecZ2D(self.inv_plan, self.g_fourier.ptr, self.g_V.ptr)
            self.g_V += self.lam * self.g_rho**2
Example #12
def hg_conv(f, g):
	ub = tuple(array(f.shape) + array(g.shape) - 1);
	fh = rfftn(cpad(f,ub));
	gh = rfftn(cpad(g,ub));
	res = ifftshift(irfftn(fh * sp.conjugate(gh)));
	del fh, gh;
	return res;
Example #13
def computeDisplacement(arry1, arry2):
    """
    Compute an optimal displacement between two ndarrays.

    Finds the displacement between two ndimensional arrays. Arrays must be
    of the same size. Algorithm uses a cross correlation, computed efficiently
    through an n-dimensional fft.

    Parameters
    ----------
    arry1 : ndarray
        The first array

    arry2 : ndarray
        The second array
    """

    from numpy.fft import rfftn, irfftn
    from numpy import unravel_index, argmax

    # compute real-valued cross-correlation in fourier domain
    s = arry1.shape
    f = rfftn(arry1)
    f *= rfftn(arry2).conjugate()
    c = abs(irfftn(f, s))

    # find location of maximum
    inds = unravel_index(argmax(c), s)

    # fix displacements that are greater than half the total size
    pairs = zip(inds, arry1.shape)
    # cast to basic python int for serialization
    adjusted = [int(d - n) if d > n // 2 else int(d) for (d, n) in pairs]

    return adjusted
Example #14
def customfftconvolve(in1, in2, mode="full", types=('','')):
  """ Pretty much the same as the original fftconvolve, but supports
      operands that are already FFTs
  """

  in1 = asarray(in1)
  in2 = asarray(in2)

  if in1.ndim == in2.ndim == 0:  # scalar inputs
    return in1 * in2
  elif not in1.ndim == in2.ndim:
    raise ValueError("in1 and in2 should have the same dimensionality")
  elif in1.size == 0 or in2.size == 0:  # empty arrays
    return array([])

  s1 = array(in1.shape)
  s2 = array(in2.shape)
  complex_result = False
  #complex_result = (np.issubdtype(in1.dtype, np.complex) or
  #                  np.issubdtype(in2.dtype, np.complex))
  shape = s1 + s2 - 1
  
  if mode == "valid":
    _check_valid_mode_shapes(s1, s2)

  # Speed up FFT by padding to optimal size for FFTPACK
  fshape = [_next_regular(int(d)) for d in shape]
  fslice = tuple([slice(0, int(sz)) for sz in shape])

  if not complex_result:
    if types[0] == 'fft':
      fin1 = in1#_unfold_fft(in1, fshape)
    else:
      fin1 = rfftn(in1, fshape)

    if types[1] == 'fft':
      fin2 = in2#_unfold_fft(in2, fshape)
    else:
      fin2 = rfftn(in2, fshape)
    ret = irfftn(fin1 * fin2, fshape)[fslice].copy()
  else:
    if types[0] == 'fft':
      fin1 = _unfold_fft(in1, fshape)
    else:
      fin1 = fftn(in1, fshape)
    if types[1] == 'fft':
      fin2 = _unfold_fft(in2, fshape)
    else:
      fin2 = fftn(in2, fshape)
    ret = ifftn(fin1 * fin2)[fslice].copy()

  if mode == "full":
    return ret
  elif mode == "same":
    return _centered(ret, s1)
  elif mode == "valid":
    return _centered(ret, s1 - s2 + 1)
  else:
    raise ValueError("Acceptable mode flags are 'valid',"
                     " 'same', or 'full'.")
Example #15
def GaussianRandomInitializer(gridShape, sigma=0.2, seed=None, slipSystem=None, slipPlanes=None, slipDirections=None, vacancy=None, smectic=None):

    oldgrid = copy.copy(gridShape)
   
    if len(gridShape) == 1:
        gridShape = (128,)
    if len(gridShape) == 2:
        gridShape = (128,128)
    if len(gridShape) == 3:
        gridShape = (128,128,128)

    """ Returns a random initial set of fields of class type PlasticityState """
    if slipSystem=='gamma':
        state = SlipSystemState.SlipSystemState(gridShape,slipPlanes=slipPlanes,slipDirections=slipDirections)
    elif slipSystem=='betaP':
        state = SlipSystemBetaPState.SlipSystemState(gridShape,slipPlanes=slipPlanes,slipDirections=slipDirections)
    else:
        if vacancy is not None:
            state = VacancyState.VacancyState(gridShape,alpha=vacancy)
        elif smectic is not None:
            state = SmecticState.SmecticState(gridShape)
        else:
            state = PlasticityState.PlasticityState(gridShape)

    field = state.GetOrderParameterField()
    Ksq_prime = FourierSpaceTools.FourierSpaceTools(gridShape).kSq * (-sigma**2/4.)

    if seed is None:
        seed = 0
    n = 0
    random.seed(seed)

    Ksq = FourierSpaceTools.FourierSpaceTools(gridShape).kSq.numpy_array()

    for component in field.components:
        temp = random.normal(scale=gridShape[0],size=gridShape)
        ktemp = fft.rfftn(temp)*(sqrt(pi)*sigma)**len(gridShape)*exp(-Ksq*sigma**2/4.)
        field[component] = numpy.real(fft.irfftn(ktemp))
        #field[component] = GenerateGaussianRandomArray(gridShape, temp ,sigma)
        n += 1

    """
    t, s = LoadState("2dstate32.save", 0)
    for component in field.components:
        for j in range(0,32):
            field[component][:,:,j] = s.betaP[component].numpy_array()
    """

    ## To make seed consistent across grid sizes and convergence comparison
    gridShape = copy.copy(oldgrid)
    if gridShape[0] != 128:
        state = ResizeState(state,gridShape[0],Dim=len(gridShape))

    state = ReformatState(state)
    state.ktools = FourierSpaceTools.FourierSpaceTools(gridShape)
    
    return state 
Example #16
    def density(self):

        self._transformer.density()
        fft_grid = rfftn(self.xmap.array)
        fft_grid[self._fft_mask] = 0
        grid = irfftn(fft_grid)
        if self.b_add is not None:
            pass
        self.xmap.array[:] = grid.real
Example #17
    def __call__(self, image):

        assert alltrue(
            image.shape == self._expected_shape
        ), "Shape of image to be convolved is not correct. Re-run setup."

        ret = irfftn(rfftn(image, self._fshape) * self._psf_fft,
                     self._fshape)[self._fslice].copy()

        return _centered(ret, self._expected_shape)
Example #18
def getCovMatrix(I1, Q1, I2, Q2, lags=20):
    # calc <I1I2>, <I1Q2>, Q1I2, Q1Q2
    lags = int(lags)
    I1 = np.asarray(I1)
    Q1 = np.asarray(Q1)
    I2 = np.asarray(I2)
    Q2 = np.asarray(Q2)
    CovMat = np.zeros([6, lags*2-1])
    start = len(I1*2-1)-lags
    stop = len(I1*2-1)-1+lags
    sI1 = np.array(I1.shape)
    sQ2 = np.array(Q2.shape)
    shape = sI1 + sQ2 - 1
    HPfilt = (int(sI1/(lags*4)))  # smallest features visible is lamda/4
    fshape = [_next_regular(int(d)) for d in shape]  # padding to optimal size for FFTPACK
    fslice = tuple([slice(0, int(sz)) for sz in shape])
    # Do FFTs and get Cov Matrix
    fftI1 = rfftn(I1, fshape)
    fftQ1 = rfftn(Q1, fshape)
    fftI2 = rfftn(I2, fshape)
    fftQ2 = rfftn(Q2, fshape)
    rfftI1 = rfftn(I1[::-1], fshape)
    rfftQ1 = rfftn(Q1[::-1], fshape)
    rfftI2 = rfftn(I2[::-1], fshape)
    rfftQ2 = rfftn(Q2[::-1], fshape)
    # filter frequencies outside the lags range
    fftI1 = np.concatenate((np.zeros(HPfilt), fftI1[HPfilt:]))
    fftQ1 = np.concatenate((np.zeros(HPfilt), fftQ1[HPfilt:]))
    fftI2 = np.concatenate((np.zeros(HPfilt), fftI2[HPfilt:]))
    fftQ2 = np.concatenate((np.zeros(HPfilt), fftQ2[HPfilt:]))
    # filter frequencies outside the lags range
    rfftI1 = np.concatenate((np.zeros(HPfilt), rfftI1[HPfilt:]))
    rfftQ1 = np.concatenate((np.zeros(HPfilt), rfftQ1[HPfilt:]))
    rfftI2 = np.concatenate((np.zeros(HPfilt), rfftI2[HPfilt:]))
    rfftQ2 = np.concatenate((np.zeros(HPfilt), rfftQ2[HPfilt:]))
    CovMat[0, :] = (irfftn((fftI1*rfftI2))[fslice].copy()[start:stop] / len(fftI1))  # 0: <I1I2>
    CovMat[1, :] = (irfftn((fftQ1*rfftQ2))[fslice].copy()[start:stop] / len(fftI1))  # 1: <Q1Q2>
    CovMat[2, :] = (irfftn((fftI1*rfftQ2))[fslice].copy()[start:stop] / len(fftI1))  # 2: <I1Q2>
    CovMat[3, :] = (irfftn((fftQ1*rfftI2))[fslice].copy()[start:stop] / len(fftI1))  # 3: <Q1I2>
    CovMat[4, :] = (abs(1j*(CovMat[2, :]+CovMat[3, :]) + (CovMat[0, :] - CovMat[1, :])))  # 4: <Squeezing> Magnitude
    CovMat[5, :] = np.angle(1j*(CovMat[2, :]+CovMat[3, :]) + (CovMat[0, :] - CovMat[1, :]))  # 5: <Squeezing> Angle
    return CovMat
Example #19
    def extended_source_image(self, ideal_image):

        # Convolve

        assert np.alltrue(ideal_image.shape == self._expected_shape), "Shape of image to be convolved is not correct."

        ret = irfftn(rfftn(ideal_image, self._fshape) * self._psf_fft, self._fshape)[self._fslice].copy()

        conv = _centered(ret, self._expected_shape)

        return conv
Example #20
def GenerateGaussianRandomArray(gridShape, temp, sigma):
    dimension = len(gridShape)
    if dimension == 1:
        kfactor = fromfunction(lambda kz: exp(-0.5*(sigma*kz)**2),[gridShape[0]/2+1,])
        ktemp = fft.rfft(temp)
        ktemp *= kfactor
        data = fft.irfft(ktemp)
    elif dimension == 2:
        X,Y = gridShape
        kfactor = fromfunction(lambda kx,ky: exp(-0.5*sigma**2*((kx*(kx<X/2)+(X-kx)*(kx>=X/2))**2+ky**2)),[X,Y/2+1])
        ktemp = fft.rfftn(temp)
        ktemp *= kfactor
        data = fft.irfftn(ktemp)
    elif dimension == 3:
        X,Y,Z = gridShape
        kfactor = fromfunction(lambda kx,ky,kz: exp(-0.5*sigma**2*( (kx*(kx<X/2)+(X-kx)*(kx>=X/2))**2 + \
                                                (ky*(ky<Y/2)+(Y-ky)*(ky>=Y/2))**2 + kz**2)),[X,Y,Z/2+1])
        ktemp = fft.rfftn(temp)
        ktemp *= kfactor
        data = fft.irfftn(ktemp)
    return data 
Example #21
def fftconvolve(in1, in2):
    """Convolve two N-dimensional arrays using FFT.

    This is a modified version of the scipy.signal.fftconvolve.
    The new feature is derived from the fftconvolve algorithm used in the IDL package.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`;
        if sizes of `in1` and `in2` are not equal then `in1` has to be the
        larger array.

    Returns
    -------
    out : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    """
    in1 = asarray(in1)
    in2 = asarray(in2)

    #if matrix_rank(in1) == matrix_rank(in2) == 0:  # scalar inputs
    #    return in1 * in2
    #elif not in1.ndim == in2.ndim:
    if not in1.ndim == in2.ndim:
        raise ValueError("in1 and in2 should have the same rank")
    elif in1.size == 0 or in2.size == 0:  # empty arrays
        return array([])

    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complex128)
                      or np.issubdtype(in2.dtype, np.complex128))

    fsize = s1

    fslice = tuple([slice(0, int(sz)) for sz in fsize])
    if not complex_result:
        ret = irfftn(rfftn(in1, fsize) * rfftn(in2, fsize),
                     fsize)[fslice].copy()
        ret = ret.real
    else:
        ret = ifftn(fftn(in1, fsize) * fftn(in2, fsize))[fslice].copy()

    # Shift the axes back
    shift = np.floor(fsize * 0.5).astype(int)
    list_of_axes = tuple(np.arange(0, shift.size))
    ret = roll(ret, -shift, axis=list_of_axes)
    return ret
Example #22
def zeroextract2d(N,filename):
    a = fromfile(filename)
    a = a.reshape(9,N,N)
    b = numpy.zeros((9,N/2,N/2),float)
    for i in range(9):
        ka = fft.rfftn(a[i])
        kb = numpy.zeros((N/2,N/4+1),complex)
        kb[:N/4,:]=ka[:N/4,:N/4+1]
        kb[-N/4:,:]=ka[-N/4:,:N/4+1]
        b[i] = fft.irfftn(kb)
    b /= 4.
    b.tofile(filename.replace(str(N),str(N/2)))
Example #23
def SpatialCorrelationFunctionA(Field1, Field2):
    """
    Designed for Periodic Boundary Condition.

    Corr_12(r) = <Phi_1(r)Phi_2(0)>
    Corr_12(k) = Phi_1(k)* Phi_2(k)/V 
    """
    V = float(numpy.array(Field1.shape).prod())
    KField1 = fft.rfftn(Field1).conj()
    KField2 = fft.rfftn(Field2)
    KCorr = KField1 * KField2 / V
    Corr = fft.irfftn(KCorr)
    return Corr
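A sketch, NumPy only, that checks this periodic correlation against a direct average over all lags of a small 1-D field:

import numpy as np
from numpy import fft

rng = np.random.default_rng(4)
f1 = rng.random(8)
f2 = rng.random(8)
V = f1.size

# FFT route, as in SpatialCorrelationFunctionA.
corr_fft = fft.irfftn(fft.rfftn(f1).conj() * fft.rfftn(f2) / V)

# Direct periodic average of f1(x) * f2(x + r).
corr_direct = np.array([np.mean(f1 * np.roll(f2, -r)) for r in range(V)])

assert np.allclose(corr_fft, corr_direct)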
Example #24
def fftconvolve(in1, in2):
    """Convolve two N-dimensional arrays using FFT.

    This is a modified version of the scipy.signal.fftconvolve.
    The new feature is derived from the fftconvolve algorithm used in the IDL package.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`;
        if sizes of `in1` and `in2` are not equal then `in1` has to be the
        larger array.

    Returns
    -------
    out : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    """
    in1 = asarray(in1)
    in2 = asarray(in2)

    if matrix_rank(in1) == matrix_rank(in2) == 0:  # scalar inputs
        return in1 * in2
    elif not in1.ndim == in2.ndim:
        raise ValueError("in1 and in2 should have the same rank")
    elif in1.size == 0 or in2.size == 0:  # empty arrays
        return array([])

    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complex) or
                      np.issubdtype(in2.dtype, np.complex))

    fsize = s1

    fslice = tuple([slice(0, int(sz)) for sz in fsize])
    if not complex_result:
        ret = irfftn(rfftn(in1, fsize) *
                     rfftn(in2, fsize), fsize)[fslice].copy()
        ret = ret.real
    else:
        ret = ifftn(fftn(in1, fsize) * fftn(in2, fsize))[fslice].copy()

    shift = array([int(floor(fsize[0]/2.0)), int(floor(fsize[1]/2.0))])
    ret   = roll(roll(ret, -shift[0], axis=0), -shift[1], axis=1)
    return ret
Example #25
def comp_ifftn(values, axes_list, is_real=True):
    """Computes the Inverse Fourier Transform
    Parameters
    ----------
    values: ndarray
        ndarray of the FT
    Returns
    -------
    IFT
    """

    axes = []
    shape = []
    is_onereal = False
    is_half = False
    axis_names = [axis.name for axis in axes_list]
    for axis in axes_list:
        if axis.transform == "ifft":
            if is_real and axis.name == "time":
                axes.append(axis.index)
                shape.append(2 * (values.shape[axis.index] - 1))
                is_onereal = True
            elif is_real and axis.name == "angle" and "time" not in axis_names:
                axes.append(axis.index)
                shape.append(values.shape[axis.index])
                is_half = True
            else:
                axes = [axis.index] + axes
                shape = [values.shape[axis.index]] + shape
    size = array(shape).prod()
    if is_onereal:
        values = values * size / 2
        values_shift = ifftshift(values, axes=axes[:-1])
        slice_0 = take(values_shift, 0, axis=axes[-1])
        slice_0 *= 2
        other_values = delete(values_shift, 0, axis=axes[-1])
        values = insert(other_values, 0, slice_0, axis=axes[-1])
        values_IFT = irfftn(values, axes=axes)
    elif is_half:
        values = values * size / 2
        values_shift = ifftshift(values, axes=axes[:-1])
        slice_0 = take(values_shift, 0, axis=axes[-1])
        slice_0 *= 2
        other_values = delete(values_shift, 0, axis=axes[-1])
        values = insert(other_values, 0, slice_0, axis=axes[-1])
        values_IFT = ifftn(values, axes=axes)
    else:
        values_shift = ifftshift(values, axes=axes) * size
        values_IFT = ifftn(values_shift, axes=axes)
    return values_IFT
Example #26
def get_covariance_submatrix(IQdata, lags, q):
    logging.debug('Calculating Submatrix')
    I1 = np.asarray(IQdata[0])
    Q1 = np.asarray(IQdata[1])
    I2 = np.asarray(IQdata[2])
    Q2 = np.asarray(IQdata[3])
    lags = int(lags)
    start = len(I1) - lags - 1
    stop = len(I1) + lags
    sub_matrix = np.zeros([4, lags * 2 + 1])
    sI1 = np.array(I1.shape)
    shape0 = sI1*2 - 1
    fshape = [_next_regular(int(d)) for d in shape0]  # padding to optimal size for FFTPACK
    fslice = tuple([slice(0, int(sz)) for sz in shape0])  # remove padding later
    fftI1 = rfftn(I1, fshape)/fshape
    fftQ1 = rfftn(Q1, fshape)/fshape
    rfftI2 = rfftn(I2[::-1], fshape)/fshape
    rfftQ2 = rfftn(Q2[::-1], fshape)/fshape
    sub_matrix[0] = irfftn((fftI1 * rfftI2))[fslice].copy()[start:stop]  # <II>
    sub_matrix[1] = irfftn((fftI1 * rfftQ2))[fslice].copy()[start:stop]  # <IQ>
    sub_matrix[2] = irfftn((fftQ1 * rfftI2))[fslice].copy()[start:stop]  # <QI>
    sub_matrix[3] = irfftn((fftQ1 * rfftQ2))[fslice].copy()[start:stop]  # <QQ>
    q.put(sub_matrix)
Example #27
def KspaceInitializer(gridShape, fileDictionary, state = None):
    """
    Initialize plasticity state by reading from files given by the file
    dictionary.

    File dictionary must be a dictionary(hash) with component of field as
    its keys and filename pair(for real and imaginary) as its values.
   
    State must first be initialized and passed in for non default plasticity
    states.
 
    example:
    fileDictionary = {('x','z') : \
        ("InitialConditions/InitFourierCoeffXZ_real256.dat", \
         "InitialConditions/InitFourierCoeffXZ_im256.dat"),\
        ('y','z') : ("InitialConditions/InitFourierCoeffYZ_real256.dat", \
         "InitialConditions/InitFourierCoeffYZ_im256.dat"),\
        ('z','x') : ("InitialConditions/InitFourierCoeffZX_real256.dat", \
         "InitialConditions/InitFourierCoeffZX_im256.dat")}
    """
    if state is None:
        state = PlasticityState.PlasticityState(gridShape)
    field = state.GetOrderParameterField() 
 
    kGridShape = list(gridShape)
    kGridShape[-1] = int(kGridShape[-1]/2)+1 
    kGridShape = tuple(kGridShape)
    totalSize = 1
    for sz in kGridShape:
        totalSize *= sz  

    for component in fileDictionary: 
        rePart = numpy.fromfile(fileDictionary[component][0])
        imPart = numpy.fromfile(fileDictionary[component][1])
        kSpaceArray = rePart + 1.0j*imPart
       
        """
        Strip down only first half rows as this is the only data that gets
        used. Notice that we are actually taking real fourier transform on
        last axis, nevertheless we only take half data from the top. i.e.
        this may not very intuitive, but is coded this way for compatibility
        with Yor's old C++ version. i.e., strip down only half rows, and re-
        arrange so that column is halved. (That is, second half of first row
        becomes the second row and first half of second row becomes the third.
        """
        kSpaceArray = kSpaceArray[:totalSize].reshape(kGridShape)
       
        field[component] = fft.irfftn(kSpaceArray)
    return state
Example #28
    def _cpu_search(self):

        d = self.data
        c = self.cpu_data

        time0 = _time()
        for n in xrange(c['rotmat'].shape[0]):
            # rotate ligand image
            rotate_image3d(c['im_lsurf'], c['vlength'], 
                    np.linalg.inv(c['rotmat'][n]), d['im_center'], c['lsurf'])

            c['ft_lsurf'] = rfftn(c['lsurf']).conj()
            c['clashvol'] = irfftn(c['ft_lsurf'] * c['ft_rcore'], s=c['shape'])
            c['intervol'] = irfftn(c['ft_lsurf'] * c['ft_rsurf'], s=c['shape'])

            np.logical_and(c['clashvol'] < c['max_clash'],
                           c['intervol'] > c['min_interaction'],
                           c['interspace'])


            print('Number of complexes to analyze: ', c['interspace'].sum())
            c['chi2'].fill(0)
            calc_chi2(c['interspace'], c['q'], c['base_Iq'], 
                    c['rind'], c['rxyz'], c['lind'], (np.mat(c['rotmat'][n])*np.mat(c['lxyz']).T).T, 
                    c['origin'], self.voxelspacing, 
                    c['fifj'], c['targetIq'], c['sq'], c['chi2'])

            ind = c['chi2'] > c['best_chi2']
            c['best_chi2'][ind] = c['chi2'][ind]
            c['rot_ind'][ind] = n

            if _stdout.isatty():
                self._print_progress(n, c['nrot'], time0)

        d['best_chi2'] = c['best_chi2']
        d['rot_ind'] = c['rot_ind']
Example #29
def SpatialCorrelationFunction(Field1,Field2):
    """
    Designed for Periodic Boundary Condition.

    .. math::
        C_{12}(r) = <F_1(r) F_2(0)>

        C_{12}(k) = F_1(k)^* F_2(k)/V
    """
    V = float(numpy.array(Field1.shape).prod())
    KField1 = fft.rfftn(Field1).conj()
    KField2 = fft.rfftn(Field2) 
    KCorr = KField1*KField2/V 
    Corr  = fft.irfftn(KCorr)
    return Corr
Example #30
def multichannel_overlap_add_fftconvolve(x, h, mode='valid'):
    """Given a signal x compute the convolution with h using the overlap-add algorithm.

    This is an fft based convolution algorithm optimized to work on a signal x 
    and filter, h, where x is much longer than h. 

    Input:
    x: float array with dimensions (N, num_channel)
    h: float array with dimensions (filter_len, num_channel)
    mode: only accepts 'valid' - same meaning as in scipy.convolve

    Returns:
    float array of length N-filter_len+1 (for mode = valid)
    """
    assert mode == 'valid'
    
    # pad x so that the boundaries are dealt with correctly
    x_len = x.shape[0]
    num_channels = h.shape[1]
    h_len = h.shape[0]
    assert x.shape[1] == num_channels
    assert x_len >= h_len, \
        "The signal needs to be at least as long as the filter"
    
    #x = np.vstack((np.zeros((h_len, num_channels)), 
    #               x, 
    #               np.zeros((h_len, num_channels))))    
    # make sure that the desired block size is long enough to capture the motif
    block_size = max(2**OVERLAP_ADD_BLOCK_POWER, h_len)
    N = int(2**math.ceil(np.log2(block_size+h_len-1)))
    step_size = N-h_len+1
    
    H = rfftn(h,(N,num_channels))
    n_blocks = int(math.ceil(float(len(x))/step_size))
    y = np.zeros((n_blocks+1)*step_size)
    for block_index in xrange(n_blocks):
        start = block_index*step_size
        yt = irfftn( rfftn(x[start:start+step_size,:],(N, num_channels))*H, 
                     (N, num_channels) )
        y[start:start+N] += yt[:,num_channels-1]

    #y = y[h_len:2*h_len+x_len-1]
    if mode == 'full':
        return y
    elif mode == 'valid':
        return y[h_len-1:x_len]
    elif mode == 'same':
        raise NotImplementedError, "'same' mode is not implemented"
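The multichannel code above (and its duplicate below) obscures the overlap-add bookkeeping a little. A minimal 1-D sketch of the same algorithm, assuming a fixed power-of-two block size and NumPy's rfft/irfft, checked against np.convolve:

import numpy as np
from numpy.fft import rfft, irfft

rng = np.random.default_rng(5)
x = rng.random(10_000)        # long signal
h = rng.random(32)            # short filter

h_len = len(h)
N = 1024                      # FFT block size (>= h_len)
step = N - h_len + 1          # new samples consumed per block
H = rfft(h, N)

y = np.zeros(len(x) + N)      # room for the tail of the last block
for start in range(0, len(x), step):
    y[start:start + N] += irfft(rfft(x[start:start + step], N) * H, N)

full = y[:len(x) + h_len - 1]
assert np.allclose(full, np.convolve(x, h, mode='full'))

The 'valid' slice returned above would then correspond to full[h_len - 1:len(x)].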
Example #31
def multichannel_overlap_add_fftconvolve(x, h, mode='valid'):
    """Given a signal x compute the convolution with h using the overlap-add algorithm.

    This is an fft based convolution algorithm optimized to work on a signal x 
    and filter, h, where x is much longer than h. 

    Input:
    x: float array with dimensions (N, num_channel)
    h: float array with dimensions (filter_len, num_channel)
    mode: only accepts 'valid' - same meaning as in scipy.convolve

    Returns:
    float array of length N-filter_len+1 (for mode = valid)
    """
    assert mode == 'valid'
    
    # pad x so that the boundaries are dealt with correctly
    x_len = x.shape[0]
    num_channels = h.shape[1]
    h_len = h.shape[0]
    assert x.shape[1] == num_channels
    assert x_len >= h_len, \
        "The signal needs to be at least as long as the filter"
    
    #x = np.vstack((np.zeros((h_len, num_channels)), 
    #               x, 
    #               np.zeros((h_len, num_channels))))    
    # make sure that the desired block size is long enough to capture the motif
    block_size = max(2**OVERLAP_ADD_BLOCK_POWER, h_len)
    N = int(2**math.ceil(np.log2(block_size+h_len-1)))
    step_size = N-h_len+1
    
    H = rfftn(h,(N,num_channels))
    n_blocks = int(math.ceil(float(len(x))/step_size))
    y = np.zeros((n_blocks+1)*step_size)
    for block_index in xrange(n_blocks):
        start = block_index*step_size
        yt = irfftn( rfftn(x[start:start+step_size,:],(N, num_channels))*H, 
                     (N, num_channels) )
        y[start:start+N] += yt[:,num_channels-1]

    #y = y[h_len:2*h_len+x_len-1]
    if mode == 'full':
        return y
    elif mode == 'valid':
        return y[h_len-1:x_len]
    elif mode == 'same':
        raise NotImplementedError, "'same' mode is not implemented"
Example #32
def cross_correlation(seqs):
    # deal with the shape, and upcast to the next reasonable shape
    shape = np.array(seqs.shape[1:]) + np.array(seqs.shape[1:]) - 1
    fshape = [next_good_fshape(x) for x in shape]
    fslice = tuple([slice(0, int(sz)) for sz in shape])
    flipped_seqs_fft = np.zeros([seqs.shape[0],] + fshape[:-1] + [fshape[-1]//2+1,], dtype='complex')
    for i in xrange(seqs.shape[0]):
        rev_slice = tuple([i,] + [slice(None, None, -1) for sz in shape])
        flipped_seqs_fft[i] = rfftn(seqs[rev_slice], fshape)
    rv = np.zeros((seqs.shape[0], seqs.shape[0]), dtype='float32')
    for i in xrange(seqs.shape[0]):
        fft_seq = rfftn(seqs[i], fshape)
        for j in xrange(i+1, seqs.shape[0]):
            rv[i,j] = irfftn(fft_seq*flipped_seqs_fft[j], fshape)[fslice].max()
            #print rv[i,j], correlate(seqs[i], seqs[j]).max()
    return rv
Example #33
def get_g2(P1, P2, lags=20):
    ''' Returns the Top part of the G2 equation (<P1P2> - <P1><P2>)'''
    lags = int(lags)
    P1 = np.asarray(P1)
    P2 = np.asarray(P2)
    # G2 = np.zeros([lags*2-1])

    start = len(P1*2-1)-lags
    stop = len(P1*2-1)-1+lags

    # assume I1 Q1 have the same shape
    sP1 = np.array(P1.shape)
    complex_result = np.issubdtype(P1.dtype, np.complex)
    shape = sP1 - 1
    HPfilt = (int(sP1/(lags*4)))  # smallest features visible is lamda/4

    # Speed up FFT by padding to optimal size for FFTPACK
    fshape = [_next_regular(int(d)) for d in shape]
    fslice = tuple([slice(0, int(sz)) for sz in shape])
    # Pre-1.9 NumPy FFT routines are not threadsafe.  For older NumPys, make
    # sure we only call rfftn/irfftn from one thread at a time.
    if not complex_result and _rfft_lock.acquire(False):
        try:
            fftP1 = rfftn(P1, fshape)
            rfftP2 = rfftn(P2[::-1], fshape)
            fftP1 = np.concatenate((np.zeros(HPfilt), fftP1[HPfilt:]))
            rfftP2 = np.concatenate((np.zeros(HPfilt), rfftP2[HPfilt:]))
            G2 = irfftn((fftP1*rfftP2))[fslice].copy()[start:stop]/len(fftP1)
            return 

        finally:
            _rfft_lock.release()

    else:
        # If we're here, it's either because we need a complex result, or we
        # failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
        # is already in use by another thread).  In either case, use the
        # (threadsafe but slower) SciPy complex-FFT routines instead.
        # ret = ifftn(fftn(in1, fshape) * fftn(in2, fshape))[fslice].copy()
        print 'Abort, reason:complex input or Multithreaded FFT not available'

        if not complex_result:
            pass  # ret = ret.real

    P12var = np.var(P1)*np.var(P2)
    return G2-P12var
Example #34
def SpatialCorrelationFunctionA(Field1,Field2):
    """
    Corr_12(r) = <Phi_1(r)Phi_2(0)>

    Corr_12(k) = Phi_1(k)* Phi_2(k)/V 
    """ 
    dim = len(Field1.shape)
    if dim == 1:
        V=float(Field1.shape[0])
    elif dim == 2:
        V=float(Field1.shape[0]*Field1.shape[1])
    elif dim == 3:
        V=float(Field1.shape[0]*Field1.shape[1]*Field1.shape[2])
    KField1 = fft.rfftn(Field1).conj()
    KField2 = fft.rfftn(Field2) 
    KCorr = KField1*KField2/V 
    Corr  = fft.irfftn(KCorr)
    return Corr
Example #35
def pivCrossCorrelation2D(voxA,voxB,width,offset1,offset2):
  vox1 = np.array(voxA[offset1[0]:offset1[0]+width,\
                      offset1[1]:offset1[1]+width ],dtype="float");
  #
  vox2 = np.array(voxB[offset2[0]:offset2[0]+width,\
                      offset2[1]:offset2[1]+width ],dtype="float");
  #  
  #
  siz = np.array(vox1.shape)*2;
  #
  #
  # remove median 
  vox1 -= np.mean( vox1 ); 
  vox2 -= np.mean( vox2 );
  #
  # cross correlation - padded with zeros
  R=(1.0/(width*width))*np.real(fftshift(irfftn( np.conj(rfftn(vox1,siz)) * rfftn(vox2,siz) )));
  return R
Example #36
    def extended_source_image_(self, ideal_image):

        # Convolve

        assert np.alltrue(ideal_image.shape == self._expected_shape
                          ), "Shape of image to be convolved is not correct."

        ret = irfftn(
            rfftn(ideal_image, self._fshape) * self._psf_fft,
            self._fshape)[self._fslice].copy()

        conv = _centered(ret, self._expected_shape)
        #
        # fig, sub = plt.subplots(1,1)
        #
        # #sub[0].imshow(ideal_image, interpolation='none', cmap='gist_heat')
        # sub.imshow(conv, interpolation='none', cmap='gist_heat')
        #
        # fig.savefig("convolution.png")

        return conv
Example #37
def fourier_lowpass(x, r):
    '''
    Fourier lowpass of a 3-D volume

    Parameters
    ==========
    x : numpy.ndarray
        x.ndim == 3
    r : double
        Cut-off frequency for Fourier lowpass.
        The unit of r is the number of voxels.
        The resolution is box_size * voxel_size / r.
    '''
    fx = rfftn(fftshift(x), norm='ortho')
    for (i, j, k) in np.ndindex(fx.shape):
        ii = i if i < fx.shape[0] // 2 else i - fx.shape[0]
        jj = j if j < fx.shape[1] // 2 else j - fx.shape[1]
        kk = k
        rho = sqrt(ii**2 + jj**2 + kk**2)
        if rho > r:
            fx[i, j, k] = 0
    return ifftshift(irfftn(fx, norm='ortho'))
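The np.ndindex loop above is very slow for realistic volumes. The same spherical cut-off can be built as a boolean mask from fftfreq/rfftfreq; a vectorised sketch under the same conventions (frequency measured in voxel index units), with hypothetical names:

import numpy as np
from numpy.fft import rfftn, irfftn, fftshift, ifftshift, fftfreq, rfftfreq

def fourier_lowpass_vec(x, r):
    """Zero every Fourier component whose radius exceeds r."""
    n0, n1, n2 = x.shape
    fx = rfftn(fftshift(x), norm='ortho')
    # Frequency indices in "number of voxels" units, matching ii, jj, kk above.
    ki = fftfreq(n0) * n0
    kj = fftfreq(n1) * n1
    kk = rfftfreq(n2) * n2
    rho = np.sqrt(ki[:, None, None]**2 + kj[None, :, None]**2 + kk[None, None, :]**2)
    fx[rho > r] = 0
    return ifftshift(irfftn(fx, x.shape, norm='ortho'))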
Example #38
def get_covariance_submatrix_full(IQdata, lags):
    I1 = np.asarray(IQdata[0])
    # Q1 = np.asarray(IQdata[1])
    # I2 = np.asarray(IQdata[2])
    Q2 = np.asarray(IQdata[3])
    lags = int(lags)
    start = len(I1) - lags - 1
    stop = len(I1) + lags
    sub_matrix = np.zeros([16, lags * 2 + 1])
    sI1 = np.array(I1.shape)
    shap0 = sI1*2 - 1
    fshape = [_next_regular(int(d)) for d in shap0]  # padding to optimal size for FFTPACK
    fslice = tuple([slice(0, int(sz)) for sz in shap0])  # remove padding later
    fftIQ = 4*[None]
    rfftIQ = 4*[None]
    for i in range(4):
        fftIQ[i] = rfftn(IQdata[i], fshape)
        rfftIQ[i] = rfftn(IQdata[i][::-1], fshape)
    for j in range(4):
        for i in range(4):
            idx = i + j*4
            sub_matrix[idx] = (irfftn(fftIQ[i]*rfftIQ[j]))[fslice].copy()[start:stop]/len(fftIQ[i])
    return sub_matrix
Example #39
def SmecticInitializer(gridShape, sigma=0.2, seed=None):
    if seed is None:
        seed = 0
    random.seed(seed)

    state = SmecticState.SmecticState(gridShape)
    field = state.GetOrderParameterField()

    Ksq = FourierSpaceTools.FourierSpaceTools(gridShape).kSq.numpy_array()

    for component in field.components:
        temp = random.normal(scale=gridShape[0],size=gridShape)
        ktemp = fft.rfftn(temp)*(sqrt(pi)*sigma)**len(gridShape)*exp(-Ksq*sigma**2/4.)
        field[component] = numpy.real(fft.irfftn(ktemp))

    ## To make seed consistent across grid sizes and convergence comparison
    gridShape = copy.copy(oldgrid)
    if gridShape[0] != 128:
        state = ResizeState(state,gridShape[0],Dim=len(gridShape))

    state = ReformatState(state)
    state.ktools = FourierSpaceTools.FourierSpaceTools(gridShape)
    
    return state 
Example #40
def fftconvolve_fast(data, kernel, **kwargs):
    """FFT convolution, a faster version than scipy.

    In this case the kernel is ifftshifted before the FFT but the data is not.
    This can be done because the effect of Fourier convolution is to
    "wrap" around the data edges, so whether we ifftshift before the FFT
    and fftshift after makes no difference, and we can skip that step
    entirely.
    """
    # TODO: add error checking like in the above and add functionality
    # for complex inputs. Also could add options for different types of
    # padding.
    dshape = np.array(data.shape)
    kshape = np.array(kernel.shape)
    # find maximum dimensions
    maxshape = np.max((dshape, kshape), 0)
    # calculate a nice shape
    fshape = [next_fast_len(int(d)) for d in maxshape]
    # pad out with reflection
    pad_data = fft_pad(data, fshape, "reflect")
    # calculate padding
    padding = tuple(
        _calc_pad(o, n) for o, n in zip(data.shape, pad_data.shape))
    # so that we can calculate the cropping, maybe this should be integrated
    # into `fft_pad` ...
    fslice = tuple(
        slice(s, -e) if e != 0 else slice(s, None) for s, e in padding)
    if kernel.shape != pad_data.shape:
        # its been assumed that the background of the kernel has already been
        # removed and that the kernel has already been centered
        kernel = fft_pad(kernel, pad_data.shape, mode="constant")
    k_kernel = rfftn(ifftshift(kernel), pad_data.shape, **kwargs)
    k_data = rfftn(pad_data, pad_data.shape, **kwargs)
    convolve_data = irfftn(k_kernel * k_data, pad_data.shape, **kwargs)
    # return data with same shape as original data
    return convolve_data[fslice]
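The essential trick in fftconvolve_fast is that a centred kernel only needs one ifftshift before the forward transform; no fftshift of the result is needed afterwards. Stripped of the padding helpers (fft_pad, _calc_pad and next_fast_len come from the surrounding module), a sketch of that identity for even-sized arrays:

import numpy as np
from numpy.fft import rfftn, irfftn, fftshift, ifftshift

rng = np.random.default_rng(6)
data = rng.random((64, 64))

# A kernel centred on the same (even-sized) grid as the data.
yy, xx = np.indices(data.shape)
kernel = np.exp(-((yy - 32) ** 2 + (xx - 32) ** 2) / (2 * 3.0 ** 2))
kernel /= kernel.sum()

# ifftshift the kernel before the FFT ...
a = irfftn(rfftn(data) * rfftn(ifftshift(kernel)), data.shape)
# ... or convolve with the centred kernel and fftshift afterwards: same result.
b = fftshift(irfftn(rfftn(data) * rfftn(kernel), data.shape))

assert np.allclose(a, b)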
Example #41
    def smooth(self, in_data, clean=False, is_fft=False):
        """Apply smoothing to `in_data`

        Parameters
        ----------
        in_data: array_like
           The array to be smoothed. should be same shape as the
           shape provided during instantiation of this object
        clean: bool, optional
           Should we call ``nan_to_num`` on the data before smoothing?
        is_fft: bool, optional
           Has the data already been fft'd?

        Returns
        -------
        _out: array of same shape as input nin_data
           smoothed in_data

        Notes
        -----
        XXX: is the manual garbage collection --via calls to gc.collect()--
        actually necessary ? Is it dangerous ?

        """

        # nothing to do ?
        if not np.sum(self._fwhm) > 0:
            return in_data

        # get dimensionality of input data
        in_data = np.array(in_data)
        ndim = in_data.ndim

        if ndim == 4:
            _out = np.ndarray(in_data.shape)
            n_scans = in_data.shape[-1]
        elif ndim == 3:
            n_scans = 1
        else:
            raise ValueError('expecting either 3 or 4-d image')

        slices = [
            slice(0, self._bshape[i], 1) for i in range(len(self._shape))
        ]
        for _scan in range(n_scans):
            if ndim == 4:
                data = in_data[..., _scan]
            elif ndim == 3:
                data = in_data[:]
            if clean:
                data = np.nan_to_num(data)
            if not is_fft:
                data = self._presmooth(data, slices)
            data *= self.fkernel
            data = npfft.irfftn(data) / self._norm
            gc.collect()
            _dslice = [slice(0, self._bshape[i], 1) for i in range(3)]
            if self._scale != 1:
                data = self._scale * data[_dslice]
            if self._location != 0.0:
                data += self._location
            gc.collect()
            # Write out data
            if ndim == 4:
                _out[..., _scan] = data
            else:
                _out = data

        # collect output
        _out = _out[[
            slice(self._kernel.shape[i] // 2,
                  self._bshape[i] + self._kernel.shape[i] // 2)
            for i in range(len(self._bshape))
        ]]

        # return output
        return _out
Example #42
if __name__ == "__main__":
    if torch.cuda.is_available():
        nfft3 = lambda x: nfft.fftn(x, axes=(1, 2, 3))
        nifft3 = lambda x: nfft.ifftn(x, axes=(1, 2, 3))

        cfs = [cfft.fft, cfft.fft2, cfft.fft3]
        nfs = [nfft.fft, nfft.fft2, nfft3]
        cifs = [cfft.ifft, cfft.ifft2, cfft.ifft3]
        nifs = [nfft.ifft, nfft.ifft2, nifft3]

        for args in zip(cfs, nfs, cifs, nifs):
            test_c2c(*args)

        nrfft3 = lambda x: nfft.rfftn(x, axes=(1, 2, 3))
        nirfft3 = lambda x: nfft.irfftn(x, axes=(1, 2, 3))

        cfs = [cfft.rfft, cfft.rfft2, cfft.rfft3]
        nfs = [nfft.rfft, nfft.rfft2, nrfft3]
        cifs = [cfft.irfft, cfft.irfft2, cfft.irfft3]
        nifs = [nfft.irfft, nfft.irfft2, nirfft3]

        for args in zip(cfs, nfs, cifs, nifs):
            test_r2c(*args)

        test_expand()
        test_fft_gradcheck()
        test_ifft_gradcheck()
        test_fft2d_gradcheck()
        test_ifft2d_gradcheck()
        test_fft3d_gradcheck()
Example #43
 def irfftn(self, shape=None):
     return NumpyArray(fft.irfftn(self, shape))
Example #44
    def calculate_6d_integral(self, n_g, q0_g, a2_g=None, e_LDAc_g=None, v_LDAc_g=None, v_g=None, deda2_g=None):
        self.timer.start("VdW-DF integral")
        self.timer.start("splines")
        if self.C_aip is None:
            self.construct_cubic_splines()
            self.construct_fourier_transformed_kernels()
        self.timer.stop("splines")

        gd = self.gd
        N = self.Nalpha

        world = self.world
        vdwcomm = self.vdwcomm

        if self.alphas:
            self.timer.start("hmm1")
            i_g = (np.log(q0_g / self.q_a[1] * (self.lambd - 1) + 1) / log(self.lambd)).astype(int)
            dq0_g = q0_g - self.q_a[i_g]
            self.timer.stop("hmm1")
        else:
            i_g = None
            dq0_g = None

        if self.verbose:
            print "VDW: fft:",

        theta_ak = {}
        p_ag = {}
        for a in self.alphas:
            self.timer.start("hmm2")
            C_pg = self.C_aip[a, i_g].transpose((3, 0, 1, 2))
            pa_g = C_pg[0] + dq0_g * (C_pg[1] + dq0_g * (C_pg[2] + dq0_g * C_pg[3]))
            self.timer.stop("hmm2")
            del C_pg
            self.timer.start("FFT")
            theta_ak[a] = rfftn(n_g * pa_g, self.shape).copy()
            if extra_parameters.get("vdw0"):
                theta_ak[a][0, 0, 0] = 0.0
            self.timer.stop()

            if not self.energy_only:
                p_ag[a] = pa_g
            del pa_g
            if self.verbose:
                print a,
                sys.stdout.flush()

        if self.energy_only:
            del i_g
            del dq0_g

        if self.verbose:
            print
            print "VDW: convolution:",

        F_ak = {}
        dj_k = self.dj_k
        energy = 0.0
        for a in range(N):
            if vdwcomm is not None:
                vdw_ranka = a * vdwcomm.size // N
                F_k = np.zeros((self.shape[0], self.shape[1], self.shape[2] // 2 + 1), complex)
            self.timer.start("Convolution")
            for b in self.alphas:
                _gpaw.vdw2(self.phi_aajp[a, b], self.j_k, dj_k, theta_ak[b], F_k)
            self.timer.stop()

            if vdwcomm is not None:
                self.timer.start("gather")
                for F in F_k:
                    vdwcomm.sum(F, vdw_ranka)
                # vdwcomm.sum(F_k, vdw_ranka)
                self.timer.stop("gather")

            if vdwcomm is not None and vdwcomm.rank == vdw_ranka:
                if not self.energy_only:
                    F_ak[a] = F_k
                energy += np.vdot(theta_ak[a][:, :, 0], F_k[:, :, 0]).real
                energy += np.vdot(theta_ak[a][:, :, -1], F_k[:, :, -1]).real
                energy += 2 * np.vdot(theta_ak[a][:, :, 1:-1], F_k[:, :, 1:-1]).real

            if self.verbose:
                print a,
                sys.stdout.flush()

        del theta_ak

        if self.verbose:
            print

        if not self.energy_only:
            F_ag = {}
            for a in self.alphas:
                n1, n2, n3 = gd.get_size_of_global_array()
                self.timer.start("iFFT")
                F_ag[a] = irfftn(F_ak[a]).real[:n1, :n2, :n3].copy()
                self.timer.stop()
            del F_ak

            self.timer.start("potential")
            self.calculate_potential(n_g, a2_g, i_g, dq0_g, p_ag, F_ag, e_LDAc_g, v_LDAc_g, v_g, deda2_g)
            self.timer.stop()

        self.timer.stop()
        return 0.5 * world.sum(energy) * gd.dv / self.shape.prod()
Example #45
    def smooth(self, inimage, clean=False, is_fft=False):
        """ Apply smoothing to `inimage`

        Parameters
        ----------
        inimage : ``Image``
           The image to be smoothed.  Should be 3D.
        clean : bool, optional
           Should we call ``nan_to_num`` on the data before smoothing?
        is_fft : bool, optional
           Has the data already been fft'd?

        Returns
        -------
        s_image : `Image`
           New image, with smoothing applied
        """
        if inimage.ndim == 4:
            # we need to generalize which axis to iterate over.  By
            # default it should probably be the last.
            raise NotImplementedError('Smoothing volumes in a 4D series '
                                      'is broken, pending a rethink')
            _out = np.zeros(inimage.shape)
            # iterate over the first (0) axis - this is confusing - see
            # above
            nslice = inimage.shape[0]
        elif inimage.ndim == 3:
            nslice = 1
        else:
            raise NotImplementedError('expecting either 3 or 4-d image')
        in_data = inimage.get_data()
        for _slice in range(nslice):
            if in_data.ndim == 4:
                data = in_data[_slice]
            elif in_data.ndim == 3:
                data = in_data[:]
            if clean:
                data = np.nan_to_num(data)
            if not is_fft:
                data = self._presmooth(data)
            data *= self.fkernel
            data = fft.irfftn(data) / self.norms[self.normalization]
            gc.collect()
            _dslice = [slice(0, self.bshape[i], 1) for i in range(3)]
            if self.scale != 1:
                data = self.scale * data[_dslice]
            if self.location != 0.0:
                data += self.location
            gc.collect()
            # Write out data 
            if in_data.ndim == 4:
                _out[_slice] = data
            else:
                _out = data
            _slice += 1
        gc.collect()
        _out = _out[[slice(self._kernel.shape[i]/2, self.bshape[i] +
                           self._kernel.shape[i]/2) for i in range(len(self.bshape))]]
        if inimage.ndim == 3:
            return Image(_out, coordmap=self.coordmap)
        else:
            # This does not work as written.  See above
            concat_affine = AffineTransform.identity('concat')
            return Image(_out, coordmap=product(self.coordmap, concat_affine))
Example #46
 def _irfftn(a, s=None, axes=None):
     return npfft.irfftn(a, s, axes).astype(real_dtype(a.dtype))
Example #47
 def irfft(a, normalize=True, nthreads=ncpu):
     if normalize:
         return fftw.irfftn(a)
     else:
         return fftw.irfft(a)
Example #48
 def get_real_image(self):
     if self.image is None:
         self.image = irfftn(self.ft, self.imageshape)
     return self.image
Example #49
    def __init__(self, npow=-4., ngrid=256, xmax=1., dx=0.01, seed=27021987):

        start = time()
        print("Creating 3-D velocity grid with power spectrum P_k~k**{}".\
               format(npow))

        if ngrid % 2 != 0:
            print("Grid points must be an even number. Exiting.")
            exit()
        nc = int(ngrid / 2) + 1

        kmax = 2 * pi / dx
        kmin = 2 * pi / xmax

        kx = fft.fftfreq(ngrid, d=1 / (2 * kmax))
        ky = kx
        kz = fft.rfftfreq(ngrid, d=1 / (2 * kmax))

        # we produce a 3-D grid of the Fourier coordinates
        kxx, kyy, kzz = meshgrid(kx, ky, kz, indexing='ij', sparse=True)
        kk = kxx * kxx + kyy * kyy + kzz * kzz + kmin**2

        random.seed(seed)

        # we sample the components of a vector potential, as we want
        # an incompressible velocity field
        xi1 = random.random(size=kk.shape)
        xi2 = random.random(size=kk.shape)
        c = kk**((npow - 2.) / 4.) * sqrt(-log(1 - xi1))
        phi = 2 * pi * xi2
        akx = c * exp(1j * phi)
        xi1 = random.random(size=kk.shape)
        xi2 = random.random(size=kk.shape)
        c = kk**((npow - 2.) / 4.) * sqrt(-log(1 - xi1))
        phi = 2 * pi * xi2
        aky = c * exp(1j * phi)
        xi1 = random.random(size=kk.shape)
        xi2 = random.random(size=kk.shape)
        c = kk**((npow - 2.) / 4.) * sqrt(-log(1 - xi1))
        phi = 2 * pi * xi2
        akz = c * exp(1j * phi)

        new_shape = akx.shape + (3, )
        kv = zeros(new_shape, dtype=akx.dtype)
        kv[:, :, :, 0] = 1j * kxx
        kv[:, :, :, 1] = 1j * kyy
        kv[:, :, :, 2] = 1j * kzz
        ak = zeros(new_shape, dtype=akx.dtype)
        ak[:, :, :, 0] = akx
        ak[:, :, :, 1] = aky
        ak[:, :, :, 2] = akz

        # the velocity vector in Fourier space is obtained by
        # taking the curl of A, which is
        vk = cross(kv, ak)

        self.ngrid = ngrid
        self.vx = fft.irfftn(vk[:, :, :, 0])
        self.vy = fft.irfftn(vk[:, :, :, 1])
        self.vz = fft.irfftn(vk[:, :, :, 2])

        print("\nInverse Fourier Transform took {:g}s.".format(time() - start))
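The comments above motivate sampling a vector potential so that its curl gives an incompressible velocity field. A standalone check of that property in Fourier space, with the same construction on a toy grid (names and sizes here are illustrative, not the ones from __init__):

import numpy as np

rng = np.random.default_rng(7)
n = 16
kx = np.fft.fftfreq(n)
kz = np.fft.rfftfreq(n)
kxx, kyy, kzz = np.meshgrid(kx, kx, kz, indexing='ij', sparse=True)

shape = (n, n, n // 2 + 1)
ak = rng.normal(size=shape + (3,)) + 1j * rng.normal(size=shape + (3,))

kv = np.zeros(shape + (3,), dtype=complex)
kv[..., 0] = 1j * kxx
kv[..., 1] = 1j * kyy
kv[..., 2] = 1j * kzz

vk = np.cross(kv, ak)            # curl of the vector potential in Fourier space
div_k = (kv * vk).sum(axis=-1)   # i k . v_k, the divergence in Fourier space
print(abs(div_k).max())          # zero up to floating-point rounding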
Example #50
    def test_r2c_outofplace(self):
        """
        Test out-of-place R2C transforms
        """
        n = 32
        for dims in range(1, 5):
            if dims >= 3:
                ndim_max = min(dims + 1, 2)
            else:
                ndim_max = min(dims + 1, 3)
            for ndim in range(1, ndim_max):
                for dtype in [np.float32, np.float64]:
                    for norm in [0, 1, "ortho"]:
                        with self.subTest(dims=dims,
                                          ndim=ndim,
                                          dtype=dtype,
                                          norm=norm):
                            if dtype == np.float32:
                                rtol = 1e-6
                            else:
                                rtol = 1e-12
                            if dtype == np.float32:
                                dtype_c = np.complex64
                            elif dtype == np.float64:
                                dtype_c = np.complex128

                            sh = [n] * dims
                            sh = tuple(sh)
                            shc = [n] * dims
                            shc[-1] = n // 2 + 1
                            shc = tuple(shc)

                            d = np.random.uniform(0, 1, sh).astype(dtype)
                            # A pure random array may not be a very good test (too random),
                            # so add a Gaussian
                            xx = [
                                np.fft.fftshift(np.fft.fftfreq(nn))
                                for nn in sh
                            ]
                            v = np.zeros_like(d)
                            for x in np.meshgrid(*xx, indexing='ij'):
                                v += x**2
                            d += 10 * np.exp(-v * 2)
                            n0 = (abs(d)**2).sum()
                            d_gpu = cua.to_gpu(d)
                            d1_gpu = cua.empty(shc, dtype=dtype_c)

                            app = VkFFTApp(d.shape,
                                           d.dtype,
                                           ndim=ndim,
                                           norm=norm,
                                           r2c=True,
                                           inplace=False)
                            # base FFT scale
                            s = np.sqrt(np.prod(d.shape[-ndim:]))

                            d = rfftn(d, axes=list(range(dims))[-ndim:]) / s
                            d1_gpu = app.fft(d_gpu, d1_gpu)
                            d1_gpu *= dtype_c(app.get_fft_scale())
                            self.assertTrue(d1_gpu.shape == tuple(shc))
                            self.assertTrue(d1_gpu.dtype == dtype_c)

                            self.assertTrue(
                                np.allclose(d,
                                            d1_gpu.get(),
                                            rtol=rtol,
                                            atol=abs(d).max() * rtol))

                            d = irfftn(d, axes=list(range(dims))[-ndim:]) * s
                            d_gpu = app.ifft(d1_gpu, d_gpu)
                            d_gpu *= dtype(app.get_ifft_scale())
                            self.assertTrue(d_gpu.shape == tuple(sh))

                            self.assertTrue(
                                np.allclose(d,
                                            d_gpu.get(),
                                            rtol=rtol,
                                            atol=abs(d).max() * rtol))
                            n1 = (abs(d_gpu.get())**2).sum()
                            self.assertTrue(np.isclose(n0, n1, rtol=rtol))
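
The reference scaling used in this test (dividing the forward rfftn by s = sqrt(prod(shape[-ndim:])) and multiplying the inverse by s) is NumPy's norm="ortho" convention written out by hand. A minimal CPU-only sketch of that equivalence with plain NumPy (no GPU or VkFFT required), transforming over all axes:

import numpy as np

x = np.random.uniform(0, 1, (32, 32)).astype(np.float32)
s = np.sqrt(x.size)                      # sqrt of the number of transformed points

f_manual = np.fft.rfftn(x) / s           # unnormalized forward transform, rescaled
f_ortho = np.fft.rfftn(x, norm="ortho")
assert np.allclose(f_manual, f_ortho, rtol=1e-5, atol=1e-6)

# The matching inverse scaling gives an exact round trip.
x_back = np.fft.irfftn(f_manual, s=x.shape, norm="ortho")
assert np.allclose(x, x_back, rtol=1e-5, atol=1e-6)
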
Ejemplo n.º 51
0
    def smooth(self, inimage, clean=False, is_fft=False):
        """ Apply smoothing to `inimage`

        Parameters
        ----------
        inimage : ``Image``
           The image to be smoothed.  Should be 3D.
        clean : bool, optional
           Should we call ``nan_to_num`` on the data before smoothing?
        is_fft : bool, optional
           Has the data already been fft'd?

        Returns
        -------
        s_image : `Image`
           New image, with smoothing applied
        """
        if inimage.ndim == 4:
            # we need to generalize which axis to iterate over.  By
            # default it should probably be the last.
            raise NotImplementedError('Smoothing volumes in a 4D series '
                                      'is broken, pending a rethink')
            _out = np.zeros(inimage.shape)
            # iterate over the first (0) axis - this is confusing - see
            # above
            nslice = inimage.shape[0]
        elif inimage.ndim == 3:
            nslice = 1
        else:
            raise NotImplementedError('expecting either 3 or 4-d image')
        in_data = inimage.get_data()
        for _slice in range(nslice):
            if in_data.ndim == 4:
                data = in_data[_slice]
            elif in_data.ndim == 3:
                data = in_data[:]
            if clean:
                data = np.nan_to_num(data)
            if not is_fft:
                data = self._presmooth(data)
            data *= self.fkernel
            data = fft.irfftn(data) / self.norms[self.normalization]
            gc.collect()
            _dslice = tuple(slice(0, self.bshape[i], 1) for i in range(3))
            if self.scale != 1:
                data = self.scale * data[_dslice]
            if self.location != 0.0:
                data += self.location
            gc.collect()
            # Write out data
            if in_data.ndim == 4:
                _out[_slice] = data
            else:
                _out = data
            _slice += 1
        gc.collect()
        _out = _out[tuple(
            slice(self._kernel.shape[i] // 2,
                  self.bshape[i] + self._kernel.shape[i] // 2)
            for i in range(len(self.bshape)))]
        if inimage.ndim == 3:
            return Image(_out, coordmap=self.coordmap)
        else:
            # This does not work as written.  See above
            concat_affine = AffineTransform.identity('concat')
            return Image(_out, coordmap=product(self.coordmap, concat_affine))
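
At its core smooth() is the usual FFT smoothing recipe: transform once, multiply by the kernel's transform, invert, and crop the padding away. A minimal sketch of that recipe, with scipy.ndimage.fourier_gaussian standing in for the precomputed fkernel (the scaling, normalization and 4D handling above are deliberately omitted):

import numpy as np
from scipy.ndimage import fourier_gaussian

def fft_gaussian_smooth(data, sigma):
    # Pad so the periodic wrap-around of the FFT does not blend opposite
    # edges into each other, then crop back to the original shape.
    pshape = tuple(s + int(4 * sigma) for s in data.shape)
    fdata = np.fft.rfftn(data, s=pshape)
    fdata = fourier_gaussian(fdata, sigma, n=pshape[-1])
    smoothed = np.fft.irfftn(fdata, s=pshape)
    return smoothed[tuple(slice(0, s) for s in data.shape)]

smoothed = fft_gaussian_smooth(np.random.rand(16, 16, 16), sigma=2.0)
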
Ejemplo n.º 52
0
 def ifft(y, ax, ncpu, lastsize):
     return irfftn(y, axes=ax)
Ejemplo n.º 53
0
def fftconvolve(in1, in2, mode="full"):
    """Convolve two N-dimensional arrays using FFT, implemented using the pyfftw module ('Fastest Fourier Transform in the West').

    Convolve `in1` and `in2` using the fast Fourier transform method, with
    the output size determined by the `mode` argument.

    This is generally much faster than `convolve` for large arrays (n > ~500),
    but can be slower when only a few output values are needed, and can only
    output float arrays (int or object array inputs will be cast to float).

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`;
        if sizes of `in1` and `in2` are not equal then `in1` has to be the
        larger array.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.

    Returns
    -------
    out : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    Examples
    --------
    Autocorrelation of white noise is an impulse.  (This is at least 100 times
    as fast as `convolve`.)

    >>> from scipy import signal
    >>> sig = np.random.randn(1000)
    >>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')

    >>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
    >>> ax_orig.plot(sig)
    >>> ax_orig.set_title('White noise')
    >>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
    >>> ax_mag.set_title('Autocorrelation')
    >>> fig.show()

    Gaussian blur implemented using FFT convolution.  Notice the dark borders
    around the image, due to the zero-padding beyond its boundaries.
    The `convolve2d` function allows for other types of image boundaries,
    but is far slower.

    >>> from scipy import misc
    >>> lena = misc.lena()
    >>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
    >>> blurred = signal.fftconvolve(lena, kernel, mode='same')

    >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(1, 3)
    >>> ax_orig.imshow(lena, cmap='gray')
    >>> ax_orig.set_title('Original')
    >>> ax_orig.set_axis_off()
    >>> ax_kernel.imshow(kernel, cmap='gray')
    >>> ax_kernel.set_title('Gaussian kernel')
    >>> ax_kernel.set_axis_off()
    >>> ax_blurred.imshow(blurred, cmap='gray')
    >>> ax_blurred.set_title('Blurred')
    >>> ax_blurred.set_axis_off()
    >>> fig.show()

    """
    # if NTHREADS==0:
        # print("WARNING: in fftwconvolve(): NTHREADS = 0, using numpy FFT routines instead...")

    in1 = asarray(in1)
    in2 = asarray(in2)

    if in1.ndim == in2.ndim == 0:  # scalar inputs
        return in1 * in2
    elif not in1.ndim == in2.ndim:
        raise ValueError("in1 and in2 should have the same dimensionality")
    elif in1.size == 0 or in2.size == 0:  # empty arrays
        return array([])

    s1 = array(in1.shape)
    s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    shape = s1 + s2 - 1

    if mode == "valid":
        _check_valid_mode_shapes(s1, s2)

    # Speed up FFT by padding to optimal size for FFTPACK
    fshape = [_next_regular(int(d)) for d in shape]
    fslice = tuple([slice(0, int(sz)) for sz in shape])
    # Pre-1.9 NumPy FFT routines are not threadsafe.  For older NumPys, make
    # sure we only call rfftn/irfftn from one thread at a time.
    if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
        try:
            if NTHREADS==0:
                ret = irfftn(rfftn(in1, fshape) *
                             rfftn(in2, fshape), fshape)[fslice].copy()
            else:
                ret = pyfftw.interfaces.numpy_fft.irfftn(\
                        pyfftw.interfaces.numpy_fft.rfftn(in1, s=fshape, threads=NTHREADS) * \
                        pyfftw.interfaces.numpy_fft.rfftn(in2, s=fshape, threads=NTHREADS), \
                        s=fshape)[fslice].copy()
        finally:
            if not _rfft_mt_safe:
                _rfft_lock.release()
    else:
        # If we're here, it's either because we need a complex result, or we
        # failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
        # is already in use by another thread).  In either case, use the
        # (threadsafe but slower) SciPy complex-FFT routines instead.
        if NTHREADS==0:
            ret = ifftn(fftn(in1, fshape) * fftn(in2, fshape))[fslice].copy()
        else:
            ret = pyfftw.interfaces.numpy_fft.ifftn(\
                pyfftw.interfaces.numpy_fft.fftn(in1, fshape) * \
                pyfftw.interfaces.numpy_fft.fftn(in2, fshape)\
                )[fslice].copy()
        if not complex_result:
            ret = ret.real

    if mode == "full":
        return ret
    elif mode == "same":
        return _centered(ret, s1)
    elif mode == "valid":
        return _centered(ret, s1 - s2 + 1)
    else:
        raise ValueError("Acceptable mode flags are 'valid',"
                         " 'same', or 'full'.")
Ejemplo n.º 54
0
def invert_jacdict(jacdict, unknowns, targets, tau, test_invertible=False):
    """Given a nested dict of ATI Jacobians that maps unknowns -> targets, e.g. an asymptotic
    H_U matrix, get the inverse H_U^(-1) as a nested dict.

    This is implemented by inverting the FFT-based multiplication that was implemented above
    for ATI, making use of the linearity of the FFT:
        - We take the FFT of each ATI Jacobian, padded out to 4*tau-3 as above
            (This is done by first packing all Jacobians into a single array A)
        - Then, we take the FFT of the identity, centered around 2*tau-1 since
            we intend it to be the result of a product
        - We solve frequency-by-frequency, i.e. for each of 4*tau-3 omegas we solve a k*k
            linear system to get A_rfft[omega,...]^(-1)*id_rfft[omega,...]
        - We take the inverse FFT of the results, then take only the first 2*tau-1 elements
            to get (approximate) inverse Jacobians with times -(tau-1),...,(tau-1), same as
            original Jacobians
        - We unpack these to get a nested dict of ATI Jacobians that inverts original 'jacdict'

    Parameters
    ----------
    jacdict  : dict of dict, ATI (or convertible to ATI) Jacobians where jacdict[t][u] gives
                    asymptotic mapping from unknowns u to targets t in H_U
    unknowns : list, names of unknowns in H_U
    targets  : list, names of targets in H_U
    tau      : int, convert all ATI Jacobians to size tau and provide inverse in size tau
    test_invertible : [optional] bool, use winding number criterion to test whether we should
                    really be inverting this system (i.e. whether determinate solution)

    Returns
    -------
    inv_jacdict : dict of dict, ATI Jacobians where inv_jacdict[u][t] gives asymptotic mapping
                    from targets t to unknowns u in H_U^(-1)
    """

    k = len(unknowns)
    assert k == len(targets)

    # stack the k^2 Jacobians relating unknowns to targets into an A matrix
    A = jac.pack_asymptotic_jacobians(jacdict, unknowns, targets, tau)

    if test_invertible:
        # use winding number criterion to test invertibility
        if determinacy.winding_criterion(A, N=4096) != 0:
            raise ValueError('Trying to invert asymptotic time invariant system of Jacobians' + 
                             ' but winding number test says that it is not uniquely invertible!')

    # take FFT of first dimension (time) of A (i.e. take FFT separately of all k^2 Jacobians)
    A_rfft = rfftn(A, s=(4*tau-3,), axes=(0,))
    
    # take FFT of identity operator (for efficiency, reuse smaller calc)
    id_vec_rfft = rfft(np.arange(4*tau-3)==(2*tau-2))
    id_rfft = np.zeros((2*tau-1, k, k), dtype=np.complex128)
    for i in range(k):
        id_rfft[:, i, i] = id_vec_rfft
    
    # now solve the linear system to invert A frequency-by-frequency
    # (since frequency is leading dimension, np.linalg.solve automatically does this)
    A_rfft_inv = np.linalg.solve(A_rfft, id_rfft)

    # take inverse FFT of this to get full A
    # then take first 2*tau-1 entries to get approximate A from -(tau-1),...,0,...,(tau-1)
    A_inv = irfftn(A_rfft_inv, s=(4*tau-3,), axes=(0,))[:2*tau-1, :, :]

    # unstack this
    return jac.unpack_asymptotic_jacobians(A_inv, targets, unknowns, tau)
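
The frequency-by-frequency inversion described in the docstring is easiest to see in the scalar (k = 1) case, where np.linalg.solve reduces to a division. A minimal sketch with plain NumPy, inverting the ATI operator I - 0.5 L (L shifts by one lag), whose inverse has geometric coefficients 0.5**j at non-negative lags; the jac and determinacy helpers above are not needed here:

import numpy as np

tau = 50
kernel = np.zeros(2 * tau - 1)           # lags -(tau-1), ..., (tau-1)
kernel[tau - 1] = 1.0                    # lag 0
kernel[tau] = -0.5                       # lag +1, so A = I - 0.5 * L

n = 4 * tau - 3
A_rfft = np.fft.rfft(kernel, n)
id_rfft = np.fft.rfft((np.arange(n) == (2 * tau - 2)).astype(float))

# Solve per frequency, invert the FFT, keep the first 2*tau - 1 lags.
inv = np.fft.irfft(id_rfft / A_rfft, n)[:2 * tau - 1]
assert np.allclose(inv[tau - 1:tau + 4], 0.5 ** np.arange(5), atol=1e-6)
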
Ejemplo n.º 55
0
def sgimgcoeffs(img, *args, **kwargs):
	'''
	Given a 3-D image img with shape (nx, ny, nz), use Savitzky-Golay
	stencils from savgol(*args, **kwargs) to compute the filtered
	double-precision image coeffs with shape (nx, ny, nz, ns) such that
	coeffs[:,:,:,i] holds the convolution of img with the i-th stencil.

	If the image is of single precision, the filter correlation will be done
	in single-precision; otherwise, double precision will be used.

	The pyfftw module will be used, if available, to accelerate FFT
	correlations. Otherwise, the stock Numpy FFT will be used.
	'''
	# Create the stencils first
	stencils = savgol(*args, **kwargs)
	if not stencils: raise ValueError('Savitzky-Golay stencil list is empty')

	# Interpret the input as an array and check its dimensionality
	img = np.asarray(img)
	if img.ndim != 3: raise ValueError('Image img must be three-dimensional')

	# If possible, find the next-larger efficient size
	try: from scipy.fftpack.helper import next_fast_len
	except ImportError: next_fast_len = lambda x: x

	# Half-sizes of kernels along each axis
	hsizes = tuple(bsz // 2 for bsz in stencils[0].shape)

	# Padded shape for FFT convolution and the R2C FFT output
	pshape = tuple(next_fast_len(isz + 2 * bsz)
			for isz, bsz in zip(img.shape, hsizes))

	if img.dtype == np.dtype('float32'):
		ftype, ctype = np.dtype('float32'), np.dtype('complex64')
	else:
		ftype, ctype = np.dtype('float64'), np.dtype('complex128')

	try:
		import pyfftw
	except ImportError:
		from numpy.fft import rfftn, irfftn
		empty = np.empty
		use_fftw = False
	else:
		# Use pyfftw's aligned allocations and explicit FFTW plans
		empty = pyfftw.empty_aligned
		use_fftw = True

	# Build working and output arrays
	kernel = empty(pshape, dtype=ftype)
	output = empty(img.shape + (len(stencils),), dtype=ftype)

	if use_fftw:
		# Need to create output arrays and plan both FFTs
		krfft = empty(pshape[:-1] + (pshape[-1] // 2 + 1,), dtype=ctype)
		rfftn = pyfftw.FFTW(kernel, krfft, axes=(0, 1, 2))
		irfftn = pyfftw.FFTW(krfft, kernel,
				axes=(0, 1, 2), direction='FFTW_BACKWARD')

	m,n,p = img.shape

	# Copy the image, leaving space for boundaries
	kernel[:,:,:] = 0.
	kernel[:m,:n,:p] = img

	# For right boundaries, watch for running off left end with small arrays
	for ax, (ld, hl) in enumerate(zip(img.shape, hsizes)):
		# Build the slice for boundary values
		lslices = [slice(None)] * 3
		rslices = [slice(None)] * 3

		# Left boundaries are straightforward
		lslices[ax] = slice(hl, 0, -1)
		rslices[ax] = slice(-hl, None)
		kernel[tuple(rslices)] = kernel[tuple(lslices)]

		# Don't walk off left edge when mirroring right boundary
		hi = ld - 1
		lo = max(hi - hl, 0)
		lslices[ax] = slice(lo, hi)
		rslices[ax] = slice(2 * hi - lo, hi, -1)
		kernel[tuple(rslices)] = kernel[tuple(lslices)]

	# Compute the image FFT
	if use_fftw:
		rfftn.execute()
		imfft = krfft.copy()
	else: imfft = rfftn(kernel)

	i,j,k = hsizes
	t,u,v = stencils[0].shape

	for l, stencil in enumerate(stencils):
		# Clear the kernel storage and copy the stencil
		kernel[:,:,:] = 0.
		kernel[:t,:u,:v] = stencil[::-1,::-1,::-1]
		if use_fftw:
			rfftn.execute()
			krfft[:,:,:] *= imfft
			irfftn(normalise_idft=True)
		else: kernel = irfftn(rfftn(kernel) * imfft)
		output[:,:,:,l] = kernel[i:i+m,j:j+n,k:k+p]

	return output
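
The boundary loop above fills the padded buffer so that the circular FFT correlation sees mirrored edges rather than zeros. A 1-D walk-through of the same index arithmetic with concrete numbers (image length 6, kernel half-size 2, padded length 12):

import numpy as np

ld, hl, pad = 6, 2, 12
img = np.arange(1.0, ld + 1.0)       # [1, 2, 3, 4, 5, 6]
buf = np.zeros(pad)
buf[:ld] = img

# Left boundary: reflect the first hl interior samples to the wrapped-around end.
buf[-hl:] = buf[hl:0:-1]             # buf[10:12] = [3, 2]

# Right boundary: reflect the last hl interior samples just past the image,
# clamping at the left edge for very small arrays (lo = max(hi - hl, 0)).
hi = ld - 1
lo = max(hi - hl, 0)
buf[2 * hi - lo:hi:-1] = buf[lo:hi]  # buf[6:8] = [5, 4]

print(buf)                           # [1. 2. 3. 4. 5. 6. 5. 4. 0. 0. 3. 2.]
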
Ejemplo n.º 56
0
 def get_real_image(self):
     if self.image is None:
         self.image = irfftn(self.ft, self.imageshape)
     return self.image
Ejemplo n.º 57
0
def weightedfftconvolve(in1, in2, mode="full", weighting='none', displayplots=False):
    """Convolve two N-dimensional arrays using FFT.
    Convolve `in1` and `in2` using the fast Fourier transform method, with
    the output size determined by the `mode` argument.
    This is generally much faster than `convolve` for large arrays (n > ~500),
    but can be slower when only a few output values are needed, and can only
    output float arrays (int or object array inputs will be cast to float).
    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`;
        if sizes of `in1` and `in2` are not equal then `in1` has to be the
        larger array.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:
        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    Returns
    -------
    out : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.
    """
    in1 = np.asarray(in1)
    in2 = np.asarray(in2)

    if np.isscalar(in1) and np.isscalar(in2):  # scalar inputs
        return in1 * in2
    elif not in1.ndim == in2.ndim:
        raise ValueError("in1 and in2 should have the same rank")
    elif in1.size == 0 or in2.size == 0:  # empty arrays
        return np.array([])

    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    size = s1 + s2 - 1

    if mode == "valid":
        _check_valid_mode_shapes(s1, s2)

    # Always use 2**n-sized FFT
    fsize = 2 ** np.ceil(np.log2(size)).astype(int)
    fslice = tuple([slice(0, int(sz)) for sz in size])
    if not complex_result:
        fft1 = rfftn(in1, fsize)
        fft2 = rfftn(in2, fsize)
        theorigmax = np.max(np.absolute(irfftn(gccproduct(fft1, fft2, 'none'), fsize)[fslice]))
        ret = irfftn(gccproduct(fft1, fft2, weighting, displayplots=displayplots), fsize)[fslice].copy()
        ret = ret.real
        ret *= theorigmax / np.max(np.absolute(ret))
    else:
        fft1 = fftpack.fftn(in1, fsize)
        fft2 = fftpack.fftn(in2, fsize)
        theorigmax = np.max(np.absolute(fftpack.ifftn(gccproduct(fft1, fft2, 'none'))[fslice]))
        ret = fftpack.ifftn(gccproduct(fft1, fft2, weighting, displayplots=displayplots))[fslice].copy()
        ret *= theorigmax / np.max(np.absolute(ret))

    # scale to preserve the maximum

    if mode == "full":
        return ret
    elif mode == "same":
        return _centered(ret, s1)
    elif mode == "valid":
        return _centered(ret, s1 - s2 + 1)
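
Both this function and the pyfftw-based fftconvolve above hand the 'same' and 'valid' results to a _centered helper that is not shown here. A sketch of what it presumably does, following SciPy's historical implementation (the name _centered_sketch is illustrative):

import numpy as np

def _centered_sketch(arr, newshape):
    # Crop `arr` to `newshape`, keeping the central region: this is how the
    # 'same' and 'valid' outputs are cut out of the full padded convolution.
    newshape = np.asarray(newshape)
    startind = (np.array(arr.shape) - newshape) // 2
    endind = startind + newshape
    return arr[tuple(slice(s, e) for s, e in zip(startind, endind))]
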
Ejemplo n.º 58
0
    def smooth(self, inimage, clean=False, is_fft=False):
        """
        :Parameters:
            inimage : `core.api.Image`
                The image to be smoothed
            clean : ``bool``
                Should we call ``nan_to_num`` on the data before smoothing?
            is_fft : ``bool``
                Has the data already been fft'd?

        :Returns: `Image`
        """
        if inimage.ndim == 4:
            _out = np.zeros(inimage.shape)
            nslice = inimage.shape[0]
        elif inimage.ndim == 3:
            nslice = 1
        else:
            raise NotImplementedError('expecting either 3 or 4-d image.')

        for _slice in range(nslice):
            if inimage.ndim == 4:
                data = inimage[_slice]
            elif inimage.ndim == 3:
                data = inimage[:]

            if clean:
                data = np.nan_to_num(data)
            if not is_fft:
                data = self._presmooth(data)
            data *= self.fkernel

            data = fft.irfftn(data) / self.norms[self.normalization]

            gc.collect()
            _dslice = tuple(slice(0, self.bshape[i], 1) for i in range(3))
            if self.scale != 1:
                data = self.scale * data[_dslice]

            if self.location != 0.0:
                data += self.location

            gc.collect()

            # Write out data 

            if inimage.ndim == 4:
                _out[_slice] = data
            else:
                _out = data
            _slice += 1

        gc.collect()
        _out = _out[tuple(slice(self._kernel.shape[i] // 2, self.bshape[i] +
                                self._kernel.shape[i] // 2)
                          for i in range(len(self.bshape)))]
        if inimage.ndim == 3:
            return Image(_out, coordmap=self.coordmap)
        else:
            concat_affine = AffineTransform.identity('concat')
            return Image(_out, coordmap=product(self.coordmap, concat_affine))
Ejemplo n.º 59
0
def correlate_windows(window_a,
                      window_b,
                      corr_method='fft',
                      nfftx=None,
                      nffty=None,
                      nfftz=None):
    """Compute correlation function between two interrogation windows.

    The correlation function can be computed by using the correlation
    theorem to speed up the computation.

    Parameters
    ----------
    window_a : 2d np.ndarray
        a two dimensions array for the first interrogation window,

    window_b : 2d np.ndarray
        a two dimensions array for the second interrogation window.

    corr_method   : string
        one method is currently implemented: 'fft'.

    nfftx   : int
        the size of the 2D FFT in x-direction,
        [default: 2 x windows_a.shape[0] is recommended].

    nffty   : int
        the size of the 2D FFT in y-direction,
        [default: 2 x windows_a.shape[1] is recommended].

    nfftz   : int
        the size of the 2D FFT in z-direction,
        [default: 2 x windows_a.shape[2] is recommended].


    Returns
    -------
    corr : 3d np.ndarray
        a three dimensional array of the correlation function.

    Note that because we pad the windows to powers of two for a faster FFT,
    the size convention of the correlation map differs slightly from theory.
    The theoretical size is M+N-1, which the 'direct' method would return;
    the FFT-based method returns a map of size M+N, where M is the
    window_size and N is the search_area_size. This leads to an
    inconsistency in the output size between the two approaches.
    """

    if corr_method == 'fft':
        window_b = np.conj(window_b[::-1, ::-1, ::-1])
        if nfftx is None:
            nfftx = nextpower2(window_b.shape[0] + window_a.shape[0])
        if nffty is None:
            nffty = nextpower2(window_b.shape[1] + window_a.shape[1])
        if nfftz is None:
            nfftz = nextpower2(window_b.shape[2] + window_a.shape[2])

        f2a = rfftn(normalize_intensity(window_a), s=(nfftx, nffty, nfftz))
        f2b = rfftn(normalize_intensity(window_b), s=(nfftx, nffty, nfftz))
        corr = irfftn(f2a * f2b).real
        corr = corr[:window_a.shape[0] +
                    window_b.shape[0], :window_b.shape[1] +
                    window_a.shape[1], :window_b.shape[2] + window_a.shape[2]]
        return corr
    # elif corr_method == 'direct':
    #     return convolve2d(normalize_intensity(window_a),
    #                       normalize_intensity(window_b[::-1, ::-1, ::-1]), 'full')
    else:
        raise ValueError('method is not implemented')
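
The trick in the 'fft' branch (reversing, and for complex data conjugating, window_b so that a plain product of transforms yields the cross-correlation) is easiest to check in one dimension. A minimal NumPy sketch:

import numpy as np

a = np.random.rand(8)
b = np.random.rand(8)
n = 16                                    # next power of two >= len(a) + len(b) - 1
via_fft = np.fft.irfft(np.fft.rfft(a, n) * np.fft.rfft(b[::-1], n), n)
direct = np.correlate(a, b, mode='full')  # length 2*8 - 1 = 15
assert np.allclose(via_fft[:len(direct)], direct)
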