Example #1
def garfield(B, P, T = Identity(), seed = None):
    if seed is not None:
        random.seed(seed)
    wn = random.normal(0, 1, B.shape)
    f  = fft.ifftn(fft.fftn(wn) * np.sqrt(P(B.K))).real
    #f /= f.std()
    return fft.ifftn(fft.fftn(f) * T(B.K)).real
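The idea here is to color Gaussian white noise by the square root of a power spectrum P(K) in Fourier space. A minimal self-contained sketch of that step, where Box and power_law are hypothetical stand-ins for the B argument (with its .shape and .K attributes) and for P:

import numpy as np

class Box:
    # Hypothetical stand-in for B: grid shape plus |k| on the FFT frequency layout.
    def __init__(self, n):
        self.shape = (n, n, n)
        k = 2 * np.pi * np.fft.fftfreq(n)
        kx, ky, kz = np.meshgrid(k, k, k, indexing="ij")
        self.K = np.sqrt(kx**2 + ky**2 + kz**2)

def power_law(K, slope=-2.0):
    # Illustrative P(K) ~ K**slope with the zero mode suppressed.
    P = np.zeros_like(K)
    nz = K > 0
    P[nz] = K[nz] ** slope
    return P

B = Box(32)
np.random.seed(0)
wn = np.random.normal(0, 1, B.shape)
field = np.fft.ifftn(np.fft.fftn(wn) * np.sqrt(power_law(B.K))).real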
Example #2
def translation_align_given_rotation_angles(v1, m1, v2, m2, angs):
    """
    for each angle, do a translation search
    """
    v1f = fftn(v1)
    v1f[0, 0, 0] = 0.0
    v1f = fftshift(v1f)

    a = [{}] * len(angs)
    for i, ang in enumerate(angs):
        v2r = GR.rotate_pad_mean(v2, angle=ang)
        v2rf = fftn(v2r)
        v2rf[0, 0, 0] = 0.0
        v2rf = fftshift(v2rf)

        m2r = GR.rotate_pad_zero(m2, angle=ang)
        m1_m2r = m1 * m2r

        # masked images
        v1fm = v1f * m1_m2r
        v2rfm = v2rf * m1_m2r

        # normalize values
        v1fmn = v1fm / N.sqrt(N.square(N.abs(v1fm)).sum())
        v2rfmn = v2rfm / N.sqrt(N.square(N.abs(v2rfm)).sum())

        lc = translation_align__given_unshifted_fft(ifftshift(v1fmn),
                                                    ifftshift(v2rfmn))

        a[i] = {'ang': ang, 'loc': lc['loc'], 'score': lc['cor']}

    return a
Example #3
 def _eval(self, contrast, differentiate=False):
     contrast = contrast.copy()
     contrast[~self.support] = 0
     self._contrast = contrast
     if self.coarse:
         # TODO take real part? what about even case? for 1d, highest
         # fourier coeff must be real then, which is not guaranteed by
         # subsampling here.
         aux = fftn(self._contrast)[self.dualcoords]
         self._coarse_contrast = (
             (self.coarsegrid.size / self.domain.size) *
             ifftn(aux)
         )
     farfield = self.codomain.empty()
     rhs = self.domain.zeros()
     for j in range(self.inc_matrix.shape[0]):
         # Solve Lippmann-Schwinger equation v + a*conv(k, v) = a*u_inc for
         # the unknown v = a u_total. The Fourier coefficients of the
         # periodic convolution kernel k are precomputed.
         rhs[self.support] = self.inc_matrix[j, :] * contrast[self.support]
         if self.coarse:
             v = self._solve_two_grid(rhs)
         else:
             v = self._gmres(self._lippmann_schwinger, rhs).reshape(self.domain.shape)
         self._compute_farfield(farfield, j, v)
         # The total field can be recovered from v in a stable manner by the formula
         # u_total = u_inc - conv(k, v)
         if differentiate:
             self._totalfield[:, j] = (
                 self.inc_matrix[j, :] - ifftn(self.kernel * fftn(v))[self.support]
             )
     return farfield
Example #4
    def solve_neutral(self, phi_g, rho_g, eps=None):
        """Solve Poissons equation for a neutral and periodic charge density.

        Parameters
        ----------
        phi_g: ndarray
            Potential (output array).
        rho_g: ndarray
            Charge distribution (in units of -e).

        """

        assert phi_g.dtype == self.dtype
        assert rho_g.dtype == self.dtype

        if self.gd.comm.size == 1:
            # Note, implicit downcast from complex to float when the dtype of
            # phi_g is float
            phi_g[:] = ifftn(fftn(rho_g) * 4.0 * pi / self.k2_Q)
        else:
            rho_g = self.gd.collect(rho_g)
            if self.gd.comm.rank == 0:
                globalphi_g = ifftn(fftn(rho_g) * 4.0 * pi / self.k2_Q)
            else:
                globalphi_g = None
            # What happens here if globalphi is complex and phi is real ??????
            self.gd.distribute(globalphi_g, phi_g)

        return 1
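The core of the serial branch is a spectral Poisson solve, phi_hat(k) = 4*pi*rho_hat(k)/k^2. A self-contained sketch of that step on a cubic periodic grid; k2 is an assumed stand-in for the precomputed self.k2_Q array:

import numpy as np
from numpy.fft import fftn, ifftn, fftfreq

# Solve the periodic Poisson equation nabla^2 phi = -4*pi*rho in Fourier space.
n, L = 32, 1.0
k = 2.0 * np.pi * fftfreq(n, d=L / n)
kx, ky, kz = np.meshgrid(k, k, k, indexing="ij")
k2 = kx**2 + ky**2 + kz**2
k2[0, 0, 0] = 1.0                 # avoid dividing by zero at the k = 0 mode

rho = np.random.rand(n, n, n)
rho -= rho.mean()                 # neutral (zero-mean) charge density

phi_hat = fftn(rho) * 4.0 * np.pi / k2
phi_hat[0, 0, 0] = 0.0            # pin the arbitrary constant offset
phi = ifftn(phi_hat).real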
Example #5
def fsc(v1, v2, band_width_radius=1.0):
    
    siz = v1.shape
    assert(siz == v2.shape)

    origin_co = GV.fft_mid_co(siz)
    
    x = N.mgrid[0:siz[0], 0:siz[1], 0:siz[2]]
    x = x.astype(N.float64)

    for dim_i in range(3):
        x[dim_i] -= origin_co[dim_i]

    rad = N.sqrt( N.square(x).sum(axis=0) )

    vol_rad = int( N.floor( N.min(siz) / 2.0 ) + 1)

    v1f = NF.fftshift( NF.fftn(v1) )
    v2f = NF.fftshift( NF.fftn(v2) )

    fsc_cors = N.zeros(vol_rad)

    # the interpolation can also be performed using scipy.ndimage.interpolation.map_coordinates()
    for r in range(vol_rad):

        ind = ( abs(rad - r) <= band_width_radius )

        c1 = v1f[ind]
        c2 = v2f[ind]

        fsc_cor_t = N.sum( c1 * N.conj(c2) ) / N.sqrt( N.sum( N.abs(c1)**2 ) * N.sum( N.abs(c2)**2) )
        fsc_cors[r] = N.real( fsc_cor_t )

    return fsc_cors
Example #6
def garfield(B, P, T = Identity(), seed = None):
	if seed is not None:
		random.seed(seed)
	wn = random.normal(0, 1, B.shape)
	f  = fft.ifftn(fft.fftn(wn) * np.sqrt(P(B.K))).real
	#f /= f.std()
	return fft.ifftn(fft.fftn(f) * T(B.K)).real
Example #7
    def precompute_operator(self, f, a):
        """
        f - array of drift coefficients on domain (ndim x N[0] x N[1] x ... x N[ndim])
        a - array of diffusion coefficients on domain (ndim x N[0] x N[1] x ... x N[ndim])
        NOTE: To generalize to covariate noise, would need to add a dimension to a
        """

        if self.ndim == 1:
            f_hat = self.dx * fftn(f)
            a_hat = self.dx * fftn(a)

            # Set up spectral projection operator
            self.A = np.einsum('i,ij->ij', -1j*self.k, f_hat[self.idx]) \
                   + np.einsum('i,ij->ij', -self.k**2, a_hat[self.idx])

        if self.ndim == 2:
            # Initialize Fourier transformed coefficients
            f_hat = np.zeros(np.append([self.ndim], self.N),
                             dtype=np.complex64)
            a_hat = np.zeros(f_hat.shape, dtype=np.complex64)
            for i in range(self.ndim):
                f_hat[i] = np.prod(self.dx) * fftn(f[i])
                a_hat[i] = np.prod(self.dx) * fftn(a[i])

            self.A = -1j*np.einsum('i,ijkl->ijkl', self.k[0], f_hat[0, self.idx[0], self.idx[1]]) \
                     -1j*np.einsum('j,ijkl->ijkl', self.k[1], f_hat[1, self.idx[0], self.idx[1]]) \
                     -np.einsum('i,ijkl->ijkl', self.k[0]**2, a_hat[0, self.idx[0], self.idx[1]]) \
                     -np.einsum('j,ijkl->ijkl', self.k[1]**2, a_hat[1, self.idx[0], self.idx[1]])

            self.A = np.reshape(self.A, (np.prod(self.N), np.prod(self.N)))
Example #8
    def SecondOrderCorrFuncfCoherentXrayScattering(self,state,gridShape):
        """
        Returns the intensity of scattered light as a function
		of the reciprocal lattice vector q

		.. math::
			I(q) = \\frac{1}{V} \\tilde{f}(q) \\tilde{f}(-q)  
        
		where 
		
		.. math::
			f(x) = e^{2 \pi i g*u(x)}

        Assume that there is a cubic symmetry, so that g = (h/a, k/a, l/a)  (set a=1)
        """
        V = float(array(gridShape).prod())
        u = state#.CalculateDisplacementField()
        ug = self.g[0]*u[x]+self.g[1]*u[y]+self.g[2]*u[z] 
        Kug = fft.fftn(ug) 
        u2g = ug*ug
        Ku2g = fft.fftn(u2g)
        I_0 = 1.
        I_1 = (1.j)*2.*pi*(Kug-Kug.conj())
        I_2 = 2.*pi**2*(2*Kug*Kug.conj()-Ku2g-Ku2g.conj())
        I_S = (I_0+I_2)/V
        I_A = I_1/V
        return I_S.real, I_A.real
Example #9
    def potential_energy(self, potential, summed=False):
        r"""Calculate the potential energy :math:`E_{\text{pot}} := \langle\Psi|V|\Psi\rangle`
        of the different components :math:`\psi_i`.

        :param potential: The potential energy operator :math:`V(x)`.
        :param summed: Whether to sum up the potential energies :math:`E_i` of the individual
                       components :math:`\psi_i`. Default is ``False``.
        :return: A list with the potential energies of the individual components
                 or the overall potential energy of the wavefunction. (Depending on the optional arguments.)
        """
        # Compute the prefactor
        T = self._grid.get_extensions()
        N = self._grid.get_number_nodes()
        prefactor = product(array(T) / (1.0 * array(N) ** 2))

        # Reshape from (1, prod_d^D N_d) to (N_1, ..., N_D) shape
        potential = [pot.reshape(N) for pot in potential]

        # Apply the matrix potential to the ket
        tmp = [zeros(component.shape, dtype=complexfloating) for component in self._values]
        for row in xrange(0, self._number_components):
            for col in xrange(0, self._number_components):
                tmp[row] = tmp[row] + potential[row * self._number_components + col] * self._values[col]

        # Fourier transform the components
        ftcbra = [fftn(component) for component in self._values]
        ftcket = [fftn(component) for component in tmp]

        # Compute the braket in Fourier space
        epot = [prefactor * sum(conjugate(cbra) * cket) for cbra, cket in zip(ftcbra, ftcket)]

        if summed is True:
            epot = sum(epot)

        return epot
Example #10
def test_plan_call():
    for shape in tested_shapes:
        plan = Plan(
            input_array=ranf_unit_complex(shape),
            output_array=numpy.empty(shape, dtype=numpy.complex128),
            direction=Direction.forward,
        )
        testing.assert_allclose(
            plan(),
            fft.fftn(plan.input_array)
        )
        testing.assert_allclose(
            plan(normalize=True),
            fft.fftn(plan.input_array) / plan.input_array.size
        )
        plan = Plan(
            input_array=ranf_unit_complex(shape),
            output_array=numpy.empty(shape, dtype=numpy.complex128),
            direction=Direction.backward
        )
        testing.assert_allclose(
            plan(),
            fft.ifftn(plan.input_array) * plan.input_array.size
        )
        testing.assert_allclose(
            plan(normalize=True),
            fft.ifftn(plan.input_array)
        )
Example #11
    def solve_neutral(self, phi_g, rho_g, eps=None):
        """Solve Poissons equation for a neutral and periodic charge density.

        Parameters
        ----------
        phi_g: ndarray
            Potential (output array).
        rho_g: ndarray
            Charge distribution (in units of -e).

        """

        assert phi_g.dtype == self.dtype
        assert rho_g.dtype == self.dtype
        
        if self.gd.comm.size == 1:
            # Note, implicit downcast from complex to float when the dtype of
            # phi_g is float
            phi_g[:] = ifftn(fftn(rho_g) * 4.0 * pi / self.k2_Q)
        else:
            rho_g = self.gd.collect(rho_g)
            if self.gd.comm.rank == 0:
                globalphi_g = ifftn(fftn(rho_g) * 4.0 * pi / self.k2_Q)
            else:
                globalphi_g = None
            # What happens here if globalphi is complex and phi is real ??????
            self.gd.distribute(globalphi_g, phi_g)
            
        return 1
Example #12
def gaussian_convolution(data, ijk_linewidths):

  from numpy import float32, zeros, add, divide, outer, reshape
  if data.dtype.type != float32:
    data = data.astype(float32)

  from math import exp
  gaussians = []
  for a in range(3):
    size = data.shape[a]
    gaussian = zeros((size,), float32)
    hw = ijk_linewidths[2-a] / 2.0
    for i in range(size):
      u = min(i,size-i) / hw
      p = min(u*u/2, 100)               # avoid OverflowError with exp()
      gaussian[i] = exp(-p)
    area = add.reduce(gaussian)
    divide(gaussian, area, gaussian)
    gaussians.append(gaussian)

  g01 = outer(gaussians[0], gaussians[1])
  g012 = outer(g01, gaussians[2])
  g012 = reshape(g012, data.shape)
  
  cdata = zeros(data.shape, float32)

  from numpy.fft import fftn, ifftn
  # TODO: Fourier transform Gaussian analytically to reduce computation time
  #       about 30% (one of three fft calculations).
  ftg = fftn(g012)
  ftd = fftn(data)
  gd = ifftn(ftg * ftd)
  gd = gd.astype(float32)
  return gd
Example #13
    def _eval_grid_fast(self, *args, **kwds):
        X = np.vstack(args)
        d, inc = X.shape
        dx = X[:, 1] - X[:, 0]

        Xnc = self._make_flat_grid(dx, d, inc)

        Xn = np.dot(self._inv_hs, Xnc)
        kw = self._kernel_weights(Xn, dx, d, inc)

        r = kwds.get('r', 0)
        if r != 0:
            fun = self._moment_fun(r)
            kw *= fun(np.vstack(Xnc))
        kw.shape = (2 * inc, ) * d
        kw = np.fft.ifftshift(kw)

        y = kwds.get('y', 1.0)
        if self.alpha > 0:
            warnings.warn('alpha parameter is not used for binned kde!')

        # Find the binned kernel weights, c.
        c = gridcount(self.dataset, X, y=y)
        # Perform the convolution.
        z = np.real(ifftn(fftn(c, s=kw.shape) * fftn(kw)))

        ix = (slice(0, inc), ) * d
        if r == 0:
            return z[ix] * (z[ix] > 0.0)
        return z[ix]
Example #14
    def fft_r2g(self, fr, shift_fg=False):
        """
        FFT of array ``fr`` given in real space.
        """
        ndim, shape = fr.ndim, fr.shape

        if ndim == 1:
            fr = np.reshape(fr, self.shape)
            return self.fft_r2g(fr, shift_fg=shift_fg).flatten()

        elif ndim == 3:
            assert self.size == np.prod(shape[-3:])
            fg = fftn(fr)
            if shift_fg: fg = fftshift(fg)

        elif ndim > 3:
            assert self.size == np.prod(shape[-3:])
            axes = np.arange(ndim)[-3:]
            fg = fftn(fr, axes=axes)
            if shift_fg: fg = fftshift(fg, axes=axes)

        else:
            raise NotImplementedError("ndim < 3 are not supported")

        return fg / self.size
Example #15
def vecpot(arx,ary,arz, kf, lx=2*np.pi, ly=2*np.pi, lz=2*np.pi):
   """
   Function to compute vector potential of a 3D array
   """
   nx,ny,nz=arx.shape

   #  COMPUTE THE ARRAY SIZE
   kx, ky, kz, km = create_kgrid(*arx.shape, lx=lx, ly=ly, lz=lz)
  #kx, ky, kz, k2 = kx[:,nna,nna],ky[nna,:,nna],kz[nna,nna,:], km**2
   k2=km**2
   k2[nx//2, ny//2, nz//2] = 1.

   #  FOURIER TRANSFORM THE ARRAY
   farx = nf.fftshift(nf.fftn(arx))
   fary = nf.fftshift(nf.fftn(ary))
   farz = nf.fftshift(nf.fftn(arz))

   #  SET VALUES ABOVE kf AS 0+0i
   farx = (np.sign(km - kf) - 1.)/(-2.)*farx
   fary = (np.sign(km - kf) - 1.)/(-2.)*fary
   farz = (np.sign(km - kf) - 1.)/(-2.)*farz

   #  FIND THE CORRESPONDING VECTOR POTENTIAL A = -ik x B /k^2
   axf = -eye*(ky*farz-kz*fary)/k2
   ayf = -eye*(kz*farx-kx*farz)/k2
   azf = -eye*(kx*fary-ky*farx)/k2

   #  BACK TRANSFORM TO REAL SPACE
   ax  = np.real(nf.ifftn(nf.ifftshift(axf)))
   ay  = np.real(nf.ifftn(nf.ifftshift(ayf)))
   az  = np.real(nf.ifftn(nf.ifftshift(azf)))
   return ax,ay,az
Example #16
def fftconvolve(in1, in2, mode='same'):
    """Convolve two N-dimensional arrays using FFT. See convolve.

    """
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    size = s1 + s2 - 1

    # Always use 2**n-sized FFT
    fsize = (2 ** np.ceil(np.log2(size))).astype('int')
    IN1 = fftn(in1, fsize)
    IN1 *= fftn(in2, fsize)
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = ifftn(IN1)[fslice].copy()
    del IN1 
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if np.prod(s1, axis=0) > np.prod(s2, axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret, osize)
    elif mode == "valid":
        return _centered(ret, abs(s2 - s1) + 1)

    return conv[:s[0], :s[1], :s[2]]
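A quick 1-D sanity check of the padded-FFT convolution idea used above: transform both inputs at length s1 + s2 - 1, multiply the spectra, inverse-transform, and compare with numpy's direct convolution.

import numpy as np
from numpy.fft import fftn, ifftn

a = np.random.rand(7)
b = np.random.rand(5)
size = a.size + b.size - 1
# Zero-padded transforms give linear (not circular) convolution.
full = ifftn(fftn(a, (size,)) * fftn(b, (size,))).real
assert np.allclose(full, np.convolve(a, b, mode="full"))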
Example #17
    def LaplacianUnwrap(self, Original_phase, VoxelSizes, padding_pixels):
        # Local Variables: AcqSpacing, padding_pixels, SliceSpacing, k, m, n, k2, Original_phase, size_data
        AcqSpacing = VoxelSizes[0]
        SliceSpacing = VoxelSizes[2]
        size_data_original = Original_phase.shape

        #% Zero-padding of the images
        Original_phase = np.lib.pad(Original_phase,
                                    ((padding_pixels, padding_pixels),
                                     (padding_pixels, padding_pixels),
                                     (padding_pixels, padding_pixels)), 'wrap')

        #%Mask=padarray(Mask,[padding_pixels/2 padding_pixels/2 padding_pixels/2]);
        size_data = Original_phase.shape

        #% Calculation of the k**2 matrix (we assume an isotropic inplane resolution)
        k2 = np.zeros(size_data)
        for k in np.arange(0., size_data[0]):
            for m in np.arange(0., size_data[1]):
                for n in np.arange(0., size_data[2]):
                    k2[int(k), int(m), int(n)] = (
                        np.double(k) - np.double(np.floor(size_data[0] / 2)) -
                        1.0)**2 + (np.double(m) - np.double(
                            np.floor(size_data[1] / 2)) - 1.0)**2 + (
                                (np.double(n) -
                                 np.double(np.floor(size_data[2] / 2)) - 1.0) *
                                (SliceSpacing / AcqSpacing))**2

        k2 = np.array(k2)

        #% Equation 13 from the paper from Li et al.
        # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3947438/
        Original_phase = fftn(
            (np.cos(Original_phase) * ifftn(
                (ifftshift(k2) * fftn(np.sin(Original_phase)))) -
             np.sin(Original_phase) * ifftn(
                 (ifftshift(k2) * fftn(np.cos(Original_phase))))
             )) / ifftshift(k2)

        #% To prevent errors arising from the k=0 point
        Original_phase[np.isnan(Original_phase)] = 0.
        Original_phase[np.isinf(Original_phase)] = 0.

        #% Back to the image domain

        #%Original_phase=ifftn(Original_phase)*Mask;
        Original_phase = ifftn(Original_phase)

        #% We remove the zero-padding
        Original_phase = Original_phase[[
            slice(padding_pixels, -padding_pixels)
            for _ in Original_phase.shape
        ]]

        if Original_phase.shape != size_data_original:
            raise Exception('Padding is weird')

        print "Laplacian unwrap done"
        return np.array(np.real(Original_phase))
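The triple loop that builds the k2 array can be written without explicit loops; a hedged, equivalent sketch under the same centring convention and in-plane isotropy assumption (size_data, SliceSpacing and AcqSpacing keep their meaning from the method):

import numpy as np

def k2_grid(size_data, slice_spacing, acq_spacing):
    # Vectorized construction of the k2 array built by the triple loop above.
    k, m, n = np.ogrid[:size_data[0], :size_data[1], :size_data[2]]
    ck, cm, cn = np.floor(np.array(size_data) / 2.0) + 1.0
    return ((k - ck) ** 2 + (m - cm) ** 2 +
            ((n - cn) * (slice_spacing / acq_spacing)) ** 2)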
Example #18
def nccfft(s, p, room, fact=1):
    """Used for all Patterns that do not fall other categories.

    Cross correlates normalized Source and Pattern images while
    taking advantage of FFTs for the convolution step. 
    
    ----------
    s, p : Image
       Source and Pattern images for comparison.
       
    fact : int, optional
       Factor by which both Source and Pattern are
       scaled down.

    ----------
    out1 : ndarray[float]
       Confidence matrix for matches.

    out2 : float
       Threshold for deciding if a match has been found.

    out3 : float
       Mean of the confidence matrix.
    """
    
    # subtract mean from Pattern
    pmm = p - p.mean()
    pstd = p.std()
    n = p.size
    
    # make matrix of ones the same size as pattern
    u = np.ones(p.shape)

    # pad matrices (necessary for convolution)
    s = pad_by(s, room)

    upad = pad_to_size_of(u, s)
    pmmpad = pad_to_size_of(pmm, s)
    
    # compute necessary ffts
    fftppad = fftn(pmmpad)
    ffts = fftn(s)
    fftss = fftn(s**2)
    fftu = fftn(upad)

    # compute conjugates
    cfppad = np.conj(fftppad)
    cfu = np.conj(fftu)

    # do multiplications and ifft's
    top = ifftn(cfppad * ffts)
    bot1 = n * ifftn(cfu * fftss)
    bot2 = ifftn(cfu * ffts) ** 2

    # finish it off!
    bottom = pstd * np.sqrt(bot1 - bot2)
    full = top / bottom

    return np.where(full.real.max() == full.real)
Example #19
def fftnc(im_to_fft, ph, sz=None, axes=(-2, -1)):
    if sz is None:
        FFTdata = 1 / np.sqrt(im_to_fft.size) * fft.fftn(im_to_fft * ph,
                                                         axes=axes)
    else:
        FFTdata = (np.sqrt(sz) / im_to_fft.size) * fft.fftn(im_to_fft * ph,
                                                            axes=axes)
    return FFTdata
Example #20
    def __DFT(self, arr, inverse=False):
        '''
        X = (x-x0)/dx = 0, 1, ..., L-1
        K = k/dk = -L/2, ..., 0, 1, ..., L/2
        dft(arr)[K] = \sum_X arr[X] e^{-2\pi i KX/L}
                    = \sum_X arr[X] e^{-2\pi i kx/(dkdxL)}e^{2\pi/(dkdxL) i k\cdot x0}

        FT(k) = \int arr(x)e^{-2\pi ik\cdot x}dx
              = \int arr(x) e^{-2\pi ik\cdot x}dx 
              ~ |dx|\sum_x arr[X] e^{-2\pi i Kdk\cdot (Xdx+x0)}
              = |dx| dft(arr)[K] e^{-2\pi i k\cdot x0}
              if 1 = dxdk L


        Assume periodicity so fft(arr)[-k] = fft(arr)[N-k] via fftshift

        if dim == len(inGrid):
            Do a full FT on the array
        else:
            Use Fourier slice theorem
        '''

        if hasattr(arr, 'asarray'):
            arr = arr.asarray()
        arr = context().asarray(arr)
        if inverse:
            dx = [1 / (g.item(1) - g.item(0))
                  for g in self.inGrid]
            x0 = [-g.item(0) for g in self.inGrid]
        else:
            dx = [g.item(1) - g.item(0) for g in self.inGrid]
            x0 = [g.item(0) for g in self.inGrid]

        if isinstance(self.ElementSpace, VolSpace):
            if inverse:
                FT = _applyweight(arr, self.outGrid, dx, x0)
                FT = ifftn(ifftshift(FT))
            else:
                FT = fftshift(fftn(arr))
                FT = _applyweight(FT, self.outGrid, dx, x0)
        else:
            '''
            arr[t,x] = \int_{y-x || t} f(y)dy
            FT[t,X] = FT[f](X)
            x[t,i,...] = detector[0][i]*w[0][t]+...
            '''
            ax = [i + 1 for i in range(self.dim - 1)]
            detector = self.outGrid[:self.dim - 1]
            w = self.outGrid[self.dim - 1:]

            if inverse:
                FT = _applyweight_hyperplane(arr, detector, w, dx, x0)
                FT = ifftn(ifftshift(FT, axes=ax), axes=ax)
            else:
                FT = fftshift(fftn(arr, axes=ax), axes=ax)
                FT = _applyweight_hyperplane(FT, detector, w, dx, x0)

        return FT
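A 1-D numerical check of the docstring's relation FT(k) ≈ |dx| · dft(arr)[K] · e^{-2πi k·x0}, using the Gaussian exp(-π x²) whose continuous transform is exp(-π k²):

import numpy as np
from numpy.fft import fftn, fftfreq

# Sample f(x) = exp(-pi*x^2) on x = x0 + dx*X and compare the weighted DFT
# against the exact continuous transform exp(-pi*k^2).
L, dx = 256, 0.05
x0 = -L * dx / 2.0
x = x0 + dx * np.arange(L)
arr = np.exp(-np.pi * x ** 2)

k = fftfreq(L, d=dx)                      # k = K*dk with dk = 1/(dx*L)
FT = dx * fftn(arr) * np.exp(-2j * np.pi * k * x0)
assert np.allclose(FT.real, np.exp(-np.pi * k ** 2), atol=1e-6)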
Example #21
    def update(self, image):
        self.image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.resize:
            self.image = cv2.resize(
                self.image,
                (self.image.shape[1] // 2, self.image.shape[0] // 2))

        patch = self.get_subwindow(self.image, self.pos, self.window_sz)
        zf = fftn(self.get_features(patch), axes=(0, 1))

        if self.correlation_type == 'gaussian':
            kzf = self.gaussian_correlation(zf, self.model_xf)

        response = real(ifftn(self.model_alphaf * kzf,
                              axes=(0, 1)))  # equation for fast detection
        # Find indices and values of nonzero elements curr = np.unravel_index(np.argmax(gi, axis=None), gi.shape)
        delta = np.unravel_index(np.argmax(response, axis=None),
                                 response.shape)
        vert_delta, horiz_delta = delta[0], delta[1]
        if vert_delta > np.size(
                zf,
                0) / 2:  # wrap around to negative half-space of vertical axis
            vert_delta = vert_delta - np.size(zf, 0)
        if horiz_delta > np.size(zf, 1) / 2:  # same for horizontal axis
            horiz_delta = horiz_delta - np.size(zf, 1)
        self.pos = self.pos + self.cell_size * np.array(
            [vert_delta, horiz_delta])

        # obtain a subwindow for training at newly estimated target position
        patch = self.get_subwindow(self.image, self.pos, self.window_sz)
        feat = self.get_features(patch)
        xf = fftn(feat, axes=(0, 1))

        # Kernel Ridge Regression, calculate alphas (in Fourier domain)
        if self.correlation_type == 'gaussian':
            kf = self.gaussian_correlation(xf, xf)

        alphaf = np.divide(self.yf, (kf + self.lambda_))
        # subsequent frames, interpolate model
        self.model_alphaf = (
            1 - self.interp_factor
        ) * self.model_alphaf + self.interp_factor * alphaf
        self.model_xf = (
            1 - self.interp_factor) * self.model_xf + self.interp_factor * xf

        if self.resize:
            pos_real = np.multiply(self.pos, 2)
        else:
            pos_real = self.pos

        box = [
            pos_real[1] - self.target_sz_real[1] / 2,
            pos_real[0] - self.target_sz_real[0] / 2, self.target_sz_real[1],
            self.target_sz_real[0]
        ]

        return box[0], box[1], box[2], box[3]
Example #22
    def cross_correlate(self, im0, im1):
        for i in range(len(im0.shape)):
            assert im0.shape[i] == im1.shape[i]
            assert im0.shape[i] > 0

        ft_im0 = (fftn(im0))
        ft_im1 = (fftn(im1))
        cc = fftshift(abs(ifftn((ft_im0 * ft_im1))))
        return cc
Example #23
def conv(array1, array2):
    # first step: do the Fourier transform of the two arrays
    ft1 = fftn(array1)
    ft2 = fftn(array2)

    # second step: do the product in the Fourier domain and the inverse transform
    product = ifftshift(ifftn(ft1 * ft2))
    result = np.abs(product)  # coming back to the image
    return result  #, ft1, ft2
Example #24
    def __init__(
        self,
        modulus,
        support,  # MAKE SURE THAT SUPPORT ARRAY DIMS ARE EQUAL TO binning*modulus.shape
        beta=0.9,
        binning=1,  # set this to the desired PBF for high-energy measurements
        averaging=True,
        gpu=False,
        num_gpus=0,  # Phase retrieval and partial coherence correction on different GPUs. How much does this affect performance?
        random_start=True,
        initial_pcc_guess=[],
        pcc_learning_rate=1.e-3,  # default learning rate for the Gaussian partial coherence function optimization
        outlog=''  # directory name for computational graph (GPU only).
    ):
        self._modulus = fftshift(modulus)
        self._support = support  # user should ensure this is approriate for binned data
        self._beta = beta
        self._binning = binning
        self._averaging = averaging

        self._modulus_sum = modulus.sum()
        self._support_comp = 1. - support
        if random_start:
            self._cImage = np.exp(2.j * np.pi * np.random.rand(
                binning * self._modulus.shape[0],
                binning * self._modulus.shape[1],
                self._modulus.shape[2])) * self._support
        else:
            self._cImage = 1. * support

        self._initial_pcc_guess = initial_pcc_guess
        self._pcc_learning_rate = pcc_learning_rate

        if self._binning > 1:
            self._binLeft, self._binRight = bng.fastInPlaneBinning(
                self._cImage[:, :, 0], self._binning, self._binning)[1:]
            self._scale =   ( self._binLeft[ 0,:] > 1.e-6 ).astype( float ).sum() *\
                            ( self._binRight[0,:] > 1.e-6 ).astype( float ).sum()
            self._cImage_fft_mod = np.sqrt(
                ss.inPlaneBinning(
                    np.absolute(fftn(self._cImage))**2, self._binning,
                    self._binning))
        else:
            self._cImage_fft_mod = np.absolute(fftn(self._cImage))

        self._error = []
        self._UpdateError()

        if gpu == True:
            self.gpusolver = accelerator.Solver(self.generateVariableDict(),
                                                num_gpus=num_gpus,
                                                outlog=outlog)

        if self._averaging:
            self._prefactor = 1.
        else:
            self._prefactor = 1. / (self._binning**2)
Example #25
def get_ops(h, crop2d, pad2d, crop3d, pad3d, up_shape):
    #for things which are not admm, put back in norm = 'ortho'
    H = fftn(ifftshift(h), norm='ortho')
    Hstar = np.conj(H)
    A = lambda x : np.real(crop3d(fftshift(ifftn(H * fftn(ifftshift(x), \
                        norm = 'ortho'), norm = 'ortho'))))
    AH = lambda x : np.real(fftshift(ifftn(Hstar * fftn(ifftshift(pad3d(x)), \
                        norm = 'ortho'), norm = 'ortho')))
    return A, AH
Example #26
def FieldFFT(a, a_hat):
	"""Calculate the component-wise 2D or 3D FFT of a vector field a, and stores it in a_hat."""
	
	if DimOfVectorFieldDomain(a) == 2:
		a_hat[:,:,0], a_hat[:,:,1] = fft.fftn(a[:,:,0]), fft.fftn(a[:,:,1])
	else:
		a_hat[...,0], a_hat[...,1], a_hat[...,2] = fft.fftn(a[...,0]), fft.fftn(a[...,1]), fft.fftn(a[...,2])

	return a_hat
Example #27
def u2uhat():
    """
    Forward Fourier transform the fields
    """
    global u, v, w, uhat, vhat, what
    uhat = fftn(u)  # lint:ok
    vhat = fftn(v)  # lint:ok
    what = fftn(w)  # lint:ok
    return
Example #28
def fftd(I, dims=None):
    # Compute fft
    if dims is None:
        X = fftn(I)
    elif dims == 2:
        X = fft2(I, axes=(0, 1))
    else:
        X = fftn(I, axes=tuple(range(dims)))

    return X
Example #29
def fconv(X, Y):
    ''' performs multidimensional convolution in spectral domain
    convolves X and Y by computing the n-dimensional fourier transforms of
    X with the size of Y '''
    # check if X and Y have the same number of dimensions
    assert len(X.shape) == len(Y.shape)
    # check if size of X is never larger than the size of Y in all dimensions
    assert all([X.shape[i] <= Y.shape[i] for i in range(len(X.shape))])
    b = fftn(X, s=Y.shape) * fftn(Y)
    return np.real(ifftn(b))
Example #30
def subpixel_shift(array, z_shift, y_shift, x_shift=None):
    """
    Shift array by the shift values.

    Adapted from the Matlab code of Jesse Clark.

    :param array: array to be shifted
    :param z_shift: shift in the first dimension
    :param y_shift: shift in the second dimension
    :param x_shift: shift in the third dimension
    :return: the shifted array
    """
    # check some parameters
    valid.valid_ndarray(array, ndim=(2, 3), name="array")
    valid.valid_item(z_shift, allowed_types=float, name="z_shift")
    valid.valid_item(y_shift, allowed_types=float, name="y_shift")
    valid.valid_item(x_shift, allowed_types=float, allow_none=True, name="x_shift")

    # shift the array
    ndim = len(array.shape)
    if ndim == 3:
        numz, numy, numx = array.shape
        buf2ft = fftn(array)
        temp_z = ifftshift(
            np.arange(-np.fix(numz / 2), np.ceil(numz / 2))
        )  # python does not include the end point
        temp_y = ifftshift(
            np.arange(-np.fix(numy / 2), np.ceil(numy / 2))
        )  # python does not include the end point
        temp_x = ifftshift(
            np.arange(-np.fix(numx / 2), np.ceil(numx / 2))
        )  # python does not include the end point
        myz, myy, myx = np.meshgrid(temp_z, temp_y, temp_x, indexing="ij")
        greg = buf2ft * np.exp(
            -1j
            * 2
            * np.pi
            * (z_shift * myz / numz + y_shift * myy / numy + x_shift * myx / numx)
        )
        shifted_array = ifftn(greg)
    else:  # 2D case
        buf2ft = fftn(array)
        numz, numy = array.shape
        temp_z = ifftshift(
            np.arange(-np.fix(numz / 2), np.ceil(numz / 2))
        )  # python does not include the end point
        temp_y = ifftshift(
            np.arange(-np.fix(numy / 2), np.ceil(numy / 2))
        )  # python does not include the end point
        myz, myy = np.meshgrid(temp_z, temp_y, indexing="ij")
        greg = buf2ft * np.exp(
            -1j * 2 * np.pi * (z_shift * myz / numz + y_shift * myy / numy)
        )
        shifted_array = ifftn(greg)
    return shifted_array
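A 1-D sketch of the phase-ramp shift used above: multiplying the spectrum by exp(-2πi·shift·k/n) translates the signal, and for an integer shift it reproduces np.roll exactly (up to floating-point error).

import numpy as np
from numpy.fft import fftn, ifftn, ifftshift

n, shift = 8, 3.0
x = np.random.rand(n)
# Frequencies in FFT order, matching the ifftshift(arange(...)) construction above.
k = ifftshift(np.arange(-np.fix(n / 2), np.ceil(n / 2)))
shifted = ifftn(fftn(x) * np.exp(-2j * np.pi * shift * k / n)).real
assert np.allclose(shifted, np.roll(x, 3))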
Example #31
def setup(options):

    base = options.get_string(option_section, "tensor_dir")
    snapshot = options.get_int(option_section, "snapshot")
    nx = options.get_int(option_section,
                         "resolution")  # pixel resolution 128, 64, 32, 16

    #  base='/home/rmandelb.proj/ssamurof/tng_tidal/'

    # gridded particle density data
    nxyz = fi.FITS('%s/density/dm_density_0%d_%d.fits' %
                   (base, snapshot, nx))[-1].read()
    gxyz = fi.FITS('%s/density/star_density_0%d_%d.fits' %
                   (base, snapshot, nx))[-1].read()

    # overdensity field
    d = nxyz / np.mean(nxyz) - 1
    g = gxyz / np.mean(gxyz) - 1

    tidal_tensor = np.zeros((nx, nx, nx, 3, 3), dtype=np.float32)
    galaxy_tidal_tensor = np.zeros((nx, nx, nx, 3, 3), dtype=np.float32)

    # FFT the box
    fft_dens = npf.fftn(d)
    galaxy_fft_dens = npf.fftn(g)

    A = 1.  #/2/np.pi #/(2.*np.pi)**3

    # now compute the tidal tensor
    k = npf.fftfreq(nx)[np.mgrid[0:nx, 0:nx, 0:nx]]

    for i in range(3):
        for j in range(3):
            print(i, j)
            # k[i], k[j] are 3D matrices
            temp = fft_dens * k[i] * k[j] / (k[0]**2 + k[1]**2 + k[2]**2)
            galaxy_temp = galaxy_fft_dens * k[i] * k[j] / (k[0]**2 + k[1]**2 +
                                                           k[2]**2)

            # subtract off the trace...
            if (i == j):
                temp -= 1. / 3 * fft_dens
                galaxy_temp -= 1. / 3 * galaxy_fft_dens

            temp[0, 0, 0] = 0
            tidal_tensor[:, :, :, i, j] = A * npf.ifftn(temp).real

            galaxy_temp[0, 0, 0] = 0
            galaxy_tidal_tensor[:, :, :, i,
                                j] = A * npf.ifftn(galaxy_temp).real

    print('loading shapes')
    gammaI = load_gamma(nx)

    return gammaI, tidal_tensor, galaxy_tidal_tensor
Example #32
def wiener_deconvolve(x, kernel, l):
    s1 = array(x.shape)
    s2 = array(kernel.shape)
    size = s1 + s2 - 1

    X = fftn(x, size)
    H = fftn(kernel, size)
    Hc = np.conj(H)

    ret = ifftshift(ifftn( X*Hc / (H*Hc*X + l**2))).real
    return _centered(ret, s1)
Example #33
def fftd(I, dims=None):

    # Compute fft
    if dims is None:
        X = fftn(I)
    elif dims == 2:
        X = fft2(I, axes=(0, 1))
    else:
        X = fftn(I, axes=tuple(range(dims)))

    return X
Example #34
def clsf(G, H, gamma):
    G = fftn(G)
    H = fftn(H)

    p = laplacian_filter()
    P = fftn(ref_padding(p, H))

    D = np.divide(np.conj(H), (np.real(H)**2 + gamma * (np.real(P)**2)))
    R = np.multiply(D, G)

    return np.real(fftshift(ifftn(R)))
Example #35
def denoise_img(img, filter_, save=False):
    denoised_img = np.multiply(fftn(img), fftn(filter_))
    denoised_img = np.real(fftshift(ifftn(denoised_img)))

    if save:
        global SAVE_COUNTER
        imageio.imwrite('plots/' + str(SAVE_COUNTER) + '_denoised_img.png',
                        denoised_img.astype(np.uint8))
        SAVE_COUNTER += 1

    return denoised_img
Example #36
def MyFftConvolve(im1, im2):
    """ [insert description here]
    """
    from numpy.fft import fftn as fftn
    from numpy.fft import ifftn as ifftn

    convolved = np.real(ifftn(fftn(im1) * fftn(im2)))

    convolved = np.fft.fftshift(convolved)

    return convolved
Example #37
 def solve_neutral(self, phi_g, rho_g, eps=None):
     if self.gd.comm.size == 1:
         phi_g[:] = ifftn(fftn(rho_g) * 4.0 * pi / self.k2_Q).real
     else:
         rho_g = self.gd.collect(rho_g)
         if self.gd.comm.rank == 0:
             globalphi_g = ifftn(fftn(rho_g) * 4.0 * pi / self.k2_Q).real
         else:
             globalphi_g = None
         self.gd.distribute(globalphi_g, phi_g)
     return 1
Example #38
 def solve_neutral(self, phi_g, rho_g, eps=None):
     if self.gd.comm.size == 1:
         phi_g[:] = ifftn(fftn(rho_g) * 4.0 * pi / self.k2_Q).real
     else:
         rho_g = self.gd.collect(rho_g)
         if self.gd.comm.rank == 0:
             globalphi_g = ifftn(fftn(rho_g) * 4.0 * pi / self.k2_Q).real
         else:
             globalphi_g = None
         self.gd.distribute(globalphi_g, phi_g)
     return 1
Example #39
def dog_smooth__large_map(v, s1, s2=None):
    """
    convolve with a dog function, delete unused data when necessary
    in order to save memory for large maps
    """
    if s2 is None:
        s2 = s1 * 1.1  # the 1.1 is according to a DoG particle picking paper
    assert s1 < s2

    size = v.shape

    pad_width = int(N.round(s2 * 2))
    vp = N.pad(array=v, pad_width=pad_width, mode='reflect')

    v_fft = fftn(vp).astype(N.complex64)
    del v
    gc.collect()

    g_small = difference_of_gauss_function(size=N.array(
        [int(N.round(s2 * 4))] * 3),
                                           sigma1=s1,
                                           sigma2=s2)
    assert N.all(N.array(g_small.shape) <= N.array(
        vp.shape))  # make sure we can use CV.paste_to_whole_map()

    g = N.zeros(vp.shape)
    paste_to_whole_map(whole_map=g, vol=g_small, c=None)

    g_fft_conj = N.conj(fftn(ifftshift(g)).astype(
        N.complex64))  # use ifftshift(g) to move center of gaussian to origin
    del g
    gc.collect()

    prod_t = (v_fft * g_fft_conj).astype(N.complex64)
    del v_fft
    gc.collect()
    del g_fft_conj
    gc.collect()

    prod_t_ifft = ifftn(prod_t).astype(N.complex64)
    del prod_t
    gc.collect()

    v_conv = N.real(prod_t_ifft)
    del prod_t_ifft
    gc.collect()
    v_conv = v_conv.astype(N.float32)

    v_conv = v_conv[(pad_width + 1):(pad_width + size[0] + 1),
                    (pad_width + 1):(pad_width + size[1] + 1),
                    (pad_width + 1):(pad_width + size[2] + 1)]
    assert size == v_conv.shape

    return v_conv
Example #40
File: ims.py  Project: wj2/spims
    def peerWindow(im, bounds):
        im2 = deepcopy(im)
        
        im2.arr = im2.arr[bounds[2]:bounds[3],
                          bounds[0]:bounds[1]]
        im2.warr = im2.warr[bounds[2]:bounds[3],
                            bounds[0]:bounds[1], :]
        im2.fft = fftn(im2.arr)
        im2.fft2 = fftn(im2.arr ** 2)
        im2.stdev = im2.arr.std()
        im2.mean = im2.arr.mean()

        return im2
Example #41
    def propagate(self):
        r"""Given the wavefunction values :math:`\Psi(\Gamma)` at time :math:`t`, calculate
        new values :math:`\Psi^\prime(\Gamma)` at time :math:`t + \tau`. We perform exactly
        one single timestep of size :math:`\tau` within this function.
        """
        # How many components does Psi have
        N = self._psi.get_number_components()

        # Unpack the values from the current WaveFunction
        values = self._psi.get_values()

        # First step with the potential
        tmp = [zeros(value.shape, dtype=complexfloating) for value in values]
        for row in range(0, N):
            for col in range(0, N):
                tmp[row] = tmp[row] + self._VE[row * N + col] * values[col]

        # Go to Fourier space
        tmp = [fftn(component) for component in tmp]

        # First step with the kinetic operator
        tmp = [self._TE * component for component in tmp]

        # Go back to real space
        tmp = [ifftn(component) for component in tmp]

        # Central step with V-tilde
        tmp2 = [zeros(value.shape, dtype=complexfloating) for value in values]
        for row in range(0, N):
            for col in range(0, N):
                tmp2[row] = tmp2[row] + self._VEtilde[row * N + col] * tmp[col]

        # Go to Fourier space
        tmp = [fftn(component) for component in tmp2]

        # Second step with the kinetic operator
        tmp = [self._TE * component for component in tmp]

        # Go back to real space
        tmp = [ifftn(component) for component in tmp]

        # Second step with the potential
        values = [zeros(component.shape, dtype=complexfloating) for component in tmp]
        for row in range(0, N):
            for col in range(0, N):
                values[row] = values[row] + self._VE[row * N + col] * tmp[col]

        # Pack values back to WaveFunction object
        # TODO: Consider squeeze(.) of data before repacking
        self._psi.set_values(values)
Example #42
def convolve(target, kernel, center=None):
    if center is None:
        center = np.array(kernel.shape) // 2

    shape = [max(ts, ks) for ts, ks in zip(target.shape, kernel.shape)]
    f_kernel = fft.fftn(kernel, shape)
    f_input = fft.fftn(target, shape)
    f_result = f_kernel*f_input
    result = fft.ifftn(f_result)

    for i in range(len(center)):
        roll = -center[i]
        result = np.roll(result, roll, axis=i)

    return result
Example #43
    def emb_fftn(self, input_x, output_dim, act_axes):
        '''
        embedded fftn: abstraction of fft for future gpu computing 
        '''
        output_x=numpy.zeros(output_dim, dtype=dtype)
        #print('output_dim',input_dim,output_dim,range(0,numpy.size(input_dim)))
#         output_x[[slice(0, input_x.shape[_ss]) for _ss in range(0,len(input_x.shape))]] = input_x
        output_x[crop_slice_ind(input_x.shape)] = input_x
#         print('GPU flag',self.gpu_flag)
#         print('pyfftw flag',self.pyfftw_flag)
#         if self.gpu_flag == 1:
#             self.thr.to_device(output_x.astype(dtype), dest=self.data_dev)
#             output_x=self.gpufftn(self.data_dev).get()
             
#         except:
#         elif self.gpu_flag ==0:
#         elif self.pyfftw_flag == 1:
# #             try:
#     #                 print('using pyfftw interface')
#     #                 print('threads=',self.threads)
#             output_x=pyfftw.interfaces.scipy_fftpack.fftn(output_x, output_dim, act_axes, 
#                                                               threads=self.threads,overwrite_x=True)
# #             except: 
#         else:
    #                 print('using OLD interface')                
#                 output_x=scipy.fftpack.fftn(output_x, output_dim, act_axes,overwrite_x=True)
        output_x=fftpack.fftn(output_x, output_dim, act_axes)


        return output_x
Example #44
    def steepest_descent_images(self, image, dW_dp, forward=None):
        # compute gradient
        # grad:  dims x ch x h x w
        nabla = self.gradient(image, forward=forward)
        nabla = nabla.as_vector().reshape((image.n_dims, image.n_channels) +
                                          nabla.shape)

        # compute steepest descent images
        # gradient: dims x ch x h x w
        # dw_dp:    dims x    x h x w x params
        # sdi:             ch x h x w x params
        sdi = 0
        a = nabla[..., None] * dW_dp[:, None, ...]
        for d in a:
            sdi += d

        if self._kernel is None:
            # reshape steepest descent images
            # sdi:           (ch x h x w) x params
            # filtered_sdi:  (ch x h x w) x params
            sdi = sdi.reshape((-1, sdi.shape[-1]))
            filtered_sdi = sdi
        else:
            # if required, filter steepest descent images
            # fft_sdi:  ch x h x w x params
            filtered_sdi = ifftn(self._kernel[..., None] *
                                 fftn(sdi, axes=(-3, -2)),
                                 axes=(-3, -2))
            # reshape steepest descent images
            # sdi:           (ch x h x w) x params
            # filtered_sdi:  (ch x h x w) x params
            sdi = sdi.reshape((-1, sdi.shape[-1]))
            filtered_sdi = filtered_sdi.reshape(sdi.shape)

        return filtered_sdi, sdi
Example #45
def perdecomp (image):
  # Compute boundary image
  h,w,d = image.shape
  v = zeros (image.shape)
  v[:,0,:] = v[:,0,:] + image[:,0,:] - image[:,w-1,:]
  v[:,w-1,:] = v[:,w-1,:] + image[:,w-1,:] - image[:,0,:]
  v[0,:,:] = v[0,:,:] + image[0,:,:] - image[h-1,:,:]
  v[h-1,:,:] = v[h-1,:,:] + image[h-1,:,:] - image[0,:,:]

  # Compute multiplier
  x = arange (0., 1., 1./w)
  y = arange (0., 1., 1./h)
  xx,yy = meshgrid (x,y)
  multi = 4 - 2.*cos(2*pi*xx) - 2.*cos(2*pi*yy)
  multi[0,0] = 1.

  # Compute DFT of boundary image
  sh = fftn (v, axes=(0, 1))

  # Multiply by inverse of multiplier
  sh = sh / multi.reshape((h,w,1))
  sh[0,0,:] = zeros ((d))

  # Then, compute s as the iDFT of sh
  smooth = real (ifftn (sh, axes=(0, 1)))
  periodic = image - smooth

  return harmonize(periodic),harmonize(smooth)
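The "multiplier" above is the Fourier symbol of the negative periodic discrete Laplacian, which is what dividing the boundary image's spectrum by it inverts. A small 1-D check of that identity:

import numpy as np
from numpy import fft

# 2 - 2*cos(2*pi*xi/N) is the symbol of -laplacian; dividing the spectrum by it
# (with the zero mode pinned) solves -lap(s) = v - mean(v) with periodic BCs.
N = 16
v = np.random.rand(N)
multi = 2.0 - 2.0 * np.cos(2 * np.pi * np.arange(N) / N)
multi[0] = 1.0
s = np.real(fft.ifftn(fft.fftn(v) / multi))
lap_s = np.roll(s, -1) - 2.0 * s + np.roll(s, 1)
assert np.allclose(-lap_s, v - v.mean())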
Example #46
def interpolate(yF, yG, fevalF=None, fevalG=None,
                dim=1, xrat=2, interpolation_order=-1, **kwargs):
  """Interpolate yG to yF."""

  if interpolation_order == -1:

    zG = fft.fftn(yG)
    zF = np.zeros(fevalF.shape, zG.dtype)

    zF[fevalF.half] = zG[fevalG.full]

    yF[...] = np.real(2**dim*fft.ifftn(zF))

  elif interpolation_order == 2:

    if dim != 1:
      raise NotImplementedError

    yF[0::xrat] = yG
    yF[1::xrat] = (yG + np.roll(yG, -1)) / 2.0

  elif interpolation_order == 4:

    if dim != 1:
      raise NotImplementedError

    yF[0::xrat] = yG
    yF[1::xrat] = ( - np.roll(yG,1)
                    + 9.0*yG
                    + 9.0*np.roll(yG,-1)
                    - np.roll(yG,-2) ) / 16.0

  else:
    raise ValueError('interpolation order must be -1, 2 or 4')
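A 1-D sketch of the spectral branch (interpolation_order == -1): the coarse Fourier coefficients are copied into the matching frequency slots of the fine grid and scaled by the refinement factor; the index sets fevalF.half and fevalG.full are assumed to select exactly those slots.

import numpy as np
from numpy import fft

nG, xrat = 8, 2
nF = nG * xrat
xG = np.arange(nG) * 2 * np.pi / nG
xF = np.arange(nF) * 2 * np.pi / nF
yG = np.sin(xG)

zG = fft.fftn(yG)
zF = np.zeros(nF, dtype=zG.dtype)
zF[:nG // 2] = zG[:nG // 2]          # non-negative frequencies
zF[-(nG // 2):] = zG[-(nG // 2):]    # negative frequencies
yF = np.real(xrat * fft.ifftn(zF))   # exact for this band-limited signal

assert np.allclose(yF, np.sin(xF))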
Example #47
 def fourier(self, data):
     """
          This function performs a Fourier transform on the last 140 samples
         taken with a Hanning window, and adds the normalised results to an
         array. The highest values are found and used to generate a marker to
         visualize the brain's dominant frequency. The FT is also partitioned
         to represent 6 groups of frequencies, 3 alpha and 3 beta, as defined
         by the waves tuple. These, along with array of fourier data are
         returned to be plotted
     """
     self.Fourier_Data[1:140, :] = self.Fourier_Data[0:139, :]
     x = abs(fft.fftn(data.Processed_Data*hanning(len(data.Processed_Data))))[4:44]
     x_max = max(x)
     x_min = min(x)
     x = (255*(x-x_min)/(x_max-x_min))
     pointer = zeros((160), dtype=int8)
     pointer[(argmax(x))*4:(argmax(x))*4+4]= 255
     y = vstack((x, x, x, x))
     y = ravel(y, 'F')
     self.Fourier_Data[5, :] = y
     self.Fourier_Data[0:4, :] = vstack((pointer, pointer, pointer, pointer))
     fingers = []
     waves = (6, 9, 12, 15, 20, 25, 30)
     for i in range(6):
         finger_sum = sum(x[waves[i]:waves[i+1]])/100
         # throw away NaN values that may occur due to adjusting the NIA
         if not math.isnan(finger_sum):
             fingers.append(finger_sum)
         else:
             fingers.append(0)
     return self.Fourier_Data.tostring(), fingers
Example #48
 def cost_closure(x, k):
     if k is None:
         return lambda: x.ravel().T.dot(x.ravel())
     else:
         kx = ifftn(k[..., None] * fftn(x, axes=(-2, -1)),
                    axes=(-2, -1))
         return lambda: x.ravel().T.dot(kx.ravel())
Example #49
    def kinetic_energy(self, kinetic, summed=False):
        r"""Calculate the kinetic energy :math:`E_{\text{kin}} := \langle\Psi|T|\Psi\rangle`
        of the different components :math:`\psi_i`.

        :param kinetic: The kinetic energy operator :math:`T(\omega)`.
        :type kinetic: A :py:class:`KineticOperator` instance.
        :param summed: Whether to sum up the kinetic energies :math:`E_i` of the individual
                       components :math:`\psi_i`. Default is ``False``.
        :return: A list with the kinetic energies of the individual components
                 or the overall kinetic energy of the wavefunction. (Depending on the optional arguments.)
        """
        # TODO: Consider using less declarative coding style.
        #       Issue: Compute fft of each component only once
        #              AND avoid storing fft of all components.

        # Fourier transform the components
        ftc = [fftn(component) for component in self._values]

        # Compute the prefactor
        T = self._grid.get_extensions()
        N = self._grid.get_number_nodes()
        prefactor = product(array(T) / (1.0 * array(N) ** 2))

        # TODO: Consider taking the result of this call as input for efficiency?
        KO = kinetic.evaluate_at()

        # Compute the braket in Fourier space
        ekin = [prefactor * sum(conjugate(item) * KO * item) for item in ftc]

        if summed is True:
            ekin = sum(ekin)

        return ekin
Example #50
    def create_laplacian_kernel(self,cineObj):
#===============================================================================
# #        # Laplacian operator, convolution kernel in spatial domain
#        Note: only the y-axis is used
#         # related to constraint
#===============================================================================
        uker2 = numpy.zeros((cineObj.dim_x,)+self.st['Nd'][0:2],dtype=numpy.complex64)
        rows_kd = self.st['Nd'][0] # ky-axis
        #cols_kd = self.st['Kd'][1] # t-axis
#        uker[0,0] = 1.0
#         rate = 30.0
#         uker2[0,0,0] = -4.0 - 2.0/rate
#         uker2[0,0,1] =1.0
#         uker2[0,0,-1]=1.0
#         uker2[0,1,0] =1.0#/rate
#         uker2[0,-1,0]=1.0#/rate
#         uker2[1,0,0] =1.0/rate
#         uker2[-1,0,0]=1.0/rate  
        rate = 15.0
        uker2[0,0,0] = -2.0 - 4.0/rate
        uker2[0,0,1] =1.0
        uker2[0,0,-1]=1.0
        uker2[0,1,0] =1.0/rate
        uker2[0,-1,0]=1.0/rate
        uker2[1,0,0] =1.0/rate
        uker2[-1,0,0]=1.0/rate        
        uker2 = fftpack.fftn(uker2,axes=(0,1,2,)) # 256x256x16
        return uker2
Example #51
def laplacian_filter(in_file, in_mask=None, out_file=None):
    import numpy as np
    import nibabel as nb
    import os.path as op
    from math import pi
    from numpy.fft import fftn, ifftn, fftshift, ifftshift

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_smooth.nii.gz' % fname)

    im = nb.load(in_file)
    data = im.get_data()

    if in_mask is not None:
        mask = nb.load(in_mask).get_data()
        mask[mask > 0] = 1.0
        mask[mask <= 0] = 0.0
        data *= mask

    dataft = fftshift(fftn(data))
    x = np.linspace(0, 2 * pi, dataft.shape[0])[:, None, None]
    y = np.linspace(0, 2 * pi, dataft.shape[1])[None, :, None]
    z = np.linspace(0, 2 * pi, dataft.shape[2])[None, None, :]
    lapfilt = 2.0 * np.squeeze((np.cos(x) + np.cos(y) + np.cos(z))) - 5.0
    dataft *= fftshift(lapfilt)
    imfilt = np.real(ifftn(ifftshift(dataft)))

    nb.Nifti1Image(imfilt.astype(np.float32), im.get_affine(),
                   im.get_header()).to_filename(out_file)
    return out_file
Example #52
    def icwt2d(self, da=0.25):
        '''
        Inverse bi-dimensional continuous wavelet transform as in Wang and
        Lu (2010), equation [5].

        Parameters
        ----------
        da : float, optional
            Spacing in the frequency axis.
        '''
        if self.Wf is None:
            raise TypeError("Run cwt2D before icwt2D")
        m0, l0, k0 = self.Wf.shape

        if m0 != self.scales.size:
            raise Warning('Scale parameter array shape does not match\
                           wavelet transform array shape.')
        # Rounds the transform dimensions up to the next power of two.
        L, K = 2 ** int(np.ceil(np.log2(l0))), 2 ** int(np.ceil(np.log2(k0)))
        # Calculates the zonal and meridional wave numbers.
        l, k = fftfreq(L, self.dy), fftfreq(K, self.dx)
        # Creates empty inverse wavelet transform array and fills it for every
        # discrete scale using the convolution theorem.
        self.iWf = np.zeros((m0, L, K), 'complex')
        for i, an in enumerate(self.scales):
            psi_ft_bar = an * self.wavelet.psi_ft(an * k, an * l)
            W_ft = fftn(self.Wf[i, :, :], s=(L, K))
            self.iWf[i, :, :] = ifftn(W_ft * psi_ft_bar, s=(L, K)) *\
                da / an ** 2.

        self.iWf = self.iWf[:, :l0, :k0].real.sum(axis=0) / self.wavelet.cpsi

        return self
Example #53
def gen_wedge_psf(nx, ny, nf, dx, dy, df, z, out, threads=None):
    u = fftshift(fftfreq(nx, dx * np.pi / 180))
    v = fftshift(fftfreq(ny, dy * np.pi / 180))
    e = fftshift(fftfreq(nf, df))

    E = np.sqrt(Cosmo.Om0 * (1 + z) ** 3 +
                Cosmo.Ok0 * (1 + z) ** 2 + Cosmo.Ode0)
    D = Cosmo.comoving_transverse_distance(z).value
    H0 = Cosmo.H0.value * 1e3
    c = const.c.value
    print(E, D, H0)
    kx = u * 2 * np.pi / D
    ky = v * 2 * np.pi / D
    k_perp = np.sqrt(kx ** 2 + ky[np.newaxis, ...].T ** 2)
    k_par = e * 2 * np.pi * H0 * f21 * E / (c * (1 + z) ** 2)
    arr = np.ones((nf, nx, ny), dtype='complex128')
    for i in range(nf):
        mask = (k_perp > np.abs(k_par[i]) * c * (1 + z) / (H0 * E * D))
        arr[i][mask] = 0
    np.save('kx.npy', kx)
    np.save('ky.npy', ky)
    np.save('kpar.npy', k_par)
    np.save('wedge_window.npy', arr.real)
    fft_arr = fftshift(fftn(ifftshift(arr))).real
    hdu = fits.PrimaryHDU(data=fft_arr)
    hdr_dict = dict(cdelt1=dx, cdelt2=dy, cdelt3=df,
                    crpix1=nx/2, crpix2=ny/2, crpix3=nf/2,
                    crval1=0, crval2=0, crval3=0,
                    ctype1='RA---SIN', ctype2='DEC--SIN', ctype3='FREQ',
                    cunit1='deg', cunit2='deg', cunit3='Hz')
    for k, v in hdr_dict.items():
        hdu.header[k] = v
    hdu.writeto(out, clobber=True)
Example #54
    def steepest_descent_update(self, sdi, IWxp, template):
        # compute error image
        # error_img:  height  x  width  x  n_channels
        error_img = IWxp.pixels - template.pixels

        # compute FFT error image
        # fft_error_img:  height  x  width  x  n_channels
        fft_axes = range(IWxp.n_dims)
        fft_error_img = fftshift(fftn(error_img, axes=fft_axes),
                                 axes=fft_axes)

        # reshape FFT error image
        # fft_error_img:  (height x width)  x  n_channels
        fft_error_img = np.reshape(fft_error_img, (-1, IWxp.n_channels))

        # compute filtered steepest descent images
        # _filter_bank:        (height x width)  x
        # fft_error_img:       (height x width)  x  n_channels
        # filtered_error_img:  (height x width)  x  n_channels
        filtered_error_img = (self._filter_bank[..., None] * fft_error_img)

        # reshape _error_img
        # _error_img:  (height x width x n_channels)
        self._error_img = filtered_error_img.flatten()

        # compute steepest descent update
        # sdi:         (height x width x n_channels)  x  n_parameters
        # _error_img:  (height x width x n_channels)
        # sdu:             n_parameters
        return sdi.T.dot(np.conjugate(self._error_img))
Example #55
    def source_terms(self, mara, retphi=False):
        from numpy.fft import fftfreq, fftn, ifftn
        ng = mara.number_guard_zones()
        G = self.G
        L = 1.0
        Nx, Ny, Nz = mara.fluid.shape
        Nx -= 2*ng
        Ny -= 2*ng
        Nz -= 2*ng
        P = mara.fluid.primitive[ng:-ng,ng:-ng,ng:-ng]
        rho = P[...,0]
        vx = P[...,2]
        vy = P[...,3]
        vz = P[...,4]

        K = [fftfreq(Nx)[:,np.newaxis,np.newaxis] * (2*np.pi*Nx/L),
             fftfreq(Ny)[np.newaxis,:,np.newaxis] * (2*np.pi*Ny/L),
             fftfreq(Nz)[np.newaxis,np.newaxis,:] * (2*np.pi*Nz/L)]
        delsq = -(K[0]**2 + K[1]**2 + K[2]**2)
        delsq[0,0,0] = 1.0 # prevent division by 0

        rhohat = fftn(rho)
        phihat = (4*np.pi*G) * rhohat / delsq
        fx = -ifftn(1.j * K[0] * phihat).real
        fy = -ifftn(1.j * K[1] * phihat).real
        fz = -ifftn(1.j * K[2] * phihat).real

        S = np.zeros(mara.fluid.shape + (5,))
        S[ng:-ng,ng:-ng,ng:-ng,0] = 0.0
        S[ng:-ng,ng:-ng,ng:-ng,1] = rho * (fx*vx + fy*vy + fz*vz)
        S[ng:-ng,ng:-ng,ng:-ng,2] = rho * fx
        S[ng:-ng,ng:-ng,ng:-ng,3] = rho * fy
        S[ng:-ng,ng:-ng,ng:-ng,4] = rho * fz
        return (S, ifftn(phihat).real) if retphi else S
Example #56
    def update_d(self,u,dd):
#        print('inside_update_d ushape',u.shape)
#        print('inside_update_d fre grad ushape',freq_gradient(u).shape)
        out_dd = ()
        for jj in range(0,len(dd)) :
            if jj < 2: # derivative y 
                #tmp_d =get_Diff(u,jj)
                out_dd = out_dd  + (CsTransform.pynufft.get_Diff(u,jj),)
                
            if jj == 2: # derivative y 
                #tmp_d =get_Diff(u,jj)
                out_dd = out_dd  + (CsTransform.pynufft.get_Diff(u,jj),)  
                              
            elif jj == 3: # rho
                tmpu = numpy.copy(u)
                tmpu = fftpack.fftn(tmpu,axes = (2,))
#                 tmpu[:,:,0,:] = tmpu[:,:,0,:]*0.0
                out_dd = out_dd + (tmpu,)
                
            elif jj == 4:
                average_u = numpy.sum(u,2)
                tmpu= numpy.copy(u)
#                 for jj in range(0,u.shape[2]):
#                     tmpu[:,:,jj,:]= tmpu[:,:,jj,:] - average_u
                out_dd = out_dd + (tmpu,)
#                 out_dd = out_dd + (CsTransform.pynufft.get_Diff(tmpu,),)
#            elif jj == 3:
#                out_dd = out_dd + (freq_gradient(u),)
                
        return out_dd