Example #1
File: tools.py Project: wdachub/lit
    def grad(self, scalar):
        self.scalar_input_test(scalar)

        scalar_hat = self.fft(scalar)
        return fft.irfftn(1.0j * self.K * (2 * np.pi / self.L) * scalar_hat,
                          axes=(1, 2),
                          threads=self.num_threads)
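
For reference, here is a minimal standalone sketch of the same spectral gradient using plain numpy.fft; the class's K, L and pyfftw-backed fft are that project's own attributes, so the wavenumber grid and domain size are built explicitly here (an assumption about their meaning, not the project's code).

import numpy as np
from numpy.fft import rfftn, irfftn, fftfreq, rfftfreq

# Differentiate a 2*pi-periodic field by multiplying its real FFT by 1j*k.
N, L = 64, 2 * np.pi
x = np.arange(N) * L / N
f = np.sin(x)[:, None] + np.cos(2 * x)[None, :]       # f(x, y)
K = np.array(np.meshgrid(fftfreq(N, 1 / N),           # integer wavenumbers
                         rfftfreq(N, 1 / N), indexing="ij"))
grad_f = irfftn(1j * K * (2 * np.pi / L) * rfftn(f), axes=(1, 2))
print(np.allclose(grad_f[0], np.cos(x)[:, None]))     # d/dx sin(x) = cos(x)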
Example #2
def uirdftn(inarray, ndim=None, *args, **kwargs):
    """N-dim real unitary discrete Fourier transform

    This transform considers the Hermitian property of the transform
    from complex to real input.

    Parameters
    ----------
    inarray : ndarray
        The array to transform.

    ndim : int, optional
        The `ndim` last axes along which to compute the transform. All
        axes by default.

    Returns
    -------
    outarray : array-like (the last transformed axis has length (N - 1) * 2)
    """

    if not ndim:
        ndim = inarray.ndim

    return irfftn(inarray, axes=range(-ndim, 0), *args, **kwargs) * np.sqrt(
        np.prod(inarray.shape[-ndim:-1]) * (inarray.shape[-1] - 1) * 2)
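
A quick sanity check of the scaling (my sketch, assuming an even-length last axis): the factor is the square root of the real-space size, so the result matches numpy's irfftn with norm="ortho".

import numpy as np
from numpy.fft import rfftn, irfftn

x = np.random.rand(4, 8)
xf = rfftn(x) / np.sqrt(x.size)       # unitary forward real transform
y = irfftn(xf) * np.sqrt(x.size)      # the scaling used by uirdftn
print(np.allclose(y, x), np.allclose(y, irfftn(xf, norm="ortho")))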
Example #3
File: tools.py Project: wdachub/lit
    def grad_invlap(self, scalar):
        self.scalar_input_test(scalar)
        scalar_hat = self.fft(scalar)
        return fft.irfftn(-1.0j * self.KoverK2 * (2 * np.pi / self.L)**(-1.0) *
                          scalar_hat,
                          axes=(1, 2),
                          threads=self.num_threads)
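
The KoverK2 attribute is not shown; a plausible construction (an assumption, not this project's code) is K / |K|^2 with the k = 0 mode zeroed, since the inverse Laplacian is defined only up to a constant.

import numpy as np
from numpy.fft import fftfreq, rfftfreq

N = 64
K = np.array(np.meshgrid(fftfreq(N, 1 / N),
                         rfftfreq(N, 1 / N), indexing="ij"))
K2 = np.sum(K**2, 0)
K2[0, 0] = 1.0            # avoid division by zero at k = 0
KoverK2 = K / K2
KoverK2[:, 0, 0] = 0.0    # drop the undefined mean mode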
Example #4
def fft_gaussian_filter(img, sigma):
    """FFT gaussian convolution

    Parameters
    ----------
    img : ndarray
        Image to convolve with a gaussian kernel
    sigma : int or sequence
        The sigma(s) of the gaussian kernel in _real space_

    Returns
    -------
    filt_img : ndarray
        The filtered image
    """
    # This doesn't help agreement but it will make things faster
    # pull the shape
    s1 = np.array(img.shape)
    # s2 = np.array([int(s * 4) for s in _normalize_sequence(sigma, img.ndim)])
    shape = s1  # + s2 - 1
    # calculate a nice shape
    fshape = [sig.fftpack.helper.next_fast_len(int(d)) for d in shape]
    # pad out with reflection
    pad_img = fft_pad(img, fshape, "reflect")
    # calculate the padding
    padding = tuple(_calc_pad(o, n) for o, n in zip(img.shape, pad_img.shape))
    # so that we can calculate the cropping, maybe this should be integrated
    # into `fft_pad` ...
    fslice = tuple(
        slice(s, -e) if e != 0 else slice(s, None) for s, e in padding)
    # fourier transform and apply the filter
    kimg = rfftn(pad_img, fshape)
    filt_kimg = fourier_gaussian(kimg, sigma, pad_img.shape[-1])
    # inverse FFT and return.
    return irfftn(filt_kimg, fshape)[fslice]
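
fft_pad and _calc_pad are project-specific helpers. The core idea, sketched below with plain numpy and scipy only, is that multiplying the real FFT by scipy.ndimage.fourier_gaussian is equivalent to a periodic ("wrap") spatial Gaussian filter.

import numpy as np
from numpy.fft import rfftn, irfftn
from scipy.ndimage import fourier_gaussian, gaussian_filter

img = np.random.rand(64, 64)
kimg = rfftn(img)
filt = irfftn(fourier_gaussian(kimg, sigma=2, n=img.shape[-1]), img.shape)
ref = gaussian_filter(img, sigma=2, mode="wrap")      # periodic reference
print(np.abs(filt - ref).max())   # small; differs only by kernel truncation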
Example #5
File: tools.py Project: wdachub/lit
    def div(self, vector):
        """ Take the divergence of a vector field """
        self.vector_input_test(vector)
        vector_hat = self.fft(vector)
        return fft.irfftn(np.sum(
            1j * self.K * (2 * np.pi / self.L) * vector_hat, 0),
                          threads=self.num_threads)
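
As a standalone check (same wavenumber convention, rebuilt with numpy.fft): the divergence of a spectral gradient recovers the Laplacian.

import numpy as np
from numpy.fft import rfftn, irfftn, fftfreq, rfftfreq

N, L = 64, 2 * np.pi
x = np.arange(N) * L / N
f = np.sin(x)[:, None] * np.cos(x)[None, :]
K = np.array(np.meshgrid(fftfreq(N, 1 / N),
                         rfftfreq(N, 1 / N), indexing="ij"))
v_hat = 1j * K * (2 * np.pi / L) * rfftn(f)               # gradient of f
div_v = irfftn(np.sum(1j * K * (2 * np.pi / L) * v_hat, 0))
print(np.allclose(div_v, -2 * f))   # laplacian of sin(x)cos(y) is -2f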
Example #6
File: tools.py Project: wdachub/lit
    def h1norm(self, vector):
        """ L2 norm of a vector field """
        self.vector_input_test(vector)
        vector_hat = self.fft(vector)
        grad_vx = fft.irfftn(1.0j * self.K * (2 * np.pi / self.L) *
                             vector_hat[0],
                             axes=(1, 2),
                             threads=self.num_threads)
        grad_vy = fft.irfftn(1.0j * self.K * (2 * np.pi / self.L) *
                             vector_hat[1],
                             axes=(1, 2),
                             threads=self.num_threads)

        integrand = (grad_vx[0]**2.0 + grad_vx[1]**2.0 + grad_vy[0]**2.0 +
                     grad_vy[1]**2.0)

        return np.sum(np.ravel(integrand) * self.h**2)**0.5
Example #7
    def fftconvolve(in1, in2, mode="same", threads=1):
        """Same as above but with pyfftw added in"""
        in1 = np.asarray(in1)
        in2 = np.asarray(in2)

        if in1.ndim == in2.ndim == 0:  # scalar inputs
            return in1 * in2
        elif not in1.ndim == in2.ndim:
            raise ValueError("in1 and in2 should have the same dimensionality")
        elif in1.size == 0 or in2.size == 0:  # empty arrays
            return np.array([])

        s1 = np.array(in1.shape)
        s2 = np.array(in2.shape)
        complex_result = (np.issubdtype(in1.dtype, complex)
                          or np.issubdtype(in2.dtype, complex))
        shape = s1 + s2 - 1

        # Check that input sizes are compatible with 'valid' mode
        if sig._inputs_swap_needed(mode, s1, s2):
            # Convolution is commutative; order doesn't have any effect on output
            in1, s1, in2, s2 = in2, s2, in1, s1

        # Speed up FFT by padding to optimal size for FFTPACK
        fshape = [sig.fftpack.helper.next_fast_len(int(d)) for d in shape]
        fslice = tuple([slice(0, int(sz)) for sz in shape])
        # Pre-1.9 NumPy FFT routines are not threadsafe.  For older NumPys, make
        # sure we only call rfftn/irfftn from one thread at a time.
        if not complex_result and (sig._rfft_mt_safe
                                   or sig._rfft_lock.acquire(False)):
            try:
                sp1 = rfftn(in1, fshape, threads=threads)
                sp2 = rfftn(in2, fshape, threads=threads)
                ret = (irfftn(sp1 * sp2, fshape,
                              threads=threads)[fslice].copy())
            finally:
                if not sig._rfft_mt_safe:
                    sig._rfft_lock.release()
        else:
            # If we're here, it's either because we need a complex result, or we
            # failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
            # is already in use by another thread).  In either case, use the
            # (threadsafe but slower) SciPy complex-FFT routines instead.
            sp1 = fftn(in1, fshape, threads=threads)
            sp2 = fftn(in2, fshape, threads=threads)
            ret = ifftn(sp1 * sp2, threads=threads)[fslice].copy()
            if not complex_result:
                ret = ret.real

        if mode == "full":
            return ret
        elif mode == "same":
            return sig._centered(ret, s1)
        elif mode == "valid":
            return sig._centered(ret, s1 - s2 + 1)
        else:
            raise ValueError("Acceptable mode flags are 'valid',"
                             " 'same', or 'full'.")
Example #8
File: tools.py Project: wdachub/lit
    def curl(self, vector):
        """ Take the curl of a vector field """
        self.vector_input_test(vector)
        vector_hat = self.fft(vector)
        w = fft.irfftn(1j * self.K[0] *
                       (2.0 * np.pi / self.L) * vector_hat[1] -
                       1j * self.K[1] * (2.0 * np.pi / self.L) * vector_hat[0],
                       threads=self.num_threads)
        return w
Example #10
    def get_smoothed(self, filter):
        if isinstance(filter, Number):
            filter = filters.GaussianFilter(radius=filter)
        if filter in self._smoothed:
            return self._smoothed[filter]
        knorm = self.knorm
        fft_field = self.white_noise_fft * filter.W(knorm)
        field = fft.irfftn(fft_field)
        self._smoothed[filter] = field
        return field
Example #11
def fftconvolvefast(in1, in2):
    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)

    shape = s1 + s2 - 1

    fshape = [fftpack.helper.next_fast_len(int(d)) for d in shape]
    fslice = tuple([slice(0, int(sz)) for sz in shape])

    sp1 = rfftn(in1, fshape)
    sp2 = rfftn(in2, fshape)
    ret = (irfftn(sp1 * sp2, fshape)[fslice].copy())
    return _centered(ret, s1 - s2 + 1)
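
Unlike the mode argument of the versions above, this helper always crops to the 'valid' size (s1 - s2 + 1), so in1 must be at least as large as in2 along every axis. A usage sketch:

import numpy as np
import scipy.signal

a = np.random.rand(100, 100)
b = np.random.rand(9, 9)
print(np.allclose(fftconvolvefast(a, b),
                  scipy.signal.fftconvolve(a, b, mode="valid")))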
Example #12
    def generate_white_noise(self):
        """Generate a white noise with the relevant power spectrum.

        Returns
        -------
        white_noise : np.ndarray
        """
        # Compute the k grid
        d = self.Lbox / self.dimensions / (2 * np.pi)
        all_k = [fft.fftfreq(self.dimensions, d=d)] * (self.Ndim - 1) + [
            fft.rfftfreq(self.dimensions, d=d)
        ]

        self.kgrid = kgrid = np.array(np.meshgrid(*all_k, indexing="ij"))
        self.knorm = knorm = np.sqrt(np.sum(kgrid**2, axis=0))

        # Compute Pk
        Pk = np.zeros_like(knorm)
        mask = knorm > 0
        Pk[mask] = self.Pk(knorm[mask] * 2)

        # Compute white noise (in Fourier space)
        mu = np.random.standard_normal([self.dimensions] * self.Ndim)
        muk = fft.rfftn(mu)
        deltak = muk * np.sqrt(Pk)

        # Compute field in real space
        white_noise = fft.irfftn(deltak)

        # Normalize variance
        deltak_smoothed = deltak * self.filter.W(knorm)
        field = fft.irfftn(deltak_smoothed)
        std = field.std()

        self.white_noise_fft = deltak * self.sigma8 / std
        self.white_noise = white_noise * self.sigma8 / std

        return self.white_noise
Example #13
    def smooth(self, R):
        """Smooth the data at scale R (in pixel unit)."""
        if isinstance(R, unyt_quantity):
            R = float(R.to('pixel'))
        if R in self.data_smooth:
            return self.data_smooth[R]

        logger.debug('Smoothing at scale %.3f', R)
        if R > 0:
            data_f = self.data_raw_f * np.exp(-self.k2 * R**2 / 2)
        else:
            data_f = self.data_raw_f.copy()
        self.data_smooth_f[R] = data_f
        self.data_smooth[R] = fft.irfftn(data_f, **self.FFT_args)
        return self.data_smooth[R]
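
A standalone sketch of the same kernel, assuming self.k2 is the squared wavenumber grid in radians per pixel and self.data_raw_f the rfftn of the raw data; exp(-k2 * R**2 / 2) is then a Gaussian filter of standard deviation R pixels.

import numpy as np
from numpy.fft import rfftn, irfftn, fftfreq, rfftfreq

data = np.random.rand(32, 32)
kx = 2 * np.pi * fftfreq(32)          # radians per pixel
ky = 2 * np.pi * rfftfreq(32)
k2 = kx[:, None]**2 + ky[None, :]**2
R = 3.0                               # smoothing scale in pixels
smoothed = irfftn(rfftn(data) * np.exp(-k2 * R**2 / 2), data.shape)
print(smoothed.std() < data.std())    # smoothing suppresses variance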
Example #14
File: ft.py Project: wwwennie/pycdft
def ftgr(fg, grid, real=False):
    """Fourier transform function fg from G space to R space.

    Args:
        fg (np.ndarray): G space function. shape == (grid.n1, grid.n2, grid.n3).
        grid (FFTGrid): FFT grid on which fg is defined.
        real (bool): if True, fg contains only iG1 >= 0 and thus has the shape
            (grid.n1 // 2 + 1, grid.n2, grid.n3); after the FT, the R space function is real.

    Returns:
        R space function.
    """
    if real:
        assert fg.shape == (grid.n1h, grid.n2, grid.n3)
        fgzyx = fg.T
        return grid.N * irfftn(fgzyx, s=(grid.n1, grid.n2, grid.n3)).T
    else:
        assert fg.shape == (grid.n1, grid.n2, grid.n3)
        return grid.N * ifftn(fg)
Example #15
File: uft.py Project: raftale/dphutils
def uirfftn(inarray, dim=None, shape=None):
    """N-dimensional inverse real unitary Fourier transform.

    This transform considers the Hermitian property of the transform
    from complex to real input.

    Parameters
    ----------
    inarray : ndarray
        The array to transform.
    dim : int, optional
        The `dim` last axes along which to compute the transform. All
        axes by default.
    shape : tuple of int, optional
        The shape of the output. The shape of ``rfft`` is ambiguous in
        case of odd-valued input shape. In this case, this parameter
        should be provided. See ``irfftn``.

    Returns
    -------
    outarray : ndarray
        The unitary N-D inverse real Fourier transform of ``inarray``.

    Notes
    -----
    The ``uirfft`` function assumes that the output array is
    real-valued. Consequently, the input is assumed to have a Hermitian
    property and redundant values are implicit.

    Examples
    --------
    >>> input = np.ones((5, 5, 5))
    >>> output = uirfftn(urfftn(input), shape=input.shape)
    >>> np.allclose(input, output)
    True
    >>> output.shape
    (5, 5, 5)
    """
    if dim is None:
        dim = inarray.ndim
    outarray = irfftn(inarray, shape, axes=range(-dim, 0))
    return outarray * np.sqrt(np.prod(outarray.shape[-dim:]))
Example #16
def _ifft(arry):
    """Find inverse FFT of arry.

    Parameters
    ----------
    arry : array_like
        The spectrum of a real-valued signal

    Returns
    -------
    array_like
        The spectrum transformed back to physical space
    """
    slicer = (base_slices +
              tuple(slice(None) for dim in arry.shape[ndims:]))
    big_result = irfftn(arry,
                        axes=axes,
                        s=computational_shape,
                        threads=NUM_THREADS,
                        planner_effort=PLANNER_EFFORT)
    return big_result[slicer]
Example #17
def fft_gaussian_filter(img, sigma):
    """FFT gaussian convolution

    Parameters
    ----------
    img : ndarray
        Image to convolve with a gaussian kernel
    sigma : int or sequence
        The sigma(s) of the gaussian kernel in _real space_

    Returns
    -------
    filt_img : ndarray
        The filtered image
    """
    # This doesn't help agreement but it will make things faster
    # pull the shape
    s1 = np.array(img.shape)
    # if any of the sizes is 32 or smaller, revert to proper filter
    if any(s1 < 33):
        warnings.warn(("Input is small along a dimension,"
                       " will revert to `gaussian_filter`"))
        return gaussian_filter(img, sigma)
    # s2 = np.array([int(s * 4) for s in _normalize_sequence(sigma, img.ndim)])
    shape = s1  # + s2 - 1
    # calculate a nice shape
    fshape = [sig.fftpack.helper.next_fast_len(int(d)) for d in shape]
    # pad out with reflection
    pad_img = fft_pad(img, fshape, "reflect")
    # calculate the padding
    padding = tuple(_calc_pad(o, n) for o, n in zip(img.shape, pad_img.shape))
    # so that we can calculate the cropping, maybe this should be integrated
    # into `fft_pad` ...
    fslice = tuple(slice(s, -e) if e != 0 else slice(s, None)
                   for s, e in padding)
    # fourier transform and apply the filter
    kimg = rfftn(pad_img, fshape)
    filt_kimg = fourier_gaussian(kimg, sigma, pad_img.shape[-1])
    # inverse FFT and return.
    return irfftn(filt_kimg, fshape)[fslice]
Example #18
def fftconvolve_fast(data, kernel, **kwargs):
    """A faster version of fft convolution

    In this case the kernel is ifftshifted before the FFT but the data is
    not. Because Fourier convolution "wraps" around the data edges,
    ifftshifting the kernel before the FFT is equivalent to fftshifting
    the result afterwards, so that step can be skipped entirely.
    """
    # TODO: add error checking like in the above and add functionality
    # for complex inputs. Also could add options for different types of
    # padding.
    dshape = np.array(data.shape)
    kshape = np.array(kernel.shape)
    # find maximum dimensions
    maxshape = np.max((dshape, kshape), 0)
    # calculate a nice shape
    fshape = [sig.fftpack.helper.next_fast_len(int(d)) for d in maxshape]
    # pad out with reflection
    pad_data = fft_pad(data, fshape, "reflect")
    # calculate padding
    padding = tuple(
        _calc_pad(o, n) for o, n in zip(data.shape, pad_data.shape))
    # so that we can calculate the cropping, maybe this should be integrated
    # into `fft_pad` ...
    fslice = tuple(
        slice(s, -e) if e != 0 else slice(s, None) for s, e in padding)
    if kernel.shape != pad_data.shape:
        # its been assumed that the background of the kernel has already been
        # removed and that the kernel has already been centered
        kernel = fft_pad(kernel, pad_data.shape, mode='constant')
    k_kernel = rfftn(ifftshift(kernel), pad_data.shape, **kwargs)
    k_data = rfftn(pad_data, pad_data.shape, **kwargs)
    convolve_data = irfftn(k_kernel * k_data, pad_data.shape, **kwargs)
    # return data with same shape as original data
    return convolve_data[fslice]
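
A quick check of the docstring's claim (a sketch assuming even-sized arrays): ifftshifting the kernel before the FFT gives the same result as fftshifting the convolution afterwards.

import numpy as np
from numpy.fft import rfftn, irfftn, ifftshift, fftshift

data = np.random.rand(32, 32)
kernel = np.random.rand(32, 32)       # centered kernel
a = irfftn(rfftn(ifftshift(kernel)) * rfftn(data), data.shape)
b = fftshift(irfftn(rfftn(kernel) * rfftn(data), data.shape))
print(np.allclose(a, b))              # True for even sizes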
Example #19
def cross_correlation_3d(pixels1, pixels2):
    '''Align the second image with the first using max cross-correlation

    returns the z,y,x offsets to add to image1's indexes to align it with
    image2

    Many of the ideas here are based on the paper, "Fast Normalized
    Cross-Correlation" by J.P. Lewis
    (http://www.idiom.com/~zilla/Papers/nvisionInterface/nip.html)
    which is frequently cited when addressing this problem.
    '''

    s = np.maximum(pixels1.shape, pixels2.shape)
    fshape = s*2
    #
    # Calculate the # of pixels at a particular point
    #
    i, j, k = np.mgrid[-s[0]:s[0], -s[1]:s[1], -s[2]:s[2]]
    unit = np.abs(i*j*k).astype(float)
    unit[unit < 1] = 1  # keeps from dividing by zero in some places
    #
    # Normalize the pixel values around zero which does not affect the
    # correlation, keeps some of the sums of multiplications from
    # losing precision and precomputes t(x-u,y-v) - t_mean
    #
    pixels1 = np.nan_to_num(pixels1-nanmean(pixels1))
    pixels2 = np.nan_to_num(pixels2-nanmean(pixels2))
    #
    # Lewis uses an image, f and a template t. He derives a normalized
    # cross correlation, ncc(u,v) =
    # sum((f(x,y)-f_mean(u,v))*(t(x-u,y-v)-t_mean),x,y) /
    # sqrt(sum((f(x,y)-f_mean(u,v))**2,x,y) * (sum((t(x-u,y-v)-t_mean)**2,x,y)
    #
    # From here, he finds that the numerator term, f_mean(u,v)*(t...) is zero
    # leaving f(x,y)*(t(x-u,y-v)-t_mean) which is a convolution of f
    # by t-t_mean.
    #
    fp1 = rfftn(pixels1.astype('float32'), fshape, axes=(0, 1, 2))
    fp2 = rfftn(pixels2.astype('float32'), fshape, axes=(0, 1, 2))
    corr12 = irfftn(fp1 * fp2.conj(), axes=(0, 1, 2)).real
    #
    # Use the trick of Lewis here - compute the cumulative sums
    # in a fashion that accounts for the parts that are off the
    # edge of the template.
    #
    # We do this in quadrants:
    # q0 q1
    # q2 q3
    # For the first,
    # q0 is the sum over pixels1[i:,j:] - sum i,j backwards
    # q1 is the sum over pixels1[i:,:j] - sum i backwards, j forwards
    # q2 is the sum over pixels1[:i,j:] - sum i forwards, j backwards
    # q3 is the sum over pixels1[:i,:j] - sum i,j forwards
    #
    # The second is done as above but reflected lr and ud
    #
    def get_cumsums(im, fshape):
        im_si = im.shape[0]
        im_sj = im.shape[1]
        im_sk = im.shape[2]
        im_sum = np.zeros(fshape)
        im_sum[:im_si, :im_sj, :im_sk] = cumsum_quadrant(im, False, False, False)
        im_sum[:im_si, :im_sj, -im_sk:] = cumsum_quadrant(im, False, False, True)
        im_sum[:im_si, -im_sj:, :im_sk] = cumsum_quadrant(im, False, True, True)
        im_sum[:im_si, -im_sj:, -im_sk:] = cumsum_quadrant(im, False, True, False)
        im_sum[-im_si:, :im_sj, :im_sk] = cumsum_quadrant(im, True, False, True)
        im_sum[-im_si:, :im_sj, -im_sk:] = cumsum_quadrant(im, True, False, False)
        im_sum[-im_si:, -im_sj:, :im_sk] = cumsum_quadrant(im, True, True, True)
        im_sum[-im_si:, -im_sj:, -im_sk:] = cumsum_quadrant(im, True, True, False)
        #
        # Divide the sum over the # of elements summed-over
        #
        return old_div(im_sum, unit)

    p1_mean = get_cumsums(pixels1, fshape)
    p2_mean = get_cumsums(pixels2, fshape)
    #
    # Once we have the means for u,v, we can calculate the
    # variance-like parts of the equation. We have to multiply
    # the mean^2 by the # of elements being summed-over
    # to account for the mean being summed that many times.
    #
    p1sd = np.sum(pixels1**2) - p1_mean**2 * np.product(s)
    p2sd = np.sum(pixels2**2) - p2_mean**2 * np.product(s)
    #
    # There's always chance of roundoff error for a zero value
    # resulting in a negative sd, so limit the sds here
    #
    sd = np.sqrt(np.maximum(p1sd * p2sd, 0))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        corrnorm = old_div(corr12, sd)
    #
    # There's not much information for points where the standard
    # deviation is less than 1/100 of the maximum. We exclude these
    # from consideration.
    #
    corrnorm[(unit < old_div(np.product(s), 2)) &
             (sd < old_div(np.mean(sd), 100))] = 0
    # Also exclude possibilities with few observed pixels.
    corrnorm[unit < old_div(np.product(s), 4)] = 0

    return corrnorm
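
A usage sketch with synthetic data (nanmean, cumsum_quadrant and old_div are assumed importable alongside the function): the aligning offset is the argmax of the returned correlation, with indices at or past s wrapped to negative shifts.

import numpy as np

stack1 = np.random.rand(8, 32, 32)
stack2 = np.roll(stack1, (1, 3, -2), axis=(0, 1, 2))
corr = cross_correlation_3d(stack1, stack2)
offset = np.array(np.unravel_index(np.argmax(corr), corr.shape))
s = np.maximum(stack1.shape, stack2.shape)
offset[offset >= s] -= 2 * s[offset >= s]   # wrap to signed z, y, x shifts
print(offset)                               # expected: about (-1, -3, 2)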
Example #20
    def compute_derivatives(self, R):
        """Compute the 1st and 2nd derivatives of the (Fourier) input.

        Argument
        --------
        R: float
            The smoothing scale.

        Return
        ------
        indices: (ndim, ndim) ndarray of int
            Symmetric map from each axis pair to one of the
            M = ndim*(ndim+1)/2 hessian components.
        """
        if R not in self.data_smooth_f:
            self.smooth(R)

        data_f = self.data_smooth_f[R]
        ndim = self.ndim
        kgrid = self.kgrid
        ihess = 0
        indices = np.zeros((ndim, ndim), dtype=int)

        logger.debug('Computing hessian and gradient in Fourier space.')
        # Compute hessian and gradient in Fourier space
        for idim in range(ndim):
            k1 = kgrid[idim]
            self.grad_f[idim, ...] = ne.evaluate('data_f * 1j * k1')
            for idim2 in range(idim, ndim):
                k2 = kgrid[idim2]
                grad_f = self.grad_f[idim, ...]
                self.hess_f[ihess, ...] = ne.evaluate('grad_f * 1j * k2')

                indices[idim, idim2] = indices[idim2, idim] = ihess
                ihess += 1

        logger.debug('Inverse FFT of gradient')
        # Get them back in real space
        self.grad[...] = fft.irfftn(self.grad_f,
                                    axes=range(1, ndim + 1),
                                    **self.FFT_args)
        logger.debug('Inverse FFT of hessian')
        self.hess[...] = fft.irfftn(self.hess_f,
                                    axes=range(1, ndim + 1),
                                    **self.FFT_args)
        logger.debug('Curvature')
        if ndim == 3:
            a = self.hess[indices[0, 0]]
            b = self.hess[indices[1, 1]]
            c = self.hess[indices[2, 2]]
            d = self.hess[indices[0, 1]]
            e = self.hess[indices[0, 2]]
            f = self.hess[indices[1, 2]]
            self.curvature[...] = ne.evaluate(
                'a*b*c - c*d**2 - b*e**2 + 2*d*e*f - a*f**2',
                local_dict=dict(a=a, b=b, c=c, d=d, e=e, f=f))
        elif ndim == 2:
            a = self.hess[indices[0, 0]]
            b = self.hess[indices[1, 1]]
            c = self.hess[indices[0, 1]]
            self.curvature[...] = ne.evaluate('a*b - c**2',
                                              local_dict=dict(a=a, b=b, c=c))
        else:
            self.curvature[...] = np.linalg.det(self.hess[indices,
                                                          ...].T.copy()).T

        return indices
Example #21
def ifft(*args, **kwargs):
    return fftw.irfftn(*args, **kwargs, threads=2)
Example #22
File: align.py Project: asaich/sima
def cross_correlation_3d(pixels1, pixels2):
    '''Align the second image with the first using max cross-correlation

    returns the z,y,x offsets to add to image1's indexes to align it with
    image2

    Many of the ideas here are based on the paper, "Fast Normalized
    Cross-Correlation" by J.P. Lewis
    (http://www.idiom.com/~zilla/Papers/nvisionInterface/nip.html)
    which is frequently cited when addressing this problem.
    '''

    s = np.maximum(pixels1.shape, pixels2.shape)
    fshape = s*2
    #
    # Calculate the # of pixels at a particular point
    #
    i, j, k = np.mgrid[-s[0]:s[0], -s[1]:s[1], -s[2]:s[2]]
    unit = np.abs(i*j*k).astype(float)
    unit[unit < 1] = 1  # keeps from dividing by zero in some places
    #
    # Normalize the pixel values around zero which does not affect the
    # correlation, keeps some of the sums of multiplications from
    # losing precision and precomputes t(x-u,y-v) - t_mean
    #
    pixels1 = np.nan_to_num(pixels1-nanmean(pixels1))
    pixels2 = np.nan_to_num(pixels2-nanmean(pixels2))
    #
    # Lewis uses an image, f and a template t. He derives a normalized
    # cross correlation, ncc(u,v) =
    # sum((f(x,y)-f_mean(u,v))*(t(x-u,y-v)-t_mean),x,y) /
    # sqrt(sum((f(x,y)-f_mean(u,v))**2,x,y) * (sum((t(x-u,y-v)-t_mean)**2,x,y)
    #
    # From here, he finds that the numerator term, f_mean(u,v)*(t...) is zero
    # leaving f(x,y)*(t(x-u,y-v)-t_mean) which is a convolution of f
    # by t-t_mean.
    #
    fp1 = rfftn(pixels1.astype('float32'), fshape, axes=(0, 1, 2))
    fp2 = rfftn(pixels2.astype('float32'), fshape, axes=(0, 1, 2))
    corr12 = irfftn(fp1 * fp2.conj(), axes=(0, 1, 2)).real
    #
    # Use the trick of Lewis here - compute the cumulative sums
    # in a fashion that accounts for the parts that are off the
    # edge of the template.
    #
    # We do this in quadrants:
    # q0 q1
    # q2 q3
    # For the first,
    # q0 is the sum over pixels1[i:,j:] - sum i,j backwards
    # q1 is the sum over pixels1[i:,:j] - sum i backwards, j forwards
    # q2 is the sum over pixels1[:i,j:] - sum i forwards, j backwards
    # q3 is the sum over pixels1[:i,:j] - sum i,j forwards
    #
    # The second is done as above but reflected lr and ud
    #
    def get_cumsums(im, fshape):
        im_si = im.shape[0]
        im_sj = im.shape[1]
        im_sk = im.shape[2]
        im_sum = np.zeros(fshape)
        im_sum[:im_si, :im_sj, :im_sk] = cumsum_quadrant(im, False, False, False)
        im_sum[:im_si, :im_sj, -im_sk:] = cumsum_quadrant(im, False, False, True)
        im_sum[:im_si, -im_sj:, :im_sk] = cumsum_quadrant(im, False, True, True)
        im_sum[:im_si, -im_sj:, -im_sk:] = cumsum_quadrant(im, False, True, False)
        im_sum[-im_si:, :im_sj, :im_sk] = cumsum_quadrant(im, True, False, True)
        im_sum[-im_si:, :im_sj, -im_sk:] = cumsum_quadrant(im, True, False, False)
        im_sum[-im_si:, -im_sj:, :im_sk] = cumsum_quadrant(im, True, True, True)
        im_sum[-im_si:, -im_sj:, -im_sk:] = cumsum_quadrant(im, True, True, False)
        #
        # Divide the sum over the # of elements summed-over
        #
        return im_sum / unit

    p1_mean = get_cumsums(pixels1, fshape)
    p2_mean = get_cumsums(pixels2, fshape)
    #
    # Once we have the means for u,v, we can calculate the
    # variance-like parts of the equation. We have to multiply
    # the mean^2 by the # of elements being summed-over
    # to account for the mean being summed that many times.
    #
    p1sd = np.sum(pixels1**2) - p1_mean**2 * np.product(s)
    p2sd = np.sum(pixels2**2) - p2_mean**2 * np.product(s)
    #
    # There's always chance of roundoff error for a zero value
    # resulting in a negative sd, so limit the sds here
    #
    sd = np.sqrt(np.maximum(p1sd * p2sd, 0))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        corrnorm = corr12 / sd
    #
    # There's not much information for points where the standard
    # deviation is less than 1/100 of the maximum. We exclude these
    # from consideration.
    #
    corrnorm[(unit < np.product(s) / 2) &
             (sd < np.mean(sd) / 100)] = 0
    # Also exclude possibilities with few observed pixels.
    corrnorm[unit < np.product(s) / 4] = 0

    return corrnorm
Example #23
File: tools.py Project: wdachub/lit
    def ifft(self, scalar_hat):
        """ Perform the inverse FFT of a scalar field """
        self.scalar_hat_input_test(scalar_hat)
        return fft.irfftn(scalar_hat, threads=self.num_threads)
Example #24
File: tools.py Project: wdachub/lit
    def ifft(self, vector_hat):
        """ Perform the inverse FFT of a vector hat field """
        self.vector_hat_input_test(vector_hat)
        return fft.irfftn(vector_hat, axes=(1, 2), threads=self.num_threads)