Example no. 1
def monogenic_filter_one_scale_gray(img, ss=1, minWaveLength=3, mult=2.1, sigmaOnf=0.55):
    # Floating-point input is required to compute the FFT2
    if img.dtype not in ['float32', 'float64']:
        img = np.float64(img)
        imgdtype = 'float64'
    else:
        imgdtype = img.dtype
    # For a 3-channel image, reduce to a single channel by taking the mean
    if img.ndim == 3:  # TODO: this needs to be changed
        img = img.mean(2)
    rows, cols = img.shape
    # Compute the monogenic scale filters in the frequency domain
    logGabor, logGabor_H1, logGabor_H2 = monogenic_scale(cols, rows, ss, minWaveLength, mult, sigmaOnf)
    # FFT2 with the zero-frequency term in the corner (no fftshift)
    IM = fft2(img)     # Discrete Fourier Transform of image
    IMF = IM * logGabor   # Frequency bandpassed image
    f = np.real(ifft2(IMF))  # Spatially bandpassed image
    # Band-passed monogenic filtering; h1 and h2 are the two Riesz (odd) components
    IMH1 = IM * logGabor_H1
    IMH2 = IM * logGabor_H2
    h1 = np.real(ifft2(IMH1))
    h2 = np.real(ifft2(IMH2))
    # Amplitude of this scale component
    An = np.sqrt(f * f + h1 * h1 + h2 * h2)
    # Orientation computation
    ori = np.arctan(-h2 / h1)
    # Wrap the orientation to [0, pi) and convert radians to degrees
    ori_d = np.fix((ori % np.pi) / np.pi * 180.)
    # Feature type (a phase angle between -pi/2 and pi/2)
    ft = np.arctan2(f, np.sqrt(h1 * h1 + h2 * h2))
    # Projection onto the ij plane
    fr = np.sqrt(h1 * h1 + h2 * h2)
    return An, ori_d, ori, ft, fr, f
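
A minimal usage sketch for the snippet above, assuming monogenic_scale is defined in the same module and that fft2/ifft2 come from numpy.fft; the input image and parameter values are illustrative only:

import numpy as np
from numpy.fft import fft2, ifft2

# Hypothetical driver: single-scale monogenic analysis of a random grayscale image.
img = np.random.rand(128, 128)
An, ori_d, ori, ft, fr, f = monogenic_filter_one_scale_gray(img, ss=1, minWaveLength=3)
print(An.shape)                    # local amplitude map
print(ori_d.min(), ori_d.max())    # orientation in degrees, roughly 0..180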
Example no. 2
 def _hilbert(self, data):
     pci1 = fft.fft2(fft.fftshift(np.float32(data)))
     pci2 = fft.ifftshift(pci1)*self.filter1
     fpci0 = fft.ifftshift(fft.ifft2(fft.fftshift(pci2)))
     fpci = np.imag(fpci0)
     result = fpci
     return result
Example no. 3
 def psf_correction(self, mat, win, pad_width):
     (nrow, ncol) = mat.shape
     mat_pad = np.pad(mat, pad_width, mode="reflect")
     win_pad = np.pad(win, pad_width, mode="constant", constant_values=1.0)
     mat_dec = fft.ifft2(fft.fft2(mat_pad) / fft.ifftshift(win_pad))
     return np.abs(mat_dec)[pad_width:pad_width + nrow,
                            pad_width:pad_width + ncol]
Example no. 4
 def _hilbert(self, data):
     pci1 = fft.fft2(fft.fftshift(np.float32(data)))
     pci2 = fft.ifftshift(pci1) * self.filter1
     fpci0 = fft.ifftshift(fft.ifft2(fft.fftshift(pci2)))
     fpci = np.imag(fpci0)
     result = fpci
     return result
Example no. 5
def remove_stripe_based_fft(sinogram, u, n, v, pad=150):
    """
    Remove stripes using the method in Ref. [1].
    Angular direction is along the axis 0.

    Parameters
    ----------
    sinogram : array_like
        2D array.
    u,n : int
        Define the shape of the 1D Butterworth low-pass filter.
    v : int
        Number of rows (multiplied by 2) to which the filter is applied.
    pad : int
        Padding for the Fourier transform.

    Returns
    -------
    ndarray
        2D array. Stripe-removed sinogram.

    References
    ----------
    .. [1] https://doi.org/10.1063/1.1149043
    """
    if pad > 0:
        sinogram = np.pad(sinogram, ((pad, pad), (0, 0)), mode='mean')
        sinogram = np.pad(sinogram, ((0, 0), (pad, pad)), mode='edge')
    (nrow, ncol) = sinogram.shape
    window2d = create_2d_window(ncol, nrow, u, v, n)
    sinogram = fft.ifft2(
        np.fft.ifftshift(np.fft.fftshift(fft.fft2(sinogram)) * window2d))
    return np.real(sinogram[pad:nrow - pad, pad:ncol - pad])
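
A hedged usage sketch for remove_stripe_based_fft; the parameter values below are illustrative only, and create_2d_window plus the fft module are assumed to be available from the snippet's own package:

import numpy as np

# Synthetic sinogram: angles along axis 0, detector pixels along axis 1.
sinogram = np.random.rand(900, 2048)
# u and n shape the 1D Butterworth low-pass profile; v is the number of
# rows (times 2) around the zero-frequency row that get filtered.
cleaned = remove_stripe_based_fft(sinogram, u=20, n=8, v=1, pad=150)
print(cleaned.shape)   # same shape as the input sinogram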
Example no. 6
def apply_gaussian_filter(mat, sigmax, sigmay, pad):
    """
    Filtering an image using a 2D Gaussian window.

    Parameters
    ----------
    mat : array_like
        2D array.
    sigmax : int
        Sigma in the x-direction.
    sigmay : int
        Sigma in the y-direction.
    pad : int
        Padding for the Fourier transform.

    Returns
    -------
    ndarray
        2D array. Filtered image.
    """
    mat_pad = np.pad(mat, ((0, 0), (pad, pad)), mode='edge')
    mat_pad = np.pad(mat_pad, ((pad, pad), (0, 0)), mode='mean')
    (nrow, ncol) = mat_pad.shape
    window = make_2d_gaussian_window(nrow, ncol, sigmax, sigmay)
    listx = np.arange(0, ncol)
    listy = np.arange(0, nrow)
    x, y = np.meshgrid(listx, listy)
    mat_sign = np.power(-1.0, x + y)
    mat_filt = np.real(
        fft.ifft2(fft.fft2(mat_pad * mat_sign) * window) * mat_sign)
    return mat_filt[pad:nrow - pad, pad:ncol - pad]
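
The (-1)**(x + y) sign pattern shifts the zero frequency to the centre of the spectrum, so make_2d_gaussian_window is expected to return a centred low-pass Gaussian. A hypothetical stand-in under that assumption (not necessarily the package's actual helper):

import numpy as np

def make_2d_gaussian_window(nrow, ncol, sigmax, sigmay):
    # Centred 2D Gaussian low-pass window in the frequency domain.
    xcenter = (ncol - 1.0) / 2.0
    ycenter = (nrow - 1.0) / 2.0
    x = np.arange(ncol) - xcenter
    y = np.arange(nrow) - ycenter
    u, v = np.meshgrid(x, y)
    return np.exp(-(u ** 2 / (2.0 * sigmax ** 2) + v ** 2 / (2.0 * sigmay ** 2)))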
Example no. 7
 def _paganin(self, data):
     pci1 = fft.fft2(np.float32(data))
     pci2 = fft.fftshift(pci1) / self.filtercomplex
     fpci = np.abs(fft.ifft2(pci2))
     result = -0.5 * self.parameters['Ratio'] * np.log(
         fpci + self.parameters['increment'])
     return result
Example no. 8
 def _paganin(self, data):
     pci1 = fft.fft2(np.float32(data))
     pci2 = fft.fftshift(pci1) / self.filtercomplex
     fpci = np.abs(fft.ifft2(pci2))
     result = -0.5 * self.parameters['Ratio'] * np.log(
         fpci + self.parameters['increment'])
     return result
Example no. 9
    def get_digital_phase_mask(self, psi):
        """
        Calculate the digital phase mask (i.e. reference wave), as in Colomb et
        al. 2006, Eqn. 26 [1]_.
        A second-order polynomial (numerical parametric lens) is fitted with
        least squares to remove tilt and spherical aberration.
        .. [1] http://www.ncbi.nlm.nih.gov/pubmed/16512526
        Parameters
        ----------
        psi : `~numpy.ndarray`
            The product of the Fourier transform of the hologram and the Fourier
            transform of impulse response function
        Returns
        -------
        phase_mask : `~numpy.ndarray`
            Digital phase mask, used for correcting phase aberrations.
        """
        inverse_psi = fftshift(ifft2(psi, axes=(0, 1)), axes=(0, 1))

        unwrapped_phase_image = np.atleast_3d(
            unwrap_phase(inverse_psi)) / 2 / self.wavenumber
        smooth_phase_image = gaussian_filter(
            unwrapped_phase_image, [50, 50, 0])  # do not filter along axis 2

        high = np.percentile(unwrapped_phase_image, 99)
        low = np.percentile(unwrapped_phase_image, 1)

        smooth_phase_image[high < unwrapped_phase_image] = high
        smooth_phase_image[low > unwrapped_phase_image] = low

        # Fit the smoothed phase image with a 2nd order polynomial surface with
        # mixed terms using least-squares.
        # This is iterated over all wavelength channels separately
        # TODO: can this be done on the smooth_phase_image along axis 2 instead
        # of direct iteration?
        channels = np.split(smooth_phase_image,
                            smooth_phase_image.shape[2],
                            axis=2)
        fits = list()

        # Need to flip mgrid indices for this least squares solution
        y, x = self.mgrid - self.n / 2
        x, y = np.squeeze(x), np.squeeze(y)

        for channel in channels:
            v = np.array([
                np.ones(len(x[0, :])), x[0, :], y[:, 0], x[0, :]**2,
                x[0, :] * y[:, 0], y[:, 0]**2
            ])
            coefficients = np.linalg.lstsq(v.T, np.squeeze(channel))[0]
            fits.append(np.dot(v.T, coefficients))

        field_curvature_mask = np.stack(fits, axis=2)
        digital_phase_mask = np.exp(-1j * self.wavenumber *
                                    field_curvature_mask)

        return digital_phase_mask
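
The heart of the correction is a least-squares fit of a second-order surface z ≈ c0 + c1*x + c2*y + c3*x**2 + c4*x*y + c5*y**2. A self-contained sketch of just that step on a synthetic quadratic surface; the grid and design matrix mirror the method above, everything else is made up for illustration:

import numpy as np

n = 64
y, x = np.mgrid[0:n, 0:n] - n / 2
z = 0.5 + 0.1 * x - 0.2 * y + 0.01 * x ** 2 + 0.005 * x * y + 0.02 * y ** 2

# Same design matrix as in get_digital_phase_mask: [1, x, y, x^2, x*y, y^2]
v = np.array([np.ones(n), x[0, :], y[:, 0], x[0, :] ** 2,
              x[0, :] * y[:, 0], y[:, 0] ** 2])
coefficients = np.linalg.lstsq(v.T, z, rcond=None)[0]
surface_fit = np.dot(v.T, coefficients)
print(np.abs(surface_fit - z).max())   # ~0: a quadratic surface is reproduced exactly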
Example no. 10
 def mapFromFFT(self,kFilter=None,kFilterFromList=None,showFilter=False,setMeanToZero=False,returnFFT=False):
     """
     @brief Performs the inverse FFT (map from FFT) with an optional filter.
     @param kFilter Optional; if applied, the resulting map = IFFT(fft*kFilter)
     @param kFilterFromList Optional; pair (l, Fl) defining a 1D filter that is
            spline-interpolated onto the 2D multipole grid
     @param showFilter If True, plot the filter being applied
     @param setMeanToZero If True, zero the l=0 (mean) mode
     @param returnFFT If True, also return a copy of this object holding the filtered kMap
     @return (optionally filtered) 2D real array
     """
     kMap = self.kMap.copy()
     kFilter0 = numpy.real(kMap.copy())*0.+ 1.
     if kFilter is not None:
         
         kFilter0 *= kFilter
         
     if kFilterFromList is not None:
         kFilter = kMap.copy()*0.
         l = kFilterFromList[0]
         Fl = kFilterFromList[1] 
         FlSpline = splrep(l,Fl,k=3)
         ll = numpy.ravel(self.modLMap)
         
         kk = (splev(ll,FlSpline))
         
         kFilter = numpy.reshape(kk,[self.Ny,self.Nx])
         kFilter0 *= kFilter
     if setMeanToZero:
         id = numpy.where(self.modLMap == 0.)
         kFilter0[id] = 0.
     #showFilter =  True
     if showFilter:
         pylab.semilogy(l,Fl,'r',ll,kk,'b.')
         #utils.saveAndShow()
         #sys.exit()
         pylab.matshow(fftshift(kFilter0),origin="down",extent=[numpy.min(self.lx),\
                                                      numpy.max(self.lx),\
                                                      numpy.min(self.ly),\
                                                      numpy.max(self.ly)])
         pylab.show()
     
     kMap[:,:] *= kFilter0[:,:]
     if returnFFT:
         ftMap = self.copy()
         ftMap.kMap = kMap.copy()
         return numpy.real(ifft2(kMap)),ftMap
     else:
         return numpy.real(ifft2(kMap))
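
The kFilterFromList branch interpolates a tabulated 1D filter F(l) onto the 2D multipole grid with a cubic spline. A standalone sketch of just that step; the toy lx/ly grid and filter below stand in for the class attributes (self.modLMap, self.Nx, self.Ny):

import numpy
from scipy.interpolate import splrep, splev

Ny, Nx = 128, 128
lx = numpy.fft.fftfreq(Nx) * 360.                     # toy multipole coordinates
ly = numpy.fft.fftfreq(Ny) * 360.
modLMap = numpy.sqrt(lx[None, :] ** 2 + ly[:, None] ** 2)

l = numpy.linspace(0., modLMap.max(), 200)
Fl = numpy.exp(-(l / 100.) ** 2)                      # toy 1D filter F(l)

FlSpline = splrep(l, Fl, k=3)
kFilter = numpy.reshape(splev(numpy.ravel(modLMap), FlSpline), [Ny, Nx])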
Example no. 11
    def process_frames(self, data):
        sino = data[0]
        sino2 = np.fliplr(sino[1:])
        (Nrow, Ncol) = sino.shape
        mask = self._create_mask(
            2*Nrow-1, Ncol, 0.5*self.parameters['ratio']*Ncol)

        FT1 = fft.fftshift(fft.fft2(np.vstack((sino, sino2))))
        sino = fft.ifft2(fft.ifftshift(FT1 - FT1*mask))
        return sino[0:Nrow].real
Example no. 12
def sampling_op_adjoint(image, mask_as_image):
    """ Adjoint of sampling operator
    
    :param image: Assumed to be an array with shape [N,N]
    :param mask_as_image: Mask of shape [N,N] where the middle of the image
                          corresponds to zero frequency. Values should be 0/1
                          or True/False.
    :return: Array of shape [N, N] with the adjoint data.
    """
    N = max(image.shape)
    image_domain_data = fftw.ifft2(fftw.ifftshift(image)) * N
    return image_domain_data
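
A hedged usage sketch for sampling_op_adjoint, assuming the module-level fftw used above exposes a NumPy-compatible ifft2/ifftshift and that the first argument is the (already masked) k-space data despite its name; the mask below is illustrative:

import numpy as np

N = 128
mask_as_image = np.zeros((N, N), dtype=bool)
mask_as_image[N // 2 - 16:N // 2 + 16, :] = True   # keep a central band of frequencies
kspace = np.fft.fftshift(np.fft.fft2(np.random.rand(N, N))) * mask_as_image
adjoint_image = sampling_op_adjoint(kspace, mask_as_image)
print(adjoint_image.shape, adjoint_image.dtype)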
Example no. 13
 def process_frames(self, data):
     sinogram = data[0]        
     (height, _) = sinogram.shape
     if height%2==0:
         height = height - 1
     sinofit = np.abs(savgol_filter(
         sinogram, height, self.order, axis=0, mode = 'mirror'))
     sinofit2 = np.pad(
         sinofit,((0, 0), (self.pad, self.pad)), mode = 'edge')
     sinofit2 = np.pad(
         sinofit2,((self.pad, self.pad), (0, 0)), mode = 'mean')        
     sinofitsmooth = np.real(fft.ifft2(fft.fft2(
         sinofit2*self.matsign)*self.window2d)*self.matsign)
     sinofitsmooth = sinofitsmooth[self.pad:self.height1-self.pad,
                                   self.pad:self.width1-self.pad]
     num1 = np.mean(sinofit)
     num2 = np.mean(sinofitsmooth)
     sinofitsmooth = num1*sinofitsmooth/num2
     return sinogram/sinofit*sinofitsmooth
Example no. 14
 def process_frames(self, data):
     sinogram = data[0]
     (height, _) = sinogram.shape
     if height % 2 == 0:
         height = height - 1
     sinofit = np.abs(
         savgol_filter(sinogram, height, self.order, axis=0, mode='mirror'))
     sinofit2 = np.pad(sinofit, ((0, 0), (self.pad, self.pad)), mode='edge')
     sinofit2 = np.pad(sinofit2, ((self.pad, self.pad), (0, 0)),
                       mode='mean')
     sinofitsmooth = np.real(
         fft.ifft2(fft.fft2(sinofit2 * self.matsign) * self.window2d) *
         self.matsign)
     sinofitsmooth = sinofitsmooth[self.pad:self.height1 - self.pad,
                                   self.pad:self.width1 - self.pad]
     num1 = np.mean(sinofit)
     num2 = np.mean(sinofitsmooth)
     sinofitsmooth = num1 * sinofitsmooth / num2
     return sinogram / sinofit * sinofitsmooth
Example no. 15
 def apply_filter(self, mat, window, pattern, pad_width):
     (nrow, ncol) = mat.shape
     if pattern == "PROJECTION":
         top_drop = 10  # Remove the time stamp present in some data sets
         mat_pad = np.pad(mat[top_drop:],
                          ((pad_width + top_drop, pad_width),
                           (pad_width, pad_width)),
                          mode="edge")
         win_pad = np.pad(window, pad_width, mode="edge")
         mat_dec = fft.ifft2(
             fft.fft2(-np.log(mat_pad)) / fft.ifftshift(win_pad))
         mat_dec = np.abs(mat_dec[pad_width:pad_width + nrow,
                                  pad_width:pad_width + ncol])
     else:
         mat_pad = np.pad(-np.log(mat), ((0, 0), (pad_width, pad_width)),
                          mode='edge')
         win_pad = np.pad(window, ((0, 0), (pad_width, pad_width)),
                          mode="edge")
         mat_fft = np.fft.fftshift(fft.fft(mat_pad), axes=1) / win_pad
         mat_dec = fft.ifft(np.fft.ifftshift(mat_fft, axes=1))
         mat_dec = np.abs(mat_dec[:, pad_width:pad_width + ncol])
     return np.float32(np.exp(-mat_dec))
Example no. 16
def _2d_filter(mat, sigmax, sigmay, pad):
    """
    Filtering an image using a 2D Gaussian window.
    ---------
    Parameters: - mat: 2D array.
                - sigmax, sigmay: sigmas of the window.
                - pad: padding for FFT
    ---------
    Return:     - filtered image.
    """
    matpad = np.pad(mat, ((0, 0), (pad, pad)), mode='edge')
    matpad = np.pad(matpad, ((pad, pad), (0, 0)), mode='mean')
    (nrow, ncol) = matpad.shape
    win2d = _2d_window_ellipse(nrow, ncol, sigmax, sigmay)
    listx = np.arange(0, ncol)
    listy = np.arange(0, nrow)
    x, y = np.meshgrid(listx, listy)
    matsign = np.power(-1.0, x + y)
    # matfilter = np.real(ifft2(fft2(matpad*matsign)*win2d)*matsign)
    matfilter = np.real(
        fft_vo.ifft2(fft_vo.fft2(matpad * matsign) * win2d) * matsign)
    return matfilter[pad:nrow - pad, pad:ncol - pad]
Example no. 17
def adjoint_of_samples(samp_batch, k_mask_idx1, k_mask_idx2, N=128):
    if len(samp_batch.shape) != 2:
        print('Warning: adjoint_of_samples -> samp_batch.shape is wrong')
    batch_size = samp_batch.shape[0]
    nbr_samples = samp_batch.shape[1]
    samp_batch = ((2 * 4096) / 0.0075) * samp_batch

    adjoint_batch = np.zeros([batch_size, N, N], dtype=np.complex64)

    for i in range(batch_size):
        samples_concat = samp_batch[i]
        samples_real = samples_concat[:int(nbr_samples / 2)]
        samples_imag = samples_concat[int(nbr_samples / 2):]

        samples = samples_real - 1j * samples_imag
        if len(samples.shape) == 1:
            samples = np.expand_dims(samples, axis=1)
        fft_im = np.zeros([N, N], dtype=samples.dtype)
        fft_im[k_mask_idx1, k_mask_idx2] = samples

        adjoint = fftw.ifft2(fft_im) / 4096
        adjoint_batch[i, :, :] = adjoint

    return adjoint_batch
Example no. 18
    def _reconstruct(self, propagation_distance, fourier_mask=None):
        """
        Reconstruct the wave at a single ``propagation_distance`` for a single ``wavelength``.

        Parameters
        ----------
        propagation_distance : float
            Propagation distance [m]
        spectral_peak : integer pair [x,y]
            Centroid of spectral peak for wavelength in power spectrum of hologram FT
        fourier_mask : array_like or None, optional
            Fourier-domain mask. If None (default), a mask is determined from the position of the
            main spectral peak.

        Returns
        -------
        reconstructed_wave : `~numpy.ndarray` ndim 3
            The reconstructed wave as an array of dimensions (X, Y, wavelengths)
        """
        x_peak, y_peak = self.spectral_peak

        # Calculate mask radius. TODO: Update 150 to an automated guess based on input values.
        if self.rebin_factor != 1:
            mask_radius = 150. / self.rebin_factor
        elif self.crop_fraction is not None and self.crop_fraction != 0:
            mask_radius = 150. * self.crop_fraction
        else:
            mask_radius = 150.

        # Either use a Fourier-domain mask based on coords of spectral peak,
        # or a user-specified mask
        if fourier_mask is None:
            mask = self.real_image_mask(x_peak, y_peak, mask_radius)
        else:
            mask = np.asarray(fourier_mask, dtype=bool)
        mask = np.atleast_3d(mask)

        # Calculate Fourier transform of impulse response function
        G = self.fourier_trans_of_impulse_resp_func(
            np.atleast_1d([propagation_distance] *
                          self.wavelength.size).reshape((1, 1, -1)) -
            self.chromatic_shift)

        # Now calculate digital phase mask. First center the spectral peak for each channel
        x_peak, y_peak = x_peak.reshape(-1), y_peak.reshape(-1)
        shifted_ft_hologram = np.empty_like(np.atleast_3d(mask),
                                            dtype=np.complex128)
        for channel in range(self.wavelength.size):
            shifted_ft_hologram[:, :, channel] = arrshift(
                self.ft_hologram * mask[:, :, channel],
                [-x_peak[channel], -y_peak[channel]],
                axes=(0, 1))

        # Apodize the result
        psi = self.apodize(shifted_ft_hologram * G)
        digital_phase_mask = self.get_digital_phase_mask(psi)

        # Reconstruct the image
        # fftshift is independent of channel
        psi = np.empty_like(np.atleast_3d(shifted_ft_hologram))

        for channel in range(psi.shape[2]):
            psi[:, :, channel] = arrshift(fftshift(
                fft2(self.apodize(self.hologram) *
                     digital_phase_mask[:, :, channel],
                     axes=(0, 1))) * mask[:, :, channel],
                                          [-x_peak[channel], -y_peak[channel]],
                                          axes=(0, 1))
        psi *= G

        return fftshift(ifft2(psi, axes=(0, 1)), axes=(0, 1))
Example no. 19
def perfft2(im, compute_P=True, compute_spatial=False):
    """
    Moisan's Periodic plus Smooth Image Decomposition. The image is
    decomposed into two parts:

        im = s + p

    where 's' is the 'smooth' component with mean 0, and 'p' is the 'periodic'
    component which has no sharp discontinuities when one moves cyclically
    across the image boundaries.

    usage: S, [P, s, p] = perfft2(im)

    where:  im      is the image
            S       is the FFT of the smooth component
            P       is the FFT of the periodic component, returned if
                    compute_P (default)
            s & p   are the smooth and periodic components in the spatial
                    domain, returned if compute_spatial

    By default this function returns `P` and `S`, the FFTs of the periodic and
    smooth components respectively. If `compute_spatial=True`, the spatial
    domain components 'p' and 's' are also computed.

    This code is adapted from Lionel Moisan's Scilab function 'perdecomp.sci'
    "Periodic plus Smooth Image Decomposition" 07/2012 available at:

        <http://www.mi.parisdescartes.fr/~moisan/p+s>
    """

    if im.dtype not in ['float32', 'float64']:
        im = np.float64(im)

    rows, cols = im.shape

    # Compute the boundary image which is equal to the image discontinuity
    # values across the boundaries at the edges and is 0 elsewhere
    s = np.zeros_like(im)
    s[0, :] = im[0, :] - im[-1, :]
    s[-1, :] = -s[0, :]
    s[:, 0] = s[:, 0] + im[:, 0] - im[:, -1]
    s[:, -1] = s[:, -1] - im[:, 0] + im[:, -1]

    # Generate grid upon which to compute the filter for the boundary image
    # in the frequency domain.  Note that cos is cyclic hence the grid
    # values can range from 0 .. 2*pi rather than 0 .. pi and then pi .. 0
    x, y = (2 * np.pi * np.arange(0, v) / float(v) for v in (cols, rows))
    cx, cy = np.meshgrid(x, y)

    denom = (2. * (2. - np.cos(cx) - np.cos(cy)))
    denom[0, 0] = 1.     # avoid / 0

    S = fft2(s) / denom
    S[0, 0] = 0      # enforce zero mean

    if compute_P or compute_spatial:

        P = fft2(im) - S

        if compute_spatial:
            s = ifft2(S).real
            p = im - s

            return S, P, s, p
        else:
            return S, P
    else:
        return S
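
A short usage sketch for perfft2 (np and fft2/ifft2 are assumed to be numpy and numpy.fft, as elsewhere in these snippets):

import numpy as np
from numpy.fft import fft2, ifft2

im = np.random.rand(256, 256)
S, P, s, p = perfft2(im, compute_P=True, compute_spatial=True)
print(np.allclose(im, s + p))   # the two components sum back to the original image
print(abs(S[0, 0]))             # the smooth component is forced to zero mean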
Example no. 20
def iFFT2(FT):
    [Ny, Nx] = np.shape(FT)
    I = fftshift(ifft2(ifftshift(FT))) * np.sqrt(Ny) * np.sqrt(Nx)
    return I
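
The sqrt(Ny)*sqrt(Nx) factor combines with NumPy's 1/(Ny*Nx) scaling in ifft2 to give a unitary-style transform. A matching forward transform, written here only as a hedged counterpart sketch:

import numpy as np
from numpy.fft import fft2, fftshift, ifftshift

def FFT2(I):
    # Hypothetical counterpart to iFFT2 above: centred, unitary-style 2D FFT.
    [Ny, Nx] = np.shape(I)
    FT = fftshift(fft2(ifftshift(I))) / (np.sqrt(Ny) * np.sqrt(Nx))
    return FT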
Example no. 21
def perfft2(im, compute_P=True, compute_spatial=False):
    """
    Moisan's Periodic plus Smooth Image Decomposition. The image is
    decomposed into two parts:

        im = s + p

    where 's' is the 'smooth' component with mean 0, and 'p' is the 'periodic'
    component which has no sharp discontinuities when one moves cyclically
    across the image boundaries.

    usage: S, [P, s, p] = perfft2(im)

    where:  im      is the image
            S       is the FFT of the smooth component
            P       is the FFT of the periodic component, returned if
                    compute_P (default)
            s & p   are the smooth and periodic components in the spatial
                    domain, returned if compute_spatial

    By default this function returns `P` and `S`, the FFTs of the periodic and
    smooth components respectively. If `compute_spatial=True`, the spatial
    domain components 'p' and 's' are also computed.

    This code is adapted from Lionel Moisan's Scilab function 'perdecomp.sci'
    "Periodic plus Smooth Image Decomposition" 07/2012 available at:

        <http://www.mi.parisdescartes.fr/~moisan/p+s>
    """

    if im.dtype not in ['float32', 'float64']:
        im = np.float64(im)

    rows, cols = im.shape

    # Compute the boundary image which is equal to the image discontinuity
    # values across the boundaries at the edges and is 0 elsewhere
    s = np.zeros_like(im)
    s[0, :] = im[0, :] - im[-1, :]
    s[-1, :] = -s[0, :]
    s[:, 0] = s[:, 0] + im[:, 0] - im[:, -1]
    s[:, -1] = s[:, -1] - im[:, 0] + im[:, -1]

    # Generate grid upon which to compute the filter for the boundary image
    # in the frequency domain.  Note that cos is cyclic hence the grid
    # values can range from 0 .. 2*pi rather than 0 .. pi and then pi .. 0
    x, y = (2 * np.pi * np.arange(0, v) / float(v) for v in (cols, rows))
    cx, cy = np.meshgrid(x, y)

    denom = (2. * (2. - np.cos(cx) - np.cos(cy)))
    denom[0, 0] = 1.  # avoid / 0

    S = fft2(s) / denom
    S[0, 0] = 0  # enforce zero mean

    if compute_P or compute_spatial:

        P = fft2(im) - S

        if compute_spatial:
            s = ifft2(S).real
            p = im - s

            return S, P, s, p
        else:
            return S, P
    else:
        return S
Example no. 22
def cross_correlation_2d(pixels1, pixels2):
    '''Align the second image with the first using max cross-correlation

    returns the x,y offsets to add to image1's indexes to align it with
    image2

    Many of the ideas here are based on the paper, "Fast Normalized
    Cross-Correlation" by J.P. Lewis
    (http://www.idiom.com/~zilla/Papers/nvisionInterface/nip.html)
    which is frequently cited when addressing this problem.
    '''
    #
    # We double the size of the image to get a field of zeros
    # for the parts of one image that don't overlap the displaced
    # second image.
    #
    # Since we're going into the frequency domain, if the images are of
    # different sizes, we can make the FFT shape large enough to capture
    # the period of the largest image - the smaller just will have zero
    # amplitude at that frequency.
    #
    s = np.maximum(pixels1.shape, pixels2.shape)
    fshape = s*2
    #
    # Calculate the # of pixels at a particular point
    #
    i, j = np.mgrid[-s[0]:s[0],
                   -s[1]:s[1]]
    unit = np.abs(i*j).astype(float)
    unit[unit < 1] = 1 # keeps from dividing by zero in some places
    #
    # Normalize the pixel values around zero which does not affect the
    # correlation, keeps some of the sums of multiplications from
    # losing precision and precomputes t(x-u,y-v) - t_mean
    #
    pixels1 = np.nan_to_num(pixels1-nanmean(pixels1))
    pixels2 = np.nan_to_num(pixels2-nanmean(pixels2))
    #
    # Lewis uses an image, f and a template t. He derives a normalized
    # cross correlation, ncc(u,v) =
    # sum((f(x,y)-f_mean(u,v))*(t(x-u,y-v)-t_mean),x,y) /
    # sqrt(sum((f(x,y)-f_mean(u,v))**2,x,y) * (sum((t(x-u,y-v)-t_mean)**2,x,y)
    #
    # From here, he finds that the numerator term, f_mean(u,v)*(t...) is zero
    # leaving f(x,y)*(t(x-u,y-v)-t_mean) which is a convolution of f
    # by t-t_mean.
    #
    fp1 = fft2(pixels1.astype('float32'), fshape)
    fp2 = fft2(pixels2.astype('float32'), fshape)
    corr12 = ifft2(fp1 * fp2.conj()).real

    #
    # Use the trick of Lewis here - compute the cumulative sums
    # in a fashion that accounts for the parts that are off the
    # edge of the template.
    #
    # We do this in quadrants:
    # q0 q1
    # q2 q3
    # For the first,
    # q0 is the sum over pixels1[i:,j:] - sum i,j backwards
    # q1 is the sum over pixels1[i:,:j] - sum i backwards, j forwards
    # q2 is the sum over pixels1[:i,j:] - sum i forwards, j backwards
    # q3 is the sum over pixels1[:i,:j] - sum i,j forwards
    #
    # The second is done as above but reflected lr and ud
    #
    p1_si = pixels1.shape[0]
    p1_sj = pixels1.shape[1]
    p1_sum = np.zeros(fshape)
    p1_sum[:p1_si, :p1_sj] = cumsum_quadrant(pixels1, False, False)
    p1_sum[:p1_si, -p1_sj:] = cumsum_quadrant(pixels1, False, True)
    p1_sum[-p1_si:, :p1_sj] = cumsum_quadrant(pixels1, True, False)
    p1_sum[-p1_si:, -p1_sj:] = cumsum_quadrant(pixels1, True, True)
    #
    # Divide the sum over the # of elements summed-over
    #
    p1_mean = old_div(p1_sum, unit)

    p2_si = pixels2.shape[0]
    p2_sj = pixels2.shape[1]
    p2_sum = np.zeros(fshape)
    p2_sum[:p2_si, :p2_sj] = cumsum_quadrant(pixels2, False, False)
    p2_sum[:p2_si, -p2_sj:] = cumsum_quadrant(pixels2, False, True)
    p2_sum[-p2_si:, :p2_sj] = cumsum_quadrant(pixels2, True, False)
    p2_sum[-p2_si:, -p2_sj:] = cumsum_quadrant(pixels2, True, True)
    p2_sum = np.fliplr(np.flipud(p2_sum))
    p2_mean = old_div(p2_sum, unit)
    #
    # Once we have the means for u,v, we can calculate the
    # variance-like parts of the equation. We have to multiply
    # the mean^2 by the # of elements being summed-over
    # to account for the mean being summed that many times.
    #
    p1sd = np.sum(pixels1**2) - p1_mean**2 * np.product(s)
    p2sd = np.sum(pixels2**2) - p2_mean**2 * np.product(s)
    #
    # There's always chance of roundoff error for a zero value
    # resulting in a negative sd, so limit the sds here
    #
    sd = np.sqrt(np.maximum(p1sd * p2sd, 0))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        corrnorm = old_div(corr12, sd)
    #
    # There's not much information for points where the standard
    # deviation is less than 1/100 of the maximum. We exclude these
    # from consideration.
    #
    corrnorm[(unit < old_div(np.product(s), 2)) &
             (sd < old_div(np.mean(sd), 100))] = 0
    # Also exclude possibilities with few observed pixels.
    corrnorm[unit < old_div(np.product(s), 4)] = 0
    return corrnorm
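
The quadrant bookkeeping relies on a cumsum_quadrant helper that is not shown in this snippet. A plausible implementation consistent with the q0..q3 comments above (the two boolean flags say whether the cumulative sum runs forwards along i and j); the helper in the actual source may differ:

import numpy as np

def cumsum_quadrant(x, i_forwards, j_forwards):
    # Cumulative sum along axis 0 then axis 1, run forwards or backwards
    # along each axis so that, e.g., (False, False) gives the sum over x[i:, j:].
    if i_forwards:
        x = x.cumsum(0)
    else:
        x = np.flipud(np.flipud(x).cumsum(0))
    if j_forwards:
        return x.cumsum(1)
    else:
        return np.fliplr(np.fliplr(x).cumsum(1))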
Example no. 23
File: align.py Project: j3tsai/sima
def cross_correlation_2d(pixels1, pixels2):
    '''Align the second image with the first using max cross-correlation

    returns the x,y offsets to add to image1's indexes to align it with
    image2

    Many of the ideas here are based on the paper, "Fast Normalized
    Cross-Correlation" by J.P. Lewis
    (http://www.idiom.com/~zilla/Papers/nvisionInterface/nip.html)
    which is frequently cited when addressing this problem.
    '''
    #
    # We double the size of the image to get a field of zeros
    # for the parts of one image that don't overlap the displaced
    # second image.
    #
    # Since we're going into the frequency domain, if the images are of
    # different sizes, we can make the FFT shape large enough to capture
    # the period of the largest image - the smaller just will have zero
    # amplitude at that frequency.
    #
    s = np.maximum(pixels1.shape, pixels2.shape)
    fshape = s*2
    #
    # Calculate the # of pixels at a particular point
    #
    i,j = np.mgrid[-s[0]:s[0],
                   -s[1]:s[1]]
    unit = np.abs(i*j).astype(float)
    unit[unit<1]=1 # keeps from dividing by zero in some places
    #
    # Normalize the pixel values around zero which does not affect the
    # correlation, keeps some of the sums of multiplications from
    # losing precision and precomputes t(x-u,y-v) - t_mean
    #
    pixels1 = np.nan_to_num(pixels1-np.nanmean(pixels1))
    pixels2 = np.nan_to_num(pixels2-np.nanmean(pixels2))
    #
    # Lewis uses an image, f and a template t. He derives a normalized
    # cross correlation, ncc(u,v) =
    # sum((f(x,y)-f_mean(u,v))*(t(x-u,y-v)-t_mean),x,y) /
    # sqrt(sum((f(x,y)-f_mean(u,v))**2,x,y) * (sum((t(x-u,y-v)-t_mean)**2,x,y)
    #
    # From here, he finds that the numerator term, f_mean(u,v)*(t...) is zero
    # leaving f(x,y)*(t(x-u,y-v)-t_mean) which is a convolution of f
    # by t-t_mean.
    #
    fp1 = fft2(pixels1.astype('float32'),fshape)
    fp2 = fft2(pixels2.astype('float32'),fshape)
    corr12 = ifft2(fp1 * fp2.conj()).real

    #
    # Use the trick of Lewis here - compute the cumulative sums
    # in a fashion that accounts for the parts that are off the
    # edge of the template.
    #
    # We do this in quadrants:
    # q0 q1
    # q2 q3
    # For the first,
    # q0 is the sum over pixels1[i:,j:] - sum i,j backwards
    # q1 is the sum over pixels1[i:,:j] - sum i backwards, j forwards
    # q2 is the sum over pixels1[:i,j:] - sum i forwards, j backwards
    # q3 is the sum over pixels1[:i,:j] - sum i,j forwards
    #
    # The second is done as above but reflected lr and ud
    #
    p1_si = pixels1.shape[0]
    p1_sj = pixels1.shape[1]
    p1_sum = np.zeros(fshape)
    p1_sum[:p1_si,:p1_sj] = cumsum_quadrant(pixels1, False, False)
    p1_sum[:p1_si,-p1_sj:] = cumsum_quadrant(pixels1, False, True)
    p1_sum[-p1_si:,:p1_sj] = cumsum_quadrant(pixels1, True, False)
    p1_sum[-p1_si:,-p1_sj:] = cumsum_quadrant(pixels1, True, True)
    #
    # Divide the sum over the # of elements summed-over
    #
    p1_mean = p1_sum / unit

    p2_si = pixels2.shape[0]
    p2_sj = pixels2.shape[1]
    p2_sum = np.zeros(fshape)
    p2_sum[:p2_si,:p2_sj] = cumsum_quadrant(pixels2, False, False)
    p2_sum[:p2_si,-p2_sj:] = cumsum_quadrant(pixels2, False, True)
    p2_sum[-p2_si:,:p2_sj] = cumsum_quadrant(pixels2, True, False)
    p2_sum[-p2_si:,-p2_sj:] = cumsum_quadrant(pixels2, True, True)
    p2_sum = np.fliplr(np.flipud(p2_sum))
    p2_mean = p2_sum / unit
    #
    # Once we have the means for u,v, we can calculate the
    # variance-like parts of the equation. We have to multiply
    # the mean^2 by the # of elements being summed-over
    # to account for the mean being summed that many times.
    #
    p1sd = np.sum(pixels1**2) - p1_mean**2 * np.product(s)
    p2sd = np.sum(pixels2**2) - p2_mean**2 * np.product(s)
    #
    # There's always chance of roundoff error for a zero value
    # resulting in a negative sd, so limit the sds here
    #
    sd = np.sqrt(np.maximum(p1sd * p2sd, 0))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        corrnorm = corr12 / sd
    #
    # There's not much information for points where the standard
    # deviation is less than 1/100 of the maximum. We exclude these
    # from consideration.
    #
    corrnorm[(unit < np.product(s) / 2) &
             (sd < np.mean(sd) / 100)] = 0
    # Also exclude possibilities with few observed pixels.
    corrnorm[unit < np.product(s) / 4] = 0

    return corrnorm