Example #1
    def psf_calc(self, psf, kz, data_size):
        '''Pre calculate OTFs etc ...'''
        g = psf

        self.height = data_size[0]
        self.width  = data_size[1]
        self.depth  = data_size[2]

        (x, y, z) = mgrid[-floor(self.height/2.0):ceil(self.height/2.0),
                          -floor(self.width/2.0):ceil(self.width/2.0),
                          -floor(self.depth/2.0):ceil(self.depth/2.0)]

        gs = shape(g)
        g = g[int(floor((gs[0] - self.height)/2)):int(self.height + floor((gs[0] - self.height)/2)),
              int(floor((gs[1] - self.width)/2)):int(self.width + floor((gs[1] - self.width)/2)),
              int(floor((gs[2] - self.depth)/2)):int(self.depth + floor((gs[2] - self.depth)/2))]

        g = abs(ifftshift(ifftn(abs(fftn(g)))))
        g = g/sum(sum(sum(g)))

        self.g = g

        self.H = cast['f'](fftn(g))
        self.Ht = cast['f'](ifftn(g))

        tk = 2*kz*z

        t = g*exp(1j*tk)
        self.He = cast['F'](fftn(t))
        self.Het = cast['F'](ifftn(t))

        tk = 2*tk

        t = g*exp(1j*tk)
        self.He2 = cast['F'](fftn(t))
        self.He2t = cast['F'](ifftn(t))
Example #2
def fft_correlation(in1, in2, normalize=False):
    """Correlation of two N-dimensional arrays using FFT.

    Adapted from scipy's fftconvolve.

    Parameters
    ----------
    in1, in2 : array
    normalize: bool
        If True performs phase correlation

    """
    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    size = s1 + s2 - 1
    # Use 2**n-sized FFT
    fsize = 2 ** np.ceil(np.log2(size))
    IN1 = fftn(in1, fsize)
    IN1 *= fftn(in2, fsize).conjugate()
    if normalize is True:
        ret = ifftn(np.nan_to_num(IN1 / np.absolute(IN1))).real.copy()
    else:
        ret = ifftn(IN1).real.copy()
    del IN1
    return ret
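
The normalize=True branch above is phase correlation. For reference, a self-contained NumPy-only sketch (not from the source) that uses the same idea to recover an integer shift between two arrays:

import numpy as np

def phase_correlation_shift(a, b):
    # Cross-power spectrum normalised to unit magnitude (phase correlation).
    R = np.fft.fftn(a) * np.fft.fftn(b).conjugate()
    R /= np.maximum(np.abs(R), 1e-12)
    corr = np.fft.ifftn(R).real
    # The peak location gives the circular shift of a relative to b.
    peak = np.unravel_index(np.argmax(corr), corr.shape)
    return tuple(int(p if p <= n // 2 else p - n) for p, n in zip(peak, corr.shape))

rng = np.random.default_rng(0)
img = rng.random((64, 64))
shifted = np.roll(img, (5, -3), axis=(0, 1))
print(phase_correlation_shift(shifted, img))   # -> (5, -3)
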
Example #3
File: ftr.py Project: alexrudy/FTR
 def invert(self, estimate):
     """Invert the estimate to produce slopes.
     
     Parameters
     ----------
     estimate : array_like
         Phase estimate to invert.
     
     Returns
     -------
     xs : array_like
         Estimate of the x slopes.
     ys : array_like
         Estimate of the y slopes.
     
     
     """
     if self.manage_tt:
         estimate, ttx, tty = remove_tiptilt(self.ap, estimate)
     
     est_ft = fftpack.fftn(estimate) / 2.0
     
     xs_ft = self.gx * est_ft
     ys_ft = self.gy * est_ft
     
     xs = np.real(fftpack.ifftn(xs_ft))
     ys = np.real(fftpack.ifftn(ys_ft))
     
     if self.manage_tt and not self.suppress_tt:
         xs += ttx
         ys += tty
     
     return (xs, ys)
Example #4
 def test_definition(self):
     x = [[1,2,3],[4,5,6],[7,8,9]]
     y = ifftn(x)
     assert_array_almost_equal(y,direct_idftn(x))
     x = random((20,26))
     assert_array_almost_equal(ifftn(x),direct_idftn(x))
     x = random((5,4,3,20))
     assert_array_almost_equal(ifftn(x),direct_idftn(x))
Example #5
 def test_definition(self):
     x = np.array([[1,2,3],[4,5,6],[7,8,9]], dtype=self.dtype)
     y = ifftn(x)
     assert_(y.dtype == self.cdtype)
     assert_array_almost_equal_nulp(y,direct_idftn(x),self.maxnlp)
     x = random((20,26))
     assert_array_almost_equal_nulp(ifftn(x),direct_idftn(x),self.maxnlp)
     x = random((5,4,3,20))
     assert_array_almost_equal_nulp(ifftn(x),direct_idftn(x),self.maxnlp)
Example #6
    def test_invalid_sizes(self):
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[1 0\]\) specified"):
            ifftn([[]])

        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[ 4 -3\]\) specified"):
            ifftn([[1, 1], [2, 2]], (4, -3))
Example #7
def convolve_turb(image, fwhm, get_psf=False):
    """
    Convolve the input image with a turbulent psf

    Parameters
    ----------
    image:
        A numpy array
    fwhm:
        The FWHM of the turbulent psf.
    get_psf:
        If True, return a tuple (im,psf)

    The image dimensions should be square and even, so that the psf is
    centered.
    """

    dims = array(image.shape)
    if dims[0] != dims[1]:
        raise ValueError("only square images for now")

    # add padding for PSF in real space
    # sigma is approximate
    kdims=dims.copy()
    kdims += 2*4*fwhm/TURB_SIGMA_FAC

    # Always use 2**n-sized FFT
    kdims = 2**ceil(log2(kdims))
    kcen = kdims/2.

    imfft = fftn(image,kdims)

    k0 = 2.92/fwhm
    # in fft units
    k0 *= kdims[0]/(2*pi)

    otf = pixmodel.ogrid_turb_kimage(kdims, kcen, k0)
    otf = fftshift(otf) 

    ckim = otf*imfft
    cim = ifftn(ckim)[0:dims[0], 0:dims[1]]
    cim = cim.real

    if get_psf:
        psf = ifftn(otf)
        psf = fftshift(psf)
        psf = sqrt(psf.real**2 + psf.imag**2)
        psf = pixmodel._centered(psf, dims)
        return cim, psf
    else:
        return cim
Example #8
def preWhitenCube(**kwargs):
	'''
	Pre-whitening using noise estimates from a cube taken from the difference map.
	Returns the pre-whitened volume and various spectra. (Alp Kucukelbir, 2013)

	'''
	print('\n= Pre-whitening the Cubes')
	tStart = time()

	n             = kwargs.get('n', 0)
	vxSize        = kwargs.get('vxSize', 0)
	elbowAngstrom = kwargs.get('elbowAngstrom', 0)
	rampWeight    = kwargs.get('rampWeight',1.0)
	dataF         = kwargs.get('dataF', 0)
	dataBGF       = kwargs.get('dataBGF', 0)
	dataBGSpect   = kwargs.get('dataBGSpect', 0)

	epsilon = 1e-10

	pWfilter = createPreWhiteningFilter(n             = n,
										spectrum      = dataBGSpect,
										elbowAngstrom = elbowAngstrom,
										rampWeight    = rampWeight,
										vxSize        = vxSize)

	# Apply the pre-whitening filter to the inside cube
	dataF       = np.multiply(pWfilter['pWfilter'],dataF)

	dataPWFabs  = np.abs(dataF)
	dataPWFabs  = dataPWFabs-np.min(dataPWFabs)
	dataPWFabs  = dataPWFabs/np.max(dataPWFabs)
	dataPWSpect = sphericalAverage(dataPWFabs**2) + epsilon

	dataPW = np.real(fftpack.ifftn(fftpack.ifftshift(dataF)))
	del dataF

	# Apply the pre-whitening filter to the outside cube
	dataBGF       = np.multiply(pWfilter['pWfilter'],dataBGF)

	dataPWBGFabs  = np.abs(dataBGF)
	dataPWBGFabs  = dataPWBGFabs-np.min(dataPWBGFabs)
	dataPWBGFabs  = dataPWBGFabs/np.max(dataPWBGFabs)
	dataPWBGSpect = sphericalAverage(dataPWBGFabs**2) + epsilon

	dataBGPW = np.real(fftpack.ifftn(fftpack.ifftshift(dataBGF)))
	del dataBGF

	m, s = divmod(time() - tStart, 60)
	print "  :: Time elapsed: %d minutes and %.2f seconds" % (m, s)

	return {'dataPW':dataPW, 'dataBGPW':dataBGPW, 'dataPWSpect': dataPWSpect, 'dataPWBGSpect': dataPWBGSpect, 'peval': pWfilter['peval'], 'pcoef': pWfilter['pcoef'] }
Example #9
    def Afunc(self, f):
        fs = reshape(f, (self.height, self.width, self.depth))

        F = fftn(fs)

        d_1 = ifftshift(ifftn(F*self.H))

        d_e = ifftshift(ifftn(F*self.He))

        d_e2 = ifftshift(ifftn(F*self.He2))

        d = (1.5*real(d_1) + 2*real(d_e*self.e1) + 0.5*real(d_e2*self.e2))

        d = real(d)
        return ravel(d)
Example #10
    def Ahfunc(self, f):
        fs = reshape(f, (self.height, self.width, self.depth))

        F = fftn(fs)

        d_1 = ifftshift(ifftn(F*self.Ht))

        d_e = ifftshift(ifftn(F*self.Het))

        d_e2 = ifftshift(ifftn(F*self.He2t))

        d = (1.5*d_1 + 2*real(d_e*exp(1j*self.alpha)) + 0.5*real(d_e2*exp(2*1j*self.alpha)))

        d = real(d)
        return ravel(d)
Example #11
def preparePSF(md, PSSize):
    global PSFFileName, cachedPSF, cachedOTF2, cachedOTFH, autocorr
    
    PSFFilename = md.PSFFile
                
    if (not (PSFFileName == PSFFilename)) or (not (cachedPSF.shape == PSSize)):
        try:
            ps, vox = md.taskQueue.getQueueData(md.dataSourceID, 'PSF')
        except:
            fid = open(getFullExistingFilename(PSFFilename), 'rb')
            ps, vox = pickle.load(fid)
            fid.close()
            
        ps = ps.max(2)
        ps = ps - ps.min()
        #ps = ps*(ps > 0)
        ps = ps*scipy.signal.hanning(ps.shape[0])[:,None]*scipy.signal.hanning(ps.shape[1])[None,:]
        ps = ps/ps.sum()
        PSFFileName = PSFFilename
        pw = (numpy.array(PSSize) - ps.shape)/2.
        pw1 = numpy.floor(pw)
        pw2 = numpy.ceil(pw)
        cachedPSF = pad.with_constant(ps, ((pw2[0], pw1[0]), (pw2[1], pw1[1])), (0,))
        cachedOTFH = ifftn(cachedPSF)*cachedPSF.size
        cachedOTF2 = cachedOTFH*fftn(cachedPSF)
Example #12
 def __FilterData2D(self,data):
     #lowpass filter to suppress noise
     #a = ndimage.gaussian_filter(data.astype('f'), self.filterRadiusLowpass)
     a = ifftshift(ifftn((fftn(data.astype('f'))*cachedOTFH)*(self.lamb**2 + cachedOTF2.mean())/(self.lamb**2 + cachedOTF2))).real
     #lowpass filter again to find background
     b = ndimage.gaussian_filter(a, self.filterRadiusHighpass)
     return 24*(a - b)
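Example #13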
def rolloff(grid_shape, nhood_shape=[6,6], ncrop=None, osfactor=2, threshold=0.01, axes=[-1, -2], combi=False, slice_prof_coef=1):
    if combi:
        spatial_dims = grid_shape[:-1]
        nz = grid_shape[-1]
        ndims = len(spatial_dims)
        ro_data = ones(nz, dtype='complex64')
        ro_K = zeros((nz, ndims), dtype='float32')
        ro_K[:,2] = linspace(-nz/2, nz/2, nz, endpoint=False)
    else:
        spatial_dims = grid_shape
        ndims = len(spatial_dims)
        ro_data = array([1.0], dtype='complex64')
        ro_K = array([[0]*ndims], dtype='float32')
    G = sparse_matrix_operator(ro_K, grid_shape=spatial_dims, nhood_shape=nhood_shape, osfactor=osfactor, combi=combi, slice_prof_coef=slice_prof_coef)
    ro = G.T * ro_data
    n = int(sqrt(G.shape[1]))
    ro = reshape(ro, (n, n))
    #if osfactor > 1 and ncrop == None:
    #    ncrop = int((spatial_dims[0]/osfactor) * (osfactor - 1) / 2)
    ro = fftshift(abs(ifftn(fftshift(ro, axes=axes), axes=axes)), axes=axes)  # transform to image
    if ncrop is not None and ncrop > 0:
        ro = ro[ncrop:-ncrop, ncrop:-ncrop]
    #print 'rolloff shape:', ro.shape
    ro = ro / ro.max()  # normalize
    ro[ro < threshold] = 1.0
    #ro_max = ro.max()
    #ro[ro < threshold*ro_max] = 1.0
    ro = 1.0 / ro
    #ro = ro**2
    #print 'TOOK OUT SQUARED RO'
    #ro = ro / ro.max()
    return ro
Example #14
def fftconvolve(in1, in2, mode="full"):
    """Convolve two N-dimensional arrays using FFT. See convolve.
    
    """
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    if (in1.dtype.char in ['D', 'F']) or (in2.dtype.char in ['D', 'F']):
        cmplx=1
    else: cmplx=0
    size = s1+s2-1
    IN1 = fftn(in1,size)
    IN1 *= fftn(in2,size)
    ret = ifftn(IN1)
    del IN1
    if not cmplx:
        ret = real(ret)
    if mode == "full":
        return ret
    elif mode == "same":
        if product(s1,axis=0) > product(s2,axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret,osize)
    elif mode == "valid":
        return _centered(ret,abs(s2-s1)+1)
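
A minimal usage sketch for the "full" mode, assuming the names the snippet relies on (numpy's array and real, scipy.fftpack's fftn and ifftn) are already imported:

a = array([1.0, 2.0, 3.0])
b = array([0.0, 1.0, 0.5])
fftconvolve(a, b, mode="full")   # approx. [0., 1., 2.5, 4., 1.5], matching np.convolve(a, b)
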
Example #15
    def icwt2d(self, da=0.25):
        '''
        Inverse bi-dimensional continuous wavelet transform as in Wang and
        Lu (2010), equation [5].

        Parameters
        ----------
        da : float, optional
            Spacing in the frequency axis.
        '''
        if self.Wf is None:
            raise TypeError("Run cwt2D before icwt2D")
        m0, l0, k0 = self.Wf.shape

        if m0 != self.scales.size:
            raise Warning('Scale parameter array shape does not match '
                          'wavelet transform array shape.')
        # Calculates the FFT sizes (next power of two) along each axis.
        L, K = 2 ** int(np.ceil(np.log2(l0))), 2 ** int(np.ceil(np.log2(k0)))
        # Calculates the zonal and meridional wave numbers.
        l, k = fftfreq(L, self.dy), fftfreq(K, self.dx)
        # Creates empty inverse wavelet transform array and fills it for every
        # discrete scale using the convolution theorem.
        self.iWf = np.zeros((m0, L, K), 'complex')
        for i, an in enumerate(self.scales):
            psi_ft_bar = an * self.wavelet.psi_ft(an * k, an * l)
            W_ft = fftn(self.Wf[i, :, :], s=(L, K))
            self.iWf[i, :, :] = ifftn(W_ft * psi_ft_bar, s=(L, K)) *\
                da / an ** 2.

        self.iWf = self.iWf[:, :l0, :k0].real.sum(axis=0) / self.wavelet.cpsi

        return self
Example #16
def fftconvolve3(in1, in2=None, in3=None, mode="full"):
    """Convolve two N-dimensional arrays using FFT. See convolve.

    for use with arma (old version: in1=num, in2=den, in3=data)

    * better for consistency with other functions in1=data in2=num in3=den
    * note in2 and in3 need to have consistent dimension/shape
      since I'm using max of in2, in3 shapes and not the sum

    copied from scipy.signal.signaltools, but here used to try out inverse
    filter doesn't work or I can't get it to work

    2010-10-23
    looks ok to me for 1d,
    from results below with padded data array (fftp)
    but it doesn't work for multidimensional inverse filter (fftn)
    original signal.fftconvolve also uses fftn
    """
    if (in2 is None) and (in3 is None):
        raise ValueError('at least one of in2 and in3 needs to be given')
    s1 = np.array(in1.shape)
    if not in2 is None:
        s2 = np.array(in2.shape)
    else:
        s2 = 0
    if not in3 is None:
        s3 = np.array(in3.shape)
        s2 = max(s2, s3) # try this looks reasonable for ARMA
        #s2 = s3


    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    size = s1+s2-1

    # Always use 2**n-sized FFT
    fsize = 2**np.ceil(np.log2(size))
    #convolve shorter ones first, not sure if it matters
    if not in2 is None:
        IN1 = fft.fftn(in2, fsize)
    if not in3 is None:
        IN1 /= fft.fftn(in3, fsize)  # use inverse filter
    # note the inverse is elementwise not matrix inverse
    # is this correct, NO  doesn't seem to work for VARMA
    IN1 *= fft.fftn(in1, fsize)
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = fft.ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if np.product(s1,axis=0) > np.product(s2,axis=0):
            osize = s1
        else:
            osize = s2
        return trim_centered(ret,osize)
    elif mode == "valid":
        return trim_centered(ret,abs(s2-s1)+1)
Example #17
    def apply(self, image, freq_image=False, invert=True):
        """
        apply(image, freq_image=False, invert=True)

        Apply the current filter.

        Parameters
        ----------
        image : array_like
            Image to apply filter to.

    freq_image : bool, optional
            Flag to indicate if the input image is already the DFT of the
            input image.

    invert : bool, optional
            Flag to indicate if the output image should be inverted from the
            frequency domain.
        """
        try:
            H = self.H
        except AttributeError:
            raise AttributeError('No filter currently set.')

        if not freq_image:
            image = fftpack.fftn(image)

        E = image * H

        if invert:
            E = np.real(fftpack.ifftn(E))

        return E
Example #18
def getFftArrays(S, type_, SOverlap=None):
    # This is getting the FFTs from the cache
    cachepar = (type_, SOverlap is None)
    cache = S.__dict__.setdefault("_fftcache", { })
    val = None
    if cachepar in cache:
        val, mctime = cache[cachepar]
        # if our mctime has changed (we have advanced in time),
        # then we need to regenerate the FFTn.
        if mctime != S.mctime:
            val = None

    # Do the actual regeneration of the functions:
    if val is None:
        #lattice = getLattice(S, type_)
        lattice = S.lattsite.copy()
        lattice[:] = 0
        if SOverlap is None:
            lattice[S.getPos(type_)] = 1
            norm = N
        else:
            lattice[S       .getPos(type_)] += 1
            lattice[SOverlap.getPos(type_)] += 1
            lattice[lattice != 2] = 0
            lattice[lattice == 2] = 1
            norm = N * S.densityOf(type_)

        #from rkddp import interact ; interact.interact()
        lattice.shape = S.lattShape
        val = fftn(lattice), ifftn(lattice), norm
        #val = function(lattice)
        S._fftcache[cachepar] = val, S.mctime
    return val
Example #19
def two_point_correlation_fft(im):
    r"""
    Calculates the two-point correlation function using fourier transforms

    Parameters
    ----------
    im : ND-array
        The image of the void space on which the 2-point correlation is desired

    Returns
    -------
    A tuple containing the x and y data for plotting the two-point correlation
    function, using the *args feature of matplotlib's plot function.  The x
    array is the distances between points and the y array is corresponding
    probabilities that points of a given distance both lie in the void space.

    Notes
    -----
    The fourier transform approach utilizes the fact that the autocorrelation
    function is the inverse FT of the power spectrum density.
    For background read the Scipy fftpack docs and for a good explanation see:
    http://www.ucl.ac.uk/~ucapikr/projects/KamilaSuankulova_BSc_Project.pdf
    """
    # Calculate half lengths of the image
    hls = (np.ceil(np.shape(im)) / 2).astype(int)
    # Fourier Transform and shift image
    F = sp_ft.ifftshift(sp_ft.fftn(sp_ft.fftshift(im)))
    # Compute Power Spectrum
    P = sp.absolute(F**2)
    # Auto-correlation is inverse of Power Spectrum
    autoc = sp.absolute(sp_ft.ifftshift(sp_ft.ifftn(sp_ft.fftshift(P))))
    tpcf = _radial_profile(autoc, r_max=np.min(hls))
    return tpcf
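
The Wiener-Khinchin relation the docstring appeals to (the autocorrelation is the inverse FT of the power spectrum) can be checked directly; a NumPy-only sketch, not part of the source:

import numpy as np

rng = np.random.default_rng(0)
x = rng.random(128)
power = np.abs(np.fft.fftn(x)) ** 2
acorr_fft = np.fft.ifftn(power).real
acorr_direct = np.array([np.dot(x, np.roll(x, -k)) for k in range(x.size)])
print(np.allclose(acorr_fft, acorr_direct))   # True: circular autocorrelation
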
Example #20
def solve_qg(parms, q0):   
    
    # Euler Step
    t = 0.
    ii = 0
    NLnm = parms.method(q0, parms)
    q    = q0 + parms.dt*NLnm;

    # AB2 step
    t = parms.dt
    ii = 1
    NLn = parms.method(q, parms)
    q   = q + 0.5*parms.dt*(3*NLn - NLnm)

    npt = int(parms.tplot/parms.dt)
    for ii in range(3,parms.nt+1):

        # AB3 step
        t = (ii-1)*parms.dt
        NL = parms.method(q, parms);
        q  = q + parms.dt/12*(23*NL - 16*NLn + 5*NLnm)
        q  = (ifftn(parms.sfilt*fftn(q))).real

        # Reset fluxes
        NLnm = NLn
        NLn  = NL

        if ii%npt==0:

            q.T.tofile('Data/2d/q.{0:d}'.format(ii//npt))
Example #21
def fast_multinomial(pii, nsum, thresh):
    """generate multinomial distribution for given probability tuple pii.

    *nsum* is the overall number of atoms of a fixed element, pii is a tuple holding the
    distribution of the isotopes.

    this generator yields all combinations and their probabilities which are above *thresh*.

    Remark: the count of the first isotope of all combinations is not computed and "yielded"; it is
    automatically *nsum* minus the sum of the elements in the combination. We could compute this
    value in this generator, but it is faster to do this later (so only if needed).

    Example:: given three isotopes ([n1]E, [n2]E, [n3]E) of an element E which have
              probabilities 0.2, 0.3 and 0.5.

    To generate all molecules consisting of 5 atoms of this element where the overall probability
    is above 0.1 we can run:

        for index, pi in fast_multinomial((0.2, 0.3, 0.5), 5, 0.1):
            print(index, pi)

    which prints:

        (1, 3) 0.15
        (2, 2) 0.135
        (2, 3) 0.1125

    the first combination refers to (1, 1, 3) (sum is 5), the second to (1, 2, 2) and the last to
    (0, 2, 3).

    So the probability of a molecule with the overall formula [n1]E1 [n2]E1 [n3]E3 is 0.15, for
    [n1]E1 [n2]E2 [n3]E2 is 0.135, and for [n2]E2 [n3]E3 is 0.1125.

    Implementation:: a multinomial distribution can be described as the n-fold convolution of an
    underlying simpler distribution. The convolution can be computed quickly with fft + inverse
    fft, as we do below.

    This is often 100 times faster than the old implementation computing the full distribution
    using its common definition.
    """
    n = len(pii)

    if n == 1:
        yield (0,), pii[0]
        return

    dim = n - 1
    a = np.zeros((nsum + 1,) * dim)
    a[(0,) * dim] = pii[0]
    for i, pi in enumerate(pii[1:]):
        idx = [0] * dim
        idx[i] = 1
        a[tuple(idx)] = pi

    probs = ifftn(fftn(a) ** nsum).real
    mask = probs >= thresh
    pi = probs[mask]
    ii = zip(*np.where(mask))
    for iii, pii in zip(ii, pi):
        yield iii, pii
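
Reproducing the docstring example, assuming numpy as np and scipy.fftpack's fftn/ifftn are in scope as the snippet expects:

result = {tuple(int(i) for i in idx): round(float(p), 4)
          for idx, p in fast_multinomial((0.2, 0.3, 0.5), 5, 0.1)}
# result == {(1, 3): 0.15, (2, 2): 0.135, (2, 3): 0.1125}
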
Example #22
def half_fft_convolve(in1, in2, size, mode = 'full', return_type='real'):
    """
    Rewrite of fftconvolve from scipy.signal ((c) Travis Oliphant 1999-2002)
    to deal with fft convolution where one signal is not fft transformed
    and the other one is.  Application is, for example, in a loop where
    convolution happens repeatedly with different kernels over the same
    signal.  First input is not transformed, second input is.
    """
    s1 = np.array(in1.shape)
    s2 = size - s1 + 1
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))

    # Always use 2**n-sized FFT
    fsize = 2 **np.ceil( np.log2( size) )
    IN1 = fftn(in1, fsize)
    IN1 *= in2
    fslice = tuple( [slice( 0, int(sz)) for sz in size] )
    ret = ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if return_type == 'real':
        ret = ret.real
    if mode == 'full':
        return ret
    elif mode == 'same':
        if np.product(s1, axis=0) > np.product(s2, axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret, osize)
    elif mode == 'valid':
        return _centered(ret, abs(s2 - s1) + 1)
Example #23
    def recon_dm_trans(self):

        for i, (x_start, x_end, y_start, y_end) in enumerate(self.point_info):
            prb_obj =  self.prb[:,:] * self.obj[x_start:x_end,y_start:y_end]
            tmp = 2. * prb_obj - self.product[i]

            if self.sf_flag:
                tmp_fft = sf.fftn(tmp) / npy.sqrt(npy.size(tmp))
            else:
                tmp_fft = npy.fft.fftn(tmp) / npy.sqrt(npy.size(tmp))
    
            amp_tmp = npy.abs(tmp_fft)
            ph_tmp = tmp_fft / (amp_tmp+self.sigma1)
            (index_x,index_y) = npy.where(self.diff_array[i] >= 0.)
            dev = amp_tmp - self.diff_array[i]
            power = npy.sum(npy.sum((dev[index_x,index_y])**2))/(self.nx_prb*self.ny_prb)
    
            if power > self.sigma2: 
                amp_tmp[index_x,index_y] = self.diff_array[i][index_x,index_y] + dev[index_x,index_y] * npy.sqrt(self.sigma2/power)

            if self.sf_flag:
                tmp2 =  sf.ifftn(amp_tmp*ph_tmp) *  npy.sqrt(npy.size(tmp))
            else:
                tmp2 = npy.fft.ifftn(amp_tmp*ph_tmp) * npy.sqrt(npy.size(tmp))
                    
            self.product[i] += self.beta*(tmp2 - prb_obj)

        del(prb_obj)
        del(tmp)
        del(amp_tmp)
        del(ph_tmp)
        del(tmp2)
Example #24
  def FourierToSpaceTimeNorway(P12,fx1,fy1,f1,**kwargs):

    opt = dotdict({'resolution' : np.r_[6e-5, 6e-5, 6e-5],
                   'c'          : 1540.0})
    opt.update(**kwargs)

    resolution = opt.resolution
    c          = opt.c

    testPlot = False

    dfx = fx1[1] - fx1[0]
    dfy = fy1[1] - fy1[0]
    df = f1[1] - f1[0]

    Nfx=int(2*np.round(c/dfx/resolution[0]/2)+1)
    Nfy=int(2*np.round(c/dfy/resolution[1]/2)+1)
    Nfs=int(2*np.round(c/2/df/resolution[2]/2)+1)

    xax = np.linspace(-1/dfx*c/2, 1/dfx*c/2, Nfx)
    yax = np.linspace(-1/dfy*c/2, 1/dfy*c/2, Nfy)
    zax = np.linspace(-1/df*c/4, 1/df*c/4, Nfs)

    [Nx,Ny,Nf]=P12.shape
    P12zp=np.zeros((Nfx,Nfy,Nfs),dtype=np.complex128)
    ix1=int(np.round(1+(Nfx-1)/2-(Nx-1)/2))
    iy1=int(np.round(1+(Nfy-1)/2-(Ny-1)/2))
    if1=int(np.round(1+(Nfs-1)/2+1+f1[0]/df) - 1)
    P12zp[ix1:ix1+Nx, iy1:iy1+Ny, if1:if1+Nf] = P12
    P12zp = np.fft.fftshift(P12zp)
    p12 = ifftn(P12zp)
    p12 = np.fft.fftshift(p12)

    return (p12,xax,yax,zax)
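Example #25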
def fftconvolve(in1, in2, in3=None, mode="full"):
    """Convolve two N-dimensional arrays using FFT. See convolve.

    copied from scipy, but here used to try out inverse filter
    doesn't work or I can't get it to work
    """
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    size = s1+s2-1

    # Always use 2**n-sized FFT
    fsize = 2**np.ceil(np.log2(size))
    IN1 = fftn(in1,fsize)
    #IN1 *= fftn(in2,fsize)
    IN1 /= fftn(in2,fsize)  # use inverse filter
    # note the inverse is elementwise not matrix inverse
    # is this correct, NO  doesn't seem to work
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if product(s1,axis=0) > product(s2,axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret,osize)
    elif mode == "valid":
        return _centered(ret,abs(s2-s1)+1)
Example #26
    def __init__(self, ps, vox, PSSize):
        ps = ps.max(2)
        ps = ps - ps.min()

        ps = ps*scipy.signal.hanning(ps.shape[0])[:,None]*scipy.signal.hanning(ps.shape[1])[None,:]
        ps = ps/ps.sum()
        #PSFFileName = PSFFilename

        pw = (numpy.array(PSSize) - ps.shape)/2.
        pw1 = numpy.floor(pw)
        pw2 = numpy.ceil(pw)

        self.cachedPSF = pad.with_constant(ps, ((pw2[0], pw1[0]), (pw2[1], pw1[1])), (0,))
        self.cachedOTFH = (ifftn(self.cachedPSF)*self.cachedPSF.size).astype('complex64')
        self.cachedOTF2 = (self.cachedOTFH*fftn(self.cachedPSF)).astype('complex64')

        self.weinerFT = fftw3.create_aligned_array(self.cachedOTFH.shape, 'complex64')
        self.weinerR = fftw3.create_aligned_array(self.cachedOTFH.shape, 'float32')

        self.planForward = fftw3.Plan(self.weinerR, self.weinerFT, flags = FFTWFLAGS, nthreads=NTHREADS)
        self.planInverse = fftw3.Plan(self.weinerFT, self.weinerR, direction='reverse', flags = FFTWFLAGS, nthreads=NTHREADS)
        
        fftwWisdom.save_wisdom()
        
        self.otf2mean = self.cachedOTF2.mean()
Example #27
def get_h1(imgs):
    ff = fftn(imgs)
    h1 = np.absolute(ifftn(ff[1, :, :]))
    scale = np.max(h1)
    # h1 = scale * gaussian_filter(h1 / scale, 5)
    h1 = scale * gaussian(h1 / scale, 5)
    return h1
Example #28
def customfftconvolve(in1, in2, mode="full", types=('','')):
  """ Pretty much the same as original fftconvolve, but supports
      having operands as fft already 
  """

  in1 = asarray(in1)
  in2 = asarray(in2)

  if in1.ndim == in2.ndim == 0:  # scalar inputs
    return in1 * in2
  elif not in1.ndim == in2.ndim:
    raise ValueError("in1 and in2 should have the same dimensionality")
  elif in1.size == 0 or in2.size == 0:  # empty arrays
    return array([])

  s1 = array(in1.shape)
  s2 = array(in2.shape)
  complex_result = False
  #complex_result = (np.issubdtype(in1.dtype, np.complex) or
  #                  np.issubdtype(in2.dtype, np.complex))
  shape = s1 + s2 - 1
  
  if mode == "valid":
    _check_valid_mode_shapes(s1, s2)

  # Speed up FFT by padding to optimal size for FFTPACK
  fshape = [_next_regular(int(d)) for d in shape]
  fslice = tuple([slice(0, int(sz)) for sz in shape])

  if not complex_result:
    if types[0] == 'fft':
      fin1 = in1#_unfold_fft(in1, fshape)
    else:
      fin1 = rfftn(in1, fshape)

    if types[1] == 'fft':
      fin2 = in2#_unfold_fft(in2, fshape)
    else:
      fin2 = rfftn(in2, fshape)
    ret = irfftn(fin1 * fin2, fshape)[fslice].copy()
  else:
    if types[0] == 'fft':
      fin1 = _unfold_fft(in1, fshape)
    else:
      fin1 = fftn(in1, fshape)
    if types[1] == 'fft':
      fin2 = _unfold_fft(in2, fshape)
    else:
      fin2 = fftn(in2, fshape)
    ret = ifftn(fin1 * fin2)[fslice].copy()

  if mode == "full":
    return ret
  elif mode == "same":
    return _centered(ret, s1)
  elif mode == "valid":
    return _centered(ret, s1 - s2 + 1)
  else:
    raise ValueError("Acceptable mode flags are 'valid',"
                     " 'same', or 'full'.")
Example #29
    def _make_noise_input(self, init):
        """
            Creates an initial input (generated) image.
        """

        # specify dimensions and create grid in Fourier domain
        dims = tuple(self.net.blobs["data"].data.shape[2:]) + \
               (self.net.blobs["data"].data.shape[1], )
        grid = np.mgrid[0:dims[0], 0:dims[1]]

        # create frequency representation for pink noise
        Sf = (grid[0] - (dims[0]-1)/2.0) ** 2 + \
             (grid[1] - (dims[1]-1)/2.0) ** 2
        Sf[np.where(Sf == 0)] = 1
        Sf = np.sqrt(Sf)
        Sf = np.dstack((Sf**int(init),)*dims[2])

        # apply ifft to create pink noise and normalize
        ifft_kernel = np.cos(2*np.pi*np.random.randn(*dims)) + \
                      1j*np.sin(2*np.pi*np.random.randn(*dims))
        img_noise = np.abs(ifftn(Sf * ifft_kernel))
        img_noise -= img_noise.min()
        img_noise /= img_noise.max()

        # preprocess the pink noise image
        x0 = self.transformer.preprocess("data", img_noise)

        return x0
Example #30
def fftconvolve(in1, in2, mode="full"):
    """Convolve two N-dimensional arrays using FFT. See convolve.

    """
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    size = s1 + s2 - 1

    # Always use 2**n-sized FFT
    fsize = 2 ** np.ceil(np.log2(size))
    IN1 = fftn(in1, fsize)
    IN1 *= fftn(in2, fsize)
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if product(s1, axis=0) > product(s2, axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret, osize)
    elif mode == "valid":
        return _centered(ret, abs(s2 - s1) + 1)
Example #31
def fftconvolve(in1, in2, mode="full"):
    """Convolve two N-dimensional arrays using FFT. See convolve.

    """
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    size = s1+s2-1
    IN1 = fftn(in1,size)
    IN1 *= fftn(in2,size)
    ret = ifftn(IN1)
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if product(s1,axis=0) > product(s2,axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret,osize)
    elif mode == "valid":
        return _centered(ret,abs(s2-s1)+1)
Example #32
def calculate_spectral_ape_flux_baroclinic_barotropic(kx,ky,u_bt,v_bt,u_bc,v_bc,ape):
   """
   """
   
   i = complex(0, 1)
   
   uhat_bt = fftn(u_bt)
   vhat_bt = fftn(v_bt)
   
   nx = u_bt.shape[1]
   ny = u_bt.shape[0]
   nz = u_bc.shape[0]
   
   ## FFT of the square root of APE
   ## We will multiply by conj of APE later, so
   ## we must take square root so that the final 
   ## product is APE
   ahat = fftn(np.sqrt(ape))
   
   # dAPE/dx in x,y
   ddx_ape = np.real( ifftn(i*kx*ahat) )
   # dAPE/dy in x,y
   ddy_ape = np.real( ifftn(i*ky*ahat) )
            
   # u_bt * dAPE/dx + v_bt * dAPE/dy
   adv_bt_ape = u_bt * ddx_ape + v_bt * ddy_ape
   
   for jk in range(0,nz):
      uhat_bc = fftn(u_bc[jk,:,:])
      vhat_bc = fftn(v_bc[jk,:,:])
      # u_bc * dAPE/dx + v_bc * dAPE/dy
      if (jk == 0):
         adv_bc_ape  = u_bc[jk,:,:] * ddx_ape + v_bc[jk,:,:] * ddy_ape
      else:
         adv_bc_ape += u_bc[jk,:,:] * ddx_ape + v_bc[jk,:,:] * ddy_ape
            
   adv_ape_bt_ape = np.real( -np.conj(ahat)*fftn(adv_bt_ape) )   #[m2/s3]
   adv_ape_bc_ape = np.real( -np.conj(ahat)*fftn(adv_bc_ape) )   #[m2/s3]
   nn = (ahat.shape[1]**2 * ahat.shape[0]**2)
   adv_ape_bt_ape = adv_ape_bt_ape / float(nn)
   adv_ape_bc_ape = adv_ape_bc_ape / float(nn)
   
   data = {}
   data['adv_ape_bt_ape'] = adv_ape_bt_ape
   data['adv_ape_bc_ape'] = adv_ape_bc_ape
   
   return data
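
The kx and ky arguments are assumed to be 2-D angular-wavenumber grids laid out to match fftn's frequency ordering; a hypothetical helper (not part of the source) for building them:

import numpy as np

def wavenumber_grids(ny, nx, dx=1.0, dy=1.0):
    # Angular wavenumbers in fftn ordering, broadcast to 2-D grids of shape (ny, nx).
    kx_1d = 2.0 * np.pi * np.fft.fftfreq(nx, d=dx)
    ky_1d = 2.0 * np.pi * np.fft.fftfreq(ny, d=dy)
    kx, ky = np.meshgrid(kx_1d, ky_1d)
    return kx, ky
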
Example #33
def fftconvolve3(in1, in2=None, in3=None, mode="full"):
    """Convolve two N-dimensional arrays using FFT. See convolve.

    for use with arma (old version: in1=num, in2=den, in3=data)

    * better for consistency with other functions in1=data in2=num in3=den
    * note in2 and in3 need to have consistent dimension/shape
      since I'm using max of in2, in3 shapes and not the sum

    copied from scipy.signal.signaltools, but here used to try out inverse
    filter doesn't work or I can't get it to work

    2010-10-23
    looks ok to me for 1d,
    from results below with padded data array (fftp)
    but it doesn't work for multidimensional inverse filter (fftn)
    original signal.fftconvolve also uses fftn
    """
    if (in2 is None) and (in3 is None):
        raise ValueError('at least one of in2 and in3 needs to be given')
    s1 = np.array(in1.shape)
    if in2 is not None:
        s2 = np.array(in2.shape)
    else:
        s2 = 0
    if in3 is not None:
        s3 = np.array(in3.shape)
        s2 = max(s2, s3)  # try this looks reasonable for ARMA
        #s2 = s3

    complex_result = (np.issubdtype(in1.dtype, np.complexfloating)
                      or np.issubdtype(in2.dtype, np.complexfloating))
    size = s1 + s2 - 1

    # Always use 2**n-sized FFT
    fsize = 2**np.ceil(np.log2(size))
    #convolve shorter ones first, not sure if it matters
    if in2 is not None:
        IN1 = fft.fftn(in2, fsize)
    if in3 is not None:
        IN1 /= fft.fftn(in3, fsize)  # use inverse filter
    # note the inverse is elementwise not matrix inverse
    # is this correct, NO  doesn't seem to work for VARMA
    IN1 *= fft.fftn(in1, fsize)
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = fft.ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if np.product(s1, axis=0) > np.product(s2, axis=0):
            osize = s1
        else:
            osize = s2
        return trim_centered(ret, osize)
    elif mode == "valid":
        return trim_centered(ret, abs(s2 - s1) + 1)
Example #34
 def __FilterThresh2D(self, data):
     #lowpass filter to suppress noise
     #a = ndimage.gaussian_filter(data.astype('f'), self.filterRadiusLowpass)
     a = ifftshift(ifftn((fftn(data.astype('f')) * cachedOTFH))).real
     #a = ifftshift(ifftn((fftn(data.astype('f'))*cachedOTFH)*(self.lamb**2 + cachedOTF2.mean())/(self.lamb**2 + cachedOTF2))).real
     #lowpass filter again to find background
     #b = ndimage.gaussian_filter(a, self.filterRadiusHighpass)
     return a
Example #35
def fft_filter(g, w):
    wp = padding(g, w)

    W = fftn(wp)
    G = fftn(g)
    R = np.multiply(W, G)

    return np.real(fftshift(ifftn(R)))
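
The padding(g, w) helper used above is not shown; a plausible stand-in (an assumption, not the original implementation) zero-pads the kernel to the image shape before the element-wise product in the frequency domain:

import numpy as np

def padding(g, w):
    # Hypothetical helper: embed the small kernel w into a zero array of g's shape.
    wp = np.zeros(g.shape, dtype=float)
    wp[tuple(slice(0, s) for s in w.shape)] = w
    return wp
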
Example #36
    def Ahfunc(self, f):
        """Conjugate transform - convolve with conj. PSF"""
        fs = np.reshape(f, (self.height, self.width, self.depth))

        F = fftn(fs)
        d = ifftshift(ifftn(F * self.Ht))
        d = np.real(d)
        return np.ravel(d)
Example #37
def pixel_shift_2d(array, x_shift, y_shift):
    nx, ny = npy.shape(array)
    tmp = sf.ifftshift(sf.ifftn(sf.fftshift(array)))
    nest = npy.mgrid[0:nx, 0:ny]
    tmp = tmp * npy.exp(1j * 2 * npy.pi *
                        (-1. * x_shift * (nest[0, :, :] - nx / 2.) /
                         (nx) - y_shift * (nest[1, :, :] - ny / 2.) / (ny)))
    return sf.ifftshift(sf.fftn(sf.fftshift(tmp)))
Example #38
def read_fft_slice(path):
    d = pickle.load(open(path, 'rb'))['data']
    ff1 = fftn(d)
    fh = np.absolute(ifftn(ff1[1, :, :]))
    fh[fh < 0.1 * np.max(fh)] = 0.0
    d = 1. * fh / np.max(fh)
    d = np.expand_dims(d, axis=0)
    return d
Example #39
def RheinbergIllumination(ComplexField, CutR, CutG, CutB):
    """
    Processing Rheinberg illumination images from tomographic acquisitions

    Parameters
    ----------
    ComplexField : complex128
        Reconstructed complex refractive index distribution.
    CutR : tuple of int
        Inner and outer cut-off frequencies of the Red filter (assumed to be an annular mask).
    CutG : int
        Cut-off frequency of the Green filter (assumed to be a circular mask).
    CutB : tuple of int
        Inner and outer cut-off frequencies of the Blue filter (assumed to be an annular mask).

    Returns
    -------
    FieldR, FieldG, FieldB : complex128
        Phase-contrast filtered fields for the red, green and blue channels.

    """
    Spectrum = fftshift(fftn(ComplexField))
    kx, ky, kz = np.meshgrid(np.arange(-int(Spectrum.shape[1]/2), int(Spectrum.shape[1]/2)),
                             np.arange(-int(Spectrum.shape[0]/2), int(Spectrum.shape[0]/2)),
                             np.arange(-int(Spectrum.shape[2]/2), int(Spectrum.shape[2]/2)))
    FiltR = np.zeros((Spectrum.shape[1], Spectrum.shape[0], Spectrum.shape[2]), dtype=complex)
    FiltG = np.zeros((Spectrum.shape[1], Spectrum.shape[0], Spectrum.shape[2]), dtype=complex)
    FiltB = np.zeros((Spectrum.shape[1], Spectrum.shape[0], Spectrum.shape[2]), dtype=complex)

    FiltR[kx**2 + ky**2 + kz**2 < CutR[1]**2] = 1
    FiltR[kx**2 + ky**2 + kz**2 < CutR[0]**2] = 0

    FiltG[kx**2 + ky**2 + kz**2 < CutG**2] = 1

    FiltB[kx**2 + ky**2 + kz**2 < CutB[1]**2] = 1
    FiltB[kx**2 + ky**2 + kz**2 < CutB[0]**2] = 0

    R = Spectrum * FiltR
    G = Spectrum * FiltG
    B = Spectrum * FiltB

    FieldR = ifftn(ifftshift(R))
    FieldG = ifftn(ifftshift(G))
    FieldB = ifftn(ifftshift(B))

    return FieldR, FieldG, FieldB
Example #40
def FFTGammaW(GammaW, _map, BackForth):
    import scipy.fftpack as fft  # scipy.fftpack supports complex64 in, complex64 out
    # import numpy.fft as fft
    OldShape = GammaW.shape
    TauBin = OldShape[-1]
    if BackForth == 1:
        GammaW = fft.fftn(GammaW, axes=[2, 3])
    elif BackForth == -1:
        GammaW = fft.ifftn(GammaW, axes=[2, 3])
    NewShape = (_map.L[0], _map.L[1], _map.L[0], _map.L[1], TauBin, TauBin)
    GammaW = GammaW.reshape(NewShape)
    if BackForth == 1:
        GammaW = fft.fftn(GammaW, axes=[0, 1, 2, 3])
    elif BackForth == -1:
        GammaW = fft.ifftn(GammaW, axes=[0, 1, 2, 3])
    GammaW = GammaW.reshape(OldShape)
    return GammaW
Example #41
def fft_imagefilter(g, w):
    wp = padd_filter(w, g.shape[0])
    W = fftn(wp)
    G = fftn(g)
    R = np.multiply(W, G)

    r = np.real(fftshift(ifftn(R)))
    return r
Example #42
 def interpolate_fft(self, arr, scale=1):
     shape = np.shape(arr)
     pad_width = tuple((int(np.ceil(a / 2 * (scale - 1))),
                        int(np.floor(a / 2 * (scale - 1)))) for a in shape)
     padded = np.pad(fft.fftshift(fft.fftn(arr)),
                     pad_width,
                     mode='constant')
     return fft.ifftn(scale**len(arr.shape) * fft.ifftshift(padded))
Example #43
    def realise_density(self, filename=None, cols=None):
        """Create realisation of the power spectrum by randomly sampling
        from Gaussian distributions of variance P(k) for each k mode."""

        k, pk = (None, None)
        if filename:
            assert (cols is not None)
            tmp = np.loadtxt(filename, unpack=True)
            k = tmp[cols[0]]
            pk = tmp[cols[1]]

            def log_interp1d(xx, yy, kind='linear'):
                logx = np.log10(xx)
                logy = np.log10(yy)
                lin_interp = si.InterpolatedUnivariateSpline(logx, logy)
                log_interp = lambda zz: np.power(10.0, lin_interp(np.log10(zz))
                                                 )
                return log_interp

            #pk = tmp[cols[1]]
            # Interp pk values to the flattened 3D self.k array
            f = log_interp1d(k, pk)

            k = self.k.flatten()
            pk = f(k)
            pk = np.reshape(pk, np.shape(self.k))
            pk = np.nan_to_num(pk)  # Remove NaN at k=0 (and any others...)
        else:
            k = self.k.flatten()
            pk = cp.perturbation.power_spectrum(k, **self.cosmo)
            pk = np.reshape(pk, np.shape(self.k))
            pk = np.nan_to_num(pk)  # Remove NaN at k=0 (and any others...)

        # Normalise the power spectrum properly (factor of volume, and norm.
        # factor of 3D DFT)
        pk *= self.boxfactor

        # Generate Gaussian random field with given power spectrum
        #re = np.random.normal(0., 1., np.shape(self.k))
        # im = np.random.normal(0., 1., np.shape(self.k)
        import random
        random.seed(1234)
        re = np.array([
            random.normalvariate(0., 1.) for i in range(len(self.k.flatten()))
        ]).reshape(self.k.shape)
        im = np.array([
            random.normalvariate(0., 1.) for i in range(len(self.k.flatten()))
        ]).reshape(self.k.shape)
        self.delta_k = (re + 1j * im) * np.sqrt(pk)

        # Transform to real space. Here, we are discarding the imaginary part
        # of the inverse FT! But we can recover the correct (statistical)
        # result by multiplying by a factor of sqrt(2). Also, there is a factor
        # of N^3 which seems to appear by a convention in the Discrete FT.
        self.delta_x = fft.ifftn(self.delta_k).real

        # Finally, get the Fourier transform on the real field back
        self.delta_k = fft.fftn(self.delta_x)
Example #44
def bgtensor(img, lsigma, rho=0.2):
    eps = 1e-12
    fimg = fftn(img, overwrite_x=True)

    for s in lsigma:
        jvbuffer = bgkern3(kerlen=math.ceil(s) * 6 + 1, sigma=s, rho=rho)
        jvbuffer = fftn(jvbuffer, shape=fimg.shape, overwrite_x=True) * fimg
        fimg = ifftn(jvbuffer, overwrite_x=True)
        yield hessian3(np.real(fimg))
Example #45
def deconvolve(tracks, impulse_response):
    tracklet_shape = tracks.shape[2:]
    conv_inverse_spectrum = 1 / fftpack.fftn(impulse_response, tracklet_shape)
    conv_inverse_spectrum = np.expand_dims(np.expand_dims(
        conv_inverse_spectrum, axis=0),
                                           axis=0)
    track_spectrum = fftpack.fftn(tracks, axes=list(range(tracks.ndim))[2:])
    return fftpack.ifftn(track_spectrum * conv_inverse_spectrum,
                         axes=list(range(tracks.ndim))[2:]).real
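
A quick sanity check, assuming numpy as np and scipy.fftpack as fftpack as the snippet uses: deconvolving with a unit impulse leaves the tracklets unchanged.

tracks = np.random.rand(2, 3, 8, 8)   # (batch, channel, H, W) tracklets
impulse = np.zeros((8, 8))
impulse[0, 0] = 1.0                   # unit impulse -> flat spectrum
print(np.allclose(deconvolve(tracks, impulse), tracks))   # True
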
Example #46
def convolve(x, y):
    '''
    Compute the nD convolution of two real arrays of the same size.
    '''
    xHat = fftpack.fftn(x + 0j)
    yHat = fftpack.fftn(y + 0j)
    xHat *= yHat

    return np.real(fftpack.ifftn(xHat))
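
A quick check, assuming numpy as np and scipy.fftpack as fftpack as above: circular convolution with a unit impulse returns the input.

x = np.arange(8.0)
delta = np.zeros(8)
delta[0] = 1.0
print(np.allclose(convolve(x, delta), x))   # True
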
Example #47
def deconvolve(star, psf):
    '''
    Generic deconvolution function
    '''
    star_fft = fftpack.fftshift(fftpack.fftn(star))
    psf_fft = fftpack.fftshift(fftpack.fftn(psf))
    
    return np.real( fftpack.fftshift(fftpack.ifftn(fftpack.ifftshift(star_fft/psf_fft))) )
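
A round-trip sketch, assuming numpy as np and scipy.fftpack as fftpack as above. Note the outer fftshift in the snippet: the recovered image comes back circularly shifted by half the array size.

rng = np.random.default_rng(1)
orig = rng.random((32, 32))
psf = np.zeros((32, 32))
psf[0, 0], psf[0, 1] = 0.6, 0.4   # simple two-tap blur; its spectrum has no zeros
blurred = np.real(fftpack.ifftn(fftpack.fftn(orig) * fftpack.fftn(psf)))
recovered = deconvolve(blurred, psf)
print(np.allclose(recovered, np.fft.fftshift(orig)))   # True (up to round-off)
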
    
Example #48
 def get_H1(i):
     log("Fourier transforming on slice %d..." % i, 3)
     ff = fftn(images[i])
     first_harmonic = ff[1, :, :]
     log("Inverse Fourier transforming on slice %d..." % i, 3)
     result = np.absolute(ifftn(first_harmonic))
     log("Performing Gaussian blur on slice %d..." % i, 3)
     result = cv2.GaussianBlur(result, (5, 5), 0)
     return result
Example #49
def pixel_shift(array, x_shift, y_shift, z_shift):
    nx, ny, nz = np.shape(array)
    tmp = sf.ifftshift(sf.ifftn(sf.fftshift(array)))
    nest = np.mgrid[0:nx, 0:ny, 0:nz]
    tmp = tmp * np.exp(1j * 2 * np.pi *
                       (-1. * x_shift * (nest[0, :, :, :] - nx / 2.) /
                        (nx) - y_shift * (nest[1, :, :, :] - ny / 2.) /
                        (ny) - z_shift * (nest[2, :, :, :] - nz / 2.) / (nz)))
    return sf.ifftshift(sf.fftn(sf.fftshift(tmp)))
Example #50
def do_Hks_to_HRs ( data_controller ):
  from scipy import fftpack as FFT

  arry,attr = data_controller.data_dicts()

  #----------------------------------------------------------
  # Define the Hamiltonian and overlap matrix in real space:
  #   HRs and SRs (noinv and nosym = True in pw.x)
  #----------------------------------------------------------
  if rank == 0:
    # Original k grid to R grid
    arry['HRs'] = np.zeros_like(arry['Hks'])
    arry['HRs'] = FFT.ifftn(arry['Hks'], axes=[2,3,4])

    if attr['non_ortho']:
      arry['SRs'] = np.zeros_like(arry['Sks'])
      arry['SRs'] = FFT.ifftn(arry['Sks'], axes=[2,3,4])
      del arry['Sks']
Example #51
    def gaussian_blur_box(self, prot, resolution, box_size_x, box_size_y, box_size_z, sigma_coeff=0.356, normalise=True,filename="None"):
        """
        
        Returns a Map instance based on a Gaussian blurring of a protein.
        The convolution of atomic structures is done in reciprocal space.
    
        Arguments:
        
            *prot*
                the Structure instance to be blurred.
            *resolution*
                the resolution, in Angstroms, to blur the protein to.
            *box_size_x*
                 x dimension of map box in Angstroms.
            *box_size_y*
                y dimension of map box in Angstroms.
            *box_size_z* 
                z dimension of map box in Angstroms.
            *sigma_coeff*
                the sigma value (multiplied by the resolution) that controls the width of the Gaussian. 
                Default value is 0.356.
                
                Other values used :
                
                    0.187R corresponding with the Gaussian width of the Fourier transform falling to half the maximum at 1/resolution, as used in Situs (Wriggers et al, 1999);
                    
                    0.225R which makes the Fourier transform of the distribution fall to 1/e of its maximum value at wavenumber 1/resolution, the default in Chimera (Petterson et al, 2004)
                    
                    0.356R corresponding to the Gaussian width at 1/e maximum height equaling the resolution, an option in Chimera (Petterson et al, 2004);
                    
                    0.425R the fullwidth half maximum being equal to the resolution, as used by FlexEM (Topf et al, 2008);
                                
                    0.5R the distance between the two inflection points being the same length as the resolution, an option in Chimera (Petterson et al, 2004);
                                
                    1R where the sigma value simply equal to the resolution, as used by NMFF (Tama et al, 2004).

            *filename*
                output name of the map file.
                
        """
        densMap = self.protMapBox(prot, 1, resolution, box_size_x, box_size_y, box_size_z, filename)
        x_s = int(densMap.x_size()*densMap.apix)
        y_s = int(densMap.y_size()*densMap.apix)
        z_s = int(densMap.z_size()*densMap.apix)
        ##newMap = densMap.resample_by_box_size([z_s, y_s, x_s])
        ##newMap.fullMap *= 0
        newMap = densMap.copy()
        newMap.fullMap = zeros((z_s, y_s, x_s))
        newMap.apix = (densMap.apix*densMap.x_size())/x_s
        sigma = sigma_coeff*resolution
        newMap = self.make_atom_overlay_map(newMap, prot)
        fou_map = fourier_gaussian(fftn(newMap.fullMap), sigma)
        newMap.fullMap = real(ifftn(fou_map))
        newMap = newMap.resample_by_box_size(densMap.box_size())
        if normalise:
            newMap = newMap.normalise()
        return newMap
Example #52
    def psf_calc(self, psf, kz, data_size):
        """Pre calculate OTFs etc ..."""
        g = psf

        self.height = data_size[0]
        self.width = data_size[1]
        self.depth = data_size[2]

        (x, y,
         z) = np.mgrid[-np.floor(self.height / 2.0):(np.ceil(self.height /
                                                             2.0)),
                       -np.floor(self.width / 2.0):(np.ceil(self.width / 2.0)),
                       -np.floor(self.depth / 2.0):(np.ceil(self.depth / 2.0))]

        gs = np.shape(g)
        g = g[int(np.floor((gs[0] - self.height) /
                           2)):int(self.height +
                                   np.floor((gs[0] - self.height) / 2)),
              int(np.floor((gs[1] - self.width) /
                           2)):int(self.width +
                                   np.floor((gs[1] - self.width) / 2)),
              int(np.floor((gs[2] - self.depth) /
                           2)):int(self.depth +
                                   np.floor((gs[2] - self.depth) / 2))]

        g = abs(ifftshift(ifftn(abs(fftn(g)))))
        g = (g / sum(sum(sum(g))))

        self.g = g

        self.H = fftn(g).astype('f')
        self.Ht = ifftn(g).astype('f')

        tk = 2 * kz * z

        t = g * np.exp(1j * tk)
        self.He = np.cast['F'](fftn(t))
        self.Het = np.cast['F'](ifftn(t))

        tk = 2 * tk

        t = g * np.exp(1j * tk)
        self.He2 = np.cast['F'](fftn(t))
        self.He2t = np.cast['F'](ifftn(t))
Example #53
    def OnCorrelate(self, event):
        from scipy.fftpack import fftn, ifftn
        from pylab import fftshift, ifftshift
        import numpy as np
        #ch0 = self.image.data[:,:,:,0]
        chanList = self.image.data.dataList

        for i in range(len(self.rbs)):
            if self.rbs[i].GetValue():
                ch0 = self.image.data[:, :, :, i]

        ch0 = np.maximum(ch0 - ch0.mean(), 0)

        F0 = fftn(ch0)

        for i in range(self.image.data.shape[3]):
            if not self.rbs[i].GetValue():
                ch0 = self.image.data[:, :, :, i]
                ch0 = np.maximum(ch0 - ch0.mean(), 0)
                Fi = ifftn(ch0)

                corr = abs(fftshift(ifftn(F0 * Fi)))

                corr -= corr.min()

                corr = np.maximum(corr - corr.max() * .75, 0)

                xi, yi, zi = np.where(corr)

                corr_s = corr[corr > 0]
                corr_s /= corr_s.sum()

                dxi = ((xi * corr_s).sum() -
                       corr.shape[0] / 2.) * chanList[i].voxelsize[0]
                dyi = ((yi * corr_s).sum() -
                       corr.shape[1] / 2.) * chanList[i].voxelsize[1]
                dzi = ((zi * corr_s).sum() -
                       corr.shape[2] / 2.) * chanList[i].voxelsize[2]

                self.xctls[i].SetValue(str(int(dxi)))
                self.yctls[i].SetValue(str(int(dyi)))
                self.zctls[i].SetValue(str(int(dzi)))

        self.OnApply(None)
Example #54
def downsample(insamples, szout, mask=None):
    """
    Blur and downsample 1D to 3D objects such as, curves, images or volumes

    The function handles odd and even-sized arrays correctly. The center of
    an odd array is taken to be at (n+1)/2, and an even array is n/2+1.
    :param insamples: Set of objects to be downsampled in the form of an array, the last dimension
                    is the number of objects.
    :param szout: The desired resolution of the output objects.
    :return: An array consists of the blurred and downsampled objects.
    """

    ensure(
        insamples.ndim - 1 == np.size(szout),
        'The number of downsampling dimensions is not the same as that of objects.'
    )

    L_in = insamples.shape[0]
    L_out = szout[0]
    ndata = insamples.shape[-1]
    outdims = np.r_[szout, ndata]

    outsamples = np.zeros(outdims, dtype=insamples.dtype)

    if mask is None:
        mask = 1.0

    if insamples.ndim == 2:
        # stack of one dimension objects

        for idata in range(ndata):
            insamples_fft = crop_pad(fftshift(fft(insamples[:, idata])),
                                     L_out) * mask
            outsamples[:, idata] = np.real(
                ifft(ifftshift(insamples_fft)) * (L_out / L_in))

    elif insamples.ndim == 3:
        # stack of two dimension objects
        for idata in range(ndata):
            insamples_fft = crop_pad(fftshift(fft2(insamples[:, :, idata])),
                                     L_out) * mask
            outsamples[:, :, idata] = np.real(
                ifft2(ifftshift(insamples_fft)) * (L_out**2 / L_in**2))

    elif insamples.ndim == 4:
        # stack of three dimension objects
        for idata in range(ndata):
            insamples_fft = crop_pad(fftshift(fftn(insamples[:, :, :, idata])),
                                     L_out) * mask
            outsamples[:, :, :, idata] = np.real(
                ifftn(ifftshift(insamples_fft)) * (L_out**3 / L_in**3))

    else:
        raise RuntimeError('Number of dimensions > 3 for input objects.')

    return outsamples
Example #55
def preWhitenVolumeSoftBG(**kwargs):
    '''
	Pre-whitening using noise estimates from a soft mask of the background.
	Returns the pre-whitened volume and various spectra. (Alp Kucukelbir, 2013)

	'''
    print('\n= Pre-whitening')
    tStart = time()

    n = kwargs.get('n', 0)
    elbowAngstrom = kwargs.get('elbowAngstrom', 0)
    dataBGSpect = kwargs.get('dataBGSpect', 0)
    dataF = kwargs.get('dataF', 0)
    softBGmask = kwargs.get('softBGmask', 0)
    vxSize = kwargs.get('vxSize', 0)
    rampWeight = kwargs.get('rampWeight', 1.0)

    epsilon = 1e-10

    pWfilter = createPreWhiteningFilter(n=n,
                                        spectrum=dataBGSpect,
                                        elbowAngstrom=elbowAngstrom,
                                        rampWeight=rampWeight,
                                        vxSize=vxSize)

    # Apply the pre-whitening filter
    dataF = np.multiply(pWfilter['pWfilter'], dataF)

    dataPWFabs = np.abs(dataF)
    dataPWFabs = dataPWFabs - np.min(dataPWFabs)
    dataPWFabs = dataPWFabs / np.max(dataPWFabs)
    dataPWSpect = sphericalAverage(dataPWFabs**2) + epsilon

    dataPW = np.real(fftpack.ifftn(fftpack.ifftshift(dataF)))
    del dataF

    dataPWBG = np.multiply(dataPW, softBGmask)
    dataPWBG = np.array(fftpack.fftshift(
        fftpack.fftn(dataPWBG, overwrite_x=True)),
                        dtype='complex64')
    dataPWBGFabs = np.abs(dataPWBG)
    del dataPWBG

    dataPWBGFabs = dataPWBGFabs - np.min(dataPWBGFabs)
    dataPWBGFabs = dataPWBGFabs / np.max(dataPWBGFabs)
    dataPWBGSpect = sphericalAverage(dataPWBGFabs**2) + epsilon

    m, s = divmod(time() - tStart, 60)
    print "  :: Time elapsed: %d minutes and %.2f seconds" % (m, s)

    return {
        'dataPW': dataPW,
        'dataPWSpect': dataPWSpect,
        'dataPWBGSpect': dataPWBGSpect,
        'peval': pWfilter['peval']
    }
Example #56
def fourier(f, fBNC, tol):
    J = f.shape[0]
    Jp = 2 * J
    #Jp  = 3*J
    RHS = np.zeros((Jp, Jp))

    RHS[J // 2:3 * J // 2, J // 2:3 * J // 2] = f
    #================================
    Fcoeff = fftn(RHS)
    freq = np.zeros((Jp))
    for n in range(0, Jp // 2):
        freq[n] = n
    for n in range(Jp // 2, Jp):
        freq[n] = -Jp - n

    #print(freq.shape, "freq shape \n", Jp, "Jp \n" , freq, "freq")

    Fcoeff[0, 0] = 0

    #X AXIS
    for i in range(1, Jp):
        eigi = np.cos(2 * freq[i] * np.pi / (Jp)) - 1

        eig = 2 * (eigi)
        Fcoeff[i, 0] = (dt**2) * Fcoeff[i, 0] / eig
    #Y AXIS
    for j in range(1, Jp):
        eigj = np.cos(2 * freq[j] * np.pi / (Jp)) - 1

        eig = 2 * (eigj)
        Fcoeff[0, j] = (dt**2) * Fcoeff[0, j] / eig

    #FULL
    for i in range(1, Jp):

        eigi = np.cos(2 * freq[i] * np.pi / (Jp)) - 1

        for j in range(1, Jp):

            eigj = np.cos(2 * freq[j] * np.pi / (Jp)) - 1

            eig = 2 * (eigi + eigj)

            Fcoeff[i, j] = (dt**2) * Fcoeff[i, j] / eig

    #Fcoeff=np.nan_to_num(Fcoeff)

    #==============================
    u = ifftn(Fcoeff)
    #==============================
    u = u.real
    #u=fBNC(f,u)
    rhs = u[J // 2:3 * J // 2, J // 2:3 * J // 2]
    #rhs=u[J:2*J,J:2*J,J:2*J]*(8/Jp**3)

    return (rhs)
Example #57
def norm_xcorr(t,a):
    if t.size <= 2:
        raise Exception('Image is too small.')

    std_t = np.std(t)
    mean_t = np.mean(t)

    if std_t == 0:
        raise Exception('The image is blank.')

    t = np.float64(t)
    a = np.float64(a)

    outdim = np.array([a.shape[i]+t.shape[i]-1 for i in range(a.ndim)])

    # Check if convolution or FFT is faster #
    spattime, ffttime = get_time(t,a,outdim)
    if spattime < ffttime:
        method = 'spatial'
    else:
        method = 'fourier'

    if method == 'fourier':
        af = fftn(a,shape=outdim)   # Fast Fourier transform of search image
        tf = fftn(nflip(t),shape=outdim)   # Fast Fourier transform of search template

        xcorr = np.real(ifftn(tf*af))   # Inverse FFT of the convolution of search template and search image

    else:
        xcorr = convolve(a,t,mode='constant',cval=0) # 2D convolution of search image and template (rarely used)

    ls_a = lsum(a,t.shape)   # Running sum of search image
    ls2_a = lsum(a**2,t.shape)   # Running sum of the square of search image

    xcorr = padArray(xcorr,ls_a.shape)

    ls_diff = ls2_a-(ls_a**2)/t.size
    ls_diff = np.where(ls_diff < 0,0,ls_diff) # Replace negatives by zero
    sigma_a = np.sqrt(ls_diff)

    sigma_t = np.sqrt(t.size-1.)*std_t

    den = sigma_t*sigma_a

    num = (xcorr - ls_a*mean_t)

    tol = np.sqrt(np.finfo(den.dtype).eps) # Define zero tolerance as sqrt of machine epsilon
    with np.errstate(divide='ignore'):
        nxcorr = np.where(den < tol,0,num/den) # Normalized correlation (make zero when below tolerance)

    ## This next line is recommended by both Lewis and MATLAB but seems to introduce a ~1 px error in each axis ##
    # nxcorr = np.where((np.abs(nxcorr)-1.) > np.sqrt(np.finfo(nxcorr.dtype).eps),0,nxcorr)

    nxcorr = padArray(nxcorr,a.shape)

    return nxcorr
Example #58
 def deconvolve(self, image, kernel):
     if image.shape != kernel.shape:
         kernel = self._procrustes(kernel,
                                   image.shape,
                                   side='both',
                                   padval=0)
     image_fft = fftpack.fftshift(fftpack.fftn(image))
     kernel_fft = fftpack.fftshift(fftpack.fftn(kernel))
     return fftpack.fftshift(
         fftpack.ifftn(fftpack.ifftshift(image_fft / kernel_fft)))
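Example #59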
def constrained_least_squares_filtering(g, M, N, k=5, sigma=1.25, gamma=0.02):
    p = np.array([[0,-1,0],[-1,4,-1],[0,-1,0]])                         #Laplacian Operator
    m,n = size_image(p)                                                 #Laplacian filter size
    P = fftn(padding(g, p, M, N, m, n))                                 #Laplacian Fourier
    G = fftn(g)                                                         #Degraded Image Fourier
    h = gaussian_filter(k, sigma)                                       #Degradation Operator
    H = fftn(padding(g, h, M, N, k, k))                                 #Degradation Fourier
    C = np.multiply(np.divide(np.conj(H), (np.square(np.absolute(H)) + (gamma * np.square(np.absolute(P))))), G)
    f_rest = np.real(fftshift(ifftn(C)))
    return f_rest
Example #60
def correlate(original, x):
    shape = np.array(original.shape)

    fshape = [fftpack.helper.next_fast_len(int(d)) for d in shape]
    fslice = tuple([slice(0, int(sz)) for sz in shape])

    sp1 = fftpack.fftn(original, fshape)
    sp2 = fftpack.fftn(x, fshape)
    ret = fftpack.ifftn(sp1 * sp2)[fslice].copy().real
    return ret
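
A minimal check, assuming numpy as np and scipy fftpack as the snippet imports them: with a unit-impulse second argument the output equals the input.

sig = np.array([1.0, 2.0, 3.0, 4.0])
delta = np.array([1.0, 0.0, 0.0, 0.0])
print(correlate(sig, delta))   # -> [1. 2. 3. 4.]
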