Example #1
 def test_shape_argument(self):
     small_x = [[1,2,3],[4,5,6]]
     large_x1 = [[1,2,3,0],[4,5,6,0],[0,0,0,0],[0,0,0,0]]
     y = fftn(small_x,shape=(4,4))
     assert_array_almost_equal (y,fftn(large_x1))
     y = fftn(small_x,shape=(3,4))
     assert_array_almost_equal (y,fftn(large_x1[:-1]))
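
A minimal standalone sketch of what this test exercises, assuming scipy.fftpack is available (the newer scipy.fft interface names the argument s rather than shape): passing shape to fftn is equivalent to zero-padding the input before transforming.

import numpy as np
from scipy.fftpack import fftn

small_x = np.array([[1, 2, 3], [4, 5, 6]])
padded = np.zeros((4, 4))
padded[:2, :3] = small_x          # explicit zero-padding to the target shape

# fftn with shape=(4, 4) should match the FFT of the explicitly padded array.
np.testing.assert_allclose(fftn(small_x, shape=(4, 4)), fftn(padded), atol=1e-10)
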
Example #2
    def bench_random(self):
        from numpy.fft import fftn as numpy_fftn
        print()
        print('    Multi-dimensional Fast Fourier Transform')
        print('===================================================')
        print('          |    real input     |   complex input    ')
        print('---------------------------------------------------')
        print('   size   |  scipy  |  numpy  |  scipy  |  numpy ')
        print('---------------------------------------------------')
        for size,repeat in [((100,100),100),((1000,100),7),
                            ((256,256),10),
                            ((512,512),3),
                            ]:
            print('%9s' % ('%sx%s'%size), end=' ')
            sys.stdout.flush()

            for x in [random(size).astype(double),
                      random(size).astype(cdouble)+random(size).astype(cdouble)*1j
                      ]:
                y = fftn(x)
                #if size > 500: y = fftn(x)
                #else: y = direct_dft(x)
                assert_array_almost_equal(fftn(x),y)
                print('|%8.2f' % measure('fftn(x)',repeat), end=' ')
                sys.stdout.flush()

                assert_array_almost_equal(numpy_fftn(x),y)
                print('|%8.2f' % measure('numpy_fftn(x)',repeat), end=' ')
                sys.stdout.flush()

            print(' (secs for %s calls)' % (repeat))

        sys.stdout.flush()
Example #3
def fftconvolve(in1, in2, mode="full"):
    """Convolve two N-dimensional arrays using FFT. See convolve.
    
    """
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    if (s1.dtype.char in ['D','F']) or (s2.dtype.char in ['D', 'F']):
        cmplx=1
    else: cmplx=0
    size = s1+s2-1
    IN1 = fftn(in1,size)
    IN1 *= fftn(in2,size)
    ret = ifftn(IN1)
    del IN1
    if not cmplx:
        ret = real(ret)
    if mode == "full":
        return ret
    elif mode == "same":
        if product(s1,axis=0) > product(s2,axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret,osize)
    elif mode == "valid":
        return _centered(ret,abs(s2-s1)+1)
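
A quick, self-contained check of the identity this routine relies on (a hedged sketch, not part of the original source): linear convolution equals pointwise multiplication of FFTs once both inputs are padded to length len(a) + len(b) - 1.

import numpy as np
from scipy.fftpack import fftn, ifftn

a = np.random.rand(16)
b = np.random.rand(5)
size = a.size + b.size - 1                      # full linear-convolution length
via_fft = np.real(ifftn(fftn(a, (size,)) * fftn(b, (size,))))
np.testing.assert_allclose(via_fft, np.convolve(a, b, mode="full"), atol=1e-10)
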
Example #4
def customfftconvolve(in1, in2, mode="full", types=('','')):
  """ Pretty much the same as the original fftconvolve, but supports
      operands that have already been FFT-transformed.
  """

  in1 = asarray(in1)
  in2 = asarray(in2)

  if in1.ndim == in2.ndim == 0:  # scalar inputs
    return in1 * in2
  elif not in1.ndim == in2.ndim:
    raise ValueError("in1 and in2 should have the same dimensionality")
  elif in1.size == 0 or in2.size == 0:  # empty arrays
    return array([])

  s1 = array(in1.shape)
  s2 = array(in2.shape)
  complex_result = False
  #complex_result = (np.issubdtype(in1.dtype, np.complex) or
  #                  np.issubdtype(in2.dtype, np.complex))
  shape = s1 + s2 - 1
  
  if mode == "valid":
    _check_valid_mode_shapes(s1, s2)

  # Speed up FFT by padding to optimal size for FFTPACK
  fshape = [_next_regular(int(d)) for d in shape]
  fslice = tuple([slice(0, int(sz)) for sz in shape])

  if not complex_result:
    if types[0] == 'fft':
      fin1 = in1#_unfold_fft(in1, fshape)
    else:
      fin1 = rfftn(in1, fshape)

    if types[1] == 'fft':
      fin2 = in2#_unfold_fft(in2, fshape)
    else:
      fin2 = rfftn(in2, fshape)
    ret = irfftn(fin1 * fin2, fshape)[fslice].copy()
  else:
    if types[0] == 'fft':
      fin1 = _unfold_fft(in1, fshape)
    else:
      fin1 = fftn(in1, fshape)
    if types[1] == 'fft':
      fin2 = _unfold_fft(in2, fshape)
    else:
      fin2 = fftn(in2, fshape)
    ret = ifftn(fin1 * fin2)[fslice].copy()

  if mode == "full":
    return ret
  elif mode == "same":
    return _centered(ret, s1)
  elif mode == "valid":
    return _centered(ret, s1 - s2 + 1)
  else:
    raise ValueError("Acceptable mode flags are 'valid',"
                     " 'same', or 'full'.")
Example #5
def shear_fft2d(array,x,time,type='rho',rm_shear=0):
    #array of size (nx,ny,nz,3) if type='v'
    #array of size (nx,ny,nz) if type='rho'

    q=1.5e0 ; Omega=1.e-3
    Lx=1. ; Ly=2.*np.arcsin(1.e0) ; Lz=1.
    twopi=4.*np.arcsin(1.e0)

    if rank(array)==3:
        nx,ny,nz=shape(array)
    if rank(array)==4:
        nx,ny,nz,ndim=shape(array)

    # Remove background velocity shear if needed...
    if (rm_shear==1):
        array[:,:,:,1]=remove_shear(array[:,:,:,1],x)

    # Unshear data in real space...
    array=unshear(array,x,time,type=type)

    # Compute FFT...
    fft_array=array
    if (type=='rho'):
        for k in range(nz):
            fft_array[:,:,k]=fftn(array[:,:,k]-np.mean(array[:,:,k]))
    else:
        for k in range(nz):
            fft_array[:,:,k,0]=fftn(array[:,:,k,0])
            fft_array[:,:,k,1]=fftn(array[:,:,k,1])
            fft_array[:,:,k,2]=fftn(array[:,:,k,2])

    return fft_array
Example #6
    def psf_calc(self, psf, kz, data_size):
        '''Pre calculate OTFs etc ...'''
        g = psf

        self.height = data_size[0]
        self.width  = data_size[1]
        self.depth  = data_size[2]

        (x, y, z) = mgrid[-floor(self.height/2.0):(ceil(self.height/2.0)),
                          -floor(self.width/2.0):(ceil(self.width/2.0)),
                          -floor(self.depth/2.0):(ceil(self.depth/2.0))]

        gs = shape(g)
        g = g[int(floor((gs[0] - self.height)/2)):int(self.height + floor((gs[0] - self.height)/2)),
              int(floor((gs[1] - self.width)/2)):int(self.width + floor((gs[1] - self.width)/2)),
              int(floor((gs[2] - self.depth)/2)):int(self.depth + floor((gs[2] - self.depth)/2))]

        g = abs(ifftshift(ifftn(abs(fftn(g)))))
        g = g/sum(sum(sum(g)))

        self.g = g

        self.H = cast['f'](fftn(g))
        self.Ht = cast['f'](ifftn(g))

        tk = 2*kz*z

        t = g*exp(1j*tk)
        self.He = cast['F'](fftn(t))
        self.Het = cast['F'](ifftn(t))

        tk = 2*tk

        t = g*exp(1j*tk)
        self.He2 = cast['F'](fftn(t))
        self.He2t = cast['F'](ifftn(t))
Example #7
def fftconvolve3(in1, in2=None, in3=None, mode="full"):
    """Convolve two N-dimensional arrays using FFT. See convolve.

    For use with ARMA (old version: in1=num, in2=den, in3=data).

    * better for consistency with other functions: in1=data, in2=num, in3=den
    * note in2 and in3 need to have consistent dimension/shape,
      since I'm using the max of the in2, in3 shapes and not the sum

    Copied from scipy.signal.signaltools, but here used to try out an inverse
    filter; it doesn't work, or I can't get it to work.

    2010-10-23
    Looks ok to me for 1d, from the results below with a padded data array
    (fftp), but it doesn't work for the multidimensional inverse filter (fftn).
    The original signal.fftconvolve also uses fftn.
    """
    if (in2 is None) and (in3 is None):
        raise ValueError('at least one of in2 and in3 needs to be given')
    s1 = np.array(in1.shape)
    if not in2 is None:
        s2 = np.array(in2.shape)
    else:
        s2 = 0
    if not in3 is None:
        s3 = np.array(in3.shape)
        s2 = max(s2, s3) # try this looks reasonable for ARMA
        #s2 = s3


    complex_result = (np.issubdtype(in1.dtype, np.complex) or
                      np.issubdtype(in2.dtype, np.complex))
    size = s1+s2-1

    # Always use 2**n-sized FFT
    fsize = 2**np.ceil(np.log2(size))
    #convolve shorter ones first, not sure if it matters
    if not in2 is None:
        IN1 = fft.fftn(in2, fsize)
    if not in3 is None:
        IN1 /= fft.fftn(in3, fsize)  # use inverse filter
    # note the inverse is elementwise not matrix inverse
    # is this correct, NO  doesn't seem to work for VARMA
    IN1 *= fft.fftn(in1, fsize)
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = fft.ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if np.product(s1,axis=0) > np.product(s2,axis=0):
            osize = s1
        else:
            osize = s2
        return trim_centered(ret,osize)
    elif mode == "valid":
        return trim_centered(ret,abs(s2-s1)+1)
Example #8
    def test_size_accuracy_large(self, size):
        x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
        y1 = fftn(x.real.astype(np.float32))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)

        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 2000)
Example #9
    def test_float16_input_large(self, size):
        x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
        y1 = fftn(x.real.astype(np.float16))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)

        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 2e6)
Example #10
def fftconvolve(in1, in2, mode="full"):
    """Convolve two N-dimensional arrays using FFT. See convolve.

    """
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complex) or
                      np.issubdtype(in2.dtype, np.complex))
    size = s1 + s2 - 1

    # Always use 2**n-sized FFT
    fsize = 2 ** np.ceil(np.log2(size))
    IN1 = fftn(in1, fsize)
    IN1 *= fftn(in2, fsize)
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if product(s1, axis=0) > product(s2, axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret, osize)
    elif mode == "valid":
        return _centered(ret, abs(s2 - s1) + 1)
Example #11
def fft_correlation(in1, in2, normalize=False):
    """Correlation of two N-dimensional arrays using FFT.

    Adapted from scipy's fftconvolve.

    Parameters
    ----------
    in1, in2 : array
    normalize: bool
        If True performs phase correlation

    """
    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    size = s1 + s2 - 1
    # Use 2**n-sized FFT
    fsize = 2 ** np.ceil(np.log2(size))
    IN1 = fftn(in1, fsize)
    IN1 *= fftn(in2, fsize).conjugate()
    if normalize is True:
        ret = ifftn(np.nan_to_num(IN1 / np.absolute(IN1))).real.copy()
    else:
        ret = ifftn(IN1).real.copy()
    del IN1
    return ret
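
Hypothetical usage sketch (not from the original source): with normalize=True the routine performs phase correlation, whose peak sits at the relative shift between the two inputs. The same idea in a few self-contained lines:

import numpy as np
from scipy.fftpack import fftn, ifftn

np.random.seed(0)
img = np.random.rand(32, 32)
shifted = np.roll(np.roll(img, 3, axis=0), 5, axis=1)   # shift by (3, 5)

prod = fftn(img) * fftn(shifted).conjugate()
phase_corr = ifftn(prod / np.abs(prod)).real
peak = np.unravel_index(np.argmax(phase_corr), phase_corr.shape)
# the peak appears at (-3, -5) modulo the array size, i.e. (29, 27)
print(peak)
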
Example #12
def fftconvolve(in1, in2, mode="full"):
    """Convolve two N-dimensional arrays using FFT. See convolve.

    """
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complex) or
                      np.issubdtype(in2.dtype, np.complex))
    size = s1+s2-1
    IN1 = fftn(in1,size)
    IN1 *= fftn(in2,size)
    ret = ifftn(IN1)
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if product(s1,axis=0) > product(s2,axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret,osize)
    elif mode == "valid":
        return _centered(ret,abs(s2-s1)+1)
Example #13
def fftconvolve(in1, in2, in3=None, mode="full"):
    """Convolve two N-dimensional arrays using FFT. See convolve.

    copied from scipy, but here used to try out inverse filter
    doesn't work or I can't get it to work
    """
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complex) or
                      np.issubdtype(in2.dtype, np.complex))
    size = s1+s2-1

    # Always use 2**n-sized FFT
    fsize = 2**np.ceil(np.log2(size))
    IN1 = fftn(in1,fsize)
    #IN1 *= fftn(in2,fsize)
    IN1 /= fftn(in2,fsize)  # use inverse filter
    # note the inverse is elementwise not matrix inverse
    # is this correct, NO  doesn't seem to work
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if product(s1,axis=0) > product(s2,axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret,osize)
    elif mode == "valid":
        return _centered(ret,abs(s2-s1)+1)
Example #14
 def test_definition_float16(self):
     x = [[1, 2, 3],
          [4, 5, 6],
          [7, 8, 9]]
     y = fftn(np.array(x, np.float16))
     assert_equal(y.dtype, np.complex64)
     y_r = np.array(fftn(x), np.complex64)
     assert_array_almost_equal_nulp(y, y_r)
Example #15
    def test_definition(self):
        x = [[1,2,3],[4,5,6],[7,8,9]]
        y = fftn(np.array(x, np.float32))
        if not y.dtype == np.complex64:
            raise ValueError("double precision output with single precision")

        y_r = np.array(fftn(x), np.complex64)
        assert_array_almost_equal_nulp(y, y_r)
Example #16
 def test_definition(self):
     x = [[1,2,3],[4,5,6],[7,8,9]]
     y = fftn(x)
     assert_array_almost_equal(y,direct_dftn(x))
     x = random((20,26))
     assert_array_almost_equal(fftn(x),direct_dftn(x))
     x = random((5,4,3,20))
     assert_array_almost_equal(fftn(x),direct_dftn(x))
Example #17
def bgtensor(img, lsigma, rho=0.2):
    eps = 1e-12
    fimg = fftn(img, overwrite_x=True)

    for s in lsigma:
        jvbuffer = bgkern3(kerlen=math.ceil(s)*6+1, sigma=s, rho=rho)
        jvbuffer = fftn(jvbuffer, shape=fimg.shape, overwrite_x=True) * fimg
        fimg = ifftn(jvbuffer, overwrite_x=True)
        yield hessian3(np.real(fimg))
Example #18
    def test_invalid_sizes(self):
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[1 0\]\) specified"):
            fftn([[]])

        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[ 4 -3\]\) specified"):
            fftn([[1, 1], [2, 2]], (4, -3))
Example #19
def _fftconvolve(in1, in2, mode="full", axis=None):
    """ Convolve two N-dimensional arrays using FFT. See convolve.

    This is a fix of scipy.signal.fftconvolve, adding an axis argument and
    importing locally the stuff only needed for this function
    
    """
    #Locally import stuff only required for this:
    from scipy.fftpack import fftn, fft, ifftn, ifft
    from scipy.signal.signaltools import _centered
    from numpy import array, product


    s1 = array(in1.shape)
    s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complex) or
                      np.issubdtype(in2.dtype, np.complex))

    if axis is None:
        size = s1+s2-1
        fslice = tuple([slice(0, int(sz)) for sz in size])
    else:
        equal_shapes = s1==s2
        # allow equal_shapes[axis] to be False
        equal_shapes[axis] = True
        assert equal_shapes.all(), 'Shape mismatch on non-convolving axes'
        size = s1[axis]+s2[axis]-1
        fslice = [slice(l) for l in s1]
        fslice[axis] = slice(0, int(size))
        fslice = tuple(fslice)

    # Always use 2**n-sized FFT
    fsize = 2**np.ceil(np.log2(size))
    if axis is None:
        IN1 = fftn(in1,fsize)
        IN1 *= fftn(in2,fsize)
        ret = ifftn(IN1)[fslice].copy()
    else:
        IN1 = fft(in1,fsize,axis=axis)
        IN1 *= fft(in2,fsize,axis=axis)
        ret = ifft(IN1,axis=axis)[fslice].copy()
    if not complex_result:
        del IN1
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if product(s1,axis=0) > product(s2,axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret,osize)
    elif mode == "valid":
        return _centered(ret,abs(s2-s1)+1)
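
A standalone sketch of the axis argument's effect (assumes scipy.fftpack; illustration only): convolving along one axis with FFTs matches np.convolve applied row by row.

import numpy as np
from scipy.fftpack import fft, ifft

a = np.random.rand(4, 16)
b = np.random.rand(4, 5)                       # same shape on the non-convolving axis
n = a.shape[1] + b.shape[1] - 1
via_fft = np.real(ifft(fft(a, n, axis=1) * fft(b, n, axis=1), axis=1))
direct = np.array([np.convolve(a[i], b[i]) for i in range(a.shape[0])])
np.testing.assert_allclose(via_fft, direct, atol=1e-10)
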
Example #20
def fftconvolve(in1, in2):
    """Convolve two N-dimensional arrays using FFT.

    This is a modified version of the scipy.signal.fftconvolve.
    The new feature is derived from the fftconvolve algorithm used in the IDL package.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`;
        if sizes of `in1` and `in2` are not equal then `in1` has to be the
        larger array.

    Returns
    -------
    out : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    """
    in1 = asarray(in1)
    in2 = asarray(in2)

    if matrix_rank(in1) == matrix_rank(in2) == 0:  # scalar inputs
        return in1 * in2
    elif not in1.ndim == in2.ndim:
        raise ValueError("in1 and in2 should have the same rank")
    elif in1.size == 0 or in2.size == 0:  # empty arrays
        return array([])

    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complex) or
                      np.issubdtype(in2.dtype, np.complex))

    fsize = s1

    fslice = tuple([slice(0, int(sz)) for sz in fsize])
    if not complex_result:
        ret = irfftn(rfftn(in1, fsize) *
                     rfftn(in2, fsize), fsize)[fslice].copy()
        ret = ret.real
    else:
        ret = ifftn(fftn(in1, fsize) * fftn(in2, fsize))[fslice].copy()

    shift = array([int(floor(fsize[0]/2.0)), int(floor(fsize[1]/2.0))])
    ret   = roll(roll(ret, -shift[0], axis=0), -shift[1], axis=1)
    return ret
Example #21
def GetXi2d(array,x,time,type,periodic):
    #Return correlation function

    ciso2=1.e-6

    # Get dims
    if rank(array)==3:
        nx,ny,nz=shape(array)
    if rank(array)==4:
        nx,ny,nz,ndim=shape(array)

    if ((type=='rho') or (type=='B')):
        rm_shear=0
    else:
        rm_shear=1

    # y-array
    Ly=2.*np.arcsin(1.0) ; dy=Ly/ny
    y=dy/2.+arange(ny)*dy

    # Perform FFTs
    if periodic:
        if rank(array)==3:
            fft_array=fftn(array)
        if rank(array)==4:
            fft_array=array
            fft_array[:,:,:,0]=fftn(array[:,:,:,0])
            fft_array[:,:,:,1]=fftn(array[:,:,:,1])
            fft_array[:,:,:,2]=fftn(array[:,:,:,2])
    else:
        fft_array=shear_fft2d(array,x,time,type=type,rm_shear=rm_shear)

    # Compute correlation function
    if (type=='rho'):
        xi=fft_array
        for k in range(nz):
            xi[:,:,k]=abs(ifftn(fft_array[:,:,k]*conjugate(fft_array[:,:,k])))/nx/ny
    else:
        xi=fft_array[:,:,:,0]
        for k in range(nz):
            xi[:,:,k]=          abs(ifftn(fft_array[:,:,k,1]*conjugate(fft_array[:,:,k,1])))/nx/ny
#            xi[:,:,k]=          abs(ifftn(fft_array[:,:,k,0]*conjugate(fft_array[:,:,k,0])))/nx/ny
#            xi[:,:,k]=xi[:,:,k]+abs(ifftn(fft_array[:,:,k,1]*conjugate(fft_array[:,:,k,1])))/nx/ny
#            xi[:,:,k]=xi[:,:,k]+abs(ifftn(fft_array[:,:,k,2]*conjugate(fft_array[:,:,k,2])))/nx/ny
            xi[:,:,k]=xi[:,:,k]/ciso2
    xi=np.roll(np.roll(xi,nx//2,0),ny//2,1)
    if not(periodic):
        xi=unshear(xi,x,time,type='rho',direct=-1.e0)

    return xi
Example #22
    def test_shape_axes_argument(self):
        small_x = [[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]]
        large_x1 = array([[1, 2, 3, 0],
                          [4, 5, 6, 0],
                          [7, 8, 9, 0],
                          [0, 0, 0, 0]])
        y = fftn(small_x, shape=(4, 4), axes=(-2, -1))
        assert_array_almost_equal(y, fftn(large_x1))
        y = fftn(small_x, shape=(4, 4), axes=(-1, -2))

        assert_array_almost_equal(y, swapaxes(
            fftn(swapaxes(large_x1, -1, -2)), -1, -2))
Example #23
 def test_shape_axes_argument(self):
     small_x = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
     large_x1 = array([[1, 2, 3, 0], [4, 5, 6, 0], [7, 8, 9, 0], [0, 0, 0, 0]])
     # Disable tests with shape and axes of different lengths
     # y = fftn(small_x,shape=(4,4),axes=(-1,))
     # for i in range(4):
     #    assert_array_almost_equal (y[i],fft(large_x1[i]))
     # y = fftn(small_x,shape=(4,4),axes=(-2,))
     # for i in range(4):
     #    assert_array_almost_equal (y[:,i],fft(large_x1[:,i]))
     y = fftn(small_x, shape=(4, 4), axes=(-2, -1))
     assert_array_almost_equal(y, fftn(large_x1))
     y = fftn(small_x, shape=(4, 4), axes=(-1, -2))
     assert_array_almost_equal(y, swapaxes(fftn(swapaxes(large_x1, -1, -2)), -1, -2))
Example #24
    def test_shape_axes_argument2(self):
        # Change shape of the last axis
        x = numpy.random.random((10, 5, 3, 7))
        y = fftn(x, axes=(-1,), shape=(8,))
        assert_array_almost_equal(y, fft(x, axis=-1, n=8))

        # Change shape of an arbitrary axis which is not the last one
        x = numpy.random.random((10, 5, 3, 7))
        y = fftn(x, axes=(-2,), shape=(8,))
        assert_array_almost_equal(y, fft(x, axis=-2, n=8))

        # Change shape of axes: cf #244, where shape and axes were mixed up
        x = numpy.random.random((4,4,2))
        y = fftn(x, axes=(-3,-2), shape=(8,8))
        assert_array_almost_equal(y, numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))
Example #25
 def pdf(self):
     """ Applies the 3D FFT in the q-space grid to generate
     the DSI diffusion propagator, remove the background noise with a
     hard threshold and then deconvolve the propagator with the 
     Lucy-Richardson deconvolution algorithm
     """
     values = self.data
     #create the signal volume
     Sq = np.zeros((self.qgrid_sz, self.qgrid_sz, self.qgrid_sz))
     #fill q-space
     for i in range(self.dn):
         qx, qy, qz = self.model.qgrid[i]
         Sq[qx, qy, qz] += values[i]
     #get deconvolution PSF
     DSID_PSF = self.model.cache_get('deconv_psf', key=self.model.gtab)
     if DSID_PSF is None:
         DSID_PSF = gen_PSF(self.model.qgrid, self.qgrid_sz, 
                            self.qgrid_sz, self.qgrid_sz)
     self.model.cache_set('deconv_psf', self.model.gtab, DSID_PSF)
     #apply fourier transform
     Pr = fftshift(np.abs(np.real(fftn(ifftshift(Sq), 
                   3 * (self.qgrid_sz, )))))
     #threshold propagator
     Pr = threshold_propagator(Pr)
     #apply LR deconvolution
     Pr = LR_deconv(Pr, DSID_PSF, 5, 2)
     return Pr
Example #26
 def invert(self, estimate):
     """Invert the estimate to produce slopes.
     
     Parameters
     ----------
     estimate : array_like
         Phase estimate to invert.
     
     Returns
     -------
     xs : array_like
         Estimate of the x slopes.
     ys : array_like
         Estimate of the y slopes.
     
     
     """
     if self.manage_tt:
         estimate, ttx, tty = remove_tiptilt(self.ap, estimate)
     
     est_ft = fftpack.fftn(estimate) / 2.0
     
     xs_ft = self.gx * est_ft
     ys_ft = self.gy * est_ft
     
     xs = np.real(fftpack.ifftn(xs_ft))
     ys = np.real(fftpack.ifftn(ys_ft))
     
     if self.manage_tt and not self.suppress_tt:
         xs += ttx
         ys += tty
     
     return (xs, ys)
Example #27
def standard_dsi_algorithm(S,bvals,bvecs):
    #volume size
    sz=16
    #shifting
    origin=8
    #hanning width
    filter_width=32.
    #number of signal sampling points
    n=515

    #odf radius
    #radius=np.arange(2.1,30,.1)
    radius=np.arange(2.1,6,.2)
    #radius=np.arange(.1,6,.1)   
    
    bv=bvals
    bmin=np.sort(bv)[1]
    bv=np.sqrt(bv/bmin)
    qtable=np.vstack((bv,bv,bv)).T*bvecs
    qtable=np.floor(qtable+.5)
   
    #calculate radius for the hanning filter
    r = np.sqrt(qtable[:,0]**2+qtable[:,1]**2+qtable[:,2]**2)
        
    #setting hanning filter width and hanning
    hanning=.5*np.cos(2*np.pi*r/filter_width)
    
    #center and index in q space volume
    q=qtable+origin
    q=q.astype('i8')
    
    #apply the hanning filter
    values=S*hanning
    
    #create the signal volume    
    Sq=np.zeros((sz,sz,sz))
    for i in range(n):        
        Sq[q[i][0],q[i][1],q[i][2]]+=values[i]
    
    #apply fourier transform
    Pr=fftshift(np.abs(np.real(fftn(fftshift(Sq),(sz,sz,sz)))))

    #vertices, edges, faces  = create_unit_sphere(5)    
    #vertices, faces = sphere_vf_from('symmetric362')           
    vertices, faces = sphere_vf_from('symmetric724')           
    odf = np.zeros(len(vertices))
        
    for m in range(len(vertices)):
        
        xi=origin+radius*vertices[m,0]
        yi=origin+radius*vertices[m,1]
        zi=origin+radius*vertices[m,2]
        
        PrI=map_coordinates(Pr,np.vstack((xi,yi,zi)),order=1)
        for i in range(len(radius)):
            odf[m]=odf[m]+PrI[i]*radius[i]**2
   
    peaks,inds=peak_finding(odf.astype('f8'),faces.astype('uint16'))

    return Pr,odf,peaks
Example #28
def fast_multinomial(pii, nsum, thresh):
    """Generate the multinomial distribution for a given probability tuple pii.

    *nsum* is the overall number of atoms of a fixed element, pii is a tuple holding the
    distribution of the isotopes.

    This generator yields all combinations and their probabilities which are above *thresh*.

    Remark: the count of the first isotope of all combinations is not computed and "yielded", it is
    automatically *nsum* minus the sum of the elements in the combinations. We could compute this
    value in this generator, but it is faster to do this later (so only if needed).

    Example:: given three isotopes ([n1]E, [n2]E, [n3]E) of an element E which have
              probabilities 0.2, 0.3 and 0.5.

    To generate all molecules consisting of 5 atoms of this element where the overall probability
    is above 0.1 we can run:

        for index, pi in fast_multinomial((0.2, 0.3, 0.5), 5, 0.1):
            print(index, pi)

    which prints:

        (1, 3) 0.15
        (2, 2) 0.135
        (2, 3) 0.1125

    The first combination refers to (1, 1, 3) (sum is 5), the second to (1, 2, 2) and the last to
    (0, 2, 3).

    So the probability of a molecule with the overall formula [n1]E1 [n2]E1 [n3]E3 is 0.15, for
    [n1]E1 [n2]E2 [n3]E2 it is 0.135, and for [n2]E2 [n3]E3 it is 0.1125.

    Implementation:: a multinomial distribution can be described as the n-fold folding (convolution)
    of an underlying simpler distribution. The convolution can be computed fast with fft + inverse
    fft, as we do below.

    This is often 100 times faster than the old implementation computing the full distribution
    using its common definition.
    """
    n = len(pii)

    if n == 1:
        yield (0,), pii[0]
        return

    dim = n - 1
    a = np.zeros((nsum + 1,) * dim)
    a[(0,) * dim] = pii[0]
    for i, pi in enumerate(pii[1:]):
        idx = [0] * dim
        idx[i] = 1
        a[tuple(idx)] = pi

    probs = ifftn(fftn(a) ** nsum).real
    mask = probs >= thresh
    pi = probs[mask]
    ii = zip(*np.where(mask))
    for iii, pii in zip(ii, pi):
        yield iii, pii
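
Hypothetical driver reproducing the docstring's example (assumes fast_multinomial and the fftn/ifftn imports used above are in scope):

for index, pi in fast_multinomial((0.2, 0.3, 0.5), 5, 0.1):
    print(tuple(int(i) for i in index), round(float(pi), 4))
# expected output:
# (1, 3) 0.15
# (2, 2) 0.135
# (2, 3) 0.1125
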
Example #29
def half_fft_convolve(in1, in2, size, mode = 'full', return_type='real'):
    """
    Rewrite of fftconvolve from scipy.signal ((c) Travis Oliphant 1999-2002)
    to deal with fft convolution where one signal is not fft transformed
    and the other one is.  Application is, for example, in a loop where
    convolution happens repeatedly with different kernels over the same
    signal.  First input is not transformed, second input is.
    """
    s1 = np.array(in1.shape)
    s2 = size - s1 + 1
    complex_result = (np.issubdtype( in1.dtype, np.complex) or
                      np.issubdtype( in2.dtype, np.complex) )

    # Always use 2**n-sized FFT
    fsize = 2 **np.ceil( np.log2( size) )
    IN1 = fftn(in1, fsize)
    IN1 *= in2
    fslice = tuple( [slice( 0, int(sz)) for sz in size] )
    ret = ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if return_type == 'real':
        ret = ret.real
    if mode == 'full':
        return ret
    elif mode == 'same':
        if np.product(s1, axis=0) > np.product(s2, axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret, osize)
    elif mode == 'valid':
        return _centered(ret, abs(s2 - s1) + 1)
Example #30
    def recon_dm_trans(self):

        for i, (x_start, x_end, y_start, y_end) in enumerate(self.point_info):
            prb_obj =  self.prb[:,:] * self.obj[x_start:x_end,y_start:y_end]
            tmp = 2. * prb_obj - self.product[i]

            if self.sf_flag:
                tmp_fft = sf.fftn(tmp) / npy.sqrt(npy.size(tmp))
            else:
                tmp_fft = npy.fft.fftn(tmp) / npy.sqrt(npy.size(tmp))
    
            amp_tmp = npy.abs(tmp_fft)
            ph_tmp = tmp_fft / (amp_tmp+self.sigma1)
            (index_x,index_y) = npy.where(self.diff_array[i] >= 0.)
            dev = amp_tmp - self.diff_array[i]
            power = npy.sum(npy.sum((dev[index_x,index_y])**2))/(self.nx_prb*self.ny_prb)
    
            if power > self.sigma2: 
                amp_tmp[index_x,index_y] = self.diff_array[i][index_x,index_y] + dev[index_x,index_y] * npy.sqrt(self.sigma2/power)

            if self.sf_flag:
                tmp2 =  sf.ifftn(amp_tmp*ph_tmp) *  npy.sqrt(npy.size(tmp))
            else:
                tmp2 = npy.fft.ifftn(amp_tmp*ph_tmp) * npy.sqrt(npy.size(tmp))
                    
            self.product[i] += self.beta*(tmp2 - prb_obj)

        del(prb_obj)
        del(tmp)
        del(amp_tmp)
        del(ph_tmp)
        del(tmp2)
Example #31
    def time_step(self):
        # 1. Accelerate the velocity fields.
        self.U.x += self.A.x * self.dt * self.rho
        self.U.y += self.A.y * self.dt * self.rho

        # 2. Advect the velocity fields with semi-lagrangian method
        # Find where would the particle been at the previous time step
        # and apply periodic boundary conditions.
        xp = self.pbc(self.X.x - (self.U.x * self.dt))
        yp = self.pbc(self.X.y - (self.U.y * self.dt))

        # Interpolate values at xp,yp back to orignial grid
        self.U.x = self.advect(self.U.x, xp, yp)
        self.U.y = self.advect(self.U.y, xp, yp)
        self.rho = self.advect(self.rho, xp, yp)

        # 3. Move to wave space to apply viscosity and mass conservation
        u = fftshift(fftn(self.U.x))
        v = fftshift(fftn(self.U.y))

        # Mass conservation comes from projecting U in FS onto
        # unit rotational vector (-y,x)/r
        # eta is multiplied in wave space to attenuate high
        # frequency components, this is viscosity
        u_n = self.eta * (u * (1.0 - self.Xs.x**2 / self.r2s) -
                          v * self.Xs.x * self.Xs.y / self.r2s)
        v_n = self.eta * (v * (1.0 - self.Xs.y**2 / self.r2s) -
                          u * self.Xs.x * self.Xs.y / self.r2s)

        # Return to space
        self.U.x = ifftn(ifftshift(u_n)).real
        self.U.y = ifftn(ifftshift(v_n)).real
        # hack, to prevent a systematic drift in velocity
        self.U.x -= np.mean(self.U.x)
        self.U.y -= np.mean(self.U.y)
        # Another hack to dial back the density, avoids having too much
        # material and saturating the color map
        self.rho *= 0.995
        self.its += 1
        self.elapsed = self.its * self.dt
Example #32
    def psf_calc(self, psf, kz, data_size):
        '''Pre calculate OTFs etc ...'''
        g = psf

        self.height = data_size[0]
        self.width = data_size[1]
        self.depth = data_size[2]

        (x, y, z) = mgrid[-floor(self.height / 2.0):(ceil(self.height / 2.0)),
                          -floor(self.width / 2.0):(ceil(self.width / 2.0)),
                          -floor(self.depth / 2.0):(ceil(self.depth / 2.0))]

        gs = shape(g)
        g = g[int(floor((gs[0] - self.height) /
                        2)):int(self.height +
                                floor((gs[0] - self.height) / 2)),
              int(floor((gs[1] - self.width) /
                        2)):int(self.width + floor((gs[1] - self.width) / 2)),
              int(floor((gs[2] - self.depth) /
                        2)):int(self.depth + floor((gs[2] - self.depth) / 2))]

        g = abs(ifftshift(ifftn(abs(fftn(g)))))
        g = (g / sum(sum(sum(g))))

        self.g = g

        self.H = cast['f'](fftn(g))
        self.Ht = cast['f'](ifftn(g))

        tk = 2 * kz * z

        t = g * exp(1j * tk)
        self.He = cast['F'](fftn(t))
        self.Het = cast['F'](ifftn(t))

        tk = 2 * tk

        t = g * exp(1j * tk)
        self.He2 = cast['F'](fftn(t))
        self.He2t = cast['F'](ifftn(t))
Example #33
def cross_power_spectrum_nd(input_array1, input_array2, box_dims):
	''' 
	Calculate the cross power spectrum of two arrays and return it as an n-dimensional array,
	where n is the number of dimensions in input_array
	box_side is the size of the box in comoving Mpc. If this is set to None (default),
	the internal box size is used
	
	Parameters:
		* input_array1 (numpy array): the first array to calculate the 
			power spectrum of. Can be of any dimensions.
		* input_array2 (numpy array): the second array. Must have same 
			dimensions as input_array1.
		* box_dims = None (float or array-like): the dimensions of the 
			box. If this is None, the current box volume is used along all
			dimensions. If it is a float, this is taken as the box length
			along all dimensions. If it is an array-like, the elements are
			taken as the box length along each axis.
	
	Returns:
		The cross power spectrum in the same dimensions as the input arrays.
		
	TODO:
		Also return k values.
	'''

	assert(input_array1.shape == input_array2.shape)

	logger.info( 'Calculating power spectrum...')
	ft1 = fftpack.fftshift(fftpack.fftn(input_array1.astype('float64')))
	ft2 = fftpack.fftshift(fftpack.fftn(input_array2.astype('float64')))
	power_spectrum = np.real(ft1)*np.real(ft2)+np.imag(ft1)*np.imag(ft2)
	logger.info( '...done')

	# scale
	#boxvol = float(box_side)**len(input_array1.shape)
	boxvol = np.product(map(float,box_dims))
	pixelsize = boxvol/(np.product(map(float,input_array1.shape)))
	power_spectrum *= pixelsize**2/boxvol

	return power_spectrum
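
Side note on the power_spectrum line above (a hedged, self-contained check): Re(ft1)*Re(ft2) + Im(ft1)*Im(ft2) is simply the real part of ft1 * conj(ft2).

import numpy as np
from scipy import fftpack

a, b = np.random.rand(8, 8), np.random.rand(8, 8)
ft1 = fftpack.fftshift(fftpack.fftn(a))
ft2 = fftpack.fftshift(fftpack.fftn(b))
lhs = np.real(ft1) * np.real(ft2) + np.imag(ft1) * np.imag(ft2)
np.testing.assert_allclose(lhs, np.real(ft1 * np.conj(ft2)), atol=1e-10)
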
Example #34
def calculate_spectral_flux(kx, ky, uvel, vvel):
    """
    Calculate spectral flux.
    We assume du/dt = -u du/dx - v du/dy
              dv/dt = -u dv/dx - v dv/dy
    """

    uhat = fftn(uvel)
    vhat = fftn(vvel)
    i = np.complex(0, 1)
    # du/dx in x,y
    ddx_u = np.real(ifftn(i * kx * uhat))
    # du/dy in x,y
    ddy_u = np.real(ifftn(i * ky * uhat))
    # dv/dx in x,y
    ddx_v = np.real(ifftn(i * kx * vhat))
    # dv/dy in x,y
    ddy_v = np.real(ifftn(i * ky * vhat))

    # adv_u = u * du/dx + v * du/dy
    adv_u = uvel * ddx_u + vvel * ddy_u
    # adv_v = u * dv/dx + v * dv/dy
    adv_v = uvel * ddx_v + vvel * ddy_v

    # KE trend from advection:
    # - u * adv_u - v * adv_v
    # in spectral space
    # The minus sign arises as advection
    # is on the RHS of the momentum eqs.
    Tkxky = np.real( -np.conj(fftn(uvel))*fftn(adv_u) - \
                      np.conj(fftn(vvel))*fftn(adv_v) )   #[m2/s3]

    return Tkxky
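
A quick standalone check of the spectral-derivative trick used above (hedged sketch, assuming a periodic 2*pi x 2*pi domain so the wavenumbers are integers):

import numpy as np
from scipy.fftpack import fftn, ifftn, fftfreq

n = 64
x = np.linspace(0, 2 * np.pi, n, endpoint=False)
X, Y = np.meshgrid(x, x, indexing="ij")
u = np.sin(3 * X) * np.cos(2 * Y)

kx = fftfreq(n, d=1.0 / n)                     # integer wavenumbers for a 2*pi box
KX = np.meshgrid(kx, kx, indexing="ij")[0]
ddx_u = np.real(ifftn(1j * KX * fftn(u)))      # same pattern as ddx_u above
np.testing.assert_allclose(ddx_u, 3 * np.cos(3 * X) * np.cos(2 * Y), atol=1e-10)
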
Example #35
    def pseudo_spectral_integrate(self,initial_state,step=0.1,finish=1000,**kwargs):
#        print "Integration using pseudo-spectral step"
        if kwargs:
            self.update_parameters(kwargs)
        time = np.arange(0,finish+step,step)
        result=np.zeros((len(time),len(initial_state)))
        t=0
        result[0]=initial_state
        for i,tout in enumerate(time[1:]):
            self.state=result[-1]
            B,W,H=self.state.reshape(self.setup['nvar'],*self.setup['n'])
            self.fftb=fftn(B)
            self.fftw=fftn(W)
            self.ffth=fftn(H)
            while t < tout:
                self.fftb = self.multb*(self.fftb + self.dt*fftn(self.dBdt(B,W,H,t,self.p['P'],self.p['chi'])))#.real
                self.fftw = self.multw*(self.fftw + self.dt*fftn(self.dWdt(B,W,H,t,self.p['P'],self.p['chi'])))#.real
                self.ffth = self.multh*(self.ffth + self.dt*fftn(self.dHdt(B,W,H,t,self.p['P'],self.p['chi'])))#.real
                B= ifftn(self.fftb).real
                W= ifftn(self.fftw).real
                H= ifftn(self.ffth).real
                t+=self.dt
                self.time_elapsed+=self.dt
            result[i+1]=np.ravel((B,W,H))
        self.state=result[-1]
        return time,result
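
The multb/multw/multh factors above are presumably precomputed linear-operator multipliers applied in Fourier space (an assumption; they are defined elsewhere in the class). A hedged sketch of the same pseudo-spectral pattern on the plain heat equation, where the exact per-step propagator is known:

import numpy as np
from scipy.fftpack import fftn, ifftn, fftfreq

n, L, D, dt, steps = 64, 2 * np.pi, 1.0, 1e-3, 100
k = 2 * np.pi * fftfreq(n, d=L / n)
k2 = k[:, None] ** 2 + k[None, :] ** 2

x = np.linspace(0, L, n, endpoint=False)
u = np.sin(x)[:, None] * np.ones(n)[None, :]   # single Fourier mode, |k|^2 = 1

fftu = fftn(u)
decay = np.exp(-D * k2 * dt)                   # exact linear propagator per step
for _ in range(steps):
    fftu = decay * fftu                        # nonlinear terms would be added here
u_num = ifftn(fftu).real

np.testing.assert_allclose(u_num, np.exp(-D * dt * steps) * u, atol=1e-12)
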
Example #36
    def pseudo_spectral_integrate(self,initial_state,step=0.1,finish=1000):
#        print "Integration using pseudo-spectral step"
        time = np.arange(0,finish+step,step)
        result=[]
        t=0
        result.append(initial_state)
        for tout in time[1:]:
            self.state=result[-1]
            b,w,h=self.state.reshape(self.setup['nvar'],*self.setup['n'])
            self.fftb=fftn(b)
            self.fftw=fftn(w)
            self.ffth=fftn(h)
            while t < tout:
                self.fftb = self.multb*(self.fftb + self.dt*fftn(self.dbdt(b,w,h,t,self.p['p'],self.p['chi'],self.p['beta'],self.p['a'],self.p['omegaf'])))#.real
                self.fftw = self.multw*(self.fftw + self.dt*fftn(self.dwdt(b,w,h,t,self.p['p'],self.p['chi'],self.p['beta'],self.p['a'],self.p['omegaf'])))#.real
                self.ffth = self.multh*(self.ffth + self.dt*fftn(self.dhdt(b,w,h,t,self.p['p'],self.p['chi'],self.p['beta'],self.p['a'],self.p['omegaf'])))#.real
                b= ifftn(self.fftb).real
                w= ifftn(self.fftw).real
                h= ifftn(self.ffth).real
                t+=self.dt
                self.time_elapsed+=self.dt
            self.state=np.ravel((b,w,h))
            self.sim_step+=1
            result.append(self.state)
        return time,result
Example #37
def _cwt_fft(X, Ws, mode="same"):
    """Compute cwt with fft based convolutions
    Return a generator over signals.
    """
    X = np.asarray(X)

    # Precompute wavelets for given frequency range to save time
    n_signals, n_times = X.shape
    n_freqs = len(Ws)

    Ws_max_size = max(W.size for W in Ws)
    size = n_times + Ws_max_size - 1
    # Always use 2**n-sized FFT
    fsize = 2 ** np.ceil(np.log2(size))

    # precompute FFTs of Ws
    fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
    for i, W in enumerate(Ws):
        if len(W) > n_times:
            raise ValueError('Wavelet is too long for such a short signal. '
                             'Reduce the number of cycles.')
        fft_Ws[i] = fftn(W, [fsize])

    for k, x in enumerate(X):
        if mode == "full":
            tfr = np.zeros((n_freqs, fsize), dtype=np.complex128)
        elif mode == "same" or mode == "valid":
            tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)

        fft_x = fftn(x, [fsize])
        for i, W in enumerate(Ws):
            ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1]
            if mode == "valid":
                sz = abs(W.size - n_times) + 1
                offset = (n_times - sz) // 2
                tfr[i, offset:(offset + sz)] = _centered(ret, sz)
            else:
                tfr[i, :] = _centered(ret, n_times)
        yield tfr
Example #38
def calculate_spectral_ens_flux(kx,ky,uvel,vvel,rot):
   """
   """
   
   rhat = fftn(rot)
   uhat = fftn(uvel)
   vhat = fftn(vvel)
   i = np.complex(0,1)
   # drot/dx in x,y
   ddx_rot = np.real( ifftn(i*kx*rhat) )
   # drot/dy in x,y
   ddy_rot = np.real( ifftn(i*ky*rhat) )
   
   # adv_rot = u * drot/dx + v * drot/dy
   adv_rot = uvel * ddx_rot + vvel * ddy_rot
   
   # Enstrophy trend from advection: 
   # rot * adv_rot 
   # in spectral space
   Tkxky = np.real( -np.conj(rhat)*fftn(adv_rot) ) 
   
   return Tkxky
Example #39
def G_D_into_x(g_D_fft, w):
	N, V = np.shape(w)[0], np.shape(w)[1]
	L = np.int32(N**0.5)
	G_D_x = np.zeros([N,V],dtype=np.complex128)
	for v in range(V):
		w_im = np.zeros([2*L-1,2*L-1],dtype=np.complex128)
		w_im[0:L,0:L] = np.reshape(w[:,v], [L,L])
		fft_w = fftpack.fftn(w_im)
		fft_GDw = fft_w*g_D_fft
		y = fftpack.ifftn(fft_GDw)
		G_D_x[:,v] = np.reshape(y[:L,:L],[N])

	return G_D_x
Example #40
def PowerSpectrum3D(grid, logbins=True, bins=50):  #edges should be pixels
    '''Calculate the power spectrum for a cube.
    Input:
    grid = input grid as a numpy array.
    Output:
    k_arr, psd1D'''
    isize = grid.shape[0]
    F = fftshift(fftpack.fftn(grid))
    psd3D = np.abs(F)**2

    k_arr, psd1D = azimuthalAverage3D(psd3D, logbins=logbins, bins=bins)
    k_arr = edge2center(k_arr)
    return k_arr, psd1D
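
A hedged side note on normalisation: with scipy.fftpack's unnormalised forward FFT, Parseval's theorem for the cube reads sum(|F|**2) == grid.size * sum(grid**2), which is worth keeping in mind when interpreting psd3D.

import numpy as np
from scipy import fftpack

grid = np.random.rand(16, 16, 16)
F = fftpack.fftn(grid)
np.testing.assert_allclose(np.sum(np.abs(F) ** 2),
                           grid.size * np.sum(grid ** 2), rtol=1e-10)
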
Example #41
 def _make_T(self):
     T_k = self.T_func[0](*self.k(indexing='ij', fft_shift=True))
     hilbert_dims = np.multiply.reduce(self.size)
     shape = [self.size, self.size
              ] if self.dims == 1 else list(self.size) * 2
     psi = np.eye(hilbert_dims).reshape(*shape)
     axes = tuple([i - self.dims for i in range(self.dims)])
     T = fftn(psi, axes=axes, overwrite_x=True)
     T *= T_k
     T = ifftn(T, axes=axes, overwrite_x=True)
     self.__T = T.reshape(hilbert_dims, hilbert_dims)
     self.__T_diag = np.diag(self.__T)
     self.__diag_mask = np.eye(len(self.__T_diag), dtype=bool)
Example #42
def calculatePowerSpectrum(data):

    epsilon = 1e-10

    dataF = np.array(fftpack.fftshift(fftpack.fftn(data)), dtype='complex64')
    dataFabs = np.abs(dataF)
    dataFabs = dataFabs - np.min(dataFabs)
    dataFabs = dataFabs / np.max(dataFabs)

    dataPowerSpectrum = sphericalAverage(dataFabs**2) + epsilon
    del dataFabs

    return (dataF, dataPowerSpectrum)
Example #43
def remove_phase_ramp(tmp, threshold_flag, threshold, subpixel_flag):
    tmp_tmp, x_shift, y_shift = subpixel_align(
        sf.ifftshift(sf.ifftn(sf.fftshift(np.abs(tmp)))),
        sf.ifftshift(sf.ifftn(sf.fftshift(tmp))),
        threshold_flag,
        threshold,
        subpixel_flag,
    )
    tmp_new = sf.ifftshift(sf.fftn(sf.fftshift(tmp_tmp)))
    phase_tmp = np.angle(tmp_new)
    ph_offset = np.mean(phase_tmp[np.where(np.abs(tmp) >= threshold)])
    phase_tmp = np.angle(tmp_new) - ph_offset
    return np.abs(tmp) * np.exp(1j * phase_tmp)
Example #44
def interval_approximate_nd(f, a, b, deg, return_inf_norm=False):
    """Finds the chebyshev approximation of an n-dimensional function on an
    interval.

    Parameters
    ----------
    f : function from R^n -> R
        The function to interpolate.
    a : numpy array
        The lower bound on the interval.
    b : numpy array
        The upper bound on the interval.
    deg : numpy array
        The degree of the interpolation in each dimension.
    return_inf_norm : bool
        whether to return the inf norm of the function

    Returns
    -------
    coeffs : numpy array
        The coefficient of the chebyshev interpolating polynomial.
    inf_norm : float
        The inf_norm of the function
    """
    dim = len(a)
    if dim != len(b):
        raise ValueError("Interval dimensions must be the same!")

    if hasattr(f, "evaluate_grid"):
        cheb_points = transform(get_cheb_grid(deg, dim, True), a, b)
        values_block = f.evaluate_grid(cheb_points)
    else:
        cheb_points = transform(get_cheb_grid(deg, dim, False), a, b)
        values_block = f(*cheb_points.T).reshape(*([deg + 1] * dim))

    values = chebyshev_block_copy(values_block)

    if return_inf_norm:
        inf_norm = np.max(np.abs(values_block))

    x0_slicer, deg_slicer, slices, rescale = interval_approx_slicers(dim, deg)
    coeffs = fftn(values / rescale).real
    for x0sl, degsl in zip(x0_slicer, deg_slicer):
        # halve the coefficients in each slice
        coeffs[x0sl] /= 2
        coeffs[degsl] /= 2

    if return_inf_norm:
        return coeffs[tuple(slices)], inf_norm
    else:
        return coeffs[tuple(slices)]
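
A one-dimensional, hedged illustration of the trick above (chebyshev_block_copy plus fftn): Chebyshev coefficients can be read off an FFT of the sampled values mirrored about the interval ends, with the first and last coefficients halved.

import numpy as np
from scipy.fftpack import fftn

deg = 16
f = np.exp                                          # any smooth test function
xs = np.cos(np.pi * np.arange(deg + 1) / deg)       # Chebyshev points on [-1, 1]
vals = f(xs)
mirrored = np.concatenate([vals, vals[-2:0:-1]])    # even extension, length 2*deg
coeffs = np.real(fftn(mirrored)) / deg
coeffs[0] /= 2
coeffs[deg] /= 2
coeffs = coeffs[:deg + 1]
np.testing.assert_allclose(np.polynomial.chebyshev.chebval(0.3, coeffs),
                           f(0.3), rtol=1e-12)
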
Example #45
    def compute_kernel(self):
        # TODO: Most of this stuff is duplicated in MeanEstimator - move up the hierarchy?
        n = self.n
        L = self.L
        _2L = 2 * self.L

        kernel = np.zeros((_2L, _2L, _2L, _2L, _2L, _2L), dtype=self.as_type)
        sq_filters_f = self.src.eval_filter_grid(self.L, power=2)

        for i in tqdm(range(0, n, self.batch_size)):
            _range = np.arange(i, min(n, i + self.batch_size))
            pts_rot = rotated_grids(L, self.src.rots[_range, :, :])
            weights = sq_filters_f[:, :, _range]
            weights *= self.src.amplitudes[_range]**2

            if L % 2 == 0:
                weights[0, :, :] = 0
                weights[:, 0, :] = 0

            # TODO: This is where this differs from MeanEstimator
            pts_rot = m_reshape(pts_rot, (3, L**2, -1))
            weights = m_reshape(weights, (L**2, -1))

            batch_n = weights.shape[-1]
            factors = np.zeros((_2L, _2L, _2L, batch_n), dtype=self.as_type)

            # TODO: Numpy has got to have a functional shortcut to avoid looping like this!
            for j in range(batch_n):
                factors[:, :, :, j] = anufft3(weights[:, j],
                                              pts_rot[:, :, j],
                                              (_2L, _2L, _2L),
                                              real=True)

            factors = vol_to_vec(factors)
            kernel += vecmat_to_volmat(factors @ factors.T) / (n * L**8)

        # Ensure symmetric kernel
        kernel[0, :, :, :, :, :] = 0
        kernel[:, 0, :, :, :, :] = 0
        kernel[:, :, 0, :, :, :] = 0
        kernel[:, :, :, 0, :, :] = 0
        kernel[:, :, :, :, 0, :] = 0
        kernel[:, :, :, :, :, 0] = 0

        logger.info('Computing non-centered Fourier Transform')
        kernel = mdim_ifftshift(kernel, range(0, 6))
        kernel_f = fftn(kernel)
        # Kernel is always symmetric in spatial domain and therefore real in Fourier
        kernel_f = np.real(kernel_f)

        return FourierKernel(kernel_f, centered=False)
Example #46
def cross_power_spectrum_nd(input_array1, input_array2, box_dims):
        ''' 
        Calculate the cross power spectrum of two arrays and return it as an n-dimensional array.
        
        Parameters:
                input_array1 (numpy array): the first array to calculate the 
                        power spectrum of. Can be of any dimensions.
                input_array2 (numpy array): the second array. Must have same 
                        dimensions as input_array1.
                box_dims = None (float or array-like): the dimensions of the 
                        box in Mpc. If this is None, the current box volume is used along all
                        dimensions. If it is a float, this is taken as the box length
                        along all dimensions. If it is an array-like, the elements are
                        taken as the box length along each axis.
        
        Returns:
                The cross power spectrum in the same dimensions as the input arrays.
                
        TODO:
                Also return k values.
        '''

        assert(input_array1.shape == input_array2.shape)

        box_dims = _get_dims(box_dims, input_array1.shape)

        print_msg( 'Calculating power spectrum...')
        ft1 = fftpack.fftshift(fftpack.fftn(input_array1.astype('float64')))
        ft2 = fftpack.fftshift(fftpack.fftn(input_array2.astype('float64')))
        power_spectrum = np.real(ft1)*np.real(ft2)+np.imag(ft1)*np.imag(ft2)
        print_msg( '...done')

        # scale
        boxvol = np.product(box_dims)
        pixelsize = boxvol/(np.product(input_array1.shape))
        power_spectrum *= pixelsize**2/boxvol

        return power_spectrum
Example #47
def read_fft_volume(data4D, harmonic=1):
    zslices = data4D.shape[2]
    tframes = data4D.shape[3]
    data3d_fft = np.empty((data4D.shape[:2] + (0, )))
    for slice in range(zslices):
        ff1 = fftn([data4D[:, :, slice, t] for t in range(tframes)])
        fh = np.absolute(ifftn(ff1[harmonic, :, :]))
        fh[fh < 0.1 * np.max(fh)] = 0.0
        image = 1. * fh / np.max(fh)
        # plt.imshow(image, cmap = 'gray')
        # plt.show()
        image = np.expand_dims(image, axis=2)
        data3d_fft = np.append(data3d_fft, image, axis=2)
    return data3d_fft
Example #48
    def init_angle_spectrum(self):
        mat_contents = self.load_file()
        keys = sorted(mat_contents.keys())
        pressure_field = mat_contents[keys[0]]
        angle_spectrum_0 = fftn(pressure_field, pressure_field.shape)
        self.spectrum.Nx = angle_spectrum_0.shape[0]
        self.spectrum.Ny = angle_spectrum_0.shape[1]

        lin_l = (-1)**np.arange(self.spectrum.Nx)
        lin_l = lin_l[:, np.newaxis]
        lin_m = lin_l.T
        lin_lm = lin_l @ lin_m
        self.angle_spectrum = 4 * np.pi**2 / self.spectrum.Nx**2 * angle_spectrum_0.conj(
        ) * lin_lm
Example #49
 def xcorrelation(self, image, kernel, flip=True):
     """
     Cross-correlation of two images,
     based on FFTs with padding to a size of 2N-1.

     If flip is True, use the flip trick to increase the speed.
     """
     outdims = np.array([
         image.shape[dd] + kernel.shape[dd] - 1 for dd in range(image.ndim)
     ])
     if flip:
         af = fftpack.fftn(image, outdims)
         #for real data fftn(ndflip(t)) = conj(fftn(t)), but flipup is faster
         tf = fftpack.fftn(self._ndflip(kernel), outdims)
         # '*' in python: elementwise multiplikation
         xcorr = np.real(fftpack.ifftn(tf * af))
     else:
         corr = fftpack.fftshift(
             fftpack.ifftn(
                 np.multiply(fftpack.fftn(image, outdims),
                             np.conj(fftpack.fftn(kernel, outdims)))))
         xcorr = np.abs(corr)
     return xcorr
Example #50
def calc_ps2d(cube, fmin=154, fmax=162, fov=2):
    f21cm = 1420.405751  # [MHz]
    cosmo = FlatLambdaCDM(H0=71, Om0=0.27)
    nf, ny, nx = cube.shape
    fc = (fmin + fmax) / 2
    zc = f21cm / fc - 1
    DM = cosmo.comoving_transverse_distance(zc).value  # [Mpc]
    
    pixelsize = fov / nx  # [deg]
    d_xy = DM * np.deg2rad(pixelsize)  # [Mpc]
    fs_xy = 1 / d_xy  # [Mpc^-1]
    
    dfreq = (fmax - fmin) / (nf-1)  # [MHz]
    c = ac.c.to("km/s").value
    H = cosmo.H(zc).value  # [km/s/Mpc]
    d_z = c * (1+zc)**2 * dfreq / H / f21cm  # [Mpc]
    fs_z = 1 / d_z  # [Mpc^-1]
    
    cubefft = fftpack.fftshift(fftpack.fftn(cube))
    ps3d = np.abs(cubefft) ** 2  # [K^2]
    norm1 = 1 / (nx * ny * nf)
    norm2 = 1 / (fs_xy**2 * fs_z)  # [Mpc^3]
    norm3 = 1 / (2*np.pi)**3
    ps3d *= (norm1 * norm2 * norm3)  # [K^2 Mpc^3]
    
    k_xy = 2*np.pi * fftpack.fftshift(fftpack.fftfreq(nx, d=d_xy))
    k_z  = 2*np.pi * fftpack.fftshift(fftpack.fftfreq(nf, d=d_z))
    k_perp = k_xy[k_xy >= 0]
    k_los  = k_z [k_z  >= 0]
    n_k_perp = len(k_perp)
    n_k_los  = len(k_los)
    ps2d = np.zeros(shape=(n_k_los, n_k_perp))

    eps = 1e-8
    ic_xy = (np.abs(k_xy) < eps).nonzero()[0][0]
    ic_z  = (np.abs(k_z)  < eps).nonzero()[0][0]
    p_xy = np.arange(nx) - ic_xy
    p_z  = np.abs(np.arange(nf) - ic_z)
    mx, my = np.meshgrid(p_xy, p_xy)
    rho = np.sqrt(mx**2 + my**2)
    rho = np.around(rho).astype(int)

    for r in range(n_k_perp):
        ix, iy = (rho == r).nonzero()
        for s in range(n_k_los):
            iz = (p_z == s).nonzero()[0]
            cells = np.concatenate([ps3d[z, iy, ix] for z in iz])
            ps2d[s, r] = cells.mean()
            
    return (ps2d, k_perp, k_los)
Example #51
def pixel_shift_2d(array, x_shift, y_shift):
    nx, ny = np.shape(array)
    tmp = sf.ifftshift(sf.ifftn(sf.fftshift(array)))
    nest = np.mgrid[0:nx, 0:ny]
    tmp = tmp * np.exp(
        1j
        * 2
        * np.pi
        * (
            -1.0 * x_shift * (nest[0, :, :] - nx / 2.0) / (nx)
            - y_shift * (nest[1, :, :] - ny / 2.0) / (ny)
        )
    )
    return sf.ifftshift(sf.fftn(sf.fftshift(tmp)))
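
The identity behind sub-pixel shift routines like the one above is the Fourier shift theorem: multiplying the spectrum by a linear phase shifts the signal. For an integer shift this reproduces np.roll exactly (hedged sketch, independent of the sign conventions used above):

import numpy as np
from scipy import fftpack as sf

a = np.random.rand(8, 8)
nx, ny = a.shape
kx, ky = np.meshgrid(sf.fftfreq(nx), sf.fftfreq(ny), indexing="ij")
shift = (2, 3)
phase = np.exp(-2j * np.pi * (kx * shift[0] + ky * shift[1]))
shifted = np.real(sf.ifftn(sf.fftn(a) * phase))
np.testing.assert_allclose(shifted, np.roll(a, shift, axis=(0, 1)), atol=1e-10)
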
Example #52
def fft_correlation(in1, in2, normalize=False):
    """Correlation of two N-dimensional arrays using FFT.

    Adapted from scipy's fftconvolve.

    Parameters
    ----------
    in1, in2 : array
    normalize: bool
        If True performs phase correlation

    """
    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    size = s1 + s2 - 1
    # Use 2**n-sized FFT
    fsize = (2**np.ceil(np.log2(size))).astype("int")
    fprod = fftn(in1, fsize)
    fprod *= fftn(in2, fsize).conjugate()
    if normalize is True:
        fprod = np.nan_to_num(fprod / np.absolute(fprod))
    ret = ifftn(fprod).real.copy()
    return ret, fprod
Example #53
    def correlate(self, plane, ref_im):
        (r, c) = ref_im.shape
        key_fft = fftpack.fftn(ref_im)
        key_sq = ref_im * ref_im
        norm = key_sq.sum()
        norm = numpy.sqrt(norm)

        cur_slice = plane
        cur_sq = cur_slice * cur_slice
        cur_norm = cur_sq.sum()
        cur_norm = numpy.sqrt(cur_norm)

        cur_fft = fftpack.fftn(cur_slice)
        cur_fft = cur_fft.conjugate()

        cur_max = cur_slice.max()
        prod_slice = key_fft * cur_fft
        prod_slice = prod_slice / (norm * cur_norm)
        cor_slice = fftpack.ifftn(prod_slice)

        (row, col, val) = self.find_max(cor_slice.real)

        return val
Example #54
def denoising(img, G):
    """Function that applies the denoising process on an image 'img' using the Gaussian filter G.

    Parameters
    ----------
    img : (numpy.ndarray))
        An array representing the image given as input.
    G : (numpy.ndarray)
        An array representing the gaussian filter made with the parameters k and sigma.

    Returns
    -------
    (numpy.ndarray)
        The image after the denoising process.
    """
    # padding the filter so that it has the same size of the image.
    pad = (img.shape[0]//2)-G.shape[0]//2
    G_pad = np.pad(G, (pad, pad-1), "constant",  constant_values=0)

    # computing the Fourier transforms.
    R = np.multiply(fftn(img), fftn(G_pad))
    
    return np.real(fftshift(ifftn(R))), G_pad
Example #55
def calc_score_on_fft_domain(gt, input):
    """
    Calculate mean absolute error in power spectrum and MAE in phase
    This score is original. The arguments are assumed to be as follows:
    @param input: input image
    @param gt: ground truth
    return: diff_spe_const, diff_spe, diff_ang_const, diff_ang
    """
    # FFT
    imgFreqs = fftn(input)
    gtFreqs = fftn(gt)
    imgFreqs = np.fft.fftshift(imgFreqs)
    gtFreqs = np.fft.fftshift(gtFreqs)

    # Difference of power spectrum
    diff_spe = np.absolute((np.abs(gtFreqs)**2) - (np.abs(imgFreqs)**2))
    diff_spe_const = np.mean(diff_spe)

    # Difference of angle
    diff_ang = np.abs(np.angle(imgFreqs / gtFreqs, deg=True))
    diff_ang_const = np.mean(diff_ang)

    return diff_spe_const, diff_spe, diff_ang_const, diff_ang
Example #56
    def sphAvgPwr(self, nbins=30):
        """
        Compute the spherically averaged power spectrum of the CO cube via a 3D FFT.
        """
        kEdges = np.linspace(np.min(self.k), np.max(self.k), nbins + 1)
        kMids = (kEdges[1:] + kEdges[:-1]) / 2.

        self.cube -= np.median(self.cube)
        Pcube = np.abs(sfft.fftn(self.cube))**2  #/ self.cube.size**2
        Pk = np.histogram(self.k.flatten(), kEdges,
                          weights=Pcube.flatten())[0] / np.histogram(
                              self.k.flatten(), kEdges)[0]

        return kMids, Pk
Example #57
    def bench_random(self):
        from numpy.fft import fftn as numpy_fftn
        print()
        print('    Multi-dimensional Fast Fourier Transform')
        print('===================================================')
        print('          |    real input     |   complex input    ')
        print('---------------------------------------------------')
        print('   size   |  scipy  |  numpy  |  scipy  |  numpy ')
        print('---------------------------------------------------')
        for size, repeat in [
            ((100, 100), 100),
            ((1000, 100), 7),
            ((256, 256), 10),
            ((512, 512), 3),
        ]:
            print('%9s' % ('%sx%s' % size), end=' ')
            sys.stdout.flush()

            for x in [
                    random(size).astype(double),
                    random(size).astype(cdouble) +
                    random(size).astype(cdouble) * 1j
            ]:
                y = fftn(x)
                #if size > 500: y = fftn(x)
                #else: y = direct_dft(x)
                assert_array_almost_equal(fftn(x), y)
                print('|%8.2f' % measure('fftn(x)', repeat), end=' ')
                sys.stdout.flush()

                assert_array_almost_equal(numpy_fftn(x), y)
                print('|%8.2f' % measure('numpy_fftn(x)', repeat), end=' ')
                sys.stdout.flush()

            print(' (secs for %s calls)' % (repeat))

        sys.stdout.flush()
Example #58
def unwrap_gradient(arr, pad_width=0, denoising=None, denoising_kws=None):
    """
    Multi-dimensional Gradient-based Fourier unwrap.

    Args:
        arr (np.ndarray): The wrapped phase array.
        pad_width (float|int): Size of the padding to use.
            This is useful for mitigating border effects.
            If int, it is interpreted as absolute size.
            If float, it is interpreted as relative to the maximum size.
        denoising (callable|None): The denoising function.
            If callable, must have the following signature:
            denoising(np.ndarray, ...) -> np.ndarray.
            It is applied to the real and imaginary part of `np.exp(1j * arr)`
            separately, using `fcn.filter_cx()`.
        denoising_kws (Mappable|None): Keyword arguments.
            These are passed to the function specified in `denoising`.
            If Iterable, must be convertible to a dictionary.
            If None, no keyword arguments will be passed.

    Returns:
        arr (np.ndarray): The unwrapped phase array.

    See Also:
        - Volkov, Vyacheslav V., and Yimei Zhu. “Deterministic Phase
          Unwrapping in the Presence of Noise.” Optics Letters 28, no. 22
          (November 15, 2003): 2156–58. https://doi.org/10.1364/OL.28.002156.
    """
    arr, mask = fcn.padding(arr, pad_width)

    arr = np.exp(1j * arr)
    if callable(denoising):
        denoising_kws = dict(denoising_kws) \
            if denoising_kws is not None else {}
        arr = fcn.filter_cx(arr, denoising, (), denoising_kws)
    kks = [
        fftshift(kk)
        for kk in fcn.gradient_kernels(arr.shape, factors=arr.shape)
    ]
    grads = np.gradient(arr)

    u_arr = np.zeros(arr.shape, dtype=complex)
    kk2 = np.zeros(arr.shape, dtype=complex)
    for kk, grad in zip(kks, grads):
        u_arr += -1j * kk * fftn(np.real(-1j * grad / arr))
        kk2 += kk**2
    fcn.apply_at(kk2, lambda x: 1 / x, kk2 != 0, in_place=True)
    arr = np.real(ifftn(kk2 * u_arr)) / (2 * np.pi)
    return arr[mask]
Example #59
    def compute_kernel(self):
        # TODO: Most of this stuff is duplicated in MeanEstimator - move up the hierarchy?
        n = self.n
        L = self.L
        _2L = 2 * self.L

        kernel = np.zeros((_2L, _2L, _2L, _2L, _2L, _2L), dtype=self.dtype)
        sq_filters_f = self.src.eval_filter_grid(self.L, power=2)

        for i in tqdm(range(0, n, self.batch_size)):
            _range = np.arange(i, min(n, i + self.batch_size))
            pts_rot = rotated_grids(L, self.src.rots[_range, :, :])
            weights = sq_filters_f[:, :, _range]
            weights *= self.src.amplitudes[_range]**2

            if L % 2 == 0:
                weights[0, :, :] = 0
                weights[:, 0, :] = 0

            # TODO: This is where this differs from MeanEstimator
            pts_rot = np.moveaxis(pts_rot, -1, 0).reshape(-1, 3, L**2)
            weights = weights.T.reshape((-1, L**2))

            batch_n = weights.shape[0]
            factors = np.zeros((batch_n, _2L, _2L, _2L), dtype=self.dtype)

            for j in range(batch_n):
                factors[j] = anufft(weights[j],
                                    pts_rot[j], (_2L, _2L, _2L),
                                    real=True)

            factors = Volume(factors).to_vec()
            kernel += vecmat_to_volmat(factors.T @ factors) / (n * L**8)

        # Ensure symmetric kernel
        kernel[0, :, :, :, :, :] = 0
        kernel[:, 0, :, :, :, :] = 0
        kernel[:, :, 0, :, :, :] = 0
        kernel[:, :, :, 0, :, :] = 0
        kernel[:, :, :, :, 0, :] = 0
        kernel[:, :, :, :, :, 0] = 0

        logger.info("Computing non-centered Fourier Transform")
        kernel = mdim_ifftshift(kernel, range(0, 6))
        kernel_f = fftn(kernel)
        # Kernel is always symmetric in spatial domain and therefore real in Fourier
        kernel_f = np.real(kernel_f)

        return FourierKernel(kernel_f, centered=False)
Example #60
    def generate_fractal_volume(self, G):
        """Generate a 3D volume with a fractal distribution.

        Args:
            G (class): Grid class instance - holds essential parameters describing the model.
        """

        # Scale filter according to size of fractal volume
        if self.nx == 1:
            filterscaling = np.amin(np.array([self.ny, self.nz])) / np.array([self.ny, self.nz])
            filterscaling = np.insert(filterscaling, 0, 1)
        elif self.ny == 1:
            filterscaling = np.amin(np.array([self.nx, self.nz])) / np.array([self.nx, self.nz])
            filterscaling = np.insert(filterscaling, 1, 1)
        elif self.nz == 1:
            filterscaling = np.amin(np.array([self.nx, self.ny])) / np.array([self.nx, self.ny])
            filterscaling = np.insert(filterscaling, 2, 1)
        else:
            filterscaling = np.amin(np.array([self.nx, self.ny, self.nz])) / np.array([self.nx, self.ny, self.nz])

        # Adjust weighting to account for filter scaling
        self.weighting = np.multiply(self.weighting, filterscaling)

        self.fractalvolume = np.zeros((self.nx, self.ny, self.nz), dtype=complextype)

        # Positional vector at centre of array, scaled by weighting
        v1 = np.array([self.weighting[0] * self.nx / 2, self.weighting[1] * self.ny / 2, self.weighting[2] * self.nz / 2])

        # 3D array of random numbers to be convolved with the fractal function
        rng = np.random.default_rng(seed=self.seed)
        A = rng.standard_normal(size=(self.nx, self.ny, self.nz))

        # 3D FFT
        A = fftpack.fftn(A)
        # Shift the zero frequency component to the centre of the array
        A = fftpack.fftshift(A)

        # Generate fractal
        generate_fractal3D(self.nx, self.ny, self.nz, G.nthreads, self.b, self.weighting, v1, A, self.fractalvolume)

        # Shift the zero frequency component to the start of the array
        self.fractalvolume = fftpack.ifftshift(self.fractalvolume)
        # Take the real part (numerical errors can give rise to an imaginary part) of the IFFT
        self.fractalvolume = np.real(fftpack.ifftn(self.fractalvolume))
        # Bin fractal values
        bins = np.linspace(np.amin(self.fractalvolume), np.amax(self.fractalvolume), self.nbins)
        for j in range(self.ny):
            for k in range(self.nz):
                self.fractalvolume[:, j, k] = np.digitize(self.fractalvolume[:, j, k], bins, right=True)