Example #1
def compare_interpolated_spectrum():
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111)

    out = fft(ifftshift(f_full))
    freqs = fftfreq(len(f_full), d=0.01) # spacing, Ang
    sfreqs = fftshift(freqs)
    taper = gauss_taper(freqs, sigma=0.0496) #Ang, corresponds to 2.89 km/s at 5150A.
    tout = out * taper

    ax.plot(sfreqs, fftshift(tout))

    wl_h, fl_h = np.abs(np.load("PH6.8kms_0.01ang.npy"))
    wl_l, fl_l = np.abs(np.load("PH2.5kms.npy"))

    # trim the end edges
    wl_he = wl_h[200:-200]
    fl_he = fl_h[200:-200]
    interp = Sinc_w(wl_l, fl_l, a=5, window='kaiser')
    fl_hi = interp(wl_he)

    d = wl_he[1] - wl_he[0]
    out = fft(ifftshift(fl_hi))
    freqs = fftfreq(len(out), d=d)
    ax.plot(fftshift(freqs), fftshift(out))

    plt.show()
def pseudofrosch(x,k,tmax,dt):
    """Split-step (pseudo-spectral) time propagation of psi0 on grid x with wavenumbers k."""
    global potential, psi0
    ti=[0]
    fi=[psi0(x)]
    mmax=int((tmax)/dt+1)
    for m in range(1,mmax,1):
        tn=ti[m-1]
        fn=fi[m-1]
        fn=np.array(fn)
        dp1=e**(-1j*potential(x)*dt/2)*fn  # half time step with the potential
        
        
        ft_fn=ft.fft(dp1) # "falschrum": the wrong way round
        ft_fn=ft.ifftshift(ft_fn)
        
        dp2=e**(-1j*1/2*k**2*dt)*ft_fn
        
        ift_fn=ft.ifft(dp2)
        ift_fn=ft.ifftshift(ift_fn)
        dp3=e**(-1j*potential(x)*dt/2)*ift_fn
        
        dp3=list(dp3)
        
        ti+=[tn+dt]
        fi=fi+[dp3]
    return [ti,fi]
Example #3
def odddftups(inp,nor=None,noc=None,usfac=1,roff=0,coff=0):
    from numpy.fft import ifftshift
    from numpy import pi,newaxis,floor

    nr,nc=np.shape(inp);

    # Set defaults
    if noc is None: noc=nc;
    if nor is None: nor=nr;

    if nr % 2 == 1:
        oddr = True
        nrnew = nr+1
    else:
        oddr = False
    if nc % 2 == 1:
        oddc = True
        ncnew = nc+1
    else:
        oddc = False

    # Compute kernels and obtain DFT by matrix products
    term1c = ( ifftshift(np.arange(nc) - floor(nc/2)).T[:,newaxis] )
    term2c = ( np.arange(noc) - coff  )[newaxis,:]
    kernc=np.exp((-1j*2*pi/(nc*usfac))*term1c*term2c);
    term1r = ( np.arange(nor).T - roff )[:,newaxis]
    term2r = ( ifftshift(np.arange(nr)) - floor(nr/2) )[newaxis,:]
    kernr=np.exp((-1j*2*pi/(nr*usfac))*term1r*term2r);
    #kernc=exp((-i*2*pi/(nc*usfac))*( ifftshift([0:nc-1]).' - floor(nc/2) )*( [0:noc-1] - coff ));
    #kernr=exp((-i*2*pi/(nr*usfac))*( [0:nor-1].' - roff )*( ifftshift([0:nr-1]) - floor(nr/2)  ));
    out=np.dot(np.dot(kernr,inp),kernc);
    #return np.roll(np.roll(out,+1,axis=0),+1,axis=1)
    return out 
Example #4
def whiten_olsh_lee_inner(image, f_0=None, central_clip=(None, None), normalize_pre=True, normalize_post=True,
                          no_filter=False):
    height, width = image.shape
    assert height % 2 == 0 and width % 2 == 0, "image must have even size!"
    if normalize_pre:
        image = image - image.mean()  # I personally think this is useless, since rho will make the (0,0) freq component 0.
        std_im = image.std(ddof=1)
        assert std_im != 0, "constant image unsupported!"
        image /= std_im

    fx, fy = np.meshgrid(np.arange(-height / 2, height / 2), np.arange(-width / 2, width / 2), indexing='ij')
    rho = np.sqrt(fx * fx + fy * fy)
    if f_0 is None:
        f_0 = 0.4 * (height + width) / 2
    filt = rho * np.exp(-((rho / f_0) ** 4))

    im_f = fft2(image)
    if not no_filter:
        fft_filtered_old = im_f * ifftshift(filt)
    else:  # hack to only lower frequency response.
        print('no real filtering!')
        fft_filtered_old = im_f
    fft_filtered_old = fftshift(fft_filtered_old)
    if central_clip != (None, None):
        fft_filtered_old = fft_filtered_old[height // 2 - central_clip[0] // 2:height // 2 + central_clip[0] // 2,
                           width // 2 - central_clip[1] // 2:width // 2 + central_clip[1] // 2]
    im_out = np.real(ifft2(ifftshift(fft_filtered_old)))
    # I believe since the rho at the (0,0) frequency part is zero, then the whole image should be zero as well.
    # so explicit DC removing is useless.
    if normalize_post:
        assert abs(im_out.mean()) < 1e-6  # should be extremely small.
        std_im_out = im_out.std(ddof=1)
    else:
        std_im_out = 1
    return im_out / std_im_out
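
A minimal usage sketch (not from the original source; it assumes whiten_olsh_lee_inner and the numpy.fft names it relies on are importable in the same namespace):

# Hedged usage sketch for whiten_olsh_lee_inner.
import numpy as np

rng = np.random.default_rng(0)
image = rng.standard_normal((128, 128))            # even-sized image, as the assert requires
white = whiten_olsh_lee_inner(image, central_clip=(64, 64))
print(white.shape, white.std(ddof=1))              # (64, 64), ~1.0 after post-normalization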
Example #5
    def _simulate_image(self):
        """
        Generates the fake output.
        """
        with self._acquisition_init_lock:
            pos = self.align.position.value
            logging.debug("Simulating image shift by %s", pos)
            ac, bc = pos.get("a"), pos.get("b")
            ang = math.radians(135)
            # AB->XY
            xc = -(ac * math.sin(ang) + bc * math.cos(ang))
            yc = -(ac * math.cos(ang) - bc * math.sin(ang))
            pixelSize = self.fake_img.metadata[model.MD_PIXEL_SIZE]
            self.fake_img.metadata[model.MD_ACQ_DATE] = time.time()
            x_pxs = xc / pixelSize[0]
            y_pxs = yc / pixelSize[1]

            # Image shifted based on LensAligner position
            z = 1j  # imaginary unit
            self.deltar = x_pxs
            self.deltac = y_pxs
            nr, nc = self.fake_img.shape
            array_nr = numpy.arange(-numpy.fix(nr / 2), numpy.ceil(nr / 2))
            array_nc = numpy.arange(-numpy.fix(nc / 2), numpy.ceil(nc / 2))
            Nr = fft.ifftshift(array_nr)
            Nc = fft.ifftshift(array_nc)
            [Nc, Nr] = numpy.meshgrid(Nc, Nr)
            sim_img = fft.ifft2(fft.fft2(self.fake_img) * numpy.power(math.e,
                            z * 2 * math.pi * (self.deltar * Nr / nr + self.deltac * Nc / nc)))
            output = model.DataArray(abs(sim_img), self.fake_img.metadata)
            return output
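
For reference, a standalone NumPy sketch (not the Odemis code above) of the same FFT phase-ramp trick used to shift an image by a possibly fractional number of pixels:

import numpy as np
from numpy import fft

def fft_shift(img, deltar, deltac):
    nr, nc = img.shape
    Nr = fft.ifftshift(np.arange(-np.fix(nr / 2), np.ceil(nr / 2)))
    Nc = fft.ifftshift(np.arange(-np.fix(nc / 2), np.ceil(nc / 2)))
    Nc, Nr = np.meshgrid(Nc, Nr)
    ramp = np.exp(2j * np.pi * (deltar * Nr / nr + deltac * Nc / nc))
    return np.abs(fft.ifft2(fft.fft2(img) * ramp))

img = np.zeros((8, 8))
img[2, 3] = 1.0
shifted = fft_shift(img, 1, 2)
# with the +2*pi*1j sign used above, the content moves by (-deltar, -deltac):
# the impulse ends up at [1, 1]
print(np.unravel_index(shifted.argmax(), shifted.shape))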
Example #6
 def test_axes_keyword(self):
     freqs = [[ 0,  1,  2], [ 3,  4, -4], [-3, -2, -1]]
     shifted = [[-1, -3, -2], [ 2,  0,  1], [-4,  3,  4]]
     assert_array_almost_equal(fftshift(freqs, axes=(0, 1)), shifted)
     assert_array_almost_equal(fftshift(freqs, axes=0), fftshift(freqs, axes=(0,)))
     assert_array_almost_equal(ifftshift(shifted, axes=(0, 1)), freqs)
     assert_array_almost_equal(ifftshift(shifted, axes=0), ifftshift(shifted, axes=(0,)))
Example #7
    def fft_g2r(self, fg, fg_ishifted=False):
        """
        FFT of array ``fg`` given in G-space.
        """
        ndim, shape = fg.ndim, fg.shape

        if ndim == 1:
            fg = np.reshape(fg, self.shape)
            return self.fft_g2r(fg, fg_ishifted=fg_ishifted).flatten()

        if ndim == 3:
            assert self.size == np.prod(shape[-3:])
            if fg_ishifted: fg = ifftshift(fg)
            fr = ifftn(fg)

        elif ndim > 3:
            assert self.size == np.prod(shape[-3:])
            axes = np.arange(ndim)[-3:]
            if fg_ishifted: fg = ifftshift(fg, axes=axes)
            fr = ifftn(fg, axes=axes)

        else:
            raise NotImplementedError("ndim < 3 are not supported")

        return fr * self.size
Example #8
def _UpsampledDFT(data, nor, noc, precision=1, roff=0, coff=0):
    """
    Upsampled DFT by matrix multiplies.
    data (numpy.array): 2d array
    nor, noc (ints): Number of pixels in the output upsampled DFT, in units
    of upsampled pixels
    precision (int): Calculate drift within 1/precision of a pixel
    roff, coff (ints): Row and column offsets, allow to shift the output array
                    to a region of interest on the DFT
    returns (numpy.array): Upsampled DFT of the input data
    """
    z = 1j  # imaginary unit
    nr, nc = data.shape

    # Compute kernels and obtain DFT by matrix products
    kernc = numpy.power(math.e, (-z * 2 * math.pi / (nc * precision)) *
                                ((fft.ifftshift(arange(0, nc))[:, None]).T - nc // 2) *
                                (arange(0, noc) - coff)[:, None]
                       )

    kernr = numpy.power(math.e, (-z * 2 * math.pi / (nr * precision)) *
                                (fft.ifftshift(arange(0, nr))[:, None] - nr // 2) *
                                ((arange(0, nor)[:, None]).T - roff)
                       )

    return numpy.dot(numpy.dot((kernr.transpose()), data), kernc.transpose())
Example #9
def gs_mod_gpu(idata,itera=10,osize=256):
    
    
    cut=osize//2
    
    pl=cl.get_platforms()[0]
    devices=pl.get_devices(device_type=cl.device_type.GPU)
    ctx = cl.Context(devices=[devices[0]])
    queue = cl.CommandQueue(ctx)

    plan = Plan(idata.shape, queue=queue,dtype=complex128) # does not work with the string "complex128"
    
    src = str(Template(KERNEL).render(
        double_support=all(
            has_double_support(dev) for dev in devices),
        amd_double_support=all(
            has_amd_double_support(dev) for dev in devices)
        ))
    prg = cl.Program(ctx,src).build() 
    

    idata_gpu=cl_array.to_device(queue, ifftshift(idata).astype("complex128"))
    fdata_gpu=cl_array.empty_like(idata_gpu)
    rdata_gpu=cl_array.empty_like(idata_gpu)
    plan.execute(idata_gpu.data,fdata_gpu.data)
    
    mask=exp(2.j*pi*random(idata.shape))
    mask[512-cut:512+cut,512-cut:512+cut]=0
    
    
    idata_gpu=cl_array.to_device(queue, ifftshift(idata+mask).astype("complex128"))
    fdata_gpu=cl_array.empty_like(idata_gpu)
    rdata_gpu=cl_array.empty_like(idata_gpu)
    error_gpu=cl_array.to_device(ctx, queue, zeros(idata_gpu.shape).astype("double"))
    plan.execute(idata_gpu.data,fdata_gpu.data)
    
    e=1000
    ea=1000
    for i in range (itera):
        prg.norm(queue, fdata_gpu.shape, None,fdata_gpu.data)
        plan.execute(fdata_gpu.data,rdata_gpu.data,inverse=True)
        #~ prg.norm1(queue, rdata_gpu.shape,None,rdata_gpu.data,idata_gpu.data,error_gpu.data, int32(cut))
        norm1=prg.norm1
        norm1.set_scalar_arg_dtypes([None, None, None, int32])
        norm1(queue, rdata_gpu.shape,None,rdata_gpu.data,idata_gpu.data,error_gpu.data, int32(cut))
        
        e= sqrt(cl_array.sum(error_gpu).get())/(2*cut)

        #~ if e>ea: 
           #~ 
            #~ break
        #~ ea=e
        plan.execute(rdata_gpu.data,fdata_gpu.data)
    
    fdata=fdata_gpu.get()
    fdata=ifftshift(fdata)
    fdata=exp(1.j*angle(fdata))
    return fdata
Example #10
def dftups(inp,nor=None,noc=None,usfac=1,roff=0,coff=0):
    """
    Translated from matlab:

     * `Original Source <http://www.mathworks.com/matlabcentral/fileexchange/18401-efficient-subpixel-image-registration-by-cross-correlation/content/html/efficient_subpixel_registration.html>`_
     * Manuel Guizar - Dec 13, 2007
     * Modified from dftus, by J.R. Fienup 7/31/06

    Upsampled DFT by matrix multiplies, can compute an upsampled DFT in just
    a small region.

    This code is intended to provide the same result as if the following
    operations were performed:

      * Embed the array "in" in an array that is usfac times larger in each
        dimension. ifftshift to bring the center of the image to (1,1).
      * Take the FFT of the larger array
      * Extract an [nor, noc] region of the result. Starting with the
        [roff+1 coff+1] element.

    It achieves this result by computing the DFT in the output array without
    the need to zeropad. Much faster and memory efficient than the
    zero-padded FFT approach if [nor noc] are much smaller than [nr*usfac nc*usfac]

    Parameters
    ----------
    usfac : int
        Upsampling factor (default usfac = 1)
    nor,noc : int,int
        Number of pixels in the output upsampled DFT, in units of upsampled
        pixels (default = size(in))
    roff, coff : int, int
        Row and column offsets, allow to shift the output array to a region of
        interest on the DFT (default = 0)
    """
    # this function is translated from matlab, so I'm just going to pretend
    # it is matlab/pylab
    from numpy.fft import ifftshift,fftfreq
    from numpy import pi,newaxis,floor

    nr,nc=np.shape(inp);
    # Set defaults
    if noc is None: noc=nc;
    if nor is None: nor=nr;
    # Compute kernels and obtain DFT by matrix products
    term1c = ( ifftshift(np.arange(nc,dtype='float') - floor(nc/2)).T[:,newaxis] )/nc # fftfreq
    term2c = (( np.arange(noc,dtype='float') - coff  )/usfac)[newaxis,:]              # output points
    kernc=np.exp((-1j*2*pi)*term1c*term2c);

    term1r = ( np.arange(nor,dtype='float').T - roff )[:,newaxis]                # output points
    term2r = ( ifftshift(np.arange(nr,dtype='float')) - floor(nr/2) )[newaxis,:] # fftfreq
    kernr=np.exp((-1j*2*pi/(nr*usfac))*term1r*term2r);
    #kernc=exp((-i*2*pi/(nc*usfac))*( ifftshift([0:nc-1]).' - floor(nc/2) )*( [0:noc-1] - coff ));
    #kernr=exp((-i*2*pi/(nr*usfac))*( [0:nor-1].' - roff )*( ifftshift([0:nr-1]) - floor(nr/2)  ));
    out=np.dot(np.dot(kernr,inp),kernc);
    #return np.roll(np.roll(out,-1,axis=0),-1,axis=1)
    return out 
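
A hedged sanity check (not part of the original source, assuming dftups and numpy are importable together): with usfac=1 and default output size/offsets the kernels reduce to the ordinary DFT exponentials, so dftups should match numpy.fft.fft2 for even-sized input.

import numpy as np

x = np.random.rand(8, 6)
assert np.allclose(dftups(x), np.fft.fft2(x))

# Upsampled use, as in subpixel registration: a 10x10 patch of the DFT,
# sampled on a 4x finer grid around the origin.
patch = dftups(x, nor=10, noc=10, usfac=4, roff=0, coff=0)
print(patch.shape)   # (10, 10)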
Example #11
 def test_definition(self):
     x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
     y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
     assert_array_almost_equal(fft.fftshift(x), y)
     assert_array_almost_equal(fft.ifftshift(y), x)
     x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
     y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
     assert_array_almost_equal(fft.fftshift(x), y)
     assert_array_almost_equal(fft.ifftshift(y), x)
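
The even/odd cases exercised here touch the usual gotcha: for odd-length arrays fftshift is not its own inverse, which is exactly what ifftshift is for. A small illustration:

import numpy as np
from numpy.fft import fftshift, ifftshift

x = np.arange(5)                     # odd length
print(fftshift(fftshift(x)))         # [1 2 3 4 0], not back to the original
print(ifftshift(fftshift(x)))        # [0 1 2 3 4], round-trips correctly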
Example #12
def plot_sinc_windows():
    fig, ax = plt.subplots(nrows=2, figsize=(8, 8))
    xs = np.linspace(-2, 2., num=200)
    xs4 = np.linspace(-5, 5, num=500)
    y2s = sinc_w(xs, 'lanczos')
    y4s = sinc_w(xs4, 'lanczos', a=5)
    yks = sinc_w(xs4, 'kaiser', a=5, alpha=5)
    yks2 = sinc_w(xs, 'kaiser', a=2, alpha=5)
    ax[0].plot(xs, y2s, "b", label='Lanczos, a=2')
    ax[0].plot(xs4, y4s, 'g', label='Lanczos, a=5')
    ax[0].plot(xs4, yks, "r", label='Kaiser 5, a=5')
    ax[0].plot(xs, yks2, "c", label='Kaiser 5, a=2')
    #ax[0].plot(xs,sinc_w(xs, 'hann'),label='Hann')
    #ax[0].plot(xs,sinc_w(xs, 'kaiser',alpha=5),label='Kaiser=5')
    #ax[0].plot(xs,sinc_w(xs, 'kaiser',alpha=10),label='Kaiser=10')
    #xs4 = np.linspace(-4,4,num=100)
    #ax[0].plot(xs4,sinc_w(xs4, 'lanczos', a = 4), label='Lanczos,a=4')

    ax[0].legend()
    ax[0].set_xlabel(r"$\pm a$")

    #n=400 #zeropadd FFT
    #freqs2 = fftfreq(len(y2s),d=xs[1]-xs[0])
    #freqs4 =fftfreq(400,d=xs4[1]-xs4[0])
    ysh = ifftshift(y2s)
    pady = np.concatenate((ysh[:100], np.zeros((1000,)), ysh[100:]))
    freq2 = fftshift(fftfreq(len(pady), d=0.02))

    ys4h = ifftshift(y4s)

    pad4y = np.concatenate((ys4h[:250], np.zeros((2000,)), ys4h[250:]))

    freq4 = fftshift(fftfreq(len(pad4y), d=0.02))
    fpady = fft(pady)
    fpad4y = fft(pad4y)
    ax[1].plot(freq2, 10 * np.log10(np.abs(fftshift(fpady / fpady[0]))))
    ax[1].plot(freq4, 10 * np.log10(np.abs(fftshift(fpad4y / fpad4y[0]))))

    ysk = ifftshift(yks)
    padk = np.concatenate((ysk[:250], np.zeros((2000,)), ysk[250:]))
    fpadk = fft(padk)
    ax[1].plot(freq4, 10 * np.log10(np.abs(fftshift(fpadk / fpadk[0]))))
    ysk2 = ifftshift(yks2)
    padk2 = np.concatenate((ysk2[:100], np.zeros((1000,)), ysk2[100:]))
    fpadk2 = fft(padk2)
    ax[1].plot(freq2, 10 * np.log10(np.abs(fftshift(fpadk2 / fpadk2[0]))))
    #ax[1].plot(freqs4, fft(ifftshift(
    #ax[1].plot(freqs, get_db_response(xs, 'hann'),label='Hann')
    #ax[1].plot(freqs, get_db_response(xs, 'kaiser',alpha=5),label='Kaiser=5')
    #ax[1].plot(freqs, get_db_response(xs, 'kaiser',alpha=10),label='Kaiser=10')
    #ax[1].legend()
    ax[1].set_ylabel("dB")
    ax[1].set_xlabel("cycles/a")
    plt.show()
Example #13
def apply_filter(data_array, filter_array,
                 fft_multiplier=None, ifft_multiplier=None,
                 output_multiplier=None, apply_window_func=False,
                 window_shift=None, invert_filter=False):
    """FFT image cube, apply mask and iFFT the output back to image domain.

    Parameters
    ----------
    data_array : ndarray
        Image cube.
    filter_array : ndarray
        Filter to multiply with FFT(data_array). Same shape as data_array.
        Assume (min, max) = (0, 1).
    fft_multiplier : float, optional
        Scalar to multiply the FFT output by before masking.
    ifft_multiplier : float, optional
        Scalar to multiply the masked array by before the inverse FFT.
    output_multiplier : float, optional
        Scalar to multiply the output by.
    apply_window_func : bool, optional
        Apply a Nuttall window function to the first dimension of
        the data_array before the FFT.
    window_shift : int
        Shift the window function by this many pixels.
        Negative values shift to the left.
    invert_filter: bool, optional
        Invert the filter (1 - filter_array) before applying. Default is False.

    Returns
    -------
    out: ndarray
        Masked image cube.

    """
    if apply_window_func:
        k = nuttall(data_array.shape[0])
        if window_shift:
            k = np.roll(k, window_shift)
        w = (np.ones_like(data_array).T * k).T
    else:
        w = np.ones_like(data_array)
    data_fft = fftshift(fftn(ifftshift(data_array * w)))
    if fft_multiplier is not None:
        data_fft *= fft_multiplier
    if invert_filter:
        filter_array = 1 - filter_array
    data_fft *= filter_array
    out = fftshift(ifftn(ifftshift(data_fft))) / w
    if ifft_multiplier is not None:
        out *= ifft_multiplier
    if output_multiplier is not None:
        out *= output_multiplier
    return out.real
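
A minimal usage sketch (assuming the numpy.fft names used by apply_filter are in scope):

import numpy as np

cube = np.random.rand(32, 16, 16)      # (freq, y, x) image cube
filt = np.zeros_like(cube)
filt[10:22] = 1.0                      # keep only the central band of (shifted) Fourier modes
filtered = apply_filter(cube, filt)
print(filtered.shape)                  # same shape as the input cube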
Example #14
def fast_hessian(img, sigma=3.0):
    """Calculates Hessian in *frequency space* via fft2

    -4π² [ μ² iL(μ,ν)   μν iL(μ,ν) ]
         [ μν iL(μ,ν)   ν² iL(μ,ν) ]

    where iL(μ,ν) is the inverse 2D FFT of the image

    """
    #ffx = partial(rfft2, s=img.shape)
    #iffx = partial(irfft2, s=img.shape)
    
    ffx = fft2
    iffx = ifft2
    iL = ffx(img)
    
    SHIFT = False

    # coordinate matrices [[ u^2, uv], [uv, v^2]]
    cxx = np.fromfunction(lambda i,j: i**2, iL.shape, dtype='float')
    cxy = np.fromfunction(lambda i,j: i*j, iL.shape, dtype='float')
    cyy = np.fromfunction(lambda i,j: j**2, iL.shape, dtype='float')
    
    if SHIFT:
        cxx = fftshift(cxx)
        cxy = fftshift(cxy)
        cyy = fftshift(cyy)

    # elementwise multiplication
    hxx = -4*np.pi**2 * cxx * iL
    hxy = -4*np.pi**2 * cxy * iL
    hyy = -4*np.pi**2 * cyy * iL
    
    exparg = -(cxx + cyy) / (2*np.pi**2 * sigma**2)
    #A = 1 / (2*np.pi*sigma**2)
    A = 1
    fgauss = A*np.exp(exparg)

    hxx = fgauss * hxx
    hxy = fgauss * hxy
    hyy = fgauss * hyy
    
    Hxx = iffx(hxx)
    Hxy = iffx(hxy)
    Hyy = iffx(hyy)

    if SHIFT:
        Hxx = ifftshift(Hxx)
        Hxy = ifftshift(Hxy)
        Hyy = ifftshift(Hyy)

    return  Hxx, Hxy, Hyy
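
A hedged usage sketch, applying fast_hessian to a small Gaussian blob (assumes fft2/ifft2 come from numpy.fft, as the snippet implies):

import numpy as np

yy, xx = np.mgrid[:64, :64]
blob = np.exp(-((xx - 32) ** 2 + (yy - 32) ** 2) / (2 * 4.0 ** 2))
Hxx, Hxy, Hyy = fast_hessian(blob, sigma=3.0)
det = Hxx.real * Hyy.real - Hxy.real ** 2    # blob detector: determinant of the Hessian
print(det.shape)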
Example #15
    def setUp(self):
        # Input
        self.data = hdf5.read_data("example_input.h5")
        C, T, Z, Y, X = self.data[0].shape
        self.data[0].shape = Y, X
        self.small_data = self.data[0][350:400, 325:375]

        # Input drifted by known value
        self.data_drifted = hdf5.read_data("example_drifted.h5")
        C, T, Z, Y, X = self.data_drifted[0].shape
        self.data_drifted[0].shape = Y, X

        # Input drifted by random value
        z = 1j  # imaginary unit
        self.deltar = numpy.random.uniform(-100, 100)
        self.deltac = numpy.random.uniform(-100, 100)
        nr, nc = self.data[0].shape
        array_nr = numpy.arange(-numpy.fix(nr / 2), numpy.ceil(nr / 2))
        array_nc = numpy.arange(-numpy.fix(nc / 2), numpy.ceil(nc / 2))
        Nr = fft.ifftshift(array_nr)
        Nc = fft.ifftshift(array_nc)
        [Nc, Nr] = numpy.meshgrid(Nc, Nr)
        self.data_random_drifted = fft.ifft2(fft.fft2(self.data[0]) * numpy.power(math.e,
        				z * 2 * math.pi * (self.deltar * Nr / nr + self.deltac * Nc / nc)))

        # Noisy inputs
        noise = random.normal(0, 3000, self.data[0].size)
        noise_array = noise.reshape(self.data[0].shape[0], self.data[0].shape[1])

        self.data_noisy = self.data[0] + noise_array
        self.data_drifted_noisy = self.data_drifted[0] + noise_array
        self.data_random_drifted_noisy = self.data_random_drifted + noise_array

        # Small input drifted by random value
        self.small_deltar = numpy.random.uniform(-10, 10)
        self.small_deltac = numpy.random.uniform(-10, 10)
        nr, nc = self.small_data.shape
        array_nr = numpy.arange(-numpy.fix(nr / 2), numpy.ceil(nr / 2))
        array_nc = numpy.arange(-numpy.fix(nc / 2), numpy.ceil(nc / 2))
        Nr = fft.ifftshift(array_nr)
        Nc = fft.ifftshift(array_nc)
        [Nc, Nr] = numpy.meshgrid(Nc, Nr)
        self.small_data_random_drifted = fft.ifft2(fft.fft2(self.small_data) * numpy.power(math.e,
        				z * 2 * math.pi * (self.small_deltar * Nr / nr + self.small_deltac * Nc / nc)))

        # Small noisy inputs
        small_noise = random.normal(0, 3000, self.small_data.size)
        small_noise_array = small_noise.reshape(self.small_data.shape[0], self.small_data.shape[1])

        self.small_data_noisy = self.small_data + small_noise_array
        self.small_data_random_drifted_noisy = self.small_data_random_drifted + small_noise_array
Example #16
def gs_mod(idata,itera=10,osize=256):
    """Modiffied Gerchberg-Saxton algorithm to calculate DOEs
    
    Calculates the phase distribution in a object plane to obtain an 
    specific amplitude distribution in the target plane. It uses a 
    FFT to calculate the field propagation.
    The wavefront at the DOE plane is assumed as a plane wave.
    This algorithm leaves a window around the image plane to allow the 
    noise to move there. It only optimises the center of the image.
    
    **ARGUMENTS:**
	
		========== ======================================================
		idata      numpy array containing the target amplitude distribution 
        itera      Maximum number of iterations
        osize      Size of the center of the image to be optimized
                   It should be smaller than the image itself.
		========== ======================================================
    """
    M,N=idata.shape
    cut=osize//2
    
    
    zone=zeros_like(idata)
    zone[M//2-cut:M//2+cut, N//2-cut:N//2+cut]=1  # integer division so the indices work in Python 3
    zone=zone.astype(bool)

    mask=exp(2.j*pi*random(idata.shape))
    mask[zone]=0
    
    #~ imshow(abs(mask)),colorbar()
    
    fdata=fftshift(fft2(ifftshift(idata+mask))) # Note: adding this mask is very important; without it the algorithm does not converge as fast
    
    e=1000
    ea=1000
    for i in range (itera):
        fdata=exp(1.j*angle(fdata))

        rdata=ifftshift(ifft2(fftshift(fdata)))
        #~ e= (abs(rdata[zone])-idata[zone]).std()
        #~ if e>ea: 
           #~ 
            #~ break
        ea=e
        rdata[zone]=exp(1.j*angle(rdata[zone]))*(idata[zone])        
        fdata=fftshift(fft2(ifftshift(rdata)))   
    fdata=exp(1.j*angle(fdata))
    return fdata
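
A hedged usage sketch (assumes the pylab-style names used by gs_mod, such as zeros_like, exp, pi, random, angle and the FFT helpers, are in scope):

import numpy as np

target = np.zeros((512, 512))
target[200:312, 200:312] = 1.0                 # desired amplitude in the image plane
doe_phase = gs_mod(target, itera=20, osize=256)
print(doe_phase.dtype, np.allclose(np.abs(doe_phase), 1.0))   # unit-amplitude phase mask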
Example #17
def delayseq(x, delay_sec:float, fs:int):
    """
    x: input 1-D signal
    delay_sec: amount to shift signal [seconds]
    fs: sampling frequency [Hz]

    xs: time-shifted signal
    """

    assert x.ndim == 1, 'only 1-D signals for now'

    delay_samples = delay_sec*fs
    delay_int = round(delay_samples)

    nfft = nextpow2(x.size+delay_int)

    fbins = 2*pi*ifftshift((arange(nfft)-nfft//2))/nfft

    X = fft(x,nfft)
    Xs = ifft(X*exp(-1j*delay_samples*fbins))

    if isreal(x[0]):
        Xs = Xs.real

    xs = zeros_like(x)
    xs[delay_int:] = Xs[delay_int:x.size]

    return xs
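
A self-contained sketch of the phase-ramp idea behind delayseq (pure NumPy, without the nextpow2 zero-padding used above); the tone is chosen to be exactly periodic in the window so the circular delay equals a true time shift:

import numpy as np
from numpy.fft import fft, ifft, ifftshift

fs = 100.0
n = 200
t = np.arange(n) / fs
x = np.sin(2 * np.pi * 5.0 * t)                 # exactly 10 cycles in the window
delay_samples = 2.5                             # fractional delay

fbins = 2 * np.pi * ifftshift(np.arange(n) - n // 2) / n
xs = ifft(fft(x) * np.exp(-1j * delay_samples * fbins)).real
print(np.allclose(xs, np.sin(2 * np.pi * 5.0 * (t - delay_samples / fs))))   # True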
Example #18
def gen_wedge_psf(nx, ny, nf, dx, dy, df, z, out, threads=None):
    u = fftshift(fftfreq(nx, dx * np.pi / 180))
    v = fftshift(fftfreq(ny, dy * np.pi / 180))
    e = fftshift(fftfreq(nf, df))

    E = np.sqrt(Cosmo.Om0 * (1 + z) ** 3 +
                Cosmo.Ok0 * (1 + z) ** 2 + Cosmo.Ode0)
    D = Cosmo.comoving_transverse_distance(z).value
    H0 = Cosmo.H0.value * 1e3
    c = const.c.value
    print(E, D, H0)
    kx = u * 2 * np.pi / D
    ky = v * 2 * np.pi / D
    k_perp = np.sqrt(kx ** 2 + ky[np.newaxis, ...].T ** 2)
    k_par = e * 2 * np.pi * H0 * f21 * E / (c * (1 + z) ** 2)
    arr = np.ones((nf, nx, ny), dtype='complex128')
    for i in range(nf):
        mask = (k_perp > np.abs(k_par[i]) * c * (1 + z) / (H0 * E * D))
        arr[i][mask] = 0
    np.save('kx.npy', kx)
    np.save('ky.npy', ky)
    np.save('kpar.npy', k_par)
    np.save('wedge_window.npy', arr.real)
    fft_arr = fftshift(fftn(ifftshift(arr))).real
    hdu = fits.PrimaryHDU(data=fft_arr)
    hdr_dict = dict(cdelt1=dx, cdelt2=dy, cdelt3=df,
                    crpix1=nx/2, crpix2=ny/2, crpix3=nf/2,
                    crval1=0, crval2=0, crval3=0,
                    ctype1='RA---SIN', ctype2='DEC--SIN', ctype3='FREQ',
                    cunit1='deg', cunit2='deg', cunit3='Hz')
    for k, v in hdr_dict.items():
        hdu.header[k] = v
    hdu.writeto(out, clobber=True)
Example #19
def filterDFT(imageMatrix, filterMatrix):
   shiftedDFT = fftshift(fft2(imageMatrix))
   misc.imsave("dft.png", scaleSpectrum(shiftedDFT))

   filteredDFT = shiftedDFT * filterMatrix
   misc.imsave("filtered-dft.png", scaleSpectrum(filteredDFT))
   return ifft2(ifftshift(filteredDFT))
Example #20
File: imtool.py Project: pkgw/pwkit
    def _load(self, path, fft, maxnorm):
        try:
            img = astimage.open(path, "r", eat_warnings=True)
        except Exception as e:
            die("can't open path “%s”: %s", path, e)

        try:
            img = img.simple()
        except Exception as e:
            print("blink: can't convert “%s” to simple 2D sky image; taking " "first plane" % path, file=sys.stderr)
            data = img.read(flip=True)[tuple(np.zeros(img.shape.size - 2))]
            toworld = None
        else:
            data = img.read(flip=True)
            toworld = img.toworld

        if fft:
            from numpy.fft import ifftshift, fft2, fftshift

            data = np.abs(ifftshift(fft2(fftshift(data.filled(0)))))
            data = np.ma.MaskedArray(data)
            toworld = None

        if maxnorm:
            data /= np.ma.max(data)

        return data, toworld
Example #21
def create_matching_kernel(source_psf, target_psf, window=None):
    """
    Create a kernel to match 2D point spread functions (PSF) using the
    ratio of Fourier transforms.

    Parameters
    ----------
    source_psf : 2D `~numpy.ndarray`
        The source PSF.  The source PSF should have higher resolution
        (i.e. narrower) than the target PSF.  ``source_psf`` and
        ``target_psf`` must have the same shape and pixel scale.

    target_psf : 2D `~numpy.ndarray`
        The target PSF.  The target PSF should have lower resolution
        (i.e. broader) than the source PSF.  ``source_psf`` and
        ``target_psf`` must have the same shape and pixel scale.

    window : callable, optional
        The window (or taper) function or callable class instance used
        to remove high frequency noise from the PSF matching kernel.
        Some examples include:

        * `~photutils.psf.matching.HanningWindow`
        * `~photutils.psf.matching.TukeyWindow`
        * `~photutils.psf.matching.CosineBellWindow`
        * `~photutils.psf.matching.SplitCosineBellWindow`
        * `~photutils.psf.matching.TopHatWindow`

        For more information on window functions and example usage, see
        :ref:`psf_matching`.

    Returns
    -------
    kernel : 2D `~numpy.ndarray`
        The matching kernel to go from ``source_psf`` to ``target_psf``.
        The output matching kernel is normalized such that it sums to 1.
    """

    # inputs are copied so that they are not changed when normalizing
    source_psf = np.copy(np.asanyarray(source_psf))
    target_psf = np.copy(np.asanyarray(target_psf))

    if source_psf.shape != target_psf.shape:
        raise ValueError('source_psf and target_psf must have the same shape '
                         '(i.e. registered with the same pixel scale).')

    # ensure input PSFs are normalized
    source_psf /= source_psf.sum()
    target_psf /= target_psf.sum()

    source_otf = fftshift(fft2(source_psf))
    target_otf = fftshift(fft2(target_psf))
    ratio = target_otf / source_otf

    # apply a window function in frequency space
    if window is not None:
        ratio *= window(target_psf.shape)

    kernel = np.real(fftshift((ifft2(ifftshift(ratio)))))
    return kernel / kernel.sum()
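
A hedged usage sketch with plain NumPy Gaussian PSFs (the photutils window classes listed in the docstring are optional and omitted here):

import numpy as np

yy, xx = np.mgrid[-12:13, -12:13]

def gaussian_psf(sigma):
    g = np.exp(-(xx ** 2 + yy ** 2) / (2 * sigma ** 2))
    return g / g.sum()

kernel = create_matching_kernel(gaussian_psf(2.0), gaussian_psf(4.0))
print(kernel.shape, kernel.sum())   # (25, 25), 1.0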
Example #22
def hg_conv(f, g):
    ub = tuple(array(f.shape) + array(g.shape) - 1)
    fh = rfftn(cpad(f, ub))
    gh = rfftn(cpad(g, ub))
    res = ifftshift(irfftn(fh * sp.conjugate(gh)))
    del fh, gh
    return res
Example #23
def partial( x, order=1, length=None, axis=0):
    """Numerically differentiate `x` using the pseudo-spectral method.

       Parameters
       ----------
       x : array_like
           The periodic data to be differentiated.
       order : int
           The order of the derivative to be computed.
       length : float
           The length of the domain on which the signal was sampled.
       axis : int
           The axis of `x` containing the data to be differentiated.

       Returns
       -------
       dx : array, with the same shape as `x`
           The differentiated data.
       """
    if length is None:
        length = 2 * pi;
    if axis is None:
        axis = -1;

    N = x.shape[axis]
    y = fft(x, axis=axis)
    numvals = shape(y)[0]
    preserveVals = ones(numvals // 6)
    downfilter = exp((arange(numvals // 6 + 1, (numvals // 2) + 1) -
                      numvals / 2.0))[::-1]
    upfilter = exp(-(arange(numvals // 2, (5 * numvals // 6) + 1) -
                     numvals / 2.0))[::-1]
    #print 0, 'to' , numvals/6
    #print len(preserveVals); 
    #print numvals/6+1, 'to', numvals/2 +1
    #print len(downfilter);
    #print numvals/2 + 1, 'to', (5*numvals/6)+1
    #print len(upfilter)
    #print (5*numvals/6)+1, 'to', numvals;
    #print len(preserveVals)
    #downfilter = slopeDown*(arange(numvals/6+1, (numvals/2)+1) - numvals/2.0);
    #upfilter = slopeUp*(arange(numvals/2, (5*numvals/6)+1) - numvals/2.0);


    fftFilter = concatenate((preserveVals, downfilter, upfilter, preserveVals))

    y = (fftFilter * y.transpose()).transpose();
    
    k = array(range(N), dtype=complex128) - N / 2
    k *= 2 * pi * 1j / length
    k = k**order
    shp = ones(len(x.shape), dtype=int)  # shape tuple entries must be integers
    shp[axis] = N
    k.shape = shp

    dy = ifftshift(k) * y
    dx = ifft(dy, axis=axis).real
   # print 'NUMPY'
   # print dx;
    return dx;
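
A self-contained illustration of the pseudo-spectral derivative that `partial` implements: multiply the FFT by (ik)^order, with the wavenumbers reordered via ifftshift.

import numpy as np
from numpy.fft import fft, ifft, ifftshift

N = 128
L = 2 * np.pi
x = np.arange(N) * L / N
u = np.sin(3 * x)

k = (np.arange(N) - N // 2) * 2 * np.pi / L        # wavenumbers, centered
du = ifft(ifftshift(1j * k) * fft(u)).real         # spectral first derivative
print(np.allclose(du, 3 * np.cos(3 * x)))          # True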
Example #24
    def compute(self, scene: Scene):
        """ Compute optical irradiance map
        Computation proccedure:
            1) convert radiance to irradiance
            2) apply lens and macular transmittance
            3) apply off-axis fall-off (cos4th)
            4) apply optical transfert function

        Args:
            scene (pyEyeBall.Scene): instance of Scene class, containing the radiance and other scene information

        Examples:
            >>> oi = Optics()
            >>> oi.compute(Scene())
        """
        # set field of view and wavelength samples
        self.fov = scene.fov
        scene.wave = self._wave
        self.dist = scene.dist

        # compute irradiance
        self.photons = pi / (1 + 4 * self.f_number**2 * (1 + abs(self.magnification))**2) * scene.photons

        # apply ocular transmittance
        self.photons *= self.ocular_transmittance

        # apply the relative illuminant (off-axis) fall-off: cos4th function
        x, y = self.spatial_support
        s_factor = np.sqrt(self.image_distance**2 + x**2 + y**2)
        self.photons *= (self.image_distance / s_factor[:, :, None])**4

        # apply optical transfer function of the optics
        for ii in range(self.wave.size):
            otf = fftshift(self.otf(self._wave[ii], self.frequency_support_x, self.frequency_support_y))
            self.photons[:, :, ii] = np.abs(ifftshift(ifft2(otf * fft2(fftshift(self.photons[:, :, ii])))))
Example #25
def plot_pulse(A_t,A_w,t,w, l0 = 1.550 * micr, t_zoom = pico, l_zoom = 10*nano):
        ## Fix maximum
        if 'A_max' not in plot_pulse.__dict__:
            plot_pulse.A_max = amax(A_t)
            plot_pulse.A_w_max = amax(A_w)

        w0 = 2 * pi * C_SPEED / l0

        ## Plot Time domain
        fig = pl.gcf()
        fig.add_subplot(211)
        pl.plot(t / t_zoom, absolute(A_t/plot_pulse.A_max), hold = False)
        pl.axis([amin(t) / t_zoom*0.4,amax(t) / t_zoom*0.4, 0, 1.1])
        pl.xlabel(r'$time\ (%s s)$'%units[t_zoom])

        atten_win = 0.01
        npoints = len(w)
        apod = arange(npoints)
        apod = exp(-apod/(npoints*atten_win)) + exp(-(npoints-apod)/(npoints*atten_win))
        apod = ifftshift(apod)

        ## Plot Freq domain
        fig.add_subplot(212)
        pl.plot((2 * pi * C_SPEED)/(w+w0)/micr, log10(absolute(A_w)**2/absolute(plot_pulse.A_w_max)**2) * 10, hold=False)
        #pl.plot((2 * pi * C_SPEED)/(w+w0) / micr, log10(absolute(apod)**2/absolute(amax(apod))**2 ) *10, hold=True)
        #pl.semilogy()
        pl.axis([(l0-l_zoom)/micr, (l0+l_zoom)/micr, -60, 5])
        pl.xlabel(r'$wavelength\ (\mu m)$')
        pl.ylabel(r'$spectrum (db)$')

        pl.show()
Example #26
def laplacian_filter(in_file, in_mask=None, out_file=None):
    import numpy as np
    import nibabel as nb
    import os.path as op
    from math import pi
    from numpy.fft import fftn, ifftn, fftshift, ifftshift

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_smooth.nii.gz' % fname)

    im = nb.load(in_file)
    data = im.get_data()

    if in_mask is not None:
        mask = nb.load(in_mask).get_data()
        mask[mask > 0] = 1.0
        mask[mask <= 0] = 0.0
        data *= mask

    dataft = fftshift(fftn(data))
    x = np.linspace(0, 2 * pi, dataft.shape[0])[:, None, None]
    y = np.linspace(0, 2 * pi, dataft.shape[1])[None, :, None]
    z = np.linspace(0, 2 * pi, dataft.shape[2])[None, None, :]
    lapfilt = 2.0 * np.squeeze((np.cos(x) + np.cos(y) + np.cos(z))) - 5.0
    dataft *= fftshift(lapfilt)
    imfilt = np.real(ifftn(ifftshift(dataft)))

    nb.Nifti1Image(imfilt.astype(np.float32), im.get_affine(),
                   im.get_header()).to_filename(out_file)
    return out_file
Example #27
    def symmetrize(self, C_N):
        """Symmetrize force constant matrix."""

        # Number of atoms
        natoms = len(self.indices)
        # Number of unit cells
        N = np.prod(self.N_c)

        # Reshape force constants to (l, m, n) cell indices
        C_lmn = C_N.reshape(self.N_c + (3 * natoms, 3 * natoms))

        # Shift reference cell to center index
        if self.offset == 0:
            C_lmn = fft.fftshift(C_lmn, axes=(0, 1, 2)).copy()
        # Make force constants symmetric in indices -- in case of an even
        # number of unit cells don't include the first
        i, j, k = np.asarray(self.N_c) % 2 - 1
        C_lmn[i:, j:, k:] *= 0.5
        C_lmn[i:, j:, k:] += \
                  C_lmn[i:, j:, k:][::-1, ::-1, ::-1].transpose(0, 1, 2, 4, 3).copy()
        if self.offset == 0:
            C_lmn = fft.ifftshift(C_lmn, axes=(0, 1, 2)).copy()

        # Change to single unit cell index shape
        C_N = C_lmn.reshape((N, 3 * natoms, 3 * natoms))

        return C_N
Example #28
def frp_to_fir(frp, fbins=None):
    '''Transform a fringe rate profile to a fir filter.'''
    frp = ifftshift(frp,axes=-1)
    fir = ifft(frp, axis=-1)
    fir = fftshift(fir, axes=-1)
    if fbins is not None: return fir, fftshift(fftfreq(fbins.size, fbins[1] - fbins[0]))
    else: return fir
Example #29
    def real_space(self):
        """Fourier transform the dynamical matrix to real-space."""

        if not self.assembled:
            self.assemble()

        # Shape of q-point grid
        N_c = self.N_c

        # Reshape before Fourier transforming
        shape = self.D_k.shape
        Dq_lmn = self.D_k.reshape(N_c + shape[1:])
        DR_lmn = fft.ifftn(fft.ifftshift(Dq_lmn, axes=(0, 1, 2)), axes=(0, 1, 2))

        if debug:
            # Check that D_R is real enough
            assert np.all(DR_lmn.imag < 1e-8)
            
        DR_lmn = DR_lmn.real

        # Corresponding R_m vectors in units of the basis vectors
        R_cm = np.indices(N_c).reshape(3, -1)
        N1_c = np.array(N_c)[:, np.newaxis]        
        R_cm += N1_c // 2
        R_cm %= N1_c
        R_cm -= N1_c // 2
        R_clmn = R_cm.reshape((3,) + N_c)

        return DR_lmn, R_clmn
Example #30
def inverse_spectrogram(spec, s_len,
    sample_rate, spec_sample_rate, freq_spacing, min_freq=0, max_freq=None, nstd=6, log=True, noise_level_db=80, rectify=True):
    """turns the complex spectrogram into a signal

    inverts by repeating the process on a string-of-ones
    """

    spec_copy = spec.copy()
    if log:
        spec_copy = 10**(spec_copy)
    spec_tranpose = spec.transpose() # spec_tranpose[time][frequency]

    hnwinlen = len(spec) - 1
    nincrement = int(np.round(float(sample_rate)/spec_sample_rate))

    gauss_t = np.arange(-hnwinlen, hnwinlen+1, 1.0)
    gauss_std = float(2*hnwinlen) / float(nstd)
    gauss_window = np.exp(-gauss_t**2 / (2.0*gauss_std**2)) / (gauss_std*np.sqrt(2*np.pi))
    
    s = np.zeros(s_len + 2*hnwinlen+1)
    w = np.zeros(s_len + 2*hnwinlen+1)

    for i in range(len(spec_tranpose)):
        sample = i * nincrement
        spec_slice = np.concatenate((spec_tranpose[i][:0:-1].conj(), spec_tranpose[i]))
        s[sample:sample+2*hnwinlen+1] += gauss_window * ifft(ifftshift(spec_slice)).real  # take the real part: s is a real-valued buffer
        w[sample:sample+2*hnwinlen+1] += gauss_window ** 2
    s /= w
    return s[hnwinlen:hnwinlen+s_len]
Example #31
def inverse_mps(mps):
    "Inverts a MPS back to a spectrogram"
    spec = ifft2(ifftshift(mps))
    return spec
Example #32
def retrieve_phase(data, params, max_iters=200, pupil_tol=1e-8, mse_tol=1e-8, phase_only=False):
    """Retrieve the phase across the objective's back pupil from an
    experimentally measured PSF.

    NOTE: If all that is needed is phase, e.g. for adaptive optical correction, then most normal
    ways of estimating the background should be sufficient and you can use the `phase_only`
    keyword. However, if you want to properly model your PSF for something like deconvolution
    then you should be aware that the magnitude estimate is _incredibly_ sensitive to the background
    correction applied to the data prior to running the algorithm, and multiple background
    methods/parameters should be tried.

    Follows: [Hanser, B. M.; Gustafsson, M. G. L.; Agard, D. A.;
    Sedat, J. W. Phase Retrieval for High-Numerical-Aperture Optical Systems.
    Optics Letters 2003, 28 (10), 801.](dx.doi.org/10.1364/OL.28.000801)

    Parameters
    ----------
    data : ndarray (3 dim)
        The experimentally measured PSF of a subdiffractive source
    params : dict
        Parameters to pass to HanserPSF, size and zsize will be automatically
        updated from data.shape
    max_iters : int
        The maximum number of iterations to run, default is 200
    pupil_tol : float
        the tolerance in percent change of the pupil, default is 1e-8
    mse_tol : float
        the tolerance in percent change for the mean squared error between
        data and simulated data, default is 1e-8
    phase_only : bool
        True means only the phase of the back pupil is retrieved while the
        amplitude is not.

    Returns
    -------
    PR_result : PhaseRetrievalResult
        An object that contains the phase retrieval result
    """
    # make sure data is square
    assert max_iters > 0, "Must have at least one iteration"
    assert data.shape[1] == data.shape[2], "Data is not square in x/y"
    assert data.ndim == 3, "Data doesn't have enough dims"
    # make sure the user hasn't screwed up the params
    params.update(
        dict(vec_corr="none", condition="none", zsize=data.shape[0], size=data.shape[-1])
    )
    # assume that data prep has been handled outside function
    # The field magnitude is the square root of the intensity
    mag = psqrt(data)
    # generate a model from parameters
    model = HanserPSF(**params)
    # generate coordinates
    model._gen_kr()
    # start a list for iteration
    mse = np.zeros(max_iters)
    mse_diff = np.zeros(max_iters)
    pupil_diff = np.zeros(max_iters)
    # generate a pupil to start with
    new_pupil = model._gen_pupil()
    # save it as a mask
    mask = new_pupil.real
    # initialize
    old_mse = old_pupil = np.nan
    # iterate
    for i in range(max_iters):
        # generate new mse and add it to the list
        model.apply_pupil(new_pupil)
        new_mse = _calc_mse(data, model.PSFi)
        mse[i] = new_mse
        if i > 0:
            # calculate the difference in mse to test for convergence
            mse_diff[i] = abs(old_mse - new_mse) / old_mse
            # calculate the difference in pupil
            pupil_diff[i] = (abs(old_pupil - new_pupil) ** 2).mean() / (abs(old_pupil) ** 2).mean()
        else:
            mse_diff[i] = np.nan
            pupil_diff[i] = np.nan
        # check tolerances, how much has the pupil changed, how much has the mse changed
        # and what's the absolute mse
        logger.info(
            f"Iteration {i}, mse_diff = {mse_diff[i]:.2g}, pupil_diff = {pupil_diff[i]:.2g}"
        )
        if pupil_diff[i] < pupil_tol or mse_diff[i] < mse_tol or mse[i] < mse_tol:
            break
        # update old_mse
        old_mse = new_mse
        # retrieve new pupil
        old_pupil = new_pupil
        # keep phase
        phase = np.angle(model.PSFa.squeeze())
        # replace magnitude with experimentally measured mag
        new_psf = mag * np.exp(1j * phase)
        # generate the new pupils
        new_pupils = fftn(ifftshift(new_psf, axes=(1, 2)), axes=(1, 2))
        # undo defocus and take the mean
        new_pupils /= model._calc_defocus()
        new_pupil = new_pupils.mean(0) * mask
        # if phase only discard magnitude info
        if phase_only:
            new_pupil = np.exp(1j * np.angle(new_pupil)) * mask
    else:
        logger.warning("Reach max iterations without convergence")
    mse = mse[: i + 1]
    mse_diff = mse_diff[: i + 1]
    pupil_diff = pupil_diff[: i + 1]
    # shift mask
    mask = fftshift(mask)
    # shift phase then unwrap and mask
    phase = unwrap_phase(fftshift(np.angle(new_pupil))) * mask
    # shift magnitude
    magnitude = fftshift(abs(new_pupil)) * mask
    return PhaseRetrievalResult(magnitude, phase, mse, pupil_diff, mse_diff, model)
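
A hedged, stripped-down sketch of the core magnitude-replacement step described above (2-D only, no defocus stack or HanserPSF; the helper name pupil_iteration is made up for illustration):

import numpy as np
from numpy.fft import fftn, ifftn, fftshift, ifftshift

def pupil_iteration(pupil, measured_mag, mask):
    field = fftshift(ifftn(ifftshift(pupil)))             # pupil -> PSF amplitude
    field = measured_mag * np.exp(1j * np.angle(field))   # impose the measured magnitude, keep the phase
    pupil = fftshift(fftn(ifftshift(field)))              # back to the pupil plane
    return pupil * mask                                   # enforce the pupil support

mask = np.zeros((64, 64))
mask[24:40, 24:40] = 1.0
measured_mag = np.abs(fftshift(ifftn(ifftshift(mask))))   # toy "measured" PSF magnitude
pupil = mask.astype(complex)
for _ in range(50):
    pupil = pupil_iteration(pupil, measured_mag, mask)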
Example #33
def M(vk, H_fft):
    return np.real(fft.fftshift(fft.ifft2(fft.fft2(fft.ifftshift(vk)) *
                                          H_fft)))
Example #34
def Filter(img, filter_matrix):
    shifted_dft_img = fftshift(fft2(img))
    filter_img = shifted_dft_img * filter_matrix
    return ifft2(ifftshift(filter_img))
Example #35
im = cv2.imread(
    'bricks.jpg'
)  # path needs to be changed; all input images are available in the input folder
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
cv2.imwrite("input.jpg",
            im)  # output images in the output folder can be used for reference
sampled_1 = block_reduce(im, (1, 1))
sampled_2 = block_reduce(im, (2, 2))
sampled_4 = block_reduce(im, (4, 4))
sampled_8 = block_reduce(im, (8, 8))
sampled_16 = block_reduce(im, (16, 16))

sampled1_fft = fftshift(fft2(sampled_1))
sampled1_mag = abs(sampled1_fft)
res1 = ifft2(ifftshift(sampled1_fft)).astype("uint8")
cv2.imwrite(
    "res1.jpg",
    res1)  # output images in the output folder can be used for reference
cv2.imwrite(
    "fft1.jpg",
    10 * log(sampled1_mag +
             1))  # output images in the output folder can be used for reference

sampled2_fft = fftshift(fft2(sampled_2))
sampled2_mag = abs(sampled2_fft)
res2 = ifft2(ifftshift(sampled2_fft)).astype("uint8")
cv2.imwrite(
    "res2.jpg",
    res2)  # output images in the output folder can be used for reference
cv2.imwrite(
Example #36
def process_card(cardname, expansion=None):
    time.sleep(0.05)

    # try/except in case the search doesn't return anything
    try:
        # If the card specifies which set to retrieve the scan from, do that
        if expansion:
            # Set specified from set formatter
            query = "!\"" + cardname + "\" set=" + expansion
            print("Processing: " + cardname + ", set: " + expansion)
        else:
            query = "!\"" + cardname + "\""
            print("Processing: " + cardname)
        card = scrython.cards.Search(q=query).data()[0]

    except scrython.foundation.ScryfallError:
        print("Couldn't find card: " + cardname)
        return

    # Handle cards with multiple faces
    if card["layout"] == "transform":
        cards = [x for x in card["card_faces"]]
    else:
        cards = [
            card,
        ]

    for card_obj in cards:
        name = card_obj["name"].replace(
            "//", "&")  # should work on macOS & windows now
        name = name.replace(":", "")  # case for Circle of Protection: X

        # Process with waifu2x
        r = requests.post("https://api.deepai.org/api/waifu2x",
                          data={
                              'image': card_obj["image_uris"]["large"],
                          },
                          headers={'api-key': config.TOKEN})
        output_url = r.json()['output_url']
        im = imageio.imread(output_url)

        # Read in filter image
        filterimage = np.copy(imageio.imread("./filterimagenew.png"))

        # Resize filter to shape of input image
        filterimage = resize(filterimage, [im.shape[0], im.shape[1]],
                             anti_aliasing=True,
                             mode="edge")

        # Initialise arrays
        im_filtered = np.zeros(im.shape, dtype=np.complex_)
        im_recon = np.zeros(im.shape, dtype=np.float_)

        # Apply filter to each RGB channel individually
        for i in range(0, 3):
            im_filtered[:, :, i] = np.multiply(fftshift(fft2(im[:, :, i])),
                                               filterimage)
            im_recon[:, :, i] = ifft2(ifftshift(im_filtered[:, :, i])).real

        # Scale between 0 and 255 for uint8
        minval = np.min(im_recon)
        maxval = np.max(im_recon)
        im_recon_sc = (255 * ((im_recon - minval) / (maxval - minval))).astype(
            np.uint8)

        # TODO: pre-m15, post-8ed cards
        # TODO: pre-8ed cards (?)

        # Borderify image
        pad = 57  # Pad image by 1/8th of inch on each edge
        bordertol = 16  # Overfill onto existing border by 16px to remove white corners
        im_padded = np.zeros([im.shape[0] + 2 * pad, im.shape[1] + 2 * pad, 3])

        # Get border colour from left side of image
        bordercolour = np.median(im_recon_sc[200:(im_recon_sc.shape[0] - 200),
                                             0:bordertol],
                                 axis=(0, 1))

        # Pad image
        for i in range(0, 3):
            im_padded[pad:im.shape[0] + pad, pad:im.shape[1] + pad,
                      i] = im_recon_sc[:, :, i]

        # Overfill onto existing border to remove white corners
        # Left
        im_padded[0:im_padded.shape[0], 0:pad + bordertol, :] = bordercolour

        # Right
        im_padded[0:im_padded.shape[0], im_padded.shape[1] -
                  (pad + bordertol):im_padded.shape[1], :] = bordercolour

        # Top
        im_padded[0:pad + bordertol, 0:im_padded.shape[1], :] = bordercolour

        # Bottom
        im_padded[im_padded.shape[0] - (pad + bordertol):im_padded.shape[0],
                  0:im_padded.shape[1], :] = bordercolour

        # Remove copyright line
        if card["frame"] == "2015":
            # Modern frame
            leftPix = 735
            rightPix = 1140
            topPix = 1550
            bottomPix = 1585

            # creatures have a shifted legal line
            try:
                power = card_obj["power"]
                toughness = card_obj["toughness"]
                topPix = 1575
                bottomPix = 1615
                # Creature card
            except KeyError:
                pass

            # planeswalkers have a shifted legal line too
            try:
                loyalty = card_obj["loyalty"]
                topPix = 1575
                bottomPix = 1615
            except KeyError:
                pass

            im_padded[topPix:bottomPix, leftPix:rightPix, :] = bordercolour

        elif card["frame"] == "2003":
            # 8ED frame
            try:
                loyalty = card_obj["loyalty"]
                leftPix = 300
                rightPix = 960
                topPix = 1570
                bottomPix = 1600
                im_padded[topPix:bottomPix, leftPix:rightPix, :] = bordercolour
            except KeyError:
                # TODO: Content aware fill?
                pass

        # Remove holostamp
        if card["frame"] == "2015" and (card["rarity"] == "rare" or card["rarity"] == "mythic") \
                and "/large/front/" in card_obj["image_uris"]["large"]:
            # Need to remove holostamp
            # Define bounds of ellipse to fill with border colour
            leftE = 575
            rightE = 690
            topE = 1520
            bottomE = 1575

            cx = (leftE + rightE) / 2
            cy = (topE + bottomE) / 2

            h = (bottomE - topE) / 2
            w = (rightE - leftE) / 2

            for x in range(leftE, rightE + 1):
                for y in range(topE, bottomE + 1):
                    # determine if point is in the holostamp area
                    if pow(x - cx, 2) / pow(w, 2) + pow(y - cy, 2) / pow(
                            h, 2) <= 1:
                        # point is inside ellipse
                        im_padded[y, x, :] = bordercolour

        # Write image to disk
        imageio.imwrite("formatted/" + name + ".png",
                        im_padded.astype(np.uint8))
Example #37
import numpy as np
import numpy.fft as fft
import pylab as plt
import iris

plt.ion()

FILE_LOC = '/home/markmuetz/mirrors/archer/work/cylc-run/u-ax548/share/data/history/m500_large_dom_no_wind/w072.nc'
w = iris.load(FILE_LOC)[0]

k = fft.fftshift(fft.fftfreq(w.shape[-1], d=0.5))
l = k.copy()
K, L = np.meshgrid(k, l)

fts = fft.fftshift(fft.fft2(w[:, 15].data), axes=(1, 2))
angle = np.angle(fts[0, 257, 256]) - np.angle(fts[:, 257, 256])
fts_trans = fts * np.exp(
    1j * angle[:, None, None] * L[None, :, :] / L[257, 256])
w_trans = fft.ifft2(fft.ifftshift(fts_trans, axes=(1, 2)))

for i in range(24):
    plt.clf()
    plt.imshow(w_trans[i].real)
    plt.pause(0.3)
Example #38
 def space_propagate(self, wave: np.ndarray, dz: float):
     spatial_propagator = np.exp(-1j * self.wave_length * np.pi * dz *
                                 self.q_mgrid**2)
     return ifft2(ifftshift(spatial_propagator) * fft2(wave))
Example #39
def dfi_reconstruction(sinogram,
                       center,
                       angles=None,
                       ratio=1.0,
                       filter_name="hann",
                       pad_rate=0.25,
                       pad_mode="edge",
                       apply_log=True):
    """
    Apply the DFI (direct Fourier inversion) reconstruction method to a
    sinogram image (Ref. [1]). The method is a practical and direct
    implementation of the Fourier slice theorem (Ref. [2]).

    Parameters
    ----------
    sinogram : array_like
        2D array. Sinogram image.
    center : float
        Center of rotation.
    angles : array_like
        1D array. List of angles (in radian) corresponding to the sinogram.
    ratio : float
        To apply a circle mask to the reconstructed image.
    filter_name : {None, "hann", "bartlett", "blackman", "hamming", "nuttall",\\
                  "parzen", "triang"}
        Apply a smoothing filter.
    pad_rate : float
        To apply padding before the FFT. The padding width equals to
        (pad_rate * image_width).
    pad_mode : str
        Padding method. Full list can be found at numpy.pad documentation.
    apply_log : bool
        Apply the logarithm function to the sinogram before reconstruction.

    Returns
    -------
    array_like
        Square array. Reconstructed image.

    References
    ----------
    .. [1] https://doi.org/10.1364/OE.418448
    .. [2] https://doi.org/10.1071/PH560198
    """
    if apply_log is True:
        sinogram = -np.log(sinogram)
    (nrow, ncol) = sinogram.shape
    if ncol % 2 == 0:
        sinogram = np.pad(sinogram, ((0, 0), (0, 1)), mode="edge")
    ncol1 = sinogram.shape[1]
    xshift = (ncol1 - 1) / 2.0 - center
    sinogram = shift(sinogram, (0, xshift), mode='nearest')
    if angles is not None:
        t_ang = np.sum(np.abs(np.diff(angles * 180.0 / np.pi)))
        if abs(t_ang - 360) < 10:
            nrow = nrow // 2 + 1
            sinogram = (sinogram[:nrow] + np.fliplr(sinogram[-nrow:])) / 2
        step = np.mean(np.abs(np.diff(angles)))
        b_ang = angles[0] - (angles[0] // (2 * np.pi)) * (2 * np.pi)
        sino_360 = np.vstack((sinogram[:nrow - 1], np.fliplr(sinogram)))
        sinogram = shift(sino_360, (b_ang / step, 0), mode='wrap')[:nrow]
        if angles[-1] < angles[0]:
            sinogram = np.flipud(np.fliplr(sinogram))
    num_pad = int(pad_rate * ncol1)
    sinogram = np.pad(sinogram, ((0, 0), (num_pad, num_pad)), mode=pad_mode)
    ncol2 = sinogram.shape[1]
    mask = util.make_circle_mask(ncol2, 1.0)
    (r_mat, theta_mat) = generate_mapping_coordinate(ncol2, nrow, ncol2, ncol2)
    sino_fft = fft.fftshift(fft.fft(fft.ifftshift(sinogram, axes=1)), axes=1)
    if filter_name is not None:
        window = make_smoothing_window(filter_name, ncol2)
        sino_fft = sino_fft * np.tile(window, (nrow, 1))
    mat_real = np.real(sino_fft)
    mat_imag = np.imag(sino_fft)
    reg_real = util.mapping(
        mat_real, r_mat, theta_mat, order=5, mode="reflect") * mask
    reg_imag = util.mapping(
        mat_imag, r_mat, theta_mat, order=5, mode="reflect") * mask
    recon = np.real(
        fft.fftshift(fft.ifft2(
            fft.ifftshift(reg_real + 1j * reg_imag))))[num_pad:ncol + num_pad,
                                                       num_pad:ncol + num_pad]
    if ratio is not None:
        if ratio == 0.0:
            ratio = min(center, ncol - center) / (0.5 * ncol)
        mask = util.make_circle_mask(ncol, ratio)
        recon = recon * mask
    return recon
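
A self-contained check of the Fourier slice theorem that DFI relies on: the 1-D FFT of a projection equals the corresponding central slice of the object's 2-D FFT.

import numpy as np
from numpy import fft

obj = np.zeros((64, 64))
obj[24:40, 20:44] = 1.0

projection = obj.sum(axis=0)                          # projection along rows (angle = 0)
slice_1d = fft.fftshift(fft.fft(fft.ifftshift(projection)))
central_row = fft.fftshift(fft.fft2(fft.ifftshift(obj)))[32, :]   # the k_y = 0 row
print(np.allclose(slice_1d, central_row))             # True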
Example #40
def FROG_ifft(Ewsig):
    return fft.ifft(fft.ifftshift(Ewsig, axes=0), axis=0)
Example #41
pl.subplot(1,3,3)
pl.imshow( angle(imgFFT) )         # Phase
pl.title('Phase')

# Most power in images is concentrated around zero frequency
# (which represents the mean). This stands out when the amplitude
# is plotted, frustrating our view of the other frequencies. 
# Log helps visualizing data with many different orders of magnitude.

pl.show()


##### 2.c

img2 = real(ifft2( ifftshift( imgFFT ) ))

pl.figure(2)

pl.subplot( 1, 2, 1)
pl.imshow(img)
pl.subplot( 1, 2, 2)
pl.imshow(img2)

pl.show()

print('The squared error is: ', ((img - img2)**2).sum())
# Yes, they're the same!


##### 2.d
Example #42
def calculate_grappa_unmixing(source_data, acc_factor, kernel_size=(4,5), data_mask=None, csm=None, regularization_factor=0.001, target_data=None):
    '''Calculates unmixing coefficients for a 2D image using a GRAPPA algorithm

    :param source_data: k-space source data ``[coils, y, x]``
    :param acc_factor: Acceleration factor, e.g. 2
    :param kernel_size: Shape of the k-space kernel ``(ky-lines, kx-points)`` (default ``(4,5)``)
    :param data_mask: Mask of where calibration data is located in source_data (defaults to all of source_data)
    :param csm: Coil sensitivity map, ``[coil, y, x]`` (used for b1-weighted combining. Will be estimated from calibration data if not supplied)
    :param regularization_factor: adds Tikhonov regularization (default ``0.001``)
        - 0 = no regularization
        - set higher for more aggressive regularization.
    :param target_data: If target data differs from source data (defaults to source_data)
    
    :returns unmix: Image unmixing coefficients for a single ``x`` location, ``[coil, y, x]``
    :returns gmap: Noise enhancement map, ``[y, x]``
    '''

    
    ny = source_data.shape[1]
    nc_source = source_data.shape[0]

    
    if target_data is None:
        target_data = source_data
        
    if data_mask is None:
        data_mask = np.ones((ny, nc_source))
        
    nc_target = target_data.shape[0]
        
    if csm is None:
        #Assume calibration data is in the middle         
        f = np.asarray(np.asmatrix(np.hamming(np.max(np.sum(data_mask,0)))).T * np.asmatrix(np.hamming(np.max(np.sum(data_mask,1)))))
        fmask = np.zeros((source_data.shape[1],source_data.shape[2]),dtype=np.complex64)
        idx = np.argwhere(data_mask==1)
        fmask[idx[:,0],idx[:,1]] = f.reshape(idx.shape[0])
        fmask = np.tile(fmask[None,:,:],(nc_source,1,1))
        csm = fftshift(ifftn(ifftshift(source_data * fmask, axes=(1,2)), axes=(1,2)), axes=(1,2))
        (csm,rho) = coils.calculate_csm_walsh(csm)
        
    
    kernel = np.zeros((nc_target,nc_source,kernel_size[0]*acc_factor,kernel_size[1]),dtype=np.complex64)
    sampled_indices = np.nonzero(data_mask)
    kx_cal = (sampled_indices[1][0],sampled_indices[1][-1])
    ky_cal = (sampled_indices[0][0],sampled_indices[0][-1])
    
    for s in range(0,acc_factor):
        kernel_mask = np.zeros((kernel_size[0]*acc_factor, kernel_size[1]),dtype=np.int8)
        kernel_mask[s:kernel_mask.shape[0]:acc_factor,:] = 1
        s_data = source_data[:,ky_cal[0]:ky_cal[1],kx_cal[0]:kx_cal[1]]
        t_data = target_data[:,ky_cal[0]:ky_cal[1],kx_cal[0]:kx_cal[1]]
        k = estimate_convolution_kernel(s_data,kernel_mask,regularization_factor=regularization_factor,target_data=t_data)
        kernel = kernel + k

    #return kernel
    
    kernel = kernel[:,:,::-1,::-1] #flip kernel in preparation for convolution
    
    csm_ss = np.sum(csm * np.conj(csm),0)
    csm_ss = csm_ss + 1.0*(csm_ss < np.spacing(1)).astype('float32')
    
    unmix = np.zeros(source_data.shape,dtype=np.complex64)
    
    for c in range(0,nc_target):
        kernel_pad = _pad_kernel(kernel[c,:,:,:],unmix.shape)
        kernel_pad = fftshift(ifftn(ifftshift(kernel_pad, axes=(1,2)), axes=(1,2)), axes=(1,2))
        kernel_pad *= unmix.shape[1]*unmix.shape[2]
        unmix = unmix + (kernel_pad * np.tile(np.conj(csm[c,:,:]) /csm_ss,(nc_source,1,1)))

    unmix /= acc_factor
    gmap = np.squeeze(np.sqrt(np.sum(abs(unmix) ** 2, 0))) * np.squeeze(np.sqrt(np.sum(abs(csm) ** 2, 0)))
    
    
    return (unmix,gmap)
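
# Hedged usage sketch (not part of this module): given undersampled k-space
# `kspace_us` shaped [coils, ky, kx] sampled with the same pattern used for
# calibration, the unmixing images combine the aliased coil images into one:
#
#   unmix, gmap = calculate_grappa_unmixing(kspace_us, acc_factor=2, data_mask=mask)
#   alias_img = fftshift(ifftn(ifftshift(kspace_us, axes=(1, 2)), axes=(1, 2)), axes=(1, 2))
#   recon = np.squeeze(np.sum(unmix * alias_img * acc_factor, axis=0))
#
# The names kspace_us/mask and the acc_factor scaling are illustrative
# assumptions, not definitions taken from this file.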
Ejemplo n.º 43
0
 y = np.linspace(0, int(img.shape[0]), img.shape[0])
 x = np.linspace(0, int(img.shape[1]), img.shape[1])
 xx, yy = np.meshgrid(x, y)
 fft = np.empty([img.shape[0], img.shape[1], img.shape[2]],
                dtype=np.complex64)
 ifft = np.empty([img.shape[0], img.shape[1], img.shape[2]],
                 dtype=np.complex64)
 for c in range(3):
     #phase[:, :, c] = fillPhase(xx, yy, phaseSteps[0][c], phaseSteps[1][c])
     phase[:, :, c] = circPhase(xx, yy, i, j, c)
     #pass
 #plt.imshow(phase/np.amax(phase))
 #plt.show()
 for c in range(3):
     img1 = np.array(img * np.exp(1j * phase))
     fft[:, :, c] = fftshift(fft2(ifftshift(img1[:, :, c])))
     thetaX = np.arctan(
         i * ledSpace / ledSep) * (M * inPixSize /
                                   (wls[c] * 2 * np.pi * mag))
     thetaY = np.arctan(
         j * ledSpace / ledSep) * (M * inPixSize /
                                   (wls[c] * 2 * np.pi * mag))
     fftTrans = h.translate(fft[:, :, c], -thetaX, -thetaY)
     fft[:, :, c] = pupil[:, :, c] * fftTrans
     ifft[:, :, c] = fftshift(ifft2(fftshift(fft[:, :, c])))
 #plt.imshow(np.log(np.abs(fft))[:, :, 0])
 #plt.show()
 #plt.imshow(np.array(np.abs(ifft)*255, dtype=np.int64))
 #plt.show()
 imgName = "./Imgs/img_" + str(i) + "_" + str(j) + "_.jpg"
 cv.imwrite(imgName, np.clip(np.abs(ifft) * 255, 0, 255).astype(np.uint8))  # imwrite expects 8-bit data
Ejemplo n.º 44
0
 def IFFT_t_shift(self, A):
     if PYFFTW_AVAILABLE:
         self.ifft_input[:] = fftshift(A)
         return ifftshift(self.ifft())
     else:
         return ifftshift(scipy.fftpack.fft(fftshift(A)))
Ejemplo n.º 45
0
                                                           T_PRI,
                                                           window=False,
                                                           debug=False)

# # phase correction based on range frequency
# time.count()
# print("phase correction based on range frequency")
# tau_0 = 2*R_t0/c
# for i in range(0, m_prime.size):
#     for k in range(FT_FFT_freq_RangeBin.size):
#         m_p = m_prime[i]
#         F_k = FT_FFT_freq_RangeBin[k]
#         FT_FFT_RangeBin[i, k] = (FT_FFT_RangeBin[i, k]*np.exp(1j*2*pi*F_k*tau_0))**(F_0/(F_0+F_k))*np.exp(-1j*2*pi*F_k*tau_0)

# ifft back to time domain after phase correction and remove zero padding
PhaseCorrected_RangeBin = ifftn(ifftshift(FT_FFT_RangeBin, axes=(1, )),
                                s=(RangeBin[0, :].size, ),
                                axes=(1, ))
# PhaseCorrected_RangeBin = ifftn(ifftshift(FT_FFT_RangeBin, axes=(1,)), axes=(1,))

time.count()
print("Doppler Processing")

# Doppler processing to extract target speed
Doppler = fftshift(
    fftn(PhaseCorrected_RangeBin, axes=(0, )),
    axes=(0, ))  # n dimensional FFT, only FFT over column (over pulses)
DopplerFreq = fftshift(fftfreq(pulse_num, T_PRI))
DopplerVelocity = DopplerFreq * wave_len / 2
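
# Hedged illustration of reading a target speed off the maps computed above
# (assumes numpy is imported as np, as in the commented code earlier in this script):
peak_bins = np.argmax(np.abs(Doppler), axis=0)      # strongest Doppler bin per range cell
estimated_speeds = DopplerVelocity[peak_bins]       # radial speed estimate per range cell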

time.count()
Ejemplo n.º 46
0
    def Recover(self,
                xi,
                *,
                code='',
                pb=('', 0, 1, 0, 0, 0, 0, 0, 0, 0, 0),
                vis=False,
                error=None,
                err=None):
        ## Initialization ##
        # Hard-code the random seed to make it reproducible #
        np.random.seed(0)

        # Pass xi into the goal image `y`, but pass it through the selection matrix `sig` first #
        y = np.zeros((np.shape(self.sig)[0], np.size(xi)))
        y[0, ...] = xi.reshape(np.size(xi), order="F")
        self.y[:] = np.reshape(self.sig @ y, self.sz, order="F")
        self.y_ = self.y.copy()

        # Reinitialize psi and the slack variables #
        self.Psi[:] = fftw.zeros_aligned(self.sz, dtype='complex64')
        self.S_0[:] = fftw.zeros_aligned(self.sz, dtype='complex64')
        self.S_1[:] = fftw.zeros_aligned(self.sz, dtype='complex64')
        self.S_2[:] = fftw.zeros_aligned(self.sz, dtype='complex64')

        # Localization error #
        if (vis or (error is not None)):
            pts = np.zeros([0, 3])
            if (error is not None):
                tru = OP._LoadMot(code)
                pts = tru[tru[:, :, 3] == pb[1], :]
                temp = []
                temp[:] = pts[:, 0]
                pts[:, 0] = pts[:, 1]  # Swap X & Y #
                pts[:, 1] = temp[:]
                H_A, H_S = _IDFilters(self.sz[1:])
            else:
                tru = OP._LoadTruth(code)
                for p in range(len(tru)):
                    if (pb[1] in tru[p].frm):
                        f = np.nonzero(pb[1] == tru[p].frm)[0]
                        pts = np.concatenate((pts, tru[p].res[f, :]), axis=0)

        ## Iterate Through ADMM ##
        stpwch = time.time()
        timers = np.zeros((USER.REC_ITER))
        for i in range(USER.REC_ITER):
            # Separate the result of this iteration from the previous solution `self.Psi`.  This allows us to build Psi incrementally, modularizing the regularization.
            Psi = fftw.zeros_aligned(self.sz, dtype='complex64')

            # Perform Regularizations #
            Psi = self.Reg_Accuracy(Psi, i)
            Psi = self.Reg_Sparcity(
                Psi, 1)  #np.minimum(np.maximum(2*i/USER.REC_ITER, 1/2), 3/2))
            Psi = self.Reg_Temporal(Psi)

            # Copy in the new result #
            self.Psi[:] = Psi.copy()

            # Alert us if we have an issue! #
            if (np.any(np.isnan(Psi))): raise ValueError("Psi has NaN values!")

            # Visualization #
            if (vis and (np.mod(i, USER.REC_ITER // 20) == 0)):
                self.BT_Psi()
                plt.clf()
                plt.gca(position=[0, 0, 1, 1])
                plt.imshow(np.log10(
                    npf.fftshift(np.sum(np.abs(self.psi), axis=(0, 1)))),
                           cmap='gray')
                plt.plot(pts[:, 0],
                         pts[:, 1],
                         color='r',
                         marker='o',
                         linewidth=0,
                         fillstyle='none')
                plt.clim(-3, 0)
                plt.draw()
                plt.pause(0.1)
            if ((error is not None) and (np.mod(i, 3) == 0) and (i > 60)):
                # Get psi #
                self.BT_Psi()

                # Find where psi is important #
                psi_f = npf.fftshift(np.abs(self.psi), axes=(-2, -1))

                # Identify points in the cloud #
                Psi_f = npf.fftn(psi_f)
                psi_a = np.real_if_close(
                    npf.ifftshift(npf.ifftn(Psi_f * H_A), axes=(-2, -1)))
                psi_s = np.real_if_close(
                    npf.ifftshift(npf.ifftn(Psi_f * H_S), axes=(-2, -1)))
                lhs = psi_a
                rhs = psi_s * (1 + 1) + np.mean(psi_f)
                idx = np.nonzero(lhs > rhs)
                pos = np.array([idx[3], idx[2], idx[1], idx[0] / USER.KER_T]).T
                wgt = np.round(
                    np.sqrt(psi_f**2 + ((psi_a + psi_s) / 2)**2)[idx], 3)

                if (0 < len(wgt) < 10000):
                    # Attempt a triangulation #
                    # Create a point cloud based on the points #
                    pnts = np.concatenate([pos, wgt[:, None]], axis=1)

                    # Segment the point cloud to find clusters #
                    cloud = PointCloud(pnts, seg=True)

                    # Why?? # vvv #
                    # Weight threshold #
                    clust = _CloudThr(cloud.clust)
                    #		# ^^^ #

                    clust = _Separate(clust)

                    if (len(clust) == 0): continue

                    # Evaluate the average minimum error per particle #
                    dist_x = np.zeros([np.shape(pts)[0], len(clust)])
                    dist_y = np.zeros([np.shape(pts)[0], len(clust)])
                    dist_z = np.zeros([np.shape(pts)[0], len(clust)])

                    # Evaluate the distance between each point and all clusters #
                    for c in range(len(clust)):
                        diff = (pts[:, :3] - clust[c].res) * [
                            *USER.RES, USER.DOF[0] / USER.KER_Z
                        ]
                        dist_x[:, c] = np.abs(diff[:, 0])
                        dist_y[:, c] = np.abs(diff[:, 1])
                        dist_z[:, c] = np.abs(diff[:, 2])

                    # Get the minimum error per cluster, average over all particles #
                    error[int(i // 3), pb[1], 0] = np.mean(np.min(dist_x,
                                                                  1))  # X
                    error[int(i // 3), pb[1], 1] = np.mean(np.min(dist_y,
                                                                  1))  # Y
                    error[int(i // 3), pb[1], 2] = np.mean(np.min(dist_z,
                                                                  1))  # Z
            #if((err is not None) and (np.mod(i, 3) == 0)):
            #	err[pb(1),i,:] = ComputeError(xi)

            # Progress Bar #
            timers[i] = time.time() - stpwch
            if (i > 0):
                prefix = '(%s):\t%8.3f sec' % (pb[0], pb[-2] + timers[i])
                #suffix = '(Remain: %5.0f sec)' % (pb[-1])
                suffix = '(Remain: %3.0f:%2.0f:%2.0f)  ' % (pb[-1] // 3600,
                                                            (pb[-1] % 3600) //
                                                            60, pb[-1] % 60)
                if (pb[4] > 1):  # Show Z progress #
                    VIS._ProgressBar(pb[1] + 1,
                                     pb[2],
                                     sub_i=pb[3] + 1,
                                     sub_I=pb[4],
                                     prefix=prefix,
                                     suffix=suffix)
                elif (pb[6] > 1
                      or pb[8] > 1):  # Show chunked iteration progress #
                    i_ = i + 1 + (pb[5] * pb[8] + pb[7]) * USER.REC_ITER
                    I_ = pb[6] * pb[8] * USER.REC_ITER
                    VIS._ProgressBar(pb[1] + 1,
                                     pb[2],
                                     sub_i=i_,
                                     sub_I=I_,
                                     prefix=prefix,
                                     suffix=suffix)
                else:
                    VIS._ProgressBar(pb[1] + 1,
                                     pb[2],
                                     sub_i=i,
                                     sub_I=USER.REC_ITER,
                                     prefix=prefix,
                                     suffix=suffix)
        if (vis):
            plt.ioff()
            plt.show()

        ## Output ##
        self.BT_Psi()
        return np.abs(self.psi), error
Ejemplo n.º 47
0
def reconstruct(DATA, iterations):
    ## Define Parameters
    # Define Optical Parameters

    wavelength = DATA['wavelength']
    LED_spacing = DATA['LED_spacing']
    matrix_spacing = DATA['matrix_spacing']
    x_offset = DATA['x_offset']
    y_offset = DATA['y_offset']
    NA_obj = DATA['NA_obj']
    px_size = DATA['px_size']
    Images = DATA['Images']

    m_s, n_s = Images[0][0].shape
    array_dimensions = Images.shape
    arraysize = array_dimensions[0]

    ## Define Calculated Parameters
    LED_limit = LED_spacing * (arraysize - 1) / 2
    LED_positions = np.linspace(float(-LED_limit), float(LED_limit),
                                int(2 * (LED_limit / LED_spacing) + 1))
    k = 2 * np.pi / wavelength
    # wavevector magnitude
    # lists of transverse wavevectors
    kx_list = -k * np.sin(
        np.arctan((LED_positions + x_offset) / matrix_spacing))
    ky_list = -k * np.sin(
        np.arctan((LED_positions + y_offset) / matrix_spacing))

    kx_list = kx_list[0]
    ky_list = ky_list[0]
    # Pixel Size and NA calculations
    px_size_synth = px_size / 4

    # calculate subimage size
    # size of sub images in pixels
    m_r = m_s * (px_size / px_size_synth)
    n_r = m_r

    # maximum spatial frequency for sub-image
    kt_max_sub = k * NA_obj

    # maximum spatial frequency for reconstructed image
    # Use synthetic NA plus only the margin needed for subimage oversampling.
    # also take matrix offset into account
    max_offset = np.max(np.abs([x_offset, y_offset]))
    NA_matrix = np.sin(np.arctan((LED_limit + max_offset) / matrix_spacing))
    kt_max_rec = k * (NA_matrix + NA_obj)

    # spatial frequency axes for spectrums of images
    kx_axis_sub = np.linspace(-kt_max_sub[0], kt_max_sub[0], n_s)
    ky_axis_sub = np.linspace(-kt_max_sub[0], kt_max_sub[0], m_s)

    # grid of spatial frequencies for each pixel of reconstructed spectrum
    # same for subimage spectrum
    [kx_g_sub, ky_g_sub] = np.meshgrid(kx_axis_sub, ky_axis_sub)

    ## retrieve phase iteratively

    # initialize object
    dtype = np.complex64
    '''
    objectFT = np.zeros(shape=[int(m_r),int(n_r)]).astype(dtype)
    '''
    img = Images[int(np.floor(arraysize / 2)), int(np.floor(arraysize / 2))]
    objectFTguess = fftshift(fftn(img))
    pad_width = int((m_r - img.shape[0]) / 2)
    objectFT = np.pad(objectFTguess, pad_width=pad_width,
                      mode='constant').astype(dtype)

    # only need to generate one CTF, since it will be applied to the
    # sub-images after they are extracted from the reconstructed image
    # spectrum, and thus will not move around (relative to the sub-image).
    CTF = (kx_g_sub**2 + ky_g_sub**2) < kt_max_rec**2

    # define convergence tolerance:
    for iters in range(iterations):
        print("Iteration: " + str(iters))
        for i in range(arraysize):  # one per row of LEDs
            for j in range(arraysize):  # one per column of LEDs
                kx_center = np.round(
                    (kx_list[j] + kt_max_rec) / 2 / kt_max_rec * (n_r - 1)) + 1
                ky_center = np.round(
                    (ky_list[i] + kt_max_rec) / 2 / kt_max_rec * (m_r - 1)) + 1
                kx_low = np.round(kx_center - (n_s) / 2)
                kx_high = np.round(kx_center + (n_s) / 2)
                ky_low = np.round(ky_center - (m_s) / 2)
                ky_high = np.round(ky_center + (m_s) / 2)
                # extract piece of spectrum
                kx = np.array([np.arange(int(kx_low), int(kx_high))],
                              dtype=int)
                ky = np.array([np.arange(int(ky_low), int(ky_high))],
                              dtype=int)

                pieceFT = objectFT[ky.T, kx]
                #apply CTF and digital correction
                pieceFT_constrained = (m_s / m_r)**2 * np.multiply(
                    pieceFT, CTF)
                # iFFT
                piece = ifftn(ifftshift(pieceFT_constrained))
                # Replace intensity with intensity of sampled Images
                piece_replaced = (m_r / m_s)**2 * np.sqrt(np.abs(
                    Images[i, j])) * np.exp(1j * np.angle(piece))
                # FFT
                piece_replacedFT = fftshift(fftn(piece_replaced)) * CTF
                # place updated intensity back into frequency space object
                #objectFT[ky.T,kx] += piece_replacedFT
                objectFT[ky.T,
                         kx] = piece_replacedFT + pieceFT - pieceFT_constrained
    # compute reconstructed object
    object = ifftn(ifftshift(objectFT))
    return object, objectFT
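
# Hedged usage sketch (the keys mirror the DATA lookups at the top of
# reconstruct(); the call and variable names are illustrative assumptions only):
#
#   DATA = {'wavelength': wavelength, 'LED_spacing': LED_spacing,
#           'matrix_spacing': matrix_spacing, 'x_offset': x_offset,
#           'y_offset': y_offset, 'NA_obj': NA_obj, 'px_size': px_size,
#           'Images': Images}          # Images indexed as Images[row, col] per LED
#   obj, objFT = reconstruct(DATA, iterations=10)
#   plt.imshow(np.abs(obj)); plt.show()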
Ejemplo n.º 48
0
def TEST_LIMITS(img, ker, eps, *, code='', visual=False):
    # Test runs for limitations only - 64x64! #
    ## Initialize ##
    F = np.shape(img)[0]
    Z = np.shape(img)[1]
    Y = np.shape(img)[2]
    X = np.shape(img)[3]

    pos = [None] * F
    wgt = [None] * F
    H_A, H_S = _IDFilters([Z * np.shape(ker)[1], Y, X])

    tru = OP._LoadTruth(code)
    error = np.full([int(USER.REC_ITER // 3), F, 3], np.nan)

    # Progress #
    stpwch = time.time()
    timers = np.zeros((F))
    t_remain = np.nan

    ## Recover Emitter Positions ##
    admm = ADMM(ker)
    for f in np.arange(F):
        psi_f = np.zeros((np.shape(ker)[0], Z * np.shape(ker)[1], Y, X))
        for z in range(Z):
            pb = (code, f, F, z, Z, 0, 1, 0, 1,
                  timers[f - (1 if (f > 0) else 0)], t_remain)
            zrng = [z * USER.KER_Z, (z + 1) * USER.KER_Z]

            # Split the image into each plane #
            img_ = img[f, z, :, :] / eps[f, z, :, :]
            eps_ = eps[f, z, :, :] / np.max(eps)

            # Obtain the point clouds per frame #
            psi_f[:, zrng[0]:zrng[1], ...], error = admm.Recover(img_,
                                                                 code=code,
                                                                 pb=pb,
                                                                 vis=visual,
                                                                 error=error)

        # Identify points in the cloud #
        Psi_f = npf.fftn(psi_f)
        psi_a = np.real_if_close(
            npf.ifftshift(npf.ifftn(Psi_f * H_A), axes=(-2, -1)))
        psi_s = np.real_if_close(
            npf.ifftshift(npf.ifftn(Psi_f * H_S), axes=(-2, -1)))

        # Determine where the smaller blur is bigger than the larger one #
        lhs = psi_a
        rhs = psi_s * (1 + 1 / eps_) + np.mean(psi_f)
        idx = np.nonzero(lhs > rhs)
        pos[f] = np.array([idx[3], idx[2], idx[1], idx[0] / USER.KER_T + f]).T
        wgt[f] = np.round(np.sqrt(psi_f**2 + ((psi_a + psi_s) / 2)**2)[idx], 3)

        # Progress Display #
        timers[f] = time.time() - stpwch
        if (sum(timers > 0) > 1):
            t_remain = (F - (f + 1)) * np.mean(np.diff(timers[timers > 0]))
            prefix = '(%s):\t%8.3f sec' % (code, timers[f])
            suffix = '(Remain: %5.0f sec)' % (t_remain)
            VIS._ProgressBar(f + 1, F, prefix=prefix, suffix=suffix)

    #if(error is not None):
    spi.savemat(OP.FOLD_MAT + code + ' error.mat', {'error': error})
    print(code + ' done!')
Ejemplo n.º 49
0
 def conditional_ifftshift(self, x):
     if global_variables.PRE_FFTSHIFT:
         x[:] = ifftshift(x)
         return x
     else:
         return x
Ejemplo n.º 50
0
def _Recover(img, ker, eps, *, code='', step=1, vis=False):
    ## Initialize ##
    f0 = 0
    F = np.shape(img)[0]
    Z = np.shape(img)[1]
    Y = np.shape(img)[2]
    X = np.shape(img)[3]
    C = np.minimum(X, Y) if ((not USER.REC_CHUNK) or ((X <= 128) and
                                                      (Y <= 128))) else 64

    pos = [None] * F
    wgt = [None] * F
    H_A, H_S = _IDFilters([np.shape(ker)[0], Z * np.shape(ker)[1], Y, X])

    tru = OP._LoadTruth(code)

    # Progress #
    stpwch = time.time()
    timers = np.zeros((F))
    t_remain = np.nan

    # Truth #
    #error = np.full([int(USER.REC_ITER//3), F], np.nan)

    ## Recover Emitter Positions ##
    ker_ = ker[..., (Y - C) // 2:(Y + C) // 2, :][...,
                                                  (X - C) // 2:(X + C) // 2]
    admm = ADMM(ker_)
    for f in np.arange(F, step=step):
        psi_f = np.zeros((np.shape(ker)[0], Z * np.shape(ker)[1], Y, X))
        for z in range(Z):
            zrng = [z * USER.KER_Z, (z + 1) * USER.KER_Z]

            # Split the image into each plane #
            img_ = img[f + f0, z, :, :] / eps[
                f + f0,
                z, :, :]  # << ------------------------------------------------------------ #
            eps_ = eps[f + f0, z, :, :] / np.max(eps)

            # Chunk the image and obtain point clouds per frame #
            img_chunks, xrng, yrng, overlay = _Chunk(img_, C=C)
            M = np.shape(xrng)[0]
            N = np.shape(yrng)[0]
            for m in range(M):
                for n in range(N):
                    pb = (code, f, F, z, Z, m, M, n, N,
                          timers[f -
                                 (1 if (M == 1 and N == 1 and f > 0) else 0)],
                          t_remain)
                    if (np.ptp(img_chunks[n, m, ...]) > 2 * np.std(img_)):
                        psi, _ = admm.Recover(img_chunks[n, m, ...],
                                              code=code,
                                              pb=pb,
                                              vis=False)
                        psi = np.fft.fftshift(psi, axes=(-2, -1))
                        psi_f[:, zrng[0]:zrng[1],
                              ...][...,
                                   yrng[n,
                                        0]:yrng[n,
                                                1], :][...,
                                                       xrng[m,
                                                            0]:xrng[m,
                                                                    1]] += psi
                    timers[f] = time.time() - stpwch
            psi_f[:, zrng[0]:zrng[1], ...] /= np.maximum(overlay, 1)

        # Identify points in the cloud #
        Psi_f = npf.fftn(psi_f)
        psi_a = np.real_if_close(
            npf.ifftshift(npf.ifftn(Psi_f * H_A), axes=(-2, -1)))
        psi_s = np.real_if_close(
            npf.ifftshift(npf.ifftn(Psi_f * H_S), axes=(-2, -1)))

        # Determine where the smaller blur is bigger than the larger one #
        lhs = psi_a
        rhs = psi_s * (1 + 1 / eps_) + np.mean(psi_f) * (
            1 + (USER.KER_T > 1))  #eps_ * np.std(psi_f)/np.mean(psi_f)
        idx = np.nonzero(lhs > rhs)
        pos[f] = np.array([idx[3], idx[2], idx[1], idx[0] / USER.KER_T + f]).T
        wgt[f] = np.round(np.sqrt(psi_f**2 + ((psi_a + psi_s) / 2)**2)[idx], 3)

        # Visualization #
        if (vis):
            plt.figure(figsize=(15, 5))
            ax = plt.axes(position=[0, 0, 1 / 3, 0.9])
            ax.imshow(img_, cmap='gray')
            ax.set_title('Input image #%i/%i' % (f + 1, F + 1))

            ax = plt.axes(position=[1 / 3, 0, 1 / 3, 0.9])
            ax.imshow(np.sum(psi_f, axis=(0, 1)), cmap='gray')
            ax.set_title('Deconvolution')

            ax = plt.axes(position=[2 / 3, 0, 1 / 3, 0.9])
            ax.imshow(img_, cmap='gray')
            if (len(wgt[f]) > 0):
                ax.scatter(pos[f][:, 0],
                           pos[f][:, 1],
                           s=100 * (wgt[f] / np.max(wgt[f])),
                           c='r')
            ax.set_title('Point Cloud')

            if (USER.KER_Z > 1):
                plt.figure(figsize=(6, 6))
                ax = plt.axes(projection='3d',
                              position=[-0.05, -0.07, 1.1, 1.1])
                if (len(wgt[f]) > 0):
                    ax.scatter(pos[f][:, 0],
                               pos[f][:, 1],
                               pos[f][:, 2],
                               s=100 * (wgt[f] / np.max(wgt[f])))
                ax.view_init(azim=30, elev=10)
                ax.set_xlim(0, np.shape(img)[3])
                ax.set_ylim(0, np.shape(img)[2])
                ax.set_zlim(0, USER.KER_Z)
            plt.show()

        # Progress Display #
        timers[f] = time.time() - stpwch
        if (sum(timers > 0) > 1):
            t_remain = (F - (f + 1)) * np.mean(np.diff(timers[timers > 0]))
            prefix = '(%s):\t%8.3f sec' % (code, timers[f])
            #suffix = '(Remain: %5.0f sec)' % (t_remain)
            suffix = '(Remain: %3.0f:%2.0f:%2.0f)' % (t_remain // 3600,
                                                      (t_remain % 3600) // 60,
                                                      t_remain % 60)
            VIS._ProgressBar(f + 1, F, prefix=prefix, suffix=suffix)

    #import scipy.io as spi
    #spi.savemat(OP.FOLD_MAT + code + ' error.mat', {'error': error})

    ## Output ##
    return pos, wgt
Ejemplo n.º 51
0
    def setup_fftw(self, pulse_in, fiber, output_power, raman_plots=False):
        ''' Call immediately before starting Propagate. This function does two
        things:\n
        1) it sets up byte aligned arrays for fftw\n
        2) it fftshifts betas, omegas, and the Raman response so that no further\n
            shifts are required during integration. This saves lots of time.'''
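        # (Pre-applying the shifts here means the propagation loop can stay in
        #  FFT ordering and skip an fftshift/ifftshift pair on every step,
        #  which is the time saving the docstring refers to.)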

        self.n = pulse_in.NPTS

        if PYFFTW_AVAILABLE:

            self.fft_input = pyfftw.empty_aligned(self.n, dtype='complex128')
            self.fft_output = pyfftw.empty_aligned(self.n, dtype='complex128')
            self.ifft_input = pyfftw.empty_aligned(self.n, dtype='complex128')
            self.ifft_output = pyfftw.empty_aligned(self.n, dtype='complex128')

            self.fft_input_2 = pyfftw.empty_aligned(self.n, dtype='complex128')
            self.fft_output_2 = pyfftw.empty_aligned(self.n,
                                                     dtype='complex128')
            self.ifft_input_2 = pyfftw.empty_aligned(self.n,
                                                     dtype='complex128')
            self.ifft_output_2 = pyfftw.empty_aligned(self.n,
                                                      dtype='complex128')

            self.ifft_input_3 = pyfftw.empty_aligned(self.n,
                                                     dtype='complex128')
            self.ifft_output_3 = pyfftw.empty_aligned(self.n,
                                                      dtype='complex128')

            # To be double sure that there are no problems, also make 2 copies of
            # the FFT objects. This lets us nest ifft_2 around a function using ifft
            # without worrying about potential problems.
            #self.fft    = pyfftw.builders.fft(self.fft_input)
            #self.fft_2  = pyfftw.builders.fft(self.fft_input_2)
            #self.ifft   = pyfftw.builders.fft(self.ifft_input)
            #self.ifft_2 = pyfftw.builders.fft(self.ifft_input_2)
            self.fft = pyfftw.FFTW(self.fft_input,
                                   self.fft_output,
                                   direction='FFTW_BACKWARD')
            self.fft_2 = pyfftw.FFTW(self.fft_input_2,
                                     self.fft_output_2,
                                     direction='FFTW_BACKWARD')

            self.ifft = pyfftw.FFTW(self.ifft_input,
                                    self.ifft_output,
                                    direction='FFTW_FORWARD')
            self.ifft_2 = pyfftw.FFTW(self.ifft_input_2,
                                      self.ifft_output_2,
                                      direction='FFTW_FORWARD')
            self.ifft_3 = pyfftw.FFTW(self.ifft_input_3,
                                      self.ifft_output_3,
                                      direction='FFTW_FORWARD')

        else:
            self.fft_input = np.ndarray((self.n, ), dtype='complex128')
            self.fft_output = np.ndarray((self.n, ), dtype='complex128')
            self.ifft_input = np.ndarray((self.n, ), dtype='complex128')
            self.ifft_output = np.ndarray((self.n, ), dtype='complex128')

            self.fft_input_2 = np.ndarray((self.n, ), dtype='complex128')
            self.fft_output_2 = np.ndarray((self.n, ), dtype='complex128')
            self.ifft_input_2 = np.ndarray((self.n, ), dtype='complex128')
            self.ifft_output_2 = np.ndarray((self.n, ), dtype='complex128')

            # self.fft_input    = pyfftw.empty_aligned(self.n, fft_n, dtype='complex128')
            # self.fft_output   = pyfftw.empty_aligned(self.n, fft_n, dtype='complex128')
            # self.ifft_input   = pyfftw.empty_aligned(self.n, fft_n, dtype='complex128')
            # self.ifft_output  = pyfftw.empty_aligned(self.n, fft_n, dtype='complex128')
            #
            # self.fft_input_2  = pyfftw.empty_aligned(self.n, fft_n, dtype='complex128')
            # self.fft_output_2 = pyfftw.empty_aligned(self.n, fft_n, dtype='complex128')
            # self.ifft_input_2 = pyfftw.empty_aligned(self.n, fft_n, dtype='complex128')
            # self.ifft_output_2= pyfftw.empty_aligned(self.n, fft_n, dtype='complex128')

        self.A_I = np.ndarray((self.n, ), dtype='complex128')

        self.A2 = np.ndarray((self.n, ), dtype='complex128')
        self.exp_D = np.ndarray((self.n, ), dtype='complex128')
        self.k1 = np.ndarray((self.n, ), dtype='complex128')
        self.k2 = np.ndarray((self.n, ), dtype='complex128')
        self.k3 = np.ndarray((self.n, ), dtype='complex128')
        self.k4 = np.ndarray((self.n, ), dtype='complex128')
        self.temp = np.ndarray((self.n, ), dtype='complex128')
        self.Aw = np.ndarray((self.n, ), dtype='complex128')
        self.A2w = np.ndarray((self.n, ), dtype='complex128')
        self.dA = np.ndarray((self.n, ), dtype='complex128')
        self.dA2 = np.ndarray((self.n, ), dtype='complex128')
        self.R_A2 = np.ndarray((self.n, ), dtype='complex128')
        self.dR_A2 = np.ndarray((self.n, ), dtype='complex128')
        self.omegas = np.ndarray((self.n, ), dtype='complex128')
        self.alpha = np.ndarray((self.n, ), dtype='complex128')
        self.betas = np.ndarray((self.n, ), dtype='complex128')
        self.LinearStep_output = np.ndarray((self.n, ), dtype='complex128')
        self.A = np.ndarray((self.n, ), dtype='complex128')
        self.R = np.ndarray((self.n, ), dtype='complex128')
        self.R0 = np.ndarray((self.n, ), dtype='complex128')
        self.Af = np.ndarray((self.n, ), dtype='complex128')
        self.Ac = np.ndarray((self.n, ), dtype='complex128')

        self.A_I[:] = 0.0
        self.A2[:] = 0.0
        self.Af[:] = 0.0
        self.Ac[:] = 0.0
        self.A[:] = 0.0
        self.R[:] = 0.0
        self.R0[:] = 0.0

        self.omegas[:] = pulse_in.V_THz
        self.alpha[:] = -fiber.get_gain(pulse_in, output_power)
        self.gamma = fiber.gamma
        self.w0 = pulse_in.center_frequency_THz * 2.0 * np.pi

        self.last_h = None

        # if not self.disable_Raman:

        self.CalculateRamanResponseFT(pulse_in)

        if raman_plots:
            plt.subplot(221)
            plt.plot(self.omegas / (2 * np.pi),
                     np.abs(self.R - (1 - self.f_R)), 'bo')
            plt.plot(self.omegas / (2 * np.pi),
                     np.abs(self.R0 - (1 - self.f_R0)), 'r')
            #plt.xlim([0,25])
            plt.title('Abs[R(w)]')
            plt.xlabel('THz')
            plt.subplot(222)
            plt.plot(self.omegas / (2 * np.pi),
                     np.unwrap(np.angle(self.R - (1 - self.f_R))), 'bo')
            plt.plot(self.omegas / (2 * np.pi),
                     np.unwrap(np.angle(self.R0 - (1 - self.f_R0))), 'r')
            plt.title('Angle[R(w)]')
            plt.xlabel('THz')
            plt.subplot(223)
            plt.plot(pulse_in.T*1000, ifftshift(np.real(self.IFFT_t(\
                    self.R - (1-self.f_R)))), 'bo')
            plt.plot(pulse_in.T*1000, ifftshift(np.real(self.IFFT_t(\
                    self.R0 - (1-self.f_R0)))), 'r')
            plt.title('Abs[R[t]]')
            plt.xlim([0, 1000])
            plt.xlabel('fs')
            plt.subplot(224)
            plt.plot(self.omegas / (2 * np.pi), abs(self.FFT_t(self.A)))
            plt.title('Abs[A[w]]')
            plt.xlabel('THz')
            plt.show()

        # Load up parameters
        self.A[:] = self.conditional_fftshift(pulse_in.AT)

        self.omegas[:] = self.conditional_fftshift(self.omegas)
        # self.betas[:]   = self.conditional_fftshift(self.betas)
        self.alpha[:] = self.conditional_fftshift(self.alpha)
        self.R[:] = self.conditional_fftshift(self.R)
        self.R0[:] = self.conditional_fftshift(self.R0)
        print('pulse energy in ', np.sum(abs(pulse_in.AT)))
        print('copied as  ', np.sum(abs(self.A)))
Ejemplo n.º 52
0
def fftGS(z, target, estimate=None, iterations=20, error=None, flagRand=True):
    """
    Far-field Gerchberg-Saxton algorithm

    Calculates the phase distribution in an object plane (for a given
    amplitude constraint) to obtain a specific amplitude distribution in
    the target plane.
    It uses the Gerchberg-Saxton algorithm for far-field propagation,
    using a standard FFT.


    **ARGUMENTS:**

            ========== ======================================================
            z          Propagation distance. This is used to calculate the
                       resolution needed in the object plane, for a given
                       target resolution.
            target     :class:`Field` instance whose amplitude distribution
                       is used to represent the amplitude constraint to be
                       applied in the target plane. The phase of this field
                       is not used.
            estimate   :class:`Field` instance used as initial estimate for
                       the problem. The amplitude of this field is taken as
                       the reference amplitude and the phase is obtained. The
                       resolution used to define this field must match the
                       value needed to obtain the required target resolution
                       when the FFT-Fraunhofer transform is used. If the
                       wrong value is given an exception is raised.
                       If not given, a unitary amplitude wave, with random
                       phase and the correct resolution, is used.
            iterations Maximum number of iterations
            error      Expected error
            ========== ======================================================

            .. note:: target and object must have the same wavelength

    **RETURN VALUE:**
            (holo,err)

            ====  ==========================================================
            holo  Field instance, containing the reference amplitude
                      information and the phase obtained from the iterative
                      algorithm. The holo.res attribute contains the
                      resolution of the calculated hologram for the given
                      propagation distance. The holo.l attribute contains the
                      wavelength used to calculate the hologram.

            err   Final error obtained
            ====  ==========================================================


    """

    if estimate is None:
        if flagRand:
            edata = exp(2.0j * pi * random(target.shape))
        else:
            edata = exp(2.0j * pi * ones(target.shape))
        sx, sy = target.size
        dxe = target.l * z / sx
        dye = target.l * z / sy
        estimate = Field(data=edata, psize=(dxe, dye), l=target.l)

    assert (
        estimate.shape == target.shape
    ), "The estimate field, and the target field, must have the same shape"

    assert (
        target.l == estimate.l
    ), "The wave lengths for the reference beam, and the target must be equal"

    sx, sy = target.size
    dxe = target.l * z / sx
    dye = target.l * z / sy

    dx, dy = estimate.res

    assert (dxe == dx) and (
        dye == dy
    ), "The resolution for the reference beam, and the target must be equal"

    holo = estimate.data
    eabs = estimate.abs()

    # Normalized Target amplitude
    ntarget = target.abs() / target.abs().max()

    for n in range(iterations):

        if n != 0:
            holo = fftshift(fft2(ifftshift(imp)))

        # Keep only the phase in the hologram plane
        holo = exp(1.0j * angle(holo))
        holo = holo * eabs

        # Calculate the new image plane
        imp = ifftshift(ifft2(fftshift(holo)))

        err = (ntarget - abs(imp) / abs(imp).max()).std()

        if error is not None and err < error:
            break

        d = exp(1.0j * angle(imp))
        imp = d * target.abs()

    holo = Field(data=holo, psize=(dxe, dye), l=target.l)
    return holo, err
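
# Hedged usage sketch (Field is the class referenced in the docstring above; the
# numeric values are illustrative assumptions, not values from this module):
#
#   target = Field(data=target_amplitude, psize=(10e-6, 10e-6), l=633e-9)
#   holo, err = fftGS(z=0.5, target=target, iterations=50, error=1e-3)
#   doe_phase = angle(holo.data)   # phase pattern to encode on the hologram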
Ejemplo n.º 53
0
def remove_stripe(img, level, wname='db5', sigma=1.5):
    """
	Suppress horizontal stripes in a sinogram using the Fourier-wavelet based
	method by Munch et al. [2]_.

	Parameters
	----------
	img : 2d array
		The two-dimensional array representing the image or the sinogram to de-stripe.

	level : int
		The highest decomposition level.

	wname : str, optional
		The wavelet type. Default value is ``db5``

	sigma : float, optional
		The damping factor in the Fourier space. Default value is ``1.5``

	Returns
	-------
	out : 2d array
		The resulting filtered image.

	References
	----------
	.. [2] B. Munch, P. Trtik, F. Marone, M. Stampanoni, Stripe and ring artifact removal with
		   combined wavelet-Fourier filtering, Optics Express 17(10):8567-8591, 2009.
	"""

    nrow, ncol = img.shape

    # wavelet decomposition.
    cH = []
    cV = []
    cD = []

    for i in range(0, level):
        img, (cHi, cVi, cDi) = pywt.dwt2(img, wname)
        cH.append(cHi)
        cV.append(cVi)
        cD.append(cDi)

    # FFT transform of horizontal frequency bands
    for i in range(0, level):
        # FFT
        fcV = fftshift(fft(cV[i], axis=0))
        my, mx = fcV.shape

        # damping of vertical stripe information
        yy2 = (np.arange(-np.floor(my / 2), -np.floor(my / 2) + my))**2
        damp = -np.expm1(-yy2 / (2.0 * (sigma**2)))
        fcV = fcV * np.tile(damp.reshape(damp.size, 1), (1, mx))

        #inverse FFT
        cV[i] = np.real(ifft(ifftshift(fcV), axis=0))

    # wavelet reconstruction
    for i in range(level - 1, -1, -1):
        img = img[0:cH[i].shape[0], 0:cH[i].shape[1]]
        img = pywt.idwt2((img, (cH[i], cV[i], cD[i])), wname)

    return img[0:nrow, 0:ncol]
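
# Hedged usage sketch on a synthetic sinogram with an artificial stripe. It relies
# only on the numpy/pywt imports this function already depends on; the sizes and
# stripe position below are illustrative assumptions.
sino = np.random.rand(720, 512)
sino[:, 256] += 5.0                     # inject a stripe along one detector column
sino_filt = remove_stripe(sino, level=4, wname='db5', sigma=1.5)
print(sino_filt.shape)                  # (720, 512): the original size is preserved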
Ejemplo n.º 54
0
 def test_inverse(self):
     for n in [1, 4, 9, 100, 211]:
         x = random((n, ))
         assert_array_almost_equal(ifftshift(fftshift(x)), x)
Ejemplo n.º 55
0
    def updateNoise(self):
        """Updates the noise sample. Does not change any of the noise parameters 
            but choses a new random sample given the previously set parameters.
        """

        if not (self.noiseType in [
                'binary', 'Binary', 'normal', 'Normal', 'uniform', 'Uniform'
        ]):
            if (self.noiseType in [
                    'image', 'Image'
            ]) and (self.imageComponent in ['amplitude', 'Amplitude']):
                self.noiseTex = numpy.random.uniform(0, 1, int(self._size**2))
                self.noiseTex = numpy.reshape(
                    self.noiseTex, (int(self._size), int(self._size)))
                if self.filter in ['Butterworth', 'butterworth']:
                    self.noiseTex = fftshift(self._filter(self.noiseTex))
                elif self.filter in ['Gabor', 'gabor']:
                    self.noiseTex = fftshift(self._gabor(self.noiseTex))
                elif self.filter in ['Isotropic', 'isotropic']:
                    self.noiseTex = fftshift(self._isotropic(self.noiseTex))
                self.noiseTex[0][0] = 0
                In = self.noiseTex * exp(1j * self.noisePh)
                Im = numpy.real(ifft2(In))
            else:
                Ph = numpy.random.uniform(0, 2 * numpy.pi, int(self._size**2))
                Ph = numpy.reshape(Ph, (int(self._size), int(self._size)))
                In = self.noiseTex * exp(1j * Ph)
                Im = numpy.real(ifft2(In))
                Im = ifftshift(Im)
            gsd = filters.getRMScontrast(Im)
            factor = gsd * self.noiseClip
            numpy.clip(Im, -factor, factor, Im)
            self.tex = Im / factor
        elif self.noiseType in ['normal', 'Normal']:
            self.noiseTex = numpy.random.randn(int(
                self._sideLength[1]), int(
                    self._sideLength[0])) / self.noiseClip
        elif self.noiseType in ['uniform', 'Uniform']:
            self.noiseTex = 2.0 * numpy.random.rand(int(
                self._sideLength[1]), int(self._sideLength[0])) - 1.0
        else:
            numpy.random.shuffle(
                self.noiseTex)  # pick random noise sample by shuffling values
            self.noiseTex = numpy.reshape(
                self.noiseTex,
                (int(self._sideLength[1]), int(self._sideLength[0])))
        if self.noiseType in [
                'binary', 'Binary', 'normal', 'Normal', 'uniform', 'Uniform'
        ]:
            if self.filter in [
                    'butterworth', 'Butterworth', 'Gabor', 'gabor',
                    'Isotropic', 'isotropic'
            ]:
                if self.units == 'pix':
                    if self._size[0] == self._size[1]:
                        baseImage = numpy.array(
                            Image.fromarray(self.noiseTex).resize(
                                (int(self._size[0]), int(self._size[1])),
                                Image.NEAREST))
                    else:
                        msg = (
                            'NoiseStim can only apply filters to square noise images'
                        )
                        raise ValueError(msg)
                else:
                    baseImage = numpy.array(
                        Image.fromarray(self.noiseTex).resize(
                            (int(self._size), int(self._size)), Image.NEAREST))
                baseImage = numpy.array(baseImage).astype(
                    numpy.float32) * 0.0078431372549019607 - 1.0
                FT = fft2(baseImage)
                spectrum = numpy.absolute(fftshift(FT))
                angle = numpy.angle(FT)
                if self.filter in ['butterworth', 'Butterworth']:
                    spectrum = fftshift(self._filter(spectrum))
                elif self.filter in ['isotropic', 'Isotropic']:
                    spectrum = fftshift(self._isotropic(spectrum))
                elif self.filter in ['gabor', 'Gabor']:
                    spectrum = fftshift(self._gabor(spectrum))
                spectrum[0][0] = 0  # set DC to zero
                FT = spectrum * exp(1j * angle)

                Im = numpy.real(ifft2(FT))
                gsd = filters.getRMScontrast(Im)
                factor = gsd * self.noiseClip
                numpy.clip(Im, -factor, factor, Im)
                self.tex = Im / factor
            else:
                if not (self.noiseType in ['image', 'Image']):
                    self.tex = self.noiseTex
Ejemplo n.º 56
0
def INFFT(input):
    return fft.fftshift(fft.ifft2(fft.ifftshift(input)))
Ejemplo n.º 57
0
def double_wedge_filter(sinogram,
                        center=0,
                        sino_type="180",
                        iteration=5,
                        mask=None,
                        ratio=1.0,
                        pad=250):
    """
    Apply double-wedge filter to a sinogram image (Ref. [1]_).

    Parameters
    ----------
    sinogram : array_like
        2D array. 180-degree sinogram or 360-degree sinogram.
    center : float, optional
        Center of rotation. Not needed for a 360-degree sinogram.
    sino_type : {"180", "360"}
        Sinogram type : 180-degree or 360-degree.
    iteration : int
        Number of iterations.
    mask : array_like, optional
        Double-wedge binary mask.
    ratio : float, optional
        Defines the cut-off angle of the double-wedge filter.
    pad : int
        Padding width.

    Returns
    -------
    array_like
        2D array. Filtered sinogram.

    References
    ----------
    .. [1] https://doi.org/10.1364/OE.418448
    """
    if not (sino_type == "180" or sino_type == "360"):
        raise ValueError("!!! Use only one of two options: '180' or '360'!!!")
    if sino_type == "180":
        nrow0 = sinogram.shape[0]
        if center == 0:
            raise ValueError(
                "Please provide the location of the rotation axis")
        sinogram = conv.convert_sinogram_180_to_360(sinogram, center)
    (nrow, ncol) = sinogram.shape
    ncol_pad = ncol + 2 * pad
    if mask is None:
        mask = make_double_wedge_mask(nrow, ncol_pad, ratio * ncol / 2.0)
    else:
        if mask.shape != (nrow, ncol_pad):
            raise ValueError(
                "Shape of the left-right padded sinogram {0} and the mask "
                "{1} is not the same!!!".format((nrow, ncol_pad), mask.shape))
    sino_filt = np.copy(sinogram)
    for i in range(iteration):
        sino_filt = np.pad(sino_filt, ((0, 0), (pad, pad)), mode="edge")
        sino_filt = np.real(
            fft.ifft2(fft.ifftshift(fft.fftshift(fft.fft2(sino_filt)) * mask)))
        sino_filt = sino_filt[:, pad:ncol + pad]
    if sino_type == "180":
        sino_filt = sino_filt[:nrow0]
    return sino_filt
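
# Hedged usage sketch (make_double_wedge_mask and the conv/fft helpers come from
# the same package as this function; sino180 and the center value are illustrative
# assumptions):
#
#   sino_filt = double_wedge_filter(sino180, center=1024.5, sino_type="180",
#                                   iteration=5, ratio=1.0, pad=250)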
Ejemplo n.º 58
0
# evanescent_idx = np.isnan(kz)
ATF0[cut_idx] = 0  # exclude evanescent k

PSF3D = np.zeros(((Nz, Npixels - 1, Npixels - 1)))

for idx, z in enumerate(zs):

    angular_spectrum_propagator = np.exp(1.j * 2 * np.pi * kz * z)

    ATF = ATF0 * angular_spectrum_propagator

    evanescent_idx = (k_rho > k_cut_off)
    ATF[evanescent_idx] = 0

    ASF = ifftshift(ifft2(ATF))  #* k**2/f**2 # Amplitude Spread Function
    ASF = ASF[1:, 1:]

    PSF = np.abs(ASF)**2  # Point Spread Function

    PSF3D[idx, :, :] = PSF

print('The numerical aperture of the system is:', NA)
print('The transverse resolution is:', DeltaXY, 'um')
print('The axial resolution is:', DeltaZ, 'um')
print('The pixel size is:', dr, 'um')
print('The voxel depth is:', dz, 'um')
print(f'The displacement z from the focus is: {displacementZ} um')
print(f'The displacement y from the optical axis is: {displacementY} um')

# %% figure 1
Ejemplo n.º 59
0
def gs_gpu(idata, itera=100):
    """Gerchberg-Saxton algorithm to calculate DOEs using the GPU
    
    Calculates the phase distribution in an object plane to obtain a
    specific amplitude distribution in the target plane. It uses an
    FFT to calculate the field propagation.
    The wavefront at the DOE plane is assumed as a plane wave.
    
    **ARGUMENTS:**
    
        ========== ======================================================
        idata      numpy array containing the target amplitude distribution 
        itera      Maximum number of iterations
        ========== ======================================================
    """

    pl = cl.get_platforms()[0]
    devices = pl.get_devices(device_type=cl.device_type.GPU)
    ctx = cl.Context(devices=[devices[0]])
    queue = cl.CommandQueue(ctx)

    plan = Plan(idata.shape, queue=queue,
                dtype=complex128)  # does not work with "complex128"

    src = str(
        Template(KERNEL).render(
            double_support=all(has_double_support(dev) for dev in devices),
            amd_double_support=all(
                has_amd_double_support(dev) for dev in devices)))
    prg = cl.Program(ctx, src).build()

    idata_gpu = cl_array.to_device(queue,
                                   ifftshift(idata).astype("complex128"))
    fdata_gpu = cl_array.empty_like(idata_gpu)
    rdata_gpu = cl_array.empty_like(idata_gpu)
    plan.execute(idata_gpu.data, fdata_gpu.data)

    e = 1000
    ea = 1000
    for i in range(itera):
        prg.norm(queue, fdata_gpu.shape, None, fdata_gpu.data)
        plan.execute(fdata_gpu.data, rdata_gpu.data, inverse=True)
        tr = rdata_gpu.get()
        rdata = ifftshift(tr)

        # TODO: This calculation should be done on the GPU
        e = (abs(rdata) - idata).std()
        if e > ea:
            break
        ea = e

        prg.norm2(queue, rdata_gpu.shape, None, rdata_gpu.data, idata_gpu.data)

        plan.execute(rdata_gpu.data, fdata_gpu.data)

    fdata = fdata_gpu.get()

    #~ prg.norm(queue, fdata_gpu.shape, None,fdata_gpu.data)
    fdata = ifftshift(fdata)
    fdata = exp(1.j * angle(fdata))

    #~ fdata=fdata_gpu.get()
    return fdata
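
# Hedged usage sketch (requires a working OpenCL GPU plus the pyopencl/pyfft
# imports this module already uses; the target image source is an illustrative
# assumption):
#
#   target = abs(target_image).astype('float64')   # desired amplitude in the target plane
#   doe_field = gs_gpu(target, itera=200)
#   doe_phase = angle(doe_field)                   # phase mask to encode on the DOE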
Ejemplo n.º 60
0
 def lens_propagate(self, wave_in, cs_mm, defocus, aperture):
     h = mtf(self.wave_length, cs_mm, defocus)(self.q_mgrid)
     aper = np.where(self.q_mgrid < aperture / self.wave_length, 1., 0.)
     h *= aper
     return ifft2(ifftshift(h) * fft2(wave_in))