Example #1
def fft_correlate_strided_images(image_a, image_b):
    """FFT based cross correlation
    of two images with multiple views of np.stride_tricks()

    The 2D FFT should be applied to the last two axes (-2,-1) and the
    zero axis is the number of the interrogation window

    This should also work out of the box for rectangular windows.

    Parameters
    ----------
    image_a : 3d np.ndarray, first dimension is the number of windows,
        and two last dimensions are interrogation windows of the first image
    image_b : similar
    """
    s1 = np.array(image_a.shape[-2:])
    s2 = np.array(image_b.shape[-2:])
    size = s1 + s2 - 1
    fsize = 2**np.ceil(np.log2(size)).astype(int)
    fslice = tuple([slice(0, image_a.shape[0])] +
                   [slice(0, int(sz)) for sz in size])
    f2a = rfft2(image_a, fsize, axes=(-2, -1))
    f2b = rfft2(image_b[:, ::-1, ::-1], fsize, axes=(-2, -1))
    corr = irfft2(f2a * f2b, axes=(-2, -1)).real[fslice]
    return corr
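
A minimal usage sketch for the function above. The stack_windows helper here is an assumption for illustration; the original code builds the (n_windows, wy, wx) stacks with np.stride_tricks views instead.

import numpy as np
from numpy.fft import rfft2, irfft2

def stack_windows(frame, wy, wx):
    # Split a (H, W) frame into a (n_windows, wy, wx) stack of
    # non-overlapping interrogation windows.
    H, W = frame.shape
    tiles = frame[:H - H % wy, :W - W % wx].reshape(H // wy, wy, W // wx, wx)
    return tiles.transpose(0, 2, 1, 3).reshape(-1, wy, wx)

frame_a = np.random.rand(64, 64)
frame_b = np.roll(frame_a, (2, 3), axis=(0, 1))  # frame_a shifted by (2, 3)
wins_a = stack_windows(frame_a, 16, 16)
wins_b = stack_windows(frame_b, 16, 16)
corr = fft_correlate_strided_images(wins_a, wins_b)
print(corr.shape)  # (16, 31, 31): 16 windows, each map sized 2 * 16 - 1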
Example #2
def _BandPassFilter(image, len_noise, len_object):
    """
    bandpass filter implementation.
    Source: http://physics-server.uoregon.edu/~raghu/particle_tracking.html
    """
    b = len_noise
    w = round(len_object)
    N = 2 * w + 1

    # Gaussian Convolution Kernel
    sm = numpy.arange(0, N, dtype=float)
    r = (sm - w) / (2 * b)
    gx = numpy.power(math.e, -r ** 2) / (2 * b * math.sqrt(math.pi))
    gx = numpy.reshape(gx, (gx.shape[0], 1))
    gy = gx.conj().transpose()

    # Boxcar average kernel, background
    bx = numpy.zeros((1, N), dtype=float) + 1 / N
    by = bx.conj().transpose()

    # Convolution with the matrix and kernels
    gxy = gx * gy
    bxy = bx * by
    kernel = fft.rfft2(gxy - bxy, image.shape)

    res = fft.irfft2(fft.rfft2(image) * kernel)
    arr_out = numpy.zeros((image.shape))
    arr_out[w:-w, w:-w] = res[2 * w:, 2 * w:]
    res = numpy.maximum(arr_out, 0)
    return res
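
A hedged usage sketch for the filter above: suppress single-pixel noise while keeping particle-sized blobs. All values here are illustrative.

import math
import numpy

img = numpy.zeros((64, 64))
img[30:36, 30:36] = 1.0                 # a bright "particle"
img += 0.1 * numpy.random.rand(64, 64)  # pixel-scale noise
filtered = _BandPassFilter(img, len_noise=1, len_object=5)
print(filtered.shape)  # (64, 64), background-subtracted and clipped at 0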
Example #3
def rl_deconv_all(img_list, psf_list, iterations=10, lbd=0.2):
    """
    Spatially-variant Richardson-Lucy deconvolution with total variation regularization
    """
    min_value = []
    for img_idx, img in enumerate(img_list):
        img_list[img_idx] = np.pad(img_list[img_idx],
                                   np.max(psf_list[0].shape),
                                   mode='reflect')
        min_value.append(np.min(img))
        img_list[img_idx] = img_list[img_idx] - np.min(img)
    size = np.array(np.array(img_list[0].shape) +
                    np.array(psf_list[0].shape)) - 1
    fsize = [fftpack.helper.next_fast_len(int(d)) for d in size]
    fslice = tuple([slice(0, int(sz)) for sz in size])

    latent_estimate = img_list.copy()
    error_estimate = img_list.copy()

    psf_f = []
    psf_flipped_f = []
    for img_idx, img in enumerate(latent_estimate):
        psf_f.append(rfft2(psf_list[img_idx], fsize))
        _psf_flipped = np.flip(psf_list[img_idx], axis=0)
        _psf_flipped = np.flip(_psf_flipped, axis=1)
        psf_flipped_f.append(rfft2(_psf_flipped, fsize))

    for i in range(iterations):
        log.info('RL TV Iter {}/{}, lbd = {}'.format(i, iterations, lbd))
        regularization = np.ones(img_list[0].shape)

        for img_idx, img in enumerate(latent_estimate):
            estimate_convolved = irfft2(
                np.multiply(psf_f[img_idx],
                            rfft2(latent_estimate[img_idx],
                                  fsize)))[fslice].real
            estimate_convolved = _centered(estimate_convolved, img.shape)
            relative_blur = div0(img_list[img_idx], estimate_convolved)
            error_estimate[img_idx] = irfft2(
                np.multiply(psf_flipped_f[img_idx],
                            rfft2(relative_blur, fsize)), fsize)[fslice].real
            error_estimate[img_idx] = _centered(error_estimate[img_idx],
                                                img.shape)
            regularization += 1.0 - (lbd * divergence(
                latent_estimate[img_idx] /
                np.linalg.norm(latent_estimate[img_idx], ord=1)))
            latent_estimate[img_idx] = np.multiply(latent_estimate[img_idx],
                                                   error_estimate[img_idx])

        for img_idx, img in enumerate(img_list):
            latent_estimate[img_idx] = np.divide(
                latent_estimate[img_idx],
                regularization / float(len(img_list)))

    for img_idx, img in enumerate(latent_estimate):
        latent_estimate[img_idx] += min_value[img_idx]
        latent_estimate[img_idx] = unpad(latent_estimate[img_idx],
                                         np.max(psf_list[0].shape))

    return np.sum(latent_estimate, axis=0)
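
The deconvolution above leans on helpers (div0, divergence, _centered, unpad, log) defined elsewhere in its module. Plausible minimal versions of two of them, stated as assumptions rather than the original code:

import numpy as np

def div0(a, b):
    # Elementwise a / b that returns 0 wherever the division is undefined.
    with np.errstate(divide='ignore', invalid='ignore'):
        out = np.true_divide(a, b)
    out[~np.isfinite(out)] = 0.0
    return out

def unpad(arr, pad_width):
    # Undo a symmetric np.pad with a scalar pad width.
    return arr[pad_width:-pad_width, pad_width:-pad_width]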
Example #5
def convolution(bin_template, bin_image, tollerance=0.5):
    expected = numpy.count_nonzero(bin_template)
    ih, iw = bin_image.shape
    th, tw = bin_template.shape

    # Pad image to even dimensions
    if ih % 2 or iw % 2:
        if ih % 2:
            ih += 1
        if iw % 2:
            iw += 1
        bin_image = pad_bin_image_to_shape(bin_image, (ih, iw))
    if expected == 0:
        return []

    # Calculate the convolution of the FFT's of the image & template
    convolution_freqs = rfft2(bin_image) * rfft2(bin_template[::-1, ::-1],
                                                 bin_image.shape)
    # Reverse the FFT to find the result image
    convolution_image = irfft2(convolution_freqs)
    # At this point, the maximum point in convolution_image should be the
    # bottom right (why?) of the area of greatest match

    # The areas in the result image within expected +- tolerance are where we
    # saw matches
    found_bitmap = ((convolution_image > (expected - tollerance)) &
                    (convolution_image < (expected + tollerance)))

    match_points = numpy.transpose(numpy.nonzero(found_bitmap))  # bottom right

    # Find the top left point of the match (match_point is the bottom-right
    # corner inside the template, hence the -1)
    return [((fx - (tw - 1)), (fy - (th - 1))) for (fy, fx) in match_points]
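
A small self-check of the matcher above: a 3x3 "L" placed at row 5, column 7 of a binary image should be reported by its top-left corner as (x, y) = (7, 5).

import numpy
from numpy.fft import rfft2, irfft2

bin_image = numpy.zeros((20, 20))
template = numpy.zeros((3, 3))
template[:, 0] = 1   # vertical stroke
template[2, :] = 1   # horizontal stroke
bin_image[5:8, 7:10] = template

print(convolution(template, bin_image))  # expected: [(7, 5)]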
Example #6
def fft_convolve(in1, in2, times):
    def _centered(arr, newsize):
        # Return the center newsize portion of the array.
        currsize = np.array(arr.shape)
        startind = (currsize - newsize) // 2
        endind = startind + newsize
        myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
        return arr[tuple(myslice)]

    if times == 0:
        return in1.copy()

    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    shape = s1 + (s2 - 1) * times

    # Speed up FFT by padding to optimal size for FFTPACK
    fshape = [next_fast_len(int(d)) for d in shape]
    fslice = tuple([slice(0, int(sz)) for sz in shape])

    resfft = fast_power(rfft2(in2, fshape), times)
    resfft = resfft * rfft2(in1, fshape)
    ret = irfft2(resfft, fshape)[fslice].copy()
    ret = ret.real

    return _centered(ret, s1)
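
fast_power is not shown in the snippet. Because convolving `times` copies of in2 multiplies spectra elementwise, a plausible minimal definition (an assumption) is plain elementwise exponentiation:

def fast_power(x, n):
    # Elementwise n-th power of the spectrum; repeated squaring would
    # give the same result with fewer multiplications for large n.
    return x ** n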
Example #7
def indicator_function(delta, N):
    # Create grid
    N = 2 * N
    L = 1 + 10 * delta / np.sqrt(12)
    dxy = 2 * L / N
    xy = np.linspace(-L + dxy / 2, L - dxy / 2, N)

    # Create mesh and radial direction
    X, Y = np.meshgrid(xy, xy)
    r = np.sqrt(X**2 + Y**2)

    # Unfiltered indicator function (normalized to integrate to unity)
    H = np.zeros((N, N))
    H[r < 1] = 1 / np.pi
    H = H / np.sum(H * dxy**2)

    # Gaussian filter (normalized to integrate to unity)
    G = 6 / (np.pi * delta**2) * np.exp(-6 * r**2 / delta**2)

    # Filtered indicator function doing convolution as multiplication in
    # spectral space
    I = fft.irfft2(fft.rfft2(H) * fft.rfft2(G) * dxy**2)

    # This is really only a function of r, so just return it as a 1-D array
    # Notice the shift to the corners after doing the FFT
    r = xy[0:N // 2] + L
    I = I[0, 0:N // 2]

    return r, I
Example #8
def convolve_dcr_image(flux_arr, x_loc, y_loc, bandpass=None, x_size=None, y_size=None, seed=None,
                       psf=None, pad_image=1.5, pixel_scale=None, kernel_radius=None,
                       oversample_image=1, photon_noise=False, sky_noise=0.0, verbose=True, **kwargs):
    """Wrapper to call fast_dft with multiple DCR planes."""
    x_size_use = int(x_size * pad_image)
    y_size_use = int(y_size * pad_image)
    oversample_image = int(oversample_image)
    pixel_scale_use = pixel_scale / oversample_image
    x0 = oversample_image * ((x_size_use - x_size) // 2)
    x1 = x0 + x_size * oversample_image
    y0 = oversample_image * ((y_size_use - y_size) // 2)
    y1 = y0 + y_size * oversample_image
    x_loc_use = x_loc * oversample_image + x0
    y_loc_use = y_loc * oversample_image + y0
    x_size_use *= oversample_image
    y_size_use *= oversample_image
    timing_model = -time.time()
    source_image = fast_dft(flux_arr, x_loc_use, y_loc_use, x_size=x_size_use, y_size=y_size_use,
                            kernel_radius=kernel_radius, **kwargs)
    timing_model += time.time()
    n_star = len(x_loc)
    if oversample_image > 1:
        bright_star = "bright "
    else:
        bright_star = ""
    if verbose:
        if n_star == 1:
            print("Time to model %i %sstar: [%0.3fs]"
                  % (n_star, bright_star, timing_model))
        else:
            print("Time to model %i %sstars: [%0.3fs | %0.5fs per star]"
                  % (n_star, bright_star, timing_model, timing_model / n_star))
    rand_gen = np.random
    if seed is not None:
        rand_gen.seed(seed - 1)
    # The images are purely real, so we can save time by using the real FFT,
    # which uses only half of the complex plane
    convol = np.zeros((y_size_use, x_size_use // 2 + 1), dtype='complex64')
    dcr_gen = dcr_generator(bandpass, pixel_scale=pixel_scale_use, **kwargs)
    timing_fft = -time.time()

    for _i, offset in enumerate(dcr_gen):
        source_image_use = source_image[_i]

        psf_image = psf.drawImage(scale=pixel_scale_use, method='fft', offset=offset,
                                  nx=x_size_use, ny=y_size_use, use_true_center=False)
        if photon_noise:
            base_noise = np.random.normal(scale=1.0, size=(y_size_use, x_size_use))
            base_noise *= np.sqrt(np.abs(source_image_use) / photons_per_adu)
            source_image_use += base_noise
        if sky_noise > 0:
            source_image_use += (rand_gen.normal(scale=sky_noise, size=(y_size_use, x_size_use))
                                 / np.sqrt(bandpass_nstep(bandpass)))
        convol += rfft2(source_image_use) * rfft2(psf_image.array)
    return_image = np.real(fftshift(irfft2(convol)))
    timing_fft += time.time()
    if verbose:
        print("FFT timing for %i DCR planes: [%0.3fs | %0.3fs per plane]"
              % (_i + 1, timing_fft, timing_fft / (_i + 1)))
    return return_image[y0:y1:oversample_image, x0:x1:oversample_image] * oversample_image**2
Example #9
def matches_exist(template, image, tolerance=1):
    # adapted from the convolution function above
    expected = numpy.count_nonzero(template)
    ih, iw = image.shape
    th, tw = template.shape

    # Pad image to even dimensions
    if ih % 2 or iw % 2:
        if ih % 2:
            ih += 1
        if iw % 2:
            iw += 1
        image = pad_bin_image_to_shape(image, (ih, iw))
    if expected == 0:
        return False

    # Calculate the convolution of the FFT's of the image & template
    convolution_freqs = rfft2(image) * rfft2(template[::-1, ::-1],
                                             image.shape)
    convolution_image = irfft2(convolution_freqs)
    found_bitmap = convolution_image > (expected - tolerance)
    return bool(found_bitmap.any())
Example #10
 def getDisplacements2D(self, Z=None, window=False):
     """
     Use phase correlation to find the relative displacement between
     each time step
     """
     if Z is None:
         Z = self.getNbPixelsPerFrame()/self.getNbPixelsPerSlice()/2
     shape = np.asarray(self.get2DShape())
     if window:
         ham = np.hamming(shape[1])*np.atleast_2d(np.hamming(shape[0])).T
     else:
         ham = 1.0
     displs = np.zeros((self.getNbFrames(),2))
     a = rfft2(self.get2DSlice(T=0, Z=Z)*ham)
     for t in range(1,self.getNbFrames()):
         b = rfft2(self.get2DSlice(T=t, Z=Z)*ham)
         #calculate the normalized cross-power spectrum
         #R = numexpr.evaluate(
         #    'a*complex(real(b), -imag(b)/abs(a*complex(real(b), -imag(b))))'
         #    )
         R = a*b.conj()
         Ra = np.abs(a*b.conj())
         R[Ra>0] /= Ra[Ra>0]
         r = irfft2(R)
         #Get the periodic position of the peak
         l = r.argmax()
         displs[t] = np.unravel_index(l, r.shape)
         #prepare next step
         a = b
     return np.where(displs<shape/2, displs, displs-shape)
Example #11
def linespectra(arr, freqs, sigma=4, channelWidth=20, kms=False, source_speed=0):
    """arr should be an array of shape (x, :, >pix, >pix);
    freqs an array or list of numbers of length x.
    N.B. sigma is given in px (can be fractional)."""
    shifts = [int(round((freqs[-1] - freqs[i]) * 299792458 /
                        (channelWidth * freqs[-1]))) for i in range(len(freqs))]
    x = [[] for _ in range(arr.shape[0])]
    mid = arr.shape[2] / 2.0 - 0.5

    gauss_mask = garray(arr.shape[-2:], sigma)
    s = [y * 2 for y in gauss_mask.shape]
    ftg = rfft2(gauss_mask, s)

    for i in range(len(x)):
        for j in range(arr.shape[1]):
            convolved = irfft2(rfft2(arr[i, j, :, :], s) * ftg)
            x[i].append(convolved[s[0] // 2, s[1] // 2])

    padding = abs(max(shifts))
    padded = [0 for _ in range(arr.shape[1] + padding * 2 + 2)]
    for i in range(len(x[0])):
        for j in range(len(x)):
            try:
                padded[i + shifts[j] + padding] += x[j][i]
            except IndexError:
                print(j, i, len(x), len(x[j]))
    if kms:
        return [((i - 150) * 20 / 1000.0, x) for i, x in enumerate(padded)]
    else:
        return [((i - 150) * 20, x) for i, x in enumerate(padded)]
Example #12
def dnfsom_activity(n, m, stimulus, W, tau, T, alpha):
    p = 2 * n + 1
    dt = 35.0 / float(T)
    V = np.random.random((n, n)) * .01
    U = np.random.random((n, n)) * .01

    We = alpha * 1.50 * 960.0 / (n * n) * gaussian((p, p), (0.1, 0.1))
    Wi = alpha * 0.75 * 960.0 / (n * n) * gaussian((p, p), (1.0, 1.0))
    sigma_c = 2.10
    G = gaussian((n, n), (sigma_c, sigma_c))

    V_shape, We_shape, Wi_shape = np.array(V.shape), np.array(
        We.shape), np.array(Wi.shape)
    shape = np.array(best_fft_shape(V_shape + We_shape // 2))

    We_fft = rfft2(We[::-1, ::-1], shape)
    Wi_fft = rfft2(Wi[::-1, ::-1], shape)

    i0 = We.shape[0] // 2
    i1 = i0 + V_shape[0]
    j0 = We.shape[1] // 2
    j1 = j0 + V_shape[1]

    D = ((np.abs(W - stimulus)).sum(axis=-1)) / float(m * m)
    I = (1.0 - D.reshape(n, n)) * G * alpha

    for i in range(T):
        Z = rfft2(V, shape)
        Le = irfft2(Z * We_fft, shape).real[i0:i1, j0:j1]
        Li = irfft2(Z * Wi_fft, shape).real[i0:i1, j0:j1]
        U += (-U + (Le - Li) + I) * 1.0 / tau * dt
        V = np.maximum(U, 0)
    return V
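
The gaussian and best_fft_shape helpers used above are not shown. Minimal stand-ins, assuming a Gaussian over per-axis normalized [-1, 1] coordinates and power-of-two FFT sizes:

import numpy as np

def gaussian(shape, sigma):
    # Unit-amplitude Gaussian over a grid normalized to [-1, 1] per axis.
    Y, X = np.mgrid[0:shape[0], 0:shape[1]]
    y = 2.0 * Y / max(shape[0] - 1, 1) - 1.0
    x = 2.0 * X / max(shape[1] - 1, 1) - 1.0
    return np.exp(-(x**2 / (2 * sigma[1]**2) + y**2 / (2 * sigma[0]**2)))

def best_fft_shape(shape):
    # Round each dimension up to the next power of two.
    return [int(2**np.ceil(np.log2(s))) for s in shape]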
Example #13
 def getDisplacements2D(self, Z=None, window=False):
     """
     Use phase correlation to find the relative displacement between
     each time step
     """
     if Z is None:
         Z = self.getNbPixelsPerFrame() / self.getNbPixelsPerSlice() / 2
     shape = np.asarray(self.get2DShape())
     if window:
         ham = np.hamming(shape[0]) * np.atleast_2d(np.hamming(shape[1])).T
     else:
         ham = 1.0
     displs = np.zeros((self.getNbFrames(), 2))
     a = rfft2(self.get2DSlice(T=0, Z=Z) * ham)
     for t in range(1, self.getNbFrames()):
         b = rfft2(self.get2DSlice(T=t, Z=Z) * ham)
         #calculate the normalized cross-power spectrum
         #R = numexpr.evaluate(
         #    'a*complex(real(b), -imag(b)/abs(a*complex(real(b), -imag(b))))'
         #    )
         R = a * b.conj()
         Ra = np.abs(a * b.conj())
         R[Ra > 0] /= Ra[Ra > 0]
         r = irfft2(R)
         #Get the periodic position of the peak
         l = r.argmax()
         displs[t] = np.unravel_index(l, r.shape)
         #prepare next step
         a = b
     return np.where(displs < shape[::-1] / 2, displs, displs - shape[::-1])
Example #15
def correlate_windows(window_a, window_b, corr_method='fft', nfftx=0, nffty=0):
    """Compute correlation function between two interrogation windows.

    The correlation function can be computed by using the correlation
    theorem to speed up the computation.

    Parameters
    ----------
    window_a : 2d np.ndarray
        a two dimensions array for the first interrogation window, 

    window_b : 2d np.ndarray
        a two dimensions array for the second interrogation window.

    corr_method   : string
        one of the two methods currently implemented: 'fft' or 'direct'.
        Default is 'fft', which is much faster.

    nfftx   : int
        the size of the 2D FFT in x-direction,
        [default: 2 x windows_a.shape[0] is recommended].

    nffty   : int
        the size of the 2D FFT in y-direction,
        [default: 2 x windows_a.shape[1] is recommended].


    Returns
    -------
    corr : 2d np.ndarray
        a two dimensions array for the correlation function.
    
    Note that, due to the wish to use 2**N windows for a faster FFT,
    we use a slightly different convention for the size of the
    correlation map. Theory says it is M+N-1, and the 'direct'
    method returns this size; the FFT-based method returns a map of
    size M+N, where M is the window_size and N is the
    search_area_size. This leads to an inconsistency in the output.
    """
    
    if corr_method == 'fft':
        window_b = np.conj(window_b[::-1, ::-1])
        if nfftx == 0:
            nfftx = nextpower2(window_b.shape[0] + window_a.shape[0])  
        if nffty == 0:
            nffty = nextpower2(window_b.shape[1] + window_a.shape[1]) 
        
        f2a = rfft2(normalize_intensity(window_a), s=(nfftx, nffty))
        f2b = rfft2(normalize_intensity(window_b), s=(nfftx, nffty))
        corr = irfft2(f2a * f2b).real
        corr = corr[:window_a.shape[0] + window_b.shape[0] - 1,
                    :window_b.shape[1] + window_a.shape[1] - 1]
        return corr
    elif corr_method == 'direct':
        return convolve2d(normalize_intensity(window_a),
                          normalize_intensity(window_b[::-1, ::-1]), 'full')

    else:
        raise ValueError('method is not implemented')
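
The snippet assumes two helpers from its module. Minimal versions consistent with how they are used here (assumptions, not the original definitions):

import numpy as np

def nextpower2(i):
    # Smallest power of two greater than or equal to i.
    n = 1
    while n < i:
        n *= 2
    return n

def normalize_intensity(window):
    # Remove the mean grey level so the correlation peak is unbiased.
    return window - window.mean()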
Example #16
def correlation_func(cor_win_1,
                     cor_win_2,
                     window_size,
                     correlation_method='circular'):
    '''Perform the cross-correlation; currently circular cross-correlation,
    i.e. no zero-padding is done.
    The .real cuts off any imaginary part that remains due to finite
    numerical accuracy.
    '''
    if correlation_method == 'linear':
        cor_win_1 = cor_win_1 - cor_win_1.mean(axis=(1, 2)).reshape(
            cor_win_1.shape[0], 1, 1)
        cor_win_2 = cor_win_2 - cor_win_2.mean(axis=(1, 2)).reshape(
            cor_win_1.shape[0], 1, 1)
        cor_win_1[cor_win_1 < 0] = 0
        cor_win_2[cor_win_2 < 0] = 0

        corr = fftshift(irfft2(
            np.conj(rfft2(cor_win_1, s=(2 * window_size, 2 * window_size))) *
            rfft2(cor_win_2, s=(2 * window_size, 2 * window_size))).real,
                        axes=(1, 2))
        corr = corr[:, window_size // 2:3 * window_size // 2,
                    window_size // 2:3 * window_size // 2]

    else:
        corr = fftshift(irfft2(np.conj(rfft2(cor_win_1)) *
                               rfft2(cor_win_2)).real,
                        axes=(1, 2))
    return corr
Example #18
def correlate_windows(window_a, window_b, corr_method='fft', nfftx=None, nffty=None):
    """Compute correlation function between two interrogation windows.

    The correlation function can be computed by using the correlation
    theorem to speed up the computation.

    Parameters
    ----------
    window_a : 2d np.ndarray
        a two dimensions array for the first interrogation window, 

    window_b : 2d np.ndarray
        a two dimensions array for the second interrogation window.

    corr_method   : string
        one of the two methods currently implemented: 'fft' or 'direct'.
        Default is 'fft', which is much faster.

    nfftx   : int
        the size of the 2D FFT in x-direction,
        [default: 2 x windows_a.shape[0] is recommended].

    nffty   : int
        the size of the 2D FFT in y-direction,
        [default: 2 x windows_a.shape[1] is recommended].


    Returns
    -------
    corr : 2d np.ndarray
        a two dimensions array for the correlation function.
    
    Note that, due to the wish to use 2**N windows for a faster FFT,
    we use a slightly different convention for the size of the
    correlation map. Theory says it is M+N-1, and the 'direct'
    method returns this size; the FFT-based method returns a map of
    size M+N, where M is the window_size and N is the search_size.
    This leads to an inconsistency in the output.
    """
    
    if corr_method == 'fft':
        window_b = np.conj(window_b[::-1, ::-1])
        if nfftx is None:
            nfftx = nextpower2(window_b.shape[0] + window_a.shape[0])  
        if nffty is None:
            nffty = nextpower2(window_b.shape[1] + window_a.shape[1]) 
        
        f2a = rfft2(normalize_intensity(window_a), s=(nfftx, nffty))
        f2b = rfft2(normalize_intensity(window_b), s=(nfftx, nffty))
        corr = irfft2(f2a * f2b).real
        corr = corr[:window_a.shape[0] + window_b.shape[0], 
                    :window_b.shape[1] + window_a.shape[1]]
        return corr
    elif corr_method == 'direct':
        return convolve2d(normalize_intensity(window_a),
                          normalize_intensity(window_b[::-1, ::-1]), 'full')
    else:
        raise ValueError('method is not implemented')
Example #19
	def decompose(self,l_edges,keep_fourier=False):

		"""
		Decomposes the shear map into its E and B modes components and returns the respective power spectral densities at the specified multipole moments

		:param l_edges: Multipole bin edges
		:type l_edges: array

		:param keep_fourier: If set to True, holds the Fourier transforms of the E and B mode maps into the E and B attributes of the ShearMap instance
		:type keep_fourier: bool. 

		:returns: tuple -- (l, P_EE, P_BB, P_EB) = (multipole moments -- array; EE, BB power spectra and EB cross power -- arrays)

		>>> test_map = ShearMap.load("shear.fit",format=load_fits_default_shear)
		>>> l_edges = np.arange(300.0,5000.0,200.0)
		>>> l,EE,BB,EB = test_map.decompose(l_edges)

		"""

		#Perform Fourier transforms
		ft_data1 = rfft2(self.data[0])
		ft_data2 = rfft2(self.data[1])

		#Compute frequencies
		lx = rfftfreq(ft_data1.shape[0])
		ly = fftfreq(ft_data1.shape[0])

		#Safety check
		assert len(lx)==ft_data1.shape[1]
		assert len(ly)==ft_data1.shape[0]

		#Compute sines and cosines of rotation angles
		l_squared = lx[np.newaxis,:]**2 + ly[:,np.newaxis]**2
		l_squared[0,0] = 1.0

		sin_2_phi = 2.0 * lx[np.newaxis,:] * ly[:,np.newaxis] / l_squared
		cos_2_phi = (lx[np.newaxis,:]**2 - ly[:,np.newaxis]**2) / l_squared

		#Compute E and B components
		ft_E = cos_2_phi * ft_data1 + sin_2_phi * ft_data2
		ft_B = -1.0 * sin_2_phi * ft_data1 + cos_2_phi * ft_data2

		ft_E[0,0] = 0.0
		ft_B[0,0] = 0.0

		assert ft_E.shape == ft_B.shape
		assert ft_E.shape == ft_data1.shape

		#Compute and return power spectra
		l = 0.5*(l_edges[:-1] + l_edges[1:])
		P_EE = _topology.rfft2_azimuthal(ft_E,ft_E,self.side_angle.to(deg).value,l_edges)
		P_BB = _topology.rfft2_azimuthal(ft_B,ft_B,self.side_angle.to(deg).value,l_edges)
		P_EB = _topology.rfft2_azimuthal(ft_E,ft_B,self.side_angle.to(deg).value,l_edges)

		if keep_fourier:
			self.fourier_E = ft_E
			self.fourier_B = ft_B

		return l,P_EE,P_BB,P_EB
Example #20
 def getDispl2DImage(self, t0=0, t1=1, Z=0):
     ham = np.hamming(self.get2DShape()[0]) * np.atleast_2d(
         np.hamming(self.get2DShape()[1])).T
     a = rfft2(self.get2DSlice(T=t0, Z=Z) * ham)
     b = rfft2(self.get2DSlice(T=t1, Z=Z) * ham)
     R = numexpr.evaluate(
         'a*complex(real(b), -imag(b)) / abs(a*complex(real(b), -imag(b)))')
     return irfft2(R)
Example #21
def register_imgs(imgs,template):
    "save some time by only taking fft of template once"
    rfft2_template_conj = rfft2(template).conj()
    shifts = []
    for img in imgs:
        corr = irfft2(rfft2(img)*rfft2_template_conj)
        shifts.append(balanced_mod(np.unravel_index(corr.argmax(),corr.shape),corr.shape))
    return shifts
Example #22
 def getDispl2DImage(self, t0=0, t1=1, Z=0):
     ham = np.hamming(self.get2DShape()[1])*np.atleast_2d(np.hamming(self.get2DShape()[0])).T
     a = rfft2(self.get2DSlice(T=t0, Z=Z)*ham)
     b = rfft2(self.get2DSlice(T=t1, Z=Z)*ham)
     R = numexpr.evaluate(
         'a*complex(real(b), -imag(b)) / abs(a*complex(real(b), -imag(b)))'
         )
     return irfft2(R)
Example #23
def correlate(image, filter):
    r"""Performs a normalized cross-correlation between an image and a search
        template. For more details, see:

        http://en.wikipedia.org/wiki/Cross_correlation#Normalized_cross-correlation
    """
    si = rfft2(image - mean(image))
    sf = rfft2(filter - mean(filter), image.shape)
    return irfft2(si * conj(sf))
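
A quick check of the correlator above: cutting the template out of the image itself should put the correlation peak at the template's top-left position. The demo assumes mean, conj, rfft2 and irfft2 are in scope as in the snippet.

import numpy as np
from numpy import mean, conj
from numpy.fft import rfft2, irfft2

image = np.random.rand(64, 64)
templ = image[10:20, 15:25]  # template cut from the image
c = correlate(image, templ)
print(np.unravel_index(c.argmax(), c.shape))  # typically (10, 15)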
Example #24
def fft_correlate_images(image_a,
                         image_b,
                         correlation_method="circular",
                         normalized_correlation=True):
    """ FFT based cross correlation
    of two images with multiple views of np.stride_tricks()
    The 2D FFT should be applied to the last two axes (-2,-1) and the
    zero axis is the number of the interrogation window
    This should also work out of the box for rectangular windows.
    Parameters
    ----------
    image_a : 3d np.ndarray, first dimension is the number of windows,
        and two last dimensions are interrogation windows of the first image

    image_b : similar

    correlation_method : string
        one of the two methods implemented: 'circular' or 'linear'
        [default: 'circular'].

    normalized_correlation : bool
        decides whether normalized correlation is done or not: True or False
        [default: True].
    """

    if normalized_correlation:
        # remove the effect of stronger laser or
        # longer exposure for frame B
        # image_a = match_histograms(image_a, image_b)

        # remove mean background, normalize to 0..1 range
        image_a = normalize_intensity(image_a)
        image_b = normalize_intensity(image_b)

    s1 = np.array(image_a.shape[-2:])
    s2 = np.array(image_b.shape[-2:])

    if correlation_method == "linear":
        # have to be normalized, mainly because of zero padding
        size = s1 + s2 - 1
        fsize = 2**np.ceil(np.log2(size)).astype(int)
        fslice = (slice(0, image_a.shape[0]),
                  slice((fsize[0] - s1[0]) // 2, (fsize[0] + s1[0]) // 2),
                  slice((fsize[1] - s1[1]) // 2, (fsize[1] + s1[1]) // 2))
        f2a = rfft2(image_a, fsize, axes=(-2, -1)).conj()
        f2b = rfft2(image_b, fsize, axes=(-2, -1))
        corr = fftshift(irfft2(f2a * f2b).real, axes=(-2, -1))[fslice]
    elif correlation_method == "circular":
        corr = fftshift(irfft2(rfft2(image_a).conj() * rfft2(image_b)).real,
                        axes=(-2, -1))
    else:
        raise ValueError("method is not implemented")

    if normalized_correlation:
        corr = corr / (s2[0] * s2[1])  # for extended search area
        corr = np.clip(corr, 0, 1)
    return corr
Example #25
def beam_convolve(arr, sigma):
    "convolves a 2D image with a gaussian profile with sigma in px"
    if len(arr.shape) != 2 or 3 * sigma > max(arr.shape):
        raise ValueError("arr is not 2d or beam is too wide")
    shape = arr.shape
    gauss_mask = garray(shape, sigma)
    s = [y * 2 for y in gauss_mask.shape]
    ftg = rfft2(gauss_mask, s)
    return irfft2(rfft2(arr, s) * ftg)
Example #26
def correlation(f, g):
    f_fft = rfft2(f)
    if f is g:
        g_fft = f_fft
    else:
        g_fft = rfft2(g)
    g_conj = np.conj(g_fft)
    prod = f_fft * g_conj
    return np.real(irfft2(prod))
Example #27
def conv(im, ker):
    ''' Convolves image im with kernel ker
        Both image and kernel's dimensions should be even: ker.shape % 2 == 0
    '''
    sy, sx = array(ker.shape) // 2
    y0, x0 = array(im.shape) // 2
    big_ker = zeros(im.shape)
    big_ker[y0 - sy:y0 + sy, x0 - sx:x0 + sx] = ker
    return irfft2(rfft2(im) * rfft2(fftshift(big_ker)))
Example #28
def phaseCorrel(a, b):
    """phase correlation calculation"""
    R = rfft2(a) * np.conj(rfft2(b))
    R /= np.absolute(R)
    return irfft2(R, a.shape)
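
A shift-recovery check for the phase correlation above: correlating a frame against a rolled copy of itself should put the delta-like peak exactly at the shift.

import numpy as np
from numpy.fft import rfft2, irfft2

a = np.random.rand(32, 32)
b = np.roll(a, (5, 2), axis=(0, 1))  # a shifted by (5, 2)
r = phaseCorrel(b, a)
print(np.unravel_index(r.argmax(), r.shape))  # expected: (5, 2)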
Example #29
def lineold(imcube, sigma, chanwidth=10):
    """Produces a spectrum by convolving each slice of imcube with a gaussian
    of width sigma and returning the value of the central pixel for each slice."""
    shape = imcube.shape
    bandwidth = shape[0] * chanwidth
    if len(shape) != 3:
        raise ValueError("imcube must be a cube")
    gauss_mask = garray(shape[1:], sigma)
    s = [y * 2 for y in gauss_mask.shape]
    ftg = rfft2(gauss_mask, s)

    return [(i * chanwidth - bandwidth / 2,
             irfft2(rfft2(imcube[i, :, :], s) * ftg)[s[0] // 2, s[1] // 2])
            for i in range(shape[0])]
Example #30
def correlate_windows(window_a,
                      window_b,
                      corr_method='fft',
                      nfftx=None,
                      nffty=None):
    """Compute correlation function between two interrogation windows.

    The correlation function can be computed by using the correlation
    theorem to speed up the computation.

    Parameters
    ----------
    window_a : 2d np.ndarray
        a two dimensions array for the first interrogation window.

    window_b : 2d np.ndarray
        a two dimensions array for the second interrogation window.

    corr_method   : string
        one of the two methods currently implemented: 'fft' or 'direct'.
        Default is 'fft', which is much faster.

    nfftx   : int
        the size of the 2D FFT in x-direction,
        [default: 2 x windows_a.shape[0] is recommended].

    nffty   : int
        the size of the 2D FFT in y-direction,
        [default: 2 x windows_a.shape[1] is recommended].


    Returns
    -------
    corr : 2d np.ndarray
        a two dimensions array for the correlation function.

    """

    if corr_method == 'fft':
        if nfftx is None:
            nfftx = 2 * window_a.shape[0]
        if nffty is None:
            nffty = 2 * window_a.shape[1]
        return fftshift(irfft2(
            rfft2(normalize_intensity(window_a), s=(nfftx, nffty)) * np.conj(
                rfft2(normalize_intensity(window_b), s=(nfftx, nffty)))).real,
                        axes=(0, 1))
    elif corr_method == 'direct':
        return convolve2d(normalize_intensity(window_a),
                          normalize_intensity(window_b[::-1, ::-1]), 'full')
    else:
        raise ValueError('method is not implemented')
Example #31
    def rhs(self, ω_hat):
        # aliasing
        ω_hat[np.where(self.K_sq > self.Nx * self.Ny / 9.)] = 0

        ω = irfft2(ifftshift(ω_hat, axes=0))
        ux = irfft2(ifftshift( 1j * self.KY * self.ω_hat / self.K_sq ,
                    axes=0))
        uy = irfft2(ifftshift(-1j * self.KX * self.ω_hat / self.K_sq ,
                    axes=0))
        tmp = 1j * self.KX * fftshift(rfft2(ux * ω), axes=0) \
            + 1j * self.KY * fftshift(rfft2(uy * ω), axes=0)

        return ω_hat - tmp * self.dt
Example #32
def fft_cpu(window_a, search_area):
    """
    Do batch of FFT's on on the CPU only

    Inputs:
        window_a: 3D numpy array
            stack of interrogation windows of the first frame
            output from the window slice function
        search_area: 3D numpy array
            Stack of interrogation windows of the second frame
            output from the window slice function
    Outputs:
        corr_gpu: 3D numpy array
            Stack of correlation functions for each image pair
    """

    batch_size, win_h, win_w = np.array(window_a.shape).astype(np.int32)
    window_a = window_a.astype(np.float32)
    search_area = search_area.astype(np.float32)

    # preallocate space for data
    winFFT = np.empty(
        [batch_size, window_a.shape[1], window_a.shape[2] // 2 + 1],
        np.complex64)
    search_areaFFT = np.empty(
        [batch_size, window_a.shape[1], window_a.shape[2] // 2 + 1],
        np.complex64)
    corr_cpu = np.empty_like(window_a)

    winA = np.zeros([win_h, win_w])
    sa = np.zeros([win_h, win_w])

    for i in range(batch_size):
        winA = window_a[i, :, :]  # - np.mean(window_a[i,:,:])
        sa = search_area[i, :, :]  # - np.mean(search_area[i,:,:])
        winFFT[i, :, :] = rfft2(winA, s=(win_h, win_w))
        search_areaFFT[i, :, :] = rfft2(sa, s=(win_h, win_w))
        tmp = np.conj(winFFT[i, :, :]) * search_areaFFT[i, :, :]
        corr_cpu[i, :, :] = fftshift(irfft2(tmp).real, axes=(0, 1))
    """
    end.record()
    end.synchronize()
    time = start.time_till(end)*1e-3
    print("CPU time is: %fs sec" %(time))

    print("CPU corr shape is: %s" %(corr_cpu.shape,))
    print("GPU corr shape is: %s" %(corr_gpu.shape,))
    print('Success status: ', np.allclose(corr_gpu, corr_cpu, atol=1e-1))
    """

    return (corr_cpu)
Example #33
def correlate_windows(window_a, window_b, corr_method="fft", nfftx=None, nffty=None):
    """Compute correlation function between two interrogation windows.
    
    The correlation function can be computed by using the correlation 
    theorem to speed up the computation.
    
    Parameters
    ----------
    window_a : 2d np.ndarray
        a two dimensions array for the first interrogation window.
        
    window_b : 2d np.ndarray
        a two dimensions array for the second interrogation window.
        
    corr_method   : string
        one of the two methods currently implemented: 'fft' or 'direct'.
        Default is 'fft', which is much faster.
        
    nfftx   : int
        the size of the 2D FFT in x-direction, 
        [default: 2 x windows_a.shape[0] is recommended].
        
    nffty   : int
        the size of the 2D FFT in y-direction, 
        [default: 2 x windows_a.shape[1] is recommended].
        
        
    Returns
    -------
    corr : 2d np.ndarray
        a two dimensions array for the correlation function.
    
    """

    if corr_method == "fft":
        if nfftx is None:
            nfftx = 2 * window_a.shape[0]
        if nffty is None:
            nffty = 2 * window_a.shape[1]
        return fftshift(
            irfft2(
                rfft2(normalize_intensity(window_a), s=(nfftx, nffty))
                * np.conj(rfft2(normalize_intensity(window_b), s=(nfftx, nffty)))
            ).real,
            axes=(0, 1),
        )
    elif corr_method == "direct":
        return convolve(normalize_intensity(window_a), normalize_intensity(window_b[::-1, ::-1]), "full")
    else:
        raise ValueError("method is not implemented")
Example #34
def convolve_image(flux_arr, x_loc, y_loc, x_size=None, y_size=None, seed=None,
                   psf=None, pad_image=1.5, pixel_scale=None, kernel_radius=None,
                   oversample_image=1, photon_noise=False, sky_noise=0.0, verbose=True, **kwargs):
    """Wrapper to call fast_dft with no DCR planes."""
    x_size_use = int(x_size * pad_image)
    y_size_use = int(y_size * pad_image)
    oversample_image = int(oversample_image)
    pixel_scale_use = pixel_scale / oversample_image
    x0 = oversample_image * ((x_size_use - x_size) // 2)
    x1 = x0 + x_size * oversample_image
    y0 = oversample_image * ((y_size_use - y_size) // 2)
    y1 = y0 + y_size * oversample_image
    x_loc_use = x_loc * oversample_image + x0
    y_loc_use = y_loc * oversample_image + y0
    x_size_use *= oversample_image
    y_size_use *= oversample_image
    timing_model = -time.time()
    source_image = fast_dft(flux_arr, x_loc_use, y_loc_use, x_size=x_size_use, y_size=y_size_use,
                            kernel_radius=kernel_radius, **kwargs)
    timing_model += time.time()
    n_star = len(x_loc)
    if oversample_image > 1:
        bright_star = "bright "
    else:
        bright_star = ""
    if verbose:
        if n_star == 1:
            print("Time to model %i %sstar: [%0.3fs]" % (n_star, bright_star, timing_model))
        else:
            print("Time to model %i %sstars: [%0.3fs | %0.5fs per star]"
                  % (n_star, bright_star, timing_model, timing_model / n_star))

    rand_gen = np.random
    if seed is not None:
        rand_gen.seed(seed - 1)
    psf_image = psf.drawImage(scale=pixel_scale_use, method='fft', offset=[0, 0],
                              nx=x_size_use, ny=y_size_use, use_true_center=False)
    if photon_noise:
        base_noise = np.random.normal(scale=1.0, size=(y_size_use, x_size_use))
        base_noise *= np.sqrt(np.abs(source_image) / photons_per_adu)
        source_image += base_noise
    if sky_noise > 0:
        source_image += rand_gen.normal(scale=sky_noise, size=(y_size_use, x_size_use))
    timing_fft = -time.time()
    convol = rfft2(source_image) * rfft2(psf_image.array)
    return_image = np.real(fftshift(irfft2(convol)))
    timing_fft += time.time()
    if verbose:
        print("FFT timing (single plane): [%0.3fs]" % (timing_fft))
    return(return_image[y0:y1:oversample_image, x0:x1:oversample_image] * oversample_image**2)
Example #35
    def output(self):
        """ """

        # One dimension
        if len(self._source.shape) == 1:
            source = self._actual_source
            # Use FFT convolution
            if self._fft:
                if not self._toric:
                    P = rfft(source, self._fft_shape[0]) * self._fft_weights
                    R = irfft(P, self._fft_shape[0]).real
                    R = R[self._fft_indices]
                else:
                    P = rfft(source) * self._fft_weights
                    R = irfft(P, source.shape[0]).real

                # if self._toric:
                #     R  = ifft(fft(source)*self._fft_weights).real
                # else:
                #     n = source.shape[0]
                #     self._src_holder[n//2:n//2+n] = source
                #     R = ifft(fft(self._src_holder)*self._fft_weights)
                #     R = R.real[n//2:n//2+n]
            # Use regular convolution
            else:
                R = convolve1d(source, self._weights[::-1], self._toric)
            if self._src_rows is not None:
                R = R[self._src_rows]
            return R.reshape(self._target.shape)
        # Two dimensions
        else:
            source = self._actual_source
            # Use FFT convolution
            if self._fft:
                if not self._toric:
                    P = rfft2(source, self._fft_shape) * self._fft_weights
                    R = irfft2(P, self._fft_shape).real
                    R = R[self._fft_indices]
                else:
                    P = rfft2(source) * self._fft_weights
                    R = irfft2(P, source.shape).real

            # Use SVD convolution
            else:
                R = convolve2d(source, self._weights, self._USV, self._toric)
            if self._src_rows is not None and self._src_cols is not None:
                R = R[self._src_rows, self._src_cols]
        return R.reshape(self._target.shape)
Example #37
def _mle_amp_setup(H, W, k, w):
    """
    Make the transfer function for convolution of real-valued
    images with a centered (mean-subtracted) Gaussian kernel,
    along with a normalization term to convert to the max-likelihood
    estimate for the intensities of Gaussian spots.

    args
    ----
        H, W    :   int, height and width of image
        k       :   float, kernel sigma
        w       :   int, window size for the kernel

    returns
    -------
        (
            2D ndarray, dtype complex128, the RFFT2 of
                the kernel
            float, normalization factor
        )

    """
    S = 2 * (k**2)
    g = np.exp(-((np.indices((w, w)) - (w - 1) / 2)**2).sum(0) / S)
    g = g / g.sum()
    gc = g - g.mean()
    Sgc2 = (gc**2).sum()
    return rfft2(pad(gc, H, W)), Sgc2
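
The pad helper above embeds the small kernel into an H x W array before the FFT. A plausible minimal version, assuming the kernel is placed at the image center so that the fftshift applied after irfft2 in the callers re-centers the result:

import numpy as np

def pad(kernel, H, W):
    # Zero-pad a small kernel to shape (H, W), centered.
    out = np.zeros((H, W))
    kh, kw = kernel.shape
    y0, x0 = H // 2 - kh // 2, W // 2 - kw // 2
    out[y0:y0 + kh, x0:x0 + kw] = kernel
    return out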
Example #38
def fftn_mpi(u, fu):
    # Forward Fourier transform
    Uc_hatT[:] = rfft2(u, axes=(1, 2))
    U_mpi[:] = rollaxis(Uc_hatT.reshape(Np, num_processes, Np, N_half), 1)
    comm.Alltoall([U_mpi, MPI.DOUBLE_COMPLEX], [fu, MPI.DOUBLE_COMPLEX])
    fu[:] = fft(fu, axis=0)
    return fu
Example #39
    def __init__(self):
        # Retina
        self.R = np.zeros(retina_shape)

        # Superior colliculus
        self.SC_V = np.zeros(colliculus_shape)
        self.SC_U = np.zeros(colliculus_shape)

        # Projection from retina to colliculus
        self.P = retina_projection()

        # Parameters
        self.sigma_e  = sigma_e
        self.A_e      = A_e
        self.sigma_i  = sigma_i
        self.A_i      = A_i
        self.alpha    = alpha
        self.tau      = tau
        self.scale    = scale
        self.noise    = noise

        # Lateral weights
        # DoG
        # K = A_e*gaussian((2*n+1,2*n+1), sigma_e) - A_i*gaussian((2*n+1,2*n+1), sigma_i)
        # Constant inhibition
        K = A_e*gaussian((2*n+1,2*n+1), sigma_e) - A_i #*gaussian((2*n+1,2*n+1), sigma_i)

        # FFT for lateral weights
        K_shape = np.array(K.shape)
        self.fft_shape = np.array(best_fft_shape(colliculus_shape+K_shape//2))
        self.K_fft = rfft2(K,self.fft_shape)
        i0,j0 = K.shape[0]//2, K.shape[1]//2
        i1,j1 = i0+colliculus_shape[0], j0+colliculus_shape[1]
        self.K_indices = i0,i1,j0,j1
Example #40
def log(I, k=1.0, w=11, t=200.0, return_filt=False):
    """
    Detect spots by Laplacian-of-Gaussian filtering.

    args
    ----
        I           :   2D ndarray
        k           :   float, kernel sigma
        w           :   int, kernel size 
        t           :   float, threshold
        return_filt :   bool, also return the filtered image

    returns
    -------
        if return_filt:
        (
            2D ndarray, the post-convolution image;
            2D ndarray, the thresholded binary image;
            2D ndarray, shape (n_spots, 2), the y and x 
                coordinates of each spot
        )
        else
            2D ndarray, shape (n_spots, 2), the y and x
                coordinates of each spot

    """
    # Generate the transfer function
    G_rft = _log_setup(*I.shape, k, w)

    # Perform the convolution
    return threshold_image(fftshift(irfft2(rfft2(I) * G_rft, s=I.shape)),
                           t=t,
                           return_filt=return_filt)
Example #41
def fftn_mpi(u, fu):
    Uc_hatT[:] = rfft2(u, axes=(1, 2))
    fu[:] = rollaxis(Uc_hatT.reshape(Np, num_processes, Np, N // 2 + 1),
                     1).reshape(fu.shape)
    comm.Alltoall(MPI.IN_PLACE, [fu, MPI.DOUBLE_COMPLEX])
    fu[:] = fft(fu, axis=0)
    return fu
Example #42
def ds_to_cs_to_fits(data, P, BW, filename):
    ntime, nchan = data.shape
    new_ntime = nearest_power_of_2(data.shape[0])
    new_nchan = nearest_power_of_2(data.shape[1])
    print(new_ntime, 'X', new_nchan, ' 2D FFT in progress...')
    cs = ft.fftshift(ft.rfft2(data, [new_ntime, new_nchan]), axes=0)
    print('FFT shift...')
    realCS = np.real(cs)
    imagCS = np.imag(cs)
    CS = np.zeros([2, cs.shape[0], cs.shape[1]])
    CS[0, :] = realCS
    CS[1, :] = imagCS
    tau = ft.rfftfreq(new_nchan, np.abs(BW) / nchan)
    fD = ft.fftshift(ft.fftfreq(new_ntime, P)) * 1e3
    hdu = f.PrimaryHDU(CS)
    hdu.header['CTYPE1'] = 'DELAY - MICROSEC'
    hdu.header['CTYPE2'] = 'DOPPLER FREQUENCY - MILLIHERTZ'
    hdu.header['CTYPE3'] = 'REAL-IMAG'
    hdu.header['CRVAL1'] = tau[0]
    hdu.header['CRVAL2'] = fD[0]
    hdu.header['CRVAL3'] = 0
    hdu.header['CDELT1'] = tau[1] - tau[0]
    hdu.header['CDELT2'] = fD[1] - fD[0]
    hdu.header['CDELT3'] = 0
    hdu.header['CRPIX1'] = 0
    hdu.header['CRPIX2'] = 0
    hdu.header['CRPIX3'] = 0
    hdu.header['CROTA1'] = 0.0
    hdu.header['CROTA2'] = 0.0
    hdu.header['CROTA3'] = 0.0
    hdu.header['BSCALE'] = 1.0
    update_fits_header(hdu)
    hdu.header['INT_TIME'] = P * ntime
    hdu.writeto(filename)
    return fD, tau, cs
Example #43
    def __init__(self, params, iv, t0=0.):
        self.__dict__.update(params)

        # k-space
        kx = np.linspace(0, self.Nx//2, self.Nx//2+1)
        ky = np.linspace(-self.Ny//2, self.Ny//2-1, self.Ny)
        self.KX, self.KY = np.meshgrid(kx, ky)
        self.K_sq = np.square(self.KX) + np.square(self.KY)
        #  fix: this value is zero, but shouldn't be
        self.K_sq[self.Nx//2,0] = 1

        # x-space
        self.dx = (self.xe - self.xb) / self.Nx
        self.x = np.arange(self.xb, self.xe, self.dx)
        self.dy = (self.ye - self.yb) / self.Ny
        self.y = np.arange(self.yb, self.ye, self.dy)
        X, Y = np.meshgrid(self.x, self.y)

        # initial value and its fourier transform
        self.ω = iv(X, Y)
        self.ω_hat = fftshift(rfft2(self.ω), axes=0)
        self.t = t0
        #  self.t = [t0]

        self.cfl = 1
        #  self.cfl = []

        self.scheme = self.shu_osher
Example #44
def fftn_mpi(u, fu):
    Uc_hatT[:] = rfft2(u, axes=(1, 2))
    U_mpi[:] = rollaxis(Uc_hatT.reshape(Np, num_processes, Np, N // 2 + 1), 1,
                        0)
    comm.Alltoall([U_mpi, MPI.DOUBLE_COMPLEX], [fu, MPI.DOUBLE_COMPLEX])
    fu[:] = fft(fu, axis=0)
    return fu
Example #45
def dog(I, k0=1.0, k1=3.0, w=9, t=200.0, return_filt=False):
    """
    Convolve the image with a difference-of-Gaussians kernel,
    then apply a static threshold.

    args
    ----
        I           :   2D ndarray
        k0          :   float, positive kernel sigma
        k1          :   float, negative kernel sigma
        w           :   int, kernel size
        t           :   float, threshold
        return_filt :   bool, also return the filtered image

    returns
    -------
        if return_filt:
        (
            2D ndarray, the post-convolution image;
            2D ndarray, the thresholded binary image;
            2D ndarray, shape (n_spots, 2), the y and x 
                coordinates of each spot
        )
        else
            2D ndarray, shape (n_spots, 2), the y and x
                coordinates of each spot

    """
    # Generate the transfer function
    dog_tf = _dog_setup(*I.shape, k0, k1, w)

    # Perform the convolution
    return threshold_image(fftshift(irfft2(rfft2(I) * dog_tf, s=I.shape)),
                           t=t,
                           return_filt=return_filt)
Example #46
def _dog_setup(H, W, k0, k1, w):
    """
    Generate the transfer function for DoG filtering.

    args
    ----
        H, W        :   int, height and width of the image
                        to be convolved
        k0          :   float, positive kernel sigma
        k1          :   float, negative kernel sigma
        w           :   int, kernel size

    returns
    -------
        2D ndarray, RFFT of the DoG kernel 

    """
    S0 = 2 * (k0**2)
    S1 = 2 * (k1**2)
    g1 = np.exp(-((np.indices(
        (int(w), int(w))) - (int(w) - 1) / 2)**2).sum(0) / S0)
    g1 = g1 / g1.sum()
    g2 = np.exp(-((np.indices(
        (int(w), int(w))) - (int(w) - 1) / 2)**2).sum(0) / S1)
    g2 = g2 / g2.sum()
    return rfft2(pad(g1 - g2, H, W))
Example #47
def _filter(filter, array):
    X = fft.rfft2(array)
    X = fft.fftshift(X)
    numpy.multiply(X, filter, out=X)
    X = fft.ifftshift(X)
    x = fft.irfft2(X, s=array.shape)
    return x
Example #48
def centered_gauss(I, k=1.0, w=9, t=200.0, return_filt=False):
    """
    Convolve the image with a mean-subtracted Gaussian kernel, 
    then apply a static threshold.

    args
    ----
        I           :   2D ndarray (YX)
        k           :   float, kernel sigma
        w           :   int, kernel window size
        t           :   float, threshold
        return_filt :   bool, also return the filtered image
                        and boolean image

    returns
    -------
        if return_filt:
        (
            2D ndarray, the post-convolution image;
            2D ndarray, the thresholded binary image;
            2D ndarray, shape (n_spots, 2), the y and x 
                coordinates of each spot
        )
        else
            2D ndarray, shape (n_spots, 2), the y and x
                coordinates of each spot

    """
    # Compute the transfer function
    G_rft = _centered_gauss_setup(*I.shape, k, w)
    return threshold_image(fftshift(irfft2(rfft2(I) * G_rft, s=I.shape)),
                           t=t,
                           return_filt=return_filt)
Example #49
def _fft_filter_setup(
    image_shape: Tuple[int, int], window: Union[np.ndarray, Window],
) -> Tuple[Tuple[int, int], np.ndarray, Tuple[int, int], Tuple[int, int]]:
    window_shape = window.shape

    # Optimal FFT shape
    #    real_fft_only = True
    fft_shape = (
        next_fast_len(
            image_shape[0] + window_shape[0] - 1
        ),  # , real_fft_only),
        next_fast_len(
            image_shape[1] + window_shape[1] - 1
        ),  # , real_fft_only),
    )

    # Pad window to optimal FFT size
    window_pad = _pad_window(window, fft_shape)

    # Obtain the transfer function via the real valued FFT
    transfer_function = rfft2(window_pad)

    # Image offset before FFT and after IFFT
    offset_before = _offset_before_fft(window_shape)
    offset_after = _offset_after_ifft(window_shape)

    return fft_shape, transfer_function, offset_before, offset_after
Example #50
    def get_c_mn(self):
        """
		return m and c_mn
		c_mn: the smoothed 2D-FFT coefficients of "biased" input function f(x):
		f_b = f(x_1,x_2) / x_1^\nu_1 / x_2^\nu_2

		number of x1, x2 values should be even
		c_window_width: the fraction of any row/column c_mn elements that are smoothed.
		"""
        print(self.fx1x2.shape)
        print(self.x2.size, self.x1.size)
        f_b = ((self.fx1x2 * self.x2**(-self.nu2)).T * self.x1**(-self.nu1)).T
        c_mn = rfft2(f_b)

        m = np.arange(-self.N1 // 2, self.N1 // 2 + 1)
        n = np.arange(-self.N2 // 2, self.N2 // 2 + 1)

        c_mn1 = c_mn[:self.N1 // 2 + 1, :]
        c_mn = np.vstack((c_mn[self.N1 // 2:, :], c_mn1[:, :]))

        c_mn_left = np.conj(np.flip(np.flip(c_mn, 0), 1))
        c_mn = np.hstack((c_mn_left[:, :-1], c_mn))

        c_window_array1 = c_window(m, int(self.c_window_width * self.N1 // 2.))
        c_window_array2 = c_window(n, int(self.c_window_width * self.N2 // 2.))
        c_mn_filter = ((c_mn * c_window_array2).T * c_window_array1).T
        return m, n, c_mn_filter
Example #52
 def update_nmda_high():
     high_nmda = np.array(high_excit_pop.s_NMDA).reshape(
         N_excit_high_1d, -1)
     fft_s_NMDA_2d = rfft2(high_nmda)
     fft_s_NMDA_total_2d = np.multiply(fft_high_kernel, fft_s_NMDA_2d)
     s_NMDA_tot_2d = irfft2(fft_s_NMDA_total_2d)
     high_excit_pop.s_NMDA_total_ = s_NMDA_tot_2d.reshape(-1)
Example #53
def fftn_mpi(u, fu):
    Uc_hatT[:] = rfft2(u, axes=(1,2))
    for i in range(num_processes):
        U_mpi[i] = Uc_hatT[:, i*Np:(i+1)*Np]
    comm.Alltoall([U_mpi, MPI.DOUBLE_COMPLEX], [fu, MPI.DOUBLE_COMPLEX])
    fu[:] = fft(fu, axis=0)
    return fu
Example #54
def fftn_mpi(u, fu):
    """fft in three directions using mpi
    """
    if num_processes == 1:
        #fu[:] = fft(fft(rfft(u, axis=1), axis=2), axis=0)  
        fu[:] = rfftn(u, axes=(0,2,1))
        return
    
    # Do 2 ffts in y-z directions on owned data
    #ft = fu.transpose(2,1,0)
    #ft[:] = fft(rfft(u, axis=1), axis=2)
    Uc_hatT[:] = rfft2(u, axes=(2,1))
    
    ## Communicating intermediate result 
    ##rstack(ft, Uc_hatT, Np, num_processes)       
    #fu_send = fu.reshape((num_processes, Np, Nf, Np))
    #for i in range(num_processes):
        #if not i == rank:
           #comm.Sendrecv_replace([fu_send[i], MPI.DOUBLE_COMPLEX], i, 0, i, 0)   
    #fu_send[:] = fu_send.transpose(0,3,2,1)
      
    # Transform data to align with x-direction  
    for i in range(num_processes): 
        #U_mpi[i] = ft[:, :, i*Np:(i+1)*Np]
        U_mpi[i] = Uc_hatT[:, :, i*Np:(i+1)*Np]
        
    # Communicate all values
    comm.Alltoall([U_mpi, MPI.DOUBLE_COMPLEX], [fu, MPI.DOUBLE_COMPLEX])  
                
    # Do fft for last direction 
    fu[:] = fft(fu, axis=0)
Example #55
def fct(u, fu):
    """Fast Cheb transform of x-direction, Fourier transform of y and z"""
    Uc_hatT[:] = rfft2(u, axes=(1,2))
    n0 = U_mpi.shape[2]
    for i in range(num_processes):
        U_mpi[i] = Uc_hatT[:, i*n0:(i+1)*n0]
    comm.Alltoall([U_mpi, MPI.DOUBLE_COMPLEX], [Uc_hat, MPI.DOUBLE_COMPLEX])
    fu = ST.fct(Uc_hat, fu)
    return fu
Example #56
def fss(u, fu, S):
    """Fast Shen scalar product of x-direction, Fourier transform of y and z"""
    Uc_hatT[:] = rfft2(u, axes=(1,2))
    n0 = U_mpi.shape[2]
    for i in range(num_processes):
        U_mpi[i] = Uc_hatT[:, i*n0:(i+1)*n0]
    comm.Alltoall([U_mpi, MPI.DOUBLE_COMPLEX], [Uc_hat, MPI.DOUBLE_COMPLEX])
    fu = S.fastShenScalar(Uc_hat, fu)
    return fu
Example #57
def _upsample(arr, factor):
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("array argument must be square shape")
    arr_k = rfft2(arr)
    upsampled = irfft2(arr_k, s=[arr.shape[0]*factor, arr.shape[1]*factor])
    return upsampled
Example #58
def fct(u, fu):
    """Fast Cheb transform of x-direction, Fourier transform of y and z"""
    Uc_hatT[:] = rfft2(u, axes=(1,2))
    #n0 = U_mpi.shape[2]
    #for i in range(num_processes):
        #U_mpi[i] = Uc_hatT[:, i*n0:(i+1)*n0]
    U_mpi[:] = rollaxis(Uc_hatT.reshape(Np[0], num_processes, Np[1], Nf), 1)
    comm.Alltoall([U_mpi, MPI.DOUBLE_COMPLEX], [Uc_hat, MPI.DOUBLE_COMPLEX])
    fu = ST.fct(Uc_hat, fu)
    return fu
Example #59
def _xcor2(array1, array2):
    x = tile.tile9_periodic(array1)
    a, b = x.shape
    y = array2[::-1, ::-1]
    c, d = y.shape
    m, n = _xcor2_shape(((a, b), (c, d)))
    x = np._zeropad(x, (m, n))
    y = np._zeropad(y, (m, n))
    X = fft.rfft2(x)
    Y = fft.rfft2(y)
    X = fft.fftshift(X)
    Y = fft.fftshift(Y)
    _detrend_filter(X)
    _detrend_filter(Y)
    numpy.multiply(X, Y, out=X)
    X = fft.ifftshift(X)
    x = fft.irfft2(X, s=(m, n))
    z = _center(x, (a // 3 + c, b // 3 + d))
    z = _center(z, (a // 3, b // 3))
    return z
Example #60
def upsample(arr, factor):
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("array argument must be square shape")
    arr_k = rfft2(arr)
    nkx, nky = arr_k.shape
    unkx = nkx*factor
    unky = (nky-1) * factor + 1
    upsample_fft = np.zeros((unkx, unky), dtype=np.complex128)
    upsample_fft[:nkx//2+1, :nky] = arr_k[:nkx//2+1, :]
    upsample_fft[-(nkx//2-1):, :nky] = arr_k[-(nkx//2-1):, :]
    upsampled = irfft2(upsample_fft)
    return upsampled
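
A quick shape check for the spectral upsampler above. Note that irfft2 normalizes by the output size, so the result is scaled down by factor**2; multiply by factor**2 if amplitudes must be preserved.

import numpy as np

arr = np.random.rand(8, 8)
up = upsample(arr, 2)
print(arr.shape, '->', up.shape)  # (8, 8) -> (16, 16)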