Example #1
def repeated_sales(df, artistname, artname, r2thresh=7000, fftr2thresh=10000, IMAGES_DIR='/home/ryan/asi_images/'):
    """
        Takes a dataframe, artistname and artname and tries to decide, via image matching, if there is a repeat sale. Returns a dict of lot_ids, each entry a list of repeat sales
    """
    artdf = df[(df['artistID']==artistname) & (df['artTitle']==artname)]

    artdf.images = artdf.images.apply(getpath)
    paths = artdf[['_id','images']].dropna()
    id_dict = {}
    img_buffer = {}
    already_ordered = []
    for i, path_i in paths.values:
        id_dict[i] = []
        img_buffer[i] = img_as_float(rgb2gray(resize(imread(IMAGES_DIR + path_i), (300,300))))
        for j, path_j in paths[paths._id != i].values:
            if j > i and j not in already_ordered:
                if j not in img_buffer.keys():
                    img_buffer[j] = img_as_float(rgb2gray(resize(imread(IMAGES_DIR + path_j), (300,300))))
                if norm(img_buffer[i] - img_buffer[j]) < r2thresh and\
                        norm(fft2(img_buffer[i]) - fft2(img_buffer[j])) < fftr2thresh:
                    id_dict[i].append(j)
                    already_ordered.append(j)
    # drop entries with no matches; iterate over a copy of the keys, since
    # mutating a dict while iterating it raises RuntimeError in Python 3
    for key in list(id_dict.keys()):
        if id_dict[key] == []:
            id_dict.pop(key)
    return id_dict
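The same pairwise idea, reduced to a self-contained sketch. The images dict, the helper name find_repeats, and the thresholds are illustrative assumptions, not part of the function above:

import numpy as np
from numpy.fft import fft2
from numpy.linalg import norm

def find_repeats(images, r2thresh=7000.0, fftr2thresh=10000.0):
    """images: dict mapping id -> 2D float array, all the same shape."""
    matches = {}
    ids = sorted(images)
    for a_idx, a in enumerate(ids):
        for b in ids[a_idx + 1:]:
            # a pair matches only if both the pixel-space and the
            # frequency-space Frobenius norms fall under their thresholds
            close_pix = norm(images[a] - images[b]) < r2thresh
            close_fft = norm(fft2(images[a]) - fft2(images[b])) < fftr2thresh
            if close_pix and close_fft:
                matches.setdefault(a, []).append(b)
    return matches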
Example #2
File: mvc.py  Project: hopehhchen/TurbuStat
    def compute_pspec(self):
        '''
        Compute the 2D power spectrum.

        The quantity calculated here is the same as Equation 3 in Lazarian &
        Esquivel (2003), but the input arrays are not in the same form as
        described. We can, however, adjust for the use of normalized centroids
        and the linewidth.

        An unnormalized centroid can be constructed by multiplying the centroid
        array by the moment0. Velocity dispersion is the square of the linewidth
        subtracted by the square of the normalized centroid.
        '''

        term1 = fft2(self.centroid*self.moment0)

        term2 = np.power(self.linewidth, 2) + np.power(self.centroid, 2)

        mvc_fft = term1 - term2 * fft2(self.moment0)

        # Shift to the center
        mvc_fft = fftshift(mvc_fft)

        self.ps2D = np.abs(mvc_fft) ** 2.

        return self
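For orientation, the bare bones of a 2D power spectrum look like this. A minimal sketch on synthetic data, without the MVC-specific term combination above:

import numpy as np
from numpy.fft import fft2, fftshift

field = np.random.randn(128, 128)          # synthetic stand-in for a centroid map
ps2D = np.abs(fftshift(fft2(field))) ** 2  # |F(k)|^2 with zero frequency centered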
Example #3
def get_spectrum_1d(data_reg,x_reg,y_reg):
    """Compute the 1d power spectrum.
    """
    # remove the mean and make the domain square
    data_reg-=data_reg.mean()
    jpj,jpi = data_reg.shape
    msize = min(jpj,jpi)
    data_reg = data_reg[:msize-1,:msize-1]
    x_reg = x_reg[:msize-1,:msize-1]
    y_reg = y_reg[:msize-1,:msize-1]
    # wavenumber vector
    x1dreg,y1dreg = x_reg[0,:],y_reg[:,0]
    Ni,Nj = msize-1,msize-1
    dx = int(npy.ceil(x1dreg[1]-x1dreg[0]))  # np.int was removed in NumPy 1.24
    k_max  = npy.pi / dx
    kx = fft.fftshift(fft.fftfreq(Ni, d=1./(2.*k_max)))
    ky = fft.fftshift(fft.fftfreq(Nj, d=1./(2.*k_max)))
    kkx, kky = npy.meshgrid( ky, kx )
    Kh = npy.sqrt(kkx**2 + kky**2)
    Nmin  = min(Ni,Nj)
    leng  = Nmin//2+1  # integer division: leng sizes arrays and ranges below
    kstep = npy.zeros(leng)
    kstep[0] =  k_max / Nmin
    for ind in range(1, leng):
        kstep[ind] = kstep[ind-1] + 2*k_max/Nmin
    norm_factor = 1./( (Nj*Ni)**2 )
    # tukey windowing = tapered cosine window
    cff_tukey = 0.25
    yw=npy.linspace(0, 1, Nj)
    wdw_j = npy.ones(yw.shape)
    xw=npy.linspace(0, 1, Ni)
    wdw_i= npy.ones(xw.shape)
    first_conditioni = xw<cff_tukey/2
    first_conditionj = yw<cff_tukey/2
    wdw_i[first_conditioni] = 0.5 * (1 + npy.cos(2*npy.pi/cff_tukey * (xw[first_conditioni] - cff_tukey/2) ))
    wdw_j[first_conditionj] = 0.5 * (1 + npy.cos(2*npy.pi/cff_tukey * (yw[first_conditionj] - cff_tukey/2) ))
    third_conditioni = xw>=(1 - cff_tukey/2)
    third_conditionj = yw>=(1 - cff_tukey/2)
    wdw_i[third_conditioni] = 0.5 * (1 + npy.cos(2*npy.pi/cff_tukey * (xw[third_conditioni] - 1 + cff_tukey/2)))
    wdw_j[third_conditionj] = 0.5 * (1 + npy.cos(2*npy.pi/cff_tukey * (yw[third_conditionj] - 1 + cff_tukey/2)))
    wdw_ii, wdw_jj = npy.meshgrid(wdw_j, wdw_i, sparse=True)
    wdw = wdw_ii * wdw_jj
    data_reg*=wdw
    #2D spectrum
    cff  = norm_factor
    tempconj=fft.fft2(data_reg).conj()
    tempamp=cff * npy.real(tempconj*fft.fft2(data_reg))
    spec_2d=fft.fftshift(tempamp)
    #1D spectrum
    leng    = len(kstep)
    spec_1d = npy.zeros(leng)
    krange     = Kh <= kstep[0]
    spec_1d[0] = spec_2d[krange].sum()
    for ind in range(1, leng):
        krange = (kstep[ind-1] < Kh) & (Kh <= kstep[ind])
        spec_1d[ind] = spec_2d[krange].sum()
    spec_1d[0] /= kstep[0]
    for ind in range(1, leng):
        spec_1d[ind] /= kstep[ind]-kstep[ind-1]
    return spec_1d, kstep
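The hand-rolled taper above is a standard Tukey (tapered-cosine) window. If SciPy is available, the same separable 2D window can be built in a few lines; a sketch assuming the 0.25 taper fraction used above, with illustrative sizes:

import numpy as npy
from scipy.signal.windows import tukey

Ni = Nj = 255                    # illustrative sizes
wdw_i = tukey(Ni, alpha=0.25)    # 1D tapered-cosine window along i
wdw_j = tukey(Nj, alpha=0.25)    # 1D tapered-cosine window along j
wdw = npy.outer(wdw_i, wdw_j)    # separable 2D window, shape (Ni, Nj)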
Example #4
    def SpectralCrossCorrelation(self, cstep = 1):

        # Measure the length of the list
        num_regions = len(self.Regions)

        # Width and height
        height = self.Regions[0].Height
        width  = self.Regions[0].Width

        # Allocate the correlation
        spectral_corr = Correlation(np.zeros((height, width), dtype = "complex"))

        # Calculate the FT of the first region.
        # Do this outside the loop so that we
        # only have to perform one FFT per iteration.
        ft_01 = fft.fft2(self.Regions[0].Data)

        # Correlate each region with the next one
        for k in range(num_regions - 1):
            ft_02 = fft.fft2(self.Regions[k + 1].Data)

            # Conjugate multiply
            spectral_corr.Data += ft_01 * np.conj(ft_02)

            # Shift the second FT into
            # the position of the first FT.
            ft_01 = ft_02

        return spectral_corr
Example #5
def Convolve(image1, image2, MinPad=True, pad=True):
    """
    Convolves image1 with image2.

    :param image1: 2D image array
    :param image2: 2D image array
    :param MinPad: whether to use minimal padding
    :param pad: whether to pad the array
    """
    #The size of the images:
    r1, c1 = image1.shape
    r2, c2 = image2.shape

    if MinPad:
        r = r1 + r2
        c = c1 + c2
    else:
        r = 2*max(r1,r2)
        c = 2*max(c1,c2)
    
    # ...or round the padded size up to the next power of two
    if pad:
        pr2 = int(m.log(r)/m.log(2.) + 1.)
        pc2 = int(m.log(c)/m.log(2.) + 1.)
        rOrig = r
        cOrig = c
        r = 2**pr2
        c = 2**pc2
    
    fftimage = fft2(image1, s=(r,c))*fft2(image2[::-1,::-1],s=(r,c))

    if pad:
        return (ifft2(fftimage))[:rOrig,:cOrig].real
    else:
        return (ifft2(fftimage)).real
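A usage sketch for Convolve. It assumes fft2/ifft2 come from numpy.fft and m is the math module; the snippet above does not show its imports:

import math as m
import numpy as np
from numpy.fft import fft2, ifft2

image = np.zeros((64, 64))
image[32, 32] = 1.0                 # unit impulse
kernel = np.ones((5, 5)) / 25.0     # box blur
blurred = Convolve(image, kernel)   # the impulse spreads into a 5x5 box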
Example #6
def create_matching_kernel(source_psf, target_psf, window=None):
    """
    Create a kernel to match 2D point spread functions (PSF) using the
    ratio of Fourier transforms.

    Parameters
    ----------
    source_psf : 2D `~numpy.ndarray`
        The source PSF.  The source PSF should have higher resolution
        (i.e. narrower) than the target PSF.  ``source_psf`` and
        ``target_psf`` must have the same shape and pixel scale.

    target_psf : 2D `~numpy.ndarray`
        The target PSF.  The target PSF should have lower resolution
        (i.e. broader) than the source PSF.  ``source_psf`` and
        ``target_psf`` must have the same shape and pixel scale.

    window : callable, optional
        The window (or taper) function or callable class instance used
        to remove high frequency noise from the PSF matching kernel.
        Some examples include:

        * `~photutils.psf.matching.HanningWindow`
        * `~photutils.psf.matching.TukeyWindow`
        * `~photutils.psf.matching.CosineBellWindow`
        * `~photutils.psf.matching.SplitCosineBellWindow`
        * `~photutils.psf.matching.TopHatWindow`

        For more information on window functions and example usage, see
        :ref:`psf_matching`.

    Returns
    -------
    kernel : 2D `~numpy.ndarray`
        The matching kernel to go from ``source_psf`` to ``target_psf``.
        The output matching kernel is normalized such that it sums to 1.
    """

    # inputs are copied so that they are not changed when normalizing
    source_psf = np.copy(np.asanyarray(source_psf))
    target_psf = np.copy(np.asanyarray(target_psf))

    if source_psf.shape != target_psf.shape:
        raise ValueError('source_psf and target_psf must have the same shape '
                         '(i.e. registered with the same pixel scale).')

    # ensure input PSFs are normalized
    source_psf /= source_psf.sum()
    target_psf /= target_psf.sum()

    source_otf = fftshift(fft2(source_psf))
    target_otf = fftshift(fft2(target_psf))
    ratio = target_otf / source_otf

    # apply a window function in frequency space
    if window is not None:
        ratio *= window(target_psf.shape)

    kernel = np.real(fftshift((ifft2(ifftshift(ratio)))))
    return kernel / kernel.sum()
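A hedged usage sketch with synthetic Gaussian PSFs. Gaussian2DKernel is astropy's (a photutils dependency); the sigmas and sizes are illustrative:

import numpy as np
from astropy.convolution import Gaussian2DKernel

source = Gaussian2DKernel(3, x_size=51, y_size=51).array   # narrower PSF
target = Gaussian2DKernel(5, x_size=51, y_size=51).array   # broader PSF
kernel = create_matching_kernel(source, target)
assert np.isclose(kernel.sum(), 1.0)                       # normalized output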
Example #7
File: image.py  Project: Germanc/supreme
def phase_corr(A, B):
    """Phase correlation of two images.

    Parameters
    ----------
    A, B : (M,N) ndarray
        Input images.

    Returns
    -------
    out : (M,N) ndarray
        Correlation coefficients.

    Examples
    --------

    Set up test data.  One array is offset (10, 10) from the other.

    >>> x = np.random.random((50, 50))
    >>> y = np.zeros_like(x)
    >>> y[10:, 10:] = x[0:-10, 0:-10]

    Correlate the two arrays, and ensure the peak is at (10, 10).

    >>> out = phase_corr(y, x)
    >>> m, n = np.unravel_index(np.argmax(out), out.shape)
    >>> (m, n)
    (10, 10)

    """
    out = fft2(A) * fft2(B).conj()
    out /= np.abs(out)
    out = np.abs(ifft2(out))

    return out
Example #8
def cross_corr(img1,img2,mask=None):
    '''Compute the cross-correlation of two images.
        If a mask is supplied, the result is divided by the mask's
        autocorrelation to compensate for the reduced overlap.
        input:
            img1: first image
            img2: second image
            mask: a mask array
        output:
            the cross-correlation of the two images (same shape as the correlated images)

    '''
    imgc = fftshift(ifft2(fft2(img1) * np.conj(fft2(img2))).real)

    if(mask is not None):
        maskc = cross_corr(mask,mask)        
        imgc /= np.maximum( 1, maskc )
            
            
    return imgc
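The mask branch corrects for partial overlap by dividing by the mask's autocorrelation. A quick usage sketch; the half-frame mask is a hypothetical, and fft2/ifft2/fftshift are assumed to come from numpy.fft:

import numpy as np
from numpy.fft import fft2, ifft2, fftshift

img = np.random.rand(32, 32)
mask = np.ones_like(img)
mask[:, 16:] = 0.0                   # hypothetical mask: right half invalid
corr = cross_corr(img * mask, img * mask, mask=mask)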
Example #9
def image_compare(df, IMAGES_DIR='/home/ryan/asi_images/'):
    '''
    Takes a dataframe of n image ids and returns the n*(n-1)/2 pairwise comparisons: the r2 (pixel) difference, the r2 difference of the FFTs, and the average standard deviation of the pair
    '''
    img_buffer = {}
    return_list = []
    artdf = df[['_id', 'images']].copy()
    artdf.images = artdf.images.apply(getpath) 
    paths = artdf[['_id','images']].dropna()
    paths.index = paths._id
    paths = paths.images
    if paths.shape[0] < 2:
        return DataFrame([])
    for id_pair in combinations(paths.index, 2):
        if id_pair[0] in img_buffer:
            img1 = img_buffer[id_pair[0]]
        else:
            img_buffer[id_pair[0]] = img_as_float(rgb2gray(resize(imread(IMAGES_DIR + paths[id_pair[0]]), (300,300))))
            img1 = img_buffer[id_pair[0]]
        
        if id_pair[1] in img_buffer:
            img2 = img_buffer[id_pair[1]]
        else:
            img_buffer[id_pair[1]] = img_as_float(rgb2gray(resize(imread(IMAGES_DIR + paths[id_pair[1]]), (300,300))))
            img2 = img_buffer[id_pair[1]]
        return_list.append(
                [id_pair[0], id_pair[1],
                    norm(img1 - img2),
                    norm(fft2(img1) - fft2(img2)),
                    # parenthesized so the column really is the average std
                    (std(img1) + std(img2)) / 2.]
        )
    return DataFrame(return_list, columns=['id1','id2','r2diff', 'fftdiff', 'stdavg'])
Example #10
def increment_mccf(A, B, X, y, nu=0.125, l=0.01, boundary='constant'):
    r"""
    Incremental Multi-Channel Correlation Filter (MCCF)
    """
    # number of images; number of channels, height and width
    n, k, hx, wx = X.shape
    x_shape = (hx, wx)

    # height and width of desired responses
    _, hy, wy = y.shape
    y_shape = (hy, wy)

    # extended shape
    ext_h = hx + hy - 1
    ext_w = wx + wy - 1
    ext_shape = (ext_h, ext_w)
    # extended dimensionality
    ext_d = ext_h * ext_w

    # extend desired response
    ext_y = pad(y, ext_shape)
    # fft of extended desired response
    fft_ext_y = fft2(ext_y)

    # auto and cross spectral energy matrices
    sXX = 0
    sXY = 0
    # for each training image and desired response
    for x in X:
        # extend image
        ext_x = pad(x, ext_shape, boundary=boundary)
        # fft of extended image
        fft_ext_x = fft2(ext_x)

        # store extended image fft as sparse diagonal matrix
        diag_fft_x = spdiags(fft_ext_x.reshape((k, -1)),
                             -np.arange(0, k) * ext_d, ext_d * k, ext_d).T
        # vectorize extended desired response fft
        diag_fft_y = fft_ext_y.ravel()

        # update auto and cross spectral energy matrices
        sXX += diag_fft_x.conj().T.dot(diag_fft_x)
        sXY += diag_fft_x.conj().T.dot(diag_fft_y)

    # combine old and new auto and cross spectral energy matrices
    sXY = (1 - nu) * A + nu * sXY
    sXX = (1 - nu) * B + nu * sXX
    # solve ext_d independent k x k linear systems (with regularization)
    # to obtain desired extended multi-channel correlation filter
    fft_ext_f = spsolve(sXX + l * speye(sXX.shape[-1]), sXY)
    # reshape extended filter to extended image shape
    fft_ext_f = fft_ext_f.reshape((k, ext_h, ext_w))

    # compute filter inverse fft
    ext_f = np.real(ifftshift(ifft2(fft_ext_f), axes=(-2, -1)))
    # crop extended filter to match desired response shape
    f = crop(ext_f, y_shape)

    return f, sXY, sXX
Example #11
File: poisson.py  Project: qsnake/gpaw
    def initialize(self, b_phi1, b_phi2):
        distribution = np.zeros([self.gd.comm.size], int)
        if self.gd.comm.rank == 0:
            d3 = b_phi1.shape[2]
            gd = self.gd
            N_c1 = gd.N_c[:2, np.newaxis]
            i_cq = np.indices(gd.N_c[:2]).reshape((2, -1))
            i_cq += N_c1 // 2
            i_cq %= N_c1
            i_cq -= N_c1 // 2
            B_vc = 2.0 * np.pi * gd.icell_cv.T[:2, :2]
            k_vq = np.dot(B_vc, i_cq)
            k_vq *= k_vq
            k_vq2 = np.sum(k_vq, axis=0)
            k_vq2 = k_vq2.reshape(-1)

            b_phi1 = fft2(b_phi1, None, (0,1))
            b_phi2 = fft2(b_phi2, None, (0,1))

            b_phi1 = b_phi1[:, :, -1].reshape(-1)
            b_phi2 = b_phi2[:, :, 0].reshape(-1)

            loc_b_phi1 = np.array_split(b_phi1, self.gd.comm.size)
            loc_b_phi2 = np.array_split(b_phi2, self.gd.comm.size)
            loc_k_vq2 = np.array_split(k_vq2, self.gd.comm.size)

            self.loc_b_phi1 = loc_b_phi1[0]
            self.loc_b_phi2 = loc_b_phi2[0]
            self.k_vq2 = loc_k_vq2[0]

            for i in range(self.gd.comm.size):
                distribution[i] = len(loc_b_phi1[i])
            self.gd.comm.broadcast(distribution, 0)

            for i in range(1, self.gd.comm.size):
                self.gd.comm.ssend(loc_b_phi1[i], i, 135)
                self.gd.comm.ssend(loc_b_phi2[i], i, 246)
                self.gd.comm.ssend(loc_k_vq2[i], i, 169)
        else:
            self.gd.comm.broadcast(distribution, 0)
            self.loc_b_phi1 = np.zeros([distribution[self.gd.comm.rank]],
                                       dtype=complex)
            self.loc_b_phi2 = np.zeros([distribution[self.gd.comm.rank]],
                                       dtype=complex)
            self.k_vq2 = np.zeros([distribution[self.gd.comm.rank]])
            self.gd.comm.receive(self.loc_b_phi1, 0, 135)
            self.gd.comm.receive(self.loc_b_phi2, 0, 246)
            self.gd.comm.receive(self.k_vq2, 0, 169)


        k_distribution = np.arange(np.sum(distribution))
        self.k_distribution = np.array_split(k_distribution,
                                             self.gd.comm.size)

        self.d1, self.d2, self.d3 = self.gd.N_c
        self.r_distribution = np.array_split(np.arange(self.d3),
                                             self.gd.comm.size)
        self.comm_reshape = not (self.gd.parsize_c[0] == 1
                                 and self.gd.parsize_c[1] == 1)
Example #12
    def test_kosta_comp_abs(self):
        ft_image = fft2(self.image)
        ft_mask = fft2(self.inert_mask_padded_kosta)

        ft_result = ft_image * ft_mask
        result = ifft2(ft_result)

        assert_array_almost_equal(abs(result), self.image)
Example #13
def FFT_coregistration(ref_band_mat,target_band_mat):

    '''
    Alternative method used to coregister the images based on the FFT

    :param ref_band_mat: numpy 8 bit array containing reference image
    :param target_band_mat: numpy 8 bit array containing target image
    :returns: the shift between the two input images

    Author: Mostapha Harb - Daniele De Vecchi - Daniel Aurelio Galeazzo
    Last modified: 14/11/2014
    '''
    status = Bar(3, "FFT")
    #Normalization - http://en.wikipedia.org/wiki/Cross-correlation#Normalized_cross-correlation 
    ref_band_mat = (ref_band_mat - ref_band_mat.mean()) / ref_band_mat.std()
    target_band_mat = (target_band_mat - target_band_mat.mean()) / target_band_mat.std() 

    #Check dimensions - they have to match
    rows_ref,cols_ref =  ref_band_mat.shape
    rows_target,cols_target = target_band_mat.shape

    if rows_target < rows_ref:
        print('Rows - correction needed')
        diff = rows_ref - rows_target
        target_band_mat = np.vstack((target_band_mat,np.zeros((diff,cols_target))))
    elif rows_ref < rows_target:
        print('Rows - correction needed')
        diff = rows_target - rows_ref
        ref_band_mat = np.vstack((ref_band_mat,np.zeros((diff,cols_ref))))
    status(1)
    rows_target,cols_target = target_band_mat.shape
    rows_ref,cols_ref = ref_band_mat.shape

    if cols_target < cols_ref:
        print('Columns - correction needed')
        diff = cols_ref - cols_target
        target_band_mat = np.hstack((target_band_mat,np.zeros((rows_target,diff))))
    elif cols_ref < cols_target:
        print('Columns - correction needed')
        diff = cols_target - cols_ref
        ref_band_mat = np.hstack((ref_band_mat,np.zeros((rows_ref,diff))))

    rows_target,cols_target = target_band_mat.shape   
    status(2)
    #translation(im_target,im_ref)
    freq_target = fft2(target_band_mat)   
    freq_ref = fft2(ref_band_mat)  
    inverse = abs(ifft2((freq_target * freq_ref.conjugate()) / (abs(freq_target) * abs(freq_ref))))   
    # np.unravel_index turns the flat argmax index into the (row, col) coordinates of the correlation peak
    y_shift,x_shift = np.unravel_index(np.argmax(inverse),(rows_target,cols_target))

    if y_shift > rows_target // 2: # // used to truncate the division
        y_shift -= rows_target
    if x_shift > cols_target // 2: # // used to truncate the division
        x_shift -= cols_target
    status(3)
    return -x_shift, -y_shift
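The core of the method is the normalized cross-power spectrum (phase correlation). A standalone sketch with a synthetic shift; the (7, -4) offset is illustrative:

import numpy as np
from numpy.fft import fft2, ifft2

ref = np.random.rand(100, 100)
tgt = np.roll(np.roll(ref, 7, axis=0), -4, axis=1)     # ref shifted by (7, -4)
f_ref, f_tgt = fft2(ref), fft2(tgt)
xpow = (f_tgt * f_ref.conjugate()) / (np.abs(f_tgt) * np.abs(f_ref))
y_shift, x_shift = np.unravel_index(np.argmax(np.abs(ifft2(xpow))), ref.shape)
# y_shift == 7; x_shift == 96, i.e. -4 before the wrap-around correction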
Example #14
def InitVelField(_N, _M, _h, h, dt, rho=1.0, mu=1.0, DeltaType=0):
    WideLambda = zeros((_N, _M), float64)
    ShortLambda = zeros((_N, _M), float64)
    IB_c.InitWideLaplacian(_N, _M, _h, WideLambda)
    IB_c.InitShortLaplacian(_N, _M, _h, ShortLambda)
    DxSymbol = InitDxSymbol(_N, _M, _h)
    DySymbol = InitDySymbol(_N, _M, _h)

    r = int(ceil(3.0 * h / _h))

    fx = zeros((_N, _M), float64)
    for j in range(-r, r + 1):
        deltx = Delta(h, j * _h, DeltaType)
        for k in range(-r, r + 1):
            delt = deltx * Delta(h, k * _h, DeltaType) * 1.0
            fx[j % _N][k % _M] = fx[j % _N][k % _M] + delt
    #       print j%_N, k%_M, fx[j%_N][k%_M]

    fx, fy = fft2(dt * fx), zeros((_N, _M), float64)

    P = Solve_P_Hat(dt, WideLambda, DxSymbol, DySymbol, fx, fy)
    P[0, 0] = 0.0

    u, v = Solve_uv_Hat(dt, ShortLambda, DxSymbol, DySymbol, P, fx, fy, rho, mu)
    u = 1.0 * ifft2(u).real
    v = 1.0 * ifft2(v).real
    #    P = ifft2(P).real

    Fx1 = array(zeros((_N, _M), float64))
    Fy1 = array(zeros((_N, _M), float64))

    IB_c.WholeGridSpread(u, float(h), float(_h), int(r), Fx1, DeltaType)
    IB_c.WholeGridSpread(v, float(h), float(_h), int(r), Fy1, DeltaType)

    fy = zeros((_N, _M), float64)
    for j in range(-r, r + 1):
        deltx = Delta(h, j * _h, DeltaType)
        for k in range(-r, r + 1):
            delt = deltx * Delta(h, k * _h, DeltaType) * 1.0
            fy[j % _N][k % _M] = fy[j % _N][k % _M] + delt
    #       print j%_N, k%_M, fx[j%_N][k%_M]

    fx, fy = zeros((_N, _M), float64), fft2(dt * fy)

    P = Solve_P_Hat(dt, WideLambda, DxSymbol, DySymbol, fx, fy)
    P[0, 0] = 0.0

    u, v = Solve_uv_Hat(dt, ShortLambda, DxSymbol, DySymbol, P, fx, fy, rho, mu)
    u = 1.0 * ifft2(u).real
    v = 1.0 * ifft2(v).real

    Fx2 = array(zeros((_N, _M), float64))
    Fy2 = array(zeros((_N, _M), float64))

    IB_c.WholeGridSpread(u, float(h), float(_h), int(r), Fx2, DeltaType)
    IB_c.WholeGridSpread(v, float(h), float(_h), int(r), Fy2, DeltaType)

    return Fx1, Fy1, Fx2, Fy2
Example #15
    def test_our_comp_shifted_abs(self):
        # Now, our computation
        ft_image = fft2(self.image)
        ft_mask = fft2(self.inert_mask, self.image.shape)
        ft_res_ft = fftshift(ft_image) * fftshift(ft_mask)

        result = ifft2(ifftshift(ft_res_ft))

        assert_array_equal(abs(result), self.image)
Example #16
    def test_our_comp_real(self):
        # Now, our computation
        ft_image = fft2(self.image)
        ft_mask = fft2(self.inert_mask, self.image.shape)
        ft_res_ft = ft_image * ft_mask

        result = ifft2(ft_res_ft)

        assert_array_equal(result.real, self.image)
Example #17
def cross_correlation_pcm(img1, img2, rangeX=None, rangeY=None, blur=3, down=0):
    """
    Find the cartesian shift vector between two images

    Parameters
    ----------

    img1 : ndarray
        Input array.
    img2 : ndarray
        Input array.

    rangeX, rangeY : integer, optional
        Minimum and maximum bounds of the search window for the
        correlation peak.
    blur : integer
        Width (sigma) of the Gaussian blur applied to the phase map.

    Returns
    -------
    ndarray
        shift array.
    """
    new_size = shift_bit_length(max(map(max, img2.shape, img1.shape)))
    new_shape = [new_size, new_size]
    ##
    if rangeX is None:
        rangeX = [0, new_size]
    if rangeY is None:
        rangeY = [0, new_size]
    ##
    norm_max = np.max([img1.max(), img2.max()])
    src_image = np.array(img1, dtype=np.complex128, copy=False) / norm_max
    target_image = np.array(img2, dtype=np.complex128, copy=False) / norm_max
    f0 = fft2(src_image)
    f1 = fft2(target_image)
    cross_correlation = abs(ifft2((f0 * f1.conjugate()) / (abs(f0) * abs(f1))))
    cross_correlation = ndimage.gaussian_filter(cross_correlation, sigma=blur)
    shape = cross_correlation.shape
    mask = np.zeros(cross_correlation.shape)
    if rangeY[0] > 0 and rangeX[0] > 0:
        mask[rangeY[0]:rangeY[1], rangeX[0]:rangeX[1]] = 1
    elif rangeY[0] < 0:
        mask[shape[0] + rangeY[0]:, rangeX[0]:rangeX[1]] = 1
        mask[:rangeY[1], rangeX[0]:rangeX[1]] = 1
    elif rangeX[0] < 0:
        mask[rangeY[0]:rangeY[1], shape[1] + rangeX[0]:] = 1
        mask[rangeY[0]:rangeY[1], :rangeX[1]] = 1
    cross_correlation = cross_correlation * mask
    # Locate maximum (as a list so the wrap-around corrections below can
    # mutate entries; np.unravel_index returns an immutable tuple)
    shifts = list(np.unravel_index(np.argmax(np.abs(cross_correlation)),
                                   cross_correlation.shape))
    if not down:
        if shifts[1] < 0:
            shifts[1] += float(shape[1])
    else:
        if shifts[0] < 0:
            shifts[0] += float(shape[0])
    return shifts
Example #18
    def test_kost_comp_abs(self):
        ft_image = fft2(self.image)
        ft_mask_padded = fft2(self.inert_mask_padded)

        ft_result = fftshift(ft_image) * fftshift(ft_mask_padded)

        result = ifftshift(ifft2(ft_result))

        assert_array_almost_equal(abs(result), self.image)
Example #19
    def test_kost_comp_real(self):
        ft_image = fft2(self.image)
        ft_mask_padded = fft2(self.inert_mask_padded)

        ft_result = ft_image * ft_mask_padded

        result = ifft2(ft_result)

        assert_array_equal(result.real, self.image)
Example #20
File: hl.py  Project: EPFL-LQM/gpvmc
def get_stat_spin_struct(filenames,nsamp):
    """
    Gets the static structure factor, flattened.
    The q-vectors are given by:
    q=arange(L)
    qx,qy=meshgrid(q,q)
    qx=qx.flatten()
    qy=qy.flatten()
    """
    if type(filenames)!=list:
        filenames=[filenames]
    Sq=load.get_quantity(filenames,nsamp)
    params=load.get_attr(filenames[0])
    Lx=int(params['L'])
    Ly=int(params['L'])
    hLx=int(Lx/2)
    hLy=int(Ly/2)
    N=Lx*Ly
    if Sq.shape[2]==N:
        # old file format, struct stored in Fourier components
        Sqxx=np.reshape(0.25*(Sq[:,1,:]+Sq[:,2,:]+Sq[:,3,:]+Sq[:,4,:]),(Sq.shape[0],Lx,Ly))
        Sqyy=np.reshape(0.25*(Sq[:,1,:]+Sq[:,2,:]-Sq[:,3,:]-Sq[:,4,:]),(Sq.shape[0],Lx,Ly))
        Sqzz=np.reshape(Sq[:,0,:],(Sq.shape[0],Lx,Ly))
        Srxx=fft.fftshift(fft.fft2(Sqxx,axes=(1,2)),axes=(1,2))/N
        Sryy=fft.fftshift(fft.fft2(Sqyy,axes=(1,2)),axes=(1,2))/N
        Srzz=fft.fftshift(fft.fft2(Sqzz,axes=(1,2)),axes=(1,2))/N
    else :
        # new file format, struct stored as real space site pairs.
        rx,ry=np.meshgrid(np.arange(Lx,dtype=int),np.arange(Ly,dtype=int))
        rx=rx.ravel()
        ry=ry.ravel()
        rix,rjx=np.meshgrid(rx,rx)
        riy,rjy=np.meshgrid(ry,ry)
        rijx=rjx-rix
        rijy=rjy-riy
        rijx[rijx>=hLx]-=Lx
        rijx[rijx<-hLx]+=Lx
        rijy[rijy>=hLy]-=Ly
        rijy[rijy<-hLy]+=Ly
        rijx=rijx.ravel()
        rijy=rijy.ravel()
        Sr=np.zeros((Sq.shape[0],5,N),dtype=complex)
        for samp in range(Sq.shape[0]):
            for t in range(N):
                Sr[samp,:,t]=np.sum(Sq[samp,:,np.where((rijy+hLy)*Lx+rijx+hLx==t)[0]],axis=0)/N
        Srxx=np.zeros((Sq.shape[0],Lx,Ly),dtype=complex)
        Sryy=np.zeros((Sq.shape[0],Lx,Ly),dtype=complex)
        Srzz=np.zeros((Sq.shape[0],Lx,Ly),dtype=complex)
        for samp in range(Sq.shape[0]):
            Srxx[samp,:,:]=np.reshape(0.25*np.sum(Sr[samp,1:,:],axis=0),(Lx,Ly))
            Sryy[samp,:,:]=np.reshape(0.25*(np.sum(Sr[samp,1:3,:],axis=0)-np.sum(Sr[samp,3:,:],axis=0)),(Lx,Ly))
            Srzz[samp,:,:]=np.reshape(Sr[samp,0,:],(Lx,Ly))
        Sqxx=fft.ifft2(fft.fftshift(Srxx,axes=(1,2)),axes=(1,2))*N
        Sqyy=fft.ifft2(fft.fftshift(Sryy,axes=(1,2)),axes=(1,2))*N
        Sqzz=fft.ifft2(fft.fftshift(Srzz,axes=(1,2)),axes=(1,2))*N
    return (Sqxx,Sqyy,Sqzz),(Srxx,Sryy,Srzz)
Example #21
def fftconvolve(x, kernel):
    """convolve 2-d array x with a kernel *centered* in array."""
    
    ny, nx = kernel.shape
    xctr, yctr = (nx-1)/2., (ny-1)/2.
    
    # Phasor that will shift kernel to be centered at (0., 0.)
    fshift = cubefit.fft_shift_phasor_2d(kernel.shape, (-xctr, -yctr))
    
    return ifft2(fft2(kernel) * fft2(x) * fshift).real
Example #22
def myconv2(A, B, zeropadding = False):
    # optional zero padding to avoid circular-convolution aliasing
    if zeropadding:
        origdim = A.shape
        nextpow = int(pow(2, np.ceil(np.log(np.max(origdim))/np.log(2))+1))
        A = zeropad(A, nextpow, nextpow)
        B = zeropad(B, nextpow, nextpow)
    output = fftshift(ifft2( np.multiply(fft2(fftshift(A)), fft2(fftshift(B)) )))
    if zeropadding:
        # integer arithmetic throughout: float slice indices raise TypeError
        output = output[nextpow//2 - origdim[0]//2: nextpow//2 + origdim[0]//2,
                        nextpow//2 - origdim[1]//2: nextpow//2 + origdim[1]//2]
    return output
Example #23
File: tasks.py  Project: mathiasose/TDT4195
def task3_1():
    print('3.1')
    c_img = normalize_intensity(imread(CAMERAMAN))
    fft_res = fft2(a=c_img)
    output_path = os.path.join(OUTPUT_DIR, "3_1_fft_spectrum_" + os.path.split(CAMERAMAN)[-1])
    imsave(output_path, log(1 + abs(fftshift(fft_res))))

    b_img = normalize_intensity(imread(BRICKS))
    fft_res = fft2(a=b_img)
    output_path = os.path.join(OUTPUT_DIR, "3_1_fft_spectrum_" + os.path.split(BRICKS)[-1])
    imsave(output_path, log(1 + abs(fftshift(fft_res))))
Example #24
def cross_correlate(a2,b2,NfftH,NfftV=0):
    """ 
        2D cross correlation, mean removed (normalized) 
    """
    if NfftV == 0:
        NfftV = NfftH
    
    a2 -= a2.ravel().mean()
    b2 -= b2.ravel().mean()
    # c = signal.signaltools.correlate2d(a2,b2)
    return fftshift(ifft2(fft2(a2,s=(NfftH,NfftV))*conj(fft2(b2,s=(NfftH,NfftV)))).real,axes=(0,1))
Example #25
def fft_convolve2d(x,y):
    """
    2D convolution, using FFT
    """
    fr = fft2(x)
    fr2 = fft2(np.flipud(np.fliplr(y)))
    m,n = fr.shape
    cc = np.real(ifft2(fr*fr2))
    cc = np.roll(cc, - int(m / 2) + 1, axis=0)
    cc = np.roll(cc, - int(n / 2) + 1, axis=1)
    return cc
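fft_convolve2d performs circular convolution with the kernel treated as centered, which is exactly what toroidal neighbor counting needs. A sketch, assuming the kernel is embedded in an array of the same shape as the board:

import numpy as np
from numpy.fft import fft2, ifft2

board = np.zeros((16, 16))
board[8, 8] = 1.0                 # one live cell
kernel = np.zeros_like(board)
kernel[7:10, 7:10] = 1.0          # 3x3 neighborhood around the center
kernel[8, 8] = 0.0                # drop the center: neighbors only
counts = fft_convolve2d(board, kernel).round()
# counts is 1.0 at the eight cells adjacent to (8, 8), 0.0 elsewhere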
Example #26
def cube_convolve(imcube, sigma, inplace=0):
    "performs a convolution with a gaussian beam of width sigma on each yz plane of the cube"
    if not inplace:
        imcube = imcube.copy()
    shape = imcube.shape[1:]
    if len(shape) != 2:
        raise ValueError("cube is not a cube")
    gauss_mask = garray(shape, sigma)
    s = [next_pow2(y*2+1) for y in gauss_mask.shape]
    ftg = fft2(gauss_mask, s).reshape(s)
    # range and integer slicing for Python 3 (xrange and float indices are gone)
    for i in range(imcube.shape[0]):
        imcube[i,...] = np.real(ifft2(fft2(imcube[i,...], s)*ftg)[shape[0]//2:3*shape[0]//2, shape[1]//2:3*shape[1]//2])
    return imcube
Example #27
File: imgreg.py  Project: Satsuoni/imgreg
def translation(im0, im1):
    """Return translation vector to register images."""
    shape = im0.shape
    f0 = fft2(im0)
    f1 = fft2(im1)
    ir = abs(ifft2((f0 * f1.conjugate()) / (abs(f0) * abs(f1))))
    t0, t1 = np.unravel_index(np.argmax(ir), shape)
    if t0 > shape[0] // 2:
        t0 -= shape[0]
    if t1 > shape[1] // 2:
        t1 -= shape[1]
    return [t0, t1]
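A usage sketch for translation(); the (5, -3) offset is illustrative, and fft2/ifft2 are assumed to come from numpy.fft:

import numpy as np
from numpy.fft import fft2, ifft2

im0 = np.random.rand(64, 64)
im1 = np.roll(np.roll(im0, 5, axis=0), -3, axis=1)   # im0 shifted by (5, -3)
print(translation(im1, im0))                         # -> [5, -3]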
Example #28
    def test_kosta_comp_shifted_wrong_abs(self):
        ft_image = fft2(self.image)
        ft_mask = fft2(self.inert_mask_padded_kosta)

        sh_ft_image = fftshift(ft_image)
        sh_ft_mask = fftshift(ft_mask)

        sh_ft_res = sh_ft_image * sh_ft_mask

        result = ifftshift(ifft2(sh_ft_res))

        assert_array_almost_equal(abs(result), self.image)
Example #29
def fft_convolve2d(x,y):
    """
    2D convolution, using FFT
    borrowed from:
      https://github.com/thearn/game-of-life/blob/master/lib/lib.py#L4
    """
    fr = fft2(x)
    fr2 = fft2(np.flipud(np.fliplr(y)))
    m,n = fr.shape
    cc = np.real(ifft2(fr*fr2))
    cc = np.roll(cc, - int(m / 2) + 1, axis=0)
    cc = np.roll(cc, - int(n / 2) + 1, axis=1)
    return cc
Example #30
File: filters.py  Project: jsalva/psychopy
def conv2d(smaller, larger):
    """convolve a pair of 2d numpy matrices
    Uses fourier transform method, so faster if larger matrix
    has dimensions of size 2**n

    Actually right now the matrices must be the same size (will sort out
    padding issues another day!)
    """
    smallerFFT = fft2(smaller)
    largerFFT = fft2(larger)

    invFFT = ifft2(smallerFFT*largerFFT)
    return invFFT.real
Example #31
def band_filtering(f, filter, D0, width, order):
    nr, nc = f.shape[:2]

    fp = np.zeros([nr, nc])  # preprocessing: modulate by (-1)^(x+y) to center the spectrum
    for x in range(nr):
        for y in range(nc):
            fp[x, y] = pow(-1, x + y) * f[x, y]

    F = fft2(fp)  # discrete Fourier transform
    G = F.copy()

    if filter == 1:  # ideal band-reject filter
        for u in range(nr):
            for v in range(nc):
                dist = np.sqrt((u - nr / 2) * (u - nr / 2) + (v - nc / 2) *
                               (v - nc / 2))
                if dist >= D0 - width / 2 and dist <= D0 + width / 2:
                    G[u, v] = 0

    elif filter == 2:  # ideal band-pass filter
        for u in range(nr):
            for v in range(nc):
                dist = np.sqrt((u - nr / 2) * (u - nr / 2) + (v - nc / 2) *
                               (v - nc / 2))
                if dist < D0 - width / 2 or dist > D0 + width / 2:
                    G[u, v] = 0

    elif filter == 3:  # Gaussian band-reject filter
        for u in range(nr):
            for v in range(nc):
                dist = np.sqrt((u - nr / 2) * (u - nr / 2) + (v - nc / 2) *
                               (v - nc / 2))
                if dist != 0 and width != 0:
                    H = 1.0 - np.exp(-pow((dist * dist - D0 * D0) /
                                          (dist * width), 2))
                    G[u, v] *= H

    elif filter == 4:  # Gaussian band-pass filter
        for u in range(nr):
            for v in range(nc):
                dist = np.sqrt((u - nr / 2) * (u - nr / 2) + (v - nc / 2) *
                               (v - nc / 2))
                if dist != 0 and width != 0:
                    H = np.exp(-pow((dist * dist - D0 * D0) /
                                    (dist * width), 2))
                    G[u, v] *= H

    elif filter == 5:  # Butterworth band-reject filter
        for u in range(nr):
            for v in range(nc):
                dist = np.sqrt((u - nr / 2) * (u - nr / 2) + (v - nc / 2) *
                               (v - nc / 2))
                if dist != D0:
                    H = 1.0 / (1.0 + pow(
                        (dist * width) / (dist * dist - D0 * D0), 2 * order))
                    G[u, v] *= H
                else:
                    G[u, v] = 0

    elif filter == 6:  # Butterworth band-pass filter
        for u in range(nr):
            for v in range(nc):
                dist = np.sqrt((u - nr / 2) * (u - nr / 2) + (v - nc / 2) *
                               (v - nc / 2))
                if dist != D0:
                    H = 1.0 - 1.0 / (1.0 + pow(
                        (dist * width) / (dist * dist - D0 * D0), 2 * order))
                    G[u, v] *= H

    gp = ifft2(G)  # inverse discrete Fourier transform

    gp2 = np.zeros([nr, nc])  # postprocessing: undo the (-1)^(x+y) modulation
    for x in range(nr):
        for y in range(nc):
            gp2[x, y] = round(pow(-1, x + y) * np.real(gp[x, y]), 0)
    g = np.uint8(np.clip(gp2, 0, 255))

    return g
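The per-pixel loops above can be vectorized on a centered frequency-distance grid. A sketch of the Gaussian band-reject transfer function only, with the same meaning for D0 and width:

import numpy as np

def gaussian_bandreject(nr, nc, D0, width):
    # centered frequency coordinates, matching the (u - nr/2, v - nc/2) above
    u, v = np.meshgrid(np.arange(nr) - nr / 2, np.arange(nc) - nc / 2,
                       indexing='ij')
    dist = np.sqrt(u * u + v * v)
    with np.errstate(divide='ignore', invalid='ignore'):
        H = 1.0 - np.exp(-((dist ** 2 - D0 ** 2) / (dist * width)) ** 2)
    H[dist == 0] = 1.0   # the dist == 0 pixel is left untouched above as well
    return H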
Example #32
# -*- coding: utf-8 -*-
import numpy as np
from numpy import fft
import pyopencv as cv
import matplotlib.pyplot as plt

N = 256
img = cv.imread("lena_full.jpg")
img2 = cv.Mat()
cv.cvtColor(img, img2, cv.CV_BGR2GRAY)
img = cv.Mat()
cv.resize(img2, img, cv.Size(N, N))

fimg = fft.fft2(img[:])
mag_img = np.log10(np.abs(fimg))
shift_mag_img = fft.fftshift(mag_img)

rects = [(80, 125, 85, 130), (90, 90, 95, 95), (150, 10, 250, 250),
         (110, 110, 146, 146)]

filtered_results = []
for i, (x0, y0, x1, y1) in enumerate(rects):
    mask = np.zeros((N, N), dtype=bool)  # np.bool was removed in NumPy 1.24
    mask[x0:x1 + 1, y0:y1 + 1] = True
    mask[N - x1:N - x0 + 1, N - y1:N - y0 + 1] = True
    mask = fft.fftshift(mask)
    fimg2 = fimg * mask
    filtered_img = fft.ifft2(fimg2).real
    filtered_results.append(filtered_img)

### Plotting section ###