Example #1
def get_filtered_CSF_img(img_in):
    img_dft = cv2.dft(np.float32(img_in), flags=cv2.DFT_COMPLEX_OUTPUT)
    dft_shift = np.fft.fftshift(img_dft)
    height = img_dft.shape[0]
    width = img_dft.shape[1]
    M = width / 2
    N = height / 2
    H_matrix = np.zeros((height, width))

    # Build the CSF transfer function sample by sample around the spectrum centre
    for h_idx in range(height):
        for w_idx in range(width):
            m = -M + w_idx + 0.5
            n = -N + h_idx + 0.5
            freq, theta = get_freq_dirc(m, n, width, height)
            multiVal = freq_trans_func(freq, theta)
            H_matrix[h_idx][w_idx] = multiVal

    # Weight the centred magnitude by the CSF; keep the phase unchanged
    img_magi = cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1])
    img_magi *= H_matrix
    img_phase = cv2.phase(dft_shift[:, :, 0], dft_shift[:, :, 1])

    img_re = img_magi * np.cos(img_phase)
    img_im = img_magi * np.sin(img_phase)

    # Undo the shift before the inverse transform
    img_dft2 = np.fft.ifftshift(np.dstack((img_re, img_im)))

    imgback = cv2.idft(img_dft2)
    imgback = cv2.magnitude(imgback[:, :, 0], imgback[:, :, 1])

    return imgback
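The helpers get_freq_dirc and freq_trans_func are not shown. A minimal sketch of what they might look like, assuming normalized radial frequency coordinates and the widely used Mannos-Sakrison contrast sensitivity function (both assumptions, not the original implementation):

import numpy as np

def get_freq_dirc(m, n, width, height):
    # Radial frequency and orientation of the coefficient at offset (m, n)
    # from the spectrum centre (units are an assumption of this sketch)
    freq = np.sqrt((m / width) ** 2 + (n / height) ** 2)
    theta = np.arctan2(n, m)
    return freq, theta

def freq_trans_func(freq, theta):
    # Mannos-Sakrison CSF; theta is accepted but unused in this sketch
    # (the original may apply an orientation weighting)
    return 2.6 * (0.0192 + 0.114 * freq) * np.exp(-(0.114 * freq) ** 1.1)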
Example #2
def doDFT():
	# read as gray image.
	img = cv2.imread('1.jpg', 0)
	# cv2.imwrite('gray.jpg', img)

	# t1 = dct(dct(img.T, norm='ortho').T, norm='ortho')
	dft = cv2.dft(np.float32(img),flags = cv2.DFT_COMPLEX_OUTPUT)
	dft_shift = np.fft.fftshift(dft)

	magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))


	rows, cols = img.shape
	crow, ccol = rows // 2, cols // 2

	# create a mask first, center square is 1, remaining all zeros
	mask = np.zeros((rows,cols,2),np.uint8)
	mask[crow-5:crow+5, ccol-5:ccol+5] = 1

	# apply mask and inverse DFT
	fshift = dft_shift*mask
	print(fshift[:, :, 0])
	print(fshift[:, :, 0].shape)
	f_ishift = np.fft.ifftshift(fshift)
	img_back = cv2.idft(f_ishift)
	img_back = cv2.magnitude(img_back[:,:,0],img_back[:,:,1])

	plt.subplot(121),plt.imshow(img, cmap = 'gray')
	plt.title('Input Image'), plt.xticks([]), plt.yticks([])
	plt.subplot(122),plt.imshow(img_back, cmap = 'gray')
	plt.title('Low-pass Filtered'), plt.xticks([]), plt.yticks([])
	plt.show()
	return
Example #3
def cv2_convolution(image, b):
    dft_m = cv2.getOptimalDFTSize(image.shape[0] + b.shape[0] - 1)
    dft_n = cv2.getOptimalDFTSize(image.shape[1] + b.shape[1] - 1)
    d = b.shape[0]
    c = np.zeros((image.shape[0] + d - 1, image.shape[1] + d - 1), dtype='uint8')
    # getting gaussian dft
    dft_b = np.zeros((dft_m, dft_n), dtype='float64')
    dft_b[:b.shape[0], :b.shape[1]] = b
    dft_b = cv2.dft(dft_b, flags=cv2.DFT_REAL_OUTPUT)
    # getting layers dft
    dfts = []
    new_channels = []
    channels = cv2.split(image)
    for i, channel in enumerate(channels):
        #cv2.imshow('channel %d'%i, channel)
        a = np.array(channel, dtype='float64')
        dft_a = np.zeros((dft_m, dft_n), dtype='float64')
        dft_a[:a.shape[0], :a.shape[1]] = a
        dft_a = cv2.dft(dft_a, flags=cv2.DFT_REAL_OUTPUT)
        dft_a = cv2.mulSpectrums(dft_a, dft_b, 0)
        dft_a = cv2.idft(dft_a, flags= cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
        tmp = dft_a[d//2:a.shape[0] + d//2, d//2:a.shape[1] + d//2]
        channel = np.array(tmp, dtype='uint8')
        #cv2.imshow('new channel %d'%i, channel)
        new_channels.append(channel)
    result = cv2.merge(new_channels)
    return result
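A usage sketch for the function above; the input path and the kernel construction are assumptions (any square, normalized kernel such as this Gaussian works):

import cv2
import numpy as np

img = cv2.imread('input.png')            # hypothetical input path
g = cv2.getGaussianKernel(15, 3.0)       # 15x1 Gaussian column vector
b = (g @ g.T).astype('float64')          # 15x15 normalized Gaussian kernel
blurred = cv2_convolution(img, b)
cv2.imwrite('blurred.png', blurred)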
Example #4
def main():
    parser = argparse.ArgumentParser(
        description='Enhance fingerprint image with different parameters.')
    parser.add_argument(
        "image_path", help="Specify image path location")

    args = parser.parse_args()

    pre_processor = preprocessing.PreProcessFingerImage(args.image_path)
    pre_processor.process_image()
    image_pre = pre_processor.get_preprocessed_image()

    #gaussian_3 = cv2.GaussianBlur(image, (9,9), 10.0)
    #unsharp_image = cv2.addWeighted(image, 1.5, gaussian_3, -0.5, 0, image)

    dft = cv2.dft(np.float32(image_pre), flags=cv2.DFT_COMPLEX_OUTPUT)
    dft_shift = np.fft.fftshift(dft)
    dft_filtered = preprocessing.frequency_filters.blpf(dft_shift, 70, 20)
    f_ishift = np.fft.ifftshift(dft_filtered)
    image_pre = cv2.idft(
        f_ishift, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)

    for k in np.linspace(0, 2, 5):
        for window in [20, 25, 35, 40, image_pre.shape[0]]:
            image_fft = enhance_image(image_pre, window, 2, k)
            cv2.imshow('FF Enhanced Image with k=' + str(k) +
                       ' window=' + str(window), image_fft)
            cv2.waitKey()
            cv2.destroyAllWindows()
Example #5
    def test_dft(self):

        img = self.get_sample('samples/data/rubberwhale1.png', 0)
        eps = 0.001

        #test direct transform
        refDft = np.fft.fft2(img)
        refDftShift = np.fft.fftshift(refDft)
        refMagnitude = np.log(1.0 + np.abs(refDftShift))

        testDft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
        testDftShift = np.fft.fftshift(testDft)
        testMagnitude = np.log(1.0 + cv2.magnitude(testDftShift[:,:,0], testDftShift[:,:,1]))

        refMagnitude = cv2.normalize(refMagnitude, None, 0.0, 1.0, cv2.NORM_MINMAX)
        testMagnitude = cv2.normalize(testMagnitude, None, 0.0, 1.0, cv2.NORM_MINMAX)

        self.assertLess(cv2.norm(refMagnitude - testMagnitude), eps)

        #test inverse transform
        img_back = np.fft.ifft2(refDft)
        img_back = np.abs(img_back)

        img_backTest = cv2.idft(testDft)
        img_backTest = cv2.magnitude(img_backTest[:,:,0], img_backTest[:,:,1])

        img_backTest = cv2.normalize(img_backTest, None, 0.0, 1.0, cv2.NORM_MINMAX)
        img_back = cv2.normalize(img_back, None, 0.0, 1.0, cv2.NORM_MINMAX)

        self.assertLess(cv2.norm(img_back - img_backTest), eps)
Example #6
def cv2_deconvolution(image, b):
    dft_m = cv2.getOptimalDFTSize(image.shape[0] + b.shape[0] - 1)
    dft_n = cv2.getOptimalDFTSize(image.shape[1] + b.shape[1] - 1)
    d = b.shape[0]  # kernel size
    # getting gaussian dft
    dft_b = np.zeros((dft_m, dft_n), dtype='float64')
    dft_b[:b.shape[0], :b.shape[1]] = b
    psf = cv2.dft(dft_b, flags=cv2.DFT_COMPLEX_OUTPUT)
    psf2 = (psf**2).sum(-1)
    ipsf = psf / (psf2 + 0.7)[..., np.newaxis]
    # getting layers dft
    dfts = []
    new_channels = []
    channels = cv2.split(image)
    for i, channel in enumerate(channels):
        #cv2.imshow('channel %d'%i, channel)
        a = np.array(channel, dtype='float64')
        dft_a = np.zeros((dft_m, dft_n), dtype='float64')
        dft_a[:a.shape[0], :a.shape[1]] = a
        print('deconv')
        dft_a = cv2.dft(dft_a, flags=cv2.DFT_COMPLEX_OUTPUT)
        dft_a = cv2.mulSpectrums(dft_a, ipsf, 0)
        print(dft_a)
        dft_a = cv2.idft(dft_a, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
        print(dft_a)
        tmp = dft_a[d//2:a.shape[0] + d//2, d//2:a.shape[1] + d//2]
        channel = np.array(tmp, dtype='uint8')
        cv2.imshow('new channel %d'%i, channel)
        new_channels.append(channel)
    result = cv2.merge(new_channels)
    return result
Example #7
def saliency_feature(img):
    img_orig = img
    img = cv2.resize(img, (64, 64))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # h = cv2.getOptimalDFTSize(img.shape[0])
    # w = cv2.getOptimalDFTSize(img.shape[1])
    # print "Resizing (%d, %d) to (%d, %d)" % (img.shape[0], img.shape[1], h, w)
    # h = (h - img.shape[0])/2.0
    # w = (w - img.shape[1])/2.0
    # img = cv2.copyMakeBorder(img, int(math.floor(h)), int(math.ceil(h)), int(math.floor(w)), int(math.ceil(w)), cv2.BORDER_CONSTANT, value=0)

    dft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
    A, P = cv2.cartToPolar(dft[:,:,0], dft[:,:,1])
    L = cv2.log(A)
    h_n = (1./3**2)*np.ones((3,3))
    R = L - cv2.filter2D(L, -1, h_n)
    S = cv2.GaussianBlur(cv2.idft(np.dstack(cv2.polarToCart(cv2.exp(R), P)), flags=cv2.DFT_REAL_OUTPUT)**2, (0,0), 8)
    S = cv2.resize(cv2.normalize(S, None, 0, 1, cv2.NORM_MINMAX), (img_orig.shape[1],img_orig.shape[0]))

    # cv2.namedWindow('tmp1', cv2.WINDOW_NORMAL)
    # cv2.imshow('tmp1', img_orig)
    # cv2.namedWindow('tmp', cv2.WINDOW_NORMAL)
    # cv2.imshow('tmp', S)
    # cv2.waitKey()

    return S
Example #8
    def apply_channel_deconvolution(self, img, psfsize=10, snrVal=8):
        # Based on deconvolution.py in OpenCV's Python samples
        
        img = img.astype('double')/255.0
        img = self.blur_edge(img)
        IMG = cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT)
    
        if (psfsize==0): return img
        
        defocus = True
    
        ang = 0
        d = psfsize
        snr = snrVal
        noise = 10**(-0.1*snr)

        if defocus:
            psf = self.defocus_kernel(d)
        else:
            psf = self.motion_kernel(ang, d)

        psf /= psf.sum()
        psf_pad = np.zeros_like(img)
        kh, kw = psf.shape
        psf_pad[:kh, :kw] = psf
        PSF = cv2.dft(psf_pad, flags=cv2.DFT_COMPLEX_OUTPUT, nonzeroRows = kh)
        PSF2 = (PSF**2).sum(-1)
        iPSF = PSF / (PSF2 + noise)[...,np.newaxis]
        RES = cv2.mulSpectrums(IMG, iPSF, 0)
        res = cv2.idft(RES, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT )
        res = np.roll(res, -kh//2, 0)
        res = np.roll(res, -kw//2, 1)

        return res
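blur_edge, defocus_kernel, and motion_kernel come from OpenCV's deconvolution.py sample; for reference, sketches along the lines of that sample (sizes and details may differ from the class's actual methods):

import cv2
import numpy as np

def blur_edge(img, d=31):
    # Fade the image border into a blurred, wrap-padded copy to
    # suppress ringing from the DFT's circular boundary
    h, w = img.shape[:2]
    img_pad = cv2.copyMakeBorder(img, d, d, d, d, cv2.BORDER_WRAP)
    img_blur = cv2.GaussianBlur(img_pad, (2*d+1, 2*d+1), -1)[d:-d, d:-d]
    y, x = np.indices((h, w))
    dist = np.dstack([x, w-x-1, y, h-y-1]).min(-1)
    wgt = np.minimum(np.float32(dist)/d, 1.0)
    return img*wgt + img_blur*(1-wgt)

def defocus_kernel(d, sz=65):
    # Filled anti-aliased circle of diameter ~d as a defocus PSF
    kern = np.zeros((sz, sz), np.uint8)
    cv2.circle(kern, (sz, sz), d, 255, -1, cv2.LINE_AA, shift=1)
    return np.float32(kern) / 255.0

def motion_kernel(angle, d, sz=65):
    # Straight motion streak of length d, rotated by `angle`
    kern = np.ones((1, d), np.float32)
    c, s = np.cos(angle), np.sin(angle)
    A = np.float32([[c, -s, 0], [s, c, 0]])
    sz2 = sz // 2
    A[:, 2] = (sz2, sz2) - np.dot(A[:, :2], ((d - 1) * 0.5, 0))
    return cv2.warpAffine(kern, A, (sz, sz), flags=cv2.INTER_CUBIC)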
Example #9
 def inverse_fourier_transform(spectral_image):
     """
         Method computes the inverse Fourier transform of a given image
     """
     f_ishift = np.fft.ifftshift(spectral_image)
     img_back = cv2.idft(f_ishift)
     return cv2.magnitude(img_back[:, :, 0], img_back[:, :, 1])
Example #10
def fft(img,x,y,w,h):
	rows, cols = img.shape
	crow, ccol = rows // 2, cols // 2
	dft = cv2.dft(np.float32(img),flags = cv2.DFT_COMPLEX_OUTPUT)
	dft_shift = np.fft.fftshift(dft)
	magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))
	mask = np.zeros((rows,cols,2),np.uint8)
	mask[crow-x:crow+w, ccol-y:ccol+h] = 1

	mag_mask = copy.copy(magnitude_spectrum)
	mag_mask[crow-x:crow+w, ccol-y:ccol+h] = 0

	fshift = dft_shift*mask
	f_ishift = np.fft.ifftshift(fshift)
	img_back = cv2.idft(f_ishift)
	img_back = cv2.magnitude(img_back[:,:,0],img_back[:,:,1])
	plt.subplot(121),plt.imshow(img, cmap = 'gray')
	plt.title('Input Image'), plt.xticks([]), plt.yticks([])
	plt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray')
	plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
	plt.show()

	plt.subplot(121),plt.imshow(img_back, cmap = 'gray')
	plt.title('Output Image'), plt.xticks([]), plt.yticks([])
	plt.subplot(122),plt.imshow(mag_mask, cmap = 'gray')
	plt.title('Cut Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
	plt.show()
Example #11
 def state_vis(self):
     f = cv2.idft(self.H, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT )
     h, w = f.shape
     f = np.roll(f, -h//2, 0)
     f = np.roll(f, -w//2, 1)
     kernel = np.uint8( (f-f.min()) / f.ptp()*255 )
     resp = self.last_resp
     resp = np.uint8(np.clip(resp/resp.max(), 0, 1)*255)
     vis = np.hstack([self.last_img, kernel, resp])
     return vis
Example #12
 def correlate(self, img):
     C = cv2.mulSpectrums(cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT), self.H, 0, conjB=True)
     resp = cv2.idft(C, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
     h, w = resp.shape
     _, mval, _, (mx, my) = cv2.minMaxLoc(resp)
     side_resp = resp.copy()
     cv2.rectangle(side_resp, (mx-5, my-5), (mx+5, my+5), 0, -1)
     smean, sstd = side_resp.mean(), side_resp.std()
     psr = (mval-smean) / (sstd+eps)
     return resp, (mx-w//2, my-h//2), psr
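These correlate/state_vis methods come from OpenCV's mosse.py sample; eps there is a module-level constant that keeps the peak-to-sidelobe ratio finite when the sidelobe deviation is zero:

eps = 1e-5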
Example #13
File: main.py Project: katejim/CV
def furieTransform(size):
    dftImage = cv2.dft(np.float32(inImg))
    dftShiftImage = np.fft.fftshift(dftImage)
    rows, cols = inImg.shape
    centralRow, centralCol = rows // 2, cols // 2
    mask = np.ones((rows, cols), np.uint8)
    mask[centralRow - size:centralRow + size, centralCol - size:centralCol + size] = 0
    fshift = dftShiftImage * mask
    fIShift = np.fft.ifftshift(fshift)
    imgResult = cv2.idft(fIShift)
    cv2.imwrite("out_furie.bmp", imgResult)
Example #14
def filterImg(img, filtro_magnitud):
    """Filter for single-channel images"""

    # Since the filter's phase is 0, the polar-to-Cartesian conversion is
    # direct (magnitude -> x, phase -> y)
    filtro = np.array([filtro_magnitud, np.zeros(filtro_magnitud.shape)]).swapaxes(0, 2).swapaxes(0, 1)
    imgf = cv.dft(np.float32(img), flags=cv.DFT_COMPLEX_OUTPUT)

    imgf = cv.mulSpectrums(imgf, np.float32(filtro), cv.DFT_ROWS)

    return cv.idft(imgf, flags=cv.DFT_REAL_OUTPUT | cv.DFT_SCALE)
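A minimal usage sketch, assuming `cv` is the `cv2` module; the input path and the Gaussian magnitude filter are assumptions. Note the filter must match the layout of the unshifted spectrum, hence the ifftshift:

import cv2 as cv
import numpy as np

img = cv.imread('input.png', 0)  # hypothetical input path
rows, cols = img.shape
y, x = np.indices((rows, cols))
# Centered Gaussian low-pass magnitude, then shifted so its peak
# lines up with the DC term of the unshifted spectrum
gauss = np.exp(-((x - cols // 2) ** 2 + (y - rows // 2) ** 2) / (2.0 * 30.0 ** 2))
filtro_magnitud = np.fft.ifftshift(gauss)
filtered = filterImg(img, filtro_magnitud)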
Example #15
 def correlate(self, img):
     C = cv2.mulSpectrums(cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT),
                          self.H,
                          0,
                          conjB=True)
     resp = cv2.idft(C, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
     h, w = resp.shape
     _, mval, _, (mx, my) = cv2.minMaxLoc(resp)
     side_resp = resp.copy()
     cv2.rectangle(side_resp, (mx - 5, my - 5), (mx + 5, my + 5), 0, -1)
     smean, sstd = side_resp.mean(), side_resp.std()
     psr = (mval - smean) / (sstd + eps)
     return resp, (mx - w // 2, my - h // 2), psr
Example #16
def lowpass(img):
    radius = 50
    dft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
    dft_shift = np.fft.fftshift(dft)
    r, c = img.shape
    cr, cc = r // 2, c // 2
    mask = np.zeros((r, c, 2), np.uint8)
    mask[cr - radius:cr + radius, cc - radius:cc + radius] = 1
    fshift = dft_shift * mask
    f_ishift = np.fft.ifftshift(fshift)
    nimg = cv2.idft(f_ishift)
    nimg = cv2.magnitude(nimg[:, :, 0], nimg[:, :, 1])
    return nimg
Example #17
 def HPF(self, im_source, im_mask):
     if len(im_source.shape) == 3:
         return None
     im_float32 = np.float32(im_source)
     dft = cv2.dft(im_float32, flags=cv2.DFT_COMPLEX_OUTPUT)
     dft_shift = np.fft.fftshift(dft)
     fshift = dft_shift * im_mask
     f_ishift = np.fft.ifftshift(fshift)
     im_back = cv2.idft(f_ishift)
     im_back = cv2.magnitude(im_back[:, :, 0], im_back[:, :, 1])
     Pmax = np.max(im_back)
     im_pow = im_back / Pmax * 255
     return np.uint8(im_pow)
Example #18
def getFourier(data, HPFsize=60):
    ''' performs a DFT and high pass filtering
            data: grayscale 2D image array
            HPFsize: High Pass Filter size of box to filter out
    '''
    r, c = int(data.shape[0]/2), int(data.shape[1]/2)
    # data = cv2.fastNlMeansDenoising(data, None, 10, 7, 21)
    data = np.fft.fftshift(cv2.dft(np.float32(data), flags=cv2.DFT_COMPLEX_OUTPUT))
    data[r-HPFsize:r+HPFsize, c-HPFsize:c+HPFsize] = 0
    data = cv2.idft(np.fft.ifftshift(data))
    data = (data/np.max(data)*255)**2
    data[np.where(data>255)] = 255
    return (data).astype(np.uint8)
Example #19
 def correlate(self, img):
     C = cv2.mulSpectrums(cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT), self.H, 0, conjB=True)
     resp = cv2.idft(C, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
     h, w = resp.shape
     _, mval, _, (mx, my) = cv2.minMaxLoc(resp)
     fmx = vertex(mx, *resp[my,mx-1:mx+2]) if (1 <= mx <= w-1) else mx
     fmy = vertex(my, *resp[my-1:my+2,mx]) if (1 <= my <= h-1) else my
     #print (mx - w//2, my - h//2), (fmx - w//2, fmy - h//2)
     side_resp = resp.copy()
     cv2.rectangle(side_resp, (mx-5, my-5), (mx+5, my+5), 0, -1)
     smean, sstd = side_resp.mean(), side_resp.std()
     psr = (mval-smean) / (sstd+eps)
     return resp, (fmx-w//2, fmy-h//2), psr
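vertex is an external helper that refines the integer peak location by fitting a parabola through three neighbouring response values; a minimal sketch under that assumption:

def vertex(x, y_left, y_center, y_right):
    # Sub-pixel peak position from three samples around x
    denom = y_left - 2.0 * y_center + y_right
    if denom == 0:
        return float(x)
    return x + 0.5 * (y_left - y_right) / denom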
Example #20
 def HPF(self,im_source,im_mask):
     if len(im_source.shape) == 3:
         return None
     im_float32 = np.float32(im_source)
     dft = cv2.dft(im_float32, flags = cv2.DFT_COMPLEX_OUTPUT)
     dft_shift = np.fft.fftshift(dft)
     fshift = dft_shift*im_mask
     f_ishift = np.fft.ifftshift(fshift)
     im_back = cv2.idft(f_ishift)
     im_back = cv2.magnitude(im_back[:,:,0],im_back[:,:,1])
     Pmax = np.max(im_back)
     im_pow = im_back/Pmax*255
     return np.uint8(im_pow)
Example #21
def apply_mask(mask, img):
    # apply mask and inverse DFT
    dft = cv2.dft(np.float32(img), flags = cv2.DFT_COMPLEX_OUTPUT)
    dft_shift = np.fft.fftshift(dft)
    rows, cols = img.shape
    crow, ccol = rows // 2, cols // 2
    # fshift = dft_shift*mask # for low pass filter
    dft_shift[crow-30:crow+30, ccol-30:ccol+30] = 0 #for high pass filter
    #f_ishift = np.fft.ifftshift(fshift) #fshift for low pass code
    f_ishift = np.fft.ifftshift(dft_shift)
    img_back = cv2.idft(f_ishift)
    img_back = cv2.magnitude(img_back[:,:,0],img_back[:,:,1])
    return img_back
Example #22
 def reconstructframe_ocvU(self, img, i):
     assert opencv, "No opencv present"
     mask = cv2.UMat((self.N // 2, self.N // 2), s=1, type=cv2.CV_8U)
     imU = cv2.UMat(img)
     diff = cv2.subtract(imU, self._imgstoreU[i])
     imf = cv2.multiply(cv2.dft(diff, flags=cv2.DFT_COMPLEX_OUTPUT),
                        self._prefilter_ocvU)
     cv2.copyTo(src=cv2.UMat(imf, (0, 0, self.N // 2, self.N // 2)),
                mask=mask,
                dst=cv2.UMat(self._carray_ocvU,
                             (0, 0, self.N // 2, self.N // 2)))
     cv2.copyTo(
         src=cv2.UMat(imf, (0, self.N // 2, self.N // 2, self.N // 2)),
         mask=mask,
         dst=cv2.UMat(self._carray_ocvU,
                      (0, 3 * self.N // 2, self.N // 2, self.N // 2)))
     cv2.copyTo(
         src=cv2.UMat(imf, (self.N // 2, 0, self.N // 2, self.N // 2)),
         mask=mask,
         dst=cv2.UMat(self._carray_ocvU,
                      (3 * self.N // 2, 0, self.N // 2, self.N // 2)))
     cv2.copyTo(
         src=cv2.UMat(imf,
                      (self.N // 2, self.N // 2, self.N // 2, self.N // 2)),
         mask=mask,
         dst=cv2.UMat(
             self._carray_ocvU,
             (3 * self.N // 2, 3 * self.N // 2, self.N // 2, self.N // 2)))
     img2 = cv2.multiply(
         cv2.idft(self._carray_ocvU,
                  flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT),
         self._reconfactorU[i])
     self._imgstoreU[i] = imU
     self._bigimgstoreU = cv2.add(
         self._bigimgstoreU,
         cv2.idft(cv2.multiply(cv2.dft(img2, flags=cv2.DFT_COMPLEX_OUTPUT),
                               self._postfilter_ocvU),
                  flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT))
     return self._bigimgstoreU
Example #23
    def update(_):
        ang = np.deg2rad(cv2.getTrackbarPos('angle', win))
        d = cv2.getTrackbarPos('d', win)
        noise = 10**(-0.1 * cv2.getTrackbarPos('SNR (db)', win))

        if defocus:
            psf = defocus_kernel(d)
        else:
            psf = motion_kernel(ang, d)
        cv2.imshow('psf', psf)

        psf /= psf.sum()
        psf_pad = np.zeros_like(img_r)
        kh, kw = psf.shape
        psf_pad[:kh, :kw] = psf
        PSF = cv2.dft(psf_pad, flags=cv2.DFT_COMPLEX_OUTPUT, nonzeroRows=kh)
        PSF2 = (PSF**2).sum(-1)
        iPSF = PSF / (PSF2 + noise)[..., np.newaxis]

        # RES_BW = cv2.mulSpectrums(IMG_BW, iPSF, 0)
        RES_R = cv2.mulSpectrums(IMG_R, iPSF, 0)
        RES_G = cv2.mulSpectrums(IMG_G, iPSF, 0)
        RES_B = cv2.mulSpectrums(IMG_B, iPSF, 0)

        # res_bw = cv2.idft(RES_BW, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT )
        res_r = cv2.idft(RES_R, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
        res_g = cv2.idft(RES_G, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
        res_b = cv2.idft(RES_B, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)

        res_rgb = np.zeros_like(img_rgb)
        res_rgb[..., 0] = res_r
        res_rgb[..., 1] = res_g
        res_rgb[..., 2] = res_b

        # res_bw = np.roll(res_bw, -kh//2, 0)
        # res_bw = np.roll(res_bw, -kw//2, 1)
        res_rgb = np.roll(res_rgb, -kh // 2, 0)
        res_rgb = np.roll(res_rgb, -kw // 2, 1)
        cv2.imshow(win, res_rgb)
Example #24
def imageIDFT(frequency_filtered_image):
    log.debug(
        "Computing the IDFT (Inverse Discrete Fourier Transform) of the provided image"
    )

    f_ishift = np.fft.ifftshift(frequency_filtered_image)
    result_image = cv.idft(f_ishift)
    result_image = cv.magnitude(result_image[:, :, 0], result_image[:, :, 1])

    result_image = cv.normalize(result_image, None, 255, 0, cv.NORM_MINMAX,
                                cv.CV_8UC1)
    log.debug("Finished computing the IDFT of the provided image")
    return result_image
Example #25
def high_freq(img):

    dft = cv2.dft(np.float32(img),flags = cv2.DFT_COMPLEX_OUTPUT)
    dft_shift = np.fft.fftshift(dft)

    magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))

    rows, cols = img.shape
    crow,ccol = int(rows/2) , int(cols/2)

    # create a mask first, center square is 1, remaining all zeros
    mask = np.ones((rows,cols,2),np.uint8)
    mask[crow-30:crow+30, ccol-30:ccol+30] = 0

    # apply mask and inverse DFT
    fshift = dft_shift*mask
    f_ishift = np.fft.ifftshift(fshift)
    img_high = cv2.idft(f_ishift)
    img_high = cv2.magnitude(img_high[:,:,0],img_high[:,:,1])

    mask2 = np.zeros((rows,cols,2),np.uint8)
    mask2[crow-30:crow+30, ccol-30:ccol+30] = 1
    
    # apply mask and inverse DFT
    fshift = dft_shift*mask2
    f_ishift = np.fft.ifftshift(fshift)
    img_low = cv2.idft(f_ishift)
    img_low = cv2.magnitude(img_low[:,:,0],img_low[:,:,1])

    # from matplotlib import pyplot as plt
    # plt.subplot(121),plt.imshow(img_high, cmap = 'gray')
    # plt.title('Frequency Component'), plt.xticks([]), plt.yticks([])
    # plt.subplot(122),plt.imshow(img_low, cmap = 'gray')
    # plt.title('Base Component'), plt.xticks([]), plt.yticks([])
    # plt.show()
    
    # img_high = cv2.cvtColor(img_high, cv2.COLOR_GRAY2BGR)
    return img_high,img_low
Example #26
def create_Frequency2DFiltering(complexImg, filter):
    #complexImg = cv.dft(complexImg)#Fourier
    complexImg = shiftDFT(complexImg)
    complexImg = cv.mulSpectrums(complexImg, filter, 0)

    complexImg = shiftDFT(complexImg)  #Inverse Fourier
    m = create_spectrum_magnitude_display(complexImg, True)
    result = cv.idft(complexImg)

    (myplanes_0, myplanes_1) = cv.split(result)
    result = cv.magnitude(myplanes_0, myplanes_1)
    result = cv.normalize(result, result, 0, 1, cv.NORM_MINMAX)
    imageRes = result
    return imageRes, m
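shiftDFT and create_spectrum_magnitude_display are project helpers; shiftDFT is the usual quadrant swap that moves the DC term to (and, applied again, back from) the centre. A sketch assuming that behaviour:

import numpy as np

def shiftDFT(complexImg):
    # Swap quadrants so the zero-frequency term moves to the centre;
    # applying it twice restores the original layout on even-sized images
    return np.fft.fftshift(complexImg, axes=(0, 1))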
Example #27
    def to_image(self,filename,color=True):
        #convert back into image
        f_ishift = np.fft.ifftshift(self.unreshape_arr)
        img_back = cv2.idft(f_ishift)
        img_back = cv2.magnitude(img_back[:,:,0],img_back[:,:,1])

        # plt.subplot(121),plt.imshow(img_back, cmap = 'gray')
        # plt.title('Image'), plt.xticks([]), plt.yticks([])
        # plt.show()

        #save images with pyplot
        if color:
            plt.imsave(filename + "_color.png", img_back)
        plt.imsave(filename + "_gray.png",img_back,cmap='gray')
Example #28
    def filtering(self):
        """Performs frequency filtering on an input image
        returns a filtered image, magnitude of DFT, magnitude of filtered DFT        
        ----------------------------------------------------------
        You are allowed to used inbuilt functions to compute fft
        There are packages available in numpy as well as in opencv
        Steps:
        1. Compute the fft of the image
        2. shift the fft to center the low frequencies
        3. get the mask (write your code in functions provided above) the functions can be called by self.filter(shape, cutoff, order)
        4. filter the image frequency based on the mask (Convolution theorem)
        5. compute the inverse shift
        6. compute the inverse fourier transform
        7. compute the magnitude
        8. You will need to do a full contrast stretch on the magnitude and depending on the algorithm you may also need to
        take negative of the image to be able to view it (use post_process_image to write this code)
        Note: You do not have to do zero padding as discussed in class, the inbuilt functions takes care of that
        filtered image, magnitude of DFT, magnitude of filtered DFT: Make sure all images being returned have grey scale full contrast stretch and dtype=uint8 
        """

        #print(self.image.dtype)
        img_float32 = np.float32(self.image)

        dft = cv2.dft(img_float32, flags=cv2.DFT_COMPLEX_OUTPUT)
        dft_shift = np.fft.fftshift(dft)

        #Magnitude of DFT
        magnitude = 10 * np.log(
            cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1]))
        magnitude_DFT = np.array(magnitude, dtype=np.uint8)

        #mask
        mask = self.filter(self.image.shape, self.cutoff, self.order)

        # apply mask and inverse DFT
        fshift = dft_shift * mask
        f_ishift = np.fft.ifftshift(fshift)
        i_dft = cv2.idft(f_ishift, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)

        #print(nmg.dtype)
        img_back = i_dft.astype(np.uint8)

        #Magnitude of filtered DFT
        magnitude_filtered_DFT = np.array(magnitude_DFT * mask[:, :, 1],
                                          dtype=np.uint8)

        # Full scale contrast stretch
        fc_stretch = self.post_process_image(img_back)

        return [fc_stretch, magnitude_DFT, magnitude_filtered_DFT]
Example #29
    def _problem1(self, img):
        angle_img = fft2(img)
        fft2_img = dft(img, flags=cv.DFT_COMPLEX_OUTPUT)
        fft2_img_shift = fftshift(fft2_img)
        mag = cv.magnitude(fft2_img_shift[:,:, 0], fft2_img_shift[:,:, 1])
        _min, _max = log(1 + abs(mag.min())), log(1 + abs(mag.max()))
        new_img = 255 * (log(1 + abs(mag)) - _min) / (_max - _min)

        idft_img = idft(ifftshift(fft2_img_shift), flags=cv.DFT_SCALE)
        new_img_ifft = cv.magnitude(idft_img[:,:, 0], idft_img[:,:, 1])
        new_img_ifft[new_img_ifft > 255] = 255
        new_img_ifft[new_img_ifft < 0] = 0

        return new_img, angle(angle_img), new_img_ifft
Example #30
    def filtering(self, grayscale, fft_filter):
        image_fft2 = self.image_to_fft_center(grayscale)
       
        fshift = image_fft2 * fft_filter
        cv.imshow('fshift', cv.resize(np.uint8(self.get_log_scale(fshift[:, :, 0])), (0, 0), fx=0.5, fy=0.5))
        # cv.waitKey(-1)
        f_ishift = np.fft.ifftshift(fshift)
        img_back = cv.idft(f_ishift)
        result_filter = cv.magnitude(img_back[:,:,0],img_back[:,:,1])
        result_filter = result_filter.clip(min=0)
        result_filter = 255*(result_filter.copy() - result_filter.min()) / (result_filter.max()-result_filter.min())
 
        return np.uint8(result_filter)
Example #31
def spectralResidual(frame):
    frame_gray = gray(frame)
    dft = cv2.dft(np.float32(frame_gray), flags=cv2.DFT_COMPLEX_OUTPUT)
    A, P = cv2.cartToPolar(dft[:,:,0],dft[:,:,1])
    L = np.log(A)
    kernel = np.ones((3,3),np.float32)/9
    new_L = cv2.filter2D(L, -1, kernel)
    R = new_L - L
    new_dft = np.empty(dft.shape)
    new_dft[:,:,0], new_dft[:,:,1] = cv2.polarToCart(R,P)
    Mr = cv2.idft(new_dft)
    MrI = cv2.magnitude(Mr[:,:,0],Mr[:,:,1])

    return MrI
Example #32
def high_pass_filter(image, freq_cut):
    dft = cv2.dft(np.float32(image),flags = cv2.DFT_COMPLEX_OUTPUT)
    radius = freq_cut

    H = np.ones_like(dft)
    for i in range(dft.shape[0]):
        for j in range(dft.shape[1]):
            if np.sqrt(i**2 + j**2) <= radius:
                H[i,j] = 0

    fimage = dft * H

    image_enhanced = cv2.idft(fimage, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
    return image_enhanced
Example #33
    def inner(img):
        img = np.array(img, np.float32)
        dft = cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT)

        dft_shift = np.fft.fftshift(dft)

        mask = pass_filter(img)

        fshift = dft_shift * mask
        f_ishift = np.fft.ifftshift(fshift)
        result_img = cv2.idft(f_ishift, flags=cv2.DFT_SCALE)
        result_img = cv2.magnitude(result_img[:, :, 0], result_img[:, :, 1])

        return result_img
Example #34
File: features.py Project: Mirann/Genetic
def fourier_back_transfrom(img, dft_shift):
    rows, cols, dim = img.shape
    crow, ccol = rows // 2, cols // 2

    # create a mask first, center square is 1, remaining all zeros
    mask = np.zeros((rows,cols,2),np.uint8)
    mask[crow-30:crow+30, ccol-30:ccol+30] = 1

    # apply mask and inverse DFT
    fshift = dft_shift*mask
    f_ishift = np.fft.ifftshift(fshift)
    img_back = cv2.idft(f_ishift)
    img_back = cv2.magnitude(img_back[:,:,0],img_back[:,:,1])
    return img_back
Example #35
    def detect_scale(self, image):
        xsf = self.get_scale_sample(image)

        # Compute AZ in the paper
        add_temp = cv2.reduce(complexMultiplication(self.sf_num, xsf), 0, cv2.REDUCE_SUM)

        # compute the final y
        scale_response = cv2.idft(complexDivisionReal(add_temp, (self.sf_den + self.scale_lambda)), None, cv2.DFT_REAL_OUTPUT)

        # Get the max point as the final scaling rate
        # pv: maximum response value; pi: location (index) of that maximum
        _, pv, _, pi = cv2.minMaxLoc(scale_response)
        
        return pi
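complexMultiplication and complexDivisionReal are helpers from the DSST/KCF scale-filter code this example is ported from; sketches of their usual definitions, operating on 2-channel (re, im) arrays:

import numpy as np

def complexMultiplication(a, b):
    # Channel-wise complex product of two (re, im) arrays
    res = np.zeros_like(a)
    res[:, :, 0] = a[:, :, 0] * b[:, :, 0] - a[:, :, 1] * b[:, :, 1]
    res[:, :, 1] = a[:, :, 0] * b[:, :, 1] + a[:, :, 1] * b[:, :, 0]
    return res

def complexDivisionReal(a, b):
    # Divide a complex (re, im) array by a real-valued array
    res = np.zeros_like(a)
    res[:, :, 0] = a[:, :, 0] / b
    res[:, :, 1] = a[:, :, 1] / b
    return res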
Example #36
def process(ip_image):
    ###########################
    ## Your Code goes here
    ###########################
    id_list = []
    f_img = []
    frame = ip_image[0:740, 0:1292]
    frame_con = cv2.addWeighted(frame, 2.2, np.zeros(frame.shape, frame.dtype),
                                0, -60)
    frame_con = fna(frame_con)
    im = split(frame_con)
    for i in range(0, 3):
        f_img.append(cv2.dft(np.array(im[i]), flags=cv2.DFT_COMPLEX_OUTPUT))
    angle = np.deg2rad(90)
    psf_length = 19
    SNR = 10**(-0.1 * 18)
    psf_var = PSF(angle, psf_length)
    psf_var = psf_var / psf_var.sum()
    i = 0
    temp = []
    for i in range(0, 3):
        mask = np.zeros_like(im[i])
        h, w = psf_var.shape
        mask[:h, :w] = psf_var
        f_psf_var = cv2.dft(mask, flags=cv2.DFT_COMPLEX_OUTPUT, nonzeroRows=h)
        psf1 = (f_psf_var**2).sum(-1)
        iff_psf_var = (f_psf_var / (psf1 + SNR)[..., np.newaxis])
        f_temp = (cv2.mulSpectrums(f_img[i], iff_psf_var, 0))
        temp1 = (cv2.idft(f_temp, flags=cv2.DFT_SCALE))
        temp1 = cv2.magnitude(temp1[:, :, 0], temp1[:, :, 1])
        temp.append(temp1)
    bgr = join(temp)
    bgr = np.multiply(255, bgr)
    bgr = np.clip(bgr, 0, 255)  # clip before the cast so values don't wrap
    bgr = bgr.astype(np.uint8)
    ip_image, id_list = aruco_detector(bgr)
    ip_image = cv2.fastNlMeansDenoisingColored(ip_image, None, 6, 4, 15, 45)
    ip_image = cv2.addWeighted(ip_image, 1.5,
                               np.ones(ip_image.shape, ip_image.dtype), 0, -40)
    ip_image = cv2.copyMakeBorder(ip_image,
                                  0,
                                  100,
                                  0,
                                  308,
                                  cv2.BORDER_CONSTANT,
                                  value=[255, 255, 255])
    ip_image = sharpen(ip_image)

    cv2.imwrite((generated_folder_path + "/" + "aruco_with_id.png"), ip_image)
    return (ip_image, id_list)
Example #37
def ifft(fft_mat):
    '''Inverse Fourier transform; returns the reconstructed image.'''
    # Inverse shift: move the low frequencies back to the corners
    # and the high frequencies to the middle
    f_ishift_mat = np.fft.ifftshift(fft_mat)

    # Inverse Fourier transform
    img_back = cv2.idft(f_ishift_mat)

    # Convert the complex result to magnitude: sqrt(re^2 + im^2)
    img_back = cv2.magnitude(*cv2.split(img_back))

    # Normalize to the range 0..255
    cv2.normalize(img_back, img_back, 0, 255, cv2.NORM_MINMAX)
    return np.uint8(np.around(img_back))
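For symmetry, the forward transform this ifft expects would look something like the following sketch (fft_mat being the shifted complex spectrum):

import cv2
import numpy as np

def fft(img):
    # Forward DFT with complex output, shifted so the low
    # frequencies sit in the middle; ifft() above undoes both steps
    dft_mat = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
    return np.fft.fftshift(dft_mat)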
Example #38
def low_pass_filter(img_in):  # Write low pass filter here
    dft = cv2.dft(np.float32(img_in), flags=cv2.DFT_COMPLEX_OUTPUT)
    dft_shift = np.fft.fftshift(dft)
    magnitude_spectrum = 20 * np.log(
        cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1]))
    rows, cols = img_in.shape
    crow, ccol = rows / 2, cols / 2
    mask = np.zeros((rows, cols, 2), np.uint8)
    mask[int(crow) - 10:int(crow) + 10, int(ccol) - 10:int(ccol) + 10] = 1
    fshift = dft_shift * mask
    f_ishift = np.fft.ifftshift(fshift)
    img_back = cv2.idft(f_ishift, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
    img_out = img_back
    return img_out
Example #39
    def process(self):
        start = time()
        rows, cols, _ = self.dft.shape
        mask = np.zeros((rows, cols), np.float32)
        half = np.sqrt(rows**2 + cols**2) / 2
        radius = int(half * self.split_spin.value() / 100)
        mask = cv.circle(mask, (cols // 2, rows // 2), radius, 1, cv.FILLED)
        kernel = 2 * int(half * self.smooth_spin.value() / 100) + 1
        mask = cv.GaussianBlur(mask, (kernel, kernel), 0)
        mask /= np.max(mask)
        threshold = int(self.thr_spin.value() / 100 * 255)
        if threshold > 0:
            mask[self.magnitude0 < threshold] = 0
            zeros = (mask.size - np.count_nonzero(mask)) / mask.size * 100
        else:
            zeros = 0
        self.zero_label.setText(
            self.tr('(zeroed coefficients = {:.2f}%)').format(zeros))
        mask2 = np.repeat(mask[:, :, np.newaxis], 2, axis=2)

        rows0, cols0, _ = self.image.shape
        low = cv.idft(np.fft.ifftshift(self.dft * mask2), flags=cv.DFT_SCALE)
        low = norm_mat(cv.magnitude(low[:, :, 0], low[:, :,
                                                      1])[:rows0, :cols0],
                       to_bgr=True)
        self.low_viewer.update_processed(low)
        high = cv.idft(np.fft.ifftshift(self.dft * (1 - mask2)),
                       flags=cv.DFT_SCALE)
        high = norm_mat(cv.magnitude(high[:, :, 0], high[:, :, 1]),
                        to_bgr=True)
        self.high_viewer.update_processed(
            np.copy(high[:self.image.shape[0], :self.image.shape[1]]))
        self.magnitude = (self.magnitude0 * mask).astype(np.uint8)
        self.phase = (self.phase0 * mask).astype(np.uint8)
        self.postprocess()
        self.info_message.emit(
            self.tr('Frequency Split = {}'.format(elapsed_time(start))))
Example #40
    def get_shift(self, imgA, imgB):
        rv = np.array([0.0, 0.0])
        if (imgA is not None) and (imgB is not None) and (imgA.shape==imgB.shape):
            # Phase correlation.
            A  = cv2.dft(imgA)
            B  = cv2.dft(imgB)
            AB = cv2.mulSpectrums(A, B, flags=0, conjB=True)
            normAB = cv2.norm(AB)
            if (normAB != 0.0):
                crosspower = AB / normAB
                shift = cv2.idft(crosspower)
                shift0  = np.roll(shift,  int(shift.shape[0]/2), 0)
                shift00 = np.roll(shift0, int(shift.shape[1]/2), 1) # Roll the matrix so 0,0 goes to the center of the image.
                
                # Get the coordinates of the maximum shift.
                kShift = np.argmax(shift00)
                (iShift,jShift) = np.unravel_index(kShift, shift00.shape)
    
                # Get weighted centroid of a region around the peak, for sub-pixel accuracy.
                w = 7
                r = int((w-1)/2)
                i0 = clip(iShift-r, 0, shift00.shape[0]-1)
                i1 = clip(iShift+r, 0, shift00.shape[0]-1)+1
                j0 = clip(jShift-r, 0, shift00.shape[1]-1)
                j1 = clip(jShift+r, 0, shift00.shape[1]-1)+1
                peak = shift00[i0:i1].T[j0:j1].T
                moments = cv2.moments(peak, binaryImage=False)
                           
                if (moments['m00'] != 0.0):
                    iShiftSubpixel = moments['m01']/moments['m00'] + float(i0)
                    jShiftSubpixel = moments['m10']/moments['m00'] + float(j0)
                else:
                    iShiftSubpixel = float(shift.shape[0])/2.0
                    jShiftSubpixel = float(shift.shape[1])/2.0
                
                # Accommodate the matrix roll we did above.
                iShiftSubpixel -= float(shift.shape[0])/2.0
                jShiftSubpixel -= float(shift.shape[1])/2.0
    
                # Convert unsigned shifts to signed shifts. 
                height = float(shift00.shape[0])
                width  = float(shift00.shape[1])
                iShiftSubpixel  = ((iShiftSubpixel+height/2.0) % height) - height/2.0
                jShiftSubpixel  = ((jShiftSubpixel+width/2.0) % width) - width/2.0
                
                rv = np.array([iShiftSubpixel, jShiftSubpixel])

            
        return rv
Example #41
 def mix(self, imageToBeMixed: 'ImageModel', magnitudeOrRealRatio: float,
         phaesOrImaginaryRatio: float, mode: 'Modes') -> np.ndarray:
     """
     a function that takes ImageModel object mag ratio, phase ration 
     """
     ###
     # implement this function
     ###
     mix = np.zeros((self.imgByte.shape[0], self.imgByte.shape[1], 2),
                    'float64')
     if mode.value == "testMagAndPhaseMode":
         if (self.uniMag and imageToBeMixed.uniPh):
             real, imaginary = cv.polarToCart(
                 self.uniMagnitude * magnitudeOrRealRatio +
                 imageToBeMixed.magnitude * (1 - magnitudeOrRealRatio),
                 self.phase * (1 - phaesOrImaginaryRatio) +
                 imageToBeMixed.uniPhase * phaesOrImaginaryRatio,
                 angleInDegrees=True)
         elif (self.uniMag):
             real, imaginary = cv.polarToCart(
                 self.uniMagnitude * magnitudeOrRealRatio +
                 imageToBeMixed.magnitude * (1 - magnitudeOrRealRatio),
                 self.phase * (1 - phaesOrImaginaryRatio) +
                 imageToBeMixed.phase * phaesOrImaginaryRatio,
                 angleInDegrees=True)
         elif (imageToBeMixed.uniPh):
             real, imaginary = cv.polarToCart(
                 self.magnitude * magnitudeOrRealRatio +
                 imageToBeMixed.magnitude * (1 - magnitudeOrRealRatio),
                 self.phase * (1 - phaesOrImaginaryRatio) +
                 imageToBeMixed.uniPhase * phaesOrImaginaryRatio,
                 angleInDegrees=True)
         else:
             real, imaginary = cv.polarToCart(
                 self.magnitude * magnitudeOrRealRatio +
                 imageToBeMixed.magnitude * (1 - magnitudeOrRealRatio),
                 self.phase * (1 - phaesOrImaginaryRatio) +
                 imageToBeMixed.phase * phaesOrImaginaryRatio,
                 angleInDegrees=True)
     elif mode.value == "testRealAndImagMode":
         real = self.real * magnitudeOrRealRatio + imageToBeMixed.real * (
             1 - magnitudeOrRealRatio)
         imaginary = self.imaginary * (
             1 - phaesOrImaginaryRatio
         ) + imageToBeMixed.imaginary * phaesOrImaginaryRatio
     mix[:, :, 0], mix[:, :, 1] = real, imaginary
     invImg = cv.idft(mix, flags=cv.DFT_SCALE | cv.DFT_REAL_OUTPUT)
     invImg *= 255.0 / np.max(invImg)
     return invImg
Example #42
def whitened_matched_filter(data, locs, window):
    """
    Function for using whitened matched filter to the original signal for better
    SNR. Use welch method to approximate the spectral density of the signal.
    Rescale the signal in frequency domain. After scaling, convolve the signal with
    peak-triggered-average to make spikes more prominent.
    
    Args:
        data: 1-d array
            input signal

        locs: 1-d array
            spike times

        window: 1-d array
            window with size of temporal filter

    Returns:
        datafilt: 1-d array
            signal processed after whitened matched filter
    
    """
    N = np.ceil(np.log2(len(data)))
    censor = np.zeros(len(data))
    censor[locs] = 1
    censor = np.int16(
        np.convolve(censor.flatten(),
                    np.ones([1, len(window)]).flatten(), 'same'))
    censor = (censor < 0.5)
    noise = data[censor]

    _, pxx = signal.welch(noise,
                          fs=2 * np.pi,
                          window=signal.get_window('hamming', 1000),
                          nfft=2**N,
                          detrend=False,
                          nperseg=1000)
    Nf2 = np.concatenate([pxx, np.flipud(pxx[1:-1])])
    scaling_vector = 1 / np.sqrt(Nf2)

    cc = np.pad(data.copy(), (0, int(2**N - len(data))), 'constant')
    dd = (cv2.dft(cc, flags=cv2.DFT_SCALE + cv2.DFT_COMPLEX_OUTPUT)[:, 0, :] *
          scaling_vector[:, np.newaxis])[:, np.newaxis, :]
    dataScaled = cv2.idft(dd)[:, 0, 0]
    PTDscaled = dataScaled[(locs[:, np.newaxis] + window)]
    PTAscaled = np.mean(PTDscaled, 0)
    datafilt = np.convolve(dataScaled, np.flipud(PTAscaled), 'same')
    datafilt = datafilt[:len(data)]
    return datafilt
Example #43
    def apply_inverse_fourier(self, fft_shift, min=0, max=255):
        # apply the inverse transform
        f_ishift = np.fft.ifftshift(fft_shift)
        img_back = cv2.idft(f_ishift)
        img_back = np.abs(cv2.magnitude(img_back[:, :, 0], img_back[:, :, 1]))
        # normalize the image and convert to 8-bit
        norm_image = cv2.normalize(img_back,
                                   None,
                                   alpha=min,
                                   beta=max,
                                   norm_type=cv2.NORM_MINMAX,
                                   dtype=cv2.CV_8UC1).astype(np.uint8)

        return norm_image
Example #44
        def highpass_1(img):
            img_float32 = np.float32(img)

            dft = cv2.dft(img_float32, flags=cv2.DFT_COMPLEX_OUTPUT)
            dft_shift = np.fft.fftshift(dft)
            magnitude_spectrum_img = 20 * np.log(
                cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1]))

            mask_hp = highpass_0(img, 5)

            f_shift_hp = dft_shift * mask_hp
            f_ishift_hp = np.fft.ifftshift(f_shift_hp)
            img_back_hp = cv2.idft(f_ishift_hp,
                                   flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
            return img_back_hp
Example #45
def PHOT(inp):
    # 2D Discrete Fourier Transform (DFT) is used to find the frequency domain
    dft = cv2.dft(np.float32(inp), flags=cv2.DFT_COMPLEX_OUTPUT)
    MAG, PHASE = cv2.cartToPolar(dft[:, :, 0], dft[:, :, 1])
    dft[:, :, 0] = dft[:, :, 0] / MAG
    dft[:, :, 1] = dft[:, :, 1] / MAG

    img_back = cv2.idft(dft)
    MAG = img_back[:, :, 0]
    MAG = cv2.GaussianBlur(MAG, (9, 9), 0)
    m = np.mean(MAG)
    MAG = (MAG - m) * (MAG - m)
    cv2.normalize(MAG, MAG, 0, 1.0, cv2.NORM_MINMAX)

    return MAG.astype('float32')
Example #46
def four():
    image = cv2.imread(f'{PATH}/first.jpg', 0)
    mu, sigma = 1, 2
    rows, cols = image.shape
    s = np.random.normal(mu, sigma, (rows, cols, 2))
    dft_image = cv2.dft(np.float32(image), flags=cv2.DFT_COMPLEX_OUTPUT)
    dft_s = dft_image * s
    image_back = cv2.idft(dft_s)
    image_back = cv2.magnitude(image_back[:, :, 0], image_back[:, :, 1])
    plt.subplot(121), plt.imshow(image, cmap='gray')
    plt.title('Input Image'), plt.xticks([]), plt.yticks([])
    plt.subplot(122), plt.imshow(image_back, cmap='gray')
    plt.title('After DFT'), plt.xticks([]), plt.yticks([])
    plt.show()
    """ 
    def test(self, frame):
        (x,y), (w,h) = self.pos, self.size
        win = cv2.createHanningWindow((w,h), cv2.CV_32F)

        img1 = self.preprocess(self.last_img, win)
        F1 = cv2.dft(img1, flags = cv2.DFT_COMPLEX_OUTPUT)
        G1 = cv2.mulSpectrums(F1, self.H, 0, conjB = True)
        resp1 = cv2.idft(G1, flags = cv2.DFT_SCALE|cv2.DFT_REAL_OUTPUT)
            
        img = cv2.getRectSubPix(frame, (w, h), (x,y))
        img2 = self.preprocess(img, win)
        F2 = cv2.dft(img2, flags = cv2.DFT_COMPLEX_OUTPUT)
        G2 = cv2.mulSpectrums(F2, self.H, 0, conjB = True)
        resp2 = cv2.idft(G2, flags = cv2.DFT_SCALE|cv2.DFT_REAL_OUTPUT)
    
        merge1 = np.hstack((img1 , resp1))
        merge2 = np.hstack((img2, resp2))
        merge = np.vstack((merge1, merge2))
    

        name = 'comparison--last vs cur.'
        cv2.namedWindow(name, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(name, 600,400)
        cv2.imshow(name, merge)
Example #48
def ideal_pass_filter(img_dft_shift, is_low):
    ipf_shift = np.copy(img_dft_shift)
    for i in range(M):
        for j in range(N):
            calc = math.sqrt((i - M / 2)**2 + (j - N / 2)**2)
            if (is_low and calc > CUTOFF_FREQ) or\
               (not is_low and calc < CUTOFF_FREQ):
                ipf_shift[i, j] = 0

    f_ishift = np.fft.ifftshift(ipf_shift)
    img_back_ipf = cv.idft(f_ishift, flags=cv.DFT_SCALE | cv.DFT_REAL_OUTPUT)
    img_back_ipf[img_back_ipf < 0] = 0
    img_back_ipf[img_back_ipf > 255] = 255

    return img_back_ipf
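M, N, and CUTOFF_FREQ are module-level globals here; a hypothetical setup (names from the example, values assumed):

import cv2 as cv
import numpy as np

img = cv.imread('input.png', 0)    # hypothetical input path
M, N = img.shape                   # spectrum dimensions scanned by the loops
CUTOFF_FREQ = 30                   # hypothetical cutoff radius, in samples
dft_shift = np.fft.fftshift(cv.dft(np.float32(img), flags=cv.DFT_COMPLEX_OUTPUT))
lowpassed = ideal_pass_filter(dft_shift, is_low=True)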
Example #49
def phaseCorrelation(img_orig, img_transformed):
    # Step 3.1 - Initialize complex conjugates for original image and magnitudes
    orig_conj = np.copy(img_orig)
    orig_conj[:,:,IM_IDX] = -orig_conj[:,:,IM_IDX]
    orig_mags = cv2.magnitude(img_orig[:,:,RE_IDX],img_orig[:,:,IM_IDX])
    img_trans_mags = cv2.magnitude(img_transformed[:,:,RE_IDX],img_transformed[:,:,IM_IDX])
    # Step 3.2 - Do deconvolution
    # multiplication of complex numbers ===> (x + yi) * (u + vi) = (xu - yv) + (xv + yu)i
    # deconvolution ( H* x G ) / |H x G|
    realPart = (orig_conj[:,:,RE_IDX] * img_transformed[:,:,RE_IDX] - orig_conj[:,:,IM_IDX] * img_transformed[:,:,IM_IDX]) / (orig_mags * img_trans_mags)
    imaginaryPart = (orig_conj[:,:,RE_IDX] * img_transformed[:,:,IM_IDX] + orig_conj[:,:,IM_IDX] * img_transformed[:,:,RE_IDX]) / ( orig_mags * img_trans_mags)
    result = np.dstack((realPart, imaginaryPart))
    result_idft = cv2.idft(result)
    # Step 3.3 - Find Max value (angle and scaling factor)
    result_mags = cv2.magnitude(result_idft[:,:,RE_IDX],result_idft[:,:,IM_IDX])
    return np.unravel_index( np.argmax(result_mags), result_mags.shape)
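RE_IDX and IM_IDX index the real and imaginary channels of OpenCV's complex DFT output; they are module constants in the source project, presumably defined as:

RE_IDX = 0  # real part, dft[:, :, 0]
IM_IDX = 1  # imaginary part, dft[:, :, 1]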
Example #50
File: deconv.py Project: fzliu/radiant
def apply_filter(img, ftype, flen, ksize=-1, ori=0):
    """
        Apply a particular deblur filter.

        :param str ftype:
            Filter type.

        :param int ksize:
            Kernel size.

        :param int flen:
            Filter length (must be strictly smaller than the kernel size).

        :param int ori:
            Optional kernel orientation. Unused for circular kernels.
    """

    if ksize < 0:
        ksize = flen

    assert ksize >= flen, "kernel size must not be less than filter length"

    img = np.array(img, dtype=np.float32)

    # frequency domain representation of the image
    img = blur_edge(img, flen)
    img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    IMG = cv2.dft(img_gray, flags=cv2.DFT_COMPLEX_OUTPUT)
    print(IMG)

    # make the PSF for deblurring
    if ftype == "linear":
        psf = makeLinearKernel(ksize=ksize, flen=flen, angle=ori)
    else:
        psf = makeCircularKernel(ksize=ksize, flen=flen)

    # perform the deconvolution
    psf_pad = np.zeros_like(img_gray)
    kh, kw = psf.shape
    psf_pad[:kh, :kw] = psf
    PSF = cv2.dft(psf_pad, flags=cv2.DFT_COMPLEX_OUTPUT, nonzeroRows=kh)
    PSF2 = (PSF**2).sum(-1)
    iPSF = PSF / (PSF2 + 10**5)[...,np.newaxis]
    RES = cv2.mulSpectrums(IMG, iPSF, 0)
    res = cv2.idft(RES, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
    res = np.roll(res, -kh//2, 0)
    res = np.roll(res, -kw//2, 1)
Example #51
 def update_butterworth_win(self, dummy=None):
     """
     Update Butterworth filter param and the result image.
     """
     sb = cv2.getTrackbarPos("stopband**2",
                             self.ctrl_panel_winname)
     if sb == 0:
         sb = 1
         print "Stopband should be more than 0. Reset to 1."
     bw_filter = self.get_butterworth_filter(stopband2=sb, showdft=True)
     dst_complex = bw_filter * self.dft4img#cv2.multiply(self.dft4img, bw_filter)
     dst_complex = cv2.idft(np.fft.ifftshift(dst_complex))
     dst = np.uint8(cv2.magnitude(dst_complex[:,:,0], dst_complex[:,:,1]))
     self.tmp = dst
     self.get_dft(self.tmp, showdft=True)
     self.hist_lines(dst, showhist=True)
     cv2.imshow(self.test_winname, dst)
Example #52
def execute_fft(image):
    cv2.imshow('image', image)
    # getting layer DFTs
    dfts = []
    channels = cv2.split(image)
    for channel in channels:
        #cv2.imshow('channel %d'%i, channel)
        channel = np.array(channel, dtype='float')
        dft = cv2.dft(channel, flags=cv2.DFT_SCALE)
        dfts.append(dft)
    new_channels = []
    for dft in dfts:
        channel = cv2.idft(dft)
        channel = np.array(channel, dtype='uint8')
        #cv2.imshow('new channel %d'%i, channel)
        new_channels.append(channel)
    result = cv2.merge(new_channels)
    cv2.imshow('result', result)
Example #53
    def _get_channel_sal_magn(self, channel):
        """Returns the log-magnitude of the Fourier spectrum

            This method calculates the log-magnitude of the Fourier spectrum
            of a single-channel image. This image could be a regular grayscale
            image, or a single color channel of an RGB image.

            :param channel: single-channel input image
            :returns: log-magnitude of Fourier spectrum
        """
        # do FFT and get log-spectrum
        if self.use_numpy_fft:
            img_dft = np.fft.fft2(channel)
            magnitude, angle = cv2.cartToPolar(np.real(img_dft),
                                               np.imag(img_dft))
        else:
            img_dft = cv2.dft(np.float32(channel),
                              flags=cv2.DFT_COMPLEX_OUTPUT)
            magnitude, angle = cv2.cartToPolar(img_dft[:, :, 0],
                                               img_dft[:, :, 1])

        # get log amplitude
        log_ampl = np.log10(magnitude.clip(min=1e-9))

        # blur log amplitude with avg filter
        log_ampl_blur = cv2.blur(log_ampl, (3, 3))

        # residual
        residual = np.exp(log_ampl - log_ampl_blur)

        # back to cartesian frequency domain
        if self.use_numpy_fft:
            real_part, imag_part = cv2.polarToCart(residual, angle)
            img_combined = np.fft.ifft2(real_part + 1j*imag_part)
            magnitude, _ = cv2.cartToPolar(np.real(img_combined),
                                           np.imag(img_combined))
        else:
            img_dft[:, :, 0], img_dft[:, :, 1] = cv2.polarToCart(residual,
                                                                 angle)
            img_combined = cv2.idft(img_dft)
            magnitude, _ = cv2.cartToPolar(img_combined[:, :, 0],
                                           img_combined[:, :, 1])

        return magnitude
Example #54
 def _fft(self, batch):
     for i in range(len(batch)):
         temp = batch[i]
         temp = temp[ :, :, 0]
         x = len(temp[0])
         dft = cv2.dft(np.float32(temp),flags = cv2.DFT_COMPLEX_OUTPUT)
         dft_shift = np.fft.fftshift(dft)
         rows, cols = temp.shape
         crow,ccol = rows/2 , cols/2
         crow = int(crow)
         ccol = int(ccol)
         mask = np.zeros((rows,cols,2),np.uint8)
         mask[crow-30:crow+30, ccol-30:ccol+30] = 1
         fshift = dft_shift*mask
         f_ishift = np.fft.ifftshift(fshift)
         a = cv2.idft(f_ishift)
         a = cv2.magnitude(a[:,:,0],a[:,:,1])
         batch[i] = a.reshape([x, x, 1])
     return batch
Example #55
    def correlate(self, img,size):
        #print "new size in correlate",size
        self.H = self.resizeFFT(self.H,size)

##        H1 = cv2.resize(self.H[...,0],None,fx=lk_ratio[0], fy=lk_ratio[1], interpolation = cv2.INTER_CUBIC)
##        H2 = cv2.resize(self.H[...,1],None,fx=lk_ratio[0], fy=lk_ratio[1], interpolation = cv2.INTER_CUBIC)
##        self.H = np.dstack([H1,H2]).copy()
        FFT_img = cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT)
        #print "imgF:",FFT_img.shape,"filterF:",self.H.shape
        C = cv2.mulSpectrums(FFT_img, self.H, 0, conjB=True)
        resp = cv2.idft(C, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
        h, w = resp.shape
        #print "resp shape:",resp.shape
        _, mval, _, (mx, my) = cv2.minMaxLoc(resp)
        side_resp = resp.copy()
        cv2.rectangle(side_resp, (mx-5, my-5), (mx+5, my+5), 0, -1)
        smean, sstd = side_resp.mean(), side_resp.std()
        psr = (mval-smean) / (sstd+eps)
        return resp, (mx-w//2, my-h//2), psr
Example #56
    def state_vis(self):
        f = cv2.idft(self.H, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
        h, w = f.shape

        f = np.roll(f, -h // 2, 0)
        f = np.roll(f, -w // 2, 1)

        kernel = np.uint8((f - f.min()) / f.ptp() * 255)
        resp = self.last_resp
        resp = np.uint8(np.clip(resp / resp.max(), 0, 1) * 255)
        vis = np.vstack([self.last_img, kernel, resp])

        x, y = 5, 10

        draw_str(vis, (x, y), "IMAGE")
        draw_str(vis, (x, y + h), "KERNEL")
        draw_str(vis, (x, y + 2 * h), "FRESP")

        return vis
Example #57
    def correlate(self, img):
        # Multiply the image with the filter (in frequency space)
        C = cv2.mulSpectrums(cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT), self.H, 0, conjB=True)

        # Convert back to RGB space
        resp = cv2.idft(C, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)

        # Look for peak
        h, w = resp.shape
        _, mval, _, (mx, my) = cv2.minMaxLoc(resp)

        # Get the isolated peak response (should be a Gaussian point)
        side_resp = resp.copy()
        cv2.rectangle(side_resp, (mx-5, my-5), (mx+5, my+5), 0, -1)

        # Calculate the match probability
        smean, sstd = side_resp.mean(), side_resp.std()
        psr = (mval-smean) / (sstd+eps)

        return resp, (mx-w//2, my-h//2), psr