def ft_hologram(self, apodize=True):
    """
    `~numpy.ndarray` of the FFT of self.hologram, computed once and cached.
    """
    if self._ft_hologram is None:
        if apodize:
            apodized_hologram = self.apodize(self.hologram)
            self._ft_hologram = fftshift(fft2(apodized_hologram))
        else:
            self._ft_hologram = fftshift(fft2(self.hologram))
    return self._ft_hologram

def _paganin(self, data):
    # Divide the centred spectrum by the pre-computed complex filter,
    # then convert the filtered modulus to a phase map via the log.
    pci1 = fft.fft2(np.float32(data))
    pci2 = fft.fftshift(pci1) / self.filtercomplex
    fpci = np.abs(fft.ifft2(pci2))
    result = -0.5 * self.parameters['Ratio'] * np.log(
        fpci + self.parameters['increment'])
    return result

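# The snippet above relies on a pre-computed `self.filtercomplex`, which is
# not shown. A minimal sketch of how such a kernel could be built, assuming
# the standard single-distance Paganin denominator
# 1 + Ratio * distance * wavelength / (4*pi) * |k|^2, constructed in centred
# (fftshift-ed) coordinates to match the division above. All parameter names
# here (pixel_size, distance, wavelength) are illustrative, not taken from
# the source:
import numpy as np

def build_paganin_filter(nrow, ncol, pixel_size, distance, wavelength, ratio):
    # Angular frequencies on the fftshift-ed grid
    wx = 2 * np.pi * np.fft.fftshift(np.fft.fftfreq(ncol, d=pixel_size))
    wy = 2 * np.pi * np.fft.fftshift(np.fft.fftfreq(nrow, d=pixel_size))
    kx, ky = np.meshgrid(wx, wy)
    return 1.0 + ratio * distance * wavelength / (4 * np.pi) \
        * (kx ** 2 + ky ** 2)
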
def _fine_search(self, sino, start_cor, search_radius, search_step,
                 ratio, drop):
    """
    Fine search for finding the rotation center.
    """
    (nrow, ncol) = sino.shape
    flip_sino = np.fliplr(sino)
    search_radius = np.clip(np.abs(search_radius), 1, ncol // 10 - 1)
    search_step = np.clip(np.abs(search_step), 0.1, 1.1)
    start_cor = np.clip(start_cor, search_radius, ncol - search_radius - 1)
    cen_fliplr = (ncol - 1.0) / 2.0
    list_cor = start_cor + np.arange(
        -search_radius, search_radius + search_step, search_step)
    comp_sino = np.flipud(sino)  # Used to avoid local minima
    list_metric = np.zeros(len(list_cor), dtype=np.float32)
    mask = self._create_mask(2 * nrow, ncol, 0.5 * ratio * ncol, drop)
    for i, cor in enumerate(list_cor):
        shift = 2.0 * (cor - cen_fliplr)
        sino_shift = ndi.shift(flip_sino, (0, shift), order=3,
                               prefilter=True)
        if shift >= 0:
            shift_int = np.int16(np.ceil(shift))
            sino_shift[:, :shift_int] = comp_sino[:, :shift_int]
        else:
            shift_int = np.int16(np.floor(shift))
            sino_shift[:, shift_int:] = comp_sino[:, shift_int:]
        mat1 = np.vstack((sino, sino_shift))
        list_metric[i] = np.mean(
            np.abs(np.fft.fftshift(fft.fft2(mat1))) * mask)
    min_pos = np.argmin(list_metric)
    cor = list_cor[min_pos]
    return cor

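# `_create_mask` is not defined in this excerpt. A sketch of the double-wedge
# Fourier mask this family of center-of-rotation methods is built on (after
# Nghia Vo's reliable-CoR method, Opt. Express 22, 2014); treat the exact
# constants as assumptions:
import numpy as np

def create_mask(nrow, ncol, radius, drop):
    du = 1.0 / ncol
    dv = (nrow - 1.0) / (nrow * 2.0 * np.pi)
    cen_row = int(np.ceil(nrow / 2.0) - 1)
    cen_col = int(np.ceil(ncol / 2.0) - 1)
    drop = min(drop, int(np.ceil(0.05 * nrow)))
    mask = np.zeros((nrow, ncol), dtype=np.float32)
    # Fill a wedge-shaped region, symmetric about the centre column
    for i in range(nrow):
        pos = int(np.ceil(((i - cen_row) * dv / radius) / du))
        (pos1, pos2) = np.clip(np.sort((-pos + cen_col, pos + cen_col)),
                               0, ncol - 1)
        mask[i, pos1:pos2 + 1] = 1.0
    # Zero a band of rows around the centre and the centre columns
    mask[cen_row - drop:cen_row + drop + 1, :] = 0.0
    mask[:, cen_col - 1:cen_col + 2] = 0.0
    return mask
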
def monogenic_filter_one_scale_gray(img, ss=1, minWaveLength=3, mult=2.1,
                                    sigmaOnf=0.55):
    # Floating point is essential for computing the fft2
    if img.dtype not in ['float32', 'float64']:
        img = np.float64(img)
        imgdtype = 'float64'
    else:
        imgdtype = img.dtype
    # For a 3-channel image, collapse to one channel using the mean value
    if img.ndim == 3:  # TODO: this needs to be changed
        img = img.mean(2)
    rows, cols = img.shape
    # Compute the monogenic scale in the frequency domain
    logGabor, logGabor_H1, logGabor_H2 = monogenic_scale(
        cols, rows, ss, minWaveLength, mult, sigmaOnf)
    # FFT2 in the corner
    IM = fft2(img)            # Discrete Fourier transform of the image
    IMF = IM * logGabor       # Frequency-bandpassed image
    f = np.real(ifft2(IMF))   # Spatially bandpassed image
    # Bandpassed monogenic filtering; h1 and h2 are the Riesz components
    IMH1 = IM * logGabor_H1
    IMH2 = IM * logGabor_H2
    h1 = np.real(ifft2(IMH1))
    h2 = np.real(ifft2(IMH2))
    # Amplitude of this scale component
    An = np.sqrt(f * f + h1 * h1 + h2 * h2)
    # Orientation computation
    ori = np.arctan(-h2 / h1)
    # Wrap angles between -pi and pi and convert radians to degrees
    ori_d = np.fix((ori % np.pi) / np.pi * 180.)
    # Feature type (a phase angle between -pi/2 and pi/2)
    ft = np.arctan2(f, np.sqrt(h1 * h1 + h2 * h2))
    # Projection onto the ij plane
    fr = np.sqrt(h1 * h1 + h2 * h2)
    return An, ori_d, ori, ft, fr, f

def _fine_search(self, sino, raw_cor):
    (Nrow, Ncol) = sino.shape
    centerfliplr = (Ncol + 1.0) / 2.0 - 1.0
    # Shift sino2 onto the raw CoR
    shiftsino = np.int16(2 * (raw_cor - centerfliplr))
    sino2 = np.roll(np.fliplr(sino[1:]), shiftsino, axis=1)
    lefttake = 0
    righttake = Ncol - 1
    search_rad = self.parameters['search_radius']
    # Integer bounds are required for the slice below
    if raw_cor <= centerfliplr:
        lefttake = np.int16(np.ceil(search_rad + 1))
        righttake = np.int16(np.floor(2 * raw_cor - search_rad - 1))
    else:
        lefttake = np.int16(
            np.ceil(raw_cor - (Ncol - 1 - raw_cor) + search_rad + 1))
        righttake = np.int16(np.floor(Ncol - 1 - search_rad - 1))
    Ncol1 = righttake - lefttake + 1
    mask = self._create_mask(2 * Nrow - 1, Ncol1,
                             0.5 * self.parameters['ratio'] * Ncol)
    numshift = np.int16((2 * search_rad + 1.0) / self.parameters['step'])
    listshift = np.linspace(-search_rad, search_rad, num=numshift)
    listmetric = np.zeros(len(listshift), dtype=np.float32)
    num1 = 0
    for i in listshift:
        logging.debug("list shift %f", i)
        sino2a = ndi.shift(sino2, (0, i), prefilter=False)
        sinojoin = np.vstack((sino, sino2a))
        listmetric[num1] = np.sum(np.abs(fft.fftshift(
            fft.fft2(sinojoin[:, lefttake:righttake + 1]))) * mask)
        num1 = num1 + 1
    minpos = np.argmin(listmetric)
    rotcenter = raw_cor + listshift[minpos] / 2.0
    return rotcenter, listmetric

def _coarse_search(self, sino):
    # Search from smin to smax in 1-pixel steps
    smin, smax = self.parameters['search_area']
    logging.debug("SMIN and SMAX %d %d", smin, smax)
    (Nrow, Ncol) = sino.shape
    centre_fliplr = (Ncol - 1.0) / 2.0
    # Check angles here to determine if a sinogram should be chopped off.
    # Copy the sinogram and flip left-right; the purpose is to make a full
    # [0;2Pi] sinogram
    sino2 = np.fliplr(sino[1:])
    # This image is used for compensating the shift of sino2
    compensateimage = np.zeros((Nrow - 1, Ncol), dtype=np.float32)
    # Start coarse search in which the shift step is 1
    compensateimage[:] = sino[-1]
    start_shift = self._get_start_shift(centre_fliplr) * 2
    list_shift = np.arange(smin, smax + 1) * 2 - start_shift
    logging.debug("%s", list_shift)
    list_metric = np.zeros(len(list_shift), dtype=np.float32)
    mask = self._create_mask(2 * Nrow - 1, Ncol,
                             0.5 * self.parameters['ratio'] * Ncol)
    count = 0
    for i in list_shift:
        logging.debug("list shift %d", i)
        sino2a = np.roll(sino2, i, axis=1)
        if i >= 0:
            sino2a[:, 0:i] = compensateimage[:, 0:i]
        else:
            sino2a[:, i:] = compensateimage[:, i:]
        list_metric[count] = np.sum(
            np.abs(fft.fftshift(fft.fft2(np.vstack((sino, sino2a)))))
            * mask)
        count += 1
    minpos = np.argmin(list_metric)
    rot_centre = centre_fliplr + list_shift[minpos] / 2.0
    return rot_centre, list_metric

def psf_correction(self, mat, win, pad_width):
    (nrow, ncol) = mat.shape
    mat_pad = np.pad(mat, pad_width, mode="reflect")
    win_pad = np.pad(win, pad_width, mode="constant", constant_values=1.0)
    # Deconvolve by dividing in the Fourier domain
    mat_dec = fft.ifft2(fft.fft2(mat_pad) / fft.ifftshift(win_pad))
    return np.abs(mat_dec)[pad_width:pad_width + nrow,
                           pad_width:pad_width + ncol]

def apply_gaussian_filter(mat, sigmax, sigmay, pad):
    """
    Filter an image using a 2D Gaussian window.

    Parameters
    ----------
    mat : array_like
        2D array.
    sigmax : int
        Sigma in the x-direction.
    sigmay : int
        Sigma in the y-direction.
    pad : int
        Padding for the Fourier transform.

    Returns
    -------
    ndarray
        2D array. Filtered image.
    """
    mat_pad = np.pad(mat, ((0, 0), (pad, pad)), mode='edge')
    mat_pad = np.pad(mat_pad, ((pad, pad), (0, 0)), mode='mean')
    (nrow, ncol) = mat_pad.shape
    window = make_2d_gaussian_window(nrow, ncol, sigmax, sigmay)
    listx = np.arange(0, ncol)
    listy = np.arange(0, nrow)
    x, y = np.meshgrid(listx, listy)
    # Multiplying by (-1)**(x+y) centres the zero frequency, so the centred
    # Gaussian window can be applied without fftshift
    mat_sign = np.power(-1.0, x + y)
    mat_filt = np.real(
        fft.ifft2(fft.fft2(mat_pad * mat_sign) * window) * mat_sign)
    return mat_filt[pad:nrow - pad, pad:ncol - pad]

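# `make_2d_gaussian_window` is not defined in this excerpt. A minimal
# sketch, assuming a Gaussian centred on the (fftshift-ed) zero frequency:
import numpy as np

def make_2d_gaussian_window(height, width, sigmax, sigmay):
    xcenter = (width - 1.0) / 2.0
    ycenter = (height - 1.0) / 2.0
    y, x = np.mgrid[0:height, 0:width]
    return np.exp(-((x - xcenter) ** 2 / (2.0 * sigmax ** 2)
                    + (y - ycenter) ** 2 / (2.0 * sigmay ** 2)))
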
def _coarse_search(self, sino):
    # Search from smin to smax in 1-pixel steps
    smin, smax = self.parameters['search_area']
    (Nrow, Ncol) = sino.shape
    centre_fliplr = (Ncol - 1.0) / 2.0
    # Check angles here to determine if a sinogram should be chopped off.
    # Copy the sinogram and flip left-right; the purpose is to make a full
    # [0;2Pi] sinogram
    sino2 = np.fliplr(sino[1:])
    # This image is used for compensating the shift of sino2
    compensateimage = np.zeros((Nrow - 1, Ncol), dtype=np.float32)
    # Start coarse search in which the shift step is 1
    compensateimage[:] = np.flipud(sino)[1:]
    start_shift = self._get_start_shift(centre_fliplr) * 2
    list_shift = np.arange(smin, smax + 1) * 2 - start_shift
    list_metric = np.zeros(len(list_shift), dtype=np.float32)
    mask = self._create_mask(2 * Nrow - 1, Ncol,
                             0.5 * self.parameters['ratio'] * Ncol)
    count = 0
    for i in list_shift:
        sino2a = np.roll(sino2, i, axis=1)
        if i >= 0:
            sino2a[:, 0:i] = compensateimage[:, 0:i]
        else:
            sino2a[:, i:] = compensateimage[:, i:]
        fft_out = fft.fft2(np.vstack((sino, sino2a)))
        list_metric[count] = np.sum(np.abs(fft.fftshift(fft_out)) * mask)
        count += 1
    minpos = np.argmin(list_metric)
    rot_centre = centre_fliplr + list_shift[minpos] / 2.0
    return rot_centre, list_metric

def _hilbert(self, data):
    # Hilbert-type transform: multiply the spectrum by a pre-computed
    # filter and keep the imaginary part of the inverse transform
    pci1 = fft.fft2(fft.fftshift(np.float32(data)))
    pci2 = fft.ifftshift(pci1) * self.filter1
    fpci0 = fft.ifftshift(fft.ifft2(fft.fftshift(pci2)))
    return np.imag(fpci0)

def remove_stripe_based_fft(sinogram, u, n, v, pad=150):
    """
    Remove stripes using the method in Ref. [1].
    Angular direction is along the axis 0.

    Parameters
    ----------
    sinogram : array_like
        2D array.
    u, n : int
        Shape parameters of the 1D Butterworth low-pass filter.
    v : int
        Number of rows (* 2) around the zero vertical frequency to which
        the filter is applied.
    pad : int
        Padding for the FFT.

    Returns
    -------
    ndarray
        2D array. Stripe-removed sinogram.

    References
    ----------
    .. [1] https://doi.org/10.1063/1.1149043
    """
    if pad > 0:
        sinogram = np.pad(sinogram, ((pad, pad), (0, 0)), mode='mean')
        sinogram = np.pad(sinogram, ((0, 0), (pad, pad)), mode='edge')
    (nrow, ncol) = sinogram.shape
    window2d = create_2d_window(ncol, nrow, u, v, n)
    sinogram = fft.ifft2(
        np.fft.ifftshift(np.fft.fftshift(fft.fft2(sinogram)) * window2d))
    return np.real(sinogram[pad:nrow - pad, pad:ncol - pad])

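# `create_2d_window` is not defined in this excerpt. A plausible
# construction, assuming a 1D Butterworth low-pass of parameters (u, n)
# applied only to the 2*v+1 rows around the zero vertical frequency, where
# stripe energy concentrates in a sinogram spectrum; treat the details as
# assumptions rather than the source's definition:
import numpy as np

def create_2d_window(ncol, nrow, u, v, n):
    cen_col = int(np.ceil(ncol / 2.0) - 1)
    cen_row = int(np.ceil(nrow / 2.0) - 1)
    listx = np.arange(ncol) - cen_col
    win1d = 1.0 / (1.0 + np.power(listx / u, 2 * n))  # Butterworth low-pass
    win2d = np.ones((nrow, ncol), dtype=np.float32)
    win2d[cen_row - v:cen_row + v + 1] = win1d        # damp stripe rows only
    return win2d
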
def _coarse_search(self, sino, list_shift):
    # Search over the given list of shifts in 1-pixel steps
    list_metric = np.zeros(len(list_shift), dtype=np.float32)
    (Nrow, Ncol) = sino.shape
    # Check angles to determine if a sinogram should be chopped off.
    # Copy the sinogram and flip left-right, to make a full [0:2Pi] sino
    sino2 = np.fliplr(sino[1:])
    # This image is used for compensating the shift of sino2
    compensateimage = np.zeros((Nrow - 1, Ncol), dtype=np.float32)
    # Start coarse search in which the shift step is 1
    compensateimage[:] = np.flipud(sino)[1:]
    mask = self._create_mask(2 * Nrow - 1, Ncol,
                             0.5 * self.parameters['ratio'] * Ncol)
    count = 0
    for i in list_shift:
        sino2a = np.roll(sino2, i, axis=1)
        if i >= 0:
            sino2a[:, 0:i] = compensateimage[:, 0:i]
        else:
            sino2a[:, i:] = compensateimage[:, i:]
        list_metric[count] = np.sum(
            np.abs(fft.fftshift(fft.fft2(np.vstack((sino, sino2a)))))
            * mask)
        count += 1
    return list_metric

def coarse_search_based_integer_shift(sino_0_180, start_cor=None,
                                      stop_cor=None, ratio=0.5):
    """
    Find the center of rotation (CoR) using integer shifts of a 180-360
    sinogram. The 180-360 sinogram is made by flipping the 0-180 sinogram
    horizontally. Angular direction is along the axis 0.
    Note: a 1-pixel shift of the 180-360 sinogram is equivalent to a
    0.5-pixel shift of the CoR. Auto-search is limited to the range of
    [width/4; width - width/4].

    Parameters
    ----------
    sino_0_180 : array_like
        Sinogram in the angle range of [0;180].
    start_cor : int
        Starting point for searching the CoR.
    stop_cor : int
        Ending point for searching the CoR.
    ratio : float
        Ratio between the sample and the width of the sinogram. The default
        value of 0.5 works in most cases (even when the sample is larger
        than the field_of_view).

    Returns
    -------
    float
        Center of rotation with half-pixel accuracy.
    """
    # Denoising. Should not be used for a downsampled sinogram
    # sino_0_180 = ndi.gaussian_filter(sino_0_180, (3, 1), mode='reflect')
    (nrow, ncol) = sino_0_180.shape
    if start_cor is None:
        start_cor = ncol // 4
    if stop_cor is None:
        stop_cor = ncol - ncol // 4 - 1
    start_cor, stop_cor = np.sort((start_cor, stop_cor))
    start_cor = np.int16(np.clip(start_cor, 0, ncol - 1))
    stop_cor = np.int16(np.clip(stop_cor, 0, ncol - 1))
    cen_fliplr = (ncol - 1.0) / 2.0
    sino_180_360 = np.fliplr(sino_0_180)
    comp_sino = np.flipud(sino_0_180)  # Used to avoid local minima
    list_cor = np.arange(start_cor, stop_cor + 0.5, 0.5)
    list_metric = np.zeros(len(list_cor), dtype=np.float32)
    mask = make_mask(2 * nrow, ncol, 0.5 * ratio * ncol)
    for i, cor in enumerate(list_cor):
        shift = np.int16(2.0 * (cor - cen_fliplr))
        sino_shift = np.roll(sino_180_360, shift, axis=1)
        if shift >= 0:
            sino_shift[:, :shift] = comp_sino[:, :shift]
        else:
            sino_shift[:, shift:] = comp_sino[:, shift:]
        mat1 = np.vstack((sino_0_180, sino_shift))
        list_metric[i] = np.mean(
            np.abs(np.fft.fftshift(fft.fft2(mat1))) * mask)
    min_pos = np.argmin(list_metric)
    cor = list_cor[min_pos]
    return cor

def fine_search_based_subpixel_shift(sino_0_180, start_cor, search_radius=4,
                                     search_step=0.25, ratio=0.5):
    """
    Find the center of rotation (CoR) with sub-pixel accuracy by shifting a
    180-360 sinogram around the coarse CoR. The 180-360 sinogram is made by
    flipping the 0-180 sinogram horizontally. Angular direction is along
    the axis 0.

    Parameters
    ----------
    sino_0_180 : array_like
        Sinogram in the angle range of [0;180].
    start_cor : float
        Starting point for searching the CoR.
    search_radius : float
        Searching range = (start_cor +/- search_radius).
    search_step : float
        Searching step.
    ratio : float
        Ratio between the sample and the width of the sinogram. The default
        value of 0.5 works in most cases (even when the sample is larger
        than the field_of_view).

    Returns
    -------
    float
        Center of rotation.
    """
    # Denoising
    sino_0_180 = ndi.gaussian_filter(sino_0_180, (2, 2), mode='reflect')
    (nrow, ncol) = sino_0_180.shape
    sino_180_360 = np.fliplr(sino_0_180)
    search_radius = np.clip(np.abs(search_radius), 1, ncol // 10 - 1)
    search_step = np.clip(np.abs(search_step), 0.1, 1.1)
    start_cor = np.clip(start_cor, search_radius, ncol - search_radius - 1)
    cen_fliplr = (ncol - 1.0) / 2.0
    list_cor = start_cor + np.arange(
        -search_radius, search_radius + search_step, search_step)
    comp_sino = np.flipud(sino_0_180)  # Used to avoid local minima
    list_metric = np.zeros(len(list_cor), dtype=np.float32)
    mask = make_mask(2 * nrow, ncol, 0.5 * ratio * ncol)
    for i, cor in enumerate(list_cor):
        shift = 2.0 * (cor - cen_fliplr)
        sino_shift = ndi.shift(sino_180_360, (0, shift), order=3,
                               prefilter=True)
        if shift >= 0:
            shift_int = np.int16(np.ceil(shift))
            sino_shift[:, :shift_int] = comp_sino[:, :shift_int]
        else:
            shift_int = np.int16(np.floor(shift))
            sino_shift[:, shift_int:] = comp_sino[:, shift_int:]
        mat1 = np.vstack((sino_0_180, sino_shift))
        list_metric[i] = np.mean(
            np.abs(np.fft.fftshift(fft.fft2(mat1))) * mask)
    min_pos = np.argmin(list_metric)
    cor = list_cor[min_pos]
    return cor

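# A possible end-to-end use of the two searches above: coarse integer-shift
# search first, then sub-pixel refinement around the coarse result. The
# input file name is hypothetical; `make_mask` is assumed to be the
# double-wedge mask builder sketched earlier.
import numpy as np

sino = np.load('sino_0_180.npy')  # hypothetical [0;180] sinogram, axis 0 angular
cor_coarse = coarse_search_based_integer_shift(sino, ratio=0.5)
cor = fine_search_based_subpixel_shift(sino, cor_coarse,
                                       search_radius=4, search_step=0.25)
print('Center of rotation: %.2f pixels' % cor)
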
def fftFromLiteMap(liteMap, applySlepianTaper=False, nresForSlepian=3.0):
    """
    @brief Creates an fft2D object out of a liteMap
    @param liteMap The map whose FFT is being taken
    @param applySlepianTaper If True, applies the lowest-order taper (to
    minimize edge leakage)
    @param nresForSlepian If the above is True, specifies the resolution of
    the taper to use.
    """
    ft = fft2D()
    ft.Nx = liteMap.Nx
    ft.Ny = liteMap.Ny
    flTrace.issue("flipper.fftTools", 1,
                  "Taking FFT of map with (Ny,Nx)= (%f,%f)" % (ft.Ny, ft.Nx))
    ft.pixScaleX = liteMap.pixScaleX
    ft.pixScaleY = liteMap.pixScaleY
    lx = 2 * numpy.pi * fftfreq(ft.Nx, d=ft.pixScaleX)
    ly = 2 * numpy.pi * fftfreq(ft.Ny, d=ft.pixScaleY)
    ix = numpy.mod(numpy.arange(ft.Nx * ft.Ny), ft.Nx)
    # Integer division is required so iy can be used as an index array
    iy = numpy.arange(ft.Nx * ft.Ny) // ft.Nx
    modLMap = numpy.zeros([ft.Ny, ft.Nx])
    modLMap[iy, ix] = numpy.sqrt(lx[ix] ** 2 + ly[iy] ** 2)
    ft.modLMap = modLMap
    ft.lx = lx
    ft.ly = ly
    ft.ix = ix
    ft.iy = iy
    ft.thetaMap = numpy.zeros([ft.Ny, ft.Nx])
    ft.thetaMap[iy[:], ix[:]] = numpy.arctan2(ly[iy[:]], lx[ix[:]])
    ft.thetaMap *= 180. / numpy.pi
    map = liteMap.data.copy()
    taper = map.copy() * 0.0 + 1.0
    if applySlepianTaper:
        taper_file = taperDir + os.path.sep + \
            'taper_Ny%d_Nx%d_Nres%3.1f' % (ft.Ny, ft.Nx, nresForSlepian)
        try:
            # Pickle files must be opened in binary mode
            with open(taper_file, 'rb') as f:
                taper = pickle.load(f)
        except (IOError, OSError):
            taper = slepianTaper00(ft.Nx, ft.Ny, nresForSlepian)
            with open(taper_file, 'wb') as f:
                pickle.dump(taper, f)
    ft.kMap = fft2(map * taper)
    del map, modLMap, lx, ly
    return ft

def process_frames(self, data):
    sino = data[0]
    sino2 = np.fliplr(sino[1:])
    (Nrow, Ncol) = sino.shape
    mask = self._create_mask(
        2 * Nrow - 1, Ncol, 0.5 * self.parameters['ratio'] * Ncol)
    FT1 = fft.fftshift(fft.fft2(np.vstack((sino, sino2))))
    sino = fft.ifft2(fft.ifftshift(FT1 - FT1 * mask))
    return sino[0:Nrow].real

def sampling_op_forward(image, mask_as_image):
    """
    Sampling operator

    :param image: Assumed to be an array with shape [N,N]
    :param mask_as_image: Mask of shape [N,N] where the middle of the image
                          corresponds to zero frequency. Values should be
                          0/1 or True/False.

    :return: An array with shape [N,N] where some of the data have been
             zeroed out.
    """
    N = max(image.shape)
    fourier_coeff = fftw.fftshift(fftw.fft2(image)) / N
    fourier_coeff_zero = np.multiply(mask_as_image, fourier_coeff)
    return fourier_coeff_zero

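# A companion inverse/adjoint operator is not shown above. A minimal sketch,
# assuming the same `fftw` alias also exposes ifft2/ifftshift and that the
# mask has already zeroed the unsampled coefficients:
def sampling_op_adjoint(fourier_coeff_zero, mask_as_image):
    N = max(fourier_coeff_zero.shape)
    coeff = np.multiply(mask_as_image, fourier_coeff_zero)
    # Undo the centring and the 1/N scaling of the forward operator
    return N * fftw.ifft2(fftw.ifftshift(coeff))
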
def incoherent():
    # Example of an image that is sparse in the spatial domain and diffuse
    # in the Fourier domain
    s = np.random.random((50, 50))
    mask = s < .98
    s[mask] = 0
    fs = fftpack.fft2(s)
    fs = np.abs(fs)
    plt.subplot(121)
    plt.imshow(1 - s, cmap=plt.cm.Greys_r, interpolation='nearest')
    plt.xticks([])
    plt.yticks([])
    plt.subplot(122)
    plt.imshow(fs, cmap=plt.cm.Greys_r, interpolation='nearest')
    plt.xticks([])
    plt.yticks([])
    plt.savefig('incoherent.pdf')
    plt.clf()

def process_frames(self, data):
    sinogram = data[0]
    (height, _) = sinogram.shape
    # The Savitzky-Golay window length must be odd
    if height % 2 == 0:
        height = height - 1
    sinofit = np.abs(
        savgol_filter(sinogram, height, self.order, axis=0, mode='mirror'))
    sinofit2 = np.pad(sinofit, ((0, 0), (self.pad, self.pad)), mode='edge')
    sinofit2 = np.pad(sinofit2, ((self.pad, self.pad), (0, 0)), mode='mean')
    sinofitsmooth = np.real(
        fft.ifft2(fft.fft2(sinofit2 * self.matsign) * self.window2d)
        * self.matsign)
    sinofitsmooth = sinofitsmooth[self.pad:self.height1 - self.pad,
                                  self.pad:self.width1 - self.pad]
    num1 = np.mean(sinofit)
    num2 = np.mean(sinofitsmooth)
    sinofitsmooth = num1 * sinofitsmooth / num2
    return sinogram / sinofit * sinofitsmooth

def _coarse_search(self, sino, start_cor, stop_cor, ratio, drop):
    """
    Coarse search for finding the rotation center.
    """
    (nrow, ncol) = sino.shape
    start_cor, stop_cor = np.sort((start_cor, stop_cor))
    start_cor = np.int16(np.clip(start_cor, 0, ncol - 1))
    stop_cor = np.int16(np.clip(stop_cor, 0, ncol - 1))
    cen_fliplr = (ncol - 1.0) / 2.0
    # Flip the [0;Pi] sinogram left-right to make a full [0;2Pi] sinogram
    flip_sino = np.fliplr(sino)
    # The image below is used for compensating the shift of the [Pi;2Pi]
    # sinogram. It helps to avoid local minima.
    comp_sino = np.flipud(sino)
    list_cor = np.arange(start_cor, stop_cor + 1.0)
    list_metric = np.zeros(len(list_cor), dtype=np.float32)
    mask = self._create_mask(2 * nrow, ncol, 0.5 * ratio * ncol, drop)
    sino_sino = np.vstack((sino, flip_sino))
    for i, cor in enumerate(list_cor):
        shift = np.int16(2.0 * (cor - cen_fliplr))
        _sino = sino_sino[nrow:]
        _sino[...] = np.roll(flip_sino, shift, axis=1)
        if shift >= 0:
            _sino[:, :shift] = comp_sino[:, :shift]
        else:
            _sino[:, shift:] = comp_sino[:, shift:]
        list_metric[i] = np.mean(
            np.abs(np.fft.fftshift(fft.fft2(sino_sino))) * mask)
    minpos = np.argmin(list_metric)
    if minpos == 0:
        self.error_msg_1 = "!!! WARNING !!! Global minimum is out of " \
                           "the searching range. Please extend smin"
        logging.warning(self.error_msg_1)
    if minpos == len(list_metric) - 1:
        self.error_msg_2 = "!!! WARNING !!! Global minimum is out of " \
                           "the searching range. Please extend smax"
        logging.warning(self.error_msg_2)
    rot_centre = list_cor[minpos]
    return rot_centre

def _coarse_search(self, sino, start_cor, stop_cor, ratio, drop):
    """
    Coarse search for finding the rotation center.
    """
    (nrow, ncol) = sino.shape
    start_cor, stop_cor = np.sort((start_cor, stop_cor))
    start_cor = np.int16(np.clip(start_cor, 0, ncol - 1))
    stop_cor = np.int16(np.clip(stop_cor, 0, ncol - 1))
    cen_fliplr = (ncol - 1.0) / 2.0
    # Flip the [0;Pi] sinogram left-right to make a full [0;2Pi] sinogram
    flip_sino = np.fliplr(sino)
    # The image below is used for compensating the shift of the [Pi;2Pi]
    # sinogram. It helps to avoid local minima.
    comp_sino = np.flipud(sino)
    list_cor = np.arange(start_cor, stop_cor + 1.0)
    list_metric = np.zeros(len(list_cor), dtype=np.float32)
    mask = self._create_mask(2 * nrow, ncol, 0.5 * ratio * ncol, drop)
    sino_sino = np.vstack((sino, flip_sino))
    for i, cor in enumerate(list_cor):
        shift = np.int16(2.0 * (cor - cen_fliplr))
        _sino = sino_sino[nrow:]
        _sino[...] = np.roll(flip_sino, shift, axis=1)
        if shift >= 0:
            _sino[:, :shift] = comp_sino[:, :shift]
        else:
            _sino[:, shift:] = comp_sino[:, shift:]
        list_metric[i] = np.mean(
            np.abs(np.fft.fftshift(fft.fft2(sino_sino))) * mask)
    minpos = np.argmin(list_metric)
    if minpos == 0:
        self.error_msg_1 = "!!! WARNING !!! Global minimum is out of " \
                           "the searching range. Please extend smin"
        logging.warning(self.error_msg_1)
        cu.user_message(self.error_msg_1)
    if minpos == len(list_metric) - 1:
        self.error_msg_2 = "!!! WARNING !!! Global minimum is out of " \
                           "the searching range. Please extend smax"
        logging.warning(self.error_msg_2)
        cu.user_message(self.error_msg_2)
    rot_centre = list_cor[minpos]
    return rot_centre

def apply_filter(self, mat, window, pattern, pad_width):
    (nrow, ncol) = mat.shape
    if pattern == "PROJECTION":
        top_drop = 10  # To remove the time stamp in some data
        mat_pad = np.pad(mat[top_drop:],
                         ((pad_width + top_drop, pad_width),
                          (pad_width, pad_width)), mode="edge")
        win_pad = np.pad(window, pad_width, mode="edge")
        mat_dec = fft.ifft2(
            fft.fft2(-np.log(mat_pad)) / fft.ifftshift(win_pad))
        mat_dec = np.abs(mat_dec[pad_width:pad_width + nrow,
                                 pad_width:pad_width + ncol])
    else:
        mat_pad = np.pad(-np.log(mat), ((0, 0), (pad_width, pad_width)),
                         mode='edge')
        win_pad = np.pad(window, ((0, 0), (pad_width, pad_width)),
                         mode="edge")
        mat_fft = np.fft.fftshift(fft.fft(mat_pad), axes=1) / win_pad
        mat_dec = fft.ifft(np.fft.ifftshift(mat_fft, axes=1))
        mat_dec = np.abs(mat_dec[:, pad_width:pad_width + ncol])
    return np.float32(np.exp(-mat_dec))

def fft_image(image):
    """
    Perform an FFT on the supplied image array.

    Parameters
    ----------
    image : ndarray
        An array of the image

    Returns
    -------
    namedtuple
        A namedtuple containing the fast Fourier transform, the amplitude
        and the phase.
    """
    image_fft = fft2(image)
    image_amplitude = np.abs(image_fft)
    image_phase = np.angle(image_fft)
    result = namedtuple('Image', 'FFT Amplitude Phase')
    return result(image_fft, image_amplitude, image_phase)

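# Quick round-trip check for fft_image (assumes numpy's ifft2 is imported
# alongside fft2; `img` here is an arbitrary 2D float array):
img = np.random.rand(64, 64)
res = fft_image(img)
# Amplitude * exp(i * phase) reconstructs the complex spectrum exactly
recovered = np.real(ifft2(res.Amplitude * np.exp(1j * res.Phase)))
assert np.allclose(recovered, img)
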
def _2d_filter(mat, sigmax, sigmay, pad):
    """
    Filter an image using a 2D Gaussian window.
    ---------
    Parameters: - mat: 2D array.
                - sigmax, sigmay: sigmas of the window.
                - pad: padding for the FFT.
    ---------
    Return:     - filtered image.
    """
    matpad = np.pad(mat, ((0, 0), (pad, pad)), mode='edge')
    matpad = np.pad(matpad, ((pad, pad), (0, 0)), mode='mean')
    (nrow, ncol) = matpad.shape
    win2d = _2d_window_ellipse(nrow, ncol, sigmax, sigmay)
    listx = np.arange(0, ncol)
    listy = np.arange(0, nrow)
    x, y = np.meshgrid(listx, listy)
    # (-1)**(x+y) centres the zero frequency, so the window applies directly
    matsign = np.power(-1.0, x + y)
    matfilter = np.real(
        fft_vo.ifft2(fft_vo.fft2(matpad * matsign) * win2d) * matsign)
    return matfilter[pad:nrow - pad, pad:ncol - pad]

def sample_image(im, k_mask_idx1, k_mask_idx2):
    """
    Creates the Fourier samples the AUTOMAP network is trained to recover.

    The parameters k_mask_idx1 and k_mask_idx2 contain the row and column
    indices, respectively, of the samples the network is trained to
    recover. It is assumed that these indices have the same ordering as the
    coefficients the network is used to recover.

    :param im: Image, assumed of size [batch_size, height, width]. The
               intensity values of the image should lie in the range [0, 1].
    :param k_mask_idx1: Row indices of the Fourier samples
    :param k_mask_idx2: Column indices of the Fourier samples

    :return: Fourier samples in the format the AUTOMAP network expects
    """
    # Scale the image to the right range
    im1 = 4096 * im
    batch_size = im1.shape[0]
    nbr_samples = k_mask_idx1.shape[0]
    samp_batch = np.zeros([batch_size, 2 * nbr_samples], dtype=np.float32)
    for i in range(batch_size):
        single_im = np.squeeze(im1[i, :, :])
        fft_im = fftw.fft2(single_im)
        samples = fft_im[k_mask_idx1, k_mask_idx2]
        samples_real = np.real(samples)
        samples_imag = np.imag(np.conj(samples))
        samples_concat = np.squeeze(
            np.concatenate((samples_real, samples_imag)))
        samples_concat = (0.0075 / (2 * 4096)) * samples_concat
        samp_batch[i] = samples_concat
    return samp_batch

def _reconstruct(self, propagation_distance, fourier_mask=None):
    """
    Reconstruct the wave at a single ``propagation_distance`` for a
    single ``wavelength``.

    Parameters
    ----------
    propagation_distance : float
        Propagation distance [m]
    fourier_mask : array_like or None, optional
        Fourier-domain mask. If None (default), a mask is determined from
        the position of the main spectral peak.

    Returns
    -------
    reconstructed_wave : `~numpy.ndarray` ndim 3
        The reconstructed wave as an array of dimensions
        (X, Y, wavelengths)
    """
    x_peak, y_peak = self.spectral_peak

    # Calculate mask radius.
    # TODO: Update 250 to an automated guess based on input values.
    if self.rebin_factor != 1:
        mask_radius = 150. / self.rebin_factor
    elif self.crop_fraction is not None and self.crop_fraction != 0:
        mask_radius = 150. * self.crop_fraction
    else:
        mask_radius = 150.

    # Either use a Fourier-domain mask based on coords of the spectral
    # peak, or a user-specified mask
    if fourier_mask is None:
        mask = self.real_image_mask(x_peak, y_peak, mask_radius)
    else:
        mask = np.asarray(fourier_mask, dtype=bool)
    mask = np.atleast_3d(mask)

    # Calculate the Fourier transform of the impulse response function
    G = self.fourier_trans_of_impulse_resp_func(
        np.atleast_1d([propagation_distance]
                      * self.wavelength.size).reshape((1, 1, -1))
        - self.chromatic_shift)

    # Now calculate the digital phase mask. First center the spectral
    # peak for each channel
    x_peak, y_peak = x_peak.reshape(-1), y_peak.reshape(-1)
    shifted_ft_hologram = np.empty_like(np.atleast_3d(mask),
                                        dtype=np.complex128)
    for channel in range(self.wavelength.size):
        shifted_ft_hologram[:, :, channel] = arrshift(
            self.ft_hologram * mask[:, :, channel],
            [-x_peak[channel], -y_peak[channel]],
            axes=(0, 1))

    # Apodize the result
    psi = self.apodize(shifted_ft_hologram * G)
    digital_phase_mask = self.get_digital_phase_mask(psi)

    # Reconstruct the image; fftshift is independent of channel
    psi = np.empty_like(np.atleast_3d(shifted_ft_hologram))
    for channel in range(psi.shape[2]):
        psi[:, :, channel] = arrshift(
            fftshift(fft2(self.apodize(self.hologram)
                          * digital_phase_mask[:, :, channel],
                          axes=(0, 1)))
            * mask[:, :, channel],
            [-x_peak[channel], -y_peak[channel]],
            axes=(0, 1))
    psi *= G
    return fftshift(ifft2(psi, axes=(0, 1)), axes=(0, 1))

def perfft2(im, compute_P=True, compute_spatial=False):
    """
    Moisan's periodic plus smooth image decomposition.

    The image is decomposed into two parts:

        im = s + p

    where 's' is the 'smooth' component with mean 0, and 'p' is the
    'periodic' component which has no sharp discontinuities when one
    moves cyclically across the image boundaries.

    usage: S, [P, s, p] = perfft2(im)

    where: im is the image
           S  is the FFT of the smooth component
           P  is the FFT of the periodic component, returned if compute_P
              (default)
           s & p are the smooth and periodic components in the spatial
              domain, returned if compute_spatial

    By default this function returns P and S, the FFTs of the periodic
    and smooth components respectively. If compute_spatial=True, the
    spatial domain components p and s are also computed.

    This code is adapted from Lionel Moisan's Scilab function
    'perdecomp.sci' ("Periodic plus Smooth Image Decomposition", 07/2012),
    available at: <http://www.mi.parisdescartes.fr/~moisan/p+s>
    """
    if im.dtype not in ['float32', 'float64']:
        im = np.float64(im)

    rows, cols = im.shape

    # Compute the boundary image which is equal to the image discontinuity
    # values across the boundaries at the edges and is 0 elsewhere
    s = np.zeros_like(im)
    s[0, :] = im[0, :] - im[-1, :]
    s[-1, :] = -s[0, :]
    s[:, 0] = s[:, 0] + im[:, 0] - im[:, -1]
    s[:, -1] = s[:, -1] - im[:, 0] + im[:, -1]

    # Generate the grid upon which to compute the filter for the boundary
    # image in the frequency domain. Note that cos is cyclic, hence the
    # grid values can range from 0 .. 2*pi rather than 0 .. pi and then
    # pi .. 0
    x, y = (2 * np.pi * np.arange(0, v) / float(v) for v in (cols, rows))
    cx, cy = np.meshgrid(x, y)

    denom = (2. * (2. - np.cos(cx) - np.cos(cy)))
    denom[0, 0] = 1.  # avoid division by 0

    S = fft2(s) / denom
    S[0, 0] = 0  # enforce zero mean

    if compute_P or compute_spatial:
        P = fft2(im) - S
        if compute_spatial:
            s = ifft2(S).real
            p = im - s
            return S, P, s, p
        else:
            return S, P
    else:
        return S

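# Typical use of perfft2: inspect the spectrum without the cross-shaped
# leakage caused by the non-periodic image boundary (`img` is hypothetical):
img = np.random.rand(128, 128)
S, P, s, p = perfft2(img, compute_P=True, compute_spatial=True)
assert np.allclose(img, s + p)  # the decomposition is exact
periodic_spectrum = np.log1p(np.abs(np.fft.fftshift(P)))
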
def cross_correlation_2d(pixels1, pixels2):
    '''Align the second image with the first using max cross-correlation

    returns the x,y offsets to add to image1's indexes to align it with
    image2

    Many of the ideas here are based on the paper, "Fast Normalized
    Cross-Correlation" by J.P. Lewis
    (http://www.idiom.com/~zilla/Papers/nvisionInterface/nip.html)
    which is frequently cited when addressing this problem.
    '''
    #
    # We double the size of the image to get a field of zeros
    # for the parts of one image that don't overlap the displaced
    # second image.
    #
    # Since we're going into the frequency domain, if the images are of
    # different sizes, we can make the FFT shape large enough to capture
    # the period of the largest image - the smaller just will have zero
    # amplitude at that frequency.
    #
    s = np.maximum(pixels1.shape, pixels2.shape)
    fshape = s * 2
    #
    # Calculate the # of pixels at a particular point
    #
    i, j = np.mgrid[-s[0]:s[0], -s[1]:s[1]]
    unit = np.abs(i * j).astype(float)
    unit[unit < 1] = 1  # keeps from dividing by zero in some places
    #
    # Normalize the pixel values around zero which does not affect the
    # correlation, keeps some of the sums of multiplications from
    # losing precision and precomputes t(x-u,y-v) - t_mean
    #
    pixels1 = np.nan_to_num(pixels1 - np.nanmean(pixels1))
    pixels2 = np.nan_to_num(pixels2 - np.nanmean(pixels2))
    #
    # Lewis uses an image, f and a template t. He derives a normalized
    # cross correlation, ncc(u,v) =
    # sum((f(x,y)-f_mean(u,v))*(t(x-u,y-v)-t_mean),x,y) /
    # sqrt(sum((f(x,y)-f_mean(u,v))**2,x,y) * (sum((t(x-u,y-v)-t_mean)**2,x,y)
    #
    # From here, he finds that the numerator term, f_mean(u,v)*(t...) is
    # zero, leaving f(x,y)*(t(x-u,y-v)-t_mean) which is a convolution of f
    # by t-t_mean.
    #
    fp1 = fft2(pixels1.astype('float32'), fshape)
    fp2 = fft2(pixels2.astype('float32'), fshape)
    corr12 = ifft2(fp1 * fp2.conj()).real
    #
    # Use the trick of Lewis here - compute the cumulative sums
    # in a fashion that accounts for the parts that are off the
    # edge of the template.
    #
    # We do this in quadrants:
    # q0 q1
    # q2 q3
    # For the first,
    # q0 is the sum over pixels1[i:,j:] - sum i,j backwards
    # q1 is the sum over pixels1[i:,:j] - sum i backwards, j forwards
    # q2 is the sum over pixels1[:i,j:] - sum i forwards, j backwards
    # q3 is the sum over pixels1[:i,:j] - sum i,j forwards
    #
    # The second is done as above but reflected lr and ud
    #
    p1_si = pixels1.shape[0]
    p1_sj = pixels1.shape[1]
    p1_sum = np.zeros(fshape)
    p1_sum[:p1_si, :p1_sj] = cumsum_quadrant(pixels1, False, False)
    p1_sum[:p1_si, -p1_sj:] = cumsum_quadrant(pixels1, False, True)
    p1_sum[-p1_si:, :p1_sj] = cumsum_quadrant(pixels1, True, False)
    p1_sum[-p1_si:, -p1_sj:] = cumsum_quadrant(pixels1, True, True)
    #
    # Divide the sum over the # of elements summed-over
    #
    p1_mean = p1_sum / unit
    p2_si = pixels2.shape[0]
    p2_sj = pixels2.shape[1]
    p2_sum = np.zeros(fshape)
    p2_sum[:p2_si, :p2_sj] = cumsum_quadrant(pixels2, False, False)
    p2_sum[:p2_si, -p2_sj:] = cumsum_quadrant(pixels2, False, True)
    p2_sum[-p2_si:, :p2_sj] = cumsum_quadrant(pixels2, True, False)
    p2_sum[-p2_si:, -p2_sj:] = cumsum_quadrant(pixels2, True, True)
    p2_sum = np.fliplr(np.flipud(p2_sum))
    p2_mean = p2_sum / unit
    #
    # Once we have the means for u,v, we can calculate the
    # variance-like parts of the equation. We have to multiply
    # the mean^2 by the # of elements being summed-over
    # to account for the mean being summed that many times.
    #
    p1sd = np.sum(pixels1 ** 2) - p1_mean ** 2 * np.prod(s)
    p2sd = np.sum(pixels2 ** 2) - p2_mean ** 2 * np.prod(s)
    #
    # There's always a chance of roundoff error for a zero value
    # resulting in a negative sd, so limit the sds here
    #
    sd = np.sqrt(np.maximum(p1sd * p2sd, 0))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        corrnorm = corr12 / sd
    #
    # There's not much information for points where the standard
    # deviation is less than 1/100 of the maximum. We exclude these
    # from consideration.
    #
    corrnorm[(unit < np.prod(s) / 2) & (sd < np.mean(sd) / 100)] = 0
    # Also exclude possibilities with few observed pixels.
    corrnorm[unit < np.prod(s) / 4] = 0
    return corrnorm

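# Hypothetical usage of cross_correlation_2d: the peak of the normalized
# correlation surface gives the alignment offset; indices in the upper half
# of the doubled FFT grid wrap around to negative shifts.
corr = cross_correlation_2d(img_a, img_b)  # img_a, img_b: 2D float arrays
i, j = np.unravel_index(np.argmax(corr), corr.shape)
di = i if i < corr.shape[0] // 2 else i - corr.shape[0]
dj = j if j < corr.shape[1] // 2 else j - corr.shape[1]
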
def FFT2(I):
    # Unitary (symmetrically normalized) centred 2D FFT
    [Ny, Nx] = np.shape(I)
    FT = fftshift(fft2(ifftshift(I))) / np.sqrt(Ny) / np.sqrt(Nx)
    return FT

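# A matching inverse for FFT2 above, keeping the transform pair unitary
# (assumes ifft2 is available from the same FFT module as fft2):
def IFFT2(FT):
    [Ny, Nx] = np.shape(FT)
    I = fftshift(ifft2(ifftshift(FT))) * np.sqrt(Ny) * np.sqrt(Nx)
    return I
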
def cross_correlation_2d(pixels1, pixels2):
    '''Align the second image with the first using max cross-correlation

    returns the x,y offsets to add to image1's indexes to align it with
    image2

    Many of the ideas here are based on the paper, "Fast Normalized
    Cross-Correlation" by J.P. Lewis
    (http://www.idiom.com/~zilla/Papers/nvisionInterface/nip.html)
    which is frequently cited when addressing this problem.

    Note: old_div comes from past.utils (Python 2/3 compatibility shim).
    '''
    #
    # We double the size of the image to get a field of zeros
    # for the parts of one image that don't overlap the displaced
    # second image.
    #
    # Since we're going into the frequency domain, if the images are of
    # different sizes, we can make the FFT shape large enough to capture
    # the period of the largest image - the smaller just will have zero
    # amplitude at that frequency.
    #
    s = np.maximum(pixels1.shape, pixels2.shape)
    fshape = s * 2
    #
    # Calculate the # of pixels at a particular point
    #
    i, j = np.mgrid[-s[0]:s[0], -s[1]:s[1]]
    unit = np.abs(i * j).astype(float)
    unit[unit < 1] = 1  # keeps from dividing by zero in some places
    #
    # Normalize the pixel values around zero which does not affect the
    # correlation, keeps some of the sums of multiplications from
    # losing precision and precomputes t(x-u,y-v) - t_mean
    #
    pixels1 = np.nan_to_num(pixels1 - nanmean(pixels1))
    pixels2 = np.nan_to_num(pixels2 - nanmean(pixels2))
    #
    # Lewis uses an image, f and a template t. He derives a normalized
    # cross correlation, ncc(u,v) =
    # sum((f(x,y)-f_mean(u,v))*(t(x-u,y-v)-t_mean),x,y) /
    # sqrt(sum((f(x,y)-f_mean(u,v))**2,x,y) * (sum((t(x-u,y-v)-t_mean)**2,x,y)
    #
    # From here, he finds that the numerator term, f_mean(u,v)*(t...) is
    # zero, leaving f(x,y)*(t(x-u,y-v)-t_mean) which is a convolution of f
    # by t-t_mean.
    #
    fp1 = fft2(pixels1.astype('float32'), fshape)
    fp2 = fft2(pixels2.astype('float32'), fshape)
    corr12 = ifft2(fp1 * fp2.conj()).real
    #
    # Use the trick of Lewis here - compute the cumulative sums
    # in a fashion that accounts for the parts that are off the
    # edge of the template.
    #
    # We do this in quadrants:
    # q0 q1
    # q2 q3
    # For the first,
    # q0 is the sum over pixels1[i:,j:] - sum i,j backwards
    # q1 is the sum over pixels1[i:,:j] - sum i backwards, j forwards
    # q2 is the sum over pixels1[:i,j:] - sum i forwards, j backwards
    # q3 is the sum over pixels1[:i,:j] - sum i,j forwards
    #
    # The second is done as above but reflected lr and ud
    #
    p1_si = pixels1.shape[0]
    p1_sj = pixels1.shape[1]
    p1_sum = np.zeros(fshape)
    p1_sum[:p1_si, :p1_sj] = cumsum_quadrant(pixels1, False, False)
    p1_sum[:p1_si, -p1_sj:] = cumsum_quadrant(pixels1, False, True)
    p1_sum[-p1_si:, :p1_sj] = cumsum_quadrant(pixels1, True, False)
    p1_sum[-p1_si:, -p1_sj:] = cumsum_quadrant(pixels1, True, True)
    #
    # Divide the sum over the # of elements summed-over
    #
    p1_mean = old_div(p1_sum, unit)
    p2_si = pixels2.shape[0]
    p2_sj = pixels2.shape[1]
    p2_sum = np.zeros(fshape)
    p2_sum[:p2_si, :p2_sj] = cumsum_quadrant(pixels2, False, False)
    p2_sum[:p2_si, -p2_sj:] = cumsum_quadrant(pixels2, False, True)
    p2_sum[-p2_si:, :p2_sj] = cumsum_quadrant(pixels2, True, False)
    p2_sum[-p2_si:, -p2_sj:] = cumsum_quadrant(pixels2, True, True)
    p2_sum = np.fliplr(np.flipud(p2_sum))
    p2_mean = old_div(p2_sum, unit)
    #
    # Once we have the means for u,v, we can calculate the
    # variance-like parts of the equation. We have to multiply
    # the mean^2 by the # of elements being summed-over
    # to account for the mean being summed that many times.
    #
    p1sd = np.sum(pixels1 ** 2) - p1_mean ** 2 * np.prod(s)
    p2sd = np.sum(pixels2 ** 2) - p2_mean ** 2 * np.prod(s)
    #
    # There's always a chance of roundoff error for a zero value
    # resulting in a negative sd, so limit the sds here
    #
    sd = np.sqrt(np.maximum(p1sd * p2sd, 0))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        corrnorm = old_div(corr12, sd)
    #
    # There's not much information for points where the standard
    # deviation is less than 1/100 of the maximum. We exclude these
    # from consideration.
    #
    corrnorm[(unit < old_div(np.prod(s), 2)) &
             (sd < old_div(np.mean(sd), 100))] = 0
    # Also exclude possibilities with few observed pixels.
    corrnorm[unit < old_div(np.prod(s), 4)] = 0
    return corrnorm
