def invert_vort(uc, dx=dx, dy=dy, nx=nx, ny=ny, geom=tad.geom):
    ucv = geom.validview(uc)
    vort = ucv[0]
    u = ucv[1]
    v = ucv[2]
    f = fft2(vort)
    nx, ny = vort.shape
    scal_y = 2*pi/dy/ny
    scal_x = 2*pi/dx/nx
    k = fftfreq(nx, 1/nx)[:, None] * 1j * scal_x
    l = fftfreq(ny, 1/ny)[None, :] * 1j * scal_y
    lapl = k**2 + l**2
    lapl[0, 0] = 1.0
    psi = f/lapl
    u[:] = -real(ifft2(psi * l))
    v[:] = real(ifft2(psi * k))
    return uc
def inverseFilter(img, fftsize):
    im = np.mean(img, 2)/255.
    im_fft = fftpack.fft2(im, (fftsize, fftsize))
    # Complementary of a Gaussian filter
    SZ = fftsize
    sigma = 0.25
    [xx, yy] = np.meshgrid(np.linspace(-4, 4, SZ), np.linspace(-4, 4, SZ))
    gaussian = np.exp(-0.5*(xx*xx + yy*yy)/(sigma*sigma))
    fil = 1. - fftpack.fftshift(gaussian/np.max(gaussian))
    fil_fft = fil
    im_fil_fft = im_fft * fil_fft
    im_fil = np.real(fftpack.ifft2(im_fil_fft))
    hs = np.floor(SZ/2.)
    # Careful with the crop. Because we work directly in the Fourier domain there is no padding.
    im_crop = im_fil[0:im.shape[0], 0:im.shape[1]]
    F = fftpack.fft2(im_crop, (fftsize, fftsize))
    H = fil_fft
    tol = 1e-2
    I = F/H
    print(np.min(I))
    I = np.where(np.abs(H) < tol, 0, I)
    i_reconstructed = np.real(fftpack.ifft2(I))
    plt.imshow(i_reconstructed[:im.shape[0], :im.shape[1]], cmap="gray")
def convolution_fourier_RGB(img, fil_fft, fftsize):
    channelR = np.zeros((img.shape[0], img.shape[1]), 'double')
    channelG = np.zeros((img.shape[0], img.shape[1]), 'double')
    channelB = np.zeros((img.shape[0], img.shape[1]), 'double')
    for x in range(img.shape[0]):
        for y in range(img.shape[1]):
            channelR[x, y] = img[x, y][0]
            channelG[x, y] = img[x, y][1]
            channelB[x, y] = img[x, y][2]
    matrixR_fft = fftpack.fft2(channelR, (fftsize, fftsize))
    matrixG_fft = fftpack.fft2(channelG, (fftsize, fftsize))
    matrixB_fft = fftpack.fft2(channelB, (fftsize, fftsize))
    matrixR_fil_fft = matrixR_fft * fil_fft
    matrixG_fil_fft = matrixG_fft * fil_fft
    matrixB_fil_fft = matrixB_fft * fil_fft
    matrixR_fil = np.real(fftpack.ifft2(matrixR_fil_fft))
    matrixG_fil = np.real(fftpack.ifft2(matrixG_fil_fft))
    matrixB_fil = np.real(fftpack.ifft2(matrixB_fil_fft))
    img_fil = np.zeros((matrixR_fil.shape[0], matrixR_fil.shape[1], 3), 'double')
    for x in range(matrixR_fil.shape[0]):
        for y in range(matrixR_fil.shape[1]):
            img_fil[x, y, 0] = matrixR_fil[x, y]
            img_fil[x, y, 1] = matrixG_fil[x, y]
            img_fil[x, y, 2] = matrixB_fil[x, y]
    return img_fil
def solve(self, u, v, dx, dy):
    import numexpr as ne
    nx, ny = u.shape
    assert u.shape == tuple(self.shape)
    fu = fft2(u)
    fv = fft2(v)
    mpx = self.mpx
    mmx = self.mmx
    dpx = self.dpx
    dmx = self.dmx
    mpy = self.mpy
    mmy = self.mmy
    dpy = self.dpy
    dmy = self.dmy
    d = ne.evaluate("fu*mmy * dmx + fv * mmx * dmy")
    lapl = ne.evaluate("mpy * mmy * dpx * dmx + mpx*mmx *dpy *dmy")
    lapl[0, 0] = 1.0
    p = d / lapl
    px = np.real(ifft2(mpy * dpx * p))
    py = np.real(ifft2(mpx * dpy * p))
    # self.p = np.real(ifft2(p))
    u -= px
    v -= py
    return px, py
def colorize_by_power(self, image):
    """
    Colorize the image mode-by-mode according to the power in each mode.
    The top third of modes are colored red, the middle third green, and the
    lower third blue. For RGB images, a grayscale equivalent is computed and
    colorized.
    """
    print("colorizing.....")
    if len(image.shape) == 3:
        power = np.abs(fft2(np.sum(image, axis=2)))**2
    elif len(image.shape) == 2:
        power = np.abs(fft2(image))**2
    else:
        raise Exception("Invalid image shape: {}".format(image.shape))
    thirds = (power.max() - power.min())/3.0
    third_cut = power.min() + thirds
    twothird_cut = third_cut + thirds
    lower = power < third_cut
    upper = power > twothird_cut
    middle = ~(lower | upper)
    colorized = np.zeros((power.shape[0], power.shape[1], 3), dtype=np.uint8)
    for color, region in enumerate([upper, middle, lower]):
        new_channel = np.real(ifft2(np.where(region, power, 0.0)))
        shifted = (new_channel - new_channel.min())
        scaled = 255.0*shifted/shifted.max()
        colorized[..., color] = scaled
    return colorized
def step4(self):
    '''
    Perform a 4th order timestep
    '''
    def order2(c):
        Vc = np.exp(-1j * c * self.dt / 2. *
                    (self.V - self.gravity() + self.g * abs(self.psi) ** 2))
        Tc = self.expksquare ** c
        return Vc, Tc

    def kinetic(Tc, psi):
        # one kinetic substep applied in Fourier space
        return ff.fftshift(ff.ifft2(Tc * ff.fft2(ff.fftshift(psi))))

    p = 1/(4. - 4.**(1/3.))
    q = 1 - 4 * p
    Vp, Tp = order2(p)
    Vq, Tq = order2(q)

    # Compose the second-order substeps; equivalent to the original deeply nested expression.
    psi = kinetic(Tp, Vp * self.psi)
    psi = kinetic(Tp, Vp ** 2 * psi)
    psi = kinetic(Tq, Vq * Vp * psi)
    psi = kinetic(Tp, Vp * Vq * psi)
    psi = kinetic(Tp, Vp ** 2 * psi)
    return Vp * psi
def spec2grid(sfield):
    """ Transform one frame of SQG model output to a gridded (physical)
    representation. Assumes 'sfield' to be the upper-half plane, and specifies
    the lower-half plane by conjugate symmetry (since the physical field is
    assumed real-valued). The input field should have dimensions
    (..., kmax+1, 2*kmax+1, nz), where kmax = 2^n - 1, hence the physical
    resolution will be 2^(n+1) x 2^(n+1).

    NOTE: the top row of the input field corresponds to ky = 0; the kx < 0 part
    is NOT assumed a priori to be conjugate-symmetric with the kx > 0 part.
    NOTE: grid2spec(spec2grid(fk)) = fk.
    OPTIONAL: da = true pads the input with 0s before transforming to grid
    space, for dealiased products. Default is da = false.

    Args:
        sfield: complex spectrum field with shape (t(optional), ky, kx, z(optional))
    """
    if not _is_single_layer(sfield):
        hres = sfield.shape[-2] + 1
        fk = fullspec(sfield)
        fk = fftpack.ifftshift(fk, axes=(-2, -3))
        return hres*hres*np.real(fftpack.ifft2(fk, axes=(-2, -3)))
    else:
        hres = sfield.shape[-1] + 1
        fk = fullspec(sfield, True)
        fk = fftpack.ifftshift(fk, axes=(-1, -2))
        return hres*hres*np.real(fftpack.ifft2(fk, axes=(-1, -2)))
def shift_inner(arr, nx, ny, window=False, padding='reflect'):
    """
    Shifts an array by nx and ny respectively.
    """
    if ((nx % 1. == 0.) and (ny % 1. == 0)):
        return sp.roll(sp.roll(arr, int(ny), axis=0), int(nx), axis=1)
    else:
        atype = arr.dtype
        if padding:
            x, y = arr.shape
            # zero- or reflect-pad to the next power-of-two size (>= 1.5x the input)
            pwx = int(pow(2., np.ceil(np.log2(1.5*arr.shape[0]))))
            pwy = int(pow(2., np.ceil(np.log2(1.5*arr.shape[1]))))
            pwx2, pwy2 = (pwx-x)//2, (pwy-y)//2
            if padding == 'zero':
                arr = np.pad(arr, pad_width=((pwx2, pwx2), (pwy2, pwy2)), mode='constant')
            else:
                arr = np.pad(arr, pad_width=((pwx2, pwx2), (pwy2, pwy2)), mode='reflect')
        phaseFactor = sp.exp(complex(0., -2.*sp.pi) *
                             (ny*spf.fftfreq(arr.shape[0])[:, np.newaxis] +
                              nx*spf.fftfreq(arr.shape[1])[np.newaxis, :]))
        if window:
            window = spf.fftshift(CXData._tukeywin(arr.shape[0], alpha=0.35))
            arr = spf.ifft2(spf.fft2(arr)*phaseFactor*window)
        else:
            arr = spf.ifft2(spf.fft2(arr)*phaseFactor)
        if padding:
            arr = arr[pwx//4:3*pwx//4, pwy//4:3*pwy//4]
        if atype == 'complex':
            return arr
        else:
            return np.real(arr)
def find_foci(evt, type, key, type2, key2, minPhase=-500000, maxPhase=500000, steps=101,
              field_of_view_rad=100, wavelength=1.053, CCD_S_DIST=0.375, PX_SIZE=75e-6):
    img = evt[type][key].data
    centroids = evt[type2][key2].data
    Nfoci = centroids.shape[0]
    Xrange, Yrange = img.shape
    Npixel = field_of_view_rad
    p = numpy.linspace(-Xrange/2, Xrange/2-1, Xrange)
    q = numpy.linspace(-Yrange/2, Yrange/2-1, Yrange)
    pp, qq = numpy.meshgrid(p, q)
    phase_matrix = (2*numpy.pi/wavelength)*numpy.sqrt(1-((PX_SIZE/CCD_S_DIST)**2)*(qq**2 + pp**2))
    prop_length = numpy.linspace(minPhase, maxPhase, steps)
    variance = numpy.zeros([steps, Nfoci])
    # shift stuff for performance reasons
    img_shifted = fftshift(img)
    phase_matrix_shifted = fftshift(phase_matrix)
    for idx, phase in enumerate(prop_length):
        img_propagated = img_shifted * numpy.exp(1.j*phase*phase_matrix_shifted)
        recon = fftshift(ifft2(img_propagated))
        for CC in numpy.arange(Nfoci):
            centerx, centery = centroids[CC, :]
            # print centerx, centery
            reconcut = numpy.abs(recon[numpy.max([0, centerx-Npixel-1]).astype(int):
                                       numpy.min([Xrange-1, centerx+Npixel]).astype(int),
                                       numpy.max([0, centery-Npixel-1]).astype(int):
                                       numpy.min([Yrange-1, centery+Npixel]).astype(int)])
            variance[idx, CC] = reconcut.var()
    focus_distance = numpy.zeros(Nfoci)
    CC_size = numpy.zeros(Nfoci)
    focused_CC = numpy.zeros(4*Npixel**2 * Nfoci).reshape(Nfoci, 2*Npixel, 2*Npixel)
    for CC in numpy.arange(Nfoci):
        ind_max = numpy.argmax(variance[:, CC])
        tmp = variance[:, CC]
        # get max which is not at border
        loc_max_bool = numpy.r_[True, tmp[1:] > tmp[:-1]] & numpy.r_[tmp[:-1] > tmp[1:], True]
        loc_max_bool[0] = False
        loc_max_bool[-1] = False
        ind_max = numpy.argmax(tmp*loc_max_bool)
        focus_distance[CC] = prop_length[ind_max]
        img_propagated = img_shifted * numpy.exp(1.j * focus_distance[CC] * phase_matrix_shifted)
        recon = fftshift(ifft2(img_propagated))
        centerx, centery = centroids[CC, :]
        reconcut = numpy.real(recon[numpy.max([0, centerx-Npixel]).astype(int):
                                    numpy.min([Xrange-1, centerx+Npixel]).astype(int),
                                    numpy.max([0, centery-Npixel]).astype(int):
                                    numpy.min([Yrange-1, centery+Npixel]).astype(int)])
        focused_CC[CC, 0:reconcut.shape[0], 0:reconcut.shape[1]] = reconcut
        CC_size[CC] = numpy.sum(get_CC_size(reconcut))
    if len(focused_CC):
        add_record(evt["analysis"], "analysis", "focused_CC", focused_CC[0])
        add_record(evt["analysis"], "analysis", "focus distance", focus_distance)
        add_record(evt["analysis"], "analysis", "CC_size", CC_size)
        add_record(evt["analysis"], "analysis", "propagation length", prop_length)
def ifft(a, overwrite=False, shift=True):
    if shift:
        res = fftpack.ifft2(fftpack.fftshift(a, axes=[0, 1]), axes=[0, 1],
                            overwrite_x=overwrite)
    else:
        res = fftpack.ifft2(a, overwrite_x=overwrite)
    return res
def calc_gradients(inputvar, kxgrid, kygrid):
    var_k = fft2(np.complex128(inputvar))
    gradx_k = 1j * kxgrid * var_k
    grady_k = 1j * kygrid * var_k
    dvar_dx = np.real(ifft2(gradx_k))
    dvar_dy = np.real(ifft2(grady_k))
    grad2 = dvar_dx**2 + dvar_dy**2
    return dvar_dx, dvar_dy, grad2
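# --- Illustrative usage sketch (not from the original source) ---
# calc_gradients expects kxgrid/kygrid to hold angular wavenumbers in FFT order,
# varying along the same axes as x and y in the input field. One common way to
# build them for a periodic box of size Lx x Ly (an assumption, the original
# construction is not shown) is:
import numpy as np
from numpy.fft import fftfreq

nx, ny, Lx, Ly = 64, 64, 2*np.pi, 2*np.pi
kx = 2*np.pi*fftfreq(nx, d=Lx/nx)
ky = 2*np.pi*fftfreq(ny, d=Ly/ny)
kxgrid, kygrid = np.meshgrid(kx, ky, indexing='ij')

x, y = np.meshgrid(np.arange(nx)*Lx/nx, np.arange(ny)*Ly/ny, indexing='ij')
field = np.sin(x)*np.cos(y)
dfdx, dfdy, grad2 = calc_gradients(field, kxgrid, kygrid)  # dfdx ~ cos(x)*cos(y)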
def f(vort, t, k=k, l=l, lapl=lapl):
    fv = fft2(vort)
    psi = fv/lapl
    u = ifft2(-psi * l)
    v = ifft2(psi * k)
    adv = -(u * ifft2(fv*k) + v*ifft2(fv*l))  # + ifft2(lapl * fv/R)
    return adv
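# --- Illustrative setup sketch (not from the original source) ---
# The RHS function above captures k, l and lapl as default arguments. A typical
# way to build these spectral operators for an nx x ny periodic grid with
# spacing dx, dy (an assumption, mirroring invert_vort above) is:
import numpy as np
from numpy import pi
from numpy.fft import fftfreq

nx, ny, dx, dy = 128, 128, 0.1, 0.1
k = 1j * 2*pi*fftfreq(nx, d=dx)[:, None]   # d/dx in spectral space
l = 1j * 2*pi*fftfreq(ny, d=dy)[None, :]   # d/dy in spectral space
lapl = k**2 + l**2
lapl[0, 0] = 1.0                           # avoid division by zero at the mean mode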
def calcAcovf2d(self):
    """Calculate the 2d auto covariance function. """
    # See the Wiener-Khinchin theorem
    if self.shift:
        # Note, the ACovF needs the unshifted 2d PSD for the inverse FFT, so unshift.
        # Then shift back again.
        self.acovf = fftpack.fftshift(fftpack.ifft2(fftpack.ifftshift(self.psd2d)))
    else:
        self.acovf = fftpack.ifft2(self.psd2d)
    return
def FilterElectrons(self, sign, Psi):
    '''
    Routine that uses the Fourier transform to filter positrons/electrons
    Options:
        sign = 1   Leaves electrons
        sign = -1  Leaves positrons
    '''
    print(' ')
    print(' Filter Electron routine ')
    print(' ')
    px = self.c*self.Px
    py = self.c*self.Py
    m = self.mass
    c = self.c
    energy = np.sqrt((m*c**2)**2 + px**2 + py**2)
    EP_11 = 1. + sign*m*c**2/energy
    EP_12 = 0.
    EP_13 = 0.
    EP_14 = sign*(px - 1j*py)/energy
    EP_21 = 0.
    EP_22 = 1. + sign*m*c**2/energy
    EP_23 = sign*(px + 1j*py)/energy
    EP_24 = 0.
    EP_31 = 0.
    EP_32 = sign*(px - 1j*py)/energy
    EP_33 = 1. - sign*m*c**2/energy
    EP_34 = 0.
    EP_41 = sign*(px + 1j*py)/energy
    EP_42 = 0.
    EP_43 = 0.
    EP_44 = 1. - sign*m*c**2/energy
    # Psi1, Psi2, Psi3, Psi4 = Psi
    psi1_fft = fftpack.fft2(Psi[0])
    psi2_fft = fftpack.fft2(Psi[1])
    psi3_fft = fftpack.fft2(Psi[2])
    psi4_fft = fftpack.fft2(Psi[3])
    psi1_fft_electron = EP_11*psi1_fft + EP_12*psi2_fft + EP_13*psi3_fft + EP_14*psi4_fft
    psi2_fft_electron = EP_21*psi1_fft + EP_22*psi2_fft + EP_23*psi3_fft + EP_24*psi4_fft
    psi3_fft_electron = EP_31*psi1_fft + EP_32*psi2_fft + EP_33*psi3_fft + EP_34*psi4_fft
    psi4_fft_electron = EP_41*psi1_fft + EP_42*psi2_fft + EP_43*psi3_fft + EP_44*psi4_fft
    return np.array([fftpack.ifft2(psi1_fft_electron),
                     fftpack.ifft2(psi2_fft_electron),
                     fftpack.ifft2(psi3_fft_electron),
                     fftpack.ifft2(psi4_fft_electron)])
def _FilterElectrons(self, sign):
    '''
    Routine that uses the Fourier transform to filter positrons/electrons
    Options:
        sign = 1   Leaves electrons
        sign = -1  Leaves positrons
    '''
    print(' ')
    print(' Filter Electron routine ')
    print(' ')
    min_Px = np.pi*self.X_gridDIM/(2*self.min_X)
    dPx = 2*np.abs(min_Px)/self.X_gridDIM
    px_Vector = fftpack.fftshift(np.linspace(min_Px, np.abs(min_Px) - dPx, self.X_gridDIM))
    min_Py = np.pi*self.Y_gridDIM/(2*self.min_Y)
    dPy = 2*np.abs(min_Py)/self.Y_gridDIM
    py_Vector = fftpack.fftshift(np.linspace(min_Py, np.abs(min_Py) - dPy, self.Y_gridDIM))
    px = px_Vector[np.newaxis, :]
    py = py_Vector[:, np.newaxis]
    sqrtp = sign*2*np.sqrt(self.mass*self.mass*self.c**4 + self.c*self.c*px*px + self.c*self.c*py*py)
    aa = sign*self.mass*self.c*self.c/sqrtp
    bb = sign*(px/sqrtp - 1j*py/sqrtp)
    cc = sign*(px/sqrtp + 1j*py/sqrtp)
    ElectronProjector = np.matrix([
        [0.5+aa, 0.,      0.,      bb],
        [0.,     0.5+aa,  cc,      0.],
        [0.,     bb,      0.5-aa,  0.],
        [cc,     0.,      0.,      0.5-aa]])
    psi1_fft = fftpack.fft2(self.Psi1_init)
    psi2_fft = fftpack.fft2(self.Psi2_init)
    psi3_fft = fftpack.fft2(self.Psi3_init)
    psi4_fft = fftpack.fft2(self.Psi4_init)
    psi1_fft_electron = ElectronProjector[0, 0]*psi1_fft + ElectronProjector[0, 1]*psi2_fft + \
        ElectronProjector[0, 2]*psi3_fft + ElectronProjector[0, 3]*psi4_fft
    psi2_fft_electron = ElectronProjector[1, 0]*psi1_fft + ElectronProjector[1, 1]*psi2_fft + \
        ElectronProjector[1, 2]*psi3_fft + ElectronProjector[1, 3]*psi4_fft
    psi3_fft_electron = ElectronProjector[2, 0]*psi1_fft + ElectronProjector[2, 1]*psi2_fft + \
        ElectronProjector[2, 2]*psi3_fft + ElectronProjector[2, 3]*psi4_fft
    psi4_fft_electron = ElectronProjector[3, 0]*psi1_fft + ElectronProjector[3, 1]*psi2_fft + \
        ElectronProjector[3, 2]*psi3_fft + ElectronProjector[3, 3]*psi4_fft
    self.Psi1_init = fftpack.ifft2(psi1_fft_electron)
    self.Psi2_init = fftpack.ifft2(psi2_fft_electron)
    self.Psi3_init = fftpack.ifft2(psi3_fft_electron)
    self.Psi4_init = fftpack.ifft2(psi4_fft_electron)
def integration_and_analysis(zeta0, phi0, f_zeta_t, f_phi_t, M, kxgrid, kygrid, ktgrid,
                             dt, time, damping, storage, wmax, wmin):
    '''
    Perform the 4th order Runge-Kutta integration scheme for surface and potential.
    zeta0: surface at time step n.
    phi0: potential at time step n.
    f_zeta_t, f_phi_t: functions with Euler eqs of M-order to be solved, found with
        derive_euler_equation_functions(M).
    time: time at which the integration takes place.
    damping: non-linear damping factor in the Euler eqs.
    Return: surface and potential at time step n+1. Other variables like the orders
        of phi can easily be returned!
    '''
    zeta = zeta0
    phi = phi0
    rk1_zeta, rk1_phi, phi_m = tderiv_surface_potential(zeta, phi, f_zeta_t, f_phi_t, M,
                                                        kxgrid, kygrid, ktgrid, time, damping,
                                                        return_phi_m=1)
    zeta = zeta0 + rk1_zeta*dt/2
    phi = phi0 + rk1_phi*dt/2
    rk2_zeta, rk2_phi = tderiv_surface_potential(zeta, phi, f_zeta_t, f_phi_t, M,
                                                 kxgrid, kygrid, ktgrid, time + dt/2, damping)
    zeta = zeta0 + rk2_zeta*dt/2
    phi = phi0 + rk2_phi*dt/2
    rk3_zeta, rk3_phi = tderiv_surface_potential(zeta, phi, f_zeta_t, f_phi_t, M,
                                                 kxgrid, kygrid, ktgrid, time + dt/2, damping)
    zeta = zeta0 + rk3_zeta*dt
    phi = phi0 + rk3_phi*dt
    rk4_zeta, rk4_phi = tderiv_surface_potential(zeta, phi, f_zeta_t, f_phi_t, M,
                                                 kxgrid, kygrid, ktgrid, time + dt, damping)
    dzeta_dt = 1/6 * (rk1_zeta + rk4_zeta + 2*(rk2_zeta + rk3_zeta))
    dphi_dt = 1/6 * (rk1_phi + rk4_phi + 2*(rk2_phi + rk3_phi))
    kernel = monitor_conserved_quantities(phi0, zeta0, dzeta_dt, kxgrid, kygrid)
    print('Total Energy: ' + str(kernel['kin'] + kernel['poten']) +
          ' Total Mass:' + str(kernel['mass']))
    # Transform, dealias, then transform back as real.
    zeta_next = np.real(ifft2(dealias(fft2(zeta0 + dt*dzeta_dt), M)))
    phi_next = np.real(ifft2(dealias(fft2(phi0 + dt*dphi_dt), M)))
    storage, wmax, wmin = detect_rogue_waves(zeta0, zeta_next, wmax, wmin, sig_h, storage, time)
    return zeta_next, phi_next, phi_m, storage, wmax, wmin
def solve_sqg(self):
    import scipy.fftpack as fft
    import numpy as np
    from math import pi
    self.precondition()
    dx = self.dx
    dy = self.dy
    rhos = self.ssd
    bhat = fft.fft2(-9.81 * rhos / self.rho0)  # calculate buoyancy
    ny, nx = rhos.shape
    nz = self.nz
    k = 2 * pi * fft.fftfreq(nx)
    l = 2 * pi * fft.fftfreq(ny)
    ipsihat = np.zeros((nz+3, ny, nx))*complex(0, 0)
    Q = np.zeros((nz + 1, 1), dtype='float64')
    Q[[0, -1]] = 0.0  # for interior PV, not used in this version
    # cutoff value
    ck, cl = 2 * pi / self.filterL, 2 * pi / self.filterL
    # loop through wavenumbers
    bhats = np.zeros_like(bhat)
    for ik in np.arange(k.size):
        for il in np.arange(l.size):
            wv2 = ((k[ik] / dx[il, 0]) ** 2 + (l[il] / dy[0, ik]) ** 2)
            if wv2 > (ck * ck + cl * cl):
                bhats[il, ik] = bhat[il, ik]
                right = - bhat[il, ik] / self.f0 * self.Rp
                left = self.M - wv2 * np.eye(self.nz+1)
                ipsihat[1:-1, il, ik] = np.linalg.solve(left, right).flatten()
            else:
                print('skip k(ik,il)', ik, il, "wv2 = ", wv2)
    for k in range(1, nz+2):
        ipsihat[k, :, :] = (fft.ifft2(ipsihat[k, :, :]))
    if self.bottomboundary == 'psi=0':
        self.psis = np.r_[(np.real(ipsihat)), np.zeros((1, ny, nx))]
    else:
        self.psis = np.real(ipsihat)
    self.psis[0, :, :] = self.psis[1, :, :]
    self.psis[-1, :, :] = self.psis[-2, :, :] - self.dzc[-1]*np.real(fft.ifft2(-bhats))/self.f0
    self.rhos = self.psi2rho(self.psis)
    self.us, self.vs = psi2uv(self.lon, self.lat, self.psis)
    return
def main():
    fftsize = 1024
    # img = io.imread("torre.jpg")
    # print "Shape: ", img.shape
    # im = np.mean(img, axis=2)/255.
    # im_fft = fftpack.fft2(im, (fftsize, fftsize))
    # F = np.log(1+np.abs(im_fft))
    # recovered = np.real(fftpack.ifft2(im_fft))
    a = np.zeros((3, 3), dtype=float)
    b = np.ones((3, 3), dtype=float)
    a[0][0] = 2
    a[0][1] = 3
    a[0][2] = 1
    # plt.show()
    # plt.imshow(im, cmap='gray')
    # plt.title('Greyscale image')
    # blurImg(im, fftsize)
    imA = io.imread("torre.jpg")
    im = np.mean(imA, 2)/255.
    im_fft = fftpack.fft2(im, (fftsize, fftsize))
    # Complementary of a Gaussian filter
    SZ = 1024
    sigma = 0.25
    [xx, yy] = np.meshgrid(np.linspace(-4, 4, SZ), np.linspace(-4, 4, SZ))
    gaussian = np.exp(-0.5*(xx*xx+yy*yy)/(sigma*sigma))
    fil = 1. - fftpack.fftshift(gaussian/np.max(gaussian))
    fil_fft = fil
    im_fil_fft = im_fft * fil_fft
    im_fil = np.real(fftpack.ifft2(im_fil_fft))
    hs = np.floor(SZ/2.)
    # Careful with the crop. Because we work directly in the Fourier domain there is no padding.
    im_crop = im_fil[0:im.shape[0], 0:im.shape[1]]
    F = fftpack.fft2(im_crop, (1024, 1024))
    H = fil_fft
    tol = 1e-2
    I = F/H
    print(np.min(I))
    I = np.where(np.abs(H) < tol, 0, I)
    i_reconstructed = np.real(fftpack.ifft2(I))
    plt.imshow(i_reconstructed[:im.shape[0], :im.shape[1]], cmap="gray")
def make_initial_surface(spectrum, ktgrid, dkx_dky):
    '''Make an initial surface and potential from a spectrum by giving each mode a random phase'''
    random_phase = (2*pi)*np.random.random(np.shape(spectrum))
    amplitude_k = np.sqrt(2*spectrum*dkx_dky)
    wk = np.sqrt(ktgrid*grav)
    wk[0, 0] = 1
    surface_k = amplitude_k * np.exp(1j * random_phase)
    potential_k = 1j * grav / wk * surface_k
    potential_k[0, 0] = 0.0
    surface = np.real(ifft2(surface_k))
    potential = np.real(ifft2(potential_k))
    return surface, potential
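# --- Illustrative setup sketch (not from the original source) ---
# make_initial_surface relies on module-level `pi`, `grav` and an FFT-ordered
# wavenumber-magnitude grid `ktgrid`; a minimal way to build its inputs (the
# spectrum below is a placeholder, not a physical wave spectrum) could be:
import numpy as np
from numpy import pi
from numpy.fft import fftfreq

grav = 9.81
nx = ny = 128
dx = dy = 1.0
kx = 2*pi*fftfreq(nx, d=dx)
ky = 2*pi*fftfreq(ny, d=dy)
kxg, kyg = np.meshgrid(kx, ky, indexing='ij')
ktgrid = np.sqrt(kxg**2 + kyg**2)
dkx_dky = (kx[1]-kx[0]) * (ky[1]-ky[0])
spectrum = np.exp(-ktgrid**2)
surface, potential = make_initial_surface(spectrum, ktgrid, dkx_dky)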
def correlate_layer(pattern_layer, source_layer):
    """ Normalized Cross-Correlation for a single channel of an RGB image
    (or a greyscale image).

    Normalization is done as follows:
        normalized = (x - mean(x)) / std(x)

    pattern_layer - Two-dimensional ndarray, single channel of pattern image
    source_layer  - Two-dimensional ndarray, single channel of source image
    """
    # http://bit.ly/WsRveH
    if pattern_layer.std() == 0:
        normalized_pattern = pattern_layer
    else:
        normalized_pattern = ((pattern_layer - np.mean(pattern_layer)) /
                              (np.std(pattern_layer) * pattern_layer.size))
    if source_layer.std() == 0:
        normalized_source = source_layer
    else:
        normalized_source = ((source_layer - np.mean(source_layer)) /
                             np.std(source_layer))
    # Take the fft of both images, padding the pattern out with 0's
    # to be the same shape as the source
    pattern_fft = fftpack.fft2(normalized_pattern, source_layer.shape)
    source_fft = fftpack.fft2(normalized_source)
    # Perform the correlation in the frequency domain, which is just the
    # inverse FFT of the pattern matrix's conjugate * the source matrix
    # http://en.wikipedia.org/wiki/Cross-correlation#Properties
    return fftpack.ifft2(pattern_fft.conjugate() * source_fft)
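# --- Illustrative usage sketch (not from the original source) ---
# The peak of the (real part of the) correlation surface marks where the
# pattern sits inside the source image:
import numpy as np

source = np.random.rand(256, 256)
pattern = source[40:72, 100:132]              # a known 32x32 cut-out
corr = np.real(correlate_layer(pattern, source))
peak = np.unravel_index(np.argmax(corr), corr.shape)
print(peak)                                   # typically close to (40, 100)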
def rescale_target_superpixel_resolution(E_target):
    '''Rescale the target field to the superpixel resolution (currently only 4x4 superpixels implemented)'''
    superpixelSize = 4
    ny, nx = scipy.shape(E_target)
    maskCenterX = scipy.ceil((nx+1)/2)
    maskCenterY = scipy.ceil((ny+1)/2)
    nSuperpixelX = int(nx/superpixelSize)
    nSuperpixelY = int(ny/superpixelSize)
    FourierMaskSuperpixelResolution = fourier_mask(ny, nx, superpixelSize)
    E_target_ft = fft.fftshift(fft.fft2(fft.ifftshift(E_target)))
    # Apply mask
    E_target_ft = FourierMaskSuperpixelResolution*E_target_ft
    # Remove zeros outside of mask (slice bounds cast to int so the indexing is valid)
    E_superpixelResolution_ft = E_target_ft[
        int(maskCenterY - scipy.ceil((nSuperpixelY-1)/2) - 1):int(maskCenterY + scipy.floor((nSuperpixelY-1)/2)),
        int(maskCenterX - scipy.ceil((nSuperpixelX-1)/2) - 1):int(maskCenterX + scipy.floor((nSuperpixelX-1)/2))]
    # Add phase gradient to compensate for anomalous 1.5 pixel shift in real plane
    phaseFactor = [[(scipy.exp(2*1j*pi*((k+1)/nSuperpixelY+(j+1)/nSuperpixelX)*3/8))
                    for j in range(nSuperpixelX)] for k in range(nSuperpixelY)]  # QUESTION
    E_superpixelResolution_ft = E_superpixelResolution_ft*phaseFactor
    # Fourier transform back to DMD plane
    E_superpixelResolution = fft.fftshift(fft.ifft2(fft.ifftshift(E_superpixelResolution_ft)))
    return E_superpixelResolution
def matched_filter(self, noisesigma):
    self.Wl = 1.e0/((self.Cl+self.Clnoise)*self.Bl)
    self.Wk = np.interp(self.modk, self.k, self.Wl)/noisesigma
    self.snFxy = self.Fxy * self.Wk
    self.snxy = np.real(ft.ifft2(ft.fftshift(self.snFxy)))
def deflection_calculation(x, y, topo, rho_t, rho_c, rho_m, Te, E, nu, padding=0):
    """
    Calculates the deflection due to a topographic load for a plate of constant
    thickness Te. Uses the equation:

        F[w] = rho_t/(rho_m - rho_c) phi_e(k) F[topo]
    """
    ny, nx = np.shape(topo)
    dx = abs(x[0][1] - x[0][0])
    dy = abs(y[1][0] - y[0][0])
    if padding != 0:
        ny_pad, nx_pad = ny*padding, nx*padding
        topo = np.pad(topo, (ny_pad, nx_pad), 'constant', constant_values=0)
    else:
        nx_pad, ny_pad = 0, 0
    fx = fftpack.fftfreq(nx + 2*nx_pad, dx)
    fy = fftpack.fftfreq(ny + 2*ny_pad, dy)
    fx, fy = np.meshgrid(fx, fy)
    k = 2*np.pi*np.sqrt(fx**2 + fy**2)
    F_w = rho_t/(rho_m - rho_c)*phi_e(k, Te, rho_c, rho_m, E, nu)*fftpack.fft2(topo)
    w = np.real(fftpack.ifft2(F_w))
    if padding != 0:
        w = w[ny_pad:-ny_pad, nx_pad:-nx_pad]
    return w
def gamma_to_kappa(shear, dt1, dt2=None):
    """
    simple application of Kaiser-Squires (1995) kernel in fourier space
    to convert complex shear to complex convergence:
    imaginary part of convergence is B-mode.
    """
    if not dt2:
        dt2 = dt1
    N1, N2 = shear.shape
    # convert angles from arcminutes to radians
    dt1 = dt1 * numpy.pi / 180. / 60.
    dt2 = dt2 * numpy.pi / 180. / 60.
    # compute k values corresponding to field size
    dk1 = numpy.pi / N1 / dt1
    dk2 = numpy.pi / N2 / dt2
    k1 = fftpack.ifftshift(dk1 * (numpy.arange(2*N1)-N1))
    k2 = fftpack.ifftshift(dk2 * (numpy.arange(2*N2)-N2))
    ipart, rpart = numpy.meshgrid(k2, k1)
    k = rpart + 1j*ipart
    # compute (inverse) Kaiser-Squires kernel on this grid
    fourier_map = numpy.conj(KS_kernel(-k))
    # compute Fourier transform of the shear
    gamma_fft = fftpack.fft2(shear, (2*N1, 2*N2))
    kappa_fft = fourier_map * gamma_fft
    kappa = fftpack.ifft2(kappa_fft)[:N1, :N2]
    return kappa
def _getCrossCorrelation(self, ref, mask, fft_ref=False, fft_mask=False):
    """
    Computes the cross correlation between reference and mask images.
    For parameter description, refer to <self._getDriftValue()>
    """
    # Images should be square and of same dimensions at this point.
    assert (ref.shape == mask.shape)
    if not fft_ref:
        four_ref = fft2(ref)
    else:
        four_ref = ref
    if not fft_mask:
        # Crop the mask and replace the edges with 0.0 values. Helps the cross-correlation.
        if self.cropping:
            size = min(mask.shape)
            crop = self.cropping
            mask_cropped = np.copy(mask[crop:(size-crop), crop:(size-crop)])
            mask_padded = np.pad(mask_cropped, crop, mode='constant')
            four_mask = fft2(mask_padded)
        else:
            four_mask = fft2(mask)
    else:
        four_mask = mask
    # Conjugate the mask.
    four_mask_conj = np.conjugate(four_mask)
    # Compute pointwise product of reference and mask.
    product = np.multiply(four_mask_conj, four_ref)
    # Compute ifft of this product
    xcorr = ifft2(product)
    # Take the absolute value
    xcorr_abs = np.absolute(xcorr)
    return xcorr_abs
def mvd_wiener(initImg, imgList, psfList, iterNum, mu, positiveOnly=True):
    if positiveOnly:
        initImg[initImg < 0.0] = 0.0
    viewNum = len(imgList)
    fftFactor = np.sqrt(initImg.shape[0]*initImg.shape[1])
    mu = mu * fftFactor
    I = np.sum(np.abs(initImg))
    e = fft2(initImg)
    e_img_old = initImg
    e_img = initImg
    if iterNum == 0:
        return e_img
    # pre-compute spectra
    ijList = [fft2(img) for img in imgList]
    pjList = [fft2(pad_and_center_psf(psf, initImg.shape)) for psf in psfList]
    for i in range(iterNum):
        c_all = np.zeros(e.shape, dtype=float)
        for j in range(viewNum):
            ij = ijList[j]
            pj = pjList[j]
            sj = e * pj
            cj = (np.conj(pj) * (ij - sj))/(np.square(np.abs(pj)) + mu**2)
            c_all = c_all + cj / float(viewNum)
        e = e + c_all
        e_img = np.real(ifft2(e))
        if positiveOnly:
            e_img[e_img < 0.0] = 0.0
        e_img = e_img / np.sum(np.abs(e_img)) * I
        e = fft2(e_img)
        print('iter #%d, total change: %f.' %
              (i+1, np.sum(np.abs(e_img_old-e_img))/I))
        e_img_old = e_img
    return e_img
def ifft2(x):
    # Wrapper for ifft2 that handles CXData objects
    if isinstance(x, CXData):
        l = []
        for i in range(len(x)):
            l.append(spf.ifft2(x.data[i]))
        return CXData(data=l)
    elif isinstance(x, CXModal):
        l = []
        for mode in range(len(x.modes)):
            l.append(ifft2(x.modes[mode]))
        return CXModal(modes=l)
    elif isinstance(x, np.ndarray):
        return spf.ifft2(x)
    else:
        raise Exception('Unknown data type passed to ifft2')
def invertFft(self, useI=False, verbose=False):
    """Convert the 2d FFT into an image (imageI)."""
    # Checking this process with a simple (non-noisy) image shows that it will
    # result in errors on the level of 1e-15 counts (in an original image with
    # min/max scale of 1.0).
    if useI:
        fimage = self.fimageI
    else:
        fimage = self.fimage
    if self.shift:
        self.imageI = fftpack.ifft2(fftpack.ifftshift(fimage))
    else:
        self.imageI = fftpack.ifft2(fimage)
    if self.imageI.imag.max() < 1e-14:
        if verbose:
            print("Inverse FFT created only small imaginary portion - discarding.")
        self.imageI = self.imageI.real
    return
def InvLaplacian(field, length=None):
    if length is None:
        length = 2*pi
    N = shape(field)[0]
    k = array(range(N), dtype=complex128)
    k = concatenate((range(0, N//2), range(-N//2, 0)))
    k = k * (2*pi)/length
    [KX, KY] = meshgrid(k, k)
    """
    We are trying to solve d_yy(eta) + d_xx(eta) = p
    Therefore, in Fourier space, it will become
        (-(kx^2 + ky^2)) etaHat = pHat
    """
    delsq = -(KX*KX + KY*KY)
    delsq[0, 0] = 1
    # tmp = fft(field, axis=0)
    # tmp = fft(tmp, axis=1)
    tmp = fft2(field)
    tmp = tmp/delsq
    [xval, yval] = shape(tmp)
    tmp[xval//3:2*xval//3, yval//3:2*yval//3] = 0
    # tmp = ifft(tmp, axis=1)
    # tmp = ifft(tmp, axis=0)
    tmp = ifft2(tmp)
    return tmp.real
def kappa_to_gamma(kappa, dt1, dt2=None):
    """
    simple application of Kaiser-Squires (1995) kernel in fourier space
    to convert complex convergence to complex shear:
    imaginary part of the shear is B-mode.
    """
    if not dt2:
        dt2 = dt1
    N1, N2 = kappa.shape
    # convert angles from arcminutes to radians
    dt1 = dt1 * np.pi / 180. / 60.
    dt2 = dt2 * np.pi / 180. / 60.
    # compute k values corresponding to field size
    dk1 = np.pi / N1 / dt1
    dk2 = np.pi / N2 / dt2
    k1 = fftpack.ifftshift(dk1 * (np.arange(2*N1)-N1))
    k2 = fftpack.ifftshift(dk2 * (np.arange(2*N2)-N2))
    ipart, rpart = np.meshgrid(k2, k1)
    k = rpart + 1j*ipart
    # compute Kaiser-Squires kernel on this grid. Eq. 43 p. 329
    fourier_map = np.conj(KS_kernel(k))
    # compute Fourier transform of the kappa
    kappa_fft = fftpack.fft2(kappa, (2*N1, 2*N2))
    gamma_fft = kappa_fft * fourier_map
    gamma = fftpack.ifft2(gamma_fft)[:N1, :N2]
    return gamma
def k_evolve_2d(dt, kx, ky, psi_grid):
    """
    propagate the state in grid basis a time step forward with H = K

    :param dt: float, time step
    :param kx: float, momentum corresponding to x
    :param ky: float, momentum corresponding to y
    :param psi_grid: list, the two-electronic-states vibrational states in grid basis
    :return: psi_grid (updated): list, the two-electronic-states vibrational states in grid basis
    """
    for i in range(2):
        psi_k_tmp = fft2(psi_grid[i])
        for j in range(len(kx)):
            for k in range(len(ky)):
                psi_k_tmp[j, k] *= np.exp(-0.5 * 1j / m * (kx[j]**2 + ky[k]**2) * dt)
        psi_grid[i] = ifft2(psi_k_tmp)
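# --- Design note (sketch, not from the original source) ---
# The double loop over kx and ky can be replaced by a precomputed phase array,
# which is equivalent and usually much faster on large grids. The mass `m` is a
# module-level constant in the original; here it is passed as an assumed parameter.
import numpy as np
from numpy.fft import fft2, ifft2

def k_evolve_2d_vectorized(dt, kx, ky, psi_grid, m=1.0):
    k2 = kx[:, None]**2 + ky[None, :]**2           # (len(kx), len(ky)) grid via broadcasting
    phase = np.exp(-0.5j / m * k2 * dt)
    for i in range(2):
        psi_grid[i] = ifft2(fft2(psi_grid[i]) * phase)
    return psi_grid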
def ift2(G, delta_f, FFT=None):
    """
    Wrapper for inverse fourier transform

    Parameters:
        G: data to transform
        delta_f: pixel separation
        FFT (FFT object, optional): An accelerated FFT object
    """
    N = G.shape[0]
    if FFT:
        g = AOFFT.ftShift2d(FFT(AOFFT.ftShift2d(G))) * (N * delta_f)**2
    else:
        g = fft.ifftshift(fft.ifft2(fft.fftshift(G))) * (N * delta_f)**2
    return g
def main():
    # utils image functions
    freq2spacial = lambda freq: _normalize(
        np.abs(fftpack.ifft2(fftpack.ifftshift(freq))))
    freq2plot = lambda freq: _normalize(20 * np.log(np.abs(freq)))
    # read input
    input_path, output_path, d0 = _get_args()
    img = io.imread(input_path).astype(np.float32)
    # calculate fft of input image
    img_freq = fftpack.fftshift(fftpack.fft2(img))
    # apply gaussian filter in frequency domain
    out_freq = _apply_gaussian(img_freq, d0)
    _output_images(output_path, freq2plot(img_freq), freq2plot(out_freq),
                   freq2spacial(out_freq))
def shift(self, arr1, nx, ny):
    """
    Shifts an array by nx and ny respectively.
    """
    xpix, ypix = arr1.shape
    if ((nx % 1. == 0.) and (ny % 1. == 0)):
        return sp.roll(sp.roll(arr1, int(nx), axis=0), int(ny), axis=1)
    else:
        xfreqs, yfreqs = spf.fftfreq(xpix), spf.fftfreq(ypix)
        phaseFactor = sp.zeros((xpix, ypix), dtype=complex)
        for i in range(xpix):
            for j in range(ypix):
                phaseFactor[i, j] = sp.exp(complex(0., -2.*sp.pi)*(ny*yfreqs[j]+nx*xfreqs[i]))
        tmp = spf.ifft2(spf.fft2(arr1)*phaseFactor)
        return sp.real(tmp.copy())
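# --- Design note (sketch, not from the original source) ---
# The nested loop that builds phaseFactor can be expressed with broadcasting;
# this helper is hypothetical (not part of the original class) but produces the
# same array as the double loop above:
import numpy as np
import scipy.fftpack as spf

def subpixel_phase_factor(shape, nx, ny):
    xfreqs = spf.fftfreq(shape[0])[:, None]
    yfreqs = spf.fftfreq(shape[1])[None, :]
    return np.exp(-2j * np.pi * (nx * xfreqs + ny * yfreqs))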
def MtX(self, coeffs):
    """ MtX.

    This method calculates the action of the transpose of the matrix M on the
    data X; in this case, the inverse Fourier transform of the input data in
    the frequency domain.

    Parameters
    ----------
    coeffs: np.ndarray
        Input data array, an array of recovered 2D kspace

    Returns
    -------
    x: nd-array
        Reconstructed data array decomposition coefficients.
    """
    return self.linear_operator.op(pfft.ifft2(self.mask * coeffs))
def SO3_ifft(f_hat):
    """
    """
    b = len(f_hat)
    d = setup_d_transform(b)
    df_hat = [d[l] * f_hat[l][:, None, :] for l in range(len(d))]
    # Note: the frequencies where m=-B or n=-B are set to zero,
    # because they are not used in the forward transform either
    # (the forward transform is up to m=-l, l<B)
    F = np.zeros((2 * b, 2 * b, 2 * b), dtype=complex)
    for l in range(b):
        F[b - l:b + l + 1, :, b - l:b + l + 1] += df_hat[l]
    F = fftshift(F, axes=(0, 2))
    f = ifft2(F, axes=(0, 2))
    return f * 2 * (b ** 2) / np.pi
def focused(self, map_fft, keep_fraction=0.035, inner_corner=1):
    r, c = map_fft.shape
    '''#distribution 1
    map_fft[0:inner_corner] = 0
    map_fft[:, 0:inner_corner] = 0
    map_fft[int(r)-inner_corner:int(r)] = 0
    map_fft[:, int(c)-inner_corner:int(c)] = 0
    map_fft[inner_corner+int(r*keep_fraction):int(r*(1-keep_fraction))-inner_corner] = 0
    map_fft[:, inner_corner+int(c*keep_fraction):int(c*(1-keep_fraction))-inner_corner] = 0'''
    # distribution 2
    map_fft[int(r * keep_fraction):int(r * (1 - keep_fraction))] = 0
    map_fft[:, int(c * keep_fraction):int(c * (1 - keep_fraction))] = 0
    '''#distribution 3
    map_fft[int(r*keep_fraction):int(r*(1-keep_fraction))] = 0
    map_fft[:, int(c*keep_fraction):int(c*(1-keep_fraction))] = 0
    map_fft[0:int(r*keep_fraction), 0:int(c*keep_fraction)] = 0'''
    reconstructed_map = fftpack.ifft2(map_fft).real
    return reconstructed_map
def random_vortices(nx, ny):
    # the spectrum must be complex so the random Fourier amplitudes are preserved
    omega_hat = sc.zeros([nx, ny], dtype=complex)
    tmp = sc.randn(3) + 1j*sc.randn(3)
    omega_hat[0, 4] = tmp[0]
    omega_hat[1, 1] = tmp[1]
    omega_hat[3, 0] = tmp[2]
    omega = sc.real(ifft2(omega_hat))
    omega = omega/sc.amax(sc.amax(omega))
    # Initialize pressure field
    p = sc.zeros([nx, ny])
    print("Initialized random vortices")
    plt.imshow(omega)
    plt.colorbar()
    plt.pause(0.05)
    return omega, p
def denoiseFFT(im, keep_fraction=0.30):
    from scipy import fftpack
    im_fft = fftpack.fft2(im)
    im_fft2 = im_fft.copy()
    # Set r and c to be the number of rows and columns of the array.
    r, c = im_fft2.shape
    # Set to zero all rows with indices between r*keep_fraction and r*(1-keep_fraction):
    im_fft2[int(r * keep_fraction):int(r * (1 - keep_fraction))] = 0
    # Similarly with the columns:
    im_fft2[:, int(c * keep_fraction):int(c * (1 - keep_fraction))] = 0
    im_new = fftpack.ifft2(im_fft2).real
    return im_new
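# --- Illustrative usage sketch (not from the original source) ---
# Low-pass filtering a synthetic noisy image by keeping only the lowest 10% of
# row/column frequencies:
import numpy as np

x, y = np.meshgrid(np.linspace(0, 1, 128), np.linspace(0, 1, 128))
clean = np.sin(8*np.pi*x) + np.cos(6*np.pi*y)
noisy = clean + 0.5*np.random.randn(*clean.shape)
smoothed = denoiseFFT(noisy, keep_fraction=0.1)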
def synthesize(self, f_hat):
    """
    """
    b = len(self.d)
    # Perform the brute-force Legendre transform
    # Note: the frequencies where m=-B or n=-B are set to zero,
    # because they are not used in the forward transform either
    # (the forward transform is up to m=-l, l<B)
    df_hat = [self.d[l] * f_hat[l][:, None, :] for l in range(b)]
    F = np.zeros((2 * b, 2 * b, 2 * b), dtype=complex)
    for l in range(b):
        F[b - l:b + l + 1, :, b - l:b + l + 1] += df_hat[l]
    # The rest of the SO(3) FFT is just a standard torus FFT
    F = fftshift(F, axes=(0, 2))
    f = ifft2(F, axes=(0, 2))
    return f * (2 * b)**2
def convolution_2D(matrix_1, matrix_2):
    """
    To be tested
    """
    M, N = matrix_1.shape[0], matrix_1.shape[1]
    one = np.pad(np.copy(matrix_1),
                 ((int(M/2), int(M/2)), (int(N/2), int(N/2))),
                 mode='constant', constant_values=(0, 0))
    two = np.pad(np.copy(matrix_2),
                 ((int(M/2), int(M/2)), (int(N/2), int(N/2))),
                 mode='constant', constant_values=(0, 0))
    ONE, TWO = FT2(one), FT2(two)
    spatial_cross = ifftshift(ifft2(ifftshift(ONE) * (ifftshift(TWO))))
    return spatial_cross[int(M/2):int(M/2)+matrix_1.shape[0],
                         int(N/2):int(N/2)+matrix_1.shape[1]]
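# --- Illustrative usage sketch (not from the original source) ---
# convolution_2D relies on module-level FT2/ifft2/ifftshift; the sketch below
# assumes FT2 is simply a 2-D FFT wrapper (an assumption, not confirmed by the
# original code) and shows a call with a delta-like kernel:
import numpy as np
from scipy import fftpack

FT2 = fftpack.fft2          # assumed definition of the module-level FT2
ifft2 = fftpack.ifft2
ifftshift = fftpack.ifftshift

a = np.random.rand(32, 32)
b = np.zeros((32, 32))
b[16, 16] = 1.0             # delta kernel
out = convolution_2D(a, b)  # output has the same shape as `a`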
def upsample_background(img, H_new, W_new):
    # use fft to upsample
    H, W = img.shape
    im_fft = fftpack.fft2(img)
    im_fft2 = im_fft.copy()
    H, W = img.shape
    LR = np.zeros((H, int((W_new - W) / 2)))
    new = np.append(LR, im_fft2, axis=1)  # cascade
    new = np.append(new, LR, axis=1)      # cascade
    H, W = new.shape
    TB = np.zeros(((int((H_new - H) / 2)), W))
    new = np.append(TB, new, axis=0)      # cascade
    new = np.append(new, TB, axis=0)      # cascade
    # NOTE: the zero-padded spectrum `new` is never transformed back; the two
    # lines below ignore it, so the function effectively falls back to a plain
    # cv2.resize of the input image.
    new_img = fftpack.ifft2(im_fft2).real
    new_img = cv2.resize(img, (W_new, H_new), interpolation=cv2.INTER_AREA)
    return new_img
def correctBeamTiltPhaseShift(imgarray, pixelsize=1e-10, beamtilt=(0.0, 0.0), Cs=2e-3, ht=120000):
    '''
    Function to correct the phase shift induced by beam tilt.
    Not fully tested for its effectiveness.
    Length unit in meters, voltage in volts, angles in radians.
    '''
    fft = fftpack.fft2(imgarray)
    # NOTE: the beamtilt argument is overridden with a hard-coded value here.
    beamtilt = (0.0, 1e-4)
    wavelength = getElectronWavelength(ht)
    correction = getBeamTiltPhaseShiftCorrection(fft.shape, beamtilt, Cs, wavelength, pixelsize)
    cfft = fft * correction
    corrected_image = fftpack.ifft2(cfft)
    return corrected_image
def __init__(self, image_size, cop, data: Union[float, np.ndarray] = 0, lam: float = 1,
             prox_param: float = 0.9, sampling=None):
    # NOTE: `a` and `kernel` are not defined in this snippet; they are assumed to
    # come from the enclosing module/scope in the original source.
    operator = lambda x: fft2(x, shape=a.shape)
    super(DatanormL2Conv, self).__init__(operator, sampling=sampling, prox_param=prox_param)
    self.lam = lam
    self.data = data
    self.f_data = fft2(kernel, shape=a.shape)
    self.f_datah = fft2(kernel, shape=a.shape)
    self.inv_operator = lambda x: ifft2(x).real
    self.i = 0
def autocorr(image):
    # We cannot pad by zeros, as this causes a linear decrease in the
    # autocorrelation function that will swamp the signal, and be proportional
    # to the total width of the 'box'. So, we are just going to do the
    # regular circular convolution (no fancy boundaries, just fft's), and
    # we will have to hope for the best.
    # image = image.astype(float)  # convert to float to avoid overflows
    oLength = image.shape[0]  # assume cropped to be square
    # image = pad(image)
    # nLength = image.shape[0]
    l, h = image.shape
    win = np.outer(sig.windows.hann(l), sig.windows.hann(h))
    winIm = win*image
    xp = (winIm - winIm.mean())
    xp = (image - image.mean())
    xxp = xp/np.sqrt((xp**2).sum())
    # return sig.correlate2d(win*image, win*image)
    # normalize to one
    return fftpack.fftshift((fftpack.ifft2(abs(fftpack.fft2(xp))**2)))/((xp**2).sum())
def hpf(mat, d0=30):
    spec = fftpack.fftshift(fftpack.fft2(mat))
    order = 2
    M, N = mat.shape
    m = M // 2
    n = N // 2
    ret = np.zeros(mat.shape).astype(complex)
    for i in range(M):
        for j in range(N):
            d = np.sqrt((i - m + 1)**2 + (j - n + 1)**2)
            if d == 0:
                h = 0
            else:
                h = 1 / (1 + 0.414 * ((d0 / d)**(2 * order)))
            ret[i, j] = h * spec[i, j]
    ret = fftpack.ifft2(fftpack.ifftshift(ret))
    ret = np.uint8(np.clip(np.real(ret), 0., 255.))
    return ret
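# --- Illustrative usage sketch (not from the original source) ---
# Applying the Butterworth high-pass above to a random test image; the output
# is a uint8 image with the low-frequency content suppressed:
import numpy as np

img = (np.random.rand(128, 128) * 255).astype(float)
edges = hpf(img, d0=30)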
def fft(img, frequency_threshold=10, brightness_threshold=40, show=False):
    F1 = fp.fft2((img).astype(float))
    F2 = fp.fftshift(F1)
    (w, h) = img.shape
    half_w, half_h = int(w / 2), int(h / 2)
    # high pass filter
    n = frequency_threshold
    # select all but the first 50x50 (low) frequencies
    F2[half_w - n:half_w + n + 1, half_h - n:half_h + n + 1] = 0
    im1 = fp.ifft2(fp.ifftshift(F2)).real
    # im1 = im1.astype('uint8')
    retval, threshold = cv2.threshold(im1, brightness_threshold, 255, cv2.THRESH_BINARY)
    threshold = threshold.astype('uint8')
    img = img.astype('uint8')
    markers = open_operation(img, threshold)
    markers1 = markers.astype(np.uint8)
    ret, m2 = cv2.threshold(markers1, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    _, contours, hierarchy = cv2.findContours(m2, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    canvas = np.zeros((img.shape[0], img.shape[1], 3))
    canvas[:, :, 0] = img
    canvas[:, :, 1] = img
    canvas[:, :, 2] = img
    canvas = canvas.astype(np.uint8)
    coor_list = []
    for c in contours:
        area = cv2.contourArea(c)
        if area < 400:
            M = cv2.moments(c)
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            if show:
                cv2.drawContours(canvas, c, -1, (0, 255, 0), 1)
                cv2.circle(canvas, (cX, cY), 1, (255, 0, 0), -1)
            if [cX, cY] not in coor_list:
                coor_list.append([cX, cY])
    if show:
        result = [m2, canvas]
        show_imgs(result)
    return coor_list
def equalise_power_spectrum(image, avg_power_spectrum):
    """Equalise images' power spectrum by setting an image's amplitudes in the
    Fourier domain to the amplitude average over all used images.

    parameters:
    - image: a numpy.ndarray
    - avg_power_spectrum: an array of the same dimension as one of the image's
      channels, containing the average amplitude spectrum over all images"""

    # check input dimensions
    assert image.shape[:2] == avg_power_spectrum.shape, \
        'Image shape={} unequal avg_spectrum shape={}'.format(image.shape[:2],
                                                              avg_power_spectrum.shape)

    # convert image to greyscale
    channel = rgb2grey(image)

    # Fourier Forward Transform and shift to centre
    f = fp.fft2(channel)
    f = fp.fftshift(f)

    # get amplitudes and phases
    f_amp = np.abs(f)
    f_phase = np.angle(f)

    # set amplitudes to average power spectrum
    fnew_amp = avg_power_spectrum

    # recalculate the FFT complex representation from the new amplitudes and old phases
    fnew = fnew_amp * np.exp(1j * f_phase)

    # reverse shift to centre and perform Fourier Backwards Transformation
    fnew = fp.ifftshift(fnew)
    new_channel = fp.ifft2(fnew)

    # make sure that there are no imaginary parts after transformation
    new_channel = new_channel.real

    # clip too large and too small values
    new_channel[new_channel > 1] = 1
    new_channel[new_channel < 0] = 0

    # return stacked (RGB) grey image
    return (np.dstack((new_channel, new_channel, new_channel)))
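# --- Illustrative helper sketch (not from the original source) ---
# The original does not show how avg_power_spectrum is built; one plausible
# construction (an assumption) is the mean amplitude spectrum of the shifted
# FFTs of the greyscale versions of all images in a set:
import numpy as np
import scipy.fftpack as fp
from skimage.color import rgb2gray as rgb2grey

def average_power_spectrum(images):
    # assumes all images share the same spatial dimensions
    amps = [np.abs(fp.fftshift(fp.fft2(rgb2grey(im)))) for im in images]
    return np.mean(amps, axis=0)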
def cal_tangent_effective_moduli(f_gamma0, mu, beta, F, C0, Nx, Ny, x, y, hx, hy,
                                 ax, bx, ay, by, xi1, xi2, P_homogenized, F_homogenized):
    xi = np.array([xi1, xi2])
    alpha = zeros((dim, dim, dim, dim, Nx, Ny))
    C = get_C(mu, beta, F)
    C_delta = C - C0
    C_consistent_tangent = zeros((dim, dim, dim, dim))
    vol = (bx - ax) * (by - ay)
    # ----------------------------------------------------------------------------
    # Part I: Calculate \frac{\partial \tilde{\F}}{\partial \bar{\F}}
    # ----------------------------------------------------------------------------
    TOL = 1e-6
    maxIter2 = 10
    iter2 = 0
    while iter2 < maxIter2:
        f_alpha = ddot44(f_gamma0, fft.fft2(C_delta + ddot44(C_delta, alpha)))
        # Boundary condition
        f_alpha[:, :, :, :, 0, 0] = 0
        alpha = fft.ifft2(f_alpha).real
        # CHECK CONVERGENCE
        if iter2 > -1:
            # Calculate consistent tangent stiffness
            beta = ddot44(C, (I4 + alpha))
            for i, j, k, l in itertools.product(range(dim), repeat=4):
                C_consistent_tangent[i, j, k, l] = 1.0 / vol * hx * hy * np.sum(beta[i, j, k, l, :, :])
            # S = np.einsum('ijkl,lk->ij', C_consistent_tangent, E_homogenized)
            # error = norm(S - S_homogenized) / norm(S_homogenized)
            # if error.real < TOL:
            #     print("error")
            #     print(error.real)
            #     break
        iter2 += 1
    # ----------------------------------------------------------------------------
    # Part II: Calculate consistent tangent stiffness based on
    #          \frac{\partial \tilde{\F}}{\partial \bar{\F}}
    # ----------------------------------------------------------------------------
    # C_consistent_tangent = zeros((dim, dim, dim, dim))
    # vol = (bx - ax) * (by - ay)
    # beta = ddot44(C, (I4 + alpha))
    # for i, j, k, l in itertools.product(range(dim), repeat=4):
    #     C_consistent_tangent[i, j, k, l] = 1.0 / vol * hx * hy * np.sum(beta[i, j, k, l, :, :])
    return C_consistent_tangent
def grf(rands, n=-1, size=1024):
    # First generate, without library functions, an array of k_x and k_y to be
    # used for the ifft, thus with frequencies in the range
    # [0,...,f_c,-f_c+1,...,-1] for both dimensions.
    # Not the most efficient way.
    n2 = int((size+1)/2)
    ks, ks2d, ks2d2 = np.zeros(size), [None]*size, [None]*size
    for i in range(0, n2):
        ks[i] = i
    for i in range(-n2, 0):
        ks[i+size] = i
    for i in range(size):
        ks2d[i] = ks
        ks2d2[i] = ([ks[i] for j in range(size)])
    ksarr = np.array([ks2d, ks2d2])
    # Turn the frequencies into amplitudes depending on the given power spectrum index
    ampl = (ksarr[0]**2. + ksarr[1]**2. + 1e-10)**(n/2.)
    ampl[0, 0] = 0
    # Turn the amplitudes into random complex numbers, and allow for the ifft to
    # be real by the given rule.
    A = np.zeros((size, size), dtype='complex')
    k = 0
    # For each element in the top half (with the Nyquist band), generate the
    # random numbers needed.
    for i in range(n2+1):
        for j in range(size):
            if (size-i) % size == i and (size-j) % size == j:
                if i == 0 and j == 0:
                    A[i][j] = 0
                else:
                    A[i][j] = rands[k] * ampl[i][j]
                k += 1
            elif A[i][j] == 0 and A[-i][-j] == 0:
                genran(rands, i, j, ampl, A, k)
                k += 2
    B = ft.ifft2(A)
    return B, A
def Background_Unsharp(IMG, results, options):
    """Creates a 2D background level using low order FFT coefficients.

    Takes the 2D FFT of an image and sets all coefficients above 3 to
    zero. This creates a very smooth image which can be used as a
    variable background level. This can then be subtracted from images
    to remove large bright sources, such as a nearby BCG or bright
    star. However, this background estimation method will likely also
    heavily bias flux values. Thus it can reasonably be used to isolate
    a galaxy with a large overlapping partner for the sake of fitting
    isophotes, but the extracted flux profile will be unreliable.

    Parameters
    -----------------
    ap_background_unsharp_lowpass : int, default 3
        User provided FFT coefficient cutoff for constructing unsharp image.

    Returns
    -------
    IMG : ndarray
        Unaltered galaxy image

    results : dict
        .. code-block:: python

          {'background':  # flux image representing the variable background level (ndarray)
          }
    """
    coefs = fft2(IMG)
    unsharp = (
        int(options["ap_background_unsharp_lowpass"])
        if "ap_background_unsharp_lowpass" in options
        else 3
    )
    coefs[unsharp:-unsharp] = 0
    coefs[:, unsharp:-unsharp] = 0
    dumy, stats = Background_Mode(IMG, results, options)
    stats.update({"background": ifft2(coefs).real})
    return IMG, stats
def crossCorrelate(image1, image2):
    '''
    :param image1: 2d array
    :param image2: 2d array
    :return: integer shift (t0, t1) that best aligns image2 with image1
    '''
    fft_array1 = fft2(image1)
    fft_array2 = fft2(image2)
    shape = image1.shape
    c = abs(ifft2(fft_array1 * fft_array2.conjugate()))
    t0, t1 = np.unravel_index(np.argmax(c), image1.shape)
    if t0 > shape[0] // 2:
        t0 -= shape[0]
    if t1 > shape[1] // 2:
        t1 -= shape[1]
    return t0, t1
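# --- Illustrative usage sketch (not from the original source) ---
# Estimating the integer shift between an image and a circularly shifted copy:
import numpy as np

ref = np.random.rand(64, 64)
moved = np.roll(np.roll(ref, 5, axis=0), -3, axis=1)
t0, t1 = crossCorrelate(moved, ref)
print(t0, t1)   # expected to be close to (5, -3)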
def ifrt(bins, N, norm=True, center=False, projNumber=0, Isum=-1):
    '''
    Compute the inverse DRT in O(n logn) complexity using the discrete Fourier
    slice theorem and the FFT. Input should be DRT projections (bins) to
    recover an NxN image. projNumber is the number of non-zero projections in
    bins. This is useful for backprojecting mu projections where mu < N.
    Isum is computed from the first row if -1, otherwise the provided value is used.
    '''
    if Isum < 0:
        Isum = bins[0, :].sum()
    # print "ISUM:", Isum
    if projNumber == 0:
        projNumber = N + N // 2  # all projections filled
    result = np.zeros((N, N), dtype=np.complex64)
    filter = oversampling_1D_filter(N, 2, norm)  # fix DC for dyadic
    if N % 2 == 1:  # if odd, assume prime
        if projNumber == 0:
            projNumber = N + 1  # all projections filled
        filter = np.ones(N)
        # filter[0] = 1.0/(projNumber+1)  # DC fix
        filter[0] = 1.0
    if projNumber < 0:
        filter[0] = 0  # all projections zero mean
    # Set slices (0 <= m <= N)
    for k, row in enumerate(bins):  # iterate per row
        slice = fftpack.fft(row)
        slice *= filter
        # print "m:", k
        setSlice(k, result, slice)
    # print "filter:", filter
    result[0, 0] -= float(Isum) * N
    # iFFT 2D image
    result = fftpack.ifft2(result)
    if not norm:
        result *= N  # ifft2 already divides by N**2
    if center:
        result = fftpack.fftshift(result)
    return np.real(result)
def MixingUniMagPhase(self, Image, W1, W2):
    self.setMagnitude()
    self.setPhase()
    Image.setMagnitude()
    Image.setPhase()
    UniTemp = Image.getMagnitude()
    for i in range(len(UniTemp)):
        UniTemp[i] = 1
    TempMagnitude = list(
        map(add, ((W1) * (np.array(UniTemp))),
                 ((1 - W1) * (np.array(Image.getMagnitude())))))
    # The mixed phase must multiply the magnitude as a unit-modulus complex
    # factor, hence the 1j in the exponential.
    TempPhase = np.exp(
        1j * np.array(list(
            map(add, ((W2) * (np.array(Image.getPhase()))),
                     ((1 - W2) * (np.array(self.getPhase())))))))
    Mix = [a * b for a, b in zip(TempMagnitude, TempPhase)]
    View = ifft2(Mix)
    View = pg.ImageItem(View)
    return View
def _correlate_images(im1, im2, method='brent'):
    shape = im1.shape
    f1 = fft2(im1)
    f1[0, 0] = 0
    f2 = fft2(im2)
    f2[0, 0] = 0
    ir = np.real(ifft2((f1 * f2.conjugate())))
    t0, t1 = np.unravel_index(np.argmax(ir), shape)
    if t0 >= shape[0] / 2:
        t0 -= shape[0]
    if t1 >= shape[1] / 2:
        t1 -= shape[1]
    if method == 'brent':
        newim2 = ndimage.shift(im2, (t0, t1))
        refine = optimize.brent(cost_function, args=(im1, newim2),
                                brack=[-1, 1], tol=1.e-2)
        return t1 + refine
def filterButterworth2D(self, degradation, n=2):
    # frac -> max smoothing at center
    img = self.getMat()
    heigth, width = len(img), len(img[0])
    smoothed = numpy.array([[0.0 for i in range(width)] for j in range(heigth)])
    zeros = numpy.array([[0.0 for i in range(width)] for j in range(heigth)])
    temp = numpy.array([[0.0 for i in range(width)] for j in range(heigth)])
    freq = fftpack.fft2(img)
    maxD0 = sqrt(pow(float(width), 2.0) + pow(float(heigth), 2.0))
    d0 = float(maxD0)*degradation
    newFreq = numpy.array([[freq[j][i] for i in range(width)] for j in range(heigth)],
                          dtype=numpy.complex64)
    for i in range(heigth):
        for j in range(width):
            newFreq[i][j] = freq[i][j]*self._butterworth(
                float(sqrt(pow(i-heigth/2, 2.0)+pow(j-width/2, 2.0))), float(d0), float(n))
    smoothed = numpy.real(fftpack.ifft2(newFreq))
    return Image(smoothed)
def sharpen_images(fraction, filter_type):
    # -- first read data
    images, names = load_data()
    # -- make output directory dictionary
    outdir = {}
    outdir['train'] = os.path.join(trn_dir, 'images_%s_%.3f' % (filter_type, fraction))
    outdir['test'] = os.path.join(tst_dir, 'images_%s_%.3f' % (filter_type, fraction))
    # -- loop through train and test data
    for t in ['train', 'test']:
        if (not os.path.isdir(outdir[t])):
            os.mkdir(outdir[t])
        # -- loop through images and sharpen them
        for m, n in zip(images[t], names[t]):
            # -- take fft
            rows, cols, channels = m.shape
            f_img = fftpack.fft2(m.reshape(rows, cols))
            # -- make a numpy array copy
            f = f_img.copy()
            # -- get dimensions
            d1, d2 = f.shape
            # -- Set the low frequencies to 0 (to sharpen)
            # -- if you instead want to get rid of high-frequency noise, set the
            # -- high frequencies to zero
            if filter_type in ['highpass', 'HighPass', 'HIGHPASS', 'high-pass']:
                f[0:int(d1 * fraction), :] = 0.
                f[-1 * int(d1 * fraction):, :] = 0.
                f[:, 0:int(d2 * fraction)] = 0.
                f[:, -1 * int(d2 * fraction):] = 0.
            elif filter_type in ['lowpass', 'LowPass', 'LOWPASS', 'low-pass']:
                f[int(d1 * fraction):int(d1 * (1 - fraction)), :] = 0
                f[:, int(d2 * fraction):int(d2 * (1 - fraction))] = 0
            # -- convert back to spatial domain
            im_sharp = fftpack.ifft2(f).real
            # -- convert to 3 dimensions with a color channel
            im_out_array = im_sharp.reshape(rows, cols, 1)
            # -- write image to file
            im_out = image.array_to_img(im_out_array)
            im_out.save(os.path.join(outdir[t], '%s' % n))
def Density_field(scaling=-2, N=1024):
    fourier_field = np.zeros((N, N), dtype=np.complex128)
    half = int(N / 2 + 0.5)
    for i in range(0, half + 1):
        k_y = 2 * np.pi * i / N
        for j in range(0, N):
            if j <= half:
                k_x = 2 * np.pi * j / N
            else:
                k_x = 2 * np.pi * (-N + j) / N
            k_vector = np.sqrt(k_x**2 + k_y**2)
            a, b = Box_Muller(0, np.sqrt(k_vector**(scaling)), 1)
            fourier_field[i, j] = complex(a, b)
            fourier_field[-i, -j] = fourier_field[i, j].conjugate()
    fourier_field[0, 0] = 0
    fourier_field[0, half] = (fourier_field[0, half].real) * 2
    fourier_field[half, 0] = (fourier_field[half, 0].real) * 2
    fourier_field[half, half] = (fourier_field[half, half].real) * 2
    return ifft2(fourier_field) * N**2
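# --- Illustrative usage sketch (not from the original source) ---
# Box_Muller is assumed to return a pair of Gaussian draws with the given mean
# and spread. Because the spectrum is filled with Hermitian symmetry, the
# returned field should be approximately real-valued:
import numpy as np

field = Density_field(scaling=-2, N=256)
print(np.max(np.abs(field.imag)))   # small if the Hermitian symmetry is respected
density = field.real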
def energies(bec):
    '''
    Return the potential and kinetic energies, and print them
    Inputs are the wavefunction, potential, k vector and dx
    '''
    poten = bec.V + bec.g * abs(bec.psi)**2 - bec.gravity() + bec.angular()
    Epot = sum(sum(bec.psi.conjugate() * poten * bec.psi)) * bec.dx * bec.dy
    Ekin = sum(sum(bec.psi.conjugate() *
                   f.fftshift(f.ifft2(-1 * bec.ksquare * f.fft2(f.fftshift(bec.psi))))
                   )) * bec.dx * bec.dy
    norm = sum(sum(bec.psi * (bec.psi).conjugate())) * bec.dx * bec.dy
    print('Potential Energy = ', Epot)
    print('Kinetic Energy = ', Ekin)
    print('Total Energy = ', Epot + Ekin)
    print('normalisation = ', norm)
    return Epot, Ekin
def invfouriertrans(xaxis, yaxis, input):
    input.resize(xaxis.shape)
    output = theFFT.ifft2(input)
    outputshift = theFFT.fftshift(output)
    #
    # see DES logbook Vol1, page 25
    #
    xax = xaxis[0, :]
    yax = yaxis[:, 0]
    deltax = xax[1] - xax[0]
    deltay = yax[1] - yax[0]
    Nx = xax.size
    Ny = yax.size
    yout, xout = itricks.mgrid[-1. / (2. * deltay):1. / (2. * deltay):1. / (Ny * deltay),
                               -1. / (2. * deltax):1. / (2. * deltax):1. / (Nx * deltax)]
    return xout, yout, outputshift