def solve_neutral(self, phi_g, rho_g, eps=None):
    """Solve Poisson's equation for a neutral and periodic charge density.

    Parameters
    ----------
    phi_g: ndarray
        Potential (output array).
    rho_g: ndarray
        Charge distribution (in units of -e).
    """
    assert phi_g.dtype == self.dtype
    assert rho_g.dtype == self.dtype

    if self.gd.comm.size == 1:
        # Note: implicit downcast from complex to float when the dtype of
        # phi_g is float.
        phi_g[:] = ifftn(fftn(rho_g) * 4.0 * pi / self.k2_Q)
    else:
        rho_g = self.gd.collect(rho_g)
        if self.gd.comm.rank == 0:
            globalphi_g = ifftn(fftn(rho_g) * 4.0 * pi / self.k2_Q)
        else:
            globalphi_g = None
        # XXX What happens here if globalphi_g is complex and phi_g is real?
        self.gd.distribute(globalphi_g, phi_g)
    return 1
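# The spectral solve above inverts the Laplacian by dividing by k^2 in
# Fourier space. A minimal self-contained sketch of the same idea on a
# periodic cubic grid (illustrative names, not part of the original class;
# assumes a mean-free rho, as in the neutral case above):
import numpy as np
from numpy.fft import fftn, ifftn, fftfreq

def poisson_periodic(rho, L=1.0):
    """Solve lap(phi) = -4*pi*rho on a periodic cubic grid via FFT."""
    n = rho.shape[0]
    k = 2 * np.pi * fftfreq(n, d=L / n)
    kx, ky, kz = np.meshgrid(k, k, k, indexing='ij')
    k2 = kx**2 + ky**2 + kz**2
    k2[0, 0, 0] = 1.0       # avoid 0/0; the k=0 mode is fixed below
    phi_hat = 4 * np.pi * fftn(rho) / k2
    phi_hat[0, 0, 0] = 0.0  # zero-mean potential (arbitrary constant)
    return ifftn(phi_hat).real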
def source_terms(self, mara, retphi=False):
    from numpy.fft import fftfreq, fftn, ifftn
    ng = mara.number_guard_zones()
    G = self.G
    L = 1.0
    Nx, Ny, Nz = mara.fluid.shape
    Nx -= 2 * ng
    Ny -= 2 * ng
    Nz -= 2 * ng
    P = mara.fluid.primitive[ng:-ng, ng:-ng, ng:-ng]
    rho = P[..., 0]
    vx = P[..., 2]
    vy = P[..., 3]
    vz = P[..., 4]
    K = [fftfreq(Nx)[:, np.newaxis, np.newaxis] * (2 * np.pi * Nx / L),
         fftfreq(Ny)[np.newaxis, :, np.newaxis] * (2 * np.pi * Ny / L),
         fftfreq(Nz)[np.newaxis, np.newaxis, :] * (2 * np.pi * Nz / L)]
    delsq = -(K[0]**2 + K[1]**2 + K[2]**2)
    delsq[0, 0, 0] = 1.0  # prevent division by zero
    rhohat = fftn(rho)
    phihat = (4 * np.pi * G) * rhohat / delsq
    fx = -ifftn(1.j * K[0] * phihat).real
    fy = -ifftn(1.j * K[1] * phihat).real
    fz = -ifftn(1.j * K[2] * phihat).real
    S = np.zeros(mara.fluid.shape + (5,))
    S[ng:-ng, ng:-ng, ng:-ng, 0] = 0.0
    S[ng:-ng, ng:-ng, ng:-ng, 1] = rho * (fx * vx + fy * vy + fz * vz)
    S[ng:-ng, ng:-ng, ng:-ng, 2] = rho * fx
    S[ng:-ng, ng:-ng, ng:-ng, 3] = rho * fy
    S[ng:-ng, ng:-ng, ng:-ng, 4] = rho * fz
    return (S, ifftn(phihat).real) if retphi else S
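# The accelerations above are spectral gradients: multiply phi_hat by i*k and
# inverse transform. A hedged 1-D sketch of that trick (illustrative, not the
# original solver):
import numpy as np
from numpy.fft import fft, ifft, fftfreq

def spectral_derivative(f, L):
    """d/dx of a smooth periodic sample f on [0, L) via multiplication by ik."""
    n = f.size
    k = 2 * np.pi * fftfreq(n, d=L / n)
    return ifft(1j * k * fft(f)).real

x = np.linspace(0, 2 * np.pi, 64, endpoint=False)
assert np.allclose(spectral_derivative(np.sin(x), 2 * np.pi), np.cos(x))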
def test_plan_call():
    for shape in tested_shapes:
        # numpy.complex was removed in NumPy 1.24; use the builtin complex.
        plan = Plan(
            input_array=ranf_unit_complex(shape),
            output_array=numpy.empty(shape, dtype=complex),
            direction=Direction.forward,
        )
        testing.assert_allclose(
            plan(), fft.fftn(plan.input_array)
        )
        testing.assert_allclose(
            plan(normalize=True),
            fft.fftn(plan.input_array) / plan.input_array.size
        )
        plan = Plan(
            input_array=ranf_unit_complex(shape),
            output_array=numpy.empty(shape, dtype=complex),
            direction=Direction.backward,
        )
        testing.assert_allclose(
            plan(), fft.ifftn(plan.input_array) * plan.input_array.size
        )
        testing.assert_allclose(
            plan(normalize=True), fft.ifftn(plan.input_array)
        )
def compute_init_displacement(self, Dens):
    """Compute the Zeldovich displacement from the initial density."""
    fDens = fft.fftn(Dens)
    fPot = fDens / self.km2 * self.mass_res**2
    vx = fft.ifftn(fPot * -1j * np.sin(self.Km[0])).real / self.mass_res
    vy = fft.ifftn(fPot * -1j * np.sin(self.Km[1])).real / self.mass_res
    return np.array([vx, vy])
def fft_g2r(self, fg, fg_ishifted=False):
    """FFT of array ``fg`` given in G-space."""
    ndim, shape = fg.ndim, fg.shape

    if ndim == 1:
        fg = np.reshape(fg, self.shape)
        return self.fft_g2r(fg, fg_ishifted=fg_ishifted).flatten()

    if ndim == 3:
        assert self.size == np.prod(shape[-3:])
        if fg_ishifted:
            fg = ifftshift(fg)
        fr = ifftn(fg)
    elif ndim > 3:
        assert self.size == np.prod(shape[-3:])
        axes = np.arange(ndim)[-3:]
        if fg_ishifted:
            fg = ifftshift(fg, axes=axes)
        fr = ifftn(fg, axes=axes)
    else:
        raise NotImplementedError("ndim == 2 is not supported")

    return fr * self.size
def garfield(B, P, T=Identity(), seed=None):
    if seed is not None:
        random.seed(seed)
    wn = random.normal(0, 1, B.shape)
    f = fft.ifftn(fft.fftn(wn) * np.sqrt(P(B.K))).real
    # f /= f.std()
    return fft.ifftn(fft.fftn(f) * T(B.K)).real
def Au(U, GF, EpsArr, NX, NY, NZ):
    """Return the result of matrix-vector multiplication by the system
    matrix A = I - GX."""
    # Reshape the input vector into a 4-D array
    Uarr = sci.reshape(U, (NX, NY, NZ, 3))
    # Extended zero-padded arrays
    Uext = sci.zeros((2 * NX, 2 * NY, 2 * NZ, 3), complex)
    Vext = sci.zeros((2 * NX, 2 * NY, 2 * NZ, 3), complex)
    Jext = sci.zeros((2 * NX, 2 * NY, 2 * NZ, 3), complex)
    JFext = sci.zeros((2 * NX, 2 * NY, 2 * NZ, 3), complex)
    Uext[0:NX, 0:NY, 0:NZ, :] = Uarr
    # Contrast current array
    for s in range(3):
        Jext[0:NX, 0:NY, 0:NZ, s] = Uext[0:NX, 0:NY, 0:NZ, s] * \
            (EpsArr[0:NX, 0:NY, 0:NZ] - 1.0)
        JFext[:, :, :, s] = fft.fftn(sci.squeeze(Jext[:, :, :, s]))
    for s in range(3):
        Vext[:, :, :, s] = Uext[:, :, :, s] - \
            fft.ifftn(sci.squeeze(sci.multiply(GF[:, :, :, s, 0], JFext[:, :, :, 0]) +
                                  sci.multiply(GF[:, :, :, s, 1], JFext[:, :, :, 1]) +
                                  sci.multiply(GF[:, :, :, s, 2], JFext[:, :, :, 2])))
    # Reshape the output into a column vector
    V = sci.reshape(Vext[0:NX, 0:NY, 0:NZ, :], (NX * NY * NZ * 3, 1))
    return V
def nccfft(s, p, room, fact=1):
    """Used for all Patterns that do not fall into other categories.

    Cross-correlates the normalized Source and Pattern images, taking
    advantage of FFTs for the convolution step.

    Parameters
    ----------
    s, p : Image
        Source and Pattern images for comparison.
    room : int
        Padding added around the Source before the transforms.
    fact : int, optional
        Factor by which both Source and Pattern are scaled down.

    Returns
    -------
    out1 : ndarray[float]
        Confidence matrix for matches.
    out2 : float
        Threshold for deciding if a match has been found.
    out3 : float
        Mean of the confidence matrix.
    """
    # Subtract the mean from the Pattern
    pmm = p - p.mean()
    pstd = p.std()
    n = p.size

    # Matrix of ones the same size as the Pattern
    u = np.ones(p.shape)

    # Pad matrices (necessary for convolution)
    s = pad_by(s, room)
    upad = pad_to_size_of(u, s)
    pmmpad = pad_to_size_of(pmm, s)

    # Compute the necessary FFTs
    fftppad = fftn(pmmpad)
    ffts = fftn(s)
    fftss = fftn(s**2)
    fftu = fftn(upad)

    # Compute conjugates
    cfppad = np.conj(fftppad)
    cfu = np.conj(fftu)

    # Do the multiplications and inverse FFTs
    top = ifftn(cfppad * ffts)
    bot1 = n * ifftn(cfu * fftss)
    bot2 = ifftn(cfu * ffts) ** 2

    # Finish it off!
    bottom = pstd * np.sqrt(bot1 - bot2)
    full = top / bottom

    return np.where(full.real.max() == full.real)
def FieldIFFT(a_hat, a):
    """Calculate the component-wise 2D or 3D inverse FFT of a vector field
    a_hat, and store it in a."""
    if DimOfVectorFieldDomain(a) == 2:
        a[:, :, 0] = real(fft.ifftn(a_hat[:, :, 0]))
        a[:, :, 1] = real(fft.ifftn(a_hat[:, :, 1]))
    else:
        a[..., 0] = real(fft.ifftn(a_hat[..., 0]))
        a[..., 1] = real(fft.ifftn(a_hat[..., 1]))
        a[..., 2] = real(fft.ifftn(a_hat[..., 2]))
    return a
def run(self, image):
    nslice, n_pe, n_fe = image.shape[-3:]
    mask = checkercube(nslice, n_pe, n_fe)
    from recon.tools import Recon
    if Recon._FAST_ARRAY:
        image[:] = mask * ifftn(mask * image[:], axes=[-3, -2, -1])
    else:
        for vol in image:
            vol[:] = mask * ifftn(mask * vol[:])
def solve_neutral(self, phi_g, rho_g, eps=None):
    if self.gd.comm.size == 1:
        phi_g[:] = ifftn(fftn(rho_g) * 4.0 * pi / self.k2_Q).real
    else:
        rho_g = self.gd.collect(rho_g)
        if self.gd.comm.rank == 0:
            globalphi_g = ifftn(fftn(rho_g) * 4.0 * pi / self.k2_Q).real
        else:
            globalphi_g = None
        self.gd.distribute(globalphi_g, phi_g)
    return 1
def ifftd(I, dims=None):
    # Compute the inverse FFT
    if dims is None:
        X = ifftn(I)
    elif dims == 2:
        X = ifft2(I, axes=(0, 1))
    else:
        X = ifftn(I, axes=tuple(range(dims)))
    return X
def spec2d(u, v, w, dims=(1.0, 1.0)):
    ny = u.shape[0]
    nx = u.shape[1]
    lens = np.array(dims)
    res = np.array(u.shape)
    minres = res.min()
    limiter = (res / lens).argmin()
    factors = lens / lens[limiter]
    fu = np.abs(fft.ifftn(u))**2 + np.abs(fft.ifftn(v))**2 + np.abs(fft.ifftn(w))**2
    # Integer division for the slice bounds (Python 3)
    fu = circleAvg(fu[0:ny // 2, 0:nx // 2], limiter, factors)
    return fu
def propagate(self):
    r"""Given the wavefunction values :math:`\Psi(\Gamma)` at time :math:`t`,
    calculate new values :math:`\Psi^\prime(\Gamma)` at time :math:`t + \tau`.
    We perform exactly one single timestep of size :math:`\tau` within this
    function.
    """
    # How many components does Psi have
    N = self._psi.get_number_components()
    # Unpack the values from the current WaveFunction
    values = self._psi.get_values()

    # First step with the potential
    tmp = [zeros(value.shape, dtype=complexfloating) for value in values]
    for row in range(0, N):
        for col in range(0, N):
            tmp[row] = tmp[row] + self._VE[row * N + col] * values[col]

    # Go to Fourier space
    tmp = [fftn(component) for component in tmp]

    # First step with the kinetic operator
    tmp = [self._TE * component for component in tmp]

    # Go back to real space
    tmp = [ifftn(component) for component in tmp]

    # Central step with V-tilde
    tmp2 = [zeros(value.shape, dtype=complexfloating) for value in values]
    for row in range(0, N):
        for col in range(0, N):
            tmp2[row] = tmp2[row] + self._VEtilde[row * N + col] * tmp[col]

    # Go to Fourier space
    tmp = [fftn(component) for component in tmp2]

    # Second step with the kinetic operator
    tmp = [self._TE * component for component in tmp]

    # Go back to real space
    tmp = [ifftn(component) for component in tmp]

    # Second step with the potential
    values = [zeros(component.shape, dtype=complexfloating) for component in tmp]
    for row in range(0, N):
        for col in range(0, N):
            values[row] = values[row] + self._VE[row * N + col] * tmp[col]

    # Pack the values back into the WaveFunction object
    # TODO: Consider squeeze(.) of data before repacking
    self._psi.set_values(values)
def f2_solve(self, rhs, y, t, dt, f2, **kwargs):
    """Solve and evaluate the implicit piece."""
    # Solve (rebuild the operator every time, as dt may change)
    invop = 1.0 / (1.0 - self.nu * dt * self.laplacian)
    z = fft.fftn(rhs)
    z = invop * z
    y[...] = np.real(fft.ifftn(z))

    # Evaluate
    z = self.nu * self.laplacian * z
    f2[...] = np.real(fft.ifftn(z))
def steepest_descent_images(self, image, dW_dp, forward=None):
    # Compute gradient
    # nabla: dims x ch x h x w
    nabla = self.gradient(image, forward=forward)
    nabla = nabla.as_vector().reshape(
        (image.n_dims, image.n_channels) + nabla.shape)

    # Compute steepest descent images
    # gradient: dims x ch x h x w
    # dW_dp:    dims x      h x w x params
    # sdi:             ch x h x w x params
    sdi = 0
    a = nabla[..., None] * dW_dp[:, None, ...]
    for d in a:
        sdi += d

    if self._kernel is None:
        # Reshape steepest descent images
        # sdi:          (ch x h x w) x params
        # filtered_sdi: (ch x h x w) x params
        sdi = sdi.reshape((-1, sdi.shape[-1]))
        filtered_sdi = sdi
    else:
        # If required, filter the steepest descent images
        # fft_sdi: ch x h x w x params
        filtered_sdi = ifftn(self._kernel[..., None] *
                             fftn(sdi, axes=(-3, -2)),
                             axes=(-3, -2))
        # Reshape steepest descent images
        # sdi:          (ch x h x w) x params
        # filtered_sdi: (ch x h x w) x params
        sdi = sdi.reshape((-1, sdi.shape[-1]))
        filtered_sdi = filtered_sdi.reshape(sdi.shape)

    return filtered_sdi, sdi
def real_space(self):
    """Fourier transform the dynamical matrix to real space."""
    if not self.assembled:
        self.assemble()

    # Shape of the q-point grid
    N_c = self.N_c

    # Reshape before Fourier transforming
    shape = self.D_k.shape
    Dq_lmn = self.D_k.reshape(N_c + shape[1:])
    DR_lmn = fft.ifftn(fft.ifftshift(Dq_lmn, axes=(0, 1, 2)), axes=(0, 1, 2))

    if debug:
        # Check that D_R is real enough (compare the magnitude, not the
        # signed value, of the imaginary part)
        assert np.all(np.abs(DR_lmn.imag) < 1e-8)

    DR_lmn = DR_lmn.real

    # Corresponding R_m vectors in units of the basis vectors
    R_cm = np.indices(N_c).reshape(3, -1)
    N1_c = np.array(N_c)[:, np.newaxis]
    R_cm += N1_c // 2
    R_cm %= N1_c
    R_cm -= N1_c // 2
    R_clmn = R_cm.reshape((3,) + N_c)

    return DR_lmn, R_clmn
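# The R_cm index gymnastics above maps FFT-ordered indices onto signed
# lattice vectors in [-N//2, N//2). A tiny illustration of the wrap
# (pure NumPy, illustrative):
import numpy as np

N = 6
R = np.arange(N)
R = (R + N // 2) % N - N // 2
print(R)  # [ 0  1  2 -3 -2 -1]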
def fftconvolve(in1, in2, mode='same'):
    """Convolve two N-dimensional arrays using FFT. See convolve."""
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    # np.complex / np.product were removed from NumPy; use the supported names.
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    size = s1 + s2 - 1

    # Always use a 2**n-sized FFT
    fsize = (2 ** np.ceil(np.log2(size))).astype('int')
    IN1 = fftn(in1, fsize)
    IN1 *= fftn(in2, fsize)
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if np.prod(s1, axis=0) > np.prod(s2, axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret, osize)
    elif mode == "valid":
        return _centered(ret, abs(s2 - s1) + 1)
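# Sanity check for the convolution theorem used above: zero-padded FFT
# products reproduce direct linear convolution (pure NumPy, illustrative):
import numpy as np
from numpy.fft import fft, ifft

def fft_convolve_full(a, b):
    """Linear ('full') convolution of 1-D arrays via zero-padded FFTs."""
    n = a.size + b.size - 1
    return ifft(fft(a, n) * fft(b, n)).real

a = np.random.rand(50)
b = np.random.rand(7)
assert np.allclose(fft_convolve_full(a, b), np.convolve(a, b, mode='full'))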
def ktoi(data, axis=-1):
    if axis == -1:
        # Transform over all axes
        ax = tuple(range(data.ndim))
    else:
        ax = axis
    return fth.fftshift(ft.ifftn(fth.ifftshift(data, axes=ax), axes=ax), axes=ax)
def UpdateFluid(self, f, Update_du=True, CalcPressure=False, KeepUpdate=True):
    """Advance the fluid one timestep via a semi-implicit scheme.

    f is a force field applied to the fluid at the current time and must be
    a vector field. If KeepUpdate is False then the updated fluid velocity
    is stored in self.Output_u, not self.u."""
    if Update_du:
        du = self.VectorGradient(self.u, self.du)

    self.c = self.ExplicitTerms(f)
    FieldFFT(self.c, self.c_Hat)

    self.p_Hat = self.SolveFor_p_Hat()
    self.u_Hat = self.SolveFor_u_Hat()

    if KeepUpdate:
        FieldIFFT(self.u_Hat, self.u)
        self.u = self.u.copy()
    else:
        FieldIFFT(self.u_Hat, self.Output_u)
        self.Output_u = self.Output_u.copy()

    if CalcPressure:
        self.p = fft.ifftn(self.p_Hat)
def ifftnc(Fx, N):
    """Centered n-dimensional inverse FFT."""
    # return fft.fftshift(fft.ifftn(fft.ifftshift(Fx), N)) * np.prod(N)
    ax = tuple(np.setdiff1d(range(Fx.ndim), range(Fx.ndim - len(N)),
                            assume_unique=True))
    return fft.fftshift(fft.ifftn(fft.ifftshift(Fx, ax), N), ax).real * np.prod(N)
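# Several snippets here sandwich ifftn between ifftshift/fftshift to work
# with zero-frequency-centered spectra. A small round-trip demo of why the
# shift order matters (pure NumPy, illustrative):
import numpy as np
from numpy.fft import fftn, ifftn, fftshift, ifftshift

x = np.random.rand(8, 8)
Xc = fftshift(fftn(x))          # spectrum with DC moved to the array center
x_back = ifftn(ifftshift(Xc))   # ifftshift restores FFT ordering before ifftn
assert np.allclose(x, x_back.real)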
def gaussian_convolution(data, ijk_linewidths):
    from numpy import float32, zeros, add, divide, outer, reshape
    if data.dtype.type != float32:
        data = data.astype(float32)

    from math import exp
    gaussians = []
    for a in range(3):
        size = data.shape[a]
        gaussian = zeros((size,), float32)
        hw = ijk_linewidths[2 - a] / 2.0
        for i in range(size):
            u = min(i, size - i) / hw
            p = min(u * u / 2, 100)  # avoid OverflowError with exp()
            gaussian[i] = exp(-p)
        area = add.reduce(gaussian)
        divide(gaussian, area, gaussian)
        gaussians.append(gaussian)

    g01 = outer(gaussians[0], gaussians[1])
    g012 = outer(g01, gaussians[2])
    g012 = reshape(g012, data.shape)

    cdata = zeros(data.shape, float32)

    from numpy.fft import fftn, ifftn
    # TODO: Fourier transform the Gaussian analytically to reduce computation
    # time by about 30% (one of three fft calculations).
    ftg = fftn(g012)
    ftd = fftn(data)
    gd = ifftn(ftg * ftd)
    gd = gd.astype(float32)
    return gd
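# Regarding the TODO above: the spectrum of a sampled, unit-area Gaussian has
# the closed form exp(-(sigma*k)**2 / 2), so one of the three FFTs can be
# skipped. A hedged 1-D sketch of the idea (illustrative; assumes sigma is
# small compared to the grid so wrap-around tails are negligible):
import numpy as np
from numpy.fft import fft, fftfreq

n, sigma = 256, 4.0
dist = np.minimum(np.arange(n), n - np.arange(n))  # wrapped distances to 0
g = np.exp(-0.5 * (dist / sigma) ** 2)
g /= g.sum()                                       # unit area, as above

k = 2 * np.pi * fftfreq(n)                         # DFT angular frequencies
ftg_analytic = np.exp(-0.5 * (sigma * k) ** 2)     # closed-form spectrum
assert np.allclose(fft(g).real, ftg_analytic, atol=1e-6)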
def generate_init_density(self, P, F=lambda x: 1):
    """Generate an initial condition with power spectrum P and
    post-filtering F (either Gaussian smoothing or a transfer function)."""
    wn = random.normal(0.0, 1.0, (self.Nm, self.Nm))
    km = np.sqrt(self.km2)
    fwn = fft.fftn(wn) * np.sqrt(P(km)) * F(self.Km)
    return fft.ifftn(fwn).real
def interpolate(yF, yG, fevalF=None, fevalG=None, dim=1, xrat=2,
                interpolation_order=-1, **kwargs):
    """Interpolate yG to yF."""
    if interpolation_order == -1:
        zG = fft.fftn(yG)
        zF = np.zeros(fevalF.shape, zG.dtype)
        zF[fevalF.half] = zG[fevalG.full]
        yF[...] = np.real(2**dim * fft.ifftn(zF))
    elif interpolation_order == 2:
        if dim != 1:
            raise NotImplementedError
        yF[0::xrat] = yG
        yF[1::xrat] = (yG + np.roll(yG, -1)) / 2.0
    elif interpolation_order == 4:
        if dim != 1:
            raise NotImplementedError
        yF[0::xrat] = yG
        yF[1::xrat] = (- np.roll(yG, 1) + 9.0 * yG
                       + 9.0 * np.roll(yG, -1) - np.roll(yG, -2)) / 16.0
    else:
        # Python 3 raise syntax
        raise ValueError('interpolation order must be -1, 2 or 4')
def icwt2d(self, da=0.25):
    """Inverse bi-dimensional continuous wavelet transform as in Wang and
    Lu (2010), equation [5].

    Parameters
    ----------
    da : float, optional
        Spacing in the frequency axis.
    """
    if self.Wf is None:
        raise TypeError("Run cwt2D before icwt2D")
    m0, l0, k0 = self.Wf.shape

    if m0 != self.scales.size:
        raise Warning('Scale parameter array shape does not match '
                      'wavelet transform array shape.')
    # Pad to the next power of two for the FFTs.
    L, K = 2 ** int(np.ceil(np.log2(l0))), 2 ** int(np.ceil(np.log2(k0)))

    # Calculates the zonal and meridional wave numbers.
    l, k = fftfreq(L, self.dy), fftfreq(K, self.dx)

    # Creates an empty inverse wavelet transform array and fills it for every
    # discrete scale using the convolution theorem.
    self.iWf = np.zeros((m0, L, K), 'complex')
    for i, an in enumerate(self.scales):
        psi_ft_bar = an * self.wavelet.psi_ft(an * k, an * l)
        W_ft = fftn(self.Wf[i, :, :], s=(L, K))
        self.iWf[i, :, :] = ifftn(W_ft * psi_ft_bar, s=(L, K)) * da / an ** 2.

    self.iWf = self.iWf[:, :l0, :k0].real.sum(axis=0) / self.wavelet.cpsi

    return self
def perdecomp(image):
    # Compute the boundary image
    h, w, d = image.shape
    v = zeros(image.shape)
    v[:, 0, :] = v[:, 0, :] + image[:, 0, :] - image[:, w - 1, :]
    v[:, w - 1, :] = v[:, w - 1, :] + image[:, w - 1, :] - image[:, 0, :]
    v[0, :, :] = v[0, :, :] + image[0, :, :] - image[h - 1, :, :]
    v[h - 1, :, :] = v[h - 1, :, :] + image[h - 1, :, :] - image[0, :, :]

    # Compute the multiplier
    x = arange(0., 1., 1. / w)
    y = arange(0., 1., 1. / h)
    xx, yy = meshgrid(x, y)
    multi = 4 - 2. * cos(2 * pi * xx) - 2. * cos(2 * pi * yy)
    multi[0, 0] = 1.

    # Compute the DFT of the boundary image
    sh = fftn(v, axes=(0, 1))

    # Multiply by the inverse of the multiplier
    sh = sh / multi.reshape((h, w, 1))
    sh[0, 0, :] = zeros((d))

    # Then compute s as the iDFT of sh
    smooth = real(ifftn(sh, axes=(0, 1)))
    periodic = image - smooth
    return harmonize(periodic), harmonize(smooth)
def laplacian_filter(in_file, in_mask=None, out_file=None):
    import numpy as np
    import nibabel as nb
    import os.path as op
    from math import pi
    from numpy.fft import fftn, ifftn, fftshift, ifftshift

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_smooth.nii.gz' % fname)

    im = nb.load(in_file)
    data = im.get_data()

    if in_mask is not None:
        mask = nb.load(in_mask).get_data()
        mask[mask > 0] = 1.0
        mask[mask <= 0] = 0.0
        data *= mask

    dataft = fftshift(fftn(data))
    x = np.linspace(0, 2 * pi, dataft.shape[0])[:, None, None]
    y = np.linspace(0, 2 * pi, dataft.shape[1])[None, :, None]
    z = np.linspace(0, 2 * pi, dataft.shape[2])[None, None, :]
    lapfilt = 2.0 * np.squeeze((np.cos(x) + np.cos(y) + np.cos(z))) - 5.0

    dataft *= fftshift(lapfilt)
    imfilt = np.real(ifftn(ifftshift(dataft)))

    nb.Nifti1Image(imfilt.astype(np.float32), im.get_affine(),
                   im.get_header()).to_filename(out_file)
    return out_file
def cost_closure(x, k):
    if k is None:
        return lambda: x.ravel().T.dot(x.ravel())
    else:
        kx = ifftn(k[..., None] * fftn(x, axes=(-2, -1)), axes=(-2, -1))
        return lambda: x.ravel().T.dot(kx.ravel())
def _measure_autocorrelation(image_data, aper_radius=5, bg_annulus_radii=(5, 7)):
    """
    Uses the autocorrelation function to calculate the pixelwise RMS and the
    correlation correction factor. This is an aperture photometry-like
    algorithm that measures the integrated autocorrelation function centered
    at zero lag.

    :param image_data: Image data to compute the autocorrelation of. Should be
        sky-subtracted and have objects and bad pixels set to 0.
    :param aper_radius: Radius of the aperture to use for finding the total
        power of the autocorrelation peak. The default is a 5-pixel lag in
        each direction, sufficient for most dither/drizzle schemes.
    :param bg_annulus_radii: Inner and outer radius of the sky annulus. NOT
        inner radius and annulus width as in IRAF.
    :return: 2-tuple of RMS and autocorrelation factor
    """
    # Compute the 2D autocorrelation function
    fft_data = fftn(image_data)
    autocorr_image = ifftn(fft_data * np.conjugate(fft_data)).real
    autocorr_image = ifftshift(autocorr_image)

    # Get the image shape and center coordinates
    im_shape = autocorr_image.shape
    center = np.array(im_shape) / 2 - 0.5

    # Generate x, y coordinates of pixels, and square distance to the center
    x, y = np.meshgrid(range(im_shape[1]), range(im_shape[0]))
    sq_dist = (x - center[1]) ** 2 + (y - center[0]) ** 2

    # The 'sky' or background mask is an annulus around the center.
    # The annulus is also expanded by 1 pixel in both directions, with those
    # pixels contributing partial flux (grayscale masking)
    sky_mask = sq_dist > min(bg_annulus_radii) ** 2
    sky_mask &= sq_dist < max(bg_annulus_radii) ** 2
    sky_fix_mask = ~sky_mask
    sky_fix_mask &= sq_dist > (min(bg_annulus_radii) - 1) ** 2
    sky_fix_mask &= sq_dist < (max(bg_annulus_radii) + 1) ** 2

    # How much area is not accounted for in the original mask?
    sky_fix_area = (np.pi * (max(bg_annulus_radii) ** 2 -
                             min(bg_annulus_radii) ** 2) - np.sum(sky_mask))
    # What fraction of the 1-pixel expanded ring is actually inside the annulus
    fix_pixels_weight = sky_fix_area / np.sum(sky_fix_mask)
    sky_wts = 1.0 * sky_mask + fix_pixels_weight * sky_fix_mask

    # The 'flux' or measurement mask is a circle around the center
    flux_mask = sq_dist < aper_radius ** 2
    flux_fix_mask = ~flux_mask & (sq_dist < (aper_radius + 1) ** 2)
    flux_fix_area = np.pi * aper_radius ** 2 - np.sum(flux_mask)
    fix_pixels_weight = flux_fix_area / np.sum(flux_fix_mask)
    flux_wts = 1.0 * flux_mask + fix_pixels_weight * flux_fix_mask

    # Calculate the RMS and autocorrelation factor based on the peak,
    # background, and integrated magnitude of the autocorrelation peak
    peak_val = np.max(autocorr_image[flux_mask])
    bg_val = np.average(autocorr_image, weights=sky_wts)
    total_corr = np.sum((autocorr_image - bg_val) * flux_wts)
    corr_rms = np.sqrt((peak_val - bg_val) / autocorr_image.size)
    corr_fac = np.sqrt(total_corr / (peak_val - bg_val))
    return corr_rms, corr_fac
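# The first three lines above are the Wiener-Khinchin theorem: the circular
# autocorrelation is the inverse FFT of the power spectrum. A minimal 1-D
# check against a direct computation (pure NumPy, illustrative):
import numpy as np
from numpy.fft import fft, ifft

x = np.random.rand(64)
acf_fft = ifft(fft(x) * np.conj(fft(x))).real
acf_direct = np.array([np.sum(x * np.roll(x, -lag)) for lag in range(x.size)])
assert np.allclose(acf_fft, acf_direct)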
def f2_evaluate(self, y, t, f2, **kwargs):
    """Evaluate the implicit piece."""
    z = fft.fftn(y)
    z = self.nu * self.laplacian * z
    u = np.real(fft.ifftn(z))
    f2[...] = u
def Convolve(xhat, y):
    """Convolve the scalar field y with x, where xhat is the Fourier
    transform of x."""
    yhat = fft.fftn(y)
    return real(fft.ifftn(xhat * yhat))
def dft_registration(buf1ft, buf2ft, ups_factor=100):
    """
    Efficient subpixel image registration by cross-correlation.

    This code gives the same precision as the FFT upsampled cross-correlation
    in a small fraction of the computation time and with reduced memory
    requirements. It obtains an initial estimate of the cross-correlation peak
    by an FFT and then refines the shift estimation by upsampling the DFT only
    in a small neighborhood of that estimate by means of a matrix-multiply
    DFT. With this procedure all the image points are used to compute the
    upsampled cross-correlation. Manuel Guizar - Dec 13, 2007

    Portions of this code were taken from code written by Ann M. Kowalczyk and
    James R. Fienup. J.R. Fienup and A.M. Kowalczyk, "Phase retrieval for a
    complex-valued object by using a low-resolution image," J. Opt. Soc. Am. A
    7, 450-458 (1990).

    Citation for this algorithm: Manuel Guizar-Sicairos, Samuel T. Thurman,
    and James R. Fienup, "Efficient subpixel image registration algorithms,"
    Opt. Lett. 33, 156-158 (2008).

    :param buf1ft: Fourier transform of the reference image,
     DC in (1,1) [DO NOT FFTSHIFT]
    :param buf2ft: Fourier transform of the image to register,
     DC in (1,1) [DO NOT FFTSHIFT]
    :param ups_factor: upsampling factor (integer). Images will be registered
     to within 1/ups_factor of a pixel. For example ups_factor = 20 means the
     images will be registered within 1/20 of a pixel. (default = 100)
    :return:
     - output: [error, diff_phase, net_row_shift, net_col_shift]
     - error: translation-invariant normalized RMS error between f and g
     - diff_phase: global phase difference between the two images (should be
       zero if the images are non-negative).
     - row_shift, col_shift: pixel shifts between the images
    """
    if ups_factor == 0:
        crosscorr_max = np.sum(buf1ft * np.conj(buf2ft))
        rfzero = np.sum(abs(buf1ft) ** 2) / buf1ft.size
        rgzero = np.sum(abs(buf2ft) ** 2) / buf2ft.size
        error = 1.0 - crosscorr_max * np.conj(crosscorr_max) / (rgzero * rfzero)
        error = np.sqrt(np.abs(error))
        diff_phase = np.arctan2(np.imag(crosscorr_max), np.real(crosscorr_max))
        return error, diff_phase

    # Whole-pixel shift - compute the cross-correlation by an IFFT and locate
    # the peak
    if ups_factor == 1:
        row_nb = buf1ft.shape[0]
        column_nb = buf1ft.shape[1]
        crosscorr = ifftn(buf1ft * np.conj(buf2ft))
        _, indices = index_max(crosscorr)
        row_max = indices[0]
        column_max = indices[1]
        crosscorr_max = crosscorr[row_max, column_max]
        rfzero = np.sum(np.abs(buf1ft) ** 2) / (row_nb * column_nb)
        rgzero = np.sum(np.abs(buf2ft) ** 2) / (row_nb * column_nb)
        error = 1.0 - crosscorr_max * np.conj(crosscorr_max) / (rgzero * rfzero)
        error = np.sqrt(np.abs(error))
        diff_phase = np.arctan2(np.imag(crosscorr_max), np.real(crosscorr_max))
        md2 = np.fix(row_nb / 2)
        nd2 = np.fix(column_nb / 2)
        if row_max > md2:
            row_shift = row_max - row_nb
        else:
            row_shift = row_max
        if column_max > nd2:
            col_shift = column_max - column_nb
        else:
            col_shift = column_max
        return error, diff_phase, row_shift, col_shift

    # ups_factor > 1: partial-pixel shift.
    # First upsample by a factor of 2 to obtain an initial estimate.
    # Embed the Fourier data in a 2x larger array.
    row_nb = buf1ft.shape[0]
    column_nb = buf1ft.shape[1]
    mlarge = row_nb * 2
    nlarge = column_nb * 2
    crosscorr = np.zeros([mlarge, nlarge], dtype=np.complex128)
    crosscorr[
        int(row_nb - np.fix(row_nb / 2)):int(row_nb + 1 + np.fix((row_nb - 1) / 2)),
        int(column_nb - np.fix(column_nb / 2)):int(column_nb + 1 + np.fix((column_nb - 1) / 2)),
    ] = (fftshift(buf1ft) * np.conj(fftshift(buf2ft)))[:, :]

    # Compute the cross-correlation and locate the peak
    crosscorr = ifftn(ifftshift(crosscorr))
    _, indices = index_max(np.abs(crosscorr))
    row_max = indices[0]
    column_max = indices[1]
    crosscorr_max = crosscorr[row_max, column_max]

    # Obtain the shift in the original pixel grid from the position of the
    # cross-correlation peak
    row_nb = crosscorr.shape[0]
    column_nb = crosscorr.shape[1]
    md2 = np.fix(row_nb / 2)
    nd2 = np.fix(column_nb / 2)
    if row_max > md2:
        row_shift = row_max - row_nb
    else:
        row_shift = row_max
    if column_max > nd2:
        col_shift = column_max - column_nb
    else:
        col_shift = column_max
    row_shift = row_shift / 2
    col_shift = col_shift / 2

    # If upsampling > 2, then refine the estimate with a matrix-multiply DFT
    if ups_factor > 2:
        # Initial shift estimate in the upsampled grid
        row_shift = 1.0 * np.round(row_shift * ups_factor) / ups_factor
        col_shift = 1.0 * np.round(col_shift * ups_factor) / ups_factor
        # Center of the output array at dftshift+1
        dftshift = np.fix(np.ceil(ups_factor * 1.5) / 2)
        # Matrix-multiply DFT around the current shift estimate
        crosscorr = (
            np.conj(
                dftups(
                    buf2ft * np.conj(buf1ft),
                    np.ceil(ups_factor * 1.5),
                    np.ceil(ups_factor * 1.5),
                    ups_factor,
                    dftshift - row_shift * ups_factor,
                    dftshift - col_shift * ups_factor,
                )
            )
            / (md2 * nd2 * ups_factor ** 2)
        )
        # Locate the maximum and map back to the original pixel grid
        _, indices = index_max(np.abs(crosscorr))
        row_max = indices[0]
        column_max = indices[1]
        crosscorr_max = crosscorr[row_max, column_max]
        rg00 = dftups(buf1ft * np.conj(buf1ft), 1, 1, ups_factor) / \
            (md2 * nd2 * ups_factor ** 2)
        rf00 = dftups(buf2ft * np.conj(buf2ft), 1, 1, ups_factor) / \
            (md2 * nd2 * ups_factor ** 2)
        row_max = row_max - dftshift
        column_max = column_max - dftshift
        row_shift = 1.0 * row_shift + 1.0 * row_max / ups_factor
        col_shift = 1.0 * col_shift + 1.0 * column_max / ups_factor
    # If upsampling = 2, no additional pixel shift refinement
    else:
        rg00 = np.sum(buf1ft * np.conj(buf1ft)) / row_nb / column_nb
        rf00 = np.sum(buf2ft * np.conj(buf2ft)) / row_nb / column_nb

    error = 1.0 - crosscorr_max * np.conj(crosscorr_max) / (rg00 * rf00)
    error = np.sqrt(np.abs(error))
    diff_phase = np.arctan2(np.imag(crosscorr_max), np.real(crosscorr_max))
    # If it is only one row or column, the shift along that dimension has no
    # effect: set it to zero.
    if md2 == 1:
        row_shift = 0
    if nd2 == 1:
        col_shift = 0

    return error, diff_phase, row_shift, col_shift
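# For intuition, a self-contained phase-correlation sketch that recovers a
# whole-pixel shift, mirroring the ups_factor == 1 branch above (pure NumPy,
# illustrative; the recovered shift is the negative of the applied roll under
# this register-second-onto-first convention):
import numpy as np
from numpy.fft import fftn, ifftn

ref = np.random.rand(64, 64)
shifted = np.roll(ref, shift=(5, -3), axis=(0, 1))

xcorr = ifftn(fftn(ref) * np.conj(fftn(shifted)))
peak = np.unravel_index(np.argmax(np.abs(xcorr)), xcorr.shape)
# Map peaks past the midpoint back to negative lags
shift = [p - n if p > n // 2 else p for p, n in zip(peak, xcorr.shape)]
print(shift)  # [-5, 3]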
print("k_max:", k_max) # ALGORITHM 2 # #--Gaussian Filter--# scale = 3 A, B, C = np.meshgrid( k_axes, k_axes, k_axes ) # 3D fourier space. cubes with x-freqs, y-freqs, and z-freqs respectively kernel = np.exp(-1 / scale**2 * ((A - k_max[0])**2 + (B - k_max[1])**2 + (C - k_max[2])**2)) # coords = np.zeros( (num_snapshots, 3)) # array of 49 3D coordinates of the submarine for i in range(num_snapshots): denoised_xt = Xt[i] * kernel denoised_x = ifftn(denoised_xt) idxs = np.unravel_index(np.argmax(abs(denoised_x)), denoised_x.shape) coords[i, :] = s[idxs[0]], s[idxs[1]], s[idxs[2]] # 3D trajectory fig = plt.figure() ax = fig.gca(projection='3d') ax.plot(coords[:, 0], coords[:, 1], coords[:, 2], label="submarine path") ax.scatter(coords[0, 0], coords[0, 1], coords[0, 2], label="start") ax.scatter(coords[-1, 0], coords[-1, 1], coords[-1, 2], label="end") ax.view_init(elev=15, azim=50) plt.xlabel("x") plt.ylabel("y") ax.set_zlabel("z") ax.set_xlim(-10, 10) ax.set_ylim(-10, 10)
def _ifftn(a, s=None, axes=None):
    # Note: casting back to a.dtype silently drops the imaginary part when
    # the input array is real.
    return npfft.ifftn(a, s, axes).astype(a.dtype)
def admm(grad, error, prox, opt, x, update_display):
    # Initialize parameters
    h, b = opt.psf, opt.b
    zsize = opt.zsize  # size of the z-stack of PSFs
    crop2d, pad2d, crop3d, pad3d = opt.crop2d, opt.pad2d, opt.crop3d, opt.pad3d
    gamma = opt.gamma  # step size
    size = opt.pad_shape
    del_pixels = opt.del_pixels  # if del_pixels, then C = crop3d + image crop
    autotune = opt.autotune
    alpha, beta = opt.alpha, opt.beta  # autotune parameters

    # Get regularization parameters.
    if hasattr(opt, 'mu1'):
        mu1, mu2, mu3 = opt.mu1, opt.mu2, opt.mu3
        tau = opt.tau
    else:
        mu1, mu2, mu3 = 1e-4, 1e-3, 1e-2  # tune these. Working with 1e-4, 1e-4, tau = 1e-3
        tau = 1e-3  # regularization parameter on the TV norm
    print('mu1 = ' + str(mu1) + ', mu2 = ' + str(mu2) + ', mu3 = ' + str(mu3) +
          ', tau = ' + str(tau))
    if autotune:
        print('Autotuning with alpha = ' + str(alpha) + ', beta = ' + str(beta))

    # Flip h for convolution and construct the convolution matrices.
    h = np.roll(np.flip(h, axis=2), 1, axis=2)
    H = fftn(ifftshift(h))
    HT = np.conj(H)
    DTD = computeDTD(size)
    HTH = HT * H
    J = mu1 * HTH + mu2 * DTD + mu3  # denominator for the x update

    # Initialize matrices for the crop.
    CTb = pad3d(b, del_pixels)
    CTC = pad3d(np.ones(b.shape, 'float32'), del_pixels)
    K = CTC + mu1  # denominator for the nu update

    # Initialize M and MT. The forward model is C * M(x)
    M = lambda x: np.real(fftshift(ifftn(H * fftn(ifftshift(x)))))
    MT = lambda x: np.real(fftshift(ifftn(HT * fftn(ifftshift(x)))))

    # Initialize variables.
    x, xp = np.zeros(size, 'float32'), np.zeros(size, 'float32')
    xi, rho = np.zeros(size, 'float32'), np.zeros(size, 'float32')
    eta1, eta2, eta3 = (np.zeros(size, 'float32'), np.zeros(size, 'float32'),
                        np.zeros(size, 'float32'))
    dxp, dyp, dzp = D(xp)
    Mxp = M(xp)
    # If autotune is off, the mu update flags stay False (avoids a NameError
    # when they are read below).
    mu1_update = mu2_update = mu3_update = False

    # Begin iteration
    i, error_list = 0, []
    e = error(xp)
    error_list.append(e)
    while i < opt.max_itr and (not opt.eps or e > opt.eps):
        # Store the previous Mx for residual calculations
        Mx = Mxp
        dx, dy, dz = dxp, dyp, dzp

        # Update u. Should be fine for 2d, because dz will be 0 --> z3 = 0
        z1, z2, z3 = dx + eta1 / mu2, dy + eta2 / mu2, dz + eta3 / mu2
        zmod = np.sqrt(z1 * z1 + z2 * z2 + z3 * z3)
        zmod[zmod <= 0] = 1  # don't divide by 0
        zmod = np.real(zmod)
        zmod = np.maximum(zmod - tau / mu2, np.zeros(size, 'float32')) / zmod
        u1, u2, u3 = z1 * zmod, z2 * zmod, z3 * zmod

        # Update nu
        y = xi + mu1 * Mx + CTb
        nu = y / K

        # Update w
        w = np.maximum(rho / mu3 + x, np.zeros(size, 'float32'))

        # Update x
        r = DT(mu2 * u1 - eta1, mu2 * u2 - eta2, mu2 * u3 - eta3) + \
            MT(mu1 * nu - xi) + (mu3 * w - rho)
        xp = np.real(fftshift(ifftn(fftn(ifftshift(r)) / J)))  # invert in Fourier space
        Mxp = M(xp)

        # Update xi and mu1
        dxi = Mxp - nu
        xi = xi + mu1 * gamma * dxi
        if autotune:
            r1 = norm(dxi)
            s1 = mu1 * norm(Mxp - Mx)
            mu1, mu1_update = param_update(mu1, beta, alpha, r1, s1)

        # Update eta and mu2
        dxp, dyp, dzp = D(xp)
        deta1, deta2, deta3 = dxp - u1, dyp - u2, dzp - u3
        eta1, eta2, eta3 = (eta1 + mu2 * gamma * deta1,
                            eta2 + mu2 * gamma * deta2,
                            eta3 + mu2 * gamma * deta3)
        if autotune:
            r2 = np.sqrt(norm(deta1)**2 + norm(deta2)**2 + norm(deta3)**2)
            s2 = mu2 * np.sqrt(norm(dxp - dx)**2 + norm(dyp - dy)**2 +
                               norm(dzp - dz)**2)
            mu2, mu2_update = param_update(mu2, beta, alpha, r2, s2)

        # Update rho and mu3
        dw = xp - w
        rho = rho + mu3 * gamma * dw
        if autotune:
            r3 = norm(dw)
            s3 = mu3 * norm(xp - x)
            mu3, mu3_update = param_update(mu3, beta, alpha, r3, s3)

        # If the mus have been updated, update the matrices as well.
        mu_update = mu1_update or mu2_update or mu3_update
        if mu_update:
            J = mu1 * HTH + mu2 * DTD + mu3
            K = CTC + mu1

        x = xp

        # Append the error and update the display
        e = error(x)
        error_list.append(e)
        update_display(i, x, (mu1, mu2, mu3))
        i += 1

    return x, error_list
def inv_fourier_transform(v):
    return ifftn(ifftshift(v)).real
def backproject(self, kspace):
    return asarray(fftshift(ifftn(kspace)), order='F')
def hessian(data, scale=1):
    """Hessian: Gaussian 2nd-order partial derivative filter in the Fourier
    domain."""
    # Gaussian 2nd derivative in each direction: (i*x)*(i*y)*g, etc.

    # Pad
    pd = _pad(data, scale)

    # Get the scaled coordinate system
    if data.ndim == 2:
        x, y = _scale_coordinates(pd.shape, scale)
        rsq = x ** 2 + y ** 2
        g = np.exp(-0.5 * rsq)
        temp = -1.0 * g * fftshift(fftn(pd))
        dxx = ifftn(ifftshift(x * x * temp))
        dxy = ifftn(ifftshift(x * y * temp))
        dyy = ifftn(ifftshift(y * y * temp))

        # Crop
        dxx = _crop(dxx, scale)
        dxy = _crop(dxy, scale)
        dyy = _crop(dyy, scale)

        # Ensure that real functions stay real
        if np.isrealobj(data):
            dxx = np.real(dxx)
            dxy = np.real(dxy)
            dyy = np.real(dyy)
        return [dxx, dxy, dyy]

    elif data.ndim == 3:
        x, y, z = _scale_coordinates(pd.shape, scale)
        rsq = x ** 2 + y ** 2 + z ** 2
        g = np.exp(-0.5 * rsq)
        temp = -1.0 * g * fftshift(fftn(pd))
        dxx = ifftn(ifftshift(x * x * temp))
        dxy = ifftn(ifftshift(x * y * temp))
        dxz = ifftn(ifftshift(x * z * temp))
        dyy = ifftn(ifftshift(y * y * temp))
        dyz = ifftn(ifftshift(y * z * temp))
        dzz = ifftn(ifftshift(z * z * temp))

        # Crop
        dxx = _crop(dxx, scale)
        dxy = _crop(dxy, scale)
        dxz = _crop(dxz, scale)
        dyy = _crop(dyy, scale)
        dyz = _crop(dyz, scale)
        dzz = _crop(dzz, scale)

        # Ensure that real functions stay real
        if np.isrealobj(data):
            dxx = np.real(dxx)
            dxy = np.real(dxy)
            dxz = np.real(dxz)
            dyy = np.real(dyy)
            dyz = np.real(dyz)
            dzz = np.real(dzz)
        return [dxx, dxy, dxz, dyy, dyz, dzz]

    else:
        raise RuntimeError(
            "Unsupported number of dimensions {}. We only support 2D or 3D "
            "arrays.".format(data.ndim))
def ifft(a, normalize=True, nthreads=ncpu):
    if normalize:
        return fftw.ifftn(a)
    else:
        return fftw.ifft(a)
def coeff_real(coeff, _axes):
    return fft.ifftshift(fft.ifftn(coeff, axes=_axes)).real
def constrained_field(self, T=Identity()):
    f_c = self.compute_field(self.g - self.g_r)
    return fft.ifftn((self.f_r + f_c) * T(self.B.K)).real

def mean_field(self, T=Identity()):
    f_m = self.compute_field(self.g)
    return fft.ifftn(f_m * T(self.B.K)).real

def unconstrained_field(self, T=Identity()):
    return fft.ifftn(self.f_r * T(self.B.K)).real
def _make_sense(self, u0):
    st = self.st
    L = numpy.shape(u0)[-1]
    u0dims = numpy.ndim(u0)
    print('in make_sense, u0.shape', u0.shape)

    if u0dims - 1 > 0:
        rows = numpy.shape(u0)[0]
        # dpss_rows = numpy.kaiser(rows, 100)
        # dpss_rows = numpy.fft.fftshift(dpss_rows)
        # dpss_rows[3:-3] = 0.0
        # Replace the sensitivity above because the frequency direction is
        # not necessary.
        dpss_rows = numpy.ones(rows)
        dpss_fil = dpss_rows
        print('dpss shape', dpss_fil.shape)

    if u0dims - 1 > 1:
        cols = numpy.shape(u0)[1]
        dpss_cols = numpy.kaiser(cols, 100)
        dpss_cols = numpy.fft.fftshift(dpss_cols)
        dpss_cols[3:-3] = 0.0
        dpss_fil = CsTransform.pynufft.appendmat(dpss_fil, cols)
        dpss_cols = CsTransform.pynufft.appendmat(dpss_cols, rows)
        dpss_fil = dpss_fil * numpy.transpose(dpss_cols, (1, 0))
        print('dpss shape', dpss_fil.shape)

    if u0dims - 1 > 2:
        zag = numpy.shape(u0)[2]
        dpss_zag = numpy.kaiser(zag, 100)
        dpss_zag = numpy.fft.fftshift(dpss_zag)
        dpss_zag[3:-3] = 0.0
        dpss_fil = CsTransform.pynufft.appendmat(dpss_fil, zag)
        dpss_zag = CsTransform.pynufft.appendmat(dpss_zag, rows)
        dpss_zag = CsTransform.pynufft.appendmat(dpss_zag, cols)
        dpss_fil = dpss_fil * numpy.transpose(dpss_zag, (1, 2, 0))  # low-pass filter
        print('dpss shape', dpss_fil.shape)

    # dpss_fil = dpss_fil / 10.0

    rms = numpy.sqrt(numpy.mean(u0 * u0.conj(), -1))  # root mean square
    st['sensemap'] = numpy.ones(numpy.shape(u0), dtype=numpy.complex64)
    print('sensemap shape', st['sensemap'].shape, L)
    print('u0shape', u0.shape, rms.shape)

    for ll in range(0, L):
        st['sensemap'][..., ll] = (u0[..., ll] + 1e-16) / (rms + 1e-16)
        print('sensemap shape', st['sensemap'].shape, L)
        print('rmsshape', rms.shape)
        st['sensemap'][..., ll] = fftpack.fftn(
            st['sensemap'][..., ll], st['sensemap'][..., ll].shape,
            range(0, numpy.ndim(st['sensemap'][..., ll])))
        st['sensemap'][..., ll] = st['sensemap'][..., ll] * dpss_fil
        st['sensemap'][..., ll] = fftpack.ifftn(
            st['sensemap'][..., ll], st['sensemap'][..., ll].shape,
            range(0, numpy.ndim(st['sensemap'][..., ll])))
        # st['sensemap'][...,ll] = fftpack.ifftn(fftpack.fftn(st['sensemap'][...,ll]) * dpss_fil)

    # st['sensemap'] = Normalize(st['sensemap'])
    return st
def setup(options):
    loc = options.get_string(option_section, "tensor_dir")
    snapshot = options.get_int(option_section, "snapshot")
    nx = options.get_int(option_section, "resolution")  # pixel resolution: 128, 64, 32, 16
    use_binned = options.get_bool(option_section, "use_binned", default=False)

    base = '/home/rmandelb.proj/ssamurof/mb2_tidal/'
    nxyz = fi.FITS('%s/density/dm_density_0%d_%d.fits' % (base, snapshot, nx))[-1].read()
    gxyz = fi.FITS('%s/density/star_density_0%d_%d.fits' % (base, snapshot, nx))[-1].read()
    n0 = int(nxyz.shape[0] / 2)

    # Now compute the tidal tensor
    k = npf.fftfreq(nx)[np.mgrid[0:nx, 0:nx, 0:nx]]
    tidal_tensor = np.zeros((nx, nx, nx, 3, 3), dtype=np.float32)
    galaxy_tidal_tensor = np.zeros((nx, nx, nx, 3, 3), dtype=np.float32)

    # Overdensity fields
    K = np.mean(nxyz)
    d = nxyz / K - 1
    g = gxyz / np.mean(gxyz) - 1

    # FFT the box
    fft_dens = npf.fftn(d)
    galaxy_fft_dens = npf.fftn(g)

    F = 2.85
    A = 1. / np.pi / np.pi / np.pi / 2 / 2 / 2.
    # A = 1.
    # A = F**(128./nx) / np.pi / np.pi / np.pi / 2 / 2 / 300.

    for i in range(3):
        for j in range(3):
            print(i, j)
            # k[i], k[j] are 3D matrices
            temp = fft_dens * k[i] * k[j] / (k[0]**2 + k[1]**2 + k[2]**2)
            galaxy_temp = galaxy_fft_dens * k[i] * k[j] / (k[0]**2 + k[1]**2 + k[2]**2)
            # Subtract off the trace...
            if i == j:
                temp -= 1. / 3 * fft_dens
                galaxy_temp -= 1. / 3 * galaxy_fft_dens
            temp[0, 0, 0] = 0
            tidal_tensor[:, :, :, i, j] = A * npf.ifftn(temp).real
            galaxy_temp[0, 0, 0] = 0
            galaxy_tidal_tensor[:, :, :, i, j] = A * npf.ifftn(galaxy_temp).real

    print('loading shapes')
    gammaI, dgammaI = load_gamma(nx)
    # A = F**(64./nx) / np.pi / np.pi / np.pi / 2 / 2 / 2.
    # s0 = np.mean(tidal_tensor)
    # tidal_tensor = A * tidal_tensor
    # s1 = np.mean(tidal_tensor)
    # tidal_tensor = tidal_tensor - s1 + s0

    S = tidal_tensor.reshape(int(tidal_tensor.size / 3. / 3.), 3, 3)
    S2 = np.zeros_like(S)
    delta_tidal = np.zeros_like(S)
    for l, s in enumerate(S):
        M = np.zeros((3, 3))
        # The density weighting term: just rescale the tidal tensor by the
        # normalised matter overdensity
        delta_tidal[l, :, :] = s * d.flatten()[l]
        for i in range(3):
            for j in range(3):
                M[i, j] = np.sum(s[i, :] * s[:, j])
                if i == j:
                    M[i, j] -= (1. / 3) * np.linalg.det(s)**2
        S2[l] = M

    # S2 = np.array([np.dot(s, s) for s in S])
    # s0 = np.mean(galaxy_tidal_tensor)
    # galaxy_tidal_tensor = A * galaxy_tidal_tensor
    # s1 = np.mean(galaxy_tidal_tensor)
    # galaxy_tidal_tensor = galaxy_tidal_tensor - s1 + s0
    # fi.FITS(base+'tidal/raw/star_tidal_traceless_0%d_0.25_%d.fits'%(snapshot,nx))[-1].read()
    # tidal_tensor *= np.std(tidal_tensor[tidal_tensor!=-9999.])**2
    # galaxy_tidal_tensor *= np.std(galaxy_tidal_tensor[galaxy_tidal_tensor!=-9999.])**2
    # dgammaI = np.zeros_like(gammaI)
    # for i in range(3):
    #     for j in range(3):
    #         dgammaI[:,:,:,i,j] = np.std(np.unique(gammaI[:,:,:,i,j]))

    dx = tidal_tensor.std()
    xc = tidal_tensor.mean()
    x = np.linspace(xc - 2 * dx, xc + 2 * dx, 20)
    x0 = (x[:-1] + x[1:]) / 2
    y00, dy00 = [], []
    y11, dy11 = [], []
    y22, dy22 = [], []
    if use_binned:
        for i, (lower, upper) in enumerate(zip(x[:-1], x[1:])):
            mask00 = (tidal_tensor[:, :, :, 0, 0] > lower) & (tidal_tensor[:, :, :, 0, 0] < upper)
            mask11 = (tidal_tensor[:, :, :, 1, 1] > lower) & (tidal_tensor[:, :, :, 1, 1] < upper)
            mask22 = (tidal_tensor[:, :, :, 2, 2] > lower) & (tidal_tensor[:, :, :, 2, 2] < upper)
            y00, dy00 = get_binned(0, gammaI, mask00, y00, dy00)
            y11, dy11 = get_binned(1, gammaI, mask11, y11, dy11)
            y22, dy22 = get_binned(2, gammaI, mask22, y22, dy22)

    return (delta_tidal, gammaI, tidal_tensor, galaxy_tidal_tensor, S2, dgammaI,
            x0, y00, y11, y22, dy00, dy11, dy22, use_binned)
def mkbackfft(image):
    """Make the backwards Fourier transform of a 2D image."""
    from numpy.fft import fftshift, ifftshift, ifftn
    return fftshift(ifftn(ifftshift(image)))
def calculate_charge_sloshing(self, R_Q):
    return self.gd.integrate(np.fabs(ifftn(R_Q).real))
def downsample(cls, img, side, compute_fx=False, stack=False, mask=None):
    """Use Fourier methods to change the sample interval and/or aspect ratio
    of any dimensions of the input image 'img'.

    If the optional argument stack is set to True, then the *first* dimension
    of 'img' is interpreted as the index of each image in the stack.

    The size argument side is an integer, the size of the output images.
    Let the size of a stack of 2D images 'img' be n1 x n1 x k. The size of
    the output will be side x side x k.

    If the optional mask argument is given, this is used as the zero-centered
    Fourier mask for the re-sampling. The size of mask should be the same as
    the output image size. For example, for downsampling an n0 x n0 image
    with a 0.9 x nyquist filter, do the following:

        msk = fuzzymask(n, 2, .45*n, .05*n)
        out = downsample(img, n, 0, msk)

    The size of the mask must be the size of the output. The optional fx
    output argument is the padded or cropped, masked FT of in, with zero
    frequency at the origin.
    """
    try:
        side = int(side)
    except ValueError:
        raise ValueError("side should be an integer!")

    if not isinstance(stack, bool):
        raise TypeError("stack should be a bool! set it to either True/False.")

    if mask is not None and mask.shape != img.shape:
        raise DimensionsIncompatible(
            f'Dimensions incompatible! mask shape={mask.shape}, img shape={img.shape}.')

    ndim = sum([True for i in img.shape if i > 1])  # number of non-singleton dimensions
    if ndim not in [1, 2, 3]:
        raise DimensionsIncompatible(f"Can't downsample image with {ndim} dimensions!")

    if ndim == 1:
        szout = (1, side)  # this is the shape of the final vector
    elif ndim == 2 or ndim == 3 and stack:
        szout = (side, side)  # this is the shape of the final mat
    else:  # ndim == 3 and not stack
        szout = np.array([side, side, side])  # this is the shape of the final cube

    if ndim == 1:
        # force the input img into a row vector with the shape (1, img.size)
        img = np.asmatrix(flatten(img))

    # check the sizes of input and output
    szin = img[0, :, :].shape if stack else img.shape
    if TupleCompare.eq(szout, szin):  # no change in shape
        if not compute_fx:
            return img

    # adjust the mask to be the size of the desired output
    # (test against None explicitly; the truth value of an array is ambiguous)
    mask = cls.crop(mask, side) if mask is not None else 1

    if ndim == 1:
        # return a vector scaled from the original vector
        x = fftshift(fft(img))
        fx = cls.crop(x, side) * mask
        out = ifft(ifftshift(fx), axis=0) * (np.prod(szout) / np.prod(szin))
    elif ndim == 2:
        # return a 2D image scaled from the original image
        fx = cls.crop(fftshift(fft2(img)), side) * mask
        out = ifft2(ifftshift(fx)) * (np.prod(szout) / np.prod(szin))
    elif ndim == 3 and stack:
        # return a stack of 2D images where each one of them is downsampled
        num_images = img.shape[0]
        out = np.zeros([num_images, side, side], dtype=complex)
        for i in range(num_images):
            fx = cls.crop(fftshift(fft2(img[i, :, :])), side) * mask
            out[i, :, :] = ifft2(ifftshift(fx)) * (np.prod(szout) / np.prod(szin))
    else:  # ndim == 3 and not stack
        # return a 3D object scaled from the input 3D cube
        fx = cls.crop(fftshift(fftn(img)), side) * mask
        out = ifftn(ifftshift(fx)) * (np.prod(szout) / np.prod(szin))

    if np.all(np.isreal(img)):
        out = np.real(out)

    if compute_fx:
        fx = np.fft.ifftshift(fx)
        return out, fx

    return out.astype('float32')
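# The core of downsample() is Fourier cropping: keep the central block of the
# centered spectrum, inverse transform, and rescale. A minimal 1-D sketch of
# that step (illustrative, independent of the class helpers above):
import numpy as np
from numpy.fft import fft, ifft, fftshift, ifftshift

def fourier_downsample_1d(x, m):
    """Downsample x to m samples by cropping its centered spectrum."""
    n = x.size
    X = fftshift(fft(x))
    lo = (n - m) // 2
    Xc = X[lo:lo + m]                          # keep the central m frequencies
    return ifft(ifftshift(Xc)).real * (m / n)  # same rescaling as above

x = np.cos(2 * np.pi * 3 * np.arange(32) / 32)  # band-limited: 3 cycles
y = fourier_downsample_1d(x, 16)
assert np.allclose(y, np.cos(2 * np.pi * 3 * np.arange(16) / 16))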
from fastbox.box import CosmoBox, default_cosmo
from numpy import fft
import numpy as np
import matplotlib.pyplot as plt

# Gaussian box
np.random.seed(10)
box = CosmoBox(cosmo=default_cosmo, box_scale=(1e2, 1e2, 1e2), nsamp=128,
               realise_now=False)
box.realise_density()
box.realise_velocity()

# Plot the real-space density field
plt.matshow(box.delta_x[0], vmin=-1., vmax=20., cmap='cividis')
plt.title("Real-space")
plt.colorbar()

# Get the redshift-space density field
vel_z = fft.ifftn(box.velocity_k[2]).real
delta_s = box.redshift_space_density(delta_x=box.delta_x, velocity_z=vel_z,
                                     sigma_nl=200., method='linear')

plt.matshow(delta_s[0], vmin=-1., vmax=20., cmap='cividis')
plt.title("Redshift-space")
plt.colorbar()
plt.show()
def compute_gvirial(data_dens, dxy, dv, c0=1e5, npad_fft=3):
    """
    Compute the G-virial based on the mass distribution.

    Input:
        data_dens: column density distribution in position-position-velocity
            space. The input data is a 3D array, and the axes are arranged as
            (v, y, x).
        dxy, dv: the separation of voxels in the spatial and the velocity
            direction.
        c0: a parameter in the calculation. Larger c0 leads to results that
            are more smoothed in the v direction. c0 should be chosen to be
            comparable with the sound speed. Default, 1e5 (cm/s).
        npad_fft: zero-padding for the FFT. A larger value leads to better
            behavior at the edges, but requires more computational time and
            memory. Default, 3.

    Output:
        G-virial in position-velocity space.

    Units:
        data_dens has the unit of mass (g). c0 has the unit of cm/s.
    """
    data_dens[np.where(np.isnan(data_dens))] = 0.0
    data_dens[np.where(np.isinf(data_dens))] = 0.0
    nv = len(data_dens)
    data_dens_result = data_dens.copy().astype(complex)

    omagex = np.fft.fftfreq(len(data_dens[0][0]) * npad_fft,
                            d=dxy).reshape(1, len(data_dens[0][0]) * npad_fft)
    omegay = np.fft.fftfreq(len(data_dens[0]) * npad_fft,
                            d=dxy).reshape(len(data_dens[0]) * npad_fft, 1)
    omagexy = np.sqrt(omagex * omagex + omegay * omegay)
    omagexy[0, 0] = 0.5 * (omagexy.min() + omagexy.max())  # to avoid infinity

    print("computing G-virial")
    for i in range(len(data_dens)):
        percentage = int(i * 100 / len(data_dens))
        sys.stdout.write("\r%d%%" % percentage)
        sys.stdout.flush()
        shape_orig = np.array(data_dens[0].shape)
        shape_padded = np.array(data_dens[0].shape) * npad_fft
        padded_map = np.zeros(shape_padded)
        padded_map[0:shape_orig[0], 0:shape_orig[1]] = data_dens[i]
        t_xy = fft.fftn(padded_map)
        t_xy_p = t_xy / omagexy
        phi_xy = fft.ifftn(t_xy_p)
        data_dens_result[i] = phi_xy[0:shape_orig[0], 0:shape_orig[1]]

    omegav = np.fft.fftfreq(npad_fft * nv, d=dv)
    omegavf = pi * np.exp(-c0 * np.abs(omegav) * 2 * pi) / c0
    for x, y in itertools.product(range(len(data_dens_result[0][0])),
                                  range(len(data_dens_result[0]))):
        spec_line = omegav.copy() * 0
        spec_line[0:nv] = data_dens_result[:, y, x].real
        spec_k = fft.fft(spec_line) * omegavf
        line = fft.ifft(spec_k)
        data_dens_result[:, y, x] = line[0:0 + nv]

    data_gvirial = data_dens_result * G
    return data_gvirial.real
def ifft(self, a_xG):
    xshape = a_xG.shape[:-1]
    a_xQ = self.gd.zeros(xshape, complex)
    a_xQ.reshape(xshape + (-1,))[..., self.Q_G] = a_xG
    return ifftn(a_xQ, axes=(-3, -2, -1)).copy()
def gpe3d_python(kappa, Nt, dt, X, Y, Z, U, psi0, Ntstore=10, imag_time=0):
    # Integer division: Ntskip is used as a loop bound
    Ntskip = Nt // (Ntstore - 1)
    Nx, Ny, Nz = np.size(X), np.size(Y), np.size(Z)
    dx, dy, dz = (X[1] - X[0], Y[1] - Y[0], Z[1] - Z[0])
    dV = dx * dy * dz
    Kx = fft.fftfreq(Nx, dx) * 2.0 * np.pi
    Ky = fft.fftfreq(Ny, dy) * 2.0 * np.pi
    Kz = fft.fftfreq(Nz, dz) * 2.0 * np.pi
    T = np.zeros(Ntstore)
    if imag_time == 0:
        prefactor = 1j
        psi_out = np.zeros((Ntstore, Nx, Ny, Nz), complex)
        psi_out[0, :] = psi0
    else:
        prefactor = 1
        psi_out = np.zeros((Nx, Ny, Nz), complex)
    U1 = -prefactor * U * dt / 2.0
    C1 = -prefactor * kappa * dt / 2.0
    Kxg, Kyg, Kzg = np.meshgrid(Kx, Ky, Kz)
    K_squared = Kxg**2 + Kyg**2 + Kzg**2
    Kin = np.exp(-prefactor * K_squared * dt / 2.0)
    psi = psi0
    i = 0
    for t1 in range(Ntstore - 1):
        for t2 in range(Ntskip):
            print('step ' + str(i) + ' of ' + str(Nt))
            i += 1
            # Split the entire time stepping into three steps.
            # The first is stepping by time k/2 but only applying the
            # potential and mean-field parts of the unitary
            psi_squared = psi * np.conj(psi)
            psi = np.exp(U1 + C1 * psi_squared) * psi
            print('first step')
            psi_int = np.sum(np.conj(psi) * psi) * dV
            print(psi_int)
            # The second part is applying the kinetic part of the unitary.
            # This is done by taking the Fourier transform of psi, so applying
            # this unitary in k-space is simply multiplying by another array
            psi = fft.ifftn(Kin * fft.fftn(psi))
            print('second step')
            psi_int = np.sum(np.conj(psi) * psi) * dV
            print(psi_int)
            # The third part is again stepping by k/2 and applying the
            # potential and interaction part of the unitary
            psi_squared = psi * np.conj(psi)
            psi = np.exp(U1 + C1 * psi_squared) * psi
            if imag_time:
                # If we are propagating in imaginary time, the solution dies
                # down, so we need to explicitly normalize it
                print('third step')
                psi_int = np.sum(np.conj(psi) * psi) * dV
                print(psi_int)
                psi /= psi_int**0.5
                psi_int = np.sum(np.conj(psi) * psi) * dV
                print(psi_int)
        # Store the wavefunction in psi_out
        T[t1 + 1] = (t1 + 1) * dt * Ntskip
        if imag_time == 0:
            psi_out[t1 + 1, :] = psi
    if imag_time == 1:
        psi_out = psi
    return (Kx, Ky, Kz, T, psi_out)
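# The inner loop above is the split-step Fourier method: a half-step with the
# potential/nonlinear factor, a full kinetic step applied in k-space, then
# another half-step. A self-contained 1-D free-particle sketch of the same
# splitting (illustrative values; U = 0 and kappa = 0, so the half-steps are
# the identity and only the kinetic factor acts):
import numpy as np
from numpy.fft import fft, ifft, fftfreq

n, L, dt, steps = 256, 40.0, 0.01, 100
x = np.linspace(-L / 2, L / 2, n, endpoint=False)
k = 2 * np.pi * fftfreq(n, d=L / n)
psi = np.exp(-x**2) * np.exp(1j * 2 * x)  # Gaussian packet with momentum ~2

kin = np.exp(-1j * k**2 * dt / 2.0)       # kinetic step in k-space, as in Kin
for _ in range(steps):
    psi = ifft(kin * fft(psi))

print(np.sum(np.abs(psi)**2) * (L / n))   # norm is conserved (unitary scheme)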
def reconstruct_ifft(self):
    kspace = self.kspace.get_static_data()
    image = asarray(fftshift(ifftn(kspace)), order='F')
    return Image3D(abs(image))
def compute_scores(particles, masks, outFile, gaussian_filter_sigma=0,
                   score="all", mask_cutoff=0.5):
    '''Compute the scoring functions for the set of subtomograms provided in
    the argument.

    Arguments:
        particles: list of filepaths of subtomograms in the cluster. Make sure
            subtomograms are transformed before computing the score value.
        masks: list of filepaths of masks corresponding to each subtomogram in
            the cluster. Make sure masks are transformed before computing the
            score value.
        gaussian_filter_sigma: standard deviation of the Gaussian filter.
            The default value is zero, which means no filtering.
        score: scoring function to compute. Computes all scoring functions by
            default. Check the documentation in the GitHub readme file to see
            other possible values of 'score'.
        mask_cutoff: threshold to binarize the missing wedge mask.

    Returns:
        Dictionary of scoring function acronym and score value.
    '''
    assert len(particles) > 1
    scoreValues = {}

    ########### SFSC ###########
    if score == "all" or score == "SFSC":
        print("Computing SFSC")
        scoreValues["SFSC"] = str(
            sfsc(particles=particles, masks=masks, gf=gaussian_filter_sigma,
                 mask_cutoff=mask_cutoff))

    if score == "all" or score != "SFSC":
        if score == "all":
            print("Computing gPC, amPC, FPC, FPCmw, CCC, amCCC, cPC, oPC, OS, "
                  "gNSD, cNSD, oNSD, amNSD, DSD, gMI, NMI, cMI, oMI, amMI")
        else:
            print("Computing", score)

        cluster_mask = None
        if score in ["all", "amPC", "amNSD", "amMI", "amCCC"]:
            cluster_mask = cluster_average_mask(particles, masks)

        # Make subtomogram pairs
        pairs = []
        num_particles = len(particles)
        possible_pair_num = int((num_particles * (num_particles - 1)) / 2)
        for i in range(num_particles):
            for j in range(i + 1, num_particles):
                pairs.append((i, j))

        num_of_pairs = 0
        minimum_num_of_pairs = 5000
        if possible_pair_num < minimum_num_of_pairs:
            num_of_pairs = possible_pair_num
        elif possible_pair_num * 0.1 < minimum_num_of_pairs:
            num_of_pairs = minimum_num_of_pairs
        else:
            num_of_pairs = int(possible_pair_num * 0.10)
        print("Num of pairs: ", num_of_pairs)
        random.shuffle(pairs)
        pairs = pairs[:num_of_pairs]

        for i, p in enumerate(pairs):
            vr_1, vm_1 = read_particle_and_mask(particles[p[0]], masks[p[0]])
            vr_2, vm_2 = read_particle_and_mask(particles[p[1]], masks[p[1]])

            # Gaussian filter
            vr_1_gf = SNFG(vr_1.copy(), gaussian_filter_sigma)
            vr_2_gf = SNFG(vr_2.copy(), gaussian_filter_sigma)

            # Binarize masks
            vm_1[vm_1 < mask_cutoff] = 0.0
            vm_1[vm_1 >= mask_cutoff] = 1.0
            vm_2[vm_2 < mask_cutoff] = 0.0
            vm_2[vm_2 >= mask_cutoff] = 1.0

            # Mask overlap
            masks_logical_and = N.logical_and(vm_1, vm_2)
            masks_logical_and_flag = False
            if masks_logical_and.sum() < 2:
                masks_logical_and_flag = True
            else:
                masks_logical_and = masks_logical_and.flatten()
                masks_logical_and = N.where(masks_logical_and == True)[0]

            # Generate masks for contoured and overlap scores
            threshold_i = 1.5
            vr_1_mask = mask_segmentation(vr_1_gf.copy(), threshold_i)
            vr_2_mask = mask_segmentation(vr_2_gf.copy(), threshold_i)

            ########### gPC ###########
            if score in ["all", "gPC"]:
                if "gPC" not in scoreValues:
                    scoreValues["gPC"] = []
                scoreValues["gPC"].append(pearson_correlation(vr_1_gf, vr_2_gf))

            ########### amPC ###########
            if score in ["all", "amPC"]:
                if "amPC" not in scoreValues:
                    scoreValues["amPC"] = []
                scoreValues["amPC"].append(
                    pearson_correlation(vr_1_gf[cluster_mask],
                                        vr_2_gf[cluster_mask]))

            if score in ["all", "FPC", "FPCmw", "CCC", "amCCC"]:
                vr_1_f = NF.fftshift(NF.fftn(vr_1_gf.copy()))
                vr_2_f = NF.fftshift(NF.fftn(vr_2_gf.copy()))

                ########### FPC ###########
                if score in ["all", "FPC"]:
                    if "FPC" not in scoreValues:
                        scoreValues["FPC"] = []
                    scoreValues["FPC"].append(
                        pearson_correlation(vr_1_f.real.flatten(),
                                            vr_2_f.real.flatten()))

                ########### FPCmw ###########
                if score in ["all", "FPCmw"]:
                    if "FPCmw" not in scoreValues:
                        scoreValues["FPCmw"] = []
                    if masks_logical_and_flag:
                        scoreValues["FPCmw"].append(0.0)
                    else:
                        scoreValues["FPCmw"].append(
                            pearson_correlation(
                                vr_1_f.real.flatten()[masks_logical_and],
                                vr_2_f.real.flatten()[masks_logical_and]))

                if score in ["all", "CCC", "amCCC"]:
                    masks_logical_and = N.logical_and(vm_1, vm_2)
                    N.place(vr_1_f, masks_logical_and == False, [0])
                    N.place(vr_2_f, masks_logical_and == False, [0])
                    vr_1_if = (NF.ifftn(NF.ifftshift(vr_1_f))).real
                    vr_2_if = (NF.ifftn(NF.ifftshift(vr_2_f))).real

                    ########### CCC ###########
                    if score in ["all", "CCC"]:
                        vr_1_if_norm = zeroMeanUnitStdNormalize(vr_1_if.copy())
                        vr_2_if_norm = zeroMeanUnitStdNormalize(vr_2_if.copy())
                        if "CCC" not in scoreValues:
                            scoreValues["CCC"] = []
                        scoreValues["CCC"].append(
                            pearson_correlation(vr_1_if_norm.flatten(),
                                                vr_2_if_norm.flatten()))
                        del vr_1_if_norm, vr_2_if_norm
                        gc.collect()

                    ########### amCCC ###########
                    if score in ["all", "amCCC"]:
                        vr_1_if = vr_1_if[cluster_mask]
                        vr_2_if = vr_2_if[cluster_mask]
                        vr_1_if_norm = zeroMeanUnitStdNormalize(vr_1_if.copy())
                        vr_2_if_norm = zeroMeanUnitStdNormalize(vr_2_if.copy())
                        if "amCCC" not in scoreValues:
                            scoreValues["amCCC"] = []
                        scoreValues["amCCC"].append(
                            pearson_correlation(vr_1_if_norm, vr_2_if_norm))
                        del vr_1_if_norm, vr_2_if_norm
                        gc.collect()

                    del vr_1_if, vr_2_if
                    gc.collect()

                del vr_1_f, vr_2_f
                gc.collect()

            # Real-space mask for contoured scores
            real_masks_or = N.logical_or(vr_1_mask, vr_2_mask)
            real_masks_or = real_masks_or.flatten()
            real_masks_or = N.where(real_masks_or == True)[0]

            # Real-space mask for overlap scores
            real_masks_and = N.logical_and(vr_1_mask, vr_2_mask)
            real_masks_and = real_masks_and.flatten()
            real_masks_and = N.where(real_masks_and == True)[0]

            ########### cPC ###########
            if score in ["all", "cPC"]:
                if "cPC" not in scoreValues:
                    scoreValues["cPC"] = []
                if real_masks_or.sum() < 2:
                    scoreValues["cPC"].append(0.0)
                else:
                    scoreValues["cPC"].append(
                        pearson_correlation(vr_1_gf.flatten()[real_masks_or],
                                            vr_2_gf.flatten()[real_masks_or]))

            ########### oPC ###########
            if score in ["all", "oPC"]:
                if "oPC" not in scoreValues:
                    scoreValues["oPC"] = []
                if real_masks_and.sum() < 2:
                    scoreValues["oPC"].append(0.0)
                else:
                    scoreValues["oPC"].append(
                        pearson_correlation(vr_1_gf.flatten()[real_masks_and],
                                            vr_2_gf.flatten()[real_masks_and]))

            ########### OS ###########
            if score in ["all", "OS"]:
                if "OS" not in scoreValues:
                    scoreValues["OS"] = []
                scoreValues["OS"].append(
                    float(N.logical_and(vr_1_mask, vr_2_mask).sum()) /
                    min(vr_1_mask.sum(), vr_2_mask.sum()))

            ########### gNSD ###########
            if score in ["all", "gNSD"]:
                if "gNSD" not in scoreValues:
                    scoreValues["gNSD"] = []
                scoreValues['gNSD'].append(((vr_1_gf - vr_2_gf)**2).mean())

            ########### cNSD ###########
            if score in ["all", "cNSD"]:
                if "cNSD" not in scoreValues:
                    scoreValues["cNSD"] = []
                scoreValues['cNSD'].append(
                    ((vr_1_gf.flatten()[real_masks_or] -
                      vr_2_gf.flatten()[real_masks_or])**2).mean())

            ########### oNSD ###########
            if score in ["all", "oNSD"]:
                if "oNSD" not in scoreValues:
                    scoreValues["oNSD"] = []
                scoreValues['oNSD'].append(
                    ((vr_1_gf.flatten()[real_masks_and] -
                      vr_2_gf.flatten()[real_masks_and])**2).mean())

            ########### amNSD ###########
            if score in ["all", "amNSD"]:
                if "amNSD" not in scoreValues:
                    scoreValues["amNSD"] = []
                scoreValues['amNSD'].append(
                    ((vr_1_gf[cluster_mask] - vr_2_gf[cluster_mask])**2).mean())

            ########### DSD ###########
            if score in ["all", "DSD"]:
                if "DSD" not in scoreValues:
                    scoreValues["DSD"] = []
                scoreValues['DSD'].append(dsd(vr_1_gf.copy(), vr_2_gf.copy()))

            ########### gMI ###########
            if score in ["all", "gMI"]:
                if "gMI" not in scoreValues:
                    scoreValues["gMI"] = []
                scoreValues['gMI'].append(
                    MI(vr_1_gf.copy(), vr_2_gf.copy(), mask_array=None,
                       normalised=False))

            ########### NMI ###########
            if score in ["all", "NMI"]:
                if "NMI" not in scoreValues:
                    scoreValues["NMI"] = []
                scoreValues['NMI'].append(
                    MI(vr_1_gf.copy(), vr_2_gf.copy(), mask_array=None,
                       normalised=True))

            ########### cMI ###########
            if score in ["all", "cMI"]:
                if "cMI" not in scoreValues:
                    scoreValues["cMI"] = []
                scoreValues['cMI'].append(
                    MI(vr_1_gf.copy(), vr_2_gf.copy(),
                       mask_array=N.logical_or(vr_1_mask, vr_2_mask),
                       normalised=False))

            ########### oMI ###########
            if score in ["all", "oMI"]:
                if "oMI" not in scoreValues:
                    scoreValues["oMI"] = []
                scoreValues['oMI'].append(
                    MI(vr_1_gf.copy(), vr_2_gf.copy(),
                       mask_array=N.logical_and(vr_1_mask, vr_2_mask),
                       normalised=False))

            ########### amMI ###########
            if score in ["all", "amMI"]:
                if "amMI" not in scoreValues:
                    scoreValues["amMI"] = []
                scoreValues['amMI'].append(
                    MI(vr_1_gf.copy(), vr_2_gf.copy(),
                       mask_array=cluster_mask, normalised=False))

            print("Number of pairs computed:", i, end="\r")
            del (vr_1, vr_2, vm_1, vm_2, vr_1_gf, vr_2_gf, threshold_i,
                 vr_1_mask, vr_2_mask, real_masks_or, real_masks_and,
                 masks_logical_and)
            gc.collect()

    for score in scoreValues.keys():
        if score != "SFSC":
            scoreValues[score] = str(N.mean(scoreValues[score]))

    with open(outFile, "w") as f:
        json.dump(scoreValues, f, indent=3)

    del scoreValues
    gc.collect()
from numpy import uint8
from numpy.fft import ifftn

def fft2pic(imgfft, ogshape):
    """Inverse FFT back to an image, cropping away zero padding to the original shape."""
    return ifftn(imgfft).real.astype(uint8)[:ogshape[0], :ogshape[1]]
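# Usage sketch for fft2pic: forward-transform a zero-padded image, then crop
# the inverse transform back to the original shape. `img` and the pad sizes
# are made-up test values, not part of the original code.
import numpy as np
from numpy.fft import fftn

img = (255 * np.random.rand(60, 80)).astype(np.uint8)
padded = np.zeros((64, 96))
padded[:60, :80] = img
restored = fft2pic(fftn(padded), img.shape)
assert restored.shape == img.shape  # (60, 80)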
from numpy.fft import ifftn, fftshift

def iftindx1(inparray):
    """Inverse FFT along axis 1 only, followed by an fftshift on that axis."""
    iftarray = ifftn(inparray, axes=[1])
    iftarray = fftshift(iftarray, axes=[1])
    return iftarray
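# Quick check of iftindx1: only axis 1 is inverse-transformed (and shifted),
# so the other axes pass through unchanged. The input here is synthetic.
import numpy as np

spectrum = np.fft.fft(np.random.rand(4, 8, 3), axis=1)
out = iftindx1(spectrum)
assert out.shape == spectrum.shape
# undoing the shift and re-transforming axis 1 recovers the input
back = np.fft.fftn(np.fft.ifftshift(out, axes=[1]), axes=[1])
assert np.allclose(back, spectrum)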
def fourier_filter(self, V1t_xG, components='normal', criteria=1):
    """Fourier filter atomic gradients of the effective potential.

    Parameters
    ----------
    V1t_xG: ndarray
        Array representation of atomic gradients of the effective
        potential in the supercell grid.
    components: str
        Fourier components to filter out (``normal`` or ``umklapp``).
    """
    assert components in ['normal', 'umklapp']

    # Grid shape
    shape = V1t_xG.shape[-3:]

    # Primitive unit cells in Bohr/Bohr^-1
    cell_cv = self.atoms.get_cell() / units.Bohr
    reci_vc = 2 * pi * la.inv(cell_cv)
    norm_c = np.sqrt(np.sum(reci_vc**2, axis=0))

    # Periodic BC array
    pbc_c = np.array(self.atoms.get_pbc(), dtype=bool)

    # Supercell atoms and cell
    atoms_N = self.atoms * self.N_c
    supercell_cv = atoms_N.get_cell() / units.Bohr

    # q-grid in units of the grid spacing (FFT ordering)
    q_cG = np.indices(shape).reshape(3, -1)
    q_c = np.array(shape)[:, np.newaxis]
    q_cG += q_c // 2
    q_cG %= q_c
    q_cG -= q_c // 2

    # Locate q-points inside the Brillouin zone
    if criteria == 0:  # Works for all cases
        # Grid spacing in direction of reciprocal lattice vectors
        h_c = np.sqrt(np.sum((2 * pi * la.inv(supercell_cv))**2, axis=0))
        # An in-place "*=" does not work here because q_cG is an integer
        # array and the right-hand side is float
        q1_cG = q_cG * h_c[:, np.newaxis] / (norm_c[:, np.newaxis] / 2)
        mask_G = np.ones(np.prod(shape), dtype=bool)
        for i, pbc in enumerate(pbc_c):
            if not pbc:
                continue
            mask_G &= (-1. < q1_cG[i]) & (q1_cG[i] <= 1.)
    else:  # 2D hexagonal lattice
        # Projection of q points onto the periodic directions. Only in
        # these directions do normal and umklapp processes make sense.
        q_vG = np.dot(q_cG[pbc_c].T,
                      2 * pi * la.inv(supercell_cv).T[pbc_c]).T.copy()
        # Parametrize the BZ boundary in terms of the angle theta
        theta_G = np.arctan2(q_vG[1], q_vG[0]) % (pi / 3)
        phi_G = pi / 6 - np.abs(theta_G)
        qmax_G = norm_c[0] / 2 / np.cos(phi_G)
        norm_G = np.sqrt(np.sum(q_vG**2, axis=0))
        # Include points on the BZ boundary (hence the +1e-2 margin)
        mask_G = (norm_G <= qmax_G + 1e-2)
        # & (q_vG[1] < (norm_c[0] / 2 - 1e-3))

    if components != 'normal':
        mask_G = ~mask_G

    # Reshape to grid shape
    mask_G.shape = shape

    for V1t_G in V1t_xG:
        # Fourier transform atomic gradient
        V1tq_G = fft.fftn(V1t_G)
        # Zero out the filtered components
        V1tq_G[mask_G] = 0.0
        # Fourier transform back
        V1t_G[:] = fft.ifftn(V1tq_G).real
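# The index juggling in fourier_filter reproduces FFT frequency ordering with
# integer arithmetic: ((j + N//2) % N) - N//2 maps grid index j to the signed
# frequency that np.fft.fftfreq(N) * N would give. A standalone check on a
# small grid (the shape here is arbitrary):
import numpy as np

shape = (4, 5, 6)
q_cG = np.indices(shape).reshape(3, -1)
q_c = np.array(shape)[:, np.newaxis]
q_cG += q_c // 2
q_cG %= q_c
q_cG -= q_c // 2

for axis, n in enumerate(shape):
    ref = np.rint(np.fft.fftfreq(n) * n).astype(int)
    assert set(q_cG[axis]) == set(ref)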
def average_arrays(
    avg_obj,
    ref_obj,
    obj,
    support_threshold=0.25,
    correlation_threshold=0.90,
    aligning_option="dft",
    space="reciprocal_space",
    debugging=False,
    **kwargs,
):
    """
    Average two reconstructions after aligning them.

    This function can be used to average a series of arrays within a loop.
    Alignment is performed using either DFT registration or the shift of the
    center of mass of the array. Averaging is performed only if the Pearson
    cross-correlation after alignment is larger than the correlation threshold.

    :param avg_obj: 3D array of complex numbers, current average
    :param ref_obj: 3D array of complex numbers, used as a reference for the
     alignment
    :param obj: 3D array of complex numbers, array to be aligned with the
     reference and to be added to avg_obj
    :param support_threshold: normalized threshold for the definition of the
     support. It is applied on the modulus of the array
    :param correlation_threshold: float in [0, 1], minimum correlation between
     two datasets to average them
    :param aligning_option: 'com' for center of mass, 'dft' for dft
     registration and subpixel shift
    :param space: 'direct_space' or 'reciprocal_space', in which space the
     average will be performed
    :param debugging: boolean, set to True to see plots
    :param kwargs:
     - 'width_z': size of the area to plot in z (axis 0), centered on the
       middle of the initial array
     - 'width_y': size of the area to plot in y (axis 1), centered on the
       middle of the initial array
     - 'width_x': size of the area to plot in x (axis 2), centered on the
       middle of the initial array
     - 'reciprocal_space': True if the object is in reciprocal space, it is
       used only for defining labels in plots
     - 'is_orthogonal': True if the data is in an orthonormal frame. Used for
       defining default plot labels.

    :return: the updated average complex density and a flag set to 1 if obj
     was averaged, 0 otherwise
    """
    # check some parameters
    valid.valid_ndarray(arrays=(obj, avg_obj, ref_obj), ndim=3)
    if space not in {"direct_space", "reciprocal_space"}:
        raise ValueError("space should be 'direct_space' or 'reciprocal_space'")
    valid.valid_kwargs(
        kwargs=kwargs,
        allowed_kwargs={
            "width_z",
            "width_y",
            "width_x",
            "reciprocal_space",
            "is_orthogonal",
        },
        name="postprocessing_utils.average_obj",
    )
    width_z = kwargs.get("width_z")
    width_y = kwargs.get("width_y")
    width_x = kwargs.get("width_x")
    reciprocal_space = kwargs.get("reciprocal_space", False)
    is_orthogonal = kwargs.get("is_orthogonal", False)
    avg_flag = 0

    #######################################################
    # first iteration of the loop, no running average yet #
    #######################################################
    if avg_obj.sum() == 0:
        avg_obj = ref_obj
        if debugging:
            gu.multislices_plot(
                abs(avg_obj),
                width_z=width_z,
                width_y=width_y,
                width_x=width_x,
                plot_colorbar=True,
                sum_frames=True,
                title="Reference object",
                reciprocal_space=reciprocal_space,
                is_orthogonal=is_orthogonal,
            )
        return avg_obj, avg_flag

    ###############################################
    # next iterations, update the running average #
    ###############################################
    # align obj
    new_obj, _ = align_arrays(
        reference_array=ref_obj,
        shifted_array=obj,
        shift_method="modulus",
        interpolation_method=aligning_option,
        support_threshold=support_threshold,
        precision=1000,
        verbose=True,
        debugging=debugging,
    )

    # renormalize new_obj
    new_obj = new_obj / abs(new_obj).max()

    # calculate the correlation between arrays and average them eventually
    correlation = pearsonr(
        np.ndarray.flatten(abs(ref_obj)), np.ndarray.flatten(abs(new_obj))
    )[0]
    if correlation < correlation_threshold:
        print(
            f"pearson cross-correlation = {correlation} too low, "
            "skipping this reconstruction"
        )
    else:  # combine the arrays
        print(
            f"pearson-correlation = {correlation}, ",
            "average with this reconstruction",
        )
        if debugging:
            myfig, _, _ = gu.multislices_plot(
                abs(new_obj),
                width_z=width_z,
                width_y=width_y,
                width_x=width_x,
                sum_frames=True,
                plot_colorbar=True,
                title="Aligned object",
                reciprocal_space=reciprocal_space,
                is_orthogonal=is_orthogonal,
            )
            myfig.text(
                0.60,
                0.30,
                "pearson-correlation = " + str("{:.4f}".format(correlation)),
                size=20,
            )

        # update the average either in direct space or in reciprocal space
        if space == "direct_space":
            avg_obj = avg_obj + new_obj
        else:  # "reciprocal_space": sum the aligned array, as in direct space
            avg_obj = ifftn(fftn(avg_obj) + fftn(new_obj))
        avg_flag = 1

    if debugging:
        gu.multislices_plot(
            abs(avg_obj),
            plot_colorbar=True,
            width_z=width_z,
            width_y=width_y,
            width_x=width_x,
            sum_frames=True,
            title="New averaged object",
            reciprocal_space=reciprocal_space,
            is_orthogonal=is_orthogonal,
        )
    return avg_obj, avg_flag
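# Typical driver loop for average_arrays; a sketch only. `reconstructions`
# stands in for a list of 3D complex reconstructions from repeated
# phase-retrieval runs, and the final division by the number of accepted
# arrays is an assumption, not taken from the original code.
import numpy as np

reconstructions = [
    np.random.rand(16, 16, 16) * np.exp(1j * np.random.rand(16, 16, 16))
    for _ in range(3)
]
ref_obj = reconstructions[0]
avg_obj = np.zeros_like(ref_obj)  # the first call seeds the average with ref_obj
nb_averaged = 1  # counts the reference itself
for obj in reconstructions:
    avg_obj, flag = average_arrays(
        avg_obj,
        ref_obj,
        obj,
        correlation_threshold=0.90,
        space="reciprocal_space",
    )
    nb_averaged += flag
avg_obj = avg_obj / nb_averaged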
def test_irfft_gradcheck():
    invar = create_complex_var(5, 11)
    assert torch.autograd.gradcheck(afft.Irfft(), invar)

def test_irfft2d_gradcheck():
    invar = create_complex_var(5, 5, 5)
    assert torch.autograd.gradcheck(afft.Irfft2d(), invar)

def test_irfft3d_gradcheck():
    invar = create_complex_var(5, 3, 3, 3)
    assert torch.autograd.gradcheck(afft.Irfft3d(), invar)

if __name__ == "__main__":
    if torch.cuda.is_available():
        nfft3 = lambda x: nfft.fftn(x, axes=(1, 2, 3))
        nifft3 = lambda x: nfft.ifftn(x, axes=(1, 2, 3))
        cfs = [cfft.fft, cfft.fft2, cfft.fft3]
        nfs = [nfft.fft, nfft.fft2, nfft3]
        cifs = [cfft.ifft, cfft.ifft2, cfft.ifft3]
        nifs = [nfft.ifft, nfft.ifft2, nifft3]
        for args in zip(cfs, nfs, cifs, nifs):
            test_c2c(*args)

        nrfft3 = lambda x: nfft.rfftn(x, axes=(1, 2, 3))
        nirfft3 = lambda x: nfft.irfftn(x, axes=(1, 2, 3))
        cfs = [cfft.rfft, cfft.rfft2, cfft.rfft3]
        nfs = [nfft.rfft, nfft.rfft2, nrfft3]
        cifs = [cfft.irfft, cfft.irfft2, cfft.irfft3]
        nifs = [nfft.irfft, nfft.irfft2, nirfft3]
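# create_complex_var is not shown in this excerpt. These autograd FFT wrappers
# appear to represent a complex tensor as a (real, imag) pair, so a plausible
# sketch -- an assumption, not the library's actual helper -- is the
# following; the __main__ block above guards on CUDA, so the real helper may
# allocate on the GPU instead:
import torch
from torch.autograd import Variable

def create_complex_var(*sizes):
    # double precision and requires_grad=True are what gradcheck expects
    return (Variable(torch.randn(*sizes).double(), requires_grad=True),
            Variable(torch.randn(*sizes).double(), requires_grad=True))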
def apply(self, in_xg, out_xg, phase_cd=None):
    """Multiply one or more grids by the Fourier-space kernel ``k2_Q``."""
    if in_xg.ndim > 3:
        for in_g, out_g in zip(in_xg, out_xg):
            out_g[:] = ifftn(fftn(in_g) * self.k2_Q).real
    else:
        out_xg[:] = ifftn(fftn(in_xg) * self.k2_Q).real
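# Minimal standalone check that the batched branch of apply (ndim > 3) agrees
# with transforming one grid at a time. k2_Q is a random stand-in for the
# solver's stored Fourier-space kernel, not the actual attribute:
import numpy as np
from numpy.fft import fftn, ifftn

shape = (6, 6, 6)
k2_Q = np.random.rand(*shape)
in_xg = np.random.rand(2, *shape)
out_xg = np.empty_like(in_xg)

for in_g, out_g in zip(in_xg, out_xg):
    out_g[:] = ifftn(fftn(in_g) * k2_Q).real

assert np.allclose(out_xg[0], ifftn(fftn(in_xg[0]) * k2_Q).real)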