def backproject(self, rot_matrices):
    """
    Backproject this Image (stack) along the given rotations.

    :param rot_matrices: An n-by-3-by-3 array of rotation matrices
        corresponding to viewing directions.
    :return: Volume instance corresponding to the backprojected images.
    """
    L = self.res

    ensure(
        self.n_images == rot_matrices.shape[0],
        "Number of rotation matrices must match the number of images",
    )

    # TODO: rotated_grids might as well give us correctly shaped array in the first place
    pts_rot = aspire.volume.rotated_grids(L, rot_matrices)
    pts_rot = np.moveaxis(pts_rot, 1, 2)
    pts_rot = m_reshape(pts_rot, (3, -1))

    # Normalized, centered 2D FFT of the image stack.
    im_f = xp.asnumpy(fft.centered_fft2(xp.asarray(self.data))) / (L**2)
    if L % 2 == 0:
        # For even L, zero the first (unpaired) frequency row and column.
        im_f[:, 0, :] = 0
        im_f[:, :, 0] = 0

    im_f = im_f.flatten()

    # Adjoint NUFFT maps the rotated Fourier samples back onto a 3D grid.
    vol = anufft(im_f, pts_rot, (L, L, L), real=True) / L

    return aspire.volume.Volume(vol)
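
# Usage sketch (hypothetical, not part of the library): backproject a small
# stack of images along known viewing directions. Assumes `Image` is
# aspire.image.Image and that identity rotations are acceptable stand-ins
# for real viewing angles.
#
#     import numpy as np
#     from aspire.image import Image
#
#     n, L = 8, 32
#     ims = Image(np.random.rand(n, L, L).astype(np.float32))
#     rots = np.tile(np.eye(3, dtype=np.float32), (n, 1, 1))  # n-by-3-by-3
#     vol = ims.backproject(rots)  # Volume with data of shape (L, L, L)
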
def estimate_noise_psd(self):
    """
    :return: The estimated noise power spectral density (PSD) of the images
        in the Source used to create this estimator, as an L-by-L array.

    TODO: How's this initial estimate different from the 'estimate' method?
    """
    # Run estimate using saved parameters
    g2d = grid_2d(self.L)
    mask = g2d["r"] >= self.bgRadius

    mean_est = 0
    noise_psd_est = np.zeros((self.L, self.L)).astype(self.src.dtype)
    for i in range(0, self.n, self.batchSize):
        images = self.src.images(i, self.batchSize).asnumpy()
        images_masked = images * mask

        _denominator = self.n * np.sum(mask)
        mean_est += np.sum(images_masked) / _denominator
        im_masked_f = xp.asnumpy(fft.centered_fft2(xp.asarray(images_masked)))
        noise_psd_est += np.sum(np.abs(im_masked_f**2), axis=0) / _denominator

    # Remove the DC contribution of the background mean from the PSD.
    mid = self.L // 2
    noise_psd_est[mid, mid] -= mean_est**2

    return noise_psd_est
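
# Usage sketch (hypothetical): this method is assumed to live on an
# ASPIRE-style anisotropic noise estimator constructed from an ImageSource.
# The class name and constructor below are assumptions, not confirmed API.
#
#     from aspire.noise import AnisotropicNoiseEstimator
#
#     estimator = AnisotropicNoiseEstimator(src)  # `src` is an ImageSource
#     psd = estimator.estimate_noise_psd()        # (L, L) array, DC at the center
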
def downsample(self, ds_res):
    """
    Downsample Image to a specific resolution. This method returns a new Image.

    :param ds_res: int - new resolution, should be <= the current resolution
        of this Image
    :return: The downsampled Image object.
    """
    grid = grid_2d(self.res)
    grid_ds = grid_2d(ds_res)

    im_ds = np.zeros((self.n_images, ds_res, ds_res), dtype=self.dtype)

    # x, y values corresponding to 'grid'. This is what scipy interpolator needs to function.
    res_by_2 = self.res / 2
    x = y = np.ceil(np.arange(-res_by_2, res_by_2)) / res_by_2

    mask = (np.abs(grid["x"]) < ds_res / self.res) & (
        np.abs(grid["y"]) < ds_res / self.res
    )
    im_shifted = fft.centered_ifft2(
        fft.centered_fft2(xp.asarray(self.data)) * xp.asarray(mask)
    )
    im = np.real(xp.asnumpy(im_shifted))

    for s in range(im_ds.shape[0]):
        interpolator = RegularGridInterpolator(
            (x, y), im[s], bounds_error=False, fill_value=0
        )
        im_ds[s] = interpolator(np.dstack([grid_ds["x"], grid_ds["y"]]))

    return Image(im_ds)
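
# Usage sketch (hypothetical): downsample a stack from 64 to 32 pixels.
# Assumes `Image` is aspire.image.Image.
#
#     import numpy as np
#     from aspire.image import Image
#
#     ims = Image(np.random.rand(10, 64, 64).astype(np.float32))
#     ims_small = ims.downsample(32)  # new Image with shape (10, 32, 32)
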
def filter(self, filter):
    """
    Apply a `Filter` object to the Image and return a new Image.

    :param filter: An object of type `Filter`.
    :return: A new filtered `Image` object.
    """
    filter_values = filter.evaluate_grid(self.res)

    im_f = xp.asnumpy(fft.centered_fft2(xp.asarray(self.data)))

    # Broadcast the filter over the image stack; multiply in place when the
    # stacked transform has more dimensions than the filter grid.
    if im_f.ndim > filter_values.ndim:
        im_f *= filter_values
    else:
        im_f = filter_values * im_f

    im = xp.asnumpy(fft.centered_ifft2(xp.asarray(im_f)))
    im = np.real(im)

    return Image(im)
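
# Usage sketch (hypothetical): apply a radially symmetric CTF filter to an
# image stack. `RadialCTFFilter` and its `defocus` keyword are assumptions
# about the surrounding library, not confirmed here.
#
#     from aspire.operators import RadialCTFFilter
#
#     ctf = RadialCTFFilter(defocus=1.5e4)
#     ims_filtered = ims.filter(ctf)  # new Image, same shape as `ims`
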
def adaptive_support(img_src, energy_threshold=0.99):
    """
    Determine the size of the compact support in both real and Fourier space.

    Returns c_limit (support radius in Fourier space) and R_limit (support
    radius in real space). The Fourier c_limit is scaled in the range
    [0, 0.5]. R_limit is in pixels in the range [0, Image.res/2].

    :param img_src: Input `Source` of images.
    :param energy_threshold: Threshold limit in (0, 1].
    :return: (c_limit, R_limit)
    """

    if not isinstance(img_src, ImageSource):
        raise RuntimeError(
            "adaptive_support expects `Source` instance or subclass."
        )

    # Sanity check that the threshold is in range
    if energy_threshold <= 0 or energy_threshold > 1:
        raise ValueError(
            f"Given energy_threshold {energy_threshold} outside sane range (0, 1]"
        )

    L = img_src.L
    N = L // 2

    r = grid_2d(L, shifted=False, normalized=False, dtype=img_src.dtype)["r"]

    # Estimate noise
    noise_est = WhiteNoiseEstimator(img_src)
    noise_var = noise_est.estimate()

    # Transform to Fourier space
    img = img_src.images(0, img_src.n).asnumpy()
    imgf = fft.centered_fft2(img)

    # Compute the variance map and power spectrum
    #   (mean along the image stack)
    variance_map = np.mean(np.abs(img) ** 2, axis=0)
    pspec = np.mean(np.abs(imgf) ** 2, axis=0)

    # Compute the radial variance and radial power spectrum
    radial_var = np.zeros(N)
    radial_pspec = np.zeros(N)
    for i in range(N):
        mask = (r >= i) & (r < i + 1)
        # Mean along radial track defined by mask
        radial_var[i] = np.mean(variance_map[mask])
        radial_pspec[i] = np.mean(pspec[mask])

    # Subtract the noise variance
    radial_pspec -= noise_var
    radial_var -= noise_var

    # Lower bound variance and power by 0
    np.clip(radial_pspec, 0, a_max=None, out=radial_pspec)
    np.clip(radial_var, 0, a_max=None, out=radial_var)

    # Construct range of Fourier limits. We need a half-sample correction
    # since each ring is centered between two integer radii. Same for the
    # spatial domain (R).
    c = (np.arange(N) + 0.5) / (2 * N)
    R = np.arange(N) + 0.5

    # Calculate cumulative energy
    cum_pspec = np.cumsum(radial_pspec * c)
    cum_var = np.cumsum(radial_var * R)

    # Normalize energies to [0, 1].
    # Multiply the threshold instead, to avoid unstable division.
    c_energy_threshold = energy_threshold * cum_pspec[-1]
    R_energy_threshold = energy_threshold * cum_var[-1]

    # First note: legacy code applied *=L to the Fourier limit, but then only
    #   used it divided by L, so that factor is removed here. This makes the
    #   result consistent with Nyquist, i.e. [0, 0.5].
    # Second note: we attempt to find the cutoff, but when the search fails we
    #   return the last (-1) element, essentially the maximal radius.
    # Third note: to increase accuracy, we take a weighted average of the two
    #   points around the cutoff. This mostly affects c, since R is rounded.
    ind = np.argmax(cum_pspec > c_energy_threshold)
    if ind > 0:
        c_limit = (cum_pspec[ind - 1] * c[ind - 1] + cum_pspec[ind] * c[ind]) / (
            cum_pspec[ind - 1] + cum_pspec[ind]
        )
    else:
        c_limit = c[-1]

    ind = np.argmax(cum_var > R_energy_threshold)
    if ind > 0:
        R_limit = round(
            (cum_var[ind - 1] * R[ind - 1] + cum_var[ind] * R[ind])
            / (cum_var[ind - 1] + cum_var[ind])
        )
    else:
        R_limit = R[-1]

    return c_limit, R_limit
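
# Usage sketch (hypothetical): estimate the compact support of a simulated
# source. `Simulation` and its keyword names are assumptions about the
# surrounding library.
#
#     from aspire.source import Simulation
#
#     sim = Simulation(L=64, n=128)
#     c_limit, R_limit = adaptive_support(sim, energy_threshold=0.99)
#     # c_limit in [0, 0.5] (Fourier), R_limit in pixels [0, L/2] (real space)
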