def _get_covar(self, coeffs, mean_coeff=None, do_refl=True):
    """
    Calculate the covariance matrix from the expansion coefficients without CTF information.

    :param coeffs: A coefficient vector (or an array of coefficient vectors) calculated from 2D images.
    :param mean_coeff: The mean vector calculated from the `coeffs`.
    :param do_refl: If true, enforce invariance to reflection (default True).
    :return: The covariance matrix of coefficients for all images.
    """
    if coeffs.size == 0:
        raise RuntimeError("The coefficients need to be calculated first!")
    if mean_coeff is None:
        mean_coeff = self._get_mean(coeffs)

    # Initialize a totally empty BlkDiagMatrix, build incrementally.
    covar_coeff = BlkDiagMatrix.empty(0, dtype=coeffs.dtype)

    # The ell = 0 block is built from the mean-subtracted zero-frequency coefficients.
    ell = 0
    mask = self.basis._indices["ells"] == ell
    coeff_ell = coeffs[..., mask] - mean_coeff[mask]
    covar_ell = np.array(coeff_ell.T @ coeff_ell / coeffs.shape[0])
    covar_coeff.append(covar_ell)

    # Remaining angular frequencies have positive and negative sign blocks.
    for ell in range(1, self.basis.ell_max + 1):
        mask = self.basis._indices["ells"] == ell
        mask_pos = [
            mask[i] and (self.basis._indices["sgns"][i] == +1)
            for i in range(len(mask))
        ]
        mask_neg = [
            mask[i] and (self.basis._indices["sgns"][i] == -1)
            for i in range(len(mask))
        ]
        covar_ell_diag = np.array(
            coeffs[:, mask_pos].T @ coeffs[:, mask_pos]
            + coeffs[:, mask_neg].T @ coeffs[:, mask_neg]
        ) / (2 * coeffs.shape[0])

        if do_refl:
            covar_coeff.append(covar_ell_diag)
            covar_coeff.append(covar_ell_diag)
        else:
            covar_ell_off = np.array(
                (
                    coeffs[:, mask_pos] @ coeffs[:, mask_neg].T / coeffs.shape[0]
                    - coeffs[:, mask_pos].T @ coeffs[:, mask_neg]
                )
                / (2 * coeffs.shape[0])
            )
            hsize = covar_ell_diag.shape[0]
            covar_coeff_blk = np.zeros((2, hsize, 2, hsize))
            covar_coeff_blk[0:2, :, 0:2, :] = covar_ell_diag[:hsize, :hsize]
            covar_coeff_blk[0, :, 1, :] = covar_ell_off[:hsize, :hsize]
            covar_coeff_blk[1, :, 0, :] = covar_ell_off.T[:hsize, :hsize]
            covar_coeff.append(covar_coeff_blk.reshape(2 * hsize, 2 * hsize))

    return covar_coeff
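# Illustrative sketch (not part of the library): the method above builds one
# covariance block per angular frequency. The snippet below mimics that
# construction with a plain Python list in place of BlkDiagMatrix. The `ells`,
# `sgns`, and `coeffs` arrays here are synthetic stand-ins for
# `basis._indices["ells"]`, `basis._indices["sgns"]`, and real FB coefficients.
import numpy as np

rng = np.random.default_rng(0)
n_images, ell_max, k_radial = 100, 2, 3

# Fake index tables: one ell=0 block, then +/- sign blocks for ell = 1..ell_max.
ells = np.repeat(np.arange(ell_max + 1), [k_radial] + [2 * k_radial] * ell_max)
sgns = np.concatenate(
    [np.ones(k_radial)]
    + [np.concatenate([np.ones(k_radial), -np.ones(k_radial)]) for _ in range(ell_max)]
)
coeffs = rng.standard_normal((n_images, ells.size))
mean_coeff = coeffs.mean(axis=0)

blocks = []  # plain-list stand-in for BlkDiagMatrix

# ell = 0: covariance of mean-subtracted zero-frequency coefficients.
mask0 = ells == 0
c0 = coeffs[:, mask0] - mean_coeff[mask0]
blocks.append(c0.T @ c0 / n_images)

# ell >= 1: average the +/- sign blocks, append the shared block twice (do_refl=True).
for ell in range(1, ell_max + 1):
    pos = (ells == ell) & (sgns == +1)
    neg = (ells == ell) & (sgns == -1)
    blk = (coeffs[:, pos].T @ coeffs[:, pos] + coeffs[:, neg].T @ coeffs[:, neg]) / (
        2 * n_images
    )
    blocks.extend([blk, blk])

# Each block is k_radial x k_radial; together they form a block diagonal covariance.
assert all(b.shape == (k_radial, k_radial) for b in blocks)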
def filter_to_fb_mat(h_fun, fbasis):
    """
    Convert a nonradial function in k space into a basis representation.

    :param h_fun: The function form in k space.
    :param fbasis: The basis object for expanding.
    :return: A BlkDiagMatrix instance representation using the `fbasis` expansion.
    """
    # These form a circular dependence, import locally until time to clean up.
    from aspire.basis import FFBBasis2D
    from aspire.basis.basis_utils import lgwt

    if not isinstance(fbasis, FFBBasis2D):
        raise NotImplementedError("Currently only fast FB method is supported")

    # Set same dimensions as basis object
    n_k = fbasis.n_r
    n_theta = fbasis.n_theta
    radial = fbasis.get_radial()

    # Get 2D grid in polar coordinates
    k_vals, wts = lgwt(n_k, 0, 0.5, dtype=fbasis.dtype)
    k, theta = np.meshgrid(
        k_vals, np.arange(n_theta) * 2 * np.pi / (2 * n_theta), indexing="ij"
    )

    # Get function values in polar 2D grid and average out angle contribution
    omegax = k * np.cos(theta)
    omegay = k * np.sin(theta)
    omega = 2 * np.pi * np.vstack((omegax.flatten("C"), omegay.flatten("C")))
    h_vals2d = h_fun(omega).reshape(n_k, n_theta).astype(fbasis.dtype)
    h_vals = np.sum(h_vals2d, axis=1) / n_theta

    # Represent 1D function values in fbasis
    h_fb = BlkDiagMatrix.empty(2 * fbasis.ell_max + 1, dtype=fbasis.dtype)
    ind_ell = 0
    for ell in range(0, fbasis.ell_max + 1):
        k_max = fbasis.k_max[ell]
        rmat = 2 * k_vals.reshape(n_k, 1) * fbasis.r0[0:k_max, ell].T
        fb_vals = np.zeros_like(rmat)
        ind_radial = np.sum(fbasis.k_max[0:ell])
        fb_vals[:, 0:k_max] = radial[ind_radial : ind_radial + k_max].T
        h_fb_vals = fb_vals * h_vals.reshape(n_k, 1)
        h_fb_ell = fb_vals.T @ (
            h_fb_vals * k_vals.reshape(n_k, 1) * wts.reshape(n_k, 1)
        )
        h_fb[ind_ell] = h_fb_ell
        ind_ell += 1
        # Each ell > 0 has a positive and a negative sign block sharing the same values.
        if ell > 0:
            h_fb[ind_ell] = h_fb[ind_ell - 1]
            ind_ell += 1

    return h_fb
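# Illustrative sketch (not part of the library): the angular-averaging step of
# filter_to_fb_mat, using plain numpy. In place of aspire's `lgwt`, numpy's
# Gauss-Legendre nodes are mapped from [-1, 1] onto [0, 0.5]; `h_fun` here is a
# hypothetical radially symmetric Gaussian in k space standing in for a real filter.
import numpy as np

n_k, n_theta = 16, 32

# Gauss-Legendre quadrature nodes and weights on [0, 0.5] (radial frequencies).
x, w = np.polynomial.legendre.leggauss(n_k)
k_vals = 0.25 * (x + 1.0)  # map [-1, 1] -> [0, 0.5]
wts = 0.25 * w

# Polar grid and Cartesian frequency coordinates, as in the function above.
k, theta = np.meshgrid(
    k_vals, np.arange(n_theta) * 2 * np.pi / (2 * n_theta), indexing="ij"
)
omegax = k * np.cos(theta)
omegay = k * np.sin(theta)
omega = 2 * np.pi * np.vstack((omegax.flatten("C"), omegay.flatten("C")))


def h_fun(om):
    # Hypothetical isotropic filter: Gaussian in |omega|.
    return np.exp(-0.5 * np.sum(om**2, axis=0))


# Average out the angular contribution, leaving a 1D radial profile h(k).
h_vals = h_fun(omega).reshape(n_k, n_theta).sum(axis=1) / n_theta

# h_vals, weighted by k_vals * wts, is what gets projected onto the radial
# basis functions (fb_vals) inside the ell loop of filter_to_fb_mat.
print(h_vals.shape)  # (16,)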
def build(self):
    """
    Computes the FSPCA basis.

    This may take some time for large image stacks.
    """
    coef = self.basis.evaluate_t(self.src.images(0, self.src.n))

    if self.noise_var is None:
        from aspire.noise import WhiteNoiseEstimator

        logger.info("Estimating the noise of images.")
        self.noise_var = WhiteNoiseEstimator(self.src).estimate()
    logger.info(f"Setting noise_var={self.noise_var}")

    cov2d = RotCov2D(self.basis)
    covar_opt = {
        "shrinker": "frobenius_norm",
        "verbose": 0,
        "max_iter": 250,
        "iter_callback": [],
        "store_iterates": False,
        "rel_tolerance": 1e-12,
        "precision": "float64",
        "preconditioner": "identity",
    }

    self.mean_coef_est = cov2d.get_mean(coef)
    self.covar_coef_est = cov2d.get_covar(
        coef,
        mean_coeff=self.mean_coef_est,
        noise_var=self.noise_var,
        covar_est_opt=covar_opt,
    )

    # Create the arrays to be packed by _compute_spca
    self.eigvals = np.zeros(self.basis.count, dtype=self.dtype)
    self.eigvecs = BlkDiagMatrix.empty(2 * self.basis.ell_max + 1, dtype=self.dtype)
    self.spca_coef = np.zeros((self.src.n, self.basis.count), dtype=self.dtype)

    self._compute_spca(coef)

    self._compress(self.components)
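# Hypothetical usage sketch (not from the source): how build() is typically
# reached. The exact constructor signatures of Simulation, FFBBasis2D, and
# FSPCABasis are assumptions here and may differ between aspire versions; some
# versions call build() from the FSPCABasis constructor automatically.
import numpy as np

from aspire.basis import FFBBasis2D, FSPCABasis
from aspire.source import Simulation

img_size, n_imgs = 32, 128
src = Simulation(L=img_size, n=n_imgs, dtype=np.float64)  # synthetic projections
basis = FFBBasis2D(img_size, dtype=np.float64)  # fast Fourier-Bessel basis

fspca = FSPCABasis(src, basis, noise_var=0)  # noise_var=0 skips noise estimation
fspca.build()  # populates eigvals, eigvecs, spca_coef as in the method above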
def _compute_spca(self, coef):
    """
    Algorithm 2 from paper.

    It has been adapted to use ASPIRE-Python's cov2d (real) covariance estimation.
    """
    # Compute coefficient vector of mean image at zeroth component
    self.mean_coef_zero = self.mean_coef_est[self.angular_indices == 0]

    # Make the Data matrix (A_k)
    # # Construct A_k, matrix of expansion coefficients a^i_k_q
    # #   for image i, angular index k, radial index q,
    # #   (around eq 31-33)
    # #   Rows radial indices, columns image i.
    # #
    # # We can extract this directly (up to transpose) from
    # #   fb coef matrix where ells == angular_index
    # #   then use the transpose so image stack becomes columns.

    # Initialize a totally empty BlkDiagMatrix, then build incrementally.
    A = BlkDiagMatrix.empty(0, dtype=coef.dtype)

    # Zero angular index is special case of indexing.
    mask = self.basis._indices["ells"] == 0
    A_0 = coef[:, mask] - self.mean_coef_zero
    A.append(A_0)

    # Remaining angular indices have positive and negative entries in real representation.
    for ell in range(1, self.basis.ell_max + 1):  # `ell` in this code is `k` from paper
        mask = self.basis._indices["ells"] == ell
        mask_pos = [
            mask[i] and (self.basis._indices["sgns"][i] == +1)
            for i in range(len(mask))
        ]
        mask_neg = [
            mask[i] and (self.basis._indices["sgns"][i] == -1)
            for i in range(len(mask))
        ]
        A.append(coef[:, mask_pos])
        A.append(coef[:, mask_neg])

    if len(A) != len(self.covar_coef_est):
        raise RuntimeError(
            "Data matrix A should have same number of blocks as Covar matrix.",
            f" {len(A)} != {len(self.covar_coef_est)}",
        )

    # For each angular frequency (`ells` in FB code, `k` from paper)
    # we use the properties of Block Diagonal Matrices to work
    # on the corresponding block.
    eigval_index = 0
    for angular_index, C_k in enumerate(self.covar_coef_est):

        # Eigen/SVD, covariance block C_k should be symmetric.
        eigvals_k, eigvecs_k = np.linalg.eigh(C_k)

        # Deterministically enforce eigen vector sign convention
        eigvecs_k = fix_signs(eigvecs_k)

        # Sort eigvals_k
        sorted_indices = np.argsort(-eigvals_k)
        eigvals_k, eigvecs_k = (
            eigvals_k[sorted_indices],
            eigvecs_k[:, sorted_indices],
        )

        # These are the dense basis indices for this block.
        basis_inds = np.arange(eigval_index, eigval_index + len(eigvals_k))

        # Store the eigvals for this block, note this is a flat array.
        self.eigvals[basis_inds] = eigvals_k

        # Store the eigvecs, note this is a BlkDiagMatrix and is assigned incrementally.
        self.eigvecs[angular_index] = eigvecs_k

        # To compute new expansion coefficients using spca basis
        # we combine the basis coefs using the eigen decomposition.
        # Note image stack slow moving axis, otherwise this is just a
        # block by block matrix multiply.
        self.spca_coef[:, basis_inds] = A[angular_index] @ eigvecs_k

        eigval_index += len(eigvals_k)

    # Sanity check we have same dimension of eigvals and basis coefs.
    if eigval_index != self.basis.count:
        raise RuntimeError(
            f"eigvals dimension {eigval_index} != basis coef count {self.basis.count}."
        )

    # Store a map of indices sorted by eigenvalue.
    # We don't re-sort them now because this would destroy the block diagonal structure.
    # # sorted_indices[i] is the ith most powerful eigendecomposition index
    # #
    # # We can pass a full or truncated slice of sorted_indices to any array indexed by
    # # the coefs. This is used later for compression and index re-generation.
    self.sorted_indices = np.argsort(-np.abs(self.eigvals))
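# Illustrative sketch (not part of the library): the per-block work inside the
# loop above, using plain numpy. `fix_signs` is approximated inline by forcing
# the largest-magnitude entry of each eigenvector to be positive; `A_k` and
# `C_k` below are synthetic stand-ins for one data block and its covariance block.
import numpy as np

rng = np.random.default_rng(0)
n_images, block_size = 100, 4

A_k = rng.standard_normal((n_images, block_size))  # data block (images x radial indices)
C_k = A_k.T @ A_k / n_images                       # symmetric covariance block

# Eigendecomposition of the symmetric block.
eigvals_k, eigvecs_k = np.linalg.eigh(C_k)

# Sign convention: make the largest-magnitude entry of each eigenvector positive.
signs = np.sign(eigvecs_k[np.argmax(np.abs(eigvecs_k), axis=0), np.arange(block_size)])
eigvecs_k = eigvecs_k * signs

# Sort by descending eigenvalue.
order = np.argsort(-eigvals_k)
eigvals_k, eigvecs_k = eigvals_k[order], eigvecs_k[:, order]

# New coefficients for this block: project the data block onto the eigenbasis.
spca_coef_k = A_k @ eigvecs_k
print(eigvals_k.shape, spca_coef_k.shape)  # (4,) (100, 4)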