def setUp(self):
        L = 32
        n = 64
        pixel_size = 5
        voltage = 200
        defocus_min = 1.5e4
        defocus_max = 2.5e4
        defocus_ct = 7
        Cs = 2.0
        alpha = 0.1
        self.dtype = np.float32

        filters = [
            RadialCTFFilter(pixel_size, voltage, defocus=d, Cs=Cs, alpha=alpha)
            for d in np.linspace(defocus_min, defocus_max, defocus_ct)
        ]

        vols = Volume(
            np.load(os.path.join(DATA_DIR, "clean70SRibosome_vol.npy")).astype(
                self.dtype))
        vols = vols.downsample((L * np.ones(3, dtype=int)))

        sim = Simulation(L=L,
                         n=n,
                         vols=vols,
                         unique_filters=filters,
                         dtype=self.dtype)

        self.orient_est = CLSyncVoting(sim, L // 2, 36)
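
A minimal sketch of a companion test for the fixture above (hypothetical; it assumes `CLSyncVoting` exposes `estimate_rotations()` and stores the result in a `rotations` attribute):

    def testEstimateRotations(self):
        # Run the common-lines synchronization/voting estimator built in setUp.
        self.orient_est.estimate_rotations()
        rots_est = self.orient_est.rotations
        # Expect one 3x3 rotation estimate per simulated image.
        self.assertEqual(rots_est.shape[-2:], (3, 3))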
Example #2
    def testSimulationEvalCoords(self):
        mean_est = Volume(np.load(os.path.join(DATA_DIR, "mean_8_8_8.npy")))
        eigs_est = Volume(
            np.load(os.path.join(DATA_DIR, "eigs_est_8_8_8_1.npy"))[..., 0])

        clustered_coords_est = np.load(
            os.path.join(DATA_DIR, "clustered_coords_est.npy"))

        result = self.sim.eval_coords(mean_est, eigs_est, clustered_coords_est)

        self.assertTrue(
            np.allclose(
                result["err"][:10],
                [
                    1.58382394,
                    1.58382394,
                    3.72076112,
                    1.58382394,
                    1.58382394,
                    3.72076112,
                    3.72076112,
                    1.58382394,
                    1.58382394,
                    1.58382394,
                ],
            ))

        self.assertTrue(
            np.allclose(
                result["rel_err"][0, :10],
                [
                    0.11048937,
                    0.11048937,
                    0.21684697,
                    0.11048937,
                    0.11048937,
                    0.21684697,
                    0.21684697,
                    0.11048937,
                    0.11048937,
                    0.11048937,
                ],
            ))

        self.assertTrue(
            np.allclose(
                result["corr"][0, :10],
                [
                    0.99390133,
                    0.99390133,
                    0.97658719,
                    0.99390133,
                    0.99390133,
                    0.97658719,
                    0.97658719,
                    0.99390133,
                    0.99390133,
                    0.99390133,
                ],
            ))
    def test03Vol2img(self):
        results = np.load(
            os.path.join(DATA_DIR, "clean70SRibosome_down8_imgs32.npy"))
        vols = Volume(
            np.load(os.path.join(DATA_DIR, "clean70SRibosome_vol_down8.npy")))
        rots = np.load(os.path.join(DATA_DIR, "rand_rot_matrices32.npy"))
        rots = np.moveaxis(rots, 2, 0)
        imgs_clean = vols.project(0, rots).asnumpy()
        self.assertTrue(np.allclose(results, imgs_clean, atol=1e-7))
Example #4
    def testDownsample(self):
        # Data files re-used from test_preprocess
        vols = Volume(
            np.load(os.path.join(DATA_DIR, "clean70SRibosome_vol.npy")))

        resv = Volume(
            np.load(os.path.join(DATA_DIR, "clean70SRibosome_vol_down8.npy")))

        result = vols.downsample((8, 8, 8))
        self.assertTrue(np.allclose(result, resv))
        self.assertTrue(isinstance(result, Volume))
Example #5
    def setUp(self):
        self.dtype = np.float32
        self.n = n = 3
        self.res = res = 42
        self.data_1 = np.arange(n * res**3,
                                dtype=self.dtype).reshape(n, res, res, res)
        self.data_2 = 123 * self.data_1.copy()
        self.vols_1 = Volume(self.data_1)
        self.vols_2 = Volume(self.data_2)
        self.random_data = np.random.randn(res, res, res).astype(self.dtype)
        self.vec = self.data_1.reshape(n, res**3)
Example #6
    def setUp(self):
        self.dtype = np.float32

        L = 8
        n = 32
        pixel_size = 5.0 * 65 / L
        voltage = 200
        defocus_min = 1.5e4
        defocus_max = 2.5e4
        defocus_ct = 7

        self.noise_var = 1.3957e-4
        noise_filter = ScalarFilter(dim=2, value=self.noise_var)

        unique_filters = [
            RadialCTFFilter(pixel_size, voltage, defocus=d, Cs=2.0, alpha=0.1)
            for d in np.linspace(defocus_min, defocus_max, defocus_ct)
        ]

        vols = Volume(
            np.load(os.path.join(DATA_DIR, "clean70SRibosome_vol.npy")).astype(
                self.dtype
            )
        )  # RCOPT
        vols = vols.downsample((L * np.ones(3, dtype=int))) * 1.0e3
        # Since FFBBasis2D doesn't yet implement dtype, we'll set this to double to match its built in types.
        sim = Simulation(
            n=n,
            L=L,
            vols=vols,
            unique_filters=unique_filters,
            offsets=0.0,
            amplitudes=1.0,
            dtype=self.dtype,
            noise_filter=noise_filter,
        )

        self.basis = FFBBasis2D((L, L), dtype=self.dtype)

        self.h_idx = sim.filter_indices
        self.h_ctf_fb = [filt.fb_mat(self.basis) for filt in unique_filters]

        self.imgs_clean = sim.projections()
        self.imgs_ctf_clean = sim.clean_images()
        self.imgs_ctf_noise = sim.images(start=0, num=n)

        self.cov2d = RotCov2D(self.basis)
        self.coeff_clean = self.basis.evaluate_t(self.imgs_clean)
        self.coeff = self.basis.evaluate_t(self.imgs_ctf_noise)
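
        # Hypothetical usage sketch (an assumption, not shown in this example):
        # with this fixture one would typically form FB-domain mean/covariance
        # estimates, e.g. assuming RotCov2D exposes get_mean/get_covar:
        #   mean_coeff = self.cov2d.get_mean(self.coeff_clean)
        #   covar_coeff = self.cov2d.get_covar(self.coeff_clean, mean_coeff)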
Example #7
    def testClustering(self):
        covar_est = np.load(os.path.join(
            DATA_DIR, "covar_8_8_8_8_8_8.npy")).astype(self.dtype)
        eigs_est, lambdas_est = eigs(covar_est, 16)

        C = 2

        # TODO, alter refs after RCOPT complete
        eigs_est_trunc = np.moveaxis(eigs_est[:, :, :, :C - 1], -1, 0)
        eigs_est_trunc = Volume(eigs_est_trunc)

        lambdas_est_trunc = lambdas_est[:C - 1, :C - 1]

        # Estimate the coordinates in the eigenbasis. Given the images, we find the coordinates in the basis that
        # minimize the mean squared error, given the (estimated) covariances of the volumes and the noise process.
        coords_est = src_wiener_coords(
            self.sim,
            self.mean_est,
            eigs_est_trunc,
            lambdas_est_trunc,
            self.noise_variance,
        )

        # Cluster the coordinates using k-means. Again, we know how many volumes we expect, so we can use this parameter
        # here. Typically, one would take the number of clusters to be one plus the number of eigenvectors extracted.

        # Since kmeans2 relies on randomness for initialization, it is important to set the random seed via the context manager here.
        with Random(0):
            centers, vol_idx = kmeans2(coords_est.T, C)

        clustering_accuracy = self.sim.eval_clustering(vol_idx)
        self.assertEqual(clustering_accuracy, 1)
Example #8
    def src_backward(self, mean_vol, noise_variance, shrink_method=None):
        """
        Apply adjoint mapping to source

        :return: The sum of the outer products of the mean-subtracted images in `src`, corrected by the expected noise
        contribution and expressed as coefficients of `basis`.
        """
        covar_b = np.zeros((self.L, self.L, self.L, self.L, self.L, self.L),
                           dtype=self.dtype)

        for i in range(0, self.n, self.batch_size):
            im = self.src.images(i, self.batch_size)
            batch_n = im.n_images
            im_centered = im - self.src.vol_forward(mean_vol, i,
                                                    self.batch_size)

            im_centered_b = np.zeros((batch_n, self.L, self.L, self.L),
                                     dtype=self.dtype)
            for j in range(batch_n):
                im_centered_b[j] = self.src.im_backward(
                    Image(im_centered[j]), i + j)
            im_centered_b = Volume(im_centered_b).to_vec()

            covar_b += vecmat_to_volmat(
                im_centered_b.T @ im_centered_b) / self.n

        covar_b_coeff = self.basis.mat_evaluate_t(covar_b)
        return self._shrink(covar_b_coeff, noise_variance, shrink_method)
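
In other words, each batch adds `vecmat_to_volmat(B.T @ B) / n`, where the rows of `B` are the backprojected, mean-subtracted images, so `covar_b` accumulates `(1/n) * sum_i b_i b_i^T` reshaped into 6D volume-matrix form; `mat_evaluate_t` then expresses it in the basis and `_shrink` applies the noise-variance correction.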
Example #9
    def setUpClass(cls):
        cls.dtype = np.float32
        cls.sim = Simulation(
            n=1024,
            unique_filters=[
                RadialCTFFilter(defocus=d)
                for d in np.linspace(1.5e4, 2.5e4, 7)
            ],
            dtype=cls.dtype,
        )
        basis = FBBasis3D((8, 8, 8), dtype=cls.dtype)
        cls.noise_variance = 0.0030762743633643615

        cls.mean_estimator = MeanEstimator(cls.sim, basis)
        cls.mean_est = Volume(
            np.load(os.path.join(DATA_DIR,
                                 "mean_8_8_8.npy")).astype(cls.dtype))

        # Passing in a mean_kernel argument to the following constructor speeds up some calculations
        cls.covar_estimator = CovarianceEstimator(
            cls.sim,
            basis,
            mean_kernel=cls.mean_estimator.kernel,
            preconditioner="none")
        cls.covar_estimator_with_preconditioner = CovarianceEstimator(
            cls.sim,
            basis,
            mean_kernel=cls.mean_estimator.kernel,
            preconditioner="circulant",
        )
Example #10
    def testVecId2(self):
        """Test composition of to_vec(from_vec)."""
        # Construct Volume
        vol = Volume.from_vec(self.vec)

        # Convert back to vec and compare
        self.assertTrue(np.all(vol.to_vec() == self.vec))
Example #11
    def testVecId1(self):
        """Test composition of from_vec(to_vec)."""
        # Construct vec
        vec = self.vols_1.to_vec()

        # Convert back to Volume and compare
        self.assertTrue(np.allclose(Volume.from_vec(vec), self.vols_1))
Example #12
    def _gaussian_blob_vols(self, L=8, C=2, K=16, alpha=1):
        """
        Generate Gaussian blob volumes
        :param L: The size of the volumes
        :param C: The number of volumes to generate
        :param K: The number of blobs
        :param alpha: A scale factor of the blob widths

        :return: A Volume instance containing C Gaussian blob volumes.
        """
        def gaussian_blobs(K, alpha):
            Q = np.zeros(shape=(3, 3, K)).astype(self.dtype)
            D = np.zeros(shape=(3, 3, K)).astype(self.dtype)
            mu = np.zeros(shape=(3, K)).astype(self.dtype)

            for k in range(K):
                V = randn(3, 3).astype(self.dtype) / np.sqrt(3)
                Q[:, :, k] = qr(V)[0]
                D[:, :, k] = alpha**2 / 16 * np.diag(np.sum(abs(V)**2, axis=0))
                mu[:, k] = 0.5 * randn(3) / np.sqrt(3)

            return Q, D, mu

        with Random(self.seed):
            vols = np.zeros(shape=(C, L, L, L)).astype(self.dtype)
            for k in range(C):
                Q, D, mu = gaussian_blobs(K, alpha)
                vols[k] = self.eval_gaussian_blobs(L, Q, D, mu)
            return Volume(vols)
Example #13
    def vol_coords(self, mean_vol=None, eig_vols=None):
        """
        Coordinates of simulation volumes in a given basis
        :param mean_vol: A mean volume in the form of a Volume instance (default `mean_true`).
        :param eig_vols: A set of k volumes in a Volume instance (default `eigs`).
        :return: A 3-tuple of the volume coordinates in the eigenvolume basis,
            the norms of the residuals, and the inner products of the residuals
            with the mean volume.
        """
        if mean_vol is None:
            mean_vol = self.mean_true()
        if eig_vols is None:
            eig_vols = self.eigs()[0]

        assert isinstance(mean_vol, Volume)
        assert isinstance(eig_vols, Volume)

        vols = self.vols - mean_vol  # note, broadcast

        V = vols.to_vec()
        EV = eig_vols.to_vec()

        coords = EV @ V.T

        res = vols - Volume.from_vec(coords.T @ EV)
        res_norms = anorm(res.asnumpy(), (1, 2, 3))
        res_inners = mean_vol.to_vec() @ res.to_vec().T

        return coords.squeeze(), res_norms, res_inners
Example #14
    def eigs(self):
        """
        Eigendecomposition of volume covariance matrix of simulation
        :return: A 2-tuple:
            eigs_true: The eigenvectors of the volume covariance matrix in the form of Volume instance.
            lambdas_true: The eigenvalues of the covariance matrix in the form of a (C-1)-by-(C-1) diagonal matrix.
        """
        C = self.C
        vols_c = self.vols - self.mean_true()

        p = np.ones(C) / C
        # RCOPT, we may be able to do better here if we dig in.
        Q, R = qr(vols_c.to_vec().T, mode="economic")

        # The mean-subtracted volumes sum to zero, so the rank is at most C-1; remove the last vector
        Q = Q[:, :-1]
        R = R[:-1, :]

        w, v = eigh(make_symmat(R @ np.diag(p) @ R.T))
        eigs_true = Volume.from_vec((Q @ v).T)

        # Arrange in descending order (flip column order in eigenvector matrix)
        w = w[::-1]
        eigs_true = eigs_true.flip()

        return eigs_true, np.diag(w)
    def setUp(self):
        self.dtype = np.float32

        # Test Volume
        v = Volume(
            np.load(os.path.join(DATA_DIR, "clean70SRibosome_vol.npy")).astype(
                self.dtype)).downsample(32)

        # Create Sim object.
        # Creates 10 projections so there is something to feed FSPCABasis.
        self.src = Simulation(L=v.resolution, n=10, vols=v, dtype=v.dtype)

        # Original projection image to transform.
        self.orig_img = self.src.images(0, 1)

        # Rotate 90 degrees in cartesian coordinates using third party tool.
        self.rt90_img = Image(np.rot90(self.orig_img.asnumpy(), axes=(1, 2)))

        # Prepare a Fourier Bessel Basis
        self.basis = FFBBasis2D((self.orig_img.res, ) * 2, dtype=self.dtype)
        self.v1 = self.basis.evaluate_t(self.orig_img)
        self.v2 = self.basis.evaluate_t(self.rt90_img)
        # These should _not_ be equal or the test is pointless.
        self.assertFalse(np.allclose(self.v1, self.v2))

        # Prepare a FSPCA Basis too.
        self.fspca_basis = FSPCABasis(self.src, self.basis)
    def estimate(self, b_coeff=None, tol=None):
        """Return an estimate as a Volume instance."""
        if b_coeff is None:
            b_coeff = self.src_backward()
        est_coeff = self.conj_grad(b_coeff, tol=tol)
        est = self.basis.evaluate(est_coeff).T

        return Volume(est)
Example #17
    def testSimulationEvalMean(self):
        mean_est = Volume(np.load(os.path.join(DATA_DIR, "mean_8_8_8.npy")))
        result = self.sim.eval_mean(mean_est)

        self.assertTrue(
            np.allclose(result["err"], 2.664116055950763, atol=1e-4))
        self.assertTrue(
            np.allclose(result["rel_err"], 0.1765943704851626, atol=1e-4))
        self.assertTrue(
            np.allclose(result["corr"], 0.9849211540734224, atol=1e-4))
Example #18
    def setUp(self):
        self.resolution = 16
        self.dtype = np.float64

        # Create some projections
        v = Volume(
            np.load(os.path.join(DATA_DIR, "clean70SRibosome_vol.npy")).astype(
                self.dtype))
        v = v.downsample(self.resolution)

        # Clean
        self.clean_src = Simulation(
            L=self.resolution,
            n=321,
            vols=v,
            dtype=self.dtype,
        )

        # With Noise
        noise_var = 0.01 * np.var(np.sum(v[0], axis=0))
        noise_filter = ScalarFilter(dim=2, value=noise_var)
        self.noisy_src = Simulation(
            L=self.resolution,
            n=123,
            vols=v,
            dtype=self.dtype,
            noise_filter=noise_filter,
        )

        # Set up an FFB basis
        self.basis = FFBBasis2D((self.resolution, self.resolution),
                                dtype=self.dtype)

        # Create an FSPCA basis, using the precomputed FFB basis
        self.clean_fspca_basis = FSPCABasis(
            self.clean_src, self.basis, noise_var=0
        )  # Note noise_var assigned zero, skips eigval filtering.

        # Create another FSPCA basis, using an auto-generated FFB2D basis
        self.noisy_fspca_basis = FSPCABasis(self.noisy_src)
Example #19
    def testDownsample(self):
        # Generate a 3D map whose density decays as a Gaussian function
        g3d = grid_3d(self.L, dtype=self.dtype)
        coords = np.array(
            [g3d["x"].flatten(), g3d["y"].flatten(), g3d["z"].flatten()])
        sigma = 0.2
        vol = np.exp(-0.5 * np.sum(np.abs(coords / sigma)**2, axis=0)).astype(
            self.dtype)
        vol = np.reshape(vol, g3d["x"].shape)
        vols = Volume(vol)

        # Set noise to zero and CTF filters to unity for the simulation object
        noise_var = 0
        noise_filter = ScalarFilter(dim=2, value=noise_var)
        sim = Simulation(
            L=self.L,
            n=self.n,
            vols=vols,
            offsets=0.0,
            amplitudes=1.0,
            unique_filters=[
                ScalarFilter(dim=2, value=1)
                for d in np.linspace(1.5e4, 2.5e4, 7)
            ],
            noise_filter=noise_filter,
            dtype=self.dtype,
        )
        # get images before downsample
        imgs_org = sim.images(start=0, num=self.n)
        # get images after downsample
        max_resolution = 32
        sim.downsample(max_resolution)
        imgs_ds = sim.images(start=0, num=self.n)

        # Check individual grid points
        self.assertTrue(
            np.allclose(
                imgs_org[:, 32, 32],
                imgs_ds[:, 16, 16],
                atol=utest_tolerance(self.dtype),
            ))
        # check resolution
        self.assertTrue(np.allclose(max_resolution, imgs_ds.shape[1]))
        # check energy conservation after downsample
        self.assertTrue(
            np.allclose(
                anorm(imgs_org.asnumpy(), axes=(1, 2)) / self.L,
                anorm(imgs_ds.asnumpy(), axes=(1, 2)) / max_resolution,
                atol=utest_tolerance(self.dtype),
            ))
Example #20
    def setUp(self):
        self.resolution = 16
        self.dtype = np.float32

        # Get a volume
        v = Volume(
            np.load(os.path.join(DATA_DIR, "clean70SRibosome_vol.npy")).astype(
                self.dtype))
        v = v.downsample(self.resolution)

        # Create a src from the volume
        self.src = Simulation(
            L=self.resolution,
            n=321,
            vols=v,
            dtype=self.dtype,
        )

        # Calculate some projection images
        self.imgs = self.src.images(0, self.src.n)

        # Configure an FSPCA basis
        self.fspca_basis = FSPCABasis(self.src, noise_var=0)
Example #21
    def compute_kernel(self):
        # TODO: Most of this stuff is duplicated in MeanEstimator - move up the hierarchy?
        n = self.n
        L = self.L
        _2L = 2 * self.L

        kernel = np.zeros((_2L, _2L, _2L, _2L, _2L, _2L), dtype=self.dtype)
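        # Squared filter (CTF) responses for each image, evaluated on the L-by-L Fourier grid.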
        sq_filters_f = self.src.eval_filter_grid(self.L, power=2)

        for i in tqdm(range(0, n, self.batch_size)):
            _range = np.arange(i, min(n, i + self.batch_size))
            pts_rot = rotated_grids(L, self.src.rots[_range, :, :])
            weights = sq_filters_f[:, :, _range]
            weights *= self.src.amplitudes[_range]**2

            if L % 2 == 0:
                weights[0, :, :] = 0
                weights[:, 0, :] = 0

            # TODO: This is where this differs from MeanEstimator
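            # Reshape so each image contributes a (3, L**2) block of rotated grid points and an L**2 row of weights.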
            pts_rot = np.moveaxis(pts_rot, -1, 0).reshape(-1, 3, L**2)
            weights = weights.T.reshape((-1, L**2))

            batch_n = weights.shape[0]
            factors = np.zeros((batch_n, _2L, _2L, _2L), dtype=self.dtype)

            for j in range(batch_n):
                factors[j] = anufft(weights[j],
                                    pts_rot[j], (_2L, _2L, _2L),
                                    real=True)

            factors = Volume(factors).to_vec()
            kernel += vecmat_to_volmat(factors.T @ factors) / (n * L**8)

        # Ensure symmetric kernel
        kernel[0, :, :, :, :, :] = 0
        kernel[:, 0, :, :, :, :] = 0
        kernel[:, :, 0, :, :, :] = 0
        kernel[:, :, :, 0, :, :] = 0
        kernel[:, :, :, :, 0, :] = 0
        kernel[:, :, :, :, :, 0] = 0

        logger.info("Computing non-centered Fourier Transform")
        kernel = mdim_ifftshift(kernel, range(0, 6))
        kernel_f = fftn(kernel)
        # Kernel is always symmetric in spatial domain and therefore real in Fourier
        kernel_f = np.real(kernel_f)

        return FourierKernel(kernel_f, centered=False)
Example #22
    def setUp(self):

        self.vols = Volume(
            np.load(os.path.join(DATA_DIR,
                                 "clean70SRibosome_vol.npy"))).downsample(17)

        self.resolution = self.vols.resolution
        self.n_img = 3
        self.dtype = np.float64

        # Create a Basis to use in alignment.
        self.basis = FFBBasis2D((self.resolution, self.resolution),
                                dtype=self.dtype)

        # This sets up a trivial class, where there is one group having all images.
        self.classes = np.arange(self.n_img, dtype=int).reshape(1, self.n_img)
        self.reflections = np.zeros(self.classes.shape, dtype=bool)
    def testRotate(self):
        # Now low res (8x8) had problems;
        #  better with odd (7x7), but still not good.
        # We'll use a higher res test image.
        # fh = np.load(os.path.join(DATA_DIR, 'ffbbasis2d_xcoeff_in_8_8.npy'))[:7,:7]
        # Use a real data volume to generate a clean test image.
        v = Volume(
            np.load(os.path.join(DATA_DIR, "clean70SRibosome_vol.npy")).astype(
                np.float64))
        src = Simulation(L=v.resolution, n=1, vols=v, dtype=v.dtype)
        # Extract, this is the original image to transform.
        x1 = src.images(0, 1)

        # Rotate 90 degrees in cartesian coordinates.
        x2 = Image(np.rot90(x1.asnumpy(), axes=(1, 2)))

        # Express in an FB basis
        basis = FFBBasis2D((x1.res, ) * 2, dtype=x1.dtype)
        v1 = basis.evaluate_t(x1)
        v2 = basis.evaluate_t(x2)
        v3 = basis.evaluate_t(x1)
        v4 = basis.evaluate_t(x1)

        # Reflect in the FB basis space
        v4 = basis.rotate(v1, 0, refl=[True])

        # Rotate in the FB basis space
        v3 = basis.rotate(v1, 2 * np.pi)
        v1 = basis.rotate(v1, -np.pi / 2)

        # Evaluate back into cartesian
        y1 = basis.evaluate(v1)
        y2 = basis.evaluate(v2)
        y3 = basis.evaluate(v3)
        y4 = basis.evaluate(v4)

        # Rotate 90
        self.assertTrue(np.allclose(y1[0], y2[0], atol=1e-4))

        # 2*pi Identity
        self.assertTrue(
            np.allclose(x1[0], y3[0], atol=utest_tolerance(self.dtype)))

        # Refl (flipped using flipud)
        self.assertTrue(np.allclose(np.flipud(x1[0]), y4[0], atol=1e-4))
Example #24
    def testEigsEvaluation(self):
        covar_est = np.load(os.path.join(DATA_DIR, "covar_8_8_8_8_8_8.npy"))
        eigs_est, lambdas_est = eigs(covar_est, 16)

        # Number of distinct volumes
        C = 2

        # Eigenvalues and their corresponding eigenvectors are returned in descending order
        # We take the highest C-1 entries, since C-1 is the rank of the population covariance matrix.
        eigs_est_trunc = Volume(np.moveaxis(eigs_est[:, :, :, :C - 1], -1, 0))
        lambdas_est_trunc = lambdas_est[:C - 1, :C - 1]

        metrics = self.sim.eval_eigs(eigs_est_trunc, lambdas_est_trunc)
        self.assertAlmostEqual(13.09420492368651, metrics["err"], places=4)
        self.assertAlmostEqual(0.58567250265489856,
                               metrics["rel_err"],
                               places=4)
        self.assertAlmostEqual(0.85473300555263432, metrics["corr"], places=4)
    def testShift(self):
        """
        Compare shifting using Image with shifting provided by the Basis.

        Note the Basis shift method converts from FB to Image space and back.
        """

        n_img = 3
        test_shift = np.array([10, 2])

        # Construct some synthetic data
        v = Volume(
            np.load(os.path.join(DATA_DIR, "clean70SRibosome_vol.npy")).astype(
                np.float64)).downsample(self.L)

        src = Simulation(L=self.L, n=n_img, vols=v, dtype=np.float64)

        # Shift images using the Image method directly
        shifted_imgs = src.images(0, n_img).shift(test_shift)

        # Convert original images to basis coefficients
        f_imgs = self.basis.evaluate_t(src.images(0, n_img))

        # Use the basis shift method
        f_shifted_imgs = self.basis.shift(f_imgs, test_shift)

        # Compute diff between the shifted image sets
        diff = shifted_imgs.asnumpy() - self.basis.evaluate(
            f_shifted_imgs).asnumpy()

        # Compute mask to compare only the core of the shifted images
        g = grid_2d(self.L, normalized=False)
        mask = g["r"] > self.L / 2
        # Masking values outside radius to 0
        diff = np.where(mask, 0, diff)

        # Compute and check error
        rmse = np.sqrt(np.mean(np.square(diff), axis=(1, 2)))
        logger.info(f"RMSE shifted image diffs {rmse}")
        self.assertTrue(np.allclose(rmse, 0, atol=1e-5))
# Create CTF filters
filters = [
    RadialCTFFilter(pixel_size, voltage, defocus=d, Cs=2.0, alpha=0.1)
    for d in np.linspace(defocus_min, defocus_max, defocus_ct)
]

# %%
# Downsampling
# ------------

# Load the map file of a 70S Ribosome and downsample the 3D map to desired resolution.
# In the future, the downsampling should be done by the internal function of the Volume object.
logger.info(f"Load 3D map and downsample 3D map to desired grids "
            f"of {img_size} x {img_size} x {img_size}.")
infile = mrcfile.open(os.path.join(DATA_DIR, "clean70SRibosome_vol_65p.mrc"))
vols = Volume(infile.data.astype(dtype))
vols = vols.downsample((img_size, ) * 3)

# %%
# Create Simulation Object and Obtain True Rotation Angles
# --------------------------------------------------------

# Create a simulation object with specified filters and the downsampled 3D map
logger.info("Use downsampled map to creat simulation object.")
sim = Simulation(L=img_size,
                 n=num_imgs,
                 vols=vols,
                 unique_filters=filters,
                 dtype=dtype)

logger.info(
Example #27
    def mean_true(self):
        return Volume(np.mean(self.vols, 0))
defocus_max = 2.5e4  # Maximum defocus value (in angstroms)
defocus_ct = 7  # Number of defocus groups
Cs = 2.0  # Spherical aberration
alpha = 0.1  # Amplitude contrast

logger.info("Initialize simulation object and CTF filters.")
# Create CTF filters
ctf_filters = [
    RadialCTFFilter(pixel_size, voltage, defocus=d, Cs=2.0, alpha=0.1)
    for d in np.linspace(defocus_min, defocus_max, defocus_ct)
]

# Load the map file of a 70S ribosome and downsample the 3D map to desired resolution.
infile = mrcfile.open(os.path.join(DATA_DIR, "clean70SRibosome_vol_65p.mrc"))
logger.info(f"Load 3D map from mrc file, {infile}")
vols = Volume(infile.data)

# Downsample the volume to the desired resolution and increase the density
# by 1.0e5 times for a better graph view
logger.info(
    f"Downsample map to a resolution of {img_size} x {img_size} x {img_size}")
vols = vols.downsample((img_size, ) * 3) * 1.0e5

# Create a simulation object with specified filters and the downsampled 3D map
logger.info("Use downsampled map to create simulation object.")
source = Simulation(
    L=img_size,
    n=num_imgs,
    vols=vols,
    unique_filters=ctf_filters,
    noise_filter=noise_filter,
Example #29
logger.info("Initialize simulation object and CTF filters.")
# Create filters
ctf_filters = [
    RadialCTFFilter(pixel_size, voltage, defocus=d, Cs=2.0, alpha=0.1)
    for d in np.linspace(defocus_min, defocus_max, defocus_ct)
]

# Load the map file of a 70S Ribosome
logger.info(
    f"Load 3D map and downsample 3D map to desired grids "
    f"of {img_size} x {img_size} x {img_size}."
)
infile = mrcfile.open(os.path.join(DATA_DIR, "clean70SRibosome_vol_65p.mrc"))
# We prefer that our various arrays have consistent dtype.
vols = Volume(infile.data.astype(dtype) / np.max(infile.data))
vols = vols.downsample(img_size)

# Create a simulation object with specified filters and the downsampled 3D map
logger.info("Use downsampled map to creat simulation object.")
sim = Simulation(
    L=img_size,
    n=num_imgs,
    vols=vols,
    unique_filters=ctf_filters,
    offsets=0.0,
    amplitudes=1.0,
    dtype=dtype,
    noise_filter=noise_filter,
)
# Passing in a mean_kernel argument to the following constructor speeds up some calculations
covar_estimator = CovarianceEstimator(sim, basis, mean_kernel=mean_estimator.kernel)
covar_est = covar_estimator.estimate(mean_est, noise_variance)

# %%
# Use Top Eigenpairs to Form a Basis
# ----------------------------------

# Extract the top eigenvectors and eigenvalues of the covariance estimate.
# Since we know the population covariance is low-rank, we are only interested
# in the top eigenvectors.

eigs_est, lambdas_est = eigs(covar_est, num_eigs)

# Eigs returns column-major, so we transpose and construct a volume.
eigs_est = Volume(np.transpose(eigs_est, (3, 0, 1, 2)))

# Truncate the eigendecomposition. Since we know the true rank of the
# covariance matrix, we enforce it here.

eigs_est_trunc = Volume(eigs_est[: num_vols - 1])  # hrmm not very convenient
lambdas_est_trunc = lambdas_est[: num_vols - 1, : num_vols - 1]

# Estimate the coordinates in the eigenbasis. Given the images, we find the
# coordinates in the basis that minimize the mean squared error, given the
# (estimated) covariances of the volumes and the noise process.
coords_est = src_wiener_coords(
    sim, mean_est, eigs_est_trunc, lambdas_est_trunc, noise_variance
)

# Cluster the coordinates using k-means. Again, we know how many volumes
# we expect, so we can use this parameter here. Typically, one would take the
# number of clusters to be one plus the number of eigenvectors extracted.
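
# A sketch of the clustering step, mirroring the earlier testClustering example;
# using num_vols as the cluster count is an assumption.
with Random(0):
    centers, vol_idx = kmeans2(coords_est.T, num_vols)

# Evaluate clustering performance against the ground-truth volume assignments.
clustering_accuracy = sim.eval_clustering(vol_idx)
logger.info(f"Clustering accuracy: {clustering_accuracy}")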