Example No. 1
    def run_svm(self, score):
        """
        Trains and uses an SVM classifier.

        Trains an SVM classifier to distinguish between noise and particle projections based on
        mean intensity and variance. Every possible window in the micrograph is then classified
        as either noise or particle, resulting in a segmentation of the micrograph.

        Args:
            score: Matrix containing a score for each query image.

        Returns:
            Segmentation of the micrograph into noise and particle projections.
        """

        micro_img = xp.asarray(self.im)
        particle_windows = np.floor(self.tau1)
        non_noise_windows = np.ceil(self.tau2)
        bw_mask_p, bw_mask_n = Picker.get_maps(self, score, micro_img,
                                               particle_windows,
                                               non_noise_windows)

        x, y = PickerHelper.get_training_set(micro_img, bw_mask_p, bw_mask_n,
                                             self.query_size)
        x = xp.asnumpy(x)
        y = xp.asnumpy(y)

        scaler = preprocessing.StandardScaler()
        scaler.fit(x)
        x = scaler.transform(x)
        classifier = self.model
        classifier.fit(x, y)

        mean_all, std_all = PickerHelper.moments(micro_img, self.query_size)
        mean_all = xp.asnumpy(mean_all)
        std_all = xp.asnumpy(std_all)

        mean_all = mean_all[self.query_size - 1:-(self.query_size - 1),
                            self.query_size - 1:-(self.query_size - 1), ]

        std_all = std_all[self.query_size - 1:-(self.query_size - 1),
                          self.query_size - 1:-(self.query_size - 1), ]

        mean_all = np.reshape(mean_all, (np.prod(mean_all.shape), 1), "F")
        std_all = np.reshape(std_all, (np.prod(std_all.shape), 1), "F")
        cls_input = np.concatenate((mean_all, std_all), axis=1)
        cls_input = scaler.transform(cls_input)

        # compute classification for all possible windows in micrograph
        segmentation = classifier.predict(cls_input)

        _segmentation_shape = int(np.sqrt(segmentation.shape[0]))
        segmentation = np.reshape(segmentation,
                                  (_segmentation_shape, _segmentation_shape),
                                  "F")

        return segmentation.copy()
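
The method follows a standard train-then-segment pattern: fit a scaler and an SVM on labeled (mean, variance) windows, then classify every window of the micrograph. Below is a minimal, self-contained sketch of that pattern using scikit-learn only; the synthetic features, shapes, and RBF-kernel choice are illustrative assumptions, not the APPLE picker's actual configuration.

import numpy as np
from sklearn import preprocessing, svm

rng = np.random.default_rng(0)

# Toy training set: (mean, std) features for noise (label 0) and particle (label 1) windows.
x_train = np.vstack([rng.normal(0.0, 1.0, (50, 2)),   # noise-like windows
                     rng.normal(3.0, 1.0, (50, 2))])  # particle-like windows
y_train = np.concatenate([np.zeros(50), np.ones(50)])

scaler = preprocessing.StandardScaler().fit(x_train)
classifier = svm.SVC(kernel="rbf", gamma="scale")
classifier.fit(scaler.transform(x_train), y_train)

# Classify every candidate window of a toy micrograph from its (mean, std) features.
features = rng.normal(1.5, 1.5, (40 * 40, 2))          # one row per window
segmentation = classifier.predict(scaler.transform(features)).reshape(40, 40)
print(int(segmentation.sum()), "windows labeled as particle")
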
Example No. 2
    def backproject(self, rot_matrices):
        """
        Backproject this Image stack along the given rotation matrices.

        :param rot_matrices: An n-by-3-by-3 array of rotation matrices \
        corresponding to viewing directions.

        :return: Volume instance corresponding to the backprojected images.
        """

        L = self.res

        ensure(
            self.n_images == rot_matrices.shape[0],
            "Number of rotation matrices must match the number of images",
        )

        # TODO: rotated_grids might as well give us correctly shaped array in the first place
        pts_rot = aspire.volume.rotated_grids(L, rot_matrices)
        pts_rot = np.moveaxis(pts_rot, 1, 2)
        pts_rot = m_reshape(pts_rot, (3, -1))

        im_f = xp.asnumpy(fft.centered_fft2(xp.asarray(self.data))) / (L**2)
        if L % 2 == 0:
            im_f[:, 0, :] = 0
            im_f[:, :, 0] = 0

        im_f = im_f.flatten()

        vol = anufft(im_f, pts_rot, (L, L, L), real=True) / L

        return aspire.volume.Volume(vol)
Example No. 3
    def downsample(self, ds_res):
        """
        Downsample Image to a specific resolution. This method returns a new Image.

        :param ds_res: int - new resolution, should be <= the current resolution
            of this Image
        :return: The downsampled Image object.
        """
        grid = grid_2d(self.res)
        grid_ds = grid_2d(ds_res)

        im_ds = np.zeros((self.n_images, ds_res, ds_res), dtype=self.dtype)

        # x, y values corresponding to 'grid'. This is what scipy interpolator needs to function.
        res_by_2 = self.res / 2
        x = y = np.ceil(np.arange(-res_by_2, res_by_2)) / res_by_2

        mask = (np.abs(grid["x"]) < ds_res / self.res) & (np.abs(grid["y"]) <
                                                          ds_res / self.res)
        im_shifted = fft.centered_ifft2(
            fft.centered_fft2(xp.asarray(self.data)) * xp.asarray(mask))
        im = np.real(xp.asnumpy(im_shifted))

        for s in range(im_ds.shape[0]):
            interpolator = RegularGridInterpolator((x, y),
                                                   im[s],
                                                   bounds_error=False,
                                                   fill_value=0)
            im_ds[s] = interpolator(np.dstack([grid_ds["x"], grid_ds["y"]]))

        return Image(im_ds)
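
A minimal sketch of the same downsampling idea with plain NumPy/SciPy: low-pass the image in the centered Fourier domain, then interpolate onto a coarser grid. The grid conventions and mask cutoff below are simplified assumptions and do not reproduce aspire's grid_2d exactly.

import numpy as np
from scipy.interpolate import RegularGridInterpolator

res, ds_res = 64, 32
im = np.random.default_rng(0).normal(size=(res, res))

# Low-pass mask keeping only the central band of the centered frequency grid.
f = np.fft.fftshift(np.fft.fftfreq(res))              # centered frequencies in [-0.5, 0.5)
fx, fy = np.meshgrid(f, f, indexing="ij")
mask = (np.abs(fx) < 0.5 * ds_res / res) & (np.abs(fy) < 0.5 * ds_res / res)

im_lp = np.real(np.fft.ifft2(np.fft.ifftshift(np.fft.fftshift(np.fft.fft2(im)) * mask)))

# Interpolate the low-passed image onto the coarser grid.
x = np.linspace(-1, 1, res)
interpolator = RegularGridInterpolator((x, x), im_lp, bounds_error=False, fill_value=0)
x_ds = np.linspace(-1, 1, ds_res)
gx, gy = np.meshgrid(x_ds, x_ds, indexing="ij")
im_ds = interpolator(np.stack([gx, gy], axis=-1))
print(im_ds.shape)  # (32, 32)
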
Example No. 4
    def estimate_noise_psd(self):
        """
        :return: The estimated noise power spectral density (PSD) of the images in the Source used to create this estimator.
        TODO: How's this initial estimate of variance different from the 'estimate' method?
        """
        # Run estimate using saved parameters
        g2d = grid_2d(self.L)
        mask = g2d["r"] >= self.bgRadius

        mean_est = 0
        noise_psd_est = np.zeros((self.L, self.L)).astype(self.src.dtype)
        for i in range(0, self.n, self.batchSize):
            images = self.src.images(i, self.batchSize).asnumpy()
            images_masked = images * mask

            _denominator = self.n * np.sum(mask)
            mean_est += np.sum(images_masked) / _denominator
            im_masked_f = xp.asnumpy(
                fft.centered_fft2(xp.asarray(images_masked)))
            noise_psd_est += np.sum(np.abs(im_masked_f**2),
                                    axis=0) / _denominator

        mid = self.L // 2
        noise_psd_est[mid, mid] -= mean_est**2

        return noise_psd_est
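
A minimal sketch of the underlying periodogram averaging with plain NumPy: mask out the particle region, average the squared spectrum magnitude over the image stack, and subtract the squared background mean at the DC pixel. The stack size, mask radius, and use of np.fft in place of aspire's centered transforms are illustrative assumptions.

import numpy as np

L, n = 64, 100
rng = np.random.default_rng(0)
images = rng.normal(size=(n, L, L))

# Background mask: pixels outside a centered disk of radius L // 3.
yy, xx = np.mgrid[-L // 2:L - L // 2, -L // 2:L - L // 2]
mask = np.sqrt(xx**2 + yy**2) >= L // 3

images_masked = images * mask
denom = n * mask.sum()

mean_est = images_masked.sum() / denom
im_f = np.fft.fftshift(np.fft.fft2(images_masked), axes=(-2, -1))
noise_psd_est = (np.abs(im_f) ** 2).sum(axis=0) / denom
noise_psd_est[L // 2, L // 2] -= mean_est**2
print(noise_psd_est.shape)  # (64, 64)
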
Example No. 5
def _im_translate2(im, shifts):
    """
    Translate image by shifts
    :param im: An Image instance to be translated.
    :param shifts: An array of size n-by-2 specifying the shifts in pixels.
        Alternatively, it can be a row vector of length 2, in which case the same shift is applied to each image.
    :return: An Image instance translated by the shifts.

    TODO: This implementation has been moved here from aspire.aspire.abinitio and is faster than _im_translate.
    """

    if not isinstance(im, Image):
        logger.warning(
            "_im_translate2 expects an Image, attempting to convert array. "
            "Expects an array of size n-by-L-by-L.")
        im = Image(im)

    if shifts.ndim == 1:
        shifts = shifts[np.newaxis, :]

    n_shifts = shifts.shape[0]

    if shifts.shape[1] != 2:
        raise ValueError("Input `shifts` must be of size n-by-2")

    if n_shifts != 1 and n_shifts != im.n_images:
        raise ValueError(
            "The number of shifts must be 1 or match the number of images")

    resolution = im.res
    grid = xp.asnumpy(
        fft.ifftshift(
            xp.asarray(np.ceil(np.arange(-resolution / 2, resolution / 2)))))
    om_y, om_x = np.meshgrid(grid, grid)
    phase_shifts = np.einsum("ij, k -> ijk", om_x, shifts[:, 0]) + np.einsum(
        "ij, k -> ijk", om_y, shifts[:, 1])
    # TODO: figure out why the result of einsum requires reshape
    phase_shifts = phase_shifts.reshape(n_shifts, resolution, resolution)
    phase_shifts /= resolution

    mult_f = np.exp(-2 * np.pi * 1j * phase_shifts)
    im_f = xp.asnumpy(fft.fft2(xp.asarray(im.asnumpy())))
    im_translated_f = im_f * mult_f
    im_translated = np.real(xp.asnumpy(fft.ifft2(xp.asarray(im_translated_f))))

    return Image(im_translated)
Example No. 6
    def _im_translate(self, shifts):
        """
        Translate image by shifts
        :param im: An array of size n-by-L-by-L containing images to be translated.
        :param shifts: An array of size n-by-2 specifying the shifts in pixels.
            Alternatively, it can be a row vector of length 2, in which case the same shift is applied to each image.
        :return: The images translated by the shifts, with periodic boundaries.

        TODO: This implementation is slower than _im_translate2
        """
        im = self.data

        if shifts.ndim == 1:
            shifts = shifts[np.newaxis, :]
        n_shifts = shifts.shape[0]

        ensure(shifts.shape[-1] == 2, "shifts must be nx2")

        ensure(
            n_shifts == 1 or n_shifts == self.n_images,
            "number of shifts must be 1 or match the number of images",
        )
        # Cast shifts to this instance's internal dtype
        shifts = shifts.astype(self.dtype)

        L = self.res
        im_f = xp.asnumpy(fft.fft2(xp.asarray(im)))
        grid_shifted = fft.ifftshift(
            xp.asarray(np.ceil(np.arange(-L / 2, L / 2, dtype=self.dtype))))
        grid_1d = xp.asnumpy(grid_shifted) * 2 * np.pi / L
        om_x, om_y = np.meshgrid(grid_1d, grid_1d, indexing="ij")

        phase_shifts_x = -shifts[:, 0].reshape((n_shifts, 1, 1))
        phase_shifts_y = -shifts[:, 1].reshape((n_shifts, 1, 1))

        phase_shifts = (om_x[np.newaxis, :, :] * phase_shifts_x +
                        om_y[np.newaxis, :, :] * phase_shifts_y)
        mult_f = np.exp(-1j * phase_shifts)
        im_translated_f = im_f * mult_f
        im_translated = xp.asnumpy(fft.ifft2(xp.asarray(im_translated_f)))
        im_translated = np.real(im_translated)

        return Image(im_translated)
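
A minimal sketch of the same Fourier phase-shift idea with plain NumPy, for a single image and a single shift; the sign and axis conventions here are illustrative and may not match the method above exactly.

import numpy as np

L = 64
im = np.zeros((L, L))
im[10:20, 30:40] = 1.0
shift = np.array([5.0, -3.0])          # shift in pixels along (axis 0, axis 1)

omega = 2 * np.pi * np.fft.fftfreq(L)  # angular frequency grid, radians per pixel
om_x, om_y = np.meshgrid(omega, omega, indexing="ij")

# Multiplying the spectrum by a phase ramp shifts the image with periodic boundaries.
phase = om_x * shift[0] + om_y * shift[1]
im_shifted = np.real(np.fft.ifft2(np.fft.fft2(im) * np.exp(-1j * phase)))
print(np.round(im_shifted[15:25, 27:37].mean(), 3))  # ~1.0 inside the shifted block
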
Example No. 7
    def filter(self, filter):
        """
        Apply a `Filter` object to the Image and return a new Image.

        :param filter: An object of type `Filter`.
        :return: A new filtered `Image` object.
        """
        filter_values = filter.evaluate_grid(self.res)

        im_f = xp.asnumpy(fft.centered_fft2(xp.asarray(self.data)))

        if im_f.ndim > filter_values.ndim:
            im_f *= filter_values
        else:
            im_f = filter_values * im_f
        im = xp.asnumpy(fft.centered_ifft2(xp.asarray(im_f)))
        im = np.real(im)

        return Image(im)
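
A minimal sketch of the same operation with plain NumPy, substituting a hand-made radial low-pass for an aspire `Filter` object; the cutoff value is arbitrary.

import numpy as np

L = 64
im = np.random.default_rng(0).normal(size=(L, L))

# Radial low-pass filter values on the centered frequency grid (cutoff 0.15 cycles/pixel).
f = np.fft.fftshift(np.fft.fftfreq(L))
fx, fy = np.meshgrid(f, f, indexing="ij")
filter_values = (np.sqrt(fx**2 + fy**2) < 0.15).astype(im.dtype)

# Pointwise multiplication in the centered Fourier domain, then back to real space.
im_f = np.fft.fftshift(np.fft.fft2(im))
im_filtered = np.real(np.fft.ifft2(np.fft.ifftshift(im_f * filter_values)))
print(im_filtered.shape)
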
Example No. 8
    def gaussian_filter(cls, size_filter, std):
        """Computes low-pass filter.

        Args:
            size_filter: Size of filter (size_filter x size_filter).
            std: sigma value in filter.
        """

        y, x = xp.mgrid[-(size_filter - 1) // 2:(size_filter - 1) // 2 + 1,
                        -(size_filter - 1) // 2:(size_filter - 1) // 2 + 1, ]

        # Isotropic Gaussian response with exponent -(x^2 + y^2) / (2 * std^2)
        response = xp.exp(-(xp.square(x) + xp.square(y)) /
                          (2 * std**2)) / (xp.sqrt(2 * xp.pi) * std)
        response[response < xp.finfo("float").eps] = 0

        return xp.asnumpy(response / response.sum())  # Normalize so sum is 1
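
A minimal NumPy-only sketch of building and normalizing such a kernel, with the exponent grouped as -(x^2 + y^2) / (2 * std^2):

import numpy as np

def gaussian_kernel(size_filter, std):
    half = (size_filter - 1) // 2
    y, x = np.mgrid[-half:half + 1, -half:half + 1]
    response = np.exp(-(x**2 + y**2) / (2 * std**2))
    response[response < np.finfo(float).eps] = 0
    return response / response.sum()   # normalize so the kernel sums to 1

print(gaussian_kernel(7, 1.5).sum())   # ~1.0
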
Example No. 9
    def project(self, vol_idx, rot_matrices):
        """
        Using the stack of rot_matrices,
        project images of Volume[vol_idx].

        :param vol_idx: Volume index
        :param rot_matrices: Stack of rotations. Rotation or ndarray instance.
        :return: `Image` instance.
        """

        # If we are an ASPIRE Rotation, get the numpy representation.
        if isinstance(rot_matrices, Rotation):
            rot_matrices = rot_matrices.matrices

        if rot_matrices.dtype != self.dtype:
            logger.warning(
                f"{self.__class__.__name__}"
                f" rot_matrices.dtype {rot_matrices.dtype}"
                f" != self.dtype {self.dtype}."
                " In the future this will raise an error."
            )

        data = self[vol_idx].T  # RCOPT

        n = rot_matrices.shape[0]

        pts_rot = np.moveaxis(rotated_grids(self.resolution, rot_matrices), 1, 2)

        # TODO: rotated_grids might as well give us correctly shaped array in the first place
        pts_rot = m_reshape(pts_rot, (3, self.resolution ** 2 * n))

        im_f = nufft(data, pts_rot) / self.resolution

        im_f = im_f.reshape(-1, self.resolution, self.resolution)

        if self.resolution % 2 == 0:
            im_f[:, 0, :] = 0
            im_f[:, :, 0] = 0

        im_f = xp.asnumpy(fft.centered_ifft2(xp.asarray(im_f)))

        return aspire.image.Image(np.real(im_f))
Example No. 10
    def _pswf_integration(self, images_nufft):
        """
        Perform integration part for rotational invariant property.
        """
        num_images = images_nufft.shape[1]
        n_max_float = float(self.n_max) / 2
        r_n_eval_mat = np.zeros(
            (len(self.radial_quad_pts), self.n_max, num_images),
            dtype=complex_type(self.dtype),
        )

        for i in range(len(self.radial_quad_pts)):
            curr_r_mat = images_nufft[
                self.r_quad_indices[i] : self.r_quad_indices[i]
                + self.num_angular_pts[i],
                :,
            ]
            curr_r_mat = np.concatenate((curr_r_mat, np.conj(curr_r_mat)))
            curr_r_fft = xp.asnumpy(fft.fft(xp.asarray(curr_r_mat), axis=0))
            angular_eval = curr_r_fft * self.quad_rule_radial_wts[i]

            r_n_eval_mat[i, :, :] = np.tile(
                angular_eval,
                (int(max(1, np.ceil(n_max_float / self.num_angular_pts[i]))), 1),
            )[: self.n_max, :]

        r_n_eval_mat = r_n_eval_mat.reshape(
            (len(self.radial_quad_pts) * self.n_max, num_images), order="F"
        )
        coeff_vec_quad = np.zeros(
            (len(self.ang_freqs), num_images), dtype=complex_type(self.dtype)
        )
        m = len(self.pswf_radial_quad)
        for i in range(self.n_max):
            coeff_vec_quad[
                self.indices_for_n[i] + np.arange(self.numel_for_n[i]), :
            ] = np.dot(self.blk_r[i], r_n_eval_mat[i * m : (i + 1) * m, :])

        return coeff_vec_quad
Example No. 11
    def evaluate_t(self, x):
        """
        Evaluate coefficients in FB basis from those in standard 2D coordinate basis

        :param x: The Image instance representing coefficient array in the
        standard 2D coordinate basis to be evaluated.
        :return v: The evaluation of the coefficient array `v` in the FB basis.
            This is an array of vectors whose last dimension equals `self.count`
            and whose first dimension corresponds to `x.n_images`.
        """

        if x.dtype != self.dtype:
            logger.warning(
                f"{self.__class__.__name__}::evaluate_t"
                f" Inconsistent dtypes v: {x.dtype} self: {self.dtype}")

        if not isinstance(x, Image):
            logger.warning(f"{self.__class__.__name__}::evaluate_t"
                           " passed numpy array instead of Image.")
            x = Image(x)

        # get information on polar grids from precomputed data
        n_theta = np.size(self._precomp["freqs"], 2)
        n_r = np.size(self._precomp["freqs"], 1)
        freqs = np.reshape(self._precomp["freqs"], (2, n_r * n_theta))

        # number of 2D image samples
        n_images = x.n_images
        x_data = x.data

        # resampling x on a polar Fourier grid using a nonuniform discrete Fourier transform
        pf = nufft(x_data, 2 * pi * freqs)
        pf = np.reshape(pf, (n_images, n_r, n_theta))

        # Recover "negative" frequencies from "positive" half plane.
        pf = np.concatenate((pf, pf.conjugate()), axis=2)

        # evaluate radial integral using the Gauss-Legendre quadrature rule
        for i_r in range(0, n_r):
            pf[:, i_r, :] = pf[:, i_r, :] * (self._precomp["gl_weights"][i_r] *
                                             self._precomp["gl_nodes"][i_r])

        #  1D FFT on the angular dimension for each concentric circle
        pf = 2 * pi / (2 * n_theta) * xp.asnumpy(fft.fft(xp.asarray(pf)))

        # This only makes it easier to slice the array later.
        v = np.zeros((n_images, self.count), dtype=x.dtype)

        # go through each basis function and find the corresponding coefficient
        ind = 0
        idx = ind + np.arange(self.k_max[0])
        mask = self._indices["ells"] == 0

        # include the normalization factor of angular part into radial part
        radial_norm = self._precomp["radial"] / np.expand_dims(
            self.angular_norms, 1)
        v[:, mask] = pf[:, :, 0].real @ radial_norm[idx].T
        ind = ind + np.size(idx)

        ind_pos = ind
        for ell in range(1, self.ell_max + 1):
            idx = ind + np.arange(self.k_max[ell])
            idx_pos = ind_pos + np.arange(self.k_max[ell])
            idx_neg = idx_pos + self.k_max[ell]

            v_ell = pf[:, :, ell] @ radial_norm[idx].T

            if np.mod(ell, 2) == 0:
                v_pos = np.real(v_ell)
                v_neg = -np.imag(v_ell)
            else:
                v_pos = np.imag(v_ell)
                v_neg = np.real(v_ell)

            v[:, idx_pos] = v_pos
            v[:, idx_neg] = v_neg

            ind = ind + np.size(idx)

            ind_pos = ind_pos + 2 * self.k_max[ell]

        return v
Example No. 12
    def evaluate(self, v):
        """
        Evaluate coefficients in standard 2D coordinate basis from those in FB basis

        :param v: A coefficient vector (or an array of coefficient vectors)
            in FB basis to be evaluated. The last dimension must equal `self.count`.
        :return x: The evaluation of the coefficient vector(s) `x` in standard 2D
            coordinate basis. This is an Image instance with resolution `self.sz`,
            and the first dimension corresponds to the remaining dimensions of `v`.
        """

        if v.dtype != self.dtype:
            logger.debug(
                f"{self.__class__.__name__}::evaluate"
                f" Inconsistent dtypes v: {v.dtype} self: {self.dtype}")

        sz_roll = v.shape[:-1]
        v = v.reshape(-1, self.count)

        # number of 2D image samples
        n_data = v.shape[0]

        # get information on polar grids from precomputed data
        n_theta = np.size(self._precomp["freqs"], 2)
        n_r = np.size(self._precomp["freqs"], 1)

        # go through each basis function and find the corresponding coefficient
        pf = np.zeros((n_data, 2 * n_theta, n_r),
                      dtype=complex_type(self.dtype))
        mask = self._indices["ells"] == 0

        ind = 0

        idx = ind + np.arange(self.k_max[0], dtype=int)

        # include the normalization factor of angular part into radial part
        radial_norm = self._precomp["radial"] / np.expand_dims(
            self.angular_norms, 1)
        pf[:, 0, :] = v[:, mask] @ radial_norm[idx]
        ind = ind + np.size(idx)

        ind_pos = ind

        for ell in range(1, self.ell_max + 1):
            idx = ind + np.arange(self.k_max[ell], dtype=int)
            idx_pos = ind_pos + np.arange(self.k_max[ell], dtype=int)
            idx_neg = idx_pos + self.k_max[ell]

            v_ell = (v[:, idx_pos] - 1j * v[:, idx_neg]) / 2.0

            if np.mod(ell, 2) == 1:
                v_ell = 1j * v_ell

            pf_ell = v_ell @ radial_norm[idx]
            pf[:, ell, :] = pf_ell

            if np.mod(ell, 2) == 0:
                pf[:, 2 * n_theta - ell, :] = pf_ell.conjugate()
            else:
                pf[:, 2 * n_theta - ell, :] = -pf_ell.conjugate()

            ind = ind + np.size(idx)
            ind_pos = ind_pos + 2 * self.k_max[ell]

        # 1D inverse FFT in the degree of polar angle
        pf = 2 * pi * xp.asnumpy(fft.ifft(xp.asarray(pf), axis=1))

        # Only need "positive" frequencies.
        hsize = int(np.size(pf, 1) / 2)
        pf = pf[:, 0:hsize, :]

        for i_r in range(0, n_r):
            pf[..., i_r] = pf[..., i_r] * (self._precomp["gl_weights"][i_r] *
                                           self._precomp["gl_nodes"][i_r])

        pf = np.reshape(pf, (n_data, n_r * n_theta))

        # perform an inverse nonuniform FFT back to the 2D coordinate basis
        freqs = m_reshape(self._precomp["freqs"], (2, n_r * n_theta))

        x = 2 * anufft(pf, 2 * pi * freqs, self.sz, real=True)

        # Return x as an Image instance with the last two dimensions equal to self.sz
        x = x.reshape((*sz_roll, *self.sz))

        return Image(x)
Example No. 13
def downsample(insamples, szout, mask=None):
    """
    Blur and downsample 1D to 3D objects, such as curves, images, or volumes.

    The function handles odd and even-sized arrays correctly. The center of
    an odd array is taken to be at (n+1)/2, and of an even array at n/2+1.
    :param insamples: Set of objects to be downsampled in the form of an array;\
    the first dimension is the number of objects.
    :param szout: The desired resolution of the output objects.
    :param mask: Optional mask applied to the cropped spectrum before the
        inverse transform (default None, i.e. no masking).
    :return: An array consisting of the blurred and downsampled objects.
    """

    ensure(
        insamples.ndim - 1 == np.size(szout),
        "The number of downsampling dimensions is not the same as that of objects.",
    )

    L_in = insamples.shape[1]
    L_out = szout[0]
    ndata = insamples.shape[0]
    outdims = np.r_[ndata, szout]

    outsamples = np.zeros(outdims, dtype=insamples.dtype)

    if mask is None:
        mask = 1.0

    if insamples.ndim == 2:
        # stack of one dimension objects

        for idata in range(ndata):
            insamples_shifted = fft.fftshift(fft.fft(xp.asarray(insamples[idata])))
            insamples_fft = crop_pad(insamples_shifted, L_out) * mask

            outsamples_shifted = fft.ifft(fft.ifftshift(xp.asarray(insamples_fft)))
            outsamples[idata] = np.real(xp.asnumpy(outsamples_shifted) * (L_out / L_in))

    elif insamples.ndim == 3:
        # stack of two dimension objects
        for idata in range(ndata):
            insamples_shifted = fft.fftshift(fft.fft2(xp.asarray(insamples[idata])))
            insamples_fft = crop_pad(insamples_shifted, L_out) * mask

            outsamples_shifted = fft.ifft2(fft.ifftshift(xp.asarray(insamples_fft)))
            outsamples[idata] = np.real(
                xp.asnumpy(outsamples_shifted) * (L_out ** 2 / L_in ** 2)
            )

    elif insamples.ndim == 4:
        # stack of three dimension objects
        for idata in range(ndata):
            insamples_shifted = fft.fftshift(
                fft.fftn(xp.asarray(insamples[idata]), axes=(0, 1, 2))
            )
            insamples_fft = crop_pad(insamples_shifted, L_out) * mask

            outsamples_shifted = fft.ifftn(
                fft.ifftshift(xp.asarray(insamples_fft)), axes=(0, 1, 2)
            )
            outsamples[idata] = np.real(
                xp.asnumpy(outsamples_shifted) * (L_out ** 3 / L_in ** 3)
            )

    else:
        raise RuntimeError("Number of dimensions > 3 for input objects.")

    return outsamples
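
A minimal sketch of the 2D branch with plain NumPy: center the spectrum, crop it to the output size, and inverse transform. The `center_crop` helper below is a hypothetical stand-in for `crop_pad` and assumes `L_out <= L_in` with no mask.

import numpy as np

def center_crop(spectrum, L_out):
    """Crop a centered 2D spectrum to L_out x L_out around its center."""
    L_in = spectrum.shape[0]
    start = L_in // 2 - L_out // 2
    return spectrum[start:start + L_out, start:start + L_out]

L_in, L_out = 64, 32
im = np.random.default_rng(0).normal(size=(L_in, L_in))

spectrum = np.fft.fftshift(np.fft.fft2(im))
im_ds = np.real(np.fft.ifft2(np.fft.ifftshift(center_crop(spectrum, L_out)))) * (L_out**2 / L_in**2)
print(im_ds.shape)  # (32, 32)
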
Example No. 14
    def query_score(self, show_progress=True):
        """Calculates score for each query image.

        Extracts query images and reference windows. Computes the cross-correlation between these
        windows, and applies a threshold to compute a score for each query image.

        Args:
            show_progress: Whether to show a progress bar

        Returns:
            Matrix containing a score for each query image.
        """

        micro_img = xp.asarray(self.im)
        logger.info("Extracting query images")
        query_box = PickerHelper.extract_query(micro_img, self.query_size // 2)
        logger.info("Extracting query images complete")

        query_box = xp.conj(fft.fft2(query_box, axes=(2, 3)))

        reference_box = PickerHelper.extract_references(
            micro_img, self.query_size, self.container_size)

        reference_size = PickerHelper.reference_size(micro_img,
                                                     self.container_size)
        conv_map = xp.zeros(
            (reference_size, query_box.shape[0], query_box.shape[1]))

        def _work(index):
            reference_box_i = fft.fft2(reference_box[index], axes=(0, 1))
            window_t = xp.multiply(reference_box_i, query_box)
            cc = fft.ifft2(window_t, axes=(2, 3))
            return index, cc.real.max((2, 3)) - cc.real.mean((2, 3))

        n_works = reference_size
        n_threads = config.apple.conv_map_nthreads
        pbar = tqdm(total=reference_size, disable=not show_progress)

        # Ideally we'd like something like 'SerialExecutor' to enable easy debugging
        # but for now do an if-else
        if n_threads > 1:
            with futures.ThreadPoolExecutor(n_threads) as executor:
                to_do = [executor.submit(_work, i) for i in range(n_works)]

                for future in futures.as_completed(to_do):
                    i, res = future.result()
                    conv_map[i, :, :] = res
                    pbar.update(1)
        else:
            for i in range(n_works):
                _, conv_map[i, :, :] = _work(i)
                pbar.update(1)

        pbar.close()

        conv_map = xp.transpose(conv_map, (1, 2, 0))

        min_val = xp.min(conv_map)
        max_val = xp.max(conv_map)
        thresh = (
            min_val +
            (max_val - min_val) / config.apple.response_thresh_norm_factor)
        return xp.asnumpy(xp.sum(conv_map >= thresh, axis=2))
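
A minimal sketch of the cross-correlation score computed inside `_work`, for one reference window and one query window with plain NumPy; the window sizes and random data are illustrative.

import numpy as np

rng = np.random.default_rng(0)
query = rng.normal(size=(32, 32))
reference = rng.normal(size=(32, 32))

# Circular cross-correlation via the FFT correlation theorem,
# scored as the peak response minus the mean response.
cc = np.real(np.fft.ifft2(np.fft.fft2(reference) * np.conj(np.fft.fft2(query))))
score = cc.max() - cc.mean()
print(round(float(score), 3))
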