Example 1
    def downsample(self, ds_res):
        """
        Downsample Image to a specific resolution. This method returns a new Image.

        :param ds_res: int - new resolution, should be <= the current resolution
            of this Image
        :return: The downsampled Image object.
        """
        grid = grid_2d(self.res)
        grid_ds = grid_2d(ds_res)

        im_ds = np.zeros((self.n_images, ds_res, ds_res), dtype=self.dtype)

        # x, y values corresponding to 'grid'. This is what scipy interpolator needs to function.
        res_by_2 = self.res / 2
        x = y = np.ceil(np.arange(-res_by_2, res_by_2)) / res_by_2

        mask = (np.abs(grid["x"]) < ds_res / self.res) & (np.abs(grid["y"]) <
                                                          ds_res / self.res)
        im_shifted = fft.centered_ifft2(
            fft.centered_fft2(xp.asarray(self.data)) * xp.asarray(mask))
        im = np.real(xp.asnumpy(im_shifted))

        for s in range(im_ds.shape[0]):
            interpolator = RegularGridInterpolator((x, y),
                                                   im[s],
                                                   bounds_error=False,
                                                   fill_value=0)
            im_ds[s] = interpolator(np.dstack([grid_ds["x"], grid_ds["y"]]))

        return Image(im_ds)
Example 2
def im_downsample(im, L_ds):
    """
    Blur and downsample image
    :param im: Set of images to be downsampled in the form of an array L-by-L-by-K, where K is the number of images.
    :param L_ds: The desired resolution of the downsampled images. Must be smaller than L.
    :return: An array of the form L_ds-by-L_ds-by-K consisting of the blurred and downsampled images.
    """
    N = im.shape[0]
    grid = grid_2d(N)
    grid_ds = grid_2d(L_ds)

    im_ds = np.zeros((L_ds, L_ds, im.shape[2])).astype(im.dtype)

    # x, y values corresponding to 'grid'. This is what scipy interpolator needs to function.
    x = y = np.ceil(np.arange(-N/2, N/2)) / (N/2)

    mask = (np.abs(grid['x']) < L_ds/N) & (np.abs(grid['y']) < L_ds/N)
    im = np.real(centered_ifft2(centered_fft2(im) * np.expand_dims(mask, 2)))

    for s in range(im_ds.shape[-1]):
        interpolator = RegularGridInterpolator(
            (x, y),
            im[:, :, s],
            bounds_error=False,
            fill_value=0
        )
        im_ds[:, :, s] = interpolator(np.dstack([grid_ds['x'], grid_ds['y']]))

    return im_ds
Example 3
    def downsample(self, ds_res):
        """
        Downsample Image to a specific resolution. This method returns a new Image.
        :param ds_res: int - new resolution, should be <= the current resolution of this Image
        :return: The downsampled Image object.
        """
        grid = grid_2d(self.res)
        grid_ds = grid_2d(ds_res)

        im_ds = np.zeros((ds_res, ds_res, self.n_images)).astype(self.dtype)

        # x, y values corresponding to 'grid'. This is what scipy interpolator needs to function.
        res_by_2 = self.res / 2
        x = y = np.ceil(np.arange(-res_by_2, res_by_2)) / res_by_2

        mask = (np.abs(grid['x']) < ds_res / self.res) & (np.abs(grid['y']) <
                                                          ds_res / self.res)
        im = np.real(
            centered_ifft2(centered_fft2(self.data) * np.expand_dims(mask, 2)))

        for s in range(im_ds.shape[-1]):
            interpolator = RegularGridInterpolator((x, y),
                                                   im[:, :, s],
                                                   bounds_error=False,
                                                   fill_value=0)
            im_ds[:, :, s] = interpolator(np.dstack([grid_ds['x'], grid_ds['y']]))

        return Image(im_ds)
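Examples 1-3 implement the same recipe: build a low-pass mask on the Fourier grid, apply it with a centered FFT/IFFT pair, then interpolate onto the coarser grid. For orientation, here is a minimal self-contained sketch of the underlying idea in plain NumPy, using a Fourier-space crop in place of the mask-and-interpolate step (the function name and the crop shortcut are illustrative, not ASPIRE's implementation):

import numpy as np
from numpy.fft import fft2, ifft2, fftshift, ifftshift

def downsample_single(img, ds_res):
    """Downsample a square 2D image to ds_res x ds_res by cropping its centered FFT."""
    res = img.shape[0]
    f = fftshift(fft2(ifftshift(img)))         # centered FFT
    start = (res - ds_res) // 2                # sizes assumed to have matching parity
    f_crop = f[start:start + ds_res, start:start + ds_res]
    out = fftshift(ifft2(ifftshift(f_crop)))   # back to real space
    return np.real(out) * (ds_res / res) ** 2  # rescale so the mean intensity is preserved

img = np.random.rand(64, 64)
print(downsample_single(img, 32).shape)        # (32, 32)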
Example 4
    def pca(self, signal, pixel_size, g_min, g_max):
        """

        :param signal: Estimated power spectrum.
        :param pixel_size: Pixel size in \u212b (Angstrom).
        :param g_min: Inverse of minimun resolution for PSD.
        :param g_max: Inverse of maximum resolution for PSD.
        :return: ratio.
        """

        # RCOPT
        signal = signal.asnumpy()[0].T

        N = signal.shape[0]
        center = N // 2

        grid = grid_2d(N, normalized=True, dtype=self.dtype)
        rb = np.sqrt((grid["x"] / 2)**2 + (grid["y"] / 2)**2).T

        r_ctf = rb * (10 / pixel_size)

        grid = grid_2d(N, normalized=False, dtype=self.dtype)
        X = grid["x"].T
        Y = grid["y"].T

        signal -= np.min(signal)

        rad_sq_min = N * pixel_size / g_min
        rad_sq_max = N * pixel_size / g_max

        min_limit = r_ctf[center, (center + np.floor(rad_sq_min)).astype(int)]
        signal[r_ctf < min_limit] = 0

        max_limit = r_ctf[center, (center + np.ceil(rad_sq_max)).astype(int)]
        signal = np.where(r_ctf > max_limit, 0, signal)

        moment_02 = Y**2 * signal
        moment_02 = np.sum(moment_02, axis=(0, 1))

        moment_11 = Y * X * signal
        moment_11 = np.sum(moment_11, axis=(0, 1))

        moment_20 = X**2 * signal
        moment_20 = np.sum(moment_20, axis=(0, 1))

        moment_mat = np.zeros((2, 2))
        moment_mat[0, 0] = moment_20
        moment_mat[1, 1] = moment_02
        moment_mat[0, 1] = moment_11
        moment_mat[1, 0] = moment_11

        moment_evals = npla.eigvalsh(moment_mat)
        ratio = moment_evals[0] / moment_evals[1]

        return ratio
Example 5
def circ(size, x0=0, y0=0, radius=1, peak=1, dtype=np.float64):
    """
    Returns a 2d `circ` function in a square 2d numpy array.

    where for r = sqrt(x**2 + y**2)

    circ(x,y) = peak : 0 <= r <= radius
                0 : otherwise

    Default is a centered circle with radius=peak=1.

    :param size: The height and width of returned array (pixels)
    :param x0: x coordinate of center (pixels)
    :param y0: y coordinate of center (pixels)
    :param radius: radius of circle
    :param peak: peak height at center
    :param dtype: dtype of returned array
    :return: Numpy array (2D)
    """

    # Construct centered mesh
    g = grid_2d(size, shifted=True, normalized=False, dtype=dtype)

    vals = ((g["x"] - x0)**2 + (g["y"] - y0)**2) < radius * radius
    return (peak * vals).astype(dtype)
Example 6
def gaussian_2d(size,
                x0=0,
                y0=0,
                sigma_x=1,
                sigma_y=1,
                peak=1,
                dtype=np.float64):
    """
    Returns a 2d Gaussian in a square 2d numpy array.

    Default is a centered disc of spread=peak=1.

    :param size: The height and width of returned array (pixels)
    :param x0: x coordinate of center (pixels)
    :param y0: y coordinate of center (pixels)
    :param sigma_x: spread in x direction
    :param sigma_y: spread in y direction
    :param peak: peak height at center
    :param dtype: dtype of returned array
    :return: Numpy array (2D)
    """

    # Construct centered mesh
    g = grid_2d(size, shifted=True, normalized=False, dtype=dtype)

    p = (g["x"] - x0)**2 / (2 * sigma_x**2) + (g["y"] - y0)**2 / (2 *
                                                                  sigma_y**2)
    return (peak * np.exp(-p)).astype(dtype, copy=False)
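circ (Example 5) and gaussian_2d above, like inverse_r in Example 23, follow the same pattern: evaluate a radial function on a shifted, unnormalized grid_2d mesh. A short usage sketch, assuming both functions and their grid_2d dependency are importable (the exact import path is an assumption):

import numpy as np
# from aspire.utils import circ, gaussian_2d   # assumed import path

size = 65
blob = gaussian_2d(size, x0=5, y0=-3, sigma_x=4, sigma_y=8, peak=10)
support = circ(size, radius=20)

print(blob.shape, blob.max())   # (65, 65) 10.0 -- peak value at the shifted center
print(support.sum())            # roughly pi * 20**2 pixels inside the disc
masked = blob * support         # Gaussian restricted to a circular support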
Example 7
    def estimate_noise_psd(self):
        """
        :return: The estimated noise variance of the images in the Source used to create this estimator.
        TODO: How's this initial estimate of variance different from the 'estimate' method?
        """
        # Run estimate using saved parameters
        g2d = grid_2d(self.L)
        mask = g2d['r'] >= self.bgRadius

        mean_est = 0
        noise_psd_est = np.zeros((self.L, self.L)).astype(self.src.dtype)
        for i in range(0, self.n, self.batchSize):
            images = self.src.images(i, self.batchSize).asnumpy()
            images_masked = (images * np.expand_dims(mask, 2))

            _denominator = self.n * np.sum(mask)
            mean_est += np.sum(images_masked) / _denominator
            im_masked_f = centered_fft2(images_masked)
            noise_psd_est += np.sum(np.abs(im_masked_f**2),
                                    axis=2) / _denominator

        mid = self.L // 2
        noise_psd_est[mid, mid] -= mean_est**2

        return noise_psd_est
Example 8
    def background_subtract_2d(self, signal, background_p1, max_col):
        """
        Subtract background from estimated power spectrum

        :param signal: Estimated power spectrum
        :param background_p1: 1-D background estimation
        :param max_col: Internal variable, returned as the second parameter from opt1d.
        :return: 2-tuple of NumPy arrays (Estimated PSD without noise and estimated noise).
        """

        signal = signal.asnumpy()

        N = signal.shape[1]
        grid = grid_2d(N, normalized=False, dtype=self.dtype)

        radii = np.sqrt((grid["x"] / 2)**2 + (grid["y"] / 2)**2).T

        background = np.zeros(signal.shape, dtype=self.dtype)
        for r in range(max_col + 2, background_p1.shape[1]):
            background[:, (r < radii) & (radii <= r + 1)] = background_p1[max_col, r]
        mask = radii <= max_col + 2
        background[:, mask] = signal[:, mask]

        signal = signal - background
        signal = np.maximum(0, signal)

        return Image(signal), Image(background)
Example 9
def unique_coords_nd(N, ndim, shifted=False, normalized=True):
    """
    Generate unique polar coordinates from 2D or 3D rectangular coordinates.
    :param N: length size of a square or cube.
    :param ndim: number of dimension, 2 or 3.
    :param shifted: whether to shift the grid by half a pixel for odd N.
    :param normalized: whether to normalize the grid.
    :return: The unique polar coordinates in 2D or 3D
    """
    ensure(ndim in (2, 3),
           'Only two- or three-dimensional basis functions are supported.')
    ensure(N > 0, 'Number of grid points should be greater than 0.')

    if ndim == 2:
        grid = grid_2d(N, shifted=shifted, normalized=normalized)
        mask = grid['r'] <= 1

        # Minor differences in r/theta/phi values are unimportant for the purpose
        # of this function, so round off before proceeding

        # TODO: numpy boolean indexing will return a 1d array (like MATLAB)
        # However, it always searches in row-major order, unlike MATLAB (column-major),
        # with no options to change the search order. The results we'll be getting back are thus not comparable.
        # We transpose the appropriate ndarrays before applying the mask to obtain the same behavior as MATLAB.
        r = grid['r'].T[mask].round(5)
        phi = grid['phi'].T[mask].round(5)

        r_unique, r_idx = np.unique(r, return_inverse=True)
        ang_unique, ang_idx = np.unique(phi, return_inverse=True)

    else:
        grid = grid_3d(N, shifted=shifted, normalized=normalized)
        mask = grid['r'] <= 1

        # In Numpy, elements in the indexed array are always iterated and returned in row-major (C-style) order.
        # To emulate a behavior where iteration happens in Fortran order, we swap axes 0 and 2 of both the array
        # being indexed (r/theta/phi), as well as the mask itself.
        # TODO: This is only for the purpose of getting the same behavior as MATLAB while porting the code, and is
        # likely not needed in the final version.

        # Minor differences in r/theta/phi values are unimportant for the purpose of this function,
        # so we round off before proceeding.

        mask_ = np.swapaxes(mask, 0, 2)
        r = np.swapaxes(grid['r'], 0, 2)[mask_].round(5)
        theta = np.swapaxes(grid['theta'], 0, 2)[mask_].round(5)
        phi = np.swapaxes(grid['phi'], 0, 2)[mask_].round(5)

        r_unique, r_idx = np.unique(r, return_inverse=True)
        ang_unique, ang_idx = np.unique(np.vstack([theta, phi]),
                                        axis=1,
                                        return_inverse=True)

    return {
        'r_unique': r_unique,
        'ang_unique': ang_unique,
        'r_idx': r_idx,
        'ang_idx': ang_idx,
        'mask': mask
    }
Example 10
def rotated_grids(L, rot_matrices):
    """
    Generate rotated Fourier grids in 3D from rotation matrices
    :param L: The resolution of the desired grids.
    :param rot_matrices: An array of size K-by-3-by-3 containing K rotation matrices.
    :return: A set of rotated Fourier grids in three dimensions as specified by the rotation matrices.
        Frequencies are in the range [-pi, pi].
    """

    grid2d = grid_2d(L, dtype=rot_matrices.dtype)
    num_pts = L**2
    num_rots = rot_matrices.shape[0]
    pts = np.pi * np.vstack([
        grid2d["x"].flatten(),
        grid2d["y"].flatten(),
        np.zeros(num_pts, dtype=rot_matrices.dtype),
    ])
    pts_rot = np.zeros((3, num_pts, num_rots), dtype=rot_matrices.dtype)
    for i in range(num_rots):
        pts_rot[:, :, i] = rot_matrices[i, :, :] @ pts

    pts_rot = pts_rot.reshape((3, L, L, num_rots))

    # Note we return grids as (Z, Y, X)
    return pts_rot[::-1]
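A short usage sketch for rotated_grids above, assuming the function and ASPIRE's grid_2d are importable; the random rotation matrices come from SciPy and are only for illustration:

import numpy as np
from scipy.spatial.transform import Rotation

L = 8
rot_matrices = Rotation.random(5, random_state=0).as_matrix()  # (5, 3, 3) float64

pts_rot = rotated_grids(L, rot_matrices)
print(pts_rot.shape)   # (3, 8, 8, 5): (z, y, x) frequency coordinates per rotation
print(pts_rot.dtype)   # float64, matching rot_matrices.dtype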
Example 11
    def evaluate_grid(self, L, dtype=np.float32, *args, **kwargs):
        grid2d = grid_2d(L, dtype=dtype)
        omega = np.pi * np.vstack(
            (grid2d["x"].flatten("F"), grid2d["y"].flatten("F")))
        h = self.evaluate(omega, *args, **kwargs)

        h = m_reshape(h, grid2d["x"].shape)

        return h
Example 12
    def evaluate_grid(self, L, *args, **kwargs):
        grid2d = grid_2d(L)
        omega = np.pi * np.vstack(
            (grid2d['x'].flatten('F'), grid2d['y'].flatten('F')))
        h = self.evaluate(omega, *args, **kwargs)

        h = m_reshape(h, grid2d['x'].shape)

        return h
Example 13
    def evaluate_grid(self, L, *args, **kwargs):
        # Todo: remove redundancy wrt a single Filter's evaluate_grid
        grid2d = grid_2d(L)
        omega = np.pi * np.vstack(
            (grid2d['x'].flatten('F'), grid2d['y'].flatten('F')))
        h = self.evaluate(omega, *args, **kwargs)

        h = m_reshape(h, grid2d['x'].shape + (len(self.filters), ))

        return h
Example 14
    def testNormBackground(self):
        bg_radius = 1.0
        grid = grid_2d(self.L)
        mask = grid["r"] > bg_radius
        self.sim.normalize_background()
        imgs_nb = self.sim.images(start=0, num=self.n).asnumpy()
        new_mean = np.mean(imgs_nb[:, mask])
        new_variance = np.var(imgs_nb[:, mask])

        # new mean of noise should be close to zero and variance should be close to 1
        self.assertTrue(new_mean < 1e-7 and abs(new_variance - 1) < 1e-7)
Example 15
    def eval_filter_grid(self, L, power=1):
        grid2d = grid_2d(L, dtype=self.dtype)
        omega = np.pi * np.vstack((grid2d["x"].flatten(), grid2d["y"].flatten()))

        h = np.empty((omega.shape[-1], len(self.filter_indices)), dtype=self.dtype)
        for i, filt in enumerate(self.unique_filters):
            idx_k = np.where(self.filter_indices == i)[0]
            if len(idx_k) > 0:
                filter_values = filt.evaluate(omega)
                if power != 1:
                    filter_values **= power
                h[:, idx_k] = np.column_stack((filter_values,) * len(idx_k))

        h = np.reshape(h, grid2d["x"].shape + (len(self.filter_indices),))

        return h
Example 16
    def invert_contrast(self, batch_size=512):
        """
        Invert the global contrast of images.

        Check if all images in a stack should be globally phase flipped so that
        the molecule corresponds to brighter pixels and the background corresponds
        to darker pixels. This is done by comparing the mean in a small circle
        around the origin (supposed to correspond to the molecule) with the mean
        of the noise, and making sure that the mean of the molecule is larger.
        At the implementation level, we modify the `ImageSource` in place by
        appending a `Multiply` filter to the generation pipeline.

        :param batch_size: Batch size of images to query.
        :return: On return, the `ImageSource` object has been modified in place.
        """

        logger.info("Apply contrast inversion on source object")
        L = self.L
        grid = grid_2d(L, shifted=True)
        # Get mask indices of signal and noise samples assuming molecule
        signal_mask = grid["r"] < 0.5
        noise_mask = grid["r"] > 0.8

        # Calculate mean values in batch_size
        signal_mean = 0.0
        noise_mean = 0.0

        for i in range(0, self.n, batch_size):
            images = self.images(i, batch_size).asnumpy()
            signal = images * signal_mask
            noise = images * noise_mask
            signal_mean += np.sum(signal)
            noise_mean += np.sum(noise)
        signal_denominator = self.n * np.sum(signal_mask)
        noise_denominator = self.n * np.sum(noise_mask)
        signal_mean /= signal_denominator
        noise_mean /= noise_denominator

        if signal_mean < noise_mean:
            logger.info("Need to invert contrast")
            scale_factor = -1.0
        else:
            logger.info("No need to invert contrast")
            scale_factor = 1.0

        logger.info("Adding Scaling Xform to end of generation pipeline")
        self.generation_pipeline.add_xform(Multiply(scale_factor))
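The decision rule in invert_contrast does not depend on the ImageSource machinery. Below is a minimal sketch of the same check on a plain NumPy stack; the helper name is hypothetical, the 0.5/0.8 radii follow the method above, and np.meshgrid stands in for grid_2d:

import numpy as np

def needs_contrast_inversion(imgs):
    """imgs: (n, L, L) stack. Return True if the global contrast should be flipped."""
    L = imgs.shape[-1]
    # Normalized, half-pixel-shifted radial coordinate (analogous to grid_2d(L, shifted=True)["r"])
    coords = np.linspace(-1, 1, L, endpoint=False) + 1.0 / L
    x, y = np.meshgrid(coords, coords)
    r = np.sqrt(x**2 + y**2)

    signal_mask = r < 0.5   # central disc, assumed to hold the molecule
    noise_mask = r > 0.8    # outer ring, assumed to be background

    return imgs[:, signal_mask].mean() < imgs[:, noise_mask].mean()

imgs = np.random.randn(10, 64, 64)
imgs[:, 24:40, 24:40] -= 5.0           # dark "molecule" on a lighter background
print(needs_contrast_inversion(imgs))  # True -> multiply the stack by -1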
Example 17
    def eval_filter_grid(self, L, power=1):
        grid2d = grid_2d(L)
        omega = np.pi * np.vstack(
            (grid2d['x'].flatten(), grid2d['y'].flatten()))

        h = np.empty((omega.shape[-1], len(self.filters)))
        for f in set(self.filters):
            idx_k = np.where(self.filters == f)[0]
            if len(idx_k) > 0:
                filter_values = f.evaluate(omega)
                if power != 1:
                    filter_values **= power
                h[:, idx_k] = np.column_stack((filter_values, ) * len(idx_k))

        h = np.reshape(h, grid2d['x'].shape + (len(self.filters), ))

        return h
Example 18
def normalize_bg(imgs, bg_radius=1.0, do_ramp=True):
    """
    Normalize backgrounds and apply to a stack of images

    :param imgs: A stack of images in N-by-L-by-L array
    :param bg_radius: Radius cutoff to be considered as background (in image size)
    :param do_ramp: When `True`, fit a ramping background to the data and subtract it,
            i.e. normalize based on values from each individual image.
            Otherwise, a constant background level from all images is used.
    :return: The modified images
    """
    L = imgs.shape[-1]
    grid = grid_2d(L)
    mask = grid["r"] > bg_radius

    if do_ramp:
        # Create matrices and reshape the background mask
        # for fitting a ramping background
        ramp_mask = np.vstack((
            grid["x"][mask].flatten(),
            grid["y"][mask].flatten(),
            np.ones(grid["y"][mask].flatten().size),
        )).T
        ramp_all = np.vstack(
            (grid["x"].flatten(), grid["y"].flatten(), np.ones(L * L))).T
        mask_reshape = mask.reshape((L * L))
        imgs = imgs.reshape((-1, L * L))

        # Fit a ramping background and apply to images
        coeff = lstsq(ramp_mask, imgs[:, mask_reshape].T)[0]  # RCOPT
        imgs = imgs - (ramp_all @ coeff).T  # RCOPT
        imgs = imgs.reshape((-1, L, L))

    # Apply mask images and calculate mean and std values of background
    imgs_masked = imgs * mask
    denominator = np.sum(mask)
    first_moment = np.sum(imgs_masked, axis=(1, 2)) / denominator
    second_moment = np.sum(imgs_masked**2, axis=(1, 2)) / denominator
    mean = first_moment.reshape(-1, 1, 1)
    variance = second_moment.reshape(-1, 1, 1) - mean**2
    std = np.sqrt(variance)

    return (imgs - mean) / std
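A quick sanity check of normalize_bg on synthetic noise; after normalization the background pixels should have mean close to 0 and standard deviation close to 1 (this assumes normalize_bg above and its grid_2d dependency are importable):

import numpy as np

rng = np.random.default_rng(0)
imgs = rng.normal(loc=3.0, scale=2.0, size=(16, 64, 64))  # offset, non-unit-variance noise

imgs_norm = normalize_bg(imgs, bg_radius=1.0, do_ramp=False)

# Since the input is pure noise, the whole normalized image shares the
# background statistics: mean ~ 0, std ~ 1.
print(imgs_norm.mean(), imgs_norm.std())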
Example 19
    def __init__(
        self,
        pixel_size,
        cs,
        amplitude_contrast,
        voltage,
        psd_size,
        num_tapers,
        dtype=np.float32,
    ):
        """
        Instantiate a CtfEstimator instance.

        :param pixel_size: Size of the pixel in \u212b (Angstrom).
        :param cs: Spherical aberration in mm.
        :param amplitude_contrast: Amplitude contrast.
        :param voltage: Voltage of electron microscope.
        :param psd_size: Block size (in pixels) for PSD estimation.
        :param num_tapers: Number of tapers to apply in PSD estimation.
        :returns: CtfEstimator instance.
        """

        self.pixel_size = pixel_size
        self.cs = cs
        self.amplitude_contrast = amplitude_contrast
        self.voltage = voltage
        self.psd_size = psd_size
        self.num_tapers = num_tapers
        self.lmbd = voltage_to_wavelength(voltage) / 10.0  # (Angstrom)
        self.dtype = np.dtype(dtype)

        grid = grid_2d(psd_size, normalized=True, dtype=self.dtype)

        # Note this mesh for x,y is transposed, and range is -half to half.
        rb = np.sqrt((grid["x"] / 2)**2 + (grid["y"] / 2)**2).T

        self.r_ctf = rb * (10 / pixel_size)  # units: inverse nm
        # Note this mesh for theta is transposed.
        self.theta = grid["phi"].T
        self.defocus1 = 0
        self.defocus2 = 0
        self.angle = 0  # Radians
        self.h = 0
Example 20
    def _estimate_noise_variance(self):
        """
        Any additional arguments/keyword-arguments are passed on to the Source's 'images' method
        :return: The estimated noise variance of the images in the Source used to create this estimator.
        TODO: How's this initial estimate of variance different from the 'estimate' method?
        """
        # Run estimate using saved parameters
        g2d = grid_2d(self.L)
        mask = g2d['r'] >= self.bgRadius

        first_moment = 0
        second_moment = 0
        for i in range(0, self.n, self.batchSize):
            images = self.src.images(start=i, num=self.batchSize)
            images_masked = (images * np.expand_dims(mask, 2))

            _denominator = self.n * np.sum(mask)
            first_moment += np.sum(images_masked) / _denominator
            second_moment += np.sum(np.abs(images_masked**2)) / _denominator
        return second_moment - first_moment**2
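The estimate above is simply the second moment of the background pixels minus their squared mean. A standalone sketch of the same computation on a plain NumPy stack (the helper name and the meshgrid-based mask are illustrative stand-ins for the estimator and grid_2d):

import numpy as np

def noise_variance_estimate(imgs, bg_radius=1.0):
    """imgs: (n, L, L) stack. Estimate noise variance from pixels outside bg_radius."""
    L = imgs.shape[-1]
    coords = np.ceil(np.arange(-L / 2, L / 2)) / (L / 2)
    x, y = np.meshgrid(coords, coords)
    mask = np.sqrt(x**2 + y**2) >= bg_radius

    background = imgs[:, mask]               # (n, number of background pixels)
    first_moment = background.mean()
    second_moment = (background ** 2).mean()
    return second_moment - first_moment ** 2

imgs = 0.5 + 2.0 * np.random.randn(32, 64, 64)  # noise with variance 4
print(noise_variance_estimate(imgs))            # close to 4.0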
Example 21
    def evaluate_grid(self, L, dtype=np.float32, *args, **kwargs):
        """
        Generates a two dimensional grid with prescribed dtype,
        yielding the values (omega) which are then evaluated by
        the filter's evaluate method.

        Passes arbitrary args and kwargs down to the self.evaluate method.

        :param L: Number of grid points (L by L).
        :param dtype: dtype of grid, defaults to np.float32.
        :return: Filter values at omega's points.
        """

        grid2d = grid_2d(L, dtype=dtype)
        omega = np.pi * np.vstack(
            (grid2d["x"].flatten("F"), grid2d["y"].flatten("F")))
        h = self.evaluate(omega, *args, **kwargs)

        h = m_reshape(h, grid2d["x"].shape)

        return h
Example 22
def rotated_grids(L, rot_matrices):
    """
    Generate rotated Fourier grids in 3D from rotation matrices
    :param L: The resolution of the desired grids.
    :param rot_matrices: An array of size 3-by-3-by-K containing K rotation matrices
    :return: A set of rotated Fourier grids in three dimensions as specified by the rotation matrices.
        Frequencies are in the range [-pi, pi].
    """
    # TODO: Flattening and reshaping at end may not be necessary!
    grid2d = grid_2d(L)
    num_pts = L**2
    num_rots = rot_matrices.shape[-1]
    pts = np.pi * np.vstack([
        grid2d['x'].flatten('F'), grid2d['y'].flatten('F'),
        np.zeros(num_pts)
    ])
    pts_rot = np.zeros((3, num_pts, num_rots))
    for i in range(num_rots):
        pts_rot[:, :, i] = rot_matrices[:, :, i] @ pts

    pts_rot = m_reshape(pts_rot, (3, L, L, num_rots))
    return pts_rot
Example 23
def inverse_r(size, x0=0, y0=0, peak=1, dtype=np.float64):
    """
    Returns a 2d inverse radius function in a square 2d numpy array.

    Where inverse_r(x,y): 1/sqrt(1 + x**2 + y**2)

    Default is centered at the origin with peak=1.

    :param size: The height and width of returned array (pixels)
    :param x0: x coordinate of center (pixels)
    :param y0: y coordinate of center (pixels)
    :param peak: peak height at center
    :param dtype: dtype of returned array
    :return: Numpy array (2D)
    """

    # Construct centered mesh
    g = grid_2d(size, shifted=True, normalized=False, dtype=dtype)

    # Compute the denominator
    vals = np.sqrt(1 + (g["x"] - x0)**2 + (g["y"] - y0)**2)

    return (peak / vals).astype(dtype)
Example 24
def downsample(insamples, szout):
    """
    Blur and downsample 1D to 3D objects such as, curves, images or volumes
    :param insamples: Set of objects to be downsampled in the form of an array, the last dimension
                    is the number of objects.
    :param szout: The desired resolution of the output objects.
    :return: An array consisting of the blurred and downsampled objects.
    """

    ensure(
        insamples.ndim - 1 == szout.ndim,
        'The number of downsampling dimensions is not the same as that of objects.'
    )

    L_in = insamples.shape[0]
    L_out = szout.shape[0]
    ndata = insamples.shape[-1]
    outdims = szout.shape + (ndata,)
    outsamples = np.zeros(outdims, dtype=insamples.dtype)

    if insamples.ndim == 2:
        # one dimension object
        grid_in = grid_1d(L_in)
        grid_out = grid_1d(L_out)
        # x values corresponding to 'grid'. This is what scipy interpolator needs to function.
        x = np.ceil(np.arange(-L_in / 2, L_in / 2)) / (L_in / 2)
        mask = (np.abs(grid_in['x']) < L_out / L_in)
        insamples_fft = np.real(
            centered_ifft1(centered_fft1(insamples) * np.expand_dims(mask, 1)))
        for idata in range(ndata):
            interpolator = RegularGridInterpolator((x, ),
                                                   insamples_fft[:, idata],
                                                   bounds_error=False,
                                                   fill_value=0)
            outsamples[:, idata] = interpolator(grid_out['x'][:, np.newaxis])

    elif insamples.ndim == 3:
        grid_in = grid_2d(L_in)
        grid_out = grid_2d(L_out)
        # x, y values corresponding to 'grid'. This is what scipy interpolator needs to function.
        x = y = np.ceil(np.arange(-L_in / 2, L_in / 2)) / (L_in / 2)
        mask = (np.abs(grid_in['x']) < L_out / L_in) & (np.abs(grid_in['y']) <
                                                        L_out / L_in)
        insamples_fft = np.real(
            centered_ifft2(centered_fft2(insamples) * np.expand_dims(mask, 2)))
        for idata in range(ndata):
            interpolator = RegularGridInterpolator((x, y),
                                                   insamples_fft[:, :, idata],
                                                   bounds_error=False,
                                                   fill_value=0)
            outsamples[:, :, idata] = interpolator(
                np.dstack([grid_out['x'], grid_out['y']]))

    elif insamples.ndim == 4:
        grid_in = grid_3d(L_in)
        grid_out = grid_3d(L_out)
        # x, y, z values corresponding to 'grid'. This is what scipy interpolator needs to function.
        x = y = z = np.ceil(np.arange(-L_in / 2, L_in / 2)) / (L_in / 2)
        mask = (np.abs(grid_in['x']) < L_out / L_in) & (np.abs(
            grid_in['y']) < L_out / L_in) & (np.abs(grid_in['z']) <
                                             L_out / L_in)
        insamples_fft = np.real(
            centered_ifft3(centered_fft3(insamples) * np.expand_dims(mask, 3)))
        for idata in range(ndata):
            interpolator = RegularGridInterpolator((x, y, z),
                                                   insamples_fft[:, :, :,
                                                                 idata],
                                                   bounds_error=False,
                                                   fill_value=0)
            outsamples[:, :, :, idata] = interpolator(
                np.dstack([grid_out['x'], grid_out['y'], grid_out['z']]))

    return outsamples
Example 25
def vol2img(volume, rots, L=None, dtype=None):
    """
    Generate 2D images from the input volume and rotation angles

    The function handles odd and even-sized arrays correctly. The center of
    an odd array is taken to be at (n+1)/2, and an even array is n/2+1.
    :param volume: A 3D volume object.
    :param rots: An n-by-3-by-3 array of rotation matrices.
    :param L: The output size of the 2D images.
    :return: An array consisting of 2D images.
    """

    if L is None:
        L = np.size(volume, 0)
    if dtype is None:
        dtype = volume.dtype

    lv = np.size(volume, 0)
    if L > lv + 1:
        # For compatibility with gen_projections, allow one pixel aliasing.
        # More precisely, it should be N>nv, however, by using nv+1 the
        # results match those of gen_projections.
        if np.mod(L - lv, 2) == 1:
            raise RuntimeError(
                'Upsampling from odd to even sizes or vice versa is '
                'currently not supported')
        dL = int(np.floor((L - lv) / 2))
        fv = centered_fft3(volume)
        padded_volume = np.zeros((L, L, L), dtype=fv.dtype)
        padded_volume[dL + 1:dL + lv + 1, dL + 1:dL + lv + 1,
                      dL + 1:dL + lv + 1] = fv
        volume = centered_ifft3(padded_volume)
        ensure(
            np.linalg.norm(np.imag(volume)) / np.linalg.norm(volume) < 1.0e-5,
            "The imaginary part of the volume is relatively large (>1.0e-5).")
        #  The new volume size
        lv = L

    grid2d = grid_2d(lv, shifted=True, normalized=False)

    num_pts = lv**2
    num_rots = rots.shape[0]
    pts = np.pi * np.vstack([
        grid2d['x'].flatten('F'), grid2d['y'].flatten('F'),
        np.zeros(num_pts)
    ])

    pts_rot = np.zeros((3, num_pts, num_rots))

    for i in range(num_rots):
        pts_rot[:, :, i] = rots[i, :, :].T @ pts

    pts_rot = m_reshape(pts_rot, (3, lv**2 * num_rots))

    pts_rot = -2 * pts_rot / lv

    im_f = Plan(volume.shape, -pts_rot).transform(volume)

    im_f = m_reshape(im_f, (lv, lv, -1))

    if lv % 2 == 0:
        pts_rot = m_reshape(pts_rot, (3, lv, lv, num_rots))
        im_f = im_f * np.exp(1j * np.sum(pts_rot, 0) / 2)
        im_f = im_f * np.expand_dims(
            np.exp(2 * np.pi * 1j * (grid2d['x'] + grid2d['y'] - 1) /
                   (2 * lv)), 2)

    im = centered_ifft2(im_f)
    if lv % 2 == 0:
        im = im * m_reshape(
            np.exp(2 * np.pi * 1j * (grid2d['x'] + grid2d['y']) / (2 * lv)),
            (lv, lv, 1))

    return np.real(im)
Example 26
def adaptive_support(img_src, energy_threshold=0.99):
    """
    Determine size of the compact support in both real and Fourier Space.

    Returns c_limit (support radius in Fourier space),
    and R_limit (support radius in real space).

    Fourier c_limit is scaled in range [0, 0.5].
    R_limit is in pixels [0, Image.res/2].

    :param img_src: Input `Source` of images.
    :param energy_threshold: [0, 1] threshold limit
    :return: (c_limit, R_limit)
    """

    if not isinstance(img_src, ImageSource):
        raise RuntimeError(
            "adaptive_support expects `Source` instance or subclass.")

    # Sanity Check Threshold is in range
    if energy_threshold <= 0 or energy_threshold > 1:
        raise ValueError(
            f"Given energy_threshold {energy_threshold} outside sane range [0,1]"
        )

    L = img_src.L
    N = L // 2

    r = grid_2d(L, shifted=False, normalized=False, dtype=img_src.dtype)["r"]

    # Estimate noise
    noise_est = WhiteNoiseEstimator(img_src)
    noise_var = noise_est.estimate()

    # Transform to Fourier space
    img = img_src.images(0, img_src.n).asnumpy()
    imgf = fft.centered_fft2(img)

    # Compute the Variance and Power Spectrum
    #   Mean along image stack.
    variance_map = np.mean(np.abs(img)**2, axis=0)
    pspec = np.mean(np.abs(imgf)**2, axis=0)

    # Compute the Radial Variance and Radial Power Spectrum
    radial_var = np.zeros(N)
    radial_pspec = np.zeros(N)
    for i in range(N):
        mask = (r >= i) & (r < i + 1)
        # Mean along radial track defined by mask
        radial_var[i] = np.mean(variance_map[mask])
        radial_pspec[i] = np.mean(pspec[mask])

    # Subtract the noise variance
    radial_pspec -= noise_var
    radial_var -= noise_var

    # Lower bound variance and power by 0
    np.clip(radial_pspec, 0, a_max=None, out=radial_pspec)
    np.clip(radial_var, 0, a_max=None, out=radial_var)

    # Construct range of Fourier limits. We need a half-sample correction
    # since each ring is centered between two integer radii. Same for spatial
    # domain (R).
    c = (np.arange(N) + 0.5) / (2 * N)
    R = np.arange(N) + 0.5

    # Calculate cumulative energy
    cum_pspec = np.cumsum(radial_pspec * c)
    cum_var = np.cumsum(radial_var * R)

    # Normalize energies [0,1]
    #  Multiply threshold to avoid unstable division
    c_energy_threshold = energy_threshold * cum_pspec[-1]
    R_energy_threshold = energy_threshold * cum_var[-1]

    # First note legacy code *=L for Fourier limit,
    #   but then only uses divided by L... so just removed here.
    #   This makes it consistent with Nyquist, ie [0, .5]
    # Second note, we attempt to find the cutoff,
    #   but when a search fails returns the last (-1) element,
    #   essentially the maximal radius.
    # Third note, to increase accuracy, we take a weighted average of the two
    #   points around the cutoff. This mostly affects c since R is rounded.

    ind = np.argmax(cum_pspec > c_energy_threshold)
    if ind > 0:
        c_limit = (cum_pspec[ind - 1] * c[ind - 1] + cum_pspec[ind] *
                   c[ind]) / (cum_pspec[ind - 1] + cum_pspec[ind])
    else:
        c_limit = c[-1]

    ind = np.argmax(cum_var > R_energy_threshold)
    if ind > 0:
        R_limit = round(
            (cum_var[ind - 1] * R[ind - 1] + cum_var[ind] * R[ind]) /
            (cum_var[ind - 1] + cum_var[ind]))
    else:
        R_limit = R[-1]

    return c_limit, R_limit
Example 27
    def testGrid2d(self):
        grid2d = grid_2d(8)
        self.assertTrue(np.allclose(grid2d['x'], np.load(os.path.join(DATA_DIR, 'grid2d_8_x.npy'))))
        self.assertTrue(np.allclose(grid2d['y'], np.load(os.path.join(DATA_DIR, 'grid2d_8_y.npy'))))
        self.assertTrue(np.allclose(grid2d['r'], np.load(os.path.join(DATA_DIR, 'grid2d_8_r.npy'))))
        self.assertTrue(np.allclose(grid2d['phi'], np.load(os.path.join(DATA_DIR, 'grid2d_8_phi.npy'))))
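Every example on this page consumes the dictionary returned by grid_2d, but the helper itself is never shown. As a rough, illustrative sketch of a compatible helper (not ASPIRE's implementation; the pixel-center and axis-ordering conventions are assumptions inferred from how 'x', 'y', 'r' and 'phi' are used above):

import numpy as np

def grid_2d_sketch(n, normalized=True, dtype=np.float64):
    """Illustrative approximation of the grid_2d helper used throughout this page.

    Returns Cartesian ('x', 'y') and polar ('r', 'phi') meshes for an n x n grid.
    ASPIRE's grid_2d also supports a `shifted` option and may differ in conventions.
    """
    pts = np.ceil(np.arange(-n / 2, n / 2, dtype=dtype))
    if normalized:
        pts = pts / (n / 2)
    x, y = np.meshgrid(pts, pts, indexing="xy")
    r = np.sqrt(x ** 2 + y ** 2)
    phi = np.mod(np.arctan2(y, x), 2 * np.pi)
    return {"x": x, "y": y, "r": r, "phi": phi}

g = grid_2d_sketch(8)
print(sorted(g.keys()), g["x"].shape)  # ['phi', 'r', 'x', 'y'] (8, 8)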
Example 28
def estimate_ctf(
    data_folder,
    pixel_size,
    cs,
    amplitude_contrast,
    voltage,
    num_tapers,
    psd_size,
    g_min,
    g_max,
    output_dir,
    dtype=np.float32,
):
    """
    Given parameters, estimates the CTF from experimental data
    and returns the CTF as an mrc file.
    """

    dtype = np.dtype(dtype)
    assert dtype in (np.float32, np.float64)

    # Materialize the listing; os.scandir returns a one-shot iterator.
    dir_content = list(os.scandir(data_folder))

    mrc_files = [
        f.name for f in dir_content if os.path.splitext(f)[1] == ".mrc"
    ]
    mrcs_files = [
        f.name for f in dir_content if os.path.splitext(f)[1] == ".mrcs"
    ]
    file_names = mrc_files + mrcs_files

    amp = amplitude_contrast
    amplitude_contrast = np.arctan(amplitude_contrast /
                                   np.sqrt(1 - amplitude_contrast**2))

    lmbd = voltage_to_wavelength(voltage) / 10  # (Angstrom)

    ctf_object = CtfEstimator(pixel_size,
                              cs,
                              amplitude_contrast,
                              voltage,
                              psd_size,
                              num_tapers,
                              dtype=dtype)

    # Note for repro debugging, suggest use of doubles,
    #   closer to original code.
    ffbbasis = FFBBasis2D((psd_size, psd_size), 2, dtype=dtype)

    results = []
    for name in file_names:
        with mrcfile.open(os.path.join(data_folder, name),
                          mode="r",
                          permissive=True) as mrc:
            micrograph = mrc.data

        # Try to match dtype used in Basis instance
        micrograph = micrograph.astype(dtype, copy=False)

        micrograph_blocks = ctf_object.preprocess_micrograph(
            micrograph, psd_size)

        tapers_1d = ctf_object.tapers(psd_size, num_tapers / 2, num_tapers)

        signal_observed = ctf_object.estimate_psd(micrograph_blocks, tapers_1d)

        amplitude_spectrum, _ = ctf_object.elliptical_average(
            ffbbasis, signal_observed,
            True)  # absolute difference: 10^-14. Relative error: 10^-7

        # Optionally changing to: linprog_method='simplex',
        # will more deterministically repro results in exchange for speed.
        signal_1d, background_1d = ctf_object.background_subtract_1d(
            amplitude_spectrum, linprog_method="interior-point")

        avg_defocus, low_freq_skip = ctf_object.opt1d(
            signal_1d,
            pixel_size,
            cs,
            lmbd,  # (Angstrom)
            amplitude_contrast,
            signal_observed.shape[-1],
        )

        low_freq_skip = 12
        signal, background_2d = ctf_object.background_subtract_2d(
            signal_observed, background_1d, low_freq_skip)

        ratio = ctf_object.pca(signal_observed, pixel_size, g_min, g_max)

        signal, additional_background = ctf_object.elliptical_average(
            ffbbasis, signal.sqrt(), False)

        background_2d = background_2d + additional_background

        initial_df1 = (avg_defocus * 2) / (1 + ratio)
        initial_df2 = (avg_defocus * 2) - initial_df1

        grid = grid_2d(psd_size, normalized=True, dtype=dtype)

        rb = np.sqrt((grid["x"] / 2)**2 + (grid["y"] / 2)**2).T
        r_ctf = rb * (10 / pixel_size)
        theta = grid["phi"].T

        angle = -5 / 12 * np.pi  # Radians (-75 degrees)
        cc_array = np.zeros((6, 4))
        for a in range(0, 6):
            df1, df2, angle_ast, p = ctf_object.gd(
                signal,
                initial_df1,
                initial_df2,
                angle + a * np.pi / 6.0,  # Radians, + a*30degrees
                r_ctf,
                theta,
                pixel_size,
                g_min,
                g_max,
                amplitude_contrast,
                lmbd,  # (Angstrom)
                cs,
            )

            cc_array[a, 0] = df1
            cc_array[a, 1] = df2
            cc_array[a, 2] = angle_ast  # Radians
            cc_array[a, 3] = p
        ml = np.argmax(cc_array[:, 3], -1)

        result = (
            cc_array[ml, 0],
            cc_array[ml, 1],
            cc_array[ml, 2],  # Radians
            cs,
            voltage,
            pixel_size,
            amp,
            name,
        )

        ctf_object.write_star(*result, output_dir)
        results.append(result)

        ctf_object.set_df1(cc_array[ml, 0])
        ctf_object.set_df2(cc_array[ml, 1])
        ctf_object.set_angle(cc_array[ml, 2])  # Radians
        ctf_object.generate_ctf()

        with mrcfile.new(output_dir + "/" + os.path.splitext(name)[0] +
                         "_noise.mrc",
                         overwrite=True) as mrc:
            mrc.set_data(background_2d[0].astype(np.float32))
            mrc.voxel_size = pixel_size
            mrc.close()

        df = (cc_array[ml, 0] + cc_array[ml, 1]) * np.ones(theta.shape, theta.dtype) + (
            cc_array[ml, 0] - cc_array[ml, 1]
        ) * np.cos(2 * theta - 2 * cc_array[ml, 2] * np.ones(theta.shape, theta.dtype))
        ctf_im = -np.sin(np.pi * lmbd * r_ctf**2 / 2 *
                         (df - lmbd**2 * r_ctf**2 * cs * 1e6) +
                         amplitude_contrast)
        ctf_signal = np.zeros(ctf_im.shape, ctf_im.dtype)
        ctf_signal[:ctf_im.shape[0] // 2, :] = ctf_im[:ctf_im.shape[0] // 2, :]
        ctf_signal[ctf_im.shape[0] // 2 + 1:, :] = signal[:, :, ctf_im.shape[0] // 2 + 1]

        with mrcfile.new(output_dir + "/" + os.path.splitext(name)[0] + ".ctf",
                         overwrite=True) as mrc:
            mrc.set_data(np.float32(ctf_signal))
            mrc.voxel_size = pixel_size
            mrc.close()

    return results