Example #1
def im_backproject(im, rot_matrices):
    """
    Backproject images along rotations
    :param im: An L-by-L-by-n array of images to backproject.
    :param rot_matrices: A 3-by-3-by-n array of rotation matrices corresponding to viewing directions.
    :return: An L-by-L-by-L volume corresponding to the sum of the backprojected images.
    """
    L, _, n = im.shape
    ensure(L == im.shape[1], "im must be LxLxn")
    ensure(n == rot_matrices.shape[2],
           "No. of rotation matrices must match the number of images")

    pts_rot = rotated_grids(L, rot_matrices)
    pts_rot = m_reshape(pts_rot, (3, -1))

    im_f = centered_fft2(im) / (L**2)
    if L % 2 == 0:
        im_f[0, :, :] = 0
        im_f[:, 0, :] = 0
    im_f = m_flatten(im_f)

    plan = Plan(sz=(L, L, L), fourier_pts=pts_rot)
    vol = np.real(plan.adjoint(im_f)) / L

    return vol
Example #2
def vec_to_symmat(vec):
    """
    Convert packed lower triangular vector to symmetric matrix
    :param vec: A vector of size N*(N+1)/2-by-... describing a symmetric (or Hermitian) matrix.
    :return: An array of size N-by-N-by-... in which the symmetric/Hermitian matrices occupy the first two
        dimensions. The lower triangular parts of these matrices consist of the corresponding vectors in vec.
    """
    # TODO: Handle complex values in vec
    if np.iscomplex(vec).any():
        raise NotImplementedError('Coming soon')

    # M represents N(N+1)/2
    M = vec.shape[0]
    N = int(round(np.sqrt(2 * M + 0.25) - 0.5))
    ensure((M == 0.5 * N * (N + 1)) and N != 0,
           "Vector must be of size N*(N+1)/2 for some N>0.")

    vec, sz_roll = unroll_dim(vec, 2)
    index_matrix = np.empty((N, N))
    i_upper = np.triu_indices_from(index_matrix)
    # Incrementally populate the upper triangle in row-major order
    index_matrix[i_upper] = np.arange(M)
    index_matrix.T[i_upper] = index_matrix[i_upper]  # Copy to lower triangle

    mat = vec[index_matrix.flatten('F').astype('int')]
    mat = m_reshape(mat, (N, N) + mat.shape[1:])
    mat = roll_dim(mat, sz_roll)

    return mat
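
The relation between the packed length M and the matrix size N, and the index_matrix construction above, can be sanity-checked with a plain-numpy sketch (hypothetical values; a Fortran-order reshape stands in for m_reshape):

import numpy as np

# A packed vector of length M = N*(N+1)/2 determines N via the quadratic formula.
M = 10
N = int(round(np.sqrt(2 * M + 0.25) - 0.5))    # N = 4 for M = 10
assert M == N * (N + 1) // 2

# Populate the upper triangle with 0..M-1 in row-major order and mirror it to the
# lower triangle, exactly as the index_matrix construction above does.
vec = np.arange(M, dtype=float)
index_matrix = np.empty((N, N))
i_upper = np.triu_indices(N)
index_matrix[i_upper] = np.arange(M)
index_matrix.T[i_upper] = index_matrix[i_upper]

mat = vec[index_matrix.flatten('F').astype(int)].reshape((N, N), order='F')
assert np.allclose(mat, mat.T)                 # the reconstructed matrix is symmetric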
Example #3
    def __init__(self, size, ell_max=None):

        d = len(size)
        ensure(d == 3, 'Only three-dimensional basis functions are supported.')
        ensure(len(set(size)) == 1, 'Only cubic domains are supported.')

        super().__init__(size, ell_max)
Example #4
    def __init__(self, xfer_fn_array):
        """
        A Filter corresponding to the specified transfer function.
        :param xfer_fn_array: The transfer function of the filter in the form of an array of one or two dimensions.
        """
        dim = xfer_fn_array.ndim
        ensure(dim in (1, 2), "Only dimensions 1 and 2 supported.")

        super().__init__(dim=dim, radial=False)

        # sz is assigned before we do anything with xfer_fn_array
        self.sz = xfer_fn_array.shape

        # The following code, though superficially different from the MATLAB code it's copied from,
        # results in the same behavior.
        # TODO: This could use documentation - very unintuitive!
        if dim == 1:
            # If we have a vector of even length, then append the first element to the last
            if xfer_fn_array.shape[0] % 2 == 0:
                xfer_fn_array = np.concatenate((xfer_fn_array, np.array([xfer_fn_array[0]])))
        elif dim == 2:
            # If we have a 2d array with an even no. of rows, append the first row reversed at the bottom
            if xfer_fn_array.shape[0] % 2 == 0:
                xfer_fn_array = np.vstack((xfer_fn_array, xfer_fn_array[0, ::-1]))
            # If we have a 2d array with an even no. of columns, append the first column reversed at the right
            if xfer_fn_array.shape[1] % 2 == 0:
                xfer_fn_array = np.hstack((xfer_fn_array, xfer_fn_array[::-1, 0][:, np.newaxis]))

        self.xfer_fn_array = xfer_fn_array
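
A small numpy-only illustration (hypothetical values, not tied to the Filter class) of what the 2D branch above does: each even dimension of the transfer function array is made odd by appending the first row or column, reversed:

import numpy as np

# A hypothetical 2x4 transfer function (even number of rows and columns).
h = np.arange(8, dtype=float).reshape(2, 4)

# Even number of rows: append the first row, reversed, at the bottom.
h = np.vstack((h, h[0, ::-1]))
# Even number of columns: append the first column, reversed, at the right.
h = np.hstack((h, h[::-1, 0][:, np.newaxis]))

print(h.shape)   # (3, 5): both dimensions are now odd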
Example #5
    def process_micrograph(self,
                           filepath,
                           return_centers=True,
                           return_img=False,
                           show_progress=True,
                           create_jpg=False):
        ensure(not all([return_centers, return_img]),
               "Cannot specify both return_centers and return_img")
        ensure(filepath.endswith('.mrc'),
               f"Input file doesn't seem to be in MRC format! ({filepath})")

        picker = Picker(self.particle_size, self.max_particle_size,
                        self.min_particle_size, self.query_image_size,
                        self.tau1, self.tau2, self.minimum_overlap_amount,
                        self.container_size, filepath, self.output_dir)

        logger.info('Computing scores for query images')
        score = picker.query_score(
            show_progress=show_progress
        )  # compute score using normalized cross-correlations

        while True:
            logger.info(
                f'Running svm with tau1={picker.tau1}, tau2={picker.tau2}')
            # train SVM classifier and classify all windows in micrograph
            segmentation = picker.run_svm(score)

            # If all windows are classified identically, update tau_1 or tau_2
            if np.all(segmentation):
                picker.tau2 += 500
            elif not np.any(segmentation):
                picker.tau1 += 500
            else:
                break

        logger.info('Discarding suspected artifacts')
        segmentation = picker.morphology_ops(segmentation)

        logger.info('Getting particle centers')
        centers = picker.extract_particles(segmentation)

        particle_image = None
        if create_jpg and self.output_dir is not None:
            particle_image = self.particle_image(picker.original_im,
                                                 picker.particle_size, centers)
            misc.imsave(
                os.path.join(
                    self.output_dir,
                    os.path.splitext(os.path.basename(picker.filename))[0] +
                    '_result.jpg'), particle_image)

        if return_centers:
            return centers
        elif return_img:
            if particle_image is not None:
                return particle_image
            else:
                return self.particle_image(picker.original_im,
                                           picker.particle_size, centers)
Example #6
def unique_coords_nd(N, ndim):
    """
    Generate unique polar coordinates from 2D or 3D rectangular coordinates.
    :param N: The side length of the square or cube.
    :param ndim: The number of dimensions, either 2 or 3.
    :return: The unique polar coordinates in 2D or 3D.
    """
    ensure(ndim in (2, 3),
           'Only two- or three-dimensional basis functions are supported.')
    ensure(N > 0, 'Number of grid points should be greater than 0.')

    if ndim == 2:
        grid = grid_2d(N)
        mask = grid['r'] <= 1

        # Minor differences in r/theta/phi values are unimportant for the purpose
        # of this function, so round off before proceeding

        # TODO: numpy boolean indexing will return a 1d array (like MATLAB)
        # However, it always searches in row-major order, unlike MATLAB (column-major),
        # with no options to change the search order. The results we'll be getting back are thus not comparable.
        # We transpose the appropriate ndarrays before applying the mask to obtain the same behavior as MATLAB.
        r = grid['r'].T[mask].round(5)
        phi = grid['phi'].T[mask].round(5)

        r_unique, r_idx = np.unique(r, return_inverse=True)
        ang_unique, ang_idx = np.unique(phi, return_inverse=True)

    else:
        grid = grid_3d(N)
        mask = grid['r'] <= 1

        # In Numpy, elements in the indexed array are always iterated and returned in row-major (C-style) order.
        # To emulate a behavior where iteration happens in Fortran order, we swap axes 0 and 2 of both the array
        # being indexed (r/theta/phi), as well as the mask itself.
        # TODO: This is only for the purpose of getting the same behavior as MATLAB while porting the code, and is
        # likely not needed in the final version.

        # Minor differences in r/theta/phi values are unimportant for the purpose of this function,
        # so we round off before proceeding.

        mask_ = np.swapaxes(mask, 0, 2)
        r = np.swapaxes(grid['r'], 0, 2)[mask_].round(5)
        theta = np.swapaxes(grid['theta'], 0, 2)[mask_].round(5)
        phi = np.swapaxes(grid['phi'], 0, 2)[mask_].round(5)

        r_unique, r_idx = np.unique(r, return_inverse=True)
        ang_unique, ang_idx = np.unique(np.vstack([theta, phi]),
                                        axis=1,
                                        return_inverse=True)

    return {
        'r_unique': r_unique,
        'ang_unique': ang_unique,
        'r_idx': r_idx,
        'ang_idx': ang_idx,
        'mask': mask
    }
Example #7
def vec_to_im(X):
    """
    Unroll vectors to images
    :param X: N^2-by-... array.
    :return: An N-by-N-by-... array.
    """
    shape = X.shape
    N = round(shape[0]**(1 / 2))
    ensure(N**2 == shape[0], "First dimension of X must be a perfect square")

    return m_reshape(X, (N, N) + shape[1:])
Example #8
def vec_to_vol(X):
    """
    Unroll vectors to volumes
    :param X: N^3-by-... array.
    :return: An N-by-N-by-N-by-... array.
    """
    shape = X.shape
    N = round(shape[0]**(1 / 3))
    ensure(N**3 == shape[0], "First dimension of X must be a perfect cube")

    return m_reshape(X, (N, N, N) + shape[1:])
Example #9
    def transform(self, signal):
        ensure(signal.shape == self.sz,
               f'Signal to be transformed must have shape {self.sz}')

        self._plan.f_hat = signal.astype('complex64')
        f = self._plan.trafo()

        if signal.dtype == np.float32:
            f = f.astype('complex64')

        return f
Example #10
def vol_to_vec(X):
    """
    Roll up volumes into vectors
    :param X: N-by-N-by-N-by-... array.
    :return: An N^3-by-... array.
    """
    shape = X.shape
    ensure(X.ndim >= 3, "Array should have at least 3 dimensions")
    ensure(shape[0] == shape[1] == shape[2], "Array should have first 3 dimensions identical")

    return m_reshape(X, (shape[0]**3,) + shape[3:])
Example #11
def im_to_vec(im):
    """
    Roll up images into vectors
    :param im: An N-by-N-by-... array.
    :return: An N^2-by-... array.
    """
    shape = im.shape
    ensure(im.ndim >= 2, "Array should have at least 2 dimensions")
    ensure(shape[0] == shape[1], "Array should have first 2 dimensions identical")

    return m_reshape(im, (shape[0]**2,) + shape[2:])
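
Assuming m_reshape behaves like a MATLAB-style (Fortran-order) reshape, the im_to_vec/vec_to_im round trip can be sketched with plain numpy:

import numpy as np

im = np.random.rand(4, 4, 3)                          # a stack of three 4x4 images
vec = im.reshape((16,) + im.shape[2:], order='F')     # roll up: 16-by-3, as im_to_vec does
im2 = vec.reshape((4, 4) + vec.shape[1:], order='F')  # unroll again, as vec_to_im does
assert np.allclose(im, im2)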
Example #12
    def set_max_resolution(self, max_L):
        ensure(
            max_L <= self.L,
            "Max desired resolution should be less than the current resolution"
        )
        self.L = max_L

        ds_factor = self._L / max_L
        self.filters.scale(ds_factor)
        self.offsets /= ds_factor

        # Invalidate images
        self._im = None
Example #13
def acorr(x, y, axes=None):
    """
    Calculate array correlation along given axes
    :param x: An array of arbitrary shape
    :param y: An array of same shape as x
    :param axes: The axes along which to compute the correlation. If None, the correlation is calculated along all axes.
    :return: The correlation of x and y along the specified axes.
    """
    ensure(x.shape == y.shape, "The shapes of the inputs have to match")

    if axes is None:
        axes = range(x.ndim)
    return ainner(x, y, axes) / (anorm(x, axes) * anorm(y, axes))
Example #14
def ainner(x, y, axes=None):
    """
    Calculate array inner product along given axes
    :param x: An array of arbitrary shape
    :param y: An array of same shape as x
    :param axes: The axes along which to compute the inner product. If None, the product is calculated along all axes.
    :return: The inner product of x and y along the specified axes.
    """
    ensure(x.shape == y.shape, "The shapes of the inputs have to match")

    if axes is None:
        axes = range(x.ndim)
    return np.tensordot(x, y, axes=(axes, axes))
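
A quick numpy-only sanity check of ainner/acorr above, assuming anorm(x, axes) is sqrt(ainner(x, x, axes)):

import numpy as np

x = np.random.rand(3, 4, 5)
y = np.random.rand(3, 4, 5)

axes = tuple(range(x.ndim))
inner = np.tensordot(x, y, axes=(axes, axes))            # ainner over all axes
corr = inner / np.sqrt(np.tensordot(x, x, axes=(axes, axes)) *
                       np.tensordot(y, y, axes=(axes, axes)))

assert np.isclose(np.tensordot(x, x, axes=(axes, axes)),
                  np.sum(x * x))                         # full inner product over all axes
assert abs(corr) <= 1 + 1e-12                            # Cauchy-Schwarz bound on the correlation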
Example #15
    def eval_clustering(self, vol_idx):
        """
        Evaluate clustering estimation
        :param vol_idx: Indexes of the volumes determined (0-indexed)
        :return: Accuracy [0-1] in terms of proportion of correctly assigned labels
        """
        ensure(
            len(vol_idx) == self.n,
            f'Need {self.n} vol indexes to evaluate clustering')
        # Remember that `states` is 1-indexed while vol_idx is 0-indexed
        correctly_classified = np.sum(self.states - 1 == vol_idx)

        return correctly_classified / self.n
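
A tiny, hypothetical illustration of the 1-indexed/0-indexed bookkeeping above:

import numpy as np

states = np.array([1, 2, 2, 1, 2])     # ground-truth labels, 1-indexed
vol_idx = np.array([0, 1, 0, 0, 1])    # estimated labels, 0-indexed

accuracy = np.sum(states - 1 == vol_idx) / len(states)
print(accuracy)                        # 0.8: four of the five labels agree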
Example #16
def vec_to_mat(vec, is_symmat=False):
    """
    Converts a vectorized matrix into a matrix
    :param vec: The vectorized representations. If the matrix is non-symmetric, this array has the dimensions
        N^2-by-..., but if the matrix is symmetric, the dimensions are N*(N+1)/2-by-... .
    :param is_symmat: True if the vectors represent symmetric matrices (default False)
    :return: The array of size N-by-N-by-... representing the matrices.
    """
    if not is_symmat:
        sz = vec.shape
        N = int(round(np.sqrt(sz[0])))
        ensure(sz[0] == N**2, "Vector must represent square matrix.")
        return m_reshape(vec, (N, N) + sz[1:])
    else:
        return vec_to_symmat(vec)
Example #17
    def __call__(self, im, start=0, num=None):
        ensure(
            im.ndim == 3,
            "A SourceFilter can only be called for a 3d volume representing a stack of images"
        )

        end = self.n
        if num is not None:
            end = min(start + num, self.n)
        all_idx = np.arange(start, end)

        unique_filters = np.unique(self.indices[all_idx]).astype('int')
        for k in unique_filters:
            idx_k = np.where(self.indices[all_idx] == k)[0]
            im[:, :, idx_k] = im_filter(im[:, :, idx_k], self.filters[k])
        return im
Example #18
def mat_to_vec(mat, is_symmat=False):
    """
    Converts a matrix into vectorized form
    :param mat: An array of size N-by-N-by-... containing the matrices to be vectorized.
    :param is_symmat: Specifies whether the matrices are symmetric/Hermitian, in which case they are stored in packed
        form using symmat_to_vec (default False).
    :return: The vectorized form of the matrices, with dimension N^2-by-... or N*(N+1)/2-by-... depending on the value
        of is_symmat.
    """
    if not is_symmat:
        sz = mat.shape
        N = sz[0]
        ensure(sz[1] == N, "Matrix must be square")
        return m_reshape(mat, (N**2, ) + sz[2:])
    else:
        return symmat_to_vec(mat)
Example #19
    def convolve_volume_matrix(self, x):
        """
        Convolve volume matrix with kernel
        :param x: An N-by-...-by-N (6 dimensions) volume matrix to be convolved.
        :return: The original volume matrix convolved by the kernel with the same dimensions as before.
        """
        shape = x.shape
        N = shape[0]
        kernel_f = self.kernel
        ensure(
            len(set(shape[i] for i in range(6))) == 1,
            "Volume matrix must be cubic and square")

        # TODO from MATLAB code: Deal with rolled dimensions
        is_singleton = len(shape) == 6
        N_ker = kernel_f.shape[0]

        # Note from MATLAB code:
        # Order is important here.  It's about 20% faster to run from 1 through 6 compared with 6 through 1.
        # TODO: Experiment with scipy order; try overwrite_x argument
        x = fft(x, N_ker, 0, overwrite_x=True)
        x = fft(x, N_ker, 1, overwrite_x=True)
        x = fft(x, N_ker, 2, overwrite_x=True)
        x = fft(x, N_ker, 3, overwrite_x=True)
        x = fft(x, N_ker, 4, overwrite_x=True)
        x = fft(x, N_ker, 5, overwrite_x=True)

        x *= kernel_f

        x = ifft(x, None, 5, overwrite_x=True)
        x = x[:, :, :, :, :, :N]
        x = ifft(x, None, 4, overwrite_x=True)
        x = x[:, :, :, :, :N, :]
        x = ifft(x, None, 3, overwrite_x=True)
        x = x[:, :, :, :N, :, :]
        x = ifft(x, None, 2, overwrite_x=True)
        x = x[:, :, :N, :, :, :]
        x = ifft(x, None, 1, overwrite_x=True)
        x = x[:, :N, :, :, :, :]
        x = ifft(x, None, 0, overwrite_x=True)
        x = x[:N, :, :, :, :, :]

        x = np.real(x)

        return x
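
A one-dimensional sketch of the pad-multiply-crop pattern above, using plain numpy.fft and scipy rather than the module's fft/ifft wrappers; reading the kernel as a circulant embedding of a Toeplitz matrix is an assumption made for this illustration:

import numpy as np
from scipy.linalg import toeplitz

# Toeplitz matrix-vector product via embedding in a circulant matrix of twice the size.
N = 4
N_ker = 2 * N
c = np.random.rand(N)                 # first column of the Toeplitz matrix
r = np.random.rand(N)                 # first row of the Toeplitz matrix
r[0] = c[0]
T = toeplitz(c, r)

# First column of the 2N-by-2N circulant embedding, and its transform (the "kernel").
circ = np.concatenate([c, [0.0], r[1:][::-1]])
kernel_f = np.fft.fft(circ)

x = np.random.rand(N)
y = np.fft.fft(x, N_ker)              # zero-pad x to length N_ker and transform
y *= kernel_f                         # pointwise multiply in Fourier space
y = np.real(np.fft.ifft(y))[:N]       # invert and keep only the first N entries

assert np.allclose(y, T @ x)          # matches the dense Toeplitz product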
Example #20
    def process_micrograph(self,
                           filepath,
                           return_centers=True,
                           show_progress=True):
        ensure(filepath.endswith('.mrc'),
               f"Input file doesn't seem to be an MRC format! ({filepath})")

        picker = Picker(self.particle_size, self.max_particle_size,
                        self.min_particle_size, self.query_image_size,
                        self.tau1, self.tau2, self.minimum_overlap_amount,
                        self.container_size, filepath, self.output_dir)

        logger.info('Reading MRC file')
        im = picker.read_mrc()

        logger.info('Computing scores for query images')
        score = picker.query_score(
            im, show_progress=show_progress
        )  # compute score using normalized cross-correlations

        while True:
            logger.info(
                f'Running svm with tau1={picker.tau1}, tau2={picker.tau2}')
            # train SVM classifier and classify all windows in micrograph
            segmentation = picker.run_svm(im, score)

            # If all windows are classified identically, update tau_1 or tau_2
            if np.all(segmentation):
                picker.tau2 += 500
            elif not np.any(segmentation):
                picker.tau1 += 500
            else:
                break

        logger.info('Discarding suspected artifacts')
        segmentation = picker.morphology_ops(segmentation)

        logger.info('Getting particle centers')
        centers = picker.extract_particles(segmentation, self.create_jpg)

        if return_centers:
            return centers
Example #21
    def _shrink(self, covar_b_coeff, noise_variance, method=None):
        """
        Shrink covariance matrix
        :param covar_b_coeff: Outer products of the mean-subtracted images
        :param noise_variance: Noise variance
        :param method: One of None/'frobenius_norm'/'operator_norm'/'soft_threshold'
        :return: Shrunk covariance matrix
        """
        ensure(
            method
            in (None, 'frobenius_norm', 'operator_norm', 'soft_threshold'),
            'Unsupported shrink method')

        An = self.basis.mat_evaluate_t(self.mean_kernel.toeplitz())
        if method is None:
            covar_b_coeff -= noise_variance * An
        else:
            raise NotImplementedError('Only default shrink method supported.')

        return covar_b_coeff
Example #22
def symmat_to_vec(mat):
    """
    Packs a symmetric matrix into a lower triangular vector
    :param mat: An array of size N-by-N-by-... where the first two dimensions constitute symmetric or
        Hermitian matrices.
    :return: A vector of size N*(N+1)/2-by-... consisting of the lower triangular part of each matrix.

    Note that the acrobatics performed here (swapaxes/triu instead of tril, etc.) are intended to yield
    column-major ordering of elements (for behavior consistent with MATLAB), since masking in numpy only returns
    data in row-major order.
    """
    N = mat.shape[0]
    ensure(mat.shape[1] == N, "Matrix must be square")

    mat, sz_roll = unroll_dim(mat, 3)
    triu_indices = np.triu_indices(N)
    vec = mat.swapaxes(0, 1)[triu_indices]
    vec = roll_dim(vec, sz_roll)

    return vec
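
A numpy-only check of the packing order produced above (unroll_dim/roll_dim omitted): swapaxes plus triu_indices reads out the lower triangle in column-major order:

import numpy as np

m = np.array([[1., 2., 3.],
              [2., 4., 5.],
              [3., 5., 6.]])
vec = m.swapaxes(0, 1)[np.triu_indices(3)]
# Column-major lower triangle: [m00, m10, m20, m11, m21, m22]
assert np.allclose(vec, [1, 2, 3, 4, 5, 6])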
Example #23
    def evaluate(self, omega):
        """
        Evaluate the filter at specified frequencies.
        :param omega: An array of size 2-by-n representing the spatial frequencies at which the filter
            is to be evaluated. These are normalized so that pi is equal to the Nyquist frequency.
        :return: The value of the filter at the specified frequencies.
        """
        ensure(omega.shape[0] == self.dim, f'Omega must be of size {self.dim} x n')
        if self.radial:
            omega = np.sqrt(np.sum(omega ** 2, axis=0))
            omega, idx = np.unique(omega, return_inverse=True)
            omega = np.vstack((omega, np.zeros_like(omega)))

        h = self._evaluate(omega)

        if self.power != 1:
            h = h ** self.power

        if self.radial:
            h = np.take(h, idx)
        return h
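
The deduplication in the radial branch above can be illustrated with plain numpy, using a Gaussian as a stand-in for self._evaluate:

import numpy as np

omega = np.random.randn(2, 1000)
r = np.sqrt(np.sum(omega ** 2, axis=0))

# Evaluate only at unique radii, then expand back with the inverse index.
r_unique, idx = np.unique(r, return_inverse=True)
h_unique = np.exp(-r_unique ** 2 / 2)       # stand-in for self._evaluate
h = np.take(h_unique, idx)

assert np.allclose(h, np.exp(-r ** 2 / 2))  # same result as evaluating everywhere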
Example #24
def im_translate(im, shifts):
    """
    Translate image by shifts
    :param im: An array of size L-by-L-by-n containing images to be translated.
    :param shifts: An array of size 2-by-n specifying the shifts in pixels.
        Alternatively, it can be a column vector of length 2, in which case the same shift is applied to each image.
    :return: The images translated by the shifts, with periodic boundaries.
    """

    n_im = im.shape[-1]
    n_shifts = shifts.shape[-1]

    ensure(shifts.shape[0] == 2, "shifts must be 2xn")
    ensure(n_shifts == 1 or n_shifts == n_im,
           "no. of shifts must be 1 or match the no. of images")
    ensure(im.shape[0] == im.shape[1], "images must be square")

    L = im.shape[0]
    im_f = fft2(im, axes=(0, 1))
    grid_1d = ifftshift(np.ceil(np.arange(-L / 2, L / 2))) * 2 * np.pi / L
    om_x, om_y = np.meshgrid(grid_1d, grid_1d, indexing='ij')

    phase_shifts_x = np.broadcast_to(-shifts[0, :], (L, L, n_shifts))
    phase_shifts_y = np.broadcast_to(-shifts[1, :], (L, L, n_shifts))
    phase_shifts = (om_x[:, :, np.newaxis] *
                    phase_shifts_x) + (om_y[:, :, np.newaxis] * phase_shifts_y)

    mult_f = np.exp(-1j * phase_shifts)
    im_translated_f = im_f * mult_f
    im_translated = ifft2(im_translated_f, axes=(0, 1))
    im_translated = np.real(im_translated)

    return im_translated
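
A self-contained check of the phase construction above, with numpy.fft standing in for the module's fft2/ifft2/ifftshift: under the sign conventions used here, an integer shift is a circular shift, so it agrees with np.roll by the negated shift:

import numpy as np

L, n = 8, 2
im = np.random.rand(L, L, n)
shifts = np.array([[2], [3]])

im_f = np.fft.fft2(im, axes=(0, 1))
grid_1d = np.fft.ifftshift(np.ceil(np.arange(-L / 2, L / 2))) * 2 * np.pi / L
om_x, om_y = np.meshgrid(grid_1d, grid_1d, indexing='ij')
phase = om_x[:, :, None] * -shifts[0, :] + om_y[:, :, None] * -shifts[1, :]
im_translated = np.real(np.fft.ifft2(im_f * np.exp(-1j * phase), axes=(0, 1)))

assert np.allclose(im_translated, np.roll(im, (-2, -3), axis=(0, 1)))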
Example #25
    def convolve_volume(self, x):
        """
        Convolve volume with kernel
        :param x: An N-by-N-by-N-by-... array of volumes to be convolved.
        :return: The original volumes convolved by the kernel with the same dimensions as before.
        """
        N = x.shape[0]
        kernel_f = self.kernel
        N_ker = kernel_f.shape[0]

        x, sz_roll = unroll_dim(x, 4)
        ensure(x.shape[0] == x.shape[1] == x.shape[2] == N,
               "Volumes in x must be cubic")
        ensure(kernel_f.ndim == 3, "Convolution kernel must be cubic")
        ensure(
            len(set(kernel_f.shape)) == 1, "Convolution kernel must be cubic")

        is_singleton = x.ndim == 3

        if is_singleton:
            x = fftn(x, (N_ker, N_ker, N_ker))
        else:
            raise NotImplementedError('not yet')

        x = x * kernel_f

        if is_singleton:
            x = np.real(ifftn(x))
            x = x[:N, :N, :N]
        else:
            raise NotImplementedError('not yet')

        x = roll_dim(x, sz_roll)

        return x
Example #26
    def expand(self, x):
        """
        Obtain expansion coefficients in Fourier Bessel basis from those in standard 3D coordinate basis.

        This is similar to evaluate_t, but with greater accuracy, obtained by solving the linear system Ax = b
        with the conjugate gradient (CG) method.

        :param x: An array whose first three dimensions are to be expanded in FB basis.
             These dimensions must equal `self.sz`.
        :return: The coefficients of `x` expanded in the FB basis. The first dimension of the return value has size
             `basis_count`; the second and higher dimensions correspond to the higher dimensions of `x`.

        """
        # TODO: this function could be moved to the base class if all standard and fast versions of the 2D and 3D
        #       bases use the same data structures for x and v.
        ensure(x.shape[:self.d] == self.sz,
               f'First {self.d} dimensions of x must match {self.sz}.')

        operator = LinearOperator(
            shape=(self.basis_count, self.basis_count),
            matvec=lambda v: self.evaluate_t(self.evaluate(v)))

        # TODO: (from MATLAB implementation) - Check that this tolerance makes sense for multiple columns in v
        tol = 10 * np.finfo(x.dtype).eps
        logger.info('Expanding array in basis')

        # number of image samples
        n_data = np.size(x, self.d)
        v = np.zeros((self.basis_count, n_data), dtype=x.dtype)

        for isample in range(0, n_data):
            b = self.evaluate_t(x[..., isample])
            # TODO: check whether supplying an initial guess x0 improves the results.
            v[..., isample], info = cg(operator, b, tol=tol)
            if info != 0:
                raise RuntimeError('Unable to converge!')

        # Return the coefficient array v, whose first dimension has size self.basis_count
        return v
Example #27
    def expand(self, v):
        """
        Expand array in basis

        If `x` is an array of size `self.sz`-by-..., `B` is the change-of-basis matrix of this basis, and `v` is the
        matrix of size `basis_count`-by-... of expansion coefficients, the function calculates

            v = (B' * B)^(-1) * B' * x

        where the columns of `B` and `x` are read as vectorized arrays.

        :param v: An array whose first few dimensions are to be expanded in this basis.
            These dimensions must equal `self.sz`.
        :return: The coefficients of `v` expanded in this basis. If more than one array of size `self.sz` is found in
            `v`, the second and higher dimensions of the return value correspond to those higher dimensions of `v`.

        .. seealso:: evaluate
        """
        ensure(v.shape[:self.d] == self.sz,
               f'First {self.d} dimensions of v must match {self.sz}.')

        v, sz_roll = unroll_dim(v, self.d + 1)
        b = self.evaluate_t(v)
        operator = LinearOperator(
            shape=(self.basis_count, self.basis_count),
            matvec=lambda x: self.evaluate_t(self.evaluate(x)))

        # TODO: (from MATLAB implementation) - Check that this tolerance makes sense for multiple columns in v
        tol = 10 * np.finfo(v.dtype).eps
        logger.info('Expanding array in basis')
        v, info = cg(operator, b, tol=tol)

        if info != 0:
            raise RuntimeError('Unable to converge!')

        v = roll_dim(v, sz_roll)

        return v
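
A dense numpy/scipy sketch of the system being solved above: with a hypothetical change-of-basis matrix B, `evaluate` acts like B @ v and `evaluate_t` like B.T @ x, so running cg on v -> B.T @ (B @ v) solves (B' * B) v = B' * x:

import numpy as np
from scipy.sparse.linalg import LinearOperator, cg

rng = np.random.default_rng(0)
n_pixels, basis_count = 64, 10
B = rng.standard_normal((n_pixels, basis_count))

x = B @ rng.standard_normal(basis_count)     # a signal that lies in the span of B
b = B.T @ x                                  # right-hand side, like evaluate_t(x)
operator = LinearOperator(shape=(basis_count, basis_count),
                          matvec=lambda v: B.T @ (B @ v))

v, info = cg(operator, b, tol=1e-12)
assert info == 0
assert np.allclose(B @ v, x)                 # the coefficients reproduce the signal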
Example #28
    def transform(self, signal):
        ensure(signal.shape == self.sz,
               f'Signal to be transformed must have shape {self.sz}')

        epsilon = max(self.epsilon, np.finfo(signal.dtype).eps)

        # Forward transform functions in finufftpy have signatures of the form:
        # (x, y, z, c, isign, eps, f, ...)
        # (x, y,    c, isign, eps, f, ...)
        # (x,       c, isign, eps, f, ...)
        # Where f is a Fortran-order ndarray of the appropriate dimensions
        # We form these function signatures here by tuple-unpacking

        result = np.zeros(self.num_pts).astype('complex128')

        result_code = self.transform_function(*self.fourier_pts, result, -1,
                                              epsilon, signal)

        if result_code != 0:
            raise RuntimeError(
                f'FINufft transform failed. Result code {result_code}')

        return result
Example #29
    def __init__(self, filters, indices=None, n=None):
        """
        :param filters: An iterable of Filter objects.
        :param indices: An iterable of indices representing the 0-indexed indices of an image stack
            on which to apply the filters. If unspecified, `n` must be supplied, and individual filters are applied
            randomly.
        :param n: An integer representing the depth of the image stack on which this SourceFilter is applied.
            Not needed if `indices` are supplied.
        """
        if indices is None:
            ensure(n is not None,
                   "Either indices or n must be supplied for a SourceFilter")
            # Assign filters randomly.
            # For MATLAB compatibility, randi returns numbers in the range [1, iMax]; decrement by one for our purposes
            indices = randi(len(filters), n, seed=0) - 1
        else:
            ensure(n is None,
                   "Cannot supply both indices and n for a SourceFilter")
            n = len(indices)

        self.filters = filters
        self.indices = indices
        self.n = n
Example #30
    def expand_t(self, v):
        ensure(v.shape[0] == self.basis_count,
               f'First dimension of v must be {self.basis_count}')

        v, sz_roll = unroll_dim(v, 2)
        b = im_to_vec(self.evaluate(v))

        operator = LinearOperator(
            shape=(self.N**2, self.N**2),
            matvec=lambda x: im_to_vec(
                self.evaluate(self.evaluate_t(vec_to_im(x)))))

        # TODO: (from MATLAB implementation) - Check that this tolerance makes sense for multiple columns in v
        tol = 10 * np.finfo(v.dtype).eps
        logger.info('Expanding array in dual basis')
        v, info = cg(operator, b, tol=tol)

        if info != 0:
            raise RuntimeError('Unable to converge!')

        v = roll_dim(v, sz_roll)
        x = vec_to_im(v)

        return x