Example #1
def _smooth(image, sigma, mode, cval, multichannel=None):
    """Return image with each channel smoothed by the Gaussian filter."""
    smoothed = cp.empty_like(image)

    # apply Gaussian filter to all channels independently
    if multichannel:
        sigma = (sigma,) * (image.ndim - 1) + (0,)
    ndi.gaussian_filter(image, sigma, output=smoothed,
                        mode=mode, cval=cval)
    return smoothed
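The key detail in `_smooth` is that appending a sigma of 0 for the channel axis makes the Gaussian skip that axis entirely, so color channels are never mixed. A minimal CPU sketch of the same trick, assuming SciPy's `gaussian_filter` (which the CuPy `ndi` used here mirrors):

import numpy as np
from scipy import ndimage as ndi

rgb = np.random.rand(32, 32, 3)
sigma = (2.0,) * (rgb.ndim - 1) + (0,)   # (2.0, 2.0, 0): smooth rows/cols, skip channels
smoothed = ndi.gaussian_filter(rgb, sigma, mode='nearest')

# Each channel matches filtering it on its own, i.e. no cross-channel bleed.
for c in range(3):
    np.testing.assert_allclose(
        smoothed[..., c], ndi.gaussian_filter(rgb[..., c], 2.0, mode='nearest'))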
Example #2
def forward(self, add, model, data):
    self.checkDomainRange(model, data)
    if not add:
        data.zero()
    # Getting Ndarrays
    model_arr = model.getNdArray()
    data_arr = data.getNdArray()
    data_arr[:] += self.scaling * gaussian_filter(model_arr, sigma=self.sigma)
    return
Example #3
def _unsharp_mask_single_channel(image, radius, amount, vrange):
    """Single channel implementation of the unsharp masking filter."""

    blurred = gaussian_filter(image, sigma=radius, mode='reflect')

    result = image + (image - blurred) * amount
    if vrange is not None:
        return cp.clip(result, vrange[0], vrange[1], out=result)
    return result
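The core of the filter is the single line `result = image + (image - blurred) * amount`. A small NumPy/SciPy sketch (a CPU stand-in for the CuPy version above) shows why clipping to `vrange` matters: sharpening overshoots the original value range on both sides.

import numpy as np
from scipy.ndimage import gaussian_filter

image = np.zeros((9, 9))
image[4, 4] = 1.0                              # a single bright pixel
blurred = gaussian_filter(image, sigma=1.0, mode='reflect')
sharpened = image + (image - blurred) * 1.0    # amount = 1.0

# The peak overshoots 1.0 and its neighbours dip below 0, hence the cp.clip call.
print(sharpened[4, 4] > 1.0, sharpened[4, 3] < 0.0)   # True True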
Example #4
def test_scale_space():
    num_levels = 3
    for test_class in [ScaleSpace, IsotropicScaleSpace]:
        for dim in [2, 3]:
            print(dim, test_class)
            if dim == 2:
                moving, static = get_synthetic_warped_circle(1)
            else:
                moving, static = get_synthetic_warped_circle(30)
            moving = cp.asarray(moving)
            static = cp.asarray(static)
            input_spacing = np.array([1.1, 1.2, 1.5])[:dim]
            grid2world = np.diag(tuple(input_spacing) + (1.0, ))

            original = moving
            if test_class is ScaleSpace:
                ss = test_class(original, num_levels, grid2world,
                                input_spacing)
            elif test_class is IsotropicScaleSpace:
                factors = [4, 2, 1]
                sigmas = [3.0, 1.0, 0.0]
                ss = test_class(original, factors, sigmas, grid2world,
                                input_spacing)
            for level in range(num_levels):
                # Verify sigmas and images are consistent
                sigmas = ss.get_sigmas(level)
                expected = ndi.gaussian_filter(original, sigmas)
                expected = (expected - expected.min()) / (expected.max() -
                                                          expected.min())
                actual = ss.get_image(level)
                cp.testing.assert_array_almost_equal(actual, expected)

                # Verify scalings and spacings are consistent
                spacings = ss.get_spacing(level)
                scalings = ss.get_scaling(level)
                expected = ss.get_spacing(0) * scalings
                actual = ss.get_spacing(level)
                cp.testing.assert_array_almost_equal(actual, expected)

                # Verify affine and affine_inv are consistent
                affine = ss.get_affine(level)
                affine_inv = ss.get_affine_inv(level)
                expected = np.eye(1 + dim)
                actual = affine.dot(affine_inv)
                cp.testing.assert_array_almost_equal(actual, expected)

                # Verify affine consistent with spacings
                exp_dir, expected_sp = get_direction_and_spacings(affine, dim)
                actual_sp = spacings
                cp.testing.assert_array_almost_equal(actual_sp, expected_sp)
Example #5
def test_image_shape():
    """Test that shape of output image in deconvolution is same as input.

    This addresses issue #1172.
    """
    point = cp.zeros((5, 5), np.float64)
    point[2, 2] = 1.0
    psf = ndi.gaussian_filter(point, sigma=1.0)
    # image shape: (45, 45), as reported in #1172
    image = cp.asarray(test_img[65:165, 215:315])  # just the face
    image_conv = ndi.convolve(image, psf)
    deconv_sup = restoration.wiener(image_conv, psf, 1)
    deconv_un = restoration.unsupervised_wiener(image_conv, psf)[0]
    # test the shape
    assert image.shape == deconv_sup.shape
    assert image.shape == deconv_un.shape
    # test the reconstruction error
    sup_relative_error = cp.abs(deconv_sup - image) / image
    un_relative_error = cp.abs(deconv_un - image) / image
    cp.testing.assert_array_less(cp.median(sup_relative_error), 0.1)
    cp.testing.assert_array_less(cp.median(un_relative_error), 0.1)
Example #6
def gaussian(image, sigma=1, output=None, mode='nearest', cval=0,
             multichannel=None, preserve_range=False, truncate=4.0):
    """Multi-dimensional Gaussian filter.

    Parameters
    ----------
    image : array-like
        Input image (grayscale or color) to filter.
    sigma : scalar or sequence of scalars, optional
        Standard deviation for Gaussian kernel. The standard
        deviations of the Gaussian filter are given for each axis as a
        sequence, or as a single number, in which case it is equal for
        all axes.
    output : array, optional
        The ``output`` parameter passes an array in which to store the
        filter output.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The ``mode`` parameter determines how the array borders are
        handled, where ``cval`` is the value when mode is equal to
        'constant'. Default is 'nearest'.
    cval : scalar, optional
        Value to fill past edges of input if ``mode`` is 'constant'. Default
        is 0.0
    multichannel : bool, optional (default: None)
        Whether the last axis of the image is to be interpreted as multiple
        channels. If True, each channel is filtered separately (channels are
        not mixed together). Only 3 channels are supported. If ``None``,
        the function will attempt to guess this, and raise a warning if
        ambiguous, when the array has shape (M, N, 3).
    preserve_range : bool, optional
        Whether to keep the original range of values. Otherwise, the input
        image is converted according to the conventions of ``img_as_float``.
        Also see
        https://scikit-image.org/docs/dev/user_guide/data_types.html
    truncate : float, optional
        Truncate the filter at this many standard deviations.

    Returns
    -------
    filtered_image : ndarray
        the filtered array

    Notes
    -----
    This function is a wrapper around :func:`cupyx.scipy.ndimage.gaussian_filter`.

    Integer arrays are converted to float.

    ``output`` should have a floating-point data type, since ``gaussian``
    converts the provided ``image`` to float. If ``output`` is not provided,
    another array will be allocated and returned as the result.

    The multi-dimensional filter is implemented as a sequence of
    one-dimensional convolution filters. The intermediate arrays are
    stored in the same data type as the output. Therefore, for output
    types with a limited precision, the results may be imprecise
    because intermediate results may be stored with insufficient
    precision.

    Examples
    --------

    >>> import cupy as cp
    >>> a = cp.zeros((3, 3))
    >>> a[1, 1] = 1
    >>> a
    array([[0., 0., 0.],
           [0., 1., 0.],
           [0., 0., 0.]])
    >>> gaussian(a, sigma=0.4)  # mild smoothing
    array([[0.00163116, 0.03712502, 0.00163116],
           [0.03712502, 0.84496158, 0.03712502],
           [0.00163116, 0.03712502, 0.00163116]])
    >>> gaussian(a, sigma=1)  # more smoothing
    array([[0.05855018, 0.09653293, 0.05855018],
           [0.09653293, 0.15915589, 0.09653293],
           [0.05855018, 0.09653293, 0.05855018]])
    >>> # Several modes are possible for handling boundaries
    >>> gaussian(a, sigma=1, mode='reflect')
    array([[0.08767308, 0.12075024, 0.08767308],
           [0.12075024, 0.16630671, 0.12075024],
           [0.08767308, 0.12075024, 0.08767308]])
    >>> # For RGB images, each is filtered separately
    >>> from skimage.data import astronaut
    >>> image = cp.array(astronaut())
    >>> filtered_img = gaussian(image, sigma=1, multichannel=True)

    """

    spatial_dims = None
    try:
        spatial_dims = _guess_spatial_dimensions(image)
    except ValueError:
        spatial_dims = image.ndim
    if spatial_dims is None and multichannel is None:
        msg = ("Images with dimensions (M, N, 3) are interpreted as 2D+RGB "
               "by default. Use `multichannel=False` to interpret as "
               "3D image with last dimension of length 3.")
        warn(RuntimeWarning(msg))
        multichannel = True
    # CuPy Backend: refactor to avoid overhead of cp.any(cp.asarray(sigma))
    sigma_msg = "Sigma values less than zero are not valid"
    if not isinstance(sigma, Iterable):
        if sigma < 0:
            raise ValueError(sigma_msg)
    elif any(s < 0 for s in sigma):
        raise ValueError(sigma_msg)
    if multichannel:
        # do not filter across channels
        if not isinstance(sigma, Iterable):
            sigma = [sigma] * (image.ndim - 1)
        if len(sigma) != image.ndim:
            sigma = tuple(sigma) + (0,)  # zero on channels axis
        sigma = tuple(sigma)
    image = convert_to_float(image, preserve_range)
    if output is None:
        output = cp.empty_like(image)
    elif not np.issubdtype(output.dtype, np.floating):
        raise ValueError("Provided output data type is not float")
    ndi.gaussian_filter(image, sigma, output=output, mode=mode, cval=cval,
                        truncate=truncate)
    return output
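A minimal usage sketch for the wrapper above, assuming it is exposed as `cucim.skimage.filters.gaussian` and a CUDA device is available; it illustrates the floating-point requirement enforced by the "Provided output data type is not float" check.

import cupy as cp
from cucim.skimage.filters import gaussian

image = cp.random.random((64, 64), dtype=cp.float32)
out = cp.empty_like(image)              # must be a floating-point array
gaussian(image, sigma=2.0, output=out)  # result is written into `out` and also returned

# Passing an integer output array, e.g. cp.empty((64, 64), dtype=cp.uint8),
# would instead raise ValueError("Provided output data type is not float").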
Example #7
def pca_noise_estimate(
    data,
    gtab,
    patch_radius=1,
    correct_bias=True,
    smooth=2,
    *,
    allow_single=False,
):
    """ PCA based local noise estimation.

    Parameters
    ----------
    data : 4D array
        The input dMRI data.
    gtab : gradient table object
        Gradient information for the data; provides the bvals and bvecs of the
        diffusion data, which are needed here to select between the noise
        estimation methods.
    patch_radius : int
        The radius of the local patch to be taken around each voxel (in
        voxels). Default: 1 (estimate noise in blocks of 3x3x3 voxels).
    correct_bias : bool
        Whether to correct for bias due to Rician noise. This is an
        implementation of equation 8 in [1]_.
    smooth : int
        Radius of a Gaussian smoothing filter to apply to the noise estimate
        before returning. Default: 2.

    Returns
    -------
    sigma_corr: 3D array
        The local noise standard deviation estimate.

    References
    ----------
    .. [1] Manjon JV, Coupe P, Concha L, Buades A, Collins DL "Diffusion
           Weighted Image Denoising Using Overcomplete Local PCA". PLoS ONE
           8(9): e73021. doi:10.1371/journal.pone.0073021.
    """
    # first identify the number of the b0 images
    K = np.count_nonzero(gtab.b0s_mask)

    if K > 1:
        # If multiple b0 values then use MUBE noise estimate
        data0 = data[..., cp.asarray(gtab.b0s_mask)]
        # sibe = False

    else:
        # if only one b0 value then SIBE noise estimate
        data0 = data[..., cp.asarray(~gtab.b0s_mask)]
        # sibe = True

    n0, n1, n2, n3 = data0.shape
    nsamples = n0 * n1 * n2

    if allow_single:
        data_dtype = cp.promote_types(data0.dtype, cp.float32)
    else:
        data_dtype = cp.float64
    data0 = data0.astype(data_dtype, copy=False)
    X = data0.reshape(nsamples, n3)
    # Demean:
    X = X - X.mean(axis=0, keepdims=True)
    # compute the covariance matrix, x
    r = cp.dot(X.T, X)
    # (symmetric) eigen decomposition
    w, v = cp.linalg.eigh(r)
    # project smallest eigenvector/value onto the data space
    I = X.dot(v[:, 0:1]).reshape(n0, n1, n2)
    del r, w, v

    s = 2 * patch_radius + 1
    sum_reg = ndi.uniform_filter(I, size=s)
    sigma_sq = I - sum_reg
    sigma_sq *= sigma_sq

    # find the SNR and make the correction for bias due to Rician noise:
    if correct_bias:
        mean = ndi.uniform_filter(data0.mean(-1), size=s, mode="reflect")
        snr = mean / cp.sqrt(sigma_sq)
        snr_sq = snr * snr
        # snr_sq = cp.asnumpy(snr_sq)  # transfer to host to use sps.iv
        # xi is practically equal to 1 above 37.4, and we overflow, raising
        # warnings and creating not-a-numbers.
        # Instead, we will replace these values with 1 below
        with np.errstate(over="ignore", invalid="ignore"):
            tmp1 = snr_sq / 4
            tmp = sps.i0(tmp1)
            tmp *= 2 + snr_sq
            tmp += snr_sq * sps.i1(tmp1)
            tmp *= tmp
            tmp *= (np.pi / 8) * cp.exp(-snr_sq / 2)
            xi = 2 + snr_sq - tmp
            xi = xi.astype(data_dtype, copy=False)
            # xi = (2 + snr_sq - (np.pi / 8) * cp.exp(-snr_sq / 2) *
            #       ((2 + snr_sq) * sps.i0(snr_sq / 4) +
            #       (snr_sq) * sps.i1(snr_sq / 4)) ** 2).astype(float)
        xi[snr > 37.4] = 1
        sigma_corr = sigma_sq / xi
        sigma_corr[cp.isnan(sigma_corr)] = 0
    else:
        sigma_corr = sigma_sq

    if smooth is not None:
        ndi.gaussian_filter(sigma_corr, smooth, output=sigma_corr)

    cp.sqrt(sigma_corr, out=sigma_corr)
    return sigma_corr
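The bias correction above uses the closed form kept in the commented-out lines: xi(SNR) = 2 + SNR^2 - (pi/8) * exp(-SNR^2/2) * ((2 + SNR^2) * I0(SNR^2/4) + SNR^2 * I1(SNR^2/4))^2, i.e. equation 8 of [1]_. A CPU-only sketch with SciPy shows xi rising from about 0.43 at SNR 0 toward 1, which is why values with snr > 37.4 are simply set to 1:

import numpy as np
import scipy.special as sps

def rician_xi(snr):
    # Rician correction factor as a function of SNR (closed form from the code above)
    snr_sq = snr ** 2
    return (2 + snr_sq
            - (np.pi / 8) * np.exp(-snr_sq / 2)
            * ((2 + snr_sq) * sps.i0(snr_sq / 4) + snr_sq * sps.i1(snr_sq / 4)) ** 2)

print(rician_xi(np.array([0.0, 1.0, 5.0, 20.0])))  # ~0.43 at SNR 0, approaching 1 for large SNR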
Example #8
def resize(image,
           output_shape,
           order=None,
           mode='reflect',
           cval=0,
           clip=True,
           preserve_range=False,
           anti_aliasing=None,
           anti_aliasing_sigma=None):
    """Resize image to match a certain size.

    Performs interpolation to up-size or down-size N-dimensional images. Note
    that anti-aliasing should be enabled when down-sizing images to avoid
    aliasing artifacts. For down-sampling with an integer factor also see
    `skimage.transform.downscale_local_mean`.

    Parameters
    ----------
    image : ndarray
        Input image.
    output_shape : tuple or ndarray
        Size of the generated output image `(rows, cols[, ...][, dim])`. If
        `dim` is not provided, the number of channels is preserved. If the
        number of input channels does not equal the number of output channels,
        an n-dimensional interpolation is applied.

    Returns
    -------
    resized : ndarray
        Resized version of the input.

    Other parameters
    ----------------
    order : int, optional
        The order of the spline interpolation, default is 0 if
        image.dtype is bool and 1 otherwise. The order has to be in
        the range 0-5. See `skimage.transform.warp` for detail.
    mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}, optional
        Points outside the boundaries of the input are filled according
        to the given mode.  Modes match the behaviour of `numpy.pad`.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    clip : bool, optional
        Whether to clip the output to the range of values of the input image.
        This is enabled by default, since higher order interpolation may
        produce values outside the given input range.
    preserve_range : bool, optional
        Whether to keep the original range of values. Otherwise, the input
        image is converted according to the conventions of `img_as_float`.
        Also see https://scikit-image.org/docs/dev/user_guide/data_types.html
    anti_aliasing : bool, optional
        Whether to apply a Gaussian filter to smooth the image prior
        to down-scaling. It is crucial to filter when down-sampling
        the image to avoid aliasing artifacts. If input image data
        type is bool, no anti-aliasing is applied.
    anti_aliasing_sigma : {float, tuple of floats}, optional
        Standard deviation for Gaussian filtering to avoid aliasing artifacts.
        By default, this value is chosen as (s - 1) / 2, where s is the
        down-scaling factor (s > 1). For the up-size case (s < 1), no
        anti-aliasing is performed prior to rescaling.

    Notes
    -----
    Modes 'reflect' and 'symmetric' are similar, but differ in whether the edge
    pixels are duplicated during the reflection.  As an example, if an array
    has values [0, 1, 2] and was padded to the right by four values using
    symmetric, the result would be [0, 1, 2, 2, 1, 0, 0], while for reflect it
    would be [0, 1, 2, 1, 0, 1, 2].

    Examples
    --------
    >>> from skimage import data
    >>> from cucim.skimage.transform import resize
    >>> image = cp.array(data.camera())
    >>> resize(image, (100, 100)).shape
    (100, 100)

    """
    output_shape = tuple(output_shape)
    output_ndim = len(output_shape)
    input_shape = image.shape
    if output_ndim > image.ndim:
        # append dimensions to input_shape
        input_shape = input_shape + (1, ) * (output_ndim - image.ndim)
        image = cp.reshape(image, input_shape)
    elif output_ndim == image.ndim - 1:
        # multichannel case: append shape of last axis
        output_shape = output_shape + (image.shape[-1], )
    elif output_ndim < image.ndim - 1:
        raise ValueError("len(output_shape) cannot be smaller than the image "
                         "dimensions")

    if anti_aliasing is None:
        anti_aliasing = not image.dtype == bool

    if image.dtype == bool and anti_aliasing:
        warn(
            "Input image dtype is bool. Gaussian convolution is not defined "
            "with bool data type. Please set anti_aliasing to False or "
            "explicitely cast input image to another data type. Starting "
            "from version 0.19 a ValueError will be raised instead of this "
            "warning.",
            FutureWarning,
            stacklevel=2)

    factors = tuple(si / so for si, so in zip(input_shape, output_shape))

    if anti_aliasing and any(f > 1 for f in factors):
        if anti_aliasing_sigma is None:
            anti_aliasing_sigma = tuple([max(0, (f - 1) / 2) for f in factors])
        else:
            if np.isscalar(anti_aliasing_sigma):
                anti_aliasing_sigma = (anti_aliasing_sigma, ) * len(factors)
            elif len(anti_aliasing_sigma) != len(factors):
                raise ValueError("invalid anti_aliasing_sigma length")
            if any(sigma < 0 for sigma in anti_aliasing_sigma):
                raise ValueError("Anti-aliasing standard deviation must be "
                                 "greater than or equal to zero")
            elif any(((sigma > 0) & (factor <= 1))
                     for factor, sigma in zip(factors, anti_aliasing_sigma)):
                warn("Anti-aliasing standard deviation greater than zero but "
                     "not down-sampling along all axes")

        # Translate modes used by np.pad to those used by ndi.gaussian_filter
        np_pad_to_ndimage = {
            'constant': 'constant',
            'edge': 'nearest',
            'symmetric': 'reflect',
            'reflect': 'mirror',
            'wrap': 'wrap'
        }
        try:
            ndi_mode = np_pad_to_ndimage[mode]
        except KeyError:
            raise ValueError("Unknown mode, or cannot translate mode. The "
                             "mode should be one of 'constant', 'edge', "
                             "'symmetric', 'reflect', or 'wrap'. See the "
                             "documentation of numpy.pad for more info.")

        image = ndi.gaussian_filter(image,
                                    anti_aliasing_sigma,
                                    cval=cval,
                                    mode=ndi_mode)

    order = _validate_interpolation_order(image.dtype, order)
    ndi_mode = _to_ndimage_mode(mode)
    # TODO: move the following conversion into _to_ndimage_mode
    if ndi_mode == 'constant':
        ndi_mode = 'grid-constant'
    elif ndi_mode == 'wrap':
        ndi_mode = 'grid-wrap'
    zoom_factors = [1 / f for f in factors]
    image = convert_to_float(image, preserve_range)
    out = ndi.zoom(image,
                   zoom_factors,
                   order=order,
                   mode=ndi_mode,
                   cval=cval,
                   grid_mode=True)
    _clip_warp_output(image, out, order, mode, cval, clip)
    return out
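A small sketch of the default `anti_aliasing_sigma` chosen in the code above: for each axis the down-scale factor is f = input_size / output_size and sigma = max(0, (f - 1) / 2), so axes that are up-scaled (f <= 1) receive no smoothing.

# Default anti-aliasing sigma per axis, mirroring the logic in resize() above.
input_shape = (512, 512)
output_shape = (128, 512)
factors = tuple(si / so for si, so in zip(input_shape, output_shape))   # (4.0, 1.0)
anti_aliasing_sigma = tuple(max(0, (f - 1) / 2) for f in factors)       # (1.5, 0.0)
print(factors, anti_aliasing_sigma)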
Example #9
def threshold_local(image,
                    block_size,
                    method='gaussian',
                    offset=0,
                    mode='reflect',
                    param=None,
                    cval=0):
    """Compute a threshold mask image based on local pixel neighborhood.

    Also known as adaptive or dynamic thresholding. The threshold value is
    the weighted mean for the local neighborhood of a pixel subtracted by a
    constant. Alternatively the threshold can be determined dynamically by a
    given function, using the 'generic' method.

    Parameters
    ----------
    image : (N, M) ndarray
        Input image.
    block_size : int
        Odd size of pixel neighborhood which is used to calculate the
        threshold value (e.g. 3, 5, 7, ..., 21, ...).
    method : {'generic', 'gaussian', 'mean', 'median'}, optional
        Method used to determine adaptive threshold for local neighbourhood in
        weighted mean image.

        * 'generic': use custom function (see ``param`` parameter)
        * 'gaussian': apply gaussian filter (see ``param`` parameter for custom\
                      sigma value)
        * 'mean': apply arithmetic mean filter
        * 'median': apply median rank filter

        By default the 'gaussian' method is used.
    offset : float, optional
        Constant subtracted from weighted mean of neighborhood to calculate
        the local threshold value. Default offset is 0.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The mode parameter determines how the array borders are handled, where
        cval is the value when mode is equal to 'constant'.
        Default is 'reflect'.
    param : {int, function}, optional
        Either specify sigma for 'gaussian' method or function object for
        'generic' method. This function takes the flat array of the local
        neighbourhood as a single argument and returns the calculated
        threshold for the centre pixel.
    cval : float, optional
        Value to fill past edges of input if mode is 'constant'.

    Returns
    -------
    threshold : (N, M) ndarray
        Threshold image. All pixels in the input image higher than the
        corresponding pixel in the threshold image are considered foreground.

    References
    ----------
    .. [1] https://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#adaptivethreshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()[:50, :50]
    >>> binary_image1 = image > threshold_local(image, 15, 'mean')
    >>> func = lambda arr: arr.mean()
    >>> binary_image2 = image > threshold_local(image, 15, 'generic',
    ...                                         param=func)
    """  # noqa
    if block_size % 2 == 0:
        raise ValueError("The kwarg ``block_size`` must be odd! Given "
                         "``block_size`` {0} is even.".format(block_size))
    check_nD(image, 2)
    thresh_image = cp.zeros(image.shape, _float_dtype(image))
    if method == 'generic':
        raise NotImplementedError("TODO: implement generic_filter")
        ndi.generic_filter(image,
                           param,
                           block_size,
                           output=thresh_image,
                           mode=mode,
                           cval=cval)
    elif method == 'gaussian':
        if param is None:
            # automatically determine sigma which covers > 99% of distribution
            sigma = (block_size - 1) / 6.0
        else:
            sigma = param
        ndi.gaussian_filter(image,
                            sigma,
                            output=thresh_image,
                            mode=mode,
                            cval=cval)
    elif method == 'mean':
        mask = 1.0 / block_size * cp.ones((block_size, ))
        # separation of filters to speedup convolution
        ndi.convolve1d(image,
                       mask,
                       axis=0,
                       output=thresh_image,
                       mode=mode,
                       cval=cval)
        ndi.convolve1d(thresh_image,
                       mask,
                       axis=1,
                       output=thresh_image,
                       mode=mode,
                       cval=cval)
    elif method == 'median':
        ndi.median_filter(image,
                          block_size,
                          output=thresh_image,
                          mode=mode,
                          cval=cval)
    else:
        raise ValueError("Invalid method specified. Please use `generic`, "
                         "`gaussian`, `mean`, or `median`.")

    return thresh_image - offset
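For the 'mean' method above, the two `convolve1d` calls are a separable implementation of a full `block_size` x `block_size` mean filter. A CPU sketch with SciPy (standing in for the CuPy `ndi` used here) confirming the equivalence:

import numpy as np
from scipy import ndimage as ndi

image = np.random.rand(32, 32)
block_size = 5
mask = np.ones(block_size) / block_size
tmp = ndi.convolve1d(image, mask, axis=0, mode='reflect')
separable_mean = ndi.convolve1d(tmp, mask, axis=1, mode='reflect')

# Two 1-D box filters reproduce the full 2-D uniform (mean) filter.
np.testing.assert_allclose(
    separable_mean, ndi.uniform_filter(image, size=block_size, mode='reflect'))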
Example #10
File: corner.py Project: grlee77/cucim
def structure_tensor(image, sigma=1, mode="constant", cval=0, order=None):
    """Compute structure tensor using sum of squared differences.

    The (2-dimensional) structure tensor A is defined as::

        A = [Arr Arc]
            [Arc Acc]

    which is approximated by the weighted sum of squared differences in a local
    window around each pixel in the image. This formula can be extended to a
    larger number of dimensions (see [1]_).

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float, optional
        Standard deviation used for the Gaussian kernel, which is used as a
        weighting function for the local summation of squared differences.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    order : {'rc', 'xy'}, optional
        NOTE: Only applies in 2D. Higher dimensions must always use 'rc' order.
        This parameter allows for the use of reverse or forward order of
        the image axes in gradient computation. 'rc' indicates the use of
        the first axis initially (Arr, Arc, Acc), whilst 'xy' indicates the
        usage of the last axis initially (Axx, Axy, Ayy).

    Returns
    -------
    A_elems : list of ndarray
        Upper-diagonal elements of the structure tensor for each pixel in the
        input image.

    See also
    --------
    structure_tensor_eigenvalues

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Structure_tensor

    Examples
    --------
    >>> import cupy as cp
    >>> from cucim.skimage.feature import structure_tensor
    >>> square = cp.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> Arr, Arc, Acc = structure_tensor(square, sigma=0.1, order="rc")
    >>> Acc
    array([[0., 0., 0., 0., 0.],
           [0., 1., 0., 1., 0.],
           [0., 4., 0., 4., 0.],
           [0., 1., 0., 1., 0.],
           [0., 0., 0., 0., 0.]])

    """
    if order == "xy" and image.ndim > 2:
        raise ValueError('Only "rc" order is supported for dim > 2.')

    if order is None:
        if image.ndim == 2:
            # The legacy 2D code followed (x, y) convention, so we swap the
            # axis order to maintain compatibility with old code
            warn(
                "deprecation warning: the default order of the structure "
                'tensor values will be "row-column" instead of "xy" starting '
                'in skimage version 0.20. Use order="rc" or order="xy" to '
                'set this explicitly.  (Specify order="xy" to maintain the '
                "old behavior.)",
                category=FutureWarning,
                stacklevel=2,
            )
            order = "xy"
        else:
            order = "rc"

    image = _prepare_grayscale_input_nD(image)

    derivatives = _compute_derivatives(image, mode=mode, cval=cval)

    if order == "xy":
        derivatives = reversed(derivatives)

    # structure tensor
    A_elems = [
        ndi.gaussian_filter(der0 * der1, sigma, mode=mode, cval=cval)
        for der0, der1 in combinations_with_replacement(derivatives, 2)
    ]

    return A_elems
Example #11
File: corner.py Project: grlee77/cucim
def hessian_matrix(image, sigma=1, mode="constant", cval=0, order="rc"):
    """Compute Hessian matrix.

    The Hessian matrix is defined as::

        H = [Hrr Hrc]
            [Hrc Hcc]

    which is computed by convolving the image with the second derivatives
    of the Gaussian kernel in the respective r- and c-directions.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    order : {'rc', 'xy'}, optional
        This parameter allows for the use of reverse or forward order of
        the image axes in gradient computation. 'rc' indicates the use of
        the first axis initially (Hrr, Hrc, Hcc), whilst 'xy' indicates the
        usage of the last axis initially (Hxx, Hxy, Hyy)

    Returns
    -------
    Hrr : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Hrc : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Hcc : ndarray
        Element of the Hessian matrix for each pixel in the input image.

    Examples
    --------
    >>> import cupy as cp
    >>> from cucim.skimage.feature import hessian_matrix
    >>> square = cp.zeros((5, 5))
    >>> square[2, 2] = 4
    >>> Hrr, Hrc, Hcc = hessian_matrix(square, sigma=0.1, order='rc')
    >>> Hrc
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0., -1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0., -1.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    """

    image = img_as_float(image)

    gaussian_filtered = ndi.gaussian_filter(image,
                                            sigma=sigma,
                                            mode=mode,
                                            cval=cval)

    gradients = cp.gradient(gaussian_filtered)
    axes = range(image.ndim)

    if order == "rc":
        axes = reversed(axes)

    H_elems = [
        cp.gradient(gradients[ax0], axis=ax1)
        for ax0, ax1 in combinations_with_replacement(axes, 2)
    ]

    return H_elems
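A CPU sketch of the same construction, assuming NumPy/SciPy stand-ins for `cp.gradient` and the CuPy `ndi.gaussian_filter`: smooth, take first derivatives, then differentiate each gradient once more along every axis pair.

import numpy as np
from itertools import combinations_with_replacement
from scipy.ndimage import gaussian_filter

square = np.zeros((5, 5))
square[2, 2] = 4
smoothed = gaussian_filter(square, sigma=0.1, mode='constant')
gradients = np.gradient(smoothed)
axes = reversed(range(square.ndim))            # 'rc' order, as in hessian_matrix above
Hrr, Hrc, Hcc = [np.gradient(gradients[ax0], axis=ax1)
                 for ax0, ax1 in combinations_with_replacement(axes, 2)]
print(Hrc)   # matches the Hrc shown in the docstring example above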
Example #12
File: _daisy.py Project: grlee77/cucim
def daisy(image,
          step=4,
          radius=15,
          rings=3,
          histograms=8,
          orientations=8,
          normalization='l1',
          sigmas=None,
          ring_radii=None,
          visualize=False):
    """Extract DAISY feature descriptors densely for the given image.

    DAISY is a feature descriptor similar to SIFT formulated in a way that
    allows for fast dense extraction. Typically, this is practical for
    bag-of-features image representations.

    The implementation follows Tola et al. [1]_ but deviates from it on the
    following points:

      * Histogram bin contributions are smoothed with a circular Gaussian
        window over the tonal range (the angular range).
      * The sigma values of the spatial Gaussian smoothing in this code do not
        match the sigma values in the original code by Tola et al. [2]_. In
        their code, spatial smoothing is applied to both the input image and
        the center histogram. However, this smoothing is not documented in [1]_
        and, therefore, it is omitted.

    Parameters
    ----------
    image : (M, N) array
        Input image (grayscale).
    step : int, optional
        Distance between descriptor sampling points.
    radius : int, optional
        Radius (in pixels) of the outermost ring.
    rings : int, optional
        Number of rings.
    histograms  : int, optional
        Number of histograms sampled per ring.
    orientations : int, optional
        Number of orientations (bins) per histogram.
    normalization : [ 'l1' | 'l2' | 'daisy' | 'off' ], optional
        How to normalize the descriptors

          * 'l1': L1-normalization of each descriptor.
          * 'l2': L2-normalization of each descriptor.
          * 'daisy': L2-normalization of individual histograms.
          * 'off': Disable normalization.

    sigmas : 1D array of float, optional
        Standard deviation of spatial Gaussian smoothing for the center
        histogram and for each ring of histograms. The array of sigmas should
        be sorted from the center and out. I.e. the first sigma value defines
        the spatial smoothing of the center histogram and the last sigma value
        defines the spatial smoothing of the outermost ring. Specifying sigmas
        overrides the following parameter.

            ``rings = len(sigmas) - 1``

    ring_radii : 1D array of int, optional
        Radius (in pixels) for each ring. Specifying ring_radii overrides the
        following two parameters.

            ``rings = len(ring_radii)``
            ``radius = ring_radii[-1]``

        If both sigmas and ring_radii are given, they must satisfy the
        following predicate since no radius is needed for the center
        histogram.

            ``len(ring_radii) == len(sigmas) + 1``

    visualize : bool, optional
        Generate a visualization of the DAISY descriptors

    Returns
    -------
    descs : array
        Grid of DAISY descriptors for the given image as an array
        dimensionality  (P, Q, R) where

            ``P = ceil((M - radius*2) / step)``
            ``Q = ceil((N - radius*2) / step)``
            ``R = (rings * histograms + 1) * orientations``

    descs_img : (M, N, 3) array (only if visualize==True)
        Visualization of the DAISY descriptors.

    References
    ----------
    .. [1] Tola et al. "Daisy: An efficient dense descriptor applied to wide-
           baseline stereo." Pattern Analysis and Machine Intelligence, IEEE
           Transactions on 32.5 (2010): 815-830.
    .. [2] http://cvlab.epfl.ch/software/daisy
    """

    check_nD(image, 2, "img")

    image = img_as_float(image)

    # Validate parameters.
    if sigmas is not None and ring_radii is not None \
            and len(sigmas) - 1 != len(ring_radii):
        raise ValueError('`len(sigmas)-1 != len(ring_radii)`')
    if ring_radii is not None:
        rings = len(ring_radii)
        radius = ring_radii[-1]
    if sigmas is not None:
        rings = len(sigmas) - 1
    if sigmas is None:
        sigmas = [radius * (i + 1) / float(2 * rings) for i in range(rings)]
    if ring_radii is None:
        ring_radii = [radius * (i + 1) / float(rings) for i in range(rings)]
    if normalization not in ['l1', 'l2', 'daisy', 'off']:
        raise ValueError('Invalid normalization method.')

    # Compute image derivatives.
    dx = cp.zeros(image.shape)
    dy = cp.zeros(image.shape)
    dx[:, :-1] = cp.diff(image, n=1, axis=1)
    dy[:-1, :] = cp.diff(image, n=1, axis=0)

    # Compute gradient orientation and magnitude and their contribution
    # to the histograms
    grad_mag = dx * dx
    grad_mag += dy * dy
    cp.sqrt(grad_mag, out=grad_mag)
    grad_ori = cp.arctan2(dy, dx)
    pi = cp.pi
    orientation_kappa = orientations / pi
    orientation_angles = [
        2 * o * pi / orientations - pi for o in range(orientations)
    ]
    hist = cp.empty((orientations, ) + image.shape, dtype=float)
    for i, o in enumerate(orientation_angles):
        # Weigh bin contribution by the circular normal distribution
        hist[i, :, :] = cp.exp(orientation_kappa * cp.cos(grad_ori - o))
        # Weigh bin contribution by the gradient magnitude
        hist[i, :, :] = cp.multiply(hist[i, :, :], grad_mag)

    # Smooth orientation histograms for the center and all rings.
    sigmas = [sigmas[0]] + sigmas
    hist_smooth = cp.empty((rings + 1, ) + hist.shape, dtype=float)
    for i in range(rings + 1):
        for j in range(orientations):
            hist_smooth[i, j, :, :] = gaussian_filter(hist[j, :, :],
                                                      sigma=sigmas[i])

    # Assemble descriptor grid.
    theta = [2 * pi * j / histograms for j in range(histograms)]
    desc_dims = (rings * histograms + 1) * orientations
    descs = cp.empty(
        (desc_dims, image.shape[0] - 2 * radius, image.shape[1] - 2 * radius))
    descs[:orientations, :, :] = hist_smooth[0, :, radius:-radius,
                                             radius:-radius]
    idx = orientations
    for i in range(rings):
        for j in range(histograms):
            y_min = radius + int(round(ring_radii[i] * math.sin(theta[j])))
            y_max = descs.shape[1] + y_min
            x_min = radius + int(round(ring_radii[i] * math.cos(theta[j])))
            x_max = descs.shape[2] + x_min
            descs[idx:idx + orientations, :, :] = hist_smooth[i + 1, :,
                                                              y_min:y_max,
                                                              x_min:x_max]
            idx += orientations
    descs = descs[:, ::step, ::step]
    descs = descs.swapaxes(0, 1).swapaxes(1, 2)

    # Normalize descriptors.
    if normalization != 'off':
        descs += 1e-10
        if normalization == 'l1':
            descs /= cp.sum(descs, axis=2)[:, :, cp.newaxis]
        elif normalization == 'l2':
            descs /= cp.sqrt(cp.sum(descs * descs, axis=2))[:, :, cp.newaxis]
        elif normalization == 'daisy':
            for i in range(0, desc_dims, orientations):
                norms = descs[:, :, i:i + orientations]
                norms = norms * norms
                norms = norms.sum(axis=2)
                cp.sqrt(norms, out=norms)
                descs[:, :, i:i + orientations] /= norms[:, :, cp.newaxis]

    if visualize:
        from skimage import draw
        from skimage.color import gray2rgb

        image = cp.asnumpy(image)
        descs_img = gray2rgb(image)
        for i in range(descs.shape[0]):
            for j in range(descs.shape[1]):
                # Draw center histogram sigma
                color = [1, 0, 0]
                desc_y = i * step + radius
                desc_x = j * step + radius
                rows, cols, val = draw.circle_perimeter_aa(
                    desc_y, desc_x, int(sigmas[0]))
                draw.set_color(descs_img, (rows, cols), color, alpha=val)
                max_bin = float(cp.max(descs[i, j, :]))
                for o_num, o in enumerate(orientation_angles):
                    # Draw center histogram bins
                    bin_size = descs[i, j, o_num] / max_bin
                    dy = sigmas[0] * bin_size * math.sin(o)
                    dx = sigmas[0] * bin_size * math.cos(o)
                    rows, cols, val = draw.line_aa(desc_y, desc_x,
                                                   int(desc_y + dy),
                                                   int(desc_x + dx))
                    draw.set_color(descs_img, (rows, cols), color, alpha=val)
                for r_num, r in enumerate(ring_radii):
                    color_offset = float(1 + r_num) / rings
                    color = (1 - color_offset, 1, color_offset)
                    for t_num, t in enumerate(theta):
                        # Draw ring histogram sigmas
                        hist_y = desc_y + int(round(r * math.sin(t)))
                        hist_x = desc_x + int(round(r * math.cos(t)))
                        rows, cols, val = draw.circle_perimeter_aa(
                            hist_y, hist_x, int(sigmas[r_num + 1]))
                        draw.set_color(descs_img, (rows, cols),
                                       color,
                                       alpha=val)
                        for o_num, o in enumerate(orientation_angles):
                            # Draw histogram bins
                            bin_size = descs[i, j, orientations + r_num *
                                             histograms * orientations +
                                             t_num * orientations + o_num]
                            bin_size /= max_bin
                            dy = sigmas[r_num + 1] * bin_size * math.sin(o)
                            dx = sigmas[r_num + 1] * bin_size * math.cos(o)
                            rows, cols, val = draw.line_aa(
                                hist_y, hist_x, int(hist_y + dy),
                                int(hist_x + dx))
                            draw.set_color(descs_img, (rows, cols),
                                           color,
                                           alpha=val)
        return descs, descs_img
    else:
        return descs
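A short sketch of the descriptor-grid shape formula stated in the docstring above: P = ceil((M - radius*2) / step), Q = ceil((N - radius*2) / step), R = (rings * histograms + 1) * orientations.

import math

# Expected DAISY descriptor grid shape for a hypothetical 200 x 200 image
# with the default parameters of the daisy() function above.
M, N = 200, 200
step, radius, rings, histograms, orientations = 4, 15, 3, 8, 8
P = math.ceil((M - 2 * radius) / step)
Q = math.ceil((N - 2 * radius) / step)
R = (rings * histograms + 1) * orientations
print(P, Q, R)   # 43 43 200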
Example #13
def get_maxima_2D(
    ar,
    sigma=0,
    edgeBoundary=0,
    minSpacing=0,
    minRelativeIntensity=0,
    minAbsoluteIntensity=0,
    relativeToPeak=0,
    maxNumPeaks=0,
    subpixel="poly",
    ar_FT=None,
    upsample_factor=16,
    get_maximal_points=None,
    blocks=None,
    threads=None,
):
    """
    Finds the indices where the 2D array ar is a local maximum.
    Optional parameters allow blurring of the array and filtering of the output;
    setting each of these to 0 (default) turns off these functions.

    Accepts:
        ar                      (ndarray) a 2D array
        sigma                   (float) gaussian blur std to apply to ar before finding the maxima
        edgeBoundary            (int) ignore maxima within edgeBoundary of the array edge
        minSpacing              (float) if two maxima are found within minSpacing, the dimmer one
                                is removed
        minRelativeIntensity    (float) maxima dimmer than minRelativeIntensity compared to the
                                relativeToPeak'th brightest maximum are removed
        relativeToPeak          (int) 0=brightest maximum. 1=next brightest, etc.
        maxNumPeaks             (int) return only the first maxNumPeaks maxima
        subpixel                (str)          'none': no subpixel fitting
                                     (default) 'poly': polynomial interpolation of correlogram peaks
                                                    (fairly fast but not very accurate)
                                               'multicorr': uses the multicorr algorithm with
                                                        DFT upsampling
        ar_FT                   (None or complex array) if subpixel=='multicorr' the
                                fourier transform of the image is required.  It may be
                                passed here as a complex array.  Otherwise, if ar_FT is None,
                                it is computed
        upsample_factor         (int) required iff subpixel=='multicorr'

    Returns
        maxima_x                (ndarray) x-coords of the local maximum, sorted by intensity.
        maxima_y                (ndarray) y-coords of the local maximum, sorted by intensity.
        maxima_intensity        (ndarray) intensity of the local maxima
    """
    assert subpixel in [
        "none",
        "poly",
        "multicorr",
    ], "Unrecognized subpixel option {}, subpixel must be 'none', 'poly', or 'multicorr'".format(
        subpixel
    )

    # Get maxima
    ar = gaussian_filter(ar, sigma)
    maxima_bool = cp.zeros_like(ar, dtype=bool)
    sizex = ar.shape[0]
    sizey = ar.shape[1]
    N = sizex * sizey
    get_maximal_points(blocks, threads, (ar, maxima_bool, sizex, sizey, N))

    # Remove edges
    if edgeBoundary > 0:
        assert isinstance(edgeBoundary, (int, np.integer))
        maxima_bool[:edgeBoundary, :] = False
        maxima_bool[-edgeBoundary:, :] = False
        maxima_bool[:, :edgeBoundary] = False
        maxima_bool[:, -edgeBoundary:] = False
    elif subpixel is True:
        maxima_bool[:1, :] = False
        maxima_bool[-1:, :] = False
        maxima_bool[:, :1] = False
        maxima_bool[:, -1:] = False

    # Get indices, sorted by intensity
    maxima_x, maxima_y = cp.nonzero(maxima_bool)
    maxima_x = maxima_x.get()
    maxima_y = maxima_y.get()
    dtype = np.dtype([("x", float), ("y", float), ("intensity", float)])
    maxima = np.zeros(len(maxima_x), dtype=dtype)
    maxima["x"] = maxima_x
    maxima["y"] = maxima_y

    ar = ar.get()
    maxima["intensity"] = ar[maxima_x, maxima_y]
    maxima = np.sort(maxima, order="intensity")[::-1]

    if len(maxima) > 0:
        # Remove maxima which are too close
        if minSpacing > 0:
            deletemask = np.zeros(len(maxima), dtype=bool)
            for i in range(len(maxima)):
                if deletemask[i] == False:
                    tooClose = (
                        (maxima["x"] - maxima["x"][i]) ** 2
                        + (maxima["y"] - maxima["y"][i]) ** 2
                    ) < minSpacing ** 2
                    tooClose[: i + 1] = False
                    deletemask[tooClose] = True
            maxima = np.delete(maxima, np.nonzero(deletemask)[0])

        # Remove maxima which are too dim
        if (minRelativeIntensity > 0) & (len(maxima) > relativeToPeak):
            assert isinstance(relativeToPeak, (int, np.integer))
            deletemask = (
                maxima["intensity"] / maxima["intensity"][relativeToPeak]
                < minRelativeIntensity
            )
            maxima = np.delete(maxima, np.nonzero(deletemask)[0])

        # Remove maxima which are too dim, absolute scale
        if minAbsoluteIntensity > 0:
            deletemask = maxima["intensity"] < minAbsoluteIntensity
            maxima = np.delete(maxima, np.nonzero(deletemask)[0])

        # Remove maxima in excess of maxNumPeaks
        if maxNumPeaks is not None and maxNumPeaks > 0:
            assert isinstance(maxNumPeaks, (int, np.integer))
            if len(maxima) > maxNumPeaks:
                maxima = maxima[:maxNumPeaks]

        # Subpixel fitting
        # For all subpixel fitting, first fit 1D parabolas in x and y to 3 points (maximum, +/- 1 pixel)
        if subpixel != "none":
            for i in range(len(maxima)):
                Ix1_ = ar[int(maxima["x"][i]) - 1, int(maxima["y"][i])]
                Ix0 = ar[int(maxima["x"][i]), int(maxima["y"][i])]
                Ix1 = ar[int(maxima["x"][i]) + 1, int(maxima["y"][i])]
                Iy1_ = ar[int(maxima["x"][i]), int(maxima["y"][i]) - 1]
                Iy0 = ar[int(maxima["x"][i]), int(maxima["y"][i])]
                Iy1 = ar[int(maxima["x"][i]), int(maxima["y"][i]) + 1]
                deltax = (Ix1 - Ix1_) / (4 * Ix0 - 2 * Ix1 - 2 * Ix1_)
                deltay = (Iy1 - Iy1_) / (4 * Iy0 - 2 * Iy1 - 2 * Iy1_)
                maxima["x"][i] += deltax
                maxima["y"][i] += deltay
                maxima["intensity"][i] = linear_interpolation_2D(
                    ar, maxima["x"][i], maxima["y"][i]
                )
        # Further refinement with fourier upsampling
        if subpixel == "multicorr":
            ar_FT = cp.conj(ar_FT)
            for ipeak in range(len(maxima["x"])):
                xyShift = np.array((maxima["x"][ipeak], maxima["y"][ipeak]))
                # we actually have to lose some precision and go down to half-pixel
                # accuracy. this could also be done by a single upsampling at factor 2
                # instead of get_maxima_2D.
                xyShift[0] = np.round(xyShift[0] * 2) / 2
                xyShift[1] = np.round(xyShift[1] * 2) / 2

                subShift = upsampled_correlation(ar_FT, upsample_factor, xyShift)
                maxima["x"][ipeak] = subShift[0]
                maxima["y"][ipeak] = subShift[1]

    return maxima["x"], maxima["y"], maxima["intensity"]
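The subpixel refinement above fits a 1-D parabola through the maximum and its two neighbours; the offset formula delta = (I[+1] - I[-1]) / (4*I[0] - 2*I[+1] - 2*I[-1]) recovers the vertex exactly for parabolic data. A tiny NumPy check:

import numpy as np

true_offset = 0.3                        # vertex sits 0.3 pixels right of the sampled maximum
xs = np.array([-1.0, 0.0, 1.0])
samples = 1.0 - (xs - true_offset) ** 2  # any downward parabola works
I_m, I_0, I_p = samples
delta = (I_p - I_m) / (4 * I_0 - 2 * I_p - 2 * I_m)
print(delta)                             # 0.3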
Example #14
def _find_Bragg_disks_single_DP_FK_CUDA(
    DP,
    probe_kernel_FT,
    corrPower=1,
    sigma=2,
    edgeBoundary=20,
    minRelativeIntensity=0.005,
    minAbsoluteIntensity=0.0,
    relativeToPeak=0,
    minPeakSpacing=60,
    maxNumPeaks=70,
    subpixel="multicorr",
    upsample_factor=16,
    filter_function=None,
    return_cc=False,
    peaks=None,
    get_maximal_points=None,
    blocks=None,
    threads=None,
    ccc=None,
    cc=None,
):
    """
    Finds the Bragg disks in DP by cross, hybrid, or phase correlation with probe_kernel_FT.

    After taking the cross/hybrid/phase correlation, a gaussian smoothing is applied
    with standard deviation sigma, and all local maxima are found. Detected peaks within
    edgeBoundary pixels of the diffraction plane edges are then discarded. Next, peaks with
    intensities less than minRelativeIntensity of the brightest peak in the correlation are
    discarded. Then peaks which are within a distance of minPeakSpacing of their nearest neighbor
    peak are found, and in each such pair the peak with the lesser correlation intensities is
    removed. Finally, if the number of peaks remaining exceeds maxNumPeaks, only the maxNumPeaks
    peaks with the highest correlation intensity are retained.

    IMPORTANT NOTE: the argument probe_kernel_FT is related to the probe kernels generated by
    functions like get_probe_kernel() by:

            probe_kernel_FT = np.conj(np.fft.fft2(probe_kernel))

    if this function is simply passed a probe kernel, the results will not be meaningful! To run
    on a single DP while passing the real space probe kernel as an argument, use
    find_Bragg_disks_single_DP().

    Accepts:
        DP                   (ndarray) a diffraction pattern
        probe_kernel_FT      (cparray) the vacuum probe template, in Fourier space. Related to the
                             real space probe kernel by probe_kernel_FT = F(probe_kernel)*, where F
                             indicates a Fourier Transform and * indicates complex conjugation.
        corrPower            (float between 0 and 1, inclusive) the cross correlation power. A
                             value of 1 corresponds to a cross correlation, and 0 corresponds to a
                             phase correlation, with intermediate values giving various hybrids.
        sigma                (float) the standard deviation for the gaussian smoothing applied to
                             the cross correlation
        edgeBoundary         (int) minimum acceptable distance from the DP edge, in pixels
        minRelativeIntensity (float) the minimum acceptable correlation peak intensity, relative to
                             the intensity of the relativeToPeak'th peak
        relativeToPeak       (int) specifies the peak against which the minimum relative intensity
                             is measured -- 0=brightest maximum. 1=next brightest, etc.
        minPeakSpacing       (float) the minimum acceptable spacing between detected peaks
        maxNumPeaks          (int) the maximum number of peaks to return
        subpixel             (str)          'none': no subpixel fitting
                                  (default) 'poly': polynomial interpolation of correlogram peaks
                                                    (fairly fast but not very accurate)
                                            'multicorr': uses the multicorr algorithm with
                                                        DFT upsampling
        upsample_factor      (int) upsampling factor for subpixel fitting (only used when subpixel='multicorr')
        filter_function      (callable) filtering function to apply to each diffraction pattern before peakfinding.
                             Must be a function of only one argument (the diffraction pattern) and return
                             the filtered diffraction pattern.
                             The shape of the returned DP must match the shape of the probe kernel (but does
                             not need to match the shape of the input diffraction pattern, e.g. the filter
                             can be used to bin the diffraction pattern). If using distributed disk detection,
                             the function must be able to be pickled by dill.
        return_cc            (bool) if True, return the cross correlation
        peaks                (PointList) For internal use.
                             If peaks is None, the PointList of peak positions is created here.
                             If peaks is not None, it is the PointList that detected peaks are added
                             to, and must have the appropriate coords ('qx','qy','intensity').
        ccc and cc:         Precomputed complex and real-IFFT cross correlations. Used when called
                            in batched mode only, causing local calculation of those to be skipped

    Returns:
        peaks                (PointList) the Bragg peak positions and correlation intensities
    """
    assert subpixel in [
        "none",
        "poly",
        "multicorr",
    ], "Unrecognized subpixel option {}, subpixel must be 'none', 'poly', or 'multicorr'".format(
        subpixel
    )

    # if we are in batching mode, cc and ccc will be provided. else, compute it
    if ccc is None:
        # Perform any prefiltering
        DP = cp.array(
            DP if filter_function is None else filter_function(DP), dtype="float64"
        )

        # Get the cross correlation
        if subpixel in ("none", "poly"):
            cc = get_cross_correlation_fk(DP, probe_kernel_FT, corrPower)
            ccc = None
        # for multicorr subpixel fitting, we need both the real and complex cross correlation
        else:
            ccc = get_cross_correlation_fk(
                DP, probe_kernel_FT, corrPower, returnval="fourier"
            )
            cc = cp.maximum(cp.real(cp.fft.ifft2(ccc)), 0)

    # Find the maxima
    maxima_x, maxima_y, maxima_int = get_maxima_2D(
        cc,
        sigma=sigma,
        edgeBoundary=edgeBoundary,
        minRelativeIntensity=minRelativeIntensity,
        minAbsoluteIntensity=minAbsoluteIntensity,
        relativeToPeak=relativeToPeak,
        minSpacing=minPeakSpacing,
        maxNumPeaks=maxNumPeaks,
        subpixel=subpixel,
        ar_FT=ccc,
        upsample_factor=upsample_factor,
        get_maximal_points=get_maximal_points,
        blocks=blocks,
        threads=threads,
    )

    # Make peaks PointList
    if peaks is None:
        coords = [("qx", float), ("qy", float), ("intensity", float)]
        peaks = PointList(coordinates=coords)
    else:
        assert isinstance(peaks, PointList)
    peaks.add_tuple_of_nparrays((maxima_x, maxima_y, maxima_int))

    if return_cc:
        return peaks, gaussian_filter(cc, sigma)
    else:
        return peaks
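A short NumPy sketch of the `probe_kernel_FT` convention emphasised in the docstring above: pass the complex conjugate of the kernel's 2-D FFT, so that multiplying by F(DP) and inverse-transforming gives the cross correlation (the corrPower = 1 case).

import numpy as np

probe_kernel = np.random.rand(64, 64)            # stand-in real-space probe kernel
probe_kernel_FT = np.conj(np.fft.fft2(probe_kernel))

DP = np.random.rand(64, 64)                      # stand-in diffraction pattern
cc = np.real(np.fft.ifft2(np.fft.fft2(DP) * probe_kernel_FT))
# `cc` is the circular cross correlation of DP with probe_kernel; passing the
# un-conjugated, un-transformed kernel instead would not give meaningful peaks.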