Example #1
def cul_activation_percentile_gpu(step, activations, parameter_name):
    percentiles = cp.array([0, 25, 50, 75, 100])
    res = []
    data = cp.asarray(activations.asnumpy())
    res.append([step, parameter_name[0:parameter_name.rfind('.')], -1] +
               cp.percentile(data, percentiles).tolist())
    for i in range(data.shape[0]):
        res.append([step, parameter_name[0:parameter_name.rfind('.')], i] +
                   cp.percentile(data[i], percentiles).tolist())
    return res
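A hedged call sketch for the helper above; the tensor wrapper with an .asnumpy() method and the parameter name are stand-ins invented for illustration:

import numpy as np

class _FakeTensor:
    # Stand-in for a MindSpore-style tensor exposing .asnumpy() (assumption).
    def __init__(self, arr):
        self._arr = arr

    def asnumpy(self):
        return self._arr

# rows[0] summarizes the whole tensor (channel index -1); rows[1:] hold one entry
# per channel, each ending with [min, Q1, median, Q3, max].
rows = cul_activation_percentile_gpu(
    step=0,
    activations=_FakeTensor(np.random.rand(4, 32)),
    parameter_name='conv1.output',
)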
Example #2
def test_li_arbitrary_start_point():
    cell = celld
    max_stationary_point = threshold_li(cell)
    low_stationary_point = threshold_li(
        cell, initial_guess=float(cp.percentile(cell, 5))
    )
    optimum = threshold_li(cell, initial_guess=float(cp.percentile(cell, 95)))
    assert 67 < max_stationary_point < 68
    assert 48 < low_stationary_point < 49
    assert 111 < optimum < 112
Example #3
    def percentile_cudf(a, q, interpolation="linear"):
        # Cudf dispatch to the equivalent of `np.percentile`:
        # https://numpy.org/doc/stable/reference/generated/numpy.percentile.html
        a = cudf.Series(a)
        # a is series.
        n = len(a)
        if not len(a):
            return None, n
        if isinstance(q, Iterator):
            q = list(q)

        if cudf.api.types.is_categorical_dtype(a.dtype):
            result = cp.percentile(a.cat.codes, q, interpolation=interpolation)

            return (
                pd.Categorical.from_codes(result, a.dtype.categories,
                                          a.dtype.ordered),
                n,
            )
        if np.issubdtype(a.dtype, np.datetime64):
            result = a.quantile([i / 100.0 for i in q],
                                interpolation=interpolation)

            if q[0] == 0:
                # https://github.com/dask/dask/issues/6864
                result[0] = min(result[0], a.min())
            return result.to_pandas(), n
        if not np.issubdtype(a.dtype, np.number):
            interpolation = "nearest"
        return (
            a.quantile([i / 100.0 for i in q],
                       interpolation=interpolation).to_pandas(),
            n,
        )
Example #4
def constrain_variable_probe(variable_probe, weights):
    """Add the following constraints to variable probe weights

    1. Remove outliers from weights
    2. Enforce orthogonality once per epoch

    """
    logger.info('Orthogonalize variable probes')
    variable_probe = tike.linalg.orthogonalize_gs(
        variable_probe,
        axis=(-3, -2, -1),
    )

    logger.info('Remove outliers from variable probe weights')
    aevol = cp.abs(weights)
    weights = cp.minimum(
        aevol,
        1.5 * cp.percentile(
            aevol,
            [95],
            axis=[-3],
            keepdims=True,
        ).astype(weights.dtype),
    ) * cp.sign(weights)

    # TODO: Smooth the weights as a function of the frame index.

    return variable_probe, weights
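The clipping above caps each weight magnitude at 1.5 times its 95th percentile along the frame axis while keeping the sign. A minimal standalone sketch of the same idea; the array shape and axis are assumptions:

import cupy as cp

weights = cp.random.standard_normal((1000, 3))            # hypothetical (frames, modes) weights
magnitude = cp.abs(weights)
cap = 1.5 * cp.percentile(magnitude, 95, axis=0, keepdims=True)
clipped = cp.minimum(magnitude, cap) * cp.sign(weights)   # bounded magnitude, original sign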
Example #5
    def test_percentile_memory_access(self, dtype):
        # Create an allocator that guarantees that the array allocated inside the
        # cupy.percentile call is followed by NaN-filled memory
        original_allocator = cuda.get_allocator()

        def controlled_allocator(size):
            memptr = original_allocator(size)
            base_size = memptr.mem.size
            assert base_size % 512 == 0
            item_size = dtype().itemsize
            shape = (base_size // item_size, )
            x = cupy.ndarray(memptr=memptr, shape=shape, dtype=dtype)
            x.fill(cupy.nan)
            return memptr

        # Check that percentile still returns non-NaN results
        a = testing.shaped_random((5, ), cupy, dtype)
        q = cupy.array((0, 100), dtype=dtype)

        cuda.set_allocator(controlled_allocator)
        try:
            percentiles = cupy.percentile(a, q, axis=None, method='linear')
        finally:
            cuda.set_allocator(original_allocator)

        assert not cupy.any(cupy.isnan(percentiles))
Example #6
    def reset(self, t=tinit, ptile=70):
        """
        only to be used once after dry run
        reset alloc and time, find the sharpe ratio threshold for sigmoid
        """
        #global stockPool, hurstPool

        size = len(self.stocks)
        self.alloc = dict.fromkeys(self.stocks,
                                   1 / size)  #init with a sharpe function,
        self.alloc['cash'] = 0
        self.initAlloc = np.asarray([])
        self.weights = dict.fromkeys(self.stocks,
                                     1)  # put in dictionary for easy change
        self.orders = np.zeros(size)
        percentile = np.percentile(self.sharpeReal, ptile)
        self.threshold = percentile  # just the ptile
        self.sharpeOpt = np.asarray([])
        self.sharpeReal = np.asarray([])
        self.sharpeNonOpt = np.asarray([])
        self.cash = np.asarray([])
        self.value = np.asarray([self.volume])
        self.weightdata = pd.DataFrame()
        self.valuedata = pd.DataFrame()
        # print(self.stocks)
        # print(self.alloc)
        # print(self.value)

        self.optimize(first=True)
        print('reset!')
        print('threshold: ', self.threshold)
        print("_____")

        resetStocks()
        checkReset()
Example #7
def is_low_contrast(
    image,
    fraction_threshold=0.05,
    lower_percentile=1,
    upper_percentile=99,
    method="linear",
):
    """Determine if an image is low contrast.

    Parameters
    ----------
    image : array-like
        The image under test.
    fraction_threshold : float, optional
        The low contrast fraction threshold. An image is considered low-
        contrast when its range of brightness spans less than this
        fraction of its data type's full range. [1]_
    lower_percentile : float, optional
        Disregard values below this percentile when computing image contrast.
    upper_percentile : float, optional
        Disregard values above this percentile when computing image contrast.
    method : str, optional
        The contrast determination method.  Right now the only available
        option is "linear".

    Returns
    -------
    out : bool
        True when the image is determined to be low contrast.

    References
    ----------
    .. [1] https://scikit-image.org/docs/dev/user_guide/data_types.html

    Examples
    --------
    >>> import cupy as cp
    >>> image = cp.linspace(0, 0.04, 100)
    >>> is_low_contrast(image)
    True
    >>> image[-1] = 1
    >>> is_low_contrast(image)
    True
    >>> is_low_contrast(image, upper_percentile=100)
    False
    """
    image = cp.asarray(image)
    if image.ndim == 3:
        if image.shape[2] == 4:
            image = rgba2rgb(image)
        if image.shape[2] == 3:
            image = rgb2gray(image)

    dlimits = dtype_limits(image, clip_negative=False)
    limits = cp.percentile(image, [lower_percentile, upper_percentile])
    ratio = (limits[1] - limits[0]) / (dlimits[1] - dlimits[0])

    return ratio < fraction_threshold
Example #8
def _run_cupy_quantile(data, k):
    w = 100.0 / k
    p = cupy.arange(w, 100 + w, w)

    if p[-1] > 100.0:
        p[-1] = 100.0

    q = cupy.percentile(data, p)
    q = cupy.unique(q)
    return q
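The helper returns the deduplicated upper edges of k quantile bins. A hedged usage sketch that turns those edges into class labels; the sample data is made up:

import cupy

data = cupy.random.rand(10_000)
edges = _run_cupy_quantile(data, k=5)      # upper edge of each quantile bin
labels = cupy.searchsorted(edges, data)    # class index in [0, len(edges) - 1]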
Example #9
def norm_percentile(signals, p1=5, p2=95, pcnt=True):
    # signals is an array of n signals with shape (n, T)
    out = cp.zeros(signals.shape)
    for i in range(signals.shape[0]):
        s = signals[i, :]
        if pcnt:
            [n, m] = cp.percentile(s, [p1, p2])
            out[i, :] = (s - n) / (m - n)
        else:
            out[i, :] = (s - cp.mean(s)) / cp.std(s)
    return out
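Quick usage check: each row is rescaled so its 5th and 95th percentiles map to 0 and 1 (values outside that range land outside [0, 1]); the input shape is an assumption:

import cupy as cp

signals = cp.random.standard_normal((4, 500))
scaled = norm_percentile(signals)                # per-row percentile scaling
zscored = norm_percentile(signals, pcnt=False)   # per-row z-scoring instead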
Example #10
def cul_weight_percentile_gpu(step, weights, weight_names):
    res = []
    percentiles = cp.array([0, 25, 50, 75, 100])
    for i in [
            i for i in range(len(weight_names))
            if weight_names[i][-7:] == ".weight"
    ]:
        # store the weight percentiles
        weight = cp.asarray(weights[i].asnumpy())
        res.append([step, weight_names[i]] +
                   cp.percentile(weight, percentiles).tolist())
    return res
Example #11
def background_mask(image, return_numpy=True):
    """
    Creates a background mask by setting all image pixels with low scattering
    signals to zero. As all background pixels are near zero for all images in
    the SLI image stack, this method should remove most of the background
    allowing for better approximations using the available features.
    It is advised to use this function.

    Args:

        image: Complete SLI measurement image stack as a 2D/3D Numpy array

        threshold: Threshold for mask creation (default: 10)

        return_numpy: Necessary if using `use_gpu`. Specifies if a CuPy or
                      Numpy array will be returned.

    Returns:

        numpy.array: 1D/2D-image which masks the background as True and
                     foreground as False
    """
    gpu_image = cupy.array(image, dtype='float32')
    gpu_average = cupy.average(gpu_image, axis=-1)

    # Set histogram to a range of 0 to 1 ignoring any outliers.
    hist_avg_image = gpu_average / cupy.percentile(gpu_image, 99)
    # Generate histogram in range of 0 to 1 to ignore outliers again. We search for values at the beginning anyway.
    avg_hist, avg_bins = cupy.histogram(hist_avg_image, bins=256, range=(0, 1))
    # Use SLIX to search for significant peaks in the histogram
    avg_hist = avg_hist[numpy.newaxis, numpy.newaxis, ...]
    peaks = SLIX.toolbox.significant_peaks(image=avg_hist).flatten()
    # Reverse the histogram to search for minimal values with SLIX (again)
    avg_hist = -avg_hist
    reversed_peaks = SLIX.toolbox.significant_peaks(image=avg_hist).flatten()
    # We can now calculate the index of our background threshold using the reversed_peaks
    index = numpy.argmax(peaks) + numpy.argmax(reversed_peaks[numpy.argmax(peaks):])
    # Reverse from 0 to 1 to original image scale and calculate the threshold position
    threshold = avg_bins[index] * numpy.percentile(gpu_average, 99)
    # Return a mask with the calculated background image
    gpu_mask = gpu_average < threshold

    if return_numpy:
        cpu_mask = cupy.asnumpy(gpu_mask)
        del gpu_image
        del gpu_mask
        return cpu_mask
    else:
        return gpu_mask
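For comparison, a minimal percentile-only variant of the same idea with a fixed cut; the 10% cut-off is an arbitrary assumption and does not reproduce the histogram-peak logic above:

import cupy

stack = cupy.random.rand(64, 64, 24).astype('float32')   # fake SLI stack (x, y, measurements)
avg = cupy.average(stack, axis=-1)
mask = avg < cupy.percentile(avg, 10)                     # darkest 10% of pixels -> background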
Example #12
 def determine_th(self, seeds, alpha, dist, constant_th):
     S = len(seeds[0])
     if constant_th is not None:
         return (xp.ones(S) * constant_th).tolist()
     th = []
     for i in range(0, S, 128):
         sl = slice(i, min(i + 128, S))
         D = distance_function(self.src_space[seeds[0][sl]].dot(self.W),
                               self.tgt_space, dist)
         th_i = xp.percentile(
             D - D[xp.arange(D.shape[0]), seeds[1][sl]][:, None],
             alpha * 100,
             axis=1)
         th.append(th_i)
     return xp.hstack(th).tolist()
Example #13
    def _filter_data(self, frequency: float):
        n_chans = self.data.shape[0]
        amplitude_percentiles = cp.linspace(0, 100, self.n_bins + 1)

        win = cp.array(
            mne.time_frequency.morlet(self.sfreq, [frequency], self.omega)[0])

        data_envelope = cp.zeros_like(self.data)
        for i in range(n_chans):
            self.data_preprocessed[i] = cusignal.fftconvolve(
                self.data[i], win, 'same')
            data_envelope[i] = cp.abs(self.data_preprocessed[i])

            # normalize the analytic signal amplitude so that PLV can be computed
            # via an inner product with the conjugate
            self.data_preprocessed[i] /= data_envelope[i]
            # normalize signal envelope to make it comparable between different contacts
            data_envelope[i] /= cupy_median(data_envelope[i])
            self.data_thresholded[i] = data_envelope[i] <= (
                cupy_median(data_envelope[i]) * 2)
            # self.data_thresholded[i] = True

        amplitude_bins = cp.percentile(data_envelope[self.data_thresholded],
                                       amplitude_percentiles)
        digitize_cupy(data_envelope,
                      amplitude_bins,
                      out=self.data_amplitude_labels)

        self.data_amplitude_labels -= 1

        # deleting envelope to save some space
        # I am juggling the conjugate and envelope memory here because we don't need
        # the conjugate during preprocessing and don't need the envelope after preprocessing
        # del data_envelope
        # data_envelope = None
        self.data_envelope = data_envelope

        self.data_conj = cp.zeros_like(self.data_preprocessed)
        cp.conj(self.data_preprocessed, out=self.data_conj)
Example #14
    def fit(self, X, y=None) -> "KBinsDiscretizer":
        """
        Fit the estimator.

        Parameters
        ----------
        X : numeric array-like, shape (n_samples, n_features)
            Data to be discretized.

        y : None
            Ignored. This parameter exists only for compatibility with
            :class:`sklearn.pipeline.Pipeline`.

        Returns
        -------
        self
        """
        X = self._validate_data(X, dtype='numeric')

        valid_encode = ('onehot', 'onehot-dense', 'ordinal')
        if self.encode not in valid_encode:
            raise ValueError("Valid options for 'encode' are {}. "
                             "Got encode={!r} instead.".format(
                                 valid_encode, self.encode))
        valid_strategy = ('uniform', 'quantile', 'kmeans')
        if self.strategy not in valid_strategy:
            raise ValueError("Valid options for 'strategy' are {}. "
                             "Got strategy={!r} instead.".format(
                                 valid_strategy, self.strategy))

        n_features = X.shape[1]
        n_bins = self._validate_n_bins(n_features)
        n_bins = np.asnumpy(n_bins)

        bin_edges = cpu_np.zeros(n_features, dtype=object)
        for jj in range(n_features):
            column = X[:, jj]
            col_min, col_max = column.min(), column.max()

            if col_min == col_max:
                warnings.warn("Feature %d is constant and will be "
                              "replaced with 0." % jj)
                n_bins[jj] = 1
                bin_edges[jj] = np.array([-np.inf, np.inf])
                continue

            if self.strategy == 'uniform':
                bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)

            elif self.strategy == 'quantile':
                quantiles = np.linspace(0, 100, n_bins[jj] + 1)
                bin_edges[jj] = np.asarray(np.percentile(column, quantiles))
                # Workaround for https://github.com/cupy/cupy/issues/4451
                # This should be removed as soon as a fix is available in cupy
                # in order to limit alterations in the included sklearn code
                bin_edges[jj][-1] = col_max

            elif self.strategy == 'kmeans':
                # Deterministic initialization with uniform spacing
                uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1)
                init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5

                # 1D k-means procedure
                km = KMeans(n_clusters=n_bins[jj],
                            init=init,
                            n_init=1,
                            output_type='cupy')
                km = km.fit(column[:, None])
                with using_output_type('cupy'):
                    centers = km.cluster_centers_[:, 0]
                # Must sort, centers may be unsorted even with sorted init
                centers.sort()
                bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5
                bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max]

            # Remove bins whose width is too small (i.e., <= 1e-8)
            if self.strategy in ('quantile', 'kmeans'):
                mask = np.diff(bin_edges[jj], prepend=-np.inf) > 1e-8
                bin_edges[jj] = bin_edges[jj][mask]
                if len(bin_edges[jj]) - 1 != n_bins[jj]:
                    warnings.warn('Bins whose width are too small (i.e., <= '
                                  '1e-8) in feature %d are removed. Consider '
                                  'decreasing the number of bins.' % jj)
                    n_bins[jj] = len(bin_edges[jj]) - 1

        self.bin_edges_ = bin_edges
        self.n_bins_ = n_bins

        if 'onehot' in self.encode:
            self._encoder = OneHotEncoder(categories=np.array(
                [np.arange(i) for i in self.n_bins_]),
                                          sparse=self.encode == 'onehot',
                                          output_type='cupy')
            # Fit the OneHotEncoder with toy datasets
            # so that it's ready for use after the KBinsDiscretizer is fitted
            self._encoder.fit(np.zeros((1, len(self.n_bins_)), dtype=int))

        return self
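For the 'quantile' strategy the bin edges are simply evenly spaced percentiles of each column. A standalone sketch of that step, reusing the end-point pinning from the workaround above; the column data is made up:

import cupy as cp

column = cp.random.rand(1000)
n_bins = 4
edges = cp.percentile(column, cp.linspace(0, 100, n_bins + 1))
edges[-1] = column.max()   # pin the last edge, mirroring the cupy#4451 workaround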
Example #15
def morphological_geodesic_active_contour(
    gimage,
    iterations,
    init_level_set="circle",
    smoothing=1,
    threshold="auto",
    balloon=0,
    iter_callback=lambda x: None,
):
    """Morphological Geodesic Active Contours (MorphGAC).

    Geodesic active contours implemented with morphological operators. It can
    be used to segment objects with visible but noisy, cluttered, broken
    borders.

    Parameters
    ----------
    gimage : (M, N) or (L, M, N) array
        Preprocessed image or volume to be segmented. This is very rarely the
        original image. Instead, this is usually a preprocessed version of the
        original image that enhances and highlights the borders (or other
        structures) of the object to segment.
        `morphological_geodesic_active_contour` will try to stop the contour
        evolution in areas where `gimage` is small. See
        `morphsnakes.inverse_gaussian_gradient` as an example function to
        perform this preprocessing. Note that the quality of
        `morphological_geodesic_active_contour` might greatly depend on this
        preprocessing.
    iterations : uint
        Number of iterations to run.
    init_level_set : str, (M, N) array, or (L, M, N) array
        Initial level set. If an array is given, it will be binarized and used
        as the initial level set. If a string is given, it defines the method
        to generate a reasonable initial level set with the shape of the
        `image`. Accepted values are 'checkerboard' and 'circle'. See the
        documentation of `checkerboard_level_set` and `circle_level_set`
        respectively for details about how these level sets are created.
    smoothing : uint, optional
        Number of times the smoothing operator is applied per iteration.
        Reasonable values are around 1-4. Larger values lead to smoother
        segmentations.
    threshold : float, optional
        Areas of the image with a value smaller than this threshold will be
        considered borders. The evolution of the contour will stop in these
        areas.
    balloon : float, optional
        Balloon force to guide the contour in non-informative areas of the
        image, i.e., areas where the gradient of the image is too small to push
        the contour towards a border. A negative value will shrink the contour,
        while a positive value will expand the contour in these areas. Setting
        this to zero will disable the balloon force.
    iter_callback : function, optional
        If given, this function is called once per iteration with the current
        level set as the only argument. This is useful for debugging or for
        plotting intermediate results during the evolution.

    Returns
    -------
    out : (M, N) or (L, M, N) array
        Final segmentation (i.e., the final level set)

    See Also
    --------
    inverse_gaussian_gradient, circle_level_set, checkerboard_level_set

    Notes
    -----

    This is a version of the Geodesic Active Contours (GAC) algorithm that uses
    morphological operators instead of solving partial differential equations
    (PDEs) for the evolution of the contour. The set of morphological operators
    used in this algorithm are proved to be infinitesimally equivalent to the
    GAC PDEs (see [1]_). However, morphological operators do not suffer
    from the numerical stability issues typically found in PDEs (e.g., it is
    not necessary to find the right time step for the evolution), and are
    computationally faster.

    The algorithm and its theoretical derivation are described in [1]_.

    References
    ----------
    .. [1] A Morphological Approach to Curvature-based Evolution of Curves and
           Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
           Transactions on Pattern Analysis and Machine Intelligence (PAMI),
           2014, :DOI:`10.1109/TPAMI.2013.106`
    """

    image = gimage
    init_level_set = _init_level_set(init_level_set, image.shape)

    _check_input(image, init_level_set)

    if threshold == "auto":
        threshold = cp.percentile(image, 40)

    structure = cp.ones((3, ) * len(image.shape), dtype=cp.int8)
    dimage = cnp.gradient(image)
    # threshold_mask = image > threshold
    if balloon != 0:
        threshold_mask_balloon = image > threshold / cp.abs(balloon)

    u = (init_level_set > 0).astype(cp.int8)

    iter_callback(u)

    for _ in range(iterations):

        # Balloon
        if balloon > 0:
            aux = ndi.binary_dilation(u, structure)
        elif balloon < 0:
            aux = ndi.binary_erosion(u, structure)
        if balloon != 0:
            u[threshold_mask_balloon] = aux[threshold_mask_balloon]

        # Image attachment
        aux = cp.zeros_like(image)
        du = cnp.gradient(u)
        for el1, el2 in zip(dimage, du):
            aux += el1 * el2
        u[aux > 0] = 1
        u[aux < 0] = 0

        # Smoothing
        for _ in range(smoothing):
            u = _curvop(u)

        iter_callback(u)

    return u
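A minimal usage sketch in the scikit-image style: preprocess with an inverse Gaussian gradient, then evolve a circular level set. The cucim import path and the parameter values are assumptions:

import cupy as cp
from cucim.skimage.segmentation import (inverse_gaussian_gradient,
                                        morphological_geodesic_active_contour)

image = cp.random.rand(128, 128)             # placeholder image
gimage = inverse_gaussian_gradient(image)    # highlight borders, as the docstring recommends
seg = morphological_geodesic_active_contour(gimage, 100,
                                            init_level_set='circle',
                                            smoothing=1, balloon=-1,
                                            threshold='auto')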
Example #16
 def time_percentile(self):
     np.percentile(self.e, [25, 35, 55, 65, 75])
Example #17
def eegstats(signals, samples, statistic):

    import cupy as cp
    from scipy.stats import skew, kurtosis

    if statistic == 'mean':
        means = cp.zeros(samples)
        for i in range(len(signals)):
            means[i] = cp.mean(signals[i])
        return means

    elif statistic == 'std':
        std = cp.zeros(samples)
        for i in range(len(signals)):
            std[i] = cp.std(signals[i])
        return std

    elif statistic == 'skewness':
        skewness = cp.zeros(samples)
        for i in range(len(signals)):
            skewness[i] = skew(signals[i])
        return skewness

    elif statistic == 'kurtosis':
        kurt = cp.zeros(samples)
        for i in range(len(signals)):
            kurt[i] = kurtosis(signals[i])
        return kurt

    elif statistic == 'maximum':
        maxim = cp.zeros(samples)
        for i in range(len(signals)):
            maxim[i] = cp.amax(signals[i])
        return maxim

    elif statistic == 'minimum':
        minim = cp.zeros(samples)
        for i in range(len(signals)):
            minim[i] = cp.amin(signals[i])
        return minim
    ########
    elif statistic == 'n5':
        n5 = cp.zeros(samples)
        for i in range(len(signals)):
            n5[i] = cp.percentile(cp.asarray(signals[i]), 5)
        return n5

    elif statistic == 'n25':
        n25 = cp.zeros(samples)
        for i in range(len(signals)):
            n25[i] = cp.percentile(cp.asarray(signals[i]), 25)
        return n25

    elif statistic == 'n75':
        n75 = cp.zeros(samples)
        for i in range(len(signals)):
            n75[i] = cp.percentile(cp.asarray(signals[i]), 75)
        return n75

    elif statistic == 'n95':
        n95 = cp.zeros(samples)
        for i in range(len(signals)):
            n95[i] = cp.percentile(cp.asarray(signals[i]), 95)
        return n95

    elif statistic == 'median':
        median = cp.zeros(samples)
        for i in range(len(signals)):
            median[i] = cp.percentile(cp.asarray(signals[i]), 50)
        return median

    elif statistic == 'variance':
        variance = cp.zeros(samples)
        for i in range(len(signals)):
            variance[i] = cp.var(cp.asarray(signals[i]))
        return variance

    elif statistic == 'rms':
        rms = cp.zeros(samples)
        for i in range(len(signals)):
            rms[i] = cp.sqrt(cp.mean(cp.asarray(signals[i])**2))
        return rms
Example #18
def canny(image,
          sigma=1.,
          low_threshold=None,
          high_threshold=None,
          mask=None,
          use_quantiles=False):
    """Edge filter an image using the Canny algorithm.

    Parameters
    -----------
    image : 2D array
        Grayscale input image to detect edges on; can be of any dtype.
    sigma : float, optional
        Standard deviation of the Gaussian filter.
    low_threshold : float, optional
        Lower bound for hysteresis thresholding (linking edges).
        If None, low_threshold is set to 10% of dtype's max.
    high_threshold : float, optional
        Upper bound for hysteresis thresholding (linking edges).
        If None, high_threshold is set to 20% of dtype's max.
    mask : array, dtype=bool, optional
        Mask to limit the application of Canny to a certain area.
    use_quantiles : bool, optional
        If True then treat low_threshold and high_threshold as quantiles of the
        edge magnitude image, rather than absolute edge magnitude values. If
        True, then the thresholds must be in the range [0, 1].

    Returns
    -------
    output : 2D array (image)
        The binary edge map.

    See also
    --------
    skimage.sobel

    Notes
    -----
    The steps of the algorithm are as follows:

    * Smooth the image using a Gaussian with ``sigma`` width.

    * Apply the horizontal and vertical Sobel operators to get the gradients
      within the image. The edge strength is the norm of the gradient.

    * Thin potential edges to 1-pixel wide curves. First, find the normal
      to the edge at each point. This is done by looking at the
      signs and the relative magnitude of the X-Sobel and Y-Sobel
      to sort the points into 4 categories: horizontal, vertical,
      diagonal and antidiagonal. Then look in the normal and reverse
      directions to see if the values in either of those directions are
      greater than the point in question. Use interpolation to get a mix of
      points instead of picking the one that's the closest to the normal.

    * Perform a hysteresis thresholding: first label all points above the
      high threshold as edges. Then recursively label any point above the
      low threshold that is 8-connected to a labeled point as an edge.

    References
    -----------
    .. [1] Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
           Pattern Analysis and Machine Intelligence, 8:679-714, 1986
           :DOI:`10.1109/TPAMI.1986.4767851`
    .. [2] William Green's Canny tutorial
           https://en.wikipedia.org/wiki/Canny_edge_detector

    Examples
    --------
    >>> import cupy as cp
    >>> from cucim.skimage import feature
    >>> # Generate noisy image of a square
    >>> im = cp.zeros((256, 256))
    >>> im[64:-64, 64:-64] = 1
    >>> im += 0.2 * cp.random.rand(*im.shape)
    >>> # First trial with the Canny filter, with the default smoothing
    >>> edges1 = feature.canny(im)
    >>> # Increase the smoothing for better results
    >>> edges2 = feature.canny(im, sigma=3)
    """

    #
    # The steps involved:
    #
    # * Smooth using the Gaussian with sigma above.
    #
    # * Apply the horizontal and vertical Sobel operators to get the gradients
    #   within the image. The edge strength is the sum of the magnitudes
    #   of the gradients in each direction.
    #
    # * Find the normal to the edge at each point using the arctangent of the
    #   ratio of the Y sobel over the X sobel - pragmatically, we can
    #   look at the signs of X and Y and the relative magnitude of X vs Y
    #   to sort the points into 4 categories: horizontal, vertical,
    #   diagonal and antidiagonal.
    #
    # * Look in the normal and reverse directions to see if the values
    #   in either of those directions are greater than the point in question.
    #   Use interpolation to get a mix of points instead of picking the one
    #   that's the closest to the normal.
    #
    # * Label all points above the high threshold as edges.
    # * Recursively label any point above the low threshold that is 8-connected
    #   to a labeled point as an edge.
    #
    # Regarding masks, any point touching a masked point will have a gradient
    # that is "infected" by the masked point, so it's enough to erode the
    # mask by one and then mask the output. We also mask out the border points
    # because who knows what lies beyond the edge of the image?
    #
    check_nD(image, 2)
    dtype_max = dtype_limits(image, clip_negative=False)[1]

    if low_threshold is None:
        low_threshold = 0.1
    elif use_quantiles:
        if not (0.0 <= low_threshold <= 1.0):
            raise ValueError("Quantile thresholds must be between 0 and 1.")
    else:
        low_threshold = low_threshold / dtype_max

    if high_threshold is None:
        high_threshold = 0.2
    elif use_quantiles:
        if not (0.0 <= high_threshold <= 1.0):
            raise ValueError("Quantile thresholds must be between 0 and 1.")
    else:
        high_threshold = high_threshold / dtype_max

    _gaussian = functools.partial(gaussian, sigma=sigma)

    def fsmooth(x, mode='constant'):
        return img_as_float(_gaussian(x, mode=mode))

    if mask is None:
        smoothed = fsmooth(image, mode='reflect')
        # mask that is ones everywhere except the borders
        eroded_mask = cp.ones(image.shape, dtype=bool)
        eroded_mask[:1, :] = 0
        eroded_mask[-1:, :] = 0
        eroded_mask[:, :1] = 0
        eroded_mask[:, -1:] = 0
    else:
        smoothed = smooth_with_function_and_mask(image, fsmooth, mask)
        #
        # Make the eroded mask. Setting the border value to zero will wipe
        # out the image edges for us.
        #
        s = generate_binary_structure(2, 2)
        eroded_mask = binary_erosion(mask, s, border_value=0)

    jsobel = ndi.sobel(smoothed, axis=1)
    isobel = ndi.sobel(smoothed, axis=0)
    abs_isobel = cp.abs(isobel)
    abs_jsobel = cp.abs(jsobel)
    magnitude = cp.hypot(isobel, jsobel)
    eroded_mask = eroded_mask & (magnitude > 0)
    # TODO: implement custom kernel to compute local maxima

    #
    # --------- Find local maxima --------------
    #
    # Assign each point to have a normal of 0-45 degrees, 45-90 degrees,
    # 90-135 degrees and 135-180 degrees.
    #
    local_maxima = cp.zeros(image.shape, bool)

    isobel_gt_0 = isobel >= 0
    jsobel_gt_0 = jsobel >= 0
    isobel_lt_0 = isobel <= 0
    jsobel_lt_0 = jsobel <= 0
    abs_isobel_lt_jsobel = abs_isobel <= abs_jsobel
    abs_isobel_gt_jsobel = abs_isobel >= abs_jsobel

    # ----- 0 to 45 degrees ------
    pts_plus = isobel_gt_0 & jsobel_gt_0
    pts_minus = isobel_lt_0 & jsobel_lt_0
    pts_tmp = (pts_plus | pts_minus) & eroded_mask
    pts = pts_tmp & abs_isobel_gt_jsobel
    # Get the magnitudes shifted left to make a matrix of the points to the
    # right of pts. Similarly, shift left and down to get the points to the
    # top right of pts.

    c1 = magnitude[1:, :][pts[:-1, :]]
    c2 = magnitude[1:, 1:][pts[:-1, :-1]]
    m = magnitude[pts]
    w = abs_jsobel[pts] / abs_isobel[pts]
    c_plus = _fused_comparison(w, c1, c2, m)
    c1 = magnitude[:-1, :][pts[1:, :]]
    c2 = magnitude[:-1, :-1][pts[1:, 1:]]
    c_minus = _fused_comparison(w, c1, c2, m)
    local_maxima[pts] = c_plus & c_minus
    # ----- 45 to 90 degrees ------
    # Mix diagonal and vertical
    #
    pts = pts_tmp & abs_isobel_lt_jsobel
    c1 = magnitude[:, 1:][pts[:, :-1]]
    c2 = magnitude[1:, 1:][pts[:-1, :-1]]
    m = magnitude[pts]
    w = abs_isobel[pts] / abs_jsobel[pts]
    c_plus = _fused_comparison(w, c1, c2, m)
    c1 = magnitude[:, :-1][pts[:, 1:]]
    c2 = magnitude[:-1, :-1][pts[1:, 1:]]
    c_minus = _fused_comparison(w, c1, c2, m)
    local_maxima[pts] = c_plus & c_minus
    # ----- 90 to 135 degrees ------
    # Mix anti-diagonal and vertical
    #
    pts_plus = isobel_lt_0 & jsobel_gt_0
    pts_minus = isobel_gt_0 & jsobel_lt_0
    pts_tmp = (pts_plus | pts_minus) & eroded_mask
    pts = pts_tmp & abs_isobel_lt_jsobel
    c1a = magnitude[:, 1:][pts[:, :-1]]
    c2a = magnitude[:-1, 1:][pts[1:, :-1]]
    m = magnitude[pts]
    w = abs_isobel[pts] / abs_jsobel[pts]
    c_plus = _fused_comparison(w, c1a, c2a, m)
    c1 = magnitude[:, :-1][pts[:, 1:]]
    c2 = magnitude[1:, :-1][pts[:-1, 1:]]
    c_minus = _fused_comparison(w, c1, c2, m)
    local_maxima[pts] = c_plus & c_minus
    # ----- 135 to 180 degrees ------
    # Mix anti-diagonal and anti-horizontal
    #
    pts = pts_tmp & abs_isobel_gt_jsobel
    c1 = magnitude[:-1, :][pts[1:, :]]
    c2 = magnitude[:-1, 1:][pts[1:, :-1]]
    m = magnitude[pts]
    w = abs_jsobel[pts] / abs_isobel[pts]
    c_plus = _fused_comparison(w, c1, c2, m)
    c1 = magnitude[1:, :][pts[:-1, :]]
    c2 = magnitude[1:, :-1][pts[:-1, 1:]]
    c_minus = _fused_comparison(w, c1, c2, m)
    local_maxima[pts] = c_plus & c_minus

    #
    # ---- If use_quantiles is set then calculate the thresholds to use
    #
    if use_quantiles:
        high_threshold = cp.percentile(magnitude, 100.0 * high_threshold)
        low_threshold = cp.percentile(magnitude, 100.0 * low_threshold)

    #
    # ---- Create two masks at the two thresholds.
    #
    high_mask = local_maxima & (magnitude >= high_threshold)
    low_mask = local_maxima & (magnitude >= low_threshold)

    #
    # Segment the low-mask, then only keep low-segments that have
    # some high_mask component in them
    #
    labels, count = ndi.label(low_mask, structure=cp.ones((3, 3), bool))
    if count == 0:
        return low_mask

    nonzero_sums = cp.unique(labels[high_mask])
    good_label = cp.zeros((count + 1, ), bool)
    good_label[nonzero_sums] = True
    output_mask = good_label[labels]
    return output_mask
Example #19
def binary_blobs(length=512, blob_size_fraction=0.1, n_dim=2,
                 volume_fraction=0.5, seed=None):
    """
    Generate synthetic binary image with several rounded blob-like objects.

    Parameters
    ----------
    length : int, optional
        Linear size of output image.
    blob_size_fraction : float, optional
        Typical linear size of blob, as a fraction of ``length``, should be
        smaller than 1.
    n_dim : int, optional
        Number of dimensions of output image.
    volume_fraction : float, default 0.5
        Fraction of image pixels covered by the blobs (where the output is 1).
        Should be in [0, 1].
    seed : int, optional
        Seed to initialize the random number generator.
        If `None`, a random seed from the operating system is used.

    Returns
    -------
    blobs : ndarray of bools
        Output binary image

    Notes
    -----
    Warning: CuPy does not give identical randomly generated numbers as NumPy,
    so using a specific seed here will not give an identical pattern to the
    scikit-image implementation.

    The behavior for a given random seed may also change across CuPy major
    versions.
    See: https://docs.cupy.dev/en/stable/reference/random.html

    Examples
    --------
    >>> from cucim.skimage import data
    >>> # tiny size (5, 5)
    >>> blobs = data.binary_blobs(length=5, blob_size_fraction=0.2, seed=1)
    >>> # larger size
    >>> blobs = data.binary_blobs(length=256, blob_size_fraction=0.1)
    >>> # Finer structures
    >>> blobs = data.binary_blobs(length=256, blob_size_fraction=0.05)
    >>> # Blobs cover a smaller volume fraction of the image
    >>> blobs = data.binary_blobs(length=256, volume_fraction=0.3)
    """
    # filters is quite an expensive import since it imports all of scipy.signal
    # We lazy import here
    from ..filters import gaussian

    rs = cp.random.RandomState(seed)
    shape = tuple([length] * n_dim)
    mask = cp.zeros(shape)
    n_pts = max(int(1. / blob_size_fraction) ** n_dim, 1)
    points = (length * rs.rand(n_dim, n_pts)).astype(int)
    mask[tuple(indices for indices in points)] = 1
    mask = gaussian(mask, sigma=0.25 * length * blob_size_fraction)
    threshold = cp.percentile(mask, 100 * (1 - volume_fraction))
    return cp.logical_not(mask < threshold)
Example #20
 def time_quartile(self):
     np.percentile(self.e, [25, 75])