Example #1
    def prepare_train_data(self):
        batch_size = self.train_batch_size

        is_neg = cp.logical_not(self._train_labels)

        # Do not store verification matrix if using the negatives generation shortcut
        neg_mat = None if self.use_neg_trick else cp.array(self._neg_mat)

        # If there are no negative samples in the local portion of the training data, do nothing
        any_neg = cp.any(is_neg)
        if any_neg:
            self._train_users[is_neg], self._train_items[is_neg] = generate_negatives(
                self._train_users[is_neg], neg_mat, self.num_items, use_trick=self.use_neg_trick
            )

        shuffled_order = cp.random.permutation(self._train_users.shape[0])
        self._train_users = self._train_users[shuffled_order]
        self._train_items = self._train_items[shuffled_order]
        self._train_labels = self._train_labels[shuffled_order]
        is_neg = cp.logical_not(self._train_labels)
        
        # Manually create batches
        split_indices = np.arange(batch_size, self._train_users.shape[0], batch_size)
        self.train_users_batches = np.split(self._train_users, split_indices)
        self.train_items_batches = np.split(self._train_items, split_indices)
        self.train_labels_batches = np.split(self._train_labels, split_indices)
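
Usage note (not from the source project): the manual batching above works because CuPy implements NumPy's __array_function__ protocol, so np.split on a CuPy array dispatches to CuPy and returns CuPy arrays. A minimal sketch, assuming a NumPy/CuPy pair with that protocol enabled:

import numpy as np
import cupy as cp

users = cp.arange(10)
batch_size = 4
split_indices = np.arange(batch_size, users.shape[0], batch_size)  # [4, 8]
batches = np.split(users, split_indices)  # CuPy arrays of sizes 4, 4, 2
print([int(b.shape[0]) for b in batches])  # [4, 4, 2]
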
Example #2
File: _morphology.py Project: takagi/cupy
def binary_hit_or_miss(input, structure1=None, structure2=None, output=None,
                       origin1=0, origin2=None):
    """
    Multidimensional binary hit-or-miss transform.

    The hit-or-miss transform finds the locations of a given pattern
    inside the input image.

    Args:
        input (cupy.ndarray): Binary image where a pattern is to be detected.
        structure1 (cupy.ndarray, optional): Part of the structuring element to
            be fitted to the foreground (non-zero elements) of ``input``. If no
            value is provided, a structure of square connectivity 1 is chosen.
        structure2 (cupy.ndarray, optional): Second part of the structuring
            element that has to miss completely the foreground. If no value is
            provided, the complementary of ``structure1`` is taken.
        output (cupy.ndarray, dtype or None, optional): Array of the same shape
            as input, into which the output is placed. By default, a new array
            is created.
        origin1 (int or tuple of ints, optional): Placement of the first part
            of the structuring element ``structure1``, by default 0 for a
            centered structure.
        origin2 (int or tuple of ints or None, optional): Placement of the
            second part of the structuring element ``structure2``, by default 0
            for a centered structure. If a value is provided for ``origin1``
            and not for ``origin2``, then ``origin2`` is set to ``origin1``.

    Returns:
        cupy.ndarray: Hit-or-miss transform of ``input`` with the given
        structuring element (``structure1``, ``structure2``).

    .. warning::

        This function may synchronize the device.

    .. seealso:: :func:`scipy.ndimage.binary_hit_or_miss`
    """
    if structure1 is None:
        structure1 = generate_binary_structure(input.ndim, 1)
    if structure2 is None:
        structure2 = cupy.logical_not(structure1)
    origin1 = _util._fix_sequence_arg(origin1, input.ndim, 'origin1', int)
    if origin2 is None:
        origin2 = origin1
    else:
        origin2 = _util._fix_sequence_arg(origin2, input.ndim, 'origin2', int)

    tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1, 0,
                           False)
    inplace = isinstance(output, cupy.ndarray)
    result = _binary_erosion(input, structure2, 1, None, output, 0, origin2, 1,
                             False)
    if inplace:
        cupy.logical_not(output, output)
        cupy.logical_and(tmp1, output, output)
    else:
        cupy.logical_not(result, result)
        return cupy.logical_and(tmp1, result)
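
A usage sketch for the function above (assuming cupyx.scipy.ndimage exports binary_hit_or_miss): with a single-pixel structure1, the complementary structure2 requires all eight neighbours to be background, so the transform flags isolated foreground pixels.

import cupy
from cupyx.scipy import ndimage

a = cupy.zeros((7, 7), dtype=bool)
a[1, 1] = a[3, 2] = a[5, 5] = True

# center pixel must hit the foreground; its complement must miss it
structure1 = cupy.array([[0, 0, 0],
                         [0, 1, 0],
                         [0, 0, 0]], dtype=bool)
hits = ndimage.binary_hit_or_miss(a, structure1=structure1)
print(hits.astype(int))  # 1 at (1, 1), (3, 2) and (5, 5)
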
Example #3
def _preprocess(labels):

    label_values, inv_idx = cp.unique(labels, return_inverse=True)
    if not (label_values == 0).any():
        warn('Random walker only segments unlabeled areas, where '
             'labels == 0. No zero valued areas in labels were '
             'found. Returning provided labels.',
             stacklevel=2)

        return labels, None, None, None, None

    # If some labeled pixels are isolated inside pruned zones, prune them
    # as well and keep the labels for the final output

    null_mask = labels == 0
    pos_mask = labels > 0
    mask = labels >= 0

    fill = ndi.binary_propagation(null_mask, mask=mask)
    isolated = cp.logical_and(pos_mask, cp.logical_not(fill))

    pos_mask[isolated] = False

    # If the array has pruned zones, be sure that no isolated pixels
    # exist between pruned zones (they could not be determined)
    if label_values[0] < 0 or cp.any(isolated):  # synchronize!
        isolated = cp.logical_and(
            cp.logical_not(ndi.binary_propagation(pos_mask, mask=mask)),
            null_mask)

        labels[isolated] = -1
        if cp.all(isolated[null_mask]):
            warn('All unlabeled pixels are isolated, they could not be '
                 'determined by the random walker algorithm.',
                 stacklevel=2)
            return labels, None, None, None, None

        mask[isolated] = False
        mask = cp.atleast_3d(mask)

    else:
        mask = None

    # Reorder label values to have consecutive integers (no gaps)
    zero_idx = cp.searchsorted(label_values, cp.array(0))
    labels = cp.atleast_3d(inv_idx.reshape(labels.shape) - zero_idx)

    nlabels = label_values[zero_idx + 1:].shape[0]

    inds_isolated_seeds = cp.nonzero(isolated)
    isolated_values = labels[inds_isolated_seeds]

    return labels, nlabels, mask, inds_isolated_seeds, isolated_values
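
The relabeling step near the end can be tried in isolation: cp.unique(..., return_inverse=True) plus searchsorted maps arbitrary label values to consecutive integers while keeping 0 at 0. A minimal sketch:

import cupy as cp

labels = cp.array([[-3, 0, 5],
                   [5, 9, 0]])
label_values, inv_idx = cp.unique(labels, return_inverse=True)  # [-3, 0, 5, 9]
zero_idx = cp.searchsorted(label_values, cp.array(0))           # index of 0 -> 1
relabeled = inv_idx.reshape(labels.shape) - zero_idx
print(relabeled)  # [[-1  0  1], [ 1  2  0]]: -3 -> -1, 5 -> 1, 9 -> 2
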
Example #4
def remove_small_holes_gpu(
    mask: cupy.ndarray,
    area_threshold: int,
) -> None:
    """ See scikit-image remove_small_holes()

    N.B.
        Input array must be a labeled mask.
        This is an in-place operation.
    """
    cupy.logical_not(mask, out=mask)
    remove_small_objects_gpu(mask, area_threshold)
    cupy.logical_not(mask, out=mask)
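
The invert/remove/invert pattern turns hole filling into small-object removal. A self-contained variant of the same idea (a sketch assuming cuCIM is installed; remove_small_objects_gpu above is this project's own helper):

import cupy
from cucim.skimage.morphology import remove_small_objects

def fill_small_holes(mask: cupy.ndarray, area_threshold: int) -> cupy.ndarray:
    # Holes in the foreground are objects in the inverted mask,
    # so removing small objects there fills small holes.
    inverted = cupy.logical_not(mask)
    cleaned = remove_small_objects(inverted, min_size=area_threshold)
    return cupy.logical_not(cleaned)
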
Example #5
def binary_fill_holes(input, structure=None, output=None, origin=0):
    """Fill the holes in binary objects.

    Args:
        input (cupy.ndarray): N-D binary array with holes to be filled.
        structure (cupy.ndarray, optional):  Structuring element used in the
            computation; large-size elements make computations faster but may
            miss holes separated from the background by thin regions. The
            default element (with a square connectivity equal to one) yields
            the intuitive result where all holes in the input have been filled.
        output (cupy.ndarray, dtype or None, optional): Array of the same shape
            as input, into which the output is placed. By default, a new array
            is created.
        origin (int, tuple of ints, optional): Position of the structuring
            element.

    Returns:
        cupy.ndarray: Transformation of the initial image ``input`` where holes
            have been filled.

    .. warning::

        This function may synchronize the device.

    .. seealso:: :func:`scipy.ndimage.binary_fill_holes`
    """
    mask = cupy.logical_not(input)
    tmp = cupy.zeros(mask.shape, bool)
    inplace = isinstance(output, cupy.ndarray)
    # TODO (grlee77): set brute_force=False below once implemented
    if inplace:
        binary_dilation(tmp,
                        structure,
                        -1,
                        mask,
                        output,
                        1,
                        origin,
                        brute_force=True)
        cupy.logical_not(output, output)
    else:
        output = binary_dilation(tmp,
                                 structure,
                                 -1,
                                 mask,
                                 None,
                                 1,
                                 origin,
                                 brute_force=True)
        cupy.logical_not(output, output)
        return output
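
A quick check of the function above (assuming cupyx.scipy.ndimage exports binary_fill_holes): a square with one interior hole.

import cupy
from cupyx.scipy import ndimage

square = cupy.zeros((7, 7), dtype=bool)
square[1:6, 1:6] = True
square[3, 3] = False               # poke a hole
filled = ndimage.binary_fill_holes(square)
assert bool(filled[3, 3])          # the hole is filled again
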
Example #6
    def image_complexity(self, X, masks):
        # complexity threshold
        threshold = 25.0
        # reshape
        #masks = cp.reshape(masks, masks.shape[:-1])

        R, G, B = X[:, :, :, 0:1], X[:, :, :, 1:2], X[:, :, :, 2:]

        # compute rg = R - G
        rg = cp.absolute(R - G)

        # compute yb = 0.5 * (R + G) - B
        yb = cp.absolute(0.5 * (R + G) - B)

        _masks = cp.logical_not(masks)
        # compute the mean and standard deviation of both `rg` and `yb`
        # no masked_where on cupy
        rb_masked = np.ma.masked_where(_masks, rg)
        (rb_mean, rb_std) = (cp.mean(rb_masked, axis=(1, 2, 3)),
                             cp.std(rb_masked, axis=(1, 2, 3)))

        yb_masked = np.ma.masked_where(_masks, yb)
        (yb_mean, yb_std) = (cp.mean(yb_masked, axis=(1, 2, 3)),
                             cp.std(yb_masked, axis=(1, 2, 3)))

        # combine the mean and standard deviations
        std_root = cp.sqrt((rb_std**2) + (yb_std**2))
        mean_root = cp.sqrt((rb_mean**2) + (yb_mean**2))

        # derive the "colorfulness" metric and return it
        complexity = std_root + (0.3 * mean_root)

        # adjust values
        print('image_complexity Done.')
        return (complexity >= threshold).astype(cp.uint8)
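
The statistic above is the Hasler-Süsstrunk colorfulness metric. Without the mask handling it reduces to a few lines; a plain-CuPy sketch for a single (H, W, 3) image:

import cupy as cp

def colorfulness(img):
    # img: (H, W, 3) float array in RGB order
    R, G, B = img[..., 0], img[..., 1], img[..., 2]
    rg = cp.abs(R - G)
    yb = cp.abs(0.5 * (R + G) - B)
    std_root = cp.sqrt(cp.std(rg) ** 2 + cp.std(yb) ** 2)
    mean_root = cp.sqrt(cp.mean(rg) ** 2 + cp.mean(yb) ** 2)
    return std_root + 0.3 * mean_root
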
Example #7
File: misc.py Project: kyle0x54/umtk
def remove_small_holes_gpu(
    mask: cupy.ndarray,
    area_threshold: int,
) -> None:
    """ See scikit-image remove_small_holes()

    N.B.
        Input array must be a binary mask (bool type) or
        labeled mask (int type).
        This is an in-place operation.
    """
    _check_dtype_supported(mask)

    cupy.logical_not(mask, out=mask)
    remove_small_objects_gpu(mask, area_threshold)
    cupy.logical_not(mask, out=mask)
Example #8
def generate_negatives(neg_users, true_mat, item_range, sort=False, use_trick=False):
    """ 
    Generate negative samples for data augmentation
    """
    neg_u = []
    neg_i = []

    # If using the shortcut, generate negative items without checking if the associated
    # user has interacted with it. Speeds up training significantly with very low impact
    # on accuracy.
    if use_trick:
        neg_items = cp.random.randint(0, high=item_range, size=neg_users.shape[0])
        return neg_users, neg_items

    # Otherwise, generate negative items, check if associated user has interacted with it,
    # then generate a new one if true
    while len(neg_users) > 0:
        neg_items = cp.random.randint(0, high=item_range, size=neg_users.shape[0])
        neg_mask = true_mat[neg_users, neg_items]
        neg_u.append(neg_users[neg_mask])
        neg_i.append(neg_items[neg_mask])

        neg_users = neg_users[cp.logical_not(neg_mask)]

    neg_users = cp.concatenate(neg_u)
    neg_items = cp.concatenate(neg_i)

    if not sort:
        return neg_users, neg_items

    sorted_users = cp.sort(neg_users)
    sort_indices = cp.argsort(neg_users)

    return sorted_users, neg_items[sort_indices]
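
A tiny run of the rejection loop above (hypothetical data; true_mat[u, i] is True where item i is an acceptable negative for user u, matching how the mask is used in the loop):

import cupy as cp

num_items = 6
# True where (user, item) is a valid negative (no observed interaction)
true_mat = cp.ones((4, num_items), dtype=bool)
true_mat[0, 1] = true_mat[2, 3] = False        # observed interactions

users = cp.array([0, 0, 2, 3])
neg_users, neg_items = generate_negatives(users, true_mat, num_items)
assert bool(true_mat[neg_users, neg_items].all())  # only valid negatives remain
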
Example #9
File: csr.py Project: toslunar/cupy
    def _comparison(self, other, op, op_name):
        if _util.isscalarlike(other):
            data = cupy.asarray(other, dtype=self.dtype).reshape(1)
            if numpy.isnan(data[0]):
                if op_name == '_ne_':
                    return csr_matrix(cupy.ones(self.shape, dtype=numpy.bool_))
                else:
                    return csr_matrix(self.shape, dtype=numpy.bool_)
            indices = cupy.zeros((1, ), dtype=numpy.int32)
            indptr = cupy.arange(2, dtype=numpy.int32)
            other = csr_matrix((data, indices, indptr), shape=(1, 1))
            return binopt_csr(self, other, op_name)
        elif _util.isdense(other):
            return op(self.todense(), other)
        elif isspmatrix_csr(other):
            self.sum_duplicates()
            other.sum_duplicates()
            if op_name in ('_ne_', '_lt_', '_gt_'):
                return binopt_csr(self, other, op_name)

            warnings.warn(
                "Comparing sparse matrices using ==, <=, and >= is "
                "inefficient, try using !=, <, or > instead.",
                SparseEfficiencyWarning)
            if op_name == '_eq_':
                opposite_op_name = '_ne_'
            elif op_name == '_le_':
                opposite_op_name = '_gt_'
            elif op_name == '_ge_':
                opposite_op_name = '_lt_'
            res = binopt_csr(self, other, opposite_op_name)
            out = cupy.logical_not(res.toarray())
            return csr_matrix(out)
        raise NotImplementedError
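
Behavior sketch for the comparison path above (assuming cupyx.scipy.sparse): == on two CSR matrices is computed as logical_not of the sparse-friendly !=, and emits SparseEfficiencyWarning because the result is dense in the equal positions.

import cupy
from cupyx.scipy import sparse

a = sparse.csr_matrix(cupy.array([[1., 0.], [0., 2.]]))
b = sparse.csr_matrix(cupy.array([[1., 0.], [3., 2.]]))
print((a != b).toarray())  # cheap: few nonzero (True) entries
print((a == b).toarray())  # warns; mostly-True result built via logical_not
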
Example #10
    def transform(self, X) -> SparseCumlArray:
        """Impute all missing values in X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The input data to complete.
        """
        check_is_fitted(self)

        X = self._validate_input(X, in_fit=False)
        X_indicator = super()._transform_indicator(X)

        statistics = self.statistics_

        if X.shape[1] != statistics.shape[0]:
            raise ValueError("X has %d features per sample, expected %d" %
                             (X.shape[1], self.statistics_.shape[0]))

        # Delete the invalid columns if strategy is not constant
        if self.strategy == "constant":
            valid_statistics = statistics
        else:
            # same as np.isnan but also works for object dtypes
            invalid_mask = _get_mask(statistics, np.nan)
            valid_mask = np.logical_not(invalid_mask)
            valid_statistics = statistics[valid_mask]
            valid_statistics_indexes = np.flatnonzero(valid_mask)

            if invalid_mask.any():
                missing = np.arange(X.shape[1])[invalid_mask]
                if self.verbose:
                    warnings.warn("Deleting features without "
                                  "observed values: %s" % missing)
                X = X[:, valid_statistics_indexes]

        # Do actual imputation
        if sparse.issparse(X):
            if self.missing_values == 0:
                raise ValueError("Imputation not possible when missing_values "
                                 "== 0 and input is sparse. Provide a dense "
                                 "array instead.")
            else:
                mask = _get_mask(X.data, self.missing_values)
                indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=int),
                                    np.diff(X.indptr).tolist())[mask]

                X.data[mask] = valid_statistics[indexes].astype(X.dtype,
                                                                copy=False)
        else:
            mask = _get_mask(X, self.missing_values)
            if self.strategy == "constant":
                X[mask] = valid_statistics[0]
            else:
                for i, vi in enumerate(valid_statistics_indexes):
                    feature_idxs = np.flatnonzero(mask[:, vi])
                    X[feature_idxs, vi] = valid_statistics[i]

        X = super()._concatenate_indicator(X, X_indicator)
        return X
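
The column-dropping logic in isolation (a NumPy sketch matching the mask arithmetic above):

import numpy as np

statistics = np.array([1.5, np.nan, 3.0, np.nan])
invalid_mask = np.isnan(statistics)
valid_mask = np.logical_not(invalid_mask)
valid_statistics = statistics[valid_mask]              # [1.5, 3.0]
valid_statistics_indexes = np.flatnonzero(valid_mask)  # [0, 2]
# features 1 and 3 had no observed values and would be dropped from X
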
Example #11
File: test_canny.py Project: grlee77/cucim
 def test_01_01_circle(self):
     """Test that the Canny filter finds the outlines of a circle"""
     i, j = cp.mgrid[-200:200, -200:200].astype(float) / 200
     c = cp.abs(cp.sqrt(i * i + j * j) - 0.5) < 0.02
     result = feature.canny(c.astype(float), 4, 0, 0,
                            cp.ones(c.shape, bool))
     #
     # erode and dilate the circle to get rings that should contain the
     # outlines
     #
     # TODO: grlee77: only implemented brute_force=True, so added that to
     #                these tests
     cd = binary_dilation(c, iterations=3, brute_force=True)
     ce = binary_erosion(c, iterations=3, brute_force=True)
     cde = cp.logical_and(cd, cp.logical_not(ce))
     self.assertTrue(cp.all(cde[result]))
     #
     # The circle has a radius of 100. There are two rings here, one
     # for the inside edge and one for the outside. So that's
     # 100 * 2 * 2 * 3 for those places where pi is still 3.
     # The edge contains both pixels if there's a tie, so we
     # bump the count a little.
     point_count = cp.sum(result)
     self.assertTrue(point_count > 1200)
     self.assertTrue(point_count < 1600)
Example #12
def logical_not(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.logical_not <numpy.logical_not>`.

    See its docstring for more information.
    """
    if x.dtype not in _boolean_dtypes:
        raise TypeError("Only boolean dtypes are allowed in logical_not")
    return Array._new(np.logical_not(x._array))
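
Usage sketch for the wrapper above (assuming a NumPy version that still ships the provisional numpy.array_api namespace):

import numpy.array_api as nxp

x = nxp.asarray([True, False])
print(nxp.logical_not(x))            # [False  True]
# nxp.logical_not(nxp.asarray([1]))  # would raise TypeError: non-boolean dtype
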
Example #13
    def extract_color(self, X, masks):
        # reshape
        masks_ = cp.reshape(masks, masks.shape[:-1]).astype(bool)
        # extract color
        num_X = len(X)
        # CuPy has no string dtype; keep the per-image color labels on the host
        color = np.empty((num_X, ), dtype=object)
        bg_color = np.empty((num_X, ), dtype=object)

        # test
        for i, (x, m) in enumerate(zip(X, masks_)):
            #print(self.nearest_color(colors, cp.mean(X[i][masks[i]], axis=0).astype(int)))
            color[i] = self.nearest_color(
                cp.mean(x[m], axis=0).astype(cp.uint8))
            #print(self.nearest_color(colors, cp.mean(X[i][cp.logical_not(masks[i])], axis=0).astype(int)))
            bg_color[i] = self.nearest_color(
                cp.mean(x[cp.logical_not(m)], axis=0).astype(cp.uint8))

        print('extract_color Done.')
        return color, bg_color
Example #14
 def test_01_02_circle_with_noise(self):
     """Test that the Canny filter finds the circle outlines
      in a noisy image"""
     cp.random.seed(0)
     i, j = cp.mgrid[-200:200, -200:200].astype(float) / 200
     c = cp.abs(cp.sqrt(i * i + j * j) - 0.5) < 0.02
     cf = c.astype(float) * 0.5 + cp.random.uniform(size=c.shape) * 0.5
     result = F.canny(cf, 4, 0.1, 0.2, cp.ones(c.shape, bool))
     #
     # erode and dilate the circle to get rings that should contain the
     # outlines
     #
     cd = binary_dilation(c, iterations=4, brute_force=True)
     ce = binary_erosion(c, iterations=4, brute_force=True)
     cde = cp.logical_and(cd, cp.logical_not(ce))
     self.assertTrue(cp.all(cde[result]))
     point_count = cp.sum(result)
     self.assertTrue(point_count > 1200)
     self.assertTrue(point_count < 1600)
Example #15
def _get_anchor_positive_triplet_mask(labels):
    """Return a 2D mask where mask[a, p] is True iff a and p are distinct and have same label.

    Args:
        labels: xp.array with shape=(batch_size)

    Returns:
        mask: xp.array with shape=(batch_size, batch_size), dtype=xp.bool
    """
    # Check that i and j are distinct
    indices_not_equal = xp.logical_not(
        xp.diag(xp.ones(labels.size, dtype=bool)))

    # Check if labels[i] == labels[j]
    # By using broadcasting:
    # Left side's shape (1, batch_size) => (*, batch_size)
    # Right side's shape (batch_size, 1) => (batch_size, *)
    labels_equal = xp.expand_dims(labels, axis=0) == xp.expand_dims(labels,
                                                                    axis=1)

    # Combine the two masks
    return indices_not_equal & labels_equal
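
A quick check of the mask (assuming xp is bound to CuPy, as the module's usage suggests):

import cupy as xp

labels = xp.asarray([0, 1, 0, 2])
mask = _get_anchor_positive_triplet_mask(labels)
# mask[0, 2] and mask[2, 0] are True (distinct indices, same label);
# the diagonal is False because anchor and positive must be distinct.
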
Example #16
def binary_blobs(length=512, blob_size_fraction=0.1, n_dim=2,
                 volume_fraction=0.5, seed=None):
    """
    Generate synthetic binary image with several rounded blob-like objects.

    Parameters
    ----------
    length : int, optional
        Linear size of output image.
    blob_size_fraction : float, optional
        Typical linear size of blob, as a fraction of ``length``, should be
        smaller than 1.
    n_dim : int, optional
        Number of dimensions of output image.
    volume_fraction : float, default 0.5
        Fraction of image pixels covered by the blobs (where the output is 1).
        Should be in [0, 1].
    seed : int, optional
        Seed to initialize the random number generator.
        If `None`, a random seed from the operating system is used.

    Returns
    -------
    blobs : ndarray of bools
        Output binary image

    Notes
    -----
    Warning: CuPy does not give identical randomly generated numbers as NumPy,
    so using a specific seed here will not give an identical pattern to the
    scikit-image implementation.

    The behavior for a given random seed may also change across CuPy major
    versions.
    See: https://docs.cupy.dev/en/stable/reference/random.html

    Examples
    --------
    >>> from cucim.skimage import data
    >>> # tiny size (5, 5)
    >>> blobs = data.binary_blobs(length=5, blob_size_fraction=0.2, seed=1)
    >>> # larger size
    >>> blobs = data.binary_blobs(length=256, blob_size_fraction=0.1)
    >>> # Finer structures
    >>> blobs = data.binary_blobs(length=256, blob_size_fraction=0.05)
    >>> # Blobs cover a smaller volume fraction of the image
    >>> blobs = data.binary_blobs(length=256, volume_fraction=0.3)
    """
    # filters is quite an expensive import since it imports all of scipy.signal
    # We lazy import here
    from ..filters import gaussian

    rs = cp.random.RandomState(seed)
    shape = tuple([length] * n_dim)
    mask = cp.zeros(shape)
    n_pts = max(int(1. / blob_size_fraction) ** n_dim, 1)
    points = (length * rs.rand(n_dim, n_pts)).astype(int)
    mask[tuple(indices for indices in points)] = 1
    mask = gaussian(mask, sigma=0.25 * length * blob_size_fraction)
    threshold = cp.percentile(mask, 100 * (1 - volume_fraction))
    return cp.logical_not(mask < threshold)
Example #17
def cross_correlate_masked(arr1,
                           arr2,
                           m1,
                           m2,
                           mode="full",
                           axes=(-2, -1),
                           overlap_ratio=0.3):
    """
    Masked normalized cross-correlation between arrays.

    Parameters
    ----------
    arr1 : ndarray
        First array.
    arr2 : ndarray
        Seconds array. The dimensions of `arr2` along axes that are not
        transformed should be equal to that of `arr1`.
    m1 : ndarray
        Mask of `arr1`. The mask should evaluate to `True`
        (or 1) on valid pixels. `m1` should have the same shape as `arr1`.
    m2 : ndarray
        Mask of `arr2`. The mask should evaluate to `True`
        (or 1) on valid pixels. `m2` should have the same shape as `arr2`.
    mode : {'full', 'same'}, optional
        'full':
            This returns the convolution at each point of overlap. At
            the end-points of the convolution, the signals do not overlap
            completely, and boundary effects may be seen.
        'same':
            The output is the same size as `arr1`, centered with respect
            to the 'full' output. Boundary effects are less prominent.
    axes : tuple of ints, optional
        Axes along which to compute the cross-correlation.
    overlap_ratio : float, optional
        Minimum allowed overlap ratio between images. The correlation for
        translations corresponding with an overlap ratio lower than this
        threshold will be ignored. A lower `overlap_ratio` leads to smaller
        maximum translation, while a higher `overlap_ratio` leads to greater
        robustness against spurious matches due to small overlap between
        masked images.

    Returns
    -------
    out : ndarray
        Masked normalized cross-correlation.

    Raises
    ------
    ValueError : if correlation `mode` is not valid, or array dimensions along
        non-transformation axes are not equal.

    References
    ----------
    .. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain.
           IEEE Transactions on Image Processing, vol. 21(5),
           pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402`
    .. [2] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and
           Pattern Recognition, pp. 2918-2925 (2010).
           :DOI:`10.1109/CVPR.2010.5540032`
    """
    if mode not in {"full", "same"}:
        raise ValueError("Correlation mode {} is not valid.".format(mode))

    if arr1.dtype.kind == "c" or arr2.dtype.kind == "c":
        raise ValueError("complex-valued arr1, arr2 are not supported")
    fixed_image = cp.asarray(arr1, dtype=np.float64)
    fixed_mask = cp.asarray(m1, dtype=bool)
    moving_image = cp.asarray(arr2, dtype=np.float64)
    moving_mask = cp.asarray(m2, dtype=bool)
    eps = np.finfo(np.float64).eps

    # Array dimensions along non-transformation axes should be equal.
    all_axes = set(range(fixed_image.ndim))
    for axis in all_axes - set(axes):
        if fixed_image.shape[axis] != moving_image.shape[axis]:
            raise ValueError(
                "Array shapes along non-transformation axes should be "
                "equal, but dimensions along axis {a} are not".format(a=axis))

    # Determine final size along transformation axes
    # Note that it might be faster to compute Fourier transform in a slightly
    # larger shape (`fast_shape`). Then, after all fourier transforms are done,
    # we slice back to`final_shape` using `final_slice`.
    final_shape = list(arr1.shape)
    for axis in axes:
        final_shape[axis] = (fixed_image.shape[axis] +
                             moving_image.shape[axis] - 1)
    final_shape = tuple(final_shape)
    final_slice = tuple([slice(0, int(sz)) for sz in final_shape])

    # Extend transform axes to the next fast length (i.e. multiple of 3, 5, or
    # 7)
    fast_shape = tuple([next_fast_len(final_shape[ax]) for ax in axes])

    # We use numpy.fft or the new scipy.fft because they allow leaving the
    # transform axes unchanged which was not possible with scipy.fftpack's
    # fftn/ifftn in older versions of SciPy.
    # E.g. arr shape (2, 3, 7), transform along axes (0, 1) with shape (4, 4)
    # results in arr_fft shape (4, 4, 7)
    fft = partial(fftmodule.fftn, s=fast_shape, axes=axes)
    ifft = partial(fftmodule.ifftn, s=fast_shape, axes=axes)

    fixed_image[cp.logical_not(fixed_mask)] = 0.0
    moving_image[cp.logical_not(moving_mask)] = 0.0

    # N-dimensional analog to rotation by 180deg is flip over all relevant axes.
    # See [1] for discussion.
    rotated_moving_image = _flip(moving_image, axes=axes)
    rotated_moving_mask = _flip(moving_mask, axes=axes)

    fixed_fft = fft(fixed_image)
    rotated_moving_fft = fft(rotated_moving_image)
    fixed_mask_fft = fft(fixed_mask)
    rotated_moving_mask_fft = fft(rotated_moving_mask)

    # Calculate overlap of masks at every point in the convolution.
    # Locations with high overlap should not be taken into account.
    number_overlap_masked_px = cp.real(
        ifft(rotated_moving_mask_fft * fixed_mask_fft))
    number_overlap_masked_px[:] = cp.around(number_overlap_masked_px)
    number_overlap_masked_px[:] = cp.fmax(number_overlap_masked_px, eps)
    masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft)
    masked_correlated_rotated_moving_fft = ifft(fixed_mask_fft *
                                                rotated_moving_fft)

    numerator = ifft(rotated_moving_fft * fixed_fft)
    numerator -= (masked_correlated_fixed_fft *
                  masked_correlated_rotated_moving_fft /
                  number_overlap_masked_px)

    fixed_squared_fft = fft(cp.square(fixed_image))
    fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft)
    fixed_denom -= (cp.square(masked_correlated_fixed_fft) /
                    number_overlap_masked_px)
    fixed_denom[:] = cp.fmax(fixed_denom, 0.0)

    rotated_moving_squared_fft = fft(cp.square(rotated_moving_image))
    moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft)
    moving_denom -= (cp.square(masked_correlated_rotated_moving_fft) /
                     number_overlap_masked_px)
    moving_denom[:] = cp.fmax(moving_denom, 0.0)

    denom = cp.sqrt(fixed_denom * moving_denom)

    # Slice back to expected convolution shape.
    numerator = numerator[final_slice]
    denom = denom[final_slice]
    number_overlap_masked_px = number_overlap_masked_px[final_slice]

    if mode == "same":
        _centering = partial(_centered, newshape=fixed_image.shape, axes=axes)
        denom = _centering(denom)
        numerator = _centering(numerator)
        number_overlap_masked_px = _centering(number_overlap_masked_px)

    # Pixels where `denom` is very small will introduce large
    # numbers after division. To get around this problem,
    # we zero-out problematic pixels.
    tol = 1e3 * eps * cp.max(cp.abs(denom), axis=axes, keepdims=True)
    nonzero_indices = denom > tol

    # TODO: grlee77: Added a cast to real here.
    #                probably it should be real earlier?
    numerator = numerator.real
    denom = denom.real
    out = cp.zeros_like(denom)
    out[nonzero_indices] = numerator[nonzero_indices] / denom[nonzero_indices]
    cp.clip(out, a_min=-1, a_max=1, out=out)

    # Apply overlap ratio threshold
    number_px_threshold = overlap_ratio * cp.max(
        number_overlap_masked_px, axis=axes, keepdims=True)
    out[number_overlap_masked_px < number_px_threshold] = 0.0

    return out
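
A small end-to-end run (a sketch; assumes the module-level helpers used above, fftmodule, next_fast_len, _flip and _centered, are importable alongside this function):

import cupy as cp

arr1 = cp.random.rand(16, 16)
arr2 = cp.roll(arr1, (2, 3), axis=(0, 1))
m = cp.ones((16, 16), dtype=bool)
xcorr = cross_correlate_masked(arr1, arr2, m, m, mode="full")
peak = cp.unravel_index(cp.argmax(xcorr), xcorr.shape)
# the peak location relative to the array centre recovers the (2, 3) shift
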
Example #18
def evaluate_chunks(
        results: [cp.ndarray, cp.ndarray,
                  cp.ndarray],  # closest triangle, distance, projection
        all_pts: cp.ndarray = None,
        vertices: cp.ndarray = None,
        edges: cp.ndarray = None,
        edge_norms: cp.ndarray = None,
        edge_normssq: cp.ndarray = None,
        normals: cp.ndarray = None,
        norms: cp.ndarray = None,
        normssq: cp.ndarray = None,
        zero_tensor: cp.ndarray = None,
        one_tensor: cp.ndarray = None,
        tris: cp.ndarray = None,
        vertex_normals: cp.ndarray = None,
        bounding_box: dict = None,
        chunk_size: int = None,
        num_verts: int = None) -> None:

    #
    # Expand vertex normals if non empty
    if vertex_normals is not None:
        vertex_normals = vertex_normals[tris]
        vertex_normals = cp.tile(cp.expand_dims(vertex_normals, axis=2),
                                 (1, 1, chunk_size, 1))

    # begin = time.time()
    #
    # Load and extend the batch
    num_chunks = all_pts.shape[0] // chunk_size
    for i in range(num_chunks):
        #
        # Get subset of the query points
        start_index = i * chunk_size
        end_index = (i + 1) * chunk_size
        pts = all_pts[start_index:end_index, :]

        #
        # Match the dimensions to those assumed above.
        #    REPEATED       REPEATED
        # [triangle_index, vert_index, querypoint_index, coordinates]
        pts = cp.tile(cp.expand_dims(pts, axis=(0, 1)), (num_verts, 3, 1, 1))

        #
        # Compute the differences between
        # vertices on each triangle and the
        # points of interest
        #
        # [triangle_index, vert_index, querypoint_index, coordinates]
        # ===================
        # [:,0,:,:] = p - p1
        # [:,1,:,:] = p - p2
        # [:,2,:,:] = p - p3
        diff_vectors = pts - vertices

        #
        # Compute alpha, beta, gamma
        barycentric = cp.empty(diff_vectors.shape)

        #
        # gamma = u x (p - p1)
        barycentric[:, 2, :, :] = cp.cross(edges[:, 0, :, :],
                                           diff_vectors[:, 0, :, :])
        # beta = (p - p1) x v
        barycentric[:, 1, :, :] = cp.cross(diff_vectors[:, 0, :, :],
                                           edges[:, 1, :, :])
        # alpha = w x (p - p2)
        barycentric[:, 0, :, :] = cp.cross(edges[:, 2, :, :],
                                           diff_vectors[:, 1, :, :])
        barycentric = cp.divide(
            cp.sum(cp.multiply(barycentric, normals), axis=3), normssq)

        #
        # Test conditions
        less_than_one = cp.less_equal(barycentric, one_tensor)
        more_than_zero = cp.greater_equal(barycentric, zero_tensor)

        #
        #     if 0 <= gamma and gamma <= 1
        #    and 0 <= beta and beta <= 1
        #    and 0 <= alpha and alpha <= 1:
        cond1 = cp.logical_and(less_than_one, more_than_zero)

        #
        #     if gamma <= 0:
        cond2 = cp.logical_not(more_than_zero[:, 2, :])
        cond2 = cp.tile(cp.expand_dims(cond2, axis=1), (1, 3, 1))

        #
        #     if beta <= 0:
        cond3 = cp.logical_not(more_than_zero[:, 1, :])
        cond3 = cp.tile(cp.expand_dims(cond3, axis=1), (1, 3, 1))

        #
        #     if alpha <= 0:
        cond4 = cp.logical_not(more_than_zero[:, 0, :])
        cond4 = cp.tile(cp.expand_dims(cond4, axis=1), (1, 3, 1))

        #
        # Get the projections for each case
        xi = cp.empty(barycentric.shape)
        barycentric_ext = cp.tile(cp.expand_dims(barycentric, axis=3),
                                  (1, 1, 1, 3))
        proj = cp.sum(cp.multiply(barycentric_ext, vertices), axis=1)
        #
        #     if 0 <= gamma and gamma <= 1
        #    and 0 <= beta and beta <= 1
        #    and 0 <= alpha and alpha <= 1:
        xi[cond1] = barycentric[cond1]

        #
        # if gamma <= 0:
        #  x = p - p1
        #  u = p2 - p1
        #  a = p1
        #  b = p2
        t2 = cp.divide(
            #
            # u.dot(x)
            cp.sum(cp.multiply(edges[:, 0, :, :], diff_vectors[:, 0, :, :]),
                   axis=2),
            edge_normssq[:, 0])
        xi2 = cp.zeros((t2.shape[0], 3, t2.shape[1]))
        xi2[:, 0, :] = -t2 + 1
        xi2[:, 1, :] = t2
        #
        t2 = cp.tile(cp.expand_dims(t2, axis=2), (1, 1, 3))
        lz = cp.less(t2, cp.zeros(t2.shape))
        go = cp.greater(t2, cp.ones(t2.shape))
        proj2 = vertices[:, 0, :, :] + cp.multiply(t2, edges[:, 0, :, :])
        proj2[lz] = vertices[:, 0, :, :][lz]
        proj2[go] = vertices[:, 1, :, :][go]
        #
        xi[cond2] = xi2[cond2]
        proj[cp.swapaxes(cond2, 1, 2)] = proj2[cp.swapaxes(cond2, 1, 2)]

        #
        # if beta <= 0:
        #  x = p - p1
        #  v = p3 - p1
        #  a = p1
        #  b = p3
        t3 = cp.divide(
            #
            # v.dot(x)
            cp.sum(cp.multiply(edges[:, 1, :, :], diff_vectors[:, 0, :, :]),
                   axis=2),
            edge_normssq[:, 1])
        xi3 = cp.zeros((t3.shape[0], 3, t3.shape[1]))
        xi3[:, 0, :] = -t3 + 1
        xi3[:, 2, :] = t3
        #
        t3 = cp.tile(cp.expand_dims(t3, axis=2), (1, 1, 3))
        lz = cp.less(t3, cp.zeros(t3.shape))
        go = cp.greater(t3, cp.ones(t3.shape))
        proj3 = vertices[:, 0, :, :] + cp.multiply(t3, edges[:, 1, :, :])
        proj3[lz] = vertices[:, 0, :, :][lz]
        proj3[go] = vertices[:, 2, :, :][go]
        #
        xi[cond3] = xi3[cond3]
        proj[cp.swapaxes(cond3, 1, 2)] = proj3[cp.swapaxes(cond3, 1, 2)]

        #
        #     if alpha <= 0:
        #  y = p - p2
        #  w = p3 - p2
        #  a = p2
        #  b = p3
        t4 = cp.divide(
            #
            # w.dot(y)
            cp.sum(cp.multiply(edges[:, 2, :, :], diff_vectors[:, 1, :, :]),
                   axis=2),
            edge_normssq[:, 2])
        xi4 = cp.zeros((t4.shape[0], 3, t4.shape[1]))
        xi4[:, 1, :] = -t4 + 1
        xi4[:, 2, :] = t4
        #
        t4 = cp.tile(cp.expand_dims(t4, axis=2), (1, 1, 3))
        lz = cp.less(t4, cp.zeros(t4.shape))
        go = cp.greater(t4, cp.ones(t4.shape))
        proj4 = vertices[:, 1, :, :] + cp.multiply(t4, edges[:, 2, :, :])
        proj4[lz] = vertices[:, 1, :, :][lz]
        proj4[go] = vertices[:, 2, :, :][go]
        #
        xi[cond4] = xi4[cond4]
        proj[cp.swapaxes(cond4, 1, 2)] = proj4[cp.swapaxes(cond4, 1, 2)]

        vec_to_point = pts[:, 0, :, :] - proj
        distances = cp.linalg.norm(vec_to_point, axis=2)

        # n = "\n"
        # print(f"{pts[:,0,:,:]=}")
        # print(f"{proj=}")
        # print(f"{pts[:,0,:,:] - proj=}")
        # print(f"{distances=}")

        min_distances = cp.min(distances, axis=0)

        closest_triangles = cp.argmin(distances, axis=0)

        projections = proj[closest_triangles, np.arange(chunk_size), :]

        #
        # Distinguish close triangles
        is_close = cp.isclose(distances, min_distances)

        #
        # Determine sign
        signed_normal = normals[:, 0, :, :]
        if vertex_normals is not None:
            signed_normal = cp.sum(vertex_normals.transpose() * xi.transpose(),
                                   axis=2).transpose()

        is_negative = cp.less_equal(
            cp.sum(cp.multiply(vec_to_point, signed_normal), axis=2), 0.)

        #
        # Combine
        is_close_and_negative = cp.logical_and(is_close, is_negative)

        #
        # Determine if inside
        is_inside = cp.all(cp.logical_or(is_close_and_negative,
                                         cp.logical_not(is_close)),
                           axis=0)

        #
        # Overwrite the signs of points
        # that are outside of the box
        if bounding_box is not None:
            #
            # Extract
            rotation_matrix = cp.asarray(bounding_box['rotation_matrix'])
            translation_vector = cp.asarray(bounding_box['translation_vector'])
            size = cp.asarray(bounding_box['size'])
            #
            # Transform
            transformed_pts = cp.dot(
                all_pts[start_index:end_index, :] - translation_vector,
                rotation_matrix)

            #
            # Determine if outside bbox
            inside_bbox = cp.all(cp.logical_and(
                cp.less_equal(0., transformed_pts),
                cp.less_equal(transformed_pts, size)),
                                 axis=1)

            #
            # Treat points outside bbox as
            # being outside of lumen
            print(f"{inside_bbox=}")
            is_inside = cp.logical_and(is_inside, inside_bbox)

        #
        # Apply sign to indicate whether the distance is
        # inside or outside the mesh.
        min_distances[is_inside] = -1 * min_distances[is_inside]

        #
        # Emplace results
        # [triangle_index, vert_index, querypoint_index, coordinates]
        results[0][start_index:end_index] = closest_triangles
        results[1][start_index:end_index] = min_distances
        results[2][start_index:end_index, :] = projections
Example #19
 def __init__(self, mask):
     self.mask = mask
     # D2Q9 lattice: rest (weight 4/9), 4 axis directions (1/9), 4 diagonals (1/36)
     self.e = cp.array([[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1],
                        [1, 1], [-1, 1], [-1, -1], [1, -1]])
     self.w = cp.array([4 / 9] + [1 / 9] * 4 + [1 / 36] * 4)
     for i in range(9):
         print(self.e[i], self.w[i])
     self.psi = cp.full((H, W), -1.0)
     self.psi[:, :10] = 1.0
     self.block_mask = cp.logical_not(mask)
     self.psi[self.block_mask] = psi_wall
     self.left_wall = cp.full((H, 1), 1.0)
     self.right_wall = cp.full((H, 1), -1.0)
     self.gamma = gamma
     self.top_bottom_wall = cp.full((1, W + 2), psi_wall)
     self.nabla_psix = cp.zeros((H, W))
     self.nabla_psiy = cp.zeros((H, W))
     self.nabla_psi2 = cp.zeros((H, W))
     self.rho = 1.0 - 0.05 * cp.random.rand(H, W)[mask]
     #self.rho = np.ones((H, W))[mask] * rho0  # macroscopic density
     self.ux = cp.zeros((H, W))[mask]
     self.uy = cp.zeros((H, W))[mask]
     self.p = cp.zeros((H, W))[mask]
     self.f = cp.array([cp.zeros((H, W)) for i in range(9)])
     self.g = cp.array([cp.zeros((H, W)) for i in range(9)])
     self.feq = cp.array([cp.zeros((H, W))[mask] for i in range(9)])
     self.geq = cp.array([cp.zeros((H, W))[mask] for i in range(9)])
     self.mu = cp.zeros((H, W))[mask]
     self.mix_tau = cp.zeros((H, W))[mask]
     self.F = cp.array([cp.zeros((H, W))[mask] for i in range(9)])
     self.nabla_psix = self.getNabla_psix()
     self.nabla_psiy = self.getNabla_psiy()
     self.nabla_psi2 = self.getNabla_psi2()
     mu = self.getMu()
     # self.uy = self.getUy()
     self.p = self.getP()
     self.mix_tau = self.getMix_tau()
     for i in range(9):
         self.f[i][self.mask] = self.getfeq(i)
         self.g[i][self.mask] = self.getgeq(i)
Example #20
def main():
    rect_corner_list = []

    # for rectangle block
    # while True:
    #     if count * 20 > 360:
    #         break
    #     if flag:
    #         rect_corner_list.append(((count * 20, 60), ((count + 1) * 20, 80)))
    #         rect_corner_list.append(((count * 20, 140), ((count + 1) * 20, 160)))
    #         rect_corner_list.append(((count * 20, 220), ((count + 1) * 20, 240)))
    #         rect_corner_list.append(((count * 20, 300), ((count + 1) * 20, 320)))
    #         flag = False
    #         count += 2
    #         continue
    #     elif not flag:
    #         rect_corner_list.append(((count * 20, 20), ((count + 1) * 20, 40)))
    #         rect_corner_list.append(((count * 20, 100), ((count + 1) * 20, 120)))
    #         rect_corner_list.append(((count * 20, 180), ((count + 1) * 20, 200)))
    #         rect_corner_list.append(((count * 20, 260), ((count + 1) * 20, 280)))
    #         rect_corner_list.append(((count * 20, 340), ((count + 1) * 20, 360)))
    #         flag = True
    #         count += 2
    # block_psi_all, corner_list = setblock(rect_corner_list)
    cr = Createblock(H, W)
    bb = Bounce_back(H, W)
    circle_list = [[(int(W / 2 - W / 3), int(H / 2)), 30]]
    circle_list = []
    # circle_list.append(((int(W/2), int(H/2)), 30))
    r = 13
    xx = 78
    count = 2
    flag = True
    mabiki = MAX_T // 150
    # circle_list.append(((count * 2 * r, xx + r), r))
    # circle_list.append(((count * 2 * r, 2 * xx + 3 * r), r))
    # circle_list.append(((count * 2 * r, 3 * xx + 5 * r + 1),  r + 1))
    # circle_list.append(((count * 2 * r, 3 * xx + 5 * r), r))

    # while True:
    #     if count * 2 * r > 380:
    #         break
    #     if flag:
    #         circle_list.append(((count * 2 * r, 3 * r), r + 5))
    #         circle_list.append(((count * 2 * r, xx + 5 * r), r + 5))
    #         circle_list.append(((count * 2 * r, 2 * xx + 7 * r), r + 5))
    #         circle_list.append(((count * 2 * r, 3 * xx + 9 * r), r + 5))
    #         print(3 * xx + 5 * r)
    #         print(2 * xx + 3 * r)
    #         flag = False
    #         count += 2
    #         continue
    #     elif not flag:
    #         circle_list.append(((count * 2 * r, xx + r), r + 5))
    #         circle_list.append(((count * 2 * r, 2 * xx + 3 * r), r + 5))
    #         circle_list.append(((count * 2 * r, 3 * xx + 5 * r), r + 5))
    #         flag = True
    #         count += 2

    # while True:
    #     if count * r > 380:
    #         break
    #     circle_list.append(((count * r, 2 * r), r))
    #     circle_list.append(((count * r, 6 * r), r))
    #     circle_list.append(((count * r, 10 * r), r))
    #     circle_list.append(((count * r, 14 * r), r))
    #     circle_list.append(((count * r, 18 * r), r))
    #     count += 4

    block_psi_all, side_list, concave_list, convex_list = cr.setCirleblock(circle_list)
    block_mask = cp.where(block_psi_all == 1, True, False)
    mask = cp.logical_not(block_mask)
    cm = Compute(mask)
    cc = cp.array([cm.psi])
    for i in range(MAX_T):
        for j in range(9):
            cm.F[j] = cm.getLarge_F(j)
            cm.feq[j] = cm.getfeq(j)
            cm.geq[j] = cm.getgeq(j)
            cm.f[j][mask] = cm.getF(j)
            cm.g[j][mask] = cm.getG(j)
        if i % mabiki == 0:
            cc = cp.append(cc, cp.array([cm.psi]), axis=0)
            print("timestep:{}".format(i))
        f_behind = copy.deepcopy(cm.f)
        g_behind = copy.deepcopy(cm.g)
        stream(cm.f, cm.g)
        bb.halfway_bounceback_circle(side_list, concave_list, convex_list, f_behind, g_behind, cm.f, cm.g)
        # halfway_bounceback(corner_list, f_behind, g_behind, cm.f, cm.g)
        # bottom_top_wall(f_behind[:, 1:-1], g_behind[:, 1:-1], cm.f[:, 1:-1], cm.g[:, 1:-1])
        cm.zou_he_boundary_inlet()
        cm.zou_he_boundary_outlet()
        cm.rho = cm.getRho()
        cm.udpatePsi()
        cm.nabla_psix = cm.getNabla_psix()
        cm.nabla_psiy = cm.getNabla_psiy()
        cm.nabla_psi2 = cm.getNabla_psi2()
        cm.mu = cm.getMu()
        cm.ux = cm.getUx()
        cm.uy = cm.getUy()
        cm.p = cm.getP()
        cm.mix_tau = cm.getMix_tau()
    y = [i for i in range(H)]
    x = [i for i in range(W)]
    # fig = plt.figure()
    # plt.colorbar(plt.pcolor(x, y, cc[0], cmap='RdBu'))
    # ani = animation.FuncAnimation(fig, update, fargs=(x, y, cc), frames=int(len(cc)))
    # ani.save('../movies/MAX_T{}_Pe{}_M{}_Ca{}_wall{}.mp4'.format(MAX_T, Pe, M, Ca, psi_wall), fps=10)
    plt.figure()
    plt.pcolor(x, y, cm.psi, label='MAX_T{}_Pe{}_M{}_Ca{}_wall{}'.format(MAX_T, Pe, M, Ca, psi_wall), cmap='RdBu')
    plt.colorbar()
    plt.legend()
    # plt.grid()
    plt.show()
Example #21
File: _polygon.py Project: grlee77/cucim
def approximate_polygon(coords, tolerance):
    """Approximate a polygonal chain with the specified tolerance.

    It is based on the Douglas-Peucker algorithm.

    Note that the approximated polygon is always within the convex hull of the
    original polygon.

    Parameters
    ----------
    coords : (N, 2) array
        Coordinate array.
    tolerance : float
        Maximum distance from original points of polygon to approximated
        polygonal chain. If tolerance is 0, the original coordinate array
        is returned.

    Returns
    -------
    coords : (M, 2) array
        Approximated polygonal chain where M <= N.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
    """
    if tolerance <= 0:
        return coords

    chain = cp.zeros(coords.shape[0], "bool")
    # pre-allocate distance array for all points
    dists = cp.zeros(coords.shape[0])
    chain[0] = True
    chain[-1] = True
    pos_stack = [(0, chain.shape[0] - 1)]
    end_of_chain = False

    while not end_of_chain:
        start, end = pos_stack.pop()
        # determine properties of current line segment
        r0, c0 = cp.asnumpy(coords[start, :])
        r1, c1 = cp.asnumpy(coords[end, :])
        dr = r1 - r0
        dc = c1 - c0
        segment_angle = -cp.arctan2(dr, dc)
        segment_dist = c0 * cp.sin(segment_angle) + r0 * cp.cos(segment_angle)

        # select points in-between line segment
        segment_coords = coords[start + 1:end, :]
        segment_dists = dists[start + 1:end]

        # check whether to take perpendicular or euclidean distance with
        # inner product of vectors

        # vectors from points -> start and end
        dr0 = segment_coords[:, 0] - r0
        dc0 = segment_coords[:, 1] - c0
        dr1 = segment_coords[:, 0] - r1
        dc1 = segment_coords[:, 1] - c1
        # vectors points -> start and end projected on start -> end vector
        projected_lengths0 = dr0 * dr + dc0 * dc
        projected_lengths1 = -dr1 * dr - dc1 * dc
        perp = cp.logical_and(projected_lengths0 > 0, projected_lengths1 > 0)
        eucl = cp.logical_not(perp)
        segment_dists[perp] = cp.abs(
            segment_coords[perp, 0] * cp.cos(segment_angle) +
            segment_coords[perp, 1] * cp.sin(segment_angle) - segment_dist)
        segment_dists[eucl] = cp.minimum(
            # distance to start point
            cp.sqrt(dc0[eucl]**2 + dr0[eucl]**2),
            # distance to end point
            cp.sqrt(dc1[eucl]**2 + dr1[eucl]**2),
        )

        if cp.any(segment_dists > tolerance):
            # select point with maximum distance to line
            new_end = start + cp.argmax(segment_dists) + 1
            pos_stack.append((new_end, end))
            pos_stack.append((start, new_end))
            chain[new_end] = True

        if len(pos_stack) == 0:
            end_of_chain = True

    return coords[chain, :]
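
Usage sketch (assuming cuCIM, which ships this function as cucim.skimage.measure.approximate_polygon):

import cupy as cp
from cucim.skimage.measure import approximate_polygon

coords = cp.array([[0, 0], [0, 2.2], [0, 5], [2.8, 5], [5, 5]], dtype=float)
simplified = approximate_polygon(coords, tolerance=0.5)
# near-collinear interior points are dropped; the endpoints and the
# corner at (0, 5) survive
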
Example #22
File: misc.py Project: mritools/cupyimg
def remove_small_holes(ar, area_threshold=64, connectivity=1, in_place=False):
    """Remove contiguous holes smaller than the specified size.

    Parameters
    ----------
    ar : ndarray (arbitrary shape, int or bool type)
        The array containing the connected components of interest.
    area_threshold : int, optional (default: 64)
        The maximum area, in pixels, of a contiguous hole that will be filled.
        Replaces `min_size`.
    connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1)
        The connectivity defining the neighborhood of a pixel.
    in_place : bool, optional (default: False)
        If `True`, remove the connected components in the input array itself.
        Otherwise, make a copy.

    Raises
    ------
    TypeError
        If the input array is of an invalid type, such as float or string.
    ValueError
        If the input array contains negative values.

    Returns
    -------
    out : ndarray, same shape and type as input `ar`
        The input array with small holes within connected components removed.

    Examples
    --------
    >>> import cupy as cp
    >>> from cupyimg.skimage import morphology
    >>> a = cp.array([[1, 1, 1, 1, 1, 0],
    ...               [1, 1, 1, 0, 1, 0],
    ...               [1, 0, 0, 1, 1, 0],
    ...               [1, 1, 1, 1, 1, 0]], bool)
    >>> b = morphology.remove_small_holes(a, 2)
    >>> b
    array([[ True,  True,  True,  True,  True, False],
           [ True,  True,  True,  True,  True, False],
           [ True, False, False,  True,  True, False],
           [ True,  True,  True,  True,  True, False]])
    >>> c = morphology.remove_small_holes(a, 2, connectivity=2)
    >>> c
    array([[ True,  True,  True,  True,  True, False],
           [ True,  True,  True, False,  True, False],
           [ True, False, False,  True,  True, False],
           [ True,  True,  True,  True,  True, False]])
    >>> d = morphology.remove_small_holes(a, 2, in_place=True)
    >>> d is a
    True

    Notes
    -----
    If the array type is int, it is assumed that it contains already-labeled
    objects. The labels are not kept in the output image (this function always
    outputs a bool image). It is suggested that labeling is completed after
    using this function.

    """
    _check_dtype_supported(ar)

    # Creates warning if image is an integer image
    if ar.dtype != bool:
        warn(
            "Any labeled images will be returned as a boolean array. "
            "Did you mean to use a boolean array?",
            UserWarning,
        )

    if in_place:
        out = ar
    else:
        out = ar.copy()

    # Creating the inverse of ar
    if in_place:
        cp.logical_not(out, out=out)
    else:
        out = cp.logical_not(out)

    # removing small objects from the inverse of ar
    out = remove_small_objects(out, area_threshold, connectivity, in_place)

    if in_place:
        cp.logical_not(out, out=out)
    else:
        out = cp.logical_not(out)

    return out
Example #23
def train(source_bpe, target_bpe, source_glove, target_glove, chunk_length,
          batch_size, warmup_steps, save_decimation, num_steps, gpu_id, out,
          log_level):
    if not os.path.exists(out):
        os.makedirs(out)

    ll = getattr(logging, log_level)

    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(ll)
    stream_handler.setFormatter(logging.Formatter('%(message)s'))

    file_handler = logging.FileHandler(filename=os.path.join(
        out, 'training.log'),
                                       mode='a')
    file_handler.setLevel(ll)
    file_handler.setFormatter(logging.Formatter('%(message)s'))

    logger.addHandler(stream_handler)
    logger.addHandler(file_handler)
    logger.setLevel(ll)

    gpu_id = gpu_id if gpu_id is not None else -1

    device_name = '@intel64'
    if gpu_id >= 0:
        device_name = f'@cupy:{gpu_id}'

    with chainer.using_device(device_name):
        source_vocab = make_vocab(source_glove)
        target_vocab = make_vocab(target_glove)
        output_model_dim = target_vocab.embedding_size
        dataset = make_dataset(source_bpe, target_bpe, source_vocab,
                               target_vocab, chunk_length)
        iterator = MultithreadIterator(dataset, batch_size)
        state = TrainingState()
        model = Transformer(source_vocab, target_vocab)
        model.to_gpu(gpu_id)
        optimizer = Adam(beta1=0.99, beta2=0.98, eps=1e-9).setup(model)

        load_training(out, model, optimizer, state)

        try:
            for n, batch in enumerate(iterator):
                if n >= num_steps:
                    break

                if (n + 1) % save_decimation == 0:
                    save_training(out, model, optimizer, state)

                model.cleargrads()
                gc.collect()

                source, target = stack_nested(batch)

                source.token_ids.to_gpu(gpu_id)
                source.masks.to_gpu(gpu_id)
                target.token_ids.to_gpu(gpu_id)
                target.masks.to_gpu(gpu_id)

                output_probs = model.train_forward(source.token_ids,
                                                   target.token_ids,
                                                   input_masks=source.masks,
                                                   output_masks=target.masks)

                unnormalized_loss = F.softmax_cross_entropy(
                    F.reshape(output_probs,
                              (output_probs.shape[0] * output_probs.shape[1],
                               output_probs.shape[2])),
                    F.reshape(target.token_ids, (target.token_ids.shape[0] *
                                                 target.token_ids.shape[1], )),
                    reduce='no')
                loss_mask = xp.reshape(
                    xp.logical_not(target.masks.array).astype(xp.float32),
                    (target.masks.shape[0] * target.masks.shape[1], ))
                loss = F.sum(unnormalized_loss * loss_mask) / F.sum(loss_mask)
                loss.backward()

                learning_rate = (output_model_dim**-0.5) * min(
                    (state.step**-0.5), state.step * (warmup_steps**-1.5))
                optimizer.alpha = learning_rate
                optimizer.update()

                logger.info(
                    f'time = {int(time.time())} | step = {state.step} | loss = {float(loss.array)} | lr = {learning_rate}'
                )

                state.step += 1
        finally:
            save_training(out, model, optimizer, state)
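
The masked loss in isolation (a sketch with xp standing for CuPy as in the training code; here True in the mask marks positions excluded from the loss, which is why logical_not produces the weights):

import cupy as xp

token_loss = xp.asarray([0.5, 1.0, 2.0, 3.0], dtype=xp.float32)
pad_mask = xp.asarray([False, False, False, True])
weights = xp.logical_not(pad_mask).astype(xp.float32)
loss = (token_loss * weights).sum() / weights.sum()  # mean over kept tokens
print(float(loss))  # (0.5 + 1.0 + 2.0) / 3
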
Example #24
def _unique_update_mask_equal_nan(mask, x0):
    mask1 = cupy.logical_not(cupy.isnan(x0))
    mask[:] = cupy.logical_and(mask, mask1)
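
Behavior sketch for the helper above: positions where x0 is NaN are cleared from mask in place.

import cupy

mask = cupy.array([True, True, True])
x0 = cupy.array([1.0, cupy.nan, 3.0])
_unique_update_mask_equal_nan(mask, x0)
print(mask)  # [ True False  True]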