def find_regular_segments(image):
    original_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    edges = filters.sobel(image)
    grid = util.regular_grid(image.shape, n_points=468)
    seeds = np.zeros(image.shape, dtype=int)
    seeds[grid] = np.arange(seeds[grid].size).reshape(seeds[grid].shape) + 1
    w0 = watershed(edges, seeds)
    w1 = watershed(edges, seeds, compactness=0.01)
    fig, (ax0, ax1, ax2) = plt.subplots(1, 3)
    ax0.imshow(original_img)
    ax0.set_title("Original image")
    ax1.imshow(color.label2rgb(w0, image))
    ax1.set_title('Classical watershed')
    ax2.imshow(color.label2rgb(w1, image))
    ax2.set_title('Compact watershed')
    plt.show()
def multiscale_regular_seeds(off_limits, num_seeds):
    """Return evenly-spaced seeds, but thinned in areas with no boundaries.

    Parameters
    ----------
    off_limits : array of bool, shape (M, N)
        A binary array where `True` indicates the position of a boundary,
        and thus where we don't want to place seeds.
    num_seeds : int
        The desired number of seeds.

    Returns
    -------
    seeds : array of int, shape (M, N)
        An array of seed points. Each seed gets its own integer ID,
        starting from 1.
    """
    seeds_binary = np.zeros(off_limits.shape, dtype=bool)
    grid = util.regular_grid(off_limits.shape, num_seeds)
    seeds_binary[grid] = True
    seeds_binary &= ~off_limits
    seeds_img = seeds_binary[grid]
    thinned_equal = False
    step = 2
    while not thinned_equal:
        thinned = _thin_seeds(seeds_img, step)
        thinned_equal = np.all(seeds_img == thinned)
        seeds_img = thinned
        step *= 2
    seeds_binary[grid] = seeds_img
    return ndi.label(seeds_binary)[0]
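# Minimal, self-contained sketch of the seeding pattern that the function
# above refines: regular seeds from util.regular_grid, suppressed where they
# fall on boundary pixels, then labelled with scipy.ndimage.label.
# The 0.1 edge threshold is illustrative only.
import numpy as np
from scipy import ndimage as ndi
from skimage import data, filters, util

image = data.coins()
boundaries = filters.sobel(image) > 0.1
seeds_binary = np.zeros(image.shape, dtype=bool)
seeds_binary[tuple(util.regular_grid(image.shape, 100))] = True  # tuple() works whether slices come back as a list or a tuple
seeds_binary &= ~boundaries                                      # drop seeds sitting on boundaries
seeds, n_seeds = ndi.label(seeds_binary)
print(n_seeds, "seeds placed")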
def test_regular_grid_3d_8():
    ar = np.zeros((3, 20, 40))
    g = regular_grid(ar.shape, 8)
    assert_equal(g, [slice(1.0, None, 3.0), slice(5.0, None, 10.0),
                     slice(5.0, None, 10.0)])
    ar[g] = 1
    assert_equal(ar.sum(), 8)
def get_k_centers(k, height, width):
    '''
    Find approximately k regularly spaced centers on a matrix of the given
    height and width. Takes the desired number of centers k and returns the
    coordinates of the centers actually placed (as close to k as possible).

    Parameters:
        k - number of clusters to try to fit
        height - height of the input matrix
        width - width of the input matrix

    Returns:
        centers - list of center coordinates in the following format
                  [[center_y, center_x], ...]
        step_y - the step size between centers on the y axis (height)
        step_x - the step size between centers on the x axis (width)
    '''
    # set up a regular grid of candidate coordinates using meshgrids
    grid_y, grid_x = np.mgrid[:height, :width]
    slices = util.regular_grid((height, width), k)
    step_y, step_x = [s.step if s.step is not None else 1 for s in slices]
    centers_y = grid_y[slices]
    centers_x = grid_x[slices]
    # reshape centers into the desired output dimensions
    centers = np.concatenate([centers_y[..., np.newaxis],
                              centers_x[..., np.newaxis]], axis=-1)
    centers = centers.reshape(-1, 2)
    return centers, step_y, step_x
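# Hypothetical usage of get_k_centers as defined above (assumes numpy as np
# and skimage.util imported as util, as the snippet itself does):
centers, step_y, step_x = get_k_centers(100, 480, 640)
print(centers.shape)      # (n, 2) rows of [center_y, center_x], with n close to 100
print(step_y, step_x)     # spacing between neighbouring centers along each axis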
def my_watershed(img, compactness, n_seeds=9):
    if len(img.shape) == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = filters.sobel(img)
    grid = util.regular_grid(img.shape, n_points=n_seeds)
    seeds = np.zeros(img.shape, dtype=int)
    seeds[grid] = np.arange(seeds[grid].size).reshape(seeds[grid].shape) + 1
    w0 = watershed(edges, seeds, compactness=compactness)
    fig, ax = plt.subplots()
    ax.imshow(color.label2rgb(w0, img))
    ax.set_title('Compactness:' + str(compactness))
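# Example call for my_watershed, assuming the names it uses internally
# (cv2, numpy as np, matplotlib.pyplot as plt, skimage filters/util/color
# and watershed) are already in scope:
from skimage import data
import matplotlib.pyplot as plt

my_watershed(data.coins(), compactness=0.01, n_seeds=100)
plt.show()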
def test_regular_grid_2d_32():
    ar = np.zeros((20, 40))
    g = regular_grid(ar.shape, 32)
    assert_equal(g, [slice(2.0, None, 5.0), slice(2.0, None, 5.0)])
    ar[g] = 1
    assert_equal(ar.sum(), 32)
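# Quick interactive check of the behaviour these tests exercise: regular_grid
# returns one slice per axis, and indexing with those slices marks roughly the
# requested number of evenly spaced points.
import numpy as np
from skimage.util import regular_grid

ar = np.zeros((20, 40))
g = regular_grid(ar.shape, 32)
ar[tuple(g)] = 1          # tuple() keeps this valid whether g is a list or a tuple
print(g)
print(int(ar.sum()))      # 32 points selected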
import numpy as np
import matplotlib.pyplot as plt
from skimage import io, data, util, filters, color
from skimage.morphology import watershed

kitten = color.rgb2gray(io.imread("../images/kitten.jpeg"))
kitten_edge = filters.sobel(kitten)  # apply edge detection before the watershed
grid = util.regular_grid(kitten.shape, n_points=300)  # find 300 points evenly spaced in the image

# The seed matrix has the same shape as the original image; it holds
# consecutive integer labels (1, 2, ...) at the seed positions and zeros elsewhere.
seeds = np.zeros(kitten.shape, dtype=int)
seeds[grid] = np.arange(seeds[grid].size).reshape(seeds[grid].shape) + 1

w1 = watershed(kitten_edge, seeds, compactness=0.91)  # compact watershed produces even region sizes
water_compact = color.label2rgb(w1, kitten, alpha=0.4, kind="overlay")

plt.figure(figsize=(8, 8))
plt.imshow(water_compact)
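# Optional follow-up to the script above: draw the superpixel boundaries
# directly on the image instead of a colour overlay (mark_boundaries is part
# of skimage.segmentation).
from skimage.segmentation import mark_boundaries

plt.figure(figsize=(8, 8))
plt.imshow(mark_boundaries(kitten, w1))
plt.show()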
def slic(image, n_segments=100, compactness=10., max_iter=10, sigma=0, spacing=None, multichannel=True, convert2lab=True, enforce_connectivity=False, min_size_factor=0.5, max_size_factor=3, slic_zero=False): """Segments image using k-means clustering in Color-(x,y,z) space. Parameters ---------- image : 2D, 3D or 4D ndarray Input image, which can be 2D or 3D, and grayscale or multichannel (see `multichannel` parameter). n_segments : int, optional The (approximate) number of labels in the segmented output image. compactness : float, optional Balances color-space proximity and image-space proximity. Higher values give more weight to image-space. As `compactness` tends to infinity, superpixel shapes become square/cubic. In SLICO mode, this is the initial compactness. max_iter : int, optional Maximum number of iterations of k-means. sigma : float or (3,) array-like of floats, optional Width of Gaussian smoothing kernel for pre-processing for each dimension of the image. The same sigma is applied to each dimension in case of a scalar value. Zero means no smoothing. Note, that `sigma` is automatically scaled if it is scalar and a manual voxel spacing is provided (see Notes section). spacing : (3,) array-like of floats, optional The voxel spacing along each image dimension. By default, `slic` assumes uniform spacing (same voxel resolution along z, y and x). This parameter controls the weights of the distances along z, y, and x during k-means clustering. multichannel : bool, optional Whether the last axis of the image is to be interpreted as multiple channels or another spatial dimension. convert2lab : bool, optional Whether the input should be converted to Lab colorspace prior to segmentation. For this purpose, the input is assumed to be RGB. Highly recommended. enforce_connectivity: bool, optional (default False) Whether the generated segments are connected or not min_size_factor: float, optional Proportion of the minimum segment size to be removed with respect to the supposed segment size ```depth*width*height/n_segments``` max_size_factor: float, optional Proportion of the maximum connected segment size. A value of 3 works in most of the cases. slic_zero: bool, optional Run SLIC-zero, the zero-parameter mode of SLIC Returns ------- labels : 2D or 3D array Integer mask indicating segment labels. Raises ------ ValueError If: - the image dimension is not 2 or 3 and `multichannel == False`, OR - the image dimension is not 3 or 4 and `multichannel == True` Notes ----- * If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to segmentation. * If `sigma` is scalar and `spacing` is provided, the kernel width is divided along each dimension by the spacing. For example, if ``sigma=1`` and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This ensures sensible smoothing for anisotropic images. * The image is rescaled to be in [0, 1] prior to processing. * Images of shape (M, N, 3) are interpreted as 2D RGB images by default. To interpret them as 3D with the last dimension having length 3, use `multichannel=False`. References ---------- .. [1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi, Pascal Fua, and Sabine Süsstrunk, SLIC Superpixels Compared to State-of-the-art Superpixel Methods, TPAMI, May 2012. 
Examples -------- >>> from skimage.segmentation import slic >>> from skimage.data import astronaut >>> img = astronaut() >>> segments = slic(img, n_segments=100, compactness=10) Increasing the compactness parameter yields more square regions: >>> segments = slic(img, n_segments=100, compactness=20) """ if enforce_connectivity is None: warnings.warn('Deprecation: enforce_connectivity will default to' ' True in future versions.') enforce_connectivity = False image = img_as_float(image) is_2d = False if image.ndim == 2: # 2D grayscale image image = image[np.newaxis, ..., np.newaxis] is_2d = True elif image.ndim == 3 and multichannel: # Make 2D multichannel image 3D with depth = 1 image = image[np.newaxis, ...] is_2d = True elif image.ndim == 3 and not multichannel: # Add channel as single last dimension image = image[..., np.newaxis] if spacing is None: spacing = np.ones(3) elif isinstance(spacing, (list, tuple)): spacing = np.array(spacing, dtype=np.double) if not isinstance(sigma, coll.Iterable): sigma = np.array([sigma, sigma, sigma], dtype=np.double) sigma /= spacing.astype(np.double) elif isinstance(sigma, (list, tuple)): sigma = np.array(sigma, dtype=np.double) if (sigma > 0).any(): # add zero smoothing for multichannel dimension sigma = list(sigma) + [0] image = ndimage.gaussian_filter(image, sigma) if convert2lab and multichannel: if image.shape[3] != 3: raise ValueError("Lab colorspace conversion requires a RGB image.") image = rgb2lab(image) depth, height, width = image.shape[:3] # initialize cluster centroids for desired number of segments grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width] slices = regular_grid(image.shape[:3], n_segments) step_z, step_y, step_x = [int(s.step) for s in slices] segments_z = grid_z[slices] segments_y = grid_y[slices] segments_x = grid_x[slices] segments_color = np.zeros(segments_z.shape + (image.shape[3],)) segments = np.concatenate([segments_z[..., np.newaxis], segments_y[..., np.newaxis], segments_x[..., np.newaxis], segments_color], axis=-1).reshape(-1, 3 + image.shape[3]) segments = np.ascontiguousarray(segments) # we do the scaling of ratio in the same way as in the SLIC paper # so the values have the same meaning step = float(max((step_z, step_y, step_x))) ratio = 1.0 / compactness image = np.ascontiguousarray(image * ratio) labels = _slic_cython(image, segments, step, max_iter, spacing, slic_zero) if enforce_connectivity: segment_size = depth * height * width / n_segments min_size = int(min_size_factor * segment_size) max_size = int(max_size_factor * segment_size) labels = _enforce_label_connectivity_cython(labels, n_segments, min_size, max_size) if is_2d: labels = labels[0] return labels
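# Usage sketch with the public scikit-image API documented above (exact
# defaults differ between library versions, so the key arguments are passed
# explicitly here):
from skimage import data, segmentation

img = data.astronaut()
labels = segmentation.slic(img, n_segments=100, compactness=10,
                           enforce_connectivity=True)
print(labels.max() + 1, "segments (approximately n_segments)")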
def slic_feat(image, n_segments=100, compactness=10., max_iter=10, sigma=0, seed_type='grid', spacing=None, multichannel=True, convert2lab=None, enforce_connectivity=False, min_size_factor=0.5, max_size_factor=3, slic_zero=False, multifeat=True, return_adjacency=False, mask=None, recompute_seeds=False, n_random_seeds=10): """Segments image using k-means clustering in Color-(x,y,z) space. Parameters ---------- image : 2D, 3D or 4D ndarray Input image, which can be 2D or 3D, and grayscale or multichannel (see `multichannel` parameter). n_segments : int, optional The (approximate) number of labels in the segmented output image. compactness : float, optional Balances color-space proximity and image-space proximity. Higher values give more weight to image-space. As `compactness` tends to infinity, superpixel shapes become square/cubic. In SLICO mode, this is the initial compactness. max_iter : int, optional Maximum number of iterations of k-means. sigma : float or (3,) array-like of floats, optional Width of Gaussian smoothing kernel for pre-processing for each dimension of the image. The same sigma is applied to each dimension in case of a scalar value. Zero means no smoothing. Note, that `sigma` is automatically scaled if it is scalar and a manual voxel spacing is provided (see Notes section). spacing : (3,) array-like of floats, optional The voxel spacing along each image dimension. By default, `slic` assumes uniform spacing (same voxel resolution along z, y and x). This parameter controls the weights of the distances along z, y, and x during k-means clustering. multichannel : bool, optional Whether the last axis of the image is to be interpreted as multiple channels or another spatial dimension. convert2lab : bool, optional Whether the input should be converted to Lab colorspace prior to segmentation. The input image *must* be RGB. Highly recommended. This option defaults to ``True`` when ``multichannel=True`` *and* ``image.shape[-1] == 3``. enforce_connectivity: bool, optional (default False) Whether the generated segments are connected or not min_size_factor: float, optional Proportion of the minimum segment size to be removed with respect to the supposed segment size ```depth*width*height/n_segments``` max_size_factor: float, optional Proportion of the maximum connected segment size. A value of 3 works in most of the cases. slic_zero: bool, optional Run SLIC-zero, the zero-parameter mode of SLIC. [2]_ mask: ndarray of bools or 0s and 1s, optional Array of same shape as `image`. Supervoxel analysis will only be performed on points at which mask == True Returns ------- labels : 2D or 3D array Integer mask indicating segment labels. Raises ------ ValueError If ``convert2lab`` is set to ``True`` but the last array dimension is not of length 3. Notes ----- * If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to segmentation. * If `sigma` is scalar and `spacing` is provided, the kernel width is divided along each dimension by the spacing. For example, if ``sigma=1`` and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This ensures sensible smoothing for anisotropic images. * The image is rescaled to be in [0, 1] prior to processing. * Images of shape (M, N, 3) are interpreted as 2D RGB images by default. To interpret them as 3D with the last dimension having length 3, use `multichannel=False`. References ---------- .. 
[1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi, Pascal Fua, and Sabine Susstrunk, SLIC Superpixels Compared to State-of-the-art Superpixel Methods, TPAMI, May 2012. .. [2] http://ivrg.epfl.ch/research/superpixels#SLICO Examples -------- >>> from skimage.segmentation import slic >>> from skimage.data import astronaut >>> img = astronaut() >>> segments = slic(img, n_segments=100, compactness=10) Increasing the compactness parameter yields more square regions: >>> segments = slic(img, n_segments=100, compactness=20) """ if enforce_connectivity is None: warnings.warn('Deprecation: enforce_connectivity will default to' ' True in future versions.') enforce_connectivity = False if mask is None and seed_type == 'nrandom': warnings.warn( 'nrandom assignment of seed points should only be used with an ROI. Changing seed type.' ) seed_type = 'grid' if seed_type == 'nrandom' and recompute_seeds is False: warnings.warn( 'Seeds should be recomputed when seed points are randomly assigned' ) image = img_as_float(image) is_2d = False if image.ndim == 2: # 2D grayscale image image = image[np.newaxis, ..., np.newaxis] is_2d = True elif image.ndim == 3 and multichannel: # Make 2D multichannel image 3D with depth = 1 image = image[np.newaxis, ...] is_2d = True elif image.ndim == 3 and not multichannel: # Add channel as single last dimension image = image[..., np.newaxis] if spacing is None: spacing = np.ones(3) elif isinstance(spacing, (list, tuple)): spacing = np.array(spacing, dtype=np.double) if not isinstance(sigma, coll.Iterable): sigma = np.array([sigma, sigma, sigma], dtype=np.double) sigma /= spacing.astype(np.double) elif isinstance(sigma, (list, tuple)): sigma = np.array(sigma, dtype=np.double) if (sigma > 0).any(): # add zero smoothing for multichannel dimension sigma = list(sigma) + [0] image = ndi.gaussian_filter(image, sigma) if multichannel and (convert2lab or convert2lab is None): if image.shape[-1] != 3 and convert2lab: raise ValueError("Lab colorspace conversion requires a RGB image.") elif image.shape[-1] == 3: image = rgb2lab(image) if multifeat is True: feat_scale = float(image.shape[3]) else: feat_scale = 1.0 depth, height, width = image.shape[:3] if mask is None: mask = np.ones(image.shape[:3], dtype=np.bool) else: mask = np.asarray(mask, dtype=np.bool) if seed_type == 'nrandom': segments_z = np.zeros(n_random_seeds, dtype=int) segments_y = np.zeros(n_random_seeds, dtype=int) segments_x = np.zeros(n_random_seeds, dtype=int) m_inv = np.copy(mask) # SEED STEP 1: n seeds are placed as far as possible from every other seed and the edge. 
for ii in range(n_random_seeds): dtrans = distance_transform_edt(m_inv, sampling=spacing) mcoords = np.nonzero(dtrans == np.max(dtrans)) segments_z[ii] = int(mcoords[2][0]) segments_y[ii] = int(mcoords[1][0]) segments_x[ii] = int(mcoords[0][0]) m_inv[segments_x[ii], segments_y[ii], segments_z[ii]] = False # plt.imshow(dtrans[:, :, segments_z[ii]]) # plt.show() segments_color = np.zeros((segments_z.shape[0], image.shape[3])) segments = np.concatenate([ segments_x[..., np.newaxis], segments_y[..., np.newaxis], segments_z[..., np.newaxis], segments_color ], axis=1) sx = np.ascontiguousarray(segments_x, dtype=np.int32) sy = np.ascontiguousarray(segments_y, dtype=np.int32) sz = np.ascontiguousarray(segments_z, dtype=np.int32) out1 = get_mpd(sx, sy, sz) step_x, step_y, step_z = out1[0], out1[1], out1[2] elif seed_type == 'grid': # initialize cluster centroids for desired number of segments # essentially just outputs the indices of a grid in the x, y and z direction grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width] # returns 3 slices (an object representing an array of slices, see builtin slice) slices = regular_grid(image.shape[:3], n_segments) step_z, step_y, step_x = [int(s.step) for s in slices ] # extract step size from slices segments_z = grid_z[ slices] # use slices to extract coordinates for centre points segments_y = grid_y[slices] segments_x = grid_x[slices] # mask_ind = mask[slices].reshape(-1) # list of all locations as well as zeros for the color features segments_color = np.zeros(segments_z.shape + (image.shape[3], )) segments = np.concatenate([ segments_z[..., np.newaxis], segments_y[..., np.newaxis], segments_x[..., np.newaxis], segments_color ], axis=-1).reshape(-1, 3 + image.shape[3]) else: raise ValueError('seed_type should be nrandom or grid') # Only use values in the mask # segments = segments[mask_ind, :] #print("Number of supervoxels: ", segments.shape[0]) segments = np.ascontiguousarray(segments) # we do the scaling of ratio in the same way as in the SLIC paper # so the values have the same meaning step = float(max((step_z, step_y, step_x))) ratio = 1.0 / compactness image = np.ascontiguousarray(image * ratio, dtype=np.double) mask = np.ascontiguousarray(mask, dtype=np.int32) if recompute_seeds: # Seed step 2: Run SLIC to reinitialise seeds # Runs the supervoxel method but only uses distance to better initialise the method labels = _slic_feat_cython(image, mask, segments, step, max_iter, spacing, slic_zero, feat_scale, only_dist=True) # # Testing # fig = plt.figure() # ax = fig.add_subplot(111, projection='3d') # ax.scatter(segments_old[:, 0], segments_old[:, 1], segments_old[:, 2], c='red', s=80) # ax.scatter(segments[:, 0], segments[:, 1], segments[:, 2], c='blue', s=80) # plt.show() labels = _slic_feat_cython(image, mask, segments, step, max_iter, spacing, slic_zero, feat_scale, only_dist=False) if enforce_connectivity: segment_size = depth * height * width / n_segments min_size = int(min_size_factor * segment_size) max_size = int(max_size_factor * segment_size) labels = _enforce_label_connectivity_cython(labels, mask, n_segments, min_size, max_size) # Also return adjacency map if return_adjacency: labels = np.ascontiguousarray(labels, dtype=np.int32) if mask is None: adj_mat, border_mat = _find_adjacency_map(labels) else: adj_mat, border_mat = _find_adjacency_map_mask(labels) #print(adj_mat.shape) if is_2d: labels = labels[0] return labels, adj_mat, border_mat else: if is_2d: labels = labels[0] return labels
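# Standalone sketch of the "place each seed as far as possible from earlier
# seeds and from the edge of the mask" idea used in SEED STEP 1 above, shown
# on a small 2D mask (illustrative only, not the library routine itself):
import numpy as np
from scipy.ndimage import distance_transform_edt

mask = np.zeros((64, 64), dtype=bool)
mask[8:56, 8:56] = True
free = mask.copy()
seeds = []
for _ in range(5):
    dist = distance_transform_edt(free)            # distance to nearest False pixel
    r, c = np.unravel_index(np.argmax(dist), dist.shape)
    seeds.append((r, c))
    free[r, c] = False                              # placed seeds repel later ones
print(seeds)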
def slic(image, n_segments=100, compactness=10., max_iter=10, sigma=None, spacing=None, multichannel=True, convert2lab=True, ratio=None, enforce_connectivity=False, min_size_factor=0.5, max_size_factor=3, slic_zero=False): """Segments image using k-means clustering in Color-(x,y,z) space. Parameters ---------- image : 2D, 3D or 4D ndarray Input image, which can be 2D or 3D, and grayscale or multichannel (see `multichannel` parameter). n_segments : int, optional The (approximate) number of labels in the segmented output image. compactness : float, optional Balances color-space proximity and image-space proximity. Higher values give more weight to image-space. As `compactness` tends to infinity, superpixel shapes become square/cubic. In SLICO mode, this is the initial compactness. max_iter : int, optional Maximum number of iterations of k-means. sigma : float or (3,) array-like of floats, optional Width of Gaussian smoothing kernel for pre-processing for each dimension of the image. The same sigma is applied to each dimension in case of a scalar value. Zero means no smoothing. Note, that `sigma` is automatically scaled if it is scalar and a manual voxel spacing is provided (see Notes section). spacing : (3,) array-like of floats, optional The voxel spacing along each image dimension. By default, `slic` assumes uniform spacing (same voxel resolution along z, y and x). This parameter controls the weights of the distances along z, y, and x during k-means clustering. multichannel : bool, optional Whether the last axis of the image is to be interpreted as multiple channels or another spatial dimension. convert2lab : bool, optional Whether the input should be converted to Lab colorspace prior to segmentation. For this purpose, the input is assumed to be RGB. Highly recommended. ratio : float, optional Synonym for `compactness`. This keyword is deprecated. enforce_connectivity: bool, optional (default False) Whether the generated segments are connected or not min_size_factor: float, optional Proportion of the minimum segment size to be removed with respect to the supposed segment size ```depth*width*height/n_segments``` max_size_factor: float, optional Proportion of the maximum connected segment size. A value of 3 works in most of the cases. slic_zero: bool, optional Run SLIC-zero, the zero-parameter mode of SLIC Returns ------- labels : 2D or 3D array Integer mask indicating segment labels. Raises ------ ValueError If: - the image dimension is not 2 or 3 and `multichannel == False`, OR - the image dimension is not 3 or 4 and `multichannel == True` Notes ----- * If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to segmentation. * If `sigma` is scalar and `spacing` is provided, the kernel width is divided along each dimension by the spacing. For example, if ``sigma=1`` and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This ensures sensible smoothing for anisotropic images. * The image is rescaled to be in [0, 1] prior to processing. * Images of shape (M, N, 3) are interpreted as 2D RGB images by default. To interpret them as 3D with the last dimension having length 3, use `multichannel=False`. References ---------- .. [1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi, Pascal Fua, and Sabine Süsstrunk, SLIC Superpixels Compared to State-of-the-art Superpixel Methods, TPAMI, May 2012. 
Examples -------- >>> from skimage.segmentation import slic >>> from skimage.data import lena >>> img = lena() >>> segments = slic(img, n_segments=100, compactness=10, sigma=0) Increasing the compactness parameter yields more square regions: >>> segments = slic(img, n_segments=100, compactness=20, sigma=0) """ if sigma is None: warnings.warn('Default value of keyword `sigma` changed from ``1`` ' 'to ``0``.') sigma = 0 if ratio is not None: warnings.warn('Keyword `ratio` is deprecated. Use `compactness` ' 'instead.') compactness = ratio if enforce_connectivity is None: warnings.warn('Deprecation: enforce_connectivity will default to' ' True in future versions.') enforce_connectivity = False image = img_as_float(image) is_2d = False if image.ndim == 2: # 2D grayscale image image = image[np.newaxis, ..., np.newaxis] is_2d = True elif image.ndim == 3 and multichannel: # Make 2D multichannel image 3D with depth = 1 image = image[np.newaxis, ...] is_2d = True elif image.ndim == 3 and not multichannel: # Add channel as single last dimension image = image[..., np.newaxis] if spacing is None: spacing = np.ones(3) elif isinstance(spacing, (list, tuple)): spacing = np.array(spacing, dtype=np.double) if not isinstance(sigma, coll.Iterable): sigma = np.array([sigma, sigma, sigma], dtype=np.double) sigma /= spacing.astype(np.double) elif isinstance(sigma, (list, tuple)): sigma = np.array(sigma, dtype=np.double) if (sigma > 0).any(): # add zero smoothing for multichannel dimension sigma = list(sigma) + [0] image = ndimage.gaussian_filter(image, sigma) if convert2lab and multichannel: if image.shape[3] != 3: raise ValueError("Lab colorspace conversion requires a RGB image.") image = rgb2lab(image) depth, height, width = image.shape[:3] # initialize cluster centroids for desired number of segments grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width] slices = regular_grid(image.shape[:3], n_segments) step_z, step_y, step_x = [int(s.step) for s in slices] segments_z = grid_z[slices] segments_y = grid_y[slices] segments_x = grid_x[slices] segments_color = np.zeros(segments_z.shape + (image.shape[3], )) segments = np.concatenate([ segments_z[..., np.newaxis], segments_y[..., np.newaxis], segments_x[..., np.newaxis], segments_color ], axis=-1).reshape(-1, 3 + image.shape[3]) segments = np.ascontiguousarray(segments) # we do the scaling of ratio in the same way as in the SLIC paper # so the values have the same meaning step = float(max((step_z, step_y, step_x))) ratio = 1.0 / compactness image = np.ascontiguousarray(image * ratio) labels = _slic_cython(image, segments, step, max_iter, spacing, slic_zero) if enforce_connectivity: segment_size = depth * height * width / n_segments min_size = int(min_size_factor * segment_size) max_size = int(max_size_factor * segment_size) labels = _enforce_label_connectivity_cython(labels, n_segments, min_size, max_size) if is_2d: labels = labels[0] return labels
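# Standalone sketch of the centroid-initialisation step performed inside the
# function above, for a single-channel volume of shape (depth, height, width):
import numpy as np
from skimage.util import regular_grid

shape = (10, 64, 64)
grid_z, grid_y, grid_x = np.mgrid[:shape[0], :shape[1], :shape[2]]
slices = regular_grid(shape, 100)
init_zyx = np.stack([grid_z[tuple(slices)],
                     grid_y[tuple(slices)],
                     grid_x[tuple(slices)]], axis=-1).reshape(-1, 3)
print(init_zyx.shape)     # roughly 100 initial (z, y, x) cluster centres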
Both algorithms are implemented in the :py:func:`skimage.morphology.watershed`
function. To use the compact form, simply pass a ``compactness`` value greater
than 0.
"""
import numpy as np
from skimage import data, util, filters, color
from skimage.segmentation import watershed
import matplotlib.pyplot as plt
from skimage import io

coins = color.rgb2gray(io.imread('E:/OwnWork/Leaf/TestImage/Deliveryimage/1.jpg'))
# coins = data.coins()
edges = filters.sobel(coins)

grid = util.regular_grid(coins.shape, n_points=468)

seeds = np.zeros(coins.shape, dtype=int)
seeds[grid] = np.arange(seeds[grid].size).reshape(seeds[grid].shape) + 1

w0 = watershed(edges, seeds)
w1 = watershed(edges, seeds, compactness=0.01)

fig, (ax0, ax1) = plt.subplots(1, 2)

ax0.imshow(color.label2rgb(w0, coins))
ax0.set_title('Classical watershed')

ax1.imshow(color.label2rgb(w1, coins))
ax1.set_title('Compact watershed')
def slic(image, n_segments=None, compactness=.1, max_iter=10, spacing=None): """ Segments image using k-means clustering in cartesian space. Parameters ---------- image : 2D or 3D ndarray Input image, which can be 2D or 3D grayscale image. n_segments : int, optional The approximate number of labels in the segmented output image. compactness : float, optional Balance space proximity. Higher value gives more initial weight to space proximity, making superpixel shapes more square/cubic. This parameter depends strongly on image contrast and on the shapes of objects in the image. max_iter : int, optional Maximum number of iterations of k-means. spacing : (2, ) or (3, ) array-like of floats, optional The voxel spacing along each image dimension. By default, image is assumed to be uniform spaced. This parameter controls the weights of the distances along each dimension during k-means clustering. Returns ------- labels : 2D or 3D ndarray Integer mask indicating segment labels. References ---------- .. [1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi, Pascal Fua, and Sabine Süsstrunk, SLIC Superpixels Compared to State-of-the-art Superpixel Methods, TPAMI, May 2012. .. [2] http://ivrg.epfl.ch/research/superpixels#SLICO .. [3] http://scikit-image.org/docs/dev/api/skimage.segmentation.html """ if image.ndim != 2 and image.ndim != 3: raise ValueError("Invalid image dimension ({}).".format(image.ndim)) image = img_as_float(image) #TODO adapt for 2D array, for now, assume it is targeted for 3D if spacing is None: spacing = np.ones(3) elif isinstance(spacing, (list, tuple)): spacing = np.asarray(spacing, dtype=np.double) depth, height, width = image.shape # initialize cluster centroids for desired number of segments grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width] slices = regular_grid(image.shape[:3], n_segments) step_z, step_y, step_x = [ int(s.step if s.step is not None else 1) for s in slices ] # centroid coordinate segments_z = grid_z[slices] segments_y = grid_y[slices] segments_x = grid_x[slices] # segment no. segments_c = np.zeros_like(segments_z, dtype=np.double) segments = np.stack((segments_z, segments_y, segments_x, segments_c), axis=3).reshape(-1, 4) segments = np.ascontiguousarray(segments) # scaling of the ratio step = float(max((step_z, step_y, step_x))) ratio = 1. / compactness image = np.ascontiguousarray(image * ratio) print(segments.shape) print(segments.dtype) labels = slic_cython(image, segments, step, max_iter, spacing) return labels
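# For comparison with the grayscale variant above, the public scikit-image
# slic also handles a 3D grayscale volume when told the last axis is spatial
# rather than a channel axis (the keyword is `multichannel` in the versions
# these snippets target; newer releases use `channel_axis=None` instead):
import numpy as np
from skimage import segmentation

volume = np.random.rand(10, 64, 64)        # synthetic grayscale volume
labels = segmentation.slic(volume, n_segments=50, compactness=0.1,
                           multichannel=False)
print(labels.shape)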
def slic(image, parallel=True, n_segments=100, compactness=10., max_iter=10, spacing=None, multichannel=True, convert2lab=None, enforce_connectivity=True, min_size_factor=0.5, max_size_factor=3, slic_zero=False, print_csv=False): lg.debug("... starting slic.py ...") """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """ PRE-PROCESSING """ """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" # reshape image to 3D, record if it was originally 2D image = img_as_float(image) is_2d = False if image.ndim == 2: # 2D grayscale image image = image[np.newaxis, ..., np.newaxis] is_2d = True elif image.ndim == 3 and multichannel: # Make 2D multichannel image 3D with depth = 1 image = image[np.newaxis, ...] is_2d = True elif image.ndim == 3 and not multichannel: # Add channel as single last dimension image = image[..., np.newaxis] # convert RGB -> LAB if multichannel and (convert2lab or convert2lab is None): if image.shape[-1] != 3 and convert2lab: raise ValueError("Lab colorspace conversion requires a RGB image.") elif image.shape[-1] == 3: image = rgb2lab(image.astype(np.float32)) # make contiguous is memory image = np.ascontiguousarray(image) #zyxc order, float64 """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """ INITIALIZE PARAMETERS USED FOR SEGMENTATION """ """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" ######################################################## # initalize segments, step, and spacing for _slic_cython # this section of code comes mostly from the skimage library depth, height, width = image.shape[:3] # initalize spacing if spacing is None: spacing = np.ones(3) elif isinstance(spacing, (list, tuple)): spacing = np.array(spacing, dtype=np.double) # initialize cluster centroids for desired number of segments grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width] slices = regular_grid(image.shape[:3], n_segments) step_z, step_y, step_x = [ int(s.step if s.step is not None else 1) for s in slices ] segments_z = grid_z[slices] segments_y = grid_y[slices] segments_x = grid_x[slices] segments_color = np.zeros(segments_z.shape + (image.shape[3], )) segments = np.concatenate([ segments_z[..., np.newaxis], segments_y[..., np.newaxis], segments_x[..., np.newaxis], segments_color ], axis=-1).reshape(-1, 3 + image.shape[3]) segments = np.ascontiguousarray(segments) step = float(max((step_z, step_y, step_x))) # ratio is to scale image for _clic_cython, which expects an image # that is already scaled by 1/compactness ratio = 1.0 / compactness ###################################################### # initalize centroids and centroids_dim for slic_cuda # centroids is a 1D array with 6D centroids represented sequentially # (example: [l1 a1 b1 x1 y1 z1 l2 a2 b2 x2 y2 z2 l3 a3 b3 x3 y3 z3]) centroids = np.array([segment[::-1] for segment in segments], dtype=np.float32) # compute the dimensions of the initial grid of centroids centroids_dim = \ np.array([len(range(slices[n].start, image.shape[n], slices[n].step)) for n in [2, 1, 0]], dtype=np.int32) """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """ SEGMENTATION """ """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" """""" # actual call to slic, with timing tstart = time() if parallel: labels = slic_cuda(image, centroids, centroids_dim, compactness, max_iter, print_csv) else: labels = _slic_cython(image * ratio, segments, step, max_iter, spacing, 
slic_zero) if print_csv: print "%s, %s, %s," % (0, 0, 0), # for piping into csv tend = time() if enforce_connectivity: # use commented line to verify that ascontiguousarray is what # causes mark_cuda_labels to produce unexpected output #labels = np.ascontiguousarray(labels.astype(np.intp)) segment_size = depth * height * width / n_segments min_size = int(min_size_factor * segment_size) max_size = int(max_size_factor * segment_size) labels = _enforce_label_connectivity_cython( np.ascontiguousarray(labels.astype(np.intp)), n_segments, min_size, max_size) if print_csv: print tend - tstart lg.info("TIME: %s", tend - tstart) if is_2d: labels = labels[0] return labels, centroids_dim
def slic(image, n_segments=100, compactness=10., max_iter=10, sigma=0, spacing=None, multichannel=True, convert2lab=None, enforce_connectivity=False, min_size_factor=0.5, max_size_factor=3, slic_zero=False, seed_type='grid', mask=None, recompute_seeds=False, plot_examples=False): """Segments image using k-means clustering in Color-(x,y,z) space. Parameters ---------- image : 2D, 3D or 4D ndarray Input image, which can be 2D or 3D, and grayscale or multichannel (see `multichannel` parameter). n_segments : int, optional The (approximate) number of labels in the segmented output image. compactness : float, optional Balances color proximity and space proximity. Higher values give more weight to space proximity, making superpixel shapes more square/cubic. In SLICO mode, this is the initial compactness. This parameter depends strongly on image contrast and on the shapes of objects in the image. We recommend exploring possible values on a log scale, e.g., 0.01, 0.1, 1, 10, 100, before refining around a chosen value. max_iter : int, optional Maximum number of iterations of k-means. sigma : float or (3,) array-like of floats, optional Width of Gaussian smoothing kernel for pre-processing for each dimension of the image. The same sigma is applied to each dimension in case of a scalar value. Zero means no smoothing. Note, that `sigma` is automatically scaled if it is scalar and a manual voxel spacing is provided (see Notes section). spacing : (3,) array-like of floats, optional The voxel spacing along each image dimension. By default, `slic` assumes uniform spacing (same voxel resolution along z, y and x). This parameter controls the weights of the distances along z, y, and x during k-means clustering. multichannel : bool, optional Whether the last axis of the image is to be interpreted as multiple channels or another spatial dimension. convert2lab : bool, optional Whether the input should be converted to Lab colorspace prior to maskslic. The input image *must* be RGB. Highly recommended. This option defaults to ``True`` when ``multichannel=True`` *and* ``image.shape[-1] == 3``. enforce_connectivity: bool, optional Whether the generated segments are connected or not min_size_factor: float, optional Proportion of the minimum segment size to be removed with respect to the supposed segment size ```depth*width*height/n_segments``` max_size_factor: float, optional Proportion of the maximum connected segment size. A value of 3 works in most of the cases. slic_zero: bool, optional Run SLIC-zero, the zero-parameter mode of SLIC. [2]_ mask: ndarray of bools or 0s and 1s, optional Array of same shape as `image`. Supervoxel analysis will only be performed on points at which mask == True Returns ------- labels : 2D or 3D array Integer mask indicating segment labels. Raises ------ ValueError If ``convert2lab`` is set to ``True`` but the last array dimension is not of length 3. Notes ----- * If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to maskslic. * If `sigma` is scalar and `spacing` is provided, the kernel width is divided along each dimension by the spacing. For example, if ``sigma=1`` and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This ensures sensible smoothing for anisotropic images. * The image is rescaled to be in [0, 1] prior to processing. * Images of shape (M, N, 3) are interpreted as 2D RGB images by default. To interpret them as 3D with the last dimension having length 3, use `multichannel=False`. References ---------- .. 
[1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi, Pascal Fua, and Sabine Süsstrunk, SLIC Superpixels Compared to State-of-the-art Superpixel Methods, TPAMI, May 2012. .. [2] http://ivrg.epfl.ch/research/superpixels#SLICO Examples -------- >>> from maskslic import slic >>> from skimage.data import astronaut >>> img = astronaut() >>> segments = slic(img, n_segments=100, compactness=10) Increasing the compactness parameter yields more square regions: >>> segments = slic(img, n_segments=100, compactness=20) """ # if enforce_connectivity: # raise NotImplementedError("Enforce connectivity has not been implemented yet for maskSLIC.\n" # "Please set enforce connectivity to 'False' ") if slic_zero: raise NotImplementedError("Slic zero has not been implemented yet for maskSLIC.") img = np.copy(image) if mask is not None: msk = np.copy(mask==1) else: msk = None # print("mask shape", msk.shape) if mask is None and seed_type == 'nplace': warnings.warn('nrandom assignment of seed points should only be used with an ROI. Changing seed type.') seed_type = 'size' if seed_type == 'nplace' and recompute_seeds is False: warnings.warn('Seeds should be recomputed when seed points are randomly assigned') image = img_as_float(image) is_2d = False if image.ndim == 2: # 2D grayscale image image = image[np.newaxis, ..., np.newaxis] is_2d = True elif image.ndim == 3 and multichannel: # Make 2D multichannel image 3D with depth = 1 image = image[np.newaxis, ...] is_2d = True elif image.ndim == 3 and not multichannel: # Add channel as single last dimension image = image[..., np.newaxis] if mask is None: mask = np.ones(image.shape[:3], dtype=np.bool) else: mask = np.asarray(mask, dtype=np.bool) if mask.ndim == 2: mask = mask[np.newaxis, ...] if spacing is None: spacing = np.ones(3) elif isinstance(spacing, (list, tuple)): spacing = np.array(spacing, dtype=np.double) if not isinstance(sigma, coll.Iterable): sigma = np.array([sigma, sigma, sigma], dtype=np.double) sigma /= spacing.astype(np.double) elif isinstance(sigma, (list, tuple)): sigma = np.array(sigma, dtype=np.double) if (sigma > 0).any(): # add zero smoothing for multichannel dimension sigma = list(sigma) + [0] image = ndi.gaussian_filter(image, sigma) if multichannel and (convert2lab or convert2lab is None): if image.shape[-1] != 3 and convert2lab: raise ValueError("Lab colorspace conversion requires a RGB image.") elif image.shape[-1] == 3: image = rgb2lab(image) depth, height, width = image.shape[:3] if seed_type == 'nplace': segments, step_x, step_y, step_z = place_seed_points(image, img, mask, n_segments, spacing) # print('{0}, {1}, {2}'.format(step_x, step_y, step_z)) elif seed_type == 'grid': # initialize cluster centroids for desired number of segments # essentially just outputs the indices of a grid in the x, y and z direction grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width] # returns 3 slices (an object representing an array of slices, see builtin slice) slices = regular_grid(image.shape[:3], n_segments) step_z, step_y, step_x = [int(s.step) for s in slices] # extract step size from slices segments_z = grid_z[slices] # use slices to extract coordinates for centre points segments_y = grid_y[slices] segments_x = grid_x[slices] # mask_ind = mask[slices].reshape(-1) # list of all locations as well as zeros for the color features segments_color = np.zeros(segments_z.shape + (image.shape[3],)) segments = np.concatenate([segments_z[..., np.newaxis], segments_y[..., np.newaxis], segments_x[..., np.newaxis], segments_color], 
axis=-1).reshape(-1, 3 + image.shape[3]) if mask is not None: ind1 = mask[segments[:, 0].astype('int'), segments[:, 1].astype('int'), segments[:, 2].astype('int')] segments = segments[ind1, :] # seg_list = [] # for ii in range(segments.shape[0]): # if mask[segments[ii, 0], segments[ii, 1], segments[ii, 2]] != 0: # seg_list.append(ii) # segments = segments[seg_list, :] else: raise ValueError('seed_type should be nrandom or grid') segments = np.ascontiguousarray(segments) # we do the scaling of ratio in the same way as in the SLIC paper # so the values have the same meaning step = float(max((step_z, step_y, step_x))) ratio = 1.0 / compactness image = np.ascontiguousarray(image * ratio, dtype=np.double) mask = np.ascontiguousarray(mask, dtype=np.int32) segments_old = np.copy(segments) if recompute_seeds: # Seed step 2: Run SLIC to reinitialise seeds # Runs the supervoxel method but only uses distance to better initialise the method labels = _slic_cython(image, mask, segments, step, max_iter, spacing, slic_zero, only_dist=True) # Testing if plot_examples: fig = plt.figure() plt.imshow(img) if msk is not None: plt.contour(msk, contours=1, colors='yellow', linewidths=1) plt.scatter(segments_old[:, 2], segments_old[:, 1], color='green') plt.axis('off') fig = plt.figure() plt.imshow(img) if msk is not None: plt.contour(msk, contours=1, colors='yellow', linewidths=1) plt.scatter(segments[:, 2], segments[:, 1], color='green') plt.axis('off') # image = np.ascontiguousarray(image * ratio) labels = _slic_cython(image, mask, segments, step, max_iter, spacing, slic_zero, only_dist=False) if enforce_connectivity: if msk is None: segment_size = depth * height * width / n_segments else: segment_size = msk.sum() / n_segments min_size = int(min_size_factor * segment_size) max_size = int(max_size_factor * segment_size) labels = _enforce_label_connectivity_cython(labels, mask, n_segments, min_size, max_size) if is_2d: labels = labels[0] return labels
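# Hedged usage sketch for the masked SLIC variant defined above (assumes the
# package providing this function and its Cython helpers, e.g. maskslic, is
# installed and imported):
import numpy as np
from skimage import data

img = data.astronaut()
roi = np.zeros(img.shape[:2], dtype=bool)
roi[100:400, 100:400] = True               # restrict superpixels to a rectangular ROI
labels = slic(img, n_segments=80, compactness=10, mask=roi)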
#%% watershed algorithm (wrong approach, but worse)
# apply noise removal
med = median(image, disk(5))
# apply thresholding
thresh_min = threshold_minimum(med)
binary_min = med > thresh_min
inverted = np.invert(binary_min)
# find edges
edges = sobel(inverted)
grid = util.regular_grid(inverted.shape, n_points=468)
seeds = np.zeros(inverted.shape, dtype=int)
seeds[grid] = np.arange(seeds[grid].size).reshape(seeds[grid].shape) + 1
labels = watershed(edges, seeds, compactness=True)  # NOTE: compactness expects a float; True is treated as 1.0
#plt.imshow(labels, cmap="gray")
plt.imshow(color.label2rgb(labels, inverted, bg_label=-1))

#%% image segmentation
# typical pipeline for image segmentation (see the sketch below):
# 1. preprocess the image by noise removal
# 2. apply background removal (if necessary, i.e. there is large variation in image intensity)
# 3. apply thresholding
# 4. post-process the thresholding (if necessary): binary closing (fill holes) and binary opening
# 5. watershed to cut connected objects
# 6. post-process the watershed (if necessary): binary closing (fill holes) and binary opening
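#%% sketch of the pipeline enumerated above, on a stock image and with
# illustrative parameter values (step 2, background removal, is skipped here)
import numpy as np
from scipy import ndimage as ndi
from skimage import data, filters, morphology, segmentation

img = data.coins()
den = filters.median(img, morphology.disk(3))                     # 1. noise removal
binary = den > filters.threshold_otsu(den)                        # 3. thresholding
binary = morphology.binary_closing(binary, morphology.disk(3))    # 4. fill holes
distance = ndi.distance_transform_edt(binary)                     # 5. watershed on the distance map
markers, _ = ndi.label(morphology.h_maxima(distance, 2))
labels = segmentation.watershed(-distance, markers, mask=binary)  # cut touching objects apart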
close to the pixel being considered.

Both algorithms are implemented in the :py:func:`skimage.morphology.watershed`
function. To use the compact form, simply pass a ``compactness`` value greater
than 0.
"""
import numpy as np
from skimage import data, util, filters, color
from skimage.morphology import watershed
import matplotlib.pyplot as plt

coins = data.coins()
edges = filters.sobel(coins)

grid = util.regular_grid(coins.shape, n_points=468)

seeds = np.zeros(coins.shape, dtype=int)
seeds[grid] = np.arange(seeds[grid].size).reshape(seeds[grid].shape) + 1

w0 = watershed(edges, seeds)
w1 = watershed(edges, seeds, compactness=0.01)

fig, (ax0, ax1) = plt.subplots(1, 2)

ax0.imshow(color.label2rgb(w0, coins))
ax0.set_title("Classical watershed")

ax1.imshow(color.label2rgb(w1, coins))
ax1.set_title("Compact watershed")
def slic(image, n_segments=100, compactness=10., max_iter=10, sigma=0, spacing=None, multichannel=True, convert2lab=None, enforce_connectivity=True, min_size_factor=0.5, max_size_factor=3, slic_zero=False): """Segments image using k-means clustering in Color-(x,y,z) space. Parameters ---------- image : 2D, 3D or 4D ndarray Input image, which can be 2D or 3D, and grayscale or multichannel (see `multichannel` parameter). n_segments : int, optional The (approximate) number of labels in the segmented output image. compactness : float, optional Balances color proximity and space proximity. Higher values give more weight to space proximity, making superpixel shapes more square/cubic. In SLICO mode, this is the initial compactness. This parameter depends strongly on image contrast and on the shapes of objects in the image. We recommend exploring possible values on a log scale, e.g., 0.01, 0.1, 1, 10, 100, before refining around a chosen value. max_iter : int, optional Maximum number of iterations of k-means. sigma : float or (3,) array-like of floats, optional Width of Gaussian smoothing kernel for pre-processing for each dimension of the image. The same sigma is applied to each dimension in case of a scalar value. Zero means no smoothing. Note, that `sigma` is automatically scaled if it is scalar and a manual voxel spacing is provided (see Notes section). spacing : (3,) array-like of floats, optional The voxel spacing along each image dimension. By default, `slic` assumes uniform spacing (same voxel resolution along z, y and x). This parameter controls the weights of the distances along z, y, and x during k-means clustering. multichannel : bool, optional Whether the last axis of the image is to be interpreted as multiple channels or another spatial dimension. convert2lab : bool, optional Whether the input should be converted to Lab colorspace prior to segmentation. The input image *must* be RGB. Highly recommended. This option defaults to ``True`` when ``multichannel=True`` *and* ``image.shape[-1] == 3``. enforce_connectivity: bool, optional Whether the generated segments are connected or not min_size_factor: float, optional Proportion of the minimum segment size to be removed with respect to the supposed segment size ```depth*width*height/n_segments``` max_size_factor: float, optional Proportion of the maximum connected segment size. A value of 3 works in most of the cases. slic_zero: bool, optional Run SLIC-zero, the zero-parameter mode of SLIC. [2]_ Returns ------- labels : 2D or 3D array Integer mask indicating segment labels. Raises ------ ValueError If ``convert2lab`` is set to ``True`` but the last array dimension is not of length 3. Notes ----- * If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to segmentation. * If `sigma` is scalar and `spacing` is provided, the kernel width is divided along each dimension by the spacing. For example, if ``sigma=1`` and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This ensures sensible smoothing for anisotropic images. * The image is rescaled to be in [0, 1] prior to processing. * Images of shape (M, N, 3) are interpreted as 2D RGB images by default. To interpret them as 3D with the last dimension having length 3, use `multichannel=False`. References ---------- .. [1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi, Pascal Fua, and Sabine Süsstrunk, SLIC Superpixels Compared to State-of-the-art Superpixel Methods, TPAMI, May 2012. .. 
[2] http://ivrg.epfl.ch/research/superpixels#SLICO Examples -------- >>> from skimage.segmentation import slic >>> from skimage.data import astronaut >>> img = astronaut() >>> segments = slic(img, n_segments=100, compactness=10) Increasing the compactness parameter yields more square regions: >>> segments = slic(img, n_segments=100, compactness=20) """ image = img_as_float(image) is_2d = False ##添加维度 if image.ndim == 2: # 2D grayscale image image = image[np.newaxis, ..., np.newaxis] is_2d = True elif image.ndim == 3 and multichannel: # Make 2D multichannel image 3D with depth = 1 image = image[np.newaxis, ...] is_2d = True elif image.ndim == 3 and not multichannel: # Add channel as single last dimension image = image[..., np.newaxis] if spacing is None: spacing = np.ones(3) elif isinstance(spacing, (list, tuple)): spacing = np.array(spacing, dtype=np.double) if not isinstance(sigma, coll.Iterable): sigma = np.array([sigma, sigma, sigma], dtype=np.double) sigma /= spacing.astype(np.double) elif isinstance(sigma, (list, tuple)): sigma = np.array(sigma, dtype=np.double) if (sigma > 0).any(): # add zero smoothing for multichannel dimension sigma = list(sigma) + [0] image = ndi.gaussian_filter(image, sigma) if multichannel and (convert2lab or convert2lab is None): if image.shape[-1] != 3 and convert2lab: raise ValueError("Lab colorspace conversion requires a RGB image.") elif image.shape[-1] == 3: image = rgb2lab(image) depth, height, width = image.shape[:3] # initialize cluster centroids for desired number of segments grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width] slices = regular_grid(image.shape[:3], n_segments) step_z, step_y, step_x = [ int(s.step if s.step is not None else 1) for s in slices ] segments_z = grid_z[slices] segments_y = grid_y[slices] segments_x = grid_x[slices] segments_color = np.zeros(segments_z.shape + (image.shape[3], )) segments = np.concatenate([ segments_z[..., np.newaxis], segments_y[..., np.newaxis], segments_x[..., np.newaxis], segments_color ], axis=-1).reshape(-1, 3 + image.shape[3]) segments = np.ascontiguousarray(segments) # we do the scaling of ratio in the same way as in the SLIC paper # so the values have the same meaning step = float(max((step_z, step_y, step_x))) ratio = 1.0 / compactness image = np.ascontiguousarray(image * ratio) labels, color_center, edge = _slic_cython(image, segments, step, max_iter, spacing, slic_zero) if enforce_connectivity: segment_size = depth * height * width / n_segments min_size = int(min_size_factor * segment_size) max_size = int(max_size_factor * segment_size) labels, color_center, edge = _enforce_label_connectivity_cython( labels, color_center, min_size, max_size) if is_2d: labels = labels[0] color_center = color_center[:, 3:] color_center = color_center * 10 color_center = color_center[np.newaxis, ...] color_center = lab2rgb(color_center) color_center = color_center * 255 color_center = color_center[0] edgeList = [[] for i in range(len(edge))] for i in range(len(edge)): for j in range(len(edge[i])): if edge[i, j] != -1 and edge[i, j] not in edgeList[i]: edgeList[i].append(edge[i, j]) return labels, color_center[:(np.amax(labels) + 1)], edgeList
def test_regular_grid_full():
    ar = np.zeros((2, 2))
    g = regular_grid(ar.shape, 25)
    assert_equal(g, [slice(None, None, None), slice(None, None, None)])
    ar[g] = 1
    assert_equal(ar.size, ar.sum())
def slic(image, n_segments=100, compactness=10., max_iter=10, sigma=0, spacing=None, multichannel=True, convert2lab=None, enforce_connectivity=False, min_size_factor=0.5, max_size_factor=3, slic_zero=False, seed_type='grid', mask=None, recompute_seeds=False, plot_examples=False): """Segments image using k-means clustering in Color-(x,y,z) space. Parameters ---------- image : 2D, 3D or 4D ndarray Input image, which can be 2D or 3D, and grayscale or multichannel (see `multichannel` parameter). n_segments : int, optional The (approximate) number of labels in the segmented output image. compactness : float, optional Balances color proximity and space proximity. Higher values give more weight to space proximity, making superpixel shapes more square/cubic. In SLICO mode, this is the initial compactness. This parameter depends strongly on image contrast and on the shapes of objects in the image. We recommend exploring possible values on a log scale, e.g., 0.01, 0.1, 1, 10, 100, before refining around a chosen value. max_iter : int, optional Maximum number of iterations of k-means. sigma : float or (3,) array-like of floats, optional Width of Gaussian smoothing kernel for pre-processing for each dimension of the image. The same sigma is applied to each dimension in case of a scalar value. Zero means no smoothing. Note, that `sigma` is automatically scaled if it is scalar and a manual voxel spacing is provided (see Notes section). spacing : (3,) array-like of floats, optional The voxel spacing along each image dimension. By default, `slic` assumes uniform spacing (same voxel resolution along z, y and x). This parameter controls the weights of the distances along z, y, and x during k-means clustering. multichannel : bool, optional Whether the last axis of the image is to be interpreted as multiple channels or another spatial dimension. convert2lab : bool, optional Whether the input should be converted to Lab colorspace prior to maskslic. The input image *must* be RGB. Highly recommended. This option defaults to ``True`` when ``multichannel=True`` *and* ``image.shape[-1] == 3``. enforce_connectivity: bool, optional Whether the generated segments are connected or not min_size_factor: float, optional Proportion of the minimum segment size to be removed with respect to the supposed segment size ```depth*width*height/n_segments``` max_size_factor: float, optional Proportion of the maximum connected segment size. A value of 3 works in most of the cases. slic_zero: bool, optional Run SLIC-zero, the zero-parameter mode of SLIC. [2]_ mask: ndarray of bools or 0s and 1s, optional Array of same shape as `image`. Supervoxel analysis will only be performed on points at which mask == True Returns ------- labels : 2D or 3D array Integer mask indicating segment labels. Raises ------ ValueError If ``convert2lab`` is set to ``True`` but the last array dimension is not of length 3. Notes ----- * If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to maskslic. * If `sigma` is scalar and `spacing` is provided, the kernel width is divided along each dimension by the spacing. For example, if ``sigma=1`` and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This ensures sensible smoothing for anisotropic images. * The image is rescaled to be in [0, 1] prior to processing. * Images of shape (M, N, 3) are interpreted as 2D RGB images by default. To interpret them as 3D with the last dimension having length 3, use `multichannel=False`. References ---------- .. 
[1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi, Pascal Fua, and Sabine Süsstrunk, SLIC Superpixels Compared to State-of-the-art Superpixel Methods, TPAMI, May 2012. .. [2] http://ivrg.epfl.ch/research/superpixels#SLICO Examples -------- >>> from maskslic import slic >>> from skimage.data import astronaut >>> img = astronaut() >>> segments = slic(img, n_segments=100, compactness=10) Increasing the compactness parameter yields more square regions: >>> segments = slic(img, n_segments=100, compactness=20) """ # if enforce_connectivity: # raise NotImplementedError("Enforce connectivity has not been implemented yet for maskSLIC.\n" # "Please set enforce connectivity to 'False' ") if slic_zero: raise NotImplementedError( "Slic zero has not been implemented yet for maskSLIC.") img = np.copy(image) if mask is not None: msk = np.copy(mask == 1) else: msk = None # print("mask shape", msk.shape) if mask is None and seed_type == 'nplace': warnings.warn( 'nrandom assignment of seed points should only be used with an ROI. Changing seed type.' ) seed_type = 'size' if seed_type == 'nplace' and recompute_seeds is False: warnings.warn( 'Seeds should be recomputed when seed points are randomly assigned' ) image = img_as_float(image) is_2d = False if image.ndim == 2: # 2D grayscale image image = image[np.newaxis, ..., np.newaxis] is_2d = True elif image.ndim == 3 and multichannel: # Make 2D multichannel image 3D with depth = 1 image = image[np.newaxis, ...] is_2d = True elif image.ndim == 3 and not multichannel: # Add channel as single last dimension image = image[..., np.newaxis] if mask is None: mask = np.ones(image.shape[:3], dtype=np.bool) else: mask = np.asarray(mask, dtype=np.bool) if mask.ndim == 2: mask = mask[np.newaxis, ...] if spacing is None: spacing = np.ones(3) elif isinstance(spacing, (list, tuple)): spacing = np.array(spacing, dtype=np.double) if not isinstance(sigma, coll.Iterable): sigma = np.array([sigma, sigma, sigma], dtype=np.double) sigma /= spacing.astype(np.double) elif isinstance(sigma, (list, tuple)): sigma = np.array(sigma, dtype=np.double) if (sigma > 0).any(): # add zero smoothing for multichannel dimension sigma = list(sigma) + [0] image = ndi.gaussian_filter(image, sigma) if multichannel and (convert2lab or convert2lab is None): if image.shape[-1] != 3 and convert2lab: raise ValueError("Lab colorspace conversion requires a RGB image.") elif image.shape[-1] == 3: image = rgb2lab(image) depth, height, width = image.shape[:3] if seed_type == 'nplace': segments, step_x, step_y, step_z = place_seed_points( image, img, mask, n_segments, spacing) # print('{0}, {1}, {2}'.format(step_x, step_y, step_z)) elif seed_type == 'grid': # initialize cluster centroids for desired number of segments # essentially just outputs the indices of a grid in the x, y and z direction grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width] # returns 3 slices (an object representing an array of slices, see builtin slice) slices = regular_grid(image.shape[:3], n_segments) step_z, step_y, step_x = [int(s.step) for s in slices ] # extract step size from slices segments_z = grid_z[ slices] # use slices to extract coordinates for centre points segments_y = grid_y[slices] segments_x = grid_x[slices] # mask_ind = mask[slices].reshape(-1) # list of all locations as well as zeros for the color features segments_color = np.zeros(segments_z.shape + (image.shape[3], )) segments = np.concatenate([ segments_z[..., np.newaxis], segments_y[..., np.newaxis], segments_x[..., np.newaxis], segments_color 
], axis=-1).reshape(-1, 3 + image.shape[3]) if mask is not None: ind1 = mask[segments[:, 0].astype('int'), segments[:, 1].astype('int'), segments[:, 2].astype('int')] segments = segments[ind1, :] # seg_list = [] # for ii in range(segments.shape[0]): # if mask[segments[ii, 0], segments[ii, 1], segments[ii, 2]] != 0: # seg_list.append(ii) # segments = segments[seg_list, :] else: raise ValueError('seed_type should be nrandom or grid') segments = np.ascontiguousarray(segments) # we do the scaling of ratio in the same way as in the SLIC paper # so the values have the same meaning step = float(max((step_z, step_y, step_x))) ratio = 1.0 / compactness image = np.ascontiguousarray(image * ratio, dtype=np.double) mask = np.ascontiguousarray(mask, dtype=np.int32) segments_old = np.copy(segments) if recompute_seeds: # Seed step 2: Run SLIC to reinitialise seeds # Runs the supervoxel method but only uses distance to better initialise the method labels = _slic_cython(image, mask, segments, step, max_iter, spacing, slic_zero, only_dist=True) # Testing if plot_examples: fig = plt.figure() plt.imshow(img) if msk is not None: plt.contour(msk, contours=1, colors='yellow', linewidths=1) plt.scatter(segments_old[:, 2], segments_old[:, 1], color='green') plt.axis('off') fig = plt.figure() plt.imshow(img) if msk is not None: plt.contour(msk, contours=1, colors='yellow', linewidths=1) plt.scatter(segments[:, 2], segments[:, 1], color='green') plt.axis('off') # image = np.ascontiguousarray(image * ratio) labels = _slic_cython(image, mask, segments, step, max_iter, spacing, slic_zero, only_dist=False) if enforce_connectivity: if msk is None: segment_size = depth * height * width / n_segments else: segment_size = msk.sum() / n_segments min_size = int(min_size_factor * segment_size) max_size = int(max_size_factor * segment_size) labels = _enforce_label_connectivity_cython(labels, mask, n_segments, min_size, max_size) if is_2d: labels = labels[0] return labels