def test_reduce_func_sumofsqrt_linewidth_3():
    def reduce_func(x):
        return (x ** 0.5).sum()

    prof = profile_line(
        pyth_image,
        (1, 2),
        (4, 2),
        linewidth=3,
        order=0,
        reduce_func=reduce_func,
        mode="constant",
    )
    expected_prof = apply_along_axis(
        reduce_func, arr=pyth_image[1:5, 1:4], axis=1
    )
    assert_array_almost_equal(prof, expected_prof)
def profile_line(
    image,
    src,
    dst,
    linewidth=1,
    order=None,
    mode=None,
    cval=0.0,
    *,
    reduce_func=cp.mean,
):
    """Return the intensity profile of an image measured along a scan line.

    Parameters
    ----------
    image : ndarray, shape (M, N[, C])
        The image, either grayscale (2D array) or multichannel
        (3D array, where the final axis contains the channel
        information).
    src : array_like, shape (2, )
        The coordinates of the start point of the scan line.
    dst : array_like, shape (2, )
        The coordinates of the end point of the scan line.
        The destination point is *included* in the profile, in
        contrast to standard numpy indexing.
    linewidth : int, optional
        Width of the scan, perpendicular to the line.
    order : int in {0, 1, 2, 3, 4, 5}, optional
        The order of the spline interpolation, default is 0 if
        image.dtype is bool and 1 otherwise. The order has to be in the
        range 0-5. See `skimage.transform.warp` for detail.
    mode : {'constant', 'nearest', 'reflect', 'mirror', 'wrap'}, optional
        How to compute any values falling outside of the image.
    cval : float, optional
        If `mode` is 'constant', what constant value to use outside the
        image.
    reduce_func : callable, optional
        Function used to calculate the aggregation of pixel values
        perpendicular to the profile_line direction when `linewidth` > 1.
        If set to None the unreduced array will be returned.

    Returns
    -------
    return_value : array
        The intensity profile along the scan line. The length of the
        profile is the ceil of the computed length of the scan line.

    Examples
    --------
    >>> import cupy as cp
    >>> x = cp.asarray([[1, 1, 1, 2, 2, 2]])
    >>> img = cp.vstack([cp.zeros_like(x), x, x, x, cp.zeros_like(x)])
    >>> img
    array([[0, 0, 0, 0, 0, 0],
           [1, 1, 1, 2, 2, 2],
           [1, 1, 1, 2, 2, 2],
           [1, 1, 1, 2, 2, 2],
           [0, 0, 0, 0, 0, 0]])
    >>> profile_line(img, (2, 1), (2, 4))
    array([1., 1., 2., 2.])
    >>> profile_line(img, (1, 0), (1, 6), cval=4)
    array([1., 1., 1., 2., 2., 2., 4.])

    The destination point is included in the profile, in contrast to
    standard numpy indexing. For example:

    >>> profile_line(img, (1, 0), (1, 6))  # The final point is out of bounds
    array([1., 1., 1., 2., 2., 2., 0.])
    >>> profile_line(img, (1, 0), (1, 5))  # This accesses the full second row
    array([1., 1., 1., 2., 2., 2.])

    For different reduce_func inputs:

    >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=cp.mean)
    array([0.66666667, 0.66666667, 0.66666667, 1.33333333])
    >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=cp.max)
    array([1, 1, 1, 2])
    >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=cp.sum)
    array([2, 2, 2, 4])

    The unreduced array will be returned when `reduce_func` is None or
    when `reduce_func` acts on each pixel value individually.

    >>> profile_line(img, (1, 2), (4, 2), linewidth=3, order=0,
    ...              reduce_func=None)
    array([[1, 1, 2],
           [1, 1, 2],
           [1, 1, 2],
           [0, 0, 0]])
    >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=cp.sqrt)
    array([[1.        , 1.        , 0.        ],
           [1.        , 1.        , 0.        ],
           [1.        , 1.        , 0.        ],
           [1.41421356, 1.41421356, 0.        ]])
    """
    order = _validate_interpolation_order(image.dtype, order)

    if mode is None:
        warn(
            "Default out of bounds interpolation mode 'constant' is "
            "deprecated. In version 0.19 it will be set to 'reflect'. "
            "To avoid this warning, set `mode=` explicitly.",
            FutureWarning,
            stacklevel=2,
        )
        mode = "constant"

    perp_lines = _line_profile_coordinates(src, dst, linewidth=linewidth)
    if image.ndim == 3:
        pixels = [
            ndi.map_coordinates(
                image[..., i],
                perp_lines,
                prefilter=order > 1,
                order=order,
                mode=mode,
                cval=cval,
            )
            for i in range(image.shape[2])
        ]
        pixels = cp.transpose(cp.asarray(pixels), (1, 2, 0))
    else:
        pixels = ndi.map_coordinates(
            image,
            perp_lines,
            prefilter=order > 1,
            order=order,
            mode=mode,
            cval=cval,
        )
    # With reduce_func=None, the sampled array has its row values (axis=1)
    # in reversed order. Flip here so the output ordering is consistent.
    pixels = cp.flip(pixels, axis=1)

    if reduce_func is None:
        intensities = pixels
    else:
        try:
            intensities = reduce_func(pixels, axis=1)
        except TypeError:  # function doesn't allow axis kwarg
            intensities = cnp.apply_along_axis(reduce_func, arr=pixels, axis=1)

    return intensities
def _clahe(image, kernel_size, clip_limit, nbins):
    """Contrast Limited Adaptive Histogram Equalization.

    Parameters
    ----------
    image : (N1, ..., NN) ndarray
        Input image.
    kernel_size : int or N-tuple of int
        Defines the shape of contextual regions used in the algorithm.
    clip_limit : float
        Normalized clipping limit between 0 and 1 (higher values give more
        contrast).
    nbins : int
        Number of gray bins for histogram ("data range").

    Returns
    -------
    out : (N1, ..., NN) ndarray
        Equalized image.

    The number of "effective" graylevels in the output image is set by
    `nbins`; selecting a small value (e.g. 128) speeds up processing and
    still produces an output image of good quality. A clip limit of 0, or
    of 1 or larger, results in standard (non-contrast-limited) AHE.
    """
    ndim = image.ndim
    dtype = image.dtype

    # pad the image such that the shape in each dimension
    # - is a multiple of the kernel_size and
    # - is preceded by half a kernel size
    pad_start_per_dim = [k // 2 for k in kernel_size]
    pad_end_per_dim = [
        (k - s % k) % k + math.ceil(k / 2.0)
        for k, s in zip(kernel_size, image.shape)
    ]

    image = cp.pad(
        image,
        [(p_i, p_f) for p_i, p_f in zip(pad_start_per_dim, pad_end_per_dim)],
        mode="reflect",
    )

    # reduce the number of graylevels via an integer lookup table
    bin_size = 1 + NR_OF_GRAY // nbins
    lut = cp.arange(NR_OF_GRAY)
    lut //= bin_size
    image = lut[image]

    # calculate graylevel mappings for each contextual region
    # rearrange image into flattened contextual regions
    ns_hist = [int(s / k) - 1 for s, k in zip(image.shape, kernel_size)]
    hist_blocks_shape = functools.reduce(
        operator.add, [(s, k) for s, k in zip(ns_hist, kernel_size)]
    )
    hist_blocks_axis_order = tuple(range(0, ndim * 2, 2)) + tuple(
        range(1, ndim * 2, 2)
    )
    hist_slices = [
        slice(k // 2, k // 2 + n * k) for k, n in zip(kernel_size, ns_hist)
    ]
    hist_blocks = image[tuple(hist_slices)].reshape(hist_blocks_shape)
    hist_blocks = hist_blocks.transpose(hist_blocks_axis_order)
    hist_block_assembled_shape = hist_blocks.shape
    hist_blocks = hist_blocks.reshape((_prod(ns_hist), -1))

    # Calculate actual clip limit
    if clip_limit > 0.0:
        clim = int(max(clip_limit * _prod(kernel_size), 1))
    else:
        # largest possible value, i.e. do not clip (AHE)
        clim = _prod(kernel_size)

    # faster to compute the histograms and clip them on the host
    # (a cnp.apply_along_axis-based GPU variant was slower)
    hist_blocks = cp.asnumpy(hist_blocks)
    hist = np.apply_along_axis(
        np.bincount, -1, hist_blocks, minlength=nbins
    )
    hist = np.apply_along_axis(
        clip_histogram, -1, hist, clip_limit=clim, xp=np
    )
    hist = cp.asarray(hist)
    hist = map_histogram(hist, 0, NR_OF_GRAY - 1, _prod(kernel_size))
    hist = hist.reshape(hist_block_assembled_shape[:ndim] + (-1,))

    # duplicate leading mappings in each dim
    map_array = cp.pad(
        hist, [(1, 1) for _ in range(ndim)] + [(0, 0)], mode="edge"
    )

    # Perform multilinear interpolation of graylevel mappings
    # using the convention described here:
    # https://en.wikipedia.org/w/index.php?title=Adaptive_histogram_
    # equalization&oldid=936814673#Efficient_computation_by_interpolation

    # rearrange image into blocks for vectorized processing
    ns_proc = [int(s / k) for s, k in zip(image.shape, kernel_size)]
    blocks_shape = functools.reduce(
        operator.add, [(s, k) for s, k in zip(ns_proc, kernel_size)]
    )
    blocks_axis_order = hist_blocks_axis_order
    blocks = image.reshape(blocks_shape)
    blocks = blocks.transpose(blocks_axis_order)
    blocks_flattened_shape = blocks.shape
    blocks = blocks.reshape((_prod(ns_proc), _prod(blocks.shape[ndim:])))

    # calculate interpolation coefficients
    coeffs = cp.meshgrid(
        *tuple([cp.arange(k) / k for k in kernel_size[::-1]]), indexing="ij"
    )
    coeffs = [cp.transpose(c).flatten() for c in coeffs]
    inv_coeffs = [1 - c for c in coeffs]

    # sum over contributions of neighboring contextual
    # regions in each direction
    result = cp.zeros(blocks.shape, dtype=cp.float32)
    for edge in itertools.product(*((range(2),) * ndim)):
        edge_maps = map_array[
            tuple([slice(e, e + n) for e, n in zip(edge, ns_proc)])
        ]
        edge_maps = edge_maps.reshape((_prod(ns_proc), -1))

        # apply map
        edge_mapped = cp.take_along_axis(edge_maps, blocks, axis=-1)

        # interpolate
        edge_coeffs = functools.reduce(
            operator.mul,
            [[inv_coeffs, coeffs][e][d] for d, e in enumerate(edge[::-1])],
        )
        result += (edge_mapped * edge_coeffs).astype(result.dtype)

    result = result.astype(dtype)

    # rebuild result image from blocks
    result = result.reshape(blocks_flattened_shape)
    blocks_axis_rebuild_order = functools.reduce(
        operator.add,
        [(s, k) for s, k in zip(range(0, ndim), range(ndim, ndim * 2))],
    )
    result = result.transpose(blocks_axis_rebuild_order)
    result = result.reshape(image.shape)

    # undo padding
    unpad_slices = tuple(
        [
            slice(p_i, s - p_f)
            for p_i, p_f, s in zip(
                pad_start_per_dim, pad_end_per_dim, image.shape
            )
        ]
    )
    result = result[unpad_slices]

    return result
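

# A minimal sketch, independent of the vectorized implementation above, of
# the multilinear (here: bilinear, ndim == 2) blending that ``_clahe``
# performs: each output pixel is a coefficient-weighted mix of the graylevel
# mappings of its 2**ndim neighboring contextual regions. All names below
# are hypothetical and for illustration only.
def _demo_bilinear_tile_blend(maps, y_frac, x_frac, gray):
    """Blend four per-tile LUTs for a single pixel.

    Parameters
    ----------
    maps : dict with keys (0, 0), (0, 1), (1, 0), (1, 1)
        Graylevel mapping (1D LUT) of each neighboring contextual region.
    y_frac, x_frac : float in [0, 1)
        Fractional position of the pixel inside its processing block.
    gray : int
        Input gray level of the pixel.
    """
    result = 0.0
    for ey in (0, 1):
        for ex in (0, 1):
            # weight decreases with distance from the contributing region
            wy = y_frac if ey else 1.0 - y_frac
            wx = x_frac if ex else 1.0 - x_frac
            result += wy * wx * float(maps[(ey, ex)][gray])
    return result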