def transform_affine(volume, ref_shape, affine, order=1):
    """Resample `volume` onto a grid of shape `ref_shape` via `affine`.

    The affine matrix maps output (reference grid) coordinates to input
    coordinates, as in `scipy.ndimage.affine_transform`.
    """
    from cupyimg.scipy.ndimage._kernels import interp

    ndim = volume.ndim
    legacy_mode = interp.const_legacy_mode
    interp.const_legacy_mode = False
    try:
        affine = cupy.asarray(affine)
        if True:
            out = ndi.affine_transform(
                volume,
                matrix=affine,
                order=order,
                mode="constant",
                output_shape=tuple(ref_shape),
            )
        else:
            # use map_coordinates instead of affine_transform
            xcoords = cupy.meshgrid(
                *[cupy.arange(s, dtype=volume.dtype) for s in ref_shape],
                indexing="ij",
                sparse=True,
            )
            coords = _apply_affine_to_field(
                xcoords,
                affine[:ndim, :],
                include_translations=True,
                coord_axis=0,
            )
            out = ndi.map_coordinates(volume, coords, order=1)
    finally:
        interp.const_legacy_mode = legacy_mode
    return out
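# A minimal usage sketch for ``transform_affine`` (illustrative only; the
# helper name ``_example_transform_affine`` is not part of the library).
# It shifts a small 2-D "volume" using a homogeneous (ndim + 1, ndim + 1)
# matrix, which maps output coordinates back to input coordinates as in
# ``scipy.ndimage.affine_transform``.
def _example_transform_affine():
    vol = cupy.arange(25, dtype=cupy.float32).reshape(5, 5)
    # output[i, j] is sampled from vol[i + 1, j + 2] (constant padding outside)
    shift = cupy.asarray(
        [[1.0, 0.0, 1.0],
         [0.0, 1.0, 2.0],
         [0.0, 0.0, 1.0]]
    )
    return transform_affine(vol, ref_shape=(5, 5), affine=shift, order=1)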
def profile_line(
    image,
    src,
    dst,
    linewidth=1,
    order=None,
    mode=None,
    cval=0.0,
    *,
    reduce_func=cp.mean,
):
    """Return the intensity profile of an image measured along a scan line.

    Parameters
    ----------
    image : ndarray, shape (M, N[, C])
        The image, either grayscale (2D array) or multichannel
        (3D array, where the final axis contains the channel
        information).
    src : array_like, shape (2, )
        The coordinates of the start point of the scan line.
    dst : array_like, shape (2, )
        The coordinates of the end point of the scan line. The destination
        point is *included* in the profile, in contrast to standard numpy
        indexing.
    linewidth : int, optional
        Width of the scan, perpendicular to the line
    order : int in {0, 1, 2, 3, 4, 5}, optional
        The order of the spline interpolation, default is 0 if
        image.dtype is bool and 1 otherwise. The order has to be in
        the range 0-5. See `skimage.transform.warp` for detail.
    mode : {'constant', 'nearest', 'reflect', 'mirror', 'wrap'}, optional
        How to compute any values falling outside of the image.
    cval : float, optional
        If `mode` is 'constant', what constant value to use outside the image.
    reduce_func : callable, optional
        Function used to calculate the aggregation of pixel values
        perpendicular to the profile_line direction when `linewidth` > 1.
        If set to None the unreduced array will be returned.

    Returns
    -------
    return_value : array
        The intensity profile along the scan line. The length of the profile
        is the ceil of the computed length of the scan line.

    Examples
    --------
    >>> import cupy as cp
    >>> x = cp.asarray([[1, 1, 1, 2, 2, 2]])
    >>> img = cp.vstack([cp.zeros_like(x), x, x, x, cp.zeros_like(x)])
    >>> img
    array([[0, 0, 0, 0, 0, 0],
           [1, 1, 1, 2, 2, 2],
           [1, 1, 1, 2, 2, 2],
           [1, 1, 1, 2, 2, 2],
           [0, 0, 0, 0, 0, 0]])
    >>> profile_line(img, (2, 1), (2, 4))
    array([1., 1., 2., 2.])
    >>> profile_line(img, (1, 0), (1, 6), cval=4)
    array([1., 1., 1., 2., 2., 2., 4.])

    The destination point is included in the profile, in contrast to
    standard numpy indexing. For example:

    >>> profile_line(img, (1, 0), (1, 6))  # The final point is out of bounds
    array([1., 1., 1., 2., 2., 2., 0.])
    >>> profile_line(img, (1, 0), (1, 5))  # This accesses the full first row
    array([1., 1., 1., 2., 2., 2.])

    For different reduce_func inputs:

    >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=cp.mean)
    array([0.66666667, 0.66666667, 0.66666667, 1.33333333])
    >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=cp.max)
    array([1, 1, 1, 2])
    >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=cp.sum)
    array([2, 2, 2, 4])

    The unreduced array will be returned when `reduce_func` is None or when
    `reduce_func` acts on each pixel value individually.

    >>> profile_line(img, (1, 2), (4, 2), linewidth=3, order=0,
    ...              reduce_func=None)
    array([[1, 1, 2],
           [1, 1, 2],
           [1, 1, 2],
           [0, 0, 0]])
    >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=cp.sqrt)
    array([[1.        , 1.        , 0.        ],
           [1.        , 1.        , 0.        ],
           [1.        , 1.        , 0.        ],
           [1.41421356, 1.41421356, 0.        ]])
    """
    order = _validate_interpolation_order(image.dtype, order)

    if mode is None:
        warn(
            "Default out of bounds interpolation mode 'constant' is "
            "deprecated. In version 0.19 it will be set to 'reflect'. "
            "To avoid this warning, set `mode=` explicitly.",
            FutureWarning,
            stacklevel=2,
        )
        mode = "constant"

    perp_lines = _line_profile_coordinates(src, dst, linewidth=linewidth)
    if image.ndim == 3:
        pixels = [
            ndi.map_coordinates(
                image[..., i],
                perp_lines,
                prefilter=order > 1,
                order=order,
                mode=mode,
                cval=cval,
            )
            for i in range(image.shape[2])
        ]
        pixels = cp.transpose(cp.asarray(pixels), (1, 2, 0))
    else:
        pixels = ndi.map_coordinates(
            image,
            perp_lines,
            prefilter=order > 1,
            order=order,
            mode=mode,
            cval=cval,
        )
    # The outputted array with reduce_func=None gives an array where the
    # row values (axis=1) are flipped. Here, we make this consistent.
    pixels = cp.flip(pixels, axis=1)

    if reduce_func is None:
        intensities = pixels
    else:
        try:
            intensities = reduce_func(pixels, axis=1)
        except TypeError:  # function doesn't allow axis kwarg
            intensities = cnp.apply_along_axis(reduce_func, arr=pixels, axis=1)

    return intensities
def test_warp_coords_example():
    image = cp.asarray(astronaut().astype(np.float32))
    assert 3 == image.shape[2]
    tform = SimilarityTransform(translation=(0, -10), xp=cp)
    coords = warp_coords(tform, (30, 30, 3))
    map_coordinates(image[:, :, 0], coords[:2])
def warp(
    image,
    inverse_map,
    map_args={},
    output_shape=None,
    order=None,
    mode="constant",
    cval=0.0,
    clip=True,
    preserve_range=False,
):
    """Warp an image according to a given coordinate transformation.

    Parameters
    ----------
    image : ndarray
        Input image.
    inverse_map : transformation object, callable ``cr = f(cr, **kwargs)``, or ndarray
        Inverse coordinate map, which transforms coordinates in the output
        images into their corresponding coordinates in the input image.

        There are a number of different options to define this map, depending
        on the dimensionality of the input image. A 2-D image can have 2
        dimensions for gray-scale images, or 3 dimensions with color
        information.

         - For 2-D images, you can directly pass a transformation object,
           e.g. `skimage.transform.SimilarityTransform`, or its inverse.
         - For 2-D images, you can pass a ``(3, 3)`` homogeneous
           transformation matrix, e.g.
           `skimage.transform.SimilarityTransform.params`.
         - For 2-D images, a function that transforms a ``(M, 2)`` array of
           ``(col, row)`` coordinates in the output image to their
           corresponding coordinates in the input image. Extra parameters to
           the function can be specified through `map_args`.
         - For N-D images, you can directly pass an array of coordinates.
           The first dimension specifies the coordinates in the input image,
           while the subsequent dimensions determine the position in the
           output image. E.g. in case of 2-D images, you need to pass an
           array of shape ``(2, rows, cols)``, where `rows` and `cols`
           determine the shape of the output image, and the first dimension
           contains the ``(row, col)`` coordinate in the input image.
           See `scipy.ndimage.map_coordinates` for further documentation.

        Note, that a ``(3, 3)`` matrix is interpreted as a homogeneous
        transformation matrix, so you cannot interpolate values from a 3-D
        input, if the output is of shape ``(3,)``.

        See example section for usage.
    map_args : dict, optional
        Keyword arguments passed to `inverse_map`.
    output_shape : tuple (rows, cols), optional
        Shape of the output image generated. By default the shape of the
        input image is preserved.  Note that, even for multi-band images,
        only rows and columns need to be specified.
    order : int, optional
        The order of interpolation. The order has to be in the range 0-5:
         - 0: Nearest-neighbor
         - 1: Bi-linear (default)
         - 2: Bi-quadratic
         - 3: Bi-cubic
         - 4: Bi-quartic
         - 5: Bi-quintic
        Default is 0 if image.dtype is bool and 1 otherwise.
    mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}, optional
        Points outside the boundaries of the input are filled according
        to the given mode.  Modes match the behaviour of `numpy.pad`.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    clip : bool, optional
        Whether to clip the output to the range of values of the input image.
        This is enabled by default, since higher order interpolation may
        produce values outside the given input range.
    preserve_range : bool, optional
        Whether to keep the original range of values. Otherwise, the input
        image is converted according to the conventions of `img_as_float`.
        Also see
        https://scikit-image.org/docs/dev/user_guide/data_types.html

    Returns
    -------
    warped : double ndarray
        The warped input image.

    Notes
    -----
    - The input image is converted to a `double` image.
    - In case of a `SimilarityTransform`, `AffineTransform` and
      `ProjectiveTransform` and `order` in [0, 3] this function uses the
      underlying transformation matrix to warp the image with a much faster
      routine.

    Examples
    --------
    >>> from skimage.transform import warp
    >>> from skimage import data
    >>> image = data.camera()

    The following image warps are all equal but differ substantially in
    execution time. The image is shifted to the bottom.

    Use a geometric transform to warp an image (fast):

    >>> from skimage.transform import SimilarityTransform
    >>> tform = SimilarityTransform(translation=(0, -10))
    >>> warped = warp(image, tform)

    Use a callable (slow):

    >>> def shift_down(xy):
    ...     xy[:, 1] -= 10
    ...     return xy
    >>> warped = warp(image, shift_down)

    Use a transformation matrix to warp an image (fast):

    >>> import cupy as cp
    >>> matrix = cp.asarray([[1, 0, 0], [0, 1, -10], [0, 0, 1]])
    >>> warped = warp(image, matrix)
    >>> from skimage.transform import ProjectiveTransform
    >>> warped = warp(image, ProjectiveTransform(matrix=matrix))

    You can also use the inverse of a geometric transformation (fast):

    >>> warped = warp(image, tform.inverse)

    For N-D images you can pass a coordinate array, that specifies the
    coordinates in the input image for every element in the output image.
    E.g. if you want to rescale a 3-D cube, you can do:

    >>> cube_shape = cp.asarray([30, 30, 30])
    >>> cube = cp.random.rand(*cube_shape)

    Setup the coordinate array, that defines the scaling:

    >>> scale = 0.1
    >>> output_shape = (scale * cube_shape).astype(int)
    >>> coords0, coords1, coords2 = cp.mgrid[:output_shape[0],
    ...                                      :output_shape[1],
    ...                                      :output_shape[2]]
    >>> coords = cp.asarray([coords0, coords1, coords2])

    Assume that the cube contains spatial data, where the first array element
    center is at coordinate (0.5, 0.5, 0.5) in real space, i.e. we have to
    account for this extra offset when scaling the image:

    >>> coords = (coords + 0.5) / scale - 0.5
    >>> warped = warp(cube, coords)

    """
    if image.size == 0:
        raise ValueError(
            "Cannot warp empty image with dimensions", image.shape
        )

    order = _validate_interpolation_order(image.dtype, order)

    if image.dtype.kind == "c":
        if not preserve_range:
            raise NotImplementedError("TODO")
    else:
        image = convert_to_float(image, preserve_range)

    input_shape = np.array(image.shape)

    if output_shape is None:
        output_shape = input_shape
    else:
        output_shape = safe_as_int(output_shape)

    warped = None

    if order == 2:
        # When fixing this issue, make sure to fix the branches further
        # below in this function
        warn(
            "Bi-quadratic interpolation behavior has changed due "
            "to a bug in the implementation of scikit-image. "
            "The new version now serves as a wrapper "
            "around SciPy's interpolation functions, which itself "
            "is not verified to be a correct implementation. Until "
            "skimage's implementation is fixed, we recommend "
            "to use bi-linear or bi-cubic interpolation instead."
        )

    if warped is None:
        # use ndi.map_coordinates

        if isinstance(inverse_map, cp.ndarray) and inverse_map.shape == (3, 3):
            # inverse_map is a transformation matrix as numpy array,
            # this is only used for order >= 4.
            inverse_map = ProjectiveTransform(matrix=inverse_map)

        if isinstance(inverse_map, cp.ndarray):
            # inverse_map is directly given as coordinates
            coords = inverse_map
        else:
            # inverse_map is given as function, that transforms (N, 2)
            # destination coordinates to their corresponding source
            # coordinates. This is only supported for 2(+1)-D images.

            if image.ndim < 2 or image.ndim > 3:
                raise ValueError(
                    "Only 2-D images (grayscale or color) are "
                    "supported, when providing a callable "
                    "`inverse_map`."
                )

            def coord_map(*args):
                return inverse_map(*args, **map_args)

            if len(input_shape) == 3 and len(output_shape) == 2:
                # Input image is 2D and has color channel, but output_shape
                # is given for 2-D images. Automatically add the color
                # channel dimensionality.
                output_shape = (
                    output_shape[0],
                    output_shape[1],
                    input_shape[2],
                )

            coords = warp_coords(coord_map, output_shape)

        # Pre-filtering not necessary for order 0, 1 interpolation
        prefilter = order > 1

        ndi_mode = _to_ndimage_mode(mode)
        warped = ndi.map_coordinates(
            image,
            coords,
            prefilter=prefilter,
            mode=ndi_mode,
            order=order,
            cval=cval,
        )

    _clip_warp_output(image, warped, order, mode, cval, clip)

    return warped
def resize(
    image,
    output_shape,
    order=None,
    mode="reflect",
    cval=0,
    clip=True,
    preserve_range=False,
    anti_aliasing=None,
    anti_aliasing_sigma=None,
):
    """Resize image to match a certain size.

    Performs interpolation to up-size or down-size N-dimensional images. Note
    that anti-aliasing should be enabled when down-sizing images to avoid
    aliasing artifacts. For down-sampling with an integer factor also see
    `skimage.transform.downscale_local_mean`.

    Parameters
    ----------
    image : ndarray
        Input image.
    output_shape : tuple or ndarray
        Size of the generated output image `(rows, cols[, ...][, dim])`. If
        `dim` is not provided, the number of channels is preserved. In case
        the number of input channels does not equal the number of output
        channels a n-dimensional interpolation is applied.

    Returns
    -------
    resized : ndarray
        Resized version of the input.

    Other parameters
    ----------------
    order : int, optional
        The order of the spline interpolation, default is 0 if
        image.dtype is bool and 1 otherwise. The order has to be in
        the range 0-5. See `skimage.transform.warp` for detail.
    mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}, optional
        Points outside the boundaries of the input are filled according
        to the given mode.  Modes match the behaviour of `numpy.pad`.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    clip : bool, optional
        Whether to clip the output to the range of values of the input image.
        This is enabled by default, since higher order interpolation may
        produce values outside the given input range.
    preserve_range : bool, optional
        Whether to keep the original range of values. Otherwise, the input
        image is converted according to the conventions of `img_as_float`.
        Also see
        https://scikit-image.org/docs/dev/user_guide/data_types.html
    anti_aliasing : bool, optional
        Whether to apply a Gaussian filter to smooth the image prior
        to down-scaling. It is crucial to filter when down-sampling
        the image to avoid aliasing artifacts. If input image data
        type is bool, no anti-aliasing is applied.
    anti_aliasing_sigma : {float, tuple of floats}, optional
        Standard deviation for Gaussian filtering to avoid aliasing artifacts.
        By default, this value is chosen as (s - 1) / 2 where s is the
        down-scaling factor, where s > 1. For the up-size case, s < 1, no
        anti-aliasing is performed prior to rescaling.

    Notes
    -----
    Modes 'reflect' and 'symmetric' are similar, but differ in whether the
    edge pixels are duplicated during the reflection.  As an example, if an
    array has values [0, 1, 2] and was padded to the right by four values
    using symmetric, the result would be [0, 1, 2, 2, 1, 0, 0], while for
    reflect it would be [0, 1, 2, 1, 0, 1, 2].

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.transform import resize
    >>> image = data.camera()
    >>> resize(image, (100, 100)).shape
    (100, 100)

    """
    output_shape = tuple(output_shape)
    output_ndim = len(output_shape)
    input_shape = image.shape
    if output_ndim > image.ndim:
        # append dimensions to input_shape
        input_shape = input_shape + (1,) * (output_ndim - image.ndim)
        image = cp.reshape(image, input_shape)
    elif output_ndim == image.ndim - 1:
        # multichannel case: append shape of last axis
        output_shape = output_shape + (image.shape[-1],)
    elif output_ndim < image.ndim - 1:
        raise ValueError(
            "len(output_shape) cannot be smaller than the image "
            "dimensions"
        )

    if anti_aliasing is None:
        anti_aliasing = not image.dtype == bool

    if image.dtype == bool and anti_aliasing:
        warn(
            "Input image dtype is bool. Gaussian convolution is not defined "
            "with bool data type. Please set anti_aliasing to False or "
            "explicitly cast input image to another data type. Starting "
            "from version 0.19 a ValueError will be raised instead of this "
            "warning.",
            FutureWarning,
            stacklevel=2,
        )

    factors = np.asarray(input_shape, dtype=float) / np.asarray(
        output_shape, dtype=float
    )

    if anti_aliasing:
        if anti_aliasing_sigma is None:
            anti_aliasing_sigma = np.maximum(0, (factors - 1) / 2)
        else:
            anti_aliasing_sigma = np.atleast_1d(
                anti_aliasing_sigma
            ) * np.ones_like(factors)
            if np.any(anti_aliasing_sigma < 0):
                raise ValueError(
                    "Anti-aliasing standard deviation must be "
                    "greater than or equal to zero"
                )
            elif np.any((anti_aliasing_sigma > 0) & (factors <= 1)):
                warn(
                    "Anti-aliasing standard deviation greater than zero but "
                    "not down-sampling along all axes"
                )

        # Translate modes used by np.pad to those used by ndi.gaussian_filter
        np_pad_to_ndimage = {
            "constant": "constant",
            "edge": "nearest",
            "symmetric": "reflect",
            "reflect": "mirror",
            "wrap": "wrap",
        }
        try:
            ndi_mode = np_pad_to_ndimage[mode]
        except KeyError:
            raise ValueError(
                "Unknown mode, or cannot translate mode. The "
                "mode should be one of 'constant', 'edge', "
                "'symmetric', 'reflect', or 'wrap'. See the "
                "documentation of numpy.pad for more info."
            )

        image = ndi.gaussian_filter(
            image, anti_aliasing_sigma, cval=cval, mode=ndi_mode
        )

    # 2-dimensional interpolation
    if len(output_shape) == 2 or (
        len(output_shape) == 3 and output_shape[2] == input_shape[2]
    ):
        rows = output_shape[0]
        cols = output_shape[1]
        input_rows = input_shape[0]
        input_cols = input_shape[1]
        if rows == 1 and cols == 1:
            tform = AffineTransform(
                translation=(input_cols / 2.0 - 0.5, input_rows / 2.0 - 0.5),
                xp=np,
            )
        else:
            # 3 control points necessary to estimate exact AffineTransform
            src_corners = np.array([[1, 1], [1, rows], [cols, rows]]) - 1
            dst_corners = np.zeros(src_corners.shape, dtype=np.double)
            # take into account that 0th pixel is at position (0.5, 0.5)
            dst_corners[:, 0] = factors[1] * (src_corners[:, 0] + 0.5) - 0.5
            dst_corners[:, 1] = factors[0] * (src_corners[:, 1] + 0.5) - 0.5

            tform = AffineTransform(xp=np)
            tform.estimate(src_corners, dst_corners)

        # Make sure the transform is exactly metric, to ensure fast warping.
        tform.params[2] = np.asarray((0, 0, 1))
        tform.params[0, 1] = 0
        tform.params[1, 0] = 0

        # transfer the affine transform to the GPU
        tform.params = cp.asarray(tform.params)

        out = warp(
            image,
            tform,
            output_shape=output_shape,
            order=order,
            mode=mode,
            cval=cval,
            clip=clip,
            preserve_range=preserve_range,
        )

    else:  # n-dimensional interpolation
        order = _validate_interpolation_order(image.dtype, order)

        coord_arrays = [
            factors[i] * (cp.arange(d) + 0.5) - 0.5
            for i, d in enumerate(output_shape)
        ]

        coord_map = cp.stack(
            cp.meshgrid(*coord_arrays, sparse=False, indexing="ij")
        )

        image = convert_to_float(image, preserve_range)

        ndi_mode = _to_ndimage_mode(mode)
        out = ndi.map_coordinates(
            image, coord_map, order=order, mode=ndi_mode, cval=cval
        )

        _clip_warp_output(image, out, order, mode, cval, clip)

    return out
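# Small illustrative sketch (the helper name is hypothetical, not part of the
# public API): it shows how the default ``anti_aliasing_sigma`` chosen by
# ``resize`` above follows from the down-scaling factors, i.e.
# sigma = (s - 1) / 2 for each axis with factor s > 1, and 0 otherwise.
def _example_resize_default_sigma():
    in_shape = np.asarray((100, 100), dtype=float)
    out_shape = np.asarray((25, 25), dtype=float)
    factors = in_shape / out_shape           # array([4., 4.])
    return np.maximum(0, (factors - 1) / 2)  # array([1.5, 1.5])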
def warp(
    volume,
    d1,
    affine_idx_in=None,
    affine_idx_out=None,
    affine_disp=None,
    out_shape=None,
    *,
    order=1,
    mode="constant",
    coord_axis=-1,
):
    """Deforms the input volume under the given transformation.

    The warped volume is given by:

        (1) warped[i] = volume[ C * d1[A*i] + B*i ]

    where:

        A = affine_idx_in
        B = affine_idx_out
        C = affine_disp
    """
    A = affine_idx_in
    B = affine_idx_out
    C = affine_disp
    if out_shape is None:
        out_shape = volume.shape
    if A is not None:
        A = cupy.asarray(A)
    if B is not None:
        B = cupy.asarray(B)
    if C is not None:
        C = cupy.asarray(C)

    # TODO: reduce number of temporary arrays
    coord_dtype = cupy.promote_types(volume.dtype, np.float32)
    ndim = volume.ndim
    if d1.shape[coord_axis] != ndim:
        raise ValueError(
            "expected a displacement field with shape "
            "{} along axis {}".format(ndim, coord_axis)
        )
    if A is None:
        xcoords = cupy.meshgrid(
            *[cupy.arange(s, dtype=coord_dtype) for s in out_shape],
            indexing="ij",
            sparse=True,
        )
        Z = cupy.ascontiguousarray(cupy.moveaxis(d1, -1, 0))
    else:
        xcoords = cupy.meshgrid(
            *[cupy.arange(s, dtype=coord_dtype) for s in out_shape],
            indexing="ij",
            sparse=True,
        )
        # Y = mul0(A, xcoords, sh, cupy, lastcol=1)
        Y = _apply_affine_to_field(
            xcoords,
            A[:ndim, :],
            out=None,
            include_translations=True,
            coord_axis=0,
        )

        # for CuPy with non-legacy linear interpolation, don't need to
        # extend d1
        Z = cupy.empty_like(Y)
        if coord_axis == -1:
            for n in range(ndim):
                Z[n, ...] = ndi.map_coordinates(
                    d1[..., n], Y, order=1, mode=mode
                )
        else:
            for n in range(ndim):
                Z[n, ...] = ndi.map_coordinates(d1[n], Y, order=1, mode=mode)

    if C is not None:
        # Z = mul0(C, Z, sh, cupy, out=Z, lastcol=0)
        Z = _apply_affine_to_field(
            Z,
            C[:ndim, :ndim],
            out=None,
            include_translations=False,
            coord_axis=0,
        )

    if B is not None:
        # Z += mul0(B, xcoords, sh, cupy, lastcol=1)
        Z += _apply_affine_to_field(
            xcoords,
            B[:ndim, :],
            out=None,
            include_translations=True,
            coord_axis=0,
        )
    else:
        if A is None:
            for n in range(ndim):
                Z[n, ...] += xcoords[n]
        else:
            Z += Y

    return ndi.map_coordinates(volume, Z, order=order, mode=mode)
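# Minimal usage sketch for the displacement-field ``warp`` above (the helper
# name ``_example_warp_identity`` is illustrative, not part of the library):
# an all-zero displacement field with no affines reduces eq. (1) to
# warped[i] = volume[i], so the input is reproduced up to interpolation.
def _example_warp_identity():
    vol = cupy.arange(16, dtype=cupy.float32).reshape(4, 4)
    # default coord_axis=-1: the field has shape (rows, cols, ndim)
    zero_disp = cupy.zeros(vol.shape + (vol.ndim,), dtype=cupy.float32)
    return warp(vol, zero_disp, order=1, mode="constant")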
def simplify_warp_function(
    d,
    affine_idx_in,
    affine_idx_out,
    affine_disp,
    out_shape,
    *,
    mode="constant",
    coord_axis=-1,
):
    """Simplifies a nonlinear warping function combined with an affine
    transform.

    Modifies the given deformation field by incorporating into it an affine
    transformation and voxel-to-space transforms associated with the
    discretization of its domain and codomain.

    The resulting transformation may be regarded as operating on the
    image spaces given by the domain and codomain discretization.
    More precisely, the resulting transform is of the form:

    (1) T[i] = W * d[U * i] + V * i

    Where U = affine_idx_in, V = affine_idx_out, W = affine_disp.

    Parameters
    ----------
    d : array, shape (S', R', C', 3)
        the non-linear part of the transformation (displacement field)
    affine_idx_in : array, shape (4, 4)
        the matrix U in eq. (1) above
    affine_idx_out : array, shape (4, 4)
        the matrix V in eq. (1) above
    affine_disp : array, shape (4, 4)
        the matrix W in eq. (1) above
    out_shape : array, shape (3,)
        the number of slices, rows and columns of the sampling grid

    Returns
    -------
    out : array, shape = out_shape
        the deformation field `out` associated with `T` in eq. (1) such that:
        T[i] = i + out[i]

    Notes
    -----
    Both the direct and inverse transforms of a DiffeomorphicMap can be
    written in this form:

    Direct:  Let D be the voxel-to-space transform of the domain's
             discretization, P be the pre-align matrix, Rinv the
             space-to-voxel transform of the reference grid (the grid the
             displacement field is defined on) and Cinv be the space-to-voxel
             transform of the codomain's discretization. Then, for each i in
             the domain's grid, the direct transform is given by

             (2) T[i] = Cinv * d[Rinv * P * D * i] + Cinv * P * D * i

             and we identify U = Rinv * P * D, V = Cinv * P * D, W = Cinv

    Inverse: Let C be the voxel-to-space transform of the codomain's
             discretization, Pinv be the inverse of the pre-align matrix,
             Rinv the space-to-voxel transform of the reference grid (the
             grid the displacement field is defined on) and Dinv be the
             space-to-voxel transform of the domain's discretization. Then,
             for each j in the codomain's grid, the inverse transform is
             given by

             (3) Tinv[j] = Dinv * Pinv * d[Rinv * C * j] + Dinv * Pinv * C * j

             and we identify U = Rinv * C, V = Dinv * Pinv * C,
             W = Dinv * Pinv
    """
    if coord_axis not in [0, -1]:
        raise ValueError("coord_axis must be 0 or -1")
    ndim = d.shape[coord_axis]
    U = affine_idx_in
    V = affine_idx_out
    W = affine_disp

    # TODO: reduce number of temporary arrays
    coord_dtype = cupy.promote_types(d.dtype, np.float32)
    if U is None:
        xcoords = cupy.meshgrid(
            *[cupy.arange(s, dtype=coord_dtype) for s in d.shape[:-1]],
            indexing="ij",
            sparse=True,
        )
        if coord_axis == 0:
            Z = d.copy()
        else:
            Z = cupy.ascontiguousarray(cupy.moveaxis(d, -1, 0))
    else:
        xcoords = cupy.meshgrid(
            *[cupy.arange(s, dtype=coord_dtype) for s in d.shape[:-1]],
            indexing="ij",
            sparse=True,
        )
        # Y = mul0(A, xcoords, sh, cupy, lastcol=1)
        Y = _apply_affine_to_field(
            xcoords,
            U[:ndim, :],
            out=None,
            include_translations=True,
            coord_axis=0,
        )

        # for CuPy with non-legacy linear interpolation, don't need to
        # extend d
        Z = cupy.empty_like(Y)
        if coord_axis == 0:
            for n in range(ndim):
                Z[n, ...] = ndi.map_coordinates(d[n], Y, order=1, mode=mode)
        else:
            for n in range(ndim):
                Z[n, ...] = ndi.map_coordinates(
                    d[..., n], Y, order=1, mode=mode
                )

    if W is not None:
        # Z = mul0(C, Z, sh, cupy, out=Z, lastcol=0)
        Z = _apply_affine_to_field(
            Z,
            W[:ndim, :ndim],
            out=None,
            include_translations=False,
            coord_axis=0,
        )

    if V is not None:
        Z += _apply_affine_to_field(
            xcoords,
            V[:ndim, :],
            out=None,
            include_translations=True,
            coord_axis=0,
        )

    for n in range(ndim):
        # TODO: just subtract one from last column of V instead?
        Z[n, ...] -= xcoords[n]

    if coord_axis == -1:
        Z = cupy.moveaxis(Z, 0, -1)
    if not Z.flags.c_contiguous:
        Z = cupy.ascontiguousarray(Z)
    return Z
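# Minimal usage sketch for ``simplify_warp_function`` (illustrative only; the
# helper name ``_example_simplify_identity`` is not part of the library):
# with identity affines U, V, W and an all-zero displacement field, eq. (1)
# reduces to T[i] = i, so the returned field ``out`` (with T[i] = i + out[i])
# should be zero up to interpolation error.
def _example_simplify_identity():
    shape = (4, 4, 4)
    d = cupy.zeros(shape + (3,), dtype=cupy.float32)
    eye = cupy.eye(4)
    return simplify_warp_function(d, eye, eye, eye, shape)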
def compose_vector_fields(
    d1,
    d2,
    premult_index,
    premult_disp,
    time_scaling,
    comp=None,
    order=1,
    *,
    coord_axis=-1,
    omit_stats=False,
    xcoords=None,
    Y=None,
    Z=None,
):
    if comp is None:
        comp = cupy.empty_like(d1, order="C")

    # need vector elements on first axis, not last
    if coord_axis != 0:
        d1 = cupy.ascontiguousarray(cupy.moveaxis(d1, -1, 0))
        d2 = cupy.ascontiguousarray(cupy.moveaxis(d2, -1, 0))
    else:
        if not d1.flags.c_contiguous:
            d1 = cupy.ascontiguousarray(d1)
        if not d2.flags.c_contiguous:
            d2 = cupy.ascontiguousarray(d2)
    ndim = d1.shape[0]
    B = premult_disp
    A = premult_index
    t = time_scaling

    if xcoords is None:
        xcoords = cupy.meshgrid(
            *[cupy.arange(s, dtype=d1.real.dtype) for s in d1.shape[1:]],
            indexing="ij",
            sparse=True,
        )

    # TODO: reduce number of temporary arrays
    if ndim in [2, 3]:
        if Y is None:
            Y = cupy.empty_like(d1)
        if A is None:
            if B is None:
                if ndim == 3:
                    composeNone_3d(
                        d1[0], d1[1], d1[2],
                        xcoords[0], xcoords[1], xcoords[2],
                        Y[0], Y[1], Y[2],
                    )
                else:
                    composeNone_2d(
                        d1[0], d1[1], xcoords[0], xcoords[1], Y[0], Y[1]
                    )
            else:
                B = cupy.asarray(B[:ndim, :ndim], dtype=d1.dtype, order="C")
                if ndim == 3:
                    composeB_3d(
                        d1[0], d1[1], d1[2],
                        xcoords[0], xcoords[1], xcoords[2],
                        B,
                        Y[0], Y[1], Y[2],
                    )
                else:
                    composeB_2d(
                        d1[0], d1[1], xcoords[0], xcoords[1], B, Y[0], Y[1]
                    )
        elif B is None:
            A = cupy.asarray(A[:ndim, :], dtype=d1.dtype, order="C")
            if ndim == 3:
                composeA_3d(
                    xcoords[0], xcoords[1], xcoords[2], A, Y[0], Y[1], Y[2]
                )
            else:
                composeA_2d(xcoords[0], xcoords[1], A, Y[0], Y[1])
        else:
            A = cupy.asarray(A[:ndim, :], dtype=d1.dtype, order="C")
            B = cupy.asarray(B[:ndim, :ndim], dtype=d1.dtype, order="C")
            if ndim == 3:
                composeAB_3d(
                    d1[0], d1[1], d1[2],
                    xcoords[0], xcoords[1], xcoords[2],
                    B, A,
                    Y[0], Y[1], Y[2],
                )
            else:
                composeAB_2d(
                    d1[0], d1[1], xcoords[0], xcoords[1], B, A, Y[0], Y[1]
                )
    else:
        if B is None:
            d1tmp = d1.copy()  # have to copy to avoid modification of d1
        else:
            d1tmp = _apply_affine_to_field(
                d1, B[:ndim, :ndim], include_translations=False, coord_axis=0
            )

        if A is None:
            Y = d1tmp
            for n in range(ndim):
                Y[n] += xcoords[n]
        else:
            # Y = mul0(A, xcoords, sh, cupy, lastcol=1)
            Y = _apply_affine_to_field(
                xcoords, A[:ndim, :], include_translations=True, coord_axis=0
            )
            Y += d1tmp

    from cupyimg.scipy.ndimage._kernels import interp

    # TODO: things outside the domain should be set to zero
    legacy_mode_pre = interp.const_legacy_mode
    try:
        interp.const_legacy_mode = True
        if Z is None:
            Z = cupy.empty_like(Y)
        for n in range(ndim):
            Z[n, ...] = ndi.map_coordinates(
                d2[n], Y, order=1, mode="constant"
            )
    finally:
        interp.const_legacy_mode = legacy_mode_pre

    if coord_axis == 0:
        res = comp
    else:
        res = cupy.empty_like(Z)

    if omit_stats and ndim in [2, 3]:
        _shape = cupy.asarray(
            [d1.shape[1 + n] - 1 for n in range(ndim)], dtype=cupy.int32
        )
        if ndim == 3:
            _comp_apply_masked_time_scaling_3d(
                d1[0], d1[1], d1[2],
                Y[0], Y[1], Y[2],
                Z[0], Z[1], Z[2],
                t,
                _shape,
                res[0], res[1], res[2],
            )
        else:
            _comp_apply_masked_time_scaling_2d(
                d1[0], d1[1], Y[0], Y[1], Z[0], Z[1], t, _shape,
                res[0], res[1],
            )
    else:
        # TODO: declare count as boolean?
        count = cupy.zeros(Z.shape[1:], dtype=np.int32)

        # We now compute:
        #     res = d1 + t * Z
        # except that res = 0 where either coordinate in interpolating Y
        # was outside the displacement extent
        for n in range(ndim):
            _comp_apply_masked_time_scaling_nd(
                d1[n], Y[n], Z[n], t, d1.shape[1 + n] - 1, res[n], count
            )

        # nnz corresponds to the number of points in comp inside the domain
        count = count > 0  # remove after init count as boolean
        if not omit_stats:
            nnz = res.size // ndim - cupy.count_nonzero(count)
        res *= ~count[np.newaxis, ...]

    if omit_stats:
        stats = None
    else:
        # compute the stats
        stats = cupy.empty((3,), dtype=float)
        nn = res[0] * res[0]
        for n in range(1, ndim):
            nn += res[n] * res[n]
        # TODO: do we want stats to be a GPU array or CPU array?
        stats[0] = cupy.sqrt(nn.max())
        mean_norm = nn.sum() / nnz
        stats[1] = cupy.sqrt(mean_norm)
        nn *= nn
        stats[2] = cupy.sqrt(nn.sum() / nnz - mean_norm * mean_norm)

    if coord_axis != 0:
        res = cupy.moveaxis(res, 0, -1)
        comp[...] = res

    return comp, stats
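# Minimal usage sketch for ``compose_vector_fields`` (illustrative only; the
# helper name ``_example_compose_zero_fields`` is not part of the library):
# composing two all-zero 2-D displacement fields yields a zero composed field
# and zero norm statistics.
def _example_compose_zero_fields():
    shape = (4, 4)
    d1 = cupy.zeros(shape + (2,), dtype=cupy.float32)
    d2 = cupy.zeros_like(d1)
    comp, stats = compose_vector_fields(d1, d2, None, None, 1.0)
    return comp, stats  # comp ~ 0 everywhere; stats ~ [0, 0, 0]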