def remove_outliers(self, data):
    """Suppress outlier ("zinger") pixels in ``data`` in place.

    A median filter with footprint ``[1, r, r]`` (``r = int(self.dezinger)``)
    is applied, so filtering happens within each leading-axis slice only.
    Pixels whose value deviates from the filtered value by more than 50% of
    the filtered value's magnitude are replaced by the filtered value.
    No-op when ``self.dezinger`` is not a positive integer.

    Parameters
    ----------
    data : cupy.ndarray
        Image stack, modified in place; nothing is returned.
        NOTE(review): the ``[1, r, r]`` footprint implies a 3-D
        (slice, row, col) layout — confirm against callers.
    """
    # Convert once up front: ``dezinger`` may arrive as a string option.
    r = int(self.dezinger)
    if r > 0:
        fdata = ndimage.median_filter(data, [1, r, r])
        # Outlier test: |median - value| > 0.5 * |median|.
        ids = cp.where(cp.abs(fdata - data) > 0.5 * cp.abs(fdata))
        data[ids] = fdata[ids]
def getProj(self, obs, center_pixel, rz, z, ry, rx):
    """Build three axis-aligned projections of rotated voxel occupancy grids.

    For each batch element, the heightmap patch around ``center_pixel`` is
    turned into a binary 3-D occupancy grid; occupied voxel coordinates are
    remapped through the precomputed rotation lookup table ``self.map``
    (indexed by the nearest discretized rz/ry/rx angles), and the rotated
    grid is summed along each axis, yielding three 2-D projections.

    NOTE(review): tensor shapes are inferred from usage — ``z``, ``rz``,
    ``ry``, ``rx`` appear to be (B, 1) torch tensors and ``self.map`` a
    6-D lookup table of rotated voxel coordinates; confirm against callers.

    Returns a float torch tensor of shape (B, 3, size, size) on
    ``self.device``.
    """
    # Extract the (unrotated) heightmap patch and round to stabilize the
    # comparison against the z levels below.
    patch = self.getPatch(obs, center_pixel, torch.zeros_like(rz))
    patch = np.round(patch.cpu().numpy(), 5)
    patch = cp.array(patch)
    projections = []
    size = self.patch_size
    # Per-batch z levels of each voxel layer, centered on the sample's z.
    zs = cp.array(z.numpy()) + cp.array(
        [(-size / 2 + j) * self.heightmap_resolution for j in range(size)])
    zs = zs.reshape((zs.shape[0], 1, 1, zs.shape[1]))
    zs = zs.repeat(size, 1).repeat(size, 2)
    # Broadcast patch heights against z levels: voxel occupied where the
    # surface height exceeds the voxel's z level.
    c = patch.reshape(patch.shape[0], self.patch_size, self.patch_size,
                      1).repeat(size, 3)
    ori_occupancy = c > zs
    # transform into points: rows are (batch_index, x, y, z) of occupied voxels
    point_w_d = cp.argwhere(ori_occupancy)
    # Snap each requested rotation to the nearest precomputed angle bin.
    rz_id = (rz.expand(-1, self.num_rz) - self.rzs).abs().argmin(1)
    ry_id = (ry.expand(-1, self.num_ry) - self.rys).abs().argmin(1)
    rx_id = (rx.expand(-1, self.num_rx) - self.rxs).abs().argmin(1)
    dimension = point_w_d[:, 0]  # batch index of each occupied voxel
    point = point_w_d[:, 1:4]    # voxel coordinates
    rz_id = cp.array(rz_id)
    ry_id = cp.array(ry_id)
    rx_id = cp.array(rx_id)
    # Look up the rotated coordinates of every occupied voxel in one gather.
    mapped_point = self.map[rz_id[dimension], ry_id[dimension],
                            rx_id[dimension], point[:, 0], point[:, 1],
                            point[:, 2]].T
    # Keep only points that remain strictly inside the (size)^3 cube after
    # rotation; ``d`` keeps the matching batch indices.
    rotated_point = mapped_point.T[(cp.logical_and(
        0 < mapped_point.T, mapped_point.T < size)).all(1)]
    d = dimension[(cp.logical_and(
        0 < mapped_point.T, mapped_point.T < size)).all(1)].T.astype(int)
    for i in range(patch.shape[0]):
        # Rasterize this batch element's rotated points back into a grid.
        point = rotated_point[d == i].T
        occupancy = cp.zeros((size, size, size))
        if point.shape[0] > 0:
            occupancy[point[0], point[1], point[2]] = 1
        # Median filter + ceil closes small holes left by the rotation
        # resampling while keeping the grid binary.
        occupancy = median_filter(occupancy, size=2)
        occupancy = cp.ceil(occupancy)
        # Project along each axis (top/front/side views).
        projection = cp.stack(
            (occupancy.sum(0), occupancy.sum(1), occupancy.sum(2)))
        projections.append(projection)
    return torch.tensor(cp.stack(projections)).float().to(self.device)
def threshold_local(image, block_size, method='gaussian', offset=0,
                    mode='reflect', param=None, cval=0):
    """Compute a threshold mask image based on local pixel neighborhood.

    Also known as adaptive or dynamic thresholding. The threshold value is
    the weighted mean for the local neighborhood of a pixel subtracted by a
    constant. Alternatively the threshold can be determined dynamically by a
    given function, using the 'generic' method.

    Parameters
    ----------
    image : (N, M) ndarray
        Input image.
    block_size : int
        Odd size of pixel neighborhood which is used to calculate the
        threshold value (e.g. 3, 5, 7, ..., 21, ...).
    method : {'generic', 'gaussian', 'mean', 'median'}, optional
        Method used to determine adaptive threshold for local neighbourhood
        in weighted mean image.

        * 'generic': use custom function (see ``param`` parameter)
        * 'gaussian': apply gaussian filter (see ``param`` parameter for
          custom sigma value)
        * 'mean': apply arithmetic mean filter
        * 'median': apply median rank filter

        By default the 'gaussian' method is used.
    offset : float, optional
        Constant subtracted from weighted mean of neighborhood to calculate
        the local threshold value. Default offset is 0.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The mode parameter determines how the array borders are handled,
        where cval is the value when mode is equal to 'constant'.
        Default is 'reflect'.
    param : {int, function}, optional
        Either specify sigma for 'gaussian' method or function object for
        'generic' method. This functions takes the flat array of local
        neighbourhood as a single argument and returns the calculated
        threshold for the centre pixel.
    cval : float, optional
        Value to fill past edges of input if mode is 'constant'.

    Returns
    -------
    threshold : (N, M) ndarray
        Threshold image. All pixels in the input image higher than the
        corresponding pixel in the threshold image are considered foreground.

    References
    ----------
    .. [1] https://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#adaptivethreshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()[:50, :50]
    >>> binary_image1 = image > threshold_local(image, 15, 'mean')
    >>> func = lambda arr: arr.mean()
    >>> binary_image2 = image > threshold_local(image, 15, 'generic',
    ...                                         param=func)
    """  # noqa
    if block_size % 2 == 0:
        raise ValueError("The kwarg ``block_size`` must be odd! Given "
                         "``block_size`` {0} is even.".format(block_size))
    check_nD(image, 2)
    thresh_image = cp.zeros(image.shape, _float_dtype(image))
    if method == 'generic':
        # Dead code removed: an ndi.generic_filter call previously followed
        # this raise and could never execute.  Restore it here once a GPU
        # generic_filter implementation exists:
        #   ndi.generic_filter(image, param, block_size,
        #                      output=thresh_image, mode=mode, cval=cval)
        raise NotImplementedError("TODO: implement generic_filter")
    elif method == 'gaussian':
        if param is None:
            # automatically determine sigma which covers > 99% of distribution
            sigma = (block_size - 1) / 6.0
        else:
            sigma = param
        ndi.gaussian_filter(image, sigma, output=thresh_image, mode=mode,
                            cval=cval)
    elif method == 'mean':
        mask = 1.0 / block_size * cp.ones((block_size, ))
        # separation of filters to speedup convolution
        ndi.convolve1d(image, mask, axis=0, output=thresh_image, mode=mode,
                       cval=cval)
        ndi.convolve1d(thresh_image, mask, axis=1, output=thresh_image,
                       mode=mode, cval=cval)
    elif method == 'median':
        ndi.median_filter(image, block_size, output=thresh_image, mode=mode,
                          cval=cval)
    else:
        raise ValueError("Invalid method specified. Please use `generic`, "
                         "`gaussian`, `mean`, or `median`.")

    return thresh_image - offset
def _ilk(reference_image, moving_image, flow0, radius, num_warp, gaussian,
         prefilter):
    """Iterative Lucas-Kanade (iLK) solver for optical flow estimation.

    Parameters
    ----------
    reference_image : ndarray, shape (M, N[, P[, ...]])
        The first gray scale image of the sequence.
    moving_image : ndarray, shape (M, N[, P[, ...]])
        The second gray scale image of the sequence.
    flow0 : ndarray, shape (reference_image.ndim, M, N[, P[, ...]])
        Initialization for the vector field.
    radius : int
        Radius of the window considered around each pixel.
    num_warp : int
        Number of times moving_image is warped.
    gaussian : bool
        if True, a gaussian kernel is used for the local
        integration. Otherwise, a uniform kernel is used.
    prefilter : bool
        Whether to prefilter the estimated optical flow before each
        image warp. This helps to remove potential outliers.

    Returns
    -------
    flow : ndarray, shape ((reference_image.ndim, M, N[, P[, ...]])
        The estimated optical flow components for each axis.

    """
    dtype = reference_image.dtype
    ndim = reference_image.ndim
    size = 2 * radius + 1

    # Local integration kernel: either a Gaussian covering the window or a
    # flat (uniform) average over the same window.
    if gaussian:
        sigma = ndim * (size / 4, )
        filter_func = partial(ndi.gaussian_filter, sigma=sigma, mode='mirror')
    else:
        filter_func = partial(ndi.uniform_filter, size=ndim * (size, ),
                              mode='mirror')

    flow = flow0
    # For each pixel location (i, j), the optical flow X = flow[:, i, j]
    # is the solution of the ndim x ndim linear system
    # A[i, j] * X = b[i, j]
    A = cp.zeros(reference_image.shape + (ndim, ndim), dtype=dtype)
    b = cp.zeros(reference_image.shape + (ndim, ), dtype=dtype)

    # Sparse coordinate grid reused for every warp.
    grid = cp.meshgrid(
        *[cp.arange(n, dtype=dtype) for n in reference_image.shape],
        indexing='ij', sparse=True)

    for _ in range(num_warp):
        if prefilter:
            # 3-wide median over the spatial axes only (axis 0 indexes the
            # flow components) to knock out outlier vectors.
            flow = ndi.median_filter(flow, (1, ) + ndim * (3, ))

        moving_image_warp = warp(moving_image, get_warp_points(grid, flow),
                                 mode='nearest')
        grad = cp.stack(cp.gradient(moving_image_warp), axis=0)
        # Linearized brightness-constancy residual at the current flow.
        error_image = ((grad * flow).sum(axis=0) + reference_image -
                       moving_image_warp)

        # Local linear systems creation: structure tensor entries (A) and
        # right-hand side (b), smoothed over the integration window.
        for i, j in combinations_with_replacement(range(ndim), 2):
            A[..., i, j] = A[..., j, i] = filter_func(grad[i] * grad[j])

        for i in range(ndim):
            b[..., i] = filter_func(grad[i] * error_image)

        # Don't consider badly conditioned linear systems: replace them with
        # the identity and a zero RHS so the solve yields zero flow there.
        idx = abs(cp.linalg.det(A)) < 1e-14
        A[idx] = cp.eye(ndim, dtype=dtype)
        b[idx] = 0

        # Solve the local linear systems; move the component axis back to
        # the front to match the flow layout.
        flow = cp.moveaxis(cp.linalg.solve(A, b), ndim, 0)

    return flow
def _tvl1(reference_image, moving_image, flow0, attachment, tightness,
          num_warp, num_iter, tol, prefilter):
    """TV-L1 solver for optical flow estimation.

    Parameters
    ----------
    reference_image : ndarray, shape (M, N[, P[, ...]])
        The first gray scale image of the sequence.
    moving_image : ndarray, shape (M, N[, P[, ...]])
        The second gray scale image of the sequence.
    flow0 : ndarray, shape (image0.ndim, M, N[, P[, ...]])
        Initialization for the vector field.
    attachment : float
        Attachment parameter. The smaller this parameter is,
        the smoother is the solutions.
    tightness : float
        Tightness parameter. It should have a small value in order to
        maintain attachement and regularization parts in
        correspondence.
    num_warp : int
        Number of times image1 is warped.
    num_iter : int
        Number of fixed point iteration.
    tol : float
        Tolerance used as stopping criterion based on the L² distance
        between two consecutive values of (u, v).
    prefilter : bool
        Whether to prefilter the estimated optical flow before each
        image warp.

    Returns
    -------
    flow : ndarray, shape ((image0.ndim, M, N[, P[, ...]])
        The estimated optical flow components for each axis.

    """
    dtype = reference_image.dtype
    # Sparse coordinate grid reused for every warp.
    grid = cp.meshgrid(
        *[cp.arange(n, dtype=dtype) for n in reference_image.shape],
        indexing='ij', sparse=True)

    dt = 0.5 / reference_image.ndim
    reg_num_iter = 2
    f0 = attachment * tightness
    f1 = dt / tightness
    # Scale the tolerance by the pixel count so the stopping test below is a
    # mean-squared-difference criterion.
    tol *= reference_image.size

    flow_current = flow_previous = flow0

    # Work buffers: g holds per-axis forward differences, proj the dual
    # variable of the TV regularizer.
    g = cp.zeros((reference_image.ndim, ) + reference_image.shape,
                 dtype=dtype)
    proj = cp.zeros((
        reference_image.ndim,
        reference_image.ndim,
    ) + reference_image.shape, dtype=dtype)

    # Mutable slice lists, reused to address varying axes without allocating
    # new tuples each pass.
    s_g = [slice(None)] * g.ndim
    s_p = [slice(None)] * proj.ndim
    s_d = [slice(None)] * (proj.ndim - 2)

    for _ in range(num_warp):
        if prefilter:
            # 3-wide median over the spatial axes only (axis 0 indexes the
            # flow components) to remove outlier vectors before warping.
            flow_current = ndi.median_filter(flow_current,
                                             [1] + reference_image.ndim * [3])

        image1_warp = warp(moving_image, get_warp_points(grid, flow_current),
                           mode='nearest')
        grad = cp.stack(cp.gradient(image1_warp))
        NI = (grad * grad).sum(0)
        NI[NI == 0] = 1  # avoid division by zero in flat regions

        rho_0 = image1_warp - reference_image - (grad * flow_current).sum(0)

        for _ in range(num_iter):

            # Data term: thresholded gradient step on the linearized
            # brightness-constancy residual rho.
            rho = rho_0 + (grad * flow_current).sum(0)

            idx = abs(rho) <= f0 * NI

            # NOTE(review): flow_auxiliary aliases flow_current here; the
            # in-place updates below intentionally modify both before
            # flow_current is rebound to a copy — preserve this order.
            flow_auxiliary = flow_current

            flow_auxiliary[:, idx] -= rho[idx] * grad[:, idx] / NI[idx]

            idx = ~idx
            srho = f0 * cp.sign(rho[idx])
            flow_auxiliary[:, idx] -= srho * grad[:, idx]

            # Regularization term: dual (Chambolle-style) iterations of the
            # total-variation proximal operator, one flow component at a time.
            flow_current = flow_auxiliary.copy()

            for idx in range(reference_image.ndim):
                s_p[0] = idx
                for _ in range(reg_num_iter):
                    # Forward differences of this flow component along each
                    # spatial axis (last slice left at zero).
                    for ax in range(reference_image.ndim):
                        s_g[0] = ax
                        s_g[ax + 1] = slice(0, -1)
                        g[tuple(s_g)] = cp.diff(flow_current[idx], axis=ax)
                        s_g[ax + 1] = slice(None)

                    norm = cp.sqrt((g * g).sum(0, keepdims=True))
                    norm *= f1
                    norm += 1.0
                    proj[idx] -= dt * g
                    proj[idx] /= norm

                    # d will be the (negative) divergence of proj[idx]
                    d = -proj[idx].sum(0)
                    for ax in range(reference_image.ndim):
                        s_p[1] = ax
                        s_p[ax + 2] = slice(0, -1)
                        s_d[ax] = slice(1, None)
                        d[tuple(s_d)] += proj[tuple(s_p)]
                        s_p[ax + 2] = slice(None)
                        s_d[ax] = slice(None)

                    flow_current[idx] = flow_auxiliary[idx] + d

        # The difference as stopping criteria
        flow_previous -= flow_current
        if (flow_previous * flow_previous).sum() < tol:
            break

        flow_previous = flow_current

    return flow_current
def median(image, selem=None, out=None, mode='nearest', cval=0.0,
           behavior='ndimage'):
    """Return local median of an image.

    Parameters
    ----------
    image : array-like
        Input image.
    selem : ndarray, optional
        If ``behavior=='rank'``, ``selem`` is a 2-D array of 1's and 0's.
        If ``behavior=='ndimage'``, ``selem`` is a N-D array of 1's and 0's
        with the same number of dimension than ``image``.
        If None, ``selem`` will be a N-D array with 3 elements for each
        dimension (e.g., vector, square, cube, etc.)
    out : ndarray, (same dtype as image), optional
        If None, a new array is allocated.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The mode parameter determines how the array borders are handled,
        where ``cval`` is the value when mode is equal to 'constant'.
        Default is 'nearest'. Only used when ``behavior='ndimage'``.
    cval : scalar, optional
        Value to fill past edges of input if mode is 'constant'. Default
        is 0.0. Only used when ``behavior='ndimage'``.
    behavior : {'ndimage', 'rank'}, optional
        'ndimage' (the default) dispatches to
        :func:`scipy.ndimage.median_filter`; 'rank' would dispatch to the
        rank-filter implementation, which is not implemented here yet.

    Returns
    -------
    out : 2-D array (same dtype as input image)
        Output image.

    See also
    --------
    skimage.filters.rank.median : Rank-based implementation of the median
        filtering offering more flexibility with additional parameters but
        dedicated for unsigned integer images.

    Examples
    --------
    >>> import cupy as cp
    >>> from skimage import data
    >>> from cucim.skimage.morphology import disk
    >>> from cucim.skimage.filters import median
    >>> img = cp.array(data.camera())
    >>> med = median(img, disk(5))
    """
    if behavior == 'rank':
        # 'mode' and 'cval' only apply to the ndimage code path; tell the
        # caller they would be ignored before bailing out.
        uses_border_params = mode != 'nearest' or not np.isclose(cval, 0.0)
        if uses_border_params:
            warn("Change 'behavior' to 'ndimage' if you want to use the "
                 "parameters 'mode' or 'cval'. They will be discarded "
                 "otherwise.")
        # TODO: implement median rank filter
        # return generic.median(image, selem=selem, out=out)
        raise NotImplementedError("rank behavior not currently implemented")

    footprint = selem
    if footprint is None:
        # Default footprint: full-connectivity structuring element, i.e.
        # 3 elements along every axis of the image.
        footprint = ndi.generate_binary_structure(image.ndim, image.ndim)
    return ndi.median_filter(image, footprint=footprint, output=out,
                             mode=mode, cval=cval)