def __call__(self, data):
    """Apply one shared random 3D elastic deformation to every keyed array in ``data``."""
    d = dict(data)
    sp_size = self.rand_3d_elastic.spatial_size
    self.randomize(sp_size)
    grid = create_grid(sp_size)
    if self.rand_3d_elastic.do_transform:
        device = self.rand_3d_elastic.device
        grid = torch.tensor(grid).to(device)
        smoother = GaussianFilter(
            spatial_dims=3, sigma=self.rand_3d_elastic.sigma, truncated=3., device=device
        )
        # smooth the random offset field and scale it before adding to the sampling grid
        grid[:3] += smoother(self.rand_3d_elastic.rand_offset[None])[0] * self.rand_3d_elastic.magnitude
        grid = self.rand_3d_elastic.rand_affine_grid(grid=grid)
    if isinstance(self.mode, (tuple, list)):
        # one interpolation mode per key
        for key, interp in zip(self.keys, self.mode):
            d[key] = self.rand_3d_elastic.resampler(d[key], grid, mode=interp)
    else:
        # same interpolation mode for every key
        for key in self.keys:
            d[key] = self.rand_3d_elastic.resampler(d[key], grid, mode=self.rand_3d_elastic.mode)
    return d
def compute_importance_map(patch_size, mode="constant", sigma_scale=0.125, device=None):
    """Build a per-voxel importance map used to blend overlapping window predictions.

    Args:
        patch_size (tuple): Size of the required importance map. This should be either H, W [,D].
        mode (str): Importance map type. 'constant' gives every position weight 1.0;
            'gaussian' down-weights positions away from the patch center.
        sigma_scale (float): Sigma_scale to calculate sigma for each dimension
            (sigma = sigma_scale * dim_size). Used for gaussian mode only.
        device (str of pytorch device): Device to put importance map on.

    Returns:
        Tensor of size patch_size.
    """
    if mode == "constant":
        # equal contribution everywhere
        return torch.ones(patch_size, device=device).float()
    if mode == "gaussian":
        center = [dim // 2 for dim in patch_size]
        sigmas = [dim * sigma_scale for dim in patch_size]
        # unit impulse at the center, blurred into a gaussian bump, then peak-normalized
        imp = torch.zeros(patch_size, device=device)
        imp[tuple(center)] = 1
        blur = GaussianFilter(len(patch_size), sigmas).to(device=device, dtype=torch.float)
        imp = blur(imp.unsqueeze(0).unsqueeze(0)).squeeze(0).squeeze(0)
        imp = (imp / torch.max(imp)).float()
        # importance_map cannot be 0, otherwise we may end up with nans!
        imp[imp == 0] = torch.min(imp[imp != 0])
        return imp
    raise ValueError('mode must be "constant" or "gaussian".')
def __call__(self, data):
    """Deform every array in ``data`` with one shared random 3D elastic grid."""
    d = dict(data)
    sp_size = self.rand_3d_elastic.spatial_size
    # fall back to the first image's own spatial shape when any requested dim is degenerate
    if np.any([sz <= 1 for sz in sp_size]):
        sp_size = data[self.keys[0]].shape[1:]
    self.randomize(sp_size)
    grid = create_grid(sp_size)
    if self.rand_3d_elastic.do_transform:
        dev = self.rand_3d_elastic.device
        grid = torch.tensor(grid).to(dev)
        smoother = GaussianFilter(spatial_dims=3, sigma=self.rand_3d_elastic.sigma, truncated=3.0).to(dev)
        field = torch.tensor(self.rand_3d_elastic.rand_offset[None], device=dev)
        # smoothed random offsets scaled by magnitude perturb the sampling grid
        grid[:3] += smoother(field)[0] * self.rand_3d_elastic.magnitude
        grid = self.rand_3d_elastic.rand_affine_grid(grid=grid)
    for idx, key in enumerate(self.keys):
        # per-key interpolation and padding modes
        d[key] = self.rand_3d_elastic.resampler(
            d[key], grid, padding_mode=self.padding_mode[idx], mode=self.mode[idx]
        )
    return d
def __call__(
    self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
    """Apply one shared random 3D elastic deformation to each keyed array."""
    d = dict(data)
    # resolve non-positive requested dims from the first image's spatial shape
    sp_size = fall_back_tuple(self.rand_3d_elastic.spatial_size, data[self.keys[0]].shape[1:])
    self.randomize(grid_size=sp_size)
    grid = create_grid(spatial_size=sp_size)
    if self.rand_3d_elastic.do_transform:
        dev = self.rand_3d_elastic.device
        grid = torch.tensor(grid).to(dev)
        smoother = GaussianFilter(spatial_dims=3, sigma=self.rand_3d_elastic.sigma, truncated=3.0).to(dev)
        field = torch.tensor(self.rand_3d_elastic.rand_offset, device=dev).unsqueeze(0)
        # smoothed random offsets scaled by magnitude perturb the sampling grid
        grid[:3] += smoother(field)[0] * self.rand_3d_elastic.magnitude
        grid = self.rand_3d_elastic.rand_affine_grid(grid=grid)
    for idx, key in enumerate(self.keys):
        # per-key interpolation and padding modes
        d[key] = self.rand_3d_elastic.resampler(
            d[key], grid, mode=self.mode[idx], padding_mode=self.padding_mode[idx]
        )
    return d
def test_2d(self):
    """A 2D Gaussian filter on a 3x3 ones image must match precomputed values."""
    img = torch.ones(1, 1, 3, 3)
    layer = GaussianFilter(2, 3, 3, torch.device('cpu:0'))
    # symmetric response: corners, edges, and center share values
    corner, edge, center = 0.13380532, 0.14087981, 0.14832835
    expected = np.array([[[[corner, edge, corner],
                           [edge, center, edge],
                           [corner, edge, corner]]]])
    np.testing.assert_allclose(layer(img).cpu().numpy(), expected)
def compute_importance_map(
    patch_size: Tuple[int, ...],
    mode: Union[BlendMode, str] = BlendMode.CONSTANT,
    sigma_scale: Union[Sequence[float], float] = 0.125,
    device: Union[torch.device, int, str] = "cpu",
) -> torch.Tensor:
    """Get importance map for different weight modes.

    Args:
        patch_size: Size of the required importance map. This should be either H, W [,D].
        mode: {``"constant"``, ``"gaussian"``}
            How to blend output of overlapping windows. Defaults to ``"constant"``.

            - ``"constant``": gives equal weight to all predictions.
            - ``"gaussian``": gives less weight to predictions on edges of windows.

        sigma_scale: Sigma_scale to calculate sigma for each dimension
            (sigma = sigma_scale * dim_size). Used for gaussian mode only.
        device: Device to put importance map on.

    Raises:
        ValueError: When ``mode`` is not one of ["constant", "gaussian"].

    Returns:
        Tensor of size patch_size.
    """
    mode = BlendMode(mode)
    device = torch.device(device)  # type: ignore[arg-type]
    if mode == BlendMode.CONSTANT:
        importance_map = torch.ones(patch_size, device=device).float()
    elif mode == BlendMode.GAUSSIAN:
        center_coords = [i // 2 for i in patch_size]
        sigma_scale = ensure_tuple_rep(sigma_scale, len(patch_size))
        sigmas = [i * sigma_s for i, sigma_s in zip(patch_size, sigma_scale)]

        # NOTE(review): the impulse is deliberately built off-device then moved —
        # presumably a workaround for scalar index assignment on some devices; confirm
        # before folding the device into the torch.zeros call.
        importance_map = torch.zeros(patch_size)
        importance_map[tuple(center_coords)] = 1
        importance_map = importance_map.to(device)
        pt_gaussian = GaussianFilter(len(patch_size), sigmas).to(device=device, dtype=torch.float)
        importance_map = pt_gaussian(importance_map.unsqueeze(0).unsqueeze(0))
        importance_map = importance_map.squeeze(0).squeeze(0)
        importance_map = importance_map / torch.max(importance_map)
        importance_map = importance_map.float()

        # importance_map cannot be 0, otherwise we may end up with nans!
        min_non_zero = importance_map[importance_map != 0].min().item()
        importance_map = torch.clamp(importance_map, min=min_non_zero)
    else:
        # BUG FIX: the message previously interpolated BlendMode.CONSTANT twice,
        # hiding the "gaussian" option from the error text.
        raise ValueError(
            f"Unsupported mode: {mode}, available options are [{BlendMode.CONSTANT}, {BlendMode.GAUSSIAN}]."
        )
    return importance_map
def test_1d(self):
    """A 1D Gaussian filter on ones yields the same smoothed row for every channel."""
    img = torch.ones(1, 8, 10)
    layer = GaussianFilter(1, 3, 3, torch.device('cpu:0'))
    row = [
        0.56658804, 0.69108766, 0.79392236, 0.86594427, 0.90267116,
        0.9026711, 0.8659443, 0.7939224, 0.6910876, 0.56658804,
    ]
    # the filter acts per channel, so the single expected row tiles across all 8 channels
    expected = np.tile(np.array([[row]]), (1, 8, 1))
    np.testing.assert_allclose(layer(img).cpu().numpy(), expected)
def compute_importance_map(
    patch_size: Tuple[int, ...],
    mode: Union[BlendMode, str] = BlendMode.CONSTANT,
    sigma_scale: float = 0.125,
    device: Optional[torch.device] = None,
):
    """Build a per-voxel importance map used to blend overlapping window predictions.

    Args:
        patch_size: Size of the required importance map. This should be either H, W [,D].
        mode: {``"constant"``, ``"gaussian"``}
            How to blend output of overlapping windows. Defaults to ``"constant"``.

            - ``"constant``": gives equal weight to all predictions.
            - ``"gaussian``": gives less weight to predictions on edges of windows.

        sigma_scale: Sigma_scale to calculate sigma for each dimension
            (sigma = sigma_scale * dim_size). Used for gaussian mode only.
        device: Device to put importance map on.

    Raises:
        ValueError: When ``mode`` is not one of ["constant", "gaussian"].

    Returns:
        Tensor of size patch_size.
    """
    mode = BlendMode(mode)
    if mode == BlendMode.CONSTANT:
        # equal contribution everywhere
        return torch.ones(patch_size, device=device).float()
    if mode == BlendMode.GAUSSIAN:
        center = [dim // 2 for dim in patch_size]
        sigmas = [dim * sigma_scale for dim in patch_size]
        # unit impulse at the center, blurred into a gaussian bump, then peak-normalized
        imp = torch.zeros(patch_size, device=device)
        imp[tuple(center)] = 1
        blur = GaussianFilter(len(patch_size), sigmas).to(device=device, dtype=torch.float)
        imp = blur(imp.unsqueeze(0).unsqueeze(0)).squeeze(0).squeeze(0)
        imp = (imp / torch.max(imp)).float()
        # importance_map cannot be 0, otherwise we may end up with nans!
        imp[imp == 0] = torch.min(imp[imp != 0])
        return imp
    raise ValueError(
        f'Unsupported mode: {mode}, available options are ["constant", "gaussian"].'
    )
def test_3d(self):
    """A 3D Gaussian filter on a 4x3x4 ones volume must match precomputed values."""
    img = torch.ones(1, 1, 4, 3, 4)
    layer = GaussianFilter(3, 3, 3, torch.device('cpu:0'))
    # depth slices are symmetric: outer (d=0,3) and inner (d=1,2) pairs repeat
    outer = [[0.07294822, 0.08033235, 0.08033235, 0.07294822],
             [0.07680509, 0.08457965, 0.08457965, 0.07680509],
             [0.07294822, 0.08033235, 0.08033235, 0.07294822]]
    inner = [[0.08033235, 0.08846395, 0.08846395, 0.08033235],
             [0.08457965, 0.09314119, 0.09314119, 0.08457966],
             [0.08033235, 0.08846396, 0.08846396, 0.08033236]]
    expected = np.array([[[outer, inner, inner, outer]]])
    np.testing.assert_allclose(layer(img).cpu().numpy(), expected)
def __call__(self, img, spatial_size=None, mode=None):
    """
    Args:
        img (ndarray or tensor): shape must be (num_channels, H, W, D),
        spatial_size (3 ints): specifying spatial 3D output image spatial size [h, w, d].
        mode ('nearest'|'bilinear'): interpolation order. Defaults to 'self.mode'.
    """
    # per-call overrides fall back to the instance defaults
    sp_size = spatial_size or self.spatial_size
    interp = mode or self.mode
    self.randomize(sp_size)
    grid = create_grid(sp_size)
    if self.do_transform:
        grid = torch.as_tensor(np.ascontiguousarray(grid), device=self.device)
        smoother = GaussianFilter(3, self.sigma, 3., device=self.device)
        # smoothed random offsets scaled by magnitude perturb the sampling grid
        grid[:3] += smoother(self.rand_offset[None])[0] * self.magnitude
        grid = self.rand_affine_grid(grid=grid)
    return self.resampler(img, grid, interp)