def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
    """
    Undo the resize-with-pad-or-crop recorded by the forward pass for each key.

    For every key, reads the original spatial size from the most recent stored
    transform, then pads dimensions that shrank and crops dimensions that grew
    until the data is back at its original spatial shape. The input mapping is
    deep-copied; the caller's data is never mutated.
    """
    d = deepcopy(dict(data))
    for key in self.key_iterator(d):
        transform = self.get_most_recent_transform(d, key)
        # Create inverse transform
        orig_size = np.array(transform[InverseKeys.ORIG_SIZE])
        # spatial dims only: dim 0 is channel
        current_size = np.array(d[key].shape[1:])
        # Unfortunately, we can't just use ResizeWithPadOrCrop with original size because of odd/even rounding.
        # Instead, we first pad any smaller dimensions, and then we crop any larger dimensions.
        # First, do pad
        if np.any((orig_size - current_size) > 0):
            pad_to_start = np.floor((orig_size - current_size) / 2).astype(int)
            # in each direction, if original size is even and current size is odd, += 1
            # (keeps the pad split consistent with the forward pass's rounding convention)
            pad_to_start[np.logical_and(orig_size % 2 == 0, current_size % 2 == 1)] += 1
            # dimensions that need cropping (negative diff) get zero pad here
            pad_to_start[pad_to_start < 0] = 0
            pad_to_end = orig_size - current_size - pad_to_start
            pad_to_end[pad_to_end < 0] = 0
            # interleave into [start1, end1, start2, end2, ...] as BorderPad expects
            pad = list(chain(*zip(pad_to_start.tolist(), pad_to_end.tolist())))
            d[key] = BorderPad(pad)(d[key])
        # Next crop
        if np.any((orig_size - current_size) < 0):
            if self.padcropper.padder.method == Method.SYMMETRIC:
                # symmetric padding: center depends on current size's parity
                roi_center = [floor(i / 2) if r % 2 == 0 else (i - 1) // 2 for r, i in zip(orig_size, current_size)]
            else:
                # end padding: center derived from the original size only
                roi_center = [floor(r / 2) if r % 2 == 0 else (r - 1) // 2 for r in orig_size]
            d[key] = SpatialCrop(roi_center, orig_size)(d[key])
        # Remove the applied transform
        self.pop_transform(d, key)
    return d
def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, np.ndarray]]:
    """
    Sample ``self.num_samples`` random patches centered at the locations
    chosen by ``self.randomize`` and return one dictionary per patch.

    Args:
        data: dictionary of arrays; must contain ``self.label_key`` and, when
            configured, the image / foreground-index / background-index keys.

    Returns:
        a list of ``self.num_samples`` dictionaries, each holding the cropped
        arrays for ``self.keys`` plus the remaining keys passed through
        unmodified.

    Raises:
        AssertionError: if ``randomize`` did not leave a tuple spatial size or
            did not compute any crop centers.
    """
    d = dict(data)
    label = d[self.label_key]
    image = d[self.image_key] if self.image_key else None
    fg_indices = d.get(self.fg_indices_key, None) if self.fg_indices_key is not None else None
    bg_indices = d.get(self.bg_indices_key, None) if self.bg_indices_key is not None else None
    self.randomize(label, fg_indices, bg_indices, image)
    # explicit messages so a failure is diagnosable (these were bare raises);
    # the exception type is unchanged for backward compatibility
    if not isinstance(self.spatial_size, tuple):
        raise AssertionError(f"self.spatial_size should be a tuple after randomize, got {type(self.spatial_size)}.")
    if self.centers is None:
        raise AssertionError("no crop centers were computed, `randomize` must run before cropping.")
    results: List[Dict[Hashable, np.ndarray]] = [{} for _ in range(self.num_samples)]
    for i, center in enumerate(self.centers):
        for key in self.key_iterator(d):
            img = d[key]
            cropper = SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size)  # type: ignore
            results[i][key] = cropper(img)
        # fill in the extra keys with unmodified data
        for key in set(data.keys()).difference(set(self.keys)):
            results[i][key] = data[key]
    return results
def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, np.ndarray]]:
    """
    Sample ``self.num_samples`` patches whose centers are drawn from the
    weight map stored under ``self.w_key``.

    Returns one dictionary per patch; keys not listed in ``self.keys`` are
    copied into every sample unchanged.

    Raises:
        ValueError: if any array's spatial shape differs from the weight map's.
    """
    d = dict(data)
    weight_map = d[self.w_key]
    self.randomize(weight_map)
    patch_size = fall_back_tuple(self.spatial_size, weight_map.shape[1:])
    results: List[Dict[Hashable, np.ndarray]] = [{} for _ in range(self.num_samples)]
    for key in self.key_iterator(d):
        arr = d[key]
        if arr.shape[1:] != weight_map.shape[1:]:
            raise ValueError(
                f"data {key} and weight map {self.w_key} spatial shape mismatch: "
                f"{arr.shape[1:]} vs {weight_map.shape[1:]}."
            )
        for idx, ctr in enumerate(self.centers):
            results[idx][key] = SpatialCrop(roi_center=ctr, roi_size=patch_size)(arr)
            if self.center_coord_key:
                # optionally record the sampled center alongside the patch
                results[idx][self.center_coord_key] = ctr
    # pass through any keys this transform does not operate on
    for key in set(data.keys()).difference(set(self.keys)):
        for idx in range(self.num_samples):
            results[idx][key] = data[key]
    return results
def __call__(self, data: Mapping[Hashable, Tensor]) -> Dict[Hashable, Tensor]:
    """
    Center-crop every key to the spatial size of ``self.ref_key``.

    Supports ND channel-first data; also works for pseudo-ND data such as a
    (C, H, W) stack of slices, since only ``shape[1:]`` is treated as spatial.

    Args:
        data: dictionary of (key, value) pairs from the loaded dataset.

    Returns:
        the updated dictionary with cropped entries converted to tensors.
    """
    d = dict(data)
    # target spatial extent comes from the reference entry (dim 0 is channel)
    target_size = d[self.ref_key].shape[1:]
    for key in self.key_iterator(d):
        spatial_shape = d[key].shape[1:]
        center = tuple(dim // 2 for dim in spatial_shape)
        crop = SpatialCrop(roi_center=center, roi_size=target_size)
        d[key] = convert_to_tensor(crop(d[key]))
    return d
def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
    """
    Undo the pad recorded by the forward pass by cropping each key back to
    its stored original spatial size.

    The crop center depends on the padder's method so that the rounding
    convention matches the forward pass. Input is deep-copied, not mutated.
    """
    d = deepcopy(dict(data))
    for key in self.key_iterator(d):
        tr = self.get_most_recent_transform(d, key)
        orig_size = tr[InverseKeys.ORIG_SIZE.value]
        if self.padder.method == Method.SYMMETRIC:
            # symmetric pad: center computed from the padded (current) size
            cur_size = d[key].shape[1:]
            center = []
            for o, c in zip(orig_size, cur_size):
                center.append(floor(c / 2) if o % 2 == 0 else (c - 1) // 2)
        else:
            # end pad: center computed from the original size alone
            center = [floor(o / 2) if o % 2 == 0 else (o - 1) // 2 for o in orig_size]
        d[key] = SpatialCrop(center, orig_size)(d[key])
        # drop the applied transform record
        self.pop_transform(d, key)
    return d
def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
    """
    Undo a border pad by cropping ``self.padder.spatial_border`` off each
    side, restoring the recorded original spatial size.

    ``spatial_border`` may be a single scalar, one value per dimension, or an
    interleaved ``[min1, max1, min2, max2, ...]`` list; all three layouts are
    normalized to one start offset per spatial dimension.
    """
    d = deepcopy(dict(data))
    for key in self.key_iterator(d):
        transform = self.get_most_recent_transform(d, key)
        orig_size = np.array(transform[InverseKeys.ORIG_SIZE.value])
        border = np.array(self.padder.spatial_border)
        if border.size == 1:
            # scalar pad: broadcast to one start offset per dimension
            start = np.full((len(orig_size)), border)
        elif border.size == 2 * orig_size.size:
            # [min1, max1, min2, ...]: starts are the even-index entries
            start = border[::2]
        else:
            # already one value per spatial dimension
            start = border
        end = np.array(transform[InverseKeys.ORIG_SIZE.value]) + start
        d[key] = SpatialCrop(roi_start=start, roi_end=end)(d[key])
        # drop the applied transform record
        self.pop_transform(d, key)
    return d
def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
    """
    Undo a bounding-box crop: first trim any part of the patch that the
    forward pass padded beyond the original image, then pad the remainder
    back up to the recorded original size.
    """
    d = deepcopy(dict(data))
    for key in self.key_iterator(d):
        transform = self.get_most_recent_transform(d, key)
        orig_size = np.asarray(transform[InverseKeys.ORIG_SIZE])
        cur_size = np.asarray(d[key].shape[1:])
        info = transform[InverseKeys.EXTRA_INFO]
        box_start = np.asarray(info["box_start"])
        box_end = np.asarray(info["box_end"])
        # step 1: crop away regions the forward box placed outside the image
        crop_lo = np.maximum(-box_start, 0)
        crop_hi = cur_size - np.maximum(box_end - orig_size, 0)
        d[key] = SpatialCrop(roi_start=crop_lo, roi_end=crop_hi)(d[key])
        # step 2: pad back out to the original extent
        pad_lo = np.maximum(box_start, 0)
        pad_hi = orig_size - np.minimum(box_end, orig_size)
        # interleave into [start1, end1, start2, end2, ...] for BorderPad
        interleaved = list(chain(*zip(pad_lo.tolist(), pad_hi.tolist())))
        d[key] = BorderPad(interleaved)(d[key])
        # drop the applied transform record
        self.pop_transform(d, key)
    return d
def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, np.ndarray]]:
    """
    Sample ``self.num_samples`` random patches centered at the locations
    chosen by ``self.randomize``; keys not in ``self.keys`` are copied into
    every sample unmodified.

    Args:
        data: dictionary of arrays; must contain ``self.label_key`` and, when
            configured, ``self.image_key`` / fg / bg index keys.

    Returns:
        a list of ``self.num_samples`` result dictionaries.

    Raises:
        AssertionError: if ``randomize`` did not leave a tuple spatial size or
            did not compute any crop centers.
    """
    d = dict(data)
    label = d[self.label_key]
    image = d[self.image_key] if self.image_key else None
    fg_indices = d.get(self.fg_indices_key, None) if self.fg_indices_key is not None else None
    bg_indices = d.get(self.bg_indices_key, None) if self.bg_indices_key is not None else None
    self.randomize(label, fg_indices, bg_indices, image)
    # explicit raises instead of `assert`: `assert` is stripped under `python -O`,
    # which would silently skip this validation; exception type is unchanged
    if not isinstance(self.spatial_size, tuple):
        raise AssertionError(f"self.spatial_size should be a tuple after randomize, got {type(self.spatial_size)}.")
    if self.centers is None:
        raise AssertionError("no crop centers were computed, `randomize` must run before cropping.")
    results: List[Dict[Hashable, np.ndarray]] = [{} for _ in range(self.num_samples)]
    for key in data:
        if key in self.keys:
            img = d[key]
            for i, center in enumerate(self.centers):
                cropper = SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size)
                results[i][key] = cropper(img)
        else:
            # pass through keys this transform does not operate on
            for i in range(self.num_samples):
                results[i][key] = data[key]
    return results
def __call__(self, data):
    """
    Crop every key in ``self.keys`` to the foreground bounding box computed
    from ``self.source_key`` using ``self.select_fn``,
    ``self.channel_indexes`` and ``self.margin``.
    """
    d = dict(data)
    start, end = generate_spatial_bounding_box(
        data[self.source_key], self.select_fn, self.channel_indexes, self.margin
    )
    # one cropper, reused for every key so all entries get the same box
    crop = SpatialCrop(roi_start=start, roi_end=end)
    for key in self.keys:
        d[key] = crop(d[key])
    return d
def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
    """
    Crop every key in ``self.keys`` to the foreground bounding box computed
    from ``self.source_key`` using ``self.select_fn``,
    ``self.channel_indices`` and ``self.margin``.
    """
    d = dict(data)
    start, end = generate_spatial_bounding_box(
        d[self.source_key], self.select_fn, self.channel_indices, self.margin
    )
    # one cropper, reused for every key so all entries get the same box
    crop = SpatialCrop(roi_start=start, roi_end=end)
    for key in self.keys:
        d[key] = crop(d[key])
    return d
def __init__(self, keys: KeysCollection, roi_center=None, roi_size=None, roi_start=None, roi_end=None):
    """
    Dictionary-based wrapper around a fixed spatial crop; the same ROI is
    applied to every key.

    Args:
        keys: keys of the corresponding items to be transformed.
            See also: :py:class:`monai.transforms.compose.MapTransform`
        roi_center (list or tuple): voxel coordinates for center of the crop ROI.
        roi_size (list or tuple): size of the crop ROI.
        roi_start (list or tuple): voxel coordinates for start of the crop ROI.
        roi_end (list or tuple): voxel coordinates for end of the crop ROI.
    """
    super().__init__(keys)
    # the underlying array-level transform shared by all keys
    self.cropper = SpatialCrop(roi_center, roi_size, roi_start, roi_end)
def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
    """
    Crop all keys to the foreground bounding box of ``self.source_key``,
    store the box coordinates under the start/end coord keys, and push the
    transform info so the crop can be inverted later.
    """
    d = dict(data)
    start, end = generate_spatial_bounding_box(
        d[self.source_key], self.select_fn, self.channel_indices, self.margin
    )
    # expose the computed box in the output dictionary
    d[self.start_coord_key] = np.asarray(start)
    d[self.end_coord_key] = np.asarray(end)
    crop = SpatialCrop(roi_start=start, roi_end=end)
    for key in self.key_iterator(d):
        # record the box so `inverse` can restore the original extent
        self.push_transform(d, key, extra_info={"box_start": start, "box_end": end})
        d[key] = crop(d[key])
    return d
def __call__(self, data):
    """
    Sample ``self.num_samples`` patches at the centers drawn by
    ``self.randomize``; keys outside ``self.keys`` are copied into every
    sample unmodified.
    """
    d = dict(data)
    label = d[self.label_key]
    image = d[self.image_key] if self.image_key else None
    self.randomize(label, image)
    results = [{} for _ in range(self.num_samples)]
    for key in data.keys():
        if key not in self.keys:
            # pass-through for keys this transform does not operate on
            for idx in range(self.num_samples):
                results[idx][key] = data[key]
            continue
        arr = d[key]
        for idx, ctr in enumerate(self.centers):
            results[idx][key] = SpatialCrop(roi_center=tuple(ctr), roi_size=self.spatial_size)(arr)
    return results
def inverse(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
    """
    Undo a symmetric pad by center-cropping each key back to its recorded
    original spatial size. The input mapping is deep-copied, not mutated.
    """
    d = deepcopy(dict(data))
    for key in self.key_iterator(d):
        transform = self.get_most_recent_transform(d, key)
        # Create inverse transform
        orig_size = np.array(transform[InverseKeys.ORIG_SIZE])
        # spatial dims only: dim 0 is channel
        current_size = np.array(d[key].shape[1:])
        # NOTE(review): np.floor yields a float array, so roi_start/roi_end are
        # floats here — presumably SpatialCrop coerces coordinates to int;
        # confirm, otherwise an explicit .astype(int) is needed.
        roi_start = np.floor((current_size - orig_size) / 2)
        roi_end = orig_size + roi_start
        inverse_transform = SpatialCrop(roi_start=roi_start, roi_end=roi_end)
        # Apply inverse transform
        d[key] = inverse_transform(d[key])
        # Remove the applied transform
        self.pop_transform(d, key)
    return d
def __init__(
    self,
    keys: KeysCollection,
    roi_center: Optional[Sequence[int]] = None,
    roi_size: Optional[Sequence[int]] = None,
    roi_start: Optional[Sequence[int]] = None,
    roi_end: Optional[Sequence[int]] = None,
) -> None:
    """
    Dictionary-based wrapper around a fixed spatial crop; the same ROI is
    applied to every key. Specify the ROI either as center + size or as
    start + end coordinates.

    Args:
        keys: keys of the corresponding items to be transformed.
            See also: :py:class:`monai.transforms.compose.MapTransform`
        roi_center: voxel coordinates for center of the crop ROI.
        roi_size: size of the crop ROI.
        roi_start: voxel coordinates for start of the crop ROI.
        roi_end: voxel coordinates for end of the crop ROI.
    """
    super().__init__(keys)
    # the underlying array-level transform shared by all keys
    self.cropper = SpatialCrop(roi_center, roi_size, roi_start, roi_end)
def __init__(
    self,
    keys: KeysCollection,
    roi_center: Optional[Sequence[int]] = None,
    roi_size: Optional[Sequence[int]] = None,
    roi_start: Optional[Sequence[int]] = None,
    roi_end: Optional[Sequence[int]] = None,
    roi_slices: Optional[Sequence[slice]] = None,
    allow_missing_keys: bool = False,
) -> None:
    """
    Dictionary-based wrapper around a fixed spatial crop; the same ROI is
    applied to every key. Specify the ROI as center + size, start + end
    coordinates, or a list of slices.

    Args:
        keys: keys of the corresponding items to be transformed.
            See also: :py:class:`monai.transforms.compose.MapTransform`
        roi_center: voxel coordinates for center of the crop ROI.
        roi_size: size of the crop ROI.
        roi_start: voxel coordinates for start of the crop ROI.
        roi_end: voxel coordinates for end of the crop ROI.
        roi_slices: list of slices for each of the spatial dimensions.
        allow_missing_keys: don't raise exception if key is missing.
    """
    super().__init__(keys, allow_missing_keys)
    # the underlying array-level transform shared by all keys
    self.cropper = SpatialCrop(roi_center, roi_size, roi_start, roi_end, roi_slices)
def get_mask_edges(
    seg_pred: Union[np.ndarray, torch.Tensor],
    seg_gt: Union[np.ndarray, torch.Tensor],
    label_idx: int = 1,
    crop: bool = True,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Compute the edge maps of two segmentations via binary erosion and XOR.

    Useful as a preprocessing step for metrics such as Average Surface
    Distance and Hausdorff Distance. Inputs may be binary masks or labelfield
    images; labelfields are binarized with ``seg_pred == label_idx``.
    `scipy`'s binary erosion extracts the edges of each binary mask.

    Both images must have the same shape and are assumed to occupy the same
    physical space (spacing, orientation, etc.).

    Args:
        seg_pred: the predicted binary or labelfield image.
        seg_gt: the actual binary or labelfield image.
        label_idx: for labelfield images, convert to binary with
            ``seg_pred = seg_pred == label_idx``.
        crop: when ``True`` (default), both images are cropped to the bounding
            box of their union ``(seg_pred | seg_gt)`` before edge extraction,
            which keeps the shapes equal while reducing computation.
    """
    # normalize torch tensors to numpy arrays
    if isinstance(seg_pred, torch.Tensor):
        seg_pred = seg_pred.detach().cpu().numpy()
    if isinstance(seg_gt, torch.Tensor):
        seg_gt = seg_gt.detach().cpu().numpy()

    if seg_pred.shape != seg_gt.shape:
        raise ValueError(f"seg_pred and seg_gt should have same shapes, got {seg_pred.shape} and {seg_gt.shape}.")

    # binarize labelfield inputs
    if seg_pred.dtype != bool:
        seg_pred = seg_pred == label_idx
    if seg_gt.dtype != bool:
        seg_gt = seg_gt == label_idx

    if crop:
        union = seg_pred | seg_gt
        if not union.any():
            # nothing in the foreground: edges are trivially empty
            return np.zeros_like(seg_pred), np.zeros_like(seg_gt)
        # add a channel axis so the spatial cropper sees channel-first data
        seg_pred = np.expand_dims(seg_pred, 0)
        seg_gt = np.expand_dims(seg_gt, 0)
        lo, hi = generate_spatial_bounding_box(np.asarray(seg_pred | seg_gt))
        cropper = SpatialCrop(roi_start=lo, roi_end=hi)
        seg_pred = np.squeeze(cropper(seg_pred))
        seg_gt = np.squeeze(cropper(seg_gt))

    # erosion removes the boundary layer; XOR with the mask leaves only edges
    return binary_erosion(seg_pred) ^ seg_pred, binary_erosion(seg_gt) ^ seg_gt