def __init__(self, keys: KeysCollection, channel_dim: int = -1, allow_missing_keys: bool = False) -> None:
    """
    Dictionary-based constructor: configures a channel-first converter for the given keys.

    Args:
        keys: keys of the corresponding items to be transformed.
            See also: :py:class:`monai.transforms.compose.MapTransform`
        channel_dim: which dimension of input image is the channel, default is the last dimension.
        allow_missing_keys: don't raise exception if key is missing.
    """
    super().__init__(keys, allow_missing_keys)
    # A single shared AsChannelFirst instance is reused for every selected key.
    self.converter = AsChannelFirst(channel_dim=channel_dim)
def get_patch(
    self, wsi, location: Tuple[int, int], size: Tuple[int, int], level: int, dtype: DtypeLike, mode: str
) -> np.ndarray:
    """
    Extracts and returns a patch image from the whole slide image.

    Args:
        wsi: a whole slide image object loaded from a file, or a list of such objects.
        location: (top, left) tuple giving the top left pixel in the level 0 reference frame.
        size: (height, width) tuple giving the patch size at the given level (`level`).
        level: the level number of the whole slide image.
        dtype: the data type of the output image array.
        mode: the output image color mode, 'RGB' or 'RGBA'.

    Returns:
        The patch as a channel-first numpy array of shape (channels, height, width).

    Raises:
        ValueError: when the number of color channels does not match `mode`
            ('RGB' expects 3, 'RGBA' expects 4).
    """
    # Extract a patch or the entire image.
    # The reader expects (x, y)/(width, height) ordering, so reverse the
    # (top, left)/(height, width) tuples before passing them through.
    pil_patch = wsi.read_region(location=location[::-1], size=size[::-1], level=level)

    # Convert to the requested color mode (RGB/RGBA).
    pil_patch = pil_patch.convert(mode)

    # Convert to a numpy array with the requested dtype.
    patch = np.asarray(pil_patch, dtype=dtype)

    # Make it channel first (HWC -> CHW); equivalent to applying
    # AsChannelFirst() (default channel_dim=-1) without per-call transform construction.
    patch = np.moveaxis(patch, -1, 0)

    # Check if the color channel is 3 (RGB) or 4 (RGBA).
    if mode == "RGBA" and patch.shape[0] != 4:
        raise ValueError(
            f"The image is expected to have four color channels in '{mode}' mode but has {patch.shape[0]}."
        )
    # Bug fix: the original used `mode in "RGB"` (substring membership), which also
    # matched modes such as 'R', 'G', 'B', 'RG', and ''; use exact equality instead.
    if mode == "RGB" and patch.shape[0] != 3:
        raise ValueError(
            f"The image is expected to have three color channels in '{mode}' mode but has {patch.shape[0]}. "
        )

    return patch