def __getitem__(self, idx):
        """
        Extract the CT specified by idx.
        ----------
        INPUT
            |---- idx (int) the sample index in self.data_df.
        OUTPUT
            |---- im (torch.tensor) the CT image with dimension (1 x H x W).
            |---- label (torch.tensor) the slice label (1 if hemorrhage or artificial anomaly, else 0).
            |---- idx (torch.tensor) the sample index in self.data_df.
        """
        # load dicom and recover the CT pixel values
        dcm_im = pydicom.dcmread(self.data_path +
                                 self.data_df.iloc[idx].filename)
        im = (dcm_im.pixel_array * float(dcm_im.RescaleSlope) +
              float(dcm_im.RescaleIntercept))
        # Window the CT-scan
        if self.window:
            im = window_ct(im,
                           win_center=self.window[0],
                           win_width=self.window[1],
                           out_range=(0, 1))
        # transform image
        im = self.transform(im)
        label = self.data_df.iloc[idx].Hemorrhage

        if self.artificial_anomaly and (np.random.rand() <
                                        self.anomaly_proba) and (label == 0):
            # draw artificial ellipse anomalies and overlay them where they are non-zero
            anomalies = tf.ToTorchTensor()(self.draw_ellipses(
                (im.shape[1], im.shape[2]), **self.drawing_params))
            im = torch.where(anomalies > 0, anomalies, im)
            label = 1

        return im, torch.tensor(label), torch.tensor(idx)
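The `window_ct` helper used above is imported from the repository; as a rough illustration, CT intensity windowing typically clips the Hounsfield units to [center - width/2, center + width/2] and rescales the result linearly. A minimal sketch, assuming that behavior (the name and defaults are illustrative, not the repository's exact code):

import numpy as np

def window_ct_sketch(im, win_center=40, win_width=80, out_range=(0, 1)):
    # clip to the window, then rescale linearly to out_range
    low, high = win_center - win_width / 2, win_center + win_width / 2
    im = np.clip(im, low, high)
    im = (im - low) / (high - low)
    return im * (out_range[1] - out_range[0]) + out_range[0]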
Example #2
    def __init__(self,
                 img_fn,
                 mask_fn,
                 augmentation_transform=[
                     tf.Translate(low=-0.1, high=0.1),
                     tf.Rotate(low=-10, high=10),
                     tf.Scale(low=0.9, high=1.1),
                     tf.HFlip(p=0.5)
                 ],
                 output_size=256):
        """
        Build a dataset for loading images and masks.
        ----------
        INPUT
            |---- img_fn (list of str) list of image file names (for format support see skimage.io.imread).
            |---- mask_fn (list of str) list of binary mask file names.
            |---- augmentation_transform (list of transforms) data augmentation transformations to apply.
            |---- output_size (int) the dimension of the output (H = W).
        OUTPUT
            |---- ImgMaskDataset (torch.utils.data.Dataset) the image and mask dataset.
        """
        super(ImgMaskDataset, self).__init__()
        self.mask_fn = mask_fn
        self.img_fn = img_fn
        assert len(self.mask_fn) == len(self.img_fn), \
            f"The number of masks and images must be equal. " \
            f"Given {len(self.mask_fn)} masks and {len(self.img_fn)} images."

        self.transform = tf.Compose(tf.Resize(H=output_size, W=output_size),
                                    *augmentation_transform,
                                    tf.ToTorchTensor())
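A hypothetical usage sketch for this dataset; the file paths and batch size are placeholders, not taken from the source:

from torch.utils.data import DataLoader

img_fn = ['data/images/slice_000.png', 'data/images/slice_001.png']   # placeholder paths
mask_fn = ['data/masks/slice_000.png', 'data/masks/slice_001.png']    # placeholder paths
dataset = ImgMaskDataset(img_fn, mask_fn, output_size=256)
loader = DataLoader(dataset, batch_size=8, shuffle=True)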
    def __init__(self,
                 data_df,
                 data_path,
                 augmentation_transform=[
                     tf.Translate(low=-0.1, high=0.1),
                     tf.Rotate(low=-10, high=10),
                     tf.Scale(low=0.9, high=1.1),
                     tf.HFlip(p=0.5)
                 ],
                 window=None,
                 output_size=256):
        """
        Build a dataset for loading images and masks.
        ----------
        INPUT
            |---- data_df (pd.DataFrame) the input dataframe of samples. Each row must contain a column 'im_fn' with the
            |               image filepath and a column 'mask_fn' with the mask filepath.
            |---- data_path (str) path to the root of the dataset folder (up to where the samples' filenames begin).
            |---- augmentation_transform (list of transforms) data augmentation transformations to apply.
            |---- window (tuple (center, width)) the window for image intensity rescaling. If None, no windowing is performed.
            |---- output_size (int) the dimension of the output (H = W).
        OUTPUT
            |---- ImgMaskDataset (torch.utils.data.Dataset) the image and mask dataset.
        """
        super(ImgMaskDataset, self).__init__()
        self.data_df = data_df
        self.data_path = data_path
        self.window = window

        self.transform = tf.Compose(*augmentation_transform,
                                    tf.Resize(H=output_size, W=output_size),
                                    tf.ToTorchTensor())
    def __getitem__(self, idx):
        """
        Extract the CT specified by idx.
        ----------
        INPUT
            |---- idx (int) the sample index in self.data_df.
        OUTPUT
            |---- im (torch.tensor) the CT image with dimension (1 x H x W).
            |---- mask (torch.tensor) the inpainting mask with dimension (1 x H x W).
        """
        # load dicom and recover the CT pixel values
        dcm_im = pydicom.dcmread(self.data_path +
                                 self.data_df.iloc[idx].filename)
        im = (dcm_im.pixel_array * float(dcm_im.RescaleSlope) +
              float(dcm_im.RescaleIntercept))
        # Window the CT-scan
        if self.window:
            im = window_ct(im,
                           win_center=self.window[0],
                           win_width=self.window[1],
                           out_range=(0, 1))
        # transform image
        im = self.transform(im)
        # get a mask
        mask = self.random_ff_mask((im.shape[1], im.shape[2]))

        return im, tf.ToTorchTensor()(mask)
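A hypothetical usage sketch of the resulting (image, mask) pairs for inpainting training, assuming the mask is 1 inside the regions to inpaint; the loop body is illustrative:

from torch.utils.data import DataLoader

loader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=4)
for im, mask in loader:
    # im and mask are (B x 1 x H x W); zero out the regions to be inpainted
    corrupted = im * (1 - mask)
    # ... feed (corrupted, mask) to the inpainting network here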
    def __init__(self,
                 data_df,
                 data_path,
                 augmentation_transform=[
                     tf.Translate(low=-0.1, high=0.1),
                     tf.Rotate(low=-10, high=10),
                     tf.Scale(low=0.9, high=1.1),
                     tf.HFlip(p=0.5)
                 ],
                 window=None,
                 output_size=256):
        """
        Build a dataset for 2D annotated brain segmentation.
        ----------
        INPUT
            |---- data_df (pd.DataFrame) the input dataframe of samples. Each row must contain a volume number, a slice
            |           number, an image filename and a mask filename.
            |---- data_path (str) path to the root of the dataset folder (up to where the samples' filenames begin).
            |---- augmentation_transform (list of transforms) data augmentation transformations to apply.
            |---- window (tuple (center, width)) the window for CT intensity rescaling. If None, no windowing is performed.
            |---- output_size (int) the dimension of the output (H = W).
        OUTPUT
            |---- brain_Dataset2D (torch.Dataset) the 2D dataset.
        """
        super(brain_extract_Dataset2D, self).__init__()
        self.data_df = data_df
        self.data_path = data_path
        self.window = window

        self.transform = tf.Compose(*augmentation_transform,
                                    tf.Resize(H=output_size, W=output_size),
                                    tf.ToTorchTensor())
    def __init__(self,
                 data_df,
                 data_path,
                 augmentation_transform=[
                     tf.Translate(low=-0.1, high=0.1),
                     tf.Rotate(low=-10, high=10),
                     tf.Scale(low=0.9, high=1.1),
                     tf.HFlip(p=0.5)
                 ],
                 window=None,
                 output_size=256,
                 n_draw=(1, 4),
                 vertex=(5, 15),
                 brush_width=(10, 30),
                 angle=(0.0, 6.28),
                 length=(10, 30),
                 n_salt_pepper=(0, 10),
                 salt_peper_radius=(1, 3)):
        """
        Build a dataset of CT slices from the RSNA dataset.
        ----------
        INPUT
            |---- data_df (pd.DataFrame) the input dataframe of samples. Each row must contain a filename and a column
            |           'Hemorrhage' specifying whether the slice contains a hemorrhage.
            |---- data_path (str) path to the root of the dataset folder (up to where the samples' filenames begin).
            |---- augmentation_transform (list of transforms) data augmentation transformations to apply.
            |---- window (tuple (center, width)) the window for CT intensity rescaling. If None, no windowing is performed.
            |---- output_size (int) the dimension of the output (H = W).
            |---- n_draw (tuple (low, high)) range of the number of inpaint elements to draw.
            |---- vertex (tuple (low, high)) range of the number of vertices for each inpaint element.
            |---- brush_width (tuple (low, high)) range of brush sizes used to draw each inpaint element.
            |---- angle (tuple (low, high)) range of angles between consecutive vertices of an inpaint element. Note that every
            |               two segments, Pi is added to the angle to keep the drawing in the vicinity. Angles in radians.
            |---- length (tuple (low, high)) range of lengths for each segment.
            |---- n_salt_pepper (tuple (low, high)) range of the number of salt-and-pepper disk elements to draw. Set to (0, 1)
            |               for no salt-and-pepper elements.
            |---- salt_peper_radius (tuple (low, high)) range of radii for the salt-and-pepper disk elements.
        OUTPUT
            |---- RSNA_Inpaint_dataset (torch.Dataset) the RSNA dataset for inpainting.
        """
        super(RSNA_Inpaint_dataset, self).__init__()
        self.data_df = data_df
        self.data_path = data_path
        self.window = window

        self.transform = tf.Compose(*augmentation_transform,
                                    tf.Resize(H=output_size, W=output_size),
                                    tf.ToTorchTensor())
        self.n_draw = n_draw
        self.vertex = vertex
        self.brush_width = brush_width
        self.angle = angle
        self.length = length
        self.n_salt_pepper = n_salt_pepper
        self.salt_peper_radius = salt_peper_radius
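A hypothetical construction sketch: only the 'filename' and 'Hemorrhage' columns are required by the docstring above; the paths and the (40, 80) brain window are placeholders:

import pandas as pd

data_df = pd.DataFrame({'filename': ['ID_000.dcm', 'ID_001.dcm'],  # placeholder DICOM names
                        'Hemorrhage': [0, 1]})
dataset = RSNA_Inpaint_dataset(data_df, data_path='data/RSNA/', window=(40, 80))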
    def __init__(self,
                 data_df,
                 data_path,
                 artificial_anomaly=True,
                 anomaly_proba=0.5,
                 augmentation_transform=[
                     tf.Translate(low=-0.1, high=0.1),
                     tf.Rotate(low=-10, high=10),
                     tf.Scale(low=0.9, high=1.1),
                     tf.HFlip(p=0.5)
                 ],
                 window=None,
                 output_size=256,
                 drawing_params=dict(n_ellipse=(1, 10),
                                     major_axis=(1, 25),
                                     minor_axis=(1, 25),
                                     rotation=(0, 2 * np.pi),
                                     intensity=(0.1, 1),
                                     noise=None)):
        """
        Build a dataset for FCDD training on the RSNA data.
        ----------
        INPUT
            |---- data_df (pd.DataFrame) the input dataframe of samples. Each row must contain a filename and a column
            |           'Hemorrhage' specifying whether the slice contains a hemorrhage.
            |---- data_path (str) path to the root of the dataset folder (up to where the samples' filenames begin).
            |---- artificial_anomaly (bool) whether to generate anomalies by drawing ellipses on top of the image. If False,
            |           the dataset will return slices labeled as hemorrhagic as anomalies.
            |---- anomaly_proba (float in [0.0, 1.0]) the probability of generating an artificial anomaly. Ignored if
            |           artificial_anomaly is False.
            |---- augmentation_transform (list of transforms) data augmentation transformations to apply.
            |---- window (tuple (center, width)) the window for CT intensity rescaling. If None, no windowing is performed.
            |---- output_size (int) the dimension of the output (H = W).
            |---- drawing_params (dict) the parameters to be passed to the ellipse drawing method.
        OUTPUT
            |---- RSNA_FCDD_dataset (data.Dataset)
        """
        super().__init__()
        assert 0.0 <= anomaly_proba <= 1.0, f"Probability of anomaly must be in [0.0 , 1.0]. Given {anomaly_proba}."
        self.data_df = data_df
        self.data_path = data_path
        self.artificial_anomaly = artificial_anomaly
        self.anomaly_proba = anomaly_proba
        self.window = window

        self.transform = tf.Compose(*augmentation_transform,
                                    tf.Resize(H=output_size, W=output_size),
                                    tf.ToTorchTensor())

        self.drawing_params = drawing_params
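The `draw_ellipses` method configured by `drawing_params` is not shown here. A minimal sketch of a compatible helper, assuming it paints randomly placed, rotated ellipses of random intensity on a blank canvas (illustrative only, not the repository's implementation):

import numpy as np
from skimage.draw import ellipse

def draw_ellipses_sketch(shape, n_ellipse=(1, 10), major_axis=(1, 25), minor_axis=(1, 25),
                         rotation=(0, 2 * np.pi), intensity=(0.1, 1), noise=None):
    # noise handling is omitted in this sketch
    canvas = np.zeros(shape, dtype=np.float32)
    for _ in range(np.random.randint(*n_ellipse)):
        r, c = np.random.randint(0, shape[0]), np.random.randint(0, shape[1])
        rr, cc = ellipse(r, c,
                         np.random.randint(*major_axis), np.random.randint(*minor_axis),
                         rotation=np.random.uniform(*rotation), shape=shape)
        canvas[rr, cc] = np.random.uniform(*intensity)
    return canvas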
Example #8
    def __getitem__(self, idx):
        """
        Extract the image specified by idx.
        ----------
        INPUT
            |---- idx (int) the sample index in self.data_df.
        OUTPUT
            |---- im (torch.tensor) the image with dimension (1 x H x W).
            |---- mask (torch.tensor) the inpainting mask with dimension (1 x H x W).
        """
        # load image and convert it to float in [0, 1]
        im = skimage.img_as_float(io.imread(self.img_fn[idx]))
        # transform image
        im = self.transform(im)
        # get a random mask
        mask = self.random_ff_mask((im.shape[1], im.shape[2]), **self.ff_param)

        return im, tf.ToTorchTensor()(mask)
Example #9
 def __init__(self,
              fn_list,
              augmentation_transform=[
                  tf.Translate(low=-0.1, high=0.1),
                  tf.Rotate(low=-10, high=10),
                  tf.Scale(low=0.9, high=1.1),
                  tf.HFlip(p=0.5)
              ],
              output_size=256,
              ff_param=dict(n_draw=(1, 4),
                            vertex=(15, 30),
                            brush_width=(15, 25),
                            angle=(0.5, 2),
                            length=(15, 50),
                            n_salt_pepper=(0, 15),
                            salt_peper_radius=(1, 6))):
     """
     Build a dataset for free-form inpainting from an image folder.
     ----------
     INPUT
         |---- fn_list (list of str) list of image file names (for format support see skimage.io.imread).
         |---- augmentation_transform (list of transforms) data augmentation transformations to apply.
         |---- output_size (int) the dimension of the output (H = W).
         |---- ff_param (dict) parameters for the free-form mask generation. Valid keys are:
         |        |---- n_draw (tuple (low, high)) range of the number of inpaint elements to draw.
         |        |---- vertex (tuple (low, high)) range of the number of vertices for each inpaint element.
         |        |---- brush_width (tuple (low, high)) range of brush sizes used to draw each inpaint element.
         |        |---- angle (tuple (low, high)) range of angles between consecutive vertices of an inpaint element. Note that every
         |        |               two segments, Pi is added to the angle to keep the drawing in the vicinity. Angles in radians.
         |        |---- length (tuple (low, high)) range of lengths for each segment.
         |        |---- n_salt_pepper (tuple (low, high)) range of the number of salt-and-pepper disk elements to draw. Set to (0, 1)
         |        |               for no salt-and-pepper elements.
         |        |---- salt_peper_radius (tuple (low, high)) range of radii for the salt-and-pepper disk elements.
     OUTPUT
         |---- Inpaint_dataset (torch.Dataset)
     """
     super(InpaintDataset, self).__init__()
     self.img_fn = fn_list
     self.transform = tf.Compose(*augmentation_transform,
                                 tf.Resize(H=output_size, W=output_size),
                                 tf.ToTorchTensor())
     self.ff_param = ff_param
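`random_ff_mask` itself is not shown in these snippets. A minimal sketch of a free-form mask generator matching the ff_param keys above, in the spirit of DeepFill-style masks; cv2 is used purely for illustration and may not be the repository's actual dependency:

import numpy as np
import cv2

def random_ff_mask_sketch(shape, n_draw=(1, 4), vertex=(15, 30), brush_width=(15, 25),
                          angle=(0.5, 2), length=(15, 50),
                          n_salt_pepper=(0, 15), salt_peper_radius=(1, 6)):
    h, w = shape
    mask = np.zeros((h, w), dtype=np.uint8)
    # free-form strokes: random walks of thick line segments
    for _ in range(np.random.randint(*n_draw)):
        x, y = np.random.randint(0, w), np.random.randint(0, h)
        width = int(np.random.randint(*brush_width))
        for i in range(np.random.randint(*vertex)):
            a = np.random.uniform(*angle)
            if i % 2 == 1:
                a = np.pi + a  # fold back every two segments to keep the stroke local
            l = np.random.randint(*length)
            x_new = int(np.clip(x + l * np.cos(a), 0, w - 1))
            y_new = int(np.clip(y + l * np.sin(a), 0, h - 1))
            cv2.line(mask, (int(x), int(y)), (x_new, y_new), 1, width)
            x, y = x_new, y_new
    # additional salt-and-pepper disks
    for _ in range(np.random.randint(*n_salt_pepper)):
        x, y = np.random.randint(0, w), np.random.randint(0, h)
        cv2.circle(mask, (int(x), int(y)), int(np.random.randint(*salt_peper_radius)), 1, -1)
    return mask.astype(np.float32)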
    def __init__(self,
                 data_df,
                 data_path,
                 augmentation_transform=[
                     tf.RandomZCrop(Z=64),
                     tf.Translate(low=-0.1, high=0.1),
                     tf.Rotate(low=-10, high=10),
                     tf.Scale(low=0.9, high=1.1),
                     tf.HFlip(p=0.5)
                 ],
                 window=None,
                 resampling_dim=(-1, -1, 2.5),
                 resampling_order=1):
        """
        Build a dataset for the 3D annotated segmentation of ICH from NIfTI images.
        ----------
        INPUT
            |---- data_df (pd.DataFrame) the input dataframe of samples. Each row must contain a patient number, an
            |           image filename and a mask filename.
            |---- data_path (str) path to the root of the dataset folder (up to where the samples' filenames begin).
            |---- augmentation_transform (list of transforms) data augmentation transformations to apply.
            |---- window (tuple (center, width)) the window for CT intensity rescaling. If None, no windowing is performed.
            |---- resampling_dim (tuple (x, y, z)) the output pixel dimension for volume resampling. If a value is set to
            |           -1, the input pixel dimension is used.
            |---- resampling_order (int) define the interpolation strategy for the resampling. Must be between 0 and 5.
            |           See scipy.ndimage.zoom().
        OUTPUT
            |---- ICH_Dataset3D (torch.Dataset) the 3D dataset.
        """
        super(public_SegICH_Dataset3D, self).__init__()
        self.data_df = data_df
        self.data_path = data_path

        self.window = window
        self.resampling_dim = resampling_dim
        self.resampling_order = resampling_order

        self.transform = tf.Compose(*augmentation_transform,
                                    tf.ToTorchTensor())
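The docstring above defers to scipy.ndimage.zoom for resampling. A minimal sketch of how `resampling_dim` is presumably turned into zoom factors, where -1 keeps the input pixel dimension for that axis (illustrative only):

from scipy import ndimage

def resample_volume_sketch(vol, input_pix_dim, resampling_dim=(-1, -1, 2.5), order=1):
    # -1 keeps the input pixel dimension for that axis
    out_dim = [i if o == -1 else o for i, o in zip(input_pix_dim, resampling_dim)]
    zoom_factors = [i / o for i, o in zip(input_pix_dim, out_dim)]
    return ndimage.zoom(vol, zoom_factors, order=order)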
Example #11
    def segement_volume(self,
                        vol,
                        save_fn=None,
                        window=None,
                        input_size=(256, 256),
                        return_pred=False):
        """
        Segment each slice of the passed NIfTI volume and save the result as a NIfTI volume.
        ----------
        INPUT
            |---- vol (nibabel.nifti1.Nifti1Pair) the nibabel volume with metadata to segment.
            |---- save_fn (str) where to save the segmentation.
            |---- window (tuple (center, width)) the windowing to apply to the CT scan.
            |---- input_size (tuple (h, w)) the input size for the network.
            |---- return_pred (bool) whether to return the volume of prediction.
        OUTPUT
            |---- (mask_vol) (nibabel.nifti1.Nifti1Pair) the prediction volume.
        """
        pred_list = []
        vol_data = np.rot90(vol.get_fdata(),
                            axes=(0, 1))  # 90° counterclockwise rotation
        if window:
            vol_data = window_ct(vol_data,
                                 win_center=window[0],
                                 win_width=window[1],
                                 out_range=(0, 1))
        transform = tf.Compose(tf.Resize(H=input_size[0], W=input_size[1]),
                               tf.ToTorchTensor())
        self.unet.eval()
        self.unet.to(self.device)
        with torch.no_grad():
            for s in range(0, vol_data.shape[2], self.batch_size):
                # get slice in good size and as tensor
                input = transform(vol_data[:, :, s:s + self.batch_size]).to(
                    self.device).float().permute(3, 0, 1, 2)
                # predict
                pred = self.unet(input)
                pred = torch.where(pred >= 0.5,
                                   torch.ones_like(pred, device=self.device),
                                   torch.zeros_like(pred, device=self.device))
                # store pred (B x H x W)
                pred_list.append(
                    pred.squeeze(dim=1).permute(1, 2, 0).cpu().numpy().astype(
                        np.uint8) * 255)
                if self.print_progress:
                    print_progessbar(s + pred.shape[0] - 1,
                                     Max=vol_data.shape[2],
                                     Name='Slice',
                                     Size=20,
                                     erase=True)

        # make the prediction volume
        vol_pred = np.concatenate(pred_list, axis=2)
        # resize back to the original volume's in-plane size and rotate 90° clockwise
        vol_pred = np.rot90(skimage.transform.resize(
            vol_pred, (vol.header['dim'][1], vol.header['dim'][2]), order=0),
                            axes=(1, 0))
        # make NIfTI and save it
        vol_pred_nii = nib.Nifti1Pair(vol_pred.astype(np.uint8), vol.affine)
        if save_fn:
            nib.save(vol_pred_nii, save_fn)
        # return Nifti prediction
        if return_pred:
            return vol_pred_nii
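A hypothetical usage sketch; the wrapper object `segmenter` exposing this method, the file names, and the (40, 80) brain window are placeholders:

import nibabel as nib

vol = nib.load('ct_volume.nii.gz')                       # placeholder file name
pred_nii = segmenter.segement_volume(vol,
                                     save_fn='ct_volume_pred.nii.gz',
                                     window=(40, 80),
                                     return_pred=True)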
    def __init__(self,
                 data_df,
                 data_path,
                 augmentation_transform=[
                     tf.Translate(low=-0.1, high=0.1),
                     tf.Rotate(low=-10, high=10),
                     tf.Scale(low=0.9, high=1.1),
                     tf.HFlip(p=0.5)
                 ],
                 window=None,
                 output_size=256,
                 mode='standard',
                 n_swap=10,
                 swap_w=15,
                 swap_h=15,
                 swap_rot=False,
                 contrastive_augmentation=None):
        """
        Build a dataset of ICH CT slices from the RSNA dataset.
        ----------
        INPUT
            |---- data_df (pd.DataFrame) the input dataframe of samples. Each row must contain a filename and a column
            |           'Hemorrhage' specifying whether the slice contains a hemorrhage.
            |---- data_path (str) path to the root of the dataset folder (up to where the samples' filenames begin).
            |---- augmentation_transform (list of transforms) data augmentation transformations to apply.
            |---- window (tuple (center, width)) the window for CT intensity rescaling. If None, no windowing is performed.
            |---- output_size (int) the dimension of the output (H = W).
            |---- mode (str) define how to load the RSNA dataset. 'standard': return an image with its label.
            |           'context_restoration': return the image and the corrupted image. 'contrastive': return two heavily
            |           augmented versions of the input image. 'binary_classification': return the image and a binary label
            |           (ICH vs No-ICH). 'multi_classification': return the image and a multi-label target over hemorrhage subtypes.
            |---- n_swap (int) the number of swap to use in the context_restoration mode.
            |---- swap_h (int) the height of the swapped patch in the context_restoration mode.
            |---- swap_w (int) the width of the swapped patch in the context_restoration mode.
            |---- swap_rot (bool) whether to rotate patches. If true, swap_h must be None.
            |---- contrastive_augmentation (list of transformation) the list of augmentation to apply in the contrastive
            |           mode. They must be composable by tf.Compose.
        OUTPUT
            |---- RSNA_dataset (torch.Dataset) the RSNA dataset.
        """
        super(RSNA_dataset, self).__init__()
        self.data_df = data_df.copy()
        self.n_sample = len(data_df)
        self.data_path = data_path
        self.window = window
        assert mode in [
            'standard', 'context_restoration', 'contrastive',
            'binary_classification', 'multi_classification'
        ], f"Invalid mode. Must be one of 'standard', 'context_restoration', 'contrastive', 'binary_classification', 'multi_classification'. Given: {mode}"
        self.mode = mode

        # ToTorchTensor is kept out of the main transform and applied separately
        # after the mode-specific processing below.
        self.transform = tf.Compose(*augmentation_transform,
                                    tf.Resize(H=output_size, W=output_size))
        self.toTensor = tf.ToTorchTensor()
        if mode == 'context_restoration':
            self.swap_tranform = tf.RandomPatchSwap(n=n_swap,
                                                    w=swap_w,
                                                    h=swap_h,
                                                    rotate=swap_rot)
        elif mode == 'contrastive':
            self.contrastive_transform = tf.Compose(*contrastive_augmentation)
        elif mode == 'multi_classification':
            # add a column 'no_Hemorrhage'
            self.data_df['no_Hemorrhage'] = 1 - self.data_df.Hemorrhage
            # name of the classes
            self.class_name = [
                'no_Hemorrhage', 'Hemorrhage', 'epidural', 'intraparenchymal',
                'intraventricular', 'subarachnoid', 'subdural'
            ]
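A hypothetical usage sketch of the mode switch; the path, window, and contrastive augmentations are placeholders, and the exact return format of __getitem__ is not shown in these snippets:

dataset = RSNA_dataset(data_df, data_path='data/RSNA/',   # placeholder path
                       window=(40, 80),
                       mode='contrastive',
                       contrastive_augmentation=[tf.Rotate(low=-30, high=30),
                                                 tf.HFlip(p=0.5)])
sample = dataset[0]  # per the docstring, two heavily augmented views of the same slice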