Example #1
0
 def analysis_patchwise_crop(self, sample, data_aug):
     """Preprocess a sample for the patchwise-crop analysis mode.

     Selects a single random patch of shape ``self.patch_shape`` from the
     sample. If ``self.patchwise_skip_blanks`` is active, the image is first
     sliced into a grid of overlapping patches and patches containing only
     the background class (``self.patchwise_skip_class``) are filtered out
     before the random selection. Otherwise, a random crop is produced
     through the Data Augmentation pipeline's cropping mechanism.

     Args:
         sample:   Sample object providing ``img_data`` and ``seg_data``
                   (segmentation assumed one-hot encoded along the last axis).
         data_aug: Boolean, whether data augmentation should be applied.

     Returns:
         A list containing a single (image, segmentation) tuple for the
         selected patch.
     """
     # If skipping blank patches is active
     if self.patchwise_skip_blanks:
         # Slice image and segmentation into patches
         patches_img = slice_matrix(sample.img_data, self.patch_shape,
                                    self.patchwise_overlap,
                                    self.data_io.interface.three_dim)
         patches_seg = slice_matrix(sample.seg_data, self.patch_shape,
                                    self.patchwise_overlap,
                                    self.data_io.interface.three_dim)
         # Keep only patches that contain at least one non-background voxel
         # (the skip-class channel of a one-hot segmentation is 1 on pure
         # background)
         keep = [i for i in range(len(patches_seg))
                 if np.any(patches_seg[i][..., self.patchwise_skip_class] != 1)]
         # BUGFIX: previously, if EVERY patch was blank, all patches were
         # deleted and np.random.randint(0, 0) raised a ValueError.
         # Fall back to the full (blank) patch list in that case.
         if not keep:
             keep = list(range(len(patches_seg)))
         patches_img = [patches_img[i] for i in keep]
         patches_seg = [patches_seg[i] for i in keep]
         # Select a random patch
         pointer = np.random.randint(0, len(patches_img))
         img = patches_img[pointer]
         seg = patches_seg[pointer]
         # Expand image dimension to simulate a batch with one image
         img_data = np.expand_dims(img, axis=0)
         seg_data = np.expand_dims(seg, axis=0)
         # Pad patches if necessary (border patches can be smaller than
         # the requested patch shape)
         if img_data.shape[1:-1] != self.patch_shape:
             img_data = pad_patch(img_data, self.patch_shape,
                                  return_slicer=False)
             seg_data = pad_patch(seg_data, self.patch_shape,
                                  return_slicer=False)
         # Run data augmentation
         if data_aug:
             img_data, seg_data = self.data_augmentation.run(img_data,
                                                             seg_data)
     # If skipping blank is not active -> random crop
     else:
         # Access image and segmentation data
         img = sample.img_data
         seg = sample.seg_data
         # If no data augmentation should be performed
         # -> create Data Augmentation instance without augmentation methods
         if not data_aug or self.data_augmentation is None:
             cropping_data_aug = Data_Augmentation(cycles=1,
                                         scaling=False, rotations=False,
                                         elastic_deform=False, mirror=False,
                                         brightness=False, contrast=False,
                                         gamma=False, gaussian_noise=False)
         else : cropping_data_aug = self.data_augmentation
         # Configure the Data Augmentation instance to cropping
         # NOTE(review): when self.data_augmentation is reused here, these
         # flags mutate the shared instance persistently — intentional?
         cropping_data_aug.cropping = True
         cropping_data_aug.cropping_patch_shape = self.patch_shape
         # Expand image dimension to simulate a batch with one image
         img_data = np.expand_dims(img, axis=0)
         seg_data = np.expand_dims(seg, axis=0)
         # Run data augmentation and cropping
         img_data, seg_data = cropping_data_aug.run(img_data, seg_data)
     # Create tuple of preprocessed data
     ready_data = list(zip(img_data, seg_data))
     # Return preprocessed data tuple
     return ready_data
Example #2
0
 def __init__(self,
              data_io,
              batch_size,
              subfunctions=None,
              data_aug=Data_Augmentation(),
              prepare_subfunctions=False,
              prepare_batches=False,
              analysis="patchwise-crop",
              patch_shape=None):
     """Initialize the Preprocessor.

     Args:
         data_io:              Data I/O object used to access samples.
         batch_size:           Number of samples per batch.
         subfunctions:         Ordered list of preprocessing Subfunctions
                               (default: empty list -> no subfunctions).
         data_aug:             Data_Augmentation instance, or any non-instance
                               value (e.g. None) to disable augmentation.
         prepare_subfunctions: Whether to precompute subfunctions.
         prepare_batches:      Whether to precompute batches.
         analysis:             One of "patchwise-crop", "patchwise-grid"
                               or "fullimage".
         patch_shape:          Tuple with the patch shape; required for the
                               patchwise analysis types.

     Raises:
         ValueError: On an unknown analysis type, or on a missing/invalid
                     patch shape for patchwise analysis.
     """
     # Parse Data Augmentation
     # NOTE(review): the default Data_Augmentation() instance is created once
     # at definition time and shared by every Preprocessor that relies on the
     # default — later mutation of that instance leaks across objects; confirm
     # whether a per-instance default is intended.
     if isinstance(data_aug, Data_Augmentation):
         self.data_augmentation = data_aug
     else:
         self.data_augmentation = None
     # Exception: Analysis parameter check
     analysis_types = ["patchwise-crop", "patchwise-grid", "fullimage"]
     if not isinstance(analysis, str) or analysis not in analysis_types:
         raise ValueError('Non existent analysis type in preprocessing.')
     # Exception: Patch-shape parameter check
     if (analysis == "patchwise-crop" or analysis == "patchwise-grid") and \
         not isinstance(patch_shape, tuple):
         raise ValueError("Missing or wrong patch shape parameter for " + \
                          "patchwise analysis.")
     # Parse parameter
     self.data_io = data_io
     self.batch_size = batch_size
     # BUGFIX: the default was the mutable literal [], shared across all
     # instances; use a None sentinel and create a fresh list per instance.
     self.subfunctions = [] if subfunctions is None else subfunctions
     self.prepare_subfunctions = prepare_subfunctions
     self.prepare_batches = prepare_batches
     self.analysis = analysis
     self.patch_shape = patch_shape
Example #3
0
    def __init__(self, saved_path):
        """Build the full MIScnn pipeline and load a pretrained model.

        Args:
            saved_path: Path of the stored model weights to load.
        """
        # Recreate a clean "input/" directory under the global path_prefix
        self.input_path = os.path.join(path_prefix, "input/")
        if os.path.isdir(self.input_path):
            shutil.rmtree(self.input_path)
        os.mkdir(self.input_path)

        # Data I/O: NIfTI files with a single channel and three classes
        io_interface = NIFTI_interface(pattern="input", channels=1, classes=3)
        io_handler = Data_IO(io_interface, path_prefix)

        # Data augmentation: two cycles with every transform enabled
        augmentation = Data_Augmentation(cycles=2, scaling=True,
                                         rotations=True, elastic_deform=True,
                                         mirror=True, brightness=True,
                                         contrast=True, gamma=True,
                                         gaussian_noise=True)

        # Preprocessing subfunctions, applied in list order:
        # resampling -> clipping -> z-score normalization
        pipeline_sf = [Resampling((3.22, 1.62, 1.62)),
                       Clipping(min=-79, max=304),
                       Normalization(z_score=True)]

        # Patchwise-crop preprocessing with 48x128x128 patches
        preprocessor = Preprocessor(io_handler,
                                    data_aug=augmentation,
                                    batch_size=1,
                                    subfunctions=pipeline_sf,
                                    prepare_subfunctions=True,
                                    analysis="patchwise-crop",
                                    patch_shape=(48, 128, 128))
        preprocessor.patchwise_overlap = (12, 32, 32)

        # Standard U-Net trained with Tversky loss.
        # NOTE: "learninig_rate" (sic) is the parameter name the
        # Neural_Network API expects — do not "fix" the spelling.
        self.model = Neural_Network(preprocessor=preprocessor,
                                    architecture=Architecture(),
                                    loss=tversky_loss,
                                    metrics=[dice_soft, dice_crossentropy],
                                    batch_queue_size=1,
                                    workers=1,
                                    learninig_rate=0.0001)

        self.model.load(saved_path)
Example #4
0
    vol = nib.load(path_vol)
    vol_data = vol.get_data()

# Create the Data I/O object
# (assumes `interface` and `data_path` are defined earlier in this script)
data_io = Data_IO(interface, data_path)

# Collect the available sample identifiers and sort them for a
# deterministic processing order
sample_list = data_io.get_indiceslist()
sample_list.sort()
print("All samples: " + str(sample_list))

# Create and configure the Data Augmentation class:
# two augmentation cycles with every transform enabled
data_aug = Data_Augmentation(cycles=2,
                             scaling=True,
                             rotations=True,
                             elastic_deform=True,
                             mirror=True,
                             brightness=True,
                             contrast=True,
                             gamma=True,
                             gaussian_noise=True)

# Select Subfunctions for the Preprocessing

# Create a pixel value normalization Subfunction
# (presumably Z-Score with default arguments — verify against the
# Normalization API used elsewhere, which passes z_score=True explicitly)
sf_normalize = Normalization()
# Create a clipping Subfunction between -79 and 304
sf_clipping = Clipping(min=-79, max=304)
# Create a resampling Subfunction to voxel spacing 3.22 x 1.62 x 1.62
sf_resample = Resampling((3.22, 1.62, 1.62))

# Assemble Subfunction classes into a list.
# Be aware that the Subfunctions will be executed according to the list order!