def process(self):
    """Main pipeline: load image/ROI data, apply perturbations per iteration,
    compute/export features and images, and write the aggregated feature table.

    Returns None. Writes a ";"-separated feature csv to self.write_path when
    self.compute_features is set; returns early (None) if no features could be
    extracted.
    """
    import os
    import sys  # FIX: sys.stdout is used below but sys was never imported
    import copy
    import logging

    from mirp.imageRead import load_image
    from mirp.imageProcess import crop_image, estimate_image_noise, interpolate_image, \
        interpolate_roi, divide_tumour_regions, resegmentise, calculate_features, transform_images, \
        create_tissue_mask, bias_field_correction, normalise_image
    from mirp.imagePerturbations import rotate_image, adapt_roi_size, randomise_roi_contours

    # Configure the root logger to emit to stdout.
    # NOTE(review): basicConfig affects global logging state; assumed intentional here.
    logging.basicConfig(
        format="%(levelname)s\t: %(processName)s \t %(asctime)s \t %(message)s",
        level=logging.INFO,
        stream=sys.stdout)

    # Collected per-iteration feature frames.
    feat_list = []

    # Notify start of computation.
    logging.info(self._message_computation_initialisation())

    # Get iterables from current settings which lead to different image adaptations
    # (rotations, voxel spacings, translations).
    iter_set, n_outer_iter, n_inner_iter = self.get_iterable_parameters(settings=self.settings)

    # Optionally pre-load image and ROIs once (more memory, but may be faster
    # than re-reading from disk/network on every iteration).
    if self.keep_images_in_memory:
        base_img_obj, base_roi_list = load_image(
            image_folder=self.image_folder,
            modality=self.modality,
            roi_folder=self.roi_folder,
            registration_image_folder=self.roi_reg_img_folder,
            image_name=self.image_file_name_pattern,
            roi_names=self.roi_names,
            registration_image_name=self.registration_image_file_name_pattern)
        self.set_image_name(img_obj=base_img_obj)
    else:
        base_img_obj = base_roi_list = None

    # Iterate over iterable settings.
    for ii in np.arange(0, n_outer_iter):

        # Log current iteration.
        if n_outer_iter * n_inner_iter > 1:
            if n_inner_iter > 1:
                logging.info(
                    f"Starting computations for {ii * n_inner_iter + 1} to {(ii+1) * n_inner_iter} of {n_outer_iter * n_inner_iter} perturbations.")
            else:
                logging.info(
                    f"Starting computations for {ii + 1} of {n_outer_iter} perturbations.")
        else:
            logging.info("Starting computations.")

        ################################################################################################
        # Load and pre-process image and roi
        ################################################################################################

        # Use pre-loaded base image and roi_list, or load from disk.
        if self.keep_images_in_memory:
            img_obj = base_img_obj.copy()
            roi_list = copy.deepcopy(base_roi_list)
        else:
            # Read image and ROI segmentations.
            img_obj, roi_list = load_image(
                image_folder=self.image_folder,
                modality=self.modality,
                roi_folder=self.roi_folder,
                registration_image_folder=self.roi_reg_img_folder,
                image_name=self.image_file_name_pattern,
                roi_names=self.roi_names,
                registration_image_name=self.registration_image_file_name_pattern)
            self.set_image_name(img_obj=img_obj)

        # Crop slice stack around the ROI, if requested.
        if self.settings.vol_adapt.crop:
            img_obj, roi_list = crop_image(
                img_obj=img_obj,
                roi_list=roi_list,
                boundary=self.settings.vol_adapt.crop_distance)

        # Extract diagnostic features from initial image and rois.
        self.extract_diagnostic_features(img_obj=img_obj, roi_list=roi_list, append_str="init")

        ################################################################################################
        # Update settings and initialise
        ################################################################################################

        # Copy settings for current iteration run - this allows local changes to curr_setting.
        curr_setting = copy.deepcopy(self.settings)

        # Update settings object with iterable settings.
        curr_setting.vol_adapt.rot_angles = [iter_set.rot_angle[ii]]
        curr_setting.img_interpolate.new_spacing = [iter_set.vox_spacing[ii]]
        curr_setting.vol_adapt.translate_x = [iter_set.translate_x[ii]]
        curr_setting.vol_adapt.translate_y = [iter_set.translate_y[ii]]
        curr_setting.vol_adapt.translate_z = [iter_set.translate_z[ii]]

        ################################################################################################
        # Bias field correction and normalisation
        ################################################################################################

        # FIX: tissue_mask was previously only assigned inside the guard below,
        # but normalise_image is called unconditionally with mask=tissue_mask,
        # raising NameError when both post-processing options are off.
        tissue_mask = None
        if curr_setting.post_process.bias_field_correction or \
                not curr_setting.post_process.intensity_normalisation == "none":
            tissue_mask = create_tissue_mask(img_obj=img_obj, settings=curr_setting)

        if curr_setting.post_process.bias_field_correction:
            # Perform bias field correction.
            img_obj = bias_field_correction(img_obj=img_obj, settings=curr_setting, mask=tissue_mask)

        # Normalise image. Assumed to be a no-op when the method is "none";
        # tissue_mask is None in that case.
        img_obj = normalise_image(
            img_obj=img_obj,
            norm_method=curr_setting.post_process.intensity_normalisation,
            intensity_range=curr_setting.post_process.intensity_normalisation_range,
            saturation_range=curr_setting.post_process.intensity_normalisation_saturation,
            mask=tissue_mask)

        ################################################################################################
        # Determine image noise levels (optional)
        ################################################################################################

        # Initialise noise level with place holder value.
        est_noise_level = -1.0

        # Determine image noise levels: estimate from image when no explicit
        # level is configured, otherwise use the configured level.
        if curr_setting.vol_adapt.add_noise and curr_setting.vol_adapt.noise_level is None \
                and est_noise_level == -1.0:
            est_noise_level = estimate_image_noise(img_obj=img_obj, settings=curr_setting, method="chang")
        elif curr_setting.vol_adapt.add_noise:
            est_noise_level = curr_setting.vol_adapt.noise_level

        ################################################################################################
        # Base image-based operations (rotation, cropping, noise addition).
        # Note interpolation and translation are performed simultaneously, and
        # interpolation is only done after application of spatial filters.
        ################################################################################################

        # Rotate object.
        img_obj, roi_list = rotate_image(img_obj=img_obj, roi_list=roi_list, settings=curr_setting)

        # Crop image to a box extending at most 15 cm around the combined ROI.
        if curr_setting.vol_adapt.crop:
            img_obj, roi_list = crop_image(img_obj=img_obj, roi_list=roi_list, boundary=150.0, z_only=False)

        # Add random noise to the image; noise_iter seeds per-iteration variation.
        if curr_setting.vol_adapt.add_noise:
            img_obj.add_noise(noise_level=est_noise_level, noise_iter=ii)

        ################################################################################################
        # Interpolation of base image
        ################################################################################################

        # Translate and interpolate image to isometric voxels.
        img_obj = interpolate_image(img_obj=img_obj, settings=curr_setting)
        roi_list = interpolate_roi(roi_list=roi_list, img_obj=img_obj, settings=curr_setting)
        self.extract_diagnostic_features(img_obj=img_obj, roi_list=roi_list, append_str="interp")

        ################################################################################################
        # ROI-based operations - these only affect the regions of interest
        ################################################################################################

        # Adapt roi sizes by dilation and erosion.
        roi_list = adapt_roi_size(roi_list=roi_list, settings=curr_setting)

        # Update roi using SLIC supervoxels.
        roi_list = randomise_roi_contours(roi_list=roi_list, img_obj=img_obj, settings=curr_setting)

        # Extract boundaries and tumour bulk.
        roi_list = divide_tumour_regions(roi_list=roi_list, settings=curr_setting)

        # Resegmentise ROI based on intensities in the base images.
        roi_list = resegmentise(img_obj=img_obj, roi_list=roi_list, settings=curr_setting)
        self.extract_diagnostic_features(img_obj=img_obj, roi_list=roi_list, append_str="reseg")

        ################################################################################################
        # Base image computations and exports
        ################################################################################################

        if self.extract_images:
            img_obj.export(file_path=self.write_path)
            for roi_obj in roi_list:
                roi_obj.export(img_obj=img_obj, file_path=self.write_path)

        iter_feat_list = []
        if self.compute_features:
            iter_feat_list.append(
                calculate_features(img_obj=img_obj, roi_list=roi_list, settings=curr_setting))

        ################################################################################################
        # Image transformations
        ################################################################################################

        if self.settings.img_transform.perform_img_transform:
            # Get image features from transformed images
            # (may be empty if no features are computed).
            iter_feat_list += transform_images(
                img_obj=img_obj,
                roi_list=roi_list,
                settings=curr_setting,
                compute_features=self.compute_features,
                extract_images=self.extract_images,
                file_path=self.write_path)

        ################################################################################################
        # Collect and combine features for current iteration
        ################################################################################################

        if self.compute_features:
            feat_list.append(
                self.collect_features(
                    img_obj=img_obj,
                    roi_list=roi_list,
                    feat_list=iter_feat_list,
                    settings=curr_setting))

        # Release large per-iteration objects before the next iteration.
        del img_obj, roi_list

    ####################################################################################################
    # Feature aggregation over settings
    ####################################################################################################

    if self.compute_features:

        # Strip empty entries.
        feat_list = [list_entry for list_entry in feat_list if list_entry is not None]

        # Check if features were extracted.
        if len(feat_list) == 0:
            logging.warning(self._message_warning_no_features_extracted())
            return None

        # Concatenate feature list.
        df_feat = pd.concat(feat_list, axis=0)

        # Build the output file name from non-empty components.
        file_name = "_".join([
            file_name_comp for file_name_comp in [
                self.subject, self.modality, self.data_str, self.date,
                self.settings.general.config_str, "features.csv"
            ] if file_name_comp != ""
        ]).replace(" ", "_")

        # Write to file.
        df_feat.to_csv(
            path_or_buf=os.path.join(self.write_path, file_name),
            sep=";",
            na_rep="NA",
            index=False,
            decimal=".")

    # Write successful completion to console or log.
    logging.info(self._message_feature_extraction_finished())
def randomise_roi_contours(roi_list, img_obj, settings):
    """Use SLIC supervoxels to generate randomised versions of the ROI contours.

    For each input ROI, supervoxels overlapping the contour are selected with a
    probability equal to their overlap fraction, producing
    settings.vol_adapt.roi_random_rep randomised replicas per ROI.

    :param roi_list: list of ROI objects to randomise.
    :param img_obj: image object the ROIs are defined on.
    :param settings: settings object; reads settings.vol_adapt.randomise_roi
        and settings.vol_adapt.roi_random_rep.
    :return: list of randomised ROI objects (or roi_list unchanged when
        randomisation is disabled).
    """
    # Check whether randomisation should take place.
    if not settings.vol_adapt.randomise_roi:
        return roi_list

    from mirp.utilities import world_to_index
    from scipy.ndimage import binary_closing

    new_roi_list = []

    # Iterate over roi objects.
    for roi_ind in np.arange(0, len(roi_list)):
        # Resect image to speed up the segmentation process.
        res_img_obj, res_roi_obj = crop_image(
            img_obj=img_obj, roi_obj=roi_list[roi_ind], boundary=25.0, z_only=False)

        # Check if the roi is empty. If so, add the number of required empty rois.
        if res_roi_obj.is_empty():
            for ii in np.arange(settings.vol_adapt.roi_random_rep):
                repl_roi = roi_list[roi_ind].copy()
                repl_roi.name += "_svx_" + str(ii)          # Adapt roi name
                repl_roi.svx_randomisation_id = ii + 1      # Update randomisation id
                new_roi_list.append(repl_roi)

            # Go on to the next roi in the roi list.
            continue

        # Get supervoxels.
        img_segments = get_supervoxels(img_obj=res_img_obj, roi_obj=res_roi_obj, settings=settings)

        # Determine overlap of supervoxels with contour.
        overlap_indices, overlap_fract, overlap_size = get_supervoxel_overlap(
            roi_obj=res_roi_obj, img_segments=img_segments)

        # Set the highest overlap to 1.0 to ensure selection of at least 1 supervoxel.
        overlap_fract[np.argmax(overlap_fract)] = 1.0

        # Include supervoxels with 90% coverage and exclude those with less than 20% coverage.
        overlap_fract[overlap_fract >= 0.90] = 1.0
        overlap_fract[overlap_fract < 0.20] = 0.0

        # Determine grid indices of the resected grid with respect to the original image grid.
        grid_origin = world_to_index(
            coord=res_img_obj.origin, origin=img_obj.origin, spacing=img_obj.spacing)
        # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; use builtin int.
        grid_origin = grid_origin.astype(int)

        # Iteratively create randomised regions of interest.
        for ii in np.arange(settings.vol_adapt.roi_random_rep):
            # Draw random numbers between 0.0 and 1.0.
            random_incl = np.random.random(size=len(overlap_fract))

            # Select those segments where the random number is less than the overlap
            # fraction - i.e. the fraction is the probability of selecting the supervoxel.
            incl_segments = overlap_indices[np.less(random_incl, overlap_fract)]

            # Replace randomised contour in original roi voxel space.
            # FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24; use builtin bool.
            roi_vox = np.zeros(shape=roi_list[roi_ind].roi.size, dtype=bool)
            roi_vox[grid_origin[0]: grid_origin[0] + res_roi_obj.roi.size[0],
                    grid_origin[1]: grid_origin[1] + res_roi_obj.roi.size[1],
                    grid_origin[2]: grid_origin[2] + res_roi_obj.roi.size[2]] = \
                np.reshape(np.in1d(np.ravel(img_segments), incl_segments), res_roi_obj.roi.size)

            # Apply binary closing to close gaps.
            roi_vox = binary_closing(input=roi_vox)

            # Update voxels in original roi, adapt name and set randomisation id.
            repl_roi = roi_list[roi_ind].copy()
            # Replace copied original contour with randomised contour.
            repl_roi.roi.set_voxel_grid(voxel_grid=roi_vox)
            repl_roi.name += "_svx_" + str(ii)          # Adapt roi name
            repl_roi.svx_randomisation_id = ii + 1      # Update randomisation id
            new_roi_list += [repl_roi]

    return new_roi_list