def process_deep_learning(self,
                          output_slices=False,
                          crop_size=None,
                          center_crops_per_slice=True,
                          remove_empty_crops=True,
                          intensity_range=None,
                          normalisation="none",
                          as_numpy=False):

    import logging
    import sys  # Required because the logger below writes to sys.stdout.
    import copy
    from mirp.imageRead import load_image
    from mirp.imageProcess import estimate_image_noise, interpolate_image, interpolate_roi, \
        crop_image_to_size, saturate_image, normalise_image
    from mirp.imagePerturbations import rotate_image, adapt_roi_size, randomise_roi_contours
    from mirp.roiClass import merge_roi_objects
    from mirp.imagePlot import plot_image

    # Configure logger
    logging.basicConfig(
        format="%(levelname)s\t: %(processName)s \t %(asctime)s \t %(message)s",
        level=logging.INFO,
        stream=sys.stdout)

    # Notifications
    logging.info(
        "\nInitialising image and mask processing using %s images for %s.",
        self.modality + "_" + self.data_str + "_", self.subject)

    # Process input parameters.
    crop_as_3d = crop_size is None or len(crop_size) == 3

    # Set crop_size.
    if crop_size is None:
        crop_size = [np.nan, np.nan, np.nan]
    elif len(crop_size) == 1:
        crop_size = [np.nan, crop_size[0], crop_size[0]]
    elif len(crop_size) == 2:
        crop_size = [np.nan, crop_size[0], crop_size[1]]
    elif len(crop_size) == 3:
        crop_size = [crop_size[0], crop_size[1], crop_size[2]]
    else:
        raise ValueError(f"The crop_size parameter is longer than 3: {len(crop_size)}")

    # Ignore settings for center_crops_per_slice and remove_empty_crops for 3D crops.
    if crop_as_3d:
        center_crops_per_slice = False
        remove_empty_crops = False

    # Set default intensity ranges.
    if intensity_range is None:
        intensity_range = [np.nan, np.nan]
    elif len(intensity_range) > 2:
        raise ValueError(f"The intensity_range parameter is longer than 2: {len(intensity_range)}")

    # Get iterables from current settings which lead to different image adaptations
    iter_set, n_outer_iter, n_inner_iter = self.get_iterable_parameters(settings=self.settings)

    # Load image and roi
    if self.keep_images_in_memory:
        base_img_obj, base_roi_list = load_image(
            image_folder=self.image_folder,
            modality=self.modality,
            roi_folder=self.roi_folder,
            registration_image_folder=self.roi_reg_img_folder,
            image_name=self.image_file_name_pattern,
            roi_names=self.roi_names,
            registration_image_name=self.registration_image_file_name_pattern)

        self.set_image_name(img_obj=base_img_obj)
    else:
        base_img_obj = base_roi_list = None

    # Create lists for image objects and rois
    processed_image_list = []

    # Iterate over iterable settings
    for ii in np.arange(0, n_outer_iter):

        # Log current iteration
        if n_outer_iter * n_inner_iter > 1:
            if n_inner_iter > 1:
                logging.info(
                    "\nProcessing image and mask for %s to %s of %s adaptations.\n",
                    str(ii * n_inner_iter + 1),
                    str((ii + 1) * n_inner_iter),
                    str(n_outer_iter * n_inner_iter))
            else:
                logging.info(
                    "\nProcessing image and mask for %s of %s adaptations.\n",
                    str(ii + 1), str(n_outer_iter))
        else:
            logging.info("\nStarting image and mask processing.\n")

        ####################################################################################################
        # Load and pre-process image and roi
        ####################################################################################################

        # Use pre-loaded base image and roi_list (more memory used, but may be faster if loading over a
        # network), or load from disk.
        if self.keep_images_in_memory:
            img_obj = base_img_obj.copy()
            roi_list = copy.deepcopy(base_roi_list)
        else:
            # Read image and ROI segmentations
            img_obj, roi_list = load_image(
                image_folder=self.image_folder,
                modality=self.modality,
                roi_folder=self.roi_folder,
                registration_image_folder=self.roi_reg_img_folder,
                image_name=self.image_file_name_pattern,
                roi_names=self.roi_names,
                registration_image_name=self.registration_image_file_name_pattern)

            self.set_image_name(img_obj=img_obj)

        # Remove metadata
        img_obj.drop_metadata()
        for roi_obj in roi_list:
            roi_obj.drop_metadata()

        ####################################################################################################
        # Update settings and initialise
        ####################################################################################################

        # Copy settings for current iteration run - this allows local changes to curr_setting
        curr_setting = copy.deepcopy(self.settings)

        # Update settings object with iterable settings
        curr_setting.vol_adapt.rot_angles = [iter_set.rot_angle[ii]]
        curr_setting.img_interpolate.new_spacing = [iter_set.vox_spacing[ii]]
        curr_setting.vol_adapt.translate_x = [iter_set.translate_x[ii]]
        curr_setting.vol_adapt.translate_y = [iter_set.translate_y[ii]]
        curr_setting.vol_adapt.translate_z = [iter_set.translate_z[ii]]

        ####################################################################################################
        # Determine image noise levels (optional)
        ####################################################################################################

        # Initialise noise level with place holder value
        est_noise_level = -1.0

        # Determine image noise levels
        if curr_setting.vol_adapt.add_noise and curr_setting.vol_adapt.noise_level is None \
                and est_noise_level == -1.0:
            est_noise_level = estimate_image_noise(img_obj=img_obj, settings=curr_setting, method="chang")
        elif curr_setting.vol_adapt.add_noise:
            est_noise_level = curr_setting.vol_adapt.noise_level

        ####################################################################################################
        # Base image-based operations - basic operations on base image (rotation, cropping, noise addition)
        # Note interpolation and translation are performed simultaneously, and interpolation is only done
        # after application of spatial filters
        ####################################################################################################

        # Rotate object
        img_obj, roi_list = rotate_image(img_obj=img_obj, roi_list=roi_list, settings=curr_setting)

        # Add random noise to an image
        if curr_setting.vol_adapt.add_noise:
            img_obj.add_noise(noise_level=est_noise_level, noise_iter=ii)

        ####################################################################################################
        # Interpolation of base image
        ####################################################################################################

        # Translate and interpolate image to isometric voxels
        img_obj = interpolate_image(img_obj=img_obj, settings=curr_setting)
        roi_list = interpolate_roi(roi_list=roi_list, img_obj=img_obj, settings=curr_setting)

        ####################################################################################################
        # ROI-based operations
        # These operations only affect the regions of interest
        ####################################################################################################

        # Adapt roi sizes by dilation and erosion
        roi_list = adapt_roi_size(roi_list=roi_list, settings=curr_setting)
        # Update roi using SLIC
        roi_list = randomise_roi_contours(roi_list=roi_list, img_obj=img_obj, settings=curr_setting)

        ####################################################################################################
        # Standardise output
        ####################################################################################################

        # Set intensity range
        img_obj = saturate_image(img_obj=img_obj, intensity_range=intensity_range, fill_value=None)

        # Normalise the image to a standard range
        img_obj = normalise_image(img_obj=img_obj, norm_method=normalisation, intensity_range=intensity_range)

        ####################################################################################################
        # Collect output
        ####################################################################################################

        # Merge ROIs
        roi_obj = merge_roi_objects(roi_list=roi_list)

        # Crop slices
        if crop_as_3d:
            # Create 3D crop.
            img_obj, roi_obj = crop_image_to_size(img_obj=img_obj, crop_size=crop_size, roi_obj=roi_obj)

            img_list = [img_obj]
            roi_list = [roi_obj]

        elif not center_crops_per_slice:
            # Create 3D crop, then chop into slices.
            img_obj, roi_obj = crop_image_to_size(img_obj=img_obj, crop_size=crop_size, roi_obj=roi_obj)

            img_list = img_obj.get_slices()
            roi_list = roi_obj.get_slices()

        else:
            # Create 2D crops that are centered on the ROI.
            img_list = []
            roi_list = []

            for jj in np.arange(img_obj.size[0]):
                slice_img_obj, slice_roi_obj = crop_image_to_size(
                    img_obj=img_obj.get_slices(slice_number=jj)[0],
                    roi_obj=roi_obj.get_slices(slice_number=jj)[0],
                    crop_size=crop_size)

                img_list += [slice_img_obj]
                roi_list += [slice_roi_obj]

        # Iterate over list to remove empty slices.
        if remove_empty_crops and not crop_as_3d:
            slice_empty = [slice_roi_obj.is_empty() for slice_roi_obj in roi_list]

            img_list = [img_list[jj] for jj in range(len(slice_empty)) if not slice_empty[jj]]
            roi_list = [roi_list[jj] for jj in range(len(slice_empty)) if not slice_empty[jj]]

        # Convert 3D crops to axial slices.
        if crop_as_3d and output_slices:
            img_list = img_list[0].get_slices()
            roi_list = roi_list[0].get_slices()

        # Check consistency
        if len(img_list) == 0:
            warn("No valid, non-empty image crops were created. A ROI may be missing?")
            return None

        if all([slice_roi_obj.is_empty() for slice_roi_obj in roi_list]):
            warn("No image crops were created that contain a mask. A ROI may be missing?")
            return None

        # Update the name of the images.
        for slice_img_obj in img_list:
            slice_img_obj.name = img_obj.name

        # Plot images
        if self.plot_images:
            for jj in np.arange(len(img_list)):
                # Generate a file name that depends on the number of list elements.
                file_name = "plot" if len(img_list) == 1 else "plot_" + str(jj)

                # Plot images.
                plot_image(img_obj=img_list[jj],
                           roi_list=[roi_list[jj]],
                           slice_id="all",
                           file_path=self.write_path,
                           file_name=file_name,
                           g_range=[np.nan, np.nan])

        # Convert to numpy arrays, if required.
        if as_numpy:
            img_list = [np.squeeze(slice_img_obj.get_voxel_grid()) for slice_img_obj in img_list]
            roi_list = [np.squeeze(slice_roi_obj.roi.get_voxel_grid()) for slice_roi_obj in roi_list]

        # Store the processed imaging for the current adaptation.
        for jj in np.arange(len(img_list)):
            processed_image_list += [{
                "name": img_obj.name,
                "image": img_list[jj],
                "mask": roi_list[jj]
            }]

    # Return list of processed images and masks
    return processed_image_list
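# A minimal usage sketch for the crop-based process_deep_learning() above. The helper name, the
# `experiment` argument (assumed to be an already configured instance of the class that defines this
# method, with image_folder, roi_folder, roi_names and settings set) and the crop size and intensity
# range values are illustrative assumptions, not part of the library API.
def _example_extract_deep_learning_crops(experiment):
    # Request 2D crops of 64 x 64 voxels, centred per slice on the ROI, with empty crops removed and the
    # output converted to numpy arrays.
    crops = experiment.process_deep_learning(crop_size=[64, 64],
                                             center_crops_per_slice=True,
                                             remove_empty_crops=True,
                                             intensity_range=[-200.0, 200.0],
                                             normalisation="none",
                                             as_numpy=True)
    if crops is None:
        # No valid crops were produced, e.g. because a ROI was missing.
        return None, None

    # Each entry is a dict with "name", "image" and "mask"; with as_numpy=True the image and mask are
    # 2D numpy arrays, so they can be stacked into (n_crops, y, x) arrays.
    images = np.stack([crop["image"] for crop in crops])
    masks = np.stack([crop["mask"] for crop in crops])

    return images, masks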
def find_imaging_parameters_deprecated(image_folder,
                                       modality,
                                       subject,
                                       plot_images,
                                       write_folder,
                                       roi_folder=None,
                                       roi_reg_img_folder=None,
                                       settings=None,
                                       roi_names=None):
    """
    :param image_folder: path; path to folder containing image data.
    :param modality: string; identifies modality of the image in the image folder.
    :param subject: string; name of the subject.
    :param plot_images: string ("single", "all_roi" or "all") or bool; flag to set image extraction. An
        image is created at the center of each ROI.
    :param write_folder: path; path to folder where the analysis should be written.
    :param roi_folder: path; path to folder containing the region of interest definitions.
    :param roi_reg_img_folder: path; path to folder containing image data on which the region of interest
        was originally created. If None, it is assumed that the image in image_folder was used to define
        the roi.
    :param settings: settings object; used for noise estimation and to set the plotting intensity range.
    :param roi_names: name or list of names of the region(s) of interest.
    :return: single-row pandas DataFrame with the parsed image metadata.
    """

    from mirp.imagePlot import plot_image
    from mirp.imageMetaData import get_meta_data
    from mirp.imageProcess import estimate_image_noise

    # Convert a single input modality to list
    if type(modality) is str:
        modality = [modality]

    # Read image characteristics
    df_img_char = read_basic_image_characteristics(image_folder=image_folder)

    # Remove non-modality objects
    df_img_char = df_img_char.loc[np.logical_and(df_img_char.modality.isin(modality),
                                                 df_img_char.file_type.isin(["dicom"]))]

    if len(df_img_char) == 0:
        logging.warning("No dicom images with modality %s were found for %s.", modality[0], subject)
        return None

    # Check if image parameters need to be read within roi slices
    if roi_names is not None:
        img_obj, roi_list = load_image(image_folder=image_folder,
                                       roi_folder=roi_folder,
                                       registration_image_folder=roi_reg_img_folder,
                                       modality=modality,
                                       roi_names=roi_names)

        # Register rois to image
        for ii in np.arange(len(roi_list)):
            roi_list[ii].register(img_obj=img_obj)

    else:
        roi_list = None

    # Read meta tags
    if modality[0] in ["CT", "PT", "MR"]:
        df_meta = get_meta_data(image_file_list=df_img_char.file_path.values.tolist(), modality=modality)
    else:
        logging.warning("Dicom images could not be analysed for provided modality.")
        return None

    df_meta["subject"] = subject
    df_meta["folder"] = image_folder

    if roi_names is not None:
        df_meta["noise"] = estimate_image_noise(img_obj=img_obj, settings=settings, method="chang")

    # Plot images
    if isinstance(plot_images, str):
        if plot_images == "single":
            plot_image(img_obj=img_obj,
                       roi_list=roi_list,
                       slice_id="roi_center",
                       file_path=write_folder,
                       file_name=subject + "_" + modality[0],
                       g_range=settings.roi_resegment.g_thresh)
        elif plot_images == "all_roi":
            plot_image(img_obj=img_obj,
                       roi_list=roi_list,
                       slice_id="all_roi",
                       file_path=write_folder,
                       file_name=subject + "_" + modality[0],
                       g_range=settings.roi_resegment.g_thresh)
        elif plot_images == "all":
            plot_image(img_obj=img_obj,
                       roi_list=roi_list,
                       slice_id="all",
                       file_path=write_folder,
                       file_name=subject + "_" + modality[0],
                       g_range=settings.roi_resegment.g_thresh)
    else:
        if plot_images:
            plot_image(img_obj=img_obj,
                       roi_list=roi_list,
                       slice_id="roi_center",
                       file_path=write_folder,
                       file_name=subject + "_" + modality[0],
                       g_range=settings.roi_resegment.g_thresh)

    # Write table to single file for case-by-case analysis
    df_meta.to_frame().T.to_csv(
        path_or_buf=os.path.normpath(os.path.join(write_folder,
                                                  subject + "_" + modality[0] + "_meta_data.csv")),
        sep=";",
        na_rep="NA",
        index=False,
        decimal=".")

    return df_meta.to_frame().T
def process_deep_learning(self, output_slices=False):

    import logging
    import copy
    from mirp.imageRead import load_image
    from mirp.imageProcess import estimate_image_noise, interpolate_image, interpolate_roi, \
        crop_image_to_size, saturate_image, normalise_image
    from mirp.imagePerturbations import rotate_image, adapt_roi_size, randomise_roi_contours
    from mirp.imagePlot import plot_image

    # Configure logger
    logging.basicConfig(
        format="%(levelname)s\t: %(processName)s \t %(asctime)s \t %(message)s",
        level=logging.INFO)

    # Notifications
    logging.info("Initialising image and mask processing using %s images for %s.",
                 self.modality + "_" + self.data_str + "_", self.subject)

    # Get iterables from current settings which lead to different image adaptations
    iter_set, n_outer_iter, n_inner_iter = self.get_iterable_parameters(settings=self.settings)

    # Load image and roi
    if self.keep_images_in_memory:
        base_img_obj, base_roi_list = load_image(
            image_folder=self.image_folder,
            modality=self.modality,
            roi_folder=self.roi_folder,
            registration_image_folder=self.roi_reg_img_folder,
            image_name=self.image_file_name_pattern,
            roi_names=self.roi_names,
            registration_image_name=self.registration_image_file_name_pattern)

        self.set_image_name(img_obj=base_img_obj)
    else:
        base_img_obj = base_roi_list = None

    # Create lists for image objects and rois
    processed_image_list = []

    # Iterate over iterable settings
    for ii in np.arange(0, n_outer_iter):

        # Log current iteration
        if n_outer_iter * n_inner_iter > 1:
            if n_inner_iter > 1:
                logging.info("Processing image and mask for %s to %s of %s adaptations.",
                             str(ii * n_inner_iter + 1),
                             str((ii + 1) * n_inner_iter),
                             str(n_outer_iter * n_inner_iter))
            else:
                logging.info("Processing image and mask for %s of %s adaptations.",
                             str(ii + 1), str(n_outer_iter))
        else:
            logging.info("Starting image and mask processing.")

        ####################################################################################################
        # Load and pre-process image and roi
        ####################################################################################################

        # Use pre-loaded base image and roi_list (more memory used, but may be faster if loading over a
        # network), or load from disk.
        if self.keep_images_in_memory:
            img_obj = base_img_obj.copy()
            roi_list = copy.deepcopy(base_roi_list)
        else:
            # Read image and ROI segmentations
            img_obj, roi_list = load_image(
                image_folder=self.image_folder,
                modality=self.modality,
                roi_folder=self.roi_folder,
                registration_image_folder=self.roi_reg_img_folder,
                image_name=self.image_file_name_pattern,
                roi_names=self.roi_names,
                registration_image_name=self.registration_image_file_name_pattern)

            self.set_image_name(img_obj=img_obj)

        ####################################################################################################
        # Update settings and initialise
        ####################################################################################################

        # Copy settings for current iteration run - this allows local changes to curr_setting
        curr_setting = copy.deepcopy(self.settings)

        # Update settings object with iterable settings
        curr_setting.vol_adapt.rot_angles = [iter_set.rot_angle[ii]]
        curr_setting.img_interpolate.new_spacing = [iter_set.vox_spacing[ii]]
        curr_setting.vol_adapt.translate_x = [iter_set.translate_x[ii]]
        curr_setting.vol_adapt.translate_y = [iter_set.translate_y[ii]]
        curr_setting.vol_adapt.translate_z = [iter_set.translate_z[ii]]

        ####################################################################################################
        # Determine image noise levels (optional)
        ####################################################################################################

        # Initialise noise level with place holder value
        est_noise_level = -1.0

        # Determine image noise levels
        if curr_setting.vol_adapt.add_noise and curr_setting.vol_adapt.noise_level is None \
                and est_noise_level == -1.0:
            est_noise_level = estimate_image_noise(img_obj=img_obj, settings=curr_setting, method="chang")
        elif curr_setting.vol_adapt.add_noise:
            est_noise_level = curr_setting.vol_adapt.noise_level

        ####################################################################################################
        # Base image-based operations - basic operations on base image (rotation, cropping, noise addition)
        # Note interpolation and translation are performed simultaneously, and interpolation is only done
        # after application of spatial filters
        ####################################################################################################

        # Rotate object
        img_obj, roi_list = rotate_image(img_obj=img_obj, roi_list=roi_list, settings=curr_setting)

        # Add random noise to an image
        if curr_setting.vol_adapt.add_noise:
            img_obj.add_noise(noise_level=est_noise_level, noise_iter=ii)

        ####################################################################################################
        # Interpolation of base image
        ####################################################################################################

        # Translate and interpolate image to isometric voxels
        img_obj = interpolate_image(img_obj=img_obj, settings=curr_setting)
        roi_list = interpolate_roi(roi_list=roi_list, img_obj=img_obj, settings=curr_setting)

        ####################################################################################################
        # ROI-based operations
        # These operations only affect the regions of interest
        ####################################################################################################

        # Adapt roi sizes by dilation and erosion
        roi_list = adapt_roi_size(roi_list=roi_list, settings=curr_setting)

        # Update roi using SLIC
        roi_list = randomise_roi_contours(roi_list=roi_list, img_obj=img_obj, settings=curr_setting)

        ####################################################################################################
        # Standardise output
        ####################################################################################################

        # Crop image
        img_obj, roi_list = crop_image_to_size(
            img_obj=img_obj,
            crop_size=curr_setting.deep_learning.expected_size,
            roi_list=roi_list)

        # Set intensity range
        img_obj = saturate_image(
            img_obj=img_obj,
            intensity_range=curr_setting.deep_learning.intensity_range,
            fill_value=None)

        # Normalise the image to a standard range
        img_obj = normalise_image(
            img_obj=img_obj,
            norm_method=curr_setting.deep_learning.normalisation,
            intensity_range=curr_setting.deep_learning.intensity_range)

        ####################################################################################################
        # Collect output
        ####################################################################################################

        if self.extract_images:
            img_obj.export(file_path=self.write_path)
            for roi_obj in roi_list:
                roi_obj.export(img_obj=img_obj, file_path=self.write_path)

        # Store processed imaging
        if output_slices:
            # 2D slices
            slice_img_obj_list = img_obj.get_slices()
            for jj in np.arange(len(slice_img_obj_list)):
                for roi_obj in roi_list:
                    processed_image_list += [{
                        "image": slice_img_obj_list[jj],
                        "mask": roi_obj.get_slices(slice_number=jj)
                    }]
        else:
            # 3D volumes
            for roi_obj in roi_list:
                processed_image_list += [{
                    "image": img_obj,
                    "mask": roi_obj
                }]

        # Plot images
        if self.plot_images:
            plot_image(img_obj=img_obj,
                       roi_list=roi_list,
                       slice_id="all",
                       file_path=self.write_path,
                       file_name="plot",
                       g_range=[np.nan, np.nan])

    # Return list of processed images and masks
    return processed_image_list
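# A minimal sketch of how the output of this process_deep_learning() variant might be consumed. It assumes
# `experiment` is an already configured instance of the class that defines the method above, and that the
# image and roi objects expose get_voxel_grid() as used elsewhere in this file; the helper name and these
# assumptions are illustrative only.
def _example_collect_deep_learning_volumes(experiment):
    # Process full 3D volumes; one entry is produced per ROI per image adaptation.
    samples = experiment.process_deep_learning(output_slices=False)

    volumes = []
    for sample in samples:
        # Convert image and mask objects to numpy arrays for use in a deep learning pipeline.
        image_voxels = sample["image"].get_voxel_grid()
        mask_voxels = sample["mask"].roi.get_voxel_grid()
        volumes += [(image_voxels, mask_voxels)]

    return volumes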
def find_imaging_parameters(image_folder,
                            modality,
                            subject,
                            plot_images,
                            write_folder,
                            roi_folder=None,
                            registration_image_folder=None,
                            settings=None,
                            roi_names=None):
    """
    :param image_folder: path; path to folder containing image data.
    :param modality: string; identifies modality of the image in the image folder.
    :param subject: string; name of the subject.
    :param plot_images: string ("single", "all_roi" or "all") or bool; flag to set image extraction. An
        image is created at the center of each ROI.
    :param write_folder: path; path to folder where the analysis should be written.
    :param roi_folder: path; path to folder containing the region of interest definitions.
    :param registration_image_folder: path; path to folder containing image data on which the region of
        interest was originally created. If None, it is assumed that the image in image_folder was used to
        define the roi.
    :param settings: settings object; used to derive the intensity range for plotting.
    :param roi_names: name or list of names of the region(s) of interest.
    :return: single-row pandas DataFrame with the parsed image metadata.
    """

    from mirp.imagePlot import plot_image
    from mirp.imageMetaData import get_meta_data
    from mirp.imageProcess import estimate_image_noise

    # Read DICOM series
    img_obj: ImageClass = read_dicom_image_series(image_folder=image_folder, modality=modality)

    # Load registration image
    if registration_image_folder == image_folder or registration_image_folder is None:
        img_reg_obj = img_obj
    else:
        img_reg_obj: ImageClass = read_dicom_image_series(image_folder=registration_image_folder,
                                                          modality=modality)

    # Load segmentations
    roi_list = read_dicom_rt_struct(dcm_folder=roi_folder, image_object=img_reg_obj, roi=roi_names)

    # Load dicom headers for all slices in the image object.
    dcm_list = get_all_dicom_headers(image_folder=image_folder,
                                     modality=modality,
                                     sop_instance_uid=img_obj.slice_table.sop_instance_uid.values)

    # Parse metadata
    metadata_table = get_meta_data(dcm_list=dcm_list, modality=modality)

    # Add sample identifier, folder and image noise
    metadata_table["subject"] = subject
    metadata_table["folder"] = image_folder
    metadata_table["noise"] = estimate_image_noise(img_obj=img_obj, settings=None, method="chang")

    # Find the segmentation range.
    if settings is None:
        g_range = None
    else:
        g_range = settings.roi_resegment.g_thresh

    # Plot images
    if isinstance(plot_images, str):
        if plot_images == "single":
            plot_image(img_obj=img_obj,
                       roi_list=roi_list,
                       slice_id="roi_center",
                       file_path=write_folder,
                       file_name=subject + "_" + modality,
                       g_range=g_range)
        elif plot_images == "all_roi":
            plot_image(img_obj=img_obj,
                       roi_list=roi_list,
                       slice_id="all_roi",
                       file_path=write_folder,
                       file_name=subject + "_" + modality,
                       g_range=g_range)
        elif plot_images == "all":
            plot_image(img_obj=img_obj,
                       roi_list=roi_list,
                       slice_id="all",
                       file_path=write_folder,
                       file_name=subject + "_" + modality,
                       g_range=g_range)
    elif isinstance(plot_images, bool):
        if plot_images:
            plot_image(img_obj=img_obj,
                       roi_list=roi_list,
                       slice_id="roi_center",
                       file_path=write_folder,
                       file_name=subject + "_" + modality,
                       g_range=g_range)
    else:
        raise TypeError("plot_images is expected to be a string or boolean.")

    # Write table to single file for case-by-case analysis
    metadata_table.to_frame().T.to_csv(
        path_or_buf=os.path.normpath(os.path.join(write_folder, subject + "_" + modality + "_meta_data.csv")),
        sep=";",
        na_rep="NA",
        index=False,
        decimal=".")

    return metadata_table.to_frame().T
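# A minimal, illustrative call to find_imaging_parameters(). The helper name, folder layout, subject name
# and ROI name are hypothetical example values; settings is left as None, in which case no resegmentation
# range is used for plotting.
def _example_find_imaging_parameters():
    # Parse DICOM metadata and image noise for a single CT series and write the result to disk as a
    # ";"-separated file; a single plot is created at the ROI center.
    metadata = find_imaging_parameters(image_folder="/data/subject_001/CT/image",
                                       modality="CT",
                                       subject="subject_001",
                                       plot_images="single",
                                       write_folder="/data/subject_001/CT/metadata",
                                       roi_folder="/data/subject_001/CT/rtstruct",
                                       roi_names=["GTV"])

    return metadata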