def store_inference_results(inference_result: InferencePipeline.Result,
                            config: SegmentationModelBase,
                            results_folder: Path,
                            image_header: ImageHeader) -> List[str]:
    """
    Store the segmentation, posteriors, and binary predictions into Nifti files.

    :param inference_result: The inference result for a given patient_id and epoch. Posteriors must be in
        (Classes x Z x Y x X) shape, segmentation in (Z, Y, X).
    :param config: The test configurations.
    :param results_folder: The folder where the predictions should be stored.
    :param image_header: The image header that was used in the input image.
    :return: A list of paths to the stored image files.
    """

    def create_file_path(_results_folder: Path, _file_name: str) -> Path:
        """
        Create a file path with the compressed Nifti extension.

        :param _results_folder: The results folder.
        :param _file_name: The name of the file.
        :return: A full path to the file inside the results folder.
        """
        file_path = _file_name + MedicalImageFileType.NIFTI_COMPRESSED_GZ.value
        return _results_folder / Path(file_path)

    # create the directory for the given patient inside the results dir
    patient_results_folder = get_patient_results_folder(results_folder, inference_result.patient_id)
    patient_results_folder.mkdir(exist_ok=True, parents=True)

    # write the segmentation to disk
    image_paths = [io_util.store_as_ubyte_nifti(
        image=inference_result.segmentation,
        header=image_header,
        file_name=str(create_file_path(patient_results_folder, "segmentation")))]

    class_names_and_indices = config.class_and_index_with_background().items()
    binaries = binaries_from_multi_label_array(inference_result.segmentation, config.number_of_classes)
    # rescale posteriors if required and save them
    for (class_name, index), binary in zip(class_names_and_indices, binaries):
        posterior = inference_result.posteriors[index, ...]

        # save the posterior map
        file_name = f"posterior_{class_name}"
        image_path = io_util.store_posteriors_as_nifti(
            image=posterior,
            header=image_header,
            file_name=str(create_file_path(patient_results_folder, file_name)))
        image_paths.append(image_path)

        # save the binary mask
        image_path = io_util.store_binary_mask_as_nifti(
            image=binary,
            header=image_header,
            file_name=str(create_file_path(patient_results_folder, class_name)))
        image_paths.append(image_path)

    # rescale and store the uncertainty map as Nifti
    image_path = io_util.store_posteriors_as_nifti(
        image=inference_result.uncertainty,
        header=image_header,
        file_name=str(create_file_path(patient_results_folder, "uncertainty")))
    image_paths.append(image_path)

    return image_paths
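
# Illustrative usage sketch, not part of the original module: shows how the files
# produced by store_inference_results could be written and logged. The helper name
# `log_stored_results` and its arguments are hypothetical; only
# store_inference_results itself comes from the code above.
def log_stored_results(inference_result: InferencePipeline.Result,
                       config: SegmentationModelBase,
                       image_header: ImageHeader) -> None:
    # Writes segmentation.nii.gz, one posterior_<class>.nii.gz and <class>.nii.gz
    # per class, plus uncertainty.nii.gz, under outputs/<patient_id>/
    written = store_inference_results(inference_result=inference_result,
                                      config=config,
                                      results_folder=Path("outputs"),
                                      image_header=image_header)
    for path in written:
        logging.info(f"Stored prediction file: {path}")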
def load_predictions(run_type: SurfaceDistanceRunType,
                     azure_config: AzureConfig,
                     model_config: SegmentationModelBase,
                     execution_mode: ModelExecutionMode,
                     extended_annotators: List[str],
                     outlier_range: float) -> List[Segmentation]:
    """
    For the given run type (IOV or outliers), instantiate and return a list of predicted Segmentations.

    :param run_type: Either SurfaceDistanceRunType.IOV or SurfaceDistanceRunType.OUTLIERS.
    :param azure_config: The AzureConfig used to locate and download run outputs.
    :param model_config: The configuration of the segmentation model.
    :param execution_mode: ModelExecutionMode: either Test, Train or Val.
    :param extended_annotators: List of annotators plus the model name, to load segmentations for.
    :param outlier_range: The number of standard deviations below the mean Dice score beyond which a
        result is considered an outlier.
    :return: A list of Segmentation objects.
    """
    predictions: List[Segmentation] = []
    if run_type == SurfaceDistanceRunType.OUTLIERS:
        first_child_run = sd_util.get_first_child_run(azure_config)
        output_dir = sd_util.get_run_output_dir(azure_config, model_config)
        metrics_path = sd_util.get_metrics_path(azure_config, model_config)

        # Load the downloaded metrics CSV as a dataframe and determine the worst performing outliers
        # for the given execution mode
        df = load_csv(metrics_path, [MetricsFileColumns.Patient.value, MetricsFileColumns.Structure.value])
        test_run_df = df[df['mode'] == execution_mode.value]
        worst_performers = get_worst_performing_outliers(test_run_df, outlier_range,
                                                         MetricsFileColumns.Dice.value,
                                                         max_n_outliers=-50)

        for subject_id, structure_name, dice_score, _ in worst_performers:
            subject_prefix = sd_util.get_subject_prefix(model_config, execution_mode, subject_id)
            # if not already present, download data for the subject
            download_run_outputs_by_prefix(blobs_prefix=subject_prefix,
                                           destination=output_dir,
                                           run=first_child_run)

            # build the path where the downloaded segmentation is expected
            segmentation_path = output_dir / subject_prefix / f"{structure_name}.nii.gz"
            predictions.append(Segmentation(structure_name=structure_name,
                                            subject_id=subject_id,
                                            segmentation_path=segmentation_path,
                                            dice_score=float(dice_score)))

    elif run_type == SurfaceDistanceRunType.IOV:
        subject_id = 0
        iov_dir = Path("outputs") / SurfaceDistanceRunType.IOV.value.lower()
        all_structs = model_config.class_and_index_with_background()
        structs_to_plot = [struct_name for struct_name in all_structs.keys()
                           if struct_name not in ['background', 'external']]
        for annotator in extended_annotators:
            for struct_name in structs_to_plot:
                segmentation_path = iov_dir / f"{struct_name + annotator}.nii.gz"
                if not segmentation_path.is_file():
                    logging.warning(f"No such file {segmentation_path}")
                    continue
                predictions.append(Segmentation(structure_name=struct_name,
                                                subject_id=subject_id,
                                                segmentation_path=segmentation_path,
                                                annotator=annotator))
    return predictions
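
# Illustrative usage sketch, not part of the original module: loads the Test-set
# outliers whose Dice scores fall more than one standard deviation below the mean.
# The wrapper `load_outlier_segmentations` is hypothetical; only load_predictions
# itself comes from the code above. The empty extended_annotators list is fine here
# because that argument is only used by the IOV branch.
def load_outlier_segmentations(azure_config: AzureConfig,
                               model_config: SegmentationModelBase) -> List[Segmentation]:
    return load_predictions(run_type=SurfaceDistanceRunType.OUTLIERS,
                            azure_config=azure_config,
                            model_config=model_config,
                            execution_mode=ModelExecutionMode.TEST,
                            extended_annotators=[],
                            outlier_range=1.0)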