def plot_contours_for_all_classes(sample: Sample,
                                  segmentation: np.ndarray,
                                  foreground_class_names: List[str],
                                  result_folder: Path,
                                  result_prefix: str = "",
                                  image_range: Optional[TupleFloat2] = None,
                                  channel_index: int = 0) -> List[Path]:
    """
    Writes one PNG per foreground class, overlaying the ground truth and the predicted
    segmentation contours on the image. For each class, the plot shows the Z slice on which
    the ground truth for that class has the most pixels.

    :param sample: The image sample, with the photonormalized image and the ground truth labels.
    :param segmentation: The predicted segmentation: multi-value, size Z x Y x X.
    :param foreground_class_names: The names of all classes, excluding the background class.
    :param result_folder: The folder into which the resulting plot PNG files should be written.
    :param result_prefix: A string prefix that will be used for all plots.
    :param image_range: The minimum and maximum image values that will be mapped to the color map
        ranges. If None, use the actual min and max values.
    :param channel_index: The index of the image channel that should be plotted.
    :return: The paths to all generated PNG files.
    :raises ValueError: If the number of foreground class names disagrees with the label tensor.
    """
    check_size_matches(sample.labels[0], segmentation)
    num_classes = sample.labels.shape[0]
    if len(foreground_class_names) != num_classes - 1:
        raise ValueError(
            f"Labels tensor indicates {num_classes} classes, but got {len(foreground_class_names)} foreground "
            f"class names: {foreground_class_names}")
    channel_image = sample.image[channel_index, ...]
    # Ground truth is drawn as a solid red contour, the prediction as a dashed blue one.
    contour_arguments = [{'colors': 'r'},
                         {'colors': 'b', 'linestyles': 'dashed'}]
    class_binaries = binaries_from_multi_label_array(segmentation, num_classes)
    # Numeric patient IDs are zero-padded to 3 digits; string IDs are used verbatim.
    patient_id = sample.patient_id
    patient_id_str = patient_id if isinstance(patient_id, str) else f"{patient_id:03d}"
    generated_files: List[Path] = []
    # Entry 0 of the binaries is the background class, which is not plotted.
    for class_index, class_binary in enumerate(class_binaries[1:], start=1):
        gt_for_class = sample.labels[class_index, ...]
        largest_gt_slice = get_largest_z_slice(gt_for_class)
        class_name = foreground_class_names[class_index - 1]
        filename_stem = f"{result_prefix}{patient_id_str}_{class_name}_slice_{largest_gt_slice:03d}"
        plot_file = plot_image_and_label_contour(
            image=channel_image[largest_gt_slice, ...],
            labels=[gt_for_class[largest_gt_slice], class_binary[largest_gt_slice, ...]],
            contour_arguments=contour_arguments,
            image_range=image_range,
            plot_file_name=result_folder / filename_stem)
        generated_files.append(plot_file)
    return generated_files
def store_inference_results(inference_result: InferencePipeline.Result,
                            config: SegmentationModelBase,
                            results_folder: Path,
                            image_header: ImageHeader) -> List[str]:
    """
    Store the segmentation, posteriors, and binary predictions into Nifti files.

    :param inference_result: The inference result for a given patient_id and epoch. Posteriors must
        be in (Classes x Z x Y x X) shape, segmentation in (Z, Y, X).
    :param config: The test configurations.
    :param results_folder: The folder where the prediction should be stored.
    :param image_header: The image header that was used in the input image.
    :return: The paths of all Nifti files that were written.
    """
    def create_file_path(_results_folder: Path, _file_name: str) -> Path:
        """Returns _file_name inside _results_folder, with the compressed Nifti extension appended."""
        return _results_folder / Path(_file_name + MedicalImageFileType.NIFTI_COMPRESSED_GZ.value)

    # Every patient gets a dedicated subdirectory inside the results folder.
    patient_results_folder = get_patient_results_folder(results_folder, inference_result.patient_id)
    patient_results_folder.mkdir(exist_ok=True, parents=True)
    image_paths = []
    # The multi-value segmentation map is written first.
    image_paths.append(io_util.store_as_ubyte_nifti(
        image=inference_result.segmentation,
        header=image_header,
        file_name=str(create_file_path(patient_results_folder, "segmentation"))))
    class_names_and_indices = config.class_and_index_with_background().items()
    binaries = binaries_from_multi_label_array(inference_result.segmentation, config.number_of_classes)
    for (class_name, index), binary in zip(class_names_and_indices, binaries):
        # Posterior map for this class, rescaled if required by the storage helper.
        posterior = inference_result.posteriors[index, ...]
        image_paths.append(io_util.store_posteriors_as_nifti(
            image=posterior,
            header=image_header,
            file_name=str(create_file_path(patient_results_folder, "posterior_{}".format(class_name)))))
        # Binary mask for this class, named after the class itself.
        image_paths.append(io_util.store_binary_mask_as_nifti(
            image=binary,
            header=image_header,
            file_name=str(create_file_path(patient_results_folder, class_name))))
    # Uncertainty map is rescaled and stored like a posterior.
    image_paths.append(io_util.store_posteriors_as_nifti(
        image=inference_result.uncertainty,
        header=image_header,
        file_name=str(create_file_path(patient_results_folder, "uncertainty"))))
    return image_paths
def calculate_metrics_per_class(segmentation: np.ndarray,
                                ground_truth: np.ndarray,
                                ground_truth_ids: List[str],
                                voxel_spacing: TupleFloat3,
                                patient_id: Optional[int] = None) -> MetricsDict:
    """
    Calculate the dice for all foreground structures (the background class is completely ignored).
    Returns a MetricsDict with metrics for each of the foreground structures. Metrics are NaN if
    both ground truth and prediction are all zero for a class; distances are infinity if exactly
    one of the two is all zero.

    :param segmentation: predictions multi-value array with dimensions: [Z x Y x X]
    :param ground_truth: ground truth binary array with dimensions: [C x Z x Y x X]
    :param ground_truth_ids: The names of all foreground classes.
    :param voxel_spacing: voxel_spacing in 3D Z x Y x X
    :param patient_id: for logging
    :return: A MetricsDict with Dice, Hausdorff distance, and mean surface distance per class.
    :raises ValueError: If the class counts disagree, or ground truth / prediction is not binary.
    """
    number_of_classes = ground_truth.shape[0]
    if len(ground_truth_ids) != (number_of_classes - 1):
        raise ValueError(f"Received {len(ground_truth_ids)} foreground class names, but "
                         f"the label tensor indicates that there are {number_of_classes - 1} classes.")
    # Split the multi-value segmentation into one binary map per class (background at index 0).
    binaries = binaries_from_multi_label_array(segmentation, number_of_classes)

    all_classes_are_binary = [is_binary_array(ground_truth[label_id])
                              for label_id in range(ground_truth.shape[0])]
    if not np.all(all_classes_are_binary):
        raise ValueError("Ground truth values should be 0 or 1")
    # The SimpleITK filter objects are re-used for every class.
    overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()
    hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
    metrics = MetricsDict(hues=ground_truth_ids)
    for i, prediction in enumerate(binaries):
        # Index 0 is the background class, which is not scored.
        if i == 0:
            continue
        check_size_matches(prediction, ground_truth[i], arg1_name="prediction", arg2_name="ground_truth")
        if not is_binary_array(prediction):
            raise ValueError("Predictions values should be 0 or 1")
        # simpleitk returns a Dice score of 0 if both ground truth and prediction are all zeros.
        # We want to be able to fish out those cases, and treat them specially later.
        prediction_zero = np.all(prediction == 0)
        gt_zero = np.all(ground_truth[i] == 0)
        # Default: all three metrics stay NaN when both prediction and ground truth are empty.
        dice = mean_surface_distance = hausdorff_distance = math.nan
        if not (prediction_zero and gt_zero):
            # Convert to SimpleITK images; the spacing tuple is reversed (reverse_tuple_float3),
            # presumably to match SimpleITK's axis ordering — confirm against the helper.
            prediction_image = sitk.GetImageFromArray(prediction.astype(np.uint8))
            prediction_image.SetSpacing(sitk.VectorDouble(reverse_tuple_float3(voxel_spacing)))
            ground_truth_image = sitk.GetImageFromArray(ground_truth[i].astype(np.uint8))
            ground_truth_image.SetSpacing(sitk.VectorDouble(reverse_tuple_float3(voxel_spacing)))
            overlap_measures_filter.Execute(prediction_image, ground_truth_image)
            dice = overlap_measures_filter.GetDiceCoefficient()
            if prediction_zero or gt_zero:
                # Exactly one of the two maps is empty: surface distances are undefined, use infinity.
                hausdorff_distance = mean_surface_distance = math.inf
            else:
                # Distance computations can fail inside SimpleITK; treat them as best-effort and
                # leave the corresponding metric at NaN, logging a warning instead of raising.
                try:
                    hausdorff_distance_filter.Execute(prediction_image, ground_truth_image)
                    hausdorff_distance = hausdorff_distance_filter.GetHausdorffDistance()
                except Exception as e:
                    logging.warning("Cannot calculate Hausdorff distance for "
                                    f"structure {i} of patient {patient_id}: {e}")
                try:
                    mean_surface_distance = surface_distance(prediction_image, ground_truth_image)
                except Exception as e:
                    logging.warning(f"Cannot calculate mean distance for structure {i} of patient {patient_id}: {e}")
        logging.debug(f"Patient {patient_id}, class {i} has Dice score {dice}")

        def add_metric(metric_type: MetricType, value: float) -> None:
            # NaN values are skipped when averaging, so empty-vs-empty classes do not skew means.
            metrics.add_metric(metric_type, value, skip_nan_when_averaging=True, hue=ground_truth_ids[i - 1])

        add_metric(MetricType.DICE, dice)
        add_metric(MetricType.HAUSDORFF_mm, hausdorff_distance)
        add_metric(MetricType.MEAN_SURFACE_DIST_mm, mean_surface_distance)
    return metrics