# Example #1
def evaluate_model_predictions(
        process_id: int, config: SegmentationModelBase,
        dataset: FullImageDataset,
        results_folder: Path) -> Tuple[PatientMetadata, MetricsDict]:
    """
    Evaluate one patient's stored segmentation prediction: compute per-class metrics
    (Dice, surface distances) against the ground-truth labels, and save contour-plot
    thumbnails into the results folder.

    Intended to be called once per image from a parallel for-loop.

    :param process_id: Identifier for the calling process; also used as the dataset
        index of the patient to evaluate.
    :param config: Segmentation model configuration object.
    :param dataset: Dataset used to load the intensity image, labels, and patient metadata.
    :param results_folder: Folder containing the per-patient predictions; thumbnails are
        written beneath it.
    :return: Tuple of the patient's metadata and the per-class metrics for this image.
    """
    sample = dataset.get_samples_at_index(index=process_id)[0]
    logging.info(f"Evaluating predictions for patient {sample.patient_id}")
    # Load the segmentation previously written for this patient.
    patient_folder = get_patient_results_folder(results_folder, sample.patient_id)
    segmentation = load_nifti_image(patient_folder / DEFAULT_RESULT_IMAGE_NAME).image
    # Ensure the thumbnail output location exists before plotting into it.
    thumbnails_folder = results_folder / THUMBNAILS_FOLDER
    thumbnails_folder.mkdir(exist_ok=True)
    metrics_per_class = metrics.calculate_metrics_per_class(
        segmentation,
        sample.labels,
        ground_truth_ids=config.ground_truth_ids,
        voxel_spacing=sample.image_spacing,
        patient_id=sample.patient_id)
    plotting.plot_contours_for_all_classes(
        sample,
        segmentation=segmentation,
        foreground_class_names=config.ground_truth_ids,
        result_folder=thumbnails_folder,
        image_range=config.output_range)
    return sample.metadata, metrics_per_class
 def assert_metrics(prediction: np.ndarray, ground_truth: np.ndarray, expected: Optional[float],
                    voxel_spacing: TupleFloat3 = (1, 1, 1)) -> float:
     m = metrics.calculate_metrics_per_class(prediction, ground_truth, voxel_spacing=voxel_spacing,
                                             ground_truth_ids=[g1])
     actual = m.get_single_metric(MetricType.HAUSDORFF_mm, hue=g1)
     if expected is not None:
         assert actual == expected
     return actual
 def assert_metrics(segmentation: np.ndarray, ground_truth: np.ndarray,
                    expected_dice: float) -> None:
     a = metrics.calculate_metrics_per_class(segmentation,
                                             ground_truth,
                                             voxel_spacing=(1, 1, 1),
                                             ground_truth_ids=[g1])
     assert a.get_hue_names(include_default=False) == [g1]
     assert equal_respecting_nan(
         a.get_single_metric(MetricType.DICE, hue=g1), expected_dice)
def test_calculate_hd_exact() -> None:
    """
    Verify exact Hausdorff and mean surface distances on a tiny 1x3x3 volume with
    anisotropic voxel spacing (1, 2, 3): the prediction and ground-truth foreground
    occupy opposite edge columns, and both distances are asserted to be exactly 6 mm.
    """
    prediction = np.array([[[0, 0, 1], [0, 0, 1], [0, 0, 1]]])
    ground_truth = np.array([[[1, 0, 0], [1, 0, 0], [1, 0, 0]]])

    # One-hot encode: channel 0 = background, channel 1 = foreground.
    # Fix: the original wrapped this in a second, redundant np.stack — stacking an
    # already-stacked array along axis 0 merely rebuilds it element-wise.
    ground_truth = np.stack([1 - ground_truth, ground_truth])
    g1 = "g1"
    m = metrics.calculate_metrics_per_class(prediction,
                                            ground_truth,
                                            voxel_spacing=(1, 2, 3),
                                            ground_truth_ids=[g1])
    assert m.get_single_metric(MetricType.HAUSDORFF_mm, hue=g1) == 6
    assert m.get_single_metric(MetricType.MEAN_SURFACE_DIST_mm, hue=g1) == 6
def test_calculate_dice2(prediction_list: list, expected_dice: float) -> None:
    """
    Check that the Dice score computed for a one-slice prediction volume matches
    `expected_dice`. The ground truth is the one-hot encoding of the row [0, 0, 1];
    the prediction row is supplied by the caller.
    """
    class_name = "g1"

    def expand(row: List[float]) -> np.ndarray:
        # Replicate the row 3 times along dimension 1, turning it into a single
        # Z-slice 3D array. Without this, computation of the Hausdorff distance
        # fails.
        return np.array([[row] * 3])

    # Ground truth is the one-hot version of [0, 0, 1]: channel 0 is background,
    # channel 1 is foreground.
    foreground = expand([0, 0, 1])
    one_hot_truth = np.stack([1 - foreground, foreground])
    volume = expand(prediction_list)
    result = metrics.calculate_metrics_per_class(volume, one_hot_truth, voxel_spacing=(1, 1, 1),
                                                 ground_truth_ids=[class_name])
    assert result.get_single_metric(MetricType.DICE, hue=class_name) == expected_dice