Example #1
def run_inference(images_with_header: List[ImageWithHeader],
                  inference_pipeline: FullImageInferencePipelineBase,
                  config: SegmentationModelBase) -> np.ndarray:
    """
    Runs inference on a list of channels and given a config and inference pipeline
    :param images_with_header:
    :param inference_pipeline:
    :param config:
    :return: segmentation
    """
    # Check the image has the correct spacing
    if config.dataset_expected_spacing_xyz:
        for image_with_header in images_with_header:
            spacing_xyz = reverse_tuple_float3(
                image_with_header.header.spacing)
            if not is_spacing_valid(spacing_xyz,
                                    config.dataset_expected_spacing_xyz):
                raise ValueError(
                    f'Input image has spacing {spacing_xyz} '
                    f'but expected {config.dataset_expected_spacing_xyz}')
    # Photo norm
    photo_norm = PhotometricNormalization(config_args=config)
    photo_norm_images = [
        photo_norm.transform(image_with_header.image)
        for image_with_header in images_with_header
    ]
    segmentation = inference_pipeline.predict_and_post_process_whole_image(
        image_channels=np.array(photo_norm_images),
        voxel_spacing_mm=images_with_header[0].header.spacing).segmentation

    return segmentation
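All of the examples in this listing use reverse_tuple_float3 to convert between the (Z, Y, X) ordering of numpy arrays and headers and the (X, Y, Z) ordering used by SimpleITK and Nifti metadata. Its implementation is not part of this listing; the sketch below shows what such a helper presumably looks like, assuming TupleFloat3 is simply an alias for a 3-element float tuple.
from typing import Tuple

# Assumed alias; in the source codebase this presumably lives in a shared types module.
TupleFloat3 = Tuple[float, float, float]


def reverse_tuple_float3(tuple_float3: TupleFloat3) -> TupleFloat3:
    """
    Reverses a 3-tuple, e.g. turning a (Z, Y, X) shape or spacing into (X, Y, Z) order.

    :param tuple_float3: The 3-tuple to reverse.
    :return: A new 3-tuple with the elements in reverse order.
    """
    return tuple_float3[2], tuple_float3[1], tuple_float3[0]


# For instance, a (Z, Y, X) spacing of (3.0, 1.0, 1.0) becomes (1.0, 1.0, 3.0) in (X, Y, Z) order.
assert reverse_tuple_float3((3.0, 1.0, 1.0)) == (1.0, 1.0, 3.0)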
Example #2
def test_create_dicom_series(test_output_dirs: OutputFolderForTests) -> None:
    """
    Test that a DICOM series can be created.

    :param test_output_dirs: Test output directories.
    :return: None.
    """
    test_shape = (24, 36, 48)  # (#slices, #rows, #columns)
    test_spacing = (1.0, 1.0, 2.5)  # (column spacing, row spacing, slice spacing)
    series_folder = test_output_dirs.root_dir / "series"
    series_folder.mkdir()
    # Create the series
    image_data = io_util.create_dicom_series(series_folder, test_shape,
                                             test_spacing)
    # Load it in
    loaded_image = io_util.load_dicom_series(series_folder)
    # GetSize returns (width, height, depth)
    assert loaded_image.GetSize() == reverse_tuple_float3(test_shape)
    assert loaded_image.GetSpacing() == test_spacing
    # Get the data from the loaded series and compare it.
    loaded_image_data = sitk.GetArrayFromImage(loaded_image).astype(float)
    assert loaded_image_data.shape == test_shape
    # Data is saved as 16 bit, so a generous tolerance is needed.
    assert np.allclose(loaded_image_data, image_data, atol=1e-1)
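The assertions above rely on SimpleITK's axis conventions: GetSize and GetSpacing report (width, height, depth), i.e. (X, Y, Z), whereas GetArrayFromImage returns a numpy array indexed as (Z, Y, X). The short self-contained illustration below demonstrates this with a synthetic volume; the concrete shape and spacing are chosen only for illustration.
import numpy as np
import SimpleITK as sitk

# A synthetic volume in (Z, Y, X) order, matching the layout returned by GetArrayFromImage.
volume = np.zeros((24, 36, 48), dtype=np.float32)
image = sitk.GetImageFromArray(volume)
image.SetSpacing((1.0, 1.0, 2.5))  # SetSpacing/GetSpacing use (X, Y, Z) order.

assert image.GetSize() == (48, 36, 24)                      # (width, height, depth)
assert sitk.GetArrayFromImage(image).shape == (24, 36, 48)  # (slices, rows, columns)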
Example #3
def test_score_check_spacing() -> None:
    """
    Test that the spacing read from a Nifti image is validated against the expected spacing within a small tolerance.
    """
    config = LOADER.create_model_config_from_name("DummyModel")
    config.dataset_expected_spacing_xyz = (1.0, 1.0, 3.0)
    image_with_header = io_util.load_nifti_image(img_nii_path)
    spacing_xyz = reverse_tuple_float3(image_with_header.header.spacing)
    assert is_spacing_valid(spacing_xyz, config.dataset_expected_spacing_xyz)
    assert is_spacing_valid(spacing_xyz, (1, 1, 3.01))
    assert not is_spacing_valid(spacing_xyz, (1, 1, 3.2))
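The test above accepts an expected spacing of (1, 1, 3.01) but rejects (1, 1, 3.2) for an image whose spacing is roughly (1, 1, 3), which suggests a componentwise comparison with a small absolute tolerance. The sketch below shows one way such a check could be written, assuming is_spacing_valid wraps np.allclose; the tolerance value of 0.1 mm is an assumption, not taken from the source.
from typing import Tuple

import numpy as np

TupleFloat3 = Tuple[float, float, float]


def is_spacing_valid(spacing: TupleFloat3,
                     expected_spacing: TupleFloat3,
                     absolute_tolerance: float = 0.1) -> bool:
    """
    Checks whether the actual voxel spacing matches the expected spacing within an absolute tolerance.

    :param spacing: The actual (x, y, z) spacing in mm.
    :param expected_spacing: The expected (x, y, z) spacing in mm.
    :param absolute_tolerance: Maximum allowed per-component deviation in mm (assumed default).
    :return: True if every component of the spacing is within the tolerance.
    """
    return bool(np.allclose(spacing, expected_spacing, atol=absolute_tolerance))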
Example #4
def test_nii_load_zyx(test_output_dirs: OutputFolderForTests) -> None:
    """
    Test that a Nifti image is loaded with its axes in (Z, Y, X) order, matching the SimpleITK array layout.
    """
    expected_shape = (44, 167, 167)
    file_path = full_ml_test_data_path("patch_sampling/scan_small.nii.gz")
    image: sitk.Image = sitk.ReadImage(str(file_path))
    assert image.GetSize() == reverse_tuple_float3(expected_shape)
    img = sitk.GetArrayFromImage(image)
    assert img.shape == expected_shape
    image_header = io_util.load_nifti_image(file_path)
    assert image_header.image.shape == expected_shape
    assert image_header.header.spacing is not None
    np.testing.assert_allclose(image_header.header.spacing, (3.0, 1.0, 1.0), rtol=0.1)
Example #5
def test_zip_random_dicom_series(
        test_output_dirs: OutputFolderForTests) -> None:
    """
    Test that a DICOM series can be created.
        :param test_output_dirs: Test output directories.
    :return: None.
    """
    test_shape = (24, 36, 48)  # (#slices, #rows, #columns)
    test_spacing = (1.0, 1.0, 2.5)  # (column spacing, row spacing, slice spacing)
    zip_file_path = test_output_dirs.root_dir / "pack" / "random.zip"
    scratch_folder = test_output_dirs.root_dir / "scratch"
    io_util.zip_random_dicom_series(test_shape, test_spacing, zip_file_path,
                                    scratch_folder)

    assert zip_file_path.is_file()
    series_folder = test_output_dirs.root_dir / "unpack"
    with zipfile.ZipFile(zip_file_path, 'r') as zip_file:
        zip_file.extractall(series_folder)
    # Load it in
    loaded_image = io_util.load_dicom_series(series_folder)
    # GetSize returns (width, height, depth)
    assert loaded_image.GetSize() == reverse_tuple_float3(test_shape)
    assert loaded_image.GetSpacing() == test_spacing
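zip_random_dicom_series itself is not included in this listing; based on how the test consumes it, it presumably writes a random series into the scratch folder (much like io_util.create_dicom_series in Example #2) and then packs that folder into the given zip file. The sketch below covers only the packing step, using a hypothetical helper name that is not part of io_util.
import zipfile
from pathlib import Path


def zip_dicom_series_folder(series_folder: Path, zip_file_path: Path) -> None:
    """
    Packs every file of a DICOM series folder into a single zip archive.

    :param series_folder: Folder containing the DICOM files of one series.
    :param zip_file_path: Target path of the zip archive; parent folders are created if needed.
    """
    zip_file_path.parent.mkdir(parents=True, exist_ok=True)
    with zipfile.ZipFile(zip_file_path, 'w') as zip_file:
        for file in sorted(series_folder.rglob("*")):
            if file.is_file():
                # Store paths relative to the series folder, so extracting the archive
                # recreates a flat series folder as expected by load_dicom_series.
                zip_file.write(file, arcname=str(file.relative_to(series_folder)))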
Example #6
def calculate_metrics_per_class(segmentation: np.ndarray,
                                ground_truth: np.ndarray,
                                ground_truth_ids: List[str],
                                voxel_spacing: TupleFloat3,
                                patient_id: Optional[int] = None) -> MetricsDict:
    """
    Calculate the dice for all foreground structures (the background class is completely ignored).
    Returns a MetricsDict with metrics for each of the foreground
    structures. Metrics are NaN if both ground truth and prediction are all zero for a class.
    :param ground_truth_ids: The names of all foreground classes.
    :param segmentation: predictions multi-value array with dimensions: [Z x Y x X]
    :param ground_truth: ground truth binary array with dimensions: [C x Z x Y x X]
    :param voxel_spacing: voxel_spacing in 3D Z x Y x X
    :param patient_id: for logging
    """
    number_of_classes = ground_truth.shape[0]
    if len(ground_truth_ids) != (number_of_classes - 1):
        raise ValueError(f"Received {len(ground_truth_ids)} foreground class names, but "
                         f"the label tensor indicates that there are {number_of_classes - 1} classes.")
    binaries = binaries_from_multi_label_array(segmentation, number_of_classes)

    all_classes_are_binary = [is_binary_array(ground_truth[label_id]) for label_id in range(ground_truth.shape[0])]
    if not np.all(all_classes_are_binary):
        raise ValueError("Ground truth values should be 0 or 1")
    overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()
    hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
    metrics = MetricsDict(hues=ground_truth_ids)
    for i, prediction in enumerate(binaries):
        if i == 0:
            continue
        check_size_matches(prediction, ground_truth[i], arg1_name="prediction", arg2_name="ground_truth")
        if not is_binary_array(prediction):
            raise ValueError("Predictions values should be 0 or 1")
        # simpleitk returns a Dice score of 0 if both ground truth and prediction are all zeros.
        # We want to be able to fish out those cases, and treat them specially later.
        prediction_zero = np.all(prediction == 0)
        gt_zero = np.all(ground_truth[i] == 0)
        dice = mean_surface_distance = hausdorff_distance = math.nan
        if not (prediction_zero and gt_zero):
            prediction_image = sitk.GetImageFromArray(prediction.astype(np.uint8))
            prediction_image.SetSpacing(sitk.VectorDouble(reverse_tuple_float3(voxel_spacing)))
            ground_truth_image = sitk.GetImageFromArray(ground_truth[i].astype(np.uint8))
            ground_truth_image.SetSpacing(sitk.VectorDouble(reverse_tuple_float3(voxel_spacing)))
            overlap_measures_filter.Execute(prediction_image, ground_truth_image)
            dice = overlap_measures_filter.GetDiceCoefficient()
            if prediction_zero or gt_zero:
                hausdorff_distance = mean_surface_distance = math.inf
            else:
                try:
                    hausdorff_distance_filter.Execute(prediction_image, ground_truth_image)
                    hausdorff_distance = hausdorff_distance_filter.GetHausdorffDistance()
                except Exception as e:
                    logging.warning("Cannot calculate Hausdorff distance for "
                                    f"structure {i} of patient {patient_id}: {e}")
                try:
                    mean_surface_distance = surface_distance(prediction_image, ground_truth_image)
                except Exception as e:
                    logging.warning(f"Cannot calculate mean distance for structure {i} of patient {patient_id}: {e}")
            logging.debug(f"Patient {patient_id}, class {i} has Dice score {dice}")

        def add_metric(metric_type: MetricType, value: float) -> None:
            metrics.add_metric(metric_type, value, skip_nan_when_averaging=True, hue=ground_truth_ids[i - 1])

        add_metric(MetricType.DICE, dice)
        add_metric(MetricType.HAUSDORFF_mm, hausdorff_distance)
        add_metric(MetricType.MEAN_SURFACE_DIST_mm, mean_surface_distance)
    return metrics
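At its core, the function above wraps each prediction/ground-truth pair as SimpleITK images, sets the spacing in (X, Y, Z) order, and runs the overlap and Hausdorff filters on them. The self-contained snippet below illustrates that step on a tiny synthetic pair; the shapes, spacing, and label positions are made up purely for illustration.
import numpy as np
import SimpleITK as sitk

# Tiny synthetic prediction and ground truth for a single foreground class, in (Z, Y, X) order.
prediction = np.zeros((4, 8, 8), dtype=np.uint8)
ground_truth = np.zeros((4, 8, 8), dtype=np.uint8)
prediction[1:3, 2:6, 2:6] = 1
ground_truth[1:3, 3:7, 3:7] = 1

prediction_image = sitk.GetImageFromArray(prediction)
ground_truth_image = sitk.GetImageFromArray(ground_truth)
for image in (prediction_image, ground_truth_image):
    # Spacing is set in (X, Y, Z) order, hence the reversal of a (Z, Y, X) voxel spacing of (3.0, 1.0, 1.0).
    image.SetSpacing((1.0, 1.0, 3.0))

overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()
overlap_measures_filter.Execute(prediction_image, ground_truth_image)
hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
hausdorff_distance_filter.Execute(prediction_image, ground_truth_image)

print("Dice:", overlap_measures_filter.GetDiceCoefficient())
print("Hausdorff (mm):", hausdorff_distance_filter.GetHausdorffDistance())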