Example #1
def test_load_images_and_stack_with_resize_only_float(test_output_dirs: OutputFolderForTests) -> None:
    """
    Don't allow int type images to be loaded if image_size is set:
    skimage.transform.resize will not resize these correctly
    """
    image_size = (20, 30, 20)

    array = np.ones((10, 20, 20), dtype='uint16')
    np.save(test_output_dirs.root_dir / "file.npy", array)
    file_list = [test_output_dirs.root_dir / "file.npy"]

    with pytest.raises(ValueError):
        load_images_and_stack(file_list,
                              load_segmentation=False,
                              image_size=image_size)
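The test above only checks that integer images are rejected when image_size is set. As a minimal follow-on sketch (not part of the original test; the file name file_float.npy is made up), casting the volume to float before saving allows the resize to go through, reusing the same test_output_dirs fixture and imports:

array_float = np.ones((10, 20, 20), dtype=np.float32)
np.save(test_output_dirs.root_dir / "file_float.npy", array_float)
imaging_data = load_images_and_stack([test_output_dirs.root_dir / "file_float.npy"],
                                     load_segmentation=False,
                                     image_size=(20, 30, 20))
# One file was loaded, resized to image_size, and stacked along a new leading dimension.
assert imaging_data.images.shape == (1, 20, 30, 20)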
Example #2
def test_load_images_and_stack_3d_with_resize_random(test_output_dirs: OutputFolderForTests) -> None:
    """
    Test load and resize of 3D images
    """
    image_size = (20, 30, 20)
    low = -200
    high = 200

    array1 = np.random.randint(low=low, high=high, size=(10, 20, 10)).astype(float)
    np.save(test_output_dirs.root_dir / "file1.npy", array1)
    array2 = np.random.randint(low=low, high=high, size=(20, 30, 20)).astype(float)
    np.save(test_output_dirs.root_dir / "file2.npy", array2)
    array3 = np.random.randint(low=low, high=high, size=(30, 10, 30)).astype(float)
    np.save(test_output_dirs.root_dir / "file3.npy", array3)

    array1 = resize(array1.astype(float), image_size, anti_aliasing=True)
    array3 = resize(array3.astype(float), image_size, anti_aliasing=True)

    expected_tensor = torch.from_numpy(np.stack([array1, array2, array3]).astype(float))

    file_list = [test_output_dirs.root_dir / f"file{i}.npy" for i in range(1, 4)]
    imaging_data = load_images_and_stack(file_list,
                                         load_segmentation=False,
                                         image_size=image_size)

    assert len(imaging_data.images.shape) == 4
    assert imaging_data.images.shape[0] == 3
    assert imaging_data.images.shape[1:] == image_size
    assert torch.allclose(imaging_data.images, expected_tensor)
Example #3
def test_load_images_and_stack_2d_with_resize_ones(test_output_dirs: OutputFolderForTests) -> None:
    """
    Test load and resize of 2D images filled with (int) ones.
    """
    image_size = (20, 30)

    array = np.ones((10, 20), dtype='uint16')
    write_test_dicom(array, test_output_dirs.root_dir / "file1.dcm")
    array = np.ones((20, 30), dtype='uint16')
    write_test_dicom(array, test_output_dirs.root_dir / "file2.dcm")
    array = np.ones((30, 10), dtype='uint16')
    write_test_dicom(array, test_output_dirs.root_dir / "file3.dcm")

    expected_tensor = torch.from_numpy(np.ones((3, 1) + image_size))

    file_list = [test_output_dirs.root_dir / f"file{i}.dcm" for i in range(1, 4)]
    imaging_data = load_images_and_stack(file_list,
                                         load_segmentation=False,
                                         image_size=(1,) + image_size)

    assert len(imaging_data.images.shape) == 4
    assert imaging_data.images.shape[0] == 3
    assert imaging_data.images.shape[1] == 1
    assert imaging_data.images.shape[2:] == image_size
    assert torch.allclose(imaging_data.images, expected_tensor)
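Examples #3, #4 and #10 rely on a write_test_dicom helper that is not shown in this collection. Below is a minimal sketch of what such a helper could look like, assuming pydicom 2.x; the SOP class choice and pixel-module attributes are illustrative and not the project's actual implementation.

import numpy as np
from pathlib import Path
from pydicom.dataset import FileDataset, FileMetaDataset
from pydicom.uid import ExplicitVRLittleEndian, generate_uid


def write_test_dicom(array: np.ndarray, path: Path) -> None:
    """Writes a 2D uint16 array as a minimal single-frame DICOM file (hypothetical sketch)."""
    assert array.dtype == np.uint16
    file_meta = FileMetaDataset()
    # Secondary Capture Image Storage
    file_meta.MediaStorageSOPClassUID = "1.2.840.10008.5.1.4.1.1.7"
    file_meta.MediaStorageSOPInstanceUID = generate_uid()
    file_meta.TransferSyntaxUID = ExplicitVRLittleEndian
    ds = FileDataset(str(path), {}, file_meta=file_meta, preamble=b"\0" * 128)
    ds.SOPClassUID = file_meta.MediaStorageSOPClassUID
    ds.SOPInstanceUID = file_meta.MediaStorageSOPInstanceUID
    ds.Modality = "OT"
    ds.Rows, ds.Columns = array.shape
    ds.SamplesPerPixel = 1
    ds.PhotometricInterpretation = "MONOCHROME2"
    ds.BitsAllocated = 16
    ds.BitsStored = 16
    ds.HighBit = 15
    ds.PixelRepresentation = 0  # unsigned integers
    ds.PixelData = array.tobytes()
    ds.is_little_endian = True
    ds.is_implicit_VR = False
    ds.save_as(str(path))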
Example #4
def test_load_images_and_stack_2d_with_resize_random(test_output_dirs: OutputFolderForTests) -> None:
    """
    Test load and resize of 2D images
    """
    image_size = (20, 30)
    low = 0
    high = 200

    array1 = np.random.randint(low=low, high=high, size=(10, 20), dtype='uint16')
    write_test_dicom(array1, test_output_dirs.root_dir / "file1.dcm")
    array2 = np.random.randint(low=low, high=high, size=(20, 30), dtype='uint16')
    write_test_dicom(array2, test_output_dirs.root_dir / "file2.dcm")
    array3 = np.random.randint(low=low, high=high, size=(30, 20), dtype='uint16')
    write_test_dicom(array3, test_output_dirs.root_dir / "file3.dcm")

    array1 = resize(array1.astype(float), image_size, anti_aliasing=True)
    array3 = resize(array3.astype(float), image_size, anti_aliasing=True)

    expected_tensor = torch.from_numpy(np.expand_dims(np.stack([array1, array2, array3]).astype(float), axis=1))

    file_list = [test_output_dirs.root_dir / f"file{i}.dcm" for i in range(1, 4)]
    imaging_data = load_images_and_stack(file_list,
                                         load_segmentation=False,
                                         image_size=(1,) + image_size)

    assert len(imaging_data.images.shape) == 4
    assert imaging_data.images.shape[0] == 3
    assert imaging_data.images.shape[1] == 1
    assert imaging_data.images.shape[2:] == image_size
    assert torch.allclose(imaging_data.images, expected_tensor)
Example #5
def test_load_images_and_stack_3d_with_resize_ones(test_output_dirs: OutputFolderForTests) -> None:
    """
    Test load and resize of 3D images filled with (float) ones.
    """
    image_size = (20, 30, 20)

    array = np.ones((10, 20, 10))
    np.save(test_output_dirs.root_dir / "file1.npy", array)
    array = np.ones((20, 30, 20))
    np.save(test_output_dirs.root_dir / "file2.npy", array)
    array = np.ones((30, 10, 30))
    np.save(test_output_dirs.root_dir / "file3.npy", array)

    expected_tensor = torch.from_numpy(np.ones((3, ) + image_size))

    file_list = [
        test_output_dirs.root_dir / f"file{i}.npy" for i in range(1, 4)
    ]
    imaging_data = load_images_and_stack(file_list,
                                         load_segmentation=False,
                                         image_size=image_size)

    assert len(imaging_data.images.shape) == 4
    assert imaging_data.images.shape[0] == 3
    assert imaging_data.images.shape[1:] == image_size
    assert torch.allclose(imaging_data.images, expected_tensor)
Example #6
def test_load_and_stack_with_crop() -> None:
    image_size = (10, 12, 14)
    crop_shape = (4, 6, 8)
    center_start = (3, 3, 3)
    assert np.allclose(
        np.array(center_start) * 2 + np.array(crop_shape),
        np.array(image_size))
    # Create a fake image that is all zeros apart from ones in the center, right where we expect the
    # center crop to be taken from. We can later assert that the right crop was taken by checking that only
    # values of 1.0 are in the image
    image = np.zeros(image_size)
    image[center_start[0]:center_start[0] + crop_shape[0],
          center_start[1]:center_start[1] + crop_shape[1],
          center_start[2]:center_start[2] + crop_shape[2]] = 1
    segmentation = image * 2
    mock_return = ImageAndSegmentations(image, segmentation)
    with mock.patch("InnerEye.ML.utils.io_util.load_image_in_known_formats",
                    return_value=mock_return):
        stacked = load_images_and_stack([Path("doesnotmatter")],
                                        load_segmentation=True,
                                        center_crop_size=crop_shape)
        assert torch.is_tensor(stacked.images)
        assert stacked.images.shape == (1, ) + crop_shape
        assert torch.all(stacked.images == 1.0)
        assert torch.is_tensor(stacked.segmentations)
        assert stacked.segmentations is not None
        assert stacked.segmentations.shape == (1, ) + crop_shape
        assert torch.all(stacked.segmentations == 2.0)
Example #7
def test_load_and_stack(file_path_str: str, expected_shape: Tuple) -> None:
    file_path = Path(file_path_str)
    files = [full_ml_test_data_path() / f for f in [file_path, file_path]]
    stacked = load_images_and_stack(files, load_segmentation=False)
    assert torch.is_tensor(stacked.segmentations)
    assert stacked.segmentations is not None
    assert stacked.segmentations.shape == (0,)
    assert torch.is_tensor(stacked.images)
    assert stacked.images.shape == (2,) + expected_shape
Example #8
    def load_images(self, root_path: Optional[Path],
                    file_mapping: Optional[Dict[str, Path]],
                    load_segmentation: bool,
                    center_crop_size: Optional[TupleInt3],
                    image_size: Optional[TupleInt3]) -> ScalarItem:
        """
        Loads all the images that are specified in the channel_files field, and stacks them into a tensor
        along the first dimension. The channel_files field must either contain the image file path, relative to the
        root_path argument, or it must contain a file name stem only (without extension). In this case, the actual
        mapping from file name stem to full path is expected in the file_mapping argument.
        Either of 'root_path' or 'file_mapping' must be provided.
        :param root_path: The root path where all channel files for images are expected. This is ignored if
        file_mapping is given.
        :param file_mapping: A mapping from a file name stem (without extension) to its full path.
        :param load_segmentation: If True, the segmentation is also loaded, if it is present in the same file as the image.
        :param center_crop_size: If supplied, all loaded images will be cropped to the size given here. The crop will
        be taken from the center of the image.
        :param image_size: If given, all loaded images will be reshaped to the size given here, prior to the
        center crop.
        :return: An instance of ScalarItem, with the same label and numerical_non_image_features fields,
        and all images loaded.
        """
        full_channel_files: List[Path] = []
        for f in self.channel_files:
            if f is None:
                raise ValueError(
                    "When loading images, channel_files should no longer contain None entries."
                )
            elif file_mapping:
                if f in file_mapping:
                    full_channel_files.append(file_mapping[f])
                else:
                    raise ValueError(
                        f"File mapping does not contain an entry for {f}")
            elif root_path:
                full_channel_files.append(root_path / f)
            else:
                raise ValueError(
                    "One of the arguments 'file_mapping' or 'root_path' must be given."
                )

        imaging_data = load_images_and_stack(
            files=full_channel_files,
            load_segmentation=load_segmentation,
            center_crop_size=center_crop_size,
            image_size=image_size)
        return ScalarItem(
            label=self.label,
            numerical_non_image_features=self.numerical_non_image_features,
            categorical_non_image_features=self.categorical_non_image_features,
            # HDF5 files can contain float16 images. Convert to float32. AMP may later convert back to float16.
            images=imaging_data.images.float(),
            segmentations=imaging_data.segmentations,
            metadata=self.metadata)
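The root_path / file_mapping precedence described in the docstring can be illustrated in isolation. The standalone sketch below mirrors the resolution logic of load_images above; the paths and file stems are made up for illustration.

from pathlib import Path
from typing import Dict, Optional


def resolve_channel_file(f: str,
                         root_path: Optional[Path],
                         file_mapping: Optional[Dict[str, Path]]) -> Path:
    # file_mapping takes precedence over root_path, matching load_images above.
    if file_mapping:
        if f in file_mapping:
            return file_mapping[f]
        raise ValueError(f"File mapping does not contain an entry for {f}")
    if root_path:
        return root_path / f
    raise ValueError("One of the arguments 'file_mapping' or 'root_path' must be given.")


# A path relative to root_path:
assert resolve_channel_file("series1/scan1.npy", Path("/data"), None) == Path("/data/series1/scan1.npy")
# A file name stem resolved through the mapping; root_path is ignored:
assert resolve_channel_file("scan1", Path("/data"), {"scan1": Path("/elsewhere/scan1.npy")}) == Path("/elsewhere/scan1.npy")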
Example #9
def test_load_and_stack_with_segmentation() -> None:
    expected_shape = (4, 5, 7)
    file_path = full_ml_test_data_path() / "hdf5_data/patient_hdf5s/4be9beed-5861-fdd2-72c2-8dd89aadc1ef.h5"
    files = [file_path, file_path]
    stacked = load_images_and_stack(files, load_segmentation=True)
    assert stacked.segmentations is not None
    assert torch.is_tensor(stacked.segmentations)
    assert stacked.segmentations.dtype == torch.uint8
    assert stacked.segmentations.shape == (2,) + expected_shape
    assert torch.is_tensor(stacked.images)
    assert stacked.images.dtype == torch.float16
    assert stacked.images.shape == (2,) + expected_shape
Example #10
def test_load_images_and_stack_2d_random(test_output_dirs: TestOutputDirectories) -> None:
    """
    Test load of 2D images
    """
    image_size = (20, 30)
    low = 0
    high = 200

    array1 = np.random.randint(low=low,
                               high=high,
                               size=image_size,
                               dtype='uint16')
    write_test_dicom(array1, Path(test_output_dirs.root_dir) / "file1.dcm")
    array2 = np.random.randint(low=low,
                               high=high,
                               size=image_size,
                               dtype='uint16')
    write_test_dicom(array2, Path(test_output_dirs.root_dir) / "file2.dcm")
    array3 = np.random.randint(low=low,
                               high=high,
                               size=image_size,
                               dtype='uint16')
    write_test_dicom(array3, Path(test_output_dirs.root_dir) / "file3.dcm")

    expected_tensor = torch.from_numpy(
        np.expand_dims(np.stack([array1, array2, array3]).astype(float),
                       axis=1))

    file_list = [
        Path(test_output_dirs.root_dir) / f"file{i}.dcm" for i in range(1, 4)
    ]
    imaging_data = load_images_and_stack(file_list,
                                         load_segmentation=False,
                                         image_size=(1, ) + image_size)

    assert len(imaging_data.images.shape) == 4
    assert imaging_data.images.shape[0] == 3
    assert imaging_data.images.shape[1] == 1
    assert imaging_data.images.shape[2:] == image_size
    assert torch.allclose(imaging_data.images, expected_tensor)
Example #11
def test_load_images_when_empty() -> None:
    stacked = load_images_and_stack([], load_segmentation=False)
    assert stacked.images.shape == (0,)
    assert stacked.segmentations is not None
    assert stacked.segmentations.shape == (0,)