def test_metadata(metaimage_path, dicom_path):
    meta = load_ct.load_ct(dicom_path, voxel=False)
    meta = load_ct.MetaData(meta)
    zipped = zip(meta.spacing, (0.703125, 0.703125, 2.5))
    assert all([m_axis == o_axis for m_axis, o_axis in zipped])

    meta = load_ct.load_ct(metaimage_path, voxel=False)
    spacing = list(reversed(meta.GetSpacing()))
    meta = load_ct.MetaData(meta)
    assert meta.spacing == spacing

    try:
        load_ct.MetaData([1, 2, 3])
    except ValueError as e:
        assert 'either list[dicom.dataset.FileDataset] or SimpleITK' in str(e)
def test_metadata(metaimage_path, dicom_path):
    meta = load_ct.load_ct(dicom_path, voxel=False)
    meta = load_ct.MetaData(meta)
    zipped = zip(meta.spacing, (2.5, 0.703125, 0.703125))
    assert all([m_axis == o_axis for m_axis, o_axis in zipped])

    meta = load_ct.load_ct(metaimage_path, voxel=False)
    # the default axes order used here is (z, y, x)
    spacing = meta.GetSpacing()[::-1]
    meta = load_ct.MetaData(meta)
    assert meta.spacing == spacing

    try:
        load_ct.MetaData([1, 2, 3])
    except ValueError as e:
        assert 'either list[dicom.dataset.FileDataset] or SimpleITK' in str(e)
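
A minimal sketch of the axis-order convention the tests above rely on, assuming SimpleITK is installed ('scan.mhd' is a hypothetical path): SimpleITK reports spacing as (x, y, z), while MetaData.spacing is expected in (z, y, x) order, hence the reversal.

import SimpleITK as sitk

image = sitk.ReadImage('scan.mhd')          # hypothetical MetaImage file
xyz_spacing = image.GetSpacing()            # SimpleITK order: (x, y, z)
zyx_spacing = tuple(reversed(xyz_spacing))  # (z, y, x), the order MetaData.spacing uses
print(zyx_spacing)
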
Example #3
    def __call__(self, voxel_data, meta):
        if not isinstance(meta, load_ct.MetaData):
            meta = load_ct.MetaData(meta)

        if self.params is None:
            return voxel_data

        # Clip manually instead of using np.clip, to avoid computing np.max / np.min when a bound is None
        if self.params.clip_lower is not None:
            voxel_data[
                voxel_data < self.params.clip_lower] = self.params.clip_lower

        if self.params.clip_upper is not None:
            voxel_data[
                voxel_data > self.params.clip_upper] = self.params.clip_upper

        if self.params.min_max_normalize:
            data_max = self.params.clip_upper
            data_min = self.params.clip_lower
            if data_max is None:
                data_max = voxel_data.max()
            if data_min is None:
                data_min = voxel_data.min()

            voxel_data = (voxel_data - data_min) / float(data_max - data_min)

        if self.params.spacing is not None:
            zoom_fctr = meta.spacing / np.asarray(self.params.spacing)
            voxel_data = scipy.ndimage.interpolation.zoom(
                voxel_data, zoom_fctr)
            meta.spacing = [axis for axis in self.params.spacing]

        return voxel_data
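
A hedged usage sketch for the callable above; the parameter names follow the tests below (clip_lower, clip_upper, min_max_normalize), the module layout and scan path are assumptions, and in this version __call__ returns only the processed voxel array.

from src.preprocess import load_ct, preprocess_ct   # assumed module layout

ct_array, meta = load_ct.load_ct('path/to/scan.mhd')            # hypothetical path
params = preprocess_ct.Params(clip_lower=-1000, clip_upper=400,
                              min_max_normalize=True)
preprocess = preprocess_ct.PreprocessCT(params)
ct_array = preprocess(ct_array, meta)   # clipped to [-1000, 400], then scaled to [0, 1]
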
Example #4
def test_resample(metaimage_path):
    ct_array, meta = load_ct.load_ct(metaimage_path)
    resampled, _ = resample(ct_array,
                            np.array(load_ct.MetaData(meta).spacing),
                            np.array([1, 1, 1]),
                            order=1)
    preprocess = preprocess_ct.PreprocessCT(spacing=True, order=1)
    processed, _ = preprocess(ct_array, meta)
    assert np.abs(resampled - processed).sum() == 0
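
A worked example of the zoom factor computed by both code paths above, with assumed spacing values: the factor is the original spacing divided by the target spacing, so axes with coarser spacing than the target are upsampled.

import numpy as np

original_spacing = np.array([2.5, 0.703125, 0.703125])   # (z, y, x) in mm, hypothetical
target_spacing = np.array([1.0, 1.0, 1.0])
zoom_factor = original_spacing / target_spacing           # [2.5, 0.703125, 0.703125]
new_shape = np.round(np.array([100, 512, 512]) * zoom_factor).astype(int)
print(new_shape)                                          # [250 360 360]
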
def crop_patch(ct_array, meta, patch_shape=None, centroids=None, stride=None, pad_value=0):
    """ Generator yield a patch of a desired shape for each centroid
    from a given a CT scan.

    Args:
        ct_array (np.ndarray): a numpy ndarray representation of a CT scan
        patch_shape (int, list[int]): a desired shape of a patch. If int will be provided,
            then patch will be a cube-shaped.
        centroids (list[dict]): A list of centroids of the form::
            {'x': int,
             'y': int,
             'z': int}
        meta (src.preprocess.load_ct.MetaData): meta information of the CT scan.
        stride (int): stride for patch coordinates meshgrid.
            If None is set (default), then no meshgrid will be returned.
        pad_value (int): value with which an array padding will be performed.
    Yields:
        np.ndarray: cropped patch from a CT scan.
        np.ndarray | None: meshgrid of a patch.
    """
    if centroids is None:
        centroids = []

    if patch_shape is None:
        patch_shape = []

    if not isinstance(meta, load_ct.MetaData):
        meta = load_ct.MetaData(meta)

    # expand a scalar patch_shape to one value per axis (a cube-shaped patch)
    patch_shape = scipy.ndimage._ni_support._normalize_sequence(patch_shape, len(ct_array.shape))
    patch_shape = np.array(patch_shape)
    init_shape = np.array(ct_array.shape)
    padding = np.ceil(patch_shape / 2.).astype(int)
    padding = np.stack([padding, padding], axis=1)
    ct_array = np.pad(ct_array, padding, mode='constant', constant_values=pad_value)

    for centroid in centroids:
        centroid = mm2voxel([centroid[axis] for axis in 'zyx'], meta.origin, meta.spacing)

        patch = ct_array[centroid[0]: centroid[0] + patch_shape[0],
                         centroid[1]: centroid[1] + patch_shape[1],
                         centroid[2]: centroid[2] + patch_shape[2]]

        if stride:
            init_shape += np.clip(patch_shape // 2 - centroid, 0, np.inf).astype(np.int64)
            init_shape += np.clip(centroid + patch_shape // 2 - init_shape, 0, np.inf).astype(np.int64)

            normstart = (np.array(centroid) - patch_shape / 2) / init_shape - 0.5
            normsize = patch_shape / init_shape
            xx, yy, zz = np.meshgrid(np.linspace(normstart[0], normstart[0] + normsize[0], patch_shape[0] // stride),
                                     np.linspace(normstart[1], normstart[1] + normsize[1], patch_shape[1] // stride),
                                     np.linspace(normstart[2], normstart[2] + normsize[2], patch_shape[2] // stride),
                                     indexing='ij')
            coord = np.concatenate([xx[np.newaxis, ...], yy[np.newaxis, ...], zz[np.newaxis, ...]], 0).astype('float32')
            yield patch, coord
        else:
            yield patch
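
A hedged usage sketch for crop_patch; the scan path and centroid are hypothetical, and the centroid coordinates are in millimetres (they are converted to voxel indices via mm2voxel inside the generator).

from src.preprocess import load_ct   # assumed module layout

ct_array, meta = load_ct.load_ct('path/to/scan.mhd')
centroids = [{'x': 507, 'y': -21, 'z': -177}]
patches = list(crop_patch(ct_array, meta, patch_shape=12, centroids=centroids))
print(patches[0].shape)   # (12, 12, 12) when no stride is given
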
def test_preprocess_dicom_min_max_scale(dicom_path):
    params = preprocess_ct.Params(clip_lower=-1000, clip_upper=400, min_max_normalize=True)
    preprocess = preprocess_ct.PreprocessCT(params)

    dicom_array, meta = load_ct.load_ct(dicom_path)
    meta = load_ct.MetaData(meta)
    dicom_array = preprocess(dicom_array, meta)
    assert isinstance(dicom_array, np.ndarray)
    assert dicom_array.max() <= 1
    assert dicom_array.min() >= 0
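
A worked example of the min-max scaling this test checks: with clip_lower=-1000 and clip_upper=400, a voxel of -300 HU maps to (-300 + 1000) / 1400 = 0.5.

clip_lower, clip_upper = -1000, 400
hu_value = -300                                              # hypothetical voxel value in HU
scaled = (hu_value - clip_lower) / float(clip_upper - clip_lower)
print(scaled)                                                # 0.5
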
def test_preprocess_dicom_clips(dicom_path):
    params = preprocess_ct.Params(clip_lower=-1, clip_upper=40)
    preprocess = preprocess_ct.PreprocessCT(params)

    dicom_array, meta = load_ct.load_ct(dicom_path)
    meta = load_ct.MetaData(meta)
    dicom_array = preprocess(dicom_array, meta)
    assert isinstance(dicom_array, np.ndarray)
    assert dicom_array.max() <= 40
    assert dicom_array.min() >= -1
def test_preprocess_dicom_pure(dicom_path):
    params = preprocess_ct.Params()
    preprocess = preprocess_ct.PreprocessCT(params)

    dicom_array, meta = load_ct.load_dicom(dicom_path)
    assert isinstance(dicom_array, np.ndarray)

    dicom_array, meta = load_ct.load_dicom(dicom_path)
    meta = load_ct.MetaData(meta)
    dicom_array = preprocess(dicom_array, meta)
    assert isinstance(dicom_array, np.ndarray)
Example #9
def test_patches_from_ct(ct_path):
    ct_array, meta = load_ct.load_ct(ct_path)
    meta = load_ct.MetaData(meta)
    centroids = [[507, -21, -177], [547, -121, -220], [530, -221, -277]]
    centroids = [{
        'x': centroid[0],
        'y': centroid[1],
        'z': centroid[2]
    } for centroid in centroids]
    patches = crop_patches.patches_from_ct(ct_array,
                                           patch_shape=12,
                                           centroids=centroids,
                                           meta=meta)
    assert isinstance(patches, list)
    assert len(patches) == 3
    assert all([patch.shape == (12, 12, 12) for patch in patches])
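
A minimal sketch of the world-to-voxel conversion mm2voxel is assumed to perform inside crop_patch (coordinates, origin and spacing in (z, y, x) order); the real helper may differ in rounding or axis handling, so treat this as illustrative only.

import numpy as np

def mm2voxel_sketch(coord_zyx, origin, spacing):
    # world position (mm) -> voxel index: subtract the scan origin, divide by the voxel spacing
    return np.rint((np.asarray(coord_zyx) - np.asarray(origin)) / np.asarray(spacing)).astype(int)

print(mm2voxel_sketch([10.0, 20.0, 30.0], origin=[0, 0, 0], spacing=[2.5, 0.7, 0.7]))   # [ 4 29 43]
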
Example #10
def predict(ct_path, model_path=None):
    """

    Args:
      image_itk: ITK Image in Hu units
      model_path: Path to the file containing the model state
                 (Default value = "src/algorithms/identify/assets/dsb2017_detector.ckpt")

    Returns:
      List of Nodule locations and probabilities

    """
    if not model_path:
        IDENTIFY_DIR = path.join(Config.ALGOS_DIR, 'identify')
        model_path = path.join(IDENTIFY_DIR, 'assets', 'dsb2017_detector.ckpt')

    ct_array, meta = load_ct.load_ct(ct_path)
    meta = load_ct.MetaData(meta)
    spacing = np.array(meta.spacing)
    masked_image, mask = filter_lungs(ct_array)

    # masked_image = image
    net = Net()
    net.load_state_dict(torch.load(model_path)["state_dict"])

    if torch.cuda.is_available():
        net = torch.nn.DataParallel(net).cuda()

    split_comber = SplitComb(side_len=int(144), margin=32, max_stride=16, stride=4, pad_value=170)

    # We have to use small batches until the next release of PyTorch, as bigger ones will segfault for CPU
    # split_comber = SplitComb(side_len=int(32), margin=16, max_stride=16, stride=4, pad_value=170)
    # Transform image to the 0-255 range and resample to 1x1x1mm
    preprocess = preprocess_ct.PreprocessCT(clip_lower=-1200., clip_upper=600., spacing=True, order=1,
                                            min_max_normalize=True, scale=255, dtype='uint8')

    ct_array, meta = preprocess(ct_array, meta)
    ct_array = ct_array[np.newaxis, ...]

    imgT, coords, nzhw = split_data(ct_array, split_comber=split_comber)
    results = []

    # Loop over the image chunks
    for img, coord in zip(imgT, coords):
        var = Variable(img[np.newaxis])
        var.volatile = True
        coord = Variable(coord[np.newaxis])
        coord.volatile = True
        resvar = net(var, coord)
        res = resvar.data.cpu().numpy()
        results.append(res)

    results = np.concatenate(results, 0)
    results = split_comber.combine(results, nzhw=nzhw)
    pbb = GetPBB()
    # The first column of each proposal is the probability, followed by z, y, x, and the radius
    proposals, _ = pbb(results, ismask=True)

    # proposals = proposals[proposals[:,4] < 40]
    proposals = nms(proposals)
    # Filter out proposals outside the actual lung
    # prop_int = proposals[:, 1:4].astype(np.int32)
    # wrong = [imgs[0, x[0], x[1], x[2]] > 180 for x in prop_int]
    # proposals = proposals[np.logical_not(wrong)]

    # Apply a sigmoid to get probabilities
    proposals[:, 0] = expit(proposals[:, 0])
    # Remove really weak proposals?
    # proposals = proposals[proposals[:,0] > 0.5]

    # Rescale back to image space coordinates
    proposals[:, 1:4] /= spacing[np.newaxis]
    return [{"x": int(p[3]), "y": int(p[2]), "z": int(p[1]), "p_nodule": float(p[0])} for p in proposals]
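
A hedged usage sketch for predict; the scan path is hypothetical, and when model_path is omitted the default checkpoint under Config.ALGOS_DIR is used, as shown above.

nodules = predict('path/to/ct_scan')
for nodule in nodules:
    print(nodule['x'], nodule['y'], nodule['z'], nodule['p_nodule'])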