Example no. 1
0
 def postprocessing(self, sample, prediction):
     """
     Undo the resizing applied during preprocessing for a single prediction.

     :param sample: sample object; its extended data must contain the key
         "orig_resize_shape" (the shape recorded before resizing —
         presumably channel-first, since the final reshape drops axis 0;
         TODO(review): confirm against the preprocessing code)
     :param prediction: predicted segmentation array for this sample
     :return: prediction resized back to the original shape
     """
     # Shape recorded by the preprocessing step for the last sample
     target_shape = sample.get_extended_data()["orig_resize_shape"]
     # Append a single channel axis ...
     resized = prediction[..., np.newaxis]
     # ... and move it to the front (channel-first layout expected by resize_segmentation)
     resized = np.moveaxis(resized, -1, 0)
     # Resize the segmentation back to the recorded shape (linear order, zero fill)
     resized = resize_segmentation(resized, target_shape, order=1, cval=0)
     # Back to channel-last layout
     resized = np.moveaxis(resized, 0, -1)
     # Drop the leading (channel) entry of the recorded shape and return
     return np.reshape(resized, target_shape[1:])
Example no. 2
0
def downsample_seg_for_ds_transform2(seg, ds_scales=((1, 1, 1), (0.5, 0.5, 0.5), (0.25, 0.25, 0.25)), order=0, cval=0, axes=None):
    """
    Build one downsampled copy of `seg` per entry in `ds_scales` (deep-supervision targets).

    :param seg: segmentation ndarray; scale factors are applied along `axes`
    :param ds_scales: iterable of per-axis scale tuples; an all-ones tuple keeps seg unchanged
    :param order: interpolation order passed to resize_segmentation
    :param cval: fill value passed to resize_segmentation
    :param axes: axes of seg the scale factors apply to; defaults to [0, 1, 2]
    :return: list with one entry per scale — the input seg itself (NOTE: not converted to a
             tensor) for an all-ones scale, otherwise a torch.Tensor of the resized seg
    """
    if axes is None:
        axes = [0, 1, 2]
    output = []
    for scale in ds_scales:
        if all(f == 1 for f in scale):
            # Identity scale: pass the input through unchanged (keeps its original type)
            output.append(seg)
        else:
            # Scale the selected axes and round to an integer target shape
            new_shape = np.array(seg.shape).astype(float)
            for i, a in enumerate(axes):
                new_shape[a] *= scale[i]
            new_shape = np.round(new_shape).astype(int)
            # resize_segmentation allocates its own output; no need to pre-allocate zeros
            out_seg = resize_segmentation(seg, new_shape, order, cval)
            output.append(torch.from_numpy(out_seg))
    return output
def augment_resize(data, target_size, order=3, order_seg=1, cval_seg=0, seg=None, concatenate_list=False):
    """
    Reshapes data (and seg) to target_size
    :param data: np.ndarray or list/tuple of np.ndarrays, must be (b, c, x, y(, z))) (if list/tuple then each entry
    must be of this shape!)
    :param target_size: int or list/tuple of int
    :param order: interpolation order for data (see skimage.transform.resize)
    :param order_seg: interpolation order for seg (see skimage.transform.resize)
    :param cval_seg: cval for segmentation (see skimage.transform.resize)
    :param seg: can be None, if not None then it will also be resampled to target_size. Can also be list/tuple of
    np.ndarray (just like data). Must also be (b, c, x, y(, z))
    :param concatenate_list: if you give list/tuple of data/seg and set concatenate_list=True then the result will be
    concatenated into one large ndarray (once again b, c, x, y(, z))
    :return: (resized data, resized seg or None), as list or ndarray depending on input/concatenate_list
    """
    if isinstance(data, np.ndarray):
        is_list = False
        assert (seg is None) or isinstance(seg, np.ndarray), "if data is ndarray then seg must be ndarray as well"
    elif isinstance(data, (list, tuple)):
        is_list = True
        assert (seg is None) or isinstance(seg, (list, tuple)), "if data is list/tuple then seg must be list/tuple as well"
    else:
        raise TypeError("Data has to be either a numpy array or a list")

    if not is_list:
        # Wrap single arrays so the loops below see a list; concatenate at the end to unwrap
        data = [data]
        if seg is not None:
            seg = [seg]
        concatenate_list = True

    def _target_size_for(arr, what):
        # Per-array spatial target size; spatial dims exclude the leading (b, c) axes.
        dimensionality = len(arr.shape) - 2
        if not isinstance(target_size, (list, tuple)):
            return [target_size] * dimensionality
        assert len(target_size) == dimensionality, "If you give a tuple/list as target size, make sure it has " \
                                                   "the same dimensionality as %s!" % what
        return list(target_size)

    result_data = []
    for i in range(len(data)):
        target_size_here = _target_size_for(data[i], "data")
        # resize only supports 3d images. And it makes sense to treat each color channel of each sample separately
        result_this_data = []
        for b in range(data[i].shape[0]):
            result_this_sample = []
            for c in range(data[i].shape[1]):
                result_this_sample.append(
                    resize(data[i][b, c].astype(float), target_size_here, order).astype(data[i].dtype)[None])
            result_this_data.append(np.vstack(result_this_sample)[None])
        result_data.append(np.vstack(result_this_data))

    if concatenate_list:
        result_data = np.vstack(result_data)

    if seg is not None:
        result_seg = []
        for i in range(len(seg)):
            target_size_here = _target_size_for(seg[i], "seg")
            # resize only supports 3d images. And it makes sense to treat each color channel of each sample separately
            result_this_seg = []
            for b in range(seg[i].shape[0]):
                result_this_sample = []
                for c in range(seg[i].shape[1]):
                    # BUGFIX: cast back to the original seg dtype — resize_segmentation receives a
                    # float copy, so without the cast the returned seg silently became float.
                    # This matches the data handling above and the sibling augment_zoom.
                    result_this_sample.append(
                        resize_segmentation(seg[i][b, c].astype(float), target_size_here, order_seg,
                                            cval_seg).astype(seg[i].dtype)[None])
                result_this_seg.append(np.vstack(result_this_sample)[None])
            result_seg.append(np.vstack(result_this_seg))

        if concatenate_list:
            result_seg = np.vstack(result_seg)
    else:
        result_seg = None

    return result_data, result_seg
def augment_zoom(data, zoom_factors, order=3, order_seg=1, cval_seg=0, seg=None, concatenate_list=False):
    """
    Zoom data (and optionally seg) by the factor(s) in zoom_factors.

    :param data: np.ndarray of shape (b, c, x, y(, z)), or a list/tuple of such ndarrays
    :param zoom_factors: scalar applied to every spatial axis, or a list/tuple with one
        factor per spatial axis
    :param order: interpolation order for data (see skimage.transform.resize)
    :param order_seg: interpolation order for seg (see skimage.transform.resize)
    :param cval_seg: cval for segmentation (see skimage.transform.resize)
    :param seg: optional segmentation, same layout rules as data; zoomed with
        resize_segmentation and cast back to its original dtype
    :param concatenate_list: when data/seg are lists and this is True, the per-entry
        results are stacked into one (b, c, x, y(, z)) ndarray
    :return: (zoomed data, zoomed seg or None)
    """
    if isinstance(data, np.ndarray):
        is_list = False
    elif isinstance(data, (list, tuple)):
        is_list = True
        assert len(data) > 0 and all([isinstance(i, np.ndarray) for i in data])
    else:
        raise TypeError("Data has to be either a numpy array or a list")

    if seg is not None:
        if is_list:
            assert isinstance(seg, (list, tuple)), "if data is list/tuple then seg must be, too"
            assert len(seg) > 0 and all([isinstance(i, np.ndarray) for i in seg])
        else:
            assert isinstance(seg, np.ndarray)

    if not is_list:
        # Promote single arrays to one-element lists; concatenation unwraps them at the end
        data = [data]
        if seg is not None:
            seg = [seg]
        concatenate_list = True

    def _zoomed_shape(arr, what):
        # Integer target shape for the spatial axes (everything after b, c)
        n_spatial = len(arr.shape) - 2
        spatial = np.array(arr.shape[2:])
        if isinstance(zoom_factors, (list, tuple)):
            assert len(zoom_factors) == n_spatial, "If you give a tuple/list as target size, make sure it has " \
                                                   "the same dimensionality as %s!" % what
            factors = np.array(zoom_factors)
        else:
            factors = np.array([zoom_factors] * n_spatial)
        return np.round(spatial * factors).astype(int)

    def _zoom_one(arr, what):
        # resize only supports 3d images, so each (b, c) slice is processed on its own
        target_shape = _zoomed_shape(arr, what)
        per_sample = []
        for b in range(arr.shape[0]):
            per_channel = []
            for c in range(arr.shape[1]):
                if what == "seg":
                    zoomed = resize_segmentation(arr[b, c].astype(float), target_shape, order_seg, cval_seg)
                else:
                    zoomed = resize(arr[b, c].astype(float), target_shape, order)
                per_channel.append(zoomed.astype(arr.dtype)[None])
            per_sample.append(np.vstack(per_channel)[None])
        return np.vstack(per_sample)

    result_data = [_zoom_one(entry, "data") for entry in data]
    if concatenate_list:
        result_data = np.vstack(result_data)

    if seg is None:
        result_seg = None
    else:
        result_seg = [_zoom_one(entry, "seg") for entry in seg]
        if concatenate_list:
            result_seg = np.vstack(result_seg)

    return result_data, result_seg