Example 1
def view_patient_raw_data(patient, width=400, height=400):
    import batchviewer
    a = []
    a.append(patient['ed_data'][None])
    a.append(patient['ed_gt'][None])
    a.append(patient['es_data'][None])
    a.append(patient['es_gt'][None])
    batchviewer.view_batch(np.vstack(a), width=width, height=height)
    train, val = get_split_deterministic(patients, fold=0,
                                         num_splits=5,
                                         random_state=12345)

    patch_size = (128, 128, 128)
    batch_size = 2

    # I recommend you don't use 'iteration over all training data' as epoch because in patch based training this is
    # really not super well defined. If you leave all arguments as default then each batch will contain randomly
    # selected patients. Since we don't care about epochs here we can set num_threads_in_multithreaded to anything.
    dataloader = BraTS2017DataLoader3D(train, batch_size, patch_size, 1)

    batch = next(dataloader)
    try:
        from batchviewer import view_batch
        # batch viewer can show up to 4d tensors. We can show only one sample, but that should be sufficient here
        view_batch(batch['data'][0], batch['seg'][0])
    except ImportError:
        view_batch = None
        print(
            "you can visualize batches with batchviewer. It's a nice and handy tool. You can get it here: "
            "https://github.com/FabianIsensee/BatchViewer")

    # now we have some DataLoader. Let's go and get some augmentations

    # first let's collect all shapes, you will see why later
    shapes = [
        BraTS2017DataLoader3D.load_patient(i)[0].shape[1:] for i in patients
    ]
    max_shape = np.max(shapes, 0)
    max_shape = np.max((max_shape, patch_size), 0)
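    # Presumably this is why all shapes were collected (the same idea is spelled out in the
    # 2D example further down): a DataLoader that returns whole cases padded to max_shape,
    # with SpatialTransform doing the cropping to patch_size so the padded border is zeros,
    # just like the region outside the brain. A minimal sketch of how the example presumably
    # continues; the SpatialTransform arguments are assumptions, not the original code:
    from batchgenerators.transforms.spatial_transforms import SpatialTransform

    dataloader_train = BraTS2017DataLoader3D(train, batch_size, max_shape, 1)
    spatial_transform = SpatialTransform(patch_size, random_crop=True,
                                         border_mode_data='constant', border_cval_data=0)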
Example 3
                            strength_here = self.strength if isinstance(
                                self.strength, float) else np.random.uniform(
                                    *self.strength)
                            if len(data.shape) == 4:
                                # 2D data (b, c, x, y): scale the base filter by the sampled
                                # strength and add 1 at the kernel centre so the original
                                # image is kept and the filter response is added on top
                                filter_here = self.filter_2d * strength_here
                                filter_here[1, 1] += 1
                            else:
                                # 3D data (b, c, x, y, z): same idea with the 3D filter
                                filter_here = self.filter_3d * strength_here
                                filter_here[1, 1, 1] += 1
                            data[b, c] = convolve(data[b, c],
                                                  filter_here,
                                                  mode='same')
                            data[b, c] = np.clip(data[b, c], mn, mx)
        return data_dict


if __name__ == '__main__':
    from copy import deepcopy
    from skimage.data import camera

    # just some playing around with MedianFilterTransform
    data = {
        'data':
        np.vstack((camera()[None], camera()[None],
                   camera()[None]))[None, None].astype(np.float32)
    }
    tr = MedianFilterTransform((1, 20), True)
    transformed = tr(**deepcopy(data))['data']
    from batchviewer import view_batch

    view_batch(*data['data'][0], *transformed[0])
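    # Standalone sketch of the sharpening logic used in the transform above. The actual
    # self.filter_2d is not visible in this snippet; a Laplacian-style kernel is assumed
    # here purely for illustration. Scaling it by a strength and adding 1 at the centre
    # gives "identity + strength * high-pass", which is then convolved with the image.
    from scipy.signal import convolve

    base_filter_2d = np.array([[0, -1, 0],
                               [-1, 4, -1],
                               [0, -1, 0]], dtype=np.float32)  # assumed kernel
    strength = 0.3
    kernel = base_filter_2d * strength
    kernel[1, 1] += 1  # keep the original image and add the scaled high-pass response
    img = camera().astype(np.float32)
    sharpened = np.clip(convolve(img, kernel, mode='same'), img.min(), img.max())
    view_batch(img[None], sharpened[None])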
Example 4
        intens_corrected = np.clip(ret * (filter_orig / filter_aug), data.min(), data.max())
        #import IPython;IPython.embed()
        data_dict['data'] = intens_corrected
        return data_dict"""

if __name__ == '__main__':
    from copy import deepcopy
    from skimage.data import camera

    # just some playing around with LocalContastTransform
    data = {
        'data':
        np.vstack((camera()[None], camera()[None],
                   camera()[None]))[None].astype(np.float32)
    }
    tr = LocalContastTransform(
        lambda x, y: np.random.uniform(x[y] // 10, x[y] // 4),
        #lambda x, y: np.random.uniform(-1, 0) if np.random.uniform() < 0.5 else np.random.uniform(1, 2),
        (0, 1),
        #lambda: np.random.uniform(0.0001, 0.01) if np.random.uniform() < 0.5 else np.random.uniform(1, 10),
        1e-90,
        same_for_all_channels=False)
    transformed = tr(**deepcopy(data))['data']
    from batchviewer import view_batch
    # pin two pixels per channel to 0 and 255 in both arrays (presumably so view_batch shows
    # original and transformed with the same intensity window)
    data['data'][0][:, 0:2, 0] = np.array((0, 255))
    transformed[0][:, 0:2, 0] = np.array((0, 255))
    diff = [i - j for i, j in zip(data['data'][0], transformed[0])]
    for i in diff:
        print(i[10, 10])
    view_batch(*data['data'][0], *transformed[0],
               *[i - j for i, j in zip(data['data'][0], transformed[0])])
Example 5
"""
Created on Tue Jul 28 12:30:15 2020

@author: rtgun
"""
from batchviewer import view_batch
import numpy as np
import nibabel as nib

data = nib.load("d010_pre0_dat.nii.gz")
data = data.get_fdata()
print(data.shape)

# add a leading axis: (x, y, z) -> (1, x, y, z)
data = data[None]

# move the last spatial axis to the front: (1, x, y, z) -> (1, z, x, y)
data1 = np.transpose(data, axes=(0, 3, 1, 2))
print(data1.shape)

# same, but with x and y swapped: (1, x, y, z) -> (1, z, y, x)
data2 = np.transpose(data, axes=(0, 3, 2, 1))
print(data2.shape)

# rotate both variants in-plane by 180 degrees
data3 = np.rot90(data1, k=2, axes=(-2, -1))
print(data3.shape)

data4 = np.rot90(data2, k=2, axes=(-1, -2))
print(data4.shape)

# stack the four orientation variants along the first axis
data_f = np.concatenate((data1, data2, data3, data4))

print(data_f.shape)
view_batch(data1, width=500, height=500)
    train, val = get_split_deterministic(patients, fold=0, num_splits=5, random_state=12345)

    patch_size = (160, 160)
    batch_size = 48

    # I recommend you don't use 'iteration over all training data' as epoch because in patch based training this is
    # really not super well defined. If you leave all arguments as default then each batch will contain randomly
    # selected patients. Since we don't care about epochs here we can set num_threads_in_multithreaded to anything.
    dataloader = BraTS2017DataLoader2D(train, batch_size, patch_size, 1)

    batch = next(dataloader)
    try:
        from batchviewer import view_batch
        # batch viewer can show up to 4d tensors. We can show only one sample, but that should be sufficient here
        view_batch(np.concatenate((batch['data'][0], batch['seg'][0]), 0)[:, None])
    except ImportError:
        view_batch = None
        print("you can visualize batches with batchviewer. It's a nice and handy tool. You can get it here: "
              "https://github.com/FabianIsensee/BatchViewer")

    # now we have some DataLoader. Let's go and get some augmentations

    # first let's collect all shapes, you will see why later
    shapes = [BraTS2017DataLoader2D.load_patient(i)[0].shape[2:] for i in patients]
    max_shape = np.max(shapes, 0)
    max_shape = np.max((max_shape, patch_size), 0)

    # we create a new instance of DataLoader. This one will return batches of shape max_shape. Cropping/padding is
    # now done by SpatialTransform. If we do it this way we avoid border artifacts (the entire brain of all cases will
    # be in the batch and SpatialTransform will use zeros which is exactly what we have outside the brain)
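    # Minimal sketch of how this example presumably continues; the exact transform setup and
    # parameters below are assumptions for illustration, not the original code. The new
    # DataLoader returns whole cases padded to max_shape and SpatialTransform does the random
    # cropping to patch_size, padding with zeros where needed.
    from batchgenerators.transforms.abstract_transforms import Compose
    from batchgenerators.transforms.spatial_transforms import SpatialTransform
    from batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter

    dataloader_train = BraTS2017DataLoader2D(train, batch_size, max_shape, 1)
    tr_transforms = Compose([
        SpatialTransform(patch_size, random_crop=True,
                         border_mode_data='constant', border_cval_data=0)
    ])
    mt_gen = MultiThreadedAugmenter(dataloader_train, tr_transforms, 4)  # 4 worker processes
    batch = next(mt_gen)
    if view_batch is not None:
        view_batch(np.concatenate((batch['data'][0], batch['seg'][0]), 0)[:, None])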