def Bspline_and_Affine(img, mask):
    # Define a random rotation (affine) transformation object
    angle = random.randrange(-1, 2, 2) * np.pi / random.randint(6, 16)
    center_point_x = 0.1 * random.randint(3, 7)
    center_point_y = 0.1 * random.randint(3, 7)
    affine = gryds.AffineTransformation(
        ndim=2,
        angles=[angle],  # List of angles (for 3D transformations you need a list of 3 angles).
        center=[center_point_x, center_point_y])  # Center of rotation.
    
    # Define a random 3x3 B-spline grid for a 2D image:
    random_grid = np.random.rand(2, 3, 3)
    random_grid -= 0.5
    random_grid /= 5
    
    # Define a B-spline transformation object
    bspline = gryds.BSplineTransformation(random_grid)
    
    # Define an interpolator object for the image:
    interpolator_img = gryds.Interpolator(img)
    interpolator_mask = gryds.Interpolator(mask)
    
    # Transform the image using both transformations. The B-spline is applied to the
    # sampling grid first, and the affine transformation second. From the
    # perspective of the image itself, the order will seem reversed (!).
    transformed_image = interpolator_img.transform(bspline, affine)
    transformed_mask = interpolator_mask.transform(bspline, affine)
    return transformed_image, transformed_mask
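A minimal usage sketch for Bspline_and_Affine, assuming `img` and `mask` are equally sized 2D NumPy arrays scaled to [0, 1] and that random and gryds are imported at module level, as in the surrounding snippets; the dummy data and the 0.5 re-threshold are illustrative only.

import numpy as np

dummy_img = np.random.rand(128, 128)
dummy_mask = (dummy_img > 0.5).astype(float)

aug_img, aug_mask = Bspline_and_Affine(dummy_img, dummy_mask)

# The mask is resampled with the interpolator's default spline order, so it is
# no longer strictly binary; re-threshold it if a hard label map is needed.
aug_mask = (aug_mask > 0.5).astype(float)
print(aug_img.shape, aug_mask.shape)  # (128, 128) (128, 128)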
Example #2
    def __call__(self, image, label):
        image = image.reshape(256, 256)
        label = label.reshape(256, 256)
        ia = False  # intensity-augmentation flag; the intensity/contrast block below is skipped while False
        random_grid = np.random.rand(2, 7, 7)
        random_grid -= 0.5
        random_grid /= 12
        # Define a B-spline transformation object
        bspline_trf = gryds.BSplineTransformation(random_grid)

        # rotate between -pi/8 and pi/8
        rot = np.random.rand() * np.pi / 4 - np.pi / 8
        # scale between 0.9 and 1.1
        scale_x = np.random.rand() * 0.2 + 0.9
        scale_y = np.random.rand() * 0.2 + 0.9
        # translate between -10% and 10%
        trans_x = np.random.rand() * .2 - .1
        trans_y = np.random.rand() * .2 - .1

        affine_trf = gryds.AffineTransformation(
            ndim=2,
            angles=[rot],  # the rotation angle
            scaling=[scale_x, scale_y],  # the anisotropic scaling
            translation=[trans_x, trans_y],  # translation
            center=[0.5, 0.5]  # center of rotation
        )
        composed_trf = gryds.ComposedTransformation(bspline_trf, affine_trf)

        interpolator = gryds.Interpolator(image[:, :], mode='reflect')

        interpolator_label = gryds.Interpolator(label[:, :],
                                                order=0,
                                                mode='constant')

        patch = interpolator.transform(composed_trf)
        patch_label = interpolator_label.transform(composed_trf)

        if ia:
            intensity_shift = np.random.rand() * .1 - .05
            contrast_shift = np.random.rand() * 0.05 + 0.975

            patch += intensity_shift
            patch = np.sign(patch) * np.power(np.abs(patch), contrast_shift)

        blur = np.random.uniform()
        patch = gaussian_filter(patch, sigma=blur)

        p5, p95 = np.percentile(patch, (5, 95))
        patch = (patch - p5) / (p95 - p5)
        patch = equalize_adapthist(np.clip(patch, 1e-5, 1),
                                   kernel_size=24)[..., np.newaxis]
        patch += np.random.normal(scale=0.025, size=patch.shape)

        return patch, patch_label
Example #3
def data_augmentation(images, labels, how_many):

    augmented_images = np.zeros(
        (images.shape[0], images.shape[1], images.shape[2] * how_many))
    augmented_labels = np.zeros(
        (labels.shape[0], labels.shape[1], labels.shape[2] * how_many))

    for i in range(images.shape[2]):
        for j in range(how_many):
            img_sa = images[:, :, i]
            # normalise data
            p5 = np.percentile(img_sa, 5)
            p95 = np.percentile(img_sa, 95)
            img_sa = (img_sa - p5) / (p95 - p5)
            # affine transformation
            affine_transformation = gryds.AffineTransformation(
                ndim=2,
                angles=[np.random.uniform(-np.pi / 8.,
                                          np.pi / 8.)],  # the rotation angle
                scaling=[
                    np.random.uniform(0.8, 1.2),
                    np.random.uniform(0.8, 1.2)
                ],  # the anisotropic scaling
                # shear_matrix=[[1, 0.5], [0, 1]], # shearing matrix
                translation=[
                    np.random.uniform(-0.2, 0.2),
                    np.random.uniform(-0.2, 0.2)
                ],  # translation
                center=[0.5, 0.5]  # center of rotation
            )
            # Define a random 3x3 B-spline grid for a 2D image:
            random_grid = np.random.rand(2, 3, 3)
            random_grid -= 0.5
            random_grid /= 5
            # Define a B-spline transformation object
            bspline = gryds.BSplineTransformation(random_grid)
            # Define an interpolator object for the image:
            interpolator_sa = gryds.Interpolator(img_sa)
            interpolator_gt = gryds.Interpolator(labels[:, :, i],
                                                 order=0)  # img_gt

            composed_trf = gryds.ComposedTransformation(
                bspline, affine_transformation)

            augmented_images[:, :, i * how_many + j] = np.clip(
                interpolator_sa.transform(composed_trf), 0, 1)
            augmented_labels[:, :, i * how_many +
                             j] = interpolator_gt.transform(composed_trf)

    augmented_images = augmented_images[np.newaxis, ...]
    augmented_images = np.transpose(augmented_images, (3, 0, 1, 2))
    augmented_labels = np.transpose(augmented_labels, (2, 0, 1))

    return augmented_images, augmented_labels
def Bspline(img, mask):
    # Define a random 3x3 B-spline grid for a 2D image:
    random_grid = np.random.rand(2, 3, 3)
    random_grid -= 0.5
    random_grid /= 5

    # Define a B-spline transformation object
    bspline = gryds.BSplineTransformation(random_grid)

    # Define an interpolator object for the image:
    interpolator_img = gryds.Interpolator(img)
    interpolator_mask = gryds.Interpolator(mask)
    
    # Transform the image using the B-spline transformation
    transformed_image = interpolator_img.transform(bspline)
    transformed_mask = interpolator_mask.transform(bspline)

    return transformed_image, transformed_mask
def Affine(img, mask):
    # Define a random rotation (affine) transformation object
    angle = random.randrange(-1, 2, 2) * np.pi / random.randint(6, 16)
    center_point_x = 0.1 * random.randint(3, 7)
    center_point_y = 0.1 * random.randint(3, 7)
    affine = gryds.AffineTransformation(
        ndim=2,
        angles=[angle],  # List of angles (for 3D transformations you need a list of 3 angles).
        center=[center_point_x, center_point_y])  # Center of rotation.
    
    # Define an interpolator object for the image:
    interpolator_img = gryds.Interpolator(img)
    interpolator_mask = gryds.Interpolator(mask)
    
    # Transform image and mask using Affine transformation
    transformed_image = interpolator_img.transform(affine)
    transformed_mask = interpolator_mask.transform(affine)
    return transformed_image, transformed_mask
Example #6
    def test_2d_bspline_interpolator_90_deg_rotation(self):
        image = np.array([[0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [1, 1, 1, 1, 1],
                          [0, 0, 1, 0, 0], [0, 0, 1, 0, 0]],
                         dtype=DTYPE)
        intp = gryds.Interpolator(image)
        trf = gryds.AffineTransformation(ndim=2,
                                         angles=[np.pi / 2.],
                                         center=[0.4, 0.4])
        new_image = intp.transform(trf, mode='mirror').astype(DTYPE)
        np.testing.assert_almost_equal(image, new_image, decimal=4)
def Affine(img, mask):
    center_point_x = 0.1 * random.randint(4, 6)
    center_point_y = 0.1 * random.randint(4, 6)
    affine = gryds.AffineTransformation(
        ndim=2,
        angles=[
            random.randrange(-1, 2, 2) * (np.pi / (random.randint(50, 60)))
        ],  # List of angles (for 3D transformations you need a list of 3 angles).
        center=[center_point_x, center_point_y]  # Center of rotation.
    )

    # Define an interpolator object for the image:
    interpolator_img = gryds.Interpolator(img)
    interpolator_mask = gryds.Interpolator(mask)

    # Transform the image using Affine
    transformed_image = interpolator_img.transform(affine)
    transformed_mask = interpolator_mask.transform(affine)

    return transformed_image, transformed_mask
def geometric_aug(train_images, train_segmentations):

    mult = 6  # number of augmented images generated from each original image
    sh = np.shape(train_images)
    images = np.zeros((sh[0] * mult, sh[1], sh[2], sh[3]))
    segmentations = np.zeros((sh[0] * mult, sh[1], sh[2], 1))
    random_grids = np.zeros((mult, 2, 3, 3))

    for trans_idx in range(mult):

        # Define a random 3x3 B-spline grid for a 2D image
        random_grid = np.random.rand(2, 3, 3)
        random_grid -= 0.5
        random_grid /= 20
        random_grids[trans_idx, :, :, :] = random_grid

    for idx, image in enumerate(train_images):
        for trans_idx in range(mult):

            # Define a B-spline transformation object
            bspline = gryds.BSplineTransformation(
                random_grids[trans_idx, :, :, :])

            interpolator_seg = gryds.Interpolator(
                train_segmentations[idx, :, :, 0])
            segmentations[idx * mult + trans_idx, :, :,
                          0] = interpolator_seg.transform(bspline)

            for channel in range(sh[3]):
                # Define an interpolator object for the image
                interpolator_image = gryds.Interpolator(train_images[idx, :, :,
                                                                     channel])

                # Transform the image and add to the new images
                images[idx * mult + trans_idx, :, :,
                       channel] = interpolator_image.transform(bspline)

    return images, segmentations
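A minimal usage sketch for geometric_aug with dummy shapes (4 two-channel 64x64 training images and one-channel segmentations); each input image yields mult = 6 B-spline-deformed copies, so the batch dimension grows sixfold.

import numpy as np

train_images = np.random.rand(4, 64, 64, 2)
train_segmentations = (np.random.rand(4, 64, 64, 1) > 0.5).astype(float)

aug_images, aug_segmentations = geometric_aug(train_images, train_segmentations)
print(aug_images.shape)         # (24, 64, 64, 2)
print(aug_segmentations.shape)  # (24, 64, 64, 1)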
Example #9
    def test_translation(self):
        image = np.array([[0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [1, 1, 1, 1, 1],
                          [0, 0, 1, 0, 0], [0, 0, 1, 0, 0]],
                         dtype=DTYPE)
        intp = gryds.Interpolator(image, mode='mirror')

        trf1 = gryds.TranslationTransformation([0.1, 0])
        trf2 = gryds.TranslationTransformation([-0.1, 0])

        trf = gryds.ComposedTransformation(trf2, trf1)

        new_image = intp.transform(trf)
        np.testing.assert_almost_equal(image, new_image)
Example #10
    def test_rotation(self):
        image = np.array([[0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [1, 1, 1, 1, 1],
                          [0, 0, 1, 0, 0], [0, 0, 1, 0, 0]],
                         dtype=DTYPE)
        intp = gryds.Interpolator(image, mode='mirror')

        trf1 = gryds.AffineTransformation(ndim=2, angles=[0.1])
        trf2 = gryds.AffineTransformation(ndim=2, angles=[-0.1])

        trf = gryds.ComposedTransformation(trf2, trf1)

        new_image = intp.transform(trf)
        np.testing.assert_almost_equal(image, new_image, decimal=6)
Example #11
def Bspline_and_Affine_flipped(img, mask):
    # Define a random rotation (affine) transformation object
    center_point_x = 0.1 * random.randint(4, 6)
    center_point_y = 0.1 * random.randint(4, 6)
    affine = gryds.AffineTransformation(
        ndim=2,
        angles=[
            random.randrange(-1, 2, 2) * (np.pi / (random.randint(50, 60)))
        ],  # List of angles (for 3D transformations you need a list of 3 angles).
        center=[center_point_x, center_point_y]  # Center of rotation.
    )

    # Define a random 3x3 B-spline grid for a 2D image:
    random_grid = np.random.rand(2, 3, 3)
    random_grid -= 0.5
    random_grid /= 10

    # Define a B-spline transformation object
    bspline = gryds.BSplineTransformation(random_grid)

    # Define an interpolator object for the image:
    interpolator_img = gryds.Interpolator(img)
    interpolator_mask = gryds.Interpolator(mask)

    # Transform the image using both transformations. The B-spline is applied to the
    # sampling grid first, and the affine transformation second. From the
    # perspective of the image itself, the order will seem reversed (!).
    transformed_image = interpolator_img.transform(bspline, affine)
    transformed_mask = interpolator_mask.transform(bspline, affine)

    img = torch.from_numpy(transformed_image.copy())
    mask = torch.from_numpy(transformed_mask.copy())
    flipped_img = torchvision.transforms.functional.hflip(
        img=img)  # change to .vflip for vertical flip
    flipped_mask = torchvision.transforms.functional.hflip(img=mask)
    transformed_image = flipped_img.cpu().detach().numpy()
    transformed_mask = flipped_mask.cpu().detach().numpy()

    return transformed_image, transformed_mask
Example #12
def gryds_function(images, segmentations):
    # input (images): an array of input images
    # input (segmentations): the segmentations of the corresponding input images

    # output (new_image): a geometrically augmented array of images, deformed with a random B-spline grid and rotated by a fixed 45 degrees
    # output (new_segmentation): the correspondingly augmented segmentations

    import numpy as np
    import gryds
    new_image = []
    new_segmentation = []
    for i in range(len(images)):

        affine = gryds.AffineTransformation(
            ndim=2,
            angles=[
                np.pi / 4.
            ],  # List of angles (for 3D transformations you need a list of 3 angles).
            center=[0.5, 0.5]  # Center of rotation.
        )

        random_grid = np.random.rand(2, 3, 3)
        random_grid -= 0.5
        random_grid /= 20

        bspline = gryds.BSplineTransformation(random_grid)
        # define interpolator of the input_image
        interpolator = gryds.MultiChannelInterpolator(images[i],
                                                      order=0,
                                                      cval=[.1, .2, .3],
                                                      mode='nearest')

        # define interpolator of the segmentation image
        interpolator_segmentation = gryds.Interpolator(segmentations[i][:, :, 0],
                                                        mode='constant')

        # transform the input image
        transformed_image = interpolator.transform(bspline, affine)

        # transform the segmentation image
        transformed_segmentation = interpolator_segmentation.transform(
            bspline, affine)

        #add results into lists
        new_segmentation.append(np.clip(transformed_segmentation, 0, 1))
        new_image.append(np.clip(transformed_image, 0, 1))

    return np.array(new_image), np.array(new_segmentation)
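A minimal usage sketch for gryds_function, assuming `images` holds three-channel 2D images (the cval list above implies three channels) and `segmentations` carries the label map in its first channel; dummy data only.

import numpy as np

images = np.random.rand(2, 64, 64, 3)
segmentations = (np.random.rand(2, 64, 64, 1) > 0.5).astype(float)

aug_images, aug_segmentations = gryds_function(images, segmentations)
print(aug_images.shape, aug_segmentations.shape)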
Example #13
    def test_2d_bspline_interpolator_45_deg_rotation(self):
        image = np.array([[0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [1, 1, 1, 1, 1],
                          [0, 0, 1, 0, 0], [0, 0, 1, 0, 0]],
                         dtype=DTYPE)
        expected = np.array(
            [[1., 0.2929, 0., 0.2929, 1.], [0.2929, 1., 0.5, 1., 0.2929],
             [0., 0.5, 1., 0.5, 0.], [0.2929, 1., 0.5, 1., 0.2929],
             [1., 0.2929, 0., 0.2929, 1.]],
            dtype=DTYPE)
        intp = gryds.Interpolator(image, order=1, mode='mirror')
        trf = gryds.AffineTransformation(ndim=2,
                                         angles=[np.pi / 4.],
                                         center=[0.4, 0.4])
        new_image = intp.transform(trf).astype(DTYPE)
        np.testing.assert_almost_equal(expected, new_image, decimal=4)
Example #14
def do_domain_augmentation2(image, sz):

    random_grid = np.random.rand(2, 7, 7)
    random_grid -= 0.5
    random_grid /= 10
    # Define a B-spline transformation object
    bspline_trf = gryds.BSplineTransformation(random_grid)

    # rotate between -pi/8 and pi/8
    rot = np.random.rand() * np.pi / 4 - np.pi / 8
    # scale between 0.9 and 1.1
    scale_x = np.random.rand() * 0.2 + 0.9
    scale_y = np.random.rand() * 0.2 + 0.9
    # translate between -10% and 10%
    trans_x = np.random.rand() * .2 - .1
    trans_y = np.random.rand() * .2 - .1

    affine_trf = gryds.AffineTransformation(
        ndim=2,
        angles=[rot],  # the rotation angle
        scaling=[scale_x, scale_y],  # the anisotropic scaling
        translation=[trans_x, trans_y],  # translation
        center=[0.5, 0.5]  # center of rotation
    )
    composed_trf = gryds.ComposedTransformation(bspline_trf, affine_trf)

    z_ind = np.random.randint(image.shape[2])
    t_ind = np.random.randint(2)
    interpolator = gryds.Interpolator(image[..., z_ind, t_ind], mode='reflect')

    patch = interpolator.transform(composed_trf)

    patch += np.random.normal(scale=0.025, size=patch.shape)

    blur = np.random.uniform()
    patch = gaussian_filter(patch, sigma=blur)

    midx = image.shape[0] // 2
    midy = image.shape[1] // 2
    patch = patch[midx - (sz[0] // 2):midx + (sz[0] // 2),
                  midy - (sz[1] // 2):midy + (sz[1] // 2)]
    p5, p95 = np.percentile(patch, (5, 95))
    patch = (patch - p5) / (p95 - p5)
    patch = equalize_adapthist(np.clip(patch, 0, 1))[..., np.newaxis]

    return patch
def b_spline(images, segmentations, patch_size, patches_per_im, seed):

    # Define a random 3x3x1x1 B-spline grid for a 4D image:
    random_grid = np.random.rand(4, 3, 3, 1, 1)
    random_grid -= 0.5
    random_grid /= 5

    # Define a B-spline transformation object
    bspline = gryds.BSplineTransformation(random_grid)

    # Define an interpolator object for the image:
    interpolator = gryds.Interpolator(images)

    # Transform the image using the B-spline transformation
    images = interpolator.transform(bspline)

    #Define a random brightness shift
    random_matrix_array = 0.01 * np.random.rand(1, 1, 1, 1)
    images = images + random_matrix_array

    # The total amount of patches that will be obtained
    inp_size = len(images) * patches_per_im
    # Allocate memory for the patches and segmentations of the patches
    x = np.zeros((inp_size, patch_size[0], patch_size[1], images.shape[-1]))
    y = np.zeros(
        (inp_size, patch_size[0], patch_size[1], segmentations.shape[-1]))

    # Loop over all the images (and corresponding segmentations) and extract random patches
    # using the extract_patches_2d function of scikit learn
    for idx, (im, seg) in enumerate(zip(images, segmentations)):
        # Note the random seed to ensure the corresponding segmentation is extracted for each patch
        x[idx * patches_per_im:(idx + 1) *
          patches_per_im] = extract_patches_2d(im,
                                               patch_size,
                                               max_patches=patches_per_im,
                                               random_state=seed)
        y[idx * patches_per_im:(idx + 1) * patches_per_im] = np.expand_dims(
            extract_patches_2d(seg,
                               patch_size,
                               max_patches=patches_per_im,
                               random_state=seed),
            axis=-1)

    return x, y
def random_deformation_gen(image, bspline_shape=(3, 3), std=0.15):
    assert image.shape[0] == image.shape[1]
    bspline_grid_shape = (len(image.shape), ) + bspline_shape
    bspline_grid = np.random.rand(*bspline_grid_shape) * std

    a_bspline_transformation = gryds.BSplineTransformation(bspline_grid)
    an_image_interpolator = gryds.Interpolator(image, order=1)

    an_image_grid = gryds.Grid(
        image.shape)  # makes a Grid the size of the image
    a_deformed_image_grid = an_image_grid.transform(a_bspline_transformation)
    a_deformed_image = an_image_interpolator.resample(a_deformed_image_grid)

    f = (a_deformed_image_grid.grid - an_image_grid.grid) * image.shape[0]
    flow = np.ndarray(image.shape + (len(image.shape), ))

    for i in range(len(image.shape)):
        flow[..., i] = f[i, ...]

    return a_deformed_image, flow
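A minimal usage sketch for random_deformation_gen on a dummy square image; the second return value is a dense displacement field of shape image.shape + (ndim,), scaled from normalised grid coordinates to pixels by the image side length.

import numpy as np

image = np.random.rand(64, 64)
deformed, flow = random_deformation_gen(image, bspline_shape=(3, 3), std=0.15)
print(deformed.shape)  # (64, 64)
print(flow.shape)      # (64, 64, 2)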
Example #17
def do_augmentation2(image, label, sz, ia=False):

    random_grid = np.random.rand(2, 7, 7)
    random_grid -= 0.5
    random_grid /= 12
    # Define a B-spline transformation object
    bspline_trf = gryds.BSplineTransformation(random_grid)

    # rotate between -pi/8 and pi/8
    rot = np.random.rand() * np.pi / 4 - np.pi / 8
    # scale between 0.9 and 1.1
    scale_x = np.random.rand() * 0.2 + 0.9
    scale_y = np.random.rand() * 0.2 + 0.9
    # translate between -10% and 10%
    trans_x = np.random.rand() * .2 - .1
    trans_y = np.random.rand() * .2 - .1

    affine_trf = gryds.AffineTransformation(
        ndim=2,
        angles=[rot],  # the rotation angle
        scaling=[scale_x, scale_y],  # the anisotropic scaling
        translation=[trans_x, trans_y],  # translation
        center=[0.5, 0.5]  # center of rotation
    )
    composed_trf = gryds.ComposedTransformation(bspline_trf, affine_trf)

    z_ind = np.random.randint(image.shape[2])
    t_ind = np.random.randint(2)
    interpolator = gryds.Interpolator(image[..., z_ind, t_ind], mode='reflect')

    interpolator_label = gryds.Interpolator(label[..., z_ind, t_ind],
                                            order=0,
                                            mode='constant')

    patch = interpolator.transform(composed_trf)
    patch_label = interpolator_label.transform(composed_trf)

    if ia:
        intensity_shift = np.random.rand() * .1 - .05
        contrast_shift = np.random.rand() * 0.05 + 0.975

        patch += intensity_shift
        patch = np.sign(patch) * np.power(np.abs(patch), contrast_shift)

    blur = np.random.uniform()
    patch = gaussian_filter(patch, sigma=blur)

    # midx = image.shape[0] // 2
    # midy = image.shape[1] // 2
    if patch.shape[0] > sz[0] and patch.shape[1] > sz[1]:
        all_startx = [
            0, patch.shape[0] // 2 - sz[0] // 2, patch.shape[0] - sz[0]
        ]
        all_starty = [
            0, patch.shape[1] // 2 - sz[1] // 2, patch.shape[1] - sz[1]
        ]
        xrint = np.random.randint(3)
        yrint = np.random.randint(3)
        midx = all_startx[xrint]
        midy = all_starty[yrint]

        patch = patch[midx:midx + sz[0], midy:midy + sz[1]]
        patch_label = patch_label[midx:midx + sz[0], midy:midy + sz[1]]
    else:
        patch = patch[:sz[0], :sz[1]]
        patch_label = patch_label[:sz[0], :sz[1]]
        new_patch = np.zeros((sz[0], sz[1]))
        new_patch_label = np.zeros((sz[0], sz[1]))
        new_patch[:patch.shape[0], :patch.shape[1]] = patch
        new_patch_label[:patch_label.shape[0], :patch_label.
                        shape[1]] = patch_label
        patch, patch_label = new_patch, new_patch_label

    # patch = patch[midx-(sz[0]//2):midx+(sz[0]//2),midy-(sz[1]//2):midy+(sz[1]//2)]
    p5, p95 = np.percentile(patch, (5, 95))
    patch = (patch - p5) / (p95 - p5)
    patch = equalize_adapthist(np.clip(patch, 1e-5, 1),
                               kernel_size=24)[..., np.newaxis]
    patch += np.random.normal(scale=0.025, size=patch.shape)

    # patch = np.clip(patch, 0, 1)[...,np.newaxis]

    # patch_label = patch_label[midx-(sz[0]//2):midx+(sz[0]//2),midy-(sz[1]//2):midy+(sz[1]//2)]

    return (patch, patch_label)
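A minimal usage sketch for do_augmentation2, assuming a (height, width, slices, time) volume with at least two time points and a matching label volume; passing ia=True also enables the intensity and contrast shift. Dummy data only.

import numpy as np

image = np.random.rand(200, 200, 8, 2)
label = (np.random.rand(200, 200, 8, 2) > 0.5).astype(float)

patch, patch_label = do_augmentation2(image, label, sz=(128, 128), ia=True)
print(patch.shape)        # (128, 128, 1)
print(patch_label.shape)  # (128, 128)
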
def deformable_data_augmentation(images, images_gt):
    '''
    Images and images_gt should be 3D numpy arrays in which the first index enumerates the 2D images.
    This function performs a B-spline transformation, rotation, translation, brightness change and samplewise normalization on the augmented images.
    The output is a pair of 4D numpy arrays containing the original and the augmented data (images and masks).
    '''

    #The nr_epochs states how many data augmentations are performed
    nr_epochs = 2

    #Make an empty list for the deformed images and masks
    deformed_images = []
    deformed_images_gt = []

    #Define the augmentation apart from B-spline transformation for the images
    datagen = dict(rotation_range=5,
                   samplewise_center=True,
                   samplewise_std_normalization=True,
                   brightness_range=[0.95, 1.05],
                   width_shift_range=5,
                   height_shift_range=5)

    #Define the augmentation apart from B-spline transformation for the masks (these should not be normalized because they are binary)
    datagen_gt = dict(rotation_range=5,
                      width_shift_range=5,
                      height_shift_range=5)

    #Call the ImageDataGenerator
    train_datagen = ImageDataGenerator(**datagen)
    train_datagen_gt = ImageDataGenerator(**datagen_gt)

    print("Performing data augmentation... \n")
    for epoch in range(nr_epochs):
        print('Epoch', epoch)

        #Perform B-spline transformation with the use of "Gryds"
        for image in range(images.shape[0]):
            #By default the mode of the interpolator is constant which interpolates a value of 0 at the border.
            #Alternatively, you can choose a mode for other interpolation: nearest, mirror, wrap or reflect
            an_image_interpolator = gryds.Interpolator(images[image])
            an_image_gt_interpolator = gryds.Interpolator(images_gt[image])

            #Randomly generate a 2D displacement matrix of size (3,3) for the i and j direction
            disp_i = np.random.uniform(low=-0.05, high=0.05, size=(3, 3))
            disp_j = np.random.uniform(low=-0.05, high=0.05, size=(3, 3))

            #Calculate the transformation
            transformation = gryds.BSplineTransformation([disp_i, disp_j])

            #Apply the transformation to the image and corresponding mask
            deformed_image = an_image_interpolator.transform(transformation)
            deformed_image_gt = an_image_gt_interpolator.transform(
                transformation)

            #Save the deformed image and mask in a list
            deformed_images.append(deformed_image)
            deformed_images_gt.append(deformed_image_gt)

    #Convert list to numpy array
    deformed_images = np.array(deformed_images)
    deformed_images_gt = np.array(deformed_images_gt)

    #Convert 3D to 4D numpy array
    deformed_images = np.reshape(deformed_images,
                                 newshape=(*deformed_images.shape, 1))
    deformed_images_gt = np.reshape(deformed_images_gt,
                                    newshape=(*deformed_images_gt.shape, 1))

    #Make an empty list for the resulting augmented images and masks
    augmented_images = []
    augmented_images_gt = []

    #Perform rotation, translation, brightness change and normalization to the deformed images
    batches = 0
    for batch in train_datagen.flow(deformed_images, batch_size=1, seed=0):
        batches += 1
        augmented_images.append(batch[0, :, :, :])
        if batches >= len(deformed_images):
            # we need to break the loop by hand because
            # the generator loops indefinitely
            break

    #Perform rotation, translation and brightness change to the corresponding masks of the deformed images
    #These augmentations are the same as for the deformed images, because of the seed
    batches = 0
    for batch in train_datagen_gt.flow(deformed_images_gt,
                                       batch_size=1,
                                       seed=0):
        batches += 1
        augmented_images_gt.append(batch[0, :, :, :])
        if batches >= len(deformed_images_gt):
            # we need to break the loop by hand because
            # the generator loops indefinitely
            break

    #Convert list to numpy array
    augmented_images = np.array(augmented_images)
    augmented_images_gt = np.array(augmented_images_gt)

    #Convert original images and masks from 3D to 4D numpy array
    images = np.reshape(images, newshape=(*images.shape, 1))
    images_gt = np.reshape(images_gt, newshape=(*images_gt.shape, 1))

    #Concatenate numpy arrays of original images and masks with the augmented images and masks
    augmented_images = np.concatenate((images, augmented_images), axis=0)
    augmented_images_gt = np.concatenate((images_gt, augmented_images_gt),
                                         axis=0)

    #Save the resulting 4D numpy arrays
    np.save("augmented_images", augmented_images)
    np.save("augmented_images_gt", augmented_images_gt)

    return augmented_images, augmented_images_gt
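A minimal usage sketch for deformable_data_augmentation, assuming ImageDataGenerator has been imported from tensorflow.keras.preprocessing.image at module level; note that the function also writes augmented_images.npy and augmented_images_gt.npy to the working directory.

import numpy as np

images = np.random.rand(4, 64, 64)
images_gt = (np.random.rand(4, 64, 64) > 0.5).astype(float)

aug, aug_gt = deformable_data_augmentation(images, images_gt)
# 4 originals + 2 augmentation epochs x 4 deformed copies = 12 entries per array
print(aug.shape, aug_gt.shape)  # (12, 64, 64, 1) (12, 64, 64, 1)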
Example #19
import time

import numpy as np
import gryds

# `bsp` is assumed to be a gryds.BSplineTransformation defined earlier in the
# original script; it is not reproduced in this snippet.
N = 1

Ns = range(0, 151, 1)
M = 10

# One untimed warm-up call so that CUDA initialisation is not counted in the timings below.
image = np.random.rand(N, 128, 128)
intp = gryds.BSplineInterpolatorCuda(image)
intp.transform(bsp)

times = []
for i in range(M):
    ts = []
    for N in Ns:
        print(i, N)
        image = np.random.rand(N, 128, 128)
        intp = gryds.Interpolator(image, order=1)
        t0 = time.time()
        intp.transform(bsp)
        ts.append(time.time() - t0)
    times.append(ts)
times = np.median(times, axis=0)

times_cuda = []
for i in range(M):
    ts = []
    for N in Ns:
        print(i, N)
        image = np.random.rand(N, 128, 128)
        intp = gryds.BSplineInterpolatorCuda(image, order=1)
        t0 = time.time()
        intp.transform(bsp)
        ts.append(time.time() - t0)
    times_cuda.append(ts)
times_cuda = np.median(times_cuda, axis=0)
Example #20
def augment(imA, imB):
    """
    Input day0 and day4 image of the same rat
    An augmented version of both is returned
    """
    #"3D" to 2D
    imA = imA.reshape((256, 256))
    imB = imB.reshape((256, 256))

    # Define a random B-spline displacement grid
    random_grid = np.random.rand(2, 3, 3)
    random_grid -= 0.5
    random_grid /= 10

    # Define the B-spline transformation object
    bspline = gryds.BSplineTransformation(random_grid)

    # Define a random translation transformation
    a_translation = gryds.TranslationTransformation(
        [random.uniform(-0.03, 0.03),
         random.uniform(-0.03, 0.03)])

    # Define a random rotation transformation
    a_rotation = gryds.AffineTransformation(
        ndim=2,
        angles=[random.uniform(-np.pi / 24, np.pi / 24)],  # ± 7.5 degrees
        center=[0.5, 0.5])

    # Define an image interpolator for each image
    an_image_interpolatorA = gryds.Interpolator(imA)
    an_image_interpolatorB = gryds.Interpolator(imB)

    # Combine all operations and apply the same augmentation to day0 and day4
    composed = gryds.ComposedTransformation(bspline, a_rotation, a_translation)
    transformed_imageA = an_image_interpolatorA.transform(composed)
    transformed_imageB = an_image_interpolatorB.transform(composed)

    # Define noise augmentation
    mu = 0.0
    sigma = random.uniform(0., 0.05)
    noise_mapA = np.random.normal(mu, sigma, size=np.size(imA)).reshape(
        (256, 256))
    noise_mapB = np.random.normal(mu, sigma, size=np.size(imB)).reshape(
        (256, 256))
    noise_mapA[transformed_imageA < 1e-2] = 0.
    noise_mapB[transformed_imageB < 1e-2] = 0.

    # Add noise to augmented image
    transformed_imageA = transformed_imageA + noise_mapA
    transformed_imageB = transformed_imageB + noise_mapB

    # Flip L/R (half of the time)
    perform_flip = random.choice([False, True])
    if perform_flip:
        transformed_imageA = np.fliplr(transformed_imageA)
        transformed_imageB = np.fliplr(transformed_imageB)

    return [
        transformed_imageA.reshape((256, 256, 1)),
        transformed_imageB.reshape((256, 256, 1))
    ]
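A minimal usage sketch for augment, assuming the paired day-0/day-4 scans are 256x256 single-channel arrays with intensities roughly in [0, 1]; dummy data only.

import numpy as np

imA = np.random.rand(256, 256, 1)
imB = np.random.rand(256, 256, 1)

augA, augB = augment(imA, imB)
print(augA.shape, augB.shape)  # (256, 256, 1) (256, 256, 1)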