Example #1
    def test_constraints(self):
        grid_warper = AffineGridWarperLayer(
            source_shape=(3, 3),
            output_shape=(2, 4),
            constraints=AffineWarpConstraints.no_shear_2d())
        self.assertEqual(grid_warper.constraints.constraints,
                         ((None, 0, None), (0, None, None)))
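For reference, the unconstrained 2-D warper consumes a flattened row-major 2x3 affine per batch item, as the identity affines in the later examples show. A minimal sketch of building a sampling grid that way, assuming NiftyNet's niftynet.layer.grid_warper module path (the import is an assumption, not part of the snippet above):

import tensorflow as tf
from niftynet.layer.grid_warper import AffineGridWarperLayer  # assumed module path

# One batch item; identity 2-D affine in row-major order [a11, a12, tx, a21, a22, ty].
identity_2d = tf.constant([[1., 0., 0., 0., 1., 0.]], dtype=tf.float32)
grid_warper = AffineGridWarperLayer(source_shape=(3, 3), output_shape=(2, 4))
sampling_grid = grid_warper(identity_2d)  # expected shape: (1, 2, 4, 2)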
Example #2
    def _test_simple_2d_images(self,
                               interpolation='linear',
                               boundary='replicate'):
        # rotating around the center (8, 8) by 15 degrees
        expected = [[0.96592583, -0.25881905, 2.34314575],
                    [0.25881905, 0.96592583, -1.79795897]]
        expected = np.asarray(expected).flatten()
        test_image, input_shape = get_multiple_2d_images()
        test_target, target_shape = get_multiple_2d_rotated_targets()

        identity_affine = [[1., 0., 0., 0., 1., 0.], [1., 0., 0., 0., 1., 0.],
                           [1., 0., 0., 0., 1., 0.], [1., 0., 0., 0., 1., 0.]]
        affine_var = tf.get_variable('affine', initializer=identity_affine)
        grid = AffineGridWarperLayer(source_shape=input_shape[1:-1],
                                     output_shape=target_shape[1:-1],
                                     constraints=None)
        warp_coords = grid(affine_var)
        resampler = ResamplerLayer(interpolation, boundary=boundary)
        new_image = resampler(tf.constant(test_image, dtype=tf.float32),
                              warp_coords)

        diff = tf.reduce_mean(
            tf.squared_difference(new_image,
                                  tf.constant(test_target, dtype=tf.float32)))
        learning_rate = 0.05
        if (interpolation == 'linear') and (boundary == 'zero'):
            learning_rate = 0.0003
        optimiser = tf.train.AdagradOptimizer(learning_rate)
        grads = optimiser.compute_gradients(diff)
        opt = optimiser.apply_gradients(grads)
        with self.cached_session() as sess:
            sess.run(tf.global_variables_initializer())
            init_val, affine_val = sess.run([diff, affine_var])
            # compute the MAE between the initial estimated parameters and the expected parameters
            init_var_diff = np.sum(np.abs(affine_val[0] - expected))
            for it in range(500):
                _, diff_val, affine_val = sess.run([opt, diff, affine_var])
                # print('{} diff: {}, {}'.format(it, diff_val, affine_val[0]))
            # import matplotlib.pyplot as plt
            # plt.figure()
            # plt.imshow(test_target[0])
            # plt.draw()

            # plt.figure()
            # plt.imshow(sess.run(new_image).astype(np.uint8)[0])
            # plt.draw()

            # plt.show()
            self.assertGreater(init_val, diff_val)
            # compute the MAE between the final estimated parameters and the expected parameters
            var_diff = np.sum(np.abs(affine_val[0] - expected))
            self.assertGreater(init_var_diff, var_diff)
            print('{} {} -- diff {}'.format(interpolation, boundary, var_diff))
            print('{}'.format(affine_val[0]))
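The expected 2x3 matrix hard-coded at the top of this test can be reproduced directly: rotating a point p about a centre c gives p' = R p + (c - R c). A short standalone NumPy check of those numbers:

import numpy as np

theta = np.deg2rad(15.0)
rot = np.array([[np.cos(theta), -np.sin(theta)],
                [np.sin(theta),  np.cos(theta)]])
centre = np.array([8.0, 8.0])
shift = centre - rot @ centre            # translation induced by rotating about the centre
print(np.hstack([rot, shift[:, None]]))
# [[ 0.96592583 -0.25881905  2.34314575]
#  [ 0.25881905  0.96592583 -1.79795897]]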
Example #3
    def test_simple_inverse(self):
        expected_grid = np.array(
            [[[[0.38, 0.08], [-0.02, 0.68], [-0.42, 1.28]],
              [[0.98, -0.32], [0.58, 0.28], [0.18, 0.88]],
              [[1.58, -0.72], [1.18, -0.12], [0.78, 0.48]]]],
            dtype=np.float32)

        inverse_grid = AffineGridWarperLayer(source_shape=(3, 3),
                                             output_shape=(2, 2)).inverse_op()
        aff = tf.constant([[1.5, 1.0, 0.2, 1.0, 1.5, 0.5]])
        output = inverse_grid(aff)
        with self.cached_session() as sess:
            out_val = sess.run(output)
        self.assertAllClose(out_val, expected_grid)
Example #4
    def test_combined(self):
        expected = [[[[[1], [2]], [[3], [4]]], [[[5], [6]], [[7], [8]]]],
                    [[[[9.5], [2.5]], [[11.5], [3.0]]],
                     [[[13.5], [3.5]], [[15.5], [4.0]]]]]
        affine_grid = AffineGridWarperLayer(source_shape=(2, 2, 3),
                                            output_shape=(2, 2, 2))
        test_grid = affine_grid(
            tf.constant([[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0],
                         [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, .5]],
                        dtype=tf.float32))
        self._test_correctness(inputs=get_3d_input1(),
                                grid=test_grid,
                                interpolation='idw',
                                boundary='replicate',
                                expected_value=expected)
Example #5
    def layer_op(self, input_tensor):
        sz = input_tensor.get_shape().as_list()
        grid_warper = AffineGridWarperLayer(sz[1:-1], sz[1:-1])

        resampler = ResamplerLayer(interpolation=self.interpolation,
                                   boundary=self.boundary)
        relative_transform = self.transform_func(sz[0])
        to_relative = tf.tile(
            [[[2. / (sz[1] - 1), 0., 0., -1.], [0., 2. / (sz[2] - 1), 0., -1.],
              [0., 0., 2. / (sz[3] - 1), -1.], [0., 0., 0., 1.]]],
            [sz[0], 1, 1])
        from_relative = tf.matrix_inverse(to_relative)
        voxel_transform = tf.matmul(from_relative,
                                    tf.matmul(relative_transform, to_relative))
        warp_parameters = tf.reshape(voxel_transform[:, 0:3, 0:4], [sz[0], 12])
        grid = grid_warper(warp_parameters)
        return resampler(input_tensor, grid)
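The to_relative / from_relative sandwich above conjugates a transform defined in normalised [-1, 1] coordinates into voxel coordinates. A one-axis NumPy sketch of the same change of basis (the axis length n is hypothetical, not taken from the snippet):

import numpy as np

n = 5                                           # hypothetical axis length
to_rel = np.array([[2.0 / (n - 1), -1.0],       # homogeneous voxel -> relative mapping
                   [0.0,            1.0]])
t_rel = np.array([[1.0, 0.5],                   # a shift of 0.5 in relative units
                  [0.0, 1.0]])
t_voxel = np.linalg.inv(to_rel) @ t_rel @ to_rel
# t_voxel[0, 1] == 0.5 * (n - 1) / 2 == 1.0 voxel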
Example #6
    def test_simple_inverse(self):
        expected_grid = np.array([[[[0.16, -0.44],
                                    [-0.64, 0.76],
                                    [-1.44, 1.96]],
                                   [[1.36, -1.24],
                                    [0.56, -0.04],
                                    [-0.24, 1.16]],
                                   [[2.56, -2.04],
                                    [1.76, -0.84],
                                    [0.96, 0.36]]]], dtype=np.float32)

        inverse_grid = AffineGridWarperLayer(source_shape=(3, 3),
                                             output_shape=(2, 2)).inverse_op()
        aff = tf.constant([[1.5, 1.0, 0.2, 1.0, 1.5, 0.5]])
        output = inverse_grid(aff)
        with self.test_session() as sess:
            out_val = sess.run(output)
        self.assertAllClose(out_val, expected_grid)
Example #7
    def layer_op(self, input_tensor):
        input_shape = input_tensor.shape.as_list()
        batch_size = input_shape[0]
        spatial_shape = input_shape[1:-1]
        spatial_rank = infer_spatial_rank(input_tensor)

        if self._transform is None:
            relative_transform = self._random_transform(
                batch_size, spatial_rank)
            self._transform = relative_transform
        else:
            relative_transform = self._transform

        grid_warper = AffineGridWarperLayer(spatial_shape, spatial_shape)
        resampler = ResamplerLayer(interpolation=self.interpolation,
                                   boundary=self.boundary)
        warp_parameters = tf.reshape(relative_transform[:, :spatial_rank, :],
                                     [batch_size, -1])
        grid = grid_warper(warp_parameters)
        resampled = resampler(input_tensor, grid)
        return resampled
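The reshape at the end mirrors how the identity affines are written out in the other examples: assuming _random_transform returns homogeneous (rank+1)x(rank+1) matrices, the homogeneous bottom row is dropped and the remaining rank x (rank+1) block is flattened. A small NumPy illustration for the 2-D case (the batch size of 4 is only for illustration):

import numpy as np

batch_transform = np.tile(np.eye(3)[None], (4, 1, 1))     # four 2-D homogeneous identities
warp_parameters = batch_transform[:, :2, :].reshape(4, -1)
# warp_parameters[0] == [1., 0., 0., 0., 1., 0.], the same layout as identity_affine elsewhere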
Example #8
    def _test_grads_images(self,
                           interpolation='linear',
                           boundary='replicate',
                           ndim=2):
        if ndim == 2:
            test_image, input_shape = get_multiple_2d_images()
            test_target, target_shape = get_multiple_2d_targets()
            identity_affine = [[1., 0., 0., 0., 1., 0.]] * 4
        else:
            test_image, input_shape = get_multiple_3d_images()
            test_target, target_shape = get_multiple_3d_targets()
            identity_affine = [[
                1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0.
            ]] * 4
        affine_var = tf.get_variable('affine', initializer=identity_affine)
        grid = AffineGridWarperLayer(source_shape=input_shape[1:-1],
                                     output_shape=target_shape[1:-1],
                                     constraints=None)
        warp_coords = grid(affine_var)
        resampler = ResamplerLayer(interpolation, boundary=boundary)
        new_image = resampler(tf.constant(test_image, dtype=tf.float32),
                              warp_coords)

        diff = tf.reduce_mean(
            tf.squared_difference(new_image,
                                  tf.constant(test_target, dtype=tf.float32)))
        optimiser = tf.train.AdagradOptimizer(0.01)
        grads = optimiser.compute_gradients(diff)
        opt = optimiser.apply_gradients(grads)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            init_val, affine_val = sess.run([diff, affine_var])
            for _ in range(5):
                _, diff_val, affine_val = sess.run([opt, diff, affine_var])
                print('{}, {}'.format(diff_val, affine_val[0]))
            self.assertGreater(init_val, diff_val)
Example #9
    def test_no_constraints(self):
        grid_warper = AffineGridWarperLayer(source_shape=(3, 3),
                                            output_shape=(2, ))
        self.assertEqual(grid_warper.constraints.constraints,
                         ((None, None, None), (None, None, None)))
Example #10
    def _test_correctness(self, args, aff, expected_value):
        grid_warper = AffineGridWarperLayer(**args)
        computed_grid = grid_warper(aff)
        with self.cached_session() as sess:
            output_val = sess.run(computed_grid)
            self.assertAllClose(expected_value, output_val)
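For context, the helper above would be driven with a keyword-argument dict and a batch of affine parameters, along these lines (the values are illustrative, not taken from an actual test case, and tf is the module already imported by the test file):

args = dict(source_shape=(3, 3), output_shape=(2, 2))
aff = tf.constant([[1., 0., 0., 0., 1., 0.]], dtype=tf.float32)  # identity 2-D affine
# expected_value would hold the grid precomputed for these parameters, e.g. with numpy
# self._test_correctness(args=args, aff=aff, expected_value=expected_value)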
Example #11
    def layer_op(self):
        image_id, fixed_inputs, moving_inputs, fixed_shape, moving_shape = \
            self.iterator.get_next()
        # TODO preprocessing layer modifying
        #      image shapes will not be supported
        # assuming the same shape across modalities, using the first
        image_id.set_shape((self.batch_size,))
        image_id = tf.to_float(image_id)

        fixed_inputs.set_shape(
            (self.batch_size,) + (None,) * self.spatial_rank + (2,))
        # last dim is 1 image + 1 label
        moving_inputs.set_shape(
            (self.batch_size,) + self.moving_image_shape + (2,))
        fixed_shape.set_shape((self.batch_size, self.spatial_rank + 1))
        moving_shape.set_shape((self.batch_size, self.spatial_rank + 1))

        # resizing the moving_inputs to match the target
        # assumes the same shape across the batch
        target_spatial_shape = \
            tf.unstack(fixed_shape[0], axis=0)[:self.spatial_rank]
        moving_inputs = Resize(new_size=target_spatial_shape)(moving_inputs)
        combined_volume = tf.concat([fixed_inputs, moving_inputs], axis=-1)

        # TODO affine data augmentation here
        if self.spatial_rank == 3:

            window_channels = np.prod(self.window_size[self.spatial_rank:]) * 4
            # TODO if no affine augmentation:
            img_spatial_shape = target_spatial_shape
            win_spatial_shape = [tf.constant(dim) for dim in
                                 self.window_size[:self.spatial_rank]]

            # scale the image to new space
            batch_scale = [
                tf.reshape(tf.to_float(img) / tf.to_float(win), (1,1))
                for (win, img) in zip(win_spatial_shape, img_spatial_shape)]
            batch_scale = tf.concat(batch_scale, axis=1)
            affine_constraints = ((None, 0.0, 0.0, 0.0),
                                  (0.0, None, 0.0, 0.0),
                                  (0.0, 0.0, None, 0.0))
            computed_grid = AffineGridWarperLayer(
                source_shape=(None, None, None),
                output_shape=self.window_size[:self.spatial_rank],
                constraints=affine_constraints)(batch_scale)
            computed_grid.set_shape((1,) +
                                    self.window_size[:self.spatial_rank] +
                                    (self.spatial_rank,))
            resampler = ResamplerLayer(
                interpolation='linear', boundary='replicate')
            windows = resampler(combined_volume, computed_grid)
            out_shape = [self.batch_size] + \
                        list(self.window_size[:self.spatial_rank]) + \
                        [window_channels]
            windows.set_shape(out_shape)

            image_id = tf.reshape(image_id, (self.batch_size, 1))
            start_location = tf.zeros((self.batch_size, self.spatial_rank))
            end_location = tf.constant(self.window_size[:self.spatial_rank])
            end_location = tf.reshape(end_location, (1, self.spatial_rank))
            end_location = tf.to_float(tf.tile(
                end_location, [self.batch_size, 1]))
            locations = tf.concat([
                image_id, start_location, end_location], axis=1)
        return windows, locations
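Note how the constraints determine how many values batch_scale must carry: every entry of the 3x4 affine is pinned to 0.0 except the three diagonal terms, so the constrained warper consumes exactly the three per-axis scale factors concatenated above. A quick standalone check of that count:

affine_constraints = ((None, 0.0, 0.0, 0.0),
                      (0.0, None, 0.0, 0.0),
                      (0.0, 0.0, None, 0.0))
n_free = sum(entry is None for row in affine_constraints for entry in row)
assert n_free == 3   # matches the width of the concatenated batch_scale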
Example #12
    def layer_op(self):
        """
        This function concatenate image and label volumes at the last dim
        and randomly cropping the volumes (also the cropping margins)
        """
        image_id, fixed_inputs, moving_inputs, fixed_shape, moving_shape = \
            self.iterator.get_next()
        # TODO preprocessing layer modifying
        #      image shapes will not be supported
        # assuming the same shape across modalities, using the first
        image_id.set_shape((self.batch_size, ))
        image_id = tf.to_float(image_id)

        fixed_inputs.set_shape((self.batch_size, ) +
                               (None, ) * self.spatial_rank + (2, ))
        # last dim is 1 image + 1 label
        moving_inputs.set_shape((self.batch_size, ) + self.moving_image_shape +
                                (2, ))
        fixed_shape.set_shape((self.batch_size, self.spatial_rank + 1))
        moving_shape.set_shape((self.batch_size, self.spatial_rank + 1))

        # resizing the moving_inputs to match the target
        # assumes the same shape across the batch
        target_spatial_shape = \
            tf.unstack(fixed_shape[0], axis=0)[:self.spatial_rank]
        moving_inputs = Resize(new_size=target_spatial_shape)(moving_inputs)
        combined_volume = tf.concat([fixed_inputs, moving_inputs], axis=-1)

        # smoothing_layer = Smoothing(
        #     sigma=1, truncate=3.0, type_str='gaussian')
        # combined_volume = tf.unstack(combined_volume, axis=-1)
        # combined_volume[0] = tf.expand_dims(combined_volume[0], axis=-1)
        # combined_volume[1] = smoothing_layer(
        #     tf.expand_dims(combined_volume[1]), axis=-1)
        # combined_volume[2] = tf.expand_dims(combined_volume[2], axis=-1)
        # combined_volume[3] = smoothing_layer(
        #     tf.expand_dims(combined_volume[3]), axis=-1)
        # combined_volume = tf.stack(combined_volume, axis=-1)

        # TODO affine data augmentation here
        if self.spatial_rank == 3:

            window_channels = np.prod(self.window_size[self.spatial_rank:]) * 4
            # TODO if no affine augmentation:
            img_spatial_shape = target_spatial_shape
            win_spatial_shape = [
                tf.constant(dim)
                for dim in self.window_size[:self.spatial_rank]
            ]
            # when img==win make sure shift => 0.0
            # otherwise interpolation is out of bound
            batch_shift = [
                tf.random_uniform(shape=(self.batch_size, 1),
                                  minval=0,
                                  maxval=tf.maximum(tf.to_float(img - win - 1),
                                                    0.01))
                for (win, img) in zip(win_spatial_shape, img_spatial_shape)
            ]
            batch_shift = tf.concat(batch_shift, axis=1)
            affine_constraints = ((1.0, 0.0, 0.0, None), (0.0, 1.0, 0.0, None),
                                  (0.0, 0.0, 1.0, None))
            computed_grid = AffineGridWarperLayer(
                source_shape=(None, None, None),
                output_shape=self.window_size[:self.spatial_rank],
                constraints=affine_constraints)(batch_shift)
            computed_grid.set_shape((self.batch_size, ) +
                                    self.window_size[:self.spatial_rank] +
                                    (self.spatial_rank, ))
            resampler = ResamplerLayer(interpolation='linear',
                                       boundary='replicate')
            windows = resampler(combined_volume, computed_grid)
            out_shape = [self.batch_size] + \
                        list(self.window_size[:self.spatial_rank]) + \
                        [window_channels]
            windows.set_shape(out_shape)

            image_id = tf.reshape(image_id, (self.batch_size, 1))
            start_location = tf.zeros((self.batch_size, self.spatial_rank))
            locations = tf.concat([image_id, start_location, batch_shift],
                                  axis=1)
        return windows, locations
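The maxval used for the random shift keeps the crop inside the image: the range collapses to (0, 0.01) whenever the window is at least as large as the image, which is what the "when img==win" comment guards against. A tiny numeric sketch with made-up sizes:

import numpy as np

img, win = 64, 48                                # hypothetical image / window extents
maxval = max(float(img - win - 1), 0.01)         # 15.0 voxels of slack along this axis
offset = np.random.uniform(0.0, maxval)          # per-axis crop offset, as drawn above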