Code example #1
    def test_op_errors(self):
        batch_size = 10
        data_height = 9
        data_width = 7
        data_depth = 3
        data_channels = 5
        warp_width = 4
        warp_height = 8

        # Input data shape is not defined over a 2D grid, i.e. its shape is not like
        # (batch_size, data_height, data_width, data_channels).
        data_shape = (batch_size, data_height, data_width, data_depth,
                      data_channels)
        data = np.zeros(data_shape)
        warp_shape = (batch_size, warp_height, warp_width, 2)
        warp = np.zeros(warp_shape)

        with self.assertRaisesRegex(
                tf.errors.UnimplementedError,
                "Only bilinear interpolation is currently supported."):
            self.evaluate(resampler_ops.resampler(data, warp))

        # Warp tensor must be at least a matrix, with shape [batch_size, 2].
        data_shape = (batch_size, data_height, data_width, data_channels)
        data = np.zeros(data_shape)
        warp_shape = (batch_size, )
        warp = np.zeros(warp_shape)

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    "warp should be at least a matrix"):
            self.evaluate(resampler_ops.resampler(data, warp))

        # The batch size of the data and warp tensors must be the same.
        data_shape = (batch_size, data_height, data_width, data_channels)
        data = np.zeros(data_shape)
        warp_shape = (batch_size + 1, warp_height, warp_width, 2)
        warp = np.zeros(warp_shape)

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    "Batch size of data and warp tensor"):
            self.evaluate(resampler_ops.resampler(data, warp))

        # The warp tensor must contain 2D coordinates, i.e. its shape last dimension
        # must be 2.
        data_shape = (batch_size, data_height, data_width, data_channels)
        data = np.zeros(data_shape)
        warp_shape = (batch_size, warp_height, warp_width, 3)
        warp = np.zeros(warp_shape)

        with self.assertRaisesRegex(
                tf.errors.UnimplementedError,
                "Only bilinear interpolation is supported, warping"):
            self.evaluate(resampler_ops.resampler(data, warp))
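
For contrast with the error cases above, here is a minimal sketch of a call with valid shapes: `data` is a (batch, height, width, channels) tensor and the last dimension of `warp` holds 2-D sample coordinates (taken here as (x, y)). The `tensorflow_addons.image.resampler_ops` import path and the coordinate ordering are assumptions, not something shown by the test itself.

import numpy as np
import tensorflow as tf
# Assumed import path for the op exercised by the tests above.
from tensorflow_addons.image import resampler_ops

batch_size, data_height, data_width, data_channels = 10, 9, 7, 5
warp_height, warp_width = 8, 4

data = tf.constant(
    np.random.rand(batch_size, data_height, data_width, data_channels).astype(np.float32))
# Last dimension is 2: sample coordinates inside the data grid (assumed (x, y) order).
warp = tf.constant(
    (np.random.rand(batch_size, warp_height, warp_width, 2)
     * [data_width - 1, data_height - 1]).astype(np.float32))

outputs = resampler_ops.resampler(data=data, warp=warp)
print(outputs.shape)  # (10, 8, 4, 5)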
Code example #2
def test_op_forward_pass(dtype):
    np.random.seed(0)
    data_width = 7
    data_height = 9
    data_channels = 5
    warp_width = 4
    warp_height = 8
    batch_size = 10

    warp = _make_warp(batch_size, warp_height, warp_width, dtype)
    data_shape = (batch_size, data_height, data_width, data_channels)
    data = np.random.rand(*data_shape).astype(dtype)
    data_ph = tf.constant(data)
    warp_ph = tf.constant(warp)
    outputs = resampler_ops.resampler(data=data_ph, warp=warp_ph)
    assert outputs.shape == (10, warp_height, warp_width, data_channels)

    # Generate reference output via bilinear interpolation in numpy
    reference_output = np.zeros_like(outputs)
    for batch in range(batch_size):
        for c in range(data_channels):
            reference_output[batch, :, :, c] = _bilinearly_interpolate(
                data[batch, :, :, c], warp[batch, :, :, 0], warp[batch, :, :, 1]
            )

    test_utils.assert_allclose_according_to_type(
        outputs, reference_output, half_rtol=5e-3, half_atol=5e-3
    )
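
The helpers `_make_warp` and `_bilinearly_interpolate` are referenced but not defined in these excerpts. Below is an illustrative numpy sketch of a bilinear-interpolation reference of the kind the test compares against, assuming `data` is a single (height, width) channel and `x`, `y` index width and height respectively; it is not the actual helper from the test module.

import numpy as np

def _bilinearly_interpolate(data, x, y):
    # Corner indices surrounding each (x, y) sample point.
    x0 = np.floor(x).astype(np.int64)
    y0 = np.floor(y).astype(np.int64)
    x1, y1 = x0 + 1, y0 + 1

    # Clamp to the valid grid so border samples stay in range.
    x0c = np.clip(x0, 0, data.shape[1] - 1)
    x1c = np.clip(x1, 0, data.shape[1] - 1)
    y0c = np.clip(y0, 0, data.shape[0] - 1)
    y1c = np.clip(y1, 0, data.shape[0] - 1)

    # Gather the four neighbours and blend them with the usual bilinear weights.
    top_left = data[y0c, x0c]
    bottom_left = data[y1c, x0c]
    top_right = data[y0c, x1c]
    bottom_right = data[y1c, x1c]
    w_tl = (x1 - x) * (y1 - y)
    w_bl = (x1 - x) * (y - y0)
    w_tr = (x - x0) * (y1 - y)
    w_br = (x - x0) * (y - y0)
    return w_tl * top_left + w_bl * bottom_left + w_tr * top_right + w_br * bottom_right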
Code example #3
    def _test_op_forward_pass(self, on_gpu, dtype):
        np.random.seed(0)
        data_width = 7
        data_height = 9
        data_channels = 5
        warp_width = 4
        warp_height = 8
        batch_size = 10

        warp = _make_warp(batch_size, warp_height, warp_width, dtype)
        data_shape = (batch_size, data_height, data_width, data_channels)
        data = np.random.rand(*data_shape).astype(dtype)
        use_gpu = on_gpu and tf.test.is_gpu_available()
        with test_utils.device(use_gpu):
            data_ph = tf.constant(data)
            warp_ph = tf.constant(warp)
            outputs = self.evaluate(resampler_ops.resampler(data=data_ph, warp=warp_ph))
            self.assertEqual(
                outputs.shape, (10, warp_height, warp_width, data_channels)
            )

        # Generate reference output via bilinear interpolation in numpy
        reference_output = np.zeros_like(outputs)
        for batch in range(batch_size):
            for c in range(data_channels):
                reference_output[batch, :, :, c] = _bilinearly_interpolate(
                    data[batch, :, :, c], warp[batch, :, :, 0], warp[batch, :, :, 1]
                )

        self.assertAllCloseAccordingToType(
            outputs, reference_output, half_rtol=5e-3, half_atol=5e-3
        )
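
Outside of the TestCase harness, the same CPU/GPU placement can be pinned with plain `tf.device`. A minimal sketch, again assuming the `tensorflow_addons.image.resampler_ops` import path:

import numpy as np
import tensorflow as tf
from tensorflow_addons.image import resampler_ops  # assumed import path

data = np.random.rand(10, 9, 7, 5).astype(np.float32)                 # (batch, height, width, channels)
warp = (np.random.rand(10, 8, 4, 2) * [6.0, 8.0]).astype(np.float32)  # coordinates inside the 7x9 grid

# Run on the first GPU when one is visible, otherwise fall back to CPU.
device_name = "/GPU:0" if tf.config.list_physical_devices("GPU") else "/CPU:0"
with tf.device(device_name):
    outputs = resampler_ops.resampler(data=tf.constant(data), warp=tf.constant(warp))
print(outputs.shape)  # (10, 8, 4, 5)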
Code example #4
def test_op_empty_batch():
    np.random.seed(13)
    data_width = 5
    data_height = 4
    data_channels = 3
    warp_width = 2
    warp_height = 6
    batch_size = 0
    dtype = np.float32

    warp = _make_warp(batch_size, warp_height, warp_width, dtype)
    data_shape = (batch_size, data_height, data_width, data_channels)
    data = np.zeros(data_shape).astype(dtype)
    data_tensor = tf.constant(data)
    warp_tensor = tf.constant(warp)
    with tf.GradientTape() as tape:
        # tf.constant inputs are not tracked automatically, so watch them explicitly.
        tape.watch(data_tensor)
        tape.watch(warp_tensor)
        outputs = resampler_ops.resampler(data=data_tensor, warp=warp_tensor)
    # Even with an empty batch, the gradients should keep the (empty) input shapes.
    data_grad, warp_grad = tape.gradient(outputs, (data_tensor, warp_tensor))
    assert data_grad.shape == data.shape
    assert warp_grad.shape == warp.shape
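
The same tape-based pattern works for a non-empty batch; a minimal sketch (same assumed import path) that reduces the output to a scalar loss and checks that the gradient shapes match the inputs:

import numpy as np
import tensorflow as tf
from tensorflow_addons.image import resampler_ops  # assumed import path

data = tf.constant(np.random.rand(2, 4, 5, 3).astype(np.float32))                 # (batch, height, width, channels)
warp = tf.constant((np.random.rand(2, 6, 2, 2) * [4.0, 3.0]).astype(np.float32))  # sample coordinates

with tf.GradientTape() as tape:
    # Constants are not tracked automatically, so watch both inputs.
    tape.watch(data)
    tape.watch(warp)
    outputs = resampler_ops.resampler(data=data, warp=warp)
    loss = tf.reduce_sum(outputs)

data_grad, warp_grad = tape.gradient(loss, (data, warp))
assert data_grad.shape == data.shape
assert warp_grad.shape == warp.shape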