Example #1
    def _gradients(self, data_format, use_gpu=False):
        with test_utils.device(use_gpu):
            batch, channels, height, width = 2, 3, 5, 6
            input_a = np.random.randn(batch, channels, height, width).astype(np.float32)
            input_b = np.random.randn(batch, channels, height, width).astype(np.float32)

            kernel_size = 1
            max_displacement = 2
            stride_1 = 1
            stride_2 = 2
            pad = 4

            if data_format == "channels_last":
                input_a = tf.transpose(input_a, [0, 2, 3, 1])
                input_b = tf.transpose(input_b, [0, 2, 3, 1])

            input_a_op = tf.convert_to_tensor(input_a)
            input_b_op = tf.convert_to_tensor(input_b)

            def correlation_fn(input_a, input_b):
                return CorrelationCost(
                    kernel_size=kernel_size,
                    max_displacement=max_displacement,
                    stride_1=stride_1,
                    stride_2=stride_2,
                    pad=pad,
                    data_format=data_format,
                )([input_a, input_b])

            theoretical, numerical = tf.test.compute_gradient(
                correlation_fn, [input_a_op, input_b_op]
            )

            self.assertAllClose(theoretical[0], numerical[0], atol=1e-3)
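
Note that tf.test.compute_gradient returns one theoretical/numerical Jacobian pair per input, but the assertion above only checks the pair for input_a. A minimal extension (an assumption, not part of the original test) would compare both inputs:

            # Assumed extension: check the Jacobians w.r.t. both input_a and input_b.
            for t, n in zip(theoretical, numerical):
                self.assertAllClose(t, n, atol=1e-3)
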
    def _test_op_forward_pass(self, on_gpu, dtype):
        np.random.seed(0)
        data_width = 7
        data_height = 9
        data_channels = 5
        warp_width = 4
        warp_height = 8
        batch_size = 10

        warp = _make_warp(batch_size, warp_height, warp_width, dtype)
        data_shape = (batch_size, data_height, data_width, data_channels)
        data = np.random.rand(*data_shape).astype(dtype)
        use_gpu = on_gpu and tf.test.is_gpu_available()
        with test_utils.device(use_gpu):
            data_ph = tf.constant(data)
            warp_ph = tf.constant(warp)
            outputs = self.evaluate(resampler_ops.resampler(data=data_ph, warp=warp_ph))
            self.assertEqual(
                outputs.shape, (batch_size, warp_height, warp_width, data_channels)
            )

        # Generate reference output via bilinear interpolation in numpy
        reference_output = np.zeros_like(outputs)
        for batch in range(batch_size):
            for c in range(data_channels):
                reference_output[batch, :, :, c] = _bilinearly_interpolate(
                    data[batch, :, :, c], warp[batch, :, :, 0], warp[batch, :, :, 1]
                )

        self.assertAllCloseAccordingToType(
            outputs, reference_output, half_rtol=5e-3, half_atol=5e-3
        )
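
_bilinearly_interpolate is a module-level helper that is not shown in this listing. A hypothetical numpy sketch of such a reference implementation (the clamping behaviour at the borders is an assumption; the original helper may differ):

def _bilinearly_interpolate(data, x, y):
    # Hypothetical reference: sample a 2-D array `data` (height, width) at
    # float coordinates x (width axis) and y (height axis) with bilinear
    # weights, clamping corner indices to the array bounds.
    x0 = np.floor(x).astype(int)
    x1 = x0 + 1
    y0 = np.floor(y).astype(int)
    y1 = y0 + 1

    x0c = np.clip(x0, 0, data.shape[1] - 1)
    x1c = np.clip(x1, 0, data.shape[1] - 1)
    y0c = np.clip(y0, 0, data.shape[0] - 1)
    y1c = np.clip(y1, 0, data.shape[0] - 1)

    top_left = data[y0c, x0c]
    bottom_left = data[y1c, x0c]
    top_right = data[y0c, x1c]
    bottom_right = data[y1c, x1c]

    w_tl = (x1 - x) * (y1 - y)
    w_bl = (x1 - x) * (y - y0)
    w_tr = (x - x0) * (y1 - y)
    w_br = (x - x0) * (y - y0)
    return (w_tl * top_left + w_bl * bottom_left
            + w_tr * top_right + w_br * bottom_right)
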
    def _forward_simple(self, data_format, use_gpu=False):
        # We are just testing where the output has vanishing values.
        with test_utils.device(use_gpu):
            val_a, val_b = self._create_test_data(data_format)
            input_a = tf.constant(val_a, dtype=tf.float32)
            input_b = tf.constant(val_b, dtype=tf.float32)

            input_a_tensor = tf.convert_to_tensor(input_a, dtype=tf.float32)
            input_b_tensor = tf.convert_to_tensor(input_b, dtype=tf.float32)

            kernel_size = 1
            max_displacement = 2
            stride_1 = 1
            stride_2 = 2
            pad = 4

            actual = self._forward(
                input_a_tensor,
                input_b_tensor,
                kernel_size=kernel_size,
                max_displacement=max_displacement,
                stride_1=stride_1,
                stride_2=stride_2,
                pad=pad,
                data_format=data_format,
            )

            if data_format == "channels_last":
                # NHWC -> NCHW
                actual = tf.transpose(actual, [0, 3, 1, 2])

            # We can test fixed ids, as the output is independent of data_format
            expected_ids = np.concatenate([np.zeros(464), np.ones(464)])
            self.assertAllClose(
                tf.where(tf.equal(actual, 0))[:, 0], expected_ids)

            counts = [54, 52, 54, 50, 44, 50, 54, 52, 54]
            expected_ids = np.concatenate(
                [k * np.ones(v) for k, v in enumerate(counts)])
            expected_ids = np.concatenate([expected_ids, expected_ids])
            self.assertAllClose(
                tf.where(tf.equal(actual, 0))[:, 1], expected_ids)
            self.assertEqual(actual.shape, (2, 9, 7, 8))
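
self._forward is not shown in this listing; presumably it is a thin wrapper around the layer, mirroring correlation_fn from Example #1. A sketch under that assumption:

    def _forward(self, input_a, input_b, kernel_size, max_displacement,
                 stride_1, stride_2, pad, data_format):
        # Assumed wrapper: build a CorrelationCost layer with the given
        # hyper-parameters and apply it to the two feature maps.
        return CorrelationCost(
            kernel_size=kernel_size,
            max_displacement=max_displacement,
            stride_1=stride_1,
            stride_2=stride_2,
            pad=pad,
            data_format=data_format,
        )([input_a, input_b])
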
Example #4
    def _keras(self, data_format, use_gpu=False):
        # Unable to use `layer_test` as this layer has multiple inputs.
        with test_utils.device(use_gpu):
            val_a, val_b = _create_test_data(data_format)

            # yapf: disable
            input_a = tf.keras.Input(shape=val_a.shape[1:])
            input_b = tf.keras.Input(shape=val_b.shape[1:])

            layer = CorrelationCost(
                kernel_size=1,
                max_displacement=2,
                stride_1=1,
                stride_2=2,
                pad=4,
                data_format=data_format)

            expected_output_shape = tuple(
                layer.compute_output_shape([input_a.shape, input_b.shape]))
            # yapf: enable

            x = [input_a, input_b]
            y = layer(x)
            model = tf.keras.models.Model(x, y)
            actual_output = model([val_a, val_b])

            expected_output_type = "float32"
            if tf.keras.backend.dtype(y[0]) != expected_output_type:
                raise AssertionError(
                    "Inferred output type %s does not equal "
                    "expected output type %s"
                    % (tf.keras.backend.dtype(y[0]), expected_output_type)
                )

            if actual_output.shape[1:] != expected_output_shape[0][1:]:
                raise AssertionError(
                    "Expected shape %s does not equal output shape %s"
                    % (expected_output_shape[0], actual_output.shape)
                )
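
Outside of the test harness, the layer can be used like any other two-input Keras layer. A minimal usage sketch (the tensorflow_addons import path is assumed, and the random feature maps are placeholders for real same-shaped NHWC tensors):

import tensorflow as tf
from tensorflow_addons.layers import CorrelationCost  # assumed import path

feature_map_a = tf.random.normal([2, 32, 48, 64])  # NHWC
feature_map_b = tf.random.normal([2, 32, 48, 64])

corr = CorrelationCost(
    kernel_size=1,
    max_displacement=2,
    stride_1=1,
    stride_2=2,
    pad=4,
    data_format="channels_last",
)
cost_volume = corr([feature_map_a, feature_map_b])
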
    def _test_op_backward_pass(self, on_gpu, dtype):
        np.random.seed(13)
        data_width = 5
        data_height = 4
        data_channels = 3
        warp_width = 2
        warp_height = 6
        batch_size = 3

        warp = _make_warp(batch_size, warp_height, warp_width, dtype)
        data_shape = (batch_size, data_height, data_width, data_channels)
        data = np.random.rand(*data_shape).astype(dtype)
        use_gpu = on_gpu and tf.test.is_gpu_available()
        with test_utils.device(use_gpu):
            data_tensor = tf.constant(data)
            warp_tensor = tf.constant(warp)
            theoretical, numerical = tf.test.compute_gradient(
                resampler_ops.resampler, [data_tensor, warp_tensor]
            )
            if not use_gpu:
                # On CPU we perform numerical differentiation at the best available
                # precision and compare against that. This is necessary for the test
                # to pass for float16.
                data_tensor_64 = tf.constant(data, dtype=tf.float64)
                warp_tensor_64 = tf.constant(warp, dtype=tf.float64)
                theoretical_64, numerical_64 = tf.test.compute_gradient(
                    resampler_ops.resampler, [data_tensor_64, warp_tensor_64]
                )

                for t, n in zip(theoretical, numerical_64):
                    self.assertAllCloseAccordingToType(
                        t, n, float_rtol=5e-5, float_atol=5e-5
                    )
            else:
                for t, n in zip(theoretical, numerical):
                    self.assertAllCloseAccordingToType(
                        t, n, float_rtol=5e-5, float_atol=5e-5
                    )
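
_make_warp is another helper not shown here; it must return a (batch, warp_height, warp_width, 2) array of (x, y) sampling coordinates in the requested dtype. A hypothetical sketch (the exact coordinate distribution used by the original helper is unknown):

def _make_warp(batch_size, warp_height, warp_width, dtype):
    # Hypothetical helper: random (x, y) sampling coordinates of shape
    # (batch, warp_height, warp_width, 2); the scaling is only a guess at
    # keeping most points inside a typical data grid.
    warp = np.random.uniform(
        low=0.0, high=4.0, size=(batch_size, warp_height, warp_width, 2)
    )
    return warp.astype(dtype)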