def test_static_parameterizer(self):
    """A StaticParameterizer built from a zeros initializer must produce an
    all-zero variable of the requested shape, ignoring the fallback
    initializer passed to the test helper."""
    expected_shape = (1, 2, 3, 4)
    zero_param = parameterizers.StaticParameterizer(tf.initializers.zeros())
    result = self._test_parameterizer(
        zero_param, tf.initializers.random_uniform(), expected_shape)
    self.assertEqual(result.shape, expected_shape)
    self.assertAllClose(result, np.zeros(expected_shape), rtol=0, atol=1e-7)
# Esempio n. 2 ("Example no. 2" — scraper artifact; the stray "0" was a vote count)
    def run_same(self, batch, input_support, channels, filters, kernel_support,
                 corr, strides_down, strides_up, padding, extra_pad_end,
                 channel_separable, data_format, activation, use_bias):
        """Checks SignalConv* with an identity kernel against plain resampling.

        Feeds an input of consecutive values through the layer and verifies
        the output equals the input after the requested up-/downsampling,
        along with the layer's predicted output shape.
        """
        assert channels == filters == 1

        # Deterministic input: consecutive values over the full support,
        # laid out channels_first, transposed only if the layer is
        # channels_last.
        input_shape = (batch, 1) + input_support
        inputs = np.arange(np.prod(input_shape))
        inputs = inputs.reshape(input_shape).astype(np.float32)
        channels_first = data_format == "channels_first"
        tf_inputs = tf.constant(
            inputs if channels_first else np.moveaxis(inputs, 1, -1))

        # With an identity kernel the layer should act as a pure resampler.
        identity_kernel = parameterizers.StaticParameterizer(
            initializers.IdentityInitializer())

        # Select the layer class matching the input rank (1D/2D/3D).
        conv_classes = (signal_conv.SignalConv1D, signal_conv.SignalConv2D,
                        signal_conv.SignalConv3D)
        layer = conv_classes[inputs.ndim - 3](
            1, kernel_support, corr=corr, strides_down=strides_down,
            strides_up=strides_up, padding=padding,
            extra_pad_end=extra_pad_end, channel_separable=channel_separable,
            data_format=data_format, activation=activation, use_bias=use_bias,
            kernel_parameterizer=identity_kernel)
        tf_outputs = layer(tf_inputs)
        with self.cached_session() as sess:
            sess.run(tf.global_variables_initializer())
            outputs = sess.run(tf_outputs)

        # The layer must predict its own output shape correctly.
        predicted_shape = layer.compute_output_shape(tf_inputs.shape)
        self.assertEqual(outputs.shape, tuple(predicted_shape.as_list()))

        # Bring outputs back to channels_first before comparing to inputs.
        if not channels_first:
            outputs = np.moveaxis(outputs, -1, 1)

        # Reference result: upsample, then subsample each spatial axis.
        expected = inputs
        if any(s != 1 for s in strides_up):
            expected = self.numpy_upsample(expected, strides_up, extra_pad_end)
        subsample = (slice(None),) * 2
        subsample += tuple(slice(None, None, s) for s in strides_down)
        expected = expected[subsample]

        self.assertAllClose(expected, outputs, rtol=0, atol=1e-3)
# Esempio n. 3 ("Example no. 3" — scraper artifact; the stray "0" was a vote count)
    def run_valid(self, batch, input_support, channels, filters,
                  kernel_support, corr, strides_down, strides_up, padding,
                  extra_pad_end, channel_separable, data_format, activation,
                  use_bias):
        """Checks SignalConv* with "valid" padding against a SciPy reference.

        Runs the layer on random integer-valued data with a fixed random
        kernel and compares against the sibling SciPy-based convolution, as
        well as the layer's predicted output shape.
        """
        assert padding == "valid"

        # Random integer-valued input (exact in float32), channels_first
        # layout, transposed only if the layer is channels_last.
        inputs = np.random.randint(
            32, size=(batch, channels) + input_support).astype(np.float32)
        channels_first = data_format == "channels_first"
        tf_inputs = tf.constant(
            inputs if channels_first else np.moveaxis(inputs, 1, -1))

        # Random integer kernel, pinned via a static parameterizer so the
        # layer uses exactly these weights.
        kernel = np.random.randint(
            16, size=kernel_support + (channels, filters)).astype(np.float32)
        static_kernel = parameterizers.StaticParameterizer(
            tf.constant_initializer(kernel))

        # Select the layer class matching the input rank (1D/2D/3D).
        conv_classes = (signal_conv.SignalConv1D, signal_conv.SignalConv2D,
                        signal_conv.SignalConv3D)
        layer = conv_classes[inputs.ndim - 3](
            filters, kernel_support, corr=corr, strides_down=strides_down,
            strides_up=strides_up, padding="valid",
            extra_pad_end=extra_pad_end, channel_separable=channel_separable,
            data_format=data_format, activation=activation, use_bias=use_bias,
            kernel_parameterizer=static_kernel)
        tf_outputs = layer(tf_inputs)
        with self.cached_session() as sess:
            sess.run(tf.global_variables_initializer())
            outputs = sess.run(tf_outputs)

        # The layer must predict its own output shape correctly.
        predicted_shape = layer.compute_output_shape(tf_inputs.shape)
        self.assertEqual(outputs.shape, tuple(predicted_shape.as_list()))

        # Bring outputs back to channels_first before comparing to SciPy.
        if not channels_first:
            outputs = np.moveaxis(outputs, -1, 1)

        # Compare against the SciPy reference convolution on the same data.
        expected = self.scipy_convolve_valid(
            corr, inputs, kernel, strides_down, strides_up, extra_pad_end,
            channel_separable)
        self.assertAllClose(expected, outputs, rtol=0, atol=1e-3)