    def testMakeCovarianceUpdateOp(self):
        with tf_ops.Graph().as_default():
            # Construct all arguments such that convolution kernel is applied in
            # exactly one spatial location.
            inputs = np.random.randn(
                1,  # batch_size
                self.kernel_height,  # input height == kernel height
                self.kernel_width,  # input width == kernel width
                self.in_channels)
            outputs_grad = np.random.randn(
                1,  # batch_size
                1,  # output_height
                1,  # output_width
                self.out_channels)

            factor = ff.ConvDiagonalFactor(
                (constant_op.constant(inputs), ),
                ((constant_op.constant(outputs_grad), ), ),
                self.kernel_shape,
                strides=[1, 1, 1, 1],
                padding='VALID')
            factor.instantiate_cov_variables()

            # Completely forget initial value on first update.
            cov_update_op = factor.make_covariance_update_op(0.0)

            # Ensure the new covariance equals the elementwise square of the
            # outer product of the vectorized inputs and output gradients.
            with self.test_session() as sess:
                sess.run(tf_variables.global_variables_initializer())
                cov = sess.run(cov_update_op)
                expected_cov = np.outer(inputs.flatten(),
                                        outputs_grad.flatten())**2
                self.assertAllClose(expected_cov, cov)
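
    # A minimal reference sketch of the quantity the assertion above checks.
    # The helper name is illustrative only, not part of ConvDiagonalFactor's
    # API: for a kernel applied at exactly one spatial location, the diagonal
    # covariance reduces to the elementwise square of the outer product of
    # the flattened patch and the flattened output gradient.
    def _expected_single_patch_cov(self, patch, output_grad):
        # patch: [1, kernel_height, kernel_width, in_channels] ndarray.
        # output_grad: [1, 1, 1, out_channels] ndarray.
        return np.outer(patch.flatten(), output_grad.flatten())**2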

    def testHasBias(self):
        with tf_ops.Graph().as_default():
            inputs = random_ops.random_uniform(
                [self.batch_size, self.height, self.width, self.in_channels])
            outputs_grads = [
                random_ops.random_uniform([
                    self.batch_size, self.height // self.strides[1],
                    self.width // self.strides[2], self.out_channels
                ]) for _ in range(3)
            ]

            factor = ff.ConvDiagonalFactor((inputs, ), (outputs_grads, ),
                                           self.kernel_shape,
                                           self.strides,
                                           self.padding,
                                           data_format=self.data_format,
                                           has_bias=True)
            factor.instantiate_cov_variables()

            # Ensure shape accounts for bias.
            self.assertEqual([
                self.kernel_height * self.kernel_width * self.in_channels + 1,
                self.out_channels
            ],
                             factor.get_cov_var().shape.as_list())

            # Ensure update op doesn't crash.
            cov_update_op = factor.make_covariance_update_op(0.0)
            with self.test_session() as sess:
                sess.run(tf_variables.global_variables_initializer())
                sess.run(cov_update_op)
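
    # Hedged sketch of the bias handling exercised above, assuming that with
    # has_bias=True the factor appends a homogeneous coordinate of 1.0 to
    # each flattened patch; that extra coordinate is why the covariance above
    # gains one row (kernel_height * kernel_width * in_channels + 1). The
    # helper name is illustrative only.
    def _append_homogeneous_coordinate(self, patches_flat):
        # patches_flat: [num_patches, kernel_height * kernel_width * in_channels].
        ones = np.ones([patches_flat.shape[0], 1], dtype=patches_flat.dtype)
        return np.concatenate([patches_flat, ones], axis=1)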
    def testInit(self):
        with tf_ops.Graph().as_default():
            inputs = random_ops.random_uniform(
                [self.batch_size, self.height, self.width, self.in_channels])
            outputs_grads = [
                random_ops.random_uniform([
                    self.batch_size, self.height // self.strides[1],
                    self.width // self.strides[2], self.out_channels
                ]) for _ in range(3)
            ]

            factor = ff.ConvDiagonalFactor((inputs, ), (outputs_grads, ),
                                           self.kernel_shape,
                                           self.strides,
                                           self.padding,
                                           data_format=self.data_format)
            factor.instantiate_cov_variables()

            # Ensure covariance matrix's shape makes sense.
            self.assertEqual([
                self.kernel_height * self.kernel_width * self.in_channels,
                self.out_channels
            ],
                             factor.get_cov_var().shape.as_list())
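
    # Hedged sketch of the update semantics the tests above rely on, assuming
    # make_covariance_update_op(decay) maintains a decayed moving average of
    # the batch covariance estimate. With decay=0.0, as used above, the old
    # value is forgotten entirely and the first update equals the batch
    # estimate. Illustrative only.
    def _decayed_moving_average(self, old_cov, batch_cov, decay):
        # new = decay * old + (1 - decay) * batch; decay=0.0 yields batch_cov.
        return decay * old_cov + (1.0 - decay) * batch_cov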