def testCopyExtraArgs(self):
  """Spot-checks that copy() preserves constructor parameters."""
  # Exhaustively covering every bijector is impractical since each takes
  # different initialization arguments, so a couple of representative
  # cases are checked instead.
  spot_checks = [
      tfb.Sigmoid(low=-1., high=2., validate_args=True),
      tfb.Chain(
          [
              tfb.Softplus(hinge_softness=[1., 2.], validate_args=True),
              tfb.MatrixInverseTriL(validate_args=True),
          ],
          validate_args=True),
  ]
  for bijector in spot_checks:
    self.assertEqual(bijector.parameters, bijector.copy().parameters)
def testErrorOnInputSingular(self):
  """All four bijector methods reject a singular (zero-diagonal) input."""
  inv = tfb.MatrixInverseTriL(validate_args=True)
  # Lower triangular but singular: second diagonal entry is zero.
  singular = np.array([[1., 0.], [0., 0.]], dtype=np.float32)
  nonsingular_error_msg = "must have all diagonal entries nonzero"
  methods = [
      inv.forward,
      inv.inverse,
      lambda x: inv.forward_log_det_jacobian(x, event_ndims=2),
      lambda x: inv.inverse_log_det_jacobian(x, event_ndims=2),
  ]
  for method in methods:
    with self.assertRaisesOpError(nonsingular_error_msg):
      self.evaluate(method(singular))
def testErrorOnInputRankTooLow(self):
  """All four bijector methods reject an input of rank < 2."""
  inv = tfb.MatrixInverseTriL(validate_args=True)
  # A rank-1 input: matrices must have at least two dimensions.
  vector = np.array([0.1], dtype=np.float32)
  rank_error_msg = "must have rank at least 2"
  methods = [
      inv.forward,
      inv.inverse,
      lambda x: inv.forward_log_det_jacobian(x, event_ndims=2),
      lambda x: inv.inverse_log_det_jacobian(x, event_ndims=2),
  ]
  for method in methods:
    with self.assertRaisesWithPredicateMatch(ValueError, rank_error_msg):
      self.evaluate(method(vector))
def testZeroByZeroMatrix(self):
  """forward/inverse of an empty (0x0) matrix is again an empty matrix."""
  bijector = tfb.MatrixInverseTriL(validate_args=True)
  # The 0x0 identity; its "inverse" is itself (trivially).
  empty = np.eye(0, dtype=np.float32)
  forward_, inverse_ = self.evaluate(
      [bijector.forward(empty), bijector.inverse(empty)])
  self.assertAllClose(empty, forward_, atol=0., rtol=1e-5)
  self.assertAllClose(empty, inverse_, atol=0., rtol=1e-5)
def testOneByOneMatrix(self):
  """forward/inverse of a 1x1 matrix is the scalar reciprocal."""
  bijector = tfb.MatrixInverseTriL(validate_args=True)
  matrix = np.array([[5.]], dtype=np.float32)
  matrix_inv = np.array([[0.2]], dtype=np.float32)  # 1 / 5
  forward_, recovered_ = self.evaluate(
      [bijector.forward(matrix), bijector.inverse(matrix_inv)])
  self.assertAllClose(matrix_inv, forward_, atol=0., rtol=1e-5)
  self.assertAllClose(matrix, recovered_, atol=0., rtol=1e-5)
def testErrorOnInputNotLowerTriangular(self):
  """All four bijector methods reject a non-lower-triangular input."""
  inv = tfb.MatrixInverseTriL(validate_args=True)
  # Nonzero entry above the diagonal makes this invalid input.
  full_matrix = np.array([[1., 2.], [3., 4.]], dtype=np.float32)
  triangular_error_msg = 'must be lower triangular'
  methods = [
      inv.forward,
      inv.inverse,
      lambda x: inv.forward_log_det_jacobian(x, event_ndims=2),
      lambda x: inv.inverse_log_det_jacobian(x, event_ndims=2),
  ]
  for method in methods:
    with self.assertRaisesOpError(triangular_error_msg):
      self.evaluate(method(full_matrix))
def testComputesCorrectValues(self):
  """forward matches np.linalg.inv; inverse recovers the input."""
  bijector = tfb.MatrixInverseTriL(validate_args=True)
  self.assertStartsWith(bijector.name, 'matrix_inverse_tril')
  lower_tri = np.array([[0.7, 0., 0.],
                        [0.1, -1., 0.],
                        [0.3, 0.25, 0.5]], dtype=np.float32)
  # NumPy provides the reference inverse to compare against.
  expected_inv = np.linalg.inv(lower_tri)
  forward_, recovered_ = self.evaluate(
      [bijector.forward(lower_tri), bijector.inverse(expected_inv)])
  self.assertAllClose(expected_inv, forward_, atol=0., rtol=1e-5)
  self.assertAllClose(lower_tri, recovered_, atol=0., rtol=1e-5)
def testJacobian(self):
  """Checks the analytic forward_log_det_jacobian against a numerical one.

  Compares the bijector's own fldj to the theoretical value derived by
  `bijector_test_util.get_fldj_theoretical` for several matrix sizes.
  """
  bijector = tfb.MatrixInverseTriL()
  batch_size = 5
  # Fix the sampler's seed so a failure is reproducible run-to-run;
  # the unseeded global RNG previously made this test non-deterministic.
  rng = np.random.RandomState(seed=42)
  for ndims in range(2, 5):
    # Random lower-triangular batch of ndims x ndims matrices.
    x_ = np.tril(
        rng.uniform(
            -1., 1., size=[batch_size, ndims, ndims]).astype(np.float64))
    fldj = bijector.forward_log_det_jacobian(x_, event_ndims=2)
    # FillTriangular maps an unconstrained vector to a triangular matrix,
    # letting the utility differentiate in an unconstrained space.
    fldj_theoretical = bijector_test_util.get_fldj_theoretical(
        bijector, x_, event_ndims=2,
        input_to_unconstrained=tfb.Invert(tfb.FillTriangular()),
        output_to_unconstrained=tfb.Invert(tfb.FillTriangular()))
    fldj_, fldj_theoretical_ = self.evaluate([fldj, fldj_theoretical])
    self.assertAllClose(fldj_, fldj_theoretical_)
def testBatch(self):
  """Round-trips a batch of matrices through forward/inverse."""
  # Input shape (2, 1, 2, 2) exercises batch shape (2, 1) with 2x2 events.
  bijector = tfb.MatrixInverseTriL(validate_args=True)
  batch = np.array([[[[1., 0.],
                      [2., 3.]]],
                    [[[4., 0.],
                      [5., -6.]]]], dtype=np.float32)
  batch_inv = self._inv(batch)
  forward_, recovered_ = self.evaluate(
      [bijector.forward(batch), bijector.inverse(batch_inv)])
  self.assertAllClose(batch_inv, forward_, atol=0., rtol=1e-5)
  self.assertAllClose(batch, recovered_, atol=0., rtol=1e-5)
def testZeroByZeroMatrix(self):
  """An empty (0x0) matrix round-trips and has zero log-det-Jacobian."""
  bijector = tfb.MatrixInverseTriL(validate_args=True)
  empty = np.eye(0, dtype=np.float32)
  # The 0x0 case is degenerate: both fldj and ildj should be exactly 0.
  expected_fldj_ = 0.
  forward_, recovered_, fldj_, ildj_ = self.evaluate([
      bijector.forward(empty),
      bijector.inverse(empty),
      bijector.forward_log_det_jacobian(empty, event_ndims=2),
      bijector.inverse_log_det_jacobian(empty, event_ndims=2),
  ])
  self.assertAllClose(empty, forward_, atol=0., rtol=1e-5)
  self.assertAllClose(empty, recovered_, atol=0., rtol=1e-5)
  self.assertNear(expected_fldj_, fldj_, err=1e-3)
  self.assertNear(-expected_fldj_, ildj_, err=1e-3)
def testErrorOnInputNotLowerTriangular(self):
  """All four bijector methods reject a non-lower-triangular input.

  Uses `self.evaluate` rather than the deprecated `self.test_session()` +
  `Tensor.eval()` pair, which only works in graph mode; `self.evaluate`
  runs under both graph and eager execution and matches the style of the
  other error tests in this file.
  """
  inv = tfb.MatrixInverseTriL(validate_args=True)
  # Nonzero entry above the diagonal makes this invalid input.
  x_ = np.array([[1., 2.], [3., 4.]], dtype=np.float32)
  triangular_error_msg = "must be lower triangular"
  with self.assertRaisesWithPredicateMatch(
      errors.InvalidArgumentError, triangular_error_msg):
    self.evaluate(inv.forward(x_))
  with self.assertRaisesWithPredicateMatch(
      errors.InvalidArgumentError, triangular_error_msg):
    self.evaluate(inv.inverse(x_))
  with self.assertRaisesWithPredicateMatch(
      errors.InvalidArgumentError, triangular_error_msg):
    self.evaluate(inv.forward_log_det_jacobian(x_, event_ndims=2))
  with self.assertRaisesWithPredicateMatch(
      errors.InvalidArgumentError, triangular_error_msg):
    self.evaluate(inv.inverse_log_det_jacobian(x_, event_ndims=2))
def testComputesCorrectValues(self):
  """forward matches np.linalg.inv and the fldj matches the closed form."""
  bijector = tfb.MatrixInverseTriL(validate_args=True)
  self.assertEqual("matrix_inverse_tril", bijector.name)
  lower_tri = np.array([[0.7, 0., 0.],
                        [0.1, -1., 0.],
                        [0.3, 0.25, 0.5]], dtype=np.float32)
  # NumPy provides the reference inverse to compare against.
  expected_inv = np.linalg.inv(lower_tri)
  # Closed-form log-det-Jacobian for a 3x3 lower-triangular inverse:
  # -2 * (n + 1) * sum(log|diag|) with n = 3 diagonal entries, hence -6.
  expected_fldj_ = -6. * np.sum(np.log(np.abs(np.diag(lower_tri))))
  forward_, recovered_, fldj_, ildj_ = self.evaluate([
      bijector.forward(lower_tri),
      bijector.inverse(expected_inv),
      bijector.forward_log_det_jacobian(lower_tri, event_ndims=2),
      bijector.inverse_log_det_jacobian(expected_inv, event_ndims=2),
  ])
  self.assertAllClose(expected_inv, forward_, atol=0., rtol=1e-5)
  self.assertAllClose(lower_tri, recovered_, atol=0., rtol=1e-5)
  self.assertNear(expected_fldj_, fldj_, err=1e-3)
  self.assertNear(-expected_fldj_, ildj_, err=1e-3)
def testBatch(self):
  """Round-trips a batch of matrices and checks per-example log-dets."""
  # Input shape (2, 1, 2, 2) exercises batch shape (2, 1) with 2x2 events.
  bijector = tfb.MatrixInverseTriL(validate_args=True)
  batch = np.array([[[[1., 0.],
                      [2., 3.]]],
                    [[[4., 0.],
                      [5., -6.]]]], dtype=np.float32)
  batch_inv = self._inv(batch)
  # Closed-form per-example fldj: -2 * (n + 1) * sum(log|diag|) with
  # n = 2 diagonal entries, hence the factor -4.
  expected_fldj_ = -4. * np.sum(
      np.log(np.abs(np.diagonal(batch, axis1=-2, axis2=-1))), axis=-1)
  forward_, recovered_, fldj_, ildj_ = self.evaluate([
      bijector.forward(batch),
      bijector.inverse(batch_inv),
      bijector.forward_log_det_jacobian(batch, event_ndims=2),
      bijector.inverse_log_det_jacobian(batch_inv, event_ndims=2),
  ])
  self.assertAllClose(batch_inv, forward_, atol=0., rtol=1e-5)
  self.assertAllClose(batch, recovered_, atol=0., rtol=1e-5)
  self.assertAllClose(expected_fldj_, fldj_, atol=0., rtol=1e-3)
  self.assertAllClose(-expected_fldj_, ildj_, atol=0., rtol=1e-3)