def testTheoreticalFldj(self):
    """Checks highway-flow fldj against a numerically computed Jacobian."""
    width = 4
    bijector = tfp.experimental.bijectors.build_trainable_highway_flow(
        width,
        activation_fn=tf.nn.softplus,
        gate_first_n=2,
        seed=test_util.test_seed())
    self.evaluate([v.initializer for v in bijector.trainable_variables])
    x = self.evaluate(
        samplers.uniform(
            [width],
            minval=-1.,
            maxval=1.,
            seed=test_util.test_seed(sampler_type='stateless')))
    y = self.evaluate(bijector.forward(x))
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        x,
        y,
        eval_func=self.evaluate,
        event_ndims=1,
        inverse_event_ndims=1,
        rtol=1e-5)
    # x has a single event dimension and no batch dims, so no broadcasting of
    # the (constant) jacobian is needed before comparing.
    actual_fldj = bijector.forward_log_det_jacobian(x, event_ndims=1)
    expected_fldj = bijector_test_util.get_fldj_theoretical(
        bijector, x, event_ndims=1)
    self.assertAllClose(
        self.evaluate(expected_fldj),
        self.evaluate(actual_fldj),
        atol=1e-5,
        rtol=1e-5)
def testBijectiveAndFinite32bit(self):
    """Softplus is bijective and finite over a wide float32 range."""
    forward_grid = np.linspace(-20., 20., 100).astype(np.float32)
    inverse_grid = np.logspace(-10, 10, 100).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        tfb.Softplus(),
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=0,
        rtol=1e-2,
        atol=1e-2)
def testBijectiveAndFiniteSkewnessNeg1Tailweight0p5(self):
    """SinhArcsinh(skewness=-1, tailweight=0.5) round-trips finitely."""
    bijector = tfb.SinhArcsinh(
        skewness=-1., tailweight=0.5, validate_args=True)
    # Cover negative, zero, and positive inputs over many orders of magnitude.
    grid = np.concatenate(
        (-np.logspace(-2, 10, 1000), [0], np.logspace(-2, 10, 1000))
    ).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        bijector, grid, grid, eval_func=self.evaluate, event_ndims=0,
        rtol=1e-3)
def testBijectiveAndFiniteWithNegativeHingeSoftness32Bit(self):
    """Negative hinge_softness flips Softplus's range to the negative axis."""
    bijector = tfb.Softplus(hinge_softness=-0.7)
    forward_grid = np.linspace(-20., 20., 100).astype(np.float32)
    inverse_grid = -np.logspace(-10, 10, 100).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=0,
        rtol=1e-2,
        atol=1e-2)
def testWithLKJSamples(self, dimension, concentration):
    """Round-trips LKJ Cholesky samples and checks the theoretical fldj."""
    bijector = tfb.CorrelationCholesky()
    lkj_dist = lkj.LKJ(
        dimension=dimension,
        concentration=np.float64(concentration),
        input_output_cholesky=True)
    batch_size = 10
    # Draw Cholesky-factor samples and pull them back to unconstrained space.
    cholesky_samples = self.evaluate(lkj_dist.sample([batch_size]))
    unconstrained = self.evaluate(bijector.inverse(cholesky_samples))
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        unconstrained,
        cholesky_samples,
        eval_func=self.evaluate,
        event_ndims=1,
        inverse_event_ndims=2,
        rtol=1e-5)
    actual_fldj = bijector.forward_log_det_jacobian(
        unconstrained, event_ndims=1)
    expected_fldj = bijector_test_util.get_fldj_theoretical(
        bijector,
        unconstrained,
        event_ndims=1,
        inverse_event_ndims=2,
        output_to_unconstrained=tfb.Invert(tfb.FillTriangular()))
    self.assertAllClose(
        self.evaluate(expected_fldj),
        self.evaluate(actual_fldj),
        atol=1e-5,
        rtol=1e-5)
def testTheoreticalFldj(self, data):
    """Hypothesis test: TransformDiagonal fldj matches numerical Jacobian."""
    dim = data.draw(hps.integers(min_value=0, max_value=10))
    diag_bijector = data.draw(
        bijector_hps.unconstrained_bijectors(
            max_forward_event_ndims=1,
            must_preserve_event_ndims=True).filter(
                _preserves_vector_dim(dim)))
    logging.info('Using diagonal bijector %s %s', diag_bijector.name,
                 diag_bijector)
    bijector = tfb.TransformDiagonal(diag_bijector=diag_bijector)

    def ensure_nonzero_batch(shape):
        # Degenerate zero-sized batch dims are bumped to 1.
        return [d if d > 0 else 1 for d in shape]

    shape = data.draw(
        tfp_hps.shapes().map(ensure_nonzero_batch)) + [dim, dim]
    x = np.random.randn(*shape).astype(np.float64)
    y = self.evaluate(bijector.forward(x))
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        x,
        y,
        eval_func=self.evaluate,
        event_ndims=2,
        inverse_event_ndims=2,
        rtol=1e-5)
    fldj = bijector.forward_log_det_jacobian(x, event_ndims=2)
    # For constant-jacobian bijectors, the zero fldj may not be broadcast.
    fldj = fldj + tf.zeros(tf.shape(x)[:-2], dtype=x.dtype)
    fldj_theoretical = bijector_test_util.get_fldj_theoretical(
        bijector, x, event_ndims=2, inverse_event_ndims=2)
    self.assertAllClose(
        self.evaluate(fldj_theoretical),
        self.evaluate(fldj),
        atol=1e-5,
        rtol=1e-5)
def testBijectiveAndFinite(self):
    """Softsign maps the real line onto (-1, 1) bijectively and finitely."""
    bijector = tfb.Softsign(validate_args=True)
    forward_grid = np.linspace(-20., 20., 100).astype(np.float32)
    inverse_grid = np.linspace(-0.99, 0.99, 100).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=0,
        rtol=1e-3,
        atol=1e-3)
def testBijectiveAndFinite(self):
    """Tanh round-trips finitely; endpoints are pulled in by eps."""
    eps = 1e-3
    forward_grid = np.linspace(-5., 5., 100).astype(np.float32)
    inverse_grid = np.linspace(eps, 1. - eps, 100).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        tfb.Tanh(),
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=0,
        atol=0.,
        rtol=1e-4)
def testTheoreticalFldj(self):
    """BatchNormalization fldj matches a numerically computed Jacobian."""
    batch_size = 5
    channels = 10
    x = np.random.uniform(size=[batch_size, channels]).astype(np.float32)
    bijector = tfb.BatchNormalization(training=False)
    bijector.batchnorm.build(x.shape)
    self.evaluate([v.initializer for v in bijector.variables])
    y = self.evaluate(bijector.forward(x))
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        x,
        y,
        eval_func=self.evaluate,
        event_ndims=1,
        inverse_event_ndims=1,
        rtol=1e-5)
    actual_fldj = bijector.forward_log_det_jacobian(x, event_ndims=1)
    # The jacobian is constant and therefore not yet broadcast to the batch
    # shape; add a batch-shaped zero to broadcast before comparison.
    actual_fldj = actual_fldj + tf.zeros(tf.shape(x)[:-1], dtype=x.dtype)
    expected_fldj = bijector_test_util.get_fldj_theoretical(
        bijector, x, event_ndims=1)
    self.assertAllClose(
        self.evaluate(expected_fldj),
        self.evaluate(actual_fldj),
        atol=1e-5,
        rtol=1e-5)
def testTheoreticalFldj(self, data):
    """Hypothesis test: spline fldj matches a numerically computed Jacobian."""
    # get_fldj_theoretical test rig requires 1-d batches.
    batch_shape = data.draw(tfp_hps.shapes(min_ndims=1, max_ndims=1))
    bijector = data.draw(
        rq_splines(batch_shape=batch_shape, dtype=tf.float64))
    self.assertEqual(tf.float64, bijector.dtype)
    bw, bh, kd = self.evaluate(
        [bijector.bin_widths, bijector.bin_heights, bijector.knot_slopes])
    logging.info('bw: %s\nbh: %s\nkd: %s', bw, bh, kd)
    # Broadcast the parameters against each other to find the x shape.
    x_shp = ((bw + bh)[..., :-1] + kd).shape[:-1]
    if x_shp[-1] == 1:
        # Possibly broadcast the x dim.
        dim = data.draw(hps.integers(min_value=1, max_value=7))
        x_shp = x_shp[:-1] + (dim,)
    x = np.linspace(-5, 5, np.prod(x_shp), dtype=np.float64).reshape(*x_shp)
    y = self.evaluate(bijector.forward(x))
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        x,
        y,
        eval_func=self.evaluate,
        event_ndims=0,
        inverse_event_ndims=0,
        rtol=1e-5)
    actual_fldj = bijector.forward_log_det_jacobian(x, event_ndims=0)
    expected_fldj = bijector_test_util.get_fldj_theoretical(
        bijector, x, event_ndims=0)
    self.assertAllClose(
        self.evaluate(expected_fldj),
        self.evaluate(actual_fldj),
        atol=1e-5,
        rtol=1e-5)
def testTheoreticalFldj(self):
    """ScaleMatvecLU fldj matches a numerically computed Jacobian."""
    raw_mat = tf.constant([[1., 2, 3], [4, 5, 6], [0.5, 0., 0.25]])
    nbatch = 5
    # Scale the base matrix by 1..nbatch to build a batch of matrices.
    batch_mats = raw_mat * tf.range(1., nbatch + 1.)[:, tf.newaxis,
                                                     tf.newaxis]
    lower_upper, permutation = tf.linalg.lu(
        tf.cast(batch_mats, tf.float64))
    bijector = tfb.ScaleMatvecLU(
        lower_upper=lower_upper, permutation=permutation, validate_args=True)
    self.assertEqual(tf.float64, bijector.dtype)
    channels = tf.compat.dimension_value(lower_upper.shape[-1])
    x = np.random.uniform(size=[2, 7, nbatch, channels]).astype(np.float64)
    y = self.evaluate(bijector.forward(x))
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        x,
        y,
        eval_func=self.evaluate,
        event_ndims=1,
        inverse_event_ndims=1,
        rtol=1e-5)
    actual_fldj = bijector.forward_log_det_jacobian(x, event_ndims=1)
    # The jacobian is constant and therefore not yet broadcast; add a
    # batch-shaped zero to broadcast before comparison.
    actual_fldj = actual_fldj + tf.zeros(tf.shape(x)[:-1], dtype=x.dtype)
    expected_fldj = bijector_test_util.get_fldj_theoretical(
        bijector, x, event_ndims=1)
    self.assertAllClose(
        self.evaluate(expected_fldj),
        self.evaluate(actual_fldj),
        atol=1e-5,
        rtol=1e-5)
def testBijectiveAndFinite(self):
    """Sigmoid round-trips finitely; endpoints are pulled in by eps."""
    eps = 1e-3
    forward_grid = np.linspace(-100., 100., 100).astype(np.float32)
    inverse_grid = np.linspace(eps, 1. - eps, 100).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        tfb.Sigmoid(),
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=0,
        atol=0.,
        rtol=1e-4)
def testTheoreticalFldjSimple(self):
    """A tiny hand-specified spline's fldj matches the numerical Jacobian."""
    bijector = tfb.RationalQuadraticSpline(
        bin_widths=[1., 1],
        bin_heights=[np.sqrt(.5), 2 - np.sqrt(.5)],
        knot_slopes=1)
    self.assertEqual(tf.float64, bijector.dtype)
    dim = 5
    grid = np.linspace(
        -1.05, 1.05, num=2 * dim, dtype=np.float64).reshape(2, dim)
    mapped = self.evaluate(bijector.forward(grid))
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        grid,
        mapped,
        eval_func=self.evaluate,
        event_ndims=0,
        inverse_event_ndims=0,
        rtol=1e-5)
    actual_fldj = bijector.forward_log_det_jacobian(grid, event_ndims=0)
    expected_fldj = bijector_test_util.get_fldj_theoretical(
        bijector, grid, event_ndims=0)
    self.assertAllClose(
        self.evaluate(expected_fldj),
        self.evaluate(actual_fldj),
        atol=1e-5,
        rtol=1e-5)
def testBijectiveAndFinite(self):
    """FrechetCDF is bijective/finite on its support (loc, inf) -> (0, 1)."""
    loc = np.array(-1., np.float64)
    bijector = tfb.FrechetCDF(
        loc=loc, scale=3.0, concentration=2., validate_args=True)
    # Start slightly above loc, where the CDF is strictly positive.
    forward_grid = np.linspace(loc + 0.25, 10., num=10).astype(np.float64)
    inverse_grid = np.linspace(0.01, 0.99, num=10).astype(np.float64)
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=0,
        rtol=1e-3)
def testBijectiveAndFinite(self):
    """Permute is a bijection between x and its permuted counterpart."""
    permutation = np.int32([2, 0, 1])
    x = np.random.randn(4, 2, 3)
    # Expected forward image: gather along the last axis.
    y = x[..., permutation]
    bijector = tfb.Permute(permutation=permutation, validate_args=True)
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        x,
        y,
        eval_func=self.evaluate,
        event_ndims=1,
        rtol=1e-6,
        atol=0)
def testBijectiveAndFinite(self):
    """Weibull CDF bijector round-trips finitely over [1, 8]."""
    bijector = tfb.Weibull(scale=20., concentration=2., validate_args=True)
    forward_grid = np.linspace(1., 8., num=10).astype(np.float32)
    # Matching CDF values at the grid endpoints: 1 - exp(-(x/20)^2).
    inverse_grid = np.linspace(
        -np.expm1(-1 / 400.), -np.expm1(-16), num=10).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=0,
        rtol=1e-3)
def testBijectiveAndFinite(self):
    """Reshape [2, 3] <-> [1, 2, 3] is a bijection."""
    x = np.random.randn(4, 2, 3)
    y = np.reshape(x, [4, 1, 2, 3])
    bijector = tfb.Reshape(
        event_shape_in=[2, 3],
        event_shape_out=[1, 2, 3],
        validate_args=True)
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        x,
        y,
        eval_func=self.evaluate,
        event_ndims=2,
        rtol=1e-6,
        atol=0)
def testBijectiveAndFinite(self):
    """Ascending maps unconstrained vectors to sorted vectors bijectively."""
    ascending = tfb.Ascending()
    unconstrained = np.random.randn(3, 10).astype(np.float32)
    sorted_vals = np.sort(
        np.random.randn(3, 10), axis=-1).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        ascending,
        unconstrained,
        sorted_vals,
        eval_func=self.evaluate,
        event_ndims=1)
def testBijectiveAndFinite(self, lower, upper):
    """Reciprocal is its own inverse and finite on [lower, upper]."""
    bijector = tfb.Reciprocal()
    # 1/x is an involution, so the same grid serves as both x and y.
    grid = np.linspace(lower, upper, num=100).astype(np.float32)
    mirror = np.linspace(lower, upper, num=100).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        bijector, grid, mirror, eval_func=self.evaluate, event_ndims=0)
def testBijectiveAndFinite16bit(self):
    """CategoricalToDiscrete maps indices to float16 values bijectively."""
    indices = np.arange(100).astype(np.int32)
    values = np.logspace(-5, 4, 100).astype(np.float16)
    bijector = categorical_to_discrete.CategoricalToDiscrete(
        map_values=values)
    bijector_test_util.assert_bijective_and_finite(
        bijector, indices, values, eval_func=self.evaluate, event_ndims=0)
def testBijectiveAndFinite(self):
    """Ordered maps sorted vectors to unconstrained vectors bijectively."""
    ordered = tfb.Ordered()
    sorted_vals = np.sort(
        self._rng.randn(3, 10), axis=-1).astype(np.float32)
    unconstrained = self._rng.randn(3, 10).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        ordered,
        sorted_vals,
        unconstrained,
        eval_func=self.evaluate,
        event_ndims=1)
def testBijectiveAndFinite(self):
    """Cumsum is a bijection between a vector and its running sums."""
    bijector = tfb.Cumsum(validate_args=True)
    x = np.linspace(-10, 10, num=10).astype(np.float32)
    running_sums = np.cumsum(x, axis=-1)
    bijector_test_util.assert_bijective_and_finite(
        bijector, x, running_sums, eval_func=self.evaluate, event_ndims=1)
def testBijectiveAndFiniteLowTemperature(self):
    """Softfloor stays bijective/finite at a low temperature (1e-1)."""
    floor = tfb.Softfloor(self.dtype(1e-1))
    forward_grid = np.sort(
        5 * self._rng.randn(3, 10), axis=-1).astype(self.dtype)
    inverse_grid = 5 * self._rng.randn(3, 10).astype(self.dtype)
    bijector_test_util.assert_bijective_and_finite(
        floor,
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=1)
def testBijectiveAndFinite(self):
    """Exp maps [-10, 10] onto [1e-10, 1e10] bijectively and finitely."""
    bijector = tfb.Exp()
    forward_grid = np.linspace(-10, 10, num=10).astype(np.float32)
    inverse_grid = np.logspace(-10, 10, num=10).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=0)
def testBijectiveAndFinite(self):
    """NormalCDF maps [-10, 10] into (0, 1) bijectively and finitely."""
    bijector = tfb.NormalCDF(validate_args=True)
    forward_grid = np.linspace(-10., 10., num=10).astype(np.float32)
    inverse_grid = np.linspace(0.1, 0.9, num=10).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=0,
        rtol=1e-4)
def testBijectiveAndFinite(self):
    """PowerTransform(power=0.2) round-trips finitely on its support."""
    bijector = tfb.PowerTransform(power=0.2, validate_args=True)
    # Support is x > -1/power = -5; start just inside it.
    forward_grid = np.linspace(-4.999, 10, num=10).astype(np.float32)
    inverse_grid = np.logspace(0.001, 10, num=10).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=0,
        rtol=1e-3)
def testBijectiveAndFinite(self):
    """Gumbel CDF bijector maps [-10, 10] into (0, 1) bijectively."""
    bijector = tfb.Gumbel(loc=0., scale=3.0, validate_args=True)
    forward_grid = np.linspace(-10., 10., num=10).astype(np.float32)
    inverse_grid = np.linspace(0.01, 0.99, num=10).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=0,
        rtol=1e-3)
def testBijectiveAndFinite16bit(self):
    """Softplus stays bijective/finite within float16's narrower range."""
    bijector = tfb.Softplus()
    # softplus(-20) is zero, so we can't use such a large range as in 32bit.
    forward_grid = np.linspace(-10., 20., 100).astype(np.float16)
    # Note that float16 is only in the open set (0, inf) for a smaller
    # logspace range. The actual range was (-7, 4), so use something smaller
    # for the test.
    inverse_grid = np.logspace(-6, 3, 100).astype(np.float16)
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=0,
        rtol=1e-1,
        atol=1e-3)
def testBijectorEndpoints(self):
    """Identity-parameterized SinhArcsinh is finite at dtype extremes."""
    for dtype in (np.float32, np.float64):
        bijector = tfb.SinhArcsinh(
            skewness=dtype(0.), tailweight=dtype(1.), validate_args=True)
        bounds = np.array(
            [np.finfo(dtype).min, np.finfo(dtype).max], dtype=dtype)
        # Note that the above bijector is the identity bijector. Hence, the
        # log_det_jacobian will be 0. Because of this we use atol.
        bijector_test_util.assert_bijective_and_finite(
            bijector,
            bounds,
            bounds,
            eval_func=self.evaluate,
            event_ndims=0,
            atol=2e-6)
def testBijectiveAndFinite(self):
    """DiscreteCosineTransform is bijective/finite for DCT types 2 and 3."""
    forward_grid = np.linspace(-10., 10., num=10).astype(np.float32)
    inverse_grid = np.linspace(0.01, 0.99, num=10).astype(np.float32)
    for dct_type in (2, 3):
        bijector_test_util.assert_bijective_and_finite(
            tfb.DiscreteCosineTransform(
                dct_type=dct_type, validate_args=True),
            forward_grid,
            inverse_grid,
            eval_func=self.evaluate,
            event_ndims=1,
            rtol=1e-3)
def testBijectiveAndFinite(self):
    """SoftmaxCentered round-trips between R^2 and the 3-simplex."""
    softmax = tfb.SoftmaxCentered()
    x = np.linspace(-50, 50, num=10).reshape(5, 2).astype(np.float32)
    # Make y values on the simplex with a wide range.
    y_0 = np.ones(5).astype(np.float32)
    y_1 = (1e-5 * rng.rand(5)).astype(np.float32)
    y_2 = (1e1 * rng.rand(5)).astype(np.float32)
    y = np.array([y_0, y_1, y_2])
    # Normalize the columns so each row of y.T sums to one.
    y /= y.sum(axis=0)
    y = y.T  # y.shape = [5, 3]
    bijector_test_util.assert_bijective_and_finite(
        softmax, x, y, eval_func=self.evaluate, event_ndims=1)
def testBijectiveAndFinite(self):
    """GompertzCDF bijector maps positive reals into (0, 1) bijectively."""
    bijector = tfb.GompertzCDF(
        concentration=1., rate=0.01, validate_args=True)
    forward_grid = np.logspace(-10, 2, num=10).astype(np.float32)
    inverse_grid = np.linspace(0.01, 0.99, num=10).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=0,
        rtol=1e-3)
def testBijectiveAndFinite(self):
    """Kumaraswamy CDF bijector round-trips finitely inside (0, 1)."""
    concentration1 = 1.2
    concentration0 = 2.
    bijector = tfb.Kumaraswamy(
        concentration1=concentration1,
        concentration0=concentration0,
        validate_args=True)
    # Omitting the endpoints 0 and 1, since idlj will be infinity at these
    # endpoints.
    y = np.linspace(.01, 0.99, num=10).astype(np.float32)
    # Analytic inverse of the Kumaraswamy CDF.
    x = 1 - (1 - y ** concentration1) ** concentration0
    bijector_test_util.assert_bijective_and_finite(
        bijector, x, y, eval_func=self.evaluate, event_ndims=0, rtol=1e-3)
def testBijectiveAndFinite(self, lower, upper):
    """Reciprocal is its own inverse and finite on [lower, upper]."""
    bijector = tfb.Reciprocal()
    # 1/x is an involution, so the same grid serves as both x and y.
    grid = np.linspace(lower, upper, num=100).astype(np.float32)
    mirror = np.linspace(lower, upper, num=100).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        bijector, grid, mirror, eval_func=self.evaluate, event_ndims=0)
def testBijectiveAndFinite(self):
    """PowerTransform(power=0.2) round-trips finitely on its support."""
    bijector = tfb.PowerTransform(power=0.2, validate_args=True)
    # Support is x > -1/power = -5; start just inside it.
    forward_grid = np.linspace(-4.999, 10, num=10).astype(np.float32)
    inverse_grid = np.logspace(0.001, 10, num=10).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=0,
        rtol=1e-3)
def testBijectiveAndFinite(self):
    """NormalCDF maps [-10, 10] into (0, 1) bijectively and finitely."""
    bijector = tfb.NormalCDF(validate_args=True)
    forward_grid = np.linspace(-10., 10., num=10).astype(np.float32)
    inverse_grid = np.linspace(0.1, 0.9, num=10).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=0,
        rtol=1e-4)
def testBijectiveAndFinite(self):
    """Exp maps [-10, 10] onto [1e-10, 1e10] bijectively and finitely."""
    bijector = tfb.Exp()
    forward_grid = np.linspace(-10, 10, num=10).astype(np.float32)
    inverse_grid = np.logspace(-10, 10, num=10).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        forward_grid,
        inverse_grid,
        eval_func=self.evaluate,
        event_ndims=0)
def testBijectiveAndFinite(self):
    """Ordered maps sorted vectors to unconstrained vectors bijectively."""
    ordered = tfb.Ordered()
    sorted_vals = np.sort(
        self._rng.randn(3, 10), axis=-1).astype(np.float32)
    unconstrained = self._rng.randn(3, 10).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        ordered,
        sorted_vals,
        unconstrained,
        eval_func=self.evaluate,
        event_ndims=1)