def testUniformUniformKLFinite(self):
  batch_size = 6
  a_low = -1.0 * np.arange(1, batch_size + 1)
  a_high = np.array([1.0] * batch_size)
  b_low = -2.0 * np.arange(1, batch_size + 1)
  b_high = np.array([2.0] * batch_size)
  a = tfd.Uniform(low=a_low, high=a_high, validate_args=True)
  b = tfd.Uniform(low=b_low, high=b_high, validate_args=True)
  true_kl = np.log(b_high - b_low) - np.log(a_high - a_low)
  kl = tfd.kl_divergence(a, b)

  # This is essentially an approximated integral from the direct definition
  # of KL divergence.
  x = a.sample(int(1e4), seed=test_util.test_seed())
  kl_sample = tf.reduce_mean(a.log_prob(x) - b.log_prob(x), axis=0)

  kl_, kl_sample_ = self.evaluate([kl, kl_sample])
  self.assertAllClose(true_kl, kl_, atol=2e-15)
  self.assertAllClose(true_kl, kl_sample_, atol=0.0, rtol=1e-1)

  zero_kl = tfd.kl_divergence(a, a)
  true_zero_kl_, zero_kl_ = self.evaluate([tf.zeros_like(true_kl), zero_kl])
  self.assertAllEqual(true_zero_kl_, zero_kl_)
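
# Sketch (added exposition, not from the original suite): the closed form
# used for `true_kl` above follows because, with a's support nested inside
# b's, the log-density ratio is constant over a's support:
#   KL(a || b) = E_a[log p_a(X) - log p_b(X)]
#              = log(1 / (a_high - a_low)) - log(1 / (b_high - b_low))
#              = log(b_high - b_low) - log(a_high - a_low).
def _uniform_kl_closed_form(a_low, a_high, b_low, b_high):
  """Hypothetical helper: KL(U[a_low, a_high] || U[b_low, b_high]).

  Assumes [a_low, a_high] is contained in [b_low, b_high]; otherwise the
  divergence is infinite (see testUniformUniformKLInfinite below).
  """
  return np.log(b_high - b_low) - np.log(a_high - a_low)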
def testUniformAssertMaxGtMin(self):
  a_v = np.array([1.0, 1.0, 1.0], dtype=np.float32)
  b_v = np.array([1.0, 2.0, 3.0], dtype=np.float32)

  with self.assertRaisesOpError('not defined when `low` >= `high`'):
    uniform = tfd.Uniform(low=a_v, high=b_v, validate_args=True)
    self.evaluate(uniform.mean())
def _testUniformSampleMultiDimensional(self):
  # DISABLED: Please enable this test once b/issues/30149644 is resolved.
  batch_size = 2
  a_v = [3.0, 22.0]
  b_v = [13.0, 35.0]
  a = tf.constant([a_v] * batch_size)
  b = tf.constant([b_v] * batch_size)
  uniform = tfd.Uniform(low=a, high=b, validate_args=True)

  n_v = 100000
  n = tf.constant(n_v)
  samples = uniform.sample(n, seed=test_util.test_seed())
  self.assertEqual(samples.shape, (n_v, batch_size, 2))

  sample_values = self.evaluate(samples)

  self.assertFalse(
      np.any(sample_values[:, 0, 0] < a_v[0]) or
      np.any(sample_values[:, 0, 0] >= b_v[0]))
  self.assertFalse(
      np.any(sample_values[:, 0, 1] < a_v[1]) or
      np.any(sample_values[:, 0, 1] >= b_v[1]))

  self.assertAllClose(
      sample_values[:, 0, 0].mean(), (a_v[0] + b_v[0]) / 2, atol=1e-2)
  self.assertAllClose(
      sample_values[:, 0, 1].mean(), (a_v[1] + b_v[1]) / 2, atol=1e-2)
def make_dataset(self, n, d, link, offset=None, scale=1.):
  seed = tfd.SeedStream(seed=213356351, salt='tfp.glm.fisher_scoring_test')
  model_coefficients = tfd.Uniform(
      low=np.array(-0.5, self.dtype),
      high=np.array(0.5, self.dtype)).sample(d, seed=seed())
  radius = np.sqrt(2.)
  model_coefficients *= radius / tf.linalg.norm(tensor=model_coefficients)
  model_matrix = tfd.Normal(
      loc=np.array(0, self.dtype),
      scale=np.array(1, self.dtype)).sample([n, d], seed=seed())
  scale = tf.convert_to_tensor(value=scale, dtype=self.dtype)
  linear_response = tf.tensordot(
      model_matrix, model_coefficients, axes=[[1], [0]])
  if offset is not None:
    linear_response += offset
  if link == 'linear':
    response = tfd.Normal(loc=linear_response, scale=scale).sample(
        seed=seed())
  elif link == 'probit':
    response = tf.cast(
        tfd.Normal(loc=linear_response, scale=scale).sample(seed=seed()) > 0,
        self.dtype)
  elif link == 'logit':
    response = tfd.Bernoulli(logits=linear_response).sample(seed=seed())
  else:
    raise ValueError('unrecognized true link: {}'.format(link))
  return model_matrix, response, model_coefficients, linear_response
def testUniformRange(self):
  a = 3.0
  b = 10.0
  uniform = tfd.Uniform(low=a, high=b, validate_args=True)
  self.assertAllClose(a, self.evaluate(uniform.low))
  self.assertAllClose(b, self.evaluate(uniform.high))
  self.assertAllClose(b - a, self.evaluate(uniform.range()))
def testAssertValidSample(self):
  dist = tfd.Uniform(low=2., high=5., validate_args=True)
  with self.assertRaisesOpError('must be greater than or equal to `low`'):
    self.evaluate(dist.cdf([2.3, 1.7, 4.]))
  with self.assertRaisesOpError('must be less than or equal to `high`'):
    self.evaluate(dist.survival_function([2.3, 5.2, 4.]))
def testUniformEntropy(self):
  a_v = np.array([1.0, 1.0, 1.0])
  b_v = np.array([[1.5, 2.0, 3.0]])
  uniform = tfd.Uniform(low=a_v, high=b_v, validate_args=True)
  expected_entropy = np.log(b_v - a_v)
  self.assertAllClose(expected_entropy, self.evaluate(uniform.entropy()))
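
# Added note (exposition only): the expected value follows from the
# differential entropy of Uniform(a, b),
#   H = -int_a^b (1 / (b - a)) * log(1 / (b - a)) dx = log(b - a),
# and `a_v` (shape [3]) broadcasts against `b_v` (shape [1, 3]) to give a
# [1, 3] batch of entropies.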
def test_multipart_bijector(self):
  seed_stream = test_util.test_seed_stream()

  prior = tfd.JointDistributionSequential([
      tfd.Gamma(1., 1.),
      lambda scale: tfd.Uniform(0., scale),
      lambda concentration: tfd.CholeskyLKJ(4, concentration),
  ], validate_args=True)
  likelihood = lambda corr: tfd.MultivariateNormalTriL(scale_tril=corr)
  obs = self.evaluate(
      likelihood(
          prior.sample(seed=seed_stream())[-1]).sample(seed=seed_stream()))

  bij = prior.experimental_default_event_space_bijector()

  def target_log_prob(scale, conc, corr):
    return prior.log_prob(scale, conc, corr) + likelihood(corr).log_prob(obs)

  kernel = tfp.mcmc.HamiltonianMonteCarlo(
      target_log_prob, num_leapfrog_steps=3, step_size=.5)
  kernel = tfp.mcmc.TransformedTransitionKernel(kernel, bij)

  init = self.evaluate(
      tuple(tf.random.uniform(s, -2., 2., seed=seed_stream())
            for s in bij.inverse_event_shape(prior.event_shape)))
  state = bij.forward(init)
  kr = kernel.bootstrap_results(state)
  next_state, next_kr = kernel.one_step(state, kr, seed=seed_stream())
  self.evaluate((state, kr, next_state, next_kr))

  expected = (target_log_prob(*state) -
              bij.inverse_log_det_jacobian(state, [0, 0, 2]))
  actual = kernel._inner_kernel.target_log_prob_fn(*init)  # pylint: disable=protected-access
  self.assertAllClose(expected, actual)
def testUniformBroadcasting(self):
  a = 10.0
  b = [11.0, 20.0]
  uniform = tfd.Uniform(a, b, validate_args=False)

  pdf = uniform.prob([[10.5, 11.5], [9.0, 19.0], [10.5, 21.0]])
  expected_pdf = np.array([[1.0, 0.1], [0.0, 0.1], [1.0, 0.0]])
  self.assertAllClose(expected_pdf, self.evaluate(pdf))
def testModifiedVariableAssertionSingleVar(self):
  low = tf.Variable(0.)
  high = 1.
  self.evaluate(low.initializer)
  uniform = tfd.Uniform(low=low, high=high, validate_args=True)
  with self.assertRaisesOpError('not defined when `low` >= `high`'):
    with tf.control_dependencies([low.assign(2.)]):
      self.evaluate(uniform.mean())
def testModifiedVariableAssertion(self):
  low = tf.Variable(0.)
  high = tf.Variable(1.)
  self.evaluate([low.initializer, high.initializer])
  uniform = tfd.Uniform(low=low, high=high, validate_args=True)
  with self.assertRaisesOpError('not defined when `low` >= `high`'):
    with tf.control_dependencies([low.assign(2.)]):
      self.evaluate(uniform.mean())
def testFullyReparameterized(self):
  a = tf.constant(0.1)
  b = tf.constant(0.8)
  _, [grad_a, grad_b] = tfp.math.value_and_gradient(
      lambda a_, b_: (  # pylint: disable=g-long-lambda
          tfd.Uniform(a_, b_).sample(100, seed=test_util.test_seed())),
      [a, b])
  self.assertIsNotNone(grad_a)
  self.assertIsNotNone(grad_b)
def testUniformShape(self):
  a = tf.constant([-3.0] * 5)
  b = tf.constant(11.0)
  uniform = tfd.Uniform(low=a, high=b, validate_args=True)

  self.assertEqual(self.evaluate(uniform.batch_shape_tensor()), (5,))
  self.assertEqual(uniform.batch_shape, tf.TensorShape([5]))
  self.assertAllEqual(self.evaluate(uniform.event_shape_tensor()), [])
  self.assertEqual(uniform.event_shape, tf.TensorShape([]))
def testUniformQuantile(self):
  low = tf.reshape(tf.linspace(0., 1., 6), [2, 1, 3])
  high = tf.reshape(tf.linspace(1.5, 2.5, 6), [1, 2, 3])
  uniform = tfd.Uniform(low=low, high=high, validate_args=True)
  expected_quantiles = tf.reshape(tf.linspace(1.01, 1.49, 24), [2, 2, 2, 3])
  cumulative_densities = uniform.cdf(expected_quantiles)
  actual_quantiles = uniform.quantile(cumulative_densities)
  self.assertAllClose(
      self.evaluate(expected_quantiles), self.evaluate(actual_quantiles))
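
# Added note (exposition only): for Uniform(low, high) the CDF and quantile
# function are mutually inverse affine maps on the support,
#   cdf(x) = (x - low) / (high - low),  quantile(p) = low + p * (high - low),
# so `quantile(cdf(x))` recovers `x` up to float roundoff, which is what the
# round trip above asserts.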
def testSupportBijectorOutsideRange(self):
  low = np.array([1., 2., 3., -5.])
  high = np.array([6., 7., 6., 1.])
  dist = tfd.Uniform(low=low, high=high, validate_args=False)
  eps = 1e-6
  x = np.array([1. - eps, 1.5, 6. + eps, -5. - eps])
  bijector_inverse_x = (
      dist.experimental_default_event_space_bijector().inverse(x))
  self.assertAllNan(self.evaluate(bijector_inverse_x))
def testReproducible(self):
  u = dtc._TensorCoercible(tfd.Uniform(low=-100., high=100.),
                           tfd.Distribution.sample)
  # Small scale means only the mean really matters.
  x = tfd.Normal(loc=u, scale=0.0001)
  [u_, x1_, x2_] = self.evaluate([
      tf.convert_to_tensor(x.loc), x.sample(), x.sample()])
  self.assertNear(u_, x1_, err=0.01)
  self.assertNear(u_, x2_, err=0.01)
def testUniformPDFWithScalarEndpoint(self):
  a = tf.constant([0.0, 5.0])
  b = tf.constant(10.0)
  uniform = tfd.Uniform(low=a, high=b, validate_args=True)

  x = np.array([0.0, 8.0], dtype=np.float32)
  expected_pdf = np.array([1.0 / (10.0 - 0.0), 1.0 / (10.0 - 5.0)])

  pdf = uniform.prob(x)
  self.assertAllClose(expected_pdf, self.evaluate(pdf))
def test_docstring_shapes(self):
  d = tfd.BatchBroadcast(tfd.Normal(tf.range(3.), 1.), [2, 3])
  self.assertEqual([2, 3], d.batch_shape)
  self.assertEqual([3], d.distribution.batch_shape)
  self.assertEqual([], d.event_shape)

  df = tfd.Uniform(4., 5.).sample([10, 1], seed=test_util.test_seed())
  d = tfd.BatchBroadcast(tfd.WishartTriL(df=df, scale_tril=tf.eye(3)), [2])
  self.assertEqual([10, 2], d.batch_shape)
  self.assertEqual([10, 1], d.distribution.batch_shape)
  self.assertEqual([3, 3], d.event_shape)
def testUniformUniformKLInfinite(self):
  # This covers three cases:
  # - a.low < b.low,
  # - a.high > b.high, and
  # - both.
  a_low = np.array([-1.0, 0.0, -1.0])
  a_high = np.array([1.0, 2.0, 2.0])
  b_low = np.array([0.0] * 3)
  b_high = np.array([1.0] * 3)
  a = tfd.Uniform(low=a_low, high=a_high, validate_args=True)
  b = tfd.Uniform(low=b_low, high=b_high, validate_args=True)

  # Since 'a' can be sampled to give points outside the support of 'b',
  # the KL divergence is infinite.
  true_kl = tf.convert_to_tensor(value=np.array([np.inf] * 3))
  kl = tfd.kl_divergence(a, b)

  true_kl_, kl_ = self.evaluate([true_kl, kl])
  self.assertAllEqual(true_kl_, kl_)
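
# Added note (exposition only): wherever `a` has positive density but `b`
# has zero density, the integrand log(p_a(x) / p_b(x)) is +inf on a set of
# positive `a`-measure, so KL(a || b) = +inf in all three cases above.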
def test_default_event_space_bijector_shape(self):
  dist = tfd.Uniform(low=[1., 2., 3., 6.], high=10., validate_args=True)
  batch_shape = [2, 2, 1]
  reshape_dist = tfd.BatchReshape(
      dist, batch_shape=batch_shape, validate_args=True)
  x = self.evaluate(
      dist._experimental_default_event_space_bijector()(
          10. * tf.ones(dist.batch_shape)))
  x_reshape = self.evaluate(
      reshape_dist._experimental_default_event_space_bijector()(
          10. * tf.ones(reshape_dist.batch_shape)))
  self.assertAllEqual(tf.reshape(x, batch_shape), x_reshape)
def testDeferredBijectorParameters(self):
  low = tf.Variable(2.)
  high = tf.Variable(7.)
  dist = tfd.Uniform(low, high, validate_args=True)
  shift = dist._experimental_default_event_space_bijector().bijectors[0].shift
  scale = dist._experimental_default_event_space_bijector().bijectors[1].scale
  self.evaluate([low.initializer, high.initializer])
  self.assertIsNone(tf.get_static_value(shift))
  self.assertIsNone(tf.get_static_value(scale))
  self.assertEqual(self.evaluate(tf.convert_to_tensor(scale)), 5.)
  self.assertEqual(self.evaluate(tf.convert_to_tensor(shift)), 2.)
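
# Added note (assumes the composition probed above, which treats
# `bijectors[0]` as a shift and `bijectors[1]` as a scale): the default
# event-space bijector for Uniform maps the real line onto (low, high) by
# squashing to (0, 1) and then rescaling by `high - low` and shifting by
# `low`; hence shift == 2. and scale == 7. - 2. == 5. here. Because `low`
# and `high` are `tf.Variable`s, both parameters are deferred reads with no
# static value, which is what the `assertIsNone` checks verify.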
def testUniformFloat64(self):
  uniform = tfd.Uniform(low=np.float64(0.), high=np.float64(1.))

  self.assertAllClose(
      [1., 1.],
      self.evaluate(uniform.prob(np.array([0.5, 0.6], dtype=np.float64))))
  self.assertAllClose(
      [0.5, 0.6],
      self.evaluate(uniform.cdf(np.array([0.5, 0.6], dtype=np.float64))))
  self.assertAllClose(0.5, self.evaluate(uniform.mean()))
  self.assertAllClose(1 / 12., self.evaluate(uniform.variance()))
  self.assertAllClose(0., self.evaluate(uniform.entropy()))
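
# Added note (exposition only): the expectations follow from the standard
# Uniform(a, b) moments,
#   mean = (a + b) / 2,  variance = (b - a)**2 / 12,  entropy = log(b - a),
# which for Uniform(0, 1) give 0.5, 1/12, and log(1) = 0.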
def one_term(event_shape, event_shape_tensor, batch_shape, batch_shape_tensor,
             dtype):
  if not tensorshape_util.is_fully_defined(event_shape):
    event_shape = event_shape_tensor
  result = tfd.Sample(
      tfd.Uniform(low=tf.constant(-2., dtype=dtype),
                  high=tf.constant(2., dtype=dtype)),
      sample_shape=event_shape)
  if not tensorshape_util.is_fully_defined(batch_shape):
    batch_shape = batch_shape_tensor
    needs_bcast = True
  else:  # Only batch broadcast when batch ndims > 0.
    needs_bcast = bool(tensorshape_util.as_list(batch_shape))
  if needs_bcast:
    result = tfd.BatchBroadcast(result, batch_shape)
  return result
def testUniformNans(self):
  a = 10.0
  b = [11.0, 100.0]
  uniform = tfd.Uniform(low=a, high=b, validate_args=False)

  no_nans = tf.constant(1.0)
  nans = tf.constant(0.0) / tf.constant(0.0)
  self.assertTrue(self.evaluate(tf.math.is_nan(nans)))
  with_nans = tf.stack([no_nans, nans])

  pdf = uniform.prob(with_nans)

  is_nan = self.evaluate(tf.math.is_nan(pdf))
  self.assertFalse(is_nan[0])
  self.assertTrue(is_nan[1])
def _make_dataset(self, n, d, link, scale=1., batch_shape=None,
                  dtype=np.float32, seed=42):
  seed = tfd.SeedStream(seed=seed, salt='tfp.glm.proximal_hessian_test')

  if batch_shape is None:
    batch_shape = []

  model_coefficients = tfd.Uniform(
      low=np.array(-1, dtype), high=np.array(1, dtype)).sample(
          batch_shape + [d], seed=seed())

  radius = np.sqrt(2.)
  model_coefficients *= (
      radius /
      tf.linalg.norm(tensor=model_coefficients, axis=-1)[..., tf.newaxis])

  mask = tfd.Bernoulli(probs=0.5, dtype=tf.bool).sample(batch_shape + [d])
  model_coefficients = tf1.where(mask, model_coefficients,
                                 tf.zeros_like(model_coefficients))
  model_matrix = tfd.Normal(
      loc=np.array(0, dtype), scale=np.array(1, dtype)).sample(
          batch_shape + [n, d], seed=seed())
  scale = tf.convert_to_tensor(value=scale, dtype=dtype)
  linear_response = tf.matmul(
      model_matrix, model_coefficients[..., tf.newaxis])[..., 0]

  if link == 'linear':
    response = tfd.Normal(loc=linear_response, scale=scale).sample(
        seed=seed())
  elif link == 'probit':
    response = tf.cast(
        tfd.Normal(loc=linear_response, scale=scale).sample(seed=seed()) > 0,
        dtype)
  elif link == 'logit':
    response = tfd.Bernoulli(logits=linear_response).sample(seed=seed())
  else:
    raise ValueError('unrecognized true link: {}'.format(link))
  return self.evaluate([model_matrix, response, model_coefficients, mask])
def testUniformSampleWithShape(self):
  a = 10.0
  b = [11.0, 20.0]
  uniform = tfd.Uniform(a, b, validate_args=True)

  pdf = uniform.prob(uniform.sample((2, 3), seed=test_util.test_seed()))
  # pylint: disable=bad-continuation
  expected_pdf = [
      [[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
      [[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
  ]
  # pylint: enable=bad-continuation
  self.assertAllClose(expected_pdf, self.evaluate(pdf))

  pdf = uniform.prob(uniform.sample(seed=test_util.test_seed()))
  expected_pdf = [1.0, 0.1]
  self.assertAllClose(expected_pdf, self.evaluate(pdf))
def test_bijector_shapes(self):
  d = tfd.Sample(tfd.Uniform(tf.zeros([5]), 1.), 2)
  b = d.experimental_default_event_space_bijector()
  self.assertEqual((2,), d.event_shape)
  self.assertEqual((2,), b.inverse_event_shape((2,)))
  self.assertEqual((2,), b.forward_event_shape((2,)))
  self.assertEqual((5, 2), b.forward_event_shape((5, 2)))
  self.assertEqual((5, 2), b.inverse_event_shape((5, 2)))
  self.assertEqual((3, 5, 2), b.inverse_event_shape((3, 5, 2)))
  self.assertEqual((3, 5, 2), b.forward_event_shape((3, 5, 2)))

  d = tfd.Sample(tfd.CholeskyLKJ(4, concentration=tf.ones([5])), 2)
  b = d.experimental_default_event_space_bijector()
  self.assertEqual((2, 4, 4), d.event_shape)
  dim = (4 * 3) // 2
  self.assertEqual((5, 2, dim), b.inverse_event_shape((5, 2, 4, 4)))
  self.assertEqual((5, 2, 4, 4), b.forward_event_shape((5, 2, dim)))
  self.assertEqual((3, 5, 2, dim), b.inverse_event_shape((3, 5, 2, 4, 4)))
  self.assertEqual((3, 5, 2, 4, 4), b.forward_event_shape((3, 5, 2, dim)))
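
# Added note (exposition only): a 4 x 4 Cholesky factor of a correlation
# matrix has n * (n - 1) / 2 = (4 * 3) // 2 = 6 free entries (the strictly
# lower triangle; each diagonal entry is pinned by the unit-norm rows), so
# the unconstrained space pulled back by the bijector has the `dim = 6`
# trailing dimension asserted above.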
def testUniformSample(self):
  a = tf.constant([3.0, 4.0])
  b = tf.constant(13.0)
  a1_v = 3.0
  a2_v = 4.0
  b_v = 13.0
  n = tf.constant(100000)
  uniform = tfd.Uniform(low=a, high=b, validate_args=True)

  samples = uniform.sample(n, seed=test_util.test_seed())
  sample_values = self.evaluate(samples)
  self.assertEqual(sample_values.shape, (100000, 2))
  self.assertAllClose(
      sample_values[::, 0].mean(), (b_v + a1_v) / 2, atol=1e-1, rtol=0.)
  self.assertAllClose(
      sample_values[::, 1].mean(), (b_v + a2_v) / 2, atol=1e-1, rtol=0.)
  self.assertFalse(
      np.any(sample_values[::, 0] < a1_v) or np.any(sample_values >= b_v))
  self.assertFalse(
      np.any(sample_values[::, 1] < a2_v) or np.any(sample_values >= b_v))
def testUniformCDF(self):
  batch_size = 6
  a = tf.constant([1.0] * batch_size)
  b = tf.constant([11.0] * batch_size)
  a_v = 1.0
  b_v = 11.0
  x = np.array([-2.5, 2.5, 4.0, 0.0, 10.99, 12.0], dtype=np.float32)

  uniform = tfd.Uniform(low=a, high=b, validate_args=False)

  def _expected_cdf():
    cdf = (x - a_v) / (b_v - a_v)
    cdf[x >= b_v] = 1
    cdf[x < a_v] = 0
    return cdf

  cdf = uniform.cdf(x)
  self.assertAllClose(_expected_cdf(), self.evaluate(cdf))

  log_cdf = uniform.log_cdf(x)
  self.assertAllClose(np.log(_expected_cdf()), self.evaluate(log_cdf))
def test_default_event_space_bijector_bijective_and_finite(self):
  batch_shape = [5, 1, 4]
  batch_size = np.prod(batch_shape)
  low = tf.Variable(
      np.linspace(-5., 5., batch_size).astype(self.dtype),
      shape=(batch_size,) if self.is_static_shape else None)
  dist = tfd.Uniform(low=low, high=30., validate_args=True)
  reshape_dist = tfd.BatchReshape(
      dist, batch_shape=batch_shape, validate_args=True)
  x = np.linspace(
      -10., 10., batch_size).astype(self.dtype).reshape(batch_shape)
  y = np.linspace(
      5., 30 - 1e-4, batch_size).astype(self.dtype).reshape(batch_shape)

  self.evaluate(low.initializer)
  bijector_test_util.assert_bijective_and_finite(
      reshape_dist._experimental_default_event_space_bijector(),
      x, y, eval_func=self.evaluate, event_ndims=0, rtol=1e-4)