def testKumaraswamyModeEnableAllowNanStats(self):
  with tf1.Session():
    a = np.array([1., 2, 3])
    b = np.array([2., 4, 1.2])
    dist = tfd.Kumaraswamy(a, b, allow_nan_stats=True, validate_args=True)

    expected_mode = _kumaraswamy_mode(a, b)
    expected_mode[0] = np.nan
    self.assertEqual((3,), dist.mode().shape)
    self.assertAllClose(expected_mode, self.evaluate(dist.mode()))

    a = np.array([2., 2, 3])
    b = np.array([1., 4, 1.2])
    dist = tfd.Kumaraswamy(a, b, allow_nan_stats=True, validate_args=True)

    expected_mode = _kumaraswamy_mode(a, b)
    expected_mode[0] = np.nan
    self.assertEqual((3,), dist.mode().shape)
    self.assertAllClose(expected_mode, self.evaluate(dist.mode()))
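# `_kumaraswamy_mode` is a module-level helper that the tests reference but
# this excerpt does not show. A minimal sketch, assuming `numpy` is imported
# as `np`: the mode of Kumaraswamy(a, b) has the closed form
# ((a - 1) / (a * b - 1))**(1 / a), defined when a > 1 and b > 1.
def _kumaraswamy_mode(a, b):
  a = np.asarray(a)
  b = np.asarray(b)
  return ((a - 1) / (a * b - 1))**(1 / a)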
def testKumaraswamyModeInvalid(self):
  with tf1.Session():
    a = np.array([1., 2, 3])
    b = np.array([2., 4, 1.2])
    dist = tfd.Kumaraswamy(a, b, allow_nan_stats=False, validate_args=True)
    with self.assertRaisesOpError('Mode undefined for concentration1 <= 1.'):
      self.evaluate(dist.mode())

    a = np.array([2., 2, 3])
    b = np.array([1., 4, 1.2])
    dist = tfd.Kumaraswamy(a, b, allow_nan_stats=False, validate_args=True)
    with self.assertRaisesOpError('Mode undefined for concentration0 <= 1.'):
      self.evaluate(dist.mode())
def testKumaraswamyMean(self):
  a = [1., 2, 3]
  b = [2., 4, 1.2]
  dist = tfd.Kumaraswamy(a, b, validate_args=True)
  expected_mean = _kumaraswamy_moment(a, b, 1)
  self.assertEqual((3,), dist.mean().shape)
  self.assertAllClose(expected_mean, self.evaluate(dist.mean()))
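# `_kumaraswamy_moment` is another helper assumed at module scope. A sketch,
# assuming `scipy.special` is imported as `sp_special`: the n-th raw moment
# of Kumaraswamy(a, b) is b * B(1 + n / a, b), with B the beta function.
def _kumaraswamy_moment(a, b, n):
  a = np.asarray(a)
  b = np.asarray(b)
  return b * sp_special.beta(1.0 + n / a, b)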
def testKumaraswamySample(self):
  a = 1.
  b = 2.
  kumaraswamy = tfd.Kumaraswamy(a, b, validate_args=True)
  n = tf.constant(100000)
  samples = kumaraswamy.sample(n, seed=test_util.test_seed())
  sample_values = self.evaluate(samples)
  self.assertEqual(sample_values.shape, (100000,))
  self.assertFalse(np.any(sample_values < 0.0))
  self.assertLess(
      sp_stats.kstest(
          # Kumaraswamy is a univariate distribution.
          sample_values,
          lambda x: _kumaraswamy_cdf(1., 2., x))[0],
      0.01)
  # The standard error of the sample mean is 1 / (sqrt(18 * n)).
  expected_mean = _kumaraswamy_moment(a, b, 1)
  self.assertAllClose(sample_values.mean(axis=0), expected_mean, atol=1e-2)
  expected_variance = _kumaraswamy_moment(a, b, 2) - _kumaraswamy_moment(
      a, b, 1)**2
  self.assertAllClose(
      np.cov(sample_values, rowvar=0), expected_variance, atol=1e-1)
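# `_kumaraswamy_cdf`, used by the Kolmogorov-Smirnov check above, is likewise
# assumed at module scope. A sketch: the Kumaraswamy CDF has the closed form
# 1 - (1 - x**a)**b on [0, 1].
def _kumaraswamy_cdf(a, b, x):
  a = np.asarray(a)
  b = np.asarray(b)
  x = np.asarray(x)
  return 1 - (1 - x**a)**b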
def testKumaraswamyMode(self):
  a = np.array([1.1, 2, 3])
  b = np.array([2., 4, 1.2])
  expected_mode = _kumaraswamy_mode(a, b)
  dist = tfd.Kumaraswamy(a, b, validate_args=True)
  self.assertEqual((3,), dist.mode().shape)
  self.assertAllClose(expected_mode, self.evaluate(dist.mode()))
def testKumaraswamyVariance(self):
  a = [1., 2, 3]
  b = [2., 4, 1.2]
  dist = tfd.Kumaraswamy(a, b, validate_args=True)
  expected_variance = _kumaraswamy_moment(a, b, 2) - _kumaraswamy_moment(
      a, b, 1)**2
  self.assertEqual((3,), dist.variance().shape)
  self.assertAllClose(expected_variance, self.evaluate(dist.variance()))
def testSimpleShapes(self):
  a = np.random.rand(3)
  b = np.random.rand(3)
  dist = tfd.Kumaraswamy(a, b, validate_args=True)
  self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
  self.assertAllEqual([3], self.evaluate(dist.batch_shape_tensor()))
  self.assertEqual(tf.TensorShape([]), dist.event_shape)
  self.assertEqual(tf.TensorShape([3]), dist.batch_shape)
def testComplexShapesBroadcast(self):
  a = np.random.rand(3, 2, 2)
  b = np.random.rand(2, 2)
  dist = tfd.Kumaraswamy(a, b, validate_args=True)
  self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
  self.assertAllEqual([3, 2, 2], self.evaluate(dist.batch_shape_tensor()))
  self.assertEqual(tf.TensorShape([]), dist.event_shape)
  self.assertEqual(tf.TensorShape([3, 2, 2]), dist.batch_shape)
def testPdfAStretchedInBroadcastWhenLowerRank(self):
  a = [1., 2]
  b = [1., 2]
  x = [[.5, .5], [.2, .8]]
  pdf = tfd.Kumaraswamy(a, b, validate_args=True).prob(x)
  expected_pdf = _kumaraswamy_pdf(a, b, x)
  self.assertAllClose(expected_pdf, self.evaluate(pdf))
  self.assertEqual((2, 2), pdf.shape)
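# `_kumaraswamy_pdf` backs all of the prob/log_prob tests here. A sketch of
# the assumed helper: the density is a * b * x**(a - 1) * (1 - x**a)**(b - 1)
# on [0, 1].
def _kumaraswamy_pdf(a, b, x):
  a = np.asarray(a)
  b = np.asarray(b)
  x = np.asarray(x)
  return a * b * x**(a - 1) * (1 - x**a)**(b - 1)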
def testPdfXStretchedInBroadcastWhenLowerRank(self):
  a = [[1., 2], [2., 3]]
  b = [[1., 2], [2., 3]]
  x = [.5, .5]
  pdf = tfd.Kumaraswamy(a, b, validate_args=True).prob(x)
  expected_pdf = _kumaraswamy_pdf(a, b, x)
  self.assertEqual((2, 2), pdf.shape)
  self.assertAllClose(expected_pdf, self.evaluate(pdf))
def testPdfAStretchedInBroadcastWhenSameRank(self):
  a = [[1., 2]]
  b = [[1., 2]]
  x = [[.5, .5], [.3, .7]]
  dist = tfd.Kumaraswamy(a, b, validate_args=True)
  pdf = dist.prob(x)
  expected_pdf = _kumaraswamy_pdf(a, b, x)
  self.assertAllClose(expected_pdf, self.evaluate(pdf))
  self.assertEqual((2, 2), pdf.shape)
def testSupportBijectorOutsideRange(self):
  a = np.array([1., 2., 3.])
  b = np.array([2., 4., 1.2])
  dist = tfd.Kumaraswamy(a, b, validate_args=True)
  eps = 1e-6
  x = np.array([-2.3, -eps, 1. + eps, 1.4])
  bijector_inverse_x = dist.experimental_default_event_space_bijector(
  ).inverse(x)
  self.assertAllNan(self.evaluate(bijector_inverse_x))
def testPdfTwoBatchesNontrivialX(self):
  a = [1., 2]
  b = [1., 2]
  x = [.3, .7]
  dist = tfd.Kumaraswamy(a, b, validate_args=True)
  pdf = dist.prob(x)
  expected_pdf = _kumaraswamy_pdf(a, b, x)
  self.assertAllClose(expected_pdf, self.evaluate(pdf))
  self.assertEqual((2,), pdf.shape)
def testKumaraswamyEntropy(self):
  with tf1.Session():
    a = np.array([1., 2, 3])
    b = np.array([2., 4, 1.2])
    dist = tfd.Kumaraswamy(a, b, validate_args=True)
    self.assertEqual(dist.entropy().shape, (3,))
    expected_entropy = (1 - 1. / b) + (
        1 - 1. / a) * _harmonic_number(b) - np.log(a * b)
    self.assertAllClose(expected_entropy, self.evaluate(dist.entropy()))
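# `_harmonic_number` is the remaining unshown helper; the entropy check above
# relies on the identity H(b) = digamma(b + 1) - digamma(1), which extends
# harmonic numbers to real arguments. A sketch, again assuming `sp_special`:
def _harmonic_number(b):
  b = np.asarray(b)
  return sp_special.digamma(b + 1) - sp_special.digamma(1)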
def testPdfTwoBatches(self):
  a = [1., 2]
  b = [1., 2]
  x = [.5, .5]
  dist = tfd.Kumaraswamy(a, b, validate_args=True)
  pdf = dist.prob(x)
  expected_pdf = _kumaraswamy_pdf(a, b, x)
  self.assertAllClose(expected_pdf, self.evaluate(pdf))
  self.assertEqual((2,), pdf.shape)
def testKumaraswamySampleMultipleTimes(self):
  a_val = 1.
  b_val = 2.
  n_val = 100
  seed = test_util.test_seed()

  tf1.set_random_seed(seed)
  kumaraswamy1 = tfd.Kumaraswamy(
      concentration1=a_val, concentration0=b_val, name='kumaraswamy1')
  samples1 = self.evaluate(kumaraswamy1.sample(n_val, seed=seed))

  tf1.set_random_seed(seed)
  kumaraswamy2 = tfd.Kumaraswamy(
      concentration1=a_val, concentration0=b_val, name='kumaraswamy2')
  samples2 = self.evaluate(kumaraswamy2.sample(n_val, seed=seed))

  self.assertAllClose(samples1, samples2)
def testPdfUniformZeroBatch(self):
  # This is equivalent to a uniform distribution.
  a = 1.
  b = 1.
  x = np.array([.1, .2, .3, .5, .8], dtype=np.float32)
  dist = tfd.Kumaraswamy(a, b, validate_args=True)
  pdf = dist.prob(x)
  expected_pdf = _kumaraswamy_pdf(a, b, x)
  self.assertAllClose(expected_pdf, self.evaluate(pdf))
  self.assertEqual((5,), pdf.shape)
def testKumaraswamyCdf(self):
  shape = (30, 40, 50)
  for dt in (np.float32, np.float64):
    a = 10. * np.random.random(shape).astype(dt)
    b = 10. * np.random.random(shape).astype(dt)
    x = np.random.random(shape).astype(dt)
    actual = self.evaluate(tfd.Kumaraswamy(a, b, validate_args=True).cdf(x))
    self.assertAllEqual(np.ones(shape, dtype=np.bool_), 0. <= x)
    self.assertAllEqual(np.ones(shape, dtype=np.bool_), 1. >= x)
    self.assertAllClose(_kumaraswamy_cdf(a, b, x), actual)
def testInvalidConcentration0(self):
  x = tf.Variable(1.)
  dist = tfd.Kumaraswamy(
      concentration0=x, concentration1=1., validate_args=True)
  self.evaluate(x.initializer)
  self.assertIs(x, dist.concentration0)
  self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
  with self.assertRaisesOpError(
      'Argument `concentration0` must be positive.'):
    with tf.control_dependencies([x.assign(-1.)]):
      self.evaluate(dist.event_shape_tensor())
def testAssertValidSample(self):
  a = [[1., 2, 3]]
  b = [[2., 4, 3]]
  dist = tfd.Kumaraswamy(a, b, validate_args=True)
  self.evaluate(dist.prob([.1, .3, .6]))
  self.evaluate(dist.prob([.2, .3, .5]))
  # Either condition can trigger.
  with self.assertRaisesOpError('Sample must be non-negative.'):
    self.evaluate(dist.prob([-1., 0.1, 0.5]))
  with self.assertRaisesOpError('Sample must be less than or equal to `1`.'):
    self.evaluate(dist.prob([.1, .2, 1.2]))
def testPdfAtBoundary(self):
  a = [0.5, 2.]
  b = [0.5, 5.]
  x = [[0.], [1.]]
  dist = tfd.Kumaraswamy(a, b, validate_args=True)
  pdf = self.evaluate(dist.prob(x))
  log_pdf = self.evaluate(dist.log_prob(x))
  self.assertAllPositiveInf(pdf[:, 0])
  self.assertAllFinite(pdf[:, 1])
  self.assertAllPositiveInf(log_pdf[:, 0])
  self.assertAllNegativeInf(log_pdf[:, 1])
def testPdfXProper(self):
  a = [[1., 2, 3]]
  b = [[2., 4, 3]]
  dist = tfd.Kumaraswamy(a, b, validate_args=True)
  self.evaluate(dist.prob([.1, .3, .6]))
  self.evaluate(dist.prob([.2, .3, .5]))
  # Either condition can trigger.
  with self.assertRaisesOpError('sample must be non-negative'):
    self.evaluate(dist.prob([-1., 0.1, 0.5]))
  with self.assertRaisesOpError('sample must be no larger than `1`'):
    self.evaluate(dist.prob([.1, .2, 1.2]))
def testKumaraswamySampleMultidimensional(self):
  a = np.random.rand(3, 2, 2).astype(np.float32)
  b = np.random.rand(3, 2, 2).astype(np.float32)
  kumaraswamy = tfd.Kumaraswamy(a, b, validate_args=True)
  n = tf.constant(100000)
  samples = kumaraswamy.sample(n, seed=test_util.test_seed())
  sample_values = self.evaluate(samples)
  self.assertEqual(sample_values.shape, (100000, 3, 2, 2))
  self.assertFalse(np.any(sample_values < 0.0))
  self.assertAllClose(
      sample_values[:, 1, :].mean(axis=0),
      _kumaraswamy_moment(a, b, 1)[1, :],
      atol=1e-1)
def testKumaraswamyLogCdf(self):
  shape = (30, 40, 50)
  for dt in (np.float32, np.float64):
    a = 10. * np.random.random(shape).astype(dt)
    b = 10. * np.random.random(shape).astype(dt)
    x = np.random.random(shape).astype(dt)
    actual = self.evaluate(
        tf.exp(tfd.Kumaraswamy(a, b, validate_args=True).log_cdf(x)))
    # `np.bool` was removed from NumPy; use the `np.bool_` scalar type.
    self.assertAllEqual(np.ones(shape, dtype=np.bool_), 0. <= x)
    self.assertAllEqual(np.ones(shape, dtype=np.bool_), 1. >= x)
    self.assertAllClose(_kumaraswamy_cdf(a, b, x), actual, rtol=1e-4, atol=0)
def mixup_func(major_samples, minor_samples, alpha=0):
  """Mixes preferred and less preferred samples via mixup.

  Mixup implementation is from here:
  https://www.inference.vc/mixup-data-dependent-data-augmentation/

  Args:
    major_samples: A Tensor of shape [batch_size, ...], or a list of tensors,
      with the most preferred samples.
    minor_samples: A Tensor of the same shape and dtype as `major_samples`,
      or a list of tensors, with the less preferred samples.
    alpha: A float in the range [0, 1] used to define the beta distribution.
      If `alpha` is 0, then mixup is not used.

  Raises:
    AssertionError: If the shape of the tensor (or the length of the list of
      tensors) `major_samples` does not match the shape of the tensor (or the
      length of the list of tensors) `minor_samples`.

  Returns:
    If `alpha` is 0, the Tensor or list of Tensors `major_samples`; otherwise,
    a Tensor or list of Tensors whose mixed samples combine `major_samples`
    and `minor_samples` in proportions drawn from the beta distribution.
  """
  if isinstance(major_samples, (list, tuple)):
    # The lengths of the lists must be the same.
    assert len(major_samples) == len(minor_samples)
  else:
    major_samples, minor_samples = [major_samples], [minor_samples]
  # The per-example shapes of the tensors must be the same.
  assert major_samples[0].shape[1:] == minor_samples[0].shape[1:]

  # `clip_value` is assumed to be a module-level helper that clamps a scalar
  # to the given range.
  alpha = clip_value(alpha, 0.0, 1.0)
  if alpha == 0.0:
    return major_samples

  mixed_samples = []
  kumaraswamy = tfd.Kumaraswamy(alpha + 1.0, alpha)
  for major, minor in zip(major_samples, minor_samples):
    try:
      # Draw one mixing fraction per batch element and broadcast it across
      # the remaining dimensions.
      sample_shape = (tf.shape(major)[0], *[1] * (len(major.shape) - 1))
      mix_fraction = kumaraswamy.sample(sample_shape)
      mixed = mix_fraction * (major - minor) + minor
    except ValueError:
      mixed = major
    mixed_samples.append(mixed)
  return mixed_samples
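# A hypothetical usage sketch for `mixup_func` (the tensors and alpha value
# below are illustrative, not taken from the surrounding code): mix a batch
# with a shuffled copy of itself, one mixing fraction per example. A single
# tensor input is wrapped into a list internally, hence the `[0]`.
#
#   major = tf.random.uniform([8, 32, 32, 3])
#   minor = tf.random.shuffle(major)
#   mixed = mixup_func(major, minor, alpha=0.2)[0]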
def testBProperty(self):
  a = [[1., 2, 3]]
  b = [[2., 4, 3]]
  dist = tfd.Kumaraswamy(a, b, validate_args=True)
  self.assertEqual([1, 3], dist.concentration0.shape)
  self.assertAllClose(b, self.evaluate(dist.concentration0))
def testAProperty(self):
  a = [[1., 2, 3]]
  b = [[2., 4, 3]]
  dist = tfd.Kumaraswamy(a, b, validate_args=True)
  self.assertEqual([1, 3], dist.concentration1.shape)
  self.assertAllClose(a, self.evaluate(dist.concentration1))