Example #1
0
 def testGammaModeAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):
     # The mode (concentration - 1) / rate is undefined for the first
     # batch member, where concentration < 1.
     concentration = np.array([0.5, 3.0, 2.5])
     rate = np.array([1.0, 4.0, 5.0])
     dist = gamma_lib.Gamma(concentration=concentration,
                            rate=rate,
                            allow_nan_stats=False)
     # With allow_nan_stats=False, evaluating the mode must raise.
     with self.assertRaisesOpError("x < y"):
         self.evaluate(dist.mode())
Example #2
0
 def testGammaMean(self):
     concentration = np.array([1.0, 3.0, 2.5])
     rate = np.array([1.0, 4.0, 5.0])
     dist = gamma_lib.Gamma(concentration=concentration, rate=rate)
     self.assertEqual(dist.mean().shape, (3, ))
     if not stats:
         # scipy unavailable; the shape check above still ran.
         return
     self.assertAllClose(
         self.evaluate(dist.mean()),
         stats.gamma.mean(concentration, scale=1 / rate))
Example #3
0
    def testGammaShape(self):
        # Scalar rate broadcasts against a length-5 concentration vector.
        concentration = tf.constant([3.0] * 5)
        rate = tf.constant(11.0)
        dist = gamma_lib.Gamma(concentration=concentration, rate=rate)

        self.assertEqual(self.evaluate(dist.batch_shape_tensor()), (5, ))
        self.assertEqual(dist.batch_shape, tf.TensorShape([5]))
        self.assertAllEqual(self.evaluate(dist.event_shape_tensor()), [])
        self.assertEqual(dist.event_shape, tf.TensorShape([]))
Example #4
0
 def testGammaVariance(self):
     concentration = np.array([1.0, 3.0, 2.5])
     rate = np.array([1.0, 4.0, 5.0])
     dist = gamma_lib.Gamma(concentration=concentration, rate=rate)
     self.assertEqual(dist.variance().get_shape(), (3, ))
     if not stats:
         # scipy unavailable; skip the numerical comparison.
         return
     self.assertAllClose(
         self.evaluate(dist.variance()),
         stats.gamma.var(concentration, scale=1 / rate))
Example #5
0
 def testGammaFullyReparameterized(self):
     concentration = tf.constant(4.0)
     rate = tf.constant(3.0)
     with backprop.GradientTape() as tape:
         tape.watch(concentration)
         tape.watch(rate)
         dist = gamma_lib.Gamma(concentration=concentration, rate=rate)
         draws = dist.sample(100)
     # Reparameterized sampling: gradients of the samples w.r.t. both
     # parameters must exist (be non-None).
     grads = tape.gradient(draws, [concentration, rate])
     self.assertIsNotNone(grads[0])
     self.assertIsNotNone(grads[1])
Example #6
0
 def testGammaModeAllowNanStatsIsTrueReturnsNaNforUndefinedBatchMembers(
         self):
     # The mode is undefined for the first batch member (concentration < 1).
     concentration = np.array([0.5, 3.0, 2.5])
     rate = np.array([1.0, 4.0, 5.0])
     dist = gamma_lib.Gamma(concentration=concentration,
                            rate=rate,
                            allow_nan_stats=True)
     expected = (concentration - 1) / rate
     expected[0] = np.nan  # undefined entry should come back as NaN
     self.assertEqual(dist.mode().get_shape(), (3, ))
     self.assertAllClose(self.evaluate(dist.mode()), expected)
Example #7
0
    def testGammaCDF(self):
        n = 6
        concentration = tf.constant([2.0] * n)
        rate = tf.constant([3.0] * n)
        x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)

        dist = gamma_lib.Gamma(concentration=concentration, rate=rate)
        cdf = dist.cdf(x)
        self.assertEqual(cdf.get_shape(), (6, ))
        if not stats:
            return
        # scipy parameterizes gamma via shape + scale, where scale = 1/rate.
        self.assertAllClose(self.evaluate(cdf),
                            stats.gamma.cdf(x, 2.0, scale=1 / 3.0))
Example #8
0
 def testGammaLogPDF(self):
     n = 6
     concentration = tf.constant([2.0] * n)
     rate = tf.constant([3.0] * n)
     x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
     dist = gamma_lib.Gamma(concentration=concentration, rate=rate)
     log_pdf = dist.log_prob(x)
     self.assertEqual(log_pdf.shape, (6, ))
     pdf = dist.prob(x)
     self.assertEqual(pdf.shape, (6, ))
     if not stats:
         # scipy unavailable; shape checks above still ran.
         return
     # scipy's scale parameter is the reciprocal of the rate.
     expected = stats.gamma.logpdf(x, 2.0, scale=1 / 3.0)
     self.assertAllClose(self.evaluate(log_pdf), expected)
     self.assertAllClose(self.evaluate(pdf), np.exp(expected))
  def _sample_n(self, n, seed=None, name=None):
    """Draws `n` samples, shaped `[n] + batch_shape`.

    Each sample is built as `loc + scale * sign * G**(1/power)`, where
    `G ~ Gamma(1/power, 1)` and `sign` comes from `tfp_random.rademacher`
    (presumably +/-1 draws — TODO confirm against tfp_random docs).

    Args:
      n: Number of samples to draw; converted to an int32 tensor.
      seed: PRNG seed; split into independent sub-seeds for the sign and
        gamma draws.
      name: Unused here; accepted for API compatibility.

    Returns:
      A tensor of samples with shape `[n] + batch_shape`.
    """
    n = tf.convert_to_tensor(n, name='num', dtype=tf.int32)
    loc = tf.convert_to_tensor(self.loc)
    scale = tf.convert_to_tensor(self.scale)
    power = tf.convert_to_tensor(self.power)

    batch_shape = self._batch_shape_tensor(loc=loc, scale=scale, power=power)
    result_shape = ps.concat([[n], batch_shape], axis=0)

    # 1/power is the gamma concentration; broadcast so the gamma draw
    # covers the full batch shape.
    ipower = tf.broadcast_to(tf.math.reciprocal(power), batch_shape)
    gamma_dist = gamma.Gamma(ipower, 1.)
    # Split the seed so the sign and magnitude draws are decorrelated.
    rademacher_seed, gamma_seed = samplers.split_seed(seed, salt='GenNormal')
    gamma_sample = gamma_dist.sample(n, seed=gamma_seed)
    binary_sample = tfp_random.rademacher(result_shape, dtype=self.dtype,
                                          seed=rademacher_seed)
    # |gamma|**(1/power) supplies the magnitude; binary_sample the sign.
    sampled = (binary_sample * tf.math.pow(tf.abs(gamma_sample), ipower))
    return loc + scale * sampled
Example #10
0
 def testGammaLogPDFMultidimensional(self):
     n = 6
     concentration = tf.constant([[2.0, 4.0]] * n)
     rate = tf.constant([[3.0, 4.0]] * n)
     concentration_v = np.array([2.0, 4.0])
     rate_v = np.array([3.0, 4.0])
     x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
     dist = gamma_lib.Gamma(concentration=concentration, rate=rate)
     log_pdf = dist.log_prob(x)
     log_pdf_vals = self.evaluate(log_pdf)
     self.assertEqual(log_pdf.get_shape(), (6, 2))
     pdf = dist.prob(x)
     pdf_vals = self.evaluate(pdf)
     self.assertEqual(pdf.get_shape(), (6, 2))
     if not stats:
         return
     # x has shape (6, 1) and broadcasts against the length-2 parameters.
     expected = stats.gamma.logpdf(x, concentration_v, scale=1 / rate_v)
     self.assertAllClose(log_pdf_vals, expected)
     self.assertAllClose(pdf_vals, np.exp(expected))
Example #11
0
 def testGammaSample(self):
     concentration_v = 4.0
     rate_v = 3.0
     dist = gamma_lib.Gamma(concentration=tf.constant(concentration_v),
                            rate=tf.constant(rate_v))
     n = 100000
     draws = dist.sample(n, seed=137)
     draw_values = self.evaluate(draws)
     self.assertEqual(draws.get_shape(), (n, ))
     self.assertEqual(draw_values.shape, (n, ))
     # Distributional check via the Kolmogorov-Smirnov helper.
     self.assertTrue(self._kstest(concentration_v, rate_v, draw_values))
     if not stats:
         return
     self.assertAllClose(
         draw_values.mean(),
         stats.gamma.mean(concentration_v, scale=1 / rate_v),
         atol=.01)
     self.assertAllClose(
         draw_values.var(),
         stats.gamma.var(concentration_v, scale=1 / rate_v),
         atol=.15)
Example #12
0
 def testGammaPdfOfSampleMultiDims(self):
     # (1, 2) concentration broadcasts against (2, 1) rate -> (2, 2) batch.
     dist = gamma_lib.Gamma(concentration=[7., 11.], rate=[[5.], [6.]])
     num = 50000
     draws = dist.sample(num, seed=137)
     densities = dist.prob(draws)
     draw_vals, density_vals = self.evaluate([draws, densities])
     self.assertEqual(draws.get_shape(), (num, 2, 2))
     self.assertEqual(densities.get_shape(), (num, 2, 2))
     # Each batch member's density should integrate to ~1 over its samples.
     for i in range(2):
         for j in range(2):
             self._assertIntegral(draw_vals[:, i, j],
                                  density_vals[:, i, j],
                                  err=0.02)
     if not stats:
         return
     broadcast_conc = [[7., 11.], [7., 11.]]
     broadcast_scale = 1 / np.array([[5., 5.], [6., 6.]])
     self.assertAllClose(stats.gamma.mean(broadcast_conc,
                                          scale=broadcast_scale),
                         draw_vals.mean(axis=0),
                         atol=.1)
     self.assertAllClose(stats.gamma.var(broadcast_conc,
                                         scale=broadcast_scale),
                         draw_vals.var(axis=0),
                         atol=.1)
Example #13
0
    ASVI_SURROGATE_SUBSTITUTIONS[condition] = substitution_fn


# Default substitutions attempt to express distributions using the most
# flexible available parameterization.
# pylint: disable=g-long-lambda
# HalfNormal -> TruncatedNormal on [0, 10 * scale]: same non-negative
# support, with an explicit (finite) upper truncation point.
register_asvi_substitution_rule(
    half_normal.HalfNormal, lambda dist: truncated_normal.TruncatedNormal(
        loc=0., scale=dist.scale, low=0., high=dist.scale * 10.))
# Uniform(low, high) -> Beta(1, 1) shifted/scaled onto [low, high];
# Beta(1, 1) is uniform, so the substitute matches the original exactly
# while exposing trainable concentration parameters.
register_asvi_substitution_rule(
    uniform.Uniform, lambda dist: shift.Shift(dist.low)
    (scale_lib.Scale(dist.high - dist.low)
     (beta.Beta(concentration0=tf.ones_like(dist.mean()), concentration1=1.))))
# Exponential(rate) is Gamma(concentration=1, rate).
register_asvi_substitution_rule(
    exponential.Exponential,
    lambda dist: gamma.Gamma(concentration=1., rate=dist.rate))
# Chi2(df) is Gamma(concentration=df/2, rate=1/2).
register_asvi_substitution_rule(
    chi2.Chi2, lambda dist: gamma.Gamma(concentration=0.5 * dist.df, rate=0.5))

# pylint: enable=g-long-lambda


# TODO(kateslin): Add support for models with prior+likelihood written as
# a single JointDistribution.
def build_asvi_surrogate_posterior(prior,
                                   mean_field=False,
                                   initial_prior_weight=0.5,
                                   seed=None,
                                   name=None):
    """Builds a structured surrogate posterior inspired by conjugate updating.