def testUnknownAndInvalidShape(self):
        logits = tf1.placeholder_with_default(19.84, shape=None)
        with self.assertRaisesRegex(
                ValueError, 'Argument `logits` must have rank at least 1.'):
            dist = tfd.ExpRelaxedOneHotCategorical(0.75,
                                                   logits=logits,
                                                   validate_args=True)
            self.evaluate(dist.sample())

        logits = tf1.placeholder_with_default([[], []], shape=None)
        with self.assertRaisesRegex(
                ValueError,
                'Argument `logits` must have final dimension >= 1.'):
            dist = tfd.ExpRelaxedOneHotCategorical(12.0,
                                                   logits=logits,
                                                   validate_args=True)
            self.evaluate(dist.sample())
 def testAssertionsProbs(self):
     probs = tf.Variable([0.1, 0.7, 0.0])
     with self.assertRaisesOpError('Argument `probs` must sum to 1.'):
         d = tfd.ExpRelaxedOneHotCategorical(0.3,
                                             probs=probs,
                                             validate_args=True)
         self.evaluate([v.initializer for v in d.variables])
         self.evaluate(d.sample())
 def testProbs(self):
     temperature = 1.0
     logits = [2.0, 3.0, -4.0]
     dist = tfd.ExpRelaxedOneHotCategorical(temperature,
                                            logits,
                                            validate_args=True)
     expected_p = np.exp(logits) / np.sum(np.exp(logits))
     self.assertAllClose(expected_p, self.evaluate(dist.probs_parameter()))
     self.assertAllEqual([3], dist.probs_parameter().shape)
 def testEventSizeOfOne(self):
     d = tfd.ExpRelaxedOneHotCategorical(
         0.1337,
         logits=tf1.placeholder_with_default([0.], shape=None),
         validate_args=True)
     self.assertAllEqual(np.zeros((5, 3, 1), dtype=np.int32),
                         self.evaluate(d.sample([5, 3])))
     self.assertAllClose(np.ones(5),
                         self.evaluate(d.prob(np.zeros((5, 1)))))
 def testUnknownShape(self):
     logits_pl = tf1.placeholder_with_default(input=[.3, .1, .4],
                                              shape=None)
     temperature = 1.0
     dist = tfd.ExpRelaxedOneHotCategorical(temperature,
                                            logits_pl,
                                            validate_args=True)
     self.assertAllEqual([3], self.evaluate(dist.sample()).shape)
     self.assertAllEqual([5, 3], self.evaluate(dist.sample(5)).shape)
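
For reference, a minimal standalone sketch of the conventions these tests rely on (tf1, tf, np, tfd are the usual TensorFlow Probability test aliases; the temperature and logits below are illustrative values, not taken from the tests): samples of ExpRelaxedOneHotCategorical live in log-space, so exponentiating a sample gives a point on the probability simplex.

# Aliases as used throughout the tests in this listing:
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

# Illustrative usage: samples are log-domain relaxed one-hot vectors, so
# exp(sample) sums to (approximately) 1 over the last axis.
dist = tfd.ExpRelaxedOneHotCategorical(temperature=0.5, logits=[2., 3., -4.])
x = dist.sample(7)                          # shape [7, 3], log-domain values
print(tf.reduce_sum(tf.exp(x), axis=-1))    # each entry is close to 1.0
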
Example #6
 def test_event_space_bijector_fldj(self):
   # Also test forward log det jacobian for the default event space
   # bijector in the same setting, for completeness.
   sub_d = tfd.ExpRelaxedOneHotCategorical(
       logits=[0., 0., 0.],
       temperature=[0.01, 0.01, 0.01, 0.01])
   d = tfd.Masked(sub_d, validity_mask=False, validate_args=True)
   bij = d.experimental_default_event_space_bijector()
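   # The unconstrained input below has shape [4, 2]: the temperature of shape
   # [4] sets the batch shape, and a 3-category log-simplex has 2 free
   # dimensions under the default event space bijector.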
   fldj = bij.forward_log_det_jacobian(tf.zeros(shape=[4, 2]))
   self.assertAllEqual(fldj, tf.zeros_like(fldj))
 def testAssertionsTemperatureAfterMutation(self):
     t = tf.Variable(7.7)
     d = tfd.ExpRelaxedOneHotCategorical(t,
                                         probs=[0.5, 0.5],
                                         validate_args=True)
     self.evaluate([v.initializer for v in d.variables])
     with self.assertRaisesOpError(
             'Condition x > 0 did not hold element-wise'):
         with tf.control_dependencies([t.assign(-0.07)]):
             self.evaluate(d.logits_parameter())
 def testAssertionsProbsAfterMutation(self):
     probs = tf.Variable([0.25, 0.25, 0.5])
     d = tfd.ExpRelaxedOneHotCategorical(0.1337,
                                         probs=probs,
                                         validate_args=True)
     with self.assertRaisesOpError(
             'Condition x >= 0 did not hold element-wise'):
         self.evaluate([v.initializer for v in d.variables])
         with tf.control_dependencies([probs.assign([-0.25, 0.75, 0.5])]):
             self.evaluate(d.logits_parameter())
 def testGradientProbs(self):
     t = tf.Variable(0.4)
     probs = tf.Variable([0.1, 0.7, 0.2])
     d = tfd.ExpRelaxedOneHotCategorical(t, probs=probs, validate_args=True)
     with tf.GradientTape() as tape:
         loss = -d.log_prob(
             tf.math.log_softmax([[1., 0., 0.], [0., 0., 1.]]))
     g = tape.gradient(loss, d.trainable_variables)
     self.assertLen(g, 2)
     self.assertAllNotNone(g)
 def testAssertionsLogits(self):
     logits = tfp.util.DeferredTensor(tf.identity,
                                      tf.Variable(0.),
                                      shape=None)
     with self.assertRaisesRegex(
             ValueError, 'Argument `logits` must have rank at least 1.'):
         d = tfd.ExpRelaxedOneHotCategorical(0.7,
                                             logits=logits,
                                             validate_args=True)
         self.evaluate([v.initializer for v in d.variables])
         self.evaluate(d.sample())
 def testGradientLogits(self):
     t = tf.Variable([0.01, 1.])
     logits = tf.Variable([[-1., 0., 1.], [3., 3., 3.]])
     d = tfd.ExpRelaxedOneHotCategorical(t,
                                         logits=logits,
                                         validate_args=True)
     with tf.GradientTape() as tape:
         loss = -d.log_prob(
             tf.math.log_softmax([[-1., 0., 0.], [0., 0., 1.]]))
     g = tape.gradient(loss, d.trainable_variables)
     self.assertLen(g, 2)
     self.assertAllNotNone(g)
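
As testGradientProbs and testGradientLogits above check, the distribution reads its tf.Variable parameters lazily, so gradients reach both the temperature and the logits even though d is built outside the tape. A hypothetical one-step training sketch along the same lines (the SGD optimizer and learning rate are illustrative assumptions, not part of the test suite):

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

t = tf.Variable([0.01, 1.])
logits = tf.Variable([[-1., 0., 1.], [3., 3., 3.]])
d = tfd.ExpRelaxedOneHotCategorical(t, logits=logits, validate_args=True)
target = tf.math.log_softmax([[-1., 0., 0.], [0., 0., 1.]])

opt = tf.keras.optimizers.SGD(learning_rate=0.05)   # illustrative choice
with tf.GradientTape() as tape:
    loss = -tf.reduce_mean(d.log_prob(target))      # scalar objective
grads = tape.gradient(loss, d.trainable_variables)
opt.apply_gradients(zip(grads, d.trainable_variables))
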
 def testParamTensorFromProbs(self):
     x = tf.constant([0.1, 0.5, 0.4])
     d = tfd.ExpRelaxedOneHotCategorical(temperature=1.,
                                         probs=x,
                                         validate_args=True)
     self.assertAllClose(*self.evaluate(
         [tf.math.log(x), d.logits_parameter()]),
                         atol=0,
                         rtol=1e-4)
     self.assertAllClose(*self.evaluate([x, d.probs_parameter()]),
                         atol=0,
                         rtol=1e-4)
Example #13
 def test_event_space_bijector(self):
   # Test that the default event space bijector executes.  This is
   # non-trivial, because the event space bijector of this particular
   # component distribution cannot be relied upon to produce finite
   # values in the unconstrained space from samples of `sub_d`.
   sub_d = tfd.ExpRelaxedOneHotCategorical(
       logits=[0., 0., 0.],
       temperature=[0.01, 0.01, 0.01, 0.01],
       validate_args=True)
   d = tfd.Masked(sub_d, validity_mask=False, validate_args=True)
   bij = d.experimental_default_event_space_bijector()
   x = bij(tf.zeros(shape=[4, 2]))
   # The error tested for manifests as failed validations due to
   # invalid values.
   self.assertAllNotNan(self.evaluate(x))
 def testPdf(self):
     temperature = .4
     logits = [.3, .1, .4]
     k = len(logits)
     p = np.exp(logits) / np.sum(np.exp(logits))
     dist = tfd.ExpRelaxedOneHotCategorical(temperature,
                                            logits,
                                            validate_args=True)
     x = self.evaluate(dist.sample())
     # analytical ExpConcrete density presented in Maddison et al. 2016
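     # i.e., with k classes, class probabilities p and temperature t, the
     # density at a log-domain point x is
     #   pdf(x) = (k-1)! * t**(k-1)
     #            * prod_i(p[i]*exp(-t*x[i]) / sum_j(p[j]*exp(-t*x[j]))),
     # which the lines below compute term by term; gamma(k) is assumed to be
     # scipy.special.gamma, which equals (k-1)! at integer k.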
     prod_term = p * np.exp(-temperature * x)
     expected_pdf = (gamma(k) * np.power(temperature, k - 1) *
                     np.prod(prod_term / np.sum(prod_term)))
     pdf = self.evaluate(dist.prob(x))
     self.assertAllClose(expected_pdf, pdf)