Example #1
def testBijectiveAndFinite(self):
    # Sorted draws lie in Ordered's domain; arbitrary reals lie in its range.
    ordered = tfb.Ordered()
    x = np.sort(self._rng.randn(3, 10), axis=-1).astype(np.float32)
    y = self._rng.randn(3, 10).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(ordered,
                                                   x,
                                                   y,
                                                   eval_func=self.evaluate,
                                                   event_ndims=1)
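
For context, assert_bijective_and_finite checks that forward and inverse are mutually consistent and that both produce finite values at the supplied points. A minimal standalone sketch of the same round-trip idea (assuming a TF2 eager environment and a TFP version that still ships tfb.Ordered):

    import numpy as np
    import tensorflow_probability as tfp

    tfb = tfp.bijectors

    ordered = tfb.Ordered()
    x = np.sort(np.random.randn(3, 10), axis=-1).astype(np.float32)

    # inverse(forward(x)) should recover x, with every intermediate finite.
    y = ordered.forward(x)
    x_back = ordered.inverse(y)
    assert np.all(np.isfinite(y.numpy()))
    np.testing.assert_allclose(x_back.numpy(), x, rtol=1e-5, atol=1e-5)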
Example #2
def testShapeGetters(self):
    x = tf.TensorShape([4])
    y = tf.TensorShape([4])
    bijector = tfb.Ordered(validate_args=True)
    self.assertAllEqual(y, bijector.forward_event_shape(x))
    self.assertAllEqual(
        tensorshape_util.as_list(y),
        self.evaluate(
            bijector.forward_event_shape_tensor(
                tensorshape_util.as_list(x))))
    self.assertAllEqual(x, bijector.inverse_event_shape(y))
    self.assertAllEqual(
        tensorshape_util.as_list(x),
        self.evaluate(
            bijector.inverse_event_shape_tensor(
                tensorshape_util.as_list(y))))
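
Ordered maps length-n vectors to length-n vectors, which is why both shape getters are expected to return the input shape unchanged. A one-line sanity check under the same assumptions as the sketch above:

    tfb.Ordered().forward_event_shape(tf.TensorShape([4]))  # TensorShape([4])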
Example #3
def testBijectorVector(self):
    ordered = tfb.Ordered()
    self.assertStartsWith(ordered.name, "ordered")
    x = np.asarray([[2., 3, 4], [4., 8, 13]])
    # Forward: y[0] = x[0], y[k] = log(x[k] - x[k-1]).
    y = [[2., 0, 0], [4., np.log(4.), np.log(5.)]]
    self.assertAllClose(y, self.evaluate(ordered.forward(x)))
    self.assertAllClose(x, self.evaluate(ordered.inverse(y)))
    # The inverse log-det-Jacobian reduces to the sum of y[1:] over the event.
    self.assertAllClose(
        np.sum(np.asarray(y)[..., 1:], axis=-1),
        self.evaluate(ordered.inverse_log_det_jacobian(y, event_ndims=1)),
        atol=0.,
        rtol=1e-7)
    self.assertAllClose(
        self.evaluate(-ordered.inverse_log_det_jacobian(y, event_ndims=1)),
        self.evaluate(ordered.forward_log_det_jacobian(x, event_ndims=1)),
        atol=0.,
        rtol=1e-7)
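
The expected values here follow directly from Ordered's forward definition: y[0] = x[0] and y[k] = log(x[k] - x[k-1]) for k >= 1. A plain-numpy sketch reproducing the expected y for this x (ordered_forward is a hypothetical helper, not part of the test):

    import numpy as np

    def ordered_forward(x):
        # y[0] = x[0]; y[k] = log(x[k] - x[k-1]) for k >= 1.
        x = np.asarray(x, dtype=np.float64)
        return np.concatenate([x[..., :1], np.log(np.diff(x, axis=-1))], axis=-1)

    print(ordered_forward([[2., 3, 4], [4., 8, 13]]))
    # [[2., 0., 0.], [4., log(4.), log(5.)]]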
Example #4
def testBijectorUnknownShape(self):
    ordered = tfb.Ordered()
    self.assertStartsWith(ordered.name, "ordered")
    x_ = np.asarray([[2., 3, 4], [4., 8, 13]], dtype=np.float32)
    y_ = np.asarray(
        [[2., 0, 0], [4., np.log(4.), np.log(5.)]], dtype=np.float32)
    # Hide the trailing dimension from static shape inference so the
    # bijector is exercised on partially-unknown shapes.
    x = tf1.placeholder_with_default(x_, shape=[2, None])
    y = tf1.placeholder_with_default(y_, shape=[2, None])
    self.assertAllClose(y_, self.evaluate(ordered.forward(x)))
    self.assertAllClose(x_, self.evaluate(ordered.inverse(y)))
    self.assertAllClose(
        np.sum(y_[..., 1:], axis=-1),
        self.evaluate(ordered.inverse_log_det_jacobian(y, event_ndims=1)),
        atol=0.,
        rtol=1e-7)
    self.assertAllClose(
        -self.evaluate(ordered.inverse_log_det_jacobian(y, event_ndims=1)),
        self.evaluate(ordered.forward_log_det_jacobian(x, event_ndims=1)),
        atol=0.,
        rtol=1e-7)
Example #5
def constraint_for(dist=None, param=None):
    """Get bijector constraint for a given distribution's parameter."""

    constraints = {
        'atol':
            tfb.Softplus(),
        'rtol':
            tfb.Softplus(),
        'concentration':
            tfb.Softplus(),
        'GeneralizedPareto.concentration':  # Permits +ve and -ve concentrations.
            lambda x: tf.math.tanh(x) * 0.24,
        'concentration0':
            tfb.Softplus(),
        'concentration1':
            tfb.Softplus(),
        'df':
            tfb.Softplus(),
        'InverseGaussian.loc':
            tfb.Softplus(),
        'JohnsonSU.tailweight':
            tfb.Softplus(),
        'PowerSpherical.mean_direction':
            lambda x: tf.math.l2_normalize(tf.math.sigmoid(x) + 1e-6, -1),
        'ContinuousBernoulli.probs':
            tfb.Sigmoid(),
        'Geometric.logits':  # TODO(b/128410109): re-enable down to -50
            # Capping at 15. so that probability is less than 1, and entropy is
            # defined. b/147394924
            lambda x: tf.minimum(tf.maximum(x, -16.), 15.),  # works around the bug
        'Geometric.probs':
            constrain_between_eps_and_one_minus_eps(),
        'Binomial.probs':
            tfb.Sigmoid(),
        'NegativeBinomial.probs':
            tfb.Sigmoid(),
        'Bernoulli.probs':
            tfb.Sigmoid(),
        'PlackettLuce.scores':
            tfb.Softplus(),
        'ProbitBernoulli.probs':
            tfb.Sigmoid(),
        'RelaxedBernoulli.probs':
            tfb.Sigmoid(),
        'cutpoints':  # Permit values that aren't too large
            lambda x: tfb.Ordered().inverse(10. * tf.math.tanh(x)),
        'log_rate':
            lambda x: tf.maximum(x, -16.),
        'mixing_concentration':
            tfb.Softplus(),
        'mixing_rate':
            tfb.Softplus(),
        'rate':
            tfb.Softplus(),
        'scale':
            tfb.Softplus(),
        'scale_diag':
            tfb.Softplus(),
        'scale_identity_multiplier':
            tfb.Softplus(),
        'tailweight':
            tfb.Softplus(),
        'temperature':
            tfb.Softplus(),
        'total_count':
            lambda x: tf.floor(tfb.Sigmoid()(x / 100.) * 100.) + 1.,
        'Bernoulli':
            lambda d: dict(d, dtype=tf.float32),
        'CholeskyLKJ':
            fix_lkj,
        'LKJ':
            fix_lkj,
        'Zipf':
            lambda d: dict(d, dtype=tf.float32),
        'GeneralizedNormal.power':
            tfb.Softplus(),
    }

    if param is not None:
        return constraints.get('{}.{}'.format(dist, param),
                               constraints.get(param, tfb.Identity()))
    return constraints.get(dist, tfb.Identity())
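
A short usage sketch of the lookup order above: a distribution-qualified key ('{dist}.{param}') wins over the bare parameter name, and anything unlisted falls back to tfb.Identity(). Illustrative calls only, assuming the module's own imports:

    constraint_for('Geometric', 'probs')  # -> constrain_between_eps_and_one_minus_eps()
    constraint_for('Binomial', 'probs')   # -> tfb.Sigmoid()
    constraint_for(param='rate')          # -> tfb.Softplus()
    constraint_for('Normal', 'loc')       # -> tfb.Identity() fallback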