Code Example #1
  def test_bug170030378(self):
    n_item = 50
    n_rater = 7

    stream = test_util.test_seed_stream()
    weight = self.evaluate(
        tfd.Sample(tfd.Dirichlet([0.25, 0.25]), n_item).sample(seed=stream()))
    mixture_dist = tfd.Categorical(probs=weight)  # batch_shape=[50]

    rater_sensitivity = self.evaluate(
        tfd.Sample(tfd.Beta(5., 1.), n_rater).sample(seed=stream()))
    rater_specificity = self.evaluate(
        tfd.Sample(tfd.Beta(2., 5.), n_rater).sample(seed=stream()))

    probs = tf.stack([rater_sensitivity, rater_specificity])[None, ...]

    components_dist = tfd.BatchBroadcast(  # batch_shape=[50, 2]
        tfd.Independent(tfd.Bernoulli(probs=probs),
                        reinterpreted_batch_ndims=1),
        [50, 2])

    obs_dist = tfd.MixtureSameFamily(mixture_dist, components_dist)

    observed = self.evaluate(obs_dist.sample(seed=stream()))
    mixture_logp = obs_dist.log_prob(observed)

    expected_logp = tf.math.reduce_logsumexp(
        tf.math.log(weight) + components_dist.distribution.log_prob(
            observed[:, None, ...]),
        axis=-1)
    self.assertAllClose(expected_logp, mixture_logp)
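The step worth calling out is `tfd.BatchBroadcast`: the shared rater components have batch shape [1, 2], and broadcasting them to [50, 2] gives `tfd.MixtureSameFamily` a per-item batch of components to mix. A minimal sketch of just that shape bookkeeping, assuming the usual TensorFlow Probability imports:

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

# Shared components: batch [1, 2], event [7] (one Bernoulli per rater).
base = tfd.Independent(
    tfd.Bernoulli(probs=0.5 * tf.ones([1, 2, 7])),
    reinterpreted_batch_ndims=1)
# Broadcast to the [50 items, 2 classes] batch the mixture expects.
broadcast = tfd.BatchBroadcast(base, [50, 2])
print(base.batch_shape)       # (1, 2)
print(broadcast.batch_shape)  # (50, 2)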
Code Example #2
File: nuts_test.py  Project: qoffee/probability
 def testSigmoidBetaTargetConservation(self):
   # Not inverting the sigmoid bijector makes a kooky distribution, but nuts
   # should still conserve it (with a smaller step size).
   sigmoid_beta_dist = tfb.Sigmoid()(
       tfd.Beta(concentration0=1., concentration1=2.))
   self.evaluate(assert_univariate_target_conservation(
       self, sigmoid_beta_dist, step_size=0.02))
Code Example #3
File: continuous.py  Project: YashinaTatiana/odin-ai
 def new(params,
         event_shape=(),
         alpha_activation=tf.nn.softplus,
         beta_activation=tf.nn.softplus,
         clip_for_stable=True,
         validate_args=False,
         name="BetaLayer"):
     r"""Create the distribution instance from a `params` vector."""
     params = tf.convert_to_tensor(value=params, name='params')
     alpha_activation = parse_activation(alpha_activation, 'tf')
     beta_activation = parse_activation(beta_activation, 'tf')
     event_shape = dist_util.expand_to_vector(
         tf.convert_to_tensor(value=event_shape,
                              name='event_shape',
                              dtype=tf.int32),
         tensor_name='event_shape',
     )
     output_shape = tf.concat(
         [tf.shape(input=params)[:-1], event_shape],
         axis=0,
     )
     # alpha, beta
     concentration1, concentration0 = tf.split(params, 2, axis=-1)
     concentration1 = alpha_activation(concentration1)
     concentration0 = beta_activation(concentration0)
     if clip_for_stable:
         concentration0 = tf.clip_by_value(concentration0, 1e-3, 1e3)
         concentration1 = tf.clip_by_value(concentration1, 1e-3, 1e3)
     return tfd.Independent(
         tfd.Beta(concentration1=tf.reshape(concentration1, output_shape),
                  concentration0=tf.reshape(concentration0, output_shape),
                  validate_args=validate_args),
         reinterpreted_batch_ndims=tf.size(input=event_shape),
         name=name,
     )
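A minimal usage sketch for `new` (assuming the surrounding module context of continuous.py, i.e. `tf`, `tfd`, `parse_activation`, and `dist_util` are in scope; the shapes are illustrative):

# params packs 2 * event_size values per example: the alpha half, then beta.
params = tf.random.normal([32, 8])
beta_layer = new(params, event_shape=(4,))
x = beta_layer.sample(seed=1)  # shape [32, 4], entries in (0, 1)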
Code Example #4
File: nuts_test.py  Project: zhengzhuang3/probability
 def testLogitBetaTargetConservation(self):
     logit_beta_dist = tfb.Invert(tfb.Sigmoid())(tfd.Beta(
         concentration0=1., concentration1=2.))
     self.evaluate(
         assert_univariate_target_conservation(self,
                                               logit_beta_dist,
                                               step_size=0.2))
Code Example #5
 def testBetaTargetConservation(self):
     # Not that we expect NUTS to do a good job without an unconstraining
     # bijector, but...
     beta_dist = tfd.Beta(concentration0=1., concentration1=2.)
     self.evaluate(
         assert_univariate_target_conservation(self,
                                               beta_dist,
                                               step_size=1e-3))
Code Example #6
 def testMode(self):
     dist = self._cls()(tfd.Beta(concentration1=[5., 10.],
                                 concentration0=15.,
                                 validate_args=True),
                        tfb.Shift(2., validate_args=True)(tfb.Scale(
                            10., validate_args=True)),
                        validate_args=True)
     self.assertAllClose(2. + 10. * dist.distribution.mode(),
                         self.evaluate(dist.mode()),
                         atol=0.,
                         rtol=1e-6)
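`self._cls()` presumably builds a `tfd.TransformedDistribution`-like class, and the bijector here is affine with a constant Jacobian, so the mode maps straight through: mode(2 + 10 X) = 2 + 10 mode(X). The same identity as a standalone sketch:

base = tfd.Beta(concentration1=[5., 10.], concentration0=15.)
td = tfd.TransformedDistribution(base, tfb.Shift(2.)(tfb.Scale(10.)))
# An affine bijector rescales the density without reshaping it, so
# td.mode() == 2. + 10. * base.mode()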
Code Example #7
 def test_support_works_correctly_with_HMC(self):
     num_results = 2000
     target = tfd.Beta(concentration1=self.dtype(1.),
                       concentration0=self.dtype(10.))
     transformed_hmc = tfp.mcmc.TransformedTransitionKernel(
         inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
             target_log_prob_fn=tf.function(target.log_prob,
                                            autograph=False),
             step_size=1.64,
             num_leapfrog_steps=2,
             seed=_maybe_seed(55)),
         bijector=tfb.Sigmoid())
     # Recall, tfp.mcmc.sample_chain calls
     # transformed_hmc.bootstrap_results too.
     states, kernel_results = tfp.mcmc.sample_chain(
         num_results=num_results,
         # The initial state is used by inner_kernel.bootstrap_results.
         # Note the input is *after* bijector.forward.
         current_state=self.dtype(0.25),
         kernel=transformed_hmc,
         num_burnin_steps=200,
         num_steps_between_results=1,
         parallel_iterations=1)
     self.assertEqual(num_results,
                      tf.compat.dimension_value(states.shape[0]))
     sample_mean = tf.reduce_mean(states, axis=0)
     sample_var = tf.reduce_mean(tf.math.squared_difference(
         states, sample_mean),
                                 axis=0)
     [
         sample_mean_,
         sample_var_,
         is_accepted_,
         true_mean_,
         true_var_,
     ] = self.evaluate([
         sample_mean,
         sample_var,
         kernel_results.inner_results.is_accepted,
         target.mean(),
         target.variance(),
     ])
     self.assertAllClose(true_mean_, sample_mean_, atol=0.06, rtol=0.)
     self.assertAllClose(true_var_, sample_var_, atol=0.01, rtol=0.1)
     self.assertNear(0.6, is_accepted_.mean(), err=0.05)
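`TransformedTransitionKernel` runs the inner HMC chain in unconstrained space and maps each state through the bijector into the target's support, folding the forward log-det-Jacobian into the target log-prob. A tiny sketch of what `tfb.Sigmoid()` contributes here, assuming the same imports as the test:

# HMC proposes y anywhere on the real line; Sigmoid().forward(y) always
# lands in (0, 1), the support of the Beta target.
y = tf.constant([-3., 0., 3.])
print(tfb.Sigmoid().forward(y))  # ~[0.047, 0.5, 0.953]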
Code Example #8
def stochastic_volatility_prior_fn(num_timesteps):
    """Generative process for the stochastic volatility model."""
    persistence_of_volatility = yield Root(
        tfb.Shift(-1.)(tfb.Scale(2.)(tfd.Beta(
            concentration1=20.,
            concentration0=1.5,
            name='persistence_of_volatility'))))
    mean_log_volatility = yield Root(
        tfd.Cauchy(loc=0., scale=5., name='mean_log_volatility'))
    white_noise_shock_scale = yield Root(
        tfd.HalfCauchy(loc=0., scale=2., name='white_noise_shock_scale'))

    _ = yield tfd.JointDistributionCoroutine(functools.partial(
        autoregressive_series_fn,
        num_timesteps=num_timesteps,
        mean=mean_log_volatility,
        noise_scale=white_noise_shock_scale,
        persistence=persistence_of_volatility),
                                             name='log_volatility')
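`Root` here is presumably `tfd.JointDistributionCoroutine.Root` (it marks draws with no upstream dependencies), and `autoregressive_series_fn` is a companion generator not shown in this snippet. Under those assumptions, a sketch of how the prior is typically materialized:

import functools

prior = tfd.JointDistributionCoroutine(
    functools.partial(stochastic_volatility_prior_fn, num_timesteps=100))
draw = prior.sample(seed=42)  # one joint draw of all four named quantities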
Code Example #9
 def test_support_works_correctly_with_rwm(self):
   num_results = 500
   target = tfd.Beta(
       concentration1=self.dtype(1.),
       concentration0=self.dtype(10.))
   transformed_rwm = tfp.mcmc.TransformedTransitionKernel(
       inner_kernel=tfp.mcmc.RandomWalkMetropolis(
           target_log_prob_fn=tf.function(target.log_prob, autograph=False),
           new_state_fn=tfp.mcmc.random_walk_normal_fn(scale=1.5)),
       bijector=tfb.Sigmoid())
   # Recall, tfp.mcmc.sample_chain calls
   # transformed_rwm.bootstrap_results too.
   states = tfp.mcmc.sample_chain(
       num_results=num_results,
       # The initial state is used by inner_kernel.bootstrap_results.
       # Note the input is *after* bijector.forward.
       current_state=self.dtype(0.25),
       kernel=transformed_rwm,
       num_burnin_steps=200,
       num_steps_between_results=1,
       trace_fn=None,
       seed=test_util.test_seed())
   self.assertEqual(num_results, tf.compat.dimension_value(states.shape[0]))
   sample_mean = tf.reduce_mean(states, axis=0)
   sample_var = tf.reduce_mean(
       tf.math.squared_difference(states, sample_mean), axis=0)
   [
       sample_mean_,
       sample_var_,
       true_mean_,
       true_var_,
   ] = self.evaluate([
       sample_mean,
       sample_var,
       target.mean(),
       target.variance(),
   ])
   self.assertAllClose(true_mean_, sample_mean_,
                       atol=0.15, rtol=0.)
   self.assertAllClose(true_var_, sample_var_,
                       atol=0.03, rtol=0.2)
Code Example #10
 def test_support_works_correctly_with_MALA(self):
     num_results = 2000
     target = tfd.Beta(concentration1=self.dtype(1.),
                       concentration0=self.dtype(10.))
     transformed_mala = tfp.mcmc.TransformedTransitionKernel(
         inner_kernel=tfp.mcmc.MetropolisAdjustedLangevinAlgorithm(
             target_log_prob_fn=tf.function(target.log_prob,
                                            autograph=False),
             step_size=1.,
             seed=_maybe_seed(test_util.test_seed())),
         bijector=tfb.Sigmoid())
     # Recall, tfp.mcmc.sample_chain calls
     # transformed_mala.bootstrap_results too.
     states, _ = tfp.mcmc.sample_chain(
         num_results=num_results,
         # The initial state is used by inner_kernel.bootstrap_results.
         # Note the input is *after* bijector.forward.
         current_state=self.dtype(0.25),
         kernel=transformed_mala,
         num_burnin_steps=200,
         num_steps_between_results=1,
         parallel_iterations=1)
     self.assertEqual(num_results,
                      tf.compat.dimension_value(states.shape[0]))
     sample_mean = tf.reduce_mean(states, axis=0)
     sample_var = tf.reduce_mean(tf.math.squared_difference(
         states, sample_mean),
                                 axis=0)
     [
         sample_mean_,
         sample_var_,
         true_mean_,
         true_var_,
     ] = self.evaluate([
         sample_mean,
         sample_var,
         target.mean(),
         target.variance(),
     ])
     self.assertAllClose(true_mean_, sample_mean_, atol=0.06, rtol=0.)
     self.assertAllClose(true_var_, sample_var_, atol=0.01, rtol=0.1)
Code Example #11
    def test_batch_slicing(self):
        # pylint: disable=bad-whitespace
        d = tfd.JointDistributionNamed(dict(
            s=tfd.Exponential(rate=[10, 12, 14]),
            n=lambda s: tfd.Normal(loc=0, scale=s),
            x=lambda: tfd.Beta(concentration0=[3, 2, 1], concentration1=1)),
                                       validate_args=True)
        # pylint: enable=bad-whitespace

        d0, d1 = d[:1], d[1:]
        x0 = d0.sample(seed=test_util.test_seed())
        x1 = d1.sample(seed=test_util.test_seed())

        self.assertLen(x0, 3)
        self.assertEqual([1], x0['s'].shape)
        self.assertEqual([1], x0['n'].shape)
        self.assertEqual([1], x0['x'].shape)

        self.assertLen(x1, 3)
        self.assertEqual([2], x1['s'].shape)
        self.assertEqual([2], x1['n'].shape)
        self.assertEqual([2], x1['x'].shape)
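Indexing a joint distribution slices the batch dimension of every component at once, which is what the assertions above verify. A smaller sketch of the same behavior, with the same imports as the test:

d = tfd.JointDistributionNamed(dict(
    s=tfd.Exponential(rate=[10., 12., 14.]),
    n=lambda s: tfd.Normal(loc=0., scale=s)))
print(d[1:].sample(seed=1)['s'].shape)  # (2,): the batch was sliced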
Code Example #12
        def __body(w_, e_, mask, b):
            # One rejection-sampling step: draw envelope variables, then
            # accept them into `w_`/`e_` wherever `mask` marks a pending slot.
            e = math_ops.cast(distributions.Beta((self.__mf - 1.0) / 2.0,
                                                 (self.__mf - 1.0) / 2.0).
                              sample(shape, seed=seed), dtype=self.dtype)

            u = random_ops.random_uniform(shape, dtype=self.dtype, seed=seed)
            w = (1.0 - (1.0 + b) * e) / (1.0 - (1.0 - b) * e)
            x = (1.0 - b) / (1.0 + b)
            c = self.scale * x + (self.__mf - 1) * math_ops.log1p(-x**2)

            # Clip so the log below sees a strictly positive argument.
            tmp = tf.clip_by_value(x * w, 0, 1 - 1e-16)
            reject = gen_math_ops.less(((self.__mf - 1.0) * math_ops.log(1.0 - tmp) +
                                        self.scale * w - c),
                                       math_ops.log(u))
            accept = gen_math_ops.logical_not(reject)

            # Keep newly accepted draws and clear their mask entries so later
            # iterations leave them untouched.
            w_ = array_ops.where(gen_math_ops.logical_and(mask, accept), w, w_)
            e_ = array_ops.where(gen_math_ops.logical_and(mask, accept), e, e_)
            mask = array_ops.where(gen_math_ops.logical_and(mask, accept),
                                   reject, mask)

            return w_, e_, mask, b
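A hedged sketch of the `tf.while_loop` that typically drives a body like this; `w0`, `e0`, and `b0` are hypothetical initial values, and the real class wires up `shape`, dtypes, and seeds itself:

# Iterate until no slot is still pending (mask stays True while pending).
w, e, mask, b = tf.while_loop(
    cond=lambda w_, e_, mask, b: tf.reduce_any(mask),
    body=__body,
    loop_vars=(w0, e0, tf.ones(shape, dtype=tf.bool), b0))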
Code Example #13
 def mk_sigmoid_beta():
     beta = tfd.Beta(concentration0=1., concentration1=2.)
     # Not inverting the sigmoid bijector makes a kooky distribution, but
     # nuts should still conserve it (with a smaller step size).
     return tfb.Sigmoid()(beta)
Code Example #14
 def mk_logit_beta():
     beta = tfd.Beta(concentration0=1., concentration1=2.)
     return tfb.Invert(tfb.Sigmoid())(beta)
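Examples #13 and #14 package the same two targets used by the conservation tests above; only the support differs. `tfb.Sigmoid()(beta)` squashes an already-(0, 1) variable into sigmoid((0, 1)), roughly (0.5, 0.731), while `tfb.Invert(tfb.Sigmoid())(beta)` is the logit of a Beta variable and ranges over the whole real line. A quick check, assuming the same `tfd`/`tfb` imports:

s = mk_sigmoid_beta().sample(1000, seed=7)
l = mk_logit_beta().sample(1000, seed=7)
print(tf.reduce_min(s), tf.reduce_max(s))  # stays inside (0.5, ~0.731)
print(tf.reduce_min(l), tf.reduce_max(l))  # unbounded, logit-scale values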