Example 1
0
 def _sample_n(self, n, seed=None):
     """Draws `n` samples via log-gamma variates fed to a multinomial sampler."""
     # Stateful seed stream: each `stream()` call yields a fresh seed; one is
     # consumed by the gamma draw and one by the multinomial, in that order.
     stream = seed_stream.SeedStream(seed, "dirichlet_multinomial")
     num_classes = self.event_shape_tensor()[0]
     num_trials = tf.cast(self.total_count, dtype=tf.int32)
     # Gamma draws, taken to log space.  These act as unnormalized Dirichlet
     # logits for the multinomial sampler.
     gamma_draws = tf.random.gamma(shape=[n],
                                   alpha=self._broadcasted_concentration,
                                   dtype=self.dtype,
                                   seed=stream())
     counts = multinomial.draw_sample(1, num_classes, tf.math.log(gamma_draws),
                                      num_trials, self.dtype, stream())
     # [n] + batch_shape + [k] — collapse the sampler's extra leading dim.
     out_shape = tf.concat([[n], self.batch_shape_tensor(), [num_classes]], 0)
     return tf.reshape(counts, out_shape)
Example 2
0
 def _sample_n(self, n, seed=None):
     """Samples counts by reducing to a two-class multinomial draw."""
     # The multinomial sampler expects per-class log-probabilities, whereas
     # `self.logits` here is logit(p) = inverse-sigmoid(p).  Observe:
     #   log(p)     = log(sigmoid(logits))     = logits - softplus(logits)
     #   log(1 - p) = log(1 - sigmoid(logits)) = -softplus(logits)
     # Softmax is invariant to a constant shift of all inputs, so shifting
     # both entries by softplus(logits) leaves the equivalent pair
     # [logits, 0.], which is what we stack below.
     two_class_logits = tf.stack(
         [self.logits, tf.zeros_like(self.logits)], axis=-1)
     num_trials = tf.cast(self.total_count, dtype=tf.int32)
     draws = multinomial.draw_sample(num_samples=n,
                                     num_classes=2,
                                     logits=two_class_logits,
                                     num_trials=num_trials,
                                     dtype=self.dtype,
                                     seed=seed)
     # Class 0 holds the success count; class 1 is its redundant complement.
     return draws[..., 0]
  def _sample_n(self, n, seed=None):
    """Samples by drawing gamma variates in log space, then multinomial counts."""
    # One independent seed for each downstream sampler.
    seed_gamma, seed_multinomial = samplers.split_seed(
        seed, salt='dirichlet_multinomial')

    conc = tf.convert_to_tensor(self._concentration)
    counts = tf.convert_to_tensor(self._total_count)

    num_trials = tf.cast(counts, dtype=tf.int32)
    num_classes = self._event_shape_tensor(conc)[0]
    # Broadcast concentration against total_count's batch shape.
    alpha = tf.math.multiply(
        tf.ones_like(counts[..., tf.newaxis]), conc, name='alpha')

    # `log_space=True` returns the draws already in log space; unnormalized
    # logits are acceptable to the multinomial sampler.
    log_gamma = gamma_lib.random_gamma(
        shape=[n], concentration=alpha, seed=seed_gamma, log_space=True)
    draws = multinomial.draw_sample(
        1, num_classes, log_gamma, num_trials, self.dtype, seed_multinomial)
    # [n] + batch_shape + [k] — collapse the sampler's extra leading dim.
    out_shape = ps.concat(
        [[n],
         self._batch_shape_tensor(concentration=conc, total_count=counts),
         [num_classes]], 0)
    return tf.reshape(draws, out_shape)
Example 4
0
    def _sample_n(self, n, seed=None):
        """Samples via gamma draws (log space) fed to a multinomial sampler."""
        # Stateful seed stream; the first `stream()` feeds the gamma draw and
        # the second the multinomial, preserving consumption order.
        stream = seed_stream.SeedStream(seed, 'dirichlet_multinomial')

        conc = tf.convert_to_tensor(self._concentration)
        counts = tf.convert_to_tensor(self._total_count)

        num_trials = tf.cast(counts, dtype=tf.int32)
        num_classes = self._event_shape_tensor(conc)[0]
        # Broadcast concentration against total_count's batch shape.
        alpha = tf.math.multiply(tf.ones_like(counts[..., tf.newaxis]),
                                 conc,
                                 name='alpha')

        # Log-gamma variates serve as unnormalized logits for the sampler.
        log_gamma = tf.math.log(
            tf.random.gamma(shape=[n],
                            alpha=alpha,
                            dtype=self.dtype,
                            seed=stream()))
        draws = multinomial.draw_sample(1, num_classes, log_gamma, num_trials,
                                        self.dtype, stream())
        # [n] + batch_shape + [k] — collapse the sampler's extra leading dim.
        out_shape = tf.concat(
            [[n], self._batch_shape_tensor(conc, counts), [num_classes]], 0)
        return tf.reshape(draws, out_shape)