Example #1
def get_kl_divergence(p, q, n_samples, name='KL_divergence'):
    """Returns the KL divergence KL(p || q) as a Monte-Carlo integral.

    Args:
        p: A `tfp.Distribution` instance.
        q: A `tfp.Distribution` instance.
        n_samples: Positive integer.
        name: String.

    Returns:
        A `MonteCarloIntegral` instance.
    """
    with tf.name_scope(name):
        p_samples = p.sample(n_samples)
        # shape: [n_samples] + batch-shape
        integrands = p.log_prob(p_samples) - q.log_prob(p_samples)
        return monte_carlo_integrate(integrands, axes=[0])
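All of these examples delegate to `monte_carlo_integrate`, which is not shown on this page. Below is a minimal sketch of what it might look like, assuming `MonteCarloIntegral` is a plain value/error container; the field names `value` and `error` and the defaulting behavior are assumptions, not taken from the source.

```python
import collections

import tensorflow as tf

# Hypothetical container: the real `MonteCarloIntegral` class is not shown
# in these examples, so the `value` / `error` fields are assumptions.
MonteCarloIntegral = collections.namedtuple(
    'MonteCarloIntegral', ['value', 'error'])


def monte_carlo_integrate(integrands, axes=(0,), n_samples=None):
    """Estimates E[f] from samples of the integrand f.

    Args:
        integrands: Tensor whose `axes` dimensions index the samples.
        axes: Sample axes to average over.
        n_samples: Optional sample count; inferred from the shape of
            `integrands` if omitted (an assumption).

    Returns:
        A `MonteCarloIntegral` whose value is the sample mean and whose
        error is the standard error of that mean.
    """
    axes = list(axes)
    mean, variance = tf.nn.moments(integrands, axes=axes)
    if n_samples is None:
        n_samples = tf.reduce_prod(
            [tf.shape(integrands)[axis] for axis in axes])
    # Standard error of the Monte-Carlo estimate: sqrt(Var / N).
    error = tf.sqrt(variance / tf.cast(n_samples, variance.dtype))
    return MonteCarloIntegral(value=mean, error=error)
```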
Example #2
def get_entropy(distribution, n_samples=32, name='entropy'):
    """Returns the entropy of `distribution` as a Monte-Carlo integral.

    Args:
        distribution: A `tfp.Distribution` instance.
        n_samples: Positive integer.
        name: String.

    Returns:
        A `MonteCarloIntegral` instance.
    """
    with tf.name_scope(name):
        samples = distribution.sample(n_samples)
        # shape: [n_samples] + batch-shape
        integrands = -distribution.log_prob(samples)
        # shape: batch-shape
        return monte_carlo_integrate(integrands, axes=[0])
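A quick sanity check of `get_entropy`, assuming the `monte_carlo_integrate` sketch above: the Monte-Carlo entropy of a standard Normal should agree with the analytic value 0.5 * ln(2πe) ≈ 1.4189 within a few reported errors.

```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

normal = tfd.Normal(loc=0.0, scale=1.0)
entropy = get_entropy(normal, n_samples=4096)

with tf.Session() as sess:
    # `value` / `error` are the assumed fields of `MonteCarloIntegral`.
    value, error = sess.run([entropy.value, entropy.error])

analytic = 0.5 * np.log(2.0 * np.pi * np.e)  # entropy of N(0, 1)
print('MC estimate: %.4f +/- %.4f (analytic: %.4f)'
      % (value, error, analytic))
```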
Example #3
    def discriminate_part(self, data, discriminator, reuse):
        """Returns `E_{x~P} [ g_f(D(x)) ]` as a Monte-Carlo integral.

        Args:
            data: Tensor with shape `[None] + A`.
            discriminator: Callable with the signature:
                Args:
                    ambient: Tensor with shape `[None] + A`.
                    reuse: Boolean.
                Returns:
                    Tensor with the same batch-shape as the `ambient`.
            reuse: Boolean.

        Returns:
            A scalar `MonteCarloIntegral` instance.
        """
        with tf.name_scope('discriminator_part'):
            # shape: [B] (batch size)
            integrands = self.output_activation(discriminator(data, reuse))
            return monte_carlo_integrate(integrands, axes=[0])
Example #4
    def generate_part(self, fake_data, discriminator, reuse):
        """Returns `E_{x~Q} [ -f*( g_f(D(x)) ) ]` as a Monte-Carlo integral.

        Args:
            fake_data: Tensor with shape `[None] + A`.
            discriminator: Callable with the signature:
                Args:
                    ambient: Tensor with shape `[None] + A`.
                    reuse: Boolean.
                Returns:
                    Tensor with the same batch-shape as the `ambient`.
            reuse: Boolean.

        Returns:
            A scalar `MonteCarloIntegral` instance.
        """
        with tf.name_scope('generator_part'):
            # shape: [self.n_samples]
            integrands = -self.f_star(
                self.output_activation(discriminator(fake_data, reuse)))
            return monte_carlo_integrate(integrands, axes=[0])
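Examples #3 and #4 are the two halves of the f-GAN variational lower bound `D_f(P || Q) >= E_{x~P}[g_f(D(x))] + E_{x~Q}[-f*(g_f(D(x)))]`. A hedged sketch of how a caller might combine them; `model` stands for the (unshown) class these methods belong to, and the `value` / `error` fields are the same assumption as above:

```python
import tensorflow as tf


def f_divergence_estimate(model, real_data, fake_data, discriminator):
    """Combines examples #3 and #4 into one divergence estimate (a sketch)."""
    # E_{x~P} [ g_f(D(x)) ], estimated over the real batch.
    real_part = model.discriminate_part(
        real_data, discriminator, reuse=tf.AUTO_REUSE)
    # E_{x~Q} [ -f*( g_f(D(x)) ) ], estimated over the generated batch.
    fake_part = model.generate_part(
        fake_data, discriminator, reuse=tf.AUTO_REUSE)
    # The two estimates are independent, so their Monte-Carlo errors add
    # in quadrature (assuming `value` / `error` fields).
    value = real_part.value + fake_part.value
    error = tf.sqrt(real_part.error ** 2 + fake_part.error ** 2)
    return value, error
```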
Example #5
    def generate_part(self, fake_data, discriminator, reuse):
        """Re-implemented to avoid the numerical instability caused by
        the `tf.exp` in `self.f_star`."""
        discr_fake = discriminator(fake_data, reuse)
        # Stable evaluation via log-sigmoid helpers instead of `tf.exp`.
        integrands = -log_sigmoid(discr_fake) + log1m_sigmoid(discr_fake)
        return monte_carlo_integrate(integrands, axes=[0])
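The helpers `log_sigmoid` and `log1m_sigmoid` are not defined on this page. A minimal sketch of numerically stable implementations, using the identities `log(sigmoid(x)) = -softplus(-x)` and `log(1 - sigmoid(x)) = -softplus(x)`:

```python
import tensorflow as tf


def log_sigmoid(x):
    """Computes log(sigmoid(x)) as -softplus(-x), avoiding overflow."""
    return -tf.nn.softplus(-x)


def log1m_sigmoid(x):
    """Computes log(1 - sigmoid(x)) as -softplus(x), avoiding overflow."""
    return -tf.nn.softplus(x)
```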
Example #6
    def loss(self, ambient, name='loss', reuse=tf.AUTO_REUSE):
        r"""Returns L(x) and its error as a Monte-Carlo integral.

        Definition:
            ```math
            Denoting by $x$ the ambient and by $z$ the latent,

            \begin{equation}
                L(x) := E_{z \sim Q(Z \mid x)} \left[
                            \ln q(z \mid x) - \ln p(z) - \ln p(x \mid z)
                        \right].
            \end{equation}
            ```

        Evaluation:
            How well the model fits, when trained by minimizing the loss
            L(x) over a dataset of x, can be evaluated via the variance of
            the Monte-Carlo integral in L(x). The integrand is computed as

            ```python
            p_z = ...  # the unknown likelihood distribution of the latent Z.
            q_z = ...  # the inference distribution.

            z_samples = q_z.sample(N)

            for i, z in enumerate(z_samples):
                integrand[i] = q_z.log_prob(z) - (p_z.log_prob(z) + constant)
            ```

            So the variance of the `integrand` measures the difference
            between `q_z.log_prob` and `p_z.log_prob` in the region that
            `q_z` can probe by sampling, regardless of the `constant`.
            Indeed, if they are perfectly fitted in the probed region, then
            the variance vanishes, no matter what the value of the
            `constant` is.

        Args:
            ambient: Tensor of the shape `batch_shape + [ambient_dim]`.
            name: String.
            reuse: Boolean.

        Returns:
            An instance of `MonteCarloIntegral` with shape `batch_shape`.
        """
        with tf.name_scope(self.base_name):
            with tf.name_scope(name):
                # Get the distribution Q(Z|x) in the definition.
                encoder = self.encoder(ambient, reuse=reuse)

                # Get the distribution P(X|z) in the definition.
                # shape: [n_samples] + batch-shape + latent-shape
                latent_samples = encoder.sample(self.n_samples)
                decoder = self.decoder(latent_samples, reuse=reuse)

                # Get log q(z|x) - log p(z) - log p(x|z) in the definition.
                # shape: [n_samples] + batch-shape
                integrands = (encoder.log_prob(latent_samples) -
                              self.prior.log_prob(latent_samples) -
                              decoder.log_prob(ambient))
                return monte_carlo_integrate(integrands,
                                             axes=[0],
                                             n_samples=self.n_samples)
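Since `loss` returns a `MonteCarloIntegral` over the batch, its error can be monitored alongside the training objective, per the Evaluation note in the docstring. A hedged usage sketch; `model` stands for the (unshown) class defining `loss`, and the `value` / `error` fields are the same assumption as above:

```python
import tensorflow as tf


def build_train_op(model, ambient, learning_rate=1e-3):
    """Builds a training op that also exposes the Monte-Carlo error (a sketch)."""
    loss_integral = model.loss(ambient)
    # The mean loss over the batch is the training objective; the mean
    # error tracks how well q(z|x) fits p(z) p(x|z) in the probed region.
    train_loss = tf.reduce_mean(loss_integral.value)
    mc_error = tf.reduce_mean(loss_integral.error)
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(train_loss)
    return train_op, train_loss, mc_error
```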