Example #1
  def testNormalSampleMultiDimensional(self):
    with self.test_session():
      batch_size = 2
      mu = constant_op.constant([[3.0, -3.0]] * batch_size)
      sigma = constant_op.constant([[math.sqrt(2.0), math.sqrt(3.0)]] *
                                   batch_size)
      mu_v = [3.0, -3.0]
      sigma_v = [np.sqrt(2.0), np.sqrt(3.0)]
      n = constant_op.constant(100000)
      normal = normal_lib.Normal(loc=mu, scale=sigma)
      samples = normal.sample(n)
      sample_values = samples.eval()
      # Note that the standard error for the sample mean is ~ sigma / sqrt(n).
      # The sample variance similarly is dependent on sigma and n.
      # Thus, the tolerances below are very sensitive to the number of samples
      # as well as the variances chosen.
      self.assertEqual(samples.get_shape(), (100000, batch_size, 2))
      self.assertAllClose(sample_values[:, 0, 0].mean(), mu_v[0], atol=1e-1)
      self.assertAllClose(sample_values[:, 0, 0].std(), sigma_v[0], atol=1e-1)
      self.assertAllClose(sample_values[:, 0, 1].mean(), mu_v[1], atol=1e-1)
      self.assertAllClose(sample_values[:, 0, 1].std(), sigma_v[1], atol=1e-1)

      expected_samples_shape = tensor_shape.TensorShape(
          [n.eval()]).concatenate(
              tensor_shape.TensorShape(normal.batch_shape().eval()))
      self.assertAllEqual(expected_samples_shape, samples.get_shape())
      self.assertAllEqual(expected_samples_shape, sample_values.shape)

      expected_samples_shape = (tensor_shape.TensorShape(
          [n.eval()]).concatenate(normal.get_batch_shape()))
      self.assertAllEqual(expected_samples_shape, samples.get_shape())
      self.assertAllEqual(expected_samples_shape, sample_values.shape)
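A quick sanity check on the tolerances used above (my own sketch, not part of the test suite): the standard error of the sample mean is sigma / sqrt(n), so with n = 100000 the atol of 1e-1 leaves a wide margin.

import math
n = 100000
for sigma in (math.sqrt(2.0), math.sqrt(3.0)):
    # Standard error of the sample mean: about 0.004-0.006 here,
    # roughly two orders of magnitude below the test's atol of 1e-1.
    print(sigma / math.sqrt(n))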
Example #2
    def testNormalLogPDF(self):
        with self.test_session():
            batch_size = 6
            mu = constant_op.constant([3.0] * batch_size)
            sigma = constant_op.constant([math.sqrt(10.0)] * batch_size)
            x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
            normal = normal_lib.Normal(loc=mu, scale=sigma)
            expected_log_pdf = stats.norm(mu.eval(), sigma.eval()).logpdf(x)

            log_pdf = normal.log_prob(x)
            self.assertAllClose(expected_log_pdf, log_pdf.eval())
            self.assertAllEqual(normal.batch_shape_tensor().eval(),
                                log_pdf.get_shape())
            self.assertAllEqual(normal.batch_shape_tensor().eval(),
                                log_pdf.eval().shape)
            self.assertAllEqual(normal.batch_shape, log_pdf.get_shape())
            self.assertAllEqual(normal.batch_shape, log_pdf.eval().shape)

            pdf = normal.prob(x)
            self.assertAllClose(np.exp(expected_log_pdf), pdf.eval())
            self.assertAllEqual(normal.batch_shape_tensor().eval(),
                                pdf.get_shape())
            self.assertAllEqual(normal.batch_shape_tensor().eval(),
                                pdf.eval().shape)
            self.assertAllEqual(normal.batch_shape, pdf.get_shape())
            self.assertAllEqual(normal.batch_shape, pdf.eval().shape)
Example #3
  def testNormalLogPDFMultidimensional(self):
    with self.test_session():
      batch_size = 6
      mu = constant_op.constant([[3.0, -3.0]] * batch_size)
      sigma = constant_op.constant([[math.sqrt(10.0), math.sqrt(15.0)]] *
                                   batch_size)
      x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
      normal = normal_lib.Normal(loc=mu, scale=sigma)
      expected_log_pdf = stats.norm(mu.eval(), sigma.eval()).logpdf(x)

      log_pdf = normal.log_prob(x)
      log_pdf_values = log_pdf.eval()
      self.assertEqual(log_pdf.get_shape(), (6, 2))
      self.assertAllClose(expected_log_pdf, log_pdf_values)
      self.assertAllEqual(normal.batch_shape().eval(), log_pdf.get_shape())
      self.assertAllEqual(normal.batch_shape().eval(), log_pdf.eval().shape)
      self.assertAllEqual(normal.get_batch_shape(), log_pdf.get_shape())
      self.assertAllEqual(normal.get_batch_shape(), log_pdf.eval().shape)

      pdf = normal.prob(x)
      pdf_values = pdf.eval()
      self.assertEqual(pdf.get_shape(), (6, 2))
      self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
      self.assertAllEqual(normal.batch_shape().eval(), pdf.get_shape())
      self.assertAllEqual(normal.batch_shape().eval(), pdf_values.shape)
      self.assertAllEqual(normal.get_batch_shape(), pdf.get_shape())
      self.assertAllEqual(normal.get_batch_shape(), pdf_values.shape)
Example #4
  def testNormalSample(self):
    with self.test_session():
      mu = constant_op.constant(3.0)
      sigma = constant_op.constant(math.sqrt(3.0))
      mu_v = 3.0
      sigma_v = np.sqrt(3.0)
      n = constant_op.constant(100000)
      normal = normal_lib.Normal(loc=mu, scale=sigma)
      samples = normal.sample(n)
      sample_values = samples.eval()
      # Note that the standard error for the sample mean is ~ sigma / sqrt(n).
      # The sample variance similarly is dependent on sigma and n.
      # Thus, the tolerances below are very sensitive to the number of samples
      # as well as the variances chosen.
      self.assertEqual(sample_values.shape, (100000,))
      self.assertAllClose(sample_values.mean(), mu_v, atol=1e-1)
      self.assertAllClose(sample_values.std(), sigma_v, atol=1e-1)

      expected_samples_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
          tensor_shape.TensorShape(normal.batch_shape_tensor().eval()))

      self.assertAllEqual(expected_samples_shape, samples.get_shape())
      self.assertAllEqual(expected_samples_shape, sample_values.shape)

      expected_samples_shape = (tensor_shape.TensorShape(
          [n.eval()]).concatenate(normal.batch_shape))

      self.assertAllEqual(expected_samples_shape, samples.get_shape())
      self.assertAllEqual(expected_samples_shape, sample_values.shape)
Example #5
  def testNegativeSigmaFails(self):
    with self.test_session():
      normal = normal_lib.Normal(
          loc=[1.], scale=[-5.], validate_args=True, name="G")
      with self.assertRaisesOpError("Condition x > 0 did not hold"):
        normal.mean().eval()
Example #6
  def testNormalVariance(self):
    with self.test_session():
      # sigma will be broadcast to [7, 7, 7]
      mu = [1., 2., 3.]
      sigma = [7.]

      normal = normal_lib.Normal(loc=mu, scale=sigma)

      self.assertAllEqual((3,), normal.variance().get_shape())
      self.assertAllEqual([49., 49, 49], normal.variance().eval())
Example #7
  def testNormalShape(self):
    with self.test_session():
      mu = constant_op.constant([-3.0] * 5)
      sigma = constant_op.constant(11.0)
      normal = normal_lib.Normal(loc=mu, scale=sigma)

      self.assertEqual(normal.batch_shape().eval(), [5])
      self.assertEqual(normal.get_batch_shape(), tensor_shape.TensorShape([5]))
      self.assertAllEqual(normal.event_shape().eval(), [])
      self.assertEqual(normal.get_event_shape(), tensor_shape.TensorShape([]))
Example #8
  def testNormalNormalKL(self):
    with self.test_session() as sess:
      batch_size = 6
      mu_a = np.array([3.0] * batch_size)
      sigma_a = np.array([1.0, 2.0, 3.0, 1.5, 2.5, 3.5])
      mu_b = np.array([-3.0] * batch_size)
      sigma_b = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0])

      n_a = normal_lib.Normal(loc=mu_a, scale=sigma_a)
      n_b = normal_lib.Normal(loc=mu_b, scale=sigma_b)

      kl = kullback_leibler.kl(n_a, n_b)
      kl_val = sess.run(kl)

      kl_expected = ((mu_a - mu_b)**2 / (2 * sigma_b**2) + 0.5 * (
          (sigma_a**2 / sigma_b**2) - 1 - 2 * np.log(sigma_a / sigma_b)))

      self.assertEqual(kl.get_shape(), (batch_size,))
      self.assertAllClose(kl_val, kl_expected)
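The kl_expected line above is the closed-form KL divergence between two univariate normals; written out in standard notation:

$$\mathrm{KL}\bigl(\mathcal{N}(\mu_a, \sigma_a^2)\,\|\,\mathcal{N}(\mu_b, \sigma_b^2)\bigr)
  = \frac{(\mu_a - \mu_b)^2}{2\sigma_b^2}
  + \frac{1}{2}\left(\frac{\sigma_a^2}{\sigma_b^2} - 1 - \ln\frac{\sigma_a^2}{\sigma_b^2}\right)$$

(The code's -2 * log(sigma_a / sigma_b) term equals -ln(sigma_a^2 / sigma_b^2), so the two forms agree.)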
Example #9
  def testExplicitVariationalAndPrior(self):
    with self.test_session() as sess:
      _, _, variational, _, log_likelihood = mini_vae()
      prior = normal.Normal(loc=3., scale=2.)
      elbo = vi.elbo(log_likelihood,
                     variational_with_prior={variational: prior})
      expected_elbo = log_likelihood - kullback_leibler.kl(
          variational.distribution, prior)
      sess.run(variables.global_variables_initializer())
      self.assertAllEqual(*sess.run([expected_elbo, elbo]))
Example #10
  def testNormalStandardDeviation(self):
    with self.test_session():
      # sigma will be broadcast to [7, 7, 7]
      mu = [1., 2., 3.]
      sigma = [7.]

      normal = normal_lib.Normal(loc=mu, scale=sigma)

      self.assertAllEqual((3,), normal.stddev().get_shape())
      self.assertAllEqual([7., 7, 7], normal.stddev().eval())
Example #11
  def _testParamShapes(self, sample_shape, expected):
    with self.test_session():
      param_shapes = normal_lib.Normal.param_shapes(sample_shape)
      mu_shape, sigma_shape = param_shapes["loc"], param_shapes["scale"]
      self.assertAllEqual(expected, mu_shape.eval())
      self.assertAllEqual(expected, sigma_shape.eval())
      mu = array_ops.zeros(mu_shape)
      sigma = array_ops.ones(sigma_shape)
      self.assertAllEqual(
          expected,
          array_ops.shape(normal_lib.Normal(mu, sigma).sample()).eval())
Example #12
  def testNormalMeanAndMode(self):
    with self.test_session():
      # Mu will be broadcast to [7, 7, 7].
      mu = [7.]
      sigma = [11., 12., 13.]

      normal = normal_lib.Normal(loc=mu, scale=sigma)

      self.assertAllEqual((3,), normal.mean().get_shape())
      self.assertAllEqual([7., 7, 7], normal.mean().eval())

      self.assertAllEqual((3,), normal.mode().get_shape())
      self.assertAllEqual([7., 7, 7], normal.mode().eval())
Example #13
  def testNormalShapeWithPlaceholders(self):
    mu = array_ops.placeholder(dtype=dtypes.float32)
    sigma = array_ops.placeholder(dtype=dtypes.float32)
    normal = normal_lib.Normal(loc=mu, scale=sigma)

    with self.test_session() as sess:
      # get_batch_shape should return an "<unknown>" TensorShape.
      self.assertEqual(normal.get_batch_shape(), tensor_shape.TensorShape(None))
      self.assertEqual(normal.get_event_shape(), ())
      self.assertAllEqual(normal.event_shape().eval(), [])
      self.assertAllEqual(
          sess.run(normal.batch_shape(), feed_dict={mu: 5.0,
                                                    sigma: [1.0, 2.0]}), [2])
Example #14
  def testNormalEntropyWithScalarInputs(self):
    # Scipy.stats.norm cannot deal with the shapes in the other test.
    with self.test_session():
      mu_v = 2.34
      sigma_v = 4.56
      normal = normal_lib.Normal(loc=mu_v, scale=sigma_v)

      # For scalar inputs, scipy.stats.norm gives the reference entropy directly.
      expected_entropy = stats.norm(mu_v, sigma_v).entropy()
      entropy = normal.entropy()
      self.assertAllClose(expected_entropy, entropy.eval())
      self.assertAllEqual(normal.batch_shape().eval(), entropy.get_shape())
      self.assertAllEqual(normal.batch_shape().eval(), entropy.eval().shape)
      self.assertAllEqual(normal.get_batch_shape(), entropy.get_shape())
      self.assertAllEqual(normal.get_batch_shape(), entropy.eval().shape)
Example #15
  def testNormalEntropy(self):
    with self.test_session():
      mu_v = np.array([1.0, 1.0, 1.0])
      sigma_v = np.array([[1.0, 2.0, 3.0]]).T
      normal = normal_lib.Normal(loc=mu_v, scale=sigma_v)

      # scipy.stats.norm cannot deal with these shapes.
      # mu_v is all ones, so this broadcasts sigma_v to a (3, 3) batch.
      sigma_broadcast = mu_v * sigma_v
      expected_entropy = 0.5 * np.log(
          2 * np.pi * np.exp(1) * sigma_broadcast**2)
      entropy = normal.entropy()
      np.testing.assert_allclose(expected_entropy, entropy.eval())
      self.assertAllEqual(normal.batch_shape().eval(), entropy.get_shape())
      self.assertAllEqual(normal.batch_shape().eval(), entropy.eval().shape)
      self.assertAllEqual(normal.get_batch_shape(), entropy.get_shape())
      self.assertAllEqual(normal.get_batch_shape(), entropy.eval().shape)
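For reference, the expected_entropy expression above is the differential entropy of a univariate normal, which depends only on the scale:

$$H\bigl(\mathcal{N}(\mu, \sigma^2)\bigr) = \tfrac{1}{2}\ln\bigl(2\pi e\,\sigma^2\bigr)$$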
Example #16
  def testNormalCDF(self):
    with self.test_session():
      batch_size = 50
      mu = self._rng.randn(batch_size)
      sigma = self._rng.rand(batch_size) + 1.0
      x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)

      normal = normal_lib.Normal(loc=mu, scale=sigma)
      expected_cdf = stats.norm(mu, sigma).cdf(x)

      cdf = normal.cdf(x)
      self.assertAllClose(expected_cdf, cdf.eval(), atol=0)
      self.assertAllEqual(normal.batch_shape().eval(), cdf.get_shape())
      self.assertAllEqual(normal.batch_shape().eval(), cdf.eval().shape)
      self.assertAllEqual(normal.get_batch_shape(), cdf.get_shape())
      self.assertAllEqual(normal.get_batch_shape(), cdf.eval().shape)
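The reference values from scipy here are the standard normal CDF applied to the standardized input:

$$F(x) = \Phi\!\left(\frac{x - \mu}{\sigma}\right)
       = \frac{1}{2}\left[1 + \operatorname{erf}\!\left(\frac{x - \mu}{\sigma\sqrt{2}}\right)\right]$$

The survival function exercised in the next example is simply S(x) = 1 - F(x).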
Example #17
  def testNormalLogSurvivalFunction(self):
    with self.test_session():
      batch_size = 50
      mu = self._rng.randn(batch_size)
      sigma = self._rng.rand(batch_size) + 1.0
      x = np.linspace(-10.0, 100.0, batch_size).astype(np.float64)

      normal = normal_lib.Normal(loc=mu, scale=sigma)
      expected_sf = stats.norm(mu, sigma).logsf(x)

      sf = normal.log_survival_function(x)
      self.assertAllClose(expected_sf, sf.eval(), atol=0, rtol=1e-5)
      self.assertAllEqual(normal.batch_shape().eval(), sf.get_shape())
      self.assertAllEqual(normal.batch_shape().eval(), sf.eval().shape)
      self.assertAllEqual(normal.get_batch_shape(), sf.get_shape())
      self.assertAllEqual(normal.get_batch_shape(), sf.eval().shape)
Example #18
  def testFiniteGradientAtDifficultPoints(self):
    for dtype in [np.float32, np.float64]:
      g = ops.Graph()
      with g.as_default():
        mu = variables.Variable(dtype(0.0))
        sigma = variables.Variable(dtype(1.0))
        dist = normal_lib.Normal(loc=mu, scale=sigma)
        x = np.array([-100., -20., -5., 0., 5., 20., 100.]).astype(dtype)
        for func in [
            dist.cdf, dist.log_cdf, dist.survival_function,
            dist.log_survival_function, dist.log_prob, dist.prob
        ]:
          value = func(x)
          grads = gradients_impl.gradients(value, [mu, sigma])
          with self.test_session(graph=g):
            variables.global_variables_initializer().run()
            self.assertAllFinite(value)
            self.assertAllFinite(grads[0])
            self.assertAllFinite(grads[1])
Example #19
    def _baseQuantileFiniteGradientAtDifficultPoints(self, dtype):
        g = ops.Graph()
        with g.as_default():
            mu = variables.Variable(dtype(0.0))
            sigma = variables.Variable(dtype(1.0))
            dist = normal_lib.Normal(loc=mu, scale=sigma)
            p = variables.Variable(
                np.array([
                    0., np.exp(-32.), np.exp(-2.),
                    1. - np.exp(-2.), 1. - np.exp(-32.), 1.
                ]).astype(dtype))

            value = dist.quantile(p)
            grads = gradients_impl.gradients(value, [mu, p])
            with self.test_session(graph=g):
                variables.global_variables_initializer().run()
                self.assertAllFinite(grads[0])
                self.assertAllFinite(grads[1])
Example #20
    def testNormalQuantile(self):
        with self.test_session():
            batch_size = 52
            mu = self._rng.randn(batch_size)
            sigma = self._rng.rand(batch_size) + 1.0
            p = np.linspace(0., 1.0, batch_size - 2).astype(np.float64)
            # Quantile is computed by piecewise rational approximation, so we
            # add some special input values to make sure we hit all the pieces.
            p = np.hstack((p, np.exp(-33), 1. - np.exp(-33)))

            normal = normal_lib.Normal(loc=mu, scale=sigma)
            expected_x = stats.norm(mu, sigma).ppf(p)
            x = normal.quantile(p)

            self.assertAllClose(expected_x, x.eval(), atol=0.)
            self.assertAllEqual(normal.batch_shape_tensor().eval(),
                                x.get_shape())
            self.assertAllEqual(normal.batch_shape_tensor().eval(),
                                x.eval().shape)
            self.assertAllEqual(normal.batch_shape, x.get_shape())
            self.assertAllEqual(normal.batch_shape, x.eval().shape)
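quantile is the inverse of the CDF (scipy's ppf); for a normal distribution it has the closed form

$$F^{-1}(p) = \mu + \sigma\,\Phi^{-1}(p) = \mu + \sigma\sqrt{2}\,\operatorname{erf}^{-1}(2p - 1)$$

which is why the test adds inputs extremely close to 0 and 1, where the approximation is most delicate.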
Example #21
def samples_one_normal(sample_shape, loc=0.5, scale=0.1):
    norm = normal.Normal(loc, scale)
    return norm.sample(sample_shape)
Example #22
    def __init__(self,
                 loc=None,
                 scale=None,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="MultivariateNormalLinearOperator"):
        """Construct Multivariate Normal distribution on `R^k`.

        The `batch_shape` is the broadcast shape between `loc` and `scale`
        arguments.

        The `event_shape` is given by the last dimension of `loc` or the last
        dimension of the matrix implied by `scale`.

        Recall that `covariance = scale @ scale.T`.

        Additional leading dimensions (if any) will index batches.

        Args:
          loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
            implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]`
            where `b >= 0` and `k` is the event size.
          scale: Instance of `LinearOperator` with same `dtype` as `loc` and
            shape `[B1, ..., Bb, k, k]`.
          validate_args: Python `bool`, default `False`. Whether to validate
            input with asserts. If `validate_args` is `False`, and the inputs
            are invalid, correct behavior is not guaranteed.
          allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
            exception if a statistic (e.g. mean/mode/etc...) is undefined for
            any batch member. If `True`, batch members with valid parameters
            leading to undefined statistics will return NaN for this statistic.
          name: The name to give Ops created by the initializer.

        Raises:
          ValueError: if `scale` is unspecified.
          TypeError: if not `scale.dtype.is_floating`.
        """
        parameters = locals()
        if scale is None:
            raise ValueError("Missing required `scale` parameter.")
        if not scale.dtype.is_floating:
            raise TypeError(
                "`scale` parameter must have floating-point dtype.")

        with ops.name_scope(name, values=[loc] + scale.graph_parents):
            # Since expand_dims doesn't preserve constant-ness, we obtain the
            # non-dynamic value if possible.
            event_shape = scale.range_dimension_tensor()
            if tensor_util.constant_value(event_shape) is not None:
                event_shape = tensor_util.constant_value(event_shape).reshape(
                    [1])
            else:
                event_shape = event_shape[array_ops.newaxis]
            batch_shape = scale.batch_shape_tensor()
            if loc is not None:
                loc = ops.convert_to_tensor(loc, name="loc")
                loc_batch_shape = loc.get_shape().with_rank_at_least(1)[:-1]
                if (loc.get_shape().ndims is None
                        or not loc_batch_shape.is_fully_defined()):
                    loc_batch_shape = array_ops.shape(loc)[:-1]
                else:
                    loc_batch_shape = ops.convert_to_tensor(
                        loc_batch_shape, name="loc_batch_shape")
                batch_shape = _broadcast_shape(batch_shape, loc_batch_shape)

        super(MultivariateNormalLinearOperator, self).__init__(
            distribution=normal.Normal(
                loc=array_ops.zeros([], dtype=scale.dtype),
                scale=array_ops.ones([], dtype=scale.dtype)),
            bijector=bijectors.AffineLinearOperator(
                shift=loc, scale=scale, validate_args=validate_args),
            batch_shape=batch_shape,
            event_shape=event_shape,
            validate_args=validate_args,
            name=name)
        self._parameters = parameters
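A minimal usage sketch for this constructor (my own illustration; the import paths are assumptions and varied across contrib-era TensorFlow releases, where the operator class also appeared as tf.contrib.linalg.LinearOperatorTriL):

import numpy as np
import tensorflow as tf

# Lower-triangular scale; covariance = scale @ scale.T per the docstring.
tril = np.float32([[1.0, 0.0, 0.0],
                   [0.5, 1.0, 0.0],
                   [0.2, 0.3, 1.0]])
scale = tf.linalg.LinearOperatorLowerTriangular(tril)
mvn = tf.contrib.distributions.MultivariateNormalLinearOperator(
    loc=tf.zeros([3]), scale=scale)
samples = mvn.sample(5)  # shape (5, 3): no batch dims, event size k = 3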