Example n. 1
0
 def testLaplaceNonPositiveInitializationParamsRaises(self):
   """With validate_args=True, a non-positive scale must raise on use."""
   # Negative scale must trip the validation assertion.
   bad_loc = constant_op.constant(0.0, name="loc")
   bad_scale = constant_op.constant(-1.0, name="scale")
   with self.assertRaisesOpError("Condition x > 0 did not hold element-wise"):
     dist = laplace_lib.Laplace(loc=bad_loc, scale=bad_scale, validate_args=True)
     self.evaluate(dist.mean())
   # A zero scale is just as invalid as a negative one.
   bad_loc = constant_op.constant(1.0, name="loc")
   bad_scale = constant_op.constant(0.0, name="scale")
   with self.assertRaisesOpError("Condition x > 0 did not hold element-wise"):
     dist = laplace_lib.Laplace(loc=bad_loc, scale=bad_scale, validate_args=True)
     self.evaluate(dist.mean())
Example n. 2
0
 def testLaplacePdfOfSampleMultiDims(self):
   """Sampled pdfs should integrate to ~1 and moments should match scipy."""
   dist = laplace_lib.Laplace(loc=[7., 11.], scale=[[5.], [6.]])
   n = 50000
   draws = dist.sample(n, seed=137)
   densities = dist.prob(draws)
   draw_vals, density_vals = self.evaluate([draws, densities])
   self.assertEqual(draws.get_shape(), (n, 2, 2))
   self.assertEqual(densities.get_shape(), (n, 2, 2))
   # Every (i, j) batch slice should integrate to roughly one.
   for i in range(2):
     for j in range(2):
       self._assertIntegral(draw_vals[:, i, j], density_vals[:, i, j], err=0.02)
   if not stats:
     return
   # Parameters broadcast to the full 2 x 2 batch for the scipy reference.
   broadcast_loc = [[7., 11.], [7., 11.]]
   broadcast_scale = np.array([[5., 5.], [6., 6.]])
   self.assertAllClose(
       stats.laplace.mean(broadcast_loc, scale=broadcast_scale),
       draw_vals.mean(axis=0),
       rtol=0.05,
       atol=0.)
   self.assertAllClose(
       stats.laplace.var(broadcast_loc, scale=broadcast_scale),
       draw_vals.var(axis=0),
       rtol=0.05,
       atol=0.)
Example n. 3
0
 def testLaplaceSampleMultiDimensional(self):
   """Broadcast sampling: moments and per-cell KS tests against scipy."""
   with session.Session():
     locs = np.array([np.arange(1, 101, dtype=np.float32)])  # shape 1 x 100
     scales = np.array([np.arange(1, 11, dtype=np.float32)]).T  # shape 10 x 1
     dist = laplace_lib.Laplace(loc=locs, scale=scales)
     n = 10000
     draws = dist.sample(n, seed=137)
     draw_vals = self.evaluate(draws)
     self.assertEqual(draws.get_shape(), (n, 10, 100))
     self.assertEqual(draw_vals.shape, (n, 10, 100))
     # Broadcast both parameters up to the full 10 x 100 batch shape.
     pad = np.zeros_like(locs + scales)
     loc_full = locs + pad
     scale_full = scales + pad
     if not stats:
       return
     self.assertAllClose(
         draw_vals.mean(axis=0),
         stats.laplace.mean(loc_full, scale=scale_full),
         rtol=0.35,
         atol=0.)
     self.assertAllClose(
         draw_vals.var(axis=0),
         stats.laplace.var(loc_full, scale=scale_full),
         rtol=0.10,
         atol=0.)
     # Run a KS test per batch cell; allow a small fraction of failures.
     failures = 0
     total = 0
     for col, loc_val in enumerate(np.reshape(locs, [-1])):
       for row, scale_val in enumerate(np.reshape(scales, [-1])):
         total += 1
         if not self._kstest(loc_val, scale_val, draw_vals[:, row, col]):
           failures += 1
     self.assertLess(failures, total * 0.03)
Example n. 4
0
 def testLaplaceSample(self):
   """Scalar-parameter sampling: shape, moments, and a KS goodness-of-fit."""
   with session.Session():
     mu = 4.0
     b = 3.0
     dist = laplace_lib.Laplace(
         loc=constant_op.constant(mu), scale=constant_op.constant(b))
     n = 100000
     draws = dist.sample(n, seed=137)
     draw_vals = self.evaluate(draws)
     self.assertEqual(draws.get_shape(), (n,))
     self.assertEqual(draw_vals.shape, (n,))
     if not stats:
       return
     self.assertAllClose(
         draw_vals.mean(),
         stats.laplace.mean(mu, scale=b),
         rtol=0.05,
         atol=0.)
     self.assertAllClose(
         draw_vals.var(),
         stats.laplace.var(mu, scale=b),
         rtol=0.05,
         atol=0.)
     self.assertTrue(self._kstest(mu, b, draw_vals))
Example n. 5
0
 def testLaplaceMode(self):
   """The mode of a Laplace distribution is its loc parameter."""
   with self.test_session():
     locations = np.array([0.5, 3.0, 2.5])
     dist = laplace_lib.Laplace(loc=locations, scale=np.array([1.0, 4.0, 5.0]))
     self.assertEqual(dist.mode().get_shape(), (3,))
     self.assertAllClose(self.evaluate(dist.mode()), locations)
Example n. 6
0
 def testLaplaceNonPositiveInitializationParamsRaises(self):
   """validate_args=True must reject zero and negative scale values."""
   with self.test_session():
     # Both a negative and a zero scale are invalid.
     for loc_value, bad_scale in ((0.0, -1.0), (1.0, 0.0)):
       loc_t = constant_op.constant(loc_value, name="loc")
       scale_t = constant_op.constant(bad_scale, name="scale")
       dist = laplace_lib.Laplace(
           loc=loc_t, scale=scale_t, validate_args=True)
       with self.assertRaisesOpError("scale"):
         dist.mean().eval()
Example n. 7
0
    def testLaplaceShape(self):
        """Batch shape broadcasts loc against scale; event shape is scalar."""
        loc = constant_op.constant([3.0] * 5)
        scale = constant_op.constant(11.0)
        laplace = laplace_lib.Laplace(loc=loc, scale=scale)

        # assertAllEqual (not assertEqual) for the evaluated, array-valued
        # shape tensor, matching the event-shape check below: assertEqual
        # compared a numpy array against a tuple element-wise by truthiness.
        self.assertAllEqual(self.evaluate(laplace.batch_shape_tensor()), (5,))
        self.assertEqual(laplace.batch_shape, tensor_shape.TensorShape([5]))
        self.assertAllEqual(self.evaluate(laplace.event_shape_tensor()), [])
        self.assertEqual(laplace.event_shape, tensor_shape.TensorShape([]))
Example n. 8
0
 def testLaplaceEntropy(self):
   """Entropy matches scipy's reference implementation."""
   loc_values = np.array([1.0, 3.0, 2.5])
   scale_values = np.array([1.0, 4.0, 5.0])
   dist = laplace_lib.Laplace(loc=loc_values, scale=scale_values)
   self.assertEqual(dist.entropy().get_shape(), (3,))
   if not stats:
     return
   self.assertAllClose(
       self.evaluate(dist.entropy()),
       stats.laplace.entropy(loc_values, scale=scale_values))
Esempio n. 9
0
 def testLaplaceStd(self):
   """Standard deviation matches scipy's reference value."""
   with self.test_session():
     loc_values = np.array([1.0, 3.0, 2.5])
     scale_values = np.array([1.0, 4.0, 5.0])
     dist = laplace_lib.Laplace(loc=loc_values, scale=scale_values)
     self.assertEqual(dist.stddev().get_shape(), (3,))
     if not stats:
       return
     self.assertAllClose(
         self.evaluate(dist.stddev()),
         stats.laplace.std(loc_values, scale=scale_values))
Example n. 10
0
 def testLaplaceVariance(self):
   """Variance matches scipy's reference value."""
   with self.test_session():
     loc_v = np.array([1.0, 3.0, 2.5])
     scale_v = np.array([1.0, 4.0, 5.0])
     laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v)
     self.assertEqual(laplace.variance().get_shape(), (3,))
     if not stats:
       return
     expected_variances = stats.laplace.var(loc_v, scale=scale_v)
     # self.evaluate (rather than Tensor.eval()) matches the sibling tests
     # in this file and works in both graph and eager modes.
     self.assertAllClose(self.evaluate(laplace.variance()), expected_variances)
Example n. 11
0
 def testLaplaceFullyReparameterized(self):
   """Sampling is reparameterized: gradients w.r.t. loc and scale exist."""
   loc = constant_op.constant(4.0)
   scale = constant_op.constant(3.0)
   with backprop.GradientTape() as tape:
     tape.watch(loc)
     tape.watch(scale)
     draws = laplace_lib.Laplace(loc=loc, scale=scale).sample(100)
   gradients = tape.gradient(draws, [loc, scale])
   self.assertIsNotNone(gradients[0])
   self.assertIsNotNone(gradients[1])
Example n. 12
0
    def testLaplaceLogSurvivalFunction(self):
        """log survival function agrees with scipy, including x below loc."""
        n = 6
        dist = laplace_lib.Laplace(
            loc=constant_op.constant([2.0] * n),
            scale=constant_op.constant([3.0] * n))
        x = np.array([-2.5, 2.5, -4.0, 0.1, 1.0, 2.0], dtype=np.float32)

        log_sf = dist.log_survival_function(x)
        self.assertEqual(log_sf.get_shape(), (6,))
        if not stats:
            return
        self.assertAllClose(
            self.evaluate(log_sf), stats.laplace.logsf(x, 2.0, scale=3.0))
Example n. 13
0
    def testLaplaceCDF(self):
        """CDF agrees with scipy.stats.laplace.cdf."""
        n = 6
        dist = laplace_lib.Laplace(
            loc=constant_op.constant([2.0] * n),
            scale=constant_op.constant([3.0] * n))
        x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)

        cdf = dist.cdf(x)
        self.assertEqual(cdf.get_shape(), (6,))
        if not stats:
            return
        self.assertAllClose(
            self.evaluate(cdf), stats.laplace.cdf(x, 2.0, scale=3.0))
Example n. 14
0
  def testLaplaceLogPDF(self):
    """log_prob and prob agree with scipy.stats.laplace."""
    with self.test_session():
      n = 6
      dist = laplace_lib.Laplace(
          loc=constant_op.constant([2.0] * n),
          scale=constant_op.constant([3.0] * n))
      x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
      log_pdf = dist.log_prob(x)
      self.assertEqual(log_pdf.get_shape(), (6,))
      if not stats:
        return
      expected_log_pdf = stats.laplace.logpdf(x, 2.0, scale=3.0)
      self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)

      # prob must be exp(log_prob).
      pdf = dist.prob(x)
      self.assertEqual(pdf.get_shape(), (6,))
      self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf))
Example n. 15
0
    def testLaplaceLogPDFMultidimensionalBroadcasting(self):
        """Scalar scale broadcasts against a (6, 2) loc and a (6, 1) x."""
        rows = 6
        dist = laplace_lib.Laplace(
            loc=constant_op.constant([[2.0, 4.0]] * rows),
            scale=constant_op.constant(3.0))
        x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T

        log_pdf = dist.log_prob(x)
        log_pdf_values = self.evaluate(log_pdf)
        self.assertEqual(log_pdf.get_shape(), (6, 2))

        pdf = dist.prob(x)
        pdf_values = self.evaluate(pdf)
        self.assertEqual(pdf.get_shape(), (6, 2))
        if not stats:
            return
        expected_log_pdf = stats.laplace.logpdf(
            x, np.array([2.0, 4.0]), scale=3.0)
        self.assertAllClose(log_pdf_values, expected_log_pdf)
        self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
    def __init__(self,
                 loc=None,
                 scale=None,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="VectorLaplaceLinearOperator"):
        """Construct Vector Laplace distribution on `R^k`.

    The `batch_shape` is the broadcast shape between `loc` and `scale`
    arguments.

    The `event_shape` is given by last dimension of the matrix implied by
    `scale`. The last dimension of `loc` (if provided) must broadcast with this.

    Recall that `covariance = 2 * scale @ scale.T`.

    Additional leading dimensions (if any) will index batches.

    Args:
      loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
        implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
        `b >= 0` and `k` is the event size.
      scale: Instance of `LinearOperator` with same `dtype` as `loc` and shape
        `[B1, ..., Bb, k, k]`.
      validate_args: Python `bool`, default `False`. Whether to validate input
        with asserts. If `validate_args` is `False`, and the inputs are
        invalid, correct behavior is not guaranteed.
      allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      ValueError: if `scale` is unspecified.
      TypeError: if not `scale.dtype.is_floating`
    """
        # Capture constructor arguments before any other locals exist, so
        # only the call parameters are recorded.
        parameters = locals()
        if scale is None:
            raise ValueError("Missing required `scale` parameter.")
        if not scale.dtype.is_floating:
            raise TypeError(
                "`scale` parameter must have floating-point dtype.")

        with ops.name_scope(name, values=[loc] + scale.graph_parents):
            # Normalize `loc` to a Tensor; `None` is kept as-is (it means an
            # implicit zero shift for the affine bijector below).
            loc = ops.convert_to_tensor(loc,
                                        name="loc") if loc is not None else loc
            batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
                loc, scale)

            # Build the vector Laplace as a transformed distribution: a
            # standard (loc=0, scale=1) scalar Laplace base, pushed through
            # the affine map x -> loc + scale @ x.
            super(VectorLaplaceLinearOperator,
                  self).__init__(distribution=laplace.Laplace(
                      loc=array_ops.zeros([], dtype=scale.dtype),
                      scale=array_ops.ones([], dtype=scale.dtype)),
                                 bijector=bijectors.AffineLinearOperator(
                                     shift=loc,
                                     scale=scale,
                                     validate_args=validate_args),
                                 batch_shape=batch_shape,
                                 event_shape=event_shape,
                                 validate_args=validate_args,
                                 name=name)
            self._parameters = parameters