def testTailBijectorInverseIdentities(self, value, delta, expected):
     """Checks LambertWTail.inverse against precomputed input/output pairs."""
     tailweight = tf.constant(delta, tf.float64)
     bijector = tfb.LambertWTail(shift=0., scale=1., tailweight=tailweight)
     # Compare the inverse transform of `value` with the known answer.
     actual = bijector.inverse(np.float64(value))
     self.assertAllClose(actual, np.float64(expected))
# Example #2
    def __init__(self,
                 distribution,
                 shift,
                 scale,
                 tailweight=None,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="LambertWDistribution"):
        """Initializes the class.

    Args:
      distribution: `tf.Distribution`-like instance. Distribution F that is
        transformed to produce this Lambert W x F distribution.
      shift: shift that should be applied before & after tail transformation.
        For a location-scale family `distribution` (e.g., `Normal` or
        `StudentT`) this usually is set as the mean / location parameter. For a
        scale family `distribution` (e.g., `Gamma` or `Fisher`) this must be
        set to 0 to guarantee a proper transformation on the positive
        real-line.
      scale: scaling factor that should be applied before & after the tail
        transformation.  Usually the standard deviation or scaling parameter
        of the `distribution`.
      tailweight: Tail parameter `delta` of the resulting Lambert W x F
        distribution(s).
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value `NaN` to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: A name for the operation (optional).
    """
        # Capture the constructor arguments for `parameters` bookkeeping before
        # any of them are rebound below.
        parameters = dict(locals())
        with tf.name_scope(name) as name:
            # Infer a common floating dtype across the tail/shift/scale
            # parameters, defaulting to float32.
            dtype = dtype_util.common_dtype([tailweight, shift, scale],
                                            tf.float32)
            # tailweight=0 corresponds to no extra tail heaviness (the tail
            # transform is the identity in that case — see the delta-zero
            # tests elsewhere in this file).
            tailweight = 0. if tailweight is None else tailweight
            self._tailweight = tensor_util.convert_nonref_to_tensor(
                tailweight, name="tailweight", dtype=dtype)
            self._shift = tensor_util.convert_nonref_to_tensor(shift,
                                                               name="shift",
                                                               dtype=dtype)
            self._scale = tensor_util.convert_nonref_to_tensor(scale,
                                                               name="scale",
                                                               dtype=dtype)
            # All three parameters must share one float dtype.
            dtype_util.assert_same_float_dtype(
                (self.tailweight, self.shift, self.scale))
            self._allow_nan_stats = allow_nan_stats
            # Delegate to the parent (presumably a transformed-distribution
            # base class — confirm against the class header) with a
            # LambertWTail bijector that applies the heavy-tail transform.
            super(LambertWDistribution, self).__init__(
                distribution=distribution,
                bijector=tfb.LambertWTail(shift=shift,
                                          scale=scale,
                                          tailweight=tailweight,
                                          validate_args=validate_args),
                parameters=parameters,
                validate_args=validate_args,
                name=name)
 def testLambertWGaussianizationDeltaZero(self):
     """LambertWTail with tailweight=0 must behave as the identity map."""
     samples = np.random.normal(loc=self.loc, scale=self.scale, size=10)
     identity_bijector = tfb.LambertWTail(shift=self.loc,
                                          scale=self.scale,
                                          tailweight=0.0)
     # Both directions of the transform leave the samples untouched.
     self.assertAllClose(samples, identity_bijector(samples))
     self.assertAllClose(samples, identity_bijector.inverse(samples))
# Example #4
 def testTailBijectorRandomInputNonZeroDelta(self, delta):
   """Forward transform pushes values away from zero; inverse undoes it."""
   grid = np.linspace(-1, 1, num=10)
   bijector = tfb.LambertWTail(shift=0., scale=1.,
                               tailweight=tf.constant(delta, tf.float64))
   with self.session():
     # Gaussianizing moves points further from zero, so the elementwise
     # ratio transformed / grid exceeds 1 (grid has no exact zeros).
     self.assertTrue(np.all(self.evaluate(bijector(grid)) / grid > 1.))
     # The inverse is a true inverse of the forward transform.
     self.assertAllClose(bijector.inverse(bijector(grid)), grid)
     # Forward transform agrees with the numpy reference implementation.
     self.assertAllClose(bijector(grid),
                         _xexp_delta_squared_numpy(grid, delta))
 def testTailBijectorLogDetJacobian(self, value, delta, expected):
      """Verifies inverse_log_det_jacobian against precomputed values."""
      bijector = tfb.LambertWTail(shift=0.,
                                  scale=1.,
                                  tailweight=tf.constant(delta, tf.float64))
      # Promote both input and expectation to float64 so they match the
      # dtype of the tailweight constant.
      if isinstance(value, np.ndarray):
          value, expected = (value.astype(np.float64),
                             expected.astype(np.float64))
      else:
          value, expected = np.float64(value), np.float64(expected)
      self.assertAllClose(expected,
                          bijector.inverse_log_det_jacobian(value,
                                                            event_ndims=0))
# Example #6
 def testLambertWGaussianizationDeltaNonZero(self):
   """Tests that the inverse of the heavy tail transform is Normal."""
   vals = np.random.normal(loc=self.loc, scale=self.scale,
                           size=100).astype(np.float64)
   # Bug fix: LambertWTail's tail parameter is named `tailweight`, not
   # `tail` (see every other LambertWTail call in this file); passing
   # `tail=` would raise a TypeError.
   lsht = tfb.LambertWTail(shift=self.loc, scale=self.scale,
                           tailweight=self.tail)
   with self.session():
     # The forward transform should make the Normal samples heavy-tailed,
     # i.e. fail a normality test.
     heavy_tailed_vals = lsht(vals)
     _, p = stats.normaltest(self.evaluate(heavy_tailed_vals))
     self.assertLess(p, 1e-2)
     # Inverting (Gaussianizing) should restore normality and recover the
     # original samples.
     gaussianized_vals = lsht.inverse(heavy_tailed_vals)
     _, p = stats.normaltest(self.evaluate(gaussianized_vals))
     self.assertGreater(p, 0.05)
     self.assertAllClose(vals, gaussianized_vals)
# Example #7
 def testLambertWGaussianizationDeltaNonZeroSpecificValues(self, delta):
   """Tests that the output of ShiftScaleTail is correct when delta!=0."""
   grid = np.linspace(-1, 1, 10) + self.loc
   bijector = tfb.LambertWTail(shift=self.loc, scale=self.scale,
                               tailweight=delta)
   with self.session():
     # Reference computation: standardize, apply the numpy tail transform,
     # then undo the standardization.
     standardized = (grid - self.loc) / self.scale
     expected = _xexp_delta_squared_numpy(standardized, delta=delta)
     expected = expected * self.scale + self.loc
     forward = self.evaluate(bijector(grid))
     self.assertAllClose(expected, forward, rtol=0.0001)
     # Inverse-Gaussianizing pushes values further from the mean, so the
     # centered ratio exceeds 1 wherever grid != loc.
     self.assertTrue(np.all((forward - self.loc) / (grid - self.loc) > 1.))
     # Round-tripping through the inverse recovers the input.
     self.assertAllClose(bijector.inverse(bijector(grid)), grid)
 def testTailBijectorRandomInputZeroDelta(self):
      """With tailweight=0 the tail bijector leaves its input unchanged."""
      grid = np.linspace(-1, 1, num=11)
      identity_bijector = tfb.LambertWTail(shift=0., scale=1., tailweight=0.0)
      # Forward transform is the identity when the tail parameter is zero.
      self.assertAllClose(identity_bijector(grid), grid)