  def testWorksInDistributionLayerAndNegloglik(self):
    """Tests the distribution as a layer and in gradient-based optimization."""
    x = np.random.uniform(size=(100, 1))
    y = 2. - x
    lwn = lambertw_f.LambertWNormal(tailweight=0.5, loc=0.2, scale=1.0)
    eps = self.evaluate(lwn.sample(x.shape, seed=test_util.test_seed()))
    y += eps

    def dist_lambda(t):
      return lambertw_f.LambertWNormal(
          loc=t[..., :1],
          scale=1e-3 + tf.math.softplus(t[..., 1:2]),
          tailweight=1e-3 + tf.math.softplus(t[..., 2:]))
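    # The Dense head below emits three unconstrained columns per example:
    # column 0 becomes `loc` directly, while columns 1 and 2 go through
    # softplus (with a 1e-3 floor) so `scale` and `tailweight` stay positive.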

    dist_layer = tfp.layers.DistributionLambda(dist_lambda)
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(10, "relu"),
        tf.keras.layers.Dense(5, "selu"),
        tf.keras.layers.Dense(1 + 1 + 1),
        dist_layer])
    negloglik = lambda y, p_y: -p_y.log_prob(y)
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
                  loss=negloglik)

    model.fit(x, y, epochs=1, verbose=True, batch_size=32, validation_split=0.2)
    self.assertGreater(model.history.history["val_loss"][0], -np.inf)
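
    # A rough sketch of how the fitted model could be used (illustrative
    # only, not part of the assertions): calling the model on inputs returns
    # the LambertWNormal itself, so point predictions could be read off as
    #   y_dist = model(x[:5])
    #   y_hat = self.evaluate(y_dist.mode())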

  # `delta` and `variance_multiplier` are assumed to come from a
  # @parameterized.named_parameters decorator not shown in this excerpt.
  def testVariance(self, delta, variance_multiplier):
    loc = 2.
    scale = 3.
    lwn = lambertw_f.LambertWNormal(loc=loc, scale=scale, tailweight=delta)
    if np.isnan(variance_multiplier):
      self.assertAllClose(lwn.variance(), np.nan)
    else:
      self.assertAllClose(lwn.variance(), scale**2 * variance_multiplier)
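
    # For reference, the Lambert W x Gaussian variance is
    # scale**2 * (1 - 2 * delta)**(-3 / 2) for delta < 1/2 and undefined
    # (NaN) otherwise; the parameterized `variance_multiplier` cases are
    # assumed to encode exactly these values.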

  # `delta` and `mean_exists` are likewise assumed to come from a
  # parameterized decorator elided from this excerpt.
  def testMeanMode(self, delta, mean_exists):
    loc = 2.
    scale = 3.
    lwn = lambertw_f.LambertWNormal(loc=loc, scale=scale, tailweight=delta)
    self.assertAllClose(lwn.mode(), loc)
    if mean_exists:
      self.assertAllClose(lwn.mean(), loc)
    else:
      self.assertAllClose(lwn.mean(), np.nan)
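
    # The distribution is symmetric around `loc`, so the mode is always `loc`.
    # The mean only exists for small enough tailweights (moments of order n
    # require tailweight < 1/n, so the mean needs tailweight < 1), which is
    # what the `mean_exists` flag is assumed to encode.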

  def testShapes(self):
    loc = tf.ones([2], dtype=tf.float32)
    scale = tf.ones([3, 1], dtype=tf.float32)
    tailweight = tf.zeros([4, 1, 1], dtype=tf.float32)
    lwn = lambertw_f.LambertWNormal(loc=loc,
                                    scale=scale,
                                    tailweight=tailweight,
                                    validate_args=True)
    self.assertAllEqual(lwn.batch_shape, [4, 3, 2])
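
    # The parameter batch shapes [2], [3, 1] and [4, 1, 1] follow standard
    # broadcasting rules, so the resulting batch shape is their broadcast,
    # [4, 3, 2].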

  def testProbMethod(self):
    """Tests that Lambert W pdf equals the Normal pdf when tailweight is 0."""
    tailweight = 0.
    loc = 2.
    scale = 3.
    lwn = lambertw_f.LambertWNormal(loc=loc, scale=scale, tailweight=tailweight)
    values = np.random.normal(loc=loc, scale=scale, size=10)
    normal_pdf = tfd.Normal(loc, scale).prob(values)
    lambertw_normal_pdf = lwn.prob(values)
    self.assertAllClose(normal_pdf, lambertw_normal_pdf)
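
    # At tailweight == 0 the heavy-tail transform is the identity, so
    # LambertWNormal(loc, scale, 0.) and Normal(loc, scale) should agree on
    # all methods, of which `prob` is checked here.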

  def testSampleMethod(self):
    """Tests that samples can be transformed back into Gaussian samples."""
    tailweight = .1
    loc = 2.
    scale = 3.
    lwn = lambertw_f.LambertWNormal(loc=loc, scale=scale, tailweight=tailweight)
    samples = lwn.sample(100, seed=test_util.test_seed())

    self.assertAllEqual(samples.shape, [100])
    gaussianized_samples = tfb.LambertWTail(
        shift=loc, scale=scale, tailweight=tailweight).inverse(samples)
    _, p = stats.normaltest(self.evaluate(gaussianized_samples))
    self.assertGreater(p, .05)
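
    # Heavy-tailed samples arise (conceptually) as
    # y = loc + scale * u * exp(tailweight / 2 * u**2) with u ~ N(0, 1), so
    # inverting the same LambertWTail bijector should recover Normal-looking
    # draws; `normaltest` is the D'Agostino-Pearson test, so p > .05 means
    # normality is not rejected.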

  def testQuantileMethod(self):
    """Tests that quantiles are correct."""
    tailweight = 0.
    loc = 2.
    scale = 3.
    lwn = lambertw_f.LambertWNormal(loc=loc, scale=scale, tailweight=tailweight)

    values = np.random.uniform(low=0.0, high=1.0, size=5)
    normal_quantiles = tfd.Normal(loc, scale).quantile(values)
    transformed_normal_quantiles = tfb.LambertWTail(
        shift=loc, scale=scale, tailweight=tailweight)(normal_quantiles)
    lambertw_quantiles = lwn.quantile(values)
    self.assertAllClose(transformed_normal_quantiles, lambertw_quantiles)
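
    # For a monotone bijector the quantile of the transformed distribution is
    # the bijector applied to the base Normal quantile, which is exactly the
    # equality checked above.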

  def testCDFMethod(self):
    """Tests that output of the cdf method is correct."""
    tailweight = 0.
    loc = 2.
    scale = 3.
    lwn = lambertw_f.LambertWNormal(loc=loc, scale=scale, tailweight=tailweight)

    values = np.random.uniform(low=0.0, high=1.0, size=10)
    quantiles = tfd.Normal(loc, scale).quantile(values)
    heavy_tailed_values = tfb.LambertWTail(
        shift=loc, scale=scale, tailweight=tailweight)(quantiles)
    lambertw_normal_values = lwn.cdf(heavy_tailed_values)
    self.assertAllClose(values, lambertw_normal_values)
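
    # This mirrors the quantile test in the other direction: heavy-tailed
    # values built from known Normal quantiles should map back to the
    # original uniform probabilities under `cdf`.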

  # `delta` is assumed to come from a parameterized decorator elided from
  # this excerpt.
  def testQuantileInverseOfCDF(self, delta):
    lwn = lambertw_f.LambertWNormal(loc=2., scale=3., tailweight=delta)
    samples_ = self.evaluate(lwn.sample(5, seed=test_util.test_seed()))
    cdf_vals = lwn.cdf(samples_)
    self.assertAllClose(lwn.quantile(cdf_vals), samples_, rtol=1e-4)
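
    # `quantile` and `cdf` should be inverse maps on the support, so the
    # round trip is expected to reproduce the samples up to numerical
    # tolerance.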