def __init__(self,
             loc=None,
             scale=None,
             validate_args=False,
             allow_nan_stats=True,
             name="VectorExponentialLinearOperator"):
    """Construct Vector Exponential distribution supported on a subset of `R^k`.

    The `batch_shape` is the broadcast shape between `loc` and `scale`
    arguments, and the `event_shape` is the last dimension of the matrix
    implied by `scale` (the last dimension of `loc`, if given, must
    broadcast with it). Recall that `covariance = scale @ scale.T`; any
    additional leading dimensions index batches.

    Args:
      loc: Floating-point `Tensor`, or `None` (treated as an implicit `0`).
        When specified, may have shape `[B1, ..., Bb, k]` where `b >= 0`
        and `k` is the event size.
      scale: Instance of `LinearOperator` with the same `dtype` as `loc`
        and shape `[B1, ..., Bb, k, k]`.
      validate_args: Python `bool`, default `False`. Whether to validate
        input with asserts; if `False` and inputs are invalid, correct
        behavior is not guaranteed.
      allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
        exception when a statistic (mean/mode/etc.) is undefined for any
        batch member; if `True`, such statistics return NaN instead.
      name: The name to give Ops created by the initializer.

    Raises:
      ValueError: if `scale` is unspecified.
      TypeError: if not `scale.dtype.is_floating`
    """
    # Capture constructor args before any locals are introduced.
    parameters = dict(locals())
    if scale is None:
        raise ValueError("Missing required `scale` parameter.")
    if not scale.dtype.is_floating:
        raise TypeError(
            "`scale` parameter must have floating-point dtype.")

    with tf.name_scope(name, values=[loc] + scale.graph_parents) as name:
        # Since expand_dims doesn't preserve constant-ness, we obtain the
        # non-dynamic value if possible.
        if loc is not None:
            loc = tf.convert_to_tensor(loc, name="loc")
        batch_shape, event_shape = (
            distribution_util.shapes_from_loc_and_scale(loc, scale))

        # Base distribution: iid standard Exponential on each event dim.
        base_distribution = exponential.Exponential(
            rate=tf.ones([], dtype=scale.dtype),
            allow_nan_stats=allow_nan_stats)
        # Affine push-forward y = scale @ x + loc.
        affine_bijector = bijectors.AffineLinearOperator(
            shift=loc,
            scale=scale,
            validate_args=validate_args)

        super(VectorExponentialLinearOperator, self).__init__(
            distribution=base_distribution,
            bijector=affine_bijector,
            batch_shape=batch_shape,
            event_shape=event_shape,
            validate_args=validate_args,
            name=name)
        self._parameters = parameters
# Example #2
 def testExponentialLogPDFBoundary(self):
   # The Exponential pdf at the support boundary x = 0 equals `rate`,
   # so log_prob(0) should be finite and equal to log(rate).
   rates = np.array([0.1, 0.5, 1., 2., 5., 10.], dtype=np.float32)
   dist = exponential_lib.Exponential(rate=rates)
   boundary_log_pdf = dist.log_prob(0.)
   self.assertAllClose(np.log(rates), self.evaluate(boundary_log_pdf))