Example #1
    def __init__(self, loc=0., scale=1., validate_args=False, name="gumbel"):
        """Instantiates the `Gumbel` bijector.

    Args:
      loc: Float-like `Tensor` that is the same dtype and is
        broadcastable with `scale`.
        This is `loc` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`.
      scale: Positive Float-like `Tensor` that is the same dtype and is
        broadcastable with `loc`.
        This is `scale` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.
    """
        with tf.name_scope(name) as name:
            dtype = dtype_util.common_dtype([loc, scale],
                                            dtype_hint=tf.float32)
            self._loc = tensor_util.convert_immutable_to_tensor(loc,
                                                                dtype=dtype,
                                                                name="loc")
            self._scale = tensor_util.convert_immutable_to_tensor(scale,
                                                                  dtype=dtype,
                                                                  name="scale")
            super(Gumbel, self).__init__(validate_args=validate_args,
                                         forward_min_event_ndims=0,
                                         name=name)
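The forward map quoted in the docstring is the Gumbel CDF. A minimal NumPy sketch (illustrative only, not part of the library) of `Y = exp(-exp(-(X - loc) / scale))`:

import numpy as np

def gumbel_forward(x, loc=0., scale=1.):
    """Forward map of the Gumbel bijector: the Gumbel CDF."""
    return np.exp(-np.exp(-(x - loc) / scale))

# Values are mapped into (0, 1), as expected for a CDF.
print(gumbel_forward(np.array([-2., 0., 3.]), loc=0., scale=1.))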
Example #2
 def test_tf_variable(self):
     x = tf.Variable(1., trainable=True)
     y = tensor_util.convert_immutable_to_tensor(x)
     self.assertIs(x, y)
     x = tf.Variable(1., trainable=False)
     y = tensor_util.convert_immutable_to_tensor(x)
     self.assertIs(x, y)
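These tests pin down the pass-through contract: `tf.Variable`s (trainable or not) are returned unchanged, while plain values become `Tensor`s. A simplified, hypothetical sketch of that behavior, not the actual TFP implementation:

import tensorflow as tf

def convert_immutable_to_tensor_sketch(value, dtype=None, dtype_hint=None, name=None):
    """Simplified illustration: pass Variables through, convert everything else."""
    if value is None or isinstance(value, tf.Variable):
        # Keep the reference so later mutations remain visible to the caller.
        return value
    return tf.convert_to_tensor(value, dtype=dtype, dtype_hint=dtype_hint, name=name)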
Example #3
    def __init__(self,
                 skewness=None,
                 tailweight=None,
                 validate_args=False,
                 name="sinh_arcsinh"):
        """Instantiates the `SinhArcsinh` bijector.

    Args:
      skewness:  Skewness parameter.  Float-type `Tensor`.  Default is `0`
        of type `float32`.
      tailweight:  Tailweight parameter.  Positive `Tensor` of same `dtype` as
        `skewness` and broadcastable `shape`.  Default is `1` of type `float32`.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.
    """
        with tf.name_scope(name) as name:
            tailweight = 1. if tailweight is None else tailweight
            skewness = 0. if skewness is None else skewness
            dtype = dtype_util.common_dtype([tailweight, skewness],
                                            dtype_hint=tf.float32)
            self._skewness = tensor_util.convert_immutable_to_tensor(
                skewness, dtype=dtype, name="skewness")
            self._tailweight = tensor_util.convert_immutable_to_tensor(
                tailweight, dtype=dtype, name="tailweight")
            super(SinhArcsinh, self).__init__(forward_min_event_ndims=0,
                                              validate_args=validate_args,
                                              name=name)
Example #4
    def __init__(self,
                 concentration1=1.,
                 concentration0=1.,
                 validate_args=False,
                 name="kumaraswamy"):
        """Instantiates the `Kumaraswamy` bijector.

    Args:
      concentration1: Python `float` scalar indicating the transform power,
        i.e., `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a)` where `a` is
        `concentration1`.
      concentration0: Python `float` scalar indicating the transform power,
        i.e., `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a)` where `b` is
        `concentration0`.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.
    """
        with tf.name_scope(name) as name:
            dtype = dtype_util.common_dtype([concentration0, concentration1],
                                            dtype_hint=tf.float32)
            self._concentration0 = tensor_util.convert_immutable_to_tensor(
                concentration0, dtype=dtype, name="concentration0")
            self._concentration1 = tensor_util.convert_immutable_to_tensor(
                concentration1, dtype=dtype, name="concentration1")
            super(Kumaraswamy, self).__init__(forward_min_event_ndims=0,
                                              validate_args=validate_args,
                                              name=name)
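A short NumPy sketch (illustrative, assuming inputs in `[0, 1]`) of the transform `Y = (1 - (1 - X)**(1 / b))**(1 / a)` described in the docstring:

import numpy as np

def kumaraswamy_forward(x, concentration1=1., concentration0=1.):
    a, b = concentration1, concentration0
    return (1. - (1. - x) ** (1. / b)) ** (1. / a)

# With a = b = 1 the transform is the identity on [0, 1].
x = np.linspace(0., 1., 5)
print(np.allclose(kumaraswamy_forward(x), x))  # True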
Example #5
    def __init__(self,
                 loc,
                 scale,
                 concentration,
                 validate_args=False,
                 allow_nan_stats=True,
                 name=None):
        """Construct a Generalized Pareto distribution.

    Args:
      loc: The location / shift of the distribution. GeneralizedPareto is a
        location-scale distribution. This parameter lower bounds the
        distribution's support. Must broadcast with `scale`, `concentration`.
        Floating point `Tensor`.
      scale: The scale of the distribution. GeneralizedPareto is a
        location-scale distribution, so doubling the `scale` doubles a sample
        and halves the density. Strictly positive floating point `Tensor`. Must
        broadcast with `loc`, `concentration`.
      concentration: The shape parameter of the distribution. The larger the
        magnitude, the more the distribution concentrates near `loc` (for
        `concentration >= 0`) or near `loc - (scale/concentration)` (for
        `concentration < 0`). Floating point `Tensor`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, variance) use the value "`NaN`" to indicate the result is
        undefined. When `False`, an exception is raised if one or more of the
        statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if `loc`, `scale`, or `concentration` have different dtypes.
    """
        parameters = dict(locals())
        with tf.name_scope(name or 'GeneralizedPareto') as name:
            dtype = dtype_util.common_dtype([loc, scale, concentration],
                                            dtype_hint=tf.float32)
            self._loc = tensor_util.convert_immutable_to_tensor(loc,
                                                                dtype=dtype,
                                                                name='loc')
            self._scale = tensor_util.convert_immutable_to_tensor(scale,
                                                                  dtype=dtype,
                                                                  name='scale')
            self._concentration = tensor_util.convert_immutable_to_tensor(
                concentration, dtype=dtype, name='concentration')
            super(GeneralizedPareto, self).__init__(
                dtype=dtype,
                validate_args=validate_args,
                allow_nan_stats=allow_nan_stats,
                reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
                parameters=parameters,
                name=name)
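As a quick arithmetic check of the support described in the docstring (a sketch, not library code): for `concentration < 0` the support is the interval `[loc, loc - scale / concentration]`.

loc, scale, concentration = 1., 2., -0.5
upper = loc - scale / concentration  # 1. - 2. / -0.5 = 5.
print(upper)  # 5.0: samples fall in [1.0, 5.0] for this parameterization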
Example #6
    def __init__(self,
                 total_count,
                 logits=None,
                 probs=None,
                 validate_args=False,
                 allow_nan_stats=True,
                 name='Multinomial'):
        """Initialize a batch of Multinomial distributions.

    Args:
      total_count: Non-negative floating point tensor with shape broadcastable
        to `[N1,..., Nm]` with `m >= 0`. Defines this as a batch of
        `N1 x ... x Nm` different Multinomial distributions. Its components
        should be equal to integer values.
      logits: Floating point tensor representing unnormalized log-probabilities
        of a positive event with shape broadcastable to
        `[N1,..., Nm, K]` `m >= 0`, and the same dtype as `total_count`. Defines
        this as a batch of `N1 x ... x Nm` different `K` class Multinomial
        distributions. Only one of `logits` or `probs` should be passed in.
      probs: Positive floating point tensor with shape broadcastable to
        `[N1,..., Nm, K]` `m >= 0` and same dtype as `total_count`. Defines
        this as a batch of `N1 x ... x Nm` different `K` class Multinomial
        distributions. `probs`'s components in the last portion of its shape
        should sum to `1`. Only one of `logits` or `probs` should be passed in.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
        parameters = dict(locals())

        if (probs is None) == (logits is None):
            raise ValueError('Must pass probs or logits, but not both.')
        with tf.name_scope(name) as name:
            dtype = dtype_util.common_dtype([total_count, logits, probs],
                                            dtype_hint=tf.float32)
            self._total_count = tensor_util.convert_immutable_to_tensor(
                total_count, name='total_count', dtype=dtype)
            self._probs = tensor_util.convert_immutable_to_tensor(probs,
                                                                  dtype=dtype,
                                                                  name='probs')
            self._logits = tensor_util.convert_immutable_to_tensor(
                logits, dtype=dtype, name='logits')
            super(Multinomial, self).__init__(
                dtype=dtype,
                reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
                validate_args=validate_args,
                allow_nan_stats=allow_nan_stats,
                parameters=parameters,
                name=name)
Example #7
  def __init__(self,
               df,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name='StudentT'):
    """Construct Student's t distributions.

    The distributions have degree of freedom `df`, mean `loc`, and scale
    `scale`.

    The parameters `df`, `loc`, and `scale` must be shaped in a way that
    supports broadcasting (e.g. `df + loc + scale` is a valid operation).

    Args:
      df: Floating-point `Tensor`. The degrees of freedom of the
        distribution(s). `df` must contain only positive values.
      loc: Floating-point `Tensor`. The mean(s) of the distribution(s).
      scale: Floating-point `Tensor`. The scaling factor(s) for the
        distribution(s). Note that `scale` is not technically the standard
        deviation of this distribution but has semantics more similar to
        standard deviation than variance.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value '`NaN`' to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if loc and scale are different dtypes.
    """
    parameters = dict(locals())
    with tf.name_scope(name) as name:
      dtype = dtype_util.common_dtype([df, loc, scale], tf.float32)
      self._df = tensor_util.convert_immutable_to_tensor(
          df, name='df', dtype=dtype)
      self._loc = tensor_util.convert_immutable_to_tensor(
          loc, name='loc', dtype=dtype)
      self._scale = tensor_util.convert_immutable_to_tensor(
          scale, name='scale', dtype=dtype)
      dtype_util.assert_same_float_dtype((self._df, self._loc, self._scale))
      super(StudentT, self).__init__(
          dtype=dtype,
          reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          parameters=parameters,
          name=name)
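The docstring's caveat that `scale` is not the standard deviation can be made concrete; for `df > 2` the standard deviation of a location-scale Student's t is `scale * sqrt(df / (df - 2))` (a sketch, not library code):

import numpy as np

df, loc, scale = 5., 0., 2.
stddev = scale * np.sqrt(df / (df - 2.))
print(stddev)  # ~2.58, noticeably larger than scale=2 for small df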
Example #8
    def test_tf_module(self):
        x = FakeModule(1.)
        y = tensor_util.convert_immutable_to_tensor(x)
        self.assertIsInstance(y, tf.Tensor)
        self.assertEqual(1., self.evaluate(y))

        x = FakeModule(tf.Variable(2., trainable=True))
        y = tensor_util.convert_immutable_to_tensor(x)
        self.assertIs(x, y)

        x = FakeModule(tf.Variable(2., trainable=False))
        y = tensor_util.convert_immutable_to_tensor(x)
        self.assertIs(x, y)
Example #9
    def __init__(self,
                 scale,
                 validate_args=False,
                 allow_nan_stats=True,
                 name='HalfNormal'):
        """Construct HalfNormals with scale `scale`.

    Args:
      scale: Floating point tensor; the scales of the distribution(s).
        Must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value '`NaN`' to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
        parameters = dict(locals())
        with tf.name_scope(name) as name:
            dtype = dtype_util.common_dtype([scale], dtype_hint=tf.float32)
            self._scale = tensor_util.convert_immutable_to_tensor(scale,
                                                                  name='scale',
                                                                  dtype=dtype)
            super(HalfNormal, self).__init__(
                dtype=dtype,
                reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
                validate_args=validate_args,
                allow_nan_stats=allow_nan_stats,
                parameters=parameters,
                name=name)
Example #10
    def __init__(self,
                 loc,
                 scale,
                 validate_args=False,
                 allow_nan_stats=True,
                 name='Laplace'):
        """Construct Laplace distribution with parameters `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g., `loc / scale` is a valid operation).

    Args:
      loc: Floating point tensor which characterizes the location (center)
        of the distribution.
      scale: Positive floating point tensor which characterizes the spread of
        the distribution.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value '`NaN`' to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if `loc` and `scale` are of different dtype.
    """
        parameters = dict(locals())
        with tf.name_scope(name) as name:
            dtype = dtype_util.common_dtype([loc, scale], tf.float32)
            self._loc = tensor_util.convert_immutable_to_tensor(loc,
                                                                name='loc',
                                                                dtype=dtype)
            self._scale = tensor_util.convert_immutable_to_tensor(scale,
                                                                  name='scale',
                                                                  dtype=dtype)
            dtype_util.assert_same_float_dtype([self._loc, self._scale])
            super(Laplace, self).__init__(
                dtype=dtype,
                reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
                validate_args=validate_args,
                allow_nan_stats=allow_nan_stats,
                parameters=parameters,
                name=name)
Example #11
    def __init__(self,
                 concentration,
                 rate,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="Gamma"):
        """Construct Gamma with `concentration` and `rate` parameters.

    The parameters `concentration` and `rate` must be shaped in a way that
    supports broadcasting (e.g. `concentration + rate` is a valid operation).

    Args:
      concentration: Floating point tensor, the concentration params of the
        distribution(s). Must contain only positive values.
      rate: Floating point tensor, the inverse scale params of the
        distribution(s). Must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if `concentration` and `rate` are different dtypes.
    """
        parameters = dict(locals())
        with tf.name_scope(name) as name:
            dtype = dtype_util.common_dtype([concentration, rate],
                                            dtype_hint=tf.float32)
            self._concentration = tensor_util.convert_immutable_to_tensor(
                concentration, dtype=dtype, name="concentration")
            self._rate = tensor_util.convert_immutable_to_tensor(rate,
                                                                 dtype=dtype,
                                                                 name="rate")

            super(Gamma, self).__init__(
                dtype=dtype,
                validate_args=validate_args,
                allow_nan_stats=allow_nan_stats,
                reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
                parameters=parameters,
                name=name)
Example #12
  def __init__(self,
               logits=None,
               probs=None,
               dtype=tf.int32,
               validate_args=False,
               allow_nan_stats=True,
               name='Bernoulli'):
    """Construct Bernoulli distributions.

    Args:
      logits: An N-D `Tensor` representing the log-odds of a `1` event. Each
        entry in the `Tensor` parametrizes an independent Bernoulli distribution
        where the probability of an event is sigmoid(logits). Only one of
        `logits` or `probs` should be passed in.
      probs: An N-D `Tensor` representing the probability of a `1`
        event. Each entry in the `Tensor` parameterizes an independent
        Bernoulli distribution. Only one of `logits` or `probs` should be passed
        in.
      dtype: The type of the event samples. Default: `int32`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      ValueError: If `probs` and `logits` are both passed, or if neither is
        passed.
    """
    parameters = dict(locals())
    if (probs is None) == (logits is None):
      raise ValueError('Must pass probs or logits, but not both.')
    with tf.name_scope(name) as name:
      self._probs = tensor_util.convert_immutable_to_tensor(
          probs, dtype_hint=tf.float32, name='probs')
      self._logits = tensor_util.convert_immutable_to_tensor(
          logits, dtype_hint=tf.float32, name='logits')
    super(Bernoulli, self).__init__(
        dtype=dtype,
        reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        name=name)
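The docstring's statement that the event probability is `sigmoid(logits)` can be checked directly with a small NumPy sketch (not library code):

import numpy as np

logits = np.array([-2., 0., 3.])
probs = 1. / (1. + np.exp(-logits))  # sigmoid
print(probs)  # ~[0.119, 0.5, 0.953]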
Example #13
    def __init__(self,
                 logits=None,
                 probs=None,
                 dtype=tf.int32,
                 validate_args=False,
                 allow_nan_stats=True,
                 name='Categorical'):
        """Initialize Categorical distributions using class log-probabilities.

    Args:
      logits: An N-D `Tensor`, `N >= 1`, representing the unnormalized
        log probabilities of a set of Categorical distributions. The first
        `N - 1` dimensions index into a batch of independent distributions
        and the last dimension represents a vector of logits for each class.
        Only one of `logits` or `probs` should be passed in.
      probs: An N-D `Tensor`, `N >= 1`, representing the probabilities
        of a set of Categorical distributions. The first `N - 1` dimensions
        index into a batch of independent distributions and the last dimension
        represents a vector of probabilities for each class. Only one of
        `logits` or `probs` should be passed in.
      dtype: The type of the event samples (default: int32).
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
        parameters = dict(locals())
        if (probs is None) == (logits is None):
            raise ValueError('Must pass probs or logits, but not both.')
        with tf.name_scope(name) as name:
            self._probs = tensor_util.convert_immutable_to_tensor(
                probs, dtype_hint=tf.float32, name='probs')
            self._logits = tensor_util.convert_immutable_to_tensor(
                logits, dtype_hint=tf.float32, name='logits')
            super(Categorical, self).__init__(
                dtype=dtype,
                reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
                validate_args=validate_args,
                allow_nan_stats=allow_nan_stats,
                parameters=parameters,
                name=name)
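For `Categorical`, the two parameterizations are related by a softmax over the last axis; a minimal NumPy sketch (not library code):

import numpy as np

logits = np.array([[1., 2., 3.],
                   [0., 0., 0.]])
probs = np.exp(logits - logits.max(axis=-1, keepdims=True))
probs /= probs.sum(axis=-1, keepdims=True)
print(probs)  # each row sums to 1; the second row is uniform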
Example #14
    def __init__(self,
                 loc,
                 scale,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="Normal"):
        """Construct Normal distributions with mean and stddev `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g. `loc + scale` is a valid operation).

    Args:
      loc: Floating point tensor; the means of the distribution(s).
      scale: Floating point tensor; the stddevs of the distribution(s).
        Must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if `loc` and `scale` have different `dtype`.
    """
        parameters = dict(locals())
        with tf.name_scope(name) as name:
            dtype = dtype_util.common_dtype([loc, scale],
                                            preferred_dtype=tf.float32)
            self._loc = tensor_util.convert_immutable_to_tensor(loc,
                                                                dtype=dtype,
                                                                name="loc")
            self._scale = tensor_util.convert_immutable_to_tensor(scale,
                                                                  dtype=dtype,
                                                                  name="scale")
            super(Normal, self).__init__(
                dtype=dtype,
                reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
                validate_args=validate_args,
                allow_nan_stats=allow_nan_stats,
                parameters=parameters,
                name=name)
Example #15
 def __init__(self, scale=2, validate_args=False, name=None):
     with tf.name_scope(name or 'forward_only') as name:
         self._scale = tensor_util.convert_immutable_to_tensor(
             scale, dtype_hint=tf.float32)
         super(ForwardOnlyBijector,
               self).__init__(validate_args=validate_args,
                              forward_min_event_ndims=0,
                              name=name)
Example #16
    def __init__(self,
                 shift=None,
                 scale=None,
                 validate_args=False,
                 name="affine_scalar"):
        """Instantiates the `AffineScalar` bijector.

    This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
    giving the forward operation:

    ```none
    Y = g(X) = scale * X + shift
    ```

    If `scale` is not specified, then the bijector has the semantics of
    `scale = 1.`. Similarly, if `shift` is not specified, then the bijector
    has the semantics of `shift = 0.`.

    Args:
      shift: Floating-point `Tensor`. If this is set to `None`, no shift is
        applied.
      scale: Floating-point `Tensor`. If this is set to `None`, no scale is
        applied.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.
    """
        with tf.name_scope(name) as name:
            dtype = dtype_util.common_dtype([shift, scale],
                                            dtype_hint=tf.float32)

            self._shift = tensor_util.convert_immutable_to_tensor(shift,
                                                                  dtype=dtype,
                                                                  name="shift")
            self._scale = tensor_util.convert_immutable_to_tensor(scale,
                                                                  dtype=dtype,
                                                                  name="scale")

            super(AffineScalar, self).__init__(forward_min_event_ndims=0,
                                               is_constant_jacobian=True,
                                               validate_args=validate_args,
                                               dtype=dtype,
                                               name=name)
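A worked NumPy sketch (illustrative only) of the forward operation `Y = g(X) = scale * X + shift` and the `scale = 1.`, `shift = 0.` defaults described above:

import numpy as np

def affine_scalar_forward(x, shift=None, scale=None):
    shift = 0. if shift is None else shift
    scale = 1. if scale is None else scale
    return scale * x + shift

x = np.array([1., 2., 3.])
print(affine_scalar_forward(x, shift=10., scale=2.))  # [12. 14. 16.]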
Example #17
  def __init__(self,
               concentration1,
               concentration0,
               validate_args=False,
               allow_nan_stats=True,
               name="Beta"):
    """Initialize a batch of Beta distributions.

    Args:
      concentration1: Positive floating-point `Tensor` indicating mean
        number of successes; aka "alpha". Implies `self.dtype` and
        `self.batch_shape`, i.e.,
        `concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.
      concentration0: Positive floating-point `Tensor` indicating mean
        number of failures; aka "beta". Otherwise has same semantics as
        `concentration1`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    parameters = dict(locals())
    with tf.name_scope(name) as name:
      dtype = dtype_util.common_dtype([concentration1, concentration0],
                                      dtype_hint=tf.float32)
      self._concentration1 = tensor_util.convert_immutable_to_tensor(
          concentration1, dtype=dtype, name="concentration1")
      self._concentration0 = tensor_util.convert_immutable_to_tensor(
          concentration0, dtype=dtype, name="concentration0")
      super(Beta, self).__init__(
          dtype=dtype,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
          parameters=parameters,
          name=name)
Example #18
    def __init__(self,
                 shift=None,
                 scale=None,
                 adjoint=False,
                 validate_args=False,
                 name='affine_linear_operator'):
        """Instantiates the `AffineLinearOperator` bijector.

    Args:
      shift: Floating-point `Tensor`.
      scale:  Subclass of `LinearOperator`. Represents the (batch) positive
        definite matrix `M` in `R^{k x k}`.
      adjoint: Python `bool` indicating whether to use the `scale` matrix as
        specified or its adjoint.
        Default value: `False`.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      TypeError: if `scale` is not a `LinearOperator`.
      TypeError: if `shift.dtype` does not match `scale.dtype`.
      ValueError: if not `scale.is_non_singular`.
    """
        with tf.name_scope(name) as name:
            dtype = dtype_util.common_dtype([shift, scale],
                                            preferred_dtype=tf.float32)
            self._shift = tensor_util.convert_immutable_to_tensor(shift,
                                                                  dtype=dtype,
                                                                  name='shift')
            if scale is not None:
                if not isinstance(scale, tf.linalg.LinearOperator):
                    raise TypeError(
                        'scale is not an instance of tf.LinearOperator')
                if validate_args and not scale.is_non_singular:
                    raise ValueError('Scale matrix must be non-singular.')
            self._scale = scale
            self._adjoint = adjoint
            super(AffineLinearOperator,
                  self).__init__(forward_min_event_ndims=1,
                                 is_constant_jacobian=True,
                                 dtype=dtype,
                                 validate_args=validate_args,
                                 name=name)
Example #19
  def __init__(self, axis=-1, validate_args=False, name='cumsum'):
    """Instantiates the `Cumsum` bijector.

    Args:
      axis: Negative Python `int` indicating the axis along which to compute the
        cumulative sum. Note that positive (and zero) values are not supported.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      TypeError: if `axis` is not an `int`.
      ValueError: if `axis` is not negative.
    """
    with tf.name_scope(name) as name:
      self._axis = tensor_util.convert_immutable_to_tensor(
          axis, dtype_hint=tf.int32, name='axis')
      super(Cumsum, self).__init__(
          is_constant_jacobian=True,
          # Positive because we verify `axis < 0`.
          forward_min_event_ndims=-axis,
          validate_args=validate_args,
          name=name)
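The `forward_min_event_ndims=-axis` argument follows from the fact that a cumulative sum over `axis=-k` needs at least `k` event dimensions; a small NumPy illustration (not library code):

import numpy as np

x = np.arange(6.).reshape(2, 3)
print(np.cumsum(x, axis=-1))  # axis=-1 -> needs at least 1 event dimension
print(np.cumsum(x, axis=-2))  # axis=-2 -> needs at least 2 event dimensions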
Example #20
    def __init__(self,
                 rate,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="Exponential"):
        """Construct Exponential distribution with parameter `rate`.

    Args:
      rate: Floating point tensor, equivalent to `1 / mean`. Must contain only
        positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
        parameters = dict(locals())
        # Even though all statistics of the Exponential are defined for valid
        # inputs, this is not true in the parent class `Gamma`. Therefore,
        # passing allow_nan_stats=True through to the parent class results in
        # unnecessary asserts.
        with tf.name_scope(name) as name:
            self._rate = tensor_util.convert_immutable_to_tensor(
                rate,
                name="rate",
                dtype=dtype_util.common_dtype([rate], dtype_hint=tf.float32))
            super(Exponential, self).__init__(concentration=1.,
                                              rate=self._rate,
                                              allow_nan_stats=allow_nan_stats,
                                              validate_args=validate_args,
                                              name=name)
            self._parameters = parameters
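The docstring notes that `rate` is equivalent to `1 / mean`; a quick sampling check with NumPy (a sketch, not library code):

import numpy as np

rate = 0.25
samples = np.random.default_rng(0).exponential(scale=1. / rate, size=100000)
print(samples.mean())  # close to 1 / rate == 4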
Example #21
    def __init__(self,
                 loc,
                 atol=None,
                 rtol=None,
                 is_vector=False,
                 validate_args=False,
                 allow_nan_stats=True,
                 parameters=None,
                 name="_BaseDeterministic"):
        """Initialize a batch of `_BaseDeterministic` distributions.

    The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`
    computations, e.g. due to floating-point error.

    ```
    pmf(x; loc)
      = 1, if Abs(x - loc) <= atol + rtol * Abs(loc),
      = 0, otherwise.
    ```

    Args:
      loc: Numeric `Tensor`.  The point (or batch of points) on which this
        distribution is supported.
      atol:  Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
        shape.  The absolute tolerance for comparing closeness to `loc`.
        Default is `0`.
      rtol:  Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
        shape.  The relative tolerance for comparing closeness to `loc`.
        Default is `0`.
      is_vector:  Python `bool`.  If `True`, this is for `VectorDeterministic`,
        else `Deterministic`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      parameters: Dict of locals to facilitate copy construction.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      ValueError:  If `loc` is a scalar.
    """
        with tf.name_scope(name) as name:
            dtype = dtype_util.common_dtype([loc, atol, rtol],
                                            dtype_hint=tf.float32)
            self._loc = tensor_util.convert_immutable_to_tensor(
                loc, dtype_hint=dtype, name="loc")
            self._atol = tensor_util.convert_immutable_to_tensor(
                0 if atol is None else atol, dtype=dtype, name="atol")
            self._rtol = tensor_util.convert_immutable_to_tensor(
                0 if rtol is None else rtol, dtype=dtype, name="rtol")
            self._is_vector = is_vector

            super(_BaseDeterministic, self).__init__(
                dtype=self._loc.dtype,
                reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
                validate_args=validate_args,
                allow_nan_stats=allow_nan_stats,
                parameters=parameters,
                name=name)
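The tolerance-based pmf from the docstring, written out as a NumPy sketch (illustrative only):

import numpy as np

def deterministic_pmf(x, loc, atol=0., rtol=0.):
    return np.where(np.abs(x - loc) <= atol + rtol * np.abs(loc), 1., 0.)

print(deterministic_pmf(np.array([0.999, 1.0, 1.2]), loc=1., atol=1e-2))  # [1. 1. 0.]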
Example #22
    def __init__(self,
                 rate=None,
                 log_rate=None,
                 interpolate_nondiscrete=True,
                 validate_args=False,
                 allow_nan_stats=True,
                 name='Poisson'):
        """Initialize a batch of Poisson distributions.

    Args:
      rate: Floating point tensor, the rate parameter. `rate` must be positive.
        Must specify exactly one of `rate` and `log_rate`.
      log_rate: Floating point tensor, the log of the rate parameter.
        Must specify exactly one of `rate` and `log_rate`.
      interpolate_nondiscrete: Python `bool`. When `False`,
        `log_prob` returns `-inf` (and `prob` returns `0`) for non-integer
        inputs. When `True`, `log_prob` evaluates the continuous function
        `k * log_rate - lgamma(k+1) - rate`, which matches the Poisson pmf
        at integer arguments `k` (note that this function is not itself
        a normalized probability log-density).
        Default value: `True`.
      validate_args: Python `bool`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
        Default value: `False`.
      allow_nan_stats: Python `bool`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
        Default value: `True`.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      ValueError: if none or both of `rate`, `log_rate` are specified.
      TypeError: if `rate` is not a float-type.
      TypeError: if `log_rate` is not a float-type.
    """
        parameters = dict(locals())
        if (rate is None) == (log_rate is None):
            raise ValueError(
                'Must specify exactly one of `rate` and `log_rate`.')
        with tf.name_scope(name) as name:
            dtype = dtype_util.common_dtype([rate, log_rate],
                                            dtype_hint=tf.float32)
            if not dtype_util.is_floating(dtype):
                raise TypeError(
                    '[log_]rate.dtype ({}) is not a float-type.'.format(
                        dtype_util.name(dtype)))
            self._rate = tensor_util.convert_immutable_to_tensor(rate,
                                                                 name='rate',
                                                                 dtype=dtype)
            self._log_rate = tensor_util.convert_immutable_to_tensor(
                log_rate, name='log_rate', dtype=dtype)

            self._interpolate_nondiscrete = interpolate_nondiscrete
            super(Poisson, self).__init__(
                dtype=dtype,
                reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
                validate_args=validate_args,
                allow_nan_stats=allow_nan_stats,
                parameters=parameters,
                name=name)
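The continuous function used when `interpolate_nondiscrete=True` agrees with the Poisson pmf at integer `k`; a quick check using only the standard library (a sketch, not library code):

import math

def poisson_log_prob_continuous(k, rate):
    return k * math.log(rate) - math.lgamma(k + 1.) - rate

rate, k = 3., 2
exact_pmf = rate ** k * math.exp(-rate) / math.factorial(k)
print(math.isclose(poisson_log_prob_continuous(k, rate), math.log(exact_pmf)))  # True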
Example #23
    def __init__(self,
                 shift=None,
                 scale=None,
                 log_scale=None,
                 validate_args=False,
                 name='affine_scalar'):
        """Instantiates the `AffineScalar` bijector.

    This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
    giving the forward operation:

    ```none
    Y = g(X) = scale * X + shift
    ```

    Alternatively, you can specify `log_scale` instead of `scale` for slightly
    better numerics with tiny scales. Note that when using `log_scale` it is
    currently impossible to specify a negative scale.

    If `scale` or `log_scale` are not specified, then the bijector has the
    semantics of `scale = 1.`. Similarly, if `shift` is not specified, then the
    bijector has the semantics of `shift = 0.`.

    Args:
      shift: Floating-point `Tensor`. If this is set to `None`, no shift is
        applied.
      scale: Floating-point `Tensor`. If this is set to `None`, no scale is
        applied. This should not be set if `log_scale` is set.
      log_scale: Floating-point `Tensor`. Logarithm of the scale. If this is set
        to `None`, no scale is applied. This should not be set if `scale` is
        set.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      ValueError: If both `scale` and `log_scale` are specified.
    """
        with tf.name_scope(name) as name:
            dtype = dtype_util.common_dtype([shift, scale, log_scale],
                                            dtype_hint=tf.float32)

            if scale is not None and log_scale is not None:
                raise ValueError(
                    'At most one of `scale` and `log_scale` should be '
                    'specified')

            self._shift = tensor_util.convert_immutable_to_tensor(shift,
                                                                  dtype=dtype,
                                                                  name='shift')
            self._scale = tensor_util.convert_immutable_to_tensor(scale,
                                                                  dtype=dtype,
                                                                  name='scale')
            self._log_scale = tensor_util.convert_immutable_to_tensor(
                log_scale, dtype=dtype, name='log_scale')

            super(AffineScalar, self).__init__(forward_min_event_ndims=0,
                                               is_constant_jacobian=True,
                                               validate_args=validate_args,
                                               dtype=dtype,
                                               name=name)
Example #24
 def test_tf_tensor(self):
     x = tf.constant(1.)
     y = tensor_util.convert_immutable_to_tensor(x)
     self.assertIs(x, y)
Example #25
 def test_np_object(self):
     x = np.array(0.)
     y = tensor_util.convert_immutable_to_tensor(x)
     self.assertIsInstance(y, tf.Tensor)
     self.assertEqual(x, self.evaluate(y))