Example #1
    def __init__(self,
                 mu,
                 sigma,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="Normal"):
        """Construct Normal distributions with mean and stddev `mu` and `sigma`.

    The parameters `mu` and `sigma` must be shaped in a way that supports
    broadcasting (e.g. `mu + sigma` is a valid operation).

    Args:
      mu: Floating point tensor, the means of the distribution(s).
      sigma: Floating point tensor, the stddevs of the distribution(s).
        sigma must contain only positive values.
      validate_args: `Boolean`, default `False`.  Whether to assert that
        `sigma > 0`. If `validate_args` is `False`, correct output is not
        guaranteed when input is invalid.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if mu and sigma are different dtypes.
    """
        parameters = locals()
        parameters.pop("self")
        with ops.name_scope(name, values=[mu, sigma]) as ns:
            with ops.control_dependencies(
                [check_ops.assert_positive(sigma)] if validate_args else []):
                self._mu = array_ops.identity(mu, name="mu")
                self._sigma = array_ops.identity(sigma, name="sigma")
                contrib_tensor_util.assert_same_float_dtype(
                    (self._mu, self._sigma))
        super(Normal, self).__init__(
            dtype=self._sigma.dtype,
            is_continuous=True,
            reparameterization_type=distribution.FULLY_REPARAMETERIZED,
            validate_args=validate_args,
            allow_nan_stats=allow_nan_stats,
            parameters=parameters,
            graph_parents=[self._mu, self._sigma],
            name=ns)
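For orientation, the constructor above only requires that `mu` and `sigma` broadcast against each other and share a floating dtype. A minimal NumPy sketch of both invariants (illustrative only, not the TensorFlow API):

```python
import numpy as np

mu = np.array([0.0, 1.0, 2.0], dtype=np.float32)  # three means
sigma = np.float32(0.5)                           # one stddev, broadcast to all

# Broadcasting check: `mu + sigma` must be a valid operation.
print(np.broadcast(mu, sigma).shape)              # (3,): a batch of 3 Normals

# Same-float-dtype check, mirroring what assert_same_float_dtype enforces.
dtypes = {np.asarray(t).dtype for t in (mu, sigma)}
assert len(dtypes) == 1 and next(iter(dtypes)).kind == "f", dtypes
```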
Example #2
  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="Laplace"):
    """Construct Laplace distribution with parameters `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g., `loc / scale` is a valid operation).

    Args:
      loc: Floating point tensor which characterizes the location (center)
        of the distribution.
      scale: Positive floating point tensor which characterizes the spread of
        the distribution.
      validate_args: `Boolean`, default `False`.  Whether to validate input
        with asserts.  If `validate_args` is `False`, and the inputs are
        invalid, correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if `loc` and `scale` are of different dtype.
    """
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[loc, scale]) as ns:
      with ops.control_dependencies([check_ops.assert_positive(scale)] if
                                    validate_args else []):
        self._loc = array_ops.identity(loc, name="loc")
        self._scale = array_ops.identity(scale, name="scale")
        contrib_tensor_util.assert_same_float_dtype((self._loc, self._scale))
      super(Laplace, self).__init__(
          dtype=self._loc.dtype,
          is_continuous=True,
          is_reparameterized=True,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          parameters=parameters,
          graph_parents=[self._loc, self._scale],
          name=ns)
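For reference, the density this class represents under the same `loc`/`scale` parameterization, sketched in NumPy (an illustration, not part of the library):

```python
import numpy as np

def laplace_pdf(x, loc, scale):
    # f(x) = exp(-|x - loc| / scale) / (2 * scale); broadcasting applies.
    z = np.abs(np.asarray(x) - loc) / scale
    return np.exp(-z) / (2.0 * scale)

print(laplace_pdf(0.0, loc=0.0, scale=1.0))  # 0.5, the peak density at `loc`
```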
Example #3
  def __init__(self,
               low=0.,
               high=1.,
               validate_args=False,
               allow_nan_stats=True,
               name="Uniform"):
    """Initialize a batch of Uniform distributions.

    Args:
      low: Floating point tensor, lower boundary of the output interval. Must
        have `low < high`.
      high: Floating point tensor, upper boundary of the output interval. Must
        have `low < high`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      InvalidArgumentError: if `low >= high` and `validate_args=True`.
    """
    parameters = locals()
    with ops.name_scope(name, values=[low, high]) as ns:
      with ops.control_dependencies([
          check_ops.assert_less(
              low, high, message="uniform not defined when low >= high.")
      ] if validate_args else []):
        self._low = array_ops.identity(low, name="low")
        self._high = array_ops.identity(high, name="high")
        contrib_tensor_util.assert_same_float_dtype([self._low, self._high])
    super(Uniform, self).__init__(
        dtype=self._low.dtype,
        reparameterization_type=distribution.FULLY_REPARAMETERIZED,
        is_continuous=True,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._low,
                       self._high],
        name=ns)
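The two checks in this initializer, `low < high` elementwise and a batch shape implied by broadcasting, can be sketched in plain NumPy (illustrative only):

```python
import numpy as np

low = np.array([0.0, 1.0])
high = np.array([[2.0], [3.0]])

assert np.all(low < high)             # "uniform not defined when low >= high"
print(np.broadcast(low, high).shape)  # (2, 2): a batch of four distributions
```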
Example #4
    def __init__(self,
                 loc,
                 scale,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="Normal"):
        """Construct Normal distributions with mean and stddev `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g. `loc + scale` is a valid operation).

    Args:
      loc: Floating point tensor; the means of the distribution(s).
      scale: Floating point tensor; the stddevs of the distribution(s).
        Must contain only positive values.
      validate_args: Python `Boolean`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `Boolean`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined.  When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: `String` name prefixed to Ops created by this class.

    Raises:
      TypeError: if `loc` and `scale` have different `dtype`.
    """
        parameters = locals()
        with ops.name_scope(name, values=[loc, scale]) as ns:
            with ops.control_dependencies(
                [check_ops.assert_positive(scale)] if validate_args else []):
                self._loc = array_ops.identity(loc, name="loc")
                self._scale = array_ops.identity(scale, name="scale")
                contrib_tensor_util.assert_same_float_dtype(
                    (self._loc, self._scale))
        super(Normal, self).__init__(
            dtype=self._scale.dtype,
            is_continuous=True,
            reparameterization_type=distribution.FULLY_REPARAMETERIZED,
            validate_args=validate_args,
            allow_nan_stats=allow_nan_stats,
            parameters=parameters,
            graph_parents=[self._loc, self._scale],
            name=ns)
Example #5
    def __init__(self,
                 r,
                 p,
                 pi,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="ZeroInflatedNegativeBinomial"):
        """Construct zero-inflated negative binomial distributions.

    Args:
      r: Floating point tensor, the number of failures before stop parameter
        of the distribution(s). `r` must be positive.
      p: Floating point tensor, the success probability parameter of the
        distribution(s). `p` must be in the interval [0, 1].
      pi: Floating point tensor, the zero-inflation probability of the
        distribution(s). `pi` must be in the interval [0, 1].
      validate_args: `Boolean`, default `False`.  Whether to assert that
        `r`, `p`, and `pi` are positive, as well as that inputs to pmf
        computations are non-negative integers. If `validate_args` is `False`,
        then `pmf` computations might return `NaN`, but can be evaluated at
        any real value.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: A name for this distribution.
    """
        parameters = locals()
        parameters.pop("self")
        with ops.name_scope(name, values=[r, p, pi]) as ns:
            with ops.control_dependencies([
                    check_ops.assert_positive(r),
                    check_ops.assert_positive(p),
                    check_ops.assert_positive(pi)
            ] if validate_args else []):
                self._r = array_ops.identity(r, name="r")
                self._p = array_ops.identity(p, name="p")
                self._pi = array_ops.identity(pi, name="pi")
                contrib_tensor_util.assert_same_float_dtype(
                    (self._r, self._p, self._pi))
        super(ZeroInflatedNegativeBinomial,
              self).__init__(dtype=self._r.dtype,
                             is_continuous=False,
                             is_reparameterized=False,
                             validate_args=validate_args,
                             allow_nan_stats=allow_nan_stats,
                             parameters=parameters,
                             graph_parents=[self._r, self._p, self._pi],
                             name=ns)
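The pmf implied by this constructor is a two-component mixture. A hedged SciPy sketch, assuming `pi` is the zero-inflation probability and that SciPy's `nbinom` (which counts `k` failures before the `n`-th success) matches the `(r, p)` convention above:

```python
import numpy as np
from scipy.stats import nbinom

def zinb_pmf(k, r, p, pi):
    # With probability `pi`, emit a structural zero; otherwise draw from the
    # negative binomial. The nbinom parameterization here is an assumption.
    k = np.asarray(k)
    return pi * (k == 0) + (1.0 - pi) * nbinom.pmf(k, r, p)

print(zinb_pmf([0, 1, 2], r=2.0, p=0.5, pi=0.3))
```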
Example #6
  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="Laplace"):
    """Construct Laplace distribution with parameters `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g., `loc / scale` is a valid operation).

    Args:
      loc: Floating point tensor which characterizes the location (center)
        of the distribution.
      scale: Positive floating point tensor which characterizes the spread of
        the distribution.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if `loc` and `scale` are of different dtype.
    """
    parameters = locals()
    with ops.name_scope(name, values=[loc, scale]):
      with ops.control_dependencies([check_ops.assert_positive(scale)] if
                                    validate_args else []):
        self._loc = array_ops.identity(loc, name="loc")
        self._scale = array_ops.identity(scale, name="scale")
        contrib_tensor_util.assert_same_float_dtype([self._loc, self._scale])
      super(Laplace, self).__init__(
          dtype=self._loc.dtype,
          reparameterization_type=distribution.FULLY_REPARAMETERIZED,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          parameters=parameters,
          graph_parents=[self._loc, self._scale],
          name=name)
Example #7
  def __init__(
      self, alpha, beta, strict=True, strict_statistics=True, name="Gamma"):
    """Construct Gamma distributions with parameters `alpha` and `beta`.

    The parameters `alpha` and `beta` must be shaped in a way that supports
    broadcasting (e.g. `alpha + beta` is a valid operation).

    Args:
      alpha: `float` or `double` tensor, the shape params of the
        distribution(s).
        alpha must contain only positive values.
      beta: `float` or `double` tensor, the inverse scale params of the
        distribution(s).
        beta must contain only positive values.
      strict: Whether to assert that `a > 0, b > 0`, and that `x > 0` in the
        methods `prob(x)` and `log_prob(x)`.  If `strict` is False
        and the inputs are invalid, correct behavior is not guaranteed.
      strict_statistics:  Boolean, default True.  If True, raise an exception if
        a statistic (e.g. mean/mode/etc...) is undefined for any batch member.
        If False, batch members with valid parameters leading to undefined
        statistics will return NaN for this statistic.
      name: The name to prepend to all ops created by this distribution.

    Raises:
      TypeError: if `alpha` and `beta` are different dtypes.
    """
    self._strict_statistics = strict_statistics
    self._strict = strict
    with ops.op_scope([alpha, beta], name) as scope:
      self._name = scope
      with ops.control_dependencies(
          [check_ops.assert_positive(alpha), check_ops.assert_positive(beta)]
          if strict else []):
        alpha = array_ops.identity(alpha, name="alpha")
        beta = array_ops.identity(beta, name="beta")

        contrib_tensor_util.assert_same_float_dtype((alpha, beta))
        self._broadcast_tensor = alpha + beta

    self._get_batch_shape = self._broadcast_tensor.get_shape()
    self._get_event_shape = tensor_shape.TensorShape([])

    self._alpha = alpha
    self._beta = beta
Example #8
    def __init__(self,
                 loc,
                 scale,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="Gumbel"):
        """Construct Gumbel distributions with location and scale `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g. `loc + scale` is a valid operation).

    Args:
      loc: Floating point tensor, the locations of the distribution(s).
      scale: Floating point tensor, the scales of the distribution(s).
        scale must contain only positive values.
      validate_args: `Boolean`, default `False`.  Whether to assert that
        `scale > 0`. If `validate_args` is `False`, correct output is not
        guaranteed when input is invalid.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if loc and scale are different dtypes.
    """
        parameters = locals()
        parameters.pop("self")
        with ops.name_scope(name, values=[loc, scale]) as ns:
            with ops.control_dependencies(
                [check_ops.assert_positive(scale)] if validate_args else []):
                self._loc = array_ops.identity(loc, name="loc")
                self._scale = array_ops.identity(scale, name="scale")
                contrib_tensor_util.assert_same_float_dtype(
                    (self._loc, self._scale))
        super(_Gumbel, self).__init__(dtype=self._scale.dtype,
                                      is_continuous=True,
                                      is_reparameterized=True,
                                      validate_args=validate_args,
                                      allow_nan_stats=allow_nan_stats,
                                      parameters=parameters,
                                      graph_parents=[self._loc, self._scale],
                                      name=ns)
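For reference, the Gumbel density under this `loc`/`scale` parameterization, sketched in NumPy (illustrative only):

```python
import numpy as np

def gumbel_pdf(x, loc, scale):
    # z = (x - loc) / scale;  f(x) = exp(-(z + exp(-z))) / scale
    z = (np.asarray(x) - loc) / scale
    return np.exp(-(z + np.exp(-z))) / scale

print(gumbel_pdf(0.0, loc=0.0, scale=1.0))  # exp(-1) ~ 0.368, density at the mode
```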
Example #9
  def __init__(self,
               alpha,
               beta,
               validate_args=True,
               allow_nan_stats=False,
               name="Gamma"):
    """Construct Gamma distributions with parameters `alpha` and `beta`.

    The parameters `alpha` and `beta` must be shaped in a way that supports
    broadcasting (e.g. `alpha + beta` is a valid operation).

    Args:
      alpha: Floating point tensor, the shape params of the
        distribution(s).
        alpha must contain only positive values.
      beta: Floating point tensor, the inverse scale params of the
        distribution(s).
        beta must contain only positive values.
      validate_args: Whether to assert that `a > 0, b > 0`, and that `x > 0` in
        the methods `prob(x)` and `log_prob(x)`.  If `validate_args` is `False`
        and the inputs are invalid, correct behavior is not guaranteed.
      allow_nan_stats:  Boolean, default `False`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to prepend to all ops created by this distribution.

    Raises:
      TypeError: if `alpha` and `beta` are different dtypes.
    """
    with ops.name_scope(name, values=[alpha, beta]) as ns:
      with ops.control_dependencies([
          check_ops.assert_positive(alpha),
          check_ops.assert_positive(beta),
      ] if validate_args else []):
        self._alpha = array_ops.identity(alpha, name="alpha")
        self._beta = array_ops.identity(beta, name="beta")
        contrib_tensor_util.assert_same_float_dtype((self._alpha, self._beta))
        super(Gamma, self).__init__(
            dtype=self._alpha.dtype,
            parameters={"alpha": self._alpha, "beta": self._beta},
            validate_args=validate_args,
            allow_nan_stats=allow_nan_stats,
            name=ns)
Example #10
  def __init__(self, a=0.0, b=1.0, name="Uniform"):
    """Construct Uniform distributions with `a` and `b`.

    The parameters `a` and `b` must be shaped in a way that supports
    broadcasting (e.g. `b - a` is a valid operation).

    Here are examples without broadcasting:

    ```python
    # Without broadcasting
    u1 = Uniform(3.0, 4.0)  # a single uniform distribution [3, 4]
    u2 = Uniform([1.0, 2.0], [3.0, 4.0])  # 2 distributions [1, 3], [2, 4]
    u3 = Uniform([[1.0, 2.0],
                  [3.0, 4.0]],
                 [[1.5, 2.5],
                  [3.5, 4.5]])  # 4 distributions
    ```

    And with broadcasting:

    ```python
    u1 = Uniform(3.0, [5.0, 6.0, 7.0])  # 3 distributions
    ```

    Args:
      a: `float` or `double` tensor, the minimum endpoint.
      b: `float` or `double` tensor, the maximum endpoint. Must be > `a`.
      name: The name to prefix Ops created by this distribution class.

    Raises:
      InvalidArgumentError: if `a >= b`.
    """
    with ops.op_scope([a, b], name):
      with ops.control_dependencies([check_ops.assert_less(a, b)]):
        a = array_ops.identity(a, name="a")
        b = array_ops.identity(b, name="b")

    self._a = a
    self._b = b
    self._name = name
    self._batch_shape = self._ones().get_shape()
    self._event_shape = tensor_shape.TensorShape([])

    contrib_tensor_util.assert_same_float_dtype((a, b))
Example #11
    def __init__(self,
                 loc,
                 scale,
                 validate_args=True,
                 allow_nan_stats=False,
                 name="Laplace"):
        """Construct Laplace distribution with parameters `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g., `loc / scale` is a valid operation).

    Args:
      loc: Floating point tensor which characterizes the location (center)
        of the distribution.
      scale: Positive floating point tensor which characterizes the spread of
        the distribution.
      validate_args: Whether to validate input with asserts.  If `validate_args`
        is `False`, and the inputs are invalid, correct behavior is not
        guaranteed.
      allow_nan_stats:  Boolean, default `False`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if `loc` and `scale` are of different dtype.
    """
        self._allow_nan_stats = allow_nan_stats
        self._validate_args = validate_args
        with ops.name_scope(name, values=[loc, scale]):
            loc = ops.convert_to_tensor(loc)
            scale = ops.convert_to_tensor(scale)
            with ops.control_dependencies(
                [check_ops.assert_positive(scale)] if validate_args else []):
                self._name = name
                self._loc = array_ops.identity(loc, name="loc")
                self._scale = array_ops.identity(scale, name="scale")
                self._batch_shape = common_shapes.broadcast_shape(
                    self._loc.get_shape(), self._scale.get_shape())
                self._event_shape = tensor_shape.TensorShape([])

        contrib_tensor_util.assert_same_float_dtype((loc, scale))
Example #12
  def log_cdf(self, x, name="log_cdf"):
    """Log CDF of observations `x` under these Gamma distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
      name: The name to give this op.

    Returns:
      log_cdf: tensor of dtype `dtype`, the log-CDFs of `x`.
    """
    with ops.op_scope([self._alpha, self._beta, x], self.name):
      with ops.name_scope(name):
        x = ops.convert_to_tensor(x)
        x = control_flow_ops.with_dependencies(
            [check_ops.assert_positive(x)], x)
        contrib_tensor_util.assert_same_float_dtype(tensors=[x,],
                                                    dtype=self.dtype)
        # Note that igamma returns the regularized incomplete gamma function,
        # which is what we want for the CDF.
        return math_ops.log(math_ops.igamma(self._alpha, self._beta * x))
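The comment in the snippet relies on `igamma` being the regularized lower incomplete gamma function, i.e. `CDF(x; alpha, beta) = P(alpha, beta * x)`. A quick SciPy cross-check of the same identity (illustrative only):

```python
import numpy as np
from scipy.special import gammainc  # regularized lower incomplete gamma P(a, x)
from scipy.stats import gamma

alpha, beta, x = 2.0, 3.0, 1.5
np.testing.assert_allclose(
    np.log(gammainc(alpha, beta * x)),           # log P(alpha, beta * x)
    gamma.logcdf(x, a=alpha, scale=1.0 / beta))  # reference log-CDF
```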
Example #13
    def __init__(self,
                 loc,
                 scale,
                 strict=True,
                 strict_statistics=True,
                 name="Laplace"):
        """Construct Laplace distribution with parameters `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g., `loc / scale` is a valid operation).

    Args:
      loc: `float` or `double` tensor which characterizes the location (center)
        of the distribution.
      scale: `float` or `double`, positive-valued tensor which characterizes the
        spread of the distribution.
      strict: Whether to validate input with asserts.  If `strict` is `False`,
        and the inputs are invalid, correct behavior is not guaranteed.
      strict_statistics:  Boolean, default True.  If True, raise an exception if
        a statistic (e.g. mean/mode/etc...) is undefined for any batch member.
        If False, batch members with valid parameters leading to undefined
        statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if `loc` and `scale` are of different dtype.
    """
        self._strict_statistics = strict_statistics
        self._strict = strict
        with ops.op_scope([loc, scale], name):
            loc = ops.convert_to_tensor(loc)
            scale = ops.convert_to_tensor(scale)
            with ops.control_dependencies(
                [check_ops.assert_positive(scale)] if strict else []):
                self._name = name
                self._loc = array_ops.identity(loc, name="loc")
                self._scale = array_ops.identity(scale, name="scale")
                self._batch_shape = self._ones().get_shape()
                self._event_shape = tensor_shape.TensorShape([])

        contrib_tensor_util.assert_same_float_dtype((loc, scale))
Example #14
    def __init__(self,
                 mu,
                 sigma,
                 validate_args=True,
                 allow_nan_stats=False,
                 name="Normal"):
        """Construct Normal distributions with mean and stddev `mu` and `sigma`.

    The parameters `mu` and `sigma` must be shaped in a way that supports
    broadcasting (e.g. `mu + sigma` is a valid operation).

    Args:
      mu: Floating point tensor, the means of the distribution(s).
      sigma: Floating point tensor, the stddevs of the distribution(s).
        sigma must contain only positive values.
      validate_args: Whether to assert that `sigma > 0`. If `validate_args` is
        `False`, correct output is not guaranteed when input is invalid.
      allow_nan_stats:  Boolean, default `False`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if mu and sigma are different dtypes.
    """
        self._allow_nan_stats = allow_nan_stats
        self._validate_args = validate_args
        with ops.name_scope(name, values=[mu, sigma]):
            mu = ops.convert_to_tensor(mu)
            sigma = ops.convert_to_tensor(sigma)
            with ops.control_dependencies(
                [check_ops.assert_positive(sigma)] if validate_args else []):
                self._name = name
                self._mu = array_ops.identity(mu, name="mu")
                self._sigma = array_ops.identity(sigma, name="sigma")
                self._batch_shape = common_shapes.broadcast_shape(
                    self._mu.get_shape(), self._sigma.get_shape())
                self._event_shape = tensor_shape.TensorShape([])

        contrib_tensor_util.assert_same_float_dtype((mu, sigma))
Example #15
    def __init__(self,
                 sigma,
                 alpha,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="Pareto"):
        """Construct pareto distributions (Type 1).

    Args:
      sigma: Floating point tensor, the scale parameter of the
        distribution(s). `sigma` must be positive and non-zero.
      alpha: Floating point tensor, the shape parameter of the
        distribution(s). `alpha` must be positive and non-zero.
      validate_args: `Boolean`, default `False`.  Whether to assert that
        `sigma > 0` and `alpha > 0`. If `validate_args` is `False` and the
        inputs are invalid, correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: A name for this distribution.
    """
        parameters = locals()
        parameters.pop("self")
        with ops.name_scope(name, values=[sigma, alpha]) as ns:
            with ops.control_dependencies([
                    check_ops.assert_positive(sigma),
                    check_ops.assert_positive(alpha)
            ] if validate_args else []):
                self._sigma = array_ops.identity(sigma, name="r")
                self._alpha = array_ops.identity(alpha, name="p")
                contrib_tensor_util.assert_same_float_dtype(
                    (self._sigma, self._alpha))
        super(Pareto, self).__init__(dtype=self._sigma.dtype,
                                     is_continuous=True,
                                     is_reparameterized=False,
                                     validate_args=validate_args,
                                     allow_nan_stats=allow_nan_stats,
                                     parameters=parameters,
                                     graph_parents=[self._sigma, self._alpha],
                                     name=ns)
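For reference, the Pareto (Type 1) density with scale `sigma` and shape `alpha`, matching the parameter names above, sketched in NumPy (not the library API):

```python
import numpy as np

def pareto_pdf(x, sigma, alpha):
    # f(x) = alpha * sigma**alpha / x**(alpha + 1) for x >= sigma, else 0.
    x = np.asarray(x, dtype=float)
    pdf = alpha * sigma**alpha / x**(alpha + 1)
    return np.where(x >= sigma, pdf, 0.0)

print(pareto_pdf([0.5, 1.0, 2.0], sigma=1.0, alpha=2.0))  # [0.  2.  0.25]
```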
Example #16
    def __init__(self, alpha, beta, name="Gamma"):
        """Construct Gamma distributions with parameters `alpha` and `beta`.

    The parameters `alpha` and `beta` must be shaped in a way that supports
    broadcasting (e.g. `alpha + beta` is a valid operation).

    Args:
      alpha: `float` or `double` tensor, the shape params of the
        distribution(s).
        alpha must contain only positive values.
      beta: `float` or `double` tensor, the inverse scale params of the
        distribution(s).
        beta must contain only positive values.
      name: The name to prepend to all ops created by this distribution.

    Raises:
      TypeError: if `alpha` and `beta` are different dtypes.
    """
        with ops.op_scope([alpha, beta], name):
            with ops.control_dependencies([
                    check_ops.assert_positive(alpha),
                    check_ops.assert_positive(beta)
            ]):
                alpha = array_ops.identity(alpha, name="alpha")
                beta = array_ops.identity(beta, name="beta")

                contrib_tensor_util.assert_same_float_dtype((alpha, beta))

                with ops.name_scope("mean"):
                    self._mean = alpha / beta

                with ops.name_scope("variance"):
                    self._variance = alpha / math_ops.square(beta)

        self._get_batch_shape = self._mean.get_shape()
        self._get_event_shape = tensor_shape.TensorShape([])

        self._alpha = alpha
        self._beta = beta
        self._name = name
Example #17
    def __init__(self,
                 mu,
                 sigma,
                 strict=True,
                 strict_statistics=True,
                 name="Normal"):
        """Construct Normal distributions with mean and stddev `mu` and `sigma`.

    The parameters `mu` and `sigma` must be shaped in a way that supports
    broadcasting (e.g. `mu + sigma` is a valid operation).

    Args:
      mu: `float` or `double` tensor, the means of the distribution(s).
      sigma: `float` or `double` tensor, the stddevs of the distribution(s).
        sigma must contain only positive values.
      strict: Whether to assert that `sigma > 0`. If `strict` is False,
        correct output is not guaranteed when input is invalid.
      strict_statistics:  Boolean, default True.  If True, raise an exception if
        a statistic (e.g. mean/mode/etc...) is undefined for any batch member.
        If False, batch members with valid parameters leading to undefined
        statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if mu and sigma are different dtypes.
    """
        self._strict_statistics = strict_statistics
        self._strict = strict
        with ops.op_scope([mu, sigma], name):
            mu = ops.convert_to_tensor(mu)
            sigma = ops.convert_to_tensor(sigma)
            with ops.control_dependencies(
                [check_ops.assert_positive(sigma)] if strict else []):
                self._name = name
                self._mu = array_ops.identity(mu, name="mu")
                self._sigma = array_ops.identity(sigma, name="sigma")
                self._batch_shape = self._ones().get_shape()
                self._event_shape = tensor_shape.TensorShape([])

        contrib_tensor_util.assert_same_float_dtype((mu, sigma))
Example #18
    def __init__(self, mu, sigma, name=None):
        """Construct Gaussian distributions with mean and stddev `mu` and `sigma`.

    The parameters `mu` and `sigma` must be shaped in a way that supports
    broadcasting (e.g. `mu + sigma` is a valid operation).

    Args:
      mu: `float` or `double` tensor, the means of the distribution(s).
      sigma: `float` or `double` tensor, the stddevs of the distribution(s).
        sigma must contain only positive values.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if mu and sigma are different dtypes.
    """
        with ops.op_scope([mu, sigma], name, "Gaussian"):
            mu = ops.convert_to_tensor(mu)
            sigma = ops.convert_to_tensor(sigma)
            with ops.control_dependencies([_assert_all_positive(sigma)]):
                self._mu = array_ops.identity(mu, name="mu")
                self._sigma = array_ops.identity(sigma, name="sigma")

        contrib_tensor_util.assert_same_float_dtype((mu, sigma))
Example #19
    def __init__(self, df, mu, sigma, strict=True, name="StudentT"):
        """Construct Student's t distributions.

    The distributions have degrees of freedom `df`, mean `mu`, and scale `sigma`.

    The parameters `df`, `mu`, and `sigma` must be shaped in a way that supports
    broadcasting (e.g. `df + mu + sigma` is a valid operation).

    Args:
      df: `float` or `double` tensor, the degrees of freedom of the
        distribution(s). `df` must contain only positive values.
      mu: `float` or `double` tensor, the means of the distribution(s).
      sigma: `float` or `double` tensor, the scaling factor for the
        distribution(s). `sigma` must contain only positive values.
        Note that `sigma` is not the standard deviation of this distribution.
      strict: Whether to assert that `df > 0, sigma > 0`. If `strict` is False
        and inputs are invalid, correct behavior is not guaranteed.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if mu and sigma are different dtypes.
    """
        super(StudentT, self).__init__()
        self._strict = strict
        with ops.op_scope([df, mu, sigma], name) as scope:
            with ops.control_dependencies([
                    check_ops.assert_positive(df),
                    check_ops.assert_positive(sigma)
            ] if strict else []):
                self._df = ops.convert_to_tensor(df, name="df")
                self._mu = ops.convert_to_tensor(mu, name="mu")
                self._sigma = ops.convert_to_tensor(sigma, name="sigma")
                contrib_tensor_util.assert_same_float_dtype(
                    (self._df, self._mu, self._sigma))
            self._name = scope
            self._get_batch_shape = self._ones().get_shape()
            self._get_event_shape = tensor_shape.TensorShape([])
Example #20
  def log_pdf(self, x, name="log_pdf"):
    """Log pdf of observations in `x` under these Gamma distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
      name: The name to give this op.

    Returns:
      log_pdf: tensor of dtype `dtype`, the log-PDFs of `x`.
    Raises:
      TypeError: if `x` and `alpha` are different dtypes.
    """
    with ops.op_scope([self._alpha, self._beta, x], self.name):
      with ops.name_scope(name):
        alpha = self._alpha
        beta = self._beta
        x = ops.convert_to_tensor(x)
        x = control_flow_ops.with_dependencies(
            [check_ops.assert_positive(x)], x)
        contrib_tensor_util.assert_same_float_dtype(tensors=[x,],
                                                    dtype=self.dtype)

        return (alpha * math_ops.log(beta) + (alpha - 1) * math_ops.log(x) -
                beta * x - math_ops.lgamma(self._alpha))
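The closed form returned above is `alpha*log(beta) + (alpha - 1)*log(x) - beta*x - lgamma(alpha)`. A quick SciPy cross-check under the rate parameterization (`scale = 1/beta`), illustrative only:

```python
import numpy as np
from scipy.special import gammaln
from scipy.stats import gamma

alpha, beta, x = 3.0, 2.0, 1.25
manual = (alpha * np.log(beta) + (alpha - 1.0) * np.log(x)
          - beta * x - gammaln(alpha))
np.testing.assert_allclose(manual, gamma.logpdf(x, a=alpha, scale=1.0 / beta))
```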
Example #21
def safe_embedding_lookup_sparse(embedding_weights,
                                 sparse_ids,
                                 sparse_weights=None,
                                 combiner=None,
                                 default_id=None,
                                 name=None,
                                 partition_strategy="div",
                                 max_norm=None):
  """Lookup embedding results, accounting for invalid IDs and empty features.

  The partitioned embedding in `embedding_weights` must all be the same shape
  except for the first dimension. The first dimension is allowed to vary as the
  vocabulary size is not necessarily a multiple of `P`.  `embedding_weights`
  may be a `PartitionedVariable` as returned by using `tf.get_variable()` with a
  partitioner.

  Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
  with non-positive weight. For an entry with no features, the embedding vector
  for `default_id` is returned, or the 0-vector if `default_id` is not supplied.

  The ids and weights may be multi-dimensional. Embeddings are always aggregated
  along the last dimension.

  Args:
    embedding_weights:  A list of `P` float tensors or values representing
        partitioned embedding tensors.  Alternatively, a `PartitionedVariable`,
        created by partitioning along dimension 0.  The total unpartitioned
        shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the
        vocab size and `e_1, ..., e_m` are the embedding dimensions.
    sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
        ids. `d_0` is typically batch size.
    sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
        float weights corresponding to `sparse_ids`, or `None` if all weights
        are assumed to be 1.0.
    combiner: A string specifying how to combine embedding results for each
        entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
        the default.
    default_id: The id to use for an entry with no features.
    name: A name for this operation (optional).
    partition_strategy: A string specifying the partitioning strategy.
        Currently `"div"` and `"mod"` are supported. Default is `"div"`.
    max_norm: If not None, all embeddings are l2-normalized to max_norm before
        combining.


  Returns:
    Dense tensor of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.

  Raises:
    ValueError: if `embedding_weights` is empty.
  """
  if combiner is None:
    logging.warn("The default value of combiner will change from \"mean\" "
                 "to \"sqrtn\" after 2016/11/01.")
    combiner = "mean"
  if embedding_weights is None:
    raise ValueError("Missing embedding_weights %s." % embedding_weights)
  if isinstance(embedding_weights, variables.PartitionedVariable):
    embedding_weights = list(embedding_weights)  # get underlying Variables.
  if not isinstance(embedding_weights, list):
    embedding_weights = [embedding_weights]
  if len(embedding_weights) < 1:
    raise ValueError("Missing embedding_weights %s." % embedding_weights)

  dtype = sparse_weights.dtype if sparse_weights is not None else None
  if isinstance(embedding_weights, variables.PartitionedVariable):
    embedding_weights = list(embedding_weights)
  embedding_weights = [
      ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights
  ]

  contrib_tensor_util.assert_same_float_dtype(embedding_weights +
                                              [sparse_weights])

  with ops.name_scope(name, "embedding_lookup",
                      embedding_weights + [sparse_ids,
                                           sparse_weights]) as scope:
    # Reshape higher-rank sparse ids and weights to linear segment ids.
    original_shape = sparse_ids.dense_shape
    original_rank_dim = sparse_ids.dense_shape.get_shape()[0]
    original_rank = (
        array_ops.size(original_shape)
        if original_rank_dim.value is None
        else original_rank_dim.value)
    sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
        math_ops.reduce_prod(
            array_ops.slice(original_shape, [0], [original_rank - 1])),
        array_ops.gather(original_shape, original_rank - 1)])
    if sparse_weights is not None:
      sparse_weights = sparse_tensor.SparseTensor(
          sparse_ids.indices,
          sparse_weights.values, sparse_ids.dense_shape)

    # Prune invalid ids and weights.
    sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)

    # Fill in dummy values for empty features, if necessary.
    sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids,
                                                                 default_id or
                                                                 0)
    if sparse_weights is not None:
      sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)

    result = embedding_ops.embedding_lookup_sparse(
        embedding_weights,
        sparse_ids,
        sparse_weights,
        combiner=combiner,
        partition_strategy=partition_strategy,
        name=None if default_id is None else scope,
        max_norm=max_norm)

    if default_id is None:
      # Broadcast is_row_empty to the same shape as embedding_lookup_result,
      # for use in Select.
      is_row_empty = array_ops.tile(
          array_ops.reshape(is_row_empty, [-1, 1]),
          array_ops.stack([1, array_ops.shape(result)[1]]))

      result = array_ops.where(is_row_empty,
                               array_ops.zeros_like(result),
                               result,
                               name=scope)

    # Reshape back from linear ids into the higher-dimensional dense result.
    final_result = array_ops.reshape(
        result,
        array_ops.concat([
            array_ops.slice(
                math_ops.cast(original_shape, dtypes.int32), [0],
                [original_rank - 1]),
            array_ops.slice(array_ops.shape(result), [1], [-1])
        ], 0))
    final_result.set_shape(tensor_shape.unknown_shape(
        (original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
    return final_result
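The contract described in the docstring (prune ids < 0, then fall back to `default_id` or the zero vector for rows left empty) can be sketched in plain NumPy; this illustrates the semantics, not the TensorFlow implementation:

```python
import numpy as np

def safe_lookup_mean(weights, rows_of_ids, default_id=None):
    """Mean-combine embeddings per row of ids, with "safe" fallbacks."""
    out = []
    for ids in rows_of_ids:
        ids = [i for i in ids if i >= 0]            # prune invalid ids
        if ids:
            out.append(weights[ids].mean(axis=0))   # "mean" combiner
        elif default_id is not None:
            out.append(weights[default_id])         # empty row, default id
        else:
            out.append(np.zeros(weights.shape[1]))  # empty row, zero vector
    return np.stack(out)

emb = np.arange(12.0).reshape(4, 3)               # vocab of 4, dimension 3
print(safe_lookup_mean(emb, [[0, 1], [-1], []]))  # last two rows are "empty"
```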
Example #22
    def __init__(self,
                 df,
                 scale_operator_pd,
                 cholesky_input_output_matrices=False,
                 validate_args=False,
                 allow_nan_stats=True,
                 name=None):
        """Construct Wishart distributions.

    Args:
      df: `float` or `double` tensor, the degrees of freedom of the
        distribution(s). `df` must be greater than or equal to `k`.
      scale_operator_pd: `float` or `double` instance of `OperatorPDBase`.
      cholesky_input_output_matrices: `Boolean`. Any function whose input or
        output is a matrix assumes the input is Cholesky and returns a
        Cholesky factored matrix. For example, `log_pdf` takes a Cholesky
        input and `sample_n` returns a Cholesky when
        `cholesky_input_output_matrices=True`.
      validate_args: `Boolean`, default `False`.  Whether to validate input with
        asserts. If `validate_args` is `False`, and the inputs are invalid,
        correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
        exception if a statistic (e.g., mean, mode) is undefined for any batch
        member. If True, batch members with valid parameters leading to
        undefined statistics will return `NaN` for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if scale is not floating-type
      TypeError: if scale.dtype != df.dtype
      ValueError: if df < k, where scale operator event shape is `(k, k)`
    """
        self._cholesky_input_output_matrices = cholesky_input_output_matrices
        with ops.name_scope(name) as ns:
            with ops.name_scope("init", values=[df, scale_operator_pd]):
                if not scale_operator_pd.dtype.is_floating:
                    raise TypeError(
                        "scale_operator_pd.dtype=%s is not a floating-point type"
                        % scale_operator_pd.dtype)
                self._scale_operator_pd = scale_operator_pd
                self._df = ops.convert_to_tensor(df,
                                                 dtype=scale_operator_pd.dtype,
                                                 name="df")
                contrib_tensor_util.assert_same_float_dtype(
                    (self._df, self._scale_operator_pd))
                if (self._scale_operator_pd.get_shape().ndims is None or
                        self._scale_operator_pd.get_shape()[-1].value is None):
                    self._dimension = math_ops.cast(
                        self._scale_operator_pd.vector_space_dimension(),
                        dtype=self._scale_operator_pd.dtype,
                        name="dimension")
                else:
                    self._dimension = ops.convert_to_tensor(
                        self._scale_operator_pd.get_shape()[-1].value,
                        dtype=self._scale_operator_pd.dtype,
                        name="dimension")
                df_val = tensor_util.constant_value(self._df)
                dim_val = tensor_util.constant_value(self._dimension)
                if df_val is not None and dim_val is not None:
                    df_val = np.asarray(df_val)
                    if not df_val.shape: df_val = (df_val, )
                    if any(df_val < dim_val):
                        raise ValueError(
                            "Degrees of freedom (df = %s) cannot be less than dimension of "
                            "scale matrix (scale.dimension = %s)" %
                            (df_val, dim_val))
                elif validate_args:
                    assertions = check_ops.assert_less_equal(
                        self._dimension,
                        self._df,
                        message=(
                            "Degrees of freedom (df = %s) cannot be less than "
                            "dimension of scale matrix (scale.dimension = %s)"
                            % (self._dimension, self._df)))
                    self._df = control_flow_ops.with_dependencies([assertions],
                                                                  self._df)
                super(_WishartOperatorPD,
                      self).__init__(dtype=self._scale_operator_pd.dtype,
                                     parameters={
                                         "df": self._df,
                                         "scale_operator_pd":
                                         self._scale_operator_pd,
                                         "dimension": self._dimension
                                     },
                                     validate_args=validate_args,
                                     allow_nan_stats=allow_nan_stats,
                                     is_continuous=True,
                                     is_reparameterized=True,
                                     name=ns)
Example #23
    def log_pdf(self, x, name=None):
        """Log pdf of observations `x` given these Multivariate Normals.

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `mu`.
      name: The name to give this op.

    Returns:
      log_pdf: tensor of dtype `dtype`, the log-PDFs of `x`.
    """
        with ops.op_scope([self._mu, self._sigma_chol, x], name,
                          "MultivariateNormalLogPdf"):
            x = ops.convert_to_tensor(x)
            contrib_tensor_util.assert_same_float_dtype((self._mu, x))

            x_centered = x - self.mu

            x_rank = array_ops.rank(x_centered)
            sigma_rank = array_ops.rank(self._sigma_chol)

            x_rank_vec = array_ops.pack([x_rank])
            sigma_rank_vec = array_ops.pack([sigma_rank])
            x_shape = array_ops.shape(x_centered)

            # sigma_chol is shaped [D, E, F, ..., k, k]
            # x_centered shape is one of:
            #   [D, E, F, ..., k], or [F, ..., k], or
            #   [A, B, C, D, E, F, ..., k]
            # and we need to convert x_centered to shape:
            #   [D, E, F, ..., k, A*B*C] (or 1 if A, B, C don't exist)
            # then transpose and reshape x_whitened back to one of the shapes:
            #   [D, E, F, ..., k], or [1, 1, F, ..., k], or
            #   [A, B, C, D, E, F, ..., k]

            # This helper handles the case where rank(x_centered) < rank(sigma)
            def _broadcast_x_not_higher_rank_than_sigma():
                return array_ops.reshape(
                    x_centered,
                    array_ops.concat(
                        # Reshape to ones(deficient x rank) + x_shape + [1]
                        0,
                        (array_ops.ones(array_ops.pack(
                            [sigma_rank - x_rank - 1]),
                                        dtype=x_rank.dtype), x_shape, [1])))

            # These helpers handle the case where rank(x_centered) >= rank(sigma)
            def _broadcast_x_higher_rank_than_sigma():
                x_shape_left = array_ops.slice(x_shape, [0],
                                               sigma_rank_vec - 1)
                x_shape_right = array_ops.slice(x_shape, sigma_rank_vec - 1,
                                                x_rank_vec - 1)
                x_shape_perm = array_ops.concat(
                    0, (math_ops.range(sigma_rank - 1, x_rank),
                        math_ops.range(0, sigma_rank - 1)))
                return array_ops.reshape(
                    # Convert to [D, E, F, ..., k, B, C]
                    array_ops.transpose(x_centered, perm=x_shape_perm),
                    # Reshape to [D, E, F, ..., k, B*C]
                    array_ops.concat(
                        0, (x_shape_right,
                            array_ops.pack(
                                [math_ops.reduce_prod(x_shape_left, 0)]))))

            def _unbroadcast_x_higher_rank_than_sigma():
                x_shape_left = array_ops.slice(x_shape, [0],
                                               sigma_rank_vec - 1)
                x_shape_right = array_ops.slice(x_shape, sigma_rank_vec - 1,
                                                x_rank_vec - 1)
                x_shape_perm = array_ops.concat(
                    0, (math_ops.range(sigma_rank - 1, x_rank),
                        math_ops.range(0, sigma_rank - 1)))
                return array_ops.transpose(
                    # [D, E, F, ..., k, B, C] => [B, C, D, E, F, ..., k]
                    array_ops.reshape(
                        # convert to [D, E, F, ..., k, B, C]
                        x_whitened_broadcast,
                        array_ops.concat(0, (x_shape_right, x_shape_left))),
                    perm=x_shape_perm)

            # Step 1: reshape x_centered
            x_centered_broadcast = control_flow_ops.cond(
                # x_centered == [D, E, F, ..., k] => [D, E, F, ..., k, 1]
                # or         == [F, ..., k] => [1, 1, F, ..., k, 1]
                x_rank <= sigma_rank - 1,
                _broadcast_x_not_higher_rank_than_sigma,
                # x_centered == [B, C, D, E, F, ..., k] => [D, E, F, ..., k, B*C]
                _broadcast_x_higher_rank_than_sigma)

            x_whitened_broadcast = linalg_ops.batch_matrix_triangular_solve(
                self._sigma_chol, x_centered_broadcast)

            # Reshape x_whitened_broadcast back to x_whitened
            x_whitened = control_flow_ops.cond(
                x_rank <= sigma_rank - 1,
                lambda: array_ops.reshape(x_whitened_broadcast, x_shape),
                _unbroadcast_x_higher_rank_than_sigma)

            x_whitened = array_ops.expand_dims(x_whitened, -1)
            # Reshape x_whitened to contain row vectors
            # Returns a batchwise scalar
            x_whitened_norm = math_ops.batch_matmul(x_whitened,
                                                    x_whitened,
                                                    adj_x=True)
            x_whitened_norm = control_flow_ops.cond(
                x_rank <= sigma_rank - 1,
                lambda: array_ops.squeeze(x_whitened_norm, [-2, -1]),
                lambda: array_ops.squeeze(x_whitened_norm, [-1]))

            log_two_pi = constant_op.constant(math.log(2 * math.pi),
                                              dtype=self.dtype)
            k = math_ops.cast(self._k, self.dtype)
            log_pdf_value = (-math_ops.log(self._sigma_det) - k * log_two_pi -
                             x_whitened_norm) / 2
            final_shaped_value = control_flow_ops.cond(
                x_rank <= sigma_rank - 1, lambda: log_pdf_value,
                lambda: array_ops.squeeze(log_pdf_value, [-1]))

            output_static_shape = x_centered.get_shape()[:-1]
            final_shaped_value.set_shape(output_static_shape)
            return final_shaped_value
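The snippet's core trick is whitening `x - mu` with a triangular solve against the Cholesky factor, so the quadratic form and log-determinant come cheaply. The same idea for a single `(k,)` vector in NumPy/SciPy (illustrative only):

```python
import numpy as np
from scipy.linalg import solve_triangular

def mvn_log_pdf(x, mu, sigma_chol):
    # sigma = L @ L.T with L = sigma_chol, lower triangular.
    k = mu.shape[-1]
    z = solve_triangular(sigma_chol, x - mu, lower=True)  # whiten
    log_det = 2.0 * np.sum(np.log(np.diag(sigma_chol)))   # log|sigma|
    return -0.5 * (log_det + k * np.log(2.0 * np.pi) + z @ z)

L = np.linalg.cholesky(np.array([[2.0, 0.3], [0.3, 1.0]]))
print(mvn_log_pdf(np.zeros(2), np.zeros(2), L))
```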
Example #24
    def __init__(self, mu, sigma=None, sigma_chol=None, name=None):
        """Multivariate Normal distributions on `R^k`.

    User must provide means `mu`, which are tensors of rank `N+1` (`N >= 0`)
    with the last dimension having length `k`.

    User must provide exactly one of `sigma` (the covariance matrices) or
    `sigma_chol` (the cholesky decompositions of the covariance matrices).
    `sigma` or `sigma_chol` must be of rank `N+2`.  The last two dimensions
    must both have length `k`.  The first `N` dimensions correspond to batch
    indices.

    If `sigma_chol` is not provided, the batch cholesky factorization of `sigma`
    is calculated for you.

    The shapes of `mu` and `sigma` must match for the first `N` dimensions.

    Regardless of which parameter is provided, the covariance matrices must all
    be **positive definite** (an error is raised if one of them is not).

    Args:
      mu: (N+1)-D.  `float` or `double` tensor, the means of the distributions.
      sigma: (N+2)-D.  (optional) `float` or `double` tensor, the covariances
        of the distribution(s).  The first `N+1` dimensions must match
        those of `mu`.  Must be batch-positive-definite.
      sigma_chol: (N+2)-D.  (optional) `float` or `double` tensor, a
        lower-triangular factorization of `sigma`
        (`sigma = sigma_chol . sigma_chol^*`).  The first `N+1` dimensions
        must match those of `mu`.  The tensor itself need not be batch
        lower triangular: we ignore the upper triangular part.  However,
        the batch diagonals must be positive (i.e., sigma_chol must be
        batch-positive-definite).
      name: The name to give Ops created by the initializer.

    Raises:
      ValueError: if neither sigma nor sigma_chol is provided.
      TypeError: if mu and sigma (resp. sigma_chol) are different dtypes.
    """
        if (sigma is None) == (sigma_chol is None):
            raise ValueError(
                "Exactly one of sigma and sigma_chol must be provided")

        with ops.op_scope([mu, sigma, sigma_chol], name, "MultivariateNormal"):
            sigma_or_half = sigma_chol if sigma is None else sigma

            mu = ops.convert_to_tensor(mu)
            sigma_or_half = ops.convert_to_tensor(sigma_or_half)

            contrib_tensor_util.assert_same_float_dtype((mu, sigma_or_half))

            with ops.control_dependencies(
                [_assert_compatible_shapes(mu, sigma_or_half)]):
                mu = array_ops.identity(mu, name="mu")

                # Store the dimensionality of the MVNs
                self._k = array_ops.gather(array_ops.shape(mu),
                                           array_ops.rank(mu) - 1)

                if sigma_chol is not None:
                    # Ensure we only keep the lower triangular part.
                    sigma_chol = array_ops.batch_matrix_band_part(sigma_chol,
                                                                  num_lower=-1,
                                                                  num_upper=0)
                    sigma_det = _determinant_from_sigma_chol(sigma_chol)
                    with ops.control_dependencies(
                        [_assert_batch_positive_definite(sigma_chol)]):
                        self._sigma = math_ops.batch_matmul(sigma_chol,
                                                            sigma_chol,
                                                            adj_y=True,
                                                            name="sigma")
                        self._sigma_chol = array_ops.identity(
                            sigma_chol, "sigma_chol")
                        self._sigma_det = array_ops.identity(
                            sigma_det, "sigma_det")
                        self._mu = array_ops.identity(mu, "mu")
                else:  # sigma is not None
                    sigma_chol = linalg_ops.batch_cholesky(sigma)
                    sigma_det = _determinant_from_sigma_chol(sigma_chol)
                    # batch_cholesky fails for inputs that are not positive
                    # definite, so this dependency doubles as a validity check.
                    with ops.control_dependencies([sigma_chol]):
                        self._sigma = array_ops.identity(sigma, "sigma")
                        self._sigma_chol = array_ops.identity(
                            sigma_chol, "sigma_chol")
                        self._sigma_det = array_ops.identity(
                            sigma_det, "sigma_det")
                        self._mu = array_ops.identity(mu, "mu")
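
To make the relationship between `sigma` and `sigma_chol` concrete, here is a minimal NumPy sketch of the identity the constructor relies on (NumPy stands in for the deprecated batch ops; nothing below is part of the class itself):

```python
import numpy as np

rng = np.random.default_rng(0)
k = 3
A = rng.normal(size=(2, k, k))                        # batch of 2 matrices
sigma = A @ np.swapaxes(A, -1, -2) + k * np.eye(k)    # batch-positive-definite
sigma_chol = np.linalg.cholesky(sigma)                # what the constructor
                                                      # computes when only
                                                      # `sigma` is given
# Either parameter determines the same covariance:
assert np.allclose(sigma, sigma_chol @ np.swapaxes(sigma_chol, -1, -2))
```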
Example #25
    def __init__(self,
                 df,
                 scale_operator,
                 cholesky_input_output_matrices=False,
                 validate_args=False,
                 allow_nan_stats=True,
                 name=None):
        """Construct Wishart distributions.

    Args:
      df: `float` or `double` tensor, the degrees of freedom of the
        distribution(s). `df` must be greater than or equal to `k`.
      scale_operator: `float` or `double` instance of `LinearOperator`.
      cholesky_input_output_matrices: Python `bool`. When `True`, any function
        whose input or output is a matrix assumes the input is a Cholesky
        factor and returns a Cholesky-factored matrix. For example, `log_prob`
        takes a Cholesky factor as input and `sample_n` returns a Cholesky
        factor.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if `scale_operator` is not of a floating-point dtype.
      TypeError: if `scale_operator.dtype != df.dtype`.
      ValueError: if `df < k`, where the scale operator's event shape is
        `(k, k)`.
    """
        parameters = dict(locals())
        self._cholesky_input_output_matrices = cholesky_input_output_matrices
        with ops.name_scope(name) as name:
            with ops.name_scope("init", values=[df, scale_operator]):
                if not scale_operator.dtype.is_floating:
                    raise TypeError(
                        "scale_operator.dtype=%s is not a floating-point type"
                        % scale_operator.dtype)
                if not scale_operator.is_square:
                    raise ValueError("scale_operator must be square.")

                self._scale_operator = scale_operator
                self._df = ops.convert_to_tensor(df,
                                                 dtype=scale_operator.dtype,
                                                 name="df")
                contrib_tensor_util.assert_same_float_dtype(
                    (self._df, self._scale_operator))
                if (self._scale_operator.shape.ndims is None
                        or self._scale_operator.shape[-1].value is None):
                    self._dimension = math_ops.cast(
                        self._scale_operator.domain_dimension_tensor(),
                        dtype=self._scale_operator.dtype,
                        name="dimension")
                else:
                    self._dimension = ops.convert_to_tensor(
                        self._scale_operator.shape[-1].value,
                        dtype=self._scale_operator.dtype,
                        name="dimension")
                df_val = tensor_util.constant_value(self._df)
                dim_val = tensor_util.constant_value(self._dimension)
                if df_val is not None and dim_val is not None:
                    df_val = np.asarray(df_val)
                    if not df_val.shape:
                        df_val = [df_val]
                    if any(df_val < dim_val):
                        raise ValueError(
                            "Degrees of freedom (df = %s) cannot be less than "
                            "dimension of scale matrix (scale.dimension = %s)"
                            % (df_val, dim_val))
                elif validate_args:
                    assertions = check_ops.assert_less_equal(
                        self._dimension,
                        self._df,
                        message=("Degrees of freedom (df = %s) cannot be "
                                 "less than dimension of scale matrix "
                                 "(scale.dimension = %s)" %
                                 (self._df, self._dimension)))
                    self._df = control_flow_ops.with_dependencies([assertions],
                                                                  self._df)
        super(_WishartLinearOperator, self).__init__(
            dtype=self._scale_operator.dtype,
            validate_args=validate_args,
            allow_nan_stats=allow_nan_stats,
            reparameterization_type=distribution.FULLY_REPARAMETERIZED,
            parameters=parameters,
            graph_parents=([self._df, self._dimension] +
                           self._scale_operator.graph_parents),
            name=name)
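
The static `df >= k` check above boils down to a few lines of NumPy. A sketch with a hypothetical `check_df` helper that mirrors the same logic:

```python
import numpy as np

def check_df(df_val, dim_val):
    """Mirror of the static df >= dimension check (illustrative helper)."""
    df_val = np.atleast_1d(np.asarray(df_val))
    if np.any(df_val < dim_val):
        raise ValueError("Degrees of freedom (df = %s) cannot be less than "
                         "dimension of scale matrix (scale.dimension = %s)"
                         % (df_val, dim_val))

check_df(5.0, 3)             # fine: df >= k for every batch member
try:
    check_df([2.0, 4.0], 3)  # first batch member violates df >= k
except ValueError as e:
    print(e)
```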
Example #26
  def log_prob(self, x, name='log_prob'):
    """Log of the probability density/mass function.

    Args:
      x: `float` or `double` `Tensor`.
      name: The name to give this op.

    Returns:
      log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
        values of type `self.dtype`.
    """
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[x] + list(self.inputs.values())):
        x = ops.convert_to_tensor(x, name='x')
        contrib_tensor_util.assert_same_float_dtype(
            (self.scale_operator_pd, x))
        if self.cholesky_input_output_matrices:
          x_sqrt = x
        else:
          # Complexity: O(nbk^3)
          x_sqrt = linalg_ops.batch_cholesky(x)

        batch_shape = self.batch_shape()
        event_shape = self.event_shape()
        ndims = array_ops.rank(x_sqrt)
        # sample_ndims = ndims - batch_ndims - event_ndims
        sample_ndims = ndims - array_ops.shape(batch_shape)[0] - 2
        sample_shape = array_ops.slice(
            array_ops.shape(x_sqrt), [0], [sample_ndims])

        # We need to be able to pre-multiply each matrix by its corresponding
        # batch scale matrix.  Since a Distribution Tensor supports multiple
        # samples per batch, this means we need to reshape the input matrix `x`
        # so that the first b dimensions are batch dimensions and the last two
        # are of shape [dimension, dimension * number_of_samples]. Doing these
        # gymnastics allows us to do a batch_solve.
        #
        # After we're done with sqrt_solve (the batch operation) we need to undo
        # this reshaping so what we're left with is a Tensor partitionable by
        # sample, batch, event dimensions.

        # Complexity: O(nbk^2) since transpose must access every element.
        scale_sqrt_inv_x_sqrt = x_sqrt
        perm = array_ops.concat(0, (math_ops.range(sample_ndims, ndims),
                                    math_ops.range(0, sample_ndims)))
        scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)
        shape = array_ops.concat(
            0, (batch_shape,
                (math_ops.cast(self.dimension, dtype=dtypes.int32), -1)))
        scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)

        # Complexity: O(nbM*k) where M is the complexity of the operator solving
        # a vector system.  E.g., for OperatorPDDiag, each solve is O(k), so
        # this complexity is O(nbk^2). For OperatorPDCholesky, each solve is
        # O(k^2) so this step has complexity O(nbk^3).
        scale_sqrt_inv_x_sqrt = self.scale_operator_pd.sqrt_solve(
            scale_sqrt_inv_x_sqrt)

        # Undo the reshaping that made the input batch-op ready.
        # Complexity: O(nbk^2)
        shape = array_ops.concat(0, (batch_shape, event_shape, sample_shape))
        scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)
        perm = array_ops.concat(0, (math_ops.range(ndims - sample_ndims, ndims),
                                    math_ops.range(0, ndims - sample_ndims)))
        scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)

        # Write V = SS', X = LL'. Then:
        # tr[inv(V) X] = tr[inv(S)' inv(S) L L']
        #              = tr[inv(S) L L' inv(S)']
        #              = tr[(inv(S) L) (inv(S) L)']
        #              = sum_{ik} (inv(S) L)_{ik}^2
        # The second equality follows from the cyclic permutation property.
        # Complexity: O(nbk^2)
        trace_scale_inv_x = math_ops.reduce_sum(
            math_ops.square(scale_sqrt_inv_x_sqrt),
            reduction_indices=[-2, -1])

        # Complexity: O(nbk)
        half_log_det_x = math_ops.reduce_sum(
            math_ops.log(array_ops.batch_matrix_diag_part(x_sqrt)),
            reduction_indices=[-1])

        # Complexity: O(nbk^2)
        log_prob = ((self.df - self.dimension - 1.) * half_log_det_x -
                    0.5 * trace_scale_inv_x -
                    self.log_normalizing_constant())

        # Set shape hints.
        # Try to merge what we know from the input then what we know from the
        # parameters of this distribution.
        if x.get_shape().ndims is not None:
          log_prob.set_shape(x.get_shape()[:-2])
        if (log_prob.get_shape().ndims is not None and
            self.get_batch_shape().ndims is not None and
            self.get_batch_shape().ndims > 0):
          log_prob.get_shape()[-self.get_batch_shape().ndims:].merge_with(
              self.get_batch_shape())

        return log_prob
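
The algebra in the comments is easy to sanity-check numerically. This NumPy sketch (all names local to the sketch) verifies both the trace identity and the half-log-determinant shortcut used for `half_log_det_x`:

```python
import numpy as np

rng = np.random.default_rng(0)
k = 4
V = (lambda a: a @ a.T + k * np.eye(k))(rng.normal(size=(k, k)))  # scale, PD
X = (lambda b: b @ b.T + k * np.eye(k))(rng.normal(size=(k, k)))  # sample, PD
S, L = np.linalg.cholesky(V), np.linalg.cholesky(X)

# tr[inv(V) X] == sum_{ik} (inv(S) L)_{ik}^2
M = np.linalg.solve(S, L)                                  # inv(S) L
assert np.allclose(np.trace(np.linalg.solve(V, X)), np.sum(M ** 2))

# sum(log diag(chol(X))) == 0.5 * log det X
assert np.allclose(np.sum(np.log(np.diag(L))),
                   0.5 * np.linalg.slogdet(X)[1])
```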
Example #27
    def __init__(self,
                 a=0.,
                 b=1.,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="Uniform"):
        """Construct Uniform distributions with `a` and `b`.

    The parameters `a` and `b` must be shaped in a way that supports
    broadcasting (e.g. `b - a` is a valid operation).

    Here are examples without broadcasting:

    ```python
    # Without broadcasting
    u1 = Uniform(3.0, 4.0)  # a single uniform distribution [3, 4]
    u2 = Uniform([1.0, 2.0], [3.0, 4.0])  # 2 distributions [1, 3], [2, 4]
    u3 = Uniform([[1.0, 2.0],
                  [3.0, 4.0]],
                 [[1.5, 2.5],
                  [3.5, 4.5]])  # 4 distributions
    ```

    And with broadcasting:

    ```python
    u1 = Uniform(3.0, [5.0, 6.0, 7.0])  # 3 distributions
    ```

    Args:
      a: Floating point tensor, the minimum endpoint.
      b: Floating point tensor, the maximum endpoint. Must be > `a`.
      validate_args: `Boolean`, default `False`.  Whether to validate input with
        asserts. If `validate_args` is `False`, and the inputs are invalid,
        correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to prefix Ops created by this distribution class.

    Raises:
      InvalidArgumentError: if `a >= b` and `validate_args=True`.
    """
        parameters = locals()
        parameters.pop("self")
        with ops.name_scope(name, values=[a, b]) as ns:
            with ops.control_dependencies([
                    check_ops.assert_less(
                        a, b, message="uniform is not defined when a >= b.")
            ] if validate_args else []):
                self._a = array_ops.identity(a, name="a")
                self._b = array_ops.identity(b, name="b")
                contrib_tensor_util.assert_same_float_dtype((self._a, self._b))
        super(Uniform, self).__init__(dtype=self._a.dtype,
                                      is_reparameterized=True,
                                      is_continuous=True,
                                      validate_args=validate_args,
                                      allow_nan_stats=allow_nan_stats,
                                      parameters=parameters,
                                      graph_parents=[self._a, self._b],
                                      name=ns)
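
A quick NumPy check of the broadcasting requirement (`b - a` must be a valid operation) reproduces the shapes in the docstring examples:

```python
import numpy as np

a = np.float32(3.0)
b = np.array([5.0, 6.0, 7.0], dtype=np.float32)
print((b - a).shape)                                  # (3,): three
                                                      # distributions, as in u1
print(np.broadcast_shapes(np.shape(a), np.shape(b)))  # also (3,)
```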
Example #28
    def __init__(self,
                 df,
                 scale_operator,
                 input_output_cholesky=False,
                 validate_args=False,
                 allow_nan_stats=True,
                 name=None):
        """Construct Wishart distributions.

    Args:
      df: `float` or `double` tensor, the degrees of freedom of the
        distribution(s). `df` must be greater than or equal to `k`.
      scale_operator: `float` or `double` instance of `LinearOperator`.
      input_output_cholesky: Python `bool`. If `True`, functions whose input or
        output have the semantics of samples assume inputs are in Cholesky form
        and return outputs in Cholesky form. In particular, if this flag is
        `True`, the input to `log_prob` is presumed to be in Cholesky form, and
        the outputs of `sample`, `mean`, and `mode` are in Cholesky form.
        Setting this argument to `True` is purely a computational optimization;
        it does not change the underlying distribution. For instance, `mean`
        returns the Cholesky of the mean, not the mean of Cholesky factors.
        The `variance`
        and `stddev` methods are unaffected by this flag.
        Default value: `False` (i.e., input/output does not have Cholesky
        semantics).
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if `scale_operator` is not of a floating-point dtype.
      TypeError: if `scale_operator.dtype != df.dtype`.
      ValueError: if `df < k`, where the scale operator's event shape is
        `(k, k)`.
    """
        parameters = dict(locals())
        self._input_output_cholesky = input_output_cholesky
        with tf.name_scope(name) as name:
            with tf.name_scope("init", values=[df, scale_operator]):
                if not scale_operator.dtype.is_floating:
                    raise TypeError(
                        "scale_operator.dtype=%s is not a floating-point type"
                        % scale_operator.dtype)
                if not scale_operator.is_square:
                    raise ValueError("scale_operator must be square.")

                self._scale_operator = scale_operator
                self._df = tf.convert_to_tensor(df,
                                                dtype=scale_operator.dtype,
                                                name="df")
                contrib_tensor_util.assert_same_float_dtype(
                    (self._df, self._scale_operator))
                if (self._scale_operator.shape.ndims is None
                        or self._scale_operator.shape[-1].value is None):
                    self._dimension = tf.cast(
                        self._scale_operator.domain_dimension_tensor(),
                        dtype=self._scale_operator.dtype,
                        name="dimension")
                else:
                    self._dimension = tf.convert_to_tensor(
                        self._scale_operator.shape[-1].value,
                        dtype=self._scale_operator.dtype,
                        name="dimension")
                df_val = tensor_util.constant_value(self._df)
                dim_val = tensor_util.constant_value(self._dimension)
                if df_val is not None and dim_val is not None:
                    df_val = np.asarray(df_val)
                    if not df_val.shape:
                        df_val = [df_val]
                    if any(df_val < dim_val):
                        raise ValueError(
                            "Degrees of freedom (df = %s) cannot be less than "
                            "dimension of scale matrix (scale.dimension = %s)"
                            % (df_val, dim_val))
                elif validate_args:
                    assertions = tf.assert_less_equal(
                        self._dimension,
                        self._df,
                        message=("Degrees of freedom (df = %s) cannot be "
                                 "less than dimension of scale matrix "
                                 "(scale.dimension = %s)" %
                                 (self._df, self._dimension)))
                    self._df = control_flow_ops.with_dependencies([assertions],
                                                                  self._df)
        super(_WishartLinearOperator, self).__init__(
            dtype=self._scale_operator.dtype,
            validate_args=validate_args,
            allow_nan_stats=allow_nan_stats,
            reparameterization_type=tf.distributions.FULLY_REPARAMETERIZED,
            parameters=parameters,
            graph_parents=([self._df, self._dimension] +
                           self._scale_operator.graph_parents),
            name=name)
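
The docstring's point that `mean` returns the Cholesky of the mean, not the mean of Cholesky factors, rests on a simple identity: `chol(c * V) = sqrt(c) * chol(V)` for `c > 0`. A NumPy sketch, assuming the standard Wishart mean `df * scale`:

```python
import numpy as np

rng = np.random.default_rng(1)
k, df = 3, 5.0
V = (lambda a: a @ a.T + k * np.eye(k))(rng.normal(size=(k, k)))  # scale, PD
# With input_output_cholesky=True, `mean` would return chol(df * V):
assert np.allclose(np.linalg.cholesky(df * V),
                   np.sqrt(df) * np.linalg.cholesky(V))
```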
Example #29
    def __init__(self,
                 distribution,
                 lower_cutoff=None,
                 upper_cutoff=None,
                 name="QuantizedDistribution"):
        """Construct a Quantized Distribution representing `Y = ceiling(X)`.

    Some properties are inherited from the distribution defining `X`.
    In particular, `validate_args` and `allow_nan_stats` are determined for this
    `QuantizedDistribution` by reading the `distribution`.

    Args:
      distribution:  The base distribution class to transform. Typically an
        instance of `Distribution`.
      lower_cutoff:  `Tensor` with same `dtype` as this distribution and shape
        able to be added to samples.  Should be a whole number.  Default `None`.
        If provided, base distribution's pdf/pmf should be defined at
        `lower_cutoff`.
      upper_cutoff:  `Tensor` with same `dtype` as this distribution and shape
        able to be added to samples.  Should be a whole number.  Default `None`.
        If provided, base distribution's pdf/pmf should be defined at
        `upper_cutoff - 1`.
        `upper_cutoff` must be strictly greater than `lower_cutoff`.
      name: The name for the distribution.

    Raises:
      TypeError: If `distribution` is not an instance of `Distribution`, or
          is not continuous.
      NotImplementedError:  If the base distribution does not implement `cdf`.
    """
        values = (list(distribution.parameters.values()) +
                  [lower_cutoff, upper_cutoff])
        with ops.name_scope(name, values=values):
            self._dist = distribution
            super(QuantizedDistribution,
                  self).__init__(dtype=self._dist.dtype,
                                 parameters={
                                     "distribution": distribution,
                                     "lower_cutoff": lower_cutoff,
                                     "upper_cutoff": upper_cutoff,
                                 },
                                 is_continuous=False,
                                 is_reparameterized=False,
                                 validate_args=self._dist.validate_args,
                                 allow_nan_stats=self._dist.allow_nan_stats,
                                 name=name)

            if lower_cutoff is not None:
                lower_cutoff = ops.convert_to_tensor(lower_cutoff,
                                                     name="lower_cutoff")
            if upper_cutoff is not None:
                upper_cutoff = ops.convert_to_tensor(upper_cutoff,
                                                     name="upper_cutoff")
            contrib_tensor_util.assert_same_float_dtype(
                tensors=[self.distribution, lower_cutoff, upper_cutoff])

            checks = []
            if lower_cutoff is not None and upper_cutoff is not None:
                message = "lower_cutoff must be strictly less than upper_cutoff."
                checks.append(
                    check_ops.assert_less(lower_cutoff,
                                          upper_cutoff,
                                          message=message))

            with ops.control_dependencies(
                    checks if self.validate_args else []):
                if lower_cutoff is not None:
                    self._lower_cutoff = self._check_integer(lower_cutoff)
                else:
                    self._lower_cutoff = None
                if upper_cutoff is not None:
                    self._upper_cutoff = self._check_integer(upper_cutoff)
                else:
                    self._upper_cutoff = None
Example #30
    def __init__(self,
                 distribution,
                 lower_cutoff=None,
                 upper_cutoff=None,
                 validate_args=False,
                 name="QuantizedDistribution"):
        """Construct a Quantized Distribution representing `Y = ceiling(X)`.

    Some properties are inherited from the distribution defining `X`. Example:
    `allow_nan_stats` is determined for this `QuantizedDistribution` by reading
    the `distribution`.

    Args:
      distribution:  The base distribution class to transform. Typically an
        instance of `Distribution`.
      lower_cutoff:  `Tensor` with same `dtype` as this distribution and shape
        able to be added to samples.  Should be a whole number.  Default `None`.
        If provided, base distribution's `prob` should be defined at
        `lower_cutoff`.
      upper_cutoff:  `Tensor` with same `dtype` as this distribution and shape
        able to be added to samples.  Should be a whole number.  Default `None`.
        If provided, base distribution's `prob` should be defined at
        `upper_cutoff - 1`.
        `upper_cutoff` must be strictly greater than `lower_cutoff`.
      validate_args: Python boolean.  Whether to validate input with asserts.
        If `validate_args` is `False`, and the inputs are invalid,
        correct behavior is not guaranteed.
      name: The name for the distribution.

    Raises:
      TypeError: If `distribution` is not an instance of `Distribution`, or
          is not continuous.
      NotImplementedError:  If the base distribution does not implement `cdf`.
    """
        parameters = locals()
        parameters.pop("self")
        values = (list(distribution.parameters.values()) +
                  [lower_cutoff, upper_cutoff])
        with ops.name_scope(name, values=values) as ns:
            self._dist = distribution

            if lower_cutoff is not None:
                lower_cutoff = ops.convert_to_tensor(lower_cutoff,
                                                     name="lower_cutoff")
            if upper_cutoff is not None:
                upper_cutoff = ops.convert_to_tensor(upper_cutoff,
                                                     name="upper_cutoff")
            contrib_tensor_util.assert_same_float_dtype(
                tensors=[self.distribution, lower_cutoff, upper_cutoff])

            # We let QuantizedDistribution access _graph_parents since this class is
            # more like a base class.
            graph_parents = self._dist._graph_parents  # pylint: disable=protected-access

            checks = []
            if lower_cutoff is not None and upper_cutoff is not None:
                message = "lower_cutoff must be strictly less than upper_cutoff."
                checks.append(
                    check_ops.assert_less(lower_cutoff,
                                          upper_cutoff,
                                          message=message))
            self._validate_args = validate_args  # self._check_integer uses this.
            with ops.control_dependencies(checks if validate_args else []):
                if lower_cutoff is not None:
                    self._lower_cutoff = self._check_integer(lower_cutoff)
                    graph_parents += [self._lower_cutoff]
                else:
                    self._lower_cutoff = None
                if upper_cutoff is not None:
                    self._upper_cutoff = self._check_integer(upper_cutoff)
                    graph_parents += [self._upper_cutoff]
                else:
                    self._upper_cutoff = None

        super(QuantizedDistribution, self).__init__(
            dtype=self._dist.dtype,
            is_continuous=False,
            reparameterization_type=distributions.NOT_REPARAMETERIZED,
            validate_args=validate_args,
            allow_nan_stats=self._dist.allow_nan_stats,
            parameters=parameters,
            graph_parents=graph_parents,
            name=ns)
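
To summarize the `Y = ceiling(X)` semantics shared by both constructors, here is a pure-Python sketch of the implied pmf for a standard normal base distribution (the helper names are illustrative and not part of the class):

```python
import math

def norm_cdf(x):
    """Standard normal CDF."""
    return 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))

def quantized_pmf(y, lower_cutoff=None, upper_cutoff=None):
    """P(Y = y) for Y = ceiling(X), X ~ Normal(0, 1), with cutoffs."""
    if lower_cutoff is not None and y <= lower_cutoff:
        return norm_cdf(lower_cutoff)              # all mass at or below cutoff
    if upper_cutoff is not None and y >= upper_cutoff:
        return 1.0 - norm_cdf(upper_cutoff - 1.0)  # all mass above cutoff - 1
    return norm_cdf(y) - norm_cdf(y - 1.0)

# The pmf sums to one over the quantized support:
print(sum(quantized_pmf(y, -3, 3) for y in range(-3, 4)))  # ~1.0
```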