Example #1
 def _mode(self):
     mode = (self.a - 1.0) / (self.a_b_sum - 2.0)
     if self.allow_nan_stats:
         nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
         return math_ops.select(
             math_ops.logical_and(math_ops.greater(self.a, 1.0), math_ops.greater(self.b, 1.0)),
             mode,
             array_ops.fill(self.batch_shape(), nan, name="nan"),
         )
     else:
         return control_flow_ops.with_dependencies(
             [
                 check_ops.assert_less(
                     array_ops.ones((), dtype=self.dtype),
                     self.a,
                     message="Mode not defined for components of a <= 1.",
                 ),
                 check_ops.assert_less(
                     array_ops.ones((), dtype=self.dtype),
                     self.b,
                     message="Mode not defined for components of b <= 1.",
                 ),
             ],
             mode,
         )
Example #2
  def mode(self, name="mode"):
    """Mode of the distribution.

    Note that the mode for the Beta distribution is only defined
    when `a > 1`, `b > 1`. This returns the mode when `a > 1` and `b > 1`,
    and NaN otherwise. If `self.allow_nan_stats` is `False`, an exception
    will be raised rather than returning `NaN`.

    Args:
      name: The name for this op.

    Returns:
      Mode of the Beta distribution.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._a, self._b, self._a_b_sum], name):
        a = self._a
        b = self._b
        a_b_sum = self._a_b_sum
        one = constant_op.constant(1, self.dtype)
        mode = (a - 1) / (a_b_sum - 2)

        if self.allow_nan_stats:
          return math_ops.select(
              math_ops.logical_and(
                  math_ops.greater(a, 1), math_ops.greater(b, 1)),
              mode,
              (constant_op.constant(float("NaN"), dtype=self.dtype) *
               array_ops.ones_like(a_b_sum, dtype=self.dtype)))
        else:
          return control_flow_ops.with_dependencies([
              check_ops.assert_less(one, a),
              check_ops.assert_less(one, b)], mode)
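A quick cross-check of the closed form `(a - 1) / (a + b - 2)` used above. This is a sketch assuming SciPy is available; `a` and `b` are arbitrary illustration values:

```python
import numpy as np
from scipy.stats import beta as beta_dist

a, b = 2.0, 5.0  # arbitrary shape parameters with a > 1 and b > 1
x = np.linspace(1e-4, 1.0 - 1e-4, 100001)
numerical_mode = x[np.argmax(beta_dist.pdf(x, a, b))]   # argmax of the density
closed_form_mode = (a - 1.0) / (a + b - 2.0)            # 0.2 for these values
assert abs(numerical_mode - closed_form_mode) < 1e-3
```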
Example #3
def _verify_input(tensor_list, labels, probs_list):
  """Verify that batched inputs are well-formed."""
  checked_probs_list = []
  for probs in probs_list:
    # Since number of classes shouldn't change at runtime, probabilities shape
    # should be fully defined.
    probs.get_shape().assert_is_fully_defined()

    # Probabilities must be 1D.
    probs.get_shape().assert_has_rank(1)

    # Probabilities must be nonnegative and sum to one.
    tol = 1e-6
    prob_sum = math_ops.reduce_sum(probs)
    checked_probs = control_flow_ops.with_dependencies([
        check_ops.assert_non_negative(probs),
        check_ops.assert_less(prob_sum, 1.0 + tol),
        check_ops.assert_less(1.0 - tol, prob_sum)
    ], probs)
    checked_probs_list.append(checked_probs)

  # All probabilities should be the same length.
  prob_length = checked_probs_list[0].get_shape().num_elements()
  for checked_prob in checked_probs_list:
    if checked_prob.get_shape().num_elements() != prob_length:
      raise ValueError('Probability parameters must have the same length.')

  # Labels tensor should only have batch dimension.
  labels.get_shape().assert_has_rank(1)

  for tensor in tensor_list:
    # Data tensor should have a batch dimension.
    shape = tensor.get_shape().with_rank_at_least(1)

    # Data and label batch dimensions must be compatible.
    tensor_shape.dimension_at_index(shape, 0).assert_is_compatible_with(
        labels.get_shape()[0])

  # Data and labels must have the same, strictly positive batch size. Since we
  # can't assume we know the batch size at graph creation, add runtime checks.
  labels_batch_size = array_ops.shape(labels)[0]
  lbl_assert = check_ops.assert_positive(labels_batch_size)

  # Make each tensor depend on its own checks.
  labels = control_flow_ops.with_dependencies([lbl_assert], labels)
  tensor_list = [
      control_flow_ops.with_dependencies([
          lbl_assert,
          check_ops.assert_equal(array_ops.shape(x)[0], labels_batch_size)
      ], x) for x in tensor_list
  ]

  # Label's classes must be integers 0 <= x < num_classes.
  labels = control_flow_ops.with_dependencies([
      check_ops.assert_integer(labels), check_ops.assert_non_negative(labels),
      check_ops.assert_less(labels, math_ops.cast(prob_length, labels.dtype))
  ], labels)

  return tensor_list, labels, checked_probs_list
Example #4
 def test_doesnt_raise_when_less_and_broadcastable_shapes(self):
   with self.test_session():
     small = constant_op.constant([1], name="small")
     big = constant_op.constant([3, 2], name="big")
     with ops.control_dependencies([check_ops.assert_less(small, big)]):
       out = array_ops.identity(small)
     out.eval()
Example #5
 def _entropy(self):
   probs = self._probs
   if self.validate_args:
     probs = control_flow_ops.with_dependencies(
         [check_ops.assert_less(
             probs,
             constant_op.constant(1., probs.dtype),
             message="Entropy is undefined when logits = inf or probs = 1.")],
         probs)
   # Claim: entropy(p) = softplus(s)/p - s
   # where s=logits and p=probs.
   #
   # Proof:
   #
   # entropy(p)
   # := -[(1-p)log(1-p) + p log(p)]/p
   # = -[log(1-p) + p log(p/(1-p))]/p
   # = -[-softplus(s) + ps]/p
   # = softplus(s)/p - s
   #
   # since,
   # log[1-sigmoid(s)]
   # = log[1/(1+exp(s))]
   # = -log[1+exp(s)]
   # = -softplus(s)
   #
   # using the fact that,
   # 1-sigmoid(s) = sigmoid(-s) = 1/(1+exp(s))
   return nn.softplus(self.logits) / probs - self.logits
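The identity proved in the comments is easy to confirm numerically. A minimal NumPy sketch; `p` is an arbitrary example probability:

```python
import numpy as np

p = 0.3                          # arbitrary example probability
s = np.log(p / (1.0 - p))        # logits
softplus_s = np.log1p(np.exp(s))
entropy_direct = -((1.0 - p) * np.log(1.0 - p) + p * np.log(p)) / p
assert np.isclose(softplus_s / p - s, entropy_direct)
```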
Example #6
    def _variance(self):
        var = self._ones() * math_ops.square(self.sigma) * self.df / (self.df - 2)
        # When 1 < df <= 2, variance is infinite.
        inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
        result_where_defined = math_ops.select(
            math_ops.greater(self.df, array_ops.fill(self.batch_shape(), 2.0)),
            var,
            array_ops.fill(self.batch_shape(), inf, name="inf"),
        )

        if self.allow_nan_stats:
            nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
            return math_ops.select(
                math_ops.greater(self.df, self._ones()),
                result_where_defined,
                array_ops.fill(self.batch_shape(), nan, name="nan"),
            )
        else:
            return control_flow_ops.with_dependencies(
                [
                    check_ops.assert_less(
                        array_ops.ones((), dtype=self.dtype),
                        self.df,
                        message="variance not defined for components of df <= 1",
                    )
                ],
                result_where_defined,
            )
Example #7
    def variance(self, name="variance"):
        """Variance of each batch member.

    Variance for inverse gamma is defined only for `alpha > 2`. If
    `self.strict_statistics` is `True`, an exception will be raised rather
    than returning `NaN`.

    Args:
      name: A name to give this op.

    Returns:
      The variance for every batch member, a `Tensor` with same `dtype` as self.
    """
        alpha = self._alpha
        beta = self._beta
        with ops.name_scope(self.name):
            with ops.op_scope([alpha, beta], name):
                var_if_defined = math_ops.square(self._beta) / (
                    math_ops.square(self._alpha - 1.0) * (self._alpha - 2.0)
                )
                if self.strict_statistics:
                    two = ops.convert_to_tensor(2.0, dtype=self.dtype)
                    return control_flow_ops.with_dependencies([check_ops.assert_less(two, alpha)], var_if_defined)
                else:
                    alpha_gt_2 = alpha > 2.0
                    nan = np.nan * self._ones()
                    return math_ops.select(alpha_gt_2, var_if_defined, nan)
Example #8
  def variance(self, name="variance"):
    """Variance of each batch member.

    Variance for inverse gamma is defined only for `alpha > 2`. If
    `self.allow_nan_stats` is `False`, an exception will be raised rather
    than returning `NaN`.

    Args:
      name: A name to give this op.

    Returns:
      The variance for every batch member, a `Tensor` with same `dtype` as self.
    """
    alpha = self._alpha
    beta = self._beta
    with ops.name_scope(self.name):
      with ops.op_scope([alpha, beta], name):
        var_if_defined = (math_ops.square(self._beta) /
                          (math_ops.square(self._alpha - 1.0) *
                           (self._alpha - 2.0)))
        if self.allow_nan_stats:
          alpha_gt_2 = alpha > 2.0
          nan = np.nan * self._ones()
          return math_ops.select(alpha_gt_2, var_if_defined, nan)
        else:
          two = constant_op.constant(2.0, dtype=self.dtype)
          return control_flow_ops.with_dependencies(
              [check_ops.assert_less(
                  two, alpha,
                  message="variance not defined for components of alpha <= 2")],
              var_if_defined)
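The formula `beta**2 / ((alpha - 1)**2 * (alpha - 2))` agrees with SciPy's inverse-gamma parameterization (shape `alpha`, scale `beta`). A sketch with arbitrary parameter values, assuming SciPy is available:

```python
import numpy as np
from scipy.stats import invgamma

alpha, beta = 4.0, 3.0  # arbitrary values with alpha > 2
closed_form = beta**2 / ((alpha - 1.0)**2 * (alpha - 2.0))  # 0.5 here
assert np.isclose(invgamma(alpha, scale=beta).var(), closed_form)
```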
Example #9
  def mean(self, name="mean"):
    """Mean of each batch member.

    The mean of an inverse gamma distribution is `beta / (alpha - 1)`,
    when `alpha > 1`, and `NaN` otherwise.  If `self.allow_nan_stats` is
    `False`, an exception will be raised rather than returning `NaN`

    Args:
      name: A name to give this op.

    Returns:
      The mean for every batch member, a `Tensor` with same `dtype` as self.
    """
    alpha = self._alpha
    beta = self._beta
    with ops.name_scope(self.name):
      with ops.op_scope([alpha, beta], name):
        mean_if_defined = beta / (alpha - 1.0)
        if self.allow_nan_stats:
          alpha_gt_1 = alpha > 1.0
          nan = np.nan * self._ones()
          return math_ops.select(alpha_gt_1, mean_if_defined, nan)
        else:
          one = constant_op.constant(1.0, dtype=self.dtype)
          return control_flow_ops.with_dependencies(
              [check_ops.assert_less(
                  one, alpha,
                  message="mean not defined for components of alpha <= 1")],
              mean_if_defined)
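Likewise, `beta / (alpha - 1)` matches SciPy's inverse-gamma mean for `alpha > 1` (a sketch with arbitrary values):

```python
import numpy as np
from scipy.stats import invgamma

alpha, beta = 3.0, 2.0  # arbitrary values with alpha > 1
assert np.isclose(invgamma(alpha, scale=beta).mean(), beta / (alpha - 1.0))
```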
Example #10
  def mode(self, name="mode"):
    """Mode of each batch member.

    The mode of a gamma distribution is `(alpha - 1) / beta` when `alpha > 1`,
    and `NaN` otherwise.  If `self.strict_statistics` is `True`, an exception
    will be raised rather than returning `NaN`.

    Args:
      name:  A name to give this op.

    Returns:
      The mode for every batch member, a `Tensor` with same `dtype` as self.
    """
    alpha = self._alpha
    beta = self._beta
    with ops.name_scope(self.name):
      with ops.op_scope([alpha, beta], name):
        mode_if_defined = (alpha - 1.0) / beta
        if self.strict_statistics:
          one = ops.convert_to_tensor(1.0, dtype=self.dtype)
          return control_flow_ops.with_dependencies(
              [check_ops.assert_less(one, alpha)], mode_if_defined)
        else:
          alpha_ge_1 = alpha >= 1.0
          nan = np.nan * self._ones()
          return math_ops.select(alpha_ge_1, mode_if_defined, nan)
Example #11
  def mode(self, name="mode"):
    """Mode of each batch member.

    The mode of a gamma distribution is `(alpha - 1) / beta` when `alpha > 1`,
    and `NaN` otherwise.  If `self.allow_nan_stats` is `False`, an exception
    will be raised rather than returning `NaN`.

    Args:
      name:  A name to give this op.

    Returns:
      The mode for every batch member, a `Tensor` with same `dtype` as self.
    """
    alpha = self._alpha
    beta = self._beta
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[alpha, beta]):
        mode_if_defined = (alpha - 1.0) / beta
        if self.allow_nan_stats:
          alpha_ge_1 = alpha >= 1.0
          nan = np.nan * self._ones()
          return math_ops.select(alpha_ge_1, mode_if_defined, nan)
        else:
          one = constant_op.constant(1.0, dtype=self.dtype)
          return control_flow_ops.with_dependencies(
              [check_ops.assert_less(
                  one, alpha,
                  message="mode not defined for components of alpha <= 1"
              )], mode_if_defined)
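The gamma mode `(alpha - 1) / beta` (with `beta` a rate, so SciPy's `scale` is `1 / beta`) can be checked against a numerical argmax of the density; parameter values are arbitrary:

```python
import numpy as np
from scipy.stats import gamma

alpha, beta = 3.0, 2.0  # arbitrary shape/rate with alpha > 1
x = np.linspace(1e-4, 10.0, 200001)
numerical_mode = x[np.argmax(gamma.pdf(x, alpha, scale=1.0 / beta))]
assert abs(numerical_mode - (alpha - 1.0) / beta) < 1e-3  # mode = 1.0 here
```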
Example #12
 def test_doesnt_raise_when_both_empty(self):
   with self.test_session():
     larry = constant_op.constant([])
     curly = constant_op.constant([])
     with ops.control_dependencies([check_ops.assert_less(larry, curly)]):
       out = array_ops.identity(larry)
     out.eval()
Example #13
  def mean(self, name="mean"):
    """Mean of the distribution.

    The mean of Student's T equals `mu` if `df > 1`, otherwise it is `NaN`.  If
    `self.allow_nan_stats=False`, then an exception will be raised rather than
    returning `NaN`.

    Args:
      name:  A name to give this op.

    Returns:
      The mean for every batch member, a `Tensor` with same `dtype` as self.
    """
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[self._mu]):
        result_if_defined = self._mu * self._ones()
        if self.allow_nan_stats:
          df_gt_1 = self._df > self._ones()
          nan = np.nan + self._zeros()
          return math_ops.select(df_gt_1, result_if_defined, nan)
        else:
          one = constant_op.constant(1.0, dtype=self.dtype)
          return control_flow_ops.with_dependencies(
              [check_ops.assert_less(
                  one, self._df,
                  message="mean not defined for components of df <= 1"
              )], result_if_defined)
Example #14
  def _variance(self):
    # We need to put the tf.where inside the outer tf.where to ensure we never
    # hit a NaN in the gradient.
    denom = array_ops.where(math_ops.greater(self.df, 2.),
                            self.df - 2.,
                            array_ops.ones_like(self.df))
    # Abs(scale) superfluous.
    var = (array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype) *
           math_ops.square(self.scale) * self.df / denom)
    # When 1 < df <= 2, variance is infinite.
    inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
    result_where_defined = array_ops.where(
        self.df > array_ops.fill(self.batch_shape_tensor(), 2.),
        var,
        array_ops.fill(self.batch_shape_tensor(), inf, name="inf"))

    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return array_ops.where(
          math_ops.greater(
              self.df,
              array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
          result_where_defined,
          array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies(
          [
              check_ops.assert_less(
                  array_ops.ones([], dtype=self.dtype),
                  self.df,
                  message="variance not defined for components of df <= 1"),
          ],
          result_where_defined)
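For `df > 2` the defined branch reduces to `scale**2 * df / (df - 2)`, which agrees with SciPy's Student's t (a sketch with illustrative parameters):

```python
import numpy as np
from scipy.stats import t as student_t

df, scale = 5.0, 2.0  # arbitrary values with df > 2
closed_form = scale**2 * df / (df - 2.0)
assert np.isclose(student_t(df, loc=0.0, scale=scale).var(), closed_form)
```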
Example #15
 def test_raises_when_greater(self):
   small = constant_op.constant([1, 2], name="small")
   big = constant_op.constant([3, 4], name="big")
   with self.assertRaisesOpError("x < y did not hold"):
     with ops.control_dependencies([check_ops.assert_less(big, small)]):
       out = array_ops.identity(small)
     self.evaluate(out)
Example #16
  def mode(self, name="mode"):
    """Mode of the distribution.

    Note that the mode for the Beta distribution is only defined
    when `alpha > 1`. This returns the mode when `alpha > 1`,
    and NaN otherwise. If `self.allow_nan_stats` is `False`, an exception
    will be raised rather than returning `NaN`.

    Args:
      name: The name for this op.

    Returns:
      Mode of the Dirichlet distribution.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._alpha, self._alpha_0], name):
        one = constant_op.constant(1, self.dtype)
        mode = (self._alpha - 1) / (
            array_ops.expand_dims(self._alpha_0, -1) - math_ops.cast(
                self.event_shape()[0], self.dtype))

        if self.allow_nan_stats:
          return math_ops.select(
              math_ops.greater(self._alpha, 1),
              mode,
              (constant_op.constant(float("NaN"), dtype=self.dtype) *
               array_ops.ones_like(self._alpha, dtype=self.dtype)))
        else:
          return control_flow_ops.with_dependencies([
              check_ops.assert_less(
                  one, self._alpha,
                  message="mode not defined for components of alpha <= 1")
          ], mode)
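The Dirichlet mode `(alpha - 1) / (alpha_0 - K)` can be sanity-checked without TensorFlow: it must lie on the simplex, and the gradient of the log-density must be equal across components (the Lagrange condition for a constrained maximum). A small NumPy sketch with an arbitrary `alpha`:

```python
import numpy as np

alpha = np.array([2.0, 3.0, 4.0])  # arbitrary concentrations, all > 1
mode = (alpha - 1.0) / (alpha.sum() - len(alpha))
assert np.isclose(mode.sum(), 1.0)    # mode lies on the simplex
grads = (alpha - 1.0) / mode          # d/dx_i of sum((alpha_i - 1) log x_i)
assert np.allclose(grads, grads[0])   # all equal => stationary point
```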
Example #17
  def __init__(self,
               a=0.0,
               b=1.0,
               validate_args=True,
               allow_nan_stats=False,
               name="Uniform"):
    """Construct Uniform distributions with `a` and `b`.

    The parameters `a` and `b` must be shaped in a way that supports
    broadcasting (e.g. `b - a` is a valid operation).

    Here are examples without broadcasting:

    ```python
    # Without broadcasting
    u1 = Uniform(3.0, 4.0)  # a single uniform distribution [3, 4]
    u2 = Uniform([1.0, 2.0], [3.0, 4.0])  # 2 distributions [1, 3], [2, 4]
    u3 = Uniform([[1.0, 2.0],
                  [3.0, 4.0]],
                 [[1.5, 2.5],
                  [3.5, 4.5]])  # 4 distributions
    ```

    And with broadcasting:

    ```python
    u1 = Uniform(3.0, [5.0, 6.0, 7.0])  # 3 distributions
    ```

    Args:
      a: Floating point tensor, the minimum endpoint.
      b: Floating point tensor, the maximum endpoint. Must be > `a`.
      validate_args: Whether to assert that `a < b`. If `validate_args` is
        `False` and inputs are invalid, correct behavior is not guaranteed.
      allow_nan_stats:  Boolean, default `False`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to prefix Ops created by this distribution class.

    Raises:
      InvalidArgumentError: if `a >= b` and `validate_args=True`.
    """
    self._allow_nan_stats = allow_nan_stats
    self._validate_args = validate_args
    with ops.name_scope(name, values=[a, b]):
      with ops.control_dependencies([check_ops.assert_less(
          a, b, message="uniform not defined when a >= b.")] if validate_args
                                    else []):
        a = array_ops.identity(a, name="a")
        b = array_ops.identity(b, name="b")

    self._a = a
    self._b = b
    self._name = name
    self._batch_shape = common_shapes.broadcast_shape(
        self._a.get_shape(), self._b.get_shape())
    self._event_shape = tensor_shape.TensorShape([])

    contrib_tensor_util.assert_same_float_dtype((a, b))
Example #18
 def test_raises_when_less_but_non_broadcastable_shapes(self):
   with self.test_session():
     small = constant_op.constant([1, 1, 1], name="small")
     big = constant_op.constant([3, 2], name="big")
     with self.assertRaisesRegexp(ValueError, "must be"):
       with ops.control_dependencies([check_ops.assert_less(small, big)]):
         out = array_ops.identity(small)
       out.eval()
Example #19
 def _check_x(self, x):
   """Check x for proper shape, values, then return tensor version."""
   x = ops.convert_to_tensor(x, name="x_before_deps")
   dependencies = [
       check_ops.assert_positive(x),
       check_ops.assert_less(x, constant_op.constant(
           1, self.dtype))] if self.validate_args else []
   return control_flow_ops.with_dependencies(dependencies, x)
Example #20
 def test_raises_when_equal(self):
   small = constant_op.constant([1, 2], name="small")
   with self.assertRaisesOpError("failure message.*\n*.* x < y did not hold"):
     with ops.control_dependencies(
         [check_ops.assert_less(
             small, small, message="failure message")]):
       out = array_ops.identity(small)
     self.evaluate(out)
Example #21
def _assert_range(labels, n_classes):
  assert_less = check_ops.assert_less(
      labels,
      ops.convert_to_tensor(n_classes, dtype=labels.dtype),
      message='Label IDs must be < n_classes')
  assert_greater = check_ops.assert_non_negative(
       labels, message='Label IDs must be >= 0')
  with ops.control_dependencies((assert_less, assert_greater)):
    return array_ops.identity(labels)
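The same pattern is available through the public `tf.debugging` API. A minimal graph-mode sketch; the `labels` values and `n_classes` here are made up for illustration:

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

labels = tf.constant([0, 2, 1], dtype=tf.int64)  # hypothetical label ids
n_classes = 3
assert_less = tf.debugging.assert_less(
    labels, tf.constant(n_classes, dtype=tf.int64),
    message='Label IDs must be < n_classes')
assert_non_negative = tf.debugging.assert_non_negative(
    labels, message='Label IDs must be >= 0')
with tf.control_dependencies([assert_less, assert_non_negative]):
    checked_labels = tf.identity(labels)

with tf.Session() as sess:
    sess.run(checked_labels)  # passes; an out-of-range id would raise
```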
Example #22
 def test_raises_when_equal(self):
   with self.test_session():
     small = constant_op.constant([1, 2], name="small")
     with ops.control_dependencies(
         [check_ops.assert_less(
             small, small, message="fail")]):
       out = array_ops.identity(small)
     with self.assertRaisesOpError("fail.*small.*small"):
       out.eval()
Example #23
 def _check_x(self, x):
   """Check x for proper shape, values, then return tensor version."""
   x = ops.convert_to_tensor(x, name="x_before_deps")
   candidate_one = math_ops.reduce_sum(x, reduction_indices=[-1])
   one = constant_op.constant(1., self.dtype)
   dependencies = [check_ops.assert_positive(x), check_ops.assert_less(
       x, one, message="x has components greater than or equal to 1"),
                   distribution_util.assert_close(one, candidate_one)
                  ] if self.validate_args else []
   return control_flow_ops.with_dependencies(dependencies, x)
Example #24
 def _mode(self):
   mode = (self.concentration1 - 1.) / (self.total_concentration - 2.)
   if self.allow_nan_stats:
     nan = array_ops.fill(
         self.batch_shape_tensor(),
         np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
         name="nan")
     is_defined = math_ops.logical_and(self.concentration1 > 1.,
                                       self.concentration0 > 1.)
     return array_ops.where(is_defined, mode, nan)
   return control_flow_ops.with_dependencies([
       check_ops.assert_less(
           array_ops.ones([], dtype=self.dtype),
           self.concentration1,
           message="Mode undefined for concentration1 <= 1."),
       check_ops.assert_less(
           array_ops.ones([], dtype=self.dtype),
           self.concentration0,
           message="Mode undefined for concentration0 <= 1.")
   ], mode)
Example #25
 def _check_x(self, x):
     """Check x for proper shape, values, then return tensor version."""
     x = ops.convert_to_tensor(x, name="x_before_deps")
     candidate_one = math_ops.reduce_sum(x, reduction_indices=[-1])
     one = constant_op.constant(1.0, self.dtype)
     dependencies = (
         [check_ops.assert_positive(x), check_ops.assert_less(x, one), _assert_close(one, candidate_one)]
         if self.validate_args
         else []
     )
     return control_flow_ops.with_dependencies(dependencies, x)
Example #26
 def _maybe_assert_valid_sample(self, x):
   """Checks the validity of a sample."""
   if not self.validate_args:
     return x
   return control_flow_ops.with_dependencies([
       check_ops.assert_positive(x, message="sample must be positive"),
       check_ops.assert_less(
           x,
           array_ops.ones([], self.dtype),
           message="sample must be less than `1`."),
   ], x)
Example #27
 def _assert_valid_sample(self, x):
   """Check x for proper shape, values, then return tensor version."""
   if not self.validate_args: return x
   return control_flow_ops.with_dependencies([
       check_ops.assert_positive(
           x,
           message="Negative events lie outside Beta distribution support."),
       check_ops.assert_less(
           x, array_ops.ones((), self.dtype),
           message="Event>=1 lies outside Beta distribution support."),
   ], x)
Example #28
  def __init__(
      self, a=0.0, b=1.0, strict=True, strict_statistics=True, name="Uniform"):
    """Construct Uniform distributions with `a` and `b`.

    The parameters `a` and `b` must be shaped in a way that supports
    broadcasting (e.g. `b - a` is a valid operation).

    Here are examples without broadcasting:

    ```python
    # Without broadcasting
    u1 = Uniform(3.0, 4.0)  # a single uniform distribution [3, 4]
    u2 = Uniform([1.0, 2.0], [3.0, 4.0])  # 2 distributions [1, 3], [2, 4]
    u3 = Uniform([[1.0, 2.0],
                  [3.0, 4.0]],
                 [[1.5, 2.5],
                  [3.5, 4.5]])  # 4 distributions
    ```

    And with broadcasting:

    ```python
    u1 = Uniform(3.0, [5.0, 6.0, 7.0])  # 3 distributions
    ```

    Args:
      a: `float` or `double` tensor, the minimum endpoint.
      b: `float` or `double` tensor, the maximum endpoint. Must be > `a`.
      strict: Whether to assert that `a < b`. If `strict` is False and inputs
        are invalid, correct behavior is not guaranteed.
      strict_statistics:  Boolean, default True.  If True, raise an exception if
        a statistic (e.g. mean/mode/etc...) is undefined for any batch member.
        If False, batch members with valid parameters leading to undefined
        statistics will return NaN for this statistic.
      name: The name to prefix Ops created by this distribution class.

    Raises:
      InvalidArgumentError: if `a >= b` and `strict=True`.
    """
    self._strict_statistics = strict_statistics
    self._strict = strict
    with ops.op_scope([a, b], name):
      with ops.control_dependencies(
          [check_ops.assert_less(a, b)] if strict else []):
        a = array_ops.identity(a, name="a")
        b = array_ops.identity(b, name="b")

    self._a = a
    self._b = b
    self._name = name
    self._batch_shape = self._ones().get_shape()
    self._event_shape = tensor_shape.TensorShape([])

    contrib_tensor_util.assert_same_float_dtype((a, b))
Example #29
 def _mode(self):
   a = self.concentration1
   b = self.concentration0
   mode = ((a - 1) / (a * b - 1))**(1. / a)
   if self.allow_nan_stats:
     nan = array_ops.fill(
         self.batch_shape_tensor(),
         np.array(np.nan, dtype=self.dtype.as_numpy_dtype),
         name="nan")
     is_defined = (self.concentration1 > 1.) & (self.concentration0 > 1.)
     return array_ops.where(is_defined, mode, nan)
   return control_flow_ops.with_dependencies([
       check_ops.assert_less(
           array_ops.ones([], dtype=self.dtype),
           self.concentration1,
           message="Mode undefined for concentration1 <= 1."),
       check_ops.assert_less(
           array_ops.ones([], dtype=self.dtype),
           self.concentration0,
           message="Mode undefined for concentration0 <= 1.")
   ], mode)
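This is the Kumaraswamy mode, `((a - 1) / (a*b - 1))**(1/a)`, checkable against a numerical argmax of the density `a*b*x**(a-1) * (1 - x**a)**(b-1)`; the parameter values below are arbitrary:

```python
import numpy as np

a, b = 2.0, 3.0  # arbitrary concentrations, both > 1
x = np.linspace(1e-4, 1.0 - 1e-4, 100001)
pdf = a * b * x**(a - 1.0) * (1.0 - x**a)**(b - 1.0)
numerical_mode = x[np.argmax(pdf)]
closed_form = ((a - 1.0) / (a * b - 1.0))**(1.0 / a)  # 1/sqrt(5) here
assert abs(numerical_mode - closed_form) < 1e-3
```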
Example #30
def _FakeQuantWithMinMaxVars(inputs, min_var, max_var, per_channel, num_bits,
                             narrow_range):
  """Adds a fake quantization operation.

  Depending on value of per_channel, this operation may do global quantization
  or per channel quantization.  min_var and max_var should have corresponding
  shapes: [] when per_channel == False and [d] when per_channel == True.

  Args:
    inputs: a tensor containing values to be quantized.
    min_var: a variable containing quantization range lower end(s).
    max_var: a variable containing quantization range upper end(s).
    per_channel: a boolean specifying whether to use per-channel quantization.
    num_bits: Number of bits to use for quantization, must be between 2 and 8.
    narrow_range: Whether to use the narrow quantization range
      [1; 2^num_bits - 1] or wide range [0; 2^num_bits - 1].
  Returns:
    a tensor containing quantized values.
  """

  if per_channel:
    assert len(min_var.get_shape()) == 1
    assert len(max_var.get_shape()) == 1
    with ops.control_dependencies([check_ops.assert_less(min_var, max_var)]):
      return array_ops.fake_quant_with_min_max_vars_per_channel(
          inputs,
          min_var,
          max_var,
          num_bits=num_bits,
          narrow_range=narrow_range)
  else:
    assert min_var.get_shape() == []  # pylint: disable=g-explicit-bool-comparison
    assert max_var.get_shape() == []  # pylint: disable=g-explicit-bool-comparison
    with ops.control_dependencies([check_ops.assert_less(min_var, max_var)]):
      return array_ops.fake_quant_with_min_max_vars(
          inputs,
          min_var,
          max_var,
          num_bits=num_bits,
          narrow_range=narrow_range)
Example #31
 def _maybe_assert_valid_concentration(self, concentration, validate_args):
     """Checks the validity of the concentration parameter."""
     if not validate_args:
         return concentration
     return control_flow_ops.with_dependencies([
         check_ops.assert_positive(
             concentration,
             message="Concentration parameter must be positive."),
         check_ops.assert_rank_at_least(
             concentration,
             1,
             message="Concentration parameter must have >=1 dimensions."),
         check_ops.assert_less(
             1,
             array_ops.shape(concentration)[-1],
             message="Concentration parameter must have event_size >= 2."),
     ], concentration)
Example #32
 def _variance(self):
     var = (math_ops.square(self.rate) /
            math_ops.square(self.concentration - 1.) /
            (self.concentration - 2.))
     if self.allow_nan_stats:
         nan = array_ops.fill(self.batch_shape_tensor(),
                              np.array(np.nan,
                                       dtype=self.dtype.as_numpy_dtype()),
                              name="nan")
         return array_ops.where(self.concentration > 2., var, nan)
     else:
         return control_flow_ops.with_dependencies([
             check_ops.assert_less(
                 constant_op.constant(2., dtype=self.dtype),
                 self.concentration,
                 message="variance undefined when any concentration <= 2"),
         ], var)
Example #33
 def _mode(self):
   k = math_ops.cast(self.event_shape_tensor()[0], self.dtype)
   mode = (self.concentration - 1.) / (
       self.total_concentration[..., array_ops.newaxis] - k)
   if self.allow_nan_stats:
     nan = array_ops.fill(
         array_ops.shape(mode),
         np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
         name="nan")
     return array_ops.where_v2(
         math_ops.reduce_all(self.concentration > 1., axis=-1), mode, nan)
   return control_flow_ops.with_dependencies([
       check_ops.assert_less(
           array_ops.ones([], self.dtype),
           self.concentration,
           message="Mode undefined when any concentration <= 1"),
   ], mode)
Example #34
 def _mode(self):
   mode = ((self.alpha - 1.) /
           (array_ops.expand_dims(self.alpha_sum, dim=-1) -
            math_ops.cast(self.event_shape()[0], self.dtype)))
   if self.allow_nan_stats:
     nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
     shape = array_ops.concat(0, (self.batch_shape(), self.event_shape()))
     return array_ops.where(
         math_ops.greater(self.alpha, 1.),
         mode,
         array_ops.fill(shape, nan, name="nan"))
   else:
     return control_flow_ops.with_dependencies([
         check_ops.assert_less(
             array_ops.ones((), dtype=self.dtype), self.alpha,
             message="mode not defined for components of alpha <= 1")
     ], mode)
Example #35
 def _assert_non_singular(self):
   """Private default implementation of _assert_non_singular."""
   logging.warn(
       "Using (possibly slow) default implementation of assert_non_singular."
       "  Requires conversion to a dense matrix and O(N^3) operations.")
   if self._can_use_cholesky():
     return self.assert_positive_definite()
   else:
     singular_values = linalg_ops.svd(
         self._get_cached_dense_matrix(), compute_uv=False)
     # TODO(langmore) Add .eig and .cond as methods.
     cond = (math_ops.reduce_max(singular_values, axis=-1) /
             math_ops.reduce_min(singular_values, axis=-1))
     return check_ops.assert_less(
         cond,
         self._max_condition_number_to_be_non_singular(),
         message="Singular matrix up to precision epsilon.")
Example #36
def _assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
    expected = ops.convert_to_tensor(expected, name='expected')
    actual = ops.convert_to_tensor(actual, name='actual')
    rdiff = math_ops.abs(expected - actual, 'diff') / math_ops.abs(expected)
    rtol = ops.convert_to_tensor(rtol, name='rtol')
    return check_ops.assert_less(
        rdiff,
        rtol,
        data=(
            'Condition expected =~ actual did not hold element-wise:'
            'expected = ', expected,
            'actual = ', actual,
            'rdiff = ', rdiff,
            'rtol = ', rtol,
        ),
        name=scope)
Example #37
    def __init__(self,
                 low=0.,
                 high=1.,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="Uniform"):
        """Initialize a batch of Uniform distributions.

    Args:
      low: Floating point tensor, lower boundary of the output interval. Must
        have `low < high`.
      high: Floating point tensor, upper boundary of the output interval. Must
        have `low < high`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      InvalidArgumentError: if `low >= high` and `validate_args=True`.
    """
        parameters = dict(locals())
        with ops.name_scope(name, values=[low, high]) as name:
            with ops.control_dependencies([
                    check_ops.assert_less(
                        low,
                        high,
                        message="uniform not defined when low >= high.")
            ] if validate_args else []):
                self._low = array_ops.identity(low, name="low")
                self._high = array_ops.identity(high, name="high")
                check_ops.assert_same_float_dtype([self._low, self._high])
        super(Uniform, self).__init__(
            dtype=self._low.dtype,
            reparameterization_type=distribution.FULLY_REPARAMETERIZED,
            validate_args=validate_args,
            allow_nan_stats=allow_nan_stats,
            parameters=parameters,
            graph_parents=[self._low, self._high],
            name=name)
Example #38
def assert_no_entries_with_modulus_zero(
    x, message=None, name="assert_no_entries_with_modulus_zero"):
  """Returns `Op` that asserts Tensor `x` has no entries with modulus zero.

  Args:
    x:  Numeric `Tensor`, real, integer, or complex.
    message:  A string message to prepend to failure message.
    name:  A name to give this `Op`.

  Returns:
    An `Op` that asserts `x` has no entries with modulus zero.
  """
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    dtype = x.dtype.base_dtype
    should_be_nonzero = math_ops.abs(x)
    zero = ops.convert_to_tensor(0, dtype=dtype.real_dtype)
    return check_ops.assert_less(zero, should_be_nonzero, message=message)
Example #39
 def _mean(self):
     mean = self.loc * array_ops.ones(self.batch_shape_tensor(),
                                      dtype=self.dtype)
     if self.allow_nan_stats:
         nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
         return array_ops.where(
             math_ops.greater(
                 self.df,
                 array_ops.ones(self.batch_shape_tensor(),
                                dtype=self.dtype)), mean,
             array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
     else:
         return control_flow_ops.with_dependencies([
             check_ops.assert_less(
                 array_ops.ones([], dtype=self.dtype),
                 self.df,
                 message="mean not defined for components of df <= 1"),
         ], mean)
Example #40
def _verify_input(data, labels, probs):
    """Verify that batched inputs are well-formed."""
    # Probabilities must be able to be converted to a 1D non-object numpy array.
    probs = np.asarray(probs)
    if probs.dtype == np.dtype('object'):
        raise ValueError(
            'Probabilities must be able to be converted to a numpy '
            'array.')
    if len(probs.shape) != 1:
        raise ValueError('Probabilities must be 1D.')

    # Probabilities must sum to one.
    # TODO(joelshor): Investigate whether logits should be passed instead of
    # probs.
    if not np.isclose(np.sum(probs), 1.0):
        raise ValueError('Probabilities must sum to one.', np.sum(probs))

    # Labels tensor should only have batch dimension.
    labels.get_shape().assert_has_rank(1)

    # Data tensor should have a batch dimension.
    data_shape = data.get_shape().with_rank_at_least(1)

    # Data and label batch dimensions must be compatible.
    data_shape[0].assert_is_compatible_with(labels.get_shape()[0])

    # Data and labels must have the same, strictly positive batch size. Since we
    # can't assume we know the batch size at graph creation, add runtime checks.
    data_batch_size = array_ops.shape(data)[0]
    labels_batch_size = array_ops.shape(labels)[0]

    data = control_flow_ops.with_dependencies([
        check_ops.assert_positive(data_batch_size),
        check_ops.assert_equal(data_batch_size, labels_batch_size)
    ], data)

    # Label's classes must be integers 0 <= x < num_classes.
    labels = control_flow_ops.with_dependencies([
        check_ops.assert_integer(labels),
        check_ops.assert_non_negative(labels),
        check_ops.assert_less(labels, math_ops.cast(len(probs), labels.dtype))
    ], labels)

    return data, labels, probs
Example #41
    def variance(self, name="variance"):
        """Variance of the distribution.

    Variance for Student's T equals

    ```
    df / (df - 2), when df > 2
    infinity, when 1 < df <= 2
    NaN, when df <= 1
    ```

    The NaN state occurs because mean is undefined for `df <= 1`, and if
    `self.allow_nan_stats` is `False`, an exception will be raised if any batch
    members fall into this state.

    Args:
      name:  A name for this op.

    Returns:
      The variance for every batch member, a `Tensor` with same `dtype` as self.
    """
        with ops.name_scope(self.name):
            with ops.name_scope(name, values=[self._df, self._sigma]):
                result_where_finite = (
                    self._zeros() + math_ops.square(self._sigma) * self._df /
                    (self._df - 2))
                # When 1 < df <= 2, variance is infinite.
                result_where_defined = math_ops.select(
                    self._zeros() + self._df > 2, result_where_finite,
                    self._zeros() + np.inf)

                if self.allow_nan_stats:
                    return math_ops.select((self._zeros() + self._df > 1),
                                           result_where_defined,
                                           self._zeros() + np.nan)
                else:
                    one = constant_op.constant(1.0, dtype=self.dtype)
                    return control_flow_ops.with_dependencies([
                        check_ops.assert_less(
                            one,
                            self._df,
                            message=
                            "variance not defined for components of df <= 1")
                    ], result_where_defined)
Example #42
  def __init__(self, a=0.0, b=1.0, name="Uniform"):
    """Construct Uniform distributions with `a` and `b`.

    The parameters `a` and `b` must be shaped in a way that supports
    broadcasting (e.g. `b - a` is a valid operation).

    Here are examples without broadcasting:

    ```python
    # Without broadcasting
    u1 = Uniform(3.0, 4.0)  # a single uniform distribution [3, 4]
    u2 = Uniform([1.0, 2.0], [3.0, 4.0])  # 2 distributions [1, 3], [2, 4]
    u3 = Uniform([[1.0, 2.0],
                  [3.0, 4.0]],
                 [[1.5, 2.5],
                  [3.5, 4.5]])  # 4 distributions
    ```

    And with broadcasting:

    ```python
    u1 = Uniform(3.0, [5.0, 6.0, 7.0])  # 3 distributions
    ```

    Args:
      a: `float` or `double` tensor, the minimum endpoint.
      b: `float` or `double` tensor, the maximum endpoint. Must be > `a`.
      name: The name to prefix Ops created by this distribution class.

    Raises:
      InvalidArgumentError: if `a >= b`.
    """
    with ops.op_scope([a, b], name):
      with ops.control_dependencies([check_ops.assert_less(a, b)]):
        a = array_ops.identity(a, name="a")
        b = array_ops.identity(b, name="b")

    self._a = a
    self._b = b
    self._name = name
    self._batch_shape = self._ones().get_shape()
    self._event_shape = tensor_shape.TensorShape([])

    contrib_tensor_util.assert_same_float_dtype((a, b))
Example #43
    def assert_hermitian_spectrum(self, name="assert_hermitian_spectrum"):
        """Returns an `Op` that asserts this operator has Hermitian spectrum.

    This operator corresponds to a real-valued matrix if and only if its
    spectrum is Hermitian.

    Args:
      name:  A name to give this `Op`.

    Returns:
      An `Op` that asserts this operator has Hermitian spectrum.
    """
        eps = np.finfo(self.dtype.real_dtype.as_numpy_dtype).eps
        with self._name_scope(name):  # pylint: disable=not-callable
            # Assume linear accumulation of error.
            max_err = eps * self.domain_dimension_tensor()
            imag_convolution_kernel = math_ops.imag(self.convolution_kernel())
            return check_ops.assert_less(math_ops.abs(imag_convolution_kernel),
                                         max_err,
                                         message="Spectrum was not Hermitian")
Example #44
def _assert_close(
    expected, actual, rtol=1e-04, message='', name='assert_close'):
  with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
    expected = ops.convert_to_tensor(expected, name='expected')
    actual = ops.convert_to_tensor(actual, name='actual')
    rdiff = math_ops.abs((expected - actual) / expected, 'diff')
    rtol = ops.convert_to_tensor(rtol, name='rtol')
    return check_ops.assert_less(
        rdiff,
        rtol,
        data=(
            message,
            'Condition expected =~ actual did not hold element-wise:'
            'expected = ', expected,
            'actual = ', actual,
            'rdiff = ', rdiff,
            'rtol = ', rtol,
        ),
        summarize=expected.get_shape().num_elements(),
        name=scope)
Example #45
 def batching_fn(bucket_id, grouped_dataset):
   """Batch elements in dataset."""
   batch_size = batch_sizes[bucket_id]
   none_filler = None
   if pad_to_bucket_boundary:
     err_msg = ("When pad_to_bucket_boundary=True, elements must have "
                "length <= max(bucket_boundaries).")
     check = check_ops.assert_less(
         bucket_id,
         constant_op.constant(len(bucket_batch_sizes) - 1,
                              dtype=dtypes.int64),
         message=err_msg)
     with ops.control_dependencies([check]):
       boundaries = constant_op.constant(bucket_boundaries,
                                         dtype=dtypes.int64)
       bucket_boundary = boundaries[bucket_id]
       none_filler = bucket_boundary
   shapes = make_padded_shapes(
       padded_shapes or grouped_dataset.output_shapes,
       none_filler=none_filler)
   return grouped_dataset.padded_batch(batch_size, shapes, padding_values)
Example #46
    def _variance(self):
        var = (self._ones() * math_ops.square(self.sigma) * self.df /
               (self.df - 2))
        # When 1 < df <= 2, variance is infinite.
        inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
        result_where_defined = array_ops.where(
            math_ops.greater(self.df, array_ops.fill(self.batch_shape(), 2.)),
            var, array_ops.fill(self.batch_shape(), inf, name="inf"))

        if self.allow_nan_stats:
            nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
            return array_ops.where(
                math_ops.greater(self.df, self._ones()), result_where_defined,
                array_ops.fill(self.batch_shape(), nan, name="nan"))
        else:
            return control_flow_ops.with_dependencies([
                check_ops.assert_less(
                    array_ops.ones((), dtype=self.dtype),
                    self.df,
                    message="variance not defined for components of df <= 1"),
            ], result_where_defined)
Example #47
 def _process_labels(self, labels):
     if labels is None:
         raise ValueError(
             'You must provide a labels Tensor. Given: None. '
             'Suggested troubleshooting steps: Check that your data contain '
             'your label feature. Check that your input_fn properly parses and '
             'returns labels.')
     if isinstance(labels, sparse_tensor.SparseTensor):
         if labels.dtype == dtypes.string:
             label_ids_values = lookup_ops.index_table_from_tensor(
                 vocabulary_list=tuple(self._label_vocabulary),
                 name='class_id_lookup').lookup(labels.values)
             label_ids = sparse_tensor.SparseTensor(
                 indices=labels.indices,
                 values=label_ids_values,
                 dense_shape=labels.dense_shape)
             return math_ops.to_int64(
                 sparse_ops.sparse_to_indicator(label_ids, self._n_classes))
         else:
             err_msg = (
                 r'labels must be an integer SparseTensor with values in '
                 r'[0, {})'.format(self._n_classes))
             assert_int = check_ops.assert_integer(labels.values,
                                                   message=err_msg)
             assert_less = check_ops.assert_less(labels.values,
                                                 ops.convert_to_tensor(
                                                     self._n_classes,
                                                     dtype=labels.dtype),
                                                 message=err_msg)
             assert_greater = check_ops.assert_non_negative(labels.values,
                                                            message=err_msg)
             with ops.control_dependencies(
                 [assert_int, assert_less, assert_greater]):
                 return math_ops.to_int64(
                     sparse_ops.sparse_to_indicator(labels,
                                                    self._n_classes))
     err_msg = (
         r'labels must be an integer indicator Tensor with values in [0, 1]'
     )
      return head_lib._assert_range(labels, 2, message=err_msg)  # pylint:disable=protected-access
Example #48
 def _label_ids(self, labels):
   """Converts labels to integer id space."""
   if self._label_vocabulary is None:
     if not labels.dtype.is_integer:
       raise ValueError('Labels dtype should be integer '
                        'Instead got %s.' % labels.dtype)
     label_ids = labels
   else:
     if labels.dtype != dtypes.string:
       raise ValueError('Labels dtype should be string if there is a '
                        'vocabulary. Instead got {}'.format(labels.dtype))
     label_ids = lookup_ops.index_table_from_tensor(
         vocabulary_list=tuple(self._label_vocabulary),
         name='class_id_lookup').lookup(labels)
   assert_less = check_ops.assert_less(
       label_ids,
       ops.convert_to_tensor(self._n_classes, dtype=label_ids.dtype),
        message='Label IDs must be < n_classes')
   assert_greater = check_ops.assert_non_negative(
        label_ids, message='Label IDs must be >= 0')
   with ops.control_dependencies((assert_less, assert_greater)):
     return array_ops.identity(label_ids)
Example #49
    def __init__(self,
                 low=0.,
                 high=1.,
                 temp=1e2,
                 validate_args=False,
                 allow_nan_stats=True,
                 squared=True,
                 activation=tf.nn.relu,
                 name="SoftUniform"):
        parameters = dict(locals())

        self._activation = activation
        self._squared = squared

        with ops.name_scope(name, values=[low, high]) as name:
            with ops.control_dependencies([
                    check_ops.assert_less(
                        low,
                        high,
                        message="uniform not defined when low >= high.")
            ] if validate_args else []):
                self._low = array_ops.identity(low, name="low")
                self._high = array_ops.identity(high, name="high")
                self._temp = array_ops.identity(temp, name="temp")
                check_ops.assert_same_float_dtype(
                    [self._low, self._high, self._temp])

        super(SoftUniform,
              self).__init__(dtype=self._low.dtype,
                             reparameterization_type=ds.FULLY_REPARAMETERIZED,
                             validate_args=validate_args,
                             allow_nan_stats=allow_nan_stats,
                             parameters=parameters,
                             graph_parents=[self._low, self._high, self._temp],
                             name=name)
Example #50
 def _variance(self):
     variance = (math_ops.square(self.scale) * (self.concentration - 1) /
                 (math_ops.square(self.concentration - 1) *
                  (self.concentration - 2)))
     if self.allow_nan_stats:
         nan = array_ops.fill(self.batch_shape_tensor(),
                              np.array(np.nan,
                                       dtype=self.dtype.as_numpy_dtype()),
                              name="nan")
         inf = array_ops.fill(self.batch_shape_tensor(),
                              np.array(np.inf,
                                       dtype=self.dtype.as_numpy_dtype()),
                              name="inf")
         return array_ops.where(
             self.concentration > 2., variance,
             array_ops.where(self.concentration > 1., inf, nan))
     else:
         return control_flow_ops.with_dependencies([
             check_ops.assert_less(
                 array_ops.ones([], self.dtype),
                 self.concentration,
                 message="variance not defined when any concentration <= 1"
             ),
         ], variance)
Example #51
  def mean(self, name="mean"):
    """Mean of the distribution.

    The mean of Student's T equals `mu` if `df > 1`, otherwise it is `NaN`.  If
    `self.strict_statistics=True`, then an exception will be raised rather than
    returning `NaN`.

    Args:
      name:  A name to give this op.

    Returns:
      The mean for every batch member, a `Tensor` with same `dtype` as self.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._mu], name):
        result_if_defined = self._mu * self._ones()
        if self.strict_statistics:
          one = ops.convert_to_tensor(1.0, dtype=self.dtype)
          return control_flow_ops.with_dependencies(
              [check_ops.assert_less(one, self._df)], result_if_defined)
        else:
          df_gt_1 = self._df > self._ones()
          nan = np.nan + self._zeros()
          return math_ops.select(df_gt_1, result_if_defined, nan)
Example #52
def stack_dynamic_partitions(data, partitions, num_partitions, name=None):
  """Stacks dynamic partitions of a Tensor or RaggedTensor.

  Returns a RaggedTensor `output` with `num_partitions` rows, where the row
  `output[i]` is formed by stacking all slices `data[j1...jN]` such that
  `partitions[j1...jN] = i`.  Slices of `data` are stacked in row-major
  order.

  If `num_partitions` is an `int` (not a `Tensor`), then this is equivalent to
  `tf.ragged.stack(tf.dynamic_partition(data, partitions, num_partitions))`.

  #### Example:

  >>> data           = ['a', 'b', 'c', 'd', 'e']
  >>> partitions     = [  3,   0,   2,   2,   3]
  >>> num_partitions = 5
  >>> tf.ragged.stack_dynamic_partitions(data, partitions, num_partitions)
  <tf.RaggedTensor [[b'b'], [], [b'c', b'd'], [b'a', b'e'], []]>

  Args:
    data: A `Tensor` or `RaggedTensor` containing the values to stack.
    partitions: An `int32` or `int64` `Tensor` or `RaggedTensor` specifying the
      partition that each slice of `data` should be added to. `partitions.shape`
      must be a prefix of `data.shape`.  Values must be greater than or equal to
      zero, and less than `num_partitions`. `partitions` is not required to be
      sorted.
    num_partitions: An `int32` or `int64` scalar specifying the number of
      partitions to output.  This determines the number of rows in `output`.
    name: A name prefix for the returned tensor (optional).

  Returns:
    A `RaggedTensor` containing the stacked partitions.  The returned tensor
    has the same dtype as `data`, and its shape is
    `[num_partitions, (D)] + data.shape[partitions.rank:]`, where `(D)` is a
    ragged dimension whose length is the number of data slices stacked for
    each `partition`.
  """
  with ops.name_scope(name, 'SegmentStack', [data, partitions, num_partitions]):
    # Convert inputs to tensors.
    data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data')
    row_splits_dtype = (
        data.row_splits.dtype
        if isinstance(data, ragged_tensor.RaggedTensor) else None)
    partitions = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        partitions, name='partitions', preferred_dtype=row_splits_dtype)
    num_partitions = ops.convert_to_tensor(
        num_partitions, name='num_partitions', preferred_dtype=partitions.dtype)
    if row_splits_dtype is not None:
      partitions = math_ops.cast(partitions, row_splits_dtype)
    num_partitions = math_ops.cast(num_partitions, partitions.dtype)

    # Sanity-checks for shapes.
    partitions_rank = partitions.shape.ndims
    if partitions_rank is None:
      raise ValueError('partitions must have known rank.')
    num_partitions.shape.assert_has_rank(0)
    partitions.shape.assert_is_compatible_with(data.shape[:partitions_rank])

    if partitions_rank == 0:
      # If partitions is a scalar, then just create a RaggedTensor containing
      # the complete `data` value in the specified row.
      return ragged_tensor.RaggedTensor.from_value_rowids(
          values=array_ops.stack([data]),
          value_rowids=array_ops.stack([partitions]),
          nrows=num_partitions,
          validate=False)

    elif partitions_rank == 1:
      # If partitions is a vector (the typical case): we can just use data and
      # partitions as the `values` and `value_rowids` for `from_value_rowids`,
      # as long as we sort them first.
      permutation = sort_ops.argsort(partitions, stable=True)
      value_rowids = array_ops.gather(partitions, permutation)
      values = array_ops.gather(data, permutation)
      check = check_ops.assert_less(
          value_rowids[-1:],
          num_partitions,
          message='partitions must be less than num_partitions')
      with ops.control_dependencies([check]):
        return ragged_tensor.RaggedTensor.from_value_rowids(
            values, value_rowids, nrows=num_partitions, validate=False)

    else:
      # Handle higher-dimensional partitions via recursion.
      if not isinstance(data, ragged_tensor.RaggedTensor):
        data = ragged_tensor.RaggedTensor.from_tensor(
            data, row_splits_dtype=partitions.dtype, ragged_rank=1)
      if not isinstance(partitions, ragged_tensor.RaggedTensor):
        partitions = ragged_tensor.RaggedTensor.from_tensor(
            partitions,
            row_splits_dtype=partitions.dtype,
            ragged_rank=max(data.ragged_rank, partitions_rank - 1))
      check = check_ops.assert_equal(
          data.row_splits,
          partitions.row_splits,
          message='data and partitions have incompatible ragged shapes')
      with ops.control_dependencies([check]):
        return stack_dynamic_partitions(data.values, partitions.values,
                                        num_partitions)
Example #53
def _verify_input(tensor_list, labels, probs_list):
    """Verify that batched inputs are well-formed."""
    checked_probs_list = []
    for probs in probs_list:
        # Since number of classes shouldn't change at runtime, probabilities shape
        # should be fully defined.
        probs.get_shape().assert_is_fully_defined()

        # Probabilities must be 1D.
        probs.get_shape().assert_has_rank(1)

        # Probabilities must be nonnegative and sum to one.
        tol = 1e-6
        prob_sum = math_ops.reduce_sum(probs)
        checked_probs = control_flow_ops.with_dependencies([
            check_ops.assert_non_negative(probs),
            check_ops.assert_less(prob_sum, 1.0 + tol),
            check_ops.assert_less(1.0 - tol, prob_sum)
        ], probs)
        checked_probs_list.append(checked_probs)

    # All probabilities should be the same length.
    prob_length = checked_probs_list[0].get_shape().num_elements()
    for checked_prob in checked_probs_list:
        if checked_prob.get_shape().num_elements() != prob_length:
            raise ValueError(
                'Probability parameters must have the same length.')

    # Labels tensor should only have batch dimension.
    labels.get_shape().assert_has_rank(1)

    for tensor in tensor_list:
        # Data tensor should have a batch dimension.
        shape = tensor.get_shape().with_rank_at_least(1)

        # Data and label batch dimensions must be compatible.
        tensor_shape.dimension_at_index(shape, 0).assert_is_compatible_with(
            labels.get_shape()[0])

    # Data and labels must have the same, strictly positive batch size. Since we
    # can't assume we know the batch size at graph creation, add runtime checks.
    labels_batch_size = array_ops.shape(labels)[0]
    lbl_assert = check_ops.assert_positive(labels_batch_size)

    # Make each tensor depend on its own checks.
    labels = control_flow_ops.with_dependencies([lbl_assert], labels)
    tensor_list = [
        control_flow_ops.with_dependencies([
            lbl_assert,
            check_ops.assert_equal(array_ops.shape(x)[0], labels_batch_size)
        ], x) for x in tensor_list
    ]

    # Label's classes must be integers 0 <= x < num_classes.
    labels = control_flow_ops.with_dependencies([
        check_ops.assert_integer(labels),
        check_ops.assert_non_negative(labels),
        check_ops.assert_less(labels, math_ops.cast(prob_length, labels.dtype))
    ], labels)

    return tensor_list, labels, checked_probs_list
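
The tolerance-banded sum check above can be reproduced with the public `tf.debugging` asserts; a minimal sketch (assuming TensorFlow 2.x), not the original contrib code:

```python
import tensorflow as tf

probs = tf.constant([0.2, 0.3, 0.5])
tol = 1e-6
prob_sum = tf.reduce_sum(probs)
# Entries must be nonnegative and the sum must lie in [1 - tol, 1 + tol].
checks = [
    tf.debugging.assert_non_negative(probs),
    tf.debugging.assert_less(prob_sum, 1.0 + tol),
    tf.debugging.assert_less(1.0 - tol, prob_sum),
]
with tf.control_dependencies(checks):
    checked_probs = tf.identity(probs)
```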
Example #54
    def __init__(self,
                 distribution,
                 low=None,
                 high=None,
                 validate_args=False,
                 name="QuantizedDistribution"):
        """Construct a Quantized Distribution representing `Y = ceiling(X)`.

    Some properties are inherited from the distribution defining `X`. Example:
    `allow_nan_stats` is determined for this `QuantizedDistribution` by reading
    the `distribution`.

    Args:
      distribution:  The base distribution class to transform. Typically an
        instance of `Distribution`.
      low: `Tensor` with same `dtype` as this distribution and shape
        able to be added to samples. Should be a whole number. Default `None`.
        If provided, base distribution's `prob` should be defined at
        `low`.
      high: `Tensor` with same `dtype` as this distribution and shape
        able to be added to samples. Should be a whole number. Default `None`.
        If provided, base distribution's `prob` should be defined at
        `high - 1`.
        `high` must be strictly greater than `low`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: If `distribution` is not an instance of `Distribution`, or if
          it is not continuous.
      NotImplementedError:  If the base distribution does not implement `cdf`.
    """
        parameters = locals()
        values = (list(distribution.parameters.values()) + [low, high])
        with ops.name_scope(name, values=values) as ns:
            self._dist = distribution

            if low is not None:
                low = ops.convert_to_tensor(low, name="low")
            if high is not None:
                high = ops.convert_to_tensor(high, name="high")
            contrib_tensor_util.assert_same_float_dtype(
                tensors=[self.distribution, low, high])

            # We let QuantizedDistribution access _graph_parents since this class is
            # more like a baseclass.
            graph_parents = self._dist._graph_parents  # pylint: disable=protected-access

            checks = []
            if low is not None and high is not None:
                message = "low must be strictly less than high."
                checks.append(check_ops.assert_less(low, high,
                                                    message=message))
            self._validate_args = validate_args  # self._check_integer uses this.
            with ops.control_dependencies(checks if validate_args else []):
                if low is not None:
                    self._low = self._check_integer(low)
                    graph_parents += [self._low]
                else:
                    self._low = None
                if high is not None:
                    self._high = self._check_integer(high)
                    graph_parents += [self._high]
                else:
                    self._high = None

        super(QuantizedDistribution, self).__init__(
            dtype=self._dist.dtype,
            reparameterization_type=distributions.NOT_REPARAMETERIZED,
            validate_args=validate_args,
            allow_nan_stats=self._dist.allow_nan_stats,
            parameters=parameters,
            graph_parents=graph_parents,
            name=ns)
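
The maintained version of this constructor lives in TensorFlow Probability; a minimal construction sketch (assuming `tensorflow_probability` is installed):

```python
import tensorflow_probability as tfp

tfd = tfp.distributions
# Y = ceiling(X) for X ~ Normal(0, 1), with mass outside [-3, 3] collapsed
# onto the cutoffs; validate_args enables the low < high runtime check.
q = tfd.QuantizedDistribution(
    distribution=tfd.Normal(loc=0., scale=1.),
    low=-3., high=3., validate_args=True)
print(q.prob(0.))  # P(Y = 0) = cdf(0) - cdf(-1) for the base Normal
```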
Example #55
    def __init__(self,
                 distribution,
                 lower_cutoff=None,
                 upper_cutoff=None,
                 name="QuantizedDistribution"):
        """Construct a Quantized Distribution representing `Y = ceiling(X)`.

    Some properties are inherited from the distribution defining `X`.
    In particular, `validate_args` and `allow_nan_stats` are determined for this
    `QuantizedDistribution` by reading the `distribution`.

    Args:
      distribution:  The base distribution class to transform. Typically an
        instance of `Distribution`.
      lower_cutoff:  `Tensor` with same `dtype` as this distribution and shape
        able to be added to samples.  Should be a whole number.  Default `None`.
        If provided, base distribution's pdf/pmf should be defined at
        `lower_cutoff`.
      upper_cutoff:  `Tensor` with same `dtype` as this distribution and shape
        able to be added to samples.  Should be a whole number.  Default `None`.
        If provided, base distribution's pdf/pmf should be defined at
        `upper_cutoff - 1`.
        `upper_cutoff` must be strictly greater than `lower_cutoff`.
      name: The name for the distribution.

    Raises:
      TypeError: If `distribution` is not an instance of `Distribution`, or if
          it is not continuous.
      NotImplementedError:  If the base distribution does not implement `cdf`.
    """
        values = (list(distribution.parameters.values()) +
                  [lower_cutoff, upper_cutoff])
        with ops.name_scope(name, values=values):
            self._dist = distribution
            super(QuantizedDistribution,
                  self).__init__(dtype=self._dist.dtype,
                                 parameters={
                                     "distribution": distribution,
                                     "lower_cutoff": lower_cutoff,
                                     "upper_cutoff": upper_cutoff,
                                 },
                                 is_continuous=False,
                                 is_reparameterized=False,
                                 validate_args=self._dist.validate_args,
                                 allow_nan_stats=self._dist.allow_nan_stats,
                                 name=name)

            if lower_cutoff is not None:
                lower_cutoff = ops.convert_to_tensor(lower_cutoff,
                                                     name="lower_cutoff")
            if upper_cutoff is not None:
                upper_cutoff = ops.convert_to_tensor(upper_cutoff,
                                                     name="upper_cutoff")
            contrib_tensor_util.assert_same_float_dtype(
                tensors=[self.distribution, lower_cutoff, upper_cutoff])

            checks = []
            if lower_cutoff is not None and upper_cutoff is not None:
                message = "lower_cutoff must be strictly less than upper_cutoff."
                checks.append(
                    check_ops.assert_less(lower_cutoff,
                                          upper_cutoff,
                                          message=message))

            with ops.control_dependencies(
                    checks if self.validate_args else []):
                if lower_cutoff is not None:
                    self._lower_cutoff = self._check_integer(lower_cutoff)
                else:
                    self._lower_cutoff = None
                if upper_cutoff is not None:
                    self._upper_cutoff = self._check_integer(upper_cutoff)
                else:
                    self._upper_cutoff = None
Example #56
    def __init__(self,
                 distribution,
                 lower_cutoff=None,
                 upper_cutoff=None,
                 validate_args=False,
                 name="QuantizedDistribution"):
        """Construct a Quantized Distribution representing `Y = ceiling(X)`.

    Some properties are inherited from the distribution defining `X`. Example:
    `allow_nan_stats` is determined for this `QuantizedDistribution` by reading
    the `distribution`.

    Args:
      distribution:  The base distribution class to transform. Typically an
        instance of `Distribution`.
      lower_cutoff:  `Tensor` with same `dtype` as this distribution and shape
        able to be added to samples.  Should be a whole number.  Default `None`.
        If provided, base distribution's `prob` should be defined at
        `lower_cutoff`.
      upper_cutoff:  `Tensor` with same `dtype` as this distribution and shape
        able to be added to samples.  Should be a whole number.  Default `None`.
        If provided, base distribution's `prob` should be defined at
        `upper_cutoff - 1`.
        `upper_cutoff` must be strictly greater than `lower_cutoff`.
      validate_args: Python boolean.  Whether to validate input with asserts.
        If `validate_args` is `False`, and the inputs are invalid,
        correct behavior is not guaranteed.
      name: The name for the distribution.

    Raises:
      TypeError: If `distribution` is not an instance of `Distribution`, or if
          it is not continuous.
      NotImplementedError:  If the base distribution does not implement `cdf`.
    """
        parameters = locals()
        parameters.pop("self")
        values = (list(distribution.parameters.values()) +
                  [lower_cutoff, upper_cutoff])
        with ops.name_scope(name, values=values) as ns:
            self._dist = distribution

            if lower_cutoff is not None:
                lower_cutoff = ops.convert_to_tensor(lower_cutoff,
                                                     name="lower_cutoff")
            if upper_cutoff is not None:
                upper_cutoff = ops.convert_to_tensor(upper_cutoff,
                                                     name="upper_cutoff")
            contrib_tensor_util.assert_same_float_dtype(
                tensors=[self.distribution, lower_cutoff, upper_cutoff])

            # We let QuantizedDistribution access _graph_parents since this class is
            # more like a baseclass.
            graph_parents = self._dist._graph_parents  # pylint: disable=protected-access

            checks = []
            if lower_cutoff is not None and upper_cutoff is not None:
                message = "lower_cutoff must be strictly less than upper_cutoff."
                checks.append(
                    check_ops.assert_less(lower_cutoff,
                                          upper_cutoff,
                                          message=message))
            self._validate_args = validate_args  # self._check_integer uses this.
            with ops.control_dependencies(checks if validate_args else []):
                if lower_cutoff is not None:
                    self._lower_cutoff = self._check_integer(lower_cutoff)
                    graph_parents += [self._lower_cutoff]
                else:
                    self._lower_cutoff = None
                if upper_cutoff is not None:
                    self._upper_cutoff = self._check_integer(upper_cutoff)
                    graph_parents += [self._upper_cutoff]
                else:
                    self._upper_cutoff = None

        super(QuantizedDistribution, self).__init__(
            dtype=self._dist.dtype,
            is_continuous=False,
            reparameterization_type=distributions.NOT_REPARAMETERIZED,
            validate_args=validate_args,
            allow_nan_stats=self._dist.allow_nan_stats,
            parameters=parameters,
            graph_parents=graph_parents,
            name=ns)
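
Across all three variants the quantization assigns mass `cdf(j) - cdf(j - 1)` to interior integers, with the tails collapsed onto the cutoffs. A sketch checking this against the base distribution (assuming TFP's current `low`/`high` argument names):

```python
import tensorflow_probability as tfp

tfd = tfp.distributions
base = tfd.Normal(loc=0., scale=1.)
q = tfd.QuantizedDistribution(distribution=base, low=-2., high=2.)

interior = base.cdf(1.) - base.cdf(0.)  # P(Y = 1)
at_low = base.cdf(-2.)                  # P(Y = low): left tail collapsed
at_high = 1. - base.cdf(1.)             # P(Y = high) = 1 - cdf(high - 1)
print(q.prob(1.), interior)             # the two values should agree
```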
Example #57
  def from_value_rowids(cls,
                        value_rowids,
                        nrows=None,
                        validate=True,
                        preferred_dtype=None):
    """Creates a `RowPartition` with rows partitioned by `value_rowids`.

    This `RowPartition` divides a sequence `values` into rows by specifying
    which row each value should be added to:

    ```python
    rows = [[] for _ in range(nrows)]
    for (value, rowid) in zip(values, value_rowids):
      rows[rowid].append(value)
    ```

    Args:
      value_rowids: A 1-D integer tensor with shape `[nvals]`, which corresponds
        one-to-one with `values`, and specifies each value's row index.  Must be
        nonnegative, and must be sorted in ascending order.
      nrows: An integer scalar specifying the number of rows.  This should be
        specified if the `RowPartition` may contain trailing empty rows. Must
        be greater than `value_rowids[-1]` (or greater than or equal to zero if
        `value_rowids` is empty). Defaults to `value_rowids[-1]` (or zero if
        `value_rowids` is empty).
      validate: If true, then use assertions to check that the arguments form a
        valid `RowPartition`.
      preferred_dtype: The dtype to encode value_rowids if it doesn't already
        have one. The default is tf.int64.

    Returns:
      A `RowPartition`.

    Raises:
      ValueError: If `nrows` is incompatible with `value_rowids`.

    #### Example:

    >>> print(RowPartition.from_value_rowids(
    ...     value_rowids=[0, 0, 0, 0, 2, 2, 2, 3],
    ...     nrows=4))
    tf.RowPartition(row_splits=tf.Tensor([0 4 4 7 8], shape=(5,), dtype=int64))
    """
    if not isinstance(validate, bool):
      raise TypeError("validate must have type bool")
    with ops.name_scope(None, "RowPartitionFromValueRowIds",
                        [value_rowids, nrows]):
      value_rowids = cls._convert_row_partition(value_rowids, "value_rowids",
                                                preferred_dtype)
      if nrows is None:
        const_rowids = tensor_util.constant_value(value_rowids)
        if const_rowids is None:
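          # nrows defaults to value_rowids[-1] + 1; the appended -1 sentinel
          # makes this evaluate to 0 when value_rowids is empty.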
          nrows = array_ops.concat([value_rowids[-1:], [-1]], axis=0)[0] + 1
          const_nrows = None
        else:
          const_nrows = const_rowids[-1] + 1 if const_rowids.size > 0 else 0
          nrows = ops.convert_to_tensor(
              const_nrows, value_rowids.dtype, name="nrows")
      else:
        nrows = ops.convert_to_tensor(nrows, value_rowids.dtype, "nrows")
        const_nrows = tensor_util.constant_value(nrows)
        if const_nrows is not None:
          if const_nrows < 0:
            raise ValueError("Expected nrows >= 0; got %d" % const_nrows)
          const_rowids = tensor_util.constant_value(value_rowids)
          if const_rowids is not None and const_rowids.size > 0:
            if not const_nrows >= const_rowids[-1] + 1:
              raise ValueError(
                  "Expected nrows >= value_rowids[-1] + 1; got nrows=%d, "
                  "value_rowids[-1]=%d" % (const_nrows, const_rowids[-1]))

      value_rowids.shape.assert_has_rank(1)
      nrows.shape.assert_has_rank(0)

      if validate:
        msg = ("Arguments to from_value_rowids do not form a valid "
               "RowPartition")
        checks = [
            check_ops.assert_rank(value_rowids, 1, message=msg),
            check_ops.assert_rank(nrows, 0, message=msg),
            check_ops.assert_non_negative(value_rowids[:1], message=msg),
            _assert_monotonic_increasing(value_rowids, message=msg),
            check_ops.assert_less(value_rowids[-1:], nrows, message=msg),
        ]
        value_rowids = control_flow_ops.with_dependencies(checks, value_rowids)

      # Convert value_rowids & nrows to row_splits.
      # Note: we don't use segment_ids_to_row_splits() here because we want
      # to save the intermediate value `row_lengths`, so we can cache it.
      # TODO(b/116708836) Upgrade bincount to accept int64 so we can skip the
      # cast.
      value_rowids_int32 = math_ops.cast(value_rowids, dtypes.int32)
      nrows_int32 = math_ops.cast(nrows, dtypes.int32)
      row_lengths = math_ops.bincount(
          value_rowids_int32,
          minlength=nrows_int32,
          maxlength=nrows_int32,
          dtype=value_rowids.dtype)
      row_splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0)
      if const_nrows is not None:
        row_lengths.set_shape([const_nrows])
        row_splits.set_shape([const_nrows + 1])

      return cls(
          row_splits=row_splits,
          row_lengths=row_lengths,
          value_rowids=value_rowids,
          nrows=nrows,
          internal=_row_partition_factory_key)
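
`RowPartition` is internal, but the same `value_rowids` construction (including the validation checks above) is exposed through `tf.RaggedTensor.from_value_rowids`; a minimal sketch:

```python
import tensorflow as tf

rt = tf.RaggedTensor.from_value_rowids(
    values=[3, 1, 4, 1, 5, 9, 2, 6],
    value_rowids=[0, 0, 0, 0, 2, 2, 2, 3],
    nrows=4)  # nrows is only required when there are trailing empty rows
print(rt)  # <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6]]>
```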
Example #58
    def __init__(self,
                 a=0.,
                 b=1.,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="Uniform"):
        """Construct Uniform distributions with `a` and `b`.

    The parameters `a` and `b` must be shaped in a way that supports
    broadcasting (e.g. `b - a` is a valid operation).

    Here are examples without broadcasting:

    ```python
    # Without broadcasting
    u1 = Uniform(3.0, 4.0)  # a single uniform distribution [3, 4]
    u2 = Uniform([1.0, 2.0], [3.0, 4.0])  # 2 distributions [1, 3], [2, 4]
    u3 = Uniform([[1.0, 2.0],
                  [3.0, 4.0]],
                 [[1.5, 2.5],
                  [3.5, 4.5]])  # 4 distributions
    ```

    And with broadcasting:

    ```python
    u1 = Uniform(3.0, [5.0, 6.0, 7.0])  # 3 distributions
    ```

    Args:
      a: Floating point tensor, the minimum endpoint.
      b: Floating point tensor, the maximum endpoint. Must be > `a`.
      validate_args: `Boolean`, default `False`.  Whether to validate input with
        asserts. If `validate_args` is `False`, and the inputs are invalid,
        correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to prefix Ops created by this distribution class.

    Raises:
      InvalidArgumentError: if `a >= b` and `validate_args=True`.
    """
        parameters = locals()
        parameters.pop("self")
        with ops.name_scope(name, values=[a, b]) as ns:
            with ops.control_dependencies([
                    check_ops.assert_less(
                        a, b, message="uniform not defined when a >= b.")
            ] if validate_args else []):
                self._a = array_ops.identity(a, name="a")
                self._b = array_ops.identity(b, name="b")
                contrib_tensor_util.assert_same_float_dtype((self._a, self._b))
        super(Uniform, self).__init__(dtype=self._a.dtype,
                                      is_reparameterized=True,
                                      is_continuous=True,
                                      validate_args=validate_args,
                                      allow_nan_stats=allow_nan_stats,
                                      parameters=parameters,
                                      graph_parents=[self._a, self._b],
                                      name=ns)
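
In current TensorFlow Probability the endpoints are named `low`/`high` rather than `a`/`b`; a minimal sketch of the same construction and broadcasting behavior (assuming `tensorflow_probability` is installed):

```python
import tensorflow_probability as tfp

tfd = tfp.distributions
u = tfd.Uniform(low=3.0, high=[5.0, 6.0, 7.0])  # broadcasts to 3 distributions
print(u.batch_shape)  # (3,)
# With validate_args=True, low >= high fails the assert_less check:
# tfd.Uniform(low=4.0, high=3.0, validate_args=True)  # InvalidArgumentError
```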
Example #59
def confusion_matrix(labels,
                     predictions,
                     num_classes=None,
                     weights=None,
                     dtype=dtypes.int32,
                     name=None):
    """Computes the confusion matrix from predictions and labels.

  The matrix columns represent the prediction labels and the rows represent the
  real labels. The confusion matrix is always a 2-D array of shape `[n, n]`,
  where `n` is the number of valid labels for a given classification task. Both
  prediction and labels must be 1-D arrays of the same shape in order for this
  function to work.

  If `num_classes` is `None`, then `num_classes` will be set to one plus the
  maximum value in either predictions or labels. Class labels are expected to
  start at 0. For example, if `num_classes` is 3, then the possible labels
  would be `[0, 1, 2]`.

  If `weights` is not `None`, then each prediction contributes its
  corresponding weight to the total value of the confusion matrix cell.

  For example:

  ```python
    tf.math.confusion_matrix([1, 2, 4], [2, 2, 4]) ==>
        [[0 0 0 0 0]
         [0 0 1 0 0]
         [0 0 1 0 0]
         [0 0 0 0 0]
         [0 0 0 0 1]]
  ```

  Note that the possible labels are assumed to be `[0, 1, 2, 3, 4]`,
  resulting in a 5x5 confusion matrix.

  Args:
    labels: 1-D `Tensor` of real labels for the classification task.
    predictions: 1-D `Tensor` of predictions for a given classification.
    num_classes: The possible number of labels the classification task can
                 have. If this value is not provided, it will be calculated
                 using both the predictions and labels arrays.
    weights: An optional `Tensor` whose shape matches `predictions`.
    dtype: Data type of the confusion matrix.
    name: Scope name.

  Returns:
    A `Tensor` of type `dtype` with shape `[n, n]` representing the confusion
    matrix, where `n` is the number of possible labels in the classification
    task.

  Raises:
    ValueError: If both predictions and labels are not 1-D vectors and have
      mismatched shapes, or if `weights` is not `None` and its shape doesn't
      match `predictions`.
  """
    with ops.name_scope(name, 'confusion_matrix',
                        (predictions, labels, num_classes, weights)) as name:
        labels, predictions = remove_squeezable_dimensions(
            ops.convert_to_tensor(labels, name='labels'),
            ops.convert_to_tensor(predictions, name='predictions'))
        predictions = math_ops.cast(predictions, dtypes.int64)
        labels = math_ops.cast(labels, dtypes.int64)

        # Sanity checks - underflow or overflow can cause memory corruption.
        labels = control_flow_ops.with_dependencies([
            check_ops.assert_non_negative(
                labels, message='`labels` contains negative values')
        ], labels)
        predictions = control_flow_ops.with_dependencies([
            check_ops.assert_non_negative(
                predictions, message='`predictions` contains negative values')
        ], predictions)

        if num_classes is None:
            num_classes = math_ops.maximum(math_ops.reduce_max(predictions),
                                           math_ops.reduce_max(labels)) + 1
        else:
            num_classes_int64 = math_ops.cast(num_classes, dtypes.int64)
            labels = control_flow_ops.with_dependencies([
                check_ops.assert_less(
                    labels, num_classes_int64, message='`labels` out of bound')
            ], labels)
            predictions = control_flow_ops.with_dependencies([
                check_ops.assert_less(predictions,
                                      num_classes_int64,
                                      message='`predictions` out of bound')
            ], predictions)

        if weights is not None:
            weights = ops.convert_to_tensor(weights, name='weights')
            predictions.get_shape().assert_is_compatible_with(
                weights.get_shape())
            weights = math_ops.cast(weights, dtype)

        shape = array_ops.stack([num_classes, num_classes])
        indices = array_ops.stack([labels, predictions], axis=1)
        values = (array_ops.ones_like(predictions, dtype)
                  if weights is None else weights)
        return array_ops.scatter_nd(indices=indices,
                                    updates=values,
                                    shape=math_ops.cast(shape, dtypes.int64))
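
A usage sketch of the weighted path (assuming the public `tf.math.confusion_matrix` in TensorFlow 2.x):

```python
import tensorflow as tf

cm = tf.math.confusion_matrix(
    labels=[0, 1, 1], predictions=[0, 1, 0],
    num_classes=2, weights=[1.0, 2.0, 0.5], dtype=tf.float32)
print(cm)
# [[1.  0. ]
#  [0.5 2. ]]
# A label >= num_classes (e.g. labels=[0, 1, 2] with num_classes=2) trips
# the '`labels` out of bound' assertion above.
```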
Example #60
    def __init__(self,
                 shift=None,
                 scale_identity_multiplier=None,
                 scale_diag=None,
                 scale_tril=None,
                 scale_perturb_factor=None,
                 scale_perturb_diag=None,
                 event_ndims=1,
                 validate_args=False,
                 name="affine"):
        """Instantiates the `Affine` bijector.

    This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
    giving the forward operation:

    ```none
    Y = g(X) = scale @ X + shift
    ```

    where the `scale` term is logically equivalent to:

    ```python
    scale = (
      scale_identity_multiplier * tf.diag(tf.ones(d)) +
      tf.diag(scale_diag) +
      scale_tril +
      scale_perturb_factor @ diag(scale_perturb_diag) @
        tf.transpose(scale_perturb_factor)
    )
    ```

    If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are
    specified then `scale += IdentityMatrix`. Otherwise specifying a
    `scale` argument has the semantics of `scale += Expand(arg)`, i.e.,
    `scale_diag != None` means `scale += tf.diag(scale_diag)`.

    Args:
      shift: Floating-point `Tensor`. If this is set to `None`, no shift is
        applied.
      scale_identity_multiplier: floating point rank 0 `Tensor` representing a
        scaling done to the identity matrix.
        When `scale_identity_multiplier = scale_diag = scale_tril = None` then
        `scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
        to `scale`.
      scale_diag: Floating-point `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ...  k], which represents a k x k
        diagonal matrix.
        When `None` no diagonal term is added to `scale`.
      scale_tril: Floating-point `Tensor` representing the lower triangular
        matrix. `scale_tril` has shape [N1, N2, ...  k, k], which represents a
        k x k lower triangular matrix.
        When `None` no `scale_tril` term is added to `scale`.
        The upper triangular elements above the diagonal are ignored.
      scale_perturb_factor: Floating-point `Tensor` representing factor matrix
        with last two dimensions of shape `(k, r)`. When `None`, no rank-r
        update is added to `scale`.
      scale_perturb_diag: Floating-point `Tensor` representing the diagonal
        matrix. `scale_perturb_diag` has shape [N1, N2, ...  r], which
        represents an `r x r` diagonal matrix. When `None` low rank updates will
        take the form `scale_perturb_factor * scale_perturb_factor.T`.
      event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
        associated with a particular draw from the distribution. Must be 0 or 1.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      ValueError: if `perturb_diag` is specified but not `perturb_factor`.
      TypeError: if `shift` has different `dtype` from `scale` arguments.
    """
        self._graph_parents = []
        self._name = name
        self._validate_args = validate_args
        # Ambiguous definition of low rank update.
        if scale_perturb_diag is not None and scale_perturb_factor is None:
            raise ValueError("When scale_perturb_diag is specified, "
                             "scale_perturb_factor must be specified.")
        # Special case, only handling a scaled identity matrix. We don't know its
        # dimensions, so this is special cased.
        # We don't check identity_multiplier, since below we set it to 1. if all
        # other scale args are None.
        self._is_only_identity_multiplier = (scale_tril is None
                                             and scale_diag is None
                                             and scale_perturb_factor is None)
        # When no args are specified, pretend the scale matrix is the identity
        # matrix.
        if self._is_only_identity_multiplier and scale_identity_multiplier is None:
            scale_identity_multiplier = 1.
        with self._name_scope("init",
                              values=[
                                  shift, scale_identity_multiplier, scale_diag,
                                  scale_tril, scale_perturb_diag,
                                  scale_perturb_factor, event_ndims
                              ]):
            event_ndims = ops.convert_to_tensor(event_ndims,
                                                name="event_ndims")
            if validate_args:
                is_less_than_two = check_ops.assert_less(
                    event_ndims, 2, message="event_ndims must be 0 or 1")
                event_ndims = control_flow_ops.with_dependencies(
                    [is_less_than_two], event_ndims)
            self._shift = _as_tensor(shift, "shift")
            # self._create_scale_operator returns an OperatorPD in all cases except if
            # self._is_only_identity_multiplier; in which case it returns a scalar
            # Tensor.
            self._scale = self._create_scale_operator(
                identity_multiplier=scale_identity_multiplier,
                diag=scale_diag,
                tril=scale_tril,
                perturb_diag=scale_perturb_diag,
                perturb_factor=scale_perturb_factor,
                event_ndims=event_ndims,
                validate_args=validate_args)
            if (self._shift is not None and self._shift.dtype.base_dtype !=
                    self._scale.dtype.base_dtype):
                raise TypeError(
                    "shift.dtype({}) does not match scale.dtype({})".format(
                        self._shift.dtype, self._scale.dtype))
            self._shaper = _DistributionShape(
                batch_ndims=self._infer_batch_ndims(),
                event_ndims=event_ndims,
                validate_args=validate_args)
            super(Affine, self).__init__(
                event_ndims=event_ndims,
                graph_parents=(
                    [event_ndims] + [self._scale] if tensor_util.is_tensor(
                        self._scale) else self._scale.inputs +
                    [self._shift] if self._shift is not None else []),
                is_constant_jacobian=True,
                dtype=self._scale.dtype,
                validate_args=validate_args,
                name=name)
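
`Affine` has since been split into composable pieces in TensorFlow Probability; a minimal sketch of the diagonal-scale case using its modern replacements (assuming `tfp.bijectors` with `Shift` and `ScaleMatvecDiag`; `Affine` itself was removed in recent TFP releases):

```python
import tensorflow_probability as tfp

tfb = tfp.bijectors
# Y = scale_diag * X + shift over the last (event) dimension; Chain applies
# its bijectors right-to-left, so the scale runs before the shift.
affine = tfb.Chain([tfb.Shift([1.0, -1.0]), tfb.ScaleMatvecDiag([2.0, 0.5])])
print(affine.forward([1.0, 1.0]))  # [3.0, -0.5]
```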