Example #1
 def test_doesnt_raise_when_not_equal_and_broadcastable_shapes(self):
   small = constant_op.constant([1, 2], name="small")
   big = constant_op.constant([3], name="big")
   with ops.control_dependencies(
       [check_ops.assert_none_equal(small, big)]):
     out = array_ops.identity(small)
   self.evaluate(out)
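For reference, here is a minimal eager-mode sketch of the same broadcasting behavior, assuming the public TF 2.x alias `tf.debugging.assert_none_equal` rather than the internal `check_ops` module:

import tensorflow as tf

small = tf.constant([1, 2])
big = tf.constant([3])  # shape [1] broadcasts against shape [2]
# Passes silently: after broadcasting, no element pair is equal.
tf.debugging.assert_none_equal(small, big)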
Example #2
 def test_raises_when_equal(self):
   small = constant_op.constant([3, 1], name="small")
   with self.assertRaisesOpError("x != y did not hold"):
     with ops.control_dependencies(
         [check_ops.assert_none_equal(small, small)]):
       out = array_ops.identity(small)
     self.evaluate(out)
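The failing case can be reproduced directly in eager mode. A hedged sketch, again assuming the TF 2.x `tf.debugging` alias, where the element-wise failure surfaces as an `InvalidArgumentError`:

import tensorflow as tf

x = tf.constant([3, 1])
try:
  tf.debugging.assert_none_equal(x, x)
except tf.errors.InvalidArgumentError as e:
  print(e.message)  # includes the "x != y did not hold" condition text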
Example #3
 def test_doesnt_raise_when_not_equal(self):
   small = constant_op.constant([1, 2], name="small")
   big = constant_op.constant([10, 20], name="big")
   with ops.control_dependencies(
       [check_ops.assert_none_equal(big, small)]):
     out = array_ops.identity(small)
   self.evaluate(out)
Example #4
 def test_raises_when_equal(self):
   with self.test_session():
     small = constant_op.constant([3, 1], name="small")
     with ops.control_dependencies(
         [check_ops.assert_none_equal(small, small)]):
       out = array_ops.identity(small)
     with self.assertRaisesOpError("x != y did not hold"):
       out.eval()
Example #5
 def test_doesnt_raise_when_not_equal(self):
   with self.test_session():
     small = constant_op.constant([1, 2], name="small")
     big = constant_op.constant([10, 20], name="big")
     with ops.control_dependencies(
         [check_ops.assert_none_equal(big, small)]):
       out = array_ops.identity(small)
     out.eval()
Example #6
 def test_doesnt_raise_when_both_empty(self):
   with self.test_session():
     larry = constant_op.constant([])
     curly = constant_op.constant([])
     with ops.control_dependencies(
         [check_ops.assert_none_equal(larry, curly)]):
       out = array_ops.identity(larry)
     out.eval()
Example #7
 def test_doesnt_raise_when_not_equal_and_broadcastable_shapes(self):
   with self.test_session():
     small = constant_op.constant([1, 2], name="small")
     big = constant_op.constant([3], name="big")
     with ops.control_dependencies(
         [check_ops.assert_none_equal(small, big)]):
       out = array_ops.identity(small)
     out.eval()
Example #8
 def test_raises_when_not_equal_but_non_broadcastable_shapes(self):
   with self.test_session():
     small = constant_op.constant([1, 1, 1], name="small")
     big = constant_op.constant([10, 10], name="big")
     with self.assertRaisesRegexp(ValueError, "must be"):
       with ops.control_dependencies(
           [check_ops.assert_none_equal(small, big)]):
         out = array_ops.identity(small)
       out.eval()
Example #9
  def __init__(self,
               shift=None,
               scale=None,
               validate_args=False,
               name="affine_scalar"):
    """Instantiates the `AffineScalar` bijector.

     This `Bijector` is initialized with `shift` and `scale` `Tensor` arguments,
    giving the forward operation:

    ```none
    Y = g(X) = scale * X + shift
    ```

     If `scale` is not specified, then the bijector has the semantics of
    `scale = 1.`. Similarly, if `shift` is not specified, then the bijector
    has the semantics of `shift = 0.`.

    Args:
      shift: Floating-point `Tensor`. If this is set to `None`, no shift is
        applied.
      scale: Floating-point `Tensor`. If this is set to `None`, no scale is
        applied.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.
    """
    self._graph_parents = []
    self._name = name
    self._validate_args = validate_args

    with self._name_scope("init", values=[scale, shift]):
      self._shift = shift
      self._scale = scale

      if self._shift is not None:
        self._shift = ops.convert_to_tensor(shift, name="shift")

      if self._scale is not None:
        self._scale = ops.convert_to_tensor(self._scale, name="scale")
        if validate_args:
          self._scale = control_flow_ops.with_dependencies(
              [check_ops.assert_none_equal(
                  self._scale,
                  array_ops.zeros([], dtype=self._scale.dtype))],
              self._scale)

      super(AffineScalar, self).__init__(
          event_ndims=0,
          is_constant_jacobian=True,
          validate_args=validate_args,
          name=name)
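A usage sketch for the bijector defined above, assuming the TensorFlow Probability packaging (`tfp.bijectors.AffineScalar`; later versions replace it with chained `Shift` and `Scale` bijectors):

import tensorflow_probability as tfp

b = tfp.bijectors.AffineScalar(shift=1.0, scale=2.0)
y = b.forward(3.0)  # scale * x + shift = 2 * 3 + 1 = 7
x = b.inverse(7.0)  # (y - shift) / scale = (7 - 1) / 2 = 3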
Example #10
  def __init__(self,
               shift=None,
               scale=None,
               validate_args=False,
               name="affine_scalar"):
    """Instantiates the `AffineScalar` bijector.

     This `Bijector` is initialized with `shift` and `scale` `Tensor` arguments,
    giving the forward operation:

    ```none
    Y = g(X) = scale * X + shift
    ```

     If `scale` is not specified, then the bijector has the semantics of
    `scale = 1.`. Similarly, if `shift` is not specified, then the bijector
    has the semantics of `shift = 0.`.

    Args:
      shift: Floating-point `Tensor`. If this is set to `None`, no shift is
        applied.
      scale: Floating-point `Tensor`. If this is set to `None`, no scale is
        applied.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.
    """
    self._graph_parents = []
    self._name = name
    self._validate_args = validate_args

    with self._name_scope("init", values=[scale, shift]):
      self._shift = shift
      self._scale = scale

      if self._shift is not None:
        self._shift = ops.convert_to_tensor(shift, name="shift")

      if self._scale is not None:
        self._scale = ops.convert_to_tensor(self._scale, name="scale")
        if validate_args:
          self._scale = control_flow_ops.with_dependencies(
              [check_ops.assert_none_equal(
                  self._scale,
                  array_ops.zeros([], dtype=self._scale.dtype))],
              self._scale)

      super(AffineScalar, self).__init__(
          forward_min_event_ndims=0,
          is_constant_jacobian=True,
          validate_args=validate_args,
          name=name)
Example #11
 def _maybe_attach_assertion(x):
   if not validate_args:
     return x
   if assert_positive:
     return control_flow_ops.with_dependencies([
         check_ops.assert_positive(
             x, message="diagonal part must be positive"),
     ], x)
   return control_flow_ops.with_dependencies([
       check_ops.assert_none_equal(
           x,
           array_ops.zeros([], x.dtype),
           message="diagonal part must be non-zero")], x)
Example #12
 def _maybe_attach_assertion(x):
   if not validate_args:
     return x
   if assert_positive:
     return control_flow_ops.with_dependencies([
         check_ops.assert_positive(
             x, message="diagonal part must be positive"),
     ], x)
   return control_flow_ops.with_dependencies([
       check_ops.assert_none_equal(
           x,
           array_ops.zeros([], x.dtype),
           message="diagonal part must be non-zero")], x)
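The closure above gates the assertion on `validate_args`, so the fast path adds no ops to the graph. A sketch of the same pattern against the public TF 2.x API (the helper name is illustrative, not from the source):

import tensorflow as tf

def maybe_attach_assertion(x, validate_args):
  if not validate_args:
    return x  # fast path: no assertion op is created
  check = tf.debugging.assert_none_equal(
      x, tf.zeros([], x.dtype), message="diagonal part must be non-zero")
  with tf.control_dependencies([check]):
    return tf.identity(x)  # reading the result forces the check to run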
Example #13
 def test_raises_when_not_equal_but_non_broadcastable_shapes(self):
   with self.test_session():
     small = constant_op.constant([1, 1, 1], name="small")
     big = constant_op.constant([10, 10], name="big")
     # The exception in eager and non-eager mode is different because
     # eager mode relies on shape check done as part of the C++ op, while
     # graph mode does shape checks when creating the `Operation` instance.
     with self.assertRaisesRegexp(
         (ValueError, errors.InvalidArgumentError),
         (r"Incompatible shapes: \[3\] vs. \[2\]|"
          r"Dimensions must be equal, but are 3 and 2")):
       with ops.control_dependencies(
           [check_ops.assert_none_equal(small, big)]):
         out = array_ops.identity(small)
       self.evaluate(out)
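As the comment in the test explains, the non-broadcastable case fails at different points in eager and graph mode. A minimal eager sketch, assuming TF 2.x, where either the Python shape check or the C++ kernel rejects the shapes:

import tensorflow as tf

small = tf.constant([1, 1, 1])
big = tf.constant([10, 10])
try:
  tf.debugging.assert_none_equal(small, big)  # shapes [3] vs. [2]
except (ValueError, tf.errors.InvalidArgumentError):
  print("incompatible shapes rejected before any comparison runs")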
Example #14
  def __init__(self,
               hinge_softness=None,
               validate_args=False,
               name="softplus"):
    with ops.name_scope(name, values=[hinge_softness]):
      if hinge_softness is not None:
        self._hinge_softness = ops.convert_to_tensor(
            hinge_softness, name="hinge_softness")
      else:
        self._hinge_softness = None
      if validate_args:
        nonzero_check = check_ops.assert_none_equal(
            ops.convert_to_tensor(0, dtype=self.hinge_softness.dtype),
            self.hinge_softness,
            message="hinge_softness must be non-zero")
        self._hinge_softness = control_flow_ops.with_dependencies(
            [nonzero_check], self.hinge_softness)

    super(Softplus, self).__init__(forward_min_event_ndims=0,
                                   validate_args=validate_args,
                                   name=name)
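The non-zero check above guards a division: in the standard scaled softplus, the forward op is y = k * softplus(x / k) with k = hinge_softness. That formula is an assumption about the surrounding bijector, which this snippet does not show. A minimal sketch:

import tensorflow as tf

def scaled_softplus(x, hinge_softness):
  # Assumed forward op; the division is why hinge_softness must be non-zero.
  k = tf.convert_to_tensor(hinge_softness, dtype=x.dtype)
  return k * tf.math.softplus(x / k)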
Example #15
 def _assertions(self, x):
   if not self.validate_args:
     return []
   shape = array_ops.shape(x)
   is_matrix = check_ops.assert_rank_at_least(
       x, 2, message="Input must have rank at least 2.")
   is_square = check_ops.assert_equal(
       shape[-2], shape[-1], message="Input must be a square matrix.")
   above_diagonal = array_ops.matrix_band_part(
       array_ops.matrix_set_diag(
           x, array_ops.zeros(shape[:-1], dtype=dtypes.float32)),
       0, -1)
   is_lower_triangular = check_ops.assert_equal(
       above_diagonal, array_ops.zeros_like(above_diagonal),
       message="Input must be lower triangular.")
   # A lower triangular matrix is nonsingular iff all its diagonal entries are
   # nonzero.
   diag_part = array_ops.matrix_diag_part(x)
   is_nonsingular = check_ops.assert_none_equal(
       diag_part, array_ops.zeros_like(diag_part),
       message="Input must have all diagonal entries nonzero.")
   return [is_matrix, is_square, is_lower_triangular, is_nonsingular]
Example #16
  def __init__(self,
               event_ndims=0,
               hinge_softness=None,
               validate_args=False,
               name="softplus"):
    with ops.name_scope(name, values=[hinge_softness]):
      if hinge_softness is not None:
        self._hinge_softness = ops.convert_to_tensor(
            hinge_softness, name="hinge_softness")
      else:
        self._hinge_softness = None
      if validate_args:
        nonzero_check = check_ops.assert_none_equal(
            ops.convert_to_tensor(
                0, dtype=self.hinge_softness.dtype), self.hinge_softness)
        self._hinge_softness = control_flow_ops.with_dependencies(
            [nonzero_check], self.hinge_softness)

    super(Softplus, self).__init__(
        event_ndims=event_ndims,
        validate_args=validate_args,
        name=name)
Example #17
 def _assertions(self, x):
   if not self.validate_args:
     return []
   shape = array_ops.shape(x)
   is_matrix = check_ops.assert_rank_at_least(
       x, 2, message="Input must have rank at least 2.")
   is_square = check_ops.assert_equal(
       shape[-2], shape[-1], message="Input must be a square matrix.")
   above_diagonal = array_ops.matrix_band_part(
       array_ops.matrix_set_diag(
           x, array_ops.zeros(shape[:-1], dtype=dtypes.float32)),
       0, -1)
   is_lower_triangular = check_ops.assert_equal(
       above_diagonal, array_ops.zeros_like(above_diagonal),
       message="Input must be lower triangular.")
   # A lower triangular matrix is nonsingular iff all its diagonal entries are
   # nonzero.
   diag_part = array_ops.matrix_diag_part(x)
   is_nonsingular = check_ops.assert_none_equal(
       diag_part, array_ops.zeros_like(diag_part),
       message="Input must have all diagonal entries nonzero.")
   return [is_matrix, is_square, is_lower_triangular, is_nonsingular]
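The comment on nonsingularity can be exercised on its own: a triangular matrix is invertible iff its diagonal contains no zeros. A hedged TF 2.x sketch:

import tensorflow as tf

m = tf.constant([[1., 0.],
                 [5., 0.]])  # singular: zero on the diagonal
diag = tf.linalg.diag_part(m)
try:
  tf.debugging.assert_none_equal(
      diag, tf.zeros_like(diag),
      message="Input must have all diagonal entries nonzero.")
except tf.errors.InvalidArgumentError as e:
  print(e.message)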
Example #18
 def test_returns_none_with_eager(self):
   with context.eager_mode():
     t1 = constant_op.constant([1, 2])
     t2 = constant_op.constant([3, 4])
     x = check_ops.assert_none_equal(t1, t2)
     assert x is None
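In eager mode the assertion executes immediately, so there is no op to hand back; on success the function returns `None`, which is exactly what the test asserts. A TF 2.x sketch of the same behavior:

import tensorflow as tf

result = tf.debugging.assert_none_equal(
    tf.constant([1, 2]), tf.constant([3, 4]))
print(result)  # None in eager mode: the check already ran and passed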
Example #19
  def _create_scale_operator(self, identity_multiplier, diag, tril,
                             perturb_diag, perturb_factor, shift,
                             validate_args):
    """Construct `scale` from various components.

    Args:
      identity_multiplier: Floating-point rank-0 `Tensor` representing a scaling
        done to the identity matrix.
      diag: Floating-point `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ...  k], which represents a k x k
        diagonal matrix.
      tril: Floating-point `Tensor` representing the lower triangular matrix.
        `scale_tril` has shape [N1, N2, ...  k, k], which represents a k x k
        lower triangular matrix.
      perturb_diag: Floating-point `Tensor` representing the diagonal matrix of
        the low rank update.
      perturb_factor: Floating-point `Tensor` representing factor matrix.
      shift: Floating-point `Tensor` representing `shift` in `scale @ X + shift`.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.

    Returns:
      scale. In the case of scaling by a constant, scale is a
      floating point `Tensor`. Otherwise, scale is a `LinearOperator`.

    Raises:
      ValueError: if all of `tril`, `diag` and `identity_multiplier` are `None`.
    """
    identity_multiplier = _as_tensor(identity_multiplier, "identity_multiplier")
    diag = _as_tensor(diag, "diag")
    tril = _as_tensor(tril, "tril")
    perturb_diag = _as_tensor(perturb_diag, "perturb_diag")
    perturb_factor = _as_tensor(perturb_factor, "perturb_factor")

    # If possible, use the low rank update to infer the shape of
    # the identity matrix, when scale represents a scaled identity matrix
    # with a low rank update.
    shape_hint = None
    if perturb_factor is not None:
      shape_hint = distribution_util.dimension_size(perturb_factor, axis=-2)

    if self._is_only_identity_multiplier:
      if validate_args:
        return control_flow_ops.with_dependencies(
            [check_ops.assert_none_equal(
                identity_multiplier,
                array_ops.zeros([], identity_multiplier.dtype),
                ["identity_multiplier should be non-zero."])],
            identity_multiplier)
      return identity_multiplier

    scale = distribution_util.make_tril_scale(
        loc=shift,
        scale_tril=tril,
        scale_diag=diag,
        scale_identity_multiplier=identity_multiplier,
        validate_args=validate_args,
        assert_positive=False,
        shape_hint=shape_hint)

    if perturb_factor is not None:
      return linalg.LinearOperatorLowRankUpdate(
          scale,
          u=perturb_factor,
          diag_update=perturb_diag,
          is_diag_update_positive=perturb_diag is None,
          is_non_singular=True,  # Implied by is_positive_definite=True.
          is_self_adjoint=True,
          is_positive_definite=True,
          is_square=True)

    return scale
Example #20
  def _create_scale_operator(self, identity_multiplier, diag, tril,
                             perturb_diag, perturb_factor, shift,
                             validate_args):
    """Construct `scale` from various components.

    Args:
      identity_multiplier: Floating-point rank-0 `Tensor` representing a scaling
        done to the identity matrix.
      diag: Floating-point `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ...  k], which represents a k x k
        diagonal matrix.
      tril: Floating-point `Tensor` representing the lower triangular matrix.
        `scale_tril` has shape [N1, N2, ...  k, k], which represents a k x k
        lower triangular matrix.
      perturb_diag: Floating-point `Tensor` representing the diagonal matrix of
        the low rank update.
      perturb_factor: Floating-point `Tensor` representing factor matrix.
      shift: Floating-point `Tensor` representing `shift` in `scale @ X + shift`.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.

    Returns:
      scale. In the case of scaling by a constant, scale is a
      floating point `Tensor`. Otherwise, scale is a `LinearOperator`.

    Raises:
      ValueError: if all of `tril`, `diag` and `identity_multiplier` are `None`.
    """
    identity_multiplier = _as_tensor(identity_multiplier, "identity_multiplier")
    diag = _as_tensor(diag, "diag")
    tril = _as_tensor(tril, "tril")
    perturb_diag = _as_tensor(perturb_diag, "perturb_diag")
    perturb_factor = _as_tensor(perturb_factor, "perturb_factor")

    # If possible, use the low rank update to infer the shape of
    # the identity matrix, when scale represents a scaled identity matrix
    # with a low rank update.
    shape_hint = None
    if perturb_factor is not None:
      shape_hint = distribution_util.dimension_size(perturb_factor, axis=-2)

    if self._is_only_identity_multiplier:
      if validate_args:
        return control_flow_ops.with_dependencies(
            [check_ops.assert_none_equal(
                identity_multiplier,
                array_ops.zeros([], identity_multiplier.dtype),
                ["identity_multiplier should be non-zero."])],
            identity_multiplier)
      return identity_multiplier

    scale = distribution_util.make_tril_scale(
        loc=shift,
        scale_tril=tril,
        scale_diag=diag,
        scale_identity_multiplier=identity_multiplier,
        validate_args=validate_args,
        assert_positive=False,
        shape_hint=shape_hint)

    if perturb_factor is not None:
      return linalg.LinearOperatorUDVHUpdate(
          scale,
          u=perturb_factor,
          diag_update=perturb_diag,
          is_diag_update_positive=perturb_diag is None,
          is_non_singular=True,  # Implied by is_positive_definite=True.
          is_self_adjoint=True,
          is_positive_definite=True,
          is_square=True)

    return scale
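The `with_dependencies` call above is the graph-mode idiom for tying a validation to a value. A sketch of the same guard using public APIs (the helper name is illustrative):

import tensorflow as tf

def validated_multiplier(m):
  check = tf.debugging.assert_none_equal(
      m, tf.zeros([], m.dtype),
      message="identity_multiplier should be non-zero.")
  with tf.control_dependencies([check]):
    return tf.identity(m)  # downstream reads of the result trigger the check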