def _eigvals(self):
   """Eigenvalues of the reflection: a single -1 followed by (n - 1) +1s."""
   axis_shape = array_ops.shape(self.reflection_axis)
   batch_shape = axis_shape[:-1]
   n = axis_shape[-1]
   # The -1 eigenvalue comes first, then (n - 1) eigenvalues equal to +1.
   minus_one = -array_ops.ones(
       shape=array_ops.concat([batch_shape, [1]], axis=-1), dtype=self.dtype)
   plus_ones = array_ops.ones(
       shape=array_ops.concat([batch_shape, [n - 1]], axis=-1),
       dtype=self.dtype)
   return array_ops.concat([minus_one, plus_ones], axis=-1)
 def _eigvals(self):
   """Eigenvalues of the reflection: one -1, then (n - 1) eigenvalues of +1."""
   axis_shape = prefer_static.shape(self.reflection_axis)
   batch_shape = axis_shape[:-1]
   n = axis_shape[-1]
   neg_one = -array_ops.ones(  # pylint: disable=invalid-unary-operand-type
       shape=prefer_static.concat([batch_shape, [1]], axis=-1),
       dtype=self.dtype)
   pos_ones = array_ops.ones(
       shape=prefer_static.concat([batch_shape, [n - 1]], axis=-1),
       dtype=self.dtype)
   return prefer_static.concat([neg_one, pos_ones], axis=-1)
  def _trace(self):
    """Trace: multiplier * min matrix dimension, broadcast over the batch."""
    # Prefer the statically known batch shape when it is fully defined.
    if self.batch_shape.is_fully_defined():
      batch_ones = array_ops.ones(shape=self.batch_shape, dtype=self.dtype)
    else:
      batch_ones = array_ops.ones(
          shape=self.batch_shape_tensor(), dtype=self.dtype)

    min_dim = self._min_matrix_dim()
    if min_dim is None:
      # Dimension unknown statically; fall back to the dynamic tensor.
      min_dim = _ops.cast(self._min_matrix_dim_tensor(), self.dtype)
    return self.multiplier * min_dim * batch_ones
 def _trace(self):
   """Trace of the reflection.

   The eigenvalues are (n - 1) copies of +1 and a single -1, so the trace
   is n - 2, broadcast to the batch shape.
   """
   shape = self.shape_tensor()
   trace_value = _ops.cast(
       self._domain_dimension_tensor(shape=shape) - 2, self.dtype)
   batch_ones = array_ops.ones(
       shape=self._batch_shape_tensor(shape=shape), dtype=self.dtype)
   return trace_value * batch_ones
# Ejemplo n.º 5 (scraped example marker; commented out so the file parses)
# 0
    def _diag_part(self):
        """Diagonal of the operator, shape [B1, ..., Bb, N]."""
        # Resolve the diagonal's shape [B1,...,Bb, N] and the size N,
        # statically when fully defined, dynamically otherwise.
        if tensor_shape.TensorShape(self.shape).is_fully_defined():
            diag_shape = tensor_shape.TensorShape(self.shape)[:-1]
            diag_size = self.domain_dimension.value
        else:
            diag_shape = self.shape_tensor()[:-1]
            diag_size = self.domain_dimension_tensor()

        # As shown in self._trace's comments, the diagonal is constant, so it
        # equals the trace divided by N, repeated N times.
        #
        # Computing it via the trace (rather than the diagonal value
        # directly) is deliberate: for the tricky shape (0, 0) both routes
        # produce a 0/0, but in different places, and this route gives the
        # right answer.  Here, with shape (0, 0), self.trace() = 0. and so
        # diag_value = 0. / 0. = NaN...
        diag_value = self.trace() / _ops.cast(diag_size, self.dtype)

        # ...while ones_diag below is an empty tensor, so the final line is
        # NaN * [] = [], exactly as needed.
        ones_diag = array_ops.ones(diag_shape, dtype=self.dtype)
        return diag_value[..., _ops.newaxis] * ones_diag
# Ejemplo n.º 6 (scraped example marker; commented out so the file parses)
# 0
 def __getitem__(self, slices):
     """Slices the batch dimensions, returning a new LinearOperatorZeros."""
     # NOTE: the previous comment claimed this builds a
     # LinearOperatorIdentity, but the operator constructed below is a
     # LinearOperatorZeros.
     # Slice a proxy ones-tensor of the batch shape; its resulting shape is
     # the new batch shape.
     new_batch_shape = prefer_static.shape(
         array_ops.ones(self._batch_shape_arg)[slices])
     parameters = dict(self.parameters, batch_shape=new_batch_shape)
     return LinearOperatorZeros(**parameters)
# Ejemplo n.º 7 (scraped example marker; commented out so the file parses)
# 0
def _broadcast_parameter_with_batch_shape(param, param_ndims_to_matrix_ndims,
                                          batch_shape):
    """Broadcasts `param` with the given batch shape, recursively."""
    if hasattr(param, 'batch_shape_tensor'):
        # `param` is itself an operator: rebuild it with each constituent
        # parameter broadcast recursively.
        ndims_map = param._experimental_parameter_ndims_to_matrix_ndims  # pylint: disable=protected-access
        overrides = {}
        for name, ndims in ndims_map.items():
            sub_param = getattr(param, name)
            broadcast_fn = functools.partial(
                _broadcast_parameter_with_batch_shape, batch_shape=batch_shape)
            overrides[name] = nest.map_structure_up_to(
                sub_param, broadcast_fn, sub_param, ndims)
        return type(param)(**dict(param.parameters, **overrides))

    # Leaf tensor: pad `batch_shape` with ones for the matrix dimensions,
    # then broadcast `param` to the combined shape.
    base_shape = prefer_static.concat([
        batch_shape,
        array_ops.ones([param_ndims_to_matrix_ndims], dtype=dtypes.int32)
    ], axis=0)
    target_shape = array_ops.broadcast_dynamic_shape(
        base_shape, prefer_static.shape(param))
    return _ops.broadcast_to(param, target_shape)
    def _ones_diag(self):
        """Returns a tensor of ones shaped like this operator's diagonal."""
        if tensor_shape.TensorShape(self.shape).is_fully_defined():
            # Static shape: [B1, ..., Bb] + [min matrix dim].
            diag_shape = self.batch_shape.concatenate([self._min_matrix_dim()])
        else:
            # Dynamic shape: build the same shape from tensors.
            diag_shape = array_ops.concat(
                [self.batch_shape_tensor(), [self._min_matrix_dim_tensor()]],
                axis=0)

        return array_ops.ones(shape=diag_shape, dtype=self.dtype)
# Ejemplo n.º 9 (scraped example marker; commented out so the file parses)
# 0
  def _add(self, op1, op2, operator_name, hints):
    """Adds two identity-like operators into a LinearOperatorScaledIdentity."""

    def _multiplier_of(op):
      # A scaled identity carries an explicit multiplier; a plain identity
      # behaves like a multiplier of ones over the batch shape.
      if _type(op) == _SCALED_IDENTITY:
        return op.multiplier
      return array_ops.ones(op.batch_shape_tensor(), dtype=op.dtype)

    multiplier_1 = _multiplier_of(op1)
    multiplier_2 = _multiplier_of(op2)

    return linear_operator_identity.LinearOperatorScaledIdentity(
        num_rows=op1.range_dimension_tensor(),
        multiplier=multiplier_1 + multiplier_2,
        is_non_singular=hints.is_non_singular,
        is_self_adjoint=hints.is_self_adjoint,
        is_positive_definite=hints.is_positive_definite,
        name=operator_name)
 def _diag_part(self):
   """Diagonal: the leading entry of `self.col`, repeated along the diagonal."""
   first_entry = self.col[..., 0:1]
   diag_ones = array_ops.ones([self.domain_dimension_tensor()], self.dtype)
   return first_entry * diag_ones
 def _cond(self):
   """Condition number: Householder reflections are orthogonal, so it is 1."""
   batch_shape = self.batch_shape_tensor()
   return array_ops.ones(batch_shape, dtype=self.dtype)
 def _determinant(self):
   """Determinant: for Householder transformations it is always -1."""
   batch_ones = array_ops.ones(
       shape=self.batch_shape_tensor(), dtype=self.dtype)
   return -batch_ones  # pylint: disable=invalid-unary-operand-type
 def _cond(self):
   """Condition number: a batch of ones."""
   batch_shape = self.batch_shape_tensor()
   return array_ops.ones(batch_shape, dtype=self.dtype)
 def _determinant(self):
   """Determinant: one for every batch member."""
   det_shape = self.batch_shape_tensor()
   return array_ops.ones(shape=det_shape, dtype=self.dtype)
 def _determinant(self):
   """Determinant: always -1 for a Householder transformation."""
   minus_ones = -array_ops.ones(
       shape=self.batch_shape_tensor(), dtype=self.dtype)
   return minus_ones