def _min_matrix_dim(self):
  """Minimum of domain/range dimension, if statically available, else None."""
  domain_dim = tensor_shape.dimension_value(self.domain_dimension)
  range_dim = tensor_shape.dimension_value(self.range_dimension)
  if domain_dim is None or range_dim is None:
    return None
  return min(domain_dim, range_dim)
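For reference, a minimal standalone sketch of the behavior this relies on: tf.compat.dimension_value returns a plain Python int when a dimension is statically known and None otherwise (the operator and shapes below are illustrative):

import tensorflow as tf

# dimension_value() yields a Python int for a statically known dimension,
# and None when the size is only known at runtime.
op = tf.linalg.LinearOperatorFullMatrix(tf.ones([3, 5]))
domain_dim = tf.compat.dimension_value(op.domain_dimension)  # 5
range_dim = tf.compat.dimension_value(op.range_dimension)    # 3
print(min(domain_dim, range_dim))  # 3 -- what _min_matrix_dim would return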
Example #2
def _set_diag_operators(self, diag_update, is_diag_update_positive):
  """Set attributes self._diag_update and self._diag_operator."""
  if diag_update is not None:
    self._diag_operator = linear_operator_diag.LinearOperatorDiag(
        self._diag_update, is_positive_definite=is_diag_update_positive)
  else:
    # Prefer the statically known dimension; fall back to the dynamic shape.
    r = tensor_shape.dimension_value(self.u.shape[-1])
    if r is None:
      r = array_ops.shape(self.u)[-1]
    self._diag_operator = linear_operator_identity.LinearOperatorIdentity(
        num_rows=r, dtype=self.dtype)
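This method is from TensorFlow's LinearOperatorLowRankUpdate; a hedged usage sketch of the fallback branch, with illustrative shapes: when diag_update is omitted, the diagonal factor D in the update u @ D @ v^H defaults to an identity of size r = u.shape[-1].

import tensorflow as tf

base = tf.linalg.LinearOperatorIdentity(num_rows=4)
u = tf.random.normal([4, 2])
op = tf.linalg.LinearOperatorLowRankUpdate(base, u)  # diag_update=None
# The fallback above produced an identity over the rank-2 update space.
print(op.diag_operator)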
Example #3
def _range_dimension_tensor(self, shape=None):
  # `shape` may be passed in if this can be pre-computed in a
  # more efficient manner, e.g. without excessive Tensor conversions.
  dim_value = tensor_shape.dimension_value(self.range_dimension)
  if dim_value is not None:
    return ops.convert_to_tensor(dim_value)
  else:
    shape = self.shape_tensor() if shape is None else shape
    return shape[-2]
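The same static-first, dynamic-fallback pattern can be written against a plain tensor; a minimal sketch using only public TF2 symbols (the helper name is hypothetical):

import tensorflow as tf

def range_dim_tensor(x):
    """Hypothetical helper mirroring _range_dimension_tensor for a matrix x."""
    dim_value = tf.compat.dimension_value(x.shape[-2])
    if dim_value is not None:
        # Static path: the value is baked in, no runtime shape op needed.
        return tf.convert_to_tensor(dim_value)
    # Dynamic path: read the shape from the runtime tensor instead.
    return tf.shape(x)[-2]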
Example #4
def _static_check_for_same_dimensions(operators):
  """ValueError if operators determined to have different dimensions."""
  if len(operators) < 2:
    return

  domain_dimensions = [
      (op.name, tensor_shape.dimension_value(op.domain_dimension))
      for op in operators
      if tensor_shape.dimension_value(op.domain_dimension) is not None]
  if len(set(value for name, value in domain_dimensions)) > 1:
    raise ValueError(f"All `operators` must have the same `domain_dimension`. "
                     f"Received: {domain_dimensions}.")

  range_dimensions = [
      (op.name, tensor_shape.dimension_value(op.range_dimension))
      for op in operators
      if tensor_shape.dimension_value(op.range_dimension) is not None]
  if len(set(value for name, value in range_dimensions)) > 1:
    raise ValueError(f"All operators must have the same `range_dimension`. "
                     f"Received: {range_dimensions}.")
Example #5
def _static_check_for_same_dimensions(operators):
    """ValueError if operators determined to have different dimensions."""
    if len(operators) < 2:
        return

    domain_dimensions = [
        (op.name, tensor_shape.dimension_value(op.domain_dimension))
        for op in operators
        if tensor_shape.dimension_value(op.domain_dimension) is not None
    ]
    if len(set(value for name, value in domain_dimensions)) > 1:
        raise ValueError(
            "Operators must have the same domain dimension. Found: %s" %
            domain_dimensions)

    range_dimensions = [
        (op.name, tensor_shape.dimension_value(op.range_dimension))
        for op in operators
        if tensor_shape.dimension_value(op.range_dimension) is not None
    ]
    if len(set(value for name, value in range_dimensions)) > 1:
        raise ValueError(
            "Operators must have the same range dimension. Found: %s" %
            range_dimensions)
Example #6
def _to_dense(self):
  """Generic and often inefficient implementation.  Override often."""
  if self.batch_shape.is_fully_defined():
    batch_shape = self.batch_shape
  else:
    batch_shape = self.batch_shape_tensor()

  dim_value = tensor_shape.dimension_value(self.domain_dimension)
  if dim_value is not None:
    n = dim_value
  else:
    n = self.domain_dimension_tensor()

  eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype)
  return self.matmul(eye)
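The result is observable through the public to_dense() method; a quick sketch (the diagonal operator is illustrative, and subclasses may override _to_dense with something cheaper, though the output agrees):

import tensorflow as tf

op = tf.linalg.LinearOperatorDiag(tf.constant([1.0, 2.0, 3.0]))
print(op.to_dense().numpy())         # [[1,0,0],[0,2,0],[0,0,3]]
# The generic path above builds the same matrix by applying the operator
# to an identity:
print(op.matmul(tf.eye(3)).numpy())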
Example #7
def _convert_dimension_to_tensor(value, dtype=None):
  # `np` is numpy; `convert_to_tensor` and `TypeConversionError` are
  # helpers defined in the enclosing module.
  dtype = dtype or np.int32
  if dtype not in (np.int32, np.int64):
    raise TypeConversionError(value, dtype)
  return convert_to_tensor(tensor_shape.dimension_value(value), dtype=dtype)
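An approximate public-API equivalent of the happy path, assuming TF2 (convert_to_tensor and TypeConversionError in the snippet are module-local):

import tensorflow as tf

dim = tf.TensorShape([5])[0]            # a Dimension (or plain int in TF2)
value = tf.compat.dimension_value(dim)  # Python int: 5
t = tf.convert_to_tensor(value, dtype=tf.int32)
print(t)  # tf.Tensor(5, shape=(), dtype=int32)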