def _trace(self):
  # Get Tensor of all ones of same shape as self.batch_shape.
  if self.batch_shape.is_fully_defined():
    batch_of_ones = array_ops.ones(shape=self.batch_shape, dtype=self.dtype)
  else:
    batch_of_ones = array_ops.ones(
        shape=self.batch_shape_tensor(), dtype=self.dtype)

  if self._min_matrix_dim() is not None:
    return self._min_matrix_dim() * batch_of_ones
  else:
    return (_ops.cast(self._min_matrix_dim_tensor(), self.dtype) *
            batch_of_ones)
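# Sketch (not part of the library): a minimal NumPy sanity check of the fact
# used above -- the trace of an N x N identity is N, broadcast over the batch
# shape.  The batch shape and N below are hypothetical illustration values.
def _trace_sanity_check_sketch():
  import numpy as np
  batch_shape, n = (2, 3), 4
  eye_batch = np.broadcast_to(np.eye(n), batch_shape + (n, n))
  expected = n * np.ones(batch_shape)
  np.testing.assert_allclose(np.trace(eye_batch, axis1=-2, axis2=-1), expected)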
def _diag_part(self):
  # Get ones in shape of diag, which is [B1,...,Bb, N]
  # Also get the size of the diag, "N".
  if _ops.TensorShape(self.shape).is_fully_defined():
    diag_shape = _ops.TensorShape(self.shape)[:-1]
    diag_size = self.domain_dimension.value
  else:
    diag_shape = self.shape_tensor()[:-1]
    diag_size = self.domain_dimension_tensor()
  ones_diag = array_ops.ones(diag_shape, dtype=self.dtype)

  # As proved in comments in self._trace, the value on the diag is constant,
  # repeated N times.  This value is the trace divided by N.

  # The handling of _ops.TensorShape(self.shape) = (0, 0) is tricky, and is
  # the reason we choose to compute trace and use that to compute diag_part,
  # rather than computing the value on the diagonal ("diag_value") directly.
  # Both result in a 0/0, but in different places, and the current method
  # gives the right result in the end.

  # Here, if _ops.TensorShape(self.shape) = (0, 0), then self.trace() = 0.,
  # and then diag_value = 0. / 0. = NaN.
  diag_value = self.trace() / _ops.cast(diag_size, self.dtype)

  # If _ops.TensorShape(self.shape) = (0, 0), then ones_diag = [] (empty
  # tensor), and then the following line is NaN * [] = [], as needed.
  return diag_value[..., array_ops.newaxis] * ones_diag
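# Sketch (not part of the library): a NumPy illustration of the 0/0 handling
# discussed above.  For a (0, 0) matrix, trace / N is NaN, but multiplying by
# an empty ones-vector yields an empty diagonal, which is the correct answer.
def _diag_part_empty_shape_sketch():
  import numpy as np
  trace, n = 0., 0                      # self.trace() and N for a (0, 0) matrix
  with np.errstate(invalid="ignore"):
    diag_value = np.float64(trace) / n  # 0. / 0. = NaN
  ones_diag = np.ones([n])              # empty tensor
  return diag_value * ones_diag         # NaN * [] = [], as needed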
def _ones_diag(self):
  """Returns the diagonal of this operator as all ones."""
  if _ops.TensorShape(self.shape).is_fully_defined():
    d_shape = self.batch_shape.concatenate([self._min_matrix_dim()])
  else:
    d_shape = array_ops.concat(
        [self.batch_shape_tensor(),
         [self._min_matrix_dim_tensor()]], axis=0)

  return array_ops.ones(shape=d_shape, dtype=self.dtype)
def _add(self, op1, op2, operator_name, hints):
  # Will build a LinearOperatorScaledIdentity.

  if _type(op1) == _SCALED_IDENTITY:
    multiplier_1 = op1.multiplier
  else:
    multiplier_1 = array_ops.ones(op1.batch_shape_tensor(), dtype=op1.dtype)

  if _type(op2) == _SCALED_IDENTITY:
    multiplier_2 = op2.multiplier
  else:
    multiplier_2 = array_ops.ones(op2.batch_shape_tensor(), dtype=op2.dtype)

  return linear_operator_identity.LinearOperatorScaledIdentity(
      num_rows=op1.range_dimension_tensor(),
      multiplier=multiplier_1 + multiplier_2,
      is_non_singular=hints.is_non_singular,
      is_self_adjoint=hints.is_self_adjoint,
      is_positive_definite=hints.is_positive_definite,
      name=operator_name)
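# Sketch (not part of the library): the algebra behind the adder above, in
# plain NumPy.  Adding c1 * I and c2 * I yields (c1 + c2) * I, so summing the
# multipliers suffices, with a plain identity treated as multiplier c = 1.
def _add_scaled_identities_sketch():
  import numpy as np
  n, c1, c2 = 3, 2.0, 1.0               # c2 = 1.0 plays the role of a plain identity
  lhs = c1 * np.eye(n) + c2 * np.eye(n)
  np.testing.assert_allclose(lhs, (c1 + c2) * np.eye(n))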
def _diag_part(self):
  # A Toeplitz matrix is constant along each diagonal, so the main diagonal
  # is the first entry of the first column, repeated N times.
  diag_entry = self.col[..., 0:1]
  return diag_entry * array_ops.ones(
      [self.domain_dimension_tensor()], self.dtype)
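# Sketch (not part of the library): a check of the constant-diagonal fact
# above against scipy's dense Toeplitz constructor, with a hypothetical
# first column.
def _toeplitz_diag_sketch():
  import numpy as np
  from scipy.linalg import toeplitz
  col = np.array([5., 1., 2.])          # hypothetical first column
  dense = toeplitz(col)
  np.testing.assert_allclose(np.diag(dense), col[0] * np.ones(len(col)))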
def _determinant(self):
  # For Householder transformations, the determinant is -1 (verified in the
  # sketch after _trace below).
  return -array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype)
def _trace(self):
  # A Householder reflection has (n - 1) eigenvalues equal to +1 and a single
  # eigenvalue equal to -1, so its trace is (n - 1) - 1 = n - 2.
  return _ops.cast(
      self.domain_dimension_tensor() - 2,
      self.dtype) * array_ops.ones(
          shape=self.batch_shape_tensor(), dtype=self.dtype)
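# Sketch (not part of the library): a NumPy check of both Householder facts
# used above -- H = I - 2 v v^T with ||v|| = 1 has determinant -1 and trace
# n - 2.  The size n and the random vector are hypothetical.
def _householder_facts_sketch():
  import numpy as np
  rng = np.random.default_rng(0)
  n = 4
  v = rng.normal(size=[n, 1])
  v /= np.linalg.norm(v)
  h = np.eye(n) - 2. * v @ v.T
  np.testing.assert_allclose(np.linalg.det(h), -1.)
  np.testing.assert_allclose(np.trace(h), n - 2.)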
def _determinant(self):
  return array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype)