def _shape_tensor(self):
    """Dynamic shape of this operator: optional batch dims, then [N, N].

    Returns:
      1-D integer `Tensor` equal to `batch_shape + [num_rows, num_rows]`
      when a batch shape argument was supplied, else `[num_rows, num_rows]`.
    """
    # The operator is square, so both matrix dims are `_num_rows`.
    event_shape = array_ops.stack((self._num_rows, self._num_rows), axis=0)
    if self._batch_shape_arg is not None:
        return array_ops.concat((self._batch_shape_arg, event_shape), 0)
    # No explicit batch shape was given: shape is just the matrix dims.
    return event_shape
def _shape_tensor(self):
    """Dynamic shape of this blockwise operator.

    Returns:
      1-D int32 `Tensor`: broadcast batch shape of all block operators,
      followed by the summed matrix dimensions of the blocks.
    """
    # Fast path: if the static shape is fully known, return it as a
    # constant and avoid any dynamic broadcasting graph ops.
    static_shape = tensor_shape.TensorShape(self.shape)
    if static_shape.is_fully_defined():
        return ops.convert_to_tensor(
            static_shape.as_list(), dtype=dtypes.int32, name="shape")
    # Matrix dims are the sums over the per-block dimension tensors.
    matrix_shape = array_ops.stack([
        sum(self._block_domain_dimension_tensors()),
        sum(self._block_range_dimension_tensors()),
    ])
    # `self.operators` is a 2-D grid of blocks; broadcast every block's
    # batch shape against the running result.
    batch_shape = self.operators[0][0].batch_shape_tensor()
    for block_row in self.operators[1:]:
        for block_op in block_row:
            batch_shape = array_ops.broadcast_dynamic_shape(
                batch_shape, block_op.batch_shape_tensor())
    return prefer_static.concat((batch_shape, matrix_shape), 0)
def _shape_tensor(self):
    """Dynamic shape of this block operator.

    Returns:
      1-D int32 `Tensor`: broadcast batch shape of all sub-operators,
      followed by the summed matrix dimensions of the blocks.
    """
    # Fast path: a fully-defined static shape needs no dynamic work.
    static_shape = tensor_shape.TensorShape(self.shape)
    if static_shape.is_fully_defined():
        return ops.convert_to_tensor(
            static_shape.as_list(), dtype=dtypes.int32, name="shape")
    # Matrix dims are the sums over the per-block dimension tensors.
    matrix_shape = array_ops.stack([
        sum(self._block_domain_dimension_tensors()),
        sum(self._block_range_dimension_tensors()),
    ])
    # Broadcast batch shapes by adding dummy zeros Tensors; the zeros are
    # never materialized -- only their (broadcast) shape is read.
    dummy = array_ops.zeros(shape=self.operators[0].batch_shape_tensor())
    for sub_op in self.operators[1:]:
        dummy = dummy + array_ops.zeros(shape=sub_op.batch_shape_tensor())
    return array_ops.concat((array_ops.shape(dummy), matrix_shape), 0)
def _shape_tensor(self):
    """Dynamic shape of this composed operator.

    Returns:
      1-D int32 `Tensor`: broadcast batch shape of all factors, followed
      by `[range_dim(first factor), domain_dim(last factor)]`.
    """
    # Fast path: a fully-defined static shape needs no dynamic work.
    static_shape = tensor_shape.TensorShape(self.shape)
    if static_shape.is_fully_defined():
        return ops.convert_to_tensor(
            static_shape.as_list(), dtype=dtypes.int32, name="shape")
    # Matrix dims come from the outermost factors only.  We deliberately
    # do not check inner-dimension compatibility here -- that would add
    # unnecessary Asserts to the graph; incompatible shapes simply fail
    # at runtime.
    matrix_shape = array_ops.stack([
        self.operators[0].range_dimension_tensor(),
        self.operators[-1].domain_dimension_tensor(),
    ])
    # Broadcast batch shapes by adding dummy zeros Tensors; the zeros are
    # never materialized -- only their (broadcast) shape is read.
    dummy = array_ops.zeros(shape=self.operators[0].batch_shape_tensor())
    for factor in self.operators[1:]:
        dummy = dummy + array_ops.zeros(shape=factor.batch_shape_tensor())
    return array_ops.concat((array_ops.shape(dummy), matrix_shape), 0)
def _shape_tensor(self):
    """Dynamic shape of this operator.

    Returns:
      1-D integer `Tensor`: the `multiplier`'s shape (serving as the
      batch shape) followed by the square matrix dims `[N, N]`.
    """
    # The operator is square, so both matrix dims are `_num_rows`.
    event_shape = array_ops.stack((self._num_rows, self._num_rows), axis=0)
    # Batch dims are taken directly from the multiplier's dynamic shape.
    return array_ops.concat(
        (array_ops.shape(self.multiplier), event_shape), 0)