def reduce_sum(input_tensor, reduction_indices=None, keep_dims=False, name=None):
  """Sum the elements of `input_tensor` across the given dimensions.

  The tensor is reduced along every dimension listed in
  `reduction_indices`. By default each reduced dimension is removed,
  lowering the rank by one per entry; passing `keep_dims=True` keeps
  them in place with size 1. When `reduction_indices` is `None`, every
  dimension is reduced and a single-element tensor results.

  For example:

  ```python
  # 'x' is [[1, 1, 1]
  #         [1, 1, 1]]
  tf.reduce_sum(x) ==> 6
  tf.reduce_sum(x, 0) ==> [2, 2, 2]
  tf.reduce_sum(x, 1) ==> [3, 3]
  tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
  tf.reduce_sum(x, [0, 1]) ==> 6
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  # Resolve `None` into the full list of dimensions before dispatching
  # to the generated op.
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._sum(input_tensor, dims, keep_dims, name=name)
def reduce_sum(input_tensor, reduction_indices=None, keep_dims=False, name=None):
  """Computes the sum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in
  `reduction_indices`. Unless `keep_dims` is true, the rank of the
  tensor is reduced by 1 for each entry in `reduction_indices`. If
  `keep_dims` is true, the reduced dimensions are retained with length
  1. If `reduction_indices` has no entries, all dimensions are reduced,
  and a tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[1, 1, 1]
  #         [1, 1, 1]]
  tf.reduce_sum(x) ==> 6
  tf.reduce_sum(x, 0) ==> [2, 2, 2]
  tf.reduce_sum(x, 1) ==> [3, 3]
  tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
  tf.reduce_sum(x, [0, 1]) ==> 6
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  return gen_math_ops._sum(input_tensor, _ReductionDims(input_tensor, reduction_indices), keep_dims, name=name)
def sum1(self, input_itensor):
  """Create a specialized op that sums over a 1-dimensional vector.

  This avoids having to create Rank/Range ops that initialize indices in
  the default tf.reduce_sum.

  Args:
    input_itensor: the imperative tensor to sum. NOTE(review): the name
      and the hard-coded `reduction_indices=[0]` suggest a rank-1 input
      is expected — confirm callers never pass higher-rank tensors.

  Returns:
    The result of applying the cached (or newly constructed) sum op to
    `input_itensor`.
  """
  op_type_name = "sum1"
  tf_dtype = input_itensor.dtype
  current_device = get_current_device_string(self.g)
  # Device string like "gpu:0" is flattened ("gpu0") for use in the key.
  current_device_sanitized = current_device.replace(":", "")
  # Ops are cached per (op type, dtype, device) so the graph is only
  # extended once for each distinct combination.
  key = (op_type_name, tf_dtype.name, current_device_sanitized)
  if key in self.op_cache:
    if self.PRINT_CACHE_HITS:
      print("Imperative cache hit for %s"%(str(key)))
    op = self.op_cache[key]
  else:
    if self.PRINT_CACHE_MISSES:
      print("Imperative cache miss for %s"%(str(key)))
    with self.g.as_default():
      # Name prefix ties all nodes of this specialized op together,
      # e.g. "sum1.float32.0", "sum1.float32.1", "sum1.float32.op".
      op_prefix = op_type_name + "." + tf_dtype.name
      # Feed the persistent session tensor back into the graph via a
      # placeholder (holder) / tensor pair.
      holder, tensor = session_ops.get_session_tensor(input_itensor.tf_handle, input_itensor.dtype, name=op_prefix+".0")
      input_holders = {"input": holder}
      # Constant [0] replaces the Rank/Range machinery the generic
      # reduce_sum would build to enumerate reduction indices.
      reduction_indices = constant_op.constant([0], dtype=dtypes.int32, name=op_prefix+".1")
      output = gen_math_ops._sum(input=tensor, reduction_indices=reduction_indices, keep_dims=False, name=op_prefix+".op")
      op_prefix = op_prefix+".out"
      # Return the result as a persistent handle so it stays usable
      # across session runs.
      output_handle = session_ops.get_session_handle(output, op_prefix+".handle")
      op = Op(self, input_holders, output_handle)
      self.cache_add(key, op)
  return op(input=input_itensor)
def fast_sum(tensor, name=None):
  """Apply the raw `_sum` op to `tensor` with an empty index list.

  Dispatches directly to `gen_math_ops._sum` with
  `reduction_indices=[]` and `keep_dims=False`, skipping the index
  resolution done by the public reduce_sum wrapper.
  NOTE(review): with standard TF semantics an empty axis list reduces
  over no dimensions — confirm this matches the intended behavior.

  Args:
    tensor: the tensor handed to the op.
    name: optional name for the operation.

  Returns:
    The op's output tensor.
  """
  no_axes = []
  return gen_math_ops._sum(input=tensor, reduction_indices=no_axes, keep_dims=False, name=name)
def fast_sum(tensor, name=None):
  """Call `gen_math_ops._sum` on `tensor` with no reduction indices.

  A thin shortcut around the generated sum op: `reduction_indices` is
  the empty list and `keep_dims` is False.
  NOTE(review): an empty `reduction_indices` reduces over no axes under
  standard TF semantics — verify that is what callers expect.

  Args:
    tensor: input tensor for the op.
    name: optional operation name.

  Returns:
    The tensor produced by the op.
  """
  return gen_math_ops._sum(
      input=tensor,
      reduction_indices=[],
      keep_dims=False,
      name=name)