Code Example #1
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
    # pylint: disable=redefined-builtin
    """Returns the number of elements in `input`.

    Args:
      input: A `Tensor` or `SparseTensor`.
      name: A name for the operation (optional).
      optimize: if true, encode the size as a constant when possible.
      out_type: the output dtype (`int32` by default).

    Returns:
      A scalar `Tensor` of type `out_type`.
    """
    if context.executing_eagerly() and not isinstance(
            input,
            (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
        # Eager tensors already know their shape, so compute the size
        # directly with numpy instead of emitting a Size op.
        input = ops.convert_to_tensor(input)
        np_out_type = out_type.as_numpy_dtype
        num_elements = np.prod(input._shape_tuple(), dtype=np_out_type)  # pylint: disable=protected-access
        return ops.convert_to_tensor(num_elements, dtype=out_type)
    with ops.name_scope(name, "Size", [input]) as name:
        if isinstance(
                input,
                (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
            # A SparseTensor's size is the product of its dense shape.
            return gen_math_ops.prod(
                gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
        else:
            input_tensor = ops.convert_to_tensor(input)
            input_shape = input_tensor.get_shape()
            if optimize:
                # Fold a fully known static shape into a constant.
                if input_shape.is_fully_defined():
                    return constant(input_shape.num_elements(), out_type,
                                    name=name)
                # Any zero-length dimension forces the size to zero.
                if input_shape.dims and any(
                        dim == 0 for dim in input_shape.dims):
                    return constant(0, out_type, name=name)
            return gen_array_ops.size(input, name=name, out_type=out_type)
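A minimal usage sketch of this helper through the public tf.size API (not part of the source above; assumes TensorFlow 2.x with eager execution, where the numpy fast path applies):

import tensorflow as tf

x = tf.zeros([3, 4])
print(tf.size(x))                     # tf.Tensor(12, shape=(), dtype=int32)
print(tf.size(x, out_type=tf.int64))  # same count, as int64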
Code Example #2
File: ekf.py  Project: isabeaups/optimizers
    def _create_slots(self, var_list):
        # Create the measurement-noise matrix R on the same device as the
        # first variable.
        if (self._Rt is None or
                self._Rt.graph is not var_list[0].graph):
            with ops.colocate_with(var_list[0]):
                self._Rt = linalg_ops.eye(self.y_dim, name="R") / self._lr
                print("R shape: {}".format(self._Rt.get_shape().as_list()))

        # Create per-variable slots: a process-noise matrix Q and an
        # error-covariance matrix P, each square with side equal to the
        # number of elements in the flattened variable.
        for v in var_list:
            num_params = gen_array_ops.size(v.initialized_value())
            self._get_or_make_slot(
                v, linalg_ops.eye(num_params, dtype=v.dtype.base_dtype) * self.Q,
                "Q", self._name)
            self._get_or_make_slot(
                v, linalg_ops.eye(num_params, dtype=v.dtype.base_dtype) * self.P0,
                "P", self._name)
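A standalone sketch of the slot-sizing logic, using the public tf.size/tf.eye APIs rather than the internal gen_array_ops/linalg_ops modules (the 0.1 factor is a made-up stand-in for self.P0):

import tensorflow as tf

v = tf.Variable(tf.zeros([3, 2]))
n = tf.size(v)                                  # 6 elements when flattened
p_slot = tf.eye(n, dtype=v.dtype.base_dtype) * 0.1
print(p_slot.shape)                             # (6, 6)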
Code Example #3
def Grad(unused_g, variables=None):  # pylint: disable=redefined-outer-name
    del variables
    gradient_graph = ops.get_default_graph()
    # The asserts check that shape/rank/size ops on the captured tensor `x`
    # end up in the forward graph, not in the gradient graph being built.
    shape = gen_array_ops.shape(x)
    assert shape.graph is forward_graph
    rank = gen_array_ops.rank(x)
    assert rank.graph is forward_graph
    size = gen_array_ops.size(x)
    assert size.graph is forward_graph
    # A freshly created op such as `zeros`, by contrast, belongs to the
    # gradient graph.
    zeros = array_ops.zeros(shape)
    assert zeros.graph is gradient_graph
    return zeros
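The public-API analogue of this pattern is a custom gradient that needs only the forward tensor's shape, never its values; a minimal sketch, assuming TensorFlow 2.x:

import tensorflow as tf

@tf.custom_gradient
def identity_with_zero_grad(x):
    def grad(dy):
        # Only the shape of x is used here, not its values.
        return tf.zeros(tf.shape(x))
    return tf.identity(x), grad

x = tf.ones([2, 3])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = identity_with_zero_grad(x)
print(tape.gradient(y, x))  # all zeros, shaped like x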
Code Example #4
def rank_internal(input, name=None, optimize=True):
  # pylint: disable=redefined-builtin
  """Returns the rank of a tensor.
  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the rank as a constant when possible.
  Returns:
    A `Tensor` of type `int32`.
  """
  with ops.name_scope(name, "Rank", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      # A SparseTensor's rank is the length of its dense_shape vector.
      return gen_array_ops.size(input.dense_shape, name=name)
    else:
      input = ops.convert_to_tensor(input)
      input_shape = input.get_shape()
      if optimize and input_shape.ndims is not None:
        # Fold a statically known rank into a constant.
        return constant(input_shape.ndims, dtypes.int32, name=name)
      return gen_array_ops.rank(input, name=name)
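A usage sketch through the public tf.rank API (not from the source above; note that rank is the number of dimensions, not the number of elements):

import tensorflow as tf

x = tf.zeros([2, 3, 5])
print(tf.rank(x))  # tf.Tensor(3, shape=(), dtype=int32)
# With a fully known static shape, the optimize path above folds this
# rank into a constant when building a graph.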