Example #1
  def _write(self, index, value):
    """Writes `value` into index named by `index`.

    Args:
      index: 0-D.  int32 scalar with the index to write to.
      value: N-D.  Tensor of type `dtype`.  The `Tensor` to write to `index`.

    Raises:
      errors_impl.InvalidArgumentError: `value` dtype does not match dtype.
      errors_impl.OutOfRangeError: `index` is out of bounds.
      ValueError: shape of `value` is not consistent with inferred shape.
    """

    if isinstance(index, ops.EagerTensor):
      index = index.numpy()

    if index < 0:
      raise errors_impl.OutOfRangeError(
          None, None,
          "Writing to negative indices (index %d) is not allowed." % index)

    size = len(self._tensor_array)
    if index >= size:
      if not self._dynamic_size:
        raise errors_impl.OutOfRangeError(
            None, None,
            "Tried to write to index %d but array is not resizeable and size "
            "is: %d" % (index, size))
      self._tensor_array.extend([None for _ in range(index - size + 1)])

    if not isinstance(value, ops.EagerTensor):
      value = ops.convert_to_tensor(value)

    if self._infer_shape:
      if self._element_shape is None:
        self._element_shape = value.shape
      elif not self._element_shape.is_compatible_with(value.shape):
        raise ValueError("Incompatible shape for value (%s), expected (%s)" %
                         (value.shape.as_list(), self._element_shape.as_list()))

    if self._dtype != value.dtype:
      raise errors_impl.InvalidArgumentError(
          None, None,
          "TensorArray dtype is %s but Op is trying to write dtype %s" %
          (self._dtype.name, value.dtype.name))
    self._tensor_array[index] = value
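
For orientation, here is a minimal sketch (assuming TensorFlow 2.x with eager execution) that drives this write path through the public tf.TensorArray API; the bounds and dtype checks in _write surface as the exceptions shown:

import tensorflow as tf

# A fixed-size eager TensorArray: two float32 elements, not resizable.
ta = tf.TensorArray(tf.float32, size=2, dynamic_size=False)
ta = ta.write(0, tf.constant([1.0, 2.0]))  # element shape (2,) is inferred

# Writing past the end of a non-resizable array hits the
# OutOfRangeError branch of _write above.
try:
  ta.write(5, tf.constant([3.0, 4.0]))
except tf.errors.OutOfRangeError as e:
  print(e.message)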
Example #2
  def concat(self, name=None):
    """See TensorArray."""
    try:
      return array_ops.concat(
          [self._maybe_zero(ix) for ix in range(len(self._tensor_array))],
          0, name=name)
    except errors_impl.OpError:
      # Reproduce a subset of the error-handling for graph-mode TensorArrays.
      shapes = [t.shape for t in self._tensor_array]
      ndims = [s.ndims for s in shapes]
      if 0 in ndims:
        idx = ndims.index(0)
        raise errors_impl.InvalidArgumentError(
            None, None, "Concat saw a scalar shape at index %d but requires "
            "at least vectors." % idx)
      else:
        raise
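
A short sketch of the failure path (again assuming TF 2.x eager execution): writing rank-0 elements and then calling concat lands in the except branch, which rewrites the low-level concat failure as the graph-mode-style InvalidArgumentError:

import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=2)
ta = ta.write(0, 1.0)  # rank-0 (scalar) element
ta = ta.write(1, 2.0)

# concat requires at least rank-1 elements, so scalars raise here.
try:
  ta.concat()
except tf.errors.InvalidArgumentError as e:
  print(e.message)  # e.g. "Concat saw a scalar shape at index 0 ..."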
Example #3
  def sparse_read(self, indices, name=None):
    """Reads the value of this variable sparsely, using `gather`."""
    if indices.dtype != self._ktype:
      raise errors_impl.InvalidArgumentError(
          None, None,
          "dtype of indices does not match the EmbeddingVariable key type.")
    with ops.name_scope("Gather" if name is None else name) as name:
      resource_variable_ops.variable_accessed(self)
      default_value = self._initializer(
          array_ops.concat(
              [array_ops.shape(indices), self.shape.as_list()[1:]], axis=0),
          dtype=self.dtype)
      value = gen_ev_ops.ev_gather(
          self._handle, indices, default_value, name=name)
    return array_ops.identity(value)
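
EmbeddingVariable and gen_ev_ops.ev_gather come from the DeepRec fork rather than stock TensorFlow. As a rough analogue, the same gather-by-indices pattern on a standard ResourceVariable looks like this (a sketch, assuming TF 2.x; plain variables have no initializer-backed defaults for missing keys):

import tensorflow as tf

# A 4 x 3 float variable; sparse_read gathers whole rows by index.
v = tf.Variable(tf.reshape(tf.range(12.0), [4, 3]))
rows = v.sparse_read(tf.constant([0, 2], dtype=tf.int32))
print(rows.numpy())  # rows 0 and 2 of the variable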
Example #4
def error_translator(e):
  """Translate the tensor_slice_reader.cc errors."""
  # TODO(b/143319754): Remove the RuntimeError casting logic once we resolve the
  # issue with throwing python exceptions from C++.
  error_message = str(e)
  if ('not found in checkpoint' in error_message or
      'Failed to find any matching files for' in error_message):
    raise errors_impl.NotFoundError(None, None, error_message)
  elif ('Sliced checkpoints are not supported' in error_message or
        'Data type not supported' in error_message):
    raise errors_impl.UnimplementedError(None, None, error_message)
  elif 'Failed to get matching files on' in error_message:
    raise errors_impl.InvalidArgumentError(None, None, error_message)
  elif 'Unable to open table file' in error_message:
    raise errors_impl.DataLossError(None, None, error_message)
  elif ('Failed to find the saved tensor slices' in error_message or
        'not convertible to numpy dtype' in error_message):
    raise errors_impl.InternalError(None, None, error_message)
  else:
    raise errors_impl.OpError(None, None, error_message, errors_impl.UNKNOWN)
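
To see the translation in action, a hedged sketch: feeding error_translator a synthetic RuntimeError whose text matches one of the patterns re-raises it as the corresponding typed error (the message string below is invented for the demo):

import tensorflow as tf

# Hypothetical message, shaped like tensor_slice_reader.cc output.
e = RuntimeError("Key foo/bar not found in checkpoint")
try:
  error_translator(e)
except tf.errors.NotFoundError as err:
  print(type(err).__name__, "-", err.message)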
Example #5
def graph_to_function_def(graph, operations, inputs, outputs, out_names=None):
  """Returns `graph` as a `FunctionDef` protocol buffer.

  This method creates a [`FunctionDef`](
  https://www.tensorflow.org/code/tensorflow/core/framework/function.proto)
  protocol buffer that contains all the ops in `operations`.  The
  operations become the body of the function.

  The arguments `inputs` and `outputs` will be listed as the input
  and output tensors of the function.  They must be lists of
  tensors present in the graph.  The lists can optionally be empty.

  Args:
    graph: Graph.
    operations: the operations to put in the function. Must be a subset of
      the operations in the graph.
    inputs: List of tensors. Inputs to the function.
    outputs: List of tensors. Outputs of the function.
    out_names: Optional list of string names for the outputs.

  Returns:
    A FunctionDef protocol buffer.

  Raises:
    errors_impl.InvalidArgumentError: if `out_names` is specified but does
      not match `outputs` in length.
    ValueError: if `out_names` contains duplicates.
  """
  func = function_pb2.FunctionDef()
  func.signature.name = "_"
  used_names = set()
  func.signature.input_arg.extend(
      [_tensor_to_argdef(i, used_names=used_names) for i in inputs])
  # Initializes the input map with all placeholder input tensors.
  initial_dict = {}
  for o, m in zip(inputs, func.signature.input_arg):
    initial_dict[o.name] = m.name
  if out_names is None:
    used_names = set()
    func.signature.output_arg.extend(
        [_tensor_to_argdef(o, used_names=used_names) for o in outputs])
  elif len(outputs) != len(out_names):
    raise errors_impl.InvalidArgumentError(
        None, None,
        "output names must be either empty or equal in size to outputs. "
        "output names size = %d outputs size = %d" %
        (len(out_names), len(outputs)))
  elif len(out_names) != len(set(out_names)):
    raise ValueError("Must not have duplicates in out_names: %s" %
                     ", ".join(out_names))
  else:
    func.signature.output_arg.extend(
        [_tensor_to_argdef(o, name=n) for o, n in zip(outputs, out_names)])
  func_arg_placeholders = set(i.name for i in inputs)
  input_dict = _create_input_dict(graph,
                                  func_arg_placeholders,
                                  initial_value=initial_dict)

  for op in operations:
    if _is_in_placeholders(op, func_arg_placeholders):
      continue
    _add_op_node(op, func, input_dict)

  if out_names is None:
    for index, o in enumerate(outputs):
      k = func.signature.output_arg[index].name
      func.ret[k] = input_dict[o.name]
  else:
    for o, n in zip(outputs, out_names):
      func.ret[n] = input_dict[o.name]

  return func
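
A small usage sketch (assuming the TF1-style graph APIs under tf.compat.v1, and that this function is importable from tensorflow.python.framework.graph_to_function_def, where it lives in the TensorFlow tree): build a one-op graph and serialize it as a FunctionDef:

import tensorflow as tf
from tensorflow.python.framework.graph_to_function_def import (
    graph_to_function_def)

g = tf.Graph()
with g.as_default():
  x = tf.compat.v1.placeholder(tf.float32, shape=[None], name="x")
  y = tf.square(x, name="y")

# The placeholder op backing `x` is skipped via _is_in_placeholders;
# every other op in `operations` becomes part of the function body.
fdef = graph_to_function_def(
    g, g.get_operations(), inputs=[x], outputs=[y], out_names=["out"])
print(fdef.signature)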