Example #1
def _convert_to_graph_constant(value, dtype=None, name=None, as_ref=False):
  """Captures a tfe Tensor while building a graph mode function.

  Creates a placeholder to pass the tensor as an argument.

  Arguments:
    value: A tfe.Tensor object
    dtype: The datatype of the value produced by the node in the graph.
    name:  Name of the node in the graph.
    as_ref: Ignored (required by register_tensor_conversion_function).

  Returns:
    A placeholder which will, at runtime, have the value of this tensor.

  Raises:
    ValueError: if called outside a defun context.
  """
  if context.in_eager_mode():
    return value
  _ = as_ref
  tensor_map = _scoped_captures.tensors
  if tensor_map is None:
    raise ValueError(
        "Trying to use tfe.Tensor objects in a graph outside graph mode. "
        "To build a graph use tfe.defun or tfe.make_template.")
  captured_value = tensor_map.get(tape.tensor_id(value), None)
  if captured_value is None:
    captured_value = graph_placeholder(
        dtype=dtype or value.dtype, shape=value.shape, name=name)
    if captured_value.dtype == dtypes.resource:
      captured_value._handle_data = value._handle_data  # pylint: disable=protected-access
    tensor_map[tape.tensor_id(value)] = (value, captured_value)
  else:
    captured_value = captured_value[1]
  return captured_value
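
Example #1's `as_ref` parameter exists only to satisfy TensorFlow's `register_tensor_conversion_function` contract. A minimal sketch of how such a converter is typically registered, assuming the eager Tensor class is exposed as `tensor.Tensor` as in the other examples:

# Hedged sketch: wire the converter into ops.convert_to_tensor, so graph
# builders transparently turn eager Tensors into placeholders.
# tensor.Tensor is an assumed name for the eager Tensor class.
from tensorflow.python.framework import ops

ops.register_tensor_conversion_function(
    tensor.Tensor, _convert_to_graph_constant)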
Example #2
def _copy(self, ctx, device_name):
    """Copies tensor to dest device."""
    # pylint: disable=protected-access
    # Creates a new tensor on the dest device.
    with errors.raise_exception_on_not_ok_status() as status:
        h = pywrap_tensorflow.TFE_TensorHandleCopyToDevice(
            self._handle, ctx._handle, device_name, status)
    new_tensor = _tensor_from_handle(h)
    if core.active_trace() is not None:
        core.active_trace().record_tensor("COPY",
                                          tape.tensor_id(new_tensor),
                                          new_tensor.device,
                                          new_tensor.shape.num_elements())
    return new_tensor
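
A hypothetical call site for `_copy`, assuming an existing eager tensor `t` and that `context.get_default_context()` returns the active context as in Example #3:

# Hedged sketch: copy an eager tensor to GPU 0. The device string follows
# the '/gpu:0' convention used in the comments of the examples below.
ctx = context.get_default_context()
t_gpu = t._copy(ctx, "/gpu:0")  # pylint: disable=protected-access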
Example #3
def execute(op_name, num_outputs, inputs, attrs=None, name=None):
  """Execute a TensorFlow operation.

  Args:
    op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
      execute.
    num_outputs: The number of outputs of the operation to fetch.
                 (Explicitly provided instead of being inferred for performance
                 reasons).
    inputs: A list of inputs to the operation. Each entry should be a Tensor, or
      a value which can be passed to the Tensor constructor to create one.
    attrs: A tuple with alternating string attr names and attr values for this
      operation.
    name: Customized name for the operation.

  Returns:
    A list of output Tensor objects; the list is empty if the operation
    produces no outputs.

  Raises:
    An exception on error.
  """
  ctx = context.get_default_context()
  # TODO(apassos) move this to convert_to_tensor
  inputs = [ag_core.getval(x) for x in inputs]
  # pylint: disable=protected-access
  input_handles = [c._handle for c in inputs]
  device_name = ctx.device_name
  try:
    outh = pywrap_tensorflow.TFE_Py_Execute(ctx._handle, device_name,
                                            op_name, input_handles, attrs,
                                            num_outputs)
    # pylint: enable=protected-access
  except core._NotOkStatusException as e:  # pylint: disable=protected-access
    raise core._status_to_exception(e.code, e.message)  # pylint: disable=protected-access
  # pylint: enable=protected-access

  tensors = [tensor._tensor_from_handle(x) for x in outh]  # pylint: disable=protected-access
  if core.active_trace() is not None:
    trace_name = name if name else op_name
    for t in tensors:
      # pylint: disable=protected-access
      core.active_trace().record_tensor(trace_name,
                                        tape.tensor_id(t),
                                        t._device_name(),
                                        t.shape.num_elements())
      # pylint: enable=protected-access
  return tensors
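
A hedged illustration of the calling convention for the `Add` op. The alternating name/value layout of `attrs` follows the docstring; encoding the dtype attr with `as_datatype_enum` is an assumption about what `TFE_Py_Execute` expects:

# Hedged sketch: add two eager tensors through the low-level helper.
x = tensor.Tensor([1.0, 2.0])
y = tensor.Tensor([3.0, 4.0])
outputs = execute("Add", num_outputs=1, inputs=[x, y],
                  attrs=("T", x.dtype.as_datatype_enum))
result = outputs[0]  # execute() always returns a list of Tensors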
Example #4
    def _backprop_call(self, args):
        """Calls the wrapped function and records the result on a tape."""
        all_args = args + self._extra_inputs
        signature = self._forward_fdef.definition.signature
        if context.in_graph_mode():
            g = ops.get_default_graph()
            g._add_function(self._forward_fdef)  # pylint: disable=protected-access
            unwrapped_args = [ag_core.getval(x) for x in all_args]
            op = g.create_op(
                signature.name,
                [ops.convert_to_tensor(x) for x in unwrapped_args],
                [dtypes.DType(x.type) for x in signature.output_arg],
                op_def=signature,
                name="FunctionCall",
                compute_shapes=False)
            outputs = op.outputs
            outputs = [outputs] if isinstance(outputs,
                                              (tensor.Tensor, ops.Tensor,
                                               type(None))) else list(outputs)
            for i, s in enumerate(self._output_shapes):
                outputs[i].set_shape(s)
        else:
            outputs = execute.execute(signature.name,
                                      num_outputs=len(signature.output_arg),
                                      inputs=all_args)
        real_outputs = outputs[:len(self._returns)]
        side_outputs = outputs[len(self._returns):]
        watched_extra_inputs = []
        for t in self._extra_inputs:
            tid = tape.tensor_id(t)
            # Use a distinct loop variable so `t` (the extra input) is not
            # clobbered when the for-else falls through without a break.
            for tape_entry in tape._tape_stack.stack:  # pylint: disable=protected-access
                w = tape_entry.value.tensors.get(tid, None)
                if w is not None:
                    watched_extra_inputs.append(w)
                    break
            else:  # Note: for-else here done on purpose
                watched_extra_inputs.append(t)
        real_outputs = tape.record_operation(real_outputs,
                                             (args + watched_extra_inputs),
                                             side_outputs,
                                             self._backward_function)

        return self._build_call_outputs(self._returns, real_outputs)
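
The `for ... else` flagged as intentional above relies on a core Python rule: the `else` suite runs only when the loop finishes without hitting `break`. A standalone illustration:

# The for-else idiom: the else branch runs only if no break fired.
for candidate in ["a", "b", "c"]:
    if candidate == "x":
        break
else:
    print("not found")  # reached because the loop never broke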
Example #5
def _watch_value_from_tape(tensor):
  for t in tape._tape_stack.stack:  # pylint: disable=protected-access
    w = t.value.tensors.get(tape.tensor_id(tensor), None)
    if w is not None:
      return w
  return tensor
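
A hypothetical call site for this helper, substituting tape-watched values for raw inputs before an operation is recorded (`inputs` is an assumed surrounding variable):

# Hedged sketch: swap each raw input for its tape-watched counterpart, if any.
watched_inputs = [_watch_value_from_tape(x) for x in inputs]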
Example #6
    def __init__(self, value, dtype=None):
        """Creates a Tensor object from a Python object or numpy array.

        May share storage with the numpy array, in which case changes to the
        numpy object will be reflected in the Tensor.

        Arguments:
          value: A numpy.array or a Python object to create a Tensor for.
          dtype: TensorFlow dtype for the returned Tensor. If None, one will be
            automatically selected.
        """
        # TODO(ashankar): Evaluate if we can and perhaps share code with
        # tf.constant defined in
        # https://www.tensorflow.org/code/tensorflow/python/framework/constant_op.py
        self._id = tf_ops.uid()
        if not isinstance(value, np.ndarray):
            npt = None if dtype is None else dtype.as_numpy_dtype
            value = np.array(value, dtype=npt)
            if dtype is None:
                value = _maybe_modify_numpy_dtype_determination(value)
        elif dtype is not None:
            npt = dtype.as_numpy_dtype
            if npt != value.dtype:
                value = value.astype(npt)
        try:
            value = np.asarray(value, order="C")
            self._handle = pywrap_tensorflow.TFE_Py_NumpyToTensorHandle(value)
        except core._NotOkStatusException as e:  # pylint: disable=protected-access
            raise core._status_to_exception(e.code, e.message)  # pylint: disable=protected-access

        # Almost all TensorFlow kernels for GPU devices keep int32 tensors in host
        # memory.  This change approximates the same behavior for eager execution -
        # keeping int32 tensors in host memory.
        #
        # We do so to preclude the need for callers into such kernels from having to
        # explicitly place the int32 tensors in host memory. For example, prior to
        # this change one needed:
        #
        # with tfe.device('/gpu:0'):
        #   ...  # code here
        #   with tfe.device('/cpu:0'):
        #     shape = tfe.Tensor(...)
        #   y = tfe.ops.random_uniform(.., shape)
        #
        # Without the CPU device block tfe.ops.random_uniform would fail since the
        # kernel expects the shape in host memory.
        #
        # After this change, we simplify the code:
        #
        # with tfe.device('/gpu:0'):
        #   y = tfe.ops.random_uniform(.., tfe.Tensor(...))
        #
        # The approximation is not exact since if there are GPU kernels which do not
        # require host memory for int32 tensors, there will be a discrepancy between
        # eager execution and TensorFlow graphs. However, as of July 2017, there
        # were no known GPU kernels that kept int32 tensors in device memory.
        if _in_gpu_device() and value.dtype != np.int32:
            ctx = context.get_default_context()
            # pylint: disable=protected-access
            device_name = ctx.device_name
            with errors.raise_exception_on_not_ok_status() as status:
                self._handle = pywrap_tensorflow.TFE_TensorHandleCopyToDevice(
                    self._handle, ctx._handle, device_name, status)
            # pylint: enable=protected-access

        self._dtype = dtypes.as_dtype(
            pywrap_tensorflow.TFE_TensorHandleDataType(self._handle))

        # This mirrors tensorflow.core.framework.ops.Tensor._handle_data, which
        # will be None for tensors of type other than DT_RESOURCE. For DT_RESOURCE
        # tensors, this will contain a serialized HandleData proto with shape
        # inference metadata about shapes and dtypes of resources accessible from
        # this handle.
        self._handle_data = None
        if core.active_trace() is not None:
            core.active_trace().record_tensor("MANUAL", tape.tensor_id(self),
                                              self.device,
                                              self.shape.num_elements())
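
A hypothetical construction call. Per the int32 comment above, the first tensor below stays in host memory even inside a GPU device block, while the float tensor would be copied to the GPU; the `Tensor` and `dtypes` names are assumed from the surrounding examples:

# Hedged sketch: constructing eager Tensors.
shape = Tensor([2, 2], dtype=dtypes.int32)  # int32: kept in host memory
data = Tensor([[1.0, 2.0], [3.0, 4.0]])     # dtype inferred automatically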
Example #7
def __del__(self):
    if (pywrap_tensorflow is not None
            and pywrap_tensorflow.TFE_DeleteTensorHandle is not None):
        pywrap_tensorflow.TFE_DeleteTensorHandle(self._handle)
    if core.active_trace() is not None:
        core.active_trace().delete_tensor(tape.tensor_id(self))
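
The `is not None` checks guard against interpreter shutdown: during teardown, module globals such as `pywrap_tensorflow` may already have been cleared by the time `__del__` runs, so the handle is deleted only when the binding is still available.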