Example no. 1
  def decorator(self, **kwargs):
    """Finds existing Tensors, runs the test, checks for new Tensors."""

    def _is_tensor(obj):
      try:
        return (isinstance(obj, ops.Tensor) or
                isinstance(obj, variables.Variable))
      except ReferenceError:
        # If the object no longer exists, we don't care about it.
        return False

    tensors_before = set(id(obj) for obj in gc.get_objects() if _is_tensor(obj))
    outside_container_prefix = ops.get_default_graph()._container_prefix
    with IsolateTest():
      # Run the test in a new graph so that collections get cleared when it's
      # done, but inherit the container prefix so that we can print the values
      # of variables which get leaked when executing eagerly.
      ops.get_default_graph()._container_prefix = outside_container_prefix
      f(self, **kwargs)
    # Make an effort to clear caches, which would otherwise look like leaked
    # Tensors.
    backprop._last_zero = [None]
    backprop._shape_dtype = [None, None]
    context.get_default_context().scalar_cache().clear()
    gc.collect()
    tensors_after = [
        obj for obj in gc.get_objects()
        if _is_tensor(obj) and id(obj) not in tensors_before
    ]
    if tensors_after:
      raise AssertionError(("%d Tensors not deallocated after test: %s" % (
          len(tensors_after),
          str(tensors_after),
      )))
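The `decorator` above is the inner closure of a leak-checking wrapper that snapshots Tensor ids before a test and compares them afterwards. A minimal, self-contained sketch of the same pattern outside TensorFlow, with a hypothetical `assert_no_new_objects` helper and a gc-tracked `Probe` class:

# A sketch of the before/after gc.get_objects() pattern used above, reduced
# to plain Python. `assert_no_new_objects` and `Probe` are hypothetical names.
import functools
import gc

def assert_no_new_objects(predicate):
  def wrapper(f):
    @functools.wraps(f)
    def decorator(*args, **kwargs):
      before = set(id(obj) for obj in gc.get_objects() if predicate(obj))
      result = f(*args, **kwargs)
      gc.collect()
      leaked = [obj for obj in gc.get_objects()
                if predicate(obj) and id(obj) not in before]
      if leaked:
        raise AssertionError("%d objects not deallocated after call: %s"
                             % (len(leaked), leaked))
      return result
    return decorator
  return wrapper

class Probe(object):
  """A gc-tracked object type used only to exercise the check."""

_cache = []

@assert_no_new_objects(lambda obj: isinstance(obj, Probe))
def no_leak():
  probe = Probe()  # Dropped when the function returns, so nothing is reported.

@assert_no_new_objects(lambda obj: isinstance(obj, Probe))
def leaks():
  _cache.append(Probe())  # Survives the call, so the wrapper raises.

no_leak()
try:
  leaks()
except AssertionError as e:
  print("caught: %s" % e)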
Example no. 2
  def decorator(self, **kwargs):
    """Finds existing Tensors, runs the test, checks for new Tensors."""

    def _is_tensor(obj):
      try:
        return (isinstance(obj, ops.Tensor) or
                isinstance(obj, variables.Variable))
      except ReferenceError:
        # If the object no longer exists, we don't care about it.
        return False

    tensors_before = set(id(obj) for obj in gc.get_objects() if _is_tensor(obj))
    outside_container_prefix = ops.get_default_graph()._container_prefix
    with IsolateTest():
      # Run the test in a new graph so that collections get cleared when it's
      # done, but inherit the container prefix so that we can print the values
      # of variables which get leaked when executing eagerly.
      ops.get_default_graph()._container_prefix = outside_container_prefix
      f(self, **kwargs)
    # Make an effort to clear caches, which would otherwise look like leaked
    # Tensors.
    backprop._last_zero = [None]
    backprop._shape_dtype = [None, None]
    context.get_default_context().scalar_cache().clear()
    gc.collect()
    tensors_after = [
        obj for obj in gc.get_objects()
        if _is_tensor(obj) and id(obj) not in tensors_before
    ]
    if tensors_after:
      raise AssertionError(("%d Tensors not deallocated after test: %s" % (
          len(tensors_after),
          str(tensors_after),
      )))
 def _compare(self, dims, val, np_ans, use_gpu):
   ctx = context.get_default_context()
   device = "GPU:0" if (use_gpu and ctx.num_gpus()) else "CPU:0"
   with ops.device(device):
     tf_ans = array_ops.fill(dims, val, name="fill")
     out = tf_ans.numpy()
   self.assertAllClose(np_ans, out)
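A possible caller of the `_compare` helper above, assuming it is a method of a TF test class (`test.TestCase`) and that `context` is imported as in the other snippets; the expected value comes from `np.full`:

# A hedged sketch of a fill test built on `_compare`. Class and method names
# are hypothetical; `test` and `context` are assumed imported as elsewhere.
import numpy as np

class FillTest(test.TestCase):

  def testFillFloat(self):
    with context.eager_mode():
      dims = [2, 3]
      val = 3.25
      np_ans = np.full(dims, val, dtype=np.float32)
      self._compare(dims, val, np_ans, use_gpu=False)
      self._compare(dims, val, np_ans, use_gpu=True)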
Example no. 4
    def testEagerIdentity(self):
        with context.eager_mode():
            ctx = context.get_default_context()
            if not ctx.num_gpus():
                self.skipTest("No GPUs found")

            def _test(x, y, device):
                self.assertIsNot(x, y)
                self.assertAllEqual(x.numpy(), y.numpy())
                self.assertTrue(device in y.device.lower())

            with ops.device("gpu:0"):
                a = constant_op.constant([[2], [3]], dtype=dtypes.float32)
            with ops.device("gpu:0"):
                b = array_ops.identity(a)
                _test(a, b, "gpu")
            with ops.device("cpu:0"):
                c = array_ops.identity(b)
                _test(b, c, "cpu")
            with ops.device("cpu:0"):
                d = array_ops.identity(c)
                _test(c, d, "cpu")
            with ops.device("gpu:0"):
                e = array_ops.identity(d)
                _test(d, e, "gpu")
Example no. 5
 def _compare(self, dims, val, np_ans, use_gpu):
     ctx = context.get_default_context()
     device = "GPU:0" if (use_gpu and ctx.num_gpus()) else "CPU:0"
     with ops.device(device):
         tf_ans = array_ops.fill(dims, val, name="fill")
         out = tf_ans.numpy()
     self.assertAllClose(np_ans, out)
Example no. 6
def execute(op_name, num_outputs, inputs, attrs=None, name=None):
  """Execute a TensorFlow operation.

  Args:
    op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
      execute.
    num_outputs: The number of outputs of the operation to fetch.
                 (Explicitly provided instead of being inferred for performance
                 reasons).
    inputs: A list of inputs to the operation. Each entry should be a Tensor, or
      a value which can be passed to the Tensor constructor to create one.
    attrs: A tuple with alternating string attr names and attr values for this
      operation.
    name: Customized name for the operation.

  Returns:
    None if there are no outputs, a single Tensor object if there is one output
    and a list of Tensor objects if there are multiple outputs.

  Raises:
    An exception on error.
  """
  ctx = context.get_default_context()
  # TODO(apassos) move this to convert_to_tensor
  inputs = [ag_core.getval(x) for x in inputs]
  # pylint: disable=protected-access
  input_handles = [c._handle for c in inputs]
  device_name = ctx.device_name
  try:
    outh = pywrap_tensorflow.TFE_Py_Execute(ctx._handle, device_name,
                                            str(op_name), input_handles, attrs,
                                            num_outputs)
    # pylint: enable=protected-access
  except core._NotOkStatusException as e:  # pylint: disable=protected-access
    if name is not None:
      message = e.message + " name: " + name
    else:
      message = e.message
    raise core._status_to_exception(e.code, message)  # pylint: disable=protected-access
  # pylint: enable=protected-access

  tensors = [tensor._tensor_from_handle(x) for x in outh]  # pylint: disable=protected-access
  # TODO(alive, cais): Use the execution callback mechanism.
  if core.active_trace() is not None:
    trace_name = name if name else op_name
    for t in tensors:
      # pylint: disable=protected-access
      core.active_trace().record_tensor(trace_name,
                                        ops.tensor_id(t),
                                        t._device_name(),
                                        t.shape.num_elements())
      # pylint: enable=protected-access

  # TODO(cais): Optimize this, perhaps by replacing this execute function with
  # a different one when there are execution callback(s).
  for callback in ctx.post_execution_callbacks:
    callback(op_name, name, attrs, inputs, tensors)

  return tensors
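A hedged usage sketch for `execute`: running a single `Mul` op, whose only attr is `T`, with the attrs tuple alternating names and values as the docstring describes. As implemented above, `execute` always returns the list of output Tensors, so the single output is unpacked; module aliases (`context`, `constant_op`) follow the other snippets:

# Sketch only: execute a "Mul" kernel directly through `execute`.
with context.eager_mode():
  x = constant_op.constant([2.0, 3.0])
  y = constant_op.constant([10.0, 20.0])
  product, = execute(
      "Mul",
      num_outputs=1,
      inputs=[x, y],
      attrs=("T", x.dtype.as_datatype_enum),  # Alternating attr name/value.
      name="my_mul")
  print(product.numpy())  # => [20. 60.]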
Example no. 7
def register_function_def(fdef):
  fdef_string = fdef.SerializeToString()
  with errors.raise_exception_on_not_ok_status() as status:
    pywrap_tensorflow.TFE_ContextAddFunctionDef(
        context.get_default_context()._handle,  # pylint: disable=protected-access
        fdef_string,
        len(fdef_string),
        status)
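`register_function_def` expects a `FunctionDef` protocol buffer. A sketch of the call shape, assuming `function_pb2` from `tensorflow.core.framework`; the proto built here is deliberately minimal and not a usable function:

# Sketch only: register a (trivial, illustrative) FunctionDef with the
# default eager context. A real FunctionDef needs a full signature and body.
from tensorflow.core.framework import function_pb2

fdef = function_pb2.FunctionDef()
fdef.signature.name = "TrivialFn"  # OpDef-style signature for the function.
register_function_def(fdef)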
Example no. 8
def register_function_def(fdef):
    fdef_string = fdef.SerializeToString()
    with errors.raise_exception_on_not_ok_status() as status:
        pywrap_tensorflow.TFE_ContextAddFunctionDef(
            context.get_default_context()._handle,  # pylint: disable=protected-access
            fdef_string,
            len(fdef_string),
            status)
Example no. 9
def enable_tracing():
  """Enables tracing of execution and memory usage.

  WARNING: tracing is not thread-safe.
  """
  global _active_trace
  _active_trace = memory_trace.MemoryTrace(
      len(context.get_default_context().devices()))
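A sketch of the observable effect, assuming `active_trace` lives in the same module (referred to as `core` in the `execute` example above) whose `_active_trace` global is set here; how the collected trace is reported is not shown in these snippets:

# Sketch only: once tracing is enabled, `execute` records every output tensor
# into the MemoryTrace via core.active_trace() (see the earlier example).
with context.eager_mode():
  enable_tracing()
  assert core.active_trace() is not None
  _ = array_ops.ones([2, 2])  # Recorded by the active trace.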
Example no. 10
def add_execution_callback(callback):
  """Add an execution callback to the default eager context.

  An execution callback is invoked immediately after an eager operation or
  function has finished execution, providing access to the op's type, name,
  input and output tensors. Multiple execution callbacks can be added, in
  which case the callbacks will be invoked in the order in which they are
  added. To clear all execution callbacks that have been added, use
  `clear_execution_callbacks()`.

  Example:
  ```python
  def print_even_callback(op_type, op_name, attrs, inputs, outputs):
    # A callback that prints only the even output values.
    if outputs[0].numpy() % 2 == 0:
      print("Even output from %s: %s" % (op_name or op_type,  outputs))
  tfe.add_execution_callback(print_even_callback)

  x = tf.pow(2.0, 3.0) - 3.0
  y = tf.multiply(x, tf.add(1.0, 5.0))
  # When the line above is run, you will see all intermediate outputs that are
  # even numbers printed to the console.

  tfe.clear_execution_callbacks()
  ```

  Args:
    callback: a callable of the signature
      `f(op_type, op_name, attrs, inputs, outputs)`.
      `op_type` is the type of the operation that was just executed (e.g.,
        `MatMul`).
      `op_name` is the name of the operation that was just executed. This
        name is set by the client who created the operation and can be `None` if
        it is unset.
      `attrs` contains the attributes of the operation as a `tuple` of
        alternating attribute name and attribute value.
      `inputs` is the `list` of input `Tensor`(s) to the op.
      `outputs` is the `list` of output `Tensor`(s) from the op.
       Return value(s) from the callback are ignored.
  """
  execute.execute = execute.execute_with_callbacks
  context.get_default_context().add_post_execution_callback(callback)
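Besides the printing callback in the docstring, a sketch of another callback using the documented `f(op_type, op_name, attrs, inputs, outputs)` signature: tallying op types, then removing all callbacks with `clear_execution_callbacks` (shown in a later example):

# Sketch only: count how many times each op type runs while the callback is
# registered. Uses the same `tfe`/`tf` aliases as the docstring example.
import collections

op_counts = collections.Counter()

def count_ops_callback(op_type, op_name, attrs, inputs, outputs):
  del op_name, attrs, inputs, outputs  # Only the op type is of interest here.
  op_counts[op_type] += 1

tfe.add_execution_callback(count_ops_callback)
x = tf.add(1.0, 2.0)
y = tf.multiply(x, 3.0)
tfe.clear_execution_callbacks()
print(op_counts)  # e.g. Counter({'Add': 1, 'Mul': 1})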
Example no. 11
def add_execution_callback(callback):
    """Add an execution callback to the default eager context.

  An execution callback is invoked immediately after an eager operation or
  function has finished execution, providing access to the op's type, name,
  input and output tensors. Multiple execution callbacks can be added, in
  which case the callbacks will be invoked in the order in which they are
  added. To clear all execution callbacks that have been added, use
  `clear_execution_callbacks()`.

  Example:
  ```python
  def print_even_callback(op_type, op_name, attrs, inputs, outputs):
    # A callback that prints only the even output values.
    if outputs[0].numpy() % 2 == 0:
      print("Even output from %s: %s" % (op_name or op_type,  outputs))
  tfe.add_execution_callback(print_even_callback)

  x = tf.pow(2.0, 3.0) - 3.0
  y = tf.multiply(x, tf.add(1.0, 5.0))
  # When the line above is run, you will see all intermediate outputs that are
  # even numbers printed to the console.

  tfe.clear_execution_callbacks()
  ```

  Args:
    callback: a callable of the signature
      `f(op_type, op_name, attrs, inputs, outputs)`.
      `op_type` is the type of the operation that was just executed (e.g.,
        `MatMul`).
      `op_name` is the name of the operation that was just executed. This
        name is set by the client who created the operation and can be `None` if
        it is unset.
      `attrs` contains the attributes of the operation as a `tuple` of
        alternating attribute name and attribute value.
      `inputs` is the `list` of input `Tensor`(s) to the op.
      `outputs` is the `list` of output `Tensor`(s) from the op.
       Return value(s) from the callback are ignored.
  """
    execute.execute = execute.execute_with_callbacks
    context.get_default_context().add_post_execution_callback(callback)
Example no. 12
 def testDefaultContext(self):
     orig = context.get_default_context()
     self.assertIs(context.get_default_context(), orig)
     c0 = context.Context()
     self.assertIs(context.get_default_context(), orig)
     context_manager_0 = c0.as_default()
     self.assertIs(context.get_default_context(), orig)
     with context_manager_0 as c0:
         self.assertIs(context.get_default_context(), c0)
         with context.Context().as_default() as c1:
             self.assertIs(context.get_default_context(), c1)
         self.assertIs(context.get_default_context(), c0)
     self.assertIs(context.get_default_context(), orig)
Example no. 13
 def testDefaultContext(self):
   orig = context.get_default_context()
   self.assertIs(context.get_default_context(), orig)
   c0 = context.Context()
   self.assertIs(context.get_default_context(), orig)
   context_manager_0 = c0.as_default()
   self.assertIs(context.get_default_context(), orig)
   with context_manager_0 as c0:
     self.assertIs(context.get_default_context(), c0)
     with context.Context().as_default() as c1:
       self.assertIs(context.get_default_context(), c1)
     self.assertIs(context.get_default_context(), c0)
   self.assertIs(context.get_default_context(), orig)
Example no. 14
 def run_fn(ctx1):
     ctx2 = context.get_default_context()
      # Default contexts created in different threads are different.
     self.assertIsNot(ctx1, ctx2)
     # Check that default values of the context created in a different thread
     # are set correctly.
     self.assertFalse(ctx2.in_graph_mode())
     self.assertTrue(ctx2.in_eager_mode())
     self.assertEqual('', ctx2.scope_name)
     self.assertEqual(-1, ctx2._device_index)  # pylint: disable=protected-access
     self.assertFalse(ctx2.recording_summaries)
     self.assertIsNone(ctx2.summary_writer_resource)
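`run_fn` is presumably invoked from a second thread; a sketch of a driver using `threading`, with a hypothetical test method name, that passes in the main thread's default context:

# Sketch only: drive `run_fn` on a fresh thread so that a new default context
# is created lazily there, distinct from the one captured on this thread.
import threading

def testContextIsThreadLocal(self):
  ctx1 = context.get_default_context()
  t = threading.Thread(target=run_fn, args=(ctx1,))
  t.start()
  t.join()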
Example no. 15
 def run_fn(ctx1):
   ctx2 = context.get_default_context()
    # Default contexts created in different threads are different.
   self.assertIsNot(ctx1, ctx2)
   # Check that default values of the context created in a different thread
   # are set correctly.
   self.assertFalse(ctx2.in_graph_mode())
   self.assertTrue(ctx2.in_eager_mode())
   self.assertEqual('', ctx2.scope_name)
   self.assertEqual(-1, ctx2._device_index)  # pylint: disable=protected-access
   self.assertFalse(ctx2.recording_summaries)
   self.assertIsNone(ctx2.summary_writer_resource)
Example no. 16
    def testVariableEager(self):
        with context.eager_mode():
            init = array_ops.ones(shape=[10, 20, 35], dtype=dtypes.int32)
            constraint = lambda x: x
            with ops.name_scope("foo"):
                v = resource_variable_ops.ResourceVariable(
                    name="var7",
                    initial_value=init,
                    caching_device="cpu:0",
                    constraint=constraint)
            # Test properties
            self.assertEqual(dtypes.int32, v.dtype)
            self.assertEqual("foo/var7:0", v.name)
            self.assertAllEqual([10, 20, 35], v.shape.as_list())
            self.assertEqual(context.get_default_context().device_name,
                             v.device)
            self.assertTrue(isinstance(v.handle, ops.EagerTensor))
            self.assertEqual(constraint, v.constraint)
            self.assertAllEqual(init.numpy(), v.read_value().numpy())
            self.assertAllEqual(init.numpy(), v.value().numpy())

            # Callable init.
            callable_init = lambda: init * 2
            v2 = resource_variable_ops.ResourceVariable(
                initial_value=callable_init, name="var7")
            self.assertEqual("var7:0", v2.name)
            self.assertAllEqual(2 * init.numpy(), v2.read_value().numpy())

            # Test assign_add.
            new_v2_val = v2.assign_add(v.read_value())
            self.assertAllEqual(v.read_value().numpy() * 3, new_v2_val.numpy())

            # Test assign_sub.
            new_v2_val = v2.assign_sub(v.read_value())
            self.assertAllEqual(v.read_value().numpy() * 2, new_v2_val.numpy())

            # Test assign.
            v2.assign(v.read_value())
            self.assertAllEqual(v.read_value().numpy(),
                                v2.read_value().numpy())

            # Test load
            v2.load(2 * v.read_value())
            self.assertAllEqual(2 * v.read_value().numpy(),
                                v2.read_value().numpy())

            # Test convert_to_tensor
            t = ops.convert_to_tensor(v)
            self.assertAllEqual(t.numpy(), v.read_value().numpy())

            # Test operations
            self.assertAllEqual((v * 2).numpy(), (v + v).numpy())
Example no. 17
    def as_gpu_tensor(self, gpu_index=0):
        """A copy of this Tensor with contents backed by memory on the GPU.

    Arguments:
      gpu_index: Identifies the GPU on which to place the contents of the
        returned Tensor.

    Returns:
      A GPU-memory backed Tensor object initialized with the same contents
      as this Tensor.
    """
        return self._copy(context.get_default_context(),
                          "GPU:" + str(gpu_index))
  def testVariableEager(self):
    with context.eager_mode():
      init = array_ops.ones(shape=[10, 20, 35], dtype=dtypes.int32)
      constraint = lambda x: x
      with ops.name_scope("foo"):
        v = resource_variable_ops.ResourceVariable(
            name="var7",
            initial_value=init,
            caching_device="cpu:0",
            constraint=constraint)
      # Test properties
      self.assertEqual(dtypes.int32, v.dtype)
      self.assertEqual("foo/var7:0", v.name)
      self.assertAllEqual([10, 20, 35], v.shape.as_list())
      self.assertEqual(context.get_default_context().device_name, v.device)
      self.assertTrue(isinstance(v.handle, ops.EagerTensor))
      self.assertEqual(constraint, v.constraint)
      self.assertAllEqual(init.numpy(), v.read_value().numpy())
      self.assertAllEqual(init.numpy(), v.value().numpy())

      # Callable init.
      callable_init = lambda: init * 2
      v2 = resource_variable_ops.ResourceVariable(
          initial_value=callable_init, name="var7")
      self.assertEqual("var7:0", v2.name)
      self.assertAllEqual(2 * init.numpy(), v2.read_value().numpy())

      # Test assign_add.
      new_v2_val = v2.assign_add(v.read_value())
      self.assertAllEqual(v.read_value().numpy() * 3, new_v2_val.numpy())

      # Test assign_sub.
      new_v2_val = v2.assign_sub(v.read_value())
      self.assertAllEqual(v.read_value().numpy() * 2, new_v2_val.numpy())

      # Test assign.
      v2.assign(v.read_value())
      self.assertAllEqual(v.read_value().numpy(), v2.read_value().numpy())

      # Test load
      v2.load(2 * v.read_value())
      self.assertAllEqual(2 * v.read_value().numpy(), v2.read_value().numpy())

      # Test convert_to_tensor
      t = ops.convert_to_tensor(v)
      self.assertAllEqual(t.numpy(), v.read_value().numpy())

      # Test operations
      self.assertAllEqual((v * 2).numpy(), (v + v).numpy())
Example no. 19
def add_execution_callback(callback):
    """Add an execution callback to the default eager context.

  An execution callback is invoked immediately after an eager operation or
  function has finished execution, providing access to the op's type, name,
  input and output tensors. Multiple execution callbacks can be added, in
  which case the callbacks will be invoked in the order in which they are
  added.

  Args:
    callback: a callable of the signature
      `f(op_type, op_name, attrs, inputs, outputs)`.
      `op_type` is the type of the operation that was just executed (e.g.,
        `MatMul`).
      `op_name` is the name of the operation that was just executed. This
        name is set by the client who created the operation and can be `None` if
        it is unset.
      `attrs` contains the attributes of the operation as a `tuple` of
        alternating attribute name and attribute value.
      `inputs` is the `list` of input `tfe.Tensor`(s) to the op.
      `outputs` is the `list` of output `tfe.Tensor`(s) from the op.
       Return value(s) from the callback are ignored.
  """
    context.get_default_context().add_post_execution_callback(callback)
def add_execution_callback(callback):
  """Add an execution callback to the default eager context.

  An execution callback is invoked immediately after an eager operation or
  function has finished execution, providing access to the op's type, name,
  input and output tensors. Multiple execution callbacks can be added, in
  which case the callbacks will be invoked in the order in which they are
  added.

  Args:
    callback: a callable of the signature
      `f(op_type, op_name, attrs, inputs, outputs)`.
      `op_type` is the type of the operation that was just executed (e.g.,
        `MatMul`).
      `op_name` is the name of the operation that was just executed. This
        name is set by the client who created the operation and can be `None` if
        it is unset.
      `attrs` contains the attributes of the operation as a `tuple` of
        alternating attribute name and attribute value.
      `inputs` is the `list` of input `Tensor`(s) to the op.
      `outputs` is the `list` of output `Tensor`(s) from the op.
       Return value(s) from the callback are ignored.
  """
  context.get_default_context().add_post_execution_callback(callback)
Example no. 21
  def testEagerIdentity(self):
    with context.eager_mode():
      ctx = context.get_default_context()
      if not ctx.num_gpus():
        self.skipTest("No GPUs found")

      def _test(x, y, device):
        self.assertAllEqual(x.numpy(), y.numpy())
        self.assertTrue(device in y.device.lower())

      with ops.device("gpu:0"):
        a = constant_op.constant([[2], [3]], dtype=dtypes.float32)
      with ops.device("gpu:0"):
        b = array_ops.identity(a)
        _test(a, b, "gpu")
      with ops.device("cpu:0"):
        c = array_ops.identity(b)
        _test(b, c, "cpu")
      with ops.device("cpu:0"):
        d = array_ops.identity(c)
        _test(c, d, "cpu")
      with ops.device("gpu:0"):
        e = array_ops.identity(d)
        _test(d, e, "gpu")
def clear_execution_callbacks():
  """Clear all execution callbacks from the default eager context."""
  context.get_default_context().clear_post_execution_callbacks()
Example no. 23
  def _init_from_args(self,
                      initial_value=None,
                      trainable=True,
                      collections=None,
                      validate_shape=True,
                      caching_device=None,
                      name=None,
                      dtype=None,
                      constraint=None):
    """Creates a variable.

    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called.
        (Note that initializer functions from init_ops.py must first be bound
         to a shape before being used here.)
      trainable: If `True`, the default, also adds the variable to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
        the default list of variables to use by the `Optimizer` classes.
      collections: List of graph collections keys. The new variable is added to
        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
      validate_shape: Ignored. Provided for compatibility with tf.Variable.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
       a Tensor) or float32 will be used (if it is a Python object convertible
       to a Tensor).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.

    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.

    @compatibility(eager)
    When Eager Execution is enabled, variables are never added to collections:
    the variable is not implicitly added to the `GLOBAL_VARIABLES` or
    `TRAINABLE_VARIABLES` collections, and the `collections` argument is
    ignored.
    @end_compatibility
    """
    if initial_value is None:
      raise ValueError("initial_value must be specified.")
    init_from_fn = callable(initial_value)

    if collections is None:
      collections = [ops.GraphKeys.GLOBAL_VARIABLES]
    if not isinstance(collections, (list, tuple, set)):
      raise ValueError(
          "collections argument to Variable constructor must be a list, tuple, "
          "or set. Got %s of type %s" % (collections, type(collections)))
    if constraint is not None and not callable(constraint):
      raise ValueError("The `constraint` argument must be a callable.")

    if isinstance(initial_value, checkpointable.CheckpointInitialValue):
      self._maybe_initialize_checkpointable()
      self._update_uid = initial_value.checkpoint_position.restore_uid
      initial_value = initial_value.wrapped_value

    self._trainable = trainable
    if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
      collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
    self._save_slice_info = None
    # Store the graph key so optimizers know how to only retrieve variables from
    # this graph.
    self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    with ops.init_scope():
      self._in_graph_mode = context.in_graph_mode()
      with ops.name_scope(name, "Variable", []
                          if init_from_fn else [initial_value]) as name:
        # pylint: disable=protected-access
        handle_name = ops._name_from_scope_name(name)
        if init_from_fn:
          # Use attr_scope and device(None) to simulate the behavior of
          # colocate_with when the variable we want to colocate with doesn't
          # yet exist.
          if self._in_graph_mode:
            attr = attr_value_pb2.AttrValue(
                list=attr_value_pb2.AttrValue.ListValue(
                    s=[compat.as_bytes("loc:@%s" % handle_name)]))
            with ops.get_default_graph()._attr_scope({"_class": attr}):
              with ops.name_scope("Initializer"), ops.device(None):
                initial_value = ops.convert_to_tensor(
                    initial_value(), name="initial_value", dtype=dtype)
              self._handle = _eager_safe_variable_handle(
                  shape=initial_value.get_shape(),
                  dtype=initial_value.dtype.base_dtype,
                  shared_name=handle_name,
                  name=name,
                  graph_mode=self._in_graph_mode)
              self._handle_device = (
                  self._handle.device if self._in_graph_mode else
                  context.get_default_context().device_name)
              self._shape = initial_value.get_shape()
          else:
            initial_value = initial_value()
            with ops.name_scope("Initializer"):
              initial_value = ops.convert_to_tensor(
                  initial_value, name="initial_value", dtype=dtype)
            self._handle = _eager_safe_variable_handle(
                shape=initial_value.get_shape(),
                dtype=initial_value.dtype.base_dtype,
                shared_name=handle_name,
                name=name,
                graph_mode=False)
            self._handle_device = (
                self._handle.device if self._in_graph_mode else
                context.get_default_context().device_name)
            self._shape = initial_value.get_shape()
        # pylint: enable=protected-access

        # Or get the initial value from a Tensor or Python object.
        else:
          with ops.name_scope("Initializer"):
            initial_value = ops.convert_to_tensor(
                initial_value, name="initial_value", dtype=dtype)
          # pylint: disable=protected-access
          if (self._in_graph_mode and initial_value is not None and
              initial_value.op._get_control_flow_context() is not None):
            raise ValueError(
                "Initializer for variable %s is from inside a control-flow "
                "construct, such as a loop or conditional. When creating a "
                "variable inside a loop or conditional, use a lambda as the "
                "initializer." % name)
          # pylint: enable=protected-access
          self._handle = _eager_safe_variable_handle(
              shape=initial_value.get_shape(),
              dtype=initial_value.dtype.base_dtype,
              shared_name=handle_name,
              name=name,
              graph_mode=self._in_graph_mode)
          self._handle_device = (self._handle.device if self._in_graph_mode else
                                 context.get_default_context().device_name)
          self._shape = initial_value.get_shape()

        self._initial_value = initial_value if self._in_graph_mode else None
        self._handle_name = handle_name + ":0"
        self._dtype = initial_value.dtype.base_dtype
        self._constraint = constraint

        if self._in_graph_mode:
          with ops.name_scope("IsInitialized"):
            self._is_initialized_op = (
                gen_resource_variable_ops.var_is_initialized_op(self._handle))
          if initial_value is not None:
            with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
              self._initializer_op = (
                  gen_resource_variable_ops.assign_variable_op(
                      self._handle,
                      self._try_guard_against_uninitialized_dependencies(
                          initial_value),
                      name=n))
          with ops.name_scope("Read"), ops.colocate_with(self._handle):
            # Manually assign reads to the handle's device to avoid log
            # messages.
            with ops.device(self._handle_device):
              value = self._read_variable_op()
            self._graph_element = value
            if caching_device is not None:
              # Variables may be created in a tf.device() or ops.colocate_with()
              # context. At the same time, users would expect caching device to
              # be independent of this context, and/or would not expect the
              # current device context to be merged with the caching device
              # spec.  Therefore we reset the colocation stack before creating
              # the cached value. Note that resetting the colocation stack will
              # also reset the device stack.
              with ops.colocate_with(None, ignore_existing=True):
                with ops.device(caching_device):
                  self._cached_value = array_ops.identity(value)
            else:
              self._cached_value = None
        else:
          gen_resource_variable_ops.assign_variable_op(self._handle,
                                                       initial_value)
          self._is_initialized_op = None
          self._initializer_op = None
          self._graph_element = None
          if caching_device:
            with ops.device(caching_device):
              self._cached_value = self._read_variable_op()
          else:
            self._cached_value = None
        if context.in_graph_mode():
          ops.add_to_collections(collections, self)
        elif ops.GraphKeys.GLOBAL_STEP in collections:
          ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, self)

    if not self._in_graph_mode:
      # After the handle has been created, set up a way to clean it up when
      # executing eagerly. We'll hold the only reference to the deleter, so that
      # when this object is garbage collected the deleter will be too. This
      # means ResourceVariables can be part of reference cycles without those
      # cycles being uncollectable, and means that no __del__ will be defined at
      # all in graph mode.
      self._handle_deleter = EagerResourceDeleter(
          handle=self._handle, handle_device=self._handle_device)
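A sketch exercising the two initializer paths the constructor distinguishes via `init_from_fn` (a concrete value versus a zero-argument callable), plus a pass-through `constraint`, in eager mode as in the earlier variable test:

# Sketch only: both initializer forms accepted by _init_from_args.
with context.eager_mode():
  # Concrete initial value: converted with convert_to_tensor right away.
  v = resource_variable_ops.ResourceVariable(
      initial_value=array_ops.ones([2, 3]),
      constraint=lambda t: t,
      name="v")
  # Callable initial value (`init_from_fn`): invoked inside the
  # "Initializer" name scope.
  w = resource_variable_ops.ResourceVariable(
      initial_value=lambda: array_ops.zeros([2, 3]),
      name="w")
  print(v.read_value().numpy())
  print(w.read_value().numpy())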
Example no. 24
def seterr(inf_or_nan=None):
    """Set how abnormal conditions are handled by the default eager context.

  Example:
  ```python
  tfe.seterr(inf_or_nan="raise")
  a = tf.constant(10.0)
  b = tf.constant(0.0)
  try:
    c = a / b  # <-- Raises InfOrNanError.
  except Exception as e:
    print("Caught Exception: %s" % e)

  tfe.seterr(inf_or_nan="ignore")
  c = a / b  # <-- Does NOT raise exception anymore.
  ```

  Args:
    inf_or_nan: Set action for infinity (`inf`) and NaN (`nan`) values.
      Possible values: `{"ignore", "print", "raise", "warn"}`.
      `"ignore"`: take no action when `inf` values appear.
      `"print"`: print a warning to `stdout`.
      `"raise"`: raise an `InfOrNanError`.
      `"warn"`: print a warning using `tf.logging.warn`.
      A value of `None` leads to no change in the action of the condition.

  Returns:
    A dictionary of old actions.

  Raises:
    ValueError: If the value of any keyword arguments is invalid.
  """
    if inf_or_nan not in _VALID_CALLBACK_ACTIONS:
        raise ValueError("Invalid action value for inf_or_nan: %s. "
                         "Valid actions are %s." %
                         (inf_or_nan, _VALID_CALLBACK_ACTIONS))

    old_settings = {"inf_or_nan": "ignore"}
    default_context = context.get_default_context()

    carryover_callbacks = []
    for callback in default_context.post_execution_callbacks:
        # Check whether the callback is inf_nan_callback or a partial object of
        # inf_nan_callback.
        if (callback == inf_nan_callback
                or isinstance(callback, functools.partial)
                and callback.func == inf_nan_callback):
            if callback == inf_nan_callback:
                old_settings["inf_or_nan"] = _DEFAULT_CALLBACK_ACTION
            else:
                old_settings["inf_or_nan"] = callback.keywords.get(
                    "action", _DEFAULT_CALLBACK_ACTION)
        elif inf_or_nan is not None:
            carryover_callbacks.append(callback)

    if inf_or_nan is not None:
        default_context.clear_post_execution_callbacks()
        for callback in carryover_callbacks:
            default_context.add_post_execution_callback(callback)
        if inf_or_nan != "ignore":
            default_context.add_post_execution_callback(
                functools.partial(inf_nan_callback, action=inf_or_nan))

    return old_settings
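Because `seterr` returns the previous settings, they can be restored afterwards; a sketch of temporarily raising on `inf`/`nan`, using the same `tfe`/`tf` aliases as the docstring example:

# Sketch only: tighten the inf/nan action temporarily, then restore it.
old = tfe.seterr(inf_or_nan="raise")
try:
  c = tf.constant(10.0) / tf.constant(0.0)  # Raises InfOrNanError.
except Exception as e:
  print("Caught: %s" % e)
finally:
  tfe.seterr(**old)  # Restore whichever action was in effect before.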
Example no. 25
def clear_execution_callbacks():
    """Clear all execution callbacks from the default eager context."""
    context.get_default_context().clear_post_execution_callbacks()
Example no. 26
def inf_nan_callback(op_type,
                     op_name,
                     attrs,
                     inputs,
                     outputs,
                     check_inf=True,
                     check_nan=True,
                     action=_DEFAULT_CALLBACK_ACTION):
    """An execution callback that checks for `inf`s and `nan`s in output tensors.

  This callback can be used with `tfe.add_execution_callback` to check for
  invalid numeric values. E.g.,
  ```python
  tfe.add_execution_callback(tfe.inf_nan_callback)
  ```

  Args:
    op_type: Name of the TFE operation type (e.g., `MatMul`).
    op_name: Name of the TFE operation. This name is set by the client and can
      be `None` if it is unset.
    attrs: Attributes of the TFE operation, as a tuple of alternating attribute
      names and attribute values.
    inputs: The `list` of input tensors to the operation, currently unused by
      this callback.
    outputs: The `list` of output tensors from the operation, checked by this
      callback for `inf` and `nan` values.
    check_inf: (`bool`) Whether this callback should check for `inf` values in
      the output tensor values.
    check_nan: (`bool`) Whether this callback should check for `nan` values in
      the output tensor values.
    action: (`str`) Action to be taken by the callback when `inf` or `nan`
      values are detected. Possible values {"raise", "warn", "print"}
      `"raise"`: Raise a `InfOrNanError`.
      `"warn"`: Log a warning using `tf.logging.warn`.
      `"print"`: Print a message to `sys.stdout`.

  Raises:
    InfOrNanError: iff `inf` or `nan` values are seen in any of `outputs` and
      `action` is `"raise"`.
    ValueError: iff the value of `action` is invalid.
  """
    del attrs, inputs  # Not used.

    ctx = context.get_default_context()

    for index, output in enumerate(outputs):
        if not output.dtype.is_numpy_compatible:
            continue

        numpy_dtype = output.dtype.as_numpy_dtype
        if (np.issubdtype(numpy_dtype, np.floating)
                or np.issubdtype(numpy_dtype, np.complexfloating)
                or np.issubdtype(numpy_dtype, np.integer)):
            try:
                check_numerics_op_attrs = ("message",
                                           "Eager-mode inf/nan check", "T",
                                           outputs[0].dtype.as_datatype_enum)
                # TODO(cais): Consider moving this into execute.py.
                # pylint: disable=protected-access
                pywrap_tensorflow.TFE_Py_Execute(ctx._handle, output.device,
                                                 "CheckNumerics",
                                                 [output._handle],
                                                 check_numerics_op_attrs, 1)
                # pylint: enable=protected-access
            except core._NotOkStatusException:  # pylint: disable=protected-access
                value = output.numpy()
                inf_detected = np.any(np.isinf(value)) and check_inf
                nan_detected = np.any(np.isnan(value)) and check_nan
                if not inf_detected and not nan_detected:
                    continue

                error = InfOrNanError(op_type, op_name, index, len(outputs),
                                      value)
                if action == "print":
                    print("Warning: %s" % str(error))
                elif action == "warn":
                    logging.warn(str(error))
                elif action == "raise":
                    raise error
                else:
                    raise ValueError(
                        "Invalid action for inf_nan_callback: %s. Valid actions are: "
                        "{print | warn | raise}" % action)
Example no. 27
def _in_gpu_device():
    return context.get_default_context()._device_index > 0  # pylint: disable=protected-access
Example no. 28
    def _init_from_args(self,
                        initial_value=None,
                        trainable=True,
                        collections=None,
                        validate_shape=True,
                        caching_device=None,
                        name=None,
                        dtype=None,
                        constraint=None):
        """Creates a variable.

    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called.
        (Note that initializer functions from init_ops.py must first be bound
         to a shape before being used here.)
      trainable: If `True`, the default, also adds the variable to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
        the default list of variables to use by the `Optimizer` classes.
      collections: List of graph collections keys. The new variable is added to
        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
      validate_shape: Ignored. Provided for compatibility with tf.Variable.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
       a Tensor) or float32 will be used (if it is a Python object convertible
       to a Tensor).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.

    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.

    @compatibility(eager)
    When Eager Execution is enabled, variables are never added to collections:
    the variable is not implicitly added to the `GLOBAL_VARIABLES` or
    `TRAINABLE_VARIABLES` collections, and the `collections` argument is
    ignored.
    @end_compatibility
    """
        if initial_value is None:
            raise ValueError("initial_value must be specified.")
        init_from_fn = callable(initial_value)

        if collections is None:
            collections = [ops.GraphKeys.GLOBAL_VARIABLES]
        if not isinstance(collections, (list, tuple, set)):
            raise ValueError(
                "collections argument to Variable constructor must be a list, tuple, "
                "or set. Got %s of type %s" % (collections, type(collections)))
        if constraint is not None and not callable(constraint):
            raise ValueError("The `constraint` argument must be a callable.")

        self._trainable = trainable
        if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
            collections = list(collections) + [
                ops.GraphKeys.TRAINABLE_VARIABLES
            ]
        self._save_slice_info = None
        # Store the graph key so optimizers know how to only retrieve variables from
        # this graph.
        self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
        with ops.init_scope():
            self._in_graph_mode = context.in_graph_mode()
            with ops.name_scope(
                    name, "Variable",
                [] if init_from_fn else [initial_value]) as name:
                # pylint: disable=protected-access
                handle_name = ops._name_from_scope_name(name)
                if init_from_fn:
                    # Use attr_scope and device(None) to simulate the behavior of
                    # colocate_with when the variable we want to colocate with doesn't
                    # yet exist.
                    if self._in_graph_mode:
                        attr = attr_value_pb2.AttrValue(
                            list=attr_value_pb2.AttrValue.ListValue(
                                s=[compat.as_bytes("loc:@%s" % handle_name)]))
                        with ops.get_default_graph()._attr_scope(
                            {"_class": attr}):
                            with ops.name_scope("Initializer"), ops.device(
                                    None):
                                initial_value = ops.convert_to_tensor(
                                    initial_value(),
                                    name="initial_value",
                                    dtype=dtype)
                            self._handle = _eager_safe_variable_handle(
                                shape=initial_value.get_shape(),
                                dtype=initial_value.dtype.base_dtype,
                                shared_name=handle_name,
                                name=name,
                                graph_mode=self._in_graph_mode)
                            self._handle_device = (
                                self._handle.device if self._in_graph_mode else
                                context.get_default_context().device_name)
                            self._shape = initial_value.get_shape()
                    else:
                        initial_value = initial_value()
                        with ops.name_scope("Initializer"):
                            initial_value = ops.convert_to_tensor(
                                initial_value,
                                name="initial_value",
                                dtype=dtype)
                        self._handle = _eager_safe_variable_handle(
                            shape=initial_value.get_shape(),
                            dtype=initial_value.dtype.base_dtype,
                            shared_name=handle_name,
                            name=name,
                            graph_mode=False)
                        self._handle_device = (
                            self._handle.device if self._in_graph_mode else
                            context.get_default_context().device_name)
                        self._shape = initial_value.get_shape()
                # pylint: enable=protected-access

                # Or get the initial value from a Tensor or Python object.
                else:
                    with ops.name_scope("Initializer"):
                        initial_value = ops.convert_to_tensor(
                            initial_value, name="initial_value", dtype=dtype)
                    # pylint: disable=protected-access
                    if (self._in_graph_mode and initial_value is not None
                            and initial_value.op._get_control_flow_context()
                            is not None):
                        raise ValueError(
                            "Initializer for variable %s is from inside a control-flow "
                            "construct, such as a loop or conditional. When creating a "
                            "variable inside a loop or conditional, use a lambda as the "
                            "initializer." % name)
                    # pylint: enable=protected-access
                    self._handle = _eager_safe_variable_handle(
                        shape=initial_value.get_shape(),
                        dtype=initial_value.dtype.base_dtype,
                        shared_name=handle_name,
                        name=name,
                        graph_mode=self._in_graph_mode)
                    self._handle_device = (
                        self._handle.device if self._in_graph_mode else
                        context.get_default_context().device_name)
                    self._shape = initial_value.get_shape()

                self._initial_value = initial_value if self._in_graph_mode else None
                self._handle_name = handle_name + ":0"
                self._dtype = initial_value.dtype.base_dtype
                self._constraint = constraint

                if self._in_graph_mode:
                    with ops.name_scope("IsInitialized"):
                        self._is_initialized_op = (
                            gen_resource_variable_ops.var_is_initialized_op(
                                self._handle))
                    if initial_value is not None:
                        with ops.name_scope("Assign") as n, ops.colocate_with(
                                self._handle):
                            self._initializer_op = (
                                gen_resource_variable_ops.assign_variable_op(
                                    self._handle,
                                    self.
                                    _try_guard_against_uninitialized_dependencies(
                                        initial_value),
                                    name=n))
                    with ops.name_scope("Read"), ops.colocate_with(
                            self._handle):
                        # Manually assign reads to the handle's device to avoid log
                        # messages.
                        with ops.device(self._handle_device):
                            value = self._read_variable_op()
                        self._graph_element = value
                        if caching_device is not None:
                            # Variables may be created in a tf.device() or ops.colocate_with()
                            # context. At the same time, users would expect caching device to
                            # be independent of this context, and/or would not expect the
                            # current device context to be merged with the caching device
                            # spec.  Therefore we reset the colocation stack before creating
                            # the cached value. Note that resetting the colocation stack will
                            # also reset the device stack.
                            with ops.colocate_with(None, ignore_existing=True):
                                with ops.device(caching_device):
                                    self._cached_value = array_ops.identity(
                                        value)
                        else:
                            self._cached_value = None
                else:
                    gen_resource_variable_ops.assign_variable_op(
                        self._handle, initial_value)
                    self._is_initialized_op = None
                    self._initializer_op = None
                    self._graph_element = None
                    if caching_device:
                        with ops.device(caching_device):
                            self._cached_value = self._read_variable_op()
                    else:
                        self._cached_value = None
                if context.in_graph_mode():
                    ops.add_to_collections(collections, self)
                elif ops.GraphKeys.GLOBAL_STEP in collections:
                    ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, self)

        if not self._in_graph_mode:
            # After the handle has been created, set up a way to clean it up when
            # executing eagerly. We'll hold the only reference to the deleter, so that
            # when this object is garbage collected the deleter will be too. This
            # means ResourceVariables can be part of reference cycles without those
            # cycles being uncollectable, and means that no __del__ will be defined at
            # all in graph mode.
            self._handle_deleter = EagerResourceDeleter(
                handle=self._handle, handle_device=self._handle_device)
def inf_nan_callback(op_type,
                     op_name,
                     attrs,
                     inputs,
                     outputs,
                     check_inf=True,
                     check_nan=True,
                     action=_DEFAULT_CALLBACK_ACTION):
  """An execution callback that checks for `inf`s and `nan`s in output tensors.

  This callback can be used with `tfe.add_execution_callback` to check for
  invalid numeric values. E.g.,
  ```python
  tfe.add_execution_callback(tfe.inf_nan_callback)
  ```

  Args:
    op_type: Name of the TFE operation type (e.g., `MatMul`).
    op_name: Name of the TFE operation. This name is set by the client and can
      be `None` if it is unset.
    attrs: Attributes of the TFE operation, as a tuple of alternating attribute
      names and attribute values.
    inputs: The `list` of input tensors to the operation, currently unused by
      this callback.
    outputs: The `list` of output tensors from the operation, checked by this
      callback for `inf` and `nan` values.
    check_inf: (`bool`) Whether this callback should check for `inf` values in
      the output tensor values.
    check_nan: (`bool`) Whether this callback should check for `nan` values in
      the output tensor values.
    action: (`str`) Action to be taken by the callback when `inf` or `nan`
      values are detected. Possible values {"raise", "warn", "print"}
      `"raise"`: Raise a `InfOrNanError`.
      `"warn"`: Log a warning using `tf.logging.warn`.
      `"print"`: Print a message to `sys.stdout`.

  Raises:
    InfOrNanError: iff `inf` or `nan` values are seen in any of `outputs` and
      `action` is `"raise"`.
    ValueError: iff the value of `action` is invalid.
  """
  del attrs, inputs  # Not used.

  ctx = context.get_default_context()

  for index, output in enumerate(outputs):
    if not output.dtype.is_numpy_compatible:
      continue

    numpy_dtype = output.dtype.as_numpy_dtype
    if (np.issubdtype(numpy_dtype, np.floating) or
        np.issubdtype(numpy_dtype, np.complexfloating) or
        np.issubdtype(numpy_dtype, np.integer)):
      try:
        check_numerics_op_attrs = (
            "message", "Eager-mode inf/nan check",
            "T", outputs[0].dtype.as_datatype_enum)
        # TODO(cais): Consider moving this into execute.py.
        # pylint: disable=protected-access
        pywrap_tensorflow.TFE_Py_Execute(
            ctx._handle, output.device, "CheckNumerics", [output._handle],
            check_numerics_op_attrs, 1)
        # pylint: enable=protected-access
      except core._NotOkStatusException:  # pylint: disable=protected-access
        value = output.numpy()
        inf_detected = np.any(np.isinf(value)) and check_inf
        nan_detected = np.any(np.isnan(value)) and check_nan
        if not inf_detected and not nan_detected:
          continue

        error = InfOrNanError(op_type, op_name, index, len(outputs), value)
        if action == "print":
          print("Warning: %s" % str(error))
        elif action == "warn":
          logging.warn(str(error))
        elif action == "raise":
          raise error
        else:
          raise ValueError(
              "Invalid action for inf_nan_callback: %s. Valid actions are: "
              "{print | warn | raise}" % action)
Example no. 30
 def as_cpu_tensor(self):
     """A copy of this Tensor with contents backed by host memory."""
     return self._copy(context.get_default_context(), "CPU:0")
def seterr(inf_or_nan=None):
  """Set how abnormal conditions are handled by the default eager context.

  Example:
  ```python
  tfe.seterr(inf_or_nan="raise")
  a = tf.constant(10.0)
  b = tf.constant(0.0)
  try:
    c = a / b  # <-- Raises InfOrNanError.
  except Exception as e:
    print("Caught Exception: %s" % e)

  tfe.seterr(inf_or_nan="ignore")
  c = a / b  # <-- Does NOT raise exception anymore.
  ```

  Args:
    inf_or_nan: Set action for infinity (`inf`) and NaN (`nan`) values.
      Possible values: `{"ignore", "print", "raise", "warn"}`.
      `"ignore"`: take no action when `inf` values appear.
      `"print"`: print a warning to `stdout`.
      `"raise"`: raise an `InfOrNanError`.
      `"warn"`: print a warning using `tf.logging.warn`.
      A value of `None` leads to no change in the action of the condition.

  Returns:
    A dictionary of old actions.

  Raises:
    ValueError: If the value of any keyword arguments is invalid.
  """
  if inf_or_nan not in _VALID_CALLBACK_ACTIONS:
    raise ValueError(
        "Invalid action value for inf_or_nan: %s. "
        "Valid actions are %s." % (inf_or_nan, _VALID_CALLBACK_ACTIONS))

  old_settings = {"inf_or_nan": "ignore"}
  default_context = context.get_default_context()

  carryover_callbacks = []
  for callback in default_context.post_execution_callbacks:
    # Check whether the callback is inf_nan_callback or a partial object of
    # inf_nan_callback.
    if (callback == inf_nan_callback or
        isinstance(callback, functools.partial) and
        callback.func == inf_nan_callback):
      if callback == inf_nan_callback:
        old_settings["inf_or_nan"] = _DEFAULT_CALLBACK_ACTION
      else:
        old_settings["inf_or_nan"] = callback.keywords.get(
            "action", _DEFAULT_CALLBACK_ACTION)
    elif inf_or_nan is not None:
      carryover_callbacks.append(callback)

  if inf_or_nan is not None:
    default_context.clear_post_execution_callbacks()
    for callback in carryover_callbacks:
      default_context.add_post_execution_callback(callback)
    if inf_or_nan != "ignore":
      default_context.add_post_execution_callback(
          functools.partial(inf_nan_callback, action=inf_or_nan))

  return old_settings
Example no. 32
    def __init__(self, value, dtype=None):
        """Creates a Tensor object from a Python object or numpy array.

    May share storage with the numpy array, in which case changes to the numpy
    object will be reflected in the Tensor.

    Arguments:
      value: A numpy.array or a Python object to create a Tensor for.
      dtype: TensorFlow dtype for the returned Tensor. If None, one will be
        automatically selected.
    """
        # TODO(ashankar): Evaluate if we can and perhaps share code with
        # tf.constant defined in
        # https://www.tensorflow.org/code/tensorflow/python/framework/constant_op.py
        self._id = tf_ops.uid()
        if not isinstance(value, np.ndarray):
            npt = None if dtype is None else dtype.as_numpy_dtype
            value = np.array(value, dtype=npt)
            if dtype is None:
                value = _maybe_modify_numpy_dtype_determination(value)
        elif dtype is not None:
            npt = dtype.as_numpy_dtype
            if npt != value.dtype:
                value = value.astype(npt)
        try:
            value = np.asarray(value, order="C")
            self._handle = pywrap_tensorflow.TFE_Py_NumpyToTensorHandle(value)
        except core._NotOkStatusException as e:  # pylint: disable=protected-access
            raise core._status_to_exception(e.code, e.message)  # pylint: disable=protected-access

        # Almost all TensorFlow kernels for GPU devices keep int32 tensors in host
        # memory.  This change approximates the same behavior for eager execution -
        # keeping int32 tensors in host memory.
        #
        # We do so to preclude the need for callers into such kernels from having to
        # explicitly place the int32 tensors in host memory. For example, prior to
        # this change one needed:
        #
        # with tfe.device('/gpu:0'):
        #   ...  # code here
        #   with tfe.device('/cpu:0'):
        #     shape = tfe.Tensor(...)
        #   y = tfe.ops.random_uniform(.., shape)
        #
        # Without the CPU device block tfe.ops.random_uniform would fail since the
        # kernel expects the shape in host memory.
        #
        # After this change, we simplify the code:
        #
        # with tfe.device('/gpu:0'):
        #   y = tfe.ops.random_uniform(.., tfe.Tensor(...))
        #
        # The approximation is not exact since if there are GPU kernels which do not
        # require host memory for int32 tensors, there will be a discrepancy between
        # eager execution and TensorFlow graphs. However, as of July 2017, there
        # were no known GPU kernels that kept int32 tensors in device memory.
        if _in_gpu_device() and value.dtype != np.int32:
            ctx = context.get_default_context()
            # pylint: disable=protected-access
            device_name = ctx.device_name
            with errors.raise_exception_on_not_ok_status() as status:
                self._handle = pywrap_tensorflow.TFE_TensorHandleCopyToDevice(
                    self._handle, ctx._handle, device_name, status)
            # pylint: enable=protected-access

        self._dtype = dtypes.as_dtype(
            pywrap_tensorflow.TFE_TensorHandleDataType(self._handle))

        # This mirrors tensorflow.core.framework.ops.Tensor._handle_data, which will
        # be None for tensors of type other than DT_RESOURCE. For DT_RESOURCE
        # tensors, this will contain a serialized HandleData proto with shape
        # inference metadata about shapes and dtypes of resources accessible from
        # this handle.
        self._handle_data = None
        if core.active_trace() is not None:
            core.active_trace().record_tensor("MANUAL", tape.tensor_id(self),
                                              self.device,
                                              self.shape.num_elements())
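A sketch of the dtype behaviour described in the constructor: the dtype is inferred from a numpy array when `dtype` is None, and an explicit `dtype` forces a cast; `tfe.Tensor` and `dtypes` follow the names used in the comments above:

# Sketch only: construct eager Tensors from a numpy array with and without an
# explicit dtype.
import numpy as np

a = np.array([[1, 2], [3, 4]], dtype=np.int64)
t1 = tfe.Tensor(a)                        # dtype inferred from the array: int64.
t2 = tfe.Tensor(a, dtype=dtypes.float32)  # Value cast to float32 first.
print(t1.dtype, t2.dtype)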