  def _lazy_read(self, op):
    if hasattr(self, "_trainable") and self._trainable:
      tape.watch_variable(self)
    return _UnreadVariable(
        self._handle, self.dtype, self._handle_device, self._shape,
        self._in_graph_mode,
        self._handle_deleter if not self._in_graph_mode else None, op)
 def _read_variable_op(self):
   if hasattr(self, "_trainable") and self._trainable:
     tape.watch_variable(self)
     return read_variable_op(self._handle, dtype=self._dtype)
   else:
     return gen_resource_variable_ops.read_variable_op(self._handle,
                                                       self._dtype)
 def _lazy_read(self, op):
   if self.trainable:
     tape.watch_variable(self)
   return _UnreadVariable(
       self._handle, self.dtype, self._shape, self._in_graph_mode,
       self._handle_deleter if not self._in_graph_mode else None, op,
       self._unique_id)
 def sparse_read(self, indices, name=None):
   """Reads the value of this variable sparsely, using `gather`."""
   with ops.name_scope("Gather" if name is None else name) as name:
     if self._trainable:
       tape.watch_variable(self)
     value = gen_resource_variable_ops.resource_gather(
         self._handle, indices, dtype=self._dtype, name=name)
   return array_ops.identity(value)
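The four methods above are internal TensorFlow resource-variable hooks: each calls tape.watch_variable so that reads of a trainable variable are recorded for differentiation. As a point of reference (not part of the code above), here is a minimal sketch of the user-facing equivalent, assuming TensorFlow 2.x: trainable variables are watched automatically, while plain tensors need an explicit watch.

 import tensorflow as tf

 v = tf.Variable(3.0)             # trainable, so the tape watches it automatically
 x = tf.constant(2.0)             # plain tensors are not watched by default

 with tf.GradientTape() as g:
   g.watch(x)                     # explicit watch for the non-variable tensor
   y = v * x * x

 dv, dx = g.gradient(y, [v, x])   # dv == x*x == 4.0, dx == 2*v*x == 12.0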
Example #5
  def watch(self, tensor):
    """Ensures that `tensor` is being traced by this tape.

    Args:
      tensor: a Tensor or list of Tensors.
    """
    for t in nest.flatten(tensor):
      if hasattr(t, "handle"):
        # There are many variable-like objects, all of them currently have
        # `handle` attribute that points to a tensor. If this changes, internals
        # of watch_variable need to change as well.
        tape.watch_variable(self._tape, t)
      else:
        tape.watch(self._tape, t)
Example #6
  def __call__(self, *args):
    """Executes the passed function in eager mode."""
    for v in self._variables:
      if v._trainable:  # pylint: disable=protected-access
        tape.watch_variable(v)

    tensor_inputs = [
        x for x in nest.flatten(args)
        if isinstance(x, ops.Tensor)
    ]

    if tape.should_record(tensor_inputs) or tape.should_record(
        self._extra_inputs):
      if not self._has_backprop:
        self._compute_backprop()
      return self._backprop_call(tensor_inputs)

    ctx = context.context()
    if ctx.in_graph_mode():
      g = ops.get_default_graph()
      if self._fdef.name not in g._functions:  # pylint: disable=protected-access
        g._add_function(self._fdef)  # pylint: disable=protected-access
      for f in self._graph._functions.values():  # pylint: disable=protected-access
        if f.name not in g._functions:  # pylint: disable=protected-access
          g._add_function(f)  # pylint: disable=protected-access
      signature = self._fdef.definition.signature
      args = list(tensor_inputs) + self._extra_inputs
      op = g.create_op(
          signature.name, [ops.convert_to_tensor(x) for x in args],
          [dtypes.DType(x.type) for x in signature.output_arg],
          op_def=signature,
          name="FunctionCall",
          compute_shapes=False)
      result = op.outputs
      if not result:
        return op
      for i, s in enumerate(self._output_shapes):
        result[i].set_shape(s)
    else:
      result = execute.execute(
          str(self._func_name),
          num_outputs=self._num_outputs,
          inputs=tensor_inputs + self._extra_inputs,
          attrs=None,
          ctx=ctx)

    return self._build_call_outputs(result)
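The __call__ above comes from an early eager-function ("defun") implementation: it watches every captured trainable variable and, if any input is being recorded, routes the call through a backprop-aware path. A rough public-API analogue, assuming TensorFlow 2.x and tf.function, shows the observable behaviour: gradients still reach variables captured by a compiled function.

 import tensorflow as tf

 v = tf.Variable(1.5)

 @tf.function
 def scale(x):
   return v * x                   # v is captured by the compiled function

 with tf.GradientTape() as g:
   y = scale(tf.constant(4.0))    # the captured trainable variable is still watched

 print(g.gradient(y, v))          # tf.Tensor(4.0, ...)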
Example #7
  def watch(self, tensor):
    """Ensures that `tensor` is being traced by this tape.

    Args:
      tensor: a Tensor or list of Tensors.
    """
    for t in nest.flatten(tensor):
      if not t.dtype.is_floating:
        logging.log_first_n(
            logging.WARN, "The dtype of the watched tensor must be "
            "floating (e.g. tf.float32), got %r", 5, t.dtype)
      if hasattr(t, "handle"):
        # There are many variable-like objects, all of them currently have
        # `handle` attribute that points to a tensor. If this changes, internals
        # of watch_variable need to change as well.
        tape.watch_variable(self._tape, t)
      else:
        tape.watch(self._tape, t)
Example #8
  def __call__(self, *args):
    nest.assert_same_structure(self.shape_and_dtypes, args, check_types=False)
    if not all([
        shape.is_compatible_with(arg.shape)
        for shape, arg in zip(self.flattened_shapes, nest.flatten(args))
    ]):
      raise ValueError(
          "Declared shapes do not match argument shapes: Expected %s, found %s."
          % (self.flattened_shapes, [arg.shape for arg in nest.flatten(args)]))

    initialized = [resource_variable_ops.var_is_initialized_op(
        v.handle).numpy() for v in self._call_fn.variables]
    if all(x for x in initialized):
      for v in self._call_fn.variables:
        if v._trainable:  # pylint: disable=protected-access
          tape.watch_variable(v)
      return self._call_fn(*args)
    elif all(not x for x in initialized):
      return self._init_fn(*args)
    else:
      raise ValueError("Some, but not all, variables are initialized.")
Example #9
  def __call__(self, *args):
    """Executes the passed function in eager mode."""
    for v in self._variables:
      if v._trainable:  # pylint: disable=protected-access
        tape.watch_variable(v)

    tensor_inputs = [x for x in nest.flatten(args) if isinstance(x, ops.Tensor)]
    if tape.should_record(tensor_inputs) or tape.should_record(
        self._extra_inputs):
      if self._backward_function is None:
        self._construct_backprop_function()
      return self._backprop_call(tensor_inputs)

    ctx = context.context()
    if ctx.executing_eagerly():
      result = execute.execute(
          str(self._func_name),
          num_outputs=self._num_outputs,
          inputs=tensor_inputs + self._extra_inputs,
          attrs=None,
          ctx=ctx)
    else:
      g = ops.get_default_graph()
      self.add_to_graph(g)
      signature = self._function_def.definition.signature
      args = list(tensor_inputs) + self._extra_inputs
      op = g.create_op(
          signature.name,
          [ops.internal_convert_to_tensor(x, ctx=ctx) for x in args],
          tuple(dtypes_module.DType(x.type) for x in signature.output_arg),
          op_def=signature,
          name="FunctionCall",
          compute_shapes=False)
      result = op.outputs
      if not result:
        return op
      for i, s in enumerate(self._output_shapes):
        result[i].set_shape(s)

    return self._build_call_outputs(result)
Example #10
  def watch(self, tensor):
    """Ensures that `tensor` is being traced by this tape.

    Args:
      tensor: a Tensor or list of Tensors.

    Raises:
      ValueError: if it encounters something that is not a tensor.
    """
    for t in nest.flatten(tensor):
      if not (_pywrap_utils.IsTensor(t) or _pywrap_utils.IsVariable(t)):
        raise ValueError("Passed in object of type {}, not tf.Tensor".format(
            type(t)))
      if not t.dtype.is_floating:
        logging.log_first_n(
            logging.WARN, "The dtype of the watched tensor must be "
            "floating (e.g. tf.float32), got %r", 5, t.dtype)
      if hasattr(t, "handle"):
        # There are many variable-like objects, all of them currently have
        # `handle` attribute that points to a tensor. If this changes, internals
        # of watch_variable need to change as well.
        tape.watch_variable(self._tape, t)
      else:
        tape.watch(self._tape, t)
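This variant of watch adds a check that rejects non-tensor objects and warns about non-floating dtypes. A small sketch of the user-visible effect, assuming TensorFlow 2.x: watching an integer tensor only logs the warning shown above, and its gradient comes back as None.

 import tensorflow as tf

 x_int = tf.constant(3)           # integer dtype: triggers the "must be floating" warning
 x_flt = tf.constant(3.0)

 with tf.GradientTape() as g:
   g.watch(x_int)
   g.watch(x_flt)
   y = tf.cast(x_int, tf.float32) * x_flt

 grads = g.gradient(y, [x_flt, x_int])
 print(grads[0])                  # tf.Tensor(3.0, ...)
 print(grads[1])                  # None: gradients do not flow through integer tensors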
Example #11
 def fn():
   tape.watch_variable(x)
   b = tensor.Tensor(2.0)
   c = math_ops.add(x.value(), b)
   return math_ops.add(c, tensor.Tensor(3.0))
Example #12
 def fn():
   tape.watch_variable(x)
   a = math_ops.add(x.value(), 1.0)
   # Make sure convert_to_tensor works correctly with list of TensorNodes.
   b = array_ops.stack([a, a], axis=0)
   return math_ops.reduce_mean(b)
Example #13
 def fn():
     tape.watch_variable(x)
     a = math_ops.add(x.value(), 1.0)
     # Make sure convert_to_tensor works correctly with list of TensorNodes.
     b = array_ops.stack([a, a], axis=0)
     return math_ops.reduce_mean(b)
Example #14
 def f():
     tape.watch_variable(embedding)
     embedded_x = embedding_ops.embedding_lookup(embedding, x)
     return constant_op.constant(1.0, dtypes.float32) - embedded_x
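Example #14 is from a test that checks gradients flow back through an embedding lookup once the embedding variable is watched. A public-API sketch of the same idea, assuming TensorFlow 2.x (the shapes and ids below are made up for illustration):

 import tensorflow as tf

 embedding = tf.Variable(tf.random.normal([10, 4]))   # 10 rows of width 4
 ids = tf.constant([1, 3])

 with tf.GradientTape() as g:                         # trainable variable: watched automatically
   looked_up = tf.nn.embedding_lookup(embedding, ids)
   loss = tf.reduce_sum(looked_up)

 grad = g.gradient(loss, embedding)
 print(type(grad).__name__)                           # IndexedSlices: only rows 1 and 3 receive gradients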
Example #15
 def g(x):
     tape.watch_variable(three)
     return f(x)
Example #16
 def g(x):
   tape.watch_variable(x)
   y = math_ops.add(x, three)
   f(y)
Example #17
 def f():
   tape.watch_variable(embedding)
   embedded_x = embedding_ops.embedding_lookup(embedding, x)
   return constant_op.constant(1.0, dtypes.float32) - embedded_x
Example #18
 def g(x):
     tape.watch_variable(x)
     y = math_ops.add(x, three)
     f(y)
Example #19
 def _read_variable_op(self):
   if self.trainable:
     tape.watch_variable(self)
   return gen_resource_variable_ops.read_variable_op(self._handle,
                                                     self._dtype)
Example #20
 def _read_variable_op(self):
   if self.trainable:
     tape.watch_variable(self)
   return gen_resource_variable_ops.read_variable_op(self._handle,
                                                     self._dtype)
Example #21
 def fn():
   tape.watch_variable(x)
   b = constant_op.constant(2.0)
   c = math_ops.add(x.value(), b)
   return math_ops.add(c, constant_op.constant(3.0))
Example #22
 def _read_variable_op(self):
     if hasattr(self, "_trainable") and self._trainable:
         tape.watch_variable(self)
     return gen_resource_variable_ops.read_variable_op(
         self._handle, self._dtype)
Example #23
 def _lazy_read(self, op):
     if hasattr(self, "_trainable") and self._trainable:
         tape.watch_variable(self)
     return _UnreadVariable(
         self._handle, self.dtype, self._shape, self._in_graph_mode,
         self._handle_deleter if not self._in_graph_mode else None, op)
Example #24
 def f():
   tape.watch_variable(embedding)
   embedded_x = embedding_ops.embedding_lookup(embedding, x)
   return tensor.Tensor(1.0, dtypes.float32) - embedded_x
Example #25
0
 def read(self, want_gradients=True):
   if want_gradients and self.trainable:
     v = tape.watch_variable(self.variable)
   else:
     v = self.variable
   return v.read_value()
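Example #25 gates the watch on a want_gradients flag so a read can opt out of gradient tracking. A rough public-API counterpart, assuming TensorFlow 2.x, is to wrap the read in tf.stop_gradient when no gradient is wanted:

 import tensorflow as tf

 v = tf.Variable(5.0)

 with tf.GradientTape(persistent=True) as g:
   y_tracked = v.read_value() * 2.0        # this read is recorded, since v is trainable
   y_blocked = tf.stop_gradient(v) * 2.0   # this read is cut out of the gradient path

 print(g.gradient(y_tracked, v))           # tf.Tensor(2.0, ...)
 print(g.gradient(y_blocked, v))           # None
 del g                                     # release the resources held by the persistent tape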
Example #26
 def g(x):
   tape.watch_variable(three)
   return f(x)
Example #27
 def f():
     tape.watch_variable(embedding)
     embedded_x = embedding_ops.embedding_lookup(embedding, x)
     return tensor.Tensor(1.0, dtypes.float32) - embedded_x
Example #28
 def inner():
     tape.watch_variable(v)
     return v * v
Example #29
 def fn():
     tape.watch_variable(x)
     b = tensor.Tensor(2.0)
     c = math_ops.add(x.value(), b)
     return math_ops.add(c, tensor.Tensor(3.0))
Example #30
 def f():
     with context.device('gpu:0'):
         tape.watch_variable(v)
         return v.read_value()
Example #31
 def inner():
   tape.watch_variable(v)
   return v * v
Example #32
 def fn():
     tape.watch_variable(x)
     b = constant_op.constant(2.0)
     c = math_ops.add(x.value(), b)
     return math_ops.add(c, constant_op.constant(3.0))
Example #33
 def f():
   with context.device('gpu:0'):
     tape.watch_variable(v)
     return v.read_value()
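Examples #30 and #33 read a watched variable inside a device scope. A minimal public-API sketch of the same point, assuming TensorFlow 2.x and that a CPU device is available: where the read is placed has no effect on whether the variable is watched.

 import tensorflow as tf

 v = tf.Variable(2.0)

 with tf.GradientTape() as g:
   with tf.device("/CPU:0"):      # pinning the read to a device does not affect watching
     y = v.read_value() * 3.0

 print(g.gradient(y, v))          # tf.Tensor(3.0, ...)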